author     Greg Kroah-Hartman <gregkh@suse.de>   2009-12-23 10:00:05 -0800
committer  Greg Kroah-Hartman <gregkh@suse.de>   2009-12-23 10:00:05 -0800
commit     193cb93e5a5f32c0520eed17e87135d20594d1e1 (patch)
tree       3b5584f4e5e73f5f01807e5176cbe29e1db69868
parent     23e707dd9b0b26215d801c59e87feed1c218a0f9 (diff)
parent     f42ecb2808db5386f983d593a7c08d3ea3b94a27 (diff)
download   kernel-crypto-193cb93e5a5f32c0520eed17e87135d20594d1e1.tar.gz
           kernel-crypto-193cb93e5a5f32c0520eed17e87135d20594d1e1.tar.xz
           kernel-crypto-193cb93e5a5f32c0520eed17e87135d20594d1e1.zip

    Merge branch 'master' of /home/gregkh/linux/git/torvalds-2.6

Diffstat:
-rw-r--r--.gitignore7
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb13
-rw-r--r--Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc13
-rw-r--r--Documentation/ABI/testing/sysfs-devices-memory14
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu47
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-slab109
-rw-r--r--Documentation/ABI/testing/sysfs-memory-page-offline44
-rw-r--r--Documentation/Changes2
-rw-r--r--Documentation/DocBook/Makefile38
-rw-r--r--Documentation/DocBook/media-entities.tmpl18
-rw-r--r--Documentation/DocBook/media-indices.tmpl4
-rw-r--r--Documentation/DocBook/procfs-guide.tmpl626
-rw-r--r--Documentation/DocBook/procfs_example.c201
-rw-r--r--Documentation/DocBook/v4l/common.xml35
-rw-r--r--Documentation/DocBook/v4l/compat.xml16
-rw-r--r--Documentation/DocBook/v4l/v4l2.xml26
-rw-r--r--Documentation/DocBook/v4l/videodev2.h.xml116
-rw-r--r--Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml238
-rw-r--r--Documentation/DocBook/v4l/vidioc-enuminput.xml36
-rw-r--r--Documentation/DocBook/v4l/vidioc-enumoutput.xml36
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-dv-preset.xml111
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-dv-timings.xml224
-rw-r--r--Documentation/DocBook/v4l/vidioc-g-std.xml6
-rw-r--r--Documentation/DocBook/v4l/vidioc-query-dv-preset.xml85
-rw-r--r--Documentation/DocBook/v4l/vidioc-querystd.xml6
-rw-r--r--Documentation/SubmitChecklist5
-rw-r--r--Documentation/acpi/method-customizing.txt66
-rw-r--r--Documentation/arm/OMAP/DSS317
-rw-r--r--Documentation/blackfin/00-INDEX3
-rw-r--r--Documentation/blackfin/Makefile6
-rw-r--r--Documentation/blackfin/cache-lock.txt48
-rw-r--r--Documentation/blackfin/cachefeatures.txt10
-rw-r--r--Documentation/blackfin/gptimers-example.c83
-rw-r--r--Documentation/cpu-freq/cpu-drivers.txt6
-rw-r--r--Documentation/cpu-freq/user-guide.txt11
-rw-r--r--Documentation/cpu-hotplug.txt55
-rw-r--r--Documentation/device-mapper/snapshot.txt60
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/fb/viafb.txt12
-rw-r--r--Documentation/feature-removal-schedule.txt35
-rw-r--r--Documentation/filesystems/00-INDEX12
-rw-r--r--Documentation/filesystems/ext3.txt4
-rw-r--r--Documentation/filesystems/nfs/00-INDEX16
-rw-r--r--Documentation/filesystems/nfs/Exporting (renamed from Documentation/filesystems/Exporting)0
-rw-r--r--Documentation/filesystems/nfs/knfsd-stats.txt (renamed from Documentation/filesystems/knfsd-stats.txt)0
-rw-r--r--Documentation/filesystems/nfs/nfs-rdma.txt (renamed from Documentation/filesystems/nfs-rdma.txt)0
-rw-r--r--Documentation/filesystems/nfs/nfs.txt (renamed from Documentation/filesystems/nfs.txt)0
-rw-r--r--Documentation/filesystems/nfs/nfs41-server.txt (renamed from Documentation/filesystems/nfs41-server.txt)9
-rw-r--r--Documentation/filesystems/nfs/nfsroot.txt (renamed from Documentation/filesystems/nfsroot.txt)0
-rw-r--r--Documentation/filesystems/nfs/rpc-cache.txt (renamed from Documentation/filesystems/rpc-cache.txt)0
-rw-r--r--Documentation/filesystems/nilfs2.txt7
-rw-r--r--Documentation/filesystems/porting2
-rw-r--r--Documentation/filesystems/proc.txt9
-rw-r--r--Documentation/filesystems/seq_file.txt4
-rw-r--r--Documentation/filesystems/vfs.txt2
-rw-r--r--Documentation/gpio.txt15
-rw-r--r--Documentation/hwmon/k10temp60
-rw-r--r--Documentation/hwmon/lis3lv02d55
-rw-r--r--Documentation/hwmon/w83627ehf10
-rw-r--r--Documentation/i2c/writing-clients2
-rw-r--r--Documentation/infiniband/ipoib.txt10
-rw-r--r--Documentation/isdn/README.gigaset116
-rw-r--r--Documentation/kbuild/kbuild.txt14
-rw-r--r--Documentation/kbuild/kconfig.txt8
-rw-r--r--Documentation/kernel-parameters.txt18
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt114
-rw-r--r--Documentation/lockstat.txt12
-rw-r--r--Documentation/md.txt72
-rw-r--r--Documentation/memory-hotplug.txt11
-rw-r--r--Documentation/misc-devices/ad525x_dpot.txt57
-rw-r--r--Documentation/nommu-mmap.txt26
-rw-r--r--Documentation/power/runtime_pm.txt223
-rw-r--r--Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt93
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/board.txt4
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/mpc5200.txt17
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/mpic.txt42
-rw-r--r--Documentation/powerpc/dts-bindings/nintendo/gamecube.txt109
-rw-r--r--Documentation/powerpc/dts-bindings/nintendo/wii.txt184
-rw-r--r--Documentation/powerpc/dts-bindings/xilinx.txt11
-rw-r--r--Documentation/serial/hayes-esp.txt154
-rw-r--r--Documentation/serial/tty.txt9
-rw-r--r--Documentation/spinlocks.txt184
-rw-r--r--Documentation/sysctl/kernel.txt31
-rw-r--r--Documentation/thermal/sysfs-api.txt1
-rw-r--r--Documentation/trace/events-kmem.txt14
-rw-r--r--Documentation/usb/power-management.txt69
-rw-r--r--Documentation/video4linux/gspca.txt34
-rw-r--r--Documentation/video4linux/sh_mobile_ceu_camera.txt157
-rw-r--r--Documentation/video4linux/v4l2-framework.txt16
-rw-r--r--Documentation/vm/hugetlbpage.txt262
-rw-r--r--Documentation/vm/hwpoison.txt52
-rw-r--r--Documentation/vm/ksm.txt22
-rw-r--r--Documentation/vm/page-types.c83
-rw-r--r--Kbuild4
-rw-r--r--MAINTAINERS105
-rw-r--r--Makefile107
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/boot/bootp.c2
-rw-r--r--arch/alpha/boot/bootpz.c2
-rw-r--r--arch/alpha/boot/main.c2
-rw-r--r--arch/alpha/include/asm/asm-offsets.h1
-rw-r--r--arch/alpha/include/asm/bug.h3
-rw-r--r--arch/alpha/include/asm/core_t2.h34
-rw-r--r--arch/alpha/include/asm/elf.h1
-rw-r--r--arch/alpha/include/asm/fcntl.h19
-rw-r--r--arch/alpha/include/asm/perf_event.h9
-rw-r--r--arch/alpha/include/asm/spinlock.h38
-rw-r--r--arch/alpha/include/asm/spinlock_types.h8
-rw-r--r--arch/alpha/include/asm/unistd.h17
-rw-r--r--arch/alpha/kernel/core_t2.c2
-rw-r--r--arch/alpha/kernel/irq.c4
-rw-r--r--arch/alpha/kernel/osf_sys.c19
-rw-r--r--arch/alpha/kernel/srm_env.c65
-rw-r--r--arch/alpha/kernel/systbls.S16
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/Kconfig.debug8
-rw-r--r--arch/arm/Makefile14
-rw-r--r--arch/arm/common/dmabounce.c12
-rw-r--r--arch/arm/configs/htcherald_defconfig9
-rw-r--r--arch/arm/configs/omap3_touchbook_defconfig2431
-rw-r--r--arch/arm/configs/omap_3430sdp_defconfig28
-rw-r--r--arch/arm/configs/omap_4430sdp_defconfig146
-rw-r--r--arch/arm/configs/omap_zoom2_defconfig3
-rw-r--r--arch/arm/configs/omap_zoom3_defconfig3
-rw-r--r--arch/arm/configs/zeus_defconfig2032
-rw-r--r--arch/arm/include/asm/asm-offsets.h1
-rw-r--r--arch/arm/include/asm/cacheflush.h17
-rw-r--r--arch/arm/include/asm/elf.h1
-rw-r--r--arch/arm/include/asm/mach-types.h1
-rw-r--r--arch/arm/include/asm/mach/irq.h4
-rw-r--r--arch/arm/include/asm/mman.h3
-rw-r--r--arch/arm/include/asm/spinlock.h40
-rw-r--r--arch/arm/include/asm/spinlock_types.h8
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/armksyms.c20
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/early_printk.c57
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/irq.c12
-rw-r--r--arch/arm/kernel/smp_twd.c1
-rw-r--r--arch/arm/kernel/sys_arm.c55
-rw-r--r--arch/arm/kernel/vmlinux.lds.S13
-rw-r--r--arch/arm/mach-at91/include/mach/atmel-mci.h24
-rw-r--r--arch/arm/mach-bcmring/arch.c10
-rw-r--r--arch/arm/mach-bcmring/include/mach/reg_nand.h66
-rw-r--r--arch/arm/mach-bcmring/include/mach/reg_umi.h237
-rw-r--r--arch/arm/mach-clps711x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c24
-rw-r--r--arch/arm/mach-davinci/include/mach/nand.h4
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h11
-rw-r--r--arch/arm/mach-footbridge/common.c22
-rw-r--r--arch/arm/mach-footbridge/include/mach/memory.h15
-rw-r--r--arch/arm/mach-integrator/include/mach/memory.h3
-rw-r--r--arch/arm/mach-ixp2000/include/mach/memory.h12
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/memory.h19
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig22
-rw-r--r--arch/arm/mach-ixp4xx/avila-pci.c42
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c4
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c6
-rw-r--r--arch/arm/mach-ixp4xx/common.c2
-rw-r--r--arch/arm/mach-ixp4xx/coyote-pci.c22
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c9
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-pci.c46
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c17
-rw-r--r--arch/arm/mach-ixp4xx/fsg-pci.c31
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c8
-rw-r--r--arch/arm/mach-ixp4xx/goramo_mlr.c45
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-pci.c40
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c30
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/avila.h39
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/coyote.h33
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/dsmg600.h52
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/fsg.h50
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/gtwx5715.h116
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/hardware.h18
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h307
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/irqs.h69
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/ixdp425.h39
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/nas100d.h52
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/npe.h2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/nslu2.h55
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/prpmc1100.h33
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/timex.h2
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-pci.c43
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c12
-rw-r--r--arch/arm/mach-ixp4xx/ixp4xx_npe.c2
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-pci.c41
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c16
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-pci.c35
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c21
-rw-r--r--arch/arm/mach-kirkwood/Kconfig6
-rw-r--r--arch/arm/mach-kirkwood/Makefile1
-rw-r--r--arch/arm/mach-kirkwood/netspace_v2-setup.c325
-rw-r--r--arch/arm/mach-lh7a40x/clocks.c8
-rw-r--r--arch/arm/mach-msm/Kconfig30
-rw-r--r--arch/arm/mach-msm/Makefile1
-rw-r--r--arch/arm/mach-msm/board-dream.c93
-rw-r--r--arch/arm/mach-msm/board-dream.h5
-rw-r--r--arch/arm/mach-msm/include/mach/debug-macro.S24
-rw-r--r--arch/arm/mach-msm/include/mach/mmc.h26
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h12
-rw-r--r--arch/arm/mach-msm/include/mach/uncompress.h7
-rw-r--r--arch/arm/mach-msm/io.c3
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c11
-rw-r--r--arch/arm/mach-ns9xxx/irq.c8
-rw-r--r--arch/arm/mach-omap1/Makefile10
-rw-r--r--arch/arm/mach-omap1/board-fsample.c60
-rw-r--r--arch/arm/mach-omap1/board-h2.c59
-rw-r--r--arch/arm/mach-omap1/board-h3.c66
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c64
-rw-r--r--arch/arm/mach-omap1/board-innovator.c12
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c2
-rw-r--r--arch/arm/mach-omap1/board-osk.c10
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c58
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c10
-rw-r--r--arch/arm/mach-omap1/clock.c501
-rw-r--r--arch/arm/mach-omap1/clock.h652
-rw-r--r--arch/arm/mach-omap1/clock_data.c843
-rw-r--r--arch/arm/mach-omap1/i2c.c39
-rw-r--r--arch/arm/mach-omap1/include/mach/lcd_dma.h78
-rw-r--r--arch/arm/mach-omap1/include/mach/lcdc.h57
-rw-r--r--arch/arm/mach-omap1/io.c3
-rw-r--r--arch/arm/mach-omap1/lcd_dma.c448
-rw-r--r--arch/arm/mach-omap1/mux.c8
-rw-r--r--arch/arm/mach-omap1/opp.h28
-rw-r--r--arch/arm/mach-omap1/opp_data.c59
-rw-r--r--arch/arm/mach-omap2/Kconfig43
-rw-r--r--arch/arm/mach-omap2/Makefile23
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c184
-rwxr-xr-xarch/arm/mach-omap2/board-3630sdp.c14
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c11
-rw-r--r--arch/arm/mach-omap2/board-apollon.c10
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c100
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c13
-rw-r--r--arch/arm/mach-omap2/board-ldp.c12
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c23
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c23
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c43
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c572
-rw-r--r--arch/arm/mach-omap2/board-overo.c16
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c129
-rw-r--r--arch/arm/mach-omap2/board-rx51.c16
-rwxr-xr-xarch/arm/mach-omap2/board-zoom-peripherals.c16
-rw-r--r--arch/arm/mach-omap2/board-zoom2.c10
-rw-r--r--arch/arm/mach-omap2/board-zoom3.c10
-rw-r--r--arch/arm/mach-omap2/clock.c47
-rw-r--r--arch/arm/mach-omap2/clock.h50
-rw-r--r--arch/arm/mach-omap2/clock24xx.c805
-rw-r--r--arch/arm/mach-omap2/clock2xxx.c587
-rw-r--r--arch/arm/mach-omap2/clock2xxx.h41
-rw-r--r--arch/arm/mach-omap2/clock2xxx_data.c (renamed from arch/arm/mach-omap2/clock24xx.h)836
-rw-r--r--arch/arm/mach-omap2/clock34xx.c953
-rw-r--r--arch/arm/mach-omap2/clock34xx.h2999
-rw-r--r--arch/arm/mach-omap2/clock34xx_data.c3289
-rw-r--r--arch/arm/mach-omap2/clock44xx.c33
-rw-r--r--arch/arm/mach-omap2/clock44xx.h15
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c2766
-rw-r--r--arch/arm/mach-omap2/clock_common_data.c39
-rw-r--r--arch/arm/mach-omap2/clockdomain.c6
-rw-r--r--arch/arm/mach-omap2/cm-regbits-44xx.h1474
-rw-r--r--arch/arm/mach-omap2/cm.c7
-rw-r--r--arch/arm/mach-omap2/cm.h15
-rw-r--r--arch/arm/mach-omap2/cm44xx.h358
-rw-r--r--arch/arm/mach-omap2/devices.c62
-rw-r--r--arch/arm/mach-omap2/dpll.c538
-rw-r--r--arch/arm/mach-omap2/gpmc-smc91x.c8
-rw-r--r--arch/arm/mach-omap2/gpmc.c2
-rw-r--r--arch/arm/mach-omap2/i2c.c56
-rw-r--r--arch/arm/mach-omap2/id.c31
-rw-r--r--arch/arm/mach-omap2/io.c9
-rw-r--r--arch/arm/mach-omap2/mux.c1061
-rw-r--r--arch/arm/mach-omap2/mux.h163
-rw-r--r--arch/arm/mach-omap2/mux34xx.c2099
-rw-r--r--arch/arm/mach-omap2/mux34xx.h398
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S35
-rw-r--r--arch/arm/mach-omap2/omap-smp.c31
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c164
-rw-r--r--arch/arm/mach-omap2/opp2420_data.c126
-rw-r--r--arch/arm/mach-omap2/opp2430_data.c133
-rw-r--r--arch/arm/mach-omap2/opp2xxx.h424
-rw-r--r--arch/arm/mach-omap2/pm-debug.c4
-rw-r--r--arch/arm/mach-omap2/powerdomain.c36
-rw-r--r--arch/arm/mach-omap2/powerdomains34xx.h1
-rw-r--r--arch/arm/mach-omap2/prcm-common.h73
-rw-r--r--arch/arm/mach-omap2/prcm.c13
-rw-r--r--arch/arm/mach-omap2/prm-regbits-44xx.h2205
-rw-r--r--arch/arm/mach-omap2/prm.h8
-rw-r--r--arch/arm/mach-omap2/prm44xx.h411
-rw-r--r--arch/arm/mach-omap2/sdrc.c16
-rw-r--r--arch/arm/mach-omap2/sdrc.h19
-rw-r--r--arch/arm/mach-omap2/serial.c95
-rw-r--r--arch/arm/mach-omap2/sram34xx.S19
-rw-r--r--arch/arm/mach-omap2/usb-ehci.c166
-rw-r--r--arch/arm/mach-pxa/Kconfig17
-rw-r--r--arch/arm/mach-pxa/Makefile1
-rw-r--r--arch/arm/mach-pxa/devices.c2
-rw-r--r--arch/arm/mach-pxa/em-x270.c11
-rw-r--r--arch/arm/mach-pxa/include/mach/arcom-pcmcia.h11
-rw-r--r--arch/arm/mach-pxa/include/mach/viper.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/zeus.h82
-rw-r--r--arch/arm/mach-pxa/viper.c20
-rw-r--r--arch/arm/mach-pxa/zeus.c820
-rw-r--r--arch/arm/mach-realview/Kconfig2
-rw-r--r--arch/arm/mach-s3c2410/include/mach/spi.h2
-rw-r--r--arch/arm/mach-s3c2442/mach-gta02.c3
-rw-r--r--arch/arm/mach-s3c24a0/include/mach/memory.h2
-rw-r--r--arch/arm/mach-sa1100/Kconfig13
-rw-r--r--arch/arm/mach-sa1100/generic.c12
-rw-r--r--arch/arm/mach-u300/include/mach/coh901318.h281
-rw-r--r--arch/arm/mach-w90x900/include/mach/nuc900_spi.h35
-rw-r--r--arch/arm/mm/cache-fa.S11
-rw-r--r--arch/arm/mm/cache-l2x0.c93
-rw-r--r--arch/arm/mm/cache-v3.S9
-rw-r--r--arch/arm/mm/cache-v4.S9
-rw-r--r--arch/arm/mm/cache-v4wb.S11
-rw-r--r--arch/arm/mm/cache-v4wt.S11
-rw-r--r--arch/arm/mm/cache-v6.S11
-rw-r--r--arch/arm/mm/cache-v7.S13
-rw-r--r--arch/arm/mm/flush.c4
-rw-r--r--arch/arm/mm/highmem.c2
-rw-r--r--arch/arm/mm/mmap.c3
-rw-r--r--arch/arm/mm/nommu.c2
-rw-r--r--arch/arm/mm/proc-arm1020.S11
-rw-r--r--arch/arm/mm/proc-arm1020e.S11
-rw-r--r--arch/arm/mm/proc-arm1022.S11
-rw-r--r--arch/arm/mm/proc-arm1026.S11
-rw-r--r--arch/arm/mm/proc-arm920.S11
-rw-r--r--arch/arm/mm/proc-arm922.S11
-rw-r--r--arch/arm/mm/proc-arm925.S11
-rw-r--r--arch/arm/mm/proc-arm926.S11
-rw-r--r--arch/arm/mm/proc-arm940.S9
-rw-r--r--arch/arm/mm/proc-arm946.S11
-rw-r--r--arch/arm/mm/proc-feroceon.S15
-rw-r--r--arch/arm/mm/proc-mohawk.S11
-rw-r--r--arch/arm/mm/proc-syms.c3
-rw-r--r--arch/arm/mm/proc-v6.S5
-rw-r--r--arch/arm/mm/proc-xsc3.S11
-rw-r--r--arch/arm/mm/proc-xscale.S13
-rw-r--r--arch/arm/plat-mxc/Makefile1
-rw-r--r--arch/arm/plat-mxc/ehci.c92
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc_ehci.h37
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc_nand.h3
-rw-r--r--arch/arm/plat-omap/Kconfig63
-rw-r--r--arch/arm/plat-omap/clock.c26
-rw-r--r--arch/arm/plat-omap/common.c4
-rw-r--r--arch/arm/plat-omap/debug-devices.c10
-rw-r--r--arch/arm/plat-omap/debug-leds.c2
-rw-r--r--arch/arm/plat-omap/devices.c68
-rw-r--r--arch/arm/plat-omap/dma.c410
-rw-r--r--arch/arm/plat-omap/fb.c49
-rw-r--r--arch/arm/plat-omap/gpio.c2
-rw-r--r--arch/arm/plat-omap/i2c.c44
-rw-r--r--arch/arm/plat-omap/include/plat/board.h9
-rw-r--r--arch/arm/plat-omap/include/plat/clkdev_omap.h41
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h5
-rw-r--r--arch/arm/plat-omap/include/plat/common.h35
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h31
-rw-r--r--arch/arm/plat-omap/include/plat/display.h575
-rw-r--r--arch/arm/plat-omap/include/plat/dma.h60
-rw-r--r--arch/arm/plat-omap/include/plat/gpmc.h2
-rw-r--r--arch/arm/plat-omap/include/plat/i2c.h39
-rw-r--r--arch/arm/plat-omap/include/plat/irqs.h16
-rw-r--r--arch/arm/plat-omap/include/plat/mux.h232
-rw-r--r--arch/arm/plat-omap/include/plat/omap44xx.h6
-rw-r--r--arch/arm/plat-omap/include/plat/omap_device.h8
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h26
-rw-r--r--arch/arm/plat-omap/include/plat/powerdomain.h17
-rw-r--r--arch/arm/plat-omap/include/plat/sdrc.h9
-rw-r--r--arch/arm/plat-omap/include/plat/serial.h1
-rw-r--r--arch/arm/plat-omap/include/plat/smp.h2
-rw-r--r--arch/arm/plat-omap/include/plat/vram.h62
-rw-r--r--arch/arm/plat-omap/include/plat/vrfb.h50
-rw-r--r--arch/arm/plat-omap/mux.c8
-rw-r--r--arch/arm/plat-omap/omap_device.c18
-rw-r--r--arch/arm/plat-omap/sram.c20
-rw-r--r--arch/arm/plat-omap/usb.c8
-rw-r--r--arch/arm/plat-s3c/include/plat/nand.h2
-rw-r--r--arch/arm/tools/Makefile2
-rw-r--r--arch/arm/tools/gen-mach-types2
-rw-r--r--arch/arm/tools/mach-types44
-rw-r--r--arch/arm/vfp/vfpmodule.c83
-rw-r--r--arch/avr32/Kconfig13
-rw-r--r--arch/avr32/Makefile2
-rw-r--r--arch/avr32/boards/atngw100/Kconfig25
-rw-r--r--arch/avr32/boards/atngw100/evklcd10x.c7
-rw-r--r--arch/avr32/boards/atngw100/mrmt.c1
-rw-r--r--arch/avr32/boards/atngw100/setup.c121
-rw-r--r--arch/avr32/configs/atngw100_defconfig383
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig605
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig599
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1414
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1549
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1549
-rw-r--r--arch/avr32/configs/atstk1002_defconfig415
-rw-r--r--arch/avr32/configs/atstk1006_defconfig297
-rw-r--r--arch/avr32/include/asm/asm-offsets.h1
-rw-r--r--arch/avr32/include/asm/elf.h1
-rw-r--r--arch/avr32/include/asm/hardirq.h19
-rw-r--r--arch/avr32/include/asm/syscalls.h4
-rw-r--r--arch/avr32/kernel/irq.c13
-rw-r--r--arch/avr32/kernel/sys_avr32.c31
-rw-r--r--arch/avr32/kernel/syscall-stubs.S2
-rw-r--r--arch/avr32/kernel/vmlinux.lds.S64
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c53
-rw-r--r--arch/avr32/mach-at32ap/include/mach/atmel-mci.h24
-rw-r--r--arch/avr32/mach-at32ap/include/mach/board.h1
-rw-r--r--arch/blackfin/Kconfig45
-rw-r--r--arch/blackfin/Makefile4
-rw-r--r--arch/blackfin/boot/Makefile6
-rw-r--r--arch/blackfin/configs/BF518F-EZBRD_defconfig14
-rw-r--r--arch/blackfin/configs/BF526-EZBRD_defconfig2
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF533-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig2
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig7
-rw-r--r--arch/blackfin/configs/BF538-EZKIT_defconfig2
-rw-r--r--arch/blackfin/configs/BF548-EZKIT_defconfig332
-rw-r--r--arch/blackfin/configs/BF561-ACVILON_defconfig1643
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig214
-rw-r--r--arch/blackfin/configs/BlackStamp_defconfig2
-rw-r--r--arch/blackfin/configs/CM-BF527_defconfig390
-rw-r--r--arch/blackfin/configs/CM-BF533_defconfig631
-rw-r--r--arch/blackfin/configs/CM-BF537E_defconfig334
-rw-r--r--arch/blackfin/configs/CM-BF537U_defconfig620
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig793
-rw-r--r--arch/blackfin/configs/CM-BF561_defconfig558
-rw-r--r--arch/blackfin/configs/H8606_defconfig2
-rw-r--r--arch/blackfin/configs/IP0X_defconfig2
-rw-r--r--arch/blackfin/configs/PNAV-10_defconfig2
-rw-r--r--arch/blackfin/configs/SRV1_defconfig4
-rw-r--r--arch/blackfin/configs/TCM-BF537_defconfig577
-rw-r--r--arch/blackfin/include/asm/asm-offsets.h1
-rw-r--r--arch/blackfin/include/asm/bfin-global.h10
-rw-r--r--arch/blackfin/include/asm/bfin-lq035q1.h28
-rw-r--r--arch/blackfin/include/asm/bug.h2
-rw-r--r--arch/blackfin/include/asm/cacheflush.h1
-rw-r--r--arch/blackfin/include/asm/checksum.h70
-rw-r--r--arch/blackfin/include/asm/clocks.h2
-rw-r--r--arch/blackfin/include/asm/dma-mapping.h121
-rw-r--r--arch/blackfin/include/asm/dma.h93
-rw-r--r--arch/blackfin/include/asm/dpmc.h107
-rw-r--r--arch/blackfin/include/asm/elf.h1
-rw-r--r--arch/blackfin/include/asm/fcntl.h2
-rw-r--r--arch/blackfin/include/asm/gpio.h5
-rw-r--r--arch/blackfin/include/asm/gptimers.h32
-rw-r--r--arch/blackfin/include/asm/io.h95
-rw-r--r--arch/blackfin/include/asm/ipipe.h14
-rw-r--r--arch/blackfin/include/asm/ipipe_base.h26
-rw-r--r--arch/blackfin/include/asm/irqflags.h13
-rw-r--r--arch/blackfin/include/asm/kgdb.h3
-rw-r--r--arch/blackfin/include/asm/mem_init.h153
-rw-r--r--arch/blackfin/include/asm/mmu_context.h33
-rw-r--r--arch/blackfin/include/asm/module.h2
-rw-r--r--arch/blackfin/include/asm/pci.h130
-rw-r--r--arch/blackfin/include/asm/ptrace.h6
-rw-r--r--arch/blackfin/include/asm/sections.h16
-rw-r--r--arch/blackfin/include/asm/spinlock.h62
-rw-r--r--arch/blackfin/include/asm/spinlock_types.h8
-rw-r--r--arch/blackfin/include/asm/thread_info.h2
-rw-r--r--arch/blackfin/include/asm/trace.h2
-rw-r--r--arch/blackfin/include/asm/uaccess.h4
-rw-r--r--arch/blackfin/include/asm/unistd.h3
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c52
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c99
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbinit.c2
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbmgr.c13
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c31
-rw-r--r--arch/blackfin/kernel/dma-mapping.c68
-rw-r--r--arch/blackfin/kernel/gptimers.c32
-rw-r--r--arch/blackfin/kernel/ipipe.c67
-rw-r--r--arch/blackfin/kernel/irqchip.c6
-rw-r--r--arch/blackfin/kernel/kgdb.c17
-rw-r--r--arch/blackfin/kernel/kgdb_test.c67
-rw-r--r--arch/blackfin/kernel/process.c95
-rw-r--r--arch/blackfin/kernel/ptrace.c13
-rw-r--r--arch/blackfin/kernel/setup.c46
-rw-r--r--arch/blackfin/kernel/signal.c18
-rw-r--r--arch/blackfin/kernel/sys_bfin.c33
-rw-r--r--arch/blackfin/kernel/time-ts.c47
-rw-r--r--arch/blackfin/kernel/time.c8
-rw-r--r--arch/blackfin/kernel/traps.c49
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S30
-rw-r--r--arch/blackfin/lib/Makefile2
-rw-r--r--arch/blackfin/lib/checksum.c125
-rw-r--r--arch/blackfin/mach-bf518/Kconfig4
-rw-r--r--arch/blackfin/mach-bf518/include/mach/blackfin.h6
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF514.h13
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF516.h80
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF518.h247
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h75
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF514.h45
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF516.h213
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF518.h592
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF51x_base.h186
-rw-r--r--arch/blackfin/mach-bf527/Kconfig4
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c48
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c62
-rw-r--r--arch/blackfin/mach-bf527/include/mach/blackfin.h6
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF525.h11
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF527.h424
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h23
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF525.h11
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF527.h679
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF52x_base.h186
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c8
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c15
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c6
-rw-r--r--arch/blackfin/mach-bf533/include/mach/defBF532.h115
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c46
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c386
-rw-r--r--arch/blackfin/mach-bf537/include/mach/bf537.h10
-rw-r--r--arch/blackfin/mach-bf537/include/mach/blackfin.h6
-rw-r--r--arch/blackfin/mach-bf537/include/mach/defBF534.h95
-rw-r--r--arch/blackfin/mach-bf538/Makefile1
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c42
-rw-r--r--arch/blackfin/mach-bf538/ext-gpio.c123
-rw-r--r--arch/blackfin/mach-bf538/include/mach/blackfin.h6
-rw-r--r--arch/blackfin/mach-bf538/include/mach/defBF539.h1261
-rw-r--r--arch/blackfin/mach-bf538/include/mach/gpio.h7
-rw-r--r--arch/blackfin/mach-bf538/include/mach/portmux.h2
-rw-r--r--arch/blackfin/mach-bf548/Kconfig24
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c59
-rw-r--r--arch/blackfin/mach-bf548/include/mach/bf548.h12
-rw-r--r--arch/blackfin/mach-bf548/include/mach/blackfin.h6
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF547.h12
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF548.h788
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF549.h1533
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h22
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF544.h4
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF547.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF548.h1203
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF549.h2526
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF54x_base.h289
-rw-r--r--arch/blackfin/mach-bf561/boards/Kconfig7
-rw-r--r--arch/blackfin/mach-bf561/boards/Makefile1
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c551
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c28
-rw-r--r--arch/blackfin/mach-bf561/coreb.c8
-rw-r--r--arch/blackfin/mach-bf561/include/mach/defBF561.h101
-rw-r--r--arch/blackfin/mach-bf561/smp.c17
-rw-r--r--arch/blackfin/mach-common/clocks-init.c1
-rw-r--r--arch/blackfin/mach-common/cpufreq.c5
-rw-r--r--arch/blackfin/mach-common/dpmc_modes.S30
-rw-r--r--arch/blackfin/mach-common/entry.S6
-rw-r--r--arch/blackfin/mach-common/ints-priority.c15
-rw-r--r--arch/blackfin/mach-common/smp.c16
-rw-r--r--arch/cris/arch-v32/kernel/head.S1
-rw-r--r--arch/cris/include/arch-v32/arch/spinlock.h62
-rw-r--r--arch/cris/include/asm/asm-offsets.h1
-rw-r--r--arch/cris/include/asm/elf.h2
-rw-r--r--arch/cris/kernel/asm-offsets.c1
-rw-r--r--arch/cris/kernel/irq.c4
-rw-r--r--arch/cris/kernel/sys_cris.c30
-rw-r--r--arch/cris/kernel/vmlinux.lds.S1
-rw-r--r--arch/frv/include/asm/asm-offsets.h1
-rw-r--r--arch/frv/include/asm/elf.h1
-rw-r--r--arch/frv/kernel/irq.c4
-rw-r--r--arch/frv/kernel/setup.c2
-rw-r--r--arch/frv/kernel/sys_frv.c66
-rw-r--r--arch/h8300/Kconfig4
-rw-r--r--arch/h8300/include/asm/asm-offsets.h1
-rw-r--r--arch/h8300/include/asm/elf.h1
-rw-r--r--arch/h8300/include/asm/module.h2
-rw-r--r--arch/h8300/kernel/irq.c4
-rw-r--r--arch/h8300/kernel/sys_h8300.c83
-rw-r--r--arch/h8300/kernel/syscalls.S2
-rw-r--r--arch/h8300/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/hp/common/sba_iommu.c38
-rw-r--r--arch/ia64/ia32/elfcore32.h2
-rw-r--r--arch/ia64/ia32/sys_ia32.c3
-rw-r--r--arch/ia64/include/asm/asm-offsets.h1
-rw-r--r--arch/ia64/include/asm/bitops.h2
-rw-r--r--arch/ia64/include/asm/dma-mapping.h2
-rw-r--r--arch/ia64/include/asm/elf.h1
-rw-r--r--arch/ia64/include/asm/hw_irq.h6
-rw-r--r--arch/ia64/include/asm/io.h2
-rw-r--r--arch/ia64/include/asm/irq.h2
-rw-r--r--arch/ia64/include/asm/mca.h5
-rw-r--r--arch/ia64/include/asm/meminit.h2
-rw-r--r--arch/ia64/include/asm/numa.h2
-rw-r--r--arch/ia64/include/asm/pgtable.h3
-rw-r--r--arch/ia64/include/asm/processor.h6
-rw-r--r--arch/ia64/include/asm/rwsem.h2
-rw-r--r--arch/ia64/include/asm/spinlock.h76
-rw-r--r--arch/ia64/include/asm/spinlock_types.h8
-rw-r--r--arch/ia64/include/asm/xen/hypervisor.h28
-rw-r--r--arch/ia64/kernel/Makefile7
-rw-r--r--arch/ia64/kernel/acpi.c33
-rw-r--r--arch/ia64/kernel/head.S4
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/iosapic.c6
-rw-r--r--arch/ia64/kernel/irq.c4
-rw-r--r--arch/ia64/kernel/irq_ia64.c10
-rw-r--r--arch/ia64/kernel/mca.c11
-rw-r--r--arch/ia64/kernel/mca_asm.S2
-rw-r--r--arch/ia64/kernel/perfmon.c15
-rw-r--r--arch/ia64/kernel/relocate_kernel.S2
-rw-r--r--arch/ia64/kernel/setup.c27
-rw-r--r--arch/ia64/kernel/sys_ia64.c83
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S11
-rw-r--r--arch/ia64/kvm/asm-offsets.c1
-rw-r--r--arch/ia64/mm/contig.c99
-rw-r--r--arch/ia64/mm/discontig.c129
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/ia64/mm/ioremap.c11
-rw-r--r--arch/ia64/pci/pci.c33
-rw-r--r--arch/ia64/sn/kernel/sn2/sn2_smp.c8
-rw-r--r--arch/ia64/sn/pci/tioca_provider.c19
-rw-r--r--arch/ia64/xen/irq_xen.c131
-rw-r--r--arch/ia64/xen/time.c22
-rw-r--r--arch/m32r/include/asm/elf.h1
-rw-r--r--arch/m32r/include/asm/spinlock.h48
-rw-r--r--arch/m32r/include/asm/spinlock_types.h8
-rw-r--r--arch/m32r/kernel/irq.c4
-rw-r--r--arch/m32r/kernel/sys_m32r.c24
-rw-r--r--arch/m32r/kernel/syscall_table.S2
-rw-r--r--arch/m68k/include/asm/asm-offsets.h1
-rw-r--r--arch/m68k/include/asm/elf.h1
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h4
-rw-r--r--arch/m68k/kernel/head.S2
-rw-r--r--arch/m68k/kernel/sys_m68k.c83
-rw-r--r--arch/m68k/sun3/mmu_emu.c8
-rw-r--r--arch/m68knommu/kernel/sys_m68k.c38
-rw-r--r--arch/m68knommu/kernel/syscalltable.S2
-rw-r--r--arch/microblaze/Kconfig19
-rw-r--r--arch/microblaze/Kconfig.debug3
-rw-r--r--arch/microblaze/Makefile2
-rw-r--r--arch/microblaze/boot/Makefile15
-rw-r--r--arch/microblaze/include/asm/asm-offsets.h1
-rw-r--r--arch/microblaze/include/asm/cache.h16
-rw-r--r--arch/microblaze/include/asm/cacheflush.h123
-rw-r--r--arch/microblaze/include/asm/cpuinfo.h5
-rw-r--r--arch/microblaze/include/asm/device.h12
-rw-r--r--arch/microblaze/include/asm/elf.h1
-rw-r--r--arch/microblaze/include/asm/ftrace.h25
-rw-r--r--arch/microblaze/include/asm/futex.h127
-rw-r--r--arch/microblaze/include/asm/irqflags.h112
-rw-r--r--arch/microblaze/include/asm/page.h3
-rw-r--r--arch/microblaze/include/asm/pgalloc.h9
-rw-r--r--arch/microblaze/include/asm/pvr.h30
-rw-r--r--arch/microblaze/include/asm/setup.h2
-rw-r--r--arch/microblaze/include/asm/system.h2
-rw-r--r--arch/microblaze/include/asm/uaccess.h12
-rw-r--r--arch/microblaze/kernel/Makefile14
-rw-r--r--arch/microblaze/kernel/cpu/Makefile4
-rw-r--r--arch/microblaze/kernel/cpu/cache.c663
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c15
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-static.c17
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c7
-rw-r--r--arch/microblaze/kernel/cpu/mb.c8
-rw-r--r--arch/microblaze/kernel/cpu/pvr.c2
-rw-r--r--arch/microblaze/kernel/entry-nommu.S2
-rw-r--r--arch/microblaze/kernel/entry.S19
-rw-r--r--arch/microblaze/kernel/ftrace.c237
-rw-r--r--arch/microblaze/kernel/heartbeat.c15
-rw-r--r--arch/microblaze/kernel/intc.c10
-rw-r--r--arch/microblaze/kernel/irq.c4
-rw-r--r--arch/microblaze/kernel/mcount.S170
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c5
-rw-r--r--arch/microblaze/kernel/process.c1
-rw-r--r--arch/microblaze/kernel/reset.c140
-rw-r--r--arch/microblaze/kernel/setup.c40
-rw-r--r--arch/microblaze/kernel/signal.c35
-rw-r--r--arch/microblaze/kernel/stacktrace.c65
-rw-r--r--arch/microblaze/kernel/sys_microblaze.c38
-rw-r--r--arch/microblaze/kernel/syscall_table.S6
-rw-r--r--arch/microblaze/kernel/timer.c28
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S6
-rw-r--r--arch/microblaze/lib/uaccess.c7
-rw-r--r--arch/microblaze/mm/init.c1
-rw-r--r--arch/microblaze/mm/pgtable.c10
-rw-r--r--arch/microblaze/oprofile/Makefile13
-rw-r--r--arch/microblaze/oprofile/microblaze_oprofile.c22
-rw-r--r--arch/microblaze/platform/Kconfig.platform21
-rw-r--r--arch/microblaze/platform/generic/Kconfig.auto29
-rw-r--r--arch/microblaze/platform/generic/system.dts38
-rw-r--r--arch/microblaze/platform/platform.c2
-rw-r--r--arch/mips/Kconfig119
-rw-r--r--arch/mips/Kconfig.debug59
-rw-r--r--arch/mips/Makefile57
-rw-r--r--arch/mips/ar7/platform.c2
-rw-r--r--arch/mips/basler/excite/Kconfig9
-rw-r--r--arch/mips/basler/excite/Makefile8
-rw-r--r--arch/mips/basler/excite/excite_device.c403
-rw-r--r--arch/mips/basler/excite/excite_iodev.c178
-rw-r--r--arch/mips/basler/excite/excite_iodev.h10
-rw-r--r--arch/mips/basler/excite/excite_irq.c122
-rw-r--r--arch/mips/basler/excite/excite_procfs.c92
-rw-r--r--arch/mips/basler/excite/excite_prom.c144
-rw-r--r--arch/mips/basler/excite/excite_setup.c302
-rw-r--r--arch/mips/bcm47xx/prom.c10
-rw-r--r--arch/mips/boot/Makefile8
-rw-r--r--arch/mips/boot/addinitrd.c131
-rw-r--r--arch/mips/boot/compressed/Makefile100
-rw-r--r--arch/mips/boot/compressed/dbg.c37
-rw-r--r--arch/mips/boot/compressed/decompress.c126
-rw-r--r--arch/mips/boot/compressed/dummy.c4
-rw-r--r--arch/mips/boot/compressed/head.S56
-rw-r--r--arch/mips/boot/compressed/ld.script150
-rw-r--r--arch/mips/boot/compressed/uart-16550.c43
-rw-r--r--arch/mips/cavium-octeon/Makefile2
-rw-r--r--arch/mips/cavium-octeon/cpu.c52
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c88
-rw-r--r--arch/mips/configs/ar7_defconfig4
-rw-r--r--arch/mips/configs/bcm47xx_defconfig3
-rw-r--r--arch/mips/configs/bcm63xx_defconfig3
-rw-r--r--arch/mips/configs/bigsur_defconfig3
-rw-r--r--arch/mips/configs/capcella_defconfig3
-rw-r--r--arch/mips/configs/cavium-octeon_defconfig4
-rw-r--r--arch/mips/configs/cobalt_defconfig3
-rw-r--r--arch/mips/configs/db1000_defconfig3
-rw-r--r--arch/mips/configs/db1100_defconfig3
-rw-r--r--arch/mips/configs/db1200_defconfig3
-rw-r--r--arch/mips/configs/db1500_defconfig3
-rw-r--r--arch/mips/configs/db1550_defconfig3
-rw-r--r--arch/mips/configs/decstation_defconfig3
-rw-r--r--arch/mips/configs/e55_defconfig3
-rw-r--r--arch/mips/configs/excite_defconfig1335
-rw-r--r--arch/mips/configs/fuloong2e_defconfig96
-rw-r--r--arch/mips/configs/ip22_defconfig3
-rw-r--r--arch/mips/configs/ip27_defconfig3
-rw-r--r--arch/mips/configs/ip28_defconfig3
-rw-r--r--arch/mips/configs/ip32_defconfig3
-rw-r--r--arch/mips/configs/jazz_defconfig3
-rw-r--r--arch/mips/configs/jmr3927_defconfig3
-rw-r--r--arch/mips/configs/lasat_defconfig3
-rw-r--r--arch/mips/configs/lemote2f_defconfig1835
-rw-r--r--arch/mips/configs/malta_defconfig3
-rw-r--r--arch/mips/configs/markeins_defconfig3
-rw-r--r--arch/mips/configs/mipssim_defconfig3
-rw-r--r--arch/mips/configs/mpc30x_defconfig3
-rw-r--r--arch/mips/configs/msp71xx_defconfig3
-rw-r--r--arch/mips/configs/mtx1_defconfig3
-rw-r--r--arch/mips/configs/pb1100_defconfig3
-rw-r--r--arch/mips/configs/pb1500_defconfig3
-rw-r--r--arch/mips/configs/pb1550_defconfig3
-rw-r--r--arch/mips/configs/pnx8335-stb225_defconfig3
-rw-r--r--arch/mips/configs/pnx8550-jbs_defconfig3
-rw-r--r--arch/mips/configs/pnx8550-stb810_defconfig3
-rw-r--r--arch/mips/configs/powertv_defconfig1550
-rw-r--r--arch/mips/configs/rb532_defconfig3
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig4
-rw-r--r--arch/mips/configs/rm200_defconfig3
-rw-r--r--arch/mips/configs/sb1250-swarm_defconfig3
-rw-r--r--arch/mips/configs/tb0219_defconfig3
-rw-r--r--arch/mips/configs/tb0226_defconfig3
-rw-r--r--arch/mips/configs/tb0287_defconfig3
-rw-r--r--arch/mips/configs/workpad_defconfig3
-rw-r--r--arch/mips/configs/wrppmc_defconfig3
-rw-r--r--arch/mips/configs/yosemite_defconfig3
-rw-r--r--arch/mips/fw/arc/cmdline.c5
-rw-r--r--arch/mips/include/asm/asm-offsets.h1
-rw-r--r--arch/mips/include/asm/bootinfo.h8
-rw-r--r--arch/mips/include/asm/clock.h64
-rw-r--r--arch/mips/include/asm/cop2.h23
-rw-r--r--arch/mips/include/asm/cpu.h2
-rw-r--r--arch/mips/include/asm/elf.h1
-rw-r--r--arch/mips/include/asm/fcntl.h17
-rw-r--r--arch/mips/include/asm/fpu.h8
-rw-r--r--arch/mips/include/asm/fpu_emulator.h24
-rw-r--r--arch/mips/include/asm/ftrace.h91
-rw-r--r--arch/mips/include/asm/irq.h29
-rw-r--r--arch/mips/include/asm/mach-excite/cpu-feature-overrides.h48
-rw-r--r--arch/mips/include/asm/mach-excite/excite.h154
-rw-r--r--arch/mips/include/asm/mach-excite/excite_fpga.h80
-rw-r--r--arch/mips/include/asm/mach-excite/excite_nandflash.h7
-rw-r--r--arch/mips/include/asm/mach-excite/rm9k_eth.h23
-rw-r--r--arch/mips/include/asm/mach-excite/rm9k_wdt.h12
-rw-r--r--arch/mips/include/asm/mach-excite/rm9k_xicap.h16
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536.h305
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h35
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h153
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h31
-rw-r--r--arch/mips/include/asm/mach-loongson/dma-coherence.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/loongson.h290
-rw-r--r--arch/mips/include/asm/mach-loongson/machine.h9
-rw-r--r--arch/mips/include/asm/mach-loongson/mem.h27
-rw-r--r--arch/mips/include/asm/mach-loongson/pci.h34
-rw-r--r--arch/mips/include/asm/mach-powertv/asic.h107
-rw-r--r--arch/mips/include/asm/mach-powertv/asic_regs.h155
-rw-r--r--arch/mips/include/asm/mach-powertv/dma-coherence.h119
-rw-r--r--arch/mips/include/asm/mach-powertv/interrupts.h254
-rw-r--r--arch/mips/include/asm/mach-powertv/ioremap.h90
-rw-r--r--arch/mips/include/asm/mach-powertv/irq.h25
-rw-r--r--arch/mips/include/asm/mach-powertv/powertv-clock.h29
-rw-r--r--arch/mips/include/asm/mach-powertv/war.h (renamed from arch/mips/include/asm/mach-excite/war.h)19
-rw-r--r--arch/mips/include/asm/mips-boards/bonito64.h5
-rw-r--r--arch/mips/include/asm/mmu_context.h29
-rw-r--r--arch/mips/include/asm/octeon/cvmx-agl-defs.h1194
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mixx-defs.h248
-rw-r--r--arch/mips/include/asm/octeon/cvmx-smix-defs.h178
-rw-r--r--arch/mips/include/asm/octeon/octeon.h1
-rw-r--r--arch/mips/include/asm/pgtable.h13
-rw-r--r--arch/mips/include/asm/sgialib.h3
-rw-r--r--arch/mips/include/asm/spinlock.h78
-rw-r--r--arch/mips/include/asm/spinlock_types.h8
-rw-r--r--arch/mips/include/asm/stackframe.h40
-rw-r--r--arch/mips/kernel/Makefile14
-rw-r--r--arch/mips/kernel/cpu-probe.c2
-rw-r--r--arch/mips/kernel/cpufreq/Kconfig41
-rw-r--r--arch/mips/kernel/cpufreq/Makefile5
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_clock.c166
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_cpufreq.c227
-rw-r--r--arch/mips/kernel/csrc-powertv.c180
-rw-r--r--arch/mips/kernel/ftrace.c275
-rw-r--r--arch/mips/kernel/irq.c34
-rw-r--r--arch/mips/kernel/kspd.c1
-rw-r--r--arch/mips/kernel/linux32.c19
-rw-r--r--arch/mips/kernel/mcount.S189
-rw-r--r--arch/mips/kernel/mips_ksyms.c5
-rw-r--r--arch/mips/kernel/setup.c44
-rw-r--r--arch/mips/kernel/signal.c46
-rw-r--r--arch/mips/kernel/signal32.c24
-rw-r--r--arch/mips/kernel/smp.c3
-rw-r--r--arch/mips/kernel/smtc.c21
-rw-r--r--arch/mips/kernel/syscall.c32
-rw-r--r--arch/mips/kernel/traps.c136
-rw-r--r--arch/mips/kernel/unaligned.c25
-rw-r--r--arch/mips/kernel/vmlinux.lds.S1
-rw-r--r--arch/mips/lasat/picvue_proc.c113
-rw-r--r--arch/mips/lasat/prom.c4
-rw-r--r--arch/mips/lasat/sysctl.c2
-rw-r--r--arch/mips/loongson/Kconfig108
-rw-r--r--arch/mips/loongson/Makefile6
-rw-r--r--arch/mips/loongson/common/Makefile18
-rw-r--r--arch/mips/loongson/common/bonito-irq.c13
-rw-r--r--arch/mips/loongson/common/cmdline.c4
-rw-r--r--arch/mips/loongson/common/cs5536/Makefile13
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_acc.c140
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ehci.c158
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ide.c179
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_isa.c316
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_mfgpt.c217
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ohci.c147
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_pci.c87
-rw-r--r--arch/mips/loongson/common/early_printk.c17
-rw-r--r--arch/mips/loongson/common/env.c3
-rw-r--r--arch/mips/loongson/common/init.c19
-rw-r--r--arch/mips/loongson/common/irq.c12
-rw-r--r--arch/mips/loongson/common/machtype.c25
-rw-r--r--arch/mips/loongson/common/mem.c93
-rw-r--r--arch/mips/loongson/common/pci.c20
-rw-r--r--arch/mips/loongson/common/platform.c30
-rw-r--r--arch/mips/loongson/common/pm.c161
-rw-r--r--arch/mips/loongson/common/reset.c2
-rw-r--r--arch/mips/loongson/common/serial.c76
-rw-r--r--arch/mips/loongson/common/time.c3
-rw-r--r--arch/mips/loongson/common/uart_base.c45
-rw-r--r--arch/mips/loongson/fuloong-2e/irq.c4
-rw-r--r--arch/mips/loongson/fuloong-2e/reset.c4
-rw-r--r--arch/mips/loongson/lemote-2f/Makefile11
-rw-r--r--arch/mips/loongson/lemote-2f/ec_kb3310b.c130
-rw-r--r--arch/mips/loongson/lemote-2f/ec_kb3310b.h188
-rw-r--r--arch/mips/loongson/lemote-2f/irq.c134
-rw-r--r--arch/mips/loongson/lemote-2f/pm.c149
-rw-r--r--arch/mips/loongson/lemote-2f/reset.c159
-rw-r--r--arch/mips/math-emu/cp1emu.c102
-rw-r--r--arch/mips/math-emu/dsemul.c4
-rw-r--r--arch/mips/mipssim/Makefile3
-rw-r--r--arch/mips/mipssim/sim_setup.c1
-rw-r--r--arch/mips/mm/cache.c2
-rw-r--r--arch/mips/mm/cerr-sb1.c7
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mips/mm/tlbex.c28
-rw-r--r--arch/mips/mm/uasm.c16
-rw-r--r--arch/mips/mm/uasm.h7
-rw-r--r--arch/mips/mti-malta/malta-memory.c2
-rw-r--r--arch/mips/nxp/pnx833x/common/interrupts.c4
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c5
-rw-r--r--arch/mips/pci/Makefile4
-rw-r--r--arch/mips/pci/fixup-excite.c36
-rw-r--r--arch/mips/pci/fixup-fuloong2e.c5
-rw-r--r--arch/mips/pci/fixup-lemote2f.c160
-rw-r--r--arch/mips/pci/ops-bonito64.c7
-rw-r--r--arch/mips/pci/ops-loongson2.c208
-rw-r--r--arch/mips/pci/pci-excite.c149
-rw-r--r--arch/mips/powertv/Kconfig21
-rw-r--r--arch/mips/powertv/Makefile28
-rw-r--r--arch/mips/powertv/asic/Kconfig28
-rw-r--r--arch/mips/powertv/asic/Makefile23
-rw-r--r--arch/mips/powertv/asic/asic-calliope.c98
-rw-r--r--arch/mips/powertv/asic/asic-cronus.c98
-rw-r--r--arch/mips/powertv/asic/asic-zeus.c98
-rw-r--r--arch/mips/powertv/asic/asic_devices.c787
-rw-r--r--arch/mips/powertv/asic/asic_int.c125
-rw-r--r--arch/mips/powertv/asic/irq_asic.c116
-rw-r--r--arch/mips/powertv/asic/prealloc-calliope.c620
-rw-r--r--arch/mips/powertv/asic/prealloc-cronus.c608
-rw-r--r--arch/mips/powertv/asic/prealloc-cronuslite.c290
-rw-r--r--arch/mips/powertv/asic/prealloc-zeus.c459
-rw-r--r--arch/mips/powertv/cmdline.c52
-rw-r--r--arch/mips/powertv/init.c128
-rw-r--r--arch/mips/powertv/init.h28
-rw-r--r--arch/mips/powertv/memory.c186
-rw-r--r--arch/mips/powertv/pci/Makefile21
-rw-r--r--arch/mips/powertv/pci/fixup-powertv.c36
-rw-r--r--arch/mips/powertv/pci/powertv-pci.h31
-rw-r--r--arch/mips/powertv/powertv-clock.h26
-rw-r--r--arch/mips/powertv/powertv_setup.c351
-rw-r--r--arch/mips/powertv/reset.c65
-rw-r--r--arch/mips/powertv/reset.h26
-rw-r--r--arch/mips/powertv/time.c (renamed from arch/mips/mipssim/sim_cmdline.c)21
-rw-r--r--arch/mips/rb532/prom.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c3
-rw-r--r--arch/mips/sgi-ip22/ip22-setup.c2
-rw-r--r--arch/mips/sgi-ip22/ip22-time.c3
-rw-r--r--arch/mips/sgi-ip32/ip32-setup.c2
-rw-r--r--arch/mips/sibyte/common/cfe.c4
-rw-r--r--arch/mips/sni/a20r.c2
-rw-r--r--arch/mips/sni/pcimt.c2
-rw-r--r--arch/mips/sni/pcit.c2
-rw-r--r--arch/mips/sni/rm200.c2
-rw-r--r--arch/mips/sni/setup.c2
-rw-r--r--arch/mips/txx9/generic/setup.c4
-rw-r--r--arch/mips/vr41xx/common/icu.c92
-rw-r--r--arch/mn10300/include/asm/asm-offsets.h1
-rw-r--r--arch/mn10300/include/asm/elf.h1
-rw-r--r--arch/mn10300/include/asm/mman.h5
-rw-r--r--arch/mn10300/kernel/entry.S2
-rw-r--r--arch/mn10300/kernel/irq.c4
-rw-r--r--arch/mn10300/kernel/kprobes.c61
-rw-r--r--arch/mn10300/kernel/sys_mn10300.c36
-rw-r--r--arch/parisc/hpux/sys_hpux.c7
-rw-r--r--arch/parisc/include/asm/asm-offsets.h1
-rw-r--r--arch/parisc/include/asm/atomic.h10
-rw-r--r--arch/parisc/include/asm/bug.h4
-rw-r--r--arch/parisc/include/asm/elf.h1
-rw-r--r--arch/parisc/include/asm/fcntl.h5
-rw-r--r--arch/parisc/include/asm/ftrace.h14
-rw-r--r--arch/parisc/include/asm/spinlock.h64
-rw-r--r--arch/parisc/include/asm/spinlock_types.h12
-rw-r--r--arch/parisc/kernel/asm-offsets.c3
-rw-r--r--arch/parisc/kernel/irq.c8
-rw-r--r--arch/parisc/kernel/signal.c1
-rw-r--r--arch/parisc/kernel/smp.c9
-rw-r--r--arch/parisc/kernel/sys_parisc.c30
-rw-r--r--arch/parisc/kernel/sys_parisc32.c6
-rw-r--r--arch/parisc/kernel/unwind.c50
-rw-r--r--arch/parisc/lib/bitops.c4
-rw-r--r--arch/powerpc/Kconfig51
-rw-r--r--arch/powerpc/Kconfig.debug8
-rw-r--r--arch/powerpc/boot/Makefile7
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts2
-rw-r--r--arch/powerpc/boot/dts/eiger.dts6
-rw-r--r--arch/powerpc/boot/dts/gamecube.dts114
-rw-r--r--arch/powerpc/boot/dts/gef_ppc9a.dts6
-rw-r--r--arch/powerpc/boot/dts/gef_sbc310.dts6
-rw-r--r--arch/powerpc/boot/dts/gef_sbc610.dts6
-rw-r--r--arch/powerpc/boot/dts/glacier.dts6
-rw-r--r--arch/powerpc/boot/dts/haleakala.dts2
-rw-r--r--arch/powerpc/boot/dts/katmai.dts86
-rw-r--r--arch/powerpc/boot/dts/kilauea.dts4
-rw-r--r--arch/powerpc/boot/dts/kmeter1.dts7
-rw-r--r--arch/powerpc/boot/dts/makalu.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8315erdb.dts27
-rw-r--r--arch/powerpc/boot/dts/mpc832x_mds.dts9
-rw-r--r--arch/powerpc/boot/dts/mpc832x_rdb.dts9
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts82
-rw-r--r--arch/powerpc/boot/dts/mpc836x_mds.dts9
-rw-r--r--arch/powerpc/boot/dts/mpc836x_rdk.dts9
-rw-r--r--arch/powerpc/boot/dts/mpc8568mds.dts119
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts111
-rw-r--r--arch/powerpc/boot/dts/mpc8610_hpcd.dts26
-rw-r--r--arch/powerpc/boot/dts/p1020rdb.dts477
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core0.dts363
-rw-r--r--arch/powerpc/boot/dts/p2020rdb_camp_core1.dts184
-rw-r--r--arch/powerpc/boot/dts/p4080ds.dts554
-rw-r--r--arch/powerpc/boot/dts/redwood.dts1
-rw-r--r--arch/powerpc/boot/dts/warp.dts2
-rw-r--r--arch/powerpc/boot/dts/wii.dts218
-rw-r--r--arch/powerpc/boot/dts/yosemite.dts14
-rw-r--r--arch/powerpc/boot/gamecube-head.S111
-rw-r--r--arch/powerpc/boot/gamecube.c35
-rw-r--r--arch/powerpc/boot/ugecon.c147
-rw-r--r--arch/powerpc/boot/ugecon.h24
-rw-r--r--arch/powerpc/boot/wii-head.S142
-rw-r--r--arch/powerpc/boot/wii.c158
-rwxr-xr-xarch/powerpc/boot/wrapper4
-rw-r--r--arch/powerpc/configs/86xx/gef_ppc9a_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc310_defconfig2
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc610_defconfig4
-rw-r--r--arch/powerpc/configs/g5_defconfig6
-rw-r--r--arch/powerpc/configs/gamecube_defconfig1061
-rw-r--r--arch/powerpc/configs/iseries_defconfig4
-rw-r--r--arch/powerpc/configs/ppc64_defconfig14
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig4
-rw-r--r--arch/powerpc/configs/pseries_defconfig14
-rw-r--r--arch/powerpc/configs/wii_defconfig1406
-rw-r--r--arch/powerpc/include/asm/asm-offsets.h1
-rw-r--r--arch/powerpc/include/asm/async_tx.h47
-rw-r--r--arch/powerpc/include/asm/bug.h2
-rw-r--r--arch/powerpc/include/asm/cpm.h82
-rw-r--r--arch/powerpc/include/asm/cpm1.h45
-rw-r--r--arch/powerpc/include/asm/cpm2.h47
-rw-r--r--arch/powerpc/include/asm/dcr-regs.h23
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h2
-rw-r--r--arch/powerpc/include/asm/elf.h1
-rw-r--r--arch/powerpc/include/asm/exception-64s.h2
-rw-r--r--arch/powerpc/include/asm/fixmap.h3
-rw-r--r--arch/powerpc/include/asm/gpio.h5
-rw-r--r--arch/powerpc/include/asm/hugetlb.h27
-rw-r--r--arch/powerpc/include/asm/hvcall.h13
-rw-r--r--arch/powerpc/include/asm/hw_irq.h5
-rw-r--r--arch/powerpc/include/asm/immap_cpm2.h2
-rw-r--r--arch/powerpc/include/asm/immap_qe.h8
-rw-r--r--arch/powerpc/include/asm/irq.h13
-rw-r--r--arch/powerpc/include/asm/kvm.h18
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h40
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h139
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64_asm.h58
-rw-r--r--arch/powerpc/include/asm/kvm_host.h79
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h1
-rw-r--r--arch/powerpc/include/asm/lppaca.h9
-rw-r--r--arch/powerpc/include/asm/machdep.h5
-rw-r--r--arch/powerpc/include/asm/macio.h6
-rw-r--r--arch/powerpc/include/asm/mediabay.h27
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h50
-rw-r--r--arch/powerpc/include/asm/mmu_context.h2
-rw-r--r--arch/powerpc/include/asm/module.h5
-rw-r--r--arch/powerpc/include/asm/mpc52xx.h47
-rw-r--r--arch/powerpc/include/asm/nvram.h1
-rw-r--r--arch/powerpc/include/asm/pSeries_reconfig.h1
-rw-r--r--arch/powerpc/include/asm/paca.h9
-rw-r--r--arch/powerpc/include/asm/page.h14
-rw-r--r--arch/powerpc/include/asm/page_64.h2
-rw-r--r--arch/powerpc/include/asm/pgalloc-32.h10
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h63
-rw-r--r--arch/powerpc/include/asm/pgalloc.h30
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h14
-rw-r--r--arch/powerpc/include/asm/pgtable.h3
-rw-r--r--arch/powerpc/include/asm/pte-8xx.h14
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h37
-rw-r--r--arch/powerpc/include/asm/ptrace.h2
-rw-r--r--arch/powerpc/include/asm/qe.h43
-rw-r--r--arch/powerpc/include/asm/rtas.h2
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/spinlock.h68
-rw-r--r--arch/powerpc/include/asm/spinlock_types.h8
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/kernel/Makefile3
-rw-r--r--arch/powerpc/kernel/align.c63
-rw-r--r--arch/powerpc/kernel/asm-offsets.c21
-rw-r--r--arch/powerpc/kernel/cputable.c6
-rw-r--r--arch/powerpc/kernel/crash.c2
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S8
-rw-r--r--arch/powerpc/kernel/head_32.S25
-rw-r--r--arch/powerpc/kernel/head_64.S7
-rw-r--r--arch/powerpc/kernel/head_8xx.S313
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S22
-rw-r--r--arch/powerpc/kernel/io.c4
-rw-r--r--arch/powerpc/kernel/iommu.c4
-rw-r--r--arch/powerpc/kernel/irq.c141
-rw-r--r--arch/powerpc/kernel/lparcfg.c4
-rw-r--r--arch/powerpc/kernel/misc_32.S18
-rw-r--r--arch/powerpc/kernel/nvram_64.c56
-rw-r--r--arch/powerpc/kernel/perf_callchain.c24
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c3
-rw-r--r--arch/powerpc/kernel/proc_powerpc.c (renamed from arch/powerpc/kernel/proc_ppc64.c)102
-rw-r--r--arch/powerpc/kernel/rtas.c16
-rw-r--r--arch/powerpc/kernel/rtas_flash.c10
-rw-r--r--arch/powerpc/kernel/rtasd.c (renamed from arch/powerpc/platforms/pseries/rtasd.c)48
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/setup_64.c5
-rw-r--r--arch/powerpc/kernel/smp.c5
-rw-r--r--arch/powerpc/kernel/syscalls.c15
-rw-r--r--arch/powerpc/kernel/sysfs.c19
-rw-r--r--arch/powerpc/kernel/time.c1
-rw-r--r--arch/powerpc/kernel/traps.c31
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/kernel/vector.S2
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S3
-rw-r--r--arch/powerpc/kvm/Kconfig17
-rw-r--r--arch/powerpc/kvm/Makefile27
-rw-r--r--arch/powerpc/kvm/book3s.c974
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c372
-rw-r--r--arch/powerpc/kvm/book3s_64_emulate.c345
-rw-r--r--arch/powerpc/kvm/book3s_64_exports.c24
-rw-r--r--arch/powerpc/kvm/book3s_64_interrupts.S392
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c478
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c408
-rw-r--r--arch/powerpc/kvm/book3s_64_rmhandlers.S131
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S262
-rw-r--r--arch/powerpc/kvm/booke.c5
-rw-r--r--arch/powerpc/kvm/emulate.c66
-rw-r--r--arch/powerpc/kvm/powerpc.c28
-rw-r--r--arch/powerpc/kvm/timing.c1
-rw-r--r--arch/powerpc/kvm/trace.h6
-rw-r--r--arch/powerpc/lib/copy_32.S24
-rw-r--r--arch/powerpc/lib/locks.c8
-rw-r--r--arch/powerpc/mm/40x_mmu.c2
-rw-r--r--arch/powerpc/mm/44x_mmu.c2
-rw-r--r--arch/powerpc/mm/Makefile5
-rw-r--r--arch/powerpc/mm/fault.c8
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c132
-rw-r--r--arch/powerpc/mm/gup.c149
-rw-r--r--arch/powerpc/mm/hash_utils_64.c58
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c139
-rw-r--r--arch/powerpc/mm/hugetlbpage.c792
-rw-r--r--arch/powerpc/mm/init_32.c9
-rw-r--r--arch/powerpc/mm/init_64.c76
-rw-r--r--arch/powerpc/mm/mem.c17
-rw-r--r--arch/powerpc/mm/mmu_context_hash64.c26
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c2
-rw-r--r--arch/powerpc/mm/mmu_decl.h28
-rw-r--r--arch/powerpc/mm/pgtable.c25
-rw-r--r--arch/powerpc/mm/pgtable_32.c38
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c4
-rw-r--r--arch/powerpc/mm/subpage-prot.c15
-rw-r--r--arch/powerpc/mm/tlb_hash64.c8
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c4
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig5
-rw-r--r--arch/powerpc/platforms/52xx/Makefile1
-rw-r--r--arch/powerpc/platforms/52xx/efika.c2
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c14
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c432
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c560
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c10
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c3
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c53
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig23
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.c125
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.h19
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c3
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c34
-rw-r--r--arch/powerpc/platforms/85xx/p4080_ds.c74
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c6
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig3
-rw-r--r--arch/powerpc/platforms/86xx/gef_pic.c6
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c5
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c5
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c5
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c48
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/Kconfig9
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype19
-rw-r--r--arch/powerpc/platforms/Makefile2
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c2
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c2
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.c8
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c28
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c8
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile6
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c2
-rw-r--r--arch/powerpc/platforms/chrp/Kconfig2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c52
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig33
-rw-r--r--arch/powerpc/platforms/embedded6xx/Makefile4
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c263
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.h25
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c118
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c241
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.h22
-rw-r--r--arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c328
-rw-r--r--arch/powerpc/platforms/embedded6xx/usbgecko_udbg.h32
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c268
-rw-r--r--arch/powerpc/platforms/iseries/htab.c8
-rw-r--r--arch/powerpc/platforms/iseries/irq.c8
-rw-r--r--arch/powerpc/platforms/iseries/mf.c147
-rw-r--r--arch/powerpc/platforms/iseries/viopath.c2
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c10
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c2
-rw-r--r--arch/powerpc/platforms/powermac/pic.c12
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c2
-rw-r--r--arch/powerpc/platforms/ps3/mm.c2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig5
-rw-r--r--arch/powerpc/platforms/pseries/Makefile4
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c283
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c560
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c4
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c18
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c182
-rw-r--r--arch/powerpc/platforms/pseries/offline_states.h18
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h22
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c8
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c4
-rw-r--r--arch/powerpc/platforms/pseries/smp.c19
-rw-r--r--arch/powerpc/platforms/pseries/xics.c74
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/cpm1.c4
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c42
-rw-r--r--arch/powerpc/sysdev/cpm_common.c5
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c8
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c26
-rw-r--r--arch/powerpc/sysdev/fsl_pmc.c88
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c2
-rw-r--r--arch/powerpc/sysdev/i8259.c8
-rw-r--r--arch/powerpc/sysdev/ipic.c8
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c6
-rw-r--r--arch/powerpc/sysdev/mpc8xxx_gpio.c21
-rw-r--r--arch/powerpc/sysdev/mpic.c43
-rw-r--r--arch/powerpc/sysdev/mpic_msi.c11
-rw-r--r--arch/powerpc/sysdev/mpic_pasemi_msi.c2
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c48
-rw-r--r--arch/powerpc/sysdev/mv64x60_pic.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c61
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c8
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c6
-rw-r--r--arch/powerpc/sysdev/uic.c18
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c8
-rw-r--r--arch/powerpc/xmon/xmon.c3
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/crypto/aes_s390.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c4
-rw-r--r--arch/s390/hypfs/hypfs_vm.c2
-rw-r--r--arch/s390/include/asm/asm-offsets.h1
-rw-r--r--arch/s390/include/asm/elf.h1
-rw-r--r--arch/s390/include/asm/spinlock.h66
-rw-r--r--arch/s390/include/asm/spinlock_types.h8
-rw-r--r--arch/s390/include/asm/unistd.h3
-rw-r--r--arch/s390/kernel/compat_linux.c37
-rw-r--r--arch/s390/kernel/compat_wrapper.S9
-rw-r--r--arch/s390/kernel/debug.c3
-rw-r--r--arch/s390/kernel/ipl.c6
-rw-r--r--arch/s390/kernel/ptrace.c2
-rw-r--r--arch/s390/kernel/sys_s390.c30
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/traps.c6
-rw-r--r--arch/s390/lib/spinlock.c46
-rw-r--r--arch/score/include/asm/asm-offsets.h1
-rw-r--r--arch/score/include/asm/cacheflush.h4
-rw-r--r--arch/score/include/asm/delay.h2
-rw-r--r--arch/score/include/asm/elf.h1
-rw-r--r--arch/score/include/asm/page.h2
-rw-r--r--arch/score/kernel/setup.c1
-rw-r--r--arch/score/kernel/sys_score.c28
-rw-r--r--arch/score/mm/cache.c26
-rw-r--r--arch/score/mm/init.c5
-rw-r--r--arch/sh/Kconfig.debug44
-rw-r--r--arch/sh/Makefile10
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c44
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c322
-rw-r--r--arch/sh/boards/mach-kfr2r09/lcd_wqvga.c6
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c14
-rw-r--r--arch/sh/boards/mach-migor/setup.c32
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c7
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c17
-rw-r--r--arch/sh/configs/ecovec24-romimage_defconfig2
-rw-r--r--arch/sh/configs/ecovec24_defconfig2
-rw-r--r--arch/sh/configs/rts7751r2d1_defconfig2
-rw-r--r--arch/sh/configs/rts7751r2dplus_defconfig2
-rw-r--r--arch/sh/drivers/pci/fixups-rts7751r2d.c2
-rw-r--r--arch/sh/include/asm/.gitignore1
-rw-r--r--arch/sh/include/asm/asm-offsets.h1
-rw-r--r--arch/sh/include/asm/elf.h1
-rw-r--r--arch/sh/include/asm/io.h11
-rw-r--r--arch/sh/include/asm/machvec.h2
-rw-r--r--arch/sh/include/asm/pgtable_32.h5
-rw-r--r--arch/sh/include/asm/spinlock.h58
-rw-r--r--arch/sh/include/asm/spinlock_types.h8
-rw-r--r--arch/sh/include/asm/unistd_32.h3
-rw-r--r--arch/sh/include/asm/unistd_64.h3
-rw-r--r--arch/sh/include/mach-kfr2r09/mach/kfr2r09.h6
-rw-r--r--arch/sh/kernel/Makefile3
-rw-r--r--arch/sh/kernel/cpu/irq/ipr.c7
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c71
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-mxg.c23
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7201.c181
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7203.c89
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c89
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7705.c49
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh770x.c80
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7710.c50
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7720.c50
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh4-202.c23
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c47
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7760.c89
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c112
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c39
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c91
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c160
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c149
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c92
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7763.c81
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7770.c221
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c60
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7785.c159
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c132
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c76
-rw-r--r--arch/sh/kernel/cpu/sh5/fpu.c4
-rw-r--r--arch/sh/kernel/cpu/sh5/setup-sh5.c22
-rw-r--r--arch/sh/kernel/early_printk.c157
-rw-r--r--arch/sh/kernel/ftrace.c76
-rw-r--r--arch/sh/kernel/irq.c4
-rw-r--r--arch/sh/kernel/process_64.c4
-rw-r--r--arch/sh/kernel/ptrace_64.c4
-rw-r--r--arch/sh/kernel/setup.c3
-rw-r--r--arch/sh/kernel/signal_64.c2
-rw-r--r--arch/sh/kernel/sys_sh.c28
-rw-r--r--arch/sh/kernel/syscalls_32.S1
-rw-r--r--arch/sh/kernel/traps_32.c18
-rw-r--r--arch/sh/kernel/traps_64.c4
-rw-r--r--arch/sh/mm/cache-sh4.c3
-rw-r--r--arch/sh/mm/ioremap_32.c10
-rw-r--r--arch/sh/mm/ioremap_64.c6
-rw-r--r--arch/sh/mm/mmap.c3
-rw-r--r--arch/sh/mm/numa.c15
-rw-r--r--arch/sh/tools/Makefile4
-rw-r--r--arch/sh/tools/gen-mach-types2
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/Kconfig.debug14
-rw-r--r--arch/sparc/include/asm/asm-offsets.h1
-rw-r--r--arch/sparc/include/asm/elf_32.h2
-rw-r--r--arch/sparc/include/asm/elf_64.h1
-rw-r--r--arch/sparc/include/asm/fcntl.h19
-rw-r--r--arch/sparc/include/asm/pci_64.h2
-rw-r--r--arch/sparc/include/asm/spinlock_32.h62
-rw-r--r--arch/sparc/include/asm/spinlock_64.h54
-rw-r--r--arch/sparc/include/asm/spinlock_types.h8
-rw-r--r--arch/sparc/include/asm/string_32.h78
-rw-r--r--arch/sparc/include/asm/string_64.h25
-rw-r--r--arch/sparc/include/asm/thread_info_64.h2
-rw-r--r--arch/sparc/include/asm/uaccess_32.h15
-rw-r--r--arch/sparc/include/asm/uaccess_64.h23
-rw-r--r--arch/sparc/include/asm/unistd.h2
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/ftrace.c11
-rw-r--r--arch/sparc/kernel/iommu.c3
-rw-r--r--arch/sparc/kernel/irq_64.c8
-rw-r--r--arch/sparc/kernel/kprobes.c3
-rw-r--r--arch/sparc/kernel/ldc.c20
-rw-r--r--arch/sparc/kernel/mdesc.c21
-rw-r--r--arch/sparc/kernel/nmi.c8
-rw-r--r--arch/sparc/kernel/of_device_64.c14
-rw-r--r--arch/sparc/kernel/pci.c8
-rw-r--r--arch/sparc/kernel/ptrace_64.c10
-rw-r--r--arch/sparc/kernel/sparc_ksyms_64.c12
-rw-r--r--arch/sparc/kernel/sys_sparc32.c22
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c64
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c50
-rw-r--r--arch/sparc/kernel/syscalls.S14
-rw-r--r--arch/sparc/kernel/systbls.h1
-rw-r--r--arch/sparc/kernel/systbls_32.S4
-rw-r--r--arch/sparc/kernel/systbls_64.S6
-rw-r--r--arch/sparc/kernel/time_64.c26
-rw-r--r--arch/sparc/kernel/unaligned_32.c15
-rw-r--r--arch/sparc/kernel/unaligned_64.c23
-rw-r--r--arch/sparc/kernel/visemul.c3
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/bzero.S5
-rw-r--r--arch/sparc/lib/checksum_32.S2
-rw-r--r--arch/sparc/lib/ksyms.c2
-rw-r--r--arch/sparc/lib/mcount.S5
-rw-r--r--arch/sparc/lib/memcpy.S3
-rw-r--r--arch/sparc/lib/memset.S3
-rw-r--r--arch/sparc/lib/usercopy.c8
-rw-r--r--arch/sparc/math-emu/math_32.c3
-rw-r--r--arch/sparc/math-emu/math_64.c2
-rw-r--r--arch/sparc/mm/fault_64.c24
-rw-r--r--arch/sparc/mm/sun4c.c17
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/drivers/mconsole_kern.c30
-rw-r--r--arch/um/drivers/ubd_kern.c36
-rw-r--r--arch/um/include/asm/asm-offsets.h1
-rw-r--r--arch/um/kernel/exitcode.c43
-rw-r--r--arch/um/kernel/irq.c4
-rw-r--r--arch/um/kernel/process.c31
-rw-r--r--arch/um/kernel/syscall.c28
-rw-r--r--arch/um/sys-i386/asm/elf.h1
-rw-r--r--arch/um/sys-i386/shared/sysdep/syscalls.h4
-rw-r--r--arch/um/sys-ppc/asm/elf.h2
-rw-r--r--arch/um/sys-x86_64/asm/elf.h1
-rw-r--r--arch/x86/Kconfig13
-rw-r--r--arch/x86/Kconfig.debug4
-rw-r--r--arch/x86/boot/compressed/relocs.c87
-rw-r--r--arch/x86/boot/header.S2
-rw-r--r--arch/x86/boot/version.c4
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/ia32/sys_ia32.c43
-rw-r--r--arch/x86/include/asm/amd_iommu_proto.h4
-rw-r--r--arch/x86/include/asm/asm-offsets.h1
-rw-r--r--arch/x86/include/asm/cpufeature.h1
-rw-r--r--arch/x86/include/asm/dma-mapping.h2
-rw-r--r--arch/x86/include/asm/elf.h1
-rw-r--r--arch/x86/include/asm/geode.h219
-rw-r--r--arch/x86/include/asm/hw_irq.h3
-rw-r--r--arch/x86/include/asm/irq_vectors.h2
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/include/asm/msr.h19
-rw-r--r--arch/x86/include/asm/olpc.h2
-rw-r--r--arch/x86/include/asm/paravirt.h14
-rw-r--r--arch/x86/include/asm/paravirt_types.h14
-rw-r--r--arch/x86/include/asm/pci_x86.h20
-rw-r--r--arch/x86/include/asm/percpu.h104
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/ptrace.h2
-rw-r--r--arch/x86/include/asm/spinlock.h62
-rw-r--r--arch/x86/include/asm/spinlock_types.h10
-rw-r--r--arch/x86/include/asm/stacktrace.h24
-rw-r--r--arch/x86/include/asm/swiotlb.h8
-rw-r--r--arch/x86/include/asm/sys_ia32.h4
-rw-r--r--arch/x86/include/asm/syscalls.h34
-rw-r--r--arch/x86/include/asm/system.h1
-rw-r--r--arch/x86/include/asm/topology.h9
-rw-r--r--arch/x86/include/asm/trampoline.h1
-rw-r--r--arch/x86/include/asm/uv/bios.h11
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h44
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h27
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/acpi/cstate.c2
-rw-r--r--arch/x86/kernel/amd_iommu.c50
-rw-r--r--arch/x86/kernel/amd_iommu_init.c12
-rw-r--r--arch/x86/kernel/aperture_64.c11
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c5
-rw-r--r--arch/x86/kernel/apic/apic_noop.c2
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c5
-rw-r--r--arch/x86/kernel/apic/es7000_32.c12
-rw-r--r--arch/x86/kernel/apic/io_apic.c36
-rw-r--r--arch/x86/kernel/apic/nmi.c8
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c5
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c5
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c5
-rw-r--r--arch/x86/kernel/bios_uv.c8
-rw-r--r--arch/x86/kernel/cpu/addon_cpuid_features.c15
-rw-r--r--arch/x86/kernel/cpu/amd.c55
-rw-r--r--arch/x86/kernel/cpu/common.c16
-rw-r--r--arch/x86/kernel/cpu/cpu_debug.c30
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c45
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k6.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k7.c19
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c32
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-ich.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-lib.c6
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-lib.h24
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-smi.c2
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c67
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c22
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c5
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c20
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event.c32
-rw-r--r--arch/x86/kernel/cpuid.c5
-rw-r--r--arch/x86/kernel/ds.c4
-rw-r--r--arch/x86/kernel/dumpstack.c41
-rw-r--r--arch/x86/kernel/dumpstack.h6
-rw-r--r--arch/x86/kernel/dumpstack_32.c2
-rw-r--r--arch/x86/kernel/dumpstack_64.c37
-rw-r--r--arch/x86/kernel/e820.c13
-rw-r--r--arch/x86/kernel/entry_32.S69
-rw-r--r--arch/x86/kernel/entry_64.S55
-rw-r--r--arch/x86/kernel/geode_32.c196
-rw-r--r--arch/x86/kernel/head32.c2
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c5
-rw-r--r--arch/x86/kernel/ioport.c28
-rw-r--r--arch/x86/kernel/irq.c14
-rw-r--r--arch/x86/kernel/kgdb.c14
-rw-r--r--arch/x86/kernel/mfgpt_32.c410
-rw-r--r--arch/x86/kernel/microcode_amd.c40
-rw-r--r--arch/x86/kernel/microcode_core.c26
-rw-r--r--arch/x86/kernel/microcode_intel.c47
-rw-r--r--arch/x86/kernel/mpparse.c3
-rw-r--r--arch/x86/kernel/msr.c9
-rw-r--r--arch/x86/kernel/olpc.c4
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c4
-rw-r--r--arch/x86/kernel/pci-calgary_64.c6
-rw-r--r--arch/x86/kernel/pci-dma.c6
-rw-r--r--arch/x86/kernel/pci-gart_64.c9
-rw-r--r--arch/x86/kernel/pci-swiotlb.c11
-rw-r--r--arch/x86/kernel/process.c91
-rw-r--r--arch/x86/kernel/process_32.c87
-rw-r--r--arch/x86/kernel/process_64.c51
-rw-r--r--arch/x86/kernel/ptrace.c135
-rw-r--r--arch/x86/kernel/reboot.c8
-rw-r--r--arch/x86/kernel/reboot_fixups_32.c2
-rw-r--r--arch/x86/kernel/setup.c13
-rw-r--r--arch/x86/kernel/setup_percpu.c13
-rw-r--r--arch/x86/kernel/signal.c12
-rw-r--r--arch/x86/kernel/smpboot.c45
-rw-r--r--arch/x86/kernel/stacktrace.c18
-rw-r--r--arch/x86/kernel/sys_i386_32.c27
-rw-r--r--arch/x86/kernel/sys_x86_64.c17
-rw-r--r--arch/x86/kernel/syscall_table_32.S2
-rw-r--r--arch/x86/kernel/trampoline.c20
-rw-r--r--arch/x86/kernel/tsc.c1
-rw-r--r--arch/x86/kernel/tsc_sync.c10
-rw-r--r--arch/x86/kernel/uv_irq.c3
-rw-r--r--arch/x86/kernel/vm86_32.c11
-rw-r--r--arch/x86/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c6
-rw-r--r--arch/x86/kvm/i8254.c12
-rw-r--r--arch/x86/kvm/svm.c64
-rw-r--r--arch/x86/lib/Makefile8
-rw-r--r--arch/x86/lib/msr-smp.c204
-rw-r--r--arch/x86/lib/msr.c219
-rw-r--r--arch/x86/mm/kmmio.c42
-rw-r--r--arch/x86/mm/mmio-mod.c71
-rw-r--r--arch/x86/mm/pat.c3
-rw-r--r--arch/x86/mm/srat_32.c2
-rw-r--r--arch/x86/mm/srat_64.c4
-rw-r--r--arch/x86/oprofile/backtrace.c9
-rw-r--r--arch/x86/pci/Makefile5
-rw-r--r--arch/x86/pci/acpi.c74
-rw-r--r--arch/x86/pci/amd_bus.c120
-rw-r--r--arch/x86/pci/bus_numa.c101
-rw-r--r--arch/x86/pci/bus_numa.h27
-rw-r--r--arch/x86/pci/common.c20
-rw-r--r--arch/x86/pci/early.c7
-rw-r--r--arch/x86/pci/i386.c42
-rw-r--r--arch/x86/pci/intel_bus.c90
-rw-r--r--arch/x86/pci/mmconfig-shared.c356
-rw-r--r--arch/x86/pci/mmconfig_32.c16
-rw-r--r--arch/x86/pci/mmconfig_64.c88
-rw-r--r--arch/x86/tools/chkobjdump.awk2
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk20
-rw-r--r--arch/x86/tools/test_get_len.c2
-rw-r--r--arch/x86/xen/enlighten.c6
-rw-r--r--arch/x86/xen/smp.c41
-rw-r--r--arch/x86/xen/spinlock.c16
-rw-r--r--arch/x86/xen/time.c24
-rw-r--r--arch/xtensa/include/asm/asm-offsets.h1
-rw-r--r--arch/xtensa/include/asm/elf.h1
-rw-r--r--arch/xtensa/include/asm/syscall.h3
-rw-r--r--arch/xtensa/include/asm/unistd.h4
-rw-r--r--arch/xtensa/kernel/irq.c4
-rw-r--r--arch/xtensa/kernel/syscall.c43
-rw-r--r--arch/xtensa/platforms/iss/console.c2
-rw-r--r--block/blk-settings.c7
-rw-r--r--block/cfq-iosched.c94
-rw-r--r--crypto/cryptd.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/accessibility/braille/braille_console.c1
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_pad.c3
-rw-r--r--drivers/acpi/acpica/acnamesp.h9
-rw-r--r--drivers/acpi/acpica/acobject.h6
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dswload.c64
-rw-r--r--drivers/acpi/acpica/evregion.c4
-rw-r--r--drivers/acpi/acpica/evrgnini.c15
-rw-r--r--drivers/acpi/acpica/evxface.c4
-rw-r--r--drivers/acpi/acpica/evxfevnt.c4
-rw-r--r--drivers/acpi/acpica/evxfregn.c4
-rw-r--r--drivers/acpi/acpica/exmutex.c18
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nseval.c18
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c93
-rw-r--r--drivers/acpi/acpica/nsrepair.c447
-rw-r--r--drivers/acpi/acpica/nsrepair2.c195
-rw-r--r--drivers/acpi/acpica/nsutils.c57
-rw-r--r--drivers/acpi/acpica/nsxfeval.c10
-rw-r--r--drivers/acpi/acpica/nsxfname.c10
-rw-r--r--drivers/acpi/acpica/nsxfobj.c14
-rw-r--r--drivers/acpi/acpica/psxface.c3
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c27
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/bus.c148
-rw-r--r--drivers/acpi/button.c7
-rw-r--r--drivers/acpi/debug.c84
-rw-r--r--drivers/acpi/dock.c261
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/hest.c135
-rw-r--r--drivers/acpi/numa.c21
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/acpi/pci_root.c76
-rw-r--r--drivers/acpi/processor_core.c10
-rw-r--r--drivers/acpi/processor_idle.c11
-rw-r--r--drivers/acpi/processor_perflib.c63
-rw-r--r--drivers/acpi/thermal.c7
-rw-r--r--drivers/ata/Kconfig10
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata-sff.c2
-rw-r--r--drivers/ata/pata_bf54x.c19
-rw-r--r--drivers/ata/pata_cmd64x.c118
-rw-r--r--drivers/ata/pata_hpt3x2n.c64
-rw-r--r--drivers/ata/pata_macio.c1427
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/ata/sata_mv.c144
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/base/core.c14
-rw-r--r--drivers/base/cpu.c36
-rw-r--r--drivers/base/devtmpfs.c100
-rw-r--r--drivers/base/firmware_class.c14
-rw-r--r--drivers/base/memory.c80
-rw-r--r--drivers/base/node.c196
-rw-r--r--drivers/base/platform.c29
-rw-r--r--drivers/base/power/main.c144
-rw-r--r--drivers/base/power/runtime.c55
-rw-r--r--drivers/block/drbd/drbd_nl.c3
-rw-r--r--drivers/block/floppy.c5
-rw-r--r--drivers/block/swim3.c39
-rw-r--r--drivers/block/xd.c30
-rw-r--r--drivers/block/xen-blkfront.c1
-rw-r--r--drivers/bluetooth/btusb.c3
-rw-r--r--drivers/char/Kconfig15
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/intel-agp.c103
-rw-r--r--drivers/char/agp/uninorth-agp.c77
-rw-r--r--drivers/char/bfin_jtag_comm.c2
-rw-r--r--drivers/char/efirtc.c1
-rw-r--r--drivers/char/epca.c2
-rw-r--r--drivers/char/esp.c2533
-rw-r--r--drivers/char/hvc_console.c1
-rw-r--r--drivers/char/hvc_iucv.c2
-rw-r--r--drivers/char/hvc_xen.c2
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c118
-rw-r--r--drivers/char/isicom.c115
-rw-r--r--drivers/char/istallion.c185
-rw-r--r--drivers/char/keyboard.c10
-rw-r--r--drivers/char/lp.c115
-rw-r--r--drivers/char/mem.c167
-rw-r--r--drivers/char/misc.c26
-rw-r--r--drivers/char/moxa.c289
-rw-r--r--drivers/char/mxser.c248
-rw-r--r--drivers/char/nozomi.c48
-rw-r--r--drivers/char/nvram.c14
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c2
-rw-r--r--drivers/char/pty.c2
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/riscom8.c89
-rw-r--r--drivers/char/sonypi.c49
-rw-r--r--drivers/char/stallion.c129
-rw-r--r--drivers/char/sysrq.c2
-rw-r--r--drivers/char/tty_io.c149
-rw-r--r--drivers/char/tty_ldisc.c23
-rw-r--r--drivers/char/tty_port.c97
-rw-r--r--drivers/char/vt.c50
-rw-r--r--drivers/clocksource/Kconfig9
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/cs5535-clockevt.c197
-rw-r--r--drivers/cpufreq/cpufreq.c50
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c129
-rw-r--r--drivers/cpufreq/freq_table.c12
-rw-r--r--drivers/cpuidle/governors/ladder.c3
-rw-r--r--drivers/crypto/padlock-aes.c12
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/coh901318.c1325
-rw-r--r--drivers/dma/coh901318_lli.c318
-rw-r--r--drivers/dma/coh901318_lli.h124
-rw-r--r--drivers/dma/dmaengine.c36
-rw-r--r--drivers/dma/dmatest.c16
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/iop-adma.c4
-rw-r--r--drivers/dma/ppc4xx/Makefile1
-rw-r--r--drivers/dma/ppc4xx/adma.c5027
-rw-r--r--drivers/dma/ppc4xx/adma.h195
-rw-r--r--drivers/dma/ppc4xx/dma.h223
-rw-r--r--drivers/dma/ppc4xx/xor.h110
-rw-r--r--drivers/dma/shdma.c36
-rw-r--r--drivers/dma/shdma.h14
-rw-r--r--drivers/dma/txx9dmac.c2
-rw-r--r--drivers/edac/amd64_edac.c1239
-rw-r--r--drivers/edac/amd64_edac.h62
-rw-r--r--drivers/edac/edac_core.h1
-rw-r--r--drivers/edac/edac_mc.c24
-rw-r--r--drivers/edac/edac_mce_amd.c26
-rw-r--r--drivers/edac/i5100_edac.c252
-rw-r--r--drivers/firewire/ohci.c12
-rw-r--r--drivers/firmware/Kconfig4
-rw-r--r--drivers/firmware/dell_rbu.c9
-rw-r--r--drivers/firmware/dmi_scan.c5
-rw-r--r--drivers/gpio/Kconfig16
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/adp5520-gpio.c36
-rw-r--r--drivers/gpio/cs5535-gpio.c355
-rw-r--r--drivers/gpio/gpiolib.c161
-rw-r--r--drivers/gpio/langwell_gpio.c2
-rw-r--r--drivers/gpio/timbgpio.c342
-rw-r--r--drivers/gpio/twl4030-gpio.c20
-rw-r--r--drivers/gpio/wm831x-gpio.c17
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/drm_crtc.c176
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c5
-rw-r--r--drivers/gpu/drm/drm_dp_i2c_helper.c (renamed from drivers/gpu/drm/i915/intel_dp_i2c.c)76
-rw-r--r--drivers/gpu/drm/drm_drv.c55
-rw-r--r--drivers/gpu/drm/drm_edid.c328
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c23
-rw-r--r--drivers/gpu/drm/drm_fops.c112
-rw-r--r--drivers/gpu/drm/drm_ioc32.c89
-rw-r--r--drivers/gpu/drm/drm_irq.c130
-rw-r--r--drivers/gpu/drm/drm_mm.c112
-rw-r--r--drivers/gpu/drm/drm_modes.c28
-rw-r--r--drivers/gpu/drm/drm_stub.c15
-rw-r--r--drivers/gpu/drm/i2c/Makefile4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c536
-rw-r--r--drivers/gpu/drm/i2c/ch7006_mode.c468
-rw-r--r--drivers/gpu/drm/i2c/ch7006_priv.h344
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c2
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c2
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c2
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c9
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c16
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c37
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c34
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c120
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c40
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h80
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c114
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c6
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c23
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c163
-rw-r--r--drivers/gpu/drm/i915/i915_opregion.c92
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h71
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c86
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c137
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h17
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c50
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1036
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c162
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h44
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c7
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c55
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c21
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c140
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c1416
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c14
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c58
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c2
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c13
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig45
-rw-r--r--drivers/gpu/drm/nouveau/Makefile32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c125
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c155
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c6078
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h290
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c682
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c478
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c468
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c824
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h95
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c155
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c206
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h157
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c569
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c409
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1303
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h91
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c382
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c262
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c985
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c161
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.h133
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c1080
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.h455
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c269
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h52
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c70
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c702
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c572
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c1294
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h836
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c321
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c850
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c131
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c1002
-rw-r--r--drivers/gpu/drm/nouveau/nv04_cursor.c70
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c524
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c623
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c287
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fb.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c316
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c271
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c579
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c208
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c51
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c305
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c24
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c260
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c1003
-rw-r--r--drivers/gpu/drm/nouveau/nv17_gpio.c92
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c681
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h156
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c583
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c780
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c62
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c314
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c428
-rw-r--r--drivers/gpu/drm/nouveau/nv40_grctx.c678
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c38
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c769
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c156
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c304
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c1015
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h46
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h113
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c273
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c494
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c389
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c509
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mc.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c309
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h535
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c2
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c16
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atom.c45
-rw-r--r--drivers/gpu/drm/radeon/atom.h3
-rw-r--r--drivers/gpu/drm/radeon/atombios.h201
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c59
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c790
-rw-r--r--drivers/gpu/drm/radeon/r100.c303
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h19
-rw-r--r--drivers/gpu/drm/radeon/r200.c10
-rw-r--r--drivers/gpu/drm/radeon/r300.c87
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c6
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/r420.c25
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r520.c8
-rw-r--r--drivers/gpu/drm/radeon/r600.c1162
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c267
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c34
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c506
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h74
-rw-r--r--drivers/gpu/drm/radeon/r600d.h212
-rw-r--r--drivers/gpu/drm/radeon/radeon.h192
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h77
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c433
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c697
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c206
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c45
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c142
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c147
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c310
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c72
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_fixed.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c110
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c182
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c61
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c109
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c127
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h158
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c574
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h151
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h60
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c59
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c108
-rw-r--r--drivers/gpu/drm/radeon/rs400.c19
-rw-r--r--drivers/gpu/drm/radeon/rs600.c236
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h112
-rw-r--r--drivers/gpu/drm/radeon/rs690.c57
-rw-r--r--drivers/gpu/drm/radeon/rv515.c24
-rw-r--r--drivers/gpu/drm/radeon/rv770.c87
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/Makefile3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c674
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c117
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c311
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c452
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/gpu/drm/via/via_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig13
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile9
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h1793
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_escape.h89
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_overlay.h201
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h1346
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_types.h45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c229
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c726
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h513
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c621
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c742
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c519
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c213
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c293
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c872
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h102
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c516
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c634
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_reg.h57
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c1183
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c99
-rw-r--r--drivers/hid/hid-lg.h2
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hwmon/Kconfig59
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adm1021.c11
-rw-r--r--drivers/hwmon/adm1025.c12
-rw-r--r--drivers/hwmon/adm1026.c11
-rw-r--r--drivers/hwmon/adm1029.c14
-rw-r--r--drivers/hwmon/adm1031.c9
-rw-r--r--drivers/hwmon/adm9240.c9
-rw-r--r--drivers/hwmon/ads7828.c13
-rw-r--r--drivers/hwmon/adt7462.c11
-rw-r--r--drivers/hwmon/adt7470.c11
-rw-r--r--drivers/hwmon/adt7473.c11
-rw-r--r--drivers/hwmon/adt7475.c6
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/asb100.c11
-rw-r--r--drivers/hwmon/atxp1.c11
-rw-r--r--drivers/hwmon/dme1737.c10
-rw-r--r--drivers/hwmon/ds1621.c9
-rw-r--r--drivers/hwmon/f75375s.c9
-rw-r--r--drivers/hwmon/fschmd.c9
-rw-r--r--drivers/hwmon/gl518sm.c11
-rw-r--r--drivers/hwmon/gl520sm.c13
-rw-r--r--drivers/hwmon/k10temp.c197
-rw-r--r--drivers/hwmon/lis3lv02d.c231
-rw-r--r--drivers/hwmon/lis3lv02d.h51
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c183
-rw-r--r--drivers/hwmon/lm63.c15
-rw-r--r--drivers/hwmon/lm73.c9
-rw-r--r--drivers/hwmon/lm75.c14
-rw-r--r--drivers/hwmon/lm77.c12
-rw-r--r--drivers/hwmon/lm78.c9
-rw-r--r--drivers/hwmon/lm80.c13
-rw-r--r--drivers/hwmon/lm83.c12
-rw-r--r--drivers/hwmon/lm85.c16
-rw-r--r--drivers/hwmon/lm87.c12
-rw-r--r--drivers/hwmon/lm90.c14
-rw-r--r--drivers/hwmon/lm92.c9
-rw-r--r--drivers/hwmon/lm93.c8
-rw-r--r--drivers/hwmon/lm95241.c9
-rw-r--r--drivers/hwmon/max1619.c14
-rw-r--r--drivers/hwmon/max6650.c10
-rw-r--r--drivers/hwmon/pcf8591.c5
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/hwmon/smsc47m1.c153
-rw-r--r--drivers/hwmon/smsc47m192.c11
-rw-r--r--drivers/hwmon/thmc50.c8
-rw-r--r--drivers/hwmon/tmp401.c9
-rw-r--r--drivers/hwmon/tmp421.c7
-rw-r--r--drivers/hwmon/via-cputemp.c356
-rw-r--r--drivers/hwmon/w83627ehf.c72
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c10
-rw-r--r--drivers/hwmon/w83791d.c9
-rw-r--r--drivers/hwmon/w83792d.c9
-rw-r--r--drivers/hwmon/w83793.c9
-rw-r--r--drivers/hwmon/w83l785ts.c14
-rw-r--r--drivers/hwmon/w83l786ng.c10
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/i2c-core.c52
-rw-r--r--drivers/ide/pmac.c92
-rw-r--r--drivers/idle/i7300_idle.c15
-rw-r--r--drivers/infiniband/core/addr.c275
-rw-r--r--drivers/infiniband/core/cma.c133
-rw-r--r--drivers/infiniband/core/sa_query.c6
-rw-r--r--drivers/infiniband/core/ucma.c57
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c9
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c14
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h9
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c75
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c32
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c3
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c67
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c10
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c25
-rw-r--r--drivers/infiniband/hw/nes/Kconfig9
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c201
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h7
-rw-r--r--drivers/infiniband/hw/nes/nes_context.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c40
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h29
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h3
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c817
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c122
-rw-r--r--drivers/input/input.c10
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5520-keys.c220
-rw-r--r--drivers/input/keyboard/adp5588-keys.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c150
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c6
-rw-r--r--drivers/input/misc/bfin_rotary.c2
-rw-r--r--drivers/input/misc/pcf50633-input.c7
-rw-r--r--drivers/input/misc/pcspkr.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c4
-rw-r--r--drivers/input/mouse/alps.c265
-rw-r--r--drivers/input/mouse/alps.h1
-rw-r--r--drivers/input/serio/altera_ps2.c15
-rw-r--r--drivers/input/serio/ambakmi.c9
-rw-r--r--drivers/input/serio/at32psif.c3
-rw-r--r--drivers/input/serio/gscps2.c6
-rw-r--r--drivers/input/serio/hil_mlc.c8
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h8
-rw-r--r--drivers/input/serio/i8042.c88
-rw-r--r--drivers/input/serio/sa1111ps2.c10
-rw-r--r--drivers/input/tablet/wacom.h11
-rw-r--r--drivers/input/tablet/wacom_sys.c231
-rw-r--r--drivers/input/tablet/wacom_wac.c368
-rw-r--r--drivers/input/tablet/wacom_wac.h29
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c258
-rw-r--r--drivers/input/touchscreen/pcap_ts.c2
-rw-r--r--drivers/input/xen-kbdfront.c3
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c3
-rw-r--r--drivers/isdn/hisax/avma1_cs.c3
-rw-r--r--drivers/isdn/hisax/elsa_cs.c2
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c2
-rw-r--r--drivers/isdn/hisax/teles_cs.c2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/leds/Kconfig33
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/leds-adp5520.c230
-rw-r--r--drivers/leds/leds-alix2.c115
-rw-r--r--drivers/leds/leds-cobalt-qube.c4
-rw-r--r--drivers/leds/leds-cobalt-raq.c2
-rw-r--r--drivers/leds/leds-fsg.c7
-rw-r--r--drivers/leds/leds-lt3593.c217
-rw-r--r--drivers/leds/leds-pwm.c5
-rw-r--r--drivers/leds/leds-regulator.c242
-rw-r--r--drivers/leds/leds-ss4200.c556
-rw-r--r--drivers/leds/ledtrig-timer.c4
-rw-r--r--drivers/lguest/x86/core.c6
-rw-r--r--drivers/macintosh/macio_asic.c52
-rw-r--r--drivers/macintosh/mediabay.c328
-rw-r--r--drivers/macintosh/nvram.c11
-rw-r--r--drivers/macintosh/therm_adt746x.c15
-rw-r--r--drivers/macintosh/via-pmu.c160
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c2
-rw-r--r--drivers/md/Kconfig9
-rw-r--r--drivers/md/bitmap.c449
-rw-r--r--drivers/md/bitmap.h19
-rw-r--r--drivers/md/dm-crypt.c207
-rw-r--r--drivers/md/dm-exception-store.c33
-rw-r--r--drivers/md/dm-exception-store.h62
-rw-r--r--drivers/md/dm-io.c120
-rw-r--r--drivers/md/dm-ioctl.c123
-rw-r--r--drivers/md/dm-kcopyd.c5
-rw-r--r--drivers/md/dm-log.c77
-rw-r--r--drivers/md/dm-mpath.c95
-rw-r--r--drivers/md/dm-raid1.c219
-rw-r--r--drivers/md/dm-region-hash.c31
-rw-r--r--drivers/md/dm-snap-persistent.c195
-rw-r--r--drivers/md/dm-snap-transient.c24
-rw-r--r--drivers/md/dm-snap.c1279
-rw-r--r--drivers/md/dm-sysfs.c10
-rw-r--r--drivers/md/dm-table.c9
-rw-r--r--drivers/md/dm-uevent.c9
-rw-r--r--drivers/md/dm.c643
-rw-r--r--drivers/md/dm.h13
-rw-r--r--drivers/md/faulty.c1
-rw-r--r--drivers/md/linear.c3
-rw-r--r--drivers/md/md.c413
-rw-r--r--drivers/md/md.h51
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid0.c3
-rw-r--r--drivers/md/raid1.c217
-rw-r--r--drivers/md/raid1.h5
-rw-r--r--drivers/md/raid10.c116
-rw-r--r--drivers/md/raid5.c63
-rw-r--r--drivers/md/raid6algos.c20
-rw-r--r--drivers/media/IR/Kconfig9
-rw-r--r--drivers/media/IR/Makefile5
-rw-r--r--drivers/media/IR/ir-functions.c (renamed from drivers/media/common/ir-functions.c)20
-rw-r--r--drivers/media/IR/ir-keymaps.c (renamed from drivers/media/common/ir-keymaps.c)219
-rw-r--r--drivers/media/IR/ir-keytable.c (renamed from drivers/media/common/ir-keytable.c)90
-rw-r--r--drivers/media/Kconfig1
-rw-r--r--drivers/media/Makefile2
-rw-r--r--drivers/media/common/Makefile2
-rw-r--r--drivers/media/common/saa7146_fops.c60
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c14
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig8
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700.h26
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c101
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c725
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c15
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c456
-rw-r--r--drivers/media/dvb/dvb-usb/friio-fe.c10
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk-fe.c4
-rw-r--r--drivers/media/dvb/frontends/Kconfig9
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c4
-rw-r--r--drivers/media/dvb/frontends/dib0070.c674
-rw-r--r--drivers/media/dvb/frontends/dib0070.h4
-rw-r--r--drivers/media/dvb/frontends/dib0090.c1522
-rw-r--r--drivers/media/dvb/frontends/dib0090.h108
-rw-r--r--drivers/media/dvb/frontends/dib8000.c137
-rw-r--r--drivers/media/dvb/frontends/dib8000.h32
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.c15
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.h71
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c4
-rw-r--r--drivers/media/dvb/frontends/lnbp21.c28
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c13
-rw-r--r--drivers/media/dvb/frontends/stv090x.c7
-rw-r--r--drivers/media/dvb/frontends/stv090x.h2
-rw-r--r--drivers/media/dvb/siano/smsdvb.c4
-rw-r--r--drivers/media/dvb/siano/smssdio.c8
-rw-r--r--drivers/media/dvb/siano/smsusb.c18
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c52
-rw-r--r--drivers/media/radio/Kconfig7
-rw-r--r--drivers/media/radio/radio-aimslab.c4
-rw-r--r--drivers/media/radio/radio-aztech.c4
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c4
-rw-r--r--drivers/media/radio/radio-maestro.c4
-rw-r--r--drivers/media/radio/radio-maxiradio.c4
-rw-r--r--drivers/media/radio/radio-mr800.c4
-rw-r--r--drivers/media/radio/radio-rtrack2.c4
-rw-r--r--drivers/media/radio/radio-sf16fmi.c82
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c12
-rw-r--r--drivers/media/radio/radio-terratec.c4
-rw-r--r--drivers/media/radio/radio-trust.c4
-rw-r--r--drivers/media/radio/radio-typhoon.c4
-rw-r--r--drivers/media/radio/radio-zoltrix.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c98
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c219
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c97
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h5
-rw-r--r--drivers/media/video/Kconfig10
-rw-r--r--drivers/media/video/Makefile3
-rw-r--r--drivers/media/video/arv.c5
-rw-r--r--drivers/media/video/au0828/au0828-video.c36
-rw-r--r--drivers/media/video/au0828/au0828.h1
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c41
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c9
-rw-r--r--drivers/media/video/c-qcam.c4
-rw-r--r--drivers/media/video/cafe_ccic.c1
-rw-r--r--drivers/media/video/cpia.c221
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c34
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c4
-rw-r--r--drivers/media/video/cx18/cx18-streams.c20
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c23
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c26
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c10
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c58
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx23885/cimax2.c107
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c29
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c11
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c9
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c57
-rw-r--r--drivers/media/video/cx23885/cx23885.h3
-rw-r--r--drivers/media/video/cx23885/cx23888-ir.c44
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c26
-rw-r--r--drivers/media/video/cx88/cx88-input.c9
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c15
-rw-r--r--drivers/media/video/cx88/cx88-video.c68
-rw-r--r--drivers/media/video/cx88/cx88.h2
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c8
-rw-r--r--drivers/media/video/davinci/vpif.c2
-rw-r--r--drivers/media/video/davinci/vpif_capture.c2
-rw-r--r--drivers/media/video/davinci/vpif_display.c1
-rw-r--r--drivers/media/video/davinci/vpss.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c7
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c30
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c68
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c59
-rw-r--r--drivers/media/video/em28xx/em28xx.h5
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c45
-rw-r--r--drivers/media/video/gspca/conex.c4
-rw-r--r--drivers/media/video/gspca/etoms.c4
-rw-r--r--drivers/media/video/gspca/gl860/gl860-mi1320.c2
-rw-r--r--drivers/media/video/gspca/gl860/gl860-mi2020.c2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c14
-rw-r--r--drivers/media/video/gspca/gspca.c67
-rw-r--r--drivers/media/video/gspca/gspca.h10
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c4
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.c2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c10
-rw-r--r--drivers/media/video/gspca/mr97310a.c2
-rw-r--r--drivers/media/video/gspca/ov519.c14
-rw-r--r--drivers/media/video/gspca/pac7302.c25
-rw-r--r--drivers/media/video/gspca/pac7311.c4
-rw-r--r--drivers/media/video/gspca/sn9c20x.c12
-rw-r--r--drivers/media/video/gspca/sonixb.c4
-rw-r--r--drivers/media/video/gspca/spca506.c4
-rw-r--r--drivers/media/video/gspca/stk014.c106
-rw-r--r--drivers/media/video/gspca/sunplus.c237
-rw-r--r--drivers/media/video/gspca/zc3xx.c36
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c16
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c12
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c19
-rw-r--r--drivers/media/video/meye.c61
-rw-r--r--drivers/media/video/meye.h4
-rw-r--r--drivers/media/video/mt9m001.c205
-rw-r--r--drivers/media/video/mt9m111.c189
-rw-r--r--drivers/media/video/mt9t031.c281
-rw-r--r--drivers/media/video/mt9t112.c1177
-rw-r--r--drivers/media/video/mt9v022.c228
-rw-r--r--drivers/media/video/mx1_camera.c126
-rw-r--r--drivers/media/video/mx3_camera.c296
-rw-r--r--drivers/media/video/omap24xxcam.c10
-rw-r--r--drivers/media/video/ov511.c9
-rw-r--r--drivers/media/video/ov772x.c233
-rw-r--r--drivers/media/video/ov9640.c107
-rw-r--r--drivers/media/video/pms.c2
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c21
-rw-r--r--drivers/media/video/pwc/pwc-if.c5
-rw-r--r--drivers/media/video/pxa_camera.c335
-rw-r--r--drivers/media/video/rj54n1cb0c.c474
-rw-r--r--drivers/media/video/s2255drv.c55
-rw-r--r--drivers/media/video/saa5246a.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c22
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c19
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c20
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c71
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c39
-rw-r--r--drivers/media/video/se401.c4
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c522
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c47
-rw-r--r--drivers/media/video/soc_camera.c106
-rw-r--r--drivers/media/video/soc_camera_platform.c40
-rw-r--r--drivers/media/video/soc_mediabus.c157
-rw-r--r--drivers/media/video/stk-webcam.c9
-rw-r--r--drivers/media/video/stradis.c4
-rw-r--r--drivers/media/video/stv680.c5
-rw-r--r--drivers/media/video/tw9910.c337
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c4
-rw-r--r--drivers/media/video/usbvideo/vicam.c5
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c4
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c34
-rw-r--r--drivers/media/video/uvc/uvc_driver.c1
-rw-r--r--drivers/media/video/uvc/uvc_video.c2
-rw-r--r--drivers/media/video/v4l2-common.c47
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c6
-rw-r--r--drivers/media/video/v4l2-dev.c22
-rw-r--r--drivers/media/video/v4l2-ioctl.c147
-rw-r--r--drivers/media/video/videobuf-dma-contig.c6
-rw-r--r--drivers/media/video/vino.c1
-rw-r--r--drivers/media/video/vivi.c22
-rw-r--r--drivers/media/video/w9968cf.c34
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c44
-rw-r--r--drivers/media/video/zoran/zoran_driver.c1
-rw-r--r--drivers/media/video/zr364xx.c5
-rw-r--r--drivers/message/fusion/mptbase.c6
-rw-r--r--drivers/mfd/88pm8607.c302
-rw-r--r--drivers/mfd/Kconfig41
-rw-r--r--drivers/mfd/Makefile6
-rw-r--r--drivers/mfd/ab3100-core.c3
-rw-r--r--drivers/mfd/ab4500-core.c208
-rw-r--r--drivers/mfd/adp5520.c379
-rw-r--r--drivers/mfd/asic3.c2
-rw-r--r--drivers/mfd/ezx-pcap.c1
-rw-r--r--drivers/mfd/mc13783-core.c757
-rw-r--r--drivers/mfd/pcf50633-adc.c5
-rw-r--r--drivers/mfd/pcf50633-core.c76
-rw-r--r--drivers/mfd/tps65010.c30
-rw-r--r--drivers/mfd/twl-core.c (renamed from drivers/mfd/twl4030-core.c)385
-rw-r--r--drivers/mfd/twl4030-codec.c10
-rw-r--r--drivers/mfd/twl4030-irq.c158
-rw-r--r--drivers/mfd/twl4030-power.c126
-rw-r--r--drivers/mfd/twl6030-irq.c299
-rw-r--r--drivers/mfd/wm831x-core.c232
-rw-r--r--drivers/mfd/wm831x-irq.c209
-rw-r--r--drivers/mfd/wm8350-core.c771
-rw-r--r--drivers/mfd/wm8350-irq.c529
-rw-r--r--drivers/mfd/wm8350-regmap.c8
-rw-r--r--drivers/misc/Kconfig62
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot.c666
-rw-r--r--drivers/misc/cs5535-mfgpt.c370
-rw-r--r--drivers/misc/eeprom/eeprom.c8
-rw-r--r--drivers/misc/enclosure.c1
-rw-r--r--drivers/misc/hpilo.h13
-rw-r--r--drivers/misc/ics932s401.c11
-rw-r--r--drivers/misc/ioc4.c16
-rw-r--r--drivers/misc/kgdbts.c14
-rw-r--r--drivers/misc/sgi-gru/gru.h11
-rw-r--r--drivers/misc/sgi-gru/gru_instructions.h144
-rw-r--r--drivers/misc/sgi-gru/grufault.c311
-rw-r--r--drivers/misc/sgi-gru/grufile.c290
-rw-r--r--drivers/misc/sgi-gru/gruhandles.c70
-rw-r--r--drivers/misc/sgi-gru/gruhandles.h37
-rw-r--r--drivers/misc/sgi-gru/grukdump.c13
-rw-r--r--drivers/misc/sgi-gru/grukservices.c211
-rw-r--r--drivers/misc/sgi-gru/grukservices.h14
-rw-r--r--drivers/misc/sgi-gru/grulib.h21
-rw-r--r--drivers/misc/sgi-gru/grumain.c228
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c42
-rw-r--r--drivers/misc/sgi-gru/grutables.h75
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c14
-rw-r--r--drivers/misc/sgi-xp/xp.h1
-rw-r--r--drivers/misc/sgi-xp/xp_main.c3
-rw-r--r--drivers/misc/sgi-xp/xp_sn2.c10
-rw-r--r--drivers/misc/sgi-xp/xp_uv.c33
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c13
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c46
-rw-r--r--drivers/misc/ti_dac7512.c101
-rw-r--r--drivers/mmc/card/sdio_uart.c303
-rw-r--r--drivers/mmc/core/Kconfig4
-rw-r--r--drivers/mmc/core/core.c16
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c23
-rw-r--r--drivers/mmc/core/sd.c21
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/core/sdio_bus.c7
-rw-r--r--drivers/mmc/core/sdio_cis.c167
-rw-r--r--drivers/mmc/host/Kconfig64
-rw-r--r--drivers/mmc/host/Makefile8
-rw-r--r--drivers/mmc/host/atmel-mci.c141
-rw-r--r--drivers/mmc/host/bfin_sdh.c639
-rw-r--r--drivers/mmc/host/davinci_mmc.c1349
-rw-r--r--drivers/mmc/host/msm_sdcc.c5
-rw-r--r--drivers/mmc/host/mxcmmc.c10
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/omap.c10
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/s3cmci.c13
-rw-r--r--drivers/mmc/host/sdhci-of-core.c (renamed from drivers/mmc/host/sdhci-of.c)143
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c143
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c65
-rw-r--r--drivers/mmc/host/sdhci-of.h42
-rw-r--r--drivers/mmc/host/sdhci-pci.c75
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c35
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c17
-rwxr-xr-xdrivers/mtd/chips/cfi_util.c7
-rw-r--r--drivers/mtd/chips/jedec_probe.c8
-rw-r--r--drivers/mtd/devices/m25p80.c334
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c19
-rw-r--r--drivers/mtd/maps/Kconfig6
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/ipaq-flash.c460
-rw-r--r--drivers/mtd/maps/ixp4xx.c6
-rw-r--r--drivers/mtd/maps/physmap.c21
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c13
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/vmu-flash.c9
-rw-r--r--drivers/mtd/mtd_blkdevs.c5
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdoops.c389
-rw-r--r--drivers/mtd/nand/Kconfig24
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/alauda.c11
-rw-r--r--drivers/mtd/nand/atmel_nand.c5
-rw-r--r--drivers/mtd/nand/bcm_umi_bch.c213
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c581
-rw-r--r--drivers/mtd/nand/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/excite_nandflash.c248
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c86
-rw-r--r--drivers/mtd/nand/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/mxc_nand.c783
-rw-r--r--drivers/mtd/nand/nand_base.c141
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.c149
-rw-r--r--drivers/mtd/nand/nand_bcm_umi.h358
-rw-r--r--drivers/mtd/nand/nand_ecc.c25
-rw-r--r--drivers/mtd/nand/nandsim.c7
-rw-r--r--drivers/mtd/nand/nomadik_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c50
-rw-r--r--drivers/mtd/nand/s3c2410.c2
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c3
-rw-r--r--drivers/mtd/onenand/omap2.c22
-rw-r--r--drivers/mtd/onenand/onenand_base.c745
-rw-r--r--drivers/mtd/tests/Makefile1
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c87
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c18
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c1
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/Kconfig3
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c2
-rw-r--r--drivers/net/atl1c/atl1c.h17
-rw-r--r--drivers/net/atl1c/atl1c_main.c94
-rw-r--r--drivers/net/b44.c3
-rw-r--r--drivers/net/bcm63xx_enet.c12
-rw-r--r--drivers/net/benet/be_main.c4
-rw-r--r--drivers/net/bnx2.c12
-rw-r--r--drivers/net/bonding/bond_3ad.c171
-rw-r--r--drivers/net/bonding/bond_alb.c38
-rw-r--r--drivers/net/bonding/bond_ipv6.c12
-rw-r--r--drivers/net/bonding/bond_main.c607
-rw-r--r--drivers/net/bonding/bond_sysfs.c327
-rw-r--r--drivers/net/can/Kconfig11
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c783
-rw-r--r--drivers/net/can/mcp251x.c13
-rw-r--r--drivers/net/can/mscan/mscan.c3
-rw-r--r--drivers/net/can/sja1000/sja1000.c18
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/chelsio/sge.c5
-rw-r--r--drivers/net/cnic.c11
-rw-r--r--drivers/net/cpmac.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/davinci_emac.c2
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/es2lan.c97
-rw-r--r--drivers/net/e1000e/hw.h7
-rw-r--r--drivers/net/e1000e/ich8lan.c8
-rw-r--r--drivers/net/e1000e/netdev.c2
-rw-r--r--drivers/net/ehea/ehea_hcall.h51
-rw-r--r--drivers/net/ehea/ehea_phyp.h1
-rw-r--r--drivers/net/fec_mpc52xx.c121
-rw-r--r--drivers/net/gianfar.c51
-rw-r--r--drivers/net/gianfar.h18
-rw-r--r--drivers/net/igb/igb_main.c22
-rw-r--r--drivers/net/igbvf/igbvf.h1
-rw-r--r--drivers/net/igbvf/netdev.c8
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c38
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c42
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h3
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/mlx4/alloc.c37
-rw-r--r--drivers/net/mlx4/fw.c3
-rw-r--r--drivers/net/mlx4/sense.c2
-rw-r--r--drivers/net/mv643xx_eth.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/netxen/netxen_nic_init.c14
-rw-r--r--drivers/net/netxen/netxen_nic_main.c106
-rw-r--r--drivers/net/octeon/Kconfig10
-rw-r--r--drivers/net/octeon/Makefile2
-rw-r--r--drivers/net/octeon/octeon_mgmt.c1176
-rw-r--r--drivers/net/pcmcia/3c574_cs.c4
-rw-r--r--drivers/net/pcmcia/3c589_cs.c4
-rw-r--r--drivers/net/pcmcia/axnet_cs.c2
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c2
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c2
-rw-r--r--drivers/net/phy/Kconfig11
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/broadcom.c3
-rw-r--r--drivers/net/phy/mdio-octeon.c180
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/sfc/selftest.c2
-rw-r--r--drivers/net/sh_eth.c7
-rw-r--r--drivers/net/sky2.c16
-rw-r--r--drivers/net/smc91x.c8
-rw-r--r--drivers/net/smc91x.h15
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/usb/kaweth.c4
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/veth.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wimax/i2400m/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/b43/main.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c2
-rw-r--r--drivers/net/wireless/libertas/cmd.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h4
-rw-r--r--drivers/net/wireless/libertas/main.c21
-rw-r--r--drivers/net/wireless/mwl8k.c327
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c12
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c68
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_rfkill.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c36
-rw-r--r--drivers/net/xen-netfront.c1
-rw-r--r--drivers/of/platform.c305
-rw-r--r--drivers/oprofile/cpu_buffer.c19
-rw-r--r--drivers/oprofile/cpu_buffer.h4
-rw-r--r--drivers/oprofile/oprofile_stats.c4
-rw-r--r--drivers/parisc/dino.c2
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/gsc.c2
-rw-r--r--drivers/parisc/iosapic.c2
-rw-r--r--drivers/parisc/led.c59
-rw-r--r--drivers/parisc/pdc_stable.c9
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/pci/Kconfig15
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/dmar.c117
-rw-r--r--drivers/pci/hotplug/Makefile12
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c5
-rw-r--r--drivers/pci/hotplug/acpiphp.h6
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c248
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c3
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c22
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_acpi.c3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c119
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c57
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c155
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c4
-rw-r--r--drivers/pci/intel-iommu.c88
-rw-r--r--drivers/pci/intr_remapping.c7
-rw-r--r--drivers/pci/ioapic.c127
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-sysfs.c25
-rw-r--r--drivers/pci/pci.c154
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c58
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c70
-rw-r--r--drivers/pci/pcie/aer/ecrc.c4
-rw-r--r--drivers/pci/pcie/aspm.c41
-rw-r--r--drivers/pci/pcie/portdrv.h21
-rw-r--r--drivers/pci/pcie/portdrv_bus.c7
-rw-r--r--drivers/pci/pcie/portdrv_core.c239
-rw-r--r--drivers/pci/pcie/portdrv_pci.c16
-rw-r--r--drivers/pci/probe.c83
-rw-r--r--drivers/pci/quirks.c40
-rw-r--r--drivers/pci/search.c38
-rw-r--r--drivers/pci/setup-bus.c112
-rw-r--r--drivers/pci/setup-res.c68
-rw-r--r--drivers/pcmcia/Kconfig8
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/cardbus.c46
-rw-r--r--drivers/pcmcia/cistpl.c181
-rw-r--r--drivers/pcmcia/cs.c12
-rw-r--r--drivers/pcmcia/ds.c66
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c45
-rw-r--r--drivers/pcmcia/pcmcia_resource.c37
-rw-r--r--drivers/pcmcia/pxa2xx_base.c27
-rw-r--r--drivers/pcmcia/pxa2xx_base.h3
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c2
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c2
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c119
-rw-r--r--drivers/pcmcia/rsrc_mgr.c14
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c315
-rw-r--r--drivers/pcmcia/socket_sysfs.c6
-rw-r--r--drivers/pcmcia/yenta_socket.c149
-rw-r--r--drivers/platform/x86/Kconfig29
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/acerhdf.c65
-rw-r--r--drivers/platform/x86/asus-laptop.c25
-rw-r--r--drivers/platform/x86/asus_acpi.c19
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/dell-laptop.c86
-rw-r--r--drivers/platform/x86/dell-wmi.c129
-rw-r--r--drivers/platform/x86/eeepc-laptop.c1415
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c30
-rw-r--r--drivers/platform/x86/hp-wmi.c141
-rw-r--r--drivers/platform/x86/msi-wmi.c293
-rw-r--r--drivers/platform/x86/sony-laptop.c56
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1185
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c144
-rw-r--r--drivers/platform/x86/wmi.c175
-rw-r--r--drivers/pnp/interface.c36
-rw-r--r--drivers/pnp/pnpacpi/core.c20
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c9
-rw-r--r--drivers/pnp/pnpbios/proc.c204
-rw-r--r--drivers/pnp/quirks.c13
-rw-r--r--drivers/pnp/resource.c10
-rw-r--r--drivers/pnp/support.c43
-rw-r--r--drivers/pnp/system.c14
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/pcf50633-charger.c231
-rw-r--r--drivers/power/power_supply_sysfs.c5
-rw-r--r--drivers/power/wm831x_backup.c233
-rw-r--r--drivers/power/wm831x_power.c144
-rw-r--r--drivers/power/wm8350_power.c63
-rw-r--r--drivers/power/wm97xx_battery.c2
-rw-r--r--drivers/regulator/88pm8607.c685
-rw-r--r--drivers/regulator/Kconfig15
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/ab3100.c33
-rw-r--r--drivers/regulator/core.c248
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/regulator/max8660.c510
-rw-r--r--drivers/regulator/mc13783-regulator.c245
-rw-r--r--drivers/regulator/mc13783.c410
-rw-r--r--drivers/regulator/pcf50633-regulator.c5
-rw-r--r--drivers/regulator/twl-regulator.c (renamed from drivers/regulator/twl4030-regulator.c)346
-rw-r--r--drivers/regulator/wm831x-dcdc.c207
-rw-r--r--drivers/regulator/wm831x-ldo.c2
-rw-r--r--drivers/regulator/wm8350-regulator.c10
-rw-r--r--drivers/rtc/Kconfig34
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/rtc-at32ap700x.c4
-rw-r--r--drivers/rtc/rtc-bq32k.c204
-rw-r--r--drivers/rtc/rtc-bq4802.c3
-rw-r--r--drivers/rtc/rtc-cmos.c81
-rw-r--r--drivers/rtc/rtc-ds1302.c1
-rw-r--r--drivers/rtc/rtc-ds1305.c16
-rw-r--r--drivers/rtc/rtc-ds1307.c4
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c148
-rw-r--r--drivers/rtc/rtc-ds1553.c149
-rw-r--r--drivers/rtc/rtc-ds1742.c59
-rw-r--r--drivers/rtc/rtc-m48t35.c16
-rw-r--r--drivers/rtc/rtc-m48t59.c11
-rw-r--r--drivers/rtc/rtc-mc13783.c262
-rw-r--r--drivers/rtc/rtc-mv.c157
-rw-r--r--drivers/rtc/rtc-nuc900.c342
-rw-r--r--drivers/rtc/rtc-omap.c47
-rw-r--r--drivers/rtc/rtc-pcf50633.c10
-rw-r--r--drivers/rtc/rtc-pcf8563.c4
-rw-r--r--drivers/rtc/rtc-pcf8583.c3
-rw-r--r--drivers/rtc/rtc-pl031.c23
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-stk17ta8.c124
-rw-r--r--drivers/rtc/rtc-twl.c (renamed from drivers/rtc/rtc-twl4030.c)284
-rw-r--r--drivers/rtc/rtc-tx4939.c51
-rw-r--r--drivers/rtc/rtc-v3020.c8
-rw-r--r--drivers/rtc/rtc-vr41xx.c4
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-wm8350.c39
-rw-r--r--drivers/rtc/rtc-x1205.c53
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_diag.c42
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/fs3270.c2
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/monwriter.c2
-rw-r--r--drivers/s390/char/sclp.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c3
-rw-r--r--drivers/s390/char/tape_block.c1
-rw-r--r--drivers/s390/char/tape_char.c3
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/ccwreq.c3
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/device.c3
-rw-r--r--drivers/s390/cio/device_pgid.c29
-rw-r--r--drivers/s390/cio/fcx.c4
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/cio/qdio_main.c3
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/s390/cio/qdio_perf.h1
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/s390/net/netiucv.c10
-rw-r--r--drivers/s390/net/smsgiucv.c2
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c12
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h1
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c24
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_pdu.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c137
-rw-r--r--drivers/scsi/hpsa.c3531
-rw-r--r--drivers/scsi/hpsa.h273
-rw-r--r--drivers/scsi/hpsa_cmd.h326
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/libfc/fc_fcp.c65
-rw-r--r--drivers/scsi/libfc/fc_lport.c7
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/libiscsi_tcp.c36
-rw-r--r--drivers/scsi/libsrp.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c14
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c5
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c88
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.h10
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c149
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h3
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c19
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c57
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h32
-rw-r--r--drivers/scsi/pmcraid.c34
-rw-r--r--drivers/scsi/pmcraid.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c103
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c75
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_transport_fc.c17
-rw-r--r--drivers/scsi/sd.c107
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/st.c23
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/serial/8250.c24
-rw-r--r--drivers/serial/ioc3_serial.c4
-rw-r--r--drivers/serial/ioc4_serial.c20
-rw-r--r--drivers/serial/jsm/jsm.h8
-rw-r--r--drivers/serial/jsm/jsm_driver.c48
-rw-r--r--drivers/serial/jsm/jsm_neo.c8
-rw-r--r--drivers/serial/jsm/jsm_tty.c6
-rw-r--r--drivers/serial/pxa.c15
-rw-r--r--drivers/serial/serial_core.c33
-rw-r--r--drivers/serial/sh-sci.c56
-rw-r--r--drivers/serial/ucc_uart.c8
-rw-r--r--drivers/sh/intc.c2
-rw-r--r--drivers/sh/pfc.c2
-rw-r--r--drivers/sn/ioc3.c17
-rw-r--r--drivers/spi/Kconfig79
-rw-r--r--drivers/spi/Makefile16
-rw-r--r--drivers/spi/atmel_spi.c6
-rw-r--r--drivers/spi/au1550_spi.c10
-rw-r--r--drivers/spi/dw_spi.c944
-rw-r--r--drivers/spi/dw_spi_pci.c169
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c25
-rw-r--r--drivers/spi/mpc52xx_spi.c578
-rw-r--r--drivers/spi/omap_spi_100k.c635
-rw-r--r--drivers/spi/pxa2xx_spi.c2
-rw-r--r--drivers/spi/spi_bfin5xx.c2
-rw-r--r--drivers/spi/spi_imx.c35
-rw-r--r--drivers/spi/spi_mpc8xxx.c622
-rw-r--r--drivers/spi/spi_nuc900.c504
-rw-r--r--drivers/spi/spi_s3c24xx.c246
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.S116
-rw-r--r--drivers/spi/spi_s3c24xx_fiq.h26
-rw-r--r--drivers/spi/spi_s3c64xx.c1196
-rw-r--r--drivers/spi/spi_sh_msiof.c691
-rw-r--r--drivers/spi/spi_sh_sci.c2
-rw-r--r--drivers/spi/spi_txx9.c6
-rw-r--r--drivers/spi/spidev.c24
-rw-r--r--drivers/spi/xilinx_spi.c358
-rw-r--r--drivers/spi/xilinx_spi.h32
-rw-r--r--drivers/spi/xilinx_spi_of.c134
-rw-r--r--drivers/spi/xilinx_spi_pltfm.c102
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c5
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c5
-rw-r--r--drivers/staging/cx25821/cx25821-audups11.c33
-rw-r--r--drivers/staging/cx25821/cx25821-video.c6
-rw-r--r--drivers/staging/cx25821/cx25821-video0.c33
-rw-r--r--drivers/staging/cx25821/cx25821-video1.c33
-rw-r--r--drivers/staging/cx25821/cx25821-video2.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video3.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video4.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video5.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video6.c34
-rw-r--r--drivers/staging/cx25821/cx25821-video7.c34
-rw-r--r--drivers/staging/cx25821/cx25821-videoioctl.c32
-rw-r--r--drivers/staging/cx25821/cx25821-vidups10.c33
-rw-r--r--drivers/staging/cx25821/cx25821-vidups9.c33
-rw-r--r--drivers/staging/dst/dcore.c46
-rw-r--r--drivers/staging/go7007/go7007-v4l2.c5
-rw-r--r--drivers/staging/iio/ring_sw.h1
-rw-r--r--drivers/staging/octeon/Kconfig3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c204
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h2
-rw-r--r--drivers/staging/octeon/ethernet-proc.c112
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c52
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c2
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c2
-rw-r--r--drivers/staging/octeon/ethernet.c23
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h6
-rw-r--r--drivers/staging/panel/panel.c2
-rw-r--r--drivers/staging/pohmelfs/dir.c2
-rw-r--r--drivers/thermal/thermal_sys.c19
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/ueagle-atm.c7
-rw-r--r--drivers/usb/class/cdc-acm.c58
-rw-r--r--drivers/usb/class/usbtmc.c54
-rw-r--r--drivers/usb/core/devio.c110
-rw-r--r--drivers/usb/core/driver.c135
-rw-r--r--drivers/usb/core/file.c1
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c82
-rw-r--r--drivers/usb/core/hcd.h21
-rw-r--r--drivers/usb/core/hub.c142
-rw-r--r--drivers/usb/core/message.c82
-rw-r--r--drivers/usb/core/sysfs.c61
-rw-r--r--drivers/usb/core/urb.c22
-rw-r--r--drivers/usb/core/usb.c72
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/gadget/Kconfig60
-rw-r--r--drivers/usb/gadget/Makefile4
-rw-r--r--drivers/usb/gadget/at91_udc.c6
-rw-r--r--drivers/usb/gadget/audio.c115
-rw-r--r--drivers/usb/gadget/composite.c59
-rw-r--r--drivers/usb/gadget/ether.c16
-rw-r--r--drivers/usb/gadget/f_acm.c28
-rw-r--r--drivers/usb/gadget/f_audio.c76
-rw-r--r--drivers/usb/gadget/f_mass_storage.c3091
-rw-r--r--drivers/usb/gadget/f_rndis.c35
-rw-r--r--drivers/usb/gadget/file_storage.c881
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.h15
-rw-r--r--drivers/usb/gadget/mass_storage.c240
-rw-r--r--drivers/usb/gadget/multi.c358
-rw-r--r--drivers/usb/gadget/storage_common.c778
-rw-r--r--drivers/usb/gadget/u_ether.h2
-rw-r--r--drivers/usb/host/Kconfig22
-rw-r--r--drivers/usb/host/ehci-au1xxx.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c19
-rw-r--r--drivers/usb/host/ehci-hub.c2
-rw-r--r--drivers/usb/host/ehci-mxc.c296
-rw-r--r--drivers/usb/host/ehci-omap.c756
-rw-r--r--drivers/usb/host/ehci-q.c32
-rw-r--r--drivers/usb/host/ehci-sched.c36
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c300
-rw-r--r--drivers/usb/host/fhci-sched.c10
-rw-r--r--drivers/usb/host/fhci-tds.c35
-rw-r--r--drivers/usb/host/fhci.h16
-rw-r--r--drivers/usb/host/isp1362-hcd.c26
-rw-r--r--drivers/usb/host/isp1362.h4
-rw-r--r--drivers/usb/host/ohci-at91.c10
-rw-r--r--drivers/usb/host/ohci-au1xxx.c2
-rw-r--r--drivers/usb/host/ohci-pnx4008.c8
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/r8a66597-hcd.c6
-rw-r--r--drivers/usb/host/whci/debug.c20
-rw-r--r--drivers/usb/host/whci/hcd.c1
-rw-r--r--drivers/usb/host/whci/qset.c350
-rw-r--r--drivers/usb/host/whci/whcd.h9
-rw-r--r--drivers/usb/host/whci/whci-hc.h14
-rw-r--r--drivers/usb/host/xhci-hcd.c139
-rw-r--r--drivers/usb/host/xhci-mem.c255
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c343
-rw-r--r--drivers/usb/host/xhci.h37
-rw-r--r--drivers/usb/misc/usbtest.c7
-rw-r--r--drivers/usb/mon/mon_bin.c51
-rw-r--r--drivers/usb/mon/mon_text.c23
-rw-r--r--drivers/usb/musb/Kconfig5
-rw-r--r--drivers/usb/musb/blackfin.c16
-rw-r--r--drivers/usb/musb/blackfin.h37
-rw-r--r--drivers/usb/musb/musb_core.c12
-rw-r--r--drivers/usb/musb/musb_core.h8
-rw-r--r--drivers/usb/musb/musb_dma.h11
-rw-r--r--drivers/usb/musb/musb_gadget.c196
-rw-r--r--drivers/usb/musb/musb_gadget.h4
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c70
-rw-r--r--drivers/usb/musb/musb_host.c10
-rw-r--r--drivers/usb/musb/musb_regs.h4
-rw-r--r--drivers/usb/musb/musbhsdma.c12
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/otg/Kconfig9
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/twl4030-usb.c42
-rw-r--r--drivers/usb/otg/ulpi.c136
-rw-r--r--drivers/usb/serial/ark3116.c975
-rw-r--r--drivers/usb/serial/ftdi_sio.c32
-rw-r--r--drivers/usb/serial/ftdi_sio.h14
-rw-r--r--drivers/usb/serial/generic.c12
-rw-r--r--drivers/usb/serial/mos7840.c24
-rw-r--r--drivers/usb/serial/opticon.c7
-rw-r--r--drivers/usb/serial/option.c39
-rw-r--r--drivers/usb/serial/sierra.c91
-rw-r--r--drivers/usb/serial/usb-serial.c90
-rw-r--r--drivers/usb/storage/scsiglue.c3
-rw-r--r--drivers/usb/storage/transport.c17
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/usb.c43
-rw-r--r--drivers/usb/storage/usb.h1
-rw-r--r--drivers/usb/usb-skeleton.c4
-rw-r--r--drivers/usb/wusbcore/devconnect.c7
-rw-r--r--drivers/usb/wusbcore/security.c6
-rw-r--r--drivers/usb/wusbcore/wusbhc.c32
-rw-r--r--drivers/usb/wusbcore/wusbhc.h1
-rw-r--r--drivers/video/Kconfig18
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/backlight/adp5520_bl.c125
-rw-r--r--drivers/video/backlight/adx_bl.c2
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c2
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/cr_bllcd.c4
-rw-r--r--drivers/video/backlight/da903x_bl.c4
-rw-r--r--drivers/video/backlight/generic_bl.c2
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/jornada720_bl.c2
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/lcd.c4
-rw-r--r--drivers/video/backlight/locomolcd.c2
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c20
-rw-r--r--drivers/video/backlight/omap1_bl.c2
-rw-r--r--drivers/video/backlight/progear_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c11
-rw-r--r--drivers/video/backlight/tosa_bl.c2
-rw-r--r--drivers/video/backlight/wm831x_bl.c2
-rw-r--r--drivers/video/bfin-lq035q1-fb.c826
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c32
-rw-r--r--drivers/video/clps711xfb.c50
-rw-r--r--drivers/video/da8xx-fb.c175
-rw-r--r--drivers/video/display/display-sysfs.c2
-rw-r--r--drivers/video/ep93xx-fb.c2
-rw-r--r--drivers/video/geode/display_gx.c4
-rw-r--r--drivers/video/geode/gxfb.h2
-rw-r--r--drivers/video/geode/gxfb_core.c2
-rw-r--r--drivers/video/geode/lxfb.h12
-rw-r--r--drivers/video/geode/lxfb_ops.c4
-rw-r--r--drivers/video/geode/suspend_gx.c2
-rw-r--r--drivers/video/geode/video_gx.c2
-rw-r--r--drivers/video/hitfb.c2
-rw-r--r--drivers/video/i810/i810_dvt.c53
-rw-r--r--drivers/video/intelfb/intelfbdrv.c3
-rw-r--r--drivers/video/intelfb/intelfbhw.c47
-rw-r--r--drivers/video/intelfb/intelfbhw.h1
-rw-r--r--drivers/video/matrox/g450_pll.c3
-rw-r--r--drivers/video/maxinefb.c3
-rw-r--r--drivers/video/mb862xx/Makefile2
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c14
-rw-r--r--drivers/video/mb862xx/mb862xxfb.h2
-rw-r--r--drivers/video/mb862xx/mb862xxfb_accel.c331
-rw-r--r--drivers/video/mb862xx/mb862xxfb_accel.h203
-rw-r--r--drivers/video/modedb.c24
-rw-r--r--drivers/video/offb.c15
-rw-r--r--drivers/video/omap/Kconfig5
-rw-r--r--drivers/video/omap/blizzard.c2
-rw-r--r--drivers/video/omap/dispc.c21
-rw-r--r--drivers/video/omap/hwa742.c3
-rw-r--r--drivers/video/omap/lcd_2430sdp.c7
-rw-r--r--drivers/video/omap/lcd_ams_delta.c3
-rw-r--r--drivers/video/omap/lcd_apollon.c3
-rw-r--r--drivers/video/omap/lcd_h3.c2
-rw-r--r--drivers/video/omap/lcd_h4.c2
-rw-r--r--drivers/video/omap/lcd_htcherald.c2
-rw-r--r--drivers/video/omap/lcd_inn1510.c2
-rw-r--r--drivers/video/omap/lcd_inn1610.c2
-rw-r--r--drivers/video/omap/lcd_ldp.c7
-rw-r--r--drivers/video/omap/lcd_mipid.c3
-rw-r--r--drivers/video/omap/lcd_omap2evm.c13
-rw-r--r--drivers/video/omap/lcd_omap3beagle.c6
-rw-r--r--drivers/video/omap/lcd_omap3evm.c13
-rw-r--r--drivers/video/omap/lcd_osk.c2
-rw-r--r--drivers/video/omap/lcd_overo.c5
-rw-r--r--drivers/video/omap/lcd_palmte.c2
-rw-r--r--drivers/video/omap/lcd_palmtt.c2
-rw-r--r--drivers/video/omap/lcd_palmz71.c2
-rw-r--r--drivers/video/omap/lcdc.c36
-rw-r--r--drivers/video/omap/omapfb.h (renamed from arch/arm/plat-omap/include/plat/omapfb.h)191
-rw-r--r--drivers/video/omap/omapfb_main.c2
-rw-r--r--drivers/video/omap/rfbi.c3
-rw-r--r--drivers/video/omap/sossi.c3
-rw-r--r--drivers/video/omap2/Kconfig9
-rw-r--r--drivers/video/omap2/Makefile6
-rw-r--r--drivers/video/omap2/displays/Kconfig22
-rw-r--r--drivers/video/omap2/displays/Makefile4
-rw-r--r--drivers/video/omap2/displays/panel-generic.c104
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c153
-rw-r--r--drivers/video/omap2/displays/panel-taal.c1003
-rw-r--r--drivers/video/omap2/dss/Kconfig89
-rw-r--r--drivers/video/omap2/dss/Makefile6
-rw-r--r--drivers/video/omap2/dss/core.c919
-rw-r--r--drivers/video/omap2/dss/dispc.c3091
-rw-r--r--drivers/video/omap2/dss/display.c671
-rw-r--r--drivers/video/omap2/dss/dpi.c399
-rw-r--r--drivers/video/omap2/dss/dsi.c3710
-rw-r--r--drivers/video/omap2/dss/dss.c596
-rw-r--r--drivers/video/omap2/dss/dss.h370
-rw-r--r--drivers/video/omap2/dss/manager.c1487
-rw-r--r--drivers/video/omap2/dss/overlay.c680
-rw-r--r--drivers/video/omap2/dss/rfbi.c1309
-rw-r--r--drivers/video/omap2/dss/sdi.c277
-rw-r--r--drivers/video/omap2/dss/venc.c797
-rw-r--r--drivers/video/omap2/omapfb/Kconfig37
-rw-r--r--drivers/video/omap2/omapfb/Makefile2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c755
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c2261
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c507
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h146
-rw-r--r--drivers/video/omap2/vram.c655
-rw-r--r--drivers/video/omap2/vrfb.c315
-rw-r--r--drivers/video/output.c2
-rw-r--r--drivers/video/pmag-ba-fb.c3
-rw-r--r--drivers/video/pmagb-b-fb.c3
-rw-r--r--drivers/video/pxafb.c7
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c12
-rw-r--r--drivers/video/sis/sis_main.c2
-rw-r--r--drivers/video/sm501fb.c249
-rw-r--r--drivers/video/via/lcd.c40
-rw-r--r--drivers/video/via/viafbdev.c6
-rw-r--r--drivers/video/xen-fbfront.c3
-rw-r--r--drivers/watchdog/Kconfig16
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/adx_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c40
-rw-r--r--drivers/watchdog/mpc5200_wdt.c293
-rw-r--r--drivers/watchdog/rm9k_wdt.c419
-rw-r--r--drivers/watchdog/twl4030_wdt.c4
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/cpu_hotplug.c1
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/grant-table.c1
-rw-r--r--drivers/xen/sys-hypervisor.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c2
-rw-r--r--drivers/xen/xenfs/super.c2
-rw-r--r--fs/afs/write.c8
-rw-r--r--fs/aio.c40
-rw-r--r--fs/anon_inodes.c35
-rw-r--r--fs/autofs4/autofs_i.h38
-rw-r--r--fs/autofs4/expire.c8
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/autofs4/root.c616
-rw-r--r--fs/binfmt_aout.c13
-rw-r--r--fs/binfmt_elf.c35
-rw-r--r--fs/binfmt_elf_fdpic.c40
-rw-r--r--fs/binfmt_flat.c6
-rw-r--r--fs/binfmt_som.c2
-rw-r--r--fs/btrfs/acl.c68
-rw-r--r--fs/btrfs/btrfs_inode.h5
-rw-r--r--fs/btrfs/ctree.c229
-rw-r--r--fs/btrfs/ctree.h40
-rw-r--r--fs/btrfs/dir-item.c19
-rw-r--r--fs/btrfs/disk-io.c27
-rw-r--r--fs/btrfs/extent-tree.c72
-rw-r--r--fs/btrfs/file.c673
-rw-r--r--fs/btrfs/inode.c567
-rw-r--r--fs/btrfs/ioctl.c34
-rw-r--r--fs/btrfs/ordered-data.c115
-rw-r--r--fs/btrfs/ordered-data.h5
-rw-r--r--fs/btrfs/relocation.c38
-rw-r--r--fs/btrfs/super.c15
-rw-r--r--fs/btrfs/transaction.c44
-rw-r--r--fs/btrfs/transaction.h6
-rw-r--r--fs/btrfs/tree-log.c86
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/btrfs/xattr.c80
-rw-r--r--fs/btrfs/xattr.h9
-rw-r--r--fs/cachefiles/bind.c11
-rw-r--r--fs/cachefiles/daemon.c4
-rw-r--r--fs/cachefiles/rdwr.c2
-rw-r--r--fs/cifs/dir.c3
-rw-r--r--fs/cifs/export.c2
-rw-r--r--fs/cifs/file.c6
-rw-r--r--fs/compat.c2
-rw-r--r--fs/compat_ioctl.c783
-rw-r--r--fs/dcache.c1
-rw-r--r--fs/debugfs/inode.c55
-rw-r--r--fs/devpts/inode.c16
-rw-r--r--fs/direct-io.c165
-rw-r--r--fs/ecryptfs/dentry.c2
-rw-r--r--fs/ecryptfs/inode.c6
-rw-r--r--fs/ecryptfs/main.c9
-rw-r--r--fs/eventfd.c2
-rw-r--r--fs/eventpoll.c2
-rw-r--r--fs/exec.c49
-rw-r--r--fs/exportfs/expfs.c2
-rw-r--r--fs/ext2/acl.c79
-rw-r--r--fs/ext2/dir.c6
-rw-r--r--fs/ext2/ext2.h3
-rw-r--r--fs/ext2/file.c21
-rw-r--r--fs/ext2/inode.c6
-rw-r--r--fs/ext2/super.c206
-rw-r--r--fs/ext2/xattr.c11
-rw-r--r--fs/ext2/xattr_security.c16
-rw-r--r--fs/ext2/xattr_trusted.c16
-rw-r--r--fs/ext2/xattr_user.c25
-rw-r--r--fs/ext2/xip.c5
-rw-r--r--fs/ext3/acl.c74
-rw-r--r--fs/ext3/inode.c18
-rw-r--r--fs/ext3/resize.c2
-rw-r--r--fs/ext3/super.c468
-rw-r--r--fs/ext3/xattr.c38
-rw-r--r--fs/ext3/xattr_security.c20
-rw-r--r--fs/ext3/xattr_trusted.c18
-rw-r--r--fs/ext3/xattr_user.c25
-rw-r--r--fs/ext4/acl.c74
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/super.c37
-rw-r--r--fs/ext4/xattr.c31
-rw-r--r--fs/ext4/xattr_security.c20
-rw-r--r--fs/ext4/xattr_trusted.c20
-rw-r--r--fs/ext4/xattr_user.c25
-rw-r--r--fs/fat/fat.h3
-rw-r--r--fs/fat/fatent.c25
-rw-r--r--fs/fat/inode.c8
-rw-r--r--fs/fat/misc.c57
-rw-r--r--fs/file_table.c50
-rw-r--r--fs/fscache/object-list.c2
-rw-r--r--fs/generic_acl.c158
-rw-r--r--fs/gfs2/acl.c16
-rw-r--r--fs/gfs2/inode.c5
-rw-r--r--fs/gfs2/sys.c16
-rw-r--r--fs/gfs2/xattr.c69
-rw-r--r--fs/gfs2/xattr.h7
-rw-r--r--fs/hfs/catalog.c4
-rw-r--r--fs/hfs/dir.c11
-rw-r--r--fs/hfs/super.c7
-rw-r--r--fs/hpfs/super.c17
-rw-r--r--fs/hugetlbfs/inode.c17
-rw-r--r--fs/inode.c26
-rw-r--r--fs/internal.h8
-rw-r--r--fs/isofs/compress.c533
-rw-r--r--fs/isofs/export.c2
-rw-r--r--fs/isofs/rock.c3
-rw-r--r--fs/jbd2/commit.c2
-rw-r--r--fs/jffs2/acl.c65
-rw-r--r--fs/jffs2/gc.c3
-rw-r--r--fs/jffs2/readinode.c2
-rw-r--r--fs/jffs2/security.c18
-rw-r--r--fs/jffs2/summary.c2
-rw-r--r--fs/jffs2/xattr.c6
-rw-r--r--fs/jffs2/xattr_trusted.c18
-rw-r--r--fs/jffs2/xattr_user.c18
-rw-r--r--fs/jfs/jfs_txnmgr.c2
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/libfs.c1
-rw-r--r--fs/lockd/svc4proc.c4
-rw-r--r--fs/lockd/svcproc.c4
-rw-r--r--fs/namei.c505
-rw-r--r--fs/nfs/Kconfig2
-rw-r--r--fs/nfs/callback.c13
-rw-r--r--fs/nfs/callback.h16
-rw-r--r--fs/nfs/callback_proc.c64
-rw-r--r--fs/nfs/callback_xdr.c34
-rw-r--r--fs/nfs/client.c14
-rw-r--r--fs/nfs/delegation.c77
-rw-r--r--fs/nfs/delegation.h7
-rw-r--r--fs/nfs/dir.c67
-rw-r--r--fs/nfs/dns_resolve.c4
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfs/internal.h54
-rw-r--r--fs/nfs/iostat.h24
-rw-r--r--fs/nfs/nfs4_fs.h17
-rw-r--r--fs/nfs/nfs4proc.c633
-rw-r--r--fs/nfs/nfs4state.c241
-rw-r--r--fs/nfs/nfs4xdr.c135
-rw-r--r--fs/nfs/read.c12
-rw-r--r--fs/nfs/super.c104
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfs/write.c10
-rw-r--r--fs/nfsctl.c2
-rw-r--r--fs/nfsd/auth.c12
-rw-r--r--fs/nfsd/cache.h (renamed from include/linux/nfsd/cache.h)5
-rw-r--r--fs/nfsd/export.c65
-rw-r--r--fs/nfsd/lockd.c10
-rw-r--r--fs/nfsd/nfs2acl.c27
-rw-r--r--fs/nfsd/nfs3acl.c15
-rw-r--r--fs/nfsd/nfs3proc.c20
-rw-r--r--fs/nfsd/nfs3xdr.c15
-rw-r--r--fs/nfsd/nfs4acl.c12
-rw-r--r--fs/nfsd/nfs4callback.c19
-rw-r--r--fs/nfsd/nfs4idmap.c17
-rw-r--r--fs/nfsd/nfs4proc.c19
-rw-r--r--fs/nfsd/nfs4recover.c16
-rw-r--r--fs/nfsd/nfs4state.c84
-rw-r--r--fs/nfsd/nfs4xdr.c26
-rw-r--r--fs/nfsd/nfscache.c14
-rw-r--r--fs/nfsd/nfsctl.c51
-rw-r--r--fs/nfsd/nfsd.h (renamed from include/linux/nfsd/nfsd.h)98
-rw-r--r--fs/nfsd/nfsfh.c102
-rw-r--r--fs/nfsd/nfsfh.h208
-rw-r--r--fs/nfsd/nfsproc.c22
-rw-r--r--fs/nfsd/nfssvc.c22
-rw-r--r--fs/nfsd/nfsxdr.c12
-rw-r--r--fs/nfsd/state.h (renamed from include/linux/nfsd/state.h)14
-rw-r--r--fs/nfsd/stats.c11
-rw-r--r--fs/nfsd/vfs.c139
-rw-r--r--fs/nfsd/vfs.h101
-rw-r--r--fs/nfsd/xdr.h (renamed from include/linux/nfsd/xdr.h)10
-rw-r--r--fs/nfsd/xdr3.h (renamed from include/linux/nfsd/xdr3.h)4
-rw-r--r--fs/nfsd/xdr4.h (renamed from include/linux/nfsd/xdr4.h)5
-rw-r--r--fs/nilfs2/alloc.c108
-rw-r--r--fs/nilfs2/alloc.h21
-rw-r--r--fs/nilfs2/bmap.c8
-rw-r--r--fs/nilfs2/btnode.c76
-rw-r--r--fs/nilfs2/btnode.h6
-rw-r--r--fs/nilfs2/btree.c106
-rw-r--r--fs/nilfs2/btree.h22
-rw-r--r--fs/nilfs2/cpfile.c26
-rw-r--r--fs/nilfs2/cpfile.h3
-rw-r--r--fs/nilfs2/dat.c47
-rw-r--r--fs/nilfs2/dat.h3
-rw-r--r--fs/nilfs2/dir.c24
-rw-r--r--fs/nilfs2/gcdat.c3
-rw-r--r--fs/nilfs2/gcinode.c6
-rw-r--r--fs/nilfs2/ifile.c35
-rw-r--r--fs/nilfs2/ifile.h2
-rw-r--r--fs/nilfs2/inode.c7
-rw-r--r--fs/nilfs2/mdt.c56
-rw-r--r--fs/nilfs2/mdt.h25
-rw-r--r--fs/nilfs2/namei.c83
-rw-r--r--fs/nilfs2/recovery.c34
-rw-r--r--fs/nilfs2/segbuf.c185
-rw-r--r--fs/nilfs2/segbuf.h54
-rw-r--r--fs/nilfs2/segment.c369
-rw-r--r--fs/nilfs2/segment.h2
-rw-r--r--fs/nilfs2/sufile.c203
-rw-r--r--fs/nilfs2/sufile.h14
-rw-r--r--fs/nilfs2/super.c91
-rw-r--r--fs/nilfs2/the_nilfs.c155
-rw-r--r--fs/nilfs2/the_nilfs.h10
-rw-r--r--fs/notify/inotify/inotify_user.c29
-rw-r--r--fs/ntfs/inode.c6
-rw-r--r--fs/ocfs2/acl.c87
-rw-r--r--fs/ocfs2/alloc.c4
-rw-r--r--fs/ocfs2/aops.c34
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/ocfs2/quota.h4
-rw-r--r--fs/ocfs2/quota_local.c2
-rw-r--r--fs/ocfs2/xattr.c72
-rw-r--r--fs/open.c17
-rw-r--r--fs/pipe.c45
-rw-r--r--fs/proc/array.c19
-rw-r--r--fs/proc/base.c72
-rw-r--r--fs/proc/generic.c21
-rw-r--r--fs/proc/inode.c31
-rw-r--r--fs/proc/internal.h10
-rw-r--r--fs/proc/page.c45
-rw-r--r--fs/proc/proc_devtree.c41
-rw-r--r--fs/proc/task_mmu.c45
-rw-r--r--fs/proc/task_nommu.c8
-rw-r--r--fs/qnx4/bitmap.c24
-rw-r--r--fs/qnx4/inode.c22
-rw-r--r--fs/quota/Kconfig8
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/quota/quota_v1.c2
-rw-r--r--fs/quota/quota_v2.c167
-rw-r--r--fs/quota/quotaio_v2.h19
-rw-r--r--fs/ramfs/file-nommu.c2
-rw-r--r--fs/reiserfs/Makefile6
-rw-r--r--fs/reiserfs/inode.c18
-rw-r--r--fs/reiserfs/procfs.c65
-rw-r--r--fs/reiserfs/super.c4
-rw-r--r--fs/reiserfs/xattr.c36
-rw-r--r--fs/reiserfs/xattr_acl.c69
-rw-r--r--fs/reiserfs/xattr_security.c21
-rw-r--r--fs/reiserfs/xattr_trusted.c21
-rw-r--r--fs/reiserfs/xattr_user.c21
-rw-r--r--fs/signalfd.c2
-rw-r--r--fs/stack.c71
-rw-r--r--fs/sync.c68
-rw-r--r--fs/sysfs/dir.c388
-rw-r--r--fs/sysfs/file.c41
-rw-r--r--fs/sysfs/inode.c176
-rw-r--r--fs/sysfs/symlink.c11
-rw-r--r--fs/sysfs/sysfs.h9
-rw-r--r--fs/timerfd.c2
-rw-r--r--fs/ubifs/debug.c9
-rw-r--r--fs/ubifs/file.c2
-rw-r--r--fs/ubifs/super.c7
-rw-r--r--fs/udf/balloc.c2
-rw-r--r--fs/udf/file.c1
-rw-r--r--fs/udf/inode.c24
-rw-r--r--fs/udf/namei.c38
-rw-r--r--fs/udf/super.c32
-rw-r--r--fs/ufs/dir.c10
-rw-r--r--fs/ufs/namei.c8
-rw-r--r--fs/ufs/super.c52
-rw-r--r--fs/ufs/ufs.h4
-rw-r--r--fs/xattr.c28
-rw-r--r--fs/xfs/Makefile8
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c58
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c186
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c141
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h43
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_fs_subr.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c94
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.h45
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c175
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h7
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c16
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.c75
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h1369
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h5
-rw-r--r--fs/xfs/linux-2.6/xfs_xattr.c71
-rw-r--r--fs/xfs/quota/xfs_dquot.c110
-rw-r--r--fs/xfs/quota/xfs_dquot.h21
-rw-r--r--fs/xfs/quota/xfs_qm.c40
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c4
-rw-r--r--fs/xfs/support/debug.h18
-rw-r--r--fs/xfs/support/ktrace.c323
-rw-r--r--fs/xfs/support/ktrace.h85
-rw-r--r--fs/xfs/xfs.h16
-rw-r--r--fs/xfs/xfs_acl.h3
-rw-r--r--fs/xfs/xfs_ag.h14
-rw-r--r--fs/xfs/xfs_alloc.c230
-rw-r--r--fs/xfs/xfs_alloc.h27
-rw-r--r--fs/xfs/xfs_alloc_btree.c1
-rw-r--r--fs/xfs/xfs_attr.c123
-rw-r--r--fs/xfs/xfs_attr.h11
-rw-r--r--fs/xfs/xfs_attr_leaf.c16
-rw-r--r--fs/xfs/xfs_attr_sf.h40
-rw-r--r--fs/xfs/xfs_bmap.c942
-rw-r--r--fs/xfs/xfs_bmap.h58
-rw-r--r--fs/xfs/xfs_bmap_btree.c9
-rw-r--r--fs/xfs/xfs_bmap_btree.h14
-rw-r--r--fs/xfs/xfs_btree.c5
-rw-r--r--fs/xfs/xfs_btree_trace.h17
-rw-r--r--fs/xfs/xfs_buf_item.c87
-rw-r--r--fs/xfs/xfs_buf_item.h20
-rw-r--r--fs/xfs/xfs_da_btree.c3
-rw-r--r--fs/xfs/xfs_da_btree.h7
-rw-r--r--fs/xfs/xfs_dfrag.c2
-rw-r--r--fs/xfs/xfs_dir2.c8
-rw-r--r--fs/xfs/xfs_dir2_block.c20
-rw-r--r--fs/xfs/xfs_dir2_leaf.c21
-rw-r--r--fs/xfs/xfs_dir2_node.c27
-rw-r--r--fs/xfs/xfs_dir2_sf.c26
-rw-r--r--fs/xfs/xfs_dir2_trace.c216
-rw-r--r--fs/xfs/xfs_dir2_trace.h72
-rw-r--r--fs/xfs/xfs_filestream.c8
-rw-r--r--fs/xfs/xfs_filestream.h8
-rw-r--r--fs/xfs/xfs_fsops.c27
-rw-r--r--fs/xfs/xfs_ialloc.c2
-rw-r--r--fs/xfs/xfs_iget.c132
-rw-r--r--fs/xfs/xfs_inode.c79
-rw-r--r--fs/xfs/xfs_inode.h82
-rw-r--r--fs/xfs/xfs_inode_item.c5
-rw-r--r--fs/xfs/xfs_inode_item.h6
-rw-r--r--fs/xfs/xfs_iomap.c94
-rw-r--r--fs/xfs/xfs_iomap.h8
-rw-r--r--fs/xfs/xfs_log.c183
-rw-r--r--fs/xfs/xfs_log_priv.h20
-rw-r--r--fs/xfs/xfs_log_recover.c55
-rw-r--r--fs/xfs/xfs_mount.c32
-rw-r--r--fs/xfs/xfs_mount.h27
-rw-r--r--fs/xfs/xfs_quota.h8
-rw-r--r--fs/xfs/xfs_rename.c1
-rw-r--r--fs/xfs/xfs_rtalloc.c1
-rw-r--r--fs/xfs/xfs_rw.c33
-rw-r--r--fs/xfs/xfs_rw.h29
-rw-r--r--fs/xfs/xfs_trans.c7
-rw-r--r--fs/xfs/xfs_trans.h49
-rw-r--r--fs/xfs/xfs_trans_buf.c75
-rw-r--r--fs/xfs/xfs_vnodeops.c87
-rw-r--r--fs/xfs/xfs_vnodeops.h1
-rw-r--r--include/acpi/acoutput.h8
-rw-r--r--include/acpi/acpi_hest.h12
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/processor.h11
-rw-r--r--include/asm-generic/bitops/atomic.h10
-rw-r--r--include/asm-generic/bug.h8
-rw-r--r--include/asm-generic/fcntl.h25
-rw-r--r--include/asm-generic/gpio.h9
-rw-r--r--include/asm-generic/mman-common.h6
-rw-r--r--include/asm-generic/percpu.h5
-rw-r--r--include/asm-generic/unistd.h10
-rw-r--r--include/asm-generic/vmlinux.lds.h8
-rw-r--r--include/drm/Kbuild2
-rw-r--r--include/drm/drm.h65
-rw-r--r--include/drm/drmP.h92
-rw-r--r--include/drm/drm_crtc.h47
-rw-r--r--include/drm/drm_dp_helper.h (renamed from drivers/gpu/drm/i915/intel_dp.h)74
-rw-r--r--include/drm/drm_edid.h8
-rw-r--r--include/drm/drm_mm.h35
-rw-r--r--include/drm/drm_mode.h80
-rw-r--r--include/drm/drm_os_linux.h2
-rw-r--r--include/drm/i2c/ch7006.h86
-rw-r--r--include/drm/i915_drm.h78
-rw-r--r--include/drm/mga_drm.h2
-rw-r--r--include/drm/nouveau_drm.h220
-rw-r--r--include/drm/radeon_drm.h2
-rw-r--r--include/drm/ttm/ttm_bo_api.h115
-rw-r--r--include/drm/ttm/ttm_bo_driver.h37
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h107
-rw-r--r--include/drm/ttm/ttm_lock.h247
-rw-r--r--include/drm/ttm/ttm_memory.h1
-rw-r--r--include/drm/ttm/ttm_object.h271
-rw-r--r--include/drm/via_drm.h2
-rw-r--r--include/drm/vmwgfx_drm.h574
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/atmel-mci.h4
-rw-r--r--include/linux/backlight.h12
-rw-r--r--include/linux/binfmts.h10
-rw-r--r--include/linux/bitmap.h11
-rw-r--r--include/linux/can/dev.h9
-rw-r--r--include/linux/cpu.h15
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/cpumask.h2
-rw-r--r--include/linux/cs5535.h172
-rw-r--r--include/linux/ctype.h3
-rw-r--r--include/linux/decompress/mm.h4
-rw-r--r--include/linux/device-mapper.h8
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/dm-dirty-log.h6
-rw-r--r--include/linux/dm-ioctl.h13
-rw-r--r--include/linux/dm-region-hash.h3
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/dynamic_debug.h13
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/enclosure.h2
-rw-r--r--include/linux/err.h5
-rw-r--r--include/linux/exportfs.h2
-rw-r--r--include/linux/ext2_fs.h16
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/file.h8
-rw-r--r--include/linux/firmware.h5
-rw-r--r--include/linux/fs.h71
-rw-r--r--include/linux/fs_stack.h6
-rw-r--r--include/linux/fsl_devices.h11
-rw-r--r--include/linux/ftrace_event.h5
-rw-r--r--include/linux/generic_acl.h41
-rw-r--r--include/linux/gigaset_dev.h22
-rw-r--r--include/linux/gpio.h6
-rw-r--r--include/linux/hayesesp.h114
-rw-r--r--include/linux/hrtimer.h58
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/hw_breakpoint.h40
-rw-r--r--include/linux/i2c.h92
-rw-r--r--include/linux/i2c/tps65010.h19
-rw-r--r--include/linux/i2c/twl.h (renamed from include/linux/i2c/twl4030.h)209
-rw-r--r--include/linux/i8042.h18
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/ima.h12
-rw-r--r--include/linux/init.h2
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/iommu-helper.h3
-rw-r--r--include/linux/ioport.h4
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/isicom.h1
-rw-r--r--include/linux/kallsyms.h12
-rw-r--r--include/linux/kernel-page-flags.h46
-rw-r--r--include/linux/kernel.h58
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/kfifo.h555
-rw-r--r--include/linux/kmemleak.h6
-rw-r--r--include/linux/kmsg_dump.h60
-rw-r--r--include/linux/ksm.h96
-rw-r--r--include/linux/kvm.h1
-rw-r--r--include/linux/leds-lp3944.h3
-rw-r--r--include/linux/leds-pca9532.h2
-rw-r--r--include/linux/leds-regulator.h46
-rw-r--r--include/linux/lis3lv02d.h15
-rw-r--r--include/linux/lmb.h1
-rw-r--r--include/linux/memcontrol.h30
-rw-r--r--include/linux/memory.h27
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h3
-rw-r--r--include/linux/mfd/88pm8607.h217
-rw-r--r--include/linux/mfd/ab4500.h262
-rw-r--r--include/linux/mfd/adp5520.h299
-rw-r--r--include/linux/mfd/ezx-pcap.h3
-rw-r--r--include/linux/mfd/mc13783-private.h208
-rw-r--r--include/linux/mfd/mc13783.h120
-rw-r--r--include/linux/mfd/pcf50633/core.h17
-rw-r--r--include/linux/mfd/pcf50633/mbc.h1
-rw-r--r--include/linux/mfd/wm831x/core.h43
-rw-r--r--include/linux/mfd/wm831x/pdata.h18
-rw-r--r--include/linux/mfd/wm8350/core.h14
-rw-r--r--include/linux/mfd/wm8350/gpio.h18
-rw-r--r--include/linux/migrate.h8
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mm.h36
-rw-r--r--include/linux/mmdebug.h2
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/mtd/bbm.h35
-rw-r--r--include/linux/mtd/cfi.h9
-rw-r--r--include/linux/mtd/flashchip.h9
-rw-r--r--include/linux/mtd/nand.h97
-rw-r--r--include/linux/mtd/nand_ecc.h10
-rw-r--r--include/linux/mtd/onenand.h23
-rw-r--r--include/linux/mtd/onenand_regs.h2
-rw-r--r--include/linux/namei.h3
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h14
-rw-r--r--include/linux/nfsacl.h1
-rw-r--r--include/linux/nfsd/export.h19
-rw-r--r--include/linux/nfsd/nfsfh.h206
-rw-r--r--include/linux/nfsd/syscall.h8
-rw-r--r--include/linux/nilfs2_fs.h24
-rw-r--r--include/linux/node.h16
-rw-r--r--include/linux/nodemask.h33
-rw-r--r--include/linux/numa.h2
-rw-r--r--include/linux/omapfb.h251
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/page-flags.h14
-rw-r--r--include/linux/page_cgroup.h7
-rw-r--r--include/linux/pci.h42
-rw-r--r--include/linux/pci_ids.h14
-rw-r--r--include/linux/pci_regs.h18
-rw-r--r--include/linux/pcieport_if.h16
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/percpu.h434
-rw-r--r--include/linux/perf_counter.h444
-rw-r--r--include/linux/perf_event.h43
-rw-r--r--include/linux/platform_device.h20
-rw-r--r--include/linux/plist.h43
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pnp.h13
-rw-r--r--include/linux/ptrace.h23
-rw-r--r--include/linux/pwm_backlight.h2
-rw-r--r--include/linux/quota.h6
-rw-r--r--include/linux/raid/pq.h19
-rw-r--r--include/linux/rcutiny.h5
-rw-r--r--include/linux/rcutree.h11
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/regulator/machine.h6
-rw-r--r--include/linux/regulator/max8660.h57
-rw-r--r--include/linux/reiserfs_fs.h35
-rw-r--r--include/linux/rmap.h43
-rw-r--r--include/linux/rtmutex.h6
-rw-r--r--include/linux/rtnetlink.h6
-rw-r--r--include/linux/rwlock.h125
-rw-r--r--include/linux/rwlock_api_smp.h282
-rw-r--r--include/linux/rwlock_types.h56
-rw-r--r--include/linux/rwsem-spinlock.h6
-rw-r--r--include/linux/sched.h54
-rw-r--r--include/linux/security.h7
-rw-r--r--include/linux/sem.h5
-rw-r--r--include/linux/shmem_fs.h16
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/sm501-regs.h2
-rw-r--r--include/linux/spi/dw_spi.h212
-rw-r--r--include/linux/spi/sh_msiof.h10
-rw-r--r--include/linux/spi/xilinx_spi.h20
-rw-r--r--include/linux/spinlock.h377
-rw-r--r--include/linux/spinlock_api_smp.h360
-rw-r--r--include/linux/spinlock_api_up.h66
-rw-r--r--include/linux/spinlock_types.h92
-rw-r--r--include/linux/spinlock_types_up.h12
-rw-r--r--include/linux/spinlock_up.h42
-rw-r--r--include/linux/string.h10
-rw-r--r--include/linux/sunrpc/debug.h3
-rw-r--r--include/linux/sunrpc/rpc_rdma.h2
-rw-r--r--include/linux/sunrpc/sched.h16
-rw-r--r--include/linux/sunrpc/svc.h7
-rw-r--r--include/linux/swap.h67
-rw-r--r--include/linux/syscalls.h10
-rw-r--r--include/linux/timb_gpio.h37
-rw-r--r--include/linux/trace_seq.h7
-rw-r--r--include/linux/tracehook.h7
-rw-r--r--include/linux/tty.h27
-rw-r--r--include/linux/usb.h33
-rw-r--r--include/linux/usb/composite.h1
-rw-r--r--include/linux/usb/otg.h68
-rw-r--r--include/linux/usb/serial.h6
-rw-r--r--include/linux/usb/ulpi.h7
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/usbdevice_fs.h26
-rw-r--r--include/linux/vermagic.h2
-rw-r--r--include/linux/videodev2.h123
-rw-r--r--include/linux/vmstat.h12
-rw-r--r--include/linux/vt.h19
-rw-r--r--include/linux/writeback.h3
-rw-r--r--include/linux/xattr.h13
-rw-r--r--include/media/ir-common.h39
-rw-r--r--include/media/ir-core.h62
-rw-r--r--include/media/mt9t112.h30
-rw-r--r--include/media/ov772x.h4
-rw-r--r--include/media/rj54n1cb0c.h19
-rw-r--r--include/media/saa7146_vv.h4
-rw-r--r--include/media/sh_mobile_ceu.h2
-rw-r--r--include/media/soc_camera.h30
-rw-r--r--include/media/soc_camera_platform.h3
-rw-r--r--include/media/soc_mediabus.h65
-rw-r--r--include/media/tw9910.h1
-rw-r--r--include/media/v4l2-chip-ident.h2
-rw-r--r--include/media/v4l2-common.h2
-rw-r--r--include/media/v4l2-dev.h23
-rw-r--r--include/media/v4l2-ioctl.h15
-rw-r--r--include/media/v4l2-mediabus.h61
-rw-r--r--include/media/v4l2-subdev.h61
-rw-r--r--include/net/compat.h2
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_hashtables.h8
-rw-r--r--include/net/inet_timewait_sock.h3
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ipv6.h8
-rw-r--r--include/net/neighbour.h7
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h4
-rw-r--r--include/net/snmp.h50
-rw-r--r--include/net/tcp.h26
-rw-r--r--include/pcmcia/cs.h4
-rw-r--r--include/pcmcia/ds.h6
-rw-r--r--include/pcmcia/mem_op.h2
-rw-r--r--include/pcmcia/ss.h12
-rw-r--r--include/rdma/ib_addr.h36
-rw-r--r--include/rdma/ib_sa.h6
-rw-r--r--include/rdma/ib_user_sa.h16
-rw-r--r--include/rdma/ib_verbs.h5
-rw-r--r--include/rdma/rdma_user_cm.h6
-rw-r--r--include/scsi/libiscsi.h3
-rw-r--r--include/scsi/libiscsi_tcp.h2
-rw-r--r--include/scsi/libsrp.h2
-rw-r--r--include/scsi/osd_initiator.h5
-rw-r--r--include/trace/events/timer.h8
-rw-r--r--include/trace/ftrace.h56
-rw-r--r--include/video/da8xx-fb.h1
-rw-r--r--include/video/sh_mobile_lcdc.h2
-rw-r--r--include/xen/xen.h32
-rw-r--r--init/Kconfig22
-rw-r--r--init/Makefile8
-rw-r--r--init/initramfs.c10
-rw-r--r--init/main.c25
-rw-r--r--init/version.c4
-rw-r--r--ipc/mqueue.c2
-rw-r--r--ipc/msg.c1
-rw-r--r--ipc/sem.c214
-rw-r--r--ipc/shm.c40
-rw-r--r--kernel/acct.c3
-rw-r--r--kernel/audit_tree.c13
-rw-r--r--kernel/auditsc.c1
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/cpu.c26
-rw-r--r--kernel/cpuset.c18
-rw-r--r--kernel/exit.c40
-rw-r--r--kernel/fork.c15
-rw-r--r--kernel/futex.c60
-rw-r--r--kernel/hrtimer.c171
-rw-r--r--kernel/hw_breakpoint.c146
-rw-r--r--kernel/irq/autoprobe.c20
-rw-r--r--kernel/irq/chip.c86
-rw-r--r--kernel/irq/handle.c22
-rw-r--r--kernel/irq/internals.h2
-rw-r--r--kernel/irq/manage.c50
-rw-r--r--kernel/irq/migration.c2
-rw-r--r--kernel/irq/numa_migrate.c8
-rw-r--r--kernel/irq/pm.c8
-rw-r--r--kernel/irq/proc.c4
-rw-r--r--kernel/irq/spurious.c14
-rw-r--r--kernel/kexec.c61
-rw-r--r--kernel/kfifo.c361
-rw-r--r--kernel/kgdb.c56
-rw-r--r--kernel/ksysfs.c21
-rw-r--r--kernel/kthread.c23
-rw-r--r--kernel/lockdep.c47
-rw-r--r--kernel/module.c191
-rw-r--r--kernel/mutex-debug.h12
-rw-r--r--kernel/panic.c3
-rw-r--r--kernel/params.c8
-rw-r--r--kernel/perf_event.c232
-rw-r--r--kernel/pid.c12
-rw-r--r--kernel/power/console.c7
-rw-r--r--kernel/printk.c119
-rw-r--r--kernel/rcutorture.c8
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/resource.c30
-rw-r--r--kernel/rtmutex-debug.c4
-rw-r--r--kernel/rtmutex.c106
-rw-r--r--kernel/sched.c747
-rw-r--r--kernel/sched_clock.c23
-rw-r--r--kernel/sched_cpupri.c10
-rw-r--r--kernel/sched_cpupri.h2
-rw-r--r--kernel/sched_debug.c17
-rw-r--r--kernel/sched_fair.c208
-rw-r--r--kernel/sched_features.h5
-rw-r--r--kernel/sched_idletask.c6
-rw-r--r--kernel/sched_rt.c66
-rw-r--r--kernel/signal.c63
-rw-r--r--kernel/smp.c35
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/softlockup.c54
-rw-r--r--kernel/spinlock.c306
-rw-r--r--kernel/sys.c10
-rw-r--r--kernel/sysctl.c50
-rw-r--r--kernel/sysctl_binary.c7
-rw-r--r--kernel/time.c1
-rw-r--r--kernel/time/clockevents.c32
-rw-r--r--kernel/time/tick-broadcast.c42
-rw-r--r--kernel/time/tick-common.c20
-rw-r--r--kernel/time/tick-internal.h1
-rw-r--r--kernel/time/timecompare.c2
-rw-r--r--kernel/time/timekeeping.c27
-rw-r--r--kernel/time/timer_list.c15
-rw-r--r--kernel/time/timer_stats.c18
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/ftrace.c30
-rw-r--r--kernel/trace/power-traces.c2
-rw-r--r--kernel/trace/ring_buffer.c45
-rw-r--r--kernel/trace/trace.c307
-rw-r--r--kernel/trace/trace.h27
-rw-r--r--kernel/trace/trace_clock.c8
-rw-r--r--kernel/trace/trace_event_profile.c6
-rw-r--r--kernel/trace/trace_events.c41
-rw-r--r--kernel/trace/trace_export.c4
-rw-r--r--kernel/trace/trace_functions_graph.c169
-rw-r--r--kernel/trace/trace_hw_branches.c51
-rw-r--r--kernel/trace/trace_irqsoff.c2
-rw-r--r--kernel/trace/trace_kprobe.c77
-rw-r--r--kernel/trace/trace_ksym.c61
-rw-r--r--kernel/trace/trace_output.c75
-rw-r--r--kernel/trace/trace_sched_wakeup.c16
-rw-r--r--kernel/trace/trace_selftest.c4
-rw-r--r--kernel/trace/trace_stack.c16
-rw-r--r--kernel/trace/trace_syscalls.c18
-rw-r--r--kernel/trace/trace_sysprof.c1
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/argv_split.c13
-rw-r--r--lib/bitmap.c81
-rw-r--r--lib/checksum.c14
-rw-r--r--lib/crc32.c121
-rw-r--r--lib/ctype.c50
-rw-r--r--lib/debugobjects.c74
-rw-r--r--lib/decompress_bunzip2.c10
-rw-r--r--lib/dynamic_debug.c4
-rw-r--r--lib/genalloc.c33
-rw-r--r--lib/iommu-helper.c59
-rw-r--r--lib/kernel_lock.c26
-rw-r--r--lib/lmb.c7
-rw-r--r--lib/parser.c11
-rw-r--r--lib/plist.c8
-rw-r--r--lib/rwsem-spinlock.c23
-rw-r--r--lib/spinlock_debug.c64
-rw-r--r--lib/string.c25
-rw-r--r--lib/swiotlb.c4
-rw-r--r--lib/vsprintf.c495
-rw-r--r--mm/Kconfig20
-rw-r--r--mm/Makefile5
-rw-r--r--mm/allocpercpu.c177
-rw-r--r--mm/bootmem.c8
-rw-r--r--mm/filemap.c64
-rw-r--r--mm/hugetlb.c551
-rw-r--r--mm/hwpoison-inject.c113
-rw-r--r--mm/internal.h35
-rw-r--r--mm/kmemleak.c188
-rw-r--r--mm/ksm.c953
-rw-r--r--mm/madvise.c21
-rw-r--r--mm/memcontrol.c442
-rw-r--r--mm/memory-failure.c562
-rw-r--r--mm/memory.c35
-rw-r--r--mm/memory_hotplug.c16
-rw-r--r--mm/mempolicy.c69
-rw-r--r--mm/migrate.c133
-rw-r--r--mm/mincore.c37
-rw-r--r--mm/mlock.c45
-rw-r--r--mm/mmap.c92
-rw-r--r--mm/mremap.c241
-rw-r--r--mm/nommu.c8
-rw-r--r--mm/oom_kill.c103
-rw-r--r--mm/page_alloc.c108
-rw-r--r--mm/page_io.c17
-rw-r--r--mm/pagewalk.c32
-rw-r--r--mm/percpu.c24
-rw-r--r--mm/readahead.c12
-rw-r--r--mm/rmap.c354
-rw-r--r--mm/shmem.c84
-rw-r--r--mm/shmem_acl.c171
-rw-r--r--mm/slab.c160
-rw-r--r--mm/slub.c24
-rw-r--r--mm/swapfile.c847
-rw-r--r--mm/truncate.c6
-rw-r--r--mm/util.c44
-rw-r--r--mm/vmalloc.c11
-rw-r--r--mm/vmscan.c321
-rw-r--r--mm/vmstat.c10
-rw-r--r--net/9p/trans_fd.c112
-rw-r--r--net/atm/br2684.c11
-rw-r--r--net/atm/lec.c10
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/l2cap.c5
-rw-r--r--net/compat.c11
-rw-r--r--net/core/dev.c21
-rw-r--r--net/core/rtnetlink.c6
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/dccp/probe.c20
-rw-r--r--net/ipv4/Kconfig6
-rw-r--r--net/ipv4/inet_hashtables.c24
-rw-r--r--net/ipv4/inet_timewait_sock.c61
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c21
-rw-r--r--net/ipv4/syncookies.c27
-rw-r--r--net/ipv4/tcp.c5
-rw-r--r--net/ipv4/tcp_input.c59
-rw-r--r--net/ipv4/tcp_ipv4.c23
-rw-r--r--net/ipv4/tcp_minisocks.c10
-rw-r--r--net/ipv4/tcp_output.c18
-rw-r--r--net/ipv4/tcp_timer.c29
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv6/inet6_hashtables.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c19
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c7
-rw-r--r--net/ipv6/reassembly.c8
-rw-r--r--net/ipv6/route.c1
-rw-r--r--net/ipv6/syncookies.c28
-rw-r--r--net/ipv6/tcp_ipv6.c7
-rw-r--r--net/irda/irnet/irnet.h1
-rw-r--r--net/irda/irnet/irnet_ppp.c8
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/key/af_key.c1
-rw-r--r--net/mac80211/cfg.c3
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mesh.h5
-rw-r--r--net/mac80211/mesh_hwmp.c2
-rw-r--r--net/mac80211/mlme.c10
-rw-r--r--net/mac80211/rx.c1
-rw-r--r--net/mac80211/scan.c20
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/xt_recent.c3
-rw-r--r--net/packet/af_packet.c71
-rw-r--r--net/rds/ib.c4
-rw-r--r--net/rds/iw.c4
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/socket.c125
-rw-r--r--net/sunrpc/addr.c10
-rw-r--r--net/sunrpc/auth.c39
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c6
-rw-r--r--net/sunrpc/clnt.c54
-rw-r--r--net/sunrpc/rpcb_clnt.c104
-rw-r--r--net/sunrpc/sched.c15
-rw-r--r--net/sunrpc/sunrpc_syms.c3
-rw-r--r--net/sunrpc/svc_xprt.c31
-rw-r--r--net/sunrpc/svcauth_unix.c53
-rw-r--r--net/sunrpc/xprt.c4
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/wireless/reg.c75
-rw-r--r--net/wireless/wext-compat.c1
-rw-r--r--samples/hw_breakpoint/data_breakpoint.c7
-rw-r--r--scripts/Kbuild.include6
-rw-r--r--scripts/Makefile.build1
-rw-r--r--scripts/Makefile.lib7
-rw-r--r--scripts/Makefile.modbuiltin55
-rw-r--r--scripts/basic/fixdep.c10
-rw-r--r--scripts/genksyms/keywords.c_shipped191
-rw-r--r--scripts/genksyms/keywords.gperf2
-rwxr-xr-xscripts/get_maintainer.pl499
-rwxr-xr-xscripts/headers.sh2
-rw-r--r--scripts/kconfig/Makefile1
-rw-r--r--scripts/kconfig/confdata.c24
-rwxr-xr-xscripts/mkcompile_h2
-rw-r--r--scripts/mod/Makefile2
-rw-r--r--scripts/mod/file2alias.c95
-rw-r--r--scripts/mod/mk_elfconfig.c9
-rw-r--r--scripts/mod/modpost.c177
-rw-r--r--scripts/mod/modpost.h3
-rw-r--r--scripts/package/Makefile20
-rw-r--r--scripts/package/buildtar6
-rwxr-xr-xscripts/recordmcount.pl61
-rwxr-xr-xscripts/tags.sh8
-rw-r--r--scripts/unifdef.c341
-rw-r--r--security/Makefile3
-rw-r--r--security/integrity/ima/ima.h3
-rw-r--r--security/integrity/ima/ima_iint.c79
-rw-r--r--security/integrity/ima/ima_main.c184
-rw-r--r--security/keys/keyctl.c12
-rw-r--r--security/min_addr.c2
-rw-r--r--security/tomoyo/file.c1
-rw-r--r--sound/arm/aaci.c177
-rw-r--r--sound/arm/aaci.h2
-rw-r--r--sound/arm/pxa2xx-ac97.c2
-rw-r--r--sound/core/hrtimer.c15
-rw-r--r--sound/core/pcm_lib.c4
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/isa/gus/gus_mem.c3
-rw-r--r--sound/isa/msnd/msnd_midi.c2
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c18
-rw-r--r--sound/isa/sb/emu8000.c6
-rw-r--r--sound/mips/sgio2audio.c2
-rw-r--r--sound/oss/pss.c6
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/cs5535audio/Makefile2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c1
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h4
-rw-r--r--sound/pci/cs5535audio/cs5535audio_olpc.c26
-rw-r--r--sound/pci/hda/hda_codec.h5
-rw-r--r--sound/pci/hda/hda_hwdep.c7
-rw-r--r--sound/pci/hda/hda_intel.c6
-rw-r--r--sound/pci/hda/hda_proc.c7
-rw-r--r--sound/pci/hda/patch_analog.c8
-rw-r--r--sound/pci/hda/patch_conexant.c43
-rw-r--r--sound/pci/hda/patch_intelhdmi.c114
-rw-r--r--sound/pci/hda/patch_realtek.c388
-rw-r--r--sound/pci/hda/patch_sigmatel.c103
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c3
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c2
-rw-r--r--sound/soc/codecs/ak4642.c2
-rw-r--r--sound/soc/codecs/stac9766.c18
-rw-r--r--sound/soc/codecs/twl4030.c10
-rw-r--r--sound/soc/codecs/wm8350.c25
-rw-r--r--sound/soc/codecs/wm8900.c2
-rw-r--r--sound/soc/codecs/wm8974.c2
-rw-r--r--sound/soc/omap/Makefile6
-rw-r--r--sound/soc/omap/sdp3430.c6
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.h2
-rw-r--r--sound/soc/soc-core.c2
-rw-r--r--sound/usb/usbaudio.c2
-rw-r--r--tools/perf/Documentation/perf-diff.txt55
-rw-r--r--tools/perf/Documentation/perf-kmem.txt13
-rw-r--r--tools/perf/Documentation/perf-probe.txt24
-rw-r--r--tools/perf/Documentation/perf-report.txt4
-rw-r--r--tools/perf/Documentation/perf-trace.txt27
-rw-r--r--tools/perf/Makefile16
-rw-r--r--tools/perf/bench/sched-messaging.c8
-rw-r--r--tools/perf/bench/sched-pipe.c11
-rw-r--r--tools/perf/builtin-annotate.c75
-rw-r--r--tools/perf/builtin-bench.c57
-rw-r--r--tools/perf/builtin-buildid-list.c59
-rw-r--r--tools/perf/builtin-diff.c248
-rw-r--r--tools/perf/builtin-kmem.c178
-rw-r--r--tools/perf/builtin-probe.c209
-rw-r--r--tools/perf/builtin-record.c166
-rw-r--r--tools/perf/builtin-report.c754
-rw-r--r--tools/perf/builtin-sched.c284
-rw-r--r--tools/perf/builtin-timechart.c118
-rw-r--r--tools/perf/builtin-top.c45
-rw-r--r--tools/perf/builtin-trace.c391
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/command-list.txt1
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/perf.h12
-rw-r--r--tools/perf/scripts/perl/bin/check-perf-trace-report1
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-report4
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-report1
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-report1
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-report1
-rw-r--r--tools/perf/scripts/perl/rw-by-file.pl5
-rw-r--r--tools/perf/util/data_map.c167
-rw-r--r--tools/perf/util/data_map.h32
-rw-r--r--tools/perf/util/event.c214
-rw-r--r--tools/perf/util/event.h64
-rw-r--r--tools/perf/util/header.c39
-rw-r--r--tools/perf/util/header.h4
-rw-r--r--tools/perf/util/hist.c518
-rw-r--r--tools/perf/util/hist.h55
-rw-r--r--tools/perf/util/map.c93
-rw-r--r--tools/perf/util/parse-events.c17
-rw-r--r--tools/perf/util/parse-options.c3
-rw-r--r--tools/perf/util/probe-event.c299
-rw-r--r--tools/perf/util/probe-event.h12
-rw-r--r--tools/perf/util/probe-finder.c6
-rw-r--r--tools/perf/util/probe-finder.h58
-rw-r--r--tools/perf/util/session.c150
-rw-r--r--tools/perf/util/session.h61
-rw-r--r--tools/perf/util/sort.c26
-rw-r--r--tools/perf/util/sort.h12
-rw-r--r--tools/perf/util/string.c25
-rw-r--r--tools/perf/util/string.h2
-rw-r--r--tools/perf/util/strlist.c6
-rw-r--r--tools/perf/util/strlist.h41
-rw-r--r--tools/perf/util/symbol.c355
-rw-r--r--tools/perf/util/symbol.h45
-rw-r--r--tools/perf/util/thread.c100
-rw-r--r--tools/perf/util/thread.h50
-rw-r--r--tools/perf/util/trace-event-parse.c4
-rw-r--r--tools/perf/util/trace-event-perl.c107
-rw-r--r--tools/perf/util/trace-event-perl.h4
-rw-r--r--tools/perf/util/trace-event-read.c3
-rw-r--r--tools/perf/util/trace-event.h2
-rw-r--r--usr/gen_init_cpio.c5
-rw-r--r--virt/kvm/kvm_main.c9
3989 files changed, 275983 insertions, 89087 deletions
diff --git a/.gitignore b/.gitignore
index 946c7ec5c92..fb2190c61af 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,6 +22,7 @@
*.lst
*.symtypes
*.order
+modules.builtin
*.elf
*.bin
*.gz
@@ -45,14 +46,8 @@ Module.symvers
#
# Generated include files
#
-include/asm
-include/asm-*/asm-offsets.h
include/config
-include/linux/autoconf.h
-include/linux/compile.h
include/linux/version.h
-include/linux/utsrelease.h
-include/linux/bounds.h
include/generated
# stgit generated dirs
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 7772928ee48..deb6b489e4e 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -144,3 +144,16 @@ Description:
Write a 1 to force the device to disconnect
(equivalent to unplugging a wired USB device).
+
+What: /sys/bus/usb/drivers/.../remove_id
+Date: November 2009
+Contact: CHENG Renquan <rqcheng@smu.edu.sg>
+Description:
+ Writing a device ID to this file will remove an ID
+ that was dynamically added via the new_id sysfs entry.
+ The format for the device ID is:
+ idVendor idProduct. After successfully
+ removing an ID, the driver will no longer support the
+ device. This is useful to ensure auto probing won't
+ match the driver to the device. For example:
+ # echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
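A minimal usage sketch of the remove_id entry added above, paired with the
existing new_id entry (the driver name "foo" and the "046d c315" ID are just
the illustrative values from the description; run as root):

	# echo "046d c315" > /sys/bus/usb/drivers/foo/new_id
	# echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
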
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
index 4e8106f7cfd..25b1e751b77 100644
--- a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
+++ b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
@@ -23,3 +23,16 @@ Description:
Since this relates to security (specifically, the
lifetime of PTKs and GTKs) it should not be changed
from the default.
+
+What: /sys/class/uwb_rc/uwbN/wusbhc/wusb_phy_rate
+Date: August 2009
+KernelVersion: 2.6.32
+Contact: David Vrabel <david.vrabel@csr.com>
+Description:
+ The maximum PHY rate to use for all connected devices.
+ This is only of limited use for testing and
+ development as the hardware's automatic rate
+	adaptation is better than this simple control.
+
+ Refer to [ECMA-368] section 10.3.1.1 for the value to
+ use.
diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory
index 9fe91c02ee4..bf1627b02a0 100644
--- a/Documentation/ABI/testing/sysfs-devices-memory
+++ b/Documentation/ABI/testing/sysfs-devices-memory
@@ -60,6 +60,19 @@ Description:
Users: hotplug memory remove tools
https://w3.opensource.ibm.com/projects/powerpc-utils/
+
+What: /sys/devices/system/memoryX/nodeY
+Date: October 2009
+Contact: Linux Memory Management list <linux-mm@kvack.org>
+Description:
+ When CONFIG_NUMA is enabled, a symbolic link that
+ points to the corresponding NUMA node directory.
+
+ For example, the following symbolic link is created for
+ memory section 9 on node0:
+ /sys/devices/system/memory/memory9/node0 -> ../../node/node0
+
+
What: /sys/devices/system/node/nodeX/memoryY
Date: September 2008
Contact: Gary Hade <garyhade@us.ibm.com>
@@ -70,4 +83,3 @@ Description:
memory section directory. For example, the following symbolic
link is created for memory section 9 on node0.
/sys/devices/system/node/node0/memory9 -> ../../memory/memory9
-
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index a703b9e9aeb..84a710f87c6 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -62,6 +62,35 @@ Description: CPU topology files that describe kernel limits related to
See Documentation/cputopology.txt for more information.
+What: /sys/devices/system/cpu/probe
+ /sys/devices/system/cpu/release
+Date: November 2009
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description: Dynamic addition and removal of CPUs. This is not hotplug
+ removal; it is meant for complete removal/addition of the CPU
+ from the system.
+
+ probe: writes to this file will dynamically add a CPU to the
+ system. Information written to the file to add CPUs is
+ architecture specific.
+
+ release: writes to this file will dynamically remove a CPU
+ from the system. Information written to the file to remove
+ CPUs is architecture specific.
+
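What has to be written to probe and release is architecture specific; the sketch below only shows the mechanics of writing to these attributes and uses a made-up argument string, so treat it as a hypothetical illustration rather than a portable recipe:

/* Sketch only: the "0x100" argument is hypothetical; consult your
 * architecture's documentation for the real probe/release syntax. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_sysfs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0) {
		perror(path);
		if (fd >= 0)
			close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	return write_sysfs("/sys/devices/system/cpu/probe", "0x100\n");
}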
+What: /sys/devices/system/cpu/cpu#/node
+Date: October 2009
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: Discover the NUMA node a CPU belongs to
+
+ When CONFIG_NUMA is enabled, a symbolic link that points
+ to the corresponding NUMA node directory.
+
+ For example, the following symlink is created for cpu42
+ in NUMA node 2:
+
+ /sys/devices/system/cpu/cpu42/node2 -> ../../node/node2
+
What: /sys/devices/system/cpu/cpu#/node
Date: October 2009
@@ -136,6 +165,24 @@ Description: Discover cpuidle policy and mechanism
See files in Documentation/cpuidle/ for more information.
+What: /sys/devices/system/cpu/cpu#/cpufreq/*
+Date: pre-git history
+Contact: cpufreq@vger.kernel.org
+Description: Discover and change clock speed of CPUs
+
+ Clock scaling allows you to change the clock speed of the
+ CPUs on the fly. This is a nice method to save battery
+ power, because the lower the clock speed, the less power
+ the CPU consumes.
+
+ There are many knobs to tweak in this directory.
+
+ See files in Documentation/cpu-freq/ for more information.
+
+ In particular, read Documentation/cpu-freq/user-guide.txt
+ to learn how to control the knobs.
+
+
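As a rough sketch, the usual per-CPU attributes such as scaling_cur_freq and scaling_governor can be read and written like any other sysfs file; which attributes exist depends on the cpufreq driver, so the names below are only the common case described in the user guide:

/* Sketch: read cpu0's current frequency and request the "ondemand"
 * governor. Attribute availability depends on the cpufreq driver. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define CPUFREQ "/sys/devices/system/cpu/cpu0/cpufreq/"

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open(CPUFREQ "scaling_cur_freq", O_RDONLY);

	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("current frequency: %s", buf);
		}
		close(fd);
	}

	fd = open(CPUFREQ "scaling_governor", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "ondemand\n", 9) < 0)
			perror("scaling_governor");
		close(fd);
	}
	return 0;
}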
What: /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X
Date: August 2008
KernelVersion: 2.6.27
diff --git a/Documentation/ABI/testing/sysfs-kernel-slab b/Documentation/ABI/testing/sysfs-kernel-slab
index 6dcf75e594f..8b093f8222d 100644
--- a/Documentation/ABI/testing/sysfs-kernel-slab
+++ b/Documentation/ABI/testing/sysfs-kernel-slab
@@ -45,8 +45,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The alloc_fastpath file is read-only and specifies how many
- objects have been allocated using the fast path.
+ The alloc_fastpath file shows how many objects have been
+ allocated using the fast path. It can be written to clear the
+ current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/alloc_from_partial
@@ -55,9 +56,10 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The alloc_from_partial file is read-only and specifies how
- many times a cpu slab has been full and it has been refilled
- by using a slab from the list of partially used slabs.
+ The alloc_from_partial file shows how many times a cpu slab has
+ been full and it has been refilled by using a slab from the list
+ of partially used slabs. It can be written to clear the current
+ count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/alloc_refill
@@ -66,9 +68,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The alloc_refill file is read-only and specifies how many
- times the per-cpu freelist was empty but there were objects
- available as the result of remote cpu frees.
+ The alloc_refill file shows how many times the per-cpu freelist
+ was empty but there were objects available as the result of
+ remote cpu frees. It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/alloc_slab
@@ -77,8 +79,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The alloc_slab file is read-only and specifies how many times
- a new slab had to be allocated from the page allocator.
+ The alloc_slab file shows how many times a new slab had to
+ be allocated from the page allocator. It can be written to
+ clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/alloc_slowpath
@@ -87,9 +90,10 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The alloc_slowpath file is read-only and specifies how many
- objects have been allocated using the slow path because of a
- refill or allocation from a partial or new slab.
+ The alloc_slowpath file shows how many objects have been
+ allocated using the slow path because of a refill or
+ allocation from a partial or new slab. It can be written to
+ clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/cache_dma
@@ -117,10 +121,11 @@ KernelVersion: 2.6.31
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file cpuslab_flush is read-only and specifies how many
- times a cache's cpu slabs have been flushed as the result of
- destroying or shrinking a cache, a cpu going offline, or as
- the result of forcing an allocation from a certain node.
+ The file cpuslab_flush shows how many times a cache's cpu slabs
+ have been flushed as the result of destroying or shrinking a
+ cache, a cpu going offline, or as the result of forcing an
+ allocation from a certain node. It can be written to clear the
+ current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/ctor
@@ -139,8 +144,8 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file deactivate_empty is read-only and specifies how many
- times an empty cpu slab was deactivated.
+ The deactivate_empty file shows how many times an empty cpu slab
+ was deactivated. It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/deactivate_full
@@ -149,8 +154,8 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file deactivate_full is read-only and specifies how many
- times a full cpu slab was deactivated.
+ The deactivate_full file shows how many times a full cpu slab
+ was deactivated. It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/deactivate_remote_frees
@@ -159,9 +164,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file deactivate_remote_frees is read-only and specifies how
- many times a cpu slab has been deactivated and contained free
- objects that were freed remotely.
+ The deactivate_remote_frees file shows how many times a cpu slab
+ has been deactivated and contained free objects that were freed
+ remotely. It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/deactivate_to_head
@@ -170,9 +175,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file deactivate_to_head is read-only and specifies how
- many times a partial cpu slab was deactivated and added to the
- head of its node's partial list.
+ The deactivate_to_head file shows how many times a partial cpu
+ slab was deactivated and added to the head of its node's partial
+ list. It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/deactivate_to_tail
@@ -181,9 +186,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file deactivate_to_tail is read-only and specifies how
- many times a partial cpu slab was deactivated and added to the
- tail of its node's partial list.
+ The deactivate_to_tail file shows how many times a partial cpu
+ slab was deactivated and added to the tail of its node's partial
+ list. It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/destroy_by_rcu
@@ -201,9 +206,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file free_add_partial is read-only and specifies how many
- times an object has been freed in a full slab so that it had to
- added to its node's partial list.
+ The free_add_partial file shows how many times an object has
+ been freed in a full slab so that it had to be added to its node's
+ partial list. It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/free_calls
@@ -222,9 +227,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The free_fastpath file is read-only and specifies how many
- objects have been freed using the fast path because it was an
- object from the cpu slab.
+ The free_fastpath file shows how many objects have been freed
+ using the fast path because it was an object from the cpu slab.
+ It can be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/free_frozen
@@ -233,9 +238,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The free_frozen file is read-only and specifies how many
- objects have been freed to a frozen slab (i.e. a remote cpu
- slab).
+ The free_frozen file shows how many objects have been freed to
+ a frozen slab (i.e. a remote cpu slab). It can be written to
+ clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/free_remove_partial
@@ -244,9 +249,10 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file free_remove_partial is read-only and specifies how
- many times an object has been freed to a now-empty slab so
- that it had to be removed from its node's partial list.
+ The free_remove_partial file shows how many times an object has
+ been freed to a now-empty slab so that it had to be removed from
+ its node's partial list. It can be written to clear the current
+ count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/free_slab
@@ -255,8 +261,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The free_slab file is read-only and specifies how many times an
- empty slab has been freed back to the page allocator.
+ The free_slab file shows how many times an empty slab has been
+ freed back to the page allocator. It can be written to clear
+ the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/free_slowpath
@@ -265,9 +272,9 @@ KernelVersion: 2.6.25
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The free_slowpath file is read-only and specifies how many
- objects have been freed using the slow path (i.e. to a full or
- partial slab).
+ The free_slowpath file shows how many objects have been freed
+ using the slow path (i.e. to a full or partial slab). It can
+ be written to clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/hwcache_align
@@ -346,10 +353,10 @@ KernelVersion: 2.6.26
Contact: Pekka Enberg <penberg@cs.helsinki.fi>,
Christoph Lameter <cl@linux-foundation.org>
Description:
- The file order_fallback is read-only and specifies how many
- times an allocation of a new slab has not been possible at the
- cache's order and instead fallen back to its minimum possible
- order.
+ The order_fallback file shows how many times an allocation of a
+ new slab has not been possible at the cache's order and instead
+ fallen back to its minimum possible order. It can be written to
+ clear the current count.
Available when CONFIG_SLUB_STATS is enabled.
What: /sys/kernel/slab/cache/partial
diff --git a/Documentation/ABI/testing/sysfs-memory-page-offline b/Documentation/ABI/testing/sysfs-memory-page-offline
new file mode 100644
index 00000000000..e14703f12fd
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-memory-page-offline
@@ -0,0 +1,44 @@
+What: /sys/devices/system/memory/soft_offline_page
+Date: Sep 2009
+KernelVersion: 2.6.33
+Contact: andi@firstfloor.org
+Description:
+ Soft-offline the memory page containing the physical address
+ written into this file. Input is a hex number specifying the
+ physical address of the page. The kernel will then attempt
+ to soft-offline it, by moving the contents elsewhere or
+ dropping it if possible. The page will then be placed
+ on the bad page list and never be reused.
+
+ The offlining is done in kernel-specific granularity.
+ Normally it is the base page size of the kernel, but
+ this might change.
+
+ The page must still be accessible, i.e. not poisoned. The
+ kernel will never kill anything for this, but rather
+ fail the offline. The return value is the size of the
+ written number, or an error if the offlining failed.
+ Reading the file is not allowed.
+
+What: /sys/devices/system/memory/hard_offline_page
+Date: Sep 2009
+KernelVersion: 2.6.33
+Contact: andi@firstfloor.org
+Description:
+ Hard-offline the memory page containing the physical
+ address written into this file. Input is a hex number
+ specifying the physical address of the page. The
+ kernel will then attempt to hard-offline the page by
+ trying to drop it, killing any owner, or triggering
+ I/O errors if needed. Note that this may kill any
+ processes owning the page. The kernel will avoid
+ accessing this page, assuming it is poisoned by the
+ hardware.
+
+ The offlining is done in kernel-specific granularity.
+ Normally it is the base page size of the kernel, but
+ this might change.
+
+ The return value is the size of the written number, or
+ an error if the offlining failed.
+ Reading the file is not allowed.
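A small sketch of driving either interface from C; the physical address below is a placeholder, and hard_offline_page is used the same way by switching the file name:

/* Sketch: soft-offline the page containing a (placeholder) physical
 * address by writing it as hex to the sysfs attribute. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/devices/system/memory/soft_offline_page";
	unsigned long long paddr = 0x12345000ULL;	/* placeholder */
	char buf[32];
	int fd, len;

	len = snprintf(buf, sizeof(buf), "0x%llx\n", paddr);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, len) < 0)
		perror("write");
	close(fd);
	return 0;
}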
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d0f1efc5bf..f08b313cd23 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -49,6 +49,8 @@ o oprofile 0.9 # oprofiled --version
o udev 081 # udevinfo -V
o grub 0.93 # grub --version
o mcelog 0.6
+o iptables 1.4.1 # iptables -V
+
Kernel compilation
==================
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index ab8300f6718..325cfd1d6d9 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -8,7 +8,7 @@
DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
kernel-hacking.xml kernel-locking.xml deviceiobook.xml \
- procfs-guide.xml writing_usb_driver.xml networking.xml \
+ writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
@@ -32,10 +32,10 @@ PS_METHOD = $(prefer-db2x)
###
# The targets that may be used.
-PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs media
+PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs xmldoclinks
BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
-xmldocs: $(BOOKS)
+xmldocs: $(BOOKS) xmldoclinks
sgmldocs: xmldocs
PS := $(patsubst %.xml, %.ps, $(BOOKS))
@@ -45,15 +45,24 @@ PDF := $(patsubst %.xml, %.pdf, $(BOOKS))
pdfdocs: $(PDF)
HTML := $(sort $(patsubst %.xml, %.html, $(BOOKS)))
-htmldocs: media $(HTML)
+htmldocs: $(HTML)
$(call build_main_index)
+ $(call build_images)
MAN := $(patsubst %.xml, %.9, $(BOOKS))
mandocs: $(MAN)
-media:
- mkdir -p $(srctree)/Documentation/DocBook/media/
- cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(srctree)/Documentation/DocBook/media/
+build_images = mkdir -p $(objtree)/Documentation/DocBook/media/ && \
+ cp $(srctree)/Documentation/DocBook/dvb/*.png $(srctree)/Documentation/DocBook/v4l/*.gif $(objtree)/Documentation/DocBook/media/
+
+xmldoclinks:
+ifneq ($(objtree),$(srctree))
+ for dep in dvb media-entities.tmpl media-indices.tmpl v4l; do \
+ rm -f $(objtree)/Documentation/DocBook/$$dep \
+ && ln -s $(srctree)/Documentation/DocBook/$$dep $(objtree)/Documentation/DocBook/ \
+ || exit; \
+ done
+endif
installmandocs: mandocs
mkdir -p /usr/local/man/man9/
@@ -65,7 +74,7 @@ KERNELDOC = $(srctree)/scripts/kernel-doc
DOCPROC = $(objtree)/scripts/basic/docproc
XMLTOFLAGS = -m $(srctree)/Documentation/DocBook/stylesheet.xsl
-#XMLTOFLAGS += --skip-validation
+XMLTOFLAGS += --skip-validation
###
# DOCPROC is used for two purposes:
@@ -101,17 +110,6 @@ endif
# Changes in kernel-doc force a rebuild of all documentation
$(BOOKS): $(KERNELDOC)
-###
-# procfs guide uses a .c file as example code.
-# This requires an explicit dependency
-C-procfs-example = procfs_example.xml
-C-procfs-example2 = $(addprefix $(obj)/,$(C-procfs-example))
-$(obj)/procfs-guide.xml: $(C-procfs-example2)
-
-# List of programs to build
-##oops, this is a kernel module::hostprogs-y := procfs_example
-obj-m += procfs_example.o
-
# Tell kbuild to always build the programs
always := $(hostprogs-y)
@@ -238,7 +236,7 @@ clean-files := $(DOCBOOKS) \
$(patsubst %.xml, %.pdf, $(DOCBOOKS)) \
$(patsubst %.xml, %.html, $(DOCBOOKS)) \
$(patsubst %.xml, %.9, $(DOCBOOKS)) \
- $(C-procfs-example) $(index)
+ $(index)
clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index bb5ab741220..c725cb852c5 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -23,6 +23,7 @@
<!ENTITY VIDIOC-ENUMINPUT "<link linkend='vidioc-enuminput'><constant>VIDIOC_ENUMINPUT</constant></link>">
<!ENTITY VIDIOC-ENUMOUTPUT "<link linkend='vidioc-enumoutput'><constant>VIDIOC_ENUMOUTPUT</constant></link>">
<!ENTITY VIDIOC-ENUMSTD "<link linkend='vidioc-enumstd'><constant>VIDIOC_ENUMSTD</constant></link>">
+<!ENTITY VIDIOC-ENUM-DV-PRESETS "<link linkend='vidioc-enum-dv-presets'><constant>VIDIOC_ENUM_DV_PRESETS</constant></link>">
<!ENTITY VIDIOC-ENUM-FMT "<link linkend='vidioc-enum-fmt'><constant>VIDIOC_ENUM_FMT</constant></link>">
<!ENTITY VIDIOC-ENUM-FRAMEINTERVALS "<link linkend='vidioc-enum-frameintervals'><constant>VIDIOC_ENUM_FRAMEINTERVALS</constant></link>">
<!ENTITY VIDIOC-ENUM-FRAMESIZES "<link linkend='vidioc-enum-framesizes'><constant>VIDIOC_ENUM_FRAMESIZES</constant></link>">
@@ -30,6 +31,8 @@
<!ENTITY VIDIOC-G-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_G_AUDOUT</constant></link>">
<!ENTITY VIDIOC-G-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_G_CROP</constant></link>">
<!ENTITY VIDIOC-G-CTRL "<link linkend='vidioc-g-ctrl'><constant>VIDIOC_G_CTRL</constant></link>">
+<!ENTITY VIDIOC-G-DV-PRESET "<link linkend='vidioc-g-dv-preset'><constant>VIDIOC_G_DV_PRESET</constant></link>">
+<!ENTITY VIDIOC-G-DV-TIMINGS "<link linkend='vidioc-g-dv-timings'><constant>VIDIOC_G_DV_TIMINGS</constant></link>">
<!ENTITY VIDIOC-G-ENC-INDEX "<link linkend='vidioc-g-enc-index'><constant>VIDIOC_G_ENC_INDEX</constant></link>">
<!ENTITY VIDIOC-G-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_G_EXT_CTRLS</constant></link>">
<!ENTITY VIDIOC-G-FBUF "<link linkend='vidioc-g-fbuf'><constant>VIDIOC_G_FBUF</constant></link>">
@@ -53,6 +56,7 @@
<!ENTITY VIDIOC-QUERYCTRL "<link linkend='vidioc-queryctrl'><constant>VIDIOC_QUERYCTRL</constant></link>">
<!ENTITY VIDIOC-QUERYMENU "<link linkend='vidioc-queryctrl'><constant>VIDIOC_QUERYMENU</constant></link>">
<!ENTITY VIDIOC-QUERYSTD "<link linkend='vidioc-querystd'><constant>VIDIOC_QUERYSTD</constant></link>">
+<!ENTITY VIDIOC-QUERY-DV-PRESET "<link linkend='vidioc-query-dv-preset'><constant>VIDIOC_QUERY_DV_PRESET</constant></link>">
<!ENTITY VIDIOC-REQBUFS "<link linkend='vidioc-reqbufs'><constant>VIDIOC_REQBUFS</constant></link>">
<!ENTITY VIDIOC-STREAMOFF "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMOFF</constant></link>">
<!ENTITY VIDIOC-STREAMON "<link linkend='vidioc-streamon'><constant>VIDIOC_STREAMON</constant></link>">
@@ -60,6 +64,8 @@
<!ENTITY VIDIOC-S-AUDOUT "<link linkend='vidioc-g-audioout'><constant>VIDIOC_S_AUDOUT</constant></link>">
<!ENTITY VIDIOC-S-CROP "<link linkend='vidioc-g-crop'><constant>VIDIOC_S_CROP</constant></link>">
<!ENTITY VIDIOC-S-CTRL "<link linkend='vidioc-g-ctrl'><constant>VIDIOC_S_CTRL</constant></link>">
+<!ENTITY VIDIOC-S-DV-PRESET "<link linkend='vidioc-g-dv-preset'><constant>VIDIOC_S_DV_PRESET</constant></link>">
+<!ENTITY VIDIOC-S-DV-TIMINGS "<link linkend='vidioc-g-dv-timings'><constant>VIDIOC_S_DV_TIMINGS</constant></link>">
<!ENTITY VIDIOC-S-EXT-CTRLS "<link linkend='vidioc-g-ext-ctrls'><constant>VIDIOC_S_EXT_CTRLS</constant></link>">
<!ENTITY VIDIOC-S-FBUF "<link linkend='vidioc-g-fbuf'><constant>VIDIOC_S_FBUF</constant></link>">
<!ENTITY VIDIOC-S-FMT "<link linkend='vidioc-g-fmt'><constant>VIDIOC_S_FMT</constant></link>">
@@ -118,6 +124,7 @@
<!-- Structures -->
<!ENTITY v4l2-audio "struct&nbsp;<link linkend='v4l2-audio'>v4l2_audio</link>">
<!ENTITY v4l2-audioout "struct&nbsp;<link linkend='v4l2-audioout'>v4l2_audioout</link>">
+<!ENTITY v4l2-bt-timings "struct&nbsp;<link linkend='v4l2-bt-timings'>v4l2_bt_timings</link>">
<!ENTITY v4l2-buffer "struct&nbsp;<link linkend='v4l2-buffer'>v4l2_buffer</link>">
<!ENTITY v4l2-capability "struct&nbsp;<link linkend='v4l2-capability'>v4l2_capability</link>">
<!ENTITY v4l2-captureparm "struct&nbsp;<link linkend='v4l2-captureparm'>v4l2_captureparm</link>">
@@ -128,6 +135,9 @@
<!ENTITY v4l2-dbg-chip-ident "struct&nbsp;<link linkend='v4l2-dbg-chip-ident'>v4l2_dbg_chip_ident</link>">
<!ENTITY v4l2-dbg-match "struct&nbsp;<link linkend='v4l2-dbg-match'>v4l2_dbg_match</link>">
<!ENTITY v4l2-dbg-register "struct&nbsp;<link linkend='v4l2-dbg-register'>v4l2_dbg_register</link>">
+<!ENTITY v4l2-dv-enum-preset "struct&nbsp;<link linkend='v4l2-dv-enum-preset'>v4l2_dv_enum_preset</link>">
+<!ENTITY v4l2-dv-preset "struct&nbsp;<link linkend='v4l2-dv-preset'>v4l2_dv_preset</link>">
+<!ENTITY v4l2-dv-timings "struct&nbsp;<link linkend='v4l2-dv-timings'>v4l2_dv_timings</link>">
<!ENTITY v4l2-enc-idx "struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link>">
<!ENTITY v4l2-enc-idx-entry "struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link>">
<!ENTITY v4l2-encoder-cmd "struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link>">
@@ -243,6 +253,10 @@
<!ENTITY sub-enumaudioout SYSTEM "v4l/vidioc-enumaudioout.xml">
<!ENTITY sub-enuminput SYSTEM "v4l/vidioc-enuminput.xml">
<!ENTITY sub-enumoutput SYSTEM "v4l/vidioc-enumoutput.xml">
+<!ENTITY sub-enum-dv-presets SYSTEM "v4l/vidioc-enum-dv-presets.xml">
+<!ENTITY sub-g-dv-preset SYSTEM "v4l/vidioc-g-dv-preset.xml">
+<!ENTITY sub-query-dv-preset SYSTEM "v4l/vidioc-query-dv-preset.xml">
+<!ENTITY sub-g-dv-timings SYSTEM "v4l/vidioc-g-dv-timings.xml">
<!ENTITY sub-enumstd SYSTEM "v4l/vidioc-enumstd.xml">
<!ENTITY sub-g-audio SYSTEM "v4l/vidioc-g-audio.xml">
<!ENTITY sub-g-audioout SYSTEM "v4l/vidioc-g-audioout.xml">
@@ -333,6 +347,10 @@
<!ENTITY enumaudioout SYSTEM "v4l/vidioc-enumaudioout.xml">
<!ENTITY enuminput SYSTEM "v4l/vidioc-enuminput.xml">
<!ENTITY enumoutput SYSTEM "v4l/vidioc-enumoutput.xml">
+<!ENTITY enum-dv-presets SYSTEM "v4l/vidioc-enum-dv-presets.xml">
+<!ENTITY g-dv-preset SYSTEM "v4l/vidioc-g-dv-preset.xml">
+<!ENTITY query-dv-preset SYSTEM "v4l/vidioc-query-dv-preset.xml">
+<!ENTITY g-dv-timings SYSTEM "v4l/vidioc-g-dv-timings.xml">
<!ENTITY enumstd SYSTEM "v4l/vidioc-enumstd.xml">
<!ENTITY g-audio SYSTEM "v4l/vidioc-g-audio.xml">
<!ENTITY g-audioout SYSTEM "v4l/vidioc-g-audioout.xml">
diff --git a/Documentation/DocBook/media-indices.tmpl b/Documentation/DocBook/media-indices.tmpl
index 9e30a236d74..78d6031de00 100644
--- a/Documentation/DocBook/media-indices.tmpl
+++ b/Documentation/DocBook/media-indices.tmpl
@@ -36,6 +36,7 @@
<indexentry><primaryie>enum&nbsp;<link linkend='v4l2-preemphasis'>v4l2_preemphasis</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-audio'>v4l2_audio</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-audioout'>v4l2_audioout</link></primaryie></indexentry>
+<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-bt-timings'>v4l2_bt_timings</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-buffer'>v4l2_buffer</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-capability'>v4l2_capability</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-captureparm'>v4l2_captureparm</link></primaryie></indexentry>
@@ -46,6 +47,9 @@
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-chip-ident'>v4l2_dbg_chip_ident</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-match'>v4l2_dbg_match</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dbg-register'>v4l2_dbg_register</link></primaryie></indexentry>
+<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dv-enum-preset'>v4l2_dv_enum_preset</link></primaryie></indexentry>
+<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dv-preset'>v4l2_dv_preset</link></primaryie></indexentry>
+<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-dv-timings'>v4l2_dv_timings</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-enc-idx'>v4l2_enc_idx</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-enc-idx-entry'>v4l2_enc_idx_entry</link></primaryie></indexentry>
<indexentry><primaryie>struct&nbsp;<link linkend='v4l2-encoder-cmd'>v4l2_encoder_cmd</link></primaryie></indexentry>
diff --git a/Documentation/DocBook/procfs-guide.tmpl b/Documentation/DocBook/procfs-guide.tmpl
deleted file mode 100644
index 9eba4b7af73..00000000000
--- a/Documentation/DocBook/procfs-guide.tmpl
+++ /dev/null
@@ -1,626 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
-<!ENTITY procfsexample SYSTEM "procfs_example.xml">
-]>
-
-<book id="LKProcfsGuide">
- <bookinfo>
- <title>Linux Kernel Procfs Guide</title>
-
- <authorgroup>
- <author>
- <firstname>Erik</firstname>
- <othername>(J.A.K.)</othername>
- <surname>Mouw</surname>
- <affiliation>
- <address>
- <email>mouw@nl.linux.org</email>
- </address>
- </affiliation>
- </author>
- <othercredit>
- <contrib>
- This software and documentation were written while working on the
- LART computing board
- (<ulink url="http://www.lartmaker.nl/">http://www.lartmaker.nl/</ulink>),
- which was sponsored by the Delt University of Technology projects
- Mobile Multi-media Communications and Ubiquitous Communications.
- </contrib>
- </othercredit>
- </authorgroup>
-
- <revhistory>
- <revision>
- <revnumber>1.0</revnumber>
- <date>May 30, 2001</date>
- <revremark>Initial revision posted to linux-kernel</revremark>
- </revision>
- <revision>
- <revnumber>1.1</revnumber>
- <date>June 3, 2001</date>
- <revremark>Revised after comments from linux-kernel</revremark>
- </revision>
- </revhistory>
-
- <copyright>
- <year>2001</year>
- <holder>Erik Mouw</holder>
- </copyright>
-
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute it
- and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2 of the License, or (at your option) any later
- version.
- </para>
-
- <para>
- This documentation is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- PURPOSE. See the GNU General Public License for more details.
- </para>
-
- <para>
- You should have received a copy of the GNU General Public
- License along with this program; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
-
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
- </bookinfo>
-
-
-
-
- <toc>
- </toc>
-
-
-
-
- <preface id="Preface">
- <title>Preface</title>
-
- <para>
- This guide describes the use of the procfs file system from
- within the Linux kernel. The idea to write this guide came up on
- the #kernelnewbies IRC channel (see <ulink
- url="http://www.kernelnewbies.org/">http://www.kernelnewbies.org/</ulink>),
- when Jeff Garzik explained the use of procfs and forwarded me a
- message Alexander Viro wrote to the linux-kernel mailing list. I
- agreed to write it up nicely, so here it is.
- </para>
-
- <para>
- I'd like to thank Jeff Garzik
- <email>jgarzik@pobox.com</email> and Alexander Viro
- <email>viro@parcelfarce.linux.theplanet.co.uk</email> for their input,
- Tim Waugh <email>twaugh@redhat.com</email> for his <ulink
- url="http://people.redhat.com/twaugh/docbook/selfdocbook/">Selfdocbook</ulink>,
- and Marc Joosen <email>marcj@historia.et.tudelft.nl</email> for
- proofreading.
- </para>
-
- <para>
- Erik
- </para>
- </preface>
-
-
-
-
- <chapter id="intro">
- <title>Introduction</title>
-
- <para>
- The <filename class="directory">/proc</filename> file system
- (procfs) is a special file system in the linux kernel. It's a
- virtual file system: it is not associated with a block device
- but exists only in memory. The files in the procfs are there to
- allow userland programs access to certain information from the
- kernel (like process information in <filename
- class="directory">/proc/[0-9]+/</filename>), but also for debug
- purposes (like <filename>/proc/ksyms</filename>).
- </para>
-
- <para>
- This guide describes the use of the procfs file system from
- within the Linux kernel. It starts by introducing all relevant
- functions to manage the files within the file system. After that
- it shows how to communicate with userland, and some tips and
- tricks will be pointed out. Finally a complete example will be
- shown.
- </para>
-
- <para>
- Note that the files in <filename
- class="directory">/proc/sys</filename> are sysctl files: they
- don't belong to procfs and are governed by a completely
- different API described in the Kernel API book.
- </para>
- </chapter>
-
-
-
-
- <chapter id="managing">
- <title>Managing procfs entries</title>
-
- <para>
- This chapter describes the functions that various kernel
- components use to populate the procfs with files, symlinks,
- device nodes, and directories.
- </para>
-
- <para>
- A minor note before we start: if you want to use any of the
- procfs functions, be sure to include the correct header file!
- This should be one of the first lines in your code:
- </para>
-
- <programlisting>
-#include &lt;linux/proc_fs.h&gt;
- </programlisting>
-
-
-
-
- <sect1 id="regularfile">
- <title>Creating a regular file</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry* <function>create_proc_entry</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>mode_t <parameter>mode</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- This function creates a regular file with the name
- <parameter>name</parameter>, file mode
- <parameter>mode</parameter> in the directory
- <parameter>parent</parameter>. To create a file in the root of
- the procfs, use <constant>NULL</constant> as
- <parameter>parent</parameter> parameter. When successful, the
- function will return a pointer to the freshly created
- <structname>struct proc_dir_entry</structname>; otherwise it
- will return <constant>NULL</constant>. <xref
- linkend="userland"/> describes how to do something useful with
- regular files.
- </para>
-
- <para>
- Note that it is specifically supported that you can pass a
- path that spans multiple directories. For example
- <function>create_proc_entry</function>(<parameter>"drivers/via0/info"</parameter>)
- will create the <filename class="directory">via0</filename>
- directory if necessary, with standard
- <constant>0755</constant> permissions.
- </para>
-
- <para>
- If you only want to be able to read the file, the function
- <function>create_proc_read_entry</function> described in <xref
- linkend="convenience"/> may be used to create and initialise
- the procfs entry in one single call.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="Creating_a_symlink">
- <title>Creating a symlink</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry*
- <function>proc_symlink</function></funcdef> <paramdef>const
- char* <parameter>name</parameter></paramdef>
- <paramdef>struct proc_dir_entry*
- <parameter>parent</parameter></paramdef> <paramdef>const
- char* <parameter>dest</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- This creates a symlink in the procfs directory
- <parameter>parent</parameter> that points from
- <parameter>name</parameter> to
- <parameter>dest</parameter>. This translates in userland to
- <literal>ln -s</literal> <parameter>dest</parameter>
- <parameter>name</parameter>.
- </para>
- </sect1>
-
- <sect1 id="Creating_a_directory">
- <title>Creating a directory</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry* <function>proc_mkdir</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- Create a directory <parameter>name</parameter> in the procfs
- directory <parameter>parent</parameter>.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="Removing_an_entry">
- <title>Removing an entry</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>void <function>remove_proc_entry</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- Removes the entry <parameter>name</parameter> in the directory
- <parameter>parent</parameter> from the procfs. Entries are
- removed by their <emphasis>name</emphasis>, not by the
- <structname>struct proc_dir_entry</structname> returned by the
- various create functions. Note that this function doesn't
- recursively remove entries.
- </para>
-
- <para>
- Be sure to free the <structfield>data</structfield> entry from
- the <structname>struct proc_dir_entry</structname> before
- <function>remove_proc_entry</function> is called (that is: if
- there was some <structfield>data</structfield> allocated, of
- course). See <xref linkend="usingdata"/> for more information
- on using the <structfield>data</structfield> entry.
- </para>
- </sect1>
- </chapter>
-
-
-
-
- <chapter id="userland">
- <title>Communicating with userland</title>
-
- <para>
- Instead of reading (or writing) information directly from
- kernel memory, procfs works with <emphasis>call back
- functions</emphasis> for files: functions that are called when
- a specific file is being read or written. Such functions have
- to be initialised after the procfs file is created by setting
- the <structfield>read_proc</structfield> and/or
- <structfield>write_proc</structfield> fields in the
- <structname>struct proc_dir_entry*</structname> that the
- function <function>create_proc_entry</function> returned:
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-
-entry->read_proc = read_proc_foo;
-entry->write_proc = write_proc_foo;
- </programlisting>
-
- <para>
- If you only want to use a the
- <structfield>read_proc</structfield>, the function
- <function>create_proc_read_entry</function> described in <xref
- linkend="convenience"/> may be used to create and initialise the
- procfs entry in one single call.
- </para>
-
-
-
- <sect1 id="Reading_data">
- <title>Reading data</title>
-
- <para>
- The read function is a call back function that allows userland
- processes to read data from the kernel. The read function
- should have the following format:
- </para>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>read_func</function></funcdef>
- <paramdef>char* <parameter>buffer</parameter></paramdef>
- <paramdef>char** <parameter>start</parameter></paramdef>
- <paramdef>off_t <parameter>off</parameter></paramdef>
- <paramdef>int <parameter>count</parameter></paramdef>
- <paramdef>int* <parameter>peof</parameter></paramdef>
- <paramdef>void* <parameter>data</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- The read function should write its information into the
- <parameter>buffer</parameter>, which will be exactly
- <literal>PAGE_SIZE</literal> bytes long.
- </para>
-
- <para>
- The parameter
- <parameter>peof</parameter> should be used to signal that the
- end of the file has been reached by writing
- <literal>1</literal> to the memory location
- <parameter>peof</parameter> points to.
- </para>
-
- <para>
- The <parameter>data</parameter>
- parameter can be used to create a single call back function for
- several files, see <xref linkend="usingdata"/>.
- </para>
-
- <para>
- The rest of the parameters and the return value are described
- by a comment in <filename>fs/proc/generic.c</filename> as follows:
- </para>
-
- <blockquote>
- <para>
- You have three ways to return data:
- </para>
- <orderedlist>
- <listitem>
- <para>
- Leave <literal>*start = NULL</literal>. (This is the default.)
- Put the data of the requested offset at that
- offset within the buffer. Return the number (<literal>n</literal>)
- of bytes there are from the beginning of the
- buffer up to the last byte of data. If the
- number of supplied bytes (<literal>= n - offset</literal>) is
- greater than zero and you didn't signal eof
- and the reader is prepared to take more data
- you will be called again with the requested
- offset advanced by the number of bytes
- absorbed. This interface is useful for files
- no larger than the buffer.
- </para>
- </listitem>
- <listitem>
- <para>
- Set <literal>*start</literal> to an unsigned long value less than
- the buffer address but greater than zero.
- Put the data of the requested offset at the
- beginning of the buffer. Return the number of
- bytes of data placed there. If this number is
- greater than zero and you didn't signal eof
- and the reader is prepared to take more data
- you will be called again with the requested
- offset advanced by <literal>*start</literal>. This interface is
- useful when you have a large file consisting
- of a series of blocks which you want to count
- and return as wholes.
- (Hack by Paul.Russell@rustcorp.com.au)
- </para>
- </listitem>
- <listitem>
- <para>
- Set <literal>*start</literal> to an address within the buffer.
- Put the data of the requested offset at <literal>*start</literal>.
- Return the number of bytes of data placed there.
- If this number is greater than zero and you
- didn't signal eof and the reader is prepared to
- take more data you will be called again with the
- requested offset advanced by the number of bytes
- absorbed.
- </para>
- </listitem>
- </orderedlist>
- </blockquote>
-
- <para>
- <xref linkend="example"/> shows how to use a read call back
- function.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="Writing_data">
- <title>Writing data</title>
-
- <para>
- The write call back function allows a userland process to write
- data to the kernel, so it has some kind of control over the
- kernel. The write function should have the following format:
- </para>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>int <function>write_func</function></funcdef>
- <paramdef>struct file* <parameter>file</parameter></paramdef>
- <paramdef>const char* <parameter>buffer</parameter></paramdef>
- <paramdef>unsigned long <parameter>count</parameter></paramdef>
- <paramdef>void* <parameter>data</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- The write function should read <parameter>count</parameter>
- bytes at maximum from the <parameter>buffer</parameter>. Note
- that the <parameter>buffer</parameter> doesn't live in the
- kernel's memory space, so it should first be copied to kernel
- space with <function>copy_from_user</function>. The
- <parameter>file</parameter> parameter is usually
- ignored. <xref linkend="usingdata"/> shows how to use the
- <parameter>data</parameter> parameter.
- </para>
-
- <para>
- Again, <xref linkend="example"/> shows how to use this call back
- function.
- </para>
- </sect1>
-
-
-
-
- <sect1 id="usingdata">
- <title>A single call back for many files</title>
-
- <para>
- When a large number of almost identical files is used, it's
- quite inconvenient to use a separate call back function for
- each file. A better approach is to have a single call back
- function that distinguishes between the files by using the
- <structfield>data</structfield> field in <structname>struct
- proc_dir_entry</structname>. First of all, the
- <structfield>data</structfield> field has to be initialised:
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-struct my_file_data *file_data;
-
-file_data = kmalloc(sizeof(struct my_file_data), GFP_KERNEL);
-entry->data = file_data;
- </programlisting>
-
- <para>
- The <structfield>data</structfield> field is a <type>void
- *</type>, so it can be initialised with anything.
- </para>
-
- <para>
- Now that the <structfield>data</structfield> field is set, the
- <function>read_proc</function> and
- <function>write_proc</function> can use it to distinguish
- between files because they get it passed into their
- <parameter>data</parameter> parameter:
- </para>
-
- <programlisting>
-int foo_read_func(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len;
-
- if(data == file_data) {
- /* special case for this file */
- } else {
- /* normal processing */
- }
-
- return len;
-}
- </programlisting>
-
- <para>
- Be sure to free the <structfield>data</structfield> data field
- when removing the procfs entry.
- </para>
- </sect1>
- </chapter>
-
-
-
-
- <chapter id="tips">
- <title>Tips and tricks</title>
-
-
-
-
- <sect1 id="convenience">
- <title>Convenience functions</title>
-
- <funcsynopsis>
- <funcprototype>
- <funcdef>struct proc_dir_entry* <function>create_proc_read_entry</function></funcdef>
- <paramdef>const char* <parameter>name</parameter></paramdef>
- <paramdef>mode_t <parameter>mode</parameter></paramdef>
- <paramdef>struct proc_dir_entry* <parameter>parent</parameter></paramdef>
- <paramdef>read_proc_t* <parameter>read_proc</parameter></paramdef>
- <paramdef>void* <parameter>data</parameter></paramdef>
- </funcprototype>
- </funcsynopsis>
-
- <para>
- This function creates a regular file in exactly the same way
- as <function>create_proc_entry</function> from <xref
- linkend="regularfile"/> does, but also allows to set the read
- function <parameter>read_proc</parameter> in one call. This
- function can set the <parameter>data</parameter> as well, like
- explained in <xref linkend="usingdata"/>.
- </para>
- </sect1>
-
-
-
- <sect1 id="Modules">
- <title>Modules</title>
-
- <para>
- If procfs is being used from within a module, be sure to set
- the <structfield>owner</structfield> field in the
- <structname>struct proc_dir_entry</structname> to
- <constant>THIS_MODULE</constant>.
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-
-entry->owner = THIS_MODULE;
- </programlisting>
- </sect1>
-
-
-
-
- <sect1 id="Mode_and_ownership">
- <title>Mode and ownership</title>
-
- <para>
- Sometimes it is useful to change the mode and/or ownership of
- a procfs entry. Here is an example that shows how to achieve
- that:
- </para>
-
- <programlisting>
-struct proc_dir_entry* entry;
-
-entry->mode = S_IWUSR |S_IRUSR | S_IRGRP | S_IROTH;
-entry->uid = 0;
-entry->gid = 100;
- </programlisting>
-
- </sect1>
- </chapter>
-
-
-
-
- <chapter id="example">
- <title>Example</title>
-
- <!-- be careful with the example code: it shouldn't be wider than
- approx. 60 columns, or otherwise it won't fit properly on a page
- -->
-
-&procfsexample;
-
- </chapter>
-</book>
diff --git a/Documentation/DocBook/procfs_example.c b/Documentation/DocBook/procfs_example.c
deleted file mode 100644
index a5b11793b1e..00000000000
--- a/Documentation/DocBook/procfs_example.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * procfs_example.c: an example proc interface
- *
- * Copyright (C) 2001, Erik Mouw (mouw@nl.linux.org)
- *
- * This file accompanies the procfs-guide in the Linux kernel
- * source. Its main use is to demonstrate the concepts and
- * functions described in the guide.
- *
- * This software has been developed while working on the LART
- * computing board (http://www.lartmaker.nl), which was sponsored
- * by the Delt University of Technology projects Mobile Multi-media
- * Communications and Ubiquitous Communications.
- *
- * This program is free software; you can redistribute
- * it and/or modify it under the terms of the GNU General
- * Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/jiffies.h>
-#include <asm/uaccess.h>
-
-
-#define MODULE_VERS "1.0"
-#define MODULE_NAME "procfs_example"
-
-#define FOOBAR_LEN 8
-
-struct fb_data_t {
- char name[FOOBAR_LEN + 1];
- char value[FOOBAR_LEN + 1];
-};
-
-
-static struct proc_dir_entry *example_dir, *foo_file,
- *bar_file, *jiffies_file, *symlink;
-
-
-struct fb_data_t foo_data, bar_data;
-
-
-static int proc_read_jiffies(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
-{
- int len;
-
- len = sprintf(page, "jiffies = %ld\n",
- jiffies);
-
- return len;
-}
-
-
-static int proc_read_foobar(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
-{
- int len;
- struct fb_data_t *fb_data = (struct fb_data_t *)data;
-
- /* DON'T DO THAT - buffer overruns are bad */
- len = sprintf(page, "%s = '%s'\n",
- fb_data->name, fb_data->value);
-
- return len;
-}
-
-
-static int proc_write_foobar(struct file *file,
- const char *buffer,
- unsigned long count,
- void *data)
-{
- int len;
- struct fb_data_t *fb_data = (struct fb_data_t *)data;
-
- if(count > FOOBAR_LEN)
- len = FOOBAR_LEN;
- else
- len = count;
-
- if(copy_from_user(fb_data->value, buffer, len))
- return -EFAULT;
-
- fb_data->value[len] = '\0';
-
- return len;
-}
-
-
-static int __init init_procfs_example(void)
-{
- int rv = 0;
-
- /* create directory */
- example_dir = proc_mkdir(MODULE_NAME, NULL);
- if(example_dir == NULL) {
- rv = -ENOMEM;
- goto out;
- }
- /* create jiffies using convenience function */
- jiffies_file = create_proc_read_entry("jiffies",
- 0444, example_dir,
- proc_read_jiffies,
- NULL);
- if(jiffies_file == NULL) {
- rv = -ENOMEM;
- goto no_jiffies;
- }
-
- /* create foo and bar files using same callback
- * functions
- */
- foo_file = create_proc_entry("foo", 0644, example_dir);
- if(foo_file == NULL) {
- rv = -ENOMEM;
- goto no_foo;
- }
-
- strcpy(foo_data.name, "foo");
- strcpy(foo_data.value, "foo");
- foo_file->data = &foo_data;
- foo_file->read_proc = proc_read_foobar;
- foo_file->write_proc = proc_write_foobar;
-
- bar_file = create_proc_entry("bar", 0644, example_dir);
- if(bar_file == NULL) {
- rv = -ENOMEM;
- goto no_bar;
- }
-
- strcpy(bar_data.name, "bar");
- strcpy(bar_data.value, "bar");
- bar_file->data = &bar_data;
- bar_file->read_proc = proc_read_foobar;
- bar_file->write_proc = proc_write_foobar;
-
- /* create symlink */
- symlink = proc_symlink("jiffies_too", example_dir,
- "jiffies");
- if(symlink == NULL) {
- rv = -ENOMEM;
- goto no_symlink;
- }
-
- /* everything OK */
- printk(KERN_INFO "%s %s initialised\n",
- MODULE_NAME, MODULE_VERS);
- return 0;
-
-no_symlink:
- remove_proc_entry("bar", example_dir);
-no_bar:
- remove_proc_entry("foo", example_dir);
-no_foo:
- remove_proc_entry("jiffies", example_dir);
-no_jiffies:
- remove_proc_entry(MODULE_NAME, NULL);
-out:
- return rv;
-}
-
-
-static void __exit cleanup_procfs_example(void)
-{
- remove_proc_entry("jiffies_too", example_dir);
- remove_proc_entry("bar", example_dir);
- remove_proc_entry("foo", example_dir);
- remove_proc_entry("jiffies", example_dir);
- remove_proc_entry(MODULE_NAME, NULL);
-
- printk(KERN_INFO "%s %s removed\n",
- MODULE_NAME, MODULE_VERS);
-}
-
-
-module_init(init_procfs_example);
-module_exit(cleanup_procfs_example);
-
-MODULE_AUTHOR("Erik Mouw");
-MODULE_DESCRIPTION("procfs examples");
-MODULE_LICENSE("GPL");
diff --git a/Documentation/DocBook/v4l/common.xml b/Documentation/DocBook/v4l/common.xml
index b1a81d246d5..c65f0ac9b6e 100644
--- a/Documentation/DocBook/v4l/common.xml
+++ b/Documentation/DocBook/v4l/common.xml
@@ -716,6 +716,41 @@ if (-1 == ioctl (fd, &VIDIOC-S-STD;, &amp;std_id)) {
}
</programlisting>
</example>
+ <section id="dv-timings">
+ <title>Digital Video (DV) Timings</title>
+ <para>
+ The video standards discussed so far deal with Analog TV and the
+corresponding video timings. Today there are many more hardware interfaces,
+such as High Definition TV interfaces (HDMI), VGA and DVI connectors etc., that carry
+video signals, and there is a need to extend the API to select the video timings
+for these interfaces. Since it is not possible to extend the &v4l2-std-id; due to
+the limited bits available, a new set of IOCTLs is added to set/get video timings at
+the input and output: </para><itemizedlist>
+ <listitem>
+ <para>DV Presets: Digital Video (DV) presets. These are IDs representing a
+video timing at the input/output. Presets are pre-defined timings implemented
+by the hardware according to video standards. A __u32 data type is used to represent
+a preset, unlike the bit mask used in &v4l2-std-id;, allowing future extensions
+to support as many different presets as needed.</para>
+ </listitem>
+ <listitem>
+ <para>Custom DV Timings: These allow applications to define more detailed
+custom video timings for the interface. This includes parameters such as width, height,
+polarities, front porch, back porch etc.
+ </para>
+ </listitem>
+ </itemizedlist>
+ <para>To enumerate and query the attributes of DV presets supported by a device,
+applications use the &VIDIOC-ENUM-DV-PRESETS; ioctl. To get the current DV preset,
+applications use the &VIDIOC-G-DV-PRESET; ioctl and to set a preset they use the
+&VIDIOC-S-DV-PRESET; ioctl.</para>
+ <para>To set custom DV timings for the device, applications use the
+&VIDIOC-S-DV-TIMINGS; ioctl and to get current custom DV timings they use the
+&VIDIOC-G-DV-TIMINGS; ioctl.</para>
+ <para>Applications can make use of the <xref linkend="input-capabilities" /> and
+<xref linkend="output-capabilities"/> flags to decide what ioctls are available to set the
+video timings for the device.</para>
+ </section>
</section>
&sub-controls;
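A hedged user-space sketch of the preset ioctls described above, assuming a kernel and videodev2.h that already contain this patch; /dev/video0 and the choice of V4L2_DV_720P60 are placeholders:

/* Sketch: enumerate DV presets, then select one. Assumes headers with
 * the DV preset API from this patch; error handling is minimal. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_enum_preset enum_preset;
	struct v4l2_dv_preset preset;
	unsigned int i;
	int fd = open("/dev/video0", O_RDWR);	/* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (i = 0; ; i++) {
		memset(&enum_preset, 0, sizeof(enum_preset));
		enum_preset.index = i;
		if (ioctl(fd, VIDIOC_ENUM_DV_PRESETS, &enum_preset) < 0)
			break;	/* EINVAL ends the enumeration */
		printf("%u: %s (%ux%u)\n", enum_preset.preset,
		       (char *)enum_preset.name, enum_preset.width,
		       enum_preset.height);
	}

	memset(&preset, 0, sizeof(preset));
	preset.preset = V4L2_DV_720P60;		/* placeholder choice */
	if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
		perror("VIDIOC_S_DV_PRESET");

	close(fd);
	return 0;
}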
diff --git a/Documentation/DocBook/v4l/compat.xml b/Documentation/DocBook/v4l/compat.xml
index 4d1902a54d6..b9dbdf9e6d2 100644
--- a/Documentation/DocBook/v4l/compat.xml
+++ b/Documentation/DocBook/v4l/compat.xml
@@ -2291,8 +2291,8 @@ was renamed to <structname id="v4l2-chip-ident-old">v4l2_chip_ident_old</structn
<listitem>
<para>New control <constant>V4L2_CID_COLORFX</constant> was added.</para>
</listitem>
- </orderedlist>
- </section>
+ </orderedlist>
+ </section>
<section>
<title>V4L2 in Linux 2.6.32</title>
<orderedlist>
@@ -2322,8 +2322,16 @@ more information.</para>
<listitem>
<para>Added Remote Controller chapter, describing the default Remote Controller mapping for media devices.</para>
</listitem>
- </orderedlist>
- </section>
+ </orderedlist>
+ </section>
+ <section>
+ <title>V4L2 in Linux 2.6.33</title>
+ <orderedlist>
+ <listitem>
+ <para>Added support for Digital Video timings in order to support HDTV receivers and transmitters.</para>
+ </listitem>
+ </orderedlist>
+ </section>
</section>
<section id="other">
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 937b4157a5d..060105af49e 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -74,6 +74,17 @@ Remote Controller chapter.</contrib>
</address>
</affiliation>
</author>
+
+ <author>
+ <firstname>Muralidharan</firstname>
+ <surname>Karicheri</surname>
+ <contrib>Documented the Digital Video timings API.</contrib>
+ <affiliation>
+ <address>
+ <email>m-karicheri2@ti.com</email>
+ </address>
+ </affiliation>
+ </author>
</authorgroup>
<copyright>
@@ -89,7 +100,7 @@ Remote Controller chapter.</contrib>
<year>2008</year>
<year>2009</year>
<holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
-Rubli, Andy Walls, Mauro Carvalho Chehab</holder>
+Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
</copyright>
<legalnotice>
<para>Except when explicitly stated as GPL, programming examples within
@@ -103,6 +114,13 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>2.6.33</revnumber>
+ <date>2009-12-03</date>
+ <authorinitials>mk</authorinitials>
+ <revremark>Added documentation for the Digital Video timings API.</revremark>
+ </revision>
+
+ <revision>
<revnumber>2.6.32</revnumber>
<date>2009-08-31</date>
<authorinitials>mcc</authorinitials>
@@ -355,7 +373,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>
<title>Video for Linux Two API Specification</title>
- <subtitle>Revision 2.6.32</subtitle>
+ <subtitle>Revision 2.6.33</subtitle>
<chapter id="common">
&sub-common;
@@ -411,6 +429,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-encoder-cmd;
&sub-enumaudio;
&sub-enumaudioout;
+ &sub-enum-dv-presets;
&sub-enum-fmt;
&sub-enum-framesizes;
&sub-enum-frameintervals;
@@ -421,6 +440,8 @@ and discussions on the V4L mailing list.</revremark>
&sub-g-audioout;
&sub-g-crop;
&sub-g-ctrl;
+ &sub-g-dv-preset;
+ &sub-g-dv-timings;
&sub-g-enc-index;
&sub-g-ext-ctrls;
&sub-g-fbuf;
@@ -441,6 +462,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-querybuf;
&sub-querycap;
&sub-queryctrl;
+ &sub-query-dv-preset;
&sub-querystd;
&sub-reqbufs;
&sub-s-hw-freq-seek;
diff --git a/Documentation/DocBook/v4l/videodev2.h.xml b/Documentation/DocBook/v4l/videodev2.h.xml
index 3e282ed9f59..06832594065 100644
--- a/Documentation/DocBook/v4l/videodev2.h.xml
+++ b/Documentation/DocBook/v4l/videodev2.h.xml
@@ -734,6 +734,99 @@ struct <link linkend="v4l2-standard">v4l2_standard</link> {
};
/*
+ * V I D E O T I M I N G S D V P R E S E T
+ */
+struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link> {
+ __u32 preset;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T S E N U M E R A T I O N
+ */
+struct <link linkend="v4l2-dv-enum-preset">v4l2_dv_enum_preset</link> {
+ __u32 index;
+ __u32 preset;
+ __u8 name[32]; /* Name of the preset timing */
+ __u32 width;
+ __u32 height;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T V A L U E S
+ */
+#define V4L2_DV_INVALID 0
+#define V4L2_DV_480P59_94 1 /* BT.1362 */
+#define V4L2_DV_576P50 2 /* BT.1362 */
+#define V4L2_DV_720P24 3 /* SMPTE 296M */
+#define V4L2_DV_720P25 4 /* SMPTE 296M */
+#define V4L2_DV_720P30 5 /* SMPTE 296M */
+#define V4L2_DV_720P50 6 /* SMPTE 296M */
+#define V4L2_DV_720P59_94 7 /* SMPTE 296M */
+#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
+#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I25 11 /* BT.1120 */
+#define V4L2_DV_1080I50 12 /* SMPTE 274M */
+#define V4L2_DV_1080I60 13 /* SMPTE 274M */
+#define V4L2_DV_1080P24 14 /* SMPTE 274M */
+#define V4L2_DV_1080P25 15 /* SMPTE 274M */
+#define V4L2_DV_1080P30 16 /* SMPTE 274M */
+#define V4L2_DV_1080P50 17 /* BT.1120 */
+#define V4L2_DV_1080P60 18 /* BT.1120 */
+
+/*
+ * D V B T T I M I N G S
+ */
+
+/* BT.656/BT.1120 timing data */
+struct <link linkend="v4l2-bt-timings">v4l2_bt_timings</link> {
+ __u32 width; /* width in pixels */
+ __u32 height; /* height in lines */
+ __u32 interlaced; /* Interlaced or progressive */
+ __u32 polarities; /* Positive or negative polarity */
+ __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz-&gt;74250000 */
+ __u32 hfrontporch; /* Horizontal front porch in pixels */
+ __u32 hsync; /* Horizontal Sync length in pixels */
+ __u32 hbackporch; /* Horizontal back porch in pixels */
+ __u32 vfrontporch; /* Vertical front porch in lines */
+ __u32 vsync; /* Vertical Sync length in lines */
+ __u32 vbackporch; /* Vertical back porch in lines */
+ __u32 il_vfrontporch; /* Vertical front porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vsync; /* Vertical sync length for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vbackporch; /* Vertical back porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 reserved[16];
+} __attribute__ ((packed));
+
+/* Interlaced or progressive format */
+#define V4L2_DV_PROGRESSIVE 0
+#define V4L2_DV_INTERLACED 1
+
+/* Polarities. If bit is not set, it is assumed to be negative polarity */
+#define V4L2_DV_VSYNC_POS_POL 0x00000001
+#define V4L2_DV_HSYNC_POS_POL 0x00000002
+
+
+/* DV timings */
+struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link> {
+ __u32 type;
+ union {
+ struct <link linkend="v4l2-bt-timings">v4l2_bt_timings</link> bt;
+ __u32 reserved[32];
+ };
+} __attribute__ ((packed));
+
+/* Values for the type field */
+#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
+
+/*
* V I D E O I N P U T S
*/
struct <link linkend="v4l2-input">v4l2_input</link> {
@@ -744,7 +837,8 @@ struct <link linkend="v4l2-input">v4l2_input</link> {
__u32 tuner; /* Associated tuner */
v4l2_std_id std;
__u32 status;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
@@ -775,6 +869,11 @@ struct <link linkend="v4l2-input">v4l2_input</link> {
#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
+/* capabilities flags */
+#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* V I D E O O U T P U T S
*/
@@ -785,13 +884,19 @@ struct <link linkend="v4l2-output">v4l2_output</link> {
__u32 audioset; /* Associated audios (bitfield) */
__u32 modulator; /* Associated modulator */
v4l2_std_id std;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
#define V4L2_OUTPUT_TYPE_MODULATOR 1
#define V4L2_OUTPUT_TYPE_ANALOG 2
#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
+/* capabilities flags */
+#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* C O N T R O L S
*/
@@ -1626,6 +1731,13 @@ struct <link linkend="v4l2-dbg-chip-ident">v4l2_dbg_chip_ident</link> {
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct <link linkend="v4l2-hw-freq-seek">v4l2_hw_freq_seek</link>)
+#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct <link linkend="v4l2-dv-enum-preset">v4l2_dv_enum_preset</link>)
+#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
+#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
+#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct <link linkend="v4l2-dv-preset">v4l2_dv_preset</link>)
+#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
+#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct <link linkend="v4l2-dv-timings">v4l2_dv_timings</link>)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
diff --git a/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml b/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml
new file mode 100644
index 00000000000..1d31427edd1
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-enum-dv-presets.xml
@@ -0,0 +1,238 @@
+<refentry id="vidioc-enum-dv-presets">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_ENUM_DV_PRESETS</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_ENUM_DV_PRESETS</refname>
+ <refpurpose>Enumerate supported Digital Video presets</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_dv_enum_preset *<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_ENUM_DV_PRESETS</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <para>To query the attributes of a DV preset, applications initialize the
+<structfield>index</structfield> field and zero the reserved array of &v4l2-dv-enum-preset;
+and call the <constant>VIDIOC_ENUM_DV_PRESETS</constant> ioctl with a pointer to this
+structure. Drivers fill the rest of the structure or return an
+&EINVAL; when the index is out of bounds. To enumerate all DV Presets supported,
+applications shall begin at index zero, incrementing by one until the
+driver returns <errorcode>EINVAL</errorcode>. Drivers may enumerate a
+different set of DV presets after switching the video input or
+output.</para>
+
+ <table pgwide="1" frame="none" id="v4l2-dv-enum-preset">
+ <title>struct <structname>v4l2_dv_enum_preset</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>index</structfield></entry>
+ <entry>Number of the DV preset, set by the
+application.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>preset</structfield></entry>
+ <entry>This field identifies one of the DV preset values listed in <xref linkend="v4l2-dv-presets-vals"/>.</entry>
+ </row>
+ <row>
+ <entry>__u8</entry>
+ <entry><structfield>name</structfield>[32]</entry>
+ <entry>Name of the preset, a NUL-terminated ASCII string, for example: "720P-60", "1080I-60". This information is
+intended for the user.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>width</structfield></entry>
+ <entry>Width of the active video in pixels for the DV preset.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>height</structfield></entry>
+ <entry>Height of the active video in lines for the DV preset.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[4]</entry>
+ <entry>Reserved for future extensions. Drivers must set the array to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="v4l2-dv-presets-vals">
+ <title>DV Presets</title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>Preset</entry>
+ <entry>Preset value</entry>
+ <entry>Description</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_INVALID</entry>
+ <entry>0</entry>
+ <entry>Invalid preset value.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_480P59_94</entry>
+ <entry>1</entry>
+ <entry>720x480 progressive video at 59.94 fps as per BT.1362.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_576P50</entry>
+ <entry>2</entry>
+ <entry>720x576 progressive video at 50 fps as per BT.1362.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_720P24</entry>
+ <entry>3</entry>
+ <entry>1280x720 progressive video at 24 fps as per SMPTE 296M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_720P25</entry>
+ <entry>4</entry>
+ <entry>1280x720 progressive video at 25 fps as per SMPTE 296M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_720P30</entry>
+ <entry>5</entry>
+ <entry>1280x720 progressive video at 30 fps as per SMPTE 296M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_720P50</entry>
+ <entry>6</entry>
+ <entry>1280x720 progressive video at 50 fps as per SMPTE 296M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_720P59_94</entry>
+ <entry>7</entry>
+ <entry>1280x720 progressive video at 59.94 fps as per SMPTE 296M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_720P60</entry>
+ <entry>8</entry>
+ <entry>1280x720 progressive video at 60 fps as per SMPTE 274M/296M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080I29_97</entry>
+ <entry>9</entry>
+ <entry>1920x1080 interlaced video at 29.97 fps as per BT.1120/SMPTE 274M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080I30</entry>
+ <entry>10</entry>
+ <entry>1920x1080 interlaced video at 30 fps as per BT.1120/SMPTE 274M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080I25</entry>
+ <entry>11</entry>
+ <entry>1920x1080 interlaced video at 25 fps as per BT.1120.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080I50</entry>
+ <entry>12</entry>
+ <entry>1920x1080 interlaced video at 50 fps as per SMPTE 274M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080I60</entry>
+ <entry>13</entry>
+ <entry>1920x1080 interlaced video at 60 fps as per SMPTE 274M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080P24</entry>
+ <entry>14</entry>
+ <entry>1920x1080 progressive video at 24 fps as per SMPTE 274M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080P25</entry>
+ <entry>15</entry>
+ <entry>1920x1080 progressive video at 25 fps as per SMPTE 274M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080P30</entry>
+ <entry>16</entry>
+ <entry>1920x1080 progressive video at 30 fps as per SMPTE 274M.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080P50</entry>
+ <entry>17</entry>
+ <entry>1920x1080 progressive video at 50 fps as per BT.1120.</entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_1080P60</entry>
+ <entry>18</entry>
+ <entry>1920x1080 progressive video at 60 fps as per BT.1120.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>The &v4l2-dv-enum-preset; <structfield>index</structfield>
+is out of bounds.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
+
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
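As a quick illustration of the enumeration loop documented above, a minimal
user-space sketch might look as follows. The /dev/video0 node is an assumed
example device and the output format is arbitrary; only the structure and
ioctl names come from the interface described in the refentry.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_dv_enum_preset preset;
        __u32 i;
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Walk the preset list until the driver returns EINVAL. */
        for (i = 0; ; i++) {
                memset(&preset, 0, sizeof(preset));
                preset.index = i;
                if (ioctl(fd, VIDIOC_ENUM_DV_PRESETS, &preset) < 0)
                        break;
                printf("%u: %s (%ux%u)\n", preset.preset,
                       (char *)preset.name, preset.width, preset.height);
        }
        close(fd);
        return 0;
}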
diff --git a/Documentation/DocBook/v4l/vidioc-enuminput.xml b/Documentation/DocBook/v4l/vidioc-enuminput.xml
index 414856b8247..71b868e2fb8 100644
--- a/Documentation/DocBook/v4l/vidioc-enuminput.xml
+++ b/Documentation/DocBook/v4l/vidioc-enuminput.xml
@@ -124,7 +124,13 @@ current input.</entry>
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
+ <entry><structfield>capabilities</structfield></entry>
+ <entry>This field provides capabilities for the
+input. See <xref linkend="input-capabilities" /> for flags.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[3]</entry>
<entry>Reserved for future extensions. Drivers must set
the array to zero.</entry>
</row>
@@ -261,6 +267,34 @@ flag is set Macrovision has been detected.</entry>
</tbody>
</tgroup>
</table>
+
+ <!-- Capability flags based on video timings RFC by Muralidharan
+Karicheri, titled RFC (v1.2): V4L - Support for video timings at the
+input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
+ -->
+ <table frame="none" pgwide="1" id="input-capabilities">
+ <title>Input capabilities</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_IN_CAP_PRESETS</constant></entry>
+ <entry>0x00000001</entry>
+ <entry>This input supports setting DV presets by using VIDIOC_S_DV_PRESET.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_IN_CAP_CUSTOM_TIMINGS</constant></entry>
+ <entry>0x00000002</entry>
+ <entry>This input supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_IN_CAP_STD</constant></entry>
+ <entry>0x00000004</entry>
+ <entry>This input supports setting the TV standard by using VIDIOC_S_STD.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
</refsect1>
<refsect1>
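Because the capabilities field replaces part of the former reserved space,
applications should test the relevant flag before relying on the DV ioctls.
A hedged sketch of such a check follows; the helper name and error convention
are illustrative, only the structures and flags come from this patch.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Return 1 if the currently selected input supports DV presets,
 * 0 if it does not, or a negative errno value on failure. */
static int input_supports_presets(int fd)
{
        struct v4l2_input input;
        int index;

        if (ioctl(fd, VIDIOC_G_INPUT, &index) < 0)
                return -errno;
        memset(&input, 0, sizeof(input));
        input.index = index;
        if (ioctl(fd, VIDIOC_ENUMINPUT, &input) < 0)
                return -errno;
        return (input.capabilities & V4L2_IN_CAP_PRESETS) ? 1 : 0;
}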
diff --git a/Documentation/DocBook/v4l/vidioc-enumoutput.xml b/Documentation/DocBook/v4l/vidioc-enumoutput.xml
index e8d16dcd50c..a281d26a195 100644
--- a/Documentation/DocBook/v4l/vidioc-enumoutput.xml
+++ b/Documentation/DocBook/v4l/vidioc-enumoutput.xml
@@ -114,7 +114,13 @@ details on video standards and how to switch see <xref
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[4]</entry>
+ <entry><structfield>capabilities</structfield></entry>
+ <entry>This field provides capabilities for the
+output. See <xref linkend="output-capabilities" /> for flags.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[3]</entry>
<entry>Reserved for future extensions. Drivers must set
the array to zero.</entry>
</row>
@@ -147,6 +153,34 @@ CVBS, S-Video, RGB.</entry>
</tgroup>
</table>
+ <!-- Capabilities flags based on video timings RFC by Muralidharan
+Karicheri, titled RFC (v1.2): V4L - Support for video timings at the
+input/output interface to linux-media@vger.kernel.org on 19 Oct 2009.
+ -->
+ <table frame="none" pgwide="1" id="output-capabilities">
+ <title>Output capabilities</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_OUT_CAP_PRESETS</constant></entry>
+ <entry>0x00000001</entry>
+ <entry>This output supports setting DV presets by using VIDIOC_S_DV_PRESET.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_OUT_CAP_CUSTOM_TIMINGS</constant></entry>
+ <entry>0x00000002</entry>
+ <entry>This output supports setting custom video timings by using VIDIOC_S_DV_TIMINGS.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_OUT_CAP_STD</constant></entry>
+ <entry>0x00000004</entry>
+ <entry>This output supports setting the TV standard by using VIDIOC_S_STD.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
</refsect1>
<refsect1>
&return-value;
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
new file mode 100644
index 00000000000..3c6784e132f
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-preset.xml
@@ -0,0 +1,111 @@
+<refentry id="vidioc-g-dv-preset">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_G_DV_PRESET</refname>
+ <refname>VIDIOC_S_DV_PRESET</refname>
+ <refpurpose>Query or select the DV preset of the current input or output</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>&v4l2-dv-preset;
+*<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_G_DV_PRESET, VIDIOC_S_DV_PRESET</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+ <para>To query and select the current DV preset, applications
+use the <constant>VIDIOC_G_DV_PRESET</constant> and <constant>VIDIOC_S_DV_PRESET</constant>
+ioctls which take a pointer to a &v4l2-dv-preset; type as argument.
+Applications must zero the reserved array in &v4l2-dv-preset;.
+<constant>VIDIOC_G_DV_PRESET</constant> returns a DV preset in the field
+<structfield>preset</structfield> of &v4l2-dv-preset;.</para>
+
+ <para><constant>VIDIOC_S_DV_PRESET</constant> accepts a pointer to a &v4l2-dv-preset;
+that has the preset value to be set. Applications must zero the reserved array in &v4l2-dv-preset;.
+If the preset is not supported, the driver returns an &EINVAL;.</para>
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>This ioctl is not supported, or the
+<constant>VIDIOC_S_DV_PRESET</constant> parameter was unsuitable.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorcode>EBUSY</errorcode></term>
+ <listitem>
+ <para>The device is busy and therefore cannot change the preset.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <table pgwide="1" frame="none" id="v4l2-dv-preset">
+ <title>struct <structname>v4l2_dv_preset</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>preset</structfield></entry>
+ <entry>Preset value to represent the digital video timings</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[4]</entry>
+ <entry>Reserved fields for future use</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </refsect1>
+</refentry>
+
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
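The query/select sequence described above can be sketched in user space as
follows. This assumes an already opened device whose current input reports
V4L2_IN_CAP_PRESETS, and the chosen preset is only an example.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Select 720p60 and read the selection back. */
static int select_720p60(int fd)
{
        struct v4l2_dv_preset preset;

        memset(&preset, 0, sizeof(preset));     /* zero the reserved array */
        preset.preset = V4L2_DV_720P60;
        if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
                return -errno;  /* EINVAL if the preset is unsupported */

        memset(&preset, 0, sizeof(preset));
        if (ioctl(fd, VIDIOC_G_DV_PRESET, &preset) < 0)
                return -errno;
        return preset.preset == V4L2_DV_720P60 ? 0 : -EIO;
}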
diff --git a/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
new file mode 100644
index 00000000000..ecc19576bb8
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-g-dv-timings.xml
@@ -0,0 +1,224 @@
+<refentry id="vidioc-g-dv-timings">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_G_DV_TIMINGS</refname>
+ <refname>VIDIOC_S_DV_TIMINGS</refname>
+ <refpurpose>Get or set custom DV timings for input or output</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>&v4l2-dv-timings;
+*<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_G_DV_TIMINGS, VIDIOC_S_DV_TIMINGS</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+ <para>To set custom DV timings for the input or output, applications use the
+<constant>VIDIOC_S_DV_TIMINGS</constant> ioctl and to get the current custom timings,
+applications use the <constant>VIDIOC_G_DV_TIMINGS</constant> ioctl. The detailed timing
+information is filled in using the structure &v4l2-dv-timings;. These ioctls take
+a pointer to the &v4l2-dv-timings; structure as argument. If the ioctl is not supported
+or the timing values are not correct, the driver returns &EINVAL;.</para>
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>This ioctl is not supported, or the
+<constant>VIDIOC_S_DV_TIMINGS</constant> parameter was unsuitable.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorcode>EBUSY</errorcode></term>
+ <listitem>
+ <para>The device is busy and therefore cannot change the timings.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+
+ <table pgwide="1" frame="none" id="v4l2-bt-timings">
+ <title>struct <structname>v4l2_bt_timings</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>width</structfield></entry>
+ <entry>Width of the active video in pixels</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>height</structfield></entry>
+ <entry>Height of the active video in lines</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>interlaced</structfield></entry>
+ <entry>Progressive (0) or interlaced (1)</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>polarities</structfield></entry>
+ <entry>This is a bit mask that defines polarities of sync signals.
+bit 0 (V4L2_DV_VSYNC_POS_POL) is for vertical sync polarity and bit 1 (V4L2_DV_HSYNC_POS_POL) is for horizontal sync polarity. If the bit is set
+(1) it is positive polarity and if it is cleared (0), it is negative polarity.</entry>
+ </row>
+ <row>
+ <entry>__u64</entry>
+ <entry><structfield>pixelclock</structfield></entry>
+ <entry>Pixel clock in Hz. Ex. 74.25MHz->74250000</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>hfrontporch</structfield></entry>
+ <entry>Horizontal front porch in pixels</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>hsync</structfield></entry>
+ <entry>Horizontal sync length in pixels</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>hbackporch</structfield></entry>
+ <entry>Horizontal back porch in pixels</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>vfrontporch</structfield></entry>
+ <entry>Vertical front porch in lines</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>vsync</structfield></entry>
+ <entry>Vertical sync length in lines</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>vbackporch</structfield></entry>
+ <entry>Vertical back porch in lines</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>il_vfrontporch</structfield></entry>
+ <entry>Vertical front porch in lines for bottom field of interlaced field formats</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>il_vsync</structfield></entry>
+ <entry>Vertical sync length in lines for bottom field of interlaced field formats</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>il_vbackporch</structfield></entry>
+ <entry>Vertical back porch in lines for bottom field of interlaced field formats</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="v4l2-dv-timings">
+ <title>struct <structname>v4l2_dv_timings</structname></title>
+ <tgroup cols="4">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry></entry>
+ <entry>Type of DV timings as listed in <xref linkend="dv-timing-types"/>.</entry>
+ </row>
+ <row>
+ <entry>union</entry>
+ <entry><structfield></structfield></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>&v4l2-bt-timings;</entry>
+ <entry><structfield>bt</structfield></entry>
+ <entry>Timings defined by BT.656/1120 specifications</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[32]</entry>
+ <entry></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="dv-timing-types">
+ <title>DV Timing types</title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>Timing type</entry>
+ <entry>value</entry>
+ <entry>Description</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>V4L2_DV_BT_656_1120</entry>
+ <entry>0</entry>
+ <entry>BT.656/1120 timings</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+</refentry>
+
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
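For custom timings the structure is filled by hand. The sketch below programs
1280x720 progressive at 60 Hz through v4l2_bt_timings; the blanking numbers
are the conventional CEA-861 values and serve only as an illustration, they
are not taken from this patch.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_720p60_timings(int fd)
{
        struct v4l2_dv_timings timings;

        memset(&timings, 0, sizeof(timings));
        timings.type = V4L2_DV_BT_656_1120;
        timings.bt.width = 1280;
        timings.bt.height = 720;
        timings.bt.interlaced = V4L2_DV_PROGRESSIVE;
        timings.bt.polarities = V4L2_DV_VSYNC_POS_POL | V4L2_DV_HSYNC_POS_POL;
        timings.bt.pixelclock = 74250000;       /* 74.25 MHz */
        timings.bt.hfrontporch = 110;           /* illustrative CEA-861 values */
        timings.bt.hsync = 40;
        timings.bt.hbackporch = 220;
        timings.bt.vfrontporch = 5;
        timings.bt.vsync = 5;
        timings.bt.vbackporch = 20;

        if (ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings) < 0)
                return -errno;  /* EINVAL if unsupported or out of range */
        return 0;
}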
diff --git a/Documentation/DocBook/v4l/vidioc-g-std.xml b/Documentation/DocBook/v4l/vidioc-g-std.xml
index b6f5d267e85..912f8513e5d 100644
--- a/Documentation/DocBook/v4l/vidioc-g-std.xml
+++ b/Documentation/DocBook/v4l/vidioc-g-std.xml
@@ -86,6 +86,12 @@ standards.</para>
<constant>VIDIOC_S_STD</constant> parameter was unsuitable.</para>
</listitem>
</varlistentry>
+ <varlistentry>
+ <term><errorcode>EBUSY</errorcode></term>
+ <listitem>
+ <para>The device is busy and therefore cannot change the standard.</para>
+ </listitem>
+ </varlistentry>
</variablelist>
</refsect1>
</refentry>
diff --git a/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
new file mode 100644
index 00000000000..87e4f0f6151
--- /dev/null
+++ b/Documentation/DocBook/v4l/vidioc-query-dv-preset.xml
@@ -0,0 +1,85 @@
+<refentry id="vidioc-query-dv-preset">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_QUERY_DV_PRESET</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_QUERY_DV_PRESET</refname>
+ <refpurpose>Sense the DV preset received by the current
+input</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>&v4l2-dv-preset; *<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_QUERY_DV_PRESET</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <para>The hardware may be able to detect the current DV preset
+automatically, similar to sensing the video standard. To do so, applications
+call <constant>VIDIOC_QUERY_DV_PRESET</constant> with a pointer to a
+&v4l2-dv-preset; type. Once the hardware detects a preset, that preset is
+returned in the preset field of &v4l2-dv-preset;. When detection is not
+possible or fails, the value V4L2_DV_INVALID is returned.</para>
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>This ioctl is not supported.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorcode>EBUSY</errorcode></term>
+ <listitem>
+ <para>The device is busy and therefore cannot sense the preset.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
+
+<!--
+Local Variables:
+mode: sgml
+sgml-parent-document: "v4l2.sgml"
+indent-tabs-mode: nil
+End:
+-->
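Sensing the incoming preset can be sketched the same way; the helper name and
the -ENOLINK return for "nothing detected" are illustrative choices, not part
of the interface.

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the hardware which preset it currently receives on the input. */
static int query_preset(int fd, __u32 *detected)
{
        struct v4l2_dv_preset preset;

        memset(&preset, 0, sizeof(preset));
        if (ioctl(fd, VIDIOC_QUERY_DV_PRESET, &preset) < 0)
                return -errno;  /* EINVAL if the ioctl is not supported */
        if (preset.preset == V4L2_DV_INVALID)
                return -ENOLINK;        /* no preset detected */
        *detected = preset.preset;
        return 0;
}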
diff --git a/Documentation/DocBook/v4l/vidioc-querystd.xml b/Documentation/DocBook/v4l/vidioc-querystd.xml
index b5a7ff93448..1a9e6039309 100644
--- a/Documentation/DocBook/v4l/vidioc-querystd.xml
+++ b/Documentation/DocBook/v4l/vidioc-querystd.xml
@@ -70,6 +70,12 @@ current video input or output.</para>
<para>This ioctl is not supported.</para>
</listitem>
</varlistentry>
+ <varlistentry>
+ <term><errorcode>EBUSY</errorcode></term>
+ <listitem>
+ <para>The device is busy and therefore cannot detect the standard.</para>
+ </listitem>
+ </varlistentry>
</variablelist>
</refsect1>
</refentry>
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index 78a9168ff37..1053a56be3b 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -15,7 +15,7 @@ kernel patches.
2: Passes allnoconfig, allmodconfig
3: Builds on multiple CPU architectures by using local cross-compile tools
- or something like PLM at OSDL.
+ or some other build farm.
4: ppc64 is a good architecture for cross-compilation checking because it
tends to use `unsigned long' for 64-bit quantities.
@@ -88,3 +88,6 @@ kernel patches.
24: All memory barriers {e.g., barrier(), rmb(), wmb()} need a comment in the
source code that explains the logic of what they are doing and why.
+
+25: If any ioctls are added by the patch, then also update
+ Documentation/ioctl/ioctl-number.txt.
diff --git a/Documentation/acpi/method-customizing.txt b/Documentation/acpi/method-customizing.txt
new file mode 100644
index 00000000000..e628cd23ca8
--- /dev/null
+++ b/Documentation/acpi/method-customizing.txt
@@ -0,0 +1,66 @@
+Linux ACPI Custom Control Method How To
+=======================================
+
+Written by Zhang Rui <rui.zhang@intel.com>
+
+
+Linux supports customizing ACPI control methods at runtime.
+
+Users can use this to
+1. override an existing method which may not work correctly,
+ or just for debugging purposes.
+2. insert a completely new method in order to create a missing
+ method such as _OFF, _ON, _STA, _INI, etc.
+For these cases, it is far simpler to dynamically install a single
+control method than to override the entire DSDT, because no kernel
+rebuild or reboot is needed and test results are available in minutes.
+
+Note: Only an ACPI METHOD can be overridden; other object types such as
+      "Device" or "OperationRegion" are not recognized.
+Note: The same ACPI control method can be overridden any number of times;
+      the kernel always uses the most recently installed version.
+
+1. override an existing method
+ a) get the ACPI table via ACPI sysfs I/F. e.g. to get the DSDT,
+ just run "cat /sys/firmware/acpi/tables/DSDT > /tmp/dsdt.dat"
+ b) disassemble the table by running "iasl -d dsdt.dat".
+ c) rewrite the ASL code of the method and save it in a new file,
+ d) package the new file (psr.asl) into an ACPI table format.
+ Here is an example of a customized \_SB._AC._PSR method,
+
+ DefinitionBlock ("", "SSDT", 1, "", "", 0x20080715)
+ {
+ External (ACON)
+
+ Method (\_SB_.AC._PSR, 0, NotSerialized)
+ {
+ Store ("In AC _PSR", Debug)
+ Return (ACON)
+ }
+ }
+ Note that the full pathname of the method in ACPI namespace
+ should be used.
+ And remember to use "External" to declare external objects.
+ e) assemble the file to generate the AML code of the method.
+ e.g. "iasl psr.asl" (psr.aml is generated as a result)
+ f) mount debugfs by "mount -t debugfs none /sys/kernel/debug"
+ g) override the old method via the debugfs by running
+ "cat /tmp/psr.aml > /sys/kernel/debug/acpi/custom_method"
+
+2. insert a new method
+ This is easier than overriding an existing method.
+ We just need to create the ASL code of the method we want to
+ insert and then follow steps c) ~ g) in section 1.
+
+3. undo your changes
+ The "undo" operation is not supported for a new inserted method
+ right now, i.e. we can not remove a method currently.
+ For an overrided method, in order to undo your changes, please
+ save a copy of the method original ASL code in step c) section 1,
+ and redo step c) ~ g) to override the method with the original one.
+
+
+Note: A kernel can run with multiple custom ACPI methods installed,
+      but each individual write to debugfs installs a SINGLE
+      method override, i.e. to insert/override multiple
+      ACPI methods, redo steps c) ~ g) once for each method.
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
new file mode 100644
index 00000000000..0af0e9eed5d
--- /dev/null
+++ b/Documentation/arm/OMAP/DSS
@@ -0,0 +1,317 @@
+OMAP2/3 Display Subsystem
+-------------------------
+
+This is an almost total rewrite of the OMAP FB driver in drivers/video/omap
+(let's call it DSS1). The main differences between DSS1 and DSS2 are DSI,
+TV-out and multiple display support, but there are also many small
+improvements.
+
+The DSS2 driver (omapdss module) is in arch/arm/plat-omap/dss/, and the FB,
+panel and controller drivers are in drivers/video/omap2/. DSS1 and DSS2
+currently live side by side; you can choose which one to use.
+
+Features
+--------
+
+Working and tested features include:
+
+- MIPI DPI (parallel) output
+- MIPI DSI output in command mode
+- MIPI DBI (RFBI) output
+- SDI output
+- TV output
+- All pieces can be compiled as a module or inside kernel
+- Use DISPC to update any of the outputs
+- Use CPU to update RFBI or DSI output
+- OMAP DISPC planes
+- RGB16, RGB24 packed, RGB24 unpacked
+- YUV2, UYVY
+- Scaling
+- Adjusting DSS FCK to find a good pixel clock
+- Use DSI DPLL to create DSS FCK
+
+Tested boards include:
+- OMAP3 SDP board
+- Beagle board
+- N810
+
+omapdss driver
+--------------
+
+The DSS driver itself does not implement a Linux framebuffer, V4L or similar
+interface the way the current drivers do; instead it provides an internal
+kernel API that upper level drivers can use.
+
+The DSS driver models OMAP's overlays, overlay managers and displays in a
+flexible way to enable uncommon multi-display configurations. In addition to
+modelling the hardware overlays, omapdss supports virtual overlays and overlay
+managers. These can be used when updating a display with CPU or system DMA.
+
+Panel and controller drivers
+----------------------------
+
+The drivers implement panel or controller specific functionality and are not
+usually visible to users except through omapfb driver. They register
+themselves to the DSS driver.
+
+omapfb driver
+-------------
+
+The omapfb driver implements an arbitrary number of standard Linux
+framebuffers. These framebuffers can be routed flexibly to any overlays,
+allowing a very dynamic display architecture.
+
+The driver exports some omapfb specific ioctls, which are compatible with the
+ioctls in the old driver.
+
+The rest of the non-standard features are exported via sysfs. Whether the final
+implementation will use sysfs, or ioctls, is still open.
+
+V4L2 drivers
+------------
+
+V4L2 support is being implemented at TI.
+
+From the omapdss point of view the V4L2 drivers should be similar to the
+framebuffer driver.
+
+Architecture
+--------------------
+
+Some clarification of what the different components do:
+
+ - Framebuffer is a memory area inside OMAP's SRAM/SDRAM that contains the
+ pixel data for the image. Framebuffer has width and height and color
+ depth.
+ - Overlay defines where the pixels are read from and where they go on the
+ screen. The overlay may be smaller than framebuffer, thus displaying only
+ part of the framebuffer. The position of the overlay may be changed if
+ the overlay is smaller than the display.
+ - Overlay manager combines the overlays into one image and feeds it to the
+ display.
+ - Display is the actual physical display device.
+
+A framebuffer can be connected to multiple overlays to show the same pixel data
+on all of the overlays. Note that in this case the overlay input sizes must be
+the same, but, in case of video overlays, the output size can be different. Any
+framebuffer can be connected to any overlay.
+
+An overlay can be connected to one overlay manager. Also DISPC overlays can be
+connected only to DISPC overlay managers, and virtual overlays can only be
+connected to virtual overlay managers.
+
+An overlay manager can be connected to one display. There are certain
+restrictions on which kinds of displays an overlay manager can be connected to:
+
+ - DISPC TV overlay manager can be only connected to TV display.
+ - Virtual overlay managers can only be connected to DBI or DSI displays.
+ - DISPC LCD overlay manager can be connected to all displays, except TV
+ display.
+
+Sysfs
+-----
+The sysfs interface is mainly used for testing. I don't think the sysfs
+interface is the best one for this in the final version, but I don't quite
+know what the best interfaces for these things would be.
+
+The sysfs interface is divided into two parts: DSS and FB.
+
+/sys/class/graphics/fb? directory:
+mirror 0=off, 1=on
+rotate Rotation 0-3 for 0, 90, 180, 270 degrees
+rotate_type 0 = DMA rotation, 1 = VRFB rotation
+overlays List of overlay numbers to which framebuffer pixels go
+phys_addr Physical address of the framebuffer
+virt_addr Virtual address of the framebuffer
+size Size of the framebuffer
+
+/sys/devices/platform/omapdss/overlay? directory:
+enabled 0=off, 1=on
+input_size width,height (ie. the framebuffer size)
+manager Destination overlay manager name
+name
+output_size width,height
+position x,y
+screen_width width
+global_alpha global alpha 0-255 0=transparent 255=opaque
+
+/sys/devices/platform/omapdss/manager? directory:
+display Destination display
+name
+alpha_blending_enabled 0=off, 1=on
+trans_key_enabled 0=off, 1=on
+trans_key_type gfx-destination, video-source
+trans_key_value transparency color key (RGB24)
+default_color default background color (RGB24)
+
+/sys/devices/platform/omapdss/display? directory:
+ctrl_name Controller name
+mirror 0=off, 1=on
+update_mode 0=off, 1=auto, 2=manual
+enabled 0=off, 1=on
+name
+rotate Rotation 0-3 for 0, 90, 180, 270 degrees
+timings Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw)
+ When writing, two special timings are accepted for tv-out:
+ "pal" and "ntsc"
+panel_name
+tear_elim Tearing elimination 0=off, 1=on
+
+There are also some debugfs files at <debugfs>/omapdss/ which show information
+about clocks and registers.
+
+Examples
+--------
+
+The following definitions have been made for the examples below:
+
+ovl0=/sys/devices/platform/omapdss/overlay0
+ovl1=/sys/devices/platform/omapdss/overlay1
+ovl2=/sys/devices/platform/omapdss/overlay2
+
+mgr0=/sys/devices/platform/omapdss/manager0
+mgr1=/sys/devices/platform/omapdss/manager1
+
+lcd=/sys/devices/platform/omapdss/display0
+dvi=/sys/devices/platform/omapdss/display1
+tv=/sys/devices/platform/omapdss/display2
+
+fb0=/sys/class/graphics/fb0
+fb1=/sys/class/graphics/fb1
+fb2=/sys/class/graphics/fb2
+
+Default setup on OMAP3 SDP
+--------------------------
+
+Here's the default setup on OMAP3 SDP board. All planes go to LCD. DVI
+and TV-out are not in use. The columns from left to right are:
+framebuffers, overlays, overlay managers, displays. Framebuffers are
+handled by omapfb, and the rest by the DSS.
+
+FB0 --- GFX -\ DVI
+FB1 --- VID1 --+- LCD ---- LCD
+FB2 --- VID2 -/ TV ----- TV
+
+Example: Switch from LCD to DVI
+-------------------------------
+
+w=`cat $dvi/timings | cut -d "," -f 2 | cut -d "/" -f 1`
+h=`cat $dvi/timings | cut -d "," -f 3 | cut -d "/" -f 1`
+
+echo "0" > $lcd/enabled
+echo "" > $mgr0/display
+fbset -fb /dev/fb0 -xres $w -yres $h -vxres $w -vyres $h
+# at this point you have to switch the dvi/lcd dip-switch from the omap board
+echo "dvi" > $mgr0/display
+echo "1" > $dvi/enabled
+
+After this the configuration looks like:
+
+FB0 --- GFX -\ -- DVI
+FB1 --- VID1 --+- LCD -/ LCD
+FB2 --- VID2 -/ TV ----- TV
+
+Example: Clone GFX overlay to LCD and TV
+----------------------------------------
+
+w=`cat $tv/timings | cut -d "," -f 2 | cut -d "/" -f 1`
+h=`cat $tv/timings | cut -d "," -f 3 | cut -d "/" -f 1`
+
+echo "0" > $ovl0/enabled
+echo "0" > $ovl1/enabled
+
+echo "" > $fb1/overlays
+echo "0,1" > $fb0/overlays
+
+echo "$w,$h" > $ovl1/output_size
+echo "tv" > $ovl1/manager
+
+echo "1" > $ovl0/enabled
+echo "1" > $ovl1/enabled
+
+echo "1" > $tv/enabled
+
+After this the configuration looks like (only relevant parts shown):
+
+FB0 +-- GFX ---- LCD ---- LCD
+ \- VID1 ---- TV ---- TV
+
+Misc notes
+----------
+
+OMAP FB allocates the framebuffer memory using the OMAP VRAM allocator.
+
+Using the DSI DPLL to generate the pixel clock it is possible to produce a
+pixel clock of 86.5MHz (the maximum), giving 1280x1024@57 output from DVI.
+
+Rotation and mirroring currently only support RGB565 and RGB8888 modes. VRFB
+does not support mirroring.
+
+VRFB rotation requires much more memory than non-rotated framebuffer, so you
+probably need to increase your vram setting before using VRFB rotation. Also,
+many applications may not work with VRFB if they do not pay attention to all
+framebuffer parameters.
+
+Kernel boot arguments
+---------------------
+
+vram=<size>
+ - Amount of total VRAM to preallocate. For example, "10M". omapfb
+ allocates memory for framebuffers from VRAM.
+
+omapfb.mode=<display>:<mode>[,...]
+ - Default video mode for specified displays. For example,
+ "dvi:800x400MR-24@60". See drivers/video/modedb.c.
+ There are also two special modes: "pal" and "ntsc" that
+ can be used for TV out.
+
+omapfb.vram=<fbnum>:<size>[@<physaddr>][,...]
+ - VRAM allocated for a framebuffer. Normally omapfb allocates vram
+ depending on the display size. With this you can manually allocate
+ more or define the physical address of each framebuffer. For example,
+ "1:4M" to allocate 4M for fb1.
+
+omapfb.debug=<y|n>
+ - Enable debug printing. You have to have OMAPFB debug support enabled
+ in kernel config.
+
+omapfb.test=<y|n>
+ - Draw test pattern to framebuffer whenever framebuffer settings change.
+ You need to have OMAPFB debug support enabled in kernel config.
+
+omapfb.vrfb=<y|n>
+ - Use VRFB rotation for all framebuffers.
+
+omapfb.rotate=<angle>
+ - Default rotation applied to all framebuffers.
+ 0 - 0 degree rotation
+ 1 - 90 degree rotation
+ 2 - 180 degree rotation
+ 3 - 270 degree rotation
+
+omapfb.mirror=<y|n>
+ - Default mirror for all framebuffers. Only works with DMA rotation.
+
+omapdss.def_disp=<display>
+ - Name of default display, to which all overlays will be connected.
+ Common examples are "lcd" or "tv".
+
+omapdss.debug=<y|n>
+ - Enable debug printing. You have to have DSS debug support enabled in
+ kernel config.
+
+TODO
+----
+
+DSS locking
+
+Error checking
+- Lots of checks are missing or implemented just as BUG()
+
+System DMA update for DSI
+- Can be used for RGB16 and RGB24P modes. Probably not for RGB24U (how
+ to skip the empty byte?)
+
+OMAP1 support
+- Not sure if needed
+
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index d6840a91e1e..c34e12440fe 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,9 +1,6 @@
00-INDEX
- This file
-cache-lock.txt
- - HOWTO for blackfin cache locking.
-
cachefeatures.txt
- Supported cache features.
diff --git a/Documentation/blackfin/Makefile b/Documentation/blackfin/Makefile
new file mode 100644
index 00000000000..773dbb103f1
--- /dev/null
+++ b/Documentation/blackfin/Makefile
@@ -0,0 +1,6 @@
+obj-m := gptimers-example.o
+
+all: modules
+
+modules clean:
+ $(MAKE) -C ../.. SUBDIRS=$(PWD) $@
diff --git a/Documentation/blackfin/cache-lock.txt b/Documentation/blackfin/cache-lock.txt
deleted file mode 100644
index 88ba1e6c31c..00000000000
--- a/Documentation/blackfin/cache-lock.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * File: Documentation/blackfin/cache-lock.txt
- * Based on:
- * Author:
- *
- * Created:
- * Description: This file contains the simple DMA Implementation for Blackfin
- *
- * Rev: $Id: cache-lock.txt 2384 2006-11-01 04:12:43Z magicyang $
- *
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- */
-
-How to lock your code in cache in uClinux/blackfin
---------------------------------------------------
-
-There are only a few steps required to lock your code into the cache.
-Currently you can lock the code by Way.
-
-Below are the interface provided for locking the cache.
-
-
-1. cache_grab_lock(int Ways);
-
-This function grab the lock for locking your code into the cache specified
-by Ways.
-
-
-2. cache_lock(int Ways);
-
-This function should be called after your critical code has been executed.
-Once the critical code exits, the code is now loaded into the cache. This
-function locks the code into the cache.
-
-
-So, the example sequence will be:
-
- cache_grab_lock(WAY0_L); /* Grab the lock */
-
- critical_code(); /* Execute the code of interest */
-
- cache_lock(WAY0_L); /* Lock the cache */
-
-Where WAY0_L signifies WAY0 locking.
diff --git a/Documentation/blackfin/cachefeatures.txt b/Documentation/blackfin/cachefeatures.txt
index 0fbec23becb..75de51f9451 100644
--- a/Documentation/blackfin/cachefeatures.txt
+++ b/Documentation/blackfin/cachefeatures.txt
@@ -41,16 +41,6 @@
icplb_flush();
dcplb_flush();
- - Locking the cache.
-
- cache_grab_lock();
- cache_lock();
-
- Please refer linux-2.6.x/Documentation/blackfin/cache-lock.txt for how to
- lock the cache.
-
- Locking the cache is optional feature.
-
- Miscellaneous cache functions.
flush_cache_all();
diff --git a/Documentation/blackfin/gptimers-example.c b/Documentation/blackfin/gptimers-example.c
new file mode 100644
index 00000000000..b1bd6340e74
--- /dev/null
+++ b/Documentation/blackfin/gptimers-example.c
@@ -0,0 +1,83 @@
+/*
+ * Simple gptimers example
+ * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:drivers:gptimers
+ *
+ * Copyright 2007-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/gptimers.h>
+#include <asm/portmux.h>
+
+/* ... random driver includes ... */
+
+#define DRIVER_NAME "gptimer_example"
+
+struct gptimer_data {
+ uint32_t period, width;
+};
+static struct gptimer_data data;
+
+/* ... random driver state ... */
+
+static irqreturn_t gptimer_example_irq(int irq, void *dev_id)
+{
+ struct gptimer_data *data = dev_id;
+
+ /* make sure it was our timer which caused the interrupt */
+ if (!get_gptimer_intr(TIMER5_id))
+ return IRQ_NONE;
+
+ /* read the width/period values that were captured for the waveform */
+ data->width = get_gptimer_pwidth(TIMER5_id);
+ data->period = get_gptimer_period(TIMER5_id);
+
+ /* acknowledge the interrupt */
+ clear_gptimer_intr(TIMER5_id);
+
+ /* tell the upper layers we took care of things */
+ return IRQ_HANDLED;
+}
+
+/* ... random driver code ... */
+
+static int __init gptimer_example_init(void)
+{
+ int ret;
+
+ /* grab the peripheral pins */
+ ret = peripheral_request(P_TMR5, DRIVER_NAME);
+ if (ret) {
+ printk(KERN_NOTICE DRIVER_NAME ": peripheral request failed\n");
+ return ret;
+ }
+
+ /* grab the IRQ for the timer */
+ ret = request_irq(IRQ_TIMER5, gptimer_example_irq, IRQF_SHARED, DRIVER_NAME, &data);
+ if (ret) {
+ printk(KERN_NOTICE DRIVER_NAME ": IRQ request failed\n");
+ peripheral_free(P_TMR5);
+ return ret;
+ }
+
+ /* setup the timer and enable it */
+ set_gptimer_config(TIMER5_id, WDTH_CAP | PULSE_HI | PERIOD_CNT | IRQ_ENA);
+ enable_gptimers(TIMER5bit);
+
+ return 0;
+}
+module_init(gptimer_example_init);
+
+static void __exit gptimer_example_exit(void)
+{
+ disable_gptimers(TIMER5bit);
+ free_irq(IRQ_TIMER5, &data);
+ peripheral_free(P_TMR5);
+}
+module_exit(gptimer_example_exit);
+
+MODULE_LICENSE("BSD");
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 75a58d14d3c..6c30e930c12 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -92,9 +92,9 @@ policy->cpuinfo.max_freq - the minimum and maximum frequency
(in kHz) which is supported by
this CPU
policy->cpuinfo.transition_latency the time it takes on this CPU to
- switch between two frequencies (if
- appropriate, else specify
- CPUFREQ_ETERNAL)
+ switch between two frequencies in
+ nanoseconds (if appropriate, else
+ specify CPUFREQ_ETERNAL)
policy->cur The current operating frequency of
this CPU (if appropriate)
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index 2a5b850847c..04f6b32993e 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -203,6 +203,17 @@ scaling_cur_freq : Current frequency of the CPU as determined by
the frequency the kernel thinks the CPU runs
at.
+bios_limit : If the BIOS tells the OS to limit a CPU to
+ lower frequencies, the user can read out the
+ maximum available frequency from this file.
+ This can typically happen through (often
+ unintended) BIOS settings, restrictions
+ triggered through a service processor, or other
+ BIOS/HW-based implementations.
+ This does not cover thermal ACPI limitations
+ which can be detected through the generic
+ thermal driver.
+
If you have selected the "userspace" governor which allows you to
set the CPU operating frequency to a specific value, you can read out
the current frequency in
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 9d620c153b0..a99d7031cdf 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -49,6 +49,12 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
additional_cpus=n (*) Use this to limit hotpluggable cpus. This option sets
cpu_possible_map = cpu_present_map + additional_cpus
+cede_offline={"off","on"} Use this option to disable/enable putting offlined
+ processors to an extended H_CEDE state on
+ supported pseries platforms.
+ If nothing is specified,
+ cede_offline is set to "on".
+
(*) Option valid only for following architectures
- ia64
@@ -309,41 +315,26 @@ A: The following are what is required for CPU hotplug infrastructure to work
Q: I need to ensure that a particular cpu is not removed when some work
specific to this cpu is in progress.
-A: First switch the current thread context to preferred cpu
+A: There are two ways. If your code can be run in interrupt context, use
+ smp_call_function_single(), otherwise use work_on_cpu(). Note that
+ work_on_cpu() is slow, and can fail due to lack of memory:
int my_func_on_cpu(int cpu)
{
- cpumask_t saved_mask, new_mask = CPU_MASK_NONE;
- int curr_cpu, err = 0;
-
- saved_mask = current->cpus_allowed;
- cpu_set(cpu, new_mask);
- err = set_cpus_allowed(current, new_mask);
-
- if (err)
- return err;
-
- /*
- * If we got scheduled out just after the return from
- * set_cpus_allowed() before running the work, this ensures
- * we stay locked.
- */
- curr_cpu = get_cpu();
-
- if (curr_cpu != cpu) {
- err = -EAGAIN;
- goto ret;
- } else {
- /*
- * Do work : But cant sleep, since get_cpu() disables preempt
- */
- }
- ret:
- put_cpu();
- set_cpus_allowed(current, saved_mask);
- return err;
- }
-
+ int err;
+ get_online_cpus();
+ if (!cpu_online(cpu))
+ err = -EINVAL;
+ else
+#if NEEDS_BLOCKING
+ err = work_on_cpu(cpu, __my_func_on_cpu, NULL);
+#else
+ smp_call_function_single(cpu, __my_func_on_cpu, &err,
+ true);
+#endif
+ put_online_cpus();
+ return err;
+ }
Q: How do we determine how many CPUs are available for hotplug.
A: There is no clear spec defined way from ACPI that can give us that
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index a5009c8300f..e3a77b21513 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -8,13 +8,19 @@ the block device which are also writable without interfering with the
original content;
*) To create device "forks", i.e. multiple different versions of the
same data stream.
+*) To merge a snapshot of a block device back into the snapshot's origin
+device.
+In the first two cases, dm copies only the chunks of data that get
+changed and uses a separate copy-on-write (COW) block device for
+storage.
-In both cases, dm copies only the chunks of data that get changed and
-uses a separate copy-on-write (COW) block device for storage.
+For snapshot merge the contents of the COW storage are merged back into
+the origin device.
-There are two dm targets available: snapshot and snapshot-origin.
+There are three dm targets available:
+snapshot, snapshot-origin, and snapshot-merge.
*) snapshot-origin <origin>
@@ -40,8 +46,25 @@ The difference is that for transient snapshots less metadata must be
saved on disk - they can be kept in memory by the kernel.
-How this is used by LVM2
-========================
+* snapshot-merge <origin> <COW device> <persistent> <chunksize>
+
+takes the same table arguments as the snapshot target except it only
+works with persistent snapshots. This target assumes the role of the
+"snapshot-origin" target and must not be loaded if the "snapshot-origin"
+is still present for <origin>.
+
+Creates a merging snapshot that takes control of the changed chunks
+stored in the <COW device> of an existing snapshot, through a handover
+procedure, and merges these chunks back into the <origin>. Once merging
+has started (in the background) the <origin> may be opened and the merge
+will continue while I/O is flowing to it. Changes to the <origin> are
+deferred until the merging snapshot's corresponding chunk(s) have been
+merged. Once merging has started the snapshot device, associated with
+the "snapshot" target, will return -EIO when accessed.
+
+
+How snapshot is used by LVM2
+============================
When you create the first LVM2 snapshot of a volume, four dm devices are used:
1) a device containing the original mapping table of the source volume;
@@ -72,3 +95,30 @@ brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow
brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap
brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base
+
+How snapshot-merge is used by LVM2
+==================================
+A merging snapshot assumes the role of the "snapshot-origin" while
+merging. As such the "snapshot-origin" is replaced with
+"snapshot-merge". The "-real" device is not changed and the "-cow"
+device is renamed to <origin name>-cow to aid LVM2's cleanup of the
+merging snapshot after it completes. The "snapshot" that hands over its
+COW device to the "snapshot-merge" is deactivated (unless using lvchange
+--refresh); but if it is left active it will simply return I/O errors.
+
+A snapshot will merge into its origin with the following command:
+
+lvconvert --merge volumeGroup/snap
+
+We'll now have this situation:
+
+# dmsetup table|grep volumeGroup
+
+volumeGroup-base-real: 0 2097152 linear 8:19 384
+volumeGroup-base-cow: 0 204800 linear 8:19 2097536
+volumeGroup-base: 0 2097152 snapshot-merge 254:11 254:12 P 16
+
+# ls -lL /dev/mapper/volumeGroup-*
+brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real
+brw------- 1 root root 254, 12 29 ago 18:16 /dev/mapper/volumeGroup-base-cow
+brw------- 1 root root 254, 10 29 ago 18:16 /dev/mapper/volumeGroup-base
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index e151b2a3626..3ad6acead94 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -103,6 +103,7 @@ gconf
gen-devlist
gen_crc32table
gen_init_cpio
+generated
genheaders
genksyms
*_gray256.c
diff --git a/Documentation/fb/viafb.txt b/Documentation/fb/viafb.txt
index 67dbf442b0b..f3e046a6a98 100644
--- a/Documentation/fb/viafb.txt
+++ b/Documentation/fb/viafb.txt
@@ -7,7 +7,7 @@
VIA UniChrome Family(CLE266, PM800 / CN400 / CN300,
P4M800CE / P4M800Pro / CN700 / VN800,
CX700 / VX700, K8M890, P4M890,
- CN896 / P4M900, VX800)
+ CN896 / P4M900, VX800, VX855)
[Driver features]
------------------------
@@ -154,13 +154,6 @@
0 : No Dual Edge Panel (default)
1 : Dual Edge Panel
- viafb_video_dev:
- This option is used to specify video output devices(CRT, DVI, LCD) for
- duoview case.
- For example:
- To output video on DVI, we should use:
- modprobe viafb viafb_video_dev=DVI...
-
viafb_lcd_port:
This option is used to specify LCD output port,
available values are "DVP0" "DVP1" "DFP_HIGHLOW" "DFP_HIGH" "DFP_LOW".
@@ -181,9 +174,6 @@ Notes:
and bpp, need to call VIAFB specified ioctl interface VIAFB_SET_DEVICE
instead of calling common ioctl function FBIOPUT_VSCREENINFO since
viafb doesn't support multi-head well, or it will cause screen crush.
- 4. VX800 2D accelerator hasn't been supported in this driver yet. When
- using driver on VX800, the driver will disable the acceleration
- function as default.
[Configure viafb with "fbset" tool]
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 2a4d77946c7..870d190fe61 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -291,22 +291,6 @@ Who: Michael Buesch <mb@bu3sch.de>
---------------------------
-What: usedac i386 kernel parameter
-When: 2.6.27
-Why: replaced by allowdac and no dac combination
-Who: Glauber Costa <gcosta@redhat.com>
-
----------------------------
-
-What: print_fn_descriptor_symbol()
-When: October 2009
-Why: The %pF vsprintf format provides the same functionality in a
- simpler way. print_fn_descriptor_symbol() is deprecated but
- still present to give out-of-tree modules time to change.
-Who: Bjorn Helgaas <bjorn.helgaas@hp.com>
-
----------------------------
-
What: /sys/o2cb symlink
When: January 2010
Why: /sys/fs/o2cb is the proper location for this information - /sys/o2cb
@@ -490,3 +474,22 @@ Why: Obsoleted by the adt7475 driver.
Who: Jean Delvare <khali@linux-fr.org>
---------------------------
+What: Support for lcd_switch and display_get in asus-laptop driver
+When: March 2010
+Why:	These two features use non-standard interfaces. They are the
+	only features that really need multiple paths to guess what the
+	right method name is on a specific laptop.
+
+	Removing them will allow a lot of code to be removed and the
+	drivers to be significantly cleaned up.
+
+ This will affect the backlight code which won't be able to know
+ if the backlight is on or off. The platform display file will also be
+ write only (like the one in eeepc-laptop).
+
+	This shouldn't affect many users because they usually know
+	when their display is on or off.
+
+Who: Corentin Chary <corentin.chary@gmail.com>
+
+----------------------------
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 7001782ab93..875d49696b6 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -1,7 +1,5 @@
00-INDEX
- this file (info on some of the filesystems supported by linux).
-Exporting
- - explanation of how to make filesystems exportable.
Locking
- info on locking rules as they pertain to Linux VFS.
9p.txt
@@ -68,12 +66,8 @@ mandatory-locking.txt
- info on the Linux implementation of Sys V mandatory file locking.
ncpfs.txt
- info on Novell Netware(tm) filesystem using NCP protocol.
-nfs41-server.txt
- - info on the Linux server implementation of NFSv4 minor version 1.
-nfs-rdma.txt
- - how to install and setup the Linux NFS/RDMA client and server software.
-nfsroot.txt
- - short guide on setting up a diskless box with NFS root filesystem.
+nfs/
+ - nfs-related documentation.
nilfs2.txt
- info and mount options for the NILFS2 filesystem.
ntfs.txt
@@ -92,8 +86,6 @@ relay.txt
- info on relay, for efficient streaming from kernel to user space.
romfs.txt
- description of the ROMFS filesystem.
-rpc-cache.txt
- - introduction to the caching mechanisms in the sunrpc layer.
seq_file.txt
- how to use the seq_file API
sharedsubtree.txt
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 05d5cf1d743..867c5b50cb4 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -32,8 +32,8 @@ journal_dev=devnum When the external journal device's major/minor numbers
identified through its new major/minor numbers encoded
in devnum.
-noload Don't load the journal on mounting. Note that this forces
- mount of inconsistent filesystem, which can lead to
+norecovery Don't load the journal on mounting. Note that this forces
+noload mount of inconsistent filesystem, which can lead to
various problems.
data=journal All data are committed into the journal prior to being
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
new file mode 100644
index 00000000000..2f68cd68876
--- /dev/null
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -0,0 +1,16 @@
+00-INDEX
+ - this file (nfs-related documentation).
+Exporting
+ - explanation of how to make filesystems exportable.
+knfsd-stats.txt
+ - statistics which the NFS server makes available to user space.
+nfs.txt
+ - nfs client, and DNS resolution for fs_locations.
+nfs41-server.txt
+ - info on the Linux server implementation of NFSv4 minor version 1.
+nfs-rdma.txt
+ - how to install and set up the Linux NFS/RDMA client and server software.
+nfsroot.txt
+ - short guide on setting up a diskless box with NFS root filesystem.
+rpc-cache.txt
+ - introduction to the caching mechanisms in the sunrpc layer.
diff --git a/Documentation/filesystems/Exporting b/Documentation/filesystems/nfs/Exporting
index 87019d2b598..87019d2b598 100644
--- a/Documentation/filesystems/Exporting
+++ b/Documentation/filesystems/nfs/Exporting
diff --git a/Documentation/filesystems/knfsd-stats.txt b/Documentation/filesystems/nfs/knfsd-stats.txt
index 64ced5149d3..64ced5149d3 100644
--- a/Documentation/filesystems/knfsd-stats.txt
+++ b/Documentation/filesystems/nfs/knfsd-stats.txt
diff --git a/Documentation/filesystems/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt
index e386f7e4bce..e386f7e4bce 100644
--- a/Documentation/filesystems/nfs-rdma.txt
+++ b/Documentation/filesystems/nfs/nfs-rdma.txt
diff --git a/Documentation/filesystems/nfs.txt b/Documentation/filesystems/nfs/nfs.txt
index f50f26ce6cd..f50f26ce6cd 100644
--- a/Documentation/filesystems/nfs.txt
+++ b/Documentation/filesystems/nfs/nfs.txt
diff --git a/Documentation/filesystems/nfs41-server.txt b/Documentation/filesystems/nfs/nfs41-server.txt
index 5920fe26e6f..1bd0d0c0517 100644
--- a/Documentation/filesystems/nfs41-server.txt
+++ b/Documentation/filesystems/nfs/nfs41-server.txt
@@ -41,7 +41,7 @@ interoperability problems with future clients. Known issues:
conformant with the spec (for example, we don't use kerberos
on the backchannel correctly).
- no trunking support: no clients currently take advantage of
- trunking, but this is a mandatory failure, and its use is
+ trunking, but this is a mandatory feature, and its use is
recommended to clients in a number of places. (E.g. to ensure
timely renewal in case an existing connection's retry timeouts
have gotten too long; see section 8.3 of the draft.)
@@ -213,3 +213,10 @@ The following cases aren't supported yet:
DESTROY_CLIENTID, DESTROY_SESSION, EXCHANGE_ID.
* DESTROY_SESSION MUST be the final operation in the COMPOUND request.
+Nonstandard compound limitations:
+* No support for a session's fore channel RPC compound that requires both a
+ ca_maxrequestsize request and a ca_maxresponsesize reply, so we may
+ fail to live up to the promise we made in CREATE_SESSION fore channel
+ negotiation.
+* No more than one IO operation (read, write, readdir) allowed per
+ compound.
diff --git a/Documentation/filesystems/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index 3ba0b945aaf..3ba0b945aaf 100644
--- a/Documentation/filesystems/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
diff --git a/Documentation/filesystems/rpc-cache.txt b/Documentation/filesystems/nfs/rpc-cache.txt
index 8a382bea680..8a382bea680 100644
--- a/Documentation/filesystems/rpc-cache.txt
+++ b/Documentation/filesystems/nfs/rpc-cache.txt
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 01539f41067..4949fcaa6b6 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -49,8 +49,7 @@ Mount options
NILFS2 supports the following mount options:
(*) == default
-barrier=on(*) This enables/disables barriers. barrier=off disables
- it, barrier=on enables it.
+nobarrier Disables barriers.
errors=continue(*) Keep going on a filesystem error.
errors=remount-ro Remount the filesystem read-only on an error.
errors=panic Panic and halt the machine if an error occurs.
@@ -71,6 +70,10 @@ order=strict Apply strict in-order semantics that preserves sequence
blocks. That means, it is guaranteed that no
overtaking of events occurs in the recovered file
system after a crash.
+norecovery Disable recovery of the filesystem on mount.
+ This disables every write access on the device for
+ read-only mounts or snapshots. This option will fail
+ for r/w mounts on an unclean volume.
NILFS2 usage
============
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 92b888d540a..a7e9746ee7e 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -140,7 +140,7 @@ Callers of notify_change() need ->i_mutex now.
New super_block field "struct export_operations *s_export_op" for
explicit support for exporting, e.g. via NFS. The structure is fully
documented at its declaration in include/linux/fs.h, and in
-Documentation/filesystems/Exporting.
+Documentation/filesystems/nfs/Exporting.
Briefly it allows for the definition of decode_fh and encode_fh operations
to encode and decode filehandles, and allows the filesystem to use
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 94b9f2056f4..220cc6376ef 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -38,6 +38,7 @@ Table of Contents
3.3 /proc/<pid>/io - Display the IO accounting fields
3.4 /proc/<pid>/coredump_filter - Core dump filtering settings
3.5 /proc/<pid>/mountinfo - Information about mounts
+ 3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
------------------------------------------------------------------------------
@@ -1409,3 +1410,11 @@ For more information on mount propagation see:
Documentation/filesystems/sharedsubtree.txt
+
+3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
+--------------------------------------------------------
+These files provide a way to access a task's comm value. They also allow a
+task to set its own or one of its thread siblings' comm value. The comm value
+is limited in size compared to the cmdline value, so writing anything longer
+than the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
+comm value.
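
The comm interface documented in the hunk above can be exercised from user
space with a short C program; this is a minimal illustrative sketch, assuming
a kernel that exposes the writable /proc/<pid>/comm file described above:

/* Minimal sketch: set this task's comm via /proc/self/comm and read back
 * the (possibly truncated) result.  Assumes the interface described above. */
#include <stdio.h>

int main(void)
{
	char comm[64];
	FILE *f = fopen("/proc/self/comm", "w");

	if (!f)
		return 1;
	fputs("a-name-longer-than-TASK_COMM_LEN", f);	/* will be truncated */
	fclose(f);

	f = fopen("/proc/self/comm", "r");
	if (!f)
		return 1;
	if (fgets(comm, sizeof(comm), f))
		printf("comm is now: %s", comm);	/* kernel appends a newline */
	fclose(f);
	return 0;
}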
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index 0d15ebccf5b..a1e2e0dda90 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -248,9 +248,7 @@ code, that is done in the initialization code in the usual way:
{
struct proc_dir_entry *entry;
- entry = create_proc_entry("sequence", 0, NULL);
- if (entry)
- entry->proc_fops = &ct_file_ops;
+ proc_create("sequence", 0, NULL, &ct_file_ops);
return 0;
}
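
For context, the proc_create() call shown in the hunk above fits into a
registration function along the lines below. This is a compressed,
illustrative sketch using single_open() rather than the full seq_operations
walked through in seq_file.txt, and the ct_* names are only placeholders:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Placeholder show routine; the real example iterates with seq_operations. */
static int ct_show(struct seq_file *m, void *v)
{
	seq_printf(m, "example output\n");
	return 0;
}

static int ct_open(struct inode *inode, struct file *file)
{
	return single_open(file, ct_show, NULL);
}

static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init ct_init(void)
{
	/* the call the documentation now recommends */
	proc_create("sequence", 0, NULL, &ct_file_ops);
	return 0;
}
module_init(ct_init);

static void __exit ct_exit(void)
{
	remove_proc_entry("sequence", NULL);
}
module_exit(ct_exit);
MODULE_LICENSE("GPL");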
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 623f094c9d8..3de2f32edd9 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -472,7 +472,7 @@ __sync_single_inode) to check if ->writepages has been successful in
writing out the whole address_space.
The Writeback tag is used by filemap*wait* and sync_page* functions,
-via wait_on_page_writeback_range, to wait for all writeback to
+via filemap_fdatawait_range, to wait for all writeback to
complete. While waiting ->sync_page (if defined) will be called on
each page that is found to require writeback.
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index e4e7daed2ba..1866c27eec6 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -531,6 +531,13 @@ and have the following read/write attributes:
This file exists only if the pin can be configured as an
interrupt generating input pin.
+	"active_low" ... reads as either 0 (false) or 1 (true). Write
+		any nonzero value to invert the value attribute both
+		for reading and writing. Existing and subsequent
+		poll(2) support configured via the edge attribute
+		for "rising" or "falling" edges will follow this
+		setting.
+
GPIO controllers have paths like /sys/class/gpio/gpiochip42/ (for the
controller implementing GPIOs starting at #42) and have the following
read-only attributes:
@@ -566,6 +573,8 @@ requested using gpio_request():
int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
+ /* change the polarity of a GPIO node in sysfs */
+ int gpio_sysfs_set_active_low(unsigned gpio, int value);
After a kernel driver requests a GPIO, it may only be made available in
the sysfs interface by gpio_export(). The driver can control whether the
@@ -580,3 +589,9 @@ After the GPIO has been exported, gpio_export_link() allows creating
symlinks from elsewhere in sysfs to the GPIO sysfs node. Drivers can
use this to provide the interface under their own device in sysfs with
a descriptive name.
+
+Drivers can use gpio_sysfs_set_active_low() to hide GPIO line polarity
+differences between boards from user space. This only affects the
+sysfs interface. Polarity change can be done both before and after
+gpio_export(), and previously enabled poll(2) support for either
+rising or falling edge will be reconfigured to follow this setting.
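
A rough driver-side sketch of the flow described above (the GPIO number,
label and sysfs link name are made up purely for illustration) could look
like:

#include <linux/gpio.h>

#define MY_GPIO	42	/* hypothetical line that is active-low on this board */

static int my_export_gpio(struct device *dev)
{
	int err;

	err = gpio_request(MY_GPIO, "my-signal");
	if (err)
		return err;

	err = gpio_direction_input(MY_GPIO);
	if (err)
		goto err_free;

	/* hide the board's active-low wiring from user space */
	err = gpio_sysfs_set_active_low(MY_GPIO, 1);
	if (err)
		goto err_free;

	err = gpio_export(MY_GPIO, false);	/* direction may not be changed */
	if (err)
		goto err_free;

	return gpio_export_link(dev, "my-signal", MY_GPIO);

err_free:
	gpio_free(MY_GPIO);
	return err;
}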
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
new file mode 100644
index 00000000000..a7a18d453a5
--- /dev/null
+++ b/Documentation/hwmon/k10temp
@@ -0,0 +1,60 @@
+Kernel driver k10temp
+=====================
+
+Supported chips:
+* AMD Family 10h processors:
+ Socket F: Quad-Core/Six-Core/Embedded Opteron
+ Socket AM2+: Opteron, Phenom (II) X3/X4
+ Socket AM3: Quad-Core Opteron, Athlon/Phenom II X2/X3/X4, Sempron II
+ Socket S1G3: Athlon II, Sempron, Turion II
+* AMD Family 11h processors:
+ Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+
+ Prefix: 'k10temp'
+ Addresses scanned: PCI space
+ Datasheets:
+ BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h Processors:
+ http://support.amd.com/us/Processor_TechDocs/31116.pdf
+ BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
+ http://support.amd.com/us/Processor_TechDocs/41256.pdf
+ Revision Guide for AMD Family 10h Processors:
+ http://support.amd.com/us/Processor_TechDocs/41322.pdf
+ Revision Guide for AMD Family 11h Processors:
+ http://support.amd.com/us/Processor_TechDocs/41788.pdf
+ AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
+ http://support.amd.com/us/Processor_TechDocs/43373.pdf
+ AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
+ http://support.amd.com/us/Processor_TechDocs/43374.pdf
+ AMD Family 10h Desktop Processor Power and Thermal Data Sheet:
+ http://support.amd.com/us/Processor_TechDocs/43375.pdf
+
+Author: Clemens Ladisch <clemens@ladisch.de>
+
+Description
+-----------
+
+This driver permits reading of the internal temperature sensor of AMD
+Family 10h and 11h processors.
+
+All these processors have a sensor, but on older revisions of Family 10h
+processors, the sensor may return inconsistent values (erratum 319). The
+driver will refuse to load on these revisions unless you specify the
+"force=1" module parameter.
+
+There is one temperature measurement value, available as temp1_input in
+sysfs. It is measured in degrees Celsius with a resolution of 1/8th degree.
+Please note that it is defined as a relative value; to quote the AMD manual:
+
+ Tctl is the processor temperature control value, used by the platform to
+ control cooling systems. Tctl is a non-physical temperature on an
+ arbitrary scale measured in degrees. It does _not_ represent an actual
+ physical temperature like die or case temperature. Instead, it specifies
+ the processor temperature relative to the point at which the system must
+ supply the maximum cooling for the processor's specified maximum case
+ temperature and maximum thermal power dissipation.
+
+The maximum value for Tctl is available in the file temp1_max.
+
+If the BIOS has enabled hardware temperature control, the threshold at
+which the processor will throttle itself to avoid damage is available in
+temp1_crit and temp1_crit_hyst.
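
A small user-space sketch of reading the value described above follows; the
sysfs path is only an example (the hwmon device number varies from system to
system), and temp1_input is assumed to report millidegrees Celsius per the
usual hwmon convention:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/device/temp1_input"; /* example path */
	long millideg;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%ld", &millideg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("Tctl: %.3f (relative scale, see above)\n", millideg / 1000.0);
	return 0;
}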
diff --git a/Documentation/hwmon/lis3lv02d b/Documentation/hwmon/lis3lv02d
index effe949a728..06534f25e64 100644
--- a/Documentation/hwmon/lis3lv02d
+++ b/Documentation/hwmon/lis3lv02d
@@ -3,7 +3,8 @@ Kernel driver lis3lv02d
Supported chips:
- * STMicroelectronics LIS3LV02DL and LIS3LV02DQ
+ * STMicroelectronics LIS3LV02DL, LIS3LV02DQ (12 bits precision)
+ * STMicroelectronics LIS302DL, LIS3L02DQ, LIS331DL (8 bits)
Authors:
Yan Burman <burman.yan@gmail.com>
@@ -13,32 +14,52 @@ Authors:
Description
-----------
-This driver provides support for the accelerometer found in various HP
-laptops sporting the feature officially called "HP Mobile Data
-Protection System 3D" or "HP 3D DriveGuard". It detects automatically
-laptops with this sensor. Known models (for now the HP 2133, nc6420,
-nc2510, nc8510, nc84x0, nw9440 and nx9420) will have their axis
-automatically oriented on standard way (eg: you can directly play
-neverball). The accelerometer data is readable via
-/sys/devices/platform/lis3lv02d.
+This driver provides support for the accelerometer found in various HP laptops
+sporting the feature officially called "HP Mobile Data Protection System 3D" or
+"HP 3D DriveGuard". It automatically detects laptops with this sensor. Known
+models (the full list can be found in drivers/hwmon/hp_accel.c) will have their
+axes automatically oriented in the standard way (e.g. you can directly play
+neverball). The accelerometer data is readable via
+/sys/devices/platform/lis3lv02d. Reported values are scaled
+to mg values (1/1000th of earth gravity).
Sysfs attributes under /sys/devices/platform/lis3lv02d/:
position - 3D position that the accelerometer reports. Format: "(x,y,z)"
-calibrate - read: values (x, y, z) that are used as the base for input
- class device operation.
- write: forces the base to be recalibrated with the current
- position.
-rate - reports the sampling rate of the accelerometer device in HZ
+rate - read reports the sampling rate of the accelerometer device in Hz.
+ write changes sampling rate of the accelerometer device.
+ Only values which are supported by HW are accepted.
+selftest - performs selftest for the chip as specified by chip manufacturer.
This driver also provides an absolute input class device, allowing
-the laptop to act as a pinball machine-esque joystick.
+the laptop to act as a pinball machine-esque joystick. The joystick device can
+be calibrated and can operate in two different modes.
+By default, output values are scaled between -32768 .. 32767. In joystick raw
+mode, the joystick and the sysfs position entry have the same scale. There can
+be a small difference due to the input system's fuzziness feature.
+Events are also available through an input event device.
+
+Selftest is meant only for hardware diagnostic purposes. It is not meant to be
+used during normal operation. Position data is not corrupted during selftest
+but interrupt behaviour is not guaranteed to work reliably. In test mode, the
+sensing element is internally moved a little bit. Selftest measures the
+difference between normal mode and test mode. Chip specifications give the
+acceptance limits for each chip type. Limits are provided via platform data
+to allow adjustment of the limits without a change to the actual driver.
+Selftest returns either "OK x y z" or "FAIL x y z" where x, y and z are the
+measured differences between the modes. Axes are not remapped in selftest mode.
+Measurement values are provided to help HW diagnostic applications make the
+final decision.
+
+On HP laptops, if the LED infrastructure is activated, support for an LED
+indicating disk protection will be provided as /sys/class/leds/hp::hddprotect.
Another feature of the driver is misc device called "freefall" that
acts similar to /dev/rtc and reacts on free-fall interrupts received
from the device. It supports blocking operations, poll/select and
fasync operation modes. You must read 1 bytes from the device. The
result is number of free-fall interrupts since the last successful
-read (or 255 if number of interrupts would not fit).
+read (or 255 if number of interrupts would not fit). See the hpfall.c
+file for an example on using the device.
Axes orientation
@@ -55,7 +76,7 @@ the accelerometer are converted into a "standard" organisation of the axes
* If the laptop is put upside-down, Z becomes negative
If your laptop model is not recognized (cf "dmesg"), you can send an
-email to the authors to add it to the database. When reporting a new
+email to the maintainer to add it to the database. When reporting a new
laptop, please include the output of "dmidecode" plus the value of
/sys/devices/platform/lis3lv02d/position in these four cases.
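
As an illustration of the position attribute mentioned above, a user-space
reader might look like the sketch below (format "(x,y,z)", values in mg):

#include <stdio.h>

int main(void)
{
	int x, y, z;
	FILE *f = fopen("/sys/devices/platform/lis3lv02d/position", "r");

	if (!f) {
		perror("position");
		return 1;
	}
	if (fscanf(f, "(%d,%d,%d)", &x, &y, &z) == 3)
		printf("x=%d mg  y=%d mg  z=%d mg\n", x, y, z);
	fclose(f);
	return 0;
}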
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index 02b74899eda..b7e42ec4b26 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -81,8 +81,14 @@ pwm[1-4] - this file stores PWM duty cycle or DC value (fan speed) in range:
0 (stop) to 255 (full)
pwm[1-4]_enable - this file controls mode of fan/temperature control:
- * 1 Manual Mode, write to pwm file any value 0-255 (full speed)
- * 2 Thermal Cruise
+ * 1 Manual mode, write to pwm file any value 0-255 (full speed)
+ * 2 "Thermal Cruise" mode
+ * 3 "Fan Speed Cruise" mode
+ * 4 "Smart Fan III" mode
+
+pwm[1-4]_mode - controls if output is PWM or DC level
+ * 0 DC output (0 - 12v)
+ * 1 PWM output
Thermal Cruise mode
-------------------
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 7860aafb483..0a74603eb67 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -44,7 +44,7 @@ static struct i2c_driver foo_driver = {
/* if device autodetection is needed: */
.class = I2C_CLASS_SOMETHING,
.detect = foo_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
.shutdown = foo_shutdown, /* optional */
.suspend = foo_suspend, /* optional */
diff --git a/Documentation/infiniband/ipoib.txt b/Documentation/infiniband/ipoib.txt
index 6d40f00b358..64eeb55d0c0 100644
--- a/Documentation/infiniband/ipoib.txt
+++ b/Documentation/infiniband/ipoib.txt
@@ -36,11 +36,11 @@ Datagram vs Connected modes
fabric with a 2K MTU, the IPoIB MTU will be 2048 - 4 = 2044 bytes.
In connected mode, the IB RC (Reliable Connected) transport is used.
- Connected mode is to takes advantage of the connected nature of the
- IB transport and allows an MTU up to the maximal IP packet size of
- 64K, which reduces the number of IP packets needed for handling
- large UDP datagrams, TCP segments, etc and increases the performance
- for large messages.
+ Connected mode takes advantage of the connected nature of the IB
+ transport and allows an MTU up to the maximal IP packet size of 64K,
+ which reduces the number of IP packets needed for handling large UDP
+ datagrams, TCP segments, etc and increases the performance for large
+ messages.
In connected mode, the interface's UD QP is still used for multicast
and communication with peers that don't support connected mode. In
diff --git a/Documentation/isdn/README.gigaset b/Documentation/isdn/README.gigaset
index 0fc9831d7ec..794941fc949 100644
--- a/Documentation/isdn/README.gigaset
+++ b/Documentation/isdn/README.gigaset
@@ -68,22 +68,38 @@ GigaSet 307x Device Driver
for troubleshooting or to pass module parameters.
The module ser_gigaset provides a serial line discipline N_GIGASET_M101
- which drives the device through the regular serial line driver. It must
- be attached to the serial line to which the M101 is connected with the
- ldattach(8) command (requires util-linux-ng release 2.14 or later), for
- example:
- ldattach GIGASET_M101 /dev/ttyS1
+ which uses the regular serial port driver to access the device, and must
+ therefore be attached to the serial device to which the M101 is connected.
+ The ldattach(8) command (included in util-linux-ng release 2.14 or later)
+ can be used for that purpose, for example:
+ ldattach GIGASET_M101 /dev/ttyS1
This will open the device file, attach the line discipline to it, and
then sleep in the background, keeping the device open so that the line
discipline remains active. To deactivate it, kill the daemon, for example
with
- killall ldattach
+ killall ldattach
before disconnecting the device. To have this happen automatically at
system startup/shutdown on an LSB compatible system, create and activate
an appropriate LSB startup script /etc/init.d/gigaset. (The init name
'gigaset' is officially assigned to this project by LANANA.)
Alternatively, just add the 'ldattach' command line to /etc/rc.local.
+ The modules accept the following parameters:
+
+ Module Parameter Meaning
+
+ gigaset debug debug level (see section 3.2.)
+
+ startmode initial operation mode (see section 2.5.):
+ bas_gigaset ) 1=ISDN4linux/CAPI (default), 0=Unimodem
+ ser_gigaset )
+ usb_gigaset ) cidmode initial Call-ID mode setting (see section
+ 2.5.): 1=on (default), 0=off
+
+ Depending on your distribution you may want to create a separate module
+ configuration file /etc/modprobe.d/gigaset for these, or add them to a
+ custom file like /etc/modprobe.conf.local.
+
2.2. Device nodes for user space programs
------------------------------------
The device can be accessed from user space (eg. by the user space tools
@@ -93,11 +109,48 @@ GigaSet 307x Device Driver
- /dev/ttyGU0 for M105 (USB data boxes)
- /dev/ttyGB0 for the base driver (direct USB connection)
- You can also select a "default device" which is used by the frontends when
+ If you connect more than one device of a type, they will get consecutive
+ device nodes, eg. /dev/ttyGU1 for a second M105.
+
+ You can also set a "default device" for the user space tools to use when
no device node is given as parameter, by creating a symlink /dev/ttyG to
one of them, eg.:
- ln -s /dev/ttyGB0 /dev/ttyG
+ ln -s /dev/ttyGB0 /dev/ttyG
+
+ The devices accept the following device specific ioctl calls
+ (defined in gigaset_dev.h):
+
+ ioctl(int fd, GIGASET_REDIR, int *cmd);
+ If cmd==1, the device is set to be controlled exclusively through the
+ character device node; access from the ISDN subsystem is blocked.
+ If cmd==0, the device is set to be used from the ISDN subsystem and does
+ not communicate through the character device node.
+
+ ioctl(int fd, GIGASET_CONFIG, int *cmd);
+ (ser_gigaset and usb_gigaset only)
+ If cmd==1, the device is set to adapter configuration mode where commands
+ are interpreted by the M10x DECT adapter itself instead of being
+ forwarded to the base station. In this mode, the device accepts the
+ commands described in Siemens document "AT-Kommando Alignment M10x Data"
+ for setting the operation mode, associating with a base station and
+     querying parameters like field strength and signal quality.
+ Note that there is no ioctl command for leaving adapter configuration
+ mode and returning to regular operation. In order to leave adapter
+ configuration mode, write the command ATO to the device.
+
+ ioctl(int fd, GIGASET_BRKCHARS, unsigned char brkchars[6]);
+ (usb_gigaset only)
+ Set the break characters on an M105's internal serial adapter to the six
+ bytes stored in brkchars[]. Unused bytes should be set to zero.
+
+ ioctl(int fd, GIGASET_VERSION, unsigned version[4]);
+ Retrieve version information from the driver. version[0] must be set to
+ one of:
+ - GIGVER_DRIVER: retrieve driver version
+ - GIGVER_COMPAT: retrieve interface compatibility version
+ - GIGVER_FWBASE: retrieve the firmware version of the base
+ Upon return, version[] is filled with the requested version information.
2.3. ISDN4linux
----------
@@ -113,15 +166,24 @@ GigaSet 307x Device Driver
Connection State: 0, Response: -1
gigaset_process_response: resp_code -1 in ConState 0 !
Timeout occurred
- you might need to use unimodem mode. (see section 2.5.)
+ you probably need to use unimodem mode. (see section 2.5.)
2.4. CAPI
----
If the driver is compiled with CAPI support (kernel configuration option
GIGASET_CAPI, experimental) it can also be used with CAPI 2.0 kernel and
- user space applications. ISDN4Linux is supported in this configuration
+ user space applications. For user space access, the module capi.ko must
+ be loaded. The capiinit command (included in the capi4k-utils package)
+ does this for you.
+
+ The CAPI variant of the driver supports legacy ISDN4Linux applications
via the capidrv compatibility driver. The kernel module capidrv.ko must
- be loaded explicitly ("modprobe capidrv") if needed.
+ be loaded explicitly with the command
+ modprobe capidrv
+ if needed, and cannot be unloaded again without unloading the driver
+ first. (These are limitations of capidrv.)
+
+ The note about unimodem mode in the preceding section applies here, too.
2.5. Unimodem mode
-------------
@@ -134,9 +196,14 @@ GigaSet 307x Device Driver
You can switch back using
gigacontr --mode isdn
- You can also load the driver using e.g.
- modprobe usb_gigaset startmode=0
- to prevent the driver from starting in "isdn4linux mode".
+ You can also put the driver directly into Unimodem mode when it's loaded,
+ by passing the module parameter startmode=0 to the hardware specific
+ module, e.g.
+ modprobe usb_gigaset startmode=0
+ or by adding a line like
+ options usb_gigaset startmode=0
+ to an appropriate module configuration file, like /etc/modprobe.d/gigaset
+ or /etc/modprobe.conf.local.
In this mode the device works like a modem connected to a serial port
(the /dev/ttyGU0, ... mentioned above) which understands the commands
@@ -164,9 +231,8 @@ GigaSet 307x Device Driver
options ppp_async flag_time=0
- to /etc/modprobe.conf. If your distribution has some local module
- configuration file like /etc/modprobe.conf.local,
- using that should be preferred.
+ to an appropriate module configuration file, like /etc/modprobe.d/gigaset
+ or /etc/modprobe.conf.local.
2.6. Call-ID (CID) mode
------------------
@@ -189,12 +255,13 @@ GigaSet 307x Device Driver
settings (CID mode).
- If you have several DECT data devices (M10x) which you want to use
in turn, select Unimodem mode by passing the parameter "cidmode=0" to
- the driver ("modprobe usb_gigaset cidmode=0" or modprobe.conf).
+ the appropriate driver module (ser_gigaset or usb_gigaset).
If you want both of these at once, you are out of luck.
- You can also use /sys/class/tty/ttyGxy/cidmode for changing the CID mode
- setting (ttyGxy is ttyGU0 or ttyGB0).
+ You can also use the tty class parameter "cidmode" of the device to
+ change its CID mode while the driver is loaded, eg.
+ echo 0 > /sys/class/tty/ttyGU0/cidmode
2.7. Unregistered Wireless Devices (M101/M105)
-----------------------------------------
@@ -208,7 +275,7 @@ GigaSet 307x Device Driver
driver. In that situation, a restricted set of functions is available
which includes, in particular, those necessary for registering the device
to a base or for switching it between Fixed Part and Portable Part
- modes.
+ modes. See the gigacontr(8) manpage for details.
3. Troubleshooting
---------------
@@ -222,9 +289,7 @@ GigaSet 307x Device Driver
options isdn dialtimeout=15
- to /etc/modprobe.conf. If your distribution has some local module
- configuration file like /etc/modprobe.conf.local,
- using that should be preferred.
+ to /etc/modprobe.d/gigaset, /etc/modprobe.conf.local or a similar file.
Problem:
Your isdn script aborts with a message about isdnlog.
@@ -264,7 +329,8 @@ GigaSet 307x Device Driver
The initial value can be set using the debug parameter when loading the
module "gigaset", e.g. by adding a line
options gigaset debug=0
- to /etc/modprobe.conf, ...
+ to your module configuration file, eg. /etc/modprobe.d/gigaset or
+ /etc/modprobe.conf.local.
Generated debugging information can be found
- as output of the command
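
The device-specific ioctls added in section 2.2 above can be driven from a
small C program. The sketch below shows only GIGASET_VERSION and assumes the
header is installed as <linux/gigaset_dev.h> and that a device node such as
/dev/ttyGU0 exists:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gigaset_dev.h>	/* assumed install location of gigaset_dev.h */

int main(void)
{
	unsigned version[4] = { GIGVER_DRIVER, 0, 0, 0 };
	int fd = open("/dev/ttyGU0", O_RDWR);

	if (fd < 0) {
		perror("/dev/ttyGU0");
		return 1;
	}
	if (ioctl(fd, GIGASET_VERSION, version) < 0) {
		perror("GIGASET_VERSION");
		close(fd);
		return 1;
	}
	printf("driver version: %u.%u.%u.%u\n",
	       version[0], version[1], version[2], version[3]);
	close(fd);
	return 0;
}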
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index bb3bf38f03d..6f8c1cabbc5 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -1,3 +1,17 @@
+Output files
+
+modules.order
+--------------------------------------------------
+This file records the order in which modules appear in Makefiles. This
+is used by modprobe to deterministically resolve aliases that match
+multiple modules.
+
+modules.builtin
+--------------------------------------------------
+This file lists all modules that are built into the kernel. This is used
+by modprobe so that it does not fail when trying to load something built in.
+
+
Environment variables
KCPPFLAGS
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 849b5e56d06..49efae70397 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -103,10 +103,16 @@ KCONFIG_AUTOCONFIG
This environment variable can be set to specify the path & name of the
"auto.conf" file. Its default value is "include/config/auto.conf".
+KCONFIG_TRISTATE
+--------------------------------------------------
+This environment variable can be set to specify the path & name of the
+"tristate.conf" file. Its default value is "include/config/tristate.conf".
+
KCONFIG_AUTOHEADER
--------------------------------------------------
This environment variable can be set to specify the path & name of the
-"autoconf.h" (header) file. Its default value is "include/linux/autoconf.h".
+"autoconf.h" (header) file.
+Its default value is "include/generated/autoconf.h".
======================================================================
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 777dc8a32df..5ba4d9dff11 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1032,7 +1032,7 @@ and is between 256 and 4096 characters. It is defined in the file
No delay
ip= [IP_PNP]
- See Documentation/filesystems/nfsroot.txt.
+ See Documentation/filesystems/nfs/nfsroot.txt.
ip2= [HW] Set IO/IRQ pairs for up to 4 IntelliPort boards
See comment before ip2_setup() in
@@ -1553,10 +1553,10 @@ and is between 256 and 4096 characters. It is defined in the file
going to be removed in 2.6.29.
nfsaddrs= [NFS]
- See Documentation/filesystems/nfsroot.txt.
+ See Documentation/filesystems/nfs/nfsroot.txt.
nfsroot= [NFS] nfs root filesystem for disk-less boxes.
- See Documentation/filesystems/nfsroot.txt.
+ See Documentation/filesystems/nfs/nfsroot.txt.
nfs.callback_tcpport=
[NFS] set the TCP port on which the NFSv4 callback
@@ -1787,6 +1787,11 @@ and is between 256 and 4096 characters. It is defined in the file
waiting for the ACK, so if this is set too high
interrupts *may* be lost!
+ omap_mux= [OMAP] Override bootloader pin multiplexing.
+ Format: <mux_mode0.mode_name=value>...
+ For example, to override I2C bus2:
+ omap_mux=i2c2_scl.i2c2_scl=0x100,i2c2_sda.i2c2_sda=0x100
+
opl3= [HW,OSS]
Format: <io>
@@ -2663,6 +2668,8 @@ and is between 256 and 4096 characters. It is defined in the file
to a common usb-storage quirk flag as follows:
a = SANE_SENSE (collect more than 18 bytes
of sense data);
+ b = BAD_SENSE (don't collect more than 18
+ bytes of sense data);
c = FIX_CAPACITY (decrease the reported
device capacity by one sector);
h = CAPACITY_HEURISTICS (decrease the
@@ -2722,6 +2729,11 @@ and is between 256 and 4096 characters. It is defined in the file
vmpoff= [KNL,S390] Perform z/VM CP command after power off.
Format: <command>
+ vt.cur_default= [VT] Default cursor shape.
+ Format: 0xCCBBAA, where AA, BB, and CC are the same as
+ the parameters of the <Esc>[?A;B;Cc escape sequence;
+ see VGA-softcursor.txt. Default: 2 = underline.
+
vt.default_blu= [VT]
Format: <blue0>,<blue1>,<blue2>,...,<blue15>
Change the default blue palette of the console.
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index aafcaa63419..169091f75e6 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -1,7 +1,7 @@
ThinkPad ACPI Extras Driver
- Version 0.23
- April 10th, 2009
+ Version 0.24
+ December 11th, 2009
Borislav Deianov <borislav@users.sf.net>
Henrique de Moraes Holschuh <hmh@hmh.eng.br>
@@ -460,6 +460,8 @@ event code Key Notes
For Lenovo ThinkPads with a new
BIOS, it has to be handled either
by the ACPI OSI, or by userspace.
+ The driver does the right thing,
+ never mess with this.
0x1011 0x10 FN+END Brightness down. See brightness
up for details.
@@ -582,46 +584,15 @@ with hotkey_report_mode.
Brightness hotkey notes:
-These are the current sane choices for brightness key mapping in
-thinkpad-acpi:
+Don't mess with the brightness hotkeys in a Thinkpad. If you want
+notifications for OSD, use the sysfs backlight class event support.
-For IBM and Lenovo models *without* ACPI backlight control (the ones on
-which thinkpad-acpi will autoload its backlight interface by default,
-and on which ACPI video does not export a backlight interface):
-
-1. Don't enable or map the brightness hotkeys in thinkpad-acpi, as
- these older firmware versions unfortunately won't respect the hotkey
- mask for brightness keys anyway, and always reacts to them. This
- usually work fine, unless X.org drivers are doing something to block
- the BIOS. In that case, use (3) below. This is the default mode of
- operation.
-
-2. Enable the hotkeys, but map them to something else that is NOT
- KEY_BRIGHTNESS_UP/DOWN or any other keycode that would cause
- userspace to try to change the backlight level, and use that as an
- on-screen-display hint.
-
-3. IF AND ONLY IF X.org drivers find a way to block the firmware from
- automatically changing the brightness, enable the hotkeys and map
- them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN, and feed that to
- something that calls xbacklight. thinkpad-acpi will not be able to
- change brightness in that case either, so you should disable its
- backlight interface.
-
-For Lenovo models *with* ACPI backlight control:
-
-1. Load up ACPI video and use that. ACPI video will report ACPI
- events for brightness change keys. Do not mess with thinkpad-acpi
- defaults in this case. thinkpad-acpi should not have anything to do
- with backlight events in a scenario where ACPI video is loaded:
- brightness hotkeys must be disabled, and the backlight interface is
- to be kept disabled as well. This is the default mode of operation.
-
-2. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
- and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN. Process
- these keys on userspace somehow (e.g. by calling xbacklight).
- The driver will do this automatically if it detects that ACPI video
- has been disabled.
+The driver will issue KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN events
+automatically for the cases where userspace has to do something to
+implement brightness changes. When you override these events, you will
+either fail to handle properly the ThinkPads that require explicit
+action to change backlight brightness, or the ThinkPads that require
+that no action be taken to work properly.
Bluetooth
@@ -1121,25 +1092,61 @@ WARNING:
its level up and down at every change.
-Volume control -- /proc/acpi/ibm/volume
----------------------------------------
+Volume control
+--------------
+
+procfs: /proc/acpi/ibm/volume
+ALSA: "ThinkPad Console Audio Control", default ID: "ThinkPadEC"
+
+NOTE: by default, the volume control interface operates in read-only
+mode, as it is supposed to be used for on-screen-display purposes.
+The read/write mode can be enabled through the use of the
+"volume_control=1" module parameter.
-This feature allows volume control on ThinkPad models which don't have
-a hardware volume knob. The available commands are:
+NOTE: distros are urged not to enable volume_control by default; this
+should be done by the local admin only. The ThinkPad UI is for the
+console audio control to be done through the volume keys only, and for
+the desktop environment to just provide on-screen-display feedback.
+Software volume control should be done only in the main AC97/HDA
+mixer.
+
+This feature allows volume control on ThinkPad models with a digital
+volume knob (when available, not all models have it), as well as
+mute/unmute control. The available commands are:
echo up >/proc/acpi/ibm/volume
echo down >/proc/acpi/ibm/volume
echo mute >/proc/acpi/ibm/volume
+ echo unmute >/proc/acpi/ibm/volume
echo 'level <level>' >/proc/acpi/ibm/volume
-The <level> number range is 0 to 15 although not all of them may be
+The <level> number range is 0 to 14 although not all of them may be
distinct. The unmute the volume after the mute command, use either the
-up or down command (the level command will not unmute the volume).
+up or down command (the level command will not unmute the volume), or
+the unmute command.
+
The current volume level and mute state is shown in the file.
-The ALSA mixer interface to this feature is still missing, but patches
-to add it exist. That problem should be addressed in the not so
-distant future.
+You can use the volume_capabilities parameter to tell the driver
+whether your thinkpad has volume control or mute-only control:
+volume_capabilities=1 for mixers with mute and volume control,
+volume_capabilities=2 for mixers with only mute control.
+
+If the driver misdetects the capabilities for your ThinkPad model,
+please report this to ibm-acpi-devel@lists.sourceforge.net, so that we
+can update the driver.
+
+There are two strategies for volume control. To select which one
+should be used, use the volume_mode module parameter: volume_mode=1
+selects EC mode, and volume_mode=3 selects EC mode with NVRAM backing
+(so that volume/mute changes are remembered across shutdown/reboot).
+
+The driver will operate in volume_mode=3 by default. If that does not
+work well on your ThinkPad model, please report this to
+ibm-acpi-devel@lists.sourceforge.net.
+
+The driver supports the standard ALSA module parameters. If the ALSA
+mixer is disabled, the driver will disable all volume functionality.
Fan control and monitoring: fan speed, fan enable/disable
@@ -1405,6 +1412,7 @@ to enable more than one output class, just add their values.
0x0008 HKEY event interface, hotkeys
0x0010 Fan control
0x0020 Backlight brightness
+ 0x0040 Audio mixer/volume control
There is also a kernel build option to enable more debugging
information, which may be necessary to debug driver problems.
@@ -1465,3 +1473,9 @@ Sysfs interface changelog:
and it is always able to disable hot keys. Very old
thinkpads are properly supported. hotkey_bios_mask
is deprecated and marked for removal.
+
+0x020600: Marker for backlight change event support.
+
+0x020700: Support for mute-only mixers.
+ Volume control in read-only mode by default.
+ Marker for ALSA mixer support.
diff --git a/Documentation/lockstat.txt b/Documentation/lockstat.txt
index 9cb9138f7a7..65f4c795015 100644
--- a/Documentation/lockstat.txt
+++ b/Documentation/lockstat.txt
@@ -62,8 +62,20 @@ applicable).
It also tracks 4 contention points per class. A contention point is a call site
that had to wait on lock acquisition.
+ - CONFIGURATION
+
+Lock statistics are enabled via CONFIG_LOCK_STATS.
+
- USAGE
+Enable collection of statistics:
+
+# echo 1 >/proc/sys/kernel/lock_stat
+
+Disable collection of statistics:
+
+# echo 0 >/proc/sys/kernel/lock_stat
+
Look at the current lock statistics:
( line numbers not part of actual output, done for clarity in the explanation
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 4edd39ec7db..188f4768f1d 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -233,9 +233,9 @@ All md devices contain:
resync_start
The point at which resync should start. If no resync is needed,
- this will be a very large number. At array creation it will
- default to 0, though starting the array as 'clean' will
- set it much larger.
+ this will be a very large number (or 'none' since 2.6.30-rc1). At
+ array creation it will default to 0, though starting the array as
+ 'clean' will set it much larger.
new_dev
This file can be written but not read. The value written should
@@ -296,6 +296,51 @@ All md devices contain:
active-idle
like active, but no writes have been seen for a while (safe_mode_delay).
+ bitmap/location
+ This indicates where the write-intent bitmap for the array is
+ stored.
+ It can be one of "none", "file" or "[+-]N".
+ "file" may later be extended to "file:/file/name"
+ "[+-]N" means that many sectors from the start of the metadata.
+ This is replicated on all devices. For arrays with externally
+ managed metadata, the offset is from the beginning of the
+ device.
+ bitmap/chunksize
+ The size, in bytes, of the chunk which will be represented by a
+ single bit. For RAID456, it is a portion of an individual
+ device. For RAID10, it is a portion of the array. For RAID1, it
+ is both (they come to the same thing).
+ bitmap/time_base
+ The time, in seconds, between looking for bits in the bitmap to
+ be cleared. In the current implementation, a bit will be cleared
+ between 2 and 3 times "time_base" after all the covered blocks
+ are known to be in-sync.
+ bitmap/backlog
+ When write-mostly devices are active in a RAID1, write requests
+ to those devices proceed in the background - the filesystem (or
+ other user of the device) does not have to wait for them.
+ 'backlog' sets a limit on the number of concurrent background
+     writes. If there are more than this, new writes will be
+ synchronous.
+ bitmap/metadata
+ This can be either 'internal' or 'external'.
+ 'internal' is the default and means the metadata for the bitmap
+ is stored in the first 256 bytes of the allocated space and is
+ managed by the md module.
+ 'external' means that bitmap metadata is managed externally to
+ the kernel (i.e. by some userspace program)
+ bitmap/can_clear
+ This is either 'true' or 'false'. If 'true', then bits in the
+ bitmap will be cleared when the corresponding blocks are thought
+ to be in-sync. If 'false', bits will never be cleared.
+ This is automatically set to 'false' if a write happens on a
+ degraded array, or if the array becomes degraded during a write.
+ When metadata is managed externally, it should be set to true
+ once the array becomes non-degraded, and this fact has been
+ recorded in the metadata.
+
+
+
As component devices are added to an md array, they appear in the 'md'
directory as new directories named
@@ -334,8 +379,9 @@ Each directory contains:
Writing "writemostly" sets the writemostly flag.
Writing "-writemostly" clears the writemostly flag.
Writing "blocked" sets the "blocked" flag.
- Writing "-blocked" clear the "blocked" flag and allows writes
+ Writing "-blocked" clears the "blocked" flag and allows writes
to complete.
+ Writing "in_sync" sets the in_sync flag.
This file responds to select/poll. Any change to 'faulty'
or 'blocked' causes an event.
@@ -372,6 +418,24 @@ Each directory contains:
array. If a value less than the current component_size is
written, it will be rejected.
+ recovery_start
+
+ When the device is not 'in_sync', this records the number of
+ sectors from the start of the device which are known to be
+ correct. This is normally zero, but during a recovery
+     operation it will steadily increase, and if the recovery is
+ interrupted, restoring this value can cause recovery to
+ avoid repeating the earlier blocks. With v1.x metadata, this
+ value is saved and restored automatically.
+
+ This can be set whenever the device is not an active member of
+ the array, either before the array is activated, or before
+ the 'slot' is set.
+
+ Setting this to 'none' is equivalent to setting 'in_sync'.
+ Setting to any other value also clears the 'in_sync' flag.
+
+
An active md device will also contain and entry for each active device
in the array. These are named
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index bbc8a6a3692..57e7e9cc187 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -160,12 +160,15 @@ Under each section, you can see 4 files.
NOTE:
These directories/files appear after physical memory hotplug phase.
-If CONFIG_NUMA is enabled the
-/sys/devices/system/memory/memoryXXX memory section
-directories can also be accessed via symbolic links located in
-the /sys/devices/system/node/node* directories. For example:
+If CONFIG_NUMA is enabled the memoryXXX/ directories can also be accessed
+via symbolic links located in the /sys/devices/system/node/node* directories.
+
+For example:
/sys/devices/system/node/node0/memory9 -> ../../memory/memory9
+A backlink will also be created:
+/sys/devices/system/memory/memory9/node0 -> ../../node/node0
+
--------------------------------
4. Physical memory hot-add phase
--------------------------------
diff --git a/Documentation/misc-devices/ad525x_dpot.txt b/Documentation/misc-devices/ad525x_dpot.txt
new file mode 100644
index 00000000000..0c9413b1cbf
--- /dev/null
+++ b/Documentation/misc-devices/ad525x_dpot.txt
@@ -0,0 +1,57 @@
+---------------------------------
+ AD525x Digital Potentiometers
+---------------------------------
+
+The ad525x_dpot driver exports a simple sysfs interface. This allows you to
+work with the immediate resistance settings as well as update the saved startup
+settings. Access to the factory programmed tolerance is also provided, but
+interpretation of this setting is required by the end application according to
+the specific part in use.
+
+---------
+ Files
+---------
+
+Each dpot device will have a set of eeprom, rdac, and tolerance files. How
+many depends on the actual part you have, as does the range of allowed values.
+
+The eeprom files are used to program the startup value of the device.
+
+The rdac files are used to program the immediate value of the device.
+
+The tolerance files are the read-only factory programmed tolerance settings
+and may vary greatly on a part-by-part basis. For exact interpretation of
+this field, please consult the datasheet for your part. This is presented
+as a hex file for easier parsing.
+
+-----------
+ Example
+-----------
+
+Locate the device in your sysfs tree. This is probably easiest by going into
+the common i2c directory and locating the device by the i2c slave address.
+
+ # ls /sys/bus/i2c/devices/
+ 0-0022 0-0027 0-002f
+
+So assuming the device in question is on the first i2c bus and has the slave
+address of 0x2f, we descend (unrelated sysfs entries have been trimmed).
+
+ # ls /sys/bus/i2c/devices/0-002f/
+ eeprom0 rdac0 tolerance0
+
+You can use simple reads/writes to access these files:
+
+ # cd /sys/bus/i2c/devices/0-002f/
+
+ # cat eeprom0
+ 0
+ # echo 10 > eeprom0
+ # cat eeprom0
+ 10
+
+ # cat rdac0
+ 5
+ # echo 3 > rdac0
+ # cat rdac0
+ 3
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt
index b565e8279d1..8e1ddec2c78 100644
--- a/Documentation/nommu-mmap.txt
+++ b/Documentation/nommu-mmap.txt
@@ -119,6 +119,32 @@ FURTHER NOTES ON NO-MMU MMAP
granule but will only discard the excess if appropriately configured as
this has an effect on fragmentation.
+ (*) The memory allocated by a request for an anonymous mapping will normally
+ be cleared by the kernel before being returned in accordance with the
+ Linux man pages (ver 2.22 or later).
+
+ In the MMU case this can be achieved with reasonable performance as
+ regions are backed by virtual pages, with the contents only being mapped
+ to cleared physical pages when a write happens on that specific page
+ (prior to which, the pages are effectively mapped to the global zero page
+ from which reads can take place). This spreads out the time it takes to
+ initialize the contents of a page - depending on the write-usage of the
+ mapping.
+
+ In the no-MMU case, however, anonymous mappings are backed by physical
+ pages, and the entire map is cleared at allocation time. This can cause
+ significant delays during a userspace malloc() as the C library does an
+ anonymous mapping and the kernel then does a memset for the entire map.
+
+ However, for memory that isn't required to be precleared - such as that
+ returned by malloc() - mmap() can take a MAP_UNINITIALIZED flag to
+ indicate to the kernel that it shouldn't bother clearing the memory before
+ returning it. Note that CONFIG_MMAP_ALLOW_UNINITIALIZED must be enabled
+ to permit this, otherwise the flag will be ignored.
+
+ uClibc uses this to speed up malloc(), and the ELF-FDPIC binfmt uses this
+ to allocate the brk and stack region.
+
(*) A list of all the private copy and anonymous mappings on the system is
visible through /proc/maps in no-MMU mode.
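
As a sketch of the MAP_UNINITIALIZED usage described above: the flag only has
an effect on no-MMU kernels built with CONFIG_MMAP_ALLOW_UNINITIALIZED and is
silently ignored otherwise, and the fallback #define below is an assumption
for C libraries that do not expose the flag themselves.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0x4000000	/* value from asm-generic/mman-common.h */
#endif

int main(void)
{
	size_t len = 64 * 1024;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* contents may be uninitialized here; do not rely on zeroes */
	munmap(p, len);
	return 0;
}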
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 4a3109b2884..356fd86f4ea 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -42,80 +42,81 @@ struct dev_pm_ops {
...
};
-The ->runtime_suspend() callback is executed by the PM core for the bus type of
-the device being suspended. The bus type's callback is then _entirely_
-_responsible_ for handling the device as appropriate, which may, but need not
-include executing the device driver's own ->runtime_suspend() callback (from the
+The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are
+executed by the PM core for either the bus type, or device type (if the bus
+type's callback is not defined), or device class (if the bus type's and device
+type's callbacks are not defined) of the given device. The bus type, device type
+and device class callbacks are referred to as subsystem-level callbacks in what
+follows.
+
+The subsystem-level suspend callback is _entirely_ _responsible_ for handling
+the suspend of the device as appropriate, which may, but need not include
+executing the device driver's own ->runtime_suspend() callback (from the
PM core's point of view it is not necessary to implement a ->runtime_suspend()
-callback in a device driver as long as the bus type's ->runtime_suspend() knows
-what to do to handle the device).
+callback in a device driver as long as the subsystem-level suspend callback
+knows what to do to handle the device).
- * Once the bus type's ->runtime_suspend() callback has completed successfully
+ * Once the subsystem-level suspend callback has completed successfully
for given device, the PM core regards the device as suspended, which need
not mean that the device has been put into a low power state. It is
supposed to mean, however, that the device will not process data and will
- not communicate with the CPU(s) and RAM until its bus type's
- ->runtime_resume() callback is executed for it. The run-time PM status of
- a device after successful execution of its bus type's ->runtime_suspend()
- callback is 'suspended'.
-
- * If the bus type's ->runtime_suspend() callback returns -EBUSY or -EAGAIN,
- the device's run-time PM status is supposed to be 'active', which means that
- the device _must_ be fully operational afterwards.
-
- * If the bus type's ->runtime_suspend() callback returns an error code
- different from -EBUSY or -EAGAIN, the PM core regards this as a fatal
- error and will refuse to run the helper functions described in Section 4
- for the device, until the status of it is directly set either to 'active'
- or to 'suspended' (the PM core provides special helper functions for this
- purpose).
-
-In particular, if the driver requires remote wakeup capability for proper
-functioning and device_run_wake() returns 'false' for the device, then
-->runtime_suspend() should return -EBUSY. On the other hand, if
-device_run_wake() returns 'true' for the device and the device is put
-into a low power state during the execution of its bus type's
-->runtime_suspend(), it is expected that remote wake-up (i.e. hardware mechanism
-allowing the device to request a change of its power state, such as PCI PME)
-will be enabled for the device. Generally, remote wake-up should be enabled
-for all input devices put into a low power state at run time.
-
-The ->runtime_resume() callback is executed by the PM core for the bus type of
-the device being woken up. The bus type's callback is then _entirely_
-_responsible_ for handling the device as appropriate, which may, but need not
-include executing the device driver's own ->runtime_resume() callback (from the
-PM core's point of view it is not necessary to implement a ->runtime_resume()
-callback in a device driver as long as the bus type's ->runtime_resume() knows
-what to do to handle the device).
-
- * Once the bus type's ->runtime_resume() callback has completed successfully,
- the PM core regards the device as fully operational, which means that the
- device _must_ be able to complete I/O operations as needed. The run-time
- PM status of the device is then 'active'.
-
- * If the bus type's ->runtime_resume() callback returns an error code, the PM
- core regards this as a fatal error and will refuse to run the helper
- functions described in Section 4 for the device, until its status is
- directly set either to 'active' or to 'suspended' (the PM core provides
- special helper functions for this purpose).
-
-The ->runtime_idle() callback is executed by the PM core for the bus type of
-given device whenever the device appears to be idle, which is indicated to the
-PM core by two counters, the device's usage counter and the counter of 'active'
-children of the device.
+ not communicate with the CPU(s) and RAM until the subsystem-level resume
+ callback is executed for it. The run-time PM status of a device after
+ successful execution of the subsystem-level suspend callback is 'suspended'.
+
+ * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
+ the device's run-time PM status is 'active', which means that the device
+ _must_ be fully operational afterwards.
+
+ * If the subsystem-level suspend callback returns an error code different
+ from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
+ refuse to run the helper functions described in Section 4 for the device,
+ until its status is directly set either to 'active' or to 'suspended'
+ (the PM core provides special helper functions for this purpose).
+
+In particular, if the driver requires remote wake-up capability (i.e. a hardware
+mechanism allowing the device to request a change of its power state, such as
+PCI PME) for proper functioning and device_run_wake() returns 'false' for the
+device, then ->runtime_suspend() should return -EBUSY. On the other hand, if
+device_run_wake() returns 'true' for the device and the device is put into a low
+power state during the execution of the subsystem-level suspend callback, it is
+expected that remote wake-up will be enabled for the device. Generally, remote
+wake-up should be enabled for all input devices put into a low power state at
+run time.
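+
+As an illustration (a sketch only; the foo_* helpers are hypothetical), a
+->runtime_suspend() callback following these rules might look like:
+
+	static int foo_runtime_suspend(struct device *dev)
+	{
+		struct foo_device *foo = dev_get_drvdata(dev);
+
+		if (foo->needs_remote_wakeup && !device_run_wake(dev))
+			return -EBUSY;	/* stay 'active', may be retried later */
+
+		foo_save_registers(foo);
+		if (device_run_wake(dev))
+			foo_enable_remote_wakeup(foo);	/* e.g. PCI PME */
+		foo_enter_low_power(foo);
+		return 0;
+	}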
+
+The subsystem-level resume callback is _entirely_ _responsible_ for handling the
+resume of the device as appropriate, which may, but need not include executing
+the device driver's own ->runtime_resume() callback (from the PM core's point of
+view it is not necessary to implement a ->runtime_resume() callback in a device
+driver as long as the subsystem-level resume callback knows what to do to handle
+the device).
+
+ * Once the subsystem-level resume callback has completed successfully, the PM
+ core regards the device as fully operational, which means that the device
+ _must_ be able to complete I/O operations as needed. The run-time PM status
+ of the device is then 'active'.
+
+ * If the subsystem-level resume callback returns an error code, the PM core
+ regards this as a fatal error and will refuse to run the helper functions
+ described in Section 4 for the device, until its status is directly set
+ either to 'active' or to 'suspended' (the PM core provides special helper
+ functions for this purpose).
+
+The subsystem-level idle callback is executed by the PM core whenever the device
+appears to be idle, which is indicated to the PM core by two counters, the
+device's usage counter and the counter of 'active' children of the device.
* If any of these counters is decreased using a helper function provided by
the PM core and it turns out to be equal to zero, the other counter is
checked. If that counter also is equal to zero, the PM core executes the
- device bus type's ->runtime_idle() callback (with the device as an
- argument).
+ subsystem-level idle callback with the device as an argument.
-The action performed by a bus type's ->runtime_idle() callback is totally
-dependent on the bus type in question, but the expected and recommended action
-is to check if the device can be suspended (i.e. if all of the conditions
-necessary for suspending the device are satisfied) and to queue up a suspend
-request for the device in that case. The value returned by this callback is
-ignored by the PM core.
+The action performed by a subsystem-level idle callback is totally dependent on
+the subsystem in question, but the expected and recommended action is to check
+if the device can be suspended (i.e. if all of the conditions necessary for
+suspending the device are satisfied) and to queue up a suspend request for the
+device in that case. The value returned by this callback is ignored by the PM
+core.
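+
+For example (an illustrative sketch; bar_device_may_suspend() is a hypothetical
+subsystem-specific check), an idle callback following this recommendation
+could be:
+
+	static int bar_runtime_idle(struct device *dev)
+	{
+		/* Verify any subsystem-specific conditions for suspending. */
+		if (!bar_device_may_suspend(dev))
+			return 0;	/* the return value is ignored anyway */
+
+		/* Queue up a suspend request for the device. */
+		pm_schedule_suspend(dev, 0);
+		return 0;
+	}
+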
The helper functions provided by the PM core, described in Section 4, guarantee
that the following constraints are met with respect to the bus type's run-time
@@ -238,41 +239,41 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
removing the device from device hierarchy
int pm_runtime_idle(struct device *dev);
- - execute ->runtime_idle() for the device's bus type; returns 0 on success
- or error code on failure, where -EINPROGRESS means that ->runtime_idle()
- is already being executed
+ - execute the subsystem-level idle callback for the device; returns 0 on
+ success or error code on failure, where -EINPROGRESS means that
+ ->runtime_idle() is already being executed
int pm_runtime_suspend(struct device *dev);
- - execute ->runtime_suspend() for the device's bus type; returns 0 on
+ - execute the subsystem-level suspend callback for the device; returns 0 on
success, 1 if the device's run-time PM status was already 'suspended', or
error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt
to suspend the device again in future
int pm_runtime_resume(struct device *dev);
- - execute ->runtime_resume() for the device's bus type; returns 0 on
+ - execute the subsystem-level resume callback for the device; returns 0 on
success, 1 if the device's run-time PM status was already 'active' or
error code on failure, where -EAGAIN means it may be safe to attempt to
resume the device again in future, but 'power.runtime_error' should be
checked additionally
int pm_request_idle(struct device *dev);
- - submit a request to execute ->runtime_idle() for the device's bus type
- (the request is represented by a work item in pm_wq); returns 0 on success
- or error code if the request has not been queued up
+ - submit a request to execute the subsystem-level idle callback for the
+ device (the request is represented by a work item in pm_wq); returns 0 on
+ success or error code if the request has not been queued up
int pm_schedule_suspend(struct device *dev, unsigned int delay);
- - schedule the execution of ->runtime_suspend() for the device's bus type
- in future, where 'delay' is the time to wait before queuing up a suspend
- work item in pm_wq, in milliseconds (if 'delay' is zero, the work item is
- queued up immediately); returns 0 on success, 1 if the device's PM
+ - schedule the execution of the subsystem-level suspend callback for the
+ device in future, where 'delay' is the time to wait before queuing up a
+ suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work
+ item is queued up immediately); returns 0 on success, 1 if the device's PM
run-time status was already 'suspended', or error code if the request
hasn't been scheduled (or queued up if 'delay' is 0); if the execution of
->runtime_suspend() is already scheduled and not yet expired, the new
value of 'delay' will be used as the time to wait
int pm_request_resume(struct device *dev);
- - submit a request to execute ->runtime_resume() for the device's bus type
- (the request is represented by a work item in pm_wq); returns 0 on
+ - submit a request to execute the subsystem-level resume callback for the
+ device (the request is represented by a work item in pm_wq); returns 0 on
success, 1 if the device's run-time PM status was already 'active', or
error code if the request hasn't been queued up
@@ -303,12 +304,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
run-time PM callbacks described in Section 2
int pm_runtime_disable(struct device *dev);
- - prevent the run-time PM helper functions from running the device bus
- type's run-time PM callbacks, make sure that all of the pending run-time
- PM operations on the device are either completed or canceled; returns
- 1 if there was a resume request pending and it was necessary to execute
- ->runtime_resume() for the device's bus type to satisfy that request,
- otherwise 0 is returned
+ - prevent the run-time PM helper functions from running subsystem-level
+ run-time PM callbacks for the device, make sure that all of the pending
+ run-time PM operations on the device are either completed or canceled;
+ returns 1 if there was a resume request pending and it was necessary to
+ execute the subsystem-level resume callback for the device to satisfy that
+ request, otherwise 0 is returned
void pm_suspend_ignore_children(struct device *dev, bool enable);
- set/unset the power.ignore_children flag of the device
@@ -378,5 +379,55 @@ pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts,
they will fail returning -EAGAIN, because the device's usage counter is
incremented by the core before executing ->probe() and ->remove(). Still, it
may be desirable to suspend the device as soon as ->probe() or ->remove() has
-finished, so the PM core uses pm_runtime_idle_sync() to invoke the device bus
-type's ->runtime_idle() callback at that time.
+finished, so the PM core uses pm_runtime_idle_sync() to invoke the
+subsystem-level idle callback for the device at that time.
+
+6. Run-time PM and System Sleep
+
+Run-time PM and system sleep (i.e., system suspend and hibernation, also known
+as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of
+ways. If a device is active when a system sleep starts, everything is
+straightforward. But what should happen if the device is already suspended?
+
+The device may have different wake-up settings for run-time PM and system sleep.
+For example, remote wake-up may be enabled for run-time suspend but disallowed
+for system sleep (device_may_wakeup(dev) returns 'false'). When this happens,
+the subsystem-level system suspend callback is responsible for changing the
+device's wake-up setting (it may leave that to the device driver's system
+suspend routine). It may be necessary to resume the device and suspend it again
+in order to do so. The same is true if the driver uses different power levels
+or other settings for run-time suspend and system sleep.
+
+During system resume, devices generally should be brought back to full power,
+even if they were suspended before the system sleep began. There are several
+reasons for this, including:
+
+ * The device might need to switch power levels, wake-up settings, etc.
+
+ * Remote wake-up events might have been lost by the firmware.
+
+ * The device's children may need the device to be at full power in order
+ to resume themselves.
+
+ * The driver's idea of the device state may not agree with the device's
+ physical state. This can happen during resume from hibernation.
+
+ * The device might need to be reset.
+
+ * Even though the device was suspended, if its usage counter was > 0 then most
+ likely it would need a run-time resume in the near future anyway.
+
+ * Always going back to full power is simplest.
+
+If the device was suspended before the sleep began, then its run-time PM status
+will have to be updated to reflect the actual post-system sleep status. The way
+to do this is:
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+The PM core always increments the run-time usage counter before calling the
+->prepare() callback and decrements it after calling the ->complete() callback.
+Hence disabling run-time PM temporarily like this will not cause any run-time
+suspend callbacks to be lost.
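+
+For illustration only (foo_restore_state() is a hypothetical helper), a driver
+or subsystem system resume handler that knows the hardware is back at full
+power might update the status like this:
+
+	static int foo_resume(struct device *dev)
+	{
+		foo_restore_state(dev);
+
+		/* The device is at full power now; refresh its run-time PM status. */
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+		return 0;
+	}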
diff --git a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt b/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
new file mode 100644
index 00000000000..515ebcf1b97
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
@@ -0,0 +1,93 @@
+PPC440SPe DMA/XOR (DMA Controller and XOR Accelerator)
+
+The device nodes needed for operation of the ppc440spe-adma driver
+are specified here. These are the I2O/DMA, DMA and XOR nodes
+for the DMA engines, and the Memory Queue Module node. The latter is
+used by the ADMA driver to configure the RAID-6 hardware capabilities
+of the PPC440SPe. In addition to the nodes and properties described
+below, the ranges property of the PLB node must specify ranges for the
+DMA devices.
+
+ i) The I2O node
+
+ Required properties:
+
+ - compatible : "ibm,i2o-440spe";
+ - reg : <registers mapping>
+ - dcr-reg : <DCR registers range>
+
+ Example:
+
+ I2O: i2o@400100000 {
+ compatible = "ibm,i2o-440spe";
+ reg = <0x00000004 0x00100000 0x100>;
+ dcr-reg = <0x060 0x020>;
+ };
+
+
+ ii) The DMA node
+
+ Required properties:
+
+ - compatible : "ibm,dma-440spe";
+ - cell-index : 1 cell, hardware index of the DMA engine
+ (typically 0x0 and 0x1 for DMA0 and DMA1)
+ - reg : <registers mapping>
+ - dcr-reg : <DCR registers range>
+ - interrupts : <interrupt mapping for DMA0/1 interrupt sources:
+ 2 sources: DMAx CS FIFO Needs Service IRQ (on UIC0)
+ and DMA Error IRQ (on UIC1). The latter is common
+ for both DMA engines>.
+ - interrupt-parent : needed for interrupt mapping
+
+ Example:
+
+ DMA0: dma0@400100100 {
+ compatible = "ibm,dma-440spe";
+ cell-index = <0>;
+ reg = <0x00000004 0x00100100 0x100>;
+ dcr-reg = <0x060 0x020>;
+ interrupt-parent = <&DMA0>;
+ interrupts = <0 1>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = <
+ 0 &UIC0 0x14 4
+ 1 &UIC1 0x16 4>;
+ };
+
+
+ iii) XOR Accelerator node
+
+ Required properties:
+
+ - compatible : "amcc,xor-accelerator";
+ - reg : <registers mapping>
+ - interrupts : <interrupt mapping for XOR interrupt source>
+ - interrupt-parent : for interrupt mapping
+
+ Example:
+
+ xor-accel@400200000 {
+ compatible = "amcc,xor-accelerator";
+ reg = <0x00000004 0x00200000 0x400>;
+ interrupt-parent = <&UIC1>;
+ interrupts = <0x1f 4>;
+ };
+
+
+ iv) Memory Queue Module node
+
+ Required properties:
+
+ - compatible : "ibm,mq-440spe";
+ - dcr-reg : <DCR registers range>
+
+ Example:
+
+ MQ0: mq {
+ compatible = "ibm,mq-440spe";
+ dcr-reg = <0x040 0x020>;
+ };
+
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt
index e8b5bc24d0a..39e941515a3 100644
--- a/Documentation/powerpc/dts-bindings/fsl/board.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/board.txt
@@ -20,12 +20,16 @@ Required properities:
- compatible : should be "fsl,fpga-pixis".
- reg : should contain the address and the length of the FPPGA register
set.
+- interrupt-parent: should specify phandle for the interrupt controller.
+- interrupts : should specify event (wakeup) IRQ.
Example (MPC8610HPCD):
board-control@e8000000 {
compatible = "fsl,fpga-pixis";
reg = <0xe8000000 32>;
+ interrupt-parent = <&mpic>;
+ interrupts = <8 8>;
};
* Freescale BCSR GPIO banks
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
index cabc780f725..5c6602dbfdc 100644
--- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
@@ -103,7 +103,22 @@ fsl,mpc5200-gpt nodes
---------------------
On the mpc5200 and 5200b, GPT0 has a watchdog timer function. If the board
design supports the internal wdt, then the device node for GPT0 should
-include the empty property 'fsl,has-wdt'.
+include the empty property 'fsl,has-wdt'. Note that this does not activate
+the watchdog. The timer will function as a GPT if the timer API is used, and
+it will function as a watchdog if the watchdog device is used. The watchdog
+mode has priority over the GPT mode, i.e. if the watchdog is activated, any
+GPT API call to this timer will fail with -EBUSY.
+
+If you add the property
+ fsl,wdt-on-boot = <n>;
+GPT0 will be marked as an in-use watchdog, i.e. every GPT access to it will be
+blocked. If n>0, the watchdog is started with a timeout of n seconds. If n=0,
+the configuration of the watchdog is not touched. This is useful in two cases:
+- just mark GPT0 as a watchdog, blocking GPT accesses, and configure it later;
+- do not touch a configuration assigned by the boot loader, which supervises
+ the boot process itself.
+
+The watchdog will respect the CONFIG_WATCHDOG_NOWAYOUT option.
An mpc5200-gpt can be used as a single line GPIO controller. To do so,
add the following properties to the gpt node:
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpic.txt b/Documentation/powerpc/dts-bindings/fsl/mpic.txt
new file mode 100644
index 00000000000..71e39cf3215
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/fsl/mpic.txt
@@ -0,0 +1,42 @@
+* OpenPIC and its interrupt numbers on Freescale's e500/e600 cores
+
+The OpenPIC specification does not specify which interrupt source has to
+become which interrupt number. This is up to the software implementation
+of the interrupt controller. The only requirement is that every
+interrupt source has to have a unique interrupt number / vector number.
+To accomplish this the current implementation assigns the number zero to
+the first source, the number one to the second source and so on until
+all interrupt sources have their unique number.
+Usually the assigned vector number equals the interrupt number mentioned
+in the documentation for a given core / CPU. This is, however, not true
+for the e500 cores (MPC85XX CPUs), where the documentation distinguishes
+between internal and external interrupt sources and starts counting at
+zero for both of them.
+
+So what to write for external interrupt source X or internal interrupt
+source Y into the device tree? Here is an example:
+
+The memory map for the interrupt controller in the MPC8544[0] shows
+that the first interrupt source starts at 0x5_0000 (PIC Register Address
+Map-Interrupt Source Configuration Registers). This source therefore
+becomes interrupt number zero:
+ External interrupt 0 = interrupt number 0
+ External interrupt 1 = interrupt number 1
+ External interrupt 2 = interrupt number 2
+ ...
+Every interrupt source occupies 0x20 bytes of register space, so to get
+its interrupt number it is sufficient to shift the lower 16 bits of its
+address to the right by five. So for external interrupt 10 we have:
+ 0x0140 >> 5 = 10
+
+After the external sources, the internal sources follow. The in-core I2C
+controller on the MPC8544, for instance, has internal source number
+27. To obtain its interrupt number we take the lower 16 bits of its memory
+address (0x5_0560) and shift them right:
+ 0x0560 >> 5 = 43
+
+Therefore the I2C device node for the MPC8544 CPU has to have the
+interrupt number 43 specified in the device tree.
+
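+The calculation can be written compactly; a small sketch (illustration only,
+not kernel code):
+
+	/* vector number = (low 16 bits of the register offset) >> 5 */
+	unsigned int mpc85xx_vector(unsigned long reg_offset)
+	{
+		return (reg_offset & 0xffff) >> 5;
+	}
+
+	/* mpc85xx_vector(0x50560) == 43, the I2C interrupt number above */
+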
+[0] MPC8544E PowerQUICCTM III, Integrated Host Processor Family Reference Manual
+ MPC8544ERM Rev. 1 10/2007
diff --git a/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt b/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt
new file mode 100644
index 00000000000..b558585b1aa
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt
@@ -0,0 +1,109 @@
+
+Nintendo GameCube device tree
+=============================
+
+1) The "flipper" node
+
+ This node represents the multi-function "Flipper" chip, which packages
+ many of the devices found in the Nintendo GameCube.
+
+ Required properties:
+
+ - compatible : Should be "nintendo,flipper"
+
+1.a) The Video Interface (VI) node
+
+ Represents the interface between the graphics processor and an external
+ video encoder.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-vi"
+ - reg : should contain the VI registers location and length
+ - interrupts : should contain the VI interrupt
+
+1.b) The Processor Interface (PI) node
+
+ Represents the data and control interface between the main processor
+ and graphics and audio processor.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-pi"
+ - reg : should contain the PI registers location and length
+
+1.b.i) The "Flipper" interrupt controller node
+
+ Represents the interrupt controller within the "Flipper" chip.
+ The node for the "Flipper" interrupt controller must be placed under
+ the PI node.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-pic"
+
+1.c) The Digital Signal Processor (DSP) node
+
+ Represents the digital signal processor interface, designed to offload
+ audio related tasks.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-dsp"
+ - reg : should contain the DSP registers location and length
+ - interrupts : should contain the DSP interrupt
+
+1.c.i) The Auxiliary RAM (ARAM) node
+
+ Represents the non-CPU-addressable RAM designed mainly to store
+ audio-related information.
+ The ARAM node must be placed under the DSP node.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-aram"
+ - reg : should contain the ARAM start (zero-based) and length
+
+1.d) The Disk Interface (DI) node
+
+ Represents the interface used to communicate with mass storage devices.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-di"
+ - reg : should contain the DI registers location and length
+ - interrupts : should contain the DI interrupt
+
+1.e) The Audio Interface (AI) node
+
+ Represents the interface to the external 16-bit stereo digital-to-analog
+ converter.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-ai"
+ - reg : should contain the AI registers location and length
+ - interrupts : should contain the AI interrupt
+
+1.f) The Serial Interface (SI) node
+
+ Represents the interface to the four single bit serial interfaces.
+ The SI is a proprietary serial interface used normally to control gamepads.
+ It's NOT an RS232-type interface.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-si"
+ - reg : should contain the SI registers location and length
+ - interrupts : should contain the SI interrupt
+
+1.g) The External Interface (EXI) node
+
+ Represents the multi-channel SPI-like interface.
+
+ Required properties:
+
+ - compatible : should be "nintendo,flipper-exi"
+ - reg : should contain the EXI registers location and length
+ - interrupts : should contain the EXI interrupt
+
diff --git a/Documentation/powerpc/dts-bindings/nintendo/wii.txt b/Documentation/powerpc/dts-bindings/nintendo/wii.txt
new file mode 100644
index 00000000000..a7e155a023b
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/nintendo/wii.txt
@@ -0,0 +1,184 @@
+
+Nintendo Wii device tree
+========================
+
+0) The root node
+
+ This node represents the Nintendo Wii video game console.
+
+ Required properties:
+
+ - model : Should be "nintendo,wii"
+ - compatible : Should be "nintendo,wii"
+
+1) The "hollywood" node
+
+ This node represents the multi-function "Hollywood" chip, which packages
+ many of the devices found in the Nintendo Wii.
+
+ Required properties:
+
+ - compatible : Should be "nintendo,hollywood"
+
+1.a) The Video Interface (VI) node
+
+ Represents the interface between the graphics processor and an external
+ video encoder.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-vi","nintendo,flipper-vi"
+ - reg : should contain the VI registers location and length
+ - interrupts : should contain the VI interrupt
+
+1.b) The Processor Interface (PI) node
+
+ Represents the data and control interface between the main processor
+ and graphics and audio processor.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-pi","nintendo,flipper-pi"
+ - reg : should contain the PI registers location and length
+
+1.b.i) The "Flipper" interrupt controller node
+
+ Represents the "Flipper" interrupt controller within the "Hollywood" chip.
+ The node for the "Flipper" interrupt controller must be placed under
+ the PI node.
+
+ Required properties:
+
+ - #interrupt-cells : <1>
+ - compatible : should be "nintendo,flipper-pic"
+ - interrupt-controller
+
+1.c) The Digital Signal Processor (DSP) node
+
+ Represents the digital signal processor interface, designed to offload
+ audio related tasks.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-dsp","nintendo,flipper-dsp"
+ - reg : should contain the DSP registers location and length
+ - interrupts : should contain the DSP interrupt
+
+1.d) The Serial Interface (SI) node
+
+ Represents the interface to the four single bit serial interfaces.
+ The SI is a proprietary serial interface used normally to control gamepads.
+ It's NOT an RS232-type interface.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-si","nintendo,flipper-si"
+ - reg : should contain the SI registers location and length
+ - interrupts : should contain the SI interrupt
+
+1.e) The Audio Interface (AI) node
+
+ Represents the interface to the external 16-bit stereo digital-to-analog
+ converter.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-ai","nintendo,flipper-ai"
+ - reg : should contain the AI registers location and length
+ - interrupts : should contain the AI interrupt
+
+1.f) The External Interface (EXI) node
+
+ Represents the multi-channel SPI-like interface.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-exi","nintendo,flipper-exi"
+ - reg : should contain the EXI registers location and length
+ - interrupts : should contain the EXI interrupt
+
+1.g) The Open Host Controller Interface (OHCI) nodes
+
+ Represent the USB 1.x Open Host Controller Interfaces.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-usb-ohci","usb-ohci"
+ - reg : should contain the OHCI registers location and length
+ - interrupts : should contain the OHCI interrupt
+
+1.h) The Enhanced Host Controller Interface (EHCI) node
+
+ Represents the USB 2.0 Enhanced Host Controller Interface.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-usb-ehci","usb-ehci"
+ - reg : should contain the EHCI registers location and length
+ - interrupts : should contain the EHCI interrupt
+
+1.i) The Secure Digital Host Controller Interface (SDHCI) nodes
+
+ Represent the Secure Digital Host Controller Interfaces.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-sdhci","sdhci"
+ - reg : should contain the SDHCI registers location and length
+ - interrupts : should contain the SDHCI interrupt
+
+1.j) The Inter-Processor Communication (IPC) node
+
+ Represents the Inter-Processor Communication interface. This interface
+ enables communication between the Broadway and the Starlet processors.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-ipc"
+ - reg : should contain the IPC registers location and length
+ - interrupts : should contain the IPC interrupt
+
+1.k) The "Hollywood" interrupt controller node
+
+ Represents the "Hollywood" interrupt controller within the
+ "Hollywood" chip.
+
+ Required properties:
+
+ - #interrupt-cells : <1>
+ - compatible : should be "nintendo,hollywood-pic"
+ - reg : should contain the controller registers location and length
+ - interrupt-controller
+ - interrupts : should contain the cascade interrupt of the "flipper" pic
+ - interrupt-parent: should contain the phandle of the "flipper" pic
+
+1.l) The General Purpose I/O (GPIO) controller node
+
+ Represents the dual access 32 GPIO controller interface.
+
+ Required properties:
+
+ - #gpio-cells : <2>
+ - compatible : should be "nintendo,hollywood-gpio"
+ - reg : should contain the GPIO controller registers location and length
+ - gpio-controller
+
+1.m) The control node
+
+ Represents the control interface used to set up several miscellaneous
+ settings of the "Hollywood" chip like boot memory mappings, resets,
+ disk interface mode, etc.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-control"
+ - reg : should contain the control registers location and length
+
+1.n) The Disk Interface (DI) node
+
+ Represents the interface used to communicate with mass storage devices.
+
+ Required properties:
+
+ - compatible : should be "nintendo,hollywood-di"
+ - reg : should contain the DI registers location and length
+ - interrupts : should contain the DI interrupt
+
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/powerpc/dts-bindings/xilinx.txt
index 80339fe4300..ea68046bb9c 100644
--- a/Documentation/powerpc/dts-bindings/xilinx.txt
+++ b/Documentation/powerpc/dts-bindings/xilinx.txt
@@ -292,4 +292,15 @@
- reg-offset : A value of 3 is required
- reg-shift : A value of 2 is required
+ vii) Xilinx USB Host controller
+
+ The Xilinx USB host controller is EHCI-compatible but with a different
+ base address for the EHCI registers, and it is always a big-endian
+ USB host controller. The hardware can be configured as high-speed only,
+ or as a high-speed/full-speed hybrid.
+
+ Required properties:
+ - xlnx,support-usb-fs: A value of 0 means the core is built as high-speed
+ only. A value of 1 means the core also supports
+ full-speed devices.
diff --git a/Documentation/serial/hayes-esp.txt b/Documentation/serial/hayes-esp.txt
deleted file mode 100644
index 09b5d585675..00000000000
--- a/Documentation/serial/hayes-esp.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-HAYES ESP DRIVER VERSION 2.1
-
-A big thanks to the people at Hayes, especially Alan Adamson. Their support
-has enabled me to provide enhancements to the driver.
-
-Please report your experiences with this driver to me (arobinso@nyx.net). I
-am looking for both positive and negative feedback.
-
-*** IMPORTANT CHANGES FOR 2.1 ***
-Support for PIO mode. Five situations will cause PIO mode to be used:
-1) A multiport card is detected. PIO mode will always be used. (8 port cards
-do not support DMA).
-2) The DMA channel is set to an invalid value (anything other than 1 or 3).
-3) The DMA buffer/channel could not be allocated. The port will revert to PIO
-mode until it is reopened.
-4) Less than a specified number of bytes need to be transferred to/from the
-FIFOs. PIO mode will be used for that transfer only.
-5) A port needs to do a DMA transfer and another port is already using the
-DMA channel. PIO mode will be used for that transfer only.
-
-Since the Hayes ESP seems to conflict with other cards (notably sound cards)
-when using DMA, DMA is turned off by default. To use DMA, it must be turned
-on explicitly, either with the "dma=" option described below or with
-setserial. A multiport card can be forced into DMA mode by using setserial;
-however, most multiport cards don't support DMA.
-
-The latest version of setserial allows the enhanced configuration of the ESP
-card to be viewed and modified.
-***
-
-This package contains the files needed to compile a module to support the Hayes
-ESP card. The drivers are basically a modified version of the serial drivers.
-
-Features:
-
-- Uses the enhanced mode of the ESP card, allowing a wider range of
- interrupts and features than compatibility mode
-- Uses DMA and 16 bit PIO mode to transfer data to and from the ESP's FIFOs,
- reducing CPU load
-- Supports primary and secondary ports
-
-
-If the driver is compiled as a module, the IRQs to use can be specified by
-using the irq= option. The format is:
-
-irq=[0x100],[0x140],[0x180],[0x200],[0x240],[0x280],[0x300],[0x380]
-
-The address in brackets is the base address of the card. The IRQ of
-nonexistent cards can be set to 0. If an IRQ of a card that does exist is set
-to 0, the driver will attempt to guess at the correct IRQ. For example, to set
-the IRQ of the card at address 0x300 to 12, the insmod command would be:
-
-insmod esp irq=0,0,0,0,0,0,12,0
-
-The custom divisor can be set by using the divisor= option. The format is the
-same as for the irq= option. Each divisor value is a series of hex digits,
-with each digit representing the divisor to use for a corresponding port. The
-divisor value is constructed RIGHT TO LEFT. Specifying a nonzero divisor value
-will automatically set the spd_cust flag. To calculate the divisor to use for
-a certain baud rate, divide the port's base baud (generally 921600) by the
-desired rate. For example, to set the divisor of the primary port at 0x300 to
-4 and the divisor of the secondary port at 0x308 to 8, the insmod command would
-be:
-
-insmod esp divisor=0,0,0,0,0,0,0x84,0
-
-The dma= option can be used to set the DMA channel. The channel can be either
-1 or 3. Specifying any other value will force the driver to use PIO mode.
-For example, to set the DMA channel to 3, the insmod command would be:
-
-insmod esp dma=3
-
-The rx_trigger= and tx_trigger= options can be used to set the FIFO trigger
-levels. They specify when the ESP card should send an interrupt. Larger
-values will decrease the number of interrupts; however, a value too high may
-result in data loss. Valid values are 1 through 1023, with 768 being the
-default. For example, to set the receive trigger level to 512 bytes and the
-transmit trigger level to 700 bytes, the insmod command would be:
-
-insmod esp rx_trigger=512 tx_trigger=700
-
-The flow_off= and flow_on= options can be used to set the hardware flow off/
-flow on levels. The flow on level must be lower than the flow off level, and
-the flow off level should be higher than rx_trigger. Valid values are 1
-through 1023, with 1016 being the default flow off level and 944 being the
-default flow on level. For example, to set the flow off level to 1000 bytes
-and the flow on level to 935 bytes, the insmod command would be:
-
-insmod esp flow_off=1000 flow_on=935
-
-The rx_timeout= option can be used to set the receive timeout value. This
-value indicates how long after receiving the last character that the ESP card
-should wait before signalling an interrupt. Valid values are 0 though 255,
-with 128 being the default. A value too high will increase latency, and a
-value too low will cause unnecessary interrupts. For example, to set the
-receive timeout to 255, the insmod command would be:
-
-insmod esp rx_timeout=255
-
-The pio_threshold= option sets the threshold (in number of characters) for
-using PIO mode instead of DMA mode. For example, if this value is 32,
-transfers of 32 bytes or less will always use PIO mode.
-
-insmod esp pio_threshold=32
-
-Multiple options can be listed on the insmod command line by separating each
-option with a space. For example:
-
-insmod esp dma=3 trigger=512
-
-The esp module can be automatically loaded when needed. To cause this to
-happen, add the following lines to /etc/modprobe.conf (replacing the last line
-with options for your configuration):
-
-alias char-major-57 esp
-alias char-major-58 esp
-options esp irq=0,0,0,0,0,0,3,0 divisor=0,0,0,0,0,0,0x4,0
-
-You may also need to run 'depmod -a'.
-
-Devices must be created manually. To create the devices, note the output from
-the module after it is inserted. The output will appear in the location where
-kernel messages usually appear (usually /var/adm/messages). Create two devices
-for each 'tty' mentioned, one with major of 57 and the other with major of 58.
-The minor number should be the same as the tty number reported. The commands
-would be (replace ? with the tty number):
-
-mknod /dev/ttyP? c 57 ?
-mknod /dev/cup? c 58 ?
-
-For example, if the following line appears:
-
-Oct 24 18:17:23 techno kernel: ttyP8 at 0x0140 (irq = 3) is an ESP primary port
-
-...two devices should be created:
-
-mknod /dev/ttyP8 c 57 8
-mknod /dev/cup8 c 58 8
-
-You may need to set the permissions on the devices:
-
-chmod 666 /dev/ttyP*
-chmod 666 /dev/cup*
-
-The ESP module and the serial module should not conflict (they can be used at
-the same time). After the ESP module has been loaded the ports on the ESP card
-will no longer be accessible by the serial driver.
-
-If I/O errors are experienced when accessing the port, check for IRQ and DMA
-conflicts ('cat /proc/interrupts' and 'cat /proc/dma' for a list of IRQs and
-DMAs currently in use).
-
-Enjoy!
-Andrew J. Robinson <arobinso@nyx.net>
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
index 8e65c4498c5..5e5349a4fcd 100644
--- a/Documentation/serial/tty.txt
+++ b/Documentation/serial/tty.txt
@@ -42,7 +42,8 @@ TTY side interfaces:
open() - Called when the line discipline is attached to
the terminal. No other call into the line
discipline for this tty will occur until it
- completes successfully. Can sleep.
+ completes successfully. Returning an error will
+ prevent the ldisc from being attached. Can sleep.
close() - This is called on a terminal when the line
discipline is being unplugged. At the point of
@@ -52,7 +53,7 @@ close() - This is called on a terminal when the line
hangup() - Called when the tty line is hung up.
The line discipline should cease I/O to the tty.
No further calls into the ldisc code will occur.
- Can sleep.
+ The return value is ignored. Can sleep.
write() - A process is writing data through the line
discipline. Multiple write calls are serialized
@@ -83,6 +84,10 @@ ioctl() - Called when an ioctl is handed to the tty layer
that might be for the ldisc. Multiple ioctl calls
may occur in parallel. May sleep.
+compat_ioctl() - Called when a 32 bit ioctl is handed to the tty layer
+ that might be for the ldisc. Multiple ioctl calls
+ may occur in parallel. May sleep.
+
Driver Side Interfaces:
receive_buf() - Hand buffers of bytes from the driver to the ldisc
diff --git a/Documentation/spinlocks.txt b/Documentation/spinlocks.txt
index 619699dde59..178c831b907 100644
--- a/Documentation/spinlocks.txt
+++ b/Documentation/spinlocks.txt
@@ -1,73 +1,8 @@
-SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
-are hence deprecated.
+Lesson 1: Spin locks
-Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
-__SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate for static
-initialization.
-
-Most of the time, you can simply turn:
-
- static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
-
-into:
-
- static DEFINE_SPINLOCK(xxx_lock);
-
-Static structure member variables go from:
-
- struct foo bar {
- .lock = SPIN_LOCK_UNLOCKED;
- };
-
-to:
-
- struct foo bar {
- .lock = __SPIN_LOCK_UNLOCKED(bar.lock);
- };
-
-Declaration of static rw_locks undergo a similar transformation.
-
-Dynamic initialization, when necessary, may be performed as
-demonstrated below.
-
- spinlock_t xxx_lock;
- rwlock_t xxx_rw_lock;
-
- static int __init xxx_init(void)
- {
- spin_lock_init(&xxx_lock);
- rwlock_init(&xxx_rw_lock);
- ...
- }
-
- module_init(xxx_init);
-
-The following discussion is still valid, however, with the dynamic
-initialization of spinlocks or with DEFINE_SPINLOCK, etc., used
-instead of SPIN_LOCK_UNLOCKED.
-
------------------------
-
-On Fri, 2 Jan 1998, Doug Ledford wrote:
->
-> I'm working on making the aic7xxx driver more SMP friendly (as well as
-> importing the latest FreeBSD sequencer code to have 7895 support) and wanted
-> to get some info from you. The goal here is to make the various routines
-> SMP safe as well as UP safe during interrupts and other manipulating
-> routines. So far, I've added a spin_lock variable to things like my queue
-> structs. Now, from what I recall, there are some spin lock functions I can
-> use to lock these spin locks from other use as opposed to a (nasty)
-> save_flags(); cli(); stuff; restore_flags(); construct. Where do I find
-> these routines and go about making use of them? Do they only lock on a
-> per-processor basis or can they also lock say an interrupt routine from
-> mucking with a queue if the queue routine was manipulating it when the
-> interrupt occurred, or should I still use a cli(); based construct on that
-> one?
-
-See <asm/spinlock.h>. The basic version is:
-
- spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+The most basic primitive for locking is the spinlock.
+static DEFINE_SPINLOCK(xxx_lock);
unsigned long flags;
@@ -75,13 +10,11 @@ See <asm/spinlock.h>. The basic version is:
... critical section here ..
spin_unlock_irqrestore(&xxx_lock, flags);
-and the above is always safe. It will disable interrupts _locally_, but the
+The above is always safe. It will disable interrupts _locally_, but the
spinlock itself will guarantee the global lock, so it will guarantee that
there is only one thread-of-control within the region(s) protected by that
-lock.
-
-Note that it works well even under UP - the above sequence under UP
-essentially is just the same as doing a
+lock. This works well even under UP. The above sequence under UP
+essentially is just the same as doing
unsigned long flags;
@@ -91,15 +24,13 @@ essentially is just the same as doing a
so the code does _not_ need to worry about UP vs SMP issues: the spinlocks
work correctly under both (and spinlocks are actually more efficient on
-architectures that allow doing the "save_flags + cli" in one go because I
-don't export that interface normally).
+architectures that allow doing the "save_flags + cli" in one operation).
+
+ NOTE! Implications of spin_locks for memory are further described in:
-NOTE NOTE NOTE! The reason the spinlock is so much faster than a global
-interrupt lock under SMP is exactly because it disables interrupts only on
-the local CPU. The spin-lock is safe only when you _also_ use the lock
-itself to do locking across CPU's, which implies that EVERYTHING that
-touches a shared variable has to agree about the spinlock they want to
-use.
+ Documentation/memory-barriers.txt
+ (5) LOCK operations.
+ (6) UNLOCK operations.
The above is usually pretty simple (you usually need and want only one
spinlock for most things - using more than one spinlock can make things a
@@ -120,20 +51,24 @@ and another sequence that does
then they are NOT mutually exclusive, and the critical regions can happen
at the same time on two different CPU's. That's fine per se, but the
critical regions had better be critical for different things (ie they
-can't stomp on each other).
+can't stomp on each other).
The above is a problem mainly if you end up mixing code - for example the
routines in ll_rw_block() tend to use cli/sti to protect the atomicity of
their actions, and if a driver uses spinlocks instead then you should
-think about issues like the above..
+think about issues like the above.
This is really the only really hard part about spinlocks: once you start
using spinlocks they tend to expand to areas you might not have noticed
before, because you have to make sure the spinlocks correctly protect the
shared data structures _everywhere_ they are used. The spinlocks are most
-easily added to places that are completely independent of other code (ie
-internal driver data structures that nobody else ever touches, for
-example).
+easily added to places that are completely independent of other code (for
+example, internal driver data structures that nobody else ever touches).
+
+ NOTE! The spin-lock is safe only when you _also_ use the lock itself
+ to do locking across CPU's, which implies that EVERYTHING that
+ touches a shared variable has to agree about the spinlock they want
+ to use.
----
@@ -141,13 +76,17 @@ Lesson 2: reader-writer spinlocks.
If your data accesses have a very natural pattern where you usually tend
to mostly read from the shared variables, the reader-writer locks
-(rw_lock) versions of the spinlocks are often nicer. They allow multiple
+(rw_lock) versions of the spinlocks are sometimes useful. They allow multiple
readers to be in the same critical region at once, but if somebody wants
-to change the variables it has to get an exclusive write lock. The
-routines look the same as above:
+to change the variables it has to get an exclusive write lock.
- rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
+ NOTE! reader-writer locks require more atomic memory operations than
+ simple spinlocks. Unless the reader critical section is long, you
+ are better off just using spinlocks.
+The routines look the same as above:
+
+ rwlock_t xxx_lock = RW_LOCK_UNLOCKED;
unsigned long flags;
@@ -159,18 +98,21 @@ routines look the same as above:
.. read and write exclusive access to the info ...
write_unlock_irqrestore(&xxx_lock, flags);
-The above kind of lock is useful for complex data structures like linked
-lists etc, especially when you know that most of the work is to just
-traverse the list searching for entries without changing the list itself,
-for example. Then you can use the read lock for that kind of list
-traversal, which allows many concurrent readers. Anything that _changes_
-the list will have to get the write lock.
+The above kind of lock may be useful for complex data structures like
+linked lists, especially searching for entries without changing the list
+itself. The read lock allows many concurrent readers. Anything that
+_changes_ the list will have to get the write lock.
+
+ NOTE! RCU is better for list traversal, but requires careful
+ attention to design detail (see Documentation/RCU/listRCU.txt).
-Note: you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
+Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
time need to do any changes (even if you don't do it every time), you have
-to get the write-lock at the very beginning. I could fairly easily add a
-primitive to create a "upgradeable" read-lock, but it hasn't been an issue
-yet. Tell me if you'd want one.
+to get the write-lock at the very beginning.
+
+ NOTE! We are working hard to remove reader-writer spinlocks in most
+ cases, so please don't add a new one without consensus. (Instead, see
+ Documentation/RCU/rcu.txt for complete information.)
----
@@ -233,4 +175,46 @@ indeed), while write-locks need to protect themselves against interrupts.
Linus
+----
+
+Reference information:
+
+For dynamic initialization, use spin_lock_init() or rwlock_init() as
+appropriate:
+
+ spinlock_t xxx_lock;
+ rwlock_t xxx_rw_lock;
+
+ static int __init xxx_init(void)
+ {
+ spin_lock_init(&xxx_lock);
+ rwlock_init(&xxx_rw_lock);
+ ...
+ }
+
+ module_init(xxx_init);
+
+For static initialization, use DEFINE_SPINLOCK() / DEFINE_RWLOCK() or
+__SPIN_LOCK_UNLOCKED() / __RW_LOCK_UNLOCKED() as appropriate.
+
+SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED are deprecated. These interfere
+with lockdep state tracking.
+
+Most of the time, you can simply turn:
+ static spinlock_t xxx_lock = SPIN_LOCK_UNLOCKED;
+into:
+ static DEFINE_SPINLOCK(xxx_lock);
+
+Static structure member variables go from:
+
+ struct foo bar {
+ .lock = SPIN_LOCK_UNLOCKED;
+ };
+
+to:
+ struct foo bar {
+ .lock = __SPIN_LOCK_UNLOCKED(bar.lock);
+ };
+
+Declarations of static rw_locks undergo a similar transformation.
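+
+For example (a sketch of the same transformation for an rwlock):
+
+	static rwlock_t xxx_rw_lock = RW_LOCK_UNLOCKED;
+
+becomes:
+
+	static DEFINE_RWLOCK(xxx_rw_lock);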
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 8f7a0e73ef4..3894eaa2348 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -19,6 +19,8 @@ Currently, these files might (depending on your configuration)
show up in /proc/sys/kernel:
- acpi_video_flags
- acct
+- bootloader_type [ X86 only ]
+- bootloader_version [ X86 only ]
- callhome [ S390 only ]
- auto_msgmni
- core_pattern
@@ -93,6 +95,35 @@ valid for 30 seconds.
==============================================================
+bootloader_type:
+
+x86 bootloader identification
+
+This gives the bootloader type number as indicated by the bootloader,
+shifted left by 4, and OR'd with the low four bits of the bootloader
+version. The reason for this encoding is that this used to match the
+type_of_loader field in the kernel header; the encoding is kept for
+backwards compatibility. That is, if the full bootloader type number
+is 0x15 and the full version number is 0x234, this file will contain
+the value 340 = 0x154.
+
+See the type_of_loader and ext_loader_type fields in
+Documentation/x86/boot.txt for additional information.
+
+==============================================================
+
+bootloader_version:
+
+x86 bootloader version
+
+The complete bootloader version number. In the example above, this
+file will contain the value 564 = 0x234.
+
+See the type_of_loader and ext_loader_ver fields in
+Documentation/x86/boot.txt for additional information.
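+
+A small user-space sketch (for illustration only) showing how the example
+values above are derived from the full type and version numbers:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int type = 0x15, version = 0x234;
+
+		/* bootloader_type: type shifted left by 4, OR'd with the
+		 * low four bits of the version */
+		printf("%u\n", (type << 4) | (version & 0xf));	/* prints 340 */
+
+		/* bootloader_version: the complete version number */
+		printf("%u\n", version);			/* prints 564 */
+		return 0;
+	}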
+
+==============================================================
+
callhome:
Controls the kernel's callhome behavior in case of a kernel panic.
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index a87dc277a5c..cb3d15bc1ae 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -206,6 +206,7 @@ passive
passive trip point for the zone. Activation is done by polling with
an interval of 1 second.
Unit: millidegrees Celsius
+ Valid values: 0 (disabled) or greater than 1000
RW, Optional
*****************************
diff --git a/Documentation/trace/events-kmem.txt b/Documentation/trace/events-kmem.txt
index 6ef2a8652e1..aa82ee4a5a8 100644
--- a/Documentation/trace/events-kmem.txt
+++ b/Documentation/trace/events-kmem.txt
@@ -1,7 +1,7 @@
Subsystem Trace Points: kmem
-The tracing system kmem captures events related to object and page allocation
-within the kernel. Broadly speaking there are four major subheadings.
+The kmem tracing system captures events related to object and page allocation
+within the kernel. Broadly speaking there are five major subheadings.
o Slab allocation of small objects of unknown type (kmalloc)
o Slab allocation of small objects of known type
@@ -9,7 +9,7 @@ within the kernel. Broadly speaking there are four major subheadings.
o Per-CPU Allocator Activity
o External Fragmentation
-This document will describe what each of the tracepoints are and why they
+This document describes what each of the tracepoints is and why they
might be useful.
1. Slab allocation of small objects of unknown type
@@ -34,7 +34,7 @@ kmem_cache_free call_site=%lx ptr=%p
These events are similar in usage to the kmalloc-related events except that
it is likely easier to pin the event down to a specific cache. At the time
of writing, no information is available on what slab is being allocated from,
-but the call_site can usually be used to extrapolate that information
+but the call_site can usually be used to extrapolate that information.
3. Page allocation
==================
@@ -80,9 +80,9 @@ event indicating whether it is for a percpu_refill or not.
When the per-CPU list is too full, a number of pages are freed, each one
which triggers a mm_page_pcpu_drain event.
-The individual nature of the events are so that pages can be tracked
+The individual nature of the events is so that pages can be tracked
between allocation and freeing. A number of drain or refill pages that occur
-consecutively imply the zone->lock being taken once. Large amounts of PCP
+consecutively imply the zone->lock being taken once. Large amounts of per-CPU
refills and drains could imply an imbalance between CPUs where too much work
is being concentrated in one place. It could also indicate that the per-CPU
lists should be a larger size. Finally, large amounts of refills on one CPU
@@ -102,6 +102,6 @@ is important.
Large numbers of this event implies that memory is fragmenting and
high-order allocations will start failing at some time in the future. One
-means of reducing the occurange of this event is to increase the size of
+means of reducing the occurrence of this event is to increase the size of
min_free_kbytes in increments of 3*pageblock_size*nr_online_nodes where
pageblock_size is usually the size of the default hugepage size.
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index ad642615ad4..c7c1dc2f801 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
Alan Stern <stern@rowland.harvard.edu>
- October 5, 2007
+ November 10, 2009
@@ -123,9 +123,9 @@ relevant attribute files are: wakeup, level, and autosuspend.
power/level
- This file contains one of three words: "on", "auto",
- or "suspend". You can write those words to the file
- to change the device's setting.
+ This file contains one of two words: "on" or "auto".
+ You can write those words to the file to change the
+ device's setting.
"on" means that the device should be resumed and
autosuspend is not allowed. (Of course, system
@@ -134,10 +134,10 @@ relevant attribute files are: wakeup, level, and autosuspend.
"auto" is the normal state in which the kernel is
allowed to autosuspend and autoresume the device.
- "suspend" means that the device should remain
- suspended, and autoresume is not allowed. (But remote
- wakeup may still be allowed, since it is controlled
- separately by the power/wakeup attribute.)
+ (In kernels up to 2.6.32, you could also specify
+ "suspend", meaning that the device should remain
+ suspended and autoresume was not allowed. This
+ setting is no longer supported.)
power/autosuspend
@@ -313,13 +313,14 @@ three of the methods listed above. In addition, a driver indicates
that it supports autosuspend by setting the .supports_autosuspend flag
in its usb_driver structure. It is then responsible for informing the
USB core whenever one of its interfaces becomes busy or idle. The
-driver does so by calling these five functions:
+driver does so by calling these six functions:
int usb_autopm_get_interface(struct usb_interface *intf);
void usb_autopm_put_interface(struct usb_interface *intf);
- int usb_autopm_set_interface(struct usb_interface *intf);
int usb_autopm_get_interface_async(struct usb_interface *intf);
void usb_autopm_put_interface_async(struct usb_interface *intf);
+ void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
+ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
The functions work by maintaining a counter in the usb_interface
structure. When intf->pm_usage_count is > 0 then the interface is
@@ -331,11 +332,13 @@ considered to be idle, and the kernel may autosuspend the device.
associated with the device itself rather than any of its interfaces.
This field is used only by the USB core.)
-The driver owns intf->pm_usage_count; it can modify the value however
-and whenever it likes. A nice aspect of the non-async usb_autopm_*
-routines is that the changes they make are protected by the usb_device
-structure's PM mutex (udev->pm_mutex); however drivers may change
-pm_usage_count without holding the mutex. Drivers using the async
+Drivers must not modify intf->pm_usage_count directly; its value
+should be changed only by using the functions listed above. Drivers
+are responsible for ensuring that the overall change to pm_usage_count
+during their lifetime balances out to 0 (it may be necessary for the
+disconnect method to call usb_autopm_put_interface() one or more times
+to fulfill this requirement). The first two routines use the PM mutex
+in struct usb_device for mutual exclusion; drivers using the async
routines are responsible for their own synchronization and mutual
exclusion.
@@ -347,11 +350,6 @@ exclusion.
attempts an autosuspend if the new value is <= 0 and the
device isn't suspended.
- usb_autopm_set_interface() leaves pm_usage_count alone.
- It attempts an autoresume if the value is > 0 and the device
- is suspended, and it attempts an autosuspend if the value is
- <= 0 and the device isn't suspended.
-
usb_autopm_get_interface_async() and
usb_autopm_put_interface_async() do almost the same things as
their non-async counterparts. The differences are: they do
@@ -360,13 +358,11 @@ exclusion.
such as an URB's completion handler, but when they return the
device will generally not yet be in the desired state.
-There also are a couple of utility routines drivers can use:
-
- usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
- usb_autopm_set_interface(), which will attempt an autosuspend.
-
- usb_autopm_disable() sets pm_usage_cnt to 1 and then calls
- usb_autopm_set_interface(), which will attempt an autoresume.
+ usb_autopm_get_interface_no_resume() and
+ usb_autopm_put_interface_no_suspend() merely increment or
+ decrement the pm_usage_count value; they do not attempt to
+ carry out an autoresume or an autosuspend. Hence they can be
+ called in an atomic context.
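+
+As an illustration only (the usbfoo_* names are hypothetical), a driver that
+supports autosuspend typically brackets its I/O like this:
+
+	static int usbfoo_do_io(struct usb_interface *intf)
+	{
+		int rc;
+
+		rc = usb_autopm_get_interface(intf);
+		if (rc < 0)
+			return rc;	/* resume (or get) failed */
+
+		rc = usbfoo_transfer(intf);	/* hypothetical I/O routine */
+
+		usb_autopm_put_interface(intf);
+		return rc;
+	}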
The conventional usage pattern is that a driver calls
usb_autopm_get_interface() in its open routine and
@@ -400,11 +396,11 @@ though, setting this flag won't cause the kernel to autoresume it.
Normally a driver would set this flag in its probe method, at which
time the device is guaranteed not to be autosuspended.)
-The usb_autopm_* routines have to run in a sleepable process context;
-they must not be called from an interrupt handler or while holding a
-spinlock. In fact, the entire autosuspend mechanism is not well geared
-toward interrupt-driven operation. However there is one thing a
-driver can do in an interrupt handler:
+The synchronous usb_autopm_* routines have to run in a sleepable
+process context; they must not be called from an interrupt handler or
+while holding a spinlock. In fact, the entire autosuspend mechanism
+is not well geared toward interrupt-driven operation. However there
+is one thing a driver can do in an interrupt handler:
usb_mark_last_busy(struct usb_device *udev);
@@ -423,15 +419,16 @@ an URB had completed too recently.
External suspend calls should never be allowed to fail in this way,
only autosuspend calls. The driver can tell them apart by checking
-udev->auto_pm; this flag will be set to 1 for internal PM events
-(autosuspend or autoresume) and 0 for external PM events.
+the PM_EVENT_AUTO bit in the message.event argument to the suspend
+method; this bit will be set for internal PM events (autosuspend) and
+clear for external PM events.
Many of the ingredients in the autosuspend framework are oriented
towards interfaces: The usb_interface structure contains the
pm_usage_cnt field, and the usb_autopm_* routines take an interface
pointer as their argument. But somewhat confusingly, a few of the
-pieces (usb_mark_last_busy() and udev->auto_pm) use the usb_device
-structure instead. Drivers need to keep this straight; they can call
+pieces (i.e., usb_mark_last_busy()) use the usb_device structure
+instead. Drivers need to keep this straight; they can call
interface_to_usbdev() to find the device structure for a given
interface.
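
The conventional open/close pattern described above can be sketched for a
hypothetical driver as follows (the skel_* names and the private structure
are invented for this example and are not taken from any real driver):

    #include <linux/usb.h>
    #include <linux/fs.h>

    struct skel_dev {
            struct usb_interface *intf;
            /* ... other driver state ... */
    };

    static int skel_open(struct inode *inode, struct file *file)
    {
            /* In a real driver this would be looked up first, e.g. via
             * usb_find_interface(), before being used here. */
            struct skel_dev *dev = file->private_data;
            int retval;

            /* Take a usage reference; this may autoresume the device,
             * so it must run in process context. */
            retval = usb_autopm_get_interface(dev->intf);
            if (retval)
                    return retval;
            return 0;
    }

    static int skel_release(struct inode *inode, struct file *file)
    {
            struct skel_dev *dev = file->private_data;

            /* Drop the reference taken in open(); the core may now
             * autosuspend the device if nothing else needs it. */
            usb_autopm_put_interface(dev->intf);
            return 0;
    }

Every successful open is balanced by exactly one put in release, so the
driver's net effect on pm_usage_count over its lifetime is zero, as the text
above requires.
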
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 319d9838e87..1800a62cf13 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -12,6 +12,7 @@ m5602 0402:5602 ALi Video Camera Controller
spca501 040a:0002 Kodak DVC-325
spca500 040a:0300 Kodak EZ200
zc3xx 041e:041e Creative WebCam Live!
+ov519 041e:4003 Video Blaster WebCam Go Plus
spca500 041e:400a Creative PC-CAM 300
sunplus 041e:400b Creative PC-CAM 600
sunplus 041e:4012 PC-Cam350
@@ -168,10 +169,14 @@ sunplus 055f:c650 Mustek MDC5500Z
zc3xx 055f:d003 Mustek WCam300A
zc3xx 055f:d004 Mustek WCam300 AN
conex 0572:0041 Creative Notebook cx11646
+ov519 05a9:0511 Video Blaster WebCam 3/WebCam Plus, D-Link USB Digital Video Camera
+ov519 05a9:0518 Creative WebCam
ov519 05a9:0519 OV519 Microphone
ov519 05a9:0530 OmniVision
+ov519 05a9:2800 OmniVision SuperCAM
ov519 05a9:4519 Webcam Classic
ov519 05a9:8519 OmniVision
+ov519 05a9:a511 D-Link USB Digital Video Camera
ov519 05a9:a518 D-Link DSB-C310 Webcam
sunplus 05da:1018 Digital Dream Enigma 1.3
stk014 05e1:0893 Syntek DV4000
@@ -187,7 +192,7 @@ ov534 06f8:3002 Hercules Blog Webcam
ov534 06f8:3003 Hercules Dualpix HD Weblog
sonixj 06f8:3004 Hercules Classic Silver
sonixj 06f8:3008 Hercules Deluxe Optical Glass
-pac7311 06f8:3009 Hercules Classic Link
+pac7302 06f8:3009 Hercules Classic Link
spca508 0733:0110 ViewQuest VQ110
spca501 0733:0401 Intel Create and Share
spca501 0733:0402 ViewQuest M318B
@@ -199,6 +204,7 @@ sunplus 0733:2221 Mercury Digital Pro 3.1p
sunplus 0733:3261 Concord 3045 spca536a
sunplus 0733:3281 Cyberpix S550V
spca506 0734:043b 3DeMon USB Capture aka
+ov519 0813:0002 Dual Mode USB Camera Plus
spca500 084d:0003 D-Link DSC-350
spca500 08ca:0103 Aiptek PocketDV
sunplus 08ca:0104 Aiptek PocketDVII 1.3
@@ -236,15 +242,15 @@ pac7311 093a:2603 Philips SPC 500 NC
pac7311 093a:2608 Trust WB-3300p
pac7311 093a:260e Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350
pac7311 093a:260f SnakeCam
-pac7311 093a:2620 Apollo AC-905
-pac7311 093a:2621 PAC731x
-pac7311 093a:2622 Genius Eye 312
-pac7311 093a:2624 PAC7302
-pac7311 093a:2626 Labtec 2200
-pac7311 093a:2628 Genius iLook 300
-pac7311 093a:2629 Genious iSlim 300
-pac7311 093a:262a Webcam 300k
-pac7311 093a:262c Philips SPC 230 NC
+pac7302 093a:2620 Apollo AC-905
+pac7302 093a:2621 PAC731x
+pac7302 093a:2622 Genius Eye 312
+pac7302 093a:2624 PAC7302
+pac7302 093a:2626 Labtec 2200
+pac7302 093a:2628 Genius iLook 300
+pac7302 093a:2629 Genious iSlim 300
+pac7302 093a:262a Webcam 300k
+pac7302 093a:262c Philips SPC 230 NC
jeilinj 0979:0280 Sakar 57379
zc3xx 0ac8:0302 Z-star Vimicro zc0302
vc032x 0ac8:0321 Vimicro generic vc0321
@@ -259,6 +265,7 @@ vc032x 0ac8:c002 Sony embedded vimicro
vc032x 0ac8:c301 Samsung Q1 Ultra Premium
spca508 0af9:0010 Hama USB Sightcam 100
spca508 0af9:0011 Hama USB Sightcam 100
+ov519 0b62:0059 iBOT2 Webcam
sonixb 0c45:6001 Genius VideoCAM NB
sonixb 0c45:6005 Microdia Sweex Mini Webcam
sonixb 0c45:6007 Sonix sn9c101 + Tas5110D
@@ -318,8 +325,10 @@ sn9c20x 0c45:62b3 PC Camera (SN9C202 + OV9655)
sn9c20x 0c45:62bb PC Camera (SN9C202 + OV7660)
sn9c20x 0c45:62bc PC Camera (SN9C202 + HV7131R)
sunplus 0d64:0303 Sunplus FashionCam DXG
+ov519 0e96:c001 TRUST 380 USB2 SPACEC@M
etoms 102c:6151 Qcam Sangha CIF
etoms 102c:6251 Qcam xxxxxx VGA
+ov519 1046:9967 W9967CF/W9968CF WebCam IC, Video Blaster WebCam Go
zc3xx 10fd:0128 Typhoon Webshot II USB 300k 0x0128
spca561 10fd:7e50 FlyCam Usb 100
zc3xx 10fd:8050 Typhoon Webshot II USB 300k
@@ -332,7 +341,12 @@ spca501 1776:501c Arowana 300K CMOS Camera
t613 17a1:0128 TASCORP JPEG Webcam, NGS Cyclops
vc032x 17ef:4802 Lenovo Vc0323+MI1310_SOC
pac207 2001:f115 D-Link DSB-C120
+sq905c 2770:9050 sq905c
+sq905c 2770:905c DualCamera
+sq905 2770:9120 Argus Digital Camera DC1512
+sq905c 2770:913d sq905c
spca500 2899:012c Toptro Industrial
+ov519 8020:ef04 ov519
spca508 8086:0110 Intel Easy PC Camera
spca500 8086:0630 Intel Pocket PC Camera
spca506 99fa:8988 Grandtec V.cap
diff --git a/Documentation/video4linux/sh_mobile_ceu_camera.txt b/Documentation/video4linux/sh_mobile_ceu_camera.txt
new file mode 100644
index 00000000000..2ae16349a78
--- /dev/null
+++ b/Documentation/video4linux/sh_mobile_ceu_camera.txt
@@ -0,0 +1,157 @@
+ Cropping and Scaling algorithm, used in the sh_mobile_ceu_camera driver
+ =======================================================================
+
+Terminology
+-----------
+
+sensor scales: horizontal and vertical scales, configured by the sensor driver
+host scales: -"- host driver
+combined scales: sensor_scale * host_scale
+
+
+Generic scaling / cropping scheme
+---------------------------------
+
+-1--
+|
+-2-- -\
+| --\
+| --\
++-5-- -\ -- -3--
+| ---\
+| --- -4-- -\
+| -\
+| - -6--
+|
+| - -6'-
+| -/
+| --- -4'- -/
+| ---/
++-5'- -/
+| -- -3'-
+| --/
+| --/
+-2'- -/
+|
+|
+-1'-
+
+Produced by user requests:
+
+S_CROP(left / top = (5) - (1), width / height = (5') - (5))
+S_FMT(width / height = (6') - (6))
+
+Here:
+
+(1) to (1') - whole max width or height
+(1) to (2) - sensor cropped left or top
+(2) to (2') - sensor cropped width or height
+(3) to (3') - sensor scale
+(3) to (4) - CEU cropped left or top
+(4) to (4') - CEU cropped width or height
+(5) to (5') - reverse sensor scale applied to CEU cropped width or height
+(2) to (5) - reverse sensor scale applied to CEU cropped left or top
+(6) to (6') - CEU scale - user window
+
+
+S_FMT
+-----
+
+Do not touch input rectangle - it is already optimal.
+
+1. Calculate current sensor scales:
+
+ scale_s = ((3') - (3)) / ((2') - (2))
+
+2. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at
+current sensor scales onto input window - this is user S_CROP:
+
+ width_u = (5') - (5) = ((4') - (4)) * scale_s
+
+3. Calculate new combined scales from "effective" input window to requested user
+window:
+
+ scale_comb = width_u / ((6') - (6))
+
+4. Calculate sensor output window by applying combined scales to real input
+window:
+
+ width_s_out = ((2') - (2)) / scale_comb
+
+5. Apply iterative sensor S_FMT for sensor output window.
+
+ subdev->video_ops->s_fmt(.width = width_s_out)
+
+6. Retrieve sensor output window (g_fmt)
+
+7. Calculate new sensor scales:
+
+ scale_s_new = ((3')_new - (3)_new) / ((2') - (2))
+
+8. Calculate new CEU crop - apply sensor scales to previously calculated
+"effective" crop:
+
+ width_ceu = (4')_new - (4)_new = width_u / scale_s_new
+ left_ceu = (4)_new - (3)_new = ((5) - (2)) / scale_s_new
+
+9. Use CEU cropping to crop to the new window:
+
+ ceu_crop(.width = width_ceu, .left = left_ceu)
+
+10. Use CEU scaling to scale to the requested user window:
+
+ scale_ceu = width_ceu / width
+
+
+S_CROP
+------
+
+If the old scale applied to the new crop is invalid, produce the nearest new scale possible.
+
+1. Calculate current combined scales.
+
+ scale_comb = (((4') - (4)) / ((6') - (6))) * (((2') - (2)) / ((3') - (3)))
+
+2. Apply iterative sensor S_CROP for new input window.
+
+3. If old combined scales applied to new crop produce an impossible user window,
+adjust scales to produce nearest possible window.
+
+ width_u_out = ((5') - (5)) / scale_comb
+
+ if (width_u_out > max)
+ scale_comb = ((5') - (5)) / max;
+ else if (width_u_out < min)
+ scale_comb = ((5') - (5)) / min;
+
+4. Issue G_CROP to retrieve actual input window.
+
+5. Using actual input window and calculated combined scales calculate sensor
+target output window.
+
+ width_s_out = ((3') - (3)) = ((2') - (2)) / scale_comb
+
+6. Apply iterative S_FMT for new sensor target output window.
+
+7. Issue G_FMT to retrieve the actual sensor output window.
+
+8. Calculate sensor scales.
+
+ scale_s = ((3') - (3)) / ((2') - (2))
+
+9. Calculate sensor output subwindow to be cropped on CEU by applying sensor
+scales to the requested window.
+
+ width_ceu = ((5') - (5)) / scale_s
+
+10. Use CEU cropping for above calculated window.
+
+11. Calculate CEU scales from sensor scales from results of (10) and user window
+from (3)
+
+ scale_ceu = calc_scale(((5') - (5)), &width_u_out)
+
+12. Apply CEU scales.
+
+--
+Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
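
The S_CROP and S_FMT user requests referred to above correspond to the
VIDIOC_S_CROP and VIDIOC_S_FMT ioctls. A minimal userspace sketch follows;
the device node, rectangle, window sizes and pixel format are arbitrary
examples, and error checking is omitted for brevity:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);

        /* S_CROP: left / top = (5) - (1), width / height = (5') - (5) */
        struct v4l2_crop crop = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .c = { .left = 80, .top = 60, .width = 480, .height = 360 },
        };
        ioctl(fd, VIDIOC_S_CROP, &crop);

        /* S_FMT: width / height = (6') - (6) */
        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
        fmt.fmt.pix.width = 320;
        fmt.fmt.pix.height = 240;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
        ioctl(fd, VIDIOC_S_FMT, &fmt);

        close(fd);
        return 0;
    }
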
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index b806edaf3e7..74d677c8b03 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -561,6 +561,8 @@ video_device helper functions
There are a few useful helper functions:
+- file/video_device private data
+
You can set/get driver private data in the video_device struct using:
void *video_get_drvdata(struct video_device *vdev);
@@ -575,8 +577,7 @@ struct video_device *video_devdata(struct file *file);
returns the video_device belonging to the file struct.
-The final helper function combines video_get_drvdata with
-video_devdata:
+The video_drvdata function combines video_get_drvdata with video_devdata:
void *video_drvdata(struct file *file);
@@ -584,6 +585,17 @@ You can go from a video_device struct to the v4l2_device struct using:
struct v4l2_device *v4l2_dev = vdev->v4l2_dev;
+- Device node name
+
+The video_device node kernel name can be retrieved using
+
+const char *video_device_node_name(struct video_device *vdev);
+
+The name is used as a hint by userspace tools such as udev. The function
+should be used where possible instead of accessing the video_device::num and
+video_device::minor fields.
+
+
video buffer helper functions
-----------------------------
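
A minimal sketch of how these helpers are typically combined in a driver's
file operation (my_device and my_fop_open are invented names; it assumes the
driver called video_set_drvdata() when registering the node):

    #include <linux/device.h>
    #include <media/v4l2-dev.h>

    struct my_device {
            struct video_device vdev;
            /* ... */
    };

    static int my_fop_open(struct file *file)
    {
            /* video_drvdata() == video_get_drvdata(video_devdata(file)) */
            struct my_device *dev = video_drvdata(file);

            dev_info(&dev->vdev.dev, "opened %s\n",
                     video_device_node_name(&dev->vdev));
            return 0;
    }
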
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index 82a7bd1800b..bc31636973e 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -11,23 +11,21 @@ This optimization is more critical now as bigger and bigger physical memories
(several GBs) are more readily available.
Users can use the huge page support in Linux kernel by either using the mmap
-system call or standard SYSv shared memory system calls (shmget, shmat).
+system call or standard SYSV shared memory system calls (shmget, shmat).
First the Linux kernel needs to be built with the CONFIG_HUGETLBFS
(present under "File systems") and CONFIG_HUGETLB_PAGE (selected
automatically when CONFIG_HUGETLBFS is selected) configuration
options.
-The kernel built with huge page support should show the number of configured
-huge pages in the system by running the "cat /proc/meminfo" command.
+The /proc/meminfo file provides information about the total number of
+persistent hugetlb pages in the kernel's huge page pool. It also displays
+information about the number of free, reserved and surplus huge pages and the
+default huge page size. The huge page size is needed for generating the
+proper alignment and size of the arguments to system calls that map huge page
+regions.
-/proc/meminfo also provides information about the total number of hugetlb
-pages configured in the kernel. It also displays information about the
-number of free hugetlb pages at any time. It also displays information about
-the configured huge page size - this is needed for generating the proper
-alignment and size of the arguments to the above system calls.
-
-The output of "cat /proc/meminfo" will have lines like:
+The output of "cat /proc/meminfo" will include lines like:
.....
HugePages_Total: vvv
@@ -53,59 +51,63 @@ HugePages_Surp is short for "surplus," and is the number of huge pages in
/proc/filesystems should also show a filesystem of type "hugetlbfs" configured
in the kernel.
-/proc/sys/vm/nr_hugepages indicates the current number of configured hugetlb
-pages in the kernel. Super user can dynamically request more (or free some
-pre-configured) huge pages.
-The allocation (or deallocation) of hugetlb pages is possible only if there are
-enough physically contiguous free pages in system (freeing of huge pages is
-possible only if there are enough hugetlb pages free that can be transferred
-back to regular memory pool).
+/proc/sys/vm/nr_hugepages indicates the current number of "persistent" huge
+pages in the kernel's huge page pool. "Persistent" huge pages will be
+returned to the huge page pool when freed by a task. A user with root
+privileges can dynamically allocate more or free some persistent huge pages
+by increasing or decreasing the value of 'nr_hugepages'.
-Pages that are used as hugetlb pages are reserved inside the kernel and cannot
-be used for other purposes.
+Pages that are used as huge pages are reserved inside the kernel and cannot
+be used for other purposes. Huge pages cannot be swapped out under
+memory pressure.
-Once the kernel with Hugetlb page support is built and running, a user can
-use either the mmap system call or shared memory system calls to start using
-the huge pages. It is required that the system administrator preallocate
-enough memory for huge page purposes.
+Once a number of huge pages have been pre-allocated to the kernel huge page
+pool, a user with appropriate privilege can use either the mmap system call
+or shared memory system calls to use the huge pages. See the discussion of
+Using Huge Pages, below.
-The administrator can preallocate huge pages on the kernel boot command line by
-specifying the "hugepages=N" parameter, where 'N' = the number of huge pages
-requested. This is the most reliable method for preallocating huge pages as
-memory has not yet become fragmented.
+The administrator can allocate persistent huge pages on the kernel boot
+command line by specifying the "hugepages=N" parameter, where 'N' = the
+number of huge pages requested. This is the most reliable method of
+allocating huge pages as memory has not yet become fragmented.
-Some platforms support multiple huge page sizes. To preallocate huge pages
+Some platforms support multiple huge page sizes. To allocate huge pages
of a specific size, one must precede the huge pages boot command parameters
with a huge page size selection parameter "hugepagesz=<size>". <size> must
be specified in bytes with optional scale suffix [kKmMgG]. The default huge
page size may be selected with the "default_hugepagesz=<size>" boot parameter.
-/proc/sys/vm/nr_hugepages indicates the current number of configured [default
-size] hugetlb pages in the kernel. Super user can dynamically request more
-(or free some pre-configured) huge pages.
-
-Use the following command to dynamically allocate/deallocate default sized
-huge pages:
+When multiple huge page sizes are supported, /proc/sys/vm/nr_hugepages
+indicates the current number of pre-allocated huge pages of the default size.
+Thus, one can use the following command to dynamically allocate/deallocate
+default sized persistent huge pages:
echo 20 > /proc/sys/vm/nr_hugepages
-This command will try to configure 20 default sized huge pages in the system.
+This command will try to adjust the number of default sized huge pages in the
+huge page pool to 20, allocating or freeing huge pages, as required.
+
On a NUMA platform, the kernel will attempt to distribute the huge page pool
-over the all on-line nodes. These huge pages, allocated when nr_hugepages
-is increased, are called "persistent huge pages".
+over the set of allowed nodes specified by the NUMA memory policy of the
+task that modifies nr_hugepages. The default for the allowed nodes--when the
+task has default memory policy--is all on-line nodes with memory. Allowed
+nodes with insufficient available, contiguous memory for a huge page will be
+silently skipped when allocating persistent huge pages. See the discussion
+below of the interaction of task memory policy, cpusets and per node attributes
+with the allocation and freeing of persistent huge pages.
The success or failure of huge page allocation depends on the amount of
-physically contiguous memory that is preset in system at the time of the
+physically contiguous memory that is present in system at the time of the
allocation attempt. If the kernel is unable to allocate huge pages from
some nodes in a NUMA system, it will attempt to make up the difference by
allocating extra pages on other nodes with sufficient available contiguous
memory, if any.
-System administrators may want to put this command in one of the local rc init
-files. This will enable the kernel to request huge pages early in the boot
-process when the possibility of getting physical contiguous pages is still
-very high. Administrators can verify the number of huge pages actually
-allocated by checking the sysctl or meminfo. To check the per node
+System administrators may want to put this command in one of the local rc
+init files. This will enable the kernel to allocate huge pages early in
+the boot process when the possibility of getting physical contiguous pages
+is still very high. Administrators can verify the number of huge pages
+actually allocated by checking the sysctl or meminfo. To check the per node
distribution of huge pages in a NUMA system, use:
cat /sys/devices/system/node/node*/meminfo | fgrep Huge
@@ -113,45 +115,47 @@ distribution of huge pages in a NUMA system, use:
/proc/sys/vm/nr_overcommit_hugepages specifies how large the pool of
huge pages can grow, if more huge pages than /proc/sys/vm/nr_hugepages are
requested by applications. Writing any non-zero value into this file
-indicates that the hugetlb subsystem is allowed to try to obtain "surplus"
-huge pages from the buddy allocator, when the normal pool is exhausted. As
-these surplus huge pages go out of use, they are freed back to the buddy
-allocator.
+indicates that the hugetlb subsystem is allowed to try to obtain that
+number of "surplus" huge pages from the kernel's normal page pool, when the
+persistent huge page pool is exhausted. As these surplus huge pages become
+unused, they are freed back to the kernel's normal page pool.
-When increasing the huge page pool size via nr_hugepages, any surplus
+When increasing the huge page pool size via nr_hugepages, any existing surplus
pages will first be promoted to persistent huge pages. Then, additional
huge pages will be allocated, if necessary and if possible, to fulfill
-the new huge page pool size.
+the new persistent huge page pool size.
-The administrator may shrink the pool of preallocated huge pages for
+The administrator may shrink the pool of persistent huge pages for
the default huge page size by setting the nr_hugepages sysctl to a
smaller value. The kernel will attempt to balance the freeing of huge pages
-across all on-line nodes. Any free huge pages on the selected nodes will
-be freed back to the buddy allocator.
-
-Caveat: Shrinking the pool via nr_hugepages such that it becomes less
-than the number of huge pages in use will convert the balance to surplus
-huge pages even if it would exceed the overcommit value. As long as
-this condition holds, however, no more surplus huge pages will be
-allowed on the system until one of the two sysctls are increased
-sufficiently, or the surplus huge pages go out of use and are freed.
+across all nodes in the memory policy of the task modifying nr_hugepages.
+Any free huge pages on the selected nodes will be freed back to the kernel's
+normal page pool.
+
+Caveat: Shrinking the persistent huge page pool via nr_hugepages such that
+it becomes less than the number of huge pages in use will convert the balance
+of the in-use huge pages to surplus huge pages. This will occur even if
+the number of surplus pages would exceed the overcommit value. As long as
+this condition holds--that is, until nr_hugepages+nr_overcommit_hugepages is
+increased sufficiently, or the surplus huge pages go out of use and are freed--
+no more surplus huge pages will be allowed to be allocated.
With support for multiple huge page pools at run-time available, much of
-the huge page userspace interface has been duplicated in sysfs. The above
-information applies to the default huge page size which will be
-controlled by the /proc interfaces for backwards compatibility. The root
-huge page control directory in sysfs is:
+the huge page userspace interface in /proc/sys/vm has been duplicated in sysfs.
+The /proc interfaces discussed above have been retained for backwards
+compatibility. The root huge page control directory in sysfs is:
/sys/kernel/mm/hugepages
For each huge page size supported by the running kernel, a subdirectory
-will exist, of the form
+will exist, of the form:
hugepages-${size}kB
Inside each of these directories, the same set of files will exist:
nr_hugepages
+ nr_hugepages_mempolicy
nr_overcommit_hugepages
free_hugepages
resv_hugepages
@@ -159,6 +163,102 @@ Inside each of these directories, the same set of files will exist:
which function as described above for the default huge page-sized case.
+
+Interaction of Task Memory Policy with Huge Page Allocation/Freeing
+
+Whether huge pages are allocated and freed via the /proc interface or
+the /sysfs interface using the nr_hugepages_mempolicy attribute, the NUMA
+nodes from which huge pages are allocated or freed are controlled by the
+NUMA memory policy of the task that modifies the nr_hugepages_mempolicy
+sysctl or attribute. When the nr_hugepages attribute is used, mempolicy
+is ignored.
+
+The recommended method to allocate or free huge pages to/from the kernel
+huge page pool, using the nr_hugepages example above, is:
+
+ numactl --interleave <node-list> echo 20 \
+ >/proc/sys/vm/nr_hugepages_mempolicy
+
+or, more succinctly:
+
+ numactl -m <node-list> echo 20 >/proc/sys/vm/nr_hugepages_mempolicy
+
+This will allocate or free abs(20 - nr_hugepages) huge pages to or from the
+nodes specified in <node-list>, depending on whether the number of persistent
+huge pages is initially less than or greater than 20, respectively. No huge
+pages will be allocated or freed on any node not included in the specified
+<node-list>.
+
+When adjusting the persistent hugepage count via nr_hugepages_mempolicy, any
+memory policy mode--bind, preferred, local or interleave--may be used. The
+resulting effect on persistent huge page allocation is as follows:
+
+1) Regardless of mempolicy mode [see Documentation/vm/numa_memory_policy.txt],
+ persistent huge pages will be distributed across the node or nodes
+ specified in the mempolicy as if "interleave" had been specified.
+ However, if a node in the policy does not contain sufficient contiguous
+ memory for a huge page, the allocation will not "fallback" to the nearest
+ neighbor node with sufficient contiguous memory. To do this would cause
+ undesirable imbalance in the distribution of the huge page pool, or
+ possibly, allocation of persistent huge pages on nodes not allowed by
+ the task's memory policy.
+
+2) One or more nodes may be specified with the bind or interleave policy.
+ If more than one node is specified with the preferred policy, only the
+ lowest numeric id will be used. Local policy will select the node where
+ the task is running at the time the nodes_allowed mask is constructed.
+ For local policy to be deterministic, the task must be bound to a cpu or
+ cpus in a single node. Otherwise, the task could be migrated to some
+ other node at any time after launch and the resulting node will be
+ indeterminate. Thus, local policy is not very useful for this purpose.
+ Any of the other mempolicy modes may be used to specify a single node.
+
+3) The nodes allowed mask will be derived from any non-default task mempolicy,
+ whether this policy was set explicitly by the task itself or one of its
+ ancestors, such as numactl. This means that if the task is invoked from a
+ shell with non-default policy, that policy will be used. One can specify a
+ node list of "all" with numactl --interleave or --membind [-m] to achieve
+ interleaving over all nodes in the system or cpuset.
+
+4) Any task mempolicy specified--e.g., using numactl--will be constrained by
+ the resource limits of any cpuset in which the task runs. Thus, there will
+ be no way for a task with non-default policy running in a cpuset with a
+ subset of the system nodes to allocate huge pages outside the cpuset
+ without first moving to a cpuset that contains all of the desired nodes.
+
+5) Boot-time huge page allocation attempts to distribute the requested number
+ of huge pages over all on-line nodes with memory.
+
+Per Node Hugepages Attributes
+
+A subset of the contents of the root huge page control directory in sysfs,
+described above, will be replicated under the system device of each
+NUMA node with memory in:
+
+ /sys/devices/system/node/node[0-9]*/hugepages/
+
+Under this directory, the subdirectory for each supported huge page size
+contains the following attribute files:
+
+ nr_hugepages
+ free_hugepages
+ surplus_hugepages
+
+The free_ and surplus_ attribute files are read-only. They return the number
+of free and surplus [overcommitted] huge pages, respectively, on the parent
+node.
+
+The nr_hugepages attribute returns the total number of huge pages on the
+specified node. When this attribute is written, the number of persistent huge
+pages on the parent node will be adjusted to the specified value, if sufficient
+resources exist, regardless of the task's mempolicy or cpuset constraints.
+
+Note that the number of overcommit and reserve pages remain global quantities,
+as we don't know until fault time, when the faulting task's mempolicy is
+applied, from which node the huge page allocation will be attempted.
+
+
+Using Huge Pages
+
If the user applications are going to request huge pages using mmap system
call, then it is required that system administrator mount a file system of
type hugetlbfs:
@@ -206,9 +306,11 @@ map_hugetlb.c.
* requesting huge pages.
*
* For the ia64 architecture, the Linux kernel reserves Region number 4 for
- * huge pages. That means the addresses starting with 0x800000... will need
- * to be specified. Specifying a fixed address is not required on ppc64,
- * i386 or x86_64.
+ * huge pages. That means that if one requires a fixed address, a huge page
+ * aligned address starting with 0x800000... will be required. If a fixed
+ * address is not required, the kernel will select an address in the proper
+ * range.
+ * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*
* Note: The default shared memory limit is quite low on many kernels,
* you may need to increase it via:
@@ -237,14 +339,8 @@ map_hugetlb.c.
#define dprintf(x) printf(x)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define SHMAT_FLAGS (SHM_RND)
-#else
-#define ADDR (void *)(0x0UL)
+#define ADDR (void *)(0x0UL) /* let kernel choose address */
#define SHMAT_FLAGS (0)
-#endif
int main(void)
{
@@ -302,10 +398,12 @@ int main(void)
* example, the app is requesting memory of size 256MB that is backed by
* huge pages.
*
- * For ia64 architecture, Linux kernel reserves Region number 4 for huge pages.
- * That means the addresses starting with 0x800000... will need to be
- * specified. Specifying a fixed address is not required on ppc64, i386
- * or x86_64.
+ * For the ia64 architecture, the Linux kernel reserves Region number 4 for
+ * huge pages. That means that if one requires a fixed address, a huge page
+ * aligned address starting with 0x800000... will be required. If a fixed
+ * address is not required, the kernel will select an address in the proper
+ * range.
+ * Other architectures, such as ppc64, i386 or x86_64 are not so constrained.
*/
#include <stdlib.h>
#include <stdio.h>
@@ -317,14 +415,8 @@ int main(void)
#define LENGTH (256UL*1024*1024)
#define PROTECTION (PROT_READ | PROT_WRITE)
-/* Only ia64 requires this */
-#ifdef __ia64__
-#define ADDR (void *)(0x8000000000000000UL)
-#define FLAGS (MAP_SHARED | MAP_FIXED)
-#else
-#define ADDR (void *)(0x0UL)
+#define ADDR (void *)(0x0UL) /* let kernel choose address */
#define FLAGS (MAP_SHARED)
-#endif
void check_bytes(char *addr)
{
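
As a small illustration of the per-node attributes described above, the
following sketch adjusts the persistent huge page count on a single node
(node0 and the 2048kB huge page size are assumptions of this example):

    #include <stdio.h>

    int main(void)
    {
        /* Path layout from the text above:
         * /sys/devices/system/node/node<N>/hugepages/hugepages-<size>kB/nr_hugepages
         */
        const char *path = "/sys/devices/system/node/node0/"
                           "hugepages/hugepages-2048kB/nr_hugepages";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Request 20 persistent huge pages on node 0 (needs root). */
        fprintf(f, "20\n");
        fclose(f);
        return 0;
    }
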
diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
index 3ffadf8da61..12f9ba20ccb 100644
--- a/Documentation/vm/hwpoison.txt
+++ b/Documentation/vm/hwpoison.txt
@@ -92,16 +92,62 @@ PR_MCE_KILL_GET
Testing:
-madvise(MADV_POISON, ....)
+madvise(MADV_HWPOISON, ....)
(as root)
Poison a page in the process for testing
hwpoison-inject module through debugfs
- /sys/debug/hwpoison/corrupt-pfn
-Inject hwpoison fault at PFN echoed into this file
+/sys/debug/hwpoison/
+corrupt-pfn
+
+Inject hwpoison fault at PFN echoed into this file. This does
+some early filtering to avoid corrupting unintended pages in test suites.
+
+unpoison-pfn
+
+Software-unpoison page at PFN echoed into this file. This
+way a page can be reused again.
+This only works for Linux injected failures, not for real
+memory failures.
+
+Note these injection interfaces are not stable and might change between
+kernel versions.
+
+corrupt-filter-dev-major
+corrupt-filter-dev-minor
+
+Only handle memory failures to pages associated with the file system defined
+by block device major/minor. -1U is the wildcard value.
+This should be only used for testing with artificial injection.
+
+corrupt-filter-memcg
+
+Limit injection to pages owned by a memory cgroup (memcg), specified by
+the inode number of the memcg.
+
+Example:
+ mkdir /cgroup/hwpoison
+
+ usemem -m 100 -s 1000 &
+ echo `jobs -p` > /cgroup/hwpoison/tasks
+
+ memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ')
+ echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
+
+ page-types -p `pidof init` --hwpoison # shall do nothing
+ page-types -p `pidof usemem` --hwpoison # poison its pages
+
+corrupt-filter-flags-mask
+corrupt-filter-flags-value
+
+When specified, only poison pages if ((page_flags & mask) == value).
+This allows stress testing of many kinds of pages. The page_flags
+are the same as in /proc/kpageflags. The flag bits are defined in
+include/linux/kernel-page-flags.h and documented in
+Documentation/vm/pagemap.txt
Architecture specific MCE injector
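
Putting the injection and filter files above together, a minimal test sketch
might look like this (the PFN and filter values are arbitrary examples, and
/sys/debug is assumed to be where debugfs is mounted, as in the text above):

    #include <stdio.h>

    /* Write one value to a file under the hwpoison debugfs directory. */
    static int hwpoison_write(const char *file, unsigned long long val)
    {
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/debug/hwpoison/%s", file);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%llu\n", val);
        fclose(f);
        return 0;
    }

    int main(void)
    {
        /* Only poison pages whose kpageflags satisfy
         * (page_flags & mask) == value; 0/0 matches every page. */
        hwpoison_write("corrupt-filter-flags-mask", 0);
        hwpoison_write("corrupt-filter-flags-value", 0);

        /* Inject a software poison at an example PFN. */
        hwpoison_write("corrupt-pfn", 0x12345);
        return 0;
    }
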
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index 262d8e6793a..b392e496f81 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -16,9 +16,9 @@ by sharing the data common between them. But it can be useful to any
application which generates many instances of the same data.
KSM only merges anonymous (private) pages, never pagecache (file) pages.
-KSM's merged pages are at present locked into kernel memory for as long
-as they are shared: so cannot be swapped out like the user pages they
-replace (but swapping KSM pages should follow soon in a later release).
+KSM's merged pages were originally locked into kernel memory, but can now
+be swapped out just like other user pages (but sharing is broken when they
+are swapped back in: ksmd must rediscover their identity and merge again).
KSM only operates on those areas of address space which an application
has advised to be likely candidates for merging, by using the madvise(2)
@@ -44,20 +44,12 @@ includes unmapped gaps (though working on the intervening mapped areas),
and might fail with EAGAIN if not enough memory for internal structures.
Applications should be considerate in their use of MADV_MERGEABLE,
-restricting its use to areas likely to benefit. KSM's scans may use
-a lot of processing power, and its kernel-resident pages are a limited
-resource. Some installations will disable KSM for these reasons.
+restricting its use to areas likely to benefit. KSM's scans may use a lot
+of processing power: some installations will disable KSM for that reason.
The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
readable by all but writable only by root:
-max_kernel_pages - set to maximum number of kernel pages that KSM may use
- e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages"
- Value 0 imposes no limit on the kernel pages KSM may use;
- but note that any process using MADV_MERGEABLE can cause
- KSM to allocate these pages, unswappable until it exits.
- Default: quarter of memory (chosen to not pin too much)
-
pages_to_scan - how many present pages to scan before ksmd goes to sleep
e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan"
Default: 100 (chosen for demonstration purposes)
@@ -75,7 +67,7 @@ run - set 0 to stop ksmd from running but keep merged pages,
The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
-pages_shared - how many shared unswappable kernel pages KSM is using
+pages_shared - how many shared pages are being used
pages_sharing - how many more sites are sharing them i.e. how much saved
pages_unshared - how many pages unique but repeatedly checked for merging
pages_volatile - how many pages changing too fast to be placed in a tree
@@ -87,4 +79,4 @@ pages_volatile embraces several different kinds of activity, but a high
proportion there would also indicate poor use of madvise MADV_MERGEABLE.
Izik Eidus,
-Hugh Dickins, 24 Sept 2009
+Hugh Dickins, 17 Nov 2009
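
A minimal sketch of the application side described above, marking an
anonymous region as a merge candidate with madvise(2) (the region size and
fill pattern are made up for the example; MADV_MERGEABLE may need to be
defined by hand on older C libraries):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 16 * 4096;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Fill with identical data so ksmd has something to merge. */
        memset(buf, 0x5a, len);

        /* Advise the kernel that this area is a good merge candidate. */
        if (madvise(buf, len, MADV_MERGEABLE))
                perror("madvise(MADV_MERGEABLE)");

        /* The area stays mergeable until MADV_UNMERGEABLE or unmap. */
        return 0;
    }
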
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index ea44ea502da..66e9358e214 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -1,11 +1,22 @@
/*
* page-types: Tool for querying page flags
*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should find a copy of v2 of the GNU General Public License somewhere on
+ * your Linux system; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
* Copyright (C) 2009 Intel corporation
*
* Authors: Wu Fengguang <fengguang.wu@intel.com>
- *
- * Released under the General Public License (GPL).
*/
#define _LARGEFILE64_SOURCE
@@ -100,7 +111,7 @@
#define BIT(name) (1ULL << KPF_##name)
#define BITS_COMPOUND (BIT(COMPOUND_HEAD) | BIT(COMPOUND_TAIL))
-static char *page_flag_names[] = {
+static const char *page_flag_names[] = {
[KPF_LOCKED] = "L:locked",
[KPF_ERROR] = "E:error",
[KPF_REFERENCED] = "R:referenced",
@@ -173,7 +184,7 @@ static int kpageflags_fd;
static int opt_hwpoison;
static int opt_unpoison;
-static char *hwpoison_debug_fs = "/debug/hwpoison";
+static const char hwpoison_debug_fs[] = "/debug/hwpoison";
static int hwpoison_inject_fd;
static int hwpoison_forget_fd;
@@ -560,7 +571,7 @@ static void walk_pfn(unsigned long voffset,
{
uint64_t buf[KPAGEFLAGS_BATCH];
unsigned long batch;
- unsigned long pages;
+ long pages;
unsigned long i;
while (count) {
@@ -673,30 +684,35 @@ static void usage(void)
printf(
"page-types [options]\n"
-" -r|--raw Raw mode, for kernel developers\n"
-" -a|--addr addr-spec Walk a range of pages\n"
-" -b|--bits bits-spec Walk pages with specified bits\n"
-" -p|--pid pid Walk process address space\n"
+" -r|--raw Raw mode, for kernel developers\n"
+" -d|--describe flags Describe flags\n"
+" -a|--addr addr-spec Walk a range of pages\n"
+" -b|--bits bits-spec Walk pages with specified bits\n"
+" -p|--pid pid Walk process address space\n"
#if 0 /* planned features */
-" -f|--file filename Walk file address space\n"
+" -f|--file filename Walk file address space\n"
#endif
-" -l|--list Show page details in ranges\n"
-" -L|--list-each Show page details one by one\n"
-" -N|--no-summary Don't show summay info\n"
-" -X|--hwpoison hwpoison pages\n"
-" -x|--unpoison unpoison pages\n"
-" -h|--help Show this usage message\n"
+" -l|--list Show page details in ranges\n"
+" -L|--list-each Show page details one by one\n"
+" -N|--no-summary Don't show summay info\n"
+" -X|--hwpoison hwpoison pages\n"
+" -x|--unpoison unpoison pages\n"
+" -h|--help Show this usage message\n"
+"flags:\n"
+" 0x10 bitfield format, e.g.\n"
+" anon bit-name, e.g.\n"
+" 0x10,anon comma-separated list, e.g.\n"
"addr-spec:\n"
-" N one page at offset N (unit: pages)\n"
-" N+M pages range from N to N+M-1\n"
-" N,M pages range from N to M-1\n"
-" N, pages range from N to end\n"
-" ,M pages range from 0 to M-1\n"
+" N one page at offset N (unit: pages)\n"
+" N+M pages range from N to N+M-1\n"
+" N,M pages range from N to M-1\n"
+" N, pages range from N to end\n"
+" ,M pages range from 0 to M-1\n"
"bits-spec:\n"
-" bit1,bit2 (flags & (bit1|bit2)) != 0\n"
-" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n"
-" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n"
-" =bit1,bit2 flags == (bit1|bit2)\n"
+" bit1,bit2 (flags & (bit1|bit2)) != 0\n"
+" bit1,bit2=bit1 (flags & (bit1|bit2)) == bit1\n"
+" bit1,~bit2 (flags & (bit1|bit2)) == bit1\n"
+" =bit1,bit2 flags == (bit1|bit2)\n"
"bit-names:\n"
);
@@ -884,13 +900,23 @@ static void parse_bits_mask(const char *optarg)
add_bits_filter(mask, bits);
}
+static void describe_flags(const char *optarg)
+{
+ uint64_t flags = parse_flag_names(optarg, 0);
+
+ printf("0x%016llx\t%s\t%s\n",
+ (unsigned long long)flags,
+ page_flag_name(flags),
+ page_flag_longname(flags));
+}
-static struct option opts[] = {
+static const struct option opts[] = {
{ "raw" , 0, NULL, 'r' },
{ "pid" , 1, NULL, 'p' },
{ "file" , 1, NULL, 'f' },
{ "addr" , 1, NULL, 'a' },
{ "bits" , 1, NULL, 'b' },
+ { "describe" , 1, NULL, 'd' },
{ "list" , 0, NULL, 'l' },
{ "list-each" , 0, NULL, 'L' },
{ "no-summary", 0, NULL, 'N' },
@@ -907,7 +933,7 @@ int main(int argc, char *argv[])
page_size = getpagesize();
while ((c = getopt_long(argc, argv,
- "rp:f:a:b:lLNXxh", opts, NULL)) != -1) {
+ "rp:f:a:b:d:lLNXxh", opts, NULL)) != -1) {
switch (c) {
case 'r':
opt_raw = 1;
@@ -924,6 +950,9 @@ int main(int argc, char *argv[])
case 'b':
parse_bits_mask(optarg);
break;
+ case 'd':
+ describe_flags(optarg);
+ exit(0);
case 'l':
opt_list = 1;
break;
diff --git a/Kbuild b/Kbuild
index f056b4feee5..e3737ad72b5 100644
--- a/Kbuild
+++ b/Kbuild
@@ -8,7 +8,7 @@
#####
# 1) Generate bounds.h
-bounds-file := include/linux/bounds.h
+bounds-file := include/generated/bounds.h
always := $(bounds-file)
targets := $(bounds-file) kernel/bounds.s
@@ -43,7 +43,7 @@ $(obj)/$(bounds-file): kernel/bounds.s Kbuild
# 2) Generate asm-offsets.h
#
-offsets-file := include/asm/asm-offsets.h
+offsets-file := include/generated/asm-offsets.h
always += $(offsets-file)
targets += $(offsets-file)
diff --git a/MAINTAINERS b/MAINTAINERS
index d7f8668b7a7..d5244f1580b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -801,6 +801,19 @@ L: openmoko-kernel@lists.openmoko.org (subscribers-only)
W: http://wiki.openmoko.org/wiki/Neo_FreeRunner
S: Supported
+ARM/QUALCOMM MSM MACHINE SUPPORT
+M: David Brown <davidb@codeaurora.org>
+M: Daniel Walker <dwalker@codeaurora.org>
+M: Bryan Huntsman <bryanh@codeaurora.org>
+F: arch/arm/mach-msm/
+F: drivers/video/msm/
+F: drivers/mmc/host/msm_sdcc.c
+F: drivers/mmc/host/msm_sdcc.h
+F: drivers/serial/msm_serial.h
+F: drivers/serial/msm_serial.c
+T: git git://codeaurora.org/quic/kernel/dwalker/linux-msm.git
+S: Maintained
+
ARM/TOSA MACHINE SUPPORT
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
M: Dirk Opfer <dirk@opfer-online.de>
@@ -822,13 +835,13 @@ F: arch/arm/mach-pxa/palmte2.c
F: arch/arm/mach-pxa/include/mach/palmtc.h
F: arch/arm/mach-pxa/palmtc.c
-ARM/PALM TREO 680 SUPPORT
+ARM/PALM TREO SUPPORT
M: Tomas Cech <sleep_walker@suse.cz>
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
-F: arch/arm/mach-pxa/include/mach/treo680.h
-F: arch/arm/mach-pxa/treo680.c
+F: arch/arm/mach-pxa/include/mach/palmtreo.h
+F: arch/arm/mach-pxa/palmtreo.c
ARM/PALMZ72 SUPPORT
M: Sergey Lapin <slapin@ossfans.org>
@@ -1389,6 +1402,8 @@ L: linux-usb@vger.kernel.org
S: Supported
F: Documentation/usb/WUSB-Design-overview.txt
F: Documentation/usb/wusb-cbaf
+F: drivers/usb/host/hwa-hc.c
+F: drivers/usb/host/whci/
F: drivers/usb/wusbcore/
F: include/linux/usb/wusb*
@@ -1469,8 +1484,8 @@ F: include/linux/coda*.h
COMMON INTERNET FILE SYSTEM (CIFS)
M: Steve French <sfrench@samba.org>
-L: linux-cifs-client@lists.samba.org
-L: samba-technical@lists.samba.org
+L: linux-cifs-client@lists.samba.org (moderated for non-subscribers)
+L: samba-technical@lists.samba.org (moderated for non-subscribers)
W: http://linux-cifs.samba.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git
S: Supported
@@ -2364,6 +2379,15 @@ W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
S: Maintained
F: drivers/hwmon/hdaps.c
+HWPOISON MEMORY FAILURE HANDLING
+M: Andi Kleen <andi@firstfloor.org>
+L: linux-mm@kvack.org
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
+S: Maintained
+F: mm/memory-failure.c
+F: mm/hwpoison-inject.c
+
HYPERVISOR VIRTUAL CONSOLE DRIVER
L: linuxppc-dev@ozlabs.org
S: Odd Fixes
@@ -3068,8 +3092,11 @@ S: Maintained
F: fs/autofs4/
KERNEL BUILD
+M: Michal Marek <mmarek@suse.cz>
+T: git git://repo.or.cz/linux-kbuild.git for-next
+T: git git://repo.or.cz/linux-kbuild.git for-linus
L: linux-kbuild@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/kbuild/
F: Makefile
F: scripts/Makefile.*
@@ -3111,7 +3138,6 @@ L: kvm@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
F: arch/x86/include/asm/svm.h
-F: arch/x86/kvm/kvm_svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
@@ -3247,6 +3273,7 @@ LINUX FOR IBM pSERIES (RS/6000)
M: Paul Mackerras <paulus@au.ibm.com>
W: http://www.ibm.com/linux/ltc/projects/ppc
S: Supported
+F: arch/powerpc/boot/rs6000.h
LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
@@ -3255,18 +3282,24 @@ W: http://www.penguinppc.org/
L: linuxppc-dev@ozlabs.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
S: Supported
+F: Documentation/powerpc/
+F: arch/powerpc/
LINUX FOR POWER MACINTOSH
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
W: http://www.penguinppc.org/
L: linuxppc-dev@ozlabs.org
S: Maintained
+F: arch/powerpc/platforms/powermac/
+F: drivers/macintosh/
LINUX FOR POWERPC EMBEDDED MPC5XXX
M: Grant Likely <grant.likely@secretlab.ca>
L: linuxppc-dev@ozlabs.org
T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
+F: arch/powerpc/platforms/512x/
+F: arch/powerpc/platforms/52xx/
LINUX FOR POWERPC EMBEDDED PPC4XX
M: Josh Boyer <jwboyer@linux.vnet.ibm.com>
@@ -3275,6 +3308,8 @@ W: http://www.penguinppc.org/
L: linuxppc-dev@ozlabs.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git
S: Maintained
+F: arch/powerpc/platforms/40x/
+F: arch/powerpc/platforms/44x/
LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
M: Grant Likely <grant.likely@secretlab.ca>
@@ -3282,6 +3317,8 @@ W: http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
L: linuxppc-dev@ozlabs.org
T: git git://git.secretlab.ca/git/linux-2.6.git
S: Maintained
+F: arch/powerpc/*/*virtex*
+F: arch/powerpc/*/*/*virtex*
LINUX FOR POWERPC EMBEDDED PPC8XX
M: Vitaly Bordug <vitb@kernel.crashing.org>
@@ -3295,12 +3332,16 @@ M: Kumar Gala <galak@kernel.crashing.org>
W: http://www.penguinppc.org/
L: linuxppc-dev@ozlabs.org
S: Maintained
+F: arch/powerpc/platforms/83xx/
LINUX FOR POWERPC PA SEMI PWRFICIENT
M: Olof Johansson <olof@lixom.net>
W: http://www.pasemi.com/
L: linuxppc-dev@ozlabs.org
S: Supported
+F: arch/powerpc/platforms/pasemi/
+F: drivers/*/*pasemi*
+F: drivers/*/*/*pasemi*
LINUX SECURITY MODULE (LSM) FRAMEWORK
M: Chris Wright <chrisw@sous-sol.org>
@@ -3903,6 +3944,23 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/video/omap/
+OMAP DISPLAY SUBSYSTEM SUPPORT (DSS2)
+M: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+L: linux-omap@vger.kernel.org
+L: linux-fbdev@vger.kernel.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/video/omap2/dss/
+F: drivers/video/omap2/vrfb.c
+F: drivers/video/omap2/vram.c
+F: Documentation/arm/OMAP/DSS
+
+OMAP FRAMEBUFFER SUPPORT (FOR DSS2)
+M: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+L: linux-omap@vger.kernel.org
+L: linux-fbdev@vger.kernel.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/video/omap2/omapfb/
+
OMAP MMC SUPPORT
M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
L: linux-omap@vger.kernel.org
@@ -5035,6 +5093,7 @@ F: drivers/char/specialix*
SPI SUBSYSTEM
M: David Brownell <dbrownell@users.sourceforge.net>
+M: Grant Likely <grant.likely@secretlab.ca>
L: spi-devel-general@lists.sourceforge.net
S: Maintained
F: Documentation/spi/
@@ -5373,10 +5432,19 @@ ULTRA-WIDEBAND (UWB) SUBSYSTEM:
M: David Vrabel <david.vrabel@csr.com>
L: linux-usb@vger.kernel.org
S: Supported
-F: drivers/uwb/*
+F: drivers/uwb/
+X: drivers/uwb/wlp/
+X: drivers/uwb/i1480/i1480u-wlp/
+X: drivers/uwb/i1480/i1480-wlp.h
F: include/linux/uwb.h
F: include/linux/uwb/
+UNIFDEF
+M: Tony Finch <dot@dotat.at>
+W: http://dotat.at/prog/unifdef
+S: Maintained
+F: scripts/unifdef.c
+
UNIFORM CDROM DRIVER
M: Jens Axboe <axboe@kernel.dk>
W: http://www.kernel.dk
@@ -5409,10 +5477,9 @@ S: Supported
F: drivers/block/ub.c
USB CDC ETHERNET DRIVER
-M: Greg Kroah-Hartman <greg@kroah.com>
+M: Oliver Neukum <oliver@neukum.name>
L: linux-usb@vger.kernel.org
S: Maintained
-W: http://www.kroah.com/linux-usb/
F: drivers/net/usb/cdc_*.c
F: include/linux/usb/cdc.h
@@ -5663,9 +5730,11 @@ S: Maintained
F: drivers/net/wireless/rndis_wlan.c
USB XHCI DRIVER
-M: Sarah Sharp <sarah.a.sharp@intel.com>
+M: Sarah Sharp <sarah.a.sharp@linux.intel.com>
L: linux-usb@vger.kernel.org
S: Supported
+F: drivers/usb/host/xhci*
+F: drivers/usb/host/pci-quirks*
USB ZC0301 DRIVER
M: Luca Risolia <luca.risolia@studio.unibo.it>
@@ -5879,9 +5948,12 @@ W: http://linuxwimax.org
WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM
M: David Vrabel <david.vrabel@csr.com>
+L: netdev@vger.kernel.org
S: Maintained
F: include/linux/wlp.h
F: drivers/uwb/wlp/
+F: drivers/uwb/i1480/i1480u-wlp/
+F: drivers/uwb/i1480/i1480-wlp.h
WISTRON LAPTOP BUTTON DRIVER
M: Miloslav Trmac <mitr@volny.cz>
@@ -5927,6 +5999,7 @@ M: Mark Brown <broonie@opensource.wolfsonmicro.com>
T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
W: http://opensource.wolfsonmicro.com/node/8
S: Supported
+F: Documentation/hwmon/wm83??
F: drivers/leds/leds-wm83*.c
F: drivers/mfd/wm8*.c
F: drivers/power/wm83*.c
@@ -5936,14 +6009,14 @@ F: drivers/video/backlight/wm83*_bl.c
F: drivers/watchdog/wm83*_wdt.c
F: include/linux/mfd/wm831x/
F: include/linux/mfd/wm8350/
-F: include/linux/mfd/wm8400/
-F: sound/soc/codecs/wm8350.c
-F: sound/soc/codecs/wm8400.c
+F: include/linux/mfd/wm8400*
+F: sound/soc/codecs/wm8350.*
+F: sound/soc/codecs/wm8400.*
X.25 NETWORK LAYER
-M: Henner Eisen <eis@baty.hanse.de>
+M: Andrew Hendry <andrew.hendry@gmail.com>
L: linux-x25@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: Documentation/networking/x25*
F: include/net/x25*
F: net/x25/
diff --git a/Makefile b/Makefile
index 33d4732a6c4..e6b06cbeb47 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
-SUBLEVEL = 32
-EXTRAVERSION =
+SUBLEVEL = 33
+EXTRAVERSION = -rc1
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
@@ -16,6 +16,13 @@ NAME = Man-Eating Seals of Antiquity
# o print "Entering directory ...";
MAKEFLAGS += -rR --no-print-directory
+# Avoid funny character set dependencies
+unexport LC_ALL
+LC_CTYPE=C
+LC_COLLATE=C
+LC_NUMERIC=C
+export LC_CTYPE LC_COLLATE LC_NUMERIC
+
# We are using a recursive build, so we need to do a little thinking
# to get the ordering right.
#
@@ -334,10 +341,9 @@ CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
# Use LINUXINCLUDE when you must reference the include/ directory.
# Needed to be compatible with the O= option
-LINUXINCLUDE := -Iinclude \
- $(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \
- -I$(srctree)/arch/$(hdr-arch)/include \
- -include include/linux/autoconf.h
+LINUXINCLUDE := -I$(srctree)/arch/$(hdr-arch)/include -Iinclude \
+ $(if $(KBUILD_SRC), -I$(srctree)/include) \
+ -include include/generated/autoconf.h
KBUILD_CPPFLAGS := -D__KERNEL__
@@ -465,7 +471,7 @@ ifeq ($(KBUILD_EXTMOD),)
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
$(Q)$(MAKE) $(build)=$(@)
# Objects we will link into vmlinux / subdirs we need to visit
@@ -492,18 +498,18 @@ $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
# with it and forgot to run make oldconfig.
# if auto.conf.cmd is missing then we are probably in a cleaned tree so
# we execute the config step to be sure to catch updated Kconfig files
-include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
+include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
$(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
else
-# external modules needs include/linux/autoconf.h and include/config/auto.conf
+# external modules need include/generated/autoconf.h and include/config/auto.conf
# but do not care if they are up-to-date. Use auto.conf to trigger the test
PHONY += include/config/auto.conf
include/config/auto.conf:
- $(Q)test -e include/linux/autoconf.h -a -e $@ || ( \
+ $(Q)test -e include/generated/autoconf.h -a -e $@ || ( \
echo; \
echo " ERROR: Kernel configuration is invalid."; \
- echo " include/linux/autoconf.h or $@ are missing."; \
+ echo " include/generated/autoconf.h or $@ are missing.";\
echo " Run 'make oldconfig && make prepare' on kernel src to fix it."; \
echo; \
/bin/false)
@@ -877,6 +883,9 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
PHONY += $(vmlinux-dirs)
$(vmlinux-dirs): prepare scripts
$(Q)$(MAKE) $(build)=$@
+ifdef CONFIG_MODULES
+ $(Q)$(MAKE) $(modbuiltin)=$@
+endif
# Build the kernel release string
#
@@ -955,7 +964,6 @@ PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
-# 2) Create the include2 directory, used for the second asm symlink
prepare3: include/config/kernel.release
ifneq ($(KBUILD_SRC),)
@$(kecho) ' Using $(srctree) as source for kernel'
@@ -964,17 +972,13 @@ ifneq ($(KBUILD_SRC),)
echo " in the '$(srctree)' directory.";\
/bin/false; \
fi;
- $(Q)if [ ! -d include2 ]; then \
- mkdir -p include2; \
- ln -fsn $(srctree)/include/asm-$(SRCARCH) include2/asm; \
- fi
endif
# prepare2 creates a makefile if using a separate output directory
prepare2: prepare3 outputmakefile
-prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
- include/asm include/config/auto.conf
+prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
+ include/config/auto.conf
$(cmd_crmodverdir)
archprepare: prepare1 scripts_basic
@@ -986,42 +990,6 @@ prepare0: archprepare FORCE
# All the preparing..
prepare: prepare0
-# The asm symlink changes when $(ARCH) changes.
-# Detect this and ask user to run make mrproper
-# If asm is a stale symlink (point to dir that does not exist) remove it
-define check-symlink
- set -e; \
- if [ -L include/asm ]; then \
- asmlink=`readlink include/asm | cut -d '-' -f 2`; \
- if [ "$$asmlink" != "$(SRCARCH)" ]; then \
- echo "ERROR: the symlink $@ points to asm-$$asmlink but asm-$(SRCARCH) was expected"; \
- echo " set ARCH or save .config and run 'make mrproper' to fix it"; \
- exit 1; \
- fi; \
- test -e $$asmlink || rm include/asm; \
- elif [ -d include/asm ]; then \
- echo "ERROR: $@ is a directory but a symlink was expected";\
- exit 1; \
- fi
-endef
-
-# We create the target directory of the symlink if it does
-# not exist so the test in check-symlink works and we have a
-# directory for generated filesas used by some architectures.
-define create-symlink
- if [ ! -L include/asm ]; then \
- $(kecho) ' SYMLINK $@ -> include/asm-$(SRCARCH)'; \
- if [ ! -d include/asm-$(SRCARCH) ]; then \
- mkdir -p include/asm-$(SRCARCH); \
- fi; \
- ln -fsn asm-$(SRCARCH) $@; \
- fi
-endef
-
-include/asm: FORCE
- $(Q)$(check-symlink)
- $(Q)$(create-symlink)
-
# Generate some files
# ---------------------------------------------------------------------------
@@ -1046,7 +1014,7 @@ endef
include/linux/version.h: $(srctree)/Makefile FORCE
$(call filechk,version.h)
-include/linux/utsrelease.h: include/config/kernel.release FORCE
+include/generated/utsrelease.h: include/config/kernel.release FORCE
$(call filechk,utsrelease.h)
PHONY += headerdep
@@ -1076,11 +1044,6 @@ firmware_install: FORCE
export INSTALL_HDR_PATH = $(objtree)/usr
hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
-# Find out where the Kbuild file is located to support
-# arch/$(ARCH)/include/asm
-hdr-dir = $(strip \
- $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/asm/Kbuild), \
- arch/$(hdr-arch)/include/asm, include/asm-$(hdr-arch)))
# If we do an all arch process set dst to asm-$(hdr-arch)
hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
@@ -1095,10 +1058,10 @@ headers_install_all:
PHONY += headers_install
headers_install: __headers
- $(if $(wildcard $(srctree)/$(hdr-dir)/Kbuild),, \
+ $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/asm/Kbuild),, \
$(error Headers not exportable for the $(SRCARCH) architecture))
$(Q)$(MAKE) $(hdr-inst)=include
- $(Q)$(MAKE) $(hdr-inst)=$(hdr-dir) $(hdr-dst)
+ $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/asm $(hdr-dst)
PHONY += headers_check_all
headers_check_all: headers_install_all
@@ -1107,7 +1070,7 @@ headers_check_all: headers_install_all
PHONY += headers_check
headers_check: headers_install
$(Q)$(MAKE) $(hdr-inst)=include HDRCHECK=1
- $(Q)$(MAKE) $(hdr-inst)=$(hdr-dir) $(hdr-dst) HDRCHECK=1
+ $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/asm $(hdr-dst) HDRCHECK=1
# ---------------------------------------------------------------------------
# Modules
@@ -1127,6 +1090,7 @@ all: modules
PHONY += modules
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.builtin) > $(objtree)/modules.builtin
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild
@@ -1156,6 +1120,7 @@ _modinst_:
ln -s $(objtree) $(MODLIB)/build ; \
fi
@cp -f $(objtree)/modules.order $(MODLIB)/
+ @cp -f $(objtree)/modules.builtin $(MODLIB)/
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
# This depmod is only for convenience to give the initial
@@ -1194,12 +1159,10 @@ CLEAN_FILES += vmlinux System.map \
.tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
# Directories & files removed with 'make mrproper'
-MRPROPER_DIRS += include/config include2 usr/include include/generated
-MRPROPER_FILES += .config .config.old include/asm .version .old_version \
- include/linux/autoconf.h include/linux/version.h \
- include/linux/utsrelease.h \
- include/linux/bounds.h include/asm*/asm-offsets.h \
- Module.symvers Module.markers tags TAGS cscope*
+MRPROPER_DIRS += include/config usr/include include/generated
+MRPROPER_FILES += .config .config.old .version .old_version \
+ include/linux/version.h \
+ Module.symvers tags TAGS cscope*
# clean - Delete most, but leave enough to build external modules
#
@@ -1218,7 +1181,7 @@ clean: archclean $(clean-dirs)
\( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \
- -o -name 'Module.markers' -o -name '.tmp_*.o.*' \
+ -o -name modules.builtin -o -name '.tmp_*.o.*' \
-o -name '*.gcno' \) -type f -print | xargs rm -f
# mrproper - Delete all generated files, including .config
@@ -1416,8 +1379,8 @@ $(clean-dirs):
clean: rm-dirs := $(MODVERDIR)
clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers \
- $(KBUILD_EXTMOD)/Module.markers \
- $(KBUILD_EXTMOD)/modules.order
+ $(KBUILD_EXTMOD)/modules.order \
+ $(KBUILD_EXTMOD)/modules.builtin
clean: $(clean-dirs)
$(call cmd,rmdirs)
$(call cmd,rmfiles)
diff --git a/arch/Kconfig b/arch/Kconfig
index d82875820a1..9d055b4f058 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -135,9 +135,7 @@ config HAVE_DEFAULT_NO_SPIN_MUTEXES
config HAVE_HW_BREAKPOINT
bool
- depends on HAVE_PERF_EVENTS
- select ANON_INODES
- select PERF_EVENTS
+ depends on PERF_EVENTS
config HAVE_USER_RETURN_NOTIFIER
bool
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 443448154f3..bd7261ea8f9 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS
+ select HAVE_PERF_EVENTS
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c
index 3af21c78933..3c8d1b25c66 100644
--- a/arch/alpha/boot/bootp.c
+++ b/arch/alpha/boot/bootp.c
@@ -9,7 +9,7 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/mm.h>
#include <asm/system.h>
diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c
index 1036b515e20..ade3f129dc2 100644
--- a/arch/alpha/boot/bootpz.c
+++ b/arch/alpha/boot/bootpz.c
@@ -11,7 +11,7 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/mm.h>
#include <asm/system.h>
diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c
index 89f3be071ae..644b7db5543 100644
--- a/arch/alpha/boot/main.c
+++ b/arch/alpha/boot/main.c
@@ -7,7 +7,7 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/mm.h>
#include <asm/system.h>
diff --git a/arch/alpha/include/asm/asm-offsets.h b/arch/alpha/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/alpha/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/alpha/include/asm/bug.h b/arch/alpha/include/asm/bug.h
index 1720c8ad86f..f091682e3cc 100644
--- a/arch/alpha/include/asm/bug.h
+++ b/arch/alpha/include/asm/bug.h
@@ -13,7 +13,8 @@
"call_pal %0 # bugchk\n\t" \
".long %1\n\t.8byte %2" \
: : "i"(PAL_bugchk), "i"(__LINE__), "i"(__FILE__)); \
- for ( ; ; ); } while (0)
+ unreachable(); \
+ } while (0)
#define HAVE_ARCH_BUG
#endif
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h
index 46bfff58f67..471c07292e0 100644
--- a/arch/alpha/include/asm/core_t2.h
+++ b/arch/alpha/include/asm/core_t2.h
@@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
-extern spinlock_t t2_hae_lock;
+extern raw_spinlock_t t2_hae_lock;
/*
* NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since
@@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extbl(result, addr & 3);
}
@@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extwl(result, addr & 3);
}
@@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long result, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return result & 0xffffffffUL;
}
@@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long r0, r1, work, msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
r0 = *(vuip)(work);
r1 = *(vuip)(work + (4 << 5));
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
return r1 << 32 | r0;
}
@@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, w;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
@@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, w;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/*
@@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
@@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM;
unsigned long msb, work;
unsigned long flags;
- spin_lock_irqsave(&t2_hae_lock, flags);
+ raw_spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
*(vuip)work = b;
*(vuip)(work + (4 << 5)) = b >> 32;
- spin_unlock_irqrestore(&t2_hae_lock, flags);
+ raw_spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 5c75c1b2352..9baae8afe8a 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -81,7 +81,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ALPHA
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h
index 25da0017ec8..70145cbb21c 100644
--- a/arch/alpha/include/asm/fcntl.h
+++ b/arch/alpha/include/asm/fcntl.h
@@ -1,8 +1,6 @@
#ifndef _ALPHA_FCNTL_H
#define _ALPHA_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_CREAT 01000 /* not fcntl */
#define O_TRUNC 02000 /* not fcntl */
#define O_EXCL 04000 /* not fcntl */
@@ -10,13 +8,28 @@
#define O_NONBLOCK 00004
#define O_APPEND 00010
-#define O_SYNC 040000
+#define O_DSYNC 040000 /* used to be O_SYNC, see below */
#define O_DIRECTORY 0100000 /* must be a directory */
#define O_NOFOLLOW 0200000 /* don't follow links */
#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */
#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */
#define O_NOATIME 04000000
#define O_CLOEXEC 010000000 /* set close_on_exec */
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 020000000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define F_GETLK 7
#define F_SETLK 8
diff --git a/arch/alpha/include/asm/perf_event.h b/arch/alpha/include/asm/perf_event.h
new file mode 100644
index 00000000000..3bef8522017
--- /dev/null
+++ b/arch/alpha/include/asm/perf_event.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_ALPHA_PERF_EVENT_H
+#define __ASM_ALPHA_PERF_EVENT_H
+
+/* Alpha only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#endif /* __ASM_ALPHA_PERF_EVENT_H */
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index e38fb95cb33..d0faca1e992 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -12,18 +12,18 @@
* We make no fairness assumptions. They have a cost.
*/
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(x) \
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(x) \
do { cpu_relax(); } while ((x)->lock)
-static inline void __raw_spin_unlock(raw_spinlock_t * lock)
+static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
-static inline void __raw_spin_lock(raw_spinlock_t * lock)
+static inline void arch_spin_lock(arch_spinlock_t * lock)
{
long tmp;
@@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock)
: "m"(lock->lock) : "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
/***********************************************************/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}
-static inline void __raw_read_lock(raw_rwlock_t *lock)
+static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;
@@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *lock)
+static inline void arch_write_lock(arch_rwlock_t *lock)
{
long regx;
@@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock)
: "m" (*lock) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t * lock)
+static inline int arch_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock)
return success;
}
-static inline int __raw_write_trylock(raw_rwlock_t * lock)
+static inline int arch_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
@@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock)
return success;
}
-static inline void __raw_read_unlock(raw_rwlock_t * lock)
+static inline void arch_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock)
: "m" (*lock) : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t * lock)
+static inline void arch_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
index 8141eb5ebf0..54c2afce0a1 100644
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 7f23665122d..804e5311c84 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -247,6 +247,7 @@
#define __IGNORE_pause
#define __IGNORE_time
#define __IGNORE_utime
+#define __IGNORE_umount2
/*
* Linux-specific system calls begin at 300
@@ -434,10 +435,24 @@
#define __NR_timerfd 477
#define __NR_eventfd 478
#define __NR_recvmmsg 479
+#define __NR_fallocate 480
+#define __NR_timerfd_create 481
+#define __NR_timerfd_settime 482
+#define __NR_timerfd_gettime 483
+#define __NR_signalfd4 484
+#define __NR_eventfd2 485
+#define __NR_epoll_create1 486
+#define __NR_dup3 487
+#define __NR_pipe2 488
+#define __NR_inotify_init1 489
+#define __NR_preadv 490
+#define __NR_pwritev 491
+#define __NR_rt_tgsigqueueinfo 492
+#define __NR_perf_event_open 493
#ifdef __KERNEL__
-#define NR_SYSCALLS 480
+#define NR_SYSCALLS 494
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
index d9980d47ab8..e6d90568b65 100644
--- a/arch/alpha/kernel/core_t2.c
+++ b/arch/alpha/kernel/core_t2.c
@@ -74,7 +74,7 @@
# define DBG(args)
#endif
-DEFINE_SPINLOCK(t2_hae_lock);
+DEFINE_RAW_SPINLOCK(t2_hae_lock);
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c0de072b830..5f2cf23c464 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v)
#endif
if (irq < ACTUAL_NR_IRQS) {
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
action = irq_desc[irq].action;
if (!action)
goto unlock;
@@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
} else if (irq == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
seq_puts(p, "IPI: ");
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 9a3334ae282..62619f25132 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -178,25 +178,18 @@ SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
- struct file *file = NULL;
- unsigned long ret = -EBADF;
+ unsigned long ret = -EINVAL;
#if 0
if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
printk("%s: unimplemented OSF mmap flags %04lx\n",
current->comm, flags);
#endif
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- down_write(&current->mm->mmap_sem);
- ret = do_mmap(file, addr, len, prot, flags, off);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
+ if ((off + PAGE_ALIGN(len)) < off)
+ goto out;
+ if (off & ~PAGE_MASK)
+ goto out;
+ ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return ret;
}
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index d12af472e1c..dbbf04f9230 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <asm/console.h>
#include <asm/uaccess.h>
#include <asm/machvec.h>
@@ -79,42 +80,41 @@ static srm_env_t srm_named_entries[] = {
static srm_env_t srm_numbered_entries[256];
-static int
-srm_env_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int srm_env_proc_show(struct seq_file *m, void *v)
{
- int nbytes;
unsigned long ret;
srm_env_t *entry;
+ char *page;
- if (off != 0) {
- *eof = 1;
- return 0;
- }
+ entry = (srm_env_t *)m->private;
+ page = (char *)__get_free_page(GFP_USER);
+ if (!page)
+ return -ENOMEM;
- entry = (srm_env_t *) data;
- ret = callback_getenv(entry->id, page, count);
+ ret = callback_getenv(entry->id, page, PAGE_SIZE);
if ((ret >> 61) == 0) {
- nbytes = (int) ret;
- *eof = 1;
+ seq_write(m, page, ret);
+ ret = 0;
} else
- nbytes = -EFAULT;
+ ret = -EFAULT;
+ free_page((unsigned long)page);
+ return ret;
+}
- return nbytes;
+static int srm_env_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, srm_env_proc_show, PDE(inode)->data);
}
-static int
-srm_env_write(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
int res;
- srm_env_t *entry;
+ srm_env_t *entry = PDE(file->f_path.dentry->d_inode)->data;
char *buf = (char *) __get_free_page(GFP_USER);
unsigned long ret1, ret2;
- entry = (srm_env_t *) data;
-
if (!buf)
return -ENOMEM;
@@ -140,6 +140,15 @@ srm_env_write(struct file *file, const char __user *buffer, unsigned long count,
return res;
}
+static const struct file_operations srm_env_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = srm_env_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = srm_env_proc_write,
+};
+
static void
srm_env_cleanup(void)
{
@@ -245,15 +254,10 @@ srm_env_init(void)
*/
entry = srm_named_entries;
while (entry->name && entry->id) {
- entry->proc_entry = create_proc_entry(entry->name,
- 0644, named_dir);
+ entry->proc_entry = proc_create_data(entry->name, 0644, named_dir,
+ &srm_env_proc_fops, entry);
if (!entry->proc_entry)
goto cleanup;
-
- entry->proc_entry->data = (void *) entry;
- entry->proc_entry->read_proc = srm_env_read;
- entry->proc_entry->write_proc = srm_env_write;
-
entry++;
}
@@ -264,15 +268,12 @@ srm_env_init(void)
entry = &srm_numbered_entries[var_num];
entry->name = number[var_num];
- entry->proc_entry = create_proc_entry(entry->name,
- 0644, numbered_dir);
+ entry->proc_entry = proc_create_data(entry->name, 0644, numbered_dir,
+ &srm_env_proc_fops, entry);
if (!entry->proc_entry)
goto cleanup;
entry->id = var_num;
- entry->proc_entry->data = (void *) entry;
- entry->proc_entry->read_proc = srm_env_read;
- entry->proc_entry->write_proc = srm_env_write;
}
printk(KERN_INFO "%s: version %s loaded successfully\n", NAME,
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index cda6b8b3d57..09acb786e72 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -495,9 +495,23 @@ sys_call_table:
.quad sys_epoll_pwait
.quad sys_utimensat /* 475 */
.quad sys_signalfd
- .quad sys_ni_syscall
+ .quad sys_ni_syscall /* sys_timerfd */
.quad sys_eventfd
.quad sys_recvmmsg
+ .quad sys_fallocate /* 480 */
+ .quad sys_timerfd_create
+ .quad sys_timerfd_settime
+ .quad sys_timerfd_gettime
+ .quad sys_signalfd4
+ .quad sys_eventfd2 /* 485 */
+ .quad sys_epoll_create1
+ .quad sys_dup3
+ .quad sys_pipe2
+ .quad sys_inotify_init1
+ .quad sys_preadv /* 490 */
+ .quad sys_pwritev
+ .quad sys_rt_tgsigqueueinfo
+ .quad sys_perf_event_open
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf8a99f19dc..233a222752c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -603,6 +603,7 @@ config ARCH_SA1100
select ARCH_SPARSEMEM_ENABLE
select ARCH_MTD_XIP
select ARCH_HAS_CPUFREQ
+ select CPU_FREQ
select GENERIC_GPIO
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
@@ -1359,13 +1360,9 @@ source "drivers/cpufreq/Kconfig"
config CPU_FREQ_SA1100
bool
- depends on CPU_FREQ && (SA1100_H3100 || SA1100_H3600 || SA1100_LART || SA1100_PLEB || SA1100_BADGE4 || SA1100_HACKKIT)
- default y
config CPU_FREQ_SA1110
bool
- depends on CPU_FREQ && (SA1100_ASSABET || SA1100_CERF || SA1100_PT_SYSTEM3)
- default y
config CPU_FREQ_INTEGRATOR
tristate "CPUfreq driver for ARM Integrator CPUs"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index ff54c23d085..5cb9326df7a 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -71,6 +71,14 @@ config DEBUG_LL
in the kernel. This is helpful if you are debugging code that
executes before the console is initialized.
+config EARLY_PRINTK
+ bool "Early printk"
+ depends on DEBUG_LL
+ help
+ Say Y here if you want to have an early console using the
+ kernel low-level debugging functions. Add earlyprintk to your
+ kernel parameters to enable this console.
+
config DEBUG_ICEDCC
bool "Kernel low-level debugging via EmbeddedICE DCC channel"
depends on DEBUG_LL
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index fa0cdab2e1d..e9da08483b3 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -242,15 +242,8 @@ all: $(KBUILD_IMAGE)
boot := arch/arm/boot
-# Update machine arch and proc symlinks if something which affects
-# them changed. We use .arch to indicate when they were updated
-# last, otherwise make uses the target directory mtime.
-
-archprepare: maketools
-
-PHONY += maketools FORCE
-maketools: include/linux/version.h FORCE
- $(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h
+archprepare:
+ $(Q)$(MAKE) $(build)=arch/arm/tools include/generated/mach-types.h
# Convert bzImage to zImage
bzImage: zImage
@@ -261,9 +254,6 @@ zImage Image xipImage bootpImage uImage: vmlinux
zinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-CLEAN_FILES += include/asm-arm/mach-types.h \
- include/asm-arm/arch include/asm-arm/.arch
-
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 5a375e5fef2..bc90364a96c 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
memcpy(ptr, buf->safe, size);
/*
- * DMA buffers must have the same cache properties
- * as if they were really used for DMA - which means
- * data must be written back to RAM. Note that
- * we don't use dmac_flush_range() here for the
- * bidirectional case because we know the cache
- * lines will be coherent with the data written.
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
*/
- dmac_clean_range(ptr, ptr + size);
- outer_clean_range(__pa(ptr), __pa(ptr) + size);
+ __cpuc_flush_kernel_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
}
diff --git a/arch/arm/configs/htcherald_defconfig b/arch/arm/configs/htcherald_defconfig
index 33826767407..1b39691b816 100644
--- a/arch/arm/configs/htcherald_defconfig
+++ b/arch/arm/configs/htcherald_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc6
-# Sat Nov 14 10:56:01 2009
+# Linux kernel version: 2.6.32-rc8
+# Sat Dec 5 12:16:24 2009
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -198,7 +198,9 @@ CONFIG_ARCH_OMAP1=y
# OMAP Feature Selections
#
# CONFIG_OMAP_RESET_CLOCKS is not set
-# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MUX=y
+# CONFIG_OMAP_MUX_DEBUG is not set
+CONFIG_OMAP_MUX_WARNINGS=y
CONFIG_OMAP_MCBSP=y
# CONFIG_OMAP_MBOX_FWK is not set
CONFIG_OMAP_MPU_TIMER=y
@@ -207,6 +209,7 @@ CONFIG_OMAP_LL_DEBUG_UART1=y
# CONFIG_OMAP_LL_DEBUG_UART2 is not set
# CONFIG_OMAP_LL_DEBUG_UART3 is not set
# CONFIG_OMAP_LL_DEBUG_NONE is not set
+CONFIG_OMAP_SERIAL_WAKE=y
# CONFIG_OMAP_PM_NONE is not set
CONFIG_OMAP_PM_NOOP=y
diff --git a/arch/arm/configs/omap3_touchbook_defconfig b/arch/arm/configs/omap3_touchbook_defconfig
new file mode 100644
index 00000000000..7c8515e65c0
--- /dev/null
+++ b/arch/arm/configs/omap3_touchbook_defconfig
@@ -0,0 +1,2431 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc8
+# Fri Dec 4 16:02:17 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_OPROFILE_ARMV7=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_ARCH_BCMRING is not set
+
+#
+# TI OMAP Implementations
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+# CONFIG_ARCH_OMAP4 is not set
+
+#
+# OMAP Feature Selections
+#
+# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set
+# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set
+CONFIG_OMAP_RESET_CLOCKS=y
+# CONFIG_OMAP_MUX is not set
+CONFIG_OMAP_MCBSP=y
+# CONFIG_OMAP_MBOX_FWK is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_LL_DEBUG_UART1 is not set
+# CONFIG_OMAP_LL_DEBUG_UART2 is not set
+CONFIG_OMAP_LL_DEBUG_UART3=y
+# CONFIG_OMAP_LL_DEBUG_NONE is not set
+# CONFIG_OMAP_PM_NONE is not set
+CONFIG_OMAP_PM_NOOP=y
+CONFIG_ARCH_OMAP34XX=y
+CONFIG_ARCH_OMAP3430=y
+
+#
+# OMAP Board Type
+#
+# CONFIG_MACH_OMAP3_BEAGLE is not set
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OVERO is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_OMAP3517EVM is not set
+# CONFIG_MACH_OMAP3_PANDORA is not set
+CONFIG_MACH_OMAP3_TOUCHBOOK=y
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_NOKIA_RX51 is not set
+# CONFIG_MACH_OMAP_ZOOM2 is not set
+# CONFIG_MACH_OMAP_ZOOM3 is not set
+# CONFIG_MACH_CM_T35 is not set
+# CONFIG_MACH_IGEP0020 is not set
+# CONFIG_MACH_OMAP_3630SDP is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_HAS_TLS_REG=y
+CONFIG_ARM_L1_CACHE_SHIFT=6
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_LEDS=y
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=" debug "
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_FREQ is not set
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_HAVE_AOUT=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_VERBOSE is not set
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_APM_EMULATION is not set
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+# CONFIG_DEFAULT_BIC is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_HTCP is not set
+# CONFIG_DEFAULT_VEGAS is not set
+# CONFIG_DEFAULT_WESTWOOD is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+# CONFIG_IPV6_PIMSM_V2 is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CT_ACCT=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_GRE=m
+CONFIG_NF_CT_PROTO_SCTP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+# CONFIG_NETFILTER_TPROXY is not set
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_HL=m
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+# CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_DEBUG=y
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_ADDRTYPE=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PROTO_DCCP=m
+CONFIG_NF_NAT_PROTO_GRE=m
+CONFIG_NF_NAT_PROTO_UDPLITE=m
+CONFIG_NF_NAT_PROTO_SCTP=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_QUEUE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+
+#
+# DCCP CCIDs Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP_CCID2_DEBUG is not set
+CONFIG_IP_DCCP_CCID3=y
+# CONFIG_IP_DCCP_CCID3_DEBUG is not set
+CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=y
+
+#
+# DCCP Kernel Hacking
+#
+# CONFIG_IP_DCCP_DEBUG is not set
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
+CONFIG_TIPC=m
+# CONFIG_TIPC_ADVANCED is not set
+# CONFIG_TIPC_DEBUG is not set
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+CONFIG_WAN_ROUTER=m
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+# CONFIG_NET_EMATCH is not set
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=y
+CONFIG_BT_HCIBPA10X=y
+CONFIG_BT_HCIBFUSB=y
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+CONFIG_AF_RXRPC=m
+# CONFIG_AF_RXRPC_DEBUG is not set
+# CONFIG_RXKAD is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEFAULT_PS_VALUE=1
+# CONFIG_CFG80211_DEBUGFS is not set
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=y
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+# CONFIG_MAC80211_RC_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT="pid"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_WIMAX=m
+CONFIG_WIMAX_DEBUG_LEVEL=8
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_OMAP2=y
+CONFIG_MTD_NAND_OMAP_PREFETCH=y
+# CONFIG_MTD_NAND_OMAP_PREFETCH_DMA is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+CONFIG_EEPROM_93CX6=y
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+CONFIG_SCSI_ISCSI_ATTRS=m
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_RAID6_PQ=m
+# CONFIG_ASYNC_RAID6_TEST is not set
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+# CONFIG_DM_LOG_USERSPACE is not set
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+# CONFIG_DM_MULTIPATH_QL is not set
+# CONFIG_DM_MULTIPATH_ST is not set
+CONFIG_DM_DELAY=m
+# CONFIG_DM_UEVENT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_MACVLAN=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_ETHERNET is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
+# CONFIG_IWM is not set
+
+#
+# WiMAX Wireless Broadband devices
+#
+# CONFIG_WIMAX_I2400M_USB is not set
+# CONFIG_WIMAX_I2400M_SDIO is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_ATM_DRIVERS is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOATM is not set
+CONFIG_PPPOL2TP=m
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETPOLL=y
+CONFIG_NETPOLL_TRAP=y
+CONFIG_NET_POLL_CONTROLLER=y
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_TWL4030 is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ADS7846=y
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+CONFIG_GPIO_TWL4030=y
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_BATTERY_DS2760 is not set
+# CONFIG_BATTERY_DS2782 is not set
+CONFIG_BATTERY_BQ27x00=y
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_HWMON=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+# CONFIG_TWL4030_WATCHDOG is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+CONFIG_TWL4030_CORE=y
+# CONFIG_TWL4030_POWER is not set
+# CONFIG_TWL4030_CODEC is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_ARM is not set
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_WACOM is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_OXU210HP_HCD=y
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SOC=y
+
+#
+# OMAP 343x high speed USB support
+#
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+# CONFIG_USB_SERIAL_CP210X is not set
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_FUNSOFT=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MOTOROLA=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_HP4X=m
+CONFIG_USB_SERIAL_SAFE=m
+# CONFIG_USB_SERIAL_SAFE_PADDED is not set
+CONFIG_USB_SERIAL_SIEMENS_MPI=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+# CONFIG_USB_SERIAL_SYMBOL is not set
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_SISUSBVGA_CON=y
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_ATM is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+CONFIG_USB_ZERO_HNPTEST=y
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_MIDI_GADGET=m
+CONFIG_USB_G_PRINTER=m
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+CONFIG_USB_GPIO_VBUS=y
+# CONFIG_ISP1301_OMAP is not set
+CONFIG_TWL4030_USB=y
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_OMAP is not set
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+CONFIG_MMC_SPI=m
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=m
+CONFIG_UIO_PDRV=m
+CONFIG_UIO_PDRV_GENIRQ=m
+# CONFIG_UIO_SMX is not set
+# CONFIG_UIO_SERCOS3 is not set
+
+#
+# TI VLYNQ
+#
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_OTUS is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_INPUT_MIMIO is not set
+# CONFIG_TRANZPORT is not set
+
+#
+# Android
+#
+
+#
+# Qualcomm MSM Camera And Video
+#
+
+#
+# Camera Sensor Selection
+#
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_DST is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_PLAN9AUTH is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_FB_UDL is not set
+
+#
+# RAR Register Driver
+#
+# CONFIG_RAR_REGISTER is not set
+# CONFIG_IIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=m
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=m
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=m
+CONFIG_REISERFS_FS=m
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+# CONFIG_REISERFS_FS_POSIX_ACL is not set
+# CONFIG_REISERFS_FS_SECURITY is not set
+CONFIG_JFS_FS=m
+# CONFIG_JFS_POSIX_ACL is not set
+# CONFIG_JFS_SECURITY is not set
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_XFS_FS=m
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_RW=y
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+# CONFIG_JFFS2_CMODE_PRIORITY is not set
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_CMODE_FAVOURLZO=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_XATTR=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_UPCALL is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DFS_UPCALL is not set
+CONFIG_CIFS_EXPERIMENTAL=y
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_XOR_BLOCKS=m
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_XCBC=m
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_GHASH=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/omap_3430sdp_defconfig b/arch/arm/configs/omap_3430sdp_defconfig
index 84829587d55..592457cfbbe 100644
--- a/arch/arm/configs/omap_3430sdp_defconfig
+++ b/arch/arm/configs/omap_3430sdp_defconfig
@@ -963,10 +963,32 @@ CONFIG_FB_CFB_IMAGEBLIT=y
#
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
-CONFIG_FB_OMAP=y
-# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_LCD_VGA is not set
# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
-CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+CONFIG_PANEL_SHARP_LS037V7DW01=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
diff --git a/arch/arm/configs/omap_4430sdp_defconfig b/arch/arm/configs/omap_4430sdp_defconfig
index a464ca332a2..2319113c86b 100644
--- a/arch/arm/configs/omap_4430sdp_defconfig
+++ b/arch/arm/configs/omap_4430sdp_defconfig
@@ -1,26 +1,29 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc7
-# Tue Jun 9 12:36:23 2009
+# Linux kernel version: 2.6.32
+# Sun Dec 6 23:37:45 2009
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
-CONFIG_MMU=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_LOCKBREAK=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -39,11 +42,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
@@ -52,8 +56,7 @@ CONFIG_FAIR_GROUP_SCHED=y
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-# CONFIG_SYSFS_DEPRECATED=y is not set
-# CONFIG_SYSFS_DEPRECATED_V2=y is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
@@ -70,7 +73,6 @@ CONFIG_UID16=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -83,6 +85,10 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
CONFIG_COMPAT_BRK=y
@@ -90,13 +96,16 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
# CONFIG_KPROBES is not set
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
@@ -110,7 +119,7 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -131,6 +140,7 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# System Type
#
+CONFIG_MMU=y
# CONFIG_ARCH_AAEC2000 is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_REALVIEW is not set
@@ -142,8 +152,10 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_EP93XX is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
# CONFIG_ARCH_NETX is not set
# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
# CONFIG_ARCH_IOP13XX is not set
# CONFIG_ARCH_IOP32X is not set
# CONFIG_ARCH_IOP33X is not set
@@ -166,10 +178,13 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_SA1100 is not set
# CONFIG_ARCH_S3C2410 is not set
# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
# CONFIG_ARCH_SHARK is not set
# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
# CONFIG_ARCH_DAVINCI is not set
CONFIG_ARCH_OMAP=y
+# CONFIG_ARCH_BCMRING is not set
#
# TI OMAP Implementations
@@ -190,9 +205,12 @@ CONFIG_ARCH_OMAP4=y
CONFIG_OMAP_32K_TIMER=y
CONFIG_OMAP_32K_TIMER_HZ=128
CONFIG_OMAP_DM_TIMER=y
-CONFIG_OMAP_LL_DEBUG_UART1=y
+# CONFIG_OMAP_LL_DEBUG_UART1 is not set
# CONFIG_OMAP_LL_DEBUG_UART2 is not set
-# CONFIG_OMAP_LL_DEBUG_UART3 is not set
+CONFIG_OMAP_LL_DEBUG_UART3=y
+# CONFIG_OMAP_LL_DEBUG_NONE is not set
+# CONFIG_OMAP_PM_NONE is not set
+CONFIG_OMAP_PM_NOOP=y
#
# OMAP Board Type
@@ -207,7 +225,7 @@ CONFIG_CPU_32v6K=y
CONFIG_CPU_V7=y
CONFIG_CPU_32v7=y
CONFIG_CPU_ABRT_EV7=y
-CONFIG_CPU_PABRT_IFAR=y
+CONFIG_CPU_PABRT_V7=y
CONFIG_CPU_CACHE_V7=y
CONFIG_CPU_CACHE_VIPT=y
CONFIG_CPU_COPY_V6=y
@@ -222,9 +240,10 @@ CONFIG_CPU_CP15_MMU=y
# CONFIG_ARM_THUMB is not set
# CONFIG_ARM_THUMBEE is not set
# CONFIG_CPU_ICACHE_DISABLE is not set
-CONFIG_CPU_DCACHE_DISABLE=y
+# CONFIG_CPU_DCACHE_DISABLE is not set
# CONFIG_CPU_BPREDICT_DISABLE is not set
CONFIG_HAS_TLS_REG=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
# CONFIG_ARM_ERRATA_430973 is not set
# CONFIG_ARM_ERRATA_458693 is not set
# CONFIG_ARM_ERRATA_460075 is not set
@@ -245,18 +264,20 @@ CONFIG_ARM_GIC=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y
CONFIG_HAVE_ARM_SCU=y
-CONFIG_HAVE_ARM_TWD=y
CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_2G is not set
# CONFIG_VMSPLIT_1G is not set
CONFIG_PAGE_OFFSET=0xC0000000
CONFIG_NR_CPUS=2
# CONFIG_HOTPLUG_CPU is not set
-CONFIG_LOCAL_TIMERS=y
-# CONFIG_PREEMPT is not set
+# CONFIG_LOCAL_TIMERS is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
+CONFIG_OABI_COMPAT=y
# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
# CONFIG_HIGHMEM is not set
@@ -271,10 +292,13 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-# CONFIG_UNEVICTABLE_LRU is not set
CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_LEDS is not set
CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
#
# Boot options
@@ -298,9 +322,11 @@ CONFIG_CMDLINE="root=/dev/ram0 rw mem=128M console=ttyS0,115200n8 initrd=0x81600
#
# At least one emulation must be selected
#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
CONFIG_VFP=y
CONFIG_VFPv3=y
-# CONFIG_NEON is not set
+CONFIG_NEON=y
#
# Userspace binary formats
@@ -325,6 +351,7 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
@@ -342,6 +369,7 @@ CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=16384
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_MG_DISK is not set
# CONFIG_MISC_DEVICES is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -355,6 +383,7 @@ CONFIG_HAVE_IDE=y
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
+# CONFIG_PHONE is not set
#
# Input device support
@@ -427,6 +456,11 @@ CONFIG_HW_RANDOM=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -447,11 +481,14 @@ CONFIG_GPIOLIB=y
#
# SPI GPIO expanders:
#
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -472,21 +509,8 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_T7L66XB is not set
# CONFIG_MFD_TC6387XB is not set
# CONFIG_MFD_TC6393XB is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-CONFIG_DAB=y
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -511,14 +535,17 @@ CONFIG_DUMMY_CONSOLE=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
-# CONFIG_ACCESSIBILITY is not set
# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
-# CONFIG_REGULATOR is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -535,9 +562,12 @@ CONFIG_JBD=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -601,7 +631,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
#
# Partition Types
@@ -673,23 +702,24 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+# CONFIG_DETECT_SOFTLOCKUP is not set
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
-CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHED_DEBUG is not set
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -708,31 +738,22 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_PAGE_POISONING is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
-# CONFIG_FUNCTION_TRACER is not set
-# CONFIG_IRQSOFF_TRACER is not set
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
-# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
-# CONFIG_STACK_TRACER is not set
-# CONFIG_KMEMTRACE is not set
-# CONFIG_WORKQUEUE_TRACER is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_FTRACE is not set
+# CONFIG_BRANCH_PROFILE_NONE is not set
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@@ -754,7 +775,6 @@ CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
@@ -796,11 +816,13 @@ CONFIG_CRYPTO_PCBC=m
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/arm/configs/omap_zoom2_defconfig b/arch/arm/configs/omap_zoom2_defconfig
index eef93627fb1..4b00a430681 100644
--- a/arch/arm/configs/omap_zoom2_defconfig
+++ b/arch/arm/configs/omap_zoom2_defconfig
@@ -610,7 +610,8 @@ CONFIG_INPUT_EVDEV=y
#
# Input Device Drivers
#
-# CONFIG_INPUT_KEYBOARD is not set
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_TWL4030=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
diff --git a/arch/arm/configs/omap_zoom3_defconfig b/arch/arm/configs/omap_zoom3_defconfig
index f0e7d0f8558..0d7e37a3651 100644
--- a/arch/arm/configs/omap_zoom3_defconfig
+++ b/arch/arm/configs/omap_zoom3_defconfig
@@ -629,7 +629,8 @@ CONFIG_INPUT_EVDEV=y
#
# Input Device Drivers
#
-# CONFIG_INPUT_KEYBOARD is not set
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_TWL4030=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
new file mode 100644
index 00000000000..823b11e7091
--- /dev/null
+++ b/arch/arm/configs/zeus_defconfig
@@ -0,0 +1,2032 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32
+# Tue Dec 8 20:27:05 2009
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MTD_XIP=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+# CONFIG_TREE_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=13
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_DEADLINE=y
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_U8500 is not set
+
+#
+# Intel PXA2xx/PXA3xx Implementations
+#
+
+#
+# Intel/Marvell Dev Platforms (sorted by hardware release time)
+#
+# CONFIG_ARCH_LUBBOCK is not set
+# CONFIG_MACH_MAINSTONE is not set
+# CONFIG_MACH_ZYLONITE300 is not set
+# CONFIG_MACH_ZYLONITE320 is not set
+# CONFIG_MACH_LITTLETON is not set
+# CONFIG_MACH_TAVOREVB is not set
+# CONFIG_MACH_SAAR is not set
+
+#
+# Third Party Dev Platforms (sorted by vendor name)
+#
+# CONFIG_ARCH_PXA_IDP is not set
+# CONFIG_ARCH_VIPER is not set
+CONFIG_MACH_ARCOM_ZEUS=y
+# CONFIG_MACH_BALLOON3 is not set
+# CONFIG_MACH_CSB726 is not set
+# CONFIG_MACH_ARMCORE is not set
+# CONFIG_MACH_EM_X270 is not set
+# CONFIG_MACH_EXEDA is not set
+# CONFIG_MACH_CM_X300 is not set
+# CONFIG_ARCH_GUMSTIX is not set
+# CONFIG_MACH_INTELMOTE2 is not set
+# CONFIG_MACH_STARGATE2 is not set
+# CONFIG_MACH_XCEP is not set
+# CONFIG_TRIZEPS_PXA is not set
+CONFIG_ARCOM_PCMCIA=y
+# CONFIG_MACH_LOGICPD_PXA270 is not set
+# CONFIG_MACH_PCM027 is not set
+# CONFIG_MACH_COLIBRI is not set
+# CONFIG_MACH_COLIBRI300 is not set
+# CONFIG_MACH_COLIBRI320 is not set
+
+#
+# End-user Products (sorted by vendor name)
+#
+# CONFIG_MACH_H4700 is not set
+# CONFIG_MACH_H5000 is not set
+# CONFIG_MACH_HIMALAYA is not set
+# CONFIG_MACH_MAGICIAN is not set
+# CONFIG_MACH_MIOA701 is not set
+# CONFIG_PXA_EZX is not set
+# CONFIG_MACH_MP900C is not set
+# CONFIG_ARCH_PXA_PALM is not set
+# CONFIG_PXA_SHARPSL is not set
+# CONFIG_ARCH_PXA_ESERIES is not set
+CONFIG_PXA27x=y
+CONFIG_PXA_SSP=y
+CONFIG_PXA_HAVE_BOARD_IRQS=y
+CONFIG_PXA_HAVE_ISA_IRQS=y
+CONFIG_PLAT_PXA=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSCALE=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_DCACHE_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_IWMMXT=y
+CONFIG_XSCALE_PMU=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+CONFIG_ISA=y
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_PCCARD=m
+CONFIG_PCMCIA=m
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+# CONFIG_I82365 is not set
+# CONFIG_TCIC is not set
+CONFIG_PCMCIA_SOC_COMMON=m
+CONFIG_PCMCIA_PXA2XX=m
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA_PROBE=y
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=31:02 rootfstype=jffs2 ro console=ttyS0,115200"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=m
+CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_APM_EMULATION=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+# CONFIG_BT_SCO is not set
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+# CONFIG_BT_BNEP_MC_FILTER is not set
+# CONFIG_BT_BNEP_PROTO_FILTER is not set
+# CONFIG_BT_HIDP is not set
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTUSB is not set
+# CONFIG_BT_HCIBTSDIO is not set
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+# CONFIG_BT_HCIUART_LL is not set
+# CONFIG_BT_HCIBCM203X is not set
+# CONFIG_BT_HCIBPA10X is not set
+# CONFIG_BT_HCIBFUSB is not set
+# CONFIG_BT_HCIDTL1 is not set
+# CONFIG_BT_HCIBT3C is not set
+# CONFIG_BT_HCIBLUECARD is not set
+# CONFIG_BT_HCIBTUART is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+CONFIG_MTD_REDBOOT_PARTS_READONLY=y
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+# CONFIG_MTD_CFI_I2 is not set
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+CONFIG_MTD_PXA2XX=y
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_GPIO_ADDR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+# CONFIG_PNP is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_IN2000 is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DTC3280 is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_NCR53C406A is not set
+# CONFIG_SCSI_PAS16 is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_SYM53C416 is not set
+# CONFIG_SCSI_T128 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=m
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
+# CONFIG_SATA_PMP is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_MV is not set
+# CONFIG_PATA_LEGACY is not set
+CONFIG_PATA_PCMCIA=m
+# CONFIG_PATA_QDI is not set
+# CONFIG_PATA_WINBOND_VLB is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_SMC91X is not set
+CONFIG_DM9000=y
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_DNET is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+CONFIG_WLAN=y
+# CONFIG_PCMCIA_RAYCS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_ATMEL is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_IWM is not set
+# CONFIG_LIBERTAS is not set
+CONFIG_HERMES=m
+CONFIG_HERMES_CACHE_FW_ON_INIT=y
+CONFIG_PCMCIA_HERMES=m
+# CONFIG_PCMCIA_SPECTRUM is not set
+# CONFIG_P54_COMMON is not set
+CONFIG_RT2X00=m
+# CONFIG_RT2500USB is not set
+CONFIG_RT73USB=m
+# CONFIG_RT2800USB is not set
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+# CONFIG_WL12XX is not set
+# CONFIG_ZD1211RW is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_NET_PCMCIA=y
+# CONFIG_PCMCIA_3C589 is not set
+# CONFIG_PCMCIA_3C574 is not set
+# CONFIG_PCMCIA_FMVJ18X is not set
+# CONFIG_PCMCIA_PCNET is not set
+# CONFIG_PCMCIA_NMCLAN is not set
+# CONFIG_PCMCIA_SMC91C92 is not set
+# CONFIG_PCMCIA_XIRC2PS is not set
+# CONFIG_PCMCIA_AXNET is not set
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+CONFIG_TOUCHSCREEN_FUJITSU=m
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+CONFIG_TOUCHSCREEN_ELO=m
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+CONFIG_TOUCHSCREEN_MTOUCH=m
+CONFIG_TOUCHSCREEN_INEXIO=m
+# CONFIG_TOUCHSCREEN_MK712 is not set
+CONFIG_TOUCHSCREEN_HTCPEN=m
+CONFIG_TOUCHSCREEN_PENMOUNT=m
+CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
+CONFIG_TOUCHSCREEN_TOUCHWIN=m
+# CONFIG_TOUCHSCREEN_WM97XX is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+CONFIG_TOUCHSCREEN_TOUCHIT213=m
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_CS is not set
+CONFIG_SERIAL_8250_NR_UARTS=7
+CONFIG_SERIAL_8250_RUNTIME_UARTS=7
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_PXA is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=m
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PXA=y
+# CONFIG_I2C_PXA_SLAVE is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_ELEKTOR is not set
+# CONFIG_I2C_PCA_ISA is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_PXA2XX=y
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM75=m
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_SA1100_WATCHDOG is not set
+
+#
+# ISA-based Watchdog Cards
+#
+# CONFIG_PCWATCHDOG is not set
+# CONFIG_MIXCOMWD is not set
+# CONFIG_WDT is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=m
+CONFIG_FB_CFB_COPYAREA=m
+CONFIG_FB_CFB_IMAGEBLIT=m
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_PXA=m
+# CONFIG_FB_PXA_OVERLAY is not set
+# CONFIG_FB_PXA_SMARTPANEL is not set
+CONFIG_FB_PXA_PARAMETERS=y
+# CONFIG_FB_MBX is not set
+# CONFIG_FB_W100 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=m
+CONFIG_BACKLIGHT_GENERIC=m
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=m
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_AC97_POWER_SAVE is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_PXA2XX_PCM=m
+CONFIG_SND_PXA2XX_LIB=m
+CONFIG_SND_PXA2XX_LIB_AC97=y
+CONFIG_SND_PXA2XX_AC97=m
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_PCMCIA is not set
+CONFIG_SND_SOC=m
+CONFIG_SND_PXA2XX_SOC=m
+CONFIG_SND_SOC_I2C_AND_SPI=m
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=m
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=m
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_PXA=y
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=m
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+CONFIG_RTC_DRV_ISL1208=m
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_SA1100 is not set
+CONFIG_RTC_DRV_PXA=m
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=m
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+# CONFIG_DEBUG_USER is not set
+CONFIG_DEBUG_ERRORS=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=m
+CONFIG_CRYPTO_ALGAPI2=m
+CONFIG_CRYPTO_AEAD2=m
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=m
+CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=m
+CONFIG_CRYPTO_RNG2=m
+CONFIG_CRYPTO_PCOMP=m
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=m
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=m
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/include/asm/asm-offsets.h b/arch/arm/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/arm/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 73eceb87e58..730aefcfbee 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
- void (*flush_kern_dcache_page)(void *);
+ void (*flush_kern_dcache_area)(void *, size_t);
void (*dma_inv_range)(const void *, const void *);
void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
/*
* These are private to the dma-mapping API. Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
{
/* highmem pages are always flushed upon kunmap already */
if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
#define flush_dcache_mmap_lock(mapping) \
@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
*/
#define flush_icache_page(vma,page) do { } while (0)
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
- unsigned offset, size_t size)
-{
- const void *start = (void __force *)virt + offset;
- dmac_inv_range(start, start + size);
-}
-
/*
* flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 6aac3f5bb2f..a399bb5730f 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -101,7 +101,6 @@ extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h
new file mode 100644
index 00000000000..948178cc6ba
--- /dev/null
+++ b/arch/arm/include/asm/mach-types.h
@@ -0,0 +1 @@
+#include <generated/mach-types.h>
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index acac5302e4e..8920b2d6e3b 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *);
*/
#define do_bad_IRQ(irq,desc) \
do { \
- spin_lock(&desc->lock); \
+ raw_spin_lock(&desc->lock); \
handle_bad_irq(irq, desc); \
- spin_unlock(&desc->lock); \
+ raw_spin_unlock(&desc->lock); \
} while(0)
#endif
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index 8eebf89f5ab..41f99c573b9 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1 +1,4 @@
#include <asm-generic/mman.h>
+
+#define arch_mmap_check(addr, len, flags) \
+ (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index c13681ac1ed..c91c64cab92 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -17,13 +17,13 @@
* Locked value: 1
*/
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
smp_mb();
@@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* just write zero since the lock is exclusively held.
*/
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;
@@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
@@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
/*
* Read locks are a bit more hairy:
@@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
smp_mb();
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "cc");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;
@@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
}
/* read_can_lock - would read_trylock() succeed? */
-#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index 43e83f6d2ee..d14d197ae04 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e7ccf7e697c..dd00f747e2a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -54,5 +54,6 @@ endif
head-y := head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL) += debug.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
extra-y := $(head-y) init_task.o vmlinux.lds
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 0e627705f74..8214bfebfac 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -48,27 +48,7 @@ extern void __aeabi_uidivmod(void);
extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
-extern void fp_enter(void);
-/*
- * This has a special calling convention; it doesn't
- * modify any of the usual registers, except for LR.
- */
-#define EXPORT_CRC_ALIAS(sym) __CRC_SYMBOL(sym, "")
-
-#define EXPORT_SYMBOL_ALIAS(sym,orig) \
- EXPORT_CRC_ALIAS(sym) \
- static const struct kernel_symbol __ksymtab_##sym \
- __used __attribute__((section("__ksymtab"))) = \
- { (unsigned long)&orig, #sym };
-
-/*
- * floating point math emulator support.
- * These symbols will never change their calling convention...
- */
-EXPORT_SYMBOL_ALIAS(kern_fp_enter,fp_enter);
-EXPORT_SYMBOL_ALIAS(fp_printk,printk);
-EXPORT_SYMBOL_ALIAS(fp_send_sig,send_sig);
EXPORT_SYMBOL(__backtrace);
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index f58c1156e77..9314a2d681f 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -172,7 +172,7 @@
/* 160 */ CALL(sys_sched_get_priority_min)
CALL(sys_sched_rr_get_interval)
CALL(sys_nanosleep)
- CALL(sys_arm_mremap)
+ CALL(sys_mremap)
CALL(sys_setresuid16)
/* 165 */ CALL(sys_getresuid16)
CALL(sys_ni_syscall) /* vm86 */
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
new file mode 100644
index 00000000000..85aa2b29269
--- /dev/null
+++ b/arch/arm/kernel/early_printk.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/arm/kernel/early_printk.c
+ *
+ * Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+
+extern void printch(int);
+
+static void early_write(const char *s, unsigned n)
+{
+ while (n-- > 0) {
+ if (*s == '\n')
+ printch('\r');
+ printch(*s);
+ s++;
+ }
+}
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+ early_write(s, n);
+}
+
+static struct console early_console = {
+ .name = "earlycon",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+ char buf[512];
+ int n;
+ va_list ap;
+
+ va_start(ap, fmt);
+ n = vscnprintf(buf, sizeof(buf), fmt, ap);
+ early_write(buf, n);
+ va_end(ap);
+}
+
+static int __init setup_early_printk(char *buf)
+{
+ register_console(&early_console);
+ return 0;
+}
+
+early_param("earlyprintk", setup_early_printk);
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f0fe95b7085..2c1db77d784 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -416,12 +416,12 @@ sys_mmap2:
tst r5, #PGOFF_MASK
moveq r5, r5, lsr #PAGE_SHIFT - 12
streq r5, [sp, #4]
- beq do_mmap2
+ beq sys_mmap_pgoff
mov r0, #-EINVAL
mov pc, lr
#else
str r5, [sp, #4]
- b do_mmap2
+ b sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index c9a8619f385..b7cb45bb91e 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
#ifdef CONFIG_FIQ
show_fiq_list(p, v);
@@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
}
desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (iflags & IRQF_VALID)
desc->status &= ~IRQ_NOREQUEST;
@@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
desc->status &= ~IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
desc->status &= ~IRQ_NOAUTOEN;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init init_IRQ(void)
@@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index a73a34dccf2..ea02a7b1c24 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -160,6 +160,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
/* Make sure our local interrupt controller has this enabled */
local_irq_save(flags);
+ irq_to_desc(clk->irq)->status |= IRQ_NOPROBE;
get_irq_chip(clk->irq)->unmask(clk->irq);
local_irq_restore(flags);
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 78ecaac6520..ae4027bd01b 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -28,41 +28,6 @@
#include <linux/ipc.h>
#include <linux/uaccess.h>
-extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
- unsigned long new_len, unsigned long flags,
- unsigned long new_addr);
-
-/* common code for old and new mmaps */
-inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EINVAL;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS)
- goto out;
-
- error = -EBADF;
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
struct mmap_arg_struct {
unsigned long addr;
unsigned long len;
@@ -84,29 +49,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
if (a.offset & ~PAGE_MASK)
goto out;
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
return error;
}
-asmlinkage unsigned long
-sys_arm_mremap(unsigned long addr, unsigned long old_len,
- unsigned long new_len, unsigned long flags,
- unsigned long new_addr)
-{
- unsigned long ret = -EINVAL;
-
- if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS)
- goto out;
-
- down_write(&current->mm->mmap_sem);
- ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
-
-out:
- return ret;
-}
-
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls.
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 71151bd87a3..4957e13ef55 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -65,11 +65,11 @@ SECTIONS
__init_end = .;
#endif
- /DISCARD/ : { /* Exit code and data */
- EXIT_TEXT
- EXIT_DATA
- *(.exitcall.exit)
- *(.discard)
+ /*
+ * unwind exit sections must be discarded before the rest of the
+ * unwind sections get included.
+ */
+ /DISCARD/ : {
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
#ifndef CONFIG_HOTPLUG_CPU
@@ -238,6 +238,9 @@ SECTIONS
STABS_DEBUG
.comment 0 : { *(.comment) }
+
+ /* Default discards */
+ DISCARDS
}
/*
diff --git a/arch/arm/mach-at91/include/mach/atmel-mci.h b/arch/arm/mach-at91/include/mach/atmel-mci.h
new file mode 100644
index 00000000000..998cb0c0713
--- /dev/null
+++ b/arch/arm/mach-at91/include/mach/atmel-mci.h
@@ -0,0 +1,24 @@
+#ifndef __MACH_ATMEL_MCI_H
+#define __MACH_ATMEL_MCI_H
+
+#include <mach/at_hdmac.h>
+
+/**
+ * struct mci_dma_data - DMA data for MCI interface
+ */
+struct mci_dma_data {
+ struct at_dma_slave sdata;
+};
+
+/* accessor macros */
+#define slave_data_ptr(s) (&(s)->sdata)
+#define find_slave_dev(s) ((s)->sdata.dma_dev)
+
+#define setup_dma_addr(s, t, r) do { \
+ if (s) { \
+ (s)->sdata.tx_reg = (t); \
+ (s)->sdata.rx_reg = (r); \
+ } \
+} while (0)
+
+#endif /* __MACH_ATMEL_MCI_H */
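For orientation, a minimal board-code sketch (not part of the patch; the function name and the TDR/RDR register addresses are illustrative assumptions) showing how the accessors above might be used to describe the MCI DMA slave:

#include <linux/init.h>
#include <mach/atmel-mci.h>

static struct mci_dma_data mmc_dma_data;

static void __init board_setup_mmc_dma(void)
{
	/* point the DMA slave at the (hypothetical) MCI TDR/RDR registers */
	setup_dma_addr(&mmc_dma_data, 0xfff80034, 0xfff80030);
}

The MCI driver would then reach the slave configuration through slave_data_ptr() and locate the DMA controller with find_slave_dev().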
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index fbe6fa02c88..53dd2a9eecf 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -70,9 +70,19 @@ static struct ctl_table bcmring_sysctl_reboot[] = {
{}
};
+static struct resource nand_resource[] = {
+ [0] = {
+ .start = MM_ADDR_IO_NAND,
+ .end = MM_ADDR_IO_NAND + 0x1000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
static struct platform_device nand_device = {
.name = "bcm-nand",
.id = -1,
+ .resource = nand_resource,
+ .num_resources = ARRAY_SIZE(nand_resource),
};
static struct platform_device *devices[] __initdata = {
diff --git a/arch/arm/mach-bcmring/include/mach/reg_nand.h b/arch/arm/mach-bcmring/include/mach/reg_nand.h
new file mode 100644
index 00000000000..387376ffb56
--- /dev/null
+++ b/arch/arm/mach-bcmring/include/mach/reg_nand.h
@@ -0,0 +1,66 @@
+/*****************************************************************************
+* Copyright 2001 - 2008 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/*
+*
+*****************************************************************************
+*
+* REG_NAND.h
+*
+* PURPOSE:
+*
+* This file contains definitions for the nand registers:
+*
+* NOTES:
+*
+*****************************************************************************/
+
+#if !defined(__ASM_ARCH_REG_NAND_H)
+#define __ASM_ARCH_REG_NAND_H
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <csp/reg.h>
+#include <mach/reg_umi.h>
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+#define HW_NAND_BASE MM_IO_BASE_NAND /* NAND Flash */
+
+/* DMA accesses by the bootstrap need hard nonvirtual addresses */
+#define REG_NAND_CMD __REG16(HW_NAND_BASE + 0)
+#define REG_NAND_ADDR __REG16(HW_NAND_BASE + 4)
+
+#define REG_NAND_PHYS_DATA16 (HW_NAND_BASE + 8)
+#define REG_NAND_PHYS_DATA8 (HW_NAND_BASE + 8)
+#define REG_NAND_DATA16 __REG16(REG_NAND_PHYS_DATA16)
+#define REG_NAND_DATA8 __REG8(REG_NAND_PHYS_DATA8)
+
+/* use appropriate offset to make sure it starts at a 1K boundary */
+#define REG_NAND_PHYS_DATA_DMA (HW_NAND_BASE + 0x400)
+#define REG_NAND_DATA_DMA __REG32(REG_NAND_PHYS_DATA_DMA)
+
+/* Linux DMA requires physical address of the data register */
+#define REG_NAND_DATA16_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA16)
+#define REG_NAND_DATA8_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA8)
+#define REG_NAND_DATA_PADDR HW_IO_VIRT_TO_PHYS(REG_NAND_PHYS_DATA_DMA)
+
+#define NAND_BUS_16BIT() (0)
+#define NAND_BUS_8BIT() (!NAND_BUS_16BIT())
+
+/* Register offsets */
+#define REG_NAND_CMD_OFFSET (0)
+#define REG_NAND_ADDR_OFFSET (4)
+#define REG_NAND_DATA8_OFFSET (8)
+
+#endif
diff --git a/arch/arm/mach-bcmring/include/mach/reg_umi.h b/arch/arm/mach-bcmring/include/mach/reg_umi.h
new file mode 100644
index 00000000000..06a355481ea
--- /dev/null
+++ b/arch/arm/mach-bcmring/include/mach/reg_umi.h
@@ -0,0 +1,237 @@
+/*****************************************************************************
+* Copyright 2005 - 2008 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/*
+*
+*****************************************************************************
+*
+* REG_UMI.h
+*
+* PURPOSE:
+*
+* This file contains definitions for the UMI (Unified Memory Interface) registers:
+*
+* NOTES:
+*
+*****************************************************************************/
+
+#if !defined(__ASM_ARCH_REG_UMI_H)
+#define __ASM_ARCH_REG_UMI_H
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <csp/reg.h>
+#include <mach/csp/mm_io.h>
+
+/* ---- Constants and Types ---------------------------------------------- */
+
+/* Unified Memory Interface Ctrl Register */
+#define HW_UMI_BASE MM_IO_BASE_UMI
+
+/* Flash bank 0 timing and control register */
+#define REG_UMI_FLASH0_TCR __REG32(HW_UMI_BASE + 0x00)
+/* Flash bank 1 timing and control register */
+#define REG_UMI_FLASH1_TCR __REG32(HW_UMI_BASE + 0x04)
+/* Flash bank 2 timing and control register */
+#define REG_UMI_FLASH2_TCR __REG32(HW_UMI_BASE + 0x08)
+/* MMD interface and control register */
+#define REG_UMI_MMD_ICR __REG32(HW_UMI_BASE + 0x0c)
+/* NAND timing and control register */
+#define REG_UMI_NAND_TCR __REG32(HW_UMI_BASE + 0x18)
+/* NAND ready/chip select register */
+#define REG_UMI_NAND_RCSR __REG32(HW_UMI_BASE + 0x1c)
+/* NAND ECC control & status register */
+#define REG_UMI_NAND_ECC_CSR __REG32(HW_UMI_BASE + 0x20)
+/* NAND ECC data register XXB2B1B0 */
+#define REG_UMI_NAND_ECC_DATA __REG32(HW_UMI_BASE + 0x24)
+/* BCH ECC Parameter N */
+#define REG_UMI_BCH_N __REG32(HW_UMI_BASE + 0x40)
+/* BCH ECC Parameter K */
+#define REG_UMI_BCH_K __REG32(HW_UMI_BASE + 0x44)
+/* BCH ECC Parameter T */
+#define REG_UMI_BCH_T __REG32(HW_UMI_BASE + 0x48)
+/* BCH ECC Control Status */
+#define REG_UMI_BCH_CTRL_STATUS __REG32(HW_UMI_BASE + 0x4C)
+/* BCH WR ECC 31:0 */
+#define REG_UMI_BCH_WR_ECC_0 __REG32(HW_UMI_BASE + 0x50)
+/* BCH WR ECC 63:32 */
+#define REG_UMI_BCH_WR_ECC_1 __REG32(HW_UMI_BASE + 0x54)
+/* BCH WR ECC 95:64 */
+#define REG_UMI_BCH_WR_ECC_2 __REG32(HW_UMI_BASE + 0x58)
+/* BCH WR ECC 127:96 */
+#define REG_UMI_BCH_WR_ECC_3 __REG32(HW_UMI_BASE + 0x5c)
+/* BCH WR ECC 155:128 */
+#define REG_UMI_BCH_WR_ECC_4 __REG32(HW_UMI_BASE + 0x60)
+/* BCH Read Error Location 1,0 */
+#define REG_UMI_BCH_RD_ERR_LOC_1_0 __REG32(HW_UMI_BASE + 0x64)
+/* BCH Read Error Location 3,2 */
+#define REG_UMI_BCH_RD_ERR_LOC_3_2 __REG32(HW_UMI_BASE + 0x68)
+/* BCH Read Error Location 5,4 */
+#define REG_UMI_BCH_RD_ERR_LOC_5_4 __REG32(HW_UMI_BASE + 0x6c)
+/* BCH Read Error Location 7,6 */
+#define REG_UMI_BCH_RD_ERR_LOC_7_6 __REG32(HW_UMI_BASE + 0x70)
+/* BCH Read Error Location 9,8 */
+#define REG_UMI_BCH_RD_ERR_LOC_9_8 __REG32(HW_UMI_BASE + 0x74)
+/* BCH Read Error Location 11,10 */
+#define REG_UMI_BCH_RD_ERR_LOC_B_A __REG32(HW_UMI_BASE + 0x78)
+
+/* REG_UMI_FLASH0/1/2_TCR, REG_UMI_SRAM0/1_TCR bits */
+/* Enable wait pin during burst write or read */
+#define REG_UMI_TCR_WAITEN 0x80000000
+/* Enable mem ctrlr to work with ext mem of lower freq than AHB clk */
+#define REG_UMI_TCR_LOWFREQ 0x40000000
+/* 1=synch write, 0=async write */
+#define REG_UMI_TCR_MEMTYPE_SYNCWRITE 0x20000000
+/* 1=synch read, 0=async read */
+#define REG_UMI_TCR_MEMTYPE_SYNCREAD 0x10000000
+/* 1=page mode read, 0=normal mode read */
+#define REG_UMI_TCR_MEMTYPE_PAGEREAD 0x08000000
+/* page size/burst size (wrap only) */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_MASK 0x07000000
+/* 4 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_4 0x00000000
+/* 8 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_8 0x01000000
+/* 16 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_16 0x02000000
+/* 32 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_32 0x03000000
+/* 64 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_64 0x04000000
+/* 128 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_128 0x05000000
+/* 256 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_256 0x06000000
+/* 512 word */
+#define REG_UMI_TCR_MEMTYPE_PGSZ_512 0x07000000
+/* Page read access cycle / Burst write latency (n+2 / n+1) */
+#define REG_UMI_TCR_TPRC_TWLC_MASK 0x00f80000
+/* Bus turnaround cycle (n) */
+#define REG_UMI_TCR_TBTA_MASK 0x00070000
+/* Write pulse width cycle (n+1) */
+#define REG_UMI_TCR_TWP_MASK 0x0000f800
+/* Write recovery cycle (n+1) */
+#define REG_UMI_TCR_TWR_MASK 0x00000600
+/* Write address setup cycle (n+1) */
+#define REG_UMI_TCR_TAS_MASK 0x00000180
+/* Output enable delay cycle (n) */
+#define REG_UMI_TCR_TOE_MASK 0x00000060
+/* Read access cycle / Burst read latency (n+2 / n+1) */
+#define REG_UMI_TCR_TRC_TLC_MASK 0x0000001f
+
+/* REG_UMI_MMD_ICR bits */
+/* Flash write protection pin control */
+#define REG_UMI_MMD_ICR_FLASH_WP 0x8000
+/* Extend hold time for sram0, sram1 csn (39 MHz operation) */
+#define REG_UMI_MMD_ICR_XHCS 0x4000
+/* Enable SDRAM 2 interface control */
+#define REG_UMI_MMD_ICR_SDRAM2EN 0x2000
+/* Enable merge of flash banks 0/1 to 512 MBit bank */
+#define REG_UMI_MMD_ICR_INST512 0x1000
+/* Enable merge of flash banks 1/2 to 512 MBit bank */
+#define REG_UMI_MMD_ICR_DATA512 0x0800
+/* Enable SDRAM interface control */
+#define REG_UMI_MMD_ICR_SDRAMEN 0x0400
+/* Polarity of busy state of Burst Wait Signal */
+#define REG_UMI_MMD_ICR_WAITPOL 0x0200
+/* Enable burst clock stopped when not accessing external burst flash/sram */
+#define REG_UMI_MMD_ICR_BCLKSTOP 0x0100
+/* Enable the peri1_csn to replace flash1_csn in 512 Mb flash mode */
+#define REG_UMI_MMD_ICR_PERI1EN 0x0080
+/* Enable the peri2_csn to replace sdram_csn */
+#define REG_UMI_MMD_ICR_PERI2EN 0x0040
+/* Enable the peri3_csn to replace sdram2_csn */
+#define REG_UMI_MMD_ICR_PERI3EN 0x0020
+/* Enable sram bank1 for H/W controlled MRS */
+#define REG_UMI_MMD_ICR_MRSB1 0x0010
+/* Enable sram bank0 for H/W controlled MRS */
+#define REG_UMI_MMD_ICR_MRSB0 0x0008
+/* Polarity for asserted state of H/W controlled MRS */
+#define REG_UMI_MMD_ICR_MRSPOL 0x0004
+/* 0: S/W controllable ZZ/MRS/CRE/P-Mode pin */
+/* 1: H/W controlled ZZ/MRS/CRE/P-Mode, same timing as CS */
+#define REG_UMI_MMD_ICR_MRSMODE 0x0002
+/* MRS state for S/W controlled mode */
+#define REG_UMI_MMD_ICR_MRSSTATE 0x0001
+
+/* REG_UMI_NAND_TCR bits */
+/* Enable software to control CS */
+#define REG_UMI_NAND_TCR_CS_SWCTRL 0x80000000
+/* 16-bit nand wordsize if set */
+#define REG_UMI_NAND_TCR_WORD16 0x40000000
+/* Bus turnaround cycle (n) */
+#define REG_UMI_NAND_TCR_TBTA_MASK 0x00070000
+/* Write pulse width cycle (n+1) */
+#define REG_UMI_NAND_TCR_TWP_MASK 0x0000f800
+/* Write recovery cycle (n+1) */
+#define REG_UMI_NAND_TCR_TWR_MASK 0x00000600
+/* Write address setup cycle (n+1) */
+#define REG_UMI_NAND_TCR_TAS_MASK 0x00000180
+/* Output enable delay cycle (n) */
+#define REG_UMI_NAND_TCR_TOE_MASK 0x00000060
+/* Read access cycle (n+2) */
+#define REG_UMI_NAND_TCR_TRC_TLC_MASK 0x0000001f
+
+/* REG_UMI_NAND_RCSR bits */
+/* Status: Ready=1, Busy=0 */
+#define REG_UMI_NAND_RCSR_RDY 0x02
+/* Keep CS asserted during operation */
+#define REG_UMI_NAND_RCSR_CS_ASSERTED 0x01
+
+/* REG_UMI_NAND_ECC_CSR bits */
+/* Interrupt status - read-only */
+#define REG_UMI_NAND_ECC_CSR_NANDINT 0x80000000
+/* Read: Status of ECC done, Write: clear ECC interrupt */
+#define REG_UMI_NAND_ECC_CSR_ECCINT_RAW 0x00800000
+/* Read: Status of R/B, Write: clear R/B interrupt */
+#define REG_UMI_NAND_ECC_CSR_RBINT_RAW 0x00400000
+/* 1 = Enable ECC Interrupt */
+#define REG_UMI_NAND_ECC_CSR_ECCINT_ENABLE 0x00008000
+/* 1 = Assert interrupt at rising edge of R/B_ */
+#define REG_UMI_NAND_ECC_CSR_RBINT_ENABLE 0x00004000
+/* Calculate ECC by 0=512 bytes, 1=256 bytes */
+#define REG_UMI_NAND_ECC_CSR_256BYTE 0x00000080
+/* Enable ECC in hardware */
+#define REG_UMI_NAND_ECC_CSR_ECC_ENABLE 0x00000001
+
+/* REG_UMI_BCH_CTRL_STATUS bits */
+/* Shift to Indicate Number of correctable errors detected */
+#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR_SHIFT 20
+/* Indicate Number of correctable errors detected */
+#define REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR 0x00F00000
+/* Indicate Errors detected during read but uncorrectable */
+#define REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR 0x00080000
+/* Indicate Errors detected during read and are correctable */
+#define REG_UMI_BCH_CTRL_STATUS_CORR_ERR 0x00040000
+/* Flag indicating the BCH ECC status of the read process is valid */
+#define REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID 0x00020000
+/* Flag indicating the BCH ECC status of the write process is valid */
+#define REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID 0x00010000
+/* Pause ECC calculation */
+#define REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC 0x00000010
+/* Enable Interrupt */
+#define REG_UMI_BCH_CTRL_STATUS_INT_EN 0x00000004
+/* Enable ECC during read */
+#define REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN 0x00000002
+/* Enable ECC during write */
+#define REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN 0x00000001
+/* Mask for location */
+#define REG_UMI_BCH_ERR_LOC_MASK 0x00001FFF
+/* location within a byte */
+#define REG_UMI_BCH_ERR_LOC_BYTE 0x00000007
+/* location within a word */
+#define REG_UMI_BCH_ERR_LOC_WORD 0x00000018
+/* location within a page (512 byte) */
+#define REG_UMI_BCH_ERR_LOC_PAGE 0x00001FE0
+#define REG_UMI_BCH_ERR_LOC_ADDR(index) (__REG32(HW_UMI_BASE + 0x64 + (index / 2)*4) >> ((index % 2) * 16))
+#endif
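As a usage note, a small illustrative helper (not in the patch) that reads back the BCH correction count using the fields defined above; it assumes the register is simply read through the __REG32() accessor as elsewhere in this header:

#include <linux/types.h>
#include <mach/reg_umi.h>

static unsigned int umi_bch_num_corrected(void)
{
	u32 status = REG_UMI_BCH_CTRL_STATUS;

	/* the error count is only meaningful once the read-ECC-valid flag is set */
	if (!(status & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID))
		return 0;

	return (status & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >>
	       REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR_SHIFT;
}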
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index e522b20bcbc..f70d52be48a 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -30,6 +30,8 @@
#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
+#define __pfn_to_bus(x) (__pfn_to_phys(x) - PHYS_OFFSET)
+#define __bus_to_pfn(x) __phys_to_pfn((x) + PHYS_OFFSET)
#endif
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 62b98bffc15..07de8db1458 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -339,6 +339,15 @@ static struct davinci_mmc_config da850_mmc_config = {
.version = MMC_CTLR_VERSION_2,
};
+static void da850_panel_power_ctrl(int val)
+{
+ /* lcd backlight */
+ gpio_set_value(DA850_LCD_BL_PIN, val);
+
+ /* lcd power */
+ gpio_set_value(DA850_LCD_PWR_PIN, val);
+}
+
static int da850_lcd_hw_init(void)
{
int status;
@@ -356,17 +365,11 @@ static int da850_lcd_hw_init(void)
gpio_direction_output(DA850_LCD_BL_PIN, 0);
gpio_direction_output(DA850_LCD_PWR_PIN, 0);
- /* disable lcd backlight */
- gpio_set_value(DA850_LCD_BL_PIN, 0);
-
- /* disable lcd power */
- gpio_set_value(DA850_LCD_PWR_PIN, 0);
-
- /* enable lcd power */
- gpio_set_value(DA850_LCD_PWR_PIN, 1);
+ /* Switch off panel power and backlight */
+ da850_panel_power_ctrl(0);
- /* enable lcd backlight */
- gpio_set_value(DA850_LCD_BL_PIN, 1);
+ /* Switch on panel power and backlight */
+ da850_panel_power_ctrl(1);
return 0;
}
@@ -674,6 +677,7 @@ static __init void da850_evm_init(void)
pr_warning("da850_evm_init: lcd initialization failed: %d\n",
ret);
+	sharp_lk043t1dg01_pdata.panel_power_ctrl = da850_panel_power_ctrl;
ret = da8xx_register_lcdc(&sharp_lk043t1dg01_pdata);
if (ret)
pr_warning("da850_evm_init: lcdc registration failed: %d\n",
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index b520c4b5678..b2ad8090bd1 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -79,6 +79,10 @@ struct davinci_nand_pdata { /* platform_data */
/* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
unsigned options;
+
+ /* Main and mirror bbt descriptor overrides */
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
};
#endif /* __ARCH_ARM_DAVINCI_NAND_H */
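The new bbt_td/bbt_md fields let board code override the default bad-block-table descriptors. A hedged sketch (not from the patch; the pattern bytes, offsets, and flag choices are illustrative, using the generic struct nand_bbt_descr from the MTD NAND core):

#include <linux/mtd/nand.h>
#include <mach/nand.h>

static uint8_t board_bbt_pattern[] = { 'B', 'b', 't', '0' };

static struct nand_bbt_descr board_bbt_main = {
	.options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE,
	.offs      = 8,		/* pattern offset in the OOB area */
	.len       = 4,
	.maxblocks = 4,		/* search the last four blocks */
	.pattern   = board_bbt_pattern,
};

static struct davinci_nand_pdata board_nand_pdata = {
	.bbt_td = &board_bbt_main,
	/* .bbt_md would point at a second descriptor for the mirror table */
};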
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
index 83f31cd0a27..62d17421e48 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_keypad.h
@@ -5,9 +5,6 @@
#ifndef __ASM_ARCH_EP93XX_KEYPAD_H
#define __ASM_ARCH_EP93XX_KEYPAD_H
-#define MAX_MATRIX_KEY_ROWS (8)
-#define MAX_MATRIX_KEY_COLS (8)
-
/* flags for the ep93xx_keypad driver */
#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
@@ -18,8 +15,6 @@
/**
* struct ep93xx_keypad_platform_data - platform specific device structure
- * @matrix_key_rows: number of rows in the keypad matrix
- * @matrix_key_cols: number of columns in the keypad matrix
* @matrix_key_map: array of keycodes defining the keypad matrix
* @matrix_key_map_size: ARRAY_SIZE(matrix_key_map)
* @debounce: debounce start count; terminal count is 0xff
@@ -27,8 +22,6 @@
* @flags: see above
*/
struct ep93xx_keypad_platform_data {
- unsigned int matrix_key_rows;
- unsigned int matrix_key_cols;
unsigned int *matrix_key_map;
int matrix_key_map_size;
unsigned int debounce;
@@ -36,7 +29,7 @@ struct ep93xx_keypad_platform_data {
unsigned int flags;
};
-/* macro for creating the matrix_key_map table */
-#define KEY(row, col, val) (((row) << 28) | ((col) << 24) | (val))
+#define EP93XX_MATRIX_ROWS (8)
+#define EP93XX_MATRIX_COLS (8)
#endif /* __ASM_ARCH_EP93XX_KEYPAD_H */
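With the private KEY() macro gone, a board would describe its keymap with the generic helpers instead. A sketch under that assumption (the key codes, debounce value, and the use of <linux/input/matrix_keypad.h> are illustrative, not taken from this patch):

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <mach/ep93xx_keypad.h>

static unsigned int board_key_map[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_ENTER),
};

static struct ep93xx_keypad_platform_data board_keypad_pdata = {
	.matrix_key_map      = board_key_map,
	.matrix_key_map_size = ARRAY_SIZE(board_key_map),
	.debounce            = 0xa5,
};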
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index b97f529e58e..41febc796b1 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -201,6 +201,11 @@ void __init footbridge_map_io(void)
#ifdef CONFIG_FOOTBRIDGE_ADDIN
+static inline unsigned long fb_bus_sdram_offset(void)
+{
+ return *CSR_PCISDRAMBASE & 0xfffffff0;
+}
+
/*
* These two functions convert virtual addresses to PCI addresses and PCI
* addresses to virtual addresses. Note that it is only legal to use these
@@ -210,14 +215,13 @@ unsigned long __virt_to_bus(unsigned long res)
{
WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory);
- return (res - PAGE_OFFSET) + (*CSR_PCISDRAMBASE & 0xfffffff0);
+ return res + (fb_bus_sdram_offset() - PAGE_OFFSET);
}
EXPORT_SYMBOL(__virt_to_bus);
unsigned long __bus_to_virt(unsigned long res)
{
- res -= (*CSR_PCISDRAMBASE & 0xfffffff0);
- res += PAGE_OFFSET;
+ res = res - (fb_bus_sdram_offset() - PAGE_OFFSET);
WARN_ON(res < PAGE_OFFSET || res >= (unsigned long)high_memory);
@@ -225,4 +229,16 @@ unsigned long __bus_to_virt(unsigned long res)
}
EXPORT_SYMBOL(__bus_to_virt);
+unsigned long __pfn_to_bus(unsigned long pfn)
+{
+	return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET);
+}
+EXPORT_SYMBOL(__pfn_to_bus);
+
+unsigned long __bus_to_pfn(unsigned long bus)
+{
+ return __phys_to_pfn(bus - (fb_bus_sdram_offset() - PHYS_OFFSET));
+}
+EXPORT_SYMBOL(__bus_to_pfn);
+
#endif
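Since both helpers apply the same (fb_bus_sdram_offset() - PHYS_OFFSET) delta with opposite signs, they are exact inverses of each other. A throwaway sanity sketch (illustrative only, assuming the declarations from mach/memory.h are in scope):

#include <linux/bug.h>
#include <linux/init.h>

static void __init check_pfn_bus_roundtrip(unsigned long pfn)
{
	unsigned long bus = __pfn_to_bus(pfn);

	/* converting to a bus address and back must yield the original pfn */
	BUG_ON(__bus_to_pfn(bus) != pfn);
}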
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index cb16e59d87b..8d64f457408 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -29,6 +29,8 @@
#ifndef __ASSEMBLY__
extern unsigned long __virt_to_bus(unsigned long);
extern unsigned long __bus_to_virt(unsigned long);
+extern unsigned long __pfn_to_bus(unsigned long);
+extern unsigned long __bus_to_pfn(unsigned long);
#endif
#define __virt_to_bus __virt_to_bus
#define __bus_to_virt __bus_to_virt
@@ -36,14 +38,15 @@ extern unsigned long __bus_to_virt(unsigned long);
#elif defined(CONFIG_FOOTBRIDGE_HOST)
/*
- * The footbridge is programmed to expose the system RAM at the corresponding
- * address. So, if PAGE_OFFSET is 0xc0000000, RAM appears at 0xe0000000.
- * If 0x80000000, then its exposed at 0xa0000000 on the bus. etc.
- * The only requirement is that the RAM isn't placed at bus address 0 which
+ * The footbridge is programmed to expose the system RAM at 0xe0000000.
+ * The requirement is that the RAM isn't placed at bus address 0, which
* would clash with VGA cards.
*/
-#define __virt_to_bus(x) ((x) - 0xe0000000)
-#define __bus_to_virt(x) ((x) + 0xe0000000)
+#define BUS_OFFSET 0xe0000000
+#define __virt_to_bus(x) ((x) + (BUS_OFFSET - PAGE_OFFSET))
+#define __bus_to_virt(x) ((x) - (BUS_OFFSET - PAGE_OFFSET))
+#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
+#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))
#else
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index 4891828454f..991f24d2c11 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -28,6 +28,7 @@
#define BUS_OFFSET UL(0x80000000)
#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET)
#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET)
-#define __pfn_to_bus(x) (((x) << PAGE_SHIFT) + BUS_OFFSET)
+#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET))
+#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET))
#endif
diff --git a/arch/arm/mach-ixp2000/include/mach/memory.h b/arch/arm/mach-ixp2000/include/mach/memory.h
index aee7eb8a71b..98e3471be15 100644
--- a/arch/arm/mach-ixp2000/include/mach/memory.h
+++ b/arch/arm/mach-ixp2000/include/mach/memory.h
@@ -17,11 +17,15 @@
#include <mach/ixp2000-regs.h>
-#define __virt_to_bus(v) \
- (((__virt_to_phys(v) - 0x0) + (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)))
+#define IXP2000_PCI_SDRAM_OFFSET (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)
-#define __bus_to_virt(b) \
- __phys_to_virt((((b - (*IXP2000_PCI_SDRAM_BAR & 0xfffffff0)) + 0x0)))
+#define __phys_to_bus(x) ((x) + (IXP2000_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+#define __bus_to_phys(x) ((x) - (IXP2000_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+
+#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
+#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
+#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
+#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
#endif
diff --git a/arch/arm/mach-ixp23xx/include/mach/memory.h b/arch/arm/mach-ixp23xx/include/mach/memory.h
index fdd138706c7..94a3a86cfeb 100644
--- a/arch/arm/mach-ixp23xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp23xx/include/mach/memory.h
@@ -19,16 +19,15 @@
*/
#define PHYS_OFFSET (0x00000000)
-#define __virt_to_bus(v) \
- ({ unsigned int ret; \
- ret = ((__virt_to_phys(v) - 0x00000000) + \
- (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)); \
- ret; })
-
-#define __bus_to_virt(b) \
- ({ unsigned int data; \
- data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR); \
- __phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); })
+#define IXP23XX_PCI_SDRAM_OFFSET (*((volatile int *)IXP23XX_PCI_SDRAM_BAR) & 0xfffffff0)
+
+#define __phys_to_bus(x) ((x) + (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+#define __bus_to_phys(x) ((x) - (IXP23XX_PCI_SDRAM_OFFSET - PHYS_OFFSET))
+
+#define __virt_to_bus(v) __phys_to_bus(__virt_to_phys(v))
+#define __bus_to_virt(b) __phys_to_virt(__bus_to_phys(b))
+#define __pfn_to_bus(p) __phys_to_bus(__pfn_to_phys(p))
+#define __bus_to_pfn(b) __phys_to_pfn(__bus_to_phys(b))
#define arch_is_coherent() 1
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 264f4d59f89..9e5070da17a 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -179,21 +179,21 @@ config IXP4XX_INDIRECT_PCI
help
IXP4xx provides two methods of accessing PCI memory space:
- 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
+ 1) A direct mapped window from 0x48000000 to 0x4BFFFFFF (64MB).
To access PCI via this space, we simply ioremap() the BAR
into the kernel and we can use the standard read[bwl]/write[bwl]
macros. This is the preferred method due to speed but it
- limits the system to just 64MB of PCI memory. This can be
+ limits the system to just 64MB of PCI memory. This can be
problematic if using video cards and other memory-heavy devices.
-
- 2) If > 64MB of memory space is required, the IXP4xx can be
- configured to use indirect registers to access PCI This allows
- for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus.
- The disadvantage of this is that every PCI access requires
- three local register accesses plus a spinlock, but in some
- cases the performance hit is acceptable. In addition, you cannot
- mmap() PCI devices in this case due to the indirect nature
- of the PCI window.
+
+ 2) If > 64MB of memory space is required, the IXP4xx can be
+ configured to use indirect registers to access the whole PCI
+ memory space. This currently allows for up to 1 GB (0x10000000
+ to 0x4FFFFFFF) of memory on the bus. The disadvantage of this
+ is that every PCI access requires three local register accesses
+ plus a spinlock, but in some cases the performance hit is
+ acceptable. In addition, you cannot mmap() PCI devices in this
+ case due to the indirect nature of the PCI window.
By default, the direct method is used. Choose this option if you
need to use the indirect method instead. If you don't know
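To make the trade-off concrete, a hedged driver-side sketch (the device, BAR index, and register offset are hypothetical): under the default direct window the BAR is ioremap()ed and touched with the ordinary accessors, while with this option enabled the very same readl()/writel() calls are routed through the indirect accessors in mach/io.h.

#include <linux/pci.h>
#include <linux/io.h>

static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* map BAR 0; with indirect PCI this simply returns the bus address */
	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical control register */
	iounmap(regs);
	return 0;
}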
diff --git a/arch/arm/mach-ixp4xx/avila-pci.c b/arch/arm/mach-ixp4xx/avila-pci.c
index 08d65dcdb5f..845e1b50054 100644
--- a/arch/arm/mach-ixp4xx/avila-pci.c
+++ b/arch/arm/mach-ixp4xx/avila-pci.c
@@ -22,40 +22,45 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/delay.h>
-
#include <asm/mach/pci.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
+#define AVILA_MAX_DEV 4
+#define LOFT_MAX_DEV 6
+#define IRQ_LINES 4
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define INTA 11
+#define INTB 10
+#define INTC 9
+#define INTD 8
+
void __init avila_pci_preinit(void)
{
- set_irq_type(IRQ_AVILA_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_AVILA_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_AVILA_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_AVILA_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init avila_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- static int pci_irq_table[AVILA_PCI_IRQ_LINES] = {
- IRQ_AVILA_PCI_INTA,
- IRQ_AVILA_PCI_INTB,
- IRQ_AVILA_PCI_INTC,
- IRQ_AVILA_PCI_INTD
+ static int pci_irq_table[IRQ_LINES] = {
+ IXP4XX_GPIO_IRQ(INTA),
+ IXP4XX_GPIO_IRQ(INTB),
+ IXP4XX_GPIO_IRQ(INTC),
+ IXP4XX_GPIO_IRQ(INTD)
};
- int irq = -1;
-
if (slot >= 1 &&
- slot <= (machine_is_loft() ? LOFT_PCI_MAX_DEV : AVILA_PCI_MAX_DEV) &&
- pin >= 1 && pin <= AVILA_PCI_IRQ_LINES) {
- irq = pci_irq_table[(slot + pin - 2) % 4];
- }
+ slot <= (machine_is_loft() ? LOFT_MAX_DEV : AVILA_MAX_DEV) &&
+ pin >= 1 && pin <= IRQ_LINES)
+ return pci_irq_table[(slot + pin - 2) % 4];
- return irq;
+ return -1;
}
struct hw_pci avila_pci __initdata = {
@@ -75,4 +80,3 @@ int __init avila_pci_init(void)
}
subsys_initcall(avila_pci_init);
-
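For reference, the (slot + pin - 2) % 4 swizzle used above rotates the interrupt lines per slot. A tiny illustrative helper (not part of the patch):

/*
 * Examples: slot 1, pin 1 (INTA#) -> table[0] -> GPIO 11
 *           slot 2, pin 1 (INTA#) -> table[1] -> GPIO 10
 *           slot 1, pin 2 (INTB#) -> table[1] -> GPIO 10
 */
static int example_swizzle(unsigned int slot, unsigned int pin)
{
	return (slot + pin - 2) % 4;	/* index into pci_irq_table[] */
}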
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 797995ce18b..6e558a76457 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -19,7 +19,6 @@
#include <linux/serial_8250.h>
#include <linux/slab.h>
#include <linux/i2c-gpio.h>
-
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
@@ -29,6 +28,9 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#define AVILA_SDA_PIN 7
+#define AVILA_SCL_PIN 6
+
static struct flash_platform_data avila_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 70afcfe5b88..c4a01594c76 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -481,11 +481,7 @@ int ixp4xx_setup(int nr, struct pci_sys_data *sys)
res[1].name = "PCI Memory Space";
res[1].start = PCIBIOS_MIN_MEM;
-#ifndef CONFIG_IXP4XX_INDIRECT_PCI
- res[1].end = 0x4bffffff;
-#else
- res[1].end = 0x4fffffff;
-#endif
+ res[1].end = PCIBIOS_MAX_MEM;
res[1].flags = IORESOURCE_MEM;
request_resource(&ioport_resource, &res[0]);
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index cfd52fb341c..3bbf40f6d96 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -117,7 +117,7 @@ int gpio_to_irq(int gpio)
}
EXPORT_SYMBOL(gpio_to_irq);
-int irq_to_gpio(int irq)
+int irq_to_gpio(unsigned int irq)
{
int gpio = (irq < 32) ? irq2gpio[irq] : -EINVAL;
diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c
index efddf01ed17..b978ea8bd6f 100644
--- a/arch/arm/mach-ixp4xx/coyote-pci.c
+++ b/arch/arm/mach-ixp4xx/coyote-pci.c
@@ -18,27 +18,31 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
-
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-
#include <asm/mach/pci.h>
+#define SLOT0_DEVID 14
+#define SLOT1_DEVID 15
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define SLOT0_INTA 6
+#define SLOT1_INTA 11
+
void __init coyote_pci_preinit(void)
{
- set_irq_type(IRQ_COYOTE_PCI_SLOT0, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_COYOTE_PCI_SLOT1, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init coyote_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- if (slot == COYOTE_PCI_SLOT0_DEVID)
- return IRQ_COYOTE_PCI_SLOT0;
- else if (slot == COYOTE_PCI_SLOT1_DEVID)
- return IRQ_COYOTE_PCI_SLOT1;
+ if (slot == SLOT0_DEVID)
+ return IXP4XX_GPIO_IRQ(SLOT0_INTA);
+ else if (slot == SLOT1_DEVID)
+ return IXP4XX_GPIO_IRQ(SLOT1_INTA);
else return -1;
}
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index aab1954e274..25bf5ad770e 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -25,6 +25,15 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#define COYOTE_IDE_BASE_PHYS IXP4XX_EXP_BUS_BASE(3)
+#define COYOTE_IDE_BASE_VIRT 0xFFFE1000
+#define COYOTE_IDE_REGION_SIZE 0x1000
+
+#define COYOTE_IDE_DATA_PORT 0xFFFE10E0
+#define COYOTE_IDE_CTRL_PORT 0xFFFE10FC
+#define COYOTE_IDE_ERROR_PORT 0xFFFE10E2
+#define IRQ_COYOTE_IDE IRQ_IXP4XX_GPIO5
+
static struct flash_platform_data coyote_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ixp4xx/dsmg600-pci.c b/arch/arm/mach-ixp4xx/dsmg600-pci.c
index 926d15f885f..fa70fed462b 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-pci.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-pci.c
@@ -19,39 +19,45 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
-
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#define MAX_DEV 4
+#define IRQ_LINES 3
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define INTA 11
+#define INTB 10
+#define INTC 9
+#define INTD 8
+#define INTE 7
+#define INTF 6
+
void __init dsmg600_pci_preinit(void)
{
- set_irq_type(IRQ_DSMG600_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_DSMG600_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_DSMG600_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_DSMG600_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_DSMG600_PCI_INTE, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_DSMG600_PCI_INTF, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTF), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init dsmg600_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- static int pci_irq_table[DSMG600_PCI_MAX_DEV][DSMG600_PCI_IRQ_LINES] =
- {
- { IRQ_DSMG600_PCI_INTE, -1, -1 },
- { IRQ_DSMG600_PCI_INTA, -1, -1 },
- { IRQ_DSMG600_PCI_INTB, IRQ_DSMG600_PCI_INTC, IRQ_DSMG600_PCI_INTD },
- { IRQ_DSMG600_PCI_INTF, -1, -1 },
+ static int pci_irq_table[MAX_DEV][IRQ_LINES] = {
+ { IXP4XX_GPIO_IRQ(INTE), -1, -1 },
+ { IXP4XX_GPIO_IRQ(INTA), -1, -1 },
+ { IXP4XX_GPIO_IRQ(INTB), IXP4XX_GPIO_IRQ(INTC),
+ IXP4XX_GPIO_IRQ(INTD) },
+ { IXP4XX_GPIO_IRQ(INTF), -1, -1 },
};
- int irq = -1;
-
- if (slot >= 1 && slot <= DSMG600_PCI_MAX_DEV &&
- pin >= 1 && pin <= DSMG600_PCI_IRQ_LINES)
- irq = pci_irq_table[slot-1][pin-1];
+ if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES)
+ return pci_irq_table[slot - 1][pin - 1];
- return irq;
+ return -1;
}
struct hw_pci __initdata dsmg600_pci = {
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index a51bfa6978b..7c1fa54a614 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -33,6 +33,23 @@
#include <asm/mach/time.h>
#include <asm/gpio.h>
+#define DSMG600_SDA_PIN 5
+#define DSMG600_SCL_PIN 4
+
+/* DSM-G600 Timer Setting */
+#define DSMG600_FREQ 66000000
+
+/* Buttons */
+#define DSMG600_PB_GPIO 15 /* power button */
+#define DSMG600_RB_GPIO 3 /* reset button */
+
+/* Power control */
+#define DSMG600_PO_GPIO 2 /* power off */
+
+/* LEDs */
+#define DSMG600_LED_PWR_GPIO 0
+#define DSMG600_LED_WLAN_GPIO 14
+
static struct flash_platform_data dsmg600_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ixp4xx/fsg-pci.c b/arch/arm/mach-ixp4xx/fsg-pci.c
index ca12a9ca083..5a810c93062 100644
--- a/arch/arm/mach-ixp4xx/fsg-pci.c
+++ b/arch/arm/mach-ixp4xx/fsg-pci.c
@@ -19,33 +19,38 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
-
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#define MAX_DEV 3
+#define IRQ_LINES 3
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define INTA 6
+#define INTB 7
+#define INTC 5
+
void __init fsg_pci_preinit(void)
{
- set_irq_type(IRQ_FSG_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_FSG_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_FSG_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init fsg_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- static int pci_irq_table[FSG_PCI_IRQ_LINES] = {
- IRQ_FSG_PCI_INTC,
- IRQ_FSG_PCI_INTB,
- IRQ_FSG_PCI_INTA,
+ static int pci_irq_table[IRQ_LINES] = {
+ IXP4XX_GPIO_IRQ(INTC),
+ IXP4XX_GPIO_IRQ(INTB),
+ IXP4XX_GPIO_IRQ(INTA),
};
int irq = -1;
- slot = slot - 11;
+ slot -= 11;
- if (slot >= 1 && slot <= FSG_PCI_MAX_DEV &&
- pin >= 1 && pin <= FSG_PCI_IRQ_LINES)
- irq = pci_irq_table[(slot - 1)];
+ if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES)
+ irq = pci_irq_table[slot - 1];
printk(KERN_INFO "%s: Mapped slot %d pin %d to IRQ %d\n",
__func__, slot, pin, irq);
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 5add22fc989..e7f4befba42 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -24,12 +24,18 @@
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/io.h>
-
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/gpio.h>
+#define FSG_SDA_PIN 12
+#define FSG_SCL_PIN 13
+
+#define FSG_SB_GPIO 4 /* sync button */
+#define FSG_RB_GPIO 9 /* reset button */
+#define FSG_UB_GPIO 10 /* usb button */
+
static struct flash_platform_data fsg_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index a733b8ff3ce..1c28048209c 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -17,29 +17,28 @@
#include <asm/mach/flash.h>
#include <asm/mach/pci.h>
-#define xgpio_irq(n) (IRQ_IXP4XX_GPIO ## n)
-#define gpio_irq(n) xgpio_irq(n)
-
#define SLOT_ETHA 0x0B /* IDSEL = AD21 */
#define SLOT_ETHB 0x0C /* IDSEL = AD20 */
#define SLOT_MPCI 0x0D /* IDSEL = AD19 */
#define SLOT_NEC 0x0E /* IDSEL = AD18 */
-#define IRQ_ETHA IRQ_IXP4XX_GPIO4
-#define IRQ_ETHB IRQ_IXP4XX_GPIO5
-#define IRQ_NEC IRQ_IXP4XX_GPIO3
-#define IRQ_MPCI IRQ_IXP4XX_GPIO12
-
/* GPIO lines */
#define GPIO_SCL 0
#define GPIO_SDA 1
#define GPIO_STR 2
+#define GPIO_IRQ_NEC 3
+#define GPIO_IRQ_ETHA 4
+#define GPIO_IRQ_ETHB 5
#define GPIO_HSS0_DCD_N 6
#define GPIO_HSS1_DCD_N 7
+#define GPIO_UART0_DCD 8
+#define GPIO_UART1_DCD 9
#define GPIO_HSS0_CTS_N 10
#define GPIO_HSS1_CTS_N 11
+#define GPIO_IRQ_MPCI 12
#define GPIO_HSS1_RTS_N 13
#define GPIO_HSS0_RTS_N 14
+/* GPIO15 is not connected */
/* Control outputs from 74HC4094 */
#define CONTROL_HSS0_CLK_INT 0
@@ -152,7 +151,7 @@ static int hss_set_clock(int port, unsigned int clock_type)
static irqreturn_t hss_dcd_irq(int irq, void *pdev)
{
- int i, port = (irq == gpio_irq(GPIO_HSS1_DCD_N));
+ int i, port = (irq == IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N));
gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i);
set_carrier_cb_tab[port](pdev, !i);
return IRQ_HANDLED;
@@ -165,9 +164,9 @@ static int hss_open(int port, void *pdev,
int i, irq;
if (!port)
- irq = gpio_irq(GPIO_HSS0_DCD_N);
+ irq = IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N);
else
- irq = gpio_irq(GPIO_HSS1_DCD_N);
+ irq = IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N);
gpio_line_get(port ? GPIO_HSS1_DCD_N : GPIO_HSS0_DCD_N, &i);
set_carrier_cb(pdev, !i);
@@ -188,8 +187,8 @@ static int hss_open(int port, void *pdev,
static void hss_close(int port, void *pdev)
{
- free_irq(port ? gpio_irq(GPIO_HSS1_DCD_N) : gpio_irq(GPIO_HSS0_DCD_N),
- pdev);
+ free_irq(port ? IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N) :
+ IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), pdev);
set_carrier_cb_tab[!!port] = NULL; /* catch bugs */
set_control(port ? CONTROL_HSS1_DTR_N : CONTROL_HSS0_DTR_N, 1);
@@ -421,8 +420,8 @@ static void __init gmlr_init(void)
gpio_line_config(GPIO_HSS1_RTS_N, IXP4XX_GPIO_OUT);
gpio_line_config(GPIO_HSS0_DCD_N, IXP4XX_GPIO_IN);
gpio_line_config(GPIO_HSS1_DCD_N, IXP4XX_GPIO_IN);
- set_irq_type(gpio_irq(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH);
- set_irq_type(gpio_irq(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH);
+ set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS0_DCD_N), IRQ_TYPE_EDGE_BOTH);
+ set_irq_type(IXP4XX_GPIO_IRQ(GPIO_HSS1_DCD_N), IRQ_TYPE_EDGE_BOTH);
set_control(CONTROL_HSS0_DTR_N, 1);
set_control(CONTROL_HSS1_DTR_N, 1);
@@ -442,10 +441,10 @@ static void __init gmlr_init(void)
#ifdef CONFIG_PCI
static void __init gmlr_pci_preinit(void)
{
- set_irq_type(IRQ_ETHA, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_ETHB, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_NEC, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_MPCI, IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
@@ -466,10 +465,10 @@ static void __init gmlr_pci_postinit(void)
static int __init gmlr_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
switch(slot) {
- case SLOT_ETHA: return IRQ_ETHA;
- case SLOT_ETHB: return IRQ_ETHB;
- case SLOT_NEC: return IRQ_NEC;
- default: return IRQ_MPCI;
+ case SLOT_ETHA: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHA);
+ case SLOT_ETHB: return IXP4XX_GPIO_IRQ(GPIO_IRQ_ETHB);
+ case SLOT_NEC: return IXP4XX_GPIO_IRQ(GPIO_IRQ_NEC);
+ default: return IXP4XX_GPIO_IRQ(GPIO_IRQ_MPCI);
}
}
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-pci.c b/arch/arm/mach-ixp4xx/gtwx5715-pci.c
index 7b8a2c32384..25d2c333c20 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-pci.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-pci.c
@@ -26,14 +26,16 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
-
#include <asm/mach-types.h>
#include <mach/hardware.h>
-#include <mach/gtwx5715.h>
#include <asm/mach/pci.h>
+#define SLOT0_DEVID 0
+#define SLOT1_DEVID 1
+#define INTA 10 /* slot 1 has INTA and INTB crossed */
+#define INTB 11
+
/*
- * The exact GPIO pins and IRQs are defined in arch-ixp4xx/gtwx5715.h
* Slot 0 isn't actually populated with a card connector but
* we initialize it anyway in case a future version has the
* slot populated or someone with good soldering skills has
@@ -41,32 +43,26 @@
*/
void __init gtwx5715_pci_preinit(void)
{
- set_irq_type(GTWX5715_PCI_SLOT0_INTA_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(GTWX5715_PCI_SLOT0_INTB_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(GTWX5715_PCI_SLOT1_INTA_IRQ, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(GTWX5715_PCI_SLOT1_INTB_IRQ, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init gtwx5715_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- int rc;
- static int gtwx5715_irqmap
- [GTWX5715_PCI_SLOT_COUNT]
- [GTWX5715_PCI_INT_PIN_COUNT] = {
- {GTWX5715_PCI_SLOT0_INTA_IRQ, GTWX5715_PCI_SLOT0_INTB_IRQ},
- {GTWX5715_PCI_SLOT1_INTA_IRQ, GTWX5715_PCI_SLOT1_INTB_IRQ},
-};
+ int rc = -1;
- if (slot >= GTWX5715_PCI_SLOT_COUNT ||
- pin >= GTWX5715_PCI_INT_PIN_COUNT) rc = -1;
- else
- rc = gtwx5715_irqmap[slot][pin-1];
+ if ((slot == SLOT0_DEVID && pin == 1) ||
+ (slot == SLOT1_DEVID && pin == 2))
+ rc = IXP4XX_GPIO_IRQ(INTA);
+ else if ((slot == SLOT0_DEVID && pin == 2) ||
+ (slot == SLOT1_DEVID && pin == 1))
+ rc = IXP4XX_GPIO_IRQ(INTB);
- printk("%s: Mapped slot %d pin %d to IRQ %d\n", __func__, slot, pin, rc);
- return(rc);
+ printk(KERN_INFO "%s: Mapped slot %d pin %d to IRQ %d\n",
+ __func__, slot, pin, rc);
+ return rc;
}
struct hw_pci gtwx5715_pci __initdata = {
@@ -81,9 +77,7 @@ struct hw_pci gtwx5715_pci __initdata = {
int __init gtwx5715_pci_init(void)
{
if (machine_is_gtwx5715())
- {
pci_common_init(&gtwx5715_pci);
- }
return 0;
}
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index 25c21d6665e..0bc7185cb6f 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -28,7 +28,6 @@
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <linux/slab.h>
-
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
@@ -37,7 +36,34 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
-#include <mach/gtwx5715.h>
+
+/* GPIO 5,6,7 and 12 are hard wired to the Kendin KS8995M Switch
+ and operate as an SPI type interface. The details of the interface
+ are available on Kendin/Micrel's web site. */
+
+#define GTWX5715_KSSPI_SELECT 5
+#define GTWX5715_KSSPI_TXD 6
+#define GTWX5715_KSSPI_CLOCK 7
+#define GTWX5715_KSSPI_RXD 12
+
+/* The "reset" button is wired to GPIO 3.
+ The GPIO is brought "low" when the button is pushed. */
+
+#define GTWX5715_BUTTON_GPIO 3
+
+/* Board Label Front Label
+ LED1 Power
+ LED2 Wireless-G
+ LED3 not populated but could be
+ LED4 Internet
+ LED5 - LED8 Controlled by KS8995M Switch
+ LED9 DMZ */
+
+#define GTWX5715_LED1_GPIO 2
+#define GTWX5715_LED2_GPIO 9
+#define GTWX5715_LED3_GPIO 8
+#define GTWX5715_LED4_GPIO 1
+#define GTWX5715_LED9_GPIO 4
/*
* Xscale UART registers are 32 bits wide with only the least
diff --git a/arch/arm/mach-ixp4xx/include/mach/avila.h b/arch/arm/mach-ixp4xx/include/mach/avila.h
deleted file mode 100644
index 1640cb61972..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/avila.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/avila.h
- *
- * Gateworks Avila platform specific definitions
- *
- * Author: Michael-Luke Jones <mlj28@cam.ac.uk>
- *
- * Based on ixdp425.h
- * Author: Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright 2004 (c) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-#define AVILA_SDA_PIN 7
-#define AVILA_SCL_PIN 6
-
-/*
- * AVILA PCI IRQs
- */
-#define AVILA_PCI_MAX_DEV 4
-#define LOFT_PCI_MAX_DEV 6
-#define AVILA_PCI_IRQ_LINES 4
-
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define AVILA_PCI_INTA_PIN 11
-#define AVILA_PCI_INTB_PIN 10
-#define AVILA_PCI_INTC_PIN 9
-#define AVILA_PCI_INTD_PIN 8
-
-
diff --git a/arch/arm/mach-ixp4xx/include/mach/coyote.h b/arch/arm/mach-ixp4xx/include/mach/coyote.h
deleted file mode 100644
index 717ac6d16f5..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/coyote.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/coyote.h
- *
- * ADI Engineering platform specific definitions
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright 2004 (c) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define COYOTE_PCI_SLOT0_PIN 6
-#define COYOTE_PCI_SLOT1_PIN 11
-
-#define COYOTE_PCI_SLOT0_DEVID 14
-#define COYOTE_PCI_SLOT1_DEVID 15
-
-#define COYOTE_IDE_BASE_PHYS IXP4XX_EXP_BUS_BASE(3)
-#define COYOTE_IDE_BASE_VIRT 0xFFFE1000
-#define COYOTE_IDE_REGION_SIZE 0x1000
-
-#define COYOTE_IDE_DATA_PORT 0xFFFE10E0
-#define COYOTE_IDE_CTRL_PORT 0xFFFE10FC
-#define COYOTE_IDE_ERROR_PORT 0xFFFE10E2
-
diff --git a/arch/arm/mach-ixp4xx/include/mach/dsmg600.h b/arch/arm/mach-ixp4xx/include/mach/dsmg600.h
deleted file mode 100644
index dc087a34a26..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/dsmg600.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * DSM-G600 platform specific definitions
- *
- * Copyright (C) 2006 Tower Technologies
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- *
- * based on ixdp425.h:
- * Copyright 2004 (C) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-#define DSMG600_SDA_PIN 5
-#define DSMG600_SCL_PIN 4
-
-/*
- * DSMG600 PCI IRQs
- */
-#define DSMG600_PCI_MAX_DEV 4
-#define DSMG600_PCI_IRQ_LINES 3
-
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define DSMG600_PCI_INTA_PIN 11
-#define DSMG600_PCI_INTB_PIN 10
-#define DSMG600_PCI_INTC_PIN 9
-#define DSMG600_PCI_INTD_PIN 8
-#define DSMG600_PCI_INTE_PIN 7
-#define DSMG600_PCI_INTF_PIN 6
-
-/* DSM-G600 Timer Setting */
-#define DSMG600_FREQ 66000000
-
-/* Buttons */
-
-#define DSMG600_PB_GPIO 15 /* power button */
-#define DSMG600_RB_GPIO 3 /* reset button */
-
-/* Power control */
-
-#define DSMG600_PO_GPIO 2 /* power off */
-
-/* LEDs */
-
-#define DSMG600_LED_PWR_GPIO 0
-#define DSMG600_LED_WLAN_GPIO 14
diff --git a/arch/arm/mach-ixp4xx/include/mach/fsg.h b/arch/arm/mach-ixp4xx/include/mach/fsg.h
deleted file mode 100644
index 1f02b7e22a1..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/fsg.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/fsg.h
- *
- * Freecom FSG-3 platform specific definitions
- *
- * Author: Rod Whitby <rod@whitby.id.au>
- * Author: Tomasz Chmielewski <mangoo@wpkg.org>
- * Maintainers: http://www.nslu2-linux.org
- *
- * Based on coyote.h by
- * Copyright 2004 (c) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-#define FSG_SDA_PIN 12
-#define FSG_SCL_PIN 13
-
-/*
- * FSG PCI IRQs
- */
-#define FSG_PCI_MAX_DEV 3
-#define FSG_PCI_IRQ_LINES 3
-
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define FSG_PCI_INTA_PIN 6
-#define FSG_PCI_INTB_PIN 7
-#define FSG_PCI_INTC_PIN 5
-
-/* Buttons */
-
-#define FSG_SB_GPIO 4 /* sync button */
-#define FSG_RB_GPIO 9 /* reset button */
-#define FSG_UB_GPIO 10 /* usb button */
-
-/* LEDs */
-
-#define FSG_LED_WLAN_BIT 0
-#define FSG_LED_WAN_BIT 1
-#define FSG_LED_SATA_BIT 2
-#define FSG_LED_USB_BIT 4
-#define FSG_LED_RING_BIT 5
-#define FSG_LED_SYNC_BIT 7
diff --git a/arch/arm/mach-ixp4xx/include/mach/gpio.h b/arch/arm/mach-ixp4xx/include/mach/gpio.h
index cd5aec26c07..a5f87ded2f2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/gpio.h
+++ b/arch/arm/mach-ixp4xx/include/mach/gpio.h
@@ -70,7 +70,7 @@ static inline void gpio_set_value(unsigned gpio, int value)
#include <asm-generic/gpio.h> /* cansleep wrappers */
extern int gpio_to_irq(int gpio);
-extern int irq_to_gpio(int gpio);
+extern int irq_to_gpio(unsigned int irq);
#endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/gtwx5715.h b/arch/arm/mach-ixp4xx/include/mach/gtwx5715.h
deleted file mode 100644
index 5d5e201cac7..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/gtwx5715.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/gtwx5715.h
- *
- * Gemtek GTWX5715 Gateway (Linksys WRV54G)
- *
- * Copyright 2004 (c) George T. Joseph
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-#include "irqs.h"
-
-#define GTWX5715_GPIO0 0
-#define GTWX5715_GPIO1 1
-#define GTWX5715_GPIO2 2
-#define GTWX5715_GPIO3 3
-#define GTWX5715_GPIO4 4
-#define GTWX5715_GPIO5 5
-#define GTWX5715_GPIO6 6
-#define GTWX5715_GPIO7 7
-#define GTWX5715_GPIO8 8
-#define GTWX5715_GPIO9 9
-#define GTWX5715_GPIO10 10
-#define GTWX5715_GPIO11 11
-#define GTWX5715_GPIO12 12
-#define GTWX5715_GPIO13 13
-#define GTWX5715_GPIO14 14
-
-#define GTWX5715_GPIO0_IRQ IRQ_IXP4XX_GPIO0
-#define GTWX5715_GPIO1_IRQ IRQ_IXP4XX_GPIO1
-#define GTWX5715_GPIO2_IRQ IRQ_IXP4XX_GPIO2
-#define GTWX5715_GPIO3_IRQ IRQ_IXP4XX_GPIO3
-#define GTWX5715_GPIO4_IRQ IRQ_IXP4XX_GPIO4
-#define GTWX5715_GPIO5_IRQ IRQ_IXP4XX_GPIO5
-#define GTWX5715_GPIO6_IRQ IRQ_IXP4XX_GPIO6
-#define GTWX5715_GPIO7_IRQ IRQ_IXP4XX_GPIO7
-#define GTWX5715_GPIO8_IRQ IRQ_IXP4XX_GPIO8
-#define GTWX5715_GPIO9_IRQ IRQ_IXP4XX_GPIO9
-#define GTWX5715_GPIO10_IRQ IRQ_IXP4XX_GPIO10
-#define GTWX5715_GPIO11_IRQ IRQ_IXP4XX_GPIO11
-#define GTWX5715_GPIO12_IRQ IRQ_IXP4XX_GPIO12
-#define GTWX5715_GPIO13_IRQ IRQ_IXP4XX_SW_INT1
-#define GTWX5715_GPIO14_IRQ IRQ_IXP4XX_SW_INT2
-
-/* PCI controller GPIO to IRQ pin mappings
-
- INTA INTB
-SLOT 0 10 11
-SLOT 1 11 10
-
-*/
-
-#define GTWX5715_PCI_SLOT0_DEVID 0
-#define GTWX5715_PCI_SLOT0_INTA_GPIO GTWX5715_GPIO10
-#define GTWX5715_PCI_SLOT0_INTB_GPIO GTWX5715_GPIO11
-#define GTWX5715_PCI_SLOT0_INTA_IRQ GTWX5715_GPIO10_IRQ
-#define GTWX5715_PCI_SLOT0_INTB_IRQ GTWX5715_GPIO11_IRQ
-
-#define GTWX5715_PCI_SLOT1_DEVID 1
-#define GTWX5715_PCI_SLOT1_INTA_GPIO GTWX5715_GPIO11
-#define GTWX5715_PCI_SLOT1_INTB_GPIO GTWX5715_GPIO10
-#define GTWX5715_PCI_SLOT1_INTA_IRQ GTWX5715_GPIO11_IRQ
-#define GTWX5715_PCI_SLOT1_INTB_IRQ GTWX5715_GPIO10_IRQ
-
-#define GTWX5715_PCI_SLOT_COUNT 2
-#define GTWX5715_PCI_INT_PIN_COUNT 2
-
-/*
- * GPIO 5,6,7 and12 are hard wired to the Kendin KS8995M Switch
- * and operate as an SPI type interface. The details of the interface
- * are available on Kendin/Micrel's web site.
- */
-
-#define GTWX5715_KSSPI_SELECT GTWX5715_GPIO5
-#define GTWX5715_KSSPI_TXD GTWX5715_GPIO6
-#define GTWX5715_KSSPI_CLOCK GTWX5715_GPIO7
-#define GTWX5715_KSSPI_RXD GTWX5715_GPIO12
-
-/*
- * The "reset" button is wired to GPIO 3.
- * The GPIO is brought "low" when the button is pushed.
- */
-
-#define GTWX5715_BUTTON_GPIO GTWX5715_GPIO3
-#define GTWX5715_BUTTON_IRQ GTWX5715_GPIO3_IRQ
-
-/*
- * Board Label Front Label
- * LED1 Power
- * LED2 Wireless-G
- * LED3 not populated but could be
- * LED4 Internet
- * LED5 - LED8 Controlled by KS8995M Switch
- * LED9 DMZ
- */
-
-#define GTWX5715_LED1_GPIO GTWX5715_GPIO2
-#define GTWX5715_LED2_GPIO GTWX5715_GPIO9
-#define GTWX5715_LED3_GPIO GTWX5715_GPIO8
-#define GTWX5715_LED4_GPIO GTWX5715_GPIO1
-#define GTWX5715_LED9_GPIO GTWX5715_GPIO4
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index f58a43a2396..f9d1c43e4a5 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -18,7 +18,13 @@
#define __ASM_ARCH_HARDWARE_H__
#define PCIBIOS_MIN_IO 0x00001000
-#define PCIBIOS_MIN_MEM (cpu_is_ixp43x() ? 0x40000000 : 0x48000000)
+#ifdef CONFIG_IXP4XX_INDIRECT_PCI
+#define PCIBIOS_MIN_MEM 0x10000000 /* 1 GB of indirect PCI MMIO space */
+#define PCIBIOS_MAX_MEM 0x4FFFFFFF
+#else
+#define PCIBIOS_MIN_MEM 0x48000000 /* 64 MB of PCI MMIO space */
+#define PCIBIOS_MAX_MEM 0x4BFFFFFF
+#endif
/*
* We override the standard dma-mask routines for bouncing.
@@ -37,14 +43,4 @@
/* Platform helper functions and definitions */
#include "platform.h"
-/* Platform specific details */
-#include "ixdp425.h"
-#include "avila.h"
-#include "coyote.h"
-#include "prpmc1100.h"
-#include "nslu2.h"
-#include "nas100d.h"
-#include "dsmg600.h"
-#include "fsg.h"
-
#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 8a947d42a6f..6ea7e2fb270 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -26,22 +26,20 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
/*
* IXP4xx provides two methods of accessing PCI memory space:
*
- * 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
+ * 1) A direct mapped window from 0x48000000 to 0x4BFFFFFF (64MB).
* To access PCI via this space, we simply ioremap() the BAR
* into the kernel and we can use the standard read[bwl]/write[bwl]
* macros. This is the preferred method due to speed but it
- * limits the system to just 64MB of PCI memory. This can be
- * problamatic if using video cards and other memory-heavy
- * targets.
- *
- * 2) If > 64MB of memory space is required, the IXP4xx can be configured
- * to use indirect registers to access PCI (as we do below for I/O
- * transactions). This allows for up to 128MB (0x48000000 to 0x4fffffff)
- * of memory on the bus. The disadvantage of this is that every
- * PCI access requires three local register accesses plus a spinlock,
- * but in some cases the performance hit is acceptable. In addition,
- * you cannot mmap() PCI devices in this case.
+ * limits the system to just 64MB of PCI memory. This can be
+ * problematic if using video cards and other memory-heavy targets.
*
+ * 2) If > 64MB of memory space is required, the IXP4xx can use indirect
+ * registers to access the whole 4 GB of PCI memory space (as we do below
+ * for I/O transactions). This currently allows for up to 1 GB (0x10000000
+ * to 0x4FFFFFFF) of memory on the bus. The disadvantage of this is that
+ * every PCI access requires three local register accesses plus a spinlock,
+ * but in some cases the performance hit is acceptable. In addition, you
+ * cannot mmap() PCI devices in this case.
*/
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
@@ -55,48 +53,52 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
* access registers. If something outside of PCI is ioremap'd, we
* fallback to the default.
*/
-static inline void __iomem *
-__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype)
+
+static inline int is_pci_memory(u32 addr)
+{
+ return (addr >= PCIBIOS_MIN_MEM) && (addr <= 0x4FFFFFFF);
+}
+
+static inline void __iomem * __indirect_ioremap(unsigned long addr, size_t size,
+ unsigned int mtype)
{
- if((addr < PCIBIOS_MIN_MEM) || (addr > 0x4fffffff))
+ if (!is_pci_memory(addr))
return __arm_ioremap(addr, size, mtype);
return (void __iomem *)addr;
}
-static inline void
-__ixp4xx_iounmap(void __iomem *addr)
+static inline void __indirect_iounmap(void __iomem *addr)
{
- if ((__force u32)addr >= VMALLOC_START)
+ if (!is_pci_memory((__force u32)addr))
__iounmap(addr);
}
-#define __arch_ioremap(a, s, f) __ixp4xx_ioremap(a, s, f)
-#define __arch_iounmap(a) __ixp4xx_iounmap(a)
+#define __arch_ioremap(a, s, f) __indirect_ioremap(a, s, f)
+#define __arch_iounmap(a) __indirect_iounmap(a)
-#define writeb(v, p) __ixp4xx_writeb(v, p)
-#define writew(v, p) __ixp4xx_writew(v, p)
-#define writel(v, p) __ixp4xx_writel(v, p)
+#define writeb(v, p) __indirect_writeb(v, p)
+#define writew(v, p) __indirect_writew(v, p)
+#define writel(v, p) __indirect_writel(v, p)
-#define writesb(p, v, l) __ixp4xx_writesb(p, v, l)
-#define writesw(p, v, l) __ixp4xx_writesw(p, v, l)
-#define writesl(p, v, l) __ixp4xx_writesl(p, v, l)
-
-#define readb(p) __ixp4xx_readb(p)
-#define readw(p) __ixp4xx_readw(p)
-#define readl(p) __ixp4xx_readl(p)
-
-#define readsb(p, v, l) __ixp4xx_readsb(p, v, l)
-#define readsw(p, v, l) __ixp4xx_readsw(p, v, l)
-#define readsl(p, v, l) __ixp4xx_readsl(p, v, l)
+#define writesb(p, v, l) __indirect_writesb(p, v, l)
+#define writesw(p, v, l) __indirect_writesw(p, v, l)
+#define writesl(p, v, l) __indirect_writesl(p, v, l)
-static inline void
-__ixp4xx_writeb(u8 value, volatile void __iomem *p)
+#define readb(p) __indirect_readb(p)
+#define readw(p) __indirect_readw(p)
+#define readl(p) __indirect_readl(p)
+
+#define readsb(p, v, l) __indirect_readsb(p, v, l)
+#define readsw(p, v, l) __indirect_readsw(p, v, l)
+#define readsl(p, v, l) __indirect_readsl(p, v, l)
+
+static inline void __indirect_writeb(u8 value, volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
- if (addr >= VMALLOC_START) {
+ if (!is_pci_memory(addr)) {
__raw_writeb(value, addr);
return;
}
@@ -107,20 +109,19 @@ __ixp4xx_writeb(u8 value, volatile void __iomem *p)
ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
}
-static inline void
-__ixp4xx_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count)
+static inline void __indirect_writesb(volatile void __iomem *bus_addr,
+ const u8 *vaddr, int count)
{
while (count--)
writeb(*vaddr++, bus_addr);
}
-static inline void
-__ixp4xx_writew(u16 value, volatile void __iomem *p)
+static inline void __indirect_writew(u16 value, volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
- if (addr >= VMALLOC_START) {
+ if (!is_pci_memory(addr)) {
__raw_writew(value, addr);
return;
}
@@ -131,18 +132,18 @@ __ixp4xx_writew(u16 value, volatile void __iomem *p)
ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
}
-static inline void
-__ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count)
+static inline void __indirect_writesw(volatile void __iomem *bus_addr,
+ const u16 *vaddr, int count)
{
while (count--)
writew(*vaddr++, bus_addr);
}
-static inline void
-__ixp4xx_writel(u32 value, volatile void __iomem *p)
+static inline void __indirect_writel(u32 value, volatile void __iomem *p)
{
u32 addr = (__force u32)p;
- if (addr >= VMALLOC_START) {
+
+ if (!is_pci_memory(addr)) {
__raw_writel(value, p);
return;
}
@@ -150,20 +151,19 @@ __ixp4xx_writel(u32 value, volatile void __iomem *p)
ixp4xx_pci_write(addr, NP_CMD_MEMWRITE, value);
}
-static inline void
-__ixp4xx_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count)
+static inline void __indirect_writesl(volatile void __iomem *bus_addr,
+ const u32 *vaddr, int count)
{
while (count--)
writel(*vaddr++, bus_addr);
}
-static inline unsigned char
-__ixp4xx_readb(const volatile void __iomem *p)
+static inline unsigned char __indirect_readb(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
- if (addr >= VMALLOC_START)
+ if (!is_pci_memory(addr))
return __raw_readb(addr);
n = addr % 4;
@@ -174,20 +174,19 @@ __ixp4xx_readb(const volatile void __iomem *p)
return data >> (8*n);
}
-static inline void
-__ixp4xx_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count)
+static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
+ u8 *vaddr, u32 count)
{
while (count--)
*vaddr++ = readb(bus_addr);
}
-static inline unsigned short
-__ixp4xx_readw(const volatile void __iomem *p)
+static inline unsigned short __indirect_readw(const volatile void __iomem *p)
{
u32 addr = (u32)p;
u32 n, byte_enables, data;
- if (addr >= VMALLOC_START)
+ if (!is_pci_memory(addr))
return __raw_readw(addr);
n = addr % 4;
@@ -198,20 +197,19 @@ __ixp4xx_readw(const volatile void __iomem *p)
return data>>(8*n);
}
-static inline void
-__ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count)
+static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
+ u16 *vaddr, u32 count)
{
while (count--)
*vaddr++ = readw(bus_addr);
}
-static inline unsigned long
-__ixp4xx_readl(const volatile void __iomem *p)
+static inline unsigned long __indirect_readl(const volatile void __iomem *p)
{
u32 addr = (__force u32)p;
u32 data;
- if (addr >= VMALLOC_START)
+ if (!is_pci_memory(addr))
return __raw_readl(p);
if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data))
@@ -220,8 +218,8 @@ __ixp4xx_readl(const volatile void __iomem *p)
return data;
}
-static inline void
-__ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
+static inline void __indirect_readsl(const volatile void __iomem *bus_addr,
+ u32 *vaddr, u32 count)
{
while (count--)
*vaddr++ = readl(bus_addr);
@@ -235,7 +233,7 @@ __ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l) _memcpy_toio((c),(a),(l))
-#endif
+#endif /* CONFIG_IXP4XX_INDIRECT_PCI */
#ifndef CONFIG_PCI
@@ -250,25 +248,8 @@ __ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
* transaction. This means that we need to override the default
* I/O functions.
*/
-#define outb(p, v) __ixp4xx_outb(p, v)
-#define outw(p, v) __ixp4xx_outw(p, v)
-#define outl(p, v) __ixp4xx_outl(p, v)
-
-#define outsb(p, v, l) __ixp4xx_outsb(p, v, l)
-#define outsw(p, v, l) __ixp4xx_outsw(p, v, l)
-#define outsl(p, v, l) __ixp4xx_outsl(p, v, l)
-#define inb(p) __ixp4xx_inb(p)
-#define inw(p) __ixp4xx_inw(p)
-#define inl(p) __ixp4xx_inl(p)
-
-#define insb(p, v, l) __ixp4xx_insb(p, v, l)
-#define insw(p, v, l) __ixp4xx_insw(p, v, l)
-#define insl(p, v, l) __ixp4xx_insl(p, v, l)
-
-
-static inline void
-__ixp4xx_outb(u8 value, u32 addr)
+static inline void outb(u8 value, u32 addr)
{
u32 n, byte_enables, data;
n = addr % 4;
@@ -277,15 +258,13 @@ __ixp4xx_outb(u8 value, u32 addr)
ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
}
-static inline void
-__ixp4xx_outsb(u32 io_addr, const u8 *vaddr, u32 count)
+static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count)
{
while (count--)
outb(*vaddr++, io_addr);
}
-static inline void
-__ixp4xx_outw(u16 value, u32 addr)
+static inline void outw(u16 value, u32 addr)
{
u32 n, byte_enables, data;
n = addr % 4;
@@ -294,28 +273,24 @@ __ixp4xx_outw(u16 value, u32 addr)
ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
}
-static inline void
-__ixp4xx_outsw(u32 io_addr, const u16 *vaddr, u32 count)
+static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count)
{
while (count--)
outw(cpu_to_le16(*vaddr++), io_addr);
}
-static inline void
-__ixp4xx_outl(u32 value, u32 addr)
+static inline void outl(u32 value, u32 addr)
{
ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
}
-static inline void
-__ixp4xx_outsl(u32 io_addr, const u32 *vaddr, u32 count)
+static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count)
{
while (count--)
- outl(*vaddr++, io_addr);
+ outl(cpu_to_le32(*vaddr++), io_addr);
}
-static inline u8
-__ixp4xx_inb(u32 addr)
+static inline u8 inb(u32 addr)
{
u32 n, byte_enables, data;
n = addr % 4;
@@ -326,15 +301,13 @@ __ixp4xx_inb(u32 addr)
return data >> (8*n);
}
-static inline void
-__ixp4xx_insb(u32 io_addr, u8 *vaddr, u32 count)
+static inline void insb(u32 io_addr, u8 *vaddr, u32 count)
{
while (count--)
*vaddr++ = inb(io_addr);
}
-static inline u16
-__ixp4xx_inw(u32 addr)
+static inline u16 inw(u32 addr)
{
u32 n, byte_enables, data;
n = addr % 4;
@@ -345,15 +318,13 @@ __ixp4xx_inw(u32 addr)
return data>>(8*n);
}
-static inline void
-__ixp4xx_insw(u32 io_addr, u16 *vaddr, u32 count)
+static inline void insw(u32 io_addr, u16 *vaddr, u32 count)
{
while (count--)
*vaddr++ = le16_to_cpu(inw(io_addr));
}
-static inline u32
-__ixp4xx_inl(u32 addr)
+static inline u32 inl(u32 addr)
{
u32 data;
if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
@@ -362,11 +333,10 @@ __ixp4xx_inl(u32 addr)
return data;
}
-static inline void
-__ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
+static inline void insl(u32 io_addr, u32 *vaddr, u32 count)
{
while (count--)
- *vaddr++ = inl(io_addr);
+ *vaddr++ = le32_to_cpu(inl(io_addr));
}
#define PIO_OFFSET 0x10000UL
@@ -374,194 +344,183 @@ __ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
#define __is_io_address(p) (((unsigned long)p >= PIO_OFFSET) && \
((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
-static inline unsigned int
-__ixp4xx_ioread8(const void __iomem *addr)
+
+#define ioread8(p) ioread8(p)
+static inline unsigned int ioread8(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- return (unsigned int)__ixp4xx_inb(port & PIO_MASK);
+ return (unsigned int)inb(port & PIO_MASK);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
return (unsigned int)__raw_readb(port);
#else
- return (unsigned int)__ixp4xx_readb(addr);
+ return (unsigned int)__indirect_readb(addr);
#endif
}
-static inline void
-__ixp4xx_ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
+#define ioread8_rep(p, v, c) ioread8_rep(p, v, c)
+static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_insb(port & PIO_MASK, vaddr, count);
+ insb(port & PIO_MASK, vaddr, count);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_readsb(addr, vaddr, count);
#else
- __ixp4xx_readsb(addr, vaddr, count);
+ __indirect_readsb(addr, vaddr, count);
#endif
}
-static inline unsigned int
-__ixp4xx_ioread16(const void __iomem *addr)
+#define ioread16(p) ioread16(p)
+static inline unsigned int ioread16(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- return (unsigned int)__ixp4xx_inw(port & PIO_MASK);
+ return (unsigned int)inw(port & PIO_MASK);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
return le16_to_cpu(__raw_readw((u32)port));
#else
- return (unsigned int)__ixp4xx_readw(addr);
+ return (unsigned int)__indirect_readw(addr);
#endif
}
-static inline void
-__ixp4xx_ioread16_rep(const void __iomem *addr, void *vaddr, u32 count)
+#define ioread16_rep(p, v, c) ioread16_rep(p, v, c)
+static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
+ u32 count)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_insw(port & PIO_MASK, vaddr, count);
+ insw(port & PIO_MASK, vaddr, count);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_readsw(addr, vaddr, count);
#else
- __ixp4xx_readsw(addr, vaddr, count);
+ __indirect_readsw(addr, vaddr, count);
#endif
}
-static inline unsigned int
-__ixp4xx_ioread32(const void __iomem *addr)
+#define ioread32(p) ioread32(p)
+static inline unsigned int ioread32(const void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- return (unsigned int)__ixp4xx_inl(port & PIO_MASK);
+ return (unsigned int)inl(port & PIO_MASK);
else {
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
return le32_to_cpu((__force __le32)__raw_readl(addr));
#else
- return (unsigned int)__ixp4xx_readl(addr);
+ return (unsigned int)__indirect_readl(addr);
#endif
}
}
-static inline void
-__ixp4xx_ioread32_rep(const void __iomem *addr, void *vaddr, u32 count)
+#define ioread32_rep(p, v, c) ioread32_rep(p, v, c)
+static inline void ioread32_rep(const void __iomem *addr, void *vaddr,
+ u32 count)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_insl(port & PIO_MASK, vaddr, count);
+ insl(port & PIO_MASK, vaddr, count);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_readsl(addr, vaddr, count);
#else
- __ixp4xx_readsl(addr, vaddr, count);
+ __indirect_readsl(addr, vaddr, count);
#endif
}
-static inline void
-__ixp4xx_iowrite8(u8 value, void __iomem *addr)
+#define iowrite8(v, p) iowrite8(v, p)
+static inline void iowrite8(u8 value, void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_outb(value, port & PIO_MASK);
+ outb(value, port & PIO_MASK);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_writeb(value, port);
#else
- __ixp4xx_writeb(value, addr);
+ __indirect_writeb(value, addr);
#endif
}
-static inline void
-__ixp4xx_iowrite8_rep(void __iomem *addr, const void *vaddr, u32 count)
+#define iowrite8_rep(p, v, c) iowrite8_rep(p, v, c)
+static inline void iowrite8_rep(void __iomem *addr, const void *vaddr,
+ u32 count)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_outsb(port & PIO_MASK, vaddr, count);
+ outsb(port & PIO_MASK, vaddr, count);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_writesb(addr, vaddr, count);
#else
- __ixp4xx_writesb(addr, vaddr, count);
+ __indirect_writesb(addr, vaddr, count);
#endif
}
-static inline void
-__ixp4xx_iowrite16(u16 value, void __iomem *addr)
+#define iowrite16(v, p) iowrite16(v, p)
+static inline void iowrite16(u16 value, void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_outw(value, port & PIO_MASK);
+ outw(value, port & PIO_MASK);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_writew(cpu_to_le16(value), addr);
#else
- __ixp4xx_writew(value, addr);
+ __indirect_writew(value, addr);
#endif
}
-static inline void
-__ixp4xx_iowrite16_rep(void __iomem *addr, const void *vaddr, u32 count)
+#define iowrite16_rep(p, v, c) iowrite16_rep(p, v, c)
+static inline void iowrite16_rep(void __iomem *addr, const void *vaddr,
+ u32 count)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_outsw(port & PIO_MASK, vaddr, count);
+ outsw(port & PIO_MASK, vaddr, count);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_writesw(addr, vaddr, count);
#else
- __ixp4xx_writesw(addr, vaddr, count);
+ __indirect_writesw(addr, vaddr, count);
#endif
}
-static inline void
-__ixp4xx_iowrite32(u32 value, void __iomem *addr)
+#define iowrite32(v, p) iowrite32(v, p)
+static inline void iowrite32(u32 value, void __iomem *addr)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_outl(value, port & PIO_MASK);
+ outl(value, port & PIO_MASK);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_writel((u32 __force)cpu_to_le32(value), addr);
#else
- __ixp4xx_writel(value, addr);
+ __indirect_writel(value, addr);
#endif
}
-static inline void
-__ixp4xx_iowrite32_rep(void __iomem *addr, const void *vaddr, u32 count)
+#define iowrite32_rep(p, v, c) iowrite32_rep(p, v, c)
+static inline void iowrite32_rep(void __iomem *addr, const void *vaddr,
+ u32 count)
{
unsigned long port = (unsigned long __force)addr;
if (__is_io_address(port))
- __ixp4xx_outsl(port & PIO_MASK, vaddr, count);
+ outsl(port & PIO_MASK, vaddr, count);
else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
__raw_writesl(addr, vaddr, count);
#else
- __ixp4xx_writesl(addr, vaddr, count);
+ __indirect_writesl(addr, vaddr, count);
#endif
}
-#define ioread8(p) __ixp4xx_ioread8(p)
-#define ioread16(p) __ixp4xx_ioread16(p)
-#define ioread32(p) __ixp4xx_ioread32(p)
-
-#define ioread8_rep(p, v, c) __ixp4xx_ioread8_rep(p, v, c)
-#define ioread16_rep(p, v, c) __ixp4xx_ioread16_rep(p, v, c)
-#define ioread32_rep(p, v, c) __ixp4xx_ioread32_rep(p, v, c)
-
-#define iowrite8(v,p) __ixp4xx_iowrite8(v,p)
-#define iowrite16(v,p) __ixp4xx_iowrite16(v,p)
-#define iowrite32(v,p) __ixp4xx_iowrite32(v,p)
-
-#define iowrite8_rep(p, v, c) __ixp4xx_iowrite8_rep(p, v, c)
-#define iowrite16_rep(p, v, c) __ixp4xx_iowrite16_rep(p, v, c)
-#define iowrite32_rep(p, v, c) __ixp4xx_iowrite32_rep(p, v, c)
-
#define ioport_map(port, nr) ((void __iomem*)(port + PIO_OFFSET))
#define ioport_unmap(addr)
-#endif // !CONFIG_PCI
-
-#endif // __ASM_ARM_ARCH_IO_H
+#endif /* CONFIG_PCI */
+#endif /* __ASM_ARM_ARCH_IO_H */
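The io.h hunk above switches the direct/indirect decision from a VMALLOC_START comparison to an explicit window test and renames the accessors from __ixp4xx_* to __indirect_*. A minimal user-space sketch of that predicate and of the byte-enable math behind __indirect_writeb() follows; it is not part of the patch, and the byte-enable shift and example addresses are illustrative assumptions rather than the real IXP4xx register interface.

#include <stdio.h>

#define PCIBIOS_MIN_MEM 0x10000000u   /* indirect-PCI window start, per the hardware.h hunk */
#define PCIBIOS_MAX_MEM 0x4FFFFFFFu

static int is_pci_memory(unsigned int addr)
{
        return addr >= PCIBIOS_MIN_MEM && addr <= PCIBIOS_MAX_MEM;
}

/* A byte store into the PCI window becomes one 32-bit memory-write cycle with
 * three of the four byte lanes disabled; the shift of 4 for the byte-enable
 * field is an assumption standing in for the real IXP4xx definition. */
static void model_writeb(unsigned char value, unsigned int addr)
{
        unsigned int n = addr % 4;
        unsigned int byte_enables = (0xf & ~(1u << n)) << 4;
        unsigned int data = (unsigned int)value << (8 * n);

        printf("addr=%#x lane=%u byte_enables=%#x data=%#x\n",
               addr, n, byte_enables, data);
}

int main(void)
{
        printf("0x48000001 in PCI window: %d\n", is_pci_memory(0x48000001));
        printf("0xc8000000 in PCI window: %d\n", is_pci_memory(0xc8000000));
        model_writeb(0xab, 0x48000002);
        return 0;
}

Either way, addresses outside the window fall through to the ordinary __raw_* accessors, exactly as the inline helpers in the hunk do.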
diff --git a/arch/arm/mach-ixp4xx/include/mach/irqs.h b/arch/arm/mach-ixp4xx/include/mach/irqs.h
index f4d74de1566..7e6d4cce7c2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/irqs.h
+++ b/arch/arm/mach-ixp4xx/include/mach/irqs.h
@@ -15,7 +15,6 @@
#ifndef _ARCH_IXP4XX_IRQS_H_
#define _ARCH_IXP4XX_IRQS_H_
-
#define IRQ_IXP4XX_NPEA 0
#define IRQ_IXP4XX_NPEB 1
#define IRQ_IXP4XX_NPEC 2
@@ -59,6 +58,9 @@
#define IRQ_IXP4XX_MCU_ECC 61
#define IRQ_IXP4XX_EXP_PE 62
+#define _IXP4XX_GPIO_IRQ(n) (IRQ_IXP4XX_GPIO ## n)
+#define IXP4XX_GPIO_IRQ(n) _IXP4XX_GPIO_IRQ(n)
+
/*
* Only first 32 sources are valid if running on IXP42x systems
*/
@@ -70,69 +72,4 @@
#define XSCALE_PMU_IRQ (IRQ_IXP4XX_XSCALE_PMU)
-/*
- * IXDP425 board IRQs
- */
-#define IRQ_IXDP425_PCI_INTA IRQ_IXP4XX_GPIO11
-#define IRQ_IXDP425_PCI_INTB IRQ_IXP4XX_GPIO10
-#define IRQ_IXDP425_PCI_INTC IRQ_IXP4XX_GPIO9
-#define IRQ_IXDP425_PCI_INTD IRQ_IXP4XX_GPIO8
-
-/*
- * Gateworks Avila board IRQs
- */
-#define IRQ_AVILA_PCI_INTA IRQ_IXP4XX_GPIO11
-#define IRQ_AVILA_PCI_INTB IRQ_IXP4XX_GPIO10
-#define IRQ_AVILA_PCI_INTC IRQ_IXP4XX_GPIO9
-#define IRQ_AVILA_PCI_INTD IRQ_IXP4XX_GPIO8
-
-
-/*
- * PrPMC1100 Board IRQs
- */
-#define IRQ_PRPMC1100_PCI_INTA IRQ_IXP4XX_GPIO11
-#define IRQ_PRPMC1100_PCI_INTB IRQ_IXP4XX_GPIO10
-#define IRQ_PRPMC1100_PCI_INTC IRQ_IXP4XX_GPIO9
-#define IRQ_PRPMC1100_PCI_INTD IRQ_IXP4XX_GPIO8
-
-/*
- * ADI Coyote Board IRQs
- */
-#define IRQ_COYOTE_PCI_SLOT0 IRQ_IXP4XX_GPIO6
-#define IRQ_COYOTE_PCI_SLOT1 IRQ_IXP4XX_GPIO11
-#define IRQ_COYOTE_IDE IRQ_IXP4XX_GPIO5
-
-/*
- * NSLU2 board IRQs
- */
-#define IRQ_NSLU2_PCI_INTA IRQ_IXP4XX_GPIO11
-#define IRQ_NSLU2_PCI_INTB IRQ_IXP4XX_GPIO10
-#define IRQ_NSLU2_PCI_INTC IRQ_IXP4XX_GPIO9
-
-/*
- * NAS100D board IRQs
- */
-#define IRQ_NAS100D_PCI_INTA IRQ_IXP4XX_GPIO11
-#define IRQ_NAS100D_PCI_INTB IRQ_IXP4XX_GPIO10
-#define IRQ_NAS100D_PCI_INTC IRQ_IXP4XX_GPIO9
-#define IRQ_NAS100D_PCI_INTD IRQ_IXP4XX_GPIO8
-#define IRQ_NAS100D_PCI_INTE IRQ_IXP4XX_GPIO7
-
-/*
- * D-Link DSM-G600 RevA board IRQs
- */
-#define IRQ_DSMG600_PCI_INTA IRQ_IXP4XX_GPIO11
-#define IRQ_DSMG600_PCI_INTB IRQ_IXP4XX_GPIO10
-#define IRQ_DSMG600_PCI_INTC IRQ_IXP4XX_GPIO9
-#define IRQ_DSMG600_PCI_INTD IRQ_IXP4XX_GPIO8
-#define IRQ_DSMG600_PCI_INTE IRQ_IXP4XX_GPIO7
-#define IRQ_DSMG600_PCI_INTF IRQ_IXP4XX_GPIO6
-
-/*
- * Freecom FSG-3 Board IRQs
- */
-#define IRQ_FSG_PCI_INTA IRQ_IXP4XX_GPIO6
-#define IRQ_FSG_PCI_INTB IRQ_IXP4XX_GPIO7
-#define IRQ_FSG_PCI_INTC IRQ_IXP4XX_GPIO5
-
#endif
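The new IXP4XX_GPIO_IRQ() pair added above uses the standard two-level expansion trick: the outer macro forces its argument to be macro-expanded before the inner one pastes it onto IRQ_IXP4XX_GPIO, which is what lets the board files later in this patch pass symbolic pin numbers such as INTA. A small standalone sketch, with a made-up IRQ value:

#include <stdio.h>

#define IRQ_IXP4XX_GPIO11 29          /* value is illustrative, not the SoC's */

#define _IXP4XX_GPIO_IRQ(n)  (IRQ_IXP4XX_GPIO ## n)
#define IXP4XX_GPIO_IRQ(n)   _IXP4XX_GPIO_IRQ(n)

#define INTA 11                        /* board-file style, as in ixdp425-pci.c below */

int main(void)
{
        /* With only one level, IXP4XX_GPIO_IRQ(INTA) would paste to the
         * nonexistent IRQ_IXP4XX_GPIOINTA; the extra level expands INTA
         * to 11 first, so this resolves to IRQ_IXP4XX_GPIO11. */
        printf("%d\n", IXP4XX_GPIO_IRQ(INTA));    /* prints 29 */
        return 0;
}

That indirection is why the board files can drop their private IRQ_*_PCI_INT* defines and keep only local pin-number macros.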
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixdp425.h b/arch/arm/mach-ixp4xx/include/mach/ixdp425.h
deleted file mode 100644
index 2cafe65ebfe..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/ixdp425.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/ixdp425.h
- *
- * IXDP425 platform specific definitions
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright 2004 (c) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-#define IXDP425_SDA_PIN 7
-#define IXDP425_SCL_PIN 6
-
-/*
- * IXDP425 PCI IRQs
- */
-#define IXDP425_PCI_MAX_DEV 4
-#define IXDP425_PCI_IRQ_LINES 4
-
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define IXDP425_PCI_INTA_PIN 11
-#define IXDP425_PCI_INTB_PIN 10
-#define IXDP425_PCI_INTC_PIN 9
-#define IXDP425_PCI_INTD_PIN 8
-
-/* NAND Flash pins */
-#define IXDP425_NAND_NCE_PIN 12
-
-#define IXDP425_NAND_CMD_BYTE 0x01
-#define IXDP425_NAND_ADDR_BYTE 0x02
diff --git a/arch/arm/mach-ixp4xx/include/mach/nas100d.h b/arch/arm/mach-ixp4xx/include/mach/nas100d.h
deleted file mode 100644
index 3771d62a974..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/nas100d.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/nas100d.h
- *
- * NAS100D platform specific definitions
- *
- * Copyright (c) 2005 Tower Technologies
- *
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- *
- * based on ixdp425.h:
- * Copyright 2004 (c) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-#define NAS100D_SDA_PIN 5
-#define NAS100D_SCL_PIN 6
-
-/*
- * NAS100D PCI IRQs
- */
-#define NAS100D_PCI_MAX_DEV 3
-#define NAS100D_PCI_IRQ_LINES 3
-
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define NAS100D_PCI_INTA_PIN 11
-#define NAS100D_PCI_INTB_PIN 10
-#define NAS100D_PCI_INTC_PIN 9
-#define NAS100D_PCI_INTD_PIN 8
-#define NAS100D_PCI_INTE_PIN 7
-
-/* Buttons */
-
-#define NAS100D_PB_GPIO 14 /* power button */
-#define NAS100D_RB_GPIO 4 /* reset button */
-
-/* Power control */
-
-#define NAS100D_PO_GPIO 12 /* power off */
-
-/* LEDs */
-
-#define NAS100D_LED_WLAN_GPIO 0
-#define NAS100D_LED_DISK_GPIO 3
-#define NAS100D_LED_PWR_GPIO 15
diff --git a/arch/arm/mach-ixp4xx/include/mach/npe.h b/arch/arm/mach-ixp4xx/include/mach/npe.h
index 37d0511689d..e320db2457a 100644
--- a/arch/arm/mach-ixp4xx/include/mach/npe.h
+++ b/arch/arm/mach-ixp4xx/include/mach/npe.h
@@ -33,7 +33,7 @@ int npe_send_message(struct npe *npe, const void *msg, const char *what);
int npe_recv_message(struct npe *npe, void *msg, const char *what);
int npe_send_recv_message(struct npe *npe, void *msg, const char *what);
int npe_load_firmware(struct npe *npe, const char *name, struct device *dev);
-struct npe *npe_request(int id);
+struct npe *npe_request(unsigned id);
void npe_release(struct npe *npe);
#endif /* __IXP4XX_NPE_H */
diff --git a/arch/arm/mach-ixp4xx/include/mach/nslu2.h b/arch/arm/mach-ixp4xx/include/mach/nslu2.h
deleted file mode 100644
index 85d00adbfb9..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/nslu2.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/nslu2.h
- *
- * NSLU2 platform specific definitions
- *
- * Author: Mark Rakes <mrakes AT mac.com>
- * Maintainers: http://www.nslu2-linux.org
- *
- * based on ixdp425.h:
- * Copyright 2004 (c) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-#define NSLU2_SDA_PIN 7
-#define NSLU2_SCL_PIN 6
-
-/*
- * NSLU2 PCI IRQs
- */
-#define NSLU2_PCI_MAX_DEV 3
-#define NSLU2_PCI_IRQ_LINES 3
-
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define NSLU2_PCI_INTA_PIN 11
-#define NSLU2_PCI_INTB_PIN 10
-#define NSLU2_PCI_INTC_PIN 9
-#define NSLU2_PCI_INTD_PIN 8
-
-/* NSLU2 Timer */
-#define NSLU2_FREQ 66000000
-
-/* Buttons */
-
-#define NSLU2_PB_GPIO 5 /* power button */
-#define NSLU2_PO_GPIO 8 /* power off */
-#define NSLU2_RB_GPIO 12 /* reset button */
-
-/* Buzzer */
-
-#define NSLU2_GPIO_BUZZ 4
-
-/* LEDs */
-
-#define NSLU2_LED_RED_GPIO 0
-#define NSLU2_LED_GRN_GPIO 1
-#define NSLU2_LED_DISK1_GPIO 3
-#define NSLU2_LED_DISK2_GPIO 2
diff --git a/arch/arm/mach-ixp4xx/include/mach/prpmc1100.h b/arch/arm/mach-ixp4xx/include/mach/prpmc1100.h
deleted file mode 100644
index 17274a2e3de..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/prpmc1100.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/prpmc1100.h
- *
- * Motorolla PrPMC1100 platform specific definitions
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- *
- * Copyright 2004 (c) MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_HARDWARE_H__
-#error "Do not include this directly, instead #include <mach/hardware.h>"
-#endif
-
-#define PRPMC1100_FLASH_BASE IXP4XX_EXP_BUS_CS0_BASE_PHYS
-#define PRPMC1100_FLASH_SIZE IXP4XX_EXP_BUS_CSX_REGION_SIZE
-
-#define PRPMC1100_PCI_MIN_DEVID 10
-#define PRPMC1100_PCI_MAX_DEVID 16
-#define PRPMC1100_PCI_IRQ_LINES 4
-
-
-/* PCI controller GPIO to IRQ pin mappings */
-#define PRPMC1100_PCI_INTA_PIN 11
-#define PRPMC1100_PCI_INTB_PIN 10
-#define PRPMC1100_PCI_INTC_PIN 9
-#define PRPMC1100_PCI_INTD_PIN 8
-
-
diff --git a/arch/arm/mach-ixp4xx/include/mach/timex.h b/arch/arm/mach-ixp4xx/include/mach/timex.h
index 89ce3ee8469..2c3f93c3eb7 100644
--- a/arch/arm/mach-ixp4xx/include/mach/timex.h
+++ b/arch/arm/mach-ixp4xx/include/mach/timex.h
@@ -10,6 +10,6 @@
 * 66.66... MHz. We do a convoluted calculation of CLOCK_TICK_RATE b/c the
* timer register ignores the bottom 2 bits of the LATCH value.
*/
-#define FREQ 66666666
+#define FREQ 66666000
#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
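The timex.h change above drops FREQ from 66666666 to 66666000; CLOCK_TICK_RATE is then rebuilt from it because, as the comment says, the OS timer ignores the two low bits of the reload value. A worked example of that expression, assuming HZ is 100 and IXP4XX_OST_RELOAD_MASK is 0x3 (both are assumptions, taken from the usual ARM default and the mask's name):

#include <stdio.h>

#define HZ                      100            /* assumed kernel HZ for the example */
#define IXP4XX_OST_RELOAD_MASK  0x3            /* bottom 2 bits ignored by the timer */
#define FREQ                    66666000

#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)

int main(void)
{
        /* 66666000/100 = 666660 -> clear low 2 bits -> 666660 -> +1 -> 666661
         * -> *100 = 66666100, the effective tick rate seen by timekeeping. */
        printf("CLOCK_TICK_RATE = %d\n", CLOCK_TICK_RATE);
        return 0;
}

The rounding is the whole point: CLOCK_TICK_RATE has to reflect what the latch register can actually hold, not the nominal crystal frequency divided by HZ.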
diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c
index 64c29aacaac..1ba165a6eda 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-pci.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-pci.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/mach-ixp4xx/ixdp425-pci.c
+ * arch/arm/mach-ixp4xx/ixdp425-pci.c
*
* IXDP425 board-level PCI initialization
*
@@ -19,39 +19,43 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/delay.h>
-
#include <asm/mach/pci.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
+#define MAX_DEV 4
+#define IRQ_LINES 4
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define INTA 11
+#define INTB 10
+#define INTC 9
+#define INTD 8
+
+
void __init ixdp425_pci_preinit(void)
{
- set_irq_type(IRQ_IXDP425_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_IXDP425_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_IXDP425_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_IXDP425_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init ixdp425_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- static int pci_irq_table[IXDP425_PCI_IRQ_LINES] = {
- IRQ_IXDP425_PCI_INTA,
- IRQ_IXDP425_PCI_INTB,
- IRQ_IXDP425_PCI_INTC,
- IRQ_IXDP425_PCI_INTD
+ static int pci_irq_table[IRQ_LINES] = {
+ IXP4XX_GPIO_IRQ(INTA),
+ IXP4XX_GPIO_IRQ(INTB),
+ IXP4XX_GPIO_IRQ(INTC),
+ IXP4XX_GPIO_IRQ(INTD)
};
- int irq = -1;
-
- if (slot >= 1 && slot <= IXDP425_PCI_MAX_DEV &&
- pin >= 1 && pin <= IXDP425_PCI_IRQ_LINES) {
- irq = pci_irq_table[(slot + pin - 2) % 4];
- }
+ if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES)
+ return pci_irq_table[(slot + pin - 2) % 4];
- return irq;
+ return -1;
}
struct hw_pci ixdp425_pci __initdata = {
@@ -72,4 +76,3 @@ int __init ixdp425_pci_init(void)
}
subsys_initcall(ixdp425_pci_init);
-
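The rewritten ixdp425_map_irq() above folds the old table lookup into a direct return; the (slot + pin - 2) % 4 expression is the usual rotating swizzle that spreads the interrupt pins of the four slots across INTA..INTD. A runnable sketch with made-up IRQ numbers, not part of the patch:

#include <stdio.h>

/* Illustrative stand-ins for IXP4XX_GPIO_IRQ(INTA..INTD). */
static const int pci_irq_table[4] = { 28, 27, 26, 25 };

static int ixdp425_map_irq(int slot, int pin)
{
        /* slot and pin are both 1-based; slot 1/pin 1 picks entry 0 (INTA),
         * slot 2/pin 1 picks entry 1 (INTB), and so on, wrapping mod 4. */
        if (slot >= 1 && slot <= 4 && pin >= 1 && pin <= 4)
                return pci_irq_table[(slot + pin - 2) % 4];
        return -1;
}

int main(void)
{
        int slot;

        for (slot = 1; slot <= 4; slot++)
                printf("slot %d, INTA -> irq %d\n", slot, ixdp425_map_irq(slot, 1));
        return 0;
}

Adjacent slots using the same pin therefore land on different IRQ lines, which is the standard PCI interrupt-swizzling behaviour the old table encoded by hand.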
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index f4a0c1bc133..bbb76898884 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -1,7 +1,7 @@
/*
* arch/arm/mach-ixp4xx/ixdp425-setup.c
*
- * IXDP425/IXCDP1100 board-setup
+ * IXDP425/IXCDP1100 board-setup
*
* Copyright (C) 2003-2005 MontaVista Software, Inc.
*
@@ -21,7 +21,6 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
-
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
@@ -31,6 +30,15 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#define IXDP425_SDA_PIN 7
+#define IXDP425_SCL_PIN 6
+
+/* NAND Flash pins */
+#define IXDP425_NAND_NCE_PIN 12
+
+#define IXDP425_NAND_CMD_BYTE 0x01
+#define IXDP425_NAND_ADDR_BYTE 0x02
+
static struct flash_platform_data ixdp425_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ixp4xx/ixp4xx_npe.c b/arch/arm/mach-ixp4xx/ixp4xx_npe.c
index 47ac69c7ec7..e8bb2577816 100644
--- a/arch/arm/mach-ixp4xx/ixp4xx_npe.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx_npe.c
@@ -665,7 +665,7 @@ err:
}
-struct npe *npe_request(int id)
+struct npe *npe_request(unsigned id)
{
if (id < NPE_COUNT)
if (npe_tab[id].valid)
diff --git a/arch/arm/mach-ixp4xx/nas100d-pci.c b/arch/arm/mach-ixp4xx/nas100d-pci.c
index 1088426fdce..d0cea34cf61 100644
--- a/arch/arm/mach-ixp4xx/nas100d-pci.c
+++ b/arch/arm/mach-ixp4xx/nas100d-pci.c
@@ -18,37 +18,42 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
-
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#define MAX_DEV 3
+#define IRQ_LINES 3
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define INTA 11
+#define INTB 10
+#define INTC 9
+#define INTD 8
+#define INTE 7
+
void __init nas100d_pci_preinit(void)
{
- set_irq_type(IRQ_NAS100D_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_NAS100D_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_NAS100D_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_NAS100D_PCI_INTD, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_NAS100D_PCI_INTE, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTD), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTE), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init nas100d_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- static int pci_irq_table[NAS100D_PCI_MAX_DEV][NAS100D_PCI_IRQ_LINES] =
- {
- { IRQ_NAS100D_PCI_INTA, -1, -1 },
- { IRQ_NAS100D_PCI_INTB, -1, -1 },
- { IRQ_NAS100D_PCI_INTC, IRQ_NAS100D_PCI_INTD, IRQ_NAS100D_PCI_INTE },
+ static int pci_irq_table[MAX_DEV][IRQ_LINES] = {
+ { IXP4XX_GPIO_IRQ(INTA), -1, -1 },
+ { IXP4XX_GPIO_IRQ(INTB), -1, -1 },
+ { IXP4XX_GPIO_IRQ(INTC), IXP4XX_GPIO_IRQ(INTD),
+ IXP4XX_GPIO_IRQ(INTE) },
};
- int irq = -1;
-
- if (slot >= 1 && slot <= NAS100D_PCI_MAX_DEV &&
- pin >= 1 && pin <= NAS100D_PCI_IRQ_LINES)
- irq = pci_irq_table[slot-1][pin-1];
+ if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES)
+ return pci_irq_table[slot - 1][pin - 1];
- return irq;
+ return -1;
}
struct hw_pci __initdata nas100d_pci = {
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 921c947b5b6..e3ee880aa1e 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -29,12 +29,26 @@
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/io.h>
-
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/gpio.h>
+#define NAS100D_SDA_PIN 5
+#define NAS100D_SCL_PIN 6
+
+/* Buttons */
+#define NAS100D_PB_GPIO 14 /* power button */
+#define NAS100D_RB_GPIO 4 /* reset button */
+
+/* Power control */
+#define NAS100D_PO_GPIO 12 /* power off */
+
+/* LEDs */
+#define NAS100D_LED_WLAN_GPIO 0
+#define NAS100D_LED_DISK_GPIO 3
+#define NAS100D_LED_PWR_GPIO 15
+
static struct flash_platform_data nas100d_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ixp4xx/nslu2-pci.c b/arch/arm/mach-ixp4xx/nslu2-pci.c
index 4429b8448b6..1eb5a90470b 100644
--- a/arch/arm/mach-ixp4xx/nslu2-pci.c
+++ b/arch/arm/mach-ixp4xx/nslu2-pci.c
@@ -18,35 +18,38 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
-
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#define MAX_DEV 3
+#define IRQ_LINES 3
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define INTA 11
+#define INTB 10
+#define INTC 9
+#define INTD 8
+
void __init nslu2_pci_preinit(void)
{
- set_irq_type(IRQ_NSLU2_PCI_INTA, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_NSLU2_PCI_INTB, IRQ_TYPE_LEVEL_LOW);
- set_irq_type(IRQ_NSLU2_PCI_INTC, IRQ_TYPE_LEVEL_LOW);
-
+ set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
+ set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init nslu2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
- static int pci_irq_table[NSLU2_PCI_IRQ_LINES] = {
- IRQ_NSLU2_PCI_INTA,
- IRQ_NSLU2_PCI_INTB,
- IRQ_NSLU2_PCI_INTC,
+ static int pci_irq_table[IRQ_LINES] = {
+ IXP4XX_GPIO_IRQ(INTA),
+ IXP4XX_GPIO_IRQ(INTB),
+ IXP4XX_GPIO_IRQ(INTC),
};
- int irq = -1;
-
- if (slot >= 1 && slot <= NSLU2_PCI_MAX_DEV &&
- pin >= 1 && pin <= NSLU2_PCI_IRQ_LINES) {
- irq = pci_irq_table[(slot + pin - 2) % NSLU2_PCI_IRQ_LINES];
- }
+ if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES)
+ return pci_irq_table[(slot + pin - 2) % IRQ_LINES];
- return irq;
+ return -1;
}
struct hw_pci __initdata nslu2_pci = {
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index ff6a08d02cc..c14e0034be4 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -26,13 +26,32 @@
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/io.h>
-
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/time.h>
#include <asm/gpio.h>
+#define NSLU2_SDA_PIN 7
+#define NSLU2_SCL_PIN 6
+
+/* NSLU2 Timer */
+#define NSLU2_FREQ 66000000
+
+/* Buttons */
+#define NSLU2_PB_GPIO 5 /* power button */
+#define NSLU2_PO_GPIO 8 /* power off */
+#define NSLU2_RB_GPIO 12 /* reset button */
+
+/* Buzzer */
+#define NSLU2_GPIO_BUZZ 4
+
+/* LEDs */
+#define NSLU2_LED_RED_GPIO 0
+#define NSLU2_LED_GRN_GPIO 1
+#define NSLU2_LED_DISK1_GPIO 3
+#define NSLU2_LED_DISK2_GPIO 2
+
static struct flash_platform_data nslu2_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index 8bf09ae5b34..f6c6196a51f 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -52,6 +52,12 @@ config MACH_OPENRD_BASE
Say 'Y' here if you want your kernel to support the
Marvell OpenRD Base Board.
+config MACH_NETSPACE_V2
+ bool "LaCie Network Space v2 NAS Board"
+ help
+ Say 'Y' here if you want your kernel to support the
+ LaCie Network Space v2 NAS.
+
endmenu
endif
diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
index 9f2f67b2b63..d4d7f53b0fb 100644
--- a/arch/arm/mach-kirkwood/Makefile
+++ b/arch/arm/mach-kirkwood/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_MACH_SHEEVAPLUG) += sheevaplug-setup.o
obj-$(CONFIG_MACH_TS219) += ts219-setup.o tsx1x-common.o
obj-$(CONFIG_MACH_TS41X) += ts41x-setup.o tsx1x-common.o
obj-$(CONFIG_MACH_OPENRD_BASE) += openrd_base-setup.o
+obj-$(CONFIG_MACH_NETSPACE_V2) += netspace_v2-setup.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
new file mode 100644
index 00000000000..9a064065beb
--- /dev/null
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -0,0 +1,325 @@
+/*
+ * arch/arm/mach-kirkwood/netspace_v2-setup.c
+ *
+ * LaCie Network Space v2 board setup
+ *
+ * Copyright (C) 2009 Simon Guinot <sguinot@lacie.com>
+ * Copyright (C) 2009 Benoît Canet <benoit.canet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/ata_platform.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/i2c.h>
+#include <linux/i2c/at24.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/leds.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/kirkwood.h>
+#include <plat/time.h>
+#include "common.h"
+#include "mpp.h"
+
+/*****************************************************************************
+ * 512KB SPI Flash on Boot Device (MACRONIX MX25L4005)
+ ****************************************************************************/
+
+static struct mtd_partition netspace_v2_flash_parts[] = {
+ {
+ .name = "u-boot",
+ .size = MTDPART_SIZ_FULL,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+};
+
+static const struct flash_platform_data netspace_v2_flash = {
+ .type = "mx25l4005a",
+ .name = "spi_flash",
+ .parts = netspace_v2_flash_parts,
+ .nr_parts = ARRAY_SIZE(netspace_v2_flash_parts),
+};
+
+static struct spi_board_info __initdata netspace_v2_spi_slave_info[] = {
+ {
+ .modalias = "m25p80",
+ .platform_data = &netspace_v2_flash,
+ .irq = -1,
+ .max_speed_hz = 20000000,
+ .bus_num = 0,
+ .chip_select = 0,
+ },
+};
+
+/*****************************************************************************
+ * Ethernet
+ ****************************************************************************/
+
+static struct mv643xx_eth_platform_data netspace_v2_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(8),
+};
+
+/*****************************************************************************
+ * I2C devices
+ ****************************************************************************/
+
+static struct at24_platform_data at24c04 = {
+ .byte_len = SZ_4K / 8,
+ .page_size = 16,
+};
+
+/*
+ * i2c addr | chip | description
+ * 0x50 | HT24LC04 | eeprom (512B)
+ */
+
+static struct i2c_board_info __initdata netspace_v2_i2c_info[] = {
+ {
+ I2C_BOARD_INFO("24c04", 0x50),
+ .platform_data = &at24c04,
+ }
+};
+
+/*****************************************************************************
+ * SATA
+ ****************************************************************************/
+
+static struct mv_sata_platform_data netspace_v2_sata_data = {
+ .n_ports = 2,
+};
+
+#define NETSPACE_V2_GPIO_SATA0_POWER 16
+#define NETSPACE_V2_GPIO_SATA1_POWER 17
+
+static void __init netspace_v2_sata_power_init(void)
+{
+ int err;
+
+ err = gpio_request(NETSPACE_V2_GPIO_SATA0_POWER, "SATA0 power");
+ if (err == 0) {
+ err = gpio_direction_output(NETSPACE_V2_GPIO_SATA0_POWER, 1);
+ if (err)
+ gpio_free(NETSPACE_V2_GPIO_SATA0_POWER);
+ }
+ if (err)
+ pr_err("netspace_v2: failed to setup SATA0 power\n");
+}
+
+/*****************************************************************************
+ * GPIO keys
+ ****************************************************************************/
+
+#define NETSPACE_V2_PUSH_BUTTON 32
+
+static struct gpio_keys_button netspace_v2_buttons[] = {
+ [0] = {
+ .code = KEY_POWER,
+ .gpio = NETSPACE_V2_PUSH_BUTTON,
+ .desc = "Power push button",
+ .active_low = 0,
+ },
+};
+
+static struct gpio_keys_platform_data netspace_v2_button_data = {
+ .buttons = netspace_v2_buttons,
+ .nbuttons = ARRAY_SIZE(netspace_v2_buttons),
+};
+
+static struct platform_device netspace_v2_gpio_buttons = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &netspace_v2_button_data,
+ },
+};
+
+/*****************************************************************************
+ * GPIO LEDs
+ ****************************************************************************/
+
+/*
+ * The blue front LED is wired to a CPLD and can blink in relation with the
+ * SATA activity.
+ *
+ * The following array detail the different LED registers and the combination
+ * of their possible values:
+ *
+ * cmd_led | slow_led | /SATA active | LED state
+ * | | |
+ * 1 | 0 | x | off
+ * - | 1 | x | on
+ * 0 | 0 | 1 | on
+ * 0 | 0 | 0 | blink (rate 300ms)
+ */
+
+#define NETSPACE_V2_GPIO_RED_LED 12
+#define NETSPACE_V2_GPIO_BLUE_LED_SLOW 29
+#define NETSPACE_V2_GPIO_BLUE_LED_CMD 30
+
+
+static struct gpio_led netspace_v2_gpio_led_pins[] = {
+ {
+ .name = "ns_v2:red:fail",
+ .gpio = NETSPACE_V2_GPIO_RED_LED,
+ },
+};
+
+static struct gpio_led_platform_data netspace_v2_gpio_leds_data = {
+ .num_leds = ARRAY_SIZE(netspace_v2_gpio_led_pins),
+ .leds = netspace_v2_gpio_led_pins,
+};
+
+static struct platform_device netspace_v2_gpio_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &netspace_v2_gpio_leds_data,
+ },
+};
+
+static void __init netspace_v2_gpio_leds_init(void)
+{
+ platform_device_register(&netspace_v2_gpio_leds);
+
+ /*
+ * Configure the front blue LED to blink in relation with the SATA
+ * activity.
+ */
+ if (gpio_request(NETSPACE_V2_GPIO_BLUE_LED_SLOW,
+ "SATA blue LED slow") != 0)
+ return;
+ if (gpio_direction_output(NETSPACE_V2_GPIO_BLUE_LED_SLOW, 0) != 0)
+ goto err_free_1;
+ if (gpio_request(NETSPACE_V2_GPIO_BLUE_LED_CMD,
+ "SATA blue LED command") != 0)
+ goto err_free_1;
+ if (gpio_direction_output(NETSPACE_V2_GPIO_BLUE_LED_CMD, 0) != 0)
+ goto err_free_2;
+
+ return;
+
+err_free_2:
+ gpio_free(NETSPACE_V2_GPIO_BLUE_LED_CMD);
+err_free_1:
+ gpio_free(NETSPACE_V2_GPIO_BLUE_LED_SLOW);
+ pr_err("netspace_v2: failed to configure SATA blue LED\n");
+}
+
+/*****************************************************************************
+ * Timer
+ ****************************************************************************/
+
+static void netspace_v2_timer_init(void)
+{
+ kirkwood_tclk = 166666667;
+ orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
+}
+
+struct sys_timer netspace_v2_timer = {
+ .init = netspace_v2_timer_init,
+};
+
+/*****************************************************************************
+ * General Setup
+ ****************************************************************************/
+
+static unsigned int netspace_v2_mpp_config[] __initdata = {
+ MPP0_SPI_SCn,
+ MPP1_SPI_MOSI,
+ MPP2_SPI_SCK,
+ MPP3_SPI_MISO,
+ MPP4_NF_IO6,
+ MPP5_NF_IO7,
+ MPP6_SYSRST_OUTn,
+ MPP8_TW_SDA,
+ MPP9_TW_SCK,
+ MPP10_UART0_TXD,
+ MPP11_UART0_RXD,
+ MPP12_GPO, /* Red led */
+ MPP14_GPIO, /* USB fuse */
+ MPP16_GPIO, /* SATA 0 power */
+ MPP18_NF_IO0,
+ MPP19_NF_IO1,
+ MPP20_SATA1_ACTn,
+ MPP21_SATA0_ACTn,
+ MPP24_GPIO, /* USB mode select */
+ MPP25_GPIO, /* Fan rotation fail */
+ MPP26_GPIO, /* USB device vbus */
+ MPP28_GPIO, /* USB enable host vbus */
+ MPP29_GPIO, /* Blue led (slow register) */
+ MPP30_GPIO, /* Blue led (command register) */
+ MPP31_GPIO, /* Board power off */
+ MPP32_GPIO, /* Power button (0 = Released, 1 = Pushed) */
+ 0
+};
+
+#define NETSPACE_V2_GPIO_POWER_OFF 31
+
+static void netspace_v2_power_off(void)
+{
+ gpio_set_value(NETSPACE_V2_GPIO_POWER_OFF, 1);
+}
+
+static void __init netspace_v2_init(void)
+{
+ /*
+ * Basic setup. Needs to be called early.
+ */
+ kirkwood_init();
+ kirkwood_mpp_conf(netspace_v2_mpp_config);
+
+ netspace_v2_sata_power_init();
+
+ kirkwood_ehci_init();
+ kirkwood_ge00_init(&netspace_v2_ge00_data);
+ kirkwood_sata_init(&netspace_v2_sata_data);
+ kirkwood_uart0_init();
+ spi_register_board_info(netspace_v2_spi_slave_info,
+ ARRAY_SIZE(netspace_v2_spi_slave_info));
+ kirkwood_spi_init();
+ kirkwood_i2c_init();
+ i2c_register_board_info(0, netspace_v2_i2c_info,
+ ARRAY_SIZE(netspace_v2_i2c_info));
+
+ netspace_v2_gpio_leds_init();
+ platform_device_register(&netspace_v2_gpio_buttons);
+
+ if (gpio_request(NETSPACE_V2_GPIO_POWER_OFF, "power-off") == 0 &&
+ gpio_direction_output(NETSPACE_V2_GPIO_POWER_OFF, 0) == 0)
+ pm_power_off = netspace_v2_power_off;
+ else
+ pr_err("netspace_v2: failed to configure power-off GPIO\n");
+}
+
+MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
+ .phys_io = KIRKWOOD_REGS_PHYS_BASE,
+ .io_pg_offst = ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x00000100,
+ .init_machine = netspace_v2_init,
+ .map_io = kirkwood_map_io,
+ .init_irq = kirkwood_init_irq,
+ .timer = &netspace_v2_timer,
+MACHINE_END
diff --git a/arch/arm/mach-lh7a40x/clocks.c b/arch/arm/mach-lh7a40x/clocks.c
index 6182f5410b4..fcaf876f19b 100644
--- a/arch/arm/mach-lh7a40x/clocks.c
+++ b/arch/arm/mach-lh7a40x/clocks.c
@@ -7,8 +7,6 @@
* version 2 as published by the Free Software Foundation.
*
*/
-
-#include <linux/cpufreq.h>
#include <mach/hardware.h>
#include <mach/clocks.h>
#include <linux/err.h>
@@ -31,12 +29,6 @@ struct clk {
#define HCLKDIV(c) (((c) >> 0) & 0x02)
#define PCLKDIV(c) (((c) >> 16) & 0x03)
-unsigned int cpufreq_get (unsigned int cpu) /* in kHz */
-{
- return fclkfreq_get ()/1000;
-}
-EXPORT_SYMBOL(cpufreq_get);
-
unsigned int fclkfreq_get (void)
{
unsigned int clkset = CSC_CLKSET;
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index d140abca690..f780086befd 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -3,6 +3,30 @@ if ARCH_MSM
comment "MSM Board Type"
depends on ARCH_MSM
+config MSM_DEBUG_UART
+ int
+ default 1 if MSM_DEBUG_UART1
+ default 2 if MSM_DEBUG_UART2
+ default 3 if MSM_DEBUG_UART3
+
+choice
+ prompt "Debug UART"
+
+ default MSM_DEBUG_UART_NONE
+
+ config MSM_DEBUG_UART_NONE
+ bool "None"
+
+ config MSM_DEBUG_UART1
+ bool "UART1"
+
+ config MSM_DEBUG_UART2
+ bool "UART2"
+
+ config MSM_DEBUG_UART3
+ bool "UART3"
+endchoice
+
config MACH_HALIBUT
depends on ARCH_MSM
default y
@@ -10,4 +34,10 @@ config MACH_HALIBUT
help
Support for the Qualcomm SURF7201A eval board.
+config MACH_TROUT
+ default y
+ bool "HTC Dream (aka trout)"
+ help
+ Support for the HTC Dream, T-Mobile G1, Android ADP1 devices.
+
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 1aa47001aa3..91e6f5c95dc 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -6,3 +6,4 @@ obj-y += clock.o clock-7x01a.o
obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o
+obj-$(CONFIG_MACH_TROUT) += board-dream.o
diff --git a/arch/arm/mach-msm/board-dream.c b/arch/arm/mach-msm/board-dream.c
new file mode 100644
index 00000000000..21afa851316
--- /dev/null
+++ b/arch/arm/mach-msm/board-dream.c
@@ -0,0 +1,93 @@
+/* linux/arch/arm/mach-msm/board-dream.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/setup.h>
+
+#include <mach/board.h>
+#include <mach/hardware.h>
+#include <mach/msm_iomap.h>
+
+#include "devices.h"
+#include "board-dream.h"
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_uart3,
+ &msm_device_smd,
+ &msm_device_nand,
+ &msm_device_hsusb,
+ &msm_device_i2c,
+};
+
+extern struct sys_timer msm_timer;
+
+static void __init trout_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static void __init trout_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ mi->nr_banks = 1;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ mi->bank[0].size = (101*1024*1024);
+}
+
+static void __init trout_init(void)
+{
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+static struct map_desc trout_io_desc[] __initdata = {
+ {
+ .virtual = TROUT_CPLD_BASE,
+ .pfn = __phys_to_pfn(TROUT_CPLD_START),
+ .length = TROUT_CPLD_SIZE,
+ .type = MT_DEVICE_NONSHARED
+ }
+};
+
+static void __init trout_map_io(void)
+{
+ msm_map_common_io();
+ iotable_init(trout_io_desc, ARRAY_SIZE(trout_io_desc));
+
+#ifdef CONFIG_MSM_DEBUG_UART3
+ /* route UART3 to the "H2W" extended usb connector */
+ writeb(0x80, TROUT_CPLD_BASE + 0x00);
+#endif
+
+ msm_clock_init();
+}
+
+MACHINE_START(TROUT, "HTC Dream")
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+ .boot_params = 0x10000100,
+ .fixup = trout_fixup,
+ .map_io = trout_map_io,
+ .init_irq = trout_init_irq,
+ .init_machine = trout_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-dream.h b/arch/arm/mach-msm/board-dream.h
new file mode 100644
index 00000000000..4f345a5a0a6
--- /dev/null
+++ b/arch/arm/mach-msm/board-dream.h
@@ -0,0 +1,5 @@
+
+#define TROUT_CPLD_BASE 0xE8100000
+#define TROUT_CPLD_START 0x98000000
+#define TROUT_CPLD_SIZE SZ_4K
+
diff --git a/arch/arm/mach-msm/include/mach/debug-macro.S b/arch/arm/mach-msm/include/mach/debug-macro.S
index 1db3c97dbc4..d48747ebcd3 100644
--- a/arch/arm/mach-msm/include/mach/debug-macro.S
+++ b/arch/arm/mach-msm/include/mach/debug-macro.S
@@ -14,15 +14,18 @@
*
*/
+
+
#include <mach/hardware.h>
#include <mach/msm_iomap.h>
+#ifdef CONFIG_MSM_DEBUG_UART
.macro addruart,rx
@ see if the MMU is enabled and select appropriate base address
mrc p15, 0, \rx, c1, c0
tst \rx, #1
- ldreq \rx, =MSM_UART1_PHYS
- movne \rx, #0
+ ldreq \rx, =MSM_DEBUG_UART_PHYS
+ ldrne \rx, =MSM_DEBUG_UART_BASE
.endm
.macro senduart,rd,rx
@@ -32,13 +35,20 @@
.macro waituart,rd,rx
@ wait for TX_READY
- teq \rx, #0
- bne 2f
-1: ldr \rd, [\rx, #0x08]
+1001: ldr \rd, [\rx, #0x08]
tst \rd, #0x04
- beq 1b
-2:
+ beq 1001b
+ .endm
+#else
+ .macro addruart,rx
+ .endm
+
+ .macro senduart,rd,rx
+ .endm
+
+ .macro waituart,rd,rx
.endm
+#endif
.macro busyuart,rd,rx
.endm
diff --git a/arch/arm/mach-msm/include/mach/mmc.h b/arch/arm/mach-msm/include/mach/mmc.h
new file mode 100644
index 00000000000..0ecf2542628
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/mmc.h
@@ -0,0 +1,26 @@
+/*
+ * arch/arm/include/asm/mach/mmc.h
+ */
+#ifndef ASMARM_MACH_MMC_H
+#define ASMARM_MACH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+ struct sdio_cis cis;
+ struct sdio_cccr cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+};
+
+struct mmc_platform_data {
+ unsigned int ocr_mask; /* available voltages */
+ u32 (*translate_vdd)(struct device *, unsigned int);
+ unsigned int (*status)(struct device *);
+ struct embedded_sdio_data *embedded_sdio;
+ int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+};
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 2f7b4c8620d..9dae1a98c77 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -84,6 +84,18 @@
#define MSM_UART3_PHYS 0xA9C00000
#define MSM_UART3_SIZE SZ_4K
+#ifdef CONFIG_MSM_DEBUG_UART
+#define MSM_DEBUG_UART_BASE 0xE1000000
+#if CONFIG_MSM_DEBUG_UART == 1
+#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 2
+#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
+#elif CONFIG_MSM_DEBUG_UART == 3
+#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
+#endif
+#define MSM_DEBUG_UART_SIZE SZ_4K
+#endif
+
#define MSM_SDC1_PHYS 0xA0400000
#define MSM_SDC1_SIZE SZ_4K
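The Kconfig hunk earlier in this patch turns the debug-UART choice into a plain integer (CONFIG_MSM_DEBUG_UART = 1, 2 or 3), and the msm_iomap.h block above selects the matching physical base at preprocessor time. A compressed sketch of the same pattern; the UART1/UART2 addresses are assumptions patterned after the UART3 value visible in the hunk:

#include <stdio.h>

/* Stand-in for what the Kconfig choice produces: the selection arrives as a
 * bare integer on the compile line, e.g. -DCONFIG_MSM_DEBUG_UART=3. */
#ifndef CONFIG_MSM_DEBUG_UART
#define CONFIG_MSM_DEBUG_UART 3
#endif

#define MSM_UART1_PHYS 0xA9A00000   /* assumed; only UART3 appears in the hunk */
#define MSM_UART2_PHYS 0xA9B00000   /* assumed */
#define MSM_UART3_PHYS 0xA9C00000   /* from msm_iomap.h */

#if CONFIG_MSM_DEBUG_UART == 1
#define MSM_DEBUG_UART_PHYS MSM_UART1_PHYS
#elif CONFIG_MSM_DEBUG_UART == 2
#define MSM_DEBUG_UART_PHYS MSM_UART2_PHYS
#elif CONFIG_MSM_DEBUG_UART == 3
#define MSM_DEBUG_UART_PHYS MSM_UART3_PHYS
#endif

int main(void)
{
        printf("debug UART phys = %#lx\n", (unsigned long)MSM_DEBUG_UART_PHYS);
        return 0;
}

Keeping MSM_DEBUG_UART_BASE a fixed virtual address (0xE1000000) alongside the per-UART physical one is what lets debug-macro.S above load the physical address when the MMU is off and the mapped base once it is on.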
diff --git a/arch/arm/mach-msm/include/mach/uncompress.h b/arch/arm/mach-msm/include/mach/uncompress.h
index 026e8955ace..d94292c29d8 100644
--- a/arch/arm/mach-msm/include/mach/uncompress.h
+++ b/arch/arm/mach-msm/include/mach/uncompress.h
@@ -16,9 +16,16 @@
#ifndef __ASM_ARCH_MSM_UNCOMPRESS_H
#include "hardware.h"
+#include "linux/io.h"
+#include "mach/msm_iomap.h"
static void putc(int c)
{
+#if defined(MSM_DEBUG_UART_PHYS)
+ unsigned base = MSM_DEBUG_UART_PHYS;
+ while (!(readl(base + 0x08) & 0x04)) ;
+ writel(c, base + 0x0c);
+#endif
}
static inline void flush(void)
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 6e7692ff6f2..1c5e7dac086 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -42,6 +42,9 @@ static struct map_desc msm_io_desc[] __initdata = {
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
MSM_DEVICE(CLK_CTL),
+#ifdef CONFIG_MSM_DEBUG_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
{
.virtual = (unsigned long) MSM_SHARED_RAM_BASE,
.pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 116394484e7..9438bf6613a 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -18,6 +18,7 @@
#include <linux/gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <asm/sizes.h>
@@ -149,7 +150,7 @@ static struct mtd_partition nhk8815_onenand_partitions[] = {
}
};
-static struct flash_platform_data nhk8815_onenand_data = {
+static struct onenand_platform_data nhk8815_onenand_data = {
.parts = nhk8815_onenand_partitions,
.nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions),
};
@@ -163,7 +164,7 @@ static struct resource nhk8815_onenand_resource[] = {
};
static struct platform_device nhk8815_onenand_device = {
- .name = "onenand",
+ .name = "onenand-flash",
.id = -1,
.dev = {
.platform_data = &nhk8815_onenand_data,
@@ -174,10 +175,10 @@ static struct platform_device nhk8815_onenand_device = {
static void __init nhk8815_onenand_init(void)
{
-#ifdef CONFIG_ONENAND
+#ifdef CONFIG_MTD_ONENAND
/* Set up SMCS0 for OneNand */
- writel(0x000030db, FSMC_BCR0);
- writel(0x02100551, FSMC_BTR0);
+ writel(0x000030db, FSMC_BCR(0));
+ writel(0x02100551, FSMC_BTR(0));
#endif
}
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index feb0e54a91d..038f24d4702 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
BUG_ON(desc->status & IRQ_INPROGRESS);
@@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
goto out_mask;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
@@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
* Maybe this function should go to kernel/irq/chip.c? */
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (desc->status & IRQ_DISABLED)
@@ -97,7 +97,7 @@ out_mask:
/* ack unconditionally to unmask lower prio irqs */
desc->chip->ack(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
#define handle_irq handle_prio_irq
#endif
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 87e539aa8ad..9ce17f13d3f 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,7 +3,8 @@
#
# Common support
-obj-y := io.o id.o sram.o clock.o irq.o mux.o serial.o devices.o
+obj-y := io.o id.o sram.o irq.o mux.o serial.o devices.o
+obj-y += clock.o clock_data.o opp_data.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
@@ -17,6 +18,9 @@ obj-$(CONFIG_PM) += pm.o sleep.o
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
mailbox_mach-objs := mailbox.o
+i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
+obj-y += $(i2c-omap-m) $(i2c-omap-y)
+
led-y := leds.o
# Specific board support
@@ -48,3 +52,7 @@ led-$(CONFIG_MACH_OMAP_INNOVATOR) += leds-innovator.o
led-$(CONFIG_MACH_OMAP_PERSEUS2) += leds-h2p2-debug.o
led-$(CONFIG_MACH_OMAP_OSK) += leds-osk.o
obj-$(CONFIG_LEDS) += $(led-y)
+
+ifneq ($(CONFIG_FB_OMAP),)
+obj-y += lcd_dma.o
+endif
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index f4b72c1654f..7e70c3c08da 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -19,6 +19,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/input.h>
+#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -30,7 +31,6 @@
#include <mach/gpio.h>
#include <plat/mux.h>
#include <plat/fpga.h>
-#include <plat/nand.h>
#include <plat/keypad.h>
#include <plat/common.h>
#include <plat/board.h>
@@ -100,6 +100,12 @@ static int fsample_keymap[] = {
0
};
+static struct smc91x_platdata smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource smc91x_resources[] = {
[0] = {
.start = H2P2_DBG_FPGA_ETHR_START, /* Physical */
@@ -167,8 +173,40 @@ static struct platform_device nor_device = {
.resource = &nor_resource,
};
-static struct omap_nand_platform_data nand_data = {
- .options = NAND_SAMSUNG_LP_OPTIONS,
+static void nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long mask;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ mask = (ctrl & NAND_CLE) ? 0x02 : 0;
+ if (ctrl & NAND_ALE)
+ mask |= 0x04;
+ writeb(cmd, (unsigned long)this->IO_ADDR_W | mask);
+}
+
+#define FSAMPLE_NAND_RB_GPIO_PIN 62
+
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN);
+}
+
+static const char *part_probes[] = { "cmdlinepart", NULL };
+
+static struct platform_nand_data nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .options = NAND_SAMSUNG_LP_OPTIONS,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .cmd_ctrl = nand_cmd_ctl,
+ .dev_ready = nand_dev_ready,
+ },
};
static struct resource nand_resource = {
@@ -178,7 +216,7 @@ static struct resource nand_resource = {
};
static struct platform_device nand_device = {
- .name = "omapnand",
+ .name = "gen_nand",
.id = 0,
.dev = {
.platform_data = &nand_data,
@@ -190,6 +228,9 @@ static struct platform_device nand_device = {
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
+ .dev = {
+ .platform_data = &smc91x_info,
+ },
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
@@ -233,13 +274,6 @@ static struct platform_device *devices[] __initdata = {
&lcd_device,
};
-#define P2_NAND_RB_GPIO_PIN 62
-
-static int nand_dev_ready(struct omap_nand_platform_data *data)
-{
- return gpio_get_value(P2_NAND_RB_GPIO_PIN);
-}
-
static struct omap_lcd_config fsample_lcd_config __initdata = {
.ctrl_name = "internal",
};
@@ -250,9 +284,9 @@ static struct omap_board_config_kernel fsample_config[] = {
static void __init omap_fsample_init(void)
{
- if (gpio_request(P2_NAND_RB_GPIO_PIN, "NAND ready") < 0)
+ if (gpio_request(FSAMPLE_NAND_RB_GPIO_PIN, "NAND ready") < 0)
BUG();
- nand_data.dev_ready = nand_dev_ready;
+ gpio_direction_input(FSAMPLE_NAND_RB_GPIO_PIN);
omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
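
The fsample board (like the H2, H3 and Perseus2 boards further down) drops the OMAP-specific "omapnand" driver in favour of the generic "gen_nand" platform driver: the chip is now described through struct platform_nand_data, command and address bytes are steered onto the CLE/ALE lines by a cmd_ctrl hook, and ready/busy is reported by a GPIO-backed dev_ready hook, with the GPIO requested and switched to input in the init function instead of being latched into the old dev_ready field. A condensed sketch of that callback pair, with the 0x02/0x04 offsets taken directly from the patch:

/* CLE and ALE are wired to address bits, so a command or address byte is
 * written to IO_ADDR_W with the matching offset set. */
static void board_nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;
	unsigned long mask = 0;

	if (cmd == NAND_CMD_NONE)
		return;
	if (ctrl & NAND_CLE)
		mask |= 0x02;		/* command latch enable */
	if (ctrl & NAND_ALE)
		mask |= 0x04;		/* address latch enable */
	writeb(cmd, (unsigned long)chip->IO_ADDR_W | mask);
}

/* Ready/busy is a plain GPIO read; the generic NAND code polls this. */
static int board_nand_dev_ready(struct mtd_info *mtd)
{
	return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN);
}
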
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 89ba8ec4bbf..fa7cecea19f 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -28,6 +28,7 @@
#include <linux/mtd/partitions.h>
#include <linux/input.h>
#include <linux/i2c/tps65010.h>
+#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/gpio.h>
@@ -40,7 +41,6 @@
#include <plat/mux.h>
#include <plat/dma.h>
#include <plat/tc.h>
-#include <plat/nand.h>
#include <plat/irda.h>
#include <plat/usb.h>
#include <plat/keypad.h>
@@ -179,11 +179,43 @@ static struct mtd_partition h2_nand_partitions[] = {
},
};
-/* dip switches control NAND chip access: 8 bit, 16 bit, or neither */
-static struct omap_nand_platform_data h2_nand_data = {
- .options = NAND_SAMSUNG_LP_OPTIONS,
- .parts = h2_nand_partitions,
- .nr_parts = ARRAY_SIZE(h2_nand_partitions),
+static void h2_nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long mask;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ mask = (ctrl & NAND_CLE) ? 0x02 : 0;
+ if (ctrl & NAND_ALE)
+ mask |= 0x04;
+ writeb(cmd, (unsigned long)this->IO_ADDR_W | mask);
+}
+
+#define H2_NAND_RB_GPIO_PIN 62
+
+static int h2_nand_dev_ready(struct mtd_info *mtd)
+{
+ return gpio_get_value(H2_NAND_RB_GPIO_PIN);
+}
+
+static const char *h2_part_probes[] = { "cmdlinepart", NULL };
+
+struct platform_nand_data h2_nand_platdata = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .nr_partitions = ARRAY_SIZE(h2_nand_partitions),
+ .partitions = h2_nand_partitions,
+ .options = NAND_SAMSUNG_LP_OPTIONS,
+ .part_probe_types = h2_part_probes,
+ },
+ .ctrl = {
+ .cmd_ctrl = h2_nand_cmd_ctl,
+ .dev_ready = h2_nand_dev_ready,
+
+ },
};
static struct resource h2_nand_resource = {
@@ -191,15 +223,21 @@ static struct resource h2_nand_resource = {
};
static struct platform_device h2_nand_device = {
- .name = "omapnand",
+ .name = "gen_nand",
.id = 0,
.dev = {
- .platform_data = &h2_nand_data,
+ .platform_data = &h2_nand_platdata,
},
.num_resources = 1,
.resource = &h2_nand_resource,
};
+static struct smc91x_platdata h2_smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource h2_smc91x_resources[] = {
[0] = {
.start = OMAP1610_ETHR_START, /* Physical */
@@ -216,6 +254,9 @@ static struct resource h2_smc91x_resources[] = {
static struct platform_device h2_smc91x_device = {
.name = "smc91x",
.id = 0,
+ .dev = {
+ .platform_data = &h2_smc91x_info,
+ },
.num_resources = ARRAY_SIZE(h2_smc91x_resources),
.resource = h2_smc91x_resources,
};
@@ -368,8 +409,6 @@ static struct omap_board_config_kernel h2_config[] __initdata = {
{ OMAP_TAG_LCD, &h2_lcd_config },
};
-#define H2_NAND_RB_GPIO_PIN 62
-
static void __init h2_init(void)
{
/* Here we assume the NOR boot config: NOR on CS3 (possibly swapped
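
Several of these OMAP1 boards (fsample above; H2 here; H3, Innovator, OSK, Perseus2 and VoiceBlue below) also start handing the smc91x Ethernet driver an explicit struct smc91x_platdata instead of relying on its compile-time defaults. The flag values are the ones visible in the hunks; the comments below are an interpretation of their usual meaning (see include/linux/smc91x.h), not text from the driver:

static struct smc91x_platdata board_smc91x_info = {
	/* Bus wired for 16-bit accesses only, no wait-state handshaking. */
	.flags	= SMC91X_USE_16BIT | SMC91X_NOWAIT,
	/* LED A shows 100/10 link speed, LED B shows TX/RX activity. */
	.leda	= RPC_LED_100_10,
	.ledb	= RPC_LED_TX_RX,
};
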
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f5cc0a73052..6a7f9c391cf 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -28,6 +28,7 @@
#include <linux/input.h>
#include <linux/spi/spi.h>
#include <linux/i2c/tps65010.h>
+#include <linux/smc91x.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -42,7 +43,6 @@
#include <mach/irqs.h>
#include <plat/mux.h>
#include <plat/tc.h>
-#include <plat/nand.h>
#include <plat/usb.h>
#include <plat/keypad.h>
#include <plat/dma.h>
@@ -181,11 +181,43 @@ static struct mtd_partition nand_partitions[] = {
},
};
-/* dip switches control NAND chip access: 8 bit, 16 bit, or neither */
-static struct omap_nand_platform_data nand_data = {
- .options = NAND_SAMSUNG_LP_OPTIONS,
- .parts = nand_partitions,
- .nr_parts = ARRAY_SIZE(nand_partitions),
+static void nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long mask;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ mask = (ctrl & NAND_CLE) ? 0x02 : 0;
+ if (ctrl & NAND_ALE)
+ mask |= 0x04;
+ writeb(cmd, (unsigned long)this->IO_ADDR_W | mask);
+}
+
+#define H3_NAND_RB_GPIO_PIN 10
+
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ return gpio_get_value(H3_NAND_RB_GPIO_PIN);
+}
+
+static const char *part_probes[] = { "cmdlinepart", NULL };
+
+struct platform_nand_data nand_platdata = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .nr_partitions = ARRAY_SIZE(nand_partitions),
+ .partitions = nand_partitions,
+ .options = NAND_SAMSUNG_LP_OPTIONS,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .cmd_ctrl = nand_cmd_ctl,
+ .dev_ready = nand_dev_ready,
+
+ },
};
static struct resource nand_resource = {
@@ -193,15 +225,21 @@ static struct resource nand_resource = {
};
static struct platform_device nand_device = {
- .name = "omapnand",
+ .name = "gen_nand",
.id = 0,
.dev = {
- .platform_data = &nand_data,
+ .platform_data = &nand_platdata,
},
.num_resources = 1,
.resource = &nand_resource,
};
+static struct smc91x_platdata smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource smc91x_resources[] = {
[0] = {
.start = OMAP1710_ETHR_START, /* Physical */
@@ -218,6 +256,9 @@ static struct resource smc91x_resources[] = {
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
+ .dev = {
+ .platform_data = &smc91x_info,
+ },
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
@@ -332,13 +373,6 @@ static struct i2c_board_info __initdata h3_i2c_board_info[] = {
},
};
-#define H3_NAND_RB_GPIO_PIN 10
-
-static int nand_dev_ready(struct omap_nand_platform_data *data)
-{
- return gpio_get_value(H3_NAND_RB_GPIO_PIN);
-}
-
static void __init h3_init(void)
{
/* Here we assume the NOR boot config: NOR on CS3 (possibly swapped
@@ -356,7 +390,7 @@ static void __init h3_init(void)
nand_resource.end += SZ_4K - 1;
if (gpio_request(H3_NAND_RB_GPIO_PIN, "NAND ready") < 0)
BUG();
- nand_data.dev_ready = nand_dev_ready;
+ gpio_direction_input(H3_NAND_RB_GPIO_PIN);
/* GPIO10 Func_MUX_CTRL reg bit 29:27, Configure V2 to mode1 as GPIO */
/* GPIO10 pullup/down register, Enable pullup on GPIO10 */
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 5f28a5ceaca..e36639f6615 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -39,6 +39,7 @@
#include <plat/common.h>
#include <plat/board.h>
#include <plat/keypad.h>
+#include <plat/usb.h>
#include <mach/irqs.h>
@@ -140,6 +141,15 @@ static struct platform_device kp_device = {
.resource = kp_resources,
};
+/* USB Device */
+static struct omap_usb_config htcherald_usb_config __initdata = {
+ .otg = 0,
+ .register_host = 0,
+ .register_dev = 1,
+ .hmc_mode = 4,
+ .pins[0] = 2,
+};
+
/* LCD Device resources */
static struct platform_device lcd_device = {
.name = "lcd_htcherald",
@@ -214,6 +224,57 @@ static void __init htcherald_disable_watchdog(void)
}
}
+#define HTCHERALD_GPIO_USB_EN1 33
+#define HTCHERALD_GPIO_USB_EN2 73
+#define HTCHERALD_GPIO_USB_DM 35
+#define HTCHERALD_GPIO_USB_DP 36
+
+static void __init htcherald_usb_enable(void)
+{
+ unsigned int tries = 20;
+ unsigned int value = 0;
+
+ /* Request the GPIOs we need to control here */
+ if (gpio_request(HTCHERALD_GPIO_USB_EN1, "herald_usb") < 0)
+ goto err1;
+
+ if (gpio_request(HTCHERALD_GPIO_USB_EN2, "herald_usb") < 0)
+ goto err2;
+
+ if (gpio_request(HTCHERALD_GPIO_USB_DM, "herald_usb") < 0)
+ goto err3;
+
+ if (gpio_request(HTCHERALD_GPIO_USB_DP, "herald_usb") < 0)
+ goto err4;
+
+ /* force USB_EN GPIO to 0 */
+ do {
+ /* output low */
+ gpio_direction_output(HTCHERALD_GPIO_USB_EN1, 0);
+ } while ((value = gpio_get_value(HTCHERALD_GPIO_USB_EN1)) == 1 &&
+ --tries);
+
+ if (value == 1)
+ printk(KERN_WARNING "Unable to reset USB, trying to continue\n");
+
+ gpio_direction_output(HTCHERALD_GPIO_USB_EN2, 0); /* output low */
+ gpio_direction_input(HTCHERALD_GPIO_USB_DM); /* input */
+ gpio_direction_input(HTCHERALD_GPIO_USB_DP); /* input */
+
+ goto done;
+
+err4:
+ gpio_free(HTCHERALD_GPIO_USB_DM);
+err3:
+ gpio_free(HTCHERALD_GPIO_USB_EN2);
+err2:
+ gpio_free(HTCHERALD_GPIO_USB_EN1);
+err1:
+ printk(KERN_ERR "Unable to request GPIO for USB\n");
+done:
+ printk(KERN_INFO "USB setup complete.\n");
+}
+
static void __init htcherald_init(void)
{
printk(KERN_INFO "HTC Herald init.\n");
@@ -225,6 +286,9 @@ static void __init htcherald_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
htcherald_disable_watchdog();
+
+ htcherald_usb_enable();
+ omap_usb_init(&htcherald_usb_config);
}
static void __init htcherald_init_irq(void)
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index cf0fdb9c182..2133b006f6a 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -23,6 +23,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/input.h>
+#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -142,6 +143,11 @@ static struct platform_device innovator_kp_device = {
.resource = innovator_kp_resources,
};
+static struct smc91x_platdata innovator_smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
#ifdef CONFIG_ARCH_OMAP15XX
@@ -175,6 +181,9 @@ static struct resource innovator1510_smc91x_resources[] = {
static struct platform_device innovator1510_smc91x_device = {
.name = "smc91x",
.id = 0,
+ .dev = {
+ .platform_data = &innovator_smc91x_info,
+ },
.num_resources = ARRAY_SIZE(innovator1510_smc91x_resources),
.resource = innovator1510_smc91x_resources,
};
@@ -241,6 +250,9 @@ static struct resource innovator1610_smc91x_resources[] = {
static struct platform_device innovator1610_smc91x_device = {
.name = "smc91x",
.id = 0,
+ .dev = {
+ .platform_data = &innovator_smc91x_info,
+ },
.num_resources = ARRAY_SIZE(innovator1610_smc91x_resources),
.resource = innovator1610_smc91x_resources,
};
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 5a275bab2df..71e1a3fad0e 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/clk.h>
+#include <linux/omapfb.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -32,7 +33,6 @@
#include <plat/keypad.h>
#include <plat/common.h>
#include <plat/dsp_common.h>
-#include <plat/omapfb.h>
#include <plat/hwa742.h>
#include <plat/lcd_mipid.h>
#include <plat/mmc.h>
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 50c92c13e48..ccea4f448e9 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -33,6 +33,7 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/leds.h>
+#include <linux/smc91x.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -115,6 +116,12 @@ static struct platform_device osk5912_flash_device = {
.resource = &osk_flash_resource,
};
+static struct smc91x_platdata osk5912_smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource osk5912_smc91x_resources[] = {
[0] = {
.start = OMAP_OSK_ETHR_START, /* Physical */
@@ -131,6 +138,9 @@ static struct resource osk5912_smc91x_resources[] = {
static struct platform_device osk5912_smc91x_device = {
.name = "smc91x",
.id = -1,
+ .dev = {
+ .platform_data = &osk5912_smc91x_info,
+ },
.num_resources = ARRAY_SIZE(osk5912_smc91x_resources),
.resource = osk5912_smc91x_resources,
};
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index ca7df1e93ef..1387a4f15da 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -19,6 +19,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/input.h>
+#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -30,7 +31,6 @@
#include <mach/gpio.h>
#include <plat/mux.h>
#include <plat/fpga.h>
-#include <plat/nand.h>
#include <plat/keypad.h>
#include <plat/common.h>
#include <plat/board.h>
@@ -67,6 +67,12 @@ static int p2_keymap[] = {
0
};
+static struct smc91x_platdata smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource smc91x_resources[] = {
[0] = {
.start = H2P2_DBG_FPGA_ETHR_START, /* Physical */
@@ -134,8 +140,40 @@ static struct platform_device nor_device = {
.resource = &nor_resource,
};
-static struct omap_nand_platform_data nand_data = {
- .options = NAND_SAMSUNG_LP_OPTIONS,
+static void nand_cmd_ctl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long mask;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ mask = (ctrl & NAND_CLE) ? 0x02 : 0;
+ if (ctrl & NAND_ALE)
+ mask |= 0x04;
+ writeb(cmd, (unsigned long)this->IO_ADDR_W | mask);
+}
+
+#define P2_NAND_RB_GPIO_PIN 62
+
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ return gpio_get_value(P2_NAND_RB_GPIO_PIN);
+}
+
+static const char *part_probes[] = { "cmdlinepart", NULL };
+
+static struct platform_nand_data nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .options = NAND_SAMSUNG_LP_OPTIONS,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .cmd_ctrl = nand_cmd_ctl,
+ .dev_ready = nand_dev_ready,
+ },
};
static struct resource nand_resource = {
@@ -145,7 +183,7 @@ static struct resource nand_resource = {
};
static struct platform_device nand_device = {
- .name = "omapnand",
+ .name = "gen_nand",
.id = 0,
.dev = {
.platform_data = &nand_data,
@@ -157,6 +195,9 @@ static struct platform_device nand_device = {
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
+ .dev = {
+ .platform_data = &smc91x_info,
+ },
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
@@ -201,13 +242,6 @@ static struct platform_device *devices[] __initdata = {
&lcd_device,
};
-#define P2_NAND_RB_GPIO_PIN 62
-
-static int nand_dev_ready(struct omap_nand_platform_data *data)
-{
- return gpio_get_value(P2_NAND_RB_GPIO_PIN);
-}
-
static struct omap_lcd_config perseus2_lcd_config __initdata = {
.ctrl_name = "internal",
};
@@ -220,7 +254,7 @@ static void __init omap_perseus2_init(void)
{
if (gpio_request(P2_NAND_RB_GPIO_PIN, "NAND ready") < 0)
BUG();
- nand_data.dev_ready = nand_dev_ready;
+ gpio_direction_input(P2_NAND_RB_GPIO_PIN);
omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 35c75c1bd0a..16918353799 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -22,6 +22,7 @@
#include <linux/reboot.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
+#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -106,6 +107,12 @@ static struct platform_device voiceblue_flash_device = {
.resource = &voiceblue_flash_resource,
};
+static struct smc91x_platdata voiceblue_smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource voiceblue_smc91x_resources[] = {
[0] = {
.start = OMAP_CS2_PHYS + 0x300,
@@ -122,6 +129,9 @@ static struct resource voiceblue_smc91x_resources[] = {
static struct platform_device voiceblue_smc91x_device = {
.name = "smc91x",
.id = 0,
+ .dev = {
+ .platform_data = &voiceblue_smc91x_info,
+ },
.num_resources = ARRAY_SIZE(voiceblue_smc91x_resources),
.resource = voiceblue_smc91x_resources,
};
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 42cbe203da3..2ba9ab95373 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -1,7 +1,7 @@
/*
* linux/arch/arm/mach-omap1/clock.c
*
- * Copyright (C) 2004 - 2005 Nokia corporation
+ * Copyright (C) 2004 - 2005, 2009 Nokia corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
*
* Modified to use omap shared clock framework by
@@ -26,12 +26,17 @@
#include <plat/usb.h>
#include <plat/clock.h>
#include <plat/sram.h>
-
-static const struct clkops clkops_generic;
-static const struct clkops clkops_uart;
-static const struct clkops clkops_dspck;
+#include <plat/clkdev_omap.h>
#include "clock.h"
+#include "opp.h"
+
+__u32 arm_idlect1_mask;
+struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
+
+/*-------------------------------------------------------------------------
+ * Omap1 specific clock functions
+ *-------------------------------------------------------------------------*/
static int clk_omap1_dummy_enable(struct clk *clk)
{
@@ -42,134 +47,24 @@ static void clk_omap1_dummy_disable(struct clk *clk)
{
}
-static const struct clkops clkops_dummy = {
- .enable = clk_omap1_dummy_enable,
- .disable = clk_omap1_dummy_disable,
-};
-
-static struct clk dummy_ck = {
- .name = "dummy",
- .ops = &clkops_dummy,
- .flags = RATE_FIXED,
-};
-
-struct omap_clk {
- u32 cpu;
- struct clk_lookup lk;
+const struct clkops clkops_dummy = {
+ .enable = clk_omap1_dummy_enable,
+ .disable = clk_omap1_dummy_disable,
};
-#define CLK(dev, con, ck, cp) \
- { \
- .cpu = cp, \
- .lk = { \
- .dev_id = dev, \
- .con_id = con, \
- .clk = ck, \
- }, \
- }
-
-#define CK_310 (1 << 0)
-#define CK_7XX (1 << 1)
-#define CK_1510 (1 << 2)
-#define CK_16XX (1 << 3)
-
-static struct omap_clk omap_clks[] = {
- /* non-ULPD clocks */
- CLK(NULL, "ck_ref", &ck_ref, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "ck_dpll1", &ck_dpll1, CK_16XX | CK_1510 | CK_310),
- /* CK_GEN1 clocks */
- CLK(NULL, "ck_dpll1out", &ck_dpll1out.clk, CK_16XX),
- CLK(NULL, "ck_sossi", &sossi_ck, CK_16XX),
- CLK(NULL, "arm_ck", &arm_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "armper_ck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "arm_gpio_ck", &arm_gpio_ck, CK_1510 | CK_310),
- CLK(NULL, "armxor_ck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "armtim_ck", &armtim_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap_wdt", "fck", &armwdt_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap_wdt", "ick", &armper_ck.clk, CK_16XX),
- CLK("omap_wdt", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK(NULL, "arminth_ck", &arminth_ck1510, CK_1510 | CK_310),
- CLK(NULL, "arminth_ck", &arminth_ck16xx, CK_16XX),
- /* CK_GEN2 clocks */
- CLK(NULL, "dsp_ck", &dsp_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dspmmu_ck", &dspmmu_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dspper_ck", &dspper_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dspxor_ck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dsptim_ck", &dsptim_ck, CK_16XX | CK_1510 | CK_310),
- /* CK_GEN3 clocks */
- CLK(NULL, "tc_ck", &tc_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "tipb_ck", &tipb_ck, CK_1510 | CK_310),
- CLK(NULL, "l3_ocpi_ck", &l3_ocpi_ck, CK_16XX | CK_7XX),
- CLK(NULL, "tc1_ck", &tc1_ck, CK_16XX),
- CLK(NULL, "tc2_ck", &tc2_ck, CK_16XX),
- CLK(NULL, "dma_ck", &dma_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "dma_lcdfree_ck", &dma_lcdfree_ck, CK_16XX),
- CLK(NULL, "api_ck", &api_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "lb_ck", &lb_ck.clk, CK_1510 | CK_310),
- CLK(NULL, "rhea1_ck", &rhea1_ck, CK_16XX),
- CLK(NULL, "rhea2_ck", &rhea2_ck, CK_16XX),
- CLK(NULL, "lcd_ck", &lcd_ck_16xx, CK_16XX | CK_7XX),
- CLK(NULL, "lcd_ck", &lcd_ck_1510.clk, CK_1510 | CK_310),
- /* ULPD clocks */
- CLK(NULL, "uart1_ck", &uart1_1510, CK_1510 | CK_310),
- CLK(NULL, "uart1_ck", &uart1_16xx.clk, CK_16XX),
- CLK(NULL, "uart2_ck", &uart2_ck, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "uart3_ck", &uart3_1510, CK_1510 | CK_310),
- CLK(NULL, "uart3_ck", &uart3_16xx.clk, CK_16XX),
- CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
- CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
- CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
- CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
- CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
- CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
- CLK(NULL, "bclk", &bclk_16xx, CK_16XX),
- CLK("mmci-omap.0", "fck", &mmc1_ck, CK_16XX | CK_1510 | CK_310),
- CLK("mmci-omap.0", "fck", &mmc3_ck, CK_7XX),
- CLK("mmci-omap.0", "ick", &armper_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK("mmci-omap.1", "fck", &mmc2_ck, CK_16XX),
- CLK("mmci-omap.1", "ick", &armper_ck.clk, CK_16XX),
- /* Virtual clocks */
- CLK(NULL, "mpu", &virtual_ck_mpu, CK_16XX | CK_1510 | CK_310),
- CLK("i2c_omap.1", "fck", &i2c_fck, CK_16XX | CK_1510 | CK_310),
- CLK("i2c_omap.1", "ick", &i2c_ick, CK_16XX),
- CLK("i2c_omap.1", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK("omap_uwire", "fck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap-mcbsp.1", "ick", &dspper_ck, CK_16XX),
- CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK("omap-mcbsp.2", "ick", &armper_ck.clk, CK_16XX),
- CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK("omap-mcbsp.3", "ick", &dspper_ck, CK_16XX),
- CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_1510 | CK_310),
- CLK("omap-mcbsp.1", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
- CLK("omap-mcbsp.2", "fck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
- CLK("omap-mcbsp.3", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
-};
-
-static int omap1_clk_enable_generic(struct clk * clk);
-static int omap1_clk_enable(struct clk *clk);
-static void omap1_clk_disable_generic(struct clk * clk);
-static void omap1_clk_disable(struct clk *clk);
-
-__u32 arm_idlect1_mask;
-
-/*-------------------------------------------------------------------------
- * Omap1 specific clock functions
- *-------------------------------------------------------------------------*/
-
-static unsigned long omap1_watchdog_recalc(struct clk *clk)
+/* XXX can be replaced with a fixed_divisor_recalc */
+unsigned long omap1_watchdog_recalc(struct clk *clk)
{
return clk->parent->rate / 14;
}
-static unsigned long omap1_uart_recalc(struct clk *clk)
+unsigned long omap1_uart_recalc(struct clk *clk)
{
unsigned int val = __raw_readl(clk->enable_reg);
return val & clk->enable_bit ? 48000000 : 12000000;
}
-static unsigned long omap1_sossi_recalc(struct clk *clk)
+unsigned long omap1_sossi_recalc(struct clk *clk)
{
u32 div = omap_readl(MOD_CONF_CTRL_1);
@@ -179,64 +74,6 @@ static unsigned long omap1_sossi_recalc(struct clk *clk)
return clk->parent->rate / div;
}
-static int omap1_clk_enable_dsp_domain(struct clk *clk)
-{
- int retval;
-
- retval = omap1_clk_enable(&api_ck.clk);
- if (!retval) {
- retval = omap1_clk_enable_generic(clk);
- omap1_clk_disable(&api_ck.clk);
- }
-
- return retval;
-}
-
-static void omap1_clk_disable_dsp_domain(struct clk *clk)
-{
- if (omap1_clk_enable(&api_ck.clk) == 0) {
- omap1_clk_disable_generic(clk);
- omap1_clk_disable(&api_ck.clk);
- }
-}
-
-static const struct clkops clkops_dspck = {
- .enable = &omap1_clk_enable_dsp_domain,
- .disable = &omap1_clk_disable_dsp_domain,
-};
-
-static int omap1_clk_enable_uart_functional(struct clk *clk)
-{
- int ret;
- struct uart_clk *uclk;
-
- ret = omap1_clk_enable_generic(clk);
- if (ret == 0) {
- /* Set smart idle acknowledgement mode */
- uclk = (struct uart_clk *)clk;
- omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
- uclk->sysc_addr);
- }
-
- return ret;
-}
-
-static void omap1_clk_disable_uart_functional(struct clk *clk)
-{
- struct uart_clk *uclk;
-
- /* Set force idle acknowledgement mode */
- uclk = (struct uart_clk *)clk;
- omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
-
- omap1_clk_disable_generic(clk);
-}
-
-static const struct clkops clkops_uart = {
- .enable = &omap1_clk_enable_uart_functional,
- .disable = &omap1_clk_disable_uart_functional,
-};
-
static void omap1_clk_allow_idle(struct clk *clk)
{
struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
@@ -344,7 +181,7 @@ static int calc_dsor_exp(struct clk *clk, unsigned long rate)
return dsor_exp;
}
-static unsigned long omap1_ckctl_recalc(struct clk *clk)
+unsigned long omap1_ckctl_recalc(struct clk *clk)
{
/* Calculate divisor encoded as 2-bit exponent */
int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
@@ -352,7 +189,7 @@ static unsigned long omap1_ckctl_recalc(struct clk *clk)
return clk->parent->rate / dsor;
}
-static unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
+unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
int dsor;
@@ -363,28 +200,29 @@ static unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
* Note that DSP_CKCTL virt addr = phys addr, so
* we must use __raw_readw() instead of omap_readw().
*/
- omap1_clk_enable(&api_ck.clk);
+ omap1_clk_enable(api_ck_p);
dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
- omap1_clk_disable(&api_ck.clk);
+ omap1_clk_disable(api_ck_p);
return clk->parent->rate / dsor;
}
/* MPU virtual clock functions */
-static int omap1_select_table_rate(struct clk * clk, unsigned long rate)
+int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
/* Find the highest supported frequency <= rate and switch to it */
struct mpu_rate * ptr;
+ unsigned long dpll1_rate, ref_rate;
- if (clk != &virtual_ck_mpu)
- return -EINVAL;
+ dpll1_rate = clk_get_rate(ck_dpll1_p);
+ ref_rate = clk_get_rate(ck_ref_p);
- for (ptr = rate_table; ptr->rate; ptr++) {
- if (ptr->xtal != ck_ref.rate)
+ for (ptr = omap1_rate_table; ptr->rate; ptr++) {
+ if (ptr->xtal != ref_rate)
continue;
/* DPLL1 cannot be reprogrammed without risking system crash */
- if (likely(ck_dpll1.rate!=0) && ptr->pll_rate != ck_dpll1.rate)
+ if (likely(dpll1_rate != 0) && ptr->pll_rate != dpll1_rate)
continue;
/* Can check only after xtal frequency check */
@@ -405,11 +243,13 @@ static int omap1_select_table_rate(struct clk * clk, unsigned long rate)
else
omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
- ck_dpll1.rate = ptr->pll_rate;
+ /* XXX Do we need to recalculate the tree below DPLL1 at this point? */
+ ck_dpll1_p->rate = ptr->pll_rate;
+
return 0;
}
-static int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
+int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
int dsor_exp;
u16 regval;
@@ -429,7 +269,7 @@ static int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
return 0;
}
-static long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
+long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
int dsor_exp = calc_dsor_exp(clk, rate);
if (dsor_exp < 0)
@@ -439,7 +279,7 @@ static long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
return clk->parent->rate / (1 << dsor_exp);
}
-static int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
+int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
int dsor_exp;
u16 regval;
@@ -459,19 +299,19 @@ static int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
return 0;
}
-static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate)
+long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
/* Find the highest supported frequency <= rate */
struct mpu_rate * ptr;
- long highest_rate;
+ long highest_rate;
+ unsigned long ref_rate;
- if (clk != &virtual_ck_mpu)
- return -EINVAL;
+ ref_rate = clk_get_rate(ck_ref_p);
highest_rate = -EINVAL;
- for (ptr = rate_table; ptr->rate; ptr++) {
- if (ptr->xtal != ck_ref.rate)
+ for (ptr = omap1_rate_table; ptr->rate; ptr++) {
+ if (ptr->xtal != ref_rate)
continue;
highest_rate = ptr->rate;
@@ -506,8 +346,8 @@ static unsigned calc_ext_dsor(unsigned long rate)
return dsor;
}
-/* Only needed on 1510 */
-static int omap1_set_uart_rate(struct clk * clk, unsigned long rate)
+/* XXX Only needed on 1510 */
+int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
unsigned int val;
@@ -525,7 +365,7 @@ static int omap1_set_uart_rate(struct clk * clk, unsigned long rate)
}
/* External clock (MCLK & BCLK) functions */
-static int omap1_set_ext_clk_rate(struct clk * clk, unsigned long rate)
+int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
unsigned dsor;
__u16 ratio_bits;
@@ -543,7 +383,7 @@ static int omap1_set_ext_clk_rate(struct clk * clk, unsigned long rate)
return 0;
}
-static int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
+int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
u32 l;
int div;
@@ -566,12 +406,12 @@ static int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
return 0;
}
-static long omap1_round_ext_clk_rate(struct clk * clk, unsigned long rate)
+long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
return 96000000 / calc_ext_dsor(rate);
}
-static void omap1_init_ext_clk(struct clk * clk)
+void omap1_init_ext_clk(struct clk *clk)
{
unsigned dsor;
__u16 ratio_bits;
@@ -589,7 +429,7 @@ static void omap1_init_ext_clk(struct clk * clk)
clk-> rate = 96000000 / dsor;
}
-static int omap1_clk_enable(struct clk *clk)
+int omap1_clk_enable(struct clk *clk)
{
int ret = 0;
@@ -617,7 +457,7 @@ err:
return ret;
}
-static void omap1_clk_disable(struct clk *clk)
+void omap1_clk_disable(struct clk *clk)
{
if (clk->usecount > 0 && !(--clk->usecount)) {
clk->ops->disable(clk);
@@ -672,12 +512,70 @@ static void omap1_clk_disable_generic(struct clk *clk)
}
}
-static const struct clkops clkops_generic = {
- .enable = &omap1_clk_enable_generic,
- .disable = &omap1_clk_disable_generic,
+const struct clkops clkops_generic = {
+ .enable = omap1_clk_enable_generic,
+ .disable = omap1_clk_disable_generic,
+};
+
+static int omap1_clk_enable_dsp_domain(struct clk *clk)
+{
+ int retval;
+
+ retval = omap1_clk_enable(api_ck_p);
+ if (!retval) {
+ retval = omap1_clk_enable_generic(clk);
+ omap1_clk_disable(api_ck_p);
+ }
+
+ return retval;
+}
+
+static void omap1_clk_disable_dsp_domain(struct clk *clk)
+{
+ if (omap1_clk_enable(api_ck_p) == 0) {
+ omap1_clk_disable_generic(clk);
+ omap1_clk_disable(api_ck_p);
+ }
+}
+
+const struct clkops clkops_dspck = {
+ .enable = omap1_clk_enable_dsp_domain,
+ .disable = omap1_clk_disable_dsp_domain,
+};
+
+static int omap1_clk_enable_uart_functional(struct clk *clk)
+{
+ int ret;
+ struct uart_clk *uclk;
+
+ ret = omap1_clk_enable_generic(clk);
+ if (ret == 0) {
+ /* Set smart idle acknowledgement mode */
+ uclk = (struct uart_clk *)clk;
+ omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
+ uclk->sysc_addr);
+ }
+
+ return ret;
+}
+
+static void omap1_clk_disable_uart_functional(struct clk *clk)
+{
+ struct uart_clk *uclk;
+
+ /* Set force idle acknowledgement mode */
+ uclk = (struct uart_clk *)clk;
+ omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
+
+ omap1_clk_disable_generic(clk);
+}
+
+const struct clkops clkops_uart = {
+ .enable = omap1_clk_enable_uart_functional,
+ .disable = omap1_clk_disable_uart_functional,
};
-static long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
+long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
if (clk->flags & RATE_FIXED)
return clk->rate;
@@ -688,7 +586,7 @@ static long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
return clk->rate;
}
-static int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
+int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
int ret = -EINVAL;
@@ -703,7 +601,7 @@ static int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
#ifdef CONFIG_OMAP_RESET_CLOCKS
-static void __init omap1_clk_disable_unused(struct clk *clk)
+void __init omap1_clk_disable_unused(struct clk *clk)
{
__u32 regval32;
@@ -724,184 +622,9 @@ static void __init omap1_clk_disable_unused(struct clk *clk)
if ((regval32 & (1 << clk->enable_bit)) == 0)
return;
- /* FIXME: This clock seems to be necessary but no-one
- * has asked for its activation. */
- if (clk == &tc2_ck /* FIX: pm.c (SRAM), CCP, Camera */
- || clk == &ck_dpll1out.clk /* FIX: SoSSI, SSR */
- || clk == &arm_gpio_ck /* FIX: GPIO code for 1510 */
- ) {
- printk(KERN_INFO "FIXME: Clock \"%s\" seems unused\n",
- clk->name);
- return;
- }
-
printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
clk->ops->disable(clk);
printk(" done\n");
}
-#else
-#define omap1_clk_disable_unused NULL
#endif
-
-static struct clk_functions omap1_clk_functions = {
- .clk_enable = omap1_clk_enable,
- .clk_disable = omap1_clk_disable,
- .clk_round_rate = omap1_clk_round_rate,
- .clk_set_rate = omap1_clk_set_rate,
- .clk_disable_unused = omap1_clk_disable_unused,
-};
-
-int __init omap1_clk_init(void)
-{
- struct omap_clk *c;
- const struct omap_clock_config *info;
- int crystal_type = 0; /* Default 12 MHz */
- u32 reg, cpu_mask;
-
-#ifdef CONFIG_DEBUG_LL
- /* Resets some clocks that may be left on from bootloader,
- * but leaves serial clocks on.
- */
- omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
-#endif
-
- /* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
- reg = omap_readw(SOFT_REQ_REG) & (1 << 4);
- omap_writew(reg, SOFT_REQ_REG);
- if (!cpu_is_omap15xx())
- omap_writew(0, SOFT_REQ_REG2);
-
- clk_init(&omap1_clk_functions);
-
- /* By default all idlect1 clocks are allowed to idle */
- arm_idlect1_mask = ~0;
-
- for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
- clk_preinit(c->lk.clk);
-
- cpu_mask = 0;
- if (cpu_is_omap16xx())
- cpu_mask |= CK_16XX;
- if (cpu_is_omap1510())
- cpu_mask |= CK_1510;
- if (cpu_is_omap7xx())
- cpu_mask |= CK_7XX;
- if (cpu_is_omap310())
- cpu_mask |= CK_310;
-
- for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
- if (c->cpu & cpu_mask) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- }
-
- info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
- if (info != NULL) {
- if (!cpu_is_omap15xx())
- crystal_type = info->system_clock_type;
- }
-
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- ck_ref.rate = 13000000;
-#elif defined(CONFIG_ARCH_OMAP16XX)
- if (crystal_type == 2)
- ck_ref.rate = 19200000;
-#endif
-
- printk("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
- omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
- omap_readw(ARM_CKCTL));
-
- /* We want to be in syncronous scalable mode */
- omap_writew(0x1000, ARM_SYSST);
-
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
- /* Use values set by bootloader. Determine PLL rate and recalculate
- * dependent clocks as if kernel had changed PLL or divisors.
- */
- {
- unsigned pll_ctl_val = omap_readw(DPLL_CTL);
-
- ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
- if (pll_ctl_val & 0x10) {
- /* PLL enabled, apply multiplier and divisor */
- if (pll_ctl_val & 0xf80)
- ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
- ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
- } else {
- /* PLL disabled, apply bypass divisor */
- switch (pll_ctl_val & 0xc) {
- case 0:
- break;
- case 0x4:
- ck_dpll1.rate /= 2;
- break;
- default:
- ck_dpll1.rate /= 4;
- break;
- }
- }
- }
-#else
- /* Find the highest supported frequency and enable it */
- if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
- printk(KERN_ERR "System frequencies not set. Check your config.\n");
- /* Guess sane values (60MHz) */
- omap_writew(0x2290, DPLL_CTL);
- omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
- ck_dpll1.rate = 60000000;
- }
-#endif
- propagate_rate(&ck_dpll1);
- /* Cache rates for clocks connected to ck_ref (not dpll1) */
- propagate_rate(&ck_ref);
- printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
- "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
- ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
- ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
- arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
-#if defined(CONFIG_MACH_OMAP_PERSEUS2) || defined(CONFIG_MACH_OMAP_FSAMPLE)
- /* Select slicer output as OMAP input clock */
- omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1, OMAP7XX_PCC_UPLD_CTRL);
-#endif
-
- /* Amstrad Delta wants BCLK high when inactive */
- if (machine_is_ams_delta())
- omap_writel(omap_readl(ULPD_CLOCK_CTRL) |
- (1 << SDW_MCLK_INV_BIT),
- ULPD_CLOCK_CTRL);
-
- /* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
- /* (on 730, bit 13 must not be cleared) */
- if (cpu_is_omap7xx())
- omap_writew(omap_readw(ARM_CKCTL) & 0x2fff, ARM_CKCTL);
- else
- omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
-
- /* Put DSP/MPUI into reset until needed */
- omap_writew(0, ARM_RSTCT1);
- omap_writew(1, ARM_RSTCT2);
- omap_writew(0x400, ARM_IDLECT1);
-
- /*
- * According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
- * of the ARM_IDLECT2 register must be set to zero. The power-on
- * default value of this bit is one.
- */
- omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable(&armper_ck.clk);
- clk_enable(&armxor_ck.clk);
- clk_enable(&armtim_ck.clk); /* This should be done by timer code */
-
- if (cpu_is_omap15xx())
- clk_enable(&arm_gpio_ck);
-
- return 0;
-}
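
The clock.c rework splits the OMAP1 clock framework into code and data: the MPU rate table and the static struct clk definitions removed here move out of the file (to clock_data.c and opp_data.c, per the new Makefile entries above), the clkops and the recalc/set_rate helpers lose their static qualifiers so the data file can reference them, and the remaining code reaches the reference, DPLL1 and API clocks through the exported ck_ref_p/ck_dpll1_p/api_ck_p pointers instead of the now-moved structures. How those pointers are filled in is not part of this hunk; a plausible sketch, assuming the new data file resolves them by name at init time:

	/* Hypothetical init-time lookup; the real code lives in the new
	 * clock_data.c, which is not shown in this diff. */
	api_ck_p   = clk_get(NULL, "api_ck");
	ck_dpll1_p = clk_get(NULL, "ck_dpll1");
	ck_ref_p   = clk_get(NULL, "ck_ref");
	if (IS_ERR(api_ck_p) || IS_ERR(ck_dpll1_p) || IS_ERR(ck_ref_p))
		pr_err("omap1 clocks: could not resolve core clocks\n");
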
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
index 29ffa97dc7f..a4190afb861 100644
--- a/arch/arm/mach-omap1/clock.h
+++ b/arch/arm/mach-omap1/clock.h
@@ -1,7 +1,7 @@
/*
* linux/arch/arm/mach-omap1/clock.h
*
- * Copyright (C) 2004 - 2005 Nokia corporation
+ * Copyright (C) 2004 - 2005, 2009 Nokia corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
* Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
*
@@ -13,30 +13,36 @@
#ifndef __ARCH_ARM_MACH_OMAP1_CLOCK_H
#define __ARCH_ARM_MACH_OMAP1_CLOCK_H
-static unsigned long omap1_ckctl_recalc(struct clk *clk);
-static unsigned long omap1_watchdog_recalc(struct clk *clk);
-static int omap1_set_sossi_rate(struct clk *clk, unsigned long rate);
-static unsigned long omap1_sossi_recalc(struct clk *clk);
-static unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk);
-static int omap1_clk_set_rate_dsp_domain(struct clk * clk, unsigned long rate);
-static int omap1_set_uart_rate(struct clk * clk, unsigned long rate);
-static unsigned long omap1_uart_recalc(struct clk *clk);
-static int omap1_set_ext_clk_rate(struct clk * clk, unsigned long rate);
-static long omap1_round_ext_clk_rate(struct clk * clk, unsigned long rate);
-static void omap1_init_ext_clk(struct clk * clk);
-static int omap1_select_table_rate(struct clk * clk, unsigned long rate);
-static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate);
-
-static int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate);
-static long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate);
-
-struct mpu_rate {
- unsigned long rate;
- unsigned long xtal;
- unsigned long pll_rate;
- __u16 ckctl_val;
- __u16 dpllctl_val;
-};
+#include <linux/clk.h>
+
+#include <plat/clock.h>
+
+extern int __init omap1_clk_init(void);
+extern int omap1_clk_enable(struct clk *clk);
+extern void omap1_clk_disable(struct clk *clk);
+extern long omap1_clk_round_rate(struct clk *clk, unsigned long rate);
+extern int omap1_clk_set_rate(struct clk *clk, unsigned long rate);
+extern unsigned long omap1_ckctl_recalc(struct clk *clk);
+extern int omap1_set_sossi_rate(struct clk *clk, unsigned long rate);
+extern unsigned long omap1_sossi_recalc(struct clk *clk);
+extern unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk);
+extern int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate);
+extern int omap1_set_uart_rate(struct clk *clk, unsigned long rate);
+extern unsigned long omap1_uart_recalc(struct clk *clk);
+extern int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate);
+extern long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate);
+extern void omap1_init_ext_clk(struct clk *clk);
+extern int omap1_select_table_rate(struct clk *clk, unsigned long rate);
+extern long omap1_round_to_table_rate(struct clk *clk, unsigned long rate);
+extern int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate);
+extern long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate);
+extern unsigned long omap1_watchdog_recalc(struct clk *clk);
+
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+extern void __init omap1_clk_disable_unused(struct clk *clk);
+#else
+#define omap1_clk_disable_unused NULL
+#endif
struct uart_clk {
struct clk clk;
@@ -96,596 +102,12 @@ struct arm_idlect1_clk {
#define SOFT_REQ_REG 0xfffe0834
#define SOFT_REQ_REG2 0xfffe0880
-/*-------------------------------------------------------------------------
- * Omap1 MPU rate table
- *-------------------------------------------------------------------------*/
-static struct mpu_rate rate_table[] = {
- /* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
- * NOTE: Comment order here is different from bits in CKCTL value:
- * armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
- */
-#if defined(CONFIG_OMAP_ARM_216MHZ)
- { 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_195MHZ)
- { 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_192MHZ)
- { 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
- { 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
- { 96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
- { 48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/4/4/8/8/8 */
- { 24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_182MHZ)
- { 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_168MHZ)
- { 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_150MHZ)
- { 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_120MHZ)
- { 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_96MHZ)
- { 96000000, 12000000, 96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_60MHZ)
- { 60000000, 12000000, 60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_30MHZ)
- { 30000000, 12000000, 60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
-#endif
- { 0, 0, 0, 0, 0 },
-};
-
-/*-------------------------------------------------------------------------
- * Omap1 clocks
- *-------------------------------------------------------------------------*/
-
-static struct clk ck_ref = {
- .name = "ck_ref",
- .ops = &clkops_null,
- .rate = 12000000,
-};
-
-static struct clk ck_dpll1 = {
- .name = "ck_dpll1",
- .ops = &clkops_null,
- .parent = &ck_ref,
-};
-
-static struct arm_idlect1_clk ck_dpll1out = {
- .clk = {
- .name = "ck_dpll1out",
- .ops = &clkops_generic,
- .parent = &ck_dpll1,
- .flags = CLOCK_IDLE_CONTROL | ENABLE_REG_32BIT,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_CKOUT_ARM,
- .recalc = &followparent_recalc,
- },
- .idlect_shift = 12,
-};
-
-static struct clk sossi_ck = {
- .name = "ck_sossi",
- .ops = &clkops_generic,
- .parent = &ck_dpll1out.clk,
- .flags = CLOCK_NO_IDLE_PARENT | ENABLE_REG_32BIT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1),
- .enable_bit = 16,
- .recalc = &omap1_sossi_recalc,
- .set_rate = &omap1_set_sossi_rate,
-};
-
-static struct clk arm_ck = {
- .name = "arm_ck",
- .ops = &clkops_null,
- .parent = &ck_dpll1,
- .rate_offset = CKCTL_ARMDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = omap1_clk_set_rate_ckctl_arm,
-};
-
-static struct arm_idlect1_clk armper_ck = {
- .clk = {
- .name = "armper_ck",
- .ops = &clkops_generic,
- .parent = &ck_dpll1,
- .flags = CLOCK_IDLE_CONTROL,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_PERCK,
- .rate_offset = CKCTL_PERDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = omap1_clk_set_rate_ckctl_arm,
- },
- .idlect_shift = 2,
-};
-
-static struct clk arm_gpio_ck = {
- .name = "arm_gpio_ck",
- .ops = &clkops_generic,
- .parent = &ck_dpll1,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_GPIOCK,
- .recalc = &followparent_recalc,
-};
-
-static struct arm_idlect1_clk armxor_ck = {
- .clk = {
- .name = "armxor_ck",
- .ops = &clkops_generic,
- .parent = &ck_ref,
- .flags = CLOCK_IDLE_CONTROL,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_XORPCK,
- .recalc = &followparent_recalc,
- },
- .idlect_shift = 1,
-};
-
-static struct arm_idlect1_clk armtim_ck = {
- .clk = {
- .name = "armtim_ck",
- .ops = &clkops_generic,
- .parent = &ck_ref,
- .flags = CLOCK_IDLE_CONTROL,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_TIMCK,
- .recalc = &followparent_recalc,
- },
- .idlect_shift = 9,
-};
-
-static struct arm_idlect1_clk armwdt_ck = {
- .clk = {
- .name = "armwdt_ck",
- .ops = &clkops_generic,
- .parent = &ck_ref,
- .flags = CLOCK_IDLE_CONTROL,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_WDTCK,
- .recalc = &omap1_watchdog_recalc,
- },
- .idlect_shift = 0,
-};
-
-static struct clk arminth_ck16xx = {
- .name = "arminth_ck",
- .ops = &clkops_null,
- .parent = &arm_ck,
- .recalc = &followparent_recalc,
- /* Note: On 16xx the frequency can be divided by 2 by programming
- * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
- *
- * 1510 version is in TC clocks.
- */
-};
-
-static struct clk dsp_ck = {
- .name = "dsp_ck",
- .ops = &clkops_generic,
- .parent = &ck_dpll1,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_CKCTL),
- .enable_bit = EN_DSPCK,
- .rate_offset = CKCTL_DSPDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = omap1_clk_set_rate_ckctl_arm,
-};
-
-static struct clk dspmmu_ck = {
- .name = "dspmmu_ck",
- .ops = &clkops_null,
- .parent = &ck_dpll1,
- .rate_offset = CKCTL_DSPMMUDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = omap1_clk_set_rate_ckctl_arm,
-};
-
-static struct clk dspper_ck = {
- .name = "dspper_ck",
- .ops = &clkops_dspck,
- .parent = &ck_dpll1,
- .enable_reg = DSP_IDLECT2,
- .enable_bit = EN_PERCK,
- .rate_offset = CKCTL_PERDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc_dsp_domain,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = &omap1_clk_set_rate_dsp_domain,
-};
-
-static struct clk dspxor_ck = {
- .name = "dspxor_ck",
- .ops = &clkops_dspck,
- .parent = &ck_ref,
- .enable_reg = DSP_IDLECT2,
- .enable_bit = EN_XORPCK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dsptim_ck = {
- .name = "dsptim_ck",
- .ops = &clkops_dspck,
- .parent = &ck_ref,
- .enable_reg = DSP_IDLECT2,
- .enable_bit = EN_DSPTIMCK,
- .recalc = &followparent_recalc,
-};
-
-/* Tie ARM_IDLECT1:IDLIF_ARM to this logical clock structure */
-static struct arm_idlect1_clk tc_ck = {
- .clk = {
- .name = "tc_ck",
- .ops = &clkops_null,
- .parent = &ck_dpll1,
- .flags = CLOCK_IDLE_CONTROL,
- .rate_offset = CKCTL_TCDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = omap1_clk_set_rate_ckctl_arm,
- },
- .idlect_shift = 6,
-};
-
-static struct clk arminth_ck1510 = {
- .name = "arminth_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
- /* Note: On 1510 the frequency follows TC_CK
- *
- * 16xx version is in MPU clocks.
- */
-};
-
-static struct clk tipb_ck = {
- /* No-idle controlled by "tc_ck" */
- .name = "tipb_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk l3_ocpi_ck = {
- /* No-idle controlled by "tc_ck" */
- .name = "l3_ocpi_ck",
- .ops = &clkops_generic,
- .parent = &tc_ck.clk,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
- .enable_bit = EN_OCPI_CK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk tc1_ck = {
- .name = "tc1_ck",
- .ops = &clkops_generic,
- .parent = &tc_ck.clk,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
- .enable_bit = EN_TC1_CK,
- .recalc = &followparent_recalc,
-};
+extern __u32 arm_idlect1_mask;
+extern struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;
-static struct clk tc2_ck = {
- .name = "tc2_ck",
- .ops = &clkops_generic,
- .parent = &tc_ck.clk,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
- .enable_bit = EN_TC2_CK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dma_ck = {
- /* No-idle controlled by "tc_ck" */
- .name = "dma_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dma_lcdfree_ck = {
- .name = "dma_lcdfree_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
-};
-
-static struct arm_idlect1_clk api_ck = {
- .clk = {
- .name = "api_ck",
- .ops = &clkops_generic,
- .parent = &tc_ck.clk,
- .flags = CLOCK_IDLE_CONTROL,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_APICK,
- .recalc = &followparent_recalc,
- },
- .idlect_shift = 8,
-};
-
-static struct arm_idlect1_clk lb_ck = {
- .clk = {
- .name = "lb_ck",
- .ops = &clkops_generic,
- .parent = &tc_ck.clk,
- .flags = CLOCK_IDLE_CONTROL,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_LBCK,
- .recalc = &followparent_recalc,
- },
- .idlect_shift = 4,
-};
-
-static struct clk rhea1_ck = {
- .name = "rhea1_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rhea2_ck = {
- .name = "rhea2_ck",
- .ops = &clkops_null,
- .parent = &tc_ck.clk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk lcd_ck_16xx = {
- .name = "lcd_ck",
- .ops = &clkops_generic,
- .parent = &ck_dpll1,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_LCDCK,
- .rate_offset = CKCTL_LCDDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = omap1_clk_set_rate_ckctl_arm,
-};
-
-static struct arm_idlect1_clk lcd_ck_1510 = {
- .clk = {
- .name = "lcd_ck",
- .ops = &clkops_generic,
- .parent = &ck_dpll1,
- .flags = CLOCK_IDLE_CONTROL,
- .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
- .enable_bit = EN_LCDCK,
- .rate_offset = CKCTL_LCDDIV_OFFSET,
- .recalc = &omap1_ckctl_recalc,
- .round_rate = omap1_clk_round_rate_ckctl_arm,
- .set_rate = omap1_clk_set_rate_ckctl_arm,
- },
- .idlect_shift = 3,
-};
-
-static struct clk uart1_1510 = {
- .name = "uart1_ck",
- .ops = &clkops_null,
- /* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 12000000,
- .flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = 29, /* Chooses between 12MHz and 48MHz */
- .set_rate = &omap1_set_uart_rate,
- .recalc = &omap1_uart_recalc,
-};
-
-static struct uart_clk uart1_16xx = {
- .clk = {
- .name = "uart1_ck",
- .ops = &clkops_uart,
- /* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 48000000,
- .flags = RATE_FIXED | ENABLE_REG_32BIT |
- CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = 29,
- },
- .sysc_addr = 0xfffb0054,
-};
-
-static struct clk uart2_ck = {
- .name = "uart2_ck",
- .ops = &clkops_null,
- /* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 12000000,
- .flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = 30, /* Chooses between 12MHz and 48MHz */
- .set_rate = &omap1_set_uart_rate,
- .recalc = &omap1_uart_recalc,
-};
-
-static struct clk uart3_1510 = {
- .name = "uart3_ck",
- .ops = &clkops_null,
- /* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 12000000,
- .flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = 31, /* Chooses between 12MHz and 48MHz */
- .set_rate = &omap1_set_uart_rate,
- .recalc = &omap1_uart_recalc,
-};
-
-static struct uart_clk uart3_16xx = {
- .clk = {
- .name = "uart3_ck",
- .ops = &clkops_uart,
- /* Direct from ULPD, no real parent */
- .parent = &armper_ck.clk,
- .rate = 48000000,
- .flags = RATE_FIXED | ENABLE_REG_32BIT |
- CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = 31,
- },
- .sysc_addr = 0xfffb9854,
-};
-
-static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
- .name = "usb_clko",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent */
- .rate = 6000000,
- .flags = RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = OMAP1_IO_ADDRESS(ULPD_CLOCK_CTRL),
- .enable_bit = USB_MCLK_EN_BIT,
-};
-
-static struct clk usb_hhc_ck1510 = {
- .name = "usb_hhc_ck",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent */
- .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
- .flags = RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = USB_HOST_HHC_UHOST_EN,
-};
-
-static struct clk usb_hhc_ck16xx = {
- .name = "usb_hhc_ck",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
- .flags = RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = OMAP1_IO_ADDRESS(OTG_BASE + 0x08), /* OTG_SYSCON_2 */
- .enable_bit = 8 /* UHOST_EN */,
-};
-
-static struct clk usb_dc_ck = {
- .name = "usb_dc_ck",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- .flags = RATE_FIXED,
- .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
- .enable_bit = 4,
-};
-
-static struct clk usb_dc_ck7xx = {
- .name = "usb_dc_ck",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- .flags = RATE_FIXED,
- .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
- .enable_bit = 8,
-};
-
-static struct clk mclk_1510 = {
- .name = "mclk",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .rate = 12000000,
- .flags = RATE_FIXED,
- .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
- .enable_bit = 6,
-};
-
-static struct clk mclk_16xx = {
- .name = "mclk",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .enable_reg = OMAP1_IO_ADDRESS(COM_CLK_DIV_CTRL_SEL),
- .enable_bit = COM_ULPD_PLL_CLK_REQ,
- .set_rate = &omap1_set_ext_clk_rate,
- .round_rate = &omap1_round_ext_clk_rate,
- .init = &omap1_init_ext_clk,
-};
-
-static struct clk bclk_1510 = {
- .name = "bclk",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .rate = 12000000,
- .flags = RATE_FIXED,
-};
-
-static struct clk bclk_16xx = {
- .name = "bclk",
- .ops = &clkops_generic,
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .enable_reg = OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL),
- .enable_bit = SWD_ULPD_PLL_CLK_REQ,
- .set_rate = &omap1_set_ext_clk_rate,
- .round_rate = &omap1_round_ext_clk_rate,
- .init = &omap1_init_ext_clk,
-};
-
-static struct clk mmc1_ck = {
- .name = "mmc_ck",
- .ops = &clkops_generic,
- /* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck.clk,
- .rate = 48000000,
- .flags = RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = 23,
-};
-
-static struct clk mmc2_ck = {
- .name = "mmc_ck",
- .id = 1,
- .ops = &clkops_generic,
- /* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck.clk,
- .rate = 48000000,
- .flags = RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
- .enable_bit = 20,
-};
-
-static struct clk mmc3_ck = {
- .name = "mmc_ck",
- .id = 2,
- .ops = &clkops_generic,
- /* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck.clk,
- .rate = 48000000,
- .flags = RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
- .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
- .enable_bit = 12,
-};
-
-static struct clk virtual_ck_mpu = {
- .name = "mpu",
- .ops = &clkops_null,
- .parent = &arm_ck, /* Is smarter alias for */
- .recalc = &followparent_recalc,
- .set_rate = &omap1_select_table_rate,
- .round_rate = &omap1_round_to_table_rate,
-};
-
-/* virtual functional clock domain for I2C. Just for making sure that ARMXOR_CK
-remains active during MPU idle whenever this is enabled */
-static struct clk i2c_fck = {
- .name = "i2c_fck",
- .id = 1,
- .ops = &clkops_null,
- .flags = CLOCK_NO_IDLE_PARENT,
- .parent = &armxor_ck.clk,
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c_ick = {
- .name = "i2c_ick",
- .id = 1,
- .ops = &clkops_null,
- .flags = CLOCK_NO_IDLE_PARENT,
- .parent = &armper_ck.clk,
- .recalc = &followparent_recalc,
-};
+extern const struct clkops clkops_dspck;
+extern const struct clkops clkops_dummy;
+extern const struct clkops clkops_uart;
+extern const struct clkops clkops_generic;
#endif
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
new file mode 100644
index 00000000000..ab995a9c606
--- /dev/null
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -0,0 +1,843 @@
+/*
+ * linux/arch/arm/mach-omap1/clock_data.c
+ *
+ * Copyright (C) 2004 - 2005, 2009 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h> /* for machine_is_* */
+
+#include <plat/clock.h>
+#include <plat/cpu.h>
+#include <plat/clkdev_omap.h>
+#include <plat/usb.h> /* for OTG_BASE */
+
+#include "clock.h"
+
+/*------------------------------------------------------------------------
+ * Omap1 clocks
+ *-------------------------------------------------------------------------*/
+
+/* XXX is this necessary? */
+static struct clk dummy_ck = {
+ .name = "dummy",
+ .ops = &clkops_dummy,
+ .flags = RATE_FIXED,
+};
+
+static struct clk ck_ref = {
+ .name = "ck_ref",
+ .ops = &clkops_null,
+ .rate = 12000000,
+};
+
+static struct clk ck_dpll1 = {
+ .name = "ck_dpll1",
+ .ops = &clkops_null,
+ .parent = &ck_ref,
+};
+
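+/*
+ * Several clocks below are wrapped in struct arm_idlect1_clk; the
+ * idlect_shift field gives the bit position in ARM_IDLECT1 that controls
+ * whether the clock is allowed to idle (see the CLOCK_IDLE_CONTROL flag).
+ */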
+/*
+ * FIXME: This clock seems to be necessary but no-one has asked for its
+ * activation. [ FIX: SoSSI, SSR ]
+ */
+static struct arm_idlect1_clk ck_dpll1out = {
+ .clk = {
+ .name = "ck_dpll1out",
+ .ops = &clkops_generic,
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IDLE_CONTROL | ENABLE_REG_32BIT |
+ ENABLE_ON_INIT,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_CKOUT_ARM,
+ .recalc = &followparent_recalc,
+ },
+ .idlect_shift = 12,
+};
+
+static struct clk sossi_ck = {
+ .name = "ck_sossi",
+ .ops = &clkops_generic,
+ .parent = &ck_dpll1out.clk,
+ .flags = CLOCK_NO_IDLE_PARENT | ENABLE_REG_32BIT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1),
+ .enable_bit = 16,
+ .recalc = &omap1_sossi_recalc,
+ .set_rate = &omap1_set_sossi_rate,
+};
+
+static struct clk arm_ck = {
+ .name = "arm_ck",
+ .ops = &clkops_null,
+ .parent = &ck_dpll1,
+ .rate_offset = CKCTL_ARMDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = omap1_clk_set_rate_ckctl_arm,
+};
+
+static struct arm_idlect1_clk armper_ck = {
+ .clk = {
+ .name = "armper_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IDLE_CONTROL,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_PERCK,
+ .rate_offset = CKCTL_PERDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = omap1_clk_set_rate_ckctl_arm,
+ },
+ .idlect_shift = 2,
+};
+
+/*
+ * FIXME: This clock seems to be necessary but no-one has asked for its
+ * activation. [ GPIO code for 1510 ]
+ */
+static struct clk arm_gpio_ck = {
+ .name = "arm_gpio_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_dpll1,
+ .flags = ENABLE_ON_INIT,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_GPIOCK,
+ .recalc = &followparent_recalc,
+};
+
+static struct arm_idlect1_clk armxor_ck = {
+ .clk = {
+ .name = "armxor_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_ref,
+ .flags = CLOCK_IDLE_CONTROL,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_XORPCK,
+ .recalc = &followparent_recalc,
+ },
+ .idlect_shift = 1,
+};
+
+static struct arm_idlect1_clk armtim_ck = {
+ .clk = {
+ .name = "armtim_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_ref,
+ .flags = CLOCK_IDLE_CONTROL,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_TIMCK,
+ .recalc = &followparent_recalc,
+ },
+ .idlect_shift = 9,
+};
+
+static struct arm_idlect1_clk armwdt_ck = {
+ .clk = {
+ .name = "armwdt_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_ref,
+ .flags = CLOCK_IDLE_CONTROL,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_WDTCK,
+ .recalc = &omap1_watchdog_recalc,
+ },
+ .idlect_shift = 0,
+};
+
+static struct clk arminth_ck16xx = {
+ .name = "arminth_ck",
+ .ops = &clkops_null,
+ .parent = &arm_ck,
+ .recalc = &followparent_recalc,
+ /* Note: On 16xx the frequency can be divided by 2 by programming
+ * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
+ *
+ * 1510 version is in TC clocks.
+ */
+};
+
+static struct clk dsp_ck = {
+ .name = "dsp_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_dpll1,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_CKCTL),
+ .enable_bit = EN_DSPCK,
+ .rate_offset = CKCTL_DSPDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = omap1_clk_set_rate_ckctl_arm,
+};
+
+static struct clk dspmmu_ck = {
+ .name = "dspmmu_ck",
+ .ops = &clkops_null,
+ .parent = &ck_dpll1,
+ .rate_offset = CKCTL_DSPMMUDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = omap1_clk_set_rate_ckctl_arm,
+};
+
+static struct clk dspper_ck = {
+ .name = "dspper_ck",
+ .ops = &clkops_dspck,
+ .parent = &ck_dpll1,
+ .enable_reg = DSP_IDLECT2,
+ .enable_bit = EN_PERCK,
+ .rate_offset = CKCTL_PERDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc_dsp_domain,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = &omap1_clk_set_rate_dsp_domain,
+};
+
+static struct clk dspxor_ck = {
+ .name = "dspxor_ck",
+ .ops = &clkops_dspck,
+ .parent = &ck_ref,
+ .enable_reg = DSP_IDLECT2,
+ .enable_bit = EN_XORPCK,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dsptim_ck = {
+ .name = "dsptim_ck",
+ .ops = &clkops_dspck,
+ .parent = &ck_ref,
+ .enable_reg = DSP_IDLECT2,
+ .enable_bit = EN_DSPTIMCK,
+ .recalc = &followparent_recalc,
+};
+
+/* Tie ARM_IDLECT1:IDLIF_ARM to this logical clock structure */
+static struct arm_idlect1_clk tc_ck = {
+ .clk = {
+ .name = "tc_ck",
+ .ops = &clkops_null,
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IDLE_CONTROL,
+ .rate_offset = CKCTL_TCDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = omap1_clk_set_rate_ckctl_arm,
+ },
+ .idlect_shift = 6,
+};
+
+static struct clk arminth_ck1510 = {
+ .name = "arminth_ck",
+ .ops = &clkops_null,
+ .parent = &tc_ck.clk,
+ .recalc = &followparent_recalc,
+ /* Note: On 1510 the frequency follows TC_CK
+ *
+ * 16xx version is in MPU clocks.
+ */
+};
+
+static struct clk tipb_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "tipb_ck",
+ .ops = &clkops_null,
+ .parent = &tc_ck.clk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk l3_ocpi_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "l3_ocpi_ck",
+ .ops = &clkops_generic,
+ .parent = &tc_ck.clk,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
+ .enable_bit = EN_OCPI_CK,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk tc1_ck = {
+ .name = "tc1_ck",
+ .ops = &clkops_generic,
+ .parent = &tc_ck.clk,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
+ .enable_bit = EN_TC1_CK,
+ .recalc = &followparent_recalc,
+};
+
+/*
+ * FIXME: This clock seems to be necessary but no-one has asked for its
+ * activation. [ pm.c (SRAM), CCP, Camera ]
+ */
+static struct clk tc2_ck = {
+ .name = "tc2_ck",
+ .ops = &clkops_generic,
+ .parent = &tc_ck.clk,
+ .flags = ENABLE_ON_INIT,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT3),
+ .enable_bit = EN_TC2_CK,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dma_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "dma_ck",
+ .ops = &clkops_null,
+ .parent = &tc_ck.clk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dma_lcdfree_ck = {
+ .name = "dma_lcdfree_ck",
+ .ops = &clkops_null,
+ .parent = &tc_ck.clk,
+ .recalc = &followparent_recalc,
+};
+
+static struct arm_idlect1_clk api_ck = {
+ .clk = {
+ .name = "api_ck",
+ .ops = &clkops_generic,
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IDLE_CONTROL,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_APICK,
+ .recalc = &followparent_recalc,
+ },
+ .idlect_shift = 8,
+};
+
+static struct arm_idlect1_clk lb_ck = {
+ .clk = {
+ .name = "lb_ck",
+ .ops = &clkops_generic,
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IDLE_CONTROL,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_LBCK,
+ .recalc = &followparent_recalc,
+ },
+ .idlect_shift = 4,
+};
+
+static struct clk rhea1_ck = {
+ .name = "rhea1_ck",
+ .ops = &clkops_null,
+ .parent = &tc_ck.clk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk rhea2_ck = {
+ .name = "rhea2_ck",
+ .ops = &clkops_null,
+ .parent = &tc_ck.clk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk lcd_ck_16xx = {
+ .name = "lcd_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_dpll1,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_LCDCK,
+ .rate_offset = CKCTL_LCDDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = omap1_clk_set_rate_ckctl_arm,
+};
+
+static struct arm_idlect1_clk lcd_ck_1510 = {
+ .clk = {
+ .name = "lcd_ck",
+ .ops = &clkops_generic,
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IDLE_CONTROL,
+ .enable_reg = OMAP1_IO_ADDRESS(ARM_IDLECT2),
+ .enable_bit = EN_LCDCK,
+ .rate_offset = CKCTL_LCDDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .round_rate = omap1_clk_round_rate_ckctl_arm,
+ .set_rate = omap1_clk_set_rate_ckctl_arm,
+ },
+ .idlect_shift = 3,
+};
+
+static struct clk uart1_1510 = {
+ .name = "uart1_ck",
+ .ops = &clkops_null,
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 12000000,
+ .flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = 29, /* Chooses between 12MHz and 48MHz */
+ .set_rate = &omap1_set_uart_rate,
+ .recalc = &omap1_uart_recalc,
+};
+
+static struct uart_clk uart1_16xx = {
+ .clk = {
+ .name = "uart1_ck",
+ .ops = &clkops_uart,
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = RATE_FIXED | ENABLE_REG_32BIT |
+ CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = 29,
+ },
+ .sysc_addr = 0xfffb0054,
+};
+
+static struct clk uart2_ck = {
+ .name = "uart2_ck",
+ .ops = &clkops_null,
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 12000000,
+ .flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = 30, /* Chooses between 12MHz and 48MHz */
+ .set_rate = &omap1_set_uart_rate,
+ .recalc = &omap1_uart_recalc,
+};
+
+static struct clk uart3_1510 = {
+ .name = "uart3_ck",
+ .ops = &clkops_null,
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 12000000,
+ .flags = ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = 31, /* Chooses between 12MHz and 48MHz */
+ .set_rate = &omap1_set_uart_rate,
+ .recalc = &omap1_uart_recalc,
+};
+
+static struct uart_clk uart3_16xx = {
+ .clk = {
+ .name = "uart3_ck",
+ .ops = &clkops_uart,
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = RATE_FIXED | ENABLE_REG_32BIT |
+ CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = 31,
+ },
+ .sysc_addr = 0xfffb9854,
+};
+
+static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
+ .name = "usb_clko",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent */
+ .rate = 6000000,
+ .flags = RATE_FIXED | ENABLE_REG_32BIT,
+ .enable_reg = OMAP1_IO_ADDRESS(ULPD_CLOCK_CTRL),
+ .enable_bit = USB_MCLK_EN_BIT,
+};
+
+static struct clk usb_hhc_ck1510 = {
+ .name = "usb_hhc_ck",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent */
+ .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
+ .flags = RATE_FIXED | ENABLE_REG_32BIT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = USB_HOST_HHC_UHOST_EN,
+};
+
+static struct clk usb_hhc_ck16xx = {
+ .name = "usb_hhc_ck",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent */
+ .rate = 48000000,
+ /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
+ .flags = RATE_FIXED | ENABLE_REG_32BIT,
+ .enable_reg = OMAP1_IO_ADDRESS(OTG_BASE + 0x08), /* OTG_SYSCON_2 */
+ .enable_bit = 8 /* UHOST_EN */,
+};
+
+static struct clk usb_dc_ck = {
+ .name = "usb_dc_ck",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent */
+ .rate = 48000000,
+ .flags = RATE_FIXED,
+ .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
+ .enable_bit = 4,
+};
+
+static struct clk usb_dc_ck7xx = {
+ .name = "usb_dc_ck",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent */
+ .rate = 48000000,
+ .flags = RATE_FIXED,
+ .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
+ .enable_bit = 8,
+};
+
+static struct clk mclk_1510 = {
+ .name = "mclk",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .rate = 12000000,
+ .flags = RATE_FIXED,
+ .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
+ .enable_bit = 6,
+};
+
+static struct clk mclk_16xx = {
+ .name = "mclk",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .enable_reg = OMAP1_IO_ADDRESS(COM_CLK_DIV_CTRL_SEL),
+ .enable_bit = COM_ULPD_PLL_CLK_REQ,
+ .set_rate = &omap1_set_ext_clk_rate,
+ .round_rate = &omap1_round_ext_clk_rate,
+ .init = &omap1_init_ext_clk,
+};
+
+static struct clk bclk_1510 = {
+ .name = "bclk",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .rate = 12000000,
+ .flags = RATE_FIXED,
+};
+
+static struct clk bclk_16xx = {
+ .name = "bclk",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .enable_reg = OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL),
+ .enable_bit = SWD_ULPD_PLL_CLK_REQ,
+ .set_rate = &omap1_set_ext_clk_rate,
+ .round_rate = &omap1_round_ext_clk_rate,
+ .init = &omap1_init_ext_clk,
+};
+
+static struct clk mmc1_ck = {
+ .name = "mmc_ck",
+ .ops = &clkops_generic,
+ /* Functional clock is direct from ULPD, interface clock is ARMPER */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = 23,
+};
+
+static struct clk mmc2_ck = {
+ .name = "mmc_ck",
+ .id = 1,
+ .ops = &clkops_generic,
+ /* Functional clock is direct from ULPD, interface clock is ARMPER */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0),
+ .enable_bit = 20,
+};
+
+static struct clk mmc3_ck = {
+ .name = "mmc_ck",
+ .id = 2,
+ .ops = &clkops_generic,
+ /* Functional clock is direct from ULPD, interface clock is ARMPER */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
+ .enable_bit = 12,
+};
+
+static struct clk virtual_ck_mpu = {
+ .name = "mpu",
+ .ops = &clkops_null,
+	.parent		= &arm_ck, /* Smarter alias for arm_ck */
+ .recalc = &followparent_recalc,
+ .set_rate = &omap1_select_table_rate,
+ .round_rate = &omap1_round_to_table_rate,
+};
+
+/*
+ * Virtual functional clock domain for I2C.  Just for making sure that
+ * ARMXOR_CK remains active during MPU idle whenever this is enabled.
+ */
+static struct clk i2c_fck = {
+ .name = "i2c_fck",
+ .id = 1,
+ .ops = &clkops_null,
+ .flags = CLOCK_NO_IDLE_PARENT,
+ .parent = &armxor_ck.clk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c_ick = {
+ .name = "i2c_ick",
+ .id = 1,
+ .ops = &clkops_null,
+ .flags = CLOCK_NO_IDLE_PARENT,
+ .parent = &armper_ck.clk,
+ .recalc = &followparent_recalc,
+};
+
+/*
+ * clkdev integration
+ */
+
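+/*
+ * Each CLK() entry below pairs an optional device name and connection id
+ * with a struct clk and a mask of the OMAP1 variants (CK_1510, CK_310,
+ * CK_16XX, CK_7XX) on which the clock should be registered; the mask is
+ * checked against the running CPU in omap1_clk_init() below.
+ */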
+static struct omap_clk omap_clks[] = {
+ /* non-ULPD clocks */
+ CLK(NULL, "ck_ref", &ck_ref, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "ck_dpll1", &ck_dpll1, CK_16XX | CK_1510 | CK_310),
+ /* CK_GEN1 clocks */
+ CLK(NULL, "ck_dpll1out", &ck_dpll1out.clk, CK_16XX),
+ CLK(NULL, "ck_sossi", &sossi_ck, CK_16XX),
+ CLK(NULL, "arm_ck", &arm_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "armper_ck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "arm_gpio_ck", &arm_gpio_ck, CK_1510 | CK_310),
+ CLK(NULL, "armxor_ck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "armtim_ck", &armtim_ck.clk, CK_16XX | CK_1510 | CK_310),
+ CLK("omap_wdt", "fck", &armwdt_ck.clk, CK_16XX | CK_1510 | CK_310),
+ CLK("omap_wdt", "ick", &armper_ck.clk, CK_16XX),
+ CLK("omap_wdt", "ick", &dummy_ck, CK_1510 | CK_310),
+ CLK(NULL, "arminth_ck", &arminth_ck1510, CK_1510 | CK_310),
+ CLK(NULL, "arminth_ck", &arminth_ck16xx, CK_16XX),
+ /* CK_GEN2 clocks */
+ CLK(NULL, "dsp_ck", &dsp_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dspmmu_ck", &dspmmu_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dspper_ck", &dspper_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dspxor_ck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dsptim_ck", &dsptim_ck, CK_16XX | CK_1510 | CK_310),
+ /* CK_GEN3 clocks */
+ CLK(NULL, "tc_ck", &tc_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK(NULL, "tipb_ck", &tipb_ck, CK_1510 | CK_310),
+ CLK(NULL, "l3_ocpi_ck", &l3_ocpi_ck, CK_16XX | CK_7XX),
+ CLK(NULL, "tc1_ck", &tc1_ck, CK_16XX),
+ CLK(NULL, "tc2_ck", &tc2_ck, CK_16XX),
+ CLK(NULL, "dma_ck", &dma_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "dma_lcdfree_ck", &dma_lcdfree_ck, CK_16XX),
+ CLK(NULL, "api_ck", &api_ck.clk, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "lb_ck", &lb_ck.clk, CK_1510 | CK_310),
+ CLK(NULL, "rhea1_ck", &rhea1_ck, CK_16XX),
+ CLK(NULL, "rhea2_ck", &rhea2_ck, CK_16XX),
+ CLK(NULL, "lcd_ck", &lcd_ck_16xx, CK_16XX | CK_7XX),
+ CLK(NULL, "lcd_ck", &lcd_ck_1510.clk, CK_1510 | CK_310),
+ /* ULPD clocks */
+ CLK(NULL, "uart1_ck", &uart1_1510, CK_1510 | CK_310),
+ CLK(NULL, "uart1_ck", &uart1_16xx.clk, CK_16XX),
+ CLK(NULL, "uart2_ck", &uart2_ck, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "uart3_ck", &uart3_1510, CK_1510 | CK_310),
+ CLK(NULL, "uart3_ck", &uart3_16xx.clk, CK_16XX),
+ CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
+ CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
+ CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX),
+ CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
+ CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
+ CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
+ CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
+ CLK(NULL, "bclk", &bclk_16xx, CK_16XX),
+ CLK("mmci-omap.0", "fck", &mmc1_ck, CK_16XX | CK_1510 | CK_310),
+ CLK("mmci-omap.0", "fck", &mmc3_ck, CK_7XX),
+ CLK("mmci-omap.0", "ick", &armper_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK("mmci-omap.1", "fck", &mmc2_ck, CK_16XX),
+ CLK("mmci-omap.1", "ick", &armper_ck.clk, CK_16XX),
+ /* Virtual clocks */
+ CLK(NULL, "mpu", &virtual_ck_mpu, CK_16XX | CK_1510 | CK_310),
+ CLK("i2c_omap.1", "fck", &i2c_fck, CK_16XX | CK_1510 | CK_310 | CK_7XX),
+ CLK("i2c_omap.1", "ick", &i2c_ick, CK_16XX),
+ CLK("i2c_omap.1", "ick", &dummy_ck, CK_1510 | CK_310 | CK_7XX),
+ CLK("omap_uwire", "fck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310),
+ CLK("omap-mcbsp.1", "ick", &dspper_ck, CK_16XX),
+ CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_1510 | CK_310),
+ CLK("omap-mcbsp.2", "ick", &armper_ck.clk, CK_16XX),
+ CLK("omap-mcbsp.2", "ick", &dummy_ck, CK_1510 | CK_310),
+ CLK("omap-mcbsp.3", "ick", &dspper_ck, CK_16XX),
+ CLK("omap-mcbsp.3", "ick", &dummy_ck, CK_1510 | CK_310),
+ CLK("omap-mcbsp.1", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
+ CLK("omap-mcbsp.2", "fck", &armper_ck.clk, CK_16XX | CK_1510 | CK_310),
+ CLK("omap-mcbsp.3", "fck", &dspxor_ck, CK_16XX | CK_1510 | CK_310),
+};
+
+/*
+ * init
+ */
+
+static struct clk_functions omap1_clk_functions __initdata = {
+ .clk_enable = omap1_clk_enable,
+ .clk_disable = omap1_clk_disable,
+ .clk_round_rate = omap1_clk_round_rate,
+ .clk_set_rate = omap1_clk_set_rate,
+ .clk_disable_unused = omap1_clk_disable_unused,
+};
+
+int __init omap1_clk_init(void)
+{
+ struct omap_clk *c;
+ const struct omap_clock_config *info;
+ int crystal_type = 0; /* Default 12 MHz */
+ u32 reg, cpu_mask;
+
+#ifdef CONFIG_DEBUG_LL
+ /*
+	 * Reset some clocks that may have been left on by the bootloader,
+	 * but leave the serial clocks on.
+ */
+ omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
+#endif
+
+ /* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
+ reg = omap_readw(SOFT_REQ_REG) & (1 << 4);
+ omap_writew(reg, SOFT_REQ_REG);
+ if (!cpu_is_omap15xx())
+ omap_writew(0, SOFT_REQ_REG2);
+
+ clk_init(&omap1_clk_functions);
+
+ /* By default all idlect1 clocks are allowed to idle */
+ arm_idlect1_mask = ~0;
+
+ for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
+ clk_preinit(c->lk.clk);
+
+ cpu_mask = 0;
+ if (cpu_is_omap16xx())
+ cpu_mask |= CK_16XX;
+ if (cpu_is_omap1510())
+ cpu_mask |= CK_1510;
+ if (cpu_is_omap7xx())
+ cpu_mask |= CK_7XX;
+ if (cpu_is_omap310())
+ cpu_mask |= CK_310;
+
+ for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
+ if (c->cpu & cpu_mask) {
+ clkdev_add(&c->lk);
+ clk_register(c->lk.clk);
+ }
+
+ /* Pointers to these clocks are needed by code in clock.c */
+ api_ck_p = clk_get(NULL, "api_ck");
+ ck_dpll1_p = clk_get(NULL, "ck_dpll1");
+ ck_ref_p = clk_get(NULL, "ck_ref");
+
+ info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
+ if (info != NULL) {
+ if (!cpu_is_omap15xx())
+ crystal_type = info->system_clock_type;
+ }
+
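+	/*
+	 * OMAP730/850 boards use a 13 MHz reference crystal; on 16xx a
+	 * type-2 crystal means 19.2 MHz, otherwise the 12 MHz default
+	 * set above is kept.
+	 */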
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ ck_ref.rate = 13000000;
+#elif defined(CONFIG_ARCH_OMAP16XX)
+ if (crystal_type == 2)
+ ck_ref.rate = 19200000;
+#endif
+
+ pr_info("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: "
+ "0x%04x\n", omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
+ omap_readw(ARM_CKCTL));
+
+	/* We want to be in synchronous scalable mode */
+ omap_writew(0x1000, ARM_SYSST);
+
+#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
+ /* Use values set by bootloader. Determine PLL rate and recalculate
+ * dependent clocks as if kernel had changed PLL or divisors.
+ */
+ {
+ unsigned pll_ctl_val = omap_readw(DPLL_CTL);
+
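+		/*
+		 * DPLL_CTL as decoded below: bit 4 selects PLL vs. bypass
+		 * mode, bits 11:7 hold the PLL multiplier, bits 6:5 the PLL
+		 * divider minus one, and bits 3:2 the bypass divisor
+		 * (/1, /2 or /4).
+		 */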
+ ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
+ if (pll_ctl_val & 0x10) {
+ /* PLL enabled, apply multiplier and divisor */
+ if (pll_ctl_val & 0xf80)
+ ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
+ ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
+ } else {
+ /* PLL disabled, apply bypass divisor */
+ switch (pll_ctl_val & 0xc) {
+ case 0:
+ break;
+ case 0x4:
+ ck_dpll1.rate /= 2;
+ break;
+ default:
+ ck_dpll1.rate /= 4;
+ break;
+ }
+ }
+ }
+#else
+ /* Find the highest supported frequency and enable it */
+ if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+ printk(KERN_ERR "System frequencies not set. Check your config.\n");
+ /* Guess sane values (60MHz) */
+ omap_writew(0x2290, DPLL_CTL);
+ omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
+ ck_dpll1.rate = 60000000;
+ }
+#endif
+ propagate_rate(&ck_dpll1);
+ /* Cache rates for clocks connected to ck_ref (not dpll1) */
+ propagate_rate(&ck_ref);
+ printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
+ "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+ ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+ ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+ arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+
+#if defined(CONFIG_MACH_OMAP_PERSEUS2) || defined(CONFIG_MACH_OMAP_FSAMPLE)
+ /* Select slicer output as OMAP input clock */
+ omap_writew(omap_readw(OMAP7XX_PCC_UPLD_CTRL) & ~0x1, OMAP7XX_PCC_UPLD_CTRL);
+#endif
+
+ /* Amstrad Delta wants BCLK high when inactive */
+ if (machine_is_ams_delta())
+ omap_writel(omap_readl(ULPD_CLOCK_CTRL) |
+ (1 << SDW_MCLK_INV_BIT),
+ ULPD_CLOCK_CTRL);
+
+ /* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
+ /* (on 730, bit 13 must not be cleared) */
+ if (cpu_is_omap7xx())
+ omap_writew(omap_readw(ARM_CKCTL) & 0x2fff, ARM_CKCTL);
+ else
+ omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
+
+ /* Put DSP/MPUI into reset until needed */
+ omap_writew(0, ARM_RSTCT1);
+ omap_writew(1, ARM_RSTCT2);
+ omap_writew(0x400, ARM_IDLECT1);
+
+ /*
+ * According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
+ * of the ARM_IDLECT2 register must be set to zero. The power-on
+ * default value of this bit is one.
+ */
+ omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
+
+ /*
+	 * Only enable those clocks we will need; let the drivers
+	 * enable other clocks as necessary.
+ */
+ clk_enable(&armper_ck.clk);
+ clk_enable(&armxor_ck.clk);
+ clk_enable(&armtim_ck.clk); /* This should be done by timer code */
+
+ if (cpu_is_omap15xx())
+ clk_enable(&arm_gpio_ck);
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap1/i2c.c b/arch/arm/mach-omap1/i2c.c
new file mode 100644
index 00000000000..1bf4735e27a
--- /dev/null
+++ b/arch/arm/mach-omap1/i2c.c
@@ -0,0 +1,39 @@
+/*
+ * Helper module for board specific I2C bus registration
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <plat/i2c.h>
+#include <plat/mux.h>
+#include <plat/cpu.h>
+
+int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
+ struct i2c_board_info const *info,
+ unsigned len)
+{
+ if (cpu_is_omap7xx()) {
+ omap_cfg_reg(I2C_7XX_SDA);
+ omap_cfg_reg(I2C_7XX_SCL);
+ } else {
+ omap_cfg_reg(I2C_SDA);
+ omap_cfg_reg(I2C_SCL);
+ }
+
+ return omap_plat_register_i2c_bus(bus_id, clkrate, info, len);
+}
diff --git a/arch/arm/mach-omap1/include/mach/lcd_dma.h b/arch/arm/mach-omap1/include/mach/lcd_dma.h
new file mode 100644
index 00000000000..d7a457bbcb7
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/lcd_dma.h
@@ -0,0 +1,78 @@
+/*
+ * arch/arm/mach-omap1/include/mach/lcd_dma.h
+ *
+ * Extracted from arch/arm/plat-omap/include/plat/dma.h
+ * Copyright (C) 2003 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __MACH_OMAP1_LCD_DMA_H__
+#define __MACH_OMAP1_LCD_DMA_H__
+
+/* Hardware registers for LCD DMA */
+#define OMAP1510_DMA_LCD_BASE (0xfffedb00)
+#define OMAP1510_DMA_LCD_CTRL (OMAP1510_DMA_LCD_BASE + 0x00)
+#define OMAP1510_DMA_LCD_TOP_F1_L (OMAP1510_DMA_LCD_BASE + 0x02)
+#define OMAP1510_DMA_LCD_TOP_F1_U (OMAP1510_DMA_LCD_BASE + 0x04)
+#define OMAP1510_DMA_LCD_BOT_F1_L (OMAP1510_DMA_LCD_BASE + 0x06)
+#define OMAP1510_DMA_LCD_BOT_F1_U (OMAP1510_DMA_LCD_BASE + 0x08)
+
+#define OMAP1610_DMA_LCD_BASE (0xfffee300)
+#define OMAP1610_DMA_LCD_CSDP (OMAP1610_DMA_LCD_BASE + 0xc0)
+#define OMAP1610_DMA_LCD_CCR (OMAP1610_DMA_LCD_BASE + 0xc2)
+#define OMAP1610_DMA_LCD_CTRL (OMAP1610_DMA_LCD_BASE + 0xc4)
+#define OMAP1610_DMA_LCD_TOP_B1_L (OMAP1610_DMA_LCD_BASE + 0xc8)
+#define OMAP1610_DMA_LCD_TOP_B1_U (OMAP1610_DMA_LCD_BASE + 0xca)
+#define OMAP1610_DMA_LCD_BOT_B1_L (OMAP1610_DMA_LCD_BASE + 0xcc)
+#define OMAP1610_DMA_LCD_BOT_B1_U (OMAP1610_DMA_LCD_BASE + 0xce)
+#define OMAP1610_DMA_LCD_TOP_B2_L (OMAP1610_DMA_LCD_BASE + 0xd0)
+#define OMAP1610_DMA_LCD_TOP_B2_U (OMAP1610_DMA_LCD_BASE + 0xd2)
+#define OMAP1610_DMA_LCD_BOT_B2_L (OMAP1610_DMA_LCD_BASE + 0xd4)
+#define OMAP1610_DMA_LCD_BOT_B2_U (OMAP1610_DMA_LCD_BASE + 0xd6)
+#define OMAP1610_DMA_LCD_SRC_EI_B1 (OMAP1610_DMA_LCD_BASE + 0xd8)
+#define OMAP1610_DMA_LCD_SRC_FI_B1_L (OMAP1610_DMA_LCD_BASE + 0xda)
+#define OMAP1610_DMA_LCD_SRC_EN_B1 (OMAP1610_DMA_LCD_BASE + 0xe0)
+#define OMAP1610_DMA_LCD_SRC_FN_B1 (OMAP1610_DMA_LCD_BASE + 0xe4)
+#define OMAP1610_DMA_LCD_LCH_CTRL (OMAP1610_DMA_LCD_BASE + 0xea)
+#define OMAP1610_DMA_LCD_SRC_FI_B1_U (OMAP1610_DMA_LCD_BASE + 0xf4)
+
+/* LCD DMA block numbers */
+enum {
+ OMAP_LCD_DMA_B1_TOP,
+ OMAP_LCD_DMA_B1_BOTTOM,
+ OMAP_LCD_DMA_B2_TOP,
+ OMAP_LCD_DMA_B2_BOTTOM
+};
+
+/* LCD DMA functions */
+extern int omap_request_lcd_dma(void (*callback)(u16 status, void *data),
+ void *data);
+extern void omap_free_lcd_dma(void);
+extern void omap_setup_lcd_dma(void);
+extern void omap_enable_lcd_dma(void);
+extern void omap_stop_lcd_dma(void);
+extern void omap_set_lcd_dma_ext_controller(int external);
+extern void omap_set_lcd_dma_single_transfer(int single);
+extern void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
+ int data_type);
+extern void omap_set_lcd_dma_b1_rotation(int rotate);
+extern void omap_set_lcd_dma_b1_vxres(unsigned long vxres);
+extern void omap_set_lcd_dma_b1_mirror(int mirror);
+extern void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale);
+
+extern int omap_lcd_dma_running(void);
+
+#endif /* __MACH_OMAP1_LCD_DMA_H__ */
diff --git a/arch/arm/mach-omap1/include/mach/lcdc.h b/arch/arm/mach-omap1/include/mach/lcdc.h
new file mode 100644
index 00000000000..89bd703adaf
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/lcdc.h
@@ -0,0 +1,57 @@
+/*
+ * arch/arm/mach-omap1/include/mach/lcdc.h
+ *
+ * Extracted from drivers/video/omap/lcdc.c
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __MACH_LCDC_H__
+#define __MACH_LCDC_H__
+
+#define OMAP_LCDC_BASE 0xfffec000
+#define OMAP_LCDC_SIZE 256
+#define OMAP_LCDC_IRQ INT_LCD_CTRL
+
+#define OMAP_LCDC_CONTROL (OMAP_LCDC_BASE + 0x00)
+#define OMAP_LCDC_TIMING0 (OMAP_LCDC_BASE + 0x04)
+#define OMAP_LCDC_TIMING1 (OMAP_LCDC_BASE + 0x08)
+#define OMAP_LCDC_TIMING2 (OMAP_LCDC_BASE + 0x0c)
+#define OMAP_LCDC_STATUS (OMAP_LCDC_BASE + 0x10)
+#define OMAP_LCDC_SUBPANEL (OMAP_LCDC_BASE + 0x14)
+#define OMAP_LCDC_LINE_INT (OMAP_LCDC_BASE + 0x18)
+#define OMAP_LCDC_DISPLAY_STATUS (OMAP_LCDC_BASE + 0x1c)
+
+#define OMAP_LCDC_STAT_DONE (1 << 0)
+#define OMAP_LCDC_STAT_VSYNC (1 << 1)
+#define OMAP_LCDC_STAT_SYNC_LOST (1 << 2)
+#define OMAP_LCDC_STAT_ABC (1 << 3)
+#define OMAP_LCDC_STAT_LINE_INT (1 << 4)
+#define OMAP_LCDC_STAT_FUF (1 << 5)
+#define OMAP_LCDC_STAT_LOADED_PALETTE (1 << 6)
+
+#define OMAP_LCDC_CTRL_LCD_EN (1 << 0)
+#define OMAP_LCDC_CTRL_LCD_TFT (1 << 7)
+#define OMAP_LCDC_CTRL_LINE_IRQ_CLR_SEL (1 << 10)
+
+#define OMAP_LCDC_IRQ_VSYNC (1 << 2)
+#define OMAP_LCDC_IRQ_DONE (1 << 3)
+#define OMAP_LCDC_IRQ_LOADED_PALETTE (1 << 4)
+#define OMAP_LCDC_IRQ_LINE_NIRQ (1 << 5)
+#define OMAP_LCDC_IRQ_LINE (1 << 6)
+#define OMAP_LCDC_IRQ_MASK (((1 << 5) - 1) << 2)
+
+#endif /* __MACH_LCDC_H__ */
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 2a6d68aa348..d9b8d82530a 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -18,7 +18,8 @@
#include <plat/mux.h>
#include <plat/tc.h>
-extern int omap1_clk_init(void);
+#include "clock.h"
+
extern void omap_check_revision(void);
extern void omap_sram_init(void);
extern void omapfb_reserve_sdram(void);
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c
new file mode 100644
index 00000000000..3be11af687b
--- /dev/null
+++ b/arch/arm/mach-omap1/lcd_dma.c
@@ -0,0 +1,448 @@
+/*
+ * linux/arch/arm/mach-omap1/lcd_dma.c
+ *
+ * Extracted from arch/arm/plat-omap/dma.c
+ * Copyright (C) 2003 - 2008 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
+ * Graphics DMA and LCD DMA graphics transformations
+ * by Imre Deak <imre.deak@nokia.com>
+ * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
+ * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
+ * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * Support functions for the OMAP internal DMA channels.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/lcdc.h>
+#include <plat/dma.h>
+
+int omap_lcd_dma_running(void)
+{
+ /*
+	 * On OMAP1510, the internal LCD controller will start the transfer
+	 * when it gets enabled, so assume the DMA is running if the LCD is
+	 * enabled.
+ */
+ if (cpu_is_omap1510())
+ if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
+ return 1;
+
+ /* Check if LCD DMA is running */
+ if (cpu_is_omap16xx())
+ if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
+ return 1;
+
+ return 0;
+}
+
+static struct lcd_dma_info {
+ spinlock_t lock;
+ int reserved;
+ void (*callback)(u16 status, void *data);
+ void *cb_data;
+
+ int active;
+ unsigned long addr, size;
+ int rotate, data_type, xres, yres;
+ int vxres;
+ int mirror;
+ int xscale, yscale;
+ int ext_ctrl;
+ int src_port;
+ int single_transfer;
+} lcd_dma;
+
+void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
+ int data_type)
+{
+ lcd_dma.addr = addr;
+ lcd_dma.data_type = data_type;
+ lcd_dma.xres = fb_xres;
+ lcd_dma.yres = fb_yres;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1);
+
+void omap_set_lcd_dma_src_port(int port)
+{
+ lcd_dma.src_port = port;
+}
+
+void omap_set_lcd_dma_ext_controller(int external)
+{
+ lcd_dma.ext_ctrl = external;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
+
+void omap_set_lcd_dma_single_transfer(int single)
+{
+ lcd_dma.single_transfer = single;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
+
+void omap_set_lcd_dma_b1_rotation(int rotate)
+{
+ if (cpu_is_omap1510()) {
+ printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
+ BUG();
+ return;
+ }
+ lcd_dma.rotate = rotate;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
+
+void omap_set_lcd_dma_b1_mirror(int mirror)
+{
+ if (cpu_is_omap1510()) {
+ printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
+ BUG();
+ }
+ lcd_dma.mirror = mirror;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
+
+void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
+{
+ if (cpu_is_omap1510()) {
+		printk(KERN_ERR "DMA virtual resolution is not supported "
+ "in 1510 mode\n");
+ BUG();
+ }
+ lcd_dma.vxres = vxres;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
+
+void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
+{
+ if (cpu_is_omap1510()) {
+ printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
+ BUG();
+ }
+ lcd_dma.xscale = xscale;
+ lcd_dma.yscale = yscale;
+}
+EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale);
+
+static void set_b1_regs(void)
+{
+ unsigned long top, bottom;
+ int es;
+ u16 w;
+ unsigned long en, fn;
+ long ei, fi;
+ unsigned long vxres;
+ unsigned int xscale, yscale;
+
+ switch (lcd_dma.data_type) {
+ case OMAP_DMA_DATA_TYPE_S8:
+ es = 1;
+ break;
+ case OMAP_DMA_DATA_TYPE_S16:
+ es = 2;
+ break;
+ case OMAP_DMA_DATA_TYPE_S32:
+ es = 4;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres;
+ xscale = lcd_dma.xscale ? lcd_dma.xscale : 1;
+ yscale = lcd_dma.yscale ? lcd_dma.yscale : 1;
+ BUG_ON(vxres < lcd_dma.xres);
+
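+/*
+ * PIXADDR() computes the byte address of pixel (x, y) in the frame buffer,
+ * taking the virtual line width and the x/y scale factors into account.
+ * PIXSTEP() converts the distance between two pixel accesses into the
+ * index value (byte offset minus element size plus one) written to the
+ * element/frame index registers below.
+ */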
+#define PIXADDR(x, y) (lcd_dma.addr + \
+ ((y) * vxres * yscale + (x) * xscale) * es)
+#define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1)
+
+ switch (lcd_dma.rotate) {
+ case 0:
+ if (!lcd_dma.mirror) {
+ top = PIXADDR(0, 0);
+ bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
+ /* 1510 DMA requires the bottom address to be 2 more
+ * than the actual last memory access location. */
+ if (cpu_is_omap1510() &&
+ lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
+ bottom += 2;
+ ei = PIXSTEP(0, 0, 1, 0);
+ fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1);
+ } else {
+ top = PIXADDR(lcd_dma.xres - 1, 0);
+ bottom = PIXADDR(0, lcd_dma.yres - 1);
+ ei = PIXSTEP(1, 0, 0, 0);
+ fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1);
+ }
+ en = lcd_dma.xres;
+ fn = lcd_dma.yres;
+ break;
+ case 90:
+ if (!lcd_dma.mirror) {
+ top = PIXADDR(0, lcd_dma.yres - 1);
+ bottom = PIXADDR(lcd_dma.xres - 1, 0);
+ ei = PIXSTEP(0, 1, 0, 0);
+ fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1);
+ } else {
+ top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
+ bottom = PIXADDR(0, 0);
+ ei = PIXSTEP(0, 1, 0, 0);
+ fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1);
+ }
+ en = lcd_dma.yres;
+ fn = lcd_dma.xres;
+ break;
+ case 180:
+ if (!lcd_dma.mirror) {
+ top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
+ bottom = PIXADDR(0, 0);
+ ei = PIXSTEP(1, 0, 0, 0);
+ fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0);
+ } else {
+ top = PIXADDR(0, lcd_dma.yres - 1);
+ bottom = PIXADDR(lcd_dma.xres - 1, 0);
+ ei = PIXSTEP(0, 0, 1, 0);
+ fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0);
+ }
+ en = lcd_dma.xres;
+ fn = lcd_dma.yres;
+ break;
+ case 270:
+ if (!lcd_dma.mirror) {
+ top = PIXADDR(lcd_dma.xres - 1, 0);
+ bottom = PIXADDR(0, lcd_dma.yres - 1);
+ ei = PIXSTEP(0, 0, 0, 1);
+ fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0);
+ } else {
+ top = PIXADDR(0, 0);
+ bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
+ ei = PIXSTEP(0, 0, 0, 1);
+ fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0);
+ }
+ en = lcd_dma.yres;
+ fn = lcd_dma.xres;
+ break;
+ default:
+ BUG();
+ return; /* Suppress warning about uninitialized vars */
+ }
+
+ if (cpu_is_omap1510()) {
+ omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
+ omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
+ omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
+ omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
+
+ return;
+ }
+
+ /* 1610 regs */
+ omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
+ omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
+ omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
+ omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
+
+ omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
+ omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
+
+ w = omap_readw(OMAP1610_DMA_LCD_CSDP);
+ w &= ~0x03;
+ w |= lcd_dma.data_type;
+ omap_writew(w, OMAP1610_DMA_LCD_CSDP);
+
+ w = omap_readw(OMAP1610_DMA_LCD_CTRL);
+	/* Always set the source port as SDRAM for now */
+ w &= ~(0x03 << 6);
+ if (lcd_dma.callback != NULL)
+ w |= 1 << 1; /* Block interrupt enable */
+ else
+ w &= ~(1 << 1);
+ omap_writew(w, OMAP1610_DMA_LCD_CTRL);
+
+ if (!(lcd_dma.rotate || lcd_dma.mirror ||
+ lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale))
+ return;
+
+ w = omap_readw(OMAP1610_DMA_LCD_CCR);
+ /* Set the double-indexed addressing mode */
+ w |= (0x03 << 12);
+ omap_writew(w, OMAP1610_DMA_LCD_CCR);
+
+ omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
+ omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
+ omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
+}
+
+static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id)
+{
+ u16 w;
+
+ w = omap_readw(OMAP1610_DMA_LCD_CTRL);
+ if (unlikely(!(w & (1 << 3)))) {
+ printk(KERN_WARNING "Spurious LCD DMA IRQ\n");
+ return IRQ_NONE;
+ }
+ /* Ack the IRQ */
+ w |= (1 << 3);
+ omap_writew(w, OMAP1610_DMA_LCD_CTRL);
+ lcd_dma.active = 0;
+ if (lcd_dma.callback != NULL)
+ lcd_dma.callback(w, lcd_dma.cb_data);
+
+ return IRQ_HANDLED;
+}
+
+int omap_request_lcd_dma(void (*callback)(u16 status, void *data),
+ void *data)
+{
+ spin_lock_irq(&lcd_dma.lock);
+ if (lcd_dma.reserved) {
+ spin_unlock_irq(&lcd_dma.lock);
+ printk(KERN_ERR "LCD DMA channel already reserved\n");
+ BUG();
+ return -EBUSY;
+ }
+ lcd_dma.reserved = 1;
+ spin_unlock_irq(&lcd_dma.lock);
+ lcd_dma.callback = callback;
+ lcd_dma.cb_data = data;
+ lcd_dma.active = 0;
+ lcd_dma.single_transfer = 0;
+ lcd_dma.rotate = 0;
+ lcd_dma.vxres = 0;
+ lcd_dma.mirror = 0;
+ lcd_dma.xscale = 0;
+ lcd_dma.yscale = 0;
+ lcd_dma.ext_ctrl = 0;
+ lcd_dma.src_port = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_request_lcd_dma);
+
+void omap_free_lcd_dma(void)
+{
+ spin_lock(&lcd_dma.lock);
+ if (!lcd_dma.reserved) {
+ spin_unlock(&lcd_dma.lock);
+ printk(KERN_ERR "LCD DMA is not reserved\n");
+ BUG();
+ return;
+ }
+ if (!cpu_is_omap1510())
+ omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
+ OMAP1610_DMA_LCD_CCR);
+ lcd_dma.reserved = 0;
+ spin_unlock(&lcd_dma.lock);
+}
+EXPORT_SYMBOL(omap_free_lcd_dma);
+
+void omap_enable_lcd_dma(void)
+{
+ u16 w;
+
+ /*
+ * Set the Enable bit only if an external controller is
+ * connected. Otherwise the OMAP internal controller will
+ * start the transfer when it gets enabled.
+ */
+ if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+ return;
+
+ w = omap_readw(OMAP1610_DMA_LCD_CTRL);
+ w |= 1 << 8;
+ omap_writew(w, OMAP1610_DMA_LCD_CTRL);
+
+ lcd_dma.active = 1;
+
+ w = omap_readw(OMAP1610_DMA_LCD_CCR);
+ w |= 1 << 7;
+ omap_writew(w, OMAP1610_DMA_LCD_CCR);
+}
+EXPORT_SYMBOL(omap_enable_lcd_dma);
+
+void omap_setup_lcd_dma(void)
+{
+ BUG_ON(lcd_dma.active);
+ if (!cpu_is_omap1510()) {
+ /* Set some reasonable defaults */
+ omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
+ omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
+ omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
+ }
+ set_b1_regs();
+ if (!cpu_is_omap1510()) {
+ u16 w;
+
+ w = omap_readw(OMAP1610_DMA_LCD_CCR);
+ /*
+ * If DMA was already active set the end_prog bit to have
+ * the programmed register set loaded into the active
+ * register set.
+ */
+ w |= 1 << 11; /* End_prog */
+ if (!lcd_dma.single_transfer)
+ w |= (3 << 8); /* Auto_init, repeat */
+ omap_writew(w, OMAP1610_DMA_LCD_CCR);
+ }
+}
+EXPORT_SYMBOL(omap_setup_lcd_dma);
+
+void omap_stop_lcd_dma(void)
+{
+ u16 w;
+
+ lcd_dma.active = 0;
+ if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+ return;
+
+ w = omap_readw(OMAP1610_DMA_LCD_CCR);
+ w &= ~(1 << 7);
+ omap_writew(w, OMAP1610_DMA_LCD_CCR);
+
+ w = omap_readw(OMAP1610_DMA_LCD_CTRL);
+ w &= ~(1 << 8);
+ omap_writew(w, OMAP1610_DMA_LCD_CTRL);
+}
+EXPORT_SYMBOL(omap_stop_lcd_dma);
+
+static int __init omap_init_lcd_dma(void)
+{
+ int r;
+
+ if (cpu_is_omap16xx()) {
+ u16 w;
+
+		/* Clear this bit; if left set it would prevent OMAP sleep */
+ w = omap_readw(OMAP1610_DMA_LCD_CTRL);
+ w &= ~(1 << 8);
+ omap_writew(w, OMAP1610_DMA_LCD_CTRL);
+ }
+
+ spin_lock_init(&lcd_dma.lock);
+
+ r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
+ "LCD DMA", NULL);
+ if (r != 0)
+ printk(KERN_ERR "unable to request IRQ for LCD DMA "
+ "(error %d)\n", r);
+
+ return r;
+}
+
+arch_initcall(omap_init_lcd_dma);
+
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
index 785371e982f..07212cc621a 100644
--- a/arch/arm/mach-omap1/mux.c
+++ b/arch/arm/mach-omap1/mux.c
@@ -50,12 +50,18 @@ MUX_CFG_7XX("E3_7XX_KBC4", 13, 25, 0, 24, 1, 0)
MUX_CFG_7XX("AA17_7XX_USB_DM", 2, 21, 0, 20, 0, 0)
MUX_CFG_7XX("W16_7XX_USB_PU_EN", 2, 25, 0, 24, 0, 0)
-MUX_CFG_7XX("W17_7XX_USB_VBUSI", 2, 29, 0, 28, 0, 0)
+MUX_CFG_7XX("W17_7XX_USB_VBUSI", 2, 29, 6, 28, 1, 0)
+MUX_CFG_7XX("W18_7XX_USB_DMCK_OUT",3, 3, 1, 2, 0, 0)
+MUX_CFG_7XX("W19_7XX_USB_DCRST", 3, 7, 1, 6, 0, 0)
/* MMC Pins */
MUX_CFG_7XX("MMC_7XX_CMD", 2, 9, 0, 8, 1, 0)
MUX_CFG_7XX("MMC_7XX_CLK", 2, 13, 0, 12, 1, 0)
MUX_CFG_7XX("MMC_7XX_DAT0", 2, 17, 0, 16, 1, 0)
+
+/* I2C interface */
+MUX_CFG_7XX("I2C_7XX_SCL", 5, 1, 0, 0, 1, 0)
+MUX_CFG_7XX("I2C_7XX_SDA", 5, 5, 0, 0, 1, 0)
};
#define OMAP7XX_PINS_SZ ARRAY_SIZE(omap7xx_pins)
#else
diff --git a/arch/arm/mach-omap1/opp.h b/arch/arm/mach-omap1/opp.h
new file mode 100644
index 00000000000..07074d79adc
--- /dev/null
+++ b/arch/arm/mach-omap1/opp.h
@@ -0,0 +1,28 @@
+/*
+ * linux/arch/arm/mach-omap1/opp.h
+ *
+ * Copyright (C) 2004 - 2005 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP1_OPP_H
+#define __ARCH_ARM_MACH_OMAP1_OPP_H
+
+#include <linux/types.h>
+
+struct mpu_rate {
+ unsigned long rate;
+ unsigned long xtal;
+ unsigned long pll_rate;
+ __u16 ckctl_val;
+ __u16 dpllctl_val;
+};
+
+extern struct mpu_rate omap1_rate_table[];
+
+#endif
diff --git a/arch/arm/mach-omap1/opp_data.c b/arch/arm/mach-omap1/opp_data.c
new file mode 100644
index 00000000000..75a54651499
--- /dev/null
+++ b/arch/arm/mach-omap1/opp_data.c
@@ -0,0 +1,59 @@
+/*
+ * linux/arch/arm/mach-omap1/opp_data.c
+ *
+ * Copyright (C) 2004 - 2005 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "opp.h"
+
+/*-------------------------------------------------------------------------
+ * Omap1 MPU rate table
+ *-------------------------------------------------------------------------*/
+struct mpu_rate omap1_rate_table[] = {
+ /* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
+ * NOTE: Comment order here is different from bits in CKCTL value:
+ * armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
+ */
+#if defined(CONFIG_OMAP_ARM_216MHZ)
+ { 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_195MHZ)
+ { 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_192MHZ)
+ { 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
+ { 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
+ { 96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
+ { 48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/4/4/8/8/8 */
+ { 24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_182MHZ)
+ { 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_168MHZ)
+ { 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_150MHZ)
+ { 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
+#endif
+#if defined(CONFIG_OMAP_ARM_120MHZ)
+ { 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
+#endif
+#if defined(CONFIG_OMAP_ARM_96MHZ)
+ { 96000000, 12000000, 96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
+#endif
+#if defined(CONFIG_OMAP_ARM_60MHZ)
+ { 60000000, 12000000, 60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
+#endif
+#if defined(CONFIG_OMAP_ARM_30MHZ)
+ { 30000000, 12000000, 60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
+#endif
+ { 0, 0, 0, 0, 0 },
+};
+
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 7309aab305a..10eafa70a90 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -18,12 +18,25 @@ config ARCH_OMAP2430
config ARCH_OMAP34XX
bool "OMAP34xx Based System"
depends on ARCH_OMAP3
+ select USB_ARCH_HAS_EHCI
config ARCH_OMAP3430
bool "OMAP3430 support"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
select ARCH_OMAP_OTG
+config OMAP_PACKAGE_CBC
+ bool
+
+config OMAP_PACKAGE_CBB
+ bool
+
+config OMAP_PACKAGE_CUS
+ bool
+
+config OMAP_PACKAGE_CBP
+ bool
+
comment "OMAP Board Type"
depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
@@ -52,14 +65,17 @@ config MACH_OMAP_2430SDP
config MACH_OMAP3_BEAGLE
bool "OMAP3 BEAGLE board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OMAP_LDP
bool "OMAP3 LDP board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OVERO
bool "Gumstix Overo board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OMAP3EVM
bool "OMAP 3530 EVM board"
@@ -68,14 +84,22 @@ config MACH_OMAP3EVM
config MACH_OMAP3517EVM
bool "OMAP3517/ AM3517 EVM board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OMAP3_PANDORA
bool "OMAP3 Pandora"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
+
+config MACH_OMAP3_TOUCHBOOK
+ bool "OMAP3 Touch Book"
+ depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select BACKLIGHT_CLASS_DEVICE
config MACH_OMAP_3430SDP
bool "OMAP 3430 SDP board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_NOKIA_N800
bool
@@ -96,26 +120,33 @@ config MACH_NOKIA_N8X0
config MACH_NOKIA_RX51
bool "Nokia RX-51 board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OMAP_ZOOM2
bool "OMAP3 Zoom2 board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OMAP_ZOOM3
bool "OMAP3630 Zoom3 board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBP
config MACH_CM_T35
bool "CompuLab CM-T35 module"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CUS
+ select OMAP_MUX
config MACH_IGEP0020
bool "IGEP0020"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OMAP_3630SDP
bool "OMAP3630 SDP board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBP
config MACH_OMAP_4430SDP
bool "OMAP 4430 SDP board"
@@ -128,3 +159,15 @@ config OMAP3_EMU
help
Say Y here to enable debugging hardware of omap3
+config OMAP3_SDRC_AC_TIMING
+ bool "Enable SDRC AC timing register changes"
+ depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ default n
+ help
+ If you know that none of your system initiators will attempt to
+ access SDRAM during CORE DVFS, select Y here. This should boost
+ SDRAM performance at lower CORE OPPs. There are relatively few
+ users who will wish to say yes at this point - almost everyone will
+ wish to say no. Selecting yes without understanding what is
+	  going on could result in system crashes.
+
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 32548a4510c..b32678b848b 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -6,11 +6,14 @@
obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer-gp.o
omap-2-3-common = irq.o sdrc.o omap_hwmod.o
+omap-3-4-common = dpll.o
prcm-common = prcm.o powerdomain.o
-clock-common = clock.o clockdomain.o
+clock-common = clock.o clock_common_data.o clockdomain.o
obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(prcm-common) $(clock-common)
-obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(prcm-common) $(clock-common)
+obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(prcm-common) $(clock-common) \
+ $(omap-3-4-common)
+obj-$(CONFIG_ARCH_OMAP4) += $(omap-3-4-common) prcm.o clock.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
@@ -23,6 +26,9 @@ obj-$(CONFIG_ARCH_OMAP2420) += sram242x.o
obj-$(CONFIG_ARCH_OMAP2430) += sram243x.o
obj-$(CONFIG_ARCH_OMAP3) += sram34xx.o
+# Pin multiplexing
+obj-$(CONFIG_ARCH_OMAP3) += mux34xx.o
+
# SMS/SDRC
obj-$(CONFIG_ARCH_OMAP2) += sdrc2xxx.o
# obj-$(CONFIG_ARCH_OMAP3) += sdrc3xxx.o
@@ -41,8 +47,11 @@ obj-$(CONFIG_ARCH_OMAP3) += cm.o
obj-$(CONFIG_ARCH_OMAP4) += cm4xxx.o
# Clock framework
-obj-$(CONFIG_ARCH_OMAP2) += clock24xx.o
-obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o
+obj-$(CONFIG_ARCH_OMAP2) += clock2xxx.o clock2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP2420) += opp2420_data.o
+obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clock34xx_data.o
+obj-$(CONFIG_ARCH_OMAP2430) += opp2430_data.o
+obj-$(CONFIG_ARCH_OMAP4) += clock44xx.o clock44xx_data.o
# EMU peripherals
obj-$(CONFIG_OMAP3_EMU) += emu.o
@@ -55,6 +64,9 @@ iommu-$(CONFIG_ARCH_OMAP3) += omap3-iommu.o
obj-$(CONFIG_OMAP_IOMMU) += $(iommu-y)
+i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
+obj-y += $(i2c-omap-m) $(i2c-omap-y)
+
# Specific board support
obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
@@ -93,7 +105,8 @@ obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o \
mmc-twl4030.o
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \
mmc-twl4030.o
-
+obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \
+ mmc-twl4030.o
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o
obj-$(CONFIG_MACH_OMAP3517EVM) += board-am3517evm.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index db9374bc528..e508904fb67 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -19,7 +19,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/delay.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 491364e44c7..c90b0d0b192 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -20,7 +20,7 @@
#include <linux/input/matrix_keypad.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
#include <linux/io.h>
#include <linux/gpio.h>
@@ -31,16 +31,17 @@
#include <asm/mach/map.h>
#include <plat/mcspi.h>
-#include <plat/mux.h>
#include <plat/board.h>
#include <plat/usb.h>
#include <plat/common.h>
#include <plat/dma.h>
#include <plat/gpmc.h>
+#include <plat/display.h>
#include <plat/control.h>
#include <plat/gpmc-smc91x.h>
+#include "mux.h"
#include "sdram-qimonda-hyb18m512160af-6.h"
#include "mmc-twl4030.h"
@@ -152,31 +153,152 @@ static struct spi_board_info sdp3430_spi_board_info[] __initdata = {
},
};
-static struct platform_device sdp3430_lcd_device = {
- .name = "sdp2430_lcd",
- .id = -1,
+
+#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 8
+#define SDP3430_LCD_PANEL_ENABLE_GPIO 5
+
+static unsigned backlight_gpio;
+static unsigned enable_gpio;
+static int lcd_enabled;
+static int dvi_enabled;
+
+static void __init sdp3430_display_init(void)
+{
+ int r;
+
+ enable_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO;
+ backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO;
+
+ r = gpio_request(enable_gpio, "LCD reset");
+ if (r) {
+ printk(KERN_ERR "failed to get LCD reset GPIO\n");
+ goto err0;
+ }
+
+ r = gpio_request(backlight_gpio, "LCD Backlight");
+ if (r) {
+ printk(KERN_ERR "failed to get LCD backlight GPIO\n");
+ goto err1;
+ }
+
+ gpio_direction_output(enable_gpio, 0);
+ gpio_direction_output(backlight_gpio, 0);
+
+ return;
+err1:
+ gpio_free(enable_gpio);
+err0:
+ return;
+}
+
+static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev)
+{
+ if (dvi_enabled) {
+ printk(KERN_ERR "cannot enable LCD, DVI is enabled\n");
+ return -EINVAL;
+ }
+
+ gpio_direction_output(enable_gpio, 1);
+ gpio_direction_output(backlight_gpio, 1);
+
+ lcd_enabled = 1;
+
+ return 0;
+}
+
+static void sdp3430_panel_disable_lcd(struct omap_dss_device *dssdev)
+{
+ lcd_enabled = 0;
+
+ gpio_direction_output(enable_gpio, 0);
+ gpio_direction_output(backlight_gpio, 0);
+}
+
+static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev)
+{
+ if (lcd_enabled) {
+ printk(KERN_ERR "cannot enable DVI, LCD is enabled\n");
+ return -EINVAL;
+ }
+
+ dvi_enabled = 1;
+
+ return 0;
+}
+
+static void sdp3430_panel_disable_dvi(struct omap_dss_device *dssdev)
+{
+ dvi_enabled = 0;
+}
+
+static int sdp3430_panel_enable_tv(struct omap_dss_device *dssdev)
+{
+ return 0;
+}
+
+static void sdp3430_panel_disable_tv(struct omap_dss_device *dssdev)
+{
+}
+
+
+static struct omap_dss_device sdp3430_lcd_device = {
+ .name = "lcd",
+ .driver_name = "sharp_ls_panel",
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .phy.dpi.data_lines = 16,
+ .platform_enable = sdp3430_panel_enable_lcd,
+ .platform_disable = sdp3430_panel_disable_lcd,
};
-static struct regulator_consumer_supply sdp3430_vdac_supply = {
- .supply = "vdac",
- .dev = &sdp3430_lcd_device.dev,
+static struct omap_dss_device sdp3430_dvi_device = {
+ .name = "dvi",
+ .driver_name = "generic_panel",
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .phy.dpi.data_lines = 24,
+ .platform_enable = sdp3430_panel_enable_dvi,
+ .platform_disable = sdp3430_panel_disable_dvi,
};
-static struct regulator_consumer_supply sdp3430_vdvi_supply = {
- .supply = "vdvi",
- .dev = &sdp3430_lcd_device.dev,
+static struct omap_dss_device sdp3430_tv_device = {
+ .name = "tv",
+ .driver_name = "venc",
+ .type = OMAP_DISPLAY_TYPE_VENC,
+ .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .platform_enable = sdp3430_panel_enable_tv,
+ .platform_disable = sdp3430_panel_disable_tv,
};
-static struct platform_device *sdp3430_devices[] __initdata = {
+
+static struct omap_dss_device *sdp3430_dss_devices[] = {
&sdp3430_lcd_device,
+ &sdp3430_dvi_device,
+ &sdp3430_tv_device,
+};
+
+static struct omap_dss_board_info sdp3430_dss_data = {
+ .num_devices = ARRAY_SIZE(sdp3430_dss_devices),
+ .devices = sdp3430_dss_devices,
+ .default_device = &sdp3430_lcd_device,
};
-static struct omap_lcd_config sdp3430_lcd_config __initdata = {
- .ctrl_name = "internal",
+static struct platform_device sdp3430_dss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &sdp3430_dss_data,
+ },
+};
+
+static struct regulator_consumer_supply sdp3430_vdda_dac_supply = {
+ .supply = "vdda_dac",
+ .dev = &sdp3430_dss_device.dev,
+};
+
+static struct platform_device *sdp3430_devices[] __initdata = {
+ &sdp3430_dss_device,
};
static struct omap_board_config_kernel sdp3430_config[] __initdata = {
- { OMAP_TAG_LCD, &sdp3430_lcd_config },
};
static void __init omap_3430sdp_init_irq(void)
@@ -392,22 +514,34 @@ static struct regulator_init_data sdp3430_vdac = {
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
- .consumer_supplies = &sdp3430_vdac_supply,
+ .consumer_supplies = &sdp3430_vdda_dac_supply,
};
/* VPLL2 for digital video outputs */
+static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
+ {
+ .supply = "vdvi",
+ .dev = &sdp3430_lcd_device.dev,
+ },
+ {
+ .supply = "vdds_dsi",
+ .dev = &sdp3430_dss_device.dev,
+ }
+};
+
static struct regulator_init_data sdp3430_vpll2 = {
.constraints = {
.name = "VDVI",
.min_uV = 1800000,
.max_uV = 1800000,
+ .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
- .num_consumer_supplies = 1,
- .consumer_supplies = &sdp3430_vdvi_supply,
+ .num_consumer_supplies = ARRAY_SIZE(sdp3430_vpll2_supplies),
+ .consumer_supplies = sdp3430_vpll2_supplies,
};
static struct twl4030_codec_audio_data sdp3430_audio = {
@@ -491,7 +625,9 @@ static inline void board_smc91x_init(void)
static void enable_board_wakeup_source(void)
{
- omap_cfg_reg(AF26_34XX_SYS_NIRQ); /* T2 interrupt line (keypad) */
+ /* T2 interrupt line (keypad) */
+ omap_mux_init_signal("sys_nirq",
+ OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
}
static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
@@ -506,8 +642,17 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.reset_gpio_port[2] = -EINVAL
};
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap_3430sdp_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap3430_i2c_init();
platform_add_devices(sdp3430_devices, ARRAY_SIZE(sdp3430_devices));
if (omap_rev() > OMAP3430_REV_ES1_0)
@@ -521,6 +666,7 @@ static void __init omap_3430sdp_init(void)
omap_serial_init();
usb_musb_init();
board_smc91x_init();
+ sdp3430_display_init();
enable_board_wakeup_source();
usb_ehci_init(&ehci_pdata);
}
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 348b70b9833..73905963281 100755
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -23,6 +23,7 @@
#include <mach/board-zoom.h>
+#include "mux.h"
#include "sdram-hynix-h8mbx00u0mer-0em.h"
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
@@ -48,7 +49,9 @@ static inline void board_smc91x_init(void)
static void enable_board_wakeup_source(void)
{
- omap_cfg_reg(AF26_34XX_SYS_NIRQ); /* T2 interrupt line (keypad) */
+ /* T2 interrupt line (keypad) */
+ omap_mux_init_signal("sys_nirq",
+ OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
}
static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
@@ -82,8 +85,17 @@ static void __init omap_sdp_init_irq(void)
omap_gpio_init();
}
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap_sdp_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
zoom_peripherals_init();
board_smc91x_init();
enable_board_wakeup_source();
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 415a13d767c..b4e6eca0e8a 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -30,6 +30,8 @@
#include <plat/common.h>
#include <plat/usb.h>
+#include "mux.h"
+
/*
* Board initialization
*/
@@ -60,8 +62,17 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = {
.reset_gpio_port[2] = -EINVAL
};
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init am3517_evm_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
platform_add_devices(am3517_evm_devices,
ARRAY_SIZE(am3517_evm_devices));
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 8a2ce77a02e..fbbd68d69cc 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -26,6 +26,7 @@
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -120,6 +121,12 @@ static void __init apollon_flash_init(void)
apollon_flash_resource[0].end = base + SZ_128K - 1;
}
+static struct smc91x_platdata appolon_smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource apollon_smc91x_resources[] = {
[0] = {
.flags = IORESOURCE_MEM,
@@ -134,6 +141,9 @@ static struct resource apollon_smc91x_resources[] = {
static struct platform_device apollon_smc91x_device = {
.name = "smc91x",
.id = -1,
+ .dev = {
+ .platform_data = &appolon_smc91x_info,
+ },
.num_resources = ARRAY_SIZE(apollon_smc91x_resources),
.resource = apollon_smc91x_resources,
};
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index 22c45290db6..2626a9f8a73 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -29,7 +29,7 @@
#include <linux/gpio.h>
#include <linux/i2c/at24.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
@@ -38,13 +38,13 @@
#include <plat/board.h>
#include <plat/common.h>
-#include <plat/mux.h>
#include <plat/nand.h>
#include <plat/gpmc.h>
#include <plat/usb.h>
#include <mach/hardware.h>
+#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mmc-twl4030.h"
@@ -482,8 +482,102 @@ static void __init cm_t35_map_io(void)
omap2_map_common_io();
}
+static struct omap_board_mux board_mux[] __initdata = {
+ /* nCS and IRQ for CM-T35 ethernet */
+ OMAP3_MUX(GPMC_NCS5, OMAP_MUX_MODE0),
+ OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
+
+ /* nCS and IRQ for SB-T35 ethernet */
+ OMAP3_MUX(GPMC_NCS4, OMAP_MUX_MODE0),
+ OMAP3_MUX(GPMC_WAIT3, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP),
+
+ /* PENDOWN GPIO */
+ OMAP3_MUX(GPMC_NCS6, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
+ /* mUSB */
+ OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* MMC 2 */
+ OMAP3_MUX(SDMMC2_DAT4, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT5, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT6, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(SDMMC2_DAT7, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+
+ /* McSPI 1 */
+ OMAP3_MUX(MCSPI1_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_SIMO, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_SOMI, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCSPI1_CS0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+
+ /* McSPI 4 */
+ OMAP3_MUX(MCBSP1_CLKR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_DX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_DR, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP1_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT_PULLUP),
+
+ /* McBSP 2 */
+ OMAP3_MUX(MCBSP2_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_CLKX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_DR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(MCBSP2_DX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* serial ports */
+ OMAP3_MUX(MCBSP3_CLKX, OMAP_MUX_MODE1 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(MCBSP3_FSX, OMAP_MUX_MODE1 | OMAP_PIN_INPUT),
+ OMAP3_MUX(UART1_TX, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(UART1_RX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ /* DSS */
+ OMAP3_MUX(DSS_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_HSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_VSYNC, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_ACBIAS, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA8, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA9, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA10, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA11, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA12, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA13, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA14, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA15, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+
+ /* TPS IRQ */
+ OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_WAKEUP_EN | \
+ OMAP_PIN_INPUT_PULLUP),
+
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
static void __init cm_t35_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
omap_serial_init();
cm_t35_init_i2c();
cm_t35_init_nand();
@@ -492,8 +586,6 @@ static void __init cm_t35_init(void)
cm_t35_init_led();
usb_musb_init();
-
- omap_cfg_reg(AF26_34XX_SYS_NIRQ);
}
MACHINE_START(CM_T35, "Compulab CM-T35")
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index fa62e80c13b..117b8fd7e3a 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -19,7 +19,7 @@
#include <linux/interrupt.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -27,9 +27,9 @@
#include <plat/board.h>
#include <plat/common.h>
#include <plat/gpmc.h>
-#include <plat/mux.h>
#include <plat/usb.h>
+#include "mux.h"
#include "mmc-twl4030.h"
#define IGEP2_SMSC911X_CS 5
@@ -203,8 +203,17 @@ static int __init igep2_i2c_init(void)
return 0;
}
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init igep2_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
igep2_i2c_init();
omap_serial_init();
usb_musb_init();
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index c062238fe88..995d4a2b2df 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -24,7 +24,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
@@ -43,6 +43,7 @@
#include <plat/control.h>
#include <plat/usb.h>
+#include "mux.h"
#include "mmc-twl4030.h"
#define LDP_SMSC911X_CS 1
@@ -374,8 +375,17 @@ static struct platform_device *ldp_devices[] __initdata = {
&ldp_gpio_keys_device,
};
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap_ldp_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap_i2c_init();
platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices));
ts_gpio = 54;
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 41480bd0e58..231cb4ec184 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -29,7 +29,7 @@
#include <linux/mtd/nand.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
@@ -41,10 +41,10 @@
#include <plat/common.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
-#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/timer-gp.h>
+#include "mux.h"
#include "mmc-twl4030.h"
#define GPMC_CS0_BASE 0x60
@@ -140,10 +140,10 @@ static int beagle_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
if (system_rev >= 0x20 && system_rev <= 0x34301000) {
- omap_cfg_reg(AG9_34XX_GPIO23);
+ omap_mux_init_gpio(23, OMAP_PIN_INPUT);
mmc[0].gpio_wp = 23;
} else {
- omap_cfg_reg(AH8_34XX_GPIO29);
+ omap_mux_init_gpio(29, OMAP_PIN_INPUT);
}
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
mmc[0].gpio_cd = gpio + 0;
@@ -422,14 +422,23 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.reset_gpio_port[2] = -EINVAL
};
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap3_beagle_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap3_beagle_i2c_init();
platform_add_devices(omap3_beagle_devices,
ARRAY_SIZE(omap3_beagle_devices));
omap_serial_init();
- omap_cfg_reg(J25_34XX_GPIO170);
+ omap_mux_init_gpio(170, OMAP_PIN_INPUT);
gpio_request(170, "DVI_nPD");
/* REVISIT leave DVI powered down until it's needed ... */
gpio_direction_output(170, true);
@@ -439,8 +448,8 @@ static void __init omap3_beagle_init(void)
omap3beagle_flash_init();
/* Ensure SDRC pins are mux'd for self-refresh */
- omap_cfg_reg(H16_34XX_SDRC_CKE0);
- omap_cfg_reg(H17_34XX_SDRC_CKE1);
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
}
static void __init omap3_beagle_map_io(void)
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 5efc2e9068d..34de1785157 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -26,7 +26,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/usb/otg.h>
#include <linux/smsc911x.h>
@@ -38,11 +38,11 @@
#include <asm/mach/map.h>
#include <plat/board.h>
-#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/common.h>
#include <plat/mcspi.h>
+#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mmc-twl4030.h"
@@ -223,7 +223,7 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
- omap_cfg_reg(L8_34XX_GPIO63);
+ omap_mux_init_gpio(63, OMAP_PIN_INPUT);
mmc[0].gpio_cd = gpio + 0;
twl4030_mmc_init(mmc);
@@ -422,9 +422,18 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.reset_gpio_port[2] = -EINVAL
};
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap3_evm_init(void)
{
omap3_evm_get_revision();
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap3_evm_i2c_init();
@@ -440,24 +449,24 @@ static void __init omap3_evm_init(void)
#endif
if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
/* enable EHCI VBUS using GPIO22 */
- omap_cfg_reg(AF9_34XX_GPIO22);
+ omap_mux_init_gpio(22, OMAP_PIN_INPUT_PULLUP);
gpio_request(OMAP3_EVM_EHCI_VBUS, "enable EHCI VBUS");
gpio_direction_output(OMAP3_EVM_EHCI_VBUS, 0);
gpio_set_value(OMAP3_EVM_EHCI_VBUS, 1);
/* Select EHCI port on main board */
- omap_cfg_reg(U3_34XX_GPIO61);
+ omap_mux_init_gpio(61, OMAP_PIN_INPUT_PULLUP);
gpio_request(OMAP3_EVM_EHCI_SELECT, "select EHCI port");
gpio_direction_output(OMAP3_EVM_EHCI_SELECT, 0);
gpio_set_value(OMAP3_EVM_EHCI_SELECT, 0);
/* setup EHCI phy reset config */
- omap_cfg_reg(AH14_34XX_GPIO21);
+ omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP);
ehci_pdata.reset_gpio_port[1] = 21;
} else {
/* setup EHCI phy reset on MDC */
- omap_cfg_reg(AF4_34XX_GPIO135_OUT);
+ omap_mux_init_gpio(135, OMAP_PIN_OUTPUT);
ehci_pdata.reset_gpio_port[1] = 135;
}
usb_musb_init();
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 2db5ba5b3bf..ef17cf1ab6d 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -24,7 +24,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
@@ -40,8 +40,8 @@
#include <mach/hardware.h>
#include <plat/mcspi.h>
#include <plat/usb.h>
-#include <plat/mux.h>
+#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mmc-twl4030.h"
@@ -98,10 +98,10 @@ static struct gpio_keys_button pandora_gpio_keys[] = {
GPIO_BUTTON_LOW(103, KEY_DOWN, "down"),
GPIO_BUTTON_LOW(96, KEY_LEFT, "left"),
GPIO_BUTTON_LOW(98, KEY_RIGHT, "right"),
- GPIO_BUTTON_LOW(111, BTN_A, "a"),
- GPIO_BUTTON_LOW(106, BTN_B, "b"),
- GPIO_BUTTON_LOW(109, BTN_X, "x"),
- GPIO_BUTTON_LOW(101, BTN_Y, "y"),
+ GPIO_BUTTON_LOW(109, KEY_KP1, "game 1"),
+ GPIO_BUTTON_LOW(111, KEY_KP2, "game 2"),
+ GPIO_BUTTON_LOW(106, KEY_KP3, "game 3"),
+ GPIO_BUTTON_LOW(101, KEY_KP4, "game 4"),
GPIO_BUTTON_LOW(102, BTN_TL, "l"),
GPIO_BUTTON_LOW(97, BTN_TL2, "l2"),
GPIO_BUTTON_LOW(105, BTN_TR, "r"),
@@ -315,7 +315,7 @@ static int __init omap3pandora_i2c_init(void)
omap_register_i2c_bus(1, 2600, omap3pandora_i2c_boardinfo,
ARRAY_SIZE(omap3pandora_i2c_boardinfo));
/* i2c2 pins are not connected */
- omap_register_i2c_bus(3, 400, NULL, 0);
+ omap_register_i2c_bus(3, 100, NULL, 0);
return 0;
}
@@ -368,23 +368,8 @@ static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
}
};
-static struct platform_device omap3pandora_lcd_device = {
- .name = "pandora_lcd",
- .id = -1,
-};
-
-static struct omap_lcd_config omap3pandora_lcd_config __initdata = {
- .ctrl_name = "internal",
-};
-
-static struct omap_board_config_kernel omap3pandora_config[] __initdata = {
- { OMAP_TAG_LCD, &omap3pandora_lcd_config },
-};
-
static void __init omap3pandora_init_irq(void)
{
- omap_board_config = omap3pandora_config;
- omap_board_config_size = ARRAY_SIZE(omap3pandora_config);
omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
omap_init_irq();
@@ -392,7 +377,6 @@ static void __init omap3pandora_init_irq(void)
}
static struct platform_device *omap3pandora_devices[] __initdata = {
- &omap3pandora_lcd_device,
&pandora_leds_gpio,
&pandora_keys_gpio,
};
@@ -409,8 +393,17 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.reset_gpio_port[2] = -EINVAL
};
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap3pandora_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap3pandora_i2c_init();
platform_add_devices(omap3pandora_devices,
ARRAY_SIZE(omap3pandora_devices));
@@ -423,8 +416,8 @@ static void __init omap3pandora_init(void)
usb_musb_init();
/* Ensure SDRC pins are mux'd for self-refresh */
- omap_cfg_reg(H16_34XX_SDRC_CKE0);
- omap_cfg_reg(H17_34XX_SDRC_CKE1);
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
}
static void __init omap3pandora_map_io(void)
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
new file mode 100644
index 00000000000..fe3d22cb245
--- /dev/null
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -0,0 +1,572 @@
+/*
+ * linux/arch/arm/mach-omap2/board-omap3touchbook.c
+ *
+ * Copyright (C) 2009 Always Innovating
+ *
+ * Modified from mach-omap2/board-omap3beagleboard.c
+ *
+ * Initial code: Grégoire Gentil, Tim Yamin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand.h>
+
+#include <plat/mcspi.h>
+#include <linux/spi/spi.h>
+
+#include <linux/spi/ads7846.h>
+
+#include <linux/regulator/machine.h>
+#include <linux/i2c/twl.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/gpmc.h>
+#include <plat/nand.h>
+#include <plat/usb.h>
+#include <plat/timer-gp.h>
+
+#include "mux.h"
+#include "mmc-twl4030.h"
+
+#include <asm/setup.h>
+
+#define GPMC_CS0_BASE 0x60
+#define GPMC_CS_SIZE 0x30
+
+#define NAND_BLOCK_SIZE SZ_128K
+
+#define OMAP3_AC_GPIO 136
+#define OMAP3_TS_GPIO 162
+#define TB_BL_PWM_TIMER 9
+#define TB_KILL_POWER_GPIO 168
+
+unsigned long touchbook_revision;
+
+static struct mtd_partition omap3touchbook_nand_partitions[] = {
+ /* All the partition sizes are listed in terms of NAND block size */
+ {
+ .name = "X-Loader",
+ .offset = 0,
+ .size = 4 * NAND_BLOCK_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "U-Boot",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
+ .size = 15 * NAND_BLOCK_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "U-Boot Env",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
+ .size = 1 * NAND_BLOCK_SIZE,
+ },
+ {
+ .name = "Kernel",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
+ .size = 32 * NAND_BLOCK_SIZE,
+ },
+ {
+ .name = "File System",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct omap_nand_platform_data omap3touchbook_nand_data = {
+ .options = NAND_BUSWIDTH_16,
+ .parts = omap3touchbook_nand_partitions,
+ .nr_parts = ARRAY_SIZE(omap3touchbook_nand_partitions),
+ .dma_channel = -1, /* disable DMA in OMAP NAND driver */
+ .nand_setup = NULL,
+ .dev_ready = NULL,
+};
+
+static struct resource omap3touchbook_nand_resource = {
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device omap3touchbook_nand_device = {
+ .name = "omap2-nand",
+ .id = -1,
+ .dev = {
+ .platform_data = &omap3touchbook_nand_data,
+ },
+ .num_resources = 1,
+ .resource = &omap3touchbook_nand_resource,
+};
+
+#include "sdram-micron-mt46h32m32lf-6.h"
+
+static struct twl4030_hsmmc_info mmc[] = {
+ {
+ .mmc = 1,
+ .wires = 8,
+ .gpio_wp = 29,
+ },
+ {} /* Terminator */
+};
+
+static struct platform_device omap3_touchbook_lcd_device = {
+ .name = "omap3touchbook_lcd",
+ .id = -1,
+};
+
+static struct omap_lcd_config omap3_touchbook_lcd_config __initdata = {
+ .ctrl_name = "internal",
+};
+
+static struct regulator_consumer_supply touchbook_vmmc1_supply = {
+ .supply = "vmmc",
+};
+
+static struct regulator_consumer_supply touchbook_vsim_supply = {
+ .supply = "vmmc_aux",
+};
+
+static struct gpio_led gpio_leds[];
+
+static int touchbook_twl_gpio_setup(struct device *dev,
+ unsigned gpio, unsigned ngpio)
+{
+ if (system_rev >= 0x20 && system_rev <= 0x34301000) {
+ omap_mux_init_gpio(23, OMAP_PIN_INPUT);
+ mmc[0].gpio_wp = 23;
+ } else {
+ omap_mux_init_gpio(29, OMAP_PIN_INPUT);
+ }
+ /* gpio + 0 is "mmc0_cd" (input/IRQ) */
+ mmc[0].gpio_cd = gpio + 0;
+ twl4030_mmc_init(mmc);
+
+ /* link regulators to MMC adapters */
+ touchbook_vmmc1_supply.dev = mmc[0].dev;
+ touchbook_vsim_supply.dev = mmc[0].dev;
+
+ /* REVISIT: need ehci-omap hooks for external VBUS
+ * power switch and overcurrent detect
+ */
+
+ gpio_request(gpio + 1, "EHCI_nOC");
+ gpio_direction_input(gpio + 1);
+
+ /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
+ gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
+ gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+
+ /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
+ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
+
+ return 0;
+}
+
+static struct twl4030_gpio_platform_data touchbook_gpio_data = {
+ .gpio_base = OMAP_MAX_GPIO_LINES,
+ .irq_base = TWL4030_GPIO_IRQ_BASE,
+ .irq_end = TWL4030_GPIO_IRQ_END,
+ .use_leds = true,
+ .pullups = BIT(1),
+ .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
+ | BIT(15) | BIT(16) | BIT(17),
+ .setup = touchbook_twl_gpio_setup,
+};
+
+static struct regulator_consumer_supply touchbook_vdac_supply = {
+ .supply = "vdac",
+ .dev = &omap3_touchbook_lcd_device.dev,
+};
+
+static struct regulator_consumer_supply touchbook_vdvi_supply = {
+ .supply = "vdvi",
+ .dev = &omap3_touchbook_lcd_device.dev,
+};
+
+/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
+static struct regulator_init_data touchbook_vmmc1 = {
+ .constraints = {
+ .min_uV = 1850000,
+ .max_uV = 3150000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &touchbook_vmmc1_supply,
+};
+
+/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
+static struct regulator_init_data touchbook_vsim = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &touchbook_vsim_supply,
+};
+
+/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
+static struct regulator_init_data touchbook_vdac = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &touchbook_vdac_supply,
+};
+
+/* VPLL2 for digital video outputs */
+static struct regulator_init_data touchbook_vpll2 = {
+ .constraints = {
+ .name = "VDVI",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &touchbook_vdvi_supply,
+};
+
+static struct twl4030_usb_data touchbook_usb_data = {
+ .usb_mode = T2_USB_MODE_ULPI,
+};
+
+static struct twl4030_codec_audio_data touchbook_audio_data = {
+ .audio_mclk = 26000000,
+};
+
+static struct twl4030_codec_data touchbook_codec_data = {
+ .audio_mclk = 26000000,
+ .audio = &touchbook_audio_data,
+};
+
+static struct twl4030_platform_data touchbook_twldata = {
+ .irq_base = TWL4030_IRQ_BASE,
+ .irq_end = TWL4030_IRQ_END,
+
+ /* platform_data for children goes here */
+ .usb = &touchbook_usb_data,
+ .gpio = &touchbook_gpio_data,
+ .codec = &touchbook_codec_data,
+ .vmmc1 = &touchbook_vmmc1,
+ .vsim = &touchbook_vsim,
+ .vdac = &touchbook_vdac,
+ .vpll2 = &touchbook_vpll2,
+};
+
+static struct i2c_board_info __initdata touchbook_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("twl4030", 0x48),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = INT_34XX_SYS_NIRQ,
+ .platform_data = &touchbook_twldata,
+ },
+};
+
+static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("bq27200", 0x55),
+ },
+};
+
+static int __init omap3_touchbook_i2c_init(void)
+{
+ /* Standard TouchBook bus */
+ omap_register_i2c_bus(1, 2600, touchbook_i2c_boardinfo,
+ ARRAY_SIZE(touchbook_i2c_boardinfo));
+
+ /* Additional TouchBook bus */
+ omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo,
+ ARRAY_SIZE(touchBook_i2c_boardinfo));
+
+ return 0;
+}
+
+static void __init omap3_ads7846_init(void)
+{
+ if (gpio_request(OMAP3_TS_GPIO, "ads7846_pen_down")) {
+ printk(KERN_ERR "Failed to request GPIO %d for "
+ "ads7846 pen down IRQ\n", OMAP3_TS_GPIO);
+ return;
+ }
+
+ gpio_direction_input(OMAP3_TS_GPIO);
+ omap_set_gpio_debounce(OMAP3_TS_GPIO, 1);
+ omap_set_gpio_debounce_time(OMAP3_TS_GPIO, 0xa);
+}
+
+static struct ads7846_platform_data ads7846_config = {
+ .x_min = 100,
+ .y_min = 265,
+ .x_max = 3950,
+ .y_max = 3750,
+ .x_plate_ohms = 40,
+ .pressure_max = 255,
+ .debounce_max = 10,
+ .debounce_tol = 5,
+ .debounce_rep = 1,
+ .gpio_pendown = OMAP3_TS_GPIO,
+ .keep_vref_on = 1,
+};
+
+static struct omap2_mcspi_device_config ads7846_mcspi_config = {
+ .turbo_mode = 0,
+ .single_channel = 1, /* 0: slave, 1: master */
+};
+
+static struct spi_board_info omap3_ads7846_spi_board_info[] __initdata = {
+ {
+ .modalias = "ads7846",
+ .bus_num = 4,
+ .chip_select = 0,
+ .max_speed_hz = 1500000,
+ .controller_data = &ads7846_mcspi_config,
+ .irq = OMAP_GPIO_IRQ(OMAP3_TS_GPIO),
+ .platform_data = &ads7846_config,
+ }
+};
+
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "touchbook::usr0",
+ .default_trigger = "heartbeat",
+ .gpio = 150,
+ },
+ {
+ .name = "touchbook::usr1",
+ .default_trigger = "mmc0",
+ .gpio = 149,
+ },
+ {
+ .name = "touchbook::pmu_stat",
+ .gpio = -EINVAL, /* gets replaced */
+ .active_low = true,
+ },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds_gpio = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ },
+};
+
+static struct gpio_keys_button gpio_buttons[] = {
+ {
+ .code = BTN_EXTRA,
+ .gpio = 7,
+ .desc = "user",
+ .wakeup = 1,
+ },
+ {
+ .code = KEY_POWER,
+ .gpio = 183,
+ .desc = "power",
+ .wakeup = 1,
+ },
+};
+
+static struct gpio_keys_platform_data gpio_key_info = {
+ .buttons = gpio_buttons,
+ .nbuttons = ARRAY_SIZE(gpio_buttons),
+};
+
+static struct platform_device keys_gpio = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_key_info,
+ },
+};
+
+static struct omap_board_config_kernel omap3_touchbook_config[] __initdata = {
+ { OMAP_TAG_LCD, &omap3_touchbook_lcd_config },
+};
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
+static void __init omap3_touchbook_init_irq(void)
+{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+ omap_board_config = omap3_touchbook_config;
+ omap_board_config_size = ARRAY_SIZE(omap3_touchbook_config);
+ omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+ mt46h32m32lf6_sdrc_params);
+ omap_init_irq();
+#ifdef CONFIG_OMAP_32K_TIMER
+ omap2_gp_clockevent_set_gptimer(12);
+#endif
+ omap_gpio_init();
+}
+
+static struct platform_device *omap3_touchbook_devices[] __initdata = {
+ &omap3_touchbook_lcd_device,
+ &leds_gpio,
+ &keys_gpio,
+};
+
+static void __init omap3touchbook_flash_init(void)
+{
+ u8 cs = 0;
+ u8 nandcs = GPMC_CS_NUM + 1;
+
+ u32 gpmc_base_add = OMAP34XX_GPMC_VIRT;
+
+ /* find out the chip-select on which NAND exists */
+ while (cs < GPMC_CS_NUM) {
+ u32 ret = 0;
+ ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+
+ if ((ret & 0xC00) == 0x800) {
+ printk(KERN_INFO "Found NAND on CS%d\n", cs);
+ if (nandcs > GPMC_CS_NUM)
+ nandcs = cs;
+ }
+ cs++;
+ }
+
+ if (nandcs > GPMC_CS_NUM) {
+ printk(KERN_INFO "NAND: Unable to find configuration "
+ "in GPMC\n ");
+ return;
+ }
+
+ if (nandcs < GPMC_CS_NUM) {
+ omap3touchbook_nand_data.cs = nandcs;
+ omap3touchbook_nand_data.gpmc_cs_baseaddr = (void *)
+ (gpmc_base_add + GPMC_CS0_BASE + nandcs * GPMC_CS_SIZE);
+ omap3touchbook_nand_data.gpmc_baseaddr =
+ (void *) (gpmc_base_add);
+
+ printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
+ if (platform_device_register(&omap3touchbook_nand_device) < 0)
+ printk(KERN_ERR "Unable to register NAND device\n");
+ }
+}
+
+static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+
+ .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+ .phy_reset = true,
+ .reset_gpio_port[0] = -EINVAL,
+ .reset_gpio_port[1] = 147,
+ .reset_gpio_port[2] = -EINVAL
+};
+
+static void omap3_touchbook_poweroff(void)
+{
+ int r;
+
+ r = gpio_request(TB_KILL_POWER_GPIO, "DVI reset");
+ if (r < 0) {
+ printk(KERN_ERR "Unable to get kill power GPIO\n");
+ return;
+ }
+
+ gpio_direction_output(TB_KILL_POWER_GPIO, 0);
+}
+
+static void __init early_touchbook_revision(char **p)
+{
+ if (!*p)
+ return;
+
+ strict_strtoul(*p, 10, &touchbook_revision);
+}
+__early_param("tbr=", early_touchbook_revision);
+
+static void __init omap3_touchbook_init(void)
+{
+ pm_power_off = omap3_touchbook_poweroff;
+
+ omap3_touchbook_i2c_init();
+ platform_add_devices(omap3_touchbook_devices,
+ ARRAY_SIZE(omap3_touchbook_devices));
+ omap_serial_init();
+
+ omap_mux_init_gpio(170, OMAP_PIN_INPUT);
+ gpio_request(176, "DVI_nPD");
+ /* REVISIT leave DVI powered down until it's needed ... */
+ gpio_direction_output(176, true);
+
+ /* Touchscreen and accelerometer */
+ spi_register_board_info(omap3_ads7846_spi_board_info,
+ ARRAY_SIZE(omap3_ads7846_spi_board_info));
+ omap3_ads7846_init();
+ usb_musb_init();
+ usb_ehci_init(&ehci_pdata);
+ omap3touchbook_flash_init();
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+}
+
+static void __init omap3_touchbook_map_io(void)
+{
+ omap2_set_globals_343x();
+ omap2_map_common_io();
+}
+
+MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
+ /* Maintainer: Gregoire Gentil - http://www.alwaysinnovating.com */
+ .phys_io = 0x48000000,
+ .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc,
+ .boot_params = 0x80000100,
+ .map_io = omap3_touchbook_map_io,
+ .init_irq = omap3_touchbook_init_irq,
+ .init_machine = omap3_touchbook_init,
+ .timer = &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 52dfd51a938..d192dd98a59 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -26,7 +26,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
#include <linux/mtd/mtd.h>
@@ -44,9 +44,9 @@
#include <plat/gpmc.h>
#include <mach/hardware.h>
#include <plat/nand.h>
-#include <plat/mux.h>
#include <plat/usb.h>
+#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mmc-twl4030.h"
@@ -405,9 +405,17 @@ static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
.reset_gpio_port[2] = -EINVAL
};
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
static void __init overo_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
overo_i2c_init();
platform_add_devices(overo_devices, ARRAY_SIZE(overo_devices));
omap_serial_init();
@@ -418,8 +426,8 @@ static void __init overo_init(void)
overo_init_smsc911x();
/* Ensure SDRC pins are mux'd for self-refresh */
- omap_cfg_reg(H16_34XX_SDRC_CKE0);
- omap_cfg_reg(H17_34XX_SDRC_CKE1);
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
if ((gpio_request(OVERO_GPIO_W2W_NRESET,
"OVERO_GPIO_W2W_NRESET") == 0) &&
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 15ce6514c5f..acafdbc8aa1 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -16,7 +16,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/wl12xx.h>
#include <linux/i2c.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/regulator/machine.h>
@@ -33,6 +33,7 @@
#include <plat/onenand.h>
#include <plat/gpmc-smc91x.h>
+#include "mux.h"
#include "mmc-twl4030.h"
#define SYSTEM_REV_B_USES_VAUX3 0x1699
@@ -59,7 +60,7 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
.bus_num = 4,
.chip_select = 0,
.max_speed_hz = 48000000,
- .mode = SPI_MODE_2,
+ .mode = SPI_MODE_3,
.controller_data = &wl1251_mcspi_config,
.platform_data = &wl1251_pdata,
},
@@ -401,15 +402,9 @@ static struct twl4030_usb_data rx51_usb_data = {
static struct twl4030_ins sleep_on_seq[] __initdata = {
/*
- * Turn off VDD1 and VDD2.
+ * Turn off everything
*/
- {MSG_SINGULAR(DEV_GRP_P1, 0xf, RES_STATE_OFF), 4},
- {MSG_SINGULAR(DEV_GRP_P1, 0x10, RES_STATE_OFF), 2},
-/*
- * And also turn off the OMAP3 PLLs and the sysclk output.
- */
- {MSG_SINGULAR(DEV_GRP_P1, 0x7, RES_STATE_OFF), 3},
- {MSG_SINGULAR(DEV_GRP_P1, 0x17, RES_STATE_OFF), 3},
+ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_SLEEP), 2},
};
static struct twl4030_script sleep_on_script __initdata = {
@@ -420,14 +415,9 @@ static struct twl4030_script sleep_on_script __initdata = {
static struct twl4030_ins wakeup_seq[] __initdata = {
/*
- * Reenable the OMAP3 PLLs.
- * Wakeup VDD1 and VDD2.
- * Reenable sysclk output.
+ * Reenable everything
*/
- {MSG_SINGULAR(DEV_GRP_P1, 0x7, RES_STATE_ACTIVE), 0x30},
- {MSG_SINGULAR(DEV_GRP_P1, 0xf, RES_STATE_ACTIVE), 0x30},
- {MSG_SINGULAR(DEV_GRP_P1, 0x10, RES_STATE_ACTIVE), 0x37},
- {MSG_SINGULAR(DEV_GRP_P1, 0x19, RES_STATE_ACTIVE), 3},
+ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2},
};
static struct twl4030_script wakeup_script __initdata = {
@@ -438,10 +428,9 @@ static struct twl4030_script wakeup_script __initdata = {
static struct twl4030_ins wakeup_p3_seq[] __initdata = {
/*
- * Wakeup VDD1 (dummy to be able to insert a delay)
- * Enable CLKEN
+ * Reenable everything
*/
- {MSG_SINGULAR(DEV_GRP_P1, 0x17, RES_STATE_ACTIVE), 3},
+ {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 1, 0, RES_STATE_ACTIVE), 2},
};
static struct twl4030_script wakeup_p3_script __initdata = {
@@ -462,12 +451,11 @@ static struct twl4030_ins wrst_seq[] __initdata = {
{MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_OFF), 2},
{MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, 0, 1, RES_STATE_ACTIVE),
0x13},
- {MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 2, RES_STATE_WRST), 0x13},
{MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_PP, 0, 3, RES_STATE_OFF), 0x13},
{MSG_SINGULAR(DEV_GRP_NULL, RES_VDD1, RES_STATE_WRST), 0x13},
{MSG_SINGULAR(DEV_GRP_NULL, RES_VDD2, RES_STATE_WRST), 0x13},
{MSG_SINGULAR(DEV_GRP_NULL, RES_VPLL1, RES_STATE_WRST), 0x35},
- {MSG_SINGULAR(DEV_GRP_P1, RES_HFCLKOUT, RES_STATE_ACTIVE), 2},
+ {MSG_SINGULAR(DEV_GRP_P3, RES_HFCLKOUT, RES_STATE_ACTIVE), 2},
{MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_ACTIVE), 2},
};
@@ -489,22 +477,81 @@ static struct twl4030_script *twl4030_scripts[] __initdata = {
};
static struct twl4030_resconfig twl4030_rconfig[] __initdata = {
- { .resource = RES_VINTANA1, .devgroup = -1, .type = -1, .type2 = 1 },
- { .resource = RES_VINTANA2, .devgroup = -1, .type = -1, .type2 = 1 },
- { .resource = RES_VINTDIG, .devgroup = -1, .type = -1, .type2 = 1 },
- { .resource = RES_VMMC1, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VMMC2, .devgroup = DEV_GRP_NULL, .type = -1,
- .type2 = 3},
- { .resource = RES_VAUX1, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VAUX2, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VAUX3, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VAUX4, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VPLL2, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VDAC, .devgroup = -1, .type = -1, .type2 = 3},
- { .resource = RES_VSIM, .devgroup = DEV_GRP_NULL, .type = -1,
- .type2 = 3},
- { .resource = RES_CLKEN, .devgroup = DEV_GRP_P3, .type = -1,
- .type2 = 1 },
+ { .resource = RES_VDD1, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
+ .remap_sleep = RES_STATE_OFF
+ },
+ { .resource = RES_VDD2, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
+ .remap_sleep = RES_STATE_OFF
+ },
+ { .resource = RES_VPLL1, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = RES_STATE_OFF,
+ .remap_sleep = RES_STATE_OFF
+ },
+ { .resource = RES_VPLL2, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX1, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX2, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX3, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VAUX4, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VMMC1, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VMMC2, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VDAC, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VSIM, .devgroup = -1,
+ .type = -1, .type2 = 3, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VINTANA1, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = -1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VINTANA2, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VINTDIG, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = -1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_VIO, .devgroup = DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_CLKEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1 , .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_REGEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_NRES_PWRON, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_SYSEN, .devgroup = DEV_GRP_P1 | DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_HFCLKOUT, .devgroup = DEV_GRP_P3,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_32KCLKOUT, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_RESET, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
+ { .resource = RES_Main_Ref, .devgroup = -1,
+ .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
+ },
{ 0, 0},
};
@@ -630,9 +677,9 @@ static struct omap_smc91x_platform_data board_smc91x_data = {
static void __init board_smc91x_init(void)
{
- omap_cfg_reg(U8_34XX_GPIO54_DOWN);
- omap_cfg_reg(G25_34XX_GPIO86_OUT);
- omap_cfg_reg(H19_34XX_GPIO164_OUT);
+ omap_mux_init_gpio(54, OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_gpio(86, OMAP_PIN_OUTPUT);
+ omap_mux_init_gpio(164, OMAP_PIN_OUTPUT);
gpmc_smc91x_init(&board_smc91x_data);
}
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 1bb1de24591..67bb3476b70 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -23,13 +23,14 @@
#include <asm/mach/map.h>
#include <plat/mcspi.h>
-#include <plat/mux.h>
#include <plat/board.h>
#include <plat/common.h>
#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/usb.h>
+#include "mux.h"
+
struct omap_sdrc_params *rx51_get_sdram_timings(void);
static struct omap_lcd_config rx51_lcd_config = {
@@ -69,15 +70,24 @@ static void __init rx51_init_irq(void)
extern void __init rx51_peripherals_init(void);
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init rx51_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
omap_serial_init();
usb_musb_init();
rx51_peripherals_init();
/* Ensure SDRC pins are mux'd for self-refresh */
- omap_cfg_reg(H16_34XX_SDRC_CKE0);
- omap_cfg_reg(H17_34XX_SDRC_CKE1);
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
}
static void __init rx51_map_io(void)
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index f14baa39276..8dd277c3666 100755
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -14,7 +14,7 @@
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
@@ -152,14 +152,20 @@ static struct regulator_init_data zoom_vsim = {
static struct twl4030_hsmmc_info mmc[] __initdata = {
{
+ .name = "external",
.mmc = 1,
.wires = 4,
.gpio_wp = -EINVAL,
+ .power_saving = true,
},
{
+ .name = "internal",
.mmc = 2,
- .wires = 4,
+ .wires = 8,
+ .gpio_cd = -EINVAL,
.gpio_wp = -EINVAL,
+ .nonremovable = true,
+ .power_saving = true,
},
{} /* Terminator */
};
@@ -167,11 +173,8 @@ static struct twl4030_hsmmc_info mmc[] __initdata = {
static int zoom_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
- /* gpio + 0 is "mmc0_cd" (input/IRQ),
- * gpio + 1 is "mmc1_cd" (input/IRQ)
- */
+ /* gpio + 0 is "mmc0_cd" (input/IRQ) */
mmc[0].gpio_cd = gpio + 0;
- mmc[1].gpio_cd = gpio + 1;
twl4030_mmc_init(mmc);
/* link regulators to MMC adapters ... we "know" the
@@ -236,6 +239,7 @@ static struct twl4030_platform_data zoom_twldata = {
.gpio = &zoom_gpio_data,
.keypad = &zoom_kp_twl4030_data,
.codec = &zoom_codec_data,
+ .vmmc1 = &zoom_vmmc1,
.vmmc2 = &zoom_vmmc2,
.vsim = &zoom_vsim,
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index d94d047c7dc..bb87cf7878f 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -23,6 +23,7 @@
#include <mach/board-zoom.h>
+#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
static void __init omap_zoom2_init_irq(void)
@@ -68,8 +69,17 @@ static struct twl4030_platform_data zoom2_twldata = {
#endif
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap_zoom2_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
zoom_peripherals_init();
zoom_debugboard_init();
}
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
index 8d965a6516c..a9fe9181b01 100644
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ b/arch/arm/mach-omap2/board-zoom3.c
@@ -21,6 +21,7 @@
#include <plat/common.h>
#include <plat/board.h>
+#include "mux.h"
#include "sdram-hynix-h8mbx00u0mer-0em.h"
static void __init omap_zoom_map_io(void)
@@ -42,8 +43,17 @@ static void __init omap_zoom_init_irq(void)
omap_gpio_init();
}
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define board_mux NULL
+#endif
+
static void __init omap_zoom_init(void)
{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
zoom_peripherals_init();
zoom_debugboard_init();
}
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 4716206547a..759c72a48f7 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -70,9 +70,41 @@
u8 cpu_mask;
/*-------------------------------------------------------------------------
- * OMAP2/3 specific clock functions
+ * OMAP2/3/4 specific clock functions
*-------------------------------------------------------------------------*/
+void omap2_init_dpll_parent(struct clk *clk)
+{
+ u32 v;
+ struct dpll_data *dd;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return;
+
+ /* Return bypass rate if DPLL is bypassed */
+ v = __raw_readl(dd->control_reg);
+ v &= dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ /* Reparent in case the dpll is in bypass */
+ if (cpu_is_omap24xx()) {
+ if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
+ v == OMAP2XXX_EN_DPLL_FRBYPASS)
+ clk_reparent(clk, dd->clk_bypass);
+ } else if (cpu_is_omap34xx()) {
+ if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
+ v == OMAP3XXX_EN_DPLL_FRBYPASS)
+ clk_reparent(clk, dd->clk_bypass);
+ } else if (cpu_is_omap44xx()) {
+ if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
+ v == OMAP4XXX_EN_DPLL_FRBYPASS ||
+ v == OMAP4XXX_EN_DPLL_MNBYPASS)
+ clk_reparent(clk, dd->clk_bypass);
+ }
+ return;
+}
+
/**
* _omap2xxx_clk_commit - commit clock parent/rate changes in hardware
* @clk: struct clk *
@@ -149,6 +181,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
* clockdomain pointer, and save it into the struct clk. Intended to be
* called during clk_register(). No return value.
*/
+#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdm f/w is in place */
void omap2_init_clk_clkdm(struct clk *clk)
{
struct clockdomain *clkdm;
@@ -166,6 +199,7 @@ void omap2_init_clk_clkdm(struct clk *clk)
"clkdm %s\n", clk->name, clk->clkdm_name);
}
}
+#endif
/**
* omap2_init_clksel_parent - set a clksel clk's parent field from the hardware
@@ -247,6 +281,11 @@ u32 omap2_get_dpll_rate(struct clk *clk)
if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
v == OMAP3XXX_EN_DPLL_FRBYPASS)
return dd->clk_bypass->rate;
+ } else if (cpu_is_omap44xx()) {
+ if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
+ v == OMAP4XXX_EN_DPLL_FRBYPASS ||
+ v == OMAP4XXX_EN_DPLL_MNBYPASS)
+ return dd->clk_bypass->rate;
}
v = __raw_readl(dd->mult_div1_reg);
@@ -437,8 +476,10 @@ void omap2_clk_disable(struct clk *clk)
_omap2_clk_disable(clk);
if (clk->parent)
omap2_clk_disable(clk->parent);
+#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdm f/w is in place */
if (clk->clkdm)
omap2_clkdm_clk_disable(clk->clkdm, clk);
+#endif
}
}
@@ -448,8 +489,10 @@ int omap2_clk_enable(struct clk *clk)
int ret = 0;
if (clk->usecount++ == 0) {
+#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdm f/w is in place */
if (clk->clkdm)
omap2_clkdm_clk_enable(clk->clkdm, clk);
+#endif
if (clk->parent) {
ret = omap2_clk_enable(clk->parent);
@@ -468,8 +511,10 @@ int omap2_clk_enable(struct clk *clk)
return ret;
err:
+#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdm f/w is in place */
if (clk->clkdm)
omap2_clkdm_clk_disable(clk->clkdm, clk);
+#endif
clk->usecount--;
return ret;
}
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 43b6bedaafd..93c48df3b5b 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -1,8 +1,8 @@
/*
* linux/arch/arm/mach-omap2/clock.h
*
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2008 Nokia Corporation
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ * Copyright (C) 2004-2009 Nokia Corporation
*
* Contacts:
* Richard Woodruff <r-woodruff2@ti.com>
@@ -36,6 +36,17 @@
#define OMAP3XXX_EN_DPLL_FRBYPASS 0x6
#define OMAP3XXX_EN_DPLL_LOCKED 0x7
+/* OMAP4xxx CM_CLKMODE_DPLL*.EN_*_DPLL bits - for omap2_get_dpll_rate() */
+#define OMAP4XXX_EN_DPLL_MNBYPASS 0x4
+#define OMAP4XXX_EN_DPLL_LPBYPASS 0x5
+#define OMAP4XXX_EN_DPLL_FRBYPASS 0x6
+#define OMAP4XXX_EN_DPLL_LOCKED 0x7
+
+/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
+#define DPLL_LOW_POWER_STOP 0x1
+#define DPLL_LOW_POWER_BYPASS 0x5
+#define DPLL_LOCKED 0x7
+
int omap2_clk_init(void);
int omap2_clk_enable(struct clk *clk);
void omap2_clk_disable(struct clk *clk);
@@ -44,6 +55,14 @@ int omap2_clk_set_rate(struct clk *clk, unsigned long rate);
int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent);
int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance);
long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate);
+unsigned long omap3_dpll_recalc(struct clk *clk);
+unsigned long omap3_clkoutx2_recalc(struct clk *clk);
+void omap3_dpll_allow_idle(struct clk *clk);
+void omap3_dpll_deny_idle(struct clk *clk);
+u32 omap3_dpll_autoidle_read(struct clk *clk);
+int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate);
+int omap3_noncore_dpll_enable(struct clk *clk);
+void omap3_noncore_dpll_disable(struct clk *clk);
#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap2_clk_disable_unused(struct clk *clk);
@@ -63,6 +82,7 @@ unsigned long omap2_fixed_divisor_recalc(struct clk *clk);
long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate);
int omap2_clksel_set_rate(struct clk *clk, unsigned long rate);
u32 omap2_get_dpll_rate(struct clk *clk);
+void omap2_init_dpll_parent(struct clk *clk);
int omap2_wait_clock_ready(void __iomem *reg, u32 cval, const char *name);
void omap2_clk_prepare_for_reboot(void);
int omap2_dflt_clk_enable(struct clk *clk);
@@ -72,29 +92,17 @@ void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
u8 *idlest_bit);
+extern u8 cpu_mask;
+
extern const struct clkops clkops_omap2_dflt_wait;
extern const struct clkops clkops_omap2_dflt;
-extern u8 cpu_mask;
+extern struct clk_functions omap2_clk_functions;
+extern struct clk *vclk, *sclk;
-/* clksel_rate data common to 24xx/343x */
-static const struct clksel_rate gpt_32k_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate gpt_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate gfx_l3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X },
- { .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
- { .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_343X },
- { .div = 0 }
-};
+extern const struct clksel_rate gpt_32k_rates[];
+extern const struct clksel_rate gpt_sys_rates[];
+extern const struct clksel_rate gfx_l3_rates[];
#endif
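/*
 * Illustrative sketch, not part of this patch: the bypass test added to
 * omap2_init_dpll_parent() and omap2_get_dpll_rate() extracts the
 * EN_*_DPLL field with the mask from struct dpll_data and compares it
 * against the per-SoC bypass codes defined above.  For the OMAP4 case the
 * check reduces to a helper like this (mask handling as in the patch):
 */
static int omap4_dpll_mode_is_bypass(u32 ctrl_reg_val, u32 enable_mask)
{
	u32 v = (ctrl_reg_val & enable_mask) >> __ffs(enable_mask);

	return v == OMAP4XXX_EN_DPLL_LPBYPASS ||
	       v == OMAP4XXX_EN_DPLL_FRBYPASS ||
	       v == OMAP4XXX_EN_DPLL_MNBYPASS;
}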
diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c
deleted file mode 100644
index e70e7e000ea..00000000000
--- a/arch/arm/mach-omap2/clock24xx.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/clock.c
- *
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2008 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
- * Gordon McNutt and RidgeRun, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/cpufreq.h>
-#include <linux/bitops.h>
-
-#include <plat/clock.h>
-#include <plat/sram.h>
-#include <plat/prcm.h>
-#include <asm/div64.h>
-#include <asm/clkdev.h>
-
-#include <plat/sdrc.h>
-#include "clock.h"
-#include "prm.h"
-#include "prm-regbits-24xx.h"
-#include "cm.h"
-#include "cm-regbits-24xx.h"
-
-static const struct clkops clkops_oscck;
-static const struct clkops clkops_fixed;
-
-static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
- void __iomem **idlest_reg,
- u8 *idlest_bit);
-
-/* 2430 I2CHS has non-standard IDLEST register */
-static const struct clkops clkops_omap2430_i2chs_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_idlest = omap2430_clk_i2chs_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
-};
-
-#include "clock24xx.h"
-
-struct omap_clk {
- u32 cpu;
- struct clk_lookup lk;
-};
-
-#define CLK(dev, con, ck, cp) \
- { \
- .cpu = cp, \
- .lk = { \
- .dev_id = dev, \
- .con_id = con, \
- .clk = ck, \
- }, \
- }
-
-#define CK_243X RATE_IN_243X
-#define CK_242X RATE_IN_242X
-
-static struct omap_clk omap24xx_clks[] = {
- /* external root sources */
- CLK(NULL, "func_32k_ck", &func_32k_ck, CK_243X | CK_242X),
- CLK(NULL, "secure_32k_ck", &secure_32k_ck, CK_243X | CK_242X),
- CLK(NULL, "osc_ck", &osc_ck, CK_243X | CK_242X),
- CLK(NULL, "sys_ck", &sys_ck, CK_243X | CK_242X),
- CLK(NULL, "alt_ck", &alt_ck, CK_243X | CK_242X),
- /* internal analog sources */
- CLK(NULL, "dpll_ck", &dpll_ck, CK_243X | CK_242X),
- CLK(NULL, "apll96_ck", &apll96_ck, CK_243X | CK_242X),
- CLK(NULL, "apll54_ck", &apll54_ck, CK_243X | CK_242X),
- /* internal prcm root sources */
- CLK(NULL, "func_54m_ck", &func_54m_ck, CK_243X | CK_242X),
- CLK(NULL, "core_ck", &core_ck, CK_243X | CK_242X),
- CLK(NULL, "func_96m_ck", &func_96m_ck, CK_243X | CK_242X),
- CLK(NULL, "func_48m_ck", &func_48m_ck, CK_243X | CK_242X),
- CLK(NULL, "func_12m_ck", &func_12m_ck, CK_243X | CK_242X),
- CLK(NULL, "ck_wdt1_osc", &wdt1_osc_ck, CK_243X | CK_242X),
- CLK(NULL, "sys_clkout_src", &sys_clkout_src, CK_243X | CK_242X),
- CLK(NULL, "sys_clkout", &sys_clkout, CK_243X | CK_242X),
- CLK(NULL, "sys_clkout2_src", &sys_clkout2_src, CK_242X),
- CLK(NULL, "sys_clkout2", &sys_clkout2, CK_242X),
- CLK(NULL, "emul_ck", &emul_ck, CK_242X),
- /* mpu domain clocks */
- CLK(NULL, "mpu_ck", &mpu_ck, CK_243X | CK_242X),
- /* dsp domain clocks */
- CLK(NULL, "dsp_fck", &dsp_fck, CK_243X | CK_242X),
- CLK(NULL, "dsp_irate_ick", &dsp_irate_ick, CK_243X | CK_242X),
- CLK(NULL, "dsp_ick", &dsp_ick, CK_242X),
- CLK(NULL, "iva2_1_ick", &iva2_1_ick, CK_243X),
- CLK(NULL, "iva1_ifck", &iva1_ifck, CK_242X),
- CLK(NULL, "iva1_mpu_int_ifck", &iva1_mpu_int_ifck, CK_242X),
- /* GFX domain clocks */
- CLK(NULL, "gfx_3d_fck", &gfx_3d_fck, CK_243X | CK_242X),
- CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_243X | CK_242X),
- CLK(NULL, "gfx_ick", &gfx_ick, CK_243X | CK_242X),
- /* Modem domain clocks */
- CLK(NULL, "mdm_ick", &mdm_ick, CK_243X),
- CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X),
- /* DSS domain clocks */
- CLK("omapfb", "ick", &dss_ick, CK_243X | CK_242X),
- CLK("omapfb", "dss1_fck", &dss1_fck, CK_243X | CK_242X),
- CLK("omapfb", "dss2_fck", &dss2_fck, CK_243X | CK_242X),
- CLK("omapfb", "tv_fck", &dss_54m_fck, CK_243X | CK_242X),
- /* L3 domain clocks */
- CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X | CK_242X),
- CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X | CK_242X),
- CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_243X | CK_242X),
- /* L4 domain clocks */
- CLK(NULL, "l4_ck", &l4_ck, CK_243X | CK_242X),
- CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_243X | CK_242X),
- /* virtual meta-group clock */
- CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_243X | CK_242X),
- /* general l4 interface ck, multi-parent functional clk */
- CLK(NULL, "gpt1_ick", &gpt1_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt2_ick", &gpt2_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt3_ick", &gpt3_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt4_ick", &gpt4_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt5_ick", &gpt5_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt6_ick", &gpt6_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt7_ick", &gpt7_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt8_ick", &gpt8_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt9_ick", &gpt9_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt10_ick", &gpt10_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt11_ick", &gpt11_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_243X | CK_242X),
- CLK(NULL, "gpt12_ick", &gpt12_ick, CK_243X | CK_242X),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_243X | CK_242X),
- CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_243X | CK_242X),
- CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_243X | CK_242X),
- CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_243X | CK_242X),
- CLK("omap-mcbsp.2", "fck", &mcbsp2_fck, CK_243X | CK_242X),
- CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_243X),
- CLK("omap-mcbsp.3", "fck", &mcbsp3_fck, CK_243X),
- CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_243X),
- CLK("omap-mcbsp.4", "fck", &mcbsp4_fck, CK_243X),
- CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_243X),
- CLK("omap-mcbsp.5", "fck", &mcbsp5_fck, CK_243X),
- CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_243X | CK_242X),
- CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_243X | CK_242X),
- CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_243X | CK_242X),
- CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_243X | CK_242X),
- CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_243X),
- CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_243X),
- CLK(NULL, "uart1_ick", &uart1_ick, CK_243X | CK_242X),
- CLK(NULL, "uart1_fck", &uart1_fck, CK_243X | CK_242X),
- CLK(NULL, "uart2_ick", &uart2_ick, CK_243X | CK_242X),
- CLK(NULL, "uart2_fck", &uart2_fck, CK_243X | CK_242X),
- CLK(NULL, "uart3_ick", &uart3_ick, CK_243X | CK_242X),
- CLK(NULL, "uart3_fck", &uart3_fck, CK_243X | CK_242X),
- CLK(NULL, "gpios_ick", &gpios_ick, CK_243X | CK_242X),
- CLK(NULL, "gpios_fck", &gpios_fck, CK_243X | CK_242X),
- CLK("omap_wdt", "ick", &mpu_wdt_ick, CK_243X | CK_242X),
- CLK("omap_wdt", "fck", &mpu_wdt_fck, CK_243X | CK_242X),
- CLK(NULL, "sync_32k_ick", &sync_32k_ick, CK_243X | CK_242X),
- CLK(NULL, "wdt1_ick", &wdt1_ick, CK_243X | CK_242X),
- CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_243X | CK_242X),
- CLK(NULL, "icr_ick", &icr_ick, CK_243X),
- CLK("omap24xxcam", "fck", &cam_fck, CK_243X | CK_242X),
- CLK("omap24xxcam", "ick", &cam_ick, CK_243X | CK_242X),
- CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_243X | CK_242X),
- CLK(NULL, "wdt4_ick", &wdt4_ick, CK_243X | CK_242X),
- CLK(NULL, "wdt4_fck", &wdt4_fck, CK_243X | CK_242X),
- CLK(NULL, "wdt3_ick", &wdt3_ick, CK_242X),
- CLK(NULL, "wdt3_fck", &wdt3_fck, CK_242X),
- CLK(NULL, "mspro_ick", &mspro_ick, CK_243X | CK_242X),
- CLK(NULL, "mspro_fck", &mspro_fck, CK_243X | CK_242X),
- CLK("mmci-omap.0", "ick", &mmc_ick, CK_242X),
- CLK("mmci-omap.0", "fck", &mmc_fck, CK_242X),
- CLK(NULL, "fac_ick", &fac_ick, CK_243X | CK_242X),
- CLK(NULL, "fac_fck", &fac_fck, CK_243X | CK_242X),
- CLK(NULL, "eac_ick", &eac_ick, CK_242X),
- CLK(NULL, "eac_fck", &eac_fck, CK_242X),
- CLK("omap_hdq.0", "ick", &hdq_ick, CK_243X | CK_242X),
- CLK("omap_hdq.1", "fck", &hdq_fck, CK_243X | CK_242X),
- CLK("i2c_omap.1", "ick", &i2c1_ick, CK_243X | CK_242X),
- CLK("i2c_omap.1", "fck", &i2c1_fck, CK_242X),
- CLK("i2c_omap.1", "fck", &i2chs1_fck, CK_243X),
- CLK("i2c_omap.2", "ick", &i2c2_ick, CK_243X | CK_242X),
- CLK("i2c_omap.2", "fck", &i2c2_fck, CK_242X),
- CLK("i2c_omap.2", "fck", &i2chs2_fck, CK_243X),
- CLK(NULL, "gpmc_fck", &gpmc_fck, CK_243X | CK_242X),
- CLK(NULL, "sdma_fck", &sdma_fck, CK_243X | CK_242X),
- CLK(NULL, "sdma_ick", &sdma_ick, CK_243X | CK_242X),
- CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
- CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
- CLK(NULL, "sdrc_ick", &sdrc_ick, CK_243X),
- CLK(NULL, "des_ick", &des_ick, CK_243X | CK_242X),
- CLK(NULL, "sha_ick", &sha_ick, CK_243X | CK_242X),
- CLK("omap_rng", "ick", &rng_ick, CK_243X | CK_242X),
- CLK(NULL, "aes_ick", &aes_ick, CK_243X | CK_242X),
- CLK(NULL, "pka_ick", &pka_ick, CK_243X | CK_242X),
- CLK(NULL, "usb_fck", &usb_fck, CK_243X | CK_242X),
- CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X),
- CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X),
- CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X),
- CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),
- CLK("mmci-omap-hs.1", "fck", &mmchs2_fck, CK_243X),
- CLK(NULL, "gpio5_ick", &gpio5_ick, CK_243X),
- CLK(NULL, "gpio5_fck", &gpio5_fck, CK_243X),
- CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
- CLK("mmci-omap-hs.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
- CLK("mmci-omap-hs.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
-};
-
-/* CM_CLKEN_PLL.EN_{54,96}M_PLL options (24XX) */
-#define EN_APLL_STOPPED 0
-#define EN_APLL_LOCKED 3
-
-/* CM_CLKSEL1_PLL.APLLS_CLKIN options (24XX) */
-#define APLLS_CLKIN_19_2MHZ 0
-#define APLLS_CLKIN_13MHZ 2
-#define APLLS_CLKIN_12MHZ 3
-
-/* #define DOWN_VARIABLE_DPLL 1 */ /* Experimental */
-
-static struct prcm_config *curr_prcm_set;
-static struct clk *vclk;
-static struct clk *sclk;
-
-static void __iomem *prcm_clksrc_ctrl;
-
-/*-------------------------------------------------------------------------
- * Omap24xx specific clock functions
- *-------------------------------------------------------------------------*/
-
-/**
- * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
- * @clk: struct clk * being enabled
- * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
- * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
- *
- * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
- * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function
- * passes back the correct CM_IDLEST register address for I2CHS
- * modules. No return value.
- */
-static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
- void __iomem **idlest_reg,
- u8 *idlest_bit)
-{
- *idlest_reg = OMAP_CM_REGADDR(CORE_MOD, CM_IDLEST);
- *idlest_bit = clk->enable_bit;
-}
-
-
-/**
- * omap2xxx_clk_get_core_rate - return the CORE_CLK rate
- * @clk: pointer to the combined dpll_ck + core_ck (currently "dpll_ck")
- *
- * Returns the CORE_CLK rate. CORE_CLK can have one of three rate
- * sources on OMAP2xxx: the DPLL CLKOUT rate, DPLL CLKOUTX2, or 32KHz
- * (the latter is unusual). This currently should be called with
- * struct clk *dpll_ck, which is a composite clock of dpll_ck and
- * core_ck.
- */
-static unsigned long omap2xxx_clk_get_core_rate(struct clk *clk)
-{
- long long core_clk;
- u32 v;
-
- core_clk = omap2_get_dpll_rate(clk);
-
- v = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
- v &= OMAP24XX_CORE_CLK_SRC_MASK;
-
- if (v == CORE_CLK_SRC_32K)
- core_clk = 32768;
- else
- core_clk *= v;
-
- return core_clk;
-}
-
-static int omap2_enable_osc_ck(struct clk *clk)
-{
- u32 pcc;
-
- pcc = __raw_readl(prcm_clksrc_ctrl);
-
- __raw_writel(pcc & ~OMAP_AUTOEXTCLKMODE_MASK, prcm_clksrc_ctrl);
-
- return 0;
-}
-
-static void omap2_disable_osc_ck(struct clk *clk)
-{
- u32 pcc;
-
- pcc = __raw_readl(prcm_clksrc_ctrl);
-
- __raw_writel(pcc | OMAP_AUTOEXTCLKMODE_MASK, prcm_clksrc_ctrl);
-}
-
-static const struct clkops clkops_oscck = {
- .enable = &omap2_enable_osc_ck,
- .disable = &omap2_disable_osc_ck,
-};
-
-#ifdef OLD_CK
-/* Recalculate SYST_CLK */
-static void omap2_sys_clk_recalc(struct clk * clk)
-{
- u32 div = PRCM_CLKSRC_CTRL;
- div &= (1 << 7) | (1 << 6); /* Test if ext clk divided by 1 or 2 */
- div >>= clk->rate_offset;
- clk->rate = (clk->parent->rate / div);
- propagate_rate(clk);
-}
-#endif /* OLD_CK */
-
-/* Enable an APLL if off */
-static int omap2_clk_fixed_enable(struct clk *clk)
-{
- u32 cval, apll_mask;
-
- apll_mask = EN_APLL_LOCKED << clk->enable_bit;
-
- cval = cm_read_mod_reg(PLL_MOD, CM_CLKEN);
-
- if ((cval & apll_mask) == apll_mask)
- return 0; /* apll already enabled */
-
- cval &= ~apll_mask;
- cval |= apll_mask;
- cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
-
- if (clk == &apll96_ck)
- cval = OMAP24XX_ST_96M_APLL;
- else if (clk == &apll54_ck)
- cval = OMAP24XX_ST_54M_APLL;
-
- omap2_cm_wait_idlest(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), cval,
- clk->name);
-
- /*
- * REVISIT: Should we return an error code if omap2_wait_clock_ready()
- * fails?
- */
- return 0;
-}
-
-/* Stop APLL */
-static void omap2_clk_fixed_disable(struct clk *clk)
-{
- u32 cval;
-
- cval = cm_read_mod_reg(PLL_MOD, CM_CLKEN);
- cval &= ~(EN_APLL_LOCKED << clk->enable_bit);
- cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
-}
-
-static const struct clkops clkops_fixed = {
- .enable = &omap2_clk_fixed_enable,
- .disable = &omap2_clk_fixed_disable,
-};
-
-/*
- * Uses the current prcm set to tell if a rate is valid.
- * You can go slower, but not faster within a given rate set.
- */
-static long omap2_dpllcore_round_rate(unsigned long target_rate)
-{
- u32 high, low, core_clk_src;
-
- core_clk_src = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
- core_clk_src &= OMAP24XX_CORE_CLK_SRC_MASK;
-
- if (core_clk_src == CORE_CLK_SRC_DPLL) { /* DPLL clockout */
- high = curr_prcm_set->dpll_speed * 2;
- low = curr_prcm_set->dpll_speed;
- } else { /* DPLL clockout x 2 */
- high = curr_prcm_set->dpll_speed;
- low = curr_prcm_set->dpll_speed / 2;
- }
-
-#ifdef DOWN_VARIABLE_DPLL
- if (target_rate > high)
- return high;
- else
- return target_rate;
-#else
- if (target_rate > low)
- return high;
- else
- return low;
-#endif
-
-}
-
-static unsigned long omap2_dpllcore_recalc(struct clk *clk)
-{
- return omap2xxx_clk_get_core_rate(clk);
-}
-
-static int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate)
-{
- u32 cur_rate, low, mult, div, valid_rate, done_rate;
- u32 bypass = 0;
- struct prcm_config tmpset;
- const struct dpll_data *dd;
-
- cur_rate = omap2xxx_clk_get_core_rate(&dpll_ck);
- mult = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
- mult &= OMAP24XX_CORE_CLK_SRC_MASK;
-
- if ((rate == (cur_rate / 2)) && (mult == 2)) {
- omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
- } else if ((rate == (cur_rate * 2)) && (mult == 1)) {
- omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
- } else if (rate != cur_rate) {
- valid_rate = omap2_dpllcore_round_rate(rate);
- if (valid_rate != rate)
- return -EINVAL;
-
- if (mult == 1)
- low = curr_prcm_set->dpll_speed;
- else
- low = curr_prcm_set->dpll_speed / 2;
-
- dd = clk->dpll_data;
- if (!dd)
- return -EINVAL;
-
- tmpset.cm_clksel1_pll = __raw_readl(dd->mult_div1_reg);
- tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
- dd->div1_mask);
- div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
- tmpset.cm_clksel2_pll = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
- tmpset.cm_clksel2_pll &= ~OMAP24XX_CORE_CLK_SRC_MASK;
- if (rate > low) {
- tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL_X2;
- mult = ((rate / 2) / 1000000);
- done_rate = CORE_CLK_SRC_DPLL_X2;
- } else {
- tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL;
- mult = (rate / 1000000);
- done_rate = CORE_CLK_SRC_DPLL;
- }
- tmpset.cm_clksel1_pll |= (div << __ffs(dd->mult_mask));
- tmpset.cm_clksel1_pll |= (mult << __ffs(dd->div1_mask));
-
- /* Worst case */
- tmpset.base_sdrc_rfr = SDRC_RFR_CTRL_BYPASS;
-
- if (rate == curr_prcm_set->xtal_speed) /* If asking for 1-1 */
- bypass = 1;
-
- /* For omap2xxx_sdrc_init_params() */
- omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
-
- /* Force dll lock mode */
- omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr,
- bypass);
-
- /* Errata: ret dll entry state */
- omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
- omap2xxx_sdrc_reprogram(done_rate, 0);
- }
-
- return 0;
-}
-
-/**
- * omap2_table_mpu_recalc - just return the MPU speed
- * @clk: virt_prcm_set struct clk
- *
- * Set virt_prcm_set's rate to the mpu_speed field of the current PRCM set.
- */
-static unsigned long omap2_table_mpu_recalc(struct clk *clk)
-{
- return curr_prcm_set->mpu_speed;
-}
-
-/*
- * Look for a rate equal or less than the target rate given a configuration set.
- *
- * What's not entirely clear is "which" field represents the key field.
- * Some might argue L3-DDR, others ARM, others IVA. This code is simple and
- * just uses the ARM rates.
- */
-static long omap2_round_to_table_rate(struct clk *clk, unsigned long rate)
-{
- struct prcm_config *ptr;
- long highest_rate;
-
- if (clk != &virt_prcm_set)
- return -EINVAL;
-
- highest_rate = -EINVAL;
-
- for (ptr = rate_table; ptr->mpu_speed; ptr++) {
- if (!(ptr->flags & cpu_mask))
- continue;
- if (ptr->xtal_speed != sys_ck.rate)
- continue;
-
- highest_rate = ptr->mpu_speed;
-
- /* Can check only after xtal frequency check */
- if (ptr->mpu_speed <= rate)
- break;
- }
- return highest_rate;
-}
-
-/* Sets basic clocks based on the specified rate */
-static int omap2_select_table_rate(struct clk *clk, unsigned long rate)
-{
- u32 cur_rate, done_rate, bypass = 0, tmp;
- struct prcm_config *prcm;
- unsigned long found_speed = 0;
- unsigned long flags;
-
- if (clk != &virt_prcm_set)
- return -EINVAL;
-
- for (prcm = rate_table; prcm->mpu_speed; prcm++) {
- if (!(prcm->flags & cpu_mask))
- continue;
-
- if (prcm->xtal_speed != sys_ck.rate)
- continue;
-
- if (prcm->mpu_speed <= rate) {
- found_speed = prcm->mpu_speed;
- break;
- }
- }
-
- if (!found_speed) {
- printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
- rate / 1000000);
- return -EINVAL;
- }
-
- curr_prcm_set = prcm;
- cur_rate = omap2xxx_clk_get_core_rate(&dpll_ck);
-
- if (prcm->dpll_speed == cur_rate / 2) {
- omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
- } else if (prcm->dpll_speed == cur_rate * 2) {
- omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
- } else if (prcm->dpll_speed != cur_rate) {
- local_irq_save(flags);
-
- if (prcm->dpll_speed == prcm->xtal_speed)
- bypass = 1;
-
- if ((prcm->cm_clksel2_pll & OMAP24XX_CORE_CLK_SRC_MASK) ==
- CORE_CLK_SRC_DPLL_X2)
- done_rate = CORE_CLK_SRC_DPLL_X2;
- else
- done_rate = CORE_CLK_SRC_DPLL;
-
- /* MPU divider */
- cm_write_mod_reg(prcm->cm_clksel_mpu, MPU_MOD, CM_CLKSEL);
-
- /* dsp + iva1 div(2420), iva2.1(2430) */
- cm_write_mod_reg(prcm->cm_clksel_dsp,
- OMAP24XX_DSP_MOD, CM_CLKSEL);
-
- cm_write_mod_reg(prcm->cm_clksel_gfx, GFX_MOD, CM_CLKSEL);
-
- /* Major subsystem dividers */
- tmp = cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) & OMAP24XX_CLKSEL_DSS2_MASK;
- cm_write_mod_reg(prcm->cm_clksel1_core | tmp, CORE_MOD,
- CM_CLKSEL1);
-
- if (cpu_is_omap2430())
- cm_write_mod_reg(prcm->cm_clksel_mdm,
- OMAP2430_MDM_MOD, CM_CLKSEL);
-
- /* x2 to enter omap2xxx_sdrc_init_params() */
- omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
-
- omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
- bypass);
-
- omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
- omap2xxx_sdrc_reprogram(done_rate, 0);
-
- local_irq_restore(flags);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_CPU_FREQ
-/*
- * Walk PRCM rate table and fillout cpufreq freq_table
- */
-static struct cpufreq_frequency_table freq_table[ARRAY_SIZE(rate_table)];
-
-void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
-{
- struct prcm_config *prcm;
- int i = 0;
-
- for (prcm = rate_table; prcm->mpu_speed; prcm++) {
- if (!(prcm->flags & cpu_mask))
- continue;
- if (prcm->xtal_speed != sys_ck.rate)
- continue;
-
- /* don't put bypass rates in table */
- if (prcm->dpll_speed == prcm->xtal_speed)
- continue;
-
- freq_table[i].index = i;
- freq_table[i].frequency = prcm->mpu_speed / 1000;
- i++;
- }
-
- if (i == 0) {
- printk(KERN_WARNING "%s: failed to initialize frequency "
- "table\n", __func__);
- return;
- }
-
- freq_table[i].index = i;
- freq_table[i].frequency = CPUFREQ_TABLE_END;
-
- *table = &freq_table[0];
-}
-#endif
-
-static struct clk_functions omap2_clk_functions = {
- .clk_enable = omap2_clk_enable,
- .clk_disable = omap2_clk_disable,
- .clk_round_rate = omap2_clk_round_rate,
- .clk_set_rate = omap2_clk_set_rate,
- .clk_set_parent = omap2_clk_set_parent,
- .clk_disable_unused = omap2_clk_disable_unused,
-#ifdef CONFIG_CPU_FREQ
- .clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
-#endif
-};
-
-static u32 omap2_get_apll_clkin(void)
-{
- u32 aplls, srate = 0;
-
- aplls = cm_read_mod_reg(PLL_MOD, CM_CLKSEL1);
- aplls &= OMAP24XX_APLLS_CLKIN_MASK;
- aplls >>= OMAP24XX_APLLS_CLKIN_SHIFT;
-
- if (aplls == APLLS_CLKIN_19_2MHZ)
- srate = 19200000;
- else if (aplls == APLLS_CLKIN_13MHZ)
- srate = 13000000;
- else if (aplls == APLLS_CLKIN_12MHZ)
- srate = 12000000;
-
- return srate;
-}
-
-static u32 omap2_get_sysclkdiv(void)
-{
- u32 div;
-
- div = __raw_readl(prcm_clksrc_ctrl);
- div &= OMAP_SYSCLKDIV_MASK;
- div >>= OMAP_SYSCLKDIV_SHIFT;
-
- return div;
-}
-
-static unsigned long omap2_osc_clk_recalc(struct clk *clk)
-{
- return omap2_get_apll_clkin() * omap2_get_sysclkdiv();
-}
-
-static unsigned long omap2_sys_clk_recalc(struct clk *clk)
-{
- return clk->parent->rate / omap2_get_sysclkdiv();
-}
-
-/*
- * Set clocks for bypass mode for reboot to work.
- */
-void omap2_clk_prepare_for_reboot(void)
-{
- u32 rate;
-
- if (vclk == NULL || sclk == NULL)
- return;
-
- rate = clk_get_rate(sclk);
- clk_set_rate(vclk, rate);
-}
-
-/*
- * Switch the MPU rate if specified on cmdline.
- * We cannot do this early until cmdline is parsed.
- */
-static int __init omap2_clk_arch_init(void)
-{
- if (!mpurate)
- return -EINVAL;
-
- if (clk_set_rate(&virt_prcm_set, mpurate))
- printk(KERN_ERR "Could not find matching MPU rate\n");
-
- recalculate_root_clocks();
-
- printk(KERN_INFO "Switched to new clocking rate (Crystal/DPLL/MPU): "
- "%ld.%01ld/%ld/%ld MHz\n",
- (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
- (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000)) ;
-
- return 0;
-}
-arch_initcall(omap2_clk_arch_init);
-
-int __init omap2_clk_init(void)
-{
- struct prcm_config *prcm;
- struct omap_clk *c;
- u32 clkrate;
-
- if (cpu_is_omap242x()) {
- prcm_clksrc_ctrl = OMAP2420_PRCM_CLKSRC_CTRL;
- cpu_mask = RATE_IN_242X;
- } else if (cpu_is_omap2430()) {
- prcm_clksrc_ctrl = OMAP2430_PRCM_CLKSRC_CTRL;
- cpu_mask = RATE_IN_243X;
- }
-
- clk_init(&omap2_clk_functions);
-
- for (c = omap24xx_clks; c < omap24xx_clks + ARRAY_SIZE(omap24xx_clks); c++)
- clk_preinit(c->lk.clk);
-
- osc_ck.rate = omap2_osc_clk_recalc(&osc_ck);
- propagate_rate(&osc_ck);
- sys_ck.rate = omap2_sys_clk_recalc(&sys_ck);
- propagate_rate(&sys_ck);
-
- for (c = omap24xx_clks; c < omap24xx_clks + ARRAY_SIZE(omap24xx_clks); c++)
- if (c->cpu & cpu_mask) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- omap2_init_clk_clkdm(c->lk.clk);
- }
-
- /* Check the MPU rate set by bootloader */
- clkrate = omap2xxx_clk_get_core_rate(&dpll_ck);
- for (prcm = rate_table; prcm->mpu_speed; prcm++) {
- if (!(prcm->flags & cpu_mask))
- continue;
- if (prcm->xtal_speed != sys_ck.rate)
- continue;
- if (prcm->dpll_speed <= clkrate)
- break;
- }
- curr_prcm_set = prcm;
-
- recalculate_root_clocks();
-
- printk(KERN_INFO "Clocking rate (Crystal/DPLL/MPU): "
- "%ld.%01ld/%ld/%ld MHz\n",
- (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
- (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000)) ;
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable_init_clocks();
-
- /* Avoid sleeping sleeping during omap2_clk_prepare_for_reboot() */
- vclk = clk_get(NULL, "virt_prcm_set");
- sclk = clk_get(NULL, "sys_ck");
-
- return 0;
-}
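/*
 * Illustrative sketch, not part of this patch: the CORE_CLK rate logic that
 * moves from clock24xx.c into clock2xxx.c multiplies the DPLL output by the
 * CORE_CLK_SRC field from CM_CLKSEL2_PLL (1 = CLKOUT, 2 = CLKOUTX2) unless
 * the unusual 32 kHz source is selected.  Expressed as a pure function of
 * those two inputs:
 */
static unsigned long core_rate_from_dpll(unsigned long dpll_rate, u32 clk_src)
{
	if (clk_src == CORE_CLK_SRC_32K)
		return 32768;		/* 32 kHz source selected */

	return dpll_rate * clk_src;	/* x1 or x2 of DPLL CLKOUT */
}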
diff --git a/arch/arm/mach-omap2/clock2xxx.c b/arch/arm/mach-omap2/clock2xxx.c
new file mode 100644
index 00000000000..d0e3fb7f929
--- /dev/null
+++ b/arch/arm/mach-omap2/clock2xxx.c
@@ -0,0 +1,587 @@
+/*
+ * linux/arch/arm/mach-omap2/clock.c
+ *
+ * Copyright (C) 2005-2008 Texas Instruments, Inc.
+ * Copyright (C) 2004-2008 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ *
+ * Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
+ * Gordon McNutt and RidgeRun, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/bitops.h>
+
+#include <plat/clock.h>
+#include <plat/sram.h>
+#include <plat/prcm.h>
+#include <plat/clkdev_omap.h>
+#include <asm/div64.h>
+#include <asm/clkdev.h>
+
+#include <plat/sdrc.h>
+#include "clock.h"
+#include "clock2xxx.h"
+#include "opp2xxx.h"
+#include "prm.h"
+#include "prm-regbits-24xx.h"
+#include "cm.h"
+#include "cm-regbits-24xx.h"
+
+
+/* CM_CLKEN_PLL.EN_{54,96}M_PLL options (24XX) */
+#define EN_APLL_STOPPED 0
+#define EN_APLL_LOCKED 3
+
+/* CM_CLKSEL1_PLL.APLLS_CLKIN options (24XX) */
+#define APLLS_CLKIN_19_2MHZ 0
+#define APLLS_CLKIN_13MHZ 2
+#define APLLS_CLKIN_12MHZ 3
+
+/* #define DOWN_VARIABLE_DPLL 1 */ /* Experimental */
+
+const struct prcm_config *curr_prcm_set;
+const struct prcm_config *rate_table;
+
+struct clk *vclk, *sclk, *dclk;
+
+void __iomem *prcm_clksrc_ctrl;
+
+/*-------------------------------------------------------------------------
+ * Omap24xx specific clock functions
+ *-------------------------------------------------------------------------*/
+
+/**
+ * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ *
+ * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
+ * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function
+ * passes back the correct CM_IDLEST register address for I2CHS
+ * modules. No return value.
+ */
+static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit)
+{
+ *idlest_reg = OMAP_CM_REGADDR(CORE_MOD, CM_IDLEST);
+ *idlest_bit = clk->enable_bit;
+}
+
+/* 2430 I2CHS has non-standard IDLEST register */
+const struct clkops clkops_omap2430_i2chs_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap2430_clk_i2chs_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+/**
+ * omap2xxx_clk_get_core_rate - return the CORE_CLK rate
+ * @clk: pointer to the combined dpll_ck + core_ck (currently "dpll_ck")
+ *
+ * Returns the CORE_CLK rate. CORE_CLK can have one of three rate
+ * sources on OMAP2xxx: the DPLL CLKOUT rate, DPLL CLKOUTX2, or 32KHz
+ * (the latter is unusual). This currently should be called with
+ * struct clk *dpll_ck, which is a composite clock of dpll_ck and
+ * core_ck.
+ */
+unsigned long omap2xxx_clk_get_core_rate(struct clk *clk)
+{
+ long long core_clk;
+ u32 v;
+
+ core_clk = omap2_get_dpll_rate(clk);
+
+ v = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+ v &= OMAP24XX_CORE_CLK_SRC_MASK;
+
+ if (v == CORE_CLK_SRC_32K)
+ core_clk = 32768;
+ else
+ core_clk *= v;
+
+ return core_clk;
+}
+
+static int omap2_enable_osc_ck(struct clk *clk)
+{
+ u32 pcc;
+
+ pcc = __raw_readl(prcm_clksrc_ctrl);
+
+ __raw_writel(pcc & ~OMAP_AUTOEXTCLKMODE_MASK, prcm_clksrc_ctrl);
+
+ return 0;
+}
+
+static void omap2_disable_osc_ck(struct clk *clk)
+{
+ u32 pcc;
+
+ pcc = __raw_readl(prcm_clksrc_ctrl);
+
+ __raw_writel(pcc | OMAP_AUTOEXTCLKMODE_MASK, prcm_clksrc_ctrl);
+}
+
+const struct clkops clkops_oscck = {
+ .enable = omap2_enable_osc_ck,
+ .disable = omap2_disable_osc_ck,
+};
+
+#ifdef OLD_CK
+/* Recalculate SYST_CLK */
+static void omap2_sys_clk_recalc(struct clk *clk)
+{
+ u32 div = PRCM_CLKSRC_CTRL;
+ div &= (1 << 7) | (1 << 6); /* Test if ext clk divided by 1 or 2 */
+ div >>= clk->rate_offset;
+ clk->rate = (clk->parent->rate / div);
+ propagate_rate(clk);
+}
+#endif /* OLD_CK */
+
+/* Enable an APLL if off */
+static int omap2_clk_apll_enable(struct clk *clk, u32 status_mask)
+{
+ u32 cval, apll_mask;
+
+ apll_mask = EN_APLL_LOCKED << clk->enable_bit;
+
+ cval = cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+
+ if ((cval & apll_mask) == apll_mask)
+ return 0; /* apll already enabled */
+
+ cval &= ~apll_mask;
+ cval |= apll_mask;
+ cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
+
+ omap2_cm_wait_idlest(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), status_mask,
+ clk->name);
+
+ /*
+ * REVISIT: Should we return an error code if omap2_wait_clock_ready()
+ * fails?
+ */
+ return 0;
+}
+
+static int omap2_clk_apll96_enable(struct clk *clk)
+{
+ return omap2_clk_apll_enable(clk, OMAP24XX_ST_96M_APLL);
+}
+
+static int omap2_clk_apll54_enable(struct clk *clk)
+{
+ return omap2_clk_apll_enable(clk, OMAP24XX_ST_54M_APLL);
+}
+
+/* Stop APLL */
+static void omap2_clk_apll_disable(struct clk *clk)
+{
+ u32 cval;
+
+ cval = cm_read_mod_reg(PLL_MOD, CM_CLKEN);
+ cval &= ~(EN_APLL_LOCKED << clk->enable_bit);
+ cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN);
+}
+
+const struct clkops clkops_apll96 = {
+ .enable = omap2_clk_apll96_enable,
+ .disable = omap2_clk_apll_disable,
+};
+
+const struct clkops clkops_apll54 = {
+ .enable = omap2_clk_apll54_enable,
+ .disable = omap2_clk_apll_disable,
+};
+
+/*
+ * Uses the current prcm set to tell if a rate is valid.
+ * You can go slower, but not faster within a given rate set.
+ */
+long omap2_dpllcore_round_rate(unsigned long target_rate)
+{
+ u32 high, low, core_clk_src;
+
+ core_clk_src = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+ core_clk_src &= OMAP24XX_CORE_CLK_SRC_MASK;
+
+ if (core_clk_src == CORE_CLK_SRC_DPLL) { /* DPLL clockout */
+ high = curr_prcm_set->dpll_speed * 2;
+ low = curr_prcm_set->dpll_speed;
+ } else { /* DPLL clockout x 2 */
+ high = curr_prcm_set->dpll_speed;
+ low = curr_prcm_set->dpll_speed / 2;
+ }
+
+#ifdef DOWN_VARIABLE_DPLL
+ if (target_rate > high)
+ return high;
+ else
+ return target_rate;
+#else
+ if (target_rate > low)
+ return high;
+ else
+ return low;
+#endif
+
+}
+
+unsigned long omap2_dpllcore_recalc(struct clk *clk)
+{
+ return omap2xxx_clk_get_core_rate(clk);
+}
+
+int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate)
+{
+ u32 cur_rate, low, mult, div, valid_rate, done_rate;
+ u32 bypass = 0;
+ struct prcm_config tmpset;
+ const struct dpll_data *dd;
+
+ cur_rate = omap2xxx_clk_get_core_rate(dclk);
+ mult = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+ mult &= OMAP24XX_CORE_CLK_SRC_MASK;
+
+ if ((rate == (cur_rate / 2)) && (mult == 2)) {
+ omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
+ } else if ((rate == (cur_rate * 2)) && (mult == 1)) {
+ omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
+ } else if (rate != cur_rate) {
+ valid_rate = omap2_dpllcore_round_rate(rate);
+ if (valid_rate != rate)
+ return -EINVAL;
+
+ if (mult == 1)
+ low = curr_prcm_set->dpll_speed;
+ else
+ low = curr_prcm_set->dpll_speed / 2;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ tmpset.cm_clksel1_pll = __raw_readl(dd->mult_div1_reg);
+ tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
+ dd->div1_mask);
+ div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
+ tmpset.cm_clksel2_pll = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
+ tmpset.cm_clksel2_pll &= ~OMAP24XX_CORE_CLK_SRC_MASK;
+ if (rate > low) {
+ tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL_X2;
+ mult = ((rate / 2) / 1000000);
+ done_rate = CORE_CLK_SRC_DPLL_X2;
+ } else {
+ tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL;
+ mult = (rate / 1000000);
+ done_rate = CORE_CLK_SRC_DPLL;
+ }
+ tmpset.cm_clksel1_pll |= (div << __ffs(dd->mult_mask));
+ tmpset.cm_clksel1_pll |= (mult << __ffs(dd->div1_mask));
+
+ /* Worst case */
+ tmpset.base_sdrc_rfr = SDRC_RFR_CTRL_BYPASS;
+
+ if (rate == curr_prcm_set->xtal_speed) /* If asking for 1-1 */
+ bypass = 1;
+
+ /* For omap2xxx_sdrc_init_params() */
+ omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
+
+ /* Force dll lock mode */
+ omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr,
+ bypass);
+
+ /* Errata: ret dll entry state */
+ omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
+ omap2xxx_sdrc_reprogram(done_rate, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * omap2_table_mpu_recalc - just return the MPU speed
+ * @clk: virt_prcm_set struct clk
+ *
+ * Set virt_prcm_set's rate to the mpu_speed field of the current PRCM set.
+ */
+unsigned long omap2_table_mpu_recalc(struct clk *clk)
+{
+ return curr_prcm_set->mpu_speed;
+}
+
+/*
+ * Look for a rate equal to or less than the target rate, given a configuration set.
+ *
+ * What's not entirely clear is "which" field represents the key field.
+ * Some might argue L3-DDR, others ARM, others IVA. This code is simple and
+ * just uses the ARM rates.
+ */
+long omap2_round_to_table_rate(struct clk *clk, unsigned long rate)
+{
+ const struct prcm_config *ptr;
+ long highest_rate;
+ long sys_ck_rate;
+
+ sys_ck_rate = clk_get_rate(sclk);
+
+ highest_rate = -EINVAL;
+
+ for (ptr = rate_table; ptr->mpu_speed; ptr++) {
+ if (!(ptr->flags & cpu_mask))
+ continue;
+ if (ptr->xtal_speed != sys_ck_rate)
+ continue;
+
+ highest_rate = ptr->mpu_speed;
+
+ /* Can check only after xtal frequency check */
+ if (ptr->mpu_speed <= rate)
+ break;
+ }
+ return highest_rate;
+}
+
+/* Sets basic clocks based on the specified rate */
+int omap2_select_table_rate(struct clk *clk, unsigned long rate)
+{
+ u32 cur_rate, done_rate, bypass = 0, tmp;
+ const struct prcm_config *prcm;
+ unsigned long found_speed = 0;
+ unsigned long flags;
+ long sys_ck_rate;
+
+ sys_ck_rate = clk_get_rate(sclk);
+
+ for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+ if (!(prcm->flags & cpu_mask))
+ continue;
+
+ if (prcm->xtal_speed != sys_ck_rate)
+ continue;
+
+ if (prcm->mpu_speed <= rate) {
+ found_speed = prcm->mpu_speed;
+ break;
+ }
+ }
+
+ if (!found_speed) {
+ printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
+ rate / 1000000);
+ return -EINVAL;
+ }
+
+ curr_prcm_set = prcm;
+ cur_rate = omap2xxx_clk_get_core_rate(dclk);
+
+ if (prcm->dpll_speed == cur_rate / 2) {
+ omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
+ } else if (prcm->dpll_speed == cur_rate * 2) {
+ omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
+ } else if (prcm->dpll_speed != cur_rate) {
+ local_irq_save(flags);
+
+ if (prcm->dpll_speed == prcm->xtal_speed)
+ bypass = 1;
+
+ if ((prcm->cm_clksel2_pll & OMAP24XX_CORE_CLK_SRC_MASK) ==
+ CORE_CLK_SRC_DPLL_X2)
+ done_rate = CORE_CLK_SRC_DPLL_X2;
+ else
+ done_rate = CORE_CLK_SRC_DPLL;
+
+ /* MPU divider */
+ cm_write_mod_reg(prcm->cm_clksel_mpu, MPU_MOD, CM_CLKSEL);
+
+ /* dsp + iva1 div(2420), iva2.1(2430) */
+ cm_write_mod_reg(prcm->cm_clksel_dsp,
+ OMAP24XX_DSP_MOD, CM_CLKSEL);
+
+ cm_write_mod_reg(prcm->cm_clksel_gfx, GFX_MOD, CM_CLKSEL);
+
+ /* Major subsystem dividers */
+ tmp = cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) & OMAP24XX_CLKSEL_DSS2_MASK;
+ cm_write_mod_reg(prcm->cm_clksel1_core | tmp, CORE_MOD,
+ CM_CLKSEL1);
+
+ if (cpu_is_omap2430())
+ cm_write_mod_reg(prcm->cm_clksel_mdm,
+ OMAP2430_MDM_MOD, CM_CLKSEL);
+
+ /* x2 to enter omap2xxx_sdrc_init_params() */
+ omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
+
+ omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
+ bypass);
+
+ omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
+ omap2xxx_sdrc_reprogram(done_rate, 0);
+
+ local_irq_restore(flags);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CPU_FREQ
+/*
+ * Walk the PRCM rate table and fill out the cpufreq freq_table
+ */
+static struct cpufreq_frequency_table freq_table[ARRAY_SIZE(rate_table)];
+
+void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
+{
+ struct prcm_config *prcm;
+ int i = 0;
+
+ for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+ if (!(prcm->flags & cpu_mask))
+ continue;
+ if (prcm->xtal_speed != sys_ck.rate)
+ continue;
+
+ /* don't put bypass rates in table */
+ if (prcm->dpll_speed == prcm->xtal_speed)
+ continue;
+
+ freq_table[i].index = i;
+ freq_table[i].frequency = prcm->mpu_speed / 1000;
+ i++;
+ }
+
+ if (i == 0) {
+ printk(KERN_WARNING "%s: failed to initialize frequency "
+ "table\n", __func__);
+ return;
+ }
+
+ freq_table[i].index = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+}
+#endif
+
+struct clk_functions omap2_clk_functions = {
+ .clk_enable = omap2_clk_enable,
+ .clk_disable = omap2_clk_disable,
+ .clk_round_rate = omap2_clk_round_rate,
+ .clk_set_rate = omap2_clk_set_rate,
+ .clk_set_parent = omap2_clk_set_parent,
+ .clk_disable_unused = omap2_clk_disable_unused,
+#ifdef CONFIG_CPU_FREQ
+ .clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
+#endif
+};
+
+static u32 omap2_get_apll_clkin(void)
+{
+ u32 aplls, srate = 0;
+
+ aplls = cm_read_mod_reg(PLL_MOD, CM_CLKSEL1);
+ aplls &= OMAP24XX_APLLS_CLKIN_MASK;
+ aplls >>= OMAP24XX_APLLS_CLKIN_SHIFT;
+
+ if (aplls == APLLS_CLKIN_19_2MHZ)
+ srate = 19200000;
+ else if (aplls == APLLS_CLKIN_13MHZ)
+ srate = 13000000;
+ else if (aplls == APLLS_CLKIN_12MHZ)
+ srate = 12000000;
+
+ return srate;
+}
+
+static u32 omap2_get_sysclkdiv(void)
+{
+ u32 div;
+
+ div = __raw_readl(prcm_clksrc_ctrl);
+ div &= OMAP_SYSCLKDIV_MASK;
+ div >>= OMAP_SYSCLKDIV_SHIFT;
+
+ return div;
+}
+
+unsigned long omap2_osc_clk_recalc(struct clk *clk)
+{
+ return omap2_get_apll_clkin() * omap2_get_sysclkdiv();
+}
+
+unsigned long omap2_sys_clk_recalc(struct clk *clk)
+{
+ return clk->parent->rate / omap2_get_sysclkdiv();
+}
+
+/*
+ * Set clocks for bypass mode for reboot to work.
+ */
+void omap2_clk_prepare_for_reboot(void)
+{
+ u32 rate;
+
+ if (vclk == NULL || sclk == NULL)
+ return;
+
+ rate = clk_get_rate(sclk);
+ clk_set_rate(vclk, rate);
+}
+
+/*
+ * Switch the MPU rate if specified on cmdline.
+ * We cannot do this earlier because the cmdline must be parsed first.
+ */
+static int __init omap2_clk_arch_init(void)
+{
+ struct clk *virt_prcm_set, *sys_ck, *dpll_ck, *mpu_ck;
+ unsigned long sys_ck_rate;
+
+ if (!mpurate)
+ return -EINVAL;
+
+ virt_prcm_set = clk_get(NULL, "virt_prcm_set");
+ sys_ck = clk_get(NULL, "sys_ck");
+ dpll_ck = clk_get(NULL, "dpll_ck");
+ mpu_ck = clk_get(NULL, "mpu_ck");
+
+ if (clk_set_rate(virt_prcm_set, mpurate))
+ printk(KERN_ERR "Could not find matching MPU rate\n");
+
+ recalculate_root_clocks();
+
+ sys_ck_rate = clk_get_rate(sys_ck);
+
+ pr_info("Switched to new clocking rate (Crystal/DPLL/MPU): "
+ "%ld.%01ld/%ld/%ld MHz\n",
+ (sys_ck_rate / 1000000), (sys_ck_rate / 100000) % 10,
+ (clk_get_rate(dpll_ck) / 1000000),
+ (clk_get_rate(mpu_ck) / 1000000));
+
+ return 0;
+}
+arch_initcall(omap2_clk_arch_init);
+
+
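/*
 * Illustrative sketch, not part of this patch: omap2_round_to_table_rate()
 * above walks the PRCM rate table (fastest entries first) and returns the
 * highest MPU speed that does not exceed the request, after skipping
 * entries whose flags or crystal speed do not match the running chip.
 * With the table details abstracted into a plain array, the selection
 * rule is simply:
 */
static long pick_mpu_speed(const unsigned long *mpu_speeds, int n,
			   unsigned long target)
{
	long best = -EINVAL;
	int i;

	for (i = 0; i < n; i++) {
		best = mpu_speeds[i];	/* remember the slowest seen so far */
		if (mpu_speeds[i] <= target)
			break;		/* first entry at or below the target */
	}

	return best;
}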
diff --git a/arch/arm/mach-omap2/clock2xxx.h b/arch/arm/mach-omap2/clock2xxx.h
new file mode 100644
index 00000000000..e35efde4bd8
--- /dev/null
+++ b/arch/arm/mach-omap2/clock2xxx.h
@@ -0,0 +1,41 @@
+/*
+ * OMAP2 clock function prototypes and macros
+ *
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ * Copyright (C) 2004-2009 Nokia Corporation
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK_24XX_H
+#define __ARCH_ARM_MACH_OMAP2_CLOCK_24XX_H
+
+unsigned long omap2_table_mpu_recalc(struct clk *clk);
+int omap2_select_table_rate(struct clk *clk, unsigned long rate);
+long omap2_round_to_table_rate(struct clk *clk, unsigned long rate);
+unsigned long omap2_sys_clk_recalc(struct clk *clk);
+unsigned long omap2_osc_clk_recalc(struct clk *clk);
+unsigned long omap2_dpllcore_recalc(struct clk *clk);
+int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate);
+unsigned long omap2xxx_clk_get_core_rate(struct clk *clk);
+
+/* REVISIT: These should be set dynamically for CONFIG_MULTI_OMAP2 */
+#ifdef CONFIG_ARCH_OMAP2420
+#define OMAP_CM_REGADDR OMAP2420_CM_REGADDR
+#define OMAP24XX_PRCM_CLKOUT_CTRL OMAP2420_PRCM_CLKOUT_CTRL
+#define OMAP24XX_PRCM_CLKEMUL_CTRL OMAP2420_PRCM_CLKEMUL_CTRL
+#else
+#define OMAP_CM_REGADDR OMAP2430_CM_REGADDR
+#define OMAP24XX_PRCM_CLKOUT_CTRL OMAP2430_PRCM_CLKOUT_CTRL
+#define OMAP24XX_PRCM_CLKEMUL_CTRL OMAP2430_PRCM_CLKEMUL_CTRL
+#endif
+
+extern void __iomem *prcm_clksrc_ctrl;
+
+extern struct clk *dclk;
+
+extern const struct clkops clkops_omap2430_i2chs_wait;
+extern const struct clkops clkops_oscck;
+extern const struct clkops clkops_apll96;
+extern const struct clkops clkops_apll54;
+
+#endif
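/*
 * Illustrative sketch, not part of this patch: the clkops exported above
 * (clkops_oscck, clkops_apll96, clkops_apll54, clkops_omap2430_i2chs_wait)
 * are now referenced from the separate clock data file instead of being
 * file-local.  Assuming the struct clk layout from plat/clock.h in this
 * tree (field names are an assumption here), a data-file entry would wire
 * one up roughly like this:
 */
static struct clk osc_ck_example = {
	.name	= "osc_ck",
	.ops	= &clkops_oscck,	/* enable/disable via PRCM_CLKSRC_CTRL */
	.recalc	= &omap2_osc_clk_recalc,
};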
diff --git a/arch/arm/mach-omap2/clock24xx.h b/arch/arm/mach-omap2/clock2xxx_data.c
index d19cf7a7d8d..97dc7cf7751 100644
--- a/arch/arm/mach-omap2/clock24xx.h
+++ b/arch/arm/mach-omap2/clock2xxx_data.c
@@ -1,8 +1,8 @@
/*
- * linux/arch/arm/mach-omap2/clock24xx.h
+ * linux/arch/arm/mach-omap2/clock2xxx_data.c
*
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2008 Nokia Corporation
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ * Copyright (C) 2004-2009 Nokia Corporation
*
* Contacts:
* Richard Woodruff <r-woodruff2@ti.com>
@@ -13,600 +13,21 @@
* published by the Free Software Foundation.
*/
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK24XX_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCK24XX_H
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
-#include "clock.h"
+#include <plat/clkdev_omap.h>
+#include "clock.h"
+#include "clock2xxx.h"
+#include "opp2xxx.h"
#include "prm.h"
#include "cm.h"
#include "prm-regbits-24xx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
-/* REVISIT: These should be set dynamically for CONFIG_MULTI_OMAP2 */
-#ifdef CONFIG_ARCH_OMAP2420
-#define OMAP_CM_REGADDR OMAP2420_CM_REGADDR
-#define OMAP24XX_PRCM_CLKOUT_CTRL OMAP2420_PRCM_CLKOUT_CTRL
-#define OMAP24XX_PRCM_CLKEMUL_CTRL OMAP2420_PRCM_CLKEMUL_CTRL
-#else
-#define OMAP_CM_REGADDR OMAP2430_CM_REGADDR
-#define OMAP24XX_PRCM_CLKOUT_CTRL OMAP2430_PRCM_CLKOUT_CTRL
-#define OMAP24XX_PRCM_CLKEMUL_CTRL OMAP2430_PRCM_CLKEMUL_CTRL
-#endif
-
-static unsigned long omap2_table_mpu_recalc(struct clk *clk);
-static int omap2_select_table_rate(struct clk *clk, unsigned long rate);
-static long omap2_round_to_table_rate(struct clk *clk, unsigned long rate);
-static unsigned long omap2_sys_clk_recalc(struct clk *clk);
-static unsigned long omap2_osc_clk_recalc(struct clk *clk);
-static unsigned long omap2_sys_clk_recalc(struct clk *clk);
-static unsigned long omap2_dpllcore_recalc(struct clk *clk);
-static int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate);
-
-/* Key dividers which make up a PRCM set. Ratio's for a PRCM are mandated.
- * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,CM_CLKSEL_DSP
- * CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL CM_CLKSEL2_PLL, CM_CLKSEL_MDM
- */
-struct prcm_config {
- unsigned long xtal_speed; /* crystal rate */
- unsigned long dpll_speed; /* dpll: out*xtal*M/(N-1)table_recalc */
- unsigned long mpu_speed; /* speed of MPU */
- unsigned long cm_clksel_mpu; /* mpu divider */
- unsigned long cm_clksel_dsp; /* dsp+iva1 div(2420), iva2.1(2430) */
- unsigned long cm_clksel_gfx; /* gfx dividers */
- unsigned long cm_clksel1_core; /* major subsystem dividers */
- unsigned long cm_clksel1_pll; /* m,n */
- unsigned long cm_clksel2_pll; /* dpllx1 or x2 out */
- unsigned long cm_clksel_mdm; /* modem dividers 2430 only */
- unsigned long base_sdrc_rfr; /* base refresh timing for a set */
- unsigned char flags;
-};
-
-/*
- * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
- * These configurations are characterized by voltage and speed for clocks.
- * The device is only validated for certain combinations. One way to express
- * these combinations is via the 'ratio's' which the clocks operate with
- * respect to each other. These ratio sets are for a given voltage/DPLL
- * setting. All configurations can be described by a DPLL setting and a ratio
- * There are 3 ratio sets for the 2430 and X ratio sets for 2420.
- *
- * 2430 differs from 2420 in that there are no more phase synchronizers used.
- * They both have a slightly different clock domain setup. 2420(iva1,dsp) vs
- * 2430 (iva2.1, NOdsp, mdm)
- */
-
-/* Core fields for cm_clksel, not ratio governed */
-#define RX_CLKSEL_DSS1 (0x10 << 8)
-#define RX_CLKSEL_DSS2 (0x0 << 13)
-#define RX_CLKSEL_SSI (0x5 << 20)
-
-/*-------------------------------------------------------------------------
- * Voltage/DPLL ratios
- *-------------------------------------------------------------------------*/
-
-/* 2430 Ratio's, 2430-Ratio Config 1 */
-#define R1_CLKSEL_L3 (4 << 0)
-#define R1_CLKSEL_L4 (2 << 5)
-#define R1_CLKSEL_USB (4 << 25)
-#define R1_CM_CLKSEL1_CORE_VAL R1_CLKSEL_USB | RX_CLKSEL_SSI | \
- RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
- R1_CLKSEL_L4 | R1_CLKSEL_L3
-#define R1_CLKSEL_MPU (2 << 0)
-#define R1_CM_CLKSEL_MPU_VAL R1_CLKSEL_MPU
-#define R1_CLKSEL_DSP (2 << 0)
-#define R1_CLKSEL_DSP_IF (2 << 5)
-#define R1_CM_CLKSEL_DSP_VAL R1_CLKSEL_DSP | R1_CLKSEL_DSP_IF
-#define R1_CLKSEL_GFX (2 << 0)
-#define R1_CM_CLKSEL_GFX_VAL R1_CLKSEL_GFX
-#define R1_CLKSEL_MDM (4 << 0)
-#define R1_CM_CLKSEL_MDM_VAL R1_CLKSEL_MDM
-
-/* 2430-Ratio Config 2 */
-#define R2_CLKSEL_L3 (6 << 0)
-#define R2_CLKSEL_L4 (2 << 5)
-#define R2_CLKSEL_USB (2 << 25)
-#define R2_CM_CLKSEL1_CORE_VAL R2_CLKSEL_USB | RX_CLKSEL_SSI | \
- RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
- R2_CLKSEL_L4 | R2_CLKSEL_L3
-#define R2_CLKSEL_MPU (2 << 0)
-#define R2_CM_CLKSEL_MPU_VAL R2_CLKSEL_MPU
-#define R2_CLKSEL_DSP (2 << 0)
-#define R2_CLKSEL_DSP_IF (3 << 5)
-#define R2_CM_CLKSEL_DSP_VAL R2_CLKSEL_DSP | R2_CLKSEL_DSP_IF
-#define R2_CLKSEL_GFX (2 << 0)
-#define R2_CM_CLKSEL_GFX_VAL R2_CLKSEL_GFX
-#define R2_CLKSEL_MDM (6 << 0)
-#define R2_CM_CLKSEL_MDM_VAL R2_CLKSEL_MDM
-
-/* 2430-Ratio Bootm (BYPASS) */
-#define RB_CLKSEL_L3 (1 << 0)
-#define RB_CLKSEL_L4 (1 << 5)
-#define RB_CLKSEL_USB (1 << 25)
-#define RB_CM_CLKSEL1_CORE_VAL RB_CLKSEL_USB | RX_CLKSEL_SSI | \
- RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
- RB_CLKSEL_L4 | RB_CLKSEL_L3
-#define RB_CLKSEL_MPU (1 << 0)
-#define RB_CM_CLKSEL_MPU_VAL RB_CLKSEL_MPU
-#define RB_CLKSEL_DSP (1 << 0)
-#define RB_CLKSEL_DSP_IF (1 << 5)
-#define RB_CM_CLKSEL_DSP_VAL RB_CLKSEL_DSP | RB_CLKSEL_DSP_IF
-#define RB_CLKSEL_GFX (1 << 0)
-#define RB_CM_CLKSEL_GFX_VAL RB_CLKSEL_GFX
-#define RB_CLKSEL_MDM (1 << 0)
-#define RB_CM_CLKSEL_MDM_VAL RB_CLKSEL_MDM
-
-/* 2420 Ratio Equivalents */
-#define RXX_CLKSEL_VLYNQ (0x12 << 15)
-#define RXX_CLKSEL_SSI (0x8 << 20)
-
-/* 2420-PRCM III 532MHz core */
-#define RIII_CLKSEL_L3 (4 << 0) /* 133MHz */
-#define RIII_CLKSEL_L4 (2 << 5) /* 66.5MHz */
-#define RIII_CLKSEL_USB (4 << 25) /* 33.25MHz */
-#define RIII_CM_CLKSEL1_CORE_VAL RIII_CLKSEL_USB | RXX_CLKSEL_SSI | \
- RXX_CLKSEL_VLYNQ | RX_CLKSEL_DSS2 | \
- RX_CLKSEL_DSS1 | RIII_CLKSEL_L4 | \
- RIII_CLKSEL_L3
-#define RIII_CLKSEL_MPU (2 << 0) /* 266MHz */
-#define RIII_CM_CLKSEL_MPU_VAL RIII_CLKSEL_MPU
-#define RIII_CLKSEL_DSP (3 << 0) /* c5x - 177.3MHz */
-#define RIII_CLKSEL_DSP_IF (2 << 5) /* c5x - 88.67MHz */
-#define RIII_SYNC_DSP (1 << 7) /* Enable sync */
-#define RIII_CLKSEL_IVA (6 << 8) /* iva1 - 88.67MHz */
-#define RIII_SYNC_IVA (1 << 13) /* Enable sync */
-#define RIII_CM_CLKSEL_DSP_VAL RIII_SYNC_IVA | RIII_CLKSEL_IVA | \
- RIII_SYNC_DSP | RIII_CLKSEL_DSP_IF | \
- RIII_CLKSEL_DSP
-#define RIII_CLKSEL_GFX (2 << 0) /* 66.5MHz */
-#define RIII_CM_CLKSEL_GFX_VAL RIII_CLKSEL_GFX
-
-/* 2420-PRCM II 600MHz core */
-#define RII_CLKSEL_L3 (6 << 0) /* 100MHz */
-#define RII_CLKSEL_L4 (2 << 5) /* 50MHz */
-#define RII_CLKSEL_USB (2 << 25) /* 50MHz */
-#define RII_CM_CLKSEL1_CORE_VAL RII_CLKSEL_USB | \
- RXX_CLKSEL_SSI | RXX_CLKSEL_VLYNQ | \
- RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
- RII_CLKSEL_L4 | RII_CLKSEL_L3
-#define RII_CLKSEL_MPU (2 << 0) /* 300MHz */
-#define RII_CM_CLKSEL_MPU_VAL RII_CLKSEL_MPU
-#define RII_CLKSEL_DSP (3 << 0) /* c5x - 200MHz */
-#define RII_CLKSEL_DSP_IF (2 << 5) /* c5x - 100MHz */
-#define RII_SYNC_DSP (0 << 7) /* Bypass sync */
-#define RII_CLKSEL_IVA (3 << 8) /* iva1 - 200MHz */
-#define RII_SYNC_IVA (0 << 13) /* Bypass sync */
-#define RII_CM_CLKSEL_DSP_VAL RII_SYNC_IVA | RII_CLKSEL_IVA | \
- RII_SYNC_DSP | RII_CLKSEL_DSP_IF | \
- RII_CLKSEL_DSP
-#define RII_CLKSEL_GFX (2 << 0) /* 50MHz */
-#define RII_CM_CLKSEL_GFX_VAL RII_CLKSEL_GFX
-
-/* 2420-PRCM I 660MHz core */
-#define RI_CLKSEL_L3 (4 << 0) /* 165MHz */
-#define RI_CLKSEL_L4 (2 << 5) /* 82.5MHz */
-#define RI_CLKSEL_USB (4 << 25) /* 41.25MHz */
-#define RI_CM_CLKSEL1_CORE_VAL RI_CLKSEL_USB | \
- RXX_CLKSEL_SSI | RXX_CLKSEL_VLYNQ | \
- RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
- RI_CLKSEL_L4 | RI_CLKSEL_L3
-#define RI_CLKSEL_MPU (2 << 0) /* 330MHz */
-#define RI_CM_CLKSEL_MPU_VAL RI_CLKSEL_MPU
-#define RI_CLKSEL_DSP (3 << 0) /* c5x - 220MHz */
-#define RI_CLKSEL_DSP_IF (2 << 5) /* c5x - 110MHz */
-#define RI_SYNC_DSP (1 << 7) /* Activate sync */
-#define RI_CLKSEL_IVA (4 << 8) /* iva1 - 165MHz */
-#define RI_SYNC_IVA (0 << 13) /* Bypass sync */
-#define RI_CM_CLKSEL_DSP_VAL RI_SYNC_IVA | RI_CLKSEL_IVA | \
- RI_SYNC_DSP | RI_CLKSEL_DSP_IF | \
- RI_CLKSEL_DSP
-#define RI_CLKSEL_GFX (1 << 0) /* 165MHz */
-#define RI_CM_CLKSEL_GFX_VAL RI_CLKSEL_GFX
-
-/* 2420-PRCM VII (boot) */
-#define RVII_CLKSEL_L3 (1 << 0)
-#define RVII_CLKSEL_L4 (1 << 5)
-#define RVII_CLKSEL_DSS1 (1 << 8)
-#define RVII_CLKSEL_DSS2 (0 << 13)
-#define RVII_CLKSEL_VLYNQ (1 << 15)
-#define RVII_CLKSEL_SSI (1 << 20)
-#define RVII_CLKSEL_USB (1 << 25)
-
-#define RVII_CM_CLKSEL1_CORE_VAL RVII_CLKSEL_USB | RVII_CLKSEL_SSI | \
- RVII_CLKSEL_VLYNQ | RVII_CLKSEL_DSS2 | \
- RVII_CLKSEL_DSS1 | RVII_CLKSEL_L4 | RVII_CLKSEL_L3
-
-#define RVII_CLKSEL_MPU (1 << 0) /* all divide by 1 */
-#define RVII_CM_CLKSEL_MPU_VAL RVII_CLKSEL_MPU
-
-#define RVII_CLKSEL_DSP (1 << 0)
-#define RVII_CLKSEL_DSP_IF (1 << 5)
-#define RVII_SYNC_DSP (0 << 7)
-#define RVII_CLKSEL_IVA (1 << 8)
-#define RVII_SYNC_IVA (0 << 13)
-#define RVII_CM_CLKSEL_DSP_VAL RVII_SYNC_IVA | RVII_CLKSEL_IVA | RVII_SYNC_DSP | \
- RVII_CLKSEL_DSP_IF | RVII_CLKSEL_DSP
-
-#define RVII_CLKSEL_GFX (1 << 0)
-#define RVII_CM_CLKSEL_GFX_VAL RVII_CLKSEL_GFX
-
-/*-------------------------------------------------------------------------
- * 2430 Target modes: Along with each configuration the CPU has several
- * modes which goes along with them. Modes mainly are the addition of
- * describe DPLL combinations to go along with a ratio.
- *-------------------------------------------------------------------------*/
-
-/* Hardware governed */
-#define MX_48M_SRC (0 << 3)
-#define MX_54M_SRC (0 << 5)
-#define MX_APLLS_CLIKIN_12 (3 << 23)
-#define MX_APLLS_CLIKIN_13 (2 << 23)
-#define MX_APLLS_CLIKIN_19_2 (0 << 23)
-
-/*
- * 2430 - standalone, 2*ref*M/(n+1), M/N is for exactness not relock speed
- * #5a (ratio1) baseport-target, target DPLL = 266*2 = 532MHz
- */
-#define M5A_DPLL_MULT_12 (133 << 12)
-#define M5A_DPLL_DIV_12 (5 << 8)
-#define M5A_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- M5A_DPLL_DIV_12 | M5A_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-#define M5A_DPLL_MULT_13 (61 << 12)
-#define M5A_DPLL_DIV_13 (2 << 8)
-#define M5A_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
- M5A_DPLL_DIV_13 | M5A_DPLL_MULT_13 | \
- MX_APLLS_CLIKIN_13
-#define M5A_DPLL_MULT_19 (55 << 12)
-#define M5A_DPLL_DIV_19 (3 << 8)
-#define M5A_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
- M5A_DPLL_DIV_19 | M5A_DPLL_MULT_19 | \
- MX_APLLS_CLIKIN_19_2
-/* #5b (ratio1) target DPLL = 200*2 = 400MHz */
-#define M5B_DPLL_MULT_12 (50 << 12)
-#define M5B_DPLL_DIV_12 (2 << 8)
-#define M5B_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- M5B_DPLL_DIV_12 | M5B_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-#define M5B_DPLL_MULT_13 (200 << 12)
-#define M5B_DPLL_DIV_13 (12 << 8)
-
-#define M5B_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
- M5B_DPLL_DIV_13 | M5B_DPLL_MULT_13 | \
- MX_APLLS_CLIKIN_13
-#define M5B_DPLL_MULT_19 (125 << 12)
-#define M5B_DPLL_DIV_19 (31 << 8)
-#define M5B_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
- M5B_DPLL_DIV_19 | M5B_DPLL_MULT_19 | \
- MX_APLLS_CLIKIN_19_2
-/*
- * #4 (ratio2), DPLL = 399*2 = 798MHz, L3=133MHz
- */
-#define M4_DPLL_MULT_12 (133 << 12)
-#define M4_DPLL_DIV_12 (3 << 8)
-#define M4_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- M4_DPLL_DIV_12 | M4_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-
-#define M4_DPLL_MULT_13 (399 << 12)
-#define M4_DPLL_DIV_13 (12 << 8)
-#define M4_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
- M4_DPLL_DIV_13 | M4_DPLL_MULT_13 | \
- MX_APLLS_CLIKIN_13
-
-#define M4_DPLL_MULT_19 (145 << 12)
-#define M4_DPLL_DIV_19 (6 << 8)
-#define M4_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
- M4_DPLL_DIV_19 | M4_DPLL_MULT_19 | \
- MX_APLLS_CLIKIN_19_2
-
-/*
- * #3 (ratio2) baseport-target, target DPLL = 330*2 = 660MHz
- */
-#define M3_DPLL_MULT_12 (55 << 12)
-#define M3_DPLL_DIV_12 (1 << 8)
-#define M3_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- M3_DPLL_DIV_12 | M3_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-#define M3_DPLL_MULT_13 (76 << 12)
-#define M3_DPLL_DIV_13 (2 << 8)
-#define M3_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
- M3_DPLL_DIV_13 | M3_DPLL_MULT_13 | \
- MX_APLLS_CLIKIN_13
-#define M3_DPLL_MULT_19 (17 << 12)
-#define M3_DPLL_DIV_19 (0 << 8)
-#define M3_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
- M3_DPLL_DIV_19 | M3_DPLL_MULT_19 | \
- MX_APLLS_CLIKIN_19_2
-
-/*
- * #2 (ratio1) DPLL = 330*2 = 660MHz, L3=165MHz
- */
-#define M2_DPLL_MULT_12 (55 << 12)
-#define M2_DPLL_DIV_12 (1 << 8)
-#define M2_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- M2_DPLL_DIV_12 | M2_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-
-/* Speed changes - use 658.7MHz instead of 660MHz for LP-Refresh (M=76, N=2)
- * due to a relock-time issue */
-/* Core frequency changed from 330/165 MHz to 329/164 MHz */
-#define M2_DPLL_MULT_13 (76 << 12)
-#define M2_DPLL_DIV_13 (2 << 8)
-#define M2_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
- M2_DPLL_DIV_13 | M2_DPLL_MULT_13 | \
- MX_APLLS_CLIKIN_13
-
-#define M2_DPLL_MULT_19 (17 << 12)
-#define M2_DPLL_DIV_19 (0 << 8)
-#define M2_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
- M2_DPLL_DIV_19 | M2_DPLL_MULT_19 | \
- MX_APLLS_CLIKIN_19_2
-
-/* boot (boot) */
-#define MB_DPLL_MULT (1 << 12)
-#define MB_DPLL_DIV (0 << 8)
-#define MB_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
- MB_DPLL_MULT | MX_APLLS_CLIKIN_12
-
-#define MB_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
- MB_DPLL_MULT | MX_APLLS_CLIKIN_13
-
-#define MB_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
-					MB_DPLL_MULT | MX_APLLS_CLIKIN_19_2
-
-/*
- * 2430 - chassis (sedna)
- * 165 (ratio1) same as above #2
- * 150 (ratio1)
- * 133 (ratio2) same as above #4
- * 110 (ratio2) same as above #3
- * 104 (ratio2)
- * boot (boot)
- */
-
-/* PRCM I target DPLL = 2*330MHz = 660MHz */
-#define MI_DPLL_MULT_12 (55 << 12)
-#define MI_DPLL_DIV_12 (1 << 8)
-#define MI_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- MI_DPLL_DIV_12 | MI_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-
-/*
- * 2420 Equivalent - mode registers
- * PRCM II , target DPLL = 2*300MHz = 600MHz
- */
-#define MII_DPLL_MULT_12 (50 << 12)
-#define MII_DPLL_DIV_12 (1 << 8)
-#define MII_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- MII_DPLL_DIV_12 | MII_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-#define MII_DPLL_MULT_13 (300 << 12)
-#define MII_DPLL_DIV_13 (12 << 8)
-#define MII_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
- MII_DPLL_DIV_13 | MII_DPLL_MULT_13 | \
- MX_APLLS_CLIKIN_13
-
-/* PRCM III target DPLL = 2*266 = 532MHz*/
-#define MIII_DPLL_MULT_12 (133 << 12)
-#define MIII_DPLL_DIV_12 (5 << 8)
-#define MIII_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
- MIII_DPLL_DIV_12 | MIII_DPLL_MULT_12 | \
- MX_APLLS_CLIKIN_12
-#define MIII_DPLL_MULT_13 (266 << 12)
-#define MIII_DPLL_DIV_13 (12 << 8)
-#define MIII_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
- MIII_DPLL_DIV_13 | MIII_DPLL_MULT_13 | \
- MX_APLLS_CLIKIN_13
-
-/* PRCM VII (boot bypass) */
-#define MVII_CM_CLKSEL1_PLL_12_VAL MB_CM_CLKSEL1_PLL_12_VAL
-#define MVII_CM_CLKSEL1_PLL_13_VAL MB_CM_CLKSEL1_PLL_13_VAL
-
-/* High and low operation value */
-#define MX_CLKSEL2_PLL_2x_VAL (2 << 0)
-#define MX_CLKSEL2_PLL_1x_VAL (1 << 0)
-
-/* MPU speed defines */
-#define S12M 12000000
-#define S13M 13000000
-#define S19M 19200000
-#define S26M 26000000
-#define S100M 100000000
-#define S133M 133000000
-#define S150M 150000000
-#define S164M 164000000
-#define S165M 165000000
-#define S199M 199000000
-#define S200M 200000000
-#define S266M 266000000
-#define S300M 300000000
-#define S329M 329000000
-#define S330M 330000000
-#define S399M 399000000
-#define S400M 400000000
-#define S532M 532000000
-#define S600M 600000000
-#define S658M 658000000
-#define S660M 660000000
-#define S798M 798000000
-
-/*-------------------------------------------------------------------------
- * Key dividers which make up a PRCM set.  Ratios for a PRCM are mandated.
- * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
- * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
- * CM_CLKSEL2_PLL, CM_CLKSEL_MDM
- *
- * The table is filled in based on the H4 boards and the 2430-SDP variants
- * available.  Quite a few more rate combinations could be defined.
- *
- * When multiple values are defined, startup will try to choose the
- * fastest one.  If a 'fast' value is defined, the /2 value should
- * automatically be included as well, since it can be used.  Generally,
- * having more than one fast set does not make sense, as static timings
- * need to be changed in order to change the set.  The exception is the
- * bypass setting, which is available for low-power bypass.
- *
- * Note: This table needs to be sorted, fastest to slowest.
- *-------------------------------------------------------------------------*/
-static struct prcm_config rate_table[] = {
- /* PRCM I - FAST */
- {S12M, S660M, S330M, RI_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */
- RI_CM_CLKSEL_DSP_VAL, RI_CM_CLKSEL_GFX_VAL,
- RI_CM_CLKSEL1_CORE_VAL, MI_CM_CLKSEL1_PLL_12_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_165MHz,
- RATE_IN_242X},
-
- /* PRCM II - FAST */
- {S12M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */
- RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
- RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
- RATE_IN_242X},
-
- {S13M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */
- RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
- RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
- RATE_IN_242X},
-
- /* PRCM III - FAST */
- {S12M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
- RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
- RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
- RATE_IN_242X},
-
- {S13M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
- RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
- RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
- RATE_IN_242X},
-
- /* PRCM II - SLOW */
- {S12M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */
- RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
- RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
- RATE_IN_242X},
-
- {S13M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */
- RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
- RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
- RATE_IN_242X},
-
- /* PRCM III - SLOW */
- {S12M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
- RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
- RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
- RATE_IN_242X},
-
- {S13M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
- RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
- RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
- RATE_IN_242X},
-
- /* PRCM-VII (boot-bypass) */
- {S12M, S12M, S12M, RVII_CM_CLKSEL_MPU_VAL, /* 12MHz ARM*/
- RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
- RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_12_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS,
- RATE_IN_242X},
-
- /* PRCM-VII (boot-bypass) */
- {S13M, S13M, S13M, RVII_CM_CLKSEL_MPU_VAL, /* 13MHz ARM */
- RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
- RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS,
- RATE_IN_242X},
-
- /* PRCM #4 - ratio2 (ES2.1) - FAST */
- {S13M, S798M, S399M, R2_CM_CLKSEL_MPU_VAL, /* 399MHz ARM */
- R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
- R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, R2_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_133MHz,
- RATE_IN_243X},
-
- /* PRCM #2 - ratio1 (ES2) - FAST */
- {S13M, S658M, S329M, R1_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */
- R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
- R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_165MHz,
- RATE_IN_243X},
-
- /* PRCM #5a - ratio1 - FAST */
- {S13M, S532M, S266M, R1_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
- R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
- R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_133MHz,
- RATE_IN_243X},
-
- /* PRCM #5b - ratio1 - FAST */
- {S13M, S400M, S200M, R1_CM_CLKSEL_MPU_VAL, /* 200MHz ARM */
- R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
- R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_100MHz,
- RATE_IN_243X},
-
- /* PRCM #4 - ratio1 (ES2.1) - SLOW */
- {S13M, S399M, S199M, R2_CM_CLKSEL_MPU_VAL, /* 200MHz ARM */
- R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
- R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_1x_VAL, R2_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_133MHz,
- RATE_IN_243X},
-
- /* PRCM #2 - ratio1 (ES2) - SLOW */
- {S13M, S329M, S164M, R1_CM_CLKSEL_MPU_VAL, /* 165MHz ARM */
- R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
- R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_165MHz,
- RATE_IN_243X},
-
- /* PRCM #5a - ratio1 - SLOW */
- {S13M, S266M, S133M, R1_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
- R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
- R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_133MHz,
- RATE_IN_243X},
-
- /* PRCM #5b - ratio1 - SLOW*/
- {S13M, S200M, S100M, R1_CM_CLKSEL_MPU_VAL, /* 100MHz ARM */
- R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
- R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_100MHz,
- RATE_IN_243X},
-
- /* PRCM-boot/bypass */
-	{S13M, S13M, S13M, RB_CM_CLKSEL_MPU_VAL,	/* 13 MHz */
- RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
- RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_13_VAL,
- MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_BYPASS,
- RATE_IN_243X},
-
- /* PRCM-boot/bypass */
-	{S12M, S12M, S12M, RB_CM_CLKSEL_MPU_VAL,	/* 12 MHz */
- RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
- RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_12_VAL,
- MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
- SDRC_RFR_CTRL_BYPASS,
- RATE_IN_243X},
-
- { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-};
-
/*-------------------------------------------------------------------------
* 24xx clock tree.
*
@@ -708,7 +129,7 @@ static struct clk dpll_ck = {
static struct clk apll96_ck = {
.name = "apll96_ck",
- .ops = &clkops_fixed,
+ .ops = &clkops_apll96,
.parent = &sys_ck,
.rate = 96000000,
.flags = RATE_FIXED | ENABLE_ON_INIT,
@@ -719,7 +140,7 @@ static struct clk apll96_ck = {
static struct clk apll54_ck = {
.name = "apll54_ck",
- .ops = &clkops_fixed,
+ .ops = &clkops_apll54,
.parent = &sys_ck,
.rate = 54000000,
.flags = RATE_FIXED | ENABLE_ON_INIT,
@@ -2653,5 +2074,236 @@ static struct clk virt_prcm_set = {
.round_rate = &omap2_round_to_table_rate,
};
-#endif
+
+/*
+ * clkdev integration
+ */
+
+static struct omap_clk omap24xx_clks[] = {
+ /* external root sources */
+ CLK(NULL, "func_32k_ck", &func_32k_ck, CK_243X | CK_242X),
+ CLK(NULL, "secure_32k_ck", &secure_32k_ck, CK_243X | CK_242X),
+ CLK(NULL, "osc_ck", &osc_ck, CK_243X | CK_242X),
+ CLK(NULL, "sys_ck", &sys_ck, CK_243X | CK_242X),
+ CLK(NULL, "alt_ck", &alt_ck, CK_243X | CK_242X),
+ /* internal analog sources */
+ CLK(NULL, "dpll_ck", &dpll_ck, CK_243X | CK_242X),
+ CLK(NULL, "apll96_ck", &apll96_ck, CK_243X | CK_242X),
+ CLK(NULL, "apll54_ck", &apll54_ck, CK_243X | CK_242X),
+ /* internal prcm root sources */
+ CLK(NULL, "func_54m_ck", &func_54m_ck, CK_243X | CK_242X),
+ CLK(NULL, "core_ck", &core_ck, CK_243X | CK_242X),
+ CLK(NULL, "func_96m_ck", &func_96m_ck, CK_243X | CK_242X),
+ CLK(NULL, "func_48m_ck", &func_48m_ck, CK_243X | CK_242X),
+ CLK(NULL, "func_12m_ck", &func_12m_ck, CK_243X | CK_242X),
+ CLK(NULL, "ck_wdt1_osc", &wdt1_osc_ck, CK_243X | CK_242X),
+ CLK(NULL, "sys_clkout_src", &sys_clkout_src, CK_243X | CK_242X),
+ CLK(NULL, "sys_clkout", &sys_clkout, CK_243X | CK_242X),
+ CLK(NULL, "sys_clkout2_src", &sys_clkout2_src, CK_242X),
+ CLK(NULL, "sys_clkout2", &sys_clkout2, CK_242X),
+ CLK(NULL, "emul_ck", &emul_ck, CK_242X),
+ /* mpu domain clocks */
+ CLK(NULL, "mpu_ck", &mpu_ck, CK_243X | CK_242X),
+ /* dsp domain clocks */
+ CLK(NULL, "dsp_fck", &dsp_fck, CK_243X | CK_242X),
+ CLK(NULL, "dsp_irate_ick", &dsp_irate_ick, CK_243X | CK_242X),
+ CLK(NULL, "dsp_ick", &dsp_ick, CK_242X),
+ CLK(NULL, "iva2_1_ick", &iva2_1_ick, CK_243X),
+ CLK(NULL, "iva1_ifck", &iva1_ifck, CK_242X),
+ CLK(NULL, "iva1_mpu_int_ifck", &iva1_mpu_int_ifck, CK_242X),
+ /* GFX domain clocks */
+ CLK(NULL, "gfx_3d_fck", &gfx_3d_fck, CK_243X | CK_242X),
+ CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_243X | CK_242X),
+ CLK(NULL, "gfx_ick", &gfx_ick, CK_243X | CK_242X),
+ /* Modem domain clocks */
+ CLK(NULL, "mdm_ick", &mdm_ick, CK_243X),
+ CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X),
+ /* DSS domain clocks */
+ CLK("omapdss", "ick", &dss_ick, CK_243X | CK_242X),
+ CLK("omapdss", "dss1_fck", &dss1_fck, CK_243X | CK_242X),
+ CLK("omapdss", "dss2_fck", &dss2_fck, CK_243X | CK_242X),
+ CLK("omapdss", "tv_fck", &dss_54m_fck, CK_243X | CK_242X),
+ /* L3 domain clocks */
+ CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X | CK_242X),
+ CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X | CK_242X),
+ CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_243X | CK_242X),
+ /* L4 domain clocks */
+ CLK(NULL, "l4_ck", &l4_ck, CK_243X | CK_242X),
+ CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_243X | CK_242X),
+ /* virtual meta-group clock */
+ CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_243X | CK_242X),
+ /* general l4 interface ck, multi-parent functional clk */
+ CLK(NULL, "gpt1_ick", &gpt1_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt1_fck", &gpt1_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt2_ick", &gpt2_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt2_fck", &gpt2_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt3_ick", &gpt3_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt3_fck", &gpt3_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt4_ick", &gpt4_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt4_fck", &gpt4_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt5_ick", &gpt5_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt5_fck", &gpt5_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt6_ick", &gpt6_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt6_fck", &gpt6_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt7_ick", &gpt7_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt7_fck", &gpt7_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt8_ick", &gpt8_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt8_fck", &gpt8_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt9_ick", &gpt9_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt9_fck", &gpt9_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt10_ick", &gpt10_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt10_fck", &gpt10_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt11_ick", &gpt11_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt11_fck", &gpt11_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpt12_ick", &gpt12_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpt12_fck", &gpt12_fck, CK_243X | CK_242X),
+ CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_243X | CK_242X),
+ CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_243X | CK_242X),
+ CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_243X | CK_242X),
+ CLK("omap-mcbsp.2", "fck", &mcbsp2_fck, CK_243X | CK_242X),
+ CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_243X),
+ CLK("omap-mcbsp.3", "fck", &mcbsp3_fck, CK_243X),
+ CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_243X),
+ CLK("omap-mcbsp.4", "fck", &mcbsp4_fck, CK_243X),
+ CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_243X),
+ CLK("omap-mcbsp.5", "fck", &mcbsp5_fck, CK_243X),
+ CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_243X | CK_242X),
+ CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_243X | CK_242X),
+ CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_243X | CK_242X),
+ CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_243X | CK_242X),
+ CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_243X),
+ CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_243X),
+ CLK(NULL, "uart1_ick", &uart1_ick, CK_243X | CK_242X),
+ CLK(NULL, "uart1_fck", &uart1_fck, CK_243X | CK_242X),
+ CLK(NULL, "uart2_ick", &uart2_ick, CK_243X | CK_242X),
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_243X | CK_242X),
+ CLK(NULL, "uart3_ick", &uart3_ick, CK_243X | CK_242X),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_243X | CK_242X),
+ CLK(NULL, "gpios_ick", &gpios_ick, CK_243X | CK_242X),
+ CLK(NULL, "gpios_fck", &gpios_fck, CK_243X | CK_242X),
+ CLK("omap_wdt", "ick", &mpu_wdt_ick, CK_243X | CK_242X),
+ CLK("omap_wdt", "fck", &mpu_wdt_fck, CK_243X | CK_242X),
+ CLK(NULL, "sync_32k_ick", &sync_32k_ick, CK_243X | CK_242X),
+ CLK(NULL, "wdt1_ick", &wdt1_ick, CK_243X | CK_242X),
+ CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_243X | CK_242X),
+ CLK(NULL, "icr_ick", &icr_ick, CK_243X),
+ CLK("omap24xxcam", "fck", &cam_fck, CK_243X | CK_242X),
+ CLK("omap24xxcam", "ick", &cam_ick, CK_243X | CK_242X),
+ CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_243X | CK_242X),
+ CLK(NULL, "wdt4_ick", &wdt4_ick, CK_243X | CK_242X),
+ CLK(NULL, "wdt4_fck", &wdt4_fck, CK_243X | CK_242X),
+ CLK(NULL, "wdt3_ick", &wdt3_ick, CK_242X),
+ CLK(NULL, "wdt3_fck", &wdt3_fck, CK_242X),
+ CLK(NULL, "mspro_ick", &mspro_ick, CK_243X | CK_242X),
+ CLK(NULL, "mspro_fck", &mspro_fck, CK_243X | CK_242X),
+ CLK("mmci-omap.0", "ick", &mmc_ick, CK_242X),
+ CLK("mmci-omap.0", "fck", &mmc_fck, CK_242X),
+ CLK(NULL, "fac_ick", &fac_ick, CK_243X | CK_242X),
+ CLK(NULL, "fac_fck", &fac_fck, CK_243X | CK_242X),
+ CLK(NULL, "eac_ick", &eac_ick, CK_242X),
+ CLK(NULL, "eac_fck", &eac_fck, CK_242X),
+ CLK("omap_hdq.0", "ick", &hdq_ick, CK_243X | CK_242X),
+ CLK("omap_hdq.1", "fck", &hdq_fck, CK_243X | CK_242X),
+ CLK("i2c_omap.1", "ick", &i2c1_ick, CK_243X | CK_242X),
+ CLK("i2c_omap.1", "fck", &i2c1_fck, CK_242X),
+ CLK("i2c_omap.1", "fck", &i2chs1_fck, CK_243X),
+ CLK("i2c_omap.2", "ick", &i2c2_ick, CK_243X | CK_242X),
+ CLK("i2c_omap.2", "fck", &i2c2_fck, CK_242X),
+ CLK("i2c_omap.2", "fck", &i2chs2_fck, CK_243X),
+ CLK(NULL, "gpmc_fck", &gpmc_fck, CK_243X | CK_242X),
+ CLK(NULL, "sdma_fck", &sdma_fck, CK_243X | CK_242X),
+ CLK(NULL, "sdma_ick", &sdma_ick, CK_243X | CK_242X),
+ CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
+ CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
+ CLK(NULL, "sdrc_ick", &sdrc_ick, CK_243X),
+ CLK(NULL, "des_ick", &des_ick, CK_243X | CK_242X),
+ CLK(NULL, "sha_ick", &sha_ick, CK_243X | CK_242X),
+ CLK("omap_rng", "ick", &rng_ick, CK_243X | CK_242X),
+ CLK(NULL, "aes_ick", &aes_ick, CK_243X | CK_242X),
+ CLK(NULL, "pka_ick", &pka_ick, CK_243X | CK_242X),
+ CLK(NULL, "usb_fck", &usb_fck, CK_243X | CK_242X),
+ CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X),
+ CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X),
+ CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X),
+ CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),
+ CLK("mmci-omap-hs.1", "fck", &mmchs2_fck, CK_243X),
+ CLK(NULL, "gpio5_ick", &gpio5_ick, CK_243X),
+ CLK(NULL, "gpio5_fck", &gpio5_fck, CK_243X),
+ CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
+ CLK("mmci-omap-hs.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
+ CLK("mmci-omap-hs.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
+};
+
+/*
+ * init code
+ */
+
+int __init omap2_clk_init(void)
+{
+ const struct prcm_config *prcm;
+ struct omap_clk *c;
+ u32 clkrate;
+ u16 cpu_clkflg;
+
+ if (cpu_is_omap242x()) {
+ prcm_clksrc_ctrl = OMAP2420_PRCM_CLKSRC_CTRL;
+ cpu_mask = RATE_IN_242X;
+ cpu_clkflg = CK_242X;
+ rate_table = omap2420_rate_table;
+ } else if (cpu_is_omap2430()) {
+ prcm_clksrc_ctrl = OMAP2430_PRCM_CLKSRC_CTRL;
+ cpu_mask = RATE_IN_243X;
+ cpu_clkflg = CK_243X;
+ rate_table = omap2430_rate_table;
+ }
+
+ clk_init(&omap2_clk_functions);
+
+ for (c = omap24xx_clks; c < omap24xx_clks + ARRAY_SIZE(omap24xx_clks); c++)
+ clk_preinit(c->lk.clk);
+
+ osc_ck.rate = omap2_osc_clk_recalc(&osc_ck);
+ propagate_rate(&osc_ck);
+ sys_ck.rate = omap2_sys_clk_recalc(&sys_ck);
+ propagate_rate(&sys_ck);
+
+ for (c = omap24xx_clks; c < omap24xx_clks + ARRAY_SIZE(omap24xx_clks); c++)
+ if (c->cpu & cpu_clkflg) {
+ clkdev_add(&c->lk);
+ clk_register(c->lk.clk);
+ omap2_init_clk_clkdm(c->lk.clk);
+ }
+
+ /* Check the MPU rate set by bootloader */
+ clkrate = omap2xxx_clk_get_core_rate(&dpll_ck);
+ for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+ if (!(prcm->flags & cpu_mask))
+ continue;
+ if (prcm->xtal_speed != sys_ck.rate)
+ continue;
+ if (prcm->dpll_speed <= clkrate)
+ break;
+ }
+ curr_prcm_set = prcm;
+
+ recalculate_root_clocks();
+
+ printk(KERN_INFO "Clocking rate (Crystal/DPLL/MPU): "
+ "%ld.%01ld/%ld/%ld MHz\n",
+ (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
+ (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000)) ;
+
+ /*
+ * Only enable those clocks we will need, let the drivers
+ * enable other clocks as necessary
+ */
+ clk_enable_init_clocks();
+
+	/* Avoid sleeping during omap2_clk_prepare_for_reboot() */
+ vclk = clk_get(NULL, "virt_prcm_set");
+ sclk = clk_get(NULL, "sys_ck");
+ dclk = clk_get(NULL, "dpll_ck");
+
+ return 0;
+}
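To make the "Check the MPU rate set by bootloader" scan above concrete, here is
a small self-contained mock of the same selection logic (names and table
contents are illustrative only, not part of the patch).  With a 13 MHz crystal
and a bootloader-programmed core DPLL of 532 MHz, the loop skips faster entries
and stops at the first matching-crystal entry whose dpll_speed is at or below
the current rate:

struct mock_rate { unsigned long xtal, dpll, mpu; };

/* Sorted fastest to slowest, as the real rate_table must be. */
static const struct mock_rate mock_table[] = {
	{ 13000000, 658000000, 329000000 },	/* skipped: dpll > 532 MHz */
	{ 13000000, 532000000, 266000000 },	/* first dpll <= 532 MHz: chosen */
	{ 13000000, 400000000, 200000000 },
	{ 0, 0, 0 },				/* terminator, like the real table */
};

static const struct mock_rate *pick(unsigned long xtal, unsigned long boot_dpll)
{
	const struct mock_rate *p;

	for (p = mock_table; p->mpu; p++) {
		if (p->xtal != xtal)
			continue;
		if (p->dpll <= boot_dpll)
			break;
	}
	return p;	/* pick(13000000, 532000000) selects the 266 MHz MPU entry */
}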
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 9f2feaf7986..ded32364f32 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -30,314 +30,21 @@
#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/sram.h>
+#include <plat/sdrc.h>
#include <asm/div64.h>
#include <asm/clkdev.h>
#include <plat/sdrc.h>
#include "clock.h"
+#include "clock34xx.h"
+#include "sdrc.h"
#include "prm.h"
#include "prm-regbits-34xx.h"
#include "cm.h"
#include "cm-regbits-34xx.h"
-static const struct clkops clkops_noncore_dpll_ops;
-
-static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
- void __iomem **idlest_reg,
- u8 *idlest_bit);
-static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
- void __iomem **idlest_reg,
- u8 *idlest_bit);
-static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
- void __iomem **idlest_reg,
- u8 *idlest_bit);
-
-static const struct clkops clkops_omap3430es2_ssi_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_idlest = omap3430es2_clk_ssi_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
-};
-
-static const struct clkops clkops_omap3430es2_hsotgusb_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
-};
-
-static const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
- .enable = omap2_dflt_clk_enable,
- .disable = omap2_dflt_clk_disable,
- .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
- .find_companion = omap2_clk_dflt_find_companion,
-};
-
-#include "clock34xx.h"
-
-struct omap_clk {
- u32 cpu;
- struct clk_lookup lk;
-};
-
-#define CLK(dev, con, ck, cp) \
- { \
- .cpu = cp, \
- .lk = { \
- .dev_id = dev, \
- .con_id = con, \
- .clk = ck, \
- }, \
- }
-
-#define CK_343X (1 << 0)
-#define CK_3430ES1 (1 << 1)
-#define CK_3430ES2 (1 << 2)
-
-static struct omap_clk omap34xx_clks[] = {
- CLK(NULL, "omap_32k_fck", &omap_32k_fck, CK_343X),
- CLK(NULL, "virt_12m_ck", &virt_12m_ck, CK_343X),
- CLK(NULL, "virt_13m_ck", &virt_13m_ck, CK_343X),
- CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck, CK_3430ES2),
- CLK(NULL, "virt_19_2m_ck", &virt_19_2m_ck, CK_343X),
- CLK(NULL, "virt_26m_ck", &virt_26m_ck, CK_343X),
- CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck, CK_343X),
- CLK(NULL, "osc_sys_ck", &osc_sys_ck, CK_343X),
- CLK(NULL, "sys_ck", &sys_ck, CK_343X),
- CLK(NULL, "sys_altclk", &sys_altclk, CK_343X),
- CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_343X),
- CLK(NULL, "sys_clkout1", &sys_clkout1, CK_343X),
- CLK(NULL, "dpll1_ck", &dpll1_ck, CK_343X),
- CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck, CK_343X),
- CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck, CK_343X),
- CLK(NULL, "dpll2_ck", &dpll2_ck, CK_343X),
- CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck, CK_343X),
- CLK(NULL, "dpll3_ck", &dpll3_ck, CK_343X),
- CLK(NULL, "core_ck", &core_ck, CK_343X),
- CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck, CK_343X),
- CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck, CK_343X),
- CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck, CK_343X),
- CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck, CK_343X),
- CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck, CK_343X),
- CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck, CK_343X),
- CLK(NULL, "dpll4_ck", &dpll4_ck, CK_343X),
- CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck, CK_343X),
- CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck, CK_343X),
- CLK(NULL, "omap_96m_fck", &omap_96m_fck, CK_343X),
- CLK(NULL, "cm_96m_fck", &cm_96m_fck, CK_343X),
- CLK(NULL, "omap_54m_fck", &omap_54m_fck, CK_343X),
- CLK(NULL, "omap_48m_fck", &omap_48m_fck, CK_343X),
- CLK(NULL, "omap_12m_fck", &omap_12m_fck, CK_343X),
- CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck, CK_343X),
- CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck, CK_343X),
- CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck, CK_343X),
- CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck, CK_343X),
- CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck, CK_343X),
- CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck, CK_343X),
- CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck, CK_343X),
- CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck, CK_343X),
- CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck, CK_343X),
- CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck, CK_343X),
- CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck, CK_343X),
- CLK(NULL, "dpll5_ck", &dpll5_ck, CK_3430ES2),
- CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck, CK_3430ES2),
- CLK(NULL, "clkout2_src_ck", &clkout2_src_ck, CK_343X),
- CLK(NULL, "sys_clkout2", &sys_clkout2, CK_343X),
- CLK(NULL, "corex2_fck", &corex2_fck, CK_343X),
- CLK(NULL, "dpll1_fck", &dpll1_fck, CK_343X),
- CLK(NULL, "mpu_ck", &mpu_ck, CK_343X),
- CLK(NULL, "arm_fck", &arm_fck, CK_343X),
- CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck, CK_343X),
- CLK(NULL, "dpll2_fck", &dpll2_fck, CK_343X),
- CLK(NULL, "iva2_ck", &iva2_ck, CK_343X),
- CLK(NULL, "l3_ick", &l3_ick, CK_343X),
- CLK(NULL, "l4_ick", &l4_ick, CK_343X),
- CLK(NULL, "rm_ick", &rm_ick, CK_343X),
- CLK(NULL, "gfx_l3_ck", &gfx_l3_ck, CK_3430ES1),
- CLK(NULL, "gfx_l3_fck", &gfx_l3_fck, CK_3430ES1),
- CLK(NULL, "gfx_l3_ick", &gfx_l3_ick, CK_3430ES1),
- CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck, CK_3430ES1),
- CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck, CK_3430ES1),
- CLK(NULL, "sgx_fck", &sgx_fck, CK_3430ES2),
- CLK(NULL, "sgx_ick", &sgx_ick, CK_3430ES2),
- CLK(NULL, "d2d_26m_fck", &d2d_26m_fck, CK_3430ES1),
- CLK(NULL, "modem_fck", &modem_fck, CK_343X),
- CLK(NULL, "sad2d_ick", &sad2d_ick, CK_343X),
- CLK(NULL, "mad2d_ick", &mad2d_ick, CK_343X),
- CLK(NULL, "gpt10_fck", &gpt10_fck, CK_343X),
- CLK(NULL, "gpt11_fck", &gpt11_fck, CK_343X),
- CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2),
- CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2),
- CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2),
- CLK(NULL, "core_96m_fck", &core_96m_fck, CK_343X),
- CLK("mmci-omap-hs.2", "fck", &mmchs3_fck, CK_3430ES2),
- CLK("mmci-omap-hs.1", "fck", &mmchs2_fck, CK_343X),
- CLK(NULL, "mspro_fck", &mspro_fck, CK_343X),
- CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_343X),
- CLK("i2c_omap.3", "fck", &i2c3_fck, CK_343X),
- CLK("i2c_omap.2", "fck", &i2c2_fck, CK_343X),
- CLK("i2c_omap.1", "fck", &i2c1_fck, CK_343X),
- CLK("omap-mcbsp.5", "fck", &mcbsp5_fck, CK_343X),
- CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_343X),
- CLK(NULL, "core_48m_fck", &core_48m_fck, CK_343X),
- CLK("omap2_mcspi.4", "fck", &mcspi4_fck, CK_343X),
- CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_343X),
- CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_343X),
- CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_343X),
- CLK(NULL, "uart2_fck", &uart2_fck, CK_343X),
- CLK(NULL, "uart1_fck", &uart1_fck, CK_343X),
- CLK(NULL, "fshostusb_fck", &fshostusb_fck, CK_3430ES1),
- CLK(NULL, "core_12m_fck", &core_12m_fck, CK_343X),
- CLK("omap_hdq.0", "fck", &hdq_fck, CK_343X),
- CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1, CK_3430ES1),
- CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2, CK_3430ES2),
- CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
- CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2),
- CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X),
- CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
- CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es2, CK_3430ES2),
- CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X),
- CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X),
- CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X),
- CLK(NULL, "pka_ick", &pka_ick, CK_343X),
- CLK(NULL, "core_l4_ick", &core_l4_ick, CK_343X),
- CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2),
- CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2),
- CLK(NULL, "icr_ick", &icr_ick, CK_343X),
- CLK(NULL, "aes2_ick", &aes2_ick, CK_343X),
- CLK(NULL, "sha12_ick", &sha12_ick, CK_343X),
- CLK(NULL, "des2_ick", &des2_ick, CK_343X),
- CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_343X),
- CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_343X),
- CLK(NULL, "mspro_ick", &mspro_ick, CK_343X),
- CLK("omap_hdq.0", "ick", &hdq_ick, CK_343X),
- CLK("omap2_mcspi.4", "ick", &mcspi4_ick, CK_343X),
- CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_343X),
- CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_343X),
- CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_343X),
- CLK("i2c_omap.3", "ick", &i2c3_ick, CK_343X),
- CLK("i2c_omap.2", "ick", &i2c2_ick, CK_343X),
- CLK("i2c_omap.1", "ick", &i2c1_ick, CK_343X),
- CLK(NULL, "uart2_ick", &uart2_ick, CK_343X),
- CLK(NULL, "uart1_ick", &uart1_ick, CK_343X),
- CLK(NULL, "gpt11_ick", &gpt11_ick, CK_343X),
- CLK(NULL, "gpt10_ick", &gpt10_ick, CK_343X),
- CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_343X),
- CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_343X),
- CLK(NULL, "fac_ick", &fac_ick, CK_3430ES1),
- CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_343X),
- CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_343X),
- CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_343X),
- CLK(NULL, "ssi_ick", &ssi_ick_3430es1, CK_3430ES1),
- CLK(NULL, "ssi_ick", &ssi_ick_3430es2, CK_3430ES2),
- CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_3430ES1),
- CLK(NULL, "security_l4_ick2", &security_l4_ick2, CK_343X),
- CLK(NULL, "aes1_ick", &aes1_ick, CK_343X),
- CLK("omap_rng", "ick", &rng_ick, CK_343X),
- CLK(NULL, "sha11_ick", &sha11_ick, CK_343X),
- CLK(NULL, "des1_ick", &des1_ick, CK_343X),
- CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
- CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es2, CK_3430ES2),
- CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X),
- CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X),
- CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X),
- CLK("omapfb", "ick", &dss_ick_3430es1, CK_3430ES1),
- CLK("omapfb", "ick", &dss_ick_3430es2, CK_3430ES2),
- CLK(NULL, "cam_mclk", &cam_mclk, CK_343X),
- CLK(NULL, "cam_ick", &cam_ick, CK_343X),
- CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
- CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2),
- CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2),
- CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2),
- CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2),
- CLK(NULL, "gpt1_fck", &gpt1_fck, CK_343X),
- CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_343X),
- CLK(NULL, "gpio1_dbck", &gpio1_dbck, CK_343X),
- CLK("omap_wdt", "fck", &wdt2_fck, CK_343X),
- CLK(NULL, "wkup_l4_ick", &wkup_l4_ick, CK_343X),
- CLK(NULL, "usim_ick", &usim_ick, CK_3430ES2),
- CLK("omap_wdt", "ick", &wdt2_ick, CK_343X),
- CLK(NULL, "wdt1_ick", &wdt1_ick, CK_343X),
- CLK(NULL, "gpio1_ick", &gpio1_ick, CK_343X),
- CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick, CK_343X),
- CLK(NULL, "gpt12_ick", &gpt12_ick, CK_343X),
- CLK(NULL, "gpt1_ick", &gpt1_ick, CK_343X),
- CLK(NULL, "per_96m_fck", &per_96m_fck, CK_343X),
- CLK(NULL, "per_48m_fck", &per_48m_fck, CK_343X),
- CLK(NULL, "uart3_fck", &uart3_fck, CK_343X),
- CLK(NULL, "gpt2_fck", &gpt2_fck, CK_343X),
- CLK(NULL, "gpt3_fck", &gpt3_fck, CK_343X),
- CLK(NULL, "gpt4_fck", &gpt4_fck, CK_343X),
- CLK(NULL, "gpt5_fck", &gpt5_fck, CK_343X),
- CLK(NULL, "gpt6_fck", &gpt6_fck, CK_343X),
- CLK(NULL, "gpt7_fck", &gpt7_fck, CK_343X),
- CLK(NULL, "gpt8_fck", &gpt8_fck, CK_343X),
- CLK(NULL, "gpt9_fck", &gpt9_fck, CK_343X),
- CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck, CK_343X),
- CLK(NULL, "gpio6_dbck", &gpio6_dbck, CK_343X),
- CLK(NULL, "gpio5_dbck", &gpio5_dbck, CK_343X),
- CLK(NULL, "gpio4_dbck", &gpio4_dbck, CK_343X),
- CLK(NULL, "gpio3_dbck", &gpio3_dbck, CK_343X),
- CLK(NULL, "gpio2_dbck", &gpio2_dbck, CK_343X),
- CLK(NULL, "wdt3_fck", &wdt3_fck, CK_343X),
- CLK(NULL, "per_l4_ick", &per_l4_ick, CK_343X),
- CLK(NULL, "gpio6_ick", &gpio6_ick, CK_343X),
- CLK(NULL, "gpio5_ick", &gpio5_ick, CK_343X),
- CLK(NULL, "gpio4_ick", &gpio4_ick, CK_343X),
- CLK(NULL, "gpio3_ick", &gpio3_ick, CK_343X),
- CLK(NULL, "gpio2_ick", &gpio2_ick, CK_343X),
- CLK(NULL, "wdt3_ick", &wdt3_ick, CK_343X),
- CLK(NULL, "uart3_ick", &uart3_ick, CK_343X),
- CLK(NULL, "gpt9_ick", &gpt9_ick, CK_343X),
- CLK(NULL, "gpt8_ick", &gpt8_ick, CK_343X),
- CLK(NULL, "gpt7_ick", &gpt7_ick, CK_343X),
- CLK(NULL, "gpt6_ick", &gpt6_ick, CK_343X),
- CLK(NULL, "gpt5_ick", &gpt5_ick, CK_343X),
- CLK(NULL, "gpt4_ick", &gpt4_ick, CK_343X),
- CLK(NULL, "gpt3_ick", &gpt3_ick, CK_343X),
- CLK(NULL, "gpt2_ick", &gpt2_ick, CK_343X),
- CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_343X),
- CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_343X),
- CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_343X),
- CLK("omap-mcbsp.2", "fck", &mcbsp2_fck, CK_343X),
- CLK("omap-mcbsp.3", "fck", &mcbsp3_fck, CK_343X),
- CLK("omap-mcbsp.4", "fck", &mcbsp4_fck, CK_343X),
- CLK("etb", "emu_src_ck", &emu_src_ck, CK_343X),
- CLK(NULL, "pclk_fck", &pclk_fck, CK_343X),
- CLK(NULL, "pclkx2_fck", &pclkx2_fck, CK_343X),
- CLK(NULL, "atclk_fck", &atclk_fck, CK_343X),
- CLK(NULL, "traceclk_src_fck", &traceclk_src_fck, CK_343X),
- CLK(NULL, "traceclk_fck", &traceclk_fck, CK_343X),
- CLK(NULL, "sr1_fck", &sr1_fck, CK_343X),
- CLK(NULL, "sr2_fck", &sr2_fck, CK_343X),
- CLK(NULL, "sr_l4_ick", &sr_l4_ick, CK_343X),
- CLK(NULL, "secure_32k_fck", &secure_32k_fck, CK_343X),
- CLK(NULL, "gpt12_fck", &gpt12_fck, CK_343X),
- CLK(NULL, "wdt1_fck", &wdt1_fck, CK_343X),
-};
-
-/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
-#define DPLL_AUTOIDLE_DISABLE 0x0
-#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1
-
-#define MAX_DPLL_WAIT_TRIES 1000000
-
-#define MIN_SDRC_DLL_LOCK_FREQ 83000000
-
#define CYCLES_PER_MHZ 1000000
-/* Scale factor for fixed-point arith in omap3_core_dpll_m2_set_rate() */
-#define SDRC_MPURATE_SCALE 8
-
-/* 2^SDRC_MPURATE_BASE_SHIFT: MPU MHz that SDRC_MPURATE_LOOPS is defined for */
-#define SDRC_MPURATE_BASE_SHIFT 9
-
-/*
- * SDRC_MPURATE_LOOPS: Number of MPU loops to execute at
- * 2^SDRC_MPURATE_BASE_SHIFT MHz for the SDRC to stabilize
- */
-#define SDRC_MPURATE_LOOPS 96
-
/*
* DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
* that are sourced by DPLL5, and both of these require this clock
@@ -345,6 +52,9 @@ static struct omap_clk omap34xx_clks[] = {
*/
#define DPLL5_FREQ_FOR_USBHOST 120000000
+/* needed by omap3_core_dpll_m2_set_rate() */
+struct clk *sdrc_ick_p, *arm_fck_p;
+
/**
* omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
* @clk: struct clk * being enabled
@@ -366,6 +76,13 @@ static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
}
+const struct clkops clkops_omap3430es2_ssi_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap3430es2_clk_ssi_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
/**
* omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
* @clk: struct clk * being enabled
@@ -391,6 +108,13 @@ static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
}
+const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
/**
* omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
* @clk: struct clk * being enabled
@@ -412,395 +136,19 @@ static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
}
-/**
- * omap3_dpll_recalc - recalculate DPLL rate
- * @clk: DPLL struct clk
- *
- * Recalculate and propagate the DPLL rate.
- */
-static unsigned long omap3_dpll_recalc(struct clk *clk)
-{
- return omap2_get_dpll_rate(clk);
-}
-
-/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
-static void _omap3_dpll_write_clken(struct clk *clk, u8 clken_bits)
-{
- const struct dpll_data *dd;
- u32 v;
-
- dd = clk->dpll_data;
-
- v = __raw_readl(dd->control_reg);
- v &= ~dd->enable_mask;
- v |= clken_bits << __ffs(dd->enable_mask);
- __raw_writel(v, dd->control_reg);
-}
-
-/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
-static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
-{
- const struct dpll_data *dd;
- int i = 0;
- int ret = -EINVAL;
-
- dd = clk->dpll_data;
-
- state <<= __ffs(dd->idlest_mask);
-
- while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) &&
- i < MAX_DPLL_WAIT_TRIES) {
- i++;
- udelay(1);
- }
-
- if (i == MAX_DPLL_WAIT_TRIES) {
- printk(KERN_ERR "clock: %s failed transition to '%s'\n",
- clk->name, (state) ? "locked" : "bypassed");
- } else {
- pr_debug("clock: %s transition to '%s' in %d loops\n",
- clk->name, (state) ? "locked" : "bypassed", i);
-
- ret = 0;
- }
-
- return ret;
-}
-
-/* From 3430 TRM ES2 4.7.6.2 */
-static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
-{
- unsigned long fint;
- u16 f = 0;
-
- fint = clk->dpll_data->clk_ref->rate / n;
-
- pr_debug("clock: fint is %lu\n", fint);
-
- if (fint >= 750000 && fint <= 1000000)
- f = 0x3;
- else if (fint > 1000000 && fint <= 1250000)
- f = 0x4;
- else if (fint > 1250000 && fint <= 1500000)
- f = 0x5;
- else if (fint > 1500000 && fint <= 1750000)
- f = 0x6;
- else if (fint > 1750000 && fint <= 2100000)
- f = 0x7;
- else if (fint > 7500000 && fint <= 10000000)
- f = 0xB;
- else if (fint > 10000000 && fint <= 12500000)
- f = 0xC;
- else if (fint > 12500000 && fint <= 15000000)
- f = 0xD;
- else if (fint > 15000000 && fint <= 17500000)
- f = 0xE;
- else if (fint > 17500000 && fint <= 21000000)
- f = 0xF;
- else
- pr_debug("clock: unknown freqsel setting for %d\n", n);
-
- return f;
-}
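As a worked instance of the frequency bands above (illustrative only, not part
of the patch):

/*
 * With a 13 MHz reference and n = 1, fint = 13 MHz, which falls in the
 * (12.5, 15] MHz band, so _omap3_dpll_compute_freqsel() returns 0xD.
 * An fint in the unsupported 2.1-7.5 MHz gap returns 0, which the
 * set_rate path only flags with a WARN_ON().
 */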
-
-/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */
-
-/*
- * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report
- * readiness before returning. Will save and restore the DPLL's
- * autoidle state across the enable, per the CDP code. If the DPLL
- * locked successfully, return 0; if the DPLL did not lock in the time
- * allotted, or DPLL3 was passed in, return -EINVAL.
- */
-static int _omap3_noncore_dpll_lock(struct clk *clk)
-{
- u8 ai;
- int r;
-
- if (clk == &dpll3_ck)
- return -EINVAL;
-
- pr_debug("clock: locking DPLL %s\n", clk->name);
-
- ai = omap3_dpll_autoidle_read(clk);
-
- omap3_dpll_deny_idle(clk);
-
- _omap3_dpll_write_clken(clk, DPLL_LOCKED);
-
- r = _omap3_wait_dpll_status(clk, 1);
-
- if (ai)
- omap3_dpll_allow_idle(clk);
-
- return r;
-}
-
-/*
- * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enter low-power bypass mode. In
- * bypass mode, the DPLL's rate is set equal to its parent clock's
- * rate. Waits for the DPLL to report readiness before returning.
- * Will save and restore the DPLL's autoidle state across the enable,
- * per the CDP code. If the DPLL entered bypass mode successfully,
- * return 0; if the DPLL did not enter bypass in the time allotted, or
- * DPLL3 was passed in, or the DPLL does not support low-power bypass,
- * return -EINVAL.
- */
-static int _omap3_noncore_dpll_bypass(struct clk *clk)
-{
- int r;
- u8 ai;
-
- if (clk == &dpll3_ck)
- return -EINVAL;
-
- if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
- return -EINVAL;
-
- pr_debug("clock: configuring DPLL %s for low-power bypass\n",
- clk->name);
-
- ai = omap3_dpll_autoidle_read(clk);
-
- _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
-
- r = _omap3_wait_dpll_status(clk, 0);
-
- if (ai)
- omap3_dpll_allow_idle(clk);
- else
- omap3_dpll_deny_idle(clk);
-
- return r;
-}
-
-/*
- * _omap3_noncore_dpll_stop - instruct a DPLL to stop
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enter low-power stop. Will save and
- * restore the DPLL's autoidle state across the stop, per the CDP
- * code. If DPLL3 was passed in, or the DPLL does not support
- * low-power stop, return -EINVAL; otherwise, return 0.
- */
-static int _omap3_noncore_dpll_stop(struct clk *clk)
-{
- u8 ai;
-
- if (clk == &dpll3_ck)
- return -EINVAL;
-
- if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
- return -EINVAL;
-
- pr_debug("clock: stopping DPLL %s\n", clk->name);
-
- ai = omap3_dpll_autoidle_read(clk);
-
- _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
-
- if (ai)
- omap3_dpll_allow_idle(clk);
- else
- omap3_dpll_deny_idle(clk);
-
- return 0;
-}
-
-/**
- * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
- * The choice of modes depends on the DPLL's programmed rate: if it is
- * the same as the DPLL's parent clock, it will enter bypass;
- * otherwise, it will enter lock. This code will wait for the DPLL to
- * indicate readiness before returning, unless the DPLL takes too long
- * to enter the target state. Intended to be used as the struct clk's
- * enable function. If DPLL3 was passed in, or the DPLL does not
- * support low-power stop, or if the DPLL took too long to enter
- * bypass or lock, return -EINVAL; otherwise, return 0.
- */
-static int omap3_noncore_dpll_enable(struct clk *clk)
-{
- int r;
- struct dpll_data *dd;
-
- if (clk == &dpll3_ck)
- return -EINVAL;
-
- dd = clk->dpll_data;
- if (!dd)
- return -EINVAL;
-
- if (clk->rate == dd->clk_bypass->rate) {
- WARN_ON(clk->parent != dd->clk_bypass);
- r = _omap3_noncore_dpll_bypass(clk);
- } else {
- WARN_ON(clk->parent != dd->clk_ref);
- r = _omap3_noncore_dpll_lock(clk);
- }
- /* FIXME: this is dubious - if clk->rate has changed, what about propagating? */
- if (!r)
- clk->rate = omap2_get_dpll_rate(clk);
-
- return r;
-}
-
-/**
- * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enter low-power stop.  Intended to be
- * used as the struct clk's disable function.  If DPLL3 was passed in,
- * the call is a no-op.  No return value.
- */
-static void omap3_noncore_dpll_disable(struct clk *clk)
-{
- if (clk == &dpll3_ck)
- return;
-
- _omap3_noncore_dpll_stop(clk);
-}
-
-
-/* Non-CORE DPLL rate set code */
-
-/*
- * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
- * @clk: struct clk * of DPLL to set
- * @m: DPLL multiplier to set
- * @n: DPLL divider to set
- * @freqsel: FREQSEL value to set
- *
- * Program the DPLL with the supplied M, N values, and wait for the DPLL to
- * lock.  Returns -EINVAL upon error, or 0 upon success.
- */
-static int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
-{
- struct dpll_data *dd = clk->dpll_data;
- u32 v;
-
- /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
- _omap3_noncore_dpll_bypass(clk);
-
- /* Set jitter correction */
- v = __raw_readl(dd->control_reg);
- v &= ~dd->freqsel_mask;
- v |= freqsel << __ffs(dd->freqsel_mask);
- __raw_writel(v, dd->control_reg);
-
- /* Set DPLL multiplier, divider */
- v = __raw_readl(dd->mult_div1_reg);
- v &= ~(dd->mult_mask | dd->div1_mask);
- v |= m << __ffs(dd->mult_mask);
- v |= (n - 1) << __ffs(dd->div1_mask);
- __raw_writel(v, dd->mult_div1_reg);
-
- /* We let the clock framework set the other output dividers later */
-
- /* REVISIT: Set ramp-up delay? */
-
- _omap3_noncore_dpll_lock(clk);
-
- return 0;
-}
-
-/**
- * omap3_noncore_dpll_set_rate - set non-core DPLL rate
- * @clk: struct clk * of DPLL to set
- * @rate: rounded target rate
- *
- * Set the DPLL CLKOUT to the target rate. If the DPLL can enter
- * low-power bypass, and the target rate is the bypass source clock
- * rate, then configure the DPLL for bypass. Otherwise, round the
- * target rate if it hasn't been done already, then program and lock
- * the DPLL. Returns -EINVAL upon error, or 0 upon success.
- */
-static int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
-{
- struct clk *new_parent = NULL;
- u16 freqsel;
- struct dpll_data *dd;
- int ret;
-
- if (!clk || !rate)
- return -EINVAL;
-
- dd = clk->dpll_data;
- if (!dd)
- return -EINVAL;
-
- if (rate == omap2_get_dpll_rate(clk))
- return 0;
-
- /*
- * Ensure both the bypass and ref clocks are enabled prior to
- * doing anything; we need the bypass clock running to reprogram
- * the DPLL.
- */
- omap2_clk_enable(dd->clk_bypass);
- omap2_clk_enable(dd->clk_ref);
-
- if (dd->clk_bypass->rate == rate &&
- (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
- pr_debug("clock: %s: set rate: entering bypass.\n", clk->name);
-
- ret = _omap3_noncore_dpll_bypass(clk);
- if (!ret)
- new_parent = dd->clk_bypass;
- } else {
- if (dd->last_rounded_rate != rate)
- omap2_dpll_round_rate(clk, rate);
-
- if (dd->last_rounded_rate == 0)
- return -EINVAL;
-
- freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
- if (!freqsel)
- WARN_ON(1);
-
- pr_debug("clock: %s: set rate: locking rate to %lu.\n",
- clk->name, rate);
-
- ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
- dd->last_rounded_n, freqsel);
- if (!ret)
- new_parent = dd->clk_ref;
- }
- if (!ret) {
- /*
-		 * Switch the parent clock in the hierarchy, and make sure
- * that the new parent's usecount is correct. Note: we
- * enable the new parent before disabling the old to avoid
- * any unnecessary hardware disable->enable transitions.
- */
- if (clk->usecount) {
- omap2_clk_enable(new_parent);
- omap2_clk_disable(clk->parent);
- }
- clk_reparent(clk, new_parent);
- clk->rate = rate;
- }
- omap2_clk_disable(dd->clk_ref);
- omap2_clk_disable(dd->clk_bypass);
+const struct clkops clkops_omap3430es2_hsotgusb_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
- return 0;
-}
+const struct clkops clkops_noncore_dpll_ops = {
+ .enable = omap3_noncore_dpll_enable,
+ .disable = omap3_noncore_dpll_disable,
+};
-static int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
+int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
{
/*
* According to the 12-5 CDP code from TI, "Limitation 2.5"
@@ -831,12 +179,12 @@ static int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
* Program the DPLL M2 divider with the rounded target rate. Returns
* -EINVAL upon error, or 0 upon success.
*/
-static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
+int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
u32 new_div = 0;
u32 unlock_dll = 0;
u32 c;
- unsigned long validrate, sdrcrate, mpurate;
+ unsigned long validrate, sdrcrate, _mpurate;
struct omap_sdrc_params *sdrc_cs0;
struct omap_sdrc_params *sdrc_cs1;
int ret;
@@ -844,14 +192,11 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
if (!clk || !rate)
return -EINVAL;
- if (clk != &dpll3_m2_ck)
- return -EINVAL;
-
validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
if (validrate != rate)
return -EINVAL;
- sdrcrate = sdrc_ick.rate;
+ sdrcrate = sdrc_ick_p->rate;
if (rate > clk->rate)
sdrcrate <<= ((rate / clk->rate) >> 1);
else
@@ -869,8 +214,8 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
/*
* XXX This only needs to be done when the CPU frequency changes
*/
- mpurate = arm_fck.rate / CYCLES_PER_MHZ;
- c = (mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
+ _mpurate = arm_fck_p->rate / CYCLES_PER_MHZ;
+ c = (_mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
c += 1; /* for safety */
c *= SDRC_MPURATE_LOOPS;
c >>= SDRC_MPURATE_SCALE;
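As a worked instance of the fixed-point loop estimate above (illustrative only,
not part of the patch):

/*
 * For a 600 MHz MPU clock, _mpurate = 600, so:
 *   c = (600 << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT  = 300
 *   c = (300 + 1) * SDRC_MPURATE_LOOPS                          = 28896
 *   c >>= SDRC_MPURATE_SCALE                                   -> 112
 * i.e. the 96-loop baseline defined for a 2^9 = 512 MHz MPU is scaled
 * roughly linearly with the actual MPU rate (600/512 * 96 ~= 112.5).
 */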
@@ -906,129 +251,6 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
return 0;
}
-
-static const struct clkops clkops_noncore_dpll_ops = {
- .enable = &omap3_noncore_dpll_enable,
- .disable = &omap3_noncore_dpll_disable,
-};
-
-/* DPLL autoidle read/set code */
-
-
-/**
- * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
- * @clk: struct clk * of the DPLL to read
- *
- * Return the DPLL's autoidle bits, shifted down to bit 0. Returns
- * -EINVAL if passed a null pointer or if the struct clk does not
- * appear to refer to a DPLL.
- */
-static u32 omap3_dpll_autoidle_read(struct clk *clk)
-{
- const struct dpll_data *dd;
- u32 v;
-
- if (!clk || !clk->dpll_data)
- return -EINVAL;
-
- dd = clk->dpll_data;
-
- v = __raw_readl(dd->autoidle_reg);
- v &= dd->autoidle_mask;
- v >>= __ffs(dd->autoidle_mask);
-
- return v;
-}
-
-/**
- * omap3_dpll_allow_idle - enable DPLL autoidle bits
- * @clk: struct clk * of the DPLL to operate on
- *
- * Enable DPLL automatic idle control. This automatic idle mode
- * switching takes effect only when the DPLL is locked, at least on
- * OMAP3430. The DPLL will enter low-power stop when its downstream
- * clocks are gated. No return value.
- */
-static void omap3_dpll_allow_idle(struct clk *clk)
-{
- const struct dpll_data *dd;
- u32 v;
-
- if (!clk || !clk->dpll_data)
- return;
-
- dd = clk->dpll_data;
-
- /*
- * REVISIT: CORE DPLL can optionally enter low-power bypass
- * by writing 0x5 instead of 0x1. Add some mechanism to
- * optionally enter this mode.
- */
- v = __raw_readl(dd->autoidle_reg);
- v &= ~dd->autoidle_mask;
- v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
- __raw_writel(v, dd->autoidle_reg);
-}
-
-/**
- * omap3_dpll_deny_idle - prevent DPLL from automatically idling
- * @clk: struct clk * of the DPLL to operate on
- *
- * Disable DPLL automatic idle control. No return value.
- */
-static void omap3_dpll_deny_idle(struct clk *clk)
-{
- const struct dpll_data *dd;
- u32 v;
-
- if (!clk || !clk->dpll_data)
- return;
-
- dd = clk->dpll_data;
-
- v = __raw_readl(dd->autoidle_reg);
- v &= ~dd->autoidle_mask;
- v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
- __raw_writel(v, dd->autoidle_reg);
-}
-
-/* Clock control for DPLL outputs */
-
-/**
- * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
- * @clk: DPLL output struct clk
- *
- * Using parent clock DPLL data, look up DPLL state. If locked, set our
- * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
- */
-static unsigned long omap3_clkoutx2_recalc(struct clk *clk)
-{
- const struct dpll_data *dd;
- unsigned long rate;
- u32 v;
- struct clk *pclk;
-
- /* Walk up the parents of clk, looking for a DPLL */
- pclk = clk->parent;
- while (pclk && !pclk->dpll_data)
- pclk = pclk->parent;
-
- /* clk does not have a DPLL as a parent? */
- WARN_ON(!pclk);
-
- dd = pclk->dpll_data;
-
- WARN_ON(!dd->enable_mask);
-
- v = __raw_readl(dd->control_reg) & dd->enable_mask;
- v >>= __ffs(dd->enable_mask);
- if (v != OMAP3XXX_EN_DPLL_LOCKED)
- rate = clk->parent->rate;
- else
- rate = clk->parent->rate * 2;
- return rate;
-}
-
/* Common clock code */
/*
@@ -1037,7 +259,7 @@ static unsigned long omap3_clkoutx2_recalc(struct clk *clk)
*/
#if defined(CONFIG_ARCH_OMAP3)
-static struct clk_functions omap2_clk_functions = {
+struct clk_functions omap2_clk_functions = {
.clk_enable = omap2_clk_enable,
.clk_disable = omap2_clk_disable,
.clk_round_rate = omap2_clk_round_rate,
@@ -1063,7 +285,7 @@ void omap2_clk_prepare_for_reboot(void)
#endif
}
-static void omap3_clk_lock_dpll5(void)
+void omap3_clk_lock_dpll5(void)
{
struct clk *dpll5_clk;
struct clk *dpll5_m2_clk;
@@ -1093,19 +315,32 @@ static void omap3_clk_lock_dpll5(void)
*/
static int __init omap2_clk_arch_init(void)
{
+ struct clk *osc_sys_ck, *dpll1_ck, *arm_fck, *core_ck;
+ unsigned long osc_sys_rate;
+
if (!mpurate)
return -EINVAL;
+ /* XXX test these for success */
+ dpll1_ck = clk_get(NULL, "dpll1_ck");
+ arm_fck = clk_get(NULL, "arm_fck");
+ core_ck = clk_get(NULL, "core_ck");
+ osc_sys_ck = clk_get(NULL, "osc_sys_ck");
+
/* REVISIT: not yet ready for 343x */
- if (clk_set_rate(&dpll1_ck, mpurate))
+ if (clk_set_rate(dpll1_ck, mpurate))
printk(KERN_ERR "*** Unable to set MPU rate\n");
recalculate_root_clocks();
- printk(KERN_INFO "Switched to new clocking rate (Crystal/Core/MPU): "
- "%ld.%01ld/%ld/%ld MHz\n",
- (osc_sys_ck.rate / 1000000), ((osc_sys_ck.rate / 100000) % 10),
- (core_ck.rate / 1000000), (arm_fck.rate / 1000000)) ;
+ osc_sys_rate = clk_get_rate(osc_sys_ck);
+
+ pr_info("Switched to new clocking rate (Crystal/Core/MPU): "
+ "%ld.%01ld/%ld/%ld MHz\n",
+ (osc_sys_rate / 1000000),
+ ((osc_sys_rate / 100000) % 10),
+ (clk_get_rate(core_ck) / 1000000),
+ (clk_get_rate(arm_fck) / 1000000));
calibrate_delay();
@@ -1113,83 +348,7 @@ static int __init omap2_clk_arch_init(void)
}
arch_initcall(omap2_clk_arch_init);
-int __init omap2_clk_init(void)
-{
- /* struct prcm_config *prcm; */
- struct omap_clk *c;
- /* u32 clkrate; */
- u32 cpu_clkflg;
-
- if (cpu_is_omap34xx()) {
- cpu_mask = RATE_IN_343X;
- cpu_clkflg = CK_343X;
-
- /*
- * Update this if there are further clock changes between ES2
- * and production parts
- */
- if (omap_rev() == OMAP3430_REV_ES1_0) {
- /* No 3430ES1-only rates exist, so no RATE_IN_3430ES1 */
- cpu_clkflg |= CK_3430ES1;
- } else {
- cpu_mask |= RATE_IN_3430ES2;
- cpu_clkflg |= CK_3430ES2;
- }
- }
-
- clk_init(&omap2_clk_functions);
-
- for (c = omap34xx_clks; c < omap34xx_clks + ARRAY_SIZE(omap34xx_clks); c++)
- clk_preinit(c->lk.clk);
- for (c = omap34xx_clks; c < omap34xx_clks + ARRAY_SIZE(omap34xx_clks); c++)
- if (c->cpu & cpu_clkflg) {
- clkdev_add(&c->lk);
- clk_register(c->lk.clk);
- omap2_init_clk_clkdm(c->lk.clk);
- }
-
- /* REVISIT: Not yet ready for OMAP3 */
-#if 0
- /* Check the MPU rate set by bootloader */
- clkrate = omap2_get_dpll_rate_24xx(&dpll_ck);
- for (prcm = rate_table; prcm->mpu_speed; prcm++) {
- if (!(prcm->flags & cpu_mask))
- continue;
- if (prcm->xtal_speed != sys_ck.rate)
- continue;
- if (prcm->dpll_speed <= clkrate)
- break;
- }
- curr_prcm_set = prcm;
#endif
- recalculate_root_clocks();
-
- printk(KERN_INFO "Clocking rate (Crystal/Core/MPU): "
- "%ld.%01ld/%ld/%ld MHz\n",
- (osc_sys_ck.rate / 1000000), (osc_sys_ck.rate / 100000) % 10,
- (core_ck.rate / 1000000), (arm_fck.rate / 1000000));
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_enable_init_clocks();
-
- /*
- * Lock DPLL5 and put it in autoidle.
- */
- if (omap_rev() >= OMAP3430_REV_ES2_0)
- omap3_clk_lock_dpll5();
- /* Avoid sleeping during omap2_clk_prepare_for_reboot() */
- /* REVISIT: not yet ready for 343x */
-#if 0
- vclk = clk_get(NULL, "virt_prcm_set");
- sclk = clk_get(NULL, "sys_ck");
-#endif
- return 0;
-}
-
-#endif
diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h
index 8fe1bcb23dd..9a2c07eac9a 100644
--- a/arch/arm/mach-omap2/clock34xx.h
+++ b/arch/arm/mach-omap2/clock34xx.h
@@ -1,2993 +1,24 @@
/*
- * OMAP3 clock framework
+ * OMAP3 clock function prototypes and macros
*
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2008 Nokia Corporation
- *
- * Written by Paul Walmsley
- * With many device clock fixes by Kevin Hilman and Jouni Högander
- * DPLL bypass clock support added by Roman Tereshonkov
- *
- */
-
-/*
- * Virtual clocks are introduced as convenient tools.
- * They are sources for other clocks and not supposed
- * to be requested from drivers directly.
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H
-
-#include <plat/control.h>
-
-#include "clock.h"
-#include "cm.h"
-#include "cm-regbits-34xx.h"
-#include "prm.h"
-#include "prm-regbits-34xx.h"
-
-#define OMAP_CM_REGADDR OMAP34XX_CM_REGADDR
-
-static unsigned long omap3_dpll_recalc(struct clk *clk);
-static unsigned long omap3_clkoutx2_recalc(struct clk *clk);
-static void omap3_dpll_allow_idle(struct clk *clk);
-static void omap3_dpll_deny_idle(struct clk *clk);
-static u32 omap3_dpll_autoidle_read(struct clk *clk);
-static int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate);
-static int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate);
-static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate);
-
-/* Maximum DPLL multiplier, divider values for OMAP3 */
-#define OMAP3_MAX_DPLL_MULT 2048
-#define OMAP3_MAX_DPLL_DIV 128
-
-/*
- * DPLL1 supplies clock to the MPU.
- * DPLL2 supplies clock to the IVA2.
- * DPLL3 supplies CORE domain clocks.
- * DPLL4 supplies peripheral clocks.
- * DPLL5 supplies other peripheral clocks (USBHOST, USIM).
- */
-
-/* Forward declarations for DPLL bypass clocks */
-static struct clk dpll1_fck;
-static struct clk dpll2_fck;
-
-/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
-#define DPLL_LOW_POWER_STOP 0x1
-#define DPLL_LOW_POWER_BYPASS 0x5
-#define DPLL_LOCKED 0x7
-
-/* PRM CLOCKS */
-
-/* According to timer32k.c, this is a 32768Hz clock, not a 32000Hz clock. */
-static struct clk omap_32k_fck = {
- .name = "omap_32k_fck",
- .ops = &clkops_null,
- .rate = 32768,
- .flags = RATE_FIXED,
-};
-
-static struct clk secure_32k_fck = {
- .name = "secure_32k_fck",
- .ops = &clkops_null,
- .rate = 32768,
- .flags = RATE_FIXED,
-};
-
-/* Virtual source clocks for osc_sys_ck */
-static struct clk virt_12m_ck = {
- .name = "virt_12m_ck",
- .ops = &clkops_null,
- .rate = 12000000,
- .flags = RATE_FIXED,
-};
-
-static struct clk virt_13m_ck = {
- .name = "virt_13m_ck",
- .ops = &clkops_null,
- .rate = 13000000,
- .flags = RATE_FIXED,
-};
-
-static struct clk virt_16_8m_ck = {
- .name = "virt_16_8m_ck",
- .ops = &clkops_null,
- .rate = 16800000,
- .flags = RATE_FIXED,
-};
-
-static struct clk virt_19_2m_ck = {
- .name = "virt_19_2m_ck",
- .ops = &clkops_null,
- .rate = 19200000,
- .flags = RATE_FIXED,
-};
-
-static struct clk virt_26m_ck = {
- .name = "virt_26m_ck",
- .ops = &clkops_null,
- .rate = 26000000,
- .flags = RATE_FIXED,
-};
-
-static struct clk virt_38_4m_ck = {
- .name = "virt_38_4m_ck",
- .ops = &clkops_null,
- .rate = 38400000,
- .flags = RATE_FIXED,
-};
-
-static const struct clksel_rate osc_sys_12m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_13m_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_16_8m_rates[] = {
- { .div = 1, .val = 5, .flags = RATE_IN_3430ES2 | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_19_2m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_26m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate osc_sys_38_4m_rates[] = {
- { .div = 1, .val = 4, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel osc_sys_clksel[] = {
- { .parent = &virt_12m_ck, .rates = osc_sys_12m_rates },
- { .parent = &virt_13m_ck, .rates = osc_sys_13m_rates },
- { .parent = &virt_16_8m_ck, .rates = osc_sys_16_8m_rates },
- { .parent = &virt_19_2m_ck, .rates = osc_sys_19_2m_rates },
- { .parent = &virt_26m_ck, .rates = osc_sys_26m_rates },
- { .parent = &virt_38_4m_ck, .rates = osc_sys_38_4m_rates },
- { .parent = NULL },
-};
-
-/* Oscillator clock */
-/* 12, 13, 16.8, 19.2, 26, or 38.4 MHz */
-static struct clk osc_sys_ck = {
- .name = "osc_sys_ck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP3430_PRM_CLKSEL,
- .clksel_mask = OMAP3430_SYS_CLKIN_SEL_MASK,
- .clksel = osc_sys_clksel,
- /* REVISIT: deal with autoextclkmode? */
- .flags = RATE_FIXED,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate div2_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 0 }
-};
-
-static const struct clksel sys_clksel[] = {
- { .parent = &osc_sys_ck, .rates = div2_rates },
- { .parent = NULL }
-};
-
-/* Latency: this clock is only enabled after PRM_CLKSETUP.SETUP_TIME */
-/* Feeds DPLLs - divided first by PRM_CLKSRC_CTRL.SYSCLKDIV? */
-static struct clk sys_ck = {
- .name = "sys_ck",
- .ops = &clkops_null,
- .parent = &osc_sys_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP3430_PRM_CLKSRC_CTRL,
- .clksel_mask = OMAP_SYSCLKDIV_MASK,
- .clksel = sys_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk sys_altclk = {
- .name = "sys_altclk",
- .ops = &clkops_null,
-};
-
-/* Optional external clock input for some McBSPs */
-static struct clk mcbsp_clks = {
- .name = "mcbsp_clks",
- .ops = &clkops_null,
-};
-
-/* PRM EXTERNAL CLOCK OUTPUT */
-
-static struct clk sys_clkout1 = {
- .name = "sys_clkout1",
- .ops = &clkops_omap2_dflt,
- .parent = &osc_sys_ck,
- .enable_reg = OMAP3430_PRM_CLKOUT_CTRL,
- .enable_bit = OMAP3430_CLKOUT_EN_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* DPLLS */
-
-/* CM CLOCKS */
-
-static const struct clksel_rate div16_dpll_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 5, .val = 5, .flags = RATE_IN_343X },
- { .div = 6, .val = 6, .flags = RATE_IN_343X },
- { .div = 7, .val = 7, .flags = RATE_IN_343X },
- { .div = 8, .val = 8, .flags = RATE_IN_343X },
- { .div = 9, .val = 9, .flags = RATE_IN_343X },
- { .div = 10, .val = 10, .flags = RATE_IN_343X },
- { .div = 11, .val = 11, .flags = RATE_IN_343X },
- { .div = 12, .val = 12, .flags = RATE_IN_343X },
- { .div = 13, .val = 13, .flags = RATE_IN_343X },
- { .div = 14, .val = 14, .flags = RATE_IN_343X },
- { .div = 15, .val = 15, .flags = RATE_IN_343X },
- { .div = 16, .val = 16, .flags = RATE_IN_343X },
- { .div = 0 }
-};
-
-/* DPLL1 */
-/* MPU clock source */
-/* Type: DPLL */
-static struct dpll_data dpll1_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .mult_mask = OMAP3430_MPU_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_MPU_DPLL_DIV_MASK,
- .clk_bypass = &dpll1_fck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_MPU_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
- .enable_mask = OMAP3430_EN_MPU_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
- .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_MPU_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
- .autoidle_mask = OMAP3430_AUTO_MPU_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
- .idlest_mask = OMAP3430_ST_MPU_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
- .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
-};
-
-static struct clk dpll1_ck = {
- .name = "dpll1_ck",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .dpll_data = &dpll1_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
- .clkdm_name = "dpll1_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-/*
- * This virtual clock provides the CLKOUTX2 output from the DPLL if the
- * DPLL isn't bypassed.
- */
-static struct clk dpll1_x2_ck = {
- .name = "dpll1_x2_ck",
- .ops = &clkops_null,
- .parent = &dpll1_ck,
- .clkdm_name = "dpll1_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/* On DPLL1, unlike other DPLLs, the divider is downstream from CLKOUTX2 */
-static const struct clksel div16_dpll1_x2m2_clksel[] = {
- { .parent = &dpll1_x2_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-/*
- * Does not exist in the TRM - needed to separate the M2 divider from
- * bypass selection in mpu_ck
- */
-static struct clk dpll1_x2m2_ck = {
- .name = "dpll1_x2m2_ck",
- .ops = &clkops_null,
- .parent = &dpll1_x2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
- .clksel_mask = OMAP3430_MPU_DPLL_CLKOUT_DIV_MASK,
- .clksel = div16_dpll1_x2m2_clksel,
- .clkdm_name = "dpll1_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* DPLL2 */
-/* IVA2 clock source */
-/* Type: DPLL */
-
-static struct dpll_data dpll2_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .mult_mask = OMAP3430_IVA2_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_IVA2_DPLL_DIV_MASK,
- .clk_bypass = &dpll2_fck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
- .enable_mask = OMAP3430_EN_IVA2_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
- (1 << DPLL_LOW_POWER_BYPASS),
- .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
- .autoidle_mask = OMAP3430_AUTO_IVA2_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
- .idlest_mask = OMAP3430_ST_IVA2_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
- .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
-};
-
-static struct clk dpll2_ck = {
- .name = "dpll2_ck",
- .ops = &clkops_noncore_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll2_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
- .clkdm_name = "dpll2_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-static const struct clksel div16_dpll2_m2x2_clksel[] = {
- { .parent = &dpll2_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-/*
- * The TRM is conflicted on whether IVA2 clock comes from DPLL2 CLKOUT
- * or CLKOUTX2. CLKOUT seems most plausible.
- */
-static struct clk dpll2_m2_ck = {
- .name = "dpll2_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll2_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD,
- OMAP3430_CM_CLKSEL2_PLL),
- .clksel_mask = OMAP3430_IVA2_DPLL_CLKOUT_DIV_MASK,
- .clksel = div16_dpll2_m2x2_clksel,
- .clkdm_name = "dpll2_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/*
- * DPLL3
- * Source clock for all interfaces and for some device fclks
- * REVISIT: Also supports fast relock bypass - not included below
- */
-static struct dpll_data dpll3_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .mult_mask = OMAP3430_CORE_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_CORE_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_CORE_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_mask = OMAP3430_EN_CORE_DPLL_MASK,
- .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_CORE_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
- .autoidle_mask = OMAP3430_AUTO_CORE_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
- .idlest_mask = OMAP3430_ST_CORE_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
- .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
-};
-
-static struct clk dpll3_ck = {
- .name = "dpll3_ck",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .dpll_data = &dpll3_dd,
- .round_rate = &omap2_dpll_round_rate,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-/*
- * This virtual clock provides the CLKOUTX2 output from the DPLL if the
- * DPLL isn't bypassed
- */
-static struct clk dpll3_x2_ck = {
- .name = "dpll3_x2_ck",
- .ops = &clkops_null,
- .parent = &dpll3_ck,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel_rate div31_dpll3_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_3430ES2 },
- { .div = 4, .val = 4, .flags = RATE_IN_3430ES2 },
- { .div = 5, .val = 5, .flags = RATE_IN_3430ES2 },
- { .div = 6, .val = 6, .flags = RATE_IN_3430ES2 },
- { .div = 7, .val = 7, .flags = RATE_IN_3430ES2 },
- { .div = 8, .val = 8, .flags = RATE_IN_3430ES2 },
- { .div = 9, .val = 9, .flags = RATE_IN_3430ES2 },
- { .div = 10, .val = 10, .flags = RATE_IN_3430ES2 },
- { .div = 11, .val = 11, .flags = RATE_IN_3430ES2 },
- { .div = 12, .val = 12, .flags = RATE_IN_3430ES2 },
- { .div = 13, .val = 13, .flags = RATE_IN_3430ES2 },
- { .div = 14, .val = 14, .flags = RATE_IN_3430ES2 },
- { .div = 15, .val = 15, .flags = RATE_IN_3430ES2 },
- { .div = 16, .val = 16, .flags = RATE_IN_3430ES2 },
- { .div = 17, .val = 17, .flags = RATE_IN_3430ES2 },
- { .div = 18, .val = 18, .flags = RATE_IN_3430ES2 },
- { .div = 19, .val = 19, .flags = RATE_IN_3430ES2 },
- { .div = 20, .val = 20, .flags = RATE_IN_3430ES2 },
- { .div = 21, .val = 21, .flags = RATE_IN_3430ES2 },
- { .div = 22, .val = 22, .flags = RATE_IN_3430ES2 },
- { .div = 23, .val = 23, .flags = RATE_IN_3430ES2 },
- { .div = 24, .val = 24, .flags = RATE_IN_3430ES2 },
- { .div = 25, .val = 25, .flags = RATE_IN_3430ES2 },
- { .div = 26, .val = 26, .flags = RATE_IN_3430ES2 },
- { .div = 27, .val = 27, .flags = RATE_IN_3430ES2 },
- { .div = 28, .val = 28, .flags = RATE_IN_3430ES2 },
- { .div = 29, .val = 29, .flags = RATE_IN_3430ES2 },
- { .div = 30, .val = 30, .flags = RATE_IN_3430ES2 },
- { .div = 31, .val = 31, .flags = RATE_IN_3430ES2 },
- { .div = 0 },
-};
-
-static const struct clksel div31_dpll3m2_clksel[] = {
- { .parent = &dpll3_ck, .rates = div31_dpll3_rates },
- { .parent = NULL }
-};
-
-/* DPLL3 output M2 - primary control point for CORE speed */
-static struct clk dpll3_m2_ck = {
- .name = "dpll3_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll3_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CORE_DPLL_CLKOUT_DIV_MASK,
- .clksel = div31_dpll3m2_clksel,
- .clkdm_name = "dpll3_clkdm",
- .round_rate = &omap2_clksel_round_rate,
- .set_rate = &omap3_core_dpll_m2_set_rate,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk core_ck = {
- .name = "core_ck",
- .ops = &clkops_null,
- .parent = &dpll3_m2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dpll3_m2x2_ck = {
- .name = "dpll3_m2x2_ck",
- .ops = &clkops_null,
- .parent = &dpll3_m2_ck,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static const struct clksel div16_dpll3_clksel[] = {
- { .parent = &dpll3_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-/* This virtual clock is the source for dpll3_m3x2_ck */
-static struct clk dpll3_m3_ck = {
- .name = "dpll3_m3_ck",
- .ops = &clkops_null,
- .parent = &dpll3_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_DIV_DPLL3_MASK,
- .clksel = div16_dpll3_clksel,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll3_m3x2_ck = {
- .name = "dpll3_m3x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll3_m3_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_EMU_CORE_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static struct clk emu_core_alwon_ck = {
- .name = "emu_core_alwon_ck",
- .ops = &clkops_null,
- .parent = &dpll3_m3x2_ck,
- .clkdm_name = "dpll3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* DPLL4 */
-/* Supplies 96MHz, 54MHz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
-/* Type: DPLL */
-static struct dpll_data dpll4_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
- .mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK,
- .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
- .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
- .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
- .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
- .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
-};
-
-static struct clk dpll4_ck = {
- .name = "dpll4_ck",
- .ops = &clkops_noncore_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll4_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_dpll4_set_rate,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-/*
- * This virtual clock provides the CLKOUTX2 output from the DPLL if the
- * DPLL isn't bypassed --
- * XXX does this serve any downstream clocks?
- */
-static struct clk dpll4_x2_ck = {
- .name = "dpll4_x2_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel div16_dpll4_clksel[] = {
- { .parent = &dpll4_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-/* This virtual clock is the source for dpll4_m2x2_ck */
-static struct clk dpll4_m2_ck = {
- .name = "dpll4_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
- .clksel_mask = OMAP3430_DIV_96M_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m2x2_ck = {
- .name = "dpll4_m2x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m2_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_96M_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/*
- * DPLL4 generates DPLL4_M2X2_CLK which is then routed into the PRM as
- * PRM_96M_ALWON_(F)CLK. Two clocks then emerge from the PRM:
- * 96M_ALWON_FCLK (called "omap_96m_alwon_fck" below) and
- * CM_96K_(F)CLK.
- */
-static struct clk omap_96m_alwon_fck = {
- .name = "omap_96m_alwon_fck",
- .ops = &clkops_null,
- .parent = &dpll4_m2x2_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk cm_96m_fck = {
- .name = "cm_96m_fck",
- .ops = &clkops_null,
- .parent = &omap_96m_alwon_fck,
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel_rate omap_96m_dpll_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate omap_96m_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel omap_96m_fck_clksel[] = {
- { .parent = &cm_96m_fck, .rates = omap_96m_dpll_rates },
- { .parent = &sys_ck, .rates = omap_96m_sys_rates },
- { .parent = NULL }
-};
-
-static struct clk omap_96m_fck = {
- .name = "omap_96m_fck",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_SOURCE_96M_MASK,
- .clksel = omap_96m_fck_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m3x2_ck */
-static struct clk dpll4_m3_ck = {
- .name = "dpll4_m3_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_TV_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m3x2_ck = {
- .name = "dpll4_m3x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m3_ck,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_TV_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static const struct clksel_rate omap_54m_d4m3x2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate omap_54m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel omap_54m_clksel[] = {
- { .parent = &dpll4_m3x2_ck, .rates = omap_54m_d4m3x2_rates },
- { .parent = &sys_altclk, .rates = omap_54m_alt_rates },
- { .parent = NULL }
-};
-
-static struct clk omap_54m_fck = {
- .name = "omap_54m_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_SOURCE_54M_MASK,
- .clksel = omap_54m_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate omap_48m_cm96m_rates[] = {
- { .div = 2, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate omap_48m_alt_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel omap_48m_clksel[] = {
- { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
- { .parent = &sys_altclk, .rates = omap_48m_alt_rates },
- { .parent = NULL }
-};
-
-static struct clk omap_48m_fck = {
- .name = "omap_48m_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_SOURCE_48M_MASK,
- .clksel = omap_48m_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk omap_12m_fck = {
- .name = "omap_12m_fck",
- .ops = &clkops_null,
- .parent = &omap_48m_fck,
- .fixed_div = 4,
- .recalc = &omap2_fixed_divisor_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m4x2_ck */
-static struct clk dpll4_m4_ck = {
- .name = "dpll4_m4_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_DSS1_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
- .set_rate = &omap2_clksel_set_rate,
- .round_rate = &omap2_clksel_round_rate,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m4x2_ck = {
- .name = "dpll4_m4x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m4_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m5x2_ck */
-static struct clk dpll4_m5_ck = {
- .name = "dpll4_m5_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_CAM_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m5x2_ck = {
- .name = "dpll4_m5x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m5_ck,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-/* This virtual clock is the source for dpll4_m6x2_ck */
-static struct clk dpll4_m6_ck = {
- .name = "dpll4_m6_ck",
- .ops = &clkops_null,
- .parent = &dpll4_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_DIV_DPLL4_MASK,
- .clksel = div16_dpll4_clksel,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* The PWRDN bit is apparently only available on 3430ES2 and above */
-static struct clk dpll4_m6x2_ck = {
- .name = "dpll4_m6x2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll4_m6_ck,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
- .enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
- .flags = INVERT_ENABLE,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &omap3_clkoutx2_recalc,
-};
-
-static struct clk emu_per_alwon_ck = {
- .name = "emu_per_alwon_ck",
- .ops = &clkops_null,
- .parent = &dpll4_m6x2_ck,
- .clkdm_name = "dpll4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* DPLL5 */
-/* Supplies 120MHz clock, USIM source clock */
-/* Type: DPLL */
-/* 3430ES2 only */
-static struct dpll_data dpll5_dd = {
- .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
- .mult_mask = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
- .div1_mask = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
- .clk_bypass = &sys_ck,
- .clk_ref = &sys_ck,
- .freqsel_mask = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
- .control_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
- .enable_mask = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
- .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
- .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
- .recal_en_bit = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
- .recal_st_bit = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
- .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
- .autoidle_mask = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
- .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
- .idlest_mask = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
- .max_multiplier = OMAP3_MAX_DPLL_MULT,
- .min_divider = 1,
- .max_divider = OMAP3_MAX_DPLL_DIV,
- .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
-};
-
-static struct clk dpll5_ck = {
- .name = "dpll5_ck",
- .ops = &clkops_noncore_dpll_ops,
- .parent = &sys_ck,
- .dpll_data = &dpll5_dd,
- .round_rate = &omap2_dpll_round_rate,
- .set_rate = &omap3_noncore_dpll_set_rate,
- .clkdm_name = "dpll5_clkdm",
- .recalc = &omap3_dpll_recalc,
-};
-
-static const struct clksel div16_dpll5_clksel[] = {
- { .parent = &dpll5_ck, .rates = div16_dpll_rates },
- { .parent = NULL }
-};
-
-static struct clk dpll5_m2_ck = {
- .name = "dpll5_m2_ck",
- .ops = &clkops_null,
- .parent = &dpll5_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
- .clksel_mask = OMAP3430ES2_DIV_120M_MASK,
- .clksel = div16_dpll5_clksel,
- .clkdm_name = "dpll5_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* CM EXTERNAL CLOCK OUTPUTS */
-
-static const struct clksel_rate clkout2_src_core_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_sys_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_96m_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_54m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel clkout2_src_clksel[] = {
- { .parent = &core_ck, .rates = clkout2_src_core_rates },
- { .parent = &sys_ck, .rates = clkout2_src_sys_rates },
- { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
- { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
- { .parent = NULL }
-};
-
-static struct clk clkout2_src_ck = {
- .name = "clkout2_src_ck",
- .ops = &clkops_omap2_dflt,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP3430_CM_CLKOUT_CTRL,
- .enable_bit = OMAP3430_CLKOUT2_EN_SHIFT,
- .clksel_reg = OMAP3430_CM_CLKOUT_CTRL,
- .clksel_mask = OMAP3430_CLKOUT2SOURCE_MASK,
- .clksel = clkout2_src_clksel,
- .clkdm_name = "core_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate sys_clkout2_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 1, .flags = RATE_IN_343X },
- { .div = 4, .val = 2, .flags = RATE_IN_343X },
- { .div = 8, .val = 3, .flags = RATE_IN_343X },
- { .div = 16, .val = 4, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel sys_clkout2_clksel[] = {
- { .parent = &clkout2_src_ck, .rates = sys_clkout2_rates },
- { .parent = NULL },
-};
-
-static struct clk sys_clkout2 = {
- .name = "sys_clkout2",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP3430_CM_CLKOUT_CTRL,
- .clksel_mask = OMAP3430_CLKOUT2_DIV_MASK,
- .clksel = sys_clkout2_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* CM OUTPUT CLOCKS */
-
-static struct clk corex2_fck = {
- .name = "corex2_fck",
- .ops = &clkops_null,
- .parent = &dpll3_m2x2_ck,
- .recalc = &followparent_recalc,
-};
-
-/* DPLL power domain clock controls */
-
-static const struct clksel_rate div4_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 0 }
-};
-
-static const struct clksel div4_core_clksel[] = {
- { .parent = &core_ck, .rates = div4_rates },
- { .parent = NULL }
-};
-
-/*
- * REVISIT: Are these in DPLL power domain or CM power domain? docs
- * may be inconsistent here?
- */
-static struct clk dpll1_fck = {
- .name = "dpll1_fck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .clksel_mask = OMAP3430_MPU_CLK_SRC_MASK,
- .clksel = div4_core_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mpu_ck = {
- .name = "mpu_ck",
- .ops = &clkops_null,
- .parent = &dpll1_x2m2_ck,
- .clkdm_name = "mpu_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* arm_fck is divided by two when DPLL1 locked; otherwise, passthrough mpu_ck */
-static const struct clksel_rate arm_fck_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 1, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel arm_fck_clksel[] = {
- { .parent = &mpu_ck, .rates = arm_fck_rates },
- { .parent = NULL }
-};
-
-static struct clk arm_fck = {
- .name = "arm_fck",
- .ops = &clkops_null,
- .parent = &mpu_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
- .clksel_mask = OMAP3430_ST_MPU_CLK_MASK,
- .clksel = arm_fck_clksel,
- .clkdm_name = "mpu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* XXX What about neon_clkdm ? */
-
-/*
- * REVISIT: This clock is never specifically defined in the 3430 TRM,
- * although it is referenced - so this is a guess
- */
-static struct clk emu_mpu_alwon_ck = {
- .name = "emu_mpu_alwon_ck",
- .ops = &clkops_null,
- .parent = &mpu_ck,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dpll2_fck = {
- .name = "dpll2_fck",
- .ops = &clkops_null,
- .parent = &core_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
- .clksel_mask = OMAP3430_IVA2_CLK_SRC_MASK,
- .clksel = div4_core_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk iva2_ck = {
- .name = "iva2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &dpll2_m2_ck,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
- .clkdm_name = "iva2_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* Common interface clocks */
-
-static const struct clksel div2_core_clksel[] = {
- { .parent = &core_ck, .rates = div2_rates },
- { .parent = NULL }
-};
-
-static struct clk l3_ick = {
- .name = "l3_ick",
- .ops = &clkops_null,
- .parent = &core_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_L3_MASK,
- .clksel = div2_core_clksel,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel div2_l3_clksel[] = {
- { .parent = &l3_ick, .rates = div2_rates },
- { .parent = NULL }
-};
-
-static struct clk l4_ick = {
- .name = "l4_ick",
- .ops = &clkops_null,
- .parent = &l3_ick,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_L4_MASK,
- .clksel = div2_l3_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-
-};
-
-static const struct clksel div2_l4_clksel[] = {
- { .parent = &l4_ick, .rates = div2_rates },
- { .parent = NULL }
-};
-
-static struct clk rm_ick = {
- .name = "rm_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_RM_MASK,
- .clksel = div2_l4_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* GFX power domain */
-
-/* GFX clocks are in 3430ES1 only. 3430ES2 and later uses the SGX instead */
-
-static const struct clksel gfx_l3_clksel[] = {
- { .parent = &l3_ick, .rates = gfx_l3_rates },
- { .parent = NULL }
-};
-
-/* Virtual parent clock for gfx_l3_ick and gfx_l3_fck */
-static struct clk gfx_l3_ck = {
- .name = "gfx_l3_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &l3_ick,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
- .enable_bit = OMAP_EN_GFX_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk gfx_l3_fck = {
- .name = "gfx_l3_fck",
- .ops = &clkops_null,
- .parent = &gfx_l3_ck,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP_CLKSEL_GFX_MASK,
- .clksel = gfx_l3_clksel,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gfx_l3_ick = {
- .name = "gfx_l3_ick",
- .ops = &clkops_null,
- .parent = &gfx_l3_ck,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gfx_cg1_ck = {
- .name = "gfx_cg1_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &gfx_l3_fck, /* REVISIT: correct? */
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES1_EN_2D_SHIFT,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gfx_cg2_ck = {
- .name = "gfx_cg2_ck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &gfx_l3_fck, /* REVISIT: correct? */
- .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES1_EN_3D_SHIFT,
- .clkdm_name = "gfx_3430es1_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* SGX power domain - 3430ES2 only */
-
-static const struct clksel_rate sgx_core_rates[] = {
- { .div = 3, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 4, .val = 1, .flags = RATE_IN_343X },
- { .div = 6, .val = 2, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel_rate sgx_96m_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 },
-};
-
-static const struct clksel sgx_clksel[] = {
- { .parent = &core_ck, .rates = sgx_core_rates },
- { .parent = &cm_96m_fck, .rates = sgx_96m_rates },
- { .parent = NULL },
-};
-
-static struct clk sgx_fck = {
- .name = "sgx_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430ES2_CLKSEL_SGX_MASK,
- .clksel = sgx_clksel,
- .clkdm_name = "sgx_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk sgx_ick = {
- .name = "sgx_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &l3_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
- .clkdm_name = "sgx_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* CORE power domain */
-
-static struct clk d2d_26m_fck = {
- .name = "d2d_26m_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430ES1_EN_D2D_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk modem_fck = {
- .name = "modem_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MODEM_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk sad2d_ick = {
- .name = "sad2d_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SAD2D_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mad2d_ick = {
- .name = "mad2d_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP3430_EN_MAD2D_SHIFT,
- .clkdm_name = "d2d_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel omap343x_gpt_clksel[] = {
- { .parent = &omap_32k_fck, .rates = gpt_32k_rates },
- { .parent = &sys_ck, .rates = gpt_sys_rates },
- { .parent = NULL}
-};
-
-static struct clk gpt10_fck = {
- .name = "gpt10_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_GPT10_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT10_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt11_fck = {
- .name = "gpt11_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_GPT11_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT11_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk cpefuse_fck = {
- .name = "cpefuse_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
- .enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk ts_fck = {
- .name = "ts_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &omap_32k_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
- .enable_bit = OMAP3430ES2_EN_TS_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbtll_fck = {
- .name = "usbtll_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &dpll5_m2_ck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
- .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* CORE 96M FCLK-derived clocks */
-
-static struct clk core_96m_fck = {
- .name = "core_96m_fck",
- .ops = &clkops_null,
- .parent = &omap_96m_fck,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs3_fck = {
- .name = "mmchs_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs2_fck = {
- .name = "mmchs_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MMC2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_fck = {
- .name = "mspro_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs1_fck = {
- .name = "mmchs_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MMC1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c3_fck = {
- .name = "i2c_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 3,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_I2C3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c2_fck = {
- .name = "i2c_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_I2C2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c1_fck = {
- .name = "i2c_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_I2C1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/*
- * MCBSP 1 & 5 get their 96MHz clock from core_96m_fck;
- * MCBSP 2, 3, 4 get their 96MHz clock from per_96m_fck.
- */
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 }
-};
-
-static const struct clksel mcbsp_15_clksel[] = {
- { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
- { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
- { .parent = NULL }
-};
-
-static struct clk mcbsp5_fck = {
- .name = "mcbsp_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 5,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP5_CLKS_MASK,
- .clksel = mcbsp_15_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp1_fck = {
- .name = "mcbsp_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
- .clksel = mcbsp_15_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* CORE_48M_FCK-derived clocks */
-
-static struct clk core_48m_fck = {
- .name = "core_48m_fck",
- .ops = &clkops_null,
- .parent = &omap_48m_fck,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi4_fck = {
- .name = "mcspi_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 4,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi3_fck = {
- .name = "mcspi_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 3,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_fck = {
- .name = "mcspi_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi1_fck = {
- .name = "mcspi_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_fck = {
- .name = "uart2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_UART2_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_fck = {
- .name = "uart1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_UART1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk fshostusb_fck = {
- .name = "fshostusb_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* CORE_12M_FCK based clocks */
-
-static struct clk core_12m_fck = {
- .name = "core_12m_fck",
- .ops = &clkops_null,
- .parent = &omap_12m_fck,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_fck = {
- .name = "hdq_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_12m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_HDQ_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* DPLL3-derived clock */
-
-static const struct clksel_rate ssi_ssr_corex2_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 6, .val = 6, .flags = RATE_IN_343X },
- { .div = 8, .val = 8, .flags = RATE_IN_343X },
- { .div = 0 }
-};
-
-static const struct clksel ssi_ssr_clksel[] = {
- { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
- { .parent = NULL }
-};
-
-static struct clk ssi_ssr_fck_3430es1 = {
- .name = "ssi_ssr_fck",
- .ops = &clkops_omap2_dflt,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
- .clksel = ssi_ssr_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk ssi_ssr_fck_3430es2 = {
- .name = "ssi_ssr_fck",
- .ops = &clkops_omap3430es2_ssi_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
- .clksel = ssi_ssr_clksel,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk ssi_sst_fck_3430es1 = {
- .name = "ssi_sst_fck",
- .ops = &clkops_null,
- .parent = &ssi_ssr_fck_3430es1,
- .fixed_div = 2,
- .recalc = &omap2_fixed_divisor_recalc,
-};
-
-static struct clk ssi_sst_fck_3430es2 = {
- .name = "ssi_sst_fck",
- .ops = &clkops_null,
- .parent = &ssi_ssr_fck_3430es2,
- .fixed_div = 2,
- .recalc = &omap2_fixed_divisor_recalc,
-};
-
-
-
-/* CORE_L3_ICK based clocks */
-
-/*
- * XXX must add clk_enable/clk_disable for these if standard code won't
- * handle it
- */
-static struct clk core_l3_ick = {
- .name = "core_l3_ick",
- .ops = &clkops_null,
- .parent = &l3_ick,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hsotgusb_ick_3430es1 = {
- .name = "hsotgusb_ick",
- .ops = &clkops_omap2_dflt,
- .parent = &core_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hsotgusb_ick_3430es2 = {
- .name = "hsotgusb_ick",
- .ops = &clkops_omap3430es2_hsotgusb_wait,
- .parent = &core_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk sdrc_ick = {
- .name = "sdrc_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SDRC_SHIFT,
- .flags = ENABLE_ON_INIT,
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpmc_fck = {
- .name = "gpmc_fck",
- .ops = &clkops_null,
- .parent = &core_l3_ick,
- .flags = ENABLE_ON_INIT, /* huh? */
- .clkdm_name = "core_l3_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* SECURITY_L3_ICK based clocks */
-
-static struct clk security_l3_ick = {
- .name = "security_l3_ick",
- .ops = &clkops_null,
- .parent = &l3_ick,
- .recalc = &followparent_recalc,
-};
-
-static struct clk pka_ick = {
- .name = "pka_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &security_l3_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_PKA_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* CORE_L4_ICK based clocks */
-
-static struct clk core_l4_ick = {
- .name = "core_l4_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbtll_ick = {
- .name = "usbtll_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
- .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs3_ick = {
- .name = "mmchs_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* Intersystem Communication Registers - chassis mode only */
-static struct clk icr_ick = {
- .name = "icr_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_ICR_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk aes2_ick = {
- .name = "aes2_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_AES2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk sha12_ick = {
- .name = "sha12_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SHA12_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk des2_ick = {
- .name = "des2_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_DES2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs2_ick = {
- .name = "mmchs_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MMC2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mmchs1_ick = {
- .name = "mmchs_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MMC1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mspro_ick = {
- .name = "mspro_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk hdq_ick = {
- .name = "hdq_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_HDQ_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi4_ick = {
- .name = "mcspi_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 4,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi3_ick = {
- .name = "mcspi_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 3,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi2_ick = {
- .name = "mcspi_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcspi1_ick = {
- .name = "mcspi_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c3_ick = {
- .name = "i2c_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 3,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_I2C3_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c2_ick = {
- .name = "i2c_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_I2C2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk i2c1_ick = {
- .name = "i2c_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_I2C1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart2_ick = {
- .name = "uart2_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_UART2_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart1_ick = {
- .name = "uart1_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_UART1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt11_ick = {
- .name = "gpt11_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_GPT11_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt10_ick = {
- .name = "gpt10_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_GPT10_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp5_ick = {
- .name = "mcbsp_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 5,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp1_ick = {
- .name = "mcbsp_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 1,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk fac_ick = {
- .name = "fac_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430ES1_EN_FAC_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mailboxes_ick = {
- .name = "mailboxes_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_MAILBOXES_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk omapctrl_ick = {
- .name = "omapctrl_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &core_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT,
- .flags = ENABLE_ON_INIT,
- .recalc = &followparent_recalc,
-};
-
-/* SSI_L4_ICK based clocks */
-
-static struct clk ssi_l4_ick = {
- .name = "ssi_l4_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk ssi_ick_3430es1 = {
- .name = "ssi_ick",
- .ops = &clkops_omap2_dflt,
- .parent = &ssi_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk ssi_ick_3430es2 = {
- .name = "ssi_ick",
- .ops = &clkops_omap3430es2_ssi_wait,
- .parent = &ssi_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430_EN_SSI_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* REVISIT: Technically the TRM claims that this is CORE_CLK based,
- * but l4_ick makes more sense to me */
-
-static const struct clksel usb_l4_clksel[] = {
- { .parent = &l4_ick, .rates = div2_rates },
- { .parent = NULL },
-};
-
-static struct clk usb_l4_ick = {
- .name = "usb_l4_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &l4_ick,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
- .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
- .clksel = usb_l4_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* SECURITY_L4_ICK2 based clocks */
-
-static struct clk security_l4_ick2 = {
- .name = "security_l4_ick2",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .recalc = &followparent_recalc,
-};
-
-static struct clk aes1_ick = {
- .name = "aes1_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_AES1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rng_ick = {
- .name = "rng_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_RNG_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk sha11_ick = {
- .name = "sha11_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_SHA11_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-static struct clk des1_ick = {
- .name = "des1_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &security_l4_ick2,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
- .enable_bit = OMAP3430_EN_DES1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* DSS */
-static struct clk dss1_alwon_fck_3430es1 = {
- .name = "dss1_alwon_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &dpll4_m4x2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_DSS1_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss1_alwon_fck_3430es2 = {
- .name = "dss1_alwon_fck",
- .ops = &clkops_omap3430es2_dss_usbhost_wait,
- .parent = &dpll4_m4x2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_DSS1_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_tv_fck = {
- .name = "dss_tv_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &omap_54m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_TV_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_96m_fck = {
- .name = "dss_96m_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &omap_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_TV_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss2_alwon_fck = {
- .name = "dss2_alwon_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_DSS2_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_ick_3430es1 = {
- /* Handles both L3 and L4 clocks */
- .name = "dss_ick",
- .ops = &clkops_omap2_dflt,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk dss_ick_3430es2 = {
- /* Handles both L3 and L4 clocks */
- .name = "dss_ick",
- .ops = &clkops_omap3430es2_dss_usbhost_wait,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
- .clkdm_name = "dss_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* CAM */
-
-static struct clk cam_mclk = {
- .name = "cam_mclk",
- .ops = &clkops_omap2_dflt,
- .parent = &dpll4_m5x2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_CAM_SHIFT,
- .clkdm_name = "cam_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk cam_ick = {
- /* Handles both L3 and L4 clocks */
- .name = "cam_ick",
- .ops = &clkops_omap2_dflt,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_CAM_SHIFT,
- .clkdm_name = "cam_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk csi2_96m_fck = {
- .name = "csi2_96m_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &core_96m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_CSI2_SHIFT,
- .clkdm_name = "cam_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* USBHOST - 3430ES2 only */
-
-static struct clk usbhost_120m_fck = {
- .name = "usbhost_120m_fck",
- .ops = &clkops_omap2_dflt,
- .parent = &dpll5_m2_ck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_EN_USBHOST2_SHIFT,
- .clkdm_name = "usbhost_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbhost_48m_fck = {
- .name = "usbhost_48m_fck",
- .ops = &clkops_omap3430es2_dss_usbhost_wait,
- .parent = &omap_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
- .clkdm_name = "usbhost_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk usbhost_ick = {
- /* Handles both L3 and L4 clocks */
- .name = "usbhost_ick",
- .ops = &clkops_omap3430es2_dss_usbhost_wait,
- .parent = &l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430ES2_EN_USBHOST_SHIFT,
- .clkdm_name = "usbhost_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* WKUP */
-
-static const struct clksel_rate usim_96m_rates[] = {
- { .div = 2, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 8, .val = 5, .flags = RATE_IN_343X },
- { .div = 10, .val = 6, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel_rate usim_120m_rates[] = {
- { .div = 4, .val = 7, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 8, .val = 8, .flags = RATE_IN_343X },
- { .div = 16, .val = 9, .flags = RATE_IN_343X },
- { .div = 20, .val = 10, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel usim_clksel[] = {
- { .parent = &omap_96m_fck, .rates = usim_96m_rates },
- { .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
- { .parent = &sys_ck, .rates = div2_rates },
- { .parent = NULL },
-};
-
-/* 3430ES2 only */
-static struct clk usim_fck = {
- .name = "usim_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430ES2_CLKSEL_USIMOCP_MASK,
- .clksel = usim_clksel,
- .recalc = &omap2_clksel_recalc,
-};
-
-/* XXX should gpt1's clksel have wkup_32k_fck as the 32k opt? */
-static struct clk gpt1_fck = {
- .name = "gpt1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT1_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT1_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "wkup_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk wkup_32k_fck = {
- .name = "wkup_32k_fck",
- .ops = &clkops_null,
- .parent = &omap_32k_fck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio1_dbck = {
- .name = "gpio1_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &wkup_32k_fck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt2_fck = {
- .name = "wdt2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_32k_fck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_WDT2_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wkup_l4_ick = {
- .name = "wkup_l4_ick",
- .ops = &clkops_null,
- .parent = &sys_ck,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* 3430ES2 only */
-/* Never specifically named in the TRM, so we have to infer a likely name */
-static struct clk usim_ick = {
- .name = "usim_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt2_ick = {
- .name = "wdt2_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_WDT2_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt1_ick = {
- .name = "wdt1_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_WDT1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio1_ick = {
- .name = "gpio1_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk omap_32ksync_ick = {
- .name = "omap_32ksync_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_32KSYNC_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-/* XXX This clock no longer exists in 3430 TRM rev F */
-static struct clk gpt12_ick = {
- .name = "gpt12_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT12_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt1_ick = {
- .name = "gpt1_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &wkup_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT1_SHIFT,
- .clkdm_name = "wkup_clkdm",
- .recalc = &followparent_recalc,
-};
-
-
-
-/* PER clock domain */
-
-static struct clk per_96m_fck = {
- .name = "per_96m_fck",
- .ops = &clkops_null,
- .parent = &omap_96m_alwon_fck,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk per_48m_fck = {
- .name = "per_48m_fck",
- .ops = &clkops_null,
- .parent = &omap_48m_fck,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_fck = {
- .name = "uart3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_UART3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt2_fck = {
- .name = "gpt2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT2_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT2_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt3_fck = {
- .name = "gpt3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT3_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT3_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt4_fck = {
- .name = "gpt4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT4_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT4_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt5_fck = {
- .name = "gpt5_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT5_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT5_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt6_fck = {
- .name = "gpt6_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT6_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT6_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt7_fck = {
- .name = "gpt7_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT7_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT7_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt8_fck = {
- .name = "gpt8_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT8_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT8_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk gpt9_fck = {
- .name = "gpt9_fck",
- .ops = &clkops_omap2_dflt_wait,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPT9_SHIFT,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
- .clksel_mask = OMAP3430_CLKSEL_GPT9_MASK,
- .clksel = omap343x_gpt_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk per_32k_alwon_fck = {
- .name = "per_32k_alwon_fck",
- .ops = &clkops_null,
- .parent = &omap_32k_fck,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio6_dbck = {
- .name = "gpio6_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_dbck = {
- .name = "gpio5_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio4_dbck = {
- .name = "gpio4_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio3_dbck = {
- .name = "gpio3_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio2_dbck = {
- .name = "gpio2_dbck",
- .ops = &clkops_omap2_dflt,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt3_fck = {
- .name = "wdt3_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_32k_alwon_fck,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_WDT3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk per_l4_ick = {
- .name = "per_l4_ick",
- .ops = &clkops_null,
- .parent = &l4_ick,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio6_ick = {
- .name = "gpio6_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio5_ick = {
- .name = "gpio5_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio4_ick = {
- .name = "gpio4_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio3_ick = {
- .name = "gpio3_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpio2_ick = {
- .name = "gpio2_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk wdt3_ick = {
- .name = "wdt3_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_WDT3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk uart3_ick = {
- .name = "uart3_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_UART3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt9_ick = {
- .name = "gpt9_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT9_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt8_ick = {
- .name = "gpt8_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT8_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt7_ick = {
- .name = "gpt7_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT7_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt6_ick = {
- .name = "gpt6_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT6_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt5_ick = {
- .name = "gpt5_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT5_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt4_ick = {
- .name = "gpt4_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt3_ick = {
- .name = "gpt3_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk gpt2_ick = {
- .name = "gpt2_ick",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_GPT2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp2_ick = {
- .name = "mcbsp_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp3_ick = {
- .name = "mcbsp_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 3,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static struct clk mcbsp4_ick = {
- .name = "mcbsp_ick",
- .ops = &clkops_omap2_dflt_wait,
- .id = 4,
- .parent = &per_l4_ick,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
- .enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
- .clkdm_name = "per_clkdm",
- .recalc = &followparent_recalc,
-};
-
-static const struct clksel mcbsp_234_clksel[] = {
- { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
- { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
- { .parent = NULL }
-};
-
-static struct clk mcbsp2_fck = {
- .name = "mcbsp_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 2,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
- .clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
- .clksel = mcbsp_234_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp3_fck = {
- .name = "mcbsp_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 3,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP3_CLKS_MASK,
- .clksel = mcbsp_234_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk mcbsp4_fck = {
- .name = "mcbsp_fck",
- .ops = &clkops_omap2_dflt_wait,
- .id = 4,
- .init = &omap2_init_clksel_parent,
- .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
- .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
- .clksel_mask = OMAP2_MCBSP4_CLKS_MASK,
- .clksel = mcbsp_234_clksel,
- .clkdm_name = "per_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* EMU clocks */
-
-/* More information: ARM Cortex-A8 Technical Reference Manual, sect 10.1 */
-
-static const struct clksel_rate emu_src_sys_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_core_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_per_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_mpu_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 0 },
-};
-
-static const struct clksel emu_src_clksel[] = {
- { .parent = &sys_ck, .rates = emu_src_sys_rates },
- { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
- { .parent = &emu_per_alwon_ck, .rates = emu_src_per_rates },
- { .parent = &emu_mpu_alwon_ck, .rates = emu_src_mpu_rates },
- { .parent = NULL },
-};
-
-/*
- * Like the clkout_src clocks, emu_src_clk is a virtual clock, existing only
- * to switch the source of some of the EMU clocks.
- * XXX Are there CLKEN bits for these EMU clks?
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
*/
-static struct clk emu_src_ck = {
- .name = "emu_src_ck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_MUX_CTRL_MASK,
- .clksel = emu_src_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate pclk_emu_rates[] = {
- { .div = 2, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 6, .val = 6, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel pclk_emu_clksel[] = {
- { .parent = &emu_src_ck, .rates = pclk_emu_rates },
- { .parent = NULL },
-};
-
-static struct clk pclk_fck = {
- .name = "pclk_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_PCLK_MASK,
- .clksel = pclk_emu_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate pclkx2_emu_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 3, .val = 3, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel pclkx2_emu_clksel[] = {
- { .parent = &emu_src_ck, .rates = pclkx2_emu_rates },
- { .parent = NULL },
-};
-
-static struct clk pclkx2_fck = {
- .name = "pclkx2_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_PCLKX2_MASK,
- .clksel = pclkx2_emu_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel atclk_emu_clksel[] = {
- { .parent = &emu_src_ck, .rates = div2_rates },
- { .parent = NULL },
-};
-
-static struct clk atclk_fck = {
- .name = "atclk_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_ATCLK_MASK,
- .clksel = atclk_emu_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static struct clk traceclk_src_fck = {
- .name = "traceclk_src_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_TRACE_MUX_CTRL_MASK,
- .clksel = emu_src_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-static const struct clksel_rate traceclk_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
- { .div = 2, .val = 2, .flags = RATE_IN_343X },
- { .div = 4, .val = 4, .flags = RATE_IN_343X },
- { .div = 0 },
-};
-
-static const struct clksel traceclk_clksel[] = {
- { .parent = &traceclk_src_fck, .rates = traceclk_rates },
- { .parent = NULL },
-};
-
-static struct clk traceclk_fck = {
- .name = "traceclk_fck",
- .ops = &clkops_null,
- .init = &omap2_init_clksel_parent,
- .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
- .clksel_mask = OMAP3430_CLKSEL_TRACECLK_MASK,
- .clksel = traceclk_clksel,
- .clkdm_name = "emu_clkdm",
- .recalc = &omap2_clksel_recalc,
-};
-
-/* SR clocks */
-
-/* SmartReflex fclk (VDD1) */
-static struct clk sr1_fck = {
- .name = "sr1_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_SR1_SHIFT,
- .recalc = &followparent_recalc,
-};
-
-/* SmartReflex fclk (VDD2) */
-static struct clk sr2_fck = {
- .name = "sr2_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &sys_ck,
- .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
- .enable_bit = OMAP3430_EN_SR2_SHIFT,
- .recalc = &followparent_recalc,
-};
-static struct clk sr_l4_ick = {
- .name = "sr_l4_ick",
- .ops = &clkops_null, /* RMK: missing? */
- .parent = &l4_ick,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
-};
+#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK_34XX_H
+#define __ARCH_ARM_MACH_OMAP2_CLOCK_34XX_H
-/* SECURE_32K_FCK clocks */
+int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate);
+int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate);
+void omap3_clk_lock_dpll5(void);
-static struct clk gpt12_fck = {
- .name = "gpt12_fck",
- .ops = &clkops_null,
- .parent = &secure_32k_fck,
- .recalc = &followparent_recalc,
-};
+extern struct clk *sdrc_ick_p;
+extern struct clk *arm_fck_p;
-static struct clk wdt1_fck = {
- .name = "wdt1_fck",
- .ops = &clkops_null,
- .parent = &secure_32k_fck,
- .recalc = &followparent_recalc,
-};
+/* OMAP34xx-specific clkops */
+extern const struct clkops clkops_omap3430es2_ssi_wait;
+extern const struct clkops clkops_omap3430es2_hsotgusb_wait;
+extern const struct clkops clkops_omap3430es2_dss_usbhost_wait;
+extern const struct clkops clkops_noncore_dpll_ops;
#endif
diff --git a/arch/arm/mach-omap2/clock34xx_data.c b/arch/arm/mach-omap2/clock34xx_data.c
new file mode 100644
index 00000000000..8bdcc9cc7f9
--- /dev/null
+++ b/arch/arm/mach-omap2/clock34xx_data.c
@@ -0,0 +1,3289 @@
+/*
+ * OMAP3 clock data
+ *
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
+ *
+ * Written by Paul Walmsley
+ * With many device clock fixes by Kevin Hilman and Jouni Högander
+ * DPLL bypass clock support added by Roman Tereshonkov
+ *
+ */
+
+/*
+ * Virtual clocks are introduced as convenient tools.
+ * They are sources for other clocks and are not meant
+ * to be requested directly by drivers.
+ */
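+
+/*
+ * Editorial sketch, not part of the original patch: drivers are expected
+ * to take the leaf clocks defined in this file through the generic clk
+ * API rather than these virtual sources, along the lines of
+ * (hypothetical device pointer, "uart1_ick" used as an example name):
+ *
+ *	struct clk *ick = clk_get(dev, "uart1_ick");
+ *	if (!IS_ERR(ick))
+ *		clk_enable(ick);
+ */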
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+
+#include <plat/control.h>
+#include <plat/clkdev_omap.h>
+
+#include "clock.h"
+#include "clock34xx.h"
+#include "cm.h"
+#include "cm-regbits-34xx.h"
+#include "prm.h"
+#include "prm-regbits-34xx.h"
+
+/*
+ * clocks
+ */
+
+#define OMAP_CM_REGADDR OMAP34XX_CM_REGADDR
+
+/* Maximum DPLL multiplier, divider values for OMAP3 */
+#define OMAP3_MAX_DPLL_MULT 2048
+#define OMAP3_MAX_DPLL_DIV 128
+
+/*
+ * DPLL1 supplies clock to the MPU.
+ * DPLL2 supplies clock to the IVA2.
+ * DPLL3 supplies CORE domain clocks.
+ * DPLL4 supplies peripheral clocks.
+ * DPLL5 supplies other peripheral clocks (USBHOST, USIM).
+ */
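+
+/*
+ * Editorial note, illustrative only: when locked, each DPLL above
+ * produces CLKOUT = clk_ref * M / (N + 1), with M limited by
+ * OMAP3_MAX_DPLL_MULT and (N + 1) by OMAP3_MAX_DPLL_DIV.  For example,
+ * a 13 MHz sys_ck with M = 500 and N = 12 gives 13 * 500 / 13 = 500 MHz
+ * before the Mx output dividers.
+ */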
+
+/* Forward declarations for DPLL bypass clocks */
+static struct clk dpll1_fck;
+static struct clk dpll2_fck;
+
+/* PRM CLOCKS */
+
+/* According to timer32k.c, this is a 32768Hz clock, not a 32000Hz clock. */
+static struct clk omap_32k_fck = {
+ .name = "omap_32k_fck",
+ .ops = &clkops_null,
+ .rate = 32768,
+ .flags = RATE_FIXED,
+};
+
+static struct clk secure_32k_fck = {
+ .name = "secure_32k_fck",
+ .ops = &clkops_null,
+ .rate = 32768,
+ .flags = RATE_FIXED,
+};
+
+/* Virtual source clocks for osc_sys_ck */
+static struct clk virt_12m_ck = {
+ .name = "virt_12m_ck",
+ .ops = &clkops_null,
+ .rate = 12000000,
+ .flags = RATE_FIXED,
+};
+
+static struct clk virt_13m_ck = {
+ .name = "virt_13m_ck",
+ .ops = &clkops_null,
+ .rate = 13000000,
+ .flags = RATE_FIXED,
+};
+
+static struct clk virt_16_8m_ck = {
+ .name = "virt_16_8m_ck",
+ .ops = &clkops_null,
+ .rate = 16800000,
+ .flags = RATE_FIXED,
+};
+
+static struct clk virt_19_2m_ck = {
+ .name = "virt_19_2m_ck",
+ .ops = &clkops_null,
+ .rate = 19200000,
+ .flags = RATE_FIXED,
+};
+
+static struct clk virt_26m_ck = {
+ .name = "virt_26m_ck",
+ .ops = &clkops_null,
+ .rate = 26000000,
+ .flags = RATE_FIXED,
+};
+
+static struct clk virt_38_4m_ck = {
+ .name = "virt_38_4m_ck",
+ .ops = &clkops_null,
+ .rate = 38400000,
+ .flags = RATE_FIXED,
+};
+
+static const struct clksel_rate osc_sys_12m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate osc_sys_13m_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate osc_sys_16_8m_rates[] = {
+ { .div = 1, .val = 5, .flags = RATE_IN_3430ES2 | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate osc_sys_19_2m_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate osc_sys_26m_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate osc_sys_38_4m_rates[] = {
+ { .div = 1, .val = 4, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel osc_sys_clksel[] = {
+ { .parent = &virt_12m_ck, .rates = osc_sys_12m_rates },
+ { .parent = &virt_13m_ck, .rates = osc_sys_13m_rates },
+ { .parent = &virt_16_8m_ck, .rates = osc_sys_16_8m_rates },
+ { .parent = &virt_19_2m_ck, .rates = osc_sys_19_2m_rates },
+ { .parent = &virt_26m_ck, .rates = osc_sys_26m_rates },
+ { .parent = &virt_38_4m_ck, .rates = osc_sys_38_4m_rates },
+ { .parent = NULL },
+};
+
+/* Oscillator clock */
+/* 12, 13, 16.8, 19.2, 26, or 38.4 MHz */
+static struct clk osc_sys_ck = {
+ .name = "osc_sys_ck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP3430_PRM_CLKSEL,
+ .clksel_mask = OMAP3430_SYS_CLKIN_SEL_MASK,
+ .clksel = osc_sys_clksel,
+ /* REVISIT: deal with autoextclkmode? */
+ .flags = RATE_FIXED,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel_rate div2_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 0 }
+};
+
+static const struct clksel sys_clksel[] = {
+ { .parent = &osc_sys_ck, .rates = div2_rates },
+ { .parent = NULL }
+};
+
+/* Latency: this clock is only enabled after PRM_CLKSETUP.SETUP_TIME */
+/* Feeds DPLLs - divided first by PRM_CLKSRC_CTRL.SYSCLKDIV? */
+static struct clk sys_ck = {
+ .name = "sys_ck",
+ .ops = &clkops_null,
+ .parent = &osc_sys_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP3430_PRM_CLKSRC_CTRL,
+ .clksel_mask = OMAP_SYSCLKDIV_MASK,
+ .clksel = sys_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
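+
+/*
+ * Worked example (editorial, assumed values): with a 26 MHz oscillator
+ * selected as osc_sys_ck and PRM_CLKSRC_CTRL.SYSCLKDIV at the
+ * divide-by-2 entry of div2_rates, sys_ck runs at 13 MHz; with
+ * divide-by-1 it stays at 26 MHz.
+ */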
+
+static struct clk sys_altclk = {
+ .name = "sys_altclk",
+ .ops = &clkops_null,
+};
+
+/* Optional external clock input for some McBSPs */
+static struct clk mcbsp_clks = {
+ .name = "mcbsp_clks",
+ .ops = &clkops_null,
+};
+
+/* PRM EXTERNAL CLOCK OUTPUT */
+
+static struct clk sys_clkout1 = {
+ .name = "sys_clkout1",
+ .ops = &clkops_omap2_dflt,
+ .parent = &osc_sys_ck,
+ .enable_reg = OMAP3430_PRM_CLKOUT_CTRL,
+ .enable_bit = OMAP3430_CLKOUT_EN_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+/* DPLLS */
+
+/* CM CLOCKS */
+
+static const struct clksel_rate div16_dpll_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 3, .val = 3, .flags = RATE_IN_343X },
+ { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 5, .val = 5, .flags = RATE_IN_343X },
+ { .div = 6, .val = 6, .flags = RATE_IN_343X },
+ { .div = 7, .val = 7, .flags = RATE_IN_343X },
+ { .div = 8, .val = 8, .flags = RATE_IN_343X },
+ { .div = 9, .val = 9, .flags = RATE_IN_343X },
+ { .div = 10, .val = 10, .flags = RATE_IN_343X },
+ { .div = 11, .val = 11, .flags = RATE_IN_343X },
+ { .div = 12, .val = 12, .flags = RATE_IN_343X },
+ { .div = 13, .val = 13, .flags = RATE_IN_343X },
+ { .div = 14, .val = 14, .flags = RATE_IN_343X },
+ { .div = 15, .val = 15, .flags = RATE_IN_343X },
+ { .div = 16, .val = 16, .flags = RATE_IN_343X },
+ { .div = 0 }
+};
+
+/* DPLL1 */
+/* MPU clock source */
+/* Type: DPLL */
+static struct dpll_data dpll1_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ .mult_mask = OMAP3430_MPU_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_MPU_DPLL_DIV_MASK,
+ .clk_bypass = &dpll1_fck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_MPU_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
+ .enable_mask = OMAP3430_EN_MPU_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_MPU_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
+ .autoidle_mask = OMAP3430_AUTO_MPU_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
+ .idlest_mask = OMAP3430_ST_MPU_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+ .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
+};
+
+static struct clk dpll1_ck = {
+ .name = "dpll1_ck",
+ .ops = &clkops_null,
+ .parent = &sys_ck,
+ .dpll_data = &dpll1_dd,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .clkdm_name = "dpll1_clkdm",
+ .recalc = &omap3_dpll_recalc,
+};
+
+/*
+ * This virtual clock provides the CLKOUTX2 output from the DPLL if the
+ * DPLL isn't bypassed.
+ */
+static struct clk dpll1_x2_ck = {
+ .name = "dpll1_x2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll1_ck,
+ .clkdm_name = "dpll1_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+/* On DPLL1, unlike other DPLLs, the divider is downstream from CLKOUTX2 */
+static const struct clksel div16_dpll1_x2m2_clksel[] = {
+ { .parent = &dpll1_x2_ck, .rates = div16_dpll_rates },
+ { .parent = NULL }
+};
+
+/*
+ * Does not exist in the TRM - needed to separate the M2 divider from
+ * bypass selection in mpu_ck
+ */
+static struct clk dpll1_x2m2_ck = {
+ .name = "dpll1_x2m2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll1_x2_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
+ .clksel_mask = OMAP3430_MPU_DPLL_CLKOUT_DIV_MASK,
+ .clksel = div16_dpll1_x2m2_clksel,
+ .clkdm_name = "dpll1_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
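+
+/*
+ * Editorial example with assumed rates: if dpll1_ck locks at 500 MHz,
+ * dpll1_x2_ck reports 1000 MHz (omap3_clkoutx2_recalc doubles the
+ * parent rate while the DPLL is locked), and an M2 divider of 2 brings
+ * dpll1_x2m2_ck, and hence the MPU clock derived from it, back to
+ * 500 MHz.
+ */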
+
+/* DPLL2 */
+/* IVA2 clock source */
+/* Type: DPLL */
+
+static struct dpll_data dpll2_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ .mult_mask = OMAP3430_IVA2_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_IVA2_DPLL_DIV_MASK,
+ .clk_bypass = &dpll2_fck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
+ .enable_mask = OMAP3430_EN_IVA2_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
+ (1 << DPLL_LOW_POWER_BYPASS),
+ .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
+ .autoidle_mask = OMAP3430_AUTO_IVA2_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
+ .idlest_mask = OMAP3430_ST_IVA2_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+ .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
+};
+
+static struct clk dpll2_ck = {
+ .name = "dpll2_ck",
+ .ops = &clkops_noncore_dpll_ops,
+ .parent = &sys_ck,
+ .dpll_data = &dpll2_dd,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .clkdm_name = "dpll2_clkdm",
+ .recalc = &omap3_dpll_recalc,
+};
+
+static const struct clksel div16_dpll2_m2x2_clksel[] = {
+ { .parent = &dpll2_ck, .rates = div16_dpll_rates },
+ { .parent = NULL }
+};
+
+/*
+ * The TRM is conflicted on whether IVA2 clock comes from DPLL2 CLKOUT
+ * or CLKOUTX2. CLKOUT seems most plausible.
+ */
+static struct clk dpll2_m2_ck = {
+ .name = "dpll2_m2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll2_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD,
+ OMAP3430_CM_CLKSEL2_PLL),
+ .clksel_mask = OMAP3430_IVA2_DPLL_CLKOUT_DIV_MASK,
+ .clksel = div16_dpll2_m2x2_clksel,
+ .clkdm_name = "dpll2_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/*
+ * DPLL3
+ * Source clock for all interfaces and for some device fclks
+ * REVISIT: Also supports fast relock bypass - not included below
+ */
+static struct dpll_data dpll3_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .mult_mask = OMAP3430_CORE_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_CORE_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_CORE_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_mask = OMAP3430_EN_CORE_DPLL_MASK,
+ .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_CORE_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
+ .autoidle_mask = OMAP3430_AUTO_CORE_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
+ .idlest_mask = OMAP3430_ST_CORE_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+ .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
+};
+
+static struct clk dpll3_ck = {
+ .name = "dpll3_ck",
+ .ops = &clkops_null,
+ .parent = &sys_ck,
+ .dpll_data = &dpll3_dd,
+ .round_rate = &omap2_dpll_round_rate,
+ .clkdm_name = "dpll3_clkdm",
+ .recalc = &omap3_dpll_recalc,
+};
+
+/*
+ * This virtual clock provides the CLKOUTX2 output from the DPLL if the
+ * DPLL isn't bypassed
+ */
+static struct clk dpll3_x2_ck = {
+ .name = "dpll3_x2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll3_ck,
+ .clkdm_name = "dpll3_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+static const struct clksel_rate div31_dpll3_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 3, .val = 3, .flags = RATE_IN_3430ES2 },
+ { .div = 4, .val = 4, .flags = RATE_IN_3430ES2 },
+ { .div = 5, .val = 5, .flags = RATE_IN_3430ES2 },
+ { .div = 6, .val = 6, .flags = RATE_IN_3430ES2 },
+ { .div = 7, .val = 7, .flags = RATE_IN_3430ES2 },
+ { .div = 8, .val = 8, .flags = RATE_IN_3430ES2 },
+ { .div = 9, .val = 9, .flags = RATE_IN_3430ES2 },
+ { .div = 10, .val = 10, .flags = RATE_IN_3430ES2 },
+ { .div = 11, .val = 11, .flags = RATE_IN_3430ES2 },
+ { .div = 12, .val = 12, .flags = RATE_IN_3430ES2 },
+ { .div = 13, .val = 13, .flags = RATE_IN_3430ES2 },
+ { .div = 14, .val = 14, .flags = RATE_IN_3430ES2 },
+ { .div = 15, .val = 15, .flags = RATE_IN_3430ES2 },
+ { .div = 16, .val = 16, .flags = RATE_IN_3430ES2 },
+ { .div = 17, .val = 17, .flags = RATE_IN_3430ES2 },
+ { .div = 18, .val = 18, .flags = RATE_IN_3430ES2 },
+ { .div = 19, .val = 19, .flags = RATE_IN_3430ES2 },
+ { .div = 20, .val = 20, .flags = RATE_IN_3430ES2 },
+ { .div = 21, .val = 21, .flags = RATE_IN_3430ES2 },
+ { .div = 22, .val = 22, .flags = RATE_IN_3430ES2 },
+ { .div = 23, .val = 23, .flags = RATE_IN_3430ES2 },
+ { .div = 24, .val = 24, .flags = RATE_IN_3430ES2 },
+ { .div = 25, .val = 25, .flags = RATE_IN_3430ES2 },
+ { .div = 26, .val = 26, .flags = RATE_IN_3430ES2 },
+ { .div = 27, .val = 27, .flags = RATE_IN_3430ES2 },
+ { .div = 28, .val = 28, .flags = RATE_IN_3430ES2 },
+ { .div = 29, .val = 29, .flags = RATE_IN_3430ES2 },
+ { .div = 30, .val = 30, .flags = RATE_IN_3430ES2 },
+ { .div = 31, .val = 31, .flags = RATE_IN_3430ES2 },
+ { .div = 0 },
+};
+
+static const struct clksel div31_dpll3m2_clksel[] = {
+ { .parent = &dpll3_ck, .rates = div31_dpll3_rates },
+ { .parent = NULL }
+};
+
+/* DPLL3 output M2 - primary control point for CORE speed */
+static struct clk dpll3_m2_ck = {
+ .name = "dpll3_m2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll3_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_CORE_DPLL_CLKOUT_DIV_MASK,
+ .clksel = div31_dpll3m2_clksel,
+ .clkdm_name = "dpll3_clkdm",
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap3_core_dpll_m2_set_rate,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk core_ck = {
+ .name = "core_ck",
+ .ops = &clkops_null,
+ .parent = &dpll3_m2_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dpll3_m2x2_ck = {
+ .name = "dpll3_m2x2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll3_m2_ck,
+ .clkdm_name = "dpll3_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+/* The PWRDN bit is apparently only available on 3430ES2 and above */
+static const struct clksel div16_dpll3_clksel[] = {
+ { .parent = &dpll3_ck, .rates = div16_dpll_rates },
+ { .parent = NULL }
+};
+
+/* This virtual clock is the source for dpll3_m3x2_ck */
+static struct clk dpll3_m3_ck = {
+ .name = "dpll3_m3_ck",
+ .ops = &clkops_null,
+ .parent = &dpll3_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_DIV_DPLL3_MASK,
+ .clksel = div16_dpll3_clksel,
+ .clkdm_name = "dpll3_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* The PWRDN bit is apparently only available on 3430ES2 and above */
+static struct clk dpll3_m3x2_ck = {
+ .name = "dpll3_m3x2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &dpll3_m3_ck,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_EMU_CORE_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll3_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+static struct clk emu_core_alwon_ck = {
+ .name = "emu_core_alwon_ck",
+ .ops = &clkops_null,
+ .parent = &dpll3_m3x2_ck,
+ .clkdm_name = "dpll3_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* DPLL4 */
+/* Supplies 96MHz, 54MHz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
+/* Type: DPLL */
+static struct dpll_data dpll4_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
+ .mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
+ .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
+ .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+ .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
+};
+
+static struct clk dpll4_ck = {
+ .name = "dpll4_ck",
+ .ops = &clkops_noncore_dpll_ops,
+ .parent = &sys_ck,
+ .dpll_data = &dpll4_dd,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_dpll4_set_rate,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap3_dpll_recalc,
+};
+
+/*
+ * This virtual clock provides the CLKOUTX2 output from the DPLL if the
+ * DPLL isn't bypassed --
+ * XXX does this serve any downstream clocks?
+ */
+static struct clk dpll4_x2_ck = {
+ .name = "dpll4_x2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll4_ck,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+static const struct clksel div16_dpll4_clksel[] = {
+ { .parent = &dpll4_ck, .rates = div16_dpll_rates },
+ { .parent = NULL }
+};
+
+/* This virtual clock is the source for dpll4_m2x2_ck */
+static struct clk dpll4_m2_ck = {
+ .name = "dpll4_m2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll4_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
+ .clksel_mask = OMAP3430_DIV_96M_MASK,
+ .clksel = div16_dpll4_clksel,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* The PWRDN bit is apparently only available on 3430ES2 and above */
+static struct clk dpll4_m2x2_ck = {
+ .name = "dpll4_m2x2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &dpll4_m2_ck,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_96M_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+/*
+ * DPLL4 generates DPLL4_M2X2_CLK which is then routed into the PRM as
+ * PRM_96M_ALWON_(F)CLK. Two clocks then emerge from the PRM:
+ * 96M_ALWON_FCLK (called "omap_96m_alwon_fck" below) and
+ * CM_96M_(F)CLK.
+ */
+static struct clk omap_96m_alwon_fck = {
+ .name = "omap_96m_alwon_fck",
+ .ops = &clkops_null,
+ .parent = &dpll4_m2x2_ck,
+ .recalc = &followparent_recalc,
+};
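+
+/*
+ * Editorial note: omap_96m_alwon_fck uses followparent_recalc, so it
+ * simply mirrors whatever rate dpll4_m2x2_ck produces; a 96 MHz M2X2
+ * output therefore appears here unchanged as the 96M_ALWON_FCLK rate.
+ */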
+
+static struct clk cm_96m_fck = {
+ .name = "cm_96m_fck",
+ .ops = &clkops_null,
+ .parent = &omap_96m_alwon_fck,
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel_rate omap_96m_dpll_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate omap_96m_sys_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel omap_96m_fck_clksel[] = {
+ { .parent = &cm_96m_fck, .rates = omap_96m_dpll_rates },
+ { .parent = &sys_ck, .rates = omap_96m_sys_rates },
+ { .parent = NULL }
+};
+
+static struct clk omap_96m_fck = {
+ .name = "omap_96m_fck",
+ .ops = &clkops_null,
+ .parent = &sys_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_SOURCE_96M_MASK,
+ .clksel = omap_96m_fck_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* This virtual clock is the source for dpll4_m3x2_ck */
+static struct clk dpll4_m3_ck = {
+ .name = "dpll4_m3_ck",
+ .ops = &clkops_null,
+ .parent = &dpll4_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_TV_MASK,
+ .clksel = div16_dpll4_clksel,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* The PWRDN bit is apparently only available on 3430ES2 and above */
+static struct clk dpll4_m3x2_ck = {
+ .name = "dpll4_m3x2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &dpll4_m3_ck,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_TV_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+static const struct clksel_rate omap_54m_d4m3x2_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate omap_54m_alt_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel omap_54m_clksel[] = {
+ { .parent = &dpll4_m3x2_ck, .rates = omap_54m_d4m3x2_rates },
+ { .parent = &sys_altclk, .rates = omap_54m_alt_rates },
+ { .parent = NULL }
+};
+
+static struct clk omap_54m_fck = {
+ .name = "omap_54m_fck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_SOURCE_54M_MASK,
+ .clksel = omap_54m_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel_rate omap_48m_cm96m_rates[] = {
+ { .div = 2, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate omap_48m_alt_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel omap_48m_clksel[] = {
+ { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
+ { .parent = &sys_altclk, .rates = omap_48m_alt_rates },
+ { .parent = NULL }
+};
+
+static struct clk omap_48m_fck = {
+ .name = "omap_48m_fck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_SOURCE_48M_MASK,
+ .clksel = omap_48m_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk omap_12m_fck = {
+ .name = "omap_12m_fck",
+ .ops = &clkops_null,
+ .parent = &omap_48m_fck,
+ .fixed_div = 4,
+ .recalc = &omap2_fixed_divisor_recalc,
+};
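+
+/*
+ * Editorial note: omap_12m_fck is a fixed divide-by-4 of omap_48m_fck
+ * (omap2_fixed_divisor_recalc), so a 48 MHz parent yields 12 MHz here.
+ */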
+
+/* This virtual clock is the source for dpll4_m4x2_ck */
+static struct clk dpll4_m4_ck = {
+ .name = "dpll4_m4_ck",
+ .ops = &clkops_null,
+ .parent = &dpll4_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_DSS1_MASK,
+ .clksel = div16_dpll4_clksel,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
+};
+
+/* The PWRDN bit is apparently only available on 3430ES2 and above */
+static struct clk dpll4_m4x2_ck = {
+ .name = "dpll4_m4x2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &dpll4_m4_ck,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+/* This virtual clock is the source for dpll4_m5x2_ck */
+static struct clk dpll4_m5_ck = {
+ .name = "dpll4_m5_ck",
+ .ops = &clkops_null,
+ .parent = &dpll4_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_CAM_MASK,
+ .clksel = div16_dpll4_clksel,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* The PWRDN bit is apparently only available on 3430ES2 and above */
+static struct clk dpll4_m5x2_ck = {
+ .name = "dpll4_m5x2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &dpll4_m5_ck,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_CAM_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+/* This virtual clock is the source for dpll4_m6x2_ck */
+static struct clk dpll4_m6_ck = {
+ .name = "dpll4_m6_ck",
+ .ops = &clkops_null,
+ .parent = &dpll4_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_DIV_DPLL4_MASK,
+ .clksel = div16_dpll4_clksel,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* The PWRDN bit is apparently only available on 3430ES2 and above */
+static struct clk dpll4_m6x2_ck = {
+ .name = "dpll4_m6x2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &dpll4_m6_ck,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
+ .enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
+ .flags = INVERT_ENABLE,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+static struct clk emu_per_alwon_ck = {
+ .name = "emu_per_alwon_ck",
+ .ops = &clkops_null,
+ .parent = &dpll4_m6x2_ck,
+ .clkdm_name = "dpll4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* DPLL5 */
+/* Supplies 120MHz clock, USIM source clock */
+/* Type: DPLL */
+/* 3430ES2 only */
+static struct dpll_data dpll5_dd = {
+ .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
+ .mult_mask = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
+ .div1_mask = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
+ .clk_bypass = &sys_ck,
+ .clk_ref = &sys_ck,
+ .freqsel_mask = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
+ .control_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
+ .enable_mask = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
+ .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
+ .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
+ .recal_en_bit = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
+ .recal_st_bit = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
+ .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
+ .autoidle_mask = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
+ .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
+ .idlest_mask = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
+ .max_multiplier = OMAP3_MAX_DPLL_MULT,
+ .min_divider = 1,
+ .max_divider = OMAP3_MAX_DPLL_DIV,
+ .rate_tolerance = DEFAULT_DPLL_RATE_TOLERANCE
+};
+
+static struct clk dpll5_ck = {
+ .name = "dpll5_ck",
+ .ops = &clkops_noncore_dpll_ops,
+ .parent = &sys_ck,
+ .dpll_data = &dpll5_dd,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .clkdm_name = "dpll5_clkdm",
+ .recalc = &omap3_dpll_recalc,
+};
+
+static const struct clksel div16_dpll5_clksel[] = {
+ { .parent = &dpll5_ck, .rates = div16_dpll_rates },
+ { .parent = NULL }
+};
+
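+/*
+ * dpll5_m2_ck supplies the 120 MHz clock consumed by usbtll_fck,
+ * usbhost_120m_fck and the usim_fck mux below.
+ */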
+static struct clk dpll5_m2_ck = {
+ .name = "dpll5_m2_ck",
+ .ops = &clkops_null,
+ .parent = &dpll5_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
+ .clksel_mask = OMAP3430ES2_DIV_120M_MASK,
+ .clksel = div16_dpll5_clksel,
+ .clkdm_name = "dpll5_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* CM EXTERNAL CLOCK OUTPUTS */
+
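+/*
+ * clkout2_src_ck muxes between core_ck, sys_ck, cm_96m_fck and
+ * omap_54m_fck; sys_clkout2 then divides the selected source by 1, 2,
+ * 4, 8 or 16 and presumably drives the external SYS_CLKOUT2 pad.
+ */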
+static const struct clksel_rate clkout2_src_core_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate clkout2_src_sys_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate clkout2_src_96m_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate clkout2_src_54m_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel clkout2_src_clksel[] = {
+ { .parent = &core_ck, .rates = clkout2_src_core_rates },
+ { .parent = &sys_ck, .rates = clkout2_src_sys_rates },
+ { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
+ { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
+ { .parent = NULL }
+};
+
+static struct clk clkout2_src_ck = {
+ .name = "clkout2_src_ck",
+ .ops = &clkops_omap2_dflt,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP3430_CM_CLKOUT_CTRL,
+ .enable_bit = OMAP3430_CLKOUT2_EN_SHIFT,
+ .clksel_reg = OMAP3430_CM_CLKOUT_CTRL,
+ .clksel_mask = OMAP3430_CLKOUT2SOURCE_MASK,
+ .clksel = clkout2_src_clksel,
+ .clkdm_name = "core_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel_rate sys_clkout2_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 1, .flags = RATE_IN_343X },
+ { .div = 4, .val = 2, .flags = RATE_IN_343X },
+ { .div = 8, .val = 3, .flags = RATE_IN_343X },
+ { .div = 16, .val = 4, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel sys_clkout2_clksel[] = {
+ { .parent = &clkout2_src_ck, .rates = sys_clkout2_rates },
+ { .parent = NULL },
+};
+
+static struct clk sys_clkout2 = {
+ .name = "sys_clkout2",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP3430_CM_CLKOUT_CTRL,
+ .clksel_mask = OMAP3430_CLKOUT2_DIV_MASK,
+ .clksel = sys_clkout2_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* CM OUTPUT CLOCKS */
+
+static struct clk corex2_fck = {
+ .name = "corex2_fck",
+ .ops = &clkops_null,
+ .parent = &dpll3_m2x2_ck,
+ .recalc = &followparent_recalc,
+};
+
+/* DPLL power domain clock controls */
+
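+/*
+ * dpll1_fck and dpll2_fck below divide core_ck by 1, 2 or 4 (.val
+ * equals the divider here), presumably as the MPU and IVA2 DPLL
+ * bypass/reference inputs.
+ */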
+static const struct clksel_rate div4_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 0 }
+};
+
+static const struct clksel div4_core_clksel[] = {
+ { .parent = &core_ck, .rates = div4_rates },
+ { .parent = NULL }
+};
+
+/*
+ * REVISIT: Are these in the DPLL power domain or the CM power domain?
+ * The docs may be inconsistent here.
+ */
+static struct clk dpll1_fck = {
+ .name = "dpll1_fck",
+ .ops = &clkops_null,
+ .parent = &core_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ .clksel_mask = OMAP3430_MPU_CLK_SRC_MASK,
+ .clksel = div4_core_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk mpu_ck = {
+ .name = "mpu_ck",
+ .ops = &clkops_null,
+ .parent = &dpll1_x2m2_ck,
+ .clkdm_name = "mpu_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* arm_fck is divided by two when DPLL1 locked; otherwise, passthrough mpu_ck */
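+/* Note: the divider status is read back from ST_MPU_CLK in CM_IDLEST_PLL */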
+static const struct clksel_rate arm_fck_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 1, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel arm_fck_clksel[] = {
+ { .parent = &mpu_ck, .rates = arm_fck_rates },
+ { .parent = NULL }
+};
+
+static struct clk arm_fck = {
+ .name = "arm_fck",
+ .ops = &clkops_null,
+ .parent = &mpu_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
+ .clksel_mask = OMAP3430_ST_MPU_CLK_MASK,
+ .clksel = arm_fck_clksel,
+ .clkdm_name = "mpu_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* XXX What about neon_clkdm? */
+
+/*
+ * REVISIT: This clock is never specifically defined in the 3430 TRM,
+ * although it is referenced - so this is a guess
+ */
+static struct clk emu_mpu_alwon_ck = {
+ .name = "emu_mpu_alwon_ck",
+ .ops = &clkops_null,
+ .parent = &mpu_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dpll2_fck = {
+ .name = "dpll2_fck",
+ .ops = &clkops_null,
+ .parent = &core_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
+ .clksel_mask = OMAP3430_IVA2_CLK_SRC_MASK,
+ .clksel = div4_core_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk iva2_ck = {
+ .name = "iva2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &dpll2_m2_ck,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+ .clkdm_name = "iva2_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* Common interface clocks */
+
+static const struct clksel div2_core_clksel[] = {
+ { .parent = &core_ck, .rates = div2_rates },
+ { .parent = NULL }
+};
+
+static struct clk l3_ick = {
+ .name = "l3_ick",
+ .ops = &clkops_null,
+ .parent = &core_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_L3_MASK,
+ .clksel = div2_core_clksel,
+ .clkdm_name = "core_l3_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel div2_l3_clksel[] = {
+ { .parent = &l3_ick, .rates = div2_rates },
+ { .parent = NULL }
+};
+
+static struct clk l4_ick = {
+ .name = "l4_ick",
+ .ops = &clkops_null,
+ .parent = &l3_ick,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_L4_MASK,
+ .clksel = div2_l3_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel div2_l4_clksel[] = {
+ { .parent = &l4_ick, .rates = div2_rates },
+ { .parent = NULL }
+};
+
+static struct clk rm_ick = {
+ .name = "rm_ick",
+ .ops = &clkops_null,
+ .parent = &l4_ick,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_RM_MASK,
+ .clksel = div2_l4_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* GFX power domain */
+
+/* GFX clocks are in 3430ES1 only. 3430ES2 and later use the SGX instead */
+
+static const struct clksel gfx_l3_clksel[] = {
+ { .parent = &l3_ick, .rates = gfx_l3_rates },
+ { .parent = NULL }
+};
+
+/* Virtual parent clock for gfx_l3_ick and gfx_l3_fck */
+static struct clk gfx_l3_ck = {
+ .name = "gfx_l3_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &l3_ick,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
+ .enable_bit = OMAP_EN_GFX_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gfx_l3_fck = {
+ .name = "gfx_l3_fck",
+ .ops = &clkops_null,
+ .parent = &gfx_l3_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP_CLKSEL_GFX_MASK,
+ .clksel = gfx_l3_clksel,
+ .clkdm_name = "gfx_3430es1_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gfx_l3_ick = {
+ .name = "gfx_l3_ick",
+ .ops = &clkops_null,
+ .parent = &gfx_l3_ck,
+ .clkdm_name = "gfx_3430es1_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gfx_cg1_ck = {
+ .name = "gfx_cg1_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &gfx_l3_fck, /* REVISIT: correct? */
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES1_EN_2D_SHIFT,
+ .clkdm_name = "gfx_3430es1_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gfx_cg2_ck = {
+ .name = "gfx_cg2_ck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &gfx_l3_fck, /* REVISIT: correct? */
+ .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES1_EN_3D_SHIFT,
+ .clkdm_name = "gfx_3430es1_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* SGX power domain - 3430ES2 only */
+
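+/* sgx_fck runs at core_ck / 3, 4 or 6, or straight from cm_96m_fck */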
+static const struct clksel_rate sgx_core_rates[] = {
+ { .div = 3, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 4, .val = 1, .flags = RATE_IN_343X },
+ { .div = 6, .val = 2, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel_rate sgx_96m_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 },
+};
+
+static const struct clksel sgx_clksel[] = {
+ { .parent = &core_ck, .rates = sgx_core_rates },
+ { .parent = &cm_96m_fck, .rates = sgx_96m_rates },
+ { .parent = NULL },
+};
+
+static struct clk sgx_fck = {
+ .name = "sgx_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430ES2_CLKSEL_SGX_MASK,
+ .clksel = sgx_clksel,
+ .clkdm_name = "sgx_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk sgx_ick = {
+ .name = "sgx_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
+ .clkdm_name = "sgx_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* CORE power domain */
+
+static struct clk d2d_26m_fck = {
+ .name = "d2d_26m_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &sys_ck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430ES1_EN_D2D_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk modem_fck = {
+ .name = "modem_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &sys_ck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MODEM_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sad2d_ick = {
+ .name = "sad2d_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SAD2D_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mad2d_ick = {
+ .name = "mad2d_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP3430_EN_MAD2D_SHIFT,
+ .clkdm_name = "d2d_clkdm",
+ .recalc = &followparent_recalc,
+};
+
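+/* Each GPTIMER fclk below muxes between the 32 kHz clock and sys_ck */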
+static const struct clksel omap343x_gpt_clksel[] = {
+ { .parent = &omap_32k_fck, .rates = gpt_32k_rates },
+ { .parent = &sys_ck, .rates = gpt_sys_rates },
+ { .parent = NULL}
+};
+
+static struct clk gpt10_fck = {
+ .name = "gpt10_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &sys_ck,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_GPT10_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT10_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt11_fck = {
+ .name = "gpt11_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &sys_ck,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_GPT11_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT11_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk cpefuse_fck = {
+ .name = "cpefuse_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &sys_ck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
+ .enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk ts_fck = {
+ .name = "ts_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &omap_32k_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
+ .enable_bit = OMAP3430ES2_EN_TS_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usbtll_fck = {
+ .name = "usbtll_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &dpll5_m2_ck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
+ .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+/* CORE 96M FCLK-derived clocks */
+
+static struct clk core_96m_fck = {
+ .name = "core_96m_fck",
+ .ops = &clkops_null,
+ .parent = &omap_96m_fck,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmchs3_fck = {
+ .name = "mmchs_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmchs2_fck = {
+ .name = "mmchs_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MMC2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mspro_fck = {
+ .name = "mspro_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmchs1_fck = {
+ .name = "mmchs_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MMC1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c3_fck = {
+ .name = "i2c_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 3,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_I2C3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c2_fck = {
+ .name = "i2c_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_I2C2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c1_fck = {
+ .name = "i2c_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_I2C1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/*
+ * MCBSP 1 & 5 get their 96MHz clock from core_96m_fck;
+ * MCBSP 2, 3, 4 get their 96MHz clock from per_96m_fck.
+ */
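+/*
+ * In the CLKS rate tables, .val 0 selects the internal 96 MHz source
+ * and .val 1 the external clock input modelled by mcbsp_clks.
+ */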
+static const struct clksel_rate common_mcbsp_96m_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+static const struct clksel mcbsp_15_clksel[] = {
+ { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL }
+};
+
+static struct clk mcbsp5_fck = {
+ .name = "mcbsp_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 5,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
+ .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
+ .clksel_mask = OMAP2_MCBSP5_CLKS_MASK,
+ .clksel = mcbsp_15_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk mcbsp1_fck = {
+ .name = "mcbsp_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
+ .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ .clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
+ .clksel = mcbsp_15_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* CORE_48M_FCK-derived clocks */
+
+static struct clk core_48m_fck = {
+ .name = "core_48m_fck",
+ .ops = &clkops_null,
+ .parent = &omap_48m_fck,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi4_fck = {
+ .name = "mcspi_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 4,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi3_fck = {
+ .name = "mcspi_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 3,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi2_fck = {
+ .name = "mcspi_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi1_fck = {
+ .name = "mcspi_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart2_fck = {
+ .name = "uart2_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_UART2_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart1_fck = {
+ .name = "uart1_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_UART1_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk fshostusb_fck = {
+ .name = "fshostusb_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+/* CORE_12M_FCK based clocks */
+
+static struct clk core_12m_fck = {
+ .name = "core_12m_fck",
+ .ops = &clkops_null,
+ .parent = &omap_12m_fck,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk hdq_fck = {
+ .name = "hdq_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_12m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_HDQ_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+/* DPLL3-derived clock */
+
+static const struct clksel_rate ssi_ssr_corex2_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 3, .val = 3, .flags = RATE_IN_343X },
+ { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 6, .val = 6, .flags = RATE_IN_343X },
+ { .div = 8, .val = 8, .flags = RATE_IN_343X },
+ { .div = 0 }
+};
+
+static const struct clksel ssi_ssr_clksel[] = {
+ { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
+ { .parent = NULL }
+};
+
+static struct clk ssi_ssr_fck_3430es1 = {
+ .name = "ssi_ssr_fck",
+ .ops = &clkops_omap2_dflt,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
+ .clksel = ssi_ssr_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk ssi_ssr_fck_3430es2 = {
+ .name = "ssi_ssr_fck",
+ .ops = &clkops_omap3430es2_ssi_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
+ .clksel = ssi_ssr_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
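+/* ssi_sst_fck is always ssi_ssr_fck divided by two (fixed_div = 2) */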
+static struct clk ssi_sst_fck_3430es1 = {
+ .name = "ssi_sst_fck",
+ .ops = &clkops_null,
+ .parent = &ssi_ssr_fck_3430es1,
+ .fixed_div = 2,
+ .recalc = &omap2_fixed_divisor_recalc,
+};
+
+static struct clk ssi_sst_fck_3430es2 = {
+ .name = "ssi_sst_fck",
+ .ops = &clkops_null,
+ .parent = &ssi_ssr_fck_3430es2,
+ .fixed_div = 2,
+ .recalc = &omap2_fixed_divisor_recalc,
+};
+
+/* CORE_L3_ICK based clocks */
+
+/*
+ * XXX must add clk_enable/clk_disable for these if standard code won't
+ * handle it
+ */
+static struct clk core_l3_ick = {
+ .name = "core_l3_ick",
+ .ops = &clkops_null,
+ .parent = &l3_ick,
+ .clkdm_name = "core_l3_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk hsotgusb_ick_3430es1 = {
+ .name = "hsotgusb_ick",
+ .ops = &clkops_omap2_dflt,
+ .parent = &core_l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk hsotgusb_ick_3430es2 = {
+ .name = "hsotgusb_ick",
+ .ops = &clkops_omap3430es2_hsotgusb_wait,
+ .parent = &core_l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sdrc_ick = {
+ .name = "sdrc_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SDRC_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .clkdm_name = "core_l3_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpmc_fck = {
+ .name = "gpmc_fck",
+ .ops = &clkops_null,
+ .parent = &core_l3_ick,
+ .flags = ENABLE_ON_INIT, /* huh? */
+ .clkdm_name = "core_l3_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* SECURITY_L3_ICK based clocks */
+
+static struct clk security_l3_ick = {
+ .name = "security_l3_ick",
+ .ops = &clkops_null,
+ .parent = &l3_ick,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk pka_ick = {
+ .name = "pka_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &security_l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_PKA_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+/* CORE_L4_ICK based clocks */
+
+static struct clk core_l4_ick = {
+ .name = "core_l4_ick",
+ .ops = &clkops_null,
+ .parent = &l4_ick,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usbtll_ick = {
+ .name = "usbtll_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
+ .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmchs3_ick = {
+ .name = "mmchs_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* Intersystem Communication Registers - chassis mode only */
+static struct clk icr_ick = {
+ .name = "icr_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_ICR_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk aes2_ick = {
+ .name = "aes2_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_AES2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sha12_ick = {
+ .name = "sha12_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SHA12_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk des2_ick = {
+ .name = "des2_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_DES2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmchs2_ick = {
+ .name = "mmchs_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MMC2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmchs1_ick = {
+ .name = "mmchs_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MMC1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mspro_ick = {
+ .name = "mspro_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MSPRO_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk hdq_ick = {
+ .name = "hdq_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_HDQ_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi4_ick = {
+ .name = "mcspi_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 4,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi3_ick = {
+ .name = "mcspi_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 3,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi2_ick = {
+ .name = "mcspi_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi1_ick = {
+ .name = "mcspi_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCSPI1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c3_ick = {
+ .name = "i2c_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 3,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_I2C3_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c2_ick = {
+ .name = "i2c_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_I2C2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c1_ick = {
+ .name = "i2c_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_I2C1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart2_ick = {
+ .name = "uart2_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart1_ick = {
+ .name = "uart1_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt11_ick = {
+ .name = "gpt11_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_GPT11_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt10_ick = {
+ .name = "gpt10_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_GPT10_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcbsp5_ick = {
+ .name = "mcbsp_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 5,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCBSP5_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcbsp1_ick = {
+ .name = "mcbsp_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 1,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MCBSP1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk fac_ick = {
+ .name = "fac_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430ES1_EN_FAC_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mailboxes_ick = {
+ .name = "mailboxes_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_MAILBOXES_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk omapctrl_ick = {
+ .name = "omapctrl_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT,
+ .flags = ENABLE_ON_INIT,
+ .recalc = &followparent_recalc,
+};
+
+/* SSI_L4_ICK based clocks */
+
+static struct clk ssi_l4_ick = {
+ .name = "ssi_l4_ick",
+ .ops = &clkops_null,
+ .parent = &l4_ick,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk ssi_ick_3430es1 = {
+ .name = "ssi_ick",
+ .ops = &clkops_omap2_dflt,
+ .parent = &ssi_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk ssi_ick_3430es2 = {
+ .name = "ssi_ick",
+ .ops = &clkops_omap3430es2_ssi_wait,
+ .parent = &ssi_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/*
+ * REVISIT: Technically the TRM claims that this is CORE_CLK based,
+ * but l4_ick makes more sense to me
+ */
+
+static const struct clksel usb_l4_clksel[] = {
+ { .parent = &l4_ick, .rates = div2_rates },
+ { .parent = NULL },
+};
+
+static struct clk usb_l4_ick = {
+ .name = "usb_l4_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &l4_ick,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
+ .clksel = usb_l4_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* SECURITY_L4_ICK2 based clocks */
+
+static struct clk security_l4_ick2 = {
+ .name = "security_l4_ick2",
+ .ops = &clkops_null,
+ .parent = &l4_ick,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk aes1_ick = {
+ .name = "aes1_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &security_l4_ick2,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_AES1_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk rng_ick = {
+ .name = "rng_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &security_l4_ick2,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_RNG_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sha11_ick = {
+ .name = "sha11_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &security_l4_ick2,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_SHA11_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk des1_ick = {
+ .name = "des1_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &security_l4_ick2,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
+ .enable_bit = OMAP3430_EN_DES1_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+/* DSS */
+static struct clk dss1_alwon_fck_3430es1 = {
+ .name = "dss1_alwon_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &dpll4_m4x2_ck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_DSS1_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss1_alwon_fck_3430es2 = {
+ .name = "dss1_alwon_fck",
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
+ .parent = &dpll4_m4x2_ck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_DSS1_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_tv_fck = {
+ .name = "dss_tv_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &omap_54m_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_TV_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_96m_fck = {
+ .name = "dss_96m_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &omap_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_TV_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss2_alwon_fck = {
+ .name = "dss2_alwon_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &sys_ck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_DSS2_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_ick_3430es1 = {
+ /* Handles both L3 and L4 clocks */
+ .name = "dss_ick",
+ .ops = &clkops_omap2_dflt,
+ .parent = &l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dss_ick_3430es2 = {
+ /* Handles both L3 and L4 clocks */
+ .name = "dss_ick",
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
+ .parent = &l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* CAM */
+
+static struct clk cam_mclk = {
+ .name = "cam_mclk",
+ .ops = &clkops_omap2_dflt,
+ .parent = &dpll4_m5x2_ck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_CAM_SHIFT,
+ .clkdm_name = "cam_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk cam_ick = {
+ /* Handles both L3 and L4 clocks */
+ .name = "cam_ick",
+ .ops = &clkops_omap2_dflt,
+ .parent = &l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_CAM_SHIFT,
+ .clkdm_name = "cam_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk csi2_96m_fck = {
+ .name = "csi2_96m_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &core_96m_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_CSI2_SHIFT,
+ .clkdm_name = "cam_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* USBHOST - 3430ES2 only */
+
+static struct clk usbhost_120m_fck = {
+ .name = "usbhost_120m_fck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &dpll5_m2_ck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES2_EN_USBHOST2_SHIFT,
+ .clkdm_name = "usbhost_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usbhost_48m_fck = {
+ .name = "usbhost_48m_fck",
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
+ .parent = &omap_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES2_EN_USBHOST1_SHIFT,
+ .clkdm_name = "usbhost_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usbhost_ick = {
+ /* Handles both L3 and L4 clocks */
+ .name = "usbhost_ick",
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
+ .parent = &l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430ES2_EN_USBHOST_SHIFT,
+ .clkdm_name = "usbhost_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* WKUP */
+
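+/*
+ * usim_fck can run from 96 MHz (divided by 2, 4, 8 or 10), from the
+ * 120 MHz dpll5_m2_ck output (divided by 4, 8, 16 or 20) or from
+ * sys_ck; .val is the CLKSEL_USIMOCP field encoding for each choice.
+ */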
+static const struct clksel_rate usim_96m_rates[] = {
+ { .div = 2, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 8, .val = 5, .flags = RATE_IN_343X },
+ { .div = 10, .val = 6, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel_rate usim_120m_rates[] = {
+ { .div = 4, .val = 7, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 8, .val = 8, .flags = RATE_IN_343X },
+ { .div = 16, .val = 9, .flags = RATE_IN_343X },
+ { .div = 20, .val = 10, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel usim_clksel[] = {
+ { .parent = &omap_96m_fck, .rates = usim_96m_rates },
+ { .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
+ { .parent = &sys_ck, .rates = div2_rates },
+ { .parent = NULL },
+};
+
+/* 3430ES2 only */
+static struct clk usim_fck = {
+ .name = "usim_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430ES2_CLKSEL_USIMOCP_MASK,
+ .clksel = usim_clksel,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* XXX should gpt1's clksel have wkup_32k_fck as the 32k opt? */
+static struct clk gpt1_fck = {
+ .name = "gpt1_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT1_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT1_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk wkup_32k_fck = {
+ .name = "wkup_32k_fck",
+ .ops = &clkops_null,
+ .parent = &omap_32k_fck,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio1_dbck = {
+ .name = "gpio1_dbck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &wkup_32k_fck,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt2_fck = {
+ .name = "wdt2_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_32k_fck,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_WDT2_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wkup_l4_ick = {
+ .name = "wkup_l4_ick",
+ .ops = &clkops_null,
+ .parent = &sys_ck,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* 3430ES2 only */
+/* Never specifically named in the TRM, so we have to infer a likely name */
+static struct clk usim_ick = {
+ .name = "usim_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt2_ick = {
+ .name = "wdt2_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_WDT2_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt1_ick = {
+ .name = "wdt1_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_WDT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio1_ick = {
+ .name = "gpio1_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk omap_32ksync_ick = {
+ .name = "omap_32ksync_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_32KSYNC_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* XXX This clock no longer exists in 3430 TRM rev F */
+static struct clk gpt12_ick = {
+ .name = "gpt12_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT12_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt1_ick = {
+ .name = "gpt1_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &wkup_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT1_SHIFT,
+ .clkdm_name = "wkup_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* PER clock domain */
+
+static struct clk per_96m_fck = {
+ .name = "per_96m_fck",
+ .ops = &clkops_null,
+ .parent = &omap_96m_alwon_fck,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk per_48m_fck = {
+ .name = "per_48m_fck",
+ .ops = &clkops_null,
+ .parent = &omap_48m_fck,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart3_fck = {
+ .name = "uart3_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_UART3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt2_fck = {
+ .name = "gpt2_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT2_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT2_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt3_fck = {
+ .name = "gpt3_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT3_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT3_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt4_fck = {
+ .name = "gpt4_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT4_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT4_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt5_fck = {
+ .name = "gpt5_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT5_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT5_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt6_fck = {
+ .name = "gpt6_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT6_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT6_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt7_fck = {
+ .name = "gpt7_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT7_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT7_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt8_fck = {
+ .name = "gpt8_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT8_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT8_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpt9_fck = {
+ .name = "gpt9_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPT9_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_GPT9_MASK,
+ .clksel = omap343x_gpt_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk per_32k_alwon_fck = {
+ .name = "per_32k_alwon_fck",
+ .ops = &clkops_null,
+ .parent = &omap_32k_fck,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio6_dbck = {
+ .name = "gpio6_dbck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &per_32k_alwon_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio5_dbck = {
+ .name = "gpio5_dbck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &per_32k_alwon_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio4_dbck = {
+ .name = "gpio4_dbck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &per_32k_alwon_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio3_dbck = {
+ .name = "gpio3_dbck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &per_32k_alwon_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio2_dbck = {
+ .name = "gpio2_dbck",
+ .ops = &clkops_omap2_dflt,
+ .parent = &per_32k_alwon_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt3_fck = {
+ .name = "wdt3_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_32k_alwon_fck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_WDT3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk per_l4_ick = {
+ .name = "per_l4_ick",
+ .ops = &clkops_null,
+ .parent = &l4_ick,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio6_ick = {
+ .name = "gpio6_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO6_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio5_ick = {
+ .name = "gpio5_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO5_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio4_ick = {
+ .name = "gpio4_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO4_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio3_ick = {
+ .name = "gpio3_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio2_ick = {
+ .name = "gpio2_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPIO2_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt3_ick = {
+ .name = "wdt3_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_WDT3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart3_ick = {
+ .name = "uart3_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_UART3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt9_ick = {
+ .name = "gpt9_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT9_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt8_ick = {
+ .name = "gpt8_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT8_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt7_ick = {
+ .name = "gpt7_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT7_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt6_ick = {
+ .name = "gpt6_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT6_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt5_ick = {
+ .name = "gpt5_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT5_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt4_ick = {
+ .name = "gpt4_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT4_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt3_ick = {
+ .name = "gpt3_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpt2_ick = {
+ .name = "gpt2_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_GPT2_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcbsp2_ick = {
+ .name = "mcbsp_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcbsp3_ick = {
+ .name = "mcbsp_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 3,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcbsp4_ick = {
+ .name = "mcbsp_ick",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 4,
+ .parent = &per_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
+ .clkdm_name = "per_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel mcbsp_234_clksel[] = {
+ { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
+ { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
+ { .parent = NULL }
+};
+
+static struct clk mcbsp2_fck = {
+ .name = "mcbsp_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 2,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP2_SHIFT,
+ .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
+ .clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
+ .clksel = mcbsp_234_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk mcbsp3_fck = {
+ .name = "mcbsp_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 3,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP3_SHIFT,
+ .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
+ .clksel_mask = OMAP2_MCBSP3_CLKS_MASK,
+ .clksel = mcbsp_234_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk mcbsp4_fck = {
+ .name = "mcbsp_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .id = 4,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_MCBSP4_SHIFT,
+ .clksel_reg = OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
+ .clksel_mask = OMAP2_MCBSP4_CLKS_MASK,
+ .clksel = mcbsp_234_clksel,
+ .clkdm_name = "per_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* EMU clocks */
+
+/* More information: ARM Cortex-A8 Technical Reference Manual, sect 10.1 */
+
+static const struct clksel_rate emu_src_sys_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 },
+};
+
+static const struct clksel_rate emu_src_core_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 },
+};
+
+static const struct clksel_rate emu_src_per_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 },
+};
+
+static const struct clksel_rate emu_src_mpu_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 },
+};
+
+static const struct clksel emu_src_clksel[] = {
+ { .parent = &sys_ck, .rates = emu_src_sys_rates },
+ { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
+ { .parent = &emu_per_alwon_ck, .rates = emu_src_per_rates },
+ { .parent = &emu_mpu_alwon_ck, .rates = emu_src_mpu_rates },
+ { .parent = NULL },
+};
+
+/*
+ * Like the clkout_src clocks, emu_src_clk is a virtual clock, existing only
+ * to switch the source of some of the EMU clocks.
+ * XXX Are there CLKEN bits for these EMU clks?
+ */
+static struct clk emu_src_ck = {
+ .name = "emu_src_ck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_MUX_CTRL_MASK,
+ .clksel = emu_src_clksel,
+ .clkdm_name = "emu_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel_rate pclk_emu_rates[] = {
+ { .div = 2, .val = 2, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 3, .val = 3, .flags = RATE_IN_343X },
+ { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 6, .val = 6, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel pclk_emu_clksel[] = {
+ { .parent = &emu_src_ck, .rates = pclk_emu_rates },
+ { .parent = NULL },
+};
+
+static struct clk pclk_fck = {
+ .name = "pclk_fck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_CLKSEL_PCLK_MASK,
+ .clksel = pclk_emu_clksel,
+ .clkdm_name = "emu_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel_rate pclkx2_emu_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 3, .val = 3, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel pclkx2_emu_clksel[] = {
+ { .parent = &emu_src_ck, .rates = pclkx2_emu_rates },
+ { .parent = NULL },
+};
+
+static struct clk pclkx2_fck = {
+ .name = "pclkx2_fck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_CLKSEL_PCLKX2_MASK,
+ .clksel = pclkx2_emu_clksel,
+ .clkdm_name = "emu_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel atclk_emu_clksel[] = {
+ { .parent = &emu_src_ck, .rates = div2_rates },
+ { .parent = NULL },
+};
+
+static struct clk atclk_fck = {
+ .name = "atclk_fck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_CLKSEL_ATCLK_MASK,
+ .clksel = atclk_emu_clksel,
+ .clkdm_name = "emu_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk traceclk_src_fck = {
+ .name = "traceclk_src_fck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_TRACE_MUX_CTRL_MASK,
+ .clksel = emu_src_clksel,
+ .clkdm_name = "emu_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel_rate traceclk_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_343X | DEFAULT_RATE },
+ { .div = 2, .val = 2, .flags = RATE_IN_343X },
+ { .div = 4, .val = 4, .flags = RATE_IN_343X },
+ { .div = 0 },
+};
+
+static const struct clksel traceclk_clksel[] = {
+ { .parent = &traceclk_src_fck, .rates = traceclk_rates },
+ { .parent = NULL },
+};
+
+static struct clk traceclk_fck = {
+ .name = "traceclk_fck",
+ .ops = &clkops_null,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
+ .clksel_mask = OMAP3430_CLKSEL_TRACECLK_MASK,
+ .clksel = traceclk_clksel,
+ .clkdm_name = "emu_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* SR clocks */
+
+/* SmartReflex fclk (VDD1) */
+static struct clk sr1_fck = {
+ .name = "sr1_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &sys_ck,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_SR1_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+/* SmartReflex fclk (VDD2) */
+static struct clk sr2_fck = {
+ .name = "sr2_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &sys_ck,
+ .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_SR2_SHIFT,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sr_l4_ick = {
+ .name = "sr_l4_ick",
+ .ops = &clkops_null, /* RMK: missing? */
+ .parent = &l4_ick,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+/* SECURE_32K_FCK clocks */
+
+static struct clk gpt12_fck = {
+ .name = "gpt12_fck",
+ .ops = &clkops_null,
+ .parent = &secure_32k_fck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt1_fck = {
+ .name = "wdt1_fck",
+ .ops = &clkops_null,
+ .parent = &secure_32k_fck,
+ .recalc = &followparent_recalc,
+};
+
+
+/*
+ * clkdev
+ */
+
+static struct omap_clk omap34xx_clks[] = {
+ CLK(NULL, "omap_32k_fck", &omap_32k_fck, CK_343X),
+ CLK(NULL, "virt_12m_ck", &virt_12m_ck, CK_343X),
+ CLK(NULL, "virt_13m_ck", &virt_13m_ck, CK_343X),
+ CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck, CK_3430ES2),
+ CLK(NULL, "virt_19_2m_ck", &virt_19_2m_ck, CK_343X),
+ CLK(NULL, "virt_26m_ck", &virt_26m_ck, CK_343X),
+ CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck, CK_343X),
+ CLK(NULL, "osc_sys_ck", &osc_sys_ck, CK_343X),
+ CLK(NULL, "sys_ck", &sys_ck, CK_343X),
+ CLK(NULL, "sys_altclk", &sys_altclk, CK_343X),
+ CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_343X),
+ CLK(NULL, "sys_clkout1", &sys_clkout1, CK_343X),
+ CLK(NULL, "dpll1_ck", &dpll1_ck, CK_343X),
+ CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck, CK_343X),
+ CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck, CK_343X),
+ CLK(NULL, "dpll2_ck", &dpll2_ck, CK_343X),
+ CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck, CK_343X),
+ CLK(NULL, "dpll3_ck", &dpll3_ck, CK_343X),
+ CLK(NULL, "core_ck", &core_ck, CK_343X),
+ CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck, CK_343X),
+ CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck, CK_343X),
+ CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck, CK_343X),
+ CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck, CK_343X),
+ CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck, CK_343X),
+ CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck, CK_343X),
+ CLK(NULL, "dpll4_ck", &dpll4_ck, CK_343X),
+ CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck, CK_343X),
+ CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck, CK_343X),
+ CLK(NULL, "omap_96m_fck", &omap_96m_fck, CK_343X),
+ CLK(NULL, "cm_96m_fck", &cm_96m_fck, CK_343X),
+ CLK(NULL, "omap_54m_fck", &omap_54m_fck, CK_343X),
+ CLK(NULL, "omap_48m_fck", &omap_48m_fck, CK_343X),
+ CLK(NULL, "omap_12m_fck", &omap_12m_fck, CK_343X),
+ CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck, CK_343X),
+ CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck, CK_343X),
+ CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck, CK_343X),
+ CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck, CK_343X),
+ CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck, CK_343X),
+ CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck, CK_343X),
+ CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck, CK_343X),
+ CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck, CK_343X),
+ CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck, CK_343X),
+ CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck, CK_343X),
+ CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck, CK_343X),
+ CLK(NULL, "dpll5_ck", &dpll5_ck, CK_3430ES2),
+ CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck, CK_3430ES2),
+ CLK(NULL, "clkout2_src_ck", &clkout2_src_ck, CK_343X),
+ CLK(NULL, "sys_clkout2", &sys_clkout2, CK_343X),
+ CLK(NULL, "corex2_fck", &corex2_fck, CK_343X),
+ CLK(NULL, "dpll1_fck", &dpll1_fck, CK_343X),
+ CLK(NULL, "mpu_ck", &mpu_ck, CK_343X),
+ CLK(NULL, "arm_fck", &arm_fck, CK_343X),
+ CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck, CK_343X),
+ CLK(NULL, "dpll2_fck", &dpll2_fck, CK_343X),
+ CLK(NULL, "iva2_ck", &iva2_ck, CK_343X),
+ CLK(NULL, "l3_ick", &l3_ick, CK_343X),
+ CLK(NULL, "l4_ick", &l4_ick, CK_343X),
+ CLK(NULL, "rm_ick", &rm_ick, CK_343X),
+ CLK(NULL, "gfx_l3_ck", &gfx_l3_ck, CK_3430ES1),
+ CLK(NULL, "gfx_l3_fck", &gfx_l3_fck, CK_3430ES1),
+ CLK(NULL, "gfx_l3_ick", &gfx_l3_ick, CK_3430ES1),
+ CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck, CK_3430ES1),
+ CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck, CK_3430ES1),
+ CLK(NULL, "sgx_fck", &sgx_fck, CK_3430ES2),
+ CLK(NULL, "sgx_ick", &sgx_ick, CK_3430ES2),
+ CLK(NULL, "d2d_26m_fck", &d2d_26m_fck, CK_3430ES1),
+ CLK(NULL, "modem_fck", &modem_fck, CK_343X),
+ CLK(NULL, "sad2d_ick", &sad2d_ick, CK_343X),
+ CLK(NULL, "mad2d_ick", &mad2d_ick, CK_343X),
+ CLK(NULL, "gpt10_fck", &gpt10_fck, CK_343X),
+ CLK(NULL, "gpt11_fck", &gpt11_fck, CK_343X),
+ CLK(NULL, "cpefuse_fck", &cpefuse_fck, CK_3430ES2),
+ CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2),
+ CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2),
+ CLK(NULL, "core_96m_fck", &core_96m_fck, CK_343X),
+ CLK("mmci-omap-hs.2", "fck", &mmchs3_fck, CK_3430ES2),
+ CLK("mmci-omap-hs.1", "fck", &mmchs2_fck, CK_343X),
+ CLK(NULL, "mspro_fck", &mspro_fck, CK_343X),
+ CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_343X),
+ CLK("i2c_omap.3", "fck", &i2c3_fck, CK_343X),
+ CLK("i2c_omap.2", "fck", &i2c2_fck, CK_343X),
+ CLK("i2c_omap.1", "fck", &i2c1_fck, CK_343X),
+ CLK("omap-mcbsp.5", "fck", &mcbsp5_fck, CK_343X),
+ CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_343X),
+ CLK(NULL, "core_48m_fck", &core_48m_fck, CK_343X),
+ CLK("omap2_mcspi.4", "fck", &mcspi4_fck, CK_343X),
+ CLK("omap2_mcspi.3", "fck", &mcspi3_fck, CK_343X),
+ CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_343X),
+ CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_343X),
+ CLK(NULL, "uart2_fck", &uart2_fck, CK_343X),
+ CLK(NULL, "uart1_fck", &uart1_fck, CK_343X),
+ CLK(NULL, "fshostusb_fck", &fshostusb_fck, CK_3430ES1),
+ CLK(NULL, "core_12m_fck", &core_12m_fck, CK_343X),
+ CLK("omap_hdq.0", "fck", &hdq_fck, CK_343X),
+ CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2, CK_3430ES2),
+ CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2),
+ CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X),
+ CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
+ CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es2, CK_3430ES2),
+ CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X),
+ CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X),
+ CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X),
+ CLK(NULL, "pka_ick", &pka_ick, CK_343X),
+ CLK(NULL, "core_l4_ick", &core_l4_ick, CK_343X),
+ CLK(NULL, "usbtll_ick", &usbtll_ick, CK_3430ES2),
+ CLK("mmci-omap-hs.2", "ick", &mmchs3_ick, CK_3430ES2),
+ CLK(NULL, "icr_ick", &icr_ick, CK_343X),
+ CLK(NULL, "aes2_ick", &aes2_ick, CK_343X),
+ CLK(NULL, "sha12_ick", &sha12_ick, CK_343X),
+ CLK(NULL, "des2_ick", &des2_ick, CK_343X),
+ CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_343X),
+ CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_343X),
+ CLK(NULL, "mspro_ick", &mspro_ick, CK_343X),
+ CLK("omap_hdq.0", "ick", &hdq_ick, CK_343X),
+ CLK("omap2_mcspi.4", "ick", &mcspi4_ick, CK_343X),
+ CLK("omap2_mcspi.3", "ick", &mcspi3_ick, CK_343X),
+ CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_343X),
+ CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_343X),
+ CLK("i2c_omap.3", "ick", &i2c3_ick, CK_343X),
+ CLK("i2c_omap.2", "ick", &i2c2_ick, CK_343X),
+ CLK("i2c_omap.1", "ick", &i2c1_ick, CK_343X),
+ CLK(NULL, "uart2_ick", &uart2_ick, CK_343X),
+ CLK(NULL, "uart1_ick", &uart1_ick, CK_343X),
+ CLK(NULL, "gpt11_ick", &gpt11_ick, CK_343X),
+ CLK(NULL, "gpt10_ick", &gpt10_ick, CK_343X),
+ CLK("omap-mcbsp.5", "ick", &mcbsp5_ick, CK_343X),
+ CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_343X),
+ CLK(NULL, "fac_ick", &fac_ick, CK_3430ES1),
+ CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_343X),
+ CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_343X),
+ CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_343X),
+ CLK(NULL, "ssi_ick", &ssi_ick_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_ick", &ssi_ick_3430es2, CK_3430ES2),
+ CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_3430ES1),
+ CLK(NULL, "security_l4_ick2", &security_l4_ick2, CK_343X),
+ CLK(NULL, "aes1_ick", &aes1_ick, CK_343X),
+ CLK("omap_rng", "ick", &rng_ick, CK_343X),
+ CLK(NULL, "sha11_ick", &sha11_ick, CK_343X),
+ CLK(NULL, "des1_ick", &des1_ick, CK_343X),
+ CLK("omapdss", "dss1_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
+ CLK("omapdss", "dss1_fck", &dss1_alwon_fck_3430es2, CK_3430ES2),
+ CLK("omapdss", "tv_fck", &dss_tv_fck, CK_343X),
+ CLK("omapdss", "video_fck", &dss_96m_fck, CK_343X),
+ CLK("omapdss", "dss2_fck", &dss2_alwon_fck, CK_343X),
+ CLK("omapdss", "ick", &dss_ick_3430es1, CK_3430ES1),
+ CLK("omapdss", "ick", &dss_ick_3430es2, CK_3430ES2),
+ CLK(NULL, "cam_mclk", &cam_mclk, CK_343X),
+ CLK(NULL, "cam_ick", &cam_ick, CK_343X),
+ CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
+ CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck, CK_3430ES2),
+ CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck, CK_3430ES2),
+ CLK(NULL, "usbhost_ick", &usbhost_ick, CK_3430ES2),
+ CLK(NULL, "usim_fck", &usim_fck, CK_3430ES2),
+ CLK(NULL, "gpt1_fck", &gpt1_fck, CK_343X),
+ CLK(NULL, "wkup_32k_fck", &wkup_32k_fck, CK_343X),
+ CLK(NULL, "gpio1_dbck", &gpio1_dbck, CK_343X),
+ CLK("omap_wdt", "fck", &wdt2_fck, CK_343X),
+ CLK(NULL, "wkup_l4_ick", &wkup_l4_ick, CK_343X),
+ CLK(NULL, "usim_ick", &usim_ick, CK_3430ES2),
+ CLK("omap_wdt", "ick", &wdt2_ick, CK_343X),
+ CLK(NULL, "wdt1_ick", &wdt1_ick, CK_343X),
+ CLK(NULL, "gpio1_ick", &gpio1_ick, CK_343X),
+ CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick, CK_343X),
+ CLK(NULL, "gpt12_ick", &gpt12_ick, CK_343X),
+ CLK(NULL, "gpt1_ick", &gpt1_ick, CK_343X),
+ CLK(NULL, "per_96m_fck", &per_96m_fck, CK_343X),
+ CLK(NULL, "per_48m_fck", &per_48m_fck, CK_343X),
+ CLK(NULL, "uart3_fck", &uart3_fck, CK_343X),
+ CLK(NULL, "gpt2_fck", &gpt2_fck, CK_343X),
+ CLK(NULL, "gpt3_fck", &gpt3_fck, CK_343X),
+ CLK(NULL, "gpt4_fck", &gpt4_fck, CK_343X),
+ CLK(NULL, "gpt5_fck", &gpt5_fck, CK_343X),
+ CLK(NULL, "gpt6_fck", &gpt6_fck, CK_343X),
+ CLK(NULL, "gpt7_fck", &gpt7_fck, CK_343X),
+ CLK(NULL, "gpt8_fck", &gpt8_fck, CK_343X),
+ CLK(NULL, "gpt9_fck", &gpt9_fck, CK_343X),
+ CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck, CK_343X),
+ CLK(NULL, "gpio6_dbck", &gpio6_dbck, CK_343X),
+ CLK(NULL, "gpio5_dbck", &gpio5_dbck, CK_343X),
+ CLK(NULL, "gpio4_dbck", &gpio4_dbck, CK_343X),
+ CLK(NULL, "gpio3_dbck", &gpio3_dbck, CK_343X),
+ CLK(NULL, "gpio2_dbck", &gpio2_dbck, CK_343X),
+ CLK(NULL, "wdt3_fck", &wdt3_fck, CK_343X),
+ CLK(NULL, "per_l4_ick", &per_l4_ick, CK_343X),
+ CLK(NULL, "gpio6_ick", &gpio6_ick, CK_343X),
+ CLK(NULL, "gpio5_ick", &gpio5_ick, CK_343X),
+ CLK(NULL, "gpio4_ick", &gpio4_ick, CK_343X),
+ CLK(NULL, "gpio3_ick", &gpio3_ick, CK_343X),
+ CLK(NULL, "gpio2_ick", &gpio2_ick, CK_343X),
+ CLK(NULL, "wdt3_ick", &wdt3_ick, CK_343X),
+ CLK(NULL, "uart3_ick", &uart3_ick, CK_343X),
+ CLK(NULL, "gpt9_ick", &gpt9_ick, CK_343X),
+ CLK(NULL, "gpt8_ick", &gpt8_ick, CK_343X),
+ CLK(NULL, "gpt7_ick", &gpt7_ick, CK_343X),
+ CLK(NULL, "gpt6_ick", &gpt6_ick, CK_343X),
+ CLK(NULL, "gpt5_ick", &gpt5_ick, CK_343X),
+ CLK(NULL, "gpt4_ick", &gpt4_ick, CK_343X),
+ CLK(NULL, "gpt3_ick", &gpt3_ick, CK_343X),
+ CLK(NULL, "gpt2_ick", &gpt2_ick, CK_343X),
+ CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_343X),
+ CLK("omap-mcbsp.3", "ick", &mcbsp3_ick, CK_343X),
+ CLK("omap-mcbsp.4", "ick", &mcbsp4_ick, CK_343X),
+ CLK("omap-mcbsp.2", "fck", &mcbsp2_fck, CK_343X),
+ CLK("omap-mcbsp.3", "fck", &mcbsp3_fck, CK_343X),
+ CLK("omap-mcbsp.4", "fck", &mcbsp4_fck, CK_343X),
+ CLK("etb", "emu_src_ck", &emu_src_ck, CK_343X),
+ CLK(NULL, "pclk_fck", &pclk_fck, CK_343X),
+ CLK(NULL, "pclkx2_fck", &pclkx2_fck, CK_343X),
+ CLK(NULL, "atclk_fck", &atclk_fck, CK_343X),
+ CLK(NULL, "traceclk_src_fck", &traceclk_src_fck, CK_343X),
+ CLK(NULL, "traceclk_fck", &traceclk_fck, CK_343X),
+ CLK(NULL, "sr1_fck", &sr1_fck, CK_343X),
+ CLK(NULL, "sr2_fck", &sr2_fck, CK_343X),
+ CLK(NULL, "sr_l4_ick", &sr_l4_ick, CK_343X),
+ CLK(NULL, "secure_32k_fck", &secure_32k_fck, CK_343X),
+ CLK(NULL, "gpt12_fck", &gpt12_fck, CK_343X),
+ CLK(NULL, "wdt1_fck", &wdt1_fck, CK_343X),
+};
+
+
+int __init omap2_clk_init(void)
+{
+ /* struct prcm_config *prcm; */
+ struct omap_clk *c;
+ /* u32 clkrate; */
+ u32 cpu_clkflg;
+
+ if (cpu_is_omap34xx()) {
+ cpu_mask = RATE_IN_343X;
+ cpu_clkflg = CK_343X;
+
+ /*
+ * Update this if there are further clock changes between ES2
+ * and production parts
+ */
+ if (omap_rev() == OMAP3430_REV_ES1_0) {
+ /* No 3430ES1-only rates exist, so no RATE_IN_3430ES1 */
+ cpu_clkflg |= CK_3430ES1;
+ } else {
+ cpu_mask |= RATE_IN_3430ES2;
+ cpu_clkflg |= CK_3430ES2;
+ }
+ }
+
+ clk_init(&omap2_clk_functions);
+
+ for (c = omap34xx_clks; c < omap34xx_clks + ARRAY_SIZE(omap34xx_clks); c++)
+ clk_preinit(c->lk.clk);
+
+ for (c = omap34xx_clks; c < omap34xx_clks + ARRAY_SIZE(omap34xx_clks); c++)
+ if (c->cpu & cpu_clkflg) {
+ clkdev_add(&c->lk);
+ clk_register(c->lk.clk);
+ omap2_init_clk_clkdm(c->lk.clk);
+ }
+
+ /* REVISIT: Not yet ready for OMAP3 */
+#if 0
+ /* Check the MPU rate set by bootloader */
+ clkrate = omap2_get_dpll_rate_24xx(&dpll_ck);
+ for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+ if (!(prcm->flags & cpu_mask))
+ continue;
+ if (prcm->xtal_speed != sys_ck.rate)
+ continue;
+ if (prcm->dpll_speed <= clkrate)
+ break;
+ }
+ curr_prcm_set = prcm;
+#endif
+
+ recalculate_root_clocks();
+
+ printk(KERN_INFO "Clocking rate (Crystal/Core/MPU): "
+ "%ld.%01ld/%ld/%ld MHz\n",
+ (osc_sys_ck.rate / 1000000), (osc_sys_ck.rate / 100000) % 10,
+ (core_ck.rate / 1000000), (arm_fck.rate / 1000000));
+
+ /*
+ * Only enable those clocks we will need, let the drivers
+ * enable other clocks as necessary
+ */
+ clk_enable_init_clocks();
+
+ /*
+ * Lock DPLL5 and put it in autoidle.
+ */
+ if (omap_rev() >= OMAP3430_REV_ES2_0)
+ omap3_clk_lock_dpll5();
+
+ /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
+ sdrc_ick_p = clk_get(NULL, "sdrc_ick");
+ arm_fck_p = clk_get(NULL, "arm_fck");
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/clock44xx.c b/arch/arm/mach-omap2/clock44xx.c
new file mode 100644
index 00000000000..e370868a79a
--- /dev/null
+++ b/arch/arm/mach-omap2/clock44xx.c
@@ -0,0 +1,33 @@
+/*
+ * OMAP4-specific clock framework functions
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ *
+ * Rajendra Nayak (rnayak@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/errno.h>
+#include "clock.h"
+
+struct clk_functions omap2_clk_functions = {
+ .clk_enable = omap2_clk_enable,
+ .clk_disable = omap2_clk_disable,
+ .clk_round_rate = omap2_clk_round_rate,
+ .clk_set_rate = omap2_clk_set_rate,
+ .clk_set_parent = omap2_clk_set_parent,
+ .clk_disable_unused = omap2_clk_disable_unused,
+};
+
+const struct clkops clkops_noncore_dpll_ops = {
+ .enable = &omap3_noncore_dpll_enable,
+ .disable = &omap3_noncore_dpll_disable,
+};
+
+void omap2_clk_prepare_for_reboot(void)
+{
+ return;
+}
diff --git a/arch/arm/mach-omap2/clock44xx.h b/arch/arm/mach-omap2/clock44xx.h
new file mode 100644
index 00000000000..59b9ced4daa
--- /dev/null
+++ b/arch/arm/mach-omap2/clock44xx.h
@@ -0,0 +1,15 @@
+/*
+ * OMAP4 clock function prototypes and macros
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CLOCK_44XX_H
+
+#define OMAP4430_MAX_DPLL_MULT 2048
+#define OMAP4430_MAX_DPLL_DIV 128
+
+extern const struct clkops clkops_noncore_dpll_ops;
+
+#endif
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
new file mode 100644
index 00000000000..2210e227d78
--- /dev/null
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -0,0 +1,2766 @@
+/*
+ * OMAP4 Clock data
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+
+#include <plat/control.h>
+#include <plat/clkdev_omap.h>
+
+#include "clock.h"
+#include "clock44xx.h"
+#include "cm.h"
+#include "cm-regbits-44xx.h"
+#include "prm.h"
+#include "prm-regbits-44xx.h"
+
+/* Root clocks */
+
+static struct clk extalt_clkin_ck = {
+ .name = "extalt_clkin_ck",
+ .rate = 59000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk pad_clks_ck = {
+ .name = "pad_clks_ck",
+ .rate = 12000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk pad_slimbus_core_clks_ck = {
+ .name = "pad_slimbus_core_clks_ck",
+ .rate = 12000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk secure_32k_clk_src_ck = {
+ .name = "secure_32k_clk_src_ck",
+ .rate = 32768,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk slimbus_clk = {
+ .name = "slimbus_clk",
+ .rate = 12000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk sys_32k_ck = {
+ .name = "sys_32k_ck",
+ .rate = 32768,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk virt_12000000_ck = {
+ .name = "virt_12000000_ck",
+ .ops = &clkops_null,
+ .rate = 12000000,
+};
+
+static struct clk virt_13000000_ck = {
+ .name = "virt_13000000_ck",
+ .ops = &clkops_null,
+ .rate = 13000000,
+};
+
+static struct clk virt_16800000_ck = {
+ .name = "virt_16800000_ck",
+ .ops = &clkops_null,
+ .rate = 16800000,
+};
+
+static struct clk virt_19200000_ck = {
+ .name = "virt_19200000_ck",
+ .ops = &clkops_null,
+ .rate = 19200000,
+};
+
+static struct clk virt_26000000_ck = {
+ .name = "virt_26000000_ck",
+ .ops = &clkops_null,
+ .rate = 26000000,
+};
+
+static struct clk virt_27000000_ck = {
+ .name = "virt_27000000_ck",
+ .ops = &clkops_null,
+ .rate = 27000000,
+};
+
+static struct clk virt_38400000_ck = {
+ .name = "virt_38400000_ck",
+ .ops = &clkops_null,
+ .rate = 38400000,
+};
+
+static const struct clksel_rate div_1_0_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel_rate div_1_1_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel_rate div_1_2_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel_rate div_1_3_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel_rate div_1_4_rates[] = {
+ { .div = 1, .val = 4, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel_rate div_1_5_rates[] = {
+ { .div = 1, .val = 5, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel_rate div_1_6_rates[] = {
+ { .div = 1, .val = 6, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel_rate div_1_7_rates[] = {
+ { .div = 1, .val = 7, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel sys_clkin_sel[] = {
+ { .parent = &virt_12000000_ck, .rates = div_1_1_rates },
+ { .parent = &virt_13000000_ck, .rates = div_1_2_rates },
+ { .parent = &virt_16800000_ck, .rates = div_1_3_rates },
+ { .parent = &virt_19200000_ck, .rates = div_1_4_rates },
+ { .parent = &virt_26000000_ck, .rates = div_1_5_rates },
+ { .parent = &virt_27000000_ck, .rates = div_1_6_rates },
+ { .parent = &virt_38400000_ck, .rates = div_1_7_rates },
+ { .parent = NULL },
+};
+
+static struct clk sys_clkin_ck = {
+ .name = "sys_clkin_ck",
+ .rate = 38400000,
+ .clksel = sys_clkin_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_SYS_CLKSEL,
+ .clksel_mask = OMAP4430_SYS_CLKSEL_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk utmi_phy_clkout_ck = {
+ .name = "utmi_phy_clkout_ck",
+ .rate = 12000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk xclk60mhsp1_ck = {
+ .name = "xclk60mhsp1_ck",
+ .rate = 12000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk xclk60mhsp2_ck = {
+ .name = "xclk60mhsp2_ck",
+ .rate = 12000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+static struct clk xclk60motg_ck = {
+ .name = "xclk60motg_ck",
+ .rate = 60000000,
+ .ops = &clkops_null,
+ .flags = CLOCK_IN_OMAP4430 | ALWAYS_ENABLED,
+};
+
+/* Module clocks and DPLL outputs */
+
+static const struct clksel_rate div2_1to2_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 2, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel dpll_sys_ref_clk_div[] = {
+ { .parent = &sys_clkin_ck, .rates = div2_1to2_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_sys_ref_clk = {
+ .name = "dpll_sys_ref_clk",
+ .parent = &sys_clkin_ck,
+ .clksel = dpll_sys_ref_clk_div,
+ .clksel_reg = OMAP4430_CM_DPLL_SYS_REF_CLKSEL,
+ .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel abe_dpll_refclk_mux_sel[] = {
+ { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
+ { .parent = &sys_32k_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk abe_dpll_refclk_mux_ck = {
+ .name = "abe_dpll_refclk_mux_ck",
+ .parent = &dpll_sys_ref_clk,
+ .clksel = abe_dpll_refclk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_ABE_PLL_REF_CLKSEL,
+ .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* DPLL_ABE */
+static struct dpll_data dpll_abe_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_ABE,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &abe_dpll_refclk_mux_ck,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_ABE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_ABE,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_ABE,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = OMAP4430_MAX_DPLL_MULT,
+ .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+
+static struct clk dpll_abe_ck = {
+ .name = "dpll_abe_ck",
+ .parent = &abe_dpll_refclk_mux_ck,
+ .dpll_data = &dpll_abe_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_noncore_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_abe_m2x2_ck = {
+ .name = "dpll_abe_m2x2_ck",
+ .parent = &dpll_abe_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk abe_24m_fclk = {
+ .name = "abe_24m_fclk",
+ .parent = &dpll_abe_m2x2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel_rate div3_1to4_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 2, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 4, .val = 2, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel abe_clk_div[] = {
+ { .parent = &dpll_abe_m2x2_ck, .rates = div3_1to4_rates },
+ { .parent = NULL },
+};
+
+static struct clk abe_clk = {
+ .name = "abe_clk",
+ .parent = &dpll_abe_m2x2_ck,
+ .clksel = abe_clk_div,
+ .clksel_reg = OMAP4430_CM_CLKSEL_ABE,
+ .clksel_mask = OMAP4430_CLKSEL_OPP_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel aess_fclk_div[] = {
+ { .parent = &abe_clk, .rates = div2_1to2_rates },
+ { .parent = NULL },
+};
+
+static struct clk aess_fclk = {
+ .name = "aess_fclk",
+ .parent = &abe_clk,
+ .clksel = aess_fclk_div,
+ .clksel_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_AESS_FCLK_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel_rate div31_1to31_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 2, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 3, .val = 2, .flags = RATE_IN_4430 },
+ { .div = 4, .val = 3, .flags = RATE_IN_4430 },
+ { .div = 5, .val = 4, .flags = RATE_IN_4430 },
+ { .div = 6, .val = 5, .flags = RATE_IN_4430 },
+ { .div = 7, .val = 6, .flags = RATE_IN_4430 },
+ { .div = 8, .val = 7, .flags = RATE_IN_4430 },
+ { .div = 9, .val = 8, .flags = RATE_IN_4430 },
+ { .div = 10, .val = 9, .flags = RATE_IN_4430 },
+ { .div = 11, .val = 10, .flags = RATE_IN_4430 },
+ { .div = 12, .val = 11, .flags = RATE_IN_4430 },
+ { .div = 13, .val = 12, .flags = RATE_IN_4430 },
+ { .div = 14, .val = 13, .flags = RATE_IN_4430 },
+ { .div = 15, .val = 14, .flags = RATE_IN_4430 },
+ { .div = 16, .val = 15, .flags = RATE_IN_4430 },
+ { .div = 17, .val = 16, .flags = RATE_IN_4430 },
+ { .div = 18, .val = 17, .flags = RATE_IN_4430 },
+ { .div = 19, .val = 18, .flags = RATE_IN_4430 },
+ { .div = 20, .val = 19, .flags = RATE_IN_4430 },
+ { .div = 21, .val = 20, .flags = RATE_IN_4430 },
+ { .div = 22, .val = 21, .flags = RATE_IN_4430 },
+ { .div = 23, .val = 22, .flags = RATE_IN_4430 },
+ { .div = 24, .val = 23, .flags = RATE_IN_4430 },
+ { .div = 25, .val = 24, .flags = RATE_IN_4430 },
+ { .div = 26, .val = 25, .flags = RATE_IN_4430 },
+ { .div = 27, .val = 26, .flags = RATE_IN_4430 },
+ { .div = 28, .val = 27, .flags = RATE_IN_4430 },
+ { .div = 29, .val = 28, .flags = RATE_IN_4430 },
+ { .div = 30, .val = 29, .flags = RATE_IN_4430 },
+ { .div = 31, .val = 30, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel dpll_abe_m3_div[] = {
+ { .parent = &dpll_abe_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_abe_m3_ck = {
+ .name = "dpll_abe_m3_ck",
+ .parent = &dpll_abe_ck,
+ .clksel = dpll_abe_m3_div,
+ .clksel_reg = OMAP4430_CM_DIV_M3_DPLL_ABE,
+ .clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel core_hsd_byp_clk_mux_sel[] = {
+ { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
+ { .parent = &dpll_abe_m3_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk core_hsd_byp_clk_mux_ck = {
+ .name = "core_hsd_byp_clk_mux_ck",
+ .parent = &dpll_sys_ref_clk,
+ .clksel = core_hsd_byp_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_CLKSEL_DPLL_CORE,
+ .clksel_mask = OMAP4430_DPLL_BYP_CLKSEL_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* DPLL_CORE */
+static struct dpll_data dpll_core_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_CORE,
+ .clk_bypass = &core_hsd_byp_clk_mux_ck,
+ .clk_ref = &dpll_sys_ref_clk,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_CORE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_CORE,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_CORE,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = OMAP4430_MAX_DPLL_MULT,
+ .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+
+static struct clk dpll_core_ck = {
+ .name = "dpll_core_ck",
+ .parent = &dpll_sys_ref_clk,
+ .dpll_data = &dpll_core_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_null,
+ .recalc = &omap3_dpll_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel dpll_core_m6_div[] = {
+ { .parent = &dpll_core_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_core_m6_ck = {
+ .name = "dpll_core_m6_ck",
+ .parent = &dpll_core_ck,
+ .clksel = dpll_core_m6_div,
+ .clksel_reg = OMAP4430_CM_DIV_M6_DPLL_CORE,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel dbgclk_mux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_core_m6_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk dbgclk_mux_ck = {
+ .name = "dbgclk_mux_ck",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_core_m2_ck = {
+ .name = "dpll_core_m2_ck",
+ .parent = &dpll_core_ck,
+ .clksel = dpll_core_m6_div,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_CORE,
+ .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk ddrphy_ck = {
+ .name = "ddrphy_ck",
+ .parent = &dpll_core_m2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_core_m5_ck = {
+ .name = "dpll_core_m5_ck",
+ .parent = &dpll_core_ck,
+ .clksel = dpll_core_m6_div,
+ .clksel_reg = OMAP4430_CM_DIV_M5_DPLL_CORE,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel div_core_div[] = {
+ { .parent = &dpll_core_m5_ck, .rates = div2_1to2_rates },
+ { .parent = NULL },
+};
+
+static struct clk div_core_ck = {
+ .name = "div_core_ck",
+ .parent = &dpll_core_m5_ck,
+ .clksel = div_core_div,
+ .clksel_reg = OMAP4430_CM_CLKSEL_CORE,
+ .clksel_mask = OMAP4430_CLKSEL_CORE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel_rate div4_1to8_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 2, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 4, .val = 2, .flags = RATE_IN_4430 },
+ { .div = 8, .val = 3, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel div_iva_hs_clk_div[] = {
+ { .parent = &dpll_core_m5_ck, .rates = div4_1to8_rates },
+ { .parent = NULL },
+};
+
+static struct clk div_iva_hs_clk = {
+ .name = "div_iva_hs_clk",
+ .parent = &dpll_core_m5_ck,
+ .clksel = div_iva_hs_clk_div,
+ .clksel_reg = OMAP4430_CM_BYPCLK_DPLL_IVA,
+ .clksel_mask = OMAP4430_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk div_mpu_hs_clk = {
+ .name = "div_mpu_hs_clk",
+ .parent = &dpll_core_m5_ck,
+ .clksel = div_iva_hs_clk_div,
+ .clksel_reg = OMAP4430_CM_BYPCLK_DPLL_MPU,
+ .clksel_mask = OMAP4430_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_core_m4_ck = {
+ .name = "dpll_core_m4_ck",
+ .parent = &dpll_core_ck,
+ .clksel = dpll_core_m6_div,
+ .clksel_reg = OMAP4430_CM_DIV_M4_DPLL_CORE,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dll_clk_div_ck = {
+ .name = "dll_clk_div_ck",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_abe_m2_ck = {
+ .name = "dpll_abe_m2_ck",
+ .parent = &dpll_abe_ck,
+ .clksel = dpll_abe_m3_div,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_ABE,
+ .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_core_m3_ck = {
+ .name = "dpll_core_m3_ck",
+ .parent = &dpll_core_ck,
+ .clksel = dpll_core_m6_div,
+ .clksel_reg = OMAP4430_CM_DIV_M3_DPLL_CORE,
+ .clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_core_m7_ck = {
+ .name = "dpll_core_m7_ck",
+ .parent = &dpll_core_ck,
+ .clksel = dpll_core_m6_div,
+ .clksel_reg = OMAP4430_CM_DIV_M7_DPLL_CORE,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel iva_hsd_byp_clk_mux_sel[] = {
+ { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
+ { .parent = &div_iva_hs_clk, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk iva_hsd_byp_clk_mux_ck = {
+ .name = "iva_hsd_byp_clk_mux_ck",
+ .parent = &dpll_sys_ref_clk,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* DPLL_IVA */
+static struct dpll_data dpll_iva_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_IVA,
+ .clk_bypass = &iva_hsd_byp_clk_mux_ck,
+ .clk_ref = &dpll_sys_ref_clk,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_IVA,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_IVA,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_IVA,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = OMAP4430_MAX_DPLL_MULT,
+ .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+
+static struct clk dpll_iva_ck = {
+ .name = "dpll_iva_ck",
+ .parent = &dpll_sys_ref_clk,
+ .dpll_data = &dpll_iva_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_noncore_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel dpll_iva_m4_div[] = {
+ { .parent = &dpll_iva_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_iva_m4_ck = {
+ .name = "dpll_iva_m4_ck",
+ .parent = &dpll_iva_ck,
+ .clksel = dpll_iva_m4_div,
+ .clksel_reg = OMAP4430_CM_DIV_M4_DPLL_IVA,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_iva_m5_ck = {
+ .name = "dpll_iva_m5_ck",
+ .parent = &dpll_iva_ck,
+ .clksel = dpll_iva_m4_div,
+ .clksel_reg = OMAP4430_CM_DIV_M5_DPLL_IVA,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* DPLL_MPU */
+static struct dpll_data dpll_mpu_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_MPU,
+ .clk_bypass = &div_mpu_hs_clk,
+ .clk_ref = &dpll_sys_ref_clk,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_MPU,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_MPU,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_MPU,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = OMAP4430_MAX_DPLL_MULT,
+ .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+
+static struct clk dpll_mpu_ck = {
+ .name = "dpll_mpu_ck",
+ .parent = &dpll_sys_ref_clk,
+ .dpll_data = &dpll_mpu_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_noncore_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel dpll_mpu_m2_div[] = {
+ { .parent = &dpll_mpu_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_mpu_m2_ck = {
+ .name = "dpll_mpu_m2_ck",
+ .parent = &dpll_mpu_ck,
+ .clksel = dpll_mpu_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_MPU,
+ .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk per_hs_clk_div_ck = {
+ .name = "per_hs_clk_div_ck",
+ .parent = &dpll_abe_m3_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel per_hsd_byp_clk_mux_sel[] = {
+ { .parent = &dpll_sys_ref_clk, .rates = div_1_0_rates },
+ { .parent = &per_hs_clk_div_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk per_hsd_byp_clk_mux_ck = {
+ .name = "per_hsd_byp_clk_mux_ck",
+ .parent = &dpll_sys_ref_clk,
+ .clksel = per_hsd_byp_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_CLKSEL_DPLL_PER,
+ .clksel_mask = OMAP4430_DPLL_BYP_CLKSEL_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* DPLL_PER */
+static struct dpll_data dpll_per_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_PER,
+ .clk_bypass = &per_hsd_byp_clk_mux_ck,
+ .clk_ref = &dpll_sys_ref_clk,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_PER,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_PER,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_PER,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = OMAP4430_MAX_DPLL_MULT,
+ .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+
+static struct clk dpll_per_ck = {
+ .name = "dpll_per_ck",
+ .parent = &dpll_sys_ref_clk,
+ .dpll_data = &dpll_per_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_noncore_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel dpll_per_m2_div[] = {
+ { .parent = &dpll_per_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_per_m2_ck = {
+ .name = "dpll_per_m2_ck",
+ .parent = &dpll_per_ck,
+ .clksel = dpll_per_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_PER,
+ .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_per_m2x2_ck = {
+ .name = "dpll_per_m2x2_ck",
+ .parent = &dpll_per_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_per_m3_ck = {
+ .name = "dpll_per_m3_ck",
+ .parent = &dpll_per_ck,
+ .clksel = dpll_per_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M3_DPLL_PER,
+ .clksel_mask = OMAP4430_DPLL_CLKOUTHIF_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_per_m4_ck = {
+ .name = "dpll_per_m4_ck",
+ .parent = &dpll_per_ck,
+ .clksel = dpll_per_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M4_DPLL_PER,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_per_m5_ck = {
+ .name = "dpll_per_m5_ck",
+ .parent = &dpll_per_ck,
+ .clksel = dpll_per_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M5_DPLL_PER,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_per_m6_ck = {
+ .name = "dpll_per_m6_ck",
+ .parent = &dpll_per_ck,
+ .clksel = dpll_per_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M6_DPLL_PER,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_per_m7_ck = {
+ .name = "dpll_per_m7_ck",
+ .parent = &dpll_per_ck,
+ .clksel = dpll_per_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M7_DPLL_PER,
+ .clksel_mask = OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* DPLL_UNIPRO */
+static struct dpll_data dpll_unipro_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_UNIPRO,
+ .clk_bypass = &dpll_sys_ref_clk,
+ .clk_ref = &dpll_sys_ref_clk,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_UNIPRO,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_UNIPRO,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = OMAP4430_MAX_DPLL_MULT,
+ .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+
+static struct clk dpll_unipro_ck = {
+ .name = "dpll_unipro_ck",
+ .parent = &dpll_sys_ref_clk,
+ .dpll_data = &dpll_unipro_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_noncore_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel dpll_unipro_m2x2_div[] = {
+ { .parent = &dpll_unipro_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_unipro_m2x2_ck = {
+ .name = "dpll_unipro_m2x2_ck",
+ .parent = &dpll_unipro_ck,
+ .clksel = dpll_unipro_m2x2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_UNIPRO,
+ .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk usb_hs_clk_div_ck = {
+ .name = "usb_hs_clk_div_ck",
+ .parent = &dpll_abe_m3_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* DPLL_USB */
+static struct dpll_data dpll_usb_dd = {
+ .mult_div1_reg = OMAP4430_CM_CLKSEL_DPLL_USB,
+ .clk_bypass = &usb_hs_clk_div_ck,
+ .clk_ref = &dpll_sys_ref_clk,
+ .control_reg = OMAP4430_CM_CLKMODE_DPLL_USB,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .autoidle_reg = OMAP4430_CM_AUTOIDLE_DPLL_USB,
+ .idlest_reg = OMAP4430_CM_IDLEST_DPLL_USB,
+ .mult_mask = OMAP4430_DPLL_MULT_MASK,
+ .div1_mask = OMAP4430_DPLL_DIV_MASK,
+ .enable_mask = OMAP4430_DPLL_EN_MASK,
+ .autoidle_mask = OMAP4430_AUTO_DPLL_MODE_MASK,
+ .idlest_mask = OMAP4430_ST_DPLL_CLK_MASK,
+ .max_multiplier = OMAP4430_MAX_DPLL_MULT,
+ .max_divider = OMAP4430_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+
+static struct clk dpll_usb_ck = {
+ .name = "dpll_usb_ck",
+ .parent = &dpll_sys_ref_clk,
+ .dpll_data = &dpll_usb_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_noncore_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk dpll_usb_clkdcoldo_ck = {
+ .name = "dpll_usb_clkdcoldo_ck",
+ .parent = &dpll_usb_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel dpll_usb_m2_div[] = {
+ { .parent = &dpll_usb_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_usb_m2_ck = {
+ .name = "dpll_usb_m2_ck",
+ .parent = &dpll_usb_ck,
+ .clksel = dpll_usb_m2_div,
+ .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_USB,
+ .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel ducati_clk_mux_sel[] = {
+ { .parent = &div_core_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_per_m6_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk ducati_clk_mux_ck = {
+ .name = "ducati_clk_mux_ck",
+ .parent = &div_core_ck,
+ .clksel = ducati_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT,
+ .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk func_12m_fclk = {
+ .name = "func_12m_fclk",
+ .parent = &dpll_per_m2x2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk func_24m_clk = {
+ .name = "func_24m_clk",
+ .parent = &dpll_per_m2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk func_24mc_fclk = {
+ .name = "func_24mc_fclk",
+ .parent = &dpll_per_m2x2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel_rate div2_4to8_rates[] = {
+ { .div = 4, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 8, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel func_48m_fclk_div[] = {
+ { .parent = &dpll_per_m2x2_ck, .rates = div2_4to8_rates },
+ { .parent = NULL },
+};
+
+static struct clk func_48m_fclk = {
+ .name = "func_48m_fclk",
+ .parent = &dpll_per_m2x2_ck,
+ .clksel = func_48m_fclk_div,
+ .clksel_reg = OMAP4430_CM_SCALE_FCLK,
+ .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk func_48mc_fclk = {
+ .name = "func_48mc_fclk",
+ .parent = &dpll_per_m2x2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel_rate div2_2to4_rates[] = {
+ { .div = 2, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 4, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel func_64m_fclk_div[] = {
+ { .parent = &dpll_per_m4_ck, .rates = div2_2to4_rates },
+ { .parent = NULL },
+};
+
+static struct clk func_64m_fclk = {
+ .name = "func_64m_fclk",
+ .parent = &dpll_per_m4_ck,
+ .clksel = func_64m_fclk_div,
+ .clksel_reg = OMAP4430_CM_SCALE_FCLK,
+ .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel func_96m_fclk_div[] = {
+ { .parent = &dpll_per_m2x2_ck, .rates = div2_2to4_rates },
+ { .parent = NULL },
+};
+
+static struct clk func_96m_fclk = {
+ .name = "func_96m_fclk",
+ .parent = &dpll_per_m2x2_ck,
+ .clksel = func_96m_fclk_div,
+ .clksel_reg = OMAP4430_CM_SCALE_FCLK,
+ .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel hsmmc6_fclk_sel[] = {
+ { .parent = &func_64m_fclk, .rates = div_1_0_rates },
+ { .parent = &func_96m_fclk, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk hsmmc6_fclk = {
+ .name = "hsmmc6_fclk",
+ .parent = &func_64m_fclk,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel_rate div2_1to8_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 8, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel init_60m_fclk_div[] = {
+ { .parent = &dpll_usb_m2_ck, .rates = div2_1to8_rates },
+ { .parent = NULL },
+};
+
+static struct clk init_60m_fclk = {
+ .name = "init_60m_fclk",
+ .parent = &dpll_usb_m2_ck,
+ .clksel = init_60m_fclk_div,
+ .clksel_reg = OMAP4430_CM_CLKSEL_USB_60MHZ,
+ .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel l3_div_div[] = {
+ { .parent = &div_core_ck, .rates = div2_1to2_rates },
+ { .parent = NULL },
+};
+
+static struct clk l3_div_ck = {
+ .name = "l3_div_ck",
+ .parent = &div_core_ck,
+ .clksel = l3_div_div,
+ .clksel_reg = OMAP4430_CM_CLKSEL_CORE,
+ .clksel_mask = OMAP4430_CLKSEL_L3_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel l4_div_div[] = {
+ { .parent = &l3_div_ck, .rates = div2_1to2_rates },
+ { .parent = NULL },
+};
+
+static struct clk l4_div_ck = {
+ .name = "l4_div_ck",
+ .parent = &l3_div_ck,
+ .clksel = l4_div_div,
+ .clksel_reg = OMAP4430_CM_CLKSEL_CORE,
+ .clksel_mask = OMAP4430_CLKSEL_L4_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk lp_clk_div_ck = {
+ .name = "lp_clk_div_ck",
+ .parent = &dpll_abe_m2x2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel l4_wkup_clk_mux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &lp_clk_div_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk l4_wkup_clk_mux_ck = {
+ .name = "l4_wkup_clk_mux_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = l4_wkup_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4_WKUP_CLKSEL,
+ .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel per_abe_nc_fclk_div[] = {
+ { .parent = &dpll_abe_m2_ck, .rates = div2_1to2_rates },
+ { .parent = NULL },
+};
+
+static struct clk per_abe_nc_fclk = {
+ .name = "per_abe_nc_fclk",
+ .parent = &dpll_abe_m2_ck,
+ .clksel = per_abe_nc_fclk_div,
+ .clksel_reg = OMAP4430_CM_SCALE_FCLK,
+ .clksel_mask = OMAP4430_SCALE_FCLK_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel mcasp2_fclk_sel[] = {
+ { .parent = &func_96m_fclk, .rates = div_1_0_rates },
+ { .parent = &per_abe_nc_fclk, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk mcasp2_fclk = {
+ .name = "mcasp2_fclk",
+ .parent = &func_96m_fclk,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk mcasp3_fclk = {
+ .name = "mcasp3_fclk",
+ .parent = &func_96m_fclk,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk ocp_abe_iclk = {
+ .name = "ocp_abe_iclk",
+ .parent = &aess_fclk,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk per_abe_24m_fclk = {
+ .name = "per_abe_24m_fclk",
+ .parent = &dpll_abe_m2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel pmd_stm_clock_mux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_core_m6_ck, .rates = div_1_1_rates },
+ { .parent = &dpll_per_m7_ck, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static struct clk pmd_stm_clock_mux_ck = {
+ .name = "pmd_stm_clock_mux_ck",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk pmd_trace_clk_mux_ck = {
+ .name = "pmd_trace_clk_mux_ck",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static struct clk syc_clk_div_ck = {
+ .name = "syc_clk_div_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dpll_sys_ref_clk_div,
+ .clksel_reg = OMAP4430_CM_ABE_DSS_SYS_CLKSEL,
+ .clksel_mask = OMAP4430_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/* Leaf clocks controlled by modules */
+
+static struct clk aes1_ck = {
+ .name = "aes1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4SEC_AES1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_secure_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk aes2_ck = {
+ .name = "aes2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4SEC_AES2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_secure_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk aess_ck = {
+ .name = "aess_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_AESS_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+ .parent = &aess_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk cust_efuse_ck = {
+ .name = "cust_efuse_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_cefuse_clkdm",
+ .parent = &sys_clkin_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk des3des_ck = {
+ .name = "des3des_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4SEC_DES3DES_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_secure_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel dmic_sync_mux_sel[] = {
+ { .parent = &abe_24m_fclk, .rates = div_1_0_rates },
+ { .parent = &syc_clk_div_ck, .rates = div_1_1_rates },
+ { .parent = &func_24m_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static struct clk dmic_sync_mux_ck = {
+ .name = "dmic_sync_mux_ck",
+ .parent = &abe_24m_fclk,
+ .clksel = dmic_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel func_dmic_abe_gfclk_sel[] = {
+ { .parent = &dmic_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+/* Merged func_dmic_abe_gfclk into dmic_ck */
+static struct clk dmic_ck = {
+ .name = "dmic_ck",
+ .parent = &dmic_sync_mux_ck,
+ .clksel = func_dmic_abe_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_DMIC_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
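
Because the source mux is merged into dmic_ck itself, reparenting the DMIC functional clock goes through the generic clk API rather than a separate mux clock. The helper below is a hypothetical, illustrative fragment (not part of the patch) showing how a driver could select the pad clock as the source; error handling is trimmed for brevity.

/* hypothetical helper, for illustration only */
static int dmic_select_pad_clock(void)
{
	struct clk *dmic = clk_get(NULL, "dmic_ck");
	struct clk *pad = clk_get(NULL, "pad_clks_ck");
	int ret;

	ret = clk_set_parent(dmic, pad);	/* CLKSEL_SOURCE := pad_clks_ck */
	clk_put(pad);
	clk_put(dmic);
	return ret;
}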
+
+static struct clk dss_ck = {
+ .name = "dss_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_DSS_DSS_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l3_dss_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk ducati_ck = {
+ .name = "ducati_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_DUCATI_DUCATI_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "ducati_clkdm",
+ .parent = &ducati_clk_mux_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk emif1_ck = {
+ .name = "emif1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_emif_clkdm",
+ .parent = &ddrphy_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk emif2_ck = {
+ .name = "emif2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_emif_clkdm",
+ .parent = &ddrphy_ck,
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel fdif_fclk_div[] = {
+ { .parent = &dpll_per_m4_ck, .rates = div3_1to4_rates },
+ { .parent = NULL },
+};
+
+/* Merged fdif_fclk into fdif_ck */
+static struct clk fdif_ck = {
+ .name = "fdif_ck",
+ .parent = &dpll_per_m4_ck,
+ .clksel = fdif_fclk_div,
+ .clksel_reg = OMAP4430_CM_CAM_FDIF_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_FCLK_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_CAM_FDIF_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "iss_clkdm",
+};
+
+static const struct clksel per_sgx_fclk_div[] = {
+ { .parent = &dpll_per_m2x2_ck, .rates = div3_1to4_rates },
+ { .parent = NULL },
+};
+
+static struct clk per_sgx_fclk = {
+ .name = "per_sgx_fclk",
+ .parent = &dpll_per_m2x2_ck,
+ .clksel = per_sgx_fclk_div,
+ .clksel_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_PER_192M_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel sgx_clk_mux_sel[] = {
+ { .parent = &dpll_core_m7_ck, .rates = div_1_0_rates },
+ { .parent = &per_sgx_fclk, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+/* Merged sgx_clk_mux into gfx_ck */
+static struct clk gfx_ck = {
+ .name = "gfx_ck",
+ .parent = &dpll_core_m7_ck,
+ .clksel = sgx_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SGX_FCLK_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_GFX_GFX_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l3_gfx_clkdm",
+};
+
+static struct clk gpio1_ck = {
+ .name = "gpio1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_GPIO1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &l4_wkup_clk_mux_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio2_ck = {
+ .name = "gpio2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio3_ck = {
+ .name = "gpio3_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio4_ck = {
+ .name = "gpio4_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio5_ck = {
+ .name = "gpio5_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO5_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio6_ck = {
+ .name = "gpio6_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_GPIO6_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpmc_ck = {
+ .name = "gpmc_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3_2_GPMC_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_2_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel dmt1_clk_mux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &sys_32k_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+/* Merged dmt1_clk_mux into gptimer1_ck */
+static struct clk gptimer1_ck = {
+ .name = "gptimer1_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dmt1_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_WKUP_TIMER1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+};
+
+/* Merged cm2_dm10_mux into gptimer10_ck */
+static struct clk gptimer10_ck = {
+ .name = "gptimer10_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dmt1_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm11_mux into gptimer11_ck */
+static struct clk gptimer11_ck = {
+ .name = "gptimer11_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dmt1_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm2_mux into gptimer2_ck */
+static struct clk gptimer2_ck = {
+ .name = "gptimer2_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dmt1_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm3_mux into gptimer3_ck */
+static struct clk gptimer3_ck = {
+ .name = "gptimer3_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dmt1_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+/* Merged cm2_dm4_mux into gptimer4_ck */
+static struct clk gptimer4_ck = {
+ .name = "gptimer4_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dmt1_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+static const struct clksel timer5_sync_mux_sel[] = {
+ { .parent = &syc_clk_div_ck, .rates = div_1_0_rates },
+ { .parent = &sys_32k_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+/* Merged timer5_sync_mux into gptimer5_ck */
+static struct clk gptimer5_ck = {
+ .name = "gptimer5_ck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER5_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged timer6_sync_mux into gptimer6_ck */
+static struct clk gptimer6_ck = {
+ .name = "gptimer6_ck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER6_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged timer7_sync_mux into gptimer7_ck */
+static struct clk gptimer7_ck = {
+ .name = "gptimer7_ck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER7_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged timer8_sync_mux into gptimer8_ck */
+static struct clk gptimer8_ck = {
+ .name = "gptimer8_ck",
+ .parent = &syc_clk_div_ck,
+ .clksel = timer5_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_TIMER8_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+/* Merged cm2_dm9_mux into gptimer9_ck */
+static struct clk gptimer9_ck = {
+ .name = "gptimer9_ck",
+ .parent = &sys_clkin_ck,
+ .clksel = dmt1_clk_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+static struct clk hdq1w_ck = {
+ .name = "hdq1w_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_HDQ1W_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_12m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+/* Merged hsi_fclk into hsi_ck */
+static struct clk hsi_ck = {
+ .name = "hsi_ck",
+ .parent = &dpll_per_m2x2_ck,
+ .clksel = per_sgx_fclk_div,
+ .clksel_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_24_25_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L3INIT_HSI_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+};
+
+static struct clk i2c1_ck = {
+ .name = "i2c1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_I2C1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_96m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c2_ck = {
+ .name = "i2c2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_I2C2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_96m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c3_ck = {
+ .name = "i2c3_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_I2C3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_96m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk i2c4_ck = {
+ .name = "i2c4_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_I2C4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_96m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk iss_ck = {
+ .name = "iss_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_CAM_ISS_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "iss_clkdm",
+ .parent = &ducati_clk_mux_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk ivahd_ck = {
+ .name = "ivahd_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_IVAHD_IVAHD_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "ivahd_clkdm",
+ .parent = &dpll_iva_m5_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk keyboard_ck = {
+ .name = "keyboard_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk l3_instr_interconnect_ck = {
+ .name = "l3_instr_interconnect_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_instr_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk l3_interconnect_3_ck = {
+ .name = "l3_interconnect_3_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INSTR_L3_3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_instr_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcasp_sync_mux_ck = {
+ .name = "mcasp_sync_mux_ck",
+ .parent = &abe_24m_fclk,
+ .clksel = dmic_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel func_mcasp_abe_gfclk_sel[] = {
+ { .parent = &mcasp_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+/* Merged func_mcasp_abe_gfclk into mcasp_ck */
+static struct clk mcasp_ck = {
+ .name = "mcasp_ck",
+ .parent = &mcasp_sync_mux_ck,
+ .clksel = func_mcasp_abe_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_MCASP_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+static struct clk mcbsp1_sync_mux_ck = {
+ .name = "mcbsp1_sync_mux_ck",
+ .parent = &abe_24m_fclk,
+ .clksel = dmic_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel func_mcbsp1_gfclk_sel[] = {
+ { .parent = &mcbsp1_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+/* Merged func_mcbsp1_gfclk into mcbsp1_ck */
+static struct clk mcbsp1_ck = {
+ .name = "mcbsp1_ck",
+ .parent = &mcbsp1_sync_mux_ck,
+ .clksel = func_mcbsp1_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_MCBSP1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+static struct clk mcbsp2_sync_mux_ck = {
+ .name = "mcbsp2_sync_mux_ck",
+ .parent = &abe_24m_fclk,
+ .clksel = dmic_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel func_mcbsp2_gfclk_sel[] = {
+ { .parent = &mcbsp2_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+/* Merged func_mcbsp2_gfclk into mcbsp2_ck */
+static struct clk mcbsp2_ck = {
+ .name = "mcbsp2_ck",
+ .parent = &mcbsp2_sync_mux_ck,
+ .clksel = func_mcbsp2_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_MCBSP2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+static struct clk mcbsp3_sync_mux_ck = {
+ .name = "mcbsp3_sync_mux_ck",
+ .parent = &abe_24m_fclk,
+ .clksel = dmic_sync_mux_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel func_mcbsp3_gfclk_sel[] = {
+ { .parent = &mcbsp3_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = &slimbus_clk, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+/* Merged func_mcbsp3_gfclk into mcbsp3_ck */
+static struct clk mcbsp3_ck = {
+ .name = "mcbsp3_ck",
+ .parent = &mcbsp3_sync_mux_ck,
+ .clksel = func_mcbsp3_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SOURCE_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM1_ABE_MCBSP3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+};
+
+static struct clk mcbsp4_sync_mux_ck = {
+ .name = "mcbsp4_sync_mux_ck",
+ .parent = &func_96m_fclk,
+ .clksel = mcasp2_fclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel per_mcbsp4_gfclk_sel[] = {
+ { .parent = &mcbsp4_sync_mux_ck, .rates = div_1_0_rates },
+ { .parent = &pad_clks_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+/* Merged per_mcbsp4_gfclk into mcbsp4_ck */
+static struct clk mcbsp4_ck = {
+ .name = "mcbsp4_ck",
+ .parent = &mcbsp4_sync_mux_ck,
+ .clksel = per_mcbsp4_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_SOURCE_24_24_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L4PER_MCBSP4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+};
+
+static struct clk mcspi1_ck = {
+ .name = "mcspi1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_MCSPI1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi2_ck = {
+ .name = "mcspi2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_MCSPI2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi3_ck = {
+ .name = "mcspi3_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_MCSPI3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcspi4_ck = {
+ .name = "mcspi4_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_MCSPI4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+/* Merged hsmmc1_fclk into mmc1_ck */
+static struct clk mmc1_ck = {
+ .name = "mmc1_ck",
+ .parent = &func_64m_fclk,
+ .clksel = hsmmc6_fclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L3INIT_MMC1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+};
+
+/* Merged hsmmc2_fclk into mmc2_ck */
+static struct clk mmc2_ck = {
+ .name = "mmc2_ck",
+ .parent = &func_64m_fclk,
+ .clksel = hsmmc6_fclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_MASK,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+ .enable_reg = OMAP4430_CM_L3INIT_MMC2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+};
+
+static struct clk mmc3_ck = {
+ .name = "mmc3_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_MMCSD3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmc4_ck = {
+ .name = "mmc4_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_MMCSD4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmc5_ck = {
+ .name = "mmc5_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_MMCSD5_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk ocp_wp1_ck = {
+ .name = "ocp_wp1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_instr_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk pdm_ck = {
+ .name = "pdm_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_PDM_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+ .parent = &pad_clks_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk pkaeip29_ck = {
+ .name = "pkaeip29_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_secure_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk rng_ck = {
+ .name = "rng_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4SEC_RNG_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l4_secure_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sha2md51_ck = {
+ .name = "sha2md51_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_secure_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sl2_ck = {
+ .name = "sl2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_IVAHD_SL2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "ivahd_clkdm",
+ .parent = &dpll_iva_m5_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk slimbus1_ck = {
+ .name = "slimbus1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+ .parent = &ocp_abe_iclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk slimbus2_ck = {
+ .name = "slimbus2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sr_core_ck = {
+ .name = "sr_core_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_ALWON_SR_CORE_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_ao_clkdm",
+ .parent = &l4_wkup_clk_mux_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sr_iva_ck = {
+ .name = "sr_iva_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_ALWON_SR_IVA_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_ao_clkdm",
+ .parent = &l4_wkup_clk_mux_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sr_mpu_ck = {
+ .name = "sr_mpu_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_ALWON_SR_MPU_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_ao_clkdm",
+ .parent = &l4_wkup_clk_mux_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk tesla_ck = {
+ .name = "tesla_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_TESLA_TESLA_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "tesla_clkdm",
+ .parent = &dpll_iva_m4_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart1_ck = {
+ .name = "uart1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_UART1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart2_ck = {
+ .name = "uart2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_UART2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart3_ck = {
+ .name = "uart3_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_UART3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk uart4_ck = {
+ .name = "uart4_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L4PER_UART4_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_per_clkdm",
+ .parent = &func_48m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk unipro1_ck = {
+ .name = "unipro1_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_UNIPRO1_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &func_96m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_host_ck = {
+ .name = "usb_host_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &init_60m_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_host_fs_ck = {
+ .name = "usb_host_fs_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &func_48mc_fclk,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_otg_ck = {
+ .name = "usb_otg_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &l3_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usb_tll_ck = {
+ .name = "usb_tll_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usbphyocp2scp_ck = {
+ .name = "usbphyocp2scp_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_HWCTRL,
+ .clkdm_name = "l3_init_clkdm",
+ .parent = &l4_div_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk usim_ck = {
+ .name = "usim_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt2_ck = {
+ .name = "wdt2_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM_WKUP_WDT2_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk wdt3_ck = {
+ .name = "wdt3_ck",
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = OMAP4430_CM1_ABE_WDT3_CLKCTRL,
+ .enable_bit = OMAP4430_MODULEMODE_SWCTRL,
+ .clkdm_name = "abe_clkdm",
+ .parent = &sys_32k_ck,
+ .recalc = &followparent_recalc,
+};
+
+/* Remaining optional clocks */
+static const struct clksel otg_60m_gfclk_sel[] = {
+ { .parent = &utmi_phy_clkout_ck, .rates = div_1_0_rates },
+ { .parent = &xclk60motg_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk otg_60m_gfclk_ck = {
+ .name = "otg_60m_gfclk_ck",
+ .parent = &utmi_phy_clkout_ck,
+ .clksel = otg_60m_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_60M_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel stm_clk_div_div[] = {
+ { .parent = &pmd_stm_clock_mux_ck, .rates = div3_1to4_rates },
+ { .parent = NULL },
+};
+
+static struct clk stm_clk_div_ck = {
+ .name = "stm_clk_div_ck",
+ .parent = &pmd_stm_clock_mux_ck,
+ .clksel = stm_clk_div_div,
+ .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_PMD_STM_CLK_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel trace_clk_div_div[] = {
+ { .parent = &pmd_trace_clk_mux_ck, .rates = div3_1to4_rates },
+ { .parent = NULL },
+};
+
+static struct clk trace_clk_div_ck = {
+ .name = "trace_clk_div_ck",
+ .parent = &pmd_trace_clk_mux_ck,
+ .clksel = trace_clk_div_div,
+ .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel_rate div2_14to18_rates[] = {
+ { .div = 14, .val = 0, .flags = RATE_IN_4430 },
+ { .div = 18, .val = 1, .flags = RATE_IN_4430 },
+ { .div = 0 },
+};
+
+static const struct clksel usim_fclk_div[] = {
+ { .parent = &dpll_per_m4_ck, .rates = div2_14to18_rates },
+ { .parent = NULL },
+};
+
+static struct clk usim_fclk = {
+ .name = "usim_fclk",
+ .parent = &dpll_per_m4_ck,
+ .clksel = usim_fclk_div,
+ .clksel_reg = OMAP4430_CM_WKUP_USIM_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel utmi_p1_gfclk_sel[] = {
+ { .parent = &init_60m_fclk, .rates = div_1_0_rates },
+ { .parent = &xclk60mhsp1_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk utmi_p1_gfclk_ck = {
+ .name = "utmi_p1_gfclk_ck",
+ .parent = &init_60m_fclk,
+ .clksel = utmi_p1_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_UTMI_P1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+static const struct clksel utmi_p2_gfclk_sel[] = {
+ { .parent = &init_60m_fclk, .rates = div_1_0_rates },
+ { .parent = &xclk60mhsp2_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk utmi_p2_gfclk_ck = {
+ .name = "utmi_p2_gfclk_ck",
+ .parent = &init_60m_fclk,
+ .clksel = utmi_p2_gfclk_sel,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL,
+ .clksel_mask = OMAP4430_CLKSEL_UTMI_P2_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .flags = CLOCK_IN_OMAP4430,
+};
+
+/*
+ * clkdev
+ */
+
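Each CLK() entry below binds a (device name, connection id) pair to one of the struct clk definitions above and tags it with the CPUs it is valid on. As a rough sketch, assuming the CLK() helper and struct omap_clk from the OMAP2 clock headers of this period (the field names are an assumption, not shown in this patch), an entry such as CLK("mmci-omap-hs.0", "fck", &mmc1_ck, CK_443X) expands to something like:

	{
		.cpu = CK_443X,
		.lk = {
			.dev_id = "mmci-omap-hs.0",
			.con_id = "fck",
			.clk = &mmc1_ck,
		},
	},
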
+static struct omap_clk omap44xx_clks[] = {
+ CLK(NULL, "extalt_clkin_ck", &extalt_clkin_ck, CK_443X),
+ CLK(NULL, "pad_clks_ck", &pad_clks_ck, CK_443X),
+ CLK(NULL, "pad_slimbus_core_clks_ck", &pad_slimbus_core_clks_ck, CK_443X),
+ CLK(NULL, "secure_32k_clk_src_ck", &secure_32k_clk_src_ck, CK_443X),
+ CLK(NULL, "slimbus_clk", &slimbus_clk, CK_443X),
+ CLK(NULL, "sys_32k_ck", &sys_32k_ck, CK_443X),
+ CLK(NULL, "virt_12000000_ck", &virt_12000000_ck, CK_443X),
+ CLK(NULL, "virt_13000000_ck", &virt_13000000_ck, CK_443X),
+ CLK(NULL, "virt_16800000_ck", &virt_16800000_ck, CK_443X),
+ CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_443X),
+ CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_443X),
+ CLK(NULL, "virt_27000000_ck", &virt_27000000_ck, CK_443X),
+ CLK(NULL, "virt_38400000_ck", &virt_38400000_ck, CK_443X),
+ CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_443X),
+ CLK(NULL, "utmi_phy_clkout_ck", &utmi_phy_clkout_ck, CK_443X),
+ CLK(NULL, "xclk60mhsp1_ck", &xclk60mhsp1_ck, CK_443X),
+ CLK(NULL, "xclk60mhsp2_ck", &xclk60mhsp2_ck, CK_443X),
+ CLK(NULL, "xclk60motg_ck", &xclk60motg_ck, CK_443X),
+ CLK(NULL, "dpll_sys_ref_clk", &dpll_sys_ref_clk, CK_443X),
+ CLK(NULL, "abe_dpll_refclk_mux_ck", &abe_dpll_refclk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_abe_ck", &dpll_abe_ck, CK_443X),
+ CLK(NULL, "dpll_abe_m2x2_ck", &dpll_abe_m2x2_ck, CK_443X),
+ CLK(NULL, "abe_24m_fclk", &abe_24m_fclk, CK_443X),
+ CLK(NULL, "abe_clk", &abe_clk, CK_443X),
+ CLK(NULL, "aess_fclk", &aess_fclk, CK_443X),
+ CLK(NULL, "dpll_abe_m3_ck", &dpll_abe_m3_ck, CK_443X),
+ CLK(NULL, "core_hsd_byp_clk_mux_ck", &core_hsd_byp_clk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_443X),
+ CLK(NULL, "dpll_core_m6_ck", &dpll_core_m6_ck, CK_443X),
+ CLK(NULL, "dbgclk_mux_ck", &dbgclk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_core_m2_ck", &dpll_core_m2_ck, CK_443X),
+ CLK(NULL, "ddrphy_ck", &ddrphy_ck, CK_443X),
+ CLK(NULL, "dpll_core_m5_ck", &dpll_core_m5_ck, CK_443X),
+ CLK(NULL, "div_core_ck", &div_core_ck, CK_443X),
+ CLK(NULL, "div_iva_hs_clk", &div_iva_hs_clk, CK_443X),
+ CLK(NULL, "div_mpu_hs_clk", &div_mpu_hs_clk, CK_443X),
+ CLK(NULL, "dpll_core_m4_ck", &dpll_core_m4_ck, CK_443X),
+ CLK(NULL, "dll_clk_div_ck", &dll_clk_div_ck, CK_443X),
+ CLK(NULL, "dpll_abe_m2_ck", &dpll_abe_m2_ck, CK_443X),
+ CLK(NULL, "dpll_core_m3_ck", &dpll_core_m3_ck, CK_443X),
+ CLK(NULL, "dpll_core_m7_ck", &dpll_core_m7_ck, CK_443X),
+ CLK(NULL, "iva_hsd_byp_clk_mux_ck", &iva_hsd_byp_clk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_iva_ck", &dpll_iva_ck, CK_443X),
+ CLK(NULL, "dpll_iva_m4_ck", &dpll_iva_m4_ck, CK_443X),
+ CLK(NULL, "dpll_iva_m5_ck", &dpll_iva_m5_ck, CK_443X),
+ CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_443X),
+ CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_443X),
+ CLK(NULL, "per_hs_clk_div_ck", &per_hs_clk_div_ck, CK_443X),
+ CLK(NULL, "per_hsd_byp_clk_mux_ck", &per_hsd_byp_clk_mux_ck, CK_443X),
+ CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_443X),
+ CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m2x2_ck", &dpll_per_m2x2_ck, CK_443X),
+ CLK(NULL, "dpll_per_m3_ck", &dpll_per_m3_ck, CK_443X),
+ CLK(NULL, "dpll_per_m4_ck", &dpll_per_m4_ck, CK_443X),
+ CLK(NULL, "dpll_per_m5_ck", &dpll_per_m5_ck, CK_443X),
+ CLK(NULL, "dpll_per_m6_ck", &dpll_per_m6_ck, CK_443X),
+ CLK(NULL, "dpll_per_m7_ck", &dpll_per_m7_ck, CK_443X),
+ CLK(NULL, "dpll_unipro_ck", &dpll_unipro_ck, CK_443X),
+ CLK(NULL, "dpll_unipro_m2x2_ck", &dpll_unipro_m2x2_ck, CK_443X),
+ CLK(NULL, "usb_hs_clk_div_ck", &usb_hs_clk_div_ck, CK_443X),
+ CLK(NULL, "dpll_usb_ck", &dpll_usb_ck, CK_443X),
+ CLK(NULL, "dpll_usb_clkdcoldo_ck", &dpll_usb_clkdcoldo_ck, CK_443X),
+ CLK(NULL, "dpll_usb_m2_ck", &dpll_usb_m2_ck, CK_443X),
+ CLK(NULL, "ducati_clk_mux_ck", &ducati_clk_mux_ck, CK_443X),
+ CLK(NULL, "func_12m_fclk", &func_12m_fclk, CK_443X),
+ CLK(NULL, "func_24m_clk", &func_24m_clk, CK_443X),
+ CLK(NULL, "func_24mc_fclk", &func_24mc_fclk, CK_443X),
+ CLK(NULL, "func_48m_fclk", &func_48m_fclk, CK_443X),
+ CLK(NULL, "func_48mc_fclk", &func_48mc_fclk, CK_443X),
+ CLK(NULL, "func_64m_fclk", &func_64m_fclk, CK_443X),
+ CLK(NULL, "func_96m_fclk", &func_96m_fclk, CK_443X),
+ CLK(NULL, "hsmmc6_fclk", &hsmmc6_fclk, CK_443X),
+ CLK(NULL, "init_60m_fclk", &init_60m_fclk, CK_443X),
+ CLK(NULL, "l3_div_ck", &l3_div_ck, CK_443X),
+ CLK(NULL, "l4_div_ck", &l4_div_ck, CK_443X),
+ CLK(NULL, "lp_clk_div_ck", &lp_clk_div_ck, CK_443X),
+ CLK(NULL, "l4_wkup_clk_mux_ck", &l4_wkup_clk_mux_ck, CK_443X),
+ CLK(NULL, "per_abe_nc_fclk", &per_abe_nc_fclk, CK_443X),
+ CLK(NULL, "mcasp2_fclk", &mcasp2_fclk, CK_443X),
+ CLK(NULL, "mcasp3_fclk", &mcasp3_fclk, CK_443X),
+ CLK(NULL, "ocp_abe_iclk", &ocp_abe_iclk, CK_443X),
+ CLK(NULL, "per_abe_24m_fclk", &per_abe_24m_fclk, CK_443X),
+ CLK(NULL, "pmd_stm_clock_mux_ck", &pmd_stm_clock_mux_ck, CK_443X),
+ CLK(NULL, "pmd_trace_clk_mux_ck", &pmd_trace_clk_mux_ck, CK_443X),
+ CLK(NULL, "syc_clk_div_ck", &syc_clk_div_ck, CK_443X),
+ CLK(NULL, "aes1_ck", &aes1_ck, CK_443X),
+ CLK(NULL, "aes2_ck", &aes2_ck, CK_443X),
+ CLK(NULL, "aess_ck", &aess_ck, CK_443X),
+ CLK(NULL, "cust_efuse_ck", &cust_efuse_ck, CK_443X),
+ CLK(NULL, "des3des_ck", &des3des_ck, CK_443X),
+ CLK(NULL, "dmic_sync_mux_ck", &dmic_sync_mux_ck, CK_443X),
+ CLK(NULL, "dmic_ck", &dmic_ck, CK_443X),
+ CLK(NULL, "dss_ck", &dss_ck, CK_443X),
+ CLK(NULL, "ducati_ck", &ducati_ck, CK_443X),
+ CLK(NULL, "emif1_ck", &emif1_ck, CK_443X),
+ CLK(NULL, "emif2_ck", &emif2_ck, CK_443X),
+ CLK(NULL, "fdif_ck", &fdif_ck, CK_443X),
+ CLK(NULL, "per_sgx_fclk", &per_sgx_fclk, CK_443X),
+ CLK(NULL, "gfx_ck", &gfx_ck, CK_443X),
+ CLK(NULL, "gpio1_ck", &gpio1_ck, CK_443X),
+ CLK(NULL, "gpio2_ck", &gpio2_ck, CK_443X),
+ CLK(NULL, "gpio3_ck", &gpio3_ck, CK_443X),
+ CLK(NULL, "gpio4_ck", &gpio4_ck, CK_443X),
+ CLK(NULL, "gpio5_ck", &gpio5_ck, CK_443X),
+ CLK(NULL, "gpio6_ck", &gpio6_ck, CK_443X),
+ CLK(NULL, "gpmc_ck", &gpmc_ck, CK_443X),
+ CLK(NULL, "gptimer1_ck", &gptimer1_ck, CK_443X),
+ CLK(NULL, "gptimer10_ck", &gptimer10_ck, CK_443X),
+ CLK(NULL, "gptimer11_ck", &gptimer11_ck, CK_443X),
+ CLK(NULL, "gptimer2_ck", &gptimer2_ck, CK_443X),
+ CLK(NULL, "gptimer3_ck", &gptimer3_ck, CK_443X),
+ CLK(NULL, "gptimer4_ck", &gptimer4_ck, CK_443X),
+ CLK(NULL, "gptimer5_ck", &gptimer5_ck, CK_443X),
+ CLK(NULL, "gptimer6_ck", &gptimer6_ck, CK_443X),
+ CLK(NULL, "gptimer7_ck", &gptimer7_ck, CK_443X),
+ CLK(NULL, "gptimer8_ck", &gptimer8_ck, CK_443X),
+ CLK(NULL, "gptimer9_ck", &gptimer9_ck, CK_443X),
+ CLK("omap2_hdq.0", "ick", &hdq1w_ck, CK_443X),
+ CLK(NULL, "hsi_ck", &hsi_ck, CK_443X),
+ CLK("i2c_omap.1", "ick", &i2c1_ck, CK_443X),
+ CLK("i2c_omap.2", "ick", &i2c2_ck, CK_443X),
+ CLK("i2c_omap.3", "ick", &i2c3_ck, CK_443X),
+ CLK("i2c_omap.4", "ick", &i2c4_ck, CK_443X),
+ CLK(NULL, "iss_ck", &iss_ck, CK_443X),
+ CLK(NULL, "ivahd_ck", &ivahd_ck, CK_443X),
+ CLK(NULL, "keyboard_ck", &keyboard_ck, CK_443X),
+ CLK(NULL, "l3_instr_interconnect_ck", &l3_instr_interconnect_ck, CK_443X),
+ CLK(NULL, "l3_interconnect_3_ck", &l3_interconnect_3_ck, CK_443X),
+ CLK(NULL, "mcasp_sync_mux_ck", &mcasp_sync_mux_ck, CK_443X),
+ CLK(NULL, "mcasp_ck", &mcasp_ck, CK_443X),
+ CLK(NULL, "mcbsp1_sync_mux_ck", &mcbsp1_sync_mux_ck, CK_443X),
+ CLK("omap-mcbsp.1", "fck", &mcbsp1_ck, CK_443X),
+ CLK(NULL, "mcbsp2_sync_mux_ck", &mcbsp2_sync_mux_ck, CK_443X),
+ CLK("omap-mcbsp.2", "fck", &mcbsp2_ck, CK_443X),
+ CLK(NULL, "mcbsp3_sync_mux_ck", &mcbsp3_sync_mux_ck, CK_443X),
+ CLK("omap-mcbsp.3", "fck", &mcbsp3_ck, CK_443X),
+ CLK(NULL, "mcbsp4_sync_mux_ck", &mcbsp4_sync_mux_ck, CK_443X),
+ CLK("omap-mcbsp.4", "fck", &mcbsp4_ck, CK_443X),
+ CLK("omap2_mcspi.1", "fck", &mcspi1_ck, CK_443X),
+ CLK("omap2_mcspi.2", "fck", &mcspi2_ck, CK_443X),
+ CLK("omap2_mcspi.3", "fck", &mcspi3_ck, CK_443X),
+ CLK("omap2_mcspi.4", "fck", &mcspi4_ck, CK_443X),
+ CLK("mmci-omap-hs.0", "fck", &mmc1_ck, CK_443X),
+ CLK("mmci-omap-hs.1", "fck", &mmc2_ck, CK_443X),
+ CLK("mmci-omap-hs.2", "fck", &mmc3_ck, CK_443X),
+ CLK("mmci-omap-hs.3", "fck", &mmc4_ck, CK_443X),
+ CLK("mmci-omap-hs.4", "fck", &mmc5_ck, CK_443X),
+ CLK(NULL, "ocp_wp1_ck", &ocp_wp1_ck, CK_443X),
+ CLK(NULL, "pdm_ck", &pdm_ck, CK_443X),
+ CLK(NULL, "pkaeip29_ck", &pkaeip29_ck, CK_443X),
+ CLK("omap_rng", "ick", &rng_ck, CK_443X),
+ CLK(NULL, "sha2md51_ck", &sha2md51_ck, CK_443X),
+ CLK(NULL, "sl2_ck", &sl2_ck, CK_443X),
+ CLK(NULL, "slimbus1_ck", &slimbus1_ck, CK_443X),
+ CLK(NULL, "slimbus2_ck", &slimbus2_ck, CK_443X),
+ CLK(NULL, "sr_core_ck", &sr_core_ck, CK_443X),
+ CLK(NULL, "sr_iva_ck", &sr_iva_ck, CK_443X),
+ CLK(NULL, "sr_mpu_ck", &sr_mpu_ck, CK_443X),
+ CLK(NULL, "tesla_ck", &tesla_ck, CK_443X),
+ CLK(NULL, "uart1_ck", &uart1_ck, CK_443X),
+ CLK(NULL, "uart2_ck", &uart2_ck, CK_443X),
+ CLK(NULL, "uart3_ck", &uart3_ck, CK_443X),
+ CLK(NULL, "uart4_ck", &uart4_ck, CK_443X),
+ CLK(NULL, "unipro1_ck", &unipro1_ck, CK_443X),
+ CLK(NULL, "usb_host_ck", &usb_host_ck, CK_443X),
+ CLK(NULL, "usb_host_fs_ck", &usb_host_fs_ck, CK_443X),
+ CLK("musb_hdrc", "ick", &usb_otg_ck, CK_443X),
+ CLK(NULL, "usb_tll_ck", &usb_tll_ck, CK_443X),
+ CLK(NULL, "usbphyocp2scp_ck", &usbphyocp2scp_ck, CK_443X),
+ CLK(NULL, "usim_ck", &usim_ck, CK_443X),
+ CLK("omap_wdt", "fck", &wdt2_ck, CK_443X),
+ CLK(NULL, "wdt3_ck", &wdt3_ck, CK_443X),
+ CLK(NULL, "otg_60m_gfclk_ck", &otg_60m_gfclk_ck, CK_443X),
+ CLK(NULL, "stm_clk_div_ck", &stm_clk_div_ck, CK_443X),
+ CLK(NULL, "trace_clk_div_ck", &trace_clk_div_ck, CK_443X),
+ CLK(NULL, "usim_fclk", &usim_fclk, CK_443X),
+ CLK(NULL, "utmi_p1_gfclk_ck", &utmi_p1_gfclk_ck, CK_443X),
+ CLK(NULL, "utmi_p2_gfclk_ck", &utmi_p2_gfclk_ck, CK_443X),
+};
+
+int __init omap2_clk_init(void)
+{
+ /* struct prcm_config *prcm; */
+ struct omap_clk *c;
+ /* u32 clkrate; */
+ u32 cpu_clkflg;
+
+ if (cpu_is_omap44xx()) {
+ cpu_mask = RATE_IN_4430;
+ cpu_clkflg = CK_443X;
+ }
+
+ clk_init(&omap2_clk_functions);
+
+ for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
+ c++)
+ clk_preinit(c->lk.clk);
+
+ for (c = omap44xx_clks; c < omap44xx_clks + ARRAY_SIZE(omap44xx_clks);
+ c++)
+ if (c->cpu & cpu_clkflg) {
+ clkdev_add(&c->lk);
+ clk_register(c->lk.clk);
+ /* TODO
+ omap2_init_clk_clkdm(c->lk.clk);
+ */
+ }
+
+ recalculate_root_clocks();
+
+ /*
+ * Only enable those clocks we will need, let the drivers
+ * enable other clocks as necessary
+ */
+ clk_enable_init_clocks();
+
+ return 0;
+}
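
Once omap2_clk_init() has registered the table, consumers reach these clocks through the standard clk API via the clkdev entries above. The fragment below is illustrative only (not part of the patch): a hypothetical demo that looks up the MMC1 functional clock by its "fck" connection id, enables it and prints its rate.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* hypothetical, for illustration; 'dev' would be the "mmci-omap-hs.0" device */
static int mmc1_clock_demo(struct device *dev)
{
	struct clk *fck = clk_get(dev, "fck");

	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_enable(fck);
	pr_info("MMC1 fck runs at %lu Hz\n", clk_get_rate(fck));
	clk_disable(fck);
	clk_put(fck);
	return 0;
}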
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
new file mode 100644
index 00000000000..f69096b88cd
--- /dev/null
+++ b/arch/arm/mach-omap2/clock_common_data.c
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/arm/mach-omap2/clock_common_data.c
+ *
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ * Copyright (C) 2004-2009 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains clock data that is common to both the OMAP2xxx and
+ * OMAP3xxx clock definition files.
+ */
+
+#include "clock.h"
+
+/* clksel_rate data common to 24xx/343x */
+const struct clksel_rate gpt_32k_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+const struct clksel_rate gpt_sys_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
+ { .div = 0 }
+};
+
+const struct clksel_rate gfx_l3_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_343X },
+ { .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_343X | DEFAULT_RATE },
+ { .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_343X },
+ { .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_343X },
+ { .div = 0 }
+};
+
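For context, the per-SoC OMAP2/3 clock data files point their clksel entries at these shared tables. The snippet below is illustrative only; the parent clock names are placeholders standing in for whichever 32 kHz and sys clocks the SoC-specific file defines, not identifiers taken from this patch.

/* illustrative consumer of the shared tables above; parents are placeholders */
static const struct clksel gpt_clksel_example[] = {
	{ .parent = &omap_32k_fck, .rates = gpt_32k_rates },
	{ .parent = &sys_ck, .rates = gpt_sys_rates },
	{ .parent = NULL },
};
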
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index fcd82320a6a..1a45ed1e8ba 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -2,7 +2,7 @@
* OMAP2/3 clockdomain framework functions
*
* Copyright (C) 2008 Texas Instruments, Inc.
- * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Paul Walmsley and Jouni Högander
*
@@ -10,9 +10,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifdef CONFIG_OMAP_DEBUG_CLOCKDOMAIN
-# define DEBUG
-#endif
+#undef DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/arch/arm/mach-omap2/cm-regbits-44xx.h b/arch/arm/mach-omap2/cm-regbits-44xx.h
new file mode 100644
index 00000000000..0e67f75aa35
--- /dev/null
+++ b/arch/arm/mach-omap2/cm-regbits-44xx.h
@@ -0,0 +1,1474 @@
+/*
+ * OMAP44xx Clock Management register bits
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM_REGBITS_44XX_H
+
+#include "cm.h"
+
+
+/* Used by CM_L3_1_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
+#define OMAP4430_ABE_DYNDEP_SHIFT 3
+#define OMAP4430_ABE_DYNDEP_MASK BITFIELD(3, 3)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP,
+ * CM_TESLA_STATICDEP
+ */
+#define OMAP4430_ABE_STATDEP_SHIFT 3
+#define OMAP4430_ABE_STATDEP_MASK BITFIELD(3, 3)
+
+/* Used by CM_L4CFG_DYNAMICDEP */
+#define OMAP4430_ALWONCORE_DYNDEP_SHIFT 16
+#define OMAP4430_ALWONCORE_DYNDEP_MASK BITFIELD(16, 16)
+
+/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_TESLA_STATICDEP */
+#define OMAP4430_ALWONCORE_STATDEP_SHIFT 16
+#define OMAP4430_ALWONCORE_STATDEP_MASK BITFIELD(16, 16)
+
+/*
+ * Used by CM_AUTOIDLE_DPLL_PER, CM_AUTOIDLE_DPLL_UNIPRO, CM_AUTOIDLE_DPLL_USB,
+ * CM_AUTOIDLE_DPLL_CORE_RESTORE, CM_AUTOIDLE_DPLL_ABE, CM_AUTOIDLE_DPLL_CORE,
+ * CM_AUTOIDLE_DPLL_DDRPHY, CM_AUTOIDLE_DPLL_IVA, CM_AUTOIDLE_DPLL_MPU
+ */
+#define OMAP4430_AUTO_DPLL_MODE_SHIFT 0
+#define OMAP4430_AUTO_DPLL_MODE_MASK BITFIELD(0, 2)
+
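The _SHIFT/_MASK pairs in this file are meant to be used together whenever a field is read or updated. The helper below is an editorial sketch (not part of the patch) of a read-modify-write of the three-bit DPLL autoidle mode field, using a raw register pointer in place of the real CM accessors.

/* illustrative only; 'reg' stands in for the relevant CM_AUTOIDLE_DPLL_* register */
static void dpll_autoidle_mode_sketch(void __iomem *reg, u32 new_mode)
{
	u32 v = __raw_readl(reg);

	v &= ~OMAP4430_AUTO_DPLL_MODE_MASK;
	v |= new_mode << OMAP4430_AUTO_DPLL_MODE_SHIFT;
	__raw_writel(v, reg);
}
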
+/* Used by CM_L4CFG_DYNAMICDEP */
+#define OMAP4430_CEFUSE_DYNDEP_SHIFT 17
+#define OMAP4430_CEFUSE_DYNDEP_MASK BITFIELD(17, 17)
+
+/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP, CM_TESLA_STATICDEP */
+#define OMAP4430_CEFUSE_STATDEP_SHIFT 17
+#define OMAP4430_CEFUSE_STATDEP_MASK BITFIELD(17, 17)
+
+/* Used by CM1_ABE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_ABE_24M_GFCLK_SHIFT 13
+#define OMAP4430_CLKACTIVITY_ABE_24M_GFCLK_MASK BITFIELD(13, 13)
+
+/* Used by CM1_ABE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_ABE_ALWON_32K_CLK_SHIFT 12
+#define OMAP4430_CLKACTIVITY_ABE_ALWON_32K_CLK_MASK BITFIELD(12, 12)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_ABE_LP_CLK_SHIFT 9
+#define OMAP4430_CLKACTIVITY_ABE_LP_CLK_MASK BITFIELD(9, 9)
+
+/* Used by CM1_ABE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_ABE_SYSCLK_SHIFT 11
+#define OMAP4430_CLKACTIVITY_ABE_SYSCLK_MASK BITFIELD(11, 11)
+
+/* Used by CM1_ABE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_ABE_X2_CLK_SHIFT 8
+#define OMAP4430_CLKACTIVITY_ABE_X2_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_SHIFT 11
+#define OMAP4430_CLKACTIVITY_ASYNC_DLL_CLK_MASK BITFIELD(11, 11)
+
+/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_SHIFT 12
+#define OMAP4430_CLKACTIVITY_ASYNC_PHY1_CLK_MASK BITFIELD(12, 12)
+
+/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_SHIFT 13
+#define OMAP4430_CLKACTIVITY_ASYNC_PHY2_CLK_MASK BITFIELD(13, 13)
+
+/* Used by CM_CAM_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_CAM_PHY_CTRL_GCLK_SHIFT 9
+#define OMAP4430_CLKACTIVITY_CAM_PHY_CTRL_GCLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_EMU_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_CORE_DPLL_EMU_CLK_SHIFT 9
+#define OMAP4430_CLKACTIVITY_CORE_DPLL_EMU_CLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_CEFUSE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_SHIFT 9
+#define OMAP4430_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_DLL_CLK_SHIFT 9
+#define OMAP4430_CLKACTIVITY_DLL_CLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_DMT10_GFCLK_SHIFT 9
+#define OMAP4430_CLKACTIVITY_DMT10_GFCLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_DMT11_GFCLK_SHIFT 10
+#define OMAP4430_CLKACTIVITY_DMT11_GFCLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_DMT2_GFCLK_SHIFT 11
+#define OMAP4430_CLKACTIVITY_DMT2_GFCLK_MASK BITFIELD(11, 11)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_DMT3_GFCLK_SHIFT (1 << 12)
+#define OMAP4430_CLKACTIVITY_DMT3_GFCLK_MASK BITFIELD(12, 12)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_DMT4_GFCLK_SHIFT (1 << 13)
+#define OMAP4430_CLKACTIVITY_DMT4_GFCLK_MASK BITFIELD(13, 13)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_DMT9_GFCLK_SHIFT (1 << 14)
+#define OMAP4430_CLKACTIVITY_DMT9_GFCLK_MASK BITFIELD(14, 14)
+
+/* Used by CM_DSS_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_DSS_ALWON_SYS_CLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_DSS_ALWON_SYS_CLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_DSS_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_DSS_FCLK_SHIFT (1 << 9)
+#define OMAP4430_CLKACTIVITY_DSS_FCLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_DUCATI_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_DUCATI_GCLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_DUCATI_GCLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_EMAC_50MHZ_CLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_EMAC_50MHZ_CLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_EMU_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_EMU_SYS_CLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_EMU_SYS_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_CAM_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_FDIF_GFCLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_FDIF_GFCLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_SHIFT (1 << 15)
+#define OMAP4430_CLKACTIVITY_FUNC_12M_GFCLK_MASK BITFIELD(15, 15)
+
+/* Used by CM1_ABE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_FUNC_24M_GFCLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_FUNC_24M_GFCLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_DSS_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_SHIFT (1 << 11)
+#define OMAP4430_CLKACTIVITY_HDMI_PHY_48MHZ_GFCLK_MASK BITFIELD(11, 11)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_SHIFT (1 << 20)
+#define OMAP4430_CLKACTIVITY_HSIC_P1_480M_GFCLK_MASK BITFIELD(20, 20)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_SHIFT (1 << 26)
+#define OMAP4430_CLKACTIVITY_HSIC_P1_GFCLK_MASK BITFIELD(26, 26)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_SHIFT (1 << 21)
+#define OMAP4430_CLKACTIVITY_HSIC_P2_480M_GFCLK_MASK BITFIELD(21, 21)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_SHIFT (1 << 27)
+#define OMAP4430_CLKACTIVITY_HSIC_P2_GFCLK_MASK BITFIELD(27, 27)
+
+/* Used by CM_L3INIT_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_INIT_32K_GFCLK_SHIFT (1 << 31)
+#define OMAP4430_CLKACTIVITY_INIT_32K_GFCLK_MASK BITFIELD(31, 31)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_SHIFT (1 << 13)
+#define OMAP4430_CLKACTIVITY_INIT_48MC_GFCLK_MASK BITFIELD(13, 13)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_SHIFT (1 << 12)
+#define OMAP4430_CLKACTIVITY_INIT_48M_GFCLK_MASK BITFIELD(12, 12)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_SHIFT (1 << 28)
+#define OMAP4430_CLKACTIVITY_INIT_60M_P1_GFCLK_MASK BITFIELD(28, 28)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_SHIFT (1 << 29)
+#define OMAP4430_CLKACTIVITY_INIT_60M_P2_GFCLK_MASK BITFIELD(29, 29)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_SHIFT (1 << 11)
+#define OMAP4430_CLKACTIVITY_INIT_96M_GFCLK_MASK BITFIELD(11, 11)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_SHIFT (1 << 16)
+#define OMAP4430_CLKACTIVITY_INIT_HSI_GFCLK_MASK BITFIELD(16, 16)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_SHIFT (1 << 17)
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC1_GFCLK_MASK BITFIELD(17, 17)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_SHIFT (1 << 18)
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC2_GFCLK_MASK BITFIELD(18, 18)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_SHIFT (1 << 19)
+#define OMAP4430_CLKACTIVITY_INIT_HSMMC6_GFCLK_MASK BITFIELD(19, 19)
+
+/* Used by CM_CAM_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_ISS_GCLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_ISS_GCLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_IVAHD_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_IVAHD_ROOT_CLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_IVAHD_ROOT_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L3INIT_DPLL_ALWON_CLK_SHIFT (1 << 14)
+#define OMAP4430_CLKACTIVITY_L3INIT_DPLL_ALWON_CLK_MASK BITFIELD(14, 14)
+
+/* Used by CM_L3_1_CLKSTCTRL, CM_L3_1_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L3_1_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_1_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3_2_CLKSTCTRL, CM_L3_2_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L3_2_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_2_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_D2D_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L3_D2D_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_D2D_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_SDMA_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L3_DMA_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_DMA_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_DSS_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_DSS_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_EMIF_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_GFX_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_GFX_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_INIT_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INSTR_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L3_INSTR_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_INSTR_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L4SEC_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L3_SECURE_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L3_SECURE_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_ALWON_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L4_AO_ICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L4_AO_ICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_CEFUSE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L4_CEFUSE_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L4CFG_CLKSTCTRL, CM_L4CFG_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L4_CFG_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_D2D_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_SHIFT (1 << 9)
+#define OMAP4430_CLKACTIVITY_L4_D2D_GICLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_SHIFT (1 << 9)
+#define OMAP4430_CLKACTIVITY_L4_INIT_GICLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_L4_PER_GICLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_L4_PER_GICLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L4SEC_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L4_SECURE_GICLK_SHIFT (1 << 9)
+#define OMAP4430_CLKACTIVITY_L4_SECURE_GICLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_SHIFT (1 << 12)
+#define OMAP4430_CLKACTIVITY_L4_WKUP_GICLK_MASK BITFIELD(12, 12)
+
+/* Used by CM_MPU_CLKSTCTRL, CM_MPU_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_MPU_DPLL_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM1_ABE_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_SHIFT (1 << 9)
+#define OMAP4430_CLKACTIVITY_OCP_ABE_GICLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_SHIFT (1 << 16)
+#define OMAP4430_CLKACTIVITY_PER_24MC_GFCLK_MASK BITFIELD(16, 16)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_SHIFT (1 << 17)
+#define OMAP4430_CLKACTIVITY_PER_32K_GFCLK_MASK BITFIELD(17, 17)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_SHIFT (1 << 18)
+#define OMAP4430_CLKACTIVITY_PER_48M_GFCLK_MASK BITFIELD(18, 18)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_SHIFT (1 << 19)
+#define OMAP4430_CLKACTIVITY_PER_96M_GFCLK_MASK BITFIELD(19, 19)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_SHIFT (1 << 25)
+#define OMAP4430_CLKACTIVITY_PER_ABE_24M_GFCLK_MASK BITFIELD(25, 25)
+
+/* Used by CM_EMU_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_PER_DPLL_EMU_CLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_PER_DPLL_EMU_CLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_SHIFT (1 << 20)
+#define OMAP4430_CLKACTIVITY_PER_MCASP2_GFCLK_MASK BITFIELD(20, 20)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_SHIFT (1 << 21)
+#define OMAP4430_CLKACTIVITY_PER_MCASP3_GFCLK_MASK BITFIELD(21, 21)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_SHIFT (1 << 22)
+#define OMAP4430_CLKACTIVITY_PER_MCBSP4_GFCLK_MASK BITFIELD(22, 22)
+
+/* Used by CM_L4PER_CLKSTCTRL, CM_L4PER_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_SHIFT (1 << 24)
+#define OMAP4430_CLKACTIVITY_PER_SYS_GFCLK_MASK BITFIELD(24, 24)
+
+/* Used by CM_MEMIF_CLKSTCTRL, CM_MEMIF_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_PHY_ROOT_CLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_GFX_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_SGX_GFCLK_SHIFT (1 << 9)
+#define OMAP4430_CLKACTIVITY_SGX_GFCLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_ALWON_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_SR_CORE_SYSCLK_SHIFT (1 << 11)
+#define OMAP4430_CLKACTIVITY_SR_CORE_SYSCLK_MASK BITFIELD(11, 11)
+
+/* Used by CM_ALWON_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_SR_IVA_SYSCLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_SR_IVA_SYSCLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_ALWON_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_SR_MPU_SYSCLK_SHIFT (1 << 9)
+#define OMAP4430_CLKACTIVITY_SR_MPU_SYSCLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_SYS_CLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_SYS_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_TESLA_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_SHIFT (1 << 8)
+#define OMAP4430_CLKACTIVITY_TESLA_ROOT_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_SHIFT (1 << 22)
+#define OMAP4430_CLKACTIVITY_TLL_CH0_GFCLK_MASK BITFIELD(22, 22)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_SHIFT (1 << 23)
+#define OMAP4430_CLKACTIVITY_TLL_CH1_GFCLK_MASK BITFIELD(23, 23)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_SHIFT (1 << 24)
+#define OMAP4430_CLKACTIVITY_TLL_CH2_GFCLK_MASK BITFIELD(24, 24)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_SHIFT (1 << 15)
+#define OMAP4430_CLKACTIVITY_USB_DPLL_HS_CLK_MASK BITFIELD(15, 15)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_USIM_GFCLK_SHIFT (1 << 10)
+#define OMAP4430_CLKACTIVITY_USIM_GFCLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_SHIFT (1 << 30)
+#define OMAP4430_CLKACTIVITY_UTMI_P3_GFCLK_MASK BITFIELD(30, 30)
+
+/* Used by CM_L3INIT_CLKSTCTRL, CM_L3INIT_CLKSTCTRL_RESTORE */
+#define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_SHIFT (1 << 25)
+#define OMAP4430_CLKACTIVITY_UTMI_ROOT_GFCLK_MASK BITFIELD(25, 25)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define OMAP4430_CLKACTIVITY_WKUP_32K_GFCLK_SHIFT (1 << 11)
+#define OMAP4430_CLKACTIVITY_WKUP_32K_GFCLK_MASK BITFIELD(11, 11)
+
+/*
+ * Used by CM_WKUP_TIMER1_CLKCTRL, CM_L4PER_DMTIMER10_CLKCTRL,
+ * CM_L4PER_DMTIMER11_CLKCTRL, CM_L4PER_DMTIMER2_CLKCTRL,
+ * CM_L4PER_DMTIMER3_CLKCTRL, CM_L4PER_DMTIMER4_CLKCTRL,
+ * CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL,
+ * CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL, CM_L3INIT_MMC6_CLKCTRL,
+ * CM1_ABE_TIMER5_CLKCTRL, CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL,
+ * CM1_ABE_TIMER8_CLKCTRL
+ */
+#define OMAP4430_CLKSEL_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_MASK BITFIELD(24, 24)
+
+/*
+ * Renamed from CLKSEL Used by CM_ABE_DSS_SYS_CLKSEL, CM_ABE_PLL_REF_CLKSEL,
+ * CM_DPLL_SYS_REF_CLKSEL, CM_L4_WKUP_CLKSEL, CM_CLKSEL_DUCATI_ISS_ROOT,
+ * CM_CLKSEL_USB_60MHZ
+ */
+#define OMAP4430_CLKSEL_0_0_SHIFT (1 << 0)
+#define OMAP4430_CLKSEL_0_0_MASK BITFIELD(0, 0)
+
+/* Renamed from CLKSEL Used by CM_BYPCLK_DPLL_IVA, CM_BYPCLK_DPLL_MPU */
+#define OMAP4430_CLKSEL_0_1_SHIFT (1 << 0)
+#define OMAP4430_CLKSEL_0_1_MASK BITFIELD(0, 1)
+
+/* Renamed from CLKSEL Used by CM_L3INIT_HSI_CLKCTRL */
+#define OMAP4430_CLKSEL_24_25_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_24_25_MASK BITFIELD(24, 25)
+
+/* Used by CM_L3INIT_USB_OTG_CLKCTRL */
+#define OMAP4430_CLKSEL_60M_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_60M_MASK BITFIELD(24, 24)
+
+/* Used by CM1_ABE_AESS_CLKCTRL */
+#define OMAP4430_CLKSEL_AESS_FCLK_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_AESS_FCLK_MASK BITFIELD(24, 24)
+
+/* Used by CM_CLKSEL_CORE_RESTORE, CM_CLKSEL_CORE */
+#define OMAP4430_CLKSEL_CORE_SHIFT (1 << 0)
+#define OMAP4430_CLKSEL_CORE_MASK BITFIELD(0, 0)
+
+/* Renamed from CLKSEL_CORE Used by CM_SHADOW_FREQ_CONFIG2 */
+#define OMAP4430_CLKSEL_CORE_1_1_SHIFT (1 << 1)
+#define OMAP4430_CLKSEL_CORE_1_1_MASK BITFIELD(1, 1)
+
+/* Used by CM_WKUP_USIM_CLKCTRL */
+#define OMAP4430_CLKSEL_DIV_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_DIV_MASK BITFIELD(24, 24)
+
+/* Used by CM_CAM_FDIF_CLKCTRL */
+#define OMAP4430_CLKSEL_FCLK_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_FCLK_MASK BITFIELD(24, 25)
+
+/* Used by CM_L4PER_MCBSP4_CLKCTRL */
+#define OMAP4430_CLKSEL_INTERNAL_SOURCE_SHIFT (1 << 25)
+#define OMAP4430_CLKSEL_INTERNAL_SOURCE_MASK BITFIELD(25, 25)
+
+/*
+ * Renamed from CLKSEL_INTERNAL_SOURCE Used by CM1_ABE_DMIC_CLKCTRL,
+ * CM1_ABE_MCASP_CLKCTRL, CM1_ABE_MCBSP1_CLKCTRL, CM1_ABE_MCBSP2_CLKCTRL,
+ * CM1_ABE_MCBSP3_CLKCTRL
+ */
+#define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_SHIFT (1 << 26)
+#define OMAP4430_CLKSEL_INTERNAL_SOURCE_CM1_ABE_DMIC_MASK BITFIELD(26, 27)
+
+/* Used by CM_CLKSEL_CORE_RESTORE, CM_CLKSEL_CORE */
+#define OMAP4430_CLKSEL_L3_SHIFT (1 << 4)
+#define OMAP4430_CLKSEL_L3_MASK BITFIELD(4, 4)
+
+/* Renamed from CLKSEL_L3 Used by CM_SHADOW_FREQ_CONFIG2 */
+#define OMAP4430_CLKSEL_L3_SHADOW_SHIFT (1 << 2)
+#define OMAP4430_CLKSEL_L3_SHADOW_MASK BITFIELD(2, 2)
+
+/* Used by CM_CLKSEL_CORE_RESTORE, CM_CLKSEL_CORE */
+#define OMAP4430_CLKSEL_L4_SHIFT (1 << 8)
+#define OMAP4430_CLKSEL_L4_MASK BITFIELD(8, 8)
+
+/* Used by CM_CLKSEL_ABE */
+#define OMAP4430_CLKSEL_OPP_SHIFT (1 << 0)
+#define OMAP4430_CLKSEL_OPP_MASK BITFIELD(0, 1)
+
+/* Used by CM_GFX_GFX_CLKCTRL */
+#define OMAP4430_CLKSEL_PER_192M_SHIFT (1 << 25)
+#define OMAP4430_CLKSEL_PER_192M_MASK BITFIELD(25, 26)
+
+/* Used by CM_EMU_DEBUGSS_CLKCTRL */
+#define OMAP4430_CLKSEL_PMD_STM_CLK_SHIFT (1 << 27)
+#define OMAP4430_CLKSEL_PMD_STM_CLK_MASK BITFIELD(27, 29)
+
+/* Used by CM_EMU_DEBUGSS_CLKCTRL */
+#define OMAP4430_CLKSEL_PMD_TRACE_CLK_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK BITFIELD(24, 26)
+
+/* Used by CM_GFX_GFX_CLKCTRL */
+#define OMAP4430_CLKSEL_SGX_FCLK_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_SGX_FCLK_MASK BITFIELD(24, 24)
+
+/*
+ * Used by CM1_ABE_DMIC_CLKCTRL, CM1_ABE_MCASP_CLKCTRL, CM1_ABE_MCBSP1_CLKCTRL,
+ * CM1_ABE_MCBSP2_CLKCTRL, CM1_ABE_MCBSP3_CLKCTRL
+ */
+#define OMAP4430_CLKSEL_SOURCE_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_SOURCE_MASK BITFIELD(24, 25)
+
+/* Renamed from CLKSEL_SOURCE Used by CM_L4PER_MCBSP4_CLKCTRL */
+#define OMAP4430_CLKSEL_SOURCE_24_24_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_SOURCE_24_24_MASK BITFIELD(24, 24)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_CLKSEL_UTMI_P1_SHIFT (1 << 24)
+#define OMAP4430_CLKSEL_UTMI_P1_MASK BITFIELD(24, 24)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_CLKSEL_UTMI_P2_SHIFT (1 << 25)
+#define OMAP4430_CLKSEL_UTMI_P2_MASK BITFIELD(25, 25)
+
+/*
+ * Used by CM_WKUP_CLKSTCTRL, CM_EMU_CLKSTCTRL, CM_D2D_CLKSTCTRL,
+ * CM_DUCATI_CLKSTCTRL, CM_L3INSTR_CLKSTCTRL, CM_L3_1_CLKSTCTRL,
+ * CM_L3_2_CLKSTCTRL, CM_L4CFG_CLKSTCTRL, CM_MEMIF_CLKSTCTRL,
+ * CM_SDMA_CLKSTCTRL, CM_GFX_CLKSTCTRL, CM_L4PER_CLKSTCTRL, CM_L4SEC_CLKSTCTRL,
+ * CM_L3INIT_CLKSTCTRL, CM_CAM_CLKSTCTRL, CM_CEFUSE_CLKSTCTRL,
+ * CM_L3INIT_CLKSTCTRL_RESTORE, CM_L3_1_CLKSTCTRL_RESTORE,
+ * CM_L3_2_CLKSTCTRL_RESTORE, CM_L4CFG_CLKSTCTRL_RESTORE,
+ * CM_L4PER_CLKSTCTRL_RESTORE, CM_MEMIF_CLKSTCTRL_RESTORE, CM_ALWON_CLKSTCTRL,
+ * CM_IVAHD_CLKSTCTRL, CM_DSS_CLKSTCTRL, CM_MPU_CLKSTCTRL, CM_TESLA_CLKSTCTRL,
+ * CM1_ABE_CLKSTCTRL, CM_MPU_CLKSTCTRL_RESTORE
+ */
+#define OMAP4430_CLKTRCTRL_SHIFT (1 << 0)
+#define OMAP4430_CLKTRCTRL_MASK BITFIELD(0, 1)
+
+/* Used by CM_EMU_OVERRIDE_DPLL_CORE */
+#define OMAP4430_CORE_DPLL_EMU_DIV_SHIFT (1 << 0)
+#define OMAP4430_CORE_DPLL_EMU_DIV_MASK BITFIELD(0, 6)
+
+/* Used by CM_EMU_OVERRIDE_DPLL_CORE */
+#define OMAP4430_CORE_DPLL_EMU_MULT_SHIFT (1 << 8)
+#define OMAP4430_CORE_DPLL_EMU_MULT_MASK BITFIELD(8, 18)
+
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
+#define OMAP4430_D2D_DYNDEP_SHIFT (1 << 18)
+#define OMAP4430_D2D_DYNDEP_MASK BITFIELD(18, 18)
+
+/* Used by CM_MPU_STATICDEP */
+#define OMAP4430_D2D_STATDEP_SHIFT (1 << 18)
+#define OMAP4430_D2D_STATDEP_MASK BITFIELD(18, 18)
+
+/*
+ * Used by CM_SSC_DELTAMSTEP_DPLL_PER, CM_SSC_DELTAMSTEP_DPLL_UNIPRO,
+ * CM_SSC_DELTAMSTEP_DPLL_USB, CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE,
+ * CM_SSC_DELTAMSTEP_DPLL_ABE, CM_SSC_DELTAMSTEP_DPLL_CORE,
+ * CM_SSC_DELTAMSTEP_DPLL_DDRPHY, CM_SSC_DELTAMSTEP_DPLL_IVA,
+ * CM_SSC_DELTAMSTEP_DPLL_MPU
+ */
+#define OMAP4430_DELTAMSTEP_SHIFT (1 << 0)
+#define OMAP4430_DELTAMSTEP_MASK BITFIELD(0, 19)
+
+/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_DLL_OVERRIDE_SHIFT (1 << 2)
+#define OMAP4430_DLL_OVERRIDE_MASK BITFIELD(2, 2)
+
+/* Renamed from DLL_OVERRIDE Used by CM_DLL_CTRL */
+#define OMAP4430_DLL_OVERRIDE_0_0_SHIFT (1 << 0)
+#define OMAP4430_DLL_OVERRIDE_0_0_MASK BITFIELD(0, 0)
+
+/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_DLL_RESET_SHIFT (1 << 3)
+#define OMAP4430_DLL_RESET_MASK BITFIELD(3, 3)
+
+/*
+ * Used by CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO, CM_CLKSEL_DPLL_USB,
+ * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
+ * CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU
+ */
+#define OMAP4430_DPLL_BYP_CLKSEL_SHIFT (1 << 23)
+#define OMAP4430_DPLL_BYP_CLKSEL_MASK BITFIELD(23, 23)
+
+/* Used by CM_CLKDCOLDO_DPLL_USB */
+#define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_SHIFT (1 << 8)
+#define OMAP4430_DPLL_CLKDCOLDO_GATE_CTRL_MASK BITFIELD(8, 8)
+
+/* Used by CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_CORE */
+#define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_SHIFT (1 << 20)
+#define OMAP4430_DPLL_CLKOUTHIF_CLKSEL_MASK BITFIELD(20, 20)
+
+/*
+ * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
+ * CM_DIV_M3_DPLL_CORE
+ */
+#define OMAP4430_DPLL_CLKOUTHIF_DIV_SHIFT (1 << 0)
+#define OMAP4430_DPLL_CLKOUTHIF_DIV_MASK BITFIELD(0, 4)
+
+/*
+ * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
+ * CM_DIV_M3_DPLL_CORE
+ */
+#define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_SHIFT (1 << 5)
+#define OMAP4430_DPLL_CLKOUTHIF_DIVCHACK_MASK BITFIELD(5, 5)
+
+/*
+ * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
+ * CM_DIV_M3_DPLL_CORE
+ */
+#define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_SHIFT (1 << 8)
+#define OMAP4430_DPLL_CLKOUTHIF_GATE_CTRL_MASK BITFIELD(8, 8)
+
+/* Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO, CM_DIV_M2_DPLL_ABE */
+#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_SHIFT (1 << 10)
+#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK BITFIELD(10, 10)
+
+/*
+ * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO,
+ * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
+ * CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU
+ */
+#define OMAP4430_DPLL_CLKOUT_DIV_SHIFT (1 << 0)
+#define OMAP4430_DPLL_CLKOUT_DIV_MASK BITFIELD(0, 4)
+
+/* Renamed from DPLL_CLKOUT_DIV Used by CM_DIV_M2_DPLL_USB */
+#define OMAP4430_DPLL_CLKOUT_DIV_0_6_SHIFT (1 << 0)
+#define OMAP4430_DPLL_CLKOUT_DIV_0_6_MASK BITFIELD(0, 6)
+
+/*
+ * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO,
+ * CM_DIV_M2_DPLL_CORE_RESTORE, CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE,
+ * CM_DIV_M2_DPLL_DDRPHY, CM_DIV_M2_DPLL_MPU
+ */
+#define OMAP4430_DPLL_CLKOUT_DIVCHACK_SHIFT (1 << 5)
+#define OMAP4430_DPLL_CLKOUT_DIVCHACK_MASK BITFIELD(5, 5)
+
+/* Renamed from DPLL_CLKOUT_DIVCHACK Used by CM_DIV_M2_DPLL_USB */
+#define OMAP4430_DPLL_CLKOUT_DIVCHACK_M2_USB_SHIFT (1 << 7)
+#define OMAP4430_DPLL_CLKOUT_DIVCHACK_M2_USB_MASK BITFIELD(7, 7)
+
+/*
+ * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB, CM_DIV_M2_DPLL_CORE_RESTORE,
+ * CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU
+ */
+#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_SHIFT (1 << 8)
+#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK BITFIELD(8, 8)
+
+/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_DPLL_CORE_DPLL_EN_SHIFT (1 << 8)
+#define OMAP4430_DPLL_CORE_DPLL_EN_MASK BITFIELD(8, 10)
+
+/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_DPLL_CORE_M2_DIV_SHIFT (1 << 11)
+#define OMAP4430_DPLL_CORE_M2_DIV_MASK BITFIELD(11, 15)
+
+/* Used by CM_SHADOW_FREQ_CONFIG2 */
+#define OMAP4430_DPLL_CORE_M5_DIV_SHIFT (1 << 3)
+#define OMAP4430_DPLL_CORE_M5_DIV_MASK BITFIELD(3, 7)
+
+/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_DPLL_CORE_SYS_REF_CLKSEL_SHIFT (1 << 1)
+#define OMAP4430_DPLL_CORE_SYS_REF_CLKSEL_MASK BITFIELD(1, 1)
+
+/*
+ * Used by CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO,
+ * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
+ * CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU
+ */
+#define OMAP4430_DPLL_DIV_SHIFT (1 << 0)
+#define OMAP4430_DPLL_DIV_MASK BITFIELD(0, 6)
+
+/* Renamed from DPLL_DIV Used by CM_CLKSEL_DPLL_USB */
+#define OMAP4430_DPLL_DIV_0_7_SHIFT (1 << 0)
+#define OMAP4430_DPLL_DIV_0_7_MASK BITFIELD(0, 7)
+
+/*
+ * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_USB,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ */
+#define OMAP4430_DPLL_DRIFTGUARD_EN_SHIFT (1 << 8)
+#define OMAP4430_DPLL_DRIFTGUARD_EN_MASK BITFIELD(8, 8)
+
+/* Renamed from DPLL_DRIFTGUARD_EN Used by CM_CLKMODE_DPLL_UNIPRO */
+#define OMAP4430_DPLL_DRIFTGUARD_EN_3_3_SHIFT (1 << 3)
+#define OMAP4430_DPLL_DRIFTGUARD_EN_3_3_MASK BITFIELD(3, 3)
+
+/*
+ * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ */
+#define OMAP4430_DPLL_EN_SHIFT (1 << 0)
+#define OMAP4430_DPLL_EN_MASK BITFIELD(0, 2)
+
+/*
+ * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ */
+#define OMAP4430_DPLL_LPMODE_EN_SHIFT (1 << 10)
+#define OMAP4430_DPLL_LPMODE_EN_MASK BITFIELD(10, 10)
+
+/*
+ * Used by CM_CLKSEL_DPLL_PER, CM_CLKSEL_DPLL_UNIPRO,
+ * CM_CLKSEL_DPLL_CORE_RESTORE, CM_CLKSEL_DPLL_ABE, CM_CLKSEL_DPLL_CORE,
+ * CM_CLKSEL_DPLL_DDRPHY, CM_CLKSEL_DPLL_IVA, CM_CLKSEL_DPLL_MPU
+ */
+#define OMAP4430_DPLL_MULT_SHIFT (1 << 8)
+#define OMAP4430_DPLL_MULT_MASK BITFIELD(8, 18)
+
+/* Renamed from DPLL_MULT Used by CM_CLKSEL_DPLL_USB */
+#define OMAP4430_DPLL_MULT_USB_SHIFT (1 << 8)
+#define OMAP4430_DPLL_MULT_USB_MASK BITFIELD(8, 19)
+
+/*
+ * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ */
+#define OMAP4430_DPLL_REGM4XEN_SHIFT (1 << 11)
+#define OMAP4430_DPLL_REGM4XEN_MASK BITFIELD(11, 11)
+
+/* Used by CM_CLKSEL_DPLL_USB */
+#define OMAP4430_DPLL_SD_DIV_SHIFT (1 << 24)
+#define OMAP4430_DPLL_SD_DIV_MASK BITFIELD(24, 31)
+
+/*
+ * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ */
+#define OMAP4430_DPLL_SSC_ACK_SHIFT (1 << 13)
+#define OMAP4430_DPLL_SSC_ACK_MASK BITFIELD(13, 13)
+
+/*
+ * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ */
+#define OMAP4430_DPLL_SSC_DOWNSPREAD_SHIFT (1 << 14)
+#define OMAP4430_DPLL_SSC_DOWNSPREAD_MASK BITFIELD(14, 14)
+
+/*
+ * Used by CM_CLKMODE_DPLL_PER, CM_CLKMODE_DPLL_UNIPRO, CM_CLKMODE_DPLL_USB,
+ * CM_CLKMODE_DPLL_CORE_RESTORE, CM_CLKMODE_DPLL_ABE, CM_CLKMODE_DPLL_CORE,
+ * CM_CLKMODE_DPLL_DDRPHY, CM_CLKMODE_DPLL_IVA, CM_CLKMODE_DPLL_MPU
+ */
+#define OMAP4430_DPLL_SSC_EN_SHIFT (1 << 12)
+#define OMAP4430_DPLL_SSC_EN_MASK BITFIELD(12, 12)
+
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
+#define OMAP4430_DSS_DYNDEP_SHIFT (1 << 8)
+#define OMAP4430_DSS_DYNDEP_MASK BITFIELD(8, 8)
+
+/*
+ * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
+ * CM_MPU_STATICDEP
+ */
+#define OMAP4430_DSS_STATDEP_SHIFT (1 << 8)
+#define OMAP4430_DSS_STATDEP_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3_2_DYNAMICDEP */
+#define OMAP4430_DUCATI_DYNDEP_SHIFT (1 << 0)
+#define OMAP4430_DUCATI_DYNDEP_MASK BITFIELD(0, 0)
+
+/* Used by CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP */
+#define OMAP4430_DUCATI_STATDEP_SHIFT (1 << 0)
+#define OMAP4430_DUCATI_STATDEP_MASK BITFIELD(0, 0)
+
+/* Used by CM_SHADOW_FREQ_CONFIG1_RESTORE, CM_SHADOW_FREQ_CONFIG1 */
+#define OMAP4430_FREQ_UPDATE_SHIFT (1 << 0)
+#define OMAP4430_FREQ_UPDATE_MASK BITFIELD(0, 0)
+
+/* Used by CM_L3_2_DYNAMICDEP */
+#define OMAP4430_GFX_DYNDEP_SHIFT (1 << 10)
+#define OMAP4430_GFX_DYNDEP_MASK BITFIELD(10, 10)
+
+/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP */
+#define OMAP4430_GFX_STATDEP_SHIFT (1 << 10)
+#define OMAP4430_GFX_STATDEP_MASK BITFIELD(10, 10)
+
+/* Used by CM_SHADOW_FREQ_CONFIG2 */
+#define OMAP4430_GPMC_FREQ_UPDATE_SHIFT (1 << 0)
+#define OMAP4430_GPMC_FREQ_UPDATE_MASK BITFIELD(0, 0)
+
+/*
+ * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT1_DIV_SHIFT (1 << 0)
+#define OMAP4430_HSDIVIDER_CLKOUT1_DIV_MASK BITFIELD(0, 4)
+
+/*
+ * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_SHIFT (1 << 5)
+#define OMAP4430_HSDIVIDER_CLKOUT1_DIVCHACK_MASK BITFIELD(5, 5)
+
+/*
+ * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_SHIFT (1 << 8)
+#define OMAP4430_HSDIVIDER_CLKOUT1_GATE_CTRL_MASK BITFIELD(8, 8)
+
+/*
+ * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_SHIFT (1 << 12)
+#define OMAP4430_HSDIVIDER_CLKOUT1_PWDN_MASK BITFIELD(12, 12)
+
+/*
+ * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT2_DIV_SHIFT (1 << 0)
+#define OMAP4430_HSDIVIDER_CLKOUT2_DIV_MASK BITFIELD(0, 4)
+
+/*
+ * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_SHIFT (1 << 5)
+#define OMAP4430_HSDIVIDER_CLKOUT2_DIVCHACK_MASK BITFIELD(5, 5)
+
+/*
+ * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_SHIFT (1 << 8)
+#define OMAP4430_HSDIVIDER_CLKOUT2_GATE_CTRL_MASK BITFIELD(8, 8)
+
+/*
+ * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_SHIFT (1 << 12)
+#define OMAP4430_HSDIVIDER_CLKOUT2_PWDN_MASK BITFIELD(12, 12)
+
+/*
+ * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT3_DIV_SHIFT (1 << 0)
+#define OMAP4430_HSDIVIDER_CLKOUT3_DIV_MASK BITFIELD(0, 4)
+
+/*
+ * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_SHIFT (1 << 5)
+#define OMAP4430_HSDIVIDER_CLKOUT3_DIVCHACK_MASK BITFIELD(5, 5)
+
+/*
+ * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_SHIFT (1 << 8)
+#define OMAP4430_HSDIVIDER_CLKOUT3_GATE_CTRL_MASK BITFIELD(8, 8)
+
+/*
+ * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_SHIFT (1 << 12)
+#define OMAP4430_HSDIVIDER_CLKOUT3_PWDN_MASK BITFIELD(12, 12)
+
+/*
+ * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_CORE
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT4_DIV_SHIFT (1 << 0)
+#define OMAP4430_HSDIVIDER_CLKOUT4_DIV_MASK BITFIELD(0, 4)
+
+/*
+ * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_CORE
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_SHIFT (1 << 5)
+#define OMAP4430_HSDIVIDER_CLKOUT4_DIVCHACK_MASK BITFIELD(5, 5)
+
+/*
+ * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_CORE
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_SHIFT (1 << 8)
+#define OMAP4430_HSDIVIDER_CLKOUT4_GATE_CTRL_MASK BITFIELD(8, 8)
+
+/*
+ * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_CORE
+ */
+#define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_SHIFT (1 << 12)
+#define OMAP4430_HSDIVIDER_CLKOUT4_PWDN_MASK BITFIELD(12, 12)
+
+/*
+ * Used by PRM_PRM_PROFILING_CLKCTRL, CM_WKUP_GPIO1_CLKCTRL,
+ * CM_WKUP_KEYBOARD_CLKCTRL, CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_RTC_CLKCTRL,
+ * CM_WKUP_SARRAM_CLKCTRL, CM_WKUP_SYNCTIMER_CLKCTRL, CM_WKUP_TIMER12_CLKCTRL,
+ * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_USIM_CLKCTRL, CM_WKUP_WDT1_CLKCTRL,
+ * CM_WKUP_WDT2_CLKCTRL, CM_EMU_DEBUGSS_CLKCTRL, CM_D2D_MODEM_ICR_CLKCTRL,
+ * CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
+ * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL,
+ * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL,
+ * CM_L3_2_L3_2_CLKCTRL, CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL,
+ * CM_L4CFG_L4_CFG_CLKCTRL, CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL,
+ * CM_MEMIF_DMM_CLKCTRL, CM_MEMIF_EMIF_1_CLKCTRL, CM_MEMIF_EMIF_2_CLKCTRL,
+ * CM_MEMIF_EMIF_FW_CLKCTRL, CM_MEMIF_EMIF_H1_CLKCTRL,
+ * CM_MEMIF_EMIF_H2_CLKCTRL, CM_SDMA_SDMA_CLKCTRL, CM_GFX_GFX_CLKCTRL,
+ * CM_L4PER_ADC_CLKCTRL, CM_L4PER_DMTIMER10_CLKCTRL,
+ * CM_L4PER_DMTIMER11_CLKCTRL, CM_L4PER_DMTIMER2_CLKCTRL,
+ * CM_L4PER_DMTIMER3_CLKCTRL, CM_L4PER_DMTIMER4_CLKCTRL,
+ * CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL,
+ * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL,
+ * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL,
+ * CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL,
+ * CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL,
+ * CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL,
+ * CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL,
+ * CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL,
+ * CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL,
+ * CM_L4PER_MSPROHG_CLKCTRL, CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL,
+ * CM_L4PER_UART2_CLKCTRL, CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL,
+ * CM_L4SEC_AES1_CLKCTRL, CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL,
+ * CM_L4SEC_DES3DES_CLKCTRL, CM_L4SEC_PKAEIP29_CLKCTRL, CM_L4SEC_RNG_CLKCTRL,
+ * CM_L4SEC_SHA2MD51_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
+ * CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
+ * CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
+ * CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
+ * CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
+ * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_CAM_FDIF_CLKCTRL,
+ * CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
+ * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE,
+ * CM_L3INSTR_L3_3_CLKCTRL_RESTORE, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
+ * CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO3_CLKCTRL_RESTORE, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO5_CLKCTRL_RESTORE, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
+ * CM_ALWON_MDMINTC_CLKCTRL, CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL,
+ * CM_ALWON_SR_MPU_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL, CM_IVAHD_SL2_CLKCTRL,
+ * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
+ * CM_MPU_MPU_CLKCTRL, CM_TESLA_TESLA_CLKCTRL, CM1_ABE_AESS_CLKCTRL,
+ * CM1_ABE_DMIC_CLKCTRL, CM1_ABE_L4ABE_CLKCTRL, CM1_ABE_MCASP_CLKCTRL,
+ * CM1_ABE_MCBSP1_CLKCTRL, CM1_ABE_MCBSP2_CLKCTRL, CM1_ABE_MCBSP3_CLKCTRL,
+ * CM1_ABE_PDM_CLKCTRL, CM1_ABE_SLIMBUS_CLKCTRL, CM1_ABE_TIMER5_CLKCTRL,
+ * CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL, CM1_ABE_TIMER8_CLKCTRL,
+ * CM1_ABE_WDT3_CLKCTRL, CM_CM1_PROFILING_CLKCTRL
+ */
+#define OMAP4430_IDLEST_SHIFT (1 << 16)
+#define OMAP4430_IDLEST_MASK BITFIELD(16, 17)
+
+/* Used by CM_DUCATI_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP */
+#define OMAP4430_ISS_DYNDEP_SHIFT (1 << 9)
+#define OMAP4430_ISS_DYNDEP_MASK BITFIELD(9, 9)
+
+/*
+ * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
+ * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ */
+#define OMAP4430_ISS_STATDEP_SHIFT (1 << 9)
+#define OMAP4430_ISS_STATDEP_MASK BITFIELD(9, 9)
+
+/* Used by CM_L3_2_DYNAMICDEP, CM_TESLA_DYNAMICDEP */
+#define OMAP4430_IVAHD_DYNDEP_SHIFT (1 << 2)
+#define OMAP4430_IVAHD_DYNDEP_MASK BITFIELD(2, 2)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_GFX_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_DSS_STATICDEP, CM_MPU_STATICDEP,
+ * CM_TESLA_STATICDEP
+ */
+#define OMAP4430_IVAHD_STATDEP_SHIFT (1 << 2)
+#define OMAP4430_IVAHD_STATDEP_MASK BITFIELD(2, 2)
+
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
+#define OMAP4430_L3INIT_DYNDEP_SHIFT (1 << 7)
+#define OMAP4430_L3INIT_DYNDEP_MASK BITFIELD(7, 7)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ */
+#define OMAP4430_L3INIT_STATDEP_SHIFT (1 << 7)
+#define OMAP4430_L3INIT_STATDEP_MASK BITFIELD(7, 7)
+
+/*
+ * Used by CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_L3INIT_DYNAMICDEP,
+ * CM_DSS_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ */
+#define OMAP4430_L3_1_DYNDEP_SHIFT (1 << 5)
+#define OMAP4430_L3_1_DYNDEP_MASK BITFIELD(5, 5)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_GFX_STATICDEP, CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_IVAHD_STATICDEP, CM_DSS_STATICDEP,
+ * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ */
+#define OMAP4430_L3_1_STATDEP_SHIFT (1 << 5)
+#define OMAP4430_L3_1_STATDEP_MASK BITFIELD(5, 5)
+
+/*
+ * Used by CM_EMU_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP,
+ * CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP, CM_SDMA_DYNAMICDEP,
+ * CM_GFX_DYNAMICDEP, CM_L4SEC_DYNAMICDEP, CM_L3INIT_DYNAMICDEP,
+ * CM_CAM_DYNAMICDEP, CM_IVAHD_DYNAMICDEP
+ */
+#define OMAP4430_L3_2_DYNDEP_SHIFT (1 << 6)
+#define OMAP4430_L3_2_DYNDEP_MASK BITFIELD(6, 6)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_GFX_STATICDEP, CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_IVAHD_STATICDEP, CM_DSS_STATICDEP,
+ * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ */
+#define OMAP4430_L3_2_STATDEP_SHIFT (1 << 6)
+#define OMAP4430_L3_2_STATDEP_MASK BITFIELD(6, 6)
+
+/* Used by CM_L3_1_DYNAMICDEP */
+#define OMAP4430_L4CFG_DYNDEP_SHIFT (1 << 12)
+#define OMAP4430_L4CFG_DYNDEP_MASK BITFIELD(12, 12)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_L3INIT_STATICDEP, CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP,
+ * CM_TESLA_STATICDEP
+ */
+#define OMAP4430_L4CFG_STATDEP_SHIFT (1 << 12)
+#define OMAP4430_L4CFG_STATDEP_MASK BITFIELD(12, 12)
+
+/* Used by CM_L3_2_DYNAMICDEP */
+#define OMAP4430_L4PER_DYNDEP_SHIFT (1 << 13)
+#define OMAP4430_L4PER_DYNDEP_MASK BITFIELD(13, 13)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_SDMA_STATICDEP_RESTORE,
+ * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ */
+#define OMAP4430_L4PER_STATDEP_SHIFT (1 << 13)
+#define OMAP4430_L4PER_STATDEP_MASK BITFIELD(13, 13)
+
+/* Used by CM_L3_2_DYNAMICDEP, CM_L4PER_DYNAMICDEP */
+#define OMAP4430_L4SEC_DYNDEP_SHIFT (1 << 14)
+#define OMAP4430_L4SEC_DYNDEP_MASK BITFIELD(14, 14)
+
+/*
+ * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP
+ */
+#define OMAP4430_L4SEC_STATDEP_SHIFT (1 << 14)
+#define OMAP4430_L4SEC_STATDEP_MASK BITFIELD(14, 14)
+
+/* Used by CM_L4CFG_DYNAMICDEP */
+#define OMAP4430_L4WKUP_DYNDEP_SHIFT (1 << 15)
+#define OMAP4430_L4WKUP_DYNDEP_MASK BITFIELD(15, 15)
+
+/*
+ * Used by CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP, CM_L3INIT_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ */
+#define OMAP4430_L4WKUP_STATDEP_SHIFT (1 << 15)
+#define OMAP4430_L4WKUP_STATDEP_MASK BITFIELD(15, 15)
+
+/*
+ * Used by CM_D2D_DYNAMICDEP, CM_L3_1_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_MPU_DYNAMICDEP
+ */
+#define OMAP4430_MEMIF_DYNDEP_SHIFT (1 << 4)
+#define OMAP4430_MEMIF_DYNDEP_MASK BITFIELD(4, 4)
+
+/*
+ * Used by CM_D2D_STATICDEP, CM_DUCATI_STATICDEP, CM_SDMA_STATICDEP,
+ * CM_GFX_STATICDEP, CM_L4SEC_STATICDEP, CM_L3INIT_STATICDEP, CM_CAM_STATICDEP,
+ * CM_SDMA_STATICDEP_RESTORE, CM_IVAHD_STATICDEP, CM_DSS_STATICDEP,
+ * CM_MPU_STATICDEP, CM_TESLA_STATICDEP
+ */
+#define OMAP4430_MEMIF_STATDEP_SHIFT (1 << 4)
+#define OMAP4430_MEMIF_STATDEP_MASK BITFIELD(4, 4)
+
+/*
+ * Used by CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
+ * CM_SSC_MODFREQDIV_DPLL_USB, CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE,
+ * CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
+ * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
+ * CM_SSC_MODFREQDIV_DPLL_MPU
+ */
+#define OMAP4430_MODFREQDIV_EXPONENT_SHIFT (1 << 8)
+#define OMAP4430_MODFREQDIV_EXPONENT_MASK BITFIELD(8, 10)
+
+/*
+ * Used by CM_SSC_MODFREQDIV_DPLL_PER, CM_SSC_MODFREQDIV_DPLL_UNIPRO,
+ * CM_SSC_MODFREQDIV_DPLL_USB, CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE,
+ * CM_SSC_MODFREQDIV_DPLL_ABE, CM_SSC_MODFREQDIV_DPLL_CORE,
+ * CM_SSC_MODFREQDIV_DPLL_DDRPHY, CM_SSC_MODFREQDIV_DPLL_IVA,
+ * CM_SSC_MODFREQDIV_DPLL_MPU
+ */
+#define OMAP4430_MODFREQDIV_MANTISSA_SHIFT (1 << 0)
+#define OMAP4430_MODFREQDIV_MANTISSA_MASK BITFIELD(0, 6)
+
+/*
+ * Used by PRM_PRM_PROFILING_CLKCTRL, CM_WKUP_GPIO1_CLKCTRL,
+ * CM_WKUP_KEYBOARD_CLKCTRL, CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_RTC_CLKCTRL,
+ * CM_WKUP_SARRAM_CLKCTRL, CM_WKUP_SYNCTIMER_CLKCTRL, CM_WKUP_TIMER12_CLKCTRL,
+ * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_USIM_CLKCTRL, CM_WKUP_WDT1_CLKCTRL,
+ * CM_WKUP_WDT2_CLKCTRL, CM_EMU_DEBUGSS_CLKCTRL, CM_D2D_MODEM_ICR_CLKCTRL,
+ * CM_D2D_SAD2D_CLKCTRL, CM_D2D_SAD2D_FW_CLKCTRL, CM_DUCATI_DUCATI_CLKCTRL,
+ * CM_L3INSTR_L3_3_CLKCTRL, CM_L3INSTR_L3_INSTR_CLKCTRL,
+ * CM_L3INSTR_OCP_WP1_CLKCTRL, CM_L3_1_L3_1_CLKCTRL, CM_L3_2_GPMC_CLKCTRL,
+ * CM_L3_2_L3_2_CLKCTRL, CM_L3_2_OCMC_RAM_CLKCTRL, CM_L4CFG_HW_SEM_CLKCTRL,
+ * CM_L4CFG_L4_CFG_CLKCTRL, CM_L4CFG_MAILBOX_CLKCTRL, CM_L4CFG_SAR_ROM_CLKCTRL,
+ * CM_MEMIF_DMM_CLKCTRL, CM_MEMIF_EMIF_1_CLKCTRL, CM_MEMIF_EMIF_2_CLKCTRL,
+ * CM_MEMIF_EMIF_FW_CLKCTRL, CM_MEMIF_EMIF_H1_CLKCTRL,
+ * CM_MEMIF_EMIF_H2_CLKCTRL, CM_SDMA_SDMA_CLKCTRL, CM_GFX_GFX_CLKCTRL,
+ * CM_L4PER_ADC_CLKCTRL, CM_L4PER_DMTIMER10_CLKCTRL,
+ * CM_L4PER_DMTIMER11_CLKCTRL, CM_L4PER_DMTIMER2_CLKCTRL,
+ * CM_L4PER_DMTIMER3_CLKCTRL, CM_L4PER_DMTIMER4_CLKCTRL,
+ * CM_L4PER_DMTIMER9_CLKCTRL, CM_L4PER_ELM_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL,
+ * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL,
+ * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_HDQ1W_CLKCTRL, CM_L4PER_HECC1_CLKCTRL,
+ * CM_L4PER_HECC2_CLKCTRL, CM_L4PER_I2C1_CLKCTRL, CM_L4PER_I2C2_CLKCTRL,
+ * CM_L4PER_I2C3_CLKCTRL, CM_L4PER_I2C4_CLKCTRL, CM_L4PER_I2C5_CLKCTRL,
+ * CM_L4PER_L4PER_CLKCTRL, CM_L4PER_MCASP2_CLKCTRL, CM_L4PER_MCASP3_CLKCTRL,
+ * CM_L4PER_MCBSP4_CLKCTRL, CM_L4PER_MCSPI1_CLKCTRL, CM_L4PER_MCSPI2_CLKCTRL,
+ * CM_L4PER_MCSPI3_CLKCTRL, CM_L4PER_MCSPI4_CLKCTRL, CM_L4PER_MGATE_CLKCTRL,
+ * CM_L4PER_MMCSD3_CLKCTRL, CM_L4PER_MMCSD4_CLKCTRL, CM_L4PER_MMCSD5_CLKCTRL,
+ * CM_L4PER_MSPROHG_CLKCTRL, CM_L4PER_SLIMBUS2_CLKCTRL, CM_L4PER_UART1_CLKCTRL,
+ * CM_L4PER_UART2_CLKCTRL, CM_L4PER_UART3_CLKCTRL, CM_L4PER_UART4_CLKCTRL,
+ * CM_L4SEC_AES1_CLKCTRL, CM_L4SEC_AES2_CLKCTRL, CM_L4SEC_CRYPTODMA_CLKCTRL,
+ * CM_L4SEC_DES3DES_CLKCTRL, CM_L4SEC_PKAEIP29_CLKCTRL, CM_L4SEC_RNG_CLKCTRL,
+ * CM_L4SEC_SHA2MD51_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
+ * CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
+ * CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
+ * CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
+ * CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL,
+ * CM_L3INIT_USB_HOST_FS_CLKCTRL, CM_L3INIT_USB_OTG_CLKCTRL,
+ * CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_CAM_FDIF_CLKCTRL,
+ * CM_CAM_ISS_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL,
+ * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE,
+ * CM_L3INSTR_L3_3_CLKCTRL_RESTORE, CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE,
+ * CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO3_CLKCTRL_RESTORE, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO5_CLKCTRL_RESTORE, CM_L4PER_GPIO6_CLKCTRL_RESTORE,
+ * CM_ALWON_MDMINTC_CLKCTRL, CM_ALWON_SR_CORE_CLKCTRL, CM_ALWON_SR_IVA_CLKCTRL,
+ * CM_ALWON_SR_MPU_CLKCTRL, CM_IVAHD_IVAHD_CLKCTRL, CM_IVAHD_SL2_CLKCTRL,
+ * CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL, CM_CM2_PROFILING_CLKCTRL,
+ * CM_MPU_MPU_CLKCTRL, CM_TESLA_TESLA_CLKCTRL, CM1_ABE_AESS_CLKCTRL,
+ * CM1_ABE_DMIC_CLKCTRL, CM1_ABE_L4ABE_CLKCTRL, CM1_ABE_MCASP_CLKCTRL,
+ * CM1_ABE_MCBSP1_CLKCTRL, CM1_ABE_MCBSP2_CLKCTRL, CM1_ABE_MCBSP3_CLKCTRL,
+ * CM1_ABE_PDM_CLKCTRL, CM1_ABE_SLIMBUS_CLKCTRL, CM1_ABE_TIMER5_CLKCTRL,
+ * CM1_ABE_TIMER6_CLKCTRL, CM1_ABE_TIMER7_CLKCTRL, CM1_ABE_TIMER8_CLKCTRL,
+ * CM1_ABE_WDT3_CLKCTRL, CM_CM1_PROFILING_CLKCTRL
+ */
+#define OMAP4430_MODULEMODE_SHIFT (1 << 0)
+#define OMAP4430_MODULEMODE_MASK BITFIELD(0, 1)
+
+/* Used by CM_DSS_DSS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_48MHZ_CLK_SHIFT (1 << 9)
+#define OMAP4430_OPTFCLKEN_48MHZ_CLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_WKUP_BANDGAP_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_BGAP_32K_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_BGAP_32K_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_USBPHYOCP2SCP_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_CLK32K_SHIFT (1 << 9)
+#define OMAP4430_OPTFCLKEN_CLK32K_MASK BITFIELD(9, 9)
+
+/* Used by CM_CAM_ISS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_CTRLCLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_CTRLCLK_MASK BITFIELD(8, 8)
+
+/*
+ * Used by CM_WKUP_GPIO1_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL,
+ * CM_L4PER_GPIO3_CLKCTRL, CM_L4PER_GPIO4_CLKCTRL, CM_L4PER_GPIO5_CLKCTRL,
+ * CM_L4PER_GPIO6_CLKCTRL, CM_L4PER_GPIO2_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO3_CLKCTRL_RESTORE, CM_L4PER_GPIO4_CLKCTRL_RESTORE,
+ * CM_L4PER_GPIO5_CLKCTRL_RESTORE, CM_L4PER_GPIO6_CLKCTRL_RESTORE
+ */
+#define OMAP4430_OPTFCLKEN_DBCLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_DBCLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_MEMIF_DLL_CLKCTRL, CM_MEMIF_DLL_H_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_DLL_CLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_DLL_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_DSS_DSS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_DSSCLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_DSSCLK_MASK BITFIELD(8, 8)
+
+/* Used by CM1_ABE_SLIMBUS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_FCLK0_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_FCLK0_MASK BITFIELD(8, 8)
+
+/* Used by CM1_ABE_SLIMBUS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_FCLK1_SHIFT (1 << 9)
+#define OMAP4430_OPTFCLKEN_FCLK1_MASK BITFIELD(9, 9)
+
+/* Used by CM1_ABE_SLIMBUS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_FCLK2_SHIFT (1 << 10)
+#define OMAP4430_OPTFCLKEN_FCLK2_MASK BITFIELD(10, 10)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_FUNC48MCLK_SHIFT (1 << 15)
+#define OMAP4430_OPTFCLKEN_FUNC48MCLK_MASK BITFIELD(15, 15)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_SHIFT (1 << 13)
+#define OMAP4430_OPTFCLKEN_HSIC480M_P1_CLK_MASK BITFIELD(13, 13)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_SHIFT (1 << 14)
+#define OMAP4430_OPTFCLKEN_HSIC480M_P2_CLK_MASK BITFIELD(14, 14)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_SHIFT (1 << 11)
+#define OMAP4430_OPTFCLKEN_HSIC60M_P1_CLK_MASK BITFIELD(11, 11)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_SHIFT (1 << 12)
+#define OMAP4430_OPTFCLKEN_HSIC60M_P2_CLK_MASK BITFIELD(12, 12)
+
+/* Used by CM_L4PER_SLIMBUS2_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_PER24MC_GFCLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_PER24MC_GFCLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L4PER_SLIMBUS2_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_SHIFT (1 << 9)
+#define OMAP4430_OPTFCLKEN_PERABE24M_GFCLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L3INIT_USBPHYOCP2SCP_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_PHY_48M_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_PHY_48M_MASK BITFIELD(8, 8)
+
+/* Used by CM_L4PER_SLIMBUS2_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_SHIFT (1 << 10)
+#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_MASK BITFIELD(10, 10)
+
+/* Renamed from OPTFCLKEN_SLIMBUS_CLK Used by CM1_ABE_SLIMBUS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_SHIFT (1 << 11)
+#define OMAP4430_OPTFCLKEN_SLIMBUS_CLK_11_11_MASK BITFIELD(11, 11)
+
+/* Used by CM_DSS_DSS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_SYS_CLK_SHIFT (1 << 10)
+#define OMAP4430_OPTFCLKEN_SYS_CLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_DSS_DSS_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_TV_CLK_SHIFT (1 << 11)
+#define OMAP4430_OPTFCLKEN_TV_CLK_MASK BITFIELD(11, 11)
+
+/* Used by CM_L3INIT_UNIPRO1_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_TXPHYCLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_TXPHYCLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_USB_CH0_CLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_USB_CH0_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_USB_CH1_CLK_SHIFT (1 << 9)
+#define OMAP4430_OPTFCLKEN_USB_CH1_CLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L3INIT_USB_TLL_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_USB_CH2_CLK_SHIFT (1 << 10)
+#define OMAP4430_OPTFCLKEN_USB_CH2_CLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_UTMI_P1_CLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_SHIFT (1 << 9)
+#define OMAP4430_OPTFCLKEN_UTMI_P2_CLK_MASK BITFIELD(9, 9)
+
+/* Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE */
+#define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_SHIFT (1 << 10)
+#define OMAP4430_OPTFCLKEN_UTMI_P3_CLK_MASK BITFIELD(10, 10)
+
+/* Used by CM_L3INIT_USB_OTG_CLKCTRL */
+#define OMAP4430_OPTFCLKEN_XCLK_SHIFT (1 << 8)
+#define OMAP4430_OPTFCLKEN_XCLK_MASK BITFIELD(8, 8)
+
+/* Used by CM_EMU_OVERRIDE_DPLL_PER, CM_EMU_OVERRIDE_DPLL_CORE */
+#define OMAP4430_OVERRIDE_ENABLE_SHIFT (1 << 19)
+#define OMAP4430_OVERRIDE_ENABLE_MASK BITFIELD(19, 19)
+
+/* Used by CM_CLKSEL_ABE */
+#define OMAP4430_PAD_CLKS_GATE_SHIFT (1 << 8)
+#define OMAP4430_PAD_CLKS_GATE_MASK BITFIELD(8, 8)
+
+/* Used by CM_CORE_DVFS_CURRENT, CM_IVA_DVFS_CURRENT */
+#define OMAP4430_PERF_CURRENT_SHIFT (1 << 0)
+#define OMAP4430_PERF_CURRENT_MASK BITFIELD(0, 7)
+
+/*
+ * Used by CM_CORE_DVFS_PERF1, CM_CORE_DVFS_PERF2, CM_CORE_DVFS_PERF3,
+ * CM_CORE_DVFS_PERF4, CM_IVA_DVFS_PERF_ABE, CM_IVA_DVFS_PERF_IVAHD,
+ * CM_IVA_DVFS_PERF_TESLA
+ */
+#define OMAP4430_PERF_REQ_SHIFT (1 << 0)
+#define OMAP4430_PERF_REQ_MASK BITFIELD(0, 7)
+
+/* Used by CM_EMU_OVERRIDE_DPLL_PER */
+#define OMAP4430_PER_DPLL_EMU_DIV_SHIFT (1 << 0)
+#define OMAP4430_PER_DPLL_EMU_DIV_MASK BITFIELD(0, 6)
+
+/* Used by CM_EMU_OVERRIDE_DPLL_PER */
+#define OMAP4430_PER_DPLL_EMU_MULT_SHIFT (1 << 8)
+#define OMAP4430_PER_DPLL_EMU_MULT_MASK BITFIELD(8, 18)
+
+/* Used by CM_RESTORE_ST */
+#define OMAP4430_PHASE1_COMPLETED_SHIFT (1 << 0)
+#define OMAP4430_PHASE1_COMPLETED_MASK BITFIELD(0, 0)
+
+/* Used by CM_RESTORE_ST */
+#define OMAP4430_PHASE2A_COMPLETED_SHIFT (1 << 1)
+#define OMAP4430_PHASE2A_COMPLETED_MASK BITFIELD(1, 1)
+
+/* Used by CM_RESTORE_ST */
+#define OMAP4430_PHASE2B_COMPLETED_SHIFT (1 << 2)
+#define OMAP4430_PHASE2B_COMPLETED_MASK BITFIELD(2, 2)
+
+/* Used by CM_EMU_DEBUGSS_CLKCTRL */
+#define OMAP4430_PMD_STM_MUX_CTRL_SHIFT (1 << 20)
+#define OMAP4430_PMD_STM_MUX_CTRL_MASK BITFIELD(20, 21)
+
+/* Used by CM_EMU_DEBUGSS_CLKCTRL */
+#define OMAP4430_PMD_TRACE_MUX_CTRL_SHIFT (1 << 22)
+#define OMAP4430_PMD_TRACE_MUX_CTRL_MASK BITFIELD(22, 23)
+
+/* Used by CM_DYN_DEP_PRESCAL */
+#define OMAP4430_PRESCAL_SHIFT (1 << 0)
+#define OMAP4430_PRESCAL_MASK BITFIELD(0, 5)
+
+/* Used by REVISION_CM2, REVISION_CM1 */
+#define OMAP4430_REV_SHIFT (1 << 0)
+#define OMAP4430_REV_MASK BITFIELD(0, 7)
+
+/*
+ * Used by CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_TLL_CLKCTRL,
+ * CM_L3INIT_USB_HOST_CLKCTRL_RESTORE, CM_L3INIT_USB_TLL_CLKCTRL_RESTORE
+ */
+#define OMAP4430_SAR_MODE_SHIFT (1 << 4)
+#define OMAP4430_SAR_MODE_MASK BITFIELD(4, 4)
+
+/* Used by CM_SCALE_FCLK */
+#define OMAP4430_SCALE_FCLK_SHIFT (1 << 0)
+#define OMAP4430_SCALE_FCLK_MASK BITFIELD(0, 0)
+
+/* Used by CM_L4CFG_DYNAMICDEP */
+#define OMAP4430_SDMA_DYNDEP_SHIFT (1 << 11)
+#define OMAP4430_SDMA_DYNDEP_MASK BITFIELD(11, 11)
+
+/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP */
+#define OMAP4430_SDMA_STATDEP_SHIFT (1 << 11)
+#define OMAP4430_SDMA_STATDEP_MASK BITFIELD(11, 11)
+
+/* Used by CM_CLKSEL_ABE */
+#define OMAP4430_SLIMBUS_CLK_GATE_SHIFT (1 << 10)
+#define OMAP4430_SLIMBUS_CLK_GATE_MASK BITFIELD(10, 10)
+
+/*
+ * Used by CM_EMU_DEBUGSS_CLKCTRL, CM_D2D_SAD2D_CLKCTRL,
+ * CM_DUCATI_DUCATI_CLKCTRL, CM_SDMA_SDMA_CLKCTRL, CM_GFX_GFX_CLKCTRL,
+ * CM_L4SEC_CRYPTODMA_CLKCTRL, CM_L3INIT_CCPTX_CLKCTRL, CM_L3INIT_EMAC_CLKCTRL,
+ * CM_L3INIT_HSI_CLKCTRL, CM_L3INIT_MMC1_CLKCTRL, CM_L3INIT_MMC2_CLKCTRL,
+ * CM_L3INIT_MMC6_CLKCTRL, CM_L3INIT_P1500_CLKCTRL, CM_L3INIT_PCIESS_CLKCTRL,
+ * CM_L3INIT_SATA_CLKCTRL, CM_L3INIT_TPPSS_CLKCTRL, CM_L3INIT_UNIPRO1_CLKCTRL,
+ * CM_L3INIT_USB_HOST_CLKCTRL, CM_L3INIT_USB_HOST_FS_CLKCTRL,
+ * CM_L3INIT_USB_OTG_CLKCTRL, CM_L3INIT_XHPI_CLKCTRL, CM_CAM_FDIF_CLKCTRL,
+ * CM_CAM_ISS_CLKCTRL, CM_L3INIT_USB_HOST_CLKCTRL_RESTORE,
+ * CM_IVAHD_IVAHD_CLKCTRL, CM_DSS_DEISS_CLKCTRL, CM_DSS_DSS_CLKCTRL,
+ * CM_MPU_MPU_CLKCTRL, CM_TESLA_TESLA_CLKCTRL, CM1_ABE_AESS_CLKCTRL
+ */
+#define OMAP4430_STBYST_SHIFT (1 << 18)
+#define OMAP4430_STBYST_MASK BITFIELD(18, 18)
+
+/*
+ * Used by CM_IDLEST_DPLL_PER, CM_IDLEST_DPLL_UNIPRO, CM_IDLEST_DPLL_USB,
+ * CM_IDLEST_DPLL_ABE, CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDRPHY,
+ * CM_IDLEST_DPLL_IVA, CM_IDLEST_DPLL_MPU
+ */
+#define OMAP4430_ST_DPLL_CLK_SHIFT (1 << 0)
+#define OMAP4430_ST_DPLL_CLK_MASK BITFIELD(0, 0)
+
+/* Used by CM_CLKDCOLDO_DPLL_USB */
+#define OMAP4430_ST_DPLL_CLKDCOLDO_SHIFT (1 << 9)
+#define OMAP4430_ST_DPLL_CLKDCOLDO_MASK BITFIELD(9, 9)
+
+/*
+ * Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_USB, CM_DIV_M2_DPLL_CORE_RESTORE,
+ * CM_DIV_M2_DPLL_ABE, CM_DIV_M2_DPLL_CORE, CM_DIV_M2_DPLL_DDRPHY,
+ * CM_DIV_M2_DPLL_MPU
+ */
+#define OMAP4430_ST_DPLL_CLKOUT_SHIFT (1 << 9)
+#define OMAP4430_ST_DPLL_CLKOUT_MASK BITFIELD(9, 9)
+
+/*
+ * Used by CM_DIV_M3_DPLL_PER, CM_DIV_M3_DPLL_CORE_RESTORE, CM_DIV_M3_DPLL_ABE,
+ * CM_DIV_M3_DPLL_CORE
+ */
+#define OMAP4430_ST_DPLL_CLKOUTHIF_SHIFT (1 << 9)
+#define OMAP4430_ST_DPLL_CLKOUTHIF_MASK BITFIELD(9, 9)
+
+/* Used by CM_DIV_M2_DPLL_PER, CM_DIV_M2_DPLL_UNIPRO, CM_DIV_M2_DPLL_ABE */
+#define OMAP4430_ST_DPLL_CLKOUTX2_SHIFT (1 << 11)
+#define OMAP4430_ST_DPLL_CLKOUTX2_MASK BITFIELD(11, 11)
+
+/*
+ * Used by CM_DIV_M4_DPLL_PER, CM_DIV_M4_DPLL_CORE_RESTORE,
+ * CM_DIV_M4_DPLL_CORE, CM_DIV_M4_DPLL_DDRPHY, CM_DIV_M4_DPLL_IVA
+ */
+#define OMAP4430_ST_HSDIVIDER_CLKOUT1_SHIFT (1 << 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT1_MASK BITFIELD(9, 9)
+
+/*
+ * Used by CM_DIV_M5_DPLL_PER, CM_DIV_M5_DPLL_CORE_RESTORE,
+ * CM_DIV_M5_DPLL_CORE, CM_DIV_M5_DPLL_DDRPHY, CM_DIV_M5_DPLL_IVA
+ */
+#define OMAP4430_ST_HSDIVIDER_CLKOUT2_SHIFT (1 << 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT2_MASK BITFIELD(9, 9)
+
+/*
+ * Used by CM_DIV_M6_DPLL_PER, CM_DIV_M6_DPLL_CORE_RESTORE,
+ * CM_DIV_M6_DPLL_CORE, CM_DIV_M6_DPLL_DDRPHY
+ */
+#define OMAP4430_ST_HSDIVIDER_CLKOUT3_SHIFT (1 << 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT3_MASK BITFIELD(9, 9)
+
+/*
+ * Used by CM_DIV_M7_DPLL_PER, CM_DIV_M7_DPLL_CORE_RESTORE,
+ * CM_DIV_M7_DPLL_CORE
+ */
+#define OMAP4430_ST_HSDIVIDER_CLKOUT4_SHIFT (1 << 9)
+#define OMAP4430_ST_HSDIVIDER_CLKOUT4_MASK BITFIELD(9, 9)
+
+/* Used by CM_SYS_CLKSEL */
+#define OMAP4430_SYS_CLKSEL_SHIFT (1 << 0)
+#define OMAP4430_SYS_CLKSEL_MASK BITFIELD(0, 2)
+
+/* Used by CM_L4CFG_DYNAMICDEP */
+#define OMAP4430_TESLA_DYNDEP_SHIFT (1 << 1)
+#define OMAP4430_TESLA_DYNDEP_MASK BITFIELD(1, 1)
+
+/* Used by CM_DUCATI_STATICDEP, CM_MPU_STATICDEP */
+#define OMAP4430_TESLA_STATDEP_SHIFT (1 << 1)
+#define OMAP4430_TESLA_STATDEP_MASK BITFIELD(1, 1)
+
+/*
+ * Used by CM_EMU_DYNAMICDEP, CM_D2D_DYNAMICDEP, CM_DUCATI_DYNAMICDEP,
+ * CM_L3_1_DYNAMICDEP, CM_L3_2_DYNAMICDEP, CM_L4CFG_DYNAMICDEP,
+ * CM_L4PER_DYNAMICDEP, CM_MPU_DYNAMICDEP, CM_TESLA_DYNAMICDEP
+ */
+#define OMAP4430_WINDOWSIZE_SHIFT (1 << 24)
+#define OMAP4430_WINDOWSIZE_MASK BITFIELD(24, 27)
+#endif
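
For reference, the SHIFT/MASK pairs above are consumed with the usual read-modify-write pattern. The sketch below is illustrative only and not part of the patch; the register macro OMAP4430_CM_CLKSEL_ABE is defined by cm44xx.h later in this diff, and __ffs() is used so the field offset is derived from the mask rather than from the _SHIFT macro.

/* Minimal sketch, not part of the patch: gate or ungate the ABE
 * SLIMBUS clock by rewriting one field behind its *_MASK macro. */
static void example_set_slimbus_clk_gate(u8 gate)
{
	u32 v;

	v = __raw_readl(OMAP4430_CM_CLKSEL_ABE);
	v &= ~OMAP4430_SLIMBUS_CLK_GATE_MASK;
	v |= (u32)(gate & 1) << __ffs(OMAP4430_SLIMBUS_CLK_GATE_MASK);
	__raw_writel(v, OMAP4430_CM_CLKSEL_ABE);
}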
diff --git a/arch/arm/mach-omap2/cm.c b/arch/arm/mach-omap2/cm.c
index 8eb2dab8c7d..58e4a1c557d 100644
--- a/arch/arm/mach-omap2/cm.c
+++ b/arch/arm/mach-omap2/cm.c
@@ -21,6 +21,8 @@
#include <asm/atomic.h>
+#include <plat/common.h>
+
#include "cm.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
@@ -61,9 +63,8 @@ int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)
mask = 1 << idlest_shift;
/* XXX should be OMAP2 CM */
- while (((cm_read_mod_reg(prcm_mod, cm_idlest_reg) & mask) != ena) &&
- (i++ < MAX_MODULE_READY_TIME))
- udelay(1);
+ omap_test_timeout(((cm_read_mod_reg(prcm_mod, cm_idlest_reg) & mask) == ena),
+ MAX_MODULE_READY_TIME, i);
return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}
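
The omap_test_timeout() call above replaces the open-coded udelay() polling loop. Such a macro is conventionally defined along the following lines (a sketch of the assumed plat/common.h helper, not quoted from it): it polls the condition up to 'timeout' times with a 1 us delay per iteration and leaves the iteration count in 'index' so the caller can detect a timeout.

/* Sketch of a typical omap_test_timeout() definition (assumed form). */
#define omap_test_timeout(cond, timeout, index)			\
({								\
	for (index = 0; index < timeout; index++) {		\
		if (cond)					\
			break;					\
		udelay(1);					\
	}							\
})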
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index a2fcfcc253c..90a4086fbdf 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -4,8 +4,8 @@
/*
* OMAP2/3 Clock Management (CM) register definitions
*
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2008 Nokia Corporation
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
*
* Written by Paul Walmsley
*
@@ -22,6 +22,12 @@
OMAP2_L4_IO_ADDRESS(OMAP2430_CM_BASE + (module) + (reg))
#define OMAP34XX_CM_REGADDR(module, reg) \
OMAP2_L4_IO_ADDRESS(OMAP3430_CM_BASE + (module) + (reg))
+#define OMAP44XX_CM1_REGADDR(module, reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP4430_CM1_BASE + (module) + (reg))
+#define OMAP44XX_CM2_REGADDR(module, reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP4430_CM2_BASE + (module) + (reg))
+
+#include "cm44xx.h"
/*
* Architecture-specific global CM registers
@@ -89,6 +95,11 @@
#define OMAP3430_CM_CLKSEL2_EMU 0x0050
#define OMAP3430_CM_CLKSEL3_EMU 0x0054
+/* CM2.CEFUSE_CM2 register offsets */
+
+/* OMAP4 modulemode control */
+#define OMAP4430_MODULEMODE_HWCTRL 0
+#define OMAP4430_MODULEMODE_SWCTRL 1
/* Clock management domain register get/set */
diff --git a/arch/arm/mach-omap2/cm44xx.h b/arch/arm/mach-omap2/cm44xx.h
new file mode 100644
index 00000000000..c575b9b0c04
--- /dev/null
+++ b/arch/arm/mach-omap2/cm44xx.h
@@ -0,0 +1,358 @@
+/*
+ * OMAP44xx CM1 & CM2 instance offset macros
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM44XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM44XX_H
+
+
+/* CM1 */
+
+
+/* CM1.OCP_SOCKET_CM1 register offsets */
+#define OMAP4430_REVISION_CM1 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4430_CM_CM1_PROFILING_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_OCP_SOCKET_MOD, 0x0040)
+
+/* CM1.CKGEN_CM1 register offsets */
+#define OMAP4430_CM_CLKSEL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0000)
+#define OMAP4430_CM_CLKSEL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0008)
+#define OMAP4430_CM_DLL_CTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0010)
+#define OMAP4430_CM_CLKMODE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0020)
+#define OMAP4430_CM_IDLEST_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0024)
+#define OMAP4430_CM_AUTOIDLE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0028)
+#define OMAP4430_CM_CLKSEL_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x002c)
+#define OMAP4430_CM_DIV_M2_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0030)
+#define OMAP4430_CM_DIV_M3_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0034)
+#define OMAP4430_CM_DIV_M4_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0038)
+#define OMAP4430_CM_DIV_M5_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x003c)
+#define OMAP4430_CM_DIV_M6_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0040)
+#define OMAP4430_CM_DIV_M7_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0044)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0048)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x004c)
+#define OMAP4430_CM_EMU_OVERRIDE_DPLL_CORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0050)
+#define OMAP4430_CM_CLKMODE_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0060)
+#define OMAP4430_CM_IDLEST_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0064)
+#define OMAP4430_CM_AUTOIDLE_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0068)
+#define OMAP4430_CM_CLKSEL_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x006c)
+#define OMAP4430_CM_DIV_M2_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0070)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0088)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x008c)
+#define OMAP4430_CM_BYPCLK_DPLL_MPU OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x009c)
+#define OMAP4430_CM_CLKMODE_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a0)
+#define OMAP4430_CM_IDLEST_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a4)
+#define OMAP4430_CM_AUTOIDLE_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00a8)
+#define OMAP4430_CM_CLKSEL_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ac)
+#define OMAP4430_CM_DIV_M4_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00b8)
+#define OMAP4430_CM_DIV_M5_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00bc)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00c8)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00cc)
+#define OMAP4430_CM_BYPCLK_DPLL_IVA OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00dc)
+#define OMAP4430_CM_CLKMODE_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e0)
+#define OMAP4430_CM_IDLEST_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e4)
+#define OMAP4430_CM_AUTOIDLE_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00e8)
+#define OMAP4430_CM_CLKSEL_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00ec)
+#define OMAP4430_CM_DIV_M2_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f0)
+#define OMAP4430_CM_DIV_M3_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x00f4)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0108)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_ABE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x010c)
+#define OMAP4430_CM_CLKMODE_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0120)
+#define OMAP4430_CM_IDLEST_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0124)
+#define OMAP4430_CM_AUTOIDLE_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0128)
+#define OMAP4430_CM_CLKSEL_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x012c)
+#define OMAP4430_CM_DIV_M2_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0130)
+#define OMAP4430_CM_DIV_M4_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0138)
+#define OMAP4430_CM_DIV_M5_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x013c)
+#define OMAP4430_CM_DIV_M6_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0140)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0148)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_DDRPHY OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x014c)
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG1 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0160)
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG2 OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0164)
+#define OMAP4430_CM_DYN_DEP_PRESCAL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0170)
+#define OMAP4430_CM_RESTORE_ST OMAP44XX_CM1_REGADDR(OMAP4430_CM1_CKGEN_MOD, 0x0180)
+
+/* CM1.MPU_CM1 register offsets */
+#define OMAP4430_CM_MPU_CLKSTCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0000)
+#define OMAP4430_CM_MPU_STATICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0004)
+#define OMAP4430_CM_MPU_DYNAMICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0008)
+#define OMAP4430_CM_MPU_MPU_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_MPU_MOD, 0x0020)
+
+/* CM1.TESLA_CM1 register offsets */
+#define OMAP4430_CM_TESLA_CLKSTCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0000)
+#define OMAP4430_CM_TESLA_STATICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0004)
+#define OMAP4430_CM_TESLA_DYNAMICDEP OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0008)
+#define OMAP4430_CM_TESLA_TESLA_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_TESLA_MOD, 0x0020)
+
+/* CM1.ABE_CM1 register offsets */
+#define OMAP4430_CM1_ABE_CLKSTCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0000)
+#define OMAP4430_CM1_ABE_L4ABE_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0020)
+#define OMAP4430_CM1_ABE_AESS_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0028)
+#define OMAP4430_CM1_ABE_PDM_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0030)
+#define OMAP4430_CM1_ABE_DMIC_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0038)
+#define OMAP4430_CM1_ABE_MCASP_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0040)
+#define OMAP4430_CM1_ABE_MCBSP1_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0048)
+#define OMAP4430_CM1_ABE_MCBSP2_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0050)
+#define OMAP4430_CM1_ABE_MCBSP3_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0058)
+#define OMAP4430_CM1_ABE_SLIMBUS_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0060)
+#define OMAP4430_CM1_ABE_TIMER5_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0068)
+#define OMAP4430_CM1_ABE_TIMER6_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0070)
+#define OMAP4430_CM1_ABE_TIMER7_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0078)
+#define OMAP4430_CM1_ABE_TIMER8_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0080)
+#define OMAP4430_CM1_ABE_WDT3_CLKCTRL OMAP44XX_CM1_REGADDR(OMAP4430_CM1_ABE_MOD, 0x0088)
+
+/* CM1.RESTORE_CM1 register offsets */
+#define OMAP4430_CM_CLKSEL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0000)
+#define OMAP4430_CM_DIV_M2_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0004)
+#define OMAP4430_CM_DIV_M3_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0008)
+#define OMAP4430_CM_DIV_M4_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x000c)
+#define OMAP4430_CM_DIV_M5_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0010)
+#define OMAP4430_CM_DIV_M6_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0014)
+#define OMAP4430_CM_DIV_M7_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0018)
+#define OMAP4430_CM_CLKSEL_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x001c)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0020)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0024)
+#define OMAP4430_CM_CLKMODE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0028)
+#define OMAP4430_CM_SHADOW_FREQ_CONFIG1_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x002c)
+#define OMAP4430_CM_AUTOIDLE_DPLL_CORE_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0030)
+#define OMAP4430_CM_MPU_CLKSTCTRL_RESTORE OMAP44XX_CM1_REGADDR(OMAP4430_CM1_RESTORE_MOD, 0x0034)
+
+/* CM2 */
+
+
+/* CM2.OCP_SOCKET_CM2 register offsets */
+#define OMAP4430_REVISION_CM2 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4430_CM_CM2_PROFILING_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_OCP_SOCKET_MOD, 0x0040)
+
+/* CM2.CKGEN_CM2 register offsets */
+#define OMAP4430_CM_CLKSEL_DUCATI_ISS_ROOT OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0000)
+#define OMAP4430_CM_CLKSEL_USB_60MHZ OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0004)
+#define OMAP4430_CM_SCALE_FCLK OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0008)
+#define OMAP4430_CM_CORE_DVFS_PERF1 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0010)
+#define OMAP4430_CM_CORE_DVFS_PERF2 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0014)
+#define OMAP4430_CM_CORE_DVFS_PERF3 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0018)
+#define OMAP4430_CM_CORE_DVFS_PERF4 OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x001c)
+#define OMAP4430_CM_CORE_DVFS_CURRENT OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0024)
+#define OMAP4430_CM_IVA_DVFS_PERF_TESLA OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0028)
+#define OMAP4430_CM_IVA_DVFS_PERF_IVAHD OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x002c)
+#define OMAP4430_CM_IVA_DVFS_PERF_ABE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0030)
+#define OMAP4430_CM_IVA_DVFS_CURRENT OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0038)
+#define OMAP4430_CM_CLKMODE_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0040)
+#define OMAP4430_CM_IDLEST_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0044)
+#define OMAP4430_CM_AUTOIDLE_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0048)
+#define OMAP4430_CM_CLKSEL_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x004c)
+#define OMAP4430_CM_DIV_M2_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0050)
+#define OMAP4430_CM_DIV_M3_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0054)
+#define OMAP4430_CM_DIV_M4_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0058)
+#define OMAP4430_CM_DIV_M5_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x005c)
+#define OMAP4430_CM_DIV_M6_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0060)
+#define OMAP4430_CM_DIV_M7_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0064)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0068)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x006c)
+#define OMAP4430_CM_EMU_OVERRIDE_DPLL_PER OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0070)
+#define OMAP4430_CM_CLKMODE_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0080)
+#define OMAP4430_CM_IDLEST_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0084)
+#define OMAP4430_CM_AUTOIDLE_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0088)
+#define OMAP4430_CM_CLKSEL_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x008c)
+#define OMAP4430_CM_DIV_M2_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x0090)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00a8)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ac)
+#define OMAP4430_CM_CLKDCOLDO_DPLL_USB OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00b4)
+#define OMAP4430_CM_CLKMODE_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c0)
+#define OMAP4430_CM_IDLEST_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c4)
+#define OMAP4430_CM_AUTOIDLE_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00c8)
+#define OMAP4430_CM_CLKSEL_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00cc)
+#define OMAP4430_CM_DIV_M2_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00d0)
+#define OMAP4430_CM_SSC_DELTAMSTEP_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00e8)
+#define OMAP4430_CM_SSC_MODFREQDIV_DPLL_UNIPRO OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CKGEN_MOD, 0x00ec)
+
+/* CM2.ALWAYS_ON_CM2 register offsets */
+#define OMAP4430_CM_ALWON_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0000)
+#define OMAP4430_CM_ALWON_MDMINTC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0020)
+#define OMAP4430_CM_ALWON_SR_MPU_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0028)
+#define OMAP4430_CM_ALWON_SR_IVA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0030)
+#define OMAP4430_CM_ALWON_SR_CORE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_ALWAYS_ON_MOD, 0x0038)
+
+/* CM2.CORE_CM2 register offsets */
+#define OMAP4430_CM_L3_1_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0000)
+#define OMAP4430_CM_L3_1_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0008)
+#define OMAP4430_CM_L3_1_L3_1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0020)
+#define OMAP4430_CM_L3_2_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0100)
+#define OMAP4430_CM_L3_2_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0108)
+#define OMAP4430_CM_L3_2_L3_2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0120)
+#define OMAP4430_CM_L3_2_GPMC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0128)
+#define OMAP4430_CM_L3_2_OCMC_RAM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0130)
+#define OMAP4430_CM_DUCATI_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0200)
+#define OMAP4430_CM_DUCATI_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0204)
+#define OMAP4430_CM_DUCATI_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0208)
+#define OMAP4430_CM_DUCATI_DUCATI_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0220)
+#define OMAP4430_CM_SDMA_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0300)
+#define OMAP4430_CM_SDMA_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0304)
+#define OMAP4430_CM_SDMA_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0308)
+#define OMAP4430_CM_SDMA_SDMA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0320)
+#define OMAP4430_CM_MEMIF_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0400)
+#define OMAP4430_CM_MEMIF_DMM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0420)
+#define OMAP4430_CM_MEMIF_EMIF_FW_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0428)
+#define OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0430)
+#define OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0438)
+#define OMAP4430_CM_MEMIF_DLL_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0440)
+#define OMAP4430_CM_MEMIF_EMIF_H1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0450)
+#define OMAP4430_CM_MEMIF_EMIF_H2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0458)
+#define OMAP4430_CM_MEMIF_DLL_H_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0460)
+#define OMAP4430_CM_D2D_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0500)
+#define OMAP4430_CM_D2D_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0504)
+#define OMAP4430_CM_D2D_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0508)
+#define OMAP4430_CM_D2D_SAD2D_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0520)
+#define OMAP4430_CM_D2D_MODEM_ICR_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0528)
+#define OMAP4430_CM_D2D_SAD2D_FW_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0530)
+#define OMAP4430_CM_L4CFG_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0600)
+#define OMAP4430_CM_L4CFG_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0608)
+#define OMAP4430_CM_L4CFG_L4_CFG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0620)
+#define OMAP4430_CM_L4CFG_HW_SEM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0628)
+#define OMAP4430_CM_L4CFG_MAILBOX_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0630)
+#define OMAP4430_CM_L4CFG_SAR_ROM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0638)
+#define OMAP4430_CM_L3INSTR_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0700)
+#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0720)
+#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0728)
+#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CORE_MOD, 0x0740)
+
+/* CM2.IVAHD_CM2 register offsets */
+#define OMAP4430_CM_IVAHD_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0000)
+#define OMAP4430_CM_IVAHD_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0004)
+#define OMAP4430_CM_IVAHD_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0008)
+#define OMAP4430_CM_IVAHD_IVAHD_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0020)
+#define OMAP4430_CM_IVAHD_SL2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_IVAHD_MOD, 0x0028)
+
+/* CM2.CAM_CM2 register offsets */
+#define OMAP4430_CM_CAM_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0000)
+#define OMAP4430_CM_CAM_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0004)
+#define OMAP4430_CM_CAM_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0008)
+#define OMAP4430_CM_CAM_ISS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0020)
+#define OMAP4430_CM_CAM_FDIF_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CAM_MOD, 0x0028)
+
+/* CM2.DSS_CM2 register offsets */
+#define OMAP4430_CM_DSS_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0000)
+#define OMAP4430_CM_DSS_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0004)
+#define OMAP4430_CM_DSS_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0008)
+#define OMAP4430_CM_DSS_DSS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0020)
+#define OMAP4430_CM_DSS_DEISS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_DSS_MOD, 0x0028)
+
+/* CM2.GFX_CM2 register offsets */
+#define OMAP4430_CM_GFX_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0000)
+#define OMAP4430_CM_GFX_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0004)
+#define OMAP4430_CM_GFX_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0008)
+#define OMAP4430_CM_GFX_GFX_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_GFX_MOD, 0x0020)
+
+/* CM2.L3INIT_CM2 register offsets */
+#define OMAP4430_CM_L3INIT_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0000)
+#define OMAP4430_CM_L3INIT_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0004)
+#define OMAP4430_CM_L3INIT_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0008)
+#define OMAP4430_CM_L3INIT_MMC1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0028)
+#define OMAP4430_CM_L3INIT_MMC2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0030)
+#define OMAP4430_CM_L3INIT_HSI_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0038)
+#define OMAP4430_CM_L3INIT_UNIPRO1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0040)
+#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0058)
+#define OMAP4430_CM_L3INIT_USB_OTG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0060)
+#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0068)
+#define OMAP4430_CM_L3INIT_P1500_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0078)
+#define OMAP4430_CM_L3INIT_EMAC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0080)
+#define OMAP4430_CM_L3INIT_SATA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0088)
+#define OMAP4430_CM_L3INIT_TPPSS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0090)
+#define OMAP4430_CM_L3INIT_PCIESS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x0098)
+#define OMAP4430_CM_L3INIT_CCPTX_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00a8)
+#define OMAP4430_CM_L3INIT_XHPI_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c0)
+#define OMAP4430_CM_L3INIT_MMC6_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00c8)
+#define OMAP4430_CM_L3INIT_USB_HOST_FS_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00d0)
+#define OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L3INIT_MOD, 0x00e0)
+
+/* CM2.L4PER_CM2 register offsets */
+#define OMAP4430_CM_L4PER_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0000)
+#define OMAP4430_CM_L4PER_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0008)
+#define OMAP4430_CM_L4PER_ADC_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0020)
+#define OMAP4430_CM_L4PER_DMTIMER10_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0028)
+#define OMAP4430_CM_L4PER_DMTIMER11_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0030)
+#define OMAP4430_CM_L4PER_DMTIMER2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0038)
+#define OMAP4430_CM_L4PER_DMTIMER3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0040)
+#define OMAP4430_CM_L4PER_DMTIMER4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0048)
+#define OMAP4430_CM_L4PER_DMTIMER9_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0050)
+#define OMAP4430_CM_L4PER_ELM_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0058)
+#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0060)
+#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0068)
+#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0070)
+#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0078)
+#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0080)
+#define OMAP4430_CM_L4PER_HDQ1W_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0088)
+#define OMAP4430_CM_L4PER_HECC1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0090)
+#define OMAP4430_CM_L4PER_HECC2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0098)
+#define OMAP4430_CM_L4PER_I2C1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a0)
+#define OMAP4430_CM_L4PER_I2C2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00a8)
+#define OMAP4430_CM_L4PER_I2C3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b0)
+#define OMAP4430_CM_L4PER_I2C4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00b8)
+#define OMAP4430_CM_L4PER_L4PER_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00c0)
+#define OMAP4430_CM_L4PER_MCASP2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d0)
+#define OMAP4430_CM_L4PER_MCASP3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00d8)
+#define OMAP4430_CM_L4PER_MCBSP4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e0)
+#define OMAP4430_CM_L4PER_MGATE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00e8)
+#define OMAP4430_CM_L4PER_MCSPI1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f0)
+#define OMAP4430_CM_L4PER_MCSPI2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x00f8)
+#define OMAP4430_CM_L4PER_MCSPI3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0100)
+#define OMAP4430_CM_L4PER_MCSPI4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0108)
+#define OMAP4430_CM_L4PER_MMCSD3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0120)
+#define OMAP4430_CM_L4PER_MMCSD4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0128)
+#define OMAP4430_CM_L4PER_MSPROHG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0130)
+#define OMAP4430_CM_L4PER_SLIMBUS2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0138)
+#define OMAP4430_CM_L4PER_UART1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0140)
+#define OMAP4430_CM_L4PER_UART2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0148)
+#define OMAP4430_CM_L4PER_UART3_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0150)
+#define OMAP4430_CM_L4PER_UART4_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0158)
+#define OMAP4430_CM_L4PER_MMCSD5_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0160)
+#define OMAP4430_CM_L4PER_I2C5_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0168)
+#define OMAP4430_CM_L4SEC_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0180)
+#define OMAP4430_CM_L4SEC_STATICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0184)
+#define OMAP4430_CM_L4SEC_DYNAMICDEP OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x0188)
+#define OMAP4430_CM_L4SEC_AES1_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a0)
+#define OMAP4430_CM_L4SEC_AES2_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01a8)
+#define OMAP4430_CM_L4SEC_DES3DES_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b0)
+#define OMAP4430_CM_L4SEC_PKAEIP29_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01b8)
+#define OMAP4430_CM_L4SEC_RNG_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c0)
+#define OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01c8)
+#define OMAP4430_CM_L4SEC_CRYPTODMA_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_L4PER_MOD, 0x01d8)
+
+/* CM2.CEFUSE_CM2 register offsets */
+#define OMAP4430_CM_CEFUSE_CLKSTCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0000)
+#define OMAP4430_CM_CEFUSE_CEFUSE_CLKCTRL OMAP44XX_CM2_REGADDR(OMAP4430_CM2_CEFUSE_MOD, 0x0020)
+
+/* CM2.RESTORE_CM2 register offsets */
+#define OMAP4430_CM_L3_1_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0000)
+#define OMAP4430_CM_L3_2_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0004)
+#define OMAP4430_CM_L4CFG_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0008)
+#define OMAP4430_CM_MEMIF_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x000c)
+#define OMAP4430_CM_L4PER_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0010)
+#define OMAP4430_CM_L3INIT_CLKSTCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0014)
+#define OMAP4430_CM_L3INSTR_L3_3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0018)
+#define OMAP4430_CM_L3INSTR_L3_INSTR_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x001c)
+#define OMAP4430_CM_L3INSTR_OCP_WP1_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0020)
+#define OMAP4430_CM_L4PER_GPIO2_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0024)
+#define OMAP4430_CM_L4PER_GPIO3_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0028)
+#define OMAP4430_CM_L4PER_GPIO4_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x002c)
+#define OMAP4430_CM_L4PER_GPIO5_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0030)
+#define OMAP4430_CM_L4PER_GPIO6_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0034)
+#define OMAP4430_CM_L3INIT_USB_HOST_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0038)
+#define OMAP4430_CM_L3INIT_USB_TLL_CLKCTRL_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x003c)
+#define OMAP4430_CM_SDMA_STATICDEP_RESTORE OMAP44XX_CM2_REGADDR(OMAP4430_CM2_RESTORE_MOD, 0x0040)
+#endif
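
Each of the offsets above expands, through OMAP44XX_CM1_REGADDR()/OMAP44XX_CM2_REGADDR() and OMAP2_L4_IO_ADDRESS(), to a virtual address that can be handed straight to the MMIO accessors. A minimal sketch follows (illustrative only; the register chosen and the pr_info() are not part of the patch).

/* Illustrative only: dump the CM1 revision register. */
static void __init example_dump_cm1_revision(void)
{
	u32 rev = __raw_readl(OMAP4430_REVISION_CM1);

	pr_info("CM1 revision: 0x%08x\n", rev);
}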
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 733d3dcff98..18ad93160ab 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -27,6 +27,8 @@
#include <mach/gpio.h>
#include <plat/mmc.h>
+#include "mux.h"
+
#if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
static struct resource cam_resources[] = {
@@ -595,27 +597,40 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
if (cpu_is_omap34xx()) {
if (controller_nr == 0) {
- omap_cfg_reg(N28_3430_MMC1_CLK);
- omap_cfg_reg(M27_3430_MMC1_CMD);
- omap_cfg_reg(N27_3430_MMC1_DAT0);
+ omap_mux_init_signal("sdmmc1_clk",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_cmd",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat0",
+ OMAP_PIN_INPUT_PULLUP);
if (mmc_controller->slots[0].wires == 4 ||
mmc_controller->slots[0].wires == 8) {
- omap_cfg_reg(N26_3430_MMC1_DAT1);
- omap_cfg_reg(N25_3430_MMC1_DAT2);
- omap_cfg_reg(P28_3430_MMC1_DAT3);
+ omap_mux_init_signal("sdmmc1_dat1",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat2",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat3",
+ OMAP_PIN_INPUT_PULLUP);
}
if (mmc_controller->slots[0].wires == 8) {
- omap_cfg_reg(P27_3430_MMC1_DAT4);
- omap_cfg_reg(P26_3430_MMC1_DAT5);
- omap_cfg_reg(R27_3430_MMC1_DAT6);
- omap_cfg_reg(R25_3430_MMC1_DAT7);
+ omap_mux_init_signal("sdmmc1_dat4",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat5",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat6",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc1_dat7",
+ OMAP_PIN_INPUT_PULLUP);
}
}
if (controller_nr == 1) {
/* MMC2 */
- omap_cfg_reg(AE2_3430_MMC2_CLK);
- omap_cfg_reg(AG5_3430_MMC2_CMD);
- omap_cfg_reg(AH5_3430_MMC2_DAT0);
+ omap_mux_init_signal("sdmmc2_clk",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_cmd",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat0",
+ OMAP_PIN_INPUT_PULLUP);
/*
* For 8 wire configurations, Lines DAT4, 5, 6 and 7 need to be muxed
@@ -623,15 +638,22 @@ static inline void omap2_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
*/
if (mmc_controller->slots[0].wires == 4 ||
mmc_controller->slots[0].wires == 8) {
- omap_cfg_reg(AH4_3430_MMC2_DAT1);
- omap_cfg_reg(AG4_3430_MMC2_DAT2);
- omap_cfg_reg(AF4_3430_MMC2_DAT3);
+ omap_mux_init_signal("sdmmc2_dat1",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat2",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat3",
+ OMAP_PIN_INPUT_PULLUP);
}
if (mmc_controller->slots[0].wires == 8) {
- omap_cfg_reg(AE4_3430_MMC2_DAT4);
- omap_cfg_reg(AH3_3430_MMC2_DAT5);
- omap_cfg_reg(AF3_3430_MMC2_DAT6);
- omap_cfg_reg(AE3_3430_MMC2_DAT7);
+ omap_mux_init_signal("sdmmc2_dat4.sdmmc2_dat4",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat5.sdmmc2_dat5",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat6.sdmmc2_dat6",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("sdmmc2_dat7.sdmmc2_dat7",
+ OMAP_PIN_INPUT_PULLUP);
}
}
diff --git a/arch/arm/mach-omap2/dpll.c b/arch/arm/mach-omap2/dpll.c
new file mode 100644
index 00000000000..f6055b49329
--- /dev/null
+++ b/arch/arm/mach-omap2/dpll.c
@@ -0,0 +1,538 @@
+/*
+ * OMAP3/4 - specific DPLL control functions
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Written by Paul Walmsley
+ * Testing and integration fixes by Jouni Högander
+ *
+ * Parts of this code are based on code written by
+ * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/limits.h>
+#include <linux/bitops.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/sram.h>
+#include <asm/div64.h>
+#include <asm/clkdev.h>
+
+#include "clock.h"
+#include "prm.h"
+#include "prm-regbits-34xx.h"
+#include "cm.h"
+#include "cm-regbits-34xx.h"
+
+/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
+#define DPLL_AUTOIDLE_DISABLE 0x0
+#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1
+
+#define MAX_DPLL_WAIT_TRIES 1000000
+
+
+/**
+ * omap3_dpll_recalc - recalculate DPLL rate
+ * @clk: DPLL struct clk
+ *
+ * Recalculate and propagate the DPLL rate.
+ */
+unsigned long omap3_dpll_recalc(struct clk *clk)
+{
+ return omap2_get_dpll_rate(clk);
+}
+
+/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
+static void _omap3_dpll_write_clken(struct clk *clk, u8 clken_bits)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ dd = clk->dpll_data;
+
+ v = __raw_readl(dd->control_reg);
+ v &= ~dd->enable_mask;
+ v |= clken_bits << __ffs(dd->enable_mask);
+ __raw_writel(v, dd->control_reg);
+}
+
+/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
+static int _omap3_wait_dpll_status(struct clk *clk, u8 state)
+{
+ const struct dpll_data *dd;
+ int i = 0;
+ int ret = -EINVAL;
+
+ dd = clk->dpll_data;
+
+ state <<= __ffs(dd->idlest_mask);
+
+ while (((__raw_readl(dd->idlest_reg) & dd->idlest_mask) != state) &&
+ i < MAX_DPLL_WAIT_TRIES) {
+ i++;
+ udelay(1);
+ }
+
+ if (i == MAX_DPLL_WAIT_TRIES) {
+ printk(KERN_ERR "clock: %s failed transition to '%s'\n",
+ clk->name, (state) ? "locked" : "bypassed");
+ } else {
+ pr_debug("clock: %s transition to '%s' in %d loops\n",
+ clk->name, (state) ? "locked" : "bypassed", i);
+
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* From 3430 TRM ES2 4.7.6.2 */
+static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
+{
+ unsigned long fint;
+ u16 f = 0;
+
+ fint = clk->dpll_data->clk_ref->rate / n;
+
+ pr_debug("clock: fint is %lu\n", fint);
+
+ if (fint >= 750000 && fint <= 1000000)
+ f = 0x3;
+ else if (fint > 1000000 && fint <= 1250000)
+ f = 0x4;
+ else if (fint > 1250000 && fint <= 1500000)
+ f = 0x5;
+ else if (fint > 1500000 && fint <= 1750000)
+ f = 0x6;
+ else if (fint > 1750000 && fint <= 2100000)
+ f = 0x7;
+ else if (fint > 7500000 && fint <= 10000000)
+ f = 0xB;
+ else if (fint > 10000000 && fint <= 12500000)
+ f = 0xC;
+ else if (fint > 12500000 && fint <= 15000000)
+ f = 0xD;
+ else if (fint > 15000000 && fint <= 17500000)
+ f = 0xE;
+ else if (fint > 17500000 && fint <= 21000000)
+ f = 0xF;
+ else
+ pr_debug("clock: unknown freqsel setting for %d\n", n);
+
+ return f;
+}
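+
+/*
+ * Worked example of the bands above (illustrative numbers, not from
+ * the TRM excerpt): a 12 MHz reference divided by n = 8 gives
+ * fint = 1.5 MHz, which falls in the 1.25-1.5 MHz band and selects
+ * freqsel 0x5; a 26 MHz reference with n = 2 gives fint = 13 MHz,
+ * which selects 0xD.
+ */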
+
+/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */
+
+/*
+ * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to lock. Waits for the DPLL to report
+ * readiness before returning. Will save and restore the DPLL's
+ * autoidle state across the enable, per the CDP code. If the DPLL
+ * locked successfully, return 0; if the DPLL did not lock in the time
+ * allotted, or DPLL3 was passed in, return -EINVAL.
+ */
+static int _omap3_noncore_dpll_lock(struct clk *clk)
+{
+ u8 ai;
+ int r;
+
+ pr_debug("clock: locking DPLL %s\n", clk->name);
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ omap3_dpll_deny_idle(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOCKED);
+
+ r = _omap3_wait_dpll_status(clk, 1);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+
+ return r;
+}
+
+/*
+ * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power bypass mode. In
+ * bypass mode, the DPLL's rate is set equal to its parent clock's
+ * rate. Waits for the DPLL to report readiness before returning.
+ * Will save and restore the DPLL's autoidle state across the enable,
+ * per the CDP code. If the DPLL entered bypass mode successfully,
+ * return 0; if the DPLL did not enter bypass in the time allotted, or
+ * DPLL3 was passed in, or the DPLL does not support low-power bypass,
+ * return -EINVAL.
+ */
+static int _omap3_noncore_dpll_bypass(struct clk *clk)
+{
+ int r;
+ u8 ai;
+
+ if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
+ return -EINVAL;
+
+ pr_debug("clock: configuring DPLL %s for low-power bypass\n",
+ clk->name);
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
+
+ r = _omap3_wait_dpll_status(clk, 0);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+ else
+ omap3_dpll_deny_idle(clk);
+
+ return r;
+}
+
+/*
+ * _omap3_noncore_dpll_stop - instruct a DPLL to stop
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop. Will save and
+ * restore the DPLL's autoidle state across the stop, per the CDP
+ * code. If DPLL3 was passed in, or the DPLL does not support
+ * low-power stop, return -EINVAL; otherwise, return 0.
+ */
+static int _omap3_noncore_dpll_stop(struct clk *clk)
+{
+ u8 ai;
+
+ if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
+ return -EINVAL;
+
+ pr_debug("clock: stopping DPLL %s\n", clk->name);
+
+ ai = omap3_dpll_autoidle_read(clk);
+
+ _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
+
+ if (ai)
+ omap3_dpll_allow_idle(clk);
+ else
+ omap3_dpll_deny_idle(clk);
+
+ return 0;
+}
+
+/**
+ * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
+ * The choice of modes depends on the DPLL's programmed rate: if it is
+ * the same as the DPLL's parent clock, it will enter bypass;
+ * otherwise, it will enter lock. This code will wait for the DPLL to
+ * indicate readiness before returning, unless the DPLL takes too long
+ * to enter the target state. Intended to be used as the struct clk's
+ * enable function. If DPLL3 was passed in, or the DPLL does not
+ * support low-power stop, or if the DPLL took too long to enter
+ * bypass or lock, return -EINVAL; otherwise, return 0.
+ */
+int omap3_noncore_dpll_enable(struct clk *clk)
+{
+ int r;
+ struct dpll_data *dd;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (clk->rate == dd->clk_bypass->rate) {
+ WARN_ON(clk->parent != dd->clk_bypass);
+ r = _omap3_noncore_dpll_bypass(clk);
+ } else {
+ WARN_ON(clk->parent != dd->clk_ref);
+ r = _omap3_noncore_dpll_lock(clk);
+ }
+ /*
+ *FIXME: this is dubious - if clk->rate has changed, what about
+ * propagating?
+ */
+ if (!r)
+ clk->rate = omap2_get_dpll_rate(clk);
+
+ return r;
+}
+
+/**
+ * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop. This function is
+ * intended for use in struct clkops. No return value.
+ */
+void omap3_noncore_dpll_disable(struct clk *clk)
+{
+ _omap3_noncore_dpll_stop(clk);
+}
+
+
+/* Non-CORE DPLL rate set code */
+
+/*
+ * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
+ * @clk: struct clk * of DPLL to set
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ * @freqsel: FREQSEL value to set
+ *
+ * Program the DPLL with the supplied M, N values, and wait for the DPLL to
+ * lock. Returns -EINVAL upon error, or 0 upon success.
+ */
+int omap3_noncore_dpll_program(struct clk *clk, u16 m, u8 n, u16 freqsel)
+{
+ struct dpll_data *dd = clk->dpll_data;
+ u32 v;
+
+ /* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
+ _omap3_noncore_dpll_bypass(clk);
+
+ /* Set jitter correction */
+ if (!cpu_is_omap44xx()) {
+ v = __raw_readl(dd->control_reg);
+ v &= ~dd->freqsel_mask;
+ v |= freqsel << __ffs(dd->freqsel_mask);
+ __raw_writel(v, dd->control_reg);
+ }
+
+ /* Set DPLL multiplier, divider */
+ v = __raw_readl(dd->mult_div1_reg);
+ v &= ~(dd->mult_mask | dd->div1_mask);
+ v |= m << __ffs(dd->mult_mask);
+ v |= (n - 1) << __ffs(dd->div1_mask);
+ __raw_writel(v, dd->mult_div1_reg);
+
+ /* We let the clock framework set the other output dividers later */
+
+ /* REVISIT: Set ramp-up delay? */
+
+ _omap3_noncore_dpll_lock(clk);
+
+ return 0;
+}
+
+/**
+ * omap3_noncore_dpll_set_rate - set non-core DPLL rate
+ * @clk: struct clk * of DPLL to set
+ * @rate: rounded target rate
+ *
+ * Set the DPLL CLKOUT to the target rate. If the DPLL can enter
+ * low-power bypass, and the target rate is the bypass source clock
+ * rate, then configure the DPLL for bypass. Otherwise, round the
+ * target rate if it hasn't been done already, then program and lock
+ * the DPLL. Returns -EINVAL upon error, or 0 upon success.
+ */
+int omap3_noncore_dpll_set_rate(struct clk *clk, unsigned long rate)
+{
+ struct clk *new_parent = NULL;
+ u16 freqsel = 0;
+ struct dpll_data *dd;
+ int ret;
+
+ if (!clk || !rate)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+ if (!dd)
+ return -EINVAL;
+
+ if (rate == omap2_get_dpll_rate(clk))
+ return 0;
+
+ /*
+ * Ensure both the bypass and ref clocks are enabled prior to
+ * doing anything; we need the bypass clock running to reprogram
+ * the DPLL.
+ */
+ omap2_clk_enable(dd->clk_bypass);
+ omap2_clk_enable(dd->clk_ref);
+
+ if (dd->clk_bypass->rate == rate &&
+ (clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+ pr_debug("clock: %s: set rate: entering bypass.\n", clk->name);
+
+ ret = _omap3_noncore_dpll_bypass(clk);
+ if (!ret)
+ new_parent = dd->clk_bypass;
+ } else {
+ if (dd->last_rounded_rate != rate)
+ omap2_dpll_round_rate(clk, rate);
+
+ if (dd->last_rounded_rate == 0)
+ return -EINVAL;
+
+ /* No freqsel on OMAP4 */
+ if (!cpu_is_omap44xx()) {
+ freqsel = _omap3_dpll_compute_freqsel(clk,
+ dd->last_rounded_n);
+ if (!freqsel)
+ WARN_ON(1);
+ }
+
+ pr_debug("clock: %s: set rate: locking rate to %lu.\n",
+ clk->name, rate);
+
+ ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
+ dd->last_rounded_n, freqsel);
+ if (!ret)
+ new_parent = dd->clk_ref;
+ }
+ if (!ret) {
+ /*
+		 * Switch the parent clock in the hierarchy, and make sure
+ * that the new parent's usecount is correct. Note: we
+ * enable the new parent before disabling the old to avoid
+ * any unnecessary hardware disable->enable transitions.
+ */
+ if (clk->usecount) {
+ omap2_clk_enable(new_parent);
+ omap2_clk_disable(clk->parent);
+ }
+ clk_reparent(clk, new_parent);
+ clk->rate = rate;
+ }
+ omap2_clk_disable(dd->clk_ref);
+ omap2_clk_disable(dd->clk_bypass);
+
+ return 0;
+}
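
Drivers do not call these DPLL operations directly; the per-SoC clock data tables point each DPLL's struct clk at them, so they are reached through the generic clk_set_rate()/clk_enable() paths. A hedged sketch of that wiring follows (the clock name and the ops symbol are placeholders; the field names assume the OMAP2+ struct clk layout used elsewhere in mach-omap2).

/* Illustrative clock-data entry, not part of the patch. */
static struct clk example_dpll_ck = {
	.name		= "example_dpll_ck",
	.ops		= &clkops_example_dpll_ops,	/* placeholder enable/disable ops */
	.round_rate	= &omap2_dpll_round_rate,
	.set_rate	= &omap3_noncore_dpll_set_rate,
	.recalc		= &omap3_dpll_recalc,
};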
+
+/* DPLL autoidle read/set code */
+
+/**
+ * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
+ * @clk: struct clk * of the DPLL to read
+ *
+ * Return the DPLL's autoidle bits, shifted down to bit 0. Returns
+ * -EINVAL if passed a null pointer or if the struct clk does not
+ * appear to refer to a DPLL.
+ */
+u32 omap3_dpll_autoidle_read(struct clk *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return -EINVAL;
+
+ dd = clk->dpll_data;
+
+ v = __raw_readl(dd->autoidle_reg);
+ v &= dd->autoidle_mask;
+ v >>= __ffs(dd->autoidle_mask);
+
+ return v;
+}
+
+/**
+ * omap3_dpll_allow_idle - enable DPLL autoidle bits
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Enable DPLL automatic idle control. This automatic idle mode
+ * switching takes effect only when the DPLL is locked, at least on
+ * OMAP3430. The DPLL will enter low-power stop when its downstream
+ * clocks are gated. No return value.
+ */
+void omap3_dpll_allow_idle(struct clk *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return;
+
+ dd = clk->dpll_data;
+
+ /*
+ * REVISIT: CORE DPLL can optionally enter low-power bypass
+ * by writing 0x5 instead of 0x1. Add some mechanism to
+ * optionally enter this mode.
+ */
+ v = __raw_readl(dd->autoidle_reg);
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
+ __raw_writel(v, dd->autoidle_reg);
+}
+
+/**
+ * omap3_dpll_deny_idle - prevent DPLL from automatically idling
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Disable DPLL automatic idle control. No return value.
+ */
+void omap3_dpll_deny_idle(struct clk *clk)
+{
+ const struct dpll_data *dd;
+ u32 v;
+
+ if (!clk || !clk->dpll_data)
+ return;
+
+ dd = clk->dpll_data;
+
+ v = __raw_readl(dd->autoidle_reg);
+ v &= ~dd->autoidle_mask;
+ v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
+ __raw_writel(v, dd->autoidle_reg);
+
+}
+
+/* Clock control for DPLL outputs */
+
+/**
+ * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
+ * @clk: DPLL output struct clk
+ *
+ * Using parent clock DPLL data, look up DPLL state. If locked, set our
+ * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
+ */
+unsigned long omap3_clkoutx2_recalc(struct clk *clk)
+{
+ const struct dpll_data *dd;
+ unsigned long rate;
+ u32 v;
+ struct clk *pclk;
+
+ /* Walk up the parents of clk, looking for a DPLL */
+ pclk = clk->parent;
+ while (pclk && !pclk->dpll_data)
+ pclk = pclk->parent;
+
+ /* clk does not have a DPLL as a parent? */
+ WARN_ON(!pclk);
+
+ dd = pclk->dpll_data;
+
+ WARN_ON(!dd->enable_mask);
+
+ v = __raw_readl(dd->control_reg) & dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+ if (v != OMAP3XXX_EN_DPLL_LOCKED)
+ rate = clk->parent->rate;
+ else
+ rate = clk->parent->rate * 2;
+ return rate;
+}
diff --git a/arch/arm/mach-omap2/gpmc-smc91x.c b/arch/arm/mach-omap2/gpmc-smc91x.c
index 6083e21b3be..877c6f5807b 100644
--- a/arch/arm/mach-omap2/gpmc-smc91x.c
+++ b/arch/arm/mach-omap2/gpmc-smc91x.c
@@ -33,17 +33,19 @@ static struct resource gpmc_smc91x_resources[] = {
};
static struct smc91x_platdata gpmc_smc91x_info = {
- .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_IO_SHIFT_0,
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_IO_SHIFT_0,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
};
static struct platform_device gpmc_smc91x_device = {
.name = "smc91x",
.id = -1,
- .num_resources = ARRAY_SIZE(gpmc_smc91x_resources),
- .resource = gpmc_smc91x_resources,
.dev = {
.platform_data = &gpmc_smc91x_info,
},
+ .num_resources = ARRAY_SIZE(gpmc_smc91x_resources),
+ .resource = gpmc_smc91x_resources,
};
/*
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index e86f5ca180e..bd8cb597472 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -517,7 +517,7 @@ void __init gpmc_init(void)
ck = "gpmc_fck";
l = OMAP34XX_GPMC_BASE;
} else if (cpu_is_omap44xx()) {
- ck = "gpmc_fck";
+ ck = "gpmc_ck";
l = OMAP44XX_GPMC_BASE;
}
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
new file mode 100644
index 00000000000..789ca8c02f0
--- /dev/null
+++ b/arch/arm/mach-omap2/i2c.c
@@ -0,0 +1,56 @@
+/*
+ * Helper module for board specific I2C bus registration
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <plat/cpu.h>
+#include <plat/i2c.h>
+#include <plat/mux.h>
+
+#include "mux.h"
+
+int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
+ struct i2c_board_info const *info,
+ unsigned len)
+{
+ if (cpu_is_omap24xx()) {
+ const int omap24xx_pins[][2] = {
+ { M19_24XX_I2C1_SCL, L15_24XX_I2C1_SDA },
+ { J15_24XX_I2C2_SCL, H19_24XX_I2C2_SDA },
+ };
+ int scl, sda;
+
+ scl = omap24xx_pins[bus_id - 1][0];
+ sda = omap24xx_pins[bus_id - 1][1];
+ omap_cfg_reg(sda);
+ omap_cfg_reg(scl);
+ }
+
+ /* First I2C bus is not muxable */
+ if (cpu_is_omap34xx() && bus_id > 1) {
+ char mux_name[sizeof("i2c2_scl.i2c2_scl")];
+
+ sprintf(mux_name, "i2c%i_scl.i2c%i_scl", bus_id, bus_id);
+ omap_mux_init_signal(mux_name, OMAP_PIN_INPUT);
+ sprintf(mux_name, "i2c%i_sda.i2c%i_sda", bus_id, bus_id);
+ omap_mux_init_signal(mux_name, OMAP_PIN_INPUT);
+ }
+
+ return omap_plat_register_i2c_bus(bus_id, clkrate, info, len);
+}
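
Board files call this helper during init to register each bus with its speed and an optional device table. A minimal usage sketch follows (bus numbers, rates, device name and address are illustrative, and I2C_BOARD_INFO() is the usual <linux/i2c.h> initializer).

/* Illustrative board-file usage, not part of the patch. */
static struct i2c_board_info __initdata example_i2c1_boardinfo[] = {
	{
		I2C_BOARD_INFO("example-pmic", 0x48),	/* placeholder device */
	},
};

static void __init example_board_i2c_init(void)
{
	omap_register_i2c_bus(1, 2600, example_i2c1_boardinfo,
			      ARRAY_SIZE(example_i2c1_boardinfo));
	omap_register_i2c_bus(2, 400, NULL, 0);
	omap_register_i2c_bus(3, 400, NULL, 0);
}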
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index f48a4b2654d..a091b53657b 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -246,6 +246,31 @@ void __init omap3_check_revision(void)
}
}
+void __init omap4_check_revision(void)
+{
+ u32 idcode;
+ u16 hawkeye;
+ u8 rev;
+ char *rev_name = "ES1.0";
+
+ /*
+ * The IC rev detection is done with hawkeye and rev.
+ * Note that rev does not map directly to defined processor
+ * revision numbers as ES1.0 uses value 0.
+ */
+ idcode = read_tap_reg(OMAP_TAP_IDCODE);
+ hawkeye = (idcode >> 12) & 0xffff;
+ rev = (idcode >> 28) & 0xff;
+
+ if ((hawkeye == 0xb852) && (rev == 0x0)) {
+ omap_revision = OMAP4430_REV_ES1_0;
+ pr_info("OMAP%04x %s\n", omap_rev() >> 16, rev_name);
+ return;
+ }
+
+ pr_err("Unknown OMAP4 CPU id\n");
+}
+
#define OMAP3_SHOW_FEATURE(feat) \
if (omap3_has_ ##feat()) \
printk(#feat" ");
@@ -277,10 +302,10 @@ void __init omap3_cpuinfo(void)
} else if (omap3_has_iva() && omap3_has_sgx()) {
/* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
strcpy(cpu_name, "OMAP3430/3530");
- } else if (omap3_has_sgx()) {
+ } else if (omap3_has_iva()) {
omap_revision = OMAP3525_REV(rev);
strcpy(cpu_name, "OMAP3525");
- } else if (omap3_has_iva()) {
+ } else if (omap3_has_sgx()) {
omap_revision = OMAP3515_REV(rev);
strcpy(cpu_name, "OMAP3515");
} else {
@@ -336,7 +361,7 @@ void __init omap2_check_revision(void)
omap3_check_features();
omap3_cpuinfo();
} else if (cpu_is_omap44xx()) {
- printk(KERN_INFO "FIXME: CPU revision = OMAP4430\n");
+ omap4_check_revision();
return;
} else {
pr_err("OMAP revision unknown, please fix!\n");
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 59d28b2fd8c..a8749e8017b 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -22,19 +22,20 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/omapfb.h>
#include <asm/tlb.h>
#include <asm/mach/map.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
#include <plat/sram.h>
#include <plat/sdrc.h>
#include <plat/gpmc.h>
#include <plat/serial.h>
+#include <plat/mux.h>
+#include <plat/vram.h>
-#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdev is ready */
#include "clock.h"
#include <plat/omap-pm.h>
@@ -43,7 +44,6 @@
#include <plat/clockdomain.h>
#include "clockdomains.h"
-#endif
#include <plat/omap_hwmod.h>
#include "omap_hwmod_2420.h"
#include "omap_hwmod_2430.h"
@@ -264,6 +264,7 @@ void __init omap2_map_common_io(void)
omap2_check_revision();
omap_sram_init();
omapfb_reserve_sdram();
+ omap_vram_reserve_sdram();
}
/*
@@ -319,8 +320,8 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
pwrdm_init(powerdomains_omap);
clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
- omap2_clk_init();
#endif
+ omap2_clk_init();
omap_serial_early_init();
#ifndef CONFIG_ARCH_OMAP4
omap_hwmod_late_init();
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index c18a94eca64..e071b3fd187 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -27,19 +27,52 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
#include <plat/control.h>
#include <plat/mux.h>
-#ifdef CONFIG_OMAP_MUX
+#include "mux.h"
+
+#define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */
+#define OMAP_MUX_BASE_SZ 0x5ca
+
+struct omap_mux_entry {
+ struct omap_mux mux;
+ struct list_head node;
+};
+
+static unsigned long mux_phys;
+static void __iomem *mux_base;
+
+static inline u16 omap_mux_read(u16 reg)
+{
+ if (cpu_is_omap24xx())
+ return __raw_readb(mux_base + reg);
+ else
+ return __raw_readw(mux_base + reg);
+}
+
+static inline void omap_mux_write(u16 val, u16 reg)
+{
+ if (cpu_is_omap24xx())
+ __raw_writeb(val, mux_base + reg);
+ else
+ __raw_writew(val, mux_base + reg);
+}
+
+#if defined(CONFIG_ARCH_OMAP24XX) && defined(CONFIG_OMAP_MUX)
static struct omap_mux_cfg arch_mux_cfg;
/* NOTE: See mux.h for the enumeration */
-#ifdef CONFIG_ARCH_OMAP24XX
static struct pin_config __initdata_or_module omap24xx_pins[] = {
/*
* description mux mux pull pull debug
@@ -249,342 +282,14 @@ MUX_CFG_24XX("AF19_2430_GPIO_85", 0x0113, 3, 0, 0, 1)
#define OMAP24XX_PINS_SZ ARRAY_SIZE(omap24xx_pins)
-#else
-#define omap24xx_pins NULL
-#define OMAP24XX_PINS_SZ 0
-#endif /* CONFIG_ARCH_OMAP24XX */
-
-#ifdef CONFIG_ARCH_OMAP34XX
-static struct pin_config __initdata_or_module omap34xx_pins[] = {
-/*
- * Name, reg-offset,
- * mux-mode | [active-mode | off-mode]
- */
-
-/* 34xx I2C */
-MUX_CFG_34XX("K21_34XX_I2C1_SCL", 0x1ba,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("J21_34XX_I2C1_SDA", 0x1bc,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AF15_34XX_I2C2_SCL", 0x1be,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AE15_34XX_I2C2_SDA", 0x1c0,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AF14_34XX_I2C3_SCL", 0x1c2,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AG14_34XX_I2C3_SDA", 0x1c4,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AD26_34XX_I2C4_SCL", 0xa00,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AE26_34XX_I2C4_SDA", 0xa02,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-
-/* PHY - HSUSB: 12-pin ULPI PHY: Port 1*/
-MUX_CFG_34XX("Y8_3430_USB1HS_PHY_CLK", 0x5da,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("Y9_3430_USB1HS_PHY_STP", 0x5d8,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("AA14_3430_USB1HS_PHY_DIR", 0x5ec,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AA11_3430_USB1HS_PHY_NXT", 0x5ee,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W13_3430_USB1HS_PHY_D0", 0x5dc,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W12_3430_USB1HS_PHY_D1", 0x5de,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W11_3430_USB1HS_PHY_D2", 0x5e0,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y11_3430_USB1HS_PHY_D3", 0x5ea,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W9_3430_USB1HS_PHY_D4", 0x5e4,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y12_3430_USB1HS_PHY_D5", 0x5e6,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W8_3430_USB1HS_PHY_D6", 0x5e8,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y13_3430_USB1HS_PHY_D7", 0x5e2,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-
-/* PHY - HSUSB: 12-pin ULPI PHY: Port 2*/
-MUX_CFG_34XX("AA8_3430_USB2HS_PHY_CLK", 0x5f0,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("AA10_3430_USB2HS_PHY_STP", 0x5f2,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("AA9_3430_USB2HS_PHY_DIR", 0x5f4,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB11_3430_USB2HS_PHY_NXT", 0x5f6,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB10_3430_USB2HS_PHY_D0", 0x5f8,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB9_3430_USB2HS_PHY_D1", 0x5fa,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W3_3430_USB2HS_PHY_D2", 0x1d4,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("T4_3430_USB2HS_PHY_D3", 0x1de,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("T3_3430_USB2HS_PHY_D4", 0x1d8,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("R3_3430_USB2HS_PHY_D5", 0x1da,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("R4_3430_USB2HS_PHY_D6", 0x1dc,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("T2_3430_USB2HS_PHY_D7", 0x1d6,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLDOWN)
-
-/* TLL - HSUSB: 12-pin TLL Port 1*/
-MUX_CFG_34XX("Y8_3430_USB1HS_TLL_CLK", 0x5da,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y9_3430_USB1HS_TLL_STP", 0x5d8,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AA14_3430_USB1HS_TLL_DIR", 0x5ec,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AA11_3430_USB1HS_TLL_NXT", 0x5ee,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W13_3430_USB1HS_TLL_D0", 0x5dc,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W12_3430_USB1HS_TLL_D1", 0x5de,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W11_3430_USB1HS_TLL_D2", 0x5e0,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y11_3430_USB1HS_TLL_D3", 0x5ea,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W9_3430_USB1HS_TLL_D4", 0x5e4,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y12_3430_USB1HS_TLL_D5", 0x5e6,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W8_3430_USB1HS_TLL_D6", 0x5e8,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y13_3430_USB1HS_TLL_D7", 0x5e2,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-
-/* TLL - HSUSB: 12-pin TLL Port 2*/
-MUX_CFG_34XX("AA8_3430_USB2HS_TLL_CLK", 0x5f0,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AA10_3430_USB2HS_TLL_STP", 0x5f2,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AA9_3430_USB2HS_TLL_DIR", 0x5f4,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB11_3430_USB2HS_TLL_NXT", 0x5f6,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB10_3430_USB2HS_TLL_D0", 0x5f8,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB9_3430_USB2HS_TLL_D1", 0x5fa,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W3_3430_USB2HS_TLL_D2", 0x1d4,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("T4_3430_USB2HS_TLL_D3", 0x1de,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("T3_3430_USB2HS_TLL_D4", 0x1d8,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("R3_3430_USB2HS_TLL_D5", 0x1da,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("R4_3430_USB2HS_TLL_D6", 0x1dc,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("T2_3430_USB2HS_TLL_D7", 0x1d6,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLDOWN)
-
-/* TLL - HSUSB: 12-pin TLL Port 3*/
-MUX_CFG_34XX("AA6_3430_USB3HS_TLL_CLK", 0x180,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB3_3430_USB3HS_TLL_STP", 0x166,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AA3_3430_USB3HS_TLL_DIR", 0x168,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y3_3430_USB3HS_TLL_NXT", 0x16a,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AA5_3430_USB3HS_TLL_D0", 0x186,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y4_3430_USB3HS_TLL_D1", 0x184,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y5_3430_USB3HS_TLL_D2", 0x188,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W5_3430_USB3HS_TLL_D3", 0x18a,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB12_3430_USB3HS_TLL_D4", 0x16c,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB13_3430_USB3HS_TLL_D5", 0x16e,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AA13_3430_USB3HS_TLL_D6", 0x170,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AA12_3430_USB3HS_TLL_D7", 0x172,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-
-/* PHY FSUSB: FS Serial for Port 1 (multiple PHY modes supported) */
-MUX_CFG_34XX("AF10_3430_USB1FS_PHY_MM1_RXDP", 0x5d8,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AG9_3430_USB1FS_PHY_MM1_RXDM", 0x5ee,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W13_3430_USB1FS_PHY_MM1_RXRCV", 0x5dc,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W12_3430_USB1FS_PHY_MM1_TXSE0", 0x5de,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W11_3430_USB1FS_PHY_MM1_TXDAT", 0x5e0,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("Y11_3430_USB1FS_PHY_MM1_TXEN_N", 0x5ea,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_OUTPUT)
-
-/* PHY FSUSB: FS Serial for Port 2 (multiple PHY modes supported) */
-MUX_CFG_34XX("AF7_3430_USB2FS_PHY_MM2_RXDP", 0x5f2,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AH7_3430_USB2FS_PHY_MM2_RXDM", 0x5f6,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB10_3430_USB2FS_PHY_MM2_RXRCV", 0x5f8,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AB9_3430_USB2FS_PHY_MM2_TXSE0", 0x5fa,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("W3_3430_USB2FS_PHY_MM2_TXDAT", 0x1d4,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("T4_3430_USB2FS_PHY_MM2_TXEN_N", 0x1de,
- OMAP34XX_MUX_MODE5 | OMAP34XX_PIN_OUTPUT)
-
-/* PHY FSUSB: FS Serial for Port 3 (multiple PHY modes supported) */
-MUX_CFG_34XX("AH3_3430_USB3FS_PHY_MM3_RXDP", 0x166,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AE3_3430_USB3FS_PHY_MM3_RXDM", 0x16a,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AD1_3430_USB3FS_PHY_MM3_RXRCV", 0x186,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AE1_3430_USB3FS_PHY_MM3_TXSE0", 0x184,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AD2_3430_USB3FS_PHY_MM3_TXDAT", 0x188,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("AC1_3430_USB3FS_PHY_MM3_TXEN_N", 0x18a,
- OMAP34XX_MUX_MODE6 | OMAP34XX_PIN_OUTPUT)
-
-
-/* 34XX GPIO - bidirectional, unless the name has an "_OUT" suffix.
- * (Always specify PIN_INPUT, except for names suffixed by "_OUT".)
- * No internal pullup/pulldown without "_UP" or "_DOWN" suffix.
- */
-MUX_CFG_34XX("AF26_34XX_GPIO0", 0x1e0,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("AF22_34XX_GPIO9", 0xa18,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("AG9_34XX_GPIO23", 0x5ee,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("AH8_34XX_GPIO29", 0x5fa,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("U8_34XX_GPIO54_OUT", 0x0b4,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("U8_34XX_GPIO54_DOWN", 0x0b4,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT_PULLDOWN)
-MUX_CFG_34XX("L8_34XX_GPIO63", 0x0ce,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("G25_34XX_GPIO86_OUT", 0x0fc,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("AG4_34XX_GPIO134_OUT", 0x160,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("AF4_34XX_GPIO135_OUT", 0x162,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("AE4_34XX_GPIO136_OUT", 0x164,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("AF6_34XX_GPIO140_UP", 0x16c,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AE6_34XX_GPIO141", 0x16e,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("AF5_34XX_GPIO142", 0x170,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("AE5_34XX_GPIO143", 0x172,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-MUX_CFG_34XX("H19_34XX_GPIO164_OUT", 0x19c,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("J25_34XX_GPIO170", 0x1c6,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
-
-/* OMAP3 SDRC CKE signals to SDR/DDR ram chips */
-MUX_CFG_34XX("H16_34XX_SDRC_CKE0", 0x262,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
-MUX_CFG_34XX("H17_34XX_SDRC_CKE1", 0x264,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
-
-/* MMC1 */
-MUX_CFG_34XX("N28_3430_MMC1_CLK", 0x144,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("M27_3430_MMC1_CMD", 0x146,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("N27_3430_MMC1_DAT0", 0x148,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("N26_3430_MMC1_DAT1", 0x14a,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("N25_3430_MMC1_DAT2", 0x14c,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("P28_3430_MMC1_DAT3", 0x14e,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("P27_3430_MMC1_DAT4", 0x150,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("P26_3430_MMC1_DAT5", 0x152,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("R27_3430_MMC1_DAT6", 0x154,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("R25_3430_MMC1_DAT7", 0x156,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-
-/* MMC2 */
-MUX_CFG_34XX("AE2_3430_MMC2_CLK", 0x158,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AG5_3430_MMC2_CMD", 0x15A,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AH5_3430_MMC2_DAT0", 0x15c,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AH4_3430_MMC2_DAT1", 0x15e,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AG4_3430_MMC2_DAT2", 0x160,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AF4_3430_MMC2_DAT3", 0x162,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AE4_3430_MMC2_DAT4", 0x164,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AH3_3430_MMC2_DAT5", 0x166,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AF3_3430_MMC2_DAT6", 0x168,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AE3_3430_MMC2_DAT7", 0x16A,
- OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_INPUT_PULLUP)
-
-/* MMC3 */
-MUX_CFG_34XX("AF10_3430_MMC3_CLK", 0x5d8,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AC3_3430_MMC3_CMD", 0x1d0,
- OMAP34XX_MUX_MODE3 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AE11_3430_MMC3_DAT0", 0x5e4,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AH9_3430_MMC3_DAT1", 0x5e6,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AF13_3430_MMC3_DAT2", 0x5e8,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AF13_3430_MMC3_DAT3", 0x5e2,
- OMAP34XX_MUX_MODE2 | OMAP34XX_PIN_INPUT_PULLUP)
-
-/* SYS_NIRQ T2 INT1 */
-MUX_CFG_34XX("AF26_34XX_SYS_NIRQ", 0x1E0,
- OMAP3_WAKEUP_EN | OMAP34XX_PIN_INPUT_PULLUP |
- OMAP34XX_MUX_MODE0)
-/* EHCI GPIO's on OMAP3EVM (Rev >= E) */
-MUX_CFG_34XX("AH14_34XX_GPIO21", 0x5ea,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("AF9_34XX_GPIO22", 0x5ec,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT_PULLUP)
-MUX_CFG_34XX("U3_34XX_GPIO61", 0x0c8,
- OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT_PULLUP)
-};
-
-#define OMAP34XX_PINS_SZ ARRAY_SIZE(omap34xx_pins)
-
-#else
-#define omap34xx_pins NULL
-#define OMAP34XX_PINS_SZ 0
-#endif /* CONFIG_ARCH_OMAP34XX */
-
#if defined(CONFIG_OMAP_MUX_DEBUG) || defined(CONFIG_OMAP_MUX_WARNINGS)
+
static void __init_or_module omap2_cfg_debug(const struct pin_config *cfg, u16 reg)
{
u16 orig;
u8 warn = 0, debug = 0;
- if (cpu_is_omap24xx())
- orig = omap_ctrl_readb(cfg->mux_reg);
- else
- orig = omap_ctrl_readw(cfg->mux_reg);
+ orig = omap_mux_read(cfg->mux_reg - OMAP_MUX_BASE_OFFSET);
#ifdef CONFIG_OMAP_MUX_DEBUG
debug = cfg->debug;
@@ -600,7 +305,6 @@ static void __init_or_module omap2_cfg_debug(const struct pin_config *cfg, u16 r
#define omap2_cfg_debug(x, y) do {} while (0)
#endif
-#ifdef CONFIG_ARCH_OMAP24XX
static int __init_or_module omap24xx_cfg_reg(const struct pin_config *cfg)
{
static DEFINE_SPINLOCK(mux_spin_lock);
@@ -614,47 +318,692 @@ static int __init_or_module omap24xx_cfg_reg(const struct pin_config *cfg)
if (cfg->pu_pd_val)
reg |= OMAP2_PULL_UP;
omap2_cfg_debug(cfg, reg);
- omap_ctrl_writeb(reg, cfg->mux_reg);
+ omap_mux_write(reg, cfg->mux_reg - OMAP_MUX_BASE_OFFSET);
spin_unlock_irqrestore(&mux_spin_lock, flags);
return 0;
}
+
+int __init omap2_mux_init(void)
+{
+ u32 mux_pbase;
+
+ if (cpu_is_omap2420())
+ mux_pbase = OMAP2420_CTRL_BASE + OMAP_MUX_BASE_OFFSET;
+ else if (cpu_is_omap2430())
+ mux_pbase = OMAP243X_CTRL_BASE + OMAP_MUX_BASE_OFFSET;
+ else
+ return -ENODEV;
+
+ mux_base = ioremap(mux_pbase, OMAP_MUX_BASE_SZ);
+ if (!mux_base) {
+ printk(KERN_ERR "mux: Could not ioremap\n");
+ return -ENODEV;
+ }
+
+ if (cpu_is_omap24xx()) {
+ arch_mux_cfg.pins = omap24xx_pins;
+ arch_mux_cfg.size = OMAP24XX_PINS_SZ;
+ arch_mux_cfg.cfg_reg = omap24xx_cfg_reg;
+
+ return omap_mux_register(&arch_mux_cfg);
+ }
+
+ return 0;
+}
+
#else
-#define omap24xx_cfg_reg NULL
-#endif
+int __init omap2_mux_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_OMAP_MUX */
+
+/*----------------------------------------------------------------------------*/
#ifdef CONFIG_ARCH_OMAP34XX
-static int __init_or_module omap34xx_cfg_reg(const struct pin_config *cfg)
+static LIST_HEAD(muxmodes);
+static DEFINE_MUTEX(muxmode_mutex);
+
+#ifdef CONFIG_OMAP_MUX
+
+static char *omap_mux_options;
+
+int __init omap_mux_init_gpio(int gpio, int val)
{
- static DEFINE_SPINLOCK(mux_spin_lock);
- unsigned long flags;
- u16 reg = 0;
+ struct omap_mux_entry *e;
+ int found = 0;
+
+ if (!gpio)
+ return -EINVAL;
+
+ list_for_each_entry(e, &muxmodes, node) {
+ struct omap_mux *m = &e->mux;
+ if (gpio == m->gpio) {
+ u16 old_mode;
+ u16 mux_mode;
+
+ old_mode = omap_mux_read(m->reg_offset);
+ mux_mode = val & ~(OMAP_MUX_NR_MODES - 1);
+ mux_mode |= OMAP_MUX_MODE4;
+ printk(KERN_DEBUG "mux: Setting signal "
+ "%s.gpio%i 0x%04x -> 0x%04x\n",
+ m->muxnames[0], gpio, old_mode, mux_mode);
+ omap_mux_write(mux_mode, m->reg_offset);
+ found++;
+ }
+ }
- spin_lock_irqsave(&mux_spin_lock, flags);
- reg |= cfg->mux_val;
- omap2_cfg_debug(cfg, reg);
- omap_ctrl_writew(reg, cfg->mux_reg);
- spin_unlock_irqrestore(&mux_spin_lock, flags);
+ if (found == 1)
+ return 0;
+
+ if (found > 1) {
+ printk(KERN_ERR "mux: Multiple gpio paths for gpio%i\n", gpio);
+ return -EINVAL;
+ }
+
+ printk(KERN_ERR "mux: Could not set gpio%i\n", gpio);
+
+ return -ENODEV;
+}
+
+int __init omap_mux_init_signal(char *muxname, int val)
+{
+ struct omap_mux_entry *e;
+ char *m0_name = NULL, *mode_name = NULL;
+ int found = 0;
+
+ mode_name = strchr(muxname, '.');
+ if (mode_name) {
+ *mode_name = '\0';
+ mode_name++;
+ m0_name = muxname;
+ } else {
+ mode_name = muxname;
+ }
+
+ list_for_each_entry(e, &muxmodes, node) {
+ struct omap_mux *m = &e->mux;
+ char *m0_entry = m->muxnames[0];
+ int i;
+
+ if (m0_name && strcmp(m0_name, m0_entry))
+ continue;
+
+ for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
+ char *mode_cur = m->muxnames[i];
+
+ if (!mode_cur)
+ continue;
+
+ if (!strcmp(mode_name, mode_cur)) {
+ u16 old_mode;
+ u16 mux_mode;
+
+ old_mode = omap_mux_read(m->reg_offset);
+ mux_mode = val | i;
+ printk(KERN_DEBUG "mux: Setting signal "
+ "%s.%s 0x%04x -> 0x%04x\n",
+ m0_entry, muxname, old_mode, mux_mode);
+ omap_mux_write(mux_mode, m->reg_offset);
+ found++;
+ }
+ }
+ }
+
+ if (found == 1)
+ return 0;
+
+ if (found > 1) {
+ printk(KERN_ERR "mux: Multiple signal paths (%i) for %s\n",
+ found, muxname);
+ return -EINVAL;
+ }
+
+ printk(KERN_ERR "mux: Could not set signal %s\n", muxname);
+
+ return -ENODEV;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define OMAP_MUX_MAX_NR_FLAGS 10
+#define OMAP_MUX_TEST_FLAG(val, mask) \
+ if (((val) & (mask)) == (mask)) { \
+ i++; \
+ flags[i] = #mask; \
+ }
+
+/* REVISIT: Add checking for non-optimal mux settings */
+static inline void omap_mux_decode(struct seq_file *s, u16 val)
+{
+ char *flags[OMAP_MUX_MAX_NR_FLAGS];
+ char mode[14];
+ int i = -1;
+
+ sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7);
+ i++;
+ flags[i] = mode;
+
+ OMAP_MUX_TEST_FLAG(val, OMAP_PIN_OFF_WAKEUPENABLE);
+ if (val & OMAP_OFF_EN) {
+ if (!(val & OMAP_OFFOUT_EN)) {
+ if (!(val & OMAP_OFF_PULL_UP)) {
+ OMAP_MUX_TEST_FLAG(val,
+ OMAP_PIN_OFF_INPUT_PULLDOWN);
+ } else {
+ OMAP_MUX_TEST_FLAG(val,
+ OMAP_PIN_OFF_INPUT_PULLUP);
+ }
+ } else {
+ if (!(val & OMAP_OFFOUT_VAL)) {
+ OMAP_MUX_TEST_FLAG(val,
+ OMAP_PIN_OFF_OUTPUT_LOW);
+ } else {
+ OMAP_MUX_TEST_FLAG(val,
+ OMAP_PIN_OFF_OUTPUT_HIGH);
+ }
+ }
+ }
+
+ if (val & OMAP_INPUT_EN) {
+ if (val & OMAP_PULL_ENA) {
+ if (!(val & OMAP_PULL_UP)) {
+ OMAP_MUX_TEST_FLAG(val,
+ OMAP_PIN_INPUT_PULLDOWN);
+ } else {
+ OMAP_MUX_TEST_FLAG(val, OMAP_PIN_INPUT_PULLUP);
+ }
+ } else {
+ OMAP_MUX_TEST_FLAG(val, OMAP_PIN_INPUT);
+ }
+ } else {
+ i++;
+ flags[i] = "OMAP_PIN_OUTPUT";
+ }
+
+ do {
+ seq_printf(s, "%s", flags[i]);
+ if (i > 0)
+ seq_printf(s, " | ");
+ } while (i-- > 0);
+}
+
+#define OMAP_MUX_DEFNAME_LEN 16
+
+static int omap_mux_dbg_board_show(struct seq_file *s, void *unused)
+{
+ struct omap_mux_entry *e;
+
+ list_for_each_entry(e, &muxmodes, node) {
+ struct omap_mux *m = &e->mux;
+ char m0_def[OMAP_MUX_DEFNAME_LEN];
+ char *m0_name = m->muxnames[0];
+ u16 val;
+ int i, mode;
+
+ if (!m0_name)
+ continue;
+
+ for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) {
+ if (m0_name[i] == '\0') {
+ m0_def[i] = m0_name[i];
+ break;
+ }
+ m0_def[i] = toupper(m0_name[i]);
+ }
+ val = omap_mux_read(m->reg_offset);
+ mode = val & OMAP_MUX_MODE7;
+
+ seq_printf(s, "OMAP%i_MUX(%s, ",
+ cpu_is_omap34xx() ? 3 : 0, m0_def);
+ omap_mux_decode(s, val);
+ seq_printf(s, "),\n");
+ }
+
+ return 0;
+}
+
+static int omap_mux_dbg_board_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, omap_mux_dbg_board_show, &inode->i_private);
+}
+
+static const struct file_operations omap_mux_dbg_board_fops = {
+ .open = omap_mux_dbg_board_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int omap_mux_dbg_signal_show(struct seq_file *s, void *unused)
+{
+ struct omap_mux *m = s->private;
+ const char *none = "NA";
+ u16 val;
+ int mode;
+
+ val = omap_mux_read(m->reg_offset);
+ mode = val & OMAP_MUX_MODE7;
+
+ seq_printf(s, "name: %s.%s (0x%08lx/0x%03x = 0x%04x), b %s, t %s\n",
+ m->muxnames[0], m->muxnames[mode],
+ mux_phys + m->reg_offset, m->reg_offset, val,
+ m->balls[0] ? m->balls[0] : none,
+ m->balls[1] ? m->balls[1] : none);
+ seq_printf(s, "mode: ");
+ omap_mux_decode(s, val);
+ seq_printf(s, "\n");
+ seq_printf(s, "signals: %s | %s | %s | %s | %s | %s | %s | %s\n",
+ m->muxnames[0] ? m->muxnames[0] : none,
+ m->muxnames[1] ? m->muxnames[1] : none,
+ m->muxnames[2] ? m->muxnames[2] : none,
+ m->muxnames[3] ? m->muxnames[3] : none,
+ m->muxnames[4] ? m->muxnames[4] : none,
+ m->muxnames[5] ? m->muxnames[5] : none,
+ m->muxnames[6] ? m->muxnames[6] : none,
+ m->muxnames[7] ? m->muxnames[7] : none);
return 0;
}
+
+#define OMAP_MUX_MAX_ARG_CHAR 7
+
+static ssize_t omap_mux_dbg_signal_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[OMAP_MUX_MAX_ARG_CHAR];
+ struct seq_file *seqf;
+ struct omap_mux *m;
+ unsigned long val;
+ int buf_size, ret;
+
+ if (count > OMAP_MUX_MAX_ARG_CHAR)
+ return -EINVAL;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ ret = strict_strtoul(buf, 0x10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 0xffff)
+ return -EINVAL;
+
+ seqf = file->private_data;
+ m = seqf->private;
+
+ omap_mux_write((u16)val, m->reg_offset);
+ *ppos += count;
+
+ return count;
+}
+
+static int omap_mux_dbg_signal_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, omap_mux_dbg_signal_show, inode->i_private);
+}
+
+static const struct file_operations omap_mux_dbg_signal_fops = {
+ .open = omap_mux_dbg_signal_open,
+ .read = seq_read,
+ .write = omap_mux_dbg_signal_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *mux_dbg_dir;
+
+static void __init omap_mux_dbg_init(void)
+{
+ struct omap_mux_entry *e;
+
+ mux_dbg_dir = debugfs_create_dir("omap_mux", NULL);
+ if (!mux_dbg_dir)
+ return;
+
+ (void)debugfs_create_file("board", S_IRUGO, mux_dbg_dir,
+ NULL, &omap_mux_dbg_board_fops);
+
+ list_for_each_entry(e, &muxmodes, node) {
+ struct omap_mux *m = &e->mux;
+
+ (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+ m, &omap_mux_dbg_signal_fops);
+ }
+}
+
#else
-#define omap34xx_cfg_reg NULL
+static inline void omap_mux_dbg_init(void)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void __init omap_mux_free_names(struct omap_mux *m)
+{
+ int i;
+
+ for (i = 0; i < OMAP_MUX_NR_MODES; i++)
+ kfree(m->muxnames[i]);
+
+#ifdef CONFIG_DEBUG_FS
+ for (i = 0; i < OMAP_MUX_NR_SIDES; i++)
+ kfree(m->balls[i]);
#endif
-int __init omap2_mux_init(void)
+}
+
+/* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
+static int __init omap_mux_late_init(void)
{
- if (cpu_is_omap24xx()) {
- arch_mux_cfg.pins = omap24xx_pins;
- arch_mux_cfg.size = OMAP24XX_PINS_SZ;
- arch_mux_cfg.cfg_reg = omap24xx_cfg_reg;
- } else if (cpu_is_omap34xx()) {
- arch_mux_cfg.pins = omap34xx_pins;
- arch_mux_cfg.size = OMAP34XX_PINS_SZ;
- arch_mux_cfg.cfg_reg = omap34xx_cfg_reg;
+ struct omap_mux_entry *e, *tmp;
+
+ list_for_each_entry_safe(e, tmp, &muxmodes, node) {
+ struct omap_mux *m = &e->mux;
+ u16 mode = omap_mux_read(m->reg_offset);
+
+ if (OMAP_MODE_GPIO(mode))
+ continue;
+
+#ifndef CONFIG_DEBUG_FS
+ mutex_lock(&muxmode_mutex);
+ list_del(&e->node);
+ mutex_unlock(&muxmode_mutex);
+ omap_mux_free_names(m);
+ kfree(m);
+#endif
+
+ }
+
+ omap_mux_dbg_init();
+
+ return 0;
+}
+late_initcall(omap_mux_late_init);
+
+static void __init omap_mux_package_fixup(struct omap_mux *p,
+ struct omap_mux *superset)
+{
+ while (p->reg_offset != OMAP_MUX_TERMINATOR) {
+ struct omap_mux *s = superset;
+ int found = 0;
+
+ while (s->reg_offset != OMAP_MUX_TERMINATOR) {
+ if (s->reg_offset == p->reg_offset) {
+ *s = *p;
+ found++;
+ break;
+ }
+ s++;
+ }
+ if (!found)
+ printk(KERN_ERR "mux: Unknown entry offset 0x%x\n",
+ p->reg_offset);
+ p++;
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static void __init omap_mux_package_init_balls(struct omap_ball *b,
+ struct omap_mux *superset)
+{
+ while (b->reg_offset != OMAP_MUX_TERMINATOR) {
+ struct omap_mux *s = superset;
+ int found = 0;
+
+ while (s->reg_offset != OMAP_MUX_TERMINATOR) {
+ if (s->reg_offset == b->reg_offset) {
+ s->balls[0] = b->balls[0];
+ s->balls[1] = b->balls[1];
+ found++;
+ break;
+ }
+ s++;
+ }
+ if (!found)
+ printk(KERN_ERR "mux: Unknown ball offset 0x%x\n",
+ b->reg_offset);
+ b++;
+ }
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void omap_mux_package_init_balls(struct omap_ball *b,
+ struct omap_mux *superset)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __init omap_mux_setup(char *options)
+{
+ if (!options)
+ return 0;
+
+ omap_mux_options = options;
+
+ return 1;
+}
+__setup("omap_mux=", omap_mux_setup);
+
+/*
+ * Note that the omap_mux=some.signal1=0x1234,some.signal2=0x1234
+ * cmdline options only override the bootloader values.
+ * During development, please enable CONFIG_DEBUG_FS, and use the
+ * signal specific entries under debugfs.
+ */
+static void __init omap_mux_set_cmdline_signals(void)
+{
+ char *options, *next_opt, *token;
+
+ if (!omap_mux_options)
+ return;
+
+ options = kmalloc(strlen(omap_mux_options) + 1, GFP_KERNEL);
+ if (!options)
+ return;
+
+ strcpy(options, omap_mux_options);
+ next_opt = options;
+
+ while ((token = strsep(&next_opt, ",")) != NULL) {
+ char *keyval, *name;
+ unsigned long val;
+
+ keyval = token;
+ name = strsep(&keyval, "=");
+ if (name) {
+ int res;
+
+ res = strict_strtoul(keyval, 0x10, &val);
+ if (res < 0)
+ continue;
+
+ omap_mux_init_signal(name, (u16)val);
+ }
+ }
+
+ kfree(options);
+}
+
+static void __init omap_mux_set_board_signals(struct omap_board_mux *board_mux)
+{
+ while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
+ omap_mux_write(board_mux->value, board_mux->reg_offset);
+ board_mux++;
+ }
+}
+
+static int __init omap_mux_copy_names(struct omap_mux *src,
+ struct omap_mux *dst)
+{
+ int i;
+
+ for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
+ if (src->muxnames[i]) {
+ dst->muxnames[i] =
+ kmalloc(strlen(src->muxnames[i]) + 1,
+ GFP_KERNEL);
+ if (!dst->muxnames[i])
+ goto free;
+ strcpy(dst->muxnames[i], src->muxnames[i]);
+ }
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ for (i = 0; i < OMAP_MUX_NR_SIDES; i++) {
+ if (src->balls[i]) {
+ dst->balls[i] =
+ kmalloc(strlen(src->balls[i]) + 1,
+ GFP_KERNEL);
+ if (!dst->balls[i])
+ goto free;
+ strcpy(dst->balls[i], src->balls[i]);
+ }
+ }
+#endif
+
+ return 0;
+
+free:
+ omap_mux_free_names(dst);
+ return -ENOMEM;
+
+}
+
+#endif /* CONFIG_OMAP_MUX */
+
+static u16 omap_mux_get_by_gpio(int gpio)
+{
+ struct omap_mux_entry *e;
+ u16 offset = OMAP_MUX_TERMINATOR;
+
+ list_for_each_entry(e, &muxmodes, node) {
+ struct omap_mux *m = &e->mux;
+ if (m->gpio == gpio) {
+ offset = m->reg_offset;
+ break;
+ }
+ }
+
+ return offset;
+}
+
+/* Needed for dynamic muxing of GPIO pins for off-idle */
+u16 omap_mux_get_gpio(int gpio)
+{
+ u16 offset;
+
+ offset = omap_mux_get_by_gpio(gpio);
+ if (offset == OMAP_MUX_TERMINATOR) {
+ printk(KERN_ERR "mux: Could not get gpio%i\n", gpio);
+ return offset;
+ }
+
+ return omap_mux_read(offset);
+}
+
+/* Needed for dynamic muxing of GPIO pins for off-idle */
+void omap_mux_set_gpio(u16 val, int gpio)
+{
+ u16 offset;
+
+ offset = omap_mux_get_by_gpio(gpio);
+ if (offset == OMAP_MUX_TERMINATOR) {
+ printk(KERN_ERR "mux: Could not set gpio%i\n", gpio);
+ return;
+ }
+
+ omap_mux_write(val, offset);
+}
+
+static struct omap_mux * __init omap_mux_list_add(struct omap_mux *src)
+{
+ struct omap_mux_entry *entry;
+ struct omap_mux *m;
+
+ entry = kzalloc(sizeof(struct omap_mux_entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ m = &entry->mux;
+ memcpy(m, src, sizeof(struct omap_mux));
+
+#ifdef CONFIG_OMAP_MUX
+ if (omap_mux_copy_names(src, m)) {
+ kfree(entry);
+ return NULL;
}
+#endif
+
+ mutex_lock(&muxmode_mutex);
+ list_add_tail(&entry->node, &muxmodes);
+ mutex_unlock(&muxmode_mutex);
- return omap_mux_register(&arch_mux_cfg);
+ return m;
}
+/*
+ * Note if CONFIG_OMAP_MUX is not selected, we will only initialize
+ * the GPIO to mux offset mapping that is needed for dynamic muxing
+ * of GPIO pins for off-idle.
+ */
+static void __init omap_mux_init_list(struct omap_mux *superset)
+{
+ while (superset->reg_offset != OMAP_MUX_TERMINATOR) {
+ struct omap_mux *entry;
+
+#ifndef CONFIG_OMAP_MUX
+ /* Skip pins that are not muxed as GPIO by bootloader */
+ if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
+ superset++;
+ continue;
+ }
#endif
+
+ entry = omap_mux_list_add(superset);
+ if (!entry) {
+ printk(KERN_ERR "mux: Could not add entry\n");
+ return;
+ }
+ superset++;
+ }
+}
+
+int __init omap_mux_init(u32 mux_pbase, u32 mux_size,
+ struct omap_mux *superset,
+ struct omap_mux *package_subset,
+ struct omap_board_mux *board_mux,
+ struct omap_ball *package_balls)
+{
+ if (mux_base)
+ return -EBUSY;
+
+ mux_phys = mux_pbase;
+ mux_base = ioremap(mux_pbase, mux_size);
+ if (!mux_base) {
+ printk(KERN_ERR "mux: Could not ioremap\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_OMAP_MUX
+ omap_mux_package_fixup(package_subset, superset);
+ omap_mux_package_init_balls(package_balls, superset);
+ omap_mux_set_cmdline_signals();
+ omap_mux_set_board_signals(board_mux);
+#endif
+
+ omap_mux_init_list(superset);
+
+ return 0;
+}
+
+#endif /* CONFIG_ARCH_OMAP34XX */
+
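
Taken together, the rewritten mux.c replaces the static pin_config tables with a runtime list of omap_mux entries that can be addressed by GPIO number or by signal name, overridden from the omap_mux= command line, and inspected through debugfs under omap_mux/. A hedged usage sketch for board init code follows; board_mux_setup is a made-up helper, and the signal names are taken from the omap3 superset table added further below:

#include "mux.h"

static void __init board_mux_setup(void)
{
	/* Put the pad for gpio_170 into mode 4 (GPIO), input enabled */
	omap_mux_init_gpio(170, OMAP_PIN_INPUT);

	/* "mode0_name.signal" form: pick the sdmmc3_dat1 mode of the sdmmc2_dat5 pad */
	omap_mux_init_signal("sdmmc2_dat5.sdmmc3_dat1", OMAP_PIN_INPUT_PULLUP);

	/* A bare signal name is matched against all eight modes of every pad */
	omap_mux_init_signal("uart2_tx", OMAP_PIN_OUTPUT);
}

The same values can be set without recompiling, either at boot with omap_mux=sdmmc2_dat5.sdmmc3_dat1=0x118 or, with CONFIG_DEBUG_FS enabled, by writing a raw 16-bit value (mode bits included) to the per-signal file, e.g. echo 0x11b > /sys/kernel/debug/omap_mux/sdmmc2_dat5.
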
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
new file mode 100644
index 00000000000..d8b4d5ad227
--- /dev/null
+++ b/arch/arm/mach-omap2/mux.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2009 Nokia
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "mux34xx.h"
+
+#define OMAP_MUX_TERMINATOR 0xffff
+
+/* 34xx mux mode options for each pin. See TRM for options */
+#define OMAP_MUX_MODE0 0
+#define OMAP_MUX_MODE1 1
+#define OMAP_MUX_MODE2 2
+#define OMAP_MUX_MODE3 3
+#define OMAP_MUX_MODE4 4
+#define OMAP_MUX_MODE5 5
+#define OMAP_MUX_MODE6 6
+#define OMAP_MUX_MODE7 7
+
+/* 24xx/34xx mux bit defines */
+#define OMAP_PULL_ENA (1 << 3)
+#define OMAP_PULL_UP (1 << 4)
+#define OMAP_ALTELECTRICALSEL (1 << 5)
+
+/* 34xx specific mux bit defines */
+#define OMAP_INPUT_EN (1 << 8)
+#define OMAP_OFF_EN (1 << 9)
+#define OMAP_OFFOUT_EN (1 << 10)
+#define OMAP_OFFOUT_VAL (1 << 11)
+#define OMAP_OFF_PULL_EN (1 << 12)
+#define OMAP_OFF_PULL_UP (1 << 13)
+#define OMAP_WAKEUP_EN (1 << 14)
+
+/* Active pin states */
+#define OMAP_PIN_OUTPUT 0
+#define OMAP_PIN_INPUT OMAP_INPUT_EN
+#define OMAP_PIN_INPUT_PULLUP (OMAP_PULL_ENA | OMAP_INPUT_EN \
+ | OMAP_PULL_UP)
+#define OMAP_PIN_INPUT_PULLDOWN (OMAP_PULL_ENA | OMAP_INPUT_EN)
+
+/* Off mode states */
+#define OMAP_PIN_OFF_NONE 0
+#define OMAP_PIN_OFF_OUTPUT_HIGH (OMAP_OFF_EN | OMAP_OFFOUT_EN \
+ | OMAP_OFFOUT_VAL)
+#define OMAP_PIN_OFF_OUTPUT_LOW (OMAP_OFF_EN | OMAP_OFFOUT_EN)
+#define OMAP_PIN_OFF_INPUT_PULLUP (OMAP_OFF_EN | OMAP_OFF_PULL_EN \
+ | OMAP_OFF_PULL_UP)
+#define OMAP_PIN_OFF_INPUT_PULLDOWN (OMAP_OFF_EN | OMAP_OFF_PULL_EN)
+#define OMAP_PIN_OFF_WAKEUPENABLE OMAP_WAKEUP_EN
+
+#define OMAP_MODE_GPIO(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
+
+/* Flags for omap_mux_init */
+#define OMAP_PACKAGE_MASK 0xffff
+#define OMAP_PACKAGE_CBP 4 /* 515-pin 0.40 0.50 */
+#define OMAP_PACKAGE_CUS 3 /* 423-pin 0.65 */
+#define OMAP_PACKAGE_CBB 2 /* 515-pin 0.40 0.50 */
+#define OMAP_PACKAGE_CBC 1 /* 515-pin 0.50 0.65 */
+
+
+#define OMAP_MUX_NR_MODES 8 /* Available modes */
+#define OMAP_MUX_NR_SIDES 2 /* Bottom & top */
+
+/**
+ * struct omap_mux - data for omap mux register offset and its value
+ * @reg_offset: mux register offset from the mux base
+ * @gpio: GPIO number
+ * @muxnames: available signal modes for a ball
+ */
+struct omap_mux {
+ u16 reg_offset;
+ u16 gpio;
+#ifdef CONFIG_OMAP_MUX
+ char *muxnames[OMAP_MUX_NR_MODES];
+#ifdef CONFIG_DEBUG_FS
+ char *balls[OMAP_MUX_NR_SIDES];
+#endif
+#endif
+};
+
+/**
+ * struct omap_ball - data for balls on omap package
+ * @reg_offset: mux register offset from the mux base
+ * @balls: available balls on the package
+ */
+struct omap_ball {
+ u16 reg_offset;
+ char *balls[OMAP_MUX_NR_SIDES];
+};
+
+/**
+ * struct omap_board_mux - data for initializing mux registers
+ * @reg_offset: mux register offset from the mux base
+ * @mux_value: desired mux value to set
+ */
+struct omap_board_mux {
+ u16 reg_offset;
+ u16 value;
+};
+
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_ARCH_OMAP34XX)
+
+/**
+ * omap_mux_init_gpio - initialize a signal based on the GPIO number
+ * @gpio: GPIO number
+ * @val: Options for the mux register value
+ */
+int omap_mux_init_gpio(int gpio, int val);
+
+/**
+ * omap_mux_init_signal - initialize a signal based on the signal name
+ * @muxname: Mux name in mode0_name.signal_name format
+ * @val: Options for the mux register value
+ */
+int omap_mux_init_signal(char *muxname, int val);
+
+#else
+
+static inline int omap_mux_init_gpio(int gpio, int val)
+{
+ return 0;
+}
+static inline int omap_mux_init_signal(char *muxname, int val)
+{
+ return 0;
+}
+
+#endif
+
+/**
+ * omap_mux_get_gpio() - get mux register value based on GPIO number
+ * @gpio: GPIO number
+ *
+ */
+u16 omap_mux_get_gpio(int gpio);
+
+/**
+ * omap_mux_set_gpio() - set mux register value based on GPIO number
+ * @val: New mux register value
+ * @gpio: GPIO number
+ *
+ */
+void omap_mux_set_gpio(u16 val, int gpio);
+
+/**
+ * omap3_mux_init() - initialize mux system with board specific set
+ * @board_mux: Board specific mux table
+ * @flags: OMAP package type used for the board
+ */
+int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
+
+/**
+ * omap_mux_init - private mux init function, do not call
+ */
+int omap_mux_init(u32 mux_pbase, u32 mux_size,
+ struct omap_mux *superset,
+ struct omap_mux *package_subset,
+ struct omap_board_mux *board_mux,
+ struct omap_ball *package_balls);
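
The header above is what board files include to hand their package-specific mux table to the core. A minimal sketch of the expected call pattern, where the empty board_mux table, the board_peripherals_init name, and the CBB package choice are illustrative assumptions rather than anything taken from this patch:

#include "mux.h"

#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
	/* { .reg_offset = ..., .value = ... } per-pad overrides would go here */
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
#define board_mux	NULL
#endif

static void __init board_peripherals_init(void)
{
	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	/* ... register platform devices that rely on the muxed pads ... */
}
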
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
new file mode 100644
index 00000000000..68e0a595f9a
--- /dev/null
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -0,0 +1,2099 @@
+/*
+ * Copyright (C) 2009 Nokia
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "mux.h"
+
+#ifdef CONFIG_OMAP_MUX
+
+#define _OMAP3_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7) \
+{ \
+ .reg_offset = (OMAP3_CONTROL_PADCONF_##M0##_OFFSET), \
+ .gpio = (g), \
+ .muxnames = { m0, m1, m2, m3, m4, m5, m6, m7 }, \
+}
+
+#else
+
+#define _OMAP3_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7) \
+{ \
+ .reg_offset = (OMAP3_CONTROL_PADCONF_##M0##_OFFSET), \
+ .gpio = (g), \
+}
+
+#endif
+
+#define _OMAP3_BALLENTRY(M0, bb, bt) \
+{ \
+ .reg_offset = (OMAP3_CONTROL_PADCONF_##M0##_OFFSET), \
+ .balls = { bb, bt }, \
+}
+
+/*
+ * Superset of all mux modes for omap3
+ */
+static struct omap_mux __initdata omap3_muxmodes[] = {
+ _OMAP3_MUXENTRY(CAM_D0, 99,
+ "cam_d0", NULL, NULL, NULL,
+ "gpio_99", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D1, 100,
+ "cam_d1", NULL, NULL, NULL,
+ "gpio_100", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D10, 109,
+ "cam_d10", NULL, NULL, NULL,
+ "gpio_109", "hw_dbg8", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D11, 110,
+ "cam_d11", NULL, NULL, NULL,
+ "gpio_110", "hw_dbg9", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D2, 101,
+ "cam_d2", NULL, NULL, NULL,
+ "gpio_101", "hw_dbg4", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D3, 102,
+ "cam_d3", NULL, NULL, NULL,
+ "gpio_102", "hw_dbg5", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D4, 103,
+ "cam_d4", NULL, NULL, NULL,
+ "gpio_103", "hw_dbg6", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D5, 104,
+ "cam_d5", NULL, NULL, NULL,
+ "gpio_104", "hw_dbg7", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D6, 105,
+ "cam_d6", NULL, NULL, NULL,
+ "gpio_105", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D7, 106,
+ "cam_d7", NULL, NULL, NULL,
+ "gpio_106", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D8, 107,
+ "cam_d8", NULL, NULL, NULL,
+ "gpio_107", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D9, 108,
+ "cam_d9", NULL, NULL, NULL,
+ "gpio_108", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_FLD, 98,
+ "cam_fld", NULL, "cam_global_reset", NULL,
+ "gpio_98", "hw_dbg3", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_HS, 94,
+ "cam_hs", NULL, NULL, NULL,
+ "gpio_94", "hw_dbg0", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_PCLK, 97,
+ "cam_pclk", NULL, NULL, NULL,
+ "gpio_97", "hw_dbg2", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_STROBE, 126,
+ "cam_strobe", NULL, NULL, NULL,
+ "gpio_126", "hw_dbg11", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_VS, 95,
+ "cam_vs", NULL, NULL, NULL,
+ "gpio_95", "hw_dbg1", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_WEN, 167,
+ "cam_wen", NULL, "cam_shutter", NULL,
+ "gpio_167", "hw_dbg10", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_XCLKA, 96,
+ "cam_xclka", NULL, NULL, NULL,
+ "gpio_96", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_XCLKB, 111,
+ "cam_xclkb", NULL, NULL, NULL,
+ "gpio_111", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CSI2_DX0, 112,
+ "csi2_dx0", NULL, NULL, NULL,
+ "gpio_112", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CSI2_DX1, 114,
+ "csi2_dx1", NULL, NULL, NULL,
+ "gpio_114", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CSI2_DY0, 113,
+ "csi2_dy0", NULL, NULL, NULL,
+ "gpio_113", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CSI2_DY1, 115,
+ "csi2_dy1", NULL, NULL, NULL,
+ "gpio_115", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_ACBIAS, 69,
+ "dss_acbias", NULL, NULL, NULL,
+ "gpio_69", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA0, 70,
+ "dss_data0", NULL, "uart1_cts", NULL,
+ "gpio_70", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA1, 71,
+ "dss_data1", NULL, "uart1_rts", NULL,
+ "gpio_71", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA10, 80,
+ "dss_data10", NULL, NULL, NULL,
+ "gpio_80", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA11, 81,
+ "dss_data11", NULL, NULL, NULL,
+ "gpio_81", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA12, 82,
+ "dss_data12", NULL, NULL, NULL,
+ "gpio_82", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA13, 83,
+ "dss_data13", NULL, NULL, NULL,
+ "gpio_83", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA14, 84,
+ "dss_data14", NULL, NULL, NULL,
+ "gpio_84", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA15, 85,
+ "dss_data15", NULL, NULL, NULL,
+ "gpio_85", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA16, 86,
+ "dss_data16", NULL, NULL, NULL,
+ "gpio_86", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA17, 87,
+ "dss_data17", NULL, NULL, NULL,
+ "gpio_87", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA18, 88,
+ "dss_data18", NULL, "mcspi3_clk", "dss_data0",
+ "gpio_88", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA19, 89,
+ "dss_data19", NULL, "mcspi3_simo", "dss_data1",
+ "gpio_89", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA20, 90,
+ "dss_data20", NULL, "mcspi3_somi", "dss_data2",
+ "gpio_90", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA21, 91,
+ "dss_data21", NULL, "mcspi3_cs0", "dss_data3",
+ "gpio_91", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA22, 92,
+ "dss_data22", NULL, "mcspi3_cs1", "dss_data4",
+ "gpio_92", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA23, 93,
+ "dss_data23", NULL, NULL, "dss_data5",
+ "gpio_93", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA2, 72,
+ "dss_data2", NULL, NULL, NULL,
+ "gpio_72", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA3, 73,
+ "dss_data3", NULL, NULL, NULL,
+ "gpio_73", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA4, 74,
+ "dss_data4", NULL, "uart3_rx_irrx", NULL,
+ "gpio_74", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA5, 75,
+ "dss_data5", NULL, "uart3_tx_irtx", NULL,
+ "gpio_75", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA6, 76,
+ "dss_data6", NULL, "uart1_tx", NULL,
+ "gpio_76", "hw_dbg14", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA7, 77,
+ "dss_data7", NULL, "uart1_rx", NULL,
+ "gpio_77", "hw_dbg15", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA8, 78,
+ "dss_data8", NULL, NULL, NULL,
+ "gpio_78", "hw_dbg16", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA9, 79,
+ "dss_data9", NULL, NULL, NULL,
+ "gpio_79", "hw_dbg17", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_HSYNC, 67,
+ "dss_hsync", NULL, NULL, NULL,
+ "gpio_67", "hw_dbg13", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_PCLK, 66,
+ "dss_pclk", NULL, NULL, NULL,
+ "gpio_66", "hw_dbg12", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_VSYNC, 68,
+ "dss_vsync", NULL, NULL, NULL,
+ "gpio_68", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(ETK_CLK, 12,
+ "etk_clk", "mcbsp5_clkx", "sdmmc3_clk", "hsusb1_stp",
+ "gpio_12", "mm1_rxdp", "hsusb1_tll_stp", "hw_dbg0"),
+ _OMAP3_MUXENTRY(ETK_CTL, 13,
+ "etk_ctl", NULL, "sdmmc3_cmd", "hsusb1_clk",
+ "gpio_13", NULL, "hsusb1_tll_clk", "hw_dbg1"),
+ _OMAP3_MUXENTRY(ETK_D0, 14,
+ "etk_d0", "mcspi3_simo", "sdmmc3_dat4", "hsusb1_data0",
+ "gpio_14", "mm1_rxrcv", "hsusb1_tll_data0", "hw_dbg2"),
+ _OMAP3_MUXENTRY(ETK_D1, 15,
+ "etk_d1", "mcspi3_somi", NULL, "hsusb1_data1",
+ "gpio_15", "mm1_txse0", "hsusb1_tll_data1", "hw_dbg3"),
+ _OMAP3_MUXENTRY(ETK_D10, 24,
+ "etk_d10", NULL, "uart1_rx", "hsusb2_clk",
+ "gpio_24", NULL, "hsusb2_tll_clk", "hw_dbg12"),
+ _OMAP3_MUXENTRY(ETK_D11, 25,
+ "etk_d11", NULL, NULL, "hsusb2_stp",
+ "gpio_25", "mm2_rxdp", "hsusb2_tll_stp", "hw_dbg13"),
+ _OMAP3_MUXENTRY(ETK_D12, 26,
+ "etk_d12", NULL, NULL, "hsusb2_dir",
+ "gpio_26", NULL, "hsusb2_tll_dir", "hw_dbg14"),
+ _OMAP3_MUXENTRY(ETK_D13, 27,
+ "etk_d13", NULL, NULL, "hsusb2_nxt",
+ "gpio_27", "mm2_rxdm", "hsusb2_tll_nxt", "hw_dbg15"),
+ _OMAP3_MUXENTRY(ETK_D14, 28,
+ "etk_d14", NULL, NULL, "hsusb2_data0",
+ "gpio_28", "mm2_rxrcv", "hsusb2_tll_data0", "hw_dbg16"),
+ _OMAP3_MUXENTRY(ETK_D15, 29,
+ "etk_d15", NULL, NULL, "hsusb2_data1",
+ "gpio_29", "mm2_txse0", "hsusb2_tll_data1", "hw_dbg17"),
+ _OMAP3_MUXENTRY(ETK_D2, 16,
+ "etk_d2", "mcspi3_cs0", NULL, "hsusb1_data2",
+ "gpio_16", "mm1_txdat", "hsusb1_tll_data2", "hw_dbg4"),
+ _OMAP3_MUXENTRY(ETK_D3, 17,
+ "etk_d3", "mcspi3_clk", "sdmmc3_dat3", "hsusb1_data7",
+ "gpio_17", NULL, "hsusb1_tll_data7", "hw_dbg5"),
+ _OMAP3_MUXENTRY(ETK_D4, 18,
+ "etk_d4", "mcbsp5_dr", "sdmmc3_dat0", "hsusb1_data4",
+ "gpio_18", NULL, "hsusb1_tll_data4", "hw_dbg6"),
+ _OMAP3_MUXENTRY(ETK_D5, 19,
+ "etk_d5", "mcbsp5_fsx", "sdmmc3_dat1", "hsusb1_data5",
+ "gpio_19", NULL, "hsusb1_tll_data5", "hw_dbg7"),
+ _OMAP3_MUXENTRY(ETK_D6, 20,
+ "etk_d6", "mcbsp5_dx", "sdmmc3_dat2", "hsusb1_data6",
+ "gpio_20", NULL, "hsusb1_tll_data6", "hw_dbg8"),
+ _OMAP3_MUXENTRY(ETK_D7, 21,
+ "etk_d7", "mcspi3_cs1", "sdmmc3_dat7", "hsusb1_data3",
+ "gpio_21", "mm1_txen_n", "hsusb1_tll_data3", "hw_dbg9"),
+ _OMAP3_MUXENTRY(ETK_D8, 22,
+ "etk_d8", "sys_drm_msecure", "sdmmc3_dat6", "hsusb1_dir",
+ "gpio_22", NULL, "hsusb1_tll_dir", "hw_dbg10"),
+ _OMAP3_MUXENTRY(ETK_D9, 23,
+ "etk_d9", "sys_secure_indicator", "sdmmc3_dat5", "hsusb1_nxt",
+ "gpio_23", "mm1_rxdm", "hsusb1_tll_nxt", "hw_dbg11"),
+ _OMAP3_MUXENTRY(GPMC_A1, 34,
+ "gpmc_a1", NULL, NULL, NULL,
+ "gpio_34", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A10, 43,
+ "gpmc_a10", "sys_ndmareq3", NULL, NULL,
+ "gpio_43", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A2, 35,
+ "gpmc_a2", NULL, NULL, NULL,
+ "gpio_35", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A3, 36,
+ "gpmc_a3", NULL, NULL, NULL,
+ "gpio_36", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A4, 37,
+ "gpmc_a4", NULL, NULL, NULL,
+ "gpio_37", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A5, 38,
+ "gpmc_a5", NULL, NULL, NULL,
+ "gpio_38", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A6, 39,
+ "gpmc_a6", NULL, NULL, NULL,
+ "gpio_39", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A7, 40,
+ "gpmc_a7", NULL, NULL, NULL,
+ "gpio_40", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A8, 41,
+ "gpmc_a8", NULL, NULL, NULL,
+ "gpio_41", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_A9, 42,
+ "gpmc_a9", "sys_ndmareq2", NULL, NULL,
+ "gpio_42", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_CLK, 59,
+ "gpmc_clk", NULL, NULL, NULL,
+ "gpio_59", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D10, 46,
+ "gpmc_d10", NULL, NULL, NULL,
+ "gpio_46", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D11, 47,
+ "gpmc_d11", NULL, NULL, NULL,
+ "gpio_47", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D12, 48,
+ "gpmc_d12", NULL, NULL, NULL,
+ "gpio_48", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D13, 49,
+ "gpmc_d13", NULL, NULL, NULL,
+ "gpio_49", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D14, 50,
+ "gpmc_d14", NULL, NULL, NULL,
+ "gpio_50", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D15, 51,
+ "gpmc_d15", NULL, NULL, NULL,
+ "gpio_51", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D8, 44,
+ "gpmc_d8", NULL, NULL, NULL,
+ "gpio_44", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_D9, 45,
+ "gpmc_d9", NULL, NULL, NULL,
+ "gpio_45", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NBE0_CLE, 60,
+ "gpmc_nbe0_cle", NULL, NULL, NULL,
+ "gpio_60", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NBE1, 61,
+ "gpmc_nbe1", NULL, NULL, NULL,
+ "gpio_61", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NCS1, 52,
+ "gpmc_ncs1", NULL, NULL, NULL,
+ "gpio_52", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NCS2, 53,
+ "gpmc_ncs2", NULL, NULL, NULL,
+ "gpio_53", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NCS3, 54,
+ "gpmc_ncs3", "sys_ndmareq0", NULL, NULL,
+ "gpio_54", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NCS4, 55,
+ "gpmc_ncs4", "sys_ndmareq1", "mcbsp4_clkx", "gpt9_pwm_evt",
+ "gpio_55", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NCS5, 56,
+ "gpmc_ncs5", "sys_ndmareq2", "mcbsp4_dr", "gpt10_pwm_evt",
+ "gpio_56", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NCS6, 57,
+ "gpmc_ncs6", "sys_ndmareq3", "mcbsp4_dx", "gpt11_pwm_evt",
+ "gpio_57", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NCS7, 58,
+ "gpmc_ncs7", "gpmc_io_dir", "mcbsp4_fsx", "gpt8_pwm_evt",
+ "gpio_58", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_NWP, 62,
+ "gpmc_nwp", NULL, NULL, NULL,
+ "gpio_62", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_WAIT1, 63,
+ "gpmc_wait1", NULL, NULL, NULL,
+ "gpio_63", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_WAIT2, 64,
+ "gpmc_wait2", NULL, NULL, NULL,
+ "gpio_64", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_WAIT3, 65,
+ "gpmc_wait3", "sys_ndmareq1", NULL, NULL,
+ "gpio_65", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HDQ_SIO, 170,
+ "hdq_sio", "sys_altclk", "i2c2_sccbe", "i2c3_sccbe",
+ "gpio_170", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_CLK, 120,
+ "hsusb0_clk", NULL, NULL, NULL,
+ "gpio_120", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA0, 125,
+ "hsusb0_data0", NULL, "uart3_tx_irtx", NULL,
+ "gpio_125", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA1, 130,
+ "hsusb0_data1", NULL, "uart3_rx_irrx", NULL,
+ "gpio_130", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA2, 131,
+ "hsusb0_data2", NULL, "uart3_rts_sd", NULL,
+ "gpio_131", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA3, 169,
+ "hsusb0_data3", NULL, "uart3_cts_rctx", NULL,
+ "gpio_169", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA4, 188,
+ "hsusb0_data4", NULL, NULL, NULL,
+ "gpio_188", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA5, 189,
+ "hsusb0_data5", NULL, NULL, NULL,
+ "gpio_189", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA6, 190,
+ "hsusb0_data6", NULL, NULL, NULL,
+ "gpio_190", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA7, 191,
+ "hsusb0_data7", NULL, NULL, NULL,
+ "gpio_191", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DIR, 122,
+ "hsusb0_dir", NULL, NULL, NULL,
+ "gpio_122", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_NXT, 124,
+ "hsusb0_nxt", NULL, NULL, NULL,
+ "gpio_124", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_STP, 121,
+ "hsusb0_stp", NULL, NULL, NULL,
+ "gpio_121", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(I2C2_SCL, 168,
+ "i2c2_scl", NULL, NULL, NULL,
+ "gpio_168", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(I2C2_SDA, 183,
+ "i2c2_sda", NULL, NULL, NULL,
+ "gpio_183", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(I2C3_SCL, 184,
+ "i2c3_scl", NULL, NULL, NULL,
+ "gpio_184", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(I2C3_SDA, 185,
+ "i2c3_sda", NULL, NULL, NULL,
+ "gpio_185", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(I2C4_SCL, 0,
+ "i2c4_scl", "sys_nvmode1", NULL, NULL,
+ NULL, NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(I2C4_SDA, 0,
+ "i2c4_sda", "sys_nvmode2", NULL, NULL,
+ NULL, NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(JTAG_EMU0, 11,
+ "jtag_emu0", NULL, NULL, NULL,
+ "gpio_11", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(JTAG_EMU1, 31,
+ "jtag_emu1", NULL, NULL, NULL,
+ "gpio_31", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_CLKR, 156,
+ "mcbsp1_clkr", "mcspi4_clk", NULL, NULL,
+ "gpio_156", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_CLKX, 162,
+ "mcbsp1_clkx", NULL, "mcbsp3_clkx", NULL,
+ "gpio_162", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_DR, 159,
+ "mcbsp1_dr", "mcspi4_somi", "mcbsp3_dr", NULL,
+ "gpio_159", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_DX, 158,
+ "mcbsp1_dx", "mcspi4_simo", "mcbsp3_dx", NULL,
+ "gpio_158", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_FSR, 157,
+ "mcbsp1_fsr", NULL, "cam_global_reset", NULL,
+ "gpio_157", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_FSX, 161,
+ "mcbsp1_fsx", "mcspi4_cs0", "mcbsp3_fsx", NULL,
+ "gpio_161", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP2_CLKX, 117,
+ "mcbsp2_clkx", NULL, NULL, NULL,
+ "gpio_117", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP2_DR, 118,
+ "mcbsp2_dr", NULL, NULL, NULL,
+ "gpio_118", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP2_DX, 119,
+ "mcbsp2_dx", NULL, NULL, NULL,
+ "gpio_119", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP2_FSX, 116,
+ "mcbsp2_fsx", NULL, NULL, NULL,
+ "gpio_116", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP3_CLKX, 142,
+ "mcbsp3_clkx", "uart2_tx", NULL, NULL,
+ "gpio_142", "hsusb3_tll_data6", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP3_DR, 141,
+ "mcbsp3_dr", "uart2_rts", NULL, NULL,
+ "gpio_141", "hsusb3_tll_data5", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP3_DX, 140,
+ "mcbsp3_dx", "uart2_cts", NULL, NULL,
+ "gpio_140", "hsusb3_tll_data4", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP3_FSX, 143,
+ "mcbsp3_fsx", "uart2_rx", NULL, NULL,
+ "gpio_143", "hsusb3_tll_data7", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_CLKX, 152,
+ "mcbsp4_clkx", NULL, NULL, NULL,
+ "gpio_152", "hsusb3_tll_data1", "mm3_txse0", "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_DR, 153,
+ "mcbsp4_dr", NULL, NULL, NULL,
+ "gpio_153", "hsusb3_tll_data0", "mm3_rxrcv", "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_DX, 154,
+ "mcbsp4_dx", NULL, NULL, NULL,
+ "gpio_154", "hsusb3_tll_data2", "mm3_txdat", "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_FSX, 155,
+ "mcbsp4_fsx", NULL, NULL, NULL,
+ "gpio_155", "hsusb3_tll_data3", "mm3_txen_n", "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP_CLKS, 160,
+ "mcbsp_clks", NULL, "cam_shutter", NULL,
+ "gpio_160", "uart1_cts", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_CLK, 171,
+ "mcspi1_clk", "sdmmc2_dat4", NULL, NULL,
+ "gpio_171", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_CS0, 174,
+ "mcspi1_cs0", "sdmmc2_dat7", NULL, NULL,
+ "gpio_174", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_CS1, 175,
+ "mcspi1_cs1", NULL, NULL, "sdmmc3_cmd",
+ "gpio_175", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_CS2, 176,
+ "mcspi1_cs2", NULL, NULL, "sdmmc3_clk",
+ "gpio_176", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_CS3, 177,
+ "mcspi1_cs3", NULL, "hsusb2_tll_data2", "hsusb2_data2",
+ "gpio_177", "mm2_txdat", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_SIMO, 172,
+ "mcspi1_simo", "sdmmc2_dat5", NULL, NULL,
+ "gpio_172", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_SOMI, 173,
+ "mcspi1_somi", "sdmmc2_dat6", NULL, NULL,
+ "gpio_173", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI2_CLK, 178,
+ "mcspi2_clk", NULL, "hsusb2_tll_data7", "hsusb2_data7",
+ "gpio_178", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI2_CS0, 181,
+ "mcspi2_cs0", "gpt11_pwm_evt",
+ "hsusb2_tll_data6", "hsusb2_data6",
+ "gpio_181", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI2_CS1, 182,
+ "mcspi2_cs1", "gpt8_pwm_evt",
+ "hsusb2_tll_data3", "hsusb2_data3",
+ "gpio_182", "mm2_txen_n", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI2_SIMO, 179,
+ "mcspi2_simo", "gpt9_pwm_evt",
+ "hsusb2_tll_data4", "hsusb2_data4",
+ "gpio_179", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI2_SOMI, 180,
+ "mcspi2_somi", "gpt10_pwm_evt",
+ "hsusb2_tll_data5", "hsusb2_data5",
+ "gpio_180", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_CLK, 120,
+ "sdmmc1_clk", NULL, NULL, NULL,
+ "gpio_120", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_CMD, 121,
+ "sdmmc1_cmd", NULL, NULL, NULL,
+ "gpio_121", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT0, 122,
+ "sdmmc1_dat0", NULL, NULL, NULL,
+ "gpio_122", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT1, 123,
+ "sdmmc1_dat1", NULL, NULL, NULL,
+ "gpio_123", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT2, 124,
+ "sdmmc1_dat2", NULL, NULL, NULL,
+ "gpio_124", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT3, 125,
+ "sdmmc1_dat3", NULL, NULL, NULL,
+ "gpio_125", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT4, 126,
+ "sdmmc1_dat4", NULL, "sim_io", NULL,
+ "gpio_126", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT5, 127,
+ "sdmmc1_dat5", NULL, "sim_clk", NULL,
+ "gpio_127", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT6, 128,
+ "sdmmc1_dat6", NULL, "sim_pwrctrl", NULL,
+ "gpio_128", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT7, 129,
+ "sdmmc1_dat7", NULL, "sim_rst", NULL,
+ "gpio_129", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_CLK, 130,
+ "sdmmc2_clk", "mcspi3_clk", NULL, NULL,
+ "gpio_130", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_CMD, 131,
+ "sdmmc2_cmd", "mcspi3_simo", NULL, NULL,
+ "gpio_131", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT0, 132,
+ "sdmmc2_dat0", "mcspi3_somi", NULL, NULL,
+ "gpio_132", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT1, 133,
+ "sdmmc2_dat1", NULL, NULL, NULL,
+ "gpio_133", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT2, 134,
+ "sdmmc2_dat2", "mcspi3_cs1", NULL, NULL,
+ "gpio_134", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT3, 135,
+ "sdmmc2_dat3", "mcspi3_cs0", NULL, NULL,
+ "gpio_135", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT4, 136,
+ "sdmmc2_dat4", "sdmmc2_dir_dat0", NULL, "sdmmc3_dat0",
+ "gpio_136", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT5, 137,
+ "sdmmc2_dat5", "sdmmc2_dir_dat1",
+ "cam_global_reset", "sdmmc3_dat1",
+ "gpio_137", "hsusb3_tll_stp", "mm3_rxdp", "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT6, 138,
+ "sdmmc2_dat6", "sdmmc2_dir_cmd", "cam_shutter", "sdmmc3_dat2",
+ "gpio_138", "hsusb3_tll_dir", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT7, 139,
+ "sdmmc2_dat7", "sdmmc2_clkin", NULL, "sdmmc3_dat3",
+ "gpio_139", "hsusb3_tll_nxt", "mm3_rxdm", "safe_mode"),
+ _OMAP3_MUXENTRY(SDRC_CKE0, 0,
+ "sdrc_cke0", NULL, NULL, NULL,
+ NULL, NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDRC_CKE1, 0,
+ "sdrc_cke1", NULL, NULL, NULL,
+ NULL, NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT0, 2,
+ "sys_boot0", NULL, NULL, NULL,
+ "gpio_2", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT1, 3,
+ "sys_boot1", NULL, NULL, NULL,
+ "gpio_3", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT2, 4,
+ "sys_boot2", NULL, NULL, NULL,
+ "gpio_4", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT3, 5,
+ "sys_boot3", NULL, NULL, NULL,
+ "gpio_5", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT4, 6,
+ "sys_boot4", "sdmmc2_dir_dat2", NULL, NULL,
+ "gpio_6", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT5, 7,
+ "sys_boot5", "sdmmc2_dir_dat3", NULL, NULL,
+ "gpio_7", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT6, 8,
+ "sys_boot6", NULL, NULL, NULL,
+ "gpio_8", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_CLKOUT1, 10,
+ "sys_clkout1", NULL, NULL, NULL,
+ "gpio_10", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_CLKOUT2, 186,
+ "sys_clkout2", NULL, NULL, NULL,
+ "gpio_186", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_CLKREQ, 1,
+ "sys_clkreq", NULL, NULL, NULL,
+ "gpio_1", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_NIRQ, 0,
+ "sys_nirq", NULL, NULL, NULL,
+ "gpio_0", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_NRESWARM, 30,
+ "sys_nreswarm", NULL, NULL, NULL,
+ "gpio_30", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_OFF_MODE, 9,
+ "sys_off_mode", NULL, NULL, NULL,
+ "gpio_9", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_CTS, 150,
+ "uart1_cts", NULL, NULL, NULL,
+ "gpio_150", "hsusb3_tll_clk", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_RTS, 149,
+ "uart1_rts", NULL, NULL, NULL,
+ "gpio_149", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_RX, 151,
+ "uart1_rx", NULL, "mcbsp1_clkr", "mcspi4_clk",
+ "gpio_151", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_TX, 148,
+ "uart1_tx", NULL, NULL, NULL,
+ "gpio_148", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART2_CTS, 144,
+ "uart2_cts", "mcbsp3_dx", "gpt9_pwm_evt", NULL,
+ "gpio_144", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART2_RTS, 145,
+ "uart2_rts", "mcbsp3_dr", "gpt10_pwm_evt", NULL,
+ "gpio_145", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART2_RX, 147,
+ "uart2_rx", "mcbsp3_fsx", "gpt8_pwm_evt", NULL,
+ "gpio_147", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART2_TX, 146,
+ "uart2_tx", "mcbsp3_clkx", "gpt11_pwm_evt", NULL,
+ "gpio_146", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART3_CTS_RCTX, 163,
+ "uart3_cts_rctx", NULL, NULL, NULL,
+ "gpio_163", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART3_RTS_SD, 164,
+ "uart3_rts_sd", NULL, NULL, NULL,
+ "gpio_164", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART3_RX_IRRX, 165,
+ "uart3_rx_irrx", NULL, NULL, NULL,
+ "gpio_165", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART3_TX_IRTX, 166,
+ "uart3_tx_irtx", NULL, NULL, NULL,
+ "gpio_166", NULL, NULL, "safe_mode"),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
+/*
+ * Signals different on CBC package compared to the superset
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBC)
+struct omap_mux __initdata omap3_cbc_subset[] = {
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap3_cbc_subset NULL
+#endif
+
+/*
+ * Balls for CBC package
+ * 515-pin s-PBGA Package, 0.65mm Ball Pitch (Top), 0.50mm Ball Pitch (Bottom)
+ *
+ * FIXME: What's up with the outdated TI documentation? See:
+ *
+ * http://wiki.davincidsp.com/index.php/Datasheet_Errata_for_OMAP35x_CBC_Package
+ * http://community.ti.com/forums/t/10982.aspx
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \
+ && defined(CONFIG_OMAP_PACKAGE_CBC)
+struct omap_ball __initdata omap3_cbc_ball[] = {
+ _OMAP3_BALLENTRY(CAM_D0, "ae16", NULL),
+ _OMAP3_BALLENTRY(CAM_D1, "ae15", NULL),
+ _OMAP3_BALLENTRY(CAM_D10, "d25", NULL),
+ _OMAP3_BALLENTRY(CAM_D11, "e26", NULL),
+ _OMAP3_BALLENTRY(CAM_D2, "a24", NULL),
+ _OMAP3_BALLENTRY(CAM_D3, "b24", NULL),
+ _OMAP3_BALLENTRY(CAM_D4, "d24", NULL),
+ _OMAP3_BALLENTRY(CAM_D5, "c24", NULL),
+ _OMAP3_BALLENTRY(CAM_D6, "p25", NULL),
+ _OMAP3_BALLENTRY(CAM_D7, "p26", NULL),
+ _OMAP3_BALLENTRY(CAM_D8, "n25", NULL),
+ _OMAP3_BALLENTRY(CAM_D9, "n26", NULL),
+ _OMAP3_BALLENTRY(CAM_FLD, "b23", NULL),
+ _OMAP3_BALLENTRY(CAM_HS, "c23", NULL),
+ _OMAP3_BALLENTRY(CAM_PCLK, "c26", NULL),
+ _OMAP3_BALLENTRY(CAM_STROBE, "d26", NULL),
+ _OMAP3_BALLENTRY(CAM_VS, "d23", NULL),
+ _OMAP3_BALLENTRY(CAM_WEN, "a23", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKA, "c25", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKB, "e25", NULL),
+ _OMAP3_BALLENTRY(CSI2_DX0, "ad17", NULL),
+ _OMAP3_BALLENTRY(CSI2_DX1, "ae18", NULL),
+ _OMAP3_BALLENTRY(CSI2_DY0, "ad16", NULL),
+ _OMAP3_BALLENTRY(CSI2_DY1, "ae17", NULL),
+ _OMAP3_BALLENTRY(DSS_ACBIAS, "f26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA0, "ae21", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA1, "ae22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA10, "ac26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA11, "ad26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA12, "aa25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA13, "y25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA14, "aa26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA15, "ab26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA16, "l25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA17, "l26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA18, "m24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA19, "m26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA2, "ae23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA20, "f25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA21, "n24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA22, "ac25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA23, "ab25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA3, "ae24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA4, "ad23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA5, "ad24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA6, "g26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA7, "h25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA8, "h26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA9, "j26", NULL),
+ _OMAP3_BALLENTRY(DSS_HSYNC, "k24", NULL),
+ _OMAP3_BALLENTRY(DSS_PCLK, "g25", NULL),
+ _OMAP3_BALLENTRY(DSS_VSYNC, "m25", NULL),
+ _OMAP3_BALLENTRY(ETK_CLK, "ab2", NULL),
+ _OMAP3_BALLENTRY(ETK_CTL, "ab3", NULL),
+ _OMAP3_BALLENTRY(ETK_D0, "ac3", NULL),
+ _OMAP3_BALLENTRY(ETK_D1, "ad4", NULL),
+ _OMAP3_BALLENTRY(ETK_D10, "ae4", NULL),
+ _OMAP3_BALLENTRY(ETK_D11, "af6", NULL),
+ _OMAP3_BALLENTRY(ETK_D12, "ae6", NULL),
+ _OMAP3_BALLENTRY(ETK_D13, "af7", NULL),
+ _OMAP3_BALLENTRY(ETK_D14, "af9", NULL),
+ _OMAP3_BALLENTRY(ETK_D15, "ae9", NULL),
+ _OMAP3_BALLENTRY(ETK_D2, "ad3", NULL),
+ _OMAP3_BALLENTRY(ETK_D3, "aa3", NULL),
+ _OMAP3_BALLENTRY(ETK_D4, "y3", NULL),
+ _OMAP3_BALLENTRY(ETK_D5, "ab1", NULL),
+ _OMAP3_BALLENTRY(ETK_D6, "ae3", NULL),
+ _OMAP3_BALLENTRY(ETK_D7, "ad2", NULL),
+ _OMAP3_BALLENTRY(ETK_D8, "aa4", NULL),
+ _OMAP3_BALLENTRY(ETK_D9, "v2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A1, "j2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A10, "d2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A2, "h1", NULL),
+ _OMAP3_BALLENTRY(GPMC_A3, "h2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A4, "g2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A5, "f1", NULL),
+ _OMAP3_BALLENTRY(GPMC_A6, "f2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A7, "e1", NULL),
+ _OMAP3_BALLENTRY(GPMC_A8, "e2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A9, "d1", NULL),
+ _OMAP3_BALLENTRY(GPMC_CLK, "n1", "l1"),
+ _OMAP3_BALLENTRY(GPMC_D10, "t1", "n1"),
+ _OMAP3_BALLENTRY(GPMC_D11, "u2", "p2"),
+ _OMAP3_BALLENTRY(GPMC_D12, "u1", "p1"),
+ _OMAP3_BALLENTRY(GPMC_D13, "p1", "m1"),
+ _OMAP3_BALLENTRY(GPMC_D14, "l2", "j2"),
+ _OMAP3_BALLENTRY(GPMC_D15, "m2", "k2"),
+ _OMAP3_BALLENTRY(GPMC_D8, "v1", "r1"),
+ _OMAP3_BALLENTRY(GPMC_D9, "y1", "t1"),
+ _OMAP3_BALLENTRY(GPMC_NBE0_CLE, "k2", NULL),
+ _OMAP3_BALLENTRY(GPMC_NBE1, "j1", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS1, "ad1", "w1"),
+ _OMAP3_BALLENTRY(GPMC_NCS2, "a3", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS3, "b6", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS4, "b4", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS5, "c4", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS6, "b5", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS7, "c5", NULL),
+ _OMAP3_BALLENTRY(GPMC_NWP, "ac6", "y5"),
+ _OMAP3_BALLENTRY(GPMC_WAIT1, "ac8", "y8"),
+ _OMAP3_BALLENTRY(GPMC_WAIT2, "b3", NULL),
+ _OMAP3_BALLENTRY(GPMC_WAIT3, "c6", NULL),
+ _OMAP3_BALLENTRY(HDQ_SIO, "j23", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_CLK, "w19", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA0, "v20", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA1, "y20", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA2, "v18", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA3, "w20", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA4, "w17", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA5, "y18", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA6, "y19", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA7, "y17", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DIR, "v19", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_NXT, "w18", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_STP, "u20", NULL),
+ _OMAP3_BALLENTRY(I2C2_SCL, "c2", NULL),
+ _OMAP3_BALLENTRY(I2C2_SDA, "c1", NULL),
+ _OMAP3_BALLENTRY(I2C3_SCL, "ab4", NULL),
+ _OMAP3_BALLENTRY(I2C3_SDA, "ac4", NULL),
+ _OMAP3_BALLENTRY(I2C4_SCL, "ad15", NULL),
+ _OMAP3_BALLENTRY(I2C4_SDA, "w16", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU0, "y15", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU1, "y14", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKR, "u19", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKX, "t17", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DR, "t20", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DX, "u17", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSR, "v17", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSX, "p20", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_CLKX, "r18", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DR, "t18", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DX, "r19", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_FSX, "u18", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_CLKX, "u3", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DR, "n3", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DX, "p3", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_FSX, "w3", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_CLKX, "v3", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_DR, "u4", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_DX, "r3", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_FSX, "t3", NULL),
+ _OMAP3_BALLENTRY(MCBSP_CLKS, "t19", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CLK, "p9", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS0, "r7", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS1, "r8", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS2, "r9", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS3, "t8", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SIMO, "p8", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SOMI, "p7", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CLK, "w7", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS0, "v8", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS1, "v9", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SIMO, "w8", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SOMI, "u8", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CLK, "n19", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CMD, "l18", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT0, "m19", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT1, "m18", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT2, "k18", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT3, "n20", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT4, "m20", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT5, "p17", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT6, "p18", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT7, "p19", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CLK, "w10", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CMD, "r10", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT0, "t10", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT1, "t9", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT2, "u10", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT3, "u9", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT4, "v10", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT5, "m3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT6, "l3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT7, "k3", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT0, "f3", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT1, "d3", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT2, "c3", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT3, "e3", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT4, "e4", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT5, "g3", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT6, "d4", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT1, "ae14", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT2, "w11", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKREQ, "w15", NULL),
+ _OMAP3_BALLENTRY(SYS_NIRQ, "v16", NULL),
+ _OMAP3_BALLENTRY(SYS_NRESWARM, "ad7", "aa5"),
+ _OMAP3_BALLENTRY(SYS_OFF_MODE, "v12", NULL),
+ _OMAP3_BALLENTRY(UART1_CTS, "w2", NULL),
+ _OMAP3_BALLENTRY(UART1_RTS, "r2", NULL),
+ _OMAP3_BALLENTRY(UART1_RX, "h3", NULL),
+ _OMAP3_BALLENTRY(UART1_TX, "l4", NULL),
+ _OMAP3_BALLENTRY(UART2_CTS, "y24", NULL),
+ _OMAP3_BALLENTRY(UART2_RTS, "aa24", NULL),
+ _OMAP3_BALLENTRY(UART2_RX, "ad21", NULL),
+ _OMAP3_BALLENTRY(UART2_TX, "ad22", NULL),
+ _OMAP3_BALLENTRY(UART3_CTS_RCTX, "f23", NULL),
+ _OMAP3_BALLENTRY(UART3_RTS_SD, "f24", NULL),
+ _OMAP3_BALLENTRY(UART3_RX_IRRX, "h24", NULL),
+ _OMAP3_BALLENTRY(UART3_TX_IRTX, "g24", NULL),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap3_cbc_ball NULL
+#endif
+
+/*
+ * Signals different on CUS package compared to superset
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CUS)
+struct omap_mux __initdata omap3_cus_subset[] = {
+ _OMAP3_MUXENTRY(CAM_D10, 109,
+ "cam_d10", NULL, NULL, NULL,
+ "gpio_109", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D11, 110,
+ "cam_d11", NULL, NULL, NULL,
+ "gpio_110", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D2, 101,
+ "cam_d2", NULL, NULL, NULL,
+ "gpio_101", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D3, 102,
+ "cam_d3", NULL, NULL, NULL,
+ "gpio_102", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D4, 103,
+ "cam_d4", NULL, NULL, NULL,
+ "gpio_103", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D5, 104,
+ "cam_d5", NULL, NULL, NULL,
+ "gpio_104", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_FLD, 98,
+ "cam_fld", NULL, "cam_global_reset", NULL,
+ "gpio_98", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_HS, 94,
+ "cam_hs", NULL, NULL, NULL,
+ "gpio_94", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_PCLK, 97,
+ "cam_pclk", NULL, NULL, NULL,
+ "gpio_97", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_STROBE, 126,
+ "cam_strobe", NULL, NULL, NULL,
+ "gpio_126", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_VS, 95,
+ "cam_vs", NULL, NULL, NULL,
+ "gpio_95", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_WEN, 167,
+ "cam_wen", NULL, "cam_shutter", NULL,
+ "gpio_167", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA6, 76,
+ "dss_data6", NULL, "uart1_tx", NULL,
+ "gpio_76", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA7, 77,
+ "dss_data7", NULL, "uart1_rx", NULL,
+ "gpio_77", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA8, 78,
+ "dss_data8", NULL, NULL, NULL,
+ "gpio_78", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA9, 79,
+ "dss_data9", NULL, NULL, NULL,
+ "gpio_79", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_HSYNC, 67,
+ "dss_hsync", NULL, NULL, NULL,
+ "gpio_67", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_PCLK, 66,
+ "dss_pclk", NULL, NULL, NULL,
+ "gpio_66", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(ETK_CLK, 12,
+ "etk_clk", "mcbsp5_clkx", "sdmmc3_clk", "hsusb1_stp",
+ "gpio_12", "mm1_rxdp", "hsusb1_tll_stp", NULL),
+ _OMAP3_MUXENTRY(ETK_CTL, 13,
+ "etk_ctl", NULL, "sdmmc3_cmd", "hsusb1_clk",
+ "gpio_13", NULL, "hsusb1_tll_clk", NULL),
+ _OMAP3_MUXENTRY(ETK_D0, 14,
+ "etk_d0", "mcspi3_simo", "sdmmc3_dat4", "hsusb1_data0",
+ "gpio_14", "mm1_rxrcv", "hsusb1_tll_data0", NULL),
+ _OMAP3_MUXENTRY(ETK_D1, 15,
+ "etk_d1", "mcspi3_somi", NULL, "hsusb1_data1",
+ "gpio_15", "mm1_txse0", "hsusb1_tll_data1", NULL),
+ _OMAP3_MUXENTRY(ETK_D10, 24,
+ "etk_d10", NULL, "uart1_rx", "hsusb2_clk",
+ "gpio_24", NULL, "hsusb2_tll_clk", NULL),
+ _OMAP3_MUXENTRY(ETK_D11, 25,
+ "etk_d11", NULL, NULL, "hsusb2_stp",
+ "gpio_25", "mm2_rxdp", "hsusb2_tll_stp", NULL),
+ _OMAP3_MUXENTRY(ETK_D12, 26,
+ "etk_d12", NULL, NULL, "hsusb2_dir",
+ "gpio_26", NULL, "hsusb2_tll_dir", NULL),
+ _OMAP3_MUXENTRY(ETK_D13, 27,
+ "etk_d13", NULL, NULL, "hsusb2_nxt",
+ "gpio_27", "mm2_rxdm", "hsusb2_tll_nxt", NULL),
+ _OMAP3_MUXENTRY(ETK_D14, 28,
+ "etk_d14", NULL, NULL, "hsusb2_data0",
+ "gpio_28", "mm2_rxrcv", "hsusb2_tll_data0", NULL),
+ _OMAP3_MUXENTRY(ETK_D15, 29,
+ "etk_d15", NULL, NULL, "hsusb2_data1",
+ "gpio_29", "mm2_txse0", "hsusb2_tll_data1", NULL),
+ _OMAP3_MUXENTRY(ETK_D2, 16,
+ "etk_d2", "mcspi3_cs0", NULL, "hsusb1_data2",
+ "gpio_16", "mm1_txdat", "hsusb1_tll_data2", NULL),
+ _OMAP3_MUXENTRY(ETK_D3, 17,
+ "etk_d3", "mcspi3_clk", "sdmmc3_dat3", "hsusb1_data7",
+ "gpio_17", NULL, "hsusb1_tll_data7", NULL),
+ _OMAP3_MUXENTRY(ETK_D4, 18,
+ "etk_d4", "mcbsp5_dr", "sdmmc3_dat0", "hsusb1_data4",
+ "gpio_18", NULL, "hsusb1_tll_data4", NULL),
+ _OMAP3_MUXENTRY(ETK_D5, 19,
+ "etk_d5", "mcbsp5_fsx", "sdmmc3_dat1", "hsusb1_data5",
+ "gpio_19", NULL, "hsusb1_tll_data5", NULL),
+ _OMAP3_MUXENTRY(ETK_D6, 20,
+ "etk_d6", "mcbsp5_dx", "sdmmc3_dat2", "hsusb1_data6",
+ "gpio_20", NULL, "hsusb1_tll_data6", NULL),
+ _OMAP3_MUXENTRY(ETK_D7, 21,
+ "etk_d7", "mcspi3_cs1", "sdmmc3_dat7", "hsusb1_data3",
+ "gpio_21", "mm1_txen_n", "hsusb1_tll_data3", NULL),
+ _OMAP3_MUXENTRY(ETK_D8, 22,
+ "etk_d8", "sys_drm_msecure", "sdmmc3_dat6", "hsusb1_dir",
+ "gpio_22", NULL, "hsusb1_tll_dir", NULL),
+ _OMAP3_MUXENTRY(ETK_D9, 23,
+ "etk_d9", "sys_secure_indicator", "sdmmc3_dat5", "hsusb1_nxt",
+ "gpio_23", "mm1_rxdm", "hsusb1_tll_nxt", NULL),
+ _OMAP3_MUXENTRY(MCBSP3_CLKX, 142,
+ "mcbsp3_clkx", "uart2_tx", NULL, NULL,
+ "gpio_142", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP3_DR, 141,
+ "mcbsp3_dr", "uart2_rts", NULL, NULL,
+ "gpio_141", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP3_DX, 140,
+ "mcbsp3_dx", "uart2_cts", NULL, NULL,
+ "gpio_140", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP3_FSX, 143,
+ "mcbsp3_fsx", "uart2_rx", NULL, NULL,
+ "gpio_143", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT5, 137,
+ "sdmmc2_dat5", "sdmmc2_dir_dat1",
+ "cam_global_reset", "sdmmc3_dat1",
+ "gpio_137", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT6, 138,
+ "sdmmc2_dat6", "sdmmc2_dir_cmd", "cam_shutter", "sdmmc3_dat2",
+ "gpio_138", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC2_DAT7, 139,
+ "sdmmc2_dat7", "sdmmc2_clkin", NULL, "sdmmc3_dat3",
+ "gpio_139", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_CTS, 150,
+ "uart1_cts", NULL, NULL, NULL,
+ "gpio_150", NULL, NULL, "safe_mode"),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap3_cus_subset NULL
+#endif
+
+/*
+ * Balls for CUS package
+ * 423-pin s-PBGA Package, 0.65mm Ball Pitch (Bottom)
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \
+ && defined(CONFIG_OMAP_PACKAGE_CUS)
+struct omap_ball __initdata omap3_cus_ball[] = {
+ _OMAP3_BALLENTRY(CAM_D0, "ab18", NULL),
+ _OMAP3_BALLENTRY(CAM_D1, "ac18", NULL),
+ _OMAP3_BALLENTRY(CAM_D10, "f21", NULL),
+ _OMAP3_BALLENTRY(CAM_D11, "g21", NULL),
+ _OMAP3_BALLENTRY(CAM_D2, "g19", NULL),
+ _OMAP3_BALLENTRY(CAM_D3, "f19", NULL),
+ _OMAP3_BALLENTRY(CAM_D4, "g20", NULL),
+ _OMAP3_BALLENTRY(CAM_D5, "b21", NULL),
+ _OMAP3_BALLENTRY(CAM_D6, "l24", NULL),
+ _OMAP3_BALLENTRY(CAM_D7, "k24", NULL),
+ _OMAP3_BALLENTRY(CAM_D8, "j23", NULL),
+ _OMAP3_BALLENTRY(CAM_D9, "k23", NULL),
+ _OMAP3_BALLENTRY(CAM_FLD, "h24", NULL),
+ _OMAP3_BALLENTRY(CAM_HS, "a22", NULL),
+ _OMAP3_BALLENTRY(CAM_PCLK, "j19", NULL),
+ _OMAP3_BALLENTRY(CAM_STROBE, "j20", NULL),
+ _OMAP3_BALLENTRY(CAM_VS, "e18", NULL),
+ _OMAP3_BALLENTRY(CAM_WEN, "f18", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKA, "b22", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKB, "c22", NULL),
+ _OMAP3_BALLENTRY(DSS_ACBIAS, "j21", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA0, "ac19", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA1, "ab19", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA10, "ac22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA11, "ac23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA12, "ab22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA13, "y22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA14, "w22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA15, "v22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA16, "j22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA17, "g23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA18, "g24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA19, "h23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA2, "ad20", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA20, "d23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA21, "k22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA22, "v21", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA23, "w21", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA3, "ac20", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA4, "ad21", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA5, "ac21", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA6, "d24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA7, "e23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA8, "e24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA9, "f23", NULL),
+ _OMAP3_BALLENTRY(DSS_HSYNC, "e22", NULL),
+ _OMAP3_BALLENTRY(DSS_PCLK, "g22", NULL),
+ _OMAP3_BALLENTRY(DSS_VSYNC, "f22", NULL),
+ _OMAP3_BALLENTRY(ETK_CLK, "ac1", NULL),
+ _OMAP3_BALLENTRY(ETK_CTL, "ad3", NULL),
+ _OMAP3_BALLENTRY(ETK_D0, "ad6", NULL),
+ _OMAP3_BALLENTRY(ETK_D1, "ac6", NULL),
+ _OMAP3_BALLENTRY(ETK_D10, "ac3", NULL),
+ _OMAP3_BALLENTRY(ETK_D11, "ac9", NULL),
+ _OMAP3_BALLENTRY(ETK_D12, "ac10", NULL),
+ _OMAP3_BALLENTRY(ETK_D13, "ad11", NULL),
+ _OMAP3_BALLENTRY(ETK_D14, "ac11", NULL),
+ _OMAP3_BALLENTRY(ETK_D15, "ad12", NULL),
+ _OMAP3_BALLENTRY(ETK_D2, "ac7", NULL),
+ _OMAP3_BALLENTRY(ETK_D3, "ad8", NULL),
+ _OMAP3_BALLENTRY(ETK_D4, "ac5", NULL),
+ _OMAP3_BALLENTRY(ETK_D5, "ad2", NULL),
+ _OMAP3_BALLENTRY(ETK_D6, "ac8", NULL),
+ _OMAP3_BALLENTRY(ETK_D7, "ad9", NULL),
+ _OMAP3_BALLENTRY(ETK_D8, "ac4", NULL),
+ _OMAP3_BALLENTRY(ETK_D9, "ad5", NULL),
+ _OMAP3_BALLENTRY(GPMC_A1, "k4", NULL),
+ _OMAP3_BALLENTRY(GPMC_A10, "g2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A2, "k3", NULL),
+ _OMAP3_BALLENTRY(GPMC_A3, "k2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A4, "j4", NULL),
+ _OMAP3_BALLENTRY(GPMC_A5, "j3", NULL),
+ _OMAP3_BALLENTRY(GPMC_A6, "j2", NULL),
+ _OMAP3_BALLENTRY(GPMC_A7, "j1", NULL),
+ _OMAP3_BALLENTRY(GPMC_A8, "h1", NULL),
+ _OMAP3_BALLENTRY(GPMC_A9, "h2", NULL),
+ _OMAP3_BALLENTRY(GPMC_CLK, "w2", NULL),
+ _OMAP3_BALLENTRY(GPMC_D10, "u1", NULL),
+ _OMAP3_BALLENTRY(GPMC_D11, "r3", NULL),
+ _OMAP3_BALLENTRY(GPMC_D12, "t3", NULL),
+ _OMAP3_BALLENTRY(GPMC_D13, "u2", NULL),
+ _OMAP3_BALLENTRY(GPMC_D14, "v1", NULL),
+ _OMAP3_BALLENTRY(GPMC_D15, "v2", NULL),
+ _OMAP3_BALLENTRY(GPMC_D8, "r2", NULL),
+ _OMAP3_BALLENTRY(GPMC_D9, "t2", NULL),
+ _OMAP3_BALLENTRY(GPMC_NBE0_CLE, "k5", NULL),
+ _OMAP3_BALLENTRY(GPMC_NBE1, "l1", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS3, "d2", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS4, "f4", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS5, "g5", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS6, "f3", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS7, "g4", NULL),
+ _OMAP3_BALLENTRY(GPMC_NWP, "e1", NULL),
+ _OMAP3_BALLENTRY(GPMC_WAIT3, "c2", NULL),
+ _OMAP3_BALLENTRY(HDQ_SIO, "a24", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_CLK, "r21", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA0, "t24", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA1, "t23", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA2, "u24", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA3, "u23", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA4, "w24", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA5, "v23", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA6, "w23", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA7, "t22", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DIR, "p23", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_NXT, "r22", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_STP, "r23", NULL),
+ _OMAP3_BALLENTRY(I2C2_SCL, "ac15", NULL),
+ _OMAP3_BALLENTRY(I2C2_SDA, "ac14", NULL),
+ _OMAP3_BALLENTRY(I2C3_SCL, "ac13", NULL),
+ _OMAP3_BALLENTRY(I2C3_SDA, "ac12", NULL),
+ _OMAP3_BALLENTRY(I2C4_SCL, "y16", NULL),
+ _OMAP3_BALLENTRY(I2C4_SDA, "y15", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU0, "ac24", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU1, "ad24", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKR, "w19", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKX, "v18", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DR, "y18", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DX, "w18", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSR, "ab20", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSX, "aa19", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_CLKX, "t21", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DR, "v19", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DX, "r20", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_FSX, "v20", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_CLKX, "w4", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DR, "v5", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DX, "v6", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_FSX, "v4", NULL),
+ _OMAP3_BALLENTRY(MCBSP_CLKS, "aa18", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CLK, "t5", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS0, "t6", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS3, "r5", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SIMO, "r4", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SOMI, "t4", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CLK, "n5", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS0, "m5", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS1, "m4", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SIMO, "n4", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SOMI, "n3", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CLK, "m23", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CMD, "l23", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT0, "m22", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT1, "m21", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT2, "m20", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT3, "n23", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT4, "n22", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT5, "n21", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT6, "n20", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT7, "p24", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CLK, "y1", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CMD, "ab5", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT0, "ab3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT1, "y3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT2, "w3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT3, "v3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT4, "ab2", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT5, "aa2", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT6, "y2", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT7, "aa1", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT0, "ab12", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT1, "ac16", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT2, "ad17", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT3, "ad18", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT4, "ac17", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT5, "ab16", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT6, "aa15", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT1, "y7", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT2, "aa6", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKREQ, "y13", NULL),
+ _OMAP3_BALLENTRY(SYS_NIRQ, "w16", NULL),
+ _OMAP3_BALLENTRY(SYS_NRESWARM, "y10", NULL),
+ _OMAP3_BALLENTRY(SYS_OFF_MODE, "ad23", NULL),
+ _OMAP3_BALLENTRY(UART1_CTS, "ac2", NULL),
+ _OMAP3_BALLENTRY(UART1_RTS, "w6", NULL),
+ _OMAP3_BALLENTRY(UART1_RX, "v7", NULL),
+ _OMAP3_BALLENTRY(UART1_TX, "w7", NULL),
+ _OMAP3_BALLENTRY(UART3_CTS_RCTX, "a23", NULL),
+ _OMAP3_BALLENTRY(UART3_RTS_SD, "b23", NULL),
+ _OMAP3_BALLENTRY(UART3_RX_IRRX, "b24", NULL),
+ _OMAP3_BALLENTRY(UART3_TX_IRTX, "c23", NULL),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap3_cus_ball NULL
+#endif
+
+/*
+ * Signals different on CBB package compared to superset
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBB)
+struct omap_mux __initdata omap3_cbb_subset[] = {
+ _OMAP3_MUXENTRY(CAM_D10, 109,
+ "cam_d10", NULL, NULL, NULL,
+ "gpio_109", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D11, 110,
+ "cam_d11", NULL, NULL, NULL,
+ "gpio_110", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D2, 101,
+ "cam_d2", NULL, NULL, NULL,
+ "gpio_101", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D3, 102,
+ "cam_d3", NULL, NULL, NULL,
+ "gpio_102", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D4, 103,
+ "cam_d4", NULL, NULL, NULL,
+ "gpio_103", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D5, 104,
+ "cam_d5", NULL, NULL, NULL,
+ "gpio_104", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_FLD, 98,
+ "cam_fld", NULL, "cam_global_reset", NULL,
+ "gpio_98", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_HS, 94,
+ "cam_hs", NULL, NULL, NULL,
+ "gpio_94", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_PCLK, 97,
+ "cam_pclk", NULL, NULL, NULL,
+ "gpio_97", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_STROBE, 126,
+ "cam_strobe", NULL, NULL, NULL,
+ "gpio_126", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_VS, 95,
+ "cam_vs", NULL, NULL, NULL,
+ "gpio_95", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_WEN, 167,
+ "cam_wen", NULL, "cam_shutter", NULL,
+ "gpio_167", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA6, 76,
+ "dss_data6", NULL, "uart1_tx", NULL,
+ "gpio_76", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA7, 77,
+ "dss_data7", NULL, "uart1_rx", NULL,
+ "gpio_77", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA8, 78,
+ "dss_data8", NULL, NULL, NULL,
+ "gpio_78", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA9, 79,
+ "dss_data9", NULL, NULL, NULL,
+ "gpio_79", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_HSYNC, 67,
+ "dss_hsync", NULL, NULL, NULL,
+ "gpio_67", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_PCLK, 66,
+ "dss_pclk", NULL, NULL, NULL,
+ "gpio_66", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(ETK_CLK, 12,
+ "etk_clk", "mcbsp5_clkx", "sdmmc3_clk", "hsusb1_stp",
+ "gpio_12", "mm1_rxdp", "hsusb1_tll_stp", NULL),
+ _OMAP3_MUXENTRY(ETK_CTL, 13,
+ "etk_ctl", NULL, "sdmmc3_cmd", "hsusb1_clk",
+ "gpio_13", NULL, "hsusb1_tll_clk", NULL),
+ _OMAP3_MUXENTRY(ETK_D0, 14,
+ "etk_d0", "mcspi3_simo", "sdmmc3_dat4", "hsusb1_data0",
+ "gpio_14", "mm1_rxrcv", "hsusb1_tll_data0", NULL),
+ _OMAP3_MUXENTRY(ETK_D1, 15,
+ "etk_d1", "mcspi3_somi", NULL, "hsusb1_data1",
+ "gpio_15", "mm1_txse0", "hsusb1_tll_data1", NULL),
+ _OMAP3_MUXENTRY(ETK_D10, 24,
+ "etk_d10", NULL, "uart1_rx", "hsusb2_clk",
+ "gpio_24", NULL, "hsusb2_tll_clk", NULL),
+ _OMAP3_MUXENTRY(ETK_D11, 25,
+ "etk_d11", NULL, NULL, "hsusb2_stp",
+ "gpio_25", "mm2_rxdp", "hsusb2_tll_stp", NULL),
+ _OMAP3_MUXENTRY(ETK_D12, 26,
+ "etk_d12", NULL, NULL, "hsusb2_dir",
+ "gpio_26", NULL, "hsusb2_tll_dir", NULL),
+ _OMAP3_MUXENTRY(ETK_D13, 27,
+ "etk_d13", NULL, NULL, "hsusb2_nxt",
+ "gpio_27", "mm2_rxdm", "hsusb2_tll_nxt", NULL),
+ _OMAP3_MUXENTRY(ETK_D14, 28,
+ "etk_d14", NULL, NULL, "hsusb2_data0",
+ "gpio_28", "mm2_rxrcv", "hsusb2_tll_data0", NULL),
+ _OMAP3_MUXENTRY(ETK_D15, 29,
+ "etk_d15", NULL, NULL, "hsusb2_data1",
+ "gpio_29", "mm2_txse0", "hsusb2_tll_data1", NULL),
+ _OMAP3_MUXENTRY(ETK_D2, 16,
+ "etk_d2", "mcspi3_cs0", NULL, "hsusb1_data2",
+ "gpio_16", "mm1_txdat", "hsusb1_tll_data2", NULL),
+ _OMAP3_MUXENTRY(ETK_D3, 17,
+ "etk_d3", "mcspi3_clk", "sdmmc3_dat3", "hsusb1_data7",
+ "gpio_17", NULL, "hsusb1_tll_data7", NULL),
+ _OMAP3_MUXENTRY(ETK_D4, 18,
+ "etk_d4", "mcbsp5_dr", "sdmmc3_dat0", "hsusb1_data4",
+ "gpio_18", NULL, "hsusb1_tll_data4", NULL),
+ _OMAP3_MUXENTRY(ETK_D5, 19,
+ "etk_d5", "mcbsp5_fsx", "sdmmc3_dat1", "hsusb1_data5",
+ "gpio_19", NULL, "hsusb1_tll_data5", NULL),
+ _OMAP3_MUXENTRY(ETK_D6, 20,
+ "etk_d6", "mcbsp5_dx", "sdmmc3_dat2", "hsusb1_data6",
+ "gpio_20", NULL, "hsusb1_tll_data6", NULL),
+ _OMAP3_MUXENTRY(ETK_D7, 21,
+ "etk_d7", "mcspi3_cs1", "sdmmc3_dat7", "hsusb1_data3",
+ "gpio_21", "mm1_txen_n", "hsusb1_tll_data3", NULL),
+ _OMAP3_MUXENTRY(ETK_D8, 22,
+ "etk_d8", "sys_drm_msecure", "sdmmc3_dat6", "hsusb1_dir",
+ "gpio_22", NULL, "hsusb1_tll_dir", NULL),
+ _OMAP3_MUXENTRY(ETK_D9, 23,
+ "etk_d9", "sys_secure_indicator", "sdmmc3_dat5", "hsusb1_nxt",
+ "gpio_23", "mm1_rxdm", "hsusb1_tll_nxt", NULL),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap3_cbb_subset NULL
+#endif
+
+/*
+ * Balls for CBB package
+ * 515-pin s-PBGA Package, 0.50mm Ball Pitch (Top), 0.40mm Ball Pitch (Bottom)
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \
+ && defined(CONFIG_OMAP_PACKAGE_CBB)
+struct omap_ball __initdata omap3_cbb_ball[] = {
+ _OMAP3_BALLENTRY(CAM_D0, "ag17", NULL),
+ _OMAP3_BALLENTRY(CAM_D1, "ah17", NULL),
+ _OMAP3_BALLENTRY(CAM_D10, "b25", NULL),
+ _OMAP3_BALLENTRY(CAM_D11, "c26", NULL),
+ _OMAP3_BALLENTRY(CAM_D2, "b24", NULL),
+ _OMAP3_BALLENTRY(CAM_D3, "c24", NULL),
+ _OMAP3_BALLENTRY(CAM_D4, "d24", NULL),
+ _OMAP3_BALLENTRY(CAM_D5, "a25", NULL),
+ _OMAP3_BALLENTRY(CAM_D6, "k28", NULL),
+ _OMAP3_BALLENTRY(CAM_D7, "l28", NULL),
+ _OMAP3_BALLENTRY(CAM_D8, "k27", NULL),
+ _OMAP3_BALLENTRY(CAM_D9, "l27", NULL),
+ _OMAP3_BALLENTRY(CAM_FLD, "c23", NULL),
+ _OMAP3_BALLENTRY(CAM_HS, "a24", NULL),
+ _OMAP3_BALLENTRY(CAM_PCLK, "c27", NULL),
+ _OMAP3_BALLENTRY(CAM_STROBE, "d25", NULL),
+ _OMAP3_BALLENTRY(CAM_VS, "a23", NULL),
+ _OMAP3_BALLENTRY(CAM_WEN, "b23", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKA, "c25", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKB, "b26", NULL),
+ _OMAP3_BALLENTRY(CSI2_DX0, "ag19", NULL),
+ _OMAP3_BALLENTRY(CSI2_DX1, "ag18", NULL),
+ _OMAP3_BALLENTRY(CSI2_DY0, "ah19", NULL),
+ _OMAP3_BALLENTRY(CSI2_DY1, "ah18", NULL),
+ _OMAP3_BALLENTRY(DSS_ACBIAS, "e27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA0, "ag22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA1, "ah22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA10, "ad28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA11, "ad27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA12, "ab28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA13, "ab27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA14, "aa28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA15, "aa27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA16, "g25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA17, "h27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA18, "h26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA19, "h25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA2, "ag23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA20, "e28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA21, "j26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA22, "ac27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA23, "ac28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA3, "ah23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA4, "ag24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA5, "ah24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA6, "e26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA7, "f28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA8, "f27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA9, "g26", NULL),
+ _OMAP3_BALLENTRY(DSS_HSYNC, "d26", NULL),
+ _OMAP3_BALLENTRY(DSS_PCLK, "d28", NULL),
+ _OMAP3_BALLENTRY(DSS_VSYNC, "d27", NULL),
+ _OMAP3_BALLENTRY(ETK_CLK, "af10", NULL),
+ _OMAP3_BALLENTRY(ETK_CTL, "ae10", NULL),
+ _OMAP3_BALLENTRY(ETK_D0, "af11", NULL),
+ _OMAP3_BALLENTRY(ETK_D1, "ag12", NULL),
+ _OMAP3_BALLENTRY(ETK_D10, "ae7", NULL),
+ _OMAP3_BALLENTRY(ETK_D11, "af7", NULL),
+ _OMAP3_BALLENTRY(ETK_D12, "ag7", NULL),
+ _OMAP3_BALLENTRY(ETK_D13, "ah7", NULL),
+ _OMAP3_BALLENTRY(ETK_D14, "ag8", NULL),
+ _OMAP3_BALLENTRY(ETK_D15, "ah8", NULL),
+ _OMAP3_BALLENTRY(ETK_D2, "ah12", NULL),
+ _OMAP3_BALLENTRY(ETK_D3, "ae13", NULL),
+ _OMAP3_BALLENTRY(ETK_D4, "ae11", NULL),
+ _OMAP3_BALLENTRY(ETK_D5, "ah9", NULL),
+ _OMAP3_BALLENTRY(ETK_D6, "af13", NULL),
+ _OMAP3_BALLENTRY(ETK_D7, "ah14", NULL),
+ _OMAP3_BALLENTRY(ETK_D8, "af9", NULL),
+ _OMAP3_BALLENTRY(ETK_D9, "ag9", NULL),
+ _OMAP3_BALLENTRY(GPMC_A1, "n4", "ac15"),
+ _OMAP3_BALLENTRY(GPMC_A10, "k3", "ab19"),
+ _OMAP3_BALLENTRY(GPMC_A2, "m4", "ab15"),
+ _OMAP3_BALLENTRY(GPMC_A3, "l4", "ac16"),
+ _OMAP3_BALLENTRY(GPMC_A4, "k4", "ab16"),
+ _OMAP3_BALLENTRY(GPMC_A5, "t3", "ac17"),
+ _OMAP3_BALLENTRY(GPMC_A6, "r3", "ab17"),
+ _OMAP3_BALLENTRY(GPMC_A7, "n3", "ac18"),
+ _OMAP3_BALLENTRY(GPMC_A8, "m3", "ab18"),
+ _OMAP3_BALLENTRY(GPMC_A9, "l3", "ac19"),
+ _OMAP3_BALLENTRY(GPMC_CLK, "t4", "w2"),
+ _OMAP3_BALLENTRY(GPMC_D10, "p1", "ab4"),
+ _OMAP3_BALLENTRY(GPMC_D11, "r1", "ac4"),
+ _OMAP3_BALLENTRY(GPMC_D12, "r2", "ab6"),
+ _OMAP3_BALLENTRY(GPMC_D13, "t2", "ac6"),
+ _OMAP3_BALLENTRY(GPMC_D14, "w1", "ab7"),
+ _OMAP3_BALLENTRY(GPMC_D15, "y1", "ac7"),
+ _OMAP3_BALLENTRY(GPMC_D8, "h2", "ab3"),
+ _OMAP3_BALLENTRY(GPMC_D9, "k2", "ac3"),
+ _OMAP3_BALLENTRY(GPMC_NBE0_CLE, "g3", "ac12"),
+ _OMAP3_BALLENTRY(GPMC_NBE1, "u3", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS1, "h3", "y1"),
+ _OMAP3_BALLENTRY(GPMC_NCS2, "v8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS3, "u8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS4, "t8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS5, "r8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS6, "p8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS7, "n8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NWP, "h1", "ab10"),
+ _OMAP3_BALLENTRY(GPMC_WAIT1, "l8", "ac10"),
+ _OMAP3_BALLENTRY(GPMC_WAIT2, "k8", NULL),
+ _OMAP3_BALLENTRY(GPMC_WAIT3, "j8", NULL),
+ _OMAP3_BALLENTRY(HDQ_SIO, "j25", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_CLK, "t28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA0, "t27", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA1, "u28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA2, "u27", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA3, "u26", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA4, "u25", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA5, "v28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA6, "v27", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA7, "v26", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DIR, "r28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_NXT, "t26", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_STP, "t25", NULL),
+ _OMAP3_BALLENTRY(I2C2_SCL, "af15", NULL),
+ _OMAP3_BALLENTRY(I2C2_SDA, "ae15", NULL),
+ _OMAP3_BALLENTRY(I2C3_SCL, "af14", NULL),
+ _OMAP3_BALLENTRY(I2C3_SDA, "ag14", NULL),
+ _OMAP3_BALLENTRY(I2C4_SCL, "ad26", NULL),
+ _OMAP3_BALLENTRY(I2C4_SDA, "ae26", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU0, "aa11", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU1, "aa10", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKR, "y21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKX, "w21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DR, "u21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DX, "v21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSR, "aa21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSX, "k26", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_CLKX, "n21", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DR, "r21", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DX, "m21", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_FSX, "p21", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_CLKX, "af5", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DR, "ae6", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DX, "af6", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_FSX, "ae5", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_CLKX, "ae1", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_DR, "ad1", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_DX, "ad2", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_FSX, "ac1", NULL),
+ _OMAP3_BALLENTRY(MCBSP_CLKS, "t21", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CLK, "ab3", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS0, "ac2", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS1, "ac3", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS2, "ab1", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS3, "ab2", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SIMO, "ab4", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SOMI, "aa4", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CLK, "aa3", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS0, "y4", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS1, "v3", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SIMO, "y2", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SOMI, "y3", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CLK, "n28", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CMD, "m27", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT0, "n27", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT1, "n26", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT2, "n25", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT3, "p28", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT4, "p27", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT5, "p26", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT6, "r27", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT7, "r25", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CLK, "ae2", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CMD, "ag5", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT0, "ah5", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT1, "ah4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT2, "ag4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT3, "af4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT4, "ae4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT5, "ah3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT6, "af3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT7, "ae3", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT0, "ah26", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT1, "ag26", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT2, "ae14", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT3, "af18", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT4, "af19", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT5, "ae21", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT6, "af21", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT1, "ag25", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT2, "ae22", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKREQ, "af25", NULL),
+ _OMAP3_BALLENTRY(SYS_NIRQ, "af26", NULL),
+ _OMAP3_BALLENTRY(SYS_NRESWARM, "af24", NULL),
+ _OMAP3_BALLENTRY(SYS_OFF_MODE, "af22", NULL),
+ _OMAP3_BALLENTRY(UART1_CTS, "w8", NULL),
+ _OMAP3_BALLENTRY(UART1_RTS, "aa9", NULL),
+ _OMAP3_BALLENTRY(UART1_RX, "y8", NULL),
+ _OMAP3_BALLENTRY(UART1_TX, "aa8", NULL),
+ _OMAP3_BALLENTRY(UART2_CTS, "ab26", NULL),
+ _OMAP3_BALLENTRY(UART2_RTS, "ab25", NULL),
+ _OMAP3_BALLENTRY(UART2_RX, "ad25", NULL),
+ _OMAP3_BALLENTRY(UART2_TX, "aa25", NULL),
+ _OMAP3_BALLENTRY(UART3_CTS_RCTX, "h18", NULL),
+ _OMAP3_BALLENTRY(UART3_RTS_SD, "h19", NULL),
+ _OMAP3_BALLENTRY(UART3_RX_IRRX, "h20", NULL),
+ _OMAP3_BALLENTRY(UART3_TX_IRTX, "h21", NULL),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap3_cbb_ball NULL
+#endif
+
+/*
+ * Signals different on 36XX CBP package compared to 34XX CBC package
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBP)
+struct omap_mux __initdata omap36xx_cbp_subset[] = {
+ _OMAP3_MUXENTRY(CAM_D0, 99,
+ "cam_d0", NULL, "csi2_dx2", NULL,
+ "gpio_99", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D1, 100,
+ "cam_d1", NULL, "csi2_dy2", NULL,
+ "gpio_100", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D10, 109,
+ "cam_d10", "ssi2_wake", NULL, NULL,
+ "gpio_109", "hw_dbg8", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D2, 101,
+ "cam_d2", "ssi2_rdy_tx", NULL, NULL,
+ "gpio_101", "hw_dbg4", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D3, 102,
+ "cam_d3", "ssi2_dat_rx", NULL, NULL,
+ "gpio_102", "hw_dbg5", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D4, 103,
+ "cam_d4", "ssi2_flag_rx", NULL, NULL,
+ "gpio_103", "hw_dbg6", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_D5, 104,
+ "cam_d5", "ssi2_rdy_rx", NULL, NULL,
+ "gpio_104", "hw_dbg7", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_HS, 94,
+ "cam_hs", "ssi2_dat_tx", NULL, NULL,
+ "gpio_94", "hw_dbg0", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(CAM_VS, 95,
+ "cam_vs", "ssi2_flag_tx", NULL, NULL,
+ "gpio_95", "hw_dbg1", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA0, 70,
+ "dss_data0", "dsi_dx0", "uart1_cts", NULL,
+ "gpio_70", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA1, 71,
+ "dss_data1", "dsi_dy0", "uart1_rts", NULL,
+ "gpio_71", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA2, 72,
+ "dss_data2", "dsi_dx1", NULL, NULL,
+ "gpio_72", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA3, 73,
+ "dss_data3", "dsi_dy1", NULL, NULL,
+ "gpio_73", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA4, 74,
+ "dss_data4", "dsi_dx2", "uart3_rx_irrx", NULL,
+ "gpio_74", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA5, 75,
+ "dss_data5", "dsi_dy2", "uart3_tx_irtx", NULL,
+ "gpio_75", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA6, 76,
+ "dss_data6", NULL, "uart1_tx", "dssvenc656_data6",
+ "gpio_76", "hw_dbg14", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA7, 77,
+ "dss_data7", NULL, "uart1_rx", "dssvenc656_data7",
+ "gpio_77", "hw_dbg15", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA8, 78,
+ "dss_data8", NULL, "uart3_rx_irrx", NULL,
+ "gpio_78", "hw_dbg16", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(DSS_DATA9, 79,
+ "dss_data9", NULL, "uart3_tx_irtx", NULL,
+ "gpio_79", "hw_dbg17", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(ETK_D12, 26,
+ "etk_d12", "sys_drm_msecure", NULL, "hsusb2_dir",
+ "gpio_26", NULL, "hsusb2_tll_dir", "hw_dbg14"),
+ _OMAP3_MUXENTRY(GPMC_A11, 0,
+ "gpmc_a11", NULL, NULL, NULL,
+ NULL, NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_WAIT2, 64,
+ "gpmc_wait2", NULL, "uart4_tx", NULL,
+ "gpio_64", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(GPMC_WAIT3, 65,
+ "gpmc_wait3", "sys_ndmareq1", "uart4_rx", NULL,
+ "gpio_65", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA0, 125,
+ "hsusb0_data0", NULL, "uart3_tx_irtx", NULL,
+ "gpio_125", "uart2_tx", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA1, 130,
+ "hsusb0_data1", NULL, "uart3_rx_irrx", NULL,
+ "gpio_130", "uart2_rx", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA2, 131,
+ "hsusb0_data2", NULL, "uart3_rts_sd", NULL,
+ "gpio_131", "uart2_rts", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(HSUSB0_DATA3, 169,
+ "hsusb0_data3", NULL, "uart3_cts_rctx", NULL,
+ "gpio_169", "uart2_cts", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_CLKR, 156,
+ "mcbsp1_clkr", "mcspi4_clk", "sim_cd", NULL,
+ "gpio_156", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP1_FSR, 157,
+ "mcbsp1_fsr", "adpllv2d_dithering_en1",
+ "cam_global_reset", NULL,
+ "gpio_157", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_CLKX, 152,
+ "mcbsp4_clkx", "ssi1_dat_rx", NULL, NULL,
+ "gpio_152", "hsusb3_tll_data1", "mm3_txse0", "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_DR, 153,
+ "mcbsp4_dr", "ssi1_flag_rx", NULL, NULL,
+ "gpio_153", "hsusb3_tll_data0", "mm3_rxrcv", "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_DX, 154,
+ "mcbsp4_dx", "ssi1_rdy_rx", NULL, NULL,
+ "gpio_154", "hsusb3_tll_data2", "mm3_txdat", "safe_mode"),
+ _OMAP3_MUXENTRY(MCBSP4_FSX, 155,
+ "mcbsp4_fsx", "ssi1_wake", NULL, NULL,
+ "gpio_155", "hsusb3_tll_data3", "mm3_txen_n", "safe_mode"),
+ _OMAP3_MUXENTRY(MCSPI1_CS1, 175,
+ "mcspi1_cs1", "adpllv2d_dithering_en2", NULL, "sdmmc3_cmd",
+ "gpio_175", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SAD2D_MBUSFLAG, 0,
+ "sad2d_mbusflag", "mad2d_sbusflag", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD28, 0,
+ "sad2d_mcad28", "mad2d_mcad28", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD29, 0,
+ "sad2d_mcad29", "mad2d_mcad29", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD32, 0,
+ "sad2d_mcad32", "mad2d_mcad32", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD33, 0,
+ "sad2d_mcad33", "mad2d_mcad33", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD34, 0,
+ "sad2d_mcad34", "mad2d_mcad34", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD35, 0,
+ "sad2d_mcad35", "mad2d_mcad35", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD36, 0,
+ "sad2d_mcad36", "mad2d_mcad36", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MREAD, 0,
+ "sad2d_mread", "mad2d_sread", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MWRITE, 0,
+ "sad2d_mwrite", "mad2d_swrite", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_SBUSFLAG, 0,
+ "sad2d_sbusflag", "mad2d_mbusflag", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_SREAD, 0,
+ "sad2d_sread", "mad2d_mread", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_SWRITE, 0,
+ "sad2d_swrite", "mad2d_mwrite", NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SDMMC1_CLK, 120,
+ "sdmmc1_clk", "ms_clk", NULL, NULL,
+ "gpio_120", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_CMD, 121,
+ "sdmmc1_cmd", "ms_bs", NULL, NULL,
+ "gpio_121", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT0, 122,
+ "sdmmc1_dat0", "ms_dat0", NULL, NULL,
+ "gpio_122", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT1, 123,
+ "sdmmc1_dat1", "ms_dat1", NULL, NULL,
+ "gpio_123", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT2, 124,
+ "sdmmc1_dat2", "ms_dat2", NULL, NULL,
+ "gpio_124", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDMMC1_DAT3, 125,
+ "sdmmc1_dat3", "ms_dat3", NULL, NULL,
+ "gpio_125", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SDRC_CKE0, 0,
+ "sdrc_cke0", NULL, NULL, NULL,
+ NULL, NULL, NULL, "safe_mode_out1"),
+ _OMAP3_MUXENTRY(SDRC_CKE1, 0,
+ "sdrc_cke1", NULL, NULL, NULL,
+ NULL, NULL, NULL, "safe_mode_out1"),
+ _OMAP3_MUXENTRY(SIM_IO, 126,
+ "sim_io", "sim_io_low_impedance", NULL, NULL,
+ "gpio_126", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SIM_CLK, 127,
+ "sim_clk", NULL, NULL, NULL,
+ "gpio_127", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SIM_PWRCTRL, 128,
+ "sim_pwrctrl", NULL, NULL, NULL,
+ "gpio_128", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SIM_RST, 129,
+ "sim_rst", NULL, NULL, NULL,
+ "gpio_129", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT0, 2,
+ "sys_boot0", NULL, NULL, "dss_data18",
+ "gpio_2", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT1, 3,
+ "sys_boot1", NULL, NULL, "dss_data19",
+ "gpio_3", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT3, 5,
+ "sys_boot3", NULL, NULL, "dss_data20",
+ "gpio_5", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT4, 6,
+ "sys_boot4", "sdmmc2_dir_dat2", NULL, "dss_data21",
+ "gpio_6", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT5, 7,
+ "sys_boot5", "sdmmc2_dir_dat3", NULL, "dss_data22",
+ "gpio_7", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(SYS_BOOT6, 8,
+ "sys_boot6", NULL, NULL, "dss_data23",
+ "gpio_8", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_CTS, 150,
+ "uart1_cts", "ssi1_rdy_tx", NULL, NULL,
+ "gpio_150", "hsusb3_tll_clk", NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_RTS, 149,
+ "uart1_rts", "ssi1_flag_tx", NULL, NULL,
+ "gpio_149", NULL, NULL, "safe_mode"),
+ _OMAP3_MUXENTRY(UART1_TX, 148,
+ "uart1_tx", "ssi1_dat_tx", NULL, NULL,
+ "gpio_148", NULL, NULL, "safe_mode"),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap36xx_cbp_subset NULL
+#endif
+
+/*
+ * Balls for 36XX CBP package
+ * 515-pin s-PBGA Package, 0.50mm Ball Pitch (Top), 0.40mm Ball Pitch (Bottom)
+ */
+#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS) \
+	&& defined(CONFIG_OMAP_PACKAGE_CBP)
+struct omap_ball __initdata omap36xx_cbp_ball[] = {
+ _OMAP3_BALLENTRY(CAM_D0, "ag17", NULL),
+ _OMAP3_BALLENTRY(CAM_D1, "ah17", NULL),
+ _OMAP3_BALLENTRY(CAM_D10, "b25", NULL),
+ _OMAP3_BALLENTRY(CAM_D11, "c26", NULL),
+ _OMAP3_BALLENTRY(CAM_D2, "b24", NULL),
+ _OMAP3_BALLENTRY(CAM_D3, "c24", NULL),
+ _OMAP3_BALLENTRY(CAM_D4, "d24", NULL),
+ _OMAP3_BALLENTRY(CAM_D5, "a25", NULL),
+ _OMAP3_BALLENTRY(CAM_D6, "k28", NULL),
+ _OMAP3_BALLENTRY(CAM_D7, "l28", NULL),
+ _OMAP3_BALLENTRY(CAM_D8, "k27", NULL),
+ _OMAP3_BALLENTRY(CAM_D9, "l27", NULL),
+ _OMAP3_BALLENTRY(CAM_FLD, "c23", NULL),
+ _OMAP3_BALLENTRY(CAM_HS, "a24", NULL),
+ _OMAP3_BALLENTRY(CAM_PCLK, "c27", NULL),
+ _OMAP3_BALLENTRY(CAM_STROBE, "d25", NULL),
+ _OMAP3_BALLENTRY(CAM_VS, "a23", NULL),
+ _OMAP3_BALLENTRY(CAM_WEN, "b23", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKA, "c25", NULL),
+ _OMAP3_BALLENTRY(CAM_XCLKB, "b26", NULL),
+ _OMAP3_BALLENTRY(CSI2_DX0, "ag19", NULL),
+ _OMAP3_BALLENTRY(CSI2_DX1, "ag18", NULL),
+ _OMAP3_BALLENTRY(CSI2_DY0, "ah19", NULL),
+ _OMAP3_BALLENTRY(CSI2_DY1, "ah18", NULL),
+ _OMAP3_BALLENTRY(DSS_ACBIAS, "e27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA0, "ag22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA1, "ah22", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA10, "ad28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA11, "ad27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA12, "ab28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA13, "ab27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA14, "aa28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA15, "aa27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA16, "g25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA17, "h27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA18, "h26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA19, "h25", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA2, "ag23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA20, "e28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA21, "j26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA22, "ac27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA23, "ac28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA3, "ah23", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA4, "ag24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA5, "ah24", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA6, "e26", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA7, "f28", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA8, "f27", NULL),
+ _OMAP3_BALLENTRY(DSS_DATA9, "g26", NULL),
+ _OMAP3_BALLENTRY(DSS_HSYNC, "d26", NULL),
+ _OMAP3_BALLENTRY(DSS_PCLK, "d28", NULL),
+ _OMAP3_BALLENTRY(DSS_VSYNC, "d27", NULL),
+ _OMAP3_BALLENTRY(ETK_CLK, "af10", NULL),
+ _OMAP3_BALLENTRY(ETK_CTL, "ae10", NULL),
+ _OMAP3_BALLENTRY(ETK_D0, "af11", NULL),
+ _OMAP3_BALLENTRY(ETK_D1, "ag12", NULL),
+ _OMAP3_BALLENTRY(ETK_D10, "ae7", NULL),
+ _OMAP3_BALLENTRY(ETK_D11, "af7", NULL),
+ _OMAP3_BALLENTRY(ETK_D12, "ag7", NULL),
+ _OMAP3_BALLENTRY(ETK_D13, "ah7", NULL),
+ _OMAP3_BALLENTRY(ETK_D14, "ag8", NULL),
+ _OMAP3_BALLENTRY(ETK_D15, "ah8", NULL),
+ _OMAP3_BALLENTRY(ETK_D2, "ah12", NULL),
+ _OMAP3_BALLENTRY(ETK_D3, "ae13", NULL),
+ _OMAP3_BALLENTRY(ETK_D4, "ae11", NULL),
+ _OMAP3_BALLENTRY(ETK_D5, "ah9", NULL),
+ _OMAP3_BALLENTRY(ETK_D6, "af13", NULL),
+ _OMAP3_BALLENTRY(ETK_D7, "ah14", NULL),
+ _OMAP3_BALLENTRY(ETK_D8, "af9", NULL),
+ _OMAP3_BALLENTRY(ETK_D9, "ag9", NULL),
+ _OMAP3_BALLENTRY(GPMC_A1, "n4", "ac15"),
+ _OMAP3_BALLENTRY(GPMC_A10, "k3", "ab19"),
+ _OMAP3_BALLENTRY(GPMC_A11, NULL, "ac20"),
+ _OMAP3_BALLENTRY(GPMC_A2, "m4", "ab15"),
+ _OMAP3_BALLENTRY(GPMC_A3, "l4", "ac16"),
+ _OMAP3_BALLENTRY(GPMC_A4, "k4", "ab16"),
+ _OMAP3_BALLENTRY(GPMC_A5, "t3", "ac17"),
+ _OMAP3_BALLENTRY(GPMC_A6, "r3", "ab17"),
+ _OMAP3_BALLENTRY(GPMC_A7, "n3", "ac18"),
+ _OMAP3_BALLENTRY(GPMC_A8, "m3", "ab18"),
+ _OMAP3_BALLENTRY(GPMC_A9, "l3", "ac19"),
+ _OMAP3_BALLENTRY(GPMC_CLK, "t4", "w2"),
+ _OMAP3_BALLENTRY(GPMC_D0, "k1", "m2"),
+ _OMAP3_BALLENTRY(GPMC_D1, "l1", "m1"),
+ _OMAP3_BALLENTRY(GPMC_D10, "p1", "ab4"),
+ _OMAP3_BALLENTRY(GPMC_D11, "r1", "ac4"),
+ _OMAP3_BALLENTRY(GPMC_D12, "r2", "ab6"),
+ _OMAP3_BALLENTRY(GPMC_D13, "t2", "ac6"),
+ _OMAP3_BALLENTRY(GPMC_D14, "w1", "ab7"),
+ _OMAP3_BALLENTRY(GPMC_D15, "y1", "ac7"),
+ _OMAP3_BALLENTRY(GPMC_D2, "l2", "n2"),
+ _OMAP3_BALLENTRY(GPMC_D3, "p2", "n1"),
+ _OMAP3_BALLENTRY(GPMC_D4, "t1", "r2"),
+ _OMAP3_BALLENTRY(GPMC_D5, "v1", "r1"),
+ _OMAP3_BALLENTRY(GPMC_D6, "v2", "t2"),
+ _OMAP3_BALLENTRY(GPMC_D7, "w2", "t1"),
+ _OMAP3_BALLENTRY(GPMC_D8, "h2", "ab3"),
+ _OMAP3_BALLENTRY(GPMC_D9, "k2", "ac3"),
+ _OMAP3_BALLENTRY(GPMC_NADV_ALE, "f3", "w1"),
+ _OMAP3_BALLENTRY(GPMC_NBE0_CLE, "g3", "ac12"),
+ _OMAP3_BALLENTRY(GPMC_NBE1, "u3", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS0, "g4", "y2"),
+ _OMAP3_BALLENTRY(GPMC_NCS1, "h3", "y1"),
+ _OMAP3_BALLENTRY(GPMC_NCS2, "v8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS3, "u8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS4, "t8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS5, "r8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS6, "p8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NCS7, "n8", NULL),
+ _OMAP3_BALLENTRY(GPMC_NOE, "g2", "v2"),
+ _OMAP3_BALLENTRY(GPMC_NWE, "f4", "v1"),
+ _OMAP3_BALLENTRY(GPMC_NWP, "h1", "ab10"),
+ _OMAP3_BALLENTRY(GPMC_WAIT0, "m8", "ab12"),
+ _OMAP3_BALLENTRY(GPMC_WAIT1, "l8", "ac10"),
+ _OMAP3_BALLENTRY(GPMC_WAIT2, "k8", NULL),
+ _OMAP3_BALLENTRY(GPMC_WAIT3, "j8", NULL),
+ _OMAP3_BALLENTRY(HDQ_SIO, "j25", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_CLK, "t28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA0, "t27", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA1, "u28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA2, "u27", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA3, "u26", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA4, "u25", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA5, "v28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA6, "v27", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DATA7, "v26", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_DIR, "r28", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_NXT, "t26", NULL),
+ _OMAP3_BALLENTRY(HSUSB0_STP, "t25", NULL),
+ _OMAP3_BALLENTRY(I2C1_SCL, "k21", NULL),
+ _OMAP3_BALLENTRY(I2C1_SDA, "j21", NULL),
+ _OMAP3_BALLENTRY(I2C2_SCL, "af15", NULL),
+ _OMAP3_BALLENTRY(I2C2_SDA, "ae15", NULL),
+ _OMAP3_BALLENTRY(I2C3_SCL, "af14", NULL),
+ _OMAP3_BALLENTRY(I2C3_SDA, "ag14", NULL),
+ _OMAP3_BALLENTRY(I2C4_SCL, "ad26", NULL),
+ _OMAP3_BALLENTRY(I2C4_SDA, "ae26", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU0, "aa11", NULL),
+ _OMAP3_BALLENTRY(JTAG_EMU1, "aa10", NULL),
+ _OMAP3_BALLENTRY(JTAG_RTCK, "aa12", NULL),
+ _OMAP3_BALLENTRY(JTAG_TCK, "aa13", NULL),
+ _OMAP3_BALLENTRY(JTAG_TDI, "aa20", NULL),
+ _OMAP3_BALLENTRY(JTAG_TDO, "aa19", NULL),
+ _OMAP3_BALLENTRY(JTAG_TMS_TMSC, "aa18", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKR, "y21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_CLKX, "w21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DR, "u21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_DX, "v21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSR, "aa21", NULL),
+ _OMAP3_BALLENTRY(MCBSP1_FSX, "k26", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_CLKX, "n21", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DR, "r21", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_DX, "m21", NULL),
+ _OMAP3_BALLENTRY(MCBSP2_FSX, "p21", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_CLKX, "af5", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DR, "ae6", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_DX, "af6", NULL),
+ _OMAP3_BALLENTRY(MCBSP3_FSX, "ae5", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_CLKX, "ae1", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_DR, "ad1", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_DX, "ad2", NULL),
+ _OMAP3_BALLENTRY(MCBSP4_FSX, "ac1", NULL),
+ _OMAP3_BALLENTRY(MCBSP_CLKS, "t21", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CLK, "ab3", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS0, "ac2", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS1, "ac3", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS2, "ab1", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_CS3, "ab2", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SIMO, "ab4", NULL),
+ _OMAP3_BALLENTRY(MCSPI1_SOMI, "aa4", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CLK, "aa3", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS0, "y4", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_CS1, "v3", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SIMO, "y2", NULL),
+ _OMAP3_BALLENTRY(MCSPI2_SOMI, "y3", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CLK, "n28", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_CMD, "m27", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT0, "n27", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT1, "n26", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT2, "n25", NULL),
+ _OMAP3_BALLENTRY(SDMMC1_DAT3, "p28", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CLK, "ae2", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_CMD, "ag5", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT0, "ah5", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT1, "ah4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT2, "ag4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT3, "af4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT4, "ae4", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT5, "ah3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT6, "af3", NULL),
+ _OMAP3_BALLENTRY(SDMMC2_DAT7, "ae3", NULL),
+ _OMAP3_BALLENTRY(SDRC_A0, NULL, "n22"),
+ _OMAP3_BALLENTRY(SDRC_A1, NULL, "n23"),
+ _OMAP3_BALLENTRY(SDRC_A10, NULL, "v22"),
+ _OMAP3_BALLENTRY(SDRC_A11, NULL, "v23"),
+ _OMAP3_BALLENTRY(SDRC_A12, NULL, "w22"),
+ _OMAP3_BALLENTRY(SDRC_A13, NULL, "w23"),
+ _OMAP3_BALLENTRY(SDRC_A14, NULL, "y22"),
+ _OMAP3_BALLENTRY(SDRC_A2, NULL, "p22"),
+ _OMAP3_BALLENTRY(SDRC_A3, NULL, "p23"),
+ _OMAP3_BALLENTRY(SDRC_A4, NULL, "r22"),
+ _OMAP3_BALLENTRY(SDRC_A5, NULL, "r23"),
+ _OMAP3_BALLENTRY(SDRC_A6, NULL, "t22"),
+ _OMAP3_BALLENTRY(SDRC_A7, NULL, "t23"),
+ _OMAP3_BALLENTRY(SDRC_A8, NULL, "u22"),
+ _OMAP3_BALLENTRY(SDRC_A9, NULL, "u23"),
+ _OMAP3_BALLENTRY(SDRC_BA0, "h9", "ab21"),
+ _OMAP3_BALLENTRY(SDRC_BA1, "h10", "ac21"),
+ _OMAP3_BALLENTRY(SDRC_CKE0, "h16", "j22"),
+ _OMAP3_BALLENTRY(SDRC_CKE1, "h17", "j23"),
+ _OMAP3_BALLENTRY(SDRC_CLK, "a13", "a11"),
+ _OMAP3_BALLENTRY(SDRC_D0, NULL, "j2"),
+ _OMAP3_BALLENTRY(SDRC_D1, NULL, "j1"),
+ _OMAP3_BALLENTRY(SDRC_D10, "c15", "b14"),
+ _OMAP3_BALLENTRY(SDRC_D11, "b16", "a14"),
+ _OMAP3_BALLENTRY(SDRC_D12, "d17", "b16"),
+ _OMAP3_BALLENTRY(SDRC_D13, "c17", "a16"),
+ _OMAP3_BALLENTRY(SDRC_D14, "b17", "b19"),
+ _OMAP3_BALLENTRY(SDRC_D15, "d18", "a19"),
+ _OMAP3_BALLENTRY(SDRC_D16, NULL, "b3"),
+ _OMAP3_BALLENTRY(SDRC_D17, NULL, "a3"),
+ _OMAP3_BALLENTRY(SDRC_D18, NULL, "b5"),
+ _OMAP3_BALLENTRY(SDRC_D19, NULL, "a5"),
+ _OMAP3_BALLENTRY(SDRC_D2, NULL, "g2"),
+ _OMAP3_BALLENTRY(SDRC_D20, NULL, "b8"),
+ _OMAP3_BALLENTRY(SDRC_D21, NULL, "a8"),
+ _OMAP3_BALLENTRY(SDRC_D22, NULL, "b9"),
+ _OMAP3_BALLENTRY(SDRC_D23, NULL, "a9"),
+ _OMAP3_BALLENTRY(SDRC_D24, NULL, "b21"),
+ _OMAP3_BALLENTRY(SDRC_D25, NULL, "a21"),
+ _OMAP3_BALLENTRY(SDRC_D26, NULL, "d22"),
+ _OMAP3_BALLENTRY(SDRC_D27, NULL, "d23"),
+ _OMAP3_BALLENTRY(SDRC_D28, NULL, "e22"),
+ _OMAP3_BALLENTRY(SDRC_D29, NULL, "e23"),
+ _OMAP3_BALLENTRY(SDRC_D3, NULL, "g1"),
+ _OMAP3_BALLENTRY(SDRC_D30, NULL, "g22"),
+ _OMAP3_BALLENTRY(SDRC_D31, NULL, "g23"),
+ _OMAP3_BALLENTRY(SDRC_D4, NULL, "f2"),
+ _OMAP3_BALLENTRY(SDRC_D5, NULL, "f1"),
+ _OMAP3_BALLENTRY(SDRC_D6, NULL, "d2"),
+ _OMAP3_BALLENTRY(SDRC_D7, NULL, "d1"),
+ _OMAP3_BALLENTRY(SDRC_D8, "c14", "b13"),
+ _OMAP3_BALLENTRY(SDRC_D9, "b14", "a13"),
+ _OMAP3_BALLENTRY(SDRC_DM0, NULL, "c1"),
+ _OMAP3_BALLENTRY(SDRC_DM1, "a16", "a17"),
+ _OMAP3_BALLENTRY(SDRC_DM2, NULL, "a6"),
+ _OMAP3_BALLENTRY(SDRC_DM3, NULL, "a20"),
+ _OMAP3_BALLENTRY(SDRC_DQS0, NULL, "c2"),
+ _OMAP3_BALLENTRY(SDRC_DQS1, "a17", "b17"),
+ _OMAP3_BALLENTRY(SDRC_DQS2, NULL, "b6"),
+ _OMAP3_BALLENTRY(SDRC_DQS3, NULL, "b20"),
+ _OMAP3_BALLENTRY(SDRC_NCAS, "h13", "l22"),
+ _OMAP3_BALLENTRY(SDRC_NCLK, "a14", "b11"),
+ _OMAP3_BALLENTRY(SDRC_NCS0, "h11", "m22"),
+ _OMAP3_BALLENTRY(SDRC_NCS1, "h12", "m23"),
+ _OMAP3_BALLENTRY(SDRC_NRAS, "h14", "l23"),
+ _OMAP3_BALLENTRY(SDRC_NWE, "h15", "k23"),
+ _OMAP3_BALLENTRY(SIM_CLK, "p26", NULL),
+ _OMAP3_BALLENTRY(SIM_IO, "p27", NULL),
+ _OMAP3_BALLENTRY(SIM_PWRCTRL, "r27", NULL),
+ _OMAP3_BALLENTRY(SIM_RST, "r25", NULL),
+ _OMAP3_BALLENTRY(SYS_32K, "ae25", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT0, "ah26", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT1, "ag26", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT2, "ae14", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT3, "af18", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT4, "af19", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT5, "ae21", NULL),
+ _OMAP3_BALLENTRY(SYS_BOOT6, "af21", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT1, "ag25", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKOUT2, "ae22", NULL),
+ _OMAP3_BALLENTRY(SYS_CLKREQ, "af25", NULL),
+ _OMAP3_BALLENTRY(SYS_NIRQ, "af26", NULL),
+ _OMAP3_BALLENTRY(SYS_NRESWARM, "af24", NULL),
+ _OMAP3_BALLENTRY(SYS_OFF_MODE, "af22", NULL),
+ _OMAP3_BALLENTRY(UART1_CTS, "w8", NULL),
+ _OMAP3_BALLENTRY(UART1_RTS, "aa9", NULL),
+ _OMAP3_BALLENTRY(UART1_RX, "y8", NULL),
+ _OMAP3_BALLENTRY(UART1_TX, "aa8", NULL),
+ _OMAP3_BALLENTRY(UART2_CTS, "ab26", NULL),
+ _OMAP3_BALLENTRY(UART2_RTS, "ab25", NULL),
+ _OMAP3_BALLENTRY(UART2_RX, "ad25", NULL),
+ _OMAP3_BALLENTRY(UART2_TX, "aa25", NULL),
+ _OMAP3_BALLENTRY(UART3_CTS_RCTX, "h18", NULL),
+ _OMAP3_BALLENTRY(UART3_RTS_SD, "h19", NULL),
+ _OMAP3_BALLENTRY(UART3_RX_IRRX, "h20", NULL),
+ _OMAP3_BALLENTRY(UART3_TX_IRTX, "h21", NULL),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#else
+#define omap36xx_cbp_ball NULL
+#endif
+
+int __init omap3_mux_init(struct omap_board_mux *board_subset, int flags)
+{
+ struct omap_mux *package_subset;
+ struct omap_ball *package_balls;
+
+ switch (flags & OMAP_PACKAGE_MASK) {
+ case (OMAP_PACKAGE_CBC):
+ package_subset = omap3_cbc_subset;
+ package_balls = omap3_cbc_ball;
+ break;
+ case (OMAP_PACKAGE_CBB):
+ package_subset = omap3_cbb_subset;
+ package_balls = omap3_cbb_ball;
+ break;
+ case (OMAP_PACKAGE_CUS):
+ package_subset = omap3_cus_subset;
+ package_balls = omap3_cus_ball;
+ break;
+ case (OMAP_PACKAGE_CBP):
+ package_subset = omap36xx_cbp_subset;
+ package_balls = omap36xx_cbp_ball;
+ break;
+ default:
+ printk(KERN_ERR "mux: Unknown omap package, mux disabled\n");
+ return -EINVAL;
+ }
+
+ return omap_mux_init(OMAP3_CONTROL_PADCONF_MUX_PBASE,
+ OMAP3_CONTROL_PADCONF_MUX_SIZE,
+ omap3_muxmodes, package_subset, board_subset,
+ package_balls);
+}
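
As a usage sketch (not part of this patch), a board file would typically pass its own pad overrides together with one of the package flags handled in the switch above; struct omap_board_mux, OMAP_MUX_MODE0 and OMAP_PIN_INPUT_PULLUP are assumed here to come from mux.h, and the OMAP3_MUX() helper is added in mux34xx.h below:

	/* Hypothetical board-file fragment, assuming the mux.h flag names */
	static struct omap_board_mux board_mux[] __initdata = {
		/* put SYS_NIRQ into mode 0 as an input with pull-up */
		OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
		{ .reg_offset = OMAP_MUX_TERMINATOR },
	};

	static void __init example_board_mux_init(void)
	{
		/* CBB package, matching one of the cases handled above */
		omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	}
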
diff --git a/arch/arm/mach-omap2/mux34xx.h b/arch/arm/mach-omap2/mux34xx.h
new file mode 100644
index 00000000000..6543ebf8ecf
--- /dev/null
+++ b/arch/arm/mach-omap2/mux34xx.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2009 Nokia
+ * Copyright (C) 2009 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define OMAP3_CONTROL_PADCONF_MUX_PBASE 0x48002030LU
+
+#define OMAP3_MUX(mode0, mux_value) \
+{ \
+ .reg_offset = (OMAP3_CONTROL_PADCONF_##mode0##_OFFSET), \
+ .value = (mux_value), \
+}
+
+/*
+ * OMAP3 CONTROL_PADCONF* register offsets for pin-muxing
+ *
+ * Extracted from the TRM. Add 0x48002030 to these values to get the
+ * absolute addresses. The name in the macro is the mode-0 name of
+ * the pin. NOTE: These registers are 16-bits wide.
+ *
+ * Note that 34XX TRM uses MMC instead of SDMMC and SAD2D instead
+ * of CHASSIS for some registers. For the defines, we follow the
+ * 36XX naming, and use SDMMC and CHASSIS.
+ */
+#define OMAP3_CONTROL_PADCONF_SDRC_D0_OFFSET 0x000
+#define OMAP3_CONTROL_PADCONF_SDRC_D1_OFFSET 0x002
+#define OMAP3_CONTROL_PADCONF_SDRC_D2_OFFSET 0x004
+#define OMAP3_CONTROL_PADCONF_SDRC_D3_OFFSET 0x006
+#define OMAP3_CONTROL_PADCONF_SDRC_D4_OFFSET 0x008
+#define OMAP3_CONTROL_PADCONF_SDRC_D5_OFFSET 0x00a
+#define OMAP3_CONTROL_PADCONF_SDRC_D6_OFFSET 0x00c
+#define OMAP3_CONTROL_PADCONF_SDRC_D7_OFFSET 0x00e
+#define OMAP3_CONTROL_PADCONF_SDRC_D8_OFFSET 0x010
+#define OMAP3_CONTROL_PADCONF_SDRC_D9_OFFSET 0x012
+#define OMAP3_CONTROL_PADCONF_SDRC_D10_OFFSET 0x014
+#define OMAP3_CONTROL_PADCONF_SDRC_D11_OFFSET 0x016
+#define OMAP3_CONTROL_PADCONF_SDRC_D12_OFFSET 0x018
+#define OMAP3_CONTROL_PADCONF_SDRC_D13_OFFSET 0x01a
+#define OMAP3_CONTROL_PADCONF_SDRC_D14_OFFSET 0x01c
+#define OMAP3_CONTROL_PADCONF_SDRC_D15_OFFSET 0x01e
+#define OMAP3_CONTROL_PADCONF_SDRC_D16_OFFSET 0x020
+#define OMAP3_CONTROL_PADCONF_SDRC_D17_OFFSET 0x022
+#define OMAP3_CONTROL_PADCONF_SDRC_D18_OFFSET 0x024
+#define OMAP3_CONTROL_PADCONF_SDRC_D19_OFFSET 0x026
+#define OMAP3_CONTROL_PADCONF_SDRC_D20_OFFSET 0x028
+#define OMAP3_CONTROL_PADCONF_SDRC_D21_OFFSET 0x02a
+#define OMAP3_CONTROL_PADCONF_SDRC_D22_OFFSET 0x02c
+#define OMAP3_CONTROL_PADCONF_SDRC_D23_OFFSET 0x02e
+#define OMAP3_CONTROL_PADCONF_SDRC_D24_OFFSET 0x030
+#define OMAP3_CONTROL_PADCONF_SDRC_D25_OFFSET 0x032
+#define OMAP3_CONTROL_PADCONF_SDRC_D26_OFFSET 0x034
+#define OMAP3_CONTROL_PADCONF_SDRC_D27_OFFSET 0x036
+#define OMAP3_CONTROL_PADCONF_SDRC_D28_OFFSET 0x038
+#define OMAP3_CONTROL_PADCONF_SDRC_D29_OFFSET 0x03a
+#define OMAP3_CONTROL_PADCONF_SDRC_D30_OFFSET 0x03c
+#define OMAP3_CONTROL_PADCONF_SDRC_D31_OFFSET 0x03e
+#define OMAP3_CONTROL_PADCONF_SDRC_CLK_OFFSET 0x040
+#define OMAP3_CONTROL_PADCONF_SDRC_DQS0_OFFSET 0x042
+#define OMAP3_CONTROL_PADCONF_SDRC_DQS1_OFFSET 0x044
+#define OMAP3_CONTROL_PADCONF_SDRC_DQS2_OFFSET 0x046
+#define OMAP3_CONTROL_PADCONF_SDRC_DQS3_OFFSET 0x048
+#define OMAP3_CONTROL_PADCONF_GPMC_A1_OFFSET 0x04a
+#define OMAP3_CONTROL_PADCONF_GPMC_A2_OFFSET 0x04c
+#define OMAP3_CONTROL_PADCONF_GPMC_A3_OFFSET 0x04e
+#define OMAP3_CONTROL_PADCONF_GPMC_A4_OFFSET 0x050
+#define OMAP3_CONTROL_PADCONF_GPMC_A5_OFFSET 0x052
+#define OMAP3_CONTROL_PADCONF_GPMC_A6_OFFSET 0x054
+#define OMAP3_CONTROL_PADCONF_GPMC_A7_OFFSET 0x056
+#define OMAP3_CONTROL_PADCONF_GPMC_A8_OFFSET 0x058
+#define OMAP3_CONTROL_PADCONF_GPMC_A9_OFFSET 0x05a
+#define OMAP3_CONTROL_PADCONF_GPMC_A10_OFFSET 0x05c
+#define OMAP3_CONTROL_PADCONF_GPMC_D0_OFFSET 0x05e
+#define OMAP3_CONTROL_PADCONF_GPMC_D1_OFFSET 0x060
+#define OMAP3_CONTROL_PADCONF_GPMC_D2_OFFSET 0x062
+#define OMAP3_CONTROL_PADCONF_GPMC_D3_OFFSET 0x064
+#define OMAP3_CONTROL_PADCONF_GPMC_D4_OFFSET 0x066
+#define OMAP3_CONTROL_PADCONF_GPMC_D5_OFFSET 0x068
+#define OMAP3_CONTROL_PADCONF_GPMC_D6_OFFSET 0x06a
+#define OMAP3_CONTROL_PADCONF_GPMC_D7_OFFSET 0x06c
+#define OMAP3_CONTROL_PADCONF_GPMC_D8_OFFSET 0x06e
+#define OMAP3_CONTROL_PADCONF_GPMC_D9_OFFSET 0x070
+#define OMAP3_CONTROL_PADCONF_GPMC_D10_OFFSET 0x072
+#define OMAP3_CONTROL_PADCONF_GPMC_D11_OFFSET 0x074
+#define OMAP3_CONTROL_PADCONF_GPMC_D12_OFFSET 0x076
+#define OMAP3_CONTROL_PADCONF_GPMC_D13_OFFSET 0x078
+#define OMAP3_CONTROL_PADCONF_GPMC_D14_OFFSET 0x07a
+#define OMAP3_CONTROL_PADCONF_GPMC_D15_OFFSET 0x07c
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS0_OFFSET 0x07e
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS1_OFFSET 0x080
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS2_OFFSET 0x082
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS3_OFFSET 0x084
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS4_OFFSET 0x086
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS5_OFFSET 0x088
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS6_OFFSET 0x08a
+#define OMAP3_CONTROL_PADCONF_GPMC_NCS7_OFFSET 0x08c
+#define OMAP3_CONTROL_PADCONF_GPMC_CLK_OFFSET 0x08e
+#define OMAP3_CONTROL_PADCONF_GPMC_NADV_ALE_OFFSET 0x090
+#define OMAP3_CONTROL_PADCONF_GPMC_NOE_OFFSET 0x092
+#define OMAP3_CONTROL_PADCONF_GPMC_NWE_OFFSET 0x094
+#define OMAP3_CONTROL_PADCONF_GPMC_NBE0_CLE_OFFSET 0x096
+#define OMAP3_CONTROL_PADCONF_GPMC_NBE1_OFFSET 0x098
+#define OMAP3_CONTROL_PADCONF_GPMC_NWP_OFFSET 0x09a
+#define OMAP3_CONTROL_PADCONF_GPMC_WAIT0_OFFSET 0x09c
+#define OMAP3_CONTROL_PADCONF_GPMC_WAIT1_OFFSET 0x09e
+#define OMAP3_CONTROL_PADCONF_GPMC_WAIT2_OFFSET 0x0a0
+#define OMAP3_CONTROL_PADCONF_GPMC_WAIT3_OFFSET 0x0a2
+#define OMAP3_CONTROL_PADCONF_DSS_PCLK_OFFSET 0x0a4
+#define OMAP3_CONTROL_PADCONF_DSS_HSYNC_OFFSET 0x0a6
+#define OMAP3_CONTROL_PADCONF_DSS_VSYNC_OFFSET 0x0a8
+#define OMAP3_CONTROL_PADCONF_DSS_ACBIAS_OFFSET 0x0aa
+#define OMAP3_CONTROL_PADCONF_DSS_DATA0_OFFSET 0x0ac
+#define OMAP3_CONTROL_PADCONF_DSS_DATA1_OFFSET 0x0ae
+#define OMAP3_CONTROL_PADCONF_DSS_DATA2_OFFSET 0x0b0
+#define OMAP3_CONTROL_PADCONF_DSS_DATA3_OFFSET 0x0b2
+#define OMAP3_CONTROL_PADCONF_DSS_DATA4_OFFSET 0x0b4
+#define OMAP3_CONTROL_PADCONF_DSS_DATA5_OFFSET 0x0b6
+#define OMAP3_CONTROL_PADCONF_DSS_DATA6_OFFSET 0x0b8
+#define OMAP3_CONTROL_PADCONF_DSS_DATA7_OFFSET 0x0ba
+#define OMAP3_CONTROL_PADCONF_DSS_DATA8_OFFSET 0x0bc
+#define OMAP3_CONTROL_PADCONF_DSS_DATA9_OFFSET 0x0be
+#define OMAP3_CONTROL_PADCONF_DSS_DATA10_OFFSET 0x0c0
+#define OMAP3_CONTROL_PADCONF_DSS_DATA11_OFFSET 0x0c2
+#define OMAP3_CONTROL_PADCONF_DSS_DATA12_OFFSET 0x0c4
+#define OMAP3_CONTROL_PADCONF_DSS_DATA13_OFFSET 0x0c6
+#define OMAP3_CONTROL_PADCONF_DSS_DATA14_OFFSET 0x0c8
+#define OMAP3_CONTROL_PADCONF_DSS_DATA15_OFFSET 0x0ca
+#define OMAP3_CONTROL_PADCONF_DSS_DATA16_OFFSET 0x0cc
+#define OMAP3_CONTROL_PADCONF_DSS_DATA17_OFFSET 0x0ce
+#define OMAP3_CONTROL_PADCONF_DSS_DATA18_OFFSET 0x0d0
+#define OMAP3_CONTROL_PADCONF_DSS_DATA19_OFFSET 0x0d2
+#define OMAP3_CONTROL_PADCONF_DSS_DATA20_OFFSET 0x0d4
+#define OMAP3_CONTROL_PADCONF_DSS_DATA21_OFFSET 0x0d6
+#define OMAP3_CONTROL_PADCONF_DSS_DATA22_OFFSET 0x0d8
+#define OMAP3_CONTROL_PADCONF_DSS_DATA23_OFFSET 0x0da
+#define OMAP3_CONTROL_PADCONF_CAM_HS_OFFSET 0x0dc
+#define OMAP3_CONTROL_PADCONF_CAM_VS_OFFSET 0x0de
+#define OMAP3_CONTROL_PADCONF_CAM_XCLKA_OFFSET 0x0e0
+#define OMAP3_CONTROL_PADCONF_CAM_PCLK_OFFSET 0x0e2
+#define OMAP3_CONTROL_PADCONF_CAM_FLD_OFFSET 0x0e4
+#define OMAP3_CONTROL_PADCONF_CAM_D0_OFFSET 0x0e6
+#define OMAP3_CONTROL_PADCONF_CAM_D1_OFFSET 0x0e8
+#define OMAP3_CONTROL_PADCONF_CAM_D2_OFFSET 0x0ea
+#define OMAP3_CONTROL_PADCONF_CAM_D3_OFFSET 0x0ec
+#define OMAP3_CONTROL_PADCONF_CAM_D4_OFFSET 0x0ee
+#define OMAP3_CONTROL_PADCONF_CAM_D5_OFFSET 0x0f0
+#define OMAP3_CONTROL_PADCONF_CAM_D6_OFFSET 0x0f2
+#define OMAP3_CONTROL_PADCONF_CAM_D7_OFFSET 0x0f4
+#define OMAP3_CONTROL_PADCONF_CAM_D8_OFFSET 0x0f6
+#define OMAP3_CONTROL_PADCONF_CAM_D9_OFFSET 0x0f8
+#define OMAP3_CONTROL_PADCONF_CAM_D10_OFFSET 0x0fa
+#define OMAP3_CONTROL_PADCONF_CAM_D11_OFFSET 0x0fc
+#define OMAP3_CONTROL_PADCONF_CAM_XCLKB_OFFSET 0x0fe
+#define OMAP3_CONTROL_PADCONF_CAM_WEN_OFFSET 0x100
+#define OMAP3_CONTROL_PADCONF_CAM_STROBE_OFFSET 0x102
+#define OMAP3_CONTROL_PADCONF_CSI2_DX0_OFFSET 0x104
+#define OMAP3_CONTROL_PADCONF_CSI2_DY0_OFFSET 0x106
+#define OMAP3_CONTROL_PADCONF_CSI2_DX1_OFFSET 0x108
+#define OMAP3_CONTROL_PADCONF_CSI2_DY1_OFFSET 0x10a
+#define OMAP3_CONTROL_PADCONF_MCBSP2_FSX_OFFSET 0x10c
+#define OMAP3_CONTROL_PADCONF_MCBSP2_CLKX_OFFSET 0x10e
+#define OMAP3_CONTROL_PADCONF_MCBSP2_DR_OFFSET 0x110
+#define OMAP3_CONTROL_PADCONF_MCBSP2_DX_OFFSET 0x112
+#define OMAP3_CONTROL_PADCONF_SDMMC1_CLK_OFFSET 0x114
+#define OMAP3_CONTROL_PADCONF_SDMMC1_CMD_OFFSET 0x116
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT0_OFFSET 0x118
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT1_OFFSET 0x11a
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT2_OFFSET 0x11c
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT3_OFFSET 0x11e
+
+/* SDMMC1_DAT4 - DAT7 are SIM_IO SIM_CLK SIM_PWRCTRL and SIM_RST on 36xx */
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT4_OFFSET 0x120
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT5_OFFSET 0x122
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT6_OFFSET 0x124
+#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT7_OFFSET 0x126
+
+#define OMAP3_CONTROL_PADCONF_SDMMC2_CLK_OFFSET 0x128
+#define OMAP3_CONTROL_PADCONF_SDMMC2_CMD_OFFSET 0x12a
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT0_OFFSET 0x12c
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT1_OFFSET 0x12e
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT2_OFFSET 0x130
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT3_OFFSET 0x132
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT4_OFFSET 0x134
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT5_OFFSET 0x136
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT6_OFFSET 0x138
+#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT7_OFFSET 0x13a
+#define OMAP3_CONTROL_PADCONF_MCBSP3_DX_OFFSET 0x13c
+#define OMAP3_CONTROL_PADCONF_MCBSP3_DR_OFFSET 0x13e
+#define OMAP3_CONTROL_PADCONF_MCBSP3_CLKX_OFFSET 0x140
+#define OMAP3_CONTROL_PADCONF_MCBSP3_FSX_OFFSET 0x142
+#define OMAP3_CONTROL_PADCONF_UART2_CTS_OFFSET 0x144
+#define OMAP3_CONTROL_PADCONF_UART2_RTS_OFFSET 0x146
+#define OMAP3_CONTROL_PADCONF_UART2_TX_OFFSET 0x148
+#define OMAP3_CONTROL_PADCONF_UART2_RX_OFFSET 0x14a
+#define OMAP3_CONTROL_PADCONF_UART1_TX_OFFSET 0x14c
+#define OMAP3_CONTROL_PADCONF_UART1_RTS_OFFSET 0x14e
+#define OMAP3_CONTROL_PADCONF_UART1_CTS_OFFSET 0x150
+#define OMAP3_CONTROL_PADCONF_UART1_RX_OFFSET 0x152
+#define OMAP3_CONTROL_PADCONF_MCBSP4_CLKX_OFFSET 0x154
+#define OMAP3_CONTROL_PADCONF_MCBSP4_DR_OFFSET 0x156
+#define OMAP3_CONTROL_PADCONF_MCBSP4_DX_OFFSET 0x158
+#define OMAP3_CONTROL_PADCONF_MCBSP4_FSX_OFFSET 0x15a
+#define OMAP3_CONTROL_PADCONF_MCBSP1_CLKR_OFFSET 0x15c
+#define OMAP3_CONTROL_PADCONF_MCBSP1_FSR_OFFSET 0x15e
+#define OMAP3_CONTROL_PADCONF_MCBSP1_DX_OFFSET 0x160
+#define OMAP3_CONTROL_PADCONF_MCBSP1_DR_OFFSET 0x162
+#define OMAP3_CONTROL_PADCONF_MCBSP_CLKS_OFFSET 0x164
+#define OMAP3_CONTROL_PADCONF_MCBSP1_FSX_OFFSET 0x166
+#define OMAP3_CONTROL_PADCONF_MCBSP1_CLKX_OFFSET 0x168
+#define OMAP3_CONTROL_PADCONF_UART3_CTS_RCTX_OFFSET 0x16a
+#define OMAP3_CONTROL_PADCONF_UART3_RTS_SD_OFFSET 0x16c
+#define OMAP3_CONTROL_PADCONF_UART3_RX_IRRX_OFFSET 0x16e
+#define OMAP3_CONTROL_PADCONF_UART3_TX_IRTX_OFFSET 0x170
+#define OMAP3_CONTROL_PADCONF_HSUSB0_CLK_OFFSET 0x172
+#define OMAP3_CONTROL_PADCONF_HSUSB0_STP_OFFSET 0x174
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DIR_OFFSET 0x176
+#define OMAP3_CONTROL_PADCONF_HSUSB0_NXT_OFFSET 0x178
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA0_OFFSET 0x17a
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA1_OFFSET 0x17c
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA2_OFFSET 0x17e
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA3_OFFSET 0x180
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA4_OFFSET 0x182
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA5_OFFSET 0x184
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA6_OFFSET 0x186
+#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA7_OFFSET 0x188
+#define OMAP3_CONTROL_PADCONF_I2C1_SCL_OFFSET 0x18a
+#define OMAP3_CONTROL_PADCONF_I2C1_SDA_OFFSET 0x18c
+#define OMAP3_CONTROL_PADCONF_I2C2_SCL_OFFSET 0x18e
+#define OMAP3_CONTROL_PADCONF_I2C2_SDA_OFFSET 0x190
+#define OMAP3_CONTROL_PADCONF_I2C3_SCL_OFFSET 0x192
+#define OMAP3_CONTROL_PADCONF_I2C3_SDA_OFFSET 0x194
+#define OMAP3_CONTROL_PADCONF_HDQ_SIO_OFFSET 0x196
+#define OMAP3_CONTROL_PADCONF_MCSPI1_CLK_OFFSET 0x198
+#define OMAP3_CONTROL_PADCONF_MCSPI1_SIMO_OFFSET 0x19a
+#define OMAP3_CONTROL_PADCONF_MCSPI1_SOMI_OFFSET 0x19c
+#define OMAP3_CONTROL_PADCONF_MCSPI1_CS0_OFFSET 0x19e
+#define OMAP3_CONTROL_PADCONF_MCSPI1_CS1_OFFSET 0x1a0
+#define OMAP3_CONTROL_PADCONF_MCSPI1_CS2_OFFSET 0x1a2
+#define OMAP3_CONTROL_PADCONF_MCSPI1_CS3_OFFSET 0x1a4
+#define OMAP3_CONTROL_PADCONF_MCSPI2_CLK_OFFSET 0x1a6
+#define OMAP3_CONTROL_PADCONF_MCSPI2_SIMO_OFFSET 0x1a8
+#define OMAP3_CONTROL_PADCONF_MCSPI2_SOMI_OFFSET 0x1aa
+#define OMAP3_CONTROL_PADCONF_MCSPI2_CS0_OFFSET 0x1ac
+#define OMAP3_CONTROL_PADCONF_MCSPI2_CS1_OFFSET 0x1ae
+#define OMAP3_CONTROL_PADCONF_SYS_NIRQ_OFFSET 0x1b0
+#define OMAP3_CONTROL_PADCONF_SYS_CLKOUT2_OFFSET 0x1b2
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD0_OFFSET 0x1b4
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD1_OFFSET 0x1b6
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD2_OFFSET 0x1b8
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD3_OFFSET 0x1ba
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD4_OFFSET 0x1bc
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD5_OFFSET 0x1be
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD6_OFFSET 0x1c0
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD7_OFFSET 0x1c2
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD8_OFFSET 0x1c4
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD9_OFFSET 0x1c6
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD10_OFFSET 0x1c8
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD11_OFFSET 0x1ca
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD12_OFFSET 0x1cc
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD13_OFFSET 0x1ce
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD14_OFFSET 0x1d0
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD15_OFFSET 0x1d2
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD16_OFFSET 0x1d4
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD17_OFFSET 0x1d6
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD18_OFFSET 0x1d8
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD19_OFFSET 0x1da
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD20_OFFSET 0x1dc
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD21_OFFSET 0x1de
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD22_OFFSET 0x1e0
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD23_OFFSET 0x1e2
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD24_OFFSET 0x1e4
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD25_OFFSET 0x1e6
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD26_OFFSET 0x1e8
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD27_OFFSET 0x1ea
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD28_OFFSET 0x1ec
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD29_OFFSET 0x1ee
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD30_OFFSET 0x1f0
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD31_OFFSET 0x1f2
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD32_OFFSET 0x1f4
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD33_OFFSET 0x1f6
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD34_OFFSET 0x1f8
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD35_OFFSET 0x1fa
+#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD36_OFFSET 0x1fc
+
+/* Note that 34xx TRM has SAD2D instead of CHASSIS for these */
+#define OMAP3_CONTROL_PADCONF_CHASSIS_CLK26MI_OFFSET 0x1fe
+#define OMAP3_CONTROL_PADCONF_CHASSIS_NRESPWRON_OFFSET 0x200
+#define OMAP3_CONTROL_PADCONF_CHASSIS_NRESWARW_OFFSET 0x202
+#define OMAP3_CONTROL_PADCONF_CHASSIS_NIRQ_OFFSET 0x204
+#define OMAP3_CONTROL_PADCONF_CHASSIS_FIQ_OFFSET 0x206
+#define OMAP3_CONTROL_PADCONF_CHASSIS_ARMIRQ_OFFSET 0x208
+#define OMAP3_CONTROL_PADCONF_CHASSIS_IVAIRQ_OFFSET 0x20a
+#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ0_OFFSET 0x20c
+#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ1_OFFSET 0x20e
+#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ2_OFFSET 0x210
+#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ3_OFFSET 0x212
+#define OMAP3_CONTROL_PADCONF_CHASSIS_NTRST_OFFSET 0x214
+#define OMAP3_CONTROL_PADCONF_CHASSIS_TDI_OFFSET 0x216
+#define OMAP3_CONTROL_PADCONF_CHASSIS_TDO_OFFSET 0x218
+#define OMAP3_CONTROL_PADCONF_CHASSIS_TMS_OFFSET 0x21a
+#define OMAP3_CONTROL_PADCONF_CHASSIS_TCK_OFFSET 0x21c
+#define OMAP3_CONTROL_PADCONF_CHASSIS_RTCK_OFFSET 0x21e
+#define OMAP3_CONTROL_PADCONF_CHASSIS_MSTDBY_OFFSET 0x220
+#define OMAP3_CONTROL_PADCONF_CHASSIS_IDLEREQ_OFFSET 0x222
+#define OMAP3_CONTROL_PADCONF_CHASSIS_IDLEACK_OFFSET 0x224
+
+#define OMAP3_CONTROL_PADCONF_SAD2D_MWRITE_OFFSET 0x226
+#define OMAP3_CONTROL_PADCONF_SAD2D_SWRITE_OFFSET 0x228
+#define OMAP3_CONTROL_PADCONF_SAD2D_MREAD_OFFSET 0x22a
+#define OMAP3_CONTROL_PADCONF_SAD2D_SREAD_OFFSET 0x22c
+#define OMAP3_CONTROL_PADCONF_SAD2D_MBUSFLAG_OFFSET 0x22e
+#define OMAP3_CONTROL_PADCONF_SAD2D_SBUSFLAG_OFFSET 0x230
+#define OMAP3_CONTROL_PADCONF_SDRC_CKE0_OFFSET 0x232
+#define OMAP3_CONTROL_PADCONF_SDRC_CKE1_OFFSET 0x234
+
+/* 36xx only */
+#define OMAP3_CONTROL_PADCONF_GPMC_A11_OFFSET 0x236
+#define OMAP3_CONTROL_PADCONF_SDRC_BA0_OFFSET 0x570
+#define OMAP3_CONTROL_PADCONF_SDRC_BA1_OFFSET 0x572
+#define OMAP3_CONTROL_PADCONF_SDRC_A0_OFFSET 0x574
+#define OMAP3_CONTROL_PADCONF_SDRC_A1_OFFSET 0x576
+#define OMAP3_CONTROL_PADCONF_SDRC_A2_OFFSET 0x578
+#define OMAP3_CONTROL_PADCONF_SDRC_A3_OFFSET 0x57a
+#define OMAP3_CONTROL_PADCONF_SDRC_A4_OFFSET 0x57c
+#define OMAP3_CONTROL_PADCONF_SDRC_A5_OFFSET 0x57e
+#define OMAP3_CONTROL_PADCONF_SDRC_A6_OFFSET 0x580
+#define OMAP3_CONTROL_PADCONF_SDRC_A7_OFFSET 0x582
+#define OMAP3_CONTROL_PADCONF_SDRC_A8_OFFSET 0x584
+#define OMAP3_CONTROL_PADCONF_SDRC_A9_OFFSET 0x586
+#define OMAP3_CONTROL_PADCONF_SDRC_A10_OFFSET 0x588
+#define OMAP3_CONTROL_PADCONF_SDRC_A11_OFFSET 0x58a
+#define OMAP3_CONTROL_PADCONF_SDRC_A12_OFFSET 0x58c
+#define OMAP3_CONTROL_PADCONF_SDRC_A13_OFFSET 0x58e
+#define OMAP3_CONTROL_PADCONF_SDRC_A14_OFFSET 0x590
+#define OMAP3_CONTROL_PADCONF_SDRC_NCS0_OFFSET 0x592
+#define OMAP3_CONTROL_PADCONF_SDRC_NCS1_OFFSET 0x594
+#define OMAP3_CONTROL_PADCONF_SDRC_NCLK_OFFSET 0x596
+#define OMAP3_CONTROL_PADCONF_SDRC_NRAS_OFFSET 0x598
+#define OMAP3_CONTROL_PADCONF_SDRC_NCAS_OFFSET 0x59a
+#define OMAP3_CONTROL_PADCONF_SDRC_NWE_OFFSET 0x59c
+#define OMAP3_CONTROL_PADCONF_SDRC_DM0_OFFSET 0x59e
+#define OMAP3_CONTROL_PADCONF_SDRC_DM1_OFFSET 0x5a0
+#define OMAP3_CONTROL_PADCONF_SDRC_DM2_OFFSET 0x5a2
+#define OMAP3_CONTROL_PADCONF_SDRC_DM3_OFFSET 0x5a4
+
+/* 36xx only, these are SDMMC1_DAT4 - DAT7 on 34xx */
+#define OMAP3_CONTROL_PADCONF_SIM_IO_OFFSET 0x120
+#define OMAP3_CONTROL_PADCONF_SIM_CLK_OFFSET 0x122
+#define OMAP3_CONTROL_PADCONF_SIM_PWRCTRL_OFFSET 0x124
+#define OMAP3_CONTROL_PADCONF_SIM_RST_OFFSET 0x126
+
+#define OMAP3_CONTROL_PADCONF_ETK_CLK_OFFSET 0x5a8
+#define OMAP3_CONTROL_PADCONF_ETK_CTL_OFFSET 0x5aa
+#define OMAP3_CONTROL_PADCONF_ETK_D0_OFFSET 0x5ac
+#define OMAP3_CONTROL_PADCONF_ETK_D1_OFFSET 0x5ae
+#define OMAP3_CONTROL_PADCONF_ETK_D2_OFFSET 0x5b0
+#define OMAP3_CONTROL_PADCONF_ETK_D3_OFFSET 0x5b2
+#define OMAP3_CONTROL_PADCONF_ETK_D4_OFFSET 0x5b4
+#define OMAP3_CONTROL_PADCONF_ETK_D5_OFFSET 0x5b6
+#define OMAP3_CONTROL_PADCONF_ETK_D6_OFFSET 0x5b8
+#define OMAP3_CONTROL_PADCONF_ETK_D7_OFFSET 0x5ba
+#define OMAP3_CONTROL_PADCONF_ETK_D8_OFFSET 0x5bc
+#define OMAP3_CONTROL_PADCONF_ETK_D9_OFFSET 0x5be
+#define OMAP3_CONTROL_PADCONF_ETK_D10_OFFSET 0x5c0
+#define OMAP3_CONTROL_PADCONF_ETK_D11_OFFSET 0x5c2
+#define OMAP3_CONTROL_PADCONF_ETK_D12_OFFSET 0x5c4
+#define OMAP3_CONTROL_PADCONF_ETK_D13_OFFSET 0x5c6
+#define OMAP3_CONTROL_PADCONF_ETK_D14_OFFSET 0x5c8
+#define OMAP3_CONTROL_PADCONF_ETK_D15_OFFSET 0x5ca
+#define OMAP3_CONTROL_PADCONF_I2C4_SCL_OFFSET 0x9d0
+#define OMAP3_CONTROL_PADCONF_I2C4_SDA_OFFSET 0x9d2
+#define OMAP3_CONTROL_PADCONF_SYS_32K_OFFSET 0x9d4
+#define OMAP3_CONTROL_PADCONF_SYS_CLKREQ_OFFSET 0x9d6
+#define OMAP3_CONTROL_PADCONF_SYS_NRESWARM_OFFSET 0x9d8
+#define OMAP3_CONTROL_PADCONF_SYS_BOOT0_OFFSET 0x9da
+#define OMAP3_CONTROL_PADCONF_SYS_BOOT1_OFFSET 0x9dc
+#define OMAP3_CONTROL_PADCONF_SYS_BOOT2_OFFSET 0x9de
+#define OMAP3_CONTROL_PADCONF_SYS_BOOT3_OFFSET 0x9e0
+#define OMAP3_CONTROL_PADCONF_SYS_BOOT4_OFFSET 0x9e2
+#define OMAP3_CONTROL_PADCONF_SYS_BOOT5_OFFSET 0x9e4
+#define OMAP3_CONTROL_PADCONF_SYS_BOOT6_OFFSET 0x9e6
+#define OMAP3_CONTROL_PADCONF_SYS_OFF_MODE_OFFSET 0x9e8
+#define OMAP3_CONTROL_PADCONF_SYS_CLKOUT1_OFFSET 0x9ea
+#define OMAP3_CONTROL_PADCONF_JTAG_NTRST_OFFSET 0x9ec
+#define OMAP3_CONTROL_PADCONF_JTAG_TCK_OFFSET 0x9ee
+#define OMAP3_CONTROL_PADCONF_JTAG_TMS_TMSC_OFFSET 0x9f0
+#define OMAP3_CONTROL_PADCONF_JTAG_TDI_OFFSET 0x9f2
+#define OMAP3_CONTROL_PADCONF_JTAG_EMU0_OFFSET 0x9f4
+#define OMAP3_CONTROL_PADCONF_JTAG_EMU1_OFFSET 0x9f6
+#define OMAP3_CONTROL_PADCONF_SAD2D_SWAKEUP_OFFSET 0xa1c
+#define OMAP3_CONTROL_PADCONF_JTAG_RTCK_OFFSET 0xa1e
+#define OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET 0xa20
+
+#define OMAP3_CONTROL_PADCONF_MUX_SIZE \
+ (OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET + 0x2)
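
A small sanity check of the offset scheme described in the header comment (a sketch, not from the patch): the physical address of a pad register is just the 0x48002030 base plus the per-pad offset, e.g. 0x48002030 + 0x14c = 0x4800217c for the 16-bit UART1_TX pad register:

	#include <linux/types.h>

	/* Sketch: absolute address of the UART1_TX pad register */
	static inline u32 omap3_uart1_tx_padconf_pa(void)
	{
		return OMAP3_CONTROL_PADCONF_MUX_PBASE +
			OMAP3_CONTROL_PADCONF_UART1_TX_OFFSET;	/* 0x4800217c */
	}
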
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4afadba0947..aa3f65c2ac9 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -27,20 +27,39 @@
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
- * The primary core will update the this flag using a hardware
- * register AuxCoreBoot1.
+ * The primary core will update this flag using a hardware
+ * register AuxCoreBoot0.
*/
ENTRY(omap_secondary_startup)
- mrc p15, 0, r0, c0, c0, 5
- and r0, r0, #0x0f
-hold: ldr r1, =OMAP4_AUX_CORE_BOOT1_PA @ read from AuxCoreBoot1
- ldr r2, [r1]
- cmp r2, r0
+hold: ldr r12,=0x103
+ dsb
+ smc @ read from AuxCoreBoot0
+ mov r0, r0, lsr #9
+ mrc p15, 0, r4, c0, c0, 5
+ and r4, r4, #0x0f
+ cmp r0, r4
bne hold
/*
- * we've been released from the cpu_release,secondary_stack
+ * we've been released from the wait loop, secondary_stack
* should now contain the SVC stack for this core
*/
b secondary_startup
+END(omap_secondary_startup)
+
+ENTRY(omap_modify_auxcoreboot0)
+ stmfd sp!, {r1-r12, lr}
+ ldr r12, =0x104
+ dsb
+ smc
+ ldmfd sp!, {r1-r12, pc}
+END(omap_modify_auxcoreboot0)
+
+ENTRY(omap_auxcoreboot_addr)
+ stmfd sp!, {r2-r12, lr}
+ ldr r12, =0x105
+ dsb
+ smc
+ ldmfd sp!, {r2-r12, pc}
+END(omap_auxcoreboot_addr)
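
The two new SMC entry points above are called from C in omap-smp.c below; a hedged sketch of the matching prototypes (argument names are assumptions, the real declarations live in a platform header not shown in this hunk):

	/* Set/clear bits in AuxCoreBoot0 via the secure monitor (SMC service 0x104) */
	extern void omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);

	/* Pass the secondary startup physical address to the ROM code (SMC service 0x105) */
	extern void omap_auxcoreboot_addr(u32 cpu_addr);
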
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 4890bcf4dad..38153e5fbca 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -17,19 +17,15 @@
*/
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <asm/cacheflush.h>
#include <asm/localtimer.h>
#include <asm/smp_scu.h>
#include <mach/hardware.h>
#include <plat/common.h>
-/* Registers used for communicating startup information */
-static void __iomem *omap4_auxcoreboot_reg0;
-static void __iomem *omap4_auxcoreboot_reg1;
-
/* SCU base address */
static void __iomem *scu_base;
@@ -65,8 +61,6 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
-
/*
* Set synchronisation state between this boot processor
* and the secondary one
@@ -74,18 +68,15 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
spin_lock(&boot_lock);
/*
- * Update the AuxCoreBoot1 with boot state for secondary core.
+ * Update the AuxCoreBoot0 with boot state for secondary core.
* omap_secondary_startup() routine will hold the secondary core till
* the AuxCoreBoot1 register is updated with cpu state
* A barrier is added to ensure that write buffer is drained
*/
- __raw_writel(cpu, omap4_auxcoreboot_reg1);
+ omap_modify_auxcoreboot0(0x200, 0x0);
+ flush_cache_all();
smp_wmb();
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout))
- ;
-
/*
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
@@ -99,17 +90,18 @@ static void __init wakeup_secondary(void)
{
/*
* Write the address of secondary startup routine into the
- * AuxCoreBoot0 where ROM code will jump and start executing
+ * AuxCoreBoot1 where ROM code will jump and start executing
* on secondary core once out of WFE
* A barrier is added to ensure that write buffer is drained
*/
- __raw_writel(virt_to_phys(omap_secondary_startup), \
- omap4_auxcoreboot_reg0);
+ omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup));
smp_wmb();
/*
* Send a 'sev' to wake the secondary core from WFE.
+ * Drain the outstanding writes to memory
*/
+ dsb();
set_event();
mb();
}
@@ -136,7 +128,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int ncores = get_core_count();
unsigned int cpu = smp_processor_id();
- void __iomem *omap4_wkupgen_base;
int i;
/* sanity check */
@@ -168,12 +159,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
- /* Never released */
- omap4_wkupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
- BUG_ON(!omap4_wkupgen_base);
- omap4_auxcoreboot_reg0 = omap4_wkupgen_base + 0x800;
- omap4_auxcoreboot_reg1 = omap4_wkupgen_base + 0x804;
-
if (max_cpus > 1) {
/*
* Enable the local timer or broadcast device for the
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 633b216a8b2..d8c8545875b 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -45,6 +45,7 @@
#include <linux/mutex.h>
#include <linux/bootmem.h>
+#include <plat/common.h>
#include <plat/cpu.h>
#include <plat/clockdomain.h>
#include <plat/powerdomain.h>
@@ -210,6 +211,32 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
}
/**
+ * _set_module_autoidle: set the OCP_SYSCONFIG AUTOIDLE field in @v
+ * @oh: struct omap_hwmod *
+ * @autoidle: desired AUTOIDLE bitfield value (0 or 1)
+ * @v: pointer to register contents to modify
+ *
+ * Update the module autoidle bit in @v to be @autoidle for the @oh
+ * hwmod. The autoidle bit controls whether the module can gate
+ * internal clocks automatically when it isn't doing anything; the
+ * exact function of this bit varies on a per-module basis. This
+ * function does not write to the hardware. Returns -EINVAL upon
+ * error or 0 upon success.
+ */
+static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
+ u32 *v)
+{
+ if (!oh->sysconfig ||
+ !(oh->sysconfig->sysc_flags & SYSC_HAS_AUTOIDLE))
+ return -EINVAL;
+
+ *v &= ~SYSC_AUTOIDLE_MASK;
+ *v |= autoidle << SYSC_AUTOIDLE_SHIFT;
+
+ return 0;
+}
+
+/**
* _enable_wakeup: set OCP_SYSCONFIG.ENAWAKEUP bit in the hardware
* @oh: struct omap_hwmod *
*
@@ -326,6 +353,9 @@ static int _init_main_clk(struct omap_hwmod *oh)
ret = -EINVAL;
oh->_clk = c;
+ WARN(!c->clkdm, "omap_hwmod: %s: missing clockdomain for %s.\n",
+ oh->clkdev_con_id, c->name);
+
return ret;
}
@@ -557,8 +587,19 @@ static void _sysc_enable(struct omap_hwmod *oh)
_set_master_standbymode(oh, idlemode, &v);
}
- /* XXX OCP AUTOIDLE bit? */
+ if (oh->sysconfig->sysc_flags & SYSC_HAS_AUTOIDLE) {
+ idlemode = (oh->flags & HWMOD_NO_OCP_AUTOIDLE) ?
+ 0 : 1;
+ _set_module_autoidle(oh, idlemode, &v);
+ }
+
+ /* XXX OCP ENAWAKEUP bit? */
+ /*
+ * XXX The clock framework should handle this, by
+ * calling into this code. But this must wait until the
+ * clock structures are tagged with omap_hwmod entries
+ */
if (oh->flags & HWMOD_SET_DEFAULT_CLOCKACT &&
oh->sysconfig->sysc_flags & SYSC_HAS_CLOCKACTIVITY)
_set_clockactivity(oh, oh->sysconfig->clockact, &v);
@@ -622,7 +663,8 @@ static void _sysc_shutdown(struct omap_hwmod *oh)
if (oh->sysconfig->sysc_flags & SYSC_HAS_MIDLEMODE)
_set_master_standbymode(oh, HWMOD_IDLEMODE_FORCE, &v);
- /* XXX clear OCP AUTOIDLE bit? */
+ if (oh->sysconfig->sysc_flags & SYSC_HAS_AUTOIDLE)
+ _set_module_autoidle(oh, 1, &v);
_write_sysconfig(v, oh);
}
@@ -736,7 +778,7 @@ static int _wait_target_ready(struct omap_hwmod *oh)
static int _reset(struct omap_hwmod *oh)
{
u32 r, v;
- int c;
+ int c = 0;
if (!oh->sysconfig ||
!(oh->sysconfig->sysc_flags & SYSC_HAS_SOFTRESET) ||
@@ -758,13 +800,9 @@ static int _reset(struct omap_hwmod *oh)
return r;
_write_sysconfig(v, oh);
- c = 0;
- while (c < MAX_MODULE_RESET_WAIT &&
- !(omap_hwmod_readl(oh, oh->sysconfig->syss_offs) &
- SYSS_RESETDONE_MASK)) {
- udelay(1);
- c++;
- }
+ omap_test_timeout((omap_hwmod_readl(oh, oh->sysconfig->syss_offs) &
+ SYSS_RESETDONE_MASK),
+ MAX_MODULE_RESET_WAIT, c);
if (c == MAX_MODULE_RESET_WAIT)
WARN(1, "omap_hwmod: %s: failed to reset in %d usec\n",
@@ -884,33 +922,6 @@ static int _shutdown(struct omap_hwmod *oh)
}
/**
- * _write_clockact_lock - set the module's clockactivity bits
- * @oh: struct omap_hwmod *
- * @clockact: CLOCKACTIVITY field bits
- *
- * Writes the CLOCKACTIVITY bits @clockact to the hwmod @oh
- * OCP_SYSCONFIG register. Returns -EINVAL if the hwmod is in the
- * wrong state or returns 0.
- */
-static int _write_clockact_lock(struct omap_hwmod *oh, u8 clockact)
-{
- u32 v;
-
- if (!oh->sysconfig ||
- !(oh->sysconfig->sysc_flags & SYSC_HAS_CLOCKACTIVITY))
- return -EINVAL;
-
- mutex_lock(&omap_hwmod_mutex);
- v = oh->_sysc_cache;
- _set_clockactivity(oh, clockact, &v);
- _write_sysconfig(v, oh);
- mutex_unlock(&omap_hwmod_mutex);
-
- return 0;
-}
-
-
-/**
* _setup - do initial configuration of omap_hwmod
* @oh: struct omap_hwmod *
*
@@ -948,11 +959,19 @@ static int _setup(struct omap_hwmod *oh)
_enable(oh);
- if (!(oh->flags & HWMOD_INIT_NO_RESET))
- _reset(oh);
-
- /* XXX OCP AUTOIDLE bit? */
- /* XXX OCP ENAWAKEUP bit? */
+ if (!(oh->flags & HWMOD_INIT_NO_RESET)) {
+ /*
+ * XXX Do the OCP_SYSCONFIG bits need to be
+ * reprogrammed after a reset? If not, then this can
+ * be removed. If they do, then probably the
+ * _enable() function should be split to avoid the
+ * rewrite of the OCP_SYSCONFIG register.
+ */
+ if (oh->sysconfig) {
+ _update_sysc_cache(oh);
+ _sysc_enable(oh);
+ }
+ }
if (!(oh->flags & HWMOD_INIT_NO_IDLE))
_idle(oh);
@@ -1348,8 +1367,9 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
/* For each IRQ, DMA, memory area, fill in array.*/
for (i = 0; i < oh->mpu_irqs_cnt; i++) {
- (res + r)->start = *(oh->mpu_irqs + i);
- (res + r)->end = *(oh->mpu_irqs + i);
+ (res + r)->name = (oh->mpu_irqs + i)->name;
+ (res + r)->start = (oh->mpu_irqs + i)->irq;
+ (res + r)->end = (oh->mpu_irqs + i)->irq;
(res + r)->flags = IORESOURCE_IRQ;
r++;
}
@@ -1454,62 +1474,6 @@ int omap_hwmod_del_initiator_dep(struct omap_hwmod *oh,
}
/**
- * omap_hwmod_set_clockact_none - set clockactivity test to BOTH
- * @oh: struct omap_hwmod *
- *
- * On some modules, this function can affect the wakeup latency vs.
- * power consumption balance. Intended to be called by the
- * omap_device layer. Passes along the return value from
- * _write_clockact_lock().
- */
-int omap_hwmod_set_clockact_both(struct omap_hwmod *oh)
-{
- return _write_clockact_lock(oh, CLOCKACT_TEST_BOTH);
-}
-
-/**
- * omap_hwmod_set_clockact_none - set clockactivity test to MAIN
- * @oh: struct omap_hwmod *
- *
- * On some modules, this function can affect the wakeup latency vs.
- * power consumption balance. Intended to be called by the
- * omap_device layer. Passes along the return value from
- * _write_clockact_lock().
- */
-int omap_hwmod_set_clockact_main(struct omap_hwmod *oh)
-{
- return _write_clockact_lock(oh, CLOCKACT_TEST_MAIN);
-}
-
-/**
- * omap_hwmod_set_clockact_none - set clockactivity test to ICLK
- * @oh: struct omap_hwmod *
- *
- * On some modules, this function can affect the wakeup latency vs.
- * power consumption balance. Intended to be called by the
- * omap_device layer. Passes along the return value from
- * _write_clockact_lock().
- */
-int omap_hwmod_set_clockact_iclk(struct omap_hwmod *oh)
-{
- return _write_clockact_lock(oh, CLOCKACT_TEST_ICLK);
-}
-
-/**
- * omap_hwmod_set_clockact_none - set clockactivity test to NONE
- * @oh: struct omap_hwmod *
- *
- * On some modules, this function can affect the wakeup latency vs.
- * power consumption balance. Intended to be called by the
- * omap_device layer. Passes along the return value from
- * _write_clockact_lock().
- */
-int omap_hwmod_set_clockact_none(struct omap_hwmod *oh)
-{
- return _write_clockact_lock(oh, CLOCKACT_TEST_NONE);
-}
-
-/**
* omap_hwmod_enable_wakeup - allow device to wake up the system
* @oh: struct omap_hwmod *
*
diff --git a/arch/arm/mach-omap2/opp2420_data.c b/arch/arm/mach-omap2/opp2420_data.c
new file mode 100644
index 00000000000..126a9396b3a
--- /dev/null
+++ b/arch/arm/mach-omap2/opp2420_data.c
@@ -0,0 +1,126 @@
+/*
+ * opp2420_data.c - old-style "OPP" table for OMAP2420
+ *
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ * Copyright (C) 2004-2009 Nokia Corporation
+ *
+ * Richard Woodruff <r-woodruff2@ti.com>
+ *
+ * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
+ * These configurations are characterized by voltage and speed for clocks.
+ * The device is only validated for certain combinations. One way to express
+ * these combinations is via the 'ratios' at which the clocks operate with
+ * respect to each other. These ratio sets are for a given voltage/DPLL
+ * setting. All configurations can be described by a DPLL setting and a ratio.
+ * There are 3 ratio sets for the 2430 and X ratio sets for 2420.
+ *
+ * 2430 differs from 2420 in that there are no more phase synchronizers used.
+ * They both have a slightly different clock domain setup. 2420(iva1,dsp) vs
+ * 2430 (iva2.1, NOdsp, mdm)
+ *
+ * XXX Missing voltage data.
+ *
+ * The format described in this file is deprecated. Once a reasonable
+ * OPP API exists, the data in this file should be converted to use it.
+ *
+ * This is technically part of the OMAP2xxx clock code.
+ */
+
+#include "opp2xxx.h"
+#include "sdrc.h"
+#include "clock.h"
+
+/*-------------------------------------------------------------------------
+ * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
+ * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
+ * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
+ * CM_CLKSEL2_PLL, CM_CLKSEL_MDM
+ *
+ * The table is filled in based on the H4 board and 2430-SDP variants available.
+ * There are quite a few more rate combinations which could be defined.
+ *
+ * When multiple values are defined, startup will try to choose the
+ * fastest one. If a 'fast' value is defined, then automatically, the /2
+ * one should be included as it can be used. Generally having more than
+ * one fast set does not make sense, as static timings need to be changed
+ * to change the set. The exception is the bypass setting which is
+ * available for low power bypass.
+ *
+ * Note: This table needs to be sorted, fastest to slowest.
+ *-------------------------------------------------------------------------*/
+const struct prcm_config omap2420_rate_table[] = {
+ /* PRCM I - FAST */
+ {S12M, S660M, S330M, RI_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */
+ RI_CM_CLKSEL_DSP_VAL, RI_CM_CLKSEL_GFX_VAL,
+ RI_CM_CLKSEL1_CORE_VAL, MI_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_165MHz,
+ RATE_IN_242X},
+
+ /* PRCM II - FAST */
+ {S12M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ {S13M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ /* PRCM III - FAST */
+ {S12M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ {S13M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ /* PRCM II - SLOW */
+ {S12M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ {S13M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ /* PRCM III - SLOW */
+ {S12M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ {S13M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ /* PRCM-VII (boot-bypass) */
+ {S12M, S12M, S12M, RVII_CM_CLKSEL_MPU_VAL, /* 12MHz ARM*/
+ RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
+ RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_242X},
+
+ /* PRCM-VII (boot-bypass) */
+ {S13M, S13M, S13M, RVII_CM_CLKSEL_MPU_VAL, /* 13MHz ARM */
+ RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
+ RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_242X},
+
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
diff --git a/arch/arm/mach-omap2/opp2430_data.c b/arch/arm/mach-omap2/opp2430_data.c
new file mode 100644
index 00000000000..edb81672c84
--- /dev/null
+++ b/arch/arm/mach-omap2/opp2430_data.c
@@ -0,0 +1,133 @@
+/*
+ * opp2430_data.c - old-style "OPP" table for OMAP2430
+ *
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ * Copyright (C) 2004-2009 Nokia Corporation
+ *
+ * Richard Woodruff <r-woodruff2@ti.com>
+ *
+ * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
+ * These configurations are characterized by voltage and speed for clocks.
+ * The device is only validated for certain combinations. One way to express
+ * these combinations is via the 'ratios' at which the clocks operate with
+ * respect to each other. These ratio sets are for a given voltage/DPLL
+ * setting. All configurations can be described by a DPLL setting and a ratio.
+ * There are 3 ratio sets for the 2430 and X ratio sets for 2420.
+ *
+ * 2430 differs from 2420 in that there are no more phase synchronizers used.
+ * They both have a slightly different clock domain setup. 2420(iva1,dsp) vs
+ * 2430 (iva2.1, NOdsp, mdm)
+ *
+ * XXX Missing voltage data.
+ *
+ * The format described in this file is deprecated. Once a reasonable
+ * OPP API exists, the data in this file should be converted to use it.
+ *
+ * This is technically part of the OMAP2xxx clock code.
+ */
+
+#include "opp2xxx.h"
+#include "sdrc.h"
+#include "clock.h"
+
+/*-------------------------------------------------------------------------
+ * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
+ * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
+ * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
+ * CM_CLKSEL2_PLL, CM_CLKSEL_MDM
+ *
+ * The table is filled in based on the H4 board and 2430-SDP variants available.
+ * There are quite a few more rate combinations which could be defined.
+ *
+ * When multiple values are defined, startup will try to choose the
+ * fastest one. If a 'fast' value is defined, then automatically, the /2
+ * one should be included as it can be used. Generally having more than
+ * one fast set does not make sense, as static timings need to be changed
+ * to change the set. The exception is the bypass setting which is
+ * available for low power bypass.
+ *
+ * Note: This table needs to be sorted, fastest to slowest.
+ *-------------------------------------------------------------------------*/
+const struct prcm_config omap2430_rate_table[] = {
+ /* PRCM #4 - ratio2 (ES2.1) - FAST */
+ {S13M, S798M, S399M, R2_CM_CLKSEL_MPU_VAL, /* 399MHz ARM */
+ R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
+ R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, R2_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_133MHz,
+ RATE_IN_243X},
+
+ /* PRCM #2 - ratio1 (ES2) - FAST */
+ {S13M, S658M, S329M, R1_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_165MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5a - ratio1 - FAST */
+ {S13M, S532M, S266M, R1_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_133MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5b - ratio1 - FAST */
+ {S13M, S400M, S200M, R1_CM_CLKSEL_MPU_VAL, /* 200MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_100MHz,
+ RATE_IN_243X},
+
+ /* PRCM #4 - ratio1 (ES2.1) - SLOW */
+ {S13M, S399M, S199M, R2_CM_CLKSEL_MPU_VAL, /* 200MHz ARM */
+ R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
+ R2_CM_CLKSEL1_CORE_VAL, M4_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_1x_VAL, R2_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_133MHz,
+ RATE_IN_243X},
+
+ /* PRCM #2 - ratio1 (ES2) - SLOW */
+ {S13M, S329M, S164M, R1_CM_CLKSEL_MPU_VAL, /* 165MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M2_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_165MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5a - ratio1 - SLOW */
+ {S13M, S266M, S133M, R1_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_133MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5b - ratio1 - SLOW*/
+ {S13M, S200M, S100M, R1_CM_CLKSEL_MPU_VAL, /* 100MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_100MHz,
+ RATE_IN_243X},
+
+ /* PRCM-boot/bypass */
+ {S13M, S13M, S13M, RB_CM_CLKSEL_MPU_VAL, /* 13Mhz */
+ RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
+ RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_243X},
+
+ /* PRCM-boot/bypass */
+ {S12M, S12M, S12M, RB_CM_CLKSEL_MPU_VAL, /* 12Mhz */
+ RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
+ RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
+ SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_243X},
+
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
diff --git a/arch/arm/mach-omap2/opp2xxx.h b/arch/arm/mach-omap2/opp2xxx.h
new file mode 100644
index 00000000000..ed6df04e2f2
--- /dev/null
+++ b/arch/arm/mach-omap2/opp2xxx.h
@@ -0,0 +1,424 @@
+/*
+ * opp2xxx.h - macros for old-style OMAP2xxx "OPP" definitions
+ *
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ * Copyright (C) 2004-2009 Nokia Corporation
+ *
+ * Richard Woodruff <r-woodruff2@ti.com>
+ *
+ * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
+ * These configurations are characterized by voltage and speed for clocks.
+ * The device is only validated for certain combinations. One way to express
+ * these combinations is via the 'ratios' at which the clocks operate with
+ * respect to each other. These ratio sets are for a given voltage/DPLL
+ * setting. All configurations can be described by a DPLL setting and a ratio.
+ * There are 3 ratio sets for the 2430 and X ratio sets for 2420.
+ *
+ * 2430 differs from 2420 in that there are no more phase synchronizers used.
+ * They both have a slightly different clock domain setup. 2420(iva1,dsp) vs
+ * 2430 (iva2.1, NOdsp, mdm)
+ *
+ * XXX Missing voltage data.
+ *
+ * The format described in this file is deprecated. Once a reasonable
+ * OPP API exists, the data in this file should be converted to use it.
+ *
+ * This is technically part of the OMAP2xxx clock code.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_OPP2XXX_H
+#define __ARCH_ARM_MACH_OMAP2_OPP2XXX_H
+
+/**
+ * struct prcm_config - define clock rates on a per-OPP basis (24xx)
+ *
+ * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
+ * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,CM_CLKSEL_DSP
+ * CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL CM_CLKSEL2_PLL, CM_CLKSEL_MDM
+ *
+ * This is deprecated. As soon as we have a decent OPP API, we should
+ * move all this stuff to it.
+ */
+struct prcm_config {
+ unsigned long xtal_speed; /* crystal rate */
+ unsigned long dpll_speed; /* dpll: out*xtal*M/(N-1) table_recalc */
+ unsigned long mpu_speed; /* speed of MPU */
+ unsigned long cm_clksel_mpu; /* mpu divider */
+ unsigned long cm_clksel_dsp; /* dsp+iva1 div(2420), iva2.1(2430) */
+ unsigned long cm_clksel_gfx; /* gfx dividers */
+ unsigned long cm_clksel1_core; /* major subsystem dividers */
+ unsigned long cm_clksel1_pll; /* m,n */
+ unsigned long cm_clksel2_pll; /* dpllx1 or x2 out */
+ unsigned long cm_clksel_mdm; /* modem dividers 2430 only */
+ unsigned long base_sdrc_rfr; /* base refresh timing for a set */
+ unsigned char flags;
+};
+
+
+/* Core fields for cm_clksel, not ratio governed */
+#define RX_CLKSEL_DSS1 (0x10 << 8)
+#define RX_CLKSEL_DSS2 (0x0 << 13)
+#define RX_CLKSEL_SSI (0x5 << 20)
+
+/*-------------------------------------------------------------------------
+ * Voltage/DPLL ratios
+ *-------------------------------------------------------------------------*/
+
+/* 2430 Ratio's, 2430-Ratio Config 1 */
+#define R1_CLKSEL_L3 (4 << 0)
+#define R1_CLKSEL_L4 (2 << 5)
+#define R1_CLKSEL_USB (4 << 25)
+#define R1_CM_CLKSEL1_CORE_VAL (R1_CLKSEL_USB | RX_CLKSEL_SSI | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ R1_CLKSEL_L4 | R1_CLKSEL_L3)
+#define R1_CLKSEL_MPU (2 << 0)
+#define R1_CM_CLKSEL_MPU_VAL R1_CLKSEL_MPU
+#define R1_CLKSEL_DSP (2 << 0)
+#define R1_CLKSEL_DSP_IF (2 << 5)
+#define R1_CM_CLKSEL_DSP_VAL (R1_CLKSEL_DSP | R1_CLKSEL_DSP_IF)
+#define R1_CLKSEL_GFX (2 << 0)
+#define R1_CM_CLKSEL_GFX_VAL R1_CLKSEL_GFX
+#define R1_CLKSEL_MDM (4 << 0)
+#define R1_CM_CLKSEL_MDM_VAL R1_CLKSEL_MDM
+
+/* 2430-Ratio Config 2 */
+#define R2_CLKSEL_L3 (6 << 0)
+#define R2_CLKSEL_L4 (2 << 5)
+#define R2_CLKSEL_USB (2 << 25)
+#define R2_CM_CLKSEL1_CORE_VAL (R2_CLKSEL_USB | RX_CLKSEL_SSI | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ R2_CLKSEL_L4 | R2_CLKSEL_L3)
+#define R2_CLKSEL_MPU (2 << 0)
+#define R2_CM_CLKSEL_MPU_VAL R2_CLKSEL_MPU
+#define R2_CLKSEL_DSP (2 << 0)
+#define R2_CLKSEL_DSP_IF (3 << 5)
+#define R2_CM_CLKSEL_DSP_VAL (R2_CLKSEL_DSP | R2_CLKSEL_DSP_IF)
+#define R2_CLKSEL_GFX (2 << 0)
+#define R2_CM_CLKSEL_GFX_VAL R2_CLKSEL_GFX
+#define R2_CLKSEL_MDM (6 << 0)
+#define R2_CM_CLKSEL_MDM_VAL R2_CLKSEL_MDM
+
+/* 2430-Ratio Bootm (BYPASS) */
+#define RB_CLKSEL_L3 (1 << 0)
+#define RB_CLKSEL_L4 (1 << 5)
+#define RB_CLKSEL_USB (1 << 25)
+#define RB_CM_CLKSEL1_CORE_VAL (RB_CLKSEL_USB | RX_CLKSEL_SSI | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ RB_CLKSEL_L4 | RB_CLKSEL_L3)
+#define RB_CLKSEL_MPU (1 << 0)
+#define RB_CM_CLKSEL_MPU_VAL RB_CLKSEL_MPU
+#define RB_CLKSEL_DSP (1 << 0)
+#define RB_CLKSEL_DSP_IF (1 << 5)
+#define RB_CM_CLKSEL_DSP_VAL (RB_CLKSEL_DSP | RB_CLKSEL_DSP_IF)
+#define RB_CLKSEL_GFX (1 << 0)
+#define RB_CM_CLKSEL_GFX_VAL RB_CLKSEL_GFX
+#define RB_CLKSEL_MDM (1 << 0)
+#define RB_CM_CLKSEL_MDM_VAL RB_CLKSEL_MDM
+
+/* 2420 Ratio Equivalents */
+#define RXX_CLKSEL_VLYNQ (0x12 << 15)
+#define RXX_CLKSEL_SSI (0x8 << 20)
+
+/* 2420-PRCM III 532MHz core */
+#define RIII_CLKSEL_L3 (4 << 0) /* 133MHz */
+#define RIII_CLKSEL_L4 (2 << 5) /* 66.5MHz */
+#define RIII_CLKSEL_USB (4 << 25) /* 33.25MHz */
+#define RIII_CM_CLKSEL1_CORE_VAL (RIII_CLKSEL_USB | RXX_CLKSEL_SSI | \
+ RXX_CLKSEL_VLYNQ | RX_CLKSEL_DSS2 | \
+ RX_CLKSEL_DSS1 | RIII_CLKSEL_L4 | \
+ RIII_CLKSEL_L3)
+#define RIII_CLKSEL_MPU (2 << 0) /* 266MHz */
+#define RIII_CM_CLKSEL_MPU_VAL RIII_CLKSEL_MPU
+#define RIII_CLKSEL_DSP (3 << 0) /* c5x - 177.3MHz */
+#define RIII_CLKSEL_DSP_IF (2 << 5) /* c5x - 88.67MHz */
+#define RIII_SYNC_DSP (1 << 7) /* Enable sync */
+#define RIII_CLKSEL_IVA (6 << 8) /* iva1 - 88.67MHz */
+#define RIII_SYNC_IVA (1 << 13) /* Enable sync */
+#define RIII_CM_CLKSEL_DSP_VAL (RIII_SYNC_IVA | RIII_CLKSEL_IVA | \
+ RIII_SYNC_DSP | RIII_CLKSEL_DSP_IF | \
+ RIII_CLKSEL_DSP)
+#define RIII_CLKSEL_GFX (2 << 0) /* 66.5MHz */
+#define RIII_CM_CLKSEL_GFX_VAL RIII_CLKSEL_GFX
+
+/* 2420-PRCM II 600MHz core */
+#define RII_CLKSEL_L3 (6 << 0) /* 100MHz */
+#define RII_CLKSEL_L4 (2 << 5) /* 50MHz */
+#define RII_CLKSEL_USB (2 << 25) /* 50MHz */
+#define RII_CM_CLKSEL1_CORE_VAL (RII_CLKSEL_USB | RXX_CLKSEL_SSI | \
+ RXX_CLKSEL_VLYNQ | RX_CLKSEL_DSS2 | \
+ RX_CLKSEL_DSS1 | RII_CLKSEL_L4 | \
+ RII_CLKSEL_L3)
+#define RII_CLKSEL_MPU (2 << 0) /* 300MHz */
+#define RII_CM_CLKSEL_MPU_VAL RII_CLKSEL_MPU
+#define RII_CLKSEL_DSP (3 << 0) /* c5x - 200MHz */
+#define RII_CLKSEL_DSP_IF (2 << 5) /* c5x - 100MHz */
+#define RII_SYNC_DSP (0 << 7) /* Bypass sync */
+#define RII_CLKSEL_IVA (3 << 8) /* iva1 - 200MHz */
+#define RII_SYNC_IVA (0 << 13) /* Bypass sync */
+#define RII_CM_CLKSEL_DSP_VAL (RII_SYNC_IVA | RII_CLKSEL_IVA | \
+ RII_SYNC_DSP | RII_CLKSEL_DSP_IF | \
+ RII_CLKSEL_DSP)
+#define RII_CLKSEL_GFX (2 << 0) /* 50MHz */
+#define RII_CM_CLKSEL_GFX_VAL RII_CLKSEL_GFX
+
+/* 2420-PRCM I 660MHz core */
+#define RI_CLKSEL_L3 (4 << 0) /* 165MHz */
+#define RI_CLKSEL_L4 (2 << 5) /* 82.5MHz */
+#define RI_CLKSEL_USB (4 << 25) /* 41.25MHz */
+#define RI_CM_CLKSEL1_CORE_VAL (RI_CLKSEL_USB | \
+ RXX_CLKSEL_SSI | RXX_CLKSEL_VLYNQ | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ RI_CLKSEL_L4 | RI_CLKSEL_L3)
+#define RI_CLKSEL_MPU (2 << 0) /* 330MHz */
+#define RI_CM_CLKSEL_MPU_VAL RI_CLKSEL_MPU
+#define RI_CLKSEL_DSP (3 << 0) /* c5x - 220MHz */
+#define RI_CLKSEL_DSP_IF (2 << 5) /* c5x - 110MHz */
+#define RI_SYNC_DSP (1 << 7) /* Activate sync */
+#define RI_CLKSEL_IVA (4 << 8) /* iva1 - 165MHz */
+#define RI_SYNC_IVA (0 << 13) /* Bypass sync */
+#define RI_CM_CLKSEL_DSP_VAL (RI_SYNC_IVA | RI_CLKSEL_IVA | \
+ RI_SYNC_DSP | RI_CLKSEL_DSP_IF | \
+ RI_CLKSEL_DSP)
+#define RI_CLKSEL_GFX (1 << 0) /* 165MHz */
+#define RI_CM_CLKSEL_GFX_VAL RI_CLKSEL_GFX
+
+/* 2420-PRCM VII (boot) */
+#define RVII_CLKSEL_L3 (1 << 0)
+#define RVII_CLKSEL_L4 (1 << 5)
+#define RVII_CLKSEL_DSS1 (1 << 8)
+#define RVII_CLKSEL_DSS2 (0 << 13)
+#define RVII_CLKSEL_VLYNQ (1 << 15)
+#define RVII_CLKSEL_SSI (1 << 20)
+#define RVII_CLKSEL_USB (1 << 25)
+
+#define RVII_CM_CLKSEL1_CORE_VAL (RVII_CLKSEL_USB | RVII_CLKSEL_SSI | \
+ RVII_CLKSEL_VLYNQ | \
+ RVII_CLKSEL_DSS2 | RVII_CLKSEL_DSS1 | \
+ RVII_CLKSEL_L4 | RVII_CLKSEL_L3)
+
+#define RVII_CLKSEL_MPU (1 << 0) /* all divide by 1 */
+#define RVII_CM_CLKSEL_MPU_VAL RVII_CLKSEL_MPU
+
+#define RVII_CLKSEL_DSP (1 << 0)
+#define RVII_CLKSEL_DSP_IF (1 << 5)
+#define RVII_SYNC_DSP (0 << 7)
+#define RVII_CLKSEL_IVA (1 << 8)
+#define RVII_SYNC_IVA (0 << 13)
+#define RVII_CM_CLKSEL_DSP_VAL (RVII_SYNC_IVA | RVII_CLKSEL_IVA | \
+ RVII_SYNC_DSP | RVII_CLKSEL_DSP_IF | \
+ RVII_CLKSEL_DSP)
+
+#define RVII_CLKSEL_GFX (1 << 0)
+#define RVII_CM_CLKSEL_GFX_VAL RVII_CLKSEL_GFX
+
+/*-------------------------------------------------------------------------
+ * 2430 Target modes: each configuration has several modes that go
+ * along with it. A mode mainly adds a specific DPLL combination to
+ * go along with a ratio.
+ *-------------------------------------------------------------------------*/
+
+/* Hardware governed */
+#define MX_48M_SRC (0 << 3)
+#define MX_54M_SRC (0 << 5)
+#define MX_APLLS_CLIKIN_12 (3 << 23)
+#define MX_APLLS_CLIKIN_13 (2 << 23)
+#define MX_APLLS_CLIKIN_19_2 (0 << 23)
+
+/*
+ * 2430 - standalone, 2*ref*M/(n+1), M/N is for exactness not relock speed
+ * #5a (ratio1) baseport-target, target DPLL = 266*2 = 532MHz
+ */
+#define M5A_DPLL_MULT_12 (133 << 12)
+#define M5A_DPLL_DIV_12 (5 << 8)
+#define M5A_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M5A_DPLL_DIV_12 | M5A_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12)
+#define M5A_DPLL_MULT_13 (61 << 12)
+#define M5A_DPLL_DIV_13 (2 << 8)
+#define M5A_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M5A_DPLL_DIV_13 | M5A_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13)
+#define M5A_DPLL_MULT_19 (55 << 12)
+#define M5A_DPLL_DIV_19 (3 << 8)
+#define M5A_CM_CLKSEL1_PLL_19_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M5A_DPLL_DIV_19 | M5A_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2)
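+/*
+ * Worked example (plain arithmetic on the defines above, not a TRM quote):
+ * with a 12MHz clkin, M5A gives 2 * 12 * 133 / (5 + 1) = 532MHz; with
+ * 13MHz, 2 * 13 * 61 / (2 + 1) ~= 528.7MHz; with 19.2MHz,
+ * 2 * 19.2 * 55 / (3 + 1) = 528MHz.
+ */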
+/* #5b (ratio1) target DPLL = 200*2 = 400MHz */
+#define M5B_DPLL_MULT_12 (50 << 12)
+#define M5B_DPLL_DIV_12 (2 << 8)
+#define M5B_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M5B_DPLL_DIV_12 | M5B_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12)
+#define M5B_DPLL_MULT_13 (200 << 12)
+#define M5B_DPLL_DIV_13 (12 << 8)
+
+#define M5B_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M5B_DPLL_DIV_13 | M5B_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13)
+#define M5B_DPLL_MULT_19 (125 << 12)
+#define M5B_DPLL_DIV_19 (31 << 8)
+#define M5B_CM_CLKSEL1_PLL_19_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M5B_DPLL_DIV_19 | M5B_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2)
+/*
+ * #4 (ratio2), DPLL = 399*2 = 798MHz, L3=133MHz
+ */
+#define M4_DPLL_MULT_12 (133 << 12)
+#define M4_DPLL_DIV_12 (3 << 8)
+#define M4_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M4_DPLL_DIV_12 | M4_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12)
+
+#define M4_DPLL_MULT_13 (399 << 12)
+#define M4_DPLL_DIV_13 (12 << 8)
+#define M4_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M4_DPLL_DIV_13 | M4_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13)
+
+#define M4_DPLL_MULT_19 (145 << 12)
+#define M4_DPLL_DIV_19 (6 << 8)
+#define M4_CM_CLKSEL1_PLL_19_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M4_DPLL_DIV_19 | M4_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2)
+
+/*
+ * #3 (ratio2) baseport-target, target DPLL = 330*2 = 660MHz
+ */
+#define M3_DPLL_MULT_12 (55 << 12)
+#define M3_DPLL_DIV_12 (1 << 8)
+#define M3_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M3_DPLL_DIV_12 | M3_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12)
+#define M3_DPLL_MULT_13 (76 << 12)
+#define M3_DPLL_DIV_13 (2 << 8)
+#define M3_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M3_DPLL_DIV_13 | M3_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13)
+#define M3_DPLL_MULT_19 (17 << 12)
+#define M3_DPLL_DIV_19 (0 << 8)
+#define M3_CM_CLKSEL1_PLL_19_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M3_DPLL_DIV_19 | M3_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2)
+
+/*
+ * #2 (ratio1) DPLL = 330*2 = 660MHz, L3=165MHz
+ */
+#define M2_DPLL_MULT_12 (55 << 12)
+#define M2_DPLL_DIV_12 (1 << 8)
+#define M2_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M2_DPLL_DIV_12 | M2_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12)
+
+/* Speed change: 658.7MHz is used instead of 660MHz for LP-Refresh (M=76,
+ * N=2) because of a relock time issue.
+ * Core frequency changes from 330/165 MHz to 329/164 MHz. */
+#define M2_DPLL_MULT_13 (76 << 12)
+#define M2_DPLL_DIV_13 (2 << 8)
+#define M2_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M2_DPLL_DIV_13 | M2_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13)
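+/*
+ * Quick check (plain arithmetic, not a TRM figure): 2 * 13 * 76 / (2 + 1)
+ * ~= 658.7MHz, matching the rate and the 329/164 MHz figures noted above.
+ */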
+
+#define M2_DPLL_MULT_19 (17 << 12)
+#define M2_DPLL_DIV_19 (0 << 8)
+#define M2_CM_CLKSEL1_PLL_19_VAL (MX_48M_SRC | MX_54M_SRC | \
+ M2_DPLL_DIV_19 | M2_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2)
+
+/* boot (boot) */
+#define MB_DPLL_MULT (1 << 12)
+#define MB_DPLL_DIV (0 << 8)
+#define MB_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MB_DPLL_DIV | MB_DPLL_MULT | \
+ MX_APLLS_CLIKIN_12)
+
+#define MB_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MB_DPLL_DIV | MB_DPLL_MULT | \
+ MX_APLLS_CLIKIN_13)
+
+#define MB_CM_CLKSEL1_PLL_19_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MB_DPLL_DIV | MB_DPLL_MULT | \
+					 MX_APLLS_CLIKIN_19_2)
+
+/*
+ * 2430 - chassis (sedna)
+ * 165 (ratio1) same as above #2
+ * 150 (ratio1)
+ * 133 (ratio2) same as above #4
+ * 110 (ratio2) same as above #3
+ * 104 (ratio2)
+ * boot (boot)
+ */
+
+/* PRCM I target DPLL = 2*330MHz = 660MHz */
+#define MI_DPLL_MULT_12 (55 << 12)
+#define MI_DPLL_DIV_12 (1 << 8)
+#define MI_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MI_DPLL_DIV_12 | MI_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12)
+
+/*
+ * 2420 Equivalent - mode registers
+ * PRCM II, target DPLL = 2*300MHz = 600MHz
+ */
+#define MII_DPLL_MULT_12 (50 << 12)
+#define MII_DPLL_DIV_12 (1 << 8)
+#define MII_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MII_DPLL_DIV_12 | MII_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12)
+#define MII_DPLL_MULT_13 (300 << 12)
+#define MII_DPLL_DIV_13 (12 << 8)
+#define MII_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MII_DPLL_DIV_13 | MII_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13)
+
+/* PRCM III target DPLL = 2*266 = 532MHz */
+#define MIII_DPLL_MULT_12 (133 << 12)
+#define MIII_DPLL_DIV_12 (5 << 8)
+#define MIII_CM_CLKSEL1_PLL_12_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MIII_DPLL_DIV_12 | \
+ MIII_DPLL_MULT_12 | MX_APLLS_CLIKIN_12)
+#define MIII_DPLL_MULT_13 (266 << 12)
+#define MIII_DPLL_DIV_13 (12 << 8)
+#define MIII_CM_CLKSEL1_PLL_13_VAL (MX_48M_SRC | MX_54M_SRC | \
+ MIII_DPLL_DIV_13 | \
+ MIII_DPLL_MULT_13 | MX_APLLS_CLIKIN_13)
+
+/* PRCM VII (boot bypass) */
+#define MVII_CM_CLKSEL1_PLL_12_VAL MB_CM_CLKSEL1_PLL_12_VAL
+#define MVII_CM_CLKSEL1_PLL_13_VAL MB_CM_CLKSEL1_PLL_13_VAL
+
+/* High and low operation value */
+#define MX_CLKSEL2_PLL_2x_VAL (2 << 0)
+#define MX_CLKSEL2_PLL_1x_VAL (1 << 0)
+
+/* MPU speed defines */
+#define S12M 12000000
+#define S13M 13000000
+#define S19M 19200000
+#define S26M 26000000
+#define S100M 100000000
+#define S133M 133000000
+#define S150M 150000000
+#define S164M 164000000
+#define S165M 165000000
+#define S199M 199000000
+#define S200M 200000000
+#define S266M 266000000
+#define S300M 300000000
+#define S329M 329000000
+#define S330M 330000000
+#define S399M 399000000
+#define S400M 400000000
+#define S532M 532000000
+#define S600M 600000000
+#define S658M 658000000
+#define S660M 660000000
+#define S798M 798000000
+
+
+extern const struct prcm_config omap2420_rate_table[];
+extern const struct prcm_config omap2430_rate_table[];
+extern const struct prcm_config *rate_table;
+extern const struct prcm_config *curr_prcm_set;
+
+#endif
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 8baa30d2acf..860b755d222 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -326,7 +326,7 @@ int pm_dbg_regset_save(int reg_set)
return 0;
}
-static const char pwrdm_state_names[][4] = {
+static const char pwrdm_state_names[][PWRDM_MAX_PWRSTS] = {
"OFF",
"RET",
"INA",
@@ -381,7 +381,7 @@ static int pwrdm_dbg_show_counter(struct powerdomain *pwrdm, void *user)
seq_printf(s, "%s (%s)", pwrdm->name,
pwrdm_state_names[pwrdm->state]);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < PWRDM_MAX_PWRSTS; i++)
seq_printf(s, ",%s:%d", pwrdm_state_names[i],
pwrdm->state_counter[i]);
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index b6990e37778..26b3f3ee82a 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -10,9 +10,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifdef CONFIG_OMAP_DEBUG_POWERDOMAIN
-# define DEBUG
-#endif
+#undef DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
@@ -160,7 +158,7 @@ static __init void _pwrdm_setup(struct powerdomain *pwrdm)
{
int i;
- for (i = 0; i < 4; i++)
+ for (i = 0; i < PWRDM_MAX_PWRSTS; i++)
pwrdm->state_counter[i] = 0;
pwrdm_wait_transition(pwrdm);
@@ -480,7 +478,7 @@ int pwrdm_add_wkdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
if (IS_ERR(p)) {
pr_debug("powerdomain: hardware cannot set/clear wake up of "
"%s when %s wakes up\n", pwrdm1->name, pwrdm2->name);
- return IS_ERR(p);
+ return PTR_ERR(p);
}
pr_debug("powerdomain: hardware will wake up %s when %s wakes up\n",
@@ -513,7 +511,7 @@ int pwrdm_del_wkdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
if (IS_ERR(p)) {
pr_debug("powerdomain: hardware cannot set/clear wake up of "
"%s when %s wakes up\n", pwrdm1->name, pwrdm2->name);
- return IS_ERR(p);
+ return PTR_ERR(p);
}
pr_debug("powerdomain: hardware will no longer wake up %s after %s "
@@ -550,7 +548,7 @@ int pwrdm_read_wkdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
if (IS_ERR(p)) {
pr_debug("powerdomain: hardware cannot set/clear wake up of "
"%s when %s wakes up\n", pwrdm1->name, pwrdm2->name);
- return IS_ERR(p);
+ return PTR_ERR(p);
}
return prm_read_mod_bits_shift(pwrdm1->prcm_offs, PM_WKDEP,
@@ -573,10 +571,10 @@ int pwrdm_add_sleepdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
{
struct powerdomain *p;
- if (!pwrdm1)
+ if (!cpu_is_omap34xx())
return -EINVAL;
- if (!cpu_is_omap34xx())
+ if (!pwrdm1)
return -EINVAL;
p = _pwrdm_deps_lookup(pwrdm2, pwrdm1->sleepdep_srcs);
@@ -584,7 +582,7 @@ int pwrdm_add_sleepdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
pr_debug("powerdomain: hardware cannot set/clear sleep "
"dependency affecting %s from %s\n", pwrdm1->name,
pwrdm2->name);
- return IS_ERR(p);
+ return PTR_ERR(p);
}
pr_debug("powerdomain: will prevent %s from sleeping if %s is active\n",
@@ -612,10 +610,10 @@ int pwrdm_del_sleepdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
{
struct powerdomain *p;
- if (!pwrdm1)
+ if (!cpu_is_omap34xx())
return -EINVAL;
- if (!cpu_is_omap34xx())
+ if (!pwrdm1)
return -EINVAL;
p = _pwrdm_deps_lookup(pwrdm2, pwrdm1->sleepdep_srcs);
@@ -623,7 +621,7 @@ int pwrdm_del_sleepdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
pr_debug("powerdomain: hardware cannot set/clear sleep "
"dependency affecting %s from %s\n", pwrdm1->name,
pwrdm2->name);
- return IS_ERR(p);
+ return PTR_ERR(p);
}
pr_debug("powerdomain: will no longer prevent %s from sleeping if "
@@ -655,10 +653,10 @@ int pwrdm_read_sleepdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
{
struct powerdomain *p;
- if (!pwrdm1)
+ if (!cpu_is_omap34xx())
return -EINVAL;
- if (!cpu_is_omap34xx())
+ if (!pwrdm1)
return -EINVAL;
p = _pwrdm_deps_lookup(pwrdm2, pwrdm1->sleepdep_srcs);
@@ -666,7 +664,7 @@ int pwrdm_read_sleepdep(struct powerdomain *pwrdm1, struct powerdomain *pwrdm2)
pr_debug("powerdomain: hardware cannot set/clear sleep "
"dependency affecting %s from %s\n", pwrdm1->name,
pwrdm2->name);
- return IS_ERR(p);
+ return PTR_ERR(p);
}
return prm_read_mod_bits_shift(pwrdm1->prcm_offs, OMAP3430_CM_SLEEPDEP,
@@ -985,6 +983,9 @@ int pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
if (pwrdm->banks < (bank + 1))
return -EEXIST;
+ if (pwrdm->flags & PWRDM_HAS_MPU_QUIRK)
+ bank = 1;
+
/*
* The register bit names below may not correspond to the
* actual names of the bits in each powerdomain's register,
@@ -1032,6 +1033,9 @@ int pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
if (pwrdm->banks < (bank + 1))
return -EEXIST;
+ if (pwrdm->flags & PWRDM_HAS_MPU_QUIRK)
+ bank = 1;
+
/*
* The register bit names below may not correspond to the
* actual names of the bits in each powerdomain's register,
diff --git a/arch/arm/mach-omap2/powerdomains34xx.h b/arch/arm/mach-omap2/powerdomains34xx.h
index fd09b0827df..588f7e07d0e 100644
--- a/arch/arm/mach-omap2/powerdomains34xx.h
+++ b/arch/arm/mach-omap2/powerdomains34xx.h
@@ -190,6 +190,7 @@ static struct powerdomain mpu_34xx_pwrdm = {
.wkdep_srcs = mpu_34xx_wkdeps,
.pwrsts = PWRSTS_OFF_RET_ON,
.pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .flags = PWRDM_HAS_MPU_QUIRK,
.banks = 1,
.pwrsts_mem_ret = {
[0] = PWRSTS_OFF_RET,
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index cb1ae84e092..61ac2a418bd 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -4,10 +4,12 @@
/*
* OMAP2/3 PRCM base and module definitions
*
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2008 Nokia Corporation
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2007-2009 Nokia Corporation
*
* Written by Paul Walmsley
+ * OMAP4 defines in this file are automatically generated from the OMAP hardware
+ * databases.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -49,6 +51,73 @@
#define OMAP3430_NEON_MOD 0xb00
#define OMAP3430ES2_USBHOST_MOD 0xc00
+#define BITS(n_bit) \
+ (((1 << n_bit) - 1) | (1 << n_bit))
+
+#define BITFIELD(l_bit, u_bit) \
+ (BITS(u_bit) & ~((BITS(l_bit)) >> 1))
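+/*
+ * Illustration only: BITS(n) is a mask of bits 0..n, so e.g.
+ * BITFIELD(4, 5) = BITS(5) & ~(BITS(4) >> 1) = 0x3f & ~0x0f = 0x30,
+ * i.e. a mask covering bits 4..5.
+ */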
+
+/* OMAP44XX specific module offsets */
+
+/* CM1 instances */
+
+#define OMAP4430_CM1_OCP_SOCKET_MOD 0x0000
+#define OMAP4430_CM1_CKGEN_MOD 0x0100
+#define OMAP4430_CM1_MPU_MOD 0x0300
+#define OMAP4430_CM1_TESLA_MOD 0x0400
+#define OMAP4430_CM1_ABE_MOD 0x0500
+#define OMAP4430_CM1_RESTORE_MOD 0x0e00
+#define OMAP4430_CM1_INSTR_MOD 0x0f00
+
+/* CM2 instances */
+
+#define OMAP4430_CM2_OCP_SOCKET_MOD 0x0000
+#define OMAP4430_CM2_CKGEN_MOD 0x0100
+#define OMAP4430_CM2_ALWAYS_ON_MOD 0x0600
+#define OMAP4430_CM2_CORE_MOD 0x0700
+#define OMAP4430_CM2_IVAHD_MOD 0x0f00
+#define OMAP4430_CM2_CAM_MOD 0x1000
+#define OMAP4430_CM2_DSS_MOD 0x1100
+#define OMAP4430_CM2_GFX_MOD 0x1200
+#define OMAP4430_CM2_L3INIT_MOD 0x1300
+#define OMAP4430_CM2_L4PER_MOD 0x1400
+#define OMAP4430_CM2_CEFUSE_MOD 0x1600
+#define OMAP4430_CM2_RESTORE_MOD 0x1e00
+#define OMAP4430_CM2_INSTR_MOD 0x1f00
+
+/* PRM instances */
+
+#define OMAP4430_PRM_OCP_SOCKET_MOD 0x0000
+#define OMAP4430_PRM_CKGEN_MOD 0x0100
+#define OMAP4430_PRM_MPU_MOD 0x0300
+#define OMAP4430_PRM_TESLA_MOD 0x0400
+#define OMAP4430_PRM_ABE_MOD 0x0500
+#define OMAP4430_PRM_ALWAYS_ON_MOD 0x0600
+#define OMAP4430_PRM_CORE_MOD 0x0700
+#define OMAP4430_PRM_IVAHD_MOD 0x0f00
+#define OMAP4430_PRM_CAM_MOD 0x1000
+#define OMAP4430_PRM_DSS_MOD 0x1100
+#define OMAP4430_PRM_GFX_MOD 0x1200
+#define OMAP4430_PRM_L3INIT_MOD 0x1300
+#define OMAP4430_PRM_L4PER_MOD 0x1400
+#define OMAP4430_PRM_CEFUSE_MOD 0x1600
+#define OMAP4430_PRM_WKUP_MOD 0x1700
+#define OMAP4430_PRM_WKUP_CM_MOD 0x1800
+#define OMAP4430_PRM_EMU_MOD 0x1900
+#define OMAP4430_PRM_EMU_CM_MOD 0x1a00
+#define OMAP4430_PRM_DEVICE_MOD 0x1b00
+#define OMAP4430_PRM_INSTR_MOD 0x1f00
+
+/* SCRM instances */
+
+#define OMAP4430_SCRM_SCRM_MOD 0x0000
+
+/* CHIRONSS instances */
+
+#define OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD 0x0000
+#define OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD 0x0200
+#define OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD 0x0400
+#define OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD 0x0800
/* 24XX register bits shared between CM & PRM registers */
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 029d376198d..3ea8177ffb2 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -34,6 +34,7 @@
static void __iomem *prm_base;
static void __iomem *cm_base;
+static void __iomem *cm2_base;
#define MAX_MODULE_ENABLE_WAIT 100000
@@ -170,14 +171,12 @@ u32 prm_read_mod_reg(s16 module, u16 idx)
{
return __omap_prcm_read(prm_base, module, idx);
}
-EXPORT_SYMBOL(prm_read_mod_reg);
/* Write into a register in a PRM module */
void prm_write_mod_reg(u32 val, s16 module, u16 idx)
{
__omap_prcm_write(val, prm_base, module, idx);
}
-EXPORT_SYMBOL(prm_write_mod_reg);
/* Read-modify-write a register in a PRM module. Caller must lock */
u32 prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
@@ -191,21 +190,18 @@ u32 prm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
return v;
}
-EXPORT_SYMBOL(prm_rmw_mod_reg_bits);
/* Read a register in a CM module */
u32 cm_read_mod_reg(s16 module, u16 idx)
{
return __omap_prcm_read(cm_base, module, idx);
}
-EXPORT_SYMBOL(cm_read_mod_reg);
/* Write into a register in a CM module */
void cm_write_mod_reg(u32 val, s16 module, u16 idx)
{
__omap_prcm_write(val, cm_base, module, idx);
}
-EXPORT_SYMBOL(cm_write_mod_reg);
/* Read-modify-write a register in a CM module. Caller must lock */
u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
@@ -219,7 +215,6 @@ u32 cm_rmw_mod_reg_bits(u32 mask, u32 bits, s16 module, s16 idx)
return v;
}
-EXPORT_SYMBOL(cm_rmw_mod_reg_bits);
/**
* omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
@@ -247,9 +242,8 @@ int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name)
BUG();
/* Wait for lock */
- while (((__raw_readl(reg) & mask) != ena) &&
- (i++ < MAX_MODULE_ENABLE_WAIT))
- udelay(1);
+ omap_test_timeout(((__raw_readl(reg) & mask) == ena),
+ MAX_MODULE_ENABLE_WAIT, i);
if (i < MAX_MODULE_ENABLE_WAIT)
pr_debug("cm: Module associated with clock %s ready after %d "
@@ -265,6 +259,7 @@ void __init omap2_set_globals_prcm(struct omap_globals *omap2_globals)
{
prm_base = omap2_globals->prm;
cm_base = omap2_globals->cm;
+ cm2_base = omap2_globals->cm2;
}
#ifdef CONFIG_ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
new file mode 100644
index 00000000000..301c810fb26
--- /dev/null
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -0,0 +1,2205 @@
+/*
+ * OMAP44xx Power Management register bits
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_44XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_44XX_H
+
+#include "prm.h"
+
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP
+ */
+#define OMAP4430_ABBOFF_ACT_EXPORT_SHIFT (1 << 1)
+#define OMAP4430_ABBOFF_ACT_EXPORT_MASK BITFIELD(1, 1)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP
+ */
+#define OMAP4430_ABBOFF_SLEEP_EXPORT_SHIFT (1 << 2)
+#define OMAP4430_ABBOFF_SLEEP_EXPORT_MASK BITFIELD(2, 2)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_ABB_IVA_DONE_EN_SHIFT (1 << 31)
+#define OMAP4430_ABB_IVA_DONE_EN_MASK BITFIELD(31, 31)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_ABB_IVA_DONE_ST_SHIFT (1 << 31)
+#define OMAP4430_ABB_IVA_DONE_ST_MASK BITFIELD(31, 31)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_ABB_MPU_DONE_EN_SHIFT (1 << 7)
+#define OMAP4430_ABB_MPU_DONE_EN_MASK BITFIELD(7, 7)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_ABB_MPU_DONE_ST_SHIFT (1 << 7)
+#define OMAP4430_ABB_MPU_DONE_ST_MASK BITFIELD(7, 7)
+
+/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
+#define OMAP4430_ACTIVE_FBB_SEL_SHIFT (1 << 2)
+#define OMAP4430_ACTIVE_FBB_SEL_MASK BITFIELD(2, 2)
+
+/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
+#define OMAP4430_ACTIVE_RBB_SEL_SHIFT (1 << 1)
+#define OMAP4430_ACTIVE_RBB_SEL_MASK BITFIELD(1, 1)
+
+/* Used by PM_ABE_PWRSTCTRL */
+#define OMAP4430_AESSMEM_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_AESSMEM_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_ABE_PWRSTCTRL */
+#define OMAP4430_AESSMEM_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_AESSMEM_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_ABE_PWRSTST */
+#define OMAP4430_AESSMEM_STATEST_SHIFT (1 << 4)
+#define OMAP4430_AESSMEM_STATEST_MASK BITFIELD(4, 5)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP
+ */
+#define OMAP4430_AIPOFF_SHIFT (1 << 8)
+#define OMAP4430_AIPOFF_MASK BITFIELD(8, 8)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_AUTO_CTRL_VDD_CORE_L_SHIFT (1 << 0)
+#define OMAP4430_AUTO_CTRL_VDD_CORE_L_MASK BITFIELD(0, 1)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_AUTO_CTRL_VDD_IVA_L_SHIFT (1 << 4)
+#define OMAP4430_AUTO_CTRL_VDD_IVA_L_MASK BITFIELD(4, 5)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_AUTO_CTRL_VDD_MPU_L_SHIFT (1 << 2)
+#define OMAP4430_AUTO_CTRL_VDD_MPU_L_MASK BITFIELD(2, 3)
+
+/* Used by PM_CAM_PWRSTCTRL */
+#define OMAP4430_CAM_MEM_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_CAM_MEM_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_CAM_PWRSTST */
+#define OMAP4430_CAM_MEM_STATEST_SHIFT (1 << 4)
+#define OMAP4430_CAM_MEM_STATEST_MASK BITFIELD(4, 5)
+
+/* Used by PRM_CLKREQCTRL */
+#define OMAP4430_CLKREQ_COND_SHIFT (1 << 0)
+#define OMAP4430_CLKREQ_COND_MASK BITFIELD(0, 2)
+
+/* Used by PRM_VC_VAL_SMPS_RA_CMD */
+#define OMAP4430_CMDRA_VDD_CORE_L_SHIFT (1 << 0)
+#define OMAP4430_CMDRA_VDD_CORE_L_MASK BITFIELD(0, 7)
+
+/* Used by PRM_VC_VAL_SMPS_RA_CMD */
+#define OMAP4430_CMDRA_VDD_IVA_L_SHIFT (1 << 8)
+#define OMAP4430_CMDRA_VDD_IVA_L_MASK BITFIELD(8, 15)
+
+/* Used by PRM_VC_VAL_SMPS_RA_CMD */
+#define OMAP4430_CMDRA_VDD_MPU_L_SHIFT (1 << 16)
+#define OMAP4430_CMDRA_VDD_MPU_L_MASK BITFIELD(16, 23)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_CMD_VDD_CORE_L_SHIFT (1 << 4)
+#define OMAP4430_CMD_VDD_CORE_L_MASK BITFIELD(4, 4)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_CMD_VDD_IVA_L_SHIFT (1 << 12)
+#define OMAP4430_CMD_VDD_IVA_L_MASK BITFIELD(12, 12)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_CMD_VDD_MPU_L_SHIFT (1 << 17)
+#define OMAP4430_CMD_VDD_MPU_L_MASK BITFIELD(17, 17)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_CORE_OCMRAM_ONSTATE_SHIFT (1 << 18)
+#define OMAP4430_CORE_OCMRAM_ONSTATE_MASK BITFIELD(18, 19)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_CORE_OCMRAM_RETSTATE_SHIFT (1 << 9)
+#define OMAP4430_CORE_OCMRAM_RETSTATE_MASK BITFIELD(9, 9)
+
+/* Used by PM_CORE_PWRSTST */
+#define OMAP4430_CORE_OCMRAM_STATEST_SHIFT (1 << 6)
+#define OMAP4430_CORE_OCMRAM_STATEST_MASK BITFIELD(6, 7)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_CORE_OTHER_BANK_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_CORE_OTHER_BANK_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_CORE_OTHER_BANK_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_CORE_OTHER_BANK_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_CORE_PWRSTST */
+#define OMAP4430_CORE_OTHER_BANK_STATEST_SHIFT (1 << 4)
+#define OMAP4430_CORE_OTHER_BANK_STATEST_MASK BITFIELD(4, 5)
+
+/* Used by PRM_VC_VAL_BYPASS */
+#define OMAP4430_DATA_SHIFT (1 << 16)
+#define OMAP4430_DATA_MASK BITFIELD(16, 23)
+
+/* Used by PRM_DEVICE_OFF_CTRL */
+#define OMAP4430_DEVICE_OFF_ENABLE_SHIFT (1 << 0)
+#define OMAP4430_DEVICE_OFF_ENABLE_MASK BITFIELD(0, 0)
+
+/* Used by PRM_VC_CFG_I2C_MODE */
+#define OMAP4430_DFILTEREN_SHIFT (1 << 6)
+#define OMAP4430_DFILTEREN_MASK BITFIELD(6, 6)
+
+/* Used by PRM_IRQENABLE_MPU, PRM_IRQENABLE_TESLA */
+#define OMAP4430_DPLL_ABE_RECAL_EN_SHIFT (1 << 4)
+#define OMAP4430_DPLL_ABE_RECAL_EN_MASK BITFIELD(4, 4)
+
+/* Used by PRM_IRQSTATUS_MPU, PRM_IRQSTATUS_TESLA */
+#define OMAP4430_DPLL_ABE_RECAL_ST_SHIFT (1 << 4)
+#define OMAP4430_DPLL_ABE_RECAL_ST_MASK BITFIELD(4, 4)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_DPLL_CORE_RECAL_EN_SHIFT (1 << 0)
+#define OMAP4430_DPLL_CORE_RECAL_EN_MASK BITFIELD(0, 0)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_DPLL_CORE_RECAL_ST_SHIFT (1 << 0)
+#define OMAP4430_DPLL_CORE_RECAL_ST_MASK BITFIELD(0, 0)
+
+/* Used by PRM_IRQENABLE_MPU */
+#define OMAP4430_DPLL_DDRPHY_RECAL_EN_SHIFT (1 << 6)
+#define OMAP4430_DPLL_DDRPHY_RECAL_EN_MASK BITFIELD(6, 6)
+
+/* Used by PRM_IRQSTATUS_MPU */
+#define OMAP4430_DPLL_DDRPHY_RECAL_ST_SHIFT (1 << 6)
+#define OMAP4430_DPLL_DDRPHY_RECAL_ST_MASK BITFIELD(6, 6)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU, PRM_IRQENABLE_TESLA */
+#define OMAP4430_DPLL_IVA_RECAL_EN_SHIFT (1 << 2)
+#define OMAP4430_DPLL_IVA_RECAL_EN_MASK BITFIELD(2, 2)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU, PRM_IRQSTATUS_TESLA */
+#define OMAP4430_DPLL_IVA_RECAL_ST_SHIFT (1 << 2)
+#define OMAP4430_DPLL_IVA_RECAL_ST_MASK BITFIELD(2, 2)
+
+/* Used by PRM_IRQENABLE_MPU */
+#define OMAP4430_DPLL_MPU_RECAL_EN_SHIFT (1 << 1)
+#define OMAP4430_DPLL_MPU_RECAL_EN_MASK BITFIELD(1, 1)
+
+/* Used by PRM_IRQSTATUS_MPU */
+#define OMAP4430_DPLL_MPU_RECAL_ST_SHIFT (1 << 1)
+#define OMAP4430_DPLL_MPU_RECAL_ST_MASK BITFIELD(1, 1)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_DPLL_PER_RECAL_EN_SHIFT (1 << 3)
+#define OMAP4430_DPLL_PER_RECAL_EN_MASK BITFIELD(3, 3)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_DPLL_PER_RECAL_ST_SHIFT (1 << 3)
+#define OMAP4430_DPLL_PER_RECAL_ST_MASK BITFIELD(3, 3)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_DPLL_UNIPRO_RECAL_EN_SHIFT (1 << 7)
+#define OMAP4430_DPLL_UNIPRO_RECAL_EN_MASK BITFIELD(7, 7)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_DPLL_UNIPRO_RECAL_ST_SHIFT (1 << 7)
+#define OMAP4430_DPLL_UNIPRO_RECAL_ST_MASK BITFIELD(7, 7)
+
+/* Used by PRM_IRQENABLE_MPU */
+#define OMAP4430_DPLL_USB_RECAL_EN_SHIFT (1 << 5)
+#define OMAP4430_DPLL_USB_RECAL_EN_MASK BITFIELD(5, 5)
+
+/* Used by PRM_IRQSTATUS_MPU */
+#define OMAP4430_DPLL_USB_RECAL_ST_SHIFT (1 << 5)
+#define OMAP4430_DPLL_USB_RECAL_ST_MASK BITFIELD(5, 5)
+
+/* Used by PM_DSS_PWRSTCTRL */
+#define OMAP4430_DSS_MEM_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_DSS_MEM_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_DSS_PWRSTCTRL */
+#define OMAP4430_DSS_MEM_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_DSS_MEM_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_DSS_PWRSTST */
+#define OMAP4430_DSS_MEM_STATEST_SHIFT (1 << 4)
+#define OMAP4430_DSS_MEM_STATEST_MASK BITFIELD(4, 5)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_DUCATI_L2RAM_ONSTATE_SHIFT (1 << 20)
+#define OMAP4430_DUCATI_L2RAM_ONSTATE_MASK BITFIELD(20, 21)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_DUCATI_L2RAM_RETSTATE_SHIFT (1 << 10)
+#define OMAP4430_DUCATI_L2RAM_RETSTATE_MASK BITFIELD(10, 10)
+
+/* Used by PM_CORE_PWRSTST */
+#define OMAP4430_DUCATI_L2RAM_STATEST_SHIFT (1 << 8)
+#define OMAP4430_DUCATI_L2RAM_STATEST_MASK BITFIELD(8, 9)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_DUCATI_UNICACHE_ONSTATE_SHIFT (1 << 22)
+#define OMAP4430_DUCATI_UNICACHE_ONSTATE_MASK BITFIELD(22, 23)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_DUCATI_UNICACHE_RETSTATE_SHIFT (1 << 11)
+#define OMAP4430_DUCATI_UNICACHE_RETSTATE_MASK BITFIELD(11, 11)
+
+/* Used by PM_CORE_PWRSTST */
+#define OMAP4430_DUCATI_UNICACHE_STATEST_SHIFT (1 << 10)
+#define OMAP4430_DUCATI_UNICACHE_STATEST_MASK BITFIELD(10, 11)
+
+/* Used by RM_MPU_RSTST */
+#define OMAP4430_EMULATION_RST_SHIFT (1 << 0)
+#define OMAP4430_EMULATION_RST_MASK BITFIELD(0, 0)
+
+/* Used by RM_DUCATI_RSTST */
+#define OMAP4430_EMULATION_RST1ST_SHIFT (1 << 3)
+#define OMAP4430_EMULATION_RST1ST_MASK BITFIELD(3, 3)
+
+/* Used by RM_DUCATI_RSTST */
+#define OMAP4430_EMULATION_RST2ST_SHIFT (1 << 4)
+#define OMAP4430_EMULATION_RST2ST_MASK BITFIELD(4, 4)
+
+/* Used by RM_IVAHD_RSTST */
+#define OMAP4430_EMULATION_SEQ1_RST1ST_SHIFT (1 << 3)
+#define OMAP4430_EMULATION_SEQ1_RST1ST_MASK BITFIELD(3, 3)
+
+/* Used by RM_IVAHD_RSTST */
+#define OMAP4430_EMULATION_SEQ2_RST2ST_SHIFT (1 << 4)
+#define OMAP4430_EMULATION_SEQ2_RST2ST_MASK BITFIELD(4, 4)
+
+/* Used by PM_EMU_PWRSTCTRL */
+#define OMAP4430_EMU_BANK_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_EMU_BANK_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_EMU_PWRSTST */
+#define OMAP4430_EMU_BANK_STATEST_SHIFT (1 << 4)
+#define OMAP4430_EMU_BANK_STATEST_MASK BITFIELD(4, 5)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP, PRM_SRAM_WKUP_SETUP
+ */
+#define OMAP4430_ENABLE_RTA_EXPORT_SHIFT (1 << 0)
+#define OMAP4430_ENABLE_RTA_EXPORT_MASK BITFIELD(0, 0)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP
+ */
+#define OMAP4430_ENFUNC1_SHIFT (1 << 3)
+#define OMAP4430_ENFUNC1_MASK BITFIELD(3, 3)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP
+ */
+#define OMAP4430_ENFUNC3_SHIFT (1 << 5)
+#define OMAP4430_ENFUNC3_MASK BITFIELD(5, 5)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP
+ */
+#define OMAP4430_ENFUNC4_SHIFT (1 << 6)
+#define OMAP4430_ENFUNC4_MASK BITFIELD(6, 6)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_IVA_SETUP,
+ * PRM_LDO_SRAM_MPU_SETUP
+ */
+#define OMAP4430_ENFUNC5_SHIFT (1 << 7)
+#define OMAP4430_ENFUNC5_MASK BITFIELD(7, 7)
+
+/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
+#define OMAP4430_ERRORGAIN_SHIFT (1 << 16)
+#define OMAP4430_ERRORGAIN_MASK BITFIELD(16, 23)
+
+/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
+#define OMAP4430_ERROROFFSET_SHIFT (1 << 24)
+#define OMAP4430_ERROROFFSET_MASK BITFIELD(24, 31)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_EXTERNAL_WARM_RST_SHIFT (1 << 5)
+#define OMAP4430_EXTERNAL_WARM_RST_MASK BITFIELD(5, 5)
+
+/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
+#define OMAP4430_FORCEUPDATE_SHIFT (1 << 1)
+#define OMAP4430_FORCEUPDATE_MASK BITFIELD(1, 1)
+
+/* Used by PRM_VP_CORE_VOLTAGE, PRM_VP_IVA_VOLTAGE, PRM_VP_MPU_VOLTAGE */
+#define OMAP4430_FORCEUPDATEWAIT_SHIFT (1 << 8)
+#define OMAP4430_FORCEUPDATEWAIT_MASK BITFIELD(8, 31)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_TESLA */
+#define OMAP4430_FORCEWKUP_EN_SHIFT (1 << 10)
+#define OMAP4430_FORCEWKUP_EN_MASK BITFIELD(10, 10)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_TESLA */
+#define OMAP4430_FORCEWKUP_ST_SHIFT (1 << 10)
+#define OMAP4430_FORCEWKUP_ST_MASK BITFIELD(10, 10)
+
+/* Used by PM_GFX_PWRSTCTRL */
+#define OMAP4430_GFX_MEM_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_GFX_MEM_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_GFX_PWRSTST */
+#define OMAP4430_GFX_MEM_STATEST_SHIFT (1 << 4)
+#define OMAP4430_GFX_MEM_STATEST_MASK BITFIELD(4, 5)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_GLOBAL_COLD_RST_SHIFT (1 << 0)
+#define OMAP4430_GLOBAL_COLD_RST_MASK BITFIELD(0, 0)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT (1 << 1)
+#define OMAP4430_GLOBAL_WARM_SW_RST_MASK BITFIELD(1, 1)
+
+/* Used by PRM_IO_PMCTRL */
+#define OMAP4430_GLOBAL_WUEN_SHIFT (1 << 16)
+#define OMAP4430_GLOBAL_WUEN_MASK BITFIELD(16, 16)
+
+/* Used by PRM_VC_CFG_I2C_MODE */
+#define OMAP4430_HSMCODE_SHIFT (1 << 0)
+#define OMAP4430_HSMCODE_MASK BITFIELD(0, 2)
+
+/* Used by PRM_VC_CFG_I2C_MODE */
+#define OMAP4430_HSMODEEN_SHIFT (1 << 3)
+#define OMAP4430_HSMODEEN_MASK BITFIELD(3, 3)
+
+/* Used by PRM_VC_CFG_I2C_CLK */
+#define OMAP4430_HSSCLH_SHIFT (1 << 16)
+#define OMAP4430_HSSCLH_MASK BITFIELD(16, 23)
+
+/* Used by PRM_VC_CFG_I2C_CLK */
+#define OMAP4430_HSSCLL_SHIFT (1 << 24)
+#define OMAP4430_HSSCLL_MASK BITFIELD(24, 31)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_HWA_MEM_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_HWA_MEM_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_HWA_MEM_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_HWA_MEM_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_IVAHD_PWRSTST */
+#define OMAP4430_HWA_MEM_STATEST_SHIFT (1 << 4)
+#define OMAP4430_HWA_MEM_STATEST_MASK BITFIELD(4, 5)
+
+/* Used by RM_MPU_RSTST */
+#define OMAP4430_ICECRUSHER_MPU_RST_SHIFT (1 << 1)
+#define OMAP4430_ICECRUSHER_MPU_RST_MASK BITFIELD(1, 1)
+
+/* Used by RM_DUCATI_RSTST */
+#define OMAP4430_ICECRUSHER_RST1ST_SHIFT (1 << 5)
+#define OMAP4430_ICECRUSHER_RST1ST_MASK BITFIELD(5, 5)
+
+/* Used by RM_DUCATI_RSTST */
+#define OMAP4430_ICECRUSHER_RST2ST_SHIFT (1 << 6)
+#define OMAP4430_ICECRUSHER_RST2ST_MASK BITFIELD(6, 6)
+
+/* Used by RM_IVAHD_RSTST */
+#define OMAP4430_ICECRUSHER_SEQ1_RST1ST_SHIFT (1 << 5)
+#define OMAP4430_ICECRUSHER_SEQ1_RST1ST_MASK BITFIELD(5, 5)
+
+/* Used by RM_IVAHD_RSTST */
+#define OMAP4430_ICECRUSHER_SEQ2_RST2ST_SHIFT (1 << 6)
+#define OMAP4430_ICECRUSHER_SEQ2_RST2ST_MASK BITFIELD(6, 6)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_ICEPICK_RST_SHIFT (1 << 9)
+#define OMAP4430_ICEPICK_RST_MASK BITFIELD(9, 9)
+
+/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
+#define OMAP4430_INITVDD_SHIFT (1 << 2)
+#define OMAP4430_INITVDD_MASK BITFIELD(2, 2)
+
+/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
+#define OMAP4430_INITVOLTAGE_SHIFT (1 << 8)
+#define OMAP4430_INITVOLTAGE_MASK BITFIELD(8, 15)
+
+/*
+ * Used by PM_EMU_PWRSTST, PM_CORE_PWRSTST, PM_CAM_PWRSTST, PM_L3INIT_PWRSTST,
+ * PM_ABE_PWRSTST, PM_GFX_PWRSTST, PM_MPU_PWRSTST, PM_CEFUSE_PWRSTST,
+ * PM_DSS_PWRSTST, PM_L4PER_PWRSTST, PM_TESLA_PWRSTST, PM_IVAHD_PWRSTST
+ */
+#define OMAP4430_INTRANSITION_SHIFT (1 << 20)
+#define OMAP4430_INTRANSITION_MASK BITFIELD(20, 20)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_IO_EN_SHIFT (1 << 9)
+#define OMAP4430_IO_EN_MASK BITFIELD(9, 9)
+
+/* Used by PRM_IO_PMCTRL */
+#define OMAP4430_IO_ON_STATUS_SHIFT (1 << 5)
+#define OMAP4430_IO_ON_STATUS_MASK BITFIELD(5, 5)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_IO_ST_SHIFT (1 << 9)
+#define OMAP4430_IO_ST_MASK BITFIELD(9, 9)
+
+/* Used by PRM_IO_PMCTRL */
+#define OMAP4430_ISOCLK_OVERRIDE_SHIFT (1 << 0)
+#define OMAP4430_ISOCLK_OVERRIDE_MASK BITFIELD(0, 0)
+
+/* Used by PRM_IO_PMCTRL */
+#define OMAP4430_ISOCLK_STATUS_SHIFT (1 << 1)
+#define OMAP4430_ISOCLK_STATUS_MASK BITFIELD(1, 1)
+
+/* Used by PRM_IO_PMCTRL */
+#define OMAP4430_ISOOVR_EXTEND_SHIFT (1 << 4)
+#define OMAP4430_ISOOVR_EXTEND_MASK BITFIELD(4, 4)
+
+/* Used by PRM_IO_COUNT */
+#define OMAP4430_ISO_2_ON_TIME_SHIFT (1 << 0)
+#define OMAP4430_ISO_2_ON_TIME_MASK BITFIELD(0, 7)
+
+/* Used by PM_L3INIT_PWRSTCTRL */
+#define OMAP4430_L3INIT_BANK1_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_L3INIT_BANK1_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_L3INIT_PWRSTCTRL */
+#define OMAP4430_L3INIT_BANK1_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_L3INIT_BANK1_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_L3INIT_PWRSTST */
+#define OMAP4430_L3INIT_BANK1_STATEST_SHIFT (1 << 4)
+#define OMAP4430_L3INIT_BANK1_STATEST_MASK BITFIELD(4, 5)
+
+/*
+ * Used by PM_CORE_PWRSTCTRL, PM_L3INIT_PWRSTCTRL, PM_ABE_PWRSTCTRL,
+ * PM_MPU_PWRSTCTRL, PM_DSS_PWRSTCTRL, PM_L4PER_PWRSTCTRL, PM_TESLA_PWRSTCTRL,
+ * PM_IVAHD_PWRSTCTRL
+ */
+#define OMAP4430_LOGICRETSTATE_SHIFT (1 << 2)
+#define OMAP4430_LOGICRETSTATE_MASK BITFIELD(2, 2)
+
+/*
+ * Used by PM_EMU_PWRSTST, PM_CORE_PWRSTST, PM_CAM_PWRSTST, PM_L3INIT_PWRSTST,
+ * PM_ABE_PWRSTST, PM_GFX_PWRSTST, PM_MPU_PWRSTST, PM_CEFUSE_PWRSTST,
+ * PM_DSS_PWRSTST, PM_L4PER_PWRSTST, PM_TESLA_PWRSTST, PM_IVAHD_PWRSTST
+ */
+#define OMAP4430_LOGICSTATEST_SHIFT (1 << 2)
+#define OMAP4430_LOGICSTATEST_MASK BITFIELD(2, 2)
+
+/*
+ * Used by RM_WKUP_GPIO1_CONTEXT, RM_WKUP_KEYBOARD_CONTEXT,
+ * RM_WKUP_L4WKUP_CONTEXT, RM_WKUP_RTC_CONTEXT, RM_WKUP_SARRAM_CONTEXT,
+ * RM_WKUP_SYNCTIMER_CONTEXT, RM_WKUP_TIMER12_CONTEXT, RM_WKUP_TIMER1_CONTEXT,
+ * RM_WKUP_USIM_CONTEXT, RM_WKUP_WDT1_CONTEXT, RM_WKUP_WDT2_CONTEXT,
+ * RM_EMU_DEBUGSS_CONTEXT, RM_D2D_SAD2D_CONTEXT, RM_D2D_SAD2D_FW_CONTEXT,
+ * RM_DUCATI_DUCATI_CONTEXT, RM_L3INSTR_L3_3_CONTEXT,
+ * RM_L3INSTR_L3_INSTR_CONTEXT, RM_L3INSTR_OCP_WP1_CONTEXT,
+ * RM_L3_1_L3_1_CONTEXT, RM_L3_2_L3_2_CONTEXT, RM_L3_2_OCMC_RAM_CONTEXT,
+ * RM_L4CFG_L4_CFG_CONTEXT, RM_L4CFG_SAR_ROM_CONTEXT, RM_MEMIF_DLL_CONTEXT,
+ * RM_MEMIF_DLL_H_CONTEXT, RM_MEMIF_DMM_CONTEXT, RM_MEMIF_EMIF_FW_CONTEXT,
+ * RM_CAM_FDIF_CONTEXT, RM_CAM_ISS_CONTEXT, RM_L3INIT_CCPTX_CONTEXT,
+ * RM_L3INIT_EMAC_CONTEXT, RM_L3INIT_P1500_CONTEXT, RM_L3INIT_PCIESS_CONTEXT,
+ * RM_L3INIT_SATA_CONTEXT, RM_L3INIT_TPPSS_CONTEXT, RM_L3INIT_UNIPRO1_CONTEXT,
+ * RM_L3INIT_USBPHYOCP2SCP_CONTEXT, RM_L3INIT_XHPI_CONTEXT,
+ * RM_ABE_AESS_CONTEXT, RM_ABE_DMIC_CONTEXT, RM_ABE_MCASP_CONTEXT,
+ * RM_ABE_MCBSP1_CONTEXT, RM_ABE_MCBSP2_CONTEXT, RM_ABE_MCBSP3_CONTEXT,
+ * RM_ABE_PDM_CONTEXT, RM_ABE_SLIMBUS_CONTEXT, RM_ABE_TIMER5_CONTEXT,
+ * RM_ABE_TIMER6_CONTEXT, RM_ABE_TIMER7_CONTEXT, RM_ABE_TIMER8_CONTEXT,
+ * RM_ABE_WDT3_CONTEXT, RM_GFX_GFX_CONTEXT, RM_MPU_MPU_CONTEXT,
+ * RM_CEFUSE_CEFUSE_CONTEXT, RM_ALWON_MDMINTC_CONTEXT,
+ * RM_ALWON_SR_CORE_CONTEXT, RM_ALWON_SR_IVA_CONTEXT, RM_ALWON_SR_MPU_CONTEXT,
+ * RM_DSS_DEISS_CONTEXT, RM_DSS_DSS_CONTEXT, RM_L4PER_ADC_CONTEXT,
+ * RM_L4PER_DMTIMER10_CONTEXT, RM_L4PER_DMTIMER11_CONTEXT,
+ * RM_L4PER_DMTIMER2_CONTEXT, RM_L4PER_DMTIMER3_CONTEXT,
+ * RM_L4PER_DMTIMER4_CONTEXT, RM_L4PER_DMTIMER9_CONTEXT, RM_L4PER_ELM_CONTEXT,
+ * RM_L4PER_HDQ1W_CONTEXT, RM_L4PER_HECC1_CONTEXT, RM_L4PER_HECC2_CONTEXT,
+ * RM_L4PER_I2C2_CONTEXT, RM_L4PER_I2C3_CONTEXT, RM_L4PER_I2C4_CONTEXT,
+ * RM_L4PER_I2C5_CONTEXT, RM_L4PER_L4_PER_CONTEXT, RM_L4PER_MCASP2_CONTEXT,
+ * RM_L4PER_MCASP3_CONTEXT, RM_L4PER_MCBSP4_CONTEXT, RM_L4PER_MCSPI1_CONTEXT,
+ * RM_L4PER_MCSPI2_CONTEXT, RM_L4PER_MCSPI3_CONTEXT, RM_L4PER_MCSPI4_CONTEXT,
+ * RM_L4PER_MGATE_CONTEXT, RM_L4PER_MMCSD3_CONTEXT, RM_L4PER_MMCSD4_CONTEXT,
+ * RM_L4PER_MMCSD5_CONTEXT, RM_L4PER_MSPROHG_CONTEXT,
+ * RM_L4PER_SLIMBUS2_CONTEXT, RM_L4SEC_PKAEIP29_CONTEXT,
+ * RM_TESLA_TESLA_CONTEXT, RM_IVAHD_IVAHD_CONTEXT, RM_IVAHD_SL2_CONTEXT
+ */
+#define OMAP4430_LOSTCONTEXT_DFF_SHIFT (1 << 0)
+#define OMAP4430_LOSTCONTEXT_DFF_MASK BITFIELD(0, 0)
+
+/*
+ * Used by RM_D2D_MODEM_ICR_CONTEXT, RM_D2D_SAD2D_CONTEXT,
+ * RM_D2D_SAD2D_FW_CONTEXT, RM_DUCATI_DUCATI_CONTEXT, RM_L3INSTR_L3_3_CONTEXT,
+ * RM_L3INSTR_OCP_WP1_CONTEXT, RM_L3_1_L3_1_CONTEXT, RM_L3_2_GPMC_CONTEXT,
+ * RM_L3_2_L3_2_CONTEXT, RM_L4CFG_HW_SEM_CONTEXT, RM_L4CFG_L4_CFG_CONTEXT,
+ * RM_L4CFG_MAILBOX_CONTEXT, RM_MEMIF_DMM_CONTEXT, RM_MEMIF_EMIF_1_CONTEXT,
+ * RM_MEMIF_EMIF_2_CONTEXT, RM_MEMIF_EMIF_FW_CONTEXT, RM_MEMIF_EMIF_H1_CONTEXT,
+ * RM_MEMIF_EMIF_H2_CONTEXT, RM_SDMA_SDMA_CONTEXT, RM_L3INIT_HSI_CONTEXT,
+ * RM_L3INIT_MMC1_CONTEXT, RM_L3INIT_MMC2_CONTEXT, RM_L3INIT_MMC6_CONTEXT,
+ * RM_L3INIT_USB_HOST_CONTEXT, RM_L3INIT_USB_HOST_FS_CONTEXT,
+ * RM_L3INIT_USB_OTG_CONTEXT, RM_L3INIT_USB_TLL_CONTEXT, RM_DSS_DSS_CONTEXT,
+ * RM_L4PER_GPIO2_CONTEXT, RM_L4PER_GPIO3_CONTEXT, RM_L4PER_GPIO4_CONTEXT,
+ * RM_L4PER_GPIO5_CONTEXT, RM_L4PER_GPIO6_CONTEXT, RM_L4PER_I2C1_CONTEXT,
+ * RM_L4PER_L4_PER_CONTEXT, RM_L4PER_UART1_CONTEXT, RM_L4PER_UART2_CONTEXT,
+ * RM_L4PER_UART3_CONTEXT, RM_L4PER_UART4_CONTEXT, RM_L4SEC_AES1_CONTEXT,
+ * RM_L4SEC_AES2_CONTEXT, RM_L4SEC_CRYPTODMA_CONTEXT, RM_L4SEC_DES3DES_CONTEXT,
+ * RM_L4SEC_RNG_CONTEXT, RM_L4SEC_SHA2MD51_CONTEXT, RM_TESLA_TESLA_CONTEXT
+ */
+#define OMAP4430_LOSTCONTEXT_RFF_SHIFT (1 << 1)
+#define OMAP4430_LOSTCONTEXT_RFF_MASK BITFIELD(1, 1)
+
+/* Used by RM_ABE_AESS_CONTEXT */
+#define OMAP4430_LOSTMEM_AESSMEM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_AESSMEM_MASK BITFIELD(8, 8)
+
+/* Used by RM_CAM_FDIF_CONTEXT, RM_CAM_ISS_CONTEXT */
+#define OMAP4430_LOSTMEM_CAM_MEM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_CAM_MEM_MASK BITFIELD(8, 8)
+
+/* Used by RM_L3INSTR_OCP_WP1_CONTEXT */
+#define OMAP4430_LOSTMEM_CORE_NRET_BANK_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_CORE_NRET_BANK_MASK BITFIELD(8, 8)
+
+/* Renamed from LOSTMEM_CORE_NRET_BANK Used by RM_MEMIF_DMM_CONTEXT */
+#define OMAP4430_LOSTMEM_CORE_NRET_BANK_9_9_SHIFT (1 << 9)
+#define OMAP4430_LOSTMEM_CORE_NRET_BANK_9_9_MASK BITFIELD(9, 9)
+
+/* Used by RM_L3_2_OCMC_RAM_CONTEXT */
+#define OMAP4430_LOSTMEM_CORE_OCMRAM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_CORE_OCMRAM_MASK BITFIELD(8, 8)
+
+/*
+ * Used by RM_D2D_MODEM_ICR_CONTEXT, RM_MEMIF_DMM_CONTEXT,
+ * RM_SDMA_SDMA_CONTEXT
+ */
+#define OMAP4430_LOSTMEM_CORE_OTHER_BANK_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_CORE_OTHER_BANK_MASK BITFIELD(8, 8)
+
+/* Used by RM_DSS_DEISS_CONTEXT, RM_DSS_DSS_CONTEXT */
+#define OMAP4430_LOSTMEM_DSS_MEM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_DSS_MEM_MASK BITFIELD(8, 8)
+
+/* Used by RM_DUCATI_DUCATI_CONTEXT */
+#define OMAP4430_LOSTMEM_DUCATI_L2RAM_SHIFT (1 << 9)
+#define OMAP4430_LOSTMEM_DUCATI_L2RAM_MASK BITFIELD(9, 9)
+
+/* Used by RM_DUCATI_DUCATI_CONTEXT */
+#define OMAP4430_LOSTMEM_DUCATI_UNICACHE_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_DUCATI_UNICACHE_MASK BITFIELD(8, 8)
+
+/* Used by RM_EMU_DEBUGSS_CONTEXT */
+#define OMAP4430_LOSTMEM_EMU_BANK_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_EMU_BANK_MASK BITFIELD(8, 8)
+
+/* Used by RM_GFX_GFX_CONTEXT */
+#define OMAP4430_LOSTMEM_GFX_MEM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_GFX_MEM_MASK BITFIELD(8, 8)
+
+/* Used by RM_IVAHD_IVAHD_CONTEXT */
+#define OMAP4430_LOSTMEM_HWA_MEM_SHIFT (1 << 10)
+#define OMAP4430_LOSTMEM_HWA_MEM_MASK BITFIELD(10, 10)
+
+/*
+ * Used by RM_L3INIT_CCPTX_CONTEXT, RM_L3INIT_EMAC_CONTEXT,
+ * RM_L3INIT_HSI_CONTEXT, RM_L3INIT_MMC1_CONTEXT, RM_L3INIT_MMC2_CONTEXT,
+ * RM_L3INIT_MMC6_CONTEXT, RM_L3INIT_PCIESS_CONTEXT, RM_L3INIT_SATA_CONTEXT,
+ * RM_L3INIT_TPPSS_CONTEXT, RM_L3INIT_UNIPRO1_CONTEXT,
+ * RM_L3INIT_USB_OTG_CONTEXT, RM_L3INIT_XHPI_CONTEXT
+ */
+#define OMAP4430_LOSTMEM_L3INIT_BANK1_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_L3INIT_BANK1_MASK BITFIELD(8, 8)
+
+/* Used by RM_MPU_MPU_CONTEXT */
+#define OMAP4430_LOSTMEM_MPU_L1_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_MPU_L1_MASK BITFIELD(8, 8)
+
+/* Used by RM_MPU_MPU_CONTEXT */
+#define OMAP4430_LOSTMEM_MPU_L2_SHIFT (1 << 9)
+#define OMAP4430_LOSTMEM_MPU_L2_MASK BITFIELD(9, 9)
+
+/* Used by RM_MPU_MPU_CONTEXT */
+#define OMAP4430_LOSTMEM_MPU_RAM_SHIFT (1 << 10)
+#define OMAP4430_LOSTMEM_MPU_RAM_MASK BITFIELD(10, 10)
+
+/*
+ * Used by RM_L4PER_HECC1_CONTEXT, RM_L4PER_HECC2_CONTEXT,
+ * RM_L4PER_MCBSP4_CONTEXT, RM_L4PER_MMCSD3_CONTEXT, RM_L4PER_MMCSD4_CONTEXT,
+ * RM_L4PER_MMCSD5_CONTEXT, RM_L4PER_SLIMBUS2_CONTEXT, RM_L4SEC_PKAEIP29_CONTEXT
+ */
+#define OMAP4430_LOSTMEM_NONRETAINED_BANK_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_NONRETAINED_BANK_MASK BITFIELD(8, 8)
+
+/*
+ * Used by RM_ABE_DMIC_CONTEXT, RM_ABE_MCBSP1_CONTEXT, RM_ABE_MCBSP2_CONTEXT,
+ * RM_ABE_MCBSP3_CONTEXT, RM_ABE_PDM_CONTEXT, RM_ABE_SLIMBUS_CONTEXT
+ */
+#define OMAP4430_LOSTMEM_PERIHPMEM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_PERIHPMEM_MASK BITFIELD(8, 8)
+
+/*
+ * Used by RM_L4PER_MSPROHG_CONTEXT, RM_L4PER_UART1_CONTEXT,
+ * RM_L4PER_UART2_CONTEXT, RM_L4PER_UART3_CONTEXT, RM_L4PER_UART4_CONTEXT,
+ * RM_L4SEC_CRYPTODMA_CONTEXT
+ */
+#define OMAP4430_LOSTMEM_RETAINED_BANK_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_RETAINED_BANK_MASK BITFIELD(8, 8)
+
+/* Used by RM_IVAHD_SL2_CONTEXT */
+#define OMAP4430_LOSTMEM_SL2_MEM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_SL2_MEM_MASK BITFIELD(8, 8)
+
+/* Used by RM_IVAHD_IVAHD_CONTEXT */
+#define OMAP4430_LOSTMEM_TCM1_MEM_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_TCM1_MEM_MASK BITFIELD(8, 8)
+
+/* Used by RM_IVAHD_IVAHD_CONTEXT */
+#define OMAP4430_LOSTMEM_TCM2_MEM_SHIFT (1 << 9)
+#define OMAP4430_LOSTMEM_TCM2_MEM_MASK BITFIELD(9, 9)
+
+/* Used by RM_TESLA_TESLA_CONTEXT */
+#define OMAP4430_LOSTMEM_TESLA_EDMA_SHIFT (1 << 10)
+#define OMAP4430_LOSTMEM_TESLA_EDMA_MASK BITFIELD(10, 10)
+
+/* Used by RM_TESLA_TESLA_CONTEXT */
+#define OMAP4430_LOSTMEM_TESLA_L1_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_TESLA_L1_MASK BITFIELD(8, 8)
+
+/* Used by RM_TESLA_TESLA_CONTEXT */
+#define OMAP4430_LOSTMEM_TESLA_L2_SHIFT (1 << 9)
+#define OMAP4430_LOSTMEM_TESLA_L2_MASK BITFIELD(9, 9)
+
+/* Used by RM_WKUP_SARRAM_CONTEXT */
+#define OMAP4430_LOSTMEM_WKUP_BANK_SHIFT (1 << 8)
+#define OMAP4430_LOSTMEM_WKUP_BANK_MASK BITFIELD(8, 8)
+
+/*
+ * Used by PM_CORE_PWRSTCTRL, PM_CAM_PWRSTCTRL, PM_L3INIT_PWRSTCTRL,
+ * PM_ABE_PWRSTCTRL, PM_GFX_PWRSTCTRL, PM_MPU_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL,
+ * PM_DSS_PWRSTCTRL, PM_L4PER_PWRSTCTRL, PM_TESLA_PWRSTCTRL, PM_IVAHD_PWRSTCTRL
+ */
+#define OMAP4430_LOWPOWERSTATECHANGE_SHIFT (1 << 4)
+#define OMAP4430_LOWPOWERSTATECHANGE_MASK BITFIELD(4, 4)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_MEMORYCHANGE_SHIFT (1 << 3)
+#define OMAP4430_MEMORYCHANGE_MASK BITFIELD(3, 3)
+
+/* Used by PRM_MODEM_IF_CTRL */
+#define OMAP4430_MODEM_READY_SHIFT (1 << 1)
+#define OMAP4430_MODEM_READY_MASK BITFIELD(1, 1)
+
+/* Used by PRM_MODEM_IF_CTRL */
+#define OMAP4430_MODEM_SHUTDOWN_IRQ_SHIFT (1 << 9)
+#define OMAP4430_MODEM_SHUTDOWN_IRQ_MASK BITFIELD(9, 9)
+
+/* Used by PRM_MODEM_IF_CTRL */
+#define OMAP4430_MODEM_SLEEP_ST_SHIFT (1 << 16)
+#define OMAP4430_MODEM_SLEEP_ST_MASK BITFIELD(16, 16)
+
+/* Used by PRM_MODEM_IF_CTRL */
+#define OMAP4430_MODEM_WAKE_IRQ_SHIFT (1 << 8)
+#define OMAP4430_MODEM_WAKE_IRQ_MASK BITFIELD(8, 8)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define OMAP4430_MPU_L1_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_MPU_L1_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define OMAP4430_MPU_L1_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_MPU_L1_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_MPU_PWRSTST */
+#define OMAP4430_MPU_L1_STATEST_SHIFT (1 << 4)
+#define OMAP4430_MPU_L1_STATEST_MASK BITFIELD(4, 5)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define OMAP4430_MPU_L2_ONSTATE_SHIFT (1 << 18)
+#define OMAP4430_MPU_L2_ONSTATE_MASK BITFIELD(18, 19)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define OMAP4430_MPU_L2_RETSTATE_SHIFT (1 << 9)
+#define OMAP4430_MPU_L2_RETSTATE_MASK BITFIELD(9, 9)
+
+/* Used by PM_MPU_PWRSTST */
+#define OMAP4430_MPU_L2_STATEST_SHIFT (1 << 6)
+#define OMAP4430_MPU_L2_STATEST_MASK BITFIELD(6, 7)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define OMAP4430_MPU_RAM_ONSTATE_SHIFT (1 << 20)
+#define OMAP4430_MPU_RAM_ONSTATE_MASK BITFIELD(20, 21)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define OMAP4430_MPU_RAM_RETSTATE_SHIFT (1 << 10)
+#define OMAP4430_MPU_RAM_RETSTATE_MASK BITFIELD(10, 10)
+
+/* Used by PM_MPU_PWRSTST */
+#define OMAP4430_MPU_RAM_STATEST_SHIFT (1 << 8)
+#define OMAP4430_MPU_RAM_STATEST_MASK BITFIELD(8, 9)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_MPU_SECURITY_VIOL_RST_SHIFT (1 << 2)
+#define OMAP4430_MPU_SECURITY_VIOL_RST_MASK BITFIELD(2, 2)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_MPU_WDT_RST_SHIFT (1 << 3)
+#define OMAP4430_MPU_WDT_RST_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_PWRSTCTRL */
+#define OMAP4430_NONRETAINED_BANK_ONSTATE_SHIFT (1 << 18)
+#define OMAP4430_NONRETAINED_BANK_ONSTATE_MASK BITFIELD(18, 19)
+
+/* Used by PM_L4PER_PWRSTCTRL */
+#define OMAP4430_NONRETAINED_BANK_RETSTATE_SHIFT (1 << 9)
+#define OMAP4430_NONRETAINED_BANK_RETSTATE_MASK BITFIELD(9, 9)
+
+/* Used by PM_L4PER_PWRSTST */
+#define OMAP4430_NONRETAINED_BANK_STATEST_SHIFT (1 << 6)
+#define OMAP4430_NONRETAINED_BANK_STATEST_MASK BITFIELD(6, 7)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_OCP_NRET_BANK_ONSTATE_SHIFT (1 << 24)
+#define OMAP4430_OCP_NRET_BANK_ONSTATE_MASK BITFIELD(24, 25)
+
+/* Used by PM_CORE_PWRSTCTRL */
+#define OMAP4430_OCP_NRET_BANK_RETSTATE_SHIFT (1 << 12)
+#define OMAP4430_OCP_NRET_BANK_RETSTATE_MASK BITFIELD(12, 12)
+
+/* Used by PM_CORE_PWRSTST */
+#define OMAP4430_OCP_NRET_BANK_STATEST_SHIFT (1 << 12)
+#define OMAP4430_OCP_NRET_BANK_STATEST_MASK BITFIELD(12, 13)
+
+/*
+ * Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
+ * PRM_VC_VAL_CMD_VDD_MPU_L
+ */
+#define OMAP4430_OFF_SHIFT (1 << 0)
+#define OMAP4430_OFF_MASK BITFIELD(0, 7)
+
+/* Used by PRM_LDO_BANDGAP_CTRL */
+#define OMAP4430_OFF_ENABLE_SHIFT (1 << 0)
+#define OMAP4430_OFF_ENABLE_MASK BITFIELD(0, 0)
+
+/*
+ * Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
+ * PRM_VC_VAL_CMD_VDD_MPU_L
+ */
+#define OMAP4430_ON_SHIFT (1 << 24)
+#define OMAP4430_ON_MASK BITFIELD(24, 31)
+
+/*
+ * Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
+ * PRM_VC_VAL_CMD_VDD_MPU_L
+ */
+#define OMAP4430_ONLP_SHIFT (1 << 16)
+#define OMAP4430_ONLP_MASK BITFIELD(16, 23)
+
+/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
+#define OMAP4430_OPP_CHANGE_SHIFT (1 << 2)
+#define OMAP4430_OPP_CHANGE_MASK BITFIELD(2, 2)
+
+/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
+#define OMAP4430_OPP_SEL_SHIFT (1 << 0)
+#define OMAP4430_OPP_SEL_MASK BITFIELD(0, 1)
+
+/* Used by PRM_SRAM_COUNT */
+#define OMAP4430_PCHARGECNT_VALUE_SHIFT (1 << 0)
+#define OMAP4430_PCHARGECNT_VALUE_MASK BITFIELD(0, 5)
+
+/* Used by PRM_PSCON_COUNT */
+#define OMAP4430_PCHARGE_TIME_SHIFT (1 << 0)
+#define OMAP4430_PCHARGE_TIME_MASK BITFIELD(0, 7)
+
+/* Used by PM_ABE_PWRSTCTRL */
+#define OMAP4430_PERIPHMEM_ONSTATE_SHIFT (1 << 20)
+#define OMAP4430_PERIPHMEM_ONSTATE_MASK BITFIELD(20, 21)
+
+/* Used by PM_ABE_PWRSTCTRL */
+#define OMAP4430_PERIPHMEM_RETSTATE_SHIFT (1 << 10)
+#define OMAP4430_PERIPHMEM_RETSTATE_MASK BITFIELD(10, 10)
+
+/* Used by PM_ABE_PWRSTST */
+#define OMAP4430_PERIPHMEM_STATEST_SHIFT (1 << 8)
+#define OMAP4430_PERIPHMEM_STATEST_MASK BITFIELD(8, 9)
+
+/* Used by PRM_PHASE1_CNDP */
+#define OMAP4430_PHASE1_CNDP_SHIFT (1 << 0)
+#define OMAP4430_PHASE1_CNDP_MASK BITFIELD(0, 31)
+
+/* Used by PRM_PHASE2A_CNDP */
+#define OMAP4430_PHASE2A_CNDP_SHIFT (1 << 0)
+#define OMAP4430_PHASE2A_CNDP_MASK BITFIELD(0, 31)
+
+/* Used by PRM_PHASE2B_CNDP */
+#define OMAP4430_PHASE2B_CNDP_SHIFT (1 << 0)
+#define OMAP4430_PHASE2B_CNDP_MASK BITFIELD(0, 31)
+
+/* Used by PRM_PSCON_COUNT */
+#define OMAP4430_PONOUT_2_PGOODIN_TIME_SHIFT (1 << 8)
+#define OMAP4430_PONOUT_2_PGOODIN_TIME_MASK BITFIELD(8, 15)
+
+/*
+ * Used by PM_EMU_PWRSTCTRL, PM_CORE_PWRSTCTRL, PM_CAM_PWRSTCTRL,
+ * PM_L3INIT_PWRSTCTRL, PM_ABE_PWRSTCTRL, PM_GFX_PWRSTCTRL, PM_MPU_PWRSTCTRL,
+ * PM_CEFUSE_PWRSTCTRL, PM_DSS_PWRSTCTRL, PM_L4PER_PWRSTCTRL,
+ * PM_TESLA_PWRSTCTRL, PM_IVAHD_PWRSTCTRL
+ */
+#define OMAP4430_POWERSTATE_SHIFT (1 << 0)
+#define OMAP4430_POWERSTATE_MASK BITFIELD(0, 1)
+
+/*
+ * Used by PM_EMU_PWRSTST, PM_CORE_PWRSTST, PM_CAM_PWRSTST, PM_L3INIT_PWRSTST,
+ * PM_ABE_PWRSTST, PM_GFX_PWRSTST, PM_MPU_PWRSTST, PM_CEFUSE_PWRSTST,
+ * PM_DSS_PWRSTST, PM_L4PER_PWRSTST, PM_TESLA_PWRSTST, PM_IVAHD_PWRSTST
+ */
+#define OMAP4430_POWERSTATEST_SHIFT (1 << 0)
+#define OMAP4430_POWERSTATEST_MASK BITFIELD(0, 1)
+
+/* Used by PRM_PWRREQCTRL */
+#define OMAP4430_PWRREQ_COND_SHIFT (1 << 0)
+#define OMAP4430_PWRREQ_COND_MASK BITFIELD(0, 1)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RACEN_VDD_CORE_L_SHIFT (1 << 3)
+#define OMAP4430_RACEN_VDD_CORE_L_MASK BITFIELD(3, 3)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RACEN_VDD_IVA_L_SHIFT (1 << 11)
+#define OMAP4430_RACEN_VDD_IVA_L_MASK BITFIELD(11, 11)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RACEN_VDD_MPU_L_SHIFT (1 << 20)
+#define OMAP4430_RACEN_VDD_MPU_L_MASK BITFIELD(20, 20)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RAC_VDD_CORE_L_SHIFT (1 << 2)
+#define OMAP4430_RAC_VDD_CORE_L_MASK BITFIELD(2, 2)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RAC_VDD_IVA_L_SHIFT (1 << 10)
+#define OMAP4430_RAC_VDD_IVA_L_MASK BITFIELD(10, 10)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RAC_VDD_MPU_L_SHIFT (1 << 19)
+#define OMAP4430_RAC_VDD_MPU_L_MASK BITFIELD(19, 19)
+
+/*
+ * Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
+ * PRM_VOLTSETUP_IVA_OFF, PRM_VOLTSETUP_IVA_RET_SLEEP, PRM_VOLTSETUP_MPU_OFF,
+ * PRM_VOLTSETUP_MPU_RET_SLEEP
+ */
+#define OMAP4430_RAMP_DOWN_COUNT_SHIFT (1 << 16)
+#define OMAP4430_RAMP_DOWN_COUNT_MASK BITFIELD(16, 21)
+
+/*
+ * Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
+ * PRM_VOLTSETUP_IVA_OFF, PRM_VOLTSETUP_IVA_RET_SLEEP, PRM_VOLTSETUP_MPU_OFF,
+ * PRM_VOLTSETUP_MPU_RET_SLEEP
+ */
+#define OMAP4430_RAMP_DOWN_PRESCAL_SHIFT (1 << 24)
+#define OMAP4430_RAMP_DOWN_PRESCAL_MASK BITFIELD(24, 25)
+
+/*
+ * Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
+ * PRM_VOLTSETUP_IVA_OFF, PRM_VOLTSETUP_IVA_RET_SLEEP, PRM_VOLTSETUP_MPU_OFF,
+ * PRM_VOLTSETUP_MPU_RET_SLEEP
+ */
+#define OMAP4430_RAMP_UP_COUNT_SHIFT (1 << 0)
+#define OMAP4430_RAMP_UP_COUNT_MASK BITFIELD(0, 5)
+
+/*
+ * Used by PRM_VOLTSETUP_CORE_OFF, PRM_VOLTSETUP_CORE_RET_SLEEP,
+ * PRM_VOLTSETUP_IVA_OFF, PRM_VOLTSETUP_IVA_RET_SLEEP, PRM_VOLTSETUP_MPU_OFF,
+ * PRM_VOLTSETUP_MPU_RET_SLEEP
+ */
+#define OMAP4430_RAMP_UP_PRESCAL_SHIFT (1 << 8)
+#define OMAP4430_RAMP_UP_PRESCAL_MASK BITFIELD(8, 9)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RAV_VDD_CORE_L_SHIFT (1 << 1)
+#define OMAP4430_RAV_VDD_CORE_L_MASK BITFIELD(1, 1)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RAV_VDD_IVA_L_SHIFT (1 << 9)
+#define OMAP4430_RAV_VDD_IVA_L_MASK BITFIELD(9, 9)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_RAV_VDD_MPU_L_SHIFT (1 << 18)
+#define OMAP4430_RAV_VDD_MPU_L_MASK BITFIELD(18, 18)
+
+/* Used by PRM_VC_VAL_BYPASS */
+#define OMAP4430_REGADDR_SHIFT (1 << 8)
+#define OMAP4430_REGADDR_MASK BITFIELD(8, 15)
+
+/*
+ * Used by PRM_VC_VAL_CMD_VDD_CORE_L, PRM_VC_VAL_CMD_VDD_IVA_L,
+ * PRM_VC_VAL_CMD_VDD_MPU_L
+ */
+#define OMAP4430_RET_SHIFT (1 << 8)
+#define OMAP4430_RET_MASK BITFIELD(8, 15)
+
+/* Used by PM_L4PER_PWRSTCTRL */
+#define OMAP4430_RETAINED_BANK_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_RETAINED_BANK_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_L4PER_PWRSTCTRL */
+#define OMAP4430_RETAINED_BANK_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_RETAINED_BANK_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_L4PER_PWRSTST */
+#define OMAP4430_RETAINED_BANK_STATEST_SHIFT (1 << 4)
+#define OMAP4430_RETAINED_BANK_STATEST_MASK BITFIELD(4, 5)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_IVA_CTRL,
+ * PRM_LDO_SRAM_MPU_CTRL
+ */
+#define OMAP4430_RETMODE_ENABLE_SHIFT (1 << 0)
+#define OMAP4430_RETMODE_ENABLE_MASK BITFIELD(0, 0)
+
+/* Used by REVISION_PRM */
+#define OMAP4430_REV_SHIFT (1 << 0)
+#define OMAP4430_REV_MASK BITFIELD(0, 7)
+
+/* Used by RM_DUCATI_RSTCTRL, RM_TESLA_RSTCTRL, RM_IVAHD_RSTCTRL */
+#define OMAP4430_RST1_SHIFT (1 << 0)
+#define OMAP4430_RST1_MASK BITFIELD(0, 0)
+
+/* Used by RM_DUCATI_RSTST, RM_TESLA_RSTST, RM_IVAHD_RSTST */
+#define OMAP4430_RST1ST_SHIFT (1 << 0)
+#define OMAP4430_RST1ST_MASK BITFIELD(0, 0)
+
+/* Used by RM_DUCATI_RSTCTRL, RM_TESLA_RSTCTRL, RM_IVAHD_RSTCTRL */
+#define OMAP4430_RST2_SHIFT (1 << 1)
+#define OMAP4430_RST2_MASK BITFIELD(1, 1)
+
+/* Used by RM_DUCATI_RSTST, RM_TESLA_RSTST, RM_IVAHD_RSTST */
+#define OMAP4430_RST2ST_SHIFT (1 << 1)
+#define OMAP4430_RST2ST_MASK BITFIELD(1, 1)
+
+/* Used by RM_DUCATI_RSTCTRL, RM_IVAHD_RSTCTRL */
+#define OMAP4430_RST3_SHIFT (1 << 2)
+#define OMAP4430_RST3_MASK BITFIELD(2, 2)
+
+/* Used by RM_DUCATI_RSTST, RM_IVAHD_RSTST */
+#define OMAP4430_RST3ST_SHIFT (1 << 2)
+#define OMAP4430_RST3ST_MASK BITFIELD(2, 2)
+
+/* Used by PRM_RSTTIME */
+#define OMAP4430_RSTTIME1_SHIFT (1 << 0)
+#define OMAP4430_RSTTIME1_MASK BITFIELD(0, 9)
+
+/* Used by PRM_RSTTIME */
+#define OMAP4430_RSTTIME2_SHIFT (1 << 10)
+#define OMAP4430_RSTTIME2_MASK BITFIELD(10, 14)
+
+/* Used by PRM_RSTCTRL */
+#define OMAP4430_RST_GLOBAL_COLD_SW_SHIFT (1 << 1)
+#define OMAP4430_RST_GLOBAL_COLD_SW_MASK BITFIELD(1, 1)
+
+/* Used by PRM_RSTCTRL */
+#define OMAP4430_RST_GLOBAL_WARM_SW_SHIFT (1 << 0)
+#define OMAP4430_RST_GLOBAL_WARM_SW_MASK BITFIELD(0, 0)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_SA_VDD_CORE_L_SHIFT (1 << 0)
+#define OMAP4430_SA_VDD_CORE_L_MASK BITFIELD(0, 0)
+
+/* Renamed from SA_VDD_CORE_L Used by PRM_VC_SMPS_SA */
+#define OMAP4430_SA_VDD_CORE_L_0_6_SHIFT (1 << 0)
+#define OMAP4430_SA_VDD_CORE_L_0_6_MASK BITFIELD(0, 6)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_SA_VDD_IVA_L_SHIFT (1 << 8)
+#define OMAP4430_SA_VDD_IVA_L_MASK BITFIELD(8, 8)
+
+/* Renamed from SA_VDD_IVA_L Used by PRM_VC_SMPS_SA */
+#define OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_SHIFT (1 << 8)
+#define OMAP4430_SA_VDD_IVA_L_PRM_VC_SMPS_SA_MASK BITFIELD(8, 14)
+
+/* Used by PRM_VC_CFG_CHANNEL */
+#define OMAP4430_SA_VDD_MPU_L_SHIFT (1 << 16)
+#define OMAP4430_SA_VDD_MPU_L_MASK BITFIELD(16, 16)
+
+/* Renamed from SA_VDD_MPU_L Used by PRM_VC_SMPS_SA */
+#define OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_SHIFT (1 << 16)
+#define OMAP4430_SA_VDD_MPU_L_PRM_VC_SMPS_SA_MASK BITFIELD(16, 22)
+
+/* Used by PRM_VC_CFG_I2C_CLK */
+#define OMAP4430_SCLH_SHIFT (1 << 0)
+#define OMAP4430_SCLH_MASK BITFIELD(0, 7)
+
+/* Used by PRM_VC_CFG_I2C_CLK */
+#define OMAP4430_SCLL_SHIFT (1 << 8)
+#define OMAP4430_SCLL_MASK BITFIELD(8, 15)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_SECURE_WDT_RST_SHIFT (1 << 4)
+#define OMAP4430_SECURE_WDT_RST_MASK BITFIELD(4, 4)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_SL2_MEM_ONSTATE_SHIFT (1 << 18)
+#define OMAP4430_SL2_MEM_ONSTATE_MASK BITFIELD(18, 19)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_SL2_MEM_RETSTATE_SHIFT (1 << 9)
+#define OMAP4430_SL2_MEM_RETSTATE_MASK BITFIELD(9, 9)
+
+/* Used by PM_IVAHD_PWRSTST */
+#define OMAP4430_SL2_MEM_STATEST_SHIFT (1 << 6)
+#define OMAP4430_SL2_MEM_STATEST_MASK BITFIELD(6, 7)
+
+/* Used by PRM_VC_VAL_BYPASS */
+#define OMAP4430_SLAVEADDR_SHIFT (1 << 0)
+#define OMAP4430_SLAVEADDR_MASK BITFIELD(0, 6)
+
+/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
+#define OMAP4430_SLEEP_RBB_SEL_SHIFT (1 << 3)
+#define OMAP4430_SLEEP_RBB_SEL_MASK BITFIELD(3, 3)
+
+/* Used by PRM_SRAM_COUNT */
+#define OMAP4430_SLPCNT_VALUE_SHIFT (1 << 16)
+#define OMAP4430_SLPCNT_VALUE_MASK BITFIELD(16, 23)
+
+/* Used by PRM_VP_CORE_VSTEPMAX, PRM_VP_IVA_VSTEPMAX, PRM_VP_MPU_VSTEPMAX */
+#define OMAP4430_SMPSWAITTIMEMAX_SHIFT (1 << 8)
+#define OMAP4430_SMPSWAITTIMEMAX_MASK BITFIELD(8, 23)
+
+/* Used by PRM_VP_CORE_VSTEPMIN, PRM_VP_IVA_VSTEPMIN, PRM_VP_MPU_VSTEPMIN */
+#define OMAP4430_SMPSWAITTIMEMIN_SHIFT (1 << 8)
+#define OMAP4430_SMPSWAITTIMEMIN_MASK BITFIELD(8, 23)
+
+/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
+#define OMAP4430_SR2EN_SHIFT (1 << 0)
+#define OMAP4430_SR2EN_MASK BITFIELD(0, 0)
+
+/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
+#define OMAP4430_SR2_IN_TRANSITION_SHIFT (1 << 6)
+#define OMAP4430_SR2_IN_TRANSITION_MASK BITFIELD(6, 6)
+
+/* Used by PRM_LDO_ABB_IVA_CTRL, PRM_LDO_ABB_MPU_CTRL */
+#define OMAP4430_SR2_STATUS_SHIFT (1 << 3)
+#define OMAP4430_SR2_STATUS_MASK BITFIELD(3, 4)
+
+/* Used by PRM_LDO_ABB_IVA_SETUP, PRM_LDO_ABB_MPU_SETUP */
+#define OMAP4430_SR2_WTCNT_VALUE_SHIFT (1 << 8)
+#define OMAP4430_SR2_WTCNT_VALUE_MASK BITFIELD(8, 15)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_IVA_CTRL,
+ * PRM_LDO_SRAM_MPU_CTRL
+ */
+#define OMAP4430_SRAMLDO_STATUS_SHIFT (1 << 8)
+#define OMAP4430_SRAMLDO_STATUS_MASK BITFIELD(8, 8)
+
+/*
+ * Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_IVA_CTRL,
+ * PRM_LDO_SRAM_MPU_CTRL
+ */
+#define OMAP4430_SRAM_IN_TRANSITION_SHIFT (1 << 9)
+#define OMAP4430_SRAM_IN_TRANSITION_MASK BITFIELD(9, 9)
+
+/* Used by PRM_VC_CFG_I2C_MODE */
+#define OMAP4430_SRMODEEN_SHIFT (1 << 4)
+#define OMAP4430_SRMODEEN_MASK BITFIELD(4, 4)
+
+/* Used by PRM_VOLTSETUP_WARMRESET */
+#define OMAP4430_STABLE_COUNT_SHIFT (1 << 0)
+#define OMAP4430_STABLE_COUNT_MASK BITFIELD(0, 5)
+
+/* Used by PRM_VOLTSETUP_WARMRESET */
+#define OMAP4430_STABLE_PRESCAL_SHIFT (1 << 8)
+#define OMAP4430_STABLE_PRESCAL_MASK BITFIELD(8, 9)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_TCM1_MEM_ONSTATE_SHIFT (1 << 20)
+#define OMAP4430_TCM1_MEM_ONSTATE_MASK BITFIELD(20, 21)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_TCM1_MEM_RETSTATE_SHIFT (1 << 10)
+#define OMAP4430_TCM1_MEM_RETSTATE_MASK BITFIELD(10, 10)
+
+/* Used by PM_IVAHD_PWRSTST */
+#define OMAP4430_TCM1_MEM_STATEST_SHIFT (1 << 8)
+#define OMAP4430_TCM1_MEM_STATEST_MASK BITFIELD(8, 9)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_TCM2_MEM_ONSTATE_SHIFT (1 << 22)
+#define OMAP4430_TCM2_MEM_ONSTATE_MASK BITFIELD(22, 23)
+
+/* Used by PM_IVAHD_PWRSTCTRL */
+#define OMAP4430_TCM2_MEM_RETSTATE_SHIFT (1 << 11)
+#define OMAP4430_TCM2_MEM_RETSTATE_MASK BITFIELD(11, 11)
+
+/* Used by PM_IVAHD_PWRSTST */
+#define OMAP4430_TCM2_MEM_STATEST_SHIFT (1 << 10)
+#define OMAP4430_TCM2_MEM_STATEST_MASK BITFIELD(10, 11)
+
+/* Used by RM_TESLA_RSTST */
+#define OMAP4430_TESLASS_EMU_RSTST_SHIFT (1 << 2)
+#define OMAP4430_TESLASS_EMU_RSTST_MASK BITFIELD(2, 2)
+
+/* Used by RM_TESLA_RSTST */
+#define OMAP4430_TESLA_DSP_EMU_REQ_RSTST_SHIFT (1 << 3)
+#define OMAP4430_TESLA_DSP_EMU_REQ_RSTST_MASK BITFIELD(3, 3)
+
+/* Used by PM_TESLA_PWRSTCTRL */
+#define OMAP4430_TESLA_EDMA_ONSTATE_SHIFT (1 << 20)
+#define OMAP4430_TESLA_EDMA_ONSTATE_MASK BITFIELD(20, 21)
+
+/* Used by PM_TESLA_PWRSTCTRL */
+#define OMAP4430_TESLA_EDMA_RETSTATE_SHIFT (1 << 10)
+#define OMAP4430_TESLA_EDMA_RETSTATE_MASK BITFIELD(10, 10)
+
+/* Used by PM_TESLA_PWRSTST */
+#define OMAP4430_TESLA_EDMA_STATEST_SHIFT (1 << 8)
+#define OMAP4430_TESLA_EDMA_STATEST_MASK BITFIELD(8, 9)
+
+/* Used by PM_TESLA_PWRSTCTRL */
+#define OMAP4430_TESLA_L1_ONSTATE_SHIFT (1 << 16)
+#define OMAP4430_TESLA_L1_ONSTATE_MASK BITFIELD(16, 17)
+
+/* Used by PM_TESLA_PWRSTCTRL */
+#define OMAP4430_TESLA_L1_RETSTATE_SHIFT (1 << 8)
+#define OMAP4430_TESLA_L1_RETSTATE_MASK BITFIELD(8, 8)
+
+/* Used by PM_TESLA_PWRSTST */
+#define OMAP4430_TESLA_L1_STATEST_SHIFT (1 << 4)
+#define OMAP4430_TESLA_L1_STATEST_MASK BITFIELD(4, 5)
+
+/* Used by PM_TESLA_PWRSTCTRL */
+#define OMAP4430_TESLA_L2_ONSTATE_SHIFT (1 << 18)
+#define OMAP4430_TESLA_L2_ONSTATE_MASK BITFIELD(18, 19)
+
+/* Used by PM_TESLA_PWRSTCTRL */
+#define OMAP4430_TESLA_L2_RETSTATE_SHIFT (1 << 9)
+#define OMAP4430_TESLA_L2_RETSTATE_MASK BITFIELD(9, 9)
+
+/* Used by PM_TESLA_PWRSTST */
+#define OMAP4430_TESLA_L2_STATEST_SHIFT (1 << 6)
+#define OMAP4430_TESLA_L2_STATEST_MASK BITFIELD(6, 7)
+
+/* Used by PRM_VP_CORE_VLIMITTO, PRM_VP_IVA_VLIMITTO, PRM_VP_MPU_VLIMITTO */
+#define OMAP4430_TIMEOUT_SHIFT (1 << 0)
+#define OMAP4430_TIMEOUT_MASK BITFIELD(0, 15)
+
+/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
+#define OMAP4430_TIMEOUTEN_SHIFT (1 << 3)
+#define OMAP4430_TIMEOUTEN_MASK BITFIELD(3, 3)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_TRANSITION_EN_SHIFT (1 << 8)
+#define OMAP4430_TRANSITION_EN_MASK BITFIELD(8, 8)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_TRANSITION_ST_SHIFT (1 << 8)
+#define OMAP4430_TRANSITION_ST_MASK BITFIELD(8, 8)
+
+/* Used by PRM_VC_VAL_BYPASS */
+#define OMAP4430_VALID_SHIFT (1 << 24)
+#define OMAP4430_VALID_MASK BITFIELD(24, 24)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VC_BYPASSACK_EN_SHIFT (1 << 14)
+#define OMAP4430_VC_BYPASSACK_EN_MASK BITFIELD(14, 14)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VC_BYPASSACK_ST_SHIFT (1 << 14)
+#define OMAP4430_VC_BYPASSACK_ST_MASK BITFIELD(14, 14)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VC_IVA_VPACK_EN_SHIFT (1 << 30)
+#define OMAP4430_VC_IVA_VPACK_EN_MASK BITFIELD(30, 30)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VC_IVA_VPACK_ST_SHIFT (1 << 30)
+#define OMAP4430_VC_IVA_VPACK_ST_MASK BITFIELD(30, 30)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_VC_MPU_VPACK_EN_SHIFT (1 << 6)
+#define OMAP4430_VC_MPU_VPACK_EN_MASK BITFIELD(6, 6)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_VC_MPU_VPACK_ST_SHIFT (1 << 6)
+#define OMAP4430_VC_MPU_VPACK_ST_MASK BITFIELD(6, 6)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VC_RAERR_EN_SHIFT (1 << 12)
+#define OMAP4430_VC_RAERR_EN_MASK BITFIELD(12, 12)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VC_RAERR_ST_SHIFT (1 << 12)
+#define OMAP4430_VC_RAERR_ST_MASK BITFIELD(12, 12)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VC_SAERR_EN_SHIFT (1 << 11)
+#define OMAP4430_VC_SAERR_EN_MASK BITFIELD(11, 11)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VC_SAERR_ST_SHIFT (1 << 11)
+#define OMAP4430_VC_SAERR_ST_MASK BITFIELD(11, 11)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VC_TOERR_EN_SHIFT (1 << 13)
+#define OMAP4430_VC_TOERR_EN_MASK BITFIELD(13, 13)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VC_TOERR_ST_SHIFT (1 << 13)
+#define OMAP4430_VC_TOERR_ST_MASK BITFIELD(13, 13)
+
+/* Used by PRM_VP_CORE_VLIMITTO, PRM_VP_IVA_VLIMITTO, PRM_VP_MPU_VLIMITTO */
+#define OMAP4430_VDDMAX_SHIFT (1 << 24)
+#define OMAP4430_VDDMAX_MASK BITFIELD(24, 31)
+
+/* Used by PRM_VP_CORE_VLIMITTO, PRM_VP_IVA_VLIMITTO, PRM_VP_MPU_VLIMITTO */
+#define OMAP4430_VDDMIN_SHIFT (1 << 16)
+#define OMAP4430_VDDMIN_MASK BITFIELD(16, 23)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_VDD_CORE_I2C_DISABLE_SHIFT (1 << 12)
+#define OMAP4430_VDD_CORE_I2C_DISABLE_MASK BITFIELD(12, 12)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_VDD_CORE_VOLT_MGR_RST_SHIFT (1 << 8)
+#define OMAP4430_VDD_CORE_VOLT_MGR_RST_MASK BITFIELD(8, 8)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_VDD_IVA_I2C_DISABLE_SHIFT (1 << 14)
+#define OMAP4430_VDD_IVA_I2C_DISABLE_MASK BITFIELD(14, 14)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_VDD_IVA_PRESENCE_SHIFT (1 << 9)
+#define OMAP4430_VDD_IVA_PRESENCE_MASK BITFIELD(9, 9)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_VDD_IVA_VOLT_MGR_RST_SHIFT (1 << 7)
+#define OMAP4430_VDD_IVA_VOLT_MGR_RST_MASK BITFIELD(7, 7)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_VDD_MPU_I2C_DISABLE_SHIFT (1 << 13)
+#define OMAP4430_VDD_MPU_I2C_DISABLE_MASK BITFIELD(13, 13)
+
+/* Used by PRM_VOLTCTRL */
+#define OMAP4430_VDD_MPU_PRESENCE_SHIFT (1 << 8)
+#define OMAP4430_VDD_MPU_PRESENCE_MASK BITFIELD(8, 8)
+
+/* Used by PRM_RSTST */
+#define OMAP4430_VDD_MPU_VOLT_MGR_RST_SHIFT (1 << 6)
+#define OMAP4430_VDD_MPU_VOLT_MGR_RST_MASK BITFIELD(6, 6)
+
+/* Used by PRM_VC_VAL_SMPS_RA_VOL */
+#define OMAP4430_VOLRA_VDD_CORE_L_SHIFT (1 << 0)
+#define OMAP4430_VOLRA_VDD_CORE_L_MASK BITFIELD(0, 7)
+
+/* Used by PRM_VC_VAL_SMPS_RA_VOL */
+#define OMAP4430_VOLRA_VDD_IVA_L_SHIFT (1 << 8)
+#define OMAP4430_VOLRA_VDD_IVA_L_MASK BITFIELD(8, 15)
+
+/* Used by PRM_VC_VAL_SMPS_RA_VOL */
+#define OMAP4430_VOLRA_VDD_MPU_L_SHIFT (1 << 16)
+#define OMAP4430_VOLRA_VDD_MPU_L_MASK BITFIELD(16, 23)
+
+/* Used by PRM_VP_CORE_CONFIG, PRM_VP_IVA_CONFIG, PRM_VP_MPU_CONFIG */
+#define OMAP4430_VPENABLE_SHIFT (1 << 0)
+#define OMAP4430_VPENABLE_MASK BITFIELD(0, 0)
+
+/* Used by PRM_VP_CORE_STATUS, PRM_VP_IVA_STATUS, PRM_VP_MPU_STATUS */
+#define OMAP4430_VPINIDLE_SHIFT (1 << 0)
+#define OMAP4430_VPINIDLE_MASK BITFIELD(0, 0)
+
+/* Used by PRM_VP_CORE_VOLTAGE, PRM_VP_IVA_VOLTAGE, PRM_VP_MPU_VOLTAGE */
+#define OMAP4430_VPVOLTAGE_SHIFT (1 << 0)
+#define OMAP4430_VPVOLTAGE_MASK BITFIELD(0, 7)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_CORE_EQVALUE_EN_SHIFT (1 << 20)
+#define OMAP4430_VP_CORE_EQVALUE_EN_MASK BITFIELD(20, 20)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_CORE_EQVALUE_ST_SHIFT (1 << 20)
+#define OMAP4430_VP_CORE_EQVALUE_ST_MASK BITFIELD(20, 20)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_CORE_MAXVDD_EN_SHIFT (1 << 18)
+#define OMAP4430_VP_CORE_MAXVDD_EN_MASK BITFIELD(18, 18)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_CORE_MAXVDD_ST_SHIFT (1 << 18)
+#define OMAP4430_VP_CORE_MAXVDD_ST_MASK BITFIELD(18, 18)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_CORE_MINVDD_EN_SHIFT (1 << 17)
+#define OMAP4430_VP_CORE_MINVDD_EN_MASK BITFIELD(17, 17)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_CORE_MINVDD_ST_SHIFT (1 << 17)
+#define OMAP4430_VP_CORE_MINVDD_ST_MASK BITFIELD(17, 17)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_CORE_NOSMPSACK_EN_SHIFT (1 << 19)
+#define OMAP4430_VP_CORE_NOSMPSACK_EN_MASK BITFIELD(19, 19)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_CORE_NOSMPSACK_ST_SHIFT (1 << 19)
+#define OMAP4430_VP_CORE_NOSMPSACK_ST_MASK BITFIELD(19, 19)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_CORE_OPPCHANGEDONE_EN_SHIFT (1 << 16)
+#define OMAP4430_VP_CORE_OPPCHANGEDONE_EN_MASK BITFIELD(16, 16)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_CORE_OPPCHANGEDONE_ST_SHIFT (1 << 16)
+#define OMAP4430_VP_CORE_OPPCHANGEDONE_ST_MASK BITFIELD(16, 16)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_CORE_TRANXDONE_EN_SHIFT (1 << 21)
+#define OMAP4430_VP_CORE_TRANXDONE_EN_MASK BITFIELD(21, 21)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_CORE_TRANXDONE_ST_SHIFT (1 << 21)
+#define OMAP4430_VP_CORE_TRANXDONE_ST_MASK BITFIELD(21, 21)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_IVA_EQVALUE_EN_SHIFT (1 << 28)
+#define OMAP4430_VP_IVA_EQVALUE_EN_MASK BITFIELD(28, 28)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_IVA_EQVALUE_ST_SHIFT (1 << 28)
+#define OMAP4430_VP_IVA_EQVALUE_ST_MASK BITFIELD(28, 28)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_IVA_MAXVDD_EN_SHIFT (1 << 26)
+#define OMAP4430_VP_IVA_MAXVDD_EN_MASK BITFIELD(26, 26)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_IVA_MAXVDD_ST_SHIFT (1 << 26)
+#define OMAP4430_VP_IVA_MAXVDD_ST_MASK BITFIELD(26, 26)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_IVA_MINVDD_EN_SHIFT (1 << 25)
+#define OMAP4430_VP_IVA_MINVDD_EN_MASK BITFIELD(25, 25)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_IVA_MINVDD_ST_SHIFT (1 << 25)
+#define OMAP4430_VP_IVA_MINVDD_ST_MASK BITFIELD(25, 25)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_IVA_NOSMPSACK_EN_SHIFT (1 << 27)
+#define OMAP4430_VP_IVA_NOSMPSACK_EN_MASK BITFIELD(27, 27)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_IVA_NOSMPSACK_ST_SHIFT (1 << 27)
+#define OMAP4430_VP_IVA_NOSMPSACK_ST_MASK BITFIELD(27, 27)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_IVA_OPPCHANGEDONE_EN_SHIFT (1 << 24)
+#define OMAP4430_VP_IVA_OPPCHANGEDONE_EN_MASK BITFIELD(24, 24)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_IVA_OPPCHANGEDONE_ST_SHIFT (1 << 24)
+#define OMAP4430_VP_IVA_OPPCHANGEDONE_ST_MASK BITFIELD(24, 24)
+
+/* Used by PRM_IRQENABLE_DUCATI, PRM_IRQENABLE_MPU */
+#define OMAP4430_VP_IVA_TRANXDONE_EN_SHIFT (1 << 29)
+#define OMAP4430_VP_IVA_TRANXDONE_EN_MASK BITFIELD(29, 29)
+
+/* Used by PRM_IRQSTATUS_DUCATI, PRM_IRQSTATUS_MPU */
+#define OMAP4430_VP_IVA_TRANXDONE_ST_SHIFT (1 << 29)
+#define OMAP4430_VP_IVA_TRANXDONE_ST_MASK BITFIELD(29, 29)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_VP_MPU_EQVALUE_EN_SHIFT (1 << 4)
+#define OMAP4430_VP_MPU_EQVALUE_EN_MASK BITFIELD(4, 4)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_VP_MPU_EQVALUE_ST_SHIFT (1 << 4)
+#define OMAP4430_VP_MPU_EQVALUE_ST_MASK BITFIELD(4, 4)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_VP_MPU_MAXVDD_EN_SHIFT (1 << 2)
+#define OMAP4430_VP_MPU_MAXVDD_EN_MASK BITFIELD(2, 2)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_VP_MPU_MAXVDD_ST_SHIFT (1 << 2)
+#define OMAP4430_VP_MPU_MAXVDD_ST_MASK BITFIELD(2, 2)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_VP_MPU_MINVDD_EN_SHIFT (1 << 1)
+#define OMAP4430_VP_MPU_MINVDD_EN_MASK BITFIELD(1, 1)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_VP_MPU_MINVDD_ST_SHIFT (1 << 1)
+#define OMAP4430_VP_MPU_MINVDD_ST_MASK BITFIELD(1, 1)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_VP_MPU_NOSMPSACK_EN_SHIFT (1 << 3)
+#define OMAP4430_VP_MPU_NOSMPSACK_EN_MASK BITFIELD(3, 3)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_VP_MPU_NOSMPSACK_ST_SHIFT (1 << 3)
+#define OMAP4430_VP_MPU_NOSMPSACK_ST_MASK BITFIELD(3, 3)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_VP_MPU_OPPCHANGEDONE_EN_SHIFT (1 << 0)
+#define OMAP4430_VP_MPU_OPPCHANGEDONE_EN_MASK BITFIELD(0, 0)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_VP_MPU_OPPCHANGEDONE_ST_SHIFT (1 << 0)
+#define OMAP4430_VP_MPU_OPPCHANGEDONE_ST_MASK BITFIELD(0, 0)
+
+/* Used by PRM_IRQENABLE_MPU_2 */
+#define OMAP4430_VP_MPU_TRANXDONE_EN_SHIFT (1 << 5)
+#define OMAP4430_VP_MPU_TRANXDONE_EN_MASK BITFIELD(5, 5)
+
+/* Used by PRM_IRQSTATUS_MPU_2 */
+#define OMAP4430_VP_MPU_TRANXDONE_ST_SHIFT (1 << 5)
+#define OMAP4430_VP_MPU_TRANXDONE_ST_MASK BITFIELD(5, 5)
+
+/* Used by PRM_SRAM_COUNT */
+#define OMAP4430_VSETUPCNT_VALUE_SHIFT (1 << 8)
+#define OMAP4430_VSETUPCNT_VALUE_MASK BITFIELD(8, 15)
+
+/* Used by PRM_VP_CORE_VSTEPMAX, PRM_VP_IVA_VSTEPMAX, PRM_VP_MPU_VSTEPMAX */
+#define OMAP4430_VSTEPMAX_SHIFT (1 << 0)
+#define OMAP4430_VSTEPMAX_MASK BITFIELD(0, 7)
+
+/* Used by PRM_VP_CORE_VSTEPMIN, PRM_VP_IVA_VSTEPMIN, PRM_VP_MPU_VSTEPMIN */
+#define OMAP4430_VSTEPMIN_SHIFT (1 << 0)
+#define OMAP4430_VSTEPMIN_MASK BITFIELD(0, 7)
+
+/* Used by PRM_MODEM_IF_CTRL */
+#define OMAP4430_WAKE_MODEM_SHIFT (1 << 0)
+#define OMAP4430_WAKE_MODEM_MASK BITFIELD(0, 0)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DISPC_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_DISPC_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DISPC_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DISPC_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DISPC_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_DISPC_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DISPC_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_DISPC_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_DMIC_WKDEP */
+#define OMAP4430_WKUPDEP_DMIC_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_DMIC_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_ABE_DMIC_WKDEP */
+#define OMAP4430_WKUPDEP_DMIC_DMA_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_DMIC_DMA_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_ABE_DMIC_WKDEP */
+#define OMAP4430_WKUPDEP_DMIC_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DMIC_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_DMIC_WKDEP */
+#define OMAP4430_WKUPDEP_DMIC_IRQ_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_DMIC_IRQ_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_DMTIMER10_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER10_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DMTIMER10_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_DMTIMER11_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER11_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_DMTIMER11_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_DMTIMER11_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER11_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DMTIMER11_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_DMTIMER2_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER2_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DMTIMER2_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_DMTIMER3_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER3_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_DMTIMER3_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_DMTIMER3_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER3_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DMTIMER3_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_DMTIMER4_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER4_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_DMTIMER4_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_DMTIMER4_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER4_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DMTIMER4_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_DMTIMER9_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER9_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_DMTIMER9_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_DMTIMER9_WKDEP */
+#define OMAP4430_WKUPDEP_DMTIMER9_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_DMTIMER9_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI1_DUCATI_SHIFT (1 << 5)
+#define OMAP4430_WKUPDEP_DSI1_DUCATI_MASK BITFIELD(5, 5)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI1_MPU_SHIFT (1 << 4)
+#define OMAP4430_WKUPDEP_DSI1_MPU_MASK BITFIELD(4, 4)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI1_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_DSI1_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI1_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_DSI1_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI2_DUCATI_SHIFT (1 << 9)
+#define OMAP4430_WKUPDEP_DSI2_DUCATI_MASK BITFIELD(9, 9)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI2_MPU_SHIFT (1 << 8)
+#define OMAP4430_WKUPDEP_DSI2_MPU_MASK BITFIELD(8, 8)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI2_SDMA_SHIFT (1 << 11)
+#define OMAP4430_WKUPDEP_DSI2_SDMA_MASK BITFIELD(11, 11)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_DSI2_TESLA_SHIFT (1 << 10)
+#define OMAP4430_WKUPDEP_DSI2_TESLA_MASK BITFIELD(10, 10)
+
+/* Used by PM_WKUP_GPIO1_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO1_IRQ1_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_GPIO1_IRQ1_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_WKUP_GPIO1_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO1_IRQ1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_GPIO1_IRQ1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_WKUP_GPIO1_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO1_IRQ2_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_GPIO1_IRQ2_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_GPIO2_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO2_IRQ1_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_GPIO2_IRQ1_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_GPIO2_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO2_IRQ1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_GPIO2_IRQ1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_GPIO2_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO2_IRQ2_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_GPIO2_IRQ2_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_GPIO3_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO3_IRQ1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_GPIO3_IRQ1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_GPIO3_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO3_IRQ2_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_GPIO3_IRQ2_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_GPIO4_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO4_IRQ1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_GPIO4_IRQ1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_GPIO4_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO4_IRQ2_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_GPIO4_IRQ2_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_GPIO5_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO5_IRQ1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_GPIO5_IRQ1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_GPIO5_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO5_IRQ2_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_GPIO5_IRQ2_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_GPIO6_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO6_IRQ1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_GPIO6_IRQ1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_GPIO6_WKDEP */
+#define OMAP4430_WKUPDEP_GPIO6_IRQ2_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_GPIO6_IRQ2_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_HDMIDMA_SDMA_SHIFT (1 << 19)
+#define OMAP4430_WKUPDEP_HDMIDMA_SDMA_MASK BITFIELD(19, 19)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_HDMIIRQ_DUCATI_SHIFT (1 << 13)
+#define OMAP4430_WKUPDEP_HDMIIRQ_DUCATI_MASK BITFIELD(13, 13)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_HDMIIRQ_MPU_SHIFT (1 << 12)
+#define OMAP4430_WKUPDEP_HDMIIRQ_MPU_MASK BITFIELD(12, 12)
+
+/* Used by PM_DSS_DSS_WKDEP */
+#define OMAP4430_WKUPDEP_HDMIIRQ_TESLA_SHIFT (1 << 14)
+#define OMAP4430_WKUPDEP_HDMIIRQ_TESLA_MASK BITFIELD(14, 14)
+
+/* Used by PM_L4PER_HECC1_WKDEP */
+#define OMAP4430_WKUPDEP_HECC1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_HECC1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_HECC2_WKDEP */
+#define OMAP4430_WKUPDEP_HECC2_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_HECC2_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_HSI_WKDEP */
+#define OMAP4430_WKUPDEP_HSI_DSP_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_HSI_DSP_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L3INIT_HSI_WKDEP */
+#define OMAP4430_WKUPDEP_HSI_MCU_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_HSI_MCU_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_HSI_WKDEP */
+#define OMAP4430_WKUPDEP_HSI_MCU_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_HSI_MCU_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_I2C1_WKDEP */
+#define OMAP4430_WKUPDEP_I2C1_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_I2C1_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_I2C1_WKDEP */
+#define OMAP4430_WKUPDEP_I2C1_IRQ_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_I2C1_IRQ_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_I2C1_WKDEP */
+#define OMAP4430_WKUPDEP_I2C1_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_I2C1_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_I2C2_WKDEP */
+#define OMAP4430_WKUPDEP_I2C2_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_I2C2_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_I2C2_WKDEP */
+#define OMAP4430_WKUPDEP_I2C2_IRQ_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_I2C2_IRQ_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_I2C2_WKDEP */
+#define OMAP4430_WKUPDEP_I2C2_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_I2C2_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_I2C3_WKDEP */
+#define OMAP4430_WKUPDEP_I2C3_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_I2C3_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_I2C3_WKDEP */
+#define OMAP4430_WKUPDEP_I2C3_IRQ_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_I2C3_IRQ_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_I2C3_WKDEP */
+#define OMAP4430_WKUPDEP_I2C3_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_I2C3_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_I2C4_WKDEP */
+#define OMAP4430_WKUPDEP_I2C4_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_I2C4_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_I2C4_WKDEP */
+#define OMAP4430_WKUPDEP_I2C4_IRQ_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_I2C4_IRQ_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_I2C4_WKDEP */
+#define OMAP4430_WKUPDEP_I2C4_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_I2C4_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_I2C5_WKDEP */
+#define OMAP4430_WKUPDEP_I2C5_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_I2C5_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_I2C5_WKDEP */
+#define OMAP4430_WKUPDEP_I2C5_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_I2C5_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_WKUP_KEYBOARD_WKDEP */
+#define OMAP4430_WKUPDEP_KEYBOARD_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_KEYBOARD_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_MCASP_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP1_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_MCASP1_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_ABE_MCASP_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP1_DMA_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_MCASP1_DMA_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_ABE_MCASP_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP1_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCASP1_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_MCASP_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP1_IRQ_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCASP1_IRQ_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_MCASP2_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP2_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_MCASP2_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_MCASP2_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP2_DMA_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_MCASP2_DMA_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_MCASP2_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP2_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCASP2_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MCASP2_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP2_IRQ_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCASP2_IRQ_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_MCASP3_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP3_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_MCASP3_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_MCASP3_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP3_DMA_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_MCASP3_DMA_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_MCASP3_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP3_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCASP3_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MCASP3_WKDEP */
+#define OMAP4430_WKUPDEP_MCASP3_IRQ_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCASP3_IRQ_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_MCBSP1_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCBSP1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_MCBSP1_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP1_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCBSP1_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_ABE_MCBSP1_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP1_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCBSP1_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_MCBSP2_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP2_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCBSP2_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_MCBSP2_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP2_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCBSP2_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_ABE_MCBSP2_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP2_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCBSP2_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_MCBSP3_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP3_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCBSP3_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_MCBSP3_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP3_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCBSP3_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_ABE_MCBSP3_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP3_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCBSP3_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_MCBSP4_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP4_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCBSP4_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MCBSP4_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP4_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCBSP4_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_MCBSP4_WKDEP */
+#define OMAP4430_WKUPDEP_MCBSP4_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCBSP4_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_MCSPI1_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI1_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MCSPI1_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_MCSPI1_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCSPI1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MCSPI1_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI1_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCSPI1_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_MCSPI1_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI1_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MCSPI1_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_MCSPI2_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI2_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MCSPI2_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_MCSPI2_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI2_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCSPI2_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MCSPI2_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI2_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCSPI2_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_MCSPI3_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI3_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCSPI3_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MCSPI3_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI3_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCSPI3_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_MCSPI4_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI4_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MCSPI4_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MCSPI4_WKDEP */
+#define OMAP4430_WKUPDEP_MCSPI4_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MCSPI4_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L3INIT_MMC1_WKDEP */
+#define OMAP4430_WKUPDEP_MMC1_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MMC1_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_MMC1_WKDEP */
+#define OMAP4430_WKUPDEP_MMC1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MMC1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_MMC1_WKDEP */
+#define OMAP4430_WKUPDEP_MMC1_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MMC1_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L3INIT_MMC1_WKDEP */
+#define OMAP4430_WKUPDEP_MMC1_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MMC1_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L3INIT_MMC2_WKDEP */
+#define OMAP4430_WKUPDEP_MMC2_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MMC2_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_MMC2_WKDEP */
+#define OMAP4430_WKUPDEP_MMC2_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MMC2_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_MMC2_WKDEP */
+#define OMAP4430_WKUPDEP_MMC2_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MMC2_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L3INIT_MMC2_WKDEP */
+#define OMAP4430_WKUPDEP_MMC2_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MMC2_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L3INIT_MMC6_WKDEP */
+#define OMAP4430_WKUPDEP_MMC6_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MMC6_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_MMC6_WKDEP */
+#define OMAP4430_WKUPDEP_MMC6_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MMC6_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_MMC6_WKDEP */
+#define OMAP4430_WKUPDEP_MMC6_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_MMC6_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_MMCSD3_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD3_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MMCSD3_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_MMCSD3_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD3_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MMCSD3_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MMCSD3_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD3_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MMCSD3_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_MMCSD4_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD4_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MMCSD4_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_MMCSD4_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD4_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MMCSD4_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MMCSD4_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD4_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MMCSD4_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_MMCSD5_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD5_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_MMCSD5_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_MMCSD5_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD5_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_MMCSD5_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_MMCSD5_WKDEP */
+#define OMAP4430_WKUPDEP_MMCSD5_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_MMCSD5_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L3INIT_PCIESS_WKDEP */
+#define OMAP4430_WKUPDEP_PCIESS_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_PCIESS_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_PCIESS_WKDEP */
+#define OMAP4430_WKUPDEP_PCIESS_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_PCIESS_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_PDM_WKDEP */
+#define OMAP4430_WKUPDEP_PDM_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_PDM_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_ABE_PDM_WKDEP */
+#define OMAP4430_WKUPDEP_PDM_DMA_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_PDM_DMA_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_ABE_PDM_WKDEP */
+#define OMAP4430_WKUPDEP_PDM_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_PDM_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_PDM_WKDEP */
+#define OMAP4430_WKUPDEP_PDM_IRQ_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_PDM_IRQ_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_WKUP_RTC_WKDEP */
+#define OMAP4430_WKUPDEP_RTC_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_RTC_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_SATA_WKDEP */
+#define OMAP4430_WKUPDEP_SATA_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_SATA_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_SATA_WKDEP */
+#define OMAP4430_WKUPDEP_SATA_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_SATA_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_SLIMBUS_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_ABE_SLIMBUS_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_SLIMBUS1_DMA_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_ABE_SLIMBUS_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_SLIMBUS_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_SLIMBUS1_IRQ_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_SLIMBUS2_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_SDMA_SHIFT (1 << 7)
+#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_SDMA_MASK BITFIELD(7, 7)
+
+/* Used by PM_L4PER_SLIMBUS2_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_TESLA_SHIFT (1 << 6)
+#define OMAP4430_WKUPDEP_SLIMBUS2_DMA_TESLA_MASK BITFIELD(6, 6)
+
+/* Used by PM_L4PER_SLIMBUS2_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_SLIMBUS2_WKDEP */
+#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_SLIMBUS2_IRQ_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ALWON_SR_CORE_WKDEP */
+#define OMAP4430_WKUPDEP_SR_CORE_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_SR_CORE_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_ALWON_SR_CORE_WKDEP */
+#define OMAP4430_WKUPDEP_SR_CORE_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_SR_CORE_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ALWON_SR_IVA_WKDEP */
+#define OMAP4430_WKUPDEP_SR_IVA_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_SR_IVA_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_ALWON_SR_IVA_WKDEP */
+#define OMAP4430_WKUPDEP_SR_IVA_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_SR_IVA_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ALWON_SR_MPU_WKDEP */
+#define OMAP4430_WKUPDEP_SR_MPU_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_SR_MPU_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_WKUP_TIMER12_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER12_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_TIMER12_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_WKUP_TIMER1_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_TIMER1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_TIMER5_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER5_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_TIMER5_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_TIMER5_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER5_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_TIMER5_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_TIMER6_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER6_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_TIMER6_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_TIMER6_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER6_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_TIMER6_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_TIMER7_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER7_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_TIMER7_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_TIMER7_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER7_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_TIMER7_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_ABE_TIMER8_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER8_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_TIMER8_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_TIMER8_WKDEP */
+#define OMAP4430_WKUPDEP_TIMER8_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_TIMER8_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_UART1_WKDEP */
+#define OMAP4430_WKUPDEP_UART1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_UART1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_UART1_WKDEP */
+#define OMAP4430_WKUPDEP_UART1_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_UART1_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_UART2_WKDEP */
+#define OMAP4430_WKUPDEP_UART2_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_UART2_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_UART2_WKDEP */
+#define OMAP4430_WKUPDEP_UART2_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_UART2_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_UART3_WKDEP */
+#define OMAP4430_WKUPDEP_UART3_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_UART3_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L4PER_UART3_WKDEP */
+#define OMAP4430_WKUPDEP_UART3_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_UART3_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_UART3_WKDEP */
+#define OMAP4430_WKUPDEP_UART3_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_UART3_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L4PER_UART3_WKDEP */
+#define OMAP4430_WKUPDEP_UART3_TESLA_SHIFT (1 << 2)
+#define OMAP4430_WKUPDEP_UART3_TESLA_MASK BITFIELD(2, 2)
+
+/* Used by PM_L4PER_UART4_WKDEP */
+#define OMAP4430_WKUPDEP_UART4_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_UART4_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L4PER_UART4_WKDEP */
+#define OMAP4430_WKUPDEP_UART4_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_UART4_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_L3INIT_UNIPRO1_WKDEP */
+#define OMAP4430_WKUPDEP_UNIPRO1_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_UNIPRO1_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_UNIPRO1_WKDEP */
+#define OMAP4430_WKUPDEP_UNIPRO1_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_UNIPRO1_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_USB_HOST_WKDEP */
+#define OMAP4430_WKUPDEP_USB_HOST_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_USB_HOST_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_USB_HOST_FS_WKDEP */
+#define OMAP4430_WKUPDEP_USB_HOST_FS_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_USB_HOST_FS_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_USB_HOST_FS_WKDEP */
+#define OMAP4430_WKUPDEP_USB_HOST_FS_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_USB_HOST_FS_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_USB_HOST_WKDEP */
+#define OMAP4430_WKUPDEP_USB_HOST_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_USB_HOST_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_USB_OTG_WKDEP */
+#define OMAP4430_WKUPDEP_USB_OTG_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_USB_OTG_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_USB_OTG_WKDEP */
+#define OMAP4430_WKUPDEP_USB_OTG_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_USB_OTG_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_USB_TLL_WKDEP */
+#define OMAP4430_WKUPDEP_USB_TLL_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_USB_TLL_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_L3INIT_USB_TLL_WKDEP */
+#define OMAP4430_WKUPDEP_USB_TLL_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_USB_TLL_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_WKUP_USIM_WKDEP */
+#define OMAP4430_WKUPDEP_USIM_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_USIM_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_WKUP_USIM_WKDEP */
+#define OMAP4430_WKUPDEP_USIM_SDMA_SHIFT (1 << 3)
+#define OMAP4430_WKUPDEP_USIM_SDMA_MASK BITFIELD(3, 3)
+
+/* Used by PM_WKUP_WDT2_WKDEP */
+#define OMAP4430_WKUPDEP_WDT2_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_WDT2_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PM_WKUP_WDT2_WKDEP */
+#define OMAP4430_WKUPDEP_WDT2_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_WDT2_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_ABE_WDT3_WKDEP */
+#define OMAP4430_WKUPDEP_WDT3_MPU_SHIFT (1 << 0)
+#define OMAP4430_WKUPDEP_WDT3_MPU_MASK BITFIELD(0, 0)
+
+/* Used by PM_L3INIT_HSI_WKDEP */
+#define OMAP4430_WKUPDEP_WGM_HSI_WAKE_MPU_SHIFT (1 << 8)
+#define OMAP4430_WKUPDEP_WGM_HSI_WAKE_MPU_MASK BITFIELD(8, 8)
+
+/* Used by PM_L3INIT_XHPI_WKDEP */
+#define OMAP4430_WKUPDEP_XHPI_DUCATI_SHIFT (1 << 1)
+#define OMAP4430_WKUPDEP_XHPI_DUCATI_MASK BITFIELD(1, 1)
+
+/* Used by PRM_IO_PMCTRL */
+#define OMAP4430_WUCLK_CTRL_SHIFT (1 << 8)
+#define OMAP4430_WUCLK_CTRL_MASK BITFIELD(8, 8)
+
+/* Used by PRM_IO_PMCTRL */
+#define OMAP4430_WUCLK_STATUS_SHIFT (1 << 9)
+#define OMAP4430_WUCLK_STATUS_MASK BITFIELD(9, 9)
+#endif
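
Each field above is described by a BITFIELD(low, high) mask over its bit span. A minimal sketch of how mask/shift pairs of this kind are conventionally consumed is shown below; the field_mask() helper, the local RSTTIME2_SHIFT value, and the direct pointer access are illustrative assumptions and not part of the patch, while the RSTTIME2 span (bits 10..14) matches the BITFIELD(10, 14) definition above.

/*
 * Minimal sketch, not taken from this patch: the conventional
 * read-modify-write pattern for mask/shift register field macros.
 * field_mask(l, h) stands in for BITFIELD(l, h) and is assumed to
 * cover bits l..h inclusive; the register is accessed through a
 * plain volatile pointer for illustration.
 */
#include <stdint.h>

#define field_mask(l, h)  ((uint32_t)(((1u << ((h) - (l) + 1)) - 1) << (l)))

/* RSTTIME2 occupies bits 10..14 of PRM_RSTTIME (see BITFIELD(10, 14) above). */
#define RSTTIME2_SHIFT  10
#define RSTTIME2_MASK   field_mask(10, 14)

static void set_rsttime2(volatile uint32_t *prm_rsttime, uint32_t ticks)
{
	uint32_t v = *prm_rsttime;                       /* read current value */

	v &= ~RSTTIME2_MASK;                             /* clear the field    */
	v |= (ticks << RSTTIME2_SHIFT) & RSTTIME2_MASK;  /* insert new value   */
	*prm_rsttime = v;                                /* write back         */
}
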
diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h
index a117f853ea3..ea050ce188a 100644
--- a/arch/arm/mach-omap2/prm.h
+++ b/arch/arm/mach-omap2/prm.h
@@ -4,8 +4,8 @@
/*
* OMAP2/3 Power/Reset Management (PRM) register definitions
*
- * Copyright (C) 2007 Texas Instruments, Inc.
- * Copyright (C) 2007 Nokia Corporation
+ * Copyright (C) 2007-2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
*
* Written by Paul Walmsley
*
@@ -22,6 +22,10 @@
OMAP2_L4_IO_ADDRESS(OMAP2430_PRM_BASE + (module) + (reg))
#define OMAP34XX_PRM_REGADDR(module, reg) \
OMAP2_L4_IO_ADDRESS(OMAP3430_PRM_BASE + (module) + (reg))
+#define OMAP44XX_PRM_REGADDR(module, reg) \
+ OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (module) + (reg))
+
+#include "prm44xx.h"
/*
* Architecture-specific global PRM registers
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
new file mode 100644
index 00000000000..89be97f0589
--- /dev/null
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -0,0 +1,411 @@
+/*
+ * OMAP44xx PRM instance offset macros
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Paul Walmsley (paul@pwsan.com)
+ * Rajendra Nayak (rnayak@ti.com)
+ * Benoit Cousson (b-cousson@ti.com)
+ *
+ * This file is automatically generated from the OMAP hardware databases.
+ * We respectfully ask that any modifications to this file be coordinated
+ * with the public linux-omap@vger.kernel.org mailing list and the
+ * authors above to ensure that the autogeneration scripts are kept
+ * up-to-date with the file contents.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM44XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRM44XX_H
+
+
+/* PRM */
+
+
+/* PRM.OCP_SOCKET_PRM register offsets */
+#define OMAP4430_REVISION_PRM OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0000)
+#define OMAP4430_PRM_IRQSTATUS_MPU OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0010)
+#define OMAP4430_PRM_IRQSTATUS_MPU_2 OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0014)
+#define OMAP4430_PRM_IRQENABLE_MPU OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0018)
+#define OMAP4430_PRM_IRQENABLE_MPU_2 OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x001c)
+#define OMAP4430_PRM_IRQSTATUS_DUCATI OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0020)
+#define OMAP4430_PRM_IRQENABLE_DUCATI OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0028)
+#define OMAP4430_PRM_IRQSTATUS_TESLA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0030)
+#define OMAP4430_PRM_IRQENABLE_TESLA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0038)
+#define OMAP4430_PRM_PRM_PROFILING_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_MOD, 0x0040)
+
+/* PRM.CKGEN_PRM register offsets */
+#define OMAP4430_CM_ABE_DSS_SYS_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0000)
+#define OMAP4430_CM_DPLL_SYS_REF_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0004)
+#define OMAP4430_CM_L4_WKUP_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0008)
+#define OMAP4430_CM_ABE_PLL_REF_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x000c)
+#define OMAP4430_CM_SYS_CLKSEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CKGEN_MOD, 0x0010)
+
+/* PRM.MPU_PRM register offsets */
+#define OMAP4430_PM_MPU_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0000)
+#define OMAP4430_PM_MPU_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0004)
+#define OMAP4430_RM_MPU_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0014)
+#define OMAP4430_RM_MPU_MPU_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_MPU_MOD, 0x0024)
+
+/* PRM.TESLA_PRM register offsets */
+#define OMAP4430_PM_TESLA_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0000)
+#define OMAP4430_PM_TESLA_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0004)
+#define OMAP4430_RM_TESLA_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0010)
+#define OMAP4430_RM_TESLA_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0014)
+#define OMAP4430_RM_TESLA_TESLA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_TESLA_MOD, 0x0024)
+
+/* PRM.ABE_PRM register offsets */
+#define OMAP4430_PM_ABE_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0000)
+#define OMAP4430_PM_ABE_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0004)
+#define OMAP4430_RM_ABE_AESS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x002c)
+#define OMAP4430_PM_ABE_PDM_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0030)
+#define OMAP4430_RM_ABE_PDM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0034)
+#define OMAP4430_PM_ABE_DMIC_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0038)
+#define OMAP4430_RM_ABE_DMIC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x003c)
+#define OMAP4430_PM_ABE_MCASP_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0040)
+#define OMAP4430_RM_ABE_MCASP_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0044)
+#define OMAP4430_PM_ABE_MCBSP1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0048)
+#define OMAP4430_RM_ABE_MCBSP1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x004c)
+#define OMAP4430_PM_ABE_MCBSP2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0050)
+#define OMAP4430_RM_ABE_MCBSP2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0054)
+#define OMAP4430_PM_ABE_MCBSP3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0058)
+#define OMAP4430_RM_ABE_MCBSP3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x005c)
+#define OMAP4430_PM_ABE_SLIMBUS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0060)
+#define OMAP4430_RM_ABE_SLIMBUS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0064)
+#define OMAP4430_PM_ABE_TIMER5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0068)
+#define OMAP4430_RM_ABE_TIMER5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x006c)
+#define OMAP4430_PM_ABE_TIMER6_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0070)
+#define OMAP4430_RM_ABE_TIMER6_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0074)
+#define OMAP4430_PM_ABE_TIMER7_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0078)
+#define OMAP4430_RM_ABE_TIMER7_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x007c)
+#define OMAP4430_PM_ABE_TIMER8_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0080)
+#define OMAP4430_RM_ABE_TIMER8_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0084)
+#define OMAP4430_PM_ABE_WDT3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x0088)
+#define OMAP4430_RM_ABE_WDT3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ABE_MOD, 0x008c)
+
+/* PRM.ALWAYS_ON_PRM register offsets */
+#define OMAP4430_RM_ALWON_MDMINTC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0024)
+#define OMAP4430_PM_ALWON_SR_MPU_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0028)
+#define OMAP4430_RM_ALWON_SR_MPU_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x002c)
+#define OMAP4430_PM_ALWON_SR_IVA_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0030)
+#define OMAP4430_RM_ALWON_SR_IVA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0034)
+#define OMAP4430_PM_ALWON_SR_CORE_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x0038)
+#define OMAP4430_RM_ALWON_SR_CORE_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_ALWAYS_ON_MOD, 0x003c)
+
+/* PRM.CORE_PRM register offsets */
+#define OMAP4430_PM_CORE_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0000)
+#define OMAP4430_PM_CORE_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0004)
+#define OMAP4430_RM_L3_1_L3_1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0024)
+#define OMAP4430_RM_L3_2_L3_2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0124)
+#define OMAP4430_RM_L3_2_GPMC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x012c)
+#define OMAP4430_RM_L3_2_OCMC_RAM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0134)
+#define OMAP4430_RM_DUCATI_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0210)
+#define OMAP4430_RM_DUCATI_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0214)
+#define OMAP4430_RM_DUCATI_DUCATI_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0224)
+#define OMAP4430_RM_SDMA_SDMA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0324)
+#define OMAP4430_RM_MEMIF_DMM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0424)
+#define OMAP4430_RM_MEMIF_EMIF_FW_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x042c)
+#define OMAP4430_RM_MEMIF_EMIF_1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0434)
+#define OMAP4430_RM_MEMIF_EMIF_2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x043c)
+#define OMAP4430_RM_MEMIF_DLL_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0444)
+#define OMAP4430_RM_MEMIF_EMIF_H1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0454)
+#define OMAP4430_RM_MEMIF_EMIF_H2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x045c)
+#define OMAP4430_RM_MEMIF_DLL_H_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0464)
+#define OMAP4430_RM_D2D_SAD2D_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0524)
+#define OMAP4430_RM_D2D_MODEM_ICR_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x052c)
+#define OMAP4430_RM_D2D_SAD2D_FW_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0534)
+#define OMAP4430_RM_L4CFG_L4_CFG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0624)
+#define OMAP4430_RM_L4CFG_HW_SEM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x062c)
+#define OMAP4430_RM_L4CFG_MAILBOX_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0634)
+#define OMAP4430_RM_L4CFG_SAR_ROM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x063c)
+#define OMAP4430_RM_L3INSTR_L3_3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0724)
+#define OMAP4430_RM_L3INSTR_L3_INSTR_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x072c)
+#define OMAP4430_RM_L3INSTR_OCP_WP1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CORE_MOD, 0x0744)
+
+/* PRM.IVAHD_PRM register offsets */
+#define OMAP4430_PM_IVAHD_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0000)
+#define OMAP4430_PM_IVAHD_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0004)
+#define OMAP4430_RM_IVAHD_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0010)
+#define OMAP4430_RM_IVAHD_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0014)
+#define OMAP4430_RM_IVAHD_IVAHD_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x0024)
+#define OMAP4430_RM_IVAHD_SL2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_IVAHD_MOD, 0x002c)
+
+/* PRM.CAM_PRM register offsets */
+#define OMAP4430_PM_CAM_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0000)
+#define OMAP4430_PM_CAM_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0004)
+#define OMAP4430_RM_CAM_ISS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x0024)
+#define OMAP4430_RM_CAM_FDIF_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CAM_MOD, 0x002c)
+
+/* PRM.DSS_PRM register offsets */
+#define OMAP4430_PM_DSS_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0000)
+#define OMAP4430_PM_DSS_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0004)
+#define OMAP4430_PM_DSS_DSS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0020)
+#define OMAP4430_RM_DSS_DSS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x0024)
+#define OMAP4430_RM_DSS_DEISS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DSS_MOD, 0x002c)
+
+/* PRM.GFX_PRM register offsets */
+#define OMAP4430_PM_GFX_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0000)
+#define OMAP4430_PM_GFX_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0004)
+#define OMAP4430_RM_GFX_GFX_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_GFX_MOD, 0x0024)
+
+/* PRM.L3INIT_PRM register offsets */
+#define OMAP4430_PM_L3INIT_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0000)
+#define OMAP4430_PM_L3INIT_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0004)
+#define OMAP4430_PM_L3INIT_MMC1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0028)
+#define OMAP4430_RM_L3INIT_MMC1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x002c)
+#define OMAP4430_PM_L3INIT_MMC2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0030)
+#define OMAP4430_RM_L3INIT_MMC2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0034)
+#define OMAP4430_PM_L3INIT_HSI_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0038)
+#define OMAP4430_RM_L3INIT_HSI_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x003c)
+#define OMAP4430_PM_L3INIT_UNIPRO1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0040)
+#define OMAP4430_RM_L3INIT_UNIPRO1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0044)
+#define OMAP4430_PM_L3INIT_USB_HOST_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0058)
+#define OMAP4430_RM_L3INIT_USB_HOST_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x005c)
+#define OMAP4430_PM_L3INIT_USB_OTG_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0060)
+#define OMAP4430_RM_L3INIT_USB_OTG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0064)
+#define OMAP4430_PM_L3INIT_USB_TLL_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0068)
+#define OMAP4430_RM_L3INIT_USB_TLL_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x006c)
+#define OMAP4430_RM_L3INIT_P1500_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x007c)
+#define OMAP4430_RM_L3INIT_EMAC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0084)
+#define OMAP4430_PM_L3INIT_SATA_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0088)
+#define OMAP4430_RM_L3INIT_SATA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x008c)
+#define OMAP4430_RM_L3INIT_TPPSS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0094)
+#define OMAP4430_PM_L3INIT_PCIESS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x0098)
+#define OMAP4430_RM_L3INIT_PCIESS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x009c)
+#define OMAP4430_RM_L3INIT_CCPTX_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00ac)
+#define OMAP4430_PM_L3INIT_XHPI_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c0)
+#define OMAP4430_RM_L3INIT_XHPI_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c4)
+#define OMAP4430_PM_L3INIT_MMC6_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00c8)
+#define OMAP4430_RM_L3INIT_MMC6_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00cc)
+#define OMAP4430_PM_L3INIT_USB_HOST_FS_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d0)
+#define OMAP4430_RM_L3INIT_USB_HOST_FS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00d4)
+#define OMAP4430_RM_L3INIT_USBPHYOCP2SCP_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L3INIT_MOD, 0x00e4)
+
+/* PRM.L4PER_PRM register offsets */
+#define OMAP4430_PM_L4PER_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0000)
+#define OMAP4430_PM_L4PER_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0004)
+#define OMAP4430_RM_L4PER_ADC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0024)
+#define OMAP4430_PM_L4PER_DMTIMER10_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0028)
+#define OMAP4430_RM_L4PER_DMTIMER10_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x002c)
+#define OMAP4430_PM_L4PER_DMTIMER11_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0030)
+#define OMAP4430_RM_L4PER_DMTIMER11_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0034)
+#define OMAP4430_PM_L4PER_DMTIMER2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0038)
+#define OMAP4430_RM_L4PER_DMTIMER2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x003c)
+#define OMAP4430_PM_L4PER_DMTIMER3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0040)
+#define OMAP4430_RM_L4PER_DMTIMER3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0044)
+#define OMAP4430_PM_L4PER_DMTIMER4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0048)
+#define OMAP4430_RM_L4PER_DMTIMER4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x004c)
+#define OMAP4430_PM_L4PER_DMTIMER9_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0050)
+#define OMAP4430_RM_L4PER_DMTIMER9_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0054)
+#define OMAP4430_RM_L4PER_ELM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x005c)
+#define OMAP4430_PM_L4PER_GPIO2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0060)
+#define OMAP4430_RM_L4PER_GPIO2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0064)
+#define OMAP4430_PM_L4PER_GPIO3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0068)
+#define OMAP4430_RM_L4PER_GPIO3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x006c)
+#define OMAP4430_PM_L4PER_GPIO4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0070)
+#define OMAP4430_RM_L4PER_GPIO4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0074)
+#define OMAP4430_PM_L4PER_GPIO5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0078)
+#define OMAP4430_RM_L4PER_GPIO5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x007c)
+#define OMAP4430_PM_L4PER_GPIO6_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0080)
+#define OMAP4430_RM_L4PER_GPIO6_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0084)
+#define OMAP4430_RM_L4PER_HDQ1W_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x008c)
+#define OMAP4430_PM_L4PER_HECC1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0090)
+#define OMAP4430_RM_L4PER_HECC1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0094)
+#define OMAP4430_PM_L4PER_HECC2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0098)
+#define OMAP4430_RM_L4PER_HECC2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x009c)
+#define OMAP4430_PM_L4PER_I2C1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a0)
+#define OMAP4430_RM_L4PER_I2C1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a4)
+#define OMAP4430_PM_L4PER_I2C2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00a8)
+#define OMAP4430_RM_L4PER_I2C2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ac)
+#define OMAP4430_PM_L4PER_I2C3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b0)
+#define OMAP4430_RM_L4PER_I2C3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b4)
+#define OMAP4430_PM_L4PER_I2C4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00b8)
+#define OMAP4430_RM_L4PER_I2C4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00bc)
+#define OMAP4430_RM_L4PER_L4_PER_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00c0)
+#define OMAP4430_PM_L4PER_MCASP2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d0)
+#define OMAP4430_RM_L4PER_MCASP2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d4)
+#define OMAP4430_PM_L4PER_MCASP3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00d8)
+#define OMAP4430_RM_L4PER_MCASP3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00dc)
+#define OMAP4430_PM_L4PER_MCBSP4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e0)
+#define OMAP4430_RM_L4PER_MCBSP4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00e4)
+#define OMAP4430_RM_L4PER_MGATE_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00ec)
+#define OMAP4430_PM_L4PER_MCSPI1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f0)
+#define OMAP4430_RM_L4PER_MCSPI1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f4)
+#define OMAP4430_PM_L4PER_MCSPI2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00f8)
+#define OMAP4430_RM_L4PER_MCSPI2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x00fc)
+#define OMAP4430_PM_L4PER_MCSPI3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0100)
+#define OMAP4430_RM_L4PER_MCSPI3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0104)
+#define OMAP4430_PM_L4PER_MCSPI4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0108)
+#define OMAP4430_RM_L4PER_MCSPI4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x010c)
+#define OMAP4430_PM_L4PER_MMCSD3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0120)
+#define OMAP4430_RM_L4PER_MMCSD3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0124)
+#define OMAP4430_PM_L4PER_MMCSD4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0128)
+#define OMAP4430_RM_L4PER_MMCSD4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x012c)
+#define OMAP4430_RM_L4PER_MSPROHG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0134)
+#define OMAP4430_PM_L4PER_SLIMBUS2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0138)
+#define OMAP4430_RM_L4PER_SLIMBUS2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x013c)
+#define OMAP4430_PM_L4PER_UART1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0140)
+#define OMAP4430_RM_L4PER_UART1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0144)
+#define OMAP4430_PM_L4PER_UART2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0148)
+#define OMAP4430_RM_L4PER_UART2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x014c)
+#define OMAP4430_PM_L4PER_UART3_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0150)
+#define OMAP4430_RM_L4PER_UART3_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0154)
+#define OMAP4430_PM_L4PER_UART4_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0158)
+#define OMAP4430_RM_L4PER_UART4_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x015c)
+#define OMAP4430_PM_L4PER_MMCSD5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0160)
+#define OMAP4430_RM_L4PER_MMCSD5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0164)
+#define OMAP4430_PM_L4PER_I2C5_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x0168)
+#define OMAP4430_RM_L4PER_I2C5_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x016c)
+#define OMAP4430_RM_L4SEC_AES1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01a4)
+#define OMAP4430_RM_L4SEC_AES2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01ac)
+#define OMAP4430_RM_L4SEC_DES3DES_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01b4)
+#define OMAP4430_RM_L4SEC_PKAEIP29_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01bc)
+#define OMAP4430_RM_L4SEC_RNG_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01c4)
+#define OMAP4430_RM_L4SEC_SHA2MD51_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01cc)
+#define OMAP4430_RM_L4SEC_CRYPTODMA_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_L4PER_MOD, 0x01dc)
+
+/* PRM.CEFUSE_PRM register offsets */
+#define OMAP4430_PM_CEFUSE_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0000)
+#define OMAP4430_PM_CEFUSE_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0004)
+#define OMAP4430_RM_CEFUSE_CEFUSE_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_CEFUSE_MOD, 0x0024)
+
+/* PRM.WKUP_PRM register offsets */
+#define OMAP4430_RM_WKUP_L4WKUP_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0024)
+#define OMAP4430_RM_WKUP_WDT1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x002c)
+#define OMAP4430_PM_WKUP_WDT2_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0030)
+#define OMAP4430_RM_WKUP_WDT2_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0034)
+#define OMAP4430_PM_WKUP_GPIO1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0038)
+#define OMAP4430_RM_WKUP_GPIO1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x003c)
+#define OMAP4430_PM_WKUP_TIMER1_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0040)
+#define OMAP4430_RM_WKUP_TIMER1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0044)
+#define OMAP4430_PM_WKUP_TIMER12_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0048)
+#define OMAP4430_RM_WKUP_TIMER12_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x004c)
+#define OMAP4430_RM_WKUP_SYNCTIMER_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0054)
+#define OMAP4430_PM_WKUP_USIM_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0058)
+#define OMAP4430_RM_WKUP_USIM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x005c)
+#define OMAP4430_RM_WKUP_SARRAM_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0064)
+#define OMAP4430_PM_WKUP_KEYBOARD_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0078)
+#define OMAP4430_RM_WKUP_KEYBOARD_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x007c)
+#define OMAP4430_PM_WKUP_RTC_WKDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0080)
+#define OMAP4430_RM_WKUP_RTC_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_MOD, 0x0084)
+
+/* PRM.WKUP_CM register offsets */
+#define OMAP4430_CM_WKUP_CLKSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0000)
+#define OMAP4430_CM_WKUP_L4WKUP_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0020)
+#define OMAP4430_CM_WKUP_WDT1_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0028)
+#define OMAP4430_CM_WKUP_WDT2_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0030)
+#define OMAP4430_CM_WKUP_GPIO1_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0038)
+#define OMAP4430_CM_WKUP_TIMER1_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0040)
+#define OMAP4430_CM_WKUP_TIMER12_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0048)
+#define OMAP4430_CM_WKUP_SYNCTIMER_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0050)
+#define OMAP4430_CM_WKUP_USIM_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0058)
+#define OMAP4430_CM_WKUP_SARRAM_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0060)
+#define OMAP4430_CM_WKUP_KEYBOARD_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0078)
+#define OMAP4430_CM_WKUP_RTC_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0080)
+#define OMAP4430_CM_WKUP_BANDGAP_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_WKUP_CM_MOD, 0x0088)
+
+/* PRM.EMU_PRM register offsets */
+#define OMAP4430_PM_EMU_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0000)
+#define OMAP4430_PM_EMU_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0004)
+#define OMAP4430_RM_EMU_DEBUGSS_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_MOD, 0x0024)
+
+/* PRM.EMU_CM register offsets */
+#define OMAP4430_CM_EMU_CLKSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0000)
+#define OMAP4430_CM_EMU_DYNAMICDEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0008)
+#define OMAP4430_CM_EMU_DEBUGSS_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_EMU_CM_MOD, 0x0020)
+
+/* PRM.DEVICE_PRM register offsets */
+#define OMAP4430_PRM_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0000)
+#define OMAP4430_PRM_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0004)
+#define OMAP4430_PRM_RSTTIME OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0008)
+#define OMAP4430_PRM_CLKREQCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x000c)
+#define OMAP4430_PRM_VOLTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0010)
+#define OMAP4430_PRM_PWRREQCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0014)
+#define OMAP4430_PRM_PSCON_COUNT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0018)
+#define OMAP4430_PRM_IO_COUNT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x001c)
+#define OMAP4430_PRM_IO_PMCTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0020)
+#define OMAP4430_PRM_VOLTSETUP_WARMRESET OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0024)
+#define OMAP4430_PRM_VOLTSETUP_CORE_OFF OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0028)
+#define OMAP4430_PRM_VOLTSETUP_MPU_OFF OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x002c)
+#define OMAP4430_PRM_VOLTSETUP_IVA_OFF OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0030)
+#define OMAP4430_PRM_VOLTSETUP_CORE_RET_SLEEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0034)
+#define OMAP4430_PRM_VOLTSETUP_MPU_RET_SLEEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0038)
+#define OMAP4430_PRM_VOLTSETUP_IVA_RET_SLEEP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x003c)
+#define OMAP4430_PRM_VP_CORE_CONFIG OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0040)
+#define OMAP4430_PRM_VP_CORE_STATUS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0044)
+#define OMAP4430_PRM_VP_CORE_VLIMITTO OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0048)
+#define OMAP4430_PRM_VP_CORE_VOLTAGE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x004c)
+#define OMAP4430_PRM_VP_CORE_VSTEPMAX OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0050)
+#define OMAP4430_PRM_VP_CORE_VSTEPMIN OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0054)
+#define OMAP4430_PRM_VP_MPU_CONFIG OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0058)
+#define OMAP4430_PRM_VP_MPU_STATUS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x005c)
+#define OMAP4430_PRM_VP_MPU_VLIMITTO OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0060)
+#define OMAP4430_PRM_VP_MPU_VOLTAGE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0064)
+#define OMAP4430_PRM_VP_MPU_VSTEPMAX OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0068)
+#define OMAP4430_PRM_VP_MPU_VSTEPMIN OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x006c)
+#define OMAP4430_PRM_VP_IVA_CONFIG OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0070)
+#define OMAP4430_PRM_VP_IVA_STATUS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0074)
+#define OMAP4430_PRM_VP_IVA_VLIMITTO OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0078)
+#define OMAP4430_PRM_VP_IVA_VOLTAGE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x007c)
+#define OMAP4430_PRM_VP_IVA_VSTEPMAX OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0080)
+#define OMAP4430_PRM_VP_IVA_VSTEPMIN OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0084)
+#define OMAP4430_PRM_VC_SMPS_SA OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0088)
+#define OMAP4430_PRM_VC_VAL_SMPS_RA_VOL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x008c)
+#define OMAP4430_PRM_VC_VAL_SMPS_RA_CMD OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0090)
+#define OMAP4430_PRM_VC_VAL_CMD_VDD_CORE_L OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0094)
+#define OMAP4430_PRM_VC_VAL_CMD_VDD_MPU_L OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x0098)
+#define OMAP4430_PRM_VC_VAL_CMD_VDD_IVA_L OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x009c)
+#define OMAP4430_PRM_VC_VAL_BYPASS OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a0)
+#define OMAP4430_PRM_VC_CFG_CHANNEL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a4)
+#define OMAP4430_PRM_VC_CFG_I2C_MODE OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00a8)
+#define OMAP4430_PRM_VC_CFG_I2C_CLK OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ac)
+#define OMAP4430_PRM_SRAM_COUNT OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b0)
+#define OMAP4430_PRM_SRAM_WKUP_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b4)
+#define OMAP4430_PRM_LDO_SRAM_CORE_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00b8)
+#define OMAP4430_PRM_LDO_SRAM_CORE_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00bc)
+#define OMAP4430_PRM_LDO_SRAM_MPU_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c0)
+#define OMAP4430_PRM_LDO_SRAM_MPU_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c4)
+#define OMAP4430_PRM_LDO_SRAM_IVA_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00c8)
+#define OMAP4430_PRM_LDO_SRAM_IVA_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00cc)
+#define OMAP4430_PRM_LDO_ABB_MPU_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d0)
+#define OMAP4430_PRM_LDO_ABB_MPU_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d4)
+#define OMAP4430_PRM_LDO_ABB_IVA_SETUP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00d8)
+#define OMAP4430_PRM_LDO_ABB_IVA_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00dc)
+#define OMAP4430_PRM_LDO_BANDGAP_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e0)
+#define OMAP4430_PRM_DEVICE_OFF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e4)
+#define OMAP4430_PRM_PHASE1_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00e8)
+#define OMAP4430_PRM_PHASE2A_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00ec)
+#define OMAP4430_PRM_PHASE2B_CNDP OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f0)
+#define OMAP4430_PRM_MODEM_IF_CTRL OMAP44XX_PRM_REGADDR(OMAP4430_PRM_DEVICE_MOD, 0x00f4)
+
+/* CHIRON_PRCM */
+
+
+/* CHIRON_PRCM.CHIRONSS_OCP_SOCKET_PRCM register offsets */
+#define OMAP4430_REVISION_PRCM OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_OCP_SOCKET_PRCM_MOD, 0x0000)
+
+/* CHIRON_PRCM.CHIRONSS_DEVICE_PRM register offsets */
+#define OMAP4430_CHIRON_PRCM_PRM_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_DEVICE_PRM_MOD, 0x0000)
+
+/* CHIRON_PRCM.CHIRONSS_CPU0 register offsets */
+#define OMAP4430_PM_PDA_CPU0_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0000)
+#define OMAP4430_PM_PDA_CPU0_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0004)
+#define OMAP4430_RM_PDA_CPU0_CPU0_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0008)
+#define OMAP4430_RM_PDA_CPU0_CPU0_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x000c)
+#define OMAP4430_RM_PDA_CPU0_CPU0_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0010)
+#define OMAP4430_CM_PDA_CPU0_CPU0_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0014)
+#define OMAP4430_CM_PDA_CPU0_CLKSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU0_MOD, 0x0018)
+
+/* CHIRON_PRCM.CHIRONSS_CPU1 register offsets */
+#define OMAP4430_PM_PDA_CPU1_PWRSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0000)
+#define OMAP4430_PM_PDA_CPU1_PWRSTST OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0004)
+#define OMAP4430_RM_PDA_CPU1_CPU1_CONTEXT OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0008)
+#define OMAP4430_RM_PDA_CPU1_CPU1_RSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x000c)
+#define OMAP4430_RM_PDA_CPU1_CPU1_RSTST OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0010)
+#define OMAP4430_CM_PDA_CPU1_CPU1_CLKCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0014)
+#define OMAP4430_CM_PDA_CPU1_CLKSTCTRL OMAP44XX_PRM_REGADDR(OMAP4430_CHIRONSS_CHIRONSS_CPU1_MOD, 0x0018)
+#endif
diff --git a/arch/arm/mach-omap2/sdrc.c b/arch/arm/mach-omap2/sdrc.c
index 9a592199321..cbfbd142e94 100644
--- a/arch/arm/mach-omap2/sdrc.c
+++ b/arch/arm/mach-omap2/sdrc.c
@@ -160,3 +160,19 @@ void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
sdrc_write_reg(l, SDRC_POWER);
omap2_sms_save_context();
}
+
+void omap2_sms_write_rot_control(u32 val, unsigned ctx)
+{
+ sms_write_reg(val, SMS_ROT_CONTROL(ctx));
+}
+
+void omap2_sms_write_rot_size(u32 val, unsigned ctx)
+{
+ sms_write_reg(val, SMS_ROT_SIZE(ctx));
+}
+
+void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx)
+{
+ sms_write_reg(val, SMS_ROT_PHYSICAL_BA(ctx));
+}
+
diff --git a/arch/arm/mach-omap2/sdrc.h b/arch/arm/mach-omap2/sdrc.h
index 48207b01898..68f57bb67fc 100644
--- a/arch/arm/mach-omap2/sdrc.h
+++ b/arch/arm/mach-omap2/sdrc.h
@@ -18,6 +18,9 @@
#include <plat/sdrc.h>
#ifndef __ASSEMBLER__
+
+#include <linux/io.h>
+
extern void __iomem *omap2_sdrc_base;
extern void __iomem *omap2_sms_base;
@@ -56,4 +59,20 @@ static inline u32 sms_read_reg(u16 reg)
OMAP2_L3_IO_ADDRESS(OMAP343X_SDRC_BASE + (reg))
#endif /* __ASSEMBLER__ */
+/* Minimum frequency that the SDRC DLL can lock at */
+#define MIN_SDRC_DLL_LOCK_FREQ 83000000
+
+/* Scale factor for fixed-point arith in omap3_core_dpll_m2_set_rate() */
+#define SDRC_MPURATE_SCALE 8
+
+/* 2^SDRC_MPURATE_BASE_SHIFT: MPU MHz that SDRC_MPURATE_LOOPS is defined for */
+#define SDRC_MPURATE_BASE_SHIFT 9
+
+/*
+ * SDRC_MPURATE_LOOPS: Number of MPU loops to execute at
+ * 2^SDRC_MPURATE_BASE_SHIFT MHz for the SDRC to stabilize
+ */
+#define SDRC_MPURATE_LOOPS 96
+
+
#endif
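
Editor's note: the SDRC_MPURATE_* constants added above describe a fixed-point scaling scheme: SDRC_MPURATE_LOOPS delay loops are calibrated for an MPU clock of 2^SDRC_MPURATE_BASE_SHIFT (512) MHz, and SDRC_MPURATE_SCALE is the fixed-point shift used when scaling that count to other rates. The sketch below shows one plausible way such a loop count could be derived for an arbitrary MPU rate; the function name and the mpurate_mhz parameter are hypothetical, and the real computation is the one in omap3_core_dpll_m2_set_rate().

static unsigned long sdrc_stabilize_loops(unsigned long mpurate_mhz)
{
	/*
	 * Scale SDRC_MPURATE_LOOPS (calibrated at 2^SDRC_MPURATE_BASE_SHIFT
	 * MHz) linearly with the current MPU rate, keeping the intermediate
	 * value in SDRC_MPURATE_SCALE bits of fixed point.
	 */
	unsigned long loops_fp = (SDRC_MPURATE_LOOPS << SDRC_MPURATE_SCALE) >>
					SDRC_MPURATE_BASE_SHIFT;

	return (loops_fp * mpurate_mhz) >> SDRC_MPURATE_SCALE;
}

At 512 MHz this evaluates back to SDRC_MPURATE_LOOPS (96), and scales proportionally at other rates.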
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 2e17b57f5b2..19805a7de06 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -33,6 +33,7 @@
#include "pm.h"
#include "prm-regbits-34xx.h"
+#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52
#define UART_OMAP_WER 0x17 /* Wake-up enable register */
#define DEFAULT_TIMEOUT (5 * HZ)
@@ -572,6 +573,23 @@ static struct omap_uart_state omap_uart[] = {
#endif
};
+/*
+ * Override the default 8250 read handler: mem_serial_in()
+ * Reading an empty RX FIFO causes an abort on OMAP3630 and OMAP4.
+ * This function ensures that the RX FIFO is never read while empty on
+ * those parts (OMAP1/2/3430 are not affected).
+ */
+static unsigned int serial_in_override(struct uart_port *up, int offset)
+{
+ if (UART_RX == offset) {
+ unsigned int lsr;
+ lsr = serial_read_reg(omap_uart[up->line].p, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return -EPERM;
+ }
+ return serial_read_reg(omap_uart[up->line].p, offset);
+}
+
void __init omap_serial_early_init(void)
{
int i;
@@ -622,33 +640,74 @@ void __init omap_serial_early_init(void)
uart->num = i;
p->private_data = uart;
uart->p = p;
- list_add_tail(&uart->node, &uart_list);
if (cpu_is_omap44xx())
p->irq += 32;
-
- omap_uart_enable_clocks(uart);
}
}
-void __init omap_serial_init(void)
+/**
+ * omap_serial_init_port() - initialize single serial port
+ * @port: serial port number (0-3)
+ *
+ * This function initializes the serial driver for the given @port only.
+ * Platforms can call this function instead of omap_serial_init()
+ * if they don't plan to use all available UARTs as serial ports.
+ *
+ * Don't mix calls to omap_serial_init_port() and omap_serial_init();
+ * use only one of the two.
+ */
+void __init omap_serial_init_port(int port)
{
- int i;
+ struct omap_uart_state *uart;
+ struct platform_device *pdev;
+ struct device *dev;
- for (i = 0; i < ARRAY_SIZE(omap_uart); i++) {
- struct omap_uart_state *uart = &omap_uart[i];
- struct platform_device *pdev = &uart->pdev;
- struct device *dev = &pdev->dev;
+ BUG_ON(port < 0);
+ BUG_ON(port >= ARRAY_SIZE(omap_uart));
- omap_uart_reset(uart);
- omap_uart_idle_init(uart);
+ uart = &omap_uart[port];
+ pdev = &uart->pdev;
+ dev = &pdev->dev;
- if (WARN_ON(platform_device_register(pdev)))
- continue;
- if ((cpu_is_omap34xx() && uart->padconf) ||
- (uart->wk_en && uart->wk_mask)) {
- device_init_wakeup(dev, true);
- DEV_CREATE_FILE(dev, &dev_attr_sleep_timeout);
- }
+ omap_uart_enable_clocks(uart);
+
+ omap_uart_reset(uart);
+ omap_uart_idle_init(uart);
+
+ list_add_tail(&uart->node, &uart_list);
+
+ if (WARN_ON(platform_device_register(pdev)))
+ return;
+
+ if ((cpu_is_omap34xx() && uart->padconf) ||
+ (uart->wk_en && uart->wk_mask)) {
+ device_init_wakeup(dev, true);
+ DEV_CREATE_FILE(dev, &dev_attr_sleep_timeout);
}
+
+ /*
+  * omap44xx: never read an empty UART FIFO
+  * omap3xxx: never read an empty UART FIFO on UARTs with IP rev >= 0x52
+  */
+ if (cpu_is_omap44xx())
+ uart->p->serial_in = serial_in_override;
+ else if ((serial_read_reg(uart->p, UART_OMAP_MVER) & 0xFF)
+ >= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
+ uart->p->serial_in = serial_in_override;
+}
+
+/**
+ * omap_serial_init() - initialize all supported serial ports
+ *
+ * Initializes all available UARTs as serial ports. Platforms
+ * can call this function when they want the default behaviour
+ * for serial ports (e.g. initialize them all as serial ports).
+ */
+void __init omap_serial_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(omap_uart); i++)
+ omap_serial_init_port(i);
}
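
Editor's note: per the new kerneldoc, a board file that only uses some of the UARTs can call omap_serial_init_port() for each port it needs instead of omap_serial_init(). A minimal sketch follows, assuming a hypothetical board init hook; myboard_init and the choice of port 2 (UART3) are illustrative only, and the two entry points must not be mixed.

static void __init myboard_init(void)
{
	/* Bring up only UART3 (port index 2) as a serial port. */
	omap_serial_init_port(2);

	/* ...remaining board setup... */
}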
diff --git a/arch/arm/mach-omap2/sram34xx.S b/arch/arm/mach-omap2/sram34xx.S
index 82aa4a3d160..de99ba2a57a 100644
--- a/arch/arm/mach-omap2/sram34xx.S
+++ b/arch/arm/mach-omap2/sram34xx.S
@@ -91,8 +91,19 @@
* new SDRC_ACTIM_CTRL_B_1 register contents
* new SDRC_MR_1 register value
*
- * If the param SDRC_RFR_CTRL_1 is 0, the parameters
- * are not programmed into the SDRC CS1 registers
+ * If the param SDRC_RFR_CTRL_1 is 0, the parameters are not programmed into
+ * the SDRC CS1 registers
+ *
+ * NOTE: This code no longer attempts to program the SDRC AC timing and MR
+ * registers. This is because the code currently cannot ensure that all
+ * L3 initiators (e.g., sDMA, IVA, DSS DISPC, etc.) are not accessing the
+ * SDRAM when the registers are written. If the registers are changed while
+ * an initiator is accessing SDRAM, memory can be corrupted and/or the SDRC
+ * may enter an unpredictable state. In the future, the intent is to
+ * re-enable this code in cases where we can ensure that no initiators are
+ * touching the SDRAM. Until that time, users who know that their use case
+ * can satisfy the above requirement can enable the CONFIG_OMAP3_SDRC_AC_TIMING
+ * option.
*/
ENTRY(omap3_sram_configure_core_dpll)
stmfd sp!, {r1-r12, lr} @ store regs to stack
@@ -219,6 +230,7 @@ configure_sdrc:
ldr r12, omap_sdrc_rfr_ctrl_0_val @ fetch value from SRAM
ldr r11, omap3_sdrc_rfr_ctrl_0 @ fetch addr from SRAM
str r12, [r11] @ store
+#ifdef CONFIG_OMAP3_SDRC_AC_TIMING
ldr r12, omap_sdrc_actim_ctrl_a_0_val
ldr r11, omap3_sdrc_actim_ctrl_a_0
str r12, [r11]
@@ -228,11 +240,13 @@ configure_sdrc:
ldr r12, omap_sdrc_mr_0_val
ldr r11, omap3_sdrc_mr_0
str r12, [r11]
+#endif
ldr r12, omap_sdrc_rfr_ctrl_1_val
cmp r12, #0 @ if SDRC_RFR_CTRL_1 is 0,
beq skip_cs1_prog @ do not program cs1 params
ldr r11, omap3_sdrc_rfr_ctrl_1
str r12, [r11]
+#ifdef CONFIG_OMAP3_SDRC_AC_TIMING
ldr r12, omap_sdrc_actim_ctrl_a_1_val
ldr r11, omap3_sdrc_actim_ctrl_a_1
str r12, [r11]
@@ -242,6 +256,7 @@ configure_sdrc:
ldr r12, omap_sdrc_mr_1_val
ldr r11, omap3_sdrc_mr_1
str r12, [r11]
+#endif
skip_cs1_prog:
ldr r12, [r11] @ posted-write barrier for SDRC
bx lr
diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-ehci.c
index e448abd5ec5..f1df873d59d 100644
--- a/arch/arm/mach-omap2/usb-ehci.c
+++ b/arch/arm/mach-omap2/usb-ehci.c
@@ -27,6 +27,8 @@
#include <mach/irqs.h>
#include <plat/usb.h>
+#include "mux.h"
+
#if defined(CONFIG_USB_EHCI_HCD) || defined(CONFIG_USB_EHCI_HCD_MODULE)
static struct resource ehci_resources[] = {
@@ -72,32 +74,44 @@ static void setup_ehci_io_mux(enum ehci_hcd_omap_mode *port_mode)
{
switch (port_mode[0]) {
case EHCI_HCD_OMAP_MODE_PHY:
- omap_cfg_reg(Y9_3430_USB1HS_PHY_STP);
- omap_cfg_reg(Y8_3430_USB1HS_PHY_CLK);
- omap_cfg_reg(AA14_3430_USB1HS_PHY_DIR);
- omap_cfg_reg(AA11_3430_USB1HS_PHY_NXT);
- omap_cfg_reg(W13_3430_USB1HS_PHY_DATA0);
- omap_cfg_reg(W12_3430_USB1HS_PHY_DATA1);
- omap_cfg_reg(W11_3430_USB1HS_PHY_DATA2);
- omap_cfg_reg(Y11_3430_USB1HS_PHY_DATA3);
- omap_cfg_reg(W9_3430_USB1HS_PHY_DATA4);
- omap_cfg_reg(Y12_3430_USB1HS_PHY_DATA5);
- omap_cfg_reg(W8_3430_USB1HS_PHY_DATA6);
- omap_cfg_reg(Y13_3430_USB1HS_PHY_DATA7);
+ omap_mux_init_signal("hsusb1_stp", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("hsusb1_clk", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("hsusb1_dir", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_nxt", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data0", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data1", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data2", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data3", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data4", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data5", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data6", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_data7", OMAP_PIN_INPUT_PULLDOWN);
break;
case EHCI_HCD_OMAP_MODE_TLL:
- omap_cfg_reg(Y9_3430_USB1HS_TLL_STP);
- omap_cfg_reg(Y8_3430_USB1HS_TLL_CLK);
- omap_cfg_reg(AA14_3430_USB1HS_TLL_DIR);
- omap_cfg_reg(AA11_3430_USB1HS_TLL_NXT);
- omap_cfg_reg(W13_3430_USB1HS_TLL_DATA0);
- omap_cfg_reg(W12_3430_USB1HS_TLL_DATA1);
- omap_cfg_reg(W11_3430_USB1HS_TLL_DATA2);
- omap_cfg_reg(Y11_3430_USB1HS_TLL_DATA3);
- omap_cfg_reg(W9_3430_USB1HS_TLL_DATA4);
- omap_cfg_reg(Y12_3430_USB1HS_TLL_DATA5);
- omap_cfg_reg(W8_3430_USB1HS_TLL_DATA6);
- omap_cfg_reg(Y13_3430_USB1HS_TLL_DATA7);
+ omap_mux_init_signal("hsusb1_tll_stp",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hsusb1_tll_clk",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_dir",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_nxt",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data1",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data2",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data3",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data4",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data5",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data6",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb1_tll_data7",
+ OMAP_PIN_INPUT_PULLDOWN);
break;
case EHCI_HCD_OMAP_MODE_UNKNOWN:
/* FALLTHROUGH */
@@ -107,32 +121,52 @@ static void setup_ehci_io_mux(enum ehci_hcd_omap_mode *port_mode)
switch (port_mode[1]) {
case EHCI_HCD_OMAP_MODE_PHY:
- omap_cfg_reg(AA10_3430_USB2HS_PHY_STP);
- omap_cfg_reg(AA8_3430_USB2HS_PHY_CLK);
- omap_cfg_reg(AA9_3430_USB2HS_PHY_DIR);
- omap_cfg_reg(AB11_3430_USB2HS_PHY_NXT);
- omap_cfg_reg(AB10_3430_USB2HS_PHY_DATA0);
- omap_cfg_reg(AB9_3430_USB2HS_PHY_DATA1);
- omap_cfg_reg(W3_3430_USB2HS_PHY_DATA2);
- omap_cfg_reg(T4_3430_USB2HS_PHY_DATA3);
- omap_cfg_reg(T3_3430_USB2HS_PHY_DATA4);
- omap_cfg_reg(R3_3430_USB2HS_PHY_DATA5);
- omap_cfg_reg(R4_3430_USB2HS_PHY_DATA6);
- omap_cfg_reg(T2_3430_USB2HS_PHY_DATA7);
+ omap_mux_init_signal("hsusb2_stp", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("hsusb2_clk", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("hsusb2_dir", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_nxt", OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data1",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data2",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data3",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data4",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data5",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data6",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_data7",
+ OMAP_PIN_INPUT_PULLDOWN);
break;
case EHCI_HCD_OMAP_MODE_TLL:
- omap_cfg_reg(AA10_3430_USB2HS_TLL_STP);
- omap_cfg_reg(AA8_3430_USB2HS_TLL_CLK);
- omap_cfg_reg(AA9_3430_USB2HS_TLL_DIR);
- omap_cfg_reg(AB11_3430_USB2HS_TLL_NXT);
- omap_cfg_reg(AB10_3430_USB2HS_TLL_DATA0);
- omap_cfg_reg(AB9_3430_USB2HS_TLL_DATA1);
- omap_cfg_reg(W3_3430_USB2HS_TLL_DATA2);
- omap_cfg_reg(T4_3430_USB2HS_TLL_DATA3);
- omap_cfg_reg(T3_3430_USB2HS_TLL_DATA4);
- omap_cfg_reg(R3_3430_USB2HS_TLL_DATA5);
- omap_cfg_reg(R4_3430_USB2HS_TLL_DATA6);
- omap_cfg_reg(T2_3430_USB2HS_TLL_DATA7);
+ omap_mux_init_signal("hsusb2_tll_stp",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hsusb2_tll_clk",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_dir",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_nxt",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data1",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data2",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data3",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data4",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data5",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data6",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb2_tll_data7",
+ OMAP_PIN_INPUT_PULLDOWN);
break;
case EHCI_HCD_OMAP_MODE_UNKNOWN:
/* FALLTHROUGH */
@@ -145,18 +179,30 @@ static void setup_ehci_io_mux(enum ehci_hcd_omap_mode *port_mode)
printk(KERN_WARNING "Port3 can't be used in PHY mode\n");
break;
case EHCI_HCD_OMAP_MODE_TLL:
- omap_cfg_reg(AB3_3430_USB3HS_TLL_STP);
- omap_cfg_reg(AA6_3430_USB3HS_TLL_CLK);
- omap_cfg_reg(AA3_3430_USB3HS_TLL_DIR);
- omap_cfg_reg(Y3_3430_USB3HS_TLL_NXT);
- omap_cfg_reg(AA5_3430_USB3HS_TLL_DATA0);
- omap_cfg_reg(Y4_3430_USB3HS_TLL_DATA1);
- omap_cfg_reg(Y5_3430_USB3HS_TLL_DATA2);
- omap_cfg_reg(W5_3430_USB3HS_TLL_DATA3);
- omap_cfg_reg(AB12_3430_USB3HS_TLL_DATA4);
- omap_cfg_reg(AB13_3430_USB3HS_TLL_DATA5);
- omap_cfg_reg(AA13_3430_USB3HS_TLL_DATA6);
- omap_cfg_reg(AA12_3430_USB3HS_TLL_DATA7);
+ omap_mux_init_signal("hsusb3_tll_stp",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hsusb3_tll_clk",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_dir",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_nxt",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data0",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data1",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data2",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data3",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data4",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data5",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data6",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hsusb3_tll_data7",
+ OMAP_PIN_INPUT_PULLDOWN);
break;
case EHCI_HCD_OMAP_MODE_UNKNOWN:
/* FALLTHROUGH */
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index d89c6adbe8b..8a0837ea029 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -63,6 +63,15 @@ config ARCH_VIPER
select HAVE_PWM
select PXA_HAVE_BOARD_IRQS
select PXA_HAVE_ISA_IRQS
+ select ARCOM_PCMCIA
+
+config MACH_ARCOM_ZEUS
+ bool "Arcom/Eurotech ZEUS SBC"
+ select PXA27x
+ select ISA
+ select PXA_HAVE_BOARD_IRQS
+ select PXA_HAVE_ISA_IRQS
+ select ARCOM_PCMCIA
config MACH_BALLOON3
bool "Balloon 3 board"
@@ -101,6 +110,8 @@ config MACH_CM_X300
bool "CompuLab CM-X300 modules"
select PXA3xx
select CPU_PXA300
+ select CPU_PXA310
+ select HAVE_PWM
config ARCH_GUMSTIX
bool "Gumstix XScale 255 boards"
@@ -179,6 +190,11 @@ config MACH_TRIZEPS_ANY
endchoice
+config ARCOM_PCMCIA
+ bool
+ help
+ Generic option for Arcom Viper/Zeus PCMCIA
+
config TRIZEPS_PCMCIA
bool
help
@@ -226,7 +242,6 @@ config MACH_COLIBRI300
select PXA3xx
select CPU_PXA300
select CPU_PXA310
- select HAVE_PWM
config MACH_COLIBRI320
bool "Toradex Colibri PXA320"
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index b5d29e60a34..f64afda7e6f 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_MACH_SAAR) += saar.o
# 3rd Party Dev Platforms
obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
obj-$(CONFIG_ARCH_VIPER) += viper.o
+obj-$(CONFIG_MACH_ARCOM_ZEUS) += zeus.o
obj-$(CONFIG_MACH_BALLOON3) += balloon3.o
obj-$(CONFIG_MACH_CSB726) += csb726.o
obj-$(CONFIG_CSB726_CSB701) += csb701.o
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 3395463bb5a..8e10db148f1 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -4,7 +4,6 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <mach/hardware.h>
#include <mach/udc.h>
#include <mach/pxafb.h>
#include <mach/mmc.h>
@@ -14,6 +13,7 @@
#include <mach/pxa2xx_spi.h>
#include <mach/camera.h>
#include <mach/audio.h>
+#include <mach/hardware.h>
#include <plat/i2c.h>
#include <plat/pxa3xx_nand.h>
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 1c0de808b54..c8a01bc85fd 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -497,16 +497,15 @@ static int em_x270_usb_hub_init(void)
goto err_free_vbus_gpio;
/* USB Hub power-on and reset */
- gpio_direction_output(usb_hub_reset, 0);
+ gpio_direction_output(usb_hub_reset, 1);
+ gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
regulator_enable(em_x270_usb_ldo);
- gpio_set_value(usb_hub_reset, 1);
gpio_set_value(usb_hub_reset, 0);
+ gpio_set_value(usb_hub_reset, 1);
regulator_disable(em_x270_usb_ldo);
regulator_enable(em_x270_usb_ldo);
- gpio_set_value(usb_hub_reset, 1);
-
- /* enable VBUS */
- gpio_direction_output(GPIO9_USB_VBUS_EN, 1);
+ gpio_set_value(usb_hub_reset, 0);
+ gpio_set_value(GPIO9_USB_VBUS_EN, 1);
return 0;
diff --git a/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h b/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h
new file mode 100644
index 00000000000..d428be4db44
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/arcom-pcmcia.h
@@ -0,0 +1,11 @@
+#ifndef __ARCOM_PCMCIA_H
+#define __ARCOM_PCMCIA_H
+
+struct arcom_pcmcia_pdata {
+ int cd_gpio;
+ int rdy_gpio;
+ int pwr_gpio;
+ void (*reset)(int state);
+};
+
+#endif
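
Editor's note: the new arcom_pcmcia_pdata structure is the board-side half of the shared Arcom Viper/Zeus PCMCIA support (see the ARCOM_PCMCIA Kconfig option later in this diff); the patch registers it via the "viper-pcmcia" and "zeus-pcmcia" platform devices. The sketch below only illustrates, under stated assumptions, how a socket driver might pick those fields up from platform data; the function name is hypothetical and error handling is abbreviated.

static int arcom_pcmcia_hw_init_sketch(struct platform_device *pdev)
{
	struct arcom_pcmcia_pdata *pdata = pdev->dev.platform_data;
	int ret;

	/* Claim the card-detect and ready GPIOs named in platform data. */
	ret = gpio_request(pdata->cd_gpio, "CF card detect");
	if (ret)
		return ret;
	ret = gpio_request(pdata->rdy_gpio, "CF ready");
	if (ret) {
		gpio_free(pdata->cd_gpio);
		return ret;
	}
	gpio_direction_input(pdata->cd_gpio);
	gpio_direction_input(pdata->rdy_gpio);

	/* Pulse the CF reset line through the board-provided hook. */
	pdata->reset(1);
	pdata->reset(0);

	return 0;
}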
diff --git a/arch/arm/mach-pxa/include/mach/viper.h b/arch/arm/mach-pxa/include/mach/viper.h
index 10988c270ca..5f5fbf1f648 100644
--- a/arch/arm/mach-pxa/include/mach/viper.h
+++ b/arch/arm/mach-pxa/include/mach/viper.h
@@ -85,8 +85,6 @@
/* Interrupt and Configuration Register (VIPER_ICR) */
/* This is a write only register. Only CF_RST is used under Linux */
-extern void viper_cf_rst(int state);
-
#define VIPER_ICR_RETRIG (1 << 0)
#define VIPER_ICR_AUTO_CLR (1 << 1)
#define VIPER_ICR_R_DIS (1 << 2)
diff --git a/arch/arm/mach-pxa/include/mach/zeus.h b/arch/arm/mach-pxa/include/mach/zeus.h
new file mode 100644
index 00000000000..c387046d2f2
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/zeus.h
@@ -0,0 +1,82 @@
+/*
+ * arch/arm/mach-pxa/include/mach/zeus.h
+ *
+ * Author: David Vrabel
+ * Created: Sept 28, 2005
+ * Copyright: Arcom Control Systems Ltd.
+ *
+ * Maintained by: Marc Zyngier <maz@misterjones.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MACH_ZEUS_H
+#define _MACH_ZEUS_H
+
+/* Physical addresses */
+#define ZEUS_FLASH_PHYS PXA_CS0_PHYS
+#define ZEUS_ETH0_PHYS PXA_CS1_PHYS
+#define ZEUS_ETH1_PHYS PXA_CS2_PHYS
+#define ZEUS_CPLD_PHYS (PXA_CS4_PHYS+0x2000000)
+#define ZEUS_SRAM_PHYS PXA_CS5_PHYS
+#define ZEUS_PC104IO_PHYS (0x30000000)
+
+#define ZEUS_CPLD_VERSION_PHYS (ZEUS_CPLD_PHYS + 0x00000000)
+#define ZEUS_CPLD_ISA_IRQ_PHYS (ZEUS_CPLD_PHYS + 0x00800000)
+#define ZEUS_CPLD_CONTROL_PHYS (ZEUS_CPLD_PHYS + 0x01000000)
+#define ZEUS_CPLD_EXTWDOG_PHYS (ZEUS_CPLD_PHYS + 0x01800000)
+
+/* GPIOs */
+#define ZEUS_AC97_GPIO 0
+#define ZEUS_WAKEUP_GPIO 1
+#define ZEUS_UARTA_GPIO 9
+#define ZEUS_UARTB_GPIO 10
+#define ZEUS_UARTC_GPIO 12
+#define ZEUS_UARTD_GPIO 11
+#define ZEUS_ETH0_GPIO 14
+#define ZEUS_ISA_GPIO 17
+#define ZEUS_BKLEN_GPIO 19
+#define ZEUS_USB2_PWREN_GPIO 22
+#define ZEUS_PTT_GPIO 27
+#define ZEUS_CF_CD_GPIO 35
+#define ZEUS_MMC_WP_GPIO 52
+#define ZEUS_MMC_CD_GPIO 53
+#define ZEUS_EXTGPIO_GPIO 91
+#define ZEUS_CF_PWEN_GPIO 97
+#define ZEUS_CF_RDY_GPIO 99
+#define ZEUS_LCD_EN_GPIO 101
+#define ZEUS_ETH1_GPIO 113
+#define ZEUS_CAN_GPIO 116
+
+#define ZEUS_EXT0_GPIO_BASE 128
+#define ZEUS_EXT1_GPIO_BASE 160
+#define ZEUS_USER_GPIO_BASE 192
+
+#define ZEUS_EXT0_GPIO(x) (ZEUS_EXT0_GPIO_BASE + (x))
+#define ZEUS_EXT1_GPIO(x) (ZEUS_EXT1_GPIO_BASE + (x))
+#define ZEUS_USER_GPIO(x) (ZEUS_USER_GPIO_BASE + (x))
+
+/*
+ * CPLD registers:
+ * Only 4 registers, but spread over a 32MB address space.
+ * Be gentle, and remap that over 32kB...
+ */
+
+#define ZEUS_CPLD (0xf0000000)
+#define ZEUS_CPLD_VERSION (ZEUS_CPLD + 0x0000)
+#define ZEUS_CPLD_ISA_IRQ (ZEUS_CPLD + 0x1000)
+#define ZEUS_CPLD_CONTROL (ZEUS_CPLD + 0x2000)
+#define ZEUS_CPLD_EXTWDOG (ZEUS_CPLD + 0x3000)
+
+/* CPLD register bits */
+#define ZEUS_CPLD_CONTROL_CF_RST 0x01
+
+#define ZEUS_PC104IO (0xf1000000)
+
+#define ZEUS_SRAM_SIZE (256 * 1024)
+
+#endif
+
+
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index cf0d71b7797..5352b4e5a7d 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -47,6 +47,7 @@
#include <mach/pxafb.h>
#include <plat/i2c.h>
#include <mach/regs-uart.h>
+#include <mach/arcom-pcmcia.h>
#include <mach/viper.h>
#include <asm/setup.h>
@@ -76,14 +77,28 @@ static void viper_icr_clear_bit(unsigned int bit)
}
/* This function is used from the pcmcia module to reset the CF */
-void viper_cf_rst(int state)
+static void viper_cf_reset(int state)
{
if (state)
viper_icr_set_bit(VIPER_ICR_CF_RST);
else
viper_icr_clear_bit(VIPER_ICR_CF_RST);
}
-EXPORT_SYMBOL(viper_cf_rst);
+
+static struct arcom_pcmcia_pdata viper_pcmcia_info = {
+ .cd_gpio = VIPER_CF_CD_GPIO,
+ .rdy_gpio = VIPER_CF_RDY_GPIO,
+ .pwr_gpio = VIPER_CF_POWER_GPIO,
+ .reset = viper_cf_reset,
+};
+
+static struct platform_device viper_pcmcia_device = {
+ .name = "viper-pcmcia",
+ .id = -1,
+ .dev = {
+ .platform_data = &viper_pcmcia_info,
+ },
+};
/*
* The CPLD version register was not present on VIPER boards prior to
@@ -685,6 +700,7 @@ static struct platform_device *viper_devs[] __initdata = {
&viper_mtd_devices[0],
&viper_mtd_devices[1],
&viper_backlight_device,
+ &viper_pcmcia_device,
};
static mfp_cfg_t viper_pin_config[] __initdata = {
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
new file mode 100644
index 00000000000..5b986a8bd9e
--- /dev/null
+++ b/arch/arm/mach-pxa/zeus.c
@@ -0,0 +1,820 @@
+/*
+ * Support for the Arcom ZEUS.
+ *
+ * Copyright (C) 2006 Arcom Control Systems Ltd.
+ *
+ * Loosely based on Arcom's 2.6.16.28.
+ * Maintained by Marc Zyngier <maz@misterjones.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pm.h>
+#include <linux/gpio.h>
+#include <linux/serial_8250.h>
+#include <linux/dm9000.h>
+#include <linux/mmc/host.h>
+#include <linux/spi/spi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <plat/i2c.h>
+
+#include <mach/pxa2xx-regs.h>
+#include <mach/regs-uart.h>
+#include <mach/ohci.h>
+#include <mach/mmc.h>
+#include <mach/pxa27x-udc.h>
+#include <mach/udc.h>
+#include <mach/pxafb.h>
+#include <mach/pxa2xx_spi.h>
+#include <mach/mfp-pxa27x.h>
+#include <mach/pm.h>
+#include <mach/audio.h>
+#include <mach/arcom-pcmcia.h>
+#include <mach/zeus.h>
+
+#include "generic.h"
+
+/*
+ * Interrupt handling
+ */
+
+static unsigned long zeus_irq_enabled_mask;
+static const int zeus_isa_irqs[] = { 3, 4, 5, 6, 7, 10, 11, 12, };
+static const int zeus_isa_irq_map[] = {
+ 0, /* ISA irq #0, invalid */
+ 0, /* ISA irq #1, invalid */
+ 0, /* ISA irq #2, invalid */
+ 1 << 0, /* ISA irq #3 */
+ 1 << 1, /* ISA irq #4 */
+ 1 << 2, /* ISA irq #5 */
+ 1 << 3, /* ISA irq #6 */
+ 1 << 4, /* ISA irq #7 */
+ 0, /* ISA irq #8, invalid */
+ 0, /* ISA irq #9, invalid */
+ 1 << 5, /* ISA irq #10 */
+ 1 << 6, /* ISA irq #11 */
+ 1 << 7, /* ISA irq #12 */
+};
+
+static inline int zeus_irq_to_bitmask(unsigned int irq)
+{
+ return zeus_isa_irq_map[irq - PXA_ISA_IRQ(0)];
+}
+
+static inline int zeus_bit_to_irq(int bit)
+{
+ return zeus_isa_irqs[bit] + PXA_ISA_IRQ(0);
+}
+
+static void zeus_ack_irq(unsigned int irq)
+{
+ __raw_writew(zeus_irq_to_bitmask(irq), ZEUS_CPLD_ISA_IRQ);
+}
+
+static void zeus_mask_irq(unsigned int irq)
+{
+ zeus_irq_enabled_mask &= ~(zeus_irq_to_bitmask(irq));
+}
+
+static void zeus_unmask_irq(unsigned int irq)
+{
+ zeus_irq_enabled_mask |= zeus_irq_to_bitmask(irq);
+}
+
+static inline unsigned long zeus_irq_pending(void)
+{
+ return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
+}
+
+static void zeus_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long pending;
+
+ pending = zeus_irq_pending();
+ do {
+ /* we're in a chained irq handler,
+ * so ack the interrupt by hand */
+ desc->chip->ack(gpio_to_irq(ZEUS_ISA_GPIO));
+
+ if (likely(pending)) {
+ irq = zeus_bit_to_irq(__ffs(pending));
+ generic_handle_irq(irq);
+ }
+ pending = zeus_irq_pending();
+ } while (pending);
+}
+
+static struct irq_chip zeus_irq_chip = {
+ .name = "ISA",
+ .ack = zeus_ack_irq,
+ .mask = zeus_mask_irq,
+ .unmask = zeus_unmask_irq,
+};
+
+static void __init zeus_init_irq(void)
+{
+ int level;
+ int isa_irq;
+
+ pxa27x_init_irq();
+
+ /* Peripheral IRQs. It would be nice to move those inside driver
+ configuration, but it is not supported at the moment. */
+ set_irq_type(gpio_to_irq(ZEUS_AC97_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(ZEUS_WAKEUP_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(ZEUS_PTT_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_type(gpio_to_irq(ZEUS_EXTGPIO_GPIO), IRQ_TYPE_EDGE_FALLING);
+ set_irq_type(gpio_to_irq(ZEUS_CAN_GPIO), IRQ_TYPE_EDGE_FALLING);
+
+ /* Setup ISA IRQs */
+ for (level = 0; level < ARRAY_SIZE(zeus_isa_irqs); level++) {
+ isa_irq = zeus_bit_to_irq(level);
+ set_irq_chip(isa_irq, &zeus_irq_chip);
+ set_irq_handler(isa_irq, handle_edge_irq);
+ set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING);
+ set_irq_chained_handler(gpio_to_irq(ZEUS_ISA_GPIO), zeus_irq_handler);
+}
+
+
+/*
+ * Platform devices
+ */
+
+/* Flash */
+static struct resource zeus_mtd_resources[] = {
+ [0] = { /* NOR Flash (up to 64MB) */
+ .start = ZEUS_FLASH_PHYS,
+ .end = ZEUS_FLASH_PHYS + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* SRAM */
+ .start = ZEUS_SRAM_PHYS,
+ .end = ZEUS_SRAM_PHYS + SZ_512K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct physmap_flash_data zeus_flash_data[] = {
+ [0] = {
+ .width = 2,
+ .parts = NULL,
+ .nr_parts = 0,
+ },
+};
+
+static struct platform_device zeus_mtd_devices[] = {
+ [0] = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &zeus_flash_data[0],
+ },
+ .resource = &zeus_mtd_resources[0],
+ .num_resources = 1,
+ },
+};
+
+/* Serial */
+static struct resource zeus_serial_resources[] = {
+ {
+ .start = 0x10000000,
+ .end = 0x1000000f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x10800000,
+ .end = 0x1080000f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x11000000,
+ .end = 0x1100000f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x40100000,
+ .end = 0x4010001f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x40200000,
+ .end = 0x4020001f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0x40700000,
+ .end = 0x4070001f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct plat_serial8250_port serial_platform_data[] = {
+ /* External UARTs */
+ /* FIXME: Shared IRQs on COM1-COM4 will not work properly on v1i1 hardware. */
+ { /* COM1 */
+ .mapbase = 0x10000000,
+ .irq = gpio_to_irq(ZEUS_UARTA_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* COM2 */
+ .mapbase = 0x10800000,
+ .irq = gpio_to_irq(ZEUS_UARTB_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* COM3 */
+ .mapbase = 0x11000000,
+ .irq = gpio_to_irq(ZEUS_UARTC_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* COM4 */
+ .mapbase = 0x11800000,
+ .irq = gpio_to_irq(ZEUS_UARTD_GPIO),
+ .irqflags = IRQF_TRIGGER_RISING,
+ .uartclk = 14745600,
+ .regshift = 1,
+ .flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ /* Internal UARTs */
+ { /* FFUART */
+ .membase = (void *)&FFUART,
+ .mapbase = __PREG(FFUART),
+ .irq = IRQ_FFUART,
+ .uartclk = 921600 * 16,
+ .regshift = 2,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* BTUART */
+ .membase = (void *)&BTUART,
+ .mapbase = __PREG(BTUART),
+ .irq = IRQ_BTUART,
+ .uartclk = 921600 * 16,
+ .regshift = 2,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { /* STUART */
+ .membase = (void *)&STUART,
+ .mapbase = __PREG(STUART),
+ .irq = IRQ_STUART,
+ .uartclk = 921600 * 16,
+ .regshift = 2,
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
+ .iotype = UPIO_MEM,
+ },
+ { },
+};
+
+static struct platform_device zeus_serial_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = serial_platform_data,
+ },
+ .num_resources = ARRAY_SIZE(zeus_serial_resources),
+ .resource = zeus_serial_resources,
+};
+
+/* Ethernet */
+static struct resource zeus_dm9k0_resource[] = {
+ [0] = {
+ .start = ZEUS_ETH0_PHYS,
+ .end = ZEUS_ETH0_PHYS + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = ZEUS_ETH0_PHYS + 2,
+ .end = ZEUS_ETH0_PHYS + 3,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = gpio_to_irq(ZEUS_ETH0_GPIO),
+ .end = gpio_to_irq(ZEUS_ETH0_GPIO),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+};
+
+static struct resource zeus_dm9k1_resource[] = {
+ [0] = {
+ .start = ZEUS_ETH1_PHYS,
+ .end = ZEUS_ETH1_PHYS + 1,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = ZEUS_ETH1_PHYS + 2,
+ .end = ZEUS_ETH1_PHYS + 3,
+ .flags = IORESOURCE_MEM,
+ },
+ [2] = {
+ .start = gpio_to_irq(ZEUS_ETH1_GPIO),
+ .end = gpio_to_irq(ZEUS_ETH1_GPIO),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+};
+
+static struct dm9000_plat_data zeus_dm9k_platdata = {
+ .flags = DM9000_PLATF_16BITONLY,
+};
+
+static struct platform_device zeus_dm9k0_device = {
+ .name = "dm9000",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(zeus_dm9k0_resource),
+ .resource = zeus_dm9k0_resource,
+ .dev = {
+ .platform_data = &zeus_dm9k_platdata,
+ }
+};
+
+static struct platform_device zeus_dm9k1_device = {
+ .name = "dm9000",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(zeus_dm9k1_resource),
+ .resource = zeus_dm9k1_resource,
+ .dev = {
+ .platform_data = &zeus_dm9k_platdata,
+ }
+};
+
+/* External SRAM */
+static struct resource zeus_sram_resource = {
+ .start = ZEUS_SRAM_PHYS,
+ .end = ZEUS_SRAM_PHYS + ZEUS_SRAM_SIZE * 2 - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device zeus_sram_device = {
+ .name = "pxa2xx-8bit-sram",
+ .id = 0,
+ .num_resources = 1,
+ .resource = &zeus_sram_resource,
+};
+
+/* SPI interface on SSP3 */
+static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
+ .num_chipselect = 1,
+ .enable_dma = 1,
+};
+
+static struct platform_device pxa2xx_spi_ssp3_device = {
+ .name = "pxa2xx-spi",
+ .id = 3,
+ .dev = {
+ .platform_data = &pxa2xx_spi_ssp3_master_info,
+ },
+};
+
+/* Leds */
+static struct gpio_led zeus_leds[] = {
+ [0] = {
+ .name = "zeus:yellow:1",
+ .default_trigger = "heartbeat",
+ .gpio = ZEUS_EXT0_GPIO(3),
+ .active_low = 1,
+ },
+ [1] = {
+ .name = "zeus:yellow:2",
+ .default_trigger = "default-on",
+ .gpio = ZEUS_EXT0_GPIO(4),
+ .active_low = 1,
+ },
+ [2] = {
+ .name = "zeus:yellow:3",
+ .default_trigger = "default-on",
+ .gpio = ZEUS_EXT0_GPIO(5),
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data zeus_leds_info = {
+ .leds = zeus_leds,
+ .num_leds = ARRAY_SIZE(zeus_leds),
+};
+
+static struct platform_device zeus_leds_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &zeus_leds_info,
+ },
+};
+
+static void zeus_cf_reset(int state)
+{
+ u16 cpld_state = __raw_readw(ZEUS_CPLD_CONTROL);
+
+ if (state)
+ cpld_state |= ZEUS_CPLD_CONTROL_CF_RST;
+ else
+ cpld_state &= ~ZEUS_CPLD_CONTROL_CF_RST;
+
+ __raw_writew(cpld_state, ZEUS_CPLD_CONTROL);
+}
+
+static struct arcom_pcmcia_pdata zeus_pcmcia_info = {
+ .cd_gpio = ZEUS_CF_CD_GPIO,
+ .rdy_gpio = ZEUS_CF_RDY_GPIO,
+ .pwr_gpio = ZEUS_CF_PWEN_GPIO,
+ .reset = zeus_cf_reset,
+};
+
+static struct platform_device zeus_pcmcia_device = {
+ .name = "zeus-pcmcia",
+ .id = -1,
+ .dev = {
+ .platform_data = &zeus_pcmcia_info,
+ },
+};
+
+static struct platform_device *zeus_devices[] __initdata = {
+ &zeus_serial_device,
+ &zeus_mtd_devices[0],
+ &zeus_dm9k0_device,
+ &zeus_dm9k1_device,
+ &zeus_sram_device,
+ &pxa2xx_spi_ssp3_device,
+ &zeus_leds_device,
+ &zeus_pcmcia_device,
+};
+
+/* AC'97 */
+static pxa2xx_audio_ops_t zeus_ac97_info = {
+ .reset_gpio = 95,
+};
+
+
+/*
+ * USB host
+ */
+
+static int zeus_ohci_init(struct device *dev)
+{
+ int err;
+
+ /* Switch on port 2. */
+ if ((err = gpio_request(ZEUS_USB2_PWREN_GPIO, "USB2_PWREN"))) {
+ dev_err(dev, "Can't request USB2_PWREN\n");
+ return err;
+ }
+
+ if ((err = gpio_direction_output(ZEUS_USB2_PWREN_GPIO, 1))) {
+ gpio_free(ZEUS_USB2_PWREN_GPIO);
+ dev_err(dev, "Can't enable USB2_PWREN\n");
+ return err;
+ }
+
+ /* Port 2 is shared between host and client interface. */
+ UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
+
+ return 0;
+}
+
+static void zeus_ohci_exit(struct device *dev)
+{
+ /* Power-off port 2 */
+ gpio_direction_output(ZEUS_USB2_PWREN_GPIO, 0);
+ gpio_free(ZEUS_USB2_PWREN_GPIO);
+}
+
+static struct pxaohci_platform_data zeus_ohci_platform_data = {
+ .port_mode = PMM_NPS_MODE,
+ .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
+ .init = zeus_ohci_init,
+ .exit = zeus_ohci_exit,
+};
+
+/*
+ * Flat Panel
+ */
+
+static void zeus_lcd_power(int on, struct fb_var_screeninfo *si)
+{
+ gpio_set_value(ZEUS_LCD_EN_GPIO, on);
+}
+
+static void zeus_backlight_power(int on)
+{
+ gpio_set_value(ZEUS_BKLEN_GPIO, on);
+}
+
+static int zeus_setup_fb_gpios(void)
+{
+ int err;
+
+ if ((err = gpio_request(ZEUS_LCD_EN_GPIO, "LCD_EN")))
+ goto out_err;
+
+ if ((err = gpio_direction_output(ZEUS_LCD_EN_GPIO, 0)))
+ goto out_err_lcd;
+
+ if ((err = gpio_request(ZEUS_BKLEN_GPIO, "BKLEN")))
+ goto out_err_lcd;
+
+ if ((err = gpio_direction_output(ZEUS_BKLEN_GPIO, 0)))
+ goto out_err_bkl;
+
+ return 0;
+
+out_err_bkl:
+ gpio_free(ZEUS_BKLEN_GPIO);
+out_err_lcd:
+ gpio_free(ZEUS_LCD_EN_GPIO);
+out_err:
+ return err;
+}
+
+static struct pxafb_mode_info zeus_fb_mode_info[] = {
+ {
+ .pixclock = 39722,
+
+ .xres = 640,
+ .yres = 480,
+
+ .bpp = 16,
+
+ .hsync_len = 63,
+ .left_margin = 16,
+ .right_margin = 81,
+
+ .vsync_len = 2,
+ .upper_margin = 12,
+ .lower_margin = 31,
+
+ .sync = 0,
+ },
+};
+
+static struct pxafb_mach_info zeus_fb_info = {
+ .modes = zeus_fb_mode_info,
+ .num_modes = 1,
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+ .pxafb_lcd_power = zeus_lcd_power,
+ .pxafb_backlight_power = zeus_backlight_power,
+};
+
+/*
+ * MMC/SD Device
+ *
+ * The card detect interrupt isn't debounced, so we delay it by 250ms
+ * to give the card a chance to fully insert/eject.
+ */
+
+static struct pxamci_platform_data zeus_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .detect_delay = HZ/4,
+ .gpio_card_detect = ZEUS_MMC_CD_GPIO,
+ .gpio_card_ro = ZEUS_MMC_WP_GPIO,
+ .gpio_card_ro_invert = 1,
+ .gpio_power = -1
+};
+
+/*
+ * USB Device Controller
+ */
+static void zeus_udc_command(int cmd)
+{
+ switch (cmd) {
+ case PXA2XX_UDC_CMD_DISCONNECT:
+ pr_info("zeus: disconnecting USB client\n");
+ UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
+ break;
+
+ case PXA2XX_UDC_CMD_CONNECT:
+ pr_info("zeus: connecting USB client\n");
+ UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE;
+ break;
+ }
+}
+
+static struct pxa2xx_udc_mach_info zeus_udc_info = {
+ .udc_command = zeus_udc_command,
+};
+
+static void zeus_power_off(void)
+{
+ local_irq_disable();
+ pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP);
+}
+
+int zeus_get_pcb_info(struct i2c_client *client, unsigned gpio,
+ unsigned ngpio, void *context)
+{
+ int i;
+ u8 pcb_info = 0;
+
+ for (i = 0; i < 8; i++) {
+ int pcb_bit = gpio + i + 8;
+
+ if (gpio_request(pcb_bit, "pcb info")) {
+ dev_err(&client->dev, "Can't request pcb info %d\n", i);
+ continue;
+ }
+
+ if (gpio_direction_input(pcb_bit)) {
+ dev_err(&client->dev, "Can't read pcb info %d\n", i);
+ gpio_free(pcb_bit);
+ continue;
+ }
+
+ pcb_info |= !!gpio_get_value(pcb_bit) << i;
+
+ gpio_free(pcb_bit);
+ }
+
+ dev_info(&client->dev, "Zeus PCB version %d issue %d\n",
+ pcb_info >> 4, pcb_info & 0xf);
+
+ return 0;
+}
+
+static struct pca953x_platform_data zeus_pca953x_pdata[] = {
+ [0] = { .gpio_base = ZEUS_EXT0_GPIO_BASE, },
+ [1] = {
+ .gpio_base = ZEUS_EXT1_GPIO_BASE,
+ .setup = zeus_get_pcb_info,
+ },
+ [2] = { .gpio_base = ZEUS_USER_GPIO_BASE, },
+};
+
+static struct i2c_board_info __initdata zeus_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("pca9535", 0x21),
+ .platform_data = &zeus_pca953x_pdata[0],
+ },
+ {
+ I2C_BOARD_INFO("pca9535", 0x22),
+ .platform_data = &zeus_pca953x_pdata[1],
+ },
+ {
+ I2C_BOARD_INFO("pca9535", 0x20),
+ .platform_data = &zeus_pca953x_pdata[2],
+ .irq = gpio_to_irq(ZEUS_EXTGPIO_GPIO),
+ },
+ { I2C_BOARD_INFO("lm75a", 0x48) },
+ { I2C_BOARD_INFO("24c01", 0x50) },
+ { I2C_BOARD_INFO("isl1208", 0x6f) },
+};
+
+static mfp_cfg_t zeus_pin_config[] __initdata = {
+ GPIO15_nCS_1,
+ GPIO78_nCS_2,
+ GPIO80_nCS_4,
+ GPIO33_nCS_5,
+
+ GPIO22_GPIO,
+ GPIO32_MMC_CLK,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO112_MMC_CMD,
+
+ GPIO88_USBH1_PWR,
+ GPIO89_USBH1_PEN,
+ GPIO119_USBH2_PWR,
+ GPIO120_USBH2_PEN,
+
+ GPIO86_LCD_LDD_16,
+ GPIO87_LCD_LDD_17,
+
+ GPIO102_GPIO,
+ GPIO104_CIF_DD_2,
+ GPIO105_CIF_DD_1,
+
+ GPIO48_nPOE,
+ GPIO49_nPWE,
+ GPIO50_nPIOR,
+ GPIO51_nPIOW,
+ GPIO85_nPCE_1,
+ GPIO54_nPCE_2,
+ GPIO79_PSKTSEL,
+ GPIO55_nPREG,
+ GPIO56_nPWAIT,
+ GPIO57_nIOIS16,
+ GPIO36_GPIO, /* CF CD */
+ GPIO97_GPIO, /* CF PWREN */
+ GPIO99_GPIO, /* CF RDY */
+};
+
+static void __init zeus_init(void)
+{
+ u16 dm9000_msc = 0xe279;
+
+ system_rev = __raw_readw(ZEUS_CPLD_VERSION);
+ pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f));
+
+ /* Fix timings for dm9000s (CS1/CS2)*/
+ MSC0 = (MSC0 & 0xffff) | (dm9000_msc << 16);
+ MSC1 = (MSC1 & 0xffff0000) | dm9000_msc;
+
+ pm_power_off = zeus_power_off;
+
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(zeus_pin_config));
+
+ platform_add_devices(zeus_devices, ARRAY_SIZE(zeus_devices));
+
+ pxa_set_ohci_info(&zeus_ohci_platform_data);
+
+ if (zeus_setup_fb_gpios())
+ pr_err("Failed to setup fb gpios\n");
+ else
+ set_pxa_fb_info(&zeus_fb_info);
+
+ pxa_set_mci_info(&zeus_mci_platform_data);
+ pxa_set_udc_info(&zeus_udc_info);
+ pxa_set_ac97_info(&zeus_ac97_info);
+ pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
+}
+
+static struct map_desc zeus_io_desc[] __initdata = {
+ {
+ .virtual = ZEUS_CPLD_VERSION,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_VERSION_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_CPLD_ISA_IRQ,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_ISA_IRQ_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_CPLD_CONTROL,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_CONTROL_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_CPLD_EXTWDOG,
+ .pfn = __phys_to_pfn(ZEUS_CPLD_EXTWDOG_PHYS),
+ .length = 0x1000,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = ZEUS_PC104IO,
+ .pfn = __phys_to_pfn(ZEUS_PC104IO_PHYS),
+ .length = 0x00800000,
+ .type = MT_DEVICE,
+ },
+};
+
+static void __init zeus_map_io(void)
+{
+ pxa_map_io();
+
+ iotable_init(zeus_io_desc, ARRAY_SIZE(zeus_io_desc));
+
+ /* Clear PSPR to ensure a full restart on wake-up. */
+ PMCR = PSPR = 0;
+
+ /* enable internal 32.768 kHz oscillator (ignore OSCC_OOK) */
+ OSCC |= OSCC_OON;
+
+ /* Some clock cycles later (from OSCC_ON), programme PCFR (OPDE...).
+ * float chip selects and PCMCIA */
+ PCFR = PCFR_OPDE | PCFR_DC_EN | PCFR_FS | PCFR_FP;
+}
+
+MACHINE_START(ARCOM_ZEUS, "Arcom ZEUS")
+ /* Maintainer: Marc Zyngier <maz@misterjones.org> */
+ .phys_io = 0x40000000,
+ .io_pg_offst = ((io_p2v(0x40000000) >> 18) & 0xfffc),
+ .boot_params = 0xa0000100,
+ .map_io = zeus_map_io,
+ .init_irq = zeus_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = zeus_init,
+MACHINE_END
+
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index c48e1f2c334..ee5e392430e 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -70,7 +70,7 @@ config MACH_REALVIEW_PBX
bool "Support RealView/PBX platform"
select ARM_GIC
select HAVE_PATA_PLATFORM
- select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !HIGH_PHYS_OFFSET
+ select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
select ZONE_DMA if SPARSEMEM
help
Include support for the ARM(R) RealView PBX platform.
diff --git a/arch/arm/mach-s3c2410/include/mach/spi.h b/arch/arm/mach-s3c2410/include/mach/spi.h
index 193b39d654e..4d9588373aa 100644
--- a/arch/arm/mach-s3c2410/include/mach/spi.h
+++ b/arch/arm/mach-s3c2410/include/mach/spi.h
@@ -18,6 +18,8 @@ struct s3c2410_spi_info {
unsigned int num_cs; /* total chipselects */
int bus_num; /* bus number to use. */
+ unsigned int use_fiq:1; /* use fiq */
+
void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable);
void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
};
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
index f76d6ff4aeb..0b4a3a03071 100644
--- a/arch/arm/mach-s3c2442/mach-gta02.c
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -268,6 +268,9 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
.batteries = gta02_batteries,
.num_batteries = ARRAY_SIZE(gta02_batteries),
+
+ .charger_reference_current_ma = 1000,
+
.reg_init_data = {
[PCF50633_REGULATOR_AUTO] = {
.constraints = {
diff --git a/arch/arm/mach-s3c24a0/include/mach/memory.h b/arch/arm/mach-s3c24a0/include/mach/memory.h
index 585211ca018..7d74fd5c8d6 100644
--- a/arch/arm/mach-s3c24a0/include/mach/memory.h
+++ b/arch/arm/mach-s3c24a0/include/mach/memory.h
@@ -15,5 +15,7 @@
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
+#define __pfn_to_bus(x) __pfn_to_phys(x)
+#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index 03a7f3857c5..b17d52f7cc4 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -4,6 +4,7 @@ menu "SA11x0 Implementations"
config SA1100_ASSABET
bool "Assabet"
+ select CPU_FREQ_SA1110
help
Say Y here if you are using the Intel(R) StrongARM(R) SA-1110
Microprocessor Development Board (also known as the Assabet).
@@ -19,6 +20,7 @@ config ASSABET_NEPONSET
config SA1100_CERF
bool "CerfBoard"
+ select CPU_FREQ_SA1110
help
The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued).
More information is available at:
@@ -45,6 +47,7 @@ endchoice
config SA1100_COLLIE
bool "Sharp Zaurus SL5500"
+ # FIXME: select CPU_FREQ_SA11x0
select SHARP_LOCOMO
select SHARP_SCOOP
select SHARP_PARAM
@@ -54,6 +57,7 @@ config SA1100_COLLIE
config SA1100_H3100
bool "Compaq iPAQ H3100"
select HTC_EGPIO
+ select CPU_FREQ_SA1100
help
Say Y here if you intend to run this kernel on the Compaq iPAQ
H3100 handheld computer. Information about this machine and the
@@ -64,6 +68,7 @@ config SA1100_H3100
config SA1100_H3600
bool "Compaq iPAQ H3600/H3700"
select HTC_EGPIO
+ select CPU_FREQ_SA1100
help
Say Y here if you intend to run this kernel on the Compaq iPAQ
H3600 handheld computer. Information about this machine and the
@@ -74,6 +79,7 @@ config SA1100_H3600
config SA1100_BADGE4
bool "HP Labs BadgePAD 4"
select SA1111
+ select CPU_FREQ_SA1100
help
Say Y here if you want to build a kernel for the HP Laboratories
BadgePAD 4.
@@ -81,6 +87,7 @@ config SA1100_BADGE4
config SA1100_JORNADA720
bool "HP Jornada 720"
select SA1111
+ # FIXME: select CPU_FREQ_SA11x0
help
Say Y here if you want to build a kernel for the HP Jornada 720
handheld computer. See <http://www.hp.com/jornada/products/720>
@@ -98,12 +105,14 @@ config SA1100_JORNADA720_SSP
config SA1100_HACKKIT
bool "HackKit Core CPU Board"
+ select CPU_FREQ_SA1100
help
Say Y here to support the HackKit Core CPU Board
<http://hackkit.eletztrick.de>;
config SA1100_LART
bool "LART"
+ select CPU_FREQ_SA1100
help
Say Y here if you are using the Linux Advanced Radio Terminal
(also known as the LART). See <http://www.lartmaker.nl/> for
@@ -111,6 +120,7 @@ config SA1100_LART
config SA1100_PLEB
bool "PLEB"
+ select CPU_FREQ_SA1100
help
Say Y here if you are using version 1 of the Portable Linux
Embedded Board (also known as PLEB).
@@ -119,6 +129,7 @@ config SA1100_PLEB
config SA1100_SHANNON
bool "Shannon"
+ select CPU_FREQ_SA1100
help
The Shannon (also known as a Tuxscreen, and also as a IS2630) was a
limited edition webphone produced by Philips. The Shannon is a SA1100
@@ -127,6 +138,7 @@ config SA1100_SHANNON
config SA1100_SIMPAD
bool "Simpad"
+ select CPU_FREQ_SA1110
help
The SIEMENS webpad SIMpad is based on the StrongARM 1110. There
are two different versions CL4 and SL4. CL4 has 32MB RAM and 16MB
@@ -145,3 +157,4 @@ config SA1100_SSP
endmenu
endif
+
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 9faea1511c1..3c1fcd69671 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -58,7 +58,6 @@ static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
2802 /* 280.2 MHz */
};
-#if defined(CONFIG_CPU_FREQ_SA1100) || defined(CONFIG_CPU_FREQ_SA1110)
/* rounds up(!) */
unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
{
@@ -110,17 +109,6 @@ unsigned int sa11x0_getspeed(unsigned int cpu)
return cclk_frequency_100khz[PPCR & 0xf] * 100;
}
-#else
-/*
- * We still need to provide this so building without cpufreq works.
- */
-unsigned int cpufreq_get(unsigned int cpu)
-{
- return cclk_frequency_100khz[PPCR & 0xf] * 100;
-}
-EXPORT_SYMBOL(cpufreq_get);
-#endif
-
/*
* This is the SA11x0 sched_clock implementation. This has
* a resolution of 271ns, and a maximum value of 32025597s (370 days).
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
new file mode 100644
index 00000000000..f4cfee9c7d2
--- /dev/null
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -0,0 +1,281 @@
+/*
+ *
+ * include/linux/coh901318.h
+ *
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * DMA driver for COH 901 318
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#ifndef COH901318_H
+#define COH901318_H
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+#define MAX_DMA_PACKET_SIZE_SHIFT 11
+#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
+
+/**
+ * struct coh901318_lli - linked list item for DMAC
+ * @control: control settings for DMAC
+ * @src_addr: transfer source address
+ * @dst_addr: transfer destination address
+ * @link_addr: physical address to next lli
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
+ * @phy_this: physical address of current lli (only used by pool_free)
+ */
+struct coh901318_lli {
+ u32 control;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ dma_addr_t link_addr;
+
+ void *virt_link_addr;
+ dma_addr_t phy_this;
+};
+/**
+ * struct coh901318_params - parameters for DMAC configuration
+ * @config: DMA config register
+ * @ctrl_lli_last: DMA control register for the last lli in the list
+ * @ctrl_lli: DMA control register for an lli
+ * @ctrl_lli_chained: DMA control register for a chained lli
+ */
+struct coh901318_params {
+ u32 config;
+ u32 ctrl_lli_last;
+ u32 ctrl_lli;
+ u32 ctrl_lli_chained;
+};
+/**
+ * struct coh_dma_channel - dma channel base
+ * @name: ascii name of dma channel
+ * @number: channel id number
+ * @desc_nbr_max: number of preallocated descriptors
+ * @priority_high: channel priority; 0 means low priority, otherwise high
+ * @param: configuration parameters
+ * @dev_addr: physical address of peripheral connected to channel
+ */
+struct coh_dma_channel {
+ const char name[32];
+ const int number;
+ const int desc_nbr_max;
+ const int priority_high;
+ const struct coh901318_params param;
+ const dma_addr_t dev_addr;
+};
+
+/**
+ * dma_access_memory_state_t - register dma for memory access
+ *
+ * @dev: The dma device
+ * @active: 1 means dma intends to access memory
+ * 0 means dma won't access memory
+ */
+typedef void (*dma_access_memory_state_t)(struct device *dev,
+ bool active);
+
+/**
+ * struct powersave - DMA power save structure
+ * @lock: lock protecting data in this struct
+ * @started_channels: bit mask indicating active dma channels
+ */
+struct powersave {
+ spinlock_t lock;
+ u64 started_channels;
+};
+/**
+ * struct coh901318_platform - platform arch structure
+ * @chans_slave: specifying dma slave channels
+ * @chans_memcpy: specifying dma memcpy channels
+ * @access_memory_state: requesting DMA memory access (on / off)
+ * @chan_conf: dma channel configurations
+ * @max_channels: max number of dma channels
+ */
+struct coh901318_platform {
+ const int *chans_slave;
+ const int *chans_memcpy;
+ const dma_access_memory_state_t access_memory_state;
+ const struct coh_dma_channel *chan_conf;
+ const int max_channels;
+};
+
+/**
+ * coh901318_get_bytes_left() - Get number of bytes left on the current transfer
+ * @chan: dma channel handle
+ * return number of bytes left, or negative on error
+ */
+u32 coh901318_get_bytes_left(struct dma_chan *chan);
+
+/**
+ * coh901318_stop() - Stops dma transfer
+ * @chan: dma channel handle
+ * return 0 on success otherwise negative value
+ */
+void coh901318_stop(struct dma_chan *chan);
+
+/**
+ * coh901318_continue() - Resumes a stopped dma transfer
+ * @chan: dma channel handle
+ * return 0 on success otherwise negative value
+ */
+void coh901318_continue(struct dma_chan *chan);
+
+/**
+ * coh901318_filter_id() - DMA channel filter function
+ * @chan: dma channel handle
+ * @chan_id: id of dma channel to be filtered out
+ *
+ * In dma_request_channel() it specifies which channel id is being requested
+ */
+bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
+
+/*
+ * DMA Controller - this accesses the static mappings of the coh901318 dma.
+ *
+ */
+
+#define COH901318_MOD32_MASK (0x1F)
+#define COH901318_WORD_MASK (0xFFFFFFFF)
+/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
+#define COH901318_INT_STATUS1 (0x0000)
+#define COH901318_INT_STATUS2 (0x0004)
+/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_TC_INT_STATUS1 (0x0008)
+#define COH901318_TC_INT_STATUS2 (0x000C)
+/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_TC_INT_CLEAR1 (0x0010)
+#define COH901318_TC_INT_CLEAR2 (0x0014)
+/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
+#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
+/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_BE_INT_STATUS1 (0x0020)
+#define COH901318_BE_INT_STATUS2 (0x0024)
+/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
+#define COH901318_BE_INT_CLEAR1 (0x0028)
+#define COH901318_BE_INT_CLEAR2 (0x002C)
+/* RAW_BE_INT_STATUS - Raw Bus Error Interrupt Status Registers 32bit (R/-) */
+#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
+#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
+
+/*
+ * CX_CFG - Channel Configuration Registers 32bit (R/W)
+ */
+#define COH901318_CX_CFG (0x0100)
+#define COH901318_CX_CFG_SPACING (0x04)
+/* Channel enable activates the dma job */
+#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
+#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
+/* Request Mode */
+#define COH901318_CX_CFG_RM_MASK (0x00000006)
+#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
+#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
+#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
+#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
+/* Linked channel request field. RM must == 11 */
+#define COH901318_CX_CFG_LCRF_SHIFT 3
+#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
+#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
+/* Terminal Counter Interrupt Request Mask */
+#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
+#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
+/* Bus Error interrupt Mask */
+#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
+#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
+
+/*
+ * CX_STAT - Channel Status Registers 32bit (R/-)
+ */
+#define COH901318_CX_STAT (0x0200)
+#define COH901318_CX_STAT_SPACING (0x04)
+#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
+#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
+#define COH901318_CX_STAT_ACTIVE (0x00000002)
+#define COH901318_CX_STAT_ENABLED (0x00000001)
+
+/*
+ * CX_CTRL - Channel Control Registers 32bit (R/W)
+ */
+#define COH901318_CX_CTRL (0x0400)
+#define COH901318_CX_CTRL_SPACING (0x10)
+/* Transfer Count Enable */
+#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
+#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
+/* Transfer Count Value 0 - 4095 */
+#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
+/* Burst count */
+#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
+#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
+#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
+/* Source bus size */
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
+#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
+/* Source address increment */
+#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
+#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
+/* Destination Bus Size */
+#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
+#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
+/* Destination address increment */
+#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
+#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
+/* Master Mode (Master2 is only connected to MSL) */
+#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
+#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
+#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
+/* Terminal Count flag to PER enable */
+#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
+#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
+/* Terminal Count flags to CPU enable */
+#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
+#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
+/* Handshake to peripheral */
+#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
+#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
+#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
+#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
+/* DMA mode */
+#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
+#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
+#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
+/* Primary Request Data Destination */
+#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
+#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
+#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
+
+/*
+ * CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_SRC_ADDR (0x0404)
+#define COH901318_CX_SRC_ADDR_SPACING (0x10)
+
+/*
+ * CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
+ */
+#define COH901318_CX_DST_ADDR (0x0408)
+#define COH901318_CX_DST_ADDR_SPACING (0x10)
+
+/*
+ * CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
+ */
+#define COH901318_CX_LNK_ADDR (0x040C)
+#define COH901318_CX_LNK_ADDR_SPACING (0x10)
+#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
+#endif /* COH901318_H */
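For orientation, a minimal sketch of how a dmaengine client might use the coh901318_filter_id() helper declared above; the capability mask, channel id, and function names here are illustrative assumptions, not anything taken from this patch:

    #include <linux/dmaengine.h>
    #include <mach/coh901318.h>

    /* Hypothetical channel id; a real client would use an id matching the
     * board's coh901318_platform channel configuration. */
    #define EXAMPLE_DMA_CHANNEL 3

    static struct dma_chan *example_request_channel(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* coh901318_filter_id() accepts only the channel whose id matches
             * the value passed as the filter parameter. */
            return dma_request_channel(mask, coh901318_filter_id,
                                       (void *)EXAMPLE_DMA_CHANNEL);
    }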
diff --git a/arch/arm/mach-w90x900/include/mach/nuc900_spi.h b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
new file mode 100644
index 00000000000..bd94819e314
--- /dev/null
+++ b/arch/arm/mach-w90x900/include/mach/nuc900_spi.h
@@ -0,0 +1,35 @@
+/*
+ * arch/arm/mach-w90x900/include/mach/nuc900_spi.h
+ *
+ * Copyright (c) 2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#ifndef __ASM_ARCH_SPI_H
+#define __ASM_ARCH_SPI_H
+
+extern void mfp_set_groupg(struct device *dev);
+
+struct nuc900_spi_info {
+ unsigned int num_cs;
+ unsigned int lsb;
+ unsigned int txneg;
+ unsigned int rxneg;
+ unsigned int divider;
+ unsigned int sleep;
+ unsigned int txnum;
+ unsigned int txbitlen;
+ int bus_num;
+};
+
+struct nuc900_spi_chip {
+ unsigned char bits_per_word;
+};
+
+#endif /* __ASM_ARCH_SPI_H */
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index b63a8f7b95c..a89444a3c01 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(kaddr)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
- * - kaddr - kernel address (guaranteed to be page aligned)
+ * - addr - kernel address
+ * - size - size of region
*/
-ENTRY(fa_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
.long fa_flush_user_cache_range
.long fa_coherent_kern_range
.long fa_coherent_user_range
- .long fa_flush_kern_dcache_page
+ .long fa_flush_kern_dcache_area
.long fa_dma_inv_range
.long fa_dma_clean_range
.long fa_dma_flush_range
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 747f9a9021b..cb8fc6573b1 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,69 +28,120 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
-static inline void sync_writel(unsigned long val, unsigned long reg,
- unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
- unsigned long flags;
-
- spin_lock_irqsave(&l2x0_lock, flags);
- writel(val, l2x0_base + reg);
/* wait for the operation to complete */
- while (readl(l2x0_base + reg) & complete_mask)
+ while (readl(reg) & mask)
;
- spin_unlock_irqrestore(&l2x0_lock, flags);
}
static inline void cache_sync(void)
{
- sync_writel(0, L2X0_CACHE_SYNC, 1);
+ void __iomem *base = l2x0_base;
+ writel(0, base + L2X0_CACHE_SYNC);
+ cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_inv_all(void)
{
+ unsigned long flags;
+
/* invalidate all ways */
- sync_writel(0xff, L2X0_INV_WAY, 0xff);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ writel(0xff, l2x0_base + L2X0_INV_WAY);
+ cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
- unsigned long addr;
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+ spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
- sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ writel(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
- sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ writel(end, base + L2X0_CLEAN_INV_LINE_PA);
}
- for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
- sync_writel(addr, L2X0_INV_LINE_PA, 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
+ writel(start, base + L2X0_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
- unsigned long addr;
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+ spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
- for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
- sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+ writel(start, base + L2X0_CLEAN_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
- unsigned long addr;
+ void __iomem *base = l2x0_base;
+ unsigned long flags;
+ spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
- for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
- sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+ while (start < end) {
+ unsigned long blk_end = start + min(end - start, 4096UL);
+
+ while (start < blk_end) {
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (blk_end < end) {
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+ spin_lock_irqsave(&l2x0_lock, flags);
+ }
+ }
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 8a4abebc478..2a482731ea3 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *page, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_coherent_user_range
- .long v3_flush_kern_dcache_page
+ .long v3_flush_kern_dcache_area
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 3668611cb40..5c7da3e372e 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_coherent_user_range
- .long v4_flush_kern_dcache_page
+ .long v4_flush_kern_dcache_area
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 2ebc1b3bf85..3dbedf1ec0e 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v4wb_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+ add r1, r0, r1
/* fall through */
/*
@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_coherent_user_range
- .long v4wb_flush_kern_dcache_page
+ .long v4wb_flush_kern_dcache_area
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index c54fa2cc40e..b3b7410270b 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
/* fallthrough */
/*
@@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_coherent_user_range
- .long v4wt_flush_kern_dcache_page
+ .long v4wt_flush_kern_dcache_area
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 295e25dd638..4ba0a24ce6f 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
/*
- * v6_flush_kern_dcache_page(kaddr)
+ * v6_flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
- * - kaddr - kernel address (guaranteed to be page aligned)
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v6_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+ add r1, r0, r1
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
@@ -271,7 +272,7 @@ ENTRY(v6_cache_fns)
.long v6_flush_user_cache_range
.long v6_coherent_kern_range
.long v6_coherent_user_range
- .long v6_flush_kern_dcache_page
+ .long v6_flush_kern_dcache_area
.long v6_dma_inv_range
.long v6_dma_clean_range
.long v6_dma_flush_range
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index e1bd9759617..9073db849fb 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -186,16 +186,17 @@ ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
/*
- * v7_flush_kern_dcache_page(kaddr)
+ * v7_flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure that the data held in the page kaddr is written back
* to the page in question.
*
- * - kaddr - kernel address (guaranteed to be page aligned)
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(v7_flush_kern_dcache_page)
+ENTRY(v7_flush_kern_dcache_area)
dcache_line_size r2, r3
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
@@ -203,7 +204,7 @@ ENTRY(v7_flush_kern_dcache_page)
blo 1b
dsb
mov pc, lr
-ENDPROC(v7_flush_kern_dcache_page)
+ENDPROC(v7_flush_kern_dcache_area)
/*
* v7_dma_inv_range(start,end)
@@ -279,7 +280,7 @@ ENTRY(v7_cache_fns)
.long v7_flush_user_cache_range
.long v7_coherent_kern_range
.long v7_coherent_user_range
- .long v7_flush_kern_dcache_page
+ .long v7_flush_kern_dcache_area
.long v7_dma_inv_range
.long v7_dma_clean_range
.long v7_dma_flush_range
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 329594e760c..6f3a4b7a3b8 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -131,7 +131,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
*/
if (addr)
#endif
- __cpuc_flush_dcache_page(addr);
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
@@ -258,5 +258,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
* in this mapping of the page. FIXME: this is overkill
* since we actually ask for a write-back and invalidate.
*/
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
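The two call sites above show the interface change running through this series: flush_kern_dcache_page(kaddr) becomes flush_kern_dcache_area(addr, size), so callers pass an explicit size instead of relying on a page-aligned, page-sized region. A minimal sketch of a caller under that assumption (the helper name is hypothetical):

    #include <linux/mm.h>
    #include <asm/cacheflush.h>

    /* Hypothetical helper: write back and invalidate the D-cache lines
     * covering one page, using the renamed interface. */
    static void example_flush_page(struct page *page)
    {
            /* Old interface (removed by this series), page-granular:
             *      __cpuc_flush_dcache_page(page_address(page));
             * New interface: the caller states the size explicitly. */
            __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
    }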
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 30f82fb5918..2be1ec7c1b4 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
if (kvaddr >= (void *)FIXADDR_START) {
- __cpuc_flush_dcache_page((void *)vaddr);
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2b7996401b0..f5abc51c5a0 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -54,7 +54,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
- if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
+ if (aliasing && flags & MAP_SHARED &&
+ (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 900811cc913..374a8311bc8 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -61,7 +61,7 @@ void setup_mm_for_reboot(char mode)
void flush_dcache_page(struct page *page)
{
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index d9fb4b98c49..8012e24282b 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1020_flush_kern_dcache_page)
+ENTRY(arm1020_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
mcr p15, 0, ip, c7, c10, 4 @ drain WB
add r0, r0, #CACHE_DLINESIZE
@@ -335,7 +336,7 @@ ENTRY(arm1020_cache_fns)
.long arm1020_flush_user_cache_range
.long arm1020_coherent_kern_range
.long arm1020_coherent_user_range
- .long arm1020_flush_kern_dcache_page
+ .long arm1020_flush_kern_dcache_area
.long arm1020_dma_inv_range
.long arm1020_dma_clean_range
.long arm1020_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 7453b75dcea..41fe25d234f 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1020e_flush_kern_dcache_page)
+ENTRY(arm1020e_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -321,7 +322,7 @@ ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_user_cache_range
.long arm1020e_coherent_kern_range
.long arm1020e_coherent_user_range
- .long arm1020e_flush_kern_dcache_page
+ .long arm1020e_flush_kern_dcache_area
.long arm1020e_dma_inv_range
.long arm1020e_dma_clean_range
.long arm1020e_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8eb72d75a8b..20a5b1b31a7 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1022_flush_kern_dcache_page)
+ENTRY(arm1022_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -310,7 +311,7 @@ ENTRY(arm1022_cache_fns)
.long arm1022_flush_user_cache_range
.long arm1022_coherent_kern_range
.long arm1022_coherent_user_range
- .long arm1022_flush_kern_dcache_page
+ .long arm1022_flush_kern_dcache_area
.long arm1022_dma_inv_range
.long arm1022_dma_clean_range
.long arm1022_dma_flush_range
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 3b59f0d6713..96aedb10fcc 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - page - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm1026_flush_kern_dcache_page)
+ENTRY(arm1026_flush_kern_dcache_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
- add r1, r0, #PAGE_SZ
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -304,7 +305,7 @@ ENTRY(arm1026_cache_fns)
.long arm1026_flush_user_cache_range
.long arm1026_coherent_kern_range
.long arm1026_coherent_user_range
- .long arm1026_flush_kern_dcache_page
+ .long arm1026_flush_kern_dcache_area
.long arm1026_dma_inv_range
.long arm1026_dma_clean_range
.long arm1026_dma_flush_range
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2b7c197cc58..471669e2d7c 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm920_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm920_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -293,7 +294,7 @@ ENTRY(arm920_cache_fns)
.long arm920_flush_user_cache_range
.long arm920_coherent_kern_range
.long arm920_coherent_user_range
- .long arm920_flush_kern_dcache_page
+ .long arm920_flush_kern_dcache_area
.long arm920_dma_inv_range
.long arm920_dma_clean_range
.long arm920_dma_flush_range
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 06a1aa4e339..ee111b00fa4 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm922_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm922_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -295,7 +296,7 @@ ENTRY(arm922_cache_fns)
.long arm922_flush_user_cache_range
.long arm922_coherent_kern_range
.long arm922_coherent_user_range
- .long arm922_flush_kern_dcache_page
+ .long arm922_flush_kern_dcache_area
.long arm922_dma_inv_range
.long arm922_dma_clean_range
.long arm922_dma_flush_range
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index cb53435a85a..8deb5bde58e 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm925_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm925_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -346,7 +347,7 @@ ENTRY(arm925_cache_fns)
.long arm925_flush_user_cache_range
.long arm925_coherent_kern_range
.long arm925_coherent_user_range
- .long arm925_flush_kern_dcache_page
+ .long arm925_flush_kern_dcache_area
.long arm925_dma_inv_range
.long arm925_dma_clean_range
.long arm925_dma_flush_range
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 1c4848704bb..64db6e275a4 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm926_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm926_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -309,7 +310,7 @@ ENTRY(arm926_cache_fns)
.long arm926_flush_user_cache_range
.long arm926_coherent_kern_range
.long arm926_coherent_user_range
- .long arm926_flush_kern_dcache_page
+ .long arm926_flush_kern_dcache_area
.long arm926_dma_inv_range
.long arm926_dma_clean_range
.long arm926_dma_flush_range
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 5b0f8464c8f..8196b9f401f 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range)
/* FALLTHROUGH */
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(arm940_flush_kern_dcache_page)
+ENTRY(arm940_flush_kern_dcache_area)
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -238,7 +239,7 @@ ENTRY(arm940_cache_fns)
.long arm940_flush_user_cache_range
.long arm940_coherent_kern_range
.long arm940_coherent_user_range
- .long arm940_flush_kern_dcache_page
+ .long arm940_flush_kern_dcache_area
.long arm940_dma_inv_range
.long arm940_dma_clean_range
.long arm940_dma_flush_range
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 40c0449a139..9a951239c86 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
* (same as arm926)
*/
-ENTRY(arm946_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(arm946_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -280,7 +281,7 @@ ENTRY(arm946_cache_fns)
.long arm946_flush_user_cache_range
.long arm946_coherent_kern_range
.long arm946_coherent_user_range
- .long arm946_flush_kern_dcache_page
+ .long arm946_flush_kern_dcache_area
.long arm946_dma_inv_range
.long arm946_dma_clean_range
.long arm946_dma_flush_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d0d7795200f..dbc39383e66 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
.align 5
-ENTRY(feroceon_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(feroceon_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page)
mov pc, lr
.align 5
-ENTRY(feroceon_range_flush_kern_dcache_page)
+ENTRY(feroceon_range_flush_kern_dcache_area)
mrs r2, cpsr
add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
orr r3, r2, #PSR_I_BIT
@@ -372,7 +373,7 @@ ENTRY(feroceon_cache_fns)
.long feroceon_flush_user_cache_range
.long feroceon_coherent_kern_range
.long feroceon_coherent_user_range
- .long feroceon_flush_kern_dcache_page
+ .long feroceon_flush_kern_dcache_area
.long feroceon_dma_inv_range
.long feroceon_dma_clean_range
.long feroceon_dma_flush_range
@@ -383,7 +384,7 @@ ENTRY(feroceon_range_cache_fns)
.long feroceon_flush_user_cache_range
.long feroceon_coherent_kern_range
.long feroceon_coherent_user_range
- .long feroceon_range_flush_kern_dcache_page
+ .long feroceon_range_flush_kern_dcache_area
.long feroceon_range_dma_inv_range
.long feroceon_range_dma_clean_range
.long feroceon_range_dma_flush_range
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 52b5fd74fbb..9674d36cc97 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(mohawk_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(mohawk_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
@@ -273,7 +274,7 @@ ENTRY(mohawk_cache_fns)
.long mohawk_flush_user_cache_range
.long mohawk_coherent_kern_range
.long mohawk_coherent_user_range
- .long mohawk_flush_kern_dcache_page
+ .long mohawk_flush_kern_dcache_area
.long mohawk_dma_inv_range
.long mohawk_dma_clean_range
.long mohawk_dma_flush_range
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index ac5c80062b7..3e6210b4d6d 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,8 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
EXPORT_SYMBOL(__cpuc_flush_user_all);
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
-EXPORT_SYMBOL(dmac_inv_range); /* because of flush_ioremap_region() */
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
#else
EXPORT_SYMBOL(cpu_cache);
#endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 5485c821101..395cc90c661 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -254,10 +254,9 @@ __pj4_v6_proc_info:
.long 0x560f5810
.long 0xff0ffff0
.long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index fab134e2982..96456f54879 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache.
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(xsc3_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(xsc3_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
add r0, r0, #CACHELINESIZE
cmp r0, r1
@@ -309,7 +310,7 @@ ENTRY(xsc3_cache_fns)
.long xsc3_flush_user_cache_range
.long xsc3_coherent_kern_range
.long xsc3_coherent_user_range
- .long xsc3_flush_kern_dcache_page
+ .long xsc3_flush_kern_dcache_area
.long xsc3_dma_inv_range
.long xsc3_dma_clean_range
.long xsc3_dma_flush_range
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index f056c283682..93df47265f2 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range)
mov pc, lr
/*
- * flush_kern_dcache_page(void *page)
+ * flush_kern_dcache_area(void *addr, size_t size)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
- * - addr - page aligned address
+ * - addr - kernel address
+ * - size - region size
*/
-ENTRY(xscale_flush_kern_dcache_page)
- add r1, r0, #PAGE_SZ
+ENTRY(xscale_flush_kern_dcache_area)
+ add r1, r0, r1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHELINESIZE
@@ -368,7 +369,7 @@ ENTRY(xscale_cache_fns)
.long xscale_flush_user_cache_range
.long xscale_coherent_kern_range
.long xscale_coherent_user_range
- .long xscale_flush_kern_dcache_page
+ .long xscale_flush_kern_dcache_area
.long xscale_dma_inv_range
.long xscale_dma_clean_range
.long xscale_dma_flush_range
@@ -392,7 +393,7 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
.long xscale_flush_user_cache_range
.long xscale_coherent_kern_range
.long xscale_coherent_user_range
- .long xscale_flush_kern_dcache_page
+ .long xscale_flush_kern_dcache_area
.long xscale_dma_flush_range
.long xscale_dma_clean_range
.long xscale_dma_flush_range
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 4cbca9da150..996cbac6932 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_ARCH_MX1) += iomux-mx1-mx2.o dma-mx1-mx2.o
obj-$(CONFIG_ARCH_MX2) += iomux-mx1-mx2.o dma-mx1-mx2.o
obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o
obj-$(CONFIG_MXC_PWM) += pwm.o
+obj-$(CONFIG_USB_EHCI_MXC) += ehci.o
obj-$(CONFIG_MXC_ULPI) += ulpi.o
obj-$(CONFIG_ARCH_MXC_AUDMUX_V1) += audmux-v1.o
obj-$(CONFIG_ARCH_MXC_AUDMUX_V2) += audmux-v2.o
diff --git a/arch/arm/plat-mxc/ehci.c b/arch/arm/plat-mxc/ehci.c
new file mode 100644
index 00000000000..41599be882e
--- /dev/null
+++ b/arch/arm/plat-mxc/ehci.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define USBCTRL_OTGBASE_OFFSET 0x600
+
+#define MX31_OTG_SIC_SHIFT 29
+#define MX31_OTG_SIC_MASK (0xf << MX31_OTG_SIC_SHIFT)
+#define MX31_OTG_PM_BIT (1 << 24)
+
+#define MX31_H2_SIC_SHIFT 21
+#define MX31_H2_SIC_MASK (0xf << MX31_H2_SIC_SHIFT)
+#define MX31_H2_PM_BIT (1 << 16)
+#define MX31_H2_DT_BIT (1 << 5)
+
+#define MX31_H1_SIC_SHIFT 13
+#define MX31_H1_SIC_MASK (0xf << MX31_H1_SIC_SHIFT)
+#define MX31_H1_PM_BIT (1 << 8)
+#define MX31_H1_DT_BIT (1 << 4)
+
+int mxc_set_usbcontrol(int port, unsigned int flags)
+{
+ unsigned int v;
+
+ if (cpu_is_mx31()) {
+ v = readl(IO_ADDRESS(MX31_OTG_BASE_ADDR +
+ USBCTRL_OTGBASE_OFFSET));
+
+ switch (port) {
+ case 0: /* OTG port */
+ v &= ~(MX31_OTG_SIC_MASK | MX31_OTG_PM_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK)
+ << MX31_OTG_SIC_SHIFT;
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v |= MX31_OTG_PM_BIT;
+
+ break;
+ case 1: /* H1 port */
+ v &= ~(MX31_H1_SIC_MASK | MX31_H1_PM_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK)
+ << MX31_H1_SIC_SHIFT;
+ if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+ v |= MX31_H1_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX31_H1_DT_BIT;
+
+ break;
+ case 2: /* H2 port */
+ v &= ~(MX31_H2_SIC_MASK | MX31_H2_PM_BIT);
+ v |= (flags & MXC_EHCI_INTERFACE_MASK)
+ << MX31_H2_SIC_SHIFT;
+ if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+ v |= MX31_H2_PM_BIT;
+
+ if (!(flags & MXC_EHCI_TTL_ENABLED))
+ v |= MX31_H2_DT_BIT;
+
+ break;
+ }
+
+ writel(v, IO_ADDRESS(MX31_OTG_BASE_ADDR +
+ USBCTRL_OTGBASE_OFFSET));
+ return 0;
+ }
+
+ printk(KERN_WARNING
+ "%s() unable to setup USBCONTROL for this CPU\n", __func__);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(mxc_set_usbcontrol);
+
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
new file mode 100644
index 00000000000..8f796239393
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
@@ -0,0 +1,37 @@
+#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
+#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
+
+/* values for portsc field */
+#define MXC_EHCI_PHY_LOW_POWER_SUSPEND (1 << 23)
+#define MXC_EHCI_FORCE_FS (1 << 24)
+#define MXC_EHCI_UTMI_8BIT (0 << 28)
+#define MXC_EHCI_UTMI_16BIT (1 << 28)
+#define MXC_EHCI_SERIAL (1 << 29)
+#define MXC_EHCI_MODE_UTMI (0 << 30)
+#define MXC_EHCI_MODE_PHILIPS (1 << 30)
+#define MXC_EHCI_MODE_ULPI (2 << 30)
+#define MXC_EHCI_MODE_SERIAL (3 << 30)
+
+/* values for flags field */
+#define MXC_EHCI_INTERFACE_DIFF_UNI (0 << 0)
+#define MXC_EHCI_INTERFACE_DIFF_BI (1 << 0)
+#define MXC_EHCI_INTERFACE_SINGLE_UNI (2 << 0)
+#define MXC_EHCI_INTERFACE_SINGLE_BI (3 << 0)
+#define MXC_EHCI_INTERFACE_MASK (0xf)
+
+#define MXC_EHCI_POWER_PINS_ENABLED (1 << 5)
+#define MXC_EHCI_TTL_ENABLED (1 << 6)
+
+struct mxc_usbh_platform_data {
+ int (*init)(struct platform_device *pdev);
+ int (*exit)(struct platform_device *pdev);
+
+ unsigned int portsc;
+ unsigned int flags;
+ struct otg_transceiver *otg;
+};
+
+int mxc_set_usbcontrol(int port, unsigned int flags);
+
+#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
+
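As an illustration of how the header above is meant to be consumed, a board file might define platform data along the following lines; the hook, flag values, and names are assumptions made for this sketch, not taken from the patch:

    #include <linux/platform_device.h>
    #include <mach/mxc_ehci.h>

    /* Hypothetical init hook for an i.MX31 board's USB H2 port: select a
     * single-ended, bidirectional serial transceiver and enable the
     * SoC-controlled power pins. */
    static int example_usbh2_init(struct platform_device *pdev)
    {
            return mxc_set_usbcontrol(2, MXC_EHCI_INTERFACE_SINGLE_BI |
                                         MXC_EHCI_POWER_PINS_ENABLED);
    }

    static struct mxc_usbh_platform_data example_usbh2_pdata = {
            .init   = example_usbh2_init,
            .portsc = MXC_EHCI_MODE_SERIAL,
            .flags  = MXC_EHCI_INTERFACE_SINGLE_BI |
                      MXC_EHCI_POWER_PINS_ENABLED,
    };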
diff --git a/arch/arm/plat-mxc/include/mach/mxc_nand.h b/arch/arm/plat-mxc/include/mach/mxc_nand.h
index 2b972df22d1..5d2d21d414e 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_nand.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_nand.h
@@ -22,6 +22,7 @@
struct mxc_nand_platform_data {
int width; /* data bus width in bytes */
- int hw_ecc; /* 0 if supress hardware ECC */
+ int hw_ecc:1; /* 0 to suppress hardware ECC */
+ int flash_bbt:1; /* set to 1 to use a flash based bbt */
};
#endif /* __ASM_ARCH_NAND_H */
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index f348ddfb049..e2ea04a4c8a 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -27,6 +27,7 @@ config ARCH_OMAP4
bool "TI OMAP4"
select CPU_V7
select ARM_GIC
+ select COMMON_CLKDEV
endchoice
@@ -42,28 +43,6 @@ config OMAP_DEBUG_LEDS
depends on OMAP_DEBUG_DEVICES
default y if LEDS || LEDS_OMAP_DEBUG
-config OMAP_DEBUG_POWERDOMAIN
- bool "Emit debug messages from powerdomain layer"
- depends on ARCH_OMAP2 || ARCH_OMAP3
- help
- Say Y here if you want to compile in powerdomain layer
- debugging messages for OMAP2/3. These messages can
- provide more detail as to why some powerdomain calls
- may be failing, and will also emit a descriptive message
- for every powerdomain register write. However, the
- extra detail costs some memory.
-
-config OMAP_DEBUG_CLOCKDOMAIN
- bool "Emit debug messages from clockdomain layer"
- depends on ARCH_OMAP2 || ARCH_OMAP3
- help
- Say Y here if you want to compile in clockdomain layer
- debugging messages for OMAP2/3. These messages can
- provide more detail as to why some clockdomain calls
- may be failing, and will also emit a descriptive message
- for every clockdomain register write. However, the
- extra detail costs some memory.
-
config OMAP_RESET_CLOCKS
bool "Reset unused clocks during boot"
depends on ARCH_OMAP
@@ -78,28 +57,28 @@ config OMAP_RESET_CLOCKS
config OMAP_MUX
bool "OMAP multiplexing support"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP
default y
- help
- Pin multiplexing support for OMAP boards. If your bootloader
- sets the multiplexing correctly, say N. Otherwise, or if unsure,
- say Y.
+ help
+ Pin multiplexing support for OMAP boards. If your bootloader
+ sets the multiplexing correctly, say N. Otherwise, or if unsure,
+ say Y.
config OMAP_MUX_DEBUG
bool "Multiplexing debug output"
- depends on OMAP_MUX
- help
- Makes the multiplexing functions print out a lot of debug info.
- This is useful if you want to find out the correct values of the
- multiplexing registers.
+ depends on OMAP_MUX
+ help
+ Makes the multiplexing functions print out a lot of debug info.
+ This is useful if you want to find out the correct values of the
+ multiplexing registers.
config OMAP_MUX_WARNINGS
bool "Warn about pins the bootloader didn't set up"
- depends on OMAP_MUX
- default y
- help
+ depends on OMAP_MUX
+ default y
+ help
Choose Y here to warn whenever driver initialization logic needs
- to change the pin multiplexing setup. When there are no warnings
+ to change the pin multiplexing setup. When there are no warnings
printed, it's safe to deselect OMAP_MUX for your product.
config OMAP_MCBSP
@@ -125,7 +104,7 @@ config OMAP_IOMMU_DEBUG
tristate
choice
- prompt "System timer"
+ prompt "System timer"
default OMAP_MPU_TIMER
config OMAP_MPU_TIMER
@@ -148,11 +127,11 @@ config OMAP_32K_TIMER
endchoice
config OMAP_32K_TIMER_HZ
- int "Kernel internal timer frequency for 32KHz timer"
- range 32 1024
- depends on OMAP_32K_TIMER
- default "128"
- help
+ int "Kernel internal timer frequency for 32KHz timer"
+ range 32 1024
+ depends on OMAP_32K_TIMER
+ default "128"
+ help
Kernel internal timer frequency should be a divisor of 32768,
such as 64 or 128.
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 681bfc37ebb..89cafc93724 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -40,36 +40,10 @@ static struct clk_functions *arch_clock;
* clock framework is not up , it is defined here to avoid rework in
* every driver. Also dummy prcm reset function is added */
-/* Dummy hooks only for OMAP4.For rest OMAPs, common clkdev is used */
-#if defined(CONFIG_ARCH_OMAP4)
-struct clk *clk_get(struct device *dev, const char *id)
-{
- return NULL;
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
-void omap2_clk_prepare_for_reboot(void)
-{
-}
-EXPORT_SYMBOL(omap2_clk_prepare_for_reboot);
-
-void omap_prcm_arch_reset(char mode)
-{
-}
-EXPORT_SYMBOL(omap_prcm_arch_reset);
-#endif
int clk_enable(struct clk *clk)
{
unsigned long flags;
int ret = 0;
- if (cpu_is_omap44xx())
- /* OMAP4 clk framework not supported yet */
- return 0;
if (clk == NULL || IS_ERR(clk))
return -EINVAL;
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index cc050b3313b..bf1eaf3a27d 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -280,16 +280,18 @@ void __init omap2_set_globals_343x(void)
#if defined(CONFIG_ARCH_OMAP4)
static struct omap_globals omap4_globals = {
.class = OMAP443X_CLASS,
- .tap = OMAP2_L4_IO_ADDRESS(0x4830a000),
+ .tap = OMAP2_L4_IO_ADDRESS(OMAP443X_SCM_BASE),
.ctrl = OMAP2_L4_IO_ADDRESS(OMAP443X_CTRL_BASE),
.prm = OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE),
.cm = OMAP2_L4_IO_ADDRESS(OMAP4430_CM_BASE),
+ .cm2 = OMAP2_L4_IO_ADDRESS(OMAP4430_CM2_BASE),
};
void __init omap2_set_globals_443x(void)
{
omap2_set_globals_tap(&omap4_globals);
omap2_set_globals_control(&omap4_globals);
+ omap2_set_globals_prcm(&omap4_globals);
}
#endif
diff --git a/arch/arm/plat-omap/debug-devices.c b/arch/arm/plat-omap/debug-devices.c
index 09c1107637f..923c9621096 100644
--- a/arch/arm/plat-omap/debug-devices.c
+++ b/arch/arm/plat-omap/debug-devices.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/smc91x.h>
#include <mach/hardware.h>
@@ -24,6 +25,12 @@
* platforms include H2, H3, H4, and Perseus2.
*/
+static struct smc91x_platdata smc91x_info = {
+ .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
static struct resource smc91x_resources[] = {
[0] = {
.flags = IORESOURCE_MEM,
@@ -36,6 +43,9 @@ static struct resource smc91x_resources[] = {
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = -1,
+ .dev = {
+ .platform_data = &smc91x_info,
+ },
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
diff --git a/arch/arm/plat-omap/debug-leds.c b/arch/arm/plat-omap/debug-leds.c
index 6c768b71ad6..53fcef7c520 100644
--- a/arch/arm/plat-omap/debug-leds.c
+++ b/arch/arm/plat-omap/debug-leds.c
@@ -293,7 +293,7 @@ static int fpga_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops fpga_dev_pm_ops = {
+static const struct dev_pm_ops fpga_dev_pm_ops = {
.suspend_noirq = fpga_suspend_noirq,
.resume_noirq = fpga_resume_noirq,
};
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index f86617869b3..30b5db73017 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -242,6 +242,39 @@ fail:
/*-------------------------------------------------------------------------*/
+#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+#define OMAP_RNG_BASE 0x480A0000
+#else
+#define OMAP_RNG_BASE 0xfffe5000
+#endif
+
+static struct resource rng_resources[] = {
+ {
+ .start = OMAP_RNG_BASE,
+ .end = OMAP_RNG_BASE + 0x4f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device omap_rng_device = {
+ .name = "omap_rng",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rng_resources),
+ .resource = rng_resources,
+};
+
+static void omap_init_rng(void)
+{
+ (void) platform_device_register(&omap_rng_device);
+}
+#else
+static inline void omap_init_rng(void) {}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
/* Numbering for the SPI-capable controllers when used for SPI:
* spi = 1
* uwire = 2
@@ -324,39 +357,6 @@ static void omap_init_wdt(void)
static inline void omap_init_wdt(void) {}
#endif
-/*-------------------------------------------------------------------------*/
-
-#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
-
-#ifdef CONFIG_ARCH_OMAP24XX
-#define OMAP_RNG_BASE 0x480A0000
-#else
-#define OMAP_RNG_BASE 0xfffe5000
-#endif
-
-static struct resource rng_resources[] = {
- {
- .start = OMAP_RNG_BASE,
- .end = OMAP_RNG_BASE + 0x4f,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap_rng_device = {
- .name = "omap_rng",
- .id = -1,
- .num_resources = ARRAY_SIZE(rng_resources),
- .resource = rng_resources,
-};
-
-static void omap_init_rng(void)
-{
- (void) platform_device_register(&omap_rng_device);
-}
-#else
-static inline void omap_init_rng(void) {}
-#endif
-
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
@@ -384,9 +384,9 @@ static int __init omap_init_devices(void)
*/
omap_init_dsp();
omap_init_kp();
+ omap_init_rng();
omap_init_uwire();
omap_init_wdt();
- omap_init_rng();
return 0;
}
arch_initcall(omap_init_devices);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index d17375e06a1..09d82b3c66c 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -47,7 +47,6 @@ enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif
#define OMAP_DMA_ACTIVE 0x01
-#define OMAP_DMA_CCR_EN (1 << 7)
#define OMAP2_DMA_CSR_CLEAR_MASK 0xffe
#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
@@ -1120,17 +1119,8 @@ int omap_dma_running(void)
{
int lch;
- /*
- * On OMAP1510, internal LCD controller will start the transfer
- * when it gets enabled, so assume DMA running if LCD enabled.
- */
- if (cpu_is_omap1510())
- if (omap_readw(0xfffec000 + 0x00) & (1 << 0))
- return 1;
-
- /* Check if LCD DMA is running */
- if (cpu_is_omap16xx())
- if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
+ if (cpu_class_is_omap1())
+ if (omap_lcd_dma_running())
return 1;
for (lch = 0; lch < dma_chan_count; lch++)
@@ -1990,377 +1980,6 @@ static struct irqaction omap24xx_dma_irq;
/*----------------------------------------------------------------------------*/
-static struct lcd_dma_info {
- spinlock_t lock;
- int reserved;
- void (*callback)(u16 status, void *data);
- void *cb_data;
-
- int active;
- unsigned long addr, size;
- int rotate, data_type, xres, yres;
- int vxres;
- int mirror;
- int xscale, yscale;
- int ext_ctrl;
- int src_port;
- int single_transfer;
-} lcd_dma;
-
-void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
- int data_type)
-{
- lcd_dma.addr = addr;
- lcd_dma.data_type = data_type;
- lcd_dma.xres = fb_xres;
- lcd_dma.yres = fb_yres;
-}
-EXPORT_SYMBOL(omap_set_lcd_dma_b1);
-
-void omap_set_lcd_dma_src_port(int port)
-{
- lcd_dma.src_port = port;
-}
-
-void omap_set_lcd_dma_ext_controller(int external)
-{
- lcd_dma.ext_ctrl = external;
-}
-EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller);
-
-void omap_set_lcd_dma_single_transfer(int single)
-{
- lcd_dma.single_transfer = single;
-}
-EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
-
-void omap_set_lcd_dma_b1_rotation(int rotate)
-{
- if (omap_dma_in_1510_mode()) {
- printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
- BUG();
- return;
- }
- lcd_dma.rotate = rotate;
-}
-EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
-
-void omap_set_lcd_dma_b1_mirror(int mirror)
-{
- if (omap_dma_in_1510_mode()) {
- printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
- BUG();
- }
- lcd_dma.mirror = mirror;
-}
-EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
-
-void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
-{
- if (omap_dma_in_1510_mode()) {
- printk(KERN_ERR "DMA virtual resulotion is not supported "
- "in 1510 mode\n");
- BUG();
- }
- lcd_dma.vxres = vxres;
-}
-EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
-
-void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
-{
- if (omap_dma_in_1510_mode()) {
- printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
- BUG();
- }
- lcd_dma.xscale = xscale;
- lcd_dma.yscale = yscale;
-}
-EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale);
-
-static void set_b1_regs(void)
-{
- unsigned long top, bottom;
- int es;
- u16 w;
- unsigned long en, fn;
- long ei, fi;
- unsigned long vxres;
- unsigned int xscale, yscale;
-
- switch (lcd_dma.data_type) {
- case OMAP_DMA_DATA_TYPE_S8:
- es = 1;
- break;
- case OMAP_DMA_DATA_TYPE_S16:
- es = 2;
- break;
- case OMAP_DMA_DATA_TYPE_S32:
- es = 4;
- break;
- default:
- BUG();
- return;
- }
-
- vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres;
- xscale = lcd_dma.xscale ? lcd_dma.xscale : 1;
- yscale = lcd_dma.yscale ? lcd_dma.yscale : 1;
- BUG_ON(vxres < lcd_dma.xres);
-
-#define PIXADDR(x, y) (lcd_dma.addr + \
- ((y) * vxres * yscale + (x) * xscale) * es)
-#define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1)
-
- switch (lcd_dma.rotate) {
- case 0:
- if (!lcd_dma.mirror) {
- top = PIXADDR(0, 0);
- bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
- /* 1510 DMA requires the bottom address to be 2 more
- * than the actual last memory access location. */
- if (omap_dma_in_1510_mode() &&
- lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
- bottom += 2;
- ei = PIXSTEP(0, 0, 1, 0);
- fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1);
- } else {
- top = PIXADDR(lcd_dma.xres - 1, 0);
- bottom = PIXADDR(0, lcd_dma.yres - 1);
- ei = PIXSTEP(1, 0, 0, 0);
- fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1);
- }
- en = lcd_dma.xres;
- fn = lcd_dma.yres;
- break;
- case 90:
- if (!lcd_dma.mirror) {
- top = PIXADDR(0, lcd_dma.yres - 1);
- bottom = PIXADDR(lcd_dma.xres - 1, 0);
- ei = PIXSTEP(0, 1, 0, 0);
- fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1);
- } else {
- top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
- bottom = PIXADDR(0, 0);
- ei = PIXSTEP(0, 1, 0, 0);
- fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1);
- }
- en = lcd_dma.yres;
- fn = lcd_dma.xres;
- break;
- case 180:
- if (!lcd_dma.mirror) {
- top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
- bottom = PIXADDR(0, 0);
- ei = PIXSTEP(1, 0, 0, 0);
- fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0);
- } else {
- top = PIXADDR(0, lcd_dma.yres - 1);
- bottom = PIXADDR(lcd_dma.xres - 1, 0);
- ei = PIXSTEP(0, 0, 1, 0);
- fi = PIXSTEP(lcd_dma.xres - 1, 1, 0, 0);
- }
- en = lcd_dma.xres;
- fn = lcd_dma.yres;
- break;
- case 270:
- if (!lcd_dma.mirror) {
- top = PIXADDR(lcd_dma.xres - 1, 0);
- bottom = PIXADDR(0, lcd_dma.yres - 1);
- ei = PIXSTEP(0, 0, 0, 1);
- fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0);
- } else {
- top = PIXADDR(0, 0);
- bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
- ei = PIXSTEP(0, 0, 0, 1);
- fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0);
- }
- en = lcd_dma.yres;
- fn = lcd_dma.xres;
- break;
- default:
- BUG();
- return; /* Suppress warning about uninitialized vars */
- }
-
- if (omap_dma_in_1510_mode()) {
- omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
- omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
- omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
- omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L);
-
- return;
- }
-
- /* 1610 regs */
- omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U);
- omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L);
- omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U);
- omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L);
-
- omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1);
- omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1);
-
- w = omap_readw(OMAP1610_DMA_LCD_CSDP);
- w &= ~0x03;
- w |= lcd_dma.data_type;
- omap_writew(w, OMAP1610_DMA_LCD_CSDP);
-
- w = omap_readw(OMAP1610_DMA_LCD_CTRL);
- /* Always set the source port as SDRAM for now*/
- w &= ~(0x03 << 6);
- if (lcd_dma.callback != NULL)
- w |= 1 << 1; /* Block interrupt enable */
- else
- w &= ~(1 << 1);
- omap_writew(w, OMAP1610_DMA_LCD_CTRL);
-
- if (!(lcd_dma.rotate || lcd_dma.mirror ||
- lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale))
- return;
-
- w = omap_readw(OMAP1610_DMA_LCD_CCR);
- /* Set the double-indexed addressing mode */
- w |= (0x03 << 12);
- omap_writew(w, OMAP1610_DMA_LCD_CCR);
-
- omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1);
- omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U);
- omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
-}
-
-static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id)
-{
- u16 w;
-
- w = omap_readw(OMAP1610_DMA_LCD_CTRL);
- if (unlikely(!(w & (1 << 3)))) {
- printk(KERN_WARNING "Spurious LCD DMA IRQ\n");
- return IRQ_NONE;
- }
- /* Ack the IRQ */
- w |= (1 << 3);
- omap_writew(w, OMAP1610_DMA_LCD_CTRL);
- lcd_dma.active = 0;
- if (lcd_dma.callback != NULL)
- lcd_dma.callback(w, lcd_dma.cb_data);
-
- return IRQ_HANDLED;
-}
-
-int omap_request_lcd_dma(void (*callback)(u16 status, void *data),
- void *data)
-{
- spin_lock_irq(&lcd_dma.lock);
- if (lcd_dma.reserved) {
- spin_unlock_irq(&lcd_dma.lock);
- printk(KERN_ERR "LCD DMA channel already reserved\n");
- BUG();
- return -EBUSY;
- }
- lcd_dma.reserved = 1;
- spin_unlock_irq(&lcd_dma.lock);
- lcd_dma.callback = callback;
- lcd_dma.cb_data = data;
- lcd_dma.active = 0;
- lcd_dma.single_transfer = 0;
- lcd_dma.rotate = 0;
- lcd_dma.vxres = 0;
- lcd_dma.mirror = 0;
- lcd_dma.xscale = 0;
- lcd_dma.yscale = 0;
- lcd_dma.ext_ctrl = 0;
- lcd_dma.src_port = 0;
-
- return 0;
-}
-EXPORT_SYMBOL(omap_request_lcd_dma);
-
-void omap_free_lcd_dma(void)
-{
- spin_lock(&lcd_dma.lock);
- if (!lcd_dma.reserved) {
- spin_unlock(&lcd_dma.lock);
- printk(KERN_ERR "LCD DMA is not reserved\n");
- BUG();
- return;
- }
- if (!enable_1510_mode)
- omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
- OMAP1610_DMA_LCD_CCR);
- lcd_dma.reserved = 0;
- spin_unlock(&lcd_dma.lock);
-}
-EXPORT_SYMBOL(omap_free_lcd_dma);
-
-void omap_enable_lcd_dma(void)
-{
- u16 w;
-
- /*
- * Set the Enable bit only if an external controller is
- * connected. Otherwise the OMAP internal controller will
- * start the transfer when it gets enabled.
- */
- if (enable_1510_mode || !lcd_dma.ext_ctrl)
- return;
-
- w = omap_readw(OMAP1610_DMA_LCD_CTRL);
- w |= 1 << 8;
- omap_writew(w, OMAP1610_DMA_LCD_CTRL);
-
- lcd_dma.active = 1;
-
- w = omap_readw(OMAP1610_DMA_LCD_CCR);
- w |= 1 << 7;
- omap_writew(w, OMAP1610_DMA_LCD_CCR);
-}
-EXPORT_SYMBOL(omap_enable_lcd_dma);
-
-void omap_setup_lcd_dma(void)
-{
- BUG_ON(lcd_dma.active);
- if (!enable_1510_mode) {
- /* Set some reasonable defaults */
- omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
- omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
- omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
- }
- set_b1_regs();
- if (!enable_1510_mode) {
- u16 w;
-
- w = omap_readw(OMAP1610_DMA_LCD_CCR);
- /*
- * If DMA was already active set the end_prog bit to have
- * the programmed register set loaded into the active
- * register set.
- */
- w |= 1 << 11; /* End_prog */
- if (!lcd_dma.single_transfer)
- w |= (3 << 8); /* Auto_init, repeat */
- omap_writew(w, OMAP1610_DMA_LCD_CCR);
- }
-}
-EXPORT_SYMBOL(omap_setup_lcd_dma);
-
-void omap_stop_lcd_dma(void)
-{
- u16 w;
-
- lcd_dma.active = 0;
- if (enable_1510_mode || !lcd_dma.ext_ctrl)
- return;
-
- w = omap_readw(OMAP1610_DMA_LCD_CCR);
- w &= ~(1 << 7);
- omap_writew(w, OMAP1610_DMA_LCD_CCR);
-
- w = omap_readw(OMAP1610_DMA_LCD_CTRL);
- w &= ~(1 << 8);
- omap_writew(w, OMAP1610_DMA_LCD_CTRL);
-}
-EXPORT_SYMBOL(omap_stop_lcd_dma);
-
void omap_dma_global_context_save(void)
{
omap_dma_global_context.dma_irqenable_l0 =
@@ -2465,14 +2084,6 @@ static int __init omap_init_dma(void)
dma_chan_count = 16;
} else
dma_chan_count = 9;
- if (cpu_is_omap16xx()) {
- u16 w;
-
- /* this would prevent OMAP sleep */
- w = omap_readw(OMAP1610_DMA_LCD_CTRL);
- w &= ~(1 << 8);
- omap_writew(w, OMAP1610_DMA_LCD_CTRL);
- }
} else if (cpu_class_is_omap2()) {
u8 revision = dma_read(REVISION) & 0xff;
printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
@@ -2483,7 +2094,6 @@ static int __init omap_init_dma(void)
return 0;
}
- spin_lock_init(&lcd_dma.lock);
spin_lock_init(&dma_chan_lock);
for (ch = 0; ch < dma_chan_count; ch++) {
@@ -2548,22 +2158,6 @@ static int __init omap_init_dma(void)
}
}
-
- /* FIXME: Update LCD DMA to work on 24xx */
- if (cpu_class_is_omap1()) {
- r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
- "LCD DMA", NULL);
- if (r != 0) {
- int i;
-
- printk(KERN_ERR "unable to request IRQ for LCD DMA "
- "(error %d)\n", r);
- for (i = 0; i < dma_chan_count; i++)
- free_irq(omap1_dma_irq[i], (void *) (i + 1));
- goto out_free;
- }
- }
-
return 0;
out_free:
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index 78a4ce538db..d3eea4f4753 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -28,13 +28,13 @@
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/io.h>
+#include <linux/omapfb.h>
#include <mach/hardware.h>
#include <asm/mach/map.h>
#include <plat/board.h>
#include <plat/sram.h>
-#include <plat/omapfb.h>
#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
@@ -55,6 +55,10 @@ static struct platform_device omap_fb_device = {
.num_resources = 0,
};
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+}
+
static inline int ranges_overlap(unsigned long start1, unsigned long size1,
unsigned long start2, unsigned long size2)
{
@@ -327,7 +331,33 @@ static inline int omap_init_fb(void)
arch_initcall(omap_init_fb);
-#else
+#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+
+static u64 omap_fb_dma_mask = ~(u32)0;
+static struct omapfb_platform_data omapfb_config;
+
+static struct platform_device omap_fb_device = {
+ .name = "omapfb",
+ .id = -1,
+ .dev = {
+ .dma_mask = &omap_fb_dma_mask,
+ .coherent_dma_mask = ~(u32)0,
+ .platform_data = &omapfb_config,
+ },
+ .num_resources = 0,
+};
+
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+ omapfb_config = *data;
+}
+
+static inline int omap_init_fb(void)
+{
+ return platform_device_register(&omap_fb_device);
+}
+
+arch_initcall(omap_init_fb);
void omapfb_reserve_sdram(void) {}
unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
@@ -339,5 +369,20 @@ unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
return 0;
}
+#else
+
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+}
+
+void omapfb_reserve_sdram(void) {}
+unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+ unsigned long sram_vstart,
+ unsigned long sram_size,
+ unsigned long start_avail,
+ unsigned long size_avail)
+{
+ return 0;
+}
#endif
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 055160e0620..04846811d0a 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -1431,7 +1431,7 @@ static int omap_mpuio_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops omap_mpuio_dev_pm_ops = {
+static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
.suspend_noirq = omap_mpuio_suspend_noirq,
.resume_noirq = omap_mpuio_resume_noirq,
};
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
index c08362dbb8e..33fff4ef382 100644
--- a/arch/arm/plat-omap/i2c.c
+++ b/arch/arm/plat-omap/i2c.c
@@ -80,47 +80,8 @@ static struct platform_device omap_i2c_devices[] = {
#endif
};
-#if defined(CONFIG_ARCH_OMAP24XX)
-static const int omap24xx_pins[][2] = {
- { M19_24XX_I2C1_SCL, L15_24XX_I2C1_SDA },
- { J15_24XX_I2C2_SCL, H19_24XX_I2C2_SDA },
-};
-#else
-static const int omap24xx_pins[][2] = {};
-#endif
-#if defined(CONFIG_ARCH_OMAP34XX)
-static const int omap34xx_pins[][2] = {
- { K21_34XX_I2C1_SCL, J21_34XX_I2C1_SDA},
- { AF15_34XX_I2C2_SCL, AE15_34XX_I2C2_SDA},
- { AF14_34XX_I2C3_SCL, AG14_34XX_I2C3_SDA},
-};
-#else
-static const int omap34xx_pins[][2] = {};
-#endif
-
#define OMAP_I2C_CMDLINE_SETUP (BIT(31))
-static void __init omap_i2c_mux_pins(int bus)
-{
- int scl, sda;
-
- if (cpu_class_is_omap1()) {
- scl = I2C_SCL;
- sda = I2C_SDA;
- } else if (cpu_is_omap24xx()) {
- scl = omap24xx_pins[bus][0];
- sda = omap24xx_pins[bus][1];
- } else if (cpu_is_omap34xx()) {
- scl = omap34xx_pins[bus][0];
- sda = omap34xx_pins[bus][1];
- } else {
- return;
- }
-
- omap_cfg_reg(sda);
- omap_cfg_reg(scl);
-}
-
static int __init omap_i2c_nr_ports(void)
{
int ports = 0;
@@ -156,7 +117,6 @@ static int __init omap_i2c_add_bus(int bus_id)
res[1].start = irq;
}
- omap_i2c_mux_pins(bus_id - 1);
return platform_device_register(pdev);
}
@@ -209,7 +169,7 @@ out:
subsys_initcall(omap_register_i2c_bus_cmdline);
/**
- * omap_register_i2c_bus - register I2C bus with device descriptors
+ * omap_plat_register_i2c_bus - register I2C bus with device descriptors
* @bus_id: bus id counting from number 1
* @clkrate: clock rate of the bus in kHz
* @info: pointer into I2C device descriptor table or NULL
@@ -217,7 +177,7 @@ subsys_initcall(omap_register_i2c_bus_cmdline);
*
* Returns 0 on success or an error code.
*/
-int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
+int __init omap_plat_register_i2c_bus(int bus_id, u32 clkrate,
struct i2c_board_info const *info,
unsigned len)
{
diff --git a/arch/arm/plat-omap/include/plat/board.h b/arch/arm/plat-omap/include/plat/board.h
index abb17b604f8..376ce18216f 100644
--- a/arch/arm/plat-omap/include/plat/board.h
+++ b/arch/arm/plat-omap/include/plat/board.h
@@ -114,15 +114,6 @@ struct omap_pwm_led_platform_data {
void (*set_power)(struct omap_pwm_led_platform_data *self, int on_off);
};
-/* See arch/arm/plat-omap/include/mach/gpio-switch.h for definitions */
-struct omap_gpio_switch_config {
- char name[12];
- u16 gpio;
- int flags:4;
- int type:4;
- int key_code:24; /* Linux key code */
-};
-
struct omap_uart_config {
/* Bit field of UARTs present; bit 0 --> UART1 */
unsigned int enabled_uarts;
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
new file mode 100644
index 00000000000..35b36caf5f9
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -0,0 +1,41 @@
+/*
+ * clkdev <-> OMAP integration
+ *
+ * Russell King <linux@arm.linux.org.uk>
+ *
+ */
+
+#ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_CLKDEV_OMAP_H
+#define __ARCH_ARM_PLAT_OMAP_INCLUDE_PLAT_CLKDEV_OMAP_H
+
+#include <asm/clkdev.h>
+
+struct omap_clk {
+ u16 cpu;
+ struct clk_lookup lk;
+};
+
+#define CLK(dev, con, ck, cp) \
+ { \
+ .cpu = cp, \
+ .lk = { \
+ .dev_id = dev, \
+ .con_id = con, \
+ .clk = ck, \
+ }, \
+ }
+
+
+#define CK_310 (1 << 0)
+#define CK_7XX (1 << 1)
+#define CK_1510 (1 << 2)
+#define CK_16XX (1 << 3)
+#define CK_243X (1 << 4)
+#define CK_242X (1 << 5)
+#define CK_343X (1 << 6)
+#define CK_3430ES1 (1 << 7)
+#define CK_3430ES2 (1 << 8)
+#define CK_443X (1 << 9)
+
+#endif
+
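The CLK() helper above is meant for building per-SoC clock tables: each entry ties a struct clk to a dev_id/con_id pair and tags the CPUs it is valid on. A hedged sketch of such a table follows; the clock and device names are placeholders, not taken from this patch.

#include <plat/clock.h>
#include <plat/clkdev_omap.h>

extern struct clk uart1_fck;	/* assumed to be defined in the SoC clock data */

static struct omap_clk board_clks[] = {
	/* dev_id          con_id  clk          CPUs the clock is valid on */
	CLK("omap_uart.0", "fck",  &uart1_fck,  CK_16XX | CK_1510),
};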
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 4b8b0d65cbf..309b6d1dccd 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -13,6 +13,8 @@
#ifndef __ARCH_ARM_OMAP_CLOCK_H
#define __ARCH_ARM_OMAP_CLOCK_H
+#include <linux/list.h>
+
struct module;
struct clk;
struct clockdomain;
@@ -148,6 +150,8 @@ extern const struct clkops clkops_null;
#define CONFIG_PARTICIPANT (1 << 10) /* Fundamental clock */
#define ENABLE_ON_INIT (1 << 11) /* Enable upon framework init */
#define INVERT_ENABLE (1 << 12) /* 0 enables, 1 disables */
+#define CLOCK_IN_OMAP4430 (1 << 13)
+#define ALWAYS_ENABLED (1 << 14)
/* bits 13-31 are currently free */
/* Clksel_rate flags */
@@ -156,6 +160,7 @@ extern const struct clkops clkops_null;
#define RATE_IN_243X (1 << 2)
#define RATE_IN_343X (1 << 3) /* rates common to all 343X */
#define RATE_IN_3430ES2 (1 << 4) /* 3430ES2 rates only */
+#define RATE_IN_4430 (1 << 5)
#define RATE_IN_24XX (RATE_IN_242X | RATE_IN_243X)
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 064f1730f43..32c22272425 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -27,7 +27,7 @@
#ifndef __ARCH_ARM_MACH_OMAP_COMMON_H
#define __ARCH_ARM_MACH_OMAP_COMMON_H
-#include <linux/i2c.h>
+#include <plat/i2c.h>
struct sys_timer;
@@ -36,18 +36,6 @@ extern void __iomem *gic_cpu_base_addr;
extern void omap_map_common_io(void);
extern struct sys_timer omap_timer;
-#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
-extern int omap_register_i2c_bus(int bus_id, u32 clkrate,
- struct i2c_board_info const *info,
- unsigned len);
-#else
-static inline int omap_register_i2c_bus(int bus_id, u32 clkrate,
- struct i2c_board_info const *info,
- unsigned len)
-{
- return 0;
-}
-#endif
/* IO bases for various OMAP processors */
struct omap_globals {
@@ -58,6 +46,7 @@ struct omap_globals {
void __iomem *ctrl; /* System Control Module */
void __iomem *prm; /* Power and Reset Management */
void __iomem *cm; /* Clock Management */
+ void __iomem *cm2;
};
void omap2_set_globals_242x(void);
@@ -71,4 +60,24 @@ void omap2_set_globals_sdrc(struct omap_globals *);
void omap2_set_globals_control(struct omap_globals *);
void omap2_set_globals_prcm(struct omap_globals *);
+/**
+ * omap_test_timeout - busy-loop, testing a condition
+ * @cond: condition to test until it evaluates to true
+ * @timeout: maximum number of microseconds in the timeout
+ * @index: loop index (integer)
+ *
+ * Loop waiting for @cond to become true or until at least @timeout
+ * microseconds have passed. To use, define some integer @index in the
+ * calling code. After running, if @index == @timeout, then the loop has
+ * timed out.
+ */
+#define omap_test_timeout(cond, timeout, index) \
+({ \
+ for (index = 0; index < timeout; index++) { \
+ if (cond) \
+ break; \
+ udelay(1); \
+ } \
+})
+
#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
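The kerneldoc above already spells out the calling convention for omap_test_timeout(); as a quick illustration (the register, bit name and function are hypothetical), a caller polls a status bit and then compares the loop index against the timeout:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <plat/common.h>

#define BOARD_READY_BIT		(1 << 0)	/* placeholder status bit */

static int board_wait_ready(void __iomem *reg)
{
	int i;

	/* Poll the (hypothetical) ready bit for at most 1000 us. */
	omap_test_timeout(readl(reg) & BOARD_READY_BIT, 1000, i);

	return (i == 1000) ? -ETIMEDOUT : 0;
}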
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 2e1789001df..9a028bdebb0 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -176,11 +176,13 @@ IS_OMAP_CLASS(15xx, 0x15)
IS_OMAP_CLASS(16xx, 0x16)
IS_OMAP_CLASS(24xx, 0x24)
IS_OMAP_CLASS(34xx, 0x34)
+IS_OMAP_CLASS(44xx, 0x44)
IS_OMAP_SUBCLASS(242x, 0x242)
IS_OMAP_SUBCLASS(243x, 0x243)
IS_OMAP_SUBCLASS(343x, 0x343)
IS_OMAP_SUBCLASS(363x, 0x363)
+IS_OMAP_SUBCLASS(443x, 0x443)
#define cpu_is_omap7xx() 0
#define cpu_is_omap15xx() 0
@@ -393,11 +395,11 @@ IS_OMAP_TYPE(3517, 0x3517)
(!omap3_has_iva()) && \
(!omap3_has_sgx()))
# define cpu_is_omap3515() (cpu_is_omap3430() && \
- (omap3_has_iva()) && \
- (!omap3_has_sgx()))
+ (!omap3_has_iva()) && \
+ (omap3_has_sgx()))
# define cpu_is_omap3525() (cpu_is_omap3430() && \
- (omap3_has_sgx()) && \
- (!omap3_has_iva()))
+ (!omap3_has_sgx()) && \
+ (omap3_has_iva()))
# define cpu_is_omap3530() (cpu_is_omap3430())
# define cpu_is_omap3505() is_omap3505()
# define cpu_is_omap3517() is_omap3517()
@@ -408,8 +410,8 @@ IS_OMAP_TYPE(3517, 0x3517)
# if defined(CONFIG_ARCH_OMAP4)
# undef cpu_is_omap44xx
# undef cpu_is_omap443x
-# define cpu_is_omap44xx() 1
-# define cpu_is_omap443x() 1
+# define cpu_is_omap44xx() is_omap44xx()
+# define cpu_is_omap443x() is_omap443x()
# endif
/* Macros to detect if we have OMAP1 or OMAP2 */
@@ -436,14 +438,15 @@ IS_OMAP_TYPE(3517, 0x3517)
#define OMAP3630_REV_ES1_0 0x36300034
#define OMAP35XX_CLASS 0x35000034
-#define OMAP3503_REV(v) (OMAP35XX_CLASS | (0x3503 << 16) | (v << 12))
-#define OMAP3515_REV(v) (OMAP35XX_CLASS | (0x3515 << 16) | (v << 12))
-#define OMAP3525_REV(v) (OMAP35XX_CLASS | (0x3525 << 16) | (v << 12))
-#define OMAP3530_REV(v) (OMAP35XX_CLASS | (0x3530 << 16) | (v << 12))
-#define OMAP3505_REV(v) (OMAP35XX_CLASS | (0x3505 << 16) | (v << 12))
-#define OMAP3517_REV(v) (OMAP35XX_CLASS | (0x3517 << 16) | (v << 12))
-
-#define OMAP443X_CLASS 0x44300034
+#define OMAP3503_REV(v) (OMAP35XX_CLASS | (0x3503 << 16) | (v << 8))
+#define OMAP3515_REV(v) (OMAP35XX_CLASS | (0x3515 << 16) | (v << 8))
+#define OMAP3525_REV(v) (OMAP35XX_CLASS | (0x3525 << 16) | (v << 8))
+#define OMAP3530_REV(v) (OMAP35XX_CLASS | (0x3530 << 16) | (v << 8))
+#define OMAP3505_REV(v) (OMAP35XX_CLASS | (0x3505 << 16) | (v << 8))
+#define OMAP3517_REV(v) (OMAP35XX_CLASS | (0x3517 << 16) | (v << 8))
+
+#define OMAP443X_CLASS 0x44300044
+#define OMAP4430_REV_ES1_0 0x44300044
/*
* omap_chip bits
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
new file mode 100644
index 00000000000..c66e464732d
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -0,0 +1,575 @@
+/*
+ * linux/include/asm-arm/arch-omap/display.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARCH_OMAP_DISPLAY_H
+#define __ASM_ARCH_OMAP_DISPLAY_H
+
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+
+#define DISPC_IRQ_FRAMEDONE (1 << 0)
+#define DISPC_IRQ_VSYNC (1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN (1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD (1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM (1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6)
+#define DISPC_IRQ_GFX_END_WIN (1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8)
+#define DISPC_IRQ_OCP_ERR (1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10)
+#define DISPC_IRQ_VID1_END_WIN (1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12)
+#define DISPC_IRQ_VID2_END_WIN (1 << 13)
+#define DISPC_IRQ_SYNC_LOST (1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15)
+#define DISPC_IRQ_WAKEUP (1 << 16)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+
+enum omap_display_type {
+ OMAP_DISPLAY_TYPE_NONE = 0,
+ OMAP_DISPLAY_TYPE_DPI = 1 << 0,
+ OMAP_DISPLAY_TYPE_DBI = 1 << 1,
+ OMAP_DISPLAY_TYPE_SDI = 1 << 2,
+ OMAP_DISPLAY_TYPE_DSI = 1 << 3,
+ OMAP_DISPLAY_TYPE_VENC = 1 << 4,
+};
+
+enum omap_plane {
+ OMAP_DSS_GFX = 0,
+ OMAP_DSS_VIDEO1 = 1,
+ OMAP_DSS_VIDEO2 = 2
+};
+
+enum omap_channel {
+ OMAP_DSS_CHANNEL_LCD = 0,
+ OMAP_DSS_CHANNEL_DIGIT = 1,
+};
+
+enum omap_color_mode {
+ OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */
+ OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
+ OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
+ OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
+ OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */
+ OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */
+ OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */
+ OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */
+ OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */
+ OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */
+ OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */
+ OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
+ OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
+ OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
+
+ OMAP_DSS_COLOR_GFX_OMAP2 =
+ OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+ OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+ OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,
+
+ OMAP_DSS_COLOR_VID_OMAP2 =
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+ OMAP_DSS_COLOR_UYVY,
+
+ OMAP_DSS_COLOR_GFX_OMAP3 =
+ OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+ OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
+ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+
+ OMAP_DSS_COLOR_VID1_OMAP3 =
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+ OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
+ OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
+
+ OMAP_DSS_COLOR_VID2_OMAP3 =
+ OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+ OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+ OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+ OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
+ OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+};
+
+enum omap_lcd_display_type {
+ OMAP_DSS_LCD_DISPLAY_STN,
+ OMAP_DSS_LCD_DISPLAY_TFT,
+};
+
+enum omap_dss_load_mode {
+ OMAP_DSS_LOAD_CLUT_AND_FRAME = 0,
+ OMAP_DSS_LOAD_CLUT_ONLY = 1,
+ OMAP_DSS_LOAD_FRAME_ONLY = 2,
+ OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3,
+};
+
+enum omap_dss_trans_key_type {
+ OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+ OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+ OMAP_DSS_RFBI_TE_MODE_1 = 1,
+ OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_panel_config {
+ OMAP_DSS_LCD_IVS = 1<<0,
+ OMAP_DSS_LCD_IHS = 1<<1,
+ OMAP_DSS_LCD_IPC = 1<<2,
+ OMAP_DSS_LCD_IEO = 1<<3,
+ OMAP_DSS_LCD_RF = 1<<4,
+ OMAP_DSS_LCD_ONOFF = 1<<5,
+
+ OMAP_DSS_LCD_TFT = 1<<20,
+};
+
+enum omap_dss_venc_type {
+ OMAP_DSS_VENC_TYPE_COMPOSITE,
+ OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_display_caps {
+ OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
+ OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
+};
+
+enum omap_dss_update_mode {
+ OMAP_DSS_UPDATE_DISABLED = 0,
+ OMAP_DSS_UPDATE_AUTO,
+ OMAP_DSS_UPDATE_MANUAL,
+};
+
+enum omap_dss_display_state {
+ OMAP_DSS_DISPLAY_DISABLED = 0,
+ OMAP_DSS_DISPLAY_ACTIVE,
+ OMAP_DSS_DISPLAY_SUSPENDED,
+};
+
+/* XXX perhaps this should be removed */
+enum omap_dss_overlay_managers {
+ OMAP_DSS_OVL_MGR_LCD,
+ OMAP_DSS_OVL_MGR_TV,
+};
+
+enum omap_dss_rotation_type {
+ OMAP_DSS_ROT_DMA = 0,
+ OMAP_DSS_ROT_VRFB = 1,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+ OMAP_DSS_ROT_0 = 0,
+ OMAP_DSS_ROT_90 = 1,
+ OMAP_DSS_ROT_180 = 2,
+ OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+ OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+ OMAP_DSS_OVL_CAP_DISPC = 1 << 1,
+};
+
+enum omap_overlay_manager_caps {
+ OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+ int cs_on_time;
+ int cs_off_time;
+ int we_on_time;
+ int we_off_time;
+ int re_on_time;
+ int re_off_time;
+ int we_cycle_time;
+ int re_cycle_time;
+ int cs_pulse_width;
+ int access_time;
+
+ int clk_div;
+
+ u32 tim[5]; /* set by rfbi_convert_timings() */
+
+ int converted;
+};
+
+void omap_rfbi_write_command(const void *buf, u32 len);
+void omap_rfbi_read_data(void *buf, u32 len);
+void omap_rfbi_write_data(const void *buf, u32 len);
+void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
+ u16 x, u16 y,
+ u16 w, u16 h);
+int omap_rfbi_enable_te(bool enable, unsigned line);
+int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
+ unsigned hs_pulse_time, unsigned vs_pulse_time,
+ int hs_pol_inv, int vs_pol_inv, int extif_div);
+
+/* DSI */
+void dsi_bus_lock(void);
+void dsi_bus_unlock(void);
+int dsi_vc_dcs_write(int channel, u8 *data, int len);
+int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len);
+int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen);
+int dsi_vc_set_max_rx_packet_size(int channel, u16 len);
+int dsi_vc_send_null(int channel);
+int dsi_vc_send_bta_sync(int channel);
+
+/* Board specific data */
+struct omap_dss_board_info {
+ int (*get_last_off_on_transaction_id)(struct device *dev);
+ int num_devices;
+ struct omap_dss_device **devices;
+ struct omap_dss_device *default_device;
+};
+
+struct omap_video_timings {
+ /* Unit: pixels */
+ u16 x_res;
+ /* Unit: pixels */
+ u16 y_res;
+ /* Unit: KHz */
+ u32 pixel_clock;
+ /* Unit: pixel clocks */
+ u16 hsw; /* Horizontal synchronization pulse width */
+ /* Unit: pixel clocks */
+ u16 hfp; /* Horizontal front porch */
+ /* Unit: pixel clocks */
+ u16 hbp; /* Horizontal back porch */
+ /* Unit: line clocks */
+ u16 vsw; /* Vertical synchronization pulse width */
+ /* Unit: line clocks */
+ u16 vfp; /* Vertical front porch */
+ /* Unit: line clocks */
+ u16 vbp; /* Vertical back porch */
+};
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+/* Hardcoded timings for TV modes. The VENC only uses these to
+ * identify the mode, and does not actually use the configs
+ * themselves. However, the configs should be something that
+ * a normal monitor can also show. */
+const extern struct omap_video_timings omap_dss_pal_timings;
+const extern struct omap_video_timings omap_dss_ntsc_timings;
+#endif
+
+struct omap_overlay_info {
+ bool enabled;
+
+ u32 paddr;
+ void __iomem *vaddr;
+ u16 screen_width;
+ u16 width;
+ u16 height;
+ enum omap_color_mode color_mode;
+ u8 rotation;
+ enum omap_dss_rotation_type rotation_type;
+ bool mirror;
+
+ u16 pos_x;
+ u16 pos_y;
+ u16 out_width; /* if 0, out_width == width */
+ u16 out_height; /* if 0, out_height == height */
+ u8 global_alpha;
+};
+
+struct omap_overlay {
+ struct kobject kobj;
+ struct list_head list;
+
+ /* static fields */
+ const char *name;
+ int id;
+ enum omap_color_mode supported_modes;
+ enum omap_overlay_caps caps;
+
+ /* dynamic fields */
+ struct omap_overlay_manager *manager;
+ struct omap_overlay_info info;
+
+ /* if true, info has been changed, but not applied() yet */
+ bool info_dirty;
+
+ int (*set_manager)(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr);
+ int (*unset_manager)(struct omap_overlay *ovl);
+
+ int (*set_overlay_info)(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+ void (*get_overlay_info)(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+
+ int (*wait_for_go)(struct omap_overlay *ovl);
+};
+
+struct omap_overlay_manager_info {
+ u32 default_color;
+
+ enum omap_dss_trans_key_type trans_key_type;
+ u32 trans_key;
+ bool trans_enabled;
+
+ bool alpha_enabled;
+};
+
+struct omap_overlay_manager {
+ struct kobject kobj;
+ struct list_head list;
+
+ /* static fields */
+ const char *name;
+ int id;
+ enum omap_overlay_manager_caps caps;
+ int num_overlays;
+ struct omap_overlay **overlays;
+ enum omap_display_type supported_displays;
+
+ /* dynamic fields */
+ struct omap_dss_device *device;
+ struct omap_overlay_manager_info info;
+
+ bool device_changed;
+ /* if true, info has been changed but not applied() yet */
+ bool info_dirty;
+
+ int (*set_device)(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev);
+ int (*unset_device)(struct omap_overlay_manager *mgr);
+
+ int (*set_manager_info)(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+ void (*get_manager_info)(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+
+ int (*apply)(struct omap_overlay_manager *mgr);
+ int (*wait_for_go)(struct omap_overlay_manager *mgr);
+};
+
+struct omap_dss_device {
+ struct device dev;
+
+ enum omap_display_type type;
+
+ union {
+ struct {
+ u8 data_lines;
+ } dpi;
+
+ struct {
+ u8 channel;
+ u8 data_lines;
+ } rfbi;
+
+ struct {
+ u8 datapairs;
+ } sdi;
+
+ struct {
+ u8 clk_lane;
+ u8 clk_pol;
+ u8 data1_lane;
+ u8 data1_pol;
+ u8 data2_lane;
+ u8 data2_pol;
+
+ struct {
+ u16 regn;
+ u16 regm;
+ u16 regm3;
+ u16 regm4;
+
+ u16 lp_clk_div;
+
+ u16 lck_div;
+ u16 pck_div;
+ } div;
+
+ bool ext_te;
+ u8 ext_te_gpio;
+ } dsi;
+
+ struct {
+ enum omap_dss_venc_type type;
+ bool invert_polarity;
+ } venc;
+ } phy;
+
+ struct {
+ struct omap_video_timings timings;
+
+ int acbi; /* ac-bias pin transitions per interrupt */
+ /* Unit: line clocks */
+ int acb; /* ac-bias pin frequency */
+
+ enum omap_panel_config config;
+
+ u8 recommended_bpp;
+
+ struct omap_dss_device *ctrl;
+ } panel;
+
+ struct {
+ u8 pixel_size;
+ struct rfbi_timings rfbi_timings;
+ struct omap_dss_device *panel;
+ } ctrl;
+
+ int reset_gpio;
+
+ int max_backlight_level;
+
+ const char *name;
+
+ /* used to match device to driver */
+ const char *driver_name;
+
+ void *data;
+
+ struct omap_dss_driver *driver;
+
+ /* helper variable for driver suspend/resume */
+ bool activate_after_resume;
+
+ enum omap_display_caps caps;
+
+ struct omap_overlay_manager *manager;
+
+ enum omap_dss_display_state state;
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*suspend)(struct omap_dss_device *dssdev);
+ int (*resume)(struct omap_dss_device *dssdev);
+
+ void (*get_resolution)(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres);
+ int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ int (*update)(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h);
+ int (*sync)(struct omap_dss_device *dssdev);
+ int (*wait_vsync)(struct omap_dss_device *dssdev);
+
+ int (*set_update_mode)(struct omap_dss_device *dssdev,
+ enum omap_dss_update_mode);
+ enum omap_dss_update_mode (*get_update_mode)
+ (struct omap_dss_device *dssdev);
+
+ int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+ int (*get_te)(struct omap_dss_device *dssdev);
+
+ u8 (*get_rotate)(struct omap_dss_device *dssdev);
+ int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+ bool (*get_mirror)(struct omap_dss_device *dssdev);
+ int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+ int (*run_test)(struct omap_dss_device *dssdev, int test);
+ int (*memory_read)(struct omap_dss_device *dssdev,
+ void *buf, size_t size,
+ u16 x, u16 y, u16 w, u16 h);
+
+ int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+ u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+ /* platform specific */
+ int (*platform_enable)(struct omap_dss_device *dssdev);
+ void (*platform_disable)(struct omap_dss_device *dssdev);
+ int (*set_backlight)(struct omap_dss_device *dssdev, int level);
+ int (*get_backlight)(struct omap_dss_device *dssdev);
+};
+
+struct omap_dss_driver {
+ struct device_driver driver;
+
+ int (*probe)(struct omap_dss_device *);
+ void (*remove)(struct omap_dss_device *);
+
+ int (*enable)(struct omap_dss_device *display);
+ void (*disable)(struct omap_dss_device *display);
+ int (*suspend)(struct omap_dss_device *display);
+ int (*resume)(struct omap_dss_device *display);
+ int (*run_test)(struct omap_dss_device *display, int test);
+
+ void (*setup_update)(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h);
+
+ int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+ int (*wait_for_te)(struct omap_dss_device *dssdev);
+
+ u8 (*get_rotate)(struct omap_dss_device *dssdev);
+ int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+ bool (*get_mirror)(struct omap_dss_device *dssdev);
+ int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+ int (*memory_read)(struct omap_dss_device *dssdev,
+ void *buf, size_t size,
+ u16 x, u16 y, u16 w, u16 h);
+};
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omap_dss_register_device(struct omap_dss_device *);
+void omap_dss_unregister_device(struct omap_dss_device *);
+
+void omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+ int (*match)(struct omap_dss_device *dssdev, void *data));
+
+int omap_dss_start_device(struct omap_dss_device *dssdev);
+void omap_dss_stop_device(struct omap_dss_device *dssdev);
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout);
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+ unsigned long timeout);
+
+#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
+#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
+
+#endif
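To show how the board-specific data declared above is meant to be consumed: a board describes its panels as omap_dss_device entries and hands them to the DSS core through omap_dss_board_info. The panel driver name and device names below are assumptions; the struct fields come from this header.

#include <linux/kernel.h>
#include <plat/display.h>

/* Hypothetical 24-bit DPI panel description. */
static struct omap_dss_device board_lcd_device = {
	.name			= "lcd",
	.driver_name		= "generic_panel",	/* assumed panel driver */
	.type			= OMAP_DISPLAY_TYPE_DPI,
	.phy.dpi.data_lines	= 24,
};

static struct omap_dss_device *board_dss_devices[] = {
	&board_lcd_device,
};

static struct omap_dss_board_info board_dss_data = {
	.num_devices	= ARRAY_SIZE(board_dss_devices),
	.devices	= board_dss_devices,
	.default_device	= &board_lcd_device,
};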
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index 1c017b29b7e..4ede9e17a0b 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -401,33 +401,6 @@
/*----------------------------------------------------------------------------*/
-/* Hardware registers for LCD DMA */
-#define OMAP1510_DMA_LCD_BASE (0xfffedb00)
-#define OMAP1510_DMA_LCD_CTRL (OMAP1510_DMA_LCD_BASE + 0x00)
-#define OMAP1510_DMA_LCD_TOP_F1_L (OMAP1510_DMA_LCD_BASE + 0x02)
-#define OMAP1510_DMA_LCD_TOP_F1_U (OMAP1510_DMA_LCD_BASE + 0x04)
-#define OMAP1510_DMA_LCD_BOT_F1_L (OMAP1510_DMA_LCD_BASE + 0x06)
-#define OMAP1510_DMA_LCD_BOT_F1_U (OMAP1510_DMA_LCD_BASE + 0x08)
-
-#define OMAP1610_DMA_LCD_BASE (0xfffee300)
-#define OMAP1610_DMA_LCD_CSDP (OMAP1610_DMA_LCD_BASE + 0xc0)
-#define OMAP1610_DMA_LCD_CCR (OMAP1610_DMA_LCD_BASE + 0xc2)
-#define OMAP1610_DMA_LCD_CTRL (OMAP1610_DMA_LCD_BASE + 0xc4)
-#define OMAP1610_DMA_LCD_TOP_B1_L (OMAP1610_DMA_LCD_BASE + 0xc8)
-#define OMAP1610_DMA_LCD_TOP_B1_U (OMAP1610_DMA_LCD_BASE + 0xca)
-#define OMAP1610_DMA_LCD_BOT_B1_L (OMAP1610_DMA_LCD_BASE + 0xcc)
-#define OMAP1610_DMA_LCD_BOT_B1_U (OMAP1610_DMA_LCD_BASE + 0xce)
-#define OMAP1610_DMA_LCD_TOP_B2_L (OMAP1610_DMA_LCD_BASE + 0xd0)
-#define OMAP1610_DMA_LCD_TOP_B2_U (OMAP1610_DMA_LCD_BASE + 0xd2)
-#define OMAP1610_DMA_LCD_BOT_B2_L (OMAP1610_DMA_LCD_BASE + 0xd4)
-#define OMAP1610_DMA_LCD_BOT_B2_U (OMAP1610_DMA_LCD_BASE + 0xd6)
-#define OMAP1610_DMA_LCD_SRC_EI_B1 (OMAP1610_DMA_LCD_BASE + 0xd8)
-#define OMAP1610_DMA_LCD_SRC_FI_B1_L (OMAP1610_DMA_LCD_BASE + 0xda)
-#define OMAP1610_DMA_LCD_SRC_EN_B1 (OMAP1610_DMA_LCD_BASE + 0xe0)
-#define OMAP1610_DMA_LCD_SRC_FN_B1 (OMAP1610_DMA_LCD_BASE + 0xe4)
-#define OMAP1610_DMA_LCD_LCH_CTRL (OMAP1610_DMA_LCD_BASE + 0xea)
-#define OMAP1610_DMA_LCD_SRC_FI_B1_U (OMAP1610_DMA_LCD_BASE + 0xf4)
-
#define OMAP1_DMA_TOUT_IRQ (1 << 0)
#define OMAP_DMA_DROP_IRQ (1 << 1)
#define OMAP_DMA_HALF_IRQ (1 << 2)
@@ -441,6 +414,8 @@
#define OMAP2_DMA_SUPERVISOR_ERR_IRQ (1 << 10)
#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
+#define OMAP_DMA_CCR_EN (1 << 7)
+
#define OMAP_DMA_DATA_TYPE_S8 0x00
#define OMAP_DMA_DATA_TYPE_S16 0x01
#define OMAP_DMA_DATA_TYPE_S32 0x02
@@ -503,14 +478,6 @@
#define DMA_CH_PRIO_HIGH 0x1
#define DMA_CH_PRIO_LOW 0x0 /* Def */
-/* LCD DMA block numbers */
-enum {
- OMAP_LCD_DMA_B1_TOP,
- OMAP_LCD_DMA_B1_BOTTOM,
- OMAP_LCD_DMA_B2_TOP,
- OMAP_LCD_DMA_B2_BOTTOM
-};
-
enum omap_dma_burst_mode {
OMAP_DMA_DATA_BURST_DIS = 0,
OMAP_DMA_DATA_BURST_4,
@@ -661,20 +628,13 @@ extern int omap_modify_dma_chain_params(int chain_id,
extern int omap_dma_chain_status(int chain_id);
#endif
-/* LCD DMA functions */
-extern int omap_request_lcd_dma(void (*callback)(u16 status, void *data),
- void *data);
-extern void omap_free_lcd_dma(void);
-extern void omap_setup_lcd_dma(void);
-extern void omap_enable_lcd_dma(void);
-extern void omap_stop_lcd_dma(void);
-extern void omap_set_lcd_dma_ext_controller(int external);
-extern void omap_set_lcd_dma_single_transfer(int single);
-extern void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres,
- int data_type);
-extern void omap_set_lcd_dma_b1_rotation(int rotate);
-extern void omap_set_lcd_dma_b1_vxres(unsigned long vxres);
-extern void omap_set_lcd_dma_b1_mirror(int mirror);
-extern void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale);
+#if defined(CONFIG_ARCH_OMAP1) && defined(CONFIG_FB_OMAP)
+#include <mach/lcd_dma.h>
+#else
+static inline int omap_lcd_dma_running(void)
+{
+ return 0;
+}
+#endif
#endif /* __ASM_ARCH_DMA_H */
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 696e0ca051b..e081338e0b2 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -45,7 +45,7 @@
#define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
#define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10)
#define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
-#define GPMC_CONFIG1_DEVICETYPE_NAND GPMC_CONFIG1_DEVICETYPE(1)
+#define GPMC_CONFIG1_DEVICETYPE_NAND GPMC_CONFIG1_DEVICETYPE(2)
#define GPMC_CONFIG1_MUXADDDATA (1 << 9)
#define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4)
#define GPMC_CONFIG1_FCLK_DIV(val) (val & 3)
diff --git a/arch/arm/plat-omap/include/plat/i2c.h b/arch/arm/plat-omap/include/plat/i2c.h
new file mode 100644
index 00000000000..585d9ca68b9
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/i2c.h
@@ -0,0 +1,39 @@
+/*
+ * Helper module for board specific I2C bus registration
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/i2c.h>
+
+#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
+extern int omap_register_i2c_bus(int bus_id, u32 clkrate,
+ struct i2c_board_info const *info,
+ unsigned len);
+#else
+static inline int omap_register_i2c_bus(int bus_id, u32 clkrate,
+ struct i2c_board_info const *info,
+ unsigned len)
+{
+ return 0;
+}
+#endif
+
+int omap_plat_register_i2c_bus(int bus_id, u32 clkrate,
+ struct i2c_board_info const *info,
+ unsigned len);
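As a usage sketch (board and device names are hypothetical), a board file registers its I2C devices at init time through omap_register_i2c_bus() declared above; omap_plat_register_i2c_bus() is the renamed plat-omap implementation behind it.

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <plat/i2c.h>

/* Hypothetical EEPROM on bus 1 at address 0x50. */
static struct i2c_board_info board_i2c1_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c01", 0x50),
	},
};

static void __init board_i2c_init(void)
{
	/* Bus 1 at 400 kHz, per the clkrate unit documented in the helper. */
	omap_register_i2c_bus(1, 400, board_i2c1_devices,
			      ARRAY_SIZE(board_i2c1_devices));
}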
diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h
index ce5dd2d1dc2..97d6c50c3dc 100644
--- a/arch/arm/plat-omap/include/plat/irqs.h
+++ b/arch/arm/plat-omap/include/plat/irqs.h
@@ -472,8 +472,22 @@
#endif
#define TWL4030_GPIO_IRQ_END (TWL4030_GPIO_IRQ_BASE + TWL4030_GPIO_NR_IRQS)
+#define TWL6030_IRQ_BASE (OMAP_FPGA_IRQ_END)
+#ifdef CONFIG_TWL4030_CORE
+#define TWL6030_BASE_NR_IRQS 20
+#else
+#define TWL6030_BASE_NR_IRQS 0
+#endif
+#define TWL6030_IRQ_END (TWL6030_IRQ_BASE + TWL6030_BASE_NR_IRQS)
+
/* Total number of interrupts depends on the enabled blocks above */
-#define NR_IRQS TWL4030_GPIO_IRQ_END
+#if (TWL4030_GPIO_IRQ_END > TWL6030_IRQ_END)
+#define TWL_IRQ_END TWL4030_GPIO_IRQ_END
+#else
+#define TWL_IRQ_END TWL6030_IRQ_END
+#endif
+
+#define NR_IRQS TWL_IRQ_END
#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
diff --git a/arch/arm/plat-omap/include/plat/mux.h b/arch/arm/plat-omap/include/plat/mux.h
index ba77de60150..8f069cc8035 100644
--- a/arch/arm/plat-omap/include/plat/mux.h
+++ b/arch/arm/plat-omap/include/plat/mux.h
@@ -130,58 +130,11 @@
#define OMAP2_PULL_UP (1 << 4)
#define OMAP2_ALTELECTRICALSEL (1 << 5)
-/* 34xx specific mux bit defines */
-#define OMAP3_INPUT_EN (1 << 8)
-#define OMAP3_OFF_EN (1 << 9)
-#define OMAP3_OFFOUT_EN (1 << 10)
-#define OMAP3_OFFOUT_VAL (1 << 11)
-#define OMAP3_OFF_PULL_EN (1 << 12)
-#define OMAP3_OFF_PULL_UP (1 << 13)
-#define OMAP3_WAKEUP_EN (1 << 14)
-
-/* 34xx mux mode options for each pin. See TRM for options */
-#define OMAP34XX_MUX_MODE0 0
-#define OMAP34XX_MUX_MODE1 1
-#define OMAP34XX_MUX_MODE2 2
-#define OMAP34XX_MUX_MODE3 3
-#define OMAP34XX_MUX_MODE4 4
-#define OMAP34XX_MUX_MODE5 5
-#define OMAP34XX_MUX_MODE6 6
-#define OMAP34XX_MUX_MODE7 7
-
-/* 34xx active pin states */
-#define OMAP34XX_PIN_OUTPUT 0
-#define OMAP34XX_PIN_INPUT OMAP3_INPUT_EN
-#define OMAP34XX_PIN_INPUT_PULLUP (OMAP2_PULL_ENA | OMAP3_INPUT_EN \
- | OMAP2_PULL_UP)
-#define OMAP34XX_PIN_INPUT_PULLDOWN (OMAP2_PULL_ENA | OMAP3_INPUT_EN)
-
-/* 34xx off mode states */
-#define OMAP34XX_PIN_OFF_NONE 0
-#define OMAP34XX_PIN_OFF_OUTPUT_HIGH (OMAP3_OFF_EN | OMAP3_OFFOUT_EN \
- | OMAP3_OFFOUT_VAL)
-#define OMAP34XX_PIN_OFF_OUTPUT_LOW (OMAP3_OFF_EN | OMAP3_OFFOUT_EN)
-#define OMAP34XX_PIN_OFF_INPUT_PULLUP (OMAP3_OFF_EN | OMAP3_OFF_PULL_EN \
- | OMAP3_OFF_PULL_UP)
-#define OMAP34XX_PIN_OFF_INPUT_PULLDOWN (OMAP3_OFF_EN | OMAP3_OFF_PULL_EN)
-#define OMAP34XX_PIN_OFF_WAKEUPENABLE OMAP3_WAKEUP_EN
-
-#define MUX_CFG_34XX(desc, reg_offset, mux_value) { \
- .name = desc, \
- .debug = 0, \
- .mux_reg = reg_offset, \
- .mux_val = mux_value \
-},
-
struct pin_config {
char *name;
const unsigned int mux_reg;
unsigned char debug;
-#if defined(CONFIG_ARCH_OMAP34XX)
- u16 mux_val; /* Wake-up, off mode, pull, mux mode */
-#endif
-
#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP24XX)
const unsigned char mask_offset;
const unsigned char mask;
@@ -219,11 +172,17 @@ enum omap7xx_index {
AA17_7XX_USB_DM,
W16_7XX_USB_PU_EN,
W17_7XX_USB_VBUSI,
+ W18_7XX_USB_DMCK_OUT,
+ W19_7XX_USB_DCRST,
/* MMC */
MMC_7XX_CMD,
MMC_7XX_CLK,
MMC_7XX_DAT0,
+
+ /* I2C */
+ I2C_7XX_SCL,
+ I2C_7XX_SDA,
};
enum omap1xxx_index {
@@ -681,181 +640,6 @@ enum omap24xx_index {
};
-enum omap34xx_index {
- /* 34xx I2C */
- K21_34XX_I2C1_SCL,
- J21_34XX_I2C1_SDA,
- AF15_34XX_I2C2_SCL,
- AE15_34XX_I2C2_SDA,
- AF14_34XX_I2C3_SCL,
- AG14_34XX_I2C3_SDA,
- AD26_34XX_I2C4_SCL,
- AE26_34XX_I2C4_SDA,
-
- /* PHY - HSUSB: 12-pin ULPI PHY: Port 1*/
- Y8_3430_USB1HS_PHY_CLK,
- Y9_3430_USB1HS_PHY_STP,
- AA14_3430_USB1HS_PHY_DIR,
- AA11_3430_USB1HS_PHY_NXT,
- W13_3430_USB1HS_PHY_DATA0,
- W12_3430_USB1HS_PHY_DATA1,
- W11_3430_USB1HS_PHY_DATA2,
- Y11_3430_USB1HS_PHY_DATA3,
- W9_3430_USB1HS_PHY_DATA4,
- Y12_3430_USB1HS_PHY_DATA5,
- W8_3430_USB1HS_PHY_DATA6,
- Y13_3430_USB1HS_PHY_DATA7,
-
- /* PHY - HSUSB: 12-pin ULPI PHY: Port 2*/
- AA8_3430_USB2HS_PHY_CLK,
- AA10_3430_USB2HS_PHY_STP,
- AA9_3430_USB2HS_PHY_DIR,
- AB11_3430_USB2HS_PHY_NXT,
- AB10_3430_USB2HS_PHY_DATA0,
- AB9_3430_USB2HS_PHY_DATA1,
- W3_3430_USB2HS_PHY_DATA2,
- T4_3430_USB2HS_PHY_DATA3,
- T3_3430_USB2HS_PHY_DATA4,
- R3_3430_USB2HS_PHY_DATA5,
- R4_3430_USB2HS_PHY_DATA6,
- T2_3430_USB2HS_PHY_DATA7,
-
-
- /* TLL - HSUSB: 12-pin TLL Port 1*/
- Y8_3430_USB1HS_TLL_CLK,
- Y9_3430_USB1HS_TLL_STP,
- AA14_3430_USB1HS_TLL_DIR,
- AA11_3430_USB1HS_TLL_NXT,
- W13_3430_USB1HS_TLL_DATA0,
- W12_3430_USB1HS_TLL_DATA1,
- W11_3430_USB1HS_TLL_DATA2,
- Y11_3430_USB1HS_TLL_DATA3,
- W9_3430_USB1HS_TLL_DATA4,
- Y12_3430_USB1HS_TLL_DATA5,
- W8_3430_USB1HS_TLL_DATA6,
- Y13_3430_USB1HS_TLL_DATA7,
-
- /* TLL - HSUSB: 12-pin TLL Port 2*/
- AA8_3430_USB2HS_TLL_CLK,
- AA10_3430_USB2HS_TLL_STP,
- AA9_3430_USB2HS_TLL_DIR,
- AB11_3430_USB2HS_TLL_NXT,
- AB10_3430_USB2HS_TLL_DATA0,
- AB9_3430_USB2HS_TLL_DATA1,
- W3_3430_USB2HS_TLL_DATA2,
- T4_3430_USB2HS_TLL_DATA3,
- T3_3430_USB2HS_TLL_DATA4,
- R3_3430_USB2HS_TLL_DATA5,
- R4_3430_USB2HS_TLL_DATA6,
- T2_3430_USB2HS_TLL_DATA7,
-
- /* TLL - HSUSB: 12-pin TLL Port 3*/
- AA6_3430_USB3HS_TLL_CLK,
- AB3_3430_USB3HS_TLL_STP,
- AA3_3430_USB3HS_TLL_DIR,
- Y3_3430_USB3HS_TLL_NXT,
- AA5_3430_USB3HS_TLL_DATA0,
- Y4_3430_USB3HS_TLL_DATA1,
- Y5_3430_USB3HS_TLL_DATA2,
- W5_3430_USB3HS_TLL_DATA3,
- AB12_3430_USB3HS_TLL_DATA4,
- AB13_3430_USB3HS_TLL_DATA5,
- AA13_3430_USB3HS_TLL_DATA6,
- AA12_3430_USB3HS_TLL_DATA7,
-
- /* PHY FSUSB: FS Serial for Port 1 (multiple PHY modes supported) */
- AF10_3430_USB1FS_PHY_MM1_RXDP,
- AG9_3430_USB1FS_PHY_MM1_RXDM,
- W13_3430_USB1FS_PHY_MM1_RXRCV,
- W12_3430_USB1FS_PHY_MM1_TXSE0,
- W11_3430_USB1FS_PHY_MM1_TXDAT,
- Y11_3430_USB1FS_PHY_MM1_TXEN_N,
-
- /* PHY FSUSB: FS Serial for Port 2 (multiple PHY modes supported) */
- AF7_3430_USB2FS_PHY_MM2_RXDP,
- AH7_3430_USB2FS_PHY_MM2_RXDM,
- AB10_3430_USB2FS_PHY_MM2_RXRCV,
- AB9_3430_USB2FS_PHY_MM2_TXSE0,
- W3_3430_USB2FS_PHY_MM2_TXDAT,
- T4_3430_USB2FS_PHY_MM2_TXEN_N,
-
- /* PHY FSUSB: FS Serial for Port 3 (multiple PHY modes supported) */
- AH3_3430_USB3FS_PHY_MM3_RXDP,
- AE3_3430_USB3FS_PHY_MM3_RXDM,
- AD1_3430_USB3FS_PHY_MM3_RXRCV,
- AE1_3430_USB3FS_PHY_MM3_TXSE0,
- AD2_3430_USB3FS_PHY_MM3_TXDAT,
- AC1_3430_USB3FS_PHY_MM3_TXEN_N,
-
- /* 34xx GPIO
- * - normally these are bidirectional, no internal pullup/pulldown
- * - "_UP" suffix (GPIO3_UP) if internal pullup is configured
- * - "_DOWN" suffix (GPIO3_DOWN) with internal pulldown
- * - "_OUT" suffix (GPIO3_OUT) for output-only pins (unlike 24xx)
- */
- AF26_34XX_GPIO0,
- AF22_34XX_GPIO9,
- AG9_34XX_GPIO23,
- AH8_34XX_GPIO29,
- U8_34XX_GPIO54_OUT,
- U8_34XX_GPIO54_DOWN,
- L8_34XX_GPIO63,
- G25_34XX_GPIO86_OUT,
- AG4_34XX_GPIO134_OUT,
- AF4_34XX_GPIO135_OUT,
- AE4_34XX_GPIO136_OUT,
- AF6_34XX_GPIO140_UP,
- AE6_34XX_GPIO141,
- AF5_34XX_GPIO142,
- AE5_34XX_GPIO143,
- H19_34XX_GPIO164_OUT,
- J25_34XX_GPIO170,
-
- /* OMAP3 SDRC CKE signals to SDR/DDR ram chips */
- H16_34XX_SDRC_CKE0,
- H17_34XX_SDRC_CKE1,
-
- /* MMC1 */
- N28_3430_MMC1_CLK,
- M27_3430_MMC1_CMD,
- N27_3430_MMC1_DAT0,
- N26_3430_MMC1_DAT1,
- N25_3430_MMC1_DAT2,
- P28_3430_MMC1_DAT3,
- P27_3430_MMC1_DAT4,
- P26_3430_MMC1_DAT5,
- R27_3430_MMC1_DAT6,
- R25_3430_MMC1_DAT7,
-
- /* MMC2 */
- AE2_3430_MMC2_CLK,
- AG5_3430_MMC2_CMD,
- AH5_3430_MMC2_DAT0,
- AH4_3430_MMC2_DAT1,
- AG4_3430_MMC2_DAT2,
- AF4_3430_MMC2_DAT3,
- AE4_3430_MMC2_DAT4,
- AH3_3430_MMC2_DAT5,
- AF3_3430_MMC2_DAT6,
- AE3_3430_MMC2_DAT7,
-
- /* MMC3 */
- AF10_3430_MMC3_CLK,
- AC3_3430_MMC3_CMD,
- AE11_3430_MMC3_DAT0,
- AH9_3430_MMC3_DAT1,
- AF13_3430_MMC3_DAT2,
- AF13_3430_MMC3_DAT3,
-
- /* SYS_NIRQ T2 INT1 */
- AF26_34XX_SYS_NIRQ,
-
- /* EHCI GPIO's for OMAP3EVM (Rev >= E) */
- AH14_34XX_GPIO21,
- AF9_34XX_GPIO22,
- U3_34XX_GPIO61,
-};
-
struct omap_mux_cfg {
struct pin_config *pins;
unsigned long size;
@@ -865,14 +649,14 @@ struct omap_mux_cfg {
#ifdef CONFIG_OMAP_MUX
/* setup pin muxing in Linux */
extern int omap1_mux_init(void);
-extern int omap2_mux_init(void);
extern int omap_mux_register(struct omap_mux_cfg *);
extern int omap_cfg_reg(unsigned long reg_cfg);
#else
/* boot loader does it all (no warnings from CONFIG_OMAP_MUX_WARNINGS) */
static inline int omap1_mux_init(void) { return 0; }
-static inline int omap2_mux_init(void) { return 0; }
static inline int omap_cfg_reg(unsigned long reg_cfg) { return 0; }
#endif
+extern int omap2_mux_init(void);
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index e52902a15c1..ef870de43c2 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -26,8 +26,10 @@
#define OMAP44XX_EMIF2_BASE 0x4d000000
#define OMAP44XX_DMM_BASE 0x4e000000
#define OMAP4430_32KSYNCT_BASE 0x4a304000
-#define OMAP4430_CM_BASE 0x4a004000
-#define OMAP4430_PRM_BASE 0x48306000
+#define OMAP4430_CM1_BASE 0x4a004000
+#define OMAP4430_CM_BASE OMAP4430_CM1_BASE
+#define OMAP4430_CM2_BASE 0x4a008000
+#define OMAP4430_PRM_BASE 0x4a306000
#define OMAP44XX_GPMC_BASE 0x50000000
#define OMAP443X_SCM_BASE 0x4a002000
#define OMAP443X_CTRL_BASE OMAP443X_SCM_BASE
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index 11a9773a4e7..dc1fac1d805 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -50,8 +50,8 @@
* @pm_lats: ptr to an omap_device_pm_latency table
* @pm_lats_cnt: ARRAY_SIZE() of what is passed to @pm_lats
* @pm_lat_level: array index of the last odpl entry executed - -1 if never
- * @dev_wakeup_lat: dev wakeup latency in microseconds
- * @_dev_wakeup_lat_limit: dev wakeup latency limit in usec - set by OMAP PM
+ * @dev_wakeup_lat: dev wakeup latency in nanoseconds
+ * @_dev_wakeup_lat_limit: dev wakeup latency limit in nsec - set by OMAP PM
* @_state: one of OMAP_DEVICE_STATE_* (see above)
* @flags: device flags
*
@@ -137,5 +137,7 @@ struct omap_device_pm_latency {
};
-#endif
+/* Get omap_device pointer from platform_device pointer */
+#define to_omap_device(x) container_of((x), struct omap_device, pdev)
+#endif
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index dbdd123eca1..007935a921e 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -50,6 +50,8 @@ struct omap_device;
#define SYSC_ENAWAKEUP_MASK (1 << SYSC_ENAWAKEUP_SHIFT)
#define SYSC_SOFTRESET_SHIFT 1
#define SYSC_SOFTRESET_MASK (1 << SYSC_SOFTRESET_SHIFT)
+#define SYSC_AUTOIDLE_SHIFT 0
+#define SYSC_AUTOIDLE_MASK (1 << SYSC_AUTOIDLE_SHIFT)
/* OCP SYSSTATUS bit shifts/masks */
#define SYSS_RESETDONE_SHIFT 0
@@ -62,7 +64,21 @@ struct omap_device;
/**
- * struct omap_hwmod_dma_info - MPU address space handled by the hwmod
+ * struct omap_hwmod_irq_info - MPU IRQs used by the hwmod
+ * @name: name of the IRQ channel (module local name)
+ * @irq: IRQ number
+ *
+ * @name should be something short, e.g., "tx" or "rx". It is for use
+ * by platform_get_resource_byname(). It is defined locally to the
+ * hwmod.
+ */
+struct omap_hwmod_irq_info {
+ const char *name;
+ u16 irq;
+};
+
+/**
+ * struct omap_hwmod_dma_info - DMA channels used by the hwmod
* @name: name of the DMA channel (module local name)
* @dma_ch: DMA channel ID
*
@@ -294,13 +310,17 @@ struct omap_hwmod_omap4_prcm {
* SDRAM controller, etc.
* HWMOD_INIT_NO_IDLE: don't idle this module at boot - important for SDRAM
* controller, etc.
+ * HWMOD_NO_AUTOIDLE: disable module autoidle (OCP_SYSCONFIG.AUTOIDLE)
+ * when module is enabled, rather than the default, which is to
+ * enable autoidle
* HWMOD_SET_DEFAULT_CLOCKACT: program CLOCKACTIVITY bits at startup
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
#define HWMOD_INIT_NO_RESET (1 << 2)
#define HWMOD_INIT_NO_IDLE (1 << 3)
-#define HWMOD_SET_DEFAULT_CLOCKACT (1 << 4)
+#define HWMOD_NO_OCP_AUTOIDLE (1 << 4)
+#define HWMOD_SET_DEFAULT_CLOCKACT (1 << 5)
/*
* omap_hwmod._int_flags definitions
@@ -373,7 +393,7 @@ struct omap_hwmod_omap4_prcm {
struct omap_hwmod {
const char *name;
struct omap_device *od;
- u8 *mpu_irqs;
+ struct omap_hwmod_irq_info *mpu_irqs;
struct omap_hwmod_dma_info *sdma_chs;
union {
struct omap_hwmod_omap2_prcm omap2;
diff --git a/arch/arm/plat-omap/include/plat/powerdomain.h b/arch/arm/plat-omap/include/plat/powerdomain.h
index 3d45ee1d3cf..0b960051eae 100644
--- a/arch/arm/plat-omap/include/plat/powerdomain.h
+++ b/arch/arm/plat-omap/include/plat/powerdomain.h
@@ -28,6 +28,8 @@
#define PWRDM_POWER_INACTIVE 0x2
#define PWRDM_POWER_ON 0x3
+#define PWRDM_MAX_PWRSTS 4
+
/* Powerdomain allowable state bitfields */
#define PWRSTS_OFF_ON ((1 << PWRDM_POWER_OFF) | \
(1 << PWRDM_POWER_ON))
@@ -40,7 +42,10 @@
/* Powerdomain flags */
#define PWRDM_HAS_HDWR_SAR (1 << 0) /* hardware save-and-restore support */
-
+#define PWRDM_HAS_MPU_QUIRK (1 << 1) /* MPU pwr domain has MEM bank 0 bits
+ * in MEM bank 1 position. This is
+ * true for OMAP3430
+ */
/*
* Number of memory banks that are power-controllable. On OMAP3430, the
@@ -85,15 +90,15 @@ struct powerdomain {
/* Used to represent the OMAP chip types containing this pwrdm */
const struct omap_chip_id omap_chip;
- /* Bit shift of this powerdomain's PM_WKDEP/CM_SLEEPDEP bit */
- const u8 dep_bit;
-
/* Powerdomains that can be told to wake this powerdomain up */
struct pwrdm_dep *wkdep_srcs;
/* Powerdomains that can be told to keep this pwrdm from inactivity */
struct pwrdm_dep *sleepdep_srcs;
+ /* Bit shift of this powerdomain's PM_WKDEP/CM_SLEEPDEP bit */
+ const u8 dep_bit;
+
/* Possible powerdomain power states */
const u8 pwrsts;
@@ -118,11 +123,11 @@ struct powerdomain {
struct list_head node;
int state;
- unsigned state_counter[4];
+ unsigned state_counter[PWRDM_MAX_PWRSTS];
#ifdef CONFIG_PM_DEBUG
s64 timer;
- s64 state_timer[4];
+ s64 state_timer[PWRDM_MAX_PWRSTS];
#endif
};
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index f704030d2a7..7b76f50564b 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -94,7 +94,10 @@
/* SMS register offsets - read/write with sms_{read,write}_reg() */
-#define SMS_SYSCONFIG 0x010
+#define SMS_SYSCONFIG 0x010
+#define SMS_ROT_CONTROL(context) (0x180 + 0x10 * context)
+#define SMS_ROT_SIZE(context) (0x184 + 0x10 * context)
+#define SMS_ROT_PHYSICAL_BA(context) (0x188 + 0x10 * context)
/* REVISIT: fill in other SMS registers here */
@@ -129,6 +132,10 @@ int omap2_sdrc_get_params(unsigned long r,
void omap2_sms_save_context(void);
void omap2_sms_restore_context(void);
+void omap2_sms_write_rot_control(u32 val, unsigned ctx);
+void omap2_sms_write_rot_size(u32 val, unsigned ctx);
+void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx);
+
#ifdef CONFIG_ARCH_OMAP2
struct memory_timings {
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index 9951345a25d..f5a4a92393e 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -53,6 +53,7 @@
#ifndef __ASSEMBLER__
extern void __init omap_serial_early_init(void);
extern void omap_serial_init(void);
+extern void omap_serial_init_port(int port);
extern int omap_uart_can_sleep(void);
extern void omap_uart_check_wakeup(void);
extern void omap_uart_prepare_suspend(void);
diff --git a/arch/arm/plat-omap/include/plat/smp.h b/arch/arm/plat-omap/include/plat/smp.h
index dcaa8fde706..8983d54c4fd 100644
--- a/arch/arm/plat-omap/include/plat/smp.h
+++ b/arch/arm/plat-omap/include/plat/smp.h
@@ -28,6 +28,8 @@
/* Needed for secondary core boot */
extern void omap_secondary_startup(void);
+extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
+extern void omap_auxcoreboot_addr(u32 cpu_addr);
/*
* We use Soft IRQ1 as the IPI
diff --git a/arch/arm/plat-omap/include/plat/vram.h b/arch/arm/plat-omap/include/plat/vram.h
new file mode 100644
index 00000000000..edd4987758a
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/vram.h
@@ -0,0 +1,62 @@
+/*
+ * VRAM manager for OMAP
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __OMAP_VRAM_H__
+#define __OMAP_VRAM_H__
+
+#include <linux/types.h>
+
+#define OMAP_VRAM_MEMTYPE_SDRAM 0
+#define OMAP_VRAM_MEMTYPE_SRAM 1
+#define OMAP_VRAM_MEMTYPE_MAX 1
+
+extern int omap_vram_add_region(unsigned long paddr, size_t size);
+extern int omap_vram_free(unsigned long paddr, size_t size);
+extern int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr);
+extern int omap_vram_reserve(unsigned long paddr, size_t size);
+extern void omap_vram_get_info(unsigned long *vram, unsigned long *free_vram,
+ unsigned long *largest_free_block);
+
+#ifdef CONFIG_OMAP2_VRAM
+extern void omap_vram_set_sdram_vram(u32 size, u32 start);
+extern void omap_vram_set_sram_vram(u32 size, u32 start);
+
+extern void omap_vram_reserve_sdram(void);
+extern unsigned long omap_vram_reserve_sram(unsigned long sram_pstart,
+ unsigned long sram_vstart,
+ unsigned long sram_size,
+ unsigned long pstart_avail,
+ unsigned long size_avail);
+#else
+static inline void omap_vram_set_sdram_vram(u32 size, u32 start) { }
+static inline void omap_vram_set_sram_vram(u32 size, u32 start) { }
+
+static inline void omap_vram_reserve_sdram(void) { }
+static inline unsigned long omap_vram_reserve_sram(unsigned long sram_pstart,
+ unsigned long sram_vstart,
+ unsigned long sram_size,
+ unsigned long pstart_avail,
+ unsigned long size_avail)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/vrfb.h b/arch/arm/plat-omap/include/plat/vrfb.h
new file mode 100644
index 00000000000..d8a03ced3b1
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/vrfb.h
@@ -0,0 +1,50 @@
+/*
+ * VRFB Rotation Engine
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __OMAP_VRFB_H__
+#define __OMAP_VRFB_H__
+
+#define OMAP_VRFB_LINE_LEN 2048
+
+struct vrfb {
+ u8 context;
+ void __iomem *vaddr[4];
+ unsigned long paddr[4];
+ u16 xres;
+ u16 yres;
+ u16 xoffset;
+ u16 yoffset;
+ u8 bytespp;
+ bool yuv_mode;
+};
+
+extern int omap_vrfb_request_ctx(struct vrfb *vrfb);
+extern void omap_vrfb_release_ctx(struct vrfb *vrfb);
+extern void omap_vrfb_adjust_size(u16 *width, u16 *height,
+ u8 bytespp);
+extern u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp);
+extern u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp);
+extern void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
+ u16 width, u16 height,
+ unsigned bytespp, bool yuv_mode);
+extern int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot);
+extern void omap_vrfb_restore_context(void);
+
+#endif /* __VRFB_H */
diff --git a/arch/arm/plat-omap/mux.c b/arch/arm/plat-omap/mux.c
index 05aebcad215..06703635ace 100644
--- a/arch/arm/plat-omap/mux.c
+++ b/arch/arm/plat-omap/mux.c
@@ -54,8 +54,12 @@ int __init_or_module omap_cfg_reg(const unsigned long index)
{
struct pin_config *reg;
- if (cpu_is_omap44xx())
- return 0;
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ printk(KERN_ERR "mux: Broken omap_cfg_reg(%lu) entry\n",
+ index);
+ WARN_ON(1);
+ return -EINVAL;
+ }
if (mux_cfg == NULL) {
printk(KERN_ERR "Pin mux table not initialized\n");
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index bb16e624a55..1e5648d3e3d 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -134,18 +134,18 @@ static int _omap_device_activate(struct omap_device *od, u8 ignore_lat)
(od->dev_wakeup_lat <= od->_dev_wakeup_lat_limit))
break;
- getnstimeofday(&a);
+ read_persistent_clock(&a);
/* XXX check return code */
odpl->activate_func(od);
- getnstimeofday(&b);
+ read_persistent_clock(&b);
c = timespec_sub(b, a);
- act_lat = timespec_to_ns(&c) * NSEC_PER_USEC;
+ act_lat = timespec_to_ns(&c);
pr_debug("omap_device: %s: pm_lat %d: activate: elapsed time "
- "%llu usec\n", od->pdev.name, od->pm_lat_level,
+ "%llu nsec\n", od->pdev.name, od->pm_lat_level,
act_lat);
WARN(act_lat > odpl->activate_lat, "omap_device: %s.%d: "
@@ -190,18 +190,18 @@ static int _omap_device_deactivate(struct omap_device *od, u8 ignore_lat)
od->_dev_wakeup_lat_limit))
break;
- getnstimeofday(&a);
+ read_persistent_clock(&a);
/* XXX check return code */
odpl->deactivate_func(od);
- getnstimeofday(&b);
+ read_persistent_clock(&b);
c = timespec_sub(b, a);
- deact_lat = timespec_to_ns(&c) * NSEC_PER_USEC;
+ deact_lat = timespec_to_ns(&c);
pr_debug("omap_device: %s: pm_lat %d: deactivate: elapsed time "
- "%llu usec\n", od->pdev.name, od->pm_lat_level,
+ "%llu nsec\n", od->pdev.name, od->pm_lat_level,
deact_lat);
WARN(deact_lat > odpl->deactivate_lat, "omap_device: %s.%d: "
@@ -459,7 +459,7 @@ int omap_device_enable(struct platform_device *pdev)
ret = _omap_device_activate(od, IGNORE_WAKEUP_LAT);
od->dev_wakeup_lat = 0;
- od->_dev_wakeup_lat_limit = INT_MAX;
+ od->_dev_wakeup_lat_limit = UINT_MAX;
od->_state = OMAP_DEVICE_STATE_ENABLED;
return ret;
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 3e923668778..d8d5094b37e 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -28,6 +28,7 @@
#include <plat/sram.h>
#include <plat/board.h>
#include <plat/cpu.h>
+#include <plat/vram.h>
#include <plat/control.h>
@@ -47,8 +48,10 @@
#define OMAP3_SRAM_VA 0xfe400000
#define OMAP3_SRAM_PUB_PA 0x40208000
#define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000)
-#define OMAP4_SRAM_PA 0x40200000 /*0x402f0000*/
-#define OMAP4_SRAM_VA 0xfe400000 /*0xfe4f0000*/
+#define OMAP4_SRAM_PA 0x40300000
+#define OMAP4_SRAM_VA 0xfe400000
+#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
+#define OMAP4_SRAM_PUB_VA (OMAP4_SRAM_VA + 0x4000)
#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
#define SRAM_BOOTLOADER_SZ 0x00
@@ -139,6 +142,10 @@ void __init omap_detect_sram(void)
} else {
omap_sram_size = 0x8000; /* 32K */
}
+ } else if (cpu_is_omap44xx()) {
+ omap_sram_base = OMAP4_SRAM_PUB_VA;
+ omap_sram_start = OMAP4_SRAM_PUB_PA;
+ omap_sram_size = 0xa000; /* 40K */
} else {
omap_sram_base = OMAP2_SRAM_PUB_VA;
omap_sram_start = OMAP2_SRAM_PUB_PA;
@@ -152,7 +159,7 @@ void __init omap_detect_sram(void)
} else if (cpu_is_omap44xx()) {
omap_sram_base = OMAP4_SRAM_VA;
omap_sram_start = OMAP4_SRAM_PA;
- omap_sram_size = 0x8000; /* 32K */
+ omap_sram_size = 0xe000; /* 56K */
} else {
omap_sram_base = OMAP2_SRAM_VA;
omap_sram_start = OMAP2_SRAM_PA;
@@ -185,6 +192,13 @@ void __init omap_detect_sram(void)
omap_sram_start + SRAM_BOOTLOADER_SZ,
omap_sram_size - SRAM_BOOTLOADER_SZ);
omap_sram_size -= reserved;
+
+ reserved = omap_vram_reserve_sram(omap_sram_start, omap_sram_base,
+ omap_sram_size,
+ omap_sram_start + SRAM_BOOTLOADER_SZ,
+ omap_sram_size - SRAM_BOOTLOADER_SZ);
+ omap_sram_size -= reserved;
+
omap_sram_ceil = omap_sram_base + omap_sram_size;
}
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 51033a4503c..d3bf17cd36f 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -137,7 +137,13 @@ static u32 __init omap_usb0_init(unsigned nwires, unsigned is_device)
if (is_device) {
if (cpu_is_omap24xx())
omap_cfg_reg(J20_24XX_USB0_PUEN);
- else
+ else if (cpu_is_omap7xx()) {
+ omap_cfg_reg(AA17_7XX_USB_DM);
+ omap_cfg_reg(W16_7XX_USB_PU_EN);
+ omap_cfg_reg(W17_7XX_USB_VBUSI);
+ omap_cfg_reg(W18_7XX_USB_DMCK_OUT);
+ omap_cfg_reg(W19_7XX_USB_DCRST);
+ } else
omap_cfg_reg(W4_USB_PUEN);
}
diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h
index 06598597841..226147b7e02 100644
--- a/arch/arm/plat-s3c/include/plat/nand.h
+++ b/arch/arm/plat-s3c/include/plat/nand.h
@@ -17,6 +17,7 @@
* Setting this flag will allow the kernel to
* look for it at boot time and also skip the NAND
* scan.
+ * @options: Default value to set into 'struct nand_chip' options.
* @nr_chips: Number of chips in this set
* @nr_partitions: Number of partitions pointed to by @partitions
* @name: Name of set (optional)
@@ -31,6 +32,7 @@ struct s3c2410_nand_set {
unsigned int disable_ecc:1;
unsigned int flash_bbt:1;
+ unsigned int options;
int nr_chips;
int nr_partitions;
char *name;
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 1dbaa29ac4d..635cb1865e4 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -4,7 +4,7 @@
# Copyright (C) 2001 Russell King
#
-include/asm-arm/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
+include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
@echo ' Generating $@'
@mkdir -p $(dir $@)
$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
diff --git a/arch/arm/tools/gen-mach-types b/arch/arm/tools/gen-mach-types
index ce319ef64bc..04fef71d7be 100644
--- a/arch/arm/tools/gen-mach-types
+++ b/arch/arm/tools/gen-mach-types
@@ -1,6 +1,6 @@
#!/bin/awk
#
-# Awk script to generate include/asm-arm/mach-types.h
+# Awk script to generate include/generated/mach-types.h
#
BEGIN { nr = 0 }
/^#/ { next }
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 07b976da617..c3a74ce24ef 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Wed Nov 25 22:14:58 2009
+# Last update: Wed Dec 16 20:06:34 2009
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -1776,6 +1776,7 @@ cybook3 MACH_CYBOOK3 CYBOOK3 1784
wdg002 MACH_WDG002 WDG002 1785
sg560adsl MACH_SG560ADSL SG560ADSL 1786
nextio_n2800_ica MACH_NEXTIO_N2800_ICA NEXTIO_N2800_ICA 1787
+dove_db MACH_DOVE_DB DOVE_DB 1788
marvell_newdb MACH_MARVELL_NEWDB MARVELL_NEWDB 1789
vandihud MACH_VANDIHUD VANDIHUD 1790
magx_e8 MACH_MAGX_E8 MAGX_E8 1791
@@ -2536,3 +2537,44 @@ c3ax03 MACH_C3AX03 C3AX03 2549
mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
esyx MACH_ESYX ESYX 2551
bulldog MACH_BULLDOG BULLDOG 2553
+derell_me2000 MACH_DERELL_ME2000 DERELL_ME2000 2554
+bcmring_base MACH_BCMRING_BASE BCMRING_BASE 2555
+bcmring_evm MACH_BCMRING_EVM BCMRING_EVM 2556
+bcmring_evm_jazz MACH_BCMRING_EVM_JAZZ BCMRING_EVM_JAZZ 2557
+bcmring_sp MACH_BCMRING_SP BCMRING_SP 2558
+bcmring_sv MACH_BCMRING_SV BCMRING_SV 2559
+bcmring_sv_jazz MACH_BCMRING_SV_JAZZ BCMRING_SV_JAZZ 2560
+bcmring_tablet MACH_BCMRING_TABLET BCMRING_TABLET 2561
+bcmring_vp MACH_BCMRING_VP BCMRING_VP 2562
+bcmring_evm_seikor MACH_BCMRING_EVM_SEIKOR BCMRING_EVM_SEIKOR 2563
+bcmring_sp_wqvga MACH_BCMRING_SP_WQVGA BCMRING_SP_WQVGA 2564
+bcmring_custom MACH_BCMRING_CUSTOM BCMRING_CUSTOM 2565
+acer_s200 MACH_ACER_S200 ACER_S200 2566
+bt270 MACH_BT270 BT270 2567
+iseo MACH_ISEO ISEO 2568
+cezanne MACH_CEZANNE CEZANNE 2569
+lucca MACH_LUCCA LUCCA 2570
+supersmart MACH_SUPERSMART SUPERSMART 2571
+magnolia2 MACH_MAGNOLIA2 MAGNOLIA2 2573
+emxx MACH_EMXX EMXX 2574
+outlaw MACH_OUTLAW OUTLAW 2575
+riot_bei2 MACH_RIOT_BEI2 RIOT_BEI2 2576
+riot_vox MACH_RIOT_VOX RIOT_VOX 2577
+riot_x37 MACH_RIOT_X37 RIOT_X37 2578
+mega25mx MACH_MEGA25MX MEGA25MX 2579
+benzina2 MACH_BENZINA2 BENZINA2 2580
+ignite MACH_IGNITE IGNITE 2581
+foggia MACH_FOGGIA FOGGIA 2582
+arezzo MACH_AREZZO AREZZO 2583
+leica_skywalker MACH_LEICA_SKYWALKER LEICA_SKYWALKER 2584
+jacinto2_jamr MACH_JACINTO2_JAMR JACINTO2_JAMR 2585
+gts_nova MACH_GTS_NOVA GTS_NOVA 2586
+p3600 MACH_P3600 P3600 2587
+dlt2 MACH_DLT2 DLT2 2588
+df3120 MACH_DF3120 DF3120 2589
+ecucore_9g20 MACH_ECUCORE_9G20 ECUCORE_9G20 2590
+nautel_lpc3240 MACH_NAUTEL_LPC3240 NAUTEL_LPC3240 2591
+glacier MACH_GLACIER GLACIER 2592
+phrazer_bulldog MACH_PHRAZER_BULLDOG PHRAZER_BULLDOG 2593
+omap3_bulldog MACH_OMAP3_BULLDOG OMAP3_BULLDOG 2594
+pca101 MACH_PCA101 PCA101 2595
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 2d7423af119..aed05bc3c2e 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -38,16 +38,72 @@ union vfp_state *last_VFP_context[NR_CPUS];
*/
unsigned int VFP_arch;
+/*
+ * Per-thread VFP initialization.
+ */
+static void vfp_thread_flush(struct thread_info *thread)
+{
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu;
+
+ memset(vfp, 0, sizeof(union vfp_state));
+
+ vfp->hard.fpexc = FPEXC_EN;
+ vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
+
+ /*
+ * Disable VFP to ensure we initialize it first. We must ensure
+ * that the modification of last_VFP_context[] and hardware disable
+ * are done for the same CPU and without preemption.
+ */
+ cpu = get_cpu();
+ if (last_VFP_context[cpu] == vfp)
+ last_VFP_context[cpu] = NULL;
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ put_cpu();
+}
+
+static void vfp_thread_release(struct thread_info *thread)
+{
+ /* release case: Per-thread VFP cleanup. */
+ union vfp_state *vfp = &thread->vfpstate;
+ unsigned int cpu = thread->cpu;
+
+ if (last_VFP_context[cpu] == vfp)
+ last_VFP_context[cpu] = NULL;
+}
+
+/*
+ * When this function is called with the following 'cmd's, the following
+ * is true while this function is being run:
+ * THREAD_NOTIFY_SWITCH:
+ * - the previously running thread will not be scheduled onto another CPU.
+ * - the next thread to be run (v) will not be running on another CPU.
+ * - thread->cpu is the local CPU number
+ * - not preemptible as we're called in the middle of a thread switch
+ * THREAD_NOTIFY_FLUSH:
+ * - the thread (v) will be running on the local CPU, so
+ * v === current_thread_info()
+ * - thread->cpu is the local CPU number at the time it is accessed,
+ * but may change at any time.
+ * - we could be preempted if tree preempt rcu is enabled, so
+ * it is unsafe to use thread->cpu.
+ * THREAD_NOTIFY_RELEASE:
+ * - the thread (v) will not be running on any CPU; it is a dead thread.
+ * - thread->cpu will be the last CPU the thread ran on, which may not
+ * be the current CPU.
+ * - we could be preempted if tree preempt rcu is enabled.
+ */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
struct thread_info *thread = v;
- union vfp_state *vfp;
- __u32 cpu = thread->cpu;
if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
u32 fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
+ unsigned int cpu = thread->cpu;
+
/*
* On SMP, if VFP is enabled, save the old state in
* case the thread migrates to a different CPU. The
@@ -74,25 +130,10 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
return NOTIFY_DONE;
}
- vfp = &thread->vfpstate;
- if (cmd == THREAD_NOTIFY_FLUSH) {
- /*
- * Per-thread VFP initialisation.
- */
- memset(vfp, 0, sizeof(union vfp_state));
-
- vfp->hard.fpexc = FPEXC_EN;
- vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
-
- /*
- * Disable VFP to ensure we initialise it first.
- */
- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
- }
-
- /* flush and release case: Per-thread VFP cleanup. */
- if (last_VFP_context[cpu] == vfp)
- last_VFP_context[cpu] = NULL;
+ if (cmd == THREAD_NOTIFY_FLUSH)
+ vfp_thread_flush(thread);
+ else
+ vfp_thread_release(thread);
return NOTIFY_DONE;
}
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index d856354f427..f2b31933318 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -112,6 +112,11 @@ config CPU_AT32AP7002
bool
select CPU_AT32AP700X
+# AP700X boards
+config BOARD_ATNGW100_COMMON
+ bool
+ select CPU_AT32AP7000
+
choice
prompt "AVR32 board type"
default BOARD_ATSTK1000
@@ -119,9 +124,13 @@ choice
config BOARD_ATSTK1000
bool "ATSTK1000 evaluation board"
-config BOARD_ATNGW100
+config BOARD_ATNGW100_MKI
bool "ATNGW100 Network Gateway"
- select CPU_AT32AP7000
+ select BOARD_ATNGW100_COMMON
+
+config BOARD_ATNGW100_MKII
+ bool "ATNGW100 mkII Network Gateway"
+ select BOARD_ATNGW100_COMMON
config BOARD_HAMMERHEAD
bool "Hammerhead board"
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index c21a3290d54..ead8a75203a 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -32,7 +32,7 @@ head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
head-y += arch/avr32/kernel/head.o
core-y += $(machdirs)
core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
-core-$(CONFIG_BOARD_ATNGW100) += arch/avr32/boards/atngw100/
+core-$(CONFIG_BOARD_ATNGW100_COMMON) += arch/avr32/boards/atngw100/
core-$(CONFIG_BOARD_HAMMERHEAD) += arch/avr32/boards/hammerhead/
core-$(CONFIG_BOARD_FAVR_32) += arch/avr32/boards/favr-32/
core-$(CONFIG_BOARD_MERISC) += arch/avr32/boards/merisc/
diff --git a/arch/avr32/boards/atngw100/Kconfig b/arch/avr32/boards/atngw100/Kconfig
index be27a0218ab..4e55617ade2 100644
--- a/arch/avr32/boards/atngw100/Kconfig
+++ b/arch/avr32/boards/atngw100/Kconfig
@@ -1,6 +1,17 @@
# NGW100 customization
-if BOARD_ATNGW100
+if BOARD_ATNGW100_COMMON
+
+config BOARD_ATNGW100_MKII_LCD
+ bool "Enable ATNGW100 mkII LCD interface"
+ depends on BOARD_ATNGW100_MKII
+ help
+ This enables the LCD controller (LCDC) in the AT32AP7000. Since the
+ LCDC is multiplexed with MACB1 (LAN) Ethernet port, only one can be
+ enabled at a time.
+
+ This choice enables the LCDC and disables the MACB1 interface marked
+ LAN on the PCB.
choice
prompt "Select an NGW100 add-on board to support"
@@ -11,15 +22,11 @@ config BOARD_ATNGW100_ADDON_NONE
config BOARD_ATNGW100_EVKLCD10X
bool "EVKLCD10X addon board"
+ depends on BOARD_ATNGW100_MKI || BOARD_ATNGW100_MKII_LCD
help
This enables support for the EVKLCD100 (QVGA) or EVKLCD101 (VGA)
- addon board for the NGW100. By enabling this the LCD controller and
- AC97 controller is added as platform devices.
-
- This choice disables the detect pin and the write-protect pin for the
- MCI platform device, since it conflicts with the LCD platform device.
- The MCI pins can be reenabled by editing the "add device function" but
- this may break the setup for other displays that use these pins.
+ addon board for the NGW100 and NGW100 mkII. By enabling this the LCD
+ controller and AC97 controller are added as platform devices.
config BOARD_ATNGW100_MRMT
bool "Mediama RMT1/2 add-on board"
@@ -55,4 +62,4 @@ if BOARD_ATNGW100_MRMT
source "arch/avr32/boards/atngw100/Kconfig_mrmt"
endif
-endif # BOARD_ATNGW100
+endif # BOARD_ATNGW100_COMMON
diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c
index 00337112c5a..20388750d56 100644
--- a/arch/avr32/boards/atngw100/evklcd10x.c
+++ b/arch/avr32/boards/atngw100/evklcd10x.c
@@ -164,7 +164,12 @@ static int __init atevklcd10x_init(void)
at32_add_device_lcdc(0, &atevklcd10x_lcdc_data,
fbmem_start, fbmem_size,
- ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL);
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ ATMEL_LCDC_PRI_18BIT | ATMEL_LCDC_PC_DVAL
+#else
+ ATMEL_LCDC_ALT_18BIT | ATMEL_LCDC_PE_DVAL
+#endif
+ );
at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH);
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index bf78e516a85..7919be311f4 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -302,6 +302,7 @@ static int __init mrmt1_init(void)
at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ),
GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH);
set_irq_type( AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING );
+ at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info));
spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info));
#endif
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index bc299fbbeb4..8c6a2440e34 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -20,6 +20,7 @@
#include <linux/leds.h>
#include <linux/spi/spi.h>
#include <linux/atmel-mci.h>
+#include <linux/usb/atmel_usba_udc.h>
#include <asm/io.h>
#include <asm/setup.h>
@@ -36,6 +37,75 @@ unsigned long at32_board_osc_rates[3] = {
[2] = 12000000, /* 12 MHz on osc1 */
};
+/*
+ * The ATNGW100 mkII is very similar to the ATNGW100. Both have the AT32AP7000
+ * chip on board; the difference is that the ATNGW100 mkII has 128 MB 32-bit
+ * SDRAM (the ATNGW100 has 32 MB 16-bit SDRAM) and 256 MB 16-bit NAND flash
+ * (the ATNGW100 has none.)
+ *
+ * The RAM difference is handled by the boot loader, so the only difference we
+ * end up handling here is the NAND flash, EBI pin reservation and if LCDC or
+ * MACB1 should be enabled.
+ */
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+#include <linux/mtd/partitions.h>
+#include <mach/smc.h>
+
+static struct smc_timing nand_timing __initdata = {
+ .ncs_read_setup = 0,
+ .nrd_setup = 10,
+ .ncs_write_setup = 0,
+ .nwe_setup = 10,
+
+ .ncs_read_pulse = 30,
+ .nrd_pulse = 15,
+ .ncs_write_pulse = 30,
+ .nwe_pulse = 15,
+
+ .read_cycle = 30,
+ .write_cycle = 30,
+
+ .ncs_read_recover = 0,
+ .nrd_recover = 15,
+ .ncs_write_recover = 0,
+ /* WE# high -> RE# low min 60 ns */
+ .nwe_recover = 50,
+};
+
+static struct smc_config nand_config __initdata = {
+ .bus_width = 2,
+ .nrd_controlled = 1,
+ .nwe_controlled = 1,
+ .nwait_mode = 0,
+ .byte_write = 0,
+ .tdf_cycles = 2,
+ .tdf_mode = 0,
+};
+
+static struct mtd_partition nand_partitions[] = {
+ {
+ .name = "main",
+ .offset = 0x00000000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct mtd_partition *nand_part_info(int size, int *num_partitions)
+{
+ *num_partitions = ARRAY_SIZE(nand_partitions);
+ return nand_partitions;
+}
+
+static struct atmel_nand_data atngw100mkii_nand_data __initdata = {
+ .cle = 21,
+ .ale = 22,
+ .rdy_pin = GPIO_PIN_PB(28),
+ .enable_pin = GPIO_PIN_PE(23),
+ .bus_width_16 = true,
+ .partition_info = nand_part_info,
+};
+#endif
+
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
@@ -56,9 +126,9 @@ static struct spi_board_info spi0_board_info[] __initdata = {
static struct mci_platform_data __initdata mci0_data = {
.slot[0] = {
.bus_width = 4,
-#if defined(CONFIG_BOARD_ATNGW100_EVKLCD10X) || defined(CONFIG_BOARD_ATNGW100_MRMT1)
- .detect_pin = GPIO_PIN_NONE,
- .wp_pin = GPIO_PIN_NONE,
+#if defined(CONFIG_BOARD_ATNGW100_MKII)
+ .detect_pin = GPIO_PIN_PC(25),
+ .wp_pin = GPIO_PIN_PE(22),
#else
.detect_pin = GPIO_PIN_PC(25),
.wp_pin = GPIO_PIN_PE(0),
@@ -66,6 +136,14 @@ static struct mci_platform_data __initdata mci0_data = {
},
};
+static struct usba_platform_data atngw100_usba_data __initdata = {
+#if defined(CONFIG_BOARD_ATNGW100_MKII)
+ .vbus_pin = GPIO_PIN_PE(26),
+#else
+ .vbus_pin = -ENODEV,
+#endif
+};
+
/*
* The next two functions should go away as the boot loader is
* supposed to initialize the macb address registers with a valid
@@ -173,18 +251,27 @@ static int __init atngw100_init(void)
unsigned i;
/*
- * ATNGW100 uses 16-bit SDRAM interface, so we don't need to
- * reserve any pins for it.
+ * ATNGW100 mkII uses 32-bit SDRAM interface. Reserve the
+ * SDRAM-specific pins so that nobody messes with them.
*/
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
+
+ smc_set_timing(&nand_config, &nand_timing);
+ smc_set_configuration(3, &nand_config);
+ at32_add_device_nand(0, &atngw100mkii_nand_data);
+#endif
at32_add_device_usart(0);
set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
+#ifndef CONFIG_BOARD_ATNGW100_MKII_LCD
set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
+#endif
at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
at32_add_device_mci(0, &mci0_data);
- at32_add_device_usba(0, NULL);
+ at32_add_device_usba(0, &atngw100_usba_data);
for (i = 0; i < ARRAY_SIZE(ngw_leds); i++) {
at32_select_gpio(ngw_leds[i].gpio,
@@ -194,10 +281,14 @@ static int __init atngw100_init(void)
/* all these i2c/smbus pins should have external pullups for
* open-drain sharing among all I2C devices. SDA and SCL do;
- * PB28/EXTINT3 doesn't; it should be SMBALERT# (for PMBus),
- * but it's not available off-board.
+ * PB28/EXTINT3 (ATNGW100) and PE21 (ATNGW100 mkII) don't; it should
+ * be SMBALERT# (for PMBus), but it's not available off-board.
*/
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ at32_select_periph(GPIO_PIOE_BASE, 1 << 21, 0, AT32_GPIOF_PULLUP);
+#else
at32_select_periph(GPIO_PIOB_BASE, 1 << 28, 0, AT32_GPIOF_PULLUP);
+#endif
at32_select_gpio(i2c_gpio_data.sda_pin,
AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
at32_select_gpio(i2c_gpio_data.scl_pin,
@@ -211,14 +302,22 @@ postcore_initcall(atngw100_init);
static int __init atngw100_arch_init(void)
{
- /* PB30 is the otherwise unused jumper on the mainboard, with an
- * external pullup; the jumper grounds it. Use it however you
- * like, including letting U-Boot or Linux tweak boot sequences.
+ /* PB30 (ATNGW100) and PE30 (ATNGW100 mkII) are the otherwise unused
+ * jumper on the mainboard, with an external pullup; the jumper grounds
+ * it. Use it however you like, including letting U-Boot or Linux tweak
+ * boot sequences.
*/
+#ifdef CONFIG_BOARD_ATNGW100_MKII
+ at32_select_gpio(GPIO_PIN_PE(30), 0);
+ gpio_request(GPIO_PIN_PE(30), "j15");
+ gpio_direction_input(GPIO_PIN_PE(30));
+ gpio_export(GPIO_PIN_PE(30), false);
+#else
at32_select_gpio(GPIO_PIN_PB(30), 0);
gpio_request(GPIO_PIN_PB(30), "j15");
gpio_direction_input(GPIO_PIN_PB(30));
gpio_export(GPIO_PIN_PB(30), false);
+#endif
/* set_irq_type() after the arch_initcall for EIC has run, and
* before the I2C subsystem could try using this IRQ.
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 574aca97533..32205c9d37d 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc1
-# Tue Aug 5 16:00:47 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 09:39:22 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -59,38 +75,40 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
-# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
-# CONFIG_HAVE_IOREMAP_PROT is not set
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-# CONFIG_USE_GENERIC_SMP_HELPERS is not set
CONFIG_HAVE_CLK=y
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -98,11 +116,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -118,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -133,8 +148,23 @@ CONFIG_PERFORMANCE_COUNTERS=y
CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
# CONFIG_BOARD_ATSTK1000 is not set
-CONFIG_BOARD_ATNGW100=y
+CONFIG_BOARD_ATNGW100_MKI=y
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATSTK1002 is not set
+# CONFIG_BOARD_ATSTK1003 is not set
+# CONFIG_BOARD_ATSTK1004 is not set
+# CONFIG_BOARD_ATSTK1006 is not set
+# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set
+# CONFIG_BOARD_ATSTK1000_J2_RGB is not set
+CONFIG_BOARD_ATNGW100_ADDON_NONE=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set
+# CONFIG_BOARD_ATNGW100_MRMT is not set
CONFIG_LOADER_U_BOOT=y
#
@@ -150,7 +180,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_QUICKLIST=y
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -162,14 +192,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
# CONFIG_HZ_100 is not set
@@ -177,7 +209,7 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
@@ -188,6 +220,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
@@ -219,6 +252,8 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_NET=y
@@ -271,7 +306,6 @@ CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -314,10 +348,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_IPTABLES=m
@@ -343,16 +379,18 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
@@ -364,26 +402,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -395,6 +440,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -404,6 +450,7 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -453,16 +500,17 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x80000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -478,9 +526,22 @@ CONFIG_MTD_DATAFLASH=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
-# CONFIG_MTD_UBI is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
@@ -498,10 +559,20 @@ CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ATMEL_SSC is not set
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -534,26 +605,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -603,9 +685,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -614,7 +698,9 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=m
#
@@ -624,6 +710,7 @@ CONFIG_I2C_ALGOBIT=m
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -644,14 +731,6 @@ CONFIG_I2C_GPIO=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-CONFIG_EEPROM_AT24=m
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -666,19 +745,28 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
CONFIG_GPIO_SYSFS=y
#
+# Memory mapped GPIO expanders:
+#
+
+#
# I2C GPIO expanders:
#
# CONFIG_GPIO_MAX732X is not set
@@ -694,11 +782,15 @@ CONFIG_GPIO_SYSFS=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -707,11 +799,11 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@@ -720,22 +812,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -756,32 +843,43 @@ CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -789,12 +887,18 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
@@ -802,10 +906,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y
CONFIG_MMC_TEST=m
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -815,7 +921,11 @@ CONFIG_LEDS_CLASS=y
# LED drivers
#
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -823,7 +933,13 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
@@ -855,25 +971,33 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
# CONFIG_RTC_DRV_M41T94 is not set
# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -892,24 +1016,38 @@ CONFIG_DMA_ENGINE=y
# DMA Clients
#
# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
-CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
-CONFIG_JBD=m
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -917,6 +1055,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -940,15 +1084,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -967,7 +1109,9 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_UBIFS_FS is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -975,7 +1119,9 @@ CONFIG_JFFS2_RTIME=y
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
+CONFIG_UFS_FS=y
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@@ -1060,14 +1206,18 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1083,6 +1233,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1091,13 +1242,39 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1105,19 +1282,30 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=y
# CONFIG_CRYPTO_TEST is not set
@@ -1145,11 +1333,13 @@ CONFIG_CRYPTO_PCBC=m
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1166,7 +1356,7 @@ CONFIG_CRYPTO_SHA1=y
#
# Ciphers
#
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
# CONFIG_CRYPTO_ANUBIS is not set
CONFIG_CRYPTO_ARC4=m
# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1186,15 +1376,21 @@ CONFIG_CRYPTO_DES=y
# Compression
#
CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
-# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
@@ -1204,8 +1400,9 @@ CONFIG_CRC7=m
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 86a45b5c9d0..c732cc397ad 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25.6
-# Wed Jun 18 16:06:32 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 09:36:39 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -124,13 +148,26 @@ CONFIG_PERFORMANCE_COUNTERS=y
CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
# CONFIG_BOARD_ATSTK1000 is not set
-CONFIG_BOARD_ATNGW100=y
+CONFIG_BOARD_ATNGW100_MKI=y
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATSTK1002 is not set
+# CONFIG_BOARD_ATSTK1003 is not set
+# CONFIG_BOARD_ATSTK1004 is not set
+# CONFIG_BOARD_ATSTK1006 is not set
+# CONFIG_BOARD_ATSTK1000_J2_LED8 is not set
+# CONFIG_BOARD_ATSTK1000_J2_RGB is not set
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set
# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
-CONFIG_BOARD_ATNGW100_I2C_GPIO=y
CONFIG_LOADER_U_BOOT=y
#
@@ -139,14 +176,14 @@ CONFIG_LOADER_U_BOOT=y
# CONFIG_AP700X_32_BIT_SMC is not set
CONFIG_AP700X_16_BIT_SMC=y
# CONFIG_AP700X_8_BIT_SMC is not set
-CONFIG_GPIO_DEV=y
CONFIG_LOAD_ADDRESS=0x10000000
CONFIG_ENTRY_ADDRESS=0x90000000
CONFIG_PHYS_OFFSET=0x10000000
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -158,33 +195,36 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_DW_DMAC=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
# Power management options
#
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# CPU Frequency scaling
@@ -194,6 +234,7 @@ CONFIG_CPU_FREQ_TABLE=y
# CONFIG_CPU_FREQ_DEBUG is not set
# CONFIG_CPU_FREQ_STAT is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
@@ -214,11 +255,9 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
@@ -232,6 +271,7 @@ CONFIG_XFRM_USER=y
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
CONFIG_NET_KEY=y
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -269,7 +309,6 @@ CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -285,8 +324,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
CONFIG_INET6_XFRM_MODE_BEET=y
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_IPV6_TUNNEL is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
# CONFIG_NETWORK_SECMARK is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -310,10 +351,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_IPTABLES=m
@@ -339,16 +382,20 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
# CONFIG_LLC2 is not set
@@ -358,26 +405,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -389,6 +443,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -398,10 +453,12 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -446,16 +503,17 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x80000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -471,6 +529,11 @@ CONFIG_MTD_DATAFLASH=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
CONFIG_MTD_UBI=y
@@ -499,10 +562,20 @@ CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ATMEL_SSC is not set
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -514,7 +587,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -536,25 +608,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -598,15 +682,30 @@ CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
# CONFIG_INPUT_MISC is not set
#
@@ -619,9 +718,11 @@ CONFIG_INPUT_TOUCHSCREEN=y
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -636,9 +737,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -647,45 +750,44 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
#
-# I2C Algorithms
+# I2C Hardware Bus support
#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
#
-# I2C Hardware Bus support
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
-CONFIG_I2C_ATMELTWI=m
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -695,30 +797,48 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
-CONFIG_HAVE_GPIO_LIB=y
#
-# GPIO Support
+# PPS support
#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
#
+# PCI GPIO expanders:
+#
+
+#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -731,24 +851,31 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -758,6 +885,7 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
@@ -765,8 +893,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
@@ -779,6 +907,9 @@ CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -792,119 +923,124 @@ CONFIG_FB_ATMEL=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE is not set
# CONFIG_LOGO is not set
-
-#
-# Sound
-#
CONFIG_SOUND=y
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
-CONFIG_SND_TIMER=m
+CONFIG_SND_TIMER=y
CONFIG_SND_PCM=m
# CONFIG_SND_SEQUENCER is not set
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_AC97_CODEC=m
-# CONFIG_SND_DUMMY is not set
-# CONFIG_SND_MTPAV is not set
-# CONFIG_SND_SERIAL_U16550 is not set
-# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_DRIVERS is not set
#
-# AVR32 devices
-#
-CONFIG_SND_ATMEL_AC97=m
-
-#
-# SPI devices
-#
-
-#
-# System on Chip audio support
+# Atmel devices (AVR32 and AT91)
#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# Open Sound System
-#
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -913,7 +1049,13 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -921,6 +1063,14 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@@ -950,51 +1100,84 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
# on-CPU RTC drivers
#
CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
#
-# Userspace I/O
+# DMA Devices
#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1002,6 +1185,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1025,15 +1214,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1059,8 +1246,10 @@ CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -1071,19 +1260,16 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_BIND34 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@@ -1151,16 +1337,24 @@ CONFIG_NLS_UTF8=m
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
@@ -1172,19 +1366,48 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1192,63 +1415,118 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=y
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_SEED is not set
# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_TEST is not set
-CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_ZLIB is not set
CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
@@ -1257,8 +1535,9 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index a96b68ea5e8..5ef67da343b 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25.6
-# Wed Jun 18 16:09:32 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 09:37:19 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,22 +35,37 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -59,43 +75,51 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -109,7 +133,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -124,13 +148,20 @@ CONFIG_PERFORMANCE_COUNTERS=y
CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
# CONFIG_BOARD_ATSTK1000 is not set
-CONFIG_BOARD_ATNGW100=y
+CONFIG_BOARD_ATNGW100_MKI=y
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set
CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y
# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
-CONFIG_BOARD_ATNGW100_I2C_GPIO=y
CONFIG_LOADER_U_BOOT=y
#
@@ -139,14 +170,14 @@ CONFIG_LOADER_U_BOOT=y
# CONFIG_AP700X_32_BIT_SMC is not set
CONFIG_AP700X_16_BIT_SMC=y
# CONFIG_AP700X_8_BIT_SMC is not set
-CONFIG_GPIO_DEV=y
CONFIG_LOAD_ADDRESS=0x10000000
CONFIG_ENTRY_ADDRESS=0x90000000
CONFIG_PHYS_OFFSET=0x10000000
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -158,33 +189,36 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
-CONFIG_DW_DMAC=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
# Power management options
#
-CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# CPU Frequency scaling
@@ -194,6 +228,7 @@ CONFIG_CPU_FREQ_TABLE=y
# CONFIG_CPU_FREQ_DEBUG is not set
# CONFIG_CPU_FREQ_STAT is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
@@ -214,11 +249,9 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
@@ -232,6 +265,7 @@ CONFIG_XFRM_USER=y
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
CONFIG_NET_KEY=y
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -269,7 +303,6 @@ CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IP_VS is not set
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
# CONFIG_IPV6_ROUTER_PREF is not set
@@ -285,8 +318,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
CONFIG_INET6_XFRM_MODE_BEET=y
# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_IPV6_TUNNEL is not set
# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
# CONFIG_NETWORK_SECMARK is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
@@ -310,10 +345,12 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
#
# IP: Netfilter Configuration
#
+CONFIG_NF_DEFRAG_IPV4=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_CONNTRACK_PROC_COMPAT=y
CONFIG_IP_NF_IPTABLES=m
@@ -339,16 +376,20 @@ CONFIG_IP_NF_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
+CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
# CONFIG_LLC2 is not set
@@ -358,26 +399,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -389,6 +437,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -398,10 +447,12 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -446,16 +497,17 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x80000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -471,6 +523,11 @@ CONFIG_MTD_DATAFLASH=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
CONFIG_MTD_UBI=y
@@ -499,10 +556,20 @@ CONFIG_MISC_DEVICES=y
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ATMEL_SSC is not set
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -514,7 +581,6 @@ CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -536,25 +602,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -598,15 +676,30 @@ CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
# CONFIG_INPUT_MISC is not set
#
@@ -619,9 +712,11 @@ CONFIG_INPUT_TOUCHSCREEN=y
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -636,9 +731,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -647,45 +744,44 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
#
-# I2C Algorithms
+# I2C Hardware Bus support
#
-CONFIG_I2C_ALGOBIT=m
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
#
-# I2C Hardware Bus support
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
-CONFIG_I2C_ATMELTWI=m
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
-
-#
-# SPI support
-#
CONFIG_SPI=y
# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
@@ -695,30 +791,48 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
-CONFIG_HAVE_GPIO_LIB=y
#
-# GPIO Support
+# PPS support
#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
#
# I2C GPIO expanders:
#
+# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
#
+# PCI GPIO expanders:
+#
+
+#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -731,24 +845,31 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -758,6 +879,7 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
@@ -765,8 +887,8 @@ CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_SYS_FILLRECT is not set
# CONFIG_FB_SYS_COPYAREA is not set
# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_SYS_FOPS is not set
-CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
@@ -779,6 +901,9 @@ CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -792,119 +917,124 @@ CONFIG_FB_ATMEL=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE is not set
# CONFIG_LOGO is not set
-
-#
-# Sound
-#
CONFIG_SOUND=y
-
-#
-# Advanced Linux Sound Architecture
-#
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
-CONFIG_SND_TIMER=m
+CONFIG_SND_TIMER=y
CONFIG_SND_PCM=m
# CONFIG_SND_SEQUENCER is not set
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
-
-#
-# Generic devices
-#
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_AC97_CODEC=m
-# CONFIG_SND_DUMMY is not set
-# CONFIG_SND_MTPAV is not set
-# CONFIG_SND_SERIAL_U16550 is not set
-# CONFIG_SND_MPU401 is not set
+# CONFIG_SND_DRIVERS is not set
#
-# AVR32 devices
-#
-CONFIG_SND_ATMEL_AC97=m
-
-#
-# SPI devices
-#
-
-#
-# System on Chip audio support
+# Atmel devices (AVR32 and AT91)
#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
# CONFIG_SND_SOC is not set
-
-#
-# SoC Audio support for SuperH
-#
-
-#
-# ALSA SoC audio for Freescale SOCs
-#
-
-#
-# Open Sound System
-#
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -913,7 +1043,13 @@ CONFIG_LEDS_CLASS=y
#
# LED drivers
#
+# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -921,6 +1057,14 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_HCTOSYS=y
@@ -950,51 +1094,84 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
# on-CPU RTC drivers
#
CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
#
-# Userspace I/O
+# DMA Devices
#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1002,6 +1179,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1025,15 +1208,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
# CONFIG_PROC_KCORE is not set
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_CONFIGFS_FS=y
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1059,8 +1240,10 @@ CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -1071,19 +1254,16 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
-CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_BIND34 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@@ -1151,16 +1331,24 @@ CONFIG_NLS_UTF8=m
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
+CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SHIRQ is not set
CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_SLUB_DEBUG_ON is not set
# CONFIG_SLUB_STATS is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
@@ -1172,19 +1360,48 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1192,63 +1409,118 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
-# CONFIG_CRYPTO_SEQIV is not set
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=y
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
# CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-# CONFIG_CRYPTO_PCBC is not set
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_SEED is not set
# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_TEST is not set
-CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_ZLIB is not set
CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
@@ -1257,8 +1529,9 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
new file mode 100644
index 00000000000..9b8b5b3b9c7
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -0,0 +1,1414 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc5
+# Thu Nov 5 15:32:26 2009
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP700X=y
+CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
+# CONFIG_BOARD_ATSTK1000 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+CONFIG_BOARD_ATNGW100_MKII=y
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+# CONFIG_BOARD_ATNGW100_MKII_LCD is not set
+CONFIG_BOARD_ATNGW100_ADDON_NONE=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X is not set
+# CONFIG_BOARD_ATNGW100_MRMT is not set
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP700X_32_BIT_SMC is not set
+CONFIG_AP700X_16_BIT_SMC=y
+# CONFIG_AP700X_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_CMDLINE=""
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_AT32AP=y
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_HW=y
+# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ATMEL_PWM is not set
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+CONFIG_USB_GADGET_ATMEL_USBA=y
+CONFIG_USB_ATMEL_USBA=y
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+CONFIG_MMC_TEST=m
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
+CONFIG_MMC_SPI=m
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_UBIFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+CONFIG_UFS_FS=y
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
new file mode 100644
index 00000000000..01e913d66be
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -0,0 +1,1549 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc5
+# Thu Nov 5 15:33:09 2009
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP700X=y
+CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
+# CONFIG_BOARD_ATSTK1000 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+CONFIG_BOARD_ATNGW100_MKII=y
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+CONFIG_BOARD_ATNGW100_MKII_LCD=y
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA is not set
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP700X_32_BIT_SMC is not set
+CONFIG_AP700X_16_BIT_SMC=y
+# CONFIG_AP700X_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_CMDLINE=""
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_AT32AP=y
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_HW=y
+# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ATMEL_PWM is not set
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_ATMEL=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=m
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+# CONFIG_SND_DRIVERS is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+CONFIG_USB_GADGET_ATMEL_USBA=y
+CONFIG_USB_ATMEL_USBA=y
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
new file mode 100644
index 00000000000..bbf6bc316ec
--- /dev/null
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -0,0 +1,1549 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc5
+# Thu Nov 5 15:33:32 2009
+#
+CONFIG_AVR32=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_FULL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=1
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type and features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_SUBARCH_AVR32B=y
+CONFIG_MMU=y
+CONFIG_PERFORMANCE_COUNTERS=y
+CONFIG_PLATFORM_AT32AP=y
+CONFIG_CPU_AT32AP700X=y
+CONFIG_CPU_AT32AP7000=y
+CONFIG_BOARD_ATNGW100_COMMON=y
+# CONFIG_BOARD_ATSTK1000 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+CONFIG_BOARD_ATNGW100_MKII=y
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
+CONFIG_BOARD_ATNGW100_MKII_LCD=y
+# CONFIG_BOARD_ATNGW100_ADDON_NONE is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X=y
+# CONFIG_BOARD_ATNGW100_MRMT is not set
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_QVGA is not set
+CONFIG_BOARD_ATNGW100_EVKLCD10X_VGA=y
+# CONFIG_BOARD_ATNGW100_EVKLCD10X_POW_QVGA is not set
+CONFIG_LOADER_U_BOOT=y
+
+#
+# Atmel AVR32 AP options
+#
+# CONFIG_AP700X_32_BIT_SMC is not set
+CONFIG_AP700X_16_BIT_SMC=y
+# CONFIG_AP700X_8_BIT_SMC is not set
+CONFIG_LOAD_ADDRESS=0x10000000
+CONFIG_ENTRY_ADDRESS=0x90000000
+CONFIG_PHYS_OFFSET=0x10000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_QUICKLIST=y
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
+# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
+# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+# CONFIG_ARCH_SPARSEMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_NR_QUICK=2
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+# CONFIG_OWNERSHIP_TRACE is not set
+CONFIG_NMI_DEBUGGING=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_CMDLINE=""
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+# CONFIG_CPU_FREQ_STAT is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_AT32AP=y
+
+#
+# Bus options
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+# CONFIG_IP_PIMSM_V2 is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_NETFILTER_ADVANCED is not set
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_LOG=m
+# CONFIG_IP_NF_TARGET_ULOG is not set
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+# CONFIG_NF_NAT_TFTP is not set
+# CONFIG_NF_NAT_AMANDA is not set
+# CONFIG_NF_NAT_PPTP is not set
+# CONFIG_NF_NAT_H323 is not set
+CONFIG_NF_NAT_SIP=m
+CONFIG_IP_NF_MANGLE=m
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_TARGET_LOG=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=y
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_ATMEL_ECC_HW=y
+# CONFIG_MTD_NAND_ATMEL_ECC_SOFT is not set
+# CONFIG_MTD_NAND_ATMEL_ECC_NONE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ATMEL_PWM is not set
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_ICS932S401 is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+CONFIG_MACB=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+# CONFIG_PPP_SYNC_TTY is not set
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+CONFIG_TOUCHSCREEN_WM97XX=m
+CONFIG_TOUCHSCREEN_WM9705=y
+CONFIG_TOUCHSCREEN_WM9712=y
+CONFIG_TOUCHSCREEN_WM9713=y
+# CONFIG_TOUCHSCREEN_WM97XX_ATMEL is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_ATMEL=y
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=m
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+# CONFIG_SND_DRIVERS is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+CONFIG_SND_ATMEL_AC97C=m
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=350
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+CONFIG_USB_GADGET_ATMEL_USBA=y
+CONFIG_USB_ATMEL_USBA=y
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_AT32AP700X=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+CONFIG_DW_DMAC=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_LKDTM is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index 0abe90adb1a..42dafce0238 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc1
-# Mon Aug 4 16:02:27 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 13:00:55 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,21 +35,36 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -58,38 +74,40 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
-# CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set
-# CONFIG_HAVE_IOREMAP_PROT is not set
CONFIG_HAVE_KPROBES=y
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-# CONFIG_USE_GENERIC_SMP_HELPERS is not set
CONFIG_HAVE_CLK=y
-CONFIG_PROC_PAGE_MONITOR=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -97,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -117,7 +132,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
#
# System Type and features
@@ -133,7 +148,12 @@ CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
CONFIG_BOARD_ATSTK1000=y
-# CONFIG_BOARD_ATNGW100 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
+# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
+# CONFIG_BOARD_MIMC200 is not set
CONFIG_BOARD_ATSTK1002=y
# CONFIG_BOARD_ATSTK1003 is not set
# CONFIG_BOARD_ATSTK1004 is not set
@@ -159,7 +179,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_QUICKLIST=y
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -171,14 +191,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
# CONFIG_HZ_100 is not set
@@ -186,7 +208,7 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
-# CONFIG_SCHED_HRTICK is not set
+CONFIG_SCHED_HRTICK=y
CONFIG_CMDLINE=""
#
@@ -197,6 +219,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
@@ -228,6 +251,8 @@ CONFIG_CPU_FREQ_AT32AP=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
CONFIG_NET=y
@@ -295,10 +320,12 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
CONFIG_LLC=m
@@ -309,26 +336,33 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
#
-# Wireless
+# CFG80211 needs to be enabled for MAC80211
#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -340,6 +374,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -349,6 +384,7 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -398,17 +434,18 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
CONFIG_MTD_DATAFLASH=m
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
CONFIG_MTD_M25P80=m
CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -424,9 +461,22 @@ CONFIG_M25PXX_USE_FAST_READ=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
-# CONFIG_MTD_UBI is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
@@ -444,10 +494,20 @@ CONFIG_ATMEL_PWM=m
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
CONFIG_ATMEL_SSC=m
# CONFIG_ENCLOSURE_SERVICES is not set
-# CONFIG_HAVE_IDE is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
#
# SCSI device support
@@ -469,10 +529,6 @@ CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -489,8 +545,10 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=m
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
# CONFIG_SATA_PMP is not set
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -519,26 +577,37 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -580,18 +649,25 @@ CONFIG_INPUT_EVDEV=m
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_VSXXXAA is not set
CONFIG_MOUSE_GPIO=m
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -622,9 +698,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -633,7 +711,9 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=m
#
@@ -643,6 +723,7 @@ CONFIG_I2C_ALGOBIT=m
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -663,14 +744,6 @@ CONFIG_I2C_GPIO=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-CONFIG_EEPROM_AT24=m
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -685,19 +758,28 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
CONFIG_GPIO_SYSFS=y
#
+# Memory mapped GPIO expanders:
+#
+
+#
# I2C GPIO expanders:
#
# CONFIG_GPIO_MAX732X is not set
@@ -713,11 +795,15 @@ CONFIG_GPIO_SYSFS=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -726,11 +812,11 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_AT32AP700X_WDT=y
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@@ -739,22 +825,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -764,6 +845,7 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_FB=y
# CONFIG_FIRMWARE_EDID is not set
# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
@@ -785,10 +867,15 @@ CONFIG_FB_CFB_IMAGEBLIT=y
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LMS283GF05 is not set
CONFIG_LCD_LTV350QV=y
# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
# CONFIG_LCD_VGG2432A4 is not set
# CONFIG_LCD_PLATFORM is not set
# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
@@ -799,6 +886,8 @@ CONFIG_LCD_LTV350QV=y
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_LOGO is not set
CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -807,12 +896,24 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
# CONFIG_SND_DRIVERS is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+# CONFIG_SND_ATMEL_AC97C is not set
CONFIG_SND_SPI=y
CONFIG_SND_AT73C213=m
CONFIG_SND_AT73C213_TARGET_BITRATE=48000
@@ -825,33 +926,43 @@ CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
# CONFIG_USB_GADGET_DEBUG_FILES is not set
# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
-# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_AT91 is not set
CONFIG_USB_GADGET_ATMEL_USBA=y
CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_FSL_USB2 is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA25X is not set
-# CONFIG_USB_GADGET_M66592 is not set
-# CONFIG_USB_GADGET_PXA27X is not set
-# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
-# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -859,12 +970,18 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
CONFIG_MMC_BLOCK=y
CONFIG_MMC_BLOCK_BOUNCE=y
@@ -872,10 +989,12 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
+# CONFIG_MMC_ATMELMCI_DMA is not set
CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
CONFIG_NEW_LEDS=y
@@ -887,7 +1006,11 @@ CONFIG_LEDS_CLASS=m
CONFIG_LEDS_ATMEL_PWM=m
# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -895,7 +1018,13 @@ CONFIG_LEDS_GPIO=m
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
@@ -927,25 +1056,33 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
#
# CONFIG_RTC_DRV_M41T94 is not set
# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -964,25 +1101,45 @@ CONFIG_DMA_ENGINE=y
# DMA Clients
#
# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_EXT4DEV_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -990,6 +1147,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1013,15 +1176,13 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1039,7 +1200,14 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
CONFIG_MINIX_FS=m
# CONFIG_OMFS_FS is not set
@@ -1122,6 +1290,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1130,6 +1299,9 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1145,6 +1317,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1153,13 +1326,39 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1167,19 +1366,30 @@ CONFIG_FRAME_POINTER=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
-CONFIG_CRYPTO_ALGAPI=m
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=m
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=m
# CONFIG_CRYPTO_TEST is not set
@@ -1207,11 +1417,13 @@ CONFIG_CRYPTO_CBC=m
#
CONFIG_CRYPTO_HMAC=m
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1228,7 +1440,7 @@ CONFIG_CRYPTO_SHA1=m
#
# Ciphers
#
-# CONFIG_CRYPTO_AES is not set
+CONFIG_CRYPTO_AES=m
# CONFIG_CRYPTO_ANUBIS is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_BLOWFISH is not set
@@ -1247,18 +1459,24 @@ CONFIG_CRYPTO_DES=m
#
# Compression
#
-CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
-# CONFIG_GENERIC_FIND_NEXT_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
-# CONFIG_CRC16 is not set
+CONFIG_CRC16=y
CONFIG_CRC_T10DIF=m
CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
@@ -1266,8 +1484,11 @@ CONFIG_CRC7=m
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index c1603c4860e..363e2381f32 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc8
-# Thu Dec 18 11:22:23 2008
+# Linux kernel version: 2.6.32-rc5
+# Thu Oct 29 13:00:25 2009
#
CONFIG_AVR32=y
CONFIG_GENERIC_GPIO=y
@@ -21,6 +21,7 @@ CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -34,21 +35,36 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
@@ -58,32 +74,40 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
-# CONFIG_COMPAT_BRK is not set
# CONFIG_BASE_FULL is not set
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
+CONFIG_TRACEPOINTS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=1
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -91,11 +115,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -111,7 +132,6 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
CONFIG_FREEZER=y
#
@@ -128,8 +148,11 @@ CONFIG_PLATFORM_AT32AP=y
CONFIG_CPU_AT32AP700X=y
CONFIG_CPU_AT32AP7000=y
CONFIG_BOARD_ATSTK1000=y
-# CONFIG_BOARD_ATNGW100 is not set
+# CONFIG_BOARD_ATNGW100_MKI is not set
+# CONFIG_BOARD_ATNGW100_MKII is not set
+# CONFIG_BOARD_HAMMERHEAD is not set
# CONFIG_BOARD_FAVR_32 is not set
+# CONFIG_BOARD_MERISC is not set
# CONFIG_BOARD_MIMC200 is not set
# CONFIG_BOARD_ATSTK1002 is not set
# CONFIG_BOARD_ATSTK1003 is not set
@@ -156,7 +179,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_QUICKLIST=y
-# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set
+# CONFIG_HAVE_ARCH_BOOTMEM is not set
# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set
# CONFIG_NEED_NODE_MEMMAP_SIZE is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -170,12 +193,14 @@ CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# CONFIG_OWNERSHIP_TRACE is not set
CONFIG_NMI_DEBUGGING=y
# CONFIG_HZ_100 is not set
@@ -194,6 +219,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_PM_RUNTIME is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
@@ -294,6 +320,7 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
CONFIG_STP=m
@@ -309,20 +336,24 @@ CONFIG_LLC=m
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -334,6 +365,7 @@ CONFIG_LLC=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
@@ -343,6 +375,7 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -393,9 +426,7 @@ CONFIG_MTD_CFI_UTIL=y
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x8000000
-CONFIG_MTD_PHYSMAP_LEN=0x0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -406,6 +437,7 @@ CONFIG_MTD_DATAFLASH=m
CONFIG_MTD_DATAFLASH_OTP=y
CONFIG_MTD_M25P80=m
CONFIG_M25PXX_USE_FAST_READ=y
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -432,6 +464,11 @@ CONFIG_MTD_NAND_ATMEL_ECC_HW=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
CONFIG_MTD_UBI=y
@@ -460,13 +497,22 @@ CONFIG_ATMEL_PWM=m
CONFIG_ATMEL_TCLIB=y
CONFIG_ATMEL_TCB_CLKSRC=y
CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ICS932S401 is not set
CONFIG_ATMEL_SSC=m
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
# CONFIG_C2PORT is not set
#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+
+#
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
@@ -486,10 +532,6 @@ CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -506,8 +548,10 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=m
# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_ATA_VERBOSE_ERROR=y
# CONFIG_SATA_PMP is not set
CONFIG_ATA_SFF=y
# CONFIG_SATA_MV is not set
@@ -536,12 +580,17 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
CONFIG_MACB=y
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -550,15 +599,18 @@ CONFIG_MACB=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
#
-# Wireless LAN
+# Enable WiMAX (Networking options) to see the WiMAX drivers
#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_WAN is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
@@ -600,18 +652,25 @@ CONFIG_INPUT_EVDEV=m
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
-CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_VSXXXAA is not set
CONFIG_MOUSE_GPIO=m
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
@@ -642,9 +701,11 @@ CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_PDC=y
# CONFIG_SERIAL_ATMEL_TTYAT is not set
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
@@ -653,6 +714,7 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_TCG_TPM is not set
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_HELPER_AUTO=y
CONFIG_I2C_ALGOBIT=m
@@ -664,6 +726,7 @@ CONFIG_I2C_ALGOBIT=m
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
+# CONFIG_I2C_DESIGNWARE is not set
CONFIG_I2C_GPIO=m
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
@@ -684,14 +747,6 @@ CONFIG_I2C_GPIO=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_LEGACY is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_TPS65010 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -706,13 +761,18 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_ATMEL=y
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
CONFIG_SPI_SPIDEV=m
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -738,11 +798,15 @@ CONFIG_GPIO_SYSFS=y
#
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -764,26 +828,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -817,8 +872,10 @@ CONFIG_FB_ATMEL=y
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LMS283GF05 is not set
CONFIG_LCD_LTV350QV=y
# CONFIG_LCD_ILI9320 is not set
# CONFIG_LCD_TDO24M is not set
@@ -833,6 +890,7 @@ CONFIG_LCD_LTV350QV=y
# CONFIG_LOGO is not set
CONFIG_SOUND=m
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -841,16 +899,28 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_HRTIMER is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_DRIVERS=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
+
+#
+# Atmel devices (AVR32 and AT91)
+#
+# CONFIG_SND_ATMEL_ABDAC is not set
+# CONFIG_SND_ATMEL_AC97C is not set
CONFIG_SND_SPI=y
CONFIG_SND_AT73C213=m
CONFIG_SND_AT73C213_TARGET_BITRATE=48000
@@ -863,11 +933,10 @@ CONFIG_USB_SUPPORT=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-# CONFIG_USB_MUSB_HDRC is not set
# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG is not set
@@ -882,18 +951,25 @@ CONFIG_USB_ATMEL_USBA=y
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C_HSOTG is not set
+# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
# CONFIG_USB_GADGET_M66592 is not set
# CONFIG_USB_GADGET_AMD5536UDC is not set
# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
# CONFIG_USB_GADGET_NET2280 is not set
# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
CONFIG_USB_ETH=m
CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH_EEM is not set
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
@@ -901,6 +977,12 @@ CONFIG_USB_G_SERIAL=m
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
@@ -917,6 +999,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
CONFIG_MMC_ATMELMCI=y
# CONFIG_MMC_ATMELMCI_DMA is not set
CONFIG_MMC_SPI=m
@@ -930,7 +1013,11 @@ CONFIG_LEDS_CLASS=m
CONFIG_LEDS_ATMEL_PWM=m
# CONFIG_LEDS_PCA9532 is not set
CONFIG_LEDS_GPIO=m
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
# LED Triggers
@@ -939,7 +1026,12 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
# CONFIG_ACCESSIBILITY is not set
CONFIG_RTC_LIB=y
CONFIG_RTC_CLASS=y
@@ -972,6 +1064,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -983,6 +1076,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_R9701 is not set
# CONFIG_RTC_DRV_RS5C348 is not set
# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
# Platform RTC drivers
@@ -1014,32 +1108,42 @@ CONFIG_DMA_ENGINE=y
# DMA Clients
#
# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_DMATEST is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
-CONFIG_STAGING_EXCLUDE_BUILD=y
#
# File systems
#
-CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=m
-CONFIG_EXT4DEV_COMPAT=y
+CONFIG_EXT4_FS=y
# CONFIG_EXT4_FS_XATTR is not set
-CONFIG_JBD=m
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
-CONFIG_JBD2=m
+CONFIG_JBD2=y
# CONFIG_JBD2_DEBUG is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1047,6 +1151,12 @@ CONFIG_INOTIFY_USER=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
@@ -1076,10 +1186,7 @@ CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1099,12 +1206,13 @@ CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
CONFIG_UBIFS_FS=y
-CONFIG_UBIFS_FS_XATTR=y
+# CONFIG_UBIFS_FS_XATTR is not set
# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
CONFIG_UBIFS_FS_LZO=y
CONFIG_UBIFS_FS_ZLIB=y
# CONFIG_UBIFS_FS_DEBUG is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
CONFIG_MINIX_FS=m
# CONFIG_OMFS_FS is not set
@@ -1124,7 +1232,6 @@ CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
-# CONFIG_SUNRPC_REGISTER_V4 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
@@ -1188,6 +1295,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1196,6 +1304,9 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1211,6 +1322,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
@@ -1219,6 +1331,8 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
CONFIG_FRAME_POINTER=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
@@ -1226,17 +1340,30 @@ CONFIG_FRAME_POINTER=y
# CONFIG_KPROBES_SANITY_TEST is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
-
-#
-# Tracers
-#
+# CONFIG_PAGE_POISONING is not set
+CONFIG_NOP_TRACER=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
#
@@ -1262,10 +1389,12 @@ CONFIG_CRYPTO_HASH=m
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=m
CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=m
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_AUTHENC=m
# CONFIG_CRYPTO_TEST is not set
@@ -1293,11 +1422,13 @@ CONFIG_CRYPTO_CBC=m
#
CONFIG_CRYPTO_HMAC=m
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -1334,6 +1465,7 @@ CONFIG_CRYPTO_DES=m
# Compression
#
CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
CONFIG_CRYPTO_LZO=y
#
@@ -1341,11 +1473,13 @@ CONFIG_CRYPTO_LZO=y
#
CONFIG_CRYPTO_ANSI_CPRNG=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_BINARY_PRINTF=y
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=m
@@ -1357,8 +1491,9 @@ CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_GENERIC_ALLOCATOR=y
-CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/avr32/include/asm/asm-offsets.h b/arch/avr32/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/avr32/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
index d5d1d41c600..3b3159b710d 100644
--- a/arch/avr32/include/asm/elf.h
+++ b/arch/avr32/include/asm/elf.h
@@ -77,7 +77,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#endif
#define ELF_ARCH EM_AVR32
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/avr32/include/asm/hardirq.h b/arch/avr32/include/asm/hardirq.h
index 015bc75ea79..9e36e3ff77d 100644
--- a/arch/avr32/include/asm/hardirq.h
+++ b/arch/avr32/include/asm/hardirq.h
@@ -1,23 +1,6 @@
#ifndef __ASM_AVR32_HARDIRQ_H
#define __ASM_AVR32_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <asm/irq.h>
-
#ifndef __ASSEMBLY__
-
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-void ack_bad_irq(unsigned int irq);
-
-/* Standard mappings for irq_cpustat_t above */
-#include <linux/irq_cpustat.h>
-
+#include <asm-generic/hardirq.h>
#endif /* __ASSEMBLY__ */
-
#endif /* __ASM_AVR32_HARDIRQ_H */
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
index 483d666c27c..66a19726663 100644
--- a/arch/avr32/include/asm/syscalls.h
+++ b/arch/avr32/include/asm/syscalls.h
@@ -29,10 +29,6 @@ asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
struct pt_regs *);
asmlinkage int sys_rt_sigreturn(struct pt_regs *);
-/* kernel/sys_avr32.c */
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long, off_t);
-
/* mm/cache.c */
asmlinkage int sys_cacheflush(int, void __user *, size_t);
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index 9f572229d31..9604f7758f9 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -16,15 +16,6 @@
#include <linux/seq_file.h>
#include <linux/sysdev.h>
-/*
- * 'what should we do if we get a hw irq event on an illegal vector'.
- * each architecture has to answer this themselves.
- */
-void ack_bad_irq(unsigned int irq)
-{
- printk("unexpected IRQ %u\n", irq);
-}
-
/* May be overridden by platform code */
int __weak nmi_enable(void)
{
@@ -51,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -66,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c
index 5d2daeaf356..459349b5ed5 100644
--- a/arch/avr32/kernel/sys_avr32.c
+++ b/arch/avr32/kernel/sys_avr32.c
@@ -5,39 +5,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mm.h>
#include <linux/unistd.h>
-#include <asm/mman.h>
-#include <asm/uaccess.h>
-#include <asm/syscalls.h>
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t offset)
-{
- int error = -EBADF;
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- return error;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, offset);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
- return error;
-}
-
int kernel_execve(const char *file, char **argv, char **envp)
{
register long scno asm("r8") = __NR_execve;
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
index f7244cd02fb..0447a3e2ba6 100644
--- a/arch/avr32/kernel/syscall-stubs.S
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -61,7 +61,7 @@ __sys_execve:
__sys_mmap2:
pushm lr
st.w --sp, ARG6
- call sys_mmap2
+ call sys_mmap_pgoff
sub sp, -4
popm pc
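The one-line stub change above is safe because the removed avr32 sys_mmap2() (see the sys_avr32.c hunk below) already passed its last argument through untouched as a page offset, which is exactly what the generic helper takes. For reference, a hedged reminder of the generic prototype declared in include/linux/syscalls.h:

    asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
                                   unsigned long prot, unsigned long flags,
                                   unsigned long fd, unsigned long pgoff);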
diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S
index c4b56654349..9cd2bd91d64 100644
--- a/arch/avr32/kernel/vmlinux.lds.S
+++ b/arch/avr32/kernel/vmlinux.lds.S
@@ -39,30 +39,10 @@ SECTIONS
__tagtable_begin = .;
*(.taglist.init)
__tagtable_end = .;
- INIT_DATA
- . = ALIGN(16);
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- . = ALIGN(4);
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- __security_initcall_start = .;
- *(.security_initcall.init)
- __security_initcall_end = .;
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(32);
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
-#endif
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
}
+ INIT_DATA_SECTION(16)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_evba = .;
@@ -78,34 +58,16 @@ SECTIONS
_etext = .;
} = 0xd703d703
- . = ALIGN(4);
- __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
-
+ EXCEPTION_TABLE(4)
RODATA
- . = ALIGN(THREAD_SIZE);
-
.data : AT(ADDR(.data) - LOAD_OFFSET) {
_data = .;
_sdata = .;
- /*
- * First, the init task union, aligned to an 8K boundary.
- */
- *(.data.init_task)
- /* Then, the page-aligned data */
- . = ALIGN(PAGE_SIZE);
- *(.data.page_aligned)
-
- /* Then, the cacheline aligned data */
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.cacheline_aligned)
-
- /* And the rest... */
+ INIT_TASK_DATA(THREAD_SIZE)
+ PAGE_ALIGNED_DATA(PAGE_SIZE);
+ CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
*(.data.rel*)
DATA_DATA
CONSTRUCTORS
@@ -113,16 +75,8 @@ SECTIONS
_edata = .;
}
-
- . = ALIGN(8);
- .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
- __bss_start = .;
- *(.bss)
- *(COMMON)
- . = ALIGN(8);
- __bss_stop = .;
- _end = .;
- }
+ BSS_SECTION(0, 8, 8)
+ _end = .;
DWARF_DEBUG
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index eb9d4dc2e86..1aa1ea5e921 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -15,6 +15,8 @@
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/usb/atmel_usba_udc.h>
+
+#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
#include <asm/io.h>
@@ -1181,19 +1183,32 @@ static struct resource atmel_spi1_resource[] = {
DEFINE_DEV(atmel_spi, 1);
DEV_CLK(spi_clk, atmel_spi1, pba, 1);
-static void __init
-at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
- unsigned int n, const u8 *pins)
+void __init
+at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n)
{
+ /*
+ * Manage the chipselects as GPIOs, normally using the same pins
+ * the SPI controller expects; but boards can use other pins.
+ */
+ static u8 __initdata spi_pins[][4] = {
+ { GPIO_PIN_PA(3), GPIO_PIN_PA(4),
+ GPIO_PIN_PA(5), GPIO_PIN_PA(20) },
+ { GPIO_PIN_PB(2), GPIO_PIN_PB(3),
+ GPIO_PIN_PB(4), GPIO_PIN_PA(27) },
+ };
unsigned int pin, mode;
+ /* There are only 2 SPI controllers */
+ if (bus_num > 1)
+ return;
+
for (; n; n--, b++) {
b->bus_num = bus_num;
if (b->chip_select >= 4)
continue;
pin = (unsigned)b->controller_data;
if (!pin) {
- pin = pins[b->chip_select];
+ pin = spi_pins[bus_num][b->chip_select];
b->controller_data = (void *)pin;
}
mode = AT32_GPIOF_OUTPUT;
@@ -1206,16 +1221,6 @@ at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b,
struct platform_device *__init
at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
{
- /*
- * Manage the chipselects as GPIOs, normally using the same pins
- * the SPI controller expects; but boards can use other pins.
- */
- static u8 __initdata spi0_pins[] =
- { GPIO_PIN_PA(3), GPIO_PIN_PA(4),
- GPIO_PIN_PA(5), GPIO_PIN_PA(20), };
- static u8 __initdata spi1_pins[] =
- { GPIO_PIN_PB(2), GPIO_PIN_PB(3),
- GPIO_PIN_PB(4), GPIO_PIN_PA(27), };
struct platform_device *pdev;
u32 pin_mask;
@@ -1228,7 +1233,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
select_peripheral(PIOA, (1 << 0), PERIPH_A, AT32_GPIOF_PULLUP);
select_peripheral(PIOA, pin_mask, PERIPH_A, 0);
- at32_spi_setup_slaves(0, b, n, spi0_pins);
+ at32_spi_setup_slaves(0, b, n);
break;
case 1:
@@ -1239,7 +1244,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n)
select_peripheral(PIOB, (1 << 0), PERIPH_B, AT32_GPIOF_PULLUP);
select_peripheral(PIOB, pin_mask, PERIPH_B, 0);
- at32_spi_setup_slaves(1, b, n, spi1_pins);
+ at32_spi_setup_slaves(1, b, n);
break;
default:
@@ -1320,7 +1325,7 @@ struct platform_device *__init
at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
{
struct platform_device *pdev;
- struct dw_dma_slave *dws = &data->dma_slave;
+ struct mci_dma_slave *slave;
u32 pioa_mask;
u32 piob_mask;
@@ -1339,13 +1344,17 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
ARRAY_SIZE(atmel_mci0_resource)))
goto fail;
- dws->dma_dev = &dw_dmac0_device.dev;
- dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
- dws->cfg_hi = (DWC_CFGH_SRC_PER(0)
+ slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+
+ slave->sdata.dma_dev = &dw_dmac0_device.dev;
+ slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
+ slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0)
| DWC_CFGH_DST_PER(1));
- dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL
+ slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL
| DWC_CFGL_HS_SRC_POL);
+ data->dma_slave = slave;
+
if (platform_device_add_data(pdev, data,
sizeof(struct mci_platform_data)))
goto fail;
@@ -1411,6 +1420,8 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
return pdev;
fail:
+ data->dma_slave = NULL;
+ kfree(slave);
platform_device_put(pdev);
return NULL;
}
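With the chip-select pin tables folded into at32_spi_setup_slaves(), a board file only has to supply its spi_board_info array and count. A minimal sketch of a hypothetical caller follows; the slave device, clock rate, and initcall placement are illustrative assumptions, not part of this patch:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/spi/spi.h>
    #include <mach/board.h>

    static struct spi_board_info board_spi0_devices[] __initdata = {
    	{
    		.modalias	= "mtd_dataflash",	/* illustrative slave */
    		.max_speed_hz	= 8000000,
    		.chip_select	= 0,	/* GPIO comes from spi_pins[0][0] */
    	},
    };

    static int __init board_add_spi(void)
    {
    	/*
    	 * id 0 selects the first controller; the GPIO chipselects come
    	 * from the internal spi_pins[] table unless a slave overrides
    	 * them via controller_data.
    	 */
    	at32_add_device_spi(0, board_spi0_devices,
    			    ARRAY_SIZE(board_spi0_devices));
    	return 0;
    }
    postcore_initcall(board_add_spi);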
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
new file mode 100644
index 00000000000..a9b38967f70
--- /dev/null
+++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
@@ -0,0 +1,24 @@
+#ifndef __MACH_ATMEL_MCI_H
+#define __MACH_ATMEL_MCI_H
+
+#include <linux/dw_dmac.h>
+
+/**
+ * struct mci_dma_data - DMA data for MCI interface
+ */
+struct mci_dma_data {
+ struct dw_dma_slave sdata;
+};
+
+/* accessor macros */
+#define slave_data_ptr(s) (&(s)->sdata)
+#define find_slave_dev(s) ((s)->sdata.dma_dev)
+
+#define setup_dma_addr(s, t, r) do { \
+ if (s) { \
+ (s)->sdata.tx_reg = (t); \
+ (s)->sdata.rx_reg = (r); \
+ } \
+} while (0)
+
+#endif /* __MACH_ATMEL_MCI_H */
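The accessor macros in the new header let MCI code stay ignorant of the dw_dmac-specific slave layout. A hedged driver-side sketch, assuming only what the header above defines (the function name and FIFO parameters are illustrative):

    #include <linux/device.h>
    #include <linux/types.h>
    #include <mach/atmel-mci.h>

    static struct device *mci_bind_dma(struct mci_dma_data *slave,
    				   dma_addr_t tx_fifo, dma_addr_t rx_fifo)
    {
    	/* record the controller FIFO addresses in the slave data */
    	setup_dma_addr(slave, tx_fifo, rx_fifo);

    	/* return the DMA engine device a channel should be requested from */
    	return find_slave_dev(slave);
    }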
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index ddedb471f33..c7f25bb1d06 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -49,6 +49,7 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data);
struct spi_board_info;
struct platform_device *
at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n);
+void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n);
struct atmel_lcdfb_info;
struct platform_device *
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index ae6a60f1012..53c1e1d45c6 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -5,6 +5,10 @@
mainmenu "Blackfin Kernel Configuration"
+config SYMBOL_PREFIX
+ string
+ default "_"
+
config MMU
def_bool n
@@ -28,6 +32,9 @@ config BLACKFIN
select HAVE_OPROFILE
select ARCH_WANT_OPTIONAL_GPIOLIB
+config GENERIC_CSUM
+ def_bool y
+
config GENERIC_BUG
def_bool y
depends on BUG
@@ -173,7 +180,7 @@ config BF539
help
BF539 Processor Support.
-config BF542
+config BF542_std
bool "BF542"
help
BF542 Processor Support.
@@ -183,7 +190,7 @@ config BF542M
help
BF542 Processor Support.
-config BF544
+config BF544_std
bool "BF544"
help
BF544 Processor Support.
@@ -193,7 +200,7 @@ config BF544M
help
BF544 Processor Support.
-config BF547
+config BF547_std
bool "BF547"
help
BF547 Processor Support.
@@ -203,7 +210,7 @@ config BF547M
help
BF547 Processor Support.
-config BF548
+config BF548_std
bool "BF548"
help
BF548 Processor Support.
@@ -213,7 +220,7 @@ config BF548M
help
BF548 Processor Support.
-config BF549
+config BF549_std
bool "BF549"
help
BF549 Processor Support.
@@ -307,31 +314,11 @@ config BF_REV_NONE
endchoice
-config BF51x
- bool
- depends on (BF512 || BF514 || BF516 || BF518)
- default y
-
-config BF52x
- bool
- depends on (BF522 || BF523 || BF524 || BF525 || BF526 || BF527)
- default y
-
config BF53x
bool
depends on (BF531 || BF532 || BF533 || BF534 || BF536 || BF537)
default y
-config BF54xM
- bool
- depends on (BF542M || BF544M || BF547M || BF548M || BF549M)
- default y
-
-config BF54x
- bool
- depends on (BF542 || BF544 || BF547 || BF548 || BF549 || BF54xM)
- default y
-
config MEM_GENERIC_BOARD
bool
depends on GENERIC_BOARD
@@ -913,6 +900,12 @@ config DMA_UNCACHED_2M
bool "Enable 2M DMA region"
config DMA_UNCACHED_1M
bool "Enable 1M DMA region"
+config DMA_UNCACHED_512K
+ bool "Enable 512K DMA region"
+config DMA_UNCACHED_256K
+ bool "Enable 256K DMA region"
+config DMA_UNCACHED_128K
+ bool "Enable 128K DMA region"
config DMA_UNCACHED_NONE
bool "Disable DMA region"
endchoice
@@ -1274,6 +1267,8 @@ source "net/Kconfig"
source "drivers/Kconfig"
+source "drivers/firmware/Kconfig"
+
source "fs/Kconfig"
source "arch/blackfin/Kconfig.debug"
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index f063b772934..d4c7177e765 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -16,6 +16,7 @@ GZFLAGS := -9
KBUILD_CFLAGS += $(call cc-option,-mno-fdpic)
KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
CFLAGS_MODULE += -mlong-calls
+LDFLAGS_MODULE += -m elf32bfin
KALLSYMS += --symbol-prefix=_
KBUILD_DEFCONFIG := BF537-STAMP_defconfig
@@ -137,7 +138,7 @@ archclean:
INSTALL_PATH ?= /tftpboot
boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage vmImage.bz2 vmImage.gz vmImage.lzma
+BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma
PHONY += $(BOOT_TARGETS) install
KBUILD_IMAGE := $(boot)/vmImage
@@ -151,6 +152,7 @@ install:
define archhelp
echo '* vmImage - Alias to selected kernel format (vmImage.gz by default)'
+ echo ' vmImage.bin - Uncompressed Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bin)'
echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)'
echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index fd9ccc5fea1..e9c48c6f8c1 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -8,7 +8,7 @@
MKIMAGE := $(srctree)/scripts/mkuboot.sh
-targets := vmImage vmImage.bz2 vmImage.gz vmImage.lzma
+targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma
extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
quiet_cmd_uimage = UIMAGE $@
@@ -29,6 +29,9 @@ $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzma)
+$(obj)/vmImage.bin: $(obj)/vmlinux.bin
+ $(call if_changed,uimage,none)
+
$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
$(call if_changed,uimage,bzip2)
@@ -38,6 +41,7 @@ $(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
$(call if_changed,uimage,lzma)
+suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index 9905b26009e..e3155941981 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -67,6 +67,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -316,6 +317,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
@@ -438,17 +440,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
-CONFIG_NET_DSA=y
-# CONFIG_NET_DSA_TAG_DSA is not set
-# CONFIG_NET_DSA_TAG_EDSA is not set
-# CONFIG_NET_DSA_TAG_TRAILER is not set
-CONFIG_NET_DSA_TAG_STPID=y
-# CONFIG_NET_DSA_MV88E6XXX is not set
-# CONFIG_NET_DSA_MV88E6060 is not set
-# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
-# CONFIG_NET_DSA_MV88E6131 is not set
-# CONFIG_NET_DSA_MV88E6123_61_65 is not set
-CONFIG_NET_DSA_KSZ8893M=y
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 9dc68208802..075e0fdcb39 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -67,6 +67,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -321,6 +322,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 77e35d4baf5..6d1a623fb14 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -67,6 +67,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -321,6 +322,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=y
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 4c044805cb5..50f9a23ccdb 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -67,6 +67,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -283,6 +284,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index c99bbcd09a6..6c60c828631 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -67,6 +67,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -283,6 +284,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 092ffda80e6..2908595b67c 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -67,6 +67,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -290,6 +291,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
@@ -704,10 +706,7 @@ CONFIG_CONFIG_INPUT_PCF8574=m
#
# Hardware I/O ports
#
-CONFIG_SERIO=y
-CONFIG_SERIO_SERPORT=y
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO is not set
# CONFIG_GAMEPORT is not set
#
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index fa698a89f6f..09ea2499555 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -67,6 +67,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -301,6 +302,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index f773ad1155d..eb3e98b6f3f 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -1,22 +1,29 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28.10
-# Thu May 21 05:50:01 2009
+# Linux kernel version: 2.6.31.5
+# Mon Nov 2 22:02:56 2009
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -26,22 +33,40 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -62,17 +87,28 @@ CONFIG_EPOLL=y
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
+
+#
+# Performance Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
-CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -80,11 +116,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -94,13 +127,12 @@ CONFIG_BLOCK=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_IOSCHED_CFQ=y
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
-CONFIG_CLASSIC_RCU=y
# CONFIG_PREEMPT_NONE is not set
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
@@ -137,7 +169,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_BF544M is not set
# CONFIG_BF547 is not set
# CONFIG_BF547M is not set
-CONFIG_BF548=y
+CONFIG_BF548_std=y
# CONFIG_BF548M is not set
# CONFIG_BF549 is not set
# CONFIG_BF549M is not set
@@ -195,7 +227,7 @@ CONFIG_BFIN548_EZKIT=y
#
# BF548 Specific Configuration
#
-# CONFIG_DEB_DMA_URGENT is not set
+CONFIG_DEB_DMA_URGENT=y
# CONFIG_BF548_ATAPI_ALTERNATIVE_PORT is not set
#
@@ -352,10 +384,11 @@ CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
CONFIG_DMA_UNCACHED_2M=y
@@ -366,14 +399,13 @@ CONFIG_DMA_UNCACHED_2M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+# CONFIG_BFIN_L2_ICACHEABLE is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
-CONFIG_BFIN_EXTMEM_WRITEBACK=y
-# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
-# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_EXTMEM_WRITEBACK is not set
+CONFIG_BFIN_EXTMEM_WRITETHROUGH=y
# CONFIG_BFIN_L2_DCACHEABLE is not set
#
@@ -382,7 +414,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -441,11 +473,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -469,13 +496,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_NETLABEL is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
@@ -493,7 +518,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
@@ -548,14 +576,10 @@ CONFIG_SIR_BFIN_DMA=y
# CONFIG_MCS_FIR is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_OLD_REGULATORY=y
+# CONFIG_WIRELESS is not set
CONFIG_WIRELESS_EXT=y
-CONFIG_WIRELESS_EXT_SYSFS=y
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+CONFIG_LIB80211=m
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -578,6 +602,7 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -653,7 +678,6 @@ CONFIG_MTD_NAND=y
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
# CONFIG_MTD_NAND_ECC_SMC is not set
# CONFIG_MTD_NAND_MUSEUM_IDS is not set
-# CONFIG_MTD_NAND_BFIN is not set
CONFIG_MTD_NAND_IDS=y
CONFIG_MTD_NAND_BF5XX=y
CONFIG_MTD_NAND_BF5XX_HWECC=y
@@ -665,6 +689,11 @@ CONFIG_MTD_NAND_BF5XX_HWECC=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -682,10 +711,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_AD525X_DPOT is not set
# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -709,10 +748,6 @@ CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
@@ -729,6 +764,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
CONFIG_ATA=y
# CONFIG_ATA_NONSTANDARD is not set
CONFIG_SATA_PMP=y
@@ -744,13 +780,34 @@ CONFIG_NETDEVICES=y
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_VETH is not set
-# CONFIG_PHYLIB is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_SMC91X is not set
-CONFIG_SMSC911X=y
# CONFIG_DM9000 is not set
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+CONFIG_SMSC911X=y
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -759,6 +816,8 @@ CONFIG_SMSC911X=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
@@ -771,13 +830,16 @@ CONFIG_LIBERTAS=m
# CONFIG_LIBERTAS_USB is not set
CONFIG_LIBERTAS_SDIO=m
CONFIG_POWEROF2_BLOCKSIZE_ONLY=y
+# CONFIG_LIBERTAS_SPI is not set
# CONFIG_LIBERTAS_DEBUG is not set
# CONFIG_USB_ZD1201 is not set
-# CONFIG_USB_NET_RNDIS_WLAN is not set
-# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_HOSTAP is not set
#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
# USB Network Adapters
#
# CONFIG_USB_CATC is not set
@@ -813,28 +875,31 @@ CONFIG_INPUT_EVBUG=m
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_BFIN=y
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_GPIO is not set
-CONFIG_KEYBOARD_BFIN=y
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_NEWTON is not set
# CONFIG_KEYBOARD_OPENCORES is not set
-# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
CONFIG_TOUCHSCREEN_AD7877=m
# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
# CONFIG_TOUCHSCREEN_AD7879 is not set
-# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
@@ -844,6 +909,8 @@ CONFIG_TOUCHSCREEN_AD7877=m
# CONFIG_TOUCHSCREEN_WM97XX is not set
# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_ATI_REMOTE is not set
# CONFIG_INPUT_ATI_REMOTE2 is not set
@@ -852,7 +919,11 @@ CONFIG_INPUT_MISC=y
# CONFIG_INPUT_YEALINK is not set
# CONFIG_INPUT_CM109 is not set
# CONFIG_INPUT_UINPUT is not set
-# CONFIG_CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_BFIN_ROTARY is not set
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_PCF8574 is not set
#
# Hardware I/O ports
@@ -863,16 +934,13 @@ CONFIG_INPUT_MISC=y
#
# Character devices
#
-# CONFIG_AD9960 is not set
CONFIG_BFIN_DMA_INTERFACE=m
# CONFIG_BFIN_PPI is not set
# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
# CONFIG_BFIN_SPI_ADC is not set
CONFIG_BFIN_SPORT=m
-# CONFIG_BFIN_TIMER_LATENCY is not set
# CONFIG_BFIN_TWI_LCD is not set
-CONFIG_SIMPLE_GPIO=m
CONFIG_VT=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
@@ -890,6 +958,7 @@ CONFIG_BFIN_JTAG_COMM=m
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -903,6 +972,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_BFIN_OTP=y
# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
@@ -951,14 +1021,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_SENSORS_AD5252 is not set
-# CONFIG_EEPROM_LEGACY is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
-# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -975,13 +1040,18 @@ CONFIG_SPI_BFIN=y
# CONFIG_SPI_BFIN_LOCK is not set
# CONFIG_SPI_BFIN_SPORT is not set
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -997,6 +1067,7 @@ CONFIG_GPIO_SYSFS=y
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -1038,28 +1109,19 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -1096,6 +1158,7 @@ CONFIG_FB_BF54X_LQ043=y
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@@ -1132,6 +1195,7 @@ CONFIG_SOUND_OSS_CORE=y
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
+CONFIG_SND_JACK=y
# CONFIG_SND_SEQUENCER is not set
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
@@ -1142,6 +1206,11 @@ CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_DRIVERS=y
# CONFIG_SND_DUMMY is not set
# CONFIG_SND_MTPAV is not set
@@ -1152,7 +1221,6 @@ CONFIG_SND_SPI=y
#
# ALSA Blackfin devices
#
-# CONFIG_SND_BLACKFIN_AD1836 is not set
# CONFIG_SND_BFIN_AD73322 is not set
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
@@ -1160,15 +1228,17 @@ CONFIG_SND_USB=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_AC97_BUS=y
# CONFIG_SND_BF5XX_I2S is not set
+# CONFIG_SND_BF5XX_TDM is not set
CONFIG_SND_BF5XX_AC97=y
CONFIG_SND_BF5XX_MMAP_SUPPORT=y
# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set
+CONFIG_SND_BF5XX_HAVE_COLD_RESET=y
+CONFIG_SND_BF5XX_RESET_GPIO_NUM=19
+CONFIG_SND_BF5XX_SOC_AD1980=y
CONFIG_SND_BF5XX_SOC_SPORT=y
CONFIG_SND_BF5XX_SOC_AC97=y
-CONFIG_SND_BF5XX_SOC_AD1980=y
CONFIG_SND_BF5XX_SPORT_NUM=0
-CONFIG_SND_BF5XX_HAVE_COLD_RESET=y
-CONFIG_SND_BF5XX_RESET_GPIO_NUM=19
+CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_ALL_CODECS is not set
CONFIG_SND_SOC_AD1980=y
# CONFIG_SOUND_PRIME is not set
@@ -1188,30 +1258,34 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
-CONFIG_HID_COMPAT=y
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
-CONFIG_HID_BRIGHT=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_CYPRESS=y
-CONFIG_HID_DELL=y
+# CONFIG_HID_DRAGONRISE is not set
CONFIG_HID_EZKEY=y
+# CONFIG_HID_KYE is not set
CONFIG_HID_GYRATION=y
+# CONFIG_HID_KENSINGTON is not set
CONFIG_HID_LOGITECH=y
# CONFIG_LOGITECH_FF is not set
# CONFIG_LOGIRUMBLEPAD2_FF is not set
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
+# CONFIG_HID_NTRIG is not set
CONFIG_HID_PANTHERLORD=y
# CONFIG_PANTHERLORD_FF is not set
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
-CONFIG_THRUSTMASTER_FF=m
-CONFIG_ZEROPLUS_FF=m
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB_ARCH_HAS_OHCI is not set
@@ -1237,6 +1311,7 @@ CONFIG_USB_MON=y
# USB Host Controller Drivers
#
# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
# CONFIG_USB_ISP1362_HCD is not set
@@ -1267,18 +1342,17 @@ CONFIG_USB_INVENTRA_DMA=y
# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
-# CONFIG_USB_STORAGE_DPCM is not set
# CONFIG_USB_STORAGE_USBAT is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set
@@ -1314,7 +1388,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGET is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
@@ -1326,6 +1399,13 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+CONFIG_NOP_USB_XCEIV=y
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
@@ -1380,6 +1460,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
# SPI RTC drivers
@@ -1411,10 +1492,21 @@ CONFIG_RTC_INTF_DEV=y
#
CONFIG_RTC_DRV_BFIN=y
# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
+# Firmware Drivers
+#
+# CONFIG_FIRMWARE_MEMMAP is not set
+# CONFIG_SIGMA is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
@@ -1427,9 +1519,11 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1439,6 +1533,11 @@ CONFIG_INOTIFY_USER=y
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
CONFIG_ISO9660_FS=m
@@ -1467,10 +1566,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1489,17 +1585,8 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
-CONFIG_YAFFS_FS=m
-CONFIG_YAFFS_YAFFS1=y
-# CONFIG_YAFFS_9BYTE_TAGS is not set
-# CONFIG_YAFFS_DOES_ECC is not set
-CONFIG_YAFFS_YAFFS2=y
-CONFIG_YAFFS_AUTO_YAFFS2=y
-# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
-# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
-CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -1508,6 +1595,7 @@ CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
@@ -1522,7 +1610,6 @@ CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_REGISTER_V4 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@@ -1596,11 +1683,15 @@ CONFIG_FRAME_WARN=1024
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1608,16 +1699,21 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_NOMMU_REGIONS is not set
# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
@@ -1625,17 +1721,16 @@ CONFIG_DEBUG_INFO=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
-
-#
-# Tracers
-#
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_BOOT_TRACER is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
+# CONFIG_KMEMCHECK is not set
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_VERBOSE=y
@@ -1657,17 +1752,15 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
-CONFIG_SECURITY=y
+# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_NETWORK is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-# CONFIG_SECURITY_ROOTPLUG is not set
-CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
CONFIG_CRYPTO=y
#
@@ -1746,6 +1839,7 @@ CONFIG_CRYPTO=y
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
#
@@ -1753,11 +1847,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
@@ -1767,6 +1863,8 @@ CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
new file mode 100644
index 00000000000..b9b0f93d0bd
--- /dev/null
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -0,0 +1,1643 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.31.4
+# Sat Oct 24 12:15:32 2009
+#
+# CONFIG_MMU is not set
+# CONFIG_FPU is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_BLACKFIN=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_BUG=y
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_FORCE_MAX_ZONEORDER=14
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+# CONFIG_FUTEX is not set
+CONFIG_EPOLL=y
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+
+#
+# Performance Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
+
+#
+# Blackfin Processor Options
+#
+
+#
+# Processor and Board Settings
+#
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
+# CONFIG_BF522 is not set
+# CONFIG_BF523 is not set
+# CONFIG_BF524 is not set
+# CONFIG_BF525 is not set
+# CONFIG_BF526 is not set
+# CONFIG_BF527 is not set
+# CONFIG_BF531 is not set
+# CONFIG_BF532 is not set
+# CONFIG_BF533 is not set
+# CONFIG_BF534 is not set
+# CONFIG_BF536 is not set
+# CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
+# CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
+# CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
+# CONFIG_BF547 is not set
+# CONFIG_BF547M is not set
+# CONFIG_BF548 is not set
+# CONFIG_BF548M is not set
+# CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
+CONFIG_BF561=y
+# CONFIG_SMP is not set
+CONFIG_BF_REV_MIN=3
+CONFIG_BF_REV_MAX=5
+# CONFIG_BF_REV_0_0 is not set
+# CONFIG_BF_REV_0_1 is not set
+# CONFIG_BF_REV_0_2 is not set
+# CONFIG_BF_REV_0_3 is not set
+# CONFIG_BF_REV_0_4 is not set
+CONFIG_BF_REV_0_5=y
+# CONFIG_BF_REV_0_6 is not set
+# CONFIG_BF_REV_ANY is not set
+# CONFIG_BF_REV_NONE is not set
+CONFIG_IRQ_PLL_WAKEUP=7
+CONFIG_IRQ_SPORT0_ERROR=7
+CONFIG_IRQ_SPORT1_ERROR=7
+CONFIG_IRQ_TIMER0=10
+CONFIG_IRQ_TIMER1=10
+CONFIG_IRQ_TIMER2=10
+CONFIG_IRQ_TIMER3=10
+CONFIG_IRQ_TIMER4=10
+CONFIG_IRQ_TIMER5=10
+CONFIG_IRQ_TIMER6=10
+CONFIG_IRQ_TIMER7=10
+CONFIG_IRQ_SPI_ERROR=7
+# CONFIG_BFIN561_EZKIT is not set
+# CONFIG_BFIN561_TEPLA is not set
+# CONFIG_BFIN561_BLUETECHNIX_CM is not set
+CONFIG_BFIN561_ACVILON=y
+
+#
+# BF561 Specific Configuration
+#
+
+#
+# Core B Support
+#
+# CONFIG_BF561_COREB is not set
+
+#
+# Interrupt Priority Assignment
+#
+
+#
+# Priority
+#
+CONFIG_IRQ_DMA1_ERROR=7
+CONFIG_IRQ_DMA2_ERROR=7
+CONFIG_IRQ_IMDMA_ERROR=7
+CONFIG_IRQ_PPI0_ERROR=7
+CONFIG_IRQ_PPI1_ERROR=7
+CONFIG_IRQ_UART_ERROR=7
+CONFIG_IRQ_RESERVED_ERROR=7
+CONFIG_IRQ_DMA1_0=8
+CONFIG_IRQ_DMA1_1=8
+CONFIG_IRQ_DMA1_2=8
+CONFIG_IRQ_DMA1_3=8
+CONFIG_IRQ_DMA1_4=8
+CONFIG_IRQ_DMA1_5=8
+CONFIG_IRQ_DMA1_6=8
+CONFIG_IRQ_DMA1_7=8
+CONFIG_IRQ_DMA1_8=8
+CONFIG_IRQ_DMA1_9=8
+CONFIG_IRQ_DMA1_10=8
+CONFIG_IRQ_DMA1_11=8
+CONFIG_IRQ_DMA2_0=9
+CONFIG_IRQ_DMA2_1=9
+CONFIG_IRQ_DMA2_2=9
+CONFIG_IRQ_DMA2_3=9
+CONFIG_IRQ_DMA2_4=9
+CONFIG_IRQ_DMA2_5=9
+CONFIG_IRQ_DMA2_6=9
+CONFIG_IRQ_DMA2_7=9
+CONFIG_IRQ_DMA2_8=9
+CONFIG_IRQ_DMA2_9=9
+CONFIG_IRQ_DMA2_10=9
+CONFIG_IRQ_DMA2_11=9
+CONFIG_IRQ_TIMER8=10
+CONFIG_IRQ_TIMER9=10
+CONFIG_IRQ_TIMER10=10
+CONFIG_IRQ_TIMER11=10
+CONFIG_IRQ_PROG0_INTA=11
+CONFIG_IRQ_PROG0_INTB=11
+CONFIG_IRQ_PROG1_INTA=11
+CONFIG_IRQ_PROG1_INTB=11
+CONFIG_IRQ_PROG2_INTA=11
+CONFIG_IRQ_PROG2_INTB=11
+CONFIG_IRQ_DMA1_WRRD0=8
+CONFIG_IRQ_DMA1_WRRD1=8
+CONFIG_IRQ_DMA2_WRRD0=9
+CONFIG_IRQ_DMA2_WRRD1=9
+CONFIG_IRQ_IMDMA_WRRD0=12
+CONFIG_IRQ_IMDMA_WRRD1=12
+CONFIG_IRQ_WDTIMER=13
+
+#
+# Board customizations
+#
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
+
+#
+# Clock/PLL Setup
+#
+CONFIG_CLKIN_HZ=12000000
+# CONFIG_BFIN_KERNEL_CLOCK is not set
+CONFIG_MAX_VCO_HZ=600000000
+CONFIG_MIN_VCO_HZ=50000000
+CONFIG_MAX_SCLK_HZ=133333333
+CONFIG_MIN_SCLK_HZ=27000000
+
+#
+# Kernel Timer/Scheduler
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_SCHED_HRTICK=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_TICKSOURCE_GPTMR0 is not set
+CONFIG_TICKSOURCE_CORETMR=y
+CONFIG_CYCLES_CLOCKSOURCE=y
+# CONFIG_GPTMR0_CLOCKSOURCE is not set
+CONFIG_TICK_ONESHOT=y
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+
+#
+# Misc
+#
+CONFIG_BFIN_SCRATCH_REG_RETN=y
+# CONFIG_BFIN_SCRATCH_REG_RETE is not set
+# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
+
+#
+# Blackfin Kernel Optimizations
+#
+
+#
+# Memory Optimizations
+#
+CONFIG_I_ENTRY_L1=y
+CONFIG_EXCPT_IRQ_SYSC_L1=y
+CONFIG_DO_IRQ_L1=y
+CONFIG_CORE_TIMER_IRQ_L1=y
+CONFIG_IDLE_L1=y
+CONFIG_SCHEDULE_L1=y
+CONFIG_ARITHMETIC_OPS_L1=y
+CONFIG_ACCESS_OK_L1=y
+CONFIG_MEMSET_L1=y
+CONFIG_MEMCPY_L1=y
+CONFIG_SYS_BFIN_SPINLOCK_L1=y
+# CONFIG_IP_CHECKSUM_L1 is not set
+CONFIG_CACHELINE_ALIGNED_L1=y
+# CONFIG_SYSCALL_TAB_L1 is not set
+# CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
+CONFIG_RAMKERNEL=y
+# CONFIG_ROMKERNEL is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_VIRT_TO_BUS=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+CONFIG_BFIN_GPTIMERS=y
+CONFIG_DMA_UNCACHED_4M=y
+# CONFIG_DMA_UNCACHED_2M is not set
+# CONFIG_DMA_UNCACHED_1M is not set
+# CONFIG_DMA_UNCACHED_NONE is not set
+
+#
+# Cache Support
+#
+CONFIG_BFIN_ICACHE=y
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+CONFIG_BFIN_DCACHE=y
+# CONFIG_BFIN_DCACHE_BANKA is not set
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
+# CONFIG_MPU is not set
+
+#
+# Asynchronous Memory Configuration
+#
+
+#
+# EBIU_AMGCTL Global Control
+#
+CONFIG_C_AMCKEN=y
+CONFIG_C_CDPRIO=y
+CONFIG_C_B0PEN=y
+CONFIG_C_B1PEN=y
+CONFIG_C_B2PEN=y
+# CONFIG_C_B3PEN is not set
+# CONFIG_C_AMBEN is not set
+# CONFIG_C_AMBEN_B0 is not set
+# CONFIG_C_AMBEN_B0_B1 is not set
+# CONFIG_C_AMBEN_B0_B1_B2 is not set
+CONFIG_C_AMBEN_ALL=y
+
+#
+# EBIU_AMBCTL Control
+#
+CONFIG_BANK_0=0x99b2
+CONFIG_BANK_1=0x3350
+CONFIG_BANK_2=0x7BB0
+CONFIG_BANK_3=0xAAC2
+
+#
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF_FDPIC=y
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_RAM=y
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_UCLINUX is not set
+CONFIG_MTD_PLATRAM=y
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+CONFIG_MTD_PHRAM=y
+# CONFIG_MTD_MTDRAM is not set
+CONFIG_MTD_BLOCK2MTD=y
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_VERIFY_WRITE=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=y
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+CONFIG_SMSC911X=y
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_BFIN_DMA_INTERFACE is not set
+# CONFIG_BFIN_PPI is not set
+# CONFIG_BFIN_PPIFCD is not set
+CONFIG_BFIN_SIMPLE_TIMER=y
+# CONFIG_BFIN_SPI_ADC is not set
+# CONFIG_BFIN_SPORT is not set
+# CONFIG_BFIN_TWI_LCD is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+# CONFIG_SERIAL_BFIN_DMA is not set
+CONFIG_SERIAL_BFIN_PIO=y
+CONFIG_SERIAL_BFIN_UART0=y
+# CONFIG_BFIN_UART0_CTSRTS is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_BFIN_SPORT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# CAN, the car bus and industrial fieldbus
+#
+# CONFIG_CAN4LINUX is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOPCA=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_PCA_PLATFORM=y
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+CONFIG_GPIO_PCF857X=y
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_BFIN_WDT=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+# CONFIG_SND_SEQUENCER is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_SPI=y
+
+#
+# ALSA Blackfin devices
+#
+# CONFIG_SND_BFIN_AD73322 is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_BF5XX_I2S=y
+# CONFIG_SND_BF5XX_SOC_SSM2602 is not set
+# CONFIG_SND_BF5XX_SOC_AD73311 is not set
+# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set
+# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set
+# CONFIG_SND_BF5XX_TDM is not set
+# CONFIG_SND_BF5XX_AC97 is not set
+CONFIG_SND_BF5XX_SOC_SPORT=y
+CONFIG_SND_BF5XX_SPORT_NUM=1
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+# CONFIG_USB_SERIAL_CONSOLE is not set
+# CONFIG_USB_EZUSB is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=y
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS1307=y
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_FIRMWARE_MEMMAP is not set
+# CONFIG_SIGMA is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=866
+CONFIG_FAT_DEFAULT_IOCHARSET="cp1251"
+CONFIG_NTFS_FS=y
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_JFFS2_LZO=y
+# CONFIG_JFFS2_RTIME is not set
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+# CONFIG_JFFS2_CMODE_PRIORITY is not set
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_CMODE_FAVOURLZO=y
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+CONFIG_MINIX_FS=y
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp1251"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+CONFIG_NLS_CODEPAGE_866=y
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+CONFIG_NLS_CODEPAGE_1251=y
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+CONFIG_NLS_KOI8_R=y
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_NOMMU_REGIONS is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_KMEMCHECK is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_VERBOSE=y
+CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
+CONFIG_DEBUG_HUNT_FOR_ZERO=y
+CONFIG_DEBUG_BFIN_HWTRACE_ON=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE is not set
+# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
+# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
+# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_CPLB_INFO=y
+CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITY_ROOTPLUG is not set
+# CONFIG_SECURITY_TOMOYO is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 0313cd1d982..e3ecdcc3e76 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -1,22 +1,29 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28.10
-# Thu May 21 05:50:01 2009
+# Linux kernel version: 2.6.31.5
+# Mon Nov 2 21:59:31 2009
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -26,22 +33,40 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -62,17 +87,28 @@ CONFIG_EPOLL=y
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
+
+#
+# Performance Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
-CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -80,11 +116,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -94,13 +127,12 @@ CONFIG_BLOCK=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_IOSCHED_CFQ=y
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
-CONFIG_CLASSIC_RCU=y
# CONFIG_PREEMPT_NONE is not set
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
@@ -170,6 +202,7 @@ CONFIG_IRQ_SPI_ERROR=7
CONFIG_BFIN561_EZKIT=y
# CONFIG_BFIN561_TEPLA is not set
# CONFIG_BFIN561_BLUETECHNIX_CM is not set
+# CONFIG_BFIN561_ACVILON is not set
#
# BF561 Specific Configuration
@@ -317,10 +350,11 @@ CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=m
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
@@ -331,14 +365,13 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+# CONFIG_BFIN_L2_ICACHEABLE is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
-# CONFIG_BFIN_L2_ICACHEABLE is not set
# CONFIG_BFIN_L2_DCACHEABLE is not set
#
@@ -347,7 +380,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -407,11 +440,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -435,13 +463,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_NETLABEL is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
@@ -459,7 +485,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
@@ -503,13 +532,8 @@ CONFIG_IRTTY_SIR=m
#
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_OLD_REGULATORY=y
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -530,6 +554,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
@@ -603,6 +628,11 @@ CONFIG_MTD_PHYSMAP=m
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -619,9 +649,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -645,9 +680,11 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_SMC91X=y
-# CONFIG_SMSC911X is not set
# CONFIG_DM9000 is not set
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -656,6 +693,8 @@ CONFIG_SMC91X=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
@@ -664,7 +703,10 @@ CONFIG_SMC91X=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -708,15 +750,12 @@ CONFIG_INPUT_EVDEV=m
#
# Character devices
#
-# CONFIG_AD9960 is not set
CONFIG_BFIN_DMA_INTERFACE=m
# CONFIG_BFIN_PPI is not set
# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
# CONFIG_BFIN_SPI_ADC is not set
# CONFIG_BFIN_SPORT is not set
-# CONFIG_BFIN_TIMER_LATENCY is not set
-CONFIG_SIMPLE_GPIO=m
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
CONFIG_BFIN_JTAG_COMM=m
@@ -730,6 +769,7 @@ CONFIG_BFIN_JTAG_COMM=m
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -740,6 +780,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
#
@@ -763,13 +804,18 @@ CONFIG_SPI_BFIN=y
# CONFIG_SPI_BFIN_LOCK is not set
# CONFIG_SPI_BFIN_SPORT is not set
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_DEBUG_GPIO is not set
@@ -782,9 +828,6 @@ CONFIG_GPIO_SYSFS=y
#
# I2C GPIO expanders:
#
-# CONFIG_GPIO_MAX732X is not set
-# CONFIG_GPIO_PCA953X is not set
-# CONFIG_GPIO_PCF857X is not set
#
# PCI GPIO expanders:
@@ -822,23 +865,9 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
+# CONFIG_EZX_PCAP is not set
# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -862,7 +891,6 @@ CONFIG_HID=m
#
# Special HID drivers
#
-CONFIG_HID_COMPAT=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
@@ -870,10 +898,20 @@ CONFIG_HID_COMPAT=y
# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
+# Firmware Drivers
+#
+# CONFIG_FIRMWARE_MEMMAP is not set
+
+#
# File systems
#
# CONFIG_EXT2_FS is not set
@@ -882,9 +920,11 @@ CONFIG_HID_COMPAT=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -894,6 +934,11 @@ CONFIG_INOTIFY_USER=y
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -915,10 +960,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -937,17 +979,8 @@ CONFIG_JFFS2_ZLIB=y
# CONFIG_JFFS2_LZO is not set
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
-CONFIG_YAFFS_FS=m
-CONFIG_YAFFS_YAFFS1=y
-# CONFIG_YAFFS_9BYTE_TAGS is not set
-# CONFIG_YAFFS_DOES_ECC is not set
-CONFIG_YAFFS_YAFFS2=y
-CONFIG_YAFFS_AUTO_YAFFS2=y
-# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
-# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
-CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -956,6 +989,7 @@ CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
@@ -966,7 +1000,6 @@ CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_REGISTER_V4 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@@ -1034,11 +1067,15 @@ CONFIG_FRAME_WARN=1024
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@@ -1046,16 +1083,21 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_NOMMU_REGIONS is not set
# CONFIG_DEBUG_WRITECOUNT is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
@@ -1063,17 +1105,19 @@ CONFIG_DEBUG_INFO=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
-
-#
-# Tracers
-#
-# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_BOOT_TRACER is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_BRANCH_PROFILE_NONE is not set
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
+# CONFIG_KMEMCHECK is not set
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
CONFIG_DEBUG_VERBOSE=y
@@ -1095,16 +1139,15 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y
CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
-CONFIG_SECURITY=y
+# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_NETWORK is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
CONFIG_CRYPTO=y
#
@@ -1183,6 +1226,7 @@ CONFIG_CRYPTO=y
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
#
@@ -1190,11 +1234,13 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
@@ -1204,6 +1250,8 @@ CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
+CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 5d944ffd4ab..9e65d885ec0 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -66,6 +66,7 @@ CONFIG_VM_EVENT_COUNTERS=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -275,6 +276,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=y
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index 648a31d01bf..4432150d89e 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -1,12 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28
+# Linux kernel version: 2.6.30.5
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
@@ -15,6 +16,9 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -25,55 +29,72 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_UID16=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
-CONFIG_COMPAT_BRK=y
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_AIO is not set
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -81,11 +102,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -101,7 +119,6 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
# CONFIG_PREEMPT_NONE is not set
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
@@ -265,7 +282,10 @@ CONFIG_HZ=250
# CONFIG_SCHED_HRTICK is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_TICKSOURCE_GPTMR0 is not set
+CONFIG_TICKSOURCE_CORETMR=y
# CONFIG_CYCLES_CLOCKSOURCE is not set
+# CONFIG_GPTMR0_CLOCKSOURCE is not set
# CONFIG_NO_HZ is not set
# CONFIG_HIGH_RES_TIMERS is not set
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -315,10 +335,12 @@ CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=y
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
@@ -329,10 +351,9 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -343,7 +364,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -361,7 +382,7 @@ CONFIG_C_AMBEN_ALL=y
# EBIU_AMBCTL Control
#
CONFIG_BANK_0=0x7BB0
-CONFIG_BANK_1=0x5554
+CONFIG_BANK_1=0x7BB0
CONFIG_BANK_2=0x7BB0
CONFIG_BANK_3=0xFFC0
@@ -386,7 +407,6 @@ CONFIG_BINFMT_ZFLAT=y
#
# CONFIG_PM is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
-# CONFIG_PM_WAKEUP_BY_GPIO is not set
#
# CPU Frequency scaling
@@ -400,11 +420,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -428,7 +443,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -452,7 +466,9 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
@@ -463,13 +479,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
-CONFIG_WIRELESS=y
-# CONFIG_CFG80211 is not set
-CONFIG_WIRELESS_OLD_REGULATORY=y
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -484,22 +495,21 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
-# CONFIG_DEBUG_DRIVER is not set
-# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
#
-CONFIG_MTD_CHAR=m
+CONFIG_MTD_CHAR=y
CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
# CONFIG_FTL is not set
@@ -512,9 +522,9 @@ CONFIG_MTD_BLOCK=y
#
# RAM/ROM/Flash chip drivers
#
-# CONFIG_MTD_CFI is not set
-CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_GEN_PROBE=m
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
@@ -526,9 +536,11 @@ CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
-# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_PSD4256G is not set
+CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=m
# CONFIG_MTD_ABSENT is not set
@@ -538,7 +550,7 @@ CONFIG_MTD_ROM=m
#
CONFIG_MTD_COMPLEX_MAPPINGS=y
# CONFIG_MTD_PHYSMAP is not set
-# CONFIG_MTD_GPIO_ADDR is not set
+CONFIG_MTD_GPIO_ADDR=y
# CONFIG_MTD_UCLINUX is not set
# CONFIG_MTD_PLATRAM is not set
@@ -562,6 +574,11 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -586,12 +603,46 @@ CONFIG_HAVE_IDE=y
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
-# CONFIG_SCSI is not set
-# CONFIG_SCSI_DMA is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -613,6 +664,9 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
@@ -623,9 +677,11 @@ CONFIG_BFIN_TX_DESC_NUM=10
CONFIG_BFIN_RX_DESC_NUM=20
CONFIG_BFIN_MAC_RMII=y
# CONFIG_SMC91X is not set
-# CONFIG_SMSC911X is not set
# CONFIG_DM9000 is not set
# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -633,6 +689,7 @@ CONFIG_BFIN_MAC_RMII=y
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
@@ -641,7 +698,10 @@ CONFIG_BFIN_MAC_RMII=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
#
# USB Network Adapters
@@ -674,17 +734,13 @@ CONFIG_BFIN_MAC_RMII=y
#
# Character devices
#
-# CONFIG_AD9960 is not set
-# CONFIG_SPI_ADC_BF533 is not set
-# CONFIG_BF5xx_PPIFCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+# CONFIG_BFIN_PPI is not set
+# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
-# CONFIG_BF5xx_PPI is not set
-# CONFIG_BF5xx_EPPI is not set
+# CONFIG_BFIN_SPI_ADC is not set
# CONFIG_BFIN_SPORT is not set
-# CONFIG_BFIN_TIMER_LATENCY is not set
-# CONFIG_TWI_LCD is not set
-CONFIG_BFIN_DMA_INTERFACE=m
-CONFIG_SIMPLE_GPIO=m
+# CONFIG_BFIN_TWI_LCD is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
# CONFIG_BFIN_JTAG_COMM is not set
@@ -698,6 +754,7 @@ CONFIG_SIMPLE_GPIO=m
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -710,6 +767,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_BFIN_OTP=y
# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
@@ -758,13 +816,9 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_AT24 is not set
-# CONFIG_SENSORS_AD5252 is not set
-# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
@@ -772,7 +826,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
-# CONFIG_SPI_DEBUG is not set
CONFIG_SPI_MASTER=y
#
@@ -780,17 +833,17 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_BFIN=y
# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_SPI_AT25 is not set
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
-# CONFIG_DEBUG_GPIO is not set
CONFIG_GPIO_SYSFS=y
#
@@ -803,6 +856,7 @@ CONFIG_GPIO_SYSFS=y
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@@ -829,11 +883,13 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_IT87 is not set
@@ -849,11 +905,16 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_LM90 is not set
# CONFIG_SENSORS_LM92 is not set
# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
# CONFIG_SENSORS_MAX1111 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_MAX6650 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
# CONFIG_SENSORS_DME1737 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47M192 is not set
@@ -885,6 +946,12 @@ CONFIG_BFIN_WDT=y
# USB-based Watchdog Cards
#
# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
#
# Multifunction device drivers
@@ -892,10 +959,14 @@ CONFIG_BFIN_WDT=y
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
# CONFIG_REGULATOR is not set
#
@@ -931,20 +1002,20 @@ CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
-CONFIG_USB=y
+CONFIG_USB=m
# CONFIG_USB_DEBUG is not set
-# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
#
# Miscellaneous USB options
#
-# CONFIG_USB_DEVICEFS is not set
-CONFIG_USB_DEVICE_CLASS=y
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_OTG is not set
# CONFIG_USB_OTG_WHITELIST is not set
CONFIG_USB_OTG_BLACKLIST_HUB=y
-CONFIG_USB_MON=y
+CONFIG_USB_MON=m
# CONFIG_USB_WUSB is not set
# CONFIG_USB_WUSB_CBAF is not set
@@ -952,24 +1023,24 @@ CONFIG_USB_MON=y
# USB Host Controller Drivers
#
# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_HWA_HCD is not set
-CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SOC=y
#
# Blackfin high speed USB Support
#
-CONFIG_USB_MUSB_HOST=y
-# CONFIG_USB_MUSB_PERIPHERAL is not set
+# CONFIG_USB_MUSB_HOST is not set
+CONFIG_USB_MUSB_PERIPHERAL=y
# CONFIG_USB_MUSB_OTG is not set
-CONFIG_USB_MUSB_HDRC_HCD=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
CONFIG_MUSB_PIO_ONLY=y
-CONFIG_MUSB_DMA_POLL=y
# CONFIG_USB_MUSB_DEBUG is not set
#
@@ -981,18 +1052,31 @@ CONFIG_MUSB_DMA_POLL=y
# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
# CONFIG_USB_LIBUSUAL is not set
#
# USB Imaging devices
#
# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
#
# USB port drivers
@@ -1013,7 +1097,6 @@ CONFIG_MUSB_DMA_POLL=y
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGET is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
@@ -1021,9 +1104,50 @@ CONFIG_MUSB_DMA_POLL=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
-# CONFIG_USB_GADGET is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2272 is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -1090,6 +1214,7 @@ CONFIG_RTC_INTF_DEV=y
#
CONFIG_RTC_DRV_BFIN=y
# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@@ -1102,9 +1227,10 @@ CONFIG_RTC_DRV_BFIN=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1114,6 +1240,11 @@ CONFIG_INOTIFY_USER=y
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -1122,8 +1253,11 @@ CONFIG_INOTIFY_USER=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
@@ -1135,10 +1269,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1146,9 +1277,19 @@ CONFIG_SYSFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_YAFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -1157,6 +1298,7 @@ CONFIG_SYSFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
@@ -1167,7 +1309,6 @@ CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_REGISTER_V4 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
CONFIG_SMB_FS=m
@@ -1182,9 +1323,9 @@ CONFIG_SMB_FS=m
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-CONFIG_NLS=m
+CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
@@ -1208,7 +1349,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ASCII is not set
-# CONFIG_NLS_ISO8859_1 is not set
+CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
@@ -1235,55 +1376,34 @@ CONFIG_FRAME_WARN=1024
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_SHIRQ is not set
-CONFIG_DETECT_SOFTLOCKUP=y
-# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_SCHEDSTATS is not set
-# CONFIG_TIMER_STATS is not set
-# CONFIG_DEBUG_OBJECTS is not set
-# CONFIG_DEBUG_SLAB is not set
-# CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
-# CONFIG_DEBUG_SPINLOCK is not set
-# CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
-# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
-# CONFIG_DEBUG_KOBJECT is not set
-CONFIG_DEBUG_BUGVERBOSE=y
-# CONFIG_DEBUG_INFO is not set
-# CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_DEBUG_LIST is not set
-# CONFIG_DEBUG_SG is not set
-# CONFIG_FRAME_POINTER is not set
-# CONFIG_BOOT_PRINTK_DELAY is not set
-# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_BACKTRACE_SELF_TEST is not set
-# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
-# CONFIG_FAULT_INJECTION is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
#
# Tracers
#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-# CONFIG_KGDB is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
-# CONFIG_DEBUG_STACK_USAGE is not set
-# CONFIG_KGDB_TESTCASE is not set
CONFIG_DEBUG_VERBOSE=y
-CONFIG_DEBUG_MMRS=y
-# CONFIG_DEBUG_HWERR is not set
+# CONFIG_DEBUG_MMRS is not set
# CONFIG_DEBUG_DOUBLEFAULT is not set
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
@@ -1293,9 +1413,10 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
+CONFIG_EARLY_PRINTK=y
# CONFIG_CPLB_INFO is not set
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
@@ -1304,9 +1425,9 @@ CONFIG_ACCESS_CHECK=y
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_PATH is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-# CONFIG_SECURITY_ROOTPLUG is not set
-CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+# CONFIG_SECURITY_TOMOYO is not set
CONFIG_CRYPTO=y
#
@@ -1385,6 +1506,7 @@ CONFIG_CRYPTO=y
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
#
@@ -1392,20 +1514,24 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
+CONFIG_CRC7=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
-CONFIG_PLIST=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index ae665b93b87..df56639ab2f 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -1,94 +1,110 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22.16
+# Linux kernel version: 2.6.30.5
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
-# Code maturity level options
+# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
-# CONFIG_BLK_DEV_INITRD is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
+CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3
-# CONFIG_NP2 is not set
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
-CONFIG_RT_MUTEXES=y
-CONFIG_TINY_SHMEM=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODULE_FORCE_LOAD is not set
+# CONFIG_MODULE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
-
-#
-# Block layer
-#
CONFIG_BLOCK=y
# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -96,7 +112,7 @@ CONFIG_BLOCK=y
CONFIG_IOSCHED_NOOP=y
# CONFIG_IOSCHED_AS is not set
# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_IOSCHED_CFQ=y
+# CONFIG_IOSCHED_CFQ is not set
# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
@@ -105,6 +121,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
#
# Blackfin Processor Options
@@ -113,6 +130,10 @@ CONFIG_PREEMPT_NONE=y
#
# Processor and Board Settings
#
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
# CONFIG_BF522 is not set
# CONFIG_BF523 is not set
# CONFIG_BF524 is not set
@@ -125,28 +146,38 @@ CONFIG_BF533=y
# CONFIG_BF534 is not set
# CONFIG_BF536 is not set
# CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
# CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
# CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
# CONFIG_BF547 is not set
+# CONFIG_BF547M is not set
# CONFIG_BF548 is not set
+# CONFIG_BF548M is not set
# CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
# CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=3
+CONFIG_BF_REV_MAX=6
# CONFIG_BF_REV_0_0 is not set
# CONFIG_BF_REV_0_1 is not set
# CONFIG_BF_REV_0_2 is not set
CONFIG_BF_REV_0_3=y
# CONFIG_BF_REV_0_4 is not set
# CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
# CONFIG_BF_REV_ANY is not set
# CONFIG_BF_REV_NONE is not set
CONFIG_BF53x=y
-CONFIG_BFIN_SINGLE_CORE=y
CONFIG_MEM_MT48LC16M16A2TG_75=y
# CONFIG_BFIN533_EZKIT is not set
# CONFIG_BFIN533_STAMP is not set
+# CONFIG_BLACKSTAMP is not set
CONFIG_BFIN533_BLUETECHNIX_CM=y
# CONFIG_H8606_HVSISTEMAS is not set
-# CONFIG_GENERIC_BF533_BOARD is not set
+# CONFIG_BFIN532_IP0X is not set
#
# BF533/2/1 Specific Configuration
@@ -188,6 +219,7 @@ CONFIG_WDTIMER=13
# Board customizations
#
# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
#
# Clock/PLL Setup
@@ -207,13 +239,20 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_TICKSOURCE_GPTMR0 is not set
+CONFIG_TICKSOURCE_CORETMR=y
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+# CONFIG_GPTMR0_CLOCKSOURCE is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
#
-# Memory Setup
+# Misc
#
-CONFIG_MAX_MEM_SIZE=32
-CONFIG_MEM_ADD_WIDTH=9
-CONFIG_BOOT_LOAD=0x1000
CONFIG_BFIN_SCRATCH_REG_RETN=y
# CONFIG_BFIN_SCRATCH_REG_RETE is not set
# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
@@ -240,6 +279,12 @@ CONFIG_IP_CHECKSUM_L1=y
CONFIG_CACHELINE_ALIGNED_L1=y
CONFIG_SYSCALL_TAB_L1=y
CONFIG_CPLB_SWITCH_TAB_L1=y
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
CONFIG_RAMKERNEL=y
# CONFIG_ROMKERNEL is not set
CONFIG_SELECT_MEMORY_MODEL=y
@@ -248,12 +293,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
-CONFIG_LARGE_ALLOCS=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
# CONFIG_BFIN_GPTIMERS is not set
+# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
CONFIG_DMA_UNCACHED_1M=y
# CONFIG_DMA_UNCACHED_NONE is not set
@@ -262,10 +311,9 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -276,7 +324,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -301,12 +349,8 @@ CONFIG_BANK_3=0xFFC2
#
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
-# CONFIG_PCI is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
+# CONFIG_PCCARD is not set
#
# Executable file formats
@@ -315,22 +359,19 @@ CONFIG_BINFMT_ELF_FDPIC=y
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
#
# Power management options
#
# CONFIG_PM is not set
-# CONFIG_PM_WAKEUP_BY_GPIO is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# CPU Frequency scaling
#
# CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
@@ -339,45 +380,13 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-# CONFIG_IP_PNP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-# CONFIG_TCP_MD5SIG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
-# CONFIG_NETLABEL is not set
+# CONFIG_INET is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
-# CONFIG_IP_DCCP is not set
-# CONFIG_IP_SCTP is not set
-# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -385,31 +394,23 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
-# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -418,20 +419,22 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -444,12 +447,15 @@ CONFIG_MTD_BLOCK=y
# CONFIG_INFTL is not set
# CONFIG_RFD_FTL is not set
# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
#
# RAM/ROM/Flash chip drivers
#
-# CONFIG_MTD_CFI is not set
+CONFIG_MTD_CFI=y
# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
@@ -460,6 +466,11 @@ CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_PSD4256G is not set
+CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=y
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
@@ -468,12 +479,16 @@ CONFIG_MTD_RAM=y
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_UCLINUX is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -489,36 +504,25 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_ONENAND is not set
#
-# UBI - Unsorted block images
+# LPDDR flash memory drivers
#
-# CONFIG_MTD_UBI is not set
+# CONFIG_MTD_LPDDR is not set
#
-# Parallel port support
+# UBI - Unsorted block images
#
+# CONFIG_MTD_UBI is not set
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_RAM is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
-
-#
-# Misc devices
-#
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
@@ -526,34 +530,19 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
#
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
# CONFIG_MD is not set
-
-#
-# Network device support
-#
CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-# CONFIG_PHYLIB is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_SMC91X=y
-# CONFIG_SMSC911X is not set
-# CONFIG_DM9000 is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_ETHERNET is not set
# CONFIG_NETDEV_1000 is not set
-# CONFIG_AX88180 is not set
# CONFIG_NETDEV_10000 is not set
#
@@ -561,22 +550,17 @@ CONFIG_SMC91X=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
@@ -593,16 +577,15 @@ CONFIG_SMC91X=y
#
# Character devices
#
-# CONFIG_AD9960 is not set
-# CONFIG_SPI_ADC_BF533 is not set
-# CONFIG_BF5xx_PFLAGS is not set
-# CONFIG_BF5xx_PPIFCD is not set
+# CONFIG_BFIN_DMA_INTERFACE is not set
+# CONFIG_BFIN_PPI is not set
+# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
-# CONFIG_BF5xx_PPI is not set
-CONFIG_BFIN_SPORT=y
-# CONFIG_BFIN_TIMER_LATENCY is not set
+# CONFIG_BFIN_SPI_ADC is not set
+# CONFIG_BFIN_SPORT is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -613,6 +596,7 @@ CONFIG_BFIN_SPORT=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -623,176 +607,141 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
#
# CAN, the car bus and industrial fieldbus
#
# CONFIG_CAN4LINUX is not set
-
-#
-# IPMI
-#
# CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
-
-CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
#
-# SPI support
+# SPI Master Controller Drivers
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
-# Dallas's 1-wire bus
+# SPI Protocol Masters
#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
-CONFIG_HWMON=y
-# CONFIG_HWMON_VID is not set
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_PC87427 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_VT1211 is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
#
+
+#
+# Multimedia core support
+#
# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_VIDEO_MEDIA is not set
#
-# Graphics support
+# Multimedia drivers
#
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_DAB is not set
#
-# Display device support
+# Graphics support
#
-# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
-# Sound
+# Display device support
#
+# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# USB support
+# MMC/SD/SDIO Card Drivers
#
-CONFIG_USB_ARCH_HAS_HCD=y
-# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB_ARCH_HAS_EHCI is not set
-# CONFIG_USB is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# Enable Host or Gadget support to see Inventra options
-#
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-# CONFIG_MMC is not set
-
-#
-# LED devices
+# MMC/SD/SDIO Host Controller Drivers
#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_SPI=m
+# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
-
-#
-# InfiniBand support
-#
-
-#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
-
-#
-# Real Time Clock
-#
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
-
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
-
-#
-# PBX support
-#
-# CONFIG_PBX is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# File systems
#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-# CONFIG_EXT2_FS_POSIX_ACL is not set
-# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
-CONFIG_FS_MBCACHE=y
+# CONFIG_EXT4_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_INOTIFY=y
-CONFIG_INOTIFY_USER=y
-# CONFIG_QUOTA is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -801,8 +750,11 @@ CONFIG_INOTIFY_USER=y
#
# DOS/FAT/NT Filesystems
#
+CONFIG_FAT_FS=y
# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
@@ -813,12 +765,8 @@ CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -826,60 +774,106 @@ CONFIG_RAMFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_YAFFS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-# CONFIG_NFS_FS is not set
-# CONFIG_NFSD is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
#
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-# CONFIG_NLS is not set
-
-#
-# Distributed Lock Manager
-#
-# CONFIG_DLM is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_DEBUG_VERBOSE=y
CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_DOUBLEFAULT is not set
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -888,34 +882,39 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
+CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
-CONFIG_SECURITY_CAPABILITIES=y
-
-#
-# Cryptographic options
-#
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITY_TOMOYO is not set
# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
-CONFIG_CRC_CCITT=m
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
# CONFIG_CRC16 is not set
-# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
+CONFIG_CRC7=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
-CONFIG_PLIST=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index d74b6f4db35..22e565c51d6 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -1,13 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28.10
-# Wed Jun 3 06:27:41 2009
+# Linux kernel version: 2.6.30.5
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
@@ -16,6 +16,9 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -26,21 +29,40 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
-# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -49,7 +71,8 @@ CONFIG_EMBEDDED=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
@@ -65,12 +88,13 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
-CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -78,11 +102,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -98,7 +119,6 @@ CONFIG_IOSCHED_CFQ=y
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_CLASSIC_RCU=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
@@ -181,7 +201,8 @@ CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_WATCH=13
CONFIG_IRQ_SPI=10
# CONFIG_BFIN537_STAMP is not set
-CONFIG_BFIN537_BLUETECHNIX_CM=y
+CONFIG_BFIN537_BLUETECHNIX_CM_E=y
+# CONFIG_BFIN537_BLUETECHNIX_CM_U is not set
# CONFIG_BFIN537_BLUETECHNIX_TCM is not set
# CONFIG_PNAV10 is not set
# CONFIG_CAMSIG_MINOTAUR is not set
@@ -283,10 +304,12 @@ CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
# CONFIG_BFIN_GPTIMERS is not set
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
@@ -297,10 +320,9 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -311,7 +333,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -337,6 +359,7 @@ CONFIG_BANK_3=0xFFC2
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
#
# Executable file formats
@@ -366,11 +389,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
-# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -394,7 +412,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -418,7 +435,9 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
@@ -429,8 +448,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_PHONET is not set
# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -441,16 +460,21 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
#
@@ -486,22 +510,26 @@ CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_PSD4256G is not set
CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=y
-# CONFIG_MTD_ROM is not set
+CONFIG_MTD_ROM=m
# CONFIG_MTD_ABSENT is not set
#
# Mapping drivers for chip access
#
CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
CONFIG_MTD_GPIO_ADDR=y
-CONFIG_MTD_UCLINUX=y
+# CONFIG_MTD_UCLINUX is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -517,6 +545,11 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -533,9 +566,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -549,6 +587,7 @@ CONFIG_HAVE_IDE=y
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -570,6 +609,9 @@ CONFIG_PHYLIB=y
# CONFIG_BROADCOM_PHY is not set
# CONFIG_ICPLUS_PHY is not set
# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
@@ -580,8 +622,11 @@ CONFIG_BFIN_TX_DESC_NUM=10
CONFIG_BFIN_RX_DESC_NUM=20
# CONFIG_BFIN_MAC_RMII is not set
# CONFIG_SMC91X is not set
-# CONFIG_SMSC911X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
@@ -598,7 +643,10 @@ CONFIG_BFIN_RX_DESC_NUM=20
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
-# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
@@ -622,15 +670,12 @@ CONFIG_BFIN_RX_DESC_NUM=20
#
# Character devices
#
-# CONFIG_AD9960 is not set
CONFIG_BFIN_DMA_INTERFACE=m
# CONFIG_BFIN_PPI is not set
# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
# CONFIG_BFIN_SPI_ADC is not set
CONFIG_BFIN_SPORT=y
-# CONFIG_BFIN_TIMER_LATENCY is not set
-# CONFIG_SIMPLE_GPIO is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
# CONFIG_BFIN_JTAG_COMM is not set
@@ -644,6 +689,7 @@ CONFIG_BFIN_SPORT=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -656,6 +702,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
#
@@ -668,7 +715,23 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
-# CONFIG_SPI is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
@@ -688,15 +751,21 @@ CONFIG_GPIO_SYSFS=y
#
# SPI GPIO expanders:
#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_MAX1111 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SHT15 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_VT1211 is not set
@@ -758,21 +827,74 @@ CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB is not set
# CONFIG_USB_OTG_WHITELIST is not set
# CONFIG_USB_OTG_BLACKLIST_HUB is not set
-
-#
-# Enable Host or Gadget support to see Inventra options
-#
-
-#
-# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
-#
-# CONFIG_USB_GADGET is not set
-# CONFIG_MMC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2272=y
+CONFIG_USB_NET2272=m
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@@ -789,9 +911,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -801,6 +924,11 @@ CONFIG_INOTIFY_USER=y
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -809,8 +937,11 @@ CONFIG_INOTIFY_USER=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
@@ -822,10 +953,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -833,9 +961,19 @@ CONFIG_SYSFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_JFFS2_FS is not set
-# CONFIG_YAFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -844,14 +982,70 @@ CONFIG_SYSFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
#
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set
#
@@ -867,14 +1061,28 @@ CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
#
# Tracers
#
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEBUG_VERBOSE=y
@@ -888,9 +1096,10 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
+CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
@@ -899,8 +1108,9 @@ CONFIG_ACCESS_CHECK=y
CONFIG_SECURITY=y
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_PATH is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+# CONFIG_SECURITY_TOMOYO is not set
CONFIG_CRYPTO=y
#
@@ -979,6 +1189,7 @@ CONFIG_CRYPTO=y
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
#
@@ -986,19 +1197,24 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
+CONFIG_CRC7=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 7fc8dfa1719..efcc90d2f34 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -1,94 +1,111 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.22.16
+# Linux kernel version: 2.6.30.5
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
-# Code maturity level options
+# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
-# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
+CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3
-# CONFIG_NP2 is not set
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
-CONFIG_RT_MUTEXES=y
-CONFIG_TINY_SHMEM=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
CONFIG_BASE_SMALL=0
-
-#
-# Loadable module support
-#
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
-
-#
-# Block layer
-#
CONFIG_BLOCK=y
# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -105,6 +122,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
#
# Blackfin Processor Options
@@ -113,6 +131,10 @@ CONFIG_PREEMPT_NONE=y
#
# Processor and Board Settings
#
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
# CONFIG_BF522 is not set
# CONFIG_BF523 is not set
# CONFIG_BF524 is not set
@@ -125,22 +147,31 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_BF534 is not set
# CONFIG_BF536 is not set
CONFIG_BF537=y
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
# CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
# CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
# CONFIG_BF547 is not set
+# CONFIG_BF547M is not set
# CONFIG_BF548 is not set
+# CONFIG_BF548M is not set
# CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
# CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=2
+CONFIG_BF_REV_MAX=3
# CONFIG_BF_REV_0_0 is not set
# CONFIG_BF_REV_0_1 is not set
CONFIG_BF_REV_0_2=y
# CONFIG_BF_REV_0_3 is not set
# CONFIG_BF_REV_0_4 is not set
# CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
# CONFIG_BF_REV_ANY is not set
# CONFIG_BF_REV_NONE is not set
CONFIG_BF53x=y
-CONFIG_BFIN_SINGLE_CORE=y
CONFIG_MEM_MT48LC16M16A2TG_75=y
CONFIG_IRQ_PLL_WAKEUP=7
CONFIG_IRQ_RTC=8
@@ -150,7 +181,6 @@ CONFIG_IRQ_SPORT0_TX=9
CONFIG_IRQ_SPORT1_RX=9
CONFIG_IRQ_SPORT1_TX=9
CONFIG_IRQ_TWI=10
-CONFIG_IRQ_SPI=10
CONFIG_IRQ_UART0_RX=10
CONFIG_IRQ_UART0_TX=10
CONFIG_IRQ_UART1_RX=10
@@ -169,11 +199,13 @@ CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_WATCH=13
+CONFIG_IRQ_SPI=10
# CONFIG_BFIN537_STAMP is not set
-CONFIG_BFIN537_BLUETECHNIX_CM=y
+# CONFIG_BFIN537_BLUETECHNIX_CM_E is not set
+CONFIG_BFIN537_BLUETECHNIX_CM_U=y
+# CONFIG_BFIN537_BLUETECHNIX_TCM is not set
# CONFIG_PNAV10 is not set
# CONFIG_CAMSIG_MINOTAUR is not set
-# CONFIG_GENERIC_BF537_BOARD is not set
#
# BF537 Specific Configuration
@@ -196,6 +228,7 @@ CONFIG_IRQ_PROG_INTA=12
# Board customizations
#
# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
#
# Clock/PLL Setup
@@ -215,13 +248,20 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_TICKSOURCE_GPTMR0 is not set
+CONFIG_TICKSOURCE_CORETMR=y
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+# CONFIG_GPTMR0_CLOCKSOURCE is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
#
-# Memory Setup
+# Misc
#
-CONFIG_MAX_MEM_SIZE=32
-CONFIG_MEM_ADD_WIDTH=9
-CONFIG_BOOT_LOAD=0x1000
CONFIG_BFIN_SCRATCH_REG_RETN=y
# CONFIG_BFIN_SCRATCH_REG_RETE is not set
# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
@@ -248,6 +288,12 @@ CONFIG_IP_CHECKSUM_L1=y
CONFIG_CACHELINE_ALIGNED_L1=y
CONFIG_SYSCALL_TAB_L1=y
CONFIG_CPLB_SWITCH_TAB_L1=y
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
CONFIG_RAMKERNEL=y
# CONFIG_ROMKERNEL is not set
CONFIG_SELECT_MEMORY_MODEL=y
@@ -256,12 +302,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
-CONFIG_LARGE_ALLOCS=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
# CONFIG_BFIN_GPTIMERS is not set
+# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
CONFIG_DMA_UNCACHED_1M=y
# CONFIG_DMA_UNCACHED_NONE is not set
@@ -270,10 +320,9 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -284,7 +333,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -309,12 +358,8 @@ CONFIG_BANK_3=0xFFC2
#
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
-# CONFIG_PCI is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
+# CONFIG_PCCARD is not set
#
# Executable file formats
@@ -323,22 +368,19 @@ CONFIG_BINFMT_ELF_FDPIC=y
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
#
# Power management options
#
# CONFIG_PM is not set
-# CONFIG_PM_WAKEUP_BY_GPIO is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# CPU Frequency scaling
#
# CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
@@ -347,10 +389,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -369,15 +407,13 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_LRO=y
# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
# CONFIG_NETLABEL is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
@@ -386,6 +422,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -395,29 +432,23 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -426,20 +457,22 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -452,12 +485,15 @@ CONFIG_MTD_BLOCK=y
# CONFIG_INFTL is not set
# CONFIG_RFD_FTL is not set
# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
#
# RAM/ROM/Flash chip drivers
#
-# CONFIG_MTD_CFI is not set
+CONFIG_MTD_CFI=y
# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
@@ -468,20 +504,29 @@ CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_PSD4256G is not set
+CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=y
-# CONFIG_MTD_ROM is not set
+CONFIG_MTD_ROM=m
# CONFIG_MTD_ABSENT is not set
#
# Mapping drivers for chip access
#
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_GPIO_ADDR=y
+# CONFIG_MTD_UCLINUX is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -497,36 +542,36 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_ONENAND is not set
#
-# UBI - Unsorted block images
+# LPDDR flash memory drivers
#
-# CONFIG_MTD_UBI is not set
+# CONFIG_MTD_LPDDR is not set
#
-# Parallel port support
+# UBI - Unsorted block images
#
+# CONFIG_MTD_UBI is not set
# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
#
-# Misc devices
+# EEPROM support
#
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
@@ -534,35 +579,20 @@ CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
#
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
# CONFIG_MD is not set
-
-#
-# Network device support
-#
CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-# CONFIG_PHYLIB is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_SMC91X=y
-# CONFIG_BFIN_MAC is not set
-# CONFIG_SMSC911X is not set
-# CONFIG_DM9000 is not set
+# CONFIG_VETH is not set
+# CONFIG_NET_ETHERNET is not set
# CONFIG_NETDEV_1000 is not set
-# CONFIG_AX88180 is not set
# CONFIG_NETDEV_10000 is not set
#
@@ -570,22 +600,17 @@ CONFIG_SMC91X=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
@@ -602,16 +627,15 @@ CONFIG_SMC91X=y
#
# Character devices
#
-# CONFIG_AD9960 is not set
-# CONFIG_SPI_ADC_BF533 is not set
-# CONFIG_BF5xx_PFLAGS is not set
-# CONFIG_BF5xx_PPIFCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+# CONFIG_BFIN_PPI is not set
+# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
-# CONFIG_BF5xx_PPI is not set
+# CONFIG_BFIN_SPI_ADC is not set
CONFIG_BFIN_SPORT=y
-# CONFIG_BFIN_TIMER_LATENCY is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -622,6 +646,7 @@ CONFIG_BFIN_SPORT=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -634,165 +659,201 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
#
# CAN, the car bus and industrial fieldbus
#
# CONFIG_CAN4LINUX is not set
-
-#
-# IPMI
-#
# CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
#
-# TPM devices
+# SPI Master Controller Drivers
#
-# CONFIG_TCG_TPM is not set
-# CONFIG_I2C is not set
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
#
-# SPI support
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
#
-# Dallas's 1-wire bus
+# PCI GPIO expanders:
#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
-# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SHT15 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
#
+
+#
+# Multimedia core support
+#
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_VIDEO_MEDIA is not set
#
-# Graphics support
+# Multimedia drivers
#
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_DAB is not set
#
-# Display device support
+# Graphics support
#
-# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
-# Sound
+# Display device support
#
+# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_SOUND is not set
-
-#
-# USB support
-#
+CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB is not set
-# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
-#
-
-#
-# USB Gadget Support
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
CONFIG_USB_GADGET_NET2272=y
CONFIG_USB_NET2272=y
# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA2XX is not set
# CONFIG_USB_GADGET_GOKU is not set
-# CONFIG_USB_GADGET_LH7A40X is not set
-# CONFIG_USB_GADGET_OMAP is not set
-# CONFIG_USB_GADGET_AT91 is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_ZERO is not set
-# CONFIG_USB_ETH is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=y
+CONFIG_USB_ETH_RNDIS=y
# CONFIG_USB_GADGETFS is not set
# CONFIG_USB_FILE_STORAGE is not set
# CONFIG_USB_G_SERIAL is not set
# CONFIG_USB_MIDI_GADGET is not set
-# CONFIG_MMC is not set
-
-#
-# LED devices
-#
-# CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
#
-# InfiniBand support
+# OTG and related infrastructure
#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+# MMC/SD/SDIO Card Drivers
#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# Real Time Clock
+# MMC/SD/SDIO Host Controller Drivers
#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_SPI=m
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
-
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
-
-#
-# PBX support
-#
-# CONFIG_PBX is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# File systems
@@ -802,25 +863,29 @@ CONFIG_EXT2_FS_XATTR=y
# CONFIG_EXT2_FS_POSIX_ACL is not set
# CONFIG_EXT2_FS_SECURITY is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -829,8 +894,11 @@ CONFIG_INOTIFY_USER=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
@@ -841,12 +909,8 @@ CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -854,18 +918,29 @@ CONFIG_RAMFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_YAFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
@@ -873,41 +948,94 @@ CONFIG_RAMFS=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
#
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
-# CONFIG_NLS is not set
-
-#
-# Distributed Lock Manager
-#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set
#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_DEBUG_VERBOSE=y
CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_DOUBLEFAULT is not set
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -916,34 +1044,40 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
+CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
-CONFIG_SECURITY_CAPABILITIES=y
-
-#
-# Cryptographic options
-#
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITY_TOMOYO is not set
# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
-# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
+CONFIG_CRC7=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
-CONFIG_PLIST=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index acca4e51a45..7f579cf5112 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -1,14 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.4
+# Linux kernel version: 2.6.30.5
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
@@ -16,6 +16,9 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -26,79 +29,100 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
-CONFIG_UID16=y
+# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
+CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3
-# CONFIG_NP2 is not set
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_AS is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
-# CONFIG_PREEMPT_NONE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
#
# Blackfin Processor Options
@@ -107,6 +131,10 @@ CONFIG_PREEMPT_VOLUNTARY=y
#
# Processor and Board Settings
#
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
# CONFIG_BF522 is not set
# CONFIG_BF523 is not set
# CONFIG_BF524 is not set
@@ -119,19 +147,29 @@ CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_BF534 is not set
# CONFIG_BF536 is not set
# CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
# CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
# CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
# CONFIG_BF547 is not set
-CONFIG_BF548=y
+# CONFIG_BF547M is not set
+CONFIG_BF548_std=y
+# CONFIG_BF548M is not set
# CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
# CONFIG_BF561 is not set
+CONFIG_BF_REV_MIN=0
+CONFIG_BF_REV_MAX=2
# CONFIG_BF_REV_0_0 is not set
# CONFIG_BF_REV_0_1 is not set
-CONFIG_BF_REV_0_2=y
+# CONFIG_BF_REV_0_2 is not set
# CONFIG_BF_REV_0_3 is not set
# CONFIG_BF_REV_0_4 is not set
# CONFIG_BF_REV_0_5 is not set
-# CONFIG_BF_REV_ANY is not set
+# CONFIG_BF_REV_0_6 is not set
+CONFIG_BF_REV_ANY=y
# CONFIG_BF_REV_NONE is not set
CONFIG_BF54x=y
CONFIG_IRQ_PLL_WAKEUP=7
@@ -140,15 +178,12 @@ CONFIG_IRQ_SPORT0_RX=9
CONFIG_IRQ_SPORT0_TX=9
CONFIG_IRQ_SPORT1_RX=9
CONFIG_IRQ_SPORT1_TX=9
+CONFIG_IRQ_SPI0=10
CONFIG_IRQ_UART0_RX=10
CONFIG_IRQ_UART0_TX=10
CONFIG_IRQ_UART1_RX=10
CONFIG_IRQ_UART1_TX=10
CONFIG_IRQ_CNT=8
-CONFIG_IRQ_USB_INT0=11
-CONFIG_IRQ_USB_INT1=11
-CONFIG_IRQ_USB_INT2=11
-CONFIG_IRQ_USB_DMA=11
CONFIG_IRQ_TIMER0=11
CONFIG_IRQ_TIMER1=11
CONFIG_IRQ_TIMER2=11
@@ -157,9 +192,21 @@ CONFIG_IRQ_TIMER4=11
CONFIG_IRQ_TIMER5=11
CONFIG_IRQ_TIMER6=11
CONFIG_IRQ_TIMER7=11
+CONFIG_IRQ_USB_INT0=11
+CONFIG_IRQ_USB_INT1=11
+CONFIG_IRQ_USB_INT2=11
+CONFIG_IRQ_USB_DMA=11
CONFIG_IRQ_TIMER8=11
CONFIG_IRQ_TIMER9=11
CONFIG_IRQ_TIMER10=11
+CONFIG_IRQ_SPORT2_RX=9
+CONFIG_IRQ_SPORT2_TX=9
+CONFIG_IRQ_SPORT3_RX=9
+CONFIG_IRQ_SPORT3_TX=9
+CONFIG_IRQ_SPI1=10
+CONFIG_IRQ_SPI2=10
+CONFIG_IRQ_TWI0=11
+CONFIG_IRQ_TWI1=11
# CONFIG_BFIN548_EZKIT is not set
CONFIG_BFIN548_BLUETECHNIX_CM=y
@@ -167,6 +214,7 @@ CONFIG_BFIN548_BLUETECHNIX_CM=y
# BF548 Specific Configuration
#
# CONFIG_DEB_DMA_URGENT is not set
+# CONFIG_BF548_ATAPI_ALTERNATIVE_PORT is not set
#
# Interrupt Priority Assignment
@@ -182,7 +230,6 @@ CONFIG_IRQ_SPORT1_ERR=7
CONFIG_IRQ_SPI0_ERR=7
CONFIG_IRQ_UART0_ERR=7
CONFIG_IRQ_EPPI0=8
-CONFIG_IRQ_SPI0=10
CONFIG_IRQ_PINT0=12
CONFIG_IRQ_PINT1=12
CONFIG_IRQ_MDMAS0=13
@@ -197,18 +244,10 @@ CONFIG_IRQ_SPI2_ERR=7
CONFIG_IRQ_UART1_ERR=7
CONFIG_IRQ_UART2_ERR=7
CONFIG_IRQ_CAN0_ERR=7
-CONFIG_IRQ_SPORT2_RX=9
-CONFIG_IRQ_SPORT2_TX=9
-CONFIG_IRQ_SPORT3_RX=9
-CONFIG_IRQ_SPORT3_TX=9
CONFIG_IRQ_EPPI1=9
CONFIG_IRQ_EPPI2=9
-CONFIG_IRQ_SPI1=10
-CONFIG_IRQ_SPI2=10
CONFIG_IRQ_ATAPI_RX=10
CONFIG_IRQ_ATAPI_TX=10
-CONFIG_IRQ_TWI0=11
-CONFIG_IRQ_TWI1=11
CONFIG_IRQ_CAN0_RX=11
CONFIG_IRQ_CAN0_TX=11
CONFIG_IRQ_MDMAS2=13
@@ -255,6 +294,7 @@ CONFIG_PINT3_ASSIGN=0x02020303
# Board customizations
#
# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
#
# Clock/PLL Setup
@@ -274,16 +314,12 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
# CONFIG_GENERIC_TIME is not set
-# CONFIG_TICK_ONESHOT is not set
#
-# Memory Setup
+# Misc
#
-CONFIG_MAX_MEM_SIZE=64
-# CONFIG_MEM_MT46V32M16_6T is not set
-CONFIG_MEM_MT46V32M16_5B=y
-CONFIG_BOOT_LOAD=0x1000
CONFIG_BFIN_SCRATCH_REG_RETN=y
# CONFIG_BFIN_SCRATCH_REG_RETE is not set
# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
@@ -310,6 +346,12 @@ CONFIG_ACCESS_OK_L1=y
CONFIG_CACHELINE_ALIGNED_L1=y
# CONFIG_SYSCALL_TAB_L1 is not set
# CONFIG_CPLB_SWITCH_TAB_L1 is not set
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
CONFIG_RAMKERNEL=y
# CONFIG_ROMKERNEL is not set
CONFIG_SELECT_MEMORY_MODEL=y
@@ -318,13 +360,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
# CONFIG_BFIN_GPTIMERS is not set
+# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
CONFIG_DMA_UNCACHED_1M=y
# CONFIG_DMA_UNCACHED_NONE is not set
@@ -333,14 +378,13 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+# CONFIG_BFIN_L2_ICACHEABLE is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
-CONFIG_BFIN_EXTMEM_WRITEBACK=y
-# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
-# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_EXTMEM_WRITEBACK
+CONFIG_BFIN_EXTMEM_WRITETHROUGH=y
# CONFIG_BFIN_L2_DCACHEABLE is not set
#
@@ -349,7 +393,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -369,7 +413,7 @@ CONFIG_C_AMBEN_ALL=y
CONFIG_BANK_0=0x7BB0
CONFIG_BANK_1=0x5554
CONFIG_BANK_2=0x7BB0
-CONFIG_BANK_3=0x99B2
+CONFIG_BANK_3=0x99B3
CONFIG_EBIU_MBSCTLVAL=0x0
CONFIG_EBIU_MODEVAL=0x1
CONFIG_EBIU_FCTLVAL=0x6
@@ -377,7 +421,6 @@ CONFIG_EBIU_FCTLVAL=0x6
#
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
-# CONFIG_PCI is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
# CONFIG_PCCARD is not set
@@ -388,23 +431,19 @@ CONFIG_BINFMT_ELF_FDPIC=y
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
#
# Power management options
#
# CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
-# CONFIG_PM_WAKEUP_BY_GPIO is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# CPU Frequency scaling
#
# CONFIG_CPU_FREQ is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
@@ -417,6 +456,7 @@ CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -435,19 +475,16 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
# CONFIG_NETLABEL is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
@@ -456,6 +493,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -465,24 +503,21 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -501,10 +536,12 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -539,6 +576,7 @@ CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_PSD4256G is not set
CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=y
# CONFIG_MTD_ROM is not set
@@ -549,9 +587,8 @@ CONFIG_MTD_RAM=y
#
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_START=0x20000000
-CONFIG_MTD_PHYSMAP_LEN=0
-CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_GPIO_ADDR is not set
# CONFIG_MTD_UCLINUX is not set
# CONFIG_MTD_PLATRAM is not set
@@ -575,6 +612,11 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=2
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -587,31 +629,31 @@ CONFIG_BLK_DEV=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
-CONFIG_MISC_DEVICES=y
-# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
-CONFIG_SCSI=y
+CONFIG_SCSI=m
CONFIG_SCSI_DMA=y
# CONFIG_SCSI_TGT is not set
# CONFIG_SCSI_NETLINK is not set
-# CONFIG_SCSI_PROC_FS is not set
+CONFIG_SCSI_PROC_FS=y
#
# SCSI support type (disk, tape, CD-ROM)
#
-CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SD=m
# CONFIG_CHR_DEV_ST is not set
# CONFIG_CHR_DEV_OSST is not set
-CONFIG_BLK_DEV_SR=y
-# CONFIG_BLK_DEV_SR_VENDOR is not set
+# CONFIG_BLK_DEV_SR is not set
# CONFIG_CHR_DEV_SG is not set
# CONFIG_CHR_DEV_SCH is not set
@@ -632,29 +674,54 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_LIBSAS is not set
# CONFIG_SCSI_SRP_ATTRS is not set
-CONFIG_SCSI_LOWLEVEL=y
-# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
+CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_VETH is not set
-# CONFIG_PHYLIB is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_SMC91X is not set
-CONFIG_SMSC911X=y
# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+CONFIG_SMSC911X=y
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
@@ -666,6 +733,10 @@ CONFIG_SMSC911X=y
# CONFIG_WLAN_80211 is not set
#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
# USB Network Adapters
#
# CONFIG_USB_CATC is not set
@@ -676,7 +747,6 @@ CONFIG_SMSC911X=y
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
@@ -711,6 +781,7 @@ CONFIG_INPUT_KEYBOARD=y
# CONFIG_KEYBOARD_GPIO is not set
# CONFIG_KEYBOARD_BFIN is not set
# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -726,19 +797,16 @@ CONFIG_INPUT_KEYBOARD=y
#
# Character devices
#
-# CONFIG_AD9960 is not set
-# CONFIG_SPI_ADC_BF533 is not set
-# CONFIG_BF5xx_PPIFCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+# CONFIG_BFIN_PPI is not set
+# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
-# CONFIG_BF5xx_PPI is not set
-CONFIG_BFIN_OTP=y
-# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
+# CONFIG_BFIN_SPI_ADC is not set
# CONFIG_BFIN_SPORT is not set
-# CONFIG_BFIN_TIMER_LATENCY is not set
-# CONFIG_TWI_LCD is not set
-# CONFIG_SIMPLE_GPIO is not set
+# CONFIG_BFIN_TWI_LCD is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
+CONFIG_DEVKMEM=y
+# CONFIG_BFIN_JTAG_COMM is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -749,10 +817,11 @@ CONFIG_BFIN_OTP=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
-CONFIG_SERIAL_BFIN_DMA=y
-# CONFIG_SERIAL_BFIN_PIO is not set
+# CONFIG_SERIAL_BFIN_DMA is not set
+CONFIG_SERIAL_BFIN_PIO=y
# CONFIG_SERIAL_BFIN_UART0 is not set
CONFIG_SERIAL_BFIN_UART1=y
# CONFIG_BFIN_UART1_CTSRTS is not set
@@ -762,7 +831,10 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
+CONFIG_BFIN_OTP=y
+# CONFIG_BFIN_OTP_WRITE_ENABLE is not set
#
# CAN, the car bus and industrial fieldbus
@@ -770,61 +842,53 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_CAN4LINUX is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
#
-# I2C Algorithms
+# I2C Hardware Bus support
#
-# CONFIG_I2C_ALGOBIT is not set
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
#
-# I2C Hardware Bus support
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
CONFIG_I2C_BLACKFIN_TWI=y
-CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
+CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=50
# CONFIG_I2C_GPIO is not set
# CONFIG_I2C_OCORES is not set
-# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
-# CONFIG_I2C_STUB is not set
# CONFIG_I2C_TINY_USB is not set
#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
# Miscellaneous I2C Chip support
#
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_AD5252 is not set
-# CONFIG_EEPROM_LEGACY is not set
# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_SENSORS_PCF8575 is not set
-# CONFIG_SENSORS_PCA9543 is not set
+# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
-# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
-
-CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
-
-#
-# SPI support
-#
CONFIG_SPI=y
CONFIG_SPI_MASTER=y
@@ -832,64 +896,23 @@ CONFIG_SPI_MASTER=y
# SPI Master Controller Drivers
#
CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
-CONFIG_HWMON=y
-# CONFIG_HWMON_VID is not set
-# CONFIG_SENSORS_AD7418 is not set
-# CONFIG_SENSORS_ADM1021 is not set
-# CONFIG_SENSORS_ADM1025 is not set
-# CONFIG_SENSORS_ADM1026 is not set
-# CONFIG_SENSORS_ADM1029 is not set
-# CONFIG_SENSORS_ADM1031 is not set
-# CONFIG_SENSORS_ADM9240 is not set
-# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ATXP1 is not set
-# CONFIG_SENSORS_DS1621 is not set
-# CONFIG_SENSORS_F71805F is not set
-# CONFIG_SENSORS_F71882FG is not set
-# CONFIG_SENSORS_F75375S is not set
-# CONFIG_SENSORS_GL518SM is not set
-# CONFIG_SENSORS_GL520SM is not set
-# CONFIG_SENSORS_IT87 is not set
-# CONFIG_SENSORS_LM63 is not set
-# CONFIG_SENSORS_LM70 is not set
-# CONFIG_SENSORS_LM75 is not set
-# CONFIG_SENSORS_LM77 is not set
-# CONFIG_SENSORS_LM78 is not set
-# CONFIG_SENSORS_LM80 is not set
-# CONFIG_SENSORS_LM83 is not set
-# CONFIG_SENSORS_LM85 is not set
-# CONFIG_SENSORS_LM87 is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_LM92 is not set
-# CONFIG_SENSORS_LM93 is not set
-# CONFIG_SENSORS_MAX1619 is not set
-# CONFIG_SENSORS_MAX6650 is not set
-# CONFIG_SENSORS_PC87360 is not set
-# CONFIG_SENSORS_PC87427 is not set
-# CONFIG_SENSORS_DME1737 is not set
-# CONFIG_SENSORS_SMSC47M1 is not set
-# CONFIG_SENSORS_SMSC47M192 is not set
-# CONFIG_SENSORS_SMSC47B397 is not set
-# CONFIG_SENSORS_THMC50 is not set
-# CONFIG_SENSORS_VT1211 is not set
-# CONFIG_SENSORS_W83781D is not set
-# CONFIG_SENSORS_W83791D is not set
-# CONFIG_SENSORS_W83792D is not set
-# CONFIG_SENSORS_W83793 is not set
-# CONFIG_SENSORS_W83L785TS is not set
-# CONFIG_SENSORS_W83627HF is not set
-# CONFIG_SENSORS_W83627EHF is not set
-# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -903,25 +926,43 @@ CONFIG_BFIN_WDT=y
# USB-based Watchdog Cards
#
# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
#
+
+#
+# Multimedia core support
+#
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
# CONFIG_DAB is not set
-# CONFIG_USB_DABUSB is not set
#
# Graphics support
@@ -935,80 +976,75 @@ CONFIG_SSB_POSSIBLE=y
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
# CONFIG_SOUND is not set
-CONFIG_HID_SUPPORT=y
-CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
-# CONFIG_HIDRAW is not set
-
-#
-# USB Input Devices
-#
-CONFIG_USB_HID=y
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
-# CONFIG_USB_HIDDEV is not set
+# CONFIG_HID_SUPPORT is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
-CONFIG_USB=y
+CONFIG_USB=m
# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
#
# Miscellaneous USB options
#
-# CONFIG_USB_DEVICEFS is not set
-CONFIG_USB_DEVICE_CLASS=y
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=m
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
# USB Host Controller Drivers
#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
-# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
-CONFIG_USB_MUSB_HDRC=y
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=m
CONFIG_USB_MUSB_SOC=y
#
-# Blackfin BF54x, BF525 and BF527 high speed USB support
+# Blackfin high speed USB Support
#
-CONFIG_USB_MUSB_HOST=y
-# CONFIG_USB_MUSB_PERIPHERAL is not set
+# CONFIG_USB_MUSB_HOST is not set
+CONFIG_USB_MUSB_PERIPHERAL=y
# CONFIG_USB_MUSB_OTG is not set
-CONFIG_USB_MUSB_HDRC_HCD=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
# CONFIG_MUSB_PIO_ONLY is not set
-# CONFIG_USB_INVENTRA_DMA is not set
+CONFIG_USB_INVENTRA_DMA=y
# CONFIG_USB_TI_CPPI_DMA is not set
-CONFIG_USB_MUSB_LOGLEVEL=0
+# CONFIG_USB_MUSB_DEBUG is not set
#
# USB Device Class drivers
#
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# also be needed; see USB_STORAGE Help for more info
#
-CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
-# CONFIG_USB_STORAGE_DPCM is not set
# CONFIG_USB_STORAGE_USBAT is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set
@@ -1016,6 +1052,7 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_ALAUDA is not set
# CONFIG_USB_STORAGE_ONETOUCH is not set
# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
# CONFIG_USB_LIBUSUAL is not set
#
@@ -1023,15 +1060,10 @@ CONFIG_USB_STORAGE=y
#
# CONFIG_USB_MDC800 is not set
# CONFIG_USB_MICROTEK is not set
-CONFIG_USB_MON=y
#
# USB port drivers
#
-
-#
-# USB Serial Converter support
-#
# CONFIG_USB_SERIAL is not set
#
@@ -1040,7 +1072,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_EMI62 is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_SEVSEG is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
@@ -1048,7 +1080,6 @@ CONFIG_USB_MON=y
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGET is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
@@ -1056,38 +1087,75 @@ CONFIG_USB_MON=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
-
-#
-# USB DSL modem support
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-CONFIG_MMC=y
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2272 is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+CONFIG_USB_ZERO=m
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+# CONFIG_USB_ETH_RNDIS is not set
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=m
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# MMC/SD Card Drivers
+# MMC/SD/SDIO Card Drivers
#
-CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK=m
CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# MMC/SD Host Controller Drivers
+# MMC/SD/SDIO Host Controller Drivers
#
-CONFIG_SDH_BFIN=y
+# CONFIG_MMC_SDHCI is not set
+CONFIG_SDH_BFIN=m
+# CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND is not set
+# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set
# CONFIG_MMC_SPI is not set
-# CONFIG_SPI_MMC is not set
+# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
-CONFIG_RTC_LIB=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
-# CONFIG_RTC_DEBUG is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=m
+CONFIG_RTC_CLASS=m
#
# RTC interfaces
@@ -1111,66 +1179,74 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
#
# SPI RTC drivers
#
-# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
#
# Platform RTC drivers
#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
# CONFIG_RTC_DRV_DS1553 is not set
-# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
# on-CPU RTC drivers
#
-CONFIG_RTC_DRV_BFIN=y
-
-#
-# Userspace I/O
-#
+CONFIG_RTC_DRV_BFIN=m
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
-
-#
-# PBX support
-#
-# CONFIG_PBX is not set
+# CONFIG_STAGING is not set
#
# File systems
#
-# CONFIG_EXT2_FS is not set
+CONFIG_EXT2_FS=m
+# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_INOTIFY=y
-CONFIG_INOTIFY_USER=y
-# CONFIG_QUOTA is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
+# CONFIG_ISO9660_FS is not set
# CONFIG_UDF_FS is not set
#
@@ -1194,10 +1270,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -1205,17 +1278,7 @@ CONFIG_SYSFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-CONFIG_YAFFS_FS=m
-CONFIG_YAFFS_YAFFS1=y
-# CONFIG_YAFFS_DOES_ECC is not set
-CONFIG_YAFFS_YAFFS2=y
-CONFIG_YAFFS_AUTO_YAFFS2=y
-# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
-CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=10
-# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
-# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set
-CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
-CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_WRITEBUFFER=y
# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
@@ -1227,34 +1290,30 @@ CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-# CONFIG_NFSD_V3_ACL is not set
-# CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
+# CONFIG_NFSD is not set
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
-# CONFIG_SUNRPC_BIND34 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_SMB_NLS_REMOTE="cp437"
-CONFIG_CIFS=y
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=m
# CONFIG_CIFS_STATS is not set
# CONFIG_CIFS_WEAK_PW_HASH is not set
# CONFIG_CIFS_XATTR is not set
@@ -1267,24 +1326,8 @@ CONFIG_CIFS=y
#
# Partition Types
#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
+# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-# CONFIG_SYSV68_PARTITION is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
CONFIG_NLS_CODEPAGE_437=m
@@ -1326,9 +1369,6 @@ CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
# CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
#
# Kernel hacking
@@ -1336,14 +1376,39 @@ CONFIG_INSTRUMENTATION=y
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
-CONFIG_DEBUG_MMRS=y
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_DEBUG_VERBOSE=y
+# CONFIG_DEBUG_MMRS is not set
+# CONFIG_DEBUG_DOUBLEFAULT is not set
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -1352,33 +1417,125 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
+CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
-# CONFIG_SECURITY_CAPABILITIES is not set
-# CONFIG_SECURITY_ROOTPLUG is not set
-# CONFIG_CRYPTO is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITY_TOMOYO is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-CONFIG_PLIST=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index bae4ee6e68b..a6df01dac98 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -1,15 +1,14 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24.4
-# Tue Apr 1 10:50:11 2008
+# Linux kernel version: 2.6.30.5
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
-CONFIG_SEMAPHORE_SLEEPERS=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
@@ -17,6 +16,9 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -27,62 +29,83 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
-CONFIG_FAIR_GROUP_SCHED=y
-CONFIG_FAIR_USER_SCHED=y
-# CONFIG_FAIR_CGROUP_SCHED is not set
-# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
-# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
-CONFIG_SIGNALFD=y
-CONFIG_EVENTFD=y
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3
-# CONFIG_NP2 is not set
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -99,6 +122,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
+# CONFIG_FREEZER is not set
#
# Blackfin Processor Options
@@ -107,6 +131,10 @@ CONFIG_PREEMPT_NONE=y
#
# Processor and Board Settings
#
+# CONFIG_BF512 is not set
+# CONFIG_BF514 is not set
+# CONFIG_BF516 is not set
+# CONFIG_BF518 is not set
# CONFIG_BF522 is not set
# CONFIG_BF523 is not set
# CONFIG_BF524 is not set
@@ -119,30 +147,47 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_BF534 is not set
# CONFIG_BF536 is not set
# CONFIG_BF537 is not set
+# CONFIG_BF538 is not set
+# CONFIG_BF539 is not set
# CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
# CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
# CONFIG_BF547 is not set
+# CONFIG_BF547M is not set
# CONFIG_BF548 is not set
+# CONFIG_BF548M is not set
# CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
CONFIG_BF561=y
+# CONFIG_SMP is not set
+CONFIG_BF_REV_MIN=3
+CONFIG_BF_REV_MAX=5
# CONFIG_BF_REV_0_0 is not set
# CONFIG_BF_REV_0_1 is not set
# CONFIG_BF_REV_0_2 is not set
CONFIG_BF_REV_0_3=y
# CONFIG_BF_REV_0_4 is not set
# CONFIG_BF_REV_0_5 is not set
+# CONFIG_BF_REV_0_6 is not set
# CONFIG_BF_REV_ANY is not set
# CONFIG_BF_REV_NONE is not set
-CONFIG_BFIN_DUAL_CORE=y
CONFIG_MEM_MT48LC8M32B2B5_7=y
CONFIG_IRQ_PLL_WAKEUP=7
CONFIG_IRQ_SPORT0_ERROR=7
CONFIG_IRQ_SPORT1_ERROR=7
+CONFIG_IRQ_TIMER0=10
+CONFIG_IRQ_TIMER1=10
+CONFIG_IRQ_TIMER2=10
+CONFIG_IRQ_TIMER3=10
+CONFIG_IRQ_TIMER4=10
+CONFIG_IRQ_TIMER5=10
+CONFIG_IRQ_TIMER6=10
+CONFIG_IRQ_TIMER7=10
CONFIG_IRQ_SPI_ERROR=7
# CONFIG_BFIN561_EZKIT is not set
# CONFIG_BFIN561_TEPLA is not set
CONFIG_BFIN561_BLUETECHNIX_CM=y
-# CONFIG_GENERIC_BF561_BOARD is not set
#
# BF561 Specific Configuration
@@ -151,12 +196,7 @@ CONFIG_BFIN561_BLUETECHNIX_CM=y
#
# Core B Support
#
-
-#
-# Core B Support
-#
CONFIG_BF561_COREB=y
-# CONFIG_BF561_COREB_RESET is not set
#
# Interrupt Priority Assignment
@@ -196,14 +236,6 @@ CONFIG_IRQ_DMA2_8=9
CONFIG_IRQ_DMA2_9=9
CONFIG_IRQ_DMA2_10=9
CONFIG_IRQ_DMA2_11=9
-CONFIG_IRQ_TIMER0=10
-CONFIG_IRQ_TIMER1=10
-CONFIG_IRQ_TIMER2=10
-CONFIG_IRQ_TIMER3=10
-CONFIG_IRQ_TIMER4=10
-CONFIG_IRQ_TIMER5=10
-CONFIG_IRQ_TIMER6=10
-CONFIG_IRQ_TIMER7=10
CONFIG_IRQ_TIMER8=10
CONFIG_IRQ_TIMER9=10
CONFIG_IRQ_TIMER10=10
@@ -226,6 +258,7 @@ CONFIG_IRQ_WDTIMER=13
# Board customizations
#
# CONFIG_CMDLINE_BOOL is not set
+CONFIG_BOOT_LOAD=0x1000
#
# Clock/PLL Setup
@@ -245,19 +278,20 @@ CONFIG_HZ_250=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_TICKSOURCE_GPTMR0 is not set
+CONFIG_TICKSOURCE_CORETMR=y
# CONFIG_CYCLES_CLOCKSOURCE is not set
-# CONFIG_TICK_ONESHOT is not set
+# CONFIG_GPTMR0_CLOCKSOURCE is not set
# CONFIG_NO_HZ is not set
# CONFIG_HIGH_RES_TIMERS is not set
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
#
-# Memory Setup
+# Misc
#
-CONFIG_MAX_MEM_SIZE=32
-CONFIG_BOOT_LOAD=0x1000
CONFIG_BFIN_SCRATCH_REG_RETN=y
# CONFIG_BFIN_SCRATCH_REG_RETE is not set
# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set
@@ -284,6 +318,12 @@ CONFIG_IP_CHECKSUM_L1=y
CONFIG_CACHELINE_ALIGNED_L1=y
CONFIG_SYSCALL_TAB_L1=y
CONFIG_CPLB_SWITCH_TAB_L1=y
+CONFIG_APP_STACK_L1=y
+
+#
+# Speed Optimizations
+#
+CONFIG_BFIN_INS_LOWOVERHEAD=y
CONFIG_RAMKERNEL=y
# CONFIG_ROMKERNEL is not set
CONFIG_SELECT_MEMORY_MODEL=y
@@ -292,14 +332,16 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
-CONFIG_LARGE_ALLOCS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
# CONFIG_BFIN_GPTIMERS is not set
+# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
CONFIG_DMA_UNCACHED_1M=y
# CONFIG_DMA_UNCACHED_NONE is not set
@@ -308,15 +350,16 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+# CONFIG_BFIN_L2_ICACHEABLE is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
-CONFIG_BFIN_EXTMEM_WRITEBACK=y
-# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
-# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_EXTMEM_WRITEBACK is not set
+CONFIG_BFIN_EXTMEM_WRITETHROUGH=y
# CONFIG_BFIN_L2_DCACHEABLE is not set
+# CONFIG_BFIN_L2_WRITEBACK is not set
+# CONFIG_BFIN_L2_WRITETHROUGH is not set
#
# Memory Protection Unit
@@ -324,7 +367,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -353,8 +396,8 @@ CONFIG_BANK_3=0xFFC2
#
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
-# CONFIG_PCI is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
#
# Executable file formats
@@ -363,18 +406,19 @@ CONFIG_BINFMT_ELF_FDPIC=y
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
CONFIG_BINFMT_SHARED_FLAT=y
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
#
# Power management options
#
# CONFIG_PM is not set
-CONFIG_SUSPEND_UP_POSSIBLE=y
-# CONFIG_PM_WAKEUP_BY_GPIO is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
-# Networking
+# CPU Frequency scaling
#
+# CONFIG_CPU_FREQ is not set
CONFIG_NET=y
#
@@ -383,10 +427,6 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-# CONFIG_XFRM_MIGRATE is not set
# CONFIG_NET_KEY is not set
CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set
@@ -407,14 +447,11 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
-CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
# CONFIG_NETLABEL is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set
@@ -423,6 +460,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -432,24 +470,21 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -460,16 +495,22 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -487,8 +528,10 @@ CONFIG_MTD_BLOCK=y
#
# RAM/ROM/Flash chip drivers
#
-# CONFIG_MTD_CFI is not set
+CONFIG_MTD_CFI=y
# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
@@ -499,20 +542,29 @@ CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_PSD4256G is not set
+CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=y
-# CONFIG_MTD_ROM is not set
+CONFIG_MTD_ROM=m
# CONFIG_MTD_ABSENT is not set
#
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_UCLINUX is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -528,6 +580,11 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -539,14 +596,21 @@ CONFIG_BLK_DEV=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT25 is not set
# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
-# CONFIG_BFIN_IDE_ADDRESS_MAPPING_MODE0 is not set
-# CONFIG_BFIN_IDE_ADDRESS_MAPPING_MODE1 is not set
#
# SCSI device support
@@ -558,26 +622,50 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
+CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_VETH is not set
-# CONFIG_PHYLIB is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
-CONFIG_SMC91X=y
-# CONFIG_SMSC911X is not set
+# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+CONFIG_SMSC911X=m
+# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
# CONFIG_NETDEV_1000 is not set
-# CONFIG_AX88180 is not set
# CONFIG_NETDEV_10000 is not set
#
@@ -585,10 +673,13 @@ CONFIG_SMC91X=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
@@ -609,16 +700,15 @@ CONFIG_SMC91X=y
#
# Character devices
#
-# CONFIG_AD9960 is not set
-# CONFIG_SPI_ADC_BF533 is not set
-# CONFIG_BF5xx_PPIFCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+# CONFIG_BFIN_PPI is not set
+# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
-# CONFIG_BF5xx_PPI is not set
+# CONFIG_BFIN_SPI_ADC is not set
# CONFIG_BFIN_SPORT is not set
-# CONFIG_BFIN_TIMER_LATENCY is not set
-# CONFIG_SIMPLE_GPIO is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
+# CONFIG_BFIN_JTAG_COMM is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -629,6 +719,7 @@ CONFIG_SMC91X=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -639,6 +730,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
#
@@ -647,54 +739,100 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_CAN4LINUX is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BFIN=y
+# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
#
-# SPI support
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADCXX is not set
# CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_F71882FG is not set
# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_MAX1111 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SHT15 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
# CONFIG_SENSORS_VT1211 is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
#
+
+#
+# Multimedia core support
+#
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
# CONFIG_DAB is not set
#
@@ -709,42 +847,85 @@ CONFIG_SSB_POSSIBLE=y
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
# CONFIG_SOUND is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB_ARCH_HAS_OHCI is not set
# CONFIG_USB_ARCH_HAS_EHCI is not set
# CONFIG_USB is not set
-
-#
-# Enable Host or Gadget support to see Inventra options
-#
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-# CONFIG_MMC is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=m
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2272=y
+CONFIG_USB_NET2272=m
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+CONFIG_USB_G_PRINTER=m
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_SPI=m
+# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
-
-#
-# Userspace I/O
-#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
-
-#
-# PBX support
-#
-# CONFIG_PBX is not set
+# CONFIG_STAGING is not set
#
# File systems
@@ -754,25 +935,29 @@ CONFIG_EXT2_FS_XATTR=y
# CONFIG_EXT2_FS_POSIX_ACL is not set
# CONFIG_EXT2_FS_SECURITY is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -781,8 +966,11 @@ CONFIG_INOTIFY_USER=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
@@ -794,10 +982,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -805,14 +990,28 @@ CONFIG_SYSFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_YAFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
@@ -827,11 +1026,47 @@ CONFIG_NETWORK_FILESYSTEMS=y
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
#
# Kernel hacking
@@ -839,14 +1074,40 @@ CONFIG_INSTRUMENTATION=y
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_DEBUG_VERBOSE=y
CONFIG_DEBUG_MMRS=y
+# CONFIG_DEBUG_DOUBLEFAULT is not set
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
@@ -855,33 +1116,40 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
-# CONFIG_DUAL_CORE_TEST_MODULE is not set
+CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
#
# CONFIG_KEYS is not set
CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_NETWORK is not set
-CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY_PATH is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_SECURITY_TOMOYO is not set
# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
-# CONFIG_CRC_ITU_T is not set
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=y
CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
+CONFIG_CRC7=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
-CONFIG_PLIST=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index a6a7c8ede70..bc7fae3d8b8 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -67,6 +67,7 @@ CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_RT_MUTEXES=y
CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
@@ -249,6 +250,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_LARGE_ALLOCS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=y
# CONFIG_DMA_UNCACHED_2M is not set
CONFIG_DMA_UNCACHED_1M=y
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index 1ec9ae2e964..a7e49d63122 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -68,6 +68,7 @@ CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_RT_MUTEXES=y
CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
@@ -261,6 +262,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_LARGE_ALLOCS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
# CONFIG_BFIN_GPTIMERS is not set
# CONFIG_DMA_UNCACHED_2M is not set
CONFIG_DMA_UNCACHED_1M=y
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index ff377fae061..67d12768602 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -63,6 +63,7 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
@@ -285,6 +286,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_BFIN_GPTIMERS=y
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index 814f9cacf40..52bfa6bf18d 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -72,6 +72,7 @@ CONFIG_BIG_ORDER_ALLOC_NOFAIL_MAGIC=3
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_RT_MUTEXES=y
CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
@@ -271,6 +272,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_LARGE_ALLOCS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
CONFIG_DMA_UNCACHED_2M=y
# CONFIG_DMA_UNCACHED_1M is not set
# CONFIG_DMA_UNCACHED_NONE is not set
@@ -700,7 +702,7 @@ CONFIG_INPUT_MISC=y
# CONFIG_INPUT_YEALINK is not set
CONFIG_INPUT_UINPUT=y
# CONFIG_BF53X_PFBUTTONS is not set
-# CONFIG_TWI_KEYPAD is not set
+# CONFIG_INPUT_PCF8574 is not set
#
# Hardware I/O ports
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 375e75a27ab..60adfad54db 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -1,13 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc2
-# Tue Jan 6 09:22:17 2009
+# Linux kernel version: 2.6.30.5
#
# CONFIG_MMU is not set
# CONFIG_FPU is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
CONFIG_BLACKFIN=y
+CONFIG_GENERIC_BUG=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
@@ -16,6 +16,9 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_GPIO=y
CONFIG_FORCE_MAX_ZONEORDER=14
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -26,49 +29,72 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CGROUPS is not set
# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
-# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+CONFIG_RD_LZMA=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
# CONFIG_UID16 is not set
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_HOTPLUG is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
# CONFIG_ELF_CORE is not set
-CONFIG_COMPAT_BRK=y
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
# CONFIG_AIO is not set
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
-CONFIG_RT_MUTEXES=y
-CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
# CONFIG_MODULE_FORCE_LOAD is not set
@@ -76,11 +102,8 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@@ -96,7 +119,6 @@ CONFIG_IOSCHED_CFQ=y
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_CLASSIC_RCU=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
@@ -128,10 +150,15 @@ CONFIG_BF537=y
# CONFIG_BF538 is not set
# CONFIG_BF539 is not set
# CONFIG_BF542 is not set
+# CONFIG_BF542M is not set
# CONFIG_BF544 is not set
+# CONFIG_BF544M is not set
# CONFIG_BF547 is not set
+# CONFIG_BF547M is not set
# CONFIG_BF548 is not set
+# CONFIG_BF548M is not set
# CONFIG_BF549 is not set
+# CONFIG_BF549M is not set
# CONFIG_BF561 is not set
CONFIG_BF_REV_MIN=2
CONFIG_BF_REV_MAX=3
@@ -173,11 +200,11 @@ CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_WATCH=13
CONFIG_IRQ_SPI=10
# CONFIG_BFIN537_STAMP is not set
-# CONFIG_BFIN537_BLUETECHNIX_CM is not set
+# CONFIG_BFIN537_BLUETECHNIX_CM_E is not set
+# CONFIG_BFIN537_BLUETECHNIX_CM_U is not set
CONFIG_BFIN537_BLUETECHNIX_TCM=y
# CONFIG_PNAV10 is not set
# CONFIG_CAMSIG_MINOTAUR is not set
-# CONFIG_GENERIC_BF537_BOARD is not set
#
# BF537 Specific Configuration
@@ -223,7 +250,10 @@ CONFIG_HZ=250
# CONFIG_SCHED_HRTICK is not set
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
+# CONFIG_TICKSOURCE_GPTMR0 is not set
+CONFIG_TICKSOURCE_CORETMR=y
# CONFIG_CYCLES_CLOCKSOURCE is not set
+# CONFIG_GPTMR0_CLOCKSOURCE is not set
# CONFIG_NO_HZ is not set
# CONFIG_HIGH_RES_TIMERS is not set
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -273,10 +303,12 @@ CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
# CONFIG_BFIN_GPTIMERS is not set
# CONFIG_DMA_UNCACHED_4M is not set
# CONFIG_DMA_UNCACHED_2M is not set
@@ -287,10 +319,9 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
-# CONFIG_BFIN_ICACHE_LOCK is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_EXTMEM_ICACHEABLE=y
CONFIG_BFIN_EXTMEM_DCACHEABLE=y
CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
@@ -301,7 +332,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y
# CONFIG_MPU is not set
#
-# Asynchonous Memory Configuration
+# Asynchronous Memory Configuration
#
#
@@ -327,6 +358,7 @@ CONFIG_BANK_3=0xFFC2
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
#
# Executable file formats
@@ -343,13 +375,83 @@ CONFIG_BINFMT_SHARED_FLAT=y
#
# CONFIG_PM is not set
CONFIG_ARCH_SUSPEND_POSSIBLE=y
-# CONFIG_PM_WAKEUP_BY_GPIO is not set
#
# CPU Frequency scaling
#
# CONFIG_CPU_FREQ is not set
-# CONFIG_NET is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_LRO=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -358,15 +460,21 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_AR7_PARTS is not set
#
@@ -402,9 +510,10 @@ CONFIG_MTD_CFI_I2=y
CONFIG_MTD_CFI_INTELEXT=y
# CONFIG_MTD_CFI_AMDSTD is not set
# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_PSD4256G is not set
CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_RAM=y
-# CONFIG_MTD_ROM is not set
+CONFIG_MTD_ROM=m
# CONFIG_MTD_ABSENT is not set
#
@@ -413,7 +522,7 @@ CONFIG_MTD_RAM=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
# CONFIG_MTD_PHYSMAP is not set
CONFIG_MTD_GPIO_ADDR=y
-CONFIG_MTD_UCLINUX=y
+# CONFIG_MTD_UCLINUX is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -436,6 +545,11 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_ONENAND is not set
#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
# UBI - Unsorted block images
#
# CONFIG_MTD_UBI is not set
@@ -443,15 +557,23 @@ CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
-# CONFIG_EEPROM_93CX6 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_93CX6 is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -464,6 +586,74 @@ CONFIG_HAVE_IDE=y
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_BFIN_MAC=y
+CONFIG_BFIN_MAC_USE_L1=y
+CONFIG_BFIN_TX_DESC_NUM=10
+CONFIG_BFIN_RX_DESC_NUM=20
+# CONFIG_BFIN_MAC_RMII is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
#
@@ -480,15 +670,12 @@ CONFIG_HAVE_IDE=y
#
# Character devices
#
-# CONFIG_AD9960 is not set
-# CONFIG_SPI_ADC_BF533 is not set
-# CONFIG_BF5xx_PPIFCD is not set
+CONFIG_BFIN_DMA_INTERFACE=m
+# CONFIG_BFIN_PPI is not set
+# CONFIG_BFIN_PPIFCD is not set
# CONFIG_BFIN_SIMPLE_TIMER is not set
-# CONFIG_BF5xx_PPI is not set
+# CONFIG_BFIN_SPI_ADC is not set
CONFIG_BFIN_SPORT=y
-# CONFIG_BFIN_TIMER_LATENCY is not set
-CONFIG_BFIN_DMA_INTERFACE=m
-# CONFIG_SIMPLE_GPIO is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
# CONFIG_BFIN_JTAG_COMM is not set
@@ -502,6 +689,7 @@ CONFIG_BFIN_DMA_INTERFACE=m
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_BFIN=y
CONFIG_SERIAL_BFIN_CONSOLE=y
CONFIG_SERIAL_BFIN_DMA=y
@@ -514,6 +702,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_BFIN_SPORT is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
#
@@ -534,39 +723,17 @@ CONFIG_SPI_MASTER=y
#
CONFIG_SPI_BFIN=y
# CONFIG_SPI_BFIN_LOCK is not set
+# CONFIG_SPI_BFIN_SPORT is not set
# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
#
# SPI Protocol Masters
#
-# CONFIG_EEPROM_AT25 is not set
# CONFIG_SPI_SPIDEV is not set
# CONFIG_SPI_TLE62X0 is not set
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
-CONFIG_GPIOLIB=y
-# CONFIG_DEBUG_GPIO is not set
-CONFIG_GPIO_SYSFS=y
-
-#
-# Memory mapped GPIO expanders:
-#
-
-#
-# I2C GPIO expanders:
-#
-# CONFIG_GPIO_MAX732X is not set
-# CONFIG_GPIO_PCA953X is not set
-# CONFIG_GPIO_PCF857X is not set
-
-#
-# PCI GPIO expanders:
-#
-
-#
-# SPI GPIO expanders:
-#
-# CONFIG_GPIO_MAX7301 is not set
-# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
@@ -580,6 +747,12 @@ CONFIG_WATCHDOG=y
#
# CONFIG_SOFT_WATCHDOG is not set
CONFIG_BFIN_WDT=y
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
#
# Multifunction device drivers
@@ -588,7 +761,7 @@ CONFIG_BFIN_WDT=y
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
-# CONFIG_MFD_WM8400 is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
@@ -598,6 +771,7 @@ CONFIG_BFIN_WDT=y
# Multimedia core support
#
# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
# CONFIG_VIDEO_MEDIA is not set
#
@@ -618,13 +792,81 @@ CONFIG_BFIN_WDT=y
#
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_SOUND is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_MMC is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+CONFIG_USB_GADGET_NET2272=y
+CONFIG_USB_NET2272=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+CONFIG_USB_ETH=y
+CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_SPI=m
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@@ -641,8 +883,10 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -652,6 +896,11 @@ CONFIG_INOTIFY_USER=y
# CONFIG_FUSE_FS is not set
#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -660,8 +909,11 @@ CONFIG_INOTIFY_USER=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
@@ -673,10 +925,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -684,9 +933,19 @@ CONFIG_SYSFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_YAFFS_FS is not set
-# CONFIG_JFFS2_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
@@ -695,13 +954,62 @@ CONFIG_SYSFS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
#
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
#
# Kernel hacking
@@ -714,12 +1022,30 @@ CONFIG_FRAME_WARN=1024
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_SYSCTL_SYSCALL_CHECK is not set
-# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
CONFIG_DEBUG_VERBOSE=y
@@ -733,9 +1059,10 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF=y
CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0
# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set
# CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE is not set
-# CONFIG_EARLY_PRINTK is not set
+CONFIG_EARLY_PRINTK=y
CONFIG_CPLB_INFO=y
CONFIG_ACCESS_CHECK=y
+# CONFIG_BFIN_ISRAM_SELF_TEST is not set
#
# Security options
@@ -744,20 +1071,110 @@ CONFIG_ACCESS_CHECK=y
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
-# CONFIG_CRYPTO is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+# CONFIG_CRYPTO_MANAGER is not set
+# CONFIG_CRYPTO_MANAGER2 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
# CONFIG_CRC_T10DIF is not set
-# CONFIG_CRC_ITU_T is not set
-# CONFIG_CRC32 is not set
-# CONFIG_CRC7 is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
-CONFIG_PLIST=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_DECOMPRESS_LZMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/blackfin/include/asm/asm-offsets.h b/arch/blackfin/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/blackfin/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index 10064f902d2..e6485c305ea 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -11,9 +11,6 @@
#ifndef __ASSEMBLY__
-#include <asm/sections.h>
-#include <asm/ptrace.h>
-#include <asm/user.h>
#include <linux/linkage.h>
#include <linux/types.h>
@@ -23,6 +20,12 @@
# define DMA_UNCACHED_REGION (2 * 1024 * 1024)
#elif defined(CONFIG_DMA_UNCACHED_1M)
# define DMA_UNCACHED_REGION (1024 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_512K)
+# define DMA_UNCACHED_REGION (512 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_256K)
+# define DMA_UNCACHED_REGION (256 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_128K)
+# define DMA_UNCACHED_REGION (128 * 1024)
#else
# define DMA_UNCACHED_REGION (0)
#endif
@@ -35,6 +38,7 @@ extern unsigned long get_sclk(void);
extern unsigned long sclk_to_usecs(unsigned long sclk);
extern unsigned long usecs_to_sclk(unsigned long usecs);
+struct pt_regs;
extern void dump_bfin_process(struct pt_regs *regs);
extern void dump_bfin_mem(struct pt_regs *regs);
extern void dump_bfin_trace_buffer(void);
diff --git a/arch/blackfin/include/asm/bfin-lq035q1.h b/arch/blackfin/include/asm/bfin-lq035q1.h
new file mode 100644
index 00000000000..57bc21ac229
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin-lq035q1.h
@@ -0,0 +1,28 @@
+/*
+ * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef BFIN_LQ035Q1_H
+#define BFIN_LQ035Q1_H
+
+#define LQ035_RL (0 << 8) /* Right -> Left Scan */
+#define LQ035_LR (1 << 8) /* Left -> Right Scan */
+#define LQ035_TB (1 << 9) /* Top -> Bottom Scan */
+#define LQ035_BT (0 << 9) /* Bottom -> Top Scan */
+#define LQ035_BGR (1 << 11) /* Use BGR format */
+#define LQ035_RGB (0 << 11) /* Use RGB format */
+#define LQ035_NORM (1 << 13) /* Reversal */
+#define LQ035_REV (0 << 13) /* Reversal */
+
+struct bfin_lq035q1fb_disp_info {
+
+ unsigned mode;
+ /* GPIOs */
+ int use_bl;
+ unsigned gpio_bl;
+};
+
+#endif /* BFIN_LQ035Q1_H */
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 6f4548a1355..75f6dc336d4 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -47,7 +47,7 @@
#define BUG() \
do { \
_BUG_OR_WARN(0); \
- for (;;); \
+ unreachable(); \
} while (0)
#define WARN_ON(condition) \
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 417eaac7fe9..2666ff8ea95 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -10,6 +10,7 @@
#define _BLACKFIN_CACHEFLUSH_H
#include <asm/blackfin.h> /* for SSYNC() */
+#include <asm/sections.h> /* for _ramend */
extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
diff --git a/arch/blackfin/include/asm/checksum.h b/arch/blackfin/include/asm/checksum.h
index a23415be0de..623cc7fb00b 100644
--- a/arch/blackfin/include/asm/checksum.h
+++ b/arch/blackfin/include/asm/checksum.h
@@ -9,63 +9,12 @@
#define _BFIN_CHECKSUM_H
/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-__wsum csum_partial_copy(const void *src, void *dst,
- int len, __wsum sum);
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err);
-
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy((src), (dst), (len), (sum))
-
-__sum16 ip_fast_csum(unsigned char *iph, unsigned int ihl);
-
-/*
- * Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
- while (sum >> 16)
- sum = (sum & 0xffff) + (sum >> 16);
- return ((~(sum << 16)) >> 16);
-}
-
-/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+__csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
unsigned int carry;
@@ -88,19 +37,8 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
return (sum);
}
+#define csum_tcpudp_nofold __csum_tcpudp_nofold
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
- unsigned short proto, __wsum sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-extern __sum16 ip_compute_csum(const void *buff, int len);
+#include <asm-generic/checksum.h>
-#endif /* _BFIN_CHECKSUM_H */
+#endif
diff --git a/arch/blackfin/include/asm/clocks.h b/arch/blackfin/include/asm/clocks.h
index f80dad5ff25..6f0b61852f5 100644
--- a/arch/blackfin/include/asm/clocks.h
+++ b/arch/blackfin/include/asm/clocks.h
@@ -9,6 +9,8 @@
#ifndef _BFIN_CLOCKS_H
#define _BFIN_CLOCKS_H
+#include <asm/dpmc.h>
+
#ifdef CONFIG_CCLK_DIV_1
# define CONFIG_CCLK_ACT_DIV CCLK_DIV1
# define CONFIG_CCLK_DIV 1
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 7a23d824ac9..f9172ff30e5 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -7,9 +7,9 @@
#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H
-#include <asm/scatterlist.h>
+#include <asm/cacheflush.h>
+struct scatterlist;
-void dma_alloc_init(unsigned long start, unsigned long end);
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
@@ -20,13 +20,51 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
*/
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_supported(d, m) (1)
+#define dma_get_cache_alignment() (32)
+#define dma_is_consistent(d, h) (1)
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
return 0;
}
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+extern void
+__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
+static inline void
+_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ if (!__builtin_constant_p(dir)) {
+ __dma_sync(addr, size, dir);
+ return;
+ }
+
+ switch (dir) {
+ case DMA_NONE:
+ BUG();
+ case DMA_TO_DEVICE: /* writeback only */
+ flush_dcache_range(addr, addr + size);
+ break;
+ case DMA_FROM_DEVICE: /* invalidate only */
+ case DMA_BIDIRECTIONAL: /* flush and invalidate */
+ /* Blackfin has no dedicated invalidate (it includes a flush) */
+ invalidate_dcache_range(addr, addr + size);
+ break;
+ }
+}
+
/*
* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
@@ -34,8 +72,13 @@ int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single is performed.
*/
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction);
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ _dma_sync((dma_addr_t)ptr, size, dir);
+ return (dma_addr_t) ptr;
+}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
@@ -53,8 +96,12 @@ dma_map_page(struct device *dev, struct page *page,
 * After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction);
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
@@ -80,38 +127,66 @@ dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
* the same here.
*/
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
+ enum dma_data_direction dir);
/*
* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+}
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
+ BUG_ON(!valid_dma_direction(dir));
}
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
{
+ _dma_sync(handle + offset, size, dir);
}
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir)
{
+ dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ BUG_ON(!valid_dma_direction(dir));
+}
+
+extern void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir);
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
{
+ _dma_sync((dma_addr_t)vaddr, size, dir);
}
#endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index c9a59622e23..bd2e62243ab 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -10,46 +10,70 @@
#include <linux/interrupt.h>
#include <mach/dma.h>
+#include <asm/atomic.h>
#include <asm/blackfin.h>
#include <asm/page.h>
-
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-/*****************************************************************************
-* Generic DMA Declarations
-*
-****************************************************************************/
-enum dma_chan_status {
- DMA_CHANNEL_FREE,
- DMA_CHANNEL_REQUESTED,
- DMA_CHANNEL_ENABLED,
-};
+#include <asm-generic/dma.h>
+
+/* DMA_CONFIG Masks */
+#define DMAEN 0x0001 /* DMA Channel Enable */
+#define WNR 0x0002 /* Channel Direction (W/R*) */
+#define WDSIZE_8 0x0000 /* Transfer Word Size = 8 */
+#define WDSIZE_16 0x0004 /* Transfer Word Size = 16 */
+#define WDSIZE_32 0x0008 /* Transfer Word Size = 32 */
+#define DMA2D 0x0010 /* DMA Mode (2D/1D*) */
+#define RESTART 0x0020 /* DMA Buffer Clear */
+#define DI_SEL 0x0040 /* Data Interrupt Timing Select */
+#define DI_EN 0x0080 /* Data Interrupt Enable */
+#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
+#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
+#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
+#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
+#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
+#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
+#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
+#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
+#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
+#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
+#define NDSIZE 0x0f00 /* Next Descriptor Size */
+#define DMAFLOW 0x7000 /* Flow Control */
+#define DMAFLOW_STOP 0x0000 /* Stop Mode */
+#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
+#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
+#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
+#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
+
+/* DMA_IRQ_STATUS Masks */
+#define DMA_DONE 0x0001 /* DMA Completion Interrupt Status */
+#define DMA_ERR 0x0002 /* DMA Error Interrupt Status */
+#define DFETCH 0x0004 /* DMA Descriptor Fetch Indicator */
+#define DMA_RUN 0x0008 /* DMA Channel Running Indicator */
/*-------------------------
* config reg bits value
*-------------------------*/
-#define DATA_SIZE_8 0
-#define DATA_SIZE_16 1
-#define DATA_SIZE_32 2
+#define DATA_SIZE_8 0
+#define DATA_SIZE_16 1
+#define DATA_SIZE_32 2
-#define DMA_FLOW_STOP 0
-#define DMA_FLOW_AUTO 1
-#define DMA_FLOW_ARRAY 4
-#define DMA_FLOW_SMALL 6
-#define DMA_FLOW_LARGE 7
+#define DMA_FLOW_STOP 0
+#define DMA_FLOW_AUTO 1
+#define DMA_FLOW_ARRAY 4
+#define DMA_FLOW_SMALL 6
+#define DMA_FLOW_LARGE 7
-#define DIMENSION_LINEAR 0
-#define DIMENSION_2D 1
+#define DIMENSION_LINEAR 0
+#define DIMENSION_2D 1
-#define DIR_READ 0
-#define DIR_WRITE 1
+#define DIR_READ 0
+#define DIR_WRITE 1
-#define INTR_DISABLE 0
-#define INTR_ON_BUF 2
-#define INTR_ON_ROW 3
+#define INTR_DISABLE 0
+#define INTR_ON_BUF 2
+#define INTR_ON_ROW 3
#define DMA_NOSYNC_KEEP_DMA_BUF 0
-#define DMA_SYNC_RESTART 1
+#define DMA_SYNC_RESTART 1
struct dmasg {
void *next_desc_addr;
@@ -104,11 +128,9 @@ struct dma_register {
};
-struct mutex;
struct dma_channel {
- struct mutex dmalock;
const char *device_id;
- enum dma_chan_status chan_status;
+ atomic_t chan_status;
volatile struct dma_register *regs;
struct dmasg *sg; /* large mode descriptor */
unsigned int irq;
@@ -220,27 +242,20 @@ static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize
static inline int dma_channel_active(unsigned int channel)
{
- if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE)
- return 0;
- else
- return 1;
+ return atomic_read(&dma_ch[channel].chan_status);
}
static inline void disable_dma(unsigned int channel)
{
dma_ch[channel].regs->cfg &= ~DMAEN;
SSYNC();
- dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
}
static inline void enable_dma(unsigned int channel)
{
dma_ch[channel].regs->curr_x_count = 0;
dma_ch[channel].regs->curr_y_count = 0;
dma_ch[channel].regs->cfg |= DMAEN;
- dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED;
}
-void free_dma(unsigned int channel);
-int request_dma(unsigned int channel, const char *device_id);
int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);
static inline void dma_disable_irq(unsigned int channel)
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index 925e66cb2d4..1597ae5041e 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -1,7 +1,7 @@
/*
* Miscellaneous IOCTL commands for Dynamic Power Management Controller Driver
*
- * Copyright (C) 2004-2008 Analog Device Inc.
+ * Copyright (C) 2004-2009 Analog Device Inc.
*
* Licensed under the GPL-2
*/
@@ -9,7 +9,109 @@
#ifndef _BLACKFIN_DPMC_H_
#define _BLACKFIN_DPMC_H_
-#ifdef __KERNEL__
+/* PLL_CTL Masks */
+#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
+#define PLL_OFF 0x0002 /* PLL Not Powered */
+#define STOPCK 0x0008 /* Core Clock Off */
+#define PDWN 0x0020 /* Enter Deep Sleep Mode */
+#ifdef __ADSPBF539__
+# define IN_DELAY 0x0014 /* Add 200ps Delay To EBIU Input Latches */
+# define OUT_DELAY 0x00C0 /* Add 200ps Delay To EBIU Output Signals */
+#else
+# define IN_DELAY 0x0040 /* Add 200ps Delay To EBIU Input Latches */
+# define OUT_DELAY 0x0080 /* Add 200ps Delay To EBIU Output Signals */
+#endif
+#define BYPASS 0x0100 /* Bypass the PLL */
+#define MSEL 0x7E00 /* Multiplier Select For CCLK/VCO Factors */
+#define SPORT_HYST 0x8000 /* Enable Additional Hysteresis on SPORT Input Pins */
+#define SET_MSEL(x) (((x)&0x3F) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
+
+/* PLL_DIV Masks */
+#define SSEL 0x000F /* System Select */
+#define CSEL 0x0030 /* Core Select */
+#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */
+#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */
+#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */
+#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */
+
+#define CCLK_DIV1 CSEL_DIV1
+#define CCLK_DIV2 CSEL_DIV2
+#define CCLK_DIV4 CSEL_DIV4
+#define CCLK_DIV8 CSEL_DIV8
+
+#define SET_SSEL(x) ((x) & 0xF) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
+#define SCLK_DIV(x) (x) /* SCLK = VCO / x */
+
+/* PLL_STAT Masks */
+#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
+#define FULL_ON 0x0002 /* Processor In Full On Mode */
+#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
+#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
+
+#define RTCWS 0x0400 /* RTC/Reset Wake-Up Status */
+#define CANWS 0x0800 /* CAN Wake-Up Status */
+#define USBWS 0x2000 /* USB Wake-Up Status */
+#define KPADWS 0x4000 /* Keypad Wake-Up Status */
+#define ROTWS 0x8000 /* Rotary Wake-Up Status */
+#define GPWS 0x1000 /* General-Purpose Wake-Up Status */
+
+/* VR_CTL Masks */
+#if defined(__ADSPBF52x__) || defined(__ADSPBF51x__)
+#define FREQ 0x3000 /* Switching Oscillator Frequency For Regulator */
+#define FREQ_1000 0x3000 /* Switching Frequency Is 1 MHz */
+#else
+#define FREQ 0x0003 /* Switching Oscillator Frequency For Regulator */
+#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */
+#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */
+#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */
+#endif
+#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */
+
+#define GAIN 0x000C /* Voltage Level Gain */
+#define GAIN_5 0x0000 /* GAIN = 5 */
+#define GAIN_10 0x0004 /* GAIN = 10 */
+#define GAIN_20 0x0008 /* GAIN = 20 */
+#define GAIN_50 0x000C /* GAIN = 50 */
+
+#define VLEV 0x00F0 /* Internal Voltage Level */
+#ifdef __ADSPBF52x__
+#define VLEV_085 0x0040 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
+#define VLEV_090 0x0050 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
+#define VLEV_095 0x0060 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
+#define VLEV_100 0x0070 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
+#define VLEV_105 0x0080 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
+#define VLEV_110 0x0090 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
+#define VLEV_115 0x00A0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
+#define VLEV_120 0x00B0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
+#else
+#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
+#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
+#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
+#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
+#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
+#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
+#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
+#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
+#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */
+#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */
+#endif
+
+#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */
+#define CANWE 0x0200 /* Enable CAN Wakeup From Hibernate */
+#define PHYWE 0x0400 /* Enable PHY Wakeup From Hibernate */
+#define GPWE 0x0400 /* General-Purpose Wake-Up Enable */
+#define MXVRWE 0x0400 /* Enable MXVR Wakeup From Hibernate */
+#define KPADWE 0x1000 /* Keypad Wake-Up Enable */
+#define ROTWE 0x2000 /* Rotary Wake-Up Enable */
+#define CLKBUFOE 0x4000 /* CLKIN Buffer Output Enable */
+#define SCKELOW 0x8000 /* Do Not Drive SCKE High During Reset After Hibernate */
+
+#if defined(__ADSPBF52x__) || defined(__ADSPBF51x__)
+#define USBWE 0x0200 /* Enable USB Wakeup From Hibernate */
+#else
+#define USBWE 0x0800 /* Enable USB Wakeup From Hibernate */
+#endif
+
#ifndef __ASSEMBLY__
void sleep_mode(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
@@ -54,6 +156,5 @@ struct bfin_dpmc_platform_data {
w[P0 + (x - PLL_CTL)] = R0;\
#endif
-#endif /* __KERNEL__ */
#endif /*_BLACKFIN_DPMC_H_*/
diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h
index 8e0764c81ea..5b50f0ecacf 100644
--- a/arch/blackfin/include/asm/elf.h
+++ b/arch/blackfin/include/asm/elf.h
@@ -55,7 +55,6 @@ do { \
_regs->p2 = _dynamic_addr; \
} while(0)
-#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/blackfin/include/asm/fcntl.h b/arch/blackfin/include/asm/fcntl.h
index 8727b2b382f..251c911d59c 100644
--- a/arch/blackfin/include/asm/fcntl.h
+++ b/arch/blackfin/include/asm/fcntl.h
@@ -7,8 +7,6 @@
#ifndef _BFIN_FCNTL_H
#define _BFIN_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_DIRECTORY 040000 /* must be a directory */
#define O_NOFOLLOW 0100000 /* don't follow links */
#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 5b44d05ca53..539468a0505 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -159,6 +159,11 @@ struct gpio_port_t {
};
#endif
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+void bfin_special_gpio_free(unsigned gpio);
+int bfin_special_gpio_request(unsigned gpio, const char *label);
+#endif
+
#ifdef CONFIG_PM
unsigned int bfin_pm_standby_setup(void);
diff --git a/arch/blackfin/include/asm/gptimers.h b/arch/blackfin/include/asm/gptimers.h
index 89f08decb8e..c722acdda0d 100644
--- a/arch/blackfin/include/asm/gptimers.h
+++ b/arch/blackfin/include/asm/gptimers.h
@@ -172,25 +172,25 @@
/* The actual gptimer API */
-void set_gptimer_pwidth(int timer_id, uint32_t width);
-uint32_t get_gptimer_pwidth(int timer_id);
-void set_gptimer_period(int timer_id, uint32_t period);
-uint32_t get_gptimer_period(int timer_id);
-uint32_t get_gptimer_count(int timer_id);
-int get_gptimer_intr(int timer_id);
-void clear_gptimer_intr(int timer_id);
-int get_gptimer_over(int timer_id);
-void clear_gptimer_over(int timer_id);
-void set_gptimer_config(int timer_id, uint16_t config);
-uint16_t get_gptimer_config(int timer_id);
-int get_gptimer_run(int timer_id);
-void set_gptimer_pulse_hi(int timer_id);
-void clear_gptimer_pulse_hi(int timer_id);
+void set_gptimer_pwidth(unsigned int timer_id, uint32_t width);
+uint32_t get_gptimer_pwidth(unsigned int timer_id);
+void set_gptimer_period(unsigned int timer_id, uint32_t period);
+uint32_t get_gptimer_period(unsigned int timer_id);
+uint32_t get_gptimer_count(unsigned int timer_id);
+int get_gptimer_intr(unsigned int timer_id);
+void clear_gptimer_intr(unsigned int timer_id);
+int get_gptimer_over(unsigned int timer_id);
+void clear_gptimer_over(unsigned int timer_id);
+void set_gptimer_config(unsigned int timer_id, uint16_t config);
+uint16_t get_gptimer_config(unsigned int timer_id);
+int get_gptimer_run(unsigned int timer_id);
+void set_gptimer_pulse_hi(unsigned int timer_id);
+void clear_gptimer_pulse_hi(unsigned int timer_id);
void enable_gptimers(uint16_t mask);
void disable_gptimers(uint16_t mask);
void disable_gptimers_sync(uint16_t mask);
uint16_t get_enabled_gptimers(void);
-uint32_t get_gptimer_status(int group);
-void set_gptimer_status(int group, uint32_t value);
+uint32_t get_gptimer_status(unsigned int group);
+void set_gptimer_status(unsigned int group, uint32_t value);
#endif
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index d1f5029189a..29e55b9d88b 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -31,12 +31,14 @@ static inline unsigned char readb(const volatile void __iomem *addr)
unsigned int val;
int tmp;
- __asm__ __volatile__ ("cli %1;\n\t"
- "NOP; NOP; SSYNC;\n\t"
- "%0 = b [%2] (z);\n\t"
- "sti %1;\n\t"
- : "=d"(val), "=d"(tmp): "a"(addr)
- );
+ __asm__ __volatile__ (
+ "cli %1;"
+ "NOP; NOP; SSYNC;"
+ "%0 = b [%2] (z);"
+ "sti %1;"
+ : "=d"(val), "=d"(tmp)
+ : "a"(addr)
+ );
return (unsigned char) val;
}
@@ -46,12 +48,14 @@ static inline unsigned short readw(const volatile void __iomem *addr)
unsigned int val;
int tmp;
- __asm__ __volatile__ ("cli %1;\n\t"
- "NOP; NOP; SSYNC;\n\t"
- "%0 = w [%2] (z);\n\t"
- "sti %1;\n\t"
- : "=d"(val), "=d"(tmp): "a"(addr)
- );
+ __asm__ __volatile__ (
+ "cli %1;"
+ "NOP; NOP; SSYNC;"
+ "%0 = w [%2] (z);"
+ "sti %1;"
+ : "=d"(val), "=d"(tmp)
+ : "a"(addr)
+ );
return (unsigned short) val;
}
@@ -61,20 +65,23 @@ static inline unsigned int readl(const volatile void __iomem *addr)
unsigned int val;
int tmp;
- __asm__ __volatile__ ("cli %1;\n\t"
- "NOP; NOP; SSYNC;\n\t"
- "%0 = [%2];\n\t"
- "sti %1;\n\t"
- : "=d"(val), "=d"(tmp): "a"(addr)
- );
+ __asm__ __volatile__ (
+ "cli %1;"
+ "NOP; NOP; SSYNC;"
+ "%0 = [%2];"
+ "sti %1;"
+ : "=d"(val), "=d"(tmp)
+ : "a"(addr)
+ );
+
return val;
}
#endif /* __ASSEMBLY__ */
-#define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b))
-#define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
-#define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
+#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
#define __raw_readb readb
#define __raw_readw readw
@@ -82,9 +89,9 @@ static inline unsigned int readl(const volatile void __iomem *addr)
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
-#define memset_io(a,b,c) memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#define memset_io(a, b, c) memset((void *)(a), (b), (c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
/* Convert "I/O port addresses" to actual addresses. i.e. ugly casts. */
#define __io(port) ((void *)(unsigned long)(port))
@@ -92,30 +99,30 @@ static inline unsigned int readl(const volatile void __iomem *addr)
#define inb(port) readb(__io(port))
#define inw(port) readw(__io(port))
#define inl(port) readl(__io(port))
-#define outb(x,port) writeb(x,__io(port))
-#define outw(x,port) writew(x,__io(port))
-#define outl(x,port) writel(x,__io(port))
+#define outb(x, port) writeb(x, __io(port))
+#define outw(x, port) writew(x, __io(port))
+#define outl(x, port) writel(x, __io(port))
#define inb_p(port) inb(__io(port))
#define inw_p(port) inw(__io(port))
#define inl_p(port) inl(__io(port))
-#define outb_p(x,port) outb(x,__io(port))
-#define outw_p(x,port) outw(x,__io(port))
-#define outl_p(x,port) outl(x,__io(port))
-
-#define ioread8_rep(a,d,c) readsb(a,d,c)
-#define ioread16_rep(a,d,c) readsw(a,d,c)
-#define ioread32_rep(a,d,c) readsl(a,d,c)
-#define iowrite8_rep(a,s,c) writesb(a,s,c)
-#define iowrite16_rep(a,s,c) writesw(a,s,c)
-#define iowrite32_rep(a,s,c) writesl(a,s,c)
-
-#define ioread8(X) readb(X)
-#define ioread16(X) readw(X)
-#define ioread32(X) readl(X)
-#define iowrite8(val,X) writeb(val,X)
-#define iowrite16(val,X) writew(val,X)
-#define iowrite32(val,X) writel(val,X)
+#define outb_p(x, port) outb(x, __io(port))
+#define outw_p(x, port) outw(x, __io(port))
+#define outl_p(x, port) outl(x, __io(port))
+
+#define ioread8_rep(a, d, c) readsb(a, d, c)
+#define ioread16_rep(a, d, c) readsw(a, d, c)
+#define ioread32_rep(a, d, c) readsl(a, d, c)
+#define iowrite8_rep(a, s, c) writesb(a, s, c)
+#define iowrite16_rep(a, s, c) writesw(a, s, c)
+#define iowrite32_rep(a, s, c) writesl(a, s, c)
+
+#define ioread8(x) readb(x)
+#define ioread16(x) readw(x)
+#define ioread32(x) readl(x)
+#define iowrite8(val, x) writeb(val, x)
+#define iowrite16(val, x) writew(val, x)
+#define iowrite32(val, x) writel(val, x)
#define mmiowb() wmb()
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 4617ba66278..d3b40449ca0 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,9 +35,9 @@
#include <asm/atomic.h>
#include <asm/traps.h>
-#define IPIPE_ARCH_STRING "1.11-00"
+#define IPIPE_ARCH_STRING "1.12-00"
#define IPIPE_MAJOR_NUMBER 1
-#define IPIPE_MINOR_NUMBER 11
+#define IPIPE_MINOR_NUMBER 12
#define IPIPE_PATCH_NUMBER 0
#ifdef CONFIG_SMP
@@ -124,16 +124,6 @@ static inline int __ipipe_check_tickdev(const char *devname)
return 1;
}
-static inline void __ipipe_lock_root(void)
-{
- set_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
-}
-
-static inline void __ipipe_unlock_root(void)
-{
- clear_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status));
-}
-
void __ipipe_enable_pipeline(void);
#define __ipipe_hook_critical_ipi(ipd) do { } while (0)
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index 490098f532a..00409201d9e 100644
--- a/arch/blackfin/include/asm/ipipe_base.h
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -51,23 +51,15 @@
extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
-#define __ipipe_stall_root() \
- do { \
- volatile unsigned long *p = &__ipipe_root_status; \
- set_bit(0, p); \
- } while (0)
-
-#define __ipipe_test_and_stall_root() \
- ({ \
- volatile unsigned long *p = &__ipipe_root_status; \
- test_and_set_bit(0, p); \
- })
-
-#define __ipipe_test_root() \
- ({ \
- const unsigned long *p = &__ipipe_root_status; \
- test_bit(0, p); \
- })
+void __ipipe_stall_root(void);
+
+unsigned long __ipipe_test_and_stall_root(void);
+
+unsigned long __ipipe_test_root(void);
+
+void __ipipe_lock_root(void);
+
+void __ipipe_unlock_root(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 9b19a19d9ae..813a1af3e86 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -33,6 +33,7 @@ static inline unsigned long bfin_cli(void)
#ifdef CONFIG_IPIPE
+#include <linux/compiler.h>
#include <linux/ipipe_base.h>
#include <linux/ipipe_trace.h>
@@ -49,12 +50,12 @@ static inline unsigned long bfin_cli(void)
barrier(); \
} while (0)
-static inline void raw_local_irq_enable(void)
-{
- barrier();
- ipipe_check_context(ipipe_root_domain);
- __ipipe_unstall_root();
-}
+#define raw_local_irq_enable() \
+ do { \
+ barrier(); \
+ ipipe_check_context(ipipe_root_domain); \
+ __ipipe_unstall_root(); \
+ } while (0)
#define raw_local_save_flags_ptr(x) \
do { \
diff --git a/arch/blackfin/include/asm/kgdb.h b/arch/blackfin/include/asm/kgdb.h
index c8b256d2ea3..8651afe1299 100644
--- a/arch/blackfin/include/asm/kgdb.h
+++ b/arch/blackfin/include/asm/kgdb.h
@@ -10,9 +10,6 @@
#include <linux/ptrace.h>
-/* gdb locks */
-#define KGDB_MAX_NO_CPUS 8
-
/*
* BUFMAX defines the maximum number of characters in inbound/outbound buffers.
* At least NUMREGBYTES*2 are needed for register packets.
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 4179e329b9c..7c8fe834ff2 100644
--- a/arch/blackfin/include/asm/mem_init.h
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -295,156 +295,3 @@
#else
#define PLL_BYPASS 0
#endif
-
-/***************************************Currently Not Being Used *********************************/
-
-#if defined(CONFIG_FLASH_SPEED_BWAT) && \
-defined(CONFIG_FLASH_SPEED_BRAT) && \
-defined(CONFIG_FLASH_SPEED_BHT) && \
-defined(CONFIG_FLASH_SPEED_BST) && \
-defined(CONFIG_FLASH_SPEED_BTT)
-
-#define flash_EBIU_AMBCTL_WAT ((CONFIG_FLASH_SPEED_BWAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_RAT ((CONFIG_FLASH_SPEED_BRAT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_HT ((CONFIG_FLASH_SPEED_BHT * 4) / (4000000000 / CONFIG_SCLK_HZ))
-#define flash_EBIU_AMBCTL_ST ((CONFIG_FLASH_SPEED_BST * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-#define flash_EBIU_AMBCTL_TT ((CONFIG_FLASH_SPEED_BTT * 4) / (4000000000 / CONFIG_SCLK_HZ)) + 1
-
-#if (flash_EBIU_AMBCTL_TT > 3)
-#define flash_EBIU_AMBCTL0_TT B0TT_4
-#endif
-#if (flash_EBIU_AMBCTL_TT == 3)
-#define flash_EBIU_AMBCTL0_TT B0TT_3
-#endif
-#if (flash_EBIU_AMBCTL_TT == 2)
-#define flash_EBIU_AMBCTL0_TT B0TT_2
-#endif
-#if (flash_EBIU_AMBCTL_TT < 2)
-#define flash_EBIU_AMBCTL0_TT B0TT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_ST > 3)
-#define flash_EBIU_AMBCTL0_ST B0ST_4
-#endif
-#if (flash_EBIU_AMBCTL_ST == 3)
-#define flash_EBIU_AMBCTL0_ST B0ST_3
-#endif
-#if (flash_EBIU_AMBCTL_ST == 2)
-#define flash_EBIU_AMBCTL0_ST B0ST_2
-#endif
-#if (flash_EBIU_AMBCTL_ST < 2)
-#define flash_EBIU_AMBCTL0_ST B0ST_1
-#endif
-
-#if (flash_EBIU_AMBCTL_HT > 2)
-#define flash_EBIU_AMBCTL0_HT B0HT_3
-#endif
-#if (flash_EBIU_AMBCTL_HT == 2)
-#define flash_EBIU_AMBCTL0_HT B0HT_2
-#endif
-#if (flash_EBIU_AMBCTL_HT == 1)
-#define flash_EBIU_AMBCTL0_HT B0HT_1
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT == 0)
-#define flash_EBIU_AMBCTL0_HT B0HT_0
-#endif
-#if (flash_EBIU_AMBCTL_HT == 0 && CONFIG_FLASH_SPEED_BHT != 0)
-#define flash_EBIU_AMBCTL0_HT B0HT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_WAT > 14)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_15
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 14)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_14
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 13)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_13
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 12)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_12
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 11)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_11
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 10)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_10
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 9)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_9
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 8)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_8
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 7)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_7
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 6)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_6
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 5)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_5
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 4)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_4
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 3)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_3
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 2)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_2
-#endif
-#if (flash_EBIU_AMBCTL_WAT == 1)
-#define flash_EBIU_AMBCTL0_WAT B0WAT_1
-#endif
-
-#if (flash_EBIU_AMBCTL_RAT > 14)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_15
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 14)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_14
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 13)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_13
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 12)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_12
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 11)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_11
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 10)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_10
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 9)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_9
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 8)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_8
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 7)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_7
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 6)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_6
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 5)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_5
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 4)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_4
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 3)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_3
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 2)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_2
-#endif
-#if (flash_EBIU_AMBCTL_RAT == 1)
-#define flash_EBIU_AMBCTL0_RAT B0RAT_1
-#endif
-
-#define flash_EBIU_AMBCTL0 \
- (flash_EBIU_AMBCTL0_WAT | flash_EBIU_AMBCTL0_RAT | flash_EBIU_AMBCTL0_HT | \
- flash_EBIU_AMBCTL0_ST | flash_EBIU_AMBCTL0_TT | CONFIG_FLASH_SPEED_RDYEN)
-#endif
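
The block removed above computed EBIU flash wait states at preprocessor time from flash access times and the system clock. A minimal standalone sketch of that arithmetic, assuming the access times are in nanoseconds and SCLK is in Hz (units inferred from the deleted macros, not stated by them):

/*
 * ns_to_sclk_cycles(): same math as the deleted flash_EBIU_AMBCTL_*
 * macros, done at run time.  4000000000 / sclk_hz is the SCLK period
 * in quarter-nanoseconds, so (ns * 4) / period yields whole cycles;
 * the +1 mirrors the guard cycle the originals added.
 */
#include <stdio.h>

static unsigned int ns_to_sclk_cycles(unsigned int ns, unsigned long sclk_hz)
{
	return (ns * 4) / (4000000000UL / sclk_hz) + 1;
}

int main(void)
{
	/* e.g. a 90 ns access time against an assumed 133 MHz SCLK */
	printf("wait states: %u\n", ns_to_sclk_cycles(90, 133000000UL));
	return 0;
}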
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index 4a3be376ad5..ae8ef4ffd80 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -66,8 +66,8 @@ activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
-static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
- struct task_struct *tsk)
+static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+ struct task_struct *tsk)
{
#ifdef CONFIG_MPU
unsigned int cpu = smp_processor_id();
@@ -95,7 +95,24 @@ static inline void switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_m
#endif
}
+#ifdef CONFIG_IPIPE
+#define lock_mm_switch(flags) local_irq_save_hw_cond(flags)
+#define unlock_mm_switch(flags) local_irq_restore_hw_cond(flags)
+#else
+#define lock_mm_switch(flags) do { (void)(flags); } while (0)
+#define unlock_mm_switch(flags) do { (void)(flags); } while (0)
+#endif /* CONFIG_IPIPE */
+
#ifdef CONFIG_MPU
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ lock_mm_switch(flags);
+ __switch_mm(prev, next, tsk);
+ unlock_mm_switch(flags);
+}
+
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
unsigned long flags)
{
@@ -128,6 +145,12 @@ static inline void update_protections(struct mm_struct *mm)
set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
}
}
+#else /* !CONFIG_MPU */
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ __switch_mm(prev, next, tsk);
+}
#endif
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
@@ -173,4 +196,10 @@ static inline void destroy_context(struct mm_struct *mm)
#endif
}
+#define ipipe_mm_switch_protect(flags) \
+ local_irq_save_hw_cond(flags)
+
+#define ipipe_mm_switch_unprotect(flags) \
+ local_irq_restore_hw_cond(flags)
+
#endif
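
The mmu_context.h change wraps __switch_mm() in lock_mm_switch()/unlock_mm_switch(), which expand to hardware IRQ save/restore only under CONFIG_IPIPE and compile away otherwise. A small sketch of that compile-time-selected guard pattern, with the I-pipe macros stubbed out since local_irq_save_hw_cond() only exists in this tree:

#include <stdio.h>

#ifdef CONFIG_IPIPE
# define lock_mm_switch(flags)   local_irq_save_hw_cond(flags)
# define unlock_mm_switch(flags) local_irq_restore_hw_cond(flags)
#else
# define lock_mm_switch(flags)   do { (void)(flags); } while (0)
# define unlock_mm_switch(flags) do { (void)(flags); } while (0)
#endif

/* stand-in for the real __switch_mm(); just shows the call ordering */
static void __switch_mm(int prev, int next)
{
	printf("switching mm %d -> %d\n", prev, next);
}

static void switch_mm(int prev, int next)
{
	unsigned long flags;

	lock_mm_switch(flags);		/* no-op unless CONFIG_IPIPE */
	__switch_mm(prev, next);
	unlock_mm_switch(flags);
}

int main(void)
{
	switch_mm(1, 2);
	return 0;
}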
diff --git a/arch/blackfin/include/asm/module.h b/arch/blackfin/include/asm/module.h
index 9c1cfffddd9..4282b169ead 100644
--- a/arch/blackfin/include/asm/module.h
+++ b/arch/blackfin/include/asm/module.h
@@ -7,8 +7,6 @@
#ifndef _ASM_BFIN_MODULE_H
#define _ASM_BFIN_MODULE_H
-#define MODULE_SYMBOL_PREFIX "_"
-
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
diff --git a/arch/blackfin/include/asm/pci.h b/arch/blackfin/include/asm/pci.h
index 61277358c86..99cae2e3bac 100644
--- a/arch/blackfin/include/asm/pci.h
+++ b/arch/blackfin/include/asm/pci.h
@@ -4,145 +4,19 @@
#define _ASM_BFIN_PCI_H
#include <asm/scatterlist.h>
+#include <asm-generic/pci-dma-compat.h>
+#include <asm-generic/pci.h>
-/*
- *
- * Written by Wout Klaren.
- */
-
-/* Added by Chang Junxiao */
#define PCIBIOS_MIN_IO 0x00001000
#define PCIBIOS_MIN_MEM 0x10000000
-#define PCI_DMA_BUS_IS_PHYS (1)
-struct pci_ops;
-
-/*
- * Structure with hardware dependent information and functions of the
- * PCI bus.
- */
-struct pci_bus_info {
-
- /*
- * Resources of the PCI bus.
- */
- struct resource mem_space;
- struct resource io_space;
-
- /*
- * System dependent functions.
- */
- struct pci_ops *bfin_pci_ops;
- void (*fixup) (int pci_modify);
- void (*conf_device) (unsigned char bus, unsigned char device_fn);
-};
-
-#define pcibios_assign_all_busses() 0
static inline void pcibios_set_master(struct pci_dev *dev)
{
-
/* No special bus mastering setup handling */
}
static inline void pcibios_penalize_isa_irq(int irq)
{
-
/* We don't do dynamic PCI IRQ allocation */
}
-static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /* return virt_to_bus(ptr); */
- return (dma_addr_t) ptr;
-}
-
-/* Unmap a single streaming mode DMA translation. The dma_addr and size
- * must match what was provided for in a previous pci_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guarenteed to see
- * whatever the device wrote there.
- */
-static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /* Nothing to do */
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scather-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
- return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /* Nothing to do */
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, the
- * device again owns the buffer.
- */
-static inline void pci_dma_sync_single(struct pci_dev *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /* Nothing to do */
-}
-
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
- struct scatterlist *sg, int nelems,
- int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /* Nothing to do */
-}
#endif /* _ASM_BFIN_PCI_H */
diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h
index 27290c955a7..b33a4488f49 100644
--- a/arch/blackfin/include/asm/ptrace.h
+++ b/arch/blackfin/include/asm/ptrace.h
@@ -89,9 +89,9 @@ struct pt_regs {
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13 /* ptrace signal */
-#define PTRACE_GETFDPIC 31
-#define PTRACE_GETFDPIC_EXEC 0
-#define PTRACE_GETFDPIC_INTERP 1
+#define PTRACE_GETFDPIC 31 /* get the ELF fdpic loadmap address */
+#define PTRACE_GETFDPIC_EXEC 0 /* [addr] request the executable loadmap */
+#define PTRACE_GETFDPIC_INTERP 1 /* [addr] request the interpreter loadmap */
#define PS_S (0x0002)
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 1f5381fbb4a..42f6c53c59c 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -13,10 +13,18 @@ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
extern unsigned long _ramstart, _ramend, _rambase;
extern unsigned long memory_start, memory_end, physical_mem_end;
-extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
- _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
- _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
- _ebss_l2[], _l2_lma_start[];
+/*
+ * The weak markings on the lengths might seem weird, but this is required
+ * in order to make gcc accept the fact that these may actually have a value
+ * of 0 (since they aren't actually addresses, but sizes of sections).
+ */
+extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[];
+extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[],
+ _data_l1_lma[], __weak _data_l1_len[];
+extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
+ _data_b_l1_lma[], __weak _data_b_l1_len[];
+extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[],
+ _sbss_l2[], _ebss_l2[], _l2_lma[], __weak _l2_len[];
#include <asm/mem_map.h>
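
The new _*_len symbols carry a section size in the symbol's address, and the __weak marking lets builds without that section resolve the reference to 0. A standalone illustration of the trick, deliberately leaving the symbol undefined so the weak reference reads back as zero:

#include <stdio.h>

/* normally assigned by the linker script; left undefined here on purpose */
extern char _text_l1_len[] __attribute__((weak));

int main(void)
{
	unsigned long len = (unsigned long)_text_l1_len;

	if (len)
		printf("L1 text is %lu bytes\n", len);
	else
		printf("no L1 text section in this build\n");
	return 0;
}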
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index b0c7f0ee4b0..1942ccfedbe 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_read_lock_asm(volatile int *ptr);
-asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
-asmlinkage void __raw_write_lock_asm(volatile int *ptr);
-asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
-asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
-
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+asmlinkage void arch_read_lock_asm(volatile int *ptr);
+asmlinkage int arch_read_trylock_asm(volatile int *ptr);
+asmlinkage void arch_read_unlock_asm(volatile int *ptr);
+asmlinkage void arch_write_lock_asm(volatile int *ptr);
+asmlinkage int arch_write_trylock_asm(volatile int *ptr);
+asmlinkage void arch_write_unlock_asm(volatile int *ptr);
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __raw_spin_is_locked_asm(&lock->lock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__raw_spin_lock_asm(&lock->lock);
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __raw_spin_trylock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__raw_spin_unlock_asm(&lock->lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_read_can_lock(raw_rwlock_t *rw)
+static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- __raw_read_lock_asm(&rw->lock);
+ arch_read_lock_asm(&rw->lock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return __raw_read_trylock_asm(&rw->lock);
+ return arch_read_trylock_asm(&rw->lock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __raw_read_unlock_asm(&rw->lock);
+ arch_read_unlock_asm(&rw->lock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- __raw_write_lock_asm(&rw->lock);
+ arch_write_lock_asm(&rw->lock);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return __raw_write_trylock_asm(&rw->lock);
+ return arch_write_trylock_asm(&rw->lock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- __raw_write_unlock_asm(&rw->lock);
+ arch_write_unlock_asm(&rw->lock);
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif
diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h
index be75762c061..1a33608c958 100644
--- a/arch/blackfin/include/asm/spinlock_types.h
+++ b/arch/blackfin/include/asm/spinlock_types.h
@@ -15,14 +15,14 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index afb3a862638..a40d9368c38 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -103,11 +103,13 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
#define TIF_FREEZE 6 /* is freezing for suspend */
#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
+#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_FREEZE (1<<TIF_FREEZE)
diff --git a/arch/blackfin/include/asm/trace.h b/arch/blackfin/include/asm/trace.h
index 609ad3c8418..dc0aa55ae77 100644
--- a/arch/blackfin/include/asm/trace.h
+++ b/arch/blackfin/include/asm/trace.h
@@ -28,6 +28,8 @@ extern unsigned long software_trace_buff[];
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
+#define trace_buffer_init() bfin_write_TBUFCTL(BFIN_TRACE_INIT)
+
#define trace_buffer_save(x) \
do { \
(x) = bfin_read_TBUFCTL(); \
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index c03b8532aad..1c0d190adae 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -17,9 +17,7 @@
#include <linux/string.h>
#include <asm/segment.h>
-#ifdef CONFIG_ACCESS_CHECK
-# include <asm/bfin-global.h>
-#endif
+#include <asm/sections.h>
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 779be02a910..22886cbdae7 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -388,8 +388,9 @@
#define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
#define __NR_perf_event_open 369
+#define __NR_recvmmsg 370
-#define __NR_syscall 370
+#define __NR_syscall 371
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
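
With __NR_recvmmsg assigned number 370 above, userspace can reach the call through syscall(2) before the C library grows a wrapper. A hedged probe (370 is the Blackfin value from this hunk; other architectures map it to something else entirely, so the result is only meaningful on a kernel built from this tree):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* bogus fd and NULL vector: we only care whether the number exists */
	long ret = syscall(370, -1, (void *)0, 0, 0, (void *)0);

	if (ret < 0 && errno == ENOSYS)
		printf("syscall 370 (recvmmsg) not implemented here\n");
	else
		printf("syscall 370 returned %ld, errno %d\n", ret, errno);
	return 0;
}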
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 3946aff4f41..924c00286ba 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -37,9 +37,8 @@ static int __init blackfin_dma_init(void)
printk(KERN_INFO "Blackfin DMA Controller\n");
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
- dma_ch[i].chan_status = DMA_CHANNEL_FREE;
+ atomic_set(&dma_ch[i].chan_status, 0);
dma_ch[i].regs = dma_io_base_addr[i];
- mutex_init(&(dma_ch[i].dmalock));
}
/* Mark MEMDMA Channel 0 as requested since we're using it internally */
request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
@@ -60,7 +59,7 @@ static int proc_dma_show(struct seq_file *m, void *v)
int i;
for (i = 0; i < MAX_DMA_CHANNELS; ++i)
- if (dma_ch[i].chan_status != DMA_CHANNEL_FREE)
+ if (dma_channel_active(i))
seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);
return 0;
@@ -107,20 +106,11 @@ int request_dma(unsigned int channel, const char *device_id)
}
#endif
- mutex_lock(&(dma_ch[channel].dmalock));
-
- if ((dma_ch[channel].chan_status == DMA_CHANNEL_REQUESTED)
- || (dma_ch[channel].chan_status == DMA_CHANNEL_ENABLED)) {
- mutex_unlock(&(dma_ch[channel].dmalock));
+ if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
pr_debug("DMA CHANNEL IN USE \n");
return -EBUSY;
- } else {
- dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
- pr_debug("DMA CHANNEL IS ALLOCATED \n");
}
- mutex_unlock(&(dma_ch[channel].dmalock));
-
#ifdef CONFIG_BF54x
if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
unsigned int per_map;
@@ -148,21 +138,20 @@ EXPORT_SYMBOL(request_dma);
int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
{
- BUG_ON(channel >= MAX_DMA_CHANNELS ||
- dma_ch[channel].chan_status == DMA_CHANNEL_FREE);
+ int ret;
+ unsigned int irq;
- if (callback != NULL) {
- int ret;
- unsigned int irq = channel2irq(channel);
+ BUG_ON(channel >= MAX_DMA_CHANNELS || !callback ||
+ !atomic_read(&dma_ch[channel].chan_status));
- ret = request_irq(irq, callback, IRQF_DISABLED,
- dma_ch[channel].device_id, data);
- if (ret)
- return ret;
+ irq = channel2irq(channel);
+ ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data);
+ if (ret)
+ return ret;
+
+ dma_ch[channel].irq = irq;
+ dma_ch[channel].data = data;
- dma_ch[channel].irq = irq;
- dma_ch[channel].data = data;
- }
return 0;
}
EXPORT_SYMBOL(set_dma_callback);
@@ -184,7 +173,7 @@ void free_dma(unsigned int channel)
{
pr_debug("freedma() : BEGIN \n");
BUG_ON(channel >= MAX_DMA_CHANNELS ||
- dma_ch[channel].chan_status == DMA_CHANNEL_FREE);
+ !atomic_read(&dma_ch[channel].chan_status));
/* Halt the DMA */
disable_dma(channel);
@@ -194,9 +183,7 @@ void free_dma(unsigned int channel)
free_irq(dma_ch[channel].irq, dma_ch[channel].data);
/* Clear the DMA Variable in the Channel */
- mutex_lock(&(dma_ch[channel].dmalock));
- dma_ch[channel].chan_status = DMA_CHANNEL_FREE;
- mutex_unlock(&(dma_ch[channel].dmalock));
+ atomic_set(&dma_ch[channel].chan_status, 0);
pr_debug("freedma() : END \n");
}
@@ -210,13 +197,14 @@ int blackfin_dma_suspend(void)
{
int i;
- for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) {
- if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) {
+ for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+ if (dma_ch[i].regs->cfg & DMAEN) {
printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
return -EBUSY;
}
- dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
+ if (i < MAX_DMA_SUSPEND_CHANNELS)
+ dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
}
return 0;
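
The DMA rework above replaces a mutex plus tri-state chan_status with a single atomic_cmpxchg(&chan_status, 0, 1): the channel is claimed if and only if the old value was 0. A userspace model of that reservation scheme, with GCC's __sync builtins standing in for the kernel atomics:

#include <stdio.h>

#define MAX_DMA_CHANNELS 8

static int chan_status[MAX_DMA_CHANNELS];	/* 0 = free, 1 = in use */

static int request_channel(unsigned int channel)
{
	/* returns the previous value: non-zero means someone beat us to it */
	if (__sync_val_compare_and_swap(&chan_status[channel], 0, 1))
		return -1;			/* -EBUSY in the kernel code */
	return 0;
}

static void free_channel(unsigned int channel)
{
	__sync_lock_release(&chan_status[channel]);	/* back to 0 */
}

int main(void)
{
	printf("first request:  %d\n", request_channel(3));	/* 0  */
	printf("second request: %d\n", request_channel(3));	/* -1 */
	free_channel(3);
	printf("after free:     %d\n", request_channel(3));	/* 0  */
	return 0;
}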
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 22705eeff34..a174596cc00 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -100,6 +100,12 @@ u8 pmux_offset[][16] = {
};
# endif
+#elif defined(BF538_FAMILY)
+static unsigned short * const port_fer[] = {
+ (unsigned short *) PORTCIO_FER,
+ (unsigned short *) PORTDIO_FER,
+ (unsigned short *) PORTEIO_FER,
+};
#endif
static unsigned short reserved_gpio_map[GPIO_BANK_NUM];
@@ -163,6 +169,27 @@ static int cmp_label(unsigned short ident, const char *label)
static void port_setup(unsigned gpio, unsigned short usage)
{
+#if defined(BF538_FAMILY)
+ /*
+ * BF538/9 Port C,D and E are special.
+ * Inverted PORT_FER polarity on CDE and no PORT_FER on F.
+ * Regular PORT F GPIOs are handled here, CDE are exclusively
+ * managed by GPIOLIB
+ */
+
+ if (gpio < MAX_BLACKFIN_GPIOS || gpio >= MAX_RESOURCES)
+ return;
+
+ gpio -= MAX_BLACKFIN_GPIOS;
+
+ if (usage == GPIO_USAGE)
+ *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
+ else
+ *port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+ SSYNC();
+ return;
+#endif
+
if (check_gpio(gpio))
return;
@@ -762,6 +789,8 @@ int peripheral_request(unsigned short per, const char *label)
if (!(per & P_DEFINED))
return -ENODEV;
+ BUG_ON(ident >= MAX_RESOURCES);
+
local_irq_save_hw(flags);
/* If a pin can be muxed as either GPIO or peripheral, make
@@ -979,6 +1008,76 @@ void bfin_gpio_free(unsigned gpio)
}
EXPORT_SYMBOL(bfin_gpio_free);
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+static unsigned short reserved_special_gpio_map[gpio_bank(MAX_RESOURCES)];
+
+int bfin_special_gpio_request(unsigned gpio, const char *label)
+{
+ unsigned long flags;
+
+ local_irq_save_hw(flags);
+
+ /*
+ * Allow the same GPIO to be requested twice
+ * by the same driver: in that case do nothing
+ * and return success.
+ */
+
+ if (cmp_label(gpio, label) == 0) {
+ local_irq_restore_hw(flags);
+ return 0;
+ }
+
+ if (unlikely(reserved_special_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+ local_irq_restore_hw(flags);
+ printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
+ gpio, get_label(gpio));
+
+ return -EBUSY;
+ }
+ if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+ local_irq_restore_hw(flags);
+ printk(KERN_ERR
+ "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
+ gpio, get_label(gpio));
+
+ return -EBUSY;
+ }
+
+ reserved_special_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
+ reserved_peri_map[gpio_bank(gpio)] |= gpio_bit(gpio);
+
+ set_label(gpio, label);
+ local_irq_restore_hw(flags);
+ port_setup(gpio, GPIO_USAGE);
+
+ return 0;
+}
+EXPORT_SYMBOL(bfin_special_gpio_request);
+
+void bfin_special_gpio_free(unsigned gpio)
+{
+ unsigned long flags;
+
+ might_sleep();
+
+ local_irq_save_hw(flags);
+
+ if (unlikely(!(reserved_special_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
+ gpio_error(gpio);
+ local_irq_restore_hw(flags);
+ return;
+ }
+
+ reserved_special_gpio_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+ reserved_peri_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+ set_label(gpio, "free");
+ local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(bfin_special_gpio_free);
+#endif
+
+
int bfin_gpio_irq_request(unsigned gpio, const char *label)
{
unsigned long flags;
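
bfin_special_gpio_request()/free() above track ownership with one unsigned short of bits per 16-pin bank, indexed through gpio_bank()/gpio_bit(). The same bookkeeping in a self-contained form (the two helpers are reimplemented locally with the kernel's 16-pins-per-bank convention):

#include <stdio.h>

#define GPIO_BANKSIZE	16
#define gpio_bank(x)	((x) / GPIO_BANKSIZE)
#define gpio_bit(x)	(1 << ((x) % GPIO_BANKSIZE))

static unsigned short reserved_map[4];		/* 4 banks = 64 pins */

static int reserve_gpio(unsigned int gpio)
{
	if (reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))
		return -1;			/* already taken (-EBUSY) */
	reserved_map[gpio_bank(gpio)] |= gpio_bit(gpio);
	return 0;
}

static void release_gpio(unsigned int gpio)
{
	reserved_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
}

int main(void)
{
	printf("reserve 35: %d\n", reserve_gpio(35));	/* 0  */
	printf("reserve 35: %d\n", reserve_gpio(35));	/* -1 */
	release_gpio(35);
	printf("reserve 35: %d\n", reserve_gpio(35));	/* 0  */
	return 0;
}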
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index b52c1f8c4bc..8d42b9e50df 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -92,6 +92,6 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
icplb_tbl[cpu][i_i++].data = 0;
}
-void generate_cplb_tables_all(void)
+void __init generate_cplb_tables_all(void)
{
}
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 69e0e530d70..930c01c0681 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -113,11 +113,11 @@ static noinline int dcplb_miss(unsigned int cpu)
addr = L2_START;
d_data = L2_DMEMORY;
} else if (addr >= physical_mem_end) {
- if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
- && (status & FAULT_USERSUPV)) {
- addr &= ~0x3fffff;
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+ addr &= ~(4 * 1024 * 1024 - 1);
d_data &= ~PAGE_SIZE_4KB;
d_data |= PAGE_SIZE_4MB;
+ d_data |= CPLB_USER_RD | CPLB_USER_WR;
} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
addr &= ~(1 * 1024 * 1024 - 1);
@@ -203,7 +203,12 @@ static noinline int icplb_miss(unsigned int cpu)
addr = L2_START;
i_data = L2_IMEMORY;
} else if (addr >= physical_mem_end) {
- if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+ addr &= ~(4 * 1024 * 1024 - 1);
+ i_data &= ~PAGE_SIZE_4KB;
+ i_data |= PAGE_SIZE_4MB;
+ i_data |= CPLB_USER_RD;
+ } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & FAULT_USERSUPV)) {
addr &= ~(1 * 1024 * 1024 - 1);
i_data &= ~PAGE_SIZE_4KB;
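
Both CPLB miss handlers above round the faulting address down to its covering page with a power-of-two mask: addr & ~(S - 1) for page size S. A quick standalone check of that identity for the 4 MiB async-bank case:

#include <stdio.h>

int main(void)
{
	/* an address somewhere in the Blackfin async region (0x20000000 up) */
	unsigned long addr = 0x2030a7f4UL;
	unsigned long page = addr & ~(4UL * 1024 * 1024 - 1);

	printf("0x%08lx is covered by the 4 MiB CPLB page at 0x%08lx\n",
	       addr, page);
	return 0;
}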
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index fd9a2f31e68..282a7919821 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -89,15 +89,25 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
void __init generate_cplb_tables_all(void)
{
+ unsigned long uncached_end;
int i_d, i_i;
i_d = 0;
/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
- dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
+ uncached_end = memory_mtd_start + mtd_size;
#else
- dcplb_bounds[i_d].eaddr = memory_end;
+ uncached_end = memory_end;
#endif
+ /*
+ * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
+ * so that we don't have to use 4kB pages and cause CPLB thrashing
+ */
+ if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
+ ((_ramend - uncached_end) >= 1 * 1024 * 1024))
+ dcplb_bounds[i_d].eaddr = uncached_end;
+ else
+ dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
/* DMA uncached region. */
if (DMA_UNCACHED_REGION) {
@@ -135,18 +145,15 @@ void __init generate_cplb_tables_all(void)
i_i = 0;
/* Normal RAM, including MTD FS. */
-#ifdef CONFIG_MTD_UCLINUX
- icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
-#else
- icplb_bounds[i_i].eaddr = memory_end;
-#endif
+ icplb_bounds[i_i].eaddr = uncached_end;
icplb_bounds[i_i++].data = SDRAM_IGENERIC;
- /* DMA uncached region. */
- if (DMA_UNCACHED_REGION) {
- icplb_bounds[i_i].eaddr = _ramend;
- icplb_bounds[i_i++].data = 0;
- }
if (_ramend != physical_mem_end) {
+ /* DMA uncached region. */
+ if (DMA_UNCACHED_REGION) {
+ /* Normally this hole is caught by the async below. */
+ icplb_bounds[i_i].eaddr = _ramend;
+ icplb_bounds[i_i++].data = 0;
+ }
/* Reserved memory. */
icplb_bounds[i_i].eaddr = physical_mem_end;
icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index e74e74d7733..e937f323d82 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -7,30 +7,25 @@
*/
#include <linux/types.h>
-#include <linux/mm.h>
+#include <linux/gfp.h>
#include <linux/string.h>
-#include <linux/bootmem.h>
#include <linux/spinlock.h>
-#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/io.h>
#include <linux/scatterlist.h>
-#include <asm/cacheflush.h>
-#include <asm/bfin-global.h>
static spinlock_t dma_page_lock;
-static unsigned int *dma_page;
+static unsigned long *dma_page;
static unsigned int dma_pages;
static unsigned long dma_base;
static unsigned long dma_size;
static unsigned int dma_initialized;
-void dma_alloc_init(unsigned long start, unsigned long end)
+static void dma_alloc_init(unsigned long start, unsigned long end)
{
spin_lock_init(&dma_page_lock);
dma_initialized = 0;
- dma_page = (unsigned int *)__get_free_page(GFP_KERNEL);
+ dma_page = (unsigned long *)__get_free_page(GFP_KERNEL);
memset(dma_page, 0, PAGE_SIZE);
dma_base = PAGE_ALIGN(start);
dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
@@ -58,10 +53,11 @@ static unsigned long __alloc_dma_pages(unsigned int pages)
spin_lock_irqsave(&dma_page_lock, flags);
for (i = 0; i < dma_pages;) {
- if (dma_page[i++] == 0) {
+ if (test_bit(i++, dma_page) == 0) {
if (++count == pages) {
while (count--)
- dma_page[--i] = 1;
+ __set_bit(--i, dma_page);
+
ret = dma_base + (i << PAGE_SHIFT);
break;
}
@@ -84,14 +80,14 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
}
spin_lock_irqsave(&dma_page_lock, flags);
- for (i = page; i < page + pages; i++) {
- dma_page[i] = 0;
- }
+ for (i = page; i < page + pages; i++)
+ __clear_bit(i, dma_page);
+
spin_unlock_irqrestore(&dma_page_lock, flags);
}
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp)
+ dma_addr_t *dma_handle, gfp_t gfp)
{
void *ret;
@@ -115,21 +111,14 @@ dma_free_coherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dma_free_coherent);
/*
- * Dummy functions defined for some existing drivers
+ * Streaming DMA mappings
*/
-
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
+void __dma_sync(dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
{
- BUG_ON(direction == DMA_NONE);
-
- invalidate_dcache_range((unsigned long)ptr,
- (unsigned long)ptr + size);
-
- return (dma_addr_t) ptr;
+ _dma_sync(addr, size, dir);
}
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_sync);
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
@@ -137,30 +126,23 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
{
int i;
- BUG_ON(direction == DMA_NONE);
-
for (i = 0; i < nents; i++, sg++) {
sg->dma_address = (dma_addr_t) sg_virt(sg);
-
- invalidate_dcache_range(sg_dma_address(sg),
- sg_dma_address(sg) +
- sg_dma_len(sg));
+ __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
}
return nents;
}
EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
-}
-EXPORT_SYMBOL(dma_unmap_single);
+ int i;
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
+ for (i = 0; i < nelems; i++, sg++) {
+ sg->dma_address = (dma_addr_t) sg_virt(sg);
+ __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
+ }
}
-EXPORT_SYMBOL(dma_unmap_sg);
+EXPORT_SYMBOL(dma_sync_sg_for_device);
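
The dma-mapping rewrite also turns the coherent-pool bookkeeping into a bitmap (test_bit/__set_bit/__clear_bit on dma_page) scanned linearly for free pages. A standalone model of that allocator, with the kernel bitops replaced by plain shifts and the free run additionally required to be contiguous:

#include <stdio.h>

#define DMA_PAGES 64

static unsigned int dma_page[DMA_PAGES / 32];	/* one bit per DMA page */

static int page_busy(unsigned int i)        { return (dma_page[i / 32] >> (i % 32)) & 1; }
static void mark_page_busy(unsigned int i)  { dma_page[i / 32] |=  1U << (i % 32); }
static void mark_page_free(unsigned int i)  { dma_page[i / 32] &= ~(1U << (i % 32)); }

/* first-fit scan: returns the index of a free run of 'pages' pages, or -1 */
static int alloc_dma_pages(unsigned int pages)
{
	unsigned int i, count = 0;

	for (i = 0; i < DMA_PAGES; i++) {
		if (page_busy(i)) {
			count = 0;
			continue;
		}
		if (++count == pages) {
			unsigned int start = i - pages + 1;

			while (count--)
				mark_page_busy(start + count);
			return start;
		}
	}
	return -1;
}

static void free_dma_pages(unsigned int start, unsigned int pages)
{
	while (pages--)
		mark_page_free(start + pages);
}

int main(void)
{
	int a = alloc_dma_pages(4);
	int b = alloc_dma_pages(2);

	printf("runs at %d and %d\n", a, b);				/* 0 and 4 */
	free_dma_pages(a, 4);
	printf("after free: run of 3 at %d\n", alloc_dma_pages(3));	/* 0 */
	return 0;
}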
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 7281a91d26b..cdbe075de1d 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -137,7 +137,7 @@ static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] =
#endif
};
-void set_gptimer_pwidth(int timer_id, uint32_t value)
+void set_gptimer_pwidth(unsigned int timer_id, uint32_t value)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
timer_regs[timer_id]->width = value;
@@ -145,14 +145,14 @@ void set_gptimer_pwidth(int timer_id, uint32_t value)
}
EXPORT_SYMBOL(set_gptimer_pwidth);
-uint32_t get_gptimer_pwidth(int timer_id)
+uint32_t get_gptimer_pwidth(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
return timer_regs[timer_id]->width;
}
EXPORT_SYMBOL(get_gptimer_pwidth);
-void set_gptimer_period(int timer_id, uint32_t period)
+void set_gptimer_period(unsigned int timer_id, uint32_t period)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
timer_regs[timer_id]->period = period;
@@ -160,28 +160,28 @@ void set_gptimer_period(int timer_id, uint32_t period)
}
EXPORT_SYMBOL(set_gptimer_period);
-uint32_t get_gptimer_period(int timer_id)
+uint32_t get_gptimer_period(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
return timer_regs[timer_id]->period;
}
EXPORT_SYMBOL(get_gptimer_period);
-uint32_t get_gptimer_count(int timer_id)
+uint32_t get_gptimer_count(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
return timer_regs[timer_id]->counter;
}
EXPORT_SYMBOL(get_gptimer_count);
-uint32_t get_gptimer_status(int group)
+uint32_t get_gptimer_status(unsigned int group)
{
tassert(group < BFIN_TIMER_NUM_GROUP);
return group_regs[group]->status;
}
EXPORT_SYMBOL(get_gptimer_status);
-void set_gptimer_status(int group, uint32_t value)
+void set_gptimer_status(unsigned int group, uint32_t value)
{
tassert(group < BFIN_TIMER_NUM_GROUP);
group_regs[group]->status = value;
@@ -189,42 +189,42 @@ void set_gptimer_status(int group, uint32_t value)
}
EXPORT_SYMBOL(set_gptimer_status);
-int get_gptimer_intr(int timer_id)
+int get_gptimer_intr(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_intr);
-void clear_gptimer_intr(int timer_id)
+void clear_gptimer_intr(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
group_regs[BFIN_TIMER_OCTET(timer_id)]->status = timil_mask[timer_id];
}
EXPORT_SYMBOL(clear_gptimer_intr);
-int get_gptimer_over(int timer_id)
+int get_gptimer_over(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_over);
-void clear_gptimer_over(int timer_id)
+void clear_gptimer_over(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
group_regs[BFIN_TIMER_OCTET(timer_id)]->status = tovf_mask[timer_id];
}
EXPORT_SYMBOL(clear_gptimer_over);
-int get_gptimer_run(int timer_id)
+int get_gptimer_run(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]);
}
EXPORT_SYMBOL(get_gptimer_run);
-void set_gptimer_config(int timer_id, uint16_t config)
+void set_gptimer_config(unsigned int timer_id, uint16_t config)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
timer_regs[timer_id]->config = config;
@@ -232,7 +232,7 @@ void set_gptimer_config(int timer_id, uint16_t config)
}
EXPORT_SYMBOL(set_gptimer_config);
-uint16_t get_gptimer_config(int timer_id)
+uint16_t get_gptimer_config(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
return timer_regs[timer_id]->config;
@@ -280,7 +280,7 @@ void disable_gptimers_sync(uint16_t mask)
}
EXPORT_SYMBOL(disable_gptimers_sync);
-void set_gptimer_pulse_hi(int timer_id)
+void set_gptimer_pulse_hi(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
timer_regs[timer_id]->config |= TIMER_PULSE_HI;
@@ -288,7 +288,7 @@ void set_gptimer_pulse_hi(int timer_id)
}
EXPORT_SYMBOL(set_gptimer_pulse_hi);
-void clear_gptimer_pulse_hi(int timer_id)
+void clear_gptimer_pulse_hi(unsigned int timer_id)
{
tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
timer_regs[timer_id]->config &= ~TIMER_PULSE_HI;
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 5d7382396dc..a77307a4473 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -335,3 +335,70 @@ void __ipipe_enable_root_irqs_hw(void)
__clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
bfin_sti(bfin_irq_flags);
}
+
+/*
+ * We could use standard atomic bitops in the following root status
+ * manipulation routines, but let's prepare for SMP support in the
+ * same move, preventing CPU migration as required.
+ */
+void __ipipe_stall_root(void)
+{
+ unsigned long *p, flags;
+
+ local_irq_save_hw(flags);
+ p = &__ipipe_root_status;
+ __set_bit(IPIPE_STALL_FLAG, p);
+ local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_stall_root);
+
+unsigned long __ipipe_test_and_stall_root(void)
+{
+ unsigned long *p, flags;
+ int x;
+
+ local_irq_save_hw(flags);
+ p = &__ipipe_root_status;
+ x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
+ local_irq_restore_hw(flags);
+
+ return x;
+}
+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
+
+unsigned long __ipipe_test_root(void)
+{
+ const unsigned long *p;
+ unsigned long flags;
+ int x;
+
+ local_irq_save_hw_smp(flags);
+ p = &__ipipe_root_status;
+ x = test_bit(IPIPE_STALL_FLAG, p);
+ local_irq_restore_hw_smp(flags);
+
+ return x;
+}
+EXPORT_SYMBOL(__ipipe_test_root);
+
+void __ipipe_lock_root(void)
+{
+ unsigned long *p, flags;
+
+ local_irq_save_hw(flags);
+ p = &__ipipe_root_status;
+ __set_bit(IPIPE_SYNCDEFER_FLAG, p);
+ local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_lock_root);
+
+void __ipipe_unlock_root(void)
+{
+ unsigned long *p, flags;
+
+ local_irq_save_hw(flags);
+ p = &__ipipe_root_status;
+ __clear_bit(IPIPE_SYNCDEFER_FLAG, p);
+ local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_unlock_root);
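
The new root-status helpers all reduce to setting, testing, or clearing one bit in __ipipe_root_status with hardware IRQs briefly masked. The virtual-masking idea is easy to model: "stalling" the root domain is the software analogue of local_irq_disable(), and test-and-stall plus conditional unstall pair up like irqsave/irqrestore. A sketch of just that bookkeeping, with the real hardware masking omitted:

#include <stdio.h>

#define IPIPE_STALL_FLAG 0

static unsigned long root_status;

static void stall_root(void)
{
	root_status |= 1UL << IPIPE_STALL_FLAG;
}

static unsigned long test_and_stall_root(void)
{
	unsigned long was = (root_status >> IPIPE_STALL_FLAG) & 1;

	stall_root();
	return was;
}

static void unstall_root(void)
{
	root_status &= ~(1UL << IPIPE_STALL_FLAG);
}

int main(void)
{
	/* local_irq_save()-style pairing over the virtual mask */
	unsigned long flags = test_and_stall_root();

	printf("root was %s\n", flags ? "already stalled" : "unstalled");
	/* ... critical section against root-domain interrupt handlers ... */
	if (!flags)
		unstall_root();		/* local_irq_restore() analogue */
	return 0;
}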
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index db9f9c91f11..64cff54a8a5 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
static struct irq_desc bad_irq_desc = {
.handle_irq = handle_bad_irq,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v)
unsigned long flags;
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index cce79d05b90..f1036b6b929 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -24,16 +24,6 @@
#include <asm/blackfin.h>
#include <asm/dma.h>
-/* Put the error code here just in case the user cares. */
-int gdb_bfin_errcode;
-/* Likewise, the vector number here (since GDB only gets the signal
- number through the usual means, and that's not very specific). */
-int gdb_bfin_vector = -1;
-
-#if KGDB_MAX_NO_CPUS != 8
-#error change the definition of slavecpulocks
-#endif
-
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
gdb_regs[BFIN_R0] = regs->r0;
@@ -369,13 +359,6 @@ void kgdb_roundup_cpu(int cpu, unsigned long flags)
}
#endif
-void kgdb_post_primary_code(struct pt_regs *regs, int eVector, int err_code)
-{
- /* Master processor is completely in the debugger */
- gdb_bfin_vector = eVector;
- gdb_bfin_errcode = err_code;
-}
-
int kgdb_arch_handle_exception(int vector, int signo,
int err_code, char *remcom_in_buffer,
char *remcom_out_buffer,
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 59fc42dc5d6..9a4b0759438 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -17,8 +17,9 @@
#include <asm/blackfin.h>
+/* Symbols are here for the kgdb test to poke at directly */
static char cmdline[256];
-static unsigned long len;
+static size_t len;
#ifndef CONFIG_SMP
static int num1 __attribute__((l1_data));
@@ -27,11 +28,10 @@ void kgdb_l1_test(void) __attribute__((l1_text));
void kgdb_l1_test(void)
{
- printk(KERN_ALERT "L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
- printk(KERN_ALERT "L1 : code function addr = 0x%p\n", kgdb_l1_test);
- num1 = num1 + 10 ;
- printk(KERN_ALERT "L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
- return ;
+ pr_alert("L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
+ pr_alert("L1 : code function addr = 0x%p\n", kgdb_l1_test);
+ num1 = num1 + 10;
+ pr_alert("L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
}
#endif
@@ -42,11 +42,10 @@ void kgdb_l2_test(void) __attribute__((l2));
void kgdb_l2_test(void)
{
- printk(KERN_ALERT "L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
- printk(KERN_ALERT "L2 : code function addr = 0x%p\n", kgdb_l2_test);
- num2 = num2 + 20 ;
- printk(KERN_ALERT "L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
- return ;
+ pr_alert("L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
+ pr_alert("L2 : code function addr = 0x%p\n", kgdb_l2_test);
+ num2 = num2 + 20;
+ pr_alert("L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
}
#endif
@@ -54,12 +53,14 @@ void kgdb_l2_test(void)
int kgdb_test(char *name, int len, int count, int z)
{
- printk(KERN_ALERT "kgdb name(%d): %s, %d, %d\n", len, name, count, z);
+ pr_alert("kgdb name(%d): %s, %d, %d\n", len, name, count, z);
count = z;
return count;
}
-static int test_proc_output(char *buf)
+static ssize_t
+kgdb_test_proc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
kgdb_test("hello world!", 12, 0x55, 0x10);
#ifndef CONFIG_SMP
@@ -72,49 +73,31 @@ static int test_proc_output(char *buf)
return 0;
}
-static int test_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static ssize_t
+kgdb_test_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- int len;
-
- len = test_proc_output(page);
- if (len <= off+count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
-}
-
-static int test_write_proc(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- if (count >= 256)
- len = 255;
- else
- len = count;
-
+ len = min_t(size_t, 255, count);
memcpy(cmdline, buffer, count);
cmdline[len] = 0;
return len;
}
+static const struct file_operations kgdb_test_proc_fops = {
+ .owner = THIS_MODULE,
+ .read = kgdb_test_proc_read,
+ .write = kgdb_test_proc_write,
+};
+
static int __init kgdbtest_init(void)
{
struct proc_dir_entry *entry;
- entry = create_proc_entry("kgdbtest", 0, NULL);
+ entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
if (entry == NULL)
return -ENOMEM;
- entry->read_proc = test_read_proc;
- entry->write_proc = test_write_proc;
- entry->data = NULL;
-
return 0;
}
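
The kgdb_test conversion above is the standard create_proc_entry() to proc_create() migration: the read_proc/write_proc hooks become ordinary file_operations methods. A minimal separate module following the same shape (all names here are made up for illustration; only the API usage mirrors the patch):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static char message[64] = "hello from procfs\n";

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(buf, count, ppos,
				       message, strlen(message));
}

static ssize_t demo_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	size_t len = count < sizeof(message) - 1 ? count : sizeof(message) - 1;

	if (copy_from_user(message, buf, len))
		return -EFAULT;
	message[len] = 0;
	return count;
}

static const struct file_operations demo_proc_fops = {
	.owner = THIS_MODULE,
	.read  = demo_read,
	.write = demo_write,
};

static int __init demo_init(void)
{
	if (!proc_create("procdemo", 0644, NULL, &demo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("procdemo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");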
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 45876427eb2..b56b0e485e0 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -258,9 +258,12 @@ void finish_atomic_sections (struct pt_regs *regs)
int __user *up0 = (int __user *)regs->p0;
switch (regs->pc) {
+ default:
+ /* not in middle of an atomic step, so resume like normal */
+ return;
+
case ATOMIC_XCHG32 + 2:
put_user(regs->r1, up0);
- regs->pc = ATOMIC_XCHG32 + 4;
break;
case ATOMIC_CAS32 + 2:
@@ -268,7 +271,6 @@ void finish_atomic_sections (struct pt_regs *regs)
if (regs->r0 == regs->r1)
case ATOMIC_CAS32 + 6:
put_user(regs->r2, up0);
- regs->pc = ATOMIC_CAS32 + 8;
break;
case ATOMIC_ADD32 + 2:
@@ -276,7 +278,6 @@ void finish_atomic_sections (struct pt_regs *regs)
/* fall through */
case ATOMIC_ADD32 + 4:
put_user(regs->r0, up0);
- regs->pc = ATOMIC_ADD32 + 6;
break;
case ATOMIC_SUB32 + 2:
@@ -284,7 +285,6 @@ void finish_atomic_sections (struct pt_regs *regs)
/* fall through */
case ATOMIC_SUB32 + 4:
put_user(regs->r0, up0);
- regs->pc = ATOMIC_SUB32 + 6;
break;
case ATOMIC_IOR32 + 2:
@@ -292,7 +292,6 @@ void finish_atomic_sections (struct pt_regs *regs)
/* fall through */
case ATOMIC_IOR32 + 4:
put_user(regs->r0, up0);
- regs->pc = ATOMIC_IOR32 + 6;
break;
case ATOMIC_AND32 + 2:
@@ -300,7 +299,6 @@ void finish_atomic_sections (struct pt_regs *regs)
/* fall through */
case ATOMIC_AND32 + 4:
put_user(regs->r0, up0);
- regs->pc = ATOMIC_AND32 + 6;
break;
case ATOMIC_XOR32 + 2:
@@ -308,9 +306,15 @@ void finish_atomic_sections (struct pt_regs *regs)
/* fall through */
case ATOMIC_XOR32 + 4:
put_user(regs->r0, up0);
- regs->pc = ATOMIC_XOR32 + 6;
break;
}
+
+ /*
+ * We've finished the atomic section, and the only thing left for
+ * userspace is to do an RTS, so we might as well handle that too
+ * since we need to update the PC anyway.
+ */
+ regs->pc = regs->rets;
}
static inline
@@ -332,12 +336,58 @@ int in_mem_const(unsigned long addr, unsigned long size,
{
return in_mem_const_off(addr, size, 0, const_addr, const_size);
}
-#define IN_ASYNC(bnum, bctlnum) \
+#define ASYNC_ENABLED(bnum, bctlnum) \
({ \
- (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
- bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
- BFIN_MEM_ACCESS_CORE; \
+ (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
+ bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
+ 1; \
})
+/*
+ * We can't read EBIU banks that aren't enabled or we end up hanging
+ * on the access to the async space. Make sure we validate accesses
+ * that cross async banks too.
+ * 0 - found, but unusable
+ * 1 - found & usable
+ * 2 - not found
+ */
+static
+int in_async(unsigned long addr, unsigned long size)
+{
+ if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
+ if (!ASYNC_ENABLED(0, 0))
+ return 0;
+ if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
+ return 1;
+ size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
+ addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
+ }
+ if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
+ if (!ASYNC_ENABLED(1, 0))
+ return 0;
+ if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
+ return 1;
+ size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
+ addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
+ }
+ if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
+ if (!ASYNC_ENABLED(2, 1))
+ return 0;
+ if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
+ return 1;
+ size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
+ addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
+ }
+ if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+ if (!ASYNC_ENABLED(3, 1))
+ return 0;
+ if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
+ return 1;
+ return 0;
+ }
+
+ /* not within async bounds */
+ return 2;
+}
int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
@@ -374,17 +424,11 @@ int bfin_mem_access_type(unsigned long addr, unsigned long size)
if (addr >= SYSMMR_BASE)
return BFIN_MEM_ACCESS_CORE_ONLY;
- /* We can't read EBIU banks that aren't enabled or we end up hanging
- * on the access to the async space.
- */
- if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
- return IN_ASYNC(0, 0);
- if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
- return IN_ASYNC(1, 0);
- if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
- return IN_ASYNC(2, 1);
- if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
- return IN_ASYNC(3, 1);
+ switch (in_async(addr, size)) {
+ case 0: return -EFAULT;
+ case 1: return BFIN_MEM_ACCESS_CORE;
+ case 2: /* fall through */;
+ }
if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
return BFIN_MEM_ACCESS_CORE;
@@ -401,6 +445,8 @@ __attribute__((l1_text))
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
+ int aret;
+
if (size == 0)
return 1;
/* Check that things do not wrap around */
@@ -450,6 +496,11 @@ int _access_ok(unsigned long addr, unsigned long size)
if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
return 1;
#endif
+
+ aret = in_async(addr, size);
+ if (aret < 2)
+ return aret;
+
if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
return 1;
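
in_async() above validates a range that may span several async banks by trimming the checked range at each bank boundary and carrying the remainder into the next bank. The same walk in a self-contained form; the 4 x 1 MiB layout starting at 0x20000000 models the usual BF5xx async region, and one bank is marked disabled to show the "found but unusable" case:

#include <stdio.h>

struct bank { unsigned long base, size; int enabled; };

static const struct bank banks[] = {
	{ 0x20000000, 0x00100000, 1 },	/* async bank 0 */
	{ 0x20100000, 0x00100000, 1 },	/* async bank 1 */
	{ 0x20200000, 0x00100000, 0 },	/* async bank 2 (disabled) */
	{ 0x20300000, 0x00100000, 1 },	/* async bank 3 */
};

/* 0 - found but unusable, 1 - found and usable, 2 - not in async space */
static int in_async(unsigned long addr, unsigned long size)
{
	unsigned int i;

	for (i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
		unsigned long end = banks[i].base + banks[i].size;

		if (addr < banks[i].base || addr >= end)
			continue;
		if (!banks[i].enabled)
			return 0;
		if (addr + size <= end)
			return 1;
		/* range spills into the next bank: check the remainder */
		size -= end - addr;
		addr = end;
	}
	return 2;
}

int main(void)
{
	printf("%d\n", in_async(0x200ff000, 0x2000));	/* banks 0->1, both on: 1 */
	printf("%d\n", in_async(0x201ff000, 0x2000));	/* spills into disabled bank 2: 0 */
	printf("%d\n", in_async(0x00001000, 0x1000));	/* not async at all: 2 */
	return 0;
}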
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 56b0ba12175..65567dc4b9f 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -316,19 +316,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &data,
to_copy, 1);
- if (copied)
- break;
-
- /* hrm, why didn't that work ... maybe no mapping */
- if (addr >= FIXED_CODE_START &&
- addr + to_copy <= FIXED_CODE_END) {
- copy_to_user_page(0, 0, 0, paddr, &data, to_copy);
- copied = to_copy;
- } else if (addr >= BOOT_ROM_START) {
- memcpy(paddr, &data, to_copy);
- copied = to_copy;
- }
-
break;
case BFIN_MEM_ACCESS_DMA:
if (safe_dma_memcpy(paddr, &data, to_copy))
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index c202a44d141..95448ae9c43 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -178,10 +178,10 @@ void __init bfin_cache_init(void)
void __init bfin_relocate_l1_mem(void)
{
- unsigned long l1_code_length;
- unsigned long l1_data_a_length;
- unsigned long l1_data_b_length;
- unsigned long l2_length;
+ unsigned long text_l1_len = (unsigned long)_text_l1_len;
+ unsigned long data_l1_len = (unsigned long)_data_l1_len;
+ unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+ unsigned long l2_len = (unsigned long)_l2_len;
early_shadow_stamp();
@@ -201,30 +201,23 @@ void __init bfin_relocate_l1_mem(void)
blackfin_dma_early_init();
- /* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
- l1_code_length = _etext_l1 - _stext_l1;
- if (l1_code_length)
- early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
+ /* if necessary, copy L1 text to L1 instruction SRAM */
+ if (L1_CODE_LENGTH && text_l1_len)
+ early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);
- /* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
- l1_data_a_length = _sbss_l1 - _sdata_l1;
- if (l1_data_a_length)
- early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
+ /* if necessary, copy L1 data to L1 data bank A SRAM */
+ if (L1_DATA_A_LENGTH && data_l1_len)
+ early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);
- /* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
- l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
- if (l1_data_b_length)
- early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
- l1_data_a_length, l1_data_b_length);
+ /* if necessary, copy L1 data B to L1 data bank B SRAM */
+ if (L1_DATA_B_LENGTH && data_b_l1_len)
+ early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);
early_dma_memcpy_done();
- /* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
- if (L2_LENGTH != 0) {
- l2_length = _sbss_l2 - _stext_l2;
- if (l2_length)
- memcpy(_stext_l2, _l2_lma_start, l2_length);
- }
+ /* if necessary, copy L2 text/data to L2 SRAM */
+ if (L2_LENGTH && l2_len)
+ memcpy(_stext_l2, _l2_lma, l2_len);
}
/* add_memory_region to memmap */
@@ -608,11 +601,6 @@ static __init void memory_setup(void)
page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif
-#if !defined(CONFIG_MTD_UCLINUX)
- /*In case there is no valid CPLB behind memory_end make sure we don't get to close*/
- memory_end -= SIZE_4K;
-#endif
-
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
@@ -917,7 +905,7 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n");
if (bfin_compiled_revid() == 0xffff)
- printk(KERN_INFO "Compiled for ADSP-%s Rev any\n", CPU);
+ printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
else if (bfin_compiled_revid() == -1)
printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
else
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index 9d90c18fab2..e0fd63e9e38 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -12,6 +12,7 @@
#include <linux/binfmts.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
+#include <linux/tracehook.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
@@ -332,3 +333,20 @@ asmlinkage void do_signal(struct pt_regs *regs)
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
+
+/*
+ * notification of userspace execution resumption
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK))
+ do_signal(regs);
+
+ if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
+
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index afcef129d4e..2e7f8e10bf8 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -22,39 +22,6 @@
#include <asm/cacheflush.h>
#include <asm/dma.h>
-/* common code for old and new mmaps */
-static inline long
-do_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
- out:
- return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
{
return sram_alloc_with_lsl(size, flags);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 359cfb1815c..17c38c5b5b2 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -22,8 +22,6 @@
#include <asm/time.h>
#include <asm/gptimers.h>
-#if defined(CONFIG_CYCLES_CLOCKSOURCE)
-
/* Accelerators for sched_clock()
* convert from cycles(64bits) => nanoseconds (64bits)
* basic equation:
@@ -46,20 +44,11 @@
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static unsigned long cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-static inline void set_cyc2ns_scale(unsigned long cpu_khz)
-{
- cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
-}
-
-static inline unsigned long long cycles_2_ns(cycle_t cyc)
-{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
-static cycle_t bfin_read_cycles(struct clocksource *cs)
+static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}
@@ -69,19 +58,18 @@ static struct clocksource bfin_cs_cycles = {
.rating = 400,
.read = bfin_read_cycles,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 22,
+ .shift = CYC2NS_SCALE_FACTOR,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-unsigned long long sched_clock(void)
+static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
- return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles));
+ return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
+ bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}
static int __init bfin_cs_cycles_init(void)
{
- set_cyc2ns_scale(get_cclk() / 1000);
-
bfin_cs_cycles.mult = \
clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);
@@ -108,7 +96,7 @@ void __init setup_gptimer0(void)
enable_gptimers(TIMER0bit);
}
-static cycle_t bfin_read_gptimer0(void)
+static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
return bfin_read_TIMER0_COUNTER();
}
@@ -118,10 +106,16 @@ static struct clocksource bfin_cs_gptimer0 = {
.rating = 350,
.read = bfin_read_gptimer0,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 22,
+ .shift = CYC2NS_SCALE_FACTOR,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
+{
+ return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
+ bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
+}
+
static int __init bfin_cs_gptimer0_init(void)
{
setup_gptimer0();
@@ -138,6 +132,19 @@ static int __init bfin_cs_gptimer0_init(void)
# define bfin_cs_gptimer0_init()
#endif
+
+#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
+/* prefer to use cycles since it has a higher rating */
+notrace unsigned long long sched_clock(void)
+{
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ return bfin_cs_cycles_sched_clock();
+#else
+ return bfin_cs_gptimer0_sched_clock();
+#endif
+}
+#endif
+
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
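The sched_clock() consolidation above leans on clocksource_cyc2ns(), which is the usual fixed-point conversion ns = (cycles * mult) >> shift, with mult derived from the clock rate as in clocksource_hz2mult(). A standalone sketch of that arithmetic (assumed 500 MHz clock, illustrative only, not part of this patch):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* same idea as clocksource_hz2mult(): pick mult so that ns = (cyc * mult) >> shift */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;			/* round to nearest */
	return (uint32_t)(tmp / hz);
}

static uint64_t cyc2ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 10;				/* CYC2NS_SCALE_FACTOR above */
	uint32_t mult  = hz2mult(500000000, shift);	/* assumed 500 MHz CCLK */

	/* 500 million cycles at 500 MHz is one second: prints ns=1000000000 */
	printf("mult=%u ns=%llu\n", mult,
	       (unsigned long long)cyc2ns(500000000ULL, mult, shift));
	return 0;
}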
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index bd3b53da295..13c1ee3e640 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -184,11 +184,3 @@ void __init time_init(void)
time_sched_init(timer_interrupt);
}
-
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
- return (unsigned long long)jiffies *(NSEC_PER_SEC / HZ);
-}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 78cb3d38f89..d3cbcd6bd98 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -119,6 +119,15 @@ static void decode_address(char *buf, unsigned long address)
return;
}
+ /*
+ * Don't walk any of the vmas if we are oopsing; it has been known
+ * to cause problems - corrupt vmas (from kernel crashes) can cause double faults
+ */
+ if (oops_in_progress) {
+ strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
+ return;
+ }
+
/* looks like we're off in user-land, so let's walk all the
* mappings of all our processes and see if we can't be a whee
* bit more specific
@@ -515,6 +524,36 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
break;
/* External Memory Addressing Error */
case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
+ if (ANOMALY_05000310) {
+ static unsigned long anomaly_rets;
+
+ if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
+ (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) {
+ /*
+ * A false hardware error will happen while fetching at
+ * the L1 instruction SRAM boundary. Ignore it.
+ */
+ anomaly_rets = fp->rets;
+ goto traps_done;
+ } else if (fp->rets == anomaly_rets) {
+ /*
+ * Testing shows that when the boundary code returns to its
+ * caller, another false hardware error can occur at the ret
+ * point. Ignore it too.
+ */
+ goto traps_done;
+ } else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
+ (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) {
+ /*
+ * Likewise, if the boundary code calls a function, a new
+ * false hardware error may occur at the entry point.
+ * Ignore it too.
+ */
+ goto traps_done;
+ } else
+ anomaly_rets = 0;
+ }
+
info.si_code = BUS_ADRERR;
sig = SIGBUS;
strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
@@ -976,12 +1015,12 @@ void dump_bfin_process(struct pt_regs *fp)
!((unsigned long)current & 0x3) && current->pid) {
verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
if (current->comm >= (char *)FIXED_CODE_START)
- verbose_printk(KERN_NOTICE "COMM=%s PID=%d\n",
+ verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
current->comm, current->pid);
else
- verbose_printk(KERN_NOTICE "COMM= invalid\n");
+ verbose_printk(KERN_NOTICE "COMM= invalid");
- printk(KERN_NOTICE "CPU = %d\n", current_thread_info()->cpu);
+ printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
verbose_printk(KERN_NOTICE
"TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
@@ -1140,7 +1179,7 @@ void show_regs(struct pt_regs *fp)
if (fp->ipend & ~0x3F) {
for (i = 0; i < (NR_IRQS - 1); i++) {
if (!in_atomic)
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
@@ -1155,7 +1194,7 @@ void show_regs(struct pt_regs *fp)
verbose_printk("\n");
unlock:
if (!in_atomic)
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
}
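The ANOMALY_05000310 workaround above ignores the spurious hardware error when the faulting PC (or the saved RETS) falls in the last 512 bytes of L1 instruction SRAM, and again when the same RETS shows up on the following return. A standalone sketch of that range check (hypothetical values standing in for L1_CODE_START/L1_CODE_LENGTH, illustrative only):

#include <stdio.h>

/* hypothetical stand-ins for L1_CODE_START / L1_CODE_LENGTH */
#define L1_CODE_START  0xFFA00000UL
#define L1_CODE_LENGTH 0x8000UL

/* true if addr falls in the last 512 bytes of L1 instruction SRAM */
static int in_l1_boundary(unsigned long addr)
{
	return addr >= (L1_CODE_START + L1_CODE_LENGTH - 512) &&
	       addr <  (L1_CODE_START + L1_CODE_LENGTH);
}

int main(void)
{
	printf("%d %d\n",
	       in_l1_boundary(L1_CODE_START + L1_CODE_LENGTH - 4),	/* prints 1 */
	       in_l1_boundary(L1_CODE_START));				/* prints 0 */
	return 0;
}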
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 10e12539000..66799e763dc 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -4,8 +4,6 @@
* Licensed under the GPL-2 or later
*/
-#define VMLINUX_SYMBOL(_sym_) _##_sym_
-
#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
@@ -123,8 +121,6 @@ SECTIONS
EXIT_DATA
}
- __l1_lma_start = .;
-
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
{
. = ALIGN(4);
@@ -136,9 +132,11 @@ SECTIONS
. = ALIGN(4);
__etext_l1 = .;
}
- ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")
+ __text_l1_lma = LOADADDR(.text_l1);
+ __text_l1_len = SIZEOF(.text_l1);
+ ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
- .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
+ .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
{
. = ALIGN(4);
__sdata_l1 = .;
@@ -154,9 +152,11 @@ SECTIONS
. = ALIGN(4);
__ebss_l1 = .;
}
- ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")
+ __data_l1_lma = LOADADDR(.data_l1);
+ __data_l1_len = SIZEOF(.data_l1);
+ ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
- .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
+ .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
{
. = ALIGN(4);
__sdata_b_l1 = .;
@@ -169,11 +169,11 @@ SECTIONS
. = ALIGN(4);
__ebss_b_l1 = .;
}
- ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")
-
- __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
+ __data_b_l1_lma = LOADADDR(.data_b_l1);
+ __data_b_l1_len = SIZEOF(.data_b_l1);
+ ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
- .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
+ .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
{
. = ALIGN(4);
__stext_l2 = .;
@@ -195,12 +195,14 @@ SECTIONS
. = ALIGN(4);
__ebss_l2 = .;
}
- ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")
+ __l2_lma = LOADADDR(.text_data_l2);
+ __l2_len = SIZEOF(.text_data_l2);
+ ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
/* Force trailing alignment of our init section so that when we
* free our init memory, we don't leave behind a partial page.
*/
- . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
+ . = __l2_lma + __l2_len;
. = ALIGN(PAGE_SIZE);
___init_end = .;
diff --git a/arch/blackfin/lib/Makefile b/arch/blackfin/lib/Makefile
index 635288fc5f5..42c47dc9e12 100644
--- a/arch/blackfin/lib/Makefile
+++ b/arch/blackfin/lib/Makefile
@@ -5,7 +5,7 @@
lib-y := \
ashldi3.o ashrdi3.o lshrdi3.o \
muldi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
- checksum.o memcpy.o memset.o memcmp.o memchr.o memmove.o \
+ memcpy.o memset.o memcmp.o memchr.o memmove.o \
strcmp.o strcpy.o strncmp.o strncpy.o \
umulsi3_highpart.o smulsi3_highpart.o \
ins.o outs.o
diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c
deleted file mode 100644
index c62969dc1bb..00000000000
--- a/arch/blackfin/lib/checksum.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright 2004-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- *
- * An implementation of the TCP/IP protocol suite for the LINUX operating
- * system. INET is implemented using the BSD Socket interface as the
- * means of communication with the user level.
- *
- */
-
-#include <linux/module.h>
-#include <net/checksum.h>
-#include <asm/checksum.h>
-
-#ifdef CONFIG_IP_CHECKSUM_L1
-static unsigned short do_csum(const unsigned char *buff, int len)__attribute__((l1_text));
-#endif
-
-static unsigned short do_csum(const unsigned char *buff, int len)
-{
- register unsigned long sum = 0;
- int swappem = 0;
-
- if (1 & (unsigned long)buff) {
- sum = *buff << 8;
- buff++;
- len--;
- ++swappem;
- }
-
- while (len > 1) {
- sum += *(unsigned short *)buff;
- buff += 2;
- len -= 2;
- }
-
- if (len > 0)
- sum += *buff;
-
- /* Fold 32-bit sum to 16 bits */
- while (sum >> 16)
- sum = (sum & 0xffff) + (sum >> 16);
-
- if (swappem)
- sum = ((sum & 0xff00) >> 8) + ((sum & 0x00ff) << 8);
-
- return sum;
-
-}
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(unsigned char *iph, unsigned int ihl)
-{
- return (__force __sum16)~do_csum(iph, ihl * 4);
-}
-EXPORT_SYMBOL(ip_fast_csum);
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
- /*
- * Just in case we get nasty checksum data...
- * Like 0xffff6ec3 in the case of our IPv6 multicast header.
- * We fold to begin with, as well as at the end.
- */
- sum = (sum & 0xffff) + (sum >> 16);
-
- sum += do_csum(buff, len);
-
- sum = (sum & 0xffff) + (sum >> 16);
-
- return sum;
-}
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
- return (__force __sum16)~do_csum(buff, len);
-}
-EXPORT_SYMBOL(ip_compute_csum);
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *csum_err)
-{
- if (csum_err)
- *csum_err = 0;
- memcpy(dst, (__force void *)src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-
-__wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
-{
- memcpy(dst, src, len);
- return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy);
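The deleted checksum.c above was a plain C implementation of the ones-complement Internet checksum (RFC 1071): sum the data as 16-bit words, fold the carries back in, and invert. A minimal standalone version of that algorithm (illustrative only, not the code that replaces it):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* RFC 1071 style ones-complement checksum over a byte buffer */
static uint16_t csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)buf[0] << 8 | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)buf[0] << 8;

	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}

int main(void)
{
	/* sample IPv4 header with the checksum field zeroed; prints 0xb861 */
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x73, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x11, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7,
	};

	printf("checksum = 0x%04x\n", csum(hdr, sizeof(hdr)));
	return 0;
}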
diff --git a/arch/blackfin/mach-bf518/Kconfig b/arch/blackfin/mach-bf518/Kconfig
index 4c76fefb7a3..4ab2d166c83 100644
--- a/arch/blackfin/mach-bf518/Kconfig
+++ b/arch/blackfin/mach-bf518/Kconfig
@@ -1,3 +1,7 @@
+config BF51x
+ def_bool y
+ depends on (BF512 || BF514 || BF516 || BF518)
+
if (BF51x)
source "arch/blackfin/mach-bf518/boards/Kconfig"
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 6cfb246aebe..9053462be4b 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -58,10 +58,4 @@
#define OFFSET_SCR 0x1C /* SCR Scratch Register */
#define OFFSET_GCTL 0x24 /* Global Control Register */
-/* PLL_DIV Masks */
-#define CCLK_DIV1 CSEL_DIV1 /* CCLK = VCO / 1 */
-#define CCLK_DIV2 CSEL_DIV2 /* CCLK = VCO / 2 */
-#define CCLK_DIV4 CSEL_DIV4 /* CCLK = VCO / 4 */
-#define CCLK_DIV8 CSEL_DIV8 /* CCLK = VCO / 8 */
-
#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
index e1d99911025..108fa4bde27 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
@@ -1,7 +1,7 @@
/*
* Copyright 2008-2009 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
*/
#ifndef _CDEF_BF514_H
@@ -10,15 +10,8 @@
/* include all Core registers and bit definitions */
#include "defBF514.h"
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF514 */
-
-/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "cdefBF51x_base.h"
-
-/* The following are the #defines needed by ADSP-BF514 that are not in the common header */
+/* BF514 is BF512 + RSI */
+#include "cdefBF512.h"
/* Removable Storage Interface Registers */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
index 6b364eda494..2751592ef1c 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
@@ -1,7 +1,7 @@
/*
* Copyright 2008-2009 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
*/
#ifndef _CDEF_BF516_H
@@ -10,15 +10,8 @@
/* include all Core registers and bit definitions */
#include "defBF516.h"
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF516 */
-
-/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "cdefBF51x_base.h"
-
-/* The following are the #defines needed by ADSP-BF516 that are not in the common header */
+/* BF516 is BF514 + EMAC */
+#include "cdefBF514.h"
/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
@@ -185,71 +178,4 @@
#define bfin_read_EMAC_TXC_ABORT() bfin_read32(EMAC_TXC_ABORT)
#define bfin_write_EMAC_TXC_ABORT(val) bfin_write32(EMAC_TXC_ABORT, val)
-/* Removable Storage Interface Registers */
-
-#define bfin_read_RSI_PWR_CTL() bfin_read16(RSI_PWR_CONTROL)
-#define bfin_write_RSI_PWR_CTL(val) bfin_write16(RSI_PWR_CONTROL, val)
-#define bfin_read_RSI_CLK_CTL() bfin_read16(RSI_CLK_CONTROL)
-#define bfin_write_RSI_CLK_CTL(val) bfin_write16(RSI_CLK_CONTROL, val)
-#define bfin_read_RSI_ARGUMENT() bfin_read32(RSI_ARGUMENT)
-#define bfin_write_RSI_ARGUMENT(val) bfin_write32(RSI_ARGUMENT, val)
-#define bfin_read_RSI_COMMAND() bfin_read16(RSI_COMMAND)
-#define bfin_write_RSI_COMMAND(val) bfin_write16(RSI_COMMAND, val)
-#define bfin_read_RSI_RESP_CMD() bfin_read16(RSI_RESP_CMD)
-#define bfin_write_RSI_RESP_CMD(val) bfin_write16(RSI_RESP_CMD, val)
-#define bfin_read_RSI_RESPONSE0() bfin_read32(RSI_RESPONSE0)
-#define bfin_write_RSI_RESPONSE0(val) bfin_write32(RSI_RESPONSE0, val)
-#define bfin_read_RSI_RESPONSE1() bfin_read32(RSI_RESPONSE1)
-#define bfin_write_RSI_RESPONSE1(val) bfin_write32(RSI_RESPONSE1, val)
-#define bfin_read_RSI_RESPONSE2() bfin_read32(RSI_RESPONSE2)
-#define bfin_write_RSI_RESPONSE2(val) bfin_write32(RSI_RESPONSE2, val)
-#define bfin_read_RSI_RESPONSE3() bfin_read32(RSI_RESPONSE3)
-#define bfin_write_RSI_RESPONSE3(val) bfin_write32(RSI_RESPONSE3, val)
-#define bfin_read_RSI_DATA_TIMER() bfin_read32(RSI_DATA_TIMER)
-#define bfin_write_RSI_DATA_TIMER(val) bfin_write32(RSI_DATA_TIMER, val)
-#define bfin_read_RSI_DATA_LGTH() bfin_read16(RSI_DATA_LGTH)
-#define bfin_write_RSI_DATA_LGTH(val) bfin_write16(RSI_DATA_LGTH, val)
-#define bfin_read_RSI_DATA_CTL() bfin_read16(RSI_DATA_CONTROL)
-#define bfin_write_RSI_DATA_CTL(val) bfin_write16(RSI_DATA_CONTROL, val)
-#define bfin_read_RSI_DATA_CNT() bfin_read16(RSI_DATA_CNT)
-#define bfin_write_RSI_DATA_CNT(val) bfin_write16(RSI_DATA_CNT, val)
-#define bfin_read_RSI_STATUS() bfin_read32(RSI_STATUS)
-#define bfin_write_RSI_STATUS(val) bfin_write32(RSI_STATUS, val)
-#define bfin_read_RSI_STATUS_CLR() bfin_read16(RSI_STATUSCL)
-#define bfin_write_RSI_STATUS_CLR(val) bfin_write16(RSI_STATUSCL, val)
-#define bfin_read_RSI_MASK0() bfin_read32(RSI_MASK0)
-#define bfin_write_RSI_MASK0(val) bfin_write32(RSI_MASK0, val)
-#define bfin_read_RSI_MASK1() bfin_read32(RSI_MASK1)
-#define bfin_write_RSI_MASK1(val) bfin_write32(RSI_MASK1, val)
-#define bfin_read_RSI_FIFO_CNT() bfin_read16(RSI_FIFO_CNT)
-#define bfin_write_RSI_FIFO_CNT(val) bfin_write16(RSI_FIFO_CNT, val)
-#define bfin_read_RSI_CEATA_CTL() bfin_read16(RSI_CEATA_CONTROL)
-#define bfin_write_RSI_CEATA_CTL(val) bfin_write16(RSI_CEATA_CONTROL, val)
-#define bfin_read_RSI_FIFO() bfin_read32(RSI_FIFO)
-#define bfin_write_RSI_FIFO(val) bfin_write32(RSI_FIFO, val)
-#define bfin_read_RSI_E_STATUS() bfin_read16(RSI_ESTAT)
-#define bfin_write_RSI_E_STATUS(val) bfin_write16(RSI_ESTAT, val)
-#define bfin_read_RSI_E_MASK() bfin_read16(RSI_EMASK)
-#define bfin_write_RSI_E_MASK(val) bfin_write16(RSI_EMASK, val)
-#define bfin_read_RSI_CFG() bfin_read16(RSI_CONFIG)
-#define bfin_write_RSI_CFG(val) bfin_write16(RSI_CONFIG, val)
-#define bfin_read_RSI_RD_WAIT_EN() bfin_read16(RSI_RD_WAIT_EN)
-#define bfin_write_RSI_RD_WAIT_EN(val) bfin_write16(RSI_RD_WAIT_EN, val)
-#define bfin_read_RSI_PID0() bfin_read16(RSI_PID0)
-#define bfin_write_RSI_PID0(val) bfin_write16(RSI_PID0, val)
-#define bfin_read_RSI_PID1() bfin_read16(RSI_PID1)
-#define bfin_write_RSI_PID1(val) bfin_write16(RSI_PID1, val)
-#define bfin_read_RSI_PID2() bfin_read16(RSI_PID2)
-#define bfin_write_RSI_PID2(val) bfin_write16(RSI_PID2, val)
-#define bfin_read_RSI_PID3() bfin_read16(RSI_PID3)
-#define bfin_write_RSI_PID3(val) bfin_write16(RSI_PID3, val)
-#define bfin_read_RSI_PID4() bfin_read16(RSI_PID4)
-#define bfin_write_RSI_PID4(val) bfin_write16(RSI_PID4, val)
-#define bfin_read_RSI_PID5() bfin_read16(RSI_PID5)
-#define bfin_write_RSI_PID5(val) bfin_write16(RSI_PID5, val)
-#define bfin_read_RSI_PID6() bfin_read16(RSI_PID6)
-#define bfin_write_RSI_PID6(val) bfin_write16(RSI_PID6, val)
-#define bfin_read_RSI_PID7() bfin_read16(RSI_PID7)
-#define bfin_write_RSI_PID7(val) bfin_write16(RSI_PID7, val)
-
#endif /* _CDEF_BF516_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
index 929b90650bd..7fb7f0eab99 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
@@ -1,7 +1,7 @@
/*
* Copyright 2008-2009 Analog Devices Inc.
*
- * Licensed under the GPL-2 or later
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
*/
#ifndef _CDEF_BF518_H
@@ -10,181 +10,10 @@
/* include all Core registers and bit definitions */
#include "defBF518.h"
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
+/* BF518 is BF516 + IEEE-1588 */
+#include "cdefBF516.h"
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF518 */
-
-/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "cdefBF51x_base.h"
-
-/* The following are the #defines needed by ADSP-BF518 that are not in the common header */
-
-
-/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
-
-#define bfin_read_EMAC_OPMODE() bfin_read32(EMAC_OPMODE)
-#define bfin_write_EMAC_OPMODE(val) bfin_write32(EMAC_OPMODE, val)
-#define bfin_read_EMAC_ADDRLO() bfin_read32(EMAC_ADDRLO)
-#define bfin_write_EMAC_ADDRLO(val) bfin_write32(EMAC_ADDRLO, val)
-#define bfin_read_EMAC_ADDRHI() bfin_read32(EMAC_ADDRHI)
-#define bfin_write_EMAC_ADDRHI(val) bfin_write32(EMAC_ADDRHI, val)
-#define bfin_read_EMAC_HASHLO() bfin_read32(EMAC_HASHLO)
-#define bfin_write_EMAC_HASHLO(val) bfin_write32(EMAC_HASHLO, val)
-#define bfin_read_EMAC_HASHHI() bfin_read32(EMAC_HASHHI)
-#define bfin_write_EMAC_HASHHI(val) bfin_write32(EMAC_HASHHI, val)
-#define bfin_read_EMAC_STAADD() bfin_read32(EMAC_STAADD)
-#define bfin_write_EMAC_STAADD(val) bfin_write32(EMAC_STAADD, val)
-#define bfin_read_EMAC_STADAT() bfin_read32(EMAC_STADAT)
-#define bfin_write_EMAC_STADAT(val) bfin_write32(EMAC_STADAT, val)
-#define bfin_read_EMAC_FLC() bfin_read32(EMAC_FLC)
-#define bfin_write_EMAC_FLC(val) bfin_write32(EMAC_FLC, val)
-#define bfin_read_EMAC_VLAN1() bfin_read32(EMAC_VLAN1)
-#define bfin_write_EMAC_VLAN1(val) bfin_write32(EMAC_VLAN1, val)
-#define bfin_read_EMAC_VLAN2() bfin_read32(EMAC_VLAN2)
-#define bfin_write_EMAC_VLAN2(val) bfin_write32(EMAC_VLAN2, val)
-#define bfin_read_EMAC_WKUP_CTL() bfin_read32(EMAC_WKUP_CTL)
-#define bfin_write_EMAC_WKUP_CTL(val) bfin_write32(EMAC_WKUP_CTL, val)
-#define bfin_read_EMAC_WKUP_FFMSK0() bfin_read32(EMAC_WKUP_FFMSK0)
-#define bfin_write_EMAC_WKUP_FFMSK0(val) bfin_write32(EMAC_WKUP_FFMSK0, val)
-#define bfin_read_EMAC_WKUP_FFMSK1() bfin_read32(EMAC_WKUP_FFMSK1)
-#define bfin_write_EMAC_WKUP_FFMSK1(val) bfin_write32(EMAC_WKUP_FFMSK1, val)
-#define bfin_read_EMAC_WKUP_FFMSK2() bfin_read32(EMAC_WKUP_FFMSK2)
-#define bfin_write_EMAC_WKUP_FFMSK2(val) bfin_write32(EMAC_WKUP_FFMSK2, val)
-#define bfin_read_EMAC_WKUP_FFMSK3() bfin_read32(EMAC_WKUP_FFMSK3)
-#define bfin_write_EMAC_WKUP_FFMSK3(val) bfin_write32(EMAC_WKUP_FFMSK3, val)
-#define bfin_read_EMAC_WKUP_FFCMD() bfin_read32(EMAC_WKUP_FFCMD)
-#define bfin_write_EMAC_WKUP_FFCMD(val) bfin_write32(EMAC_WKUP_FFCMD, val)
-#define bfin_read_EMAC_WKUP_FFOFF() bfin_read32(EMAC_WKUP_FFOFF)
-#define bfin_write_EMAC_WKUP_FFOFF(val) bfin_write32(EMAC_WKUP_FFOFF, val)
-#define bfin_read_EMAC_WKUP_FFCRC0() bfin_read32(EMAC_WKUP_FFCRC0)
-#define bfin_write_EMAC_WKUP_FFCRC0(val) bfin_write32(EMAC_WKUP_FFCRC0, val)
-#define bfin_read_EMAC_WKUP_FFCRC1() bfin_read32(EMAC_WKUP_FFCRC1)
-#define bfin_write_EMAC_WKUP_FFCRC1(val) bfin_write32(EMAC_WKUP_FFCRC1, val)
-
-#define bfin_read_EMAC_SYSCTL() bfin_read32(EMAC_SYSCTL)
-#define bfin_write_EMAC_SYSCTL(val) bfin_write32(EMAC_SYSCTL, val)
-#define bfin_read_EMAC_SYSTAT() bfin_read32(EMAC_SYSTAT)
-#define bfin_write_EMAC_SYSTAT(val) bfin_write32(EMAC_SYSTAT, val)
-#define bfin_read_EMAC_RX_STAT() bfin_read32(EMAC_RX_STAT)
-#define bfin_write_EMAC_RX_STAT(val) bfin_write32(EMAC_RX_STAT, val)
-#define bfin_read_EMAC_RX_STKY() bfin_read32(EMAC_RX_STKY)
-#define bfin_write_EMAC_RX_STKY(val) bfin_write32(EMAC_RX_STKY, val)
-#define bfin_read_EMAC_RX_IRQE() bfin_read32(EMAC_RX_IRQE)
-#define bfin_write_EMAC_RX_IRQE(val) bfin_write32(EMAC_RX_IRQE, val)
-#define bfin_read_EMAC_TX_STAT() bfin_read32(EMAC_TX_STAT)
-#define bfin_write_EMAC_TX_STAT(val) bfin_write32(EMAC_TX_STAT, val)
-#define bfin_read_EMAC_TX_STKY() bfin_read32(EMAC_TX_STKY)
-#define bfin_write_EMAC_TX_STKY(val) bfin_write32(EMAC_TX_STKY, val)
-#define bfin_read_EMAC_TX_IRQE() bfin_read32(EMAC_TX_IRQE)
-#define bfin_write_EMAC_TX_IRQE(val) bfin_write32(EMAC_TX_IRQE, val)
-
-#define bfin_read_EMAC_MMC_CTL() bfin_read32(EMAC_MMC_CTL)
-#define bfin_write_EMAC_MMC_CTL(val) bfin_write32(EMAC_MMC_CTL, val)
-#define bfin_read_EMAC_MMC_RIRQS() bfin_read32(EMAC_MMC_RIRQS)
-#define bfin_write_EMAC_MMC_RIRQS(val) bfin_write32(EMAC_MMC_RIRQS, val)
-#define bfin_read_EMAC_MMC_RIRQE() bfin_read32(EMAC_MMC_RIRQE)
-#define bfin_write_EMAC_MMC_RIRQE(val) bfin_write32(EMAC_MMC_RIRQE, val)
-#define bfin_read_EMAC_MMC_TIRQS() bfin_read32(EMAC_MMC_TIRQS)
-#define bfin_write_EMAC_MMC_TIRQS(val) bfin_write32(EMAC_MMC_TIRQS, val)
-#define bfin_read_EMAC_MMC_TIRQE() bfin_read32(EMAC_MMC_TIRQE)
-#define bfin_write_EMAC_MMC_TIRQE(val) bfin_write32(EMAC_MMC_TIRQE, val)
-
-#define bfin_read_EMAC_RXC_OK() bfin_read32(EMAC_RXC_OK)
-#define bfin_write_EMAC_RXC_OK(val) bfin_write32(EMAC_RXC_OK, val)
-#define bfin_read_EMAC_RXC_FCS() bfin_read32(EMAC_RXC_FCS)
-#define bfin_write_EMAC_RXC_FCS(val) bfin_write32(EMAC_RXC_FCS, val)
-#define bfin_read_EMAC_RXC_ALIGN() bfin_read32(EMAC_RXC_ALIGN)
-#define bfin_write_EMAC_RXC_ALIGN(val) bfin_write32(EMAC_RXC_ALIGN, val)
-#define bfin_read_EMAC_RXC_OCTET() bfin_read32(EMAC_RXC_OCTET)
-#define bfin_write_EMAC_RXC_OCTET(val) bfin_write32(EMAC_RXC_OCTET, val)
-#define bfin_read_EMAC_RXC_DMAOVF() bfin_read32(EMAC_RXC_DMAOVF)
-#define bfin_write_EMAC_RXC_DMAOVF(val) bfin_write32(EMAC_RXC_DMAOVF, val)
-#define bfin_read_EMAC_RXC_UNICST() bfin_read32(EMAC_RXC_UNICST)
-#define bfin_write_EMAC_RXC_UNICST(val) bfin_write32(EMAC_RXC_UNICST, val)
-#define bfin_read_EMAC_RXC_MULTI() bfin_read32(EMAC_RXC_MULTI)
-#define bfin_write_EMAC_RXC_MULTI(val) bfin_write32(EMAC_RXC_MULTI, val)
-#define bfin_read_EMAC_RXC_BROAD() bfin_read32(EMAC_RXC_BROAD)
-#define bfin_write_EMAC_RXC_BROAD(val) bfin_write32(EMAC_RXC_BROAD, val)
-#define bfin_read_EMAC_RXC_LNERRI() bfin_read32(EMAC_RXC_LNERRI)
-#define bfin_write_EMAC_RXC_LNERRI(val) bfin_write32(EMAC_RXC_LNERRI, val)
-#define bfin_read_EMAC_RXC_LNERRO() bfin_read32(EMAC_RXC_LNERRO)
-#define bfin_write_EMAC_RXC_LNERRO(val) bfin_write32(EMAC_RXC_LNERRO, val)
-#define bfin_read_EMAC_RXC_LONG() bfin_read32(EMAC_RXC_LONG)
-#define bfin_write_EMAC_RXC_LONG(val) bfin_write32(EMAC_RXC_LONG, val)
-#define bfin_read_EMAC_RXC_MACCTL() bfin_read32(EMAC_RXC_MACCTL)
-#define bfin_write_EMAC_RXC_MACCTL(val) bfin_write32(EMAC_RXC_MACCTL, val)
-#define bfin_read_EMAC_RXC_OPCODE() bfin_read32(EMAC_RXC_OPCODE)
-#define bfin_write_EMAC_RXC_OPCODE(val) bfin_write32(EMAC_RXC_OPCODE, val)
-#define bfin_read_EMAC_RXC_PAUSE() bfin_read32(EMAC_RXC_PAUSE)
-#define bfin_write_EMAC_RXC_PAUSE(val) bfin_write32(EMAC_RXC_PAUSE, val)
-#define bfin_read_EMAC_RXC_ALLFRM() bfin_read32(EMAC_RXC_ALLFRM)
-#define bfin_write_EMAC_RXC_ALLFRM(val) bfin_write32(EMAC_RXC_ALLFRM, val)
-#define bfin_read_EMAC_RXC_ALLOCT() bfin_read32(EMAC_RXC_ALLOCT)
-#define bfin_write_EMAC_RXC_ALLOCT(val) bfin_write32(EMAC_RXC_ALLOCT, val)
-#define bfin_read_EMAC_RXC_TYPED() bfin_read32(EMAC_RXC_TYPED)
-#define bfin_write_EMAC_RXC_TYPED(val) bfin_write32(EMAC_RXC_TYPED, val)
-#define bfin_read_EMAC_RXC_SHORT() bfin_read32(EMAC_RXC_SHORT)
-#define bfin_write_EMAC_RXC_SHORT(val) bfin_write32(EMAC_RXC_SHORT, val)
-#define bfin_read_EMAC_RXC_EQ64() bfin_read32(EMAC_RXC_EQ64)
-#define bfin_write_EMAC_RXC_EQ64(val) bfin_write32(EMAC_RXC_EQ64, val)
-#define bfin_read_EMAC_RXC_LT128() bfin_read32(EMAC_RXC_LT128)
-#define bfin_write_EMAC_RXC_LT128(val) bfin_write32(EMAC_RXC_LT128, val)
-#define bfin_read_EMAC_RXC_LT256() bfin_read32(EMAC_RXC_LT256)
-#define bfin_write_EMAC_RXC_LT256(val) bfin_write32(EMAC_RXC_LT256, val)
-#define bfin_read_EMAC_RXC_LT512() bfin_read32(EMAC_RXC_LT512)
-#define bfin_write_EMAC_RXC_LT512(val) bfin_write32(EMAC_RXC_LT512, val)
-#define bfin_read_EMAC_RXC_LT1024() bfin_read32(EMAC_RXC_LT1024)
-#define bfin_write_EMAC_RXC_LT1024(val) bfin_write32(EMAC_RXC_LT1024, val)
-#define bfin_read_EMAC_RXC_GE1024() bfin_read32(EMAC_RXC_GE1024)
-#define bfin_write_EMAC_RXC_GE1024(val) bfin_write32(EMAC_RXC_GE1024, val)
-
-#define bfin_read_EMAC_TXC_OK() bfin_read32(EMAC_TXC_OK)
-#define bfin_write_EMAC_TXC_OK(val) bfin_write32(EMAC_TXC_OK, val)
-#define bfin_read_EMAC_TXC_1COL() bfin_read32(EMAC_TXC_1COL)
-#define bfin_write_EMAC_TXC_1COL(val) bfin_write32(EMAC_TXC_1COL, val)
-#define bfin_read_EMAC_TXC_GT1COL() bfin_read32(EMAC_TXC_GT1COL)
-#define bfin_write_EMAC_TXC_GT1COL(val) bfin_write32(EMAC_TXC_GT1COL, val)
-#define bfin_read_EMAC_TXC_OCTET() bfin_read32(EMAC_TXC_OCTET)
-#define bfin_write_EMAC_TXC_OCTET(val) bfin_write32(EMAC_TXC_OCTET, val)
-#define bfin_read_EMAC_TXC_DEFER() bfin_read32(EMAC_TXC_DEFER)
-#define bfin_write_EMAC_TXC_DEFER(val) bfin_write32(EMAC_TXC_DEFER, val)
-#define bfin_read_EMAC_TXC_LATECL() bfin_read32(EMAC_TXC_LATECL)
-#define bfin_write_EMAC_TXC_LATECL(val) bfin_write32(EMAC_TXC_LATECL, val)
-#define bfin_read_EMAC_TXC_XS_COL() bfin_read32(EMAC_TXC_XS_COL)
-#define bfin_write_EMAC_TXC_XS_COL(val) bfin_write32(EMAC_TXC_XS_COL, val)
-#define bfin_read_EMAC_TXC_DMAUND() bfin_read32(EMAC_TXC_DMAUND)
-#define bfin_write_EMAC_TXC_DMAUND(val) bfin_write32(EMAC_TXC_DMAUND, val)
-#define bfin_read_EMAC_TXC_CRSERR() bfin_read32(EMAC_TXC_CRSERR)
-#define bfin_write_EMAC_TXC_CRSERR(val) bfin_write32(EMAC_TXC_CRSERR, val)
-#define bfin_read_EMAC_TXC_UNICST() bfin_read32(EMAC_TXC_UNICST)
-#define bfin_write_EMAC_TXC_UNICST(val) bfin_write32(EMAC_TXC_UNICST, val)
-#define bfin_read_EMAC_TXC_MULTI() bfin_read32(EMAC_TXC_MULTI)
-#define bfin_write_EMAC_TXC_MULTI(val) bfin_write32(EMAC_TXC_MULTI, val)
-#define bfin_read_EMAC_TXC_BROAD() bfin_read32(EMAC_TXC_BROAD)
-#define bfin_write_EMAC_TXC_BROAD(val) bfin_write32(EMAC_TXC_BROAD, val)
-#define bfin_read_EMAC_TXC_XS_DFR() bfin_read32(EMAC_TXC_XS_DFR)
-#define bfin_write_EMAC_TXC_XS_DFR(val) bfin_write32(EMAC_TXC_XS_DFR, val)
-#define bfin_read_EMAC_TXC_MACCTL() bfin_read32(EMAC_TXC_MACCTL)
-#define bfin_write_EMAC_TXC_MACCTL(val) bfin_write32(EMAC_TXC_MACCTL, val)
-#define bfin_read_EMAC_TXC_ALLFRM() bfin_read32(EMAC_TXC_ALLFRM)
-#define bfin_write_EMAC_TXC_ALLFRM(val) bfin_write32(EMAC_TXC_ALLFRM, val)
-#define bfin_read_EMAC_TXC_ALLOCT() bfin_read32(EMAC_TXC_ALLOCT)
-#define bfin_write_EMAC_TXC_ALLOCT(val) bfin_write32(EMAC_TXC_ALLOCT, val)
-#define bfin_read_EMAC_TXC_EQ64() bfin_read32(EMAC_TXC_EQ64)
-#define bfin_write_EMAC_TXC_EQ64(val) bfin_write32(EMAC_TXC_EQ64, val)
-#define bfin_read_EMAC_TXC_LT128() bfin_read32(EMAC_TXC_LT128)
-#define bfin_write_EMAC_TXC_LT128(val) bfin_write32(EMAC_TXC_LT128, val)
-#define bfin_read_EMAC_TXC_LT256() bfin_read32(EMAC_TXC_LT256)
-#define bfin_write_EMAC_TXC_LT256(val) bfin_write32(EMAC_TXC_LT256, val)
-#define bfin_read_EMAC_TXC_LT512() bfin_read32(EMAC_TXC_LT512)
-#define bfin_write_EMAC_TXC_LT512(val) bfin_write32(EMAC_TXC_LT512, val)
-#define bfin_read_EMAC_TXC_LT1024() bfin_read32(EMAC_TXC_LT1024)
-#define bfin_write_EMAC_TXC_LT1024(val) bfin_write32(EMAC_TXC_LT1024, val)
-#define bfin_read_EMAC_TXC_GE1024() bfin_read32(EMAC_TXC_GE1024)
-#define bfin_write_EMAC_TXC_GE1024(val) bfin_write32(EMAC_TXC_GE1024, val)
-#define bfin_read_EMAC_TXC_ABORT() bfin_read32(EMAC_TXC_ABORT)
-#define bfin_write_EMAC_TXC_ABORT(val) bfin_write32(EMAC_TXC_ABORT, val)
+/* PTP TSYNC Registers */
#define bfin_read_EMAC_PTP_CTL() bfin_read16(EMAC_PTP_CTL)
#define bfin_write_EMAC_PTP_CTL(val) bfin_write16(EMAC_PTP_CTL, val)
@@ -227,72 +56,4 @@
#define bfin_read_EMAC_PTP_PPS_PERIOD() bfin_read32(EMAC_PTP_PPS_PERIOD)
#define bfin_write_EMAC_PTP_PPS_PERIOD(val) bfin_write32(EMAC_PTP_PPS_PERIOD, val)
-/* Removable Storage Interface Registers */
-
-#define bfin_read_RSI_PWR_CTL() bfin_read16(RSI_PWR_CONTROL)
-#define bfin_write_RSI_PWR_CTL(val) bfin_write16(RSI_PWR_CONTROL, val)
-#define bfin_read_RSI_CLK_CTL() bfin_read16(RSI_CLK_CONTROL)
-#define bfin_write_RSI_CLK_CTL(val) bfin_write16(RSI_CLK_CONTROL, val)
-#define bfin_read_RSI_ARGUMENT() bfin_read32(RSI_ARGUMENT)
-#define bfin_write_RSI_ARGUMENT(val) bfin_write32(RSI_ARGUMENT, val)
-#define bfin_read_RSI_COMMAND() bfin_read16(RSI_COMMAND)
-#define bfin_write_RSI_COMMAND(val) bfin_write16(RSI_COMMAND, val)
-#define bfin_read_RSI_RESP_CMD() bfin_read16(RSI_RESP_CMD)
-#define bfin_write_RSI_RESP_CMD(val) bfin_write16(RSI_RESP_CMD, val)
-#define bfin_read_RSI_RESPONSE0() bfin_read32(RSI_RESPONSE0)
-#define bfin_write_RSI_RESPONSE0(val) bfin_write32(RSI_RESPONSE0, val)
-#define bfin_read_RSI_RESPONSE1() bfin_read32(RSI_RESPONSE1)
-#define bfin_write_RSI_RESPONSE1(val) bfin_write32(RSI_RESPONSE1, val)
-#define bfin_read_RSI_RESPONSE2() bfin_read32(RSI_RESPONSE2)
-#define bfin_write_RSI_RESPONSE2(val) bfin_write32(RSI_RESPONSE2, val)
-#define bfin_read_RSI_RESPONSE3() bfin_read32(RSI_RESPONSE3)
-#define bfin_write_RSI_RESPONSE3(val) bfin_write32(RSI_RESPONSE3, val)
-#define bfin_read_RSI_DATA_TIMER() bfin_read32(RSI_DATA_TIMER)
-#define bfin_write_RSI_DATA_TIMER(val) bfin_write32(RSI_DATA_TIMER, val)
-#define bfin_read_RSI_DATA_LGTH() bfin_read16(RSI_DATA_LGTH)
-#define bfin_write_RSI_DATA_LGTH(val) bfin_write16(RSI_DATA_LGTH, val)
-#define bfin_read_RSI_DATA_CTL() bfin_read16(RSI_DATA_CONTROL)
-#define bfin_write_RSI_DATA_CTL(val) bfin_write16(RSI_DATA_CONTROL, val)
-#define bfin_read_RSI_DATA_CNT() bfin_read16(RSI_DATA_CNT)
-#define bfin_write_RSI_DATA_CNT(val) bfin_write16(RSI_DATA_CNT, val)
-#define bfin_read_RSI_STATUS() bfin_read32(RSI_STATUS)
-#define bfin_write_RSI_STATUS(val) bfin_write32(RSI_STATUS, val)
-#define bfin_read_RSI_STATUS_CLR() bfin_read16(RSI_STATUSCL)
-#define bfin_write_RSI_STATUS_CLR(val) bfin_write16(RSI_STATUSCL, val)
-#define bfin_read_RSI_MASK0() bfin_read32(RSI_MASK0)
-#define bfin_write_RSI_MASK0(val) bfin_write32(RSI_MASK0, val)
-#define bfin_read_RSI_MASK1() bfin_read32(RSI_MASK1)
-#define bfin_write_RSI_MASK1(val) bfin_write32(RSI_MASK1, val)
-#define bfin_read_RSI_FIFO_CNT() bfin_read16(RSI_FIFO_CNT)
-#define bfin_write_RSI_FIFO_CNT(val) bfin_write16(RSI_FIFO_CNT, val)
-#define bfin_read_RSI_CEATA_CTL() bfin_read16(RSI_CEATA_CONTROL)
-#define bfin_write_RSI_CEATA_CTL(val) bfin_write16(RSI_CEATA_CONTROL, val)
-#define bfin_read_RSI_FIFO() bfin_read32(RSI_FIFO)
-#define bfin_write_RSI_FIFO(val) bfin_write32(RSI_FIFO, val)
-#define bfin_read_RSI_E_STATUS() bfin_read16(RSI_ESTAT)
-#define bfin_write_RSI_E_STATUS(val) bfin_write16(RSI_ESTAT, val)
-#define bfin_read_RSI_E_MASK() bfin_read16(RSI_EMASK)
-#define bfin_write_RSI_E_MASK(val) bfin_write16(RSI_EMASK, val)
-#define bfin_read_RSI_CFG() bfin_read16(RSI_CONFIG)
-#define bfin_write_RSI_CFG(val) bfin_write16(RSI_CONFIG, val)
-#define bfin_read_RSI_RD_WAIT_EN() bfin_read16(RSI_RD_WAIT_EN)
-#define bfin_write_RSI_RD_WAIT_EN(val) bfin_write16(RSI_RD_WAIT_EN, val)
-#define bfin_read_RSI_PID0() bfin_read16(RSI_PID0)
-#define bfin_write_RSI_PID0(val) bfin_write16(RSI_PID0, val)
-#define bfin_read_RSI_PID1() bfin_read16(RSI_PID1)
-#define bfin_write_RSI_PID1(val) bfin_write16(RSI_PID1, val)
-#define bfin_read_RSI_PID2() bfin_read16(RSI_PID2)
-#define bfin_write_RSI_PID2(val) bfin_write16(RSI_PID2, val)
-#define bfin_read_RSI_PID3() bfin_read16(RSI_PID3)
-#define bfin_write_RSI_PID3(val) bfin_write16(RSI_PID3, val)
-#define bfin_read_RSI_PID4() bfin_read16(RSI_PID4)
-#define bfin_write_RSI_PID4(val) bfin_write16(RSI_PID4, val)
-#define bfin_read_RSI_PID5() bfin_read16(RSI_PID5)
-#define bfin_write_RSI_PID5(val) bfin_write16(RSI_PID5, val)
-#define bfin_read_RSI_PID6() bfin_read16(RSI_PID6)
-#define bfin_write_RSI_PID6(val) bfin_write16(RSI_PID6, val)
-#define bfin_read_RSI_PID7() bfin_read16(RSI_PID7)
-#define bfin_write_RSI_PID7(val) bfin_write16(RSI_PID7, val)
-
-
#endif /* _CDEF_BF518_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
index 1d970df7aee..e548e9d1d6f 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
@@ -131,23 +131,6 @@
#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
-/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
-#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL)
-#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL, val)
-#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG)
-#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG, val)
-#define bfin_read_SPI_STAT() bfin_read16(SPI_STAT)
-#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT, val)
-#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR)
-#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR, val)
-#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR)
-#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR, val)
-#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD)
-#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD, val)
-#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW)
-#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW, val)
-
-
/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
@@ -844,6 +827,7 @@
#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
@@ -1062,17 +1046,6 @@
#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
-/* OTP/FUSE Registers */
-
-#define bfin_read_OTP_CONTROL() bfin_read16(OTP_CONTROL)
-#define bfin_write_OTP_CONTROL(val) bfin_write16(OTP_CONTROL, val)
-#define bfin_read_OTP_BEN() bfin_read16(OTP_BEN)
-#define bfin_write_OTP_BEN(val) bfin_write16(OTP_BEN, val)
-#define bfin_read_OTP_STATUS() bfin_read16(OTP_STATUS)
-#define bfin_write_OTP_STATUS(val) bfin_write16(OTP_STATUS, val)
-#define bfin_read_OTP_TIMING() bfin_read32(OTP_TIMING)
-#define bfin_write_OTP_TIMING(val) bfin_write32(OTP_TIMING, val)
-
/* Security Registers */
#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
@@ -1082,52 +1055,6 @@
#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
-/* OTP Read/Write Data Buffer Registers */
-
-#define bfin_read_OTP_DATA0() bfin_read32(OTP_DATA0)
-#define bfin_write_OTP_DATA0(val) bfin_write32(OTP_DATA0, val)
-#define bfin_read_OTP_DATA1() bfin_read32(OTP_DATA1)
-#define bfin_write_OTP_DATA1(val) bfin_write32(OTP_DATA1, val)
-#define bfin_read_OTP_DATA2() bfin_read32(OTP_DATA2)
-#define bfin_write_OTP_DATA2(val) bfin_write32(OTP_DATA2, val)
-#define bfin_read_OTP_DATA3() bfin_read32(OTP_DATA3)
-#define bfin_write_OTP_DATA3(val) bfin_write32(OTP_DATA3, val)
-
-/* NFC Registers */
-
-#define bfin_read_NFC_CTL() bfin_read16(NFC_CTL)
-#define bfin_write_NFC_CTL(val) bfin_write16(NFC_CTL, val)
-#define bfin_read_NFC_STAT() bfin_read16(NFC_STAT)
-#define bfin_write_NFC_STAT(val) bfin_write16(NFC_STAT, val)
-#define bfin_read_NFC_IRQSTAT() bfin_read16(NFC_IRQSTAT)
-#define bfin_write_NFC_IRQSTAT(val) bfin_write16(NFC_IRQSTAT, val)
-#define bfin_read_NFC_IRQMASK() bfin_read16(NFC_IRQMASK)
-#define bfin_write_NFC_IRQMASK(val) bfin_write16(NFC_IRQMASK, val)
-#define bfin_read_NFC_ECC0() bfin_read16(NFC_ECC0)
-#define bfin_write_NFC_ECC0(val) bfin_write16(NFC_ECC0, val)
-#define bfin_read_NFC_ECC1() bfin_read16(NFC_ECC1)
-#define bfin_write_NFC_ECC1(val) bfin_write16(NFC_ECC1, val)
-#define bfin_read_NFC_ECC2() bfin_read16(NFC_ECC2)
-#define bfin_write_NFC_ECC2(val) bfin_write16(NFC_ECC2, val)
-#define bfin_read_NFC_ECC3() bfin_read16(NFC_ECC3)
-#define bfin_write_NFC_ECC3(val) bfin_write16(NFC_ECC3, val)
-#define bfin_read_NFC_COUNT() bfin_read16(NFC_COUNT)
-#define bfin_write_NFC_COUNT(val) bfin_write16(NFC_COUNT, val)
-#define bfin_read_NFC_RST() bfin_read16(NFC_RST)
-#define bfin_write_NFC_RST(val) bfin_write16(NFC_RST, val)
-#define bfin_read_NFC_PGCTL() bfin_read16(NFC_PGCTL)
-#define bfin_write_NFC_PGCTL(val) bfin_write16(NFC_PGCTL, val)
-#define bfin_read_NFC_READ() bfin_read16(NFC_READ)
-#define bfin_write_NFC_READ(val) bfin_write16(NFC_READ, val)
-#define bfin_read_NFC_ADDR() bfin_read16(NFC_ADDR)
-#define bfin_write_NFC_ADDR(val) bfin_write16(NFC_ADDR, val)
-#define bfin_read_NFC_CMD() bfin_read16(NFC_CMD)
-#define bfin_write_NFC_CMD(val) bfin_write16(NFC_CMD, val)
-#define bfin_read_NFC_DATA_WR() bfin_read16(NFC_DATA_WR)
-#define bfin_write_NFC_DATA_WR(val) bfin_write16(NFC_DATA_WR, val)
-#define bfin_read_NFC_DATA_RD() bfin_read16(NFC_DATA_RD)
-#define bfin_write_NFC_DATA_RD(val) bfin_write16(NFC_DATA_RD, val)
-
/* These need to be last due to the cdef/linux inter-dependencies */
#include <asm/irq.h>
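The bfin_clear_PPI_STATUS() macro added above writes 0xFFFF to PPI_STATUS, which reads as treating the status register as write-1-to-clear. A standalone sketch of that W1C behaviour, with an ordinary variable standing in for the memory-mapped register (illustrative only):

#include <stdio.h>
#include <stdint.h>

/* stand-in for the memory-mapped PPI_STATUS register */
static volatile uint16_t ppi_status;

static uint16_t read_status(void) { return ppi_status; }

/* write-1-to-clear: bits set in val are cleared in the register */
static void write_status(uint16_t val) { ppi_status &= (uint16_t)~val; }

static void clear_status(void) { write_status(0xFFFF); }	/* mirrors bfin_clear_PPI_STATUS() */

int main(void)
{
	ppi_status = 0x0005;		/* pretend two sticky error bits are latched */
	printf("before: 0x%04x\n", read_status());
	clear_status();
	printf("after:  0x%04x\n", read_status());
	return 0;
}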
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF514.h b/arch/blackfin/mach-bf518/include/mach/defBF514.h
index b5adca23a78..92e950d6e99 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF514.h
@@ -7,49 +7,8 @@
#ifndef _DEF_BF514_H
#define _DEF_BF514_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF514 */
-
-/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "defBF51x_base.h"
-
-/* The following are the #defines needed by ADSP-BF514 that are not in the common header */
-
-/* SDH Registers */
-
-#define SDH_PWR_CTL 0xFFC03900 /* SDH Power Control */
-#define SDH_CLK_CTL 0xFFC03904 /* SDH Clock Control */
-#define SDH_ARGUMENT 0xFFC03908 /* SDH Argument */
-#define SDH_COMMAND 0xFFC0390C /* SDH Command */
-#define SDH_RESP_CMD 0xFFC03910 /* SDH Response Command */
-#define SDH_RESPONSE0 0xFFC03914 /* SDH Response0 */
-#define SDH_RESPONSE1 0xFFC03918 /* SDH Response1 */
-#define SDH_RESPONSE2 0xFFC0391C /* SDH Response2 */
-#define SDH_RESPONSE3 0xFFC03920 /* SDH Response3 */
-#define SDH_DATA_TIMER 0xFFC03924 /* SDH Data Timer */
-#define SDH_DATA_LGTH 0xFFC03928 /* SDH Data Length */
-#define SDH_DATA_CTL 0xFFC0392C /* SDH Data Control */
-#define SDH_DATA_CNT 0xFFC03930 /* SDH Data Counter */
-#define SDH_STATUS 0xFFC03934 /* SDH Status */
-#define SDH_STATUS_CLR 0xFFC03938 /* SDH Status Clear */
-#define SDH_MASK0 0xFFC0393C /* SDH Interrupt0 Mask */
-#define SDH_MASK1 0xFFC03940 /* SDH Interrupt1 Mask */
-#define SDH_FIFO_CNT 0xFFC03948 /* SDH FIFO Counter */
-#define SDH_FIFO 0xFFC03980 /* SDH Data FIFO */
-#define SDH_E_STATUS 0xFFC039C0 /* SDH Exception Status */
-#define SDH_E_MASK 0xFFC039C4 /* SDH Exception Mask */
-#define SDH_CFG 0xFFC039C8 /* SDH Configuration */
-#define SDH_RD_WAIT_EN 0xFFC039CC /* SDH Read Wait Enable */
-#define SDH_PID0 0xFFC039D0 /* SDH Peripheral Identification0 */
-#define SDH_PID1 0xFFC039D4 /* SDH Peripheral Identification1 */
-#define SDH_PID2 0xFFC039D8 /* SDH Peripheral Identification2 */
-#define SDH_PID3 0xFFC039DC /* SDH Peripheral Identification3 */
-#define SDH_PID4 0xFFC039E0 /* SDH Peripheral Identification4 */
-#define SDH_PID5 0xFFC039E4 /* SDH Peripheral Identification5 */
-#define SDH_PID6 0xFFC039E8 /* SDH Peripheral Identification6 */
-#define SDH_PID7 0xFFC039EC /* SDH Peripheral Identification7 */
+/* BF514 is BF512 + RSI */
+#include "defBF512.h"
/* Removable Storage Interface Registers */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF516.h b/arch/blackfin/mach-bf518/include/mach/defBF516.h
index 7eb18774d72..22a3aa0d262 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF516.h
@@ -7,13 +7,8 @@
#ifndef _DEF_BF516_H
#define _DEF_BF516_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF516 */
-
-/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "defBF51x_base.h"
+/* BF516 is BF514 + EMAC */
+#include "defBF514.h"
/* The following are the #defines needed by ADSP-BF516 that are not in the common header */
/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
@@ -394,208 +389,4 @@
#define TX_GE1024_CNT 0x00200000 /* 1024-Max-Byte TX Frames Sent */
#define TX_ABORT_CNT 0x00400000 /* TX Frames Aborted */
-/* SDH Registers */
-
-#define SDH_PWR_CTL 0xFFC03900 /* SDH Power Control */
-#define SDH_CLK_CTL 0xFFC03904 /* SDH Clock Control */
-#define SDH_ARGUMENT 0xFFC03908 /* SDH Argument */
-#define SDH_COMMAND 0xFFC0390C /* SDH Command */
-#define SDH_RESP_CMD 0xFFC03910 /* SDH Response Command */
-#define SDH_RESPONSE0 0xFFC03914 /* SDH Response0 */
-#define SDH_RESPONSE1 0xFFC03918 /* SDH Response1 */
-#define SDH_RESPONSE2 0xFFC0391C /* SDH Response2 */
-#define SDH_RESPONSE3 0xFFC03920 /* SDH Response3 */
-#define SDH_DATA_TIMER 0xFFC03924 /* SDH Data Timer */
-#define SDH_DATA_LGTH 0xFFC03928 /* SDH Data Length */
-#define SDH_DATA_CTL 0xFFC0392C /* SDH Data Control */
-#define SDH_DATA_CNT 0xFFC03930 /* SDH Data Counter */
-#define SDH_STATUS 0xFFC03934 /* SDH Status */
-#define SDH_STATUS_CLR 0xFFC03938 /* SDH Status Clear */
-#define SDH_MASK0 0xFFC0393C /* SDH Interrupt0 Mask */
-#define SDH_MASK1 0xFFC03940 /* SDH Interrupt1 Mask */
-#define SDH_FIFO_CNT 0xFFC03948 /* SDH FIFO Counter */
-#define SDH_FIFO 0xFFC03980 /* SDH Data FIFO */
-#define SDH_E_STATUS 0xFFC039C0 /* SDH Exception Status */
-#define SDH_E_MASK 0xFFC039C4 /* SDH Exception Mask */
-#define SDH_CFG 0xFFC039C8 /* SDH Configuration */
-#define SDH_RD_WAIT_EN 0xFFC039CC /* SDH Read Wait Enable */
-#define SDH_PID0 0xFFC039D0 /* SDH Peripheral Identification0 */
-#define SDH_PID1 0xFFC039D4 /* SDH Peripheral Identification1 */
-#define SDH_PID2 0xFFC039D8 /* SDH Peripheral Identification2 */
-#define SDH_PID3 0xFFC039DC /* SDH Peripheral Identification3 */
-#define SDH_PID4 0xFFC039E0 /* SDH Peripheral Identification4 */
-#define SDH_PID5 0xFFC039E4 /* SDH Peripheral Identification5 */
-#define SDH_PID6 0xFFC039E8 /* SDH Peripheral Identification6 */
-#define SDH_PID7 0xFFC039EC /* SDH Peripheral Identification7 */
-
-/* Removable Storage Interface Registers */
-
-#define RSI_PWR_CONTROL 0xFFC03800 /* RSI Power Control Register */
-#define RSI_CLK_CONTROL 0xFFC03804 /* RSI Clock Control Register */
-#define RSI_ARGUMENT 0xFFC03808 /* RSI Argument Register */
-#define RSI_COMMAND 0xFFC0380C /* RSI Command Register */
-#define RSI_RESP_CMD 0xFFC03810 /* RSI Response Command Register */
-#define RSI_RESPONSE0 0xFFC03814 /* RSI Response Register */
-#define RSI_RESPONSE1 0xFFC03818 /* RSI Response Register */
-#define RSI_RESPONSE2 0xFFC0381C /* RSI Response Register */
-#define RSI_RESPONSE3 0xFFC03820 /* RSI Response Register */
-#define RSI_DATA_TIMER 0xFFC03824 /* RSI Data Timer Register */
-#define RSI_DATA_LGTH 0xFFC03828 /* RSI Data Length Register */
-#define RSI_DATA_CONTROL 0xFFC0382C /* RSI Data Control Register */
-#define RSI_DATA_CNT 0xFFC03830 /* RSI Data Counter Register */
-#define RSI_STATUS 0xFFC03834 /* RSI Status Register */
-#define RSI_STATUSCL 0xFFC03838 /* RSI Status Clear Register */
-#define RSI_MASK0 0xFFC0383C /* RSI Interrupt 0 Mask Register */
-#define RSI_MASK1 0xFFC03840 /* RSI Interrupt 1 Mask Register */
-#define RSI_FIFO_CNT 0xFFC03848 /* RSI FIFO Counter Register */
-#define RSI_CEATA_CONTROL 0xFFC0384C /* RSI CEATA Register */
-#define RSI_FIFO 0xFFC03880 /* RSI Data FIFO Register */
-#define RSI_ESTAT 0xFFC038C0 /* RSI Exception Status Register */
-#define RSI_EMASK 0xFFC038C4 /* RSI Exception Mask Register */
-#define RSI_CONFIG 0xFFC038C8 /* RSI Configuration Register */
-#define RSI_RD_WAIT_EN 0xFFC038CC /* RSI Read Wait Enable Register */
-#define RSI_PID0 0xFFC03FE0 /* RSI Peripheral ID Register 0 */
-#define RSI_PID1 0xFFC03FE4 /* RSI Peripheral ID Register 1 */
-#define RSI_PID2 0xFFC03FE8 /* RSI Peripheral ID Register 2 */
-#define RSI_PID3 0xFFC03FEC /* RSI Peripheral ID Register 3 */
-#define RSI_PID4 0xFFC03FF0 /* RSI Peripheral ID Register 4 */
-#define RSI_PID5 0xFFC03FF4 /* RSI Peripheral ID Register 5 */
-#define RSI_PID6 0xFFC03FF8 /* RSI Peripheral ID Register 6 */
-#define RSI_PID7 0xFFC03FFC /* RSI Peripheral ID Register 7 */
-
-/* ********************************************************** */
-/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
-/* and MULTI BIT READ MACROS */
-/* ********************************************************** */
-
-/* Bit masks for SDH_COMMAND */
-
-#define CMD_IDX 0x3f /* Command Index */
-#define CMD_RSP 0x40 /* Response */
-#define CMD_L_RSP 0x80 /* Long Response */
-#define CMD_INT_E 0x100 /* Command Interrupt */
-#define CMD_PEND_E 0x200 /* Command Pending */
-#define CMD_E 0x400 /* Command Enable */
-
-/* Bit masks for SDH_PWR_CTL */
-
-#define PWR_ON 0x3 /* Power On */
-#if 0
-#define TBD 0x3c /* TBD */
-#endif
-#define SD_CMD_OD 0x40 /* Open Drain Output */
-#define ROD_CTL 0x80 /* Rod Control */
-
-/* Bit masks for SDH_CLK_CTL */
-
-#define CLKDIV 0xff /* MC_CLK Divisor */
-#define CLK_E 0x100 /* MC_CLK Bus Clock Enable */
-#define PWR_SV_E 0x200 /* Power Save Enable */
-#define CLKDIV_BYPASS 0x400 /* Bypass Divisor */
-#define WIDE_BUS 0x800 /* Wide Bus Mode Enable */
-
-/* Bit masks for SDH_RESP_CMD */
-
-#define RESP_CMD 0x3f /* Response Command */
-
-/* Bit masks for SDH_DATA_CTL */
-
-#define DTX_E 0x1 /* Data Transfer Enable */
-#define DTX_DIR 0x2 /* Data Transfer Direction */
-#define DTX_MODE 0x4 /* Data Transfer Mode */
-#define DTX_DMA_E 0x8 /* Data Transfer DMA Enable */
-#define DTX_BLK_LGTH 0xf0 /* Data Transfer Block Length */
-
-/* Bit masks for SDH_STATUS */
-
-#define CMD_CRC_FAIL 0x1 /* CMD CRC Fail */
-#define DAT_CRC_FAIL 0x2 /* Data CRC Fail */
-#define CMD_TIME_OUT 0x4 /* CMD Time Out */
-#define DAT_TIME_OUT 0x8 /* Data Time Out */
-#define TX_UNDERRUN 0x10 /* Transmit Underrun */
-#define RX_OVERRUN 0x20 /* Receive Overrun */
-#define CMD_RESP_END 0x40 /* CMD Response End */
-#define CMD_SENT 0x80 /* CMD Sent */
-#define DAT_END 0x100 /* Data End */
-#define START_BIT_ERR 0x200 /* Start Bit Error */
-#define DAT_BLK_END 0x400 /* Data Block End */
-#define CMD_ACT 0x800 /* CMD Active */
-#define TX_ACT 0x1000 /* Transmit Active */
-#define RX_ACT 0x2000 /* Receive Active */
-#define TX_FIFO_STAT 0x4000 /* Transmit FIFO Status */
-#define RX_FIFO_STAT 0x8000 /* Receive FIFO Status */
-#define TX_FIFO_FULL 0x10000 /* Transmit FIFO Full */
-#define RX_FIFO_FULL 0x20000 /* Receive FIFO Full */
-#define TX_FIFO_ZERO 0x40000 /* Transmit FIFO Empty */
-#define RX_DAT_ZERO 0x80000 /* Receive FIFO Empty */
-#define TX_DAT_RDY 0x100000 /* Transmit Data Available */
-#define RX_FIFO_RDY 0x200000 /* Receive Data Available */
-
-/* Bit masks for SDH_STATUS_CLR */
-
-#define CMD_CRC_FAIL_STAT 0x1 /* CMD CRC Fail Status */
-#define DAT_CRC_FAIL_STAT 0x2 /* Data CRC Fail Status */
-#define CMD_TIMEOUT_STAT 0x4 /* CMD Time Out Status */
-#define DAT_TIMEOUT_STAT 0x8 /* Data Time Out status */
-#define TX_UNDERRUN_STAT 0x10 /* Transmit Underrun Status */
-#define RX_OVERRUN_STAT 0x20 /* Receive Overrun Status */
-#define CMD_RESP_END_STAT 0x40 /* CMD Response End Status */
-#define CMD_SENT_STAT 0x80 /* CMD Sent Status */
-#define DAT_END_STAT 0x100 /* Data End Status */
-#define START_BIT_ERR_STAT 0x200 /* Start Bit Error Status */
-#define DAT_BLK_END_STAT 0x400 /* Data Block End Status */
-
-/* Bit masks for SDH_MASK0 */
-
-#define CMD_CRC_FAIL_MASK 0x1 /* CMD CRC Fail Mask */
-#define DAT_CRC_FAIL_MASK 0x2 /* Data CRC Fail Mask */
-#define CMD_TIMEOUT_MASK 0x4 /* CMD Time Out Mask */
-#define DAT_TIMEOUT_MASK 0x8 /* Data Time Out Mask */
-#define TX_UNDERRUN_MASK 0x10 /* Transmit Underrun Mask */
-#define RX_OVERRUN_MASK 0x20 /* Receive Overrun Mask */
-#define CMD_RESP_END_MASK 0x40 /* CMD Response End Mask */
-#define CMD_SENT_MASK 0x80 /* CMD Sent Mask */
-#define DAT_END_MASK 0x100 /* Data End Mask */
-#define START_BIT_ERR_MASK 0x200 /* Start Bit Error Mask */
-#define DAT_BLK_END_MASK 0x400 /* Data Block End Mask */
-#define CMD_ACT_MASK 0x800 /* CMD Active Mask */
-#define TX_ACT_MASK 0x1000 /* Transmit Active Mask */
-#define RX_ACT_MASK 0x2000 /* Receive Active Mask */
-#define TX_FIFO_STAT_MASK 0x4000 /* Transmit FIFO Status Mask */
-#define RX_FIFO_STAT_MASK 0x8000 /* Receive FIFO Status Mask */
-#define TX_FIFO_FULL_MASK 0x10000 /* Transmit FIFO Full Mask */
-#define RX_FIFO_FULL_MASK 0x20000 /* Receive FIFO Full Mask */
-#define TX_FIFO_ZERO_MASK 0x40000 /* Transmit FIFO Empty Mask */
-#define RX_DAT_ZERO_MASK 0x80000 /* Receive FIFO Empty Mask */
-#define TX_DAT_RDY_MASK 0x100000 /* Transmit Data Available Mask */
-#define RX_FIFO_RDY_MASK 0x200000 /* Receive Data Available Mask */
-
-/* Bit masks for SDH_FIFO_CNT */
-
-#define FIFO_COUNT 0x7fff /* FIFO Count */
-
-/* Bit masks for SDH_E_STATUS */
-
-#define SDIO_INT_DET 0x2 /* SDIO Int Detected */
-#define SD_CARD_DET 0x10 /* SD Card Detect */
-
-/* Bit masks for SDH_E_MASK */
-
-#define SDIO_MSK 0x2 /* Mask SDIO Int Detected */
-#define SCD_MSK 0x40 /* Mask Card Detect */
-
-/* Bit masks for SDH_CFG */
-
-#define CLKS_EN 0x1 /* Clocks Enable */
-#define SD4E 0x4 /* SDIO 4-Bit Enable */
-#define MWE 0x8 /* Moving Window Enable */
-#define SD_RST 0x10 /* SDMMC Reset */
-#define PUP_SDDAT 0x20 /* Pull-up SD_DAT */
-#define PUP_SDDAT3 0x40 /* Pull-up SD_DAT3 */
-#define PD_SDDAT3 0x80 /* Pull-down SD_DAT3 */
-
-/* Bit masks for SDH_RD_WAIT_EN */
-
-#define RWR 0x1 /* Read Wait Request */
-
#endif /* _DEF_BF516_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF518.h b/arch/blackfin/mach-bf518/include/mach/defBF518.h
index 794cf06eb5b..cb18270e55c 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF518.h
@@ -7,461 +7,8 @@
#ifndef _DEF_BF518_H
#define _DEF_BF518_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF518 */
-
-/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "defBF51x_base.h"
-
-/* The following are the #defines needed by ADSP-BF518 that are not in the common header */
-/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
-
-#define EMAC_OPMODE 0xFFC03000 /* Operating Mode Register */
-#define EMAC_ADDRLO 0xFFC03004 /* Address Low (32 LSBs) Register */
-#define EMAC_ADDRHI 0xFFC03008 /* Address High (16 MSBs) Register */
-#define EMAC_HASHLO 0xFFC0300C /* Multicast Hash Table Low (Bins 31-0) Register */
-#define EMAC_HASHHI 0xFFC03010 /* Multicast Hash Table High (Bins 63-32) Register */
-#define EMAC_STAADD 0xFFC03014 /* Station Management Address Register */
-#define EMAC_STADAT 0xFFC03018 /* Station Management Data Register */
-#define EMAC_FLC 0xFFC0301C /* Flow Control Register */
-#define EMAC_VLAN1 0xFFC03020 /* VLAN1 Tag Register */
-#define EMAC_VLAN2 0xFFC03024 /* VLAN2 Tag Register */
-#define EMAC_WKUP_CTL 0xFFC0302C /* Wake-Up Control/Status Register */
-#define EMAC_WKUP_FFMSK0 0xFFC03030 /* Wake-Up Frame Filter 0 Byte Mask Register */
-#define EMAC_WKUP_FFMSK1 0xFFC03034 /* Wake-Up Frame Filter 1 Byte Mask Register */
-#define EMAC_WKUP_FFMSK2 0xFFC03038 /* Wake-Up Frame Filter 2 Byte Mask Register */
-#define EMAC_WKUP_FFMSK3 0xFFC0303C /* Wake-Up Frame Filter 3 Byte Mask Register */
-#define EMAC_WKUP_FFCMD 0xFFC03040 /* Wake-Up Frame Filter Commands Register */
-#define EMAC_WKUP_FFOFF 0xFFC03044 /* Wake-Up Frame Filter Offsets Register */
-#define EMAC_WKUP_FFCRC0 0xFFC03048 /* Wake-Up Frame Filter 0,1 CRC-16 Register */
-#define EMAC_WKUP_FFCRC1 0xFFC0304C /* Wake-Up Frame Filter 2,3 CRC-16 Register */
-
-#define EMAC_SYSCTL 0xFFC03060 /* EMAC System Control Register */
-#define EMAC_SYSTAT 0xFFC03064 /* EMAC System Status Register */
-#define EMAC_RX_STAT 0xFFC03068 /* RX Current Frame Status Register */
-#define EMAC_RX_STKY 0xFFC0306C /* RX Sticky Frame Status Register */
-#define EMAC_RX_IRQE 0xFFC03070 /* RX Frame Status Interrupt Enables Register */
-#define EMAC_TX_STAT 0xFFC03074 /* TX Current Frame Status Register */
-#define EMAC_TX_STKY 0xFFC03078 /* TX Sticky Frame Status Register */
-#define EMAC_TX_IRQE 0xFFC0307C /* TX Frame Status Interrupt Enables Register */
-
-#define EMAC_MMC_CTL 0xFFC03080 /* MMC Counter Control Register */
-#define EMAC_MMC_RIRQS 0xFFC03084 /* MMC RX Interrupt Status Register */
-#define EMAC_MMC_RIRQE 0xFFC03088 /* MMC RX Interrupt Enables Register */
-#define EMAC_MMC_TIRQS 0xFFC0308C /* MMC TX Interrupt Status Register */
-#define EMAC_MMC_TIRQE 0xFFC03090 /* MMC TX Interrupt Enables Register */
-
-#define EMAC_RXC_OK 0xFFC03100 /* RX Frame Successful Count */
-#define EMAC_RXC_FCS 0xFFC03104 /* RX Frame FCS Failure Count */
-#define EMAC_RXC_ALIGN 0xFFC03108 /* RX Alignment Error Count */
-#define EMAC_RXC_OCTET 0xFFC0310C /* RX Octets Successfully Received Count */
-#define EMAC_RXC_DMAOVF 0xFFC03110 /* Internal MAC Sublayer Error RX Frame Count */
-#define EMAC_RXC_UNICST 0xFFC03114 /* Unicast RX Frame Count */
-#define EMAC_RXC_MULTI 0xFFC03118 /* Multicast RX Frame Count */
-#define EMAC_RXC_BROAD 0xFFC0311C /* Broadcast RX Frame Count */
-#define EMAC_RXC_LNERRI 0xFFC03120 /* RX Frame In Range Error Count */
-#define EMAC_RXC_LNERRO 0xFFC03124 /* RX Frame Out Of Range Error Count */
-#define EMAC_RXC_LONG 0xFFC03128 /* RX Frame Too Long Count */
-#define EMAC_RXC_MACCTL 0xFFC0312C /* MAC Control RX Frame Count */
-#define EMAC_RXC_OPCODE 0xFFC03130 /* Unsupported Op-Code RX Frame Count */
-#define EMAC_RXC_PAUSE 0xFFC03134 /* MAC Control Pause RX Frame Count */
-#define EMAC_RXC_ALLFRM 0xFFC03138 /* Overall RX Frame Count */
-#define EMAC_RXC_ALLOCT 0xFFC0313C /* Overall RX Octet Count */
-#define EMAC_RXC_TYPED 0xFFC03140 /* Type/Length Consistent RX Frame Count */
-#define EMAC_RXC_SHORT 0xFFC03144 /* RX Frame Fragment Count - Byte Count x < 64 */
-#define EMAC_RXC_EQ64 0xFFC03148 /* Good RX Frame Count - Byte Count x = 64 */
-#define EMAC_RXC_LT128 0xFFC0314C /* Good RX Frame Count - Byte Count 64 < x < 128 */
-#define EMAC_RXC_LT256 0xFFC03150 /* Good RX Frame Count - Byte Count 128 <= x < 256 */
-#define EMAC_RXC_LT512 0xFFC03154 /* Good RX Frame Count - Byte Count 256 <= x < 512 */
-#define EMAC_RXC_LT1024 0xFFC03158 /* Good RX Frame Count - Byte Count 512 <= x < 1024 */
-#define EMAC_RXC_GE1024 0xFFC0315C /* Good RX Frame Count - Byte Count x >= 1024 */
-
-#define EMAC_TXC_OK 0xFFC03180 /* TX Frame Successful Count */
-#define EMAC_TXC_1COL 0xFFC03184 /* TX Frames Successful After Single Collision Count */
-#define EMAC_TXC_GT1COL 0xFFC03188 /* TX Frames Successful After Multiple Collisions Count */
-#define EMAC_TXC_OCTET 0xFFC0318C /* TX Octets Successfully Transmitted Count */
-#define EMAC_TXC_DEFER 0xFFC03190 /* TX Frame Delayed Due To Busy Count */
-#define EMAC_TXC_LATECL 0xFFC03194 /* Late TX Collisions Count */
-#define EMAC_TXC_XS_COL 0xFFC03198 /* TX Frame Failed Due To Excessive Collisions Count */
-#define EMAC_TXC_DMAUND 0xFFC0319C /* Internal MAC Sublayer Error TX Frame Count */
-#define EMAC_TXC_CRSERR 0xFFC031A0 /* Carrier Sense Deasserted During TX Frame Count */
-#define EMAC_TXC_UNICST 0xFFC031A4 /* Unicast TX Frame Count */
-#define EMAC_TXC_MULTI 0xFFC031A8 /* Multicast TX Frame Count */
-#define EMAC_TXC_BROAD 0xFFC031AC /* Broadcast TX Frame Count */
-#define EMAC_TXC_XS_DFR 0xFFC031B0 /* TX Frames With Excessive Deferral Count */
-#define EMAC_TXC_MACCTL 0xFFC031B4 /* MAC Control TX Frame Count */
-#define EMAC_TXC_ALLFRM 0xFFC031B8 /* Overall TX Frame Count */
-#define EMAC_TXC_ALLOCT 0xFFC031BC /* Overall TX Octet Count */
-#define EMAC_TXC_EQ64 0xFFC031C0 /* Good TX Frame Count - Byte Count x = 64 */
-#define EMAC_TXC_LT128 0xFFC031C4 /* Good TX Frame Count - Byte Count 64 < x < 128 */
-#define EMAC_TXC_LT256 0xFFC031C8 /* Good TX Frame Count - Byte Count 128 <= x < 256 */
-#define EMAC_TXC_LT512 0xFFC031CC /* Good TX Frame Count - Byte Count 256 <= x < 512 */
-#define EMAC_TXC_LT1024 0xFFC031D0 /* Good TX Frame Count - Byte Count 512 <= x < 1024 */
-#define EMAC_TXC_GE1024 0xFFC031D4 /* Good TX Frame Count - Byte Count x >= 1024 */
-#define EMAC_TXC_ABORT 0xFFC031D8 /* Total TX Frames Aborted Count */
-
-/* Listing for IEEE-Supported Count Registers */
-
-#define FramesReceivedOK EMAC_RXC_OK /* RX Frame Successful Count */
-#define FrameCheckSequenceErrors EMAC_RXC_FCS /* RX Frame FCS Failure Count */
-#define AlignmentErrors EMAC_RXC_ALIGN /* RX Alignment Error Count */
-#define OctetsReceivedOK EMAC_RXC_OCTET /* RX Octets Successfully Received Count */
-#define FramesLostDueToIntMACRcvError EMAC_RXC_DMAOVF /* Internal MAC Sublayer Error RX Frame Count */
-#define UnicastFramesReceivedOK EMAC_RXC_UNICST /* Unicast RX Frame Count */
-#define MulticastFramesReceivedOK EMAC_RXC_MULTI /* Multicast RX Frame Count */
-#define BroadcastFramesReceivedOK EMAC_RXC_BROAD /* Broadcast RX Frame Count */
-#define InRangeLengthErrors EMAC_RXC_LNERRI /* RX Frame In Range Error Count */
-#define OutOfRangeLengthField EMAC_RXC_LNERRO /* RX Frame Out Of Range Error Count */
-#define FrameTooLongErrors EMAC_RXC_LONG /* RX Frame Too Long Count */
-#define MACControlFramesReceived EMAC_RXC_MACCTL /* MAC Control RX Frame Count */
-#define UnsupportedOpcodesReceived EMAC_RXC_OPCODE /* Unsupported Op-Code RX Frame Count */
-#define PAUSEMACCtrlFramesReceived EMAC_RXC_PAUSE /* MAC Control Pause RX Frame Count */
-#define FramesReceivedAll EMAC_RXC_ALLFRM /* Overall RX Frame Count */
-#define OctetsReceivedAll EMAC_RXC_ALLOCT /* Overall RX Octet Count */
-#define TypedFramesReceived EMAC_RXC_TYPED /* Type/Length Consistent RX Frame Count */
-#define FramesLenLt64Received EMAC_RXC_SHORT /* RX Frame Fragment Count - Byte Count x < 64 */
-#define FramesLenEq64Received EMAC_RXC_EQ64 /* Good RX Frame Count - Byte Count x = 64 */
-#define FramesLen65_127Received EMAC_RXC_LT128 /* Good RX Frame Count - Byte Count 64 < x < 128 */
-#define FramesLen128_255Received EMAC_RXC_LT256 /* Good RX Frame Count - Byte Count 128 <= x < 256 */
-#define FramesLen256_511Received EMAC_RXC_LT512 /* Good RX Frame Count - Byte Count 256 <= x < 512 */
-#define FramesLen512_1023Received EMAC_RXC_LT1024 /* Good RX Frame Count - Byte Count 512 <= x < 1024 */
-#define FramesLen1024_MaxReceived EMAC_RXC_GE1024 /* Good RX Frame Count - Byte Count x >= 1024 */
-
-#define FramesTransmittedOK EMAC_TXC_OK /* TX Frame Successful Count */
-#define SingleCollisionFrames EMAC_TXC_1COL /* TX Frames Successful After Single Collision Count */
-#define MultipleCollisionFrames EMAC_TXC_GT1COL /* TX Frames Successful After Multiple Collisions Count */
-#define OctetsTransmittedOK EMAC_TXC_OCTET /* TX Octets Successfully Transmitted Count */
-#define FramesWithDeferredXmissions EMAC_TXC_DEFER /* TX Frame Delayed Due To Busy Count */
-#define LateCollisions EMAC_TXC_LATECL /* Late TX Collisions Count */
-#define FramesAbortedDueToXSColls EMAC_TXC_XS_COL /* TX Frame Failed Due To Excessive Collisions Count */
-#define FramesLostDueToIntMacXmitError EMAC_TXC_DMAUND /* Internal MAC Sublayer Error TX Frame Count */
-#define CarrierSenseErrors EMAC_TXC_CRSERR /* Carrier Sense Deasserted During TX Frame Count */
-#define UnicastFramesXmittedOK EMAC_TXC_UNICST /* Unicast TX Frame Count */
-#define MulticastFramesXmittedOK EMAC_TXC_MULTI /* Multicast TX Frame Count */
-#define BroadcastFramesXmittedOK EMAC_TXC_BROAD /* Broadcast TX Frame Count */
-#define FramesWithExcessiveDeferral EMAC_TXC_XS_DFR /* TX Frames With Excessive Deferral Count */
-#define MACControlFramesTransmitted EMAC_TXC_MACCTL /* MAC Control TX Frame Count */
-#define FramesTransmittedAll EMAC_TXC_ALLFRM /* Overall TX Frame Count */
-#define OctetsTransmittedAll EMAC_TXC_ALLOCT /* Overall TX Octet Count */
-#define FramesLenEq64Transmitted EMAC_TXC_EQ64 /* Good TX Frame Count - Byte Count x = 64 */
-#define FramesLen65_127Transmitted EMAC_TXC_LT128 /* Good TX Frame Count - Byte Count 64 < x < 128 */
-#define FramesLen128_255Transmitted EMAC_TXC_LT256 /* Good TX Frame Count - Byte Count 128 <= x < 256 */
-#define FramesLen256_511Transmitted EMAC_TXC_LT512 /* Good TX Frame Count - Byte Count 256 <= x < 512 */
-#define FramesLen512_1023Transmitted EMAC_TXC_LT1024 /* Good TX Frame Count - Byte Count 512 <= x < 1024 */
-#define FramesLen1024_MaxTransmitted EMAC_TXC_GE1024 /* Good TX Frame Count - Byte Count x >= 1024 */
-#define TxAbortedFrames EMAC_TXC_ABORT /* Total TX Frames Aborted Count */
-
-/***********************************************************************************
-** System MMR Register Bits And Macros
-**
-** Disclaimer: All macros are intended to make C and Assembly code more readable.
-** Use these macros carefully, as any that do left shifts for field
-** depositing will result in the lower order bits being destroyed. Any
-** macro that shifts left to properly position the bit-field should be
-** used as part of an OR to initialize a register and NOT as a dynamic
-** modifier UNLESS the lower order bits are saved and ORed back in when
-** the macro is used.
-*************************************************************************************/
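/*
** Usage sketch for the warning above (illustrative only; assumes the generic
** bfin_read32()/bfin_write32() MMIO accessors used elsewhere in these headers).
** A left-shifting field macro such as SET_FLCPAUSE() is safe when ORed into a
** full register initialization, or when the other bits are read back and ORed
** in; used alone as a write value it wipes the low-order bits.
*/
#if 0 /* example only, not compiled */
/* OK: one-shot initialization, all fields supplied at once */
bfin_write32(EMAC_FLC, SET_FLCPAUSE(0xFFFF) | FLCE);

/* OK: dynamic update, low-order bits (FLCE, PCF, BKPRSEN) preserved */
bfin_write32(EMAC_FLC, (bfin_read32(EMAC_FLC) & ~FLCPAUSE) | SET_FLCPAUSE(0x100));

/* NOT OK: the shift in SET_FLCPAUSE() leaves FLCE and friends cleared */
/* bfin_write32(EMAC_FLC, SET_FLCPAUSE(0x100)); */
#endif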
-
-/************************ ETHERNET 10/100 CONTROLLER MASKS ************************/
-
-/* EMAC_OPMODE Masks */
-
-#define RE 0x00000001 /* Receiver Enable */
-#define ASTP 0x00000002 /* Enable Automatic Pad Stripping On RX Frames */
-#define HU 0x00000010 /* Hash Filter Unicast Address */
-#define HM 0x00000020 /* Hash Filter Multicast Address */
-#define PAM 0x00000040 /* Pass-All-Multicast Mode Enable */
-#define PR 0x00000080 /* Promiscuous Mode Enable */
-#define IFE 0x00000100 /* Inverse Filtering Enable */
-#define DBF 0x00000200 /* Disable Broadcast Frame Reception */
-#define PBF 0x00000400 /* Pass Bad Frames Enable */
-#define PSF 0x00000800 /* Pass Short Frames Enable */
-#define RAF 0x00001000 /* Receive-All Mode */
-#define TE 0x00010000 /* Transmitter Enable */
-#define DTXPAD 0x00020000 /* Disable Automatic TX Padding */
-#define DTXCRC 0x00040000 /* Disable Automatic TX CRC Generation */
-#define DC 0x00080000 /* Deferral Check */
-#define BOLMT 0x00300000 /* Back-Off Limit */
-#define BOLMT_10 0x00000000 /* 10-bit range */
-#define BOLMT_8 0x00100000 /* 8-bit range */
-#define BOLMT_4 0x00200000 /* 4-bit range */
-#define BOLMT_1 0x00300000 /* 1-bit range */
-#define DRTY 0x00400000 /* Disable TX Retry On Collision */
-#define LCTRE 0x00800000 /* Enable TX Retry On Late Collision */
-#define RMII 0x01000000 /* RMII/MII* Mode */
-#define RMII_10 0x02000000 /* Speed Select for RMII Port (10MBit/100MBit*) */
-#define FDMODE 0x04000000 /* Duplex Mode Enable (Full/Half*) */
-#define LB 0x08000000 /* Internal Loopback Enable */
-#define DRO 0x10000000 /* Disable Receive Own Frames (Half-Duplex Mode) */
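/*
** Sketch composing EMAC_OPMODE from the masks above (illustrative values;
** assumes the bfin_write32() accessor): full-duplex operation with the
** receiver and transmitter enabled and automatic pad stripping on RX.
*/
#if 0 /* example only, not compiled */
bfin_write32(EMAC_OPMODE, RE | ASTP | TE | FDMODE);
#endif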
-
-/* EMAC_STAADD Masks */
-
-#define STABUSY 0x00000001 /* Initiate Station Mgt Reg Access / STA Busy Stat */
-#define STAOP 0x00000002 /* Station Management Operation Code (Write/Read*) */
-#define STADISPRE 0x00000004 /* Disable Preamble Generation */
-#define STAIE 0x00000008 /* Station Mgt. Transfer Done Interrupt Enable */
-#define REGAD 0x000007C0 /* STA Register Address */
-#define PHYAD 0x0000F800 /* PHY Device Address */
-
-#define SET_REGAD(x) (((x)&0x1F)<< 6 ) /* Set STA Register Address */
-#define SET_PHYAD(x) (((x)&0x1F)<< 11 ) /* Set PHY Device Address */
-
-/* EMAC_STADAT Mask */
-
-#define STADATA 0x0000FFFF /* Station Management Data */
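/*
** Sketch of a station management (MDIO) read using the masks above
** (illustrative; mdio_read() is a hypothetical helper and the
** bfin_read32()/bfin_write32() accessors are assumed). Leaving STAOP clear
** selects a read (the trailing '*' marks the value-0 meaning); STABUSY both
** starts the transfer and reports completion.
*/
#if 0 /* example only, not compiled */
static unsigned short mdio_read(int phy, int reg)
{
	bfin_write32(EMAC_STAADD, SET_PHYAD(phy) | SET_REGAD(reg) | STABUSY);
	while (bfin_read32(EMAC_STAADD) & STABUSY)
		continue;	/* poll until the access completes */
	return bfin_read32(EMAC_STADAT) & STADATA;
}
#endif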
-
-/* EMAC_FLC Masks */
-
-#define FLCBUSY 0x00000001 /* Send Flow Ctrl Frame / Flow Ctrl Busy Status */
-#define FLCE 0x00000002 /* Flow Control Enable */
-#define PCF 0x00000004 /* Pass Control Frames */
-#define BKPRSEN 0x00000008 /* Enable Backpressure */
-#define FLCPAUSE 0xFFFF0000 /* Pause Time */
-
-#define SET_FLCPAUSE(x) (((x)&0xFFFF)<< 16) /* Set Pause Time */
-
-/* EMAC_WKUP_CTL Masks */
-
-#define CAPWKFRM 0x00000001 /* Capture Wake-Up Frames */
-#define MPKE 0x00000002 /* Magic Packet Enable */
-#define RWKE 0x00000004 /* Remote Wake-Up Frame Enable */
-#define GUWKE 0x00000008 /* Global Unicast Wake Enable */
-#define MPKS 0x00000020 /* Magic Packet Received Status */
-#define RWKS 0x00000F00 /* Wake-Up Frame Received Status, Filters 3:0 */
-
-/* EMAC_WKUP_FFCMD Masks */
-
-#define WF0_E 0x00000001 /* Enable Wake-Up Filter 0 */
-#define WF0_T 0x00000008 /* Wake-Up Filter 0 Addr Type (Multicast/Unicast*) */
-#define WF1_E 0x00000100 /* Enable Wake-Up Filter 1 */
-#define WF1_T 0x00000800 /* Wake-Up Filter 1 Addr Type (Multicast/Unicast*) */
-#define WF2_E 0x00010000 /* Enable Wake-Up Filter 2 */
-#define WF2_T 0x00080000 /* Wake-Up Filter 2 Addr Type (Multicast/Unicast*) */
-#define WF3_E 0x01000000 /* Enable Wake-Up Filter 3 */
-#define WF3_T 0x08000000 /* Wake-Up Filter 3 Addr Type (Multicast/Unicast*) */
-
-/* EMAC_WKUP_FFOFF Masks */
-
-#define WF0_OFF 0x000000FF /* Wake-Up Filter 0 Pattern Offset */
-#define WF1_OFF 0x0000FF00 /* Wake-Up Filter 1 Pattern Offset */
-#define WF2_OFF 0x00FF0000 /* Wake-Up Filter 2 Pattern Offset */
-#define WF3_OFF 0xFF000000 /* Wake-Up Filter 3 Pattern Offset */
-
-#define SET_WF0_OFF(x) (((x)&0xFF)<< 0 ) /* Set Wake-Up Filter 0 Byte Offset */
-#define SET_WF1_OFF(x) (((x)&0xFF)<< 8 ) /* Set Wake-Up Filter 1 Byte Offset */
-#define SET_WF2_OFF(x) (((x)&0xFF)<< 16 ) /* Set Wake-Up Filter 2 Byte Offset */
-#define SET_WF3_OFF(x) (((x)&0xFF)<< 24 ) /* Set Wake-Up Filter 3 Byte Offset */
-/* Set ALL Offsets */
-#define SET_WF_OFFS(x0,x1,x2,x3) (SET_WF0_OFF((x0))|SET_WF1_OFF((x1))|SET_WF2_OFF((x2))|SET_WF3_OFF((x3)))
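/*
** Sketch using SET_WF_OFFS() above (illustrative values; assumes the
** bfin_write32() accessor): place wake-up filter 0 at byte offset 14 of the
** incoming frame and enable it as a unicast filter (WF0_T left clear).
*/
#if 0 /* example only, not compiled */
bfin_write32(EMAC_WKUP_FFOFF, SET_WF_OFFS(14, 0, 0, 0));
bfin_write32(EMAC_WKUP_FFCMD, WF0_E);
#endif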
-
-/* EMAC_WKUP_FFCRC0 Masks */
-
-#define WF0_CRC 0x0000FFFF /* Wake-Up Filter 0 Pattern CRC */
-#define WF1_CRC 0xFFFF0000 /* Wake-Up Filter 1 Pattern CRC */
-
-#define SET_WF0_CRC(x) (((x)&0xFFFF)<< 0 ) /* Set Wake-Up Filter 0 Target CRC */
-#define SET_WF1_CRC(x) (((x)&0xFFFF)<< 16 ) /* Set Wake-Up Filter 1 Target CRC */
-
-/* EMAC_WKUP_FFCRC1 Masks */
-
-#define WF2_CRC 0x0000FFFF /* Wake-Up Filter 2 Pattern CRC */
-#define WF3_CRC 0xFFFF0000 /* Wake-Up Filter 3 Pattern CRC */
-
-#define SET_WF2_CRC(x) (((x)&0xFFFF)<< 0 ) /* Set Wake-Up Filter 2 Target CRC */
-#define SET_WF3_CRC(x) (((x)&0xFFFF)<< 16 ) /* Set Wake-Up Filter 3 Target CRC */
-
-/* EMAC_SYSCTL Masks */
-
-#define PHYIE 0x00000001 /* PHY_INT Interrupt Enable */
-#define RXDWA 0x00000002 /* Receive Frame DMA Word Alignment (Odd/Even*) */
-#define RXCKS 0x00000004 /* Enable RX Frame TCP/UDP Checksum Computation */
-#define TXDWA 0x00000010 /* Transmit Frame DMA Word Alignment (Odd/Even*) */
-#define MDCDIV 0x00003F00 /* SCLK:MDC Clock Divisor [MDC=SCLK/(2*(N+1))] */
-
-#define SET_MDCDIV(x) (((x)&0x3F)<< 8) /* Set MDC Clock Divisor */
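/*
** Worked example of the divisor formula above, MDC = SCLK / (2 * (N + 1)):
** with SCLK = 80 MHz, SET_MDCDIV(15) gives MDC = 80 MHz / 32 = 2.5 MHz, the
** usual IEEE 802.3 management-clock ceiling (illustrative; assumes the
** bfin_read32()/bfin_write32() accessors).
*/
#if 0 /* example only, not compiled */
bfin_write32(EMAC_SYSCTL, (bfin_read32(EMAC_SYSCTL) & ~MDCDIV) | SET_MDCDIV(15));
#endif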
-
-/* EMAC_SYSTAT Masks */
-
-#define PHYINT 0x00000001 /* PHY_INT Interrupt Status */
-#define MMCINT 0x00000002 /* MMC Counter Interrupt Status */
-#define RXFSINT 0x00000004 /* RX Frame-Status Interrupt Status */
-#define TXFSINT 0x00000008 /* TX Frame-Status Interrupt Status */
-#define WAKEDET 0x00000010 /* Wake-Up Detected Status */
-#define RXDMAERR 0x00000020 /* RX DMA Direction Error Status */
-#define TXDMAERR 0x00000040 /* TX DMA Direction Error Status */
-#define STMDONE 0x00000080 /* Station Mgt. Transfer Done Interrupt Status */
-
-/* EMAC_RX_STAT, EMAC_RX_STKY, and EMAC_RX_IRQE Masks */
-
-#define RX_FRLEN 0x000007FF /* Frame Length In Bytes */
-#define RX_COMP 0x00001000 /* RX Frame Complete */
-#define RX_OK 0x00002000 /* RX Frame Received With No Errors */
-#define RX_LONG 0x00004000 /* RX Frame Too Long Error */
-#define RX_ALIGN 0x00008000 /* RX Frame Alignment Error */
-#define RX_CRC 0x00010000 /* RX Frame CRC Error */
-#define RX_LEN 0x00020000 /* RX Frame Length Error */
-#define RX_FRAG 0x00040000 /* RX Frame Fragment Error */
-#define RX_ADDR 0x00080000 /* RX Frame Address Filter Failed Error */
-#define RX_DMAO 0x00100000 /* RX Frame DMA Overrun Error */
-#define RX_PHY 0x00200000 /* RX Frame PHY Error */
-#define RX_LATE 0x00400000 /* RX Frame Late Collision Error */
-#define RX_RANGE 0x00800000 /* RX Frame Length Field Out of Range Error */
-#define RX_MULTI 0x01000000 /* RX Multicast Frame Indicator */
-#define RX_BROAD 0x02000000 /* RX Broadcast Frame Indicator */
-#define RX_CTL 0x04000000 /* RX Control Frame Indicator */
-#define RX_UCTL 0x08000000 /* Unsupported RX Control Frame Indicator */
-#define RX_TYPE 0x10000000 /* RX Typed Frame Indicator */
-#define RX_VLAN1 0x20000000 /* RX VLAN1 Frame Indicator */
-#define RX_VLAN2 0x40000000 /* RX VLAN2 Frame Indicator */
-#define RX_ACCEPT 0x80000000 /* RX Frame Accepted Indicator */
-
-/* EMAC_TX_STAT, EMAC_TX_STKY, and EMAC_TX_IRQE Masks */
-
-#define TX_COMP 0x00000001 /* TX Frame Complete */
-#define TX_OK 0x00000002 /* TX Frame Sent With No Errors */
-#define TX_ECOLL 0x00000004 /* TX Frame Excessive Collision Error */
-#define TX_LATE 0x00000008 /* TX Frame Late Collision Error */
-#define TX_DMAU 0x00000010 /* TX Frame DMA Underrun Error (STAT) */
-#define TX_MACE 0x00000010 /* Internal MAC Error Detected (STKY and IRQE) */
-#define TX_EDEFER 0x00000020 /* TX Frame Excessive Deferral Error */
-#define TX_BROAD 0x00000040 /* TX Broadcast Frame Indicator */
-#define TX_MULTI 0x00000080 /* TX Multicast Frame Indicator */
-#define TX_CCNT 0x00000F00 /* TX Frame Collision Count */
-#define TX_DEFER 0x00001000 /* TX Frame Deferred Indicator */
-#define TX_CRS 0x00002000 /* TX Frame Carrier Sense Not Asserted Error */
-#define TX_LOSS 0x00004000 /* TX Frame Carrier Lost During TX Error */
-#define TX_RETRY 0x00008000 /* TX Frame Successful After Retry */
-#define TX_FRLEN 0x07FF0000 /* TX Frame Length (Bytes) */
-
-/* EMAC_MMC_CTL Masks */
-#define RSTC 0x00000001 /* Reset All Counters */
-#define CROLL 0x00000002 /* Counter Roll-Over Enable */
-#define CCOR 0x00000004 /* Counter Clear-On-Read Mode Enable */
-#define MMCE 0x00000008 /* Enable MMC Counter Operation */
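/*
** Sketch using the MMC control bits above (illustrative; assumes the
** bfin_write32() accessor): clear all counters, then run them with
** roll-over enabled.
*/
#if 0 /* example only, not compiled */
bfin_write32(EMAC_MMC_CTL, RSTC);
bfin_write32(EMAC_MMC_CTL, MMCE | CROLL);
#endif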
-
-/* EMAC_MMC_RIRQS and EMAC_MMC_RIRQE Masks */
-#define RX_OK_CNT 0x00000001 /* RX Frames Received With No Errors */
-#define RX_FCS_CNT 0x00000002 /* RX Frames W/Frame Check Sequence Errors */
-#define RX_ALIGN_CNT 0x00000004 /* RX Frames With Alignment Errors */
-#define RX_OCTET_CNT 0x00000008 /* RX Octets Received OK */
-#define RX_LOST_CNT 0x00000010 /* RX Frames Lost Due To Internal MAC RX Error */
-#define RX_UNI_CNT 0x00000020 /* Unicast RX Frames Received OK */
-#define RX_MULTI_CNT 0x00000040 /* Multicast RX Frames Received OK */
-#define RX_BROAD_CNT 0x00000080 /* Broadcast RX Frames Received OK */
-#define RX_IRL_CNT 0x00000100 /* RX Frames With In-Range Length Errors */
-#define RX_ORL_CNT 0x00000200 /* RX Frames With Out-Of-Range Length Errors */
-#define RX_LONG_CNT 0x00000400 /* RX Frames With Frame Too Long Errors */
-#define RX_MACCTL_CNT 0x00000800 /* MAC Control RX Frames Received */
-#define RX_OPCODE_CTL 0x00001000 /* Unsupported Op-Code RX Frames Received */
-#define RX_PAUSE_CNT 0x00002000 /* PAUSEMAC Control RX Frames Received */
-#define RX_ALLF_CNT 0x00004000 /* All RX Frames Received */
-#define RX_ALLO_CNT 0x00008000 /* All RX Octets Received */
-#define RX_TYPED_CNT 0x00010000 /* Typed RX Frames Received */
-#define RX_SHORT_CNT 0x00020000 /* RX Frame Fragments (< 64 Bytes) Received */
-#define RX_EQ64_CNT 0x00040000 /* 64-Byte RX Frames Received */
-#define RX_LT128_CNT 0x00080000 /* 65-127-Byte RX Frames Received */
-#define RX_LT256_CNT 0x00100000 /* 128-255-Byte RX Frames Received */
-#define RX_LT512_CNT 0x00200000 /* 256-511-Byte RX Frames Received */
-#define RX_LT1024_CNT 0x00400000 /* 512-1023-Byte RX Frames Received */
-#define RX_GE1024_CNT 0x00800000 /* 1024-Max-Byte RX Frames Received */
-
-/* EMAC_MMC_TIRQS and EMAC_MMC_TIRQE Masks */
-
-#define TX_OK_CNT 0x00000001 /* TX Frames Sent OK */
-#define TX_SCOLL_CNT 0x00000002 /* TX Frames With Single Collisions */
-#define TX_MCOLL_CNT 0x00000004 /* TX Frames With Multiple Collisions */
-#define TX_OCTET_CNT 0x00000008 /* TX Octets Sent OK */
-#define TX_DEFER_CNT 0x00000010 /* TX Frames With Deferred Transmission */
-#define TX_LATE_CNT 0x00000020 /* TX Frames With Late Collisions */
-#define TX_ABORTC_CNT 0x00000040 /* TX Frames Aborted Due To Excess Collisions */
-#define TX_LOST_CNT 0x00000080 /* TX Frames Lost Due To Internal MAC TX Error */
-#define TX_CRS_CNT 0x00000100 /* TX Frames With Carrier Sense Errors */
-#define TX_UNI_CNT 0x00000200 /* Unicast TX Frames Sent */
-#define TX_MULTI_CNT 0x00000400 /* Multicast TX Frames Sent */
-#define TX_BROAD_CNT 0x00000800 /* Broadcast TX Frames Sent */
-#define TX_EXDEF_CTL 0x00001000 /* TX Frames With Excessive Deferral */
-#define TX_MACCTL_CNT 0x00002000 /* MAC Control TX Frames Sent */
-#define TX_ALLF_CNT 0x00004000 /* All TX Frames Sent */
-#define TX_ALLO_CNT 0x00008000 /* All TX Octets Sent */
-#define TX_EQ64_CNT 0x00010000 /* 64-Byte TX Frames Sent */
-#define TX_LT128_CNT 0x00020000 /* 65-127-Byte TX Frames Sent */
-#define TX_LT256_CNT 0x00040000 /* 128-255-Byte TX Frames Sent */
-#define TX_LT512_CNT 0x00080000 /* 256-511-Byte TX Frames Sent */
-#define TX_LT1024_CNT 0x00100000 /* 512-1023-Byte TX Frames Sent */
-#define TX_GE1024_CNT 0x00200000 /* 1024-Max-Byte TX Frames Sent */
-#define TX_ABORT_CNT 0x00400000 /* TX Frames Aborted */
-
-/* SDH Registers */
-
-#define SDH_PWR_CTL 0xFFC03900 /* SDH Power Control */
-#define SDH_CLK_CTL 0xFFC03904 /* SDH Clock Control */
-#define SDH_ARGUMENT 0xFFC03908 /* SDH Argument */
-#define SDH_COMMAND 0xFFC0390C /* SDH Command */
-#define SDH_RESP_CMD 0xFFC03910 /* SDH Response Command */
-#define SDH_RESPONSE0 0xFFC03914 /* SDH Response0 */
-#define SDH_RESPONSE1 0xFFC03918 /* SDH Response1 */
-#define SDH_RESPONSE2 0xFFC0391C /* SDH Response2 */
-#define SDH_RESPONSE3 0xFFC03920 /* SDH Response3 */
-#define SDH_DATA_TIMER 0xFFC03924 /* SDH Data Timer */
-#define SDH_DATA_LGTH 0xFFC03928 /* SDH Data Length */
-#define SDH_DATA_CTL 0xFFC0392C /* SDH Data Control */
-#define SDH_DATA_CNT 0xFFC03930 /* SDH Data Counter */
-#define SDH_STATUS 0xFFC03934 /* SDH Status */
-#define SDH_STATUS_CLR 0xFFC03938 /* SDH Status Clear */
-#define SDH_MASK0 0xFFC0393C /* SDH Interrupt0 Mask */
-#define SDH_MASK1 0xFFC03940 /* SDH Interrupt1 Mask */
-#define SDH_FIFO_CNT 0xFFC03948 /* SDH FIFO Counter */
-#define SDH_FIFO 0xFFC03980 /* SDH Data FIFO */
-#define SDH_E_STATUS 0xFFC039C0 /* SDH Exception Status */
-#define SDH_E_MASK 0xFFC039C4 /* SDH Exception Mask */
-#define SDH_CFG 0xFFC039C8 /* SDH Configuration */
-#define SDH_RD_WAIT_EN 0xFFC039CC /* SDH Read Wait Enable */
-#define SDH_PID0 0xFFC039D0 /* SDH Peripheral Identification0 */
-#define SDH_PID1 0xFFC039D4 /* SDH Peripheral Identification1 */
-#define SDH_PID2 0xFFC039D8 /* SDH Peripheral Identification2 */
-#define SDH_PID3 0xFFC039DC /* SDH Peripheral Identification3 */
-#define SDH_PID4 0xFFC039E0 /* SDH Peripheral Identification4 */
-#define SDH_PID5 0xFFC039E4 /* SDH Peripheral Identification5 */
-#define SDH_PID6 0xFFC039E8 /* SDH Peripheral Identification6 */
-#define SDH_PID7 0xFFC039EC /* SDH Peripheral Identification7 */
-
-/* Removable Storage Interface Registers */
-
-#define RSI_PWR_CONTROL 0xFFC03800 /* RSI Power Control Register */
-#define RSI_CLK_CONTROL 0xFFC03804 /* RSI Clock Control Register */
-#define RSI_ARGUMENT 0xFFC03808 /* RSI Argument Register */
-#define RSI_COMMAND 0xFFC0380C /* RSI Command Register */
-#define RSI_RESP_CMD 0xFFC03810 /* RSI Response Command Register */
-#define RSI_RESPONSE0 0xFFC03814 /* RSI Response Register */
-#define RSI_RESPONSE1 0xFFC03818 /* RSI Response Register */
-#define RSI_RESPONSE2 0xFFC0381C /* RSI Response Register */
-#define RSI_RESPONSE3 0xFFC03820 /* RSI Response Register */
-#define RSI_DATA_TIMER 0xFFC03824 /* RSI Data Timer Register */
-#define RSI_DATA_LGTH 0xFFC03828 /* RSI Data Length Register */
-#define RSI_DATA_CONTROL 0xFFC0382C /* RSI Data Control Register */
-#define RSI_DATA_CNT 0xFFC03830 /* RSI Data Counter Register */
-#define RSI_STATUS 0xFFC03834 /* RSI Status Register */
-#define RSI_STATUSCL 0xFFC03838 /* RSI Status Clear Register */
-#define RSI_MASK0 0xFFC0383C /* RSI Interrupt 0 Mask Register */
-#define RSI_MASK1 0xFFC03840 /* RSI Interrupt 1 Mask Register */
-#define RSI_FIFO_CNT 0xFFC03848 /* RSI FIFO Counter Register */
-#define RSI_CEATA_CONTROL 0xFFC0384C /* RSI CEATA Register */
-#define RSI_FIFO 0xFFC03880 /* RSI Data FIFO Register */
-#define RSI_ESTAT 0xFFC038C0 /* RSI Exception Status Register */
-#define RSI_EMASK 0xFFC038C4 /* RSI Exception Mask Register */
-#define RSI_CONFIG 0xFFC038C8 /* RSI Configuration Register */
-#define RSI_RD_WAIT_EN 0xFFC038CC /* RSI Read Wait Enable Register */
-#define RSI_PID0 0xFFC03FE0 /* RSI Peripheral ID Register 0 */
-#define RSI_PID1 0xFFC03FE4 /* RSI Peripheral ID Register 1 */
-#define RSI_PID2 0xFFC03FE8 /* RSI Peripheral ID Register 2 */
-#define RSI_PID3 0xFFC03FEC /* RSI Peripheral ID Register 3 */
-#define RSI_PID4 0xFFC03FF0 /* RSI Peripheral ID Register 4 */
-#define RSI_PID5 0xFFC03FF4 /* RSI Peripheral ID Register 5 */
-#define RSI_PID6 0xFFC03FF8 /* RSI Peripheral ID Register 6 */
-#define RSI_PID7 0xFFC03FFC /* RSI Peripheral ID Register 7 */
+/* BF518 is BF516 + IEEE-1588 */
+#include "defBF516.h"
/* PTP TSYNC Registers */
@@ -489,141 +36,6 @@
#define EMAC_PTP_PPS_STARTHI 0xFFC030F4 /* PPS Start Time High */
#define EMAC_PTP_PPS_PERIOD 0xFFC030F8 /* PPS Count Register */
-/* ********************************************************** */
-/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
-/* and MULTI BIT READ MACROS */
-/* ********************************************************** */
-
-/* Bit masks for SDH_COMMAND */
-
-#define CMD_IDX 0x3f /* Command Index */
-#define CMD_RSP 0x40 /* Response */
-#define CMD_L_RSP 0x80 /* Long Response */
-#define CMD_INT_E 0x100 /* Command Interrupt */
-#define CMD_PEND_E 0x200 /* Command Pending */
-#define CMD_E 0x400 /* Command Enable */
-
-/* Bit masks for SDH_PWR_CTL */
-
-#define PWR_ON 0x3 /* Power On */
-#if 0
-#define TBD 0x3c /* TBD */
-#endif
-#define SD_CMD_OD 0x40 /* Open Drain Output */
-#define ROD_CTL 0x80 /* Rod Control */
-
-/* Bit masks for SDH_CLK_CTL */
-
-#define CLKDIV 0xff /* MC_CLK Divisor */
-#define CLK_E 0x100 /* MC_CLK Bus Clock Enable */
-#define PWR_SV_E 0x200 /* Power Save Enable */
-#define CLKDIV_BYPASS 0x400 /* Bypass Divisor */
-#define WIDE_BUS 0x800 /* Wide Bus Mode Enable */
-
-/* Bit masks for SDH_RESP_CMD */
-
-#define RESP_CMD 0x3f /* Response Command */
-
-/* Bit masks for SDH_DATA_CTL */
-
-#define DTX_E 0x1 /* Data Transfer Enable */
-#define DTX_DIR 0x2 /* Data Transfer Direction */
-#define DTX_MODE 0x4 /* Data Transfer Mode */
-#define DTX_DMA_E 0x8 /* Data Transfer DMA Enable */
-#define DTX_BLK_LGTH 0xf0 /* Data Transfer Block Length */
-
-/* Bit masks for SDH_STATUS */
-
-#define CMD_CRC_FAIL 0x1 /* CMD CRC Fail */
-#define DAT_CRC_FAIL 0x2 /* Data CRC Fail */
-#define CMD_TIME_OUT 0x4 /* CMD Time Out */
-#define DAT_TIME_OUT 0x8 /* Data Time Out */
-#define TX_UNDERRUN 0x10 /* Transmit Underrun */
-#define RX_OVERRUN 0x20 /* Receive Overrun */
-#define CMD_RESP_END 0x40 /* CMD Response End */
-#define CMD_SENT 0x80 /* CMD Sent */
-#define DAT_END 0x100 /* Data End */
-#define START_BIT_ERR 0x200 /* Start Bit Error */
-#define DAT_BLK_END 0x400 /* Data Block End */
-#define CMD_ACT 0x800 /* CMD Active */
-#define TX_ACT 0x1000 /* Transmit Active */
-#define RX_ACT 0x2000 /* Receive Active */
-#define TX_FIFO_STAT 0x4000 /* Transmit FIFO Status */
-#define RX_FIFO_STAT 0x8000 /* Receive FIFO Status */
-#define TX_FIFO_FULL 0x10000 /* Transmit FIFO Full */
-#define RX_FIFO_FULL 0x20000 /* Receive FIFO Full */
-#define TX_FIFO_ZERO 0x40000 /* Transmit FIFO Empty */
-#define RX_DAT_ZERO 0x80000 /* Receive FIFO Empty */
-#define TX_DAT_RDY 0x100000 /* Transmit Data Available */
-#define RX_FIFO_RDY 0x200000 /* Receive Data Available */
-
-/* Bit masks for SDH_STATUS_CLR */
-
-#define CMD_CRC_FAIL_STAT 0x1 /* CMD CRC Fail Status */
-#define DAT_CRC_FAIL_STAT 0x2 /* Data CRC Fail Status */
-#define CMD_TIMEOUT_STAT 0x4 /* CMD Time Out Status */
-#define DAT_TIMEOUT_STAT 0x8 /* Data Time Out status */
-#define TX_UNDERRUN_STAT 0x10 /* Transmit Underrun Status */
-#define RX_OVERRUN_STAT 0x20 /* Receive Overrun Status */
-#define CMD_RESP_END_STAT 0x40 /* CMD Response End Status */
-#define CMD_SENT_STAT 0x80 /* CMD Sent Status */
-#define DAT_END_STAT 0x100 /* Data End Status */
-#define START_BIT_ERR_STAT 0x200 /* Start Bit Error Status */
-#define DAT_BLK_END_STAT 0x400 /* Data Block End Status */
-
-/* Bit masks for SDH_MASK0 */
-
-#define CMD_CRC_FAIL_MASK 0x1 /* CMD CRC Fail Mask */
-#define DAT_CRC_FAIL_MASK 0x2 /* Data CRC Fail Mask */
-#define CMD_TIMEOUT_MASK 0x4 /* CMD Time Out Mask */
-#define DAT_TIMEOUT_MASK 0x8 /* Data Time Out Mask */
-#define TX_UNDERRUN_MASK 0x10 /* Transmit Underrun Mask */
-#define RX_OVERRUN_MASK 0x20 /* Receive Overrun Mask */
-#define CMD_RESP_END_MASK 0x40 /* CMD Response End Mask */
-#define CMD_SENT_MASK 0x80 /* CMD Sent Mask */
-#define DAT_END_MASK 0x100 /* Data End Mask */
-#define START_BIT_ERR_MASK 0x200 /* Start Bit Error Mask */
-#define DAT_BLK_END_MASK 0x400 /* Data Block End Mask */
-#define CMD_ACT_MASK 0x800 /* CMD Active Mask */
-#define TX_ACT_MASK 0x1000 /* Transmit Active Mask */
-#define RX_ACT_MASK 0x2000 /* Receive Active Mask */
-#define TX_FIFO_STAT_MASK 0x4000 /* Transmit FIFO Status Mask */
-#define RX_FIFO_STAT_MASK 0x8000 /* Receive FIFO Status Mask */
-#define TX_FIFO_FULL_MASK 0x10000 /* Transmit FIFO Full Mask */
-#define RX_FIFO_FULL_MASK 0x20000 /* Receive FIFO Full Mask */
-#define TX_FIFO_ZERO_MASK 0x40000 /* Transmit FIFO Empty Mask */
-#define RX_DAT_ZERO_MASK 0x80000 /* Receive FIFO Empty Mask */
-#define TX_DAT_RDY_MASK 0x100000 /* Transmit Data Available Mask */
-#define RX_FIFO_RDY_MASK 0x200000 /* Receive Data Available Mask */
-
-/* Bit masks for SDH_FIFO_CNT */
-
-#define FIFO_COUNT 0x7fff /* FIFO Count */
-
-/* Bit masks for SDH_E_STATUS */
-
-#define SDIO_INT_DET 0x2 /* SDIO Int Detected */
-#define SD_CARD_DET 0x10 /* SD Card Detect */
-
-/* Bit masks for SDH_E_MASK */
-
-#define SDIO_MSK 0x2 /* Mask SDIO Int Detected */
-#define SCD_MSK 0x40 /* Mask Card Detect */
-
-/* Bit masks for SDH_CFG */
-
-#define CLKS_EN 0x1 /* Clocks Enable */
-#define SD4E 0x4 /* SDIO 4-Bit Enable */
-#define MWE 0x8 /* Moving Window Enable */
-#define SD_RST 0x10 /* SDMMC Reset */
-#define PUP_SDDAT 0x20 /* Pull-up SD_DAT */
-#define PUP_SDDAT3 0x40 /* Pull-up SD_DAT3 */
-#define PD_SDDAT3 0x80 /* Pull-down SD_DAT3 */
-
-/* Bit masks for SDH_RD_WAIT_EN */
-
-#define RWR 0x1 /* Read Wait Request */
-
/* Bit masks for EMAC_PTP_CTL */
#define PTP_EN 0x1 /* Enable the PTP_TSYNC module */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
index f9fd2b2a295..9241205fb99 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
@@ -585,58 +585,6 @@
** modifier UNLESS the lower order bits are saved and ORed back in when
** the macro is used.
*************************************************************************************/
-/*
-** ********************* PLL AND RESET MASKS ****************************************/
-/* PLL_CTL Masks */
-#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
-#define PLL_OFF 0x0002 /* PLL Not Powered */
-#define STOPCK 0x0008 /* Core Clock Off */
-#define PDWN 0x0020 /* Enter Deep Sleep Mode */
-#define IN_DELAY 0x0040 /* Add 200ps Delay To EBIU Input Latches */
-#define OUT_DELAY 0x0080 /* Add 200ps Delay To EBIU Output Signals */
-#define BYPASS 0x0100 /* Bypass the PLL */
-#define MSEL 0x7E00 /* Multiplier Select For CCLK/VCO Factors */
-/* PLL_CTL Macros (Only Use With Logic OR While Setting Lower Order Bits) */
-#define SET_MSEL(x) (((x)&0x3F) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
-
-/* PLL_DIV Masks */
-#define SSEL 0x000F /* System Select */
-#define CSEL 0x0030 /* Core Select */
-#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */
-#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */
-#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */
-#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */
-/* PLL_DIV Macros */
-#define SET_SSEL(x) ((x)&0xF) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
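/*
** Clock sketch using the PLL macros above (illustrative values; assumes a
** 25 MHz CLKIN with DF = 0, the PLL_CTL/PLL_DIV MMR addresses defined earlier
** in this header, and the bfin_write16() accessor; the required PLL
** programming/idle sequence is omitted): SET_MSEL(16) gives
** VCO = 25 MHz * 16 = 400 MHz, CSEL_DIV1 gives CCLK = 400 MHz, and
** SET_SSEL(5) gives SCLK = 400 MHz / 5 = 80 MHz.
*/
#if 0 /* example only, not compiled */
bfin_write16(PLL_CTL, SET_MSEL(16));
bfin_write16(PLL_DIV, CSEL_DIV1 | SET_SSEL(5));
#endif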
-
-/* VR_CTL Masks */
-#define FREQ 0x3000 /* Switching Oscillator Frequency For Regulator */
-#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */
-
-#define VLEV 0x00F0 /* Internal Voltage Level */
-#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
-#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
-#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
-#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
-#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
-#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
-#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
-#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
-#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */
-#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */
-
-#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */
-#define USBWE 0x0200 /* Enable USB Wakeup From Hibernate */
-#define PHYWE 0x0400 /* Enable PHY Wakeup From Hibernate */
-#define CLKBUFOE 0x4000 /* CLKIN Buffer Output Enable */
-#define PHYCLKOE CLKBUFOE /* Alternative legacy name for the above */
-#define SCKELOW 0x8000 /* Enable Drive CKE Low During Reset */
-
-/* PLL_STAT Masks */
-#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
-#define FULL_ON 0x0002 /* Processor In Full On Mode */
-#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
-#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
/* CHIPID Masks */
#define CHIPID_VERSION 0xF0000000
@@ -756,66 +704,6 @@
#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
-/* ********* WATCHDOG TIMER MASKS ******************** */
-
-/* Watchdog Timer WDOG_CTL Register Masks */
-
-#define WDEV(x) (((x)<<1) & 0x0006) /* event generated on roll over */
-#define WDEV_RESET 0x0000 /* generate reset event on roll over */
-#define WDEV_NMI 0x0002 /* generate NMI event on roll over */
-#define WDEV_GPI 0x0004 /* generate GP IRQ on roll over */
-#define WDEV_NONE 0x0006 /* no event on roll over */
-#define WDEN 0x0FF0 /* enable watchdog */
-#define WDDIS 0x0AD0 /* disable watchdog */
-#define WDRO 0x8000 /* watchdog rolled over latch */
-
-/* deprecated WDOG_CTL Register Masks for legacy code */
-
-
-#define ICTL WDEV
-#define ENABLE_RESET WDEV_RESET
-#define WDOG_RESET WDEV_RESET
-#define ENABLE_NMI WDEV_NMI
-#define WDOG_NMI WDEV_NMI
-#define ENABLE_GPI WDEV_GPI
-#define WDOG_GPI WDEV_GPI
-#define DISABLE_EVT WDEV_NONE
-#define WDOG_NONE WDEV_NONE
-
-#define TMR_EN WDEN
-#define TMR_DIS WDDIS
-#define TRO WDRO
-#define ICTL_P0 0x01
-#define ICTL_P1 0x02
-#define TRO_P 0x0F
-
-
-
-/* *************** REAL TIME CLOCK MASKS **************************/
-/* RTC_STAT and RTC_ALARM Masks */
-#define RTC_SEC 0x0000003F /* Real-Time Clock Seconds */
-#define RTC_MIN 0x00000FC0 /* Real-Time Clock Minutes */
-#define RTC_HR 0x0001F000 /* Real-Time Clock Hours */
-#define RTC_DAY 0xFFFE0000 /* Real-Time Clock Days */
-
-/* RTC_ALARM Macro z=day y=hr x=min w=sec */
-#define SET_ALARM(z,y,x,w) ((((z)&0x7FFF)<<0x11)|(((y)&0x1F)<<0xC)|(((x)&0x3F)<<0x6)|((w)&0x3F))
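/*
** Sketch of SET_ALARM() above, z=day y=hr x=min w=sec (illustrative; assumes
** the RTC_ALARM MMR address defined earlier in this header and the
** bfin_write32() accessor): arm an alarm for day 10 at 07:30:00.
*/
#if 0 /* example only, not compiled */
bfin_write32(RTC_ALARM, SET_ALARM(10, 7, 30, 0));
#endif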
-
-/* RTC_ICTL and RTC_ISTAT Masks */
-#define STOPWATCH 0x0001 /* Stopwatch Interrupt Enable */
-#define ALARM 0x0002 /* Alarm Interrupt Enable */
-#define SECOND 0x0004 /* Seconds (1 Hz) Interrupt Enable */
-#define MINUTE 0x0008 /* Minutes Interrupt Enable */
-#define HOUR 0x0010 /* Hours Interrupt Enable */
-#define DAY 0x0020 /* 24 Hours (Days) Interrupt Enable */
-#define DAY_ALARM 0x0040 /* Day Alarm (Day, Hour, Minute, Second) Interrupt Enable */
-#define WRITE_PENDING 0x4000 /* Write Pending Status */
-#define WRITE_COMPLETE 0x8000 /* Write Complete Interrupt Enable */
-
-/* RTC_FAST / RTC_PREN Mask */
-#define PREN 0x0001 /* Enable Prescaler, RTC Runs @1 Hz */
-
-
/* ************** UART CONTROLLER MASKS *************************/
/* UARTx_LCR Masks */
#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
@@ -1372,33 +1260,6 @@
/* ************************** DMA CONTROLLER MASKS ********************************/
-/* DMAx_CONFIG, MDMA_yy_CONFIG Masks */
-#define DMAEN 0x0001 /* DMA Channel Enable */
-#define WNR 0x0002 /* Channel Direction (W/R*) */
-#define WDSIZE_8 0x0000 /* Transfer Word Size = 8 */
-#define WDSIZE_16 0x0004 /* Transfer Word Size = 16 */
-#define WDSIZE_32 0x0008 /* Transfer Word Size = 32 */
-#define DMA2D 0x0010 /* DMA Mode (2D/1D*) */
-#define RESTART 0x0020 /* DMA Buffer Clear */
-#define DI_SEL 0x0040 /* Data Interrupt Timing Select */
-#define DI_EN 0x0080 /* Data Interrupt Enable */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-#define NDSIZE 0x0900 /* Next Descriptor Size */
-#define DMAFLOW 0x7000 /* Flow Control */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
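/*
** Sketch composing a DMAx_CONFIG value from the masks above (illustrative;
** the value would be written to the target channel's CONFIG MMR): a 32-bit
** memory-write (receive) channel in autobuffer mode with an interrupt per
** buffer.
*/
#if 0 /* example only, not compiled */
unsigned short rx_cfg = DMAEN | WNR | WDSIZE_32 | DI_EN | DMAFLOW_AUTO;
#endif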
/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
@@ -1416,13 +1277,6 @@
#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
-/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */
-#define DMA_DONE 0x0001 /* DMA Completion Interrupt Status */
-#define DMA_ERR 0x0002 /* DMA Error Interrupt Status */
-#define DFETCH 0x0004 /* DMA Descriptor Fetch Indicator */
-#define DMA_RUN 0x0008 /* DMA Channel Running Indicator */
-
-
/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
/* PPI_CONTROL Masks */
#define PORT_EN 0x0001 /* PPI Port Enable */
@@ -1830,46 +1684,6 @@
#define BNDMODE_CAPT 0x2000 /* boundary capture mode */
#define BNDMODE_AEXT 0x3000 /* boundary auto-extend mode */
-/* Bit masks for OTP_CONTROL */
-
-#define FUSE_FADDR 0x1ff /* OTP/Fuse Address */
-#define FIEN 0x800 /* OTP/Fuse Interrupt Enable */
-#define nFIEN 0x0
-#define FTESTDEC 0x1000 /* OTP/Fuse Test Decoder */
-#define nFTESTDEC 0x0
-#define FWRTEST 0x2000 /* OTP/Fuse Write Test */
-#define nFWRTEST 0x0
-#define FRDEN 0x4000 /* OTP/Fuse Read Enable */
-#define nFRDEN 0x0
-#define FWREN 0x8000 /* OTP/Fuse Write Enable */
-#define nFWREN 0x0
-
-/* Bit masks for OTP_BEN */
-
-#define FBEN 0xffff /* OTP/Fuse Byte Enable */
-
-/* Bit masks for OTP_STATUS */
-
-#define FCOMP 0x1 /* OTP/Fuse Access Complete */
-#define nFCOMP 0x0
-#define FERROR 0x2 /* OTP/Fuse Access Error */
-#define nFERROR 0x0
-#define MMRGLOAD 0x10 /* Memory Mapped Register Gasket Load */
-#define nMMRGLOAD 0x0
-#define MMRGLOCK 0x20 /* Memory Mapped Register Gasket Lock */
-#define nMMRGLOCK 0x0
-#define FPGMEN 0x40 /* OTP/Fuse Program Enable */
-#define nFPGMEN 0x0
-
-/* Bit masks for OTP_TIMING */
-
-#define USECDIV 0xff /* Micro Second Divider */
-#define READACC 0x7f00 /* Read Access Time */
-#define CPUMPRL 0x38000 /* Charge Pump Release Time */
-#define CPUMPSU 0xc0000 /* Charge Pump Setup Time */
-#define CPUMPHD 0xf00000 /* Charge Pump Hold Time */
-#define PGMTIME 0xff000000 /* Program Time */
-
/* Bit masks for SECURE_SYSSWT */
#define EMUDABL 0x1 /* Emulation Disable. */
diff --git a/arch/blackfin/mach-bf527/Kconfig b/arch/blackfin/mach-bf527/Kconfig
index 848ac6f8682..1f8cbe9d6b9 100644
--- a/arch/blackfin/mach-bf527/Kconfig
+++ b/arch/blackfin/mach-bf527/Kconfig
@@ -1,3 +1,7 @@
+config BF52x
+ def_bool y
+ depends on (BF522 || BF523 || BF524 || BF525 || BF526 || BF527)
+
if (BF52x)
source "arch/blackfin/mach-bf527/boards/Kconfig"
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index f1996b13a3d..7ab0800e291 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -15,9 +15,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/etherdevice.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-#include <linux/usb/isp1362.h>
-#endif
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -65,7 +62,7 @@ static struct isp1760_platform_data isp1760_priv = {
};
static struct platform_device bfin_isp1760_device = {
- .name = "isp1760-hcd",
+ .name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
@@ -317,45 +314,6 @@ static struct platform_device sl811_hcd_device = {
};
#endif
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-static struct resource isp1362_hcd_resources[] = {
- {
- .start = 0x20360000,
- .end = 0x20360000,
- .flags = IORESOURCE_MEM,
- }, {
- .start = 0x20360004,
- .end = 0x20360004,
- .flags = IORESOURCE_MEM,
- }, {
- .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
- .end = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
- },
-};
-
-static struct isp1362_platform_data isp1362_priv = {
- .sel15Kres = 1,
- .clknotstop = 0,
- .oc_enable = 0,
- .int_act_high = 0,
- .int_edge_triggered = 0,
- .remote_wakeup_connected = 0,
- .no_power_switching = 1,
- .power_switching_mode = 0,
-};
-
-static struct platform_device isp1362_hcd_device = {
- .name = "isp1362-hcd",
- .id = 0,
- .dev = {
- .platform_data = &isp1362_priv,
- },
- .num_resources = ARRAY_SIZE(isp1362_hcd_resources),
- .resource = isp1362_hcd_resources,
-};
-#endif
-
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
@@ -841,10 +799,6 @@ static struct platform_device *cmbf527_devices[] __initdata = {
&sl811_hcd_device,
#endif
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
- &isp1362_hcd_device,
-#endif
-
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
&bfin_isp1760_device,
#endif
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index f09665f74ba..5294fdd2073 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -13,9 +13,6 @@
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-#include <linux/usb/isp1362.h>
-#endif
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -63,7 +60,7 @@ static struct isp1760_platform_data isp1760_priv = {
};
static struct platform_device bfin_isp1760_device = {
- .name = "isp1760-hcd",
+ .name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
@@ -373,45 +370,6 @@ static struct platform_device sl811_hcd_device = {
};
#endif
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-static struct resource isp1362_hcd_resources[] = {
- {
- .start = 0x20360000,
- .end = 0x20360000,
- .flags = IORESOURCE_MEM,
- }, {
- .start = 0x20360004,
- .end = 0x20360004,
- .flags = IORESOURCE_MEM,
- }, {
- .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
- .end = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
- },
-};
-
-static struct isp1362_platform_data isp1362_priv = {
- .sel15Kres = 1,
- .clknotstop = 0,
- .oc_enable = 0,
- .int_act_high = 0,
- .int_edge_triggered = 0,
- .remote_wakeup_connected = 0,
- .no_power_switching = 1,
- .power_switching_mode = 0,
-};
-
-static struct platform_device isp1362_hcd_device = {
- .name = "isp1362-hcd",
- .id = 0,
- .dev = {
- .platform_data = &isp1362_priv,
- },
- .num_resources = ARRAY_SIZE(isp1362_hcd_resources),
- .resource = isp1362_hcd_resources,
-};
-#endif
-
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
@@ -688,12 +646,6 @@ static struct platform_device bfin_spi0_device = {
};
#endif /* spi master and devices */
-#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
-static struct platform_device bfin_fb_device = {
- .name = "bf537-lq035",
-};
-#endif
-
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
static struct resource bfin_uart_resources[] = {
#ifdef CONFIG_SERIAL_BFIN_UART0
@@ -850,7 +802,7 @@ static struct platform_device bfin_device_gpiokeys = {
};
#endif
-#if defined(CONFIG_JOYSTICK_BFIN_ROTARY) || defined(CONFIG_JOYSTICK_BFIN_ROTARY_MODULE)
+#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
#include <linux/input.h>
#include <asm/bfin_rotary.h>
@@ -924,10 +876,6 @@ static struct platform_device *stamp_devices[] __initdata = {
&sl811_hcd_device,
#endif
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
- &isp1362_hcd_device,
-#endif
-
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
&bfin_isp1760_device,
#endif
@@ -957,10 +905,6 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_spi0_device,
#endif
-#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
- &bfin_fb_device,
-#endif
-
#if defined(CONFIG_FB_BFIN_T350MCQB) || defined(CONFIG_FB_BFIN_T350MCQB_MODULE)
&bf52x_t350mcqb_device,
#endif
@@ -991,7 +935,7 @@ static struct platform_device *stamp_devices[] __initdata = {
&bfin_device_gpiokeys,
#endif
-#if defined(CONFIG_JOYSTICK_BFIN_ROTARY) || defined(CONFIG_JOYSTICK_BFIN_ROTARY_MODULE)
+#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
&bfin_rotary_device,
#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index e7d6034f268..f714c5de307 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -46,10 +46,4 @@
#define OFFSET_SCR 0x1C /* SCR Scratch Register */
#define OFFSET_GCTL 0x24 /* Global Control Register */
-/* PLL_DIV Masks */
-#define CCLK_DIV1 CSEL_DIV1 /* CCLK = VCO / 1 */
-#define CCLK_DIV2 CSEL_DIV2 /* CCLK = VCO / 2 */
-#define CCLK_DIV4 CSEL_DIV4 /* CCLK = VCO / 4 */
-#define CCLK_DIV8 CSEL_DIV8 /* CCLK = VCO / 8 */
-
#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
index dc3119e9f66..d7e2751c6bc 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
@@ -10,15 +10,8 @@
/* include all Core registers and bit definitions */
#include "defBF525.h"
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF525 */
-
-/* include cdefBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "cdefBF52x_base.h"
-
-/* The following are the #defines needed by ADSP-BF525 that are not in the common header */
+/* BF525 is BF522 + USB */
+#include "cdefBF522.h"
/* USB Control Registers */
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
index d6579449ee4..c7ba544d50b 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
@@ -10,15 +10,8 @@
/* include all Core registers and bit definitions */
#include "defBF527.h"
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF527 */
-
-/* include cdefBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "cdefBF52x_base.h"
-
-/* The following are the #defines needed by ADSP-BF527 that are not in the common header */
+/* BF527 is BF525 + EMAC */
+#include "cdefBF525.h"
/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
@@ -185,417 +178,4 @@
#define bfin_read_EMAC_TXC_ABORT() bfin_read32(EMAC_TXC_ABORT)
#define bfin_write_EMAC_TXC_ABORT(val) bfin_write32(EMAC_TXC_ABORT, val)
-/* USB Control Registers */
-
-#define bfin_read_USB_FADDR() bfin_read16(USB_FADDR)
-#define bfin_write_USB_FADDR(val) bfin_write16(USB_FADDR, val)
-#define bfin_read_USB_POWER() bfin_read16(USB_POWER)
-#define bfin_write_USB_POWER(val) bfin_write16(USB_POWER, val)
-#define bfin_read_USB_INTRTX() bfin_read16(USB_INTRTX)
-#define bfin_write_USB_INTRTX(val) bfin_write16(USB_INTRTX, val)
-#define bfin_read_USB_INTRRX() bfin_read16(USB_INTRRX)
-#define bfin_write_USB_INTRRX(val) bfin_write16(USB_INTRRX, val)
-#define bfin_read_USB_INTRTXE() bfin_read16(USB_INTRTXE)
-#define bfin_write_USB_INTRTXE(val) bfin_write16(USB_INTRTXE, val)
-#define bfin_read_USB_INTRRXE() bfin_read16(USB_INTRRXE)
-#define bfin_write_USB_INTRRXE(val) bfin_write16(USB_INTRRXE, val)
-#define bfin_read_USB_INTRUSB() bfin_read16(USB_INTRUSB)
-#define bfin_write_USB_INTRUSB(val) bfin_write16(USB_INTRUSB, val)
-#define bfin_read_USB_INTRUSBE() bfin_read16(USB_INTRUSBE)
-#define bfin_write_USB_INTRUSBE(val) bfin_write16(USB_INTRUSBE, val)
-#define bfin_read_USB_FRAME() bfin_read16(USB_FRAME)
-#define bfin_write_USB_FRAME(val) bfin_write16(USB_FRAME, val)
-#define bfin_read_USB_INDEX() bfin_read16(USB_INDEX)
-#define bfin_write_USB_INDEX(val) bfin_write16(USB_INDEX, val)
-#define bfin_read_USB_TESTMODE() bfin_read16(USB_TESTMODE)
-#define bfin_write_USB_TESTMODE(val) bfin_write16(USB_TESTMODE, val)
-#define bfin_read_USB_GLOBINTR() bfin_read16(USB_GLOBINTR)
-#define bfin_write_USB_GLOBINTR(val) bfin_write16(USB_GLOBINTR, val)
-#define bfin_read_USB_GLOBAL_CTL() bfin_read16(USB_GLOBAL_CTL)
-#define bfin_write_USB_GLOBAL_CTL(val) bfin_write16(USB_GLOBAL_CTL, val)
-
-/* USB Packet Control Registers */
-
-#define bfin_read_USB_TX_MAX_PACKET() bfin_read16(USB_TX_MAX_PACKET)
-#define bfin_write_USB_TX_MAX_PACKET(val) bfin_write16(USB_TX_MAX_PACKET, val)
-#define bfin_read_USB_CSR0() bfin_read16(USB_CSR0)
-#define bfin_write_USB_CSR0(val) bfin_write16(USB_CSR0, val)
-#define bfin_read_USB_TXCSR() bfin_read16(USB_TXCSR)
-#define bfin_write_USB_TXCSR(val) bfin_write16(USB_TXCSR, val)
-#define bfin_read_USB_RX_MAX_PACKET() bfin_read16(USB_RX_MAX_PACKET)
-#define bfin_write_USB_RX_MAX_PACKET(val) bfin_write16(USB_RX_MAX_PACKET, val)
-#define bfin_read_USB_RXCSR() bfin_read16(USB_RXCSR)
-#define bfin_write_USB_RXCSR(val) bfin_write16(USB_RXCSR, val)
-#define bfin_read_USB_COUNT0() bfin_read16(USB_COUNT0)
-#define bfin_write_USB_COUNT0(val) bfin_write16(USB_COUNT0, val)
-#define bfin_read_USB_RXCOUNT() bfin_read16(USB_RXCOUNT)
-#define bfin_write_USB_RXCOUNT(val) bfin_write16(USB_RXCOUNT, val)
-#define bfin_read_USB_TXTYPE() bfin_read16(USB_TXTYPE)
-#define bfin_write_USB_TXTYPE(val) bfin_write16(USB_TXTYPE, val)
-#define bfin_read_USB_NAKLIMIT0() bfin_read16(USB_NAKLIMIT0)
-#define bfin_write_USB_NAKLIMIT0(val) bfin_write16(USB_NAKLIMIT0, val)
-#define bfin_read_USB_TXINTERVAL() bfin_read16(USB_TXINTERVAL)
-#define bfin_write_USB_TXINTERVAL(val) bfin_write16(USB_TXINTERVAL, val)
-#define bfin_read_USB_RXTYPE() bfin_read16(USB_RXTYPE)
-#define bfin_write_USB_RXTYPE(val) bfin_write16(USB_RXTYPE, val)
-#define bfin_read_USB_RXINTERVAL() bfin_read16(USB_RXINTERVAL)
-#define bfin_write_USB_RXINTERVAL(val) bfin_write16(USB_RXINTERVAL, val)
-#define bfin_read_USB_TXCOUNT() bfin_read16(USB_TXCOUNT)
-#define bfin_write_USB_TXCOUNT(val) bfin_write16(USB_TXCOUNT, val)
-
-/* USB Endpoint FIFO Registers */
-
-#define bfin_read_USB_EP0_FIFO() bfin_read16(USB_EP0_FIFO)
-#define bfin_write_USB_EP0_FIFO(val) bfin_write16(USB_EP0_FIFO, val)
-#define bfin_read_USB_EP1_FIFO() bfin_read16(USB_EP1_FIFO)
-#define bfin_write_USB_EP1_FIFO(val) bfin_write16(USB_EP1_FIFO, val)
-#define bfin_read_USB_EP2_FIFO() bfin_read16(USB_EP2_FIFO)
-#define bfin_write_USB_EP2_FIFO(val) bfin_write16(USB_EP2_FIFO, val)
-#define bfin_read_USB_EP3_FIFO() bfin_read16(USB_EP3_FIFO)
-#define bfin_write_USB_EP3_FIFO(val) bfin_write16(USB_EP3_FIFO, val)
-#define bfin_read_USB_EP4_FIFO() bfin_read16(USB_EP4_FIFO)
-#define bfin_write_USB_EP4_FIFO(val) bfin_write16(USB_EP4_FIFO, val)
-#define bfin_read_USB_EP5_FIFO() bfin_read16(USB_EP5_FIFO)
-#define bfin_write_USB_EP5_FIFO(val) bfin_write16(USB_EP5_FIFO, val)
-#define bfin_read_USB_EP6_FIFO() bfin_read16(USB_EP6_FIFO)
-#define bfin_write_USB_EP6_FIFO(val) bfin_write16(USB_EP6_FIFO, val)
-#define bfin_read_USB_EP7_FIFO() bfin_read16(USB_EP7_FIFO)
-#define bfin_write_USB_EP7_FIFO(val) bfin_write16(USB_EP7_FIFO, val)
-
-/* USB OTG Control Registers */
-
-#define bfin_read_USB_OTG_DEV_CTL() bfin_read16(USB_OTG_DEV_CTL)
-#define bfin_write_USB_OTG_DEV_CTL(val) bfin_write16(USB_OTG_DEV_CTL, val)
-#define bfin_read_USB_OTG_VBUS_IRQ() bfin_read16(USB_OTG_VBUS_IRQ)
-#define bfin_write_USB_OTG_VBUS_IRQ(val) bfin_write16(USB_OTG_VBUS_IRQ, val)
-#define bfin_read_USB_OTG_VBUS_MASK() bfin_read16(USB_OTG_VBUS_MASK)
-#define bfin_write_USB_OTG_VBUS_MASK(val) bfin_write16(USB_OTG_VBUS_MASK, val)
-
-/* USB Phy Control Registers */
-
-#define bfin_read_USB_LINKINFO() bfin_read16(USB_LINKINFO)
-#define bfin_write_USB_LINKINFO(val) bfin_write16(USB_LINKINFO, val)
-#define bfin_read_USB_VPLEN() bfin_read16(USB_VPLEN)
-#define bfin_write_USB_VPLEN(val) bfin_write16(USB_VPLEN, val)
-#define bfin_read_USB_HS_EOF1() bfin_read16(USB_HS_EOF1)
-#define bfin_write_USB_HS_EOF1(val) bfin_write16(USB_HS_EOF1, val)
-#define bfin_read_USB_FS_EOF1() bfin_read16(USB_FS_EOF1)
-#define bfin_write_USB_FS_EOF1(val) bfin_write16(USB_FS_EOF1, val)
-#define bfin_read_USB_LS_EOF1() bfin_read16(USB_LS_EOF1)
-#define bfin_write_USB_LS_EOF1(val) bfin_write16(USB_LS_EOF1, val)
-
-/* (APHY_CNTRL is for ADI usage only) */
-
-#define bfin_read_USB_APHY_CNTRL() bfin_read16(USB_APHY_CNTRL)
-#define bfin_write_USB_APHY_CNTRL(val) bfin_write16(USB_APHY_CNTRL, val)
-
-/* (APHY_CALIB is for ADI usage only) */
-
-#define bfin_read_USB_APHY_CALIB() bfin_read16(USB_APHY_CALIB)
-#define bfin_write_USB_APHY_CALIB(val) bfin_write16(USB_APHY_CALIB, val)
-
-#define bfin_read_USB_APHY_CNTRL2() bfin_read16(USB_APHY_CNTRL2)
-#define bfin_write_USB_APHY_CNTRL2(val) bfin_write16(USB_APHY_CNTRL2, val)
-
-/* (PHY_TEST is for ADI usage only) */
-
-#define bfin_read_USB_PHY_TEST() bfin_read16(USB_PHY_TEST)
-#define bfin_write_USB_PHY_TEST(val) bfin_write16(USB_PHY_TEST, val)
-
-#define bfin_read_USB_PLLOSC_CTRL() bfin_read16(USB_PLLOSC_CTRL)
-#define bfin_write_USB_PLLOSC_CTRL(val) bfin_write16(USB_PLLOSC_CTRL, val)
-#define bfin_read_USB_SRP_CLKDIV() bfin_read16(USB_SRP_CLKDIV)
-#define bfin_write_USB_SRP_CLKDIV(val) bfin_write16(USB_SRP_CLKDIV, val)
-
-/* USB Endpoint 0 Control Registers */
-
-#define bfin_read_USB_EP_NI0_TXMAXP() bfin_read16(USB_EP_NI0_TXMAXP)
-#define bfin_write_USB_EP_NI0_TXMAXP(val) bfin_write16(USB_EP_NI0_TXMAXP, val)
-#define bfin_read_USB_EP_NI0_TXCSR() bfin_read16(USB_EP_NI0_TXCSR)
-#define bfin_write_USB_EP_NI0_TXCSR(val) bfin_write16(USB_EP_NI0_TXCSR, val)
-#define bfin_read_USB_EP_NI0_RXMAXP() bfin_read16(USB_EP_NI0_RXMAXP)
-#define bfin_write_USB_EP_NI0_RXMAXP(val) bfin_write16(USB_EP_NI0_RXMAXP, val)
-#define bfin_read_USB_EP_NI0_RXCSR() bfin_read16(USB_EP_NI0_RXCSR)
-#define bfin_write_USB_EP_NI0_RXCSR(val) bfin_write16(USB_EP_NI0_RXCSR, val)
-#define bfin_read_USB_EP_NI0_RXCOUNT() bfin_read16(USB_EP_NI0_RXCOUNT)
-#define bfin_write_USB_EP_NI0_RXCOUNT(val) bfin_write16(USB_EP_NI0_RXCOUNT, val)
-#define bfin_read_USB_EP_NI0_TXTYPE() bfin_read16(USB_EP_NI0_TXTYPE)
-#define bfin_write_USB_EP_NI0_TXTYPE(val) bfin_write16(USB_EP_NI0_TXTYPE, val)
-#define bfin_read_USB_EP_NI0_TXINTERVAL() bfin_read16(USB_EP_NI0_TXINTERVAL)
-#define bfin_write_USB_EP_NI0_TXINTERVAL(val) bfin_write16(USB_EP_NI0_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI0_RXTYPE() bfin_read16(USB_EP_NI0_RXTYPE)
-#define bfin_write_USB_EP_NI0_RXTYPE(val) bfin_write16(USB_EP_NI0_RXTYPE, val)
-#define bfin_read_USB_EP_NI0_RXINTERVAL() bfin_read16(USB_EP_NI0_RXINTERVAL)
-#define bfin_write_USB_EP_NI0_RXINTERVAL(val) bfin_write16(USB_EP_NI0_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI0_TXCOUNT() bfin_read16(USB_EP_NI0_TXCOUNT)
-#define bfin_write_USB_EP_NI0_TXCOUNT(val) bfin_write16(USB_EP_NI0_TXCOUNT, val)
-
-/* USB Endpoint 1 Control Registers */
-
-#define bfin_read_USB_EP_NI1_TXMAXP() bfin_read16(USB_EP_NI1_TXMAXP)
-#define bfin_write_USB_EP_NI1_TXMAXP(val) bfin_write16(USB_EP_NI1_TXMAXP, val)
-#define bfin_read_USB_EP_NI1_TXCSR() bfin_read16(USB_EP_NI1_TXCSR)
-#define bfin_write_USB_EP_NI1_TXCSR(val) bfin_write16(USB_EP_NI1_TXCSR, val)
-#define bfin_read_USB_EP_NI1_RXMAXP() bfin_read16(USB_EP_NI1_RXMAXP)
-#define bfin_write_USB_EP_NI1_RXMAXP(val) bfin_write16(USB_EP_NI1_RXMAXP, val)
-#define bfin_read_USB_EP_NI1_RXCSR() bfin_read16(USB_EP_NI1_RXCSR)
-#define bfin_write_USB_EP_NI1_RXCSR(val) bfin_write16(USB_EP_NI1_RXCSR, val)
-#define bfin_read_USB_EP_NI1_RXCOUNT() bfin_read16(USB_EP_NI1_RXCOUNT)
-#define bfin_write_USB_EP_NI1_RXCOUNT(val) bfin_write16(USB_EP_NI1_RXCOUNT, val)
-#define bfin_read_USB_EP_NI1_TXTYPE() bfin_read16(USB_EP_NI1_TXTYPE)
-#define bfin_write_USB_EP_NI1_TXTYPE(val) bfin_write16(USB_EP_NI1_TXTYPE, val)
-#define bfin_read_USB_EP_NI1_TXINTERVAL() bfin_read16(USB_EP_NI1_TXINTERVAL)
-#define bfin_write_USB_EP_NI1_TXINTERVAL(val) bfin_write16(USB_EP_NI1_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI1_RXTYPE() bfin_read16(USB_EP_NI1_RXTYPE)
-#define bfin_write_USB_EP_NI1_RXTYPE(val) bfin_write16(USB_EP_NI1_RXTYPE, val)
-#define bfin_read_USB_EP_NI1_RXINTERVAL() bfin_read16(USB_EP_NI1_RXINTERVAL)
-#define bfin_write_USB_EP_NI1_RXINTERVAL(val) bfin_write16(USB_EP_NI1_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI1_TXCOUNT() bfin_read16(USB_EP_NI1_TXCOUNT)
-#define bfin_write_USB_EP_NI1_TXCOUNT(val) bfin_write16(USB_EP_NI1_TXCOUNT, val)
-
-/* USB Endpoint 2 Control Registers */
-
-#define bfin_read_USB_EP_NI2_TXMAXP() bfin_read16(USB_EP_NI2_TXMAXP)
-#define bfin_write_USB_EP_NI2_TXMAXP(val) bfin_write16(USB_EP_NI2_TXMAXP, val)
-#define bfin_read_USB_EP_NI2_TXCSR() bfin_read16(USB_EP_NI2_TXCSR)
-#define bfin_write_USB_EP_NI2_TXCSR(val) bfin_write16(USB_EP_NI2_TXCSR, val)
-#define bfin_read_USB_EP_NI2_RXMAXP() bfin_read16(USB_EP_NI2_RXMAXP)
-#define bfin_write_USB_EP_NI2_RXMAXP(val) bfin_write16(USB_EP_NI2_RXMAXP, val)
-#define bfin_read_USB_EP_NI2_RXCSR() bfin_read16(USB_EP_NI2_RXCSR)
-#define bfin_write_USB_EP_NI2_RXCSR(val) bfin_write16(USB_EP_NI2_RXCSR, val)
-#define bfin_read_USB_EP_NI2_RXCOUNT() bfin_read16(USB_EP_NI2_RXCOUNT)
-#define bfin_write_USB_EP_NI2_RXCOUNT(val) bfin_write16(USB_EP_NI2_RXCOUNT, val)
-#define bfin_read_USB_EP_NI2_TXTYPE() bfin_read16(USB_EP_NI2_TXTYPE)
-#define bfin_write_USB_EP_NI2_TXTYPE(val) bfin_write16(USB_EP_NI2_TXTYPE, val)
-#define bfin_read_USB_EP_NI2_TXINTERVAL() bfin_read16(USB_EP_NI2_TXINTERVAL)
-#define bfin_write_USB_EP_NI2_TXINTERVAL(val) bfin_write16(USB_EP_NI2_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI2_RXTYPE() bfin_read16(USB_EP_NI2_RXTYPE)
-#define bfin_write_USB_EP_NI2_RXTYPE(val) bfin_write16(USB_EP_NI2_RXTYPE, val)
-#define bfin_read_USB_EP_NI2_RXINTERVAL() bfin_read16(USB_EP_NI2_RXINTERVAL)
-#define bfin_write_USB_EP_NI2_RXINTERVAL(val) bfin_write16(USB_EP_NI2_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI2_TXCOUNT() bfin_read16(USB_EP_NI2_TXCOUNT)
-#define bfin_write_USB_EP_NI2_TXCOUNT(val) bfin_write16(USB_EP_NI2_TXCOUNT, val)
-
-/* USB Endpoint 3 Control Registers */
-
-#define bfin_read_USB_EP_NI3_TXMAXP() bfin_read16(USB_EP_NI3_TXMAXP)
-#define bfin_write_USB_EP_NI3_TXMAXP(val) bfin_write16(USB_EP_NI3_TXMAXP, val)
-#define bfin_read_USB_EP_NI3_TXCSR() bfin_read16(USB_EP_NI3_TXCSR)
-#define bfin_write_USB_EP_NI3_TXCSR(val) bfin_write16(USB_EP_NI3_TXCSR, val)
-#define bfin_read_USB_EP_NI3_RXMAXP() bfin_read16(USB_EP_NI3_RXMAXP)
-#define bfin_write_USB_EP_NI3_RXMAXP(val) bfin_write16(USB_EP_NI3_RXMAXP, val)
-#define bfin_read_USB_EP_NI3_RXCSR() bfin_read16(USB_EP_NI3_RXCSR)
-#define bfin_write_USB_EP_NI3_RXCSR(val) bfin_write16(USB_EP_NI3_RXCSR, val)
-#define bfin_read_USB_EP_NI3_RXCOUNT() bfin_read16(USB_EP_NI3_RXCOUNT)
-#define bfin_write_USB_EP_NI3_RXCOUNT(val) bfin_write16(USB_EP_NI3_RXCOUNT, val)
-#define bfin_read_USB_EP_NI3_TXTYPE() bfin_read16(USB_EP_NI3_TXTYPE)
-#define bfin_write_USB_EP_NI3_TXTYPE(val) bfin_write16(USB_EP_NI3_TXTYPE, val)
-#define bfin_read_USB_EP_NI3_TXINTERVAL() bfin_read16(USB_EP_NI3_TXINTERVAL)
-#define bfin_write_USB_EP_NI3_TXINTERVAL(val) bfin_write16(USB_EP_NI3_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI3_RXTYPE() bfin_read16(USB_EP_NI3_RXTYPE)
-#define bfin_write_USB_EP_NI3_RXTYPE(val) bfin_write16(USB_EP_NI3_RXTYPE, val)
-#define bfin_read_USB_EP_NI3_RXINTERVAL() bfin_read16(USB_EP_NI3_RXINTERVAL)
-#define bfin_write_USB_EP_NI3_RXINTERVAL(val) bfin_write16(USB_EP_NI3_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI3_TXCOUNT() bfin_read16(USB_EP_NI3_TXCOUNT)
-#define bfin_write_USB_EP_NI3_TXCOUNT(val) bfin_write16(USB_EP_NI3_TXCOUNT, val)
-
-/* USB Endpoint 4 Control Registers */
-
-#define bfin_read_USB_EP_NI4_TXMAXP() bfin_read16(USB_EP_NI4_TXMAXP)
-#define bfin_write_USB_EP_NI4_TXMAXP(val) bfin_write16(USB_EP_NI4_TXMAXP, val)
-#define bfin_read_USB_EP_NI4_TXCSR() bfin_read16(USB_EP_NI4_TXCSR)
-#define bfin_write_USB_EP_NI4_TXCSR(val) bfin_write16(USB_EP_NI4_TXCSR, val)
-#define bfin_read_USB_EP_NI4_RXMAXP() bfin_read16(USB_EP_NI4_RXMAXP)
-#define bfin_write_USB_EP_NI4_RXMAXP(val) bfin_write16(USB_EP_NI4_RXMAXP, val)
-#define bfin_read_USB_EP_NI4_RXCSR() bfin_read16(USB_EP_NI4_RXCSR)
-#define bfin_write_USB_EP_NI4_RXCSR(val) bfin_write16(USB_EP_NI4_RXCSR, val)
-#define bfin_read_USB_EP_NI4_RXCOUNT() bfin_read16(USB_EP_NI4_RXCOUNT)
-#define bfin_write_USB_EP_NI4_RXCOUNT(val) bfin_write16(USB_EP_NI4_RXCOUNT, val)
-#define bfin_read_USB_EP_NI4_TXTYPE() bfin_read16(USB_EP_NI4_TXTYPE)
-#define bfin_write_USB_EP_NI4_TXTYPE(val) bfin_write16(USB_EP_NI4_TXTYPE, val)
-#define bfin_read_USB_EP_NI4_TXINTERVAL() bfin_read16(USB_EP_NI4_TXINTERVAL)
-#define bfin_write_USB_EP_NI4_TXINTERVAL(val) bfin_write16(USB_EP_NI4_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI4_RXTYPE() bfin_read16(USB_EP_NI4_RXTYPE)
-#define bfin_write_USB_EP_NI4_RXTYPE(val) bfin_write16(USB_EP_NI4_RXTYPE, val)
-#define bfin_read_USB_EP_NI4_RXINTERVAL() bfin_read16(USB_EP_NI4_RXINTERVAL)
-#define bfin_write_USB_EP_NI4_RXINTERVAL(val) bfin_write16(USB_EP_NI4_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI4_TXCOUNT() bfin_read16(USB_EP_NI4_TXCOUNT)
-#define bfin_write_USB_EP_NI4_TXCOUNT(val) bfin_write16(USB_EP_NI4_TXCOUNT, val)
-
-/* USB Endpoint 5 Control Registers */
-
-#define bfin_read_USB_EP_NI5_TXMAXP() bfin_read16(USB_EP_NI5_TXMAXP)
-#define bfin_write_USB_EP_NI5_TXMAXP(val) bfin_write16(USB_EP_NI5_TXMAXP, val)
-#define bfin_read_USB_EP_NI5_TXCSR() bfin_read16(USB_EP_NI5_TXCSR)
-#define bfin_write_USB_EP_NI5_TXCSR(val) bfin_write16(USB_EP_NI5_TXCSR, val)
-#define bfin_read_USB_EP_NI5_RXMAXP() bfin_read16(USB_EP_NI5_RXMAXP)
-#define bfin_write_USB_EP_NI5_RXMAXP(val) bfin_write16(USB_EP_NI5_RXMAXP, val)
-#define bfin_read_USB_EP_NI5_RXCSR() bfin_read16(USB_EP_NI5_RXCSR)
-#define bfin_write_USB_EP_NI5_RXCSR(val) bfin_write16(USB_EP_NI5_RXCSR, val)
-#define bfin_read_USB_EP_NI5_RXCOUNT() bfin_read16(USB_EP_NI5_RXCOUNT)
-#define bfin_write_USB_EP_NI5_RXCOUNT(val) bfin_write16(USB_EP_NI5_RXCOUNT, val)
-#define bfin_read_USB_EP_NI5_TXTYPE() bfin_read16(USB_EP_NI5_TXTYPE)
-#define bfin_write_USB_EP_NI5_TXTYPE(val) bfin_write16(USB_EP_NI5_TXTYPE, val)
-#define bfin_read_USB_EP_NI5_TXINTERVAL() bfin_read16(USB_EP_NI5_TXINTERVAL)
-#define bfin_write_USB_EP_NI5_TXINTERVAL(val) bfin_write16(USB_EP_NI5_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI5_RXTYPE() bfin_read16(USB_EP_NI5_RXTYPE)
-#define bfin_write_USB_EP_NI5_RXTYPE(val) bfin_write16(USB_EP_NI5_RXTYPE, val)
-#define bfin_read_USB_EP_NI5_RXINTERVAL() bfin_read16(USB_EP_NI5_RXINTERVAL)
-#define bfin_write_USB_EP_NI5_RXINTERVAL(val) bfin_write16(USB_EP_NI5_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI5_TXCOUNT() bfin_read16(USB_EP_NI5_TXCOUNT)
-#define bfin_write_USB_EP_NI5_TXCOUNT(val) bfin_write16(USB_EP_NI5_TXCOUNT, val)
-
-/* USB Endpoint 6 Control Registers */
-
-#define bfin_read_USB_EP_NI6_TXMAXP() bfin_read16(USB_EP_NI6_TXMAXP)
-#define bfin_write_USB_EP_NI6_TXMAXP(val) bfin_write16(USB_EP_NI6_TXMAXP, val)
-#define bfin_read_USB_EP_NI6_TXCSR() bfin_read16(USB_EP_NI6_TXCSR)
-#define bfin_write_USB_EP_NI6_TXCSR(val) bfin_write16(USB_EP_NI6_TXCSR, val)
-#define bfin_read_USB_EP_NI6_RXMAXP() bfin_read16(USB_EP_NI6_RXMAXP)
-#define bfin_write_USB_EP_NI6_RXMAXP(val) bfin_write16(USB_EP_NI6_RXMAXP, val)
-#define bfin_read_USB_EP_NI6_RXCSR() bfin_read16(USB_EP_NI6_RXCSR)
-#define bfin_write_USB_EP_NI6_RXCSR(val) bfin_write16(USB_EP_NI6_RXCSR, val)
-#define bfin_read_USB_EP_NI6_RXCOUNT() bfin_read16(USB_EP_NI6_RXCOUNT)
-#define bfin_write_USB_EP_NI6_RXCOUNT(val) bfin_write16(USB_EP_NI6_RXCOUNT, val)
-#define bfin_read_USB_EP_NI6_TXTYPE() bfin_read16(USB_EP_NI6_TXTYPE)
-#define bfin_write_USB_EP_NI6_TXTYPE(val) bfin_write16(USB_EP_NI6_TXTYPE, val)
-#define bfin_read_USB_EP_NI6_TXINTERVAL() bfin_read16(USB_EP_NI6_TXINTERVAL)
-#define bfin_write_USB_EP_NI6_TXINTERVAL(val) bfin_write16(USB_EP_NI6_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI6_RXTYPE() bfin_read16(USB_EP_NI6_RXTYPE)
-#define bfin_write_USB_EP_NI6_RXTYPE(val) bfin_write16(USB_EP_NI6_RXTYPE, val)
-#define bfin_read_USB_EP_NI6_RXINTERVAL() bfin_read16(USB_EP_NI6_RXINTERVAL)
-#define bfin_write_USB_EP_NI6_RXINTERVAL(val) bfin_write16(USB_EP_NI6_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI6_TXCOUNT() bfin_read16(USB_EP_NI6_TXCOUNT)
-#define bfin_write_USB_EP_NI6_TXCOUNT(val) bfin_write16(USB_EP_NI6_TXCOUNT, val)
-
-/* USB Endpoint 7 Control Registers */
-
-#define bfin_read_USB_EP_NI7_TXMAXP() bfin_read16(USB_EP_NI7_TXMAXP)
-#define bfin_write_USB_EP_NI7_TXMAXP(val) bfin_write16(USB_EP_NI7_TXMAXP, val)
-#define bfin_read_USB_EP_NI7_TXCSR() bfin_read16(USB_EP_NI7_TXCSR)
-#define bfin_write_USB_EP_NI7_TXCSR(val) bfin_write16(USB_EP_NI7_TXCSR, val)
-#define bfin_read_USB_EP_NI7_RXMAXP() bfin_read16(USB_EP_NI7_RXMAXP)
-#define bfin_write_USB_EP_NI7_RXMAXP(val) bfin_write16(USB_EP_NI7_RXMAXP, val)
-#define bfin_read_USB_EP_NI7_RXCSR() bfin_read16(USB_EP_NI7_RXCSR)
-#define bfin_write_USB_EP_NI7_RXCSR(val) bfin_write16(USB_EP_NI7_RXCSR, val)
-#define bfin_read_USB_EP_NI7_RXCOUNT() bfin_read16(USB_EP_NI7_RXCOUNT)
-#define bfin_write_USB_EP_NI7_RXCOUNT(val) bfin_write16(USB_EP_NI7_RXCOUNT, val)
-#define bfin_read_USB_EP_NI7_TXTYPE() bfin_read16(USB_EP_NI7_TXTYPE)
-#define bfin_write_USB_EP_NI7_TXTYPE(val) bfin_write16(USB_EP_NI7_TXTYPE, val)
-#define bfin_read_USB_EP_NI7_TXINTERVAL() bfin_read16(USB_EP_NI7_TXINTERVAL)
-#define bfin_write_USB_EP_NI7_TXINTERVAL(val) bfin_write16(USB_EP_NI7_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI7_RXTYPE() bfin_read16(USB_EP_NI7_RXTYPE)
-#define bfin_write_USB_EP_NI7_RXTYPE(val) bfin_write16(USB_EP_NI7_RXTYPE, val)
-#define bfin_read_USB_EP_NI7_RXINTERVAL() bfin_read16(USB_EP_NI7_RXINTERVAL)
-#define bfin_write_USB_EP_NI7_RXINTERVAL(val) bfin_write16(USB_EP_NI7_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI7_TXCOUNT() bfin_read16(USB_EP_NI7_TXCOUNT)
-#define bfin_write_USB_EP_NI7_TXCOUNT(val) bfin_write16(USB_EP_NI7_TXCOUNT, val)
-
-#define bfin_read_USB_DMA_INTERRUPT() bfin_read16(USB_DMA_INTERRUPT)
-#define bfin_write_USB_DMA_INTERRUPT(val) bfin_write16(USB_DMA_INTERRUPT, val)
-
-/* USB Channel 0 Config Registers */
-
-#define bfin_read_USB_DMA0CONTROL() bfin_read16(USB_DMA0CONTROL)
-#define bfin_write_USB_DMA0CONTROL(val) bfin_write16(USB_DMA0CONTROL, val)
-#define bfin_read_USB_DMA0ADDRLOW() bfin_read16(USB_DMA0ADDRLOW)
-#define bfin_write_USB_DMA0ADDRLOW(val) bfin_write16(USB_DMA0ADDRLOW, val)
-#define bfin_read_USB_DMA0ADDRHIGH() bfin_read16(USB_DMA0ADDRHIGH)
-#define bfin_write_USB_DMA0ADDRHIGH(val) bfin_write16(USB_DMA0ADDRHIGH, val)
-#define bfin_read_USB_DMA0COUNTLOW() bfin_read16(USB_DMA0COUNTLOW)
-#define bfin_write_USB_DMA0COUNTLOW(val) bfin_write16(USB_DMA0COUNTLOW, val)
-#define bfin_read_USB_DMA0COUNTHIGH() bfin_read16(USB_DMA0COUNTHIGH)
-#define bfin_write_USB_DMA0COUNTHIGH(val) bfin_write16(USB_DMA0COUNTHIGH, val)
-
-/* USB Channel 1 Config Registers */
-
-#define bfin_read_USB_DMA1CONTROL() bfin_read16(USB_DMA1CONTROL)
-#define bfin_write_USB_DMA1CONTROL(val) bfin_write16(USB_DMA1CONTROL, val)
-#define bfin_read_USB_DMA1ADDRLOW() bfin_read16(USB_DMA1ADDRLOW)
-#define bfin_write_USB_DMA1ADDRLOW(val) bfin_write16(USB_DMA1ADDRLOW, val)
-#define bfin_read_USB_DMA1ADDRHIGH() bfin_read16(USB_DMA1ADDRHIGH)
-#define bfin_write_USB_DMA1ADDRHIGH(val) bfin_write16(USB_DMA1ADDRHIGH, val)
-#define bfin_read_USB_DMA1COUNTLOW() bfin_read16(USB_DMA1COUNTLOW)
-#define bfin_write_USB_DMA1COUNTLOW(val) bfin_write16(USB_DMA1COUNTLOW, val)
-#define bfin_read_USB_DMA1COUNTHIGH() bfin_read16(USB_DMA1COUNTHIGH)
-#define bfin_write_USB_DMA1COUNTHIGH(val) bfin_write16(USB_DMA1COUNTHIGH, val)
-
-/* USB Channel 2 Config Registers */
-
-#define bfin_read_USB_DMA2CONTROL() bfin_read16(USB_DMA2CONTROL)
-#define bfin_write_USB_DMA2CONTROL(val) bfin_write16(USB_DMA2CONTROL, val)
-#define bfin_read_USB_DMA2ADDRLOW() bfin_read16(USB_DMA2ADDRLOW)
-#define bfin_write_USB_DMA2ADDRLOW(val) bfin_write16(USB_DMA2ADDRLOW, val)
-#define bfin_read_USB_DMA2ADDRHIGH() bfin_read16(USB_DMA2ADDRHIGH)
-#define bfin_write_USB_DMA2ADDRHIGH(val) bfin_write16(USB_DMA2ADDRHIGH, val)
-#define bfin_read_USB_DMA2COUNTLOW() bfin_read16(USB_DMA2COUNTLOW)
-#define bfin_write_USB_DMA2COUNTLOW(val) bfin_write16(USB_DMA2COUNTLOW, val)
-#define bfin_read_USB_DMA2COUNTHIGH() bfin_read16(USB_DMA2COUNTHIGH)
-#define bfin_write_USB_DMA2COUNTHIGH(val) bfin_write16(USB_DMA2COUNTHIGH, val)
-
-/* USB Channel 3 Config Registers */
-
-#define bfin_read_USB_DMA3CONTROL() bfin_read16(USB_DMA3CONTROL)
-#define bfin_write_USB_DMA3CONTROL(val) bfin_write16(USB_DMA3CONTROL, val)
-#define bfin_read_USB_DMA3ADDRLOW() bfin_read16(USB_DMA3ADDRLOW)
-#define bfin_write_USB_DMA3ADDRLOW(val) bfin_write16(USB_DMA3ADDRLOW, val)
-#define bfin_read_USB_DMA3ADDRHIGH() bfin_read16(USB_DMA3ADDRHIGH)
-#define bfin_write_USB_DMA3ADDRHIGH(val) bfin_write16(USB_DMA3ADDRHIGH, val)
-#define bfin_read_USB_DMA3COUNTLOW() bfin_read16(USB_DMA3COUNTLOW)
-#define bfin_write_USB_DMA3COUNTLOW(val) bfin_write16(USB_DMA3COUNTLOW, val)
-#define bfin_read_USB_DMA3COUNTHIGH() bfin_read16(USB_DMA3COUNTHIGH)
-#define bfin_write_USB_DMA3COUNTHIGH(val) bfin_write16(USB_DMA3COUNTHIGH, val)
-
-/* USB Channel 4 Config Registers */
-
-#define bfin_read_USB_DMA4CONTROL() bfin_read16(USB_DMA4CONTROL)
-#define bfin_write_USB_DMA4CONTROL(val) bfin_write16(USB_DMA4CONTROL, val)
-#define bfin_read_USB_DMA4ADDRLOW() bfin_read16(USB_DMA4ADDRLOW)
-#define bfin_write_USB_DMA4ADDRLOW(val) bfin_write16(USB_DMA4ADDRLOW, val)
-#define bfin_read_USB_DMA4ADDRHIGH() bfin_read16(USB_DMA4ADDRHIGH)
-#define bfin_write_USB_DMA4ADDRHIGH(val) bfin_write16(USB_DMA4ADDRHIGH, val)
-#define bfin_read_USB_DMA4COUNTLOW() bfin_read16(USB_DMA4COUNTLOW)
-#define bfin_write_USB_DMA4COUNTLOW(val) bfin_write16(USB_DMA4COUNTLOW, val)
-#define bfin_read_USB_DMA4COUNTHIGH() bfin_read16(USB_DMA4COUNTHIGH)
-#define bfin_write_USB_DMA4COUNTHIGH(val) bfin_write16(USB_DMA4COUNTHIGH, val)
-
-/* USB Channel 5 Config Registers */
-
-#define bfin_read_USB_DMA5CONTROL() bfin_read16(USB_DMA5CONTROL)
-#define bfin_write_USB_DMA5CONTROL(val) bfin_write16(USB_DMA5CONTROL, val)
-#define bfin_read_USB_DMA5ADDRLOW() bfin_read16(USB_DMA5ADDRLOW)
-#define bfin_write_USB_DMA5ADDRLOW(val) bfin_write16(USB_DMA5ADDRLOW, val)
-#define bfin_read_USB_DMA5ADDRHIGH() bfin_read16(USB_DMA5ADDRHIGH)
-#define bfin_write_USB_DMA5ADDRHIGH(val) bfin_write16(USB_DMA5ADDRHIGH, val)
-#define bfin_read_USB_DMA5COUNTLOW() bfin_read16(USB_DMA5COUNTLOW)
-#define bfin_write_USB_DMA5COUNTLOW(val) bfin_write16(USB_DMA5COUNTLOW, val)
-#define bfin_read_USB_DMA5COUNTHIGH() bfin_read16(USB_DMA5COUNTHIGH)
-#define bfin_write_USB_DMA5COUNTHIGH(val) bfin_write16(USB_DMA5COUNTHIGH, val)
-
-/* USB Channel 6 Config Registers */
-
-#define bfin_read_USB_DMA6CONTROL() bfin_read16(USB_DMA6CONTROL)
-#define bfin_write_USB_DMA6CONTROL(val) bfin_write16(USB_DMA6CONTROL, val)
-#define bfin_read_USB_DMA6ADDRLOW() bfin_read16(USB_DMA6ADDRLOW)
-#define bfin_write_USB_DMA6ADDRLOW(val) bfin_write16(USB_DMA6ADDRLOW, val)
-#define bfin_read_USB_DMA6ADDRHIGH() bfin_read16(USB_DMA6ADDRHIGH)
-#define bfin_write_USB_DMA6ADDRHIGH(val) bfin_write16(USB_DMA6ADDRHIGH, val)
-#define bfin_read_USB_DMA6COUNTLOW() bfin_read16(USB_DMA6COUNTLOW)
-#define bfin_write_USB_DMA6COUNTLOW(val) bfin_write16(USB_DMA6COUNTLOW, val)
-#define bfin_read_USB_DMA6COUNTHIGH() bfin_read16(USB_DMA6COUNTHIGH)
-#define bfin_write_USB_DMA6COUNTHIGH(val) bfin_write16(USB_DMA6COUNTHIGH, val)
-
-/* USB Channel 7 Config Registers */
-
-#define bfin_read_USB_DMA7CONTROL() bfin_read16(USB_DMA7CONTROL)
-#define bfin_write_USB_DMA7CONTROL(val) bfin_write16(USB_DMA7CONTROL, val)
-#define bfin_read_USB_DMA7ADDRLOW() bfin_read16(USB_DMA7ADDRLOW)
-#define bfin_write_USB_DMA7ADDRLOW(val) bfin_write16(USB_DMA7ADDRLOW, val)
-#define bfin_read_USB_DMA7ADDRHIGH() bfin_read16(USB_DMA7ADDRHIGH)
-#define bfin_write_USB_DMA7ADDRHIGH(val) bfin_write16(USB_DMA7ADDRHIGH, val)
-#define bfin_read_USB_DMA7COUNTLOW() bfin_read16(USB_DMA7COUNTLOW)
-#define bfin_write_USB_DMA7COUNTLOW(val) bfin_write16(USB_DMA7COUNTLOW, val)
-#define bfin_read_USB_DMA7COUNTHIGH() bfin_read16(USB_DMA7COUNTHIGH)
-#define bfin_write_USB_DMA7COUNTHIGH(val) bfin_write16(USB_DMA7COUNTHIGH, val)
-
#endif /* _CDEF_BF527_H */
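The accessor macros removed above pair each 16-bit USB MMR with a bfin_read16()/bfin_write16() wrapper. A minimal sketch of the read-modify-write pattern they are normally used in, built only from names visible in this patch (the helper routine itself is hypothetical and not part of the change; the 0x0010 value is the STALL_SEND_T mask from the defBF527.h hunk further down):

/* Illustrative only: ask endpoint 1 to return a STALL handshake by
 * read-modify-writing its Tx CSR through the removed accessors. */
static inline void ep1_send_stall(void)
{
	unsigned short csr = bfin_read_USB_EP_NI1_TXCSR();	/* current CSR value      */
	bfin_write_USB_EP_NI1_TXCSR(csr | 0x0010);		/* set STALL_SEND_T (0x10) */
}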
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
index 7014dde10dd..12f2ad45314 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
@@ -844,6 +844,7 @@
#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
@@ -1062,17 +1063,6 @@
#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
-/* OTP/FUSE Registers */
-
-#define bfin_read_OTP_CONTROL() bfin_read16(OTP_CONTROL)
-#define bfin_write_OTP_CONTROL(val) bfin_write16(OTP_CONTROL, val)
-#define bfin_read_OTP_BEN() bfin_read16(OTP_BEN)
-#define bfin_write_OTP_BEN(val) bfin_write16(OTP_BEN, val)
-#define bfin_read_OTP_STATUS() bfin_read16(OTP_STATUS)
-#define bfin_write_OTP_STATUS(val) bfin_write16(OTP_STATUS, val)
-#define bfin_read_OTP_TIMING() bfin_read32(OTP_TIMING)
-#define bfin_write_OTP_TIMING(val) bfin_write32(OTP_TIMING, val)
-
/* Security Registers */
#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
@@ -1082,17 +1072,6 @@
#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
-/* OTP Read/Write Data Buffer Registers */
-
-#define bfin_read_OTP_DATA0() bfin_read32(OTP_DATA0)
-#define bfin_write_OTP_DATA0(val) bfin_write32(OTP_DATA0, val)
-#define bfin_read_OTP_DATA1() bfin_read32(OTP_DATA1)
-#define bfin_write_OTP_DATA1(val) bfin_write32(OTP_DATA1, val)
-#define bfin_read_OTP_DATA2() bfin_read32(OTP_DATA2)
-#define bfin_write_OTP_DATA2(val) bfin_write32(OTP_DATA2, val)
-#define bfin_read_OTP_DATA3() bfin_read32(OTP_DATA3)
-#define bfin_write_OTP_DATA3(val) bfin_write32(OTP_DATA3, val)
-
/* NFC Registers */
#define bfin_read_NFC_CTL() bfin_read16(NFC_CTL)
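The cdefBF52x_base.h hunk above also adds bfin_clear_PPI_STATUS(), which writes 0xFFFF to the write-1-to-clear PPI status register so every latched bit is acknowledged in one shot. A hedged usage sketch — only the two accessors come from the header; the surrounding error-recovery routine is hypothetical:

/* Hypothetical PPI error path using the helper added in the hunk above. */
static void ppi_ack_errors(void)
{
	if (bfin_read_PPI_STATUS())		/* any latched status/error bits?   */
		bfin_clear_PPI_STATUS();	/* W1C: writing 0xFFFF clears them  */
}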
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index 82abefc1ef6..c136f703296 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -7,15 +7,8 @@
#ifndef _DEF_BF525_H
#define _DEF_BF525_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF525 */
-
-/* Include defBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "defBF52x_base.h"
-
-/* The following are the #defines needed by ADSP-BF525 that are not in the common header */
+/* BF525 is BF522 + USB */
+#include "defBF522.h"
/* USB Control Registers */
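The header rewrites above and below replace the old flat includes with a layered chain, so each derivative only defines what it adds on top of the previous part. A sketch of what a consumer sees after the patch (defBF522.h itself sits outside this diff and its contents are assumed):

/* Assumed layering after this patch:
 *   defBF522.h  - base ADSP-BF522 definitions (not shown in this diff)
 *   defBF525.h  - #include "defBF522.h" plus the USB controller registers
 *   defBF527.h  - #include "defBF525.h" plus the 10/100 EMAC registers
 * so including the most-derived header exposes every layer: */
#include "defBF527.h"	/* EMAC_OPMODE, USB_FADDR, ... all visible here */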
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF527.h b/arch/blackfin/mach-bf527/include/mach/defBF527.h
index 570a125df02..4dd58fb3315 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF527.h
@@ -7,15 +7,9 @@
#ifndef _DEF_BF527_H
#define _DEF_BF527_H
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* BF527 is BF525 + EMAC */
+#include "defBF525.h"
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF527 */
-
-/* Include defBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "defBF52x_base.h"
-
-/* The following are the #defines needed by ADSP-BF527 that are not in the common header */
/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
#define EMAC_OPMODE 0xFFC03000 /* Operating Mode Register */
@@ -394,673 +388,4 @@
#define TX_GE1024_CNT 0x00200000 /* 1024-Max-Byte TX Frames Sent */
#define TX_ABORT_CNT 0x00400000 /* TX Frames Aborted */
-/* USB Control Registers */
-
-#define USB_FADDR 0xffc03800 /* Function address register */
-#define USB_POWER 0xffc03804 /* Power management register */
-#define USB_INTRTX 0xffc03808 /* Interrupt register for endpoint 0 and Tx endpoint 1 to 7 */
-#define USB_INTRRX 0xffc0380c /* Interrupt register for Rx endpoints 1 to 7 */
-#define USB_INTRTXE 0xffc03810 /* Interrupt enable register for IntrTx */
-#define USB_INTRRXE 0xffc03814 /* Interrupt enable register for IntrRx */
-#define USB_INTRUSB 0xffc03818 /* Interrupt register for common USB interrupts */
-#define USB_INTRUSBE 0xffc0381c /* Interrupt enable register for IntrUSB */
-#define USB_FRAME 0xffc03820 /* USB frame number */
-#define USB_INDEX 0xffc03824 /* Index register for selecting the indexed endpoint registers */
-#define USB_TESTMODE 0xffc03828 /* Enables USB 2.0 test modes */
-#define USB_GLOBINTR 0xffc0382c /* Global Interrupt Mask register and Wakeup Exception Interrupt */
-#define USB_GLOBAL_CTL 0xffc03830 /* Global Clock Control for the core */
-
-/* USB Packet Control Registers */
-
-#define USB_TX_MAX_PACKET 0xffc03840 /* Maximum packet size for Host Tx endpoint */
-#define USB_CSR0 0xffc03844 /* Control Status register for endpoint 0 and Control Status register for Host Tx endpoint */
-#define USB_TXCSR 0xffc03844 /* Control Status register for endpoint 0 and Control Status register for Host Tx endpoint */
-#define USB_RX_MAX_PACKET 0xffc03848 /* Maximum packet size for Host Rx endpoint */
-#define USB_RXCSR 0xffc0384c /* Control Status register for Host Rx endpoint */
-#define USB_COUNT0 0xffc03850 /* Number of bytes received in endpoint 0 FIFO and Number of bytes received in Host Tx endpoint */
-#define USB_RXCOUNT 0xffc03850 /* Number of bytes received in endpoint 0 FIFO and Number of bytes received in Host Tx endpoint */
-#define USB_TXTYPE 0xffc03854 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint */
-#define USB_NAKLIMIT0 0xffc03858 /* Sets the NAK response timeout on Endpoint 0 and on Bulk transfers for Host Tx endpoint */
-#define USB_TXINTERVAL 0xffc03858 /* Sets the NAK response timeout on Endpoint 0 and on Bulk transfers for Host Tx endpoint */
-#define USB_RXTYPE 0xffc0385c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint */
-#define USB_RXINTERVAL 0xffc03860 /* Sets the polling interval for Interrupt and Isochronous transfers or the NAK response timeout on Bulk transfers */
-#define USB_TXCOUNT 0xffc03868 /* Number of bytes to be written to the selected endpoint Tx FIFO */
-
-/* USB Endpoint FIFO Registers */
-
-#define USB_EP0_FIFO 0xffc03880 /* Endpoint 0 FIFO */
-#define USB_EP1_FIFO 0xffc03888 /* Endpoint 1 FIFO */
-#define USB_EP2_FIFO 0xffc03890 /* Endpoint 2 FIFO */
-#define USB_EP3_FIFO 0xffc03898 /* Endpoint 3 FIFO */
-#define USB_EP4_FIFO 0xffc038a0 /* Endpoint 4 FIFO */
-#define USB_EP5_FIFO 0xffc038a8 /* Endpoint 5 FIFO */
-#define USB_EP6_FIFO 0xffc038b0 /* Endpoint 6 FIFO */
-#define USB_EP7_FIFO 0xffc038b8 /* Endpoint 7 FIFO */
-
-/* USB OTG Control Registers */
-
-#define USB_OTG_DEV_CTL 0xffc03900 /* OTG Device Control Register */
-#define USB_OTG_VBUS_IRQ 0xffc03904 /* OTG VBUS Control Interrupts */
-#define USB_OTG_VBUS_MASK 0xffc03908 /* VBUS Control Interrupt Enable */
-
-/* USB Phy Control Registers */
-
-#define USB_LINKINFO 0xffc03948 /* Enables programming of some PHY-side delays */
-#define USB_VPLEN 0xffc0394c /* Determines duration of VBUS pulse for VBUS charging */
-#define USB_HS_EOF1 0xffc03950 /* Time buffer for High-Speed transactions */
-#define USB_FS_EOF1 0xffc03954 /* Time buffer for Full-Speed transactions */
-#define USB_LS_EOF1 0xffc03958 /* Time buffer for Low-Speed transactions */
-
-/* (APHY_CNTRL is for ADI usage only) */
-
-#define USB_APHY_CNTRL 0xffc039e0 /* Register that increases visibility of Analog PHY */
-
-/* (APHY_CALIB is for ADI usage only) */
-
-#define USB_APHY_CALIB 0xffc039e4 /* Register used to set some calibration values */
-
-#define USB_APHY_CNTRL2 0xffc039e8 /* Register used to prevent re-enumeration once Moab goes into hibernate mode */
-
-/* (PHY_TEST is for ADI usage only) */
-
-#define USB_PHY_TEST 0xffc039ec /* Used for reducing simulation time and simplifies FIFO testability */
-
-#define USB_PLLOSC_CTRL 0xffc039f0 /* Used to program different parameters for USB PLL and Oscillator */
-#define USB_SRP_CLKDIV 0xffc039f4 /* Used to program clock divide value for the clock fed to the SRP detection logic */
-
-/* USB Endpoint 0 Control Registers */
-
-#define USB_EP_NI0_TXMAXP 0xffc03a00 /* Maximum packet size for Host Tx endpoint0 */
-#define USB_EP_NI0_TXCSR 0xffc03a04 /* Control Status register for endpoint 0 */
-#define USB_EP_NI0_RXMAXP 0xffc03a08 /* Maximum packet size for Host Rx endpoint0 */
-#define USB_EP_NI0_RXCSR 0xffc03a0c /* Control Status register for Host Rx endpoint0 */
-#define USB_EP_NI0_RXCOUNT 0xffc03a10 /* Number of bytes received in endpoint 0 FIFO */
-#define USB_EP_NI0_TXTYPE 0xffc03a14 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint0 */
-#define USB_EP_NI0_TXINTERVAL 0xffc03a18 /* Sets the NAK response timeout on Endpoint 0 */
-#define USB_EP_NI0_RXTYPE 0xffc03a1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint0 */
-#define USB_EP_NI0_RXINTERVAL 0xffc03a20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint0 */
-#define USB_EP_NI0_TXCOUNT 0xffc03a28 /* Number of bytes to be written to the endpoint0 Tx FIFO */
-
-/* USB Endpoint 1 Control Registers */
-
-#define USB_EP_NI1_TXMAXP 0xffc03a40 /* Maximum packet size for Host Tx endpoint1 */
-#define USB_EP_NI1_TXCSR 0xffc03a44 /* Control Status register for endpoint1 */
-#define USB_EP_NI1_RXMAXP 0xffc03a48 /* Maximum packet size for Host Rx endpoint1 */
-#define USB_EP_NI1_RXCSR 0xffc03a4c /* Control Status register for Host Rx endpoint1 */
-#define USB_EP_NI1_RXCOUNT 0xffc03a50 /* Number of bytes received in endpoint1 FIFO */
-#define USB_EP_NI1_TXTYPE 0xffc03a54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint1 */
-#define USB_EP_NI1_TXINTERVAL 0xffc03a58 /* Sets the NAK response timeout on Endpoint1 */
-#define USB_EP_NI1_RXTYPE 0xffc03a5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint1 */
-#define USB_EP_NI1_RXINTERVAL 0xffc03a60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint1 */
-#define USB_EP_NI1_TXCOUNT 0xffc03a68 /* Number of bytes to be written to the endpoint1 Tx FIFO */
-
-/* USB Endpoint 2 Control Registers */
-
-#define USB_EP_NI2_TXMAXP 0xffc03a80 /* Maximum packet size for Host Tx endpoint2 */
-#define USB_EP_NI2_TXCSR 0xffc03a84 /* Control Status register for endpoint2 */
-#define USB_EP_NI2_RXMAXP 0xffc03a88 /* Maximum packet size for Host Rx endpoint2 */
-#define USB_EP_NI2_RXCSR 0xffc03a8c /* Control Status register for Host Rx endpoint2 */
-#define USB_EP_NI2_RXCOUNT 0xffc03a90 /* Number of bytes received in endpoint2 FIFO */
-#define USB_EP_NI2_TXTYPE 0xffc03a94 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint2 */
-#define USB_EP_NI2_TXINTERVAL 0xffc03a98 /* Sets the NAK response timeout on Endpoint2 */
-#define USB_EP_NI2_RXTYPE 0xffc03a9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint2 */
-#define USB_EP_NI2_RXINTERVAL 0xffc03aa0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint2 */
-#define USB_EP_NI2_TXCOUNT 0xffc03aa8 /* Number of bytes to be written to the endpoint2 Tx FIFO */
-
-/* USB Endpoint 3 Control Registers */
-
-#define USB_EP_NI3_TXMAXP 0xffc03ac0 /* Maximum packet size for Host Tx endpoint3 */
-#define USB_EP_NI3_TXCSR 0xffc03ac4 /* Control Status register for endpoint3 */
-#define USB_EP_NI3_RXMAXP 0xffc03ac8 /* Maximum packet size for Host Rx endpoint3 */
-#define USB_EP_NI3_RXCSR 0xffc03acc /* Control Status register for Host Rx endpoint3 */
-#define USB_EP_NI3_RXCOUNT 0xffc03ad0 /* Number of bytes received in endpoint3 FIFO */
-#define USB_EP_NI3_TXTYPE 0xffc03ad4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint3 */
-#define USB_EP_NI3_TXINTERVAL 0xffc03ad8 /* Sets the NAK response timeout on Endpoint3 */
-#define USB_EP_NI3_RXTYPE 0xffc03adc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint3 */
-#define USB_EP_NI3_RXINTERVAL 0xffc03ae0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint3 */
-#define USB_EP_NI3_TXCOUNT 0xffc03ae8 /* Number of bytes to be written to the endpoint3 Tx FIFO */
-
-/* USB Endpoint 4 Control Registers */
-
-#define USB_EP_NI4_TXMAXP 0xffc03b00 /* Maximum packet size for Host Tx endpoint4 */
-#define USB_EP_NI4_TXCSR 0xffc03b04 /* Control Status register for endpoint4 */
-#define USB_EP_NI4_RXMAXP 0xffc03b08 /* Maximum packet size for Host Rx endpoint4 */
-#define USB_EP_NI4_RXCSR 0xffc03b0c /* Control Status register for Host Rx endpoint4 */
-#define USB_EP_NI4_RXCOUNT 0xffc03b10 /* Number of bytes received in endpoint4 FIFO */
-#define USB_EP_NI4_TXTYPE 0xffc03b14 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint4 */
-#define USB_EP_NI4_TXINTERVAL 0xffc03b18 /* Sets the NAK response timeout on Endpoint4 */
-#define USB_EP_NI4_RXTYPE 0xffc03b1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint4 */
-#define USB_EP_NI4_RXINTERVAL 0xffc03b20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint4 */
-#define USB_EP_NI4_TXCOUNT 0xffc03b28 /* Number of bytes to be written to the endpoint4 Tx FIFO */
-
-/* USB Endpoint 5 Control Registers */
-
-#define USB_EP_NI5_TXMAXP 0xffc03b40 /* Maximum packet size for Host Tx endpoint5 */
-#define USB_EP_NI5_TXCSR 0xffc03b44 /* Control Status register for endpoint5 */
-#define USB_EP_NI5_RXMAXP 0xffc03b48 /* Maximum packet size for Host Rx endpoint5 */
-#define USB_EP_NI5_RXCSR 0xffc03b4c /* Control Status register for Host Rx endpoint5 */
-#define USB_EP_NI5_RXCOUNT 0xffc03b50 /* Number of bytes received in endpoint5 FIFO */
-#define USB_EP_NI5_TXTYPE 0xffc03b54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint5 */
-#define USB_EP_NI5_TXINTERVAL 0xffc03b58 /* Sets the NAK response timeout on Endpoint5 */
-#define USB_EP_NI5_RXTYPE 0xffc03b5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint5 */
-#define USB_EP_NI5_RXINTERVAL 0xffc03b60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint5 */
-#define USB_EP_NI5_TXCOUNT 0xffc03b68 /* Number of bytes to be written to the endpoint5 Tx FIFO */
-
-/* USB Endpoint 6 Control Registers */
-
-#define USB_EP_NI6_TXMAXP 0xffc03b80 /* Maximum packet size for Host Tx endpoint6 */
-#define USB_EP_NI6_TXCSR 0xffc03b84 /* Control Status register for endpoint6 */
-#define USB_EP_NI6_RXMAXP 0xffc03b88 /* Maximum packet size for Host Rx endpoint6 */
-#define USB_EP_NI6_RXCSR 0xffc03b8c /* Control Status register for Host Rx endpoint6 */
-#define USB_EP_NI6_RXCOUNT 0xffc03b90 /* Number of bytes received in endpoint6 FIFO */
-#define USB_EP_NI6_TXTYPE 0xffc03b94 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint6 */
-#define USB_EP_NI6_TXINTERVAL 0xffc03b98 /* Sets the NAK response timeout on Endpoint6 */
-#define USB_EP_NI6_RXTYPE 0xffc03b9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint6 */
-#define USB_EP_NI6_RXINTERVAL 0xffc03ba0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint6 */
-#define USB_EP_NI6_TXCOUNT 0xffc03ba8 /* Number of bytes to be written to the endpoint6 Tx FIFO */
-
-/* USB Endpoint 7 Control Registers */
-
-#define USB_EP_NI7_TXMAXP 0xffc03bc0 /* Maximum packet size for Host Tx endpoint7 */
-#define USB_EP_NI7_TXCSR 0xffc03bc4 /* Control Status register for endpoint7 */
-#define USB_EP_NI7_RXMAXP 0xffc03bc8 /* Maximum packet size for Host Rx endpoint7 */
-#define USB_EP_NI7_RXCSR 0xffc03bcc /* Control Status register for Host Rx endpoint7 */
-#define USB_EP_NI7_RXCOUNT 0xffc03bd0 /* Number of bytes received in endpoint7 FIFO */
-#define USB_EP_NI7_TXTYPE 0xffc03bd4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint7 */
-#define USB_EP_NI7_TXINTERVAL 0xffc03bd8 /* Sets the NAK response timeout on Endpoint7 */
-#define USB_EP_NI7_RXTYPE 0xffc03bdc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint7 */
-#define USB_EP_NI7_RXINTERVAL 0xffc03bf0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
-#define USB_EP_NI7_TXCOUNT 0xffc03bf8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
-
-#define USB_DMA_INTERRUPT 0xffc03c00 /* Indicates pending interrupts for the DMA channels */
-
-/* USB Channel 0 Config Registers */
-
-#define USB_DMA0CONTROL 0xffc03c04 /* DMA master channel 0 configuration */
-#define USB_DMA0ADDRLOW 0xffc03c08 /* Lower 16-bits of memory source/destination address for DMA master channel 0 */
-#define USB_DMA0ADDRHIGH 0xffc03c0c /* Upper 16-bits of memory source/destination address for DMA master channel 0 */
-#define USB_DMA0COUNTLOW 0xffc03c10 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 0 */
-#define USB_DMA0COUNTHIGH 0xffc03c14 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 0 */
-
-/* USB Channel 1 Config Registers */
-
-#define USB_DMA1CONTROL 0xffc03c24 /* DMA master channel 1 configuration */
-#define USB_DMA1ADDRLOW 0xffc03c28 /* Lower 16-bits of memory source/destination address for DMA master channel 1 */
-#define USB_DMA1ADDRHIGH 0xffc03c2c /* Upper 16-bits of memory source/destination address for DMA master channel 1 */
-#define USB_DMA1COUNTLOW 0xffc03c30 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 1 */
-#define USB_DMA1COUNTHIGH 0xffc03c34 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 1 */
-
-/* USB Channel 2 Config Registers */
-
-#define USB_DMA2CONTROL 0xffc03c44 /* DMA master channel 2 configuration */
-#define USB_DMA2ADDRLOW 0xffc03c48 /* Lower 16-bits of memory source/destination address for DMA master channel 2 */
-#define USB_DMA2ADDRHIGH 0xffc03c4c /* Upper 16-bits of memory source/destination address for DMA master channel 2 */
-#define USB_DMA2COUNTLOW 0xffc03c50 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 2 */
-#define USB_DMA2COUNTHIGH 0xffc03c54 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 2 */
-
-/* USB Channel 3 Config Registers */
-
-#define USB_DMA3CONTROL 0xffc03c64 /* DMA master channel 3 configuration */
-#define USB_DMA3ADDRLOW 0xffc03c68 /* Lower 16-bits of memory source/destination address for DMA master channel 3 */
-#define USB_DMA3ADDRHIGH 0xffc03c6c /* Upper 16-bits of memory source/destination address for DMA master channel 3 */
-#define USB_DMA3COUNTLOW 0xffc03c70 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 3 */
-#define USB_DMA3COUNTHIGH 0xffc03c74 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 3 */
-
-/* USB Channel 4 Config Registers */
-
-#define USB_DMA4CONTROL 0xffc03c84 /* DMA master channel 4 configuration */
-#define USB_DMA4ADDRLOW 0xffc03c88 /* Lower 16-bits of memory source/destination address for DMA master channel 4 */
-#define USB_DMA4ADDRHIGH 0xffc03c8c /* Upper 16-bits of memory source/destination address for DMA master channel 4 */
-#define USB_DMA4COUNTLOW 0xffc03c90 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 4 */
-#define USB_DMA4COUNTHIGH 0xffc03c94 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 4 */
-
-/* USB Channel 5 Config Registers */
-
-#define USB_DMA5CONTROL 0xffc03ca4 /* DMA master channel 5 configuration */
-#define USB_DMA5ADDRLOW 0xffc03ca8 /* Lower 16-bits of memory source/destination address for DMA master channel 5 */
-#define USB_DMA5ADDRHIGH 0xffc03cac /* Upper 16-bits of memory source/destination address for DMA master channel 5 */
-#define USB_DMA5COUNTLOW 0xffc03cb0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 5 */
-#define USB_DMA5COUNTHIGH 0xffc03cb4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 5 */
-
-/* USB Channel 6 Config Registers */
-
-#define USB_DMA6CONTROL 0xffc03cc4 /* DMA master channel 6 configuration */
-#define USB_DMA6ADDRLOW 0xffc03cc8 /* Lower 16-bits of memory source/destination address for DMA master channel 6 */
-#define USB_DMA6ADDRHIGH 0xffc03ccc /* Upper 16-bits of memory source/destination address for DMA master channel 6 */
-#define USB_DMA6COUNTLOW 0xffc03cd0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 6 */
-#define USB_DMA6COUNTHIGH 0xffc03cd4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 6 */
-
-/* USB Channel 7 Config Registers */
-
-#define USB_DMA7CONTROL 0xffc03ce4 /* DMA master channel 7 configuration */
-#define USB_DMA7ADDRLOW 0xffc03ce8 /* Lower 16-bits of memory source/destination address for DMA master channel 7 */
-#define USB_DMA7ADDRHIGH 0xffc03cec /* Upper 16-bits of memory source/destination address for DMA master channel 7 */
-#define USB_DMA7COUNTLOW 0xffc03cf0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 7 */
-#define USB_DMA7COUNTHIGH 0xffc03cf4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 7 */
-
-/* Bit masks for USB_FADDR */
-
-#define FUNCTION_ADDRESS 0x7f /* Function address */
-
-/* Bit masks for USB_POWER */
-
-#define ENABLE_SUSPENDM 0x1 /* enable SuspendM output */
-#define nENABLE_SUSPENDM 0x0
-#define SUSPEND_MODE 0x2 /* Suspend Mode indicator */
-#define nSUSPEND_MODE 0x0
-#define RESUME_MODE 0x4 /* Resume Mode indicator */
-#define nRESUME_MODE 0x0
-#define RESET 0x8 /* Reset indicator */
-#define nRESET 0x0
-#define HS_MODE 0x10 /* High Speed mode indicator */
-#define nHS_MODE 0x0
-#define HS_ENABLE 0x20 /* high Speed Enable */
-#define nHS_ENABLE 0x0
-#define SOFT_CONN 0x40 /* Soft connect */
-#define nSOFT_CONN 0x0
-#define ISO_UPDATE 0x80 /* Isochronous update */
-#define nISO_UPDATE 0x0
-
-/* Bit masks for USB_INTRTX */
-
-#define EP0_TX 0x1 /* Tx Endpoint 0 interrupt */
-#define nEP0_TX 0x0
-#define EP1_TX 0x2 /* Tx Endpoint 1 interrupt */
-#define nEP1_TX 0x0
-#define EP2_TX 0x4 /* Tx Endpoint 2 interrupt */
-#define nEP2_TX 0x0
-#define EP3_TX 0x8 /* Tx Endpoint 3 interrupt */
-#define nEP3_TX 0x0
-#define EP4_TX 0x10 /* Tx Endpoint 4 interrupt */
-#define nEP4_TX 0x0
-#define EP5_TX 0x20 /* Tx Endpoint 5 interrupt */
-#define nEP5_TX 0x0
-#define EP6_TX 0x40 /* Tx Endpoint 6 interrupt */
-#define nEP6_TX 0x0
-#define EP7_TX 0x80 /* Tx Endpoint 7 interrupt */
-#define nEP7_TX 0x0
-
-/* Bit masks for USB_INTRRX */
-
-#define EP1_RX 0x2 /* Rx Endpoint 1 interrupt */
-#define nEP1_RX 0x0
-#define EP2_RX 0x4 /* Rx Endpoint 2 interrupt */
-#define nEP2_RX 0x0
-#define EP3_RX 0x8 /* Rx Endpoint 3 interrupt */
-#define nEP3_RX 0x0
-#define EP4_RX 0x10 /* Rx Endpoint 4 interrupt */
-#define nEP4_RX 0x0
-#define EP5_RX 0x20 /* Rx Endpoint 5 interrupt */
-#define nEP5_RX 0x0
-#define EP6_RX 0x40 /* Rx Endpoint 6 interrupt */
-#define nEP6_RX 0x0
-#define EP7_RX 0x80 /* Rx Endpoint 7 interrupt */
-#define nEP7_RX 0x0
-
-/* Bit masks for USB_INTRTXE */
-
-#define EP0_TX_E 0x1 /* Endpoint 0 interrupt Enable */
-#define nEP0_TX_E 0x0
-#define EP1_TX_E 0x2 /* Tx Endpoint 1 interrupt Enable */
-#define nEP1_TX_E 0x0
-#define EP2_TX_E 0x4 /* Tx Endpoint 2 interrupt Enable */
-#define nEP2_TX_E 0x0
-#define EP3_TX_E 0x8 /* Tx Endpoint 3 interrupt Enable */
-#define nEP3_TX_E 0x0
-#define EP4_TX_E 0x10 /* Tx Endpoint 4 interrupt Enable */
-#define nEP4_TX_E 0x0
-#define EP5_TX_E 0x20 /* Tx Endpoint 5 interrupt Enable */
-#define nEP5_TX_E 0x0
-#define EP6_TX_E 0x40 /* Tx Endpoint 6 interrupt Enable */
-#define nEP6_TX_E 0x0
-#define EP7_TX_E 0x80 /* Tx Endpoint 7 interrupt Enable */
-#define nEP7_TX_E 0x0
-
-/* Bit masks for USB_INTRRXE */
-
-#define EP1_RX_E 0x2 /* Rx Endpoint 1 interrupt Enable */
-#define nEP1_RX_E 0x0
-#define EP2_RX_E 0x4 /* Rx Endpoint 2 interrupt Enable */
-#define nEP2_RX_E 0x0
-#define EP3_RX_E 0x8 /* Rx Endpoint 3 interrupt Enable */
-#define nEP3_RX_E 0x0
-#define EP4_RX_E 0x10 /* Rx Endpoint 4 interrupt Enable */
-#define nEP4_RX_E 0x0
-#define EP5_RX_E 0x20 /* Rx Endpoint 5 interrupt Enable */
-#define nEP5_RX_E 0x0
-#define EP6_RX_E 0x40 /* Rx Endpoint 6 interrupt Enable */
-#define nEP6_RX_E 0x0
-#define EP7_RX_E 0x80 /* Rx Endpoint 7 interrupt Enable */
-#define nEP7_RX_E 0x0
-
-/* Bit masks for USB_INTRUSB */
-
-#define SUSPEND_B 0x1 /* Suspend indicator */
-#define nSUSPEND_B 0x0
-#define RESUME_B 0x2 /* Resume indicator */
-#define nRESUME_B 0x0
-#define RESET_OR_BABLE_B 0x4 /* Reset/babble indicator */
-#define nRESET_OR_BABLE_B 0x0
-#define SOF_B 0x8 /* Start of frame */
-#define nSOF_B 0x0
-#define CONN_B 0x10 /* Connection indicator */
-#define nCONN_B 0x0
-#define DISCON_B 0x20 /* Disconnect indicator */
-#define nDISCON_B 0x0
-#define SESSION_REQ_B 0x40 /* Session Request */
-#define nSESSION_REQ_B 0x0
-#define VBUS_ERROR_B 0x80 /* Vbus threshold indicator */
-#define nVBUS_ERROR_B 0x0
-
-/* Bit masks for USB_INTRUSBE */
-
-#define SUSPEND_BE 0x1 /* Suspend indicator int enable */
-#define nSUSPEND_BE 0x0
-#define RESUME_BE 0x2 /* Resume indicator int enable */
-#define nRESUME_BE 0x0
-#define RESET_OR_BABLE_BE 0x4 /* Reset/babble indicator int enable */
-#define nRESET_OR_BABLE_BE 0x0
-#define SOF_BE 0x8 /* Start of frame int enable */
-#define nSOF_BE 0x0
-#define CONN_BE 0x10 /* Connection indicator int enable */
-#define nCONN_BE 0x0
-#define DISCON_BE 0x20 /* Disconnect indicator int enable */
-#define nDISCON_BE 0x0
-#define SESSION_REQ_BE 0x40 /* Session Request int enable */
-#define nSESSION_REQ_BE 0x0
-#define VBUS_ERROR_BE 0x80 /* Vbus threshold indicator int enable */
-#define nVBUS_ERROR_BE 0x0
-
-/* Bit masks for USB_FRAME */
-
-#define FRAME_NUMBER 0x7ff /* Frame number */
-
-/* Bit masks for USB_INDEX */
-
-#define SELECTED_ENDPOINT 0xf /* selected endpoint */
-
-/* Bit masks for USB_GLOBAL_CTL */
-
-#define GLOBAL_ENA 0x1 /* enables USB module */
-#define nGLOBAL_ENA 0x0
-#define EP1_TX_ENA 0x2 /* Transmit endpoint 1 enable */
-#define nEP1_TX_ENA 0x0
-#define EP2_TX_ENA 0x4 /* Transmit endpoint 2 enable */
-#define nEP2_TX_ENA 0x0
-#define EP3_TX_ENA 0x8 /* Transmit endpoint 3 enable */
-#define nEP3_TX_ENA 0x0
-#define EP4_TX_ENA 0x10 /* Transmit endpoint 4 enable */
-#define nEP4_TX_ENA 0x0
-#define EP5_TX_ENA 0x20 /* Transmit endpoint 5 enable */
-#define nEP5_TX_ENA 0x0
-#define EP6_TX_ENA 0x40 /* Transmit endpoint 6 enable */
-#define nEP6_TX_ENA 0x0
-#define EP7_TX_ENA 0x80 /* Transmit endpoint 7 enable */
-#define nEP7_TX_ENA 0x0
-#define EP1_RX_ENA 0x100 /* Receive endpoint 1 enable */
-#define nEP1_RX_ENA 0x0
-#define EP2_RX_ENA 0x200 /* Receive endpoint 2 enable */
-#define nEP2_RX_ENA 0x0
-#define EP3_RX_ENA 0x400 /* Receive endpoint 3 enable */
-#define nEP3_RX_ENA 0x0
-#define EP4_RX_ENA 0x800 /* Receive endpoint 4 enable */
-#define nEP4_RX_ENA 0x0
-#define EP5_RX_ENA 0x1000 /* Receive endpoint 5 enable */
-#define nEP5_RX_ENA 0x0
-#define EP6_RX_ENA 0x2000 /* Receive endpoint 6 enable */
-#define nEP6_RX_ENA 0x0
-#define EP7_RX_ENA 0x4000 /* Receive endpoint 7 enable */
-#define nEP7_RX_ENA 0x0
-
-/* Bit masks for USB_OTG_DEV_CTL */
-
-#define SESSION 0x1 /* session indicator */
-#define nSESSION 0x0
-#define HOST_REQ 0x2 /* Host negotiation request */
-#define nHOST_REQ 0x0
-#define HOST_MODE 0x4 /* indicates USBDRC is a host */
-#define nHOST_MODE 0x0
-#define VBUS0 0x8 /* Vbus level indicator[0] */
-#define nVBUS0 0x0
-#define VBUS1 0x10 /* Vbus level indicator[1] */
-#define nVBUS1 0x0
-#define LSDEV 0x20 /* Low-speed indicator */
-#define nLSDEV 0x0
-#define FSDEV 0x40 /* Full or High-speed indicator */
-#define nFSDEV 0x0
-#define B_DEVICE 0x80 /* 'A' or 'B' device indicator */
-#define nB_DEVICE 0x0
-
-/* Bit masks for USB_OTG_VBUS_IRQ */
-
-#define DRIVE_VBUS_ON 0x1 /* indicator to drive VBUS control circuit */
-#define nDRIVE_VBUS_ON 0x0
-#define DRIVE_VBUS_OFF 0x2 /* indicator to shut off charge pump */
-#define nDRIVE_VBUS_OFF 0x0
-#define CHRG_VBUS_START 0x4 /* indicator for external circuit to start charging VBUS */
-#define nCHRG_VBUS_START 0x0
-#define CHRG_VBUS_END 0x8 /* indicator for external circuit to end charging VBUS */
-#define nCHRG_VBUS_END 0x0
-#define DISCHRG_VBUS_START 0x10 /* indicator to start discharging VBUS */
-#define nDISCHRG_VBUS_START 0x0
-#define DISCHRG_VBUS_END 0x20 /* indicator to stop discharging VBUS */
-#define nDISCHRG_VBUS_END 0x0
-
-/* Bit masks for USB_OTG_VBUS_MASK */
-
-#define DRIVE_VBUS_ON_ENA 0x1 /* enable DRIVE_VBUS_ON interrupt */
-#define nDRIVE_VBUS_ON_ENA 0x0
-#define DRIVE_VBUS_OFF_ENA 0x2 /* enable DRIVE_VBUS_OFF interrupt */
-#define nDRIVE_VBUS_OFF_ENA 0x0
-#define CHRG_VBUS_START_ENA 0x4 /* enable CHRG_VBUS_START interrupt */
-#define nCHRG_VBUS_START_ENA 0x0
-#define CHRG_VBUS_END_ENA 0x8 /* enable CHRG_VBUS_END interrupt */
-#define nCHRG_VBUS_END_ENA 0x0
-#define DISCHRG_VBUS_START_ENA 0x10 /* enable DISCHRG_VBUS_START interrupt */
-#define nDISCHRG_VBUS_START_ENA 0x0
-#define DISCHRG_VBUS_END_ENA 0x20 /* enable DISCHRG_VBUS_END interrupt */
-#define nDISCHRG_VBUS_END_ENA 0x0
-
-/* Bit masks for USB_CSR0 */
-
-#define RXPKTRDY 0x1 /* data packet receive indicator */
-#define nRXPKTRDY 0x0
-#define TXPKTRDY 0x2 /* data packet in FIFO indicator */
-#define nTXPKTRDY 0x0
-#define STALL_SENT 0x4 /* STALL handshake sent */
-#define nSTALL_SENT 0x0
-#define DATAEND 0x8 /* Data end indicator */
-#define nDATAEND 0x0
-#define SETUPEND 0x10 /* Setup end */
-#define nSETUPEND 0x0
-#define SENDSTALL 0x20 /* Send STALL handshake */
-#define nSENDSTALL 0x0
-#define SERVICED_RXPKTRDY 0x40 /* used to clear the RxPktRdy bit */
-#define nSERVICED_RXPKTRDY 0x0
-#define SERVICED_SETUPEND 0x80 /* used to clear the SetupEnd bit */
-#define nSERVICED_SETUPEND 0x0
-#define FLUSHFIFO 0x100 /* flush endpoint FIFO */
-#define nFLUSHFIFO 0x0
-#define STALL_RECEIVED_H 0x4 /* STALL handshake received host mode */
-#define nSTALL_RECEIVED_H 0x0
-#define SETUPPKT_H 0x8 /* send Setup token host mode */
-#define nSETUPPKT_H 0x0
-#define ERROR_H 0x10 /* timeout error indicator host mode */
-#define nERROR_H 0x0
-#define REQPKT_H 0x20 /* Request an IN transaction host mode */
-#define nREQPKT_H 0x0
-#define STATUSPKT_H 0x40 /* Status stage transaction host mode */
-#define nSTATUSPKT_H 0x0
-#define NAK_TIMEOUT_H 0x80 /* EP0 halted after a NAK host mode */
-#define nNAK_TIMEOUT_H 0x0
-
-/* Bit masks for USB_COUNT0 */
-
-#define EP0_RX_COUNT 0x7f /* number of received bytes in EP0 FIFO */
-
-/* Bit masks for USB_NAKLIMIT0 */
-
-#define EP0_NAK_LIMIT 0x1f /* number of frames/microframes after which EP0 times out */
-
-/* Bit masks for USB_TX_MAX_PACKET */
-
-#define MAX_PACKET_SIZE_T 0x7ff /* maximum data payload in a frame */
-
-/* Bit masks for USB_RX_MAX_PACKET */
-
-#define MAX_PACKET_SIZE_R 0x7ff /* maximum data payload in a frame */
-
-/* Bit masks for USB_TXCSR */
-
-#define TXPKTRDY_T 0x1 /* data packet in FIFO indicator */
-#define nTXPKTRDY_T 0x0
-#define FIFO_NOT_EMPTY_T 0x2 /* FIFO not empty */
-#define nFIFO_NOT_EMPTY_T 0x0
-#define UNDERRUN_T 0x4 /* TxPktRdy not set for an IN token */
-#define nUNDERRUN_T 0x0
-#define FLUSHFIFO_T 0x8 /* flush endpoint FIFO */
-#define nFLUSHFIFO_T 0x0
-#define STALL_SEND_T 0x10 /* issue a Stall handshake */
-#define nSTALL_SEND_T 0x0
-#define STALL_SENT_T 0x20 /* Stall handshake transmitted */
-#define nSTALL_SENT_T 0x0
-#define CLEAR_DATATOGGLE_T 0x40 /* clear endpoint data toggle */
-#define nCLEAR_DATATOGGLE_T 0x0
-#define INCOMPTX_T 0x80 /* indicates that a large packet is split */
-#define nINCOMPTX_T 0x0
-#define DMAREQMODE_T 0x400 /* DMA mode (0 or 1) selection */
-#define nDMAREQMODE_T 0x0
-#define FORCE_DATATOGGLE_T 0x800 /* Force data toggle */
-#define nFORCE_DATATOGGLE_T 0x0
-#define DMAREQ_ENA_T 0x1000 /* Enable DMA request for Tx EP */
-#define nDMAREQ_ENA_T 0x0
-#define ISO_T 0x4000 /* enable Isochronous transfers */
-#define nISO_T 0x0
-#define AUTOSET_T 0x8000 /* allows TxPktRdy to be set automatically */
-#define nAUTOSET_T 0x0
-#define ERROR_TH 0x4 /* error condition host mode */
-#define nERROR_TH 0x0
-#define STALL_RECEIVED_TH 0x20 /* Stall handshake received host mode */
-#define nSTALL_RECEIVED_TH 0x0
-#define NAK_TIMEOUT_TH 0x80 /* NAK timeout host mode */
-#define nNAK_TIMEOUT_TH 0x0
-
-/* Bit masks for USB_TXCOUNT */
-
-#define TX_COUNT 0x1fff /* Number of bytes to be written to the selected endpoint Tx FIFO */
-
-/* Bit masks for USB_RXCSR */
-
-#define RXPKTRDY_R 0x1 /* data packet in FIFO indicator */
-#define nRXPKTRDY_R 0x0
-#define FIFO_FULL_R 0x2 /* FIFO full */
-#define nFIFO_FULL_R 0x0
-#define OVERRUN_R 0x4 /* TxPktRdy not set for an IN token */
-#define nOVERRUN_R 0x0
-#define DATAERROR_R 0x8 /* Out packet cannot be loaded into Rx FIFO */
-#define nDATAERROR_R 0x0
-#define FLUSHFIFO_R 0x10 /* flush endpoint FIFO */
-#define nFLUSHFIFO_R 0x0
-#define STALL_SEND_R 0x20 /* issue a Stall handshake */
-#define nSTALL_SEND_R 0x0
-#define STALL_SENT_R 0x40 /* Stall handshake transmitted */
-#define nSTALL_SENT_R 0x0
-#define CLEAR_DATATOGGLE_R 0x80 /* clear endpoint data toggle */
-#define nCLEAR_DATATOGGLE_R 0x0
-#define INCOMPRX_R 0x100 /* indicates that a large packet is split */
-#define nINCOMPRX_R 0x0
-#define DMAREQMODE_R 0x800 /* DMA mode (0 or 1) selection */
-#define nDMAREQMODE_R 0x0
-#define DISNYET_R 0x1000 /* disable Nyet handshakes */
-#define nDISNYET_R 0x0
-#define DMAREQ_ENA_R 0x2000 /* Enable DMA request for Tx EP */
-#define nDMAREQ_ENA_R 0x0
-#define ISO_R 0x4000 /* enable Isochronous transfers */
-#define nISO_R 0x0
-#define AUTOCLEAR_R 0x8000 /* allows RxPktRdy to be cleared automatically */
-#define nAUTOCLEAR_R 0x0
-#define ERROR_RH 0x4 /* TxPktRdy not set for an IN token host mode */
-#define nERROR_RH 0x0
-#define REQPKT_RH 0x20 /* request an IN transaction host mode */
-#define nREQPKT_RH 0x0
-#define STALL_RECEIVED_RH 0x40 /* Stall handshake received host mode */
-#define nSTALL_RECEIVED_RH 0x0
-#define INCOMPRX_RH 0x100 /* indicates that a large packet is split host mode */
-#define nINCOMPRX_RH 0x0
-#define DMAREQMODE_RH 0x800 /* DMA mode (0 or 1) selection host mode */
-#define nDMAREQMODE_RH 0x0
-#define AUTOREQ_RH 0x4000 /* sets ReqPkt automatically host mode */
-#define nAUTOREQ_RH 0x0
-
-/* Bit masks for USB_RXCOUNT */
-
-#define RX_COUNT 0x1fff /* Number of received bytes in the packet in the Rx FIFO */
-
-/* Bit masks for USB_TXTYPE */
-
-#define TARGET_EP_NO_T 0xf /* EP number */
-#define PROTOCOL_T 0xc /* transfer type */
-
-/* Bit masks for USB_TXINTERVAL */
-
-#define TX_POLL_INTERVAL 0xff /* polling interval for selected Tx EP */
-
-/* Bit masks for USB_RXTYPE */
-
-#define TARGET_EP_NO_R 0xf /* EP number */
-#define PROTOCOL_R 0xc /* transfer type */
-
-/* Bit masks for USB_RXINTERVAL */
-
-#define RX_POLL_INTERVAL 0xff /* polling interval for selected Rx EP */
-
-/* Bit masks for USB_DMA_INTERRUPT */
-
-#define DMA0_INT 0x1 /* DMA0 pending interrupt */
-#define nDMA0_INT 0x0
-#define DMA1_INT 0x2 /* DMA1 pending interrupt */
-#define nDMA1_INT 0x0
-#define DMA2_INT 0x4 /* DMA2 pending interrupt */
-#define nDMA2_INT 0x0
-#define DMA3_INT 0x8 /* DMA3 pending interrupt */
-#define nDMA3_INT 0x0
-#define DMA4_INT 0x10 /* DMA4 pending interrupt */
-#define nDMA4_INT 0x0
-#define DMA5_INT 0x20 /* DMA5 pending interrupt */
-#define nDMA5_INT 0x0
-#define DMA6_INT 0x40 /* DMA6 pending interrupt */
-#define nDMA6_INT 0x0
-#define DMA7_INT 0x80 /* DMA7 pending interrupt */
-#define nDMA7_INT 0x0
-
-/* Bit masks for USB_DMAxCONTROL */
-
-#define DMA_ENA 0x1 /* DMA enable */
-#define nDMA_ENA 0x0
-#define DIRECTION 0x2 /* direction of DMA transfer */
-#define nDIRECTION 0x0
-#define MODE 0x4 /* DMA mode (0 or 1) selection */
-#define nMODE 0x0
-#define INT_ENA 0x8 /* Interrupt enable */
-#define nINT_ENA 0x0
-#define EPNUM 0xf0 /* EP number */
-#define BUSERROR 0x100 /* DMA Bus error */
-#define nBUSERROR 0x0
-
-/* Bit masks for USB_DMAxADDRHIGH */
-
-#define DMA_ADDR_HIGH 0xffff /* Upper 16-bits of memory source/destination address for the DMA master channel */
-
-/* Bit masks for USB_DMAxADDRLOW */
-
-#define DMA_ADDR_LOW 0xffff /* Lower 16-bits of memory source/destination address for the DMA master channel */
-
-/* Bit masks for USB_DMAxCOUNTHIGH */
-
-#define DMA_COUNT_HIGH 0xffff /* Upper 16-bits of byte count of DMA transfer for DMA master channel */
-
-/* Bit masks for USB_DMAxCOUNTLOW */
-
-#define DMA_COUNT_LOW 0xffff /* Lower 16-bits of byte count of DMA transfer for DMA master channel */
-
#endif /* _DEF_BF527_H */
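The USB_GLOBAL_CTL bit masks removed above are meant to be ORed together into one control word. A minimal sketch of enabling the module plus endpoint 1 in both directions — GLOBAL_ENA (0x1), EP1_TX_ENA (0x2) and EP1_RX_ENA (0x100) come from the masks above, while the matching bfin_read/bfin_write accessors are assumed to exist alongside the ones shown earlier in this patch; the routine itself is illustrative, not the driver's actual init path:

/* Illustrative only: turn on the USB module and endpoint 1 Tx/Rx. */
static void usb_enable_ep1(void)
{
	unsigned short ctl = bfin_read_USB_GLOBAL_CTL();

	bfin_write_USB_GLOBAL_CTL(ctl | GLOBAL_ENA | EP1_TX_ENA | EP1_RX_ENA);
}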
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
index b9dbb73d7ef..8b18b535921 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
@@ -586,58 +586,6 @@
** modifier UNLESS the lower order bits are saved and ORed back in when
** the macro is used.
*************************************************************************************/
-/*
-** ********************* PLL AND RESET MASKS ****************************************/
-/* PLL_CTL Masks */
-#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
-#define PLL_OFF 0x0002 /* PLL Not Powered */
-#define STOPCK 0x0008 /* Core Clock Off */
-#define PDWN 0x0020 /* Enter Deep Sleep Mode */
-#define IN_DELAY 0x0040 /* Add 200ps Delay To EBIU Input Latches */
-#define OUT_DELAY 0x0080 /* Add 200ps Delay To EBIU Output Signals */
-#define BYPASS 0x0100 /* Bypass the PLL */
-#define MSEL 0x7E00 /* Multiplier Select For CCLK/VCO Factors */
-/* PLL_CTL Macros (Only Use With Logic OR While Setting Lower Order Bits) */
-#define SET_MSEL(x) (((x)&0x3F) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
-
-/* PLL_DIV Masks */
-#define SSEL 0x000F /* System Select */
-#define CSEL 0x0030 /* Core Select */
-#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */
-#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */
-#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */
-#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */
-/* PLL_DIV Macros */
-#define SET_SSEL(x) ((x)&0xF) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
-
-/* VR_CTL Masks */
-#define FREQ 0x3000 /* Switching Oscillator Frequency For Regulator */
-#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */
-
-#define VLEV 0x00F0 /* Internal Voltage Level */
-#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
-#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
-#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
-#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
-#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
-#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
-#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
-#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
-#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */
-#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */
-
-#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */
-#define USBWE 0x0200 /* Enable USB Wakeup From Hibernate */
-#define PHYWE 0x0400 /* Enable PHY Wakeup From Hibernate */
-#define CLKBUFOE 0x4000 /* CLKIN Buffer Output Enable */
-#define PHYCLKOE CLKBUFOE /* Alternative legacy name for the above */
-#define SCKELOW 0x8000 /* Enable Drive CKE Low During Reset */
-
-/* PLL_STAT Masks */
-#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
-#define FULL_ON 0x0002 /* Processor In Full On Mode */
-#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
-#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
/* CHIPID Masks */
#define CHIPID_VERSION 0xF0000000
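The comment block kept at the top of this hunk warns that the SET_*() macros only build the field value, so the lower-order bits of the target register must be saved and ORed back in. A hedged read-modify-write sketch for the removed SET_MSEL()/MSEL pair — the PLL_CTL accessors are assumed to come from the base cdef header and are not part of this hunk:

/* Illustrative only: replace just the MSEL field (0x7E00) of PLL_CTL. */
static void pll_set_multiplier(unsigned int msel)
{
	unsigned short ctl = bfin_read_PLL_CTL();

	ctl &= ~MSEL;			/* keep the low-order control bits       */
	ctl |= SET_MSEL(msel);		/* new multiplier, MSEL = 0-63, bits 9..14 */
	bfin_write_PLL_CTL(ctl);
}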
@@ -757,66 +705,6 @@
#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
-/* ********* WATCHDOG TIMER MASKS ******************** */
-
-/* Watchdog Timer WDOG_CTL Register Masks */
-
-#define WDEV(x) (((x)<<1) & 0x0006) /* event generated on roll over */
-#define WDEV_RESET 0x0000 /* generate reset event on roll over */
-#define WDEV_NMI 0x0002 /* generate NMI event on roll over */
-#define WDEV_GPI 0x0004 /* generate GP IRQ on roll over */
-#define WDEV_NONE 0x0006 /* no event on roll over */
-#define WDEN 0x0FF0 /* enable watchdog */
-#define WDDIS 0x0AD0 /* disable watchdog */
-#define WDRO 0x8000 /* watchdog rolled over latch */
-
-/* depreciated WDOG_CTL Register Masks for legacy code */
-
-
-#define ICTL WDEV
-#define ENABLE_RESET WDEV_RESET
-#define WDOG_RESET WDEV_RESET
-#define ENABLE_NMI WDEV_NMI
-#define WDOG_NMI WDEV_NMI
-#define ENABLE_GPI WDEV_GPI
-#define WDOG_GPI WDEV_GPI
-#define DISABLE_EVT WDEV_NONE
-#define WDOG_NONE WDEV_NONE
-
-#define TMR_EN WDEN
-#define TMR_DIS WDDIS
-#define TRO WDRO
-#define ICTL_P0 0x01
- #define ICTL_P1 0x02
-#define TRO_P 0x0F
-
-
-
-/* *************** REAL TIME CLOCK MASKS **************************/
-/* RTC_STAT and RTC_ALARM Masks */
-#define RTC_SEC 0x0000003F /* Real-Time Clock Seconds */
-#define RTC_MIN 0x00000FC0 /* Real-Time Clock Minutes */
-#define RTC_HR 0x0001F000 /* Real-Time Clock Hours */
-#define RTC_DAY 0xFFFE0000 /* Real-Time Clock Days */
-
-/* RTC_ALARM Macro z=day y=hr x=min w=sec */
-#define SET_ALARM(z,y,x,w) ((((z)&0x7FFF)<<0x11)|(((y)&0x1F)<<0xC)|(((x)&0x3F)<<0x6)|((w)&0x3F))
-
-/* RTC_ICTL and RTC_ISTAT Masks */
-#define STOPWATCH 0x0001 /* Stopwatch Interrupt Enable */
-#define ALARM 0x0002 /* Alarm Interrupt Enable */
-#define SECOND 0x0004 /* Seconds (1 Hz) Interrupt Enable */
-#define MINUTE 0x0008 /* Minutes Interrupt Enable */
-#define HOUR 0x0010 /* Hours Interrupt Enable */
-#define DAY 0x0020 /* 24 Hours (Days) Interrupt Enable */
-#define DAY_ALARM 0x0040 /* Day Alarm (Day, Hour, Minute, Second) Interrupt Enable */
-#define WRITE_PENDING 0x4000 /* Write Pending Status */
-#define WRITE_COMPLETE 0x8000 /* Write Complete Interrupt Enable */
-
-/* RTC_FAST / RTC_PREN Mask */
-#define PREN 0x0001 /* Enable Prescaler, RTC Runs @1 Hz */
-
-
/* ************** UART CONTROLLER MASKS *************************/
/* UARTx_LCR Masks */
#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
@@ -1381,33 +1269,6 @@
/* ************************** DMA CONTROLLER MASKS ********************************/
-/* DMAx_CONFIG, MDMA_yy_CONFIG Masks */
-#define DMAEN 0x0001 /* DMA Channel Enable */
-#define WNR 0x0002 /* Channel Direction (W/R*) */
-#define WDSIZE_8 0x0000 /* Transfer Word Size = 8 */
-#define WDSIZE_16 0x0004 /* Transfer Word Size = 16 */
-#define WDSIZE_32 0x0008 /* Transfer Word Size = 32 */
-#define DMA2D 0x0010 /* DMA Mode (2D/1D*) */
-#define RESTART 0x0020 /* DMA Buffer Clear */
-#define DI_SEL 0x0040 /* Data Interrupt Timing Select */
-#define DI_EN 0x0080 /* Data Interrupt Enable */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-#define NDSIZE 0x0900 /* Next Descriptor Size */
-#define DMAFLOW 0x7000 /* Flow Control */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
@@ -1425,13 +1286,6 @@
#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
-/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */
-#define DMA_DONE 0x0001 /* DMA Completion Interrupt Status */
-#define DMA_ERR 0x0002 /* DMA Error Interrupt Status */
-#define DFETCH 0x0004 /* DMA Descriptor Fetch Indicator */
-#define DMA_RUN 0x0008 /* DMA Channel Running Indicator */
-
-
/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
/* PPI_CONTROL Masks */
#define PORT_EN 0x0001 /* PPI Port Enable */
@@ -1843,46 +1697,6 @@
#define BNDMODE_CAPT 0x2000 /* boundary capture mode */
#define BNDMODE_AEXT 0x3000 /* boundary auto-extend mode */
-/* Bit masks for OTP_CONTROL */
-
-#define FUSE_FADDR 0x1ff /* OTP/Fuse Address */
-#define FIEN 0x800 /* OTP/Fuse Interrupt Enable */
-#define nFIEN 0x0
-#define FTESTDEC 0x1000 /* OTP/Fuse Test Decoder */
-#define nFTESTDEC 0x0
-#define FWRTEST 0x2000 /* OTP/Fuse Write Test */
-#define nFWRTEST 0x0
-#define FRDEN 0x4000 /* OTP/Fuse Read Enable */
-#define nFRDEN 0x0
-#define FWREN 0x8000 /* OTP/Fuse Write Enable */
-#define nFWREN 0x0
-
-/* Bit masks for OTP_BEN */
-
-#define FBEN 0xffff /* OTP/Fuse Byte Enable */
-
-/* Bit masks for OTP_STATUS */
-
-#define FCOMP 0x1 /* OTP/Fuse Access Complete */
-#define nFCOMP 0x0
-#define FERROR 0x2 /* OTP/Fuse Access Error */
-#define nFERROR 0x0
-#define MMRGLOAD 0x10 /* Memory Mapped Register Gasket Load */
-#define nMMRGLOAD 0x0
-#define MMRGLOCK 0x20 /* Memory Mapped Register Gasket Lock */
-#define nMMRGLOCK 0x0
-#define FPGMEN 0x40 /* OTP/Fuse Program Enable */
-#define nFPGMEN 0x0
-
-/* Bit masks for OTP_TIMING */
-
-#define USECDIV 0xff /* Micro Second Divider */
-#define READACC 0x7f00 /* Read Access Time */
-#define CPUMPRL 0x38000 /* Charge Pump Release Time */
-#define CPUMPSU 0xc0000 /* Charge Pump Setup Time */
-#define CPUMPHD 0xf00000 /* Charge Pump Hold Time */
-#define PGMTIME 0xff000000 /* Program Time */
-
/* Bit masks for SECURE_SYSSWT */
#define EMUDABL 0x1 /* Emulation Disable. */
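The hunks above strip the PLL/reset, voltage-regulator, watchdog, RTC, OTP, and DMA bit definitions that this per-machine BF52x header duplicated, leaving the CHIPID, UART, PPI, and other peripheral masks in place. As a minimal sketch of how the removed PLL_DIV helpers are normally combined (it assumes only the SET_SSEL()/CSEL_DIVx definitions shown above and the standard bfin_write_PLL_DIV() MMR accessor; it is illustrative and not part of the patch):

#include <asm/blackfin.h>

static void example_set_clock_dividers(void)
{
	/* Example values only: SCLK = VCO / 5, CCLK = VCO / 2 */
	bfin_write_PLL_DIV(SET_SSEL(5) | CSEL_DIV2);
	SSYNC();	/* ensure the system MMR write has completed */
}
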
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 43f43a095a9..4adceb0bdb6 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -166,7 +166,6 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = {
#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
/* SPI ADC chip */
static struct bfin5xx_spi_chip spi_adc_chip_info = {
- .ctl_reg = 0x1000,
.enable_dma = 1, /* use dma transfer with this chip*/
.bits_per_word = 16,
};
@@ -174,7 +173,6 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = {
#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE)
static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
- .ctl_reg = 0x1000,
.enable_dma = 0,
.bits_per_word = 16,
};
@@ -258,12 +256,6 @@ static struct platform_device bfin_spi0_device = {
};
#endif /* spi master and devices */
-#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
-static struct platform_device bfin_fb_device = {
- .name = "bf537-fb",
-};
-#endif
-
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
static struct resource bfin_uart_resources[] = {
{
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index 644be5e5ab6..8ec42ba35b9 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -20,6 +20,7 @@
#endif
#include <asm/irq.h>
#include <asm/bfin5xx_spi.h>
+#include <asm/portmux.h>
/*
* Name the Board for the /proc/cpuinfo
@@ -107,20 +108,6 @@ static struct platform_device dm9000_device2 = {
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
-/*
- * CPOL (Clock Polarity)
- * 0 - Active high SCK
- * 1 - Active low SCK
- * CPHA (Clock Phase) Selects transfer format and operation mode
- * 0 - SCLK toggles from middle of the first data bit, slave select
- * pins controlled by hardware.
- * 1 - SCLK toggles from beginning of first data bit, slave select
- * pins controller by user software.
- * .ctl_reg = 0x1c00, * CPOL=1,CPHA=1,Sandisk 1G work
- * NO NO .ctl_reg = 0x1800, * CPOL=1,CPHA=0
- * NO NO .ctl_reg = 0x1400, * CPOL=0,CPHA=1
- */
- .ctl_reg = 0x1000, /* CPOL=0,CPHA=0,Sandisk 1G work */
.enable_dma = 0, /* if 1 - block!!! */
.bits_per_word = 8,
};
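The removed comment above spelled out how CPOL/CPHA map onto raw .ctl_reg values; with .ctl_reg gone from bfin5xx_spi_chip, clock polarity and phase are carried by the generic SPI mode flags on the board info instead. A hedged sketch of the equivalent mmc_spi wiring (a hypothetical entry, reusing the mmc_spi_chip_info defined above; not taken from the patch):

#include <linux/spi/spi.h>

static struct spi_board_info example_mmc_spi __initdata = {
	.modalias	 = "mmc_spi",
	.max_speed_hz	 = 20000000,	/* example value */
	.bus_num	 = 0,
	.chip_select	 = 1,		/* example chip select */
	.controller_data = &mmc_spi_chip_info,
	.mode		 = SPI_MODE_0,	/* CPOL=0, CPHA=0 -- what the old 0x1000 value encoded */
};
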
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 82f70efd66e..6d68dcfa2da 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -321,12 +321,6 @@ static struct platform_device bfin_spi0_device = {
};
#endif /* spi master and devices */
-#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
-static struct platform_device bfin_fb_device = {
- .name = "bf537-fb",
-};
-#endif
-
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
static struct resource bfin_uart_resources[] = {
{
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index 02b328eb0e0..e9ff491c095 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -370,72 +370,6 @@
/* System MMR Register Bits */
/******************************************************************************* */
-/* ********************* PLL AND RESET MASKS ************************ */
-
-/* PLL_CTL Masks */
-#define PLL_CLKIN 0x0000 /* Pass CLKIN to PLL */
-#define PLL_CLKIN_DIV2 0x0001 /* Pass CLKIN/2 to PLL */
-#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
-#define PLL_OFF 0x0002 /* Shut off PLL clocks */
-#define STOPCK_OFF 0x0008 /* Core clock off */
-#define STOPCK 0x0008 /* Core Clock Off */
-#define PDWN 0x0020 /* Put the PLL in a Deep Sleep state */
-#if !defined(__ADSPBF538__)
-/* this file is included in defBF538.h but IN_DELAY/OUT_DELAY are different */
-# define IN_DELAY 0x0040 /* Add 200ps Delay To EBIU Input Latches */
-# define OUT_DELAY 0x0080 /* Add 200ps Delay To EBIU Output Signals */
-#endif
-#define BYPASS 0x0100 /* Bypass the PLL */
-/* PLL_CTL Macros (Only Use With Logic OR While Setting Lower Order Bits) */
-#define SET_MSEL(x) (((x)&0x3F) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
-
-/* PLL_DIV Masks */
-#define SSEL 0x000F /* System Select */
-#define CSEL 0x0030 /* Core Select */
-
-#define SCLK_DIV(x) (x) /* SCLK = VCO / x */
-
-#define CCLK_DIV1 0x00000000 /* CCLK = VCO / 1 */
-#define CCLK_DIV2 0x00000010 /* CCLK = VCO / 2 */
-#define CCLK_DIV4 0x00000020 /* CCLK = VCO / 4 */
-#define CCLK_DIV8 0x00000030 /* CCLK = VCO / 8 */
-/* PLL_DIV Macros */
-#define SET_SSEL(x) ((x)&0xF) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
-
-/* PLL_STAT Masks */
-#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
-#define FULL_ON 0x0002 /* Processor In Full On Mode */
-#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
-#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
-
-/* VR_CTL Masks */
-#define FREQ 0x0003 /* Switching Oscillator Frequency For Regulator */
-#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */
-#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */
-#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */
-#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */
-
-#define GAIN 0x000C /* Voltage Level Gain */
-#define GAIN_5 0x0000 /* GAIN = 5 */
-#define GAIN_10 0x0004 /* GAIN = 10 */
-#define GAIN_20 0x0008 /* GAIN = 20 */
-#define GAIN_50 0x000C /* GAIN = 50 */
-
-#define VLEV 0x00F0 /* Internal Voltage Level */
-#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
-#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
-#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
-#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
-#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
-#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
-#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
-#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
-#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */
-#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */
-
-#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */
-#define SCKELOW 0x8000 /* Do Not Drive SCKE High During Reset After Hibernate */
-
/* CHIPID Masks */
#define CHIPID_VERSION 0xF0000000
#define CHIPID_FAMILY 0x0FFFF000
@@ -703,54 +637,7 @@
/* ********** DMA CONTROLLER MASKS *********************8 */
-/*DMAx_CONFIG, MDMA_yy_CONFIG Masks */
-#define DMAEN 0x00000001 /* Channel Enable */
-#define WNR 0x00000002 /* Channel Direction (W/R*) */
-#define WDSIZE_8 0x00000000 /* Word Size 8 bits */
-#define WDSIZE_16 0x00000004 /* Word Size 16 bits */
-#define WDSIZE_32 0x00000008 /* Word Size 32 bits */
-#define DMA2D 0x00000010 /* 2D/1D* Mode */
-#define RESTART 0x00000020 /* Restart */
-#define DI_SEL 0x00000040 /* Data Interrupt Select */
-#define DI_EN 0x00000080 /* Data Interrupt Enable */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-#define NDSIZE 0x00000900 /* Next Descriptor Size */
-#define DMAFLOW 0x00007000 /* Flow Control */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
-
-#define DMAEN_P 0 /* Channel Enable */
-#define WNR_P 1 /* Channel Direction (W/R*) */
-#define DMA2D_P 4 /* 2D/1D* Mode */
-#define RESTART_P 5 /* Restart */
-#define DI_SEL_P 6 /* Data Interrupt Select */
-#define DI_EN_P 7 /* Data Interrupt Enable */
-
-/*DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */
-
-#define DMA_DONE 0x00000001 /* DMA Done Indicator */
-#define DMA_ERR 0x00000002 /* DMA Error Indicator */
-#define DFETCH 0x00000004 /* Descriptor Fetch Indicator */
-#define DMA_RUN 0x00000008 /* DMA Running Indicator */
-
-#define DMA_DONE_P 0 /* DMA Done Indicator */
-#define DMA_ERR_P 1 /* DMA Error Indicator */
-#define DFETCH_P 2 /* Descriptor Fetch Indicator */
-#define DMA_RUN_P 3 /* DMA Running Indicator */
-
-/*DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
#define CTYPE 0x00000040 /* DMA Channel Type Indicator */
#define CTYPE_P 6 /* DMA Channel Type Indicator BIT POSITION */
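As with the BF52x header earlier, the duplicated DMA configuration and IRQ-status bits drop out of defBF532.h, which keeps only the peripheral-map masks. A minimal sketch of how those configuration bits are typically combined through the usual Blackfin DMA channel helpers (set_dma_config() and friends from asm/dma.h; channel and values are illustrative only, not from the patch):

#include <asm/dma.h>

static void example_start_rx_dma(unsigned int channel, unsigned long buf, unsigned int len)
{
	set_dma_start_addr(channel, buf);
	set_dma_x_count(channel, len / 2);	/* 16-bit transfers */
	set_dma_x_modify(channel, 2);
	/* enable, write to memory, 16-bit words, interrupt on completion, autobuffer */
	set_dma_config(channel, DMAEN | WNR | WDSIZE_16 | DI_EN | DMAFLOW_AUTO);
}
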
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 9ba290466b5..4e0afda472a 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -13,9 +13,6 @@
#include <linux/mtd/partitions.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-#include <linux/usb/isp1362.h>
-#endif
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
@@ -147,45 +144,6 @@ static struct platform_device sl811_hcd_device = {
};
#endif
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
-static struct resource isp1362_hcd_resources[] = {
- {
- .start = 0x20360000,
- .end = 0x20360000,
- .flags = IORESOURCE_MEM,
- }, {
- .start = 0x20360004,
- .end = 0x20360004,
- .flags = IORESOURCE_MEM,
- }, {
- .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
- .end = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
- },
-};
-
-static struct isp1362_platform_data isp1362_priv = {
- .sel15Kres = 1,
- .clknotstop = 0,
- .oc_enable = 0,
- .int_act_high = 0,
- .int_edge_triggered = 0,
- .remote_wakeup_connected = 0,
- .no_power_switching = 1,
- .power_switching_mode = 0,
-};
-
-static struct platform_device isp1362_hcd_device = {
- .name = "isp1362-hcd",
- .id = 0,
- .dev = {
- .platform_data = &isp1362_priv,
- },
- .num_resources = ARRAY_SIZE(isp1362_hcd_resources),
- .resource = isp1362_hcd_resources,
-};
-#endif
-
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
@@ -492,10 +450,6 @@ static struct platform_device *stamp_devices[] __initdata = {
&sl811_hcd_device,
#endif
-#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
- &isp1362_hcd_device,
-#endif
-
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index c46baa5e6d9..ac9b52e0087 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -25,6 +26,8 @@
#include <linux/i2c.h>
#include <linux/usb/sl811.h>
#include <linux/spi/mmc_spi.h>
+#include <linux/leds.h>
+#include <linux/input.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
@@ -65,7 +68,7 @@ static struct isp1760_platform_data isp1760_priv = {
};
static struct platform_device bfin_isp1760_device = {
- .name = "isp1760-hcd",
+ .name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
@@ -76,7 +79,6 @@ static struct platform_device bfin_isp1760_device = {
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
-#include <linux/input.h>
#include <linux/gpio_keys.h>
static struct gpio_keys_button bfin_gpio_keys_table[] = {
@@ -195,28 +197,6 @@ static struct platform_device dm9000_device = {
};
#endif
-#if defined(CONFIG_AX88180) || defined(CONFIG_AX88180_MODULE)
-static struct resource ax88180_resources[] = {
- [0] = {
- .start = 0x20300000,
- .end = 0x20300000 + 0x8000,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PF7,
- .end = IRQ_PF7,
- .flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL),
- },
-};
-
-static struct platform_device ax88180_device = {
- .name = "ax88180",
- .id = -1,
- .num_resources = ARRAY_SIZE(ax88180_resources),
- .resource = ax88180_resources,
-};
-#endif
-
#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
static struct resource sl811_hcd_resources[] = {
{
@@ -272,8 +252,8 @@ static struct resource isp1362_hcd_resources[] = {
.end = 0x20360004,
.flags = IORESOURCE_MEM,
}, {
- .start = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
- .end = CONFIG_USB_ISP1362_BFIN_GPIO_IRQ,
+ .start = IRQ_PF3,
+ .end = IRQ_PF3,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
@@ -300,6 +280,44 @@ static struct platform_device isp1362_hcd_device = {
};
#endif
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+unsigned short bfin_can_peripherals[] = {
+ P_CAN0_RX, P_CAN0_TX, 0
+};
+
+static struct resource bfin_can_resources[] = {
+ {
+ .start = 0xFFC02A00,
+ .end = 0xFFC02FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_CAN_RX,
+ .end = IRQ_CAN_RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_CAN_TX,
+ .end = IRQ_CAN_TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_CAN_ERROR,
+ .end = IRQ_CAN_ERROR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device bfin_can_device = {
+ .name = "bfin_can",
+ .num_resources = ARRAY_SIZE(bfin_can_resources),
+ .resource = bfin_can_resources,
+ .dev = {
+ .platform_data = &bfin_can_peripherals, /* Passed to driver */
+ },
+};
+#endif
+
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
@@ -514,15 +532,14 @@ static struct bfin5xx_spi_chip ad1938_spi_chip_info = {
};
#endif
-#if defined(CONFIG_INPUT_EVAL_AD7147EBZ)
-#include <linux/input.h>
+#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
#include <linux/input/ad714x.h>
static struct bfin5xx_spi_chip ad7147_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
-static struct ad714x_slider_plat slider_plat[] = {
+static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
{
.start_stage = 0,
.end_stage = 7,
@@ -530,7 +547,7 @@ static struct ad714x_slider_plat slider_plat[] = {
},
};
-static struct ad714x_button_plat button_plat[] = {
+static struct ad714x_button_plat ad7147_spi_button_plat[] = {
{
.keycode = BTN_FORWARD,
.l_mask = 0,
@@ -557,11 +574,11 @@ static struct ad714x_button_plat button_plat[] = {
.h_mask = 0x400,
},
};
-static struct ad714x_platform_data ad7147_platfrom_data = {
+static struct ad714x_platform_data ad7147_spi_platform_data = {
.slider_num = 1,
.button_num = 5,
- .slider = slider_plat,
- .button = button_plat,
+ .slider = ad7147_spi_slider_plat,
+ .button = ad7147_spi_button_plat,
.stage_cfg_reg = {
{0xFBFF, 0x1FFF, 0, 0x2626, 1600, 1600, 1600, 1600},
{0xEFFF, 0x1FFF, 0, 0x2626, 1650, 1650, 1650, 1650},
@@ -580,10 +597,9 @@ static struct ad714x_platform_data ad7147_platfrom_data = {
};
#endif
-#if defined(CONFIG_INPUT_EVAL_AD7142EB)
-#include <linux/input.h>
+#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
#include <linux/input/ad714x.h>
-static struct ad714x_button_plat button_plat[] = {
+static struct ad714x_button_plat ad7142_i2c_button_plat[] = {
{
.keycode = BTN_1,
.l_mask = 0,
@@ -605,9 +621,9 @@ static struct ad714x_button_plat button_plat[] = {
.h_mask = 0x8,
},
};
-static struct ad714x_platform_data ad7142_platfrom_data = {
+static struct ad714x_platform_data ad7142_i2c_platform_data = {
.button_num = 4,
- .button = button_plat,
+ .button = ad7142_i2c_button_plat,
.stage_cfg_reg = {
/* fixme: figure out right setting for all comoponent according
* to hardware feature of EVAL-AD7142EB board */
@@ -696,8 +712,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = {
#endif
#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
-#include <linux/input.h>
-#include <linux/spi/adxl34x.h>
+#include <linux/input/adxl34x.h>
static const struct adxl34x_platform_data adxl34x_info = {
.x_axis_offset = 0,
.y_axis_offset = 0,
@@ -721,9 +736,7 @@ static const struct adxl34x_platform_data adxl34x_info = {
.ev_code_y = ABS_Y, /* EV_REL */
.ev_code_z = ABS_Z, /* EV_REL */
- .ev_code_tap_x = BTN_TOUCH, /* EV_KEY */
- .ev_code_tap_y = BTN_TOUCH, /* EV_KEY */
- .ev_code_tap_z = BTN_TOUCH, /* EV_KEY */
+ .ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY x,y,z */
/* .ev_code_ff = KEY_F,*/ /* EV_KEY */
/* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */
@@ -761,6 +774,47 @@ static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
};
#endif
+#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
+static struct bfin5xx_spi_chip adf7021_spi_chip_info = {
+ .bits_per_word = 16,
+ .cs_gpio = GPIO_PF10,
+};
+
+#include <linux/spi/adf702x.h>
+#define TXREG 0x0160A470
+static const u32 adf7021_regs[] = {
+ 0x09608FA0,
+ 0x00575011,
+ 0x00A7F092,
+ 0x2B141563,
+ 0x81F29E94,
+ 0x00003155,
+ 0x050A4F66,
+ 0x00000007,
+ 0x00000008,
+ 0x000231E9,
+ 0x3296354A,
+ 0x891A2B3B,
+ 0x00000D9C,
+ 0x0000000D,
+ 0x0000000E,
+ 0x0000000F,
+};
+
+static struct adf702x_platform_data adf7021_platform_data = {
+ .regs_base = (void *)SPORT1_TCR1,
+ .dma_ch_rx = CH_SPORT1_RX,
+ .dma_ch_tx = CH_SPORT1_TX,
+ .irq_sport_err = IRQ_SPORT1_ERROR,
+ .gpio_int_rfs = GPIO_PF8,
+ .pin_req = {P_SPORT1_DTPRI, P_SPORT1_RFS, P_SPORT1_DRPRI,
+ P_SPORT1_RSCLK, P_SPORT1_TSCLK, 0},
+ .adf702x_model = MODEL_ADF7021,
+ .adf702x_regs = adf7021_regs,
+ .tx_reg = TXREG,
+};
+#endif
+
#if defined(CONFIG_MTD_DATAFLASH) \
|| defined(CONFIG_MTD_DATAFLASH_MODULE)
@@ -794,6 +848,13 @@ static struct bfin5xx_spi_chip data_flash_chip_info = {
};
#endif
+#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
+static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
+ .enable_dma = 0, /* use dma transfer with this chip*/
+ .bits_per_word = 8,
+};
+#endif
+
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -855,7 +916,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
},
#endif
-#if defined(CONFIG_INPUT_EVAL_AD7147EBZ)
+#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
{
.modalias = "ad714x_captouch",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
@@ -863,7 +924,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 5,
.mode = SPI_MODE_3,
- .platform_data = &ad7147_platfrom_data,
+ .platform_data = &ad7147_spi_platform_data,
.controller_data = &ad7147_spi_chip_info,
},
#endif
@@ -932,6 +993,30 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.mode = SPI_MODE_0,
},
#endif
+#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
+ {
+ .modalias = "adxl34x",
+ .platform_data = &adxl34x_info,
+ .irq = IRQ_PF6,
+ .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = 2,
+ .controller_data = &spi_adxl34x_chip_info,
+ .mode = SPI_MODE_3,
+ },
+#endif
+#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
+ {
+ .modalias = "adf702x",
+ .max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = 0, /* GPIO controlled SSEL */
+ .controller_data = &adf7021_spi_chip_info,
+ .platform_data = &adf7021_platform_data,
+ .mode = SPI_MODE_0,
+ },
+#endif
+
};
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
@@ -1175,7 +1260,6 @@ static struct platform_device i2c_bfin_twi_device = {
#endif
#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
-#include <linux/input.h>
#include <linux/i2c/adp5588.h>
static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = {
[0] = KEY_GRAVE,
@@ -1268,35 +1352,33 @@ static struct adp5588_kpad_platform_data adp5588_kpad_data = {
* ADP5520/5501 Backlight Data
*/
-static struct adp5520_backlight_platfrom_data adp5520_backlight_data = {
- .fade_in = FADE_T_1200ms,
- .fade_out = FADE_T_1200ms,
- .fade_led_law = BL_LAW_LINEAR,
- .en_ambl_sens = 1,
- .abml_filt = BL_AMBL_FILT_640ms,
- .l1_daylight_max = BL_CUR_mA(15),
- .l1_daylight_dim = BL_CUR_mA(0),
- .l2_office_max = BL_CUR_mA(7),
- .l2_office_dim = BL_CUR_mA(0),
- .l3_dark_max = BL_CUR_mA(3),
- .l3_dark_dim = BL_CUR_mA(0),
- .l2_trip = L2_COMP_CURR_uA(700),
- .l2_hyst = L2_COMP_CURR_uA(50),
- .l3_trip = L3_COMP_CURR_uA(80),
- .l3_hyst = L3_COMP_CURR_uA(20),
+static struct adp5520_backlight_platform_data adp5520_backlight_data = {
+ .fade_in = ADP5520_FADE_T_1200ms,
+ .fade_out = ADP5520_FADE_T_1200ms,
+ .fade_led_law = ADP5520_BL_LAW_LINEAR,
+ .en_ambl_sens = 1,
+ .abml_filt = ADP5520_BL_AMBL_FILT_640ms,
+ .l1_daylight_max = ADP5520_BL_CUR_mA(15),
+ .l1_daylight_dim = ADP5520_BL_CUR_mA(0),
+ .l2_office_max = ADP5520_BL_CUR_mA(7),
+ .l2_office_dim = ADP5520_BL_CUR_mA(0),
+ .l3_dark_max = ADP5520_BL_CUR_mA(3),
+ .l3_dark_dim = ADP5520_BL_CUR_mA(0),
+ .l2_trip = ADP5520_L2_COMP_CURR_uA(700),
+ .l2_hyst = ADP5520_L2_COMP_CURR_uA(50),
+ .l3_trip = ADP5520_L3_COMP_CURR_uA(80),
+ .l3_hyst = ADP5520_L3_COMP_CURR_uA(20),
};
/*
* ADP5520/5501 LEDs Data
*/
-#include <linux/leds.h>
-
static struct led_info adp5520_leds[] = {
{
.name = "adp5520-led1",
.default_trigger = "none",
- .flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | LED_OFFT_600ms,
+ .flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms,
},
#ifdef ADP5520_EN_ALL_LEDS
{
@@ -1312,51 +1394,50 @@ static struct led_info adp5520_leds[] = {
#endif
};
-static struct adp5520_leds_platfrom_data adp5520_leds_data = {
+static struct adp5520_leds_platform_data adp5520_leds_data = {
.num_leds = ARRAY_SIZE(adp5520_leds),
.leds = adp5520_leds,
- .fade_in = FADE_T_600ms,
- .fade_out = FADE_T_600ms,
- .led_on_time = LED_ONT_600ms,
+ .fade_in = ADP5520_FADE_T_600ms,
+ .fade_out = ADP5520_FADE_T_600ms,
+ .led_on_time = ADP5520_LED_ONT_600ms,
};
/*
* ADP5520 GPIO Data
*/
-static struct adp5520_gpio_platfrom_data adp5520_gpio_data = {
+static struct adp5520_gpio_platform_data adp5520_gpio_data = {
.gpio_start = 50,
- .gpio_en_mask = GPIO_C1 | GPIO_C2 | GPIO_R2,
- .gpio_pullup_mask = GPIO_C1 | GPIO_C2 | GPIO_R2,
+ .gpio_en_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2,
+ .gpio_pullup_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2,
};
/*
* ADP5520 Keypad Data
*/
-#include <linux/input.h>
static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = {
- [KEY(0, 0)] = KEY_GRAVE,
- [KEY(0, 1)] = KEY_1,
- [KEY(0, 2)] = KEY_2,
- [KEY(0, 3)] = KEY_3,
- [KEY(1, 0)] = KEY_4,
- [KEY(1, 1)] = KEY_5,
- [KEY(1, 2)] = KEY_6,
- [KEY(1, 3)] = KEY_7,
- [KEY(2, 0)] = KEY_8,
- [KEY(2, 1)] = KEY_9,
- [KEY(2, 2)] = KEY_0,
- [KEY(2, 3)] = KEY_MINUS,
- [KEY(3, 0)] = KEY_EQUAL,
- [KEY(3, 1)] = KEY_BACKSLASH,
- [KEY(3, 2)] = KEY_BACKSPACE,
- [KEY(3, 3)] = KEY_ENTER,
-};
-
-static struct adp5520_keys_platfrom_data adp5520_keys_data = {
- .rows_en_mask = ROW_R3 | ROW_R2 | ROW_R1 | ROW_R0,
- .cols_en_mask = COL_C3 | COL_C2 | COL_C1 | COL_C0,
+ [ADP5520_KEY(0, 0)] = KEY_GRAVE,
+ [ADP5520_KEY(0, 1)] = KEY_1,
+ [ADP5520_KEY(0, 2)] = KEY_2,
+ [ADP5520_KEY(0, 3)] = KEY_3,
+ [ADP5520_KEY(1, 0)] = KEY_4,
+ [ADP5520_KEY(1, 1)] = KEY_5,
+ [ADP5520_KEY(1, 2)] = KEY_6,
+ [ADP5520_KEY(1, 3)] = KEY_7,
+ [ADP5520_KEY(2, 0)] = KEY_8,
+ [ADP5520_KEY(2, 1)] = KEY_9,
+ [ADP5520_KEY(2, 2)] = KEY_0,
+ [ADP5520_KEY(2, 3)] = KEY_MINUS,
+ [ADP5520_KEY(3, 0)] = KEY_EQUAL,
+ [ADP5520_KEY(3, 1)] = KEY_BACKSLASH,
+ [ADP5520_KEY(3, 2)] = KEY_BACKSPACE,
+ [ADP5520_KEY(3, 3)] = KEY_ENTER,
+};
+
+static struct adp5520_keys_platform_data adp5520_keys_data = {
+ .rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0,
+ .cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0,
.keymap = adp5520_keymap,
.keymapsize = ARRAY_SIZE(adp5520_keymap),
.repeat = 0,
@@ -1366,50 +1447,81 @@ static struct adp5520_keys_platfrom_data adp5520_keys_data = {
* ADP5520/5501 Multifuction Device Init Data
*/
-static struct adp5520_subdev_info adp5520_subdevs[] = {
- {
- .name = "adp5520-backlight",
- .id = ID_ADP5520,
- .platform_data = &adp5520_backlight_data,
- },
- {
- .name = "adp5520-led",
- .id = ID_ADP5520,
- .platform_data = &adp5520_leds_data,
- },
- {
- .name = "adp5520-gpio",
- .id = ID_ADP5520,
- .platform_data = &adp5520_gpio_data,
- },
- {
- .name = "adp5520-keys",
- .id = ID_ADP5520,
- .platform_data = &adp5520_keys_data,
- },
-};
-
static struct adp5520_platform_data adp5520_pdev_data = {
- .num_subdevs = ARRAY_SIZE(adp5520_subdevs),
- .subdevs = adp5520_subdevs,
+ .backlight = &adp5520_backlight_data,
+ .leds = &adp5520_leds_data,
+ .gpio = &adp5520_gpio_data,
+ .keys = &adp5520_keys_data,
};
#endif
#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE)
#include <linux/i2c/adp5588.h>
-static struct adp5588_gpio_platfrom_data adp5588_gpio_data = {
+static struct adp5588_gpio_platform_data adp5588_gpio_data = {
.gpio_start = 50,
.pullup_dis_mask = 0,
};
#endif
+#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE)
+#include <linux/i2c/adp8870.h>
+static struct led_info adp8870_leds[] = {
+ {
+ .name = "adp8870-led7",
+ .default_trigger = "none",
+ .flags = ADP8870_LED_D7 | ADP8870_LED_OFFT_600ms,
+ },
+};
+
+
+static struct adp8870_backlight_platform_data adp8870_pdata = {
+ .bl_led_assign = ADP8870_BL_D1 | ADP8870_BL_D2 | ADP8870_BL_D3 |
+ ADP8870_BL_D4 | ADP8870_BL_D5 | ADP8870_BL_D6, /* 1 = Backlight 0 = Individual LED */
+ .pwm_assign = 0, /* 1 = Enables PWM mode */
+
+ .bl_fade_in = ADP8870_FADE_T_1200ms, /* Backlight Fade-In Timer */
+ .bl_fade_out = ADP8870_FADE_T_1200ms, /* Backlight Fade-Out Timer */
+ .bl_fade_law = ADP8870_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */
+
+ .en_ambl_sens = 1, /* 1 = enable ambient light sensor */
+ .abml_filt = ADP8870_BL_AMBL_FILT_320ms, /* Light sensor filter time */
+
+ .l1_daylight_max = ADP8870_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l1_daylight_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l2_bright_max = ADP8870_BL_CUR_mA(14), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l2_bright_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l3_office_max = ADP8870_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l3_office_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l4_indoor_max = ADP8870_BL_CUR_mA(3), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l4_indor_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l5_dark_max = ADP8870_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ .l5_dark_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+
+ .l2_trip = ADP8870_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+ .l2_hyst = ADP8870_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+ .l3_trip = ADP8870_L3_COMP_CURR_uA(389), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+ .l3_hyst = ADP8870_L3_COMP_CURR_uA(54), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+ .l4_trip = ADP8870_L4_COMP_CURR_uA(167), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+ .l4_hyst = ADP8870_L4_COMP_CURR_uA(16), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+ .l5_trip = ADP8870_L5_COMP_CURR_uA(43), /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+ .l5_hyst = ADP8870_L5_COMP_CURR_uA(11), /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+
+ .leds = adp8870_leds,
+ .num_leds = ARRAY_SIZE(adp8870_leds),
+ .led_fade_law = ADP8870_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */
+ .led_fade_in = ADP8870_FADE_T_600ms,
+ .led_fade_out = ADP8870_FADE_T_600ms,
+ .led_on_time = ADP8870_LED_ONT_200ms,
+};
+#endif
+
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
-#if defined(CONFIG_INPUT_EVAL_AD7142EB)
+#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
{
I2C_BOARD_INFO("ad7142_captouch", 0x2C),
.irq = IRQ_PG5,
- .platform_data = (void *)&ad7142_platfrom_data,
+ .platform_data = (void *)&ad7142_i2c_platform_data,
},
#endif
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
@@ -1462,6 +1574,32 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
I2C_BOARD_INFO("bfin-adv7393", 0x2B),
},
#endif
+#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
+ {
+ I2C_BOARD_INFO("bf537-lq035-ad5280", 0x2C),
+ },
+#endif
+#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE)
+ {
+ I2C_BOARD_INFO("adp8870", 0x2B),
+ .platform_data = (void *)&adp8870_pdata,
+ },
+#endif
+#if defined(CONFIG_SND_SOC_ADAU1371) || defined(CONFIG_SND_SOC_ADAU1371_MODULE)
+ {
+ I2C_BOARD_INFO("adau1371", 0x1A),
+ },
+#endif
+#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE)
+ {
+ I2C_BOARD_INFO("adau1761", 0x38),
+ },
+#endif
+#if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE)
+ {
+ I2C_BOARD_INFO("ad5258", 0x18),
+ },
+#endif
};
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
@@ -1602,8 +1740,8 @@ static struct platform_device *stamp_devices[] __initdata = {
&dm9000_device,
#endif
-#if defined(CONFIG_AX88180) || defined(CONFIG_AX88180_MODULE)
- &ax88180_device,
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+ &bfin_can_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
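In stamp.c the ax88180 Ethernet device goes away while a bfin_can platform device and several new SPI/I2C slaves (adxl34x, adf702x, ad714x, adp8870) are added, and the adp5520/adp5588 platform data switch to the corrected *_platform_data struct names and namespaced ADP5520_*/ADP5588_* constants. As a sketch of how a driver picks up the MEM and IRQ resources declared for bfin_can_device above through the standard platform-bus helpers (example_can_probe is hypothetical; not part of the patch):

#include <linux/platform_device.h>
#include <asm/portmux.h>

static int example_can_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int rx_irq = platform_get_irq(pdev, 0);		/* IRQ_CAN_RX above */
	unsigned short *pins = pdev->dev.platform_data;	/* bfin_can_peripherals */

	if (!mem || rx_irq < 0)
		return -ENODEV;

	/* claim the CAN pins listed in the zero-terminated pin array */
	return peripheral_request_list(pins, dev_name(&pdev->dev));
}
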
diff --git a/arch/blackfin/mach-bf537/include/mach/bf537.h b/arch/blackfin/mach-bf537/include/mach/bf537.h
index 17fab447466..8b291418ca3 100644
--- a/arch/blackfin/mach-bf537/include/mach/bf537.h
+++ b/arch/blackfin/mach-bf537/include/mach/bf537.h
@@ -9,16 +9,6 @@
#ifndef __MACH_BF537_H__
#define __MACH_BF537_H__
-/* Masks for generic ERROR IRQ demultiplexing used in int-priority-sc.c */
-
-#define SPI_ERR_MASK (TXCOL | RBSY | MODF | TXE) /* SPI_STAT */
-#define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORTx_STAT */
-#define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */
-#define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */
-#define UART_ERR_MASK_STAT1 (0x4) /* UARTx_IIR */
-#define UART_ERR_MASK_STAT0 (0x2) /* UARTx_IIR */
-#define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */
-
#define OFFSET_(x) ((x) & 0x0000FFFF)
/*some misc defines*/
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index eab006d260c..a12d4b6a221 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -40,10 +40,4 @@
#define OFFSET_SCR 0x1C /* SCR Scratch Register */
#define OFFSET_GCTL 0x24 /* Global Control Register */
-/* PLL_DIV Masks */
-#define CCLK_DIV1 CSEL_DIV1 /* CCLK = VCO / 1 */
-#define CCLK_DIV2 CSEL_DIV2 /* CCLK = VCO / 2 */
-#define CCLK_DIV4 CSEL_DIV4 /* CCLK = VCO / 4 */
-#define CCLK_DIV8 CSEL_DIV8 /* CCLK = VCO / 8 */
-
#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index a6d20ca5768..066d5c261f4 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -958,67 +958,6 @@
** modifier UNLESS the lower order bits are saved and ORed back in when
** the macro is used.
*************************************************************************************/
-/*
-** ********************* PLL AND RESET MASKS ****************************************/
-/* PLL_CTL Masks */
-#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
-#define PLL_OFF 0x0002 /* PLL Not Powered */
-#define STOPCK 0x0008 /* Core Clock Off */
-#define PDWN 0x0020 /* Enter Deep Sleep Mode */
-#define IN_DELAY 0x0040 /* Add 200ps Delay To EBIU Input Latches */
-#define OUT_DELAY 0x0080 /* Add 200ps Delay To EBIU Output Signals */
-#define BYPASS 0x0100 /* Bypass the PLL */
-#define MSEL 0x7E00 /* Multiplier Select For CCLK/VCO Factors */
-/* PLL_CTL Macros (Only Use With Logic OR While Setting Lower Order Bits) */
-#define SET_MSEL(x) (((x)&0x3F) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
-
-/* PLL_DIV Masks */
-#define SSEL 0x000F /* System Select */
-#define CSEL 0x0030 /* Core Select */
-#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */
-#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */
-#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */
-#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */
-/* PLL_DIV Macros */
-#define SET_SSEL(x) ((x)&0xF) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
-
-/* VR_CTL Masks */
-#define FREQ 0x0003 /* Switching Oscillator Frequency For Regulator */
-#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */
-#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */
-#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */
-#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */
-
-#define GAIN 0x000C /* Voltage Level Gain */
-#define GAIN_5 0x0000 /* GAIN = 5 */
-#define GAIN_10 0x0004 /* GAIN = 10 */
-#define GAIN_20 0x0008 /* GAIN = 20 */
-#define GAIN_50 0x000C /* GAIN = 50 */
-
-#define VLEV 0x00F0 /* Internal Voltage Level */
-#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
-#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
-#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
-#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
-#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
-#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
-#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
-#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
-#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */
-#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */
-
-#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */
-#define CANWE 0x0200 /* Enable CAN Wakeup From Hibernate */
-#define PHYWE 0x0400 /* Enable PHY Wakeup From Hibernate */
-#define CLKBUFOE 0x4000 /* CLKIN Buffer Output Enable */
-#define PHYCLKOE CLKBUFOE /* Alternative legacy name for the above */
-#define SCKELOW 0x8000 /* Enable Drive CKE Low During Reset */
-
-/* PLL_STAT Masks */
-#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
-#define FULL_ON 0x0002 /* Processor In Full On Mode */
-#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
-#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
/* CHIPID Masks */
#define CHIPID_VERSION 0xF0000000
@@ -1645,34 +1584,6 @@
#define BGSTAT 0x0020 /* Bus Grant Status */
/* ************************** DMA CONTROLLER MASKS ********************************/
-/* DMAx_CONFIG, MDMA_yy_CONFIG Masks */
-#define DMAEN 0x0001 /* DMA Channel Enable */
-#define WNR 0x0002 /* Channel Direction (W/R*) */
-#define WDSIZE_8 0x0000 /* Transfer Word Size = 8 */
-#define WDSIZE_16 0x0004 /* Transfer Word Size = 16 */
-#define WDSIZE_32 0x0008 /* Transfer Word Size = 32 */
-#define DMA2D 0x0010 /* DMA Mode (2D/1D*) */
-#define RESTART 0x0020 /* DMA Buffer Clear */
-#define DI_SEL 0x0040 /* Data Interrupt Timing Select */
-#define DI_EN 0x0080 /* Data Interrupt Enable */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-#define NDSIZE 0x0900 /* Next Descriptor Size */
-
-#define DMAFLOW 0x7000 /* Flow Control */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
@@ -1690,12 +1601,6 @@
#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
-/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */
-#define DMA_DONE 0x0001 /* DMA Completion Interrupt Status */
-#define DMA_ERR 0x0002 /* DMA Error Interrupt Status */
-#define DFETCH 0x0004 /* DMA Descriptor Fetch Indicator */
-#define DMA_RUN 0x0008 /* DMA Channel Running Indicator */
-
/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
/* PPI_CONTROL Masks */
#define PORT_EN 0x0001 /* PPI Port Enable */
diff --git a/arch/blackfin/mach-bf538/Makefile b/arch/blackfin/mach-bf538/Makefile
index 8cd2719684d..c0be54f2cd2 100644
--- a/arch/blackfin/mach-bf538/Makefile
+++ b/arch/blackfin/mach-bf538/Makefile
@@ -3,3 +3,4 @@
#
obj-y := ints-priority.o dma.o
+obj-$(CONFIG_GPIOLIB) += ext-gpio.o
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index 14af5c2088d..c296bb1ed50 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -151,6 +151,44 @@ static struct platform_device bfin_sir2_device = {
#endif
#endif
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+unsigned short bfin_can_peripherals[] = {
+ P_CAN0_RX, P_CAN0_TX, 0
+};
+
+static struct resource bfin_can_resources[] = {
+ {
+ .start = 0xFFC02A00,
+ .end = 0xFFC02FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_CAN_RX,
+ .end = IRQ_CAN_RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_CAN_TX,
+ .end = IRQ_CAN_TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_CAN_ERROR,
+ .end = IRQ_CAN_ERROR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device bfin_can_device = {
+ .name = "bfin_can",
+ .num_resources = ARRAY_SIZE(bfin_can_resources),
+ .resource = bfin_can_resources,
+ .dev = {
+ .platform_data = &bfin_can_peripherals, /* Passed to driver */
+ },
+};
+#endif
+
/*
* USB-LAN EzExtender board
* Driver needs to know address, irq and flag pin.
@@ -610,6 +648,10 @@ static struct platform_device *cm_bf538_devices[] __initdata = {
#endif
#endif
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+ &bfin_can_device,
+#endif
+
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
diff --git a/arch/blackfin/mach-bf538/ext-gpio.c b/arch/blackfin/mach-bf538/ext-gpio.c
new file mode 100644
index 00000000000..180b1252679
--- /dev/null
+++ b/arch/blackfin/mach-bf538/ext-gpio.c
@@ -0,0 +1,123 @@
+/*
+ * GPIOLIB interface for BF538/9 PORT C, D, and E GPIOs
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <asm/blackfin.h>
+#include <asm/gpio.h>
+#include <asm/portmux.h>
+
+#define DEFINE_REG(reg, off) \
+static inline u16 read_##reg(void __iomem *port) \
+ { return bfin_read16(port + off); } \
+static inline void write_##reg(void __iomem *port, u16 v) \
+ { bfin_write16(port + off, v); }
+
+DEFINE_REG(PORTIO, 0x00)
+DEFINE_REG(PORTIO_CLEAR, 0x10)
+DEFINE_REG(PORTIO_SET, 0x20)
+DEFINE_REG(PORTIO_DIR, 0x40)
+DEFINE_REG(PORTIO_INEN, 0x50)
+
+static void __iomem *gpio_chip_to_mmr(struct gpio_chip *chip)
+{
+ switch (chip->base) {
+ default: /* not really needed, but keeps gcc happy */
+ case GPIO_PC0: return (void __iomem *)PORTCIO;
+ case GPIO_PD0: return (void __iomem *)PORTDIO;
+ case GPIO_PE0: return (void __iomem *)PORTEIO;
+ }
+}
+
+static int bf538_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+ void __iomem *port = gpio_chip_to_mmr(chip);
+ return !!(read_PORTIO(port) & (1u << gpio));
+}
+
+static void bf538_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ void __iomem *port = gpio_chip_to_mmr(chip);
+ if (value)
+ write_PORTIO_SET(port, (1u << gpio));
+ else
+ write_PORTIO_CLEAR(port, (1u << gpio));
+}
+
+static int bf538_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ void __iomem *port = gpio_chip_to_mmr(chip);
+ write_PORTIO_DIR(port, read_PORTIO_DIR(port) & ~(1u << gpio));
+ write_PORTIO_INEN(port, read_PORTIO_INEN(port) | (1u << gpio));
+ return 0;
+}
+
+static int bf538_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ void __iomem *port = gpio_chip_to_mmr(chip);
+ write_PORTIO_INEN(port, read_PORTIO_INEN(port) & ~(1u << gpio));
+ bf538_gpio_set_value(chip, gpio, value);
+ write_PORTIO_DIR(port, read_PORTIO_DIR(port) | (1u << gpio));
+ return 0;
+}
+
+static int bf538_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+ return bfin_special_gpio_request(chip->base + gpio, chip->label);
+}
+
+static void bf538_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+ return bfin_special_gpio_free(chip->base + gpio);
+}
+
+/* We don't set the irq fields as these banks cannot generate interrupts */
+
+static struct gpio_chip bf538_portc_chip = {
+ .label = "GPIO-PC",
+ .direction_input = bf538_gpio_direction_input,
+ .get = bf538_gpio_get_value,
+ .direction_output = bf538_gpio_direction_output,
+ .set = bf538_gpio_set_value,
+ .request = bf538_gpio_request,
+ .free = bf538_gpio_free,
+ .base = GPIO_PC0,
+ .ngpio = GPIO_PC9 - GPIO_PC0 + 1,
+};
+
+static struct gpio_chip bf538_portd_chip = {
+ .label = "GPIO-PD",
+ .direction_input = bf538_gpio_direction_input,
+ .get = bf538_gpio_get_value,
+ .direction_output = bf538_gpio_direction_output,
+ .set = bf538_gpio_set_value,
+ .request = bf538_gpio_request,
+ .free = bf538_gpio_free,
+ .base = GPIO_PD0,
+ .ngpio = GPIO_PD13 - GPIO_PD0 + 1,
+};
+
+static struct gpio_chip bf538_porte_chip = {
+ .label = "GPIO-PE",
+ .direction_input = bf538_gpio_direction_input,
+ .get = bf538_gpio_get_value,
+ .direction_output = bf538_gpio_direction_output,
+ .set = bf538_gpio_set_value,
+ .request = bf538_gpio_request,
+ .free = bf538_gpio_free,
+ .base = GPIO_PE0,
+ .ngpio = GPIO_PE15 - GPIO_PE0 + 1,
+};
+
+static int __init bf538_extgpio_setup(void)
+{
+ return gpiochip_add(&bf538_portc_chip) |
+ gpiochip_add(&bf538_portd_chip) |
+ gpiochip_add(&bf538_porte_chip);
+}
+arch_initcall(bf538_extgpio_setup);
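The new ext-gpio.c registers three gpio_chips for the BF538/9 PORT C, D, and E lines (which cannot raise interrupts, hence no irq fields); the 0x10/0x20/0x40/0x50 offsets used by DEFINE_REG() correspond to the PORTxIO_CLEAR/SET/DIR/INEN registers renamed in defBF539.h further down (PORTCIO at 0xFFC01510, PORTCIO_SET at 0xFFC01530, and so on). A hedged usage sketch through the gpiolib calls of this era (GPIO_PC4 is assumed to be defined in the BF538 mach/gpio.h alongside the GPIO_PC0/GPIO_PC9 limits used above; not part of the patch):

#include <linux/gpio.h>

static int __init example_claim_pc4(void)
{
	int ret = gpio_request(GPIO_PC4, "example");
	if (ret)
		return ret;

	gpio_direction_output(GPIO_PC4, 1);	/* routed to bf538_gpio_direction_output() */
	gpio_set_value(GPIO_PC4, 0);		/* routed to bf538_gpio_set_value() */
	gpio_free(GPIO_PC4);
	return 0;
}
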
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 278e8942eef..08b5eabb1ed 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -37,10 +37,4 @@
#define OFFSET_SCR 0x1C /* SCR Scratch Register */
#define OFFSET_GCTL 0x24 /* Global Control Register */
-/* PLL_DIV Masks */
-#define CCLK_DIV1 CSEL_DIV1 /* CCLK = VCO / 1 */
-#define CCLK_DIV2 CSEL_DIV2 /* CCLK = VCO / 2 */
-#define CCLK_DIV4 CSEL_DIV4 /* CCLK = VCO / 4 */
-#define CCLK_DIV8 CSEL_DIV8 /* CCLK = VCO / 8 */
-
#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index 5f6c34dfd08..fac563e6f62 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -468,31 +468,31 @@
/* General-Purpose Ports (0xFFC01500 - 0xFFC015FF) */
/* GPIO Port C Register Names */
-#define GPIO_C_CNFG 0xFFC01500 /* GPIO Pin Port C Configuration Register */
-#define GPIO_C_D 0xFFC01510 /* GPIO Pin Port C Data Register */
-#define GPIO_C_C 0xFFC01520 /* Clear GPIO Pin Port C Register */
-#define GPIO_C_S 0xFFC01530 /* Set GPIO Pin Port C Register */
-#define GPIO_C_T 0xFFC01540 /* Toggle GPIO Pin Port C Register */
-#define GPIO_C_DIR 0xFFC01550 /* GPIO Pin Port C Direction Register */
-#define GPIO_C_INEN 0xFFC01560 /* GPIO Pin Port C Input Enable Register */
+#define PORTCIO_FER 0xFFC01500 /* GPIO Pin Port C Configuration Register */
+#define PORTCIO 0xFFC01510 /* GPIO Pin Port C Data Register */
+#define PORTCIO_CLEAR 0xFFC01520 /* Clear GPIO Pin Port C Register */
+#define PORTCIO_SET 0xFFC01530 /* Set GPIO Pin Port C Register */
+#define PORTCIO_TOGGLE 0xFFC01540 /* Toggle GPIO Pin Port C Register */
+#define PORTCIO_DIR 0xFFC01550 /* GPIO Pin Port C Direction Register */
+#define PORTCIO_INEN 0xFFC01560 /* GPIO Pin Port C Input Enable Register */
/* GPIO Port D Register Names */
-#define GPIO_D_CNFG 0xFFC01504 /* GPIO Pin Port D Configuration Register */
-#define GPIO_D_D 0xFFC01514 /* GPIO Pin Port D Data Register */
-#define GPIO_D_C 0xFFC01524 /* Clear GPIO Pin Port D Register */
-#define GPIO_D_S 0xFFC01534 /* Set GPIO Pin Port D Register */
-#define GPIO_D_T 0xFFC01544 /* Toggle GPIO Pin Port D Register */
-#define GPIO_D_DIR 0xFFC01554 /* GPIO Pin Port D Direction Register */
-#define GPIO_D_INEN 0xFFC01564 /* GPIO Pin Port D Input Enable Register */
+#define PORTDIO_FER 0xFFC01504 /* GPIO Pin Port D Configuration Register */
+#define PORTDIO 0xFFC01514 /* GPIO Pin Port D Data Register */
+#define PORTDIO_CLEAR 0xFFC01524 /* Clear GPIO Pin Port D Register */
+#define PORTDIO_SET 0xFFC01534 /* Set GPIO Pin Port D Register */
+#define PORTDIO_TOGGLE 0xFFC01544 /* Toggle GPIO Pin Port D Register */
+#define PORTDIO_DIR 0xFFC01554 /* GPIO Pin Port D Direction Register */
+#define PORTDIO_INEN 0xFFC01564 /* GPIO Pin Port D Input Enable Register */
/* GPIO Port E Register Names */
-#define GPIO_E_CNFG 0xFFC01508 /* GPIO Pin Port E Configuration Register */
-#define GPIO_E_D 0xFFC01518 /* GPIO Pin Port E Data Register */
-#define GPIO_E_C 0xFFC01528 /* Clear GPIO Pin Port E Register */
-#define GPIO_E_S 0xFFC01538 /* Set GPIO Pin Port E Register */
-#define GPIO_E_T 0xFFC01548 /* Toggle GPIO Pin Port E Register */
-#define GPIO_E_DIR 0xFFC01558 /* GPIO Pin Port E Direction Register */
-#define GPIO_E_INEN 0xFFC01568 /* GPIO Pin Port E Input Enable Register */
+#define PORTEIO_FER 0xFFC01508 /* GPIO Pin Port E Configuration Register */
+#define PORTEIO 0xFFC01518 /* GPIO Pin Port E Data Register */
+#define PORTEIO_CLEAR 0xFFC01528 /* Clear GPIO Pin Port E Register */
+#define PORTEIO_SET 0xFFC01538 /* Set GPIO Pin Port E Register */
+#define PORTEIO_TOGGLE 0xFFC01548 /* Toggle GPIO Pin Port E Register */
+#define PORTEIO_DIR 0xFFC01558 /* GPIO Pin Port E Direction Register */
+#define PORTEIO_INEN 0xFFC01568 /* GPIO Pin Port E Input Enable Register */
/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
@@ -1422,81 +1422,6 @@
/* System MMR Register Bits and Macros */
/******************************************************************************* */
-/* ********************* PLL AND RESET MASKS ************************ */
-/* PLL_CTL Masks */
-#define PLL_CLKIN 0x0000 /* Pass CLKIN to PLL */
-#define PLL_CLKIN_DIV2 0x0001 /* Pass CLKIN/2 to PLL */
-#define DF 0x0001 /* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
-#define PLL_OFF 0x0002 /* Shut off PLL clocks */
-
-#define STOPCK 0x0008 /* Core Clock Off */
-#define PDWN 0x0020 /* Put the PLL in a Deep Sleep state */
-#define IN_DELAY 0x0014 /* EBIU Input Delay Select */
-#define OUT_DELAY 0x00C0 /* EBIU Output Delay Select */
-#define BYPASS 0x0100 /* Bypass the PLL */
-#define MSEL 0x7E00 /* Multiplier Select For CCLK/VCO Factors */
-
-/* PLL_CTL Macros */
-#ifdef _MISRA_RULES
-#define SET_MSEL(x) (((x)&0x3Fu) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
-#define SET_OUT_DELAY(x) (((x)&0x03u) << 0x6)
-#define SET_IN_DELAY(x) ((((x)&0x02u) << 0x3) | (((x)&0x01u) << 0x2))
-#else
-#define SET_MSEL(x) (((x)&0x3F) << 0x9) /* Set MSEL = 0-63 --> VCO = CLKIN*MSEL */
-#define SET_OUT_DELAY(x) (((x)&0x03) << 0x6)
-#define SET_IN_DELAY(x) ((((x)&0x02) << 0x3) | (((x)&0x01) << 0x2))
-#endif /* _MISRA_RULES */
-
-/* PLL_DIV Masks */
-#define SSEL 0x000F /* System Select */
-#define CSEL 0x0030 /* Core Select */
-#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */
-#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */
-#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */
-#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */
-
-#define SCLK_DIV(x) (x) /* SCLK = VCO / x */
-
-/* PLL_DIV Macros */
-#ifdef _MISRA_RULES
-#define SET_SSEL(x) ((x)&0xFu) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
-#else
-#define SET_SSEL(x) ((x)&0xF) /* Set SSEL = 0-15 --> SCLK = VCO/SSEL */
-#endif /* _MISRA_RULES */
-
-/* PLL_STAT Masks */
-#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
-#define FULL_ON 0x0002 /* Processor In Full On Mode */
-#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
-#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
-
-/* VR_CTL Masks */
-#define FREQ 0x0003 /* Switching Oscillator Frequency For Regulator */
-#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */
-#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */
-#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */
-#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */
-
-#define GAIN 0x000C /* Voltage Level Gain */
-#define GAIN_5 0x0000 /* GAIN = 5 */
-#define GAIN_10 0x0004 /* GAIN = 10 */
-#define GAIN_20 0x0008 /* GAIN = 20 */
-#define GAIN_50 0x000C /* GAIN = 50 */
-
-#define VLEV 0x00F0 /* Internal Voltage Level - Only Program Values Within Specifications */
-#define VLEV_100 0x0090 /* VLEV = 1.00 V (See Datasheet for Regulator Tolerance) */
-#define VLEV_105 0x00A0 /* VLEV = 1.05 V (See Datasheet for Regulator Tolerance) */
-#define VLEV_110 0x00B0 /* VLEV = 1.10 V (See Datasheet for Regulator Tolerance) */
-#define VLEV_115 0x00C0 /* VLEV = 1.15 V (See Datasheet for Regulator Tolerance) */
-#define VLEV_120 0x00D0 /* VLEV = 1.20 V (See Datasheet for Regulator Tolerance) */
-#define VLEV_125 0x00E0 /* VLEV = 1.25 V (See Datasheet for Regulator Tolerance) */
-#define VLEV_130 0x00F0 /* VLEV = 1.30 V (See Datasheet for Regulator Tolerance) */
-
-#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */
-#define CANWE 0x0200 /* Enable CAN Wakeup From Hibernate */
-#define MXVRWE 0x0400 /* Enable MXVR Wakeup From Hibernate */
-#define SCKELOW 0x8000 /* Do Not Drive SCKE High During Reset After Hibernate */
-
/* SWRST Mask */
#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
@@ -1609,91 +1534,6 @@
#endif /* _MISRA_RULES */
-/* ********* WATCHDOG TIMER MASKS ******************** */
-/* Watchdog Timer WDOG_CTL Register Masks */
-#ifdef _MISRA_RULES
-#define WDEV(x) (((x)<<1) & 0x0006u) /* event generated on roll over */
-#else
-#define WDEV(x) (((x)<<1) & 0x0006) /* event generated on roll over */
-#endif /* _MISRA_RULES */
-#define WDEV_RESET 0x0000 /* generate reset event on roll over */
-#define WDEV_NMI 0x0002 /* generate NMI event on roll over */
-#define WDEV_GPI 0x0004 /* generate GP IRQ on roll over */
-#define WDEV_NONE 0x0006 /* no event on roll over */
-#define WDEN 0x0FF0 /* enable watchdog */
-#define WDDIS 0x0AD0 /* disable watchdog */
-#define WDRO 0x8000 /* watchdog rolled over latch */
-
-/* deprecated WDOG_CTL Register Masks for legacy code */
-#define ICTL WDEV
-#define ENABLE_RESET WDEV_RESET
-#define WDOG_RESET WDEV_RESET
-#define ENABLE_NMI WDEV_NMI
-#define WDOG_NMI WDEV_NMI
-#define ENABLE_GPI WDEV_GPI
-#define WDOG_GPI WDEV_GPI
-#define DISABLE_EVT WDEV_NONE
-#define WDOG_NONE WDEV_NONE
-
-#define TMR_EN WDEN
-#define WDOG_DISABLE WDDIS
-#define TRO WDRO
-
-#define ICTL_P0 0x01
-#define ICTL_P1 0x02
-#define TRO_P 0x0F
-
-
-/* *************** REAL TIME CLOCK MASKS **************************/
-/* RTC_STAT and RTC_ALARM register */
-#define RTSEC 0x0000003F /* Real-Time Clock Seconds */
-#define RTMIN 0x00000FC0 /* Real-Time Clock Minutes */
-#define RTHR 0x0001F000 /* Real-Time Clock Hours */
-#define RTDAY 0xFFFE0000 /* Real-Time Clock Days */
-
-/* RTC_ICTL register */
-#define SWIE 0x0001 /* Stopwatch Interrupt Enable */
-#define AIE 0x0002 /* Alarm Interrupt Enable */
-#define SIE 0x0004 /* Seconds (1 Hz) Interrupt Enable */
-#define MIE 0x0008 /* Minutes Interrupt Enable */
-#define HIE 0x0010 /* Hours Interrupt Enable */
-#define DIE 0x0020 /* 24 Hours (Days) Interrupt Enable */
-#define DAIE 0x0040 /* Day Alarm (Day, Hour, Minute, Second) Interrupt Enable */
-#define WCIE 0x8000 /* Write Complete Interrupt Enable */
-
-/* RTC_ISTAT register */
-#define SWEF 0x0001 /* Stopwatch Event Flag */
-#define AEF 0x0002 /* Alarm Event Flag */
-#define SEF 0x0004 /* Seconds (1 Hz) Event Flag */
-#define MEF 0x0008 /* Minutes Event Flag */
-#define HEF 0x0010 /* Hours Event Flag */
-#define DEF 0x0020 /* 24 Hours (Days) Event Flag */
-#define DAEF 0x0040 /* Day Alarm (Day, Hour, Minute, Second) Event Flag */
-#define WPS 0x4000 /* Write Pending Status (RO) */
-#define WCOM 0x8000 /* Write Complete */
-
-/* RTC_FAST Mask (RTC_PREN Mask) */
-#define ENABLE_PRESCALE 0x00000001 /* Enable prescaler so RTC runs at 1 Hz */
-#define PREN 0x00000001
- /* ** Must be set after power-up for proper operation of RTC */
-
-/* Deprecated RTC_STAT and RTC_ALARM Masks */
-#define RTC_SEC RTSEC /* Real-Time Clock Seconds */
-#define RTC_MIN RTMIN /* Real-Time Clock Minutes */
-#define RTC_HR RTHR /* Real-Time Clock Hours */
-#define RTC_DAY RTDAY /* Real-Time Clock Days */
-
-/* Deprecated RTC_ICTL/RTC_ISTAT Masks */
-#define STOPWATCH SWIE /* Stopwatch Interrupt Enable */
-#define ALARM AIE /* Alarm Interrupt Enable */
-#define SECOND SIE /* Seconds (1 Hz) Interrupt Enable */
-#define MINUTE MIE /* Minutes Interrupt Enable */
-#define HOUR HIE /* Hours Interrupt Enable */
-#define DAY DIE /* 24 Hours (Days) Interrupt Enable */
-#define DAY_ALARM DAIE /* Day Alarm (Day, Hour, Minute, Second) Interrupt Enable */
-#define WRITE_COMPLETE WCIE /* Write Complete Interrupt Enable */
-
-
/* ***************************** UART CONTROLLER MASKS ********************** */
/* UARTx_LCR Register */
#ifdef _MISRA_RULES
@@ -1917,52 +1757,6 @@
/* ********** DMA CONTROLLER MASKS ***********************/
-/* DMAx_CONFIG, MDMA_yy_CONFIG Masks */
-#define DMAEN 0x0001 /* Channel Enable */
-#define WNR 0x0002 /* Channel Direction (W/R*) */
-#define WDSIZE_8 0x0000 /* Word Size 8 bits */
-#define WDSIZE_16 0x0004 /* Word Size 16 bits */
-#define WDSIZE_32 0x0008 /* Word Size 32 bits */
-#define DMA2D 0x0010 /* 2D/1D* Mode */
-#define RESTART 0x0020 /* Restart */
-#define DI_SEL 0x0040 /* Data Interrupt Select */
-#define DI_EN 0x0080 /* Data Interrupt Enable */
-#define NDSIZE 0x0900 /* Next Descriptor Size */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-
-#define DMAFLOW 0x7000 /* Flow Control */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
-
-#define DMAEN_P 0x0 /* Channel Enable */
-#define WNR_P 0x1 /* Channel Direction (W/R*) */
-#define DMA2D_P 0x4 /* 2D/1D* Mode */
-#define RESTART_P 0x5 /* Restart */
-#define DI_SEL_P 0x6 /* Data Interrupt Select */
-#define DI_EN_P 0x7 /* Data Interrupt Enable */
-
-/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS Masks */
-#define DMA_DONE 0x0001 /* DMA Done Indicator */
-#define DMA_ERR 0x0002 /* DMA Error Indicator */
-#define DFETCH 0x0004 /* Descriptor Fetch Indicator */
-#define DMA_RUN 0x0008 /* DMA Running Indicator */
-
-#define DMA_DONE_P 0x0 /* DMA Done Indicator */
-#define DMA_ERR_P 0x1 /* DMA Error Indicator */
-#define DFETCH_P 0x2 /* Descriptor Fetch Indicator */
-#define DMA_RUN_P 0x3 /* DMA Running Indicator */
/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
@@ -2625,1019 +2419,6 @@
#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-/********************************* MXVR MASKS ****************************************/
-
-/* MXVR_CONFIG Masks */
-
-#define MXVREN 0x00000001lu
-#define MMSM 0x00000002lu
-#define ACTIVE 0x00000004lu
-#define SDELAY 0x00000008lu
-#define NCMRXEN 0x00000010lu
-#define RWRRXEN 0x00000020lu
-#define MTXEN 0x00000040lu
-#define MTXON 0x00000080lu /*legacy*/
-#define MTXONB 0x00000080lu
-#define EPARITY 0x00000100lu
-#define MSB 0x00001E00lu
-#define APRXEN 0x00002000lu
-#define WAKEUP 0x00004000lu
-#define LMECH 0x00008000lu
-
-#ifdef _MISRA_RULES
-#define SET_MSB(x) (((x)&0xFu) << 0x9)
-#else
-#define SET_MSB(x) (((x)&0xF) << 0x9)
-#endif /* _MISRA_RULES */
-
-
-/* MXVR_PLL_CTL_0 Masks */
-
-#define MXTALCEN 0x00000001lu
-#define MXTALFEN 0x00000002lu
-#define MPLLMS 0x00000008lu
-#define MXTALMUL 0x00000030lu
-#define MPLLEN 0x00000040lu
-#define MPLLEN0 0x00000040lu /* legacy */
-#define MPLLEN1 0x00000080lu /* legacy */
-#define MMCLKEN 0x00000100lu
-#define MMCLKMUL 0x00001E00lu
-#define MPLLRSTB 0x00002000lu
-#define MPLLRSTB0 0x00002000lu /* legacy */
-#define MPLLRSTB1 0x00004000lu /* legacy */
-#define MBCLKEN 0x00010000lu
-#define MBCLKDIV 0x001E0000lu
-#define MPLLCDR 0x00200000lu
-#define MPLLCDR0 0x00200000lu /* legacy */
-#define MPLLCDR1 0x00400000lu /* legacy */
-#define INVRX 0x00800000lu
-#define MFSEN 0x01000000lu
-#define MFSDIV 0x1E000000lu
-#define MFSSEL 0x60000000lu
-#define MFSSYNC 0x80000000lu
-
-#define MXTALMUL_256FS 0x00000000lu /* legacy */
-#define MXTALMUL_384FS 0x00000010lu /* legacy */
-#define MXTALMUL_512FS 0x00000020lu /* legacy */
-#define MXTALMUL_1024FS 0x00000030lu
-
-#define MMCLKMUL_1024FS 0x00000000lu
-#define MMCLKMUL_512FS 0x00000200lu
-#define MMCLKMUL_256FS 0x00000400lu
-#define MMCLKMUL_128FS 0x00000600lu
-#define MMCLKMUL_64FS 0x00000800lu
-#define MMCLKMUL_32FS 0x00000A00lu
-#define MMCLKMUL_16FS 0x00000C00lu
-#define MMCLKMUL_8FS 0x00000E00lu
-#define MMCLKMUL_4FS 0x00001000lu
-#define MMCLKMUL_2FS 0x00001200lu
-#define MMCLKMUL_1FS 0x00001400lu
-#define MMCLKMUL_1536FS 0x00001A00lu
-#define MMCLKMUL_768FS 0x00001C00lu
-#define MMCLKMUL_384FS 0x00001E00lu
-
-#define MBCLKDIV_DIV2 0x00020000lu
-#define MBCLKDIV_DIV4 0x00040000lu
-#define MBCLKDIV_DIV8 0x00060000lu
-#define MBCLKDIV_DIV16 0x00080000lu
-#define MBCLKDIV_DIV32 0x000A0000lu
-#define MBCLKDIV_DIV64 0x000C0000lu
-#define MBCLKDIV_DIV128 0x000E0000lu
-#define MBCLKDIV_DIV256 0x00100000lu
-#define MBCLKDIV_DIV512 0x00120000lu
-#define MBCLKDIV_DIV1024 0x00140000lu
-
-#define MFSDIV_DIV2 0x02000000lu
-#define MFSDIV_DIV4 0x04000000lu
-#define MFSDIV_DIV8 0x06000000lu
-#define MFSDIV_DIV16 0x08000000lu
-#define MFSDIV_DIV32 0x0A000000lu
-#define MFSDIV_DIV64 0x0C000000lu
-#define MFSDIV_DIV128 0x0E000000lu
-#define MFSDIV_DIV256 0x10000000lu
-#define MFSDIV_DIV512 0x12000000lu
-#define MFSDIV_DIV1024 0x14000000lu
-
-#define MFSSEL_CLOCK 0x00000000lu
-#define MFSSEL_PULSE_HI 0x20000000lu
-#define MFSSEL_PULSE_LO 0x40000000lu
-
-
-/* MXVR_PLL_CTL_1 Masks */
-
-#define MSTO 0x00000001lu
-#define MSTO0 0x00000001lu /* legacy */
-#define MHOGGD 0x00000004lu
-#define MHOGGD0 0x00000004lu /* legacy */
-#define MHOGGD1 0x00000008lu /* legacy */
-#define MSHAPEREN 0x00000010lu
-#define MSHAPEREN0 0x00000010lu /* legacy */
-#define MSHAPEREN1 0x00000020lu /* legacy */
-#define MPLLCNTEN 0x00008000lu
-#define MPLLCNT 0xFFFF0000lu
-
-#ifdef _MISRA_RULES
-#define SET_MPLLCNT(x) (((x)&0xFFFFu) << 0x10)
-#else
-#define SET_MPLLCNT(x) (((x)&0xFFFF) << 0x10)
-#endif /* _MISRA_RULES */
-
-
-/* MXVR_PLL_CTL_2 Masks */
-
-#define MSHAPERSEL 0x00000007lu
-#define MCPSEL 0x000000E0lu
-
-/* MXVR_INT_STAT_0 Masks */
-
-#define NI2A 0x00000001lu
-#define NA2I 0x00000002lu
-#define SBU2L 0x00000004lu
-#define SBL2U 0x00000008lu
-#define PRU 0x00000010lu
-#define MPRU 0x00000020lu
-#define DRU 0x00000040lu
-#define MDRU 0x00000080lu
-#define SBU 0x00000100lu
-#define ATU 0x00000200lu
-#define FCZ0 0x00000400lu
-#define FCZ1 0x00000800lu
-#define PERR 0x00001000lu
-#define MH2L 0x00002000lu
-#define ML2H 0x00004000lu
-#define WUP 0x00008000lu
-#define FU2L 0x00010000lu
-#define FL2U 0x00020000lu
-#define BU2L 0x00040000lu
-#define BL2U 0x00080000lu
-#define PCZ 0x00400000lu
-#define FERR 0x00800000lu
-#define CMR 0x01000000lu
-#define CMROF 0x02000000lu
-#define CMTS 0x04000000lu
-#define CMTC 0x08000000lu
-#define RWRC 0x10000000lu
-#define BCZ 0x20000000lu
-#define BMERR 0x40000000lu
-#define DERR 0x80000000lu
-
-
-/* MXVR_INT_EN_0 Masks */
-
-#define NI2AEN NI2A
-#define NA2IEN NA2I
-#define SBU2LEN SBU2L
-#define SBL2UEN SBL2U
-#define PRUEN PRU
-#define MPRUEN MPRU
-#define DRUEN DRU
-#define MDRUEN MDRU
-#define SBUEN SBU
-#define ATUEN ATU
-#define FCZ0EN FCZ0
-#define FCZ1EN FCZ1
-#define PERREN PERR
-#define MH2LEN MH2L
-#define ML2HEN ML2H
-#define WUPEN WUP
-#define FU2LEN FU2L
-#define FL2UEN FL2U
-#define BU2LEN BU2L
-#define BL2UEN BL2U
-#define PCZEN PCZ
-#define FERREN FERR
-#define CMREN CMR
-#define CMROFEN CMROF
-#define CMTSEN CMTS
-#define CMTCEN CMTC
-#define RWRCEN RWRC
-#define BCZEN BCZ
-#define BMERREN BMERR
-#define DERREN DERR
-
-
-/* MXVR_INT_STAT_1 Masks */
-
-#define APR 0x00000004lu
-#define APROF 0x00000008lu
-#define APTS 0x00000040lu
-#define APTC 0x00000080lu
-#define APRCE 0x00000400lu
-#define APRPE 0x00000800lu
-
-#define HDONE0 0x00000001lu
-#define DONE0 0x00000002lu
-#define HDONE1 0x00000010lu
-#define DONE1 0x00000020lu
-#define HDONE2 0x00000100lu
-#define DONE2 0x00000200lu
-#define HDONE3 0x00001000lu
-#define DONE3 0x00002000lu
-#define HDONE4 0x00010000lu
-#define DONE4 0x00020000lu
-#define HDONE5 0x00100000lu
-#define DONE5 0x00200000lu
-#define HDONE6 0x01000000lu
-#define DONE6 0x02000000lu
-#define HDONE7 0x10000000lu
-#define DONE7 0x20000000lu
-
-#define DONEX(x) (0x00000002 << (4 * (x)))
-#define HDONEX(x) (0x00000001 << (4 * (x)))
-
-
-/* MXVR_INT_EN_1 Masks */
-
-#define APREN APR
-#define APROFEN APROF
-#define APTSEN APTS
-#define APTCEN APTC
-#define APRCEEN APRCE
-#define APRPEEN APRPE
-
-#define HDONEEN0 HDONE0
-#define DONEEN0 DONE0
-#define HDONEEN1 HDONE1
-#define DONEEN1 DONE1
-#define HDONEEN2 HDONE2
-#define DONEEN2 DONE2
-#define HDONEEN3 HDONE3
-#define DONEEN3 DONE3
-#define HDONEEN4 HDONE4
-#define DONEEN4 DONE4
-#define HDONEEN5 HDONE5
-#define DONEEN5 DONE5
-#define HDONEEN6 HDONE6
-#define DONEEN6 DONE6
-#define HDONEEN7 HDONE7
-#define DONEEN7 DONE7
-
-#define DONEENX(x) (0x00000002 << (4 * (x)))
-#define HDONEENX(x) (0x00000001 << (4 * (x)))
-
-
-/* MXVR_STATE_0 Masks */
-
-#define NACT 0x00000001lu
-#define SBLOCK 0x00000002lu
-#define PFDLOCK 0x00000004lu
-#define PFDLOCK0 0x00000004lu /* legacy */
-#define PDD 0x00000008lu
-#define PDD0 0x00000008lu /* legacy */
-#define PVCO 0x00000010lu
-#define PVCO0 0x00000010lu /* legacy */
-#define PFDLOCK1 0x00000020lu /* legacy */
-#define PDD1 0x00000040lu /* legacy */
-#define PVCO1 0x00000080lu /* legacy */
-#define APBSY 0x00000100lu
-#define APARB 0x00000200lu
-#define APTX 0x00000400lu
-#define APRX 0x00000800lu
-#define CMBSY 0x00001000lu
-#define CMARB 0x00002000lu
-#define CMTX 0x00004000lu
-#define CMRX 0x00008000lu
-#define MRXONB 0x00010000lu
-#define RGSIP 0x00020000lu
-#define DALIP 0x00040000lu
-#define ALIP 0x00080000lu
-#define RRDIP 0x00100000lu
-#define RWRIP 0x00200000lu
-#define FLOCK 0x00400000lu
-#define BLOCK 0x00800000lu
-#define RSB 0x0F000000lu
-#define DERRNUM 0xF0000000lu
-
-
-/* MXVR_STATE_1 Masks */
-
-#define STXNUMB 0x0000000Flu
-#define SRXNUMB 0x000000F0lu
-#define APCONT 0x00000100lu
-#define DMAACTIVEX 0x00FF0000lu
-#define DMAACTIVE0 0x00010000lu
-#define DMAACTIVE1 0x00020000lu
-#define DMAACTIVE2 0x00040000lu
-#define DMAACTIVE3 0x00080000lu
-#define DMAACTIVE4 0x00100000lu
-#define DMAACTIVE5 0x00200000lu
-#define DMAACTIVE6 0x00400000lu
-#define DMAACTIVE7 0x00800000lu
-#define DMAPMENX 0xFF000000lu
-#define DMAPMEN0 0x01000000lu
-#define DMAPMEN1 0x02000000lu
-#define DMAPMEN2 0x04000000lu
-#define DMAPMEN3 0x08000000lu
-#define DMAPMEN4 0x10000000lu
-#define DMAPMEN5 0x20000000lu
-#define DMAPMEN6 0x40000000lu
-#define DMAPMEN7 0x80000000lu
-
-
-/* MXVR_POSITION Masks */
-
-#define PVALID 0x8000
-#define POSITION 0x003F
-
-
-/* MXVR_MAX_POSITION Masks */
-
-#define MPVALID 0x8000
-#define MPOSITION 0x003F
-
-
-/* MXVR_DELAY Masks */
-
-#define DVALID 0x8000
-#define DELAY 0x003F
-
-
-/* MXVR_MAX_DELAY Masks */
-
-#define MDVALID 0x8000
-#define MDELAY 0x003F
-
-
-/* MXVR_LADDR Masks */
-
-#define LVALID 0x80000000lu
-#define LADDR 0x0000FFFFlu
-
-
-/* MXVR_GADDR Masks */
-
-#define GVALID 0x8000
-#define GADDRL 0x00FF
-
-
-/* MXVR_AADDR Masks */
-
-#define AVALID 0x80000000lu
-#define AADDR 0x0000FFFFlu
-
-
-/* MXVR_ALLOC_0 Masks */
-
-#define CIU0 0x00000080lu
-#define CIU1 0x00008000lu
-#define CIU2 0x00800000lu
-#define CIU3 0x80000000lu
-
-#define CL0 0x0000007Flu
-#define CL1 0x00007F00lu
-#define CL2 0x007F0000lu
-#define CL3 0x7F000000lu
-
-
-/* MXVR_ALLOC_1 Masks */
-
-#define CIU4 0x00000080lu
-#define CIU5 0x00008000lu
-#define CIU6 0x00800000lu
-#define CIU7 0x80000000lu
-
-#define CL4 0x0000007Flu
-#define CL5 0x00007F00lu
-#define CL6 0x007F0000lu
-#define CL7 0x7F000000lu
-
-
-/* MXVR_ALLOC_2 Masks */
-
-#define CIU8 0x00000080lu
-#define CIU9 0x00008000lu
-#define CIU10 0x00800000lu
-#define CIU11 0x80000000lu
-
-#define CL8 0x0000007Flu
-#define CL9 0x00007F00lu
-#define CL10 0x007F0000lu
-#define CL11 0x7F000000lu
-
-
-/* MXVR_ALLOC_3 Masks */
-
-#define CIU12 0x00000080lu
-#define CIU13 0x00008000lu
-#define CIU14 0x00800000lu
-#define CIU15 0x80000000lu
-
-#define CL12 0x0000007Flu
-#define CL13 0x00007F00lu
-#define CL14 0x007F0000lu
-#define CL15 0x7F000000lu
-
-
-/* MXVR_ALLOC_4 Masks */
-
-#define CIU16 0x00000080lu
-#define CIU17 0x00008000lu
-#define CIU18 0x00800000lu
-#define CIU19 0x80000000lu
-
-#define CL16 0x0000007Flu
-#define CL17 0x00007F00lu
-#define CL18 0x007F0000lu
-#define CL19 0x7F000000lu
-
-
-/* MXVR_ALLOC_5 Masks */
-
-#define CIU20 0x00000080lu
-#define CIU21 0x00008000lu
-#define CIU22 0x00800000lu
-#define CIU23 0x80000000lu
-
-#define CL20 0x0000007Flu
-#define CL21 0x00007F00lu
-#define CL22 0x007F0000lu
-#define CL23 0x7F000000lu
-
-
-/* MXVR_ALLOC_6 Masks */
-
-#define CIU24 0x00000080lu
-#define CIU25 0x00008000lu
-#define CIU26 0x00800000lu
-#define CIU27 0x80000000lu
-
-#define CL24 0x0000007Flu
-#define CL25 0x00007F00lu
-#define CL26 0x007F0000lu
-#define CL27 0x7F000000lu
-
-
-/* MXVR_ALLOC_7 Masks */
-
-#define CIU28 0x00000080lu
-#define CIU29 0x00008000lu
-#define CIU30 0x00800000lu
-#define CIU31 0x80000000lu
-
-#define CL28 0x0000007Flu
-#define CL29 0x00007F00lu
-#define CL30 0x007F0000lu
-#define CL31 0x7F000000lu
-
-
-/* MXVR_ALLOC_8 Masks */
-
-#define CIU32 0x00000080lu
-#define CIU33 0x00008000lu
-#define CIU34 0x00800000lu
-#define CIU35 0x80000000lu
-
-#define CL32 0x0000007Flu
-#define CL33 0x00007F00lu
-#define CL34 0x007F0000lu
-#define CL35 0x7F000000lu
-
-
-/* MXVR_ALLOC_9 Masks */
-
-#define CIU36 0x00000080lu
-#define CIU37 0x00008000lu
-#define CIU38 0x00800000lu
-#define CIU39 0x80000000lu
-
-#define CL36 0x0000007Flu
-#define CL37 0x00007F00lu
-#define CL38 0x007F0000lu
-#define CL39 0x7F000000lu
-
-
-/* MXVR_ALLOC_10 Masks */
-
-#define CIU40 0x00000080lu
-#define CIU41 0x00008000lu
-#define CIU42 0x00800000lu
-#define CIU43 0x80000000lu
-
-#define CL40 0x0000007Flu
-#define CL41 0x00007F00lu
-#define CL42 0x007F0000lu
-#define CL43 0x7F000000lu
-
-
-/* MXVR_ALLOC_11 Masks */
-
-#define CIU44 0x00000080lu
-#define CIU45 0x00008000lu
-#define CIU46 0x00800000lu
-#define CIU47 0x80000000lu
-
-#define CL44 0x0000007Flu
-#define CL45 0x00007F00lu
-#define CL46 0x007F0000lu
-#define CL47 0x7F000000lu
-
-
-/* MXVR_ALLOC_12 Masks */
-
-#define CIU48 0x00000080lu
-#define CIU49 0x00008000lu
-#define CIU50 0x00800000lu
-#define CIU51 0x80000000lu
-
-#define CL48 0x0000007Flu
-#define CL49 0x00007F00lu
-#define CL50 0x007F0000lu
-#define CL51 0x7F000000lu
-
-
-/* MXVR_ALLOC_13 Masks */
-
-#define CIU52 0x00000080lu
-#define CIU53 0x00008000lu
-#define CIU54 0x00800000lu
-#define CIU55 0x80000000lu
-
-#define CL52 0x0000007Flu
-#define CL53 0x00007F00lu
-#define CL54 0x007F0000lu
-#define CL55 0x7F000000lu
-
-
-/* MXVR_ALLOC_14 Masks */
-
-#define CIU56 0x00000080lu
-#define CIU57 0x00008000lu
-#define CIU58 0x00800000lu
-#define CIU59 0x80000000lu
-
-#define CL56 0x0000007Flu
-#define CL57 0x00007F00lu
-#define CL58 0x007F0000lu
-#define CL59 0x7F000000lu
-
-
-/* MXVR_SYNC_LCHAN_0 Masks */
-
-#define LCHANPC0 0x0000000Flu
-#define LCHANPC1 0x000000F0lu
-#define LCHANPC2 0x00000F00lu
-#define LCHANPC3 0x0000F000lu
-#define LCHANPC4 0x000F0000lu
-#define LCHANPC5 0x00F00000lu
-#define LCHANPC6 0x0F000000lu
-#define LCHANPC7 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_1 Masks */
-
-#define LCHANPC8 0x0000000Flu
-#define LCHANPC9 0x000000F0lu
-#define LCHANPC10 0x00000F00lu
-#define LCHANPC11 0x0000F000lu
-#define LCHANPC12 0x000F0000lu
-#define LCHANPC13 0x00F00000lu
-#define LCHANPC14 0x0F000000lu
-#define LCHANPC15 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_2 Masks */
-
-#define LCHANPC16 0x0000000Flu
-#define LCHANPC17 0x000000F0lu
-#define LCHANPC18 0x00000F00lu
-#define LCHANPC19 0x0000F000lu
-#define LCHANPC20 0x000F0000lu
-#define LCHANPC21 0x00F00000lu
-#define LCHANPC22 0x0F000000lu
-#define LCHANPC23 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_3 Masks */
-
-#define LCHANPC24 0x0000000Flu
-#define LCHANPC25 0x000000F0lu
-#define LCHANPC26 0x00000F00lu
-#define LCHANPC27 0x0000F000lu
-#define LCHANPC28 0x000F0000lu
-#define LCHANPC29 0x00F00000lu
-#define LCHANPC30 0x0F000000lu
-#define LCHANPC31 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_4 Masks */
-
-#define LCHANPC32 0x0000000Flu
-#define LCHANPC33 0x000000F0lu
-#define LCHANPC34 0x00000F00lu
-#define LCHANPC35 0x0000F000lu
-#define LCHANPC36 0x000F0000lu
-#define LCHANPC37 0x00F00000lu
-#define LCHANPC38 0x0F000000lu
-#define LCHANPC39 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_5 Masks */
-
-#define LCHANPC40 0x0000000Flu
-#define LCHANPC41 0x000000F0lu
-#define LCHANPC42 0x00000F00lu
-#define LCHANPC43 0x0000F000lu
-#define LCHANPC44 0x000F0000lu
-#define LCHANPC45 0x00F00000lu
-#define LCHANPC46 0x0F000000lu
-#define LCHANPC47 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_6 Masks */
-
-#define LCHANPC48 0x0000000Flu
-#define LCHANPC49 0x000000F0lu
-#define LCHANPC50 0x00000F00lu
-#define LCHANPC51 0x0000F000lu
-#define LCHANPC52 0x000F0000lu
-#define LCHANPC53 0x00F00000lu
-#define LCHANPC54 0x0F000000lu
-#define LCHANPC55 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_7 Masks */
-
-#define LCHANPC56 0x0000000Flu
-#define LCHANPC57 0x000000F0lu
-#define LCHANPC58 0x00000F00lu
-#define LCHANPC59 0x0000F000lu
-
-
-/* MXVR_DMAx_CONFIG Masks */
-
-#define MDMAEN 0x00000001lu
-#define DD 0x00000002lu
-#define LCHAN 0x000003C0lu
-#define BITSWAPEN 0x00000400lu
-#define BYSWAPEN 0x00000800lu
-#define MFLOW 0x00007000lu
-#define FIXEDPM 0x00080000lu
-#define STARTPAT 0x00300000lu
-#define STOPPAT 0x00C00000lu
-#define COUNTPOS 0x1C000000lu
-
-#define DD_TX 0x00000000lu
-#define DD_RX 0x00000002lu
-
-#define LCHAN_0 0x00000000lu
-#define LCHAN_1 0x00000040lu
-#define LCHAN_2 0x00000080lu
-#define LCHAN_3 0x000000C0lu
-#define LCHAN_4 0x00000100lu
-#define LCHAN_5 0x00000140lu
-#define LCHAN_6 0x00000180lu
-#define LCHAN_7 0x000001C0lu
-
-#define MFLOW_STOP 0x00000000lu
-#define MFLOW_AUTO 0x00001000lu
-#define MFLOW_PVC 0x00002000lu
-#define MFLOW_PSS 0x00003000lu
-#define MFLOW_PFC 0x00004000lu
-
-#define STARTPAT_0 0x00000000lu
-#define STARTPAT_1 0x00100000lu
-
-#define STOPPAT_0 0x00000000lu
-#define STOPPAT_1 0x00400000lu
-
-#define COUNTPOS_0 0x00000000lu
-#define COUNTPOS_1 0x04000000lu
-#define COUNTPOS_2 0x08000000lu
-#define COUNTPOS_3 0x0C000000lu
-#define COUNTPOS_4 0x10000000lu
-#define COUNTPOS_5 0x14000000lu
-#define COUNTPOS_6 0x18000000lu
-#define COUNTPOS_7 0x1C000000lu
-
-
-/* MXVR_AP_CTL Masks */
-
-#define STARTAP 0x00000001lu
-#define CANCELAP 0x00000002lu
-#define RESETAP 0x00000004lu
-#define APRBE0 0x00004000lu
-#define APRBE1 0x00008000lu
-#define APRBEX 0x0000C000lu
-
-
-/* MXVR_CM_CTL Masks */
-
-#define STARTCM 0x00000001lu
-#define CANCELCM 0x00000002lu
-#define CMRBEX 0xFFFF0000lu
-#define CMRBE0 0x00010000lu
-#define CMRBE1 0x00020000lu
-#define CMRBE2 0x00040000lu
-#define CMRBE3 0x00080000lu
-#define CMRBE4 0x00100000lu
-#define CMRBE5 0x00200000lu
-#define CMRBE6 0x00400000lu
-#define CMRBE7 0x00800000lu
-#define CMRBE8 0x01000000lu
-#define CMRBE9 0x02000000lu
-#define CMRBE10 0x04000000lu
-#define CMRBE11 0x08000000lu
-#define CMRBE12 0x10000000lu
-#define CMRBE13 0x20000000lu
-#define CMRBE14 0x40000000lu
-#define CMRBE15 0x80000000lu
-
-
-/* MXVR_PAT_DATA_x Masks */
-
-#define MATCH_DATA_0 0x000000FFlu
-#define MATCH_DATA_1 0x0000FF00lu
-#define MATCH_DATA_2 0x00FF0000lu
-#define MATCH_DATA_3 0xFF000000lu
-
-
-
-/* MXVR_PAT_EN_x Masks */
-
-#define MATCH_EN_0_0 0x00000001lu
-#define MATCH_EN_0_1 0x00000002lu
-#define MATCH_EN_0_2 0x00000004lu
-#define MATCH_EN_0_3 0x00000008lu
-#define MATCH_EN_0_4 0x00000010lu
-#define MATCH_EN_0_5 0x00000020lu
-#define MATCH_EN_0_6 0x00000040lu
-#define MATCH_EN_0_7 0x00000080lu
-
-#define MATCH_EN_1_0 0x00000100lu
-#define MATCH_EN_1_1 0x00000200lu
-#define MATCH_EN_1_2 0x00000400lu
-#define MATCH_EN_1_3 0x00000800lu
-#define MATCH_EN_1_4 0x00001000lu
-#define MATCH_EN_1_5 0x00002000lu
-#define MATCH_EN_1_6 0x00004000lu
-#define MATCH_EN_1_7 0x00008000lu
-
-#define MATCH_EN_2_0 0x00010000lu
-#define MATCH_EN_2_1 0x00020000lu
-#define MATCH_EN_2_2 0x00040000lu
-#define MATCH_EN_2_3 0x00080000lu
-#define MATCH_EN_2_4 0x00100000lu
-#define MATCH_EN_2_5 0x00200000lu
-#define MATCH_EN_2_6 0x00400000lu
-#define MATCH_EN_2_7 0x00800000lu
-
-#define MATCH_EN_3_0 0x01000000lu
-#define MATCH_EN_3_1 0x02000000lu
-#define MATCH_EN_3_2 0x04000000lu
-#define MATCH_EN_3_3 0x08000000lu
-#define MATCH_EN_3_4 0x10000000lu
-#define MATCH_EN_3_5 0x20000000lu
-#define MATCH_EN_3_6 0x40000000lu
-#define MATCH_EN_3_7 0x80000000lu
-
-
-/* MXVR_ROUTING_0 Masks */
-
-#define MUTE_CH0 0x00000080lu
-#define MUTE_CH1 0x00008000lu
-#define MUTE_CH2 0x00800000lu
-#define MUTE_CH3 0x80000000lu
-
-#define TX_CH0 0x0000007Flu
-#define TX_CH1 0x00007F00lu
-#define TX_CH2 0x007F0000lu
-#define TX_CH3 0x7F000000lu
-
-
-/* MXVR_ROUTING_1 Masks */
-
-#define MUTE_CH4 0x00000080lu
-#define MUTE_CH5 0x00008000lu
-#define MUTE_CH6 0x00800000lu
-#define MUTE_CH7 0x80000000lu
-
-#define TX_CH4 0x0000007Flu
-#define TX_CH5 0x00007F00lu
-#define TX_CH6 0x007F0000lu
-#define TX_CH7 0x7F000000lu
-
-
-/* MXVR_ROUTING_2 Masks */
-
-#define MUTE_CH8 0x00000080lu
-#define MUTE_CH9 0x00008000lu
-#define MUTE_CH10 0x00800000lu
-#define MUTE_CH11 0x80000000lu
-
-#define TX_CH8 0x0000007Flu
-#define TX_CH9 0x00007F00lu
-#define TX_CH10 0x007F0000lu
-#define TX_CH11 0x7F000000lu
-
-/* MXVR_ROUTING_3 Masks */
-
-#define MUTE_CH12 0x00000080lu
-#define MUTE_CH13 0x00008000lu
-#define MUTE_CH14 0x00800000lu
-#define MUTE_CH15 0x80000000lu
-
-#define TX_CH12 0x0000007Flu
-#define TX_CH13 0x00007F00lu
-#define TX_CH14 0x007F0000lu
-#define TX_CH15 0x7F000000lu
-
-
-/* MXVR_ROUTING_4 Masks */
-
-#define MUTE_CH16 0x00000080lu
-#define MUTE_CH17 0x00008000lu
-#define MUTE_CH18 0x00800000lu
-#define MUTE_CH19 0x80000000lu
-
-#define TX_CH16 0x0000007Flu
-#define TX_CH17 0x00007F00lu
-#define TX_CH18 0x007F0000lu
-#define TX_CH19 0x7F000000lu
-
-
-/* MXVR_ROUTING_5 Masks */
-
-#define MUTE_CH20 0x00000080lu
-#define MUTE_CH21 0x00008000lu
-#define MUTE_CH22 0x00800000lu
-#define MUTE_CH23 0x80000000lu
-
-#define TX_CH20 0x0000007Flu
-#define TX_CH21 0x00007F00lu
-#define TX_CH22 0x007F0000lu
-#define TX_CH23 0x7F000000lu
-
-
-/* MXVR_ROUTING_6 Masks */
-
-#define MUTE_CH24 0x00000080lu
-#define MUTE_CH25 0x00008000lu
-#define MUTE_CH26 0x00800000lu
-#define MUTE_CH27 0x80000000lu
-
-#define TX_CH24 0x0000007Flu
-#define TX_CH25 0x00007F00lu
-#define TX_CH26 0x007F0000lu
-#define TX_CH27 0x7F000000lu
-
-
-/* MXVR_ROUTING_7 Masks */
-
-#define MUTE_CH28 0x00000080lu
-#define MUTE_CH29 0x00008000lu
-#define MUTE_CH30 0x00800000lu
-#define MUTE_CH31 0x80000000lu
-
-#define TX_CH28 0x0000007Flu
-#define TX_CH29 0x00007F00lu
-#define TX_CH30 0x007F0000lu
-#define TX_CH31 0x7F000000lu
-
-
-/* MXVR_ROUTING_8 Masks */
-
-#define MUTE_CH32 0x00000080lu
-#define MUTE_CH33 0x00008000lu
-#define MUTE_CH34 0x00800000lu
-#define MUTE_CH35 0x80000000lu
-
-#define TX_CH32 0x0000007Flu
-#define TX_CH33 0x00007F00lu
-#define TX_CH34 0x007F0000lu
-#define TX_CH35 0x7F000000lu
-
-
-/* MXVR_ROUTING_9 Masks */
-
-#define MUTE_CH36 0x00000080lu
-#define MUTE_CH37 0x00008000lu
-#define MUTE_CH38 0x00800000lu
-#define MUTE_CH39 0x80000000lu
-
-#define TX_CH36 0x0000007Flu
-#define TX_CH37 0x00007F00lu
-#define TX_CH38 0x007F0000lu
-#define TX_CH39 0x7F000000lu
-
-
-/* MXVR_ROUTING_10 Masks */
-
-#define MUTE_CH40 0x00000080lu
-#define MUTE_CH41 0x00008000lu
-#define MUTE_CH42 0x00800000lu
-#define MUTE_CH43 0x80000000lu
-
-#define TX_CH40 0x0000007Flu
-#define TX_CH41 0x00007F00lu
-#define TX_CH42 0x007F0000lu
-#define TX_CH43 0x7F000000lu
-
-
-/* MXVR_ROUTING_11 Masks */
-
-#define MUTE_CH44 0x00000080lu
-#define MUTE_CH45 0x00008000lu
-#define MUTE_CH46 0x00800000lu
-#define MUTE_CH47 0x80000000lu
-
-#define TX_CH44 0x0000007Flu
-#define TX_CH45 0x00007F00lu
-#define TX_CH46 0x007F0000lu
-#define TX_CH47 0x7F000000lu
-
-
-/* MXVR_ROUTING_12 Masks */
-
-#define MUTE_CH48 0x00000080lu
-#define MUTE_CH49 0x00008000lu
-#define MUTE_CH50 0x00800000lu
-#define MUTE_CH51 0x80000000lu
-
-#define TX_CH48 0x0000007Flu
-#define TX_CH49 0x00007F00lu
-#define TX_CH50 0x007F0000lu
-#define TX_CH51 0x7F000000lu
-
-
-/* MXVR_ROUTING_13 Masks */
-
-#define MUTE_CH52 0x00000080lu
-#define MUTE_CH53 0x00008000lu
-#define MUTE_CH54 0x00800000lu
-#define MUTE_CH55 0x80000000lu
-
-#define TX_CH52 0x0000007Flu
-#define TX_CH53 0x00007F00lu
-#define TX_CH54 0x007F0000lu
-#define TX_CH55 0x7F000000lu
-
-
-/* MXVR_ROUTING_14 Masks */
-
-#define MUTE_CH56 0x00000080lu
-#define MUTE_CH57 0x00008000lu
-#define MUTE_CH58 0x00800000lu
-#define MUTE_CH59 0x80000000lu
-
-#define TX_CH56 0x0000007Flu
-#define TX_CH57 0x00007F00lu
-#define TX_CH58 0x007F0000lu
-#define TX_CH59 0x7F000000lu
-
-
-/* Control Message Receive Buffer (CMRB) Address Offsets */
-
-#define CMRB_STRIDE 0x00000016lu
-
-#define CMRB_DST_OFFSET 0x00000000lu
-#define CMRB_SRC_OFFSET 0x00000002lu
-#define CMRB_DATA_OFFSET 0x00000005lu
-
-
-/* Control Message Transmit Buffer (CMTB) Address Offsets */
-
-#define CMTB_PRIO_OFFSET 0x00000000lu
-#define CMTB_DST_OFFSET 0x00000002lu
-#define CMTB_SRC_OFFSET 0x00000004lu
-#define CMTB_TYPE_OFFSET 0x00000006lu
-#define CMTB_DATA_OFFSET 0x00000007lu
-
-#define CMTB_ANSWER_OFFSET 0x0000000Alu
-
-#define CMTB_STAT_N_OFFSET 0x00000018lu
-#define CMTB_STAT_A_OFFSET 0x00000016lu
-#define CMTB_STAT_D_OFFSET 0x0000000Elu
-#define CMTB_STAT_R_OFFSET 0x00000014lu
-#define CMTB_STAT_W_OFFSET 0x00000014lu
-#define CMTB_STAT_G_OFFSET 0x00000014lu
-
-
-/* Asynchronous Packet Receive Buffer (APRB) Address Offsets */
-
-#define APRB_STRIDE 0x00000400lu
-
-#define APRB_DST_OFFSET 0x00000000lu
-#define APRB_LEN_OFFSET 0x00000002lu
-#define APRB_SRC_OFFSET 0x00000004lu
-#define APRB_DATA_OFFSET 0x00000006lu
-
-
-/* Asynchronous Packet Transmit Buffer (APTB) Address Offsets */
-
-#define APTB_PRIO_OFFSET 0x00000000lu
-#define APTB_DST_OFFSET 0x00000002lu
-#define APTB_LEN_OFFSET 0x00000004lu
-#define APTB_SRC_OFFSET 0x00000006lu
-#define APTB_DATA_OFFSET 0x00000008lu
-
-
-/* Remote Read Buffer (RRDB) Address Offsets */
-
-#define RRDB_WADDR_OFFSET 0x00000100lu
-#define RRDB_WLEN_OFFSET 0x00000101lu
-
-
-
/* ************ CONTROLLER AREA NETWORK (CAN) MASKS ***************/
/* CAN_CONTROL Masks */
#define SRS 0x0001 /* Software Reset */
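
For reference, the PLL_CTL/PLL_DIV masks removed above are normally combined by OR-ing the SET_* macros with the fixed field masks. A minimal sketch, assuming the macros from the hunk above are visible to the caller and using hypothetical MSEL/SSEL values:

/* Sketch only: build PLL_CTL and PLL_DIV values from the masks above.
 * MSEL = 16 and SSEL = 4 are hypothetical example settings. */
static inline unsigned short example_pll_ctl(void)
{
	return SET_MSEL(16);		/* VCO = CLKIN * 16 */
}

static inline unsigned short example_pll_div(void)
{
	return CSEL_DIV2 | SET_SSEL(4);	/* CCLK = VCO/2, SCLK = VCO/4 */
}
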
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index 295c78a465c..0c346fba961 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Analog Devices Inc.
+ * Copyright (C) 2008-2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
@@ -7,11 +7,8 @@
#ifndef _MACH_GPIO_H_
#define _MACH_GPIO_H_
- /* FIXME:
- * For now only support PORTF GPIOs.
- * PORT C,D and E are for peripheral usage only
- */
#define MAX_BLACKFIN_GPIOS 16
+#define BFIN_SPECIAL_GPIO_BANKS 3
#define GPIO_PF0 0 /* PF */
#define GPIO_PF1 1
diff --git a/arch/blackfin/mach-bf538/include/mach/portmux.h b/arch/blackfin/mach-bf538/include/mach/portmux.h
index 6121cf8b587..0083ba13ee9 100644
--- a/arch/blackfin/mach-bf538/include/mach/portmux.h
+++ b/arch/blackfin/mach-bf538/include/mach/portmux.h
@@ -7,7 +7,7 @@
#ifndef _MACH_PORTMUX_H_
#define _MACH_PORTMUX_H_
-#define MAX_RESOURCES MAX_BLACKFIN_GPIOS
+#define MAX_RESOURCES 64
#define P_TMR2 (P_DONTCARE)
#define P_TMR1 (P_DONTCARE)
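
The P_* descriptors in this header feed the generic Blackfin pin-mux layer. A minimal sketch of claiming one of them, assuming the usual peripheral_request()/peripheral_free() API from asm/portmux.h:

/* Sketch only: claim and release a peripheral pin descriptor. */
#include <linux/errno.h>
#include <asm/portmux.h>

static int example_claim_tmr2(void)
{
	if (peripheral_request(P_TMR2, "example-tmr2"))	/* non-zero on conflict */
		return -EBUSY;
	/* ... use the timer ... */
	peripheral_free(P_TMR2);
	return 0;
}
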
diff --git a/arch/blackfin/mach-bf548/Kconfig b/arch/blackfin/mach-bf548/Kconfig
index a09623dfd55..70189a0d1a1 100644
--- a/arch/blackfin/mach-bf548/Kconfig
+++ b/arch/blackfin/mach-bf548/Kconfig
@@ -1,3 +1,27 @@
+config BF542
+ def_bool y
+ depends on BF542_std || BF542M
+config BF544
+ def_bool y
+ depends on BF544_std || BF544M
+config BF547
+ def_bool y
+ depends on BF547_std || BF547M
+config BF548
+ def_bool y
+ depends on BF548_std || BF548M
+config BF549
+ def_bool y
+ depends on BF549_std || BF549M
+
+config BF54xM
+ def_bool y
+ depends on (BF542M || BF544M || BF547M || BF548M || BF549M)
+
+config BF54x
+ def_bool y
+ depends on (BF542 || BF544 || BF547 || BF548 || BF549)
+
if (BF54x)
source "arch/blackfin/mach-bf548/boards/Kconfig"
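
With the def_bool entries above, the family symbols (CONFIG_BF542 and friends) are selected for both the standard and -M derivatives, so C code can test a single symbol instead of aliasing CONFIG_BF542M to CONFIG_BF542 in a header (the aliasing removed from bf548.h later in this patch). A minimal sketch of the resulting usage:

/* Sketch only: one conditional now covers BF542 and BF542M alike. */
#if defined(CONFIG_BF542)
# define EXAMPLE_CPU_NAME "BF542"	/* hypothetical macro name */
#endif
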
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 1a5286bbb3f..60193f72777 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -62,7 +62,7 @@ static struct isp1760_platform_data isp1760_priv = {
};
static struct platform_device bfin_isp1760_device = {
- .name = "isp1760-hcd",
+ .name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
@@ -154,7 +154,7 @@ static struct platform_device bf54x_kpad_device = {
};
#endif
-#if defined(CONFIG_JOYSTICK_BFIN_ROTARY) || defined(CONFIG_JOYSTICK_BFIN_ROTARY_MODULE)
+#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
#include <asm/bfin_rotary.h>
static struct bfin_rotary_platform_data bfin_rotary_data = {
@@ -186,7 +186,7 @@ static struct platform_device bfin_rotary_device = {
#endif
#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
-#include <linux/spi/adxl34x.h>
+#include <linux/input/adxl34x.h>
static const struct adxl34x_platform_data adxl34x_info = {
.x_axis_offset = 0,
.y_axis_offset = 0,
@@ -210,14 +210,17 @@ static const struct adxl34x_platform_data adxl34x_info = {
.ev_code_y = ABS_Y, /* EV_REL */
.ev_code_z = ABS_Z, /* EV_REL */
- .ev_code_tap_x = BTN_TOUCH, /* EV_KEY */
- .ev_code_tap_y = BTN_TOUCH, /* EV_KEY */
- .ev_code_tap_z = BTN_TOUCH, /* EV_KEY */
+ .ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY x,y,z */
/* .ev_code_ff = KEY_F,*/ /* EV_KEY */
/* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */
.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
.fifo_mode = ADXL_FIFO_STREAM,
+ .orientation_enable = ADXL_EN_ORIENTATION_3D,
+ .deadzone_angle = ADXL_DEADZONE_ANGLE_10p8,
+ .divisor_length = ADXL_LP_FILTER_DIVISOR_16,
+ /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
+ .ev_codes_orient_3d = {BTN_Z, BTN_Y, BTN_X, BTN_A, BTN_B, BTN_C},
};
#endif
@@ -461,6 +464,44 @@ static struct platform_device musb_device = {
};
#endif
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+unsigned short bfin_can_peripherals[] = {
+ P_CAN0_RX, P_CAN0_TX, 0
+};
+
+static struct resource bfin_can_resources[] = {
+ {
+ .start = 0xFFC02A00,
+ .end = 0xFFC02FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_CAN0_RX,
+ .end = IRQ_CAN0_RX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_CAN0_TX,
+ .end = IRQ_CAN0_TX,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_CAN0_ERROR,
+ .end = IRQ_CAN0_ERROR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device bfin_can_device = {
+ .name = "bfin_can",
+ .num_resources = ARRAY_SIZE(bfin_can_resources),
+ .resource = bfin_can_resources,
+ .dev = {
+ .platform_data = &bfin_can_peripherals, /* Passed to driver */
+ },
+};
+#endif
+
#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
static struct resource bfin_atapi_resources[] = {
{
@@ -953,6 +994,10 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bfin_isp1760_device,
#endif
+#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
+ &bfin_can_device,
+#endif
+
#if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
&bfin_atapi_device,
#endif
@@ -974,7 +1019,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bf54x_kpad_device,
#endif
-#if defined(CONFIG_JOYSTICK_BFIN_ROTARY) || defined(CONFIG_JOYSTICK_BFIN_ROTARY_MODULE)
+#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
&bfin_rotary_device,
#endif
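
The bfin_can_device added above follows the usual platform-device pattern: one MMIO range plus three IRQ resources. A minimal sketch of how a driver's probe() could pick them up, assuming a hypothetical example_can_probe() and nothing beyond the standard platform_get_resource()/platform_get_irq() helpers:

/* Sketch only: fetch the resources registered for bfin_can_device. */
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_can_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int rx_irq = platform_get_irq(pdev, 0);		/* IRQ_CAN0_RX */
	int tx_irq = platform_get_irq(pdev, 1);		/* IRQ_CAN0_TX */
	int err_irq = platform_get_irq(pdev, 2);	/* IRQ_CAN0_ERROR */

	if (!mem || rx_irq < 0 || tx_irq < 0 || err_irq < 0)
		return -ENODEV;
	/* ioremap(mem->start, resource_size(mem)) and request_irq() would follow */
	return 0;
}
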
diff --git a/arch/blackfin/mach-bf548/include/mach/bf548.h b/arch/blackfin/mach-bf548/include/mach/bf548.h
index 7bead5ce0f3..751e5e11ecf 100644
--- a/arch/blackfin/mach-bf548/include/mach/bf548.h
+++ b/arch/blackfin/mach-bf548/include/mach/bf548.h
@@ -81,18 +81,6 @@
#define AMGCTLVAL (V_AMBEN | V_AMCKEN)
-#if defined(CONFIG_BF542M)
-# define CONFIG_BF542
-#elif defined(CONFIG_BF544M)
-# define CONFIG_BF544
-#elif defined(CONFIG_BF547M)
-# define CONFIG_BF547
-#elif defined(CONFIG_BF548M)
-# define CONFIG_BF548
-#elif defined(CONFIG_BF549M)
-# define CONFIG_BF549
-#endif
-
#if defined(CONFIG_BF542)
# define CPU "BF542"
# define CPUID 0x27de
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index 13302b67857..5684030ccc2 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -64,10 +64,4 @@
#define OFFSET_THR 0x28 /* Transmit Holding register */
#define OFFSET_RBR 0x2C /* Receive Buffer register */
-/* PLL_DIV Masks */
-#define CCLK_DIV1 CSEL_DIV1 /* CCLK = VCO / 1 */
-#define CCLK_DIV2 CSEL_DIV2 /* CCLK = VCO / 2 */
-#define CCLK_DIV4 CSEL_DIV4 /* CCLK = VCO / 4 */
-#define CCLK_DIV8 CSEL_DIV8 /* CCLK = VCO / 8 */
-
#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
index 42342151513..bc650e6ea48 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
@@ -4,21 +4,21 @@
* Licensed under the GPL-2 or later.
*/
-#ifndef _CDEF_BF548_H
-#define _CDEF_BF548_H
+#ifndef _CDEF_BF547_H
+#define _CDEF_BF547_H
/* include all Core registers and bit definitions */
-#include "defBF548.h"
+#include "defBF547.h"
/* include core specific register pointer definitions */
#include <asm/cdef_LPBlackfin.h>
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "cdefBF54x_base.h"
-/* The following are the #defines needed by ADSP-BF548 that are not in the common header */
+/* The following are the #defines needed by ADSP-BF547 that are not in the common header */
/* Timer Registers */
@@ -805,4 +805,4 @@
#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
-#endif /* _CDEF_BF548_H */
+#endif /* _CDEF_BF547_H */
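
The context lines above show the generated 16-bit accessor pairs; a minimal sketch of how callers are expected to use them instead of dereferencing register addresses directly (register name taken from the lines above):

/* Sketch only: read-modify-write through the generated accessors. */
static inline void example_clear_hmdma1_bcount(void)
{
	if (bfin_read_HMDMA1_BCOUNT() != 0)
		bfin_write_HMDMA1_BCOUNT(0);
}
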
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
index df84180410c..3523e08f796 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
@@ -18,165 +18,8 @@
/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "cdefBF54x_base.h"
-/* The following are the #defines needed by ADSP-BF548 that are not in the common header */
-
-/* Timer Registers */
-
-#define bfin_read_TIMER8_CONFIG() bfin_read16(TIMER8_CONFIG)
-#define bfin_write_TIMER8_CONFIG(val) bfin_write16(TIMER8_CONFIG, val)
-#define bfin_read_TIMER8_COUNTER() bfin_read32(TIMER8_COUNTER)
-#define bfin_write_TIMER8_COUNTER(val) bfin_write32(TIMER8_COUNTER, val)
-#define bfin_read_TIMER8_PERIOD() bfin_read32(TIMER8_PERIOD)
-#define bfin_write_TIMER8_PERIOD(val) bfin_write32(TIMER8_PERIOD, val)
-#define bfin_read_TIMER8_WIDTH() bfin_read32(TIMER8_WIDTH)
-#define bfin_write_TIMER8_WIDTH(val) bfin_write32(TIMER8_WIDTH, val)
-#define bfin_read_TIMER9_CONFIG() bfin_read16(TIMER9_CONFIG)
-#define bfin_write_TIMER9_CONFIG(val) bfin_write16(TIMER9_CONFIG, val)
-#define bfin_read_TIMER9_COUNTER() bfin_read32(TIMER9_COUNTER)
-#define bfin_write_TIMER9_COUNTER(val) bfin_write32(TIMER9_COUNTER, val)
-#define bfin_read_TIMER9_PERIOD() bfin_read32(TIMER9_PERIOD)
-#define bfin_write_TIMER9_PERIOD(val) bfin_write32(TIMER9_PERIOD, val)
-#define bfin_read_TIMER9_WIDTH() bfin_read32(TIMER9_WIDTH)
-#define bfin_write_TIMER9_WIDTH(val) bfin_write32(TIMER9_WIDTH, val)
-#define bfin_read_TIMER10_CONFIG() bfin_read16(TIMER10_CONFIG)
-#define bfin_write_TIMER10_CONFIG(val) bfin_write16(TIMER10_CONFIG, val)
-#define bfin_read_TIMER10_COUNTER() bfin_read32(TIMER10_COUNTER)
-#define bfin_write_TIMER10_COUNTER(val) bfin_write32(TIMER10_COUNTER, val)
-#define bfin_read_TIMER10_PERIOD() bfin_read32(TIMER10_PERIOD)
-#define bfin_write_TIMER10_PERIOD(val) bfin_write32(TIMER10_PERIOD, val)
-#define bfin_read_TIMER10_WIDTH() bfin_read32(TIMER10_WIDTH)
-#define bfin_write_TIMER10_WIDTH(val) bfin_write32(TIMER10_WIDTH, val)
-
-/* Timer Group of 3 */
-
-#define bfin_read_TIMER_ENABLE1() bfin_read16(TIMER_ENABLE1)
-#define bfin_write_TIMER_ENABLE1(val) bfin_write16(TIMER_ENABLE1, val)
-#define bfin_read_TIMER_DISABLE1() bfin_read16(TIMER_DISABLE1)
-#define bfin_write_TIMER_DISABLE1(val) bfin_write16(TIMER_DISABLE1, val)
-#define bfin_read_TIMER_STATUS1() bfin_read32(TIMER_STATUS1)
-#define bfin_write_TIMER_STATUS1(val) bfin_write32(TIMER_STATUS1, val)
-
-/* SPORT0 Registers */
-
-#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
-
-/* EPPI0 Registers */
-
-#define bfin_read_EPPI0_STATUS() bfin_read16(EPPI0_STATUS)
-#define bfin_write_EPPI0_STATUS(val) bfin_write16(EPPI0_STATUS, val)
-#define bfin_read_EPPI0_HCOUNT() bfin_read16(EPPI0_HCOUNT)
-#define bfin_write_EPPI0_HCOUNT(val) bfin_write16(EPPI0_HCOUNT, val)
-#define bfin_read_EPPI0_HDELAY() bfin_read16(EPPI0_HDELAY)
-#define bfin_write_EPPI0_HDELAY(val) bfin_write16(EPPI0_HDELAY, val)
-#define bfin_read_EPPI0_VCOUNT() bfin_read16(EPPI0_VCOUNT)
-#define bfin_write_EPPI0_VCOUNT(val) bfin_write16(EPPI0_VCOUNT, val)
-#define bfin_read_EPPI0_VDELAY() bfin_read16(EPPI0_VDELAY)
-#define bfin_write_EPPI0_VDELAY(val) bfin_write16(EPPI0_VDELAY, val)
-#define bfin_read_EPPI0_FRAME() bfin_read16(EPPI0_FRAME)
-#define bfin_write_EPPI0_FRAME(val) bfin_write16(EPPI0_FRAME, val)
-#define bfin_read_EPPI0_LINE() bfin_read16(EPPI0_LINE)
-#define bfin_write_EPPI0_LINE(val) bfin_write16(EPPI0_LINE, val)
-#define bfin_read_EPPI0_CLKDIV() bfin_read16(EPPI0_CLKDIV)
-#define bfin_write_EPPI0_CLKDIV(val) bfin_write16(EPPI0_CLKDIV, val)
-#define bfin_read_EPPI0_CONTROL() bfin_read32(EPPI0_CONTROL)
-#define bfin_write_EPPI0_CONTROL(val) bfin_write32(EPPI0_CONTROL, val)
-#define bfin_read_EPPI0_FS1W_HBL() bfin_read32(EPPI0_FS1W_HBL)
-#define bfin_write_EPPI0_FS1W_HBL(val) bfin_write32(EPPI0_FS1W_HBL, val)
-#define bfin_read_EPPI0_FS1P_AVPL() bfin_read32(EPPI0_FS1P_AVPL)
-#define bfin_write_EPPI0_FS1P_AVPL(val) bfin_write32(EPPI0_FS1P_AVPL, val)
-#define bfin_read_EPPI0_FS2W_LVB() bfin_read32(EPPI0_FS2W_LVB)
-#define bfin_write_EPPI0_FS2W_LVB(val) bfin_write32(EPPI0_FS2W_LVB, val)
-#define bfin_read_EPPI0_FS2P_LAVF() bfin_read32(EPPI0_FS2P_LAVF)
-#define bfin_write_EPPI0_FS2P_LAVF(val) bfin_write32(EPPI0_FS2P_LAVF, val)
-#define bfin_read_EPPI0_CLIP() bfin_read32(EPPI0_CLIP)
-#define bfin_write_EPPI0_CLIP(val) bfin_write32(EPPI0_CLIP, val)
-
-/* UART2 Registers */
-
-#define bfin_read_UART2_DLL() bfin_read16(UART2_DLL)
-#define bfin_write_UART2_DLL(val) bfin_write16(UART2_DLL, val)
-#define bfin_read_UART2_DLH() bfin_read16(UART2_DLH)
-#define bfin_write_UART2_DLH(val) bfin_write16(UART2_DLH, val)
-#define bfin_read_UART2_GCTL() bfin_read16(UART2_GCTL)
-#define bfin_write_UART2_GCTL(val) bfin_write16(UART2_GCTL, val)
-#define bfin_read_UART2_LCR() bfin_read16(UART2_LCR)
-#define bfin_write_UART2_LCR(val) bfin_write16(UART2_LCR, val)
-#define bfin_read_UART2_MCR() bfin_read16(UART2_MCR)
-#define bfin_write_UART2_MCR(val) bfin_write16(UART2_MCR, val)
-#define bfin_read_UART2_LSR() bfin_read16(UART2_LSR)
-#define bfin_write_UART2_LSR(val) bfin_write16(UART2_LSR, val)
-#define bfin_read_UART2_MSR() bfin_read16(UART2_MSR)
-#define bfin_write_UART2_MSR(val) bfin_write16(UART2_MSR, val)
-#define bfin_read_UART2_SCR() bfin_read16(UART2_SCR)
-#define bfin_write_UART2_SCR(val) bfin_write16(UART2_SCR, val)
-#define bfin_read_UART2_IER_SET() bfin_read16(UART2_IER_SET)
-#define bfin_write_UART2_IER_SET(val) bfin_write16(UART2_IER_SET, val)
-#define bfin_read_UART2_IER_CLEAR() bfin_read16(UART2_IER_CLEAR)
-#define bfin_write_UART2_IER_CLEAR(val) bfin_write16(UART2_IER_CLEAR, val)
-#define bfin_read_UART2_RBR() bfin_read16(UART2_RBR)
-#define bfin_write_UART2_RBR(val) bfin_write16(UART2_RBR, val)
-
-/* Two Wire Interface Registers (TWI1) */
-
-/* SPI2 Registers */
-
-#define bfin_read_SPI2_CTL() bfin_read16(SPI2_CTL)
-#define bfin_write_SPI2_CTL(val) bfin_write16(SPI2_CTL, val)
-#define bfin_read_SPI2_FLG() bfin_read16(SPI2_FLG)
-#define bfin_write_SPI2_FLG(val) bfin_write16(SPI2_FLG, val)
-#define bfin_read_SPI2_STAT() bfin_read16(SPI2_STAT)
-#define bfin_write_SPI2_STAT(val) bfin_write16(SPI2_STAT, val)
-#define bfin_read_SPI2_TDBR() bfin_read16(SPI2_TDBR)
-#define bfin_write_SPI2_TDBR(val) bfin_write16(SPI2_TDBR, val)
-#define bfin_read_SPI2_RDBR() bfin_read16(SPI2_RDBR)
-#define bfin_write_SPI2_RDBR(val) bfin_write16(SPI2_RDBR, val)
-#define bfin_read_SPI2_BAUD() bfin_read16(SPI2_BAUD)
-#define bfin_write_SPI2_BAUD(val) bfin_write16(SPI2_BAUD, val)
-#define bfin_read_SPI2_SHADOW() bfin_read16(SPI2_SHADOW)
-#define bfin_write_SPI2_SHADOW(val) bfin_write16(SPI2_SHADOW, val)
+/* The BF548 is like the BF547, but has additional CANs */
+#include "cdefBF547.h"
/* CAN Controller 1 Config 1 Registers */
@@ -923,631 +766,4 @@
#define bfin_read_CAN1_MB31_ID1() bfin_read16(CAN1_MB31_ID1)
#define bfin_write_CAN1_MB31_ID1(val) bfin_write16(CAN1_MB31_ID1, val)
-/* ATAPI Registers */
-
-#define bfin_read_ATAPI_CONTROL() bfin_read16(ATAPI_CONTROL)
-#define bfin_write_ATAPI_CONTROL(val) bfin_write16(ATAPI_CONTROL, val)
-#define bfin_read_ATAPI_STATUS() bfin_read16(ATAPI_STATUS)
-#define bfin_write_ATAPI_STATUS(val) bfin_write16(ATAPI_STATUS, val)
-#define bfin_read_ATAPI_DEV_ADDR() bfin_read16(ATAPI_DEV_ADDR)
-#define bfin_write_ATAPI_DEV_ADDR(val) bfin_write16(ATAPI_DEV_ADDR, val)
-#define bfin_read_ATAPI_DEV_TXBUF() bfin_read16(ATAPI_DEV_TXBUF)
-#define bfin_write_ATAPI_DEV_TXBUF(val) bfin_write16(ATAPI_DEV_TXBUF, val)
-#define bfin_read_ATAPI_DEV_RXBUF() bfin_read16(ATAPI_DEV_RXBUF)
-#define bfin_write_ATAPI_DEV_RXBUF(val) bfin_write16(ATAPI_DEV_RXBUF, val)
-#define bfin_read_ATAPI_INT_MASK() bfin_read16(ATAPI_INT_MASK)
-#define bfin_write_ATAPI_INT_MASK(val) bfin_write16(ATAPI_INT_MASK, val)
-#define bfin_read_ATAPI_INT_STATUS() bfin_read16(ATAPI_INT_STATUS)
-#define bfin_write_ATAPI_INT_STATUS(val) bfin_write16(ATAPI_INT_STATUS, val)
-#define bfin_read_ATAPI_XFER_LEN() bfin_read16(ATAPI_XFER_LEN)
-#define bfin_write_ATAPI_XFER_LEN(val) bfin_write16(ATAPI_XFER_LEN, val)
-#define bfin_read_ATAPI_LINE_STATUS() bfin_read16(ATAPI_LINE_STATUS)
-#define bfin_write_ATAPI_LINE_STATUS(val) bfin_write16(ATAPI_LINE_STATUS, val)
-#define bfin_read_ATAPI_SM_STATE() bfin_read16(ATAPI_SM_STATE)
-#define bfin_write_ATAPI_SM_STATE(val) bfin_write16(ATAPI_SM_STATE, val)
-#define bfin_read_ATAPI_TERMINATE() bfin_read16(ATAPI_TERMINATE)
-#define bfin_write_ATAPI_TERMINATE(val) bfin_write16(ATAPI_TERMINATE, val)
-#define bfin_read_ATAPI_PIO_TFRCNT() bfin_read16(ATAPI_PIO_TFRCNT)
-#define bfin_write_ATAPI_PIO_TFRCNT(val) bfin_write16(ATAPI_PIO_TFRCNT, val)
-#define bfin_read_ATAPI_DMA_TFRCNT() bfin_read16(ATAPI_DMA_TFRCNT)
-#define bfin_write_ATAPI_DMA_TFRCNT(val) bfin_write16(ATAPI_DMA_TFRCNT, val)
-#define bfin_read_ATAPI_UMAIN_TFRCNT() bfin_read16(ATAPI_UMAIN_TFRCNT)
-#define bfin_write_ATAPI_UMAIN_TFRCNT(val) bfin_write16(ATAPI_UMAIN_TFRCNT, val)
-#define bfin_read_ATAPI_UDMAOUT_TFRCNT() bfin_read16(ATAPI_UDMAOUT_TFRCNT)
-#define bfin_write_ATAPI_UDMAOUT_TFRCNT(val) bfin_write16(ATAPI_UDMAOUT_TFRCNT, val)
-#define bfin_read_ATAPI_REG_TIM_0() bfin_read16(ATAPI_REG_TIM_0)
-#define bfin_write_ATAPI_REG_TIM_0(val) bfin_write16(ATAPI_REG_TIM_0, val)
-#define bfin_read_ATAPI_PIO_TIM_0() bfin_read16(ATAPI_PIO_TIM_0)
-#define bfin_write_ATAPI_PIO_TIM_0(val) bfin_write16(ATAPI_PIO_TIM_0, val)
-#define bfin_read_ATAPI_PIO_TIM_1() bfin_read16(ATAPI_PIO_TIM_1)
-#define bfin_write_ATAPI_PIO_TIM_1(val) bfin_write16(ATAPI_PIO_TIM_1, val)
-#define bfin_read_ATAPI_MULTI_TIM_0() bfin_read16(ATAPI_MULTI_TIM_0)
-#define bfin_write_ATAPI_MULTI_TIM_0(val) bfin_write16(ATAPI_MULTI_TIM_0, val)
-#define bfin_read_ATAPI_MULTI_TIM_1() bfin_read16(ATAPI_MULTI_TIM_1)
-#define bfin_write_ATAPI_MULTI_TIM_1(val) bfin_write16(ATAPI_MULTI_TIM_1, val)
-#define bfin_read_ATAPI_MULTI_TIM_2() bfin_read16(ATAPI_MULTI_TIM_2)
-#define bfin_write_ATAPI_MULTI_TIM_2(val) bfin_write16(ATAPI_MULTI_TIM_2, val)
-#define bfin_read_ATAPI_ULTRA_TIM_0() bfin_read16(ATAPI_ULTRA_TIM_0)
-#define bfin_write_ATAPI_ULTRA_TIM_0(val) bfin_write16(ATAPI_ULTRA_TIM_0, val)
-#define bfin_read_ATAPI_ULTRA_TIM_1() bfin_read16(ATAPI_ULTRA_TIM_1)
-#define bfin_write_ATAPI_ULTRA_TIM_1(val) bfin_write16(ATAPI_ULTRA_TIM_1, val)
-#define bfin_read_ATAPI_ULTRA_TIM_2() bfin_read16(ATAPI_ULTRA_TIM_2)
-#define bfin_write_ATAPI_ULTRA_TIM_2(val) bfin_write16(ATAPI_ULTRA_TIM_2, val)
-#define bfin_read_ATAPI_ULTRA_TIM_3() bfin_read16(ATAPI_ULTRA_TIM_3)
-#define bfin_write_ATAPI_ULTRA_TIM_3(val) bfin_write16(ATAPI_ULTRA_TIM_3, val)
-
-/* SDH Registers */
-
-#define bfin_read_SDH_PWR_CTL() bfin_read16(SDH_PWR_CTL)
-#define bfin_write_SDH_PWR_CTL(val) bfin_write16(SDH_PWR_CTL, val)
-#define bfin_read_SDH_CLK_CTL() bfin_read16(SDH_CLK_CTL)
-#define bfin_write_SDH_CLK_CTL(val) bfin_write16(SDH_CLK_CTL, val)
-#define bfin_read_SDH_ARGUMENT() bfin_read32(SDH_ARGUMENT)
-#define bfin_write_SDH_ARGUMENT(val) bfin_write32(SDH_ARGUMENT, val)
-#define bfin_read_SDH_COMMAND() bfin_read16(SDH_COMMAND)
-#define bfin_write_SDH_COMMAND(val) bfin_write16(SDH_COMMAND, val)
-#define bfin_read_SDH_RESP_CMD() bfin_read16(SDH_RESP_CMD)
-#define bfin_write_SDH_RESP_CMD(val) bfin_write16(SDH_RESP_CMD, val)
-#define bfin_read_SDH_RESPONSE0() bfin_read32(SDH_RESPONSE0)
-#define bfin_write_SDH_RESPONSE0(val) bfin_write32(SDH_RESPONSE0, val)
-#define bfin_read_SDH_RESPONSE1() bfin_read32(SDH_RESPONSE1)
-#define bfin_write_SDH_RESPONSE1(val) bfin_write32(SDH_RESPONSE1, val)
-#define bfin_read_SDH_RESPONSE2() bfin_read32(SDH_RESPONSE2)
-#define bfin_write_SDH_RESPONSE2(val) bfin_write32(SDH_RESPONSE2, val)
-#define bfin_read_SDH_RESPONSE3() bfin_read32(SDH_RESPONSE3)
-#define bfin_write_SDH_RESPONSE3(val) bfin_write32(SDH_RESPONSE3, val)
-#define bfin_read_SDH_DATA_TIMER() bfin_read32(SDH_DATA_TIMER)
-#define bfin_write_SDH_DATA_TIMER(val) bfin_write32(SDH_DATA_TIMER, val)
-#define bfin_read_SDH_DATA_LGTH() bfin_read16(SDH_DATA_LGTH)
-#define bfin_write_SDH_DATA_LGTH(val) bfin_write16(SDH_DATA_LGTH, val)
-#define bfin_read_SDH_DATA_CTL() bfin_read16(SDH_DATA_CTL)
-#define bfin_write_SDH_DATA_CTL(val) bfin_write16(SDH_DATA_CTL, val)
-#define bfin_read_SDH_DATA_CNT() bfin_read16(SDH_DATA_CNT)
-#define bfin_write_SDH_DATA_CNT(val) bfin_write16(SDH_DATA_CNT, val)
-#define bfin_read_SDH_STATUS() bfin_read32(SDH_STATUS)
-#define bfin_write_SDH_STATUS(val) bfin_write32(SDH_STATUS, val)
-#define bfin_read_SDH_STATUS_CLR() bfin_read16(SDH_STATUS_CLR)
-#define bfin_write_SDH_STATUS_CLR(val) bfin_write16(SDH_STATUS_CLR, val)
-#define bfin_read_SDH_MASK0() bfin_read32(SDH_MASK0)
-#define bfin_write_SDH_MASK0(val) bfin_write32(SDH_MASK0, val)
-#define bfin_read_SDH_MASK1() bfin_read32(SDH_MASK1)
-#define bfin_write_SDH_MASK1(val) bfin_write32(SDH_MASK1, val)
-#define bfin_read_SDH_FIFO_CNT() bfin_read16(SDH_FIFO_CNT)
-#define bfin_write_SDH_FIFO_CNT(val) bfin_write16(SDH_FIFO_CNT, val)
-#define bfin_read_SDH_FIFO() bfin_read32(SDH_FIFO)
-#define bfin_write_SDH_FIFO(val) bfin_write32(SDH_FIFO, val)
-#define bfin_read_SDH_E_STATUS() bfin_read16(SDH_E_STATUS)
-#define bfin_write_SDH_E_STATUS(val) bfin_write16(SDH_E_STATUS, val)
-#define bfin_read_SDH_E_MASK() bfin_read16(SDH_E_MASK)
-#define bfin_write_SDH_E_MASK(val) bfin_write16(SDH_E_MASK, val)
-#define bfin_read_SDH_CFG() bfin_read16(SDH_CFG)
-#define bfin_write_SDH_CFG(val) bfin_write16(SDH_CFG, val)
-#define bfin_read_SDH_RD_WAIT_EN() bfin_read16(SDH_RD_WAIT_EN)
-#define bfin_write_SDH_RD_WAIT_EN(val) bfin_write16(SDH_RD_WAIT_EN, val)
-#define bfin_read_SDH_PID0() bfin_read16(SDH_PID0)
-#define bfin_write_SDH_PID0(val) bfin_write16(SDH_PID0, val)
-#define bfin_read_SDH_PID1() bfin_read16(SDH_PID1)
-#define bfin_write_SDH_PID1(val) bfin_write16(SDH_PID1, val)
-#define bfin_read_SDH_PID2() bfin_read16(SDH_PID2)
-#define bfin_write_SDH_PID2(val) bfin_write16(SDH_PID2, val)
-#define bfin_read_SDH_PID3() bfin_read16(SDH_PID3)
-#define bfin_write_SDH_PID3(val) bfin_write16(SDH_PID3, val)
-#define bfin_read_SDH_PID4() bfin_read16(SDH_PID4)
-#define bfin_write_SDH_PID4(val) bfin_write16(SDH_PID4, val)
-#define bfin_read_SDH_PID5() bfin_read16(SDH_PID5)
-#define bfin_write_SDH_PID5(val) bfin_write16(SDH_PID5, val)
-#define bfin_read_SDH_PID6() bfin_read16(SDH_PID6)
-#define bfin_write_SDH_PID6(val) bfin_write16(SDH_PID6, val)
-#define bfin_read_SDH_PID7() bfin_read16(SDH_PID7)
-#define bfin_write_SDH_PID7(val) bfin_write16(SDH_PID7, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
-
-/* USB Control Registers */
-
-#define bfin_read_USB_FADDR() bfin_read16(USB_FADDR)
-#define bfin_write_USB_FADDR(val) bfin_write16(USB_FADDR, val)
-#define bfin_read_USB_POWER() bfin_read16(USB_POWER)
-#define bfin_write_USB_POWER(val) bfin_write16(USB_POWER, val)
-#define bfin_read_USB_INTRTX() bfin_read16(USB_INTRTX)
-#define bfin_write_USB_INTRTX(val) bfin_write16(USB_INTRTX, val)
-#define bfin_read_USB_INTRRX() bfin_read16(USB_INTRRX)
-#define bfin_write_USB_INTRRX(val) bfin_write16(USB_INTRRX, val)
-#define bfin_read_USB_INTRTXE() bfin_read16(USB_INTRTXE)
-#define bfin_write_USB_INTRTXE(val) bfin_write16(USB_INTRTXE, val)
-#define bfin_read_USB_INTRRXE() bfin_read16(USB_INTRRXE)
-#define bfin_write_USB_INTRRXE(val) bfin_write16(USB_INTRRXE, val)
-#define bfin_read_USB_INTRUSB() bfin_read16(USB_INTRUSB)
-#define bfin_write_USB_INTRUSB(val) bfin_write16(USB_INTRUSB, val)
-#define bfin_read_USB_INTRUSBE() bfin_read16(USB_INTRUSBE)
-#define bfin_write_USB_INTRUSBE(val) bfin_write16(USB_INTRUSBE, val)
-#define bfin_read_USB_FRAME() bfin_read16(USB_FRAME)
-#define bfin_write_USB_FRAME(val) bfin_write16(USB_FRAME, val)
-#define bfin_read_USB_INDEX() bfin_read16(USB_INDEX)
-#define bfin_write_USB_INDEX(val) bfin_write16(USB_INDEX, val)
-#define bfin_read_USB_TESTMODE() bfin_read16(USB_TESTMODE)
-#define bfin_write_USB_TESTMODE(val) bfin_write16(USB_TESTMODE, val)
-#define bfin_read_USB_GLOBINTR() bfin_read16(USB_GLOBINTR)
-#define bfin_write_USB_GLOBINTR(val) bfin_write16(USB_GLOBINTR, val)
-#define bfin_read_USB_GLOBAL_CTL() bfin_read16(USB_GLOBAL_CTL)
-#define bfin_write_USB_GLOBAL_CTL(val) bfin_write16(USB_GLOBAL_CTL, val)
-
-/* USB Packet Control Registers */
-
-#define bfin_read_USB_TX_MAX_PACKET() bfin_read16(USB_TX_MAX_PACKET)
-#define bfin_write_USB_TX_MAX_PACKET(val) bfin_write16(USB_TX_MAX_PACKET, val)
-#define bfin_read_USB_CSR0() bfin_read16(USB_CSR0)
-#define bfin_write_USB_CSR0(val) bfin_write16(USB_CSR0, val)
-#define bfin_read_USB_TXCSR() bfin_read16(USB_TXCSR)
-#define bfin_write_USB_TXCSR(val) bfin_write16(USB_TXCSR, val)
-#define bfin_read_USB_RX_MAX_PACKET() bfin_read16(USB_RX_MAX_PACKET)
-#define bfin_write_USB_RX_MAX_PACKET(val) bfin_write16(USB_RX_MAX_PACKET, val)
-#define bfin_read_USB_RXCSR() bfin_read16(USB_RXCSR)
-#define bfin_write_USB_RXCSR(val) bfin_write16(USB_RXCSR, val)
-#define bfin_read_USB_COUNT0() bfin_read16(USB_COUNT0)
-#define bfin_write_USB_COUNT0(val) bfin_write16(USB_COUNT0, val)
-#define bfin_read_USB_RXCOUNT() bfin_read16(USB_RXCOUNT)
-#define bfin_write_USB_RXCOUNT(val) bfin_write16(USB_RXCOUNT, val)
-#define bfin_read_USB_TXTYPE() bfin_read16(USB_TXTYPE)
-#define bfin_write_USB_TXTYPE(val) bfin_write16(USB_TXTYPE, val)
-#define bfin_read_USB_NAKLIMIT0() bfin_read16(USB_NAKLIMIT0)
-#define bfin_write_USB_NAKLIMIT0(val) bfin_write16(USB_NAKLIMIT0, val)
-#define bfin_read_USB_TXINTERVAL() bfin_read16(USB_TXINTERVAL)
-#define bfin_write_USB_TXINTERVAL(val) bfin_write16(USB_TXINTERVAL, val)
-#define bfin_read_USB_RXTYPE() bfin_read16(USB_RXTYPE)
-#define bfin_write_USB_RXTYPE(val) bfin_write16(USB_RXTYPE, val)
-#define bfin_read_USB_RXINTERVAL() bfin_read16(USB_RXINTERVAL)
-#define bfin_write_USB_RXINTERVAL(val) bfin_write16(USB_RXINTERVAL, val)
-#define bfin_read_USB_TXCOUNT() bfin_read16(USB_TXCOUNT)
-#define bfin_write_USB_TXCOUNT(val) bfin_write16(USB_TXCOUNT, val)
-
-/* USB Endbfin_read_()oint FIFO Registers */
-
-#define bfin_read_USB_EP0_FIFO() bfin_read16(USB_EP0_FIFO)
-#define bfin_write_USB_EP0_FIFO(val) bfin_write16(USB_EP0_FIFO, val)
-#define bfin_read_USB_EP1_FIFO() bfin_read16(USB_EP1_FIFO)
-#define bfin_write_USB_EP1_FIFO(val) bfin_write16(USB_EP1_FIFO, val)
-#define bfin_read_USB_EP2_FIFO() bfin_read16(USB_EP2_FIFO)
-#define bfin_write_USB_EP2_FIFO(val) bfin_write16(USB_EP2_FIFO, val)
-#define bfin_read_USB_EP3_FIFO() bfin_read16(USB_EP3_FIFO)
-#define bfin_write_USB_EP3_FIFO(val) bfin_write16(USB_EP3_FIFO, val)
-#define bfin_read_USB_EP4_FIFO() bfin_read16(USB_EP4_FIFO)
-#define bfin_write_USB_EP4_FIFO(val) bfin_write16(USB_EP4_FIFO, val)
-#define bfin_read_USB_EP5_FIFO() bfin_read16(USB_EP5_FIFO)
-#define bfin_write_USB_EP5_FIFO(val) bfin_write16(USB_EP5_FIFO, val)
-#define bfin_read_USB_EP6_FIFO() bfin_read16(USB_EP6_FIFO)
-#define bfin_write_USB_EP6_FIFO(val) bfin_write16(USB_EP6_FIFO, val)
-#define bfin_read_USB_EP7_FIFO() bfin_read16(USB_EP7_FIFO)
-#define bfin_write_USB_EP7_FIFO(val) bfin_write16(USB_EP7_FIFO, val)
-
-/* USB OTG Control Registers */
-
-#define bfin_read_USB_OTG_DEV_CTL() bfin_read16(USB_OTG_DEV_CTL)
-#define bfin_write_USB_OTG_DEV_CTL(val) bfin_write16(USB_OTG_DEV_CTL, val)
-#define bfin_read_USB_OTG_VBUS_IRQ() bfin_read16(USB_OTG_VBUS_IRQ)
-#define bfin_write_USB_OTG_VBUS_IRQ(val) bfin_write16(USB_OTG_VBUS_IRQ, val)
-#define bfin_read_USB_OTG_VBUS_MASK() bfin_read16(USB_OTG_VBUS_MASK)
-#define bfin_write_USB_OTG_VBUS_MASK(val) bfin_write16(USB_OTG_VBUS_MASK, val)
-
-/* USB Phy Control Registers */
-
-#define bfin_read_USB_LINKINFO() bfin_read16(USB_LINKINFO)
-#define bfin_write_USB_LINKINFO(val) bfin_write16(USB_LINKINFO, val)
-#define bfin_read_USB_VPLEN() bfin_read16(USB_VPLEN)
-#define bfin_write_USB_VPLEN(val) bfin_write16(USB_VPLEN, val)
-#define bfin_read_USB_HS_EOF1() bfin_read16(USB_HS_EOF1)
-#define bfin_write_USB_HS_EOF1(val) bfin_write16(USB_HS_EOF1, val)
-#define bfin_read_USB_FS_EOF1() bfin_read16(USB_FS_EOF1)
-#define bfin_write_USB_FS_EOF1(val) bfin_write16(USB_FS_EOF1, val)
-#define bfin_read_USB_LS_EOF1() bfin_read16(USB_LS_EOF1)
-#define bfin_write_USB_LS_EOF1(val) bfin_write16(USB_LS_EOF1, val)
-
-/* (APHY_CNTRL is for ADI usage only) */
-
-#define bfin_read_USB_APHY_CNTRL() bfin_read16(USB_APHY_CNTRL)
-#define bfin_write_USB_APHY_CNTRL(val) bfin_write16(USB_APHY_CNTRL, val)
-
-/* (APHY_CALIB is for ADI usage only) */
-
-#define bfin_read_USB_APHY_CALIB() bfin_read16(USB_APHY_CALIB)
-#define bfin_write_USB_APHY_CALIB(val) bfin_write16(USB_APHY_CALIB, val)
-#define bfin_read_USB_APHY_CNTRL2() bfin_read16(USB_APHY_CNTRL2)
-#define bfin_write_USB_APHY_CNTRL2(val) bfin_write16(USB_APHY_CNTRL2, val)
-
-/* (PHY_TEST is for ADI usage only) */
-
-#define bfin_read_USB_PHY_TEST() bfin_read16(USB_PHY_TEST)
-#define bfin_write_USB_PHY_TEST(val) bfin_write16(USB_PHY_TEST, val)
-#define bfin_read_USB_PLLOSC_CTRL() bfin_read16(USB_PLLOSC_CTRL)
-#define bfin_write_USB_PLLOSC_CTRL(val) bfin_write16(USB_PLLOSC_CTRL, val)
-#define bfin_read_USB_SRP_CLKDIV() bfin_read16(USB_SRP_CLKDIV)
-#define bfin_write_USB_SRP_CLKDIV(val) bfin_write16(USB_SRP_CLKDIV, val)
-
-/* USB Endbfin_read_()oint 0 Control Registers */
-
-#define bfin_read_USB_EP_NI0_TXMAXP() bfin_read16(USB_EP_NI0_TXMAXP)
-#define bfin_write_USB_EP_NI0_TXMAXP(val) bfin_write16(USB_EP_NI0_TXMAXP, val)
-#define bfin_read_USB_EP_NI0_TXCSR() bfin_read16(USB_EP_NI0_TXCSR)
-#define bfin_write_USB_EP_NI0_TXCSR(val) bfin_write16(USB_EP_NI0_TXCSR, val)
-#define bfin_read_USB_EP_NI0_RXMAXP() bfin_read16(USB_EP_NI0_RXMAXP)
-#define bfin_write_USB_EP_NI0_RXMAXP(val) bfin_write16(USB_EP_NI0_RXMAXP, val)
-#define bfin_read_USB_EP_NI0_RXCSR() bfin_read16(USB_EP_NI0_RXCSR)
-#define bfin_write_USB_EP_NI0_RXCSR(val) bfin_write16(USB_EP_NI0_RXCSR, val)
-#define bfin_read_USB_EP_NI0_RXCOUNT() bfin_read16(USB_EP_NI0_RXCOUNT)
-#define bfin_write_USB_EP_NI0_RXCOUNT(val) bfin_write16(USB_EP_NI0_RXCOUNT, val)
-#define bfin_read_USB_EP_NI0_TXTYPE() bfin_read16(USB_EP_NI0_TXTYPE)
-#define bfin_write_USB_EP_NI0_TXTYPE(val) bfin_write16(USB_EP_NI0_TXTYPE, val)
-#define bfin_read_USB_EP_NI0_TXINTERVAL() bfin_read16(USB_EP_NI0_TXINTERVAL)
-#define bfin_write_USB_EP_NI0_TXINTERVAL(val) bfin_write16(USB_EP_NI0_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI0_RXTYPE() bfin_read16(USB_EP_NI0_RXTYPE)
-#define bfin_write_USB_EP_NI0_RXTYPE(val) bfin_write16(USB_EP_NI0_RXTYPE, val)
-#define bfin_read_USB_EP_NI0_RXINTERVAL() bfin_read16(USB_EP_NI0_RXINTERVAL)
-#define bfin_write_USB_EP_NI0_RXINTERVAL(val) bfin_write16(USB_EP_NI0_RXINTERVAL, val)
-
-/* USB Endbfin_read_()oint 1 Control Registers */
-
-#define bfin_read_USB_EP_NI0_TXCOUNT() bfin_read16(USB_EP_NI0_TXCOUNT)
-#define bfin_write_USB_EP_NI0_TXCOUNT(val) bfin_write16(USB_EP_NI0_TXCOUNT, val)
-#define bfin_read_USB_EP_NI1_TXMAXP() bfin_read16(USB_EP_NI1_TXMAXP)
-#define bfin_write_USB_EP_NI1_TXMAXP(val) bfin_write16(USB_EP_NI1_TXMAXP, val)
-#define bfin_read_USB_EP_NI1_TXCSR() bfin_read16(USB_EP_NI1_TXCSR)
-#define bfin_write_USB_EP_NI1_TXCSR(val) bfin_write16(USB_EP_NI1_TXCSR, val)
-#define bfin_read_USB_EP_NI1_RXMAXP() bfin_read16(USB_EP_NI1_RXMAXP)
-#define bfin_write_USB_EP_NI1_RXMAXP(val) bfin_write16(USB_EP_NI1_RXMAXP, val)
-#define bfin_read_USB_EP_NI1_RXCSR() bfin_read16(USB_EP_NI1_RXCSR)
-#define bfin_write_USB_EP_NI1_RXCSR(val) bfin_write16(USB_EP_NI1_RXCSR, val)
-#define bfin_read_USB_EP_NI1_RXCOUNT() bfin_read16(USB_EP_NI1_RXCOUNT)
-#define bfin_write_USB_EP_NI1_RXCOUNT(val) bfin_write16(USB_EP_NI1_RXCOUNT, val)
-#define bfin_read_USB_EP_NI1_TXTYPE() bfin_read16(USB_EP_NI1_TXTYPE)
-#define bfin_write_USB_EP_NI1_TXTYPE(val) bfin_write16(USB_EP_NI1_TXTYPE, val)
-#define bfin_read_USB_EP_NI1_TXINTERVAL() bfin_read16(USB_EP_NI1_TXINTERVAL)
-#define bfin_write_USB_EP_NI1_TXINTERVAL(val) bfin_write16(USB_EP_NI1_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI1_RXTYPE() bfin_read16(USB_EP_NI1_RXTYPE)
-#define bfin_write_USB_EP_NI1_RXTYPE(val) bfin_write16(USB_EP_NI1_RXTYPE, val)
-#define bfin_read_USB_EP_NI1_RXINTERVAL() bfin_read16(USB_EP_NI1_RXINTERVAL)
-#define bfin_write_USB_EP_NI1_RXINTERVAL(val) bfin_write16(USB_EP_NI1_RXINTERVAL, val)
-
-/* USB Endbfin_read_()oint 2 Control Registers */
-
-#define bfin_read_USB_EP_NI1_TXCOUNT() bfin_read16(USB_EP_NI1_TXCOUNT)
-#define bfin_write_USB_EP_NI1_TXCOUNT(val) bfin_write16(USB_EP_NI1_TXCOUNT, val)
-#define bfin_read_USB_EP_NI2_TXMAXP() bfin_read16(USB_EP_NI2_TXMAXP)
-#define bfin_write_USB_EP_NI2_TXMAXP(val) bfin_write16(USB_EP_NI2_TXMAXP, val)
-#define bfin_read_USB_EP_NI2_TXCSR() bfin_read16(USB_EP_NI2_TXCSR)
-#define bfin_write_USB_EP_NI2_TXCSR(val) bfin_write16(USB_EP_NI2_TXCSR, val)
-#define bfin_read_USB_EP_NI2_RXMAXP() bfin_read16(USB_EP_NI2_RXMAXP)
-#define bfin_write_USB_EP_NI2_RXMAXP(val) bfin_write16(USB_EP_NI2_RXMAXP, val)
-#define bfin_read_USB_EP_NI2_RXCSR() bfin_read16(USB_EP_NI2_RXCSR)
-#define bfin_write_USB_EP_NI2_RXCSR(val) bfin_write16(USB_EP_NI2_RXCSR, val)
-#define bfin_read_USB_EP_NI2_RXCOUNT() bfin_read16(USB_EP_NI2_RXCOUNT)
-#define bfin_write_USB_EP_NI2_RXCOUNT(val) bfin_write16(USB_EP_NI2_RXCOUNT, val)
-#define bfin_read_USB_EP_NI2_TXTYPE() bfin_read16(USB_EP_NI2_TXTYPE)
-#define bfin_write_USB_EP_NI2_TXTYPE(val) bfin_write16(USB_EP_NI2_TXTYPE, val)
-#define bfin_read_USB_EP_NI2_TXINTERVAL() bfin_read16(USB_EP_NI2_TXINTERVAL)
-#define bfin_write_USB_EP_NI2_TXINTERVAL(val) bfin_write16(USB_EP_NI2_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI2_RXTYPE() bfin_read16(USB_EP_NI2_RXTYPE)
-#define bfin_write_USB_EP_NI2_RXTYPE(val) bfin_write16(USB_EP_NI2_RXTYPE, val)
-#define bfin_read_USB_EP_NI2_RXINTERVAL() bfin_read16(USB_EP_NI2_RXINTERVAL)
-#define bfin_write_USB_EP_NI2_RXINTERVAL(val) bfin_write16(USB_EP_NI2_RXINTERVAL, val)
-
-/* USB Endbfin_read_()oint 3 Control Registers */
-
-#define bfin_read_USB_EP_NI2_TXCOUNT() bfin_read16(USB_EP_NI2_TXCOUNT)
-#define bfin_write_USB_EP_NI2_TXCOUNT(val) bfin_write16(USB_EP_NI2_TXCOUNT, val)
-#define bfin_read_USB_EP_NI3_TXMAXP() bfin_read16(USB_EP_NI3_TXMAXP)
-#define bfin_write_USB_EP_NI3_TXMAXP(val) bfin_write16(USB_EP_NI3_TXMAXP, val)
-#define bfin_read_USB_EP_NI3_TXCSR() bfin_read16(USB_EP_NI3_TXCSR)
-#define bfin_write_USB_EP_NI3_TXCSR(val) bfin_write16(USB_EP_NI3_TXCSR, val)
-#define bfin_read_USB_EP_NI3_RXMAXP() bfin_read16(USB_EP_NI3_RXMAXP)
-#define bfin_write_USB_EP_NI3_RXMAXP(val) bfin_write16(USB_EP_NI3_RXMAXP, val)
-#define bfin_read_USB_EP_NI3_RXCSR() bfin_read16(USB_EP_NI3_RXCSR)
-#define bfin_write_USB_EP_NI3_RXCSR(val) bfin_write16(USB_EP_NI3_RXCSR, val)
-#define bfin_read_USB_EP_NI3_RXCOUNT() bfin_read16(USB_EP_NI3_RXCOUNT)
-#define bfin_write_USB_EP_NI3_RXCOUNT(val) bfin_write16(USB_EP_NI3_RXCOUNT, val)
-#define bfin_read_USB_EP_NI3_TXTYPE() bfin_read16(USB_EP_NI3_TXTYPE)
-#define bfin_write_USB_EP_NI3_TXTYPE(val) bfin_write16(USB_EP_NI3_TXTYPE, val)
-#define bfin_read_USB_EP_NI3_TXINTERVAL() bfin_read16(USB_EP_NI3_TXINTERVAL)
-#define bfin_write_USB_EP_NI3_TXINTERVAL(val) bfin_write16(USB_EP_NI3_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI3_RXTYPE() bfin_read16(USB_EP_NI3_RXTYPE)
-#define bfin_write_USB_EP_NI3_RXTYPE(val) bfin_write16(USB_EP_NI3_RXTYPE, val)
-#define bfin_read_USB_EP_NI3_RXINTERVAL() bfin_read16(USB_EP_NI3_RXINTERVAL)
-#define bfin_write_USB_EP_NI3_RXINTERVAL(val) bfin_write16(USB_EP_NI3_RXINTERVAL, val)
-
-/* USB Endbfin_read_()oint 4 Control Registers */
-
-#define bfin_read_USB_EP_NI3_TXCOUNT() bfin_read16(USB_EP_NI3_TXCOUNT)
-#define bfin_write_USB_EP_NI3_TXCOUNT(val) bfin_write16(USB_EP_NI3_TXCOUNT, val)
-#define bfin_read_USB_EP_NI4_TXMAXP() bfin_read16(USB_EP_NI4_TXMAXP)
-#define bfin_write_USB_EP_NI4_TXMAXP(val) bfin_write16(USB_EP_NI4_TXMAXP, val)
-#define bfin_read_USB_EP_NI4_TXCSR() bfin_read16(USB_EP_NI4_TXCSR)
-#define bfin_write_USB_EP_NI4_TXCSR(val) bfin_write16(USB_EP_NI4_TXCSR, val)
-#define bfin_read_USB_EP_NI4_RXMAXP() bfin_read16(USB_EP_NI4_RXMAXP)
-#define bfin_write_USB_EP_NI4_RXMAXP(val) bfin_write16(USB_EP_NI4_RXMAXP, val)
-#define bfin_read_USB_EP_NI4_RXCSR() bfin_read16(USB_EP_NI4_RXCSR)
-#define bfin_write_USB_EP_NI4_RXCSR(val) bfin_write16(USB_EP_NI4_RXCSR, val)
-#define bfin_read_USB_EP_NI4_RXCOUNT() bfin_read16(USB_EP_NI4_RXCOUNT)
-#define bfin_write_USB_EP_NI4_RXCOUNT(val) bfin_write16(USB_EP_NI4_RXCOUNT, val)
-#define bfin_read_USB_EP_NI4_TXTYPE() bfin_read16(USB_EP_NI4_TXTYPE)
-#define bfin_write_USB_EP_NI4_TXTYPE(val) bfin_write16(USB_EP_NI4_TXTYPE, val)
-#define bfin_read_USB_EP_NI4_TXINTERVAL() bfin_read16(USB_EP_NI4_TXINTERVAL)
-#define bfin_write_USB_EP_NI4_TXINTERVAL(val) bfin_write16(USB_EP_NI4_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI4_RXTYPE() bfin_read16(USB_EP_NI4_RXTYPE)
-#define bfin_write_USB_EP_NI4_RXTYPE(val) bfin_write16(USB_EP_NI4_RXTYPE, val)
-#define bfin_read_USB_EP_NI4_RXINTERVAL() bfin_read16(USB_EP_NI4_RXINTERVAL)
-#define bfin_write_USB_EP_NI4_RXINTERVAL(val) bfin_write16(USB_EP_NI4_RXINTERVAL, val)
-
-/* USB Endbfin_read_()oint 5 Control Registers */
-
-#define bfin_read_USB_EP_NI4_TXCOUNT() bfin_read16(USB_EP_NI4_TXCOUNT)
-#define bfin_write_USB_EP_NI4_TXCOUNT(val) bfin_write16(USB_EP_NI4_TXCOUNT, val)
-#define bfin_read_USB_EP_NI5_TXMAXP() bfin_read16(USB_EP_NI5_TXMAXP)
-#define bfin_write_USB_EP_NI5_TXMAXP(val) bfin_write16(USB_EP_NI5_TXMAXP, val)
-#define bfin_read_USB_EP_NI5_TXCSR() bfin_read16(USB_EP_NI5_TXCSR)
-#define bfin_write_USB_EP_NI5_TXCSR(val) bfin_write16(USB_EP_NI5_TXCSR, val)
-#define bfin_read_USB_EP_NI5_RXMAXP() bfin_read16(USB_EP_NI5_RXMAXP)
-#define bfin_write_USB_EP_NI5_RXMAXP(val) bfin_write16(USB_EP_NI5_RXMAXP, val)
-#define bfin_read_USB_EP_NI5_RXCSR() bfin_read16(USB_EP_NI5_RXCSR)
-#define bfin_write_USB_EP_NI5_RXCSR(val) bfin_write16(USB_EP_NI5_RXCSR, val)
-#define bfin_read_USB_EP_NI5_RXCOUNT() bfin_read16(USB_EP_NI5_RXCOUNT)
-#define bfin_write_USB_EP_NI5_RXCOUNT(val) bfin_write16(USB_EP_NI5_RXCOUNT, val)
-#define bfin_read_USB_EP_NI5_TXTYPE() bfin_read16(USB_EP_NI5_TXTYPE)
-#define bfin_write_USB_EP_NI5_TXTYPE(val) bfin_write16(USB_EP_NI5_TXTYPE, val)
-#define bfin_read_USB_EP_NI5_TXINTERVAL() bfin_read16(USB_EP_NI5_TXINTERVAL)
-#define bfin_write_USB_EP_NI5_TXINTERVAL(val) bfin_write16(USB_EP_NI5_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI5_RXTYPE() bfin_read16(USB_EP_NI5_RXTYPE)
-#define bfin_write_USB_EP_NI5_RXTYPE(val) bfin_write16(USB_EP_NI5_RXTYPE, val)
-#define bfin_read_USB_EP_NI5_RXINTERVAL() bfin_read16(USB_EP_NI5_RXINTERVAL)
-#define bfin_write_USB_EP_NI5_RXINTERVAL(val) bfin_write16(USB_EP_NI5_RXINTERVAL, val)
-
-/* USB Endbfin_read_()oint 6 Control Registers */
-
-#define bfin_read_USB_EP_NI5_TXCOUNT() bfin_read16(USB_EP_NI5_TXCOUNT)
-#define bfin_write_USB_EP_NI5_TXCOUNT(val) bfin_write16(USB_EP_NI5_TXCOUNT, val)
-#define bfin_read_USB_EP_NI6_TXMAXP() bfin_read16(USB_EP_NI6_TXMAXP)
-#define bfin_write_USB_EP_NI6_TXMAXP(val) bfin_write16(USB_EP_NI6_TXMAXP, val)
-#define bfin_read_USB_EP_NI6_TXCSR() bfin_read16(USB_EP_NI6_TXCSR)
-#define bfin_write_USB_EP_NI6_TXCSR(val) bfin_write16(USB_EP_NI6_TXCSR, val)
-#define bfin_read_USB_EP_NI6_RXMAXP() bfin_read16(USB_EP_NI6_RXMAXP)
-#define bfin_write_USB_EP_NI6_RXMAXP(val) bfin_write16(USB_EP_NI6_RXMAXP, val)
-#define bfin_read_USB_EP_NI6_RXCSR() bfin_read16(USB_EP_NI6_RXCSR)
-#define bfin_write_USB_EP_NI6_RXCSR(val) bfin_write16(USB_EP_NI6_RXCSR, val)
-#define bfin_read_USB_EP_NI6_RXCOUNT() bfin_read16(USB_EP_NI6_RXCOUNT)
-#define bfin_write_USB_EP_NI6_RXCOUNT(val) bfin_write16(USB_EP_NI6_RXCOUNT, val)
-#define bfin_read_USB_EP_NI6_TXTYPE() bfin_read16(USB_EP_NI6_TXTYPE)
-#define bfin_write_USB_EP_NI6_TXTYPE(val) bfin_write16(USB_EP_NI6_TXTYPE, val)
-#define bfin_read_USB_EP_NI6_TXINTERVAL() bfin_read16(USB_EP_NI6_TXINTERVAL)
-#define bfin_write_USB_EP_NI6_TXINTERVAL(val) bfin_write16(USB_EP_NI6_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI6_RXTYPE() bfin_read16(USB_EP_NI6_RXTYPE)
-#define bfin_write_USB_EP_NI6_RXTYPE(val) bfin_write16(USB_EP_NI6_RXTYPE, val)
-#define bfin_read_USB_EP_NI6_RXINTERVAL() bfin_read16(USB_EP_NI6_RXINTERVAL)
-#define bfin_write_USB_EP_NI6_RXINTERVAL(val) bfin_write16(USB_EP_NI6_RXINTERVAL, val)
-
-/* USB Endbfin_read_()oint 7 Control Registers */
-
-#define bfin_read_USB_EP_NI6_TXCOUNT() bfin_read16(USB_EP_NI6_TXCOUNT)
-#define bfin_write_USB_EP_NI6_TXCOUNT(val) bfin_write16(USB_EP_NI6_TXCOUNT, val)
-#define bfin_read_USB_EP_NI7_TXMAXP() bfin_read16(USB_EP_NI7_TXMAXP)
-#define bfin_write_USB_EP_NI7_TXMAXP(val) bfin_write16(USB_EP_NI7_TXMAXP, val)
-#define bfin_read_USB_EP_NI7_TXCSR() bfin_read16(USB_EP_NI7_TXCSR)
-#define bfin_write_USB_EP_NI7_TXCSR(val) bfin_write16(USB_EP_NI7_TXCSR, val)
-#define bfin_read_USB_EP_NI7_RXMAXP() bfin_read16(USB_EP_NI7_RXMAXP)
-#define bfin_write_USB_EP_NI7_RXMAXP(val) bfin_write16(USB_EP_NI7_RXMAXP, val)
-#define bfin_read_USB_EP_NI7_RXCSR() bfin_read16(USB_EP_NI7_RXCSR)
-#define bfin_write_USB_EP_NI7_RXCSR(val) bfin_write16(USB_EP_NI7_RXCSR, val)
-#define bfin_read_USB_EP_NI7_RXCOUNT() bfin_read16(USB_EP_NI7_RXCOUNT)
-#define bfin_write_USB_EP_NI7_RXCOUNT(val) bfin_write16(USB_EP_NI7_RXCOUNT, val)
-#define bfin_read_USB_EP_NI7_TXTYPE() bfin_read16(USB_EP_NI7_TXTYPE)
-#define bfin_write_USB_EP_NI7_TXTYPE(val) bfin_write16(USB_EP_NI7_TXTYPE, val)
-#define bfin_read_USB_EP_NI7_TXINTERVAL() bfin_read16(USB_EP_NI7_TXINTERVAL)
-#define bfin_write_USB_EP_NI7_TXINTERVAL(val) bfin_write16(USB_EP_NI7_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI7_RXTYPE() bfin_read16(USB_EP_NI7_RXTYPE)
-#define bfin_write_USB_EP_NI7_RXTYPE(val) bfin_write16(USB_EP_NI7_RXTYPE, val)
-#define bfin_read_USB_EP_NI7_RXINTERVAL() bfin_read16(USB_EP_NI7_RXINTERVAL)
-#define bfin_write_USB_EP_NI7_RXINTERVAL(val) bfin_write16(USB_EP_NI7_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI7_TXCOUNT() bfin_read16(USB_EP_NI7_TXCOUNT)
-#define bfin_write_USB_EP_NI7_TXCOUNT(val) bfin_write16(USB_EP_NI7_TXCOUNT, val)
-#define bfin_read_USB_DMA_INTERRUPT() bfin_read16(USB_DMA_INTERRUPT)
-#define bfin_write_USB_DMA_INTERRUPT(val) bfin_write16(USB_DMA_INTERRUPT, val)
-
-/* USB Channel 0 Config Registers */
-
-#define bfin_read_USB_DMA0CONTROL() bfin_read16(USB_DMA0CONTROL)
-#define bfin_write_USB_DMA0CONTROL(val) bfin_write16(USB_DMA0CONTROL, val)
-#define bfin_read_USB_DMA0ADDRLOW() bfin_read16(USB_DMA0ADDRLOW)
-#define bfin_write_USB_DMA0ADDRLOW(val) bfin_write16(USB_DMA0ADDRLOW, val)
-#define bfin_read_USB_DMA0ADDRHIGH() bfin_read16(USB_DMA0ADDRHIGH)
-#define bfin_write_USB_DMA0ADDRHIGH(val) bfin_write16(USB_DMA0ADDRHIGH, val)
-#define bfin_read_USB_DMA0COUNTLOW() bfin_read16(USB_DMA0COUNTLOW)
-#define bfin_write_USB_DMA0COUNTLOW(val) bfin_write16(USB_DMA0COUNTLOW, val)
-#define bfin_read_USB_DMA0COUNTHIGH() bfin_read16(USB_DMA0COUNTHIGH)
-#define bfin_write_USB_DMA0COUNTHIGH(val) bfin_write16(USB_DMA0COUNTHIGH, val)
-
-/* USB Channel 1 Config Registers */
-
-#define bfin_read_USB_DMA1CONTROL() bfin_read16(USB_DMA1CONTROL)
-#define bfin_write_USB_DMA1CONTROL(val) bfin_write16(USB_DMA1CONTROL, val)
-#define bfin_read_USB_DMA1ADDRLOW() bfin_read16(USB_DMA1ADDRLOW)
-#define bfin_write_USB_DMA1ADDRLOW(val) bfin_write16(USB_DMA1ADDRLOW, val)
-#define bfin_read_USB_DMA1ADDRHIGH() bfin_read16(USB_DMA1ADDRHIGH)
-#define bfin_write_USB_DMA1ADDRHIGH(val) bfin_write16(USB_DMA1ADDRHIGH, val)
-#define bfin_read_USB_DMA1COUNTLOW() bfin_read16(USB_DMA1COUNTLOW)
-#define bfin_write_USB_DMA1COUNTLOW(val) bfin_write16(USB_DMA1COUNTLOW, val)
-#define bfin_read_USB_DMA1COUNTHIGH() bfin_read16(USB_DMA1COUNTHIGH)
-#define bfin_write_USB_DMA1COUNTHIGH(val) bfin_write16(USB_DMA1COUNTHIGH, val)
-
-/* USB Channel 2 Config Registers */
-
-#define bfin_read_USB_DMA2CONTROL() bfin_read16(USB_DMA2CONTROL)
-#define bfin_write_USB_DMA2CONTROL(val) bfin_write16(USB_DMA2CONTROL, val)
-#define bfin_read_USB_DMA2ADDRLOW() bfin_read16(USB_DMA2ADDRLOW)
-#define bfin_write_USB_DMA2ADDRLOW(val) bfin_write16(USB_DMA2ADDRLOW, val)
-#define bfin_read_USB_DMA2ADDRHIGH() bfin_read16(USB_DMA2ADDRHIGH)
-#define bfin_write_USB_DMA2ADDRHIGH(val) bfin_write16(USB_DMA2ADDRHIGH, val)
-#define bfin_read_USB_DMA2COUNTLOW() bfin_read16(USB_DMA2COUNTLOW)
-#define bfin_write_USB_DMA2COUNTLOW(val) bfin_write16(USB_DMA2COUNTLOW, val)
-#define bfin_read_USB_DMA2COUNTHIGH() bfin_read16(USB_DMA2COUNTHIGH)
-#define bfin_write_USB_DMA2COUNTHIGH(val) bfin_write16(USB_DMA2COUNTHIGH, val)
-
-/* USB Channel 3 Config Registers */
-
-#define bfin_read_USB_DMA3CONTROL() bfin_read16(USB_DMA3CONTROL)
-#define bfin_write_USB_DMA3CONTROL(val) bfin_write16(USB_DMA3CONTROL, val)
-#define bfin_read_USB_DMA3ADDRLOW() bfin_read16(USB_DMA3ADDRLOW)
-#define bfin_write_USB_DMA3ADDRLOW(val) bfin_write16(USB_DMA3ADDRLOW, val)
-#define bfin_read_USB_DMA3ADDRHIGH() bfin_read16(USB_DMA3ADDRHIGH)
-#define bfin_write_USB_DMA3ADDRHIGH(val) bfin_write16(USB_DMA3ADDRHIGH, val)
-#define bfin_read_USB_DMA3COUNTLOW() bfin_read16(USB_DMA3COUNTLOW)
-#define bfin_write_USB_DMA3COUNTLOW(val) bfin_write16(USB_DMA3COUNTLOW, val)
-#define bfin_read_USB_DMA3COUNTHIGH() bfin_read16(USB_DMA3COUNTHIGH)
-#define bfin_write_USB_DMA3COUNTHIGH(val) bfin_write16(USB_DMA3COUNTHIGH, val)
-
-/* USB Channel 4 Config Registers */
-
-#define bfin_read_USB_DMA4CONTROL() bfin_read16(USB_DMA4CONTROL)
-#define bfin_write_USB_DMA4CONTROL(val) bfin_write16(USB_DMA4CONTROL, val)
-#define bfin_read_USB_DMA4ADDRLOW() bfin_read16(USB_DMA4ADDRLOW)
-#define bfin_write_USB_DMA4ADDRLOW(val) bfin_write16(USB_DMA4ADDRLOW, val)
-#define bfin_read_USB_DMA4ADDRHIGH() bfin_read16(USB_DMA4ADDRHIGH)
-#define bfin_write_USB_DMA4ADDRHIGH(val) bfin_write16(USB_DMA4ADDRHIGH, val)
-#define bfin_read_USB_DMA4COUNTLOW() bfin_read16(USB_DMA4COUNTLOW)
-#define bfin_write_USB_DMA4COUNTLOW(val) bfin_write16(USB_DMA4COUNTLOW, val)
-#define bfin_read_USB_DMA4COUNTHIGH() bfin_read16(USB_DMA4COUNTHIGH)
-#define bfin_write_USB_DMA4COUNTHIGH(val) bfin_write16(USB_DMA4COUNTHIGH, val)
-
-/* USB Channel 5 Config Registers */
-
-#define bfin_read_USB_DMA5CONTROL() bfin_read16(USB_DMA5CONTROL)
-#define bfin_write_USB_DMA5CONTROL(val) bfin_write16(USB_DMA5CONTROL, val)
-#define bfin_read_USB_DMA5ADDRLOW() bfin_read16(USB_DMA5ADDRLOW)
-#define bfin_write_USB_DMA5ADDRLOW(val) bfin_write16(USB_DMA5ADDRLOW, val)
-#define bfin_read_USB_DMA5ADDRHIGH() bfin_read16(USB_DMA5ADDRHIGH)
-#define bfin_write_USB_DMA5ADDRHIGH(val) bfin_write16(USB_DMA5ADDRHIGH, val)
-#define bfin_read_USB_DMA5COUNTLOW() bfin_read16(USB_DMA5COUNTLOW)
-#define bfin_write_USB_DMA5COUNTLOW(val) bfin_write16(USB_DMA5COUNTLOW, val)
-#define bfin_read_USB_DMA5COUNTHIGH() bfin_read16(USB_DMA5COUNTHIGH)
-#define bfin_write_USB_DMA5COUNTHIGH(val) bfin_write16(USB_DMA5COUNTHIGH, val)
-
-/* USB Channel 6 Config Registers */
-
-#define bfin_read_USB_DMA6CONTROL() bfin_read16(USB_DMA6CONTROL)
-#define bfin_write_USB_DMA6CONTROL(val) bfin_write16(USB_DMA6CONTROL, val)
-#define bfin_read_USB_DMA6ADDRLOW() bfin_read16(USB_DMA6ADDRLOW)
-#define bfin_write_USB_DMA6ADDRLOW(val) bfin_write16(USB_DMA6ADDRLOW, val)
-#define bfin_read_USB_DMA6ADDRHIGH() bfin_read16(USB_DMA6ADDRHIGH)
-#define bfin_write_USB_DMA6ADDRHIGH(val) bfin_write16(USB_DMA6ADDRHIGH, val)
-#define bfin_read_USB_DMA6COUNTLOW() bfin_read16(USB_DMA6COUNTLOW)
-#define bfin_write_USB_DMA6COUNTLOW(val) bfin_write16(USB_DMA6COUNTLOW, val)
-#define bfin_read_USB_DMA6COUNTHIGH() bfin_read16(USB_DMA6COUNTHIGH)
-#define bfin_write_USB_DMA6COUNTHIGH(val) bfin_write16(USB_DMA6COUNTHIGH, val)
-
-/* USB Channel 7 Config Registers */
-
-#define bfin_read_USB_DMA7CONTROL() bfin_read16(USB_DMA7CONTROL)
-#define bfin_write_USB_DMA7CONTROL(val) bfin_write16(USB_DMA7CONTROL, val)
-#define bfin_read_USB_DMA7ADDRLOW() bfin_read16(USB_DMA7ADDRLOW)
-#define bfin_write_USB_DMA7ADDRLOW(val) bfin_write16(USB_DMA7ADDRLOW, val)
-#define bfin_read_USB_DMA7ADDRHIGH() bfin_read16(USB_DMA7ADDRHIGH)
-#define bfin_write_USB_DMA7ADDRHIGH(val) bfin_write16(USB_DMA7ADDRHIGH, val)
-#define bfin_read_USB_DMA7COUNTLOW() bfin_read16(USB_DMA7COUNTLOW)
-#define bfin_write_USB_DMA7COUNTLOW(val) bfin_write16(USB_DMA7COUNTLOW, val)
-#define bfin_read_USB_DMA7COUNTHIGH() bfin_read16(USB_DMA7COUNTHIGH)
-#define bfin_write_USB_DMA7COUNTHIGH(val) bfin_write16(USB_DMA7COUNTHIGH, val)
-
-/* Keybfin_read_()ad Registers */
-
-#define bfin_read_KPAD_CTL() bfin_read16(KPAD_CTL)
-#define bfin_write_KPAD_CTL(val) bfin_write16(KPAD_CTL, val)
-#define bfin_read_KPAD_PRESCALE() bfin_read16(KPAD_PRESCALE)
-#define bfin_write_KPAD_PRESCALE(val) bfin_write16(KPAD_PRESCALE, val)
-#define bfin_read_KPAD_MSEL() bfin_read16(KPAD_MSEL)
-#define bfin_write_KPAD_MSEL(val) bfin_write16(KPAD_MSEL, val)
-#define bfin_read_KPAD_ROWCOL() bfin_read16(KPAD_ROWCOL)
-#define bfin_write_KPAD_ROWCOL(val) bfin_write16(KPAD_ROWCOL, val)
-#define bfin_read_KPAD_STAT() bfin_read16(KPAD_STAT)
-#define bfin_write_KPAD_STAT(val) bfin_write16(KPAD_STAT, val)
-#define bfin_read_KPAD_SOFTEVAL() bfin_read16(KPAD_SOFTEVAL)
-#define bfin_write_KPAD_SOFTEVAL(val) bfin_write16(KPAD_SOFTEVAL, val)
-
-/* Pixel Combfin_read_()ositor (PIXC) Registers */
-
-#define bfin_read_PIXC_CTL() bfin_read16(PIXC_CTL)
-#define bfin_write_PIXC_CTL(val) bfin_write16(PIXC_CTL, val)
-#define bfin_read_PIXC_PPL() bfin_read16(PIXC_PPL)
-#define bfin_write_PIXC_PPL(val) bfin_write16(PIXC_PPL, val)
-#define bfin_read_PIXC_LPF() bfin_read16(PIXC_LPF)
-#define bfin_write_PIXC_LPF(val) bfin_write16(PIXC_LPF, val)
-#define bfin_read_PIXC_AHSTART() bfin_read16(PIXC_AHSTART)
-#define bfin_write_PIXC_AHSTART(val) bfin_write16(PIXC_AHSTART, val)
-#define bfin_read_PIXC_AHEND() bfin_read16(PIXC_AHEND)
-#define bfin_write_PIXC_AHEND(val) bfin_write16(PIXC_AHEND, val)
-#define bfin_read_PIXC_AVSTART() bfin_read16(PIXC_AVSTART)
-#define bfin_write_PIXC_AVSTART(val) bfin_write16(PIXC_AVSTART, val)
-#define bfin_read_PIXC_AVEND() bfin_read16(PIXC_AVEND)
-#define bfin_write_PIXC_AVEND(val) bfin_write16(PIXC_AVEND, val)
-#define bfin_read_PIXC_ATRANSP() bfin_read16(PIXC_ATRANSP)
-#define bfin_write_PIXC_ATRANSP(val) bfin_write16(PIXC_ATRANSP, val)
-#define bfin_read_PIXC_BHSTART() bfin_read16(PIXC_BHSTART)
-#define bfin_write_PIXC_BHSTART(val) bfin_write16(PIXC_BHSTART, val)
-#define bfin_read_PIXC_BHEND() bfin_read16(PIXC_BHEND)
-#define bfin_write_PIXC_BHEND(val) bfin_write16(PIXC_BHEND, val)
-#define bfin_read_PIXC_BVSTART() bfin_read16(PIXC_BVSTART)
-#define bfin_write_PIXC_BVSTART(val) bfin_write16(PIXC_BVSTART, val)
-#define bfin_read_PIXC_BVEND() bfin_read16(PIXC_BVEND)
-#define bfin_write_PIXC_BVEND(val) bfin_write16(PIXC_BVEND, val)
-#define bfin_read_PIXC_BTRANSP() bfin_read16(PIXC_BTRANSP)
-#define bfin_write_PIXC_BTRANSP(val) bfin_write16(PIXC_BTRANSP, val)
-#define bfin_read_PIXC_INTRSTAT() bfin_read16(PIXC_INTRSTAT)
-#define bfin_write_PIXC_INTRSTAT(val) bfin_write16(PIXC_INTRSTAT, val)
-#define bfin_read_PIXC_RYCON() bfin_read32(PIXC_RYCON)
-#define bfin_write_PIXC_RYCON(val) bfin_write32(PIXC_RYCON, val)
-#define bfin_read_PIXC_GUCON() bfin_read32(PIXC_GUCON)
-#define bfin_write_PIXC_GUCON(val) bfin_write32(PIXC_GUCON, val)
-#define bfin_read_PIXC_BVCON() bfin_read32(PIXC_BVCON)
-#define bfin_write_PIXC_BVCON(val) bfin_write32(PIXC_BVCON, val)
-#define bfin_read_PIXC_CCBIAS() bfin_read32(PIXC_CCBIAS)
-#define bfin_write_PIXC_CCBIAS(val) bfin_write32(PIXC_CCBIAS, val)
-#define bfin_read_PIXC_TC() bfin_read32(PIXC_TC)
-#define bfin_write_PIXC_TC(val) bfin_write32(PIXC_TC, val)
-
-/* Handshake MDMA 0 Registers */
-
-#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
-
-/* Handshake MDMA 1 Registers */
-
-#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
-
#endif /* _CDEF_BF548_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
index 34c84c7fb25..80201ed41f8 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
@@ -18,165 +18,8 @@
/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
#include "cdefBF54x_base.h"
-/* The following are the #defines needed by ADSP-BF549 that are not in the common header */
-
-/* Timer Registers */
-
-#define bfin_read_TIMER8_CONFIG() bfin_read16(TIMER8_CONFIG)
-#define bfin_write_TIMER8_CONFIG(val) bfin_write16(TIMER8_CONFIG, val)
-#define bfin_read_TIMER8_COUNTER() bfin_read32(TIMER8_COUNTER)
-#define bfin_write_TIMER8_COUNTER(val) bfin_write32(TIMER8_COUNTER, val)
-#define bfin_read_TIMER8_PERIOD() bfin_read32(TIMER8_PERIOD)
-#define bfin_write_TIMER8_PERIOD(val) bfin_write32(TIMER8_PERIOD, val)
-#define bfin_read_TIMER8_WIDTH() bfin_read32(TIMER8_WIDTH)
-#define bfin_write_TIMER8_WIDTH(val) bfin_write32(TIMER8_WIDTH, val)
-#define bfin_read_TIMER9_CONFIG() bfin_read16(TIMER9_CONFIG)
-#define bfin_write_TIMER9_CONFIG(val) bfin_write16(TIMER9_CONFIG, val)
-#define bfin_read_TIMER9_COUNTER() bfin_read32(TIMER9_COUNTER)
-#define bfin_write_TIMER9_COUNTER(val) bfin_write32(TIMER9_COUNTER, val)
-#define bfin_read_TIMER9_PERIOD() bfin_read32(TIMER9_PERIOD)
-#define bfin_write_TIMER9_PERIOD(val) bfin_write32(TIMER9_PERIOD, val)
-#define bfin_read_TIMER9_WIDTH() bfin_read32(TIMER9_WIDTH)
-#define bfin_write_TIMER9_WIDTH(val) bfin_write32(TIMER9_WIDTH, val)
-#define bfin_read_TIMER10_CONFIG() bfin_read16(TIMER10_CONFIG)
-#define bfin_write_TIMER10_CONFIG(val) bfin_write16(TIMER10_CONFIG, val)
-#define bfin_read_TIMER10_COUNTER() bfin_read32(TIMER10_COUNTER)
-#define bfin_write_TIMER10_COUNTER(val) bfin_write32(TIMER10_COUNTER, val)
-#define bfin_read_TIMER10_PERIOD() bfin_read32(TIMER10_PERIOD)
-#define bfin_write_TIMER10_PERIOD(val) bfin_write32(TIMER10_PERIOD, val)
-#define bfin_read_TIMER10_WIDTH() bfin_read32(TIMER10_WIDTH)
-#define bfin_write_TIMER10_WIDTH(val) bfin_write32(TIMER10_WIDTH, val)
-
-/* Timer Groubfin_read_() of 3 */
-
-#define bfin_read_TIMER_ENABLE1() bfin_read16(TIMER_ENABLE1)
-#define bfin_write_TIMER_ENABLE1(val) bfin_write16(TIMER_ENABLE1, val)
-#define bfin_read_TIMER_DISABLE1() bfin_read16(TIMER_DISABLE1)
-#define bfin_write_TIMER_DISABLE1(val) bfin_write16(TIMER_DISABLE1, val)
-#define bfin_read_TIMER_STATUS1() bfin_read32(TIMER_STATUS1)
-#define bfin_write_TIMER_STATUS1(val) bfin_write32(TIMER_STATUS1, val)
-
-/* SPORT0 Registers */
-
-#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
-
-/* EPPI0 Registers */
-
-#define bfin_read_EPPI0_STATUS() bfin_read16(EPPI0_STATUS)
-#define bfin_write_EPPI0_STATUS(val) bfin_write16(EPPI0_STATUS, val)
-#define bfin_read_EPPI0_HCOUNT() bfin_read16(EPPI0_HCOUNT)
-#define bfin_write_EPPI0_HCOUNT(val) bfin_write16(EPPI0_HCOUNT, val)
-#define bfin_read_EPPI0_HDELAY() bfin_read16(EPPI0_HDELAY)
-#define bfin_write_EPPI0_HDELAY(val) bfin_write16(EPPI0_HDELAY, val)
-#define bfin_read_EPPI0_VCOUNT() bfin_read16(EPPI0_VCOUNT)
-#define bfin_write_EPPI0_VCOUNT(val) bfin_write16(EPPI0_VCOUNT, val)
-#define bfin_read_EPPI0_VDELAY() bfin_read16(EPPI0_VDELAY)
-#define bfin_write_EPPI0_VDELAY(val) bfin_write16(EPPI0_VDELAY, val)
-#define bfin_read_EPPI0_FRAME() bfin_read16(EPPI0_FRAME)
-#define bfin_write_EPPI0_FRAME(val) bfin_write16(EPPI0_FRAME, val)
-#define bfin_read_EPPI0_LINE() bfin_read16(EPPI0_LINE)
-#define bfin_write_EPPI0_LINE(val) bfin_write16(EPPI0_LINE, val)
-#define bfin_read_EPPI0_CLKDIV() bfin_read16(EPPI0_CLKDIV)
-#define bfin_write_EPPI0_CLKDIV(val) bfin_write16(EPPI0_CLKDIV, val)
-#define bfin_read_EPPI0_CONTROL() bfin_read32(EPPI0_CONTROL)
-#define bfin_write_EPPI0_CONTROL(val) bfin_write32(EPPI0_CONTROL, val)
-#define bfin_read_EPPI0_FS1W_HBL() bfin_read32(EPPI0_FS1W_HBL)
-#define bfin_write_EPPI0_FS1W_HBL(val) bfin_write32(EPPI0_FS1W_HBL, val)
-#define bfin_read_EPPI0_FS1P_AVPL() bfin_read32(EPPI0_FS1P_AVPL)
-#define bfin_write_EPPI0_FS1P_AVPL(val) bfin_write32(EPPI0_FS1P_AVPL, val)
-#define bfin_read_EPPI0_FS2W_LVB() bfin_read32(EPPI0_FS2W_LVB)
-#define bfin_write_EPPI0_FS2W_LVB(val) bfin_write32(EPPI0_FS2W_LVB, val)
-#define bfin_read_EPPI0_FS2P_LAVF() bfin_read32(EPPI0_FS2P_LAVF)
-#define bfin_write_EPPI0_FS2P_LAVF(val) bfin_write32(EPPI0_FS2P_LAVF, val)
-#define bfin_read_EPPI0_CLIP() bfin_read32(EPPI0_CLIP)
-#define bfin_write_EPPI0_CLIP(val) bfin_write32(EPPI0_CLIP, val)
-
-/* UART2 Registers */
-
-#define bfin_read_UART2_DLL() bfin_read16(UART2_DLL)
-#define bfin_write_UART2_DLL(val) bfin_write16(UART2_DLL, val)
-#define bfin_read_UART2_DLH() bfin_read16(UART2_DLH)
-#define bfin_write_UART2_DLH(val) bfin_write16(UART2_DLH, val)
-#define bfin_read_UART2_GCTL() bfin_read16(UART2_GCTL)
-#define bfin_write_UART2_GCTL(val) bfin_write16(UART2_GCTL, val)
-#define bfin_read_UART2_LCR() bfin_read16(UART2_LCR)
-#define bfin_write_UART2_LCR(val) bfin_write16(UART2_LCR, val)
-#define bfin_read_UART2_MCR() bfin_read16(UART2_MCR)
-#define bfin_write_UART2_MCR(val) bfin_write16(UART2_MCR, val)
-#define bfin_read_UART2_LSR() bfin_read16(UART2_LSR)
-#define bfin_write_UART2_LSR(val) bfin_write16(UART2_LSR, val)
-#define bfin_read_UART2_MSR() bfin_read16(UART2_MSR)
-#define bfin_write_UART2_MSR(val) bfin_write16(UART2_MSR, val)
-#define bfin_read_UART2_SCR() bfin_read16(UART2_SCR)
-#define bfin_write_UART2_SCR(val) bfin_write16(UART2_SCR, val)
-#define bfin_read_UART2_IER_SET() bfin_read16(UART2_IER_SET)
-#define bfin_write_UART2_IER_SET(val) bfin_write16(UART2_IER_SET, val)
-#define bfin_read_UART2_IER_CLEAR() bfin_read16(UART2_IER_CLEAR)
-#define bfin_write_UART2_IER_CLEAR(val) bfin_write16(UART2_IER_CLEAR, val)
-#define bfin_read_UART2_RBR() bfin_read16(UART2_RBR)
-#define bfin_write_UART2_RBR(val) bfin_write16(UART2_RBR, val)
-
-/* Two Wire Interface Registers (TWI1) */
-
-/* SPI2 Registers */
-
-#define bfin_read_SPI2_CTL() bfin_read16(SPI2_CTL)
-#define bfin_write_SPI2_CTL(val) bfin_write16(SPI2_CTL, val)
-#define bfin_read_SPI2_FLG() bfin_read16(SPI2_FLG)
-#define bfin_write_SPI2_FLG(val) bfin_write16(SPI2_FLG, val)
-#define bfin_read_SPI2_STAT() bfin_read16(SPI2_STAT)
-#define bfin_write_SPI2_STAT(val) bfin_write16(SPI2_STAT, val)
-#define bfin_read_SPI2_TDBR() bfin_read16(SPI2_TDBR)
-#define bfin_write_SPI2_TDBR(val) bfin_write16(SPI2_TDBR, val)
-#define bfin_read_SPI2_RDBR() bfin_read16(SPI2_RDBR)
-#define bfin_write_SPI2_RDBR(val) bfin_write16(SPI2_RDBR, val)
-#define bfin_read_SPI2_BAUD() bfin_read16(SPI2_BAUD)
-#define bfin_write_SPI2_BAUD(val) bfin_write16(SPI2_BAUD, val)
-#define bfin_read_SPI2_SHADOW() bfin_read16(SPI2_SHADOW)
-#define bfin_write_SPI2_SHADOW(val) bfin_write16(SPI2_SHADOW, val)
+/* The BF549 is like the BF544, but has MXVR */
+#include "cdefBF547.h"
/* MXVR Registers */
@@ -464,1376 +307,4 @@
#define bfin_read_MXVR_SCLK_CNT() bfin_read16(MXVR_SCLK_CNT)
#define bfin_write_MXVR_SCLK_CNT(val) bfin_write16(MXVR_SCLK_CNT, val)
-/* CAN Controller 1 Config 1 Registers */
-
-#define bfin_read_CAN1_MC1() bfin_read16(CAN1_MC1)
-#define bfin_write_CAN1_MC1(val) bfin_write16(CAN1_MC1, val)
-#define bfin_read_CAN1_MD1() bfin_read16(CAN1_MD1)
-#define bfin_write_CAN1_MD1(val) bfin_write16(CAN1_MD1, val)
-#define bfin_read_CAN1_TRS1() bfin_read16(CAN1_TRS1)
-#define bfin_write_CAN1_TRS1(val) bfin_write16(CAN1_TRS1, val)
-#define bfin_read_CAN1_TRR1() bfin_read16(CAN1_TRR1)
-#define bfin_write_CAN1_TRR1(val) bfin_write16(CAN1_TRR1, val)
-#define bfin_read_CAN1_TA1() bfin_read16(CAN1_TA1)
-#define bfin_write_CAN1_TA1(val) bfin_write16(CAN1_TA1, val)
-#define bfin_read_CAN1_AA1() bfin_read16(CAN1_AA1)
-#define bfin_write_CAN1_AA1(val) bfin_write16(CAN1_AA1, val)
-#define bfin_read_CAN1_RMP1() bfin_read16(CAN1_RMP1)
-#define bfin_write_CAN1_RMP1(val) bfin_write16(CAN1_RMP1, val)
-#define bfin_read_CAN1_RML1() bfin_read16(CAN1_RML1)
-#define bfin_write_CAN1_RML1(val) bfin_write16(CAN1_RML1, val)
-#define bfin_read_CAN1_MBTIF1() bfin_read16(CAN1_MBTIF1)
-#define bfin_write_CAN1_MBTIF1(val) bfin_write16(CAN1_MBTIF1, val)
-#define bfin_read_CAN1_MBRIF1() bfin_read16(CAN1_MBRIF1)
-#define bfin_write_CAN1_MBRIF1(val) bfin_write16(CAN1_MBRIF1, val)
-#define bfin_read_CAN1_MBIM1() bfin_read16(CAN1_MBIM1)
-#define bfin_write_CAN1_MBIM1(val) bfin_write16(CAN1_MBIM1, val)
-#define bfin_read_CAN1_RFH1() bfin_read16(CAN1_RFH1)
-#define bfin_write_CAN1_RFH1(val) bfin_write16(CAN1_RFH1, val)
-#define bfin_read_CAN1_OPSS1() bfin_read16(CAN1_OPSS1)
-#define bfin_write_CAN1_OPSS1(val) bfin_write16(CAN1_OPSS1, val)
-
-/* CAN Controller 1 Config 2 Registers */
-
-#define bfin_read_CAN1_MC2() bfin_read16(CAN1_MC2)
-#define bfin_write_CAN1_MC2(val) bfin_write16(CAN1_MC2, val)
-#define bfin_read_CAN1_MD2() bfin_read16(CAN1_MD2)
-#define bfin_write_CAN1_MD2(val) bfin_write16(CAN1_MD2, val)
-#define bfin_read_CAN1_TRS2() bfin_read16(CAN1_TRS2)
-#define bfin_write_CAN1_TRS2(val) bfin_write16(CAN1_TRS2, val)
-#define bfin_read_CAN1_TRR2() bfin_read16(CAN1_TRR2)
-#define bfin_write_CAN1_TRR2(val) bfin_write16(CAN1_TRR2, val)
-#define bfin_read_CAN1_TA2() bfin_read16(CAN1_TA2)
-#define bfin_write_CAN1_TA2(val) bfin_write16(CAN1_TA2, val)
-#define bfin_read_CAN1_AA2() bfin_read16(CAN1_AA2)
-#define bfin_write_CAN1_AA2(val) bfin_write16(CAN1_AA2, val)
-#define bfin_read_CAN1_RMP2() bfin_read16(CAN1_RMP2)
-#define bfin_write_CAN1_RMP2(val) bfin_write16(CAN1_RMP2, val)
-#define bfin_read_CAN1_RML2() bfin_read16(CAN1_RML2)
-#define bfin_write_CAN1_RML2(val) bfin_write16(CAN1_RML2, val)
-#define bfin_read_CAN1_MBTIF2() bfin_read16(CAN1_MBTIF2)
-#define bfin_write_CAN1_MBTIF2(val) bfin_write16(CAN1_MBTIF2, val)
-#define bfin_read_CAN1_MBRIF2() bfin_read16(CAN1_MBRIF2)
-#define bfin_write_CAN1_MBRIF2(val) bfin_write16(CAN1_MBRIF2, val)
-#define bfin_read_CAN1_MBIM2() bfin_read16(CAN1_MBIM2)
-#define bfin_write_CAN1_MBIM2(val) bfin_write16(CAN1_MBIM2, val)
-#define bfin_read_CAN1_RFH2() bfin_read16(CAN1_RFH2)
-#define bfin_write_CAN1_RFH2(val) bfin_write16(CAN1_RFH2, val)
-#define bfin_read_CAN1_OPSS2() bfin_read16(CAN1_OPSS2)
-#define bfin_write_CAN1_OPSS2(val) bfin_write16(CAN1_OPSS2, val)
-
-/* CAN Controller 1 Clock/Interrubfin_read_()t/Counter Registers */
-
-#define bfin_read_CAN1_CLOCK() bfin_read16(CAN1_CLOCK)
-#define bfin_write_CAN1_CLOCK(val) bfin_write16(CAN1_CLOCK, val)
-#define bfin_read_CAN1_TIMING() bfin_read16(CAN1_TIMING)
-#define bfin_write_CAN1_TIMING(val) bfin_write16(CAN1_TIMING, val)
-#define bfin_read_CAN1_DEBUG() bfin_read16(CAN1_DEBUG)
-#define bfin_write_CAN1_DEBUG(val) bfin_write16(CAN1_DEBUG, val)
-#define bfin_read_CAN1_STATUS() bfin_read16(CAN1_STATUS)
-#define bfin_write_CAN1_STATUS(val) bfin_write16(CAN1_STATUS, val)
-#define bfin_read_CAN1_CEC() bfin_read16(CAN1_CEC)
-#define bfin_write_CAN1_CEC(val) bfin_write16(CAN1_CEC, val)
-#define bfin_read_CAN1_GIS() bfin_read16(CAN1_GIS)
-#define bfin_write_CAN1_GIS(val) bfin_write16(CAN1_GIS, val)
-#define bfin_read_CAN1_GIM() bfin_read16(CAN1_GIM)
-#define bfin_write_CAN1_GIM(val) bfin_write16(CAN1_GIM, val)
-#define bfin_read_CAN1_GIF() bfin_read16(CAN1_GIF)
-#define bfin_write_CAN1_GIF(val) bfin_write16(CAN1_GIF, val)
-#define bfin_read_CAN1_CONTROL() bfin_read16(CAN1_CONTROL)
-#define bfin_write_CAN1_CONTROL(val) bfin_write16(CAN1_CONTROL, val)
-#define bfin_read_CAN1_INTR() bfin_read16(CAN1_INTR)
-#define bfin_write_CAN1_INTR(val) bfin_write16(CAN1_INTR, val)
-#define bfin_read_CAN1_MBTD() bfin_read16(CAN1_MBTD)
-#define bfin_write_CAN1_MBTD(val) bfin_write16(CAN1_MBTD, val)
-#define bfin_read_CAN1_EWR() bfin_read16(CAN1_EWR)
-#define bfin_write_CAN1_EWR(val) bfin_write16(CAN1_EWR, val)
-#define bfin_read_CAN1_ESR() bfin_read16(CAN1_ESR)
-#define bfin_write_CAN1_ESR(val) bfin_write16(CAN1_ESR, val)
-#define bfin_read_CAN1_UCCNT() bfin_read16(CAN1_UCCNT)
-#define bfin_write_CAN1_UCCNT(val) bfin_write16(CAN1_UCCNT, val)
-#define bfin_read_CAN1_UCRC() bfin_read16(CAN1_UCRC)
-#define bfin_write_CAN1_UCRC(val) bfin_write16(CAN1_UCRC, val)
-#define bfin_read_CAN1_UCCNF() bfin_read16(CAN1_UCCNF)
-#define bfin_write_CAN1_UCCNF(val) bfin_write16(CAN1_UCCNF, val)
-
-/* CAN Controller 1 Mailbox Accebfin_read_()tance Registers */
-
-#define bfin_read_CAN1_AM00L() bfin_read16(CAN1_AM00L)
-#define bfin_write_CAN1_AM00L(val) bfin_write16(CAN1_AM00L, val)
-#define bfin_read_CAN1_AM00H() bfin_read16(CAN1_AM00H)
-#define bfin_write_CAN1_AM00H(val) bfin_write16(CAN1_AM00H, val)
-#define bfin_read_CAN1_AM01L() bfin_read16(CAN1_AM01L)
-#define bfin_write_CAN1_AM01L(val) bfin_write16(CAN1_AM01L, val)
-#define bfin_read_CAN1_AM01H() bfin_read16(CAN1_AM01H)
-#define bfin_write_CAN1_AM01H(val) bfin_write16(CAN1_AM01H, val)
-#define bfin_read_CAN1_AM02L() bfin_read16(CAN1_AM02L)
-#define bfin_write_CAN1_AM02L(val) bfin_write16(CAN1_AM02L, val)
-#define bfin_read_CAN1_AM02H() bfin_read16(CAN1_AM02H)
-#define bfin_write_CAN1_AM02H(val) bfin_write16(CAN1_AM02H, val)
-#define bfin_read_CAN1_AM03L() bfin_read16(CAN1_AM03L)
-#define bfin_write_CAN1_AM03L(val) bfin_write16(CAN1_AM03L, val)
-#define bfin_read_CAN1_AM03H() bfin_read16(CAN1_AM03H)
-#define bfin_write_CAN1_AM03H(val) bfin_write16(CAN1_AM03H, val)
-#define bfin_read_CAN1_AM04L() bfin_read16(CAN1_AM04L)
-#define bfin_write_CAN1_AM04L(val) bfin_write16(CAN1_AM04L, val)
-#define bfin_read_CAN1_AM04H() bfin_read16(CAN1_AM04H)
-#define bfin_write_CAN1_AM04H(val) bfin_write16(CAN1_AM04H, val)
-#define bfin_read_CAN1_AM05L() bfin_read16(CAN1_AM05L)
-#define bfin_write_CAN1_AM05L(val) bfin_write16(CAN1_AM05L, val)
-#define bfin_read_CAN1_AM05H() bfin_read16(CAN1_AM05H)
-#define bfin_write_CAN1_AM05H(val) bfin_write16(CAN1_AM05H, val)
-#define bfin_read_CAN1_AM06L() bfin_read16(CAN1_AM06L)
-#define bfin_write_CAN1_AM06L(val) bfin_write16(CAN1_AM06L, val)
-#define bfin_read_CAN1_AM06H() bfin_read16(CAN1_AM06H)
-#define bfin_write_CAN1_AM06H(val) bfin_write16(CAN1_AM06H, val)
-#define bfin_read_CAN1_AM07L() bfin_read16(CAN1_AM07L)
-#define bfin_write_CAN1_AM07L(val) bfin_write16(CAN1_AM07L, val)
-#define bfin_read_CAN1_AM07H() bfin_read16(CAN1_AM07H)
-#define bfin_write_CAN1_AM07H(val) bfin_write16(CAN1_AM07H, val)
-#define bfin_read_CAN1_AM08L() bfin_read16(CAN1_AM08L)
-#define bfin_write_CAN1_AM08L(val) bfin_write16(CAN1_AM08L, val)
-#define bfin_read_CAN1_AM08H() bfin_read16(CAN1_AM08H)
-#define bfin_write_CAN1_AM08H(val) bfin_write16(CAN1_AM08H, val)
-#define bfin_read_CAN1_AM09L() bfin_read16(CAN1_AM09L)
-#define bfin_write_CAN1_AM09L(val) bfin_write16(CAN1_AM09L, val)
-#define bfin_read_CAN1_AM09H() bfin_read16(CAN1_AM09H)
-#define bfin_write_CAN1_AM09H(val) bfin_write16(CAN1_AM09H, val)
-#define bfin_read_CAN1_AM10L() bfin_read16(CAN1_AM10L)
-#define bfin_write_CAN1_AM10L(val) bfin_write16(CAN1_AM10L, val)
-#define bfin_read_CAN1_AM10H() bfin_read16(CAN1_AM10H)
-#define bfin_write_CAN1_AM10H(val) bfin_write16(CAN1_AM10H, val)
-#define bfin_read_CAN1_AM11L() bfin_read16(CAN1_AM11L)
-#define bfin_write_CAN1_AM11L(val) bfin_write16(CAN1_AM11L, val)
-#define bfin_read_CAN1_AM11H() bfin_read16(CAN1_AM11H)
-#define bfin_write_CAN1_AM11H(val) bfin_write16(CAN1_AM11H, val)
-#define bfin_read_CAN1_AM12L() bfin_read16(CAN1_AM12L)
-#define bfin_write_CAN1_AM12L(val) bfin_write16(CAN1_AM12L, val)
-#define bfin_read_CAN1_AM12H() bfin_read16(CAN1_AM12H)
-#define bfin_write_CAN1_AM12H(val) bfin_write16(CAN1_AM12H, val)
-#define bfin_read_CAN1_AM13L() bfin_read16(CAN1_AM13L)
-#define bfin_write_CAN1_AM13L(val) bfin_write16(CAN1_AM13L, val)
-#define bfin_read_CAN1_AM13H() bfin_read16(CAN1_AM13H)
-#define bfin_write_CAN1_AM13H(val) bfin_write16(CAN1_AM13H, val)
-#define bfin_read_CAN1_AM14L() bfin_read16(CAN1_AM14L)
-#define bfin_write_CAN1_AM14L(val) bfin_write16(CAN1_AM14L, val)
-#define bfin_read_CAN1_AM14H() bfin_read16(CAN1_AM14H)
-#define bfin_write_CAN1_AM14H(val) bfin_write16(CAN1_AM14H, val)
-#define bfin_read_CAN1_AM15L() bfin_read16(CAN1_AM15L)
-#define bfin_write_CAN1_AM15L(val) bfin_write16(CAN1_AM15L, val)
-#define bfin_read_CAN1_AM15H() bfin_read16(CAN1_AM15H)
-#define bfin_write_CAN1_AM15H(val) bfin_write16(CAN1_AM15H, val)
-
-/* CAN Controller 1 Mailbox Accebfin_read_()tance Registers */
-
-#define bfin_read_CAN1_AM16L() bfin_read16(CAN1_AM16L)
-#define bfin_write_CAN1_AM16L(val) bfin_write16(CAN1_AM16L, val)
-#define bfin_read_CAN1_AM16H() bfin_read16(CAN1_AM16H)
-#define bfin_write_CAN1_AM16H(val) bfin_write16(CAN1_AM16H, val)
-#define bfin_read_CAN1_AM17L() bfin_read16(CAN1_AM17L)
-#define bfin_write_CAN1_AM17L(val) bfin_write16(CAN1_AM17L, val)
-#define bfin_read_CAN1_AM17H() bfin_read16(CAN1_AM17H)
-#define bfin_write_CAN1_AM17H(val) bfin_write16(CAN1_AM17H, val)
-#define bfin_read_CAN1_AM18L() bfin_read16(CAN1_AM18L)
-#define bfin_write_CAN1_AM18L(val) bfin_write16(CAN1_AM18L, val)
-#define bfin_read_CAN1_AM18H() bfin_read16(CAN1_AM18H)
-#define bfin_write_CAN1_AM18H(val) bfin_write16(CAN1_AM18H, val)
-#define bfin_read_CAN1_AM19L() bfin_read16(CAN1_AM19L)
-#define bfin_write_CAN1_AM19L(val) bfin_write16(CAN1_AM19L, val)
-#define bfin_read_CAN1_AM19H() bfin_read16(CAN1_AM19H)
-#define bfin_write_CAN1_AM19H(val) bfin_write16(CAN1_AM19H, val)
-#define bfin_read_CAN1_AM20L() bfin_read16(CAN1_AM20L)
-#define bfin_write_CAN1_AM20L(val) bfin_write16(CAN1_AM20L, val)
-#define bfin_read_CAN1_AM20H() bfin_read16(CAN1_AM20H)
-#define bfin_write_CAN1_AM20H(val) bfin_write16(CAN1_AM20H, val)
-#define bfin_read_CAN1_AM21L() bfin_read16(CAN1_AM21L)
-#define bfin_write_CAN1_AM21L(val) bfin_write16(CAN1_AM21L, val)
-#define bfin_read_CAN1_AM21H() bfin_read16(CAN1_AM21H)
-#define bfin_write_CAN1_AM21H(val) bfin_write16(CAN1_AM21H, val)
-#define bfin_read_CAN1_AM22L() bfin_read16(CAN1_AM22L)
-#define bfin_write_CAN1_AM22L(val) bfin_write16(CAN1_AM22L, val)
-#define bfin_read_CAN1_AM22H() bfin_read16(CAN1_AM22H)
-#define bfin_write_CAN1_AM22H(val) bfin_write16(CAN1_AM22H, val)
-#define bfin_read_CAN1_AM23L() bfin_read16(CAN1_AM23L)
-#define bfin_write_CAN1_AM23L(val) bfin_write16(CAN1_AM23L, val)
-#define bfin_read_CAN1_AM23H() bfin_read16(CAN1_AM23H)
-#define bfin_write_CAN1_AM23H(val) bfin_write16(CAN1_AM23H, val)
-#define bfin_read_CAN1_AM24L() bfin_read16(CAN1_AM24L)
-#define bfin_write_CAN1_AM24L(val) bfin_write16(CAN1_AM24L, val)
-#define bfin_read_CAN1_AM24H() bfin_read16(CAN1_AM24H)
-#define bfin_write_CAN1_AM24H(val) bfin_write16(CAN1_AM24H, val)
-#define bfin_read_CAN1_AM25L() bfin_read16(CAN1_AM25L)
-#define bfin_write_CAN1_AM25L(val) bfin_write16(CAN1_AM25L, val)
-#define bfin_read_CAN1_AM25H() bfin_read16(CAN1_AM25H)
-#define bfin_write_CAN1_AM25H(val) bfin_write16(CAN1_AM25H, val)
-#define bfin_read_CAN1_AM26L() bfin_read16(CAN1_AM26L)
-#define bfin_write_CAN1_AM26L(val) bfin_write16(CAN1_AM26L, val)
-#define bfin_read_CAN1_AM26H() bfin_read16(CAN1_AM26H)
-#define bfin_write_CAN1_AM26H(val) bfin_write16(CAN1_AM26H, val)
-#define bfin_read_CAN1_AM27L() bfin_read16(CAN1_AM27L)
-#define bfin_write_CAN1_AM27L(val) bfin_write16(CAN1_AM27L, val)
-#define bfin_read_CAN1_AM27H() bfin_read16(CAN1_AM27H)
-#define bfin_write_CAN1_AM27H(val) bfin_write16(CAN1_AM27H, val)
-#define bfin_read_CAN1_AM28L() bfin_read16(CAN1_AM28L)
-#define bfin_write_CAN1_AM28L(val) bfin_write16(CAN1_AM28L, val)
-#define bfin_read_CAN1_AM28H() bfin_read16(CAN1_AM28H)
-#define bfin_write_CAN1_AM28H(val) bfin_write16(CAN1_AM28H, val)
-#define bfin_read_CAN1_AM29L() bfin_read16(CAN1_AM29L)
-#define bfin_write_CAN1_AM29L(val) bfin_write16(CAN1_AM29L, val)
-#define bfin_read_CAN1_AM29H() bfin_read16(CAN1_AM29H)
-#define bfin_write_CAN1_AM29H(val) bfin_write16(CAN1_AM29H, val)
-#define bfin_read_CAN1_AM30L() bfin_read16(CAN1_AM30L)
-#define bfin_write_CAN1_AM30L(val) bfin_write16(CAN1_AM30L, val)
-#define bfin_read_CAN1_AM30H() bfin_read16(CAN1_AM30H)
-#define bfin_write_CAN1_AM30H(val) bfin_write16(CAN1_AM30H, val)
-#define bfin_read_CAN1_AM31L() bfin_read16(CAN1_AM31L)
-#define bfin_write_CAN1_AM31L(val) bfin_write16(CAN1_AM31L, val)
-#define bfin_read_CAN1_AM31H() bfin_read16(CAN1_AM31H)
-#define bfin_write_CAN1_AM31H(val) bfin_write16(CAN1_AM31H, val)
-
-/* CAN Controller 1 Mailbox Data Registers */
-
-#define bfin_read_CAN1_MB00_DATA0() bfin_read16(CAN1_MB00_DATA0)
-#define bfin_write_CAN1_MB00_DATA0(val) bfin_write16(CAN1_MB00_DATA0, val)
-#define bfin_read_CAN1_MB00_DATA1() bfin_read16(CAN1_MB00_DATA1)
-#define bfin_write_CAN1_MB00_DATA1(val) bfin_write16(CAN1_MB00_DATA1, val)
-#define bfin_read_CAN1_MB00_DATA2() bfin_read16(CAN1_MB00_DATA2)
-#define bfin_write_CAN1_MB00_DATA2(val) bfin_write16(CAN1_MB00_DATA2, val)
-#define bfin_read_CAN1_MB00_DATA3() bfin_read16(CAN1_MB00_DATA3)
-#define bfin_write_CAN1_MB00_DATA3(val) bfin_write16(CAN1_MB00_DATA3, val)
-#define bfin_read_CAN1_MB00_LENGTH() bfin_read16(CAN1_MB00_LENGTH)
-#define bfin_write_CAN1_MB00_LENGTH(val) bfin_write16(CAN1_MB00_LENGTH, val)
-#define bfin_read_CAN1_MB00_TIMESTAMP() bfin_read16(CAN1_MB00_TIMESTAMP)
-#define bfin_write_CAN1_MB00_TIMESTAMP(val) bfin_write16(CAN1_MB00_TIMESTAMP, val)
-#define bfin_read_CAN1_MB00_ID0() bfin_read16(CAN1_MB00_ID0)
-#define bfin_write_CAN1_MB00_ID0(val) bfin_write16(CAN1_MB00_ID0, val)
-#define bfin_read_CAN1_MB00_ID1() bfin_read16(CAN1_MB00_ID1)
-#define bfin_write_CAN1_MB00_ID1(val) bfin_write16(CAN1_MB00_ID1, val)
-#define bfin_read_CAN1_MB01_DATA0() bfin_read16(CAN1_MB01_DATA0)
-#define bfin_write_CAN1_MB01_DATA0(val) bfin_write16(CAN1_MB01_DATA0, val)
-#define bfin_read_CAN1_MB01_DATA1() bfin_read16(CAN1_MB01_DATA1)
-#define bfin_write_CAN1_MB01_DATA1(val) bfin_write16(CAN1_MB01_DATA1, val)
-#define bfin_read_CAN1_MB01_DATA2() bfin_read16(CAN1_MB01_DATA2)
-#define bfin_write_CAN1_MB01_DATA2(val) bfin_write16(CAN1_MB01_DATA2, val)
-#define bfin_read_CAN1_MB01_DATA3() bfin_read16(CAN1_MB01_DATA3)
-#define bfin_write_CAN1_MB01_DATA3(val) bfin_write16(CAN1_MB01_DATA3, val)
-#define bfin_read_CAN1_MB01_LENGTH() bfin_read16(CAN1_MB01_LENGTH)
-#define bfin_write_CAN1_MB01_LENGTH(val) bfin_write16(CAN1_MB01_LENGTH, val)
-#define bfin_read_CAN1_MB01_TIMESTAMP() bfin_read16(CAN1_MB01_TIMESTAMP)
-#define bfin_write_CAN1_MB01_TIMESTAMP(val) bfin_write16(CAN1_MB01_TIMESTAMP, val)
-#define bfin_read_CAN1_MB01_ID0() bfin_read16(CAN1_MB01_ID0)
-#define bfin_write_CAN1_MB01_ID0(val) bfin_write16(CAN1_MB01_ID0, val)
-#define bfin_read_CAN1_MB01_ID1() bfin_read16(CAN1_MB01_ID1)
-#define bfin_write_CAN1_MB01_ID1(val) bfin_write16(CAN1_MB01_ID1, val)
-#define bfin_read_CAN1_MB02_DATA0() bfin_read16(CAN1_MB02_DATA0)
-#define bfin_write_CAN1_MB02_DATA0(val) bfin_write16(CAN1_MB02_DATA0, val)
-#define bfin_read_CAN1_MB02_DATA1() bfin_read16(CAN1_MB02_DATA1)
-#define bfin_write_CAN1_MB02_DATA1(val) bfin_write16(CAN1_MB02_DATA1, val)
-#define bfin_read_CAN1_MB02_DATA2() bfin_read16(CAN1_MB02_DATA2)
-#define bfin_write_CAN1_MB02_DATA2(val) bfin_write16(CAN1_MB02_DATA2, val)
-#define bfin_read_CAN1_MB02_DATA3() bfin_read16(CAN1_MB02_DATA3)
-#define bfin_write_CAN1_MB02_DATA3(val) bfin_write16(CAN1_MB02_DATA3, val)
-#define bfin_read_CAN1_MB02_LENGTH() bfin_read16(CAN1_MB02_LENGTH)
-#define bfin_write_CAN1_MB02_LENGTH(val) bfin_write16(CAN1_MB02_LENGTH, val)
-#define bfin_read_CAN1_MB02_TIMESTAMP() bfin_read16(CAN1_MB02_TIMESTAMP)
-#define bfin_write_CAN1_MB02_TIMESTAMP(val) bfin_write16(CAN1_MB02_TIMESTAMP, val)
-#define bfin_read_CAN1_MB02_ID0() bfin_read16(CAN1_MB02_ID0)
-#define bfin_write_CAN1_MB02_ID0(val) bfin_write16(CAN1_MB02_ID0, val)
-#define bfin_read_CAN1_MB02_ID1() bfin_read16(CAN1_MB02_ID1)
-#define bfin_write_CAN1_MB02_ID1(val) bfin_write16(CAN1_MB02_ID1, val)
-#define bfin_read_CAN1_MB03_DATA0() bfin_read16(CAN1_MB03_DATA0)
-#define bfin_write_CAN1_MB03_DATA0(val) bfin_write16(CAN1_MB03_DATA0, val)
-#define bfin_read_CAN1_MB03_DATA1() bfin_read16(CAN1_MB03_DATA1)
-#define bfin_write_CAN1_MB03_DATA1(val) bfin_write16(CAN1_MB03_DATA1, val)
-#define bfin_read_CAN1_MB03_DATA2() bfin_read16(CAN1_MB03_DATA2)
-#define bfin_write_CAN1_MB03_DATA2(val) bfin_write16(CAN1_MB03_DATA2, val)
-#define bfin_read_CAN1_MB03_DATA3() bfin_read16(CAN1_MB03_DATA3)
-#define bfin_write_CAN1_MB03_DATA3(val) bfin_write16(CAN1_MB03_DATA3, val)
-#define bfin_read_CAN1_MB03_LENGTH() bfin_read16(CAN1_MB03_LENGTH)
-#define bfin_write_CAN1_MB03_LENGTH(val) bfin_write16(CAN1_MB03_LENGTH, val)
-#define bfin_read_CAN1_MB03_TIMESTAMP() bfin_read16(CAN1_MB03_TIMESTAMP)
-#define bfin_write_CAN1_MB03_TIMESTAMP(val) bfin_write16(CAN1_MB03_TIMESTAMP, val)
-#define bfin_read_CAN1_MB03_ID0() bfin_read16(CAN1_MB03_ID0)
-#define bfin_write_CAN1_MB03_ID0(val) bfin_write16(CAN1_MB03_ID0, val)
-#define bfin_read_CAN1_MB03_ID1() bfin_read16(CAN1_MB03_ID1)
-#define bfin_write_CAN1_MB03_ID1(val) bfin_write16(CAN1_MB03_ID1, val)
-#define bfin_read_CAN1_MB04_DATA0() bfin_read16(CAN1_MB04_DATA0)
-#define bfin_write_CAN1_MB04_DATA0(val) bfin_write16(CAN1_MB04_DATA0, val)
-#define bfin_read_CAN1_MB04_DATA1() bfin_read16(CAN1_MB04_DATA1)
-#define bfin_write_CAN1_MB04_DATA1(val) bfin_write16(CAN1_MB04_DATA1, val)
-#define bfin_read_CAN1_MB04_DATA2() bfin_read16(CAN1_MB04_DATA2)
-#define bfin_write_CAN1_MB04_DATA2(val) bfin_write16(CAN1_MB04_DATA2, val)
-#define bfin_read_CAN1_MB04_DATA3() bfin_read16(CAN1_MB04_DATA3)
-#define bfin_write_CAN1_MB04_DATA3(val) bfin_write16(CAN1_MB04_DATA3, val)
-#define bfin_read_CAN1_MB04_LENGTH() bfin_read16(CAN1_MB04_LENGTH)
-#define bfin_write_CAN1_MB04_LENGTH(val) bfin_write16(CAN1_MB04_LENGTH, val)
-#define bfin_read_CAN1_MB04_TIMESTAMP() bfin_read16(CAN1_MB04_TIMESTAMP)
-#define bfin_write_CAN1_MB04_TIMESTAMP(val) bfin_write16(CAN1_MB04_TIMESTAMP, val)
-#define bfin_read_CAN1_MB04_ID0() bfin_read16(CAN1_MB04_ID0)
-#define bfin_write_CAN1_MB04_ID0(val) bfin_write16(CAN1_MB04_ID0, val)
-#define bfin_read_CAN1_MB04_ID1() bfin_read16(CAN1_MB04_ID1)
-#define bfin_write_CAN1_MB04_ID1(val) bfin_write16(CAN1_MB04_ID1, val)
-#define bfin_read_CAN1_MB05_DATA0() bfin_read16(CAN1_MB05_DATA0)
-#define bfin_write_CAN1_MB05_DATA0(val) bfin_write16(CAN1_MB05_DATA0, val)
-#define bfin_read_CAN1_MB05_DATA1() bfin_read16(CAN1_MB05_DATA1)
-#define bfin_write_CAN1_MB05_DATA1(val) bfin_write16(CAN1_MB05_DATA1, val)
-#define bfin_read_CAN1_MB05_DATA2() bfin_read16(CAN1_MB05_DATA2)
-#define bfin_write_CAN1_MB05_DATA2(val) bfin_write16(CAN1_MB05_DATA2, val)
-#define bfin_read_CAN1_MB05_DATA3() bfin_read16(CAN1_MB05_DATA3)
-#define bfin_write_CAN1_MB05_DATA3(val) bfin_write16(CAN1_MB05_DATA3, val)
-#define bfin_read_CAN1_MB05_LENGTH() bfin_read16(CAN1_MB05_LENGTH)
-#define bfin_write_CAN1_MB05_LENGTH(val) bfin_write16(CAN1_MB05_LENGTH, val)
-#define bfin_read_CAN1_MB05_TIMESTAMP() bfin_read16(CAN1_MB05_TIMESTAMP)
-#define bfin_write_CAN1_MB05_TIMESTAMP(val) bfin_write16(CAN1_MB05_TIMESTAMP, val)
-#define bfin_read_CAN1_MB05_ID0() bfin_read16(CAN1_MB05_ID0)
-#define bfin_write_CAN1_MB05_ID0(val) bfin_write16(CAN1_MB05_ID0, val)
-#define bfin_read_CAN1_MB05_ID1() bfin_read16(CAN1_MB05_ID1)
-#define bfin_write_CAN1_MB05_ID1(val) bfin_write16(CAN1_MB05_ID1, val)
-#define bfin_read_CAN1_MB06_DATA0() bfin_read16(CAN1_MB06_DATA0)
-#define bfin_write_CAN1_MB06_DATA0(val) bfin_write16(CAN1_MB06_DATA0, val)
-#define bfin_read_CAN1_MB06_DATA1() bfin_read16(CAN1_MB06_DATA1)
-#define bfin_write_CAN1_MB06_DATA1(val) bfin_write16(CAN1_MB06_DATA1, val)
-#define bfin_read_CAN1_MB06_DATA2() bfin_read16(CAN1_MB06_DATA2)
-#define bfin_write_CAN1_MB06_DATA2(val) bfin_write16(CAN1_MB06_DATA2, val)
-#define bfin_read_CAN1_MB06_DATA3() bfin_read16(CAN1_MB06_DATA3)
-#define bfin_write_CAN1_MB06_DATA3(val) bfin_write16(CAN1_MB06_DATA3, val)
-#define bfin_read_CAN1_MB06_LENGTH() bfin_read16(CAN1_MB06_LENGTH)
-#define bfin_write_CAN1_MB06_LENGTH(val) bfin_write16(CAN1_MB06_LENGTH, val)
-#define bfin_read_CAN1_MB06_TIMESTAMP() bfin_read16(CAN1_MB06_TIMESTAMP)
-#define bfin_write_CAN1_MB06_TIMESTAMP(val) bfin_write16(CAN1_MB06_TIMESTAMP, val)
-#define bfin_read_CAN1_MB06_ID0() bfin_read16(CAN1_MB06_ID0)
-#define bfin_write_CAN1_MB06_ID0(val) bfin_write16(CAN1_MB06_ID0, val)
-#define bfin_read_CAN1_MB06_ID1() bfin_read16(CAN1_MB06_ID1)
-#define bfin_write_CAN1_MB06_ID1(val) bfin_write16(CAN1_MB06_ID1, val)
-#define bfin_read_CAN1_MB07_DATA0() bfin_read16(CAN1_MB07_DATA0)
-#define bfin_write_CAN1_MB07_DATA0(val) bfin_write16(CAN1_MB07_DATA0, val)
-#define bfin_read_CAN1_MB07_DATA1() bfin_read16(CAN1_MB07_DATA1)
-#define bfin_write_CAN1_MB07_DATA1(val) bfin_write16(CAN1_MB07_DATA1, val)
-#define bfin_read_CAN1_MB07_DATA2() bfin_read16(CAN1_MB07_DATA2)
-#define bfin_write_CAN1_MB07_DATA2(val) bfin_write16(CAN1_MB07_DATA2, val)
-#define bfin_read_CAN1_MB07_DATA3() bfin_read16(CAN1_MB07_DATA3)
-#define bfin_write_CAN1_MB07_DATA3(val) bfin_write16(CAN1_MB07_DATA3, val)
-#define bfin_read_CAN1_MB07_LENGTH() bfin_read16(CAN1_MB07_LENGTH)
-#define bfin_write_CAN1_MB07_LENGTH(val) bfin_write16(CAN1_MB07_LENGTH, val)
-#define bfin_read_CAN1_MB07_TIMESTAMP() bfin_read16(CAN1_MB07_TIMESTAMP)
-#define bfin_write_CAN1_MB07_TIMESTAMP(val) bfin_write16(CAN1_MB07_TIMESTAMP, val)
-#define bfin_read_CAN1_MB07_ID0() bfin_read16(CAN1_MB07_ID0)
-#define bfin_write_CAN1_MB07_ID0(val) bfin_write16(CAN1_MB07_ID0, val)
-#define bfin_read_CAN1_MB07_ID1() bfin_read16(CAN1_MB07_ID1)
-#define bfin_write_CAN1_MB07_ID1(val) bfin_write16(CAN1_MB07_ID1, val)
-#define bfin_read_CAN1_MB08_DATA0() bfin_read16(CAN1_MB08_DATA0)
-#define bfin_write_CAN1_MB08_DATA0(val) bfin_write16(CAN1_MB08_DATA0, val)
-#define bfin_read_CAN1_MB08_DATA1() bfin_read16(CAN1_MB08_DATA1)
-#define bfin_write_CAN1_MB08_DATA1(val) bfin_write16(CAN1_MB08_DATA1, val)
-#define bfin_read_CAN1_MB08_DATA2() bfin_read16(CAN1_MB08_DATA2)
-#define bfin_write_CAN1_MB08_DATA2(val) bfin_write16(CAN1_MB08_DATA2, val)
-#define bfin_read_CAN1_MB08_DATA3() bfin_read16(CAN1_MB08_DATA3)
-#define bfin_write_CAN1_MB08_DATA3(val) bfin_write16(CAN1_MB08_DATA3, val)
-#define bfin_read_CAN1_MB08_LENGTH() bfin_read16(CAN1_MB08_LENGTH)
-#define bfin_write_CAN1_MB08_LENGTH(val) bfin_write16(CAN1_MB08_LENGTH, val)
-#define bfin_read_CAN1_MB08_TIMESTAMP() bfin_read16(CAN1_MB08_TIMESTAMP)
-#define bfin_write_CAN1_MB08_TIMESTAMP(val) bfin_write16(CAN1_MB08_TIMESTAMP, val)
-#define bfin_read_CAN1_MB08_ID0() bfin_read16(CAN1_MB08_ID0)
-#define bfin_write_CAN1_MB08_ID0(val) bfin_write16(CAN1_MB08_ID0, val)
-#define bfin_read_CAN1_MB08_ID1() bfin_read16(CAN1_MB08_ID1)
-#define bfin_write_CAN1_MB08_ID1(val) bfin_write16(CAN1_MB08_ID1, val)
-#define bfin_read_CAN1_MB09_DATA0() bfin_read16(CAN1_MB09_DATA0)
-#define bfin_write_CAN1_MB09_DATA0(val) bfin_write16(CAN1_MB09_DATA0, val)
-#define bfin_read_CAN1_MB09_DATA1() bfin_read16(CAN1_MB09_DATA1)
-#define bfin_write_CAN1_MB09_DATA1(val) bfin_write16(CAN1_MB09_DATA1, val)
-#define bfin_read_CAN1_MB09_DATA2() bfin_read16(CAN1_MB09_DATA2)
-#define bfin_write_CAN1_MB09_DATA2(val) bfin_write16(CAN1_MB09_DATA2, val)
-#define bfin_read_CAN1_MB09_DATA3() bfin_read16(CAN1_MB09_DATA3)
-#define bfin_write_CAN1_MB09_DATA3(val) bfin_write16(CAN1_MB09_DATA3, val)
-#define bfin_read_CAN1_MB09_LENGTH() bfin_read16(CAN1_MB09_LENGTH)
-#define bfin_write_CAN1_MB09_LENGTH(val) bfin_write16(CAN1_MB09_LENGTH, val)
-#define bfin_read_CAN1_MB09_TIMESTAMP() bfin_read16(CAN1_MB09_TIMESTAMP)
-#define bfin_write_CAN1_MB09_TIMESTAMP(val) bfin_write16(CAN1_MB09_TIMESTAMP, val)
-#define bfin_read_CAN1_MB09_ID0() bfin_read16(CAN1_MB09_ID0)
-#define bfin_write_CAN1_MB09_ID0(val) bfin_write16(CAN1_MB09_ID0, val)
-#define bfin_read_CAN1_MB09_ID1() bfin_read16(CAN1_MB09_ID1)
-#define bfin_write_CAN1_MB09_ID1(val) bfin_write16(CAN1_MB09_ID1, val)
-#define bfin_read_CAN1_MB10_DATA0() bfin_read16(CAN1_MB10_DATA0)
-#define bfin_write_CAN1_MB10_DATA0(val) bfin_write16(CAN1_MB10_DATA0, val)
-#define bfin_read_CAN1_MB10_DATA1() bfin_read16(CAN1_MB10_DATA1)
-#define bfin_write_CAN1_MB10_DATA1(val) bfin_write16(CAN1_MB10_DATA1, val)
-#define bfin_read_CAN1_MB10_DATA2() bfin_read16(CAN1_MB10_DATA2)
-#define bfin_write_CAN1_MB10_DATA2(val) bfin_write16(CAN1_MB10_DATA2, val)
-#define bfin_read_CAN1_MB10_DATA3() bfin_read16(CAN1_MB10_DATA3)
-#define bfin_write_CAN1_MB10_DATA3(val) bfin_write16(CAN1_MB10_DATA3, val)
-#define bfin_read_CAN1_MB10_LENGTH() bfin_read16(CAN1_MB10_LENGTH)
-#define bfin_write_CAN1_MB10_LENGTH(val) bfin_write16(CAN1_MB10_LENGTH, val)
-#define bfin_read_CAN1_MB10_TIMESTAMP() bfin_read16(CAN1_MB10_TIMESTAMP)
-#define bfin_write_CAN1_MB10_TIMESTAMP(val) bfin_write16(CAN1_MB10_TIMESTAMP, val)
-#define bfin_read_CAN1_MB10_ID0() bfin_read16(CAN1_MB10_ID0)
-#define bfin_write_CAN1_MB10_ID0(val) bfin_write16(CAN1_MB10_ID0, val)
-#define bfin_read_CAN1_MB10_ID1() bfin_read16(CAN1_MB10_ID1)
-#define bfin_write_CAN1_MB10_ID1(val) bfin_write16(CAN1_MB10_ID1, val)
-#define bfin_read_CAN1_MB11_DATA0() bfin_read16(CAN1_MB11_DATA0)
-#define bfin_write_CAN1_MB11_DATA0(val) bfin_write16(CAN1_MB11_DATA0, val)
-#define bfin_read_CAN1_MB11_DATA1() bfin_read16(CAN1_MB11_DATA1)
-#define bfin_write_CAN1_MB11_DATA1(val) bfin_write16(CAN1_MB11_DATA1, val)
-#define bfin_read_CAN1_MB11_DATA2() bfin_read16(CAN1_MB11_DATA2)
-#define bfin_write_CAN1_MB11_DATA2(val) bfin_write16(CAN1_MB11_DATA2, val)
-#define bfin_read_CAN1_MB11_DATA3() bfin_read16(CAN1_MB11_DATA3)
-#define bfin_write_CAN1_MB11_DATA3(val) bfin_write16(CAN1_MB11_DATA3, val)
-#define bfin_read_CAN1_MB11_LENGTH() bfin_read16(CAN1_MB11_LENGTH)
-#define bfin_write_CAN1_MB11_LENGTH(val) bfin_write16(CAN1_MB11_LENGTH, val)
-#define bfin_read_CAN1_MB11_TIMESTAMP() bfin_read16(CAN1_MB11_TIMESTAMP)
-#define bfin_write_CAN1_MB11_TIMESTAMP(val) bfin_write16(CAN1_MB11_TIMESTAMP, val)
-#define bfin_read_CAN1_MB11_ID0() bfin_read16(CAN1_MB11_ID0)
-#define bfin_write_CAN1_MB11_ID0(val) bfin_write16(CAN1_MB11_ID0, val)
-#define bfin_read_CAN1_MB11_ID1() bfin_read16(CAN1_MB11_ID1)
-#define bfin_write_CAN1_MB11_ID1(val) bfin_write16(CAN1_MB11_ID1, val)
-#define bfin_read_CAN1_MB12_DATA0() bfin_read16(CAN1_MB12_DATA0)
-#define bfin_write_CAN1_MB12_DATA0(val) bfin_write16(CAN1_MB12_DATA0, val)
-#define bfin_read_CAN1_MB12_DATA1() bfin_read16(CAN1_MB12_DATA1)
-#define bfin_write_CAN1_MB12_DATA1(val) bfin_write16(CAN1_MB12_DATA1, val)
-#define bfin_read_CAN1_MB12_DATA2() bfin_read16(CAN1_MB12_DATA2)
-#define bfin_write_CAN1_MB12_DATA2(val) bfin_write16(CAN1_MB12_DATA2, val)
-#define bfin_read_CAN1_MB12_DATA3() bfin_read16(CAN1_MB12_DATA3)
-#define bfin_write_CAN1_MB12_DATA3(val) bfin_write16(CAN1_MB12_DATA3, val)
-#define bfin_read_CAN1_MB12_LENGTH() bfin_read16(CAN1_MB12_LENGTH)
-#define bfin_write_CAN1_MB12_LENGTH(val) bfin_write16(CAN1_MB12_LENGTH, val)
-#define bfin_read_CAN1_MB12_TIMESTAMP() bfin_read16(CAN1_MB12_TIMESTAMP)
-#define bfin_write_CAN1_MB12_TIMESTAMP(val) bfin_write16(CAN1_MB12_TIMESTAMP, val)
-#define bfin_read_CAN1_MB12_ID0() bfin_read16(CAN1_MB12_ID0)
-#define bfin_write_CAN1_MB12_ID0(val) bfin_write16(CAN1_MB12_ID0, val)
-#define bfin_read_CAN1_MB12_ID1() bfin_read16(CAN1_MB12_ID1)
-#define bfin_write_CAN1_MB12_ID1(val) bfin_write16(CAN1_MB12_ID1, val)
-#define bfin_read_CAN1_MB13_DATA0() bfin_read16(CAN1_MB13_DATA0)
-#define bfin_write_CAN1_MB13_DATA0(val) bfin_write16(CAN1_MB13_DATA0, val)
-#define bfin_read_CAN1_MB13_DATA1() bfin_read16(CAN1_MB13_DATA1)
-#define bfin_write_CAN1_MB13_DATA1(val) bfin_write16(CAN1_MB13_DATA1, val)
-#define bfin_read_CAN1_MB13_DATA2() bfin_read16(CAN1_MB13_DATA2)
-#define bfin_write_CAN1_MB13_DATA2(val) bfin_write16(CAN1_MB13_DATA2, val)
-#define bfin_read_CAN1_MB13_DATA3() bfin_read16(CAN1_MB13_DATA3)
-#define bfin_write_CAN1_MB13_DATA3(val) bfin_write16(CAN1_MB13_DATA3, val)
-#define bfin_read_CAN1_MB13_LENGTH() bfin_read16(CAN1_MB13_LENGTH)
-#define bfin_write_CAN1_MB13_LENGTH(val) bfin_write16(CAN1_MB13_LENGTH, val)
-#define bfin_read_CAN1_MB13_TIMESTAMP() bfin_read16(CAN1_MB13_TIMESTAMP)
-#define bfin_write_CAN1_MB13_TIMESTAMP(val) bfin_write16(CAN1_MB13_TIMESTAMP, val)
-#define bfin_read_CAN1_MB13_ID0() bfin_read16(CAN1_MB13_ID0)
-#define bfin_write_CAN1_MB13_ID0(val) bfin_write16(CAN1_MB13_ID0, val)
-#define bfin_read_CAN1_MB13_ID1() bfin_read16(CAN1_MB13_ID1)
-#define bfin_write_CAN1_MB13_ID1(val) bfin_write16(CAN1_MB13_ID1, val)
-#define bfin_read_CAN1_MB14_DATA0() bfin_read16(CAN1_MB14_DATA0)
-#define bfin_write_CAN1_MB14_DATA0(val) bfin_write16(CAN1_MB14_DATA0, val)
-#define bfin_read_CAN1_MB14_DATA1() bfin_read16(CAN1_MB14_DATA1)
-#define bfin_write_CAN1_MB14_DATA1(val) bfin_write16(CAN1_MB14_DATA1, val)
-#define bfin_read_CAN1_MB14_DATA2() bfin_read16(CAN1_MB14_DATA2)
-#define bfin_write_CAN1_MB14_DATA2(val) bfin_write16(CAN1_MB14_DATA2, val)
-#define bfin_read_CAN1_MB14_DATA3() bfin_read16(CAN1_MB14_DATA3)
-#define bfin_write_CAN1_MB14_DATA3(val) bfin_write16(CAN1_MB14_DATA3, val)
-#define bfin_read_CAN1_MB14_LENGTH() bfin_read16(CAN1_MB14_LENGTH)
-#define bfin_write_CAN1_MB14_LENGTH(val) bfin_write16(CAN1_MB14_LENGTH, val)
-#define bfin_read_CAN1_MB14_TIMESTAMP() bfin_read16(CAN1_MB14_TIMESTAMP)
-#define bfin_write_CAN1_MB14_TIMESTAMP(val) bfin_write16(CAN1_MB14_TIMESTAMP, val)
-#define bfin_read_CAN1_MB14_ID0() bfin_read16(CAN1_MB14_ID0)
-#define bfin_write_CAN1_MB14_ID0(val) bfin_write16(CAN1_MB14_ID0, val)
-#define bfin_read_CAN1_MB14_ID1() bfin_read16(CAN1_MB14_ID1)
-#define bfin_write_CAN1_MB14_ID1(val) bfin_write16(CAN1_MB14_ID1, val)
-#define bfin_read_CAN1_MB15_DATA0() bfin_read16(CAN1_MB15_DATA0)
-#define bfin_write_CAN1_MB15_DATA0(val) bfin_write16(CAN1_MB15_DATA0, val)
-#define bfin_read_CAN1_MB15_DATA1() bfin_read16(CAN1_MB15_DATA1)
-#define bfin_write_CAN1_MB15_DATA1(val) bfin_write16(CAN1_MB15_DATA1, val)
-#define bfin_read_CAN1_MB15_DATA2() bfin_read16(CAN1_MB15_DATA2)
-#define bfin_write_CAN1_MB15_DATA2(val) bfin_write16(CAN1_MB15_DATA2, val)
-#define bfin_read_CAN1_MB15_DATA3() bfin_read16(CAN1_MB15_DATA3)
-#define bfin_write_CAN1_MB15_DATA3(val) bfin_write16(CAN1_MB15_DATA3, val)
-#define bfin_read_CAN1_MB15_LENGTH() bfin_read16(CAN1_MB15_LENGTH)
-#define bfin_write_CAN1_MB15_LENGTH(val) bfin_write16(CAN1_MB15_LENGTH, val)
-#define bfin_read_CAN1_MB15_TIMESTAMP() bfin_read16(CAN1_MB15_TIMESTAMP)
-#define bfin_write_CAN1_MB15_TIMESTAMP(val) bfin_write16(CAN1_MB15_TIMESTAMP, val)
-#define bfin_read_CAN1_MB15_ID0() bfin_read16(CAN1_MB15_ID0)
-#define bfin_write_CAN1_MB15_ID0(val) bfin_write16(CAN1_MB15_ID0, val)
-#define bfin_read_CAN1_MB15_ID1() bfin_read16(CAN1_MB15_ID1)
-#define bfin_write_CAN1_MB15_ID1(val) bfin_write16(CAN1_MB15_ID1, val)
-
-/* CAN Controller 1 Mailbox Data Registers */
-
-#define bfin_read_CAN1_MB16_DATA0() bfin_read16(CAN1_MB16_DATA0)
-#define bfin_write_CAN1_MB16_DATA0(val) bfin_write16(CAN1_MB16_DATA0, val)
-#define bfin_read_CAN1_MB16_DATA1() bfin_read16(CAN1_MB16_DATA1)
-#define bfin_write_CAN1_MB16_DATA1(val) bfin_write16(CAN1_MB16_DATA1, val)
-#define bfin_read_CAN1_MB16_DATA2() bfin_read16(CAN1_MB16_DATA2)
-#define bfin_write_CAN1_MB16_DATA2(val) bfin_write16(CAN1_MB16_DATA2, val)
-#define bfin_read_CAN1_MB16_DATA3() bfin_read16(CAN1_MB16_DATA3)
-#define bfin_write_CAN1_MB16_DATA3(val) bfin_write16(CAN1_MB16_DATA3, val)
-#define bfin_read_CAN1_MB16_LENGTH() bfin_read16(CAN1_MB16_LENGTH)
-#define bfin_write_CAN1_MB16_LENGTH(val) bfin_write16(CAN1_MB16_LENGTH, val)
-#define bfin_read_CAN1_MB16_TIMESTAMP() bfin_read16(CAN1_MB16_TIMESTAMP)
-#define bfin_write_CAN1_MB16_TIMESTAMP(val) bfin_write16(CAN1_MB16_TIMESTAMP, val)
-#define bfin_read_CAN1_MB16_ID0() bfin_read16(CAN1_MB16_ID0)
-#define bfin_write_CAN1_MB16_ID0(val) bfin_write16(CAN1_MB16_ID0, val)
-#define bfin_read_CAN1_MB16_ID1() bfin_read16(CAN1_MB16_ID1)
-#define bfin_write_CAN1_MB16_ID1(val) bfin_write16(CAN1_MB16_ID1, val)
-#define bfin_read_CAN1_MB17_DATA0() bfin_read16(CAN1_MB17_DATA0)
-#define bfin_write_CAN1_MB17_DATA0(val) bfin_write16(CAN1_MB17_DATA0, val)
-#define bfin_read_CAN1_MB17_DATA1() bfin_read16(CAN1_MB17_DATA1)
-#define bfin_write_CAN1_MB17_DATA1(val) bfin_write16(CAN1_MB17_DATA1, val)
-#define bfin_read_CAN1_MB17_DATA2() bfin_read16(CAN1_MB17_DATA2)
-#define bfin_write_CAN1_MB17_DATA2(val) bfin_write16(CAN1_MB17_DATA2, val)
-#define bfin_read_CAN1_MB17_DATA3() bfin_read16(CAN1_MB17_DATA3)
-#define bfin_write_CAN1_MB17_DATA3(val) bfin_write16(CAN1_MB17_DATA3, val)
-#define bfin_read_CAN1_MB17_LENGTH() bfin_read16(CAN1_MB17_LENGTH)
-#define bfin_write_CAN1_MB17_LENGTH(val) bfin_write16(CAN1_MB17_LENGTH, val)
-#define bfin_read_CAN1_MB17_TIMESTAMP() bfin_read16(CAN1_MB17_TIMESTAMP)
-#define bfin_write_CAN1_MB17_TIMESTAMP(val) bfin_write16(CAN1_MB17_TIMESTAMP, val)
-#define bfin_read_CAN1_MB17_ID0() bfin_read16(CAN1_MB17_ID0)
-#define bfin_write_CAN1_MB17_ID0(val) bfin_write16(CAN1_MB17_ID0, val)
-#define bfin_read_CAN1_MB17_ID1() bfin_read16(CAN1_MB17_ID1)
-#define bfin_write_CAN1_MB17_ID1(val) bfin_write16(CAN1_MB17_ID1, val)
-#define bfin_read_CAN1_MB18_DATA0() bfin_read16(CAN1_MB18_DATA0)
-#define bfin_write_CAN1_MB18_DATA0(val) bfin_write16(CAN1_MB18_DATA0, val)
-#define bfin_read_CAN1_MB18_DATA1() bfin_read16(CAN1_MB18_DATA1)
-#define bfin_write_CAN1_MB18_DATA1(val) bfin_write16(CAN1_MB18_DATA1, val)
-#define bfin_read_CAN1_MB18_DATA2() bfin_read16(CAN1_MB18_DATA2)
-#define bfin_write_CAN1_MB18_DATA2(val) bfin_write16(CAN1_MB18_DATA2, val)
-#define bfin_read_CAN1_MB18_DATA3() bfin_read16(CAN1_MB18_DATA3)
-#define bfin_write_CAN1_MB18_DATA3(val) bfin_write16(CAN1_MB18_DATA3, val)
-#define bfin_read_CAN1_MB18_LENGTH() bfin_read16(CAN1_MB18_LENGTH)
-#define bfin_write_CAN1_MB18_LENGTH(val) bfin_write16(CAN1_MB18_LENGTH, val)
-#define bfin_read_CAN1_MB18_TIMESTAMP() bfin_read16(CAN1_MB18_TIMESTAMP)
-#define bfin_write_CAN1_MB18_TIMESTAMP(val) bfin_write16(CAN1_MB18_TIMESTAMP, val)
-#define bfin_read_CAN1_MB18_ID0() bfin_read16(CAN1_MB18_ID0)
-#define bfin_write_CAN1_MB18_ID0(val) bfin_write16(CAN1_MB18_ID0, val)
-#define bfin_read_CAN1_MB18_ID1() bfin_read16(CAN1_MB18_ID1)
-#define bfin_write_CAN1_MB18_ID1(val) bfin_write16(CAN1_MB18_ID1, val)
-#define bfin_read_CAN1_MB19_DATA0() bfin_read16(CAN1_MB19_DATA0)
-#define bfin_write_CAN1_MB19_DATA0(val) bfin_write16(CAN1_MB19_DATA0, val)
-#define bfin_read_CAN1_MB19_DATA1() bfin_read16(CAN1_MB19_DATA1)
-#define bfin_write_CAN1_MB19_DATA1(val) bfin_write16(CAN1_MB19_DATA1, val)
-#define bfin_read_CAN1_MB19_DATA2() bfin_read16(CAN1_MB19_DATA2)
-#define bfin_write_CAN1_MB19_DATA2(val) bfin_write16(CAN1_MB19_DATA2, val)
-#define bfin_read_CAN1_MB19_DATA3() bfin_read16(CAN1_MB19_DATA3)
-#define bfin_write_CAN1_MB19_DATA3(val) bfin_write16(CAN1_MB19_DATA3, val)
-#define bfin_read_CAN1_MB19_LENGTH() bfin_read16(CAN1_MB19_LENGTH)
-#define bfin_write_CAN1_MB19_LENGTH(val) bfin_write16(CAN1_MB19_LENGTH, val)
-#define bfin_read_CAN1_MB19_TIMESTAMP() bfin_read16(CAN1_MB19_TIMESTAMP)
-#define bfin_write_CAN1_MB19_TIMESTAMP(val) bfin_write16(CAN1_MB19_TIMESTAMP, val)
-#define bfin_read_CAN1_MB19_ID0() bfin_read16(CAN1_MB19_ID0)
-#define bfin_write_CAN1_MB19_ID0(val) bfin_write16(CAN1_MB19_ID0, val)
-#define bfin_read_CAN1_MB19_ID1() bfin_read16(CAN1_MB19_ID1)
-#define bfin_write_CAN1_MB19_ID1(val) bfin_write16(CAN1_MB19_ID1, val)
-#define bfin_read_CAN1_MB20_DATA0() bfin_read16(CAN1_MB20_DATA0)
-#define bfin_write_CAN1_MB20_DATA0(val) bfin_write16(CAN1_MB20_DATA0, val)
-#define bfin_read_CAN1_MB20_DATA1() bfin_read16(CAN1_MB20_DATA1)
-#define bfin_write_CAN1_MB20_DATA1(val) bfin_write16(CAN1_MB20_DATA1, val)
-#define bfin_read_CAN1_MB20_DATA2() bfin_read16(CAN1_MB20_DATA2)
-#define bfin_write_CAN1_MB20_DATA2(val) bfin_write16(CAN1_MB20_DATA2, val)
-#define bfin_read_CAN1_MB20_DATA3() bfin_read16(CAN1_MB20_DATA3)
-#define bfin_write_CAN1_MB20_DATA3(val) bfin_write16(CAN1_MB20_DATA3, val)
-#define bfin_read_CAN1_MB20_LENGTH() bfin_read16(CAN1_MB20_LENGTH)
-#define bfin_write_CAN1_MB20_LENGTH(val) bfin_write16(CAN1_MB20_LENGTH, val)
-#define bfin_read_CAN1_MB20_TIMESTAMP() bfin_read16(CAN1_MB20_TIMESTAMP)
-#define bfin_write_CAN1_MB20_TIMESTAMP(val) bfin_write16(CAN1_MB20_TIMESTAMP, val)
-#define bfin_read_CAN1_MB20_ID0() bfin_read16(CAN1_MB20_ID0)
-#define bfin_write_CAN1_MB20_ID0(val) bfin_write16(CAN1_MB20_ID0, val)
-#define bfin_read_CAN1_MB20_ID1() bfin_read16(CAN1_MB20_ID1)
-#define bfin_write_CAN1_MB20_ID1(val) bfin_write16(CAN1_MB20_ID1, val)
-#define bfin_read_CAN1_MB21_DATA0() bfin_read16(CAN1_MB21_DATA0)
-#define bfin_write_CAN1_MB21_DATA0(val) bfin_write16(CAN1_MB21_DATA0, val)
-#define bfin_read_CAN1_MB21_DATA1() bfin_read16(CAN1_MB21_DATA1)
-#define bfin_write_CAN1_MB21_DATA1(val) bfin_write16(CAN1_MB21_DATA1, val)
-#define bfin_read_CAN1_MB21_DATA2() bfin_read16(CAN1_MB21_DATA2)
-#define bfin_write_CAN1_MB21_DATA2(val) bfin_write16(CAN1_MB21_DATA2, val)
-#define bfin_read_CAN1_MB21_DATA3() bfin_read16(CAN1_MB21_DATA3)
-#define bfin_write_CAN1_MB21_DATA3(val) bfin_write16(CAN1_MB21_DATA3, val)
-#define bfin_read_CAN1_MB21_LENGTH() bfin_read16(CAN1_MB21_LENGTH)
-#define bfin_write_CAN1_MB21_LENGTH(val) bfin_write16(CAN1_MB21_LENGTH, val)
-#define bfin_read_CAN1_MB21_TIMESTAMP() bfin_read16(CAN1_MB21_TIMESTAMP)
-#define bfin_write_CAN1_MB21_TIMESTAMP(val) bfin_write16(CAN1_MB21_TIMESTAMP, val)
-#define bfin_read_CAN1_MB21_ID0() bfin_read16(CAN1_MB21_ID0)
-#define bfin_write_CAN1_MB21_ID0(val) bfin_write16(CAN1_MB21_ID0, val)
-#define bfin_read_CAN1_MB21_ID1() bfin_read16(CAN1_MB21_ID1)
-#define bfin_write_CAN1_MB21_ID1(val) bfin_write16(CAN1_MB21_ID1, val)
-#define bfin_read_CAN1_MB22_DATA0() bfin_read16(CAN1_MB22_DATA0)
-#define bfin_write_CAN1_MB22_DATA0(val) bfin_write16(CAN1_MB22_DATA0, val)
-#define bfin_read_CAN1_MB22_DATA1() bfin_read16(CAN1_MB22_DATA1)
-#define bfin_write_CAN1_MB22_DATA1(val) bfin_write16(CAN1_MB22_DATA1, val)
-#define bfin_read_CAN1_MB22_DATA2() bfin_read16(CAN1_MB22_DATA2)
-#define bfin_write_CAN1_MB22_DATA2(val) bfin_write16(CAN1_MB22_DATA2, val)
-#define bfin_read_CAN1_MB22_DATA3() bfin_read16(CAN1_MB22_DATA3)
-#define bfin_write_CAN1_MB22_DATA3(val) bfin_write16(CAN1_MB22_DATA3, val)
-#define bfin_read_CAN1_MB22_LENGTH() bfin_read16(CAN1_MB22_LENGTH)
-#define bfin_write_CAN1_MB22_LENGTH(val) bfin_write16(CAN1_MB22_LENGTH, val)
-#define bfin_read_CAN1_MB22_TIMESTAMP() bfin_read16(CAN1_MB22_TIMESTAMP)
-#define bfin_write_CAN1_MB22_TIMESTAMP(val) bfin_write16(CAN1_MB22_TIMESTAMP, val)
-#define bfin_read_CAN1_MB22_ID0() bfin_read16(CAN1_MB22_ID0)
-#define bfin_write_CAN1_MB22_ID0(val) bfin_write16(CAN1_MB22_ID0, val)
-#define bfin_read_CAN1_MB22_ID1() bfin_read16(CAN1_MB22_ID1)
-#define bfin_write_CAN1_MB22_ID1(val) bfin_write16(CAN1_MB22_ID1, val)
-#define bfin_read_CAN1_MB23_DATA0() bfin_read16(CAN1_MB23_DATA0)
-#define bfin_write_CAN1_MB23_DATA0(val) bfin_write16(CAN1_MB23_DATA0, val)
-#define bfin_read_CAN1_MB23_DATA1() bfin_read16(CAN1_MB23_DATA1)
-#define bfin_write_CAN1_MB23_DATA1(val) bfin_write16(CAN1_MB23_DATA1, val)
-#define bfin_read_CAN1_MB23_DATA2() bfin_read16(CAN1_MB23_DATA2)
-#define bfin_write_CAN1_MB23_DATA2(val) bfin_write16(CAN1_MB23_DATA2, val)
-#define bfin_read_CAN1_MB23_DATA3() bfin_read16(CAN1_MB23_DATA3)
-#define bfin_write_CAN1_MB23_DATA3(val) bfin_write16(CAN1_MB23_DATA3, val)
-#define bfin_read_CAN1_MB23_LENGTH() bfin_read16(CAN1_MB23_LENGTH)
-#define bfin_write_CAN1_MB23_LENGTH(val) bfin_write16(CAN1_MB23_LENGTH, val)
-#define bfin_read_CAN1_MB23_TIMESTAMP() bfin_read16(CAN1_MB23_TIMESTAMP)
-#define bfin_write_CAN1_MB23_TIMESTAMP(val) bfin_write16(CAN1_MB23_TIMESTAMP, val)
-#define bfin_read_CAN1_MB23_ID0() bfin_read16(CAN1_MB23_ID0)
-#define bfin_write_CAN1_MB23_ID0(val) bfin_write16(CAN1_MB23_ID0, val)
-#define bfin_read_CAN1_MB23_ID1() bfin_read16(CAN1_MB23_ID1)
-#define bfin_write_CAN1_MB23_ID1(val) bfin_write16(CAN1_MB23_ID1, val)
-#define bfin_read_CAN1_MB24_DATA0() bfin_read16(CAN1_MB24_DATA0)
-#define bfin_write_CAN1_MB24_DATA0(val) bfin_write16(CAN1_MB24_DATA0, val)
-#define bfin_read_CAN1_MB24_DATA1() bfin_read16(CAN1_MB24_DATA1)
-#define bfin_write_CAN1_MB24_DATA1(val) bfin_write16(CAN1_MB24_DATA1, val)
-#define bfin_read_CAN1_MB24_DATA2() bfin_read16(CAN1_MB24_DATA2)
-#define bfin_write_CAN1_MB24_DATA2(val) bfin_write16(CAN1_MB24_DATA2, val)
-#define bfin_read_CAN1_MB24_DATA3() bfin_read16(CAN1_MB24_DATA3)
-#define bfin_write_CAN1_MB24_DATA3(val) bfin_write16(CAN1_MB24_DATA3, val)
-#define bfin_read_CAN1_MB24_LENGTH() bfin_read16(CAN1_MB24_LENGTH)
-#define bfin_write_CAN1_MB24_LENGTH(val) bfin_write16(CAN1_MB24_LENGTH, val)
-#define bfin_read_CAN1_MB24_TIMESTAMP() bfin_read16(CAN1_MB24_TIMESTAMP)
-#define bfin_write_CAN1_MB24_TIMESTAMP(val) bfin_write16(CAN1_MB24_TIMESTAMP, val)
-#define bfin_read_CAN1_MB24_ID0() bfin_read16(CAN1_MB24_ID0)
-#define bfin_write_CAN1_MB24_ID0(val) bfin_write16(CAN1_MB24_ID0, val)
-#define bfin_read_CAN1_MB24_ID1() bfin_read16(CAN1_MB24_ID1)
-#define bfin_write_CAN1_MB24_ID1(val) bfin_write16(CAN1_MB24_ID1, val)
-#define bfin_read_CAN1_MB25_DATA0() bfin_read16(CAN1_MB25_DATA0)
-#define bfin_write_CAN1_MB25_DATA0(val) bfin_write16(CAN1_MB25_DATA0, val)
-#define bfin_read_CAN1_MB25_DATA1() bfin_read16(CAN1_MB25_DATA1)
-#define bfin_write_CAN1_MB25_DATA1(val) bfin_write16(CAN1_MB25_DATA1, val)
-#define bfin_read_CAN1_MB25_DATA2() bfin_read16(CAN1_MB25_DATA2)
-#define bfin_write_CAN1_MB25_DATA2(val) bfin_write16(CAN1_MB25_DATA2, val)
-#define bfin_read_CAN1_MB25_DATA3() bfin_read16(CAN1_MB25_DATA3)
-#define bfin_write_CAN1_MB25_DATA3(val) bfin_write16(CAN1_MB25_DATA3, val)
-#define bfin_read_CAN1_MB25_LENGTH() bfin_read16(CAN1_MB25_LENGTH)
-#define bfin_write_CAN1_MB25_LENGTH(val) bfin_write16(CAN1_MB25_LENGTH, val)
-#define bfin_read_CAN1_MB25_TIMESTAMP() bfin_read16(CAN1_MB25_TIMESTAMP)
-#define bfin_write_CAN1_MB25_TIMESTAMP(val) bfin_write16(CAN1_MB25_TIMESTAMP, val)
-#define bfin_read_CAN1_MB25_ID0() bfin_read16(CAN1_MB25_ID0)
-#define bfin_write_CAN1_MB25_ID0(val) bfin_write16(CAN1_MB25_ID0, val)
-#define bfin_read_CAN1_MB25_ID1() bfin_read16(CAN1_MB25_ID1)
-#define bfin_write_CAN1_MB25_ID1(val) bfin_write16(CAN1_MB25_ID1, val)
-#define bfin_read_CAN1_MB26_DATA0() bfin_read16(CAN1_MB26_DATA0)
-#define bfin_write_CAN1_MB26_DATA0(val) bfin_write16(CAN1_MB26_DATA0, val)
-#define bfin_read_CAN1_MB26_DATA1() bfin_read16(CAN1_MB26_DATA1)
-#define bfin_write_CAN1_MB26_DATA1(val) bfin_write16(CAN1_MB26_DATA1, val)
-#define bfin_read_CAN1_MB26_DATA2() bfin_read16(CAN1_MB26_DATA2)
-#define bfin_write_CAN1_MB26_DATA2(val) bfin_write16(CAN1_MB26_DATA2, val)
-#define bfin_read_CAN1_MB26_DATA3() bfin_read16(CAN1_MB26_DATA3)
-#define bfin_write_CAN1_MB26_DATA3(val) bfin_write16(CAN1_MB26_DATA3, val)
-#define bfin_read_CAN1_MB26_LENGTH() bfin_read16(CAN1_MB26_LENGTH)
-#define bfin_write_CAN1_MB26_LENGTH(val) bfin_write16(CAN1_MB26_LENGTH, val)
-#define bfin_read_CAN1_MB26_TIMESTAMP() bfin_read16(CAN1_MB26_TIMESTAMP)
-#define bfin_write_CAN1_MB26_TIMESTAMP(val) bfin_write16(CAN1_MB26_TIMESTAMP, val)
-#define bfin_read_CAN1_MB26_ID0() bfin_read16(CAN1_MB26_ID0)
-#define bfin_write_CAN1_MB26_ID0(val) bfin_write16(CAN1_MB26_ID0, val)
-#define bfin_read_CAN1_MB26_ID1() bfin_read16(CAN1_MB26_ID1)
-#define bfin_write_CAN1_MB26_ID1(val) bfin_write16(CAN1_MB26_ID1, val)
-#define bfin_read_CAN1_MB27_DATA0() bfin_read16(CAN1_MB27_DATA0)
-#define bfin_write_CAN1_MB27_DATA0(val) bfin_write16(CAN1_MB27_DATA0, val)
-#define bfin_read_CAN1_MB27_DATA1() bfin_read16(CAN1_MB27_DATA1)
-#define bfin_write_CAN1_MB27_DATA1(val) bfin_write16(CAN1_MB27_DATA1, val)
-#define bfin_read_CAN1_MB27_DATA2() bfin_read16(CAN1_MB27_DATA2)
-#define bfin_write_CAN1_MB27_DATA2(val) bfin_write16(CAN1_MB27_DATA2, val)
-#define bfin_read_CAN1_MB27_DATA3() bfin_read16(CAN1_MB27_DATA3)
-#define bfin_write_CAN1_MB27_DATA3(val) bfin_write16(CAN1_MB27_DATA3, val)
-#define bfin_read_CAN1_MB27_LENGTH() bfin_read16(CAN1_MB27_LENGTH)
-#define bfin_write_CAN1_MB27_LENGTH(val) bfin_write16(CAN1_MB27_LENGTH, val)
-#define bfin_read_CAN1_MB27_TIMESTAMP() bfin_read16(CAN1_MB27_TIMESTAMP)
-#define bfin_write_CAN1_MB27_TIMESTAMP(val) bfin_write16(CAN1_MB27_TIMESTAMP, val)
-#define bfin_read_CAN1_MB27_ID0() bfin_read16(CAN1_MB27_ID0)
-#define bfin_write_CAN1_MB27_ID0(val) bfin_write16(CAN1_MB27_ID0, val)
-#define bfin_read_CAN1_MB27_ID1() bfin_read16(CAN1_MB27_ID1)
-#define bfin_write_CAN1_MB27_ID1(val) bfin_write16(CAN1_MB27_ID1, val)
-#define bfin_read_CAN1_MB28_DATA0() bfin_read16(CAN1_MB28_DATA0)
-#define bfin_write_CAN1_MB28_DATA0(val) bfin_write16(CAN1_MB28_DATA0, val)
-#define bfin_read_CAN1_MB28_DATA1() bfin_read16(CAN1_MB28_DATA1)
-#define bfin_write_CAN1_MB28_DATA1(val) bfin_write16(CAN1_MB28_DATA1, val)
-#define bfin_read_CAN1_MB28_DATA2() bfin_read16(CAN1_MB28_DATA2)
-#define bfin_write_CAN1_MB28_DATA2(val) bfin_write16(CAN1_MB28_DATA2, val)
-#define bfin_read_CAN1_MB28_DATA3() bfin_read16(CAN1_MB28_DATA3)
-#define bfin_write_CAN1_MB28_DATA3(val) bfin_write16(CAN1_MB28_DATA3, val)
-#define bfin_read_CAN1_MB28_LENGTH() bfin_read16(CAN1_MB28_LENGTH)
-#define bfin_write_CAN1_MB28_LENGTH(val) bfin_write16(CAN1_MB28_LENGTH, val)
-#define bfin_read_CAN1_MB28_TIMESTAMP() bfin_read16(CAN1_MB28_TIMESTAMP)
-#define bfin_write_CAN1_MB28_TIMESTAMP(val) bfin_write16(CAN1_MB28_TIMESTAMP, val)
-#define bfin_read_CAN1_MB28_ID0() bfin_read16(CAN1_MB28_ID0)
-#define bfin_write_CAN1_MB28_ID0(val) bfin_write16(CAN1_MB28_ID0, val)
-#define bfin_read_CAN1_MB28_ID1() bfin_read16(CAN1_MB28_ID1)
-#define bfin_write_CAN1_MB28_ID1(val) bfin_write16(CAN1_MB28_ID1, val)
-#define bfin_read_CAN1_MB29_DATA0() bfin_read16(CAN1_MB29_DATA0)
-#define bfin_write_CAN1_MB29_DATA0(val) bfin_write16(CAN1_MB29_DATA0, val)
-#define bfin_read_CAN1_MB29_DATA1() bfin_read16(CAN1_MB29_DATA1)
-#define bfin_write_CAN1_MB29_DATA1(val) bfin_write16(CAN1_MB29_DATA1, val)
-#define bfin_read_CAN1_MB29_DATA2() bfin_read16(CAN1_MB29_DATA2)
-#define bfin_write_CAN1_MB29_DATA2(val) bfin_write16(CAN1_MB29_DATA2, val)
-#define bfin_read_CAN1_MB29_DATA3() bfin_read16(CAN1_MB29_DATA3)
-#define bfin_write_CAN1_MB29_DATA3(val) bfin_write16(CAN1_MB29_DATA3, val)
-#define bfin_read_CAN1_MB29_LENGTH() bfin_read16(CAN1_MB29_LENGTH)
-#define bfin_write_CAN1_MB29_LENGTH(val) bfin_write16(CAN1_MB29_LENGTH, val)
-#define bfin_read_CAN1_MB29_TIMESTAMP() bfin_read16(CAN1_MB29_TIMESTAMP)
-#define bfin_write_CAN1_MB29_TIMESTAMP(val) bfin_write16(CAN1_MB29_TIMESTAMP, val)
-#define bfin_read_CAN1_MB29_ID0() bfin_read16(CAN1_MB29_ID0)
-#define bfin_write_CAN1_MB29_ID0(val) bfin_write16(CAN1_MB29_ID0, val)
-#define bfin_read_CAN1_MB29_ID1() bfin_read16(CAN1_MB29_ID1)
-#define bfin_write_CAN1_MB29_ID1(val) bfin_write16(CAN1_MB29_ID1, val)
-#define bfin_read_CAN1_MB30_DATA0() bfin_read16(CAN1_MB30_DATA0)
-#define bfin_write_CAN1_MB30_DATA0(val) bfin_write16(CAN1_MB30_DATA0, val)
-#define bfin_read_CAN1_MB30_DATA1() bfin_read16(CAN1_MB30_DATA1)
-#define bfin_write_CAN1_MB30_DATA1(val) bfin_write16(CAN1_MB30_DATA1, val)
-#define bfin_read_CAN1_MB30_DATA2() bfin_read16(CAN1_MB30_DATA2)
-#define bfin_write_CAN1_MB30_DATA2(val) bfin_write16(CAN1_MB30_DATA2, val)
-#define bfin_read_CAN1_MB30_DATA3() bfin_read16(CAN1_MB30_DATA3)
-#define bfin_write_CAN1_MB30_DATA3(val) bfin_write16(CAN1_MB30_DATA3, val)
-#define bfin_read_CAN1_MB30_LENGTH() bfin_read16(CAN1_MB30_LENGTH)
-#define bfin_write_CAN1_MB30_LENGTH(val) bfin_write16(CAN1_MB30_LENGTH, val)
-#define bfin_read_CAN1_MB30_TIMESTAMP() bfin_read16(CAN1_MB30_TIMESTAMP)
-#define bfin_write_CAN1_MB30_TIMESTAMP(val) bfin_write16(CAN1_MB30_TIMESTAMP, val)
-#define bfin_read_CAN1_MB30_ID0() bfin_read16(CAN1_MB30_ID0)
-#define bfin_write_CAN1_MB30_ID0(val) bfin_write16(CAN1_MB30_ID0, val)
-#define bfin_read_CAN1_MB30_ID1() bfin_read16(CAN1_MB30_ID1)
-#define bfin_write_CAN1_MB30_ID1(val) bfin_write16(CAN1_MB30_ID1, val)
-#define bfin_read_CAN1_MB31_DATA0() bfin_read16(CAN1_MB31_DATA0)
-#define bfin_write_CAN1_MB31_DATA0(val) bfin_write16(CAN1_MB31_DATA0, val)
-#define bfin_read_CAN1_MB31_DATA1() bfin_read16(CAN1_MB31_DATA1)
-#define bfin_write_CAN1_MB31_DATA1(val) bfin_write16(CAN1_MB31_DATA1, val)
-#define bfin_read_CAN1_MB31_DATA2() bfin_read16(CAN1_MB31_DATA2)
-#define bfin_write_CAN1_MB31_DATA2(val) bfin_write16(CAN1_MB31_DATA2, val)
-#define bfin_read_CAN1_MB31_DATA3() bfin_read16(CAN1_MB31_DATA3)
-#define bfin_write_CAN1_MB31_DATA3(val) bfin_write16(CAN1_MB31_DATA3, val)
-#define bfin_read_CAN1_MB31_LENGTH() bfin_read16(CAN1_MB31_LENGTH)
-#define bfin_write_CAN1_MB31_LENGTH(val) bfin_write16(CAN1_MB31_LENGTH, val)
-#define bfin_read_CAN1_MB31_TIMESTAMP() bfin_read16(CAN1_MB31_TIMESTAMP)
-#define bfin_write_CAN1_MB31_TIMESTAMP(val) bfin_write16(CAN1_MB31_TIMESTAMP, val)
-#define bfin_read_CAN1_MB31_ID0() bfin_read16(CAN1_MB31_ID0)
-#define bfin_write_CAN1_MB31_ID0(val) bfin_write16(CAN1_MB31_ID0, val)
-#define bfin_read_CAN1_MB31_ID1() bfin_read16(CAN1_MB31_ID1)
-#define bfin_write_CAN1_MB31_ID1(val) bfin_write16(CAN1_MB31_ID1, val)
-
-/* ATAPI Registers */
-
-#define bfin_read_ATAPI_CONTROL() bfin_read16(ATAPI_CONTROL)
-#define bfin_write_ATAPI_CONTROL(val) bfin_write16(ATAPI_CONTROL, val)
-#define bfin_read_ATAPI_STATUS() bfin_read16(ATAPI_STATUS)
-#define bfin_write_ATAPI_STATUS(val) bfin_write16(ATAPI_STATUS, val)
-#define bfin_read_ATAPI_DEV_ADDR() bfin_read16(ATAPI_DEV_ADDR)
-#define bfin_write_ATAPI_DEV_ADDR(val) bfin_write16(ATAPI_DEV_ADDR, val)
-#define bfin_read_ATAPI_DEV_TXBUF() bfin_read16(ATAPI_DEV_TXBUF)
-#define bfin_write_ATAPI_DEV_TXBUF(val) bfin_write16(ATAPI_DEV_TXBUF, val)
-#define bfin_read_ATAPI_DEV_RXBUF() bfin_read16(ATAPI_DEV_RXBUF)
-#define bfin_write_ATAPI_DEV_RXBUF(val) bfin_write16(ATAPI_DEV_RXBUF, val)
-#define bfin_read_ATAPI_INT_MASK() bfin_read16(ATAPI_INT_MASK)
-#define bfin_write_ATAPI_INT_MASK(val) bfin_write16(ATAPI_INT_MASK, val)
-#define bfin_read_ATAPI_INT_STATUS() bfin_read16(ATAPI_INT_STATUS)
-#define bfin_write_ATAPI_INT_STATUS(val) bfin_write16(ATAPI_INT_STATUS, val)
-#define bfin_read_ATAPI_XFER_LEN() bfin_read16(ATAPI_XFER_LEN)
-#define bfin_write_ATAPI_XFER_LEN(val) bfin_write16(ATAPI_XFER_LEN, val)
-#define bfin_read_ATAPI_LINE_STATUS() bfin_read16(ATAPI_LINE_STATUS)
-#define bfin_write_ATAPI_LINE_STATUS(val) bfin_write16(ATAPI_LINE_STATUS, val)
-#define bfin_read_ATAPI_SM_STATE() bfin_read16(ATAPI_SM_STATE)
-#define bfin_write_ATAPI_SM_STATE(val) bfin_write16(ATAPI_SM_STATE, val)
-#define bfin_read_ATAPI_TERMINATE() bfin_read16(ATAPI_TERMINATE)
-#define bfin_write_ATAPI_TERMINATE(val) bfin_write16(ATAPI_TERMINATE, val)
-#define bfin_read_ATAPI_PIO_TFRCNT() bfin_read16(ATAPI_PIO_TFRCNT)
-#define bfin_write_ATAPI_PIO_TFRCNT(val) bfin_write16(ATAPI_PIO_TFRCNT, val)
-#define bfin_read_ATAPI_DMA_TFRCNT() bfin_read16(ATAPI_DMA_TFRCNT)
-#define bfin_write_ATAPI_DMA_TFRCNT(val) bfin_write16(ATAPI_DMA_TFRCNT, val)
-#define bfin_read_ATAPI_UMAIN_TFRCNT() bfin_read16(ATAPI_UMAIN_TFRCNT)
-#define bfin_write_ATAPI_UMAIN_TFRCNT(val) bfin_write16(ATAPI_UMAIN_TFRCNT, val)
-#define bfin_read_ATAPI_UDMAOUT_TFRCNT() bfin_read16(ATAPI_UDMAOUT_TFRCNT)
-#define bfin_write_ATAPI_UDMAOUT_TFRCNT(val) bfin_write16(ATAPI_UDMAOUT_TFRCNT, val)
-#define bfin_read_ATAPI_REG_TIM_0() bfin_read16(ATAPI_REG_TIM_0)
-#define bfin_write_ATAPI_REG_TIM_0(val) bfin_write16(ATAPI_REG_TIM_0, val)
-#define bfin_read_ATAPI_PIO_TIM_0() bfin_read16(ATAPI_PIO_TIM_0)
-#define bfin_write_ATAPI_PIO_TIM_0(val) bfin_write16(ATAPI_PIO_TIM_0, val)
-#define bfin_read_ATAPI_PIO_TIM_1() bfin_read16(ATAPI_PIO_TIM_1)
-#define bfin_write_ATAPI_PIO_TIM_1(val) bfin_write16(ATAPI_PIO_TIM_1, val)
-#define bfin_read_ATAPI_MULTI_TIM_0() bfin_read16(ATAPI_MULTI_TIM_0)
-#define bfin_write_ATAPI_MULTI_TIM_0(val) bfin_write16(ATAPI_MULTI_TIM_0, val)
-#define bfin_read_ATAPI_MULTI_TIM_1() bfin_read16(ATAPI_MULTI_TIM_1)
-#define bfin_write_ATAPI_MULTI_TIM_1(val) bfin_write16(ATAPI_MULTI_TIM_1, val)
-#define bfin_read_ATAPI_MULTI_TIM_2() bfin_read16(ATAPI_MULTI_TIM_2)
-#define bfin_write_ATAPI_MULTI_TIM_2(val) bfin_write16(ATAPI_MULTI_TIM_2, val)
-#define bfin_read_ATAPI_ULTRA_TIM_0() bfin_read16(ATAPI_ULTRA_TIM_0)
-#define bfin_write_ATAPI_ULTRA_TIM_0(val) bfin_write16(ATAPI_ULTRA_TIM_0, val)
-#define bfin_read_ATAPI_ULTRA_TIM_1() bfin_read16(ATAPI_ULTRA_TIM_1)
-#define bfin_write_ATAPI_ULTRA_TIM_1(val) bfin_write16(ATAPI_ULTRA_TIM_1, val)
-#define bfin_read_ATAPI_ULTRA_TIM_2() bfin_read16(ATAPI_ULTRA_TIM_2)
-#define bfin_write_ATAPI_ULTRA_TIM_2(val) bfin_write16(ATAPI_ULTRA_TIM_2, val)
-#define bfin_read_ATAPI_ULTRA_TIM_3() bfin_read16(ATAPI_ULTRA_TIM_3)
-#define bfin_write_ATAPI_ULTRA_TIM_3(val) bfin_write16(ATAPI_ULTRA_TIM_3, val)
-
-/* SDH Registers */
-
-#define bfin_read_SDH_PWR_CTL() bfin_read16(SDH_PWR_CTL)
-#define bfin_write_SDH_PWR_CTL(val) bfin_write16(SDH_PWR_CTL, val)
-#define bfin_read_SDH_CLK_CTL() bfin_read16(SDH_CLK_CTL)
-#define bfin_write_SDH_CLK_CTL(val) bfin_write16(SDH_CLK_CTL, val)
-#define bfin_read_SDH_ARGUMENT() bfin_read32(SDH_ARGUMENT)
-#define bfin_write_SDH_ARGUMENT(val) bfin_write32(SDH_ARGUMENT, val)
-#define bfin_read_SDH_COMMAND() bfin_read16(SDH_COMMAND)
-#define bfin_write_SDH_COMMAND(val) bfin_write16(SDH_COMMAND, val)
-#define bfin_read_SDH_RESP_CMD() bfin_read16(SDH_RESP_CMD)
-#define bfin_write_SDH_RESP_CMD(val) bfin_write16(SDH_RESP_CMD, val)
-#define bfin_read_SDH_RESPONSE0() bfin_read32(SDH_RESPONSE0)
-#define bfin_write_SDH_RESPONSE0(val) bfin_write32(SDH_RESPONSE0, val)
-#define bfin_read_SDH_RESPONSE1() bfin_read32(SDH_RESPONSE1)
-#define bfin_write_SDH_RESPONSE1(val) bfin_write32(SDH_RESPONSE1, val)
-#define bfin_read_SDH_RESPONSE2() bfin_read32(SDH_RESPONSE2)
-#define bfin_write_SDH_RESPONSE2(val) bfin_write32(SDH_RESPONSE2, val)
-#define bfin_read_SDH_RESPONSE3() bfin_read32(SDH_RESPONSE3)
-#define bfin_write_SDH_RESPONSE3(val) bfin_write32(SDH_RESPONSE3, val)
-#define bfin_read_SDH_DATA_TIMER() bfin_read32(SDH_DATA_TIMER)
-#define bfin_write_SDH_DATA_TIMER(val) bfin_write32(SDH_DATA_TIMER, val)
-#define bfin_read_SDH_DATA_LGTH() bfin_read16(SDH_DATA_LGTH)
-#define bfin_write_SDH_DATA_LGTH(val) bfin_write16(SDH_DATA_LGTH, val)
-#define bfin_read_SDH_DATA_CTL() bfin_read16(SDH_DATA_CTL)
-#define bfin_write_SDH_DATA_CTL(val) bfin_write16(SDH_DATA_CTL, val)
-#define bfin_read_SDH_DATA_CNT() bfin_read16(SDH_DATA_CNT)
-#define bfin_write_SDH_DATA_CNT(val) bfin_write16(SDH_DATA_CNT, val)
-#define bfin_read_SDH_STATUS() bfin_read32(SDH_STATUS)
-#define bfin_write_SDH_STATUS(val) bfin_write32(SDH_STATUS, val)
-#define bfin_read_SDH_STATUS_CLR() bfin_read16(SDH_STATUS_CLR)
-#define bfin_write_SDH_STATUS_CLR(val) bfin_write16(SDH_STATUS_CLR, val)
-#define bfin_read_SDH_MASK0() bfin_read32(SDH_MASK0)
-#define bfin_write_SDH_MASK0(val) bfin_write32(SDH_MASK0, val)
-#define bfin_read_SDH_MASK1() bfin_read32(SDH_MASK1)
-#define bfin_write_SDH_MASK1(val) bfin_write32(SDH_MASK1, val)
-#define bfin_read_SDH_FIFO_CNT() bfin_read16(SDH_FIFO_CNT)
-#define bfin_write_SDH_FIFO_CNT(val) bfin_write16(SDH_FIFO_CNT, val)
-#define bfin_read_SDH_FIFO() bfin_read32(SDH_FIFO)
-#define bfin_write_SDH_FIFO(val) bfin_write32(SDH_FIFO, val)
-#define bfin_read_SDH_E_STATUS() bfin_read16(SDH_E_STATUS)
-#define bfin_write_SDH_E_STATUS(val) bfin_write16(SDH_E_STATUS, val)
-#define bfin_read_SDH_E_MASK() bfin_read16(SDH_E_MASK)
-#define bfin_write_SDH_E_MASK(val) bfin_write16(SDH_E_MASK, val)
-#define bfin_read_SDH_CFG() bfin_read16(SDH_CFG)
-#define bfin_write_SDH_CFG(val) bfin_write16(SDH_CFG, val)
-#define bfin_read_SDH_RD_WAIT_EN() bfin_read16(SDH_RD_WAIT_EN)
-#define bfin_write_SDH_RD_WAIT_EN(val) bfin_write16(SDH_RD_WAIT_EN, val)
-#define bfin_read_SDH_PID0() bfin_read16(SDH_PID0)
-#define bfin_write_SDH_PID0(val) bfin_write16(SDH_PID0, val)
-#define bfin_read_SDH_PID1() bfin_read16(SDH_PID1)
-#define bfin_write_SDH_PID1(val) bfin_write16(SDH_PID1, val)
-#define bfin_read_SDH_PID2() bfin_read16(SDH_PID2)
-#define bfin_write_SDH_PID2(val) bfin_write16(SDH_PID2, val)
-#define bfin_read_SDH_PID3() bfin_read16(SDH_PID3)
-#define bfin_write_SDH_PID3(val) bfin_write16(SDH_PID3, val)
-#define bfin_read_SDH_PID4() bfin_read16(SDH_PID4)
-#define bfin_write_SDH_PID4(val) bfin_write16(SDH_PID4, val)
-#define bfin_read_SDH_PID5() bfin_read16(SDH_PID5)
-#define bfin_write_SDH_PID5(val) bfin_write16(SDH_PID5, val)
-#define bfin_read_SDH_PID6() bfin_read16(SDH_PID6)
-#define bfin_write_SDH_PID6(val) bfin_write16(SDH_PID6, val)
-#define bfin_read_SDH_PID7() bfin_read16(SDH_PID7)
-#define bfin_write_SDH_PID7(val) bfin_write16(SDH_PID7, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
-
-/* USB Control Registers */
-
-#define bfin_read_USB_FADDR() bfin_read16(USB_FADDR)
-#define bfin_write_USB_FADDR(val) bfin_write16(USB_FADDR, val)
-#define bfin_read_USB_POWER() bfin_read16(USB_POWER)
-#define bfin_write_USB_POWER(val) bfin_write16(USB_POWER, val)
-#define bfin_read_USB_INTRTX() bfin_read16(USB_INTRTX)
-#define bfin_write_USB_INTRTX(val) bfin_write16(USB_INTRTX, val)
-#define bfin_read_USB_INTRRX() bfin_read16(USB_INTRRX)
-#define bfin_write_USB_INTRRX(val) bfin_write16(USB_INTRRX, val)
-#define bfin_read_USB_INTRTXE() bfin_read16(USB_INTRTXE)
-#define bfin_write_USB_INTRTXE(val) bfin_write16(USB_INTRTXE, val)
-#define bfin_read_USB_INTRRXE() bfin_read16(USB_INTRRXE)
-#define bfin_write_USB_INTRRXE(val) bfin_write16(USB_INTRRXE, val)
-#define bfin_read_USB_INTRUSB() bfin_read16(USB_INTRUSB)
-#define bfin_write_USB_INTRUSB(val) bfin_write16(USB_INTRUSB, val)
-#define bfin_read_USB_INTRUSBE() bfin_read16(USB_INTRUSBE)
-#define bfin_write_USB_INTRUSBE(val) bfin_write16(USB_INTRUSBE, val)
-#define bfin_read_USB_FRAME() bfin_read16(USB_FRAME)
-#define bfin_write_USB_FRAME(val) bfin_write16(USB_FRAME, val)
-#define bfin_read_USB_INDEX() bfin_read16(USB_INDEX)
-#define bfin_write_USB_INDEX(val) bfin_write16(USB_INDEX, val)
-#define bfin_read_USB_TESTMODE() bfin_read16(USB_TESTMODE)
-#define bfin_write_USB_TESTMODE(val) bfin_write16(USB_TESTMODE, val)
-#define bfin_read_USB_GLOBINTR() bfin_read16(USB_GLOBINTR)
-#define bfin_write_USB_GLOBINTR(val) bfin_write16(USB_GLOBINTR, val)
-#define bfin_read_USB_GLOBAL_CTL() bfin_read16(USB_GLOBAL_CTL)
-#define bfin_write_USB_GLOBAL_CTL(val) bfin_write16(USB_GLOBAL_CTL, val)
-
-/* USB Packet Control Registers */
-
-#define bfin_read_USB_TX_MAX_PACKET() bfin_read16(USB_TX_MAX_PACKET)
-#define bfin_write_USB_TX_MAX_PACKET(val) bfin_write16(USB_TX_MAX_PACKET, val)
-#define bfin_read_USB_CSR0() bfin_read16(USB_CSR0)
-#define bfin_write_USB_CSR0(val) bfin_write16(USB_CSR0, val)
-#define bfin_read_USB_TXCSR() bfin_read16(USB_TXCSR)
-#define bfin_write_USB_TXCSR(val) bfin_write16(USB_TXCSR, val)
-#define bfin_read_USB_RX_MAX_PACKET() bfin_read16(USB_RX_MAX_PACKET)
-#define bfin_write_USB_RX_MAX_PACKET(val) bfin_write16(USB_RX_MAX_PACKET, val)
-#define bfin_read_USB_RXCSR() bfin_read16(USB_RXCSR)
-#define bfin_write_USB_RXCSR(val) bfin_write16(USB_RXCSR, val)
-#define bfin_read_USB_COUNT0() bfin_read16(USB_COUNT0)
-#define bfin_write_USB_COUNT0(val) bfin_write16(USB_COUNT0, val)
-#define bfin_read_USB_RXCOUNT() bfin_read16(USB_RXCOUNT)
-#define bfin_write_USB_RXCOUNT(val) bfin_write16(USB_RXCOUNT, val)
-#define bfin_read_USB_TXTYPE() bfin_read16(USB_TXTYPE)
-#define bfin_write_USB_TXTYPE(val) bfin_write16(USB_TXTYPE, val)
-#define bfin_read_USB_NAKLIMIT0() bfin_read16(USB_NAKLIMIT0)
-#define bfin_write_USB_NAKLIMIT0(val) bfin_write16(USB_NAKLIMIT0, val)
-#define bfin_read_USB_TXINTERVAL() bfin_read16(USB_TXINTERVAL)
-#define bfin_write_USB_TXINTERVAL(val) bfin_write16(USB_TXINTERVAL, val)
-#define bfin_read_USB_RXTYPE() bfin_read16(USB_RXTYPE)
-#define bfin_write_USB_RXTYPE(val) bfin_write16(USB_RXTYPE, val)
-#define bfin_read_USB_RXINTERVAL() bfin_read16(USB_RXINTERVAL)
-#define bfin_write_USB_RXINTERVAL(val) bfin_write16(USB_RXINTERVAL, val)
-#define bfin_read_USB_TXCOUNT() bfin_read16(USB_TXCOUNT)
-#define bfin_write_USB_TXCOUNT(val) bfin_write16(USB_TXCOUNT, val)
-
-/* USB Endpoint FIFO Registers */
-
-#define bfin_read_USB_EP0_FIFO() bfin_read16(USB_EP0_FIFO)
-#define bfin_write_USB_EP0_FIFO(val) bfin_write16(USB_EP0_FIFO, val)
-#define bfin_read_USB_EP1_FIFO() bfin_read16(USB_EP1_FIFO)
-#define bfin_write_USB_EP1_FIFO(val) bfin_write16(USB_EP1_FIFO, val)
-#define bfin_read_USB_EP2_FIFO() bfin_read16(USB_EP2_FIFO)
-#define bfin_write_USB_EP2_FIFO(val) bfin_write16(USB_EP2_FIFO, val)
-#define bfin_read_USB_EP3_FIFO() bfin_read16(USB_EP3_FIFO)
-#define bfin_write_USB_EP3_FIFO(val) bfin_write16(USB_EP3_FIFO, val)
-#define bfin_read_USB_EP4_FIFO() bfin_read16(USB_EP4_FIFO)
-#define bfin_write_USB_EP4_FIFO(val) bfin_write16(USB_EP4_FIFO, val)
-#define bfin_read_USB_EP5_FIFO() bfin_read16(USB_EP5_FIFO)
-#define bfin_write_USB_EP5_FIFO(val) bfin_write16(USB_EP5_FIFO, val)
-#define bfin_read_USB_EP6_FIFO() bfin_read16(USB_EP6_FIFO)
-#define bfin_write_USB_EP6_FIFO(val) bfin_write16(USB_EP6_FIFO, val)
-#define bfin_read_USB_EP7_FIFO() bfin_read16(USB_EP7_FIFO)
-#define bfin_write_USB_EP7_FIFO(val) bfin_write16(USB_EP7_FIFO, val)
-
-/* USB OTG Control Registers */
-
-#define bfin_read_USB_OTG_DEV_CTL() bfin_read16(USB_OTG_DEV_CTL)
-#define bfin_write_USB_OTG_DEV_CTL(val) bfin_write16(USB_OTG_DEV_CTL, val)
-#define bfin_read_USB_OTG_VBUS_IRQ() bfin_read16(USB_OTG_VBUS_IRQ)
-#define bfin_write_USB_OTG_VBUS_IRQ(val) bfin_write16(USB_OTG_VBUS_IRQ, val)
-#define bfin_read_USB_OTG_VBUS_MASK() bfin_read16(USB_OTG_VBUS_MASK)
-#define bfin_write_USB_OTG_VBUS_MASK(val) bfin_write16(USB_OTG_VBUS_MASK, val)
-
-/* USB Phy Control Registers */
-
-#define bfin_read_USB_LINKINFO() bfin_read16(USB_LINKINFO)
-#define bfin_write_USB_LINKINFO(val) bfin_write16(USB_LINKINFO, val)
-#define bfin_read_USB_VPLEN() bfin_read16(USB_VPLEN)
-#define bfin_write_USB_VPLEN(val) bfin_write16(USB_VPLEN, val)
-#define bfin_read_USB_HS_EOF1() bfin_read16(USB_HS_EOF1)
-#define bfin_write_USB_HS_EOF1(val) bfin_write16(USB_HS_EOF1, val)
-#define bfin_read_USB_FS_EOF1() bfin_read16(USB_FS_EOF1)
-#define bfin_write_USB_FS_EOF1(val) bfin_write16(USB_FS_EOF1, val)
-#define bfin_read_USB_LS_EOF1() bfin_read16(USB_LS_EOF1)
-#define bfin_write_USB_LS_EOF1(val) bfin_write16(USB_LS_EOF1, val)
-
-/* (APHY_CNTRL is for ADI usage only) */
-
-#define bfin_read_USB_APHY_CNTRL() bfin_read16(USB_APHY_CNTRL)
-#define bfin_write_USB_APHY_CNTRL(val) bfin_write16(USB_APHY_CNTRL, val)
-
-/* (APHY_CALIB is for ADI usage only) */
-
-#define bfin_read_USB_APHY_CALIB() bfin_read16(USB_APHY_CALIB)
-#define bfin_write_USB_APHY_CALIB(val) bfin_write16(USB_APHY_CALIB, val)
-#define bfin_read_USB_APHY_CNTRL2() bfin_read16(USB_APHY_CNTRL2)
-#define bfin_write_USB_APHY_CNTRL2(val) bfin_write16(USB_APHY_CNTRL2, val)
-
-/* (PHY_TEST is for ADI usage only) */
-
-#define bfin_read_USB_PHY_TEST() bfin_read16(USB_PHY_TEST)
-#define bfin_write_USB_PHY_TEST(val) bfin_write16(USB_PHY_TEST, val)
-#define bfin_read_USB_PLLOSC_CTRL() bfin_read16(USB_PLLOSC_CTRL)
-#define bfin_write_USB_PLLOSC_CTRL(val) bfin_write16(USB_PLLOSC_CTRL, val)
-#define bfin_read_USB_SRP_CLKDIV() bfin_read16(USB_SRP_CLKDIV)
-#define bfin_write_USB_SRP_CLKDIV(val) bfin_write16(USB_SRP_CLKDIV, val)
-
-/* USB Endpoint 0 Control Registers */
-
-#define bfin_read_USB_EP_NI0_TXMAXP() bfin_read16(USB_EP_NI0_TXMAXP)
-#define bfin_write_USB_EP_NI0_TXMAXP(val) bfin_write16(USB_EP_NI0_TXMAXP, val)
-#define bfin_read_USB_EP_NI0_TXCSR() bfin_read16(USB_EP_NI0_TXCSR)
-#define bfin_write_USB_EP_NI0_TXCSR(val) bfin_write16(USB_EP_NI0_TXCSR, val)
-#define bfin_read_USB_EP_NI0_RXMAXP() bfin_read16(USB_EP_NI0_RXMAXP)
-#define bfin_write_USB_EP_NI0_RXMAXP(val) bfin_write16(USB_EP_NI0_RXMAXP, val)
-#define bfin_read_USB_EP_NI0_RXCSR() bfin_read16(USB_EP_NI0_RXCSR)
-#define bfin_write_USB_EP_NI0_RXCSR(val) bfin_write16(USB_EP_NI0_RXCSR, val)
-#define bfin_read_USB_EP_NI0_RXCOUNT() bfin_read16(USB_EP_NI0_RXCOUNT)
-#define bfin_write_USB_EP_NI0_RXCOUNT(val) bfin_write16(USB_EP_NI0_RXCOUNT, val)
-#define bfin_read_USB_EP_NI0_TXTYPE() bfin_read16(USB_EP_NI0_TXTYPE)
-#define bfin_write_USB_EP_NI0_TXTYPE(val) bfin_write16(USB_EP_NI0_TXTYPE, val)
-#define bfin_read_USB_EP_NI0_TXINTERVAL() bfin_read16(USB_EP_NI0_TXINTERVAL)
-#define bfin_write_USB_EP_NI0_TXINTERVAL(val) bfin_write16(USB_EP_NI0_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI0_RXTYPE() bfin_read16(USB_EP_NI0_RXTYPE)
-#define bfin_write_USB_EP_NI0_RXTYPE(val) bfin_write16(USB_EP_NI0_RXTYPE, val)
-#define bfin_read_USB_EP_NI0_RXINTERVAL() bfin_read16(USB_EP_NI0_RXINTERVAL)
-#define bfin_write_USB_EP_NI0_RXINTERVAL(val) bfin_write16(USB_EP_NI0_RXINTERVAL, val)
-
-/* USB Endpoint 1 Control Registers */
-
-#define bfin_read_USB_EP_NI0_TXCOUNT() bfin_read16(USB_EP_NI0_TXCOUNT)
-#define bfin_write_USB_EP_NI0_TXCOUNT(val) bfin_write16(USB_EP_NI0_TXCOUNT, val)
-#define bfin_read_USB_EP_NI1_TXMAXP() bfin_read16(USB_EP_NI1_TXMAXP)
-#define bfin_write_USB_EP_NI1_TXMAXP(val) bfin_write16(USB_EP_NI1_TXMAXP, val)
-#define bfin_read_USB_EP_NI1_TXCSR() bfin_read16(USB_EP_NI1_TXCSR)
-#define bfin_write_USB_EP_NI1_TXCSR(val) bfin_write16(USB_EP_NI1_TXCSR, val)
-#define bfin_read_USB_EP_NI1_RXMAXP() bfin_read16(USB_EP_NI1_RXMAXP)
-#define bfin_write_USB_EP_NI1_RXMAXP(val) bfin_write16(USB_EP_NI1_RXMAXP, val)
-#define bfin_read_USB_EP_NI1_RXCSR() bfin_read16(USB_EP_NI1_RXCSR)
-#define bfin_write_USB_EP_NI1_RXCSR(val) bfin_write16(USB_EP_NI1_RXCSR, val)
-#define bfin_read_USB_EP_NI1_RXCOUNT() bfin_read16(USB_EP_NI1_RXCOUNT)
-#define bfin_write_USB_EP_NI1_RXCOUNT(val) bfin_write16(USB_EP_NI1_RXCOUNT, val)
-#define bfin_read_USB_EP_NI1_TXTYPE() bfin_read16(USB_EP_NI1_TXTYPE)
-#define bfin_write_USB_EP_NI1_TXTYPE(val) bfin_write16(USB_EP_NI1_TXTYPE, val)
-#define bfin_read_USB_EP_NI1_TXINTERVAL() bfin_read16(USB_EP_NI1_TXINTERVAL)
-#define bfin_write_USB_EP_NI1_TXINTERVAL(val) bfin_write16(USB_EP_NI1_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI1_RXTYPE() bfin_read16(USB_EP_NI1_RXTYPE)
-#define bfin_write_USB_EP_NI1_RXTYPE(val) bfin_write16(USB_EP_NI1_RXTYPE, val)
-#define bfin_read_USB_EP_NI1_RXINTERVAL() bfin_read16(USB_EP_NI1_RXINTERVAL)
-#define bfin_write_USB_EP_NI1_RXINTERVAL(val) bfin_write16(USB_EP_NI1_RXINTERVAL, val)
-
-/* USB Endpoint 2 Control Registers */
-
-#define bfin_read_USB_EP_NI1_TXCOUNT() bfin_read16(USB_EP_NI1_TXCOUNT)
-#define bfin_write_USB_EP_NI1_TXCOUNT(val) bfin_write16(USB_EP_NI1_TXCOUNT, val)
-#define bfin_read_USB_EP_NI2_TXMAXP() bfin_read16(USB_EP_NI2_TXMAXP)
-#define bfin_write_USB_EP_NI2_TXMAXP(val) bfin_write16(USB_EP_NI2_TXMAXP, val)
-#define bfin_read_USB_EP_NI2_TXCSR() bfin_read16(USB_EP_NI2_TXCSR)
-#define bfin_write_USB_EP_NI2_TXCSR(val) bfin_write16(USB_EP_NI2_TXCSR, val)
-#define bfin_read_USB_EP_NI2_RXMAXP() bfin_read16(USB_EP_NI2_RXMAXP)
-#define bfin_write_USB_EP_NI2_RXMAXP(val) bfin_write16(USB_EP_NI2_RXMAXP, val)
-#define bfin_read_USB_EP_NI2_RXCSR() bfin_read16(USB_EP_NI2_RXCSR)
-#define bfin_write_USB_EP_NI2_RXCSR(val) bfin_write16(USB_EP_NI2_RXCSR, val)
-#define bfin_read_USB_EP_NI2_RXCOUNT() bfin_read16(USB_EP_NI2_RXCOUNT)
-#define bfin_write_USB_EP_NI2_RXCOUNT(val) bfin_write16(USB_EP_NI2_RXCOUNT, val)
-#define bfin_read_USB_EP_NI2_TXTYPE() bfin_read16(USB_EP_NI2_TXTYPE)
-#define bfin_write_USB_EP_NI2_TXTYPE(val) bfin_write16(USB_EP_NI2_TXTYPE, val)
-#define bfin_read_USB_EP_NI2_TXINTERVAL() bfin_read16(USB_EP_NI2_TXINTERVAL)
-#define bfin_write_USB_EP_NI2_TXINTERVAL(val) bfin_write16(USB_EP_NI2_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI2_RXTYPE() bfin_read16(USB_EP_NI2_RXTYPE)
-#define bfin_write_USB_EP_NI2_RXTYPE(val) bfin_write16(USB_EP_NI2_RXTYPE, val)
-#define bfin_read_USB_EP_NI2_RXINTERVAL() bfin_read16(USB_EP_NI2_RXINTERVAL)
-#define bfin_write_USB_EP_NI2_RXINTERVAL(val) bfin_write16(USB_EP_NI2_RXINTERVAL, val)
-
-/* USB Endpoint 3 Control Registers */
-
-#define bfin_read_USB_EP_NI2_TXCOUNT() bfin_read16(USB_EP_NI2_TXCOUNT)
-#define bfin_write_USB_EP_NI2_TXCOUNT(val) bfin_write16(USB_EP_NI2_TXCOUNT, val)
-#define bfin_read_USB_EP_NI3_TXMAXP() bfin_read16(USB_EP_NI3_TXMAXP)
-#define bfin_write_USB_EP_NI3_TXMAXP(val) bfin_write16(USB_EP_NI3_TXMAXP, val)
-#define bfin_read_USB_EP_NI3_TXCSR() bfin_read16(USB_EP_NI3_TXCSR)
-#define bfin_write_USB_EP_NI3_TXCSR(val) bfin_write16(USB_EP_NI3_TXCSR, val)
-#define bfin_read_USB_EP_NI3_RXMAXP() bfin_read16(USB_EP_NI3_RXMAXP)
-#define bfin_write_USB_EP_NI3_RXMAXP(val) bfin_write16(USB_EP_NI3_RXMAXP, val)
-#define bfin_read_USB_EP_NI3_RXCSR() bfin_read16(USB_EP_NI3_RXCSR)
-#define bfin_write_USB_EP_NI3_RXCSR(val) bfin_write16(USB_EP_NI3_RXCSR, val)
-#define bfin_read_USB_EP_NI3_RXCOUNT() bfin_read16(USB_EP_NI3_RXCOUNT)
-#define bfin_write_USB_EP_NI3_RXCOUNT(val) bfin_write16(USB_EP_NI3_RXCOUNT, val)
-#define bfin_read_USB_EP_NI3_TXTYPE() bfin_read16(USB_EP_NI3_TXTYPE)
-#define bfin_write_USB_EP_NI3_TXTYPE(val) bfin_write16(USB_EP_NI3_TXTYPE, val)
-#define bfin_read_USB_EP_NI3_TXINTERVAL() bfin_read16(USB_EP_NI3_TXINTERVAL)
-#define bfin_write_USB_EP_NI3_TXINTERVAL(val) bfin_write16(USB_EP_NI3_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI3_RXTYPE() bfin_read16(USB_EP_NI3_RXTYPE)
-#define bfin_write_USB_EP_NI3_RXTYPE(val) bfin_write16(USB_EP_NI3_RXTYPE, val)
-#define bfin_read_USB_EP_NI3_RXINTERVAL() bfin_read16(USB_EP_NI3_RXINTERVAL)
-#define bfin_write_USB_EP_NI3_RXINTERVAL(val) bfin_write16(USB_EP_NI3_RXINTERVAL, val)
-
-/* USB Endpoint 4 Control Registers */
-
-#define bfin_read_USB_EP_NI3_TXCOUNT() bfin_read16(USB_EP_NI3_TXCOUNT)
-#define bfin_write_USB_EP_NI3_TXCOUNT(val) bfin_write16(USB_EP_NI3_TXCOUNT, val)
-#define bfin_read_USB_EP_NI4_TXMAXP() bfin_read16(USB_EP_NI4_TXMAXP)
-#define bfin_write_USB_EP_NI4_TXMAXP(val) bfin_write16(USB_EP_NI4_TXMAXP, val)
-#define bfin_read_USB_EP_NI4_TXCSR() bfin_read16(USB_EP_NI4_TXCSR)
-#define bfin_write_USB_EP_NI4_TXCSR(val) bfin_write16(USB_EP_NI4_TXCSR, val)
-#define bfin_read_USB_EP_NI4_RXMAXP() bfin_read16(USB_EP_NI4_RXMAXP)
-#define bfin_write_USB_EP_NI4_RXMAXP(val) bfin_write16(USB_EP_NI4_RXMAXP, val)
-#define bfin_read_USB_EP_NI4_RXCSR() bfin_read16(USB_EP_NI4_RXCSR)
-#define bfin_write_USB_EP_NI4_RXCSR(val) bfin_write16(USB_EP_NI4_RXCSR, val)
-#define bfin_read_USB_EP_NI4_RXCOUNT() bfin_read16(USB_EP_NI4_RXCOUNT)
-#define bfin_write_USB_EP_NI4_RXCOUNT(val) bfin_write16(USB_EP_NI4_RXCOUNT, val)
-#define bfin_read_USB_EP_NI4_TXTYPE() bfin_read16(USB_EP_NI4_TXTYPE)
-#define bfin_write_USB_EP_NI4_TXTYPE(val) bfin_write16(USB_EP_NI4_TXTYPE, val)
-#define bfin_read_USB_EP_NI4_TXINTERVAL() bfin_read16(USB_EP_NI4_TXINTERVAL)
-#define bfin_write_USB_EP_NI4_TXINTERVAL(val) bfin_write16(USB_EP_NI4_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI4_RXTYPE() bfin_read16(USB_EP_NI4_RXTYPE)
-#define bfin_write_USB_EP_NI4_RXTYPE(val) bfin_write16(USB_EP_NI4_RXTYPE, val)
-#define bfin_read_USB_EP_NI4_RXINTERVAL() bfin_read16(USB_EP_NI4_RXINTERVAL)
-#define bfin_write_USB_EP_NI4_RXINTERVAL(val) bfin_write16(USB_EP_NI4_RXINTERVAL, val)
-
-/* USB Endpoint 5 Control Registers */
-
-#define bfin_read_USB_EP_NI4_TXCOUNT() bfin_read16(USB_EP_NI4_TXCOUNT)
-#define bfin_write_USB_EP_NI4_TXCOUNT(val) bfin_write16(USB_EP_NI4_TXCOUNT, val)
-#define bfin_read_USB_EP_NI5_TXMAXP() bfin_read16(USB_EP_NI5_TXMAXP)
-#define bfin_write_USB_EP_NI5_TXMAXP(val) bfin_write16(USB_EP_NI5_TXMAXP, val)
-#define bfin_read_USB_EP_NI5_TXCSR() bfin_read16(USB_EP_NI5_TXCSR)
-#define bfin_write_USB_EP_NI5_TXCSR(val) bfin_write16(USB_EP_NI5_TXCSR, val)
-#define bfin_read_USB_EP_NI5_RXMAXP() bfin_read16(USB_EP_NI5_RXMAXP)
-#define bfin_write_USB_EP_NI5_RXMAXP(val) bfin_write16(USB_EP_NI5_RXMAXP, val)
-#define bfin_read_USB_EP_NI5_RXCSR() bfin_read16(USB_EP_NI5_RXCSR)
-#define bfin_write_USB_EP_NI5_RXCSR(val) bfin_write16(USB_EP_NI5_RXCSR, val)
-#define bfin_read_USB_EP_NI5_RXCOUNT() bfin_read16(USB_EP_NI5_RXCOUNT)
-#define bfin_write_USB_EP_NI5_RXCOUNT(val) bfin_write16(USB_EP_NI5_RXCOUNT, val)
-#define bfin_read_USB_EP_NI5_TXTYPE() bfin_read16(USB_EP_NI5_TXTYPE)
-#define bfin_write_USB_EP_NI5_TXTYPE(val) bfin_write16(USB_EP_NI5_TXTYPE, val)
-#define bfin_read_USB_EP_NI5_TXINTERVAL() bfin_read16(USB_EP_NI5_TXINTERVAL)
-#define bfin_write_USB_EP_NI5_TXINTERVAL(val) bfin_write16(USB_EP_NI5_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI5_RXTYPE() bfin_read16(USB_EP_NI5_RXTYPE)
-#define bfin_write_USB_EP_NI5_RXTYPE(val) bfin_write16(USB_EP_NI5_RXTYPE, val)
-#define bfin_read_USB_EP_NI5_RXINTERVAL() bfin_read16(USB_EP_NI5_RXINTERVAL)
-#define bfin_write_USB_EP_NI5_RXINTERVAL(val) bfin_write16(USB_EP_NI5_RXINTERVAL, val)
-
-/* USB Endpoint 6 Control Registers */
-
-#define bfin_read_USB_EP_NI5_TXCOUNT() bfin_read16(USB_EP_NI5_TXCOUNT)
-#define bfin_write_USB_EP_NI5_TXCOUNT(val) bfin_write16(USB_EP_NI5_TXCOUNT, val)
-#define bfin_read_USB_EP_NI6_TXMAXP() bfin_read16(USB_EP_NI6_TXMAXP)
-#define bfin_write_USB_EP_NI6_TXMAXP(val) bfin_write16(USB_EP_NI6_TXMAXP, val)
-#define bfin_read_USB_EP_NI6_TXCSR() bfin_read16(USB_EP_NI6_TXCSR)
-#define bfin_write_USB_EP_NI6_TXCSR(val) bfin_write16(USB_EP_NI6_TXCSR, val)
-#define bfin_read_USB_EP_NI6_RXMAXP() bfin_read16(USB_EP_NI6_RXMAXP)
-#define bfin_write_USB_EP_NI6_RXMAXP(val) bfin_write16(USB_EP_NI6_RXMAXP, val)
-#define bfin_read_USB_EP_NI6_RXCSR() bfin_read16(USB_EP_NI6_RXCSR)
-#define bfin_write_USB_EP_NI6_RXCSR(val) bfin_write16(USB_EP_NI6_RXCSR, val)
-#define bfin_read_USB_EP_NI6_RXCOUNT() bfin_read16(USB_EP_NI6_RXCOUNT)
-#define bfin_write_USB_EP_NI6_RXCOUNT(val) bfin_write16(USB_EP_NI6_RXCOUNT, val)
-#define bfin_read_USB_EP_NI6_TXTYPE() bfin_read16(USB_EP_NI6_TXTYPE)
-#define bfin_write_USB_EP_NI6_TXTYPE(val) bfin_write16(USB_EP_NI6_TXTYPE, val)
-#define bfin_read_USB_EP_NI6_TXINTERVAL() bfin_read16(USB_EP_NI6_TXINTERVAL)
-#define bfin_write_USB_EP_NI6_TXINTERVAL(val) bfin_write16(USB_EP_NI6_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI6_RXTYPE() bfin_read16(USB_EP_NI6_RXTYPE)
-#define bfin_write_USB_EP_NI6_RXTYPE(val) bfin_write16(USB_EP_NI6_RXTYPE, val)
-#define bfin_read_USB_EP_NI6_RXINTERVAL() bfin_read16(USB_EP_NI6_RXINTERVAL)
-#define bfin_write_USB_EP_NI6_RXINTERVAL(val) bfin_write16(USB_EP_NI6_RXINTERVAL, val)
-
-/* USB Endpoint 7 Control Registers */
-
-#define bfin_read_USB_EP_NI6_TXCOUNT() bfin_read16(USB_EP_NI6_TXCOUNT)
-#define bfin_write_USB_EP_NI6_TXCOUNT(val) bfin_write16(USB_EP_NI6_TXCOUNT, val)
-#define bfin_read_USB_EP_NI7_TXMAXP() bfin_read16(USB_EP_NI7_TXMAXP)
-#define bfin_write_USB_EP_NI7_TXMAXP(val) bfin_write16(USB_EP_NI7_TXMAXP, val)
-#define bfin_read_USB_EP_NI7_TXCSR() bfin_read16(USB_EP_NI7_TXCSR)
-#define bfin_write_USB_EP_NI7_TXCSR(val) bfin_write16(USB_EP_NI7_TXCSR, val)
-#define bfin_read_USB_EP_NI7_RXMAXP() bfin_read16(USB_EP_NI7_RXMAXP)
-#define bfin_write_USB_EP_NI7_RXMAXP(val) bfin_write16(USB_EP_NI7_RXMAXP, val)
-#define bfin_read_USB_EP_NI7_RXCSR() bfin_read16(USB_EP_NI7_RXCSR)
-#define bfin_write_USB_EP_NI7_RXCSR(val) bfin_write16(USB_EP_NI7_RXCSR, val)
-#define bfin_read_USB_EP_NI7_RXCOUNT() bfin_read16(USB_EP_NI7_RXCOUNT)
-#define bfin_write_USB_EP_NI7_RXCOUNT(val) bfin_write16(USB_EP_NI7_RXCOUNT, val)
-#define bfin_read_USB_EP_NI7_TXTYPE() bfin_read16(USB_EP_NI7_TXTYPE)
-#define bfin_write_USB_EP_NI7_TXTYPE(val) bfin_write16(USB_EP_NI7_TXTYPE, val)
-#define bfin_read_USB_EP_NI7_TXINTERVAL() bfin_read16(USB_EP_NI7_TXINTERVAL)
-#define bfin_write_USB_EP_NI7_TXINTERVAL(val) bfin_write16(USB_EP_NI7_TXINTERVAL, val)
-#define bfin_read_USB_EP_NI7_RXTYPE() bfin_read16(USB_EP_NI7_RXTYPE)
-#define bfin_write_USB_EP_NI7_RXTYPE(val) bfin_write16(USB_EP_NI7_RXTYPE, val)
-#define bfin_read_USB_EP_NI7_RXINTERVAL() bfin_read16(USB_EP_NI7_RXINTERVAL)
-#define bfin_write_USB_EP_NI7_RXINTERVAL(val) bfin_write16(USB_EP_NI7_RXINTERVAL, val)
-#define bfin_read_USB_EP_NI7_TXCOUNT() bfin_read16(USB_EP_NI7_TXCOUNT)
-#define bfin_write_USB_EP_NI7_TXCOUNT(val) bfin_write16(USB_EP_NI7_TXCOUNT, val)
-#define bfin_read_USB_DMA_INTERRUPT() bfin_read16(USB_DMA_INTERRUPT)
-#define bfin_write_USB_DMA_INTERRUPT(val) bfin_write16(USB_DMA_INTERRUPT, val)
-
-/* USB Channel 0 Config Registers */
-
-#define bfin_read_USB_DMA0CONTROL() bfin_read16(USB_DMA0CONTROL)
-#define bfin_write_USB_DMA0CONTROL(val) bfin_write16(USB_DMA0CONTROL, val)
-#define bfin_read_USB_DMA0ADDRLOW() bfin_read16(USB_DMA0ADDRLOW)
-#define bfin_write_USB_DMA0ADDRLOW(val) bfin_write16(USB_DMA0ADDRLOW, val)
-#define bfin_read_USB_DMA0ADDRHIGH() bfin_read16(USB_DMA0ADDRHIGH)
-#define bfin_write_USB_DMA0ADDRHIGH(val) bfin_write16(USB_DMA0ADDRHIGH, val)
-#define bfin_read_USB_DMA0COUNTLOW() bfin_read16(USB_DMA0COUNTLOW)
-#define bfin_write_USB_DMA0COUNTLOW(val) bfin_write16(USB_DMA0COUNTLOW, val)
-#define bfin_read_USB_DMA0COUNTHIGH() bfin_read16(USB_DMA0COUNTHIGH)
-#define bfin_write_USB_DMA0COUNTHIGH(val) bfin_write16(USB_DMA0COUNTHIGH, val)
-
-/* USB Channel 1 Config Registers */
-
-#define bfin_read_USB_DMA1CONTROL() bfin_read16(USB_DMA1CONTROL)
-#define bfin_write_USB_DMA1CONTROL(val) bfin_write16(USB_DMA1CONTROL, val)
-#define bfin_read_USB_DMA1ADDRLOW() bfin_read16(USB_DMA1ADDRLOW)
-#define bfin_write_USB_DMA1ADDRLOW(val) bfin_write16(USB_DMA1ADDRLOW, val)
-#define bfin_read_USB_DMA1ADDRHIGH() bfin_read16(USB_DMA1ADDRHIGH)
-#define bfin_write_USB_DMA1ADDRHIGH(val) bfin_write16(USB_DMA1ADDRHIGH, val)
-#define bfin_read_USB_DMA1COUNTLOW() bfin_read16(USB_DMA1COUNTLOW)
-#define bfin_write_USB_DMA1COUNTLOW(val) bfin_write16(USB_DMA1COUNTLOW, val)
-#define bfin_read_USB_DMA1COUNTHIGH() bfin_read16(USB_DMA1COUNTHIGH)
-#define bfin_write_USB_DMA1COUNTHIGH(val) bfin_write16(USB_DMA1COUNTHIGH, val)
-
-/* USB Channel 2 Config Registers */
-
-#define bfin_read_USB_DMA2CONTROL() bfin_read16(USB_DMA2CONTROL)
-#define bfin_write_USB_DMA2CONTROL(val) bfin_write16(USB_DMA2CONTROL, val)
-#define bfin_read_USB_DMA2ADDRLOW() bfin_read16(USB_DMA2ADDRLOW)
-#define bfin_write_USB_DMA2ADDRLOW(val) bfin_write16(USB_DMA2ADDRLOW, val)
-#define bfin_read_USB_DMA2ADDRHIGH() bfin_read16(USB_DMA2ADDRHIGH)
-#define bfin_write_USB_DMA2ADDRHIGH(val) bfin_write16(USB_DMA2ADDRHIGH, val)
-#define bfin_read_USB_DMA2COUNTLOW() bfin_read16(USB_DMA2COUNTLOW)
-#define bfin_write_USB_DMA2COUNTLOW(val) bfin_write16(USB_DMA2COUNTLOW, val)
-#define bfin_read_USB_DMA2COUNTHIGH() bfin_read16(USB_DMA2COUNTHIGH)
-#define bfin_write_USB_DMA2COUNTHIGH(val) bfin_write16(USB_DMA2COUNTHIGH, val)
-
-/* USB Channel 3 Config Registers */
-
-#define bfin_read_USB_DMA3CONTROL() bfin_read16(USB_DMA3CONTROL)
-#define bfin_write_USB_DMA3CONTROL(val) bfin_write16(USB_DMA3CONTROL, val)
-#define bfin_read_USB_DMA3ADDRLOW() bfin_read16(USB_DMA3ADDRLOW)
-#define bfin_write_USB_DMA3ADDRLOW(val) bfin_write16(USB_DMA3ADDRLOW, val)
-#define bfin_read_USB_DMA3ADDRHIGH() bfin_read16(USB_DMA3ADDRHIGH)
-#define bfin_write_USB_DMA3ADDRHIGH(val) bfin_write16(USB_DMA3ADDRHIGH, val)
-#define bfin_read_USB_DMA3COUNTLOW() bfin_read16(USB_DMA3COUNTLOW)
-#define bfin_write_USB_DMA3COUNTLOW(val) bfin_write16(USB_DMA3COUNTLOW, val)
-#define bfin_read_USB_DMA3COUNTHIGH() bfin_read16(USB_DMA3COUNTHIGH)
-#define bfin_write_USB_DMA3COUNTHIGH(val) bfin_write16(USB_DMA3COUNTHIGH, val)
-
-/* USB Channel 4 Config Registers */
-
-#define bfin_read_USB_DMA4CONTROL() bfin_read16(USB_DMA4CONTROL)
-#define bfin_write_USB_DMA4CONTROL(val) bfin_write16(USB_DMA4CONTROL, val)
-#define bfin_read_USB_DMA4ADDRLOW() bfin_read16(USB_DMA4ADDRLOW)
-#define bfin_write_USB_DMA4ADDRLOW(val) bfin_write16(USB_DMA4ADDRLOW, val)
-#define bfin_read_USB_DMA4ADDRHIGH() bfin_read16(USB_DMA4ADDRHIGH)
-#define bfin_write_USB_DMA4ADDRHIGH(val) bfin_write16(USB_DMA4ADDRHIGH, val)
-#define bfin_read_USB_DMA4COUNTLOW() bfin_read16(USB_DMA4COUNTLOW)
-#define bfin_write_USB_DMA4COUNTLOW(val) bfin_write16(USB_DMA4COUNTLOW, val)
-#define bfin_read_USB_DMA4COUNTHIGH() bfin_read16(USB_DMA4COUNTHIGH)
-#define bfin_write_USB_DMA4COUNTHIGH(val) bfin_write16(USB_DMA4COUNTHIGH, val)
-
-/* USB Channel 5 Config Registers */
-
-#define bfin_read_USB_DMA5CONTROL() bfin_read16(USB_DMA5CONTROL)
-#define bfin_write_USB_DMA5CONTROL(val) bfin_write16(USB_DMA5CONTROL, val)
-#define bfin_read_USB_DMA5ADDRLOW() bfin_read16(USB_DMA5ADDRLOW)
-#define bfin_write_USB_DMA5ADDRLOW(val) bfin_write16(USB_DMA5ADDRLOW, val)
-#define bfin_read_USB_DMA5ADDRHIGH() bfin_read16(USB_DMA5ADDRHIGH)
-#define bfin_write_USB_DMA5ADDRHIGH(val) bfin_write16(USB_DMA5ADDRHIGH, val)
-#define bfin_read_USB_DMA5COUNTLOW() bfin_read16(USB_DMA5COUNTLOW)
-#define bfin_write_USB_DMA5COUNTLOW(val) bfin_write16(USB_DMA5COUNTLOW, val)
-#define bfin_read_USB_DMA5COUNTHIGH() bfin_read16(USB_DMA5COUNTHIGH)
-#define bfin_write_USB_DMA5COUNTHIGH(val) bfin_write16(USB_DMA5COUNTHIGH, val)
-
-/* USB Channel 6 Config Registers */
-
-#define bfin_read_USB_DMA6CONTROL() bfin_read16(USB_DMA6CONTROL)
-#define bfin_write_USB_DMA6CONTROL(val) bfin_write16(USB_DMA6CONTROL, val)
-#define bfin_read_USB_DMA6ADDRLOW() bfin_read16(USB_DMA6ADDRLOW)
-#define bfin_write_USB_DMA6ADDRLOW(val) bfin_write16(USB_DMA6ADDRLOW, val)
-#define bfin_read_USB_DMA6ADDRHIGH() bfin_read16(USB_DMA6ADDRHIGH)
-#define bfin_write_USB_DMA6ADDRHIGH(val) bfin_write16(USB_DMA6ADDRHIGH, val)
-#define bfin_read_USB_DMA6COUNTLOW() bfin_read16(USB_DMA6COUNTLOW)
-#define bfin_write_USB_DMA6COUNTLOW(val) bfin_write16(USB_DMA6COUNTLOW, val)
-#define bfin_read_USB_DMA6COUNTHIGH() bfin_read16(USB_DMA6COUNTHIGH)
-#define bfin_write_USB_DMA6COUNTHIGH(val) bfin_write16(USB_DMA6COUNTHIGH, val)
-
-/* USB Channel 7 Config Registers */
-
-#define bfin_read_USB_DMA7CONTROL() bfin_read16(USB_DMA7CONTROL)
-#define bfin_write_USB_DMA7CONTROL(val) bfin_write16(USB_DMA7CONTROL, val)
-#define bfin_read_USB_DMA7ADDRLOW() bfin_read16(USB_DMA7ADDRLOW)
-#define bfin_write_USB_DMA7ADDRLOW(val) bfin_write16(USB_DMA7ADDRLOW, val)
-#define bfin_read_USB_DMA7ADDRHIGH() bfin_read16(USB_DMA7ADDRHIGH)
-#define bfin_write_USB_DMA7ADDRHIGH(val) bfin_write16(USB_DMA7ADDRHIGH, val)
-#define bfin_read_USB_DMA7COUNTLOW() bfin_read16(USB_DMA7COUNTLOW)
-#define bfin_write_USB_DMA7COUNTLOW(val) bfin_write16(USB_DMA7COUNTLOW, val)
-#define bfin_read_USB_DMA7COUNTHIGH() bfin_read16(USB_DMA7COUNTHIGH)
-#define bfin_write_USB_DMA7COUNTHIGH(val) bfin_write16(USB_DMA7COUNTHIGH, val)
-
-/* Keypad Registers */
-
-#define bfin_read_KPAD_CTL() bfin_read16(KPAD_CTL)
-#define bfin_write_KPAD_CTL(val) bfin_write16(KPAD_CTL, val)
-#define bfin_read_KPAD_PRESCALE() bfin_read16(KPAD_PRESCALE)
-#define bfin_write_KPAD_PRESCALE(val) bfin_write16(KPAD_PRESCALE, val)
-#define bfin_read_KPAD_MSEL() bfin_read16(KPAD_MSEL)
-#define bfin_write_KPAD_MSEL(val) bfin_write16(KPAD_MSEL, val)
-#define bfin_read_KPAD_ROWCOL() bfin_read16(KPAD_ROWCOL)
-#define bfin_write_KPAD_ROWCOL(val) bfin_write16(KPAD_ROWCOL, val)
-#define bfin_read_KPAD_STAT() bfin_read16(KPAD_STAT)
-#define bfin_write_KPAD_STAT(val) bfin_write16(KPAD_STAT, val)
-#define bfin_read_KPAD_SOFTEVAL() bfin_read16(KPAD_SOFTEVAL)
-#define bfin_write_KPAD_SOFTEVAL(val) bfin_write16(KPAD_SOFTEVAL, val)
-
-/* Pixel Compositor (PIXC) Registers */
-
-#define bfin_read_PIXC_CTL() bfin_read16(PIXC_CTL)
-#define bfin_write_PIXC_CTL(val) bfin_write16(PIXC_CTL, val)
-#define bfin_read_PIXC_PPL() bfin_read16(PIXC_PPL)
-#define bfin_write_PIXC_PPL(val) bfin_write16(PIXC_PPL, val)
-#define bfin_read_PIXC_LPF() bfin_read16(PIXC_LPF)
-#define bfin_write_PIXC_LPF(val) bfin_write16(PIXC_LPF, val)
-#define bfin_read_PIXC_AHSTART() bfin_read16(PIXC_AHSTART)
-#define bfin_write_PIXC_AHSTART(val) bfin_write16(PIXC_AHSTART, val)
-#define bfin_read_PIXC_AHEND() bfin_read16(PIXC_AHEND)
-#define bfin_write_PIXC_AHEND(val) bfin_write16(PIXC_AHEND, val)
-#define bfin_read_PIXC_AVSTART() bfin_read16(PIXC_AVSTART)
-#define bfin_write_PIXC_AVSTART(val) bfin_write16(PIXC_AVSTART, val)
-#define bfin_read_PIXC_AVEND() bfin_read16(PIXC_AVEND)
-#define bfin_write_PIXC_AVEND(val) bfin_write16(PIXC_AVEND, val)
-#define bfin_read_PIXC_ATRANSP() bfin_read16(PIXC_ATRANSP)
-#define bfin_write_PIXC_ATRANSP(val) bfin_write16(PIXC_ATRANSP, val)
-#define bfin_read_PIXC_BHSTART() bfin_read16(PIXC_BHSTART)
-#define bfin_write_PIXC_BHSTART(val) bfin_write16(PIXC_BHSTART, val)
-#define bfin_read_PIXC_BHEND() bfin_read16(PIXC_BHEND)
-#define bfin_write_PIXC_BHEND(val) bfin_write16(PIXC_BHEND, val)
-#define bfin_read_PIXC_BVSTART() bfin_read16(PIXC_BVSTART)
-#define bfin_write_PIXC_BVSTART(val) bfin_write16(PIXC_BVSTART, val)
-#define bfin_read_PIXC_BVEND() bfin_read16(PIXC_BVEND)
-#define bfin_write_PIXC_BVEND(val) bfin_write16(PIXC_BVEND, val)
-#define bfin_read_PIXC_BTRANSP() bfin_read16(PIXC_BTRANSP)
-#define bfin_write_PIXC_BTRANSP(val) bfin_write16(PIXC_BTRANSP, val)
-#define bfin_read_PIXC_INTRSTAT() bfin_read16(PIXC_INTRSTAT)
-#define bfin_write_PIXC_INTRSTAT(val) bfin_write16(PIXC_INTRSTAT, val)
-#define bfin_read_PIXC_RYCON() bfin_read32(PIXC_RYCON)
-#define bfin_write_PIXC_RYCON(val) bfin_write32(PIXC_RYCON, val)
-#define bfin_read_PIXC_GUCON() bfin_read32(PIXC_GUCON)
-#define bfin_write_PIXC_GUCON(val) bfin_write32(PIXC_GUCON, val)
-#define bfin_read_PIXC_BVCON() bfin_read32(PIXC_BVCON)
-#define bfin_write_PIXC_BVCON(val) bfin_write32(PIXC_BVCON, val)
-#define bfin_read_PIXC_CCBIAS() bfin_read32(PIXC_CCBIAS)
-#define bfin_write_PIXC_CCBIAS(val) bfin_write32(PIXC_CCBIAS, val)
-#define bfin_read_PIXC_TC() bfin_read32(PIXC_TC)
-#define bfin_write_PIXC_TC(val) bfin_write32(PIXC_TC, val)
-
-/* Handshake MDMA 0 Registers */
-
-#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
-
-/* Handshake MDMA 1 Registers */
-
-#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
-
#endif /* _CDEF_BF549_H */
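All of the bfin_read_*/bfin_write_* accessors deleted above follow one pattern: a per-register wrapper around the generic bfin_read16()/bfin_write16() (or the 32-bit variants) MMR helpers, applied to the register addresses defined in the matching defBF54x headers. A minimal sketch of that pattern, assuming a simplified volatile-pointer implementation of the helpers (the kernel's real helpers live in the Blackfin architecture headers and are not reproduced here):

#include <stdint.h>

/* MMR address taken from the keypad definitions later in this patch */
#define KPAD_STAT 0xffc04110 /* Keypad Status Register */

/* Simplified stand-ins for the kernel's bfin_read16()/bfin_write16() helpers */
static inline uint16_t bfin_read16(uint32_t addr)
{
	return *(volatile uint16_t *)(uintptr_t)addr;
}

static inline void bfin_write16(uint32_t addr, uint16_t val)
{
	*(volatile uint16_t *)(uintptr_t)addr = val;
}

/* Each removed accessor is then a one-line wrapper around those helpers, e.g.: */
#define bfin_read_KPAD_STAT()     bfin_read16(KPAD_STAT)
#define bfin_write_KPAD_STAT(val) bfin_write16(KPAD_STAT, val)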
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
index a2e9d9849eb..32f71e6a7c1 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
@@ -2615,17 +2615,6 @@
#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
-/* OTP/FUSE Registers */
-
-#define bfin_read_OTP_CONTROL() bfin_read16(OTP_CONTROL)
-#define bfin_write_OTP_CONTROL(val) bfin_write16(OTP_CONTROL, val)
-#define bfin_read_OTP_BEN() bfin_read16(OTP_BEN)
-#define bfin_write_OTP_BEN(val) bfin_write16(OTP_BEN, val)
-#define bfin_read_OTP_STATUS() bfin_read16(OTP_STATUS)
-#define bfin_write_OTP_STATUS(val) bfin_write16(OTP_STATUS, val)
-#define bfin_read_OTP_TIMING() bfin_read32(OTP_TIMING)
-#define bfin_write_OTP_TIMING(val) bfin_write32(OTP_TIMING, val)
-
/* Security Registers */
#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
@@ -2640,17 +2629,6 @@
#define bfin_read_DMAC1_PERIMUX() bfin_read16(DMAC1_PERIMUX)
#define bfin_write_DMAC1_PERIMUX(val) bfin_write16(DMAC1_PERIMUX, val)
-/* OTP Read/Write Data Buffer Registers */
-
-#define bfin_read_OTP_DATA0() bfin_read32(OTP_DATA0)
-#define bfin_write_OTP_DATA0(val) bfin_write32(OTP_DATA0, val)
-#define bfin_read_OTP_DATA1() bfin_read32(OTP_DATA1)
-#define bfin_write_OTP_DATA1(val) bfin_write32(OTP_DATA1, val)
-#define bfin_read_OTP_DATA2() bfin_read32(OTP_DATA2)
-#define bfin_write_OTP_DATA2(val) bfin_write32(OTP_DATA2, val)
-#define bfin_read_OTP_DATA3() bfin_read32(OTP_DATA3)
-#define bfin_write_OTP_DATA3(val) bfin_write32(OTP_DATA3, val)
-
/* Handshake MDMA is not defined in the shared file because it is not available on the ADSP-BF542 processor */
/* legacy definitions */
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h
index 39f588dcd38..f916c52a148 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h
@@ -624,9 +624,9 @@
#define DMA_READY 0x1 /* DMA Ready */
#define FIFOFULL 0x2 /* FIFO Full */
#define FIFOEMPTY 0x4 /* FIFO Empty */
-#define COMPLETE 0x8 /* DMA Complete */
+#define DMA_COMPLETE 0x8 /* DMA Complete */
#define HSHK 0x10 /* Host Handshake */
-#define TIMEOUT 0x20 /* Host Timeout */
+#define HSTIMEOUT 0x20 /* Host Timeout */
#define HIRQ 0x40 /* Host Interrupt Request */
#define ALLOW_CNFG 0x80 /* Allow New Configuration */
#define DMA_DIR 0x100 /* DMA Direction */
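The COMPLETE -> DMA_COMPLETE and TIMEOUT -> HSTIMEOUT renames above give these host-port status bits prefixed names so they no longer collide with identically named macros elsewhere in the tree. A minimal sketch of the kind of clash being avoided (the second definition below is hypothetical, for illustration only):

/* defBF544.h, before the rename */
#define TIMEOUT 0x20	/* Host Timeout */

/* hypothetical unrelated header pulled into the same C file */
#define TIMEOUT 5000	/* request timeout in milliseconds */

/* The preprocessor warns about the redefinition and the later definition
 * silently wins, so code testing the host-port status bit would compare
 * against 5000 instead of 0x20.  Prefixed names such as HSTIMEOUT avoid this. */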
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index c4dcf302d9f..72c343646b2 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -4,18 +4,18 @@
* Licensed under the ADI BSD license or the GPL-2 (or later)
*/
-#ifndef _DEF_BF548_H
-#define _DEF_BF548_H
+#ifndef _DEF_BF547_H
+#define _DEF_BF547_H
/* Include all Core registers and bit definitions */
#include <asm/def_LPBlackfin.h>
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
+/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
-/* The following are the #defines needed by ADSP-BF548 that are not in the common header */
+/* The following are the #defines needed by ADSP-BF547 that are not in the common header */
/* Timer Registers */
@@ -1217,4 +1217,4 @@
/* ******************************************* */
-#endif /* _DEF_BF548_H */
+#endif /* _DEF_BF547_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF548.h b/arch/blackfin/mach-bf548/include/mach/defBF548.h
index a5079980968..3fb33b040ab 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF548.h
@@ -15,115 +15,8 @@
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
-/* The following are the #defines needed by ADSP-BF548 that are not in the common header */
-
-/* Timer Registers */
-
-#define TIMER8_CONFIG 0xffc00600 /* Timer 8 Configuration Register */
-#define TIMER8_COUNTER 0xffc00604 /* Timer 8 Counter Register */
-#define TIMER8_PERIOD 0xffc00608 /* Timer 8 Period Register */
-#define TIMER8_WIDTH 0xffc0060c /* Timer 8 Width Register */
-#define TIMER9_CONFIG 0xffc00610 /* Timer 9 Configuration Register */
-#define TIMER9_COUNTER 0xffc00614 /* Timer 9 Counter Register */
-#define TIMER9_PERIOD 0xffc00618 /* Timer 9 Period Register */
-#define TIMER9_WIDTH 0xffc0061c /* Timer 9 Width Register */
-#define TIMER10_CONFIG 0xffc00620 /* Timer 10 Configuration Register */
-#define TIMER10_COUNTER 0xffc00624 /* Timer 10 Counter Register */
-#define TIMER10_PERIOD 0xffc00628 /* Timer 10 Period Register */
-#define TIMER10_WIDTH 0xffc0062c /* Timer 10 Width Register */
-
-/* Timer Group of 3 Registers */
-
-#define TIMER_ENABLE1 0xffc00640 /* Timer Group of 3 Enable Register */
-#define TIMER_DISABLE1 0xffc00644 /* Timer Group of 3 Disable Register */
-#define TIMER_STATUS1 0xffc00648 /* Timer Group of 3 Status Register */
-
-/* SPORT0 Registers */
-
-#define SPORT0_TCR1 0xffc00800 /* SPORT0 Transmit Configuration 1 Register */
-#define SPORT0_TCR2 0xffc00804 /* SPORT0 Transmit Configuration 2 Register */
-#define SPORT0_TCLKDIV 0xffc00808 /* SPORT0 Transmit Serial Clock Divider Register */
-#define SPORT0_TFSDIV 0xffc0080c /* SPORT0 Transmit Frame Sync Divider Register */
-#define SPORT0_TX 0xffc00810 /* SPORT0 Transmit Data Register */
-#define SPORT0_RX 0xffc00818 /* SPORT0 Receive Data Register */
-#define SPORT0_RCR1 0xffc00820 /* SPORT0 Receive Configuration 1 Register */
-#define SPORT0_RCR2 0xffc00824 /* SPORT0 Receive Configuration 2 Register */
-#define SPORT0_RCLKDIV 0xffc00828 /* SPORT0 Receive Serial Clock Divider Register */
-#define SPORT0_RFSDIV 0xffc0082c /* SPORT0 Receive Frame Sync Divider Register */
-#define SPORT0_STAT 0xffc00830 /* SPORT0 Status Register */
-#define SPORT0_CHNL 0xffc00834 /* SPORT0 Current Channel Register */
-#define SPORT0_MCMC1 0xffc00838 /* SPORT0 Multi channel Configuration Register 1 */
-#define SPORT0_MCMC2 0xffc0083c /* SPORT0 Multi channel Configuration Register 2 */
-#define SPORT0_MTCS0 0xffc00840 /* SPORT0 Multi channel Transmit Select Register 0 */
-#define SPORT0_MTCS1 0xffc00844 /* SPORT0 Multi channel Transmit Select Register 1 */
-#define SPORT0_MTCS2 0xffc00848 /* SPORT0 Multi channel Transmit Select Register 2 */
-#define SPORT0_MTCS3 0xffc0084c /* SPORT0 Multi channel Transmit Select Register 3 */
-#define SPORT0_MRCS0 0xffc00850 /* SPORT0 Multi channel Receive Select Register 0 */
-#define SPORT0_MRCS1 0xffc00854 /* SPORT0 Multi channel Receive Select Register 1 */
-#define SPORT0_MRCS2 0xffc00858 /* SPORT0 Multi channel Receive Select Register 2 */
-#define SPORT0_MRCS3 0xffc0085c /* SPORT0 Multi channel Receive Select Register 3 */
-
-/* EPPI0 Registers */
-
-#define EPPI0_STATUS 0xffc01000 /* EPPI0 Status Register */
-#define EPPI0_HCOUNT 0xffc01004 /* EPPI0 Horizontal Transfer Count Register */
-#define EPPI0_HDELAY 0xffc01008 /* EPPI0 Horizontal Delay Count Register */
-#define EPPI0_VCOUNT 0xffc0100c /* EPPI0 Vertical Transfer Count Register */
-#define EPPI0_VDELAY 0xffc01010 /* EPPI0 Vertical Delay Count Register */
-#define EPPI0_FRAME 0xffc01014 /* EPPI0 Lines per Frame Register */
-#define EPPI0_LINE 0xffc01018 /* EPPI0 Samples per Line Register */
-#define EPPI0_CLKDIV 0xffc0101c /* EPPI0 Clock Divide Register */
-#define EPPI0_CONTROL 0xffc01020 /* EPPI0 Control Register */
-#define EPPI0_FS1W_HBL 0xffc01024 /* EPPI0 FS1 Width Register / EPPI0 Horizontal Blanking Samples Per Line Register */
-#define EPPI0_FS1P_AVPL 0xffc01028 /* EPPI0 FS1 Period Register / EPPI0 Active Video Samples Per Line Register */
-#define EPPI0_FS2W_LVB 0xffc0102c /* EPPI0 FS2 Width Register / EPPI0 Lines of Vertical Blanking Register */
-#define EPPI0_FS2P_LAVF 0xffc01030 /* EPPI0 FS2 Period Register/ EPPI0 Lines of Active Video Per Field Register */
-#define EPPI0_CLIP 0xffc01034 /* EPPI0 Clipping Register */
-
-/* UART2 Registers */
-
-#define UART2_DLL 0xffc02100 /* Divisor Latch Low Byte */
-#define UART2_DLH 0xffc02104 /* Divisor Latch High Byte */
-#define UART2_GCTL 0xffc02108 /* Global Control Register */
-#define UART2_LCR 0xffc0210c /* Line Control Register */
-#define UART2_MCR 0xffc02110 /* Modem Control Register */
-#define UART2_LSR 0xffc02114 /* Line Status Register */
-#define UART2_MSR 0xffc02118 /* Modem Status Register */
-#define UART2_SCR 0xffc0211c /* Scratch Register */
-#define UART2_IER_SET 0xffc02120 /* Interrupt Enable Register Set */
-#define UART2_IER_CLEAR 0xffc02124 /* Interrupt Enable Register Clear */
-#define UART2_RBR 0xffc0212c /* Receive Buffer Register */
-
-/* Two Wire Interface Registers (TWI1) */
-
-#define TWI1_REGBASE 0xffc02200
-#define TWI1_CLKDIV 0xffc02200 /* Clock Divider Register */
-#define TWI1_CONTROL 0xffc02204 /* TWI Control Register */
-#define TWI1_SLAVE_CTRL 0xffc02208 /* TWI Slave Mode Control Register */
-#define TWI1_SLAVE_STAT 0xffc0220c /* TWI Slave Mode Status Register */
-#define TWI1_SLAVE_ADDR 0xffc02210 /* TWI Slave Mode Address Register */
-#define TWI1_MASTER_CTRL 0xffc02214 /* TWI Master Mode Control Register */
-#define TWI1_MASTER_STAT 0xffc02218 /* TWI Master Mode Status Register */
-#define TWI1_MASTER_ADDR 0xffc0221c /* TWI Master Mode Address Register */
-#define TWI1_INT_STAT 0xffc02220 /* TWI Interrupt Status Register */
-#define TWI1_INT_MASK 0xffc02224 /* TWI Interrupt Mask Register */
-#define TWI1_FIFO_CTRL 0xffc02228 /* TWI FIFO Control Register */
-#define TWI1_FIFO_STAT 0xffc0222c /* TWI FIFO Status Register */
-#define TWI1_XMT_DATA8 0xffc02280 /* TWI FIFO Transmit Data Single Byte Register */
-#define TWI1_XMT_DATA16 0xffc02284 /* TWI FIFO Transmit Data Double Byte Register */
-#define TWI1_RCV_DATA8 0xffc02288 /* TWI FIFO Receive Data Single Byte Register */
-#define TWI1_RCV_DATA16 0xffc0228c /* TWI FIFO Receive Data Double Byte Register */
-
-/* SPI2 Registers */
-
-#define SPI2_REGBASE 0xffc02400
-#define SPI2_CTL 0xffc02400 /* SPI2 Control Register */
-#define SPI2_FLG 0xffc02404 /* SPI2 Flag Register */
-#define SPI2_STAT 0xffc02408 /* SPI2 Status Register */
-#define SPI2_TDBR 0xffc0240c /* SPI2 Transmit Data Buffer Register */
-#define SPI2_RDBR 0xffc02410 /* SPI2 Receive Data Buffer Register */
-#define SPI2_BAUD 0xffc02414 /* SPI2 Baud Rate Register */
-#define SPI2_SHADOW 0xffc02418 /* SPI2 Receive Data Buffer Shadow Register */
+/* The BF548 is like the BF547, but has additional CANs */
+#include "defBF547.h"
/* CAN Controller 1 Config 1 Registers */
@@ -508,1096 +401,4 @@
#define CAN1_MB31_ID0 0xffc037f8 /* CAN Controller 1 Mailbox 31 ID0 Register */
#define CAN1_MB31_ID1 0xffc037fc /* CAN Controller 1 Mailbox 31 ID1 Register */
-/* ATAPI Registers */
-
-#define ATAPI_CONTROL 0xffc03800 /* ATAPI Control Register */
-#define ATAPI_STATUS 0xffc03804 /* ATAPI Status Register */
-#define ATAPI_DEV_ADDR 0xffc03808 /* ATAPI Device Register Address */
-#define ATAPI_DEV_TXBUF 0xffc0380c /* ATAPI Device Register Write Data */
-#define ATAPI_DEV_RXBUF 0xffc03810 /* ATAPI Device Register Read Data */
-#define ATAPI_INT_MASK 0xffc03814 /* ATAPI Interrupt Mask Register */
-#define ATAPI_INT_STATUS 0xffc03818 /* ATAPI Interrupt Status Register */
-#define ATAPI_XFER_LEN 0xffc0381c /* ATAPI Length of Transfer */
-#define ATAPI_LINE_STATUS 0xffc03820 /* ATAPI Line Status */
-#define ATAPI_SM_STATE 0xffc03824 /* ATAPI State Machine Status */
-#define ATAPI_TERMINATE 0xffc03828 /* ATAPI Host Terminate */
-#define ATAPI_PIO_TFRCNT 0xffc0382c /* ATAPI PIO mode transfer count */
-#define ATAPI_DMA_TFRCNT 0xffc03830 /* ATAPI DMA mode transfer count */
-#define ATAPI_UMAIN_TFRCNT 0xffc03834 /* ATAPI UDMAIN transfer count */
-#define ATAPI_UDMAOUT_TFRCNT 0xffc03838 /* ATAPI UDMAOUT transfer count */
-#define ATAPI_REG_TIM_0 0xffc03840 /* ATAPI Register Transfer Timing 0 */
-#define ATAPI_PIO_TIM_0 0xffc03844 /* ATAPI PIO Timing 0 Register */
-#define ATAPI_PIO_TIM_1 0xffc03848 /* ATAPI PIO Timing 1 Register */
-#define ATAPI_MULTI_TIM_0 0xffc03850 /* ATAPI Multi-DMA Timing 0 Register */
-#define ATAPI_MULTI_TIM_1 0xffc03854 /* ATAPI Multi-DMA Timing 1 Register */
-#define ATAPI_MULTI_TIM_2 0xffc03858 /* ATAPI Multi-DMA Timing 2 Register */
-#define ATAPI_ULTRA_TIM_0 0xffc03860 /* ATAPI Ultra-DMA Timing 0 Register */
-#define ATAPI_ULTRA_TIM_1 0xffc03864 /* ATAPI Ultra-DMA Timing 1 Register */
-#define ATAPI_ULTRA_TIM_2 0xffc03868 /* ATAPI Ultra-DMA Timing 2 Register */
-#define ATAPI_ULTRA_TIM_3 0xffc0386c /* ATAPI Ultra-DMA Timing 3 Register */
-
-/* SDH Registers */
-
-#define SDH_PWR_CTL 0xffc03900 /* SDH Power Control */
-#define SDH_CLK_CTL 0xffc03904 /* SDH Clock Control */
-#define SDH_ARGUMENT 0xffc03908 /* SDH Argument */
-#define SDH_COMMAND 0xffc0390c /* SDH Command */
-#define SDH_RESP_CMD 0xffc03910 /* SDH Response Command */
-#define SDH_RESPONSE0 0xffc03914 /* SDH Response0 */
-#define SDH_RESPONSE1 0xffc03918 /* SDH Response1 */
-#define SDH_RESPONSE2 0xffc0391c /* SDH Response2 */
-#define SDH_RESPONSE3 0xffc03920 /* SDH Response3 */
-#define SDH_DATA_TIMER 0xffc03924 /* SDH Data Timer */
-#define SDH_DATA_LGTH 0xffc03928 /* SDH Data Length */
-#define SDH_DATA_CTL 0xffc0392c /* SDH Data Control */
-#define SDH_DATA_CNT 0xffc03930 /* SDH Data Counter */
-#define SDH_STATUS 0xffc03934 /* SDH Status */
-#define SDH_STATUS_CLR 0xffc03938 /* SDH Status Clear */
-#define SDH_MASK0 0xffc0393c /* SDH Interrupt0 Mask */
-#define SDH_MASK1 0xffc03940 /* SDH Interrupt1 Mask */
-#define SDH_FIFO_CNT 0xffc03948 /* SDH FIFO Counter */
-#define SDH_FIFO 0xffc03980 /* SDH Data FIFO */
-#define SDH_E_STATUS 0xffc039c0 /* SDH Exception Status */
-#define SDH_E_MASK 0xffc039c4 /* SDH Exception Mask */
-#define SDH_CFG 0xffc039c8 /* SDH Configuration */
-#define SDH_RD_WAIT_EN 0xffc039cc /* SDH Read Wait Enable */
-#define SDH_PID0 0xffc039d0 /* SDH Peripheral Identification0 */
-#define SDH_PID1 0xffc039d4 /* SDH Peripheral Identification1 */
-#define SDH_PID2 0xffc039d8 /* SDH Peripheral Identification2 */
-#define SDH_PID3 0xffc039dc /* SDH Peripheral Identification3 */
-#define SDH_PID4 0xffc039e0 /* SDH Peripheral Identification4 */
-#define SDH_PID5 0xffc039e4 /* SDH Peripheral Identification5 */
-#define SDH_PID6 0xffc039e8 /* SDH Peripheral Identification6 */
-#define SDH_PID7 0xffc039ec /* SDH Peripheral Identification7 */
-
-/* HOST Port Registers */
-
-#define HOST_CONTROL 0xffc03a00 /* HOST Control Register */
-#define HOST_STATUS 0xffc03a04 /* HOST Status Register */
-#define HOST_TIMEOUT 0xffc03a08 /* HOST Acknowledge Mode Timeout Register */
-
-/* USB Control Registers */
-
-#define USB_FADDR 0xffc03c00 /* Function address register */
-#define USB_POWER 0xffc03c04 /* Power management register */
-#define USB_INTRTX 0xffc03c08 /* Interrupt register for endpoint 0 and Tx endpoint 1 to 7 */
-#define USB_INTRRX 0xffc03c0c /* Interrupt register for Rx endpoints 1 to 7 */
-#define USB_INTRTXE 0xffc03c10 /* Interrupt enable register for IntrTx */
-#define USB_INTRRXE 0xffc03c14 /* Interrupt enable register for IntrRx */
-#define USB_INTRUSB 0xffc03c18 /* Interrupt register for common USB interrupts */
-#define USB_INTRUSBE 0xffc03c1c /* Interrupt enable register for IntrUSB */
-#define USB_FRAME 0xffc03c20 /* USB frame number */
-#define USB_INDEX 0xffc03c24 /* Index register for selecting the indexed endpoint registers */
-#define USB_TESTMODE 0xffc03c28 /* Enabled USB 20 test modes */
-#define USB_GLOBINTR 0xffc03c2c /* Global Interrupt Mask register and Wakeup Exception Interrupt */
-#define USB_GLOBAL_CTL 0xffc03c30 /* Global Clock Control for the core */
-
-/* USB Packet Control Registers */
-
-#define USB_TX_MAX_PACKET 0xffc03c40 /* Maximum packet size for Host Tx endpoint */
-#define USB_CSR0 0xffc03c44 /* Control Status register for endpoint 0 and Control Status register for Host Tx endpoint */
-#define USB_TXCSR 0xffc03c44 /* Control Status register for endpoint 0 and Control Status register for Host Tx endpoint */
-#define USB_RX_MAX_PACKET 0xffc03c48 /* Maximum packet size for Host Rx endpoint */
-#define USB_RXCSR 0xffc03c4c /* Control Status register for Host Rx endpoint */
-#define USB_COUNT0 0xffc03c50 /* Number of bytes received in endpoint 0 FIFO and Number of bytes received in Host Tx endpoint */
-#define USB_RXCOUNT 0xffc03c50 /* Number of bytes received in endpoint 0 FIFO and Number of bytes received in Host Tx endpoint */
-#define USB_TXTYPE 0xffc03c54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint */
-#define USB_NAKLIMIT0 0xffc03c58 /* Sets the NAK response timeout on Endpoint 0 and on Bulk transfers for Host Tx endpoint */
-#define USB_TXINTERVAL 0xffc03c58 /* Sets the NAK response timeout on Endpoint 0 and on Bulk transfers for Host Tx endpoint */
-#define USB_RXTYPE 0xffc03c5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint */
-#define USB_RXINTERVAL 0xffc03c60 /* Sets the polling interval for Interrupt and Isochronous transfers or the NAK response timeout on Bulk transfers */
-#define USB_TXCOUNT 0xffc03c68 /* Number of bytes to be written to the selected endpoint Tx FIFO */
-
-/* USB Endpoint FIFO Registers */
-
-#define USB_EP0_FIFO 0xffc03c80 /* Endpoint 0 FIFO */
-#define USB_EP1_FIFO 0xffc03c88 /* Endpoint 1 FIFO */
-#define USB_EP2_FIFO 0xffc03c90 /* Endpoint 2 FIFO */
-#define USB_EP3_FIFO 0xffc03c98 /* Endpoint 3 FIFO */
-#define USB_EP4_FIFO 0xffc03ca0 /* Endpoint 4 FIFO */
-#define USB_EP5_FIFO 0xffc03ca8 /* Endpoint 5 FIFO */
-#define USB_EP6_FIFO 0xffc03cb0 /* Endpoint 6 FIFO */
-#define USB_EP7_FIFO 0xffc03cb8 /* Endpoint 7 FIFO */
-
-/* USB OTG Control Registers */
-
-#define USB_OTG_DEV_CTL 0xffc03d00 /* OTG Device Control Register */
-#define USB_OTG_VBUS_IRQ 0xffc03d04 /* OTG VBUS Control Interrupts */
-#define USB_OTG_VBUS_MASK 0xffc03d08 /* VBUS Control Interrupt Enable */
-
-/* USB Phy Control Registers */
-
-#define USB_LINKINFO 0xffc03d48 /* Enables programming of some PHY-side delays */
-#define USB_VPLEN 0xffc03d4c /* Determines duration of VBUS pulse for VBUS charging */
-#define USB_HS_EOF1 0xffc03d50 /* Time buffer for High-Speed transactions */
-#define USB_FS_EOF1 0xffc03d54 /* Time buffer for Full-Speed transactions */
-#define USB_LS_EOF1 0xffc03d58 /* Time buffer for Low-Speed transactions */
-
-/* (APHY_CNTRL is for ADI usage only) */
-
-#define USB_APHY_CNTRL 0xffc03de0 /* Register that increases visibility of Analog PHY */
-
-/* (APHY_CALIB is for ADI usage only) */
-
-#define USB_APHY_CALIB 0xffc03de4 /* Register used to set some calibration values */
-#define USB_APHY_CNTRL2 0xffc03de8 /* Register used to prevent re-enumeration once Moab goes into hibernate mode */
-
-/* (PHY_TEST is for ADI usage only) */
-
-#define USB_PHY_TEST 0xffc03dec /* Used for reducing simulation time and simplifies FIFO testability */
-#define USB_PLLOSC_CTRL 0xffc03df0 /* Used to program different parameters for USB PLL and Oscillator */
-#define USB_SRP_CLKDIV 0xffc03df4 /* Used to program clock divide value for the clock fed to the SRP detection logic */
-
-/* USB Endpoint 0 Control Registers */
-
-#define USB_EP_NI0_TXMAXP 0xffc03e00 /* Maximum packet size for Host Tx endpoint0 */
-#define USB_EP_NI0_TXCSR 0xffc03e04 /* Control Status register for endpoint 0 */
-#define USB_EP_NI0_RXMAXP 0xffc03e08 /* Maximum packet size for Host Rx endpoint0 */
-#define USB_EP_NI0_RXCSR 0xffc03e0c /* Control Status register for Host Rx endpoint0 */
-#define USB_EP_NI0_RXCOUNT 0xffc03e10 /* Number of bytes received in endpoint 0 FIFO */
-#define USB_EP_NI0_TXTYPE 0xffc03e14 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint0 */
-#define USB_EP_NI0_TXINTERVAL 0xffc03e18 /* Sets the NAK response timeout on Endpoint 0 */
-#define USB_EP_NI0_RXTYPE 0xffc03e1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint0 */
-#define USB_EP_NI0_RXINTERVAL 0xffc03e20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint0 */
-
-/* USB Endpoint 1 Control Registers */
-
-#define USB_EP_NI0_TXCOUNT 0xffc03e28 /* Number of bytes to be written to the endpoint0 Tx FIFO */
-#define USB_EP_NI1_TXMAXP 0xffc03e40 /* Maximum packet size for Host Tx endpoint1 */
-#define USB_EP_NI1_TXCSR 0xffc03e44 /* Control Status register for endpoint1 */
-#define USB_EP_NI1_RXMAXP 0xffc03e48 /* Maximum packet size for Host Rx endpoint1 */
-#define USB_EP_NI1_RXCSR 0xffc03e4c /* Control Status register for Host Rx endpoint1 */
-#define USB_EP_NI1_RXCOUNT 0xffc03e50 /* Number of bytes received in endpoint1 FIFO */
-#define USB_EP_NI1_TXTYPE 0xffc03e54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint1 */
-#define USB_EP_NI1_TXINTERVAL 0xffc03e58 /* Sets the NAK response timeout on Endpoint1 */
-#define USB_EP_NI1_RXTYPE 0xffc03e5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint1 */
-#define USB_EP_NI1_RXINTERVAL 0xffc03e60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint1 */
-
-/* USB Endpoint 2 Control Registers */
-
-#define USB_EP_NI1_TXCOUNT 0xffc03e68 /* Number of bytes to be written to the endpoint1 Tx FIFO */
-#define USB_EP_NI2_TXMAXP 0xffc03e80 /* Maximum packet size for Host Tx endpoint2 */
-#define USB_EP_NI2_TXCSR 0xffc03e84 /* Control Status register for endpoint2 */
-#define USB_EP_NI2_RXMAXP 0xffc03e88 /* Maximum packet size for Host Rx endpoint2 */
-#define USB_EP_NI2_RXCSR 0xffc03e8c /* Control Status register for Host Rx endpoint2 */
-#define USB_EP_NI2_RXCOUNT 0xffc03e90 /* Number of bytes received in endpoint2 FIFO */
-#define USB_EP_NI2_TXTYPE 0xffc03e94 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint2 */
-#define USB_EP_NI2_TXINTERVAL 0xffc03e98 /* Sets the NAK response timeout on Endpoint2 */
-#define USB_EP_NI2_RXTYPE 0xffc03e9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint2 */
-#define USB_EP_NI2_RXINTERVAL 0xffc03ea0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint2 */
-
-/* USB Endpoint 3 Control Registers */
-
-#define USB_EP_NI2_TXCOUNT 0xffc03ea8 /* Number of bytes to be written to the endpoint2 Tx FIFO */
-#define USB_EP_NI3_TXMAXP 0xffc03ec0 /* Maximum packet size for Host Tx endpoint3 */
-#define USB_EP_NI3_TXCSR 0xffc03ec4 /* Control Status register for endpoint3 */
-#define USB_EP_NI3_RXMAXP 0xffc03ec8 /* Maximum packet size for Host Rx endpoint3 */
-#define USB_EP_NI3_RXCSR 0xffc03ecc /* Control Status register for Host Rx endpoint3 */
-#define USB_EP_NI3_RXCOUNT 0xffc03ed0 /* Number of bytes received in endpoint3 FIFO */
-#define USB_EP_NI3_TXTYPE 0xffc03ed4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint3 */
-#define USB_EP_NI3_TXINTERVAL 0xffc03ed8 /* Sets the NAK response timeout on Endpoint3 */
-#define USB_EP_NI3_RXTYPE 0xffc03edc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint3 */
-#define USB_EP_NI3_RXINTERVAL 0xffc03ee0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint3 */
-
-/* USB Endpoint 4 Control Registers */
-
-#define USB_EP_NI3_TXCOUNT 0xffc03ee8 /* Number of bytes to be written to the endpoint3 Tx FIFO */
-#define USB_EP_NI4_TXMAXP 0xffc03f00 /* Maximum packet size for Host Tx endpoint4 */
-#define USB_EP_NI4_TXCSR 0xffc03f04 /* Control Status register for endpoint4 */
-#define USB_EP_NI4_RXMAXP 0xffc03f08 /* Maximum packet size for Host Rx endpoint4 */
-#define USB_EP_NI4_RXCSR 0xffc03f0c /* Control Status register for Host Rx endpoint4 */
-#define USB_EP_NI4_RXCOUNT 0xffc03f10 /* Number of bytes received in endpoint4 FIFO */
-#define USB_EP_NI4_TXTYPE 0xffc03f14 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint4 */
-#define USB_EP_NI4_TXINTERVAL 0xffc03f18 /* Sets the NAK response timeout on Endpoint4 */
-#define USB_EP_NI4_RXTYPE 0xffc03f1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint4 */
-#define USB_EP_NI4_RXINTERVAL 0xffc03f20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint4 */
-
-/* USB Endpoint 5 Control Registers */
-
-#define USB_EP_NI4_TXCOUNT 0xffc03f28 /* Number of bytes to be written to the endpoint4 Tx FIFO */
-#define USB_EP_NI5_TXMAXP 0xffc03f40 /* Maximum packet size for Host Tx endpoint5 */
-#define USB_EP_NI5_TXCSR 0xffc03f44 /* Control Status register for endpoint5 */
-#define USB_EP_NI5_RXMAXP 0xffc03f48 /* Maximum packet size for Host Rx endpoint5 */
-#define USB_EP_NI5_RXCSR 0xffc03f4c /* Control Status register for Host Rx endpoint5 */
-#define USB_EP_NI5_RXCOUNT 0xffc03f50 /* Number of bytes received in endpoint5 FIFO */
-#define USB_EP_NI5_TXTYPE 0xffc03f54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint5 */
-#define USB_EP_NI5_TXINTERVAL 0xffc03f58 /* Sets the NAK response timeout on Endpoint5 */
-#define USB_EP_NI5_RXTYPE 0xffc03f5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint5 */
-#define USB_EP_NI5_RXINTERVAL 0xffc03f60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint5 */
-
-/* USB Endpoint 6 Control Registers */
-
-#define USB_EP_NI5_TXCOUNT 0xffc03f68 /* Number of bytes to be written to the endpoint5 Tx FIFO */
-#define USB_EP_NI6_TXMAXP 0xffc03f80 /* Maximum packet size for Host Tx endpoint6 */
-#define USB_EP_NI6_TXCSR 0xffc03f84 /* Control Status register for endpoint6 */
-#define USB_EP_NI6_RXMAXP 0xffc03f88 /* Maximum packet size for Host Rx endpoint6 */
-#define USB_EP_NI6_RXCSR 0xffc03f8c /* Control Status register for Host Rx endpoint6 */
-#define USB_EP_NI6_RXCOUNT 0xffc03f90 /* Number of bytes received in endpoint6 FIFO */
-#define USB_EP_NI6_TXTYPE 0xffc03f94 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint6 */
-#define USB_EP_NI6_TXINTERVAL 0xffc03f98 /* Sets the NAK response timeout on Endpoint6 */
-#define USB_EP_NI6_RXTYPE 0xffc03f9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint6 */
-#define USB_EP_NI6_RXINTERVAL 0xffc03fa0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint6 */
-
-/* USB Endpoint 7 Control Registers */
-
-#define USB_EP_NI6_TXCOUNT 0xffc03fa8 /* Number of bytes to be written to the endpoint6 Tx FIFO */
-#define USB_EP_NI7_TXMAXP 0xffc03fc0 /* Maximum packet size for Host Tx endpoint7 */
-#define USB_EP_NI7_TXCSR 0xffc03fc4 /* Control Status register for endpoint7 */
-#define USB_EP_NI7_RXMAXP 0xffc03fc8 /* Maximum packet size for Host Rx endpoint7 */
-#define USB_EP_NI7_RXCSR 0xffc03fcc /* Control Status register for Host Rx endpoint7 */
-#define USB_EP_NI7_RXCOUNT 0xffc03fd0 /* Number of bytes received in endpoint7 FIFO */
-#define USB_EP_NI7_TXTYPE 0xffc03fd4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint7 */
-#define USB_EP_NI7_TXINTERVAL 0xffc03fd8 /* Sets the NAK response timeout on Endpoint7 */
-#define USB_EP_NI7_RXTYPE 0xffc03fdc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint7 */
-#define USB_EP_NI7_RXINTERVAL 0xffc03ff0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
-#define USB_EP_NI7_TXCOUNT 0xffc03ff8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
-#define USB_DMA_INTERRUPT 0xffc04000 /* Indicates pending interrupts for the DMA channels */
-
-/* USB Channel 0 Config Registers */
-
-#define USB_DMA0CONTROL 0xffc04004 /* DMA master channel 0 configuration */
-#define USB_DMA0ADDRLOW 0xffc04008 /* Lower 16-bits of memory source/destination address for DMA master channel 0 */
-#define USB_DMA0ADDRHIGH 0xffc0400c /* Upper 16-bits of memory source/destination address for DMA master channel 0 */
-#define USB_DMA0COUNTLOW 0xffc04010 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 0 */
-#define USB_DMA0COUNTHIGH 0xffc04014 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 0 */
-
-/* USB Channel 1 Config Registers */
-
-#define USB_DMA1CONTROL 0xffc04024 /* DMA master channel 1 configuration */
-#define USB_DMA1ADDRLOW 0xffc04028 /* Lower 16-bits of memory source/destination address for DMA master channel 1 */
-#define USB_DMA1ADDRHIGH 0xffc0402c /* Upper 16-bits of memory source/destination address for DMA master channel 1 */
-#define USB_DMA1COUNTLOW 0xffc04030 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 1 */
-#define USB_DMA1COUNTHIGH 0xffc04034 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 1 */
-
-/* USB Channel 2 Config Registers */
-
-#define USB_DMA2CONTROL 0xffc04044 /* DMA master channel 2 configuration */
-#define USB_DMA2ADDRLOW 0xffc04048 /* Lower 16-bits of memory source/destination address for DMA master channel 2 */
-#define USB_DMA2ADDRHIGH 0xffc0404c /* Upper 16-bits of memory source/destination address for DMA master channel 2 */
-#define USB_DMA2COUNTLOW 0xffc04050 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 2 */
-#define USB_DMA2COUNTHIGH 0xffc04054 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 2 */
-
-/* USB Channel 3 Config Registers */
-
-#define USB_DMA3CONTROL 0xffc04064 /* DMA master channel 3 configuration */
-#define USB_DMA3ADDRLOW 0xffc04068 /* Lower 16-bits of memory source/destination address for DMA master channel 3 */
-#define USB_DMA3ADDRHIGH 0xffc0406c /* Upper 16-bits of memory source/destination address for DMA master channel 3 */
-#define USB_DMA3COUNTLOW 0xffc04070 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 3 */
-#define USB_DMA3COUNTHIGH 0xffc04074 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 3 */
-
-/* USB Channel 4 Config Registers */
-
-#define USB_DMA4CONTROL 0xffc04084 /* DMA master channel 4 configuration */
-#define USB_DMA4ADDRLOW 0xffc04088 /* Lower 16-bits of memory source/destination address for DMA master channel 4 */
-#define USB_DMA4ADDRHIGH 0xffc0408c /* Upper 16-bits of memory source/destination address for DMA master channel 4 */
-#define USB_DMA4COUNTLOW 0xffc04090 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 4 */
-#define USB_DMA4COUNTHIGH 0xffc04094 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 4 */
-
-/* USB Channel 5 Config Registers */
-
-#define USB_DMA5CONTROL 0xffc040a4 /* DMA master channel 5 configuration */
-#define USB_DMA5ADDRLOW 0xffc040a8 /* Lower 16-bits of memory source/destination address for DMA master channel 5 */
-#define USB_DMA5ADDRHIGH 0xffc040ac /* Upper 16-bits of memory source/destination address for DMA master channel 5 */
-#define USB_DMA5COUNTLOW 0xffc040b0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 5 */
-#define USB_DMA5COUNTHIGH 0xffc040b4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 5 */
-
-/* USB Channel 6 Config Registers */
-
-#define USB_DMA6CONTROL 0xffc040c4 /* DMA master channel 6 configuration */
-#define USB_DMA6ADDRLOW 0xffc040c8 /* Lower 16-bits of memory source/destination address for DMA master channel 6 */
-#define USB_DMA6ADDRHIGH 0xffc040cc /* Upper 16-bits of memory source/destination address for DMA master channel 6 */
-#define USB_DMA6COUNTLOW 0xffc040d0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 6 */
-#define USB_DMA6COUNTHIGH 0xffc040d4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 6 */
-
-/* USB Channel 7 Config Registers */
-
-#define USB_DMA7CONTROL 0xffc040e4 /* DMA master channel 7 configuration */
-#define USB_DMA7ADDRLOW 0xffc040e8 /* Lower 16-bits of memory source/destination address for DMA master channel 7 */
-#define USB_DMA7ADDRHIGH 0xffc040ec /* Upper 16-bits of memory source/destination address for DMA master channel 7 */
-#define USB_DMA7COUNTLOW 0xffc040f0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 7 */
-#define USB_DMA7COUNTHIGH 0xffc040f4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 7 */
-
-/* Keypad Registers */
-
-#define KPAD_CTL 0xffc04100 /* Controls keypad module enable and disable */
-#define KPAD_PRESCALE 0xffc04104 /* Establish a time base for programming the KPAD_MSEL register */
-#define KPAD_MSEL 0xffc04108 /* Selects delay parameters for keypad interface sensitivity */
-#define KPAD_ROWCOL 0xffc0410c /* Captures the row and column output values of the keys pressed */
-#define KPAD_STAT 0xffc04110 /* Holds and clears the status of the keypad interface interrupt */
-#define KPAD_SOFTEVAL 0xffc04114 /* Lets software force keypad interface to check for keys being pressed */
-
-/* Pixel Compositor (PIXC) Registers */
-
-#define PIXC_CTL 0xffc04400 /* Overlay enable, resampling mode, I/O data format, transparency enable, watermark level, FIFO status */
-#define PIXC_PPL 0xffc04404 /* Holds the number of pixels per line of the display */
-#define PIXC_LPF 0xffc04408 /* Holds the number of lines per frame of the display */
-#define PIXC_AHSTART 0xffc0440c /* Contains horizontal start pixel information of the overlay data (set A) */
-#define PIXC_AHEND 0xffc04410 /* Contains horizontal end pixel information of the overlay data (set A) */
-#define PIXC_AVSTART 0xffc04414 /* Contains vertical start pixel information of the overlay data (set A) */
-#define PIXC_AVEND 0xffc04418 /* Contains vertical end pixel information of the overlay data (set A) */
-#define PIXC_ATRANSP 0xffc0441c /* Contains the transparency ratio (set A) */
-#define PIXC_BHSTART 0xffc04420 /* Contains horizontal start pixel information of the overlay data (set B) */
-#define PIXC_BHEND 0xffc04424 /* Contains horizontal end pixel information of the overlay data (set B) */
-#define PIXC_BVSTART 0xffc04428 /* Contains vertical start pixel information of the overlay data (set B) */
-#define PIXC_BVEND 0xffc0442c /* Contains vertical end pixel information of the overlay data (set B) */
-#define PIXC_BTRANSP 0xffc04430 /* Contains the transparency ratio (set B) */
-#define PIXC_INTRSTAT 0xffc0443c /* Overlay interrupt configuration/status */
-#define PIXC_RYCON 0xffc04440 /* Color space conversion matrix register. Contains the R/Y conversion coefficients */
-#define PIXC_GUCON 0xffc04444 /* Color space conversion matrix register. Contains the G/U conversion coefficients */
-#define PIXC_BVCON 0xffc04448 /* Color space conversion matrix register. Contains the B/V conversion coefficients */
-#define PIXC_CCBIAS 0xffc0444c /* Bias values for the color space conversion matrix */
-#define PIXC_TC 0xffc04450 /* Holds the transparent color value */
-
-/* Handshake MDMA 0 Registers */
-
-#define HMDMA0_CONTROL 0xffc04500 /* Handshake MDMA0 Control Register */
-#define HMDMA0_ECINIT 0xffc04504 /* Handshake MDMA0 Initial Edge Count Register */
-#define HMDMA0_BCINIT 0xffc04508 /* Handshake MDMA0 Initial Block Count Register */
-#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshold Register */
-#define HMDMA0_ECOVERFLOW 0xffc04510 /* Handshake MDMA0 Edge Count Overflow Interrupt Register */
-#define HMDMA0_ECOUNT 0xffc04514 /* Handshake MDMA0 Current Edge Count Register */
-#define HMDMA0_BCOUNT 0xffc04518 /* Handshake MDMA0 Current Block Count Register */
-
-/* Handshake MDMA 1 Registers */
-
-#define HMDMA1_CONTROL 0xffc04540 /* Handshake MDMA1 Control Register */
-#define HMDMA1_ECINIT 0xffc04544 /* Handshake MDMA1 Initial Edge Count Register */
-#define HMDMA1_BCINIT 0xffc04548 /* Handshake MDMA1 Initial Block Count Register */
-#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshold Register */
-#define HMDMA1_ECOVERFLOW 0xffc04550 /* Handshake MDMA1 Edge Count Overflow Interrupt Register */
-#define HMDMA1_ECOUNT 0xffc04554 /* Handshake MDMA1 Current Edge Count Register */
-#define HMDMA1_BCOUNT 0xffc04558 /* Handshake MDMA1 Current Block Count Register */
-
-
-/* ********************************************************** */
-/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
-/* and MULTI BIT READ MACROS */
-/* ********************************************************** */
-
-/* Bit masks for PIXC_CTL */
-
-#define PIXC_EN 0x1 /* Pixel Compositor Enable */
-#define OVR_A_EN 0x2 /* Overlay A Enable */
-#define OVR_B_EN 0x4 /* Overlay B Enable */
-#define IMG_FORM 0x8 /* Image Data Format */
-#define OVR_FORM 0x10 /* Overlay Data Format */
-#define OUT_FORM 0x20 /* Output Data Format */
-#define UDS_MOD 0x40 /* Resampling Mode */
-#define TC_EN 0x80 /* Transparent Color Enable */
-#define IMG_STAT 0x300 /* Image FIFO Status */
-#define OVR_STAT 0xc00 /* Overlay FIFO Status */
-#define WM_LVL 0x3000 /* FIFO Watermark Level */
-
-/* Bit masks for PIXC_AHSTART */
-
-#define A_HSTART 0xfff /* Horizontal Start Coordinates */
-
-/* Bit masks for PIXC_AHEND */
-
-#define A_HEND 0xfff /* Horizontal End Coordinates */
-
-/* Bit masks for PIXC_AVSTART */
-
-#define A_VSTART 0x3ff /* Vertical Start Coordinates */
-
-/* Bit masks for PIXC_AVEND */
-
-#define A_VEND 0x3ff /* Vertical End Coordinates */
-
-/* Bit masks for PIXC_ATRANSP */
-
-#define A_TRANSP 0xf /* Transparency Value */
-
-/* Bit masks for PIXC_BHSTART */
-
-#define B_HSTART 0xfff /* Horizontal Start Coordinates */
-
-/* Bit masks for PIXC_BHEND */
-
-#define B_HEND 0xfff /* Horizontal End Coordinates */
-
-/* Bit masks for PIXC_BVSTART */
-
-#define B_VSTART 0x3ff /* Vertical Start Coordinates */
-
-/* Bit masks for PIXC_BVEND */
-
-#define B_VEND 0x3ff /* Vertical End Coordinates */
-
-/* Bit masks for PIXC_BTRANSP */
-
-#define B_TRANSP 0xf /* Transparency Value */
-
-/* Bit masks for PIXC_INTRSTAT */
-
-#define OVR_INT_EN 0x1 /* Interrupt at End of Last Valid Overlay */
-#define FRM_INT_EN 0x2 /* Interrupt at End of Frame */
-#define OVR_INT_STAT 0x4 /* Overlay Interrupt Status */
-#define FRM_INT_STAT 0x8 /* Frame Interrupt Status */
-
-/* Bit masks for PIXC_RYCON */
-
-#define A11 0x3ff /* A11 in the Coefficient Matrix */
-#define A12 0xffc00 /* A12 in the Coefficient Matrix */
-#define A13 0x3ff00000 /* A13 in the Coefficient Matrix */
-#define RY_MULT4 0x40000000 /* Multiply Row by 4 */
-
-/* Bit masks for PIXC_GUCON */
-
-#define A21 0x3ff /* A21 in the Coefficient Matrix */
-#define A22 0xffc00 /* A22 in the Coefficient Matrix */
-#define A23 0x3ff00000 /* A23 in the Coefficient Matrix */
-#define GU_MULT4 0x40000000 /* Multiply Row by 4 */
-
-/* Bit masks for PIXC_BVCON */
-
-#define A31 0x3ff /* A31 in the Coefficient Matrix */
-#define A32 0xffc00 /* A32 in the Coefficient Matrix */
-#define A33 0x3ff00000 /* A33 in the Coefficient Matrix */
-#define BV_MULT4 0x40000000 /* Multiply Row by 4 */
-
-/* Bit masks for PIXC_CCBIAS */
-
-#define A14 0x3ff /* A14 in the Bias Vector */
-#define A24 0xffc00 /* A24 in the Bias Vector */
-#define A34 0x3ff00000 /* A34 in the Bias Vector */
-
-/* Bit masks for PIXC_TC */
-
-#define RY_TRANS 0xff /* Transparent Color - R/Y Component */
-#define GU_TRANS 0xff00 /* Transparent Color - G/U Component */
-#define BV_TRANS 0xff0000 /* Transparent Color - B/V Component */
-
-/* Bit masks for HOST_CONTROL */
-
-#define HOST_EN 0x1 /* Host Enable */
-#define HOST_END 0x2 /* Host Endianness */
-#define DATA_SIZE 0x4 /* Data Size */
-#define HOST_RST 0x8 /* Host Reset */
-#define HRDY_OVR 0x20 /* Host Ready Override */
-#define INT_MODE 0x40 /* Interrupt Mode */
-#define BT_EN 0x80 /* Bus Timeout Enable */
-#define EHW 0x100 /* Enable Host Write */
-#define EHR 0x200 /* Enable Host Read */
-#define BDR 0x400 /* Burst DMA Requests */
-
-/* Bit masks for HOST_STATUS */
-
-#define DMA_READY 0x1 /* DMA Ready */
-#define FIFOFULL 0x2 /* FIFO Full */
-#define FIFOEMPTY 0x4 /* FIFO Empty */
-#define DMA_COMPLETE 0x8 /* DMA Complete */
-#define HSHK 0x10 /* Host Handshake */
-#define HSTIMEOUT 0x20 /* Host Timeout */
-#define HIRQ 0x40 /* Host Interrupt Request */
-#define ALLOW_CNFG 0x80 /* Allow New Configuration */
-#define DMA_DIR 0x100 /* DMA Direction */
-#define BTE 0x200 /* Bus Timeout Enabled */
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define COUNT_TIMEOUT 0x7ff /* Host Timeout count */
-
-/* Bit masks for KPAD_CTL */
-
-#define KPAD_EN 0x1 /* Keypad Enable */
-#define KPAD_IRQMODE 0x6 /* Key Press Interrupt Enable */
-#define KPAD_ROWEN 0x1c00 /* Row Enable Width */
-#define KPAD_COLEN 0xe000 /* Column Enable Width */
-
-/* Bit masks for KPAD_PRESCALE */
-
-#define KPAD_PRESCALE_VAL 0x3f /* Key Prescale Value */
-
-/* Bit masks for KPAD_MSEL */
-
-#define DBON_SCALE 0xff /* Debounce Scale Value */
-#define COLDRV_SCALE 0xff00 /* Column Driver Scale Value */
-
-/* Bit masks for KPAD_ROWCOL */
-
-#define KPAD_ROW 0xff /* Rows Pressed */
-#define KPAD_COL 0xff00 /* Columns Pressed */
-
-/* Bit masks for KPAD_STAT */
-
-#define KPAD_IRQ 0x1 /* Keypad Interrupt Status */
-#define KPAD_MROWCOL 0x6 /* Multiple Row/Column Keypress Status */
-#define KPAD_PRESSED 0x8 /* Key press current status */
-
-/* Bit masks for KPAD_SOFTEVAL */
-
-#define KPAD_SOFTEVAL_E 0x2 /* Software Programmable Force Evaluate */
-
-/* Bit masks for SDH_COMMAND */
-
-#define CMD_IDX 0x3f /* Command Index */
-#define CMD_RSP 0x40 /* Response */
-#define CMD_L_RSP 0x80 /* Long Response */
-#define CMD_INT_E 0x100 /* Command Interrupt */
-#define CMD_PEND_E 0x200 /* Command Pending */
-#define CMD_E 0x400 /* Command Enable */
-
-/* Bit masks for SDH_PWR_CTL */
-
-#define PWR_ON 0x3 /* Power On */
-#if 0
-#define TBD 0x3c /* TBD */
-#endif
-#define SD_CMD_OD 0x40 /* Open Drain Output */
-#define ROD_CTL 0x80 /* Rod Control */
-
-/* Bit masks for SDH_CLK_CTL */
-
-#define CLKDIV 0xff /* MC_CLK Divisor */
-#define CLK_E 0x100 /* MC_CLK Bus Clock Enable */
-#define PWR_SV_E 0x200 /* Power Save Enable */
-#define CLKDIV_BYPASS 0x400 /* Bypass Divisor */
-#define WIDE_BUS 0x800 /* Wide Bus Mode Enable */
-
-/* Bit masks for SDH_RESP_CMD */
-
-#define RESP_CMD 0x3f /* Response Command */
-
-/* Bit masks for SDH_DATA_CTL */
-
-#define DTX_E 0x1 /* Data Transfer Enable */
-#define DTX_DIR 0x2 /* Data Transfer Direction */
-#define DTX_MODE 0x4 /* Data Transfer Mode */
-#define DTX_DMA_E 0x8 /* Data Transfer DMA Enable */
-#define DTX_BLK_LGTH 0xf0 /* Data Transfer Block Length */
-
-/* Bit masks for SDH_STATUS */
-
-#define CMD_CRC_FAIL 0x1 /* CMD CRC Fail */
-#define DAT_CRC_FAIL 0x2 /* Data CRC Fail */
-#define CMD_TIME_OUT 0x4 /* CMD Time Out */
-#define DAT_TIME_OUT 0x8 /* Data Time Out */
-#define TX_UNDERRUN 0x10 /* Transmit Underrun */
-#define RX_OVERRUN 0x20 /* Receive Overrun */
-#define CMD_RESP_END 0x40 /* CMD Response End */
-#define CMD_SENT 0x80 /* CMD Sent */
-#define DAT_END 0x100 /* Data End */
-#define START_BIT_ERR 0x200 /* Start Bit Error */
-#define DAT_BLK_END 0x400 /* Data Block End */
-#define CMD_ACT 0x800 /* CMD Active */
-#define TX_ACT 0x1000 /* Transmit Active */
-#define RX_ACT 0x2000 /* Receive Active */
-#define TX_FIFO_STAT 0x4000 /* Transmit FIFO Status */
-#define RX_FIFO_STAT 0x8000 /* Receive FIFO Status */
-#define TX_FIFO_FULL 0x10000 /* Transmit FIFO Full */
-#define RX_FIFO_FULL 0x20000 /* Receive FIFO Full */
-#define TX_FIFO_ZERO 0x40000 /* Transmit FIFO Empty */
-#define RX_DAT_ZERO 0x80000 /* Receive FIFO Empty */
-#define TX_DAT_RDY 0x100000 /* Transmit Data Available */
-#define RX_FIFO_RDY 0x200000 /* Receive Data Available */
-
-/* Bit masks for SDH_STATUS_CLR */
-
-#define CMD_CRC_FAIL_STAT 0x1 /* CMD CRC Fail Status */
-#define DAT_CRC_FAIL_STAT 0x2 /* Data CRC Fail Status */
-#define CMD_TIMEOUT_STAT 0x4 /* CMD Time Out Status */
-#define DAT_TIMEOUT_STAT 0x8 /* Data Time Out status */
-#define TX_UNDERRUN_STAT 0x10 /* Transmit Underrun Status */
-#define RX_OVERRUN_STAT 0x20 /* Receive Overrun Status */
-#define CMD_RESP_END_STAT 0x40 /* CMD Response End Status */
-#define CMD_SENT_STAT 0x80 /* CMD Sent Status */
-#define DAT_END_STAT 0x100 /* Data End Status */
-#define START_BIT_ERR_STAT 0x200 /* Start Bit Error Status */
-#define DAT_BLK_END_STAT 0x400 /* Data Block End Status */
-
-/* Bit masks for SDH_MASK0 */
-
-#define CMD_CRC_FAIL_MASK 0x1 /* CMD CRC Fail Mask */
-#define DAT_CRC_FAIL_MASK 0x2 /* Data CRC Fail Mask */
-#define CMD_TIMEOUT_MASK 0x4 /* CMD Time Out Mask */
-#define DAT_TIMEOUT_MASK 0x8 /* Data Time Out Mask */
-#define TX_UNDERRUN_MASK 0x10 /* Transmit Underrun Mask */
-#define RX_OVERRUN_MASK 0x20 /* Receive Overrun Mask */
-#define CMD_RESP_END_MASK 0x40 /* CMD Response End Mask */
-#define CMD_SENT_MASK 0x80 /* CMD Sent Mask */
-#define DAT_END_MASK 0x100 /* Data End Mask */
-#define START_BIT_ERR_MASK 0x200 /* Start Bit Error Mask */
-#define DAT_BLK_END_MASK 0x400 /* Data Block End Mask */
-#define CMD_ACT_MASK 0x800 /* CMD Active Mask */
-#define TX_ACT_MASK 0x1000 /* Transmit Active Mask */
-#define RX_ACT_MASK 0x2000 /* Receive Active Mask */
-#define TX_FIFO_STAT_MASK 0x4000 /* Transmit FIFO Status Mask */
-#define RX_FIFO_STAT_MASK 0x8000 /* Receive FIFO Status Mask */
-#define TX_FIFO_FULL_MASK 0x10000 /* Transmit FIFO Full Mask */
-#define RX_FIFO_FULL_MASK 0x20000 /* Receive FIFO Full Mask */
-#define TX_FIFO_ZERO_MASK 0x40000 /* Transmit FIFO Empty Mask */
-#define RX_DAT_ZERO_MASK 0x80000 /* Receive FIFO Empty Mask */
-#define TX_DAT_RDY_MASK 0x100000 /* Transmit Data Available Mask */
-#define RX_FIFO_RDY_MASK 0x200000 /* Receive Data Available Mask */
-
-/* Bit masks for SDH_FIFO_CNT */
-
-#define FIFO_COUNT 0x7fff /* FIFO Count */
-
-/* Bit masks for SDH_E_STATUS */
-
-#define SDIO_INT_DET 0x2 /* SDIO Int Detected */
-#define SD_CARD_DET 0x10 /* SD Card Detect */
-
-/* Bit masks for SDH_E_MASK */
-
-#define SDIO_MSK 0x2 /* Mask SDIO Int Detected */
-#define SCD_MSK 0x40 /* Mask Card Detect */
-
-/* Bit masks for SDH_CFG */
-
-#define CLKS_EN 0x1 /* Clocks Enable */
-#define SD4E 0x4 /* SDIO 4-Bit Enable */
-#define MWE 0x8 /* Moving Window Enable */
-#define SD_RST 0x10 /* SDMMC Reset */
-#define PUP_SDDAT 0x20 /* Pull-up SD_DAT */
-#define PUP_SDDAT3 0x40 /* Pull-up SD_DAT3 */
-#define PD_SDDAT3 0x80 /* Pull-down SD_DAT3 */
-
-/* Bit masks for SDH_RD_WAIT_EN */
-
-#define RWR 0x1 /* Read Wait Request */
-
-/* Bit masks for ATAPI_CONTROL */
-
-#define PIO_START 0x1 /* Start PIO/Reg Op */
-#define MULTI_START 0x2 /* Start Multi-DMA Op */
-#define ULTRA_START 0x4 /* Start Ultra-DMA Op */
-#define XFER_DIR 0x8 /* Transfer Direction */
-#define IORDY_EN 0x10 /* IORDY Enable */
-#define FIFO_FLUSH 0x20 /* Flush FIFOs */
-#define SOFT_RST 0x40 /* Soft Reset */
-#define DEV_RST 0x80 /* Device Reset */
-#define TFRCNT_RST 0x100 /* Trans Count Reset */
-#define END_ON_TERM 0x200 /* End/Terminate Select */
-#define PIO_USE_DMA 0x400 /* PIO-DMA Enable */
-#define UDMAIN_FIFO_THRS 0xf000 /* Ultra DMA-IN FIFO Threshold */
-
-/* Bit masks for ATAPI_STATUS */
-
-#define PIO_XFER_ON 0x1 /* PIO transfer in progress */
-#define MULTI_XFER_ON 0x2 /* Multi-word DMA transfer in progress */
-#define ULTRA_XFER_ON 0x4 /* Ultra DMA transfer in progress */
-#define ULTRA_IN_FL 0xf0 /* Ultra DMA Input FIFO Level */
-
-/* Bit masks for ATAPI_DEV_ADDR */
-
-#define DEV_ADDR 0x1f /* Device Address */
-
-/* Bit masks for ATAPI_INT_MASK */
-
-#define ATAPI_DEV_INT_MASK 0x1 /* Device interrupt mask */
-#define PIO_DONE_MASK 0x2 /* PIO transfer done interrupt mask */
-#define MULTI_DONE_MASK 0x4 /* Multi-DMA transfer done interrupt mask */
-#define UDMAIN_DONE_MASK 0x8 /* Ultra-DMA in transfer done interrupt mask */
-#define UDMAOUT_DONE_MASK 0x10 /* Ultra-DMA out transfer done interrupt mask */
-#define HOST_TERM_XFER_MASK 0x20 /* Host terminate current transfer interrupt mask */
-#define MULTI_TERM_MASK 0x40 /* Device terminate Multi-DMA transfer interrupt mask */
-#define UDMAIN_TERM_MASK 0x80 /* Device terminate Ultra-DMA-in transfer interrupt mask */
-#define UDMAOUT_TERM_MASK 0x100 /* Device terminate Ultra-DMA-out transfer interrupt mask */
-
-/* Bit masks for ATAPI_INT_STATUS */
-
-#define ATAPI_DEV_INT 0x1 /* Device interrupt status */
-#define PIO_DONE_INT 0x2 /* PIO transfer done interrupt status */
-#define MULTI_DONE_INT 0x4 /* Multi-DMA transfer done interrupt status */
-#define UDMAIN_DONE_INT 0x8 /* Ultra-DMA in transfer done interrupt status */
-#define UDMAOUT_DONE_INT 0x10 /* Ultra-DMA out transfer done interrupt status */
-#define HOST_TERM_XFER_INT 0x20 /* Host terminate current transfer interrupt status */
-#define MULTI_TERM_INT 0x40 /* Device terminate Multi-DMA transfer interrupt status */
-#define UDMAIN_TERM_INT 0x80 /* Device terminate Ultra-DMA-in transfer interrupt status */
-#define UDMAOUT_TERM_INT 0x100 /* Device terminate Ultra-DMA-out transfer interrupt status */
-
-/* Bit masks for ATAPI_LINE_STATUS */
-
-#define ATAPI_INTR 0x1 /* Device interrupt to host line status */
-#define ATAPI_DASP 0x2 /* Device dasp to host line status */
-#define ATAPI_CS0N 0x4 /* ATAPI chip select 0 line status */
-#define ATAPI_CS1N 0x8 /* ATAPI chip select 1 line status */
-#define ATAPI_ADDR 0x70 /* ATAPI address line status */
-#define ATAPI_DMAREQ 0x80 /* ATAPI DMA request line status */
-#define ATAPI_DMAACKN 0x100 /* ATAPI DMA acknowledge line status */
-#define ATAPI_DIOWN 0x200 /* ATAPI write line status */
-#define ATAPI_DIORN 0x400 /* ATAPI read line status */
-#define ATAPI_IORDY 0x800 /* ATAPI IORDY line status */
-
-/* Bit masks for ATAPI_SM_STATE */
-
-#define PIO_CSTATE 0xf /* PIO mode state machine current state */
-#define DMA_CSTATE 0xf0 /* DMA mode state machine current state */
-#define UDMAIN_CSTATE 0xf00 /* Ultra DMA-In mode state machine current state */
-#define UDMAOUT_CSTATE 0xf000 /* Ultra DMA-Out mode state machine current state */
-
-/* Bit masks for ATAPI_TERMINATE */
-
-#define ATAPI_HOST_TERM 0x1 /* Host termination */
-
-/* Bit masks for ATAPI_REG_TIM_0 */
-
-#define T2_REG 0xff /* End of cycle time for register access transfers */
-#define TEOC_REG 0xff00 /* Selects DIOR/DIOW pulsewidth */
-
-/* Bit masks for ATAPI_PIO_TIM_0 */
-
-#define T1_REG 0xf /* Time from address valid to DIOR/DIOW */
-#define T2_REG_PIO 0xff0 /* DIOR/DIOW pulsewidth */
-#define T4_REG 0xf000 /* DIOW data hold */
-
-/* Bit masks for ATAPI_PIO_TIM_1 */
-
-#define TEOC_REG_PIO 0xff /* End of cycle time for PIO access transfers. */
-
-/* Bit masks for ATAPI_MULTI_TIM_0 */
-
-#define TD 0xff /* DIOR/DIOW asserted pulsewidth */
-#define TM 0xff00 /* Time from address valid to DIOR/DIOW */
-
-/* Bit masks for ATAPI_MULTI_TIM_1 */
-
-#define TKW 0xff /* Selects DIOW negated pulsewidth */
-#define TKR 0xff00 /* Selects DIOR negated pulsewidth */
-
-/* Bit masks for ATAPI_MULTI_TIM_2 */
-
-#define TH 0xff /* Selects DIOW data hold */
-#define TEOC 0xff00 /* Selects end of cycle for DMA */
-
-/* Bit masks for ATAPI_ULTRA_TIM_0 */
-
-#define TACK 0xff /* Selects setup and hold times for TACK */
-#define TENV 0xff00 /* Selects envelope time */
-
-/* Bit masks for ATAPI_ULTRA_TIM_1 */
-
-#define TDVS 0xff /* Selects data valid setup time */
-#define TCYC_TDVS 0xff00 /* Selects cycle time - TDVS time */
-
-/* Bit masks for ATAPI_ULTRA_TIM_2 */
-
-#define TSS 0xff /* Selects time from STROBE edge to negation of DMARQ or assertion of STOP */
-#define TMLI 0xff00 /* Selects interlock time */
-
-/* Bit masks for ATAPI_ULTRA_TIM_3 */
-
-#define TZAH 0xff /* Selects minimum delay required for output */
-#define READY_PAUSE 0xff00 /* Selects ready to pause */
-
-/* Bit masks for TIMER_ENABLE1 */
-
-#define TIMEN8 0x1 /* Timer 8 Enable */
-#define TIMEN9 0x2 /* Timer 9 Enable */
-#define TIMEN10 0x4 /* Timer 10 Enable */
-
-/* Bit masks for TIMER_DISABLE1 */
-
-#define TIMDIS8 0x1 /* Timer 8 Disable */
-#define TIMDIS9 0x2 /* Timer 9 Disable */
-#define TIMDIS10 0x4 /* Timer 10 Disable */
-
-/* Bit masks for TIMER_STATUS1 */
-
-#define TIMIL8 0x1 /* Timer 8 Interrupt */
-#define TIMIL9 0x2 /* Timer 9 Interrupt */
-#define TIMIL10 0x4 /* Timer 10 Interrupt */
-#define TOVF_ERR8 0x10 /* Timer 8 Counter Overflow */
-#define TOVF_ERR9 0x20 /* Timer 9 Counter Overflow */
-#define TOVF_ERR10 0x40 /* Timer 10 Counter Overflow */
-#define TRUN8 0x1000 /* Timer 8 Slave Enable Status */
-#define TRUN9 0x2000 /* Timer 9 Slave Enable Status */
-#define TRUN10 0x4000 /* Timer 10 Slave Enable Status */
-
-/* Bit masks for EPPI0 are obtained from common base header for EPPIx (EPPI1 and EPPI2) */
-
-/* Bit masks for USB_FADDR */
-
-#define FUNCTION_ADDRESS 0x7f /* Function address */
-
-/* Bit masks for USB_POWER */
-
-#define ENABLE_SUSPENDM 0x1 /* enable SuspendM output */
-#define SUSPEND_MODE 0x2 /* Suspend Mode indicator */
-#define RESUME_MODE 0x4 /* Resume Mode indicator */
-#define RESET 0x8 /* Reset indicator */
-#define HS_MODE 0x10 /* High Speed mode indicator */
-#define HS_ENABLE 0x20 /* High Speed Enable */
-#define SOFT_CONN 0x40 /* Soft connect */
-#define ISO_UPDATE 0x80 /* Isochronous update */
-
-/* Bit masks for USB_INTRTX */
-
-#define EP0_TX 0x1 /* Tx Endpoint 0 interrupt */
-#define EP1_TX 0x2 /* Tx Endpoint 1 interrupt */
-#define EP2_TX 0x4 /* Tx Endpoint 2 interrupt */
-#define EP3_TX 0x8 /* Tx Endpoint 3 interrupt */
-#define EP4_TX 0x10 /* Tx Endpoint 4 interrupt */
-#define EP5_TX 0x20 /* Tx Endpoint 5 interrupt */
-#define EP6_TX 0x40 /* Tx Endpoint 6 interrupt */
-#define EP7_TX 0x80 /* Tx Endpoint 7 interrupt */
-
-/* Bit masks for USB_INTRRX */
-
-#define EP1_RX 0x2 /* Rx Endpoint 1 interrupt */
-#define EP2_RX 0x4 /* Rx Endpoint 2 interrupt */
-#define EP3_RX 0x8 /* Rx Endpoint 3 interrupt */
-#define EP4_RX 0x10 /* Rx Endpoint 4 interrupt */
-#define EP5_RX 0x20 /* Rx Endpoint 5 interrupt */
-#define EP6_RX 0x40 /* Rx Endpoint 6 interrupt */
-#define EP7_RX 0x80 /* Rx Endpoint 7 interrupt */
-
-/* Bit masks for USB_INTRTXE */
-
-#define EP0_TX_E 0x1 /* Endpoint 0 interrupt Enable */
-#define EP1_TX_E 0x2 /* Tx Endpoint 1 interrupt Enable */
-#define EP2_TX_E 0x4 /* Tx Endpoint 2 interrupt Enable */
-#define EP3_TX_E 0x8 /* Tx Endpoint 3 interrupt Enable */
-#define EP4_TX_E 0x10 /* Tx Endpoint 4 interrupt Enable */
-#define EP5_TX_E 0x20 /* Tx Endpoint 5 interrupt Enable */
-#define EP6_TX_E 0x40 /* Tx Endpoint 6 interrupt Enable */
-#define EP7_TX_E 0x80 /* Tx Endpoint 7 interrupt Enable */
-
-/* Bit masks for USB_INTRRXE */
-
-#define EP1_RX_E 0x2 /* Rx Endpoint 1 interrupt Enable */
-#define EP2_RX_E 0x4 /* Rx Endpoint 2 interrupt Enable */
-#define EP3_RX_E 0x8 /* Rx Endpoint 3 interrupt Enable */
-#define EP4_RX_E 0x10 /* Rx Endpoint 4 interrupt Enable */
-#define EP5_RX_E 0x20 /* Rx Endpoint 5 interrupt Enable */
-#define EP6_RX_E 0x40 /* Rx Endpoint 6 interrupt Enable */
-#define EP7_RX_E 0x80 /* Rx Endpoint 7 interrupt Enable */
-
-/* Bit masks for USB_INTRUSB */
-
-#define SUSPEND_B 0x1 /* Suspend indicator */
-#define RESUME_B 0x2 /* Resume indicator */
-#define RESET_OR_BABLE_B 0x4 /* Reset/babble indicator */
-#define SOF_B 0x8 /* Start of frame */
-#define CONN_B 0x10 /* Connection indicator */
-#define DISCON_B 0x20 /* Disconnect indicator */
-#define SESSION_REQ_B 0x40 /* Session Request */
-#define VBUS_ERROR_B 0x80 /* Vbus threshold indicator */
-
-/* Bit masks for USB_INTRUSBE */
-
-#define SUSPEND_BE 0x1 /* Suspend indicator int enable */
-#define RESUME_BE 0x2 /* Resume indicator int enable */
-#define RESET_OR_BABLE_BE 0x4 /* Reset/babble indicator int enable */
-#define SOF_BE 0x8 /* Start of frame int enable */
-#define CONN_BE 0x10 /* Connection indicator int enable */
-#define DISCON_BE 0x20 /* Disconnect indicator int enable */
-#define SESSION_REQ_BE 0x40 /* Session Request int enable */
-#define VBUS_ERROR_BE 0x80 /* Vbus threshold indicator int enable */
-
-/* Bit masks for USB_FRAME */
-
-#define FRAME_NUMBER 0x7ff /* Frame number */
-
-/* Bit masks for USB_INDEX */
-
-#define SELECTED_ENDPOINT 0xf /* selected endpoint */
-
-/* Bit masks for USB_GLOBAL_CTL */
-
-#define GLOBAL_ENA 0x1 /* enables USB module */
-#define EP1_TX_ENA 0x2 /* Transmit endpoint 1 enable */
-#define EP2_TX_ENA 0x4 /* Transmit endpoint 2 enable */
-#define EP3_TX_ENA 0x8 /* Transmit endpoint 3 enable */
-#define EP4_TX_ENA 0x10 /* Transmit endpoint 4 enable */
-#define EP5_TX_ENA 0x20 /* Transmit endpoint 5 enable */
-#define EP6_TX_ENA 0x40 /* Transmit endpoint 6 enable */
-#define EP7_TX_ENA 0x80 /* Transmit endpoint 7 enable */
-#define EP1_RX_ENA 0x100 /* Receive endpoint 1 enable */
-#define EP2_RX_ENA 0x200 /* Receive endpoint 2 enable */
-#define EP3_RX_ENA 0x400 /* Receive endpoint 3 enable */
-#define EP4_RX_ENA 0x800 /* Receive endpoint 4 enable */
-#define EP5_RX_ENA 0x1000 /* Receive endpoint 5 enable */
-#define EP6_RX_ENA 0x2000 /* Receive endpoint 6 enable */
-#define EP7_RX_ENA 0x4000 /* Receive endpoint 7 enable */
-
-/* Bit masks for USB_OTG_DEV_CTL */
-
-#define SESSION 0x1 /* session indicator */
-#define HOST_REQ 0x2 /* Host negotiation request */
-#define HOST_MODE 0x4 /* indicates USBDRC is a host */
-#define VBUS0 0x8 /* Vbus level indicator[0] */
-#define VBUS1 0x10 /* Vbus level indicator[1] */
-#define LSDEV 0x20 /* Low-speed indicator */
-#define FSDEV 0x40 /* Full or High-speed indicator */
-#define B_DEVICE 0x80 /* 'A' or 'B' device indicator */
-
-/* Bit masks for USB_OTG_VBUS_IRQ */
-
-#define DRIVE_VBUS_ON 0x1 /* indicator to drive VBUS control circuit */
-#define DRIVE_VBUS_OFF 0x2 /* indicator to shut off charge pump */
-#define CHRG_VBUS_START 0x4 /* indicator for external circuit to start charging VBUS */
-#define CHRG_VBUS_END 0x8 /* indicator for external circuit to end charging VBUS */
-#define DISCHRG_VBUS_START 0x10 /* indicator to start discharging VBUS */
-#define DISCHRG_VBUS_END 0x20 /* indicator to stop discharging VBUS */
-
-/* Bit masks for USB_OTG_VBUS_MASK */
-
-#define DRIVE_VBUS_ON_ENA 0x1 /* enable DRIVE_VBUS_ON interrupt */
-#define DRIVE_VBUS_OFF_ENA 0x2 /* enable DRIVE_VBUS_OFF interrupt */
-#define CHRG_VBUS_START_ENA 0x4 /* enable CHRG_VBUS_START interrupt */
-#define CHRG_VBUS_END_ENA 0x8 /* enable CHRG_VBUS_END interrupt */
-#define DISCHRG_VBUS_START_ENA 0x10 /* enable DISCHRG_VBUS_START interrupt */
-#define DISCHRG_VBUS_END_ENA 0x20 /* enable DISCHRG_VBUS_END interrupt */
-
-/* Bit masks for USB_CSR0 */
-
-#define RXPKTRDY 0x1 /* data packet receive indicator */
-#define TXPKTRDY 0x2 /* data packet in FIFO indicator */
-#define STALL_SENT 0x4 /* STALL handshake sent */
-#define DATAEND 0x8 /* Data end indicator */
-#define SETUPEND 0x10 /* Setup end */
-#define SENDSTALL 0x20 /* Send STALL handshake */
-#define SERVICED_RXPKTRDY 0x40 /* used to clear the RxPktRdy bit */
-#define SERVICED_SETUPEND 0x80 /* used to clear the SetupEnd bit */
-#define FLUSHFIFO 0x100 /* flush endpoint FIFO */
-#define STALL_RECEIVED_H 0x4 /* STALL handshake received host mode */
-#define SETUPPKT_H 0x8 /* send Setup token host mode */
-#define ERROR_H 0x10 /* timeout error indicator host mode */
-#define REQPKT_H 0x20 /* Request an IN transaction host mode */
-#define STATUSPKT_H 0x40 /* Status stage transaction host mode */
-#define NAK_TIMEOUT_H 0x80 /* EP0 halted after a NAK host mode */
-
-/* Bit masks for USB_COUNT0 */
-
-#define EP0_RX_COUNT 0x7f /* number of received bytes in EP0 FIFO */
-
-/* Bit masks for USB_NAKLIMIT0 */
-
-#define EP0_NAK_LIMIT 0x1f /* number of frames/micro frames after which EP0 times out */
-
-/* Bit masks for USB_TX_MAX_PACKET */
-
-#define MAX_PACKET_SIZE_T 0x7ff /* maximum data payload in a frame */
-
-/* Bit masks for USB_RX_MAX_PACKET */
-
-#define MAX_PACKET_SIZE_R 0x7ff /* maximum data payload in a frame */
-
-/* Bit masks for USB_TXCSR */
-
-#define TXPKTRDY_T 0x1 /* data packet in FIFO indicator */
-#define FIFO_NOT_EMPTY_T 0x2 /* FIFO not empty */
-#define UNDERRUN_T 0x4 /* TxPktRdy not set for an IN token */
-#define FLUSHFIFO_T 0x8 /* flush endpoint FIFO */
-#define STALL_SEND_T 0x10 /* issue a Stall handshake */
-#define STALL_SENT_T 0x20 /* Stall handshake transmitted */
-#define CLEAR_DATATOGGLE_T 0x40 /* clear endpoint data toggle */
-#define INCOMPTX_T 0x80 /* indicates that a large packet is split */
-#define DMAREQMODE_T 0x400 /* DMA mode (0 or 1) selection */
-#define FORCE_DATATOGGLE_T 0x800 /* Force data toggle */
-#define DMAREQ_ENA_T 0x1000 /* Enable DMA request for Tx EP */
-#define ISO_T 0x4000 /* enable Isochronous transfers */
-#define AUTOSET_T 0x8000 /* allows TxPktRdy to be set automatically */
-#define ERROR_TH 0x4 /* error condition host mode */
-#define STALL_RECEIVED_TH 0x20 /* Stall handshake received host mode */
-#define NAK_TIMEOUT_TH 0x80 /* NAK timeout host mode */
-
-/* Bit masks for USB_TXCOUNT */
-
-#define TX_COUNT 0x1fff /* Number of bytes to be written to the selected endpoint Tx FIFO */
-
-/* Bit masks for USB_RXCSR */
-
-#define RXPKTRDY_R 0x1 /* data packet in FIFO indicator */
-#define FIFO_FULL_R 0x2 /* FIFO not empty */
-#define OVERRUN_R 0x4 /* TxPktRdy not set for an IN token */
-#define DATAERROR_R 0x8 /* Out packet cannot be loaded into Rx FIFO */
-#define FLUSHFIFO_R 0x10 /* flush endpoint FIFO */
-#define STALL_SEND_R 0x20 /* issue a Stall handshake */
-#define STALL_SENT_R 0x40 /* Stall handshake transmitted */
-#define CLEAR_DATATOGGLE_R 0x80 /* clear endpoint data toggle */
-#define INCOMPRX_R 0x100 /* indicates that a large packet is split */
-#define DMAREQMODE_R 0x800 /* DMA mode (0 or 1) selection */
-#define DISNYET_R 0x1000 /* disable Nyet handshakes */
-#define DMAREQ_ENA_R 0x2000 /* Enable DMA request for Tx EP */
-#define ISO_R 0x4000 /* enable Isochronous transfers */
-#define AUTOCLEAR_R 0x8000 /* allows TxPktRdy to be set automatically */
-#define ERROR_RH 0x4 /* TxPktRdy not set for an IN token host mode */
-#define REQPKT_RH 0x20 /* request an IN transaction host mode */
-#define STALL_RECEIVED_RH 0x40 /* Stall handshake received host mode */
-#define INCOMPRX_RH 0x100 /* indicates that a large packet is split host mode */
-#define DMAREQMODE_RH 0x800 /* DMA mode (0 or 1) selection host mode */
-#define AUTOREQ_RH 0x4000 /* sets ReqPkt automatically host mode */
-
-/* Bit masks for USB_RXCOUNT */
-
-#define RX_COUNT 0x1fff /* Number of received bytes in the packet in the Rx FIFO */
-
-/* Bit masks for USB_TXTYPE */
-
-#define TARGET_EP_NO_T 0xf /* EP number */
-#define PROTOCOL_T 0xc /* transfer type */
-
-/* Bit masks for USB_TXINTERVAL */
-
-#define TX_POLL_INTERVAL 0xff /* polling interval for selected Tx EP */
-
-/* Bit masks for USB_RXTYPE */
-
-#define TARGET_EP_NO_R 0xf /* EP number */
-#define PROTOCOL_R 0xc /* transfer type */
-
-/* Bit masks for USB_RXINTERVAL */
-
-#define RX_POLL_INTERVAL 0xff /* polling interval for selected Rx EP */
-
-/* Bit masks for USB_DMA_INTERRUPT */
-
-#define DMA0_INT 0x1 /* DMA0 pending interrupt */
-#define DMA1_INT 0x2 /* DMA1 pending interrupt */
-#define DMA2_INT 0x4 /* DMA2 pending interrupt */
-#define DMA3_INT 0x8 /* DMA3 pending interrupt */
-#define DMA4_INT 0x10 /* DMA4 pending interrupt */
-#define DMA5_INT 0x20 /* DMA5 pending interrupt */
-#define DMA6_INT 0x40 /* DMA6 pending interrupt */
-#define DMA7_INT 0x80 /* DMA7 pending interrupt */
-
-/* Bit masks for USB_DMAxCONTROL */
-
-#define DMA_ENA 0x1 /* DMA enable */
-#define DIRECTION 0x2 /* direction of DMA transfer */
-#define MODE 0x4 /* DMA transfer mode */
-#define INT_ENA 0x8 /* Interrupt enable */
-#define EPNUM 0xf0 /* EP number */
-#define BUSERROR 0x100 /* DMA Bus error */
-
-/* Bit masks for USB_DMAxADDRHIGH */
-
-#define DMA_ADDR_HIGH 0xffff /* Upper 16-bits of memory source/destination address for the DMA master channel */
-
-/* Bit masks for USB_DMAxADDRLOW */
-
-#define DMA_ADDR_LOW 0xffff /* Lower 16-bits of memory source/destination address for the DMA master channel */
-
-/* Bit masks for USB_DMAxCOUNTHIGH */
-
-#define DMA_COUNT_HIGH 0xffff /* Upper 16-bits of byte count of DMA transfer for DMA master channel */
-
-/* Bit masks for USB_DMAxCOUNTLOW */
-
-#define DMA_COUNT_LOW 0xffff /* Lower 16-bits of byte count of DMA transfer for DMA master channel */
-
-/* Bit masks for HMDMAx_CONTROL */
-
-#define HMDMAEN 0x1 /* Handshake MDMA Enable */
-#define REP 0x2 /* Handshake MDMA Request Polarity */
-#define UTE 0x8 /* Urgency Threshold Enable */
-#define OIE 0x10 /* Overflow Interrupt Enable */
-#define BDIE 0x20 /* Block Done Interrupt Enable */
-#define MBDI 0x40 /* Mask Block Done Interrupt */
-#define DRQ 0x300 /* Handshake MDMA Request Type */
-#define RBC 0x1000 /* Force Reload of BCOUNT */
-#define PS 0x2000 /* Pin Status */
-#define OI 0x4000 /* Overflow Interrupt Generated */
-#define BDI 0x8000 /* Block Done Interrupt Generated */
-
-/* ******************************************* */
-/* MULTI BIT MACRO ENUMERATIONS */
-/* ******************************************* */
-
-
#endif /* _DEF_BF548_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF549.h b/arch/blackfin/mach-bf548/include/mach/defBF549.h
index f7f043560c6..5a04e6d4017 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF549.h
@@ -10,121 +10,13 @@
/* Include all Core registers and bit definitions */
#include <asm/def_LPBlackfin.h>
-
/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
#include "defBF54x_base.h"
-/* The following are the #defines needed by ADSP-BF549 that are not in the common header */
-
-/* Timer Registers */
-
-#define TIMER8_CONFIG 0xffc00600 /* Timer 8 Configuration Register */
-#define TIMER8_COUNTER 0xffc00604 /* Timer 8 Counter Register */
-#define TIMER8_PERIOD 0xffc00608 /* Timer 8 Period Register */
-#define TIMER8_WIDTH 0xffc0060c /* Timer 8 Width Register */
-#define TIMER9_CONFIG 0xffc00610 /* Timer 9 Configuration Register */
-#define TIMER9_COUNTER 0xffc00614 /* Timer 9 Counter Register */
-#define TIMER9_PERIOD 0xffc00618 /* Timer 9 Period Register */
-#define TIMER9_WIDTH 0xffc0061c /* Timer 9 Width Register */
-#define TIMER10_CONFIG 0xffc00620 /* Timer 10 Configuration Register */
-#define TIMER10_COUNTER 0xffc00624 /* Timer 10 Counter Register */
-#define TIMER10_PERIOD 0xffc00628 /* Timer 10 Period Register */
-#define TIMER10_WIDTH 0xffc0062c /* Timer 10 Width Register */
-
-/* Timer Group of 3 Registers */
-
-#define TIMER_ENABLE1 0xffc00640 /* Timer Group of 3 Enable Register */
-#define TIMER_DISABLE1 0xffc00644 /* Timer Group of 3 Disable Register */
-#define TIMER_STATUS1 0xffc00648 /* Timer Group of 3 Status Register */
-
-/* SPORT0 Registers */
-
-#define SPORT0_TCR1 0xffc00800 /* SPORT0 Transmit Configuration 1 Register */
-#define SPORT0_TCR2 0xffc00804 /* SPORT0 Transmit Configuration 2 Register */
-#define SPORT0_TCLKDIV 0xffc00808 /* SPORT0 Transmit Serial Clock Divider Register */
-#define SPORT0_TFSDIV 0xffc0080c /* SPORT0 Transmit Frame Sync Divider Register */
-#define SPORT0_TX 0xffc00810 /* SPORT0 Transmit Data Register */
-#define SPORT0_RX 0xffc00818 /* SPORT0 Receive Data Register */
-#define SPORT0_RCR1 0xffc00820 /* SPORT0 Receive Configuration 1 Register */
-#define SPORT0_RCR2 0xffc00824 /* SPORT0 Receive Configuration 2 Register */
-#define SPORT0_RCLKDIV 0xffc00828 /* SPORT0 Receive Serial Clock Divider Register */
-#define SPORT0_RFSDIV 0xffc0082c /* SPORT0 Receive Frame Sync Divider Register */
-#define SPORT0_STAT 0xffc00830 /* SPORT0 Status Register */
-#define SPORT0_CHNL 0xffc00834 /* SPORT0 Current Channel Register */
-#define SPORT0_MCMC1 0xffc00838 /* SPORT0 Multi channel Configuration Register 1 */
-#define SPORT0_MCMC2 0xffc0083c /* SPORT0 Multi channel Configuration Register 2 */
-#define SPORT0_MTCS0 0xffc00840 /* SPORT0 Multi channel Transmit Select Register 0 */
-#define SPORT0_MTCS1 0xffc00844 /* SPORT0 Multi channel Transmit Select Register 1 */
-#define SPORT0_MTCS2 0xffc00848 /* SPORT0 Multi channel Transmit Select Register 2 */
-#define SPORT0_MTCS3 0xffc0084c /* SPORT0 Multi channel Transmit Select Register 3 */
-#define SPORT0_MRCS0 0xffc00850 /* SPORT0 Multi channel Receive Select Register 0 */
-#define SPORT0_MRCS1 0xffc00854 /* SPORT0 Multi channel Receive Select Register 1 */
-#define SPORT0_MRCS2 0xffc00858 /* SPORT0 Multi channel Receive Select Register 2 */
-#define SPORT0_MRCS3 0xffc0085c /* SPORT0 Multi channel Receive Select Register 3 */
-
-/* EPPI0 Registers */
-
-#define EPPI0_STATUS 0xffc01000 /* EPPI0 Status Register */
-#define EPPI0_HCOUNT 0xffc01004 /* EPPI0 Horizontal Transfer Count Register */
-#define EPPI0_HDELAY 0xffc01008 /* EPPI0 Horizontal Delay Count Register */
-#define EPPI0_VCOUNT 0xffc0100c /* EPPI0 Vertical Transfer Count Register */
-#define EPPI0_VDELAY 0xffc01010 /* EPPI0 Vertical Delay Count Register */
-#define EPPI0_FRAME 0xffc01014 /* EPPI0 Lines per Frame Register */
-#define EPPI0_LINE 0xffc01018 /* EPPI0 Samples per Line Register */
-#define EPPI0_CLKDIV 0xffc0101c /* EPPI0 Clock Divide Register */
-#define EPPI0_CONTROL 0xffc01020 /* EPPI0 Control Register */
-#define EPPI0_FS1W_HBL 0xffc01024 /* EPPI0 FS1 Width Register / EPPI0 Horizontal Blanking Samples Per Line Register */
-#define EPPI0_FS1P_AVPL 0xffc01028 /* EPPI0 FS1 Period Register / EPPI0 Active Video Samples Per Line Register */
-#define EPPI0_FS2W_LVB 0xffc0102c /* EPPI0 FS2 Width Register / EPPI0 Lines of Vertical Blanking Register */
-#define EPPI0_FS2P_LAVF 0xffc01030 /* EPPI0 FS2 Period Register/ EPPI0 Lines of Active Video Per Field Register */
-#define EPPI0_CLIP 0xffc01034 /* EPPI0 Clipping Register */
-
-/* UART2 Registers */
-
-#define UART2_DLL 0xffc02100 /* Divisor Latch Low Byte */
-#define UART2_DLH 0xffc02104 /* Divisor Latch High Byte */
-#define UART2_GCTL 0xffc02108 /* Global Control Register */
-#define UART2_LCR 0xffc0210c /* Line Control Register */
-#define UART2_MCR 0xffc02110 /* Modem Control Register */
-#define UART2_LSR 0xffc02114 /* Line Status Register */
-#define UART2_MSR 0xffc02118 /* Modem Status Register */
-#define UART2_SCR 0xffc0211c /* Scratch Register */
-#define UART2_IER_SET 0xffc02120 /* Interrupt Enable Register Set */
-#define UART2_IER_CLEAR 0xffc02124 /* Interrupt Enable Register Clear */
-#define UART2_RBR 0xffc0212c /* Receive Buffer Register */
-
-/* Two Wire Interface Registers (TWI1) */
-
-#define TWI1_REGBASE 0xffc02200
-#define TWI1_CLKDIV 0xffc02200 /* Clock Divider Register */
-#define TWI1_CONTROL 0xffc02204 /* TWI Control Register */
-#define TWI1_SLAVE_CTRL 0xffc02208 /* TWI Slave Mode Control Register */
-#define TWI1_SLAVE_STAT 0xffc0220c /* TWI Slave Mode Status Register */
-#define TWI1_SLAVE_ADDR 0xffc02210 /* TWI Slave Mode Address Register */
-#define TWI1_MASTER_CTRL 0xffc02214 /* TWI Master Mode Control Register */
-#define TWI1_MASTER_STAT 0xffc02218 /* TWI Master Mode Status Register */
-#define TWI1_MASTER_ADDR 0xffc0221c /* TWI Master Mode Address Register */
-#define TWI1_INT_STAT 0xffc02220 /* TWI Interrupt Status Register */
-#define TWI1_INT_MASK 0xffc02224 /* TWI Interrupt Mask Register */
-#define TWI1_FIFO_CTRL 0xffc02228 /* TWI FIFO Control Register */
-#define TWI1_FIFO_STAT 0xffc0222c /* TWI FIFO Status Register */
-#define TWI1_XMT_DATA8 0xffc02280 /* TWI FIFO Transmit Data Single Byte Register */
-#define TWI1_XMT_DATA16 0xffc02284 /* TWI FIFO Transmit Data Double Byte Register */
-#define TWI1_RCV_DATA8 0xffc02288 /* TWI FIFO Receive Data Single Byte Register */
-#define TWI1_RCV_DATA16 0xffc0228c /* TWI FIFO Receive Data Double Byte Register */
-
-/* SPI2 Registers */
-
-#define SPI2_REGBASE 0xffc02400
-#define SPI2_CTL 0xffc02400 /* SPI2 Control Register */
-#define SPI2_FLG 0xffc02404 /* SPI2 Flag Register */
-#define SPI2_STAT 0xffc02408 /* SPI2 Status Register */
-#define SPI2_TDBR 0xffc0240c /* SPI2 Transmit Data Buffer Register */
-#define SPI2_RDBR 0xffc02410 /* SPI2 Receive Data Buffer Register */
-#define SPI2_BAUD 0xffc02414 /* SPI2 Baud Rate Register */
-#define SPI2_SHADOW 0xffc02418 /* SPI2 Receive Data Buffer Shadow Register */
+/* The BF549 is like the BF544, but has MXVR */
+#include "defBF547.h"
/* MXVR Registers */
@@ -296,2418 +188,4 @@
#define MXVR_PIN_CTL 0xffc028dc /* MXVR Pin Control Register */
#define MXVR_SCLK_CNT 0xffc028e0 /* MXVR System Clock Counter Register */
-/* CAN Controller 1 Config 1 Registers */
-
-#define CAN1_MC1 0xffc03200 /* CAN Controller 1 Mailbox Configuration Register 1 */
-#define CAN1_MD1 0xffc03204 /* CAN Controller 1 Mailbox Direction Register 1 */
-#define CAN1_TRS1 0xffc03208 /* CAN Controller 1 Transmit Request Set Register 1 */
-#define CAN1_TRR1 0xffc0320c /* CAN Controller 1 Transmit Request Reset Register 1 */
-#define CAN1_TA1 0xffc03210 /* CAN Controller 1 Transmit Acknowledge Register 1 */
-#define CAN1_AA1 0xffc03214 /* CAN Controller 1 Abort Acknowledge Register 1 */
-#define CAN1_RMP1 0xffc03218 /* CAN Controller 1 Receive Message Pending Register 1 */
-#define CAN1_RML1 0xffc0321c /* CAN Controller 1 Receive Message Lost Register 1 */
-#define CAN1_MBTIF1 0xffc03220 /* CAN Controller 1 Mailbox Transmit Interrupt Flag Register 1 */
-#define CAN1_MBRIF1 0xffc03224 /* CAN Controller 1 Mailbox Receive Interrupt Flag Register 1 */
-#define CAN1_MBIM1 0xffc03228 /* CAN Controller 1 Mailbox Interrupt Mask Register 1 */
-#define CAN1_RFH1 0xffc0322c /* CAN Controller 1 Remote Frame Handling Enable Register 1 */
-#define CAN1_OPSS1 0xffc03230 /* CAN Controller 1 Overwrite Protection Single Shot Transmit Register 1 */
-
-/* CAN Controller 1 Config 2 Registers */
-
-#define CAN1_MC2 0xffc03240 /* CAN Controller 1 Mailbox Configuration Register 2 */
-#define CAN1_MD2 0xffc03244 /* CAN Controller 1 Mailbox Direction Register 2 */
-#define CAN1_TRS2 0xffc03248 /* CAN Controller 1 Transmit Request Set Register 2 */
-#define CAN1_TRR2 0xffc0324c /* CAN Controller 1 Transmit Request Reset Register 2 */
-#define CAN1_TA2 0xffc03250 /* CAN Controller 1 Transmit Acknowledge Register 2 */
-#define CAN1_AA2 0xffc03254 /* CAN Controller 1 Abort Acknowledge Register 2 */
-#define CAN1_RMP2 0xffc03258 /* CAN Controller 1 Receive Message Pending Register 2 */
-#define CAN1_RML2 0xffc0325c /* CAN Controller 1 Receive Message Lost Register 2 */
-#define CAN1_MBTIF2 0xffc03260 /* CAN Controller 1 Mailbox Transmit Interrupt Flag Register 2 */
-#define CAN1_MBRIF2 0xffc03264 /* CAN Controller 1 Mailbox Receive Interrupt Flag Register 2 */
-#define CAN1_MBIM2 0xffc03268 /* CAN Controller 1 Mailbox Interrupt Mask Register 2 */
-#define CAN1_RFH2 0xffc0326c /* CAN Controller 1 Remote Frame Handling Enable Register 2 */
-#define CAN1_OPSS2 0xffc03270 /* CAN Controller 1 Overwrite Protection Single Shot Transmit Register 2 */
-
-/* CAN Controller 1 Clock/Interrupt/Counter Registers */
-
-#define CAN1_CLOCK 0xffc03280 /* CAN Controller 1 Clock Register */
-#define CAN1_TIMING 0xffc03284 /* CAN Controller 1 Timing Register */
-#define CAN1_DEBUG 0xffc03288 /* CAN Controller 1 Debug Register */
-#define CAN1_STATUS 0xffc0328c /* CAN Controller 1 Global Status Register */
-#define CAN1_CEC 0xffc03290 /* CAN Controller 1 Error Counter Register */
-#define CAN1_GIS 0xffc03294 /* CAN Controller 1 Global Interrupt Status Register */
-#define CAN1_GIM 0xffc03298 /* CAN Controller 1 Global Interrupt Mask Register */
-#define CAN1_GIF 0xffc0329c /* CAN Controller 1 Global Interrupt Flag Register */
-#define CAN1_CONTROL 0xffc032a0 /* CAN Controller 1 Master Control Register */
-#define CAN1_INTR 0xffc032a4 /* CAN Controller 1 Interrupt Pending Register */
-#define CAN1_MBTD 0xffc032ac /* CAN Controller 1 Mailbox Temporary Disable Register */
-#define CAN1_EWR 0xffc032b0 /* CAN Controller 1 Programmable Warning Level Register */
-#define CAN1_ESR 0xffc032b4 /* CAN Controller 1 Error Status Register */
-#define CAN1_UCCNT 0xffc032c4 /* CAN Controller 1 Universal Counter Register */
-#define CAN1_UCRC 0xffc032c8 /* CAN Controller 1 Universal Counter Force Reload Register */
-#define CAN1_UCCNF 0xffc032cc /* CAN Controller 1 Universal Counter Configuration Register */
-
-/* CAN Controller 1 Mailbox Acceptance Registers */
-
-#define CAN1_AM00L 0xffc03300 /* CAN Controller 1 Mailbox 0 Acceptance Mask High Register */
-#define CAN1_AM00H 0xffc03304 /* CAN Controller 1 Mailbox 0 Acceptance Mask Low Register */
-#define CAN1_AM01L 0xffc03308 /* CAN Controller 1 Mailbox 1 Acceptance Mask High Register */
-#define CAN1_AM01H 0xffc0330c /* CAN Controller 1 Mailbox 1 Acceptance Mask Low Register */
-#define CAN1_AM02L 0xffc03310 /* CAN Controller 1 Mailbox 2 Acceptance Mask High Register */
-#define CAN1_AM02H 0xffc03314 /* CAN Controller 1 Mailbox 2 Acceptance Mask Low Register */
-#define CAN1_AM03L 0xffc03318 /* CAN Controller 1 Mailbox 3 Acceptance Mask High Register */
-#define CAN1_AM03H 0xffc0331c /* CAN Controller 1 Mailbox 3 Acceptance Mask Low Register */
-#define CAN1_AM04L 0xffc03320 /* CAN Controller 1 Mailbox 4 Acceptance Mask High Register */
-#define CAN1_AM04H 0xffc03324 /* CAN Controller 1 Mailbox 4 Acceptance Mask Low Register */
-#define CAN1_AM05L 0xffc03328 /* CAN Controller 1 Mailbox 5 Acceptance Mask High Register */
-#define CAN1_AM05H 0xffc0332c /* CAN Controller 1 Mailbox 5 Acceptance Mask Low Register */
-#define CAN1_AM06L 0xffc03330 /* CAN Controller 1 Mailbox 6 Acceptance Mask High Register */
-#define CAN1_AM06H 0xffc03334 /* CAN Controller 1 Mailbox 6 Acceptance Mask Low Register */
-#define CAN1_AM07L 0xffc03338 /* CAN Controller 1 Mailbox 7 Acceptance Mask High Register */
-#define CAN1_AM07H 0xffc0333c /* CAN Controller 1 Mailbox 7 Acceptance Mask Low Register */
-#define CAN1_AM08L 0xffc03340 /* CAN Controller 1 Mailbox 8 Acceptance Mask High Register */
-#define CAN1_AM08H 0xffc03344 /* CAN Controller 1 Mailbox 8 Acceptance Mask Low Register */
-#define CAN1_AM09L 0xffc03348 /* CAN Controller 1 Mailbox 9 Acceptance Mask High Register */
-#define CAN1_AM09H 0xffc0334c /* CAN Controller 1 Mailbox 9 Acceptance Mask Low Register */
-#define CAN1_AM10L 0xffc03350 /* CAN Controller 1 Mailbox 10 Acceptance Mask High Register */
-#define CAN1_AM10H 0xffc03354 /* CAN Controller 1 Mailbox 10 Acceptance Mask Low Register */
-#define CAN1_AM11L 0xffc03358 /* CAN Controller 1 Mailbox 11 Acceptance Mask High Register */
-#define CAN1_AM11H 0xffc0335c /* CAN Controller 1 Mailbox 11 Acceptance Mask Low Register */
-#define CAN1_AM12L 0xffc03360 /* CAN Controller 1 Mailbox 12 Acceptance Mask High Register */
-#define CAN1_AM12H 0xffc03364 /* CAN Controller 1 Mailbox 12 Acceptance Mask Low Register */
-#define CAN1_AM13L 0xffc03368 /* CAN Controller 1 Mailbox 13 Acceptance Mask High Register */
-#define CAN1_AM13H 0xffc0336c /* CAN Controller 1 Mailbox 13 Acceptance Mask Low Register */
-#define CAN1_AM14L 0xffc03370 /* CAN Controller 1 Mailbox 14 Acceptance Mask High Register */
-#define CAN1_AM14H 0xffc03374 /* CAN Controller 1 Mailbox 14 Acceptance Mask Low Register */
-#define CAN1_AM15L 0xffc03378 /* CAN Controller 1 Mailbox 15 Acceptance Mask High Register */
-#define CAN1_AM15H 0xffc0337c /* CAN Controller 1 Mailbox 15 Acceptance Mask Low Register */
-
-/* CAN Controller 1 Mailbox Acceptance Registers */
-
-#define CAN1_AM16L 0xffc03380 /* CAN Controller 1 Mailbox 16 Acceptance Mask High Register */
-#define CAN1_AM16H 0xffc03384 /* CAN Controller 1 Mailbox 16 Acceptance Mask Low Register */
-#define CAN1_AM17L 0xffc03388 /* CAN Controller 1 Mailbox 17 Acceptance Mask High Register */
-#define CAN1_AM17H 0xffc0338c /* CAN Controller 1 Mailbox 17 Acceptance Mask Low Register */
-#define CAN1_AM18L 0xffc03390 /* CAN Controller 1 Mailbox 18 Acceptance Mask High Register */
-#define CAN1_AM18H 0xffc03394 /* CAN Controller 1 Mailbox 18 Acceptance Mask Low Register */
-#define CAN1_AM19L 0xffc03398 /* CAN Controller 1 Mailbox 19 Acceptance Mask High Register */
-#define CAN1_AM19H 0xffc0339c /* CAN Controller 1 Mailbox 19 Acceptance Mask Low Register */
-#define CAN1_AM20L 0xffc033a0 /* CAN Controller 1 Mailbox 20 Acceptance Mask High Register */
-#define CAN1_AM20H 0xffc033a4 /* CAN Controller 1 Mailbox 20 Acceptance Mask Low Register */
-#define CAN1_AM21L 0xffc033a8 /* CAN Controller 1 Mailbox 21 Acceptance Mask High Register */
-#define CAN1_AM21H 0xffc033ac /* CAN Controller 1 Mailbox 21 Acceptance Mask Low Register */
-#define CAN1_AM22L 0xffc033b0 /* CAN Controller 1 Mailbox 22 Acceptance Mask High Register */
-#define CAN1_AM22H 0xffc033b4 /* CAN Controller 1 Mailbox 22 Acceptance Mask Low Register */
-#define CAN1_AM23L 0xffc033b8 /* CAN Controller 1 Mailbox 23 Acceptance Mask High Register */
-#define CAN1_AM23H 0xffc033bc /* CAN Controller 1 Mailbox 23 Acceptance Mask Low Register */
-#define CAN1_AM24L 0xffc033c0 /* CAN Controller 1 Mailbox 24 Acceptance Mask High Register */
-#define CAN1_AM24H 0xffc033c4 /* CAN Controller 1 Mailbox 24 Acceptance Mask Low Register */
-#define CAN1_AM25L 0xffc033c8 /* CAN Controller 1 Mailbox 25 Acceptance Mask High Register */
-#define CAN1_AM25H 0xffc033cc /* CAN Controller 1 Mailbox 25 Acceptance Mask Low Register */
-#define CAN1_AM26L 0xffc033d0 /* CAN Controller 1 Mailbox 26 Acceptance Mask High Register */
-#define CAN1_AM26H 0xffc033d4 /* CAN Controller 1 Mailbox 26 Acceptance Mask Low Register */
-#define CAN1_AM27L 0xffc033d8 /* CAN Controller 1 Mailbox 27 Acceptance Mask High Register */
-#define CAN1_AM27H 0xffc033dc /* CAN Controller 1 Mailbox 27 Acceptance Mask Low Register */
-#define CAN1_AM28L 0xffc033e0 /* CAN Controller 1 Mailbox 28 Acceptance Mask High Register */
-#define CAN1_AM28H 0xffc033e4 /* CAN Controller 1 Mailbox 28 Acceptance Mask Low Register */
-#define CAN1_AM29L 0xffc033e8 /* CAN Controller 1 Mailbox 29 Acceptance Mask High Register */
-#define CAN1_AM29H 0xffc033ec /* CAN Controller 1 Mailbox 29 Acceptance Mask Low Register */
-#define CAN1_AM30L 0xffc033f0 /* CAN Controller 1 Mailbox 30 Acceptance Mask High Register */
-#define CAN1_AM30H 0xffc033f4 /* CAN Controller 1 Mailbox 30 Acceptance Mask Low Register */
-#define CAN1_AM31L 0xffc033f8 /* CAN Controller 1 Mailbox 31 Acceptance Mask High Register */
-#define CAN1_AM31H 0xffc033fc /* CAN Controller 1 Mailbox 31 Acceptance Mask Low Register */
-
-/* CAN Controller 1 Mailbox Data Registers */
-
-#define CAN1_MB00_DATA0 0xffc03400 /* CAN Controller 1 Mailbox 0 Data 0 Register */
-#define CAN1_MB00_DATA1 0xffc03404 /* CAN Controller 1 Mailbox 0 Data 1 Register */
-#define CAN1_MB00_DATA2 0xffc03408 /* CAN Controller 1 Mailbox 0 Data 2 Register */
-#define CAN1_MB00_DATA3 0xffc0340c /* CAN Controller 1 Mailbox 0 Data 3 Register */
-#define CAN1_MB00_LENGTH 0xffc03410 /* CAN Controller 1 Mailbox 0 Length Register */
-#define CAN1_MB00_TIMESTAMP 0xffc03414 /* CAN Controller 1 Mailbox 0 Timestamp Register */
-#define CAN1_MB00_ID0 0xffc03418 /* CAN Controller 1 Mailbox 0 ID0 Register */
-#define CAN1_MB00_ID1 0xffc0341c /* CAN Controller 1 Mailbox 0 ID1 Register */
-#define CAN1_MB01_DATA0 0xffc03420 /* CAN Controller 1 Mailbox 1 Data 0 Register */
-#define CAN1_MB01_DATA1 0xffc03424 /* CAN Controller 1 Mailbox 1 Data 1 Register */
-#define CAN1_MB01_DATA2 0xffc03428 /* CAN Controller 1 Mailbox 1 Data 2 Register */
-#define CAN1_MB01_DATA3 0xffc0342c /* CAN Controller 1 Mailbox 1 Data 3 Register */
-#define CAN1_MB01_LENGTH 0xffc03430 /* CAN Controller 1 Mailbox 1 Length Register */
-#define CAN1_MB01_TIMESTAMP 0xffc03434 /* CAN Controller 1 Mailbox 1 Timestamp Register */
-#define CAN1_MB01_ID0 0xffc03438 /* CAN Controller 1 Mailbox 1 ID0 Register */
-#define CAN1_MB01_ID1 0xffc0343c /* CAN Controller 1 Mailbox 1 ID1 Register */
-#define CAN1_MB02_DATA0 0xffc03440 /* CAN Controller 1 Mailbox 2 Data 0 Register */
-#define CAN1_MB02_DATA1 0xffc03444 /* CAN Controller 1 Mailbox 2 Data 1 Register */
-#define CAN1_MB02_DATA2 0xffc03448 /* CAN Controller 1 Mailbox 2 Data 2 Register */
-#define CAN1_MB02_DATA3 0xffc0344c /* CAN Controller 1 Mailbox 2 Data 3 Register */
-#define CAN1_MB02_LENGTH 0xffc03450 /* CAN Controller 1 Mailbox 2 Length Register */
-#define CAN1_MB02_TIMESTAMP 0xffc03454 /* CAN Controller 1 Mailbox 2 Timestamp Register */
-#define CAN1_MB02_ID0 0xffc03458 /* CAN Controller 1 Mailbox 2 ID0 Register */
-#define CAN1_MB02_ID1 0xffc0345c /* CAN Controller 1 Mailbox 2 ID1 Register */
-#define CAN1_MB03_DATA0 0xffc03460 /* CAN Controller 1 Mailbox 3 Data 0 Register */
-#define CAN1_MB03_DATA1 0xffc03464 /* CAN Controller 1 Mailbox 3 Data 1 Register */
-#define CAN1_MB03_DATA2 0xffc03468 /* CAN Controller 1 Mailbox 3 Data 2 Register */
-#define CAN1_MB03_DATA3 0xffc0346c /* CAN Controller 1 Mailbox 3 Data 3 Register */
-#define CAN1_MB03_LENGTH 0xffc03470 /* CAN Controller 1 Mailbox 3 Length Register */
-#define CAN1_MB03_TIMESTAMP 0xffc03474 /* CAN Controller 1 Mailbox 3 Timestamp Register */
-#define CAN1_MB03_ID0 0xffc03478 /* CAN Controller 1 Mailbox 3 ID0 Register */
-#define CAN1_MB03_ID1 0xffc0347c /* CAN Controller 1 Mailbox 3 ID1 Register */
-#define CAN1_MB04_DATA0 0xffc03480 /* CAN Controller 1 Mailbox 4 Data 0 Register */
-#define CAN1_MB04_DATA1 0xffc03484 /* CAN Controller 1 Mailbox 4 Data 1 Register */
-#define CAN1_MB04_DATA2 0xffc03488 /* CAN Controller 1 Mailbox 4 Data 2 Register */
-#define CAN1_MB04_DATA3 0xffc0348c /* CAN Controller 1 Mailbox 4 Data 3 Register */
-#define CAN1_MB04_LENGTH 0xffc03490 /* CAN Controller 1 Mailbox 4 Length Register */
-#define CAN1_MB04_TIMESTAMP 0xffc03494 /* CAN Controller 1 Mailbox 4 Timestamp Register */
-#define CAN1_MB04_ID0 0xffc03498 /* CAN Controller 1 Mailbox 4 ID0 Register */
-#define CAN1_MB04_ID1 0xffc0349c /* CAN Controller 1 Mailbox 4 ID1 Register */
-#define CAN1_MB05_DATA0 0xffc034a0 /* CAN Controller 1 Mailbox 5 Data 0 Register */
-#define CAN1_MB05_DATA1 0xffc034a4 /* CAN Controller 1 Mailbox 5 Data 1 Register */
-#define CAN1_MB05_DATA2 0xffc034a8 /* CAN Controller 1 Mailbox 5 Data 2 Register */
-#define CAN1_MB05_DATA3 0xffc034ac /* CAN Controller 1 Mailbox 5 Data 3 Register */
-#define CAN1_MB05_LENGTH 0xffc034b0 /* CAN Controller 1 Mailbox 5 Length Register */
-#define CAN1_MB05_TIMESTAMP 0xffc034b4 /* CAN Controller 1 Mailbox 5 Timestamp Register */
-#define CAN1_MB05_ID0 0xffc034b8 /* CAN Controller 1 Mailbox 5 ID0 Register */
-#define CAN1_MB05_ID1 0xffc034bc /* CAN Controller 1 Mailbox 5 ID1 Register */
-#define CAN1_MB06_DATA0 0xffc034c0 /* CAN Controller 1 Mailbox 6 Data 0 Register */
-#define CAN1_MB06_DATA1 0xffc034c4 /* CAN Controller 1 Mailbox 6 Data 1 Register */
-#define CAN1_MB06_DATA2 0xffc034c8 /* CAN Controller 1 Mailbox 6 Data 2 Register */
-#define CAN1_MB06_DATA3 0xffc034cc /* CAN Controller 1 Mailbox 6 Data 3 Register */
-#define CAN1_MB06_LENGTH 0xffc034d0 /* CAN Controller 1 Mailbox 6 Length Register */
-#define CAN1_MB06_TIMESTAMP 0xffc034d4 /* CAN Controller 1 Mailbox 6 Timestamp Register */
-#define CAN1_MB06_ID0 0xffc034d8 /* CAN Controller 1 Mailbox 6 ID0 Register */
-#define CAN1_MB06_ID1 0xffc034dc /* CAN Controller 1 Mailbox 6 ID1 Register */
-#define CAN1_MB07_DATA0 0xffc034e0 /* CAN Controller 1 Mailbox 7 Data 0 Register */
-#define CAN1_MB07_DATA1 0xffc034e4 /* CAN Controller 1 Mailbox 7 Data 1 Register */
-#define CAN1_MB07_DATA2 0xffc034e8 /* CAN Controller 1 Mailbox 7 Data 2 Register */
-#define CAN1_MB07_DATA3 0xffc034ec /* CAN Controller 1 Mailbox 7 Data 3 Register */
-#define CAN1_MB07_LENGTH 0xffc034f0 /* CAN Controller 1 Mailbox 7 Length Register */
-#define CAN1_MB07_TIMESTAMP 0xffc034f4 /* CAN Controller 1 Mailbox 7 Timestamp Register */
-#define CAN1_MB07_ID0 0xffc034f8 /* CAN Controller 1 Mailbox 7 ID0 Register */
-#define CAN1_MB07_ID1 0xffc034fc /* CAN Controller 1 Mailbox 7 ID1 Register */
-#define CAN1_MB08_DATA0 0xffc03500 /* CAN Controller 1 Mailbox 8 Data 0 Register */
-#define CAN1_MB08_DATA1 0xffc03504 /* CAN Controller 1 Mailbox 8 Data 1 Register */
-#define CAN1_MB08_DATA2 0xffc03508 /* CAN Controller 1 Mailbox 8 Data 2 Register */
-#define CAN1_MB08_DATA3 0xffc0350c /* CAN Controller 1 Mailbox 8 Data 3 Register */
-#define CAN1_MB08_LENGTH 0xffc03510 /* CAN Controller 1 Mailbox 8 Length Register */
-#define CAN1_MB08_TIMESTAMP 0xffc03514 /* CAN Controller 1 Mailbox 8 Timestamp Register */
-#define CAN1_MB08_ID0 0xffc03518 /* CAN Controller 1 Mailbox 8 ID0 Register */
-#define CAN1_MB08_ID1 0xffc0351c /* CAN Controller 1 Mailbox 8 ID1 Register */
-#define CAN1_MB09_DATA0 0xffc03520 /* CAN Controller 1 Mailbox 9 Data 0 Register */
-#define CAN1_MB09_DATA1 0xffc03524 /* CAN Controller 1 Mailbox 9 Data 1 Register */
-#define CAN1_MB09_DATA2 0xffc03528 /* CAN Controller 1 Mailbox 9 Data 2 Register */
-#define CAN1_MB09_DATA3 0xffc0352c /* CAN Controller 1 Mailbox 9 Data 3 Register */
-#define CAN1_MB09_LENGTH 0xffc03530 /* CAN Controller 1 Mailbox 9 Length Register */
-#define CAN1_MB09_TIMESTAMP 0xffc03534 /* CAN Controller 1 Mailbox 9 Timestamp Register */
-#define CAN1_MB09_ID0 0xffc03538 /* CAN Controller 1 Mailbox 9 ID0 Register */
-#define CAN1_MB09_ID1 0xffc0353c /* CAN Controller 1 Mailbox 9 ID1 Register */
-#define CAN1_MB10_DATA0 0xffc03540 /* CAN Controller 1 Mailbox 10 Data 0 Register */
-#define CAN1_MB10_DATA1 0xffc03544 /* CAN Controller 1 Mailbox 10 Data 1 Register */
-#define CAN1_MB10_DATA2 0xffc03548 /* CAN Controller 1 Mailbox 10 Data 2 Register */
-#define CAN1_MB10_DATA3 0xffc0354c /* CAN Controller 1 Mailbox 10 Data 3 Register */
-#define CAN1_MB10_LENGTH 0xffc03550 /* CAN Controller 1 Mailbox 10 Length Register */
-#define CAN1_MB10_TIMESTAMP 0xffc03554 /* CAN Controller 1 Mailbox 10 Timestamp Register */
-#define CAN1_MB10_ID0 0xffc03558 /* CAN Controller 1 Mailbox 10 ID0 Register */
-#define CAN1_MB10_ID1 0xffc0355c /* CAN Controller 1 Mailbox 10 ID1 Register */
-#define CAN1_MB11_DATA0 0xffc03560 /* CAN Controller 1 Mailbox 11 Data 0 Register */
-#define CAN1_MB11_DATA1 0xffc03564 /* CAN Controller 1 Mailbox 11 Data 1 Register */
-#define CAN1_MB11_DATA2 0xffc03568 /* CAN Controller 1 Mailbox 11 Data 2 Register */
-#define CAN1_MB11_DATA3 0xffc0356c /* CAN Controller 1 Mailbox 11 Data 3 Register */
-#define CAN1_MB11_LENGTH 0xffc03570 /* CAN Controller 1 Mailbox 11 Length Register */
-#define CAN1_MB11_TIMESTAMP 0xffc03574 /* CAN Controller 1 Mailbox 11 Timestamp Register */
-#define CAN1_MB11_ID0 0xffc03578 /* CAN Controller 1 Mailbox 11 ID0 Register */
-#define CAN1_MB11_ID1 0xffc0357c /* CAN Controller 1 Mailbox 11 ID1 Register */
-#define CAN1_MB12_DATA0 0xffc03580 /* CAN Controller 1 Mailbox 12 Data 0 Register */
-#define CAN1_MB12_DATA1 0xffc03584 /* CAN Controller 1 Mailbox 12 Data 1 Register */
-#define CAN1_MB12_DATA2 0xffc03588 /* CAN Controller 1 Mailbox 12 Data 2 Register */
-#define CAN1_MB12_DATA3 0xffc0358c /* CAN Controller 1 Mailbox 12 Data 3 Register */
-#define CAN1_MB12_LENGTH 0xffc03590 /* CAN Controller 1 Mailbox 12 Length Register */
-#define CAN1_MB12_TIMESTAMP 0xffc03594 /* CAN Controller 1 Mailbox 12 Timestamp Register */
-#define CAN1_MB12_ID0 0xffc03598 /* CAN Controller 1 Mailbox 12 ID0 Register */
-#define CAN1_MB12_ID1 0xffc0359c /* CAN Controller 1 Mailbox 12 ID1 Register */
-#define CAN1_MB13_DATA0 0xffc035a0 /* CAN Controller 1 Mailbox 13 Data 0 Register */
-#define CAN1_MB13_DATA1 0xffc035a4 /* CAN Controller 1 Mailbox 13 Data 1 Register */
-#define CAN1_MB13_DATA2 0xffc035a8 /* CAN Controller 1 Mailbox 13 Data 2 Register */
-#define CAN1_MB13_DATA3 0xffc035ac /* CAN Controller 1 Mailbox 13 Data 3 Register */
-#define CAN1_MB13_LENGTH 0xffc035b0 /* CAN Controller 1 Mailbox 13 Length Register */
-#define CAN1_MB13_TIMESTAMP 0xffc035b4 /* CAN Controller 1 Mailbox 13 Timestamp Register */
-#define CAN1_MB13_ID0 0xffc035b8 /* CAN Controller 1 Mailbox 13 ID0 Register */
-#define CAN1_MB13_ID1 0xffc035bc /* CAN Controller 1 Mailbox 13 ID1 Register */
-#define CAN1_MB14_DATA0 0xffc035c0 /* CAN Controller 1 Mailbox 14 Data 0 Register */
-#define CAN1_MB14_DATA1 0xffc035c4 /* CAN Controller 1 Mailbox 14 Data 1 Register */
-#define CAN1_MB14_DATA2 0xffc035c8 /* CAN Controller 1 Mailbox 14 Data 2 Register */
-#define CAN1_MB14_DATA3 0xffc035cc /* CAN Controller 1 Mailbox 14 Data 3 Register */
-#define CAN1_MB14_LENGTH 0xffc035d0 /* CAN Controller 1 Mailbox 14 Length Register */
-#define CAN1_MB14_TIMESTAMP 0xffc035d4 /* CAN Controller 1 Mailbox 14 Timestamp Register */
-#define CAN1_MB14_ID0 0xffc035d8 /* CAN Controller 1 Mailbox 14 ID0 Register */
-#define CAN1_MB14_ID1 0xffc035dc /* CAN Controller 1 Mailbox 14 ID1 Register */
-#define CAN1_MB15_DATA0 0xffc035e0 /* CAN Controller 1 Mailbox 15 Data 0 Register */
-#define CAN1_MB15_DATA1 0xffc035e4 /* CAN Controller 1 Mailbox 15 Data 1 Register */
-#define CAN1_MB15_DATA2 0xffc035e8 /* CAN Controller 1 Mailbox 15 Data 2 Register */
-#define CAN1_MB15_DATA3 0xffc035ec /* CAN Controller 1 Mailbox 15 Data 3 Register */
-#define CAN1_MB15_LENGTH 0xffc035f0 /* CAN Controller 1 Mailbox 15 Length Register */
-#define CAN1_MB15_TIMESTAMP 0xffc035f4 /* CAN Controller 1 Mailbox 15 Timestamp Register */
-#define CAN1_MB15_ID0 0xffc035f8 /* CAN Controller 1 Mailbox 15 ID0 Register */
-#define CAN1_MB15_ID1 0xffc035fc /* CAN Controller 1 Mailbox 15 ID1 Register */
-
-/* CAN Controller 1 Mailbox Data Registers */
-
-#define CAN1_MB16_DATA0 0xffc03600 /* CAN Controller 1 Mailbox 16 Data 0 Register */
-#define CAN1_MB16_DATA1 0xffc03604 /* CAN Controller 1 Mailbox 16 Data 1 Register */
-#define CAN1_MB16_DATA2 0xffc03608 /* CAN Controller 1 Mailbox 16 Data 2 Register */
-#define CAN1_MB16_DATA3 0xffc0360c /* CAN Controller 1 Mailbox 16 Data 3 Register */
-#define CAN1_MB16_LENGTH 0xffc03610 /* CAN Controller 1 Mailbox 16 Length Register */
-#define CAN1_MB16_TIMESTAMP 0xffc03614 /* CAN Controller 1 Mailbox 16 Timestamp Register */
-#define CAN1_MB16_ID0 0xffc03618 /* CAN Controller 1 Mailbox 16 ID0 Register */
-#define CAN1_MB16_ID1 0xffc0361c /* CAN Controller 1 Mailbox 16 ID1 Register */
-#define CAN1_MB17_DATA0 0xffc03620 /* CAN Controller 1 Mailbox 17 Data 0 Register */
-#define CAN1_MB17_DATA1 0xffc03624 /* CAN Controller 1 Mailbox 17 Data 1 Register */
-#define CAN1_MB17_DATA2 0xffc03628 /* CAN Controller 1 Mailbox 17 Data 2 Register */
-#define CAN1_MB17_DATA3 0xffc0362c /* CAN Controller 1 Mailbox 17 Data 3 Register */
-#define CAN1_MB17_LENGTH 0xffc03630 /* CAN Controller 1 Mailbox 17 Length Register */
-#define CAN1_MB17_TIMESTAMP 0xffc03634 /* CAN Controller 1 Mailbox 17 Timestamp Register */
-#define CAN1_MB17_ID0 0xffc03638 /* CAN Controller 1 Mailbox 17 ID0 Register */
-#define CAN1_MB17_ID1 0xffc0363c /* CAN Controller 1 Mailbox 17 ID1 Register */
-#define CAN1_MB18_DATA0 0xffc03640 /* CAN Controller 1 Mailbox 18 Data 0 Register */
-#define CAN1_MB18_DATA1 0xffc03644 /* CAN Controller 1 Mailbox 18 Data 1 Register */
-#define CAN1_MB18_DATA2 0xffc03648 /* CAN Controller 1 Mailbox 18 Data 2 Register */
-#define CAN1_MB18_DATA3 0xffc0364c /* CAN Controller 1 Mailbox 18 Data 3 Register */
-#define CAN1_MB18_LENGTH 0xffc03650 /* CAN Controller 1 Mailbox 18 Length Register */
-#define CAN1_MB18_TIMESTAMP 0xffc03654 /* CAN Controller 1 Mailbox 18 Timestamp Register */
-#define CAN1_MB18_ID0 0xffc03658 /* CAN Controller 1 Mailbox 18 ID0 Register */
-#define CAN1_MB18_ID1 0xffc0365c /* CAN Controller 1 Mailbox 18 ID1 Register */
-#define CAN1_MB19_DATA0 0xffc03660 /* CAN Controller 1 Mailbox 19 Data 0 Register */
-#define CAN1_MB19_DATA1 0xffc03664 /* CAN Controller 1 Mailbox 19 Data 1 Register */
-#define CAN1_MB19_DATA2 0xffc03668 /* CAN Controller 1 Mailbox 19 Data 2 Register */
-#define CAN1_MB19_DATA3 0xffc0366c /* CAN Controller 1 Mailbox 19 Data 3 Register */
-#define CAN1_MB19_LENGTH 0xffc03670 /* CAN Controller 1 Mailbox 19 Length Register */
-#define CAN1_MB19_TIMESTAMP 0xffc03674 /* CAN Controller 1 Mailbox 19 Timestamp Register */
-#define CAN1_MB19_ID0 0xffc03678 /* CAN Controller 1 Mailbox 19 ID0 Register */
-#define CAN1_MB19_ID1 0xffc0367c /* CAN Controller 1 Mailbox 19 ID1 Register */
-#define CAN1_MB20_DATA0 0xffc03680 /* CAN Controller 1 Mailbox 20 Data 0 Register */
-#define CAN1_MB20_DATA1 0xffc03684 /* CAN Controller 1 Mailbox 20 Data 1 Register */
-#define CAN1_MB20_DATA2 0xffc03688 /* CAN Controller 1 Mailbox 20 Data 2 Register */
-#define CAN1_MB20_DATA3 0xffc0368c /* CAN Controller 1 Mailbox 20 Data 3 Register */
-#define CAN1_MB20_LENGTH 0xffc03690 /* CAN Controller 1 Mailbox 20 Length Register */
-#define CAN1_MB20_TIMESTAMP 0xffc03694 /* CAN Controller 1 Mailbox 20 Timestamp Register */
-#define CAN1_MB20_ID0 0xffc03698 /* CAN Controller 1 Mailbox 20 ID0 Register */
-#define CAN1_MB20_ID1 0xffc0369c /* CAN Controller 1 Mailbox 20 ID1 Register */
-#define CAN1_MB21_DATA0 0xffc036a0 /* CAN Controller 1 Mailbox 21 Data 0 Register */
-#define CAN1_MB21_DATA1 0xffc036a4 /* CAN Controller 1 Mailbox 21 Data 1 Register */
-#define CAN1_MB21_DATA2 0xffc036a8 /* CAN Controller 1 Mailbox 21 Data 2 Register */
-#define CAN1_MB21_DATA3 0xffc036ac /* CAN Controller 1 Mailbox 21 Data 3 Register */
-#define CAN1_MB21_LENGTH 0xffc036b0 /* CAN Controller 1 Mailbox 21 Length Register */
-#define CAN1_MB21_TIMESTAMP 0xffc036b4 /* CAN Controller 1 Mailbox 21 Timestamp Register */
-#define CAN1_MB21_ID0 0xffc036b8 /* CAN Controller 1 Mailbox 21 ID0 Register */
-#define CAN1_MB21_ID1 0xffc036bc /* CAN Controller 1 Mailbox 21 ID1 Register */
-#define CAN1_MB22_DATA0 0xffc036c0 /* CAN Controller 1 Mailbox 22 Data 0 Register */
-#define CAN1_MB22_DATA1 0xffc036c4 /* CAN Controller 1 Mailbox 22 Data 1 Register */
-#define CAN1_MB22_DATA2 0xffc036c8 /* CAN Controller 1 Mailbox 22 Data 2 Register */
-#define CAN1_MB22_DATA3 0xffc036cc /* CAN Controller 1 Mailbox 22 Data 3 Register */
-#define CAN1_MB22_LENGTH 0xffc036d0 /* CAN Controller 1 Mailbox 22 Length Register */
-#define CAN1_MB22_TIMESTAMP 0xffc036d4 /* CAN Controller 1 Mailbox 22 Timestamp Register */
-#define CAN1_MB22_ID0 0xffc036d8 /* CAN Controller 1 Mailbox 22 ID0 Register */
-#define CAN1_MB22_ID1 0xffc036dc /* CAN Controller 1 Mailbox 22 ID1 Register */
-#define CAN1_MB23_DATA0 0xffc036e0 /* CAN Controller 1 Mailbox 23 Data 0 Register */
-#define CAN1_MB23_DATA1 0xffc036e4 /* CAN Controller 1 Mailbox 23 Data 1 Register */
-#define CAN1_MB23_DATA2 0xffc036e8 /* CAN Controller 1 Mailbox 23 Data 2 Register */
-#define CAN1_MB23_DATA3 0xffc036ec /* CAN Controller 1 Mailbox 23 Data 3 Register */
-#define CAN1_MB23_LENGTH 0xffc036f0 /* CAN Controller 1 Mailbox 23 Length Register */
-#define CAN1_MB23_TIMESTAMP 0xffc036f4 /* CAN Controller 1 Mailbox 23 Timestamp Register */
-#define CAN1_MB23_ID0 0xffc036f8 /* CAN Controller 1 Mailbox 23 ID0 Register */
-#define CAN1_MB23_ID1 0xffc036fc /* CAN Controller 1 Mailbox 23 ID1 Register */
-#define CAN1_MB24_DATA0 0xffc03700 /* CAN Controller 1 Mailbox 24 Data 0 Register */
-#define CAN1_MB24_DATA1 0xffc03704 /* CAN Controller 1 Mailbox 24 Data 1 Register */
-#define CAN1_MB24_DATA2 0xffc03708 /* CAN Controller 1 Mailbox 24 Data 2 Register */
-#define CAN1_MB24_DATA3 0xffc0370c /* CAN Controller 1 Mailbox 24 Data 3 Register */
-#define CAN1_MB24_LENGTH 0xffc03710 /* CAN Controller 1 Mailbox 24 Length Register */
-#define CAN1_MB24_TIMESTAMP 0xffc03714 /* CAN Controller 1 Mailbox 24 Timestamp Register */
-#define CAN1_MB24_ID0 0xffc03718 /* CAN Controller 1 Mailbox 24 ID0 Register */
-#define CAN1_MB24_ID1 0xffc0371c /* CAN Controller 1 Mailbox 24 ID1 Register */
-#define CAN1_MB25_DATA0 0xffc03720 /* CAN Controller 1 Mailbox 25 Data 0 Register */
-#define CAN1_MB25_DATA1 0xffc03724 /* CAN Controller 1 Mailbox 25 Data 1 Register */
-#define CAN1_MB25_DATA2 0xffc03728 /* CAN Controller 1 Mailbox 25 Data 2 Register */
-#define CAN1_MB25_DATA3 0xffc0372c /* CAN Controller 1 Mailbox 25 Data 3 Register */
-#define CAN1_MB25_LENGTH 0xffc03730 /* CAN Controller 1 Mailbox 25 Length Register */
-#define CAN1_MB25_TIMESTAMP 0xffc03734 /* CAN Controller 1 Mailbox 25 Timestamp Register */
-#define CAN1_MB25_ID0 0xffc03738 /* CAN Controller 1 Mailbox 25 ID0 Register */
-#define CAN1_MB25_ID1 0xffc0373c /* CAN Controller 1 Mailbox 25 ID1 Register */
-#define CAN1_MB26_DATA0 0xffc03740 /* CAN Controller 1 Mailbox 26 Data 0 Register */
-#define CAN1_MB26_DATA1 0xffc03744 /* CAN Controller 1 Mailbox 26 Data 1 Register */
-#define CAN1_MB26_DATA2 0xffc03748 /* CAN Controller 1 Mailbox 26 Data 2 Register */
-#define CAN1_MB26_DATA3 0xffc0374c /* CAN Controller 1 Mailbox 26 Data 3 Register */
-#define CAN1_MB26_LENGTH 0xffc03750 /* CAN Controller 1 Mailbox 26 Length Register */
-#define CAN1_MB26_TIMESTAMP 0xffc03754 /* CAN Controller 1 Mailbox 26 Timestamp Register */
-#define CAN1_MB26_ID0 0xffc03758 /* CAN Controller 1 Mailbox 26 ID0 Register */
-#define CAN1_MB26_ID1 0xffc0375c /* CAN Controller 1 Mailbox 26 ID1 Register */
-#define CAN1_MB27_DATA0 0xffc03760 /* CAN Controller 1 Mailbox 27 Data 0 Register */
-#define CAN1_MB27_DATA1 0xffc03764 /* CAN Controller 1 Mailbox 27 Data 1 Register */
-#define CAN1_MB27_DATA2 0xffc03768 /* CAN Controller 1 Mailbox 27 Data 2 Register */
-#define CAN1_MB27_DATA3 0xffc0376c /* CAN Controller 1 Mailbox 27 Data 3 Register */
-#define CAN1_MB27_LENGTH 0xffc03770 /* CAN Controller 1 Mailbox 27 Length Register */
-#define CAN1_MB27_TIMESTAMP 0xffc03774 /* CAN Controller 1 Mailbox 27 Timestamp Register */
-#define CAN1_MB27_ID0 0xffc03778 /* CAN Controller 1 Mailbox 27 ID0 Register */
-#define CAN1_MB27_ID1 0xffc0377c /* CAN Controller 1 Mailbox 27 ID1 Register */
-#define CAN1_MB28_DATA0 0xffc03780 /* CAN Controller 1 Mailbox 28 Data 0 Register */
-#define CAN1_MB28_DATA1 0xffc03784 /* CAN Controller 1 Mailbox 28 Data 1 Register */
-#define CAN1_MB28_DATA2 0xffc03788 /* CAN Controller 1 Mailbox 28 Data 2 Register */
-#define CAN1_MB28_DATA3 0xffc0378c /* CAN Controller 1 Mailbox 28 Data 3 Register */
-#define CAN1_MB28_LENGTH 0xffc03790 /* CAN Controller 1 Mailbox 28 Length Register */
-#define CAN1_MB28_TIMESTAMP 0xffc03794 /* CAN Controller 1 Mailbox 28 Timestamp Register */
-#define CAN1_MB28_ID0 0xffc03798 /* CAN Controller 1 Mailbox 28 ID0 Register */
-#define CAN1_MB28_ID1 0xffc0379c /* CAN Controller 1 Mailbox 28 ID1 Register */
-#define CAN1_MB29_DATA0 0xffc037a0 /* CAN Controller 1 Mailbox 29 Data 0 Register */
-#define CAN1_MB29_DATA1 0xffc037a4 /* CAN Controller 1 Mailbox 29 Data 1 Register */
-#define CAN1_MB29_DATA2 0xffc037a8 /* CAN Controller 1 Mailbox 29 Data 2 Register */
-#define CAN1_MB29_DATA3 0xffc037ac /* CAN Controller 1 Mailbox 29 Data 3 Register */
-#define CAN1_MB29_LENGTH 0xffc037b0 /* CAN Controller 1 Mailbox 29 Length Register */
-#define CAN1_MB29_TIMESTAMP 0xffc037b4 /* CAN Controller 1 Mailbox 29 Timestamp Register */
-#define CAN1_MB29_ID0 0xffc037b8 /* CAN Controller 1 Mailbox 29 ID0 Register */
-#define CAN1_MB29_ID1 0xffc037bc /* CAN Controller 1 Mailbox 29 ID1 Register */
-#define CAN1_MB30_DATA0 0xffc037c0 /* CAN Controller 1 Mailbox 30 Data 0 Register */
-#define CAN1_MB30_DATA1 0xffc037c4 /* CAN Controller 1 Mailbox 30 Data 1 Register */
-#define CAN1_MB30_DATA2 0xffc037c8 /* CAN Controller 1 Mailbox 30 Data 2 Register */
-#define CAN1_MB30_DATA3 0xffc037cc /* CAN Controller 1 Mailbox 30 Data 3 Register */
-#define CAN1_MB30_LENGTH 0xffc037d0 /* CAN Controller 1 Mailbox 30 Length Register */
-#define CAN1_MB30_TIMESTAMP 0xffc037d4 /* CAN Controller 1 Mailbox 30 Timestamp Register */
-#define CAN1_MB30_ID0 0xffc037d8 /* CAN Controller 1 Mailbox 30 ID0 Register */
-#define CAN1_MB30_ID1 0xffc037dc /* CAN Controller 1 Mailbox 30 ID1 Register */
-#define CAN1_MB31_DATA0 0xffc037e0 /* CAN Controller 1 Mailbox 31 Data 0 Register */
-#define CAN1_MB31_DATA1 0xffc037e4 /* CAN Controller 1 Mailbox 31 Data 1 Register */
-#define CAN1_MB31_DATA2 0xffc037e8 /* CAN Controller 1 Mailbox 31 Data 2 Register */
-#define CAN1_MB31_DATA3 0xffc037ec /* CAN Controller 1 Mailbox 31 Data 3 Register */
-#define CAN1_MB31_LENGTH 0xffc037f0 /* CAN Controller 1 Mailbox 31 Length Register */
-#define CAN1_MB31_TIMESTAMP 0xffc037f4 /* CAN Controller 1 Mailbox 31 Timestamp Register */
-#define CAN1_MB31_ID0 0xffc037f8 /* CAN Controller 1 Mailbox 31 ID0 Register */
-#define CAN1_MB31_ID1 0xffc037fc /* CAN Controller 1 Mailbox 31 ID1 Register */
-
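Each CAN1 mailbox above occupies a fixed 0x20-byte window (DATA0..DATA3 at offsets 0x00-0x0c, LENGTH at 0x10, TIMESTAMP at 0x14, ID0/ID1 at 0x18/0x1c), so any mailbox register address can be derived from the mailbox number instead of being spelled out. A minimal sketch of that arithmetic, assuming mailbox 0's DATA0 sits at 0xffc03400 (consistent with CAN1_MB20_DATA0 = 0xffc03680 listed above); the helper and its names are illustrative only, not part of this header:

#include <stdint.h>
#include <stdio.h>

#define CAN1_MB_BASE   0xffc03400u  /* assumed address of mailbox 0 DATA0 */
#define CAN1_MB_STRIDE 0x20u        /* one mailbox spans 32 bytes */

/* Register offsets inside a single mailbox window. */
enum can_mb_reg {
	MB_DATA0 = 0x00, MB_DATA1 = 0x04, MB_DATA2 = 0x08, MB_DATA3 = 0x0c,
	MB_LENGTH = 0x10, MB_TIMESTAMP = 0x14, MB_ID0 = 0x18, MB_ID1 = 0x1c,
};

/* Hypothetical helper: MMR address of register 'reg' in mailbox 'mb'. */
static inline uint32_t can1_mb_reg(unsigned int mb, enum can_mb_reg reg)
{
	return CAN1_MB_BASE + mb * CAN1_MB_STRIDE + reg;
}

int main(void)
{
	/* Prints 0xffc03680, matching CAN1_MB20_DATA0 above. */
	printf("MB20 DATA0 = 0x%08x\n", (unsigned int)can1_mb_reg(20, MB_DATA0));
	return 0;
}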
-/* ATAPI Registers */
-
-#define ATAPI_CONTROL 0xffc03800 /* ATAPI Control Register */
-#define ATAPI_STATUS 0xffc03804 /* ATAPI Status Register */
-#define ATAPI_DEV_ADDR 0xffc03808 /* ATAPI Device Register Address */
-#define ATAPI_DEV_TXBUF 0xffc0380c /* ATAPI Device Register Write Data */
-#define ATAPI_DEV_RXBUF 0xffc03810 /* ATAPI Device Register Read Data */
-#define ATAPI_INT_MASK 0xffc03814 /* ATAPI Interrupt Mask Register */
-#define ATAPI_INT_STATUS 0xffc03818 /* ATAPI Interrupt Status Register */
-#define ATAPI_XFER_LEN 0xffc0381c /* ATAPI Length of Transfer */
-#define ATAPI_LINE_STATUS 0xffc03820 /* ATAPI Line Status */
-#define ATAPI_SM_STATE 0xffc03824 /* ATAPI State Machine Status */
-#define ATAPI_TERMINATE 0xffc03828 /* ATAPI Host Terminate */
-#define ATAPI_PIO_TFRCNT 0xffc0382c /* ATAPI PIO mode transfer count */
-#define ATAPI_DMA_TFRCNT 0xffc03830 /* ATAPI DMA mode transfer count */
-#define ATAPI_UMAIN_TFRCNT 0xffc03834 /* ATAPI UDMAIN transfer count */
-#define ATAPI_UDMAOUT_TFRCNT 0xffc03838 /* ATAPI UDMAOUT transfer count */
-#define ATAPI_REG_TIM_0 0xffc03840 /* ATAPI Register Transfer Timing 0 */
-#define ATAPI_PIO_TIM_0 0xffc03844 /* ATAPI PIO Timing 0 Register */
-#define ATAPI_PIO_TIM_1 0xffc03848 /* ATAPI PIO Timing 1 Register */
-#define ATAPI_MULTI_TIM_0 0xffc03850 /* ATAPI Multi-DMA Timing 0 Register */
-#define ATAPI_MULTI_TIM_1 0xffc03854 /* ATAPI Multi-DMA Timing 1 Register */
-#define ATAPI_MULTI_TIM_2 0xffc03858 /* ATAPI Multi-DMA Timing 2 Register */
-#define ATAPI_ULTRA_TIM_0 0xffc03860 /* ATAPI Ultra-DMA Timing 0 Register */
-#define ATAPI_ULTRA_TIM_1 0xffc03864 /* ATAPI Ultra-DMA Timing 1 Register */
-#define ATAPI_ULTRA_TIM_2 0xffc03868 /* ATAPI Ultra-DMA Timing 2 Register */
-#define ATAPI_ULTRA_TIM_3 0xffc0386c /* ATAPI Ultra-DMA Timing 3 Register */
-
-/* SDH Registers */
-
-#define SDH_PWR_CTL 0xffc03900 /* SDH Power Control */
-#define SDH_CLK_CTL 0xffc03904 /* SDH Clock Control */
-#define SDH_ARGUMENT 0xffc03908 /* SDH Argument */
-#define SDH_COMMAND 0xffc0390c /* SDH Command */
-#define SDH_RESP_CMD 0xffc03910 /* SDH Response Command */
-#define SDH_RESPONSE0 0xffc03914 /* SDH Response0 */
-#define SDH_RESPONSE1 0xffc03918 /* SDH Response1 */
-#define SDH_RESPONSE2 0xffc0391c /* SDH Response2 */
-#define SDH_RESPONSE3 0xffc03920 /* SDH Response3 */
-#define SDH_DATA_TIMER 0xffc03924 /* SDH Data Timer */
-#define SDH_DATA_LGTH 0xffc03928 /* SDH Data Length */
-#define SDH_DATA_CTL 0xffc0392c /* SDH Data Control */
-#define SDH_DATA_CNT 0xffc03930 /* SDH Data Counter */
-#define SDH_STATUS 0xffc03934 /* SDH Status */
-#define SDH_STATUS_CLR 0xffc03938 /* SDH Status Clear */
-#define SDH_MASK0 0xffc0393c /* SDH Interrupt0 Mask */
-#define SDH_MASK1 0xffc03940 /* SDH Interrupt1 Mask */
-#define SDH_FIFO_CNT 0xffc03948 /* SDH FIFO Counter */
-#define SDH_FIFO 0xffc03980 /* SDH Data FIFO */
-#define SDH_E_STATUS 0xffc039c0 /* SDH Exception Status */
-#define SDH_E_MASK 0xffc039c4 /* SDH Exception Mask */
-#define SDH_CFG 0xffc039c8 /* SDH Configuration */
-#define SDH_RD_WAIT_EN 0xffc039cc /* SDH Read Wait Enable */
-#define SDH_PID0 0xffc039d0 /* SDH Peripheral Identification0 */
-#define SDH_PID1 0xffc039d4 /* SDH Peripheral Identification1 */
-#define SDH_PID2 0xffc039d8 /* SDH Peripheral Identification2 */
-#define SDH_PID3 0xffc039dc /* SDH Peripheral Identification3 */
-#define SDH_PID4 0xffc039e0 /* SDH Peripheral Identification4 */
-#define SDH_PID5 0xffc039e4 /* SDH Peripheral Identification5 */
-#define SDH_PID6 0xffc039e8 /* SDH Peripheral Identification6 */
-#define SDH_PID7 0xffc039ec /* SDH Peripheral Identification7 */
-
-/* HOST Port Registers */
-
-#define HOST_CONTROL 0xffc03a00 /* HOST Control Register */
-#define HOST_STATUS 0xffc03a04 /* HOST Status Register */
-#define HOST_TIMEOUT 0xffc03a08 /* HOST Acknowledge Mode Timeout Register */
-
-/* USB Control Registers */
-
-#define USB_FADDR 0xffc03c00 /* Function address register */
-#define USB_POWER 0xffc03c04 /* Power management register */
-#define USB_INTRTX 0xffc03c08 /* Interrupt register for endpoint 0 and Tx endpoints 1 to 7 */
-#define USB_INTRRX 0xffc03c0c /* Interrupt register for Rx endpoints 1 to 7 */
-#define USB_INTRTXE 0xffc03c10 /* Interrupt enable register for IntrTx */
-#define USB_INTRRXE 0xffc03c14 /* Interrupt enable register for IntrRx */
-#define USB_INTRUSB 0xffc03c18 /* Interrupt register for common USB interrupts */
-#define USB_INTRUSBE 0xffc03c1c /* Interrupt enable register for IntrUSB */
-#define USB_FRAME 0xffc03c20 /* USB frame number */
-#define USB_INDEX 0xffc03c24 /* Index register for selecting the indexed endpoint registers */
-#define USB_TESTMODE 0xffc03c28 /* Enables USB 2.0 test modes */
-#define USB_GLOBINTR 0xffc03c2c /* Global Interrupt Mask register and Wakeup Exception Interrupt */
-#define USB_GLOBAL_CTL 0xffc03c30 /* Global Clock Control for the core */
-
-/* USB Packet Control Registers */
-
-#define USB_TX_MAX_PACKET 0xffc03c40 /* Maximum packet size for Host Tx endpoint */
-#define USB_CSR0 0xffc03c44 /* Control Status register for endpoint 0 and Control Status register for Host Tx endpoint */
-#define USB_TXCSR 0xffc03c44 /* Control Status register for endpoint 0 and Control Status register for Host Tx endpoint */
-#define USB_RX_MAX_PACKET 0xffc03c48 /* Maximum packet size for Host Rx endpoint */
-#define USB_RXCSR 0xffc03c4c /* Control Status register for Host Rx endpoint */
-#define USB_COUNT0 0xffc03c50 /* Number of bytes received in endpoint 0 FIFO and number of bytes received in the Host Rx endpoint */
-#define USB_RXCOUNT 0xffc03c50 /* Number of bytes received in endpoint 0 FIFO and number of bytes received in the Host Rx endpoint */
-#define USB_TXTYPE 0xffc03c54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint */
-#define USB_NAKLIMIT0 0xffc03c58 /* Sets the NAK response timeout on Endpoint 0 and on Bulk transfers for Host Tx endpoint */
-#define USB_TXINTERVAL 0xffc03c58 /* Sets the NAK response timeout on Endpoint 0 and on Bulk transfers for Host Tx endpoint */
-#define USB_RXTYPE 0xffc03c5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint */
-#define USB_RXINTERVAL 0xffc03c60 /* Sets the polling interval for Interrupt and Isochronous transfers or the NAK response timeout on Bulk transfers */
-#define USB_TXCOUNT 0xffc03c68 /* Number of bytes to be written to the selected endpoint Tx FIFO */
-
-/* USB Endpoint FIFO Registers */
-
-#define USB_EP0_FIFO 0xffc03c80 /* Endpoint 0 FIFO */
-#define USB_EP1_FIFO 0xffc03c88 /* Endpoint 1 FIFO */
-#define USB_EP2_FIFO 0xffc03c90 /* Endpoint 2 FIFO */
-#define USB_EP3_FIFO 0xffc03c98 /* Endpoint 3 FIFO */
-#define USB_EP4_FIFO 0xffc03ca0 /* Endpoint 4 FIFO */
-#define USB_EP5_FIFO 0xffc03ca8 /* Endpoint 5 FIFO */
-#define USB_EP6_FIFO 0xffc03cb0 /* Endpoint 6 FIFO */
-#define USB_EP7_FIFO 0xffc03cb8 /* Endpoint 7 FIFO */
-
-/* USB OTG Control Registers */
-
-#define USB_OTG_DEV_CTL 0xffc03d00 /* OTG Device Control Register */
-#define USB_OTG_VBUS_IRQ 0xffc03d04 /* OTG VBUS Control Interrupts */
-#define USB_OTG_VBUS_MASK 0xffc03d08 /* VBUS Control Interrupt Enable */
-
-/* USB Phy Control Registers */
-
-#define USB_LINKINFO 0xffc03d48 /* Enables programming of some PHY-side delays */
-#define USB_VPLEN 0xffc03d4c /* Determines duration of VBUS pulse for VBUS charging */
-#define USB_HS_EOF1 0xffc03d50 /* Time buffer for High-Speed transactions */
-#define USB_FS_EOF1 0xffc03d54 /* Time buffer for Full-Speed transactions */
-#define USB_LS_EOF1 0xffc03d58 /* Time buffer for Low-Speed transactions */
-
-/* (APHY_CNTRL is for ADI usage only) */
-
-#define USB_APHY_CNTRL 0xffc03de0 /* Register that increases visibility of Analog PHY */
-
-/* (APHY_CALIB is for ADI usage only) */
-
-#define USB_APHY_CALIB 0xffc03de4 /* Register used to set some calibration values */
-#define USB_APHY_CNTRL2 0xffc03de8 /* Register used to prevent re-enumeration once Moab goes into hibernate mode */
-
-/* (PHY_TEST is for ADI usage only) */
-
-#define USB_PHY_TEST 0xffc03dec /* Used to reduce simulation time and simplify FIFO testability */
-#define USB_PLLOSC_CTRL 0xffc03df0 /* Used to program different parameters for USB PLL and Oscillator */
-#define USB_SRP_CLKDIV 0xffc03df4 /* Used to program clock divide value for the clock fed to the SRP detection logic */
-
-/* USB Endpoint 0 Control Registers */
-
-#define USB_EP_NI0_TXMAXP 0xffc03e00 /* Maximum packet size for Host Tx endpoint0 */
-#define USB_EP_NI0_TXCSR 0xffc03e04 /* Control Status register for endpoint 0 */
-#define USB_EP_NI0_RXMAXP 0xffc03e08 /* Maximum packet size for Host Rx endpoint0 */
-#define USB_EP_NI0_RXCSR 0xffc03e0c /* Control Status register for Host Rx endpoint0 */
-#define USB_EP_NI0_RXCOUNT 0xffc03e10 /* Number of bytes received in endpoint 0 FIFO */
-#define USB_EP_NI0_TXTYPE 0xffc03e14 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint0 */
-#define USB_EP_NI0_TXINTERVAL 0xffc03e18 /* Sets the NAK response timeout on Endpoint 0 */
-#define USB_EP_NI0_RXTYPE 0xffc03e1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint0 */
-#define USB_EP_NI0_RXINTERVAL 0xffc03e20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint0 */
-
-/* USB Endpoint 1 Control Registers */
-
-#define USB_EP_NI0_TXCOUNT 0xffc03e28 /* Number of bytes to be written to the endpoint0 Tx FIFO */
-#define USB_EP_NI1_TXMAXP 0xffc03e40 /* Maximum packet size for Host Tx endpoint1 */
-#define USB_EP_NI1_TXCSR 0xffc03e44 /* Control Status register for endpoint1 */
-#define USB_EP_NI1_RXMAXP 0xffc03e48 /* Maximum packet size for Host Rx endpoint1 */
-#define USB_EP_NI1_RXCSR 0xffc03e4c /* Control Status register for Host Rx endpoint1 */
-#define USB_EP_NI1_RXCOUNT 0xffc03e50 /* Number of bytes received in endpoint1 FIFO */
-#define USB_EP_NI1_TXTYPE 0xffc03e54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint1 */
-#define USB_EP_NI1_TXINTERVAL 0xffc03e58 /* Sets the NAK response timeout on Endpoint1 */
-#define USB_EP_NI1_RXTYPE 0xffc03e5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint1 */
-#define USB_EP_NI1_RXINTERVAL 0xffc03e60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint1 */
-
-/* USB Endpoint 2 Control Registers */
-
-#define USB_EP_NI1_TXCOUNT 0xffc03e68 /* Number of bytes to be written to the endpoint1 Tx FIFO */
-#define USB_EP_NI2_TXMAXP 0xffc03e80 /* Maximum packet size for Host Tx endpoint2 */
-#define USB_EP_NI2_TXCSR 0xffc03e84 /* Control Status register for endpoint2 */
-#define USB_EP_NI2_RXMAXP 0xffc03e88 /* Maximum packet size for Host Rx endpoint2 */
-#define USB_EP_NI2_RXCSR 0xffc03e8c /* Control Status register for Host Rx endpoint2 */
-#define USB_EP_NI2_RXCOUNT 0xffc03e90 /* Number of bytes received in endpoint2 FIFO */
-#define USB_EP_NI2_TXTYPE 0xffc03e94 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint2 */
-#define USB_EP_NI2_TXINTERVAL 0xffc03e98 /* Sets the NAK response timeout on Endpoint2 */
-#define USB_EP_NI2_RXTYPE 0xffc03e9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint2 */
-#define USB_EP_NI2_RXINTERVAL 0xffc03ea0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint2 */
-
-/* USB Endpoint 3 Control Registers */
-
-#define USB_EP_NI2_TXCOUNT 0xffc03ea8 /* Number of bytes to be written to the endpoint2 Tx FIFO */
-#define USB_EP_NI3_TXMAXP 0xffc03ec0 /* Maximum packet size for Host Tx endpoint3 */
-#define USB_EP_NI3_TXCSR 0xffc03ec4 /* Control Status register for endpoint3 */
-#define USB_EP_NI3_RXMAXP 0xffc03ec8 /* Maximum packet size for Host Rx endpoint3 */
-#define USB_EP_NI3_RXCSR 0xffc03ecc /* Control Status register for Host Rx endpoint3 */
-#define USB_EP_NI3_RXCOUNT 0xffc03ed0 /* Number of bytes received in endpoint3 FIFO */
-#define USB_EP_NI3_TXTYPE 0xffc03ed4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint3 */
-#define USB_EP_NI3_TXINTERVAL 0xffc03ed8 /* Sets the NAK response timeout on Endpoint3 */
-#define USB_EP_NI3_RXTYPE 0xffc03edc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint3 */
-#define USB_EP_NI3_RXINTERVAL 0xffc03ee0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint3 */
-
-/* USB Endpoint 4 Control Registers */
-
-#define USB_EP_NI3_TXCOUNT 0xffc03ee8 /* Number of bytes to be written to the endpoint3 Tx FIFO */
-#define USB_EP_NI4_TXMAXP 0xffc03f00 /* Maximum packet size for Host Tx endpoint4 */
-#define USB_EP_NI4_TXCSR 0xffc03f04 /* Control Status register for endpoint4 */
-#define USB_EP_NI4_RXMAXP 0xffc03f08 /* Maximum packet size for Host Rx endpoint4 */
-#define USB_EP_NI4_RXCSR 0xffc03f0c /* Control Status register for Host Rx endpoint4 */
-#define USB_EP_NI4_RXCOUNT 0xffc03f10 /* Number of bytes received in endpoint4 FIFO */
-#define USB_EP_NI4_TXTYPE 0xffc03f14 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint4 */
-#define USB_EP_NI4_TXINTERVAL 0xffc03f18 /* Sets the NAK response timeout on Endpoint4 */
-#define USB_EP_NI4_RXTYPE 0xffc03f1c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint4 */
-#define USB_EP_NI4_RXINTERVAL 0xffc03f20 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint4 */
-
-/* USB Endpoint 5 Control Registers */
-
-#define USB_EP_NI4_TXCOUNT 0xffc03f28 /* Number of bytes to be written to the endpoint4 Tx FIFO */
-#define USB_EP_NI5_TXMAXP 0xffc03f40 /* Maximum packet size for Host Tx endpoint5 */
-#define USB_EP_NI5_TXCSR 0xffc03f44 /* Control Status register for endpoint5 */
-#define USB_EP_NI5_RXMAXP 0xffc03f48 /* Maximum packet size for Host Rx endpoint5 */
-#define USB_EP_NI5_RXCSR 0xffc03f4c /* Control Status register for Host Rx endpoint5 */
-#define USB_EP_NI5_RXCOUNT 0xffc03f50 /* Number of bytes received in endpoint5 FIFO */
-#define USB_EP_NI5_TXTYPE 0xffc03f54 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint5 */
-#define USB_EP_NI5_TXINTERVAL 0xffc03f58 /* Sets the NAK response timeout on Endpoint5 */
-#define USB_EP_NI5_RXTYPE 0xffc03f5c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint5 */
-#define USB_EP_NI5_RXINTERVAL 0xffc03f60 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint5 */
-
-/* USB Endpoint 6 Control Registers */
-
-#define USB_EP_NI5_TXCOUNT 0xffc03f68 /* Number of bytes to be written to the endpoint5 Tx FIFO */
-#define USB_EP_NI6_TXMAXP 0xffc03f80 /* Maximum packet size for Host Tx endpoint6 */
-#define USB_EP_NI6_TXCSR 0xffc03f84 /* Control Status register for endpoint6 */
-#define USB_EP_NI6_RXMAXP 0xffc03f88 /* Maximum packet size for Host Rx endpoint6 */
-#define USB_EP_NI6_RXCSR 0xffc03f8c /* Control Status register for Host Rx endpoint6 */
-#define USB_EP_NI6_RXCOUNT 0xffc03f90 /* Number of bytes received in endpoint6 FIFO */
-#define USB_EP_NI6_TXTYPE 0xffc03f94 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint6 */
-#define USB_EP_NI6_TXINTERVAL 0xffc03f98 /* Sets the NAK response timeout on Endpoint6 */
-#define USB_EP_NI6_RXTYPE 0xffc03f9c /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint6 */
-#define USB_EP_NI6_RXINTERVAL 0xffc03fa0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint6 */
-
-/* USB Endpoint 7 Control Registers */
-
-#define USB_EP_NI6_TXCOUNT 0xffc03fa8 /* Number of bytes to be written to the endpoint6 Tx FIFO */
-#define USB_EP_NI7_TXMAXP 0xffc03fc0 /* Maximum packet size for Host Tx endpoint7 */
-#define USB_EP_NI7_TXCSR 0xffc03fc4 /* Control Status register for endpoint7 */
-#define USB_EP_NI7_RXMAXP 0xffc03fc8 /* Maximum packet size for Host Rx endpoint7 */
-#define USB_EP_NI7_RXCSR 0xffc03fcc /* Control Status register for Host Rx endpoint7 */
-#define USB_EP_NI7_RXCOUNT 0xffc03fd0 /* Number of bytes received in endpoint7 FIFO */
-#define USB_EP_NI7_TXTYPE 0xffc03fd4 /* Sets the transaction protocol and peripheral endpoint number for the Host Tx endpoint7 */
-#define USB_EP_NI7_TXINTERVAL 0xffc03fd8 /* Sets the NAK response timeout on Endpoint7 */
-#define USB_EP_NI7_RXTYPE 0xffc03fdc /* Sets the transaction protocol and peripheral endpoint number for the Host Rx endpoint7 */
-#define USB_EP_NI7_RXINTERVAL 0xffc03ff0 /* Sets the polling interval for Interrupt/Isochronous transfers or the NAK response timeout on Bulk transfers for Host Rx endpoint7 */
-#define USB_EP_NI7_TXCOUNT 0xffc03ff8 /* Number of bytes to be written to the endpoint7 Tx FIFO */
-#define USB_DMA_INTERRUPT 0xffc04000 /* Indicates pending interrupts for the DMA channels */
-
-/* USB Channel 0 Config Registers */
-
-#define USB_DMA0CONTROL 0xffc04004 /* DMA master channel 0 configuration */
-#define USB_DMA0ADDRLOW 0xffc04008 /* Lower 16-bits of memory source/destination address for DMA master channel 0 */
-#define USB_DMA0ADDRHIGH 0xffc0400c /* Upper 16-bits of memory source/destination address for DMA master channel 0 */
-#define USB_DMA0COUNTLOW 0xffc04010 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 0 */
-#define USB_DMA0COUNTHIGH 0xffc04014 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 0 */
-
-/* USB Channel 1 Config Registers */
-
-#define USB_DMA1CONTROL 0xffc04024 /* DMA master channel 1 configuration */
-#define USB_DMA1ADDRLOW 0xffc04028 /* Lower 16-bits of memory source/destination address for DMA master channel 1 */
-#define USB_DMA1ADDRHIGH 0xffc0402c /* Upper 16-bits of memory source/destination address for DMA master channel 1 */
-#define USB_DMA1COUNTLOW 0xffc04030 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 1 */
-#define USB_DMA1COUNTHIGH 0xffc04034 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 1 */
-
-/* USB Channel 2 Config Registers */
-
-#define USB_DMA2CONTROL 0xffc04044 /* DMA master channel 2 configuration */
-#define USB_DMA2ADDRLOW 0xffc04048 /* Lower 16-bits of memory source/destination address for DMA master channel 2 */
-#define USB_DMA2ADDRHIGH 0xffc0404c /* Upper 16-bits of memory source/destination address for DMA master channel 2 */
-#define USB_DMA2COUNTLOW 0xffc04050 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 2 */
-#define USB_DMA2COUNTHIGH 0xffc04054 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 2 */
-
-/* USB Channel 3 Config Registers */
-
-#define USB_DMA3CONTROL 0xffc04064 /* DMA master channel 3 configuration */
-#define USB_DMA3ADDRLOW 0xffc04068 /* Lower 16-bits of memory source/destination address for DMA master channel 3 */
-#define USB_DMA3ADDRHIGH 0xffc0406c /* Upper 16-bits of memory source/destination address for DMA master channel 3 */
-#define USB_DMA3COUNTLOW 0xffc04070 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 3 */
-#define USB_DMA3COUNTHIGH 0xffc04074 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 3 */
-
-/* USB Channel 4 Config Registers */
-
-#define USB_DMA4CONTROL 0xffc04084 /* DMA master channel 4 configuration */
-#define USB_DMA4ADDRLOW 0xffc04088 /* Lower 16-bits of memory source/destination address for DMA master channel 4 */
-#define USB_DMA4ADDRHIGH 0xffc0408c /* Upper 16-bits of memory source/destination address for DMA master channel 4 */
-#define USB_DMA4COUNTLOW 0xffc04090 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 4 */
-#define USB_DMA4COUNTHIGH 0xffc04094 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 4 */
-
-/* USB Channel 5 Config Registers */
-
-#define USB_DMA5CONTROL 0xffc040a4 /* DMA master channel 5 configuration */
-#define USB_DMA5ADDRLOW 0xffc040a8 /* Lower 16-bits of memory source/destination address for DMA master channel 5 */
-#define USB_DMA5ADDRHIGH 0xffc040ac /* Upper 16-bits of memory source/destination address for DMA master channel 5 */
-#define USB_DMA5COUNTLOW 0xffc040b0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 5 */
-#define USB_DMA5COUNTHIGH 0xffc040b4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 5 */
-
-/* USB Channel 6 Config Registers */
-
-#define USB_DMA6CONTROL 0xffc040c4 /* DMA master channel 6 configuration */
-#define USB_DMA6ADDRLOW 0xffc040c8 /* Lower 16-bits of memory source/destination address for DMA master channel 6 */
-#define USB_DMA6ADDRHIGH 0xffc040cc /* Upper 16-bits of memory source/destination address for DMA master channel 6 */
-#define USB_DMA6COUNTLOW 0xffc040d0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 6 */
-#define USB_DMA6COUNTHIGH 0xffc040d4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 6 */
-
-/* USB Channel 7 Config Registers */
-
-#define USB_DMA7CONTROL 0xffc040e4 /* DMA master channel 7 configuration */
-#define USB_DMA7ADDRLOW 0xffc040e8 /* Lower 16-bits of memory source/destination address for DMA master channel 7 */
-#define USB_DMA7ADDRHIGH 0xffc040ec /* Upper 16-bits of memory source/destination address for DMA master channel 7 */
-#define USB_DMA7COUNTLOW 0xffc040f0 /* Lower 16-bits of byte count of DMA transfer for DMA master channel 7 */
-#define USB_DMA7COUNTHIGH 0xffc040f4 /* Upper 16-bits of byte count of DMA transfer for DMA master channel 7 */
-
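The per-channel USB DMA registers above repeat every 0x20 bytes (USB_DMA0CONTROL at 0xffc04004, USB_DMA1CONTROL at 0xffc04024, and so on), so a driver could index them by channel number rather than naming each one. A minimal sketch of that address arithmetic; the helper is hypothetical and simply mirrors the addresses listed above:

#include <stdint.h>

#define USB_DMA_CH_BASE   0xffc04004u  /* USB_DMA0CONTROL */
#define USB_DMA_CH_STRIDE 0x20u        /* spacing between channel register sets */

/* Offsets of the five per-channel registers relative to DMAnCONTROL. */
enum usb_dma_reg {
	DMA_CONTROL   = 0x00,
	DMA_ADDRLOW   = 0x04,
	DMA_ADDRHIGH  = 0x08,
	DMA_COUNTLOW  = 0x0c,
	DMA_COUNTHIGH = 0x10,
};

static inline uint32_t usb_dma_reg(unsigned int ch, enum usb_dma_reg reg)
{
	return USB_DMA_CH_BASE + ch * USB_DMA_CH_STRIDE + reg;
}

/* Example: usb_dma_reg(5, DMA_COUNTLOW) == 0xffc040b0 == USB_DMA5COUNTLOW. */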
-/* Keypad Registers */
-
-#define KPAD_CTL 0xffc04100 /* Controls keypad module enable and disable */
-#define KPAD_PRESCALE 0xffc04104 /* Establish a time base for programming the KPAD_MSEL register */
-#define KPAD_MSEL 0xffc04108 /* Selects delay parameters for keypad interface sensitivity */
-#define KPAD_ROWCOL 0xffc0410c /* Captures the row and column output values of the keys pressed */
-#define KPAD_STAT 0xffc04110 /* Holds and clears the status of the keypad interface interrupt */
-#define KPAD_SOFTEVAL 0xffc04114 /* Lets software force keypad interface to check for keys being pressed */
-
-/* Pixel Compositor (PIXC) Registers */
-
-#define PIXC_CTL 0xffc04400 /* Overlay enable, resampling mode, I/O data format, transparency enable, watermark level, FIFO status */
-#define PIXC_PPL 0xffc04404 /* Holds the number of pixels per line of the display */
-#define PIXC_LPF 0xffc04408 /* Holds the number of lines per frame of the display */
-#define PIXC_AHSTART 0xffc0440c /* Contains horizontal start pixel information of the overlay data (set A) */
-#define PIXC_AHEND 0xffc04410 /* Contains horizontal end pixel information of the overlay data (set A) */
-#define PIXC_AVSTART 0xffc04414 /* Contains vertical start pixel information of the overlay data (set A) */
-#define PIXC_AVEND 0xffc04418 /* Contains vertical end pixel information of the overlay data (set A) */
-#define PIXC_ATRANSP 0xffc0441c /* Contains the transparency ratio (set A) */
-#define PIXC_BHSTART 0xffc04420 /* Contains horizontal start pixel information of the overlay data (set B) */
-#define PIXC_BHEND 0xffc04424 /* Contains horizontal end pixel information of the overlay data (set B) */
-#define PIXC_BVSTART 0xffc04428 /* Contains vertical start pixel information of the overlay data (set B) */
-#define PIXC_BVEND 0xffc0442c /* Contains vertical end pixel information of the overlay data (set B) */
-#define PIXC_BTRANSP 0xffc04430 /* Contains the transparency ratio (set B) */
-#define PIXC_INTRSTAT 0xffc0443c /* Overlay interrupt configuration/status */
-#define PIXC_RYCON 0xffc04440 /* Color space conversion matrix register. Contains the R/Y conversion coefficients */
-#define PIXC_GUCON 0xffc04444 /* Color space conversion matrix register. Contains the G/U conversion coefficients */
-#define PIXC_BVCON 0xffc04448 /* Color space conversion matrix register. Contains the B/V conversion coefficients */
-#define PIXC_CCBIAS 0xffc0444c /* Bias values for the color space conversion matrix */
-#define PIXC_TC 0xffc04450 /* Holds the transparent color value */
-
-/* Handshake MDMA 0 Registers */
-
-#define HMDMA0_CONTROL 0xffc04500 /* Handshake MDMA0 Control Register */
-#define HMDMA0_ECINIT 0xffc04504 /* Handshake MDMA0 Initial Edge Count Register */
-#define HMDMA0_BCINIT 0xffc04508 /* Handshake MDMA0 Initial Block Count Register */
-#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshold Register */
-#define HMDMA0_ECOVERFLOW 0xffc04510 /* Handshake MDMA0 Edge Count Overflow Interrupt Register */
-#define HMDMA0_ECOUNT 0xffc04514 /* Handshake MDMA0 Current Edge Count Register */
-#define HMDMA0_BCOUNT 0xffc04518 /* Handshake MDMA0 Current Block Count Register */
-
-/* Handshake MDMA 1 Registers */
-
-#define HMDMA1_CONTROL 0xffc04540 /* Handshake MDMA1 Control Register */
-#define HMDMA1_ECINIT 0xffc04544 /* Handshake MDMA1 Initial Edge Count Register */
-#define HMDMA1_BCINIT 0xffc04548 /* Handshake MDMA1 Initial Block Count Register */
-#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshold Register */
-#define HMDMA1_ECOVERFLOW 0xffc04550 /* Handshake MDMA1 Edge Count Overflow Interrupt Register */
-#define HMDMA1_ECOUNT 0xffc04554 /* Handshake MDMA1 Current Edge Count Register */
-#define HMDMA1_BCOUNT 0xffc04558 /* Handshake MDMA1 Current Block Count Register */
-
-
-/* ********************************************************** */
-/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
-/* and MULTI BIT READ MACROS */
-/* ********************************************************** */
-
-/* Bit masks for PIXC_CTL */
-
-#define PIXC_EN 0x1 /* Pixel Compositor Enable */
-#define OVR_A_EN 0x2 /* Overlay A Enable */
-#define OVR_B_EN 0x4 /* Overlay B Enable */
-#define IMG_FORM 0x8 /* Image Data Format */
-#define OVR_FORM 0x10 /* Overlay Data Format */
-#define OUT_FORM 0x20 /* Output Data Format */
-#define UDS_MOD 0x40 /* Resampling Mode */
-#define TC_EN 0x80 /* Transparent Color Enable */
-#define IMG_STAT 0x300 /* Image FIFO Status */
-#define OVR_STAT 0xc00 /* Overlay FIFO Status */
-#define WM_LVL 0x3000 /* FIFO Watermark Level */
-
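The single-bit PIXC_CTL masks above can be OR-ed directly into a control value, while the multi-bit fields (IMG_STAT, OVR_STAT, WM_LVL) need the value shifted into the field's position first. A minimal, hedged sketch of composing such a value, assuming the defines above are in scope and a GCC-style __builtin_ctz is available; the field_prep helper is illustrative, not part of this header:

#include <stdint.h>

/* Place 'val' into the bit field described by a non-zero 'mask'. */
static inline uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static uint32_t pixc_ctl_value(void)
{
	uint32_t ctl = 0;

	ctl |= PIXC_EN;                  /* enable the pixel compositor */
	ctl |= OVR_A_EN;                 /* enable overlay A */
	ctl |= TC_EN;                    /* enable transparent color */
	ctl |= field_prep(WM_LVL, 0x2);  /* pick a FIFO watermark level */

	return ctl;
}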
-/* Bit masks for PIXC_AHSTART */
-
-#define A_HSTART 0xfff /* Horizontal Start Coordinates */
-
-/* Bit masks for PIXC_AHEND */
-
-#define A_HEND 0xfff /* Horizontal End Coordinates */
-
-/* Bit masks for PIXC_AVSTART */
-
-#define A_VSTART 0x3ff /* Vertical Start Coordinates */
-
-/* Bit masks for PIXC_AVEND */
-
-#define A_VEND 0x3ff /* Vertical End Coordinates */
-
-/* Bit masks for PIXC_ATRANSP */
-
-#define A_TRANSP 0xf /* Transparency Value */
-
-/* Bit masks for PIXC_BHSTART */
-
-#define B_HSTART 0xfff /* Horizontal Start Coordinates */
-
-/* Bit masks for PIXC_BHEND */
-
-#define B_HEND 0xfff /* Horizontal End Coordinates */
-
-/* Bit masks for PIXC_BVSTART */
-
-#define B_VSTART 0x3ff /* Vertical Start Coordinates */
-
-/* Bit masks for PIXC_BVEND */
-
-#define B_VEND 0x3ff /* Vertical End Coordinates */
-
-/* Bit masks for PIXC_BTRANSP */
-
-#define B_TRANSP 0xf /* Transparency Value */
-
-/* Bit masks for PIXC_INTRSTAT */
-
-#define OVR_INT_EN 0x1 /* Interrupt at End of Last Valid Overlay */
-#define FRM_INT_EN 0x2 /* Interrupt at End of Frame */
-#define OVR_INT_STAT 0x4 /* Overlay Interrupt Status */
-#define FRM_INT_STAT 0x8 /* Frame Interrupt Status */
-
-/* Bit masks for PIXC_RYCON */
-
-#define A11 0x3ff /* A11 in the Coefficient Matrix */
-#define A12 0xffc00 /* A12 in the Coefficient Matrix */
-#define A13 0x3ff00000 /* A13 in the Coefficient Matrix */
-#define RY_MULT4 0x40000000 /* Multiply Row by 4 */
-
-/* Bit masks for PIXC_GUCON */
-
-#define A21 0x3ff /* A21 in the Coefficient Matrix */
-#define A22 0xffc00 /* A22 in the Coefficient Matrix */
-#define A23 0x3ff00000 /* A23 in the Coefficient Matrix */
-#define GU_MULT4 0x40000000 /* Multiply Row by 4 */
-
-/* Bit masks for PIXC_BVCON */
-
-#define A31 0x3ff /* A31 in the Coefficient Matrix */
-#define A32 0xffc00 /* A32 in the Coefficient Matrix */
-#define A33 0x3ff00000 /* A33 in the Coefficient Matrix */
-#define BV_MULT4 0x40000000 /* Multiply Row by 4 */
-
-/* Bit masks for PIXC_CCBIAS */
-
-#define A14 0x3ff /* A14 in the Bias Vector */
-#define A24 0xffc00 /* A24 in the Bias Vector */
-#define A34 0x3ff00000 /* A34 in the Bias Vector */
-
-/* Bit masks for PIXC_TC */
-
-#define RY_TRANS 0xff /* Transparent Color - R/Y Component */
-#define GU_TRANS 0xff00 /* Transparent Color - G/U Component */
-#define BV_TRANS 0xff0000 /* Transparent Color - B/V Component */
-
-/* Bit masks for HOST_CONTROL */
-
-#define HOST_EN 0x1 /* Host Enable */
-#define HOST_END 0x2 /* Host Endianness */
-#define DATA_SIZE 0x4 /* Data Size */
-#define HOST_RST 0x8 /* Host Reset */
-#define HRDY_OVR 0x20 /* Host Ready Override */
-#define INT_MODE 0x40 /* Interrupt Mode */
-#define BT_EN 0x80 /* Bus Timeout Enable */
-#define EHW 0x100 /* Enable Host Write */
-#define EHR 0x200 /* Enable Host Read */
-#define BDR 0x400 /* Burst DMA Requests */
-
-/* Bit masks for HOST_STATUS */
-
-#define DMA_READY 0x1 /* DMA Ready */
-#define FIFOFULL 0x2 /* FIFO Full */
-#define FIFOEMPTY 0x4 /* FIFO Empty */
-#define DMA_COMPLETE 0x8 /* DMA Complete */
-#define HSHK 0x10 /* Host Handshake */
-#define TIMEOUT 0x20 /* Host Timeout */
-#define HIRQ 0x40 /* Host Interrupt Request */
-#define ALLOW_CNFG 0x80 /* Allow New Configuration */
-#define DMA_DIR 0x100 /* DMA Direction */
-#define BTE 0x200 /* Bus Timeout Enabled */
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define COUNT_TIMEOUT 0x7ff /* Host Timeout count */
-
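Together with the HOST_STATUS address defined earlier (0xffc03a04), these masks are typically used by reading the register and testing individual bits. A minimal bare-metal-style sketch, assuming the defines above are in scope and that a plain volatile 16-bit read of the MMR is acceptable here (real Blackfin kernel code goes through its own MMR accessors); the helper names are illustrative:

#include <stdint.h>

/* Hypothetical raw 16-bit MMR read. */
static inline uint16_t mmr_read16(uint32_t addr)
{
	return *(volatile uint16_t *)(uintptr_t)addr;
}

/* Spin until the host port reports an empty FIFO or flags a timeout. */
static int host_wait_fifo_empty(void)
{
	for (;;) {
		uint16_t stat = mmr_read16(HOST_STATUS);

		if (stat & FIFOEMPTY)
			return 0;    /* FIFO drained */
		if (stat & TIMEOUT)
			return -1;   /* bus timeout reported */
	}
}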
-/* Bit masks for MXVR_CONFIG */
-
-#define MXVREN 0x1 /* MXVR Enable */
-#define MMSM 0x2 /* MXVR Master/Slave Mode Select */
-#define ACTIVE 0x4 /* Active Mode */
-#define SDELAY 0x8 /* Synchronous Data Delay */
-#define NCMRXEN 0x10 /* Normal Control Message Receive Enable */
-#define RWRRXEN 0x20 /* Remote Write Receive Enable */
-#define MTXEN 0x40 /* MXVR Transmit Data Enable */
-#define MTXONB 0x80 /* MXVR Phy Transmitter On */
-#define EPARITY 0x100 /* Even Parity Select */
-#define MSB 0x1e00 /* Master Synchronous Boundary */
-#define APRXEN 0x2000 /* Asynchronous Packet Receive Enable */
-#define WAKEUP 0x4000 /* Wake-Up */
-#define LMECH 0x8000 /* Lock Mechanism Select */
-
-/* Bit masks for MXVR_STATE_0 */
-
-#define NACT 0x1 /* Network Activity */
-#define SBLOCK 0x2 /* Super Block Lock */
-#define FMPLLST 0xc /* Frequency Multiply PLL SM State */
-#define CDRPLLST 0xe0 /* Clock/Data Recovery PLL SM State */
-#define APBSY 0x100 /* Asynchronous Packet Transmit Buffer Busy */
-#define APARB 0x200 /* Asynchronous Packet Arbitrating */
-#define APTX 0x400 /* Asynchronous Packet Transmitting */
-#define APRX 0x800 /* Receiving Asynchronous Packet */
-#define CMBSY 0x1000 /* Control Message Transmit Buffer Busy */
-#define CMARB 0x2000 /* Control Message Arbitrating */
-#define CMTX 0x4000 /* Control Message Transmitting */
-#define CMRX 0x8000 /* Receiving Control Message */
-#define MRXONB 0x10000 /* MRXONB Pin State */
-#define RGSIP 0x20000 /* Remote Get Source In Progress */
-#define DALIP 0x40000 /* Resource Deallocate In Progress */
-#define ALIP 0x80000 /* Resource Allocate In Progress */
-#define RRDIP 0x100000 /* Remote Read In Progress */
-#define RWRIP 0x200000 /* Remote Write In Progress */
-#define FLOCK 0x400000 /* Frame Lock */
-#define BLOCK 0x800000 /* Block Lock */
-#define RSB 0xf000000 /* Received Synchronous Boundary */
-#define DERRNUM 0xf0000000 /* DMA Error Channel Number */
-
-/* Bit masks for MXVR_STATE_1 */
-
-#define SRXNUMB 0xf /* Synchronous Receive FIFO Number of Bytes */
-#define STXNUMB 0xf0 /* Synchronous Transmit FIFO Number of Bytes */
-#define APCONT 0x100 /* Asynchronous Packet Continuation */
-#define OBERRNUM 0xe00 /* DMA Out of Bounds Error Channel Number */
-#define DMAACTIVE0 0x10000 /* DMA0 Active */
-#define DMAACTIVE1 0x20000 /* DMA1 Active */
-#define DMAACTIVE2 0x40000 /* DMA2 Active */
-#define DMAACTIVE3 0x80000 /* DMA3 Active */
-#define DMAACTIVE4 0x100000 /* DMA4 Active */
-#define DMAACTIVE5 0x200000 /* DMA5 Active */
-#define DMAACTIVE6 0x400000 /* DMA6 Active */
-#define DMAACTIVE7 0x800000 /* DMA7 Active */
-#define DMAPMEN0 0x1000000 /* DMA0 Pattern Matching Enabled */
-#define DMAPMEN1 0x2000000 /* DMA1 Pattern Matching Enabled */
-#define DMAPMEN2 0x4000000 /* DMA2 Pattern Matching Enabled */
-#define DMAPMEN3 0x8000000 /* DMA3 Pattern Matching Enabled */
-#define DMAPMEN4 0x10000000 /* DMA4 Pattern Matching Enabled */
-#define DMAPMEN5 0x20000000 /* DMA5 Pattern Matching Enabled */
-#define DMAPMEN6 0x40000000 /* DMA6 Pattern Matching Enabled */
-#define DMAPMEN7 0x80000000 /* DMA7 Pattern Matching Enabled */
-
-/* Bit masks for MXVR_INT_STAT_0 */
-
-#define NI2A 0x1 /* Network Inactive to Active */
-#define NA2I 0x2 /* Network Active to Inactive */
-#define SBU2L 0x4 /* Super Block Unlock to Lock */
-#define SBL2U 0x8 /* Super Block Lock to Unlock */
-#define PRU 0x10 /* Position Register Updated */
-#define MPRU 0x20 /* Maximum Position Register Updated */
-#define DRU 0x40 /* Delay Register Updated */
-#define MDRU 0x80 /* Maximum Delay Register Updated */
-#define SBU 0x100 /* Synchronous Boundary Updated */
-#define ATU 0x200 /* Allocation Table Updated */
-#define FCZ0 0x400 /* Frame Counter 0 Zero */
-#define FCZ1 0x800 /* Frame Counter 1 Zero */
-#define PERR 0x1000 /* Parity Error */
-#define MH2L 0x2000 /* MRXONB High to Low */
-#define ML2H 0x4000 /* MRXONB Low to High */
-#define WUP 0x8000 /* Wake-Up Preamble Received */
-#define FU2L 0x10000 /* Frame Unlock to Lock */
-#define FL2U 0x20000 /* Frame Lock to Unlock */
-#define BU2L 0x40000 /* Block Unlock to Lock */
-#define BL2U 0x80000 /* Block Lock to Unlock */
-#define OBERR 0x100000 /* DMA Out of Bounds Error */
-#define PFL 0x200000 /* PLL Frequency Locked */
-#define SCZ 0x400000 /* System Clock Counter Zero */
-#define FERR 0x800000 /* FIFO Error */
-#define CMR 0x1000000 /* Control Message Received */
-#define CMROF 0x2000000 /* Control Message Receive Buffer Overflow */
-#define CMTS 0x4000000 /* Control Message Transmit Buffer Successfully Sent */
-#define CMTC 0x8000000 /* Control Message Transmit Buffer Successfully Cancelled */
-#define RWRC 0x10000000 /* Remote Write Control Message Completed */
-#define BCZ 0x20000000 /* Block Counter Zero */
-#define BMERR 0x40000000 /* Biphase Mark Coding Error */
-#define DERR 0x80000000 /* DMA Error */
-
-/* Bit masks for MXVR_INT_STAT_1 */
-
-#define HDONE0 0x1 /* DMA0 Half Done */
-#define DONE0 0x2 /* DMA0 Done */
-#define APR 0x4 /* Asynchronous Packet Received */
-#define APROF 0x8 /* Asynchronous Packet Receive Buffer Overflow */
-#define HDONE1 0x10 /* DMA1 Half Done */
-#define DONE1 0x20 /* DMA1 Done */
-#define APTS 0x40 /* Asynchronous Packet Transmit Buffer Successfully Sent */
-#define APTC 0x80 /* Asynchronous Packet Transmit Buffer Successfully Cancelled */
-#define HDONE2 0x100 /* DMA2 Half Done */
-#define DONE2 0x200 /* DMA2 Done */
-#define APRCE 0x400 /* Asynchronous Packet Receive CRC Error */
-#define APRPE 0x800 /* Asynchronous Packet Receive Packet Error */
-#define HDONE3 0x1000 /* DMA3 Half Done */
-#define DONE3 0x2000 /* DMA3 Done */
-#define HDONE4 0x10000 /* DMA4 Half Done */
-#define DONE4 0x20000 /* DMA4 Done */
-#define HDONE5 0x100000 /* DMA5 Half Done */
-#define DONE5 0x200000 /* DMA5 Done */
-#define HDONE6 0x1000000 /* DMA6 Half Done */
-#define DONE6 0x2000000 /* DMA6 Done */
-#define HDONE7 0x10000000 /* DMA7 Half Done */
-#define DONE7 0x20000000 /* DMA7 Done */
-
-/* Bit masks for MXVR_INT_EN_0 */
-
-#define NI2AEN 0x1 /* Network Inactive to Active Interrupt Enable */
-#define NA2IEN 0x2 /* Network Active to Inactive Interrupt Enable */
-#define SBU2LEN 0x4 /* Super Block Unlock to Lock Interrupt Enable */
-#define SBL2UEN 0x8 /* Super Block Lock to Unlock Interrupt Enable */
-#define PRUEN 0x10 /* Position Register Updated Interrupt Enable */
-#define MPRUEN 0x20 /* Maximum Position Register Updated Interrupt Enable */
-#define DRUEN 0x40 /* Delay Register Updated Interrupt Enable */
-#define MDRUEN 0x80 /* Maximum Delay Register Updated Interrupt Enable */
-#define SBUEN 0x100 /* Synchronous Boundary Updated Interrupt Enable */
-#define ATUEN 0x200 /* Allocation Table Updated Interrupt Enable */
-#define FCZ0EN 0x400 /* Frame Counter 0 Zero Interrupt Enable */
-#define FCZ1EN 0x800 /* Frame Counter 1 Zero Interrupt Enable */
-#define PERREN 0x1000 /* Parity Error Interrupt Enable */
-#define MH2LEN 0x2000 /* MRXONB High to Low Interrupt Enable */
-#define ML2HEN 0x4000 /* MRXONB Low to High Interrupt Enable */
-#define WUPEN 0x8000 /* Wake-Up Preamble Received Interrupt Enable */
-#define FU2LEN 0x10000 /* Frame Unlock to Lock Interrupt Enable */
-#define FL2UEN 0x20000 /* Frame Lock to Unlock Interrupt Enable */
-#define BU2LEN 0x40000 /* Block Unlock to Lock Interrupt Enable */
-#define BL2UEN 0x80000 /* Block Lock to Unlock Interrupt Enable */
-#define OBERREN 0x100000 /* DMA Out of Bounds Error Interrupt Enable */
-#define PFLEN 0x200000 /* PLL Frequency Locked Interrupt Enable */
-#define SCZEN 0x400000 /* System Clock Counter Zero Interrupt Enable */
-#define FERREN 0x800000 /* FIFO Error Interrupt Enable */
-#define CMREN 0x1000000 /* Control Message Received Interrupt Enable */
-#define CMROFEN 0x2000000 /* Control Message Receive Buffer Overflow Interrupt Enable */
-#define CMTSEN 0x4000000 /* Control Message Transmit Buffer Successfully Sent Interrupt Enable */
-#define CMTCEN 0x8000000 /* Control Message Transmit Buffer Successfully Cancelled Interrupt Enable */
-#define RWRCEN 0x10000000 /* Remote Write Control Message Completed Interrupt Enable */
-#define BCZEN 0x20000000 /* Block Counter Zero Interrupt Enable */
-#define BMERREN 0x40000000 /* Biphase Mark Coding Error Interrupt Enable */
-#define DERREN 0x80000000 /* DMA Error Interrupt Enable */
-
-/* Bit masks for MXVR_INT_EN_1 */
-
-#define HDONEEN0 0x1 /* DMA0 Half Done Interrupt Enable */
-#define DONEEN0 0x2 /* DMA0 Done Interrupt Enable */
-#define APREN 0x4 /* Asynchronous Packet Received Interrupt Enable */
-#define APROFEN 0x8 /* Asynchronous Packet Receive Buffer Overflow Interrupt Enable */
-#define HDONEEN1 0x10 /* DMA1 Half Done Interrupt Enable */
-#define DONEEN1 0x20 /* DMA1 Done Interrupt Enable */
-#define APTSEN 0x40 /* Asynchronous Packet Transmit Buffer Successfully Sent Interrupt Enable */
-#define APTCEN 0x80 /* Asynchronous Packet Transmit Buffer Successfully Cancelled Interrupt Enable */
-#define HDONEEN2 0x100 /* DMA2 Half Done Interrupt Enable */
-#define DONEEN2 0x200 /* DMA2 Done Interrupt Enable */
-#define APRCEEN 0x400 /* Asynchronous Packet Receive CRC Error Interrupt Enable */
-#define APRPEEN 0x800 /* Asynchronous Packet Receive Packet Error Interrupt Enable */
-#define HDONEEN3 0x1000 /* DMA3 Half Done Interrupt Enable */
-#define DONEEN3 0x2000 /* DMA3 Done Interrupt Enable */
-#define HDONEEN4 0x10000 /* DMA4 Half Done Interrupt Enable */
-#define DONEEN4 0x20000 /* DMA4 Done Interrupt Enable */
-#define HDONEEN5 0x100000 /* DMA5 Half Done Interrupt Enable */
-#define DONEEN5 0x200000 /* DMA5 Done Interrupt Enable */
-#define HDONEEN6 0x1000000 /* DMA6 Half Done Interrupt Enable */
-#define DONEEN6 0x2000000 /* DMA6 Done Interrupt Enable */
-#define HDONEEN7 0x10000000 /* DMA7 Half Done Interrupt Enable */
-#define DONEEN7 0x20000000 /* DMA7 Done Interrupt Enable */
-
-/* Bit masks for MXVR_POSITION */
-
-#define POSITION 0x3f /* Node Position */
-#define PVALID 0x8000 /* Node Position Valid */
-
-/* Bit masks for MXVR_MAX_POSITION */
-
-#define MPOSITION 0x3f /* Maximum Node Position */
-#define MPVALID 0x8000 /* Maximum Node Position Valid */
-
-/* Bit masks for MXVR_DELAY */
-
-#define DELAY 0x3f /* Node Frame Delay */
-#define DVALID 0x8000 /* Node Frame Delay Valid */
-
-/* Bit masks for MXVR_MAX_DELAY */
-
-#define MDELAY 0x3f /* Maximum Node Frame Delay */
-#define MDVALID 0x8000 /* Maximum Node Frame Delay Valid */
-
-/* Bit masks for MXVR_LADDR */
-
-#define LADDR 0xffff /* Logical Address */
-#define LVALID 0x80000000 /* Logical Address Valid */
-
-/* Bit masks for MXVR_GADDR */
-
-#define GADDRL 0xff /* Group Address Lower Byte */
-#define GVALID 0x8000 /* Group Address Valid */
-
-/* Bit masks for MXVR_AADDR */
-
-#define AADDR 0xffff /* Alternate Address */
-#define AVALID 0x80000000 /* Alternate Address Valid */
-
-/* Bit masks for MXVR_ALLOC_0 */
-
-#define CL0 0x7f /* Channel 0 Connection Label */
-#define CIU0 0x80 /* Channel 0 In Use */
-#define CL1 0x7f00 /* Channel 1 Connection Label */
-#define CIU1 0x8000 /* Channel 1 In Use */
-#define CL2 0x7f0000 /* Channel 2 Connection Label */
-#define CIU2 0x800000 /* Channel 2 In Use */
-#define CL3 0x7f000000 /* Channel 3 Connection Label */
-#define CIU3 0x80000000 /* Channel 3 In Use */
-
-/* Bit masks for MXVR_ALLOC_1 */
-
-#define CL4 0x7f /* Channel 4 Connection Label */
-#define CIU4 0x80 /* Channel 4 In Use */
-#define CL5 0x7f00 /* Channel 5 Connection Label */
-#define CIU5 0x8000 /* Channel 5 In Use */
-#define CL6 0x7f0000 /* Channel 6 Connection Label */
-#define CIU6 0x800000 /* Channel 6 In Use */
-#define CL7 0x7f000000 /* Channel 7 Connection Label */
-#define CIU7 0x80000000 /* Channel 7 In Use */
-
-/* Bit masks for MXVR_ALLOC_2 */
-
-#define CL8 0x7f /* Channel 8 Connection Label */
-#define CIU8 0x80 /* Channel 8 In Use */
-#define CL9 0x7f00 /* Channel 9 Connection Label */
-#define CIU9 0x8000 /* Channel 9 In Use */
-#define CL10 0x7f0000 /* Channel 10 Connection Label */
-#define CIU10 0x800000 /* Channel 10 In Use */
-#define CL11 0x7f000000 /* Channel 11 Connection Label */
-#define CIU11 0x80000000 /* Channel 11 In Use */
-
-/* Bit masks for MXVR_ALLOC_3 */
-
-#define CL12 0x7f /* Channel 12 Connection Label */
-#define CIU12 0x80 /* Channel 12 In Use */
-#define CL13 0x7f00 /* Channel 13 Connection Label */
-#define CIU13 0x8000 /* Channel 13 In Use */
-#define CL14 0x7f0000 /* Channel 14 Connection Label */
-#define CIU14 0x800000 /* Channel 14 In Use */
-#define CL15 0x7f000000 /* Channel 15 Connection Label */
-#define CIU15 0x80000000 /* Channel 15 In Use */
-
-/* Bit masks for MXVR_ALLOC_4 */
-
-#define CL16 0x7f /* Channel 16 Connection Label */
-#define CIU16 0x80 /* Channel 16 In Use */
-#define CL17 0x7f00 /* Channel 17 Connection Label */
-#define CIU17 0x8000 /* Channel 17 In Use */
-#define CL18 0x7f0000 /* Channel 18 Connection Label */
-#define CIU18 0x800000 /* Channel 18 In Use */
-#define CL19 0x7f000000 /* Channel 19 Connection Label */
-#define CIU19 0x80000000 /* Channel 19 In Use */
-
-/* Bit masks for MXVR_ALLOC_5 */
-
-#define CL20 0x7f /* Channel 20 Connection Label */
-#define CIU20 0x80 /* Channel 20 In Use */
-#define CL21 0x7f00 /* Channel 21 Connection Label */
-#define CIU21 0x8000 /* Channel 21 In Use */
-#define CL22 0x7f0000 /* Channel 22 Connection Label */
-#define CIU22 0x800000 /* Channel 22 In Use */
-#define CL23 0x7f000000 /* Channel 23 Connection Label */
-#define CIU23 0x80000000 /* Channel 23 In Use */
-
-/* Bit masks for MXVR_ALLOC_6 */
-
-#define CL24 0x7f /* Channel 24 Connection Label */
-#define CIU24 0x80 /* Channel 24 In Use */
-#define CL25 0x7f00 /* Channel 25 Connection Label */
-#define CIU25 0x8000 /* Channel 25 In Use */
-#define CL26 0x7f0000 /* Channel 26 Connection Label */
-#define CIU26 0x800000 /* Channel 26 In Use */
-#define CL27 0x7f000000 /* Channel 27 Connection Label */
-#define CIU27 0x80000000 /* Channel 27 In Use */
-
-/* Bit masks for MXVR_ALLOC_7 */
-
-#define CL28 0x7f /* Channel 28 Connection Label */
-#define CIU28 0x80 /* Channel 28 In Use */
-#define CL29 0x7f00 /* Channel 29 Connection Label */
-#define CIU29 0x8000 /* Channel 29 In Use */
-#define CL30 0x7f0000 /* Channel 30 Connection Label */
-#define CIU30 0x800000 /* Channel 30 In Use */
-#define CL31 0x7f000000 /* Channel 31 Connection Label */
-#define CIU31 0x80000000 /* Channel 31 In Use */
-
-/* Bit masks for MXVR_ALLOC_8 */
-
-#define CL32 0x7f /* Channel 32 Connection Label */
-#define CIU32 0x80 /* Channel 32 In Use */
-#define CL33 0x7f00 /* Channel 33 Connection Label */
-#define CIU33 0x8000 /* Channel 33 In Use */
-#define CL34 0x7f0000 /* Channel 34 Connection Label */
-#define CIU34 0x800000 /* Channel 34 In Use */
-#define CL35 0x7f000000 /* Channel 35 Connection Label */
-#define CIU35 0x80000000 /* Channel 35 In Use */
-
-/* Bit masks for MXVR_ALLOC_9 */
-
-#define CL36 0x7f /* Channel 36 Connection Label */
-#define CIU36 0x80 /* Channel 36 In Use */
-#define CL37 0x7f00 /* Channel 37 Connection Label */
-#define CIU37 0x8000 /* Channel 37 In Use */
-#define CL38 0x7f0000 /* Channel 38 Connection Label */
-#define CIU38 0x800000 /* Channel 38 In Use */
-#define CL39 0x7f000000 /* Channel 39 Connection Label */
-#define CIU39 0x80000000 /* Channel 39 In Use */
-
-/* Bit masks for MXVR_ALLOC_10 */
-
-#define CL40 0x7f /* Channel 40 Connection Label */
-#define CIU40 0x80 /* Channel 40 In Use */
-#define CL41 0x7f00 /* Channel 41 Connection Label */
-#define CIU41 0x8000 /* Channel 41 In Use */
-#define CL42 0x7f0000 /* Channel 42 Connection Label */
-#define CIU42 0x800000 /* Channel 42 In Use */
-#define CL43 0x7f000000 /* Channel 43 Connection Label */
-#define CIU43 0x80000000 /* Channel 43 In Use */
-
-/* Bit masks for MXVR_ALLOC_11 */
-
-#define CL44 0x7f /* Channel 44 Connection Label */
-#define CIU44 0x80 /* Channel 44 In Use */
-#define CL45 0x7f00 /* Channel 45 Connection Label */
-#define CIU45 0x8000 /* Channel 45 In Use */
-#define CL46 0x7f0000 /* Channel 46 Connection Label */
-#define CIU46 0x800000 /* Channel 46 In Use */
-#define CL47 0x7f000000 /* Channel 47 Connection Label */
-#define CIU47 0x80000000 /* Channel 47 In Use */
-
-/* Bit masks for MXVR_ALLOC_12 */
-
-#define CL48 0x7f /* Channel 48 Connection Label */
-#define CIU48 0x80 /* Channel 48 In Use */
-#define CL49 0x7f00 /* Channel 49 Connection Label */
-#define CIU49 0x8000 /* Channel 49 In Use */
-#define CL50 0x7f0000 /* Channel 50 Connection Label */
-#define CIU50 0x800000 /* Channel 50 In Use */
-#define CL51 0x7f000000 /* Channel 51 Connection Label */
-#define CIU51 0x80000000 /* Channel 51 In Use */
-
-/* Bit masks for MXVR_ALLOC_13 */
-
-#define CL52 0x7f /* Channel 52 Connection Label */
-#define CIU52 0x80 /* Channel 52 In Use */
-#define CL53 0x7f00 /* Channel 53 Connection Label */
-#define CIU53 0x8000 /* Channel 53 In Use */
-#define CL54 0x7f0000 /* Channel 54 Connection Label */
-#define CIU54 0x800000 /* Channel 54 In Use */
-#define CL55 0x7f000000 /* Channel 55 Connection Label */
-#define CIU55 0x80000000 /* Channel 55 In Use */
-
-/* Bit masks for MXVR_ALLOC_14 */
-
-#define CL56 0x7f /* Channel 56 Connection Label */
-#define CIU56 0x80 /* Channel 56 In Use */
-#define CL57 0x7f00 /* Channel 57 Connection Label */
-#define CIU57 0x8000 /* Channel 57 In Use */
-#define CL58 0x7f0000 /* Channel 58 Connection Label */
-#define CIU58 0x800000 /* Channel 58 In Use */
-#define CL59 0x7f000000 /* Channel 59 Connection Label */
-#define CIU59 0x80000000 /* Channel 59 In Use */
-
-/* MXVR_SYNC_LCHAN_0 Masks */
-
-#define LCHANPC0 0x0000000Flu
-#define LCHANPC1 0x000000F0lu
-#define LCHANPC2 0x00000F00lu
-#define LCHANPC3 0x0000F000lu
-#define LCHANPC4 0x000F0000lu
-#define LCHANPC5 0x00F00000lu
-#define LCHANPC6 0x0F000000lu
-#define LCHANPC7 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_1 Masks */
-
-#define LCHANPC8 0x0000000Flu
-#define LCHANPC9 0x000000F0lu
-#define LCHANPC10 0x00000F00lu
-#define LCHANPC11 0x0000F000lu
-#define LCHANPC12 0x000F0000lu
-#define LCHANPC13 0x00F00000lu
-#define LCHANPC14 0x0F000000lu
-#define LCHANPC15 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_2 Masks */
-
-#define LCHANPC16 0x0000000Flu
-#define LCHANPC17 0x000000F0lu
-#define LCHANPC18 0x00000F00lu
-#define LCHANPC19 0x0000F000lu
-#define LCHANPC20 0x000F0000lu
-#define LCHANPC21 0x00F00000lu
-#define LCHANPC22 0x0F000000lu
-#define LCHANPC23 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_3 Masks */
-
-#define LCHANPC24 0x0000000Flu
-#define LCHANPC25 0x000000F0lu
-#define LCHANPC26 0x00000F00lu
-#define LCHANPC27 0x0000F000lu
-#define LCHANPC28 0x000F0000lu
-#define LCHANPC29 0x00F00000lu
-#define LCHANPC30 0x0F000000lu
-#define LCHANPC31 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_4 Masks */
-
-#define LCHANPC32 0x0000000Flu
-#define LCHANPC33 0x000000F0lu
-#define LCHANPC34 0x00000F00lu
-#define LCHANPC35 0x0000F000lu
-#define LCHANPC36 0x000F0000lu
-#define LCHANPC37 0x00F00000lu
-#define LCHANPC38 0x0F000000lu
-#define LCHANPC39 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_5 Masks */
-
-#define LCHANPC40 0x0000000Flu
-#define LCHANPC41 0x000000F0lu
-#define LCHANPC42 0x00000F00lu
-#define LCHANPC43 0x0000F000lu
-#define LCHANPC44 0x000F0000lu
-#define LCHANPC45 0x00F00000lu
-#define LCHANPC46 0x0F000000lu
-#define LCHANPC47 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_6 Masks */
-
-#define LCHANPC48 0x0000000Flu
-#define LCHANPC49 0x000000F0lu
-#define LCHANPC50 0x00000F00lu
-#define LCHANPC51 0x0000F000lu
-#define LCHANPC52 0x000F0000lu
-#define LCHANPC53 0x00F00000lu
-#define LCHANPC54 0x0F000000lu
-#define LCHANPC55 0xF0000000lu
-
-
-/* MXVR_SYNC_LCHAN_7 Masks */
-
-#define LCHANPC56 0x0000000Flu
-#define LCHANPC57 0x000000F0lu
-#define LCHANPC58 0x00000F00lu
-#define LCHANPC59 0x0000F000lu
-
-/* Bit masks for MXVR_DMAx_CONFIG */
-
-#define MDMAEN 0x1 /* DMA Channel Enable */
-#define DMADD 0x2 /* DMA Channel Direction */
-#define BY4SWAPEN 0x20 /* DMA Channel Four Byte Swap Enable */
-#define LCHAN 0x3c0 /* DMA Channel Logical Channel */
-#define BITSWAPEN 0x400 /* DMA Channel Bit Swap Enable */
-#define BY2SWAPEN 0x800 /* DMA Channel Two Byte Swap Enable */
-#define MFLOW 0x7000 /* DMA Channel Operation Flow */
-#define FIXEDPM 0x80000 /* DMA Channel Fixed Pattern Matching Select */
-#define STARTPAT 0x300000 /* DMA Channel Start Pattern Select */
-#define STOPPAT 0xc00000 /* DMA Channel Stop Pattern Select */
-#define COUNTPOS 0x1c000000 /* DMA Channel Count Position */
-
-/* Bit masks for MXVR_AP_CTL */
-
-#define STARTAP 0x1 /* Start Asynchronous Packet Transmission */
-#define CANCELAP 0x2 /* Cancel Asynchronous Packet Transmission */
-#define RESETAP 0x4 /* Reset Asynchronous Packet Arbitration */
-#define APRBE0 0x4000 /* Asynchronous Packet Receive Buffer Entry 0 */
-#define APRBE1 0x8000 /* Asynchronous Packet Receive Buffer Entry 1 */
-
-/* Bit masks for MXVR_APRB_START_ADDR */
-
-#define MXVR_APRB_START_ADDR_MASK 0x1fffffe /* Asynchronous Packet Receive Buffer Start Address */
-
-/* Bit masks for MXVR_APRB_CURR_ADDR */
-
-#define MXVR_APRB_CURR_ADDR_MASK 0xffffffff /* Asynchronous Packet Receive Buffer Current Address */
-
-/* Bit masks for MXVR_APTB_START_ADDR */
-
-#define MXVR_APTB_START_ADDR_MASK 0x1fffffe /* Asynchronous Packet Transmit Buffer Start Address */
-
-/* Bit masks for MXVR_APTB_CURR_ADDR */
-
-#define MXVR_APTB_CURR_ADDR_MASK 0xffffffff /* Asynchronous Packet Transmit Buffer Current Address */
-
-/* Bit masks for MXVR_CM_CTL */
-
-#define STARTCM 0x1 /* Start Control Message Transmission */
-#define CANCELCM 0x2 /* Cancel Control Message Transmission */
-#define CMRBE0 0x10000 /* Control Message Receive Buffer Entry 0 */
-#define CMRBE1 0x20000 /* Control Message Receive Buffer Entry 1 */
-#define CMRBE2 0x40000 /* Control Message Receive Buffer Entry 2 */
-#define CMRBE3 0x80000 /* Control Message Receive Buffer Entry 3 */
-#define CMRBE4 0x100000 /* Control Message Receive Buffer Entry 4 */
-#define CMRBE5 0x200000 /* Control Message Receive Buffer Entry 5 */
-#define CMRBE6 0x400000 /* Control Message Receive Buffer Entry 6 */
-#define CMRBE7 0x800000 /* Control Message Receive Buffer Entry 7 */
-#define CMRBE8 0x1000000 /* Control Message Receive Buffer Entry 8 */
-#define CMRBE9 0x2000000 /* Control Message Receive Buffer Entry 9 */
-#define CMRBE10 0x4000000 /* Control Message Receive Buffer Entry 10 */
-#define CMRBE11 0x8000000 /* Control Message Receive Buffer Entry 11 */
-#define CMRBE12 0x10000000 /* Control Message Receive Buffer Entry 12 */
-#define CMRBE13 0x20000000 /* Control Message Receive Buffer Entry 13 */
-#define CMRBE14 0x40000000 /* Control Message Receive Buffer Entry 14 */
-#define CMRBE15 0x80000000 /* Control Message Receive Buffer Entry 15 */
-
-/* Bit masks for MXVR_CMRB_START_ADDR */
-
-#define MXVR_CMRB_START_ADDR_MASK 0x1fffffe /* Control Message Receive Buffer Start Address */
-
-/* Bit masks for MXVR_CMRB_CURR_ADDR */
-
-#define MXVR_CMRB_CURR_ADDR_MASK 0xffffffff /* Control Message Receive Buffer Current Address */
-
-/* Bit masks for MXVR_CMTB_START_ADDR */
-
-#define MXVR_CMTB_START_ADDR_MASK 0x1fffffe /* Control Message Transmit Buffer Start Address */
-
-/* Bit masks for MXVR_CMTB_CURR_ADDR */
-
-#define MXVR_CMTB_CURR_ADDR_MASK 0xffffffff /* Control Message Transmit Buffer Current Address */
-
-/* Bit masks for MXVR_RRDB_START_ADDR */
-
-#define MXVR_RRDB_START_ADDR_MASK 0x1fffffe /* Remote Read Buffer Start Address */
-
-/* Bit masks for MXVR_RRDB_CURR_ADDR */
-
-#define MXVR_RRDB_CURR_ADDR_MASK 0xffffffff /* Remote Read Buffer Current Address */
-
-/* Bit masks for MXVR_PAT_DATAx */
-
-#define MATCH_DATA_0 0xff /* Pattern Match Data Byte 0 */
-#define MATCH_DATA_1 0xff00 /* Pattern Match Data Byte 1 */
-#define MATCH_DATA_2 0xff0000 /* Pattern Match Data Byte 2 */
-#define MATCH_DATA_3 0xff000000 /* Pattern Match Data Byte 3 */
-
-/* Bit masks for MXVR_PAT_EN_0 */
-
-#define MATCH_EN_0_0 0x1 /* Pattern Match Enable Byte 0 Bit 0 */
-#define MATCH_EN_0_1 0x2 /* Pattern Match Enable Byte 0 Bit 1 */
-#define MATCH_EN_0_2 0x4 /* Pattern Match Enable Byte 0 Bit 2 */
-#define MATCH_EN_0_3 0x8 /* Pattern Match Enable Byte 0 Bit 3 */
-#define MATCH_EN_0_4 0x10 /* Pattern Match Enable Byte 0 Bit 4 */
-#define MATCH_EN_0_5 0x20 /* Pattern Match Enable Byte 0 Bit 5 */
-#define MATCH_EN_0_6 0x40 /* Pattern Match Enable Byte 0 Bit 6 */
-#define MATCH_EN_0_7 0x80 /* Pattern Match Enable Byte 0 Bit 7 */
-#define MATCH_EN_1_0 0x100 /* Pattern Match Enable Byte 1 Bit 0 */
-#define MATCH_EN_1_1 0x200 /* Pattern Match Enable Byte 1 Bit 1 */
-#define MATCH_EN_1_2 0x400 /* Pattern Match Enable Byte 1 Bit 2 */
-#define MATCH_EN_1_3 0x800 /* Pattern Match Enable Byte 1 Bit 3 */
-#define MATCH_EN_1_4 0x1000 /* Pattern Match Enable Byte 1 Bit 4 */
-#define MATCH_EN_1_5 0x2000 /* Pattern Match Enable Byte 1 Bit 5 */
-#define MATCH_EN_1_6 0x4000 /* Pattern Match Enable Byte 1 Bit 6 */
-#define MATCH_EN_1_7 0x8000 /* Pattern Match Enable Byte 1 Bit 7 */
-#define MATCH_EN_2_0 0x10000 /* Pattern Match Enable Byte 2 Bit 0 */
-#define MATCH_EN_2_1 0x20000 /* Pattern Match Enable Byte 2 Bit 1 */
-#define MATCH_EN_2_2 0x40000 /* Pattern Match Enable Byte 2 Bit 2 */
-#define MATCH_EN_2_3 0x80000 /* Pattern Match Enable Byte 2 Bit 3 */
-#define MATCH_EN_2_4 0x100000 /* Pattern Match Enable Byte 2 Bit 4 */
-#define MATCH_EN_2_5 0x200000 /* Pattern Match Enable Byte 2 Bit 5 */
-#define MATCH_EN_2_6 0x400000 /* Pattern Match Enable Byte 2 Bit 6 */
-#define MATCH_EN_2_7 0x800000 /* Pattern Match Enable Byte 2 Bit 7 */
-#define MATCH_EN_3_0 0x1000000 /* Pattern Match Enable Byte 3 Bit 0 */
-#define MATCH_EN_3_1 0x2000000 /* Pattern Match Enable Byte 3 Bit 1 */
-#define MATCH_EN_3_2 0x4000000 /* Pattern Match Enable Byte 3 Bit 2 */
-#define MATCH_EN_3_3 0x8000000 /* Pattern Match Enable Byte 3 Bit 3 */
-#define MATCH_EN_3_4 0x10000000 /* Pattern Match Enable Byte 3 Bit 4 */
-#define MATCH_EN_3_5 0x20000000 /* Pattern Match Enable Byte 3 Bit 5 */
-#define MATCH_EN_3_6 0x40000000 /* Pattern Match Enable Byte 3 Bit 6 */
-#define MATCH_EN_3_7 0x80000000 /* Pattern Match Enable Byte 3 Bit 7 */
-
-/* Bit masks for MXVR_PAT_EN_1 */
-
-#define MATCH_EN_0_0 0x1 /* Pattern Match Enable Byte 0 Bit 0 */
-#define MATCH_EN_0_1 0x2 /* Pattern Match Enable Byte 0 Bit 1 */
-#define MATCH_EN_0_2 0x4 /* Pattern Match Enable Byte 0 Bit 2 */
-#define MATCH_EN_0_3 0x8 /* Pattern Match Enable Byte 0 Bit 3 */
-#define MATCH_EN_0_4 0x10 /* Pattern Match Enable Byte 0 Bit 4 */
-#define MATCH_EN_0_5 0x20 /* Pattern Match Enable Byte 0 Bit 5 */
-#define MATCH_EN_0_6 0x40 /* Pattern Match Enable Byte 0 Bit 6 */
-#define MATCH_EN_0_7 0x80 /* Pattern Match Enable Byte 0 Bit 7 */
-#define MATCH_EN_1_0 0x100 /* Pattern Match Enable Byte 1 Bit 0 */
-#define MATCH_EN_1_1 0x200 /* Pattern Match Enable Byte 1 Bit 1 */
-#define MATCH_EN_1_2 0x400 /* Pattern Match Enable Byte 1 Bit 2 */
-#define MATCH_EN_1_3 0x800 /* Pattern Match Enable Byte 1 Bit 3 */
-#define MATCH_EN_1_4 0x1000 /* Pattern Match Enable Byte 1 Bit 4 */
-#define MATCH_EN_1_5 0x2000 /* Pattern Match Enable Byte 1 Bit 5 */
-#define MATCH_EN_1_6 0x4000 /* Pattern Match Enable Byte 1 Bit 6 */
-#define MATCH_EN_1_7 0x8000 /* Pattern Match Enable Byte 1 Bit 7 */
-#define MATCH_EN_2_0 0x10000 /* Pattern Match Enable Byte 2 Bit 0 */
-#define MATCH_EN_2_1 0x20000 /* Pattern Match Enable Byte 2 Bit 1 */
-#define MATCH_EN_2_2 0x40000 /* Pattern Match Enable Byte 2 Bit 2 */
-#define MATCH_EN_2_3 0x80000 /* Pattern Match Enable Byte 2 Bit 3 */
-#define MATCH_EN_2_4 0x100000 /* Pattern Match Enable Byte 2 Bit 4 */
-#define MATCH_EN_2_5 0x200000 /* Pattern Match Enable Byte 2 Bit 5 */
-#define MATCH_EN_2_6 0x400000 /* Pattern Match Enable Byte 2 Bit 6 */
-#define MATCH_EN_2_7 0x800000 /* Pattern Match Enable Byte 2 Bit 7 */
-#define MATCH_EN_3_0 0x1000000 /* Pattern Match Enable Byte 3 Bit 0 */
-#define MATCH_EN_3_1 0x2000000 /* Pattern Match Enable Byte 3 Bit 1 */
-#define MATCH_EN_3_2 0x4000000 /* Pattern Match Enable Byte 3 Bit 2 */
-#define MATCH_EN_3_3 0x8000000 /* Pattern Match Enable Byte 3 Bit 3 */
-#define MATCH_EN_3_4 0x10000000 /* Pattern Match Enable Byte 3 Bit 4 */
-#define MATCH_EN_3_5 0x20000000 /* Pattern Match Enable Byte 3 Bit 5 */
-#define MATCH_EN_3_6 0x40000000 /* Pattern Match Enable Byte 3 Bit 6 */
-#define MATCH_EN_3_7 0x80000000 /* Pattern Match Enable Byte 3 Bit 7 */
-
-/* Bit masks for MXVR_FRAME_CNT_0 */
-
-#define FCNT 0xffff /* Frame Count */
-
-/* Bit masks for MXVR_FRAME_CNT_1 */
-
-#define FCNT 0xffff /* Frame Count */
-
-/* Bit masks for MXVR_ROUTING_0 */
-
-#define TX_CH0 0x3f /* Transmit Channel 0 */
-#define MUTE_CH0 0x80 /* Mute Channel 0 */
-#define TX_CH1 0x3f00 /* Transmit Channel 1 */
-#define MUTE_CH1 0x8000 /* Mute Channel 1 */
-#define TX_CH2 0x3f0000 /* Transmit Channel 2 */
-#define MUTE_CH2 0x800000 /* Mute Channel 2 */
-#define TX_CH3 0x3f000000 /* Transmit Channel 3 */
-#define MUTE_CH3 0x80000000 /* Mute Channel 3 */
-
-/* Bit masks for MXVR_ROUTING_1 */
-
-#define TX_CH4 0x3f /* Transmit Channel 4 */
-#define MUTE_CH4 0x80 /* Mute Channel 4 */
-#define TX_CH5 0x3f00 /* Transmit Channel 5 */
-#define MUTE_CH5 0x8000 /* Mute Channel 5 */
-#define TX_CH6 0x3f0000 /* Transmit Channel 6 */
-#define MUTE_CH6 0x800000 /* Mute Channel 6 */
-#define TX_CH7 0x3f000000 /* Transmit Channel 7 */
-#define MUTE_CH7 0x80000000 /* Mute Channel 7 */
-
-/* Bit masks for MXVR_ROUTING_2 */
-
-#define TX_CH8 0x3f /* Transmit Channel 8 */
-#define MUTE_CH8 0x80 /* Mute Channel 8 */
-#define TX_CH9 0x3f00 /* Transmit Channel 9 */
-#define MUTE_CH9 0x8000 /* Mute Channel 9 */
-#define TX_CH10 0x3f0000 /* Transmit Channel 10 */
-#define MUTE_CH10 0x800000 /* Mute Channel 10 */
-#define TX_CH11 0x3f000000 /* Transmit Channel 11 */
-#define MUTE_CH11 0x80000000 /* Mute Channel 11 */
-
-/* Bit masks for MXVR_ROUTING_3 */
-
-#define TX_CH12 0x3f /* Transmit Channel 12 */
-#define MUTE_CH12 0x80 /* Mute Channel 12 */
-#define TX_CH13 0x3f00 /* Transmit Channel 13 */
-#define MUTE_CH13 0x8000 /* Mute Channel 13 */
-#define TX_CH14 0x3f0000 /* Transmit Channel 14 */
-#define MUTE_CH14 0x800000 /* Mute Channel 14 */
-#define TX_CH15 0x3f000000 /* Transmit Channel 15 */
-#define MUTE_CH15 0x80000000 /* Mute Channel 15 */
-
-/* Bit masks for MXVR_ROUTING_4 */
-
-#define TX_CH16 0x3f /* Transmit Channel 16 */
-#define MUTE_CH16 0x80 /* Mute Channel 16 */
-#define TX_CH17 0x3f00 /* Transmit Channel 17 */
-#define MUTE_CH17 0x8000 /* Mute Channel 17 */
-#define TX_CH18 0x3f0000 /* Transmit Channel 18 */
-#define MUTE_CH18 0x800000 /* Mute Channel 18 */
-#define TX_CH19 0x3f000000 /* Transmit Channel 19 */
-#define MUTE_CH19 0x80000000 /* Mute Channel 19 */
-
-/* Bit masks for MXVR_ROUTING_5 */
-
-#define TX_CH20 0x3f /* Transmit Channel 20 */
-#define MUTE_CH20 0x80 /* Mute Channel 20 */
-#define TX_CH21 0x3f00 /* Transmit Channel 21 */
-#define MUTE_CH21 0x8000 /* Mute Channel 21 */
-#define TX_CH22 0x3f0000 /* Transmit Channel 22 */
-#define MUTE_CH22 0x800000 /* Mute Channel 22 */
-#define TX_CH23 0x3f000000 /* Transmit Channel 23 */
-#define MUTE_CH23 0x80000000 /* Mute Channel 23 */
-
-/* Bit masks for MXVR_ROUTING_6 */
-
-#define TX_CH24 0x3f /* Transmit Channel 24 */
-#define MUTE_CH24 0x80 /* Mute Channel 24 */
-#define TX_CH25 0x3f00 /* Transmit Channel 25 */
-#define MUTE_CH25 0x8000 /* Mute Channel 25 */
-#define TX_CH26 0x3f0000 /* Transmit Channel 26 */
-#define MUTE_CH26 0x800000 /* Mute Channel 26 */
-#define TX_CH27 0x3f000000 /* Transmit Channel 27 */
-#define MUTE_CH27 0x80000000 /* Mute Channel 27 */
-
-/* Bit masks for MXVR_ROUTING_7 */
-
-#define TX_CH28 0x3f /* Transmit Channel 28 */
-#define MUTE_CH28 0x80 /* Mute Channel 28 */
-#define TX_CH29 0x3f00 /* Transmit Channel 29 */
-#define MUTE_CH29 0x8000 /* Mute Channel 29 */
-#define TX_CH30 0x3f0000 /* Transmit Channel 30 */
-#define MUTE_CH30 0x800000 /* Mute Channel 30 */
-#define TX_CH31 0x3f000000 /* Transmit Channel 31 */
-#define MUTE_CH31 0x80000000 /* Mute Channel 31 */
-
-/* Bit masks for MXVR_ROUTING_8 */
-
-#define TX_CH32 0x3f /* Transmit Channel 32 */
-#define MUTE_CH32 0x80 /* Mute Channel 32 */
-#define TX_CH33 0x3f00 /* Transmit Channel 33 */
-#define MUTE_CH33 0x8000 /* Mute Channel 33 */
-#define TX_CH34 0x3f0000 /* Transmit Channel 34 */
-#define MUTE_CH34 0x800000 /* Mute Channel 34 */
-#define TX_CH35 0x3f000000 /* Transmit Channel 35 */
-#define MUTE_CH35 0x80000000 /* Mute Channel 35 */
-
-/* Bit masks for MXVR_ROUTING_9 */
-
-#define TX_CH36 0x3f /* Transmit Channel 36 */
-#define MUTE_CH36 0x80 /* Mute Channel 36 */
-#define TX_CH37 0x3f00 /* Transmit Channel 37 */
-#define MUTE_CH37 0x8000 /* Mute Channel 37 */
-#define TX_CH38 0x3f0000 /* Transmit Channel 38 */
-#define MUTE_CH38 0x800000 /* Mute Channel 38 */
-#define TX_CH39 0x3f000000 /* Transmit Channel 39 */
-#define MUTE_CH39 0x80000000 /* Mute Channel 39 */
-
-/* Bit masks for MXVR_ROUTING_10 */
-
-#define TX_CH40 0x3f /* Transmit Channel 40 */
-#define MUTE_CH40 0x80 /* Mute Channel 40 */
-#define TX_CH41 0x3f00 /* Transmit Channel 41 */
-#define MUTE_CH41 0x8000 /* Mute Channel 41 */
-#define TX_CH42 0x3f0000 /* Transmit Channel 42 */
-#define MUTE_CH42 0x800000 /* Mute Channel 42 */
-#define TX_CH43 0x3f000000 /* Transmit Channel 43 */
-#define MUTE_CH43 0x80000000 /* Mute Channel 43 */
-
-/* Bit masks for MXVR_ROUTING_11 */
-
-#define TX_CH44 0x3f /* Transmit Channel 44 */
-#define MUTE_CH44 0x80 /* Mute Channel 44 */
-#define TX_CH45 0x3f00 /* Transmit Channel 45 */
-#define MUTE_CH45 0x8000 /* Mute Channel 45 */
-#define TX_CH46 0x3f0000 /* Transmit Channel 46 */
-#define MUTE_CH46 0x800000 /* Mute Channel 46 */
-#define TX_CH47 0x3f000000 /* Transmit Channel 47 */
-#define MUTE_CH47 0x80000000 /* Mute Channel 47 */
-
-/* Bit masks for MXVR_ROUTING_12 */
-
-#define TX_CH48 0x3f /* Transmit Channel 48 */
-#define MUTE_CH48 0x80 /* Mute Channel 48 */
-#define TX_CH49 0x3f00 /* Transmit Channel 49 */
-#define MUTE_CH49 0x8000 /* Mute Channel 49 */
-#define TX_CH50 0x3f0000 /* Transmit Channel 50 */
-#define MUTE_CH50 0x800000 /* Mute Channel 50 */
-#define TX_CH51 0x3f000000 /* Transmit Channel 51 */
-#define MUTE_CH51 0x80000000 /* Mute Channel 51 */
-
-/* Bit masks for MXVR_ROUTING_13 */
-
-#define TX_CH52 0x3f /* Transmit Channel 52 */
-#define MUTE_CH52 0x80 /* Mute Channel 52 */
-#define TX_CH53 0x3f00 /* Transmit Channel 53 */
-#define MUTE_CH53 0x8000 /* Mute Channel 53 */
-#define TX_CH54 0x3f0000 /* Transmit Channel 54 */
-#define MUTE_CH54 0x800000 /* Mute Channel 54 */
-#define TX_CH55 0x3f000000 /* Transmit Channel 55 */
-#define MUTE_CH55 0x80000000 /* Mute Channel 55 */
-
-/* Bit masks for MXVR_ROUTING_14 */
-
-#define TX_CH56 0x3f /* Transmit Channel 56 */
-#define MUTE_CH56 0x80 /* Mute Channel 56 */
-#define TX_CH57 0x3f00 /* Transmit Channel 57 */
-#define MUTE_CH57 0x8000 /* Mute Channel 57 */
-#define TX_CH58 0x3f0000 /* Transmit Channel 58 */
-#define MUTE_CH58 0x800000 /* Mute Channel 58 */
-#define TX_CH59 0x3f000000 /* Transmit Channel 59 */
-#define MUTE_CH59 0x80000000 /* Mute Channel 59 */
-
-/* Bit masks for MXVR_BLOCK_CNT */
-
-#define BCNT 0xffff /* Block Count */
-
-/* Bit masks for MXVR_CLK_CTL */
-
-#define MXTALCEN 0x1 /* MXVR Crystal Oscillator Clock Enable */
-#define MXTALFEN 0x2 /* MXVR Crystal Oscillator Feedback Enable */
-#define MXTALMUL 0x30 /* MXVR Crystal Multiplier */
-#define CLKX3SEL 0x80 /* Clock Generation Source Select */
-#define MMCLKEN 0x100 /* Master Clock Enable */
-#define MMCLKMUL 0x1e00 /* Master Clock Multiplication Factor */
-#define PLLSMPS 0xe000 /* MXVR PLL State Machine Prescaler */
-#define MBCLKEN 0x10000 /* Bit Clock Enable */
-#define MBCLKDIV 0x1e0000 /* Bit Clock Divide Factor */
-#define INVRX 0x800000 /* Invert Receive Data */
-#define MFSEN 0x1000000 /* Frame Sync Enable */
-#define MFSDIV 0x1e000000 /* Frame Sync Divide Factor */
-#define MFSSEL 0x60000000 /* Frame Sync Select */
-#define MFSSYNC 0x80000000 /* Frame Sync Synchronization Select */
-
-/* Bit masks for MXVR_CDRPLL_CTL */
-
-#define CDRSMEN 0x1 /* MXVR CDRPLL State Machine Enable */
-#define CDRRSTB 0x2 /* MXVR CDRPLL Reset */
-#define CDRSVCO 0x4 /* MXVR CDRPLL Start VCO */
-#define CDRMODE 0x8 /* MXVR CDRPLL CDR Mode Select */
-#define CDRSCNT 0x3f0 /* MXVR CDRPLL Start Counter */
-#define CDRLCNT 0xfc00 /* MXVR CDRPLL Lock Counter */
-#define CDRSHPSEL 0x3f0000 /* MXVR CDRPLL Shaper Select */
-#define CDRSHPEN 0x800000 /* MXVR CDRPLL Shaper Enable */
-#define CDRCPSEL 0xff000000 /* MXVR CDRPLL Charge Pump Current Select */
-
-/* Bit masks for MXVR_FMPLL_CTL */
-
-#define FMSMEN 0x1 /* MXVR FMPLL State Machine Enable */
-#define FMRSTB 0x2 /* MXVR FMPLL Reset */
-#define FMSVCO 0x4 /* MXVR FMPLL Start VCO */
-#define FMSCNT 0x3f0 /* MXVR FMPLL Start Counter */
-#define FMLCNT 0xfc00 /* MXVR FMPLL Lock Counter */
-#define FMCPSEL 0xff000000 /* MXVR FMPLL Charge Pump Current Select */
-
-/* Bit masks for MXVR_PIN_CTL */
-
-#define MTXONBOD 0x1 /* MTXONB Open Drain Select */
-#define MTXONBG 0x2 /* MTXONB Gates MTX Select */
-#define MFSOE 0x10 /* MFS Output Enable */
-#define MFSGPSEL 0x20 /* MFS General Purpose Output Select */
-#define MFSGPDAT 0x40 /* MFS General Purpose Output Data */
-
-/* Bit masks for MXVR_SCLK_CNT */
-
-#define SCNT 0xffff /* System Clock Count */
-
-/* Bit masks for KPAD_CTL */
-
-#define KPAD_EN 0x1 /* Keypad Enable */
-#define KPAD_IRQMODE 0x6 /* Key Press Interrupt Enable */
-#define KPAD_ROWEN 0x1c00 /* Row Enable Width */
-#define KPAD_COLEN 0xe000 /* Column Enable Width */
-
-/* Bit masks for KPAD_PRESCALE */
-
-#define KPAD_PRESCALE_VAL 0x3f /* Key Prescale Value */
-
-/* Bit masks for KPAD_MSEL */
-
-#define DBON_SCALE 0xff /* Debounce Scale Value */
-#define COLDRV_SCALE 0xff00 /* Column Driver Scale Value */
-
-/* Bit masks for KPAD_ROWCOL */
-
-#define KPAD_ROW 0xff /* Rows Pressed */
-#define KPAD_COL 0xff00 /* Columns Pressed */
-
-/* Bit masks for KPAD_STAT */
-
-#define KPAD_IRQ 0x1 /* Keypad Interrupt Status */
-#define KPAD_MROWCOL 0x6 /* Multiple Row/Column Keypress Status */
-#define KPAD_PRESSED 0x8 /* Key press current status */
-
-/* Bit masks for KPAD_SOFTEVAL */
-
-#define KPAD_SOFTEVAL_E 0x2 /* Software Programmable Force Evaluate */
-
-/* Bit masks for SDH_COMMAND */
-
-#define CMD_IDX 0x3f /* Command Index */
-#define CMD_RSP 0x40 /* Response */
-#define CMD_L_RSP 0x80 /* Long Response */
-#define CMD_INT_E 0x100 /* Command Interrupt */
-#define CMD_PEND_E 0x200 /* Command Pending */
-#define CMD_E 0x400 /* Command Enable */
-
-/* Bit masks for SDH_PWR_CTL */
-
-#define PWR_ON 0x3 /* Power On */
-#if 0
-#define TBD 0x3c /* TBD */
-#endif
-#define SD_CMD_OD 0x40 /* Open Drain Output */
-#define ROD_CTL 0x80 /* Rod Control */
-
-/* Bit masks for SDH_CLK_CTL */
-
-#define CLKDIV 0xff /* MC_CLK Divisor */
-#define CLK_E 0x100 /* MC_CLK Bus Clock Enable */
-#define PWR_SV_E 0x200 /* Power Save Enable */
-#define CLKDIV_BYPASS 0x400 /* Bypass Divisor */
-#define WIDE_BUS 0x800 /* Wide Bus Mode Enable */
-
-/* Bit masks for SDH_RESP_CMD */
-
-#define RESP_CMD 0x3f /* Response Command */
-
-/* Bit masks for SDH_DATA_CTL */
-
-#define DTX_E 0x1 /* Data Transfer Enable */
-#define DTX_DIR 0x2 /* Data Transfer Direction */
-#define DTX_MODE 0x4 /* Data Transfer Mode */
-#define DTX_DMA_E 0x8 /* Data Transfer DMA Enable */
-#define DTX_BLK_LGTH 0xf0 /* Data Transfer Block Length */
-
-/* Bit masks for SDH_STATUS */
-
-#define CMD_CRC_FAIL 0x1 /* CMD CRC Fail */
-#define DAT_CRC_FAIL 0x2 /* Data CRC Fail */
-#define CMD_TIME_OUT 0x4 /* CMD Time Out */
-#define DAT_TIME_OUT 0x8 /* Data Time Out */
-#define TX_UNDERRUN 0x10 /* Transmit Underrun */
-#define RX_OVERRUN 0x20 /* Receive Overrun */
-#define CMD_RESP_END 0x40 /* CMD Response End */
-#define CMD_SENT 0x80 /* CMD Sent */
-#define DAT_END 0x100 /* Data End */
-#define START_BIT_ERR 0x200 /* Start Bit Error */
-#define DAT_BLK_END 0x400 /* Data Block End */
-#define CMD_ACT 0x800 /* CMD Active */
-#define TX_ACT 0x1000 /* Transmit Active */
-#define RX_ACT 0x2000 /* Receive Active */
-#define TX_FIFO_STAT 0x4000 /* Transmit FIFO Status */
-#define RX_FIFO_STAT 0x8000 /* Receive FIFO Status */
-#define TX_FIFO_FULL 0x10000 /* Transmit FIFO Full */
-#define RX_FIFO_FULL 0x20000 /* Receive FIFO Full */
-#define TX_FIFO_ZERO 0x40000 /* Transmit FIFO Empty */
-#define RX_DAT_ZERO 0x80000 /* Receive FIFO Empty */
-#define TX_DAT_RDY 0x100000 /* Transmit Data Available */
-#define RX_FIFO_RDY 0x200000 /* Receive Data Available */
-
-/* Bit masks for SDH_STATUS_CLR */
-
-#define CMD_CRC_FAIL_STAT 0x1 /* CMD CRC Fail Status */
-#define DAT_CRC_FAIL_STAT 0x2 /* Data CRC Fail Status */
-#define CMD_TIMEOUT_STAT 0x4 /* CMD Time Out Status */
-#define DAT_TIMEOUT_STAT 0x8 /* Data Time Out status */
-#define TX_UNDERRUN_STAT 0x10 /* Transmit Underrun Status */
-#define RX_OVERRUN_STAT 0x20 /* Receive Overrun Status */
-#define CMD_RESP_END_STAT 0x40 /* CMD Response End Status */
-#define CMD_SENT_STAT 0x80 /* CMD Sent Status */
-#define DAT_END_STAT 0x100 /* Data End Status */
-#define START_BIT_ERR_STAT 0x200 /* Start Bit Error Status */
-#define DAT_BLK_END_STAT 0x400 /* Data Block End Status */
-
-/* Bit masks for SDH_MASK0 */
-
-#define CMD_CRC_FAIL_MASK 0x1 /* CMD CRC Fail Mask */
-#define DAT_CRC_FAIL_MASK 0x2 /* Data CRC Fail Mask */
-#define CMD_TIMEOUT_MASK 0x4 /* CMD Time Out Mask */
-#define DAT_TIMEOUT_MASK 0x8 /* Data Time Out Mask */
-#define TX_UNDERRUN_MASK 0x10 /* Transmit Underrun Mask */
-#define RX_OVERRUN_MASK 0x20 /* Receive Overrun Mask */
-#define CMD_RESP_END_MASK 0x40 /* CMD Response End Mask */
-#define CMD_SENT_MASK 0x80 /* CMD Sent Mask */
-#define DAT_END_MASK 0x100 /* Data End Mask */
-#define START_BIT_ERR_MASK 0x200 /* Start Bit Error Mask */
-#define DAT_BLK_END_MASK 0x400 /* Data Block End Mask */
-#define CMD_ACT_MASK 0x800 /* CMD Active Mask */
-#define TX_ACT_MASK 0x1000 /* Transmit Active Mask */
-#define RX_ACT_MASK 0x2000 /* Receive Active Mask */
-#define TX_FIFO_STAT_MASK 0x4000 /* Transmit FIFO Status Mask */
-#define RX_FIFO_STAT_MASK 0x8000 /* Receive FIFO Status Mask */
-#define TX_FIFO_FULL_MASK 0x10000 /* Transmit FIFO Full Mask */
-#define RX_FIFO_FULL_MASK 0x20000 /* Receive FIFO Full Mask */
-#define TX_FIFO_ZERO_MASK 0x40000 /* Transmit FIFO Empty Mask */
-#define RX_DAT_ZERO_MASK 0x80000 /* Receive FIFO Empty Mask */
-#define TX_DAT_RDY_MASK 0x100000 /* Transmit Data Available Mask */
-#define RX_FIFO_RDY_MASK 0x200000 /* Receive Data Available Mask */
-
-/* Bit masks for SDH_FIFO_CNT */
-
-#define FIFO_COUNT 0x7fff /* FIFO Count */
-
-/* Bit masks for SDH_E_STATUS */
-
-#define SDIO_INT_DET 0x2 /* SDIO Int Detected */
-#define SD_CARD_DET 0x10 /* SD Card Detect */
-
-/* Bit masks for SDH_E_MASK */
-
-#define SDIO_MSK 0x2 /* Mask SDIO Int Detected */
-#define SCD_MSK 0x40 /* Mask Card Detect */
-
-/* Bit masks for SDH_CFG */
-
-#define CLKS_EN 0x1 /* Clocks Enable */
-#define SD4E 0x4 /* SDIO 4-Bit Enable */
-#define MWE 0x8 /* Moving Window Enable */
-#define SD_RST 0x10 /* SDMMC Reset */
-#define PUP_SDDAT 0x20 /* Pull-up SD_DAT */
-#define PUP_SDDAT3 0x40 /* Pull-up SD_DAT3 */
-#define PD_SDDAT3 0x80 /* Pull-down SD_DAT3 */
-
-/* Bit masks for SDH_RD_WAIT_EN */
-
-#define RWR 0x1 /* Read Wait Request */
-
-/* Bit masks for ATAPI_CONTROL */
-
-#define PIO_START 0x1 /* Start PIO/Reg Op */
-#define MULTI_START 0x2 /* Start Multi-DMA Op */
-#define ULTRA_START 0x4 /* Start Ultra-DMA Op */
-#define XFER_DIR 0x8 /* Transfer Direction */
-#define IORDY_EN 0x10 /* IORDY Enable */
-#define FIFO_FLUSH 0x20 /* Flush FIFOs */
-#define SOFT_RST 0x40 /* Soft Reset */
-#define DEV_RST 0x80 /* Device Reset */
-#define TFRCNT_RST 0x100 /* Trans Count Reset */
-#define END_ON_TERM 0x200 /* End/Terminate Select */
-#define PIO_USE_DMA 0x400 /* PIO-DMA Enable */
-#define UDMAIN_FIFO_THRS 0xf000 /* Ultra DMA-IN FIFO Threshold */
-
-/* Bit masks for ATAPI_STATUS */
-
-#define PIO_XFER_ON 0x1 /* PIO transfer in progress */
-#define MULTI_XFER_ON 0x2 /* Multi-word DMA transfer in progress */
-#define ULTRA_XFER_ON 0x4 /* Ultra DMA transfer in progress */
-#define ULTRA_IN_FL 0xf0 /* Ultra DMA Input FIFO Level */
-
-/* Bit masks for ATAPI_DEV_ADDR */
-
-#define DEV_ADDR 0x1f /* Device Address */
-
-/* Bit masks for ATAPI_INT_MASK */
-
-#define ATAPI_DEV_INT_MASK 0x1 /* Device interrupt mask */
-#define PIO_DONE_MASK 0x2 /* PIO transfer done interrupt mask */
-#define MULTI_DONE_MASK 0x4 /* Multi-DMA transfer done interrupt mask */
-#define UDMAIN_DONE_MASK 0x8 /* Ultra-DMA in transfer done interrupt mask */
-#define UDMAOUT_DONE_MASK 0x10 /* Ultra-DMA out transfer done interrupt mask */
-#define HOST_TERM_XFER_MASK 0x20 /* Host terminate current transfer interrupt mask */
-#define MULTI_TERM_MASK 0x40 /* Device terminate Multi-DMA transfer interrupt mask */
-#define UDMAIN_TERM_MASK 0x80 /* Device terminate Ultra-DMA-in transfer interrupt mask */
-#define UDMAOUT_TERM_MASK 0x100 /* Device terminate Ultra-DMA-out transfer interrupt mask */
-
-/* Bit masks for ATAPI_INT_STATUS */
-
-#define ATAPI_DEV_INT 0x1 /* Device interrupt status */
-#define PIO_DONE_INT 0x2 /* PIO transfer done interrupt status */
-#define MULTI_DONE_INT 0x4 /* Multi-DMA transfer done interrupt status */
-#define UDMAIN_DONE_INT 0x8 /* Ultra-DMA in transfer done interrupt status */
-#define UDMAOUT_DONE_INT 0x10 /* Ultra-DMA out transfer done interrupt status */
-#define HOST_TERM_XFER_INT 0x20 /* Host terminate current transfer interrupt status */
-#define MULTI_TERM_INT 0x40 /* Device terminate Multi-DMA transfer interrupt status */
-#define UDMAIN_TERM_INT 0x80 /* Device terminate Ultra-DMA-in transfer interrupt status */
-#define UDMAOUT_TERM_INT 0x100 /* Device terminate Ultra-DMA-out transfer interrupt status */
-
-/* Bit masks for ATAPI_LINE_STATUS */
-
-#define ATAPI_INTR 0x1 /* Device interrupt to host line status */
-#define ATAPI_DASP 0x2 /* Device dasp to host line status */
-#define ATAPI_CS0N 0x4 /* ATAPI chip select 0 line status */
-#define ATAPI_CS1N 0x8 /* ATAPI chip select 1 line status */
-#define ATAPI_ADDR 0x70 /* ATAPI address line status */
-#define ATAPI_DMAREQ 0x80 /* ATAPI DMA request line status */
-#define ATAPI_DMAACKN 0x100 /* ATAPI DMA acknowledge line status */
-#define ATAPI_DIOWN 0x200 /* ATAPI write line status */
-#define ATAPI_DIORN 0x400 /* ATAPI read line status */
-#define ATAPI_IORDY 0x800 /* ATAPI IORDY line status */
-
-/* Bit masks for ATAPI_SM_STATE */
-
-#define PIO_CSTATE 0xf /* PIO mode state machine current state */
-#define DMA_CSTATE 0xf0 /* DMA mode state machine current state */
-#define UDMAIN_CSTATE 0xf00 /* Ultra DMA-In mode state machine current state */
-#define UDMAOUT_CSTATE 0xf000 /* Ultra DMA-Out mode state machine current state */
-
-/* Bit masks for ATAPI_TERMINATE */
-
-#define ATAPI_HOST_TERM 0x1 /* Host termination */
-
-/* Bit masks for ATAPI_REG_TIM_0 */
-
-#define T2_REG 0xff /* End of cycle time for register access transfers */
-#define TEOC_REG 0xff00 /* Selects DIOR/DIOW pulsewidth */
-
-/* Bit masks for ATAPI_PIO_TIM_0 */
-
-#define T1_REG 0xf /* Time from address valid to DIOR/DIOW */
-#define T2_REG_PIO 0xff0 /* DIOR/DIOW pulsewidth */
-#define T4_REG 0xf000 /* DIOW data hold */
-
-/* Bit masks for ATAPI_PIO_TIM_1 */
-
-#define TEOC_REG_PIO 0xff /* End of cycle time for PIO access transfers. */
-
-/* Bit masks for ATAPI_MULTI_TIM_0 */
-
-#define TD 0xff /* DIOR/DIOW asserted pulsewidth */
-#define TM 0xff00 /* Time from address valid to DIOR/DIOW */
-
-/* Bit masks for ATAPI_MULTI_TIM_1 */
-
-#define TKW 0xff /* Selects DIOW negated pulsewidth */
-#define TKR 0xff00 /* Selects DIOR negated pulsewidth */
-
-/* Bit masks for ATAPI_MULTI_TIM_2 */
-
-#define TH 0xff /* Selects DIOW data hold */
-#define TEOC 0xff00 /* Selects end of cycle for DMA */
-
-/* Bit masks for ATAPI_ULTRA_TIM_0 */
-
-#define TACK 0xff /* Selects setup and hold times for TACK */
-#define TENV 0xff00 /* Selects envelope time */
-
-/* Bit masks for ATAPI_ULTRA_TIM_1 */
-
-#define TDVS 0xff /* Selects data valid setup time */
-#define TCYC_TDVS 0xff00 /* Selects cycle time - TDVS time */
-
-/* Bit masks for ATAPI_ULTRA_TIM_2 */
-
-#define TSS 0xff /* Selects time from STROBE edge to negation of DMARQ or assertion of STOP */
-#define TMLI 0xff00 /* Selects interlock time */
-
-/* Bit masks for ATAPI_ULTRA_TIM_3 */
-
-#define TZAH 0xff /* Selects minimum delay required for output */
-#define READY_PAUSE 0xff00 /* Selects ready to pause */
-
-/* Bit masks for TIMER_ENABLE1 */
-
-#define TIMEN8 0x1 /* Timer 8 Enable */
-#define TIMEN9 0x2 /* Timer 9 Enable */
-#define TIMEN10 0x4 /* Timer 10 Enable */
-
-/* Bit masks for TIMER_DISABLE1 */
-
-#define TIMDIS8 0x1 /* Timer 8 Disable */
-#define TIMDIS9 0x2 /* Timer 9 Disable */
-#define TIMDIS10 0x4 /* Timer 10 Disable */
-
-/* Bit masks for TIMER_STATUS1 */
-
-#define TIMIL8 0x1 /* Timer 8 Interrupt */
-#define TIMIL9 0x2 /* Timer 9 Interrupt */
-#define TIMIL10 0x4 /* Timer 10 Interrupt */
-#define TOVF_ERR8 0x10 /* Timer 8 Counter Overflow */
-#define TOVF_ERR9 0x20 /* Timer 9 Counter Overflow */
-#define TOVF_ERR10 0x40 /* Timer 10 Counter Overflow */
-#define TRUN8 0x1000 /* Timer 8 Slave Enable Status */
-#define TRUN9 0x2000 /* Timer 9 Slave Enable Status */
-#define TRUN10 0x4000 /* Timer 10 Slave Enable Status */
-
-/* Bit masks for EPPI0 are obtained from common base header for EPPIx (EPPI1 and EPPI2) */
-
-/* Bit masks for USB_FADDR */
-
-#define FUNCTION_ADDRESS 0x7f /* Function address */
-
-/* Bit masks for USB_POWER */
-
-#define ENABLE_SUSPENDM 0x1 /* enable SuspendM output */
-#define SUSPEND_MODE 0x2 /* Suspend Mode indicator */
-#define RESUME_MODE 0x4 /* Resume Mode indicator */
-#define RESET 0x8 /* Reset indicator */
-#define HS_MODE 0x10 /* High Speed mode indicator */
-#define HS_ENABLE 0x20 /* High Speed Enable */
-#define SOFT_CONN 0x40 /* Soft connect */
-#define ISO_UPDATE 0x80 /* Isochronous update */
-
-/* Bit masks for USB_INTRTX */
-
-#define EP0_TX 0x1 /* Tx Endpoint 0 interrupt */
-#define EP1_TX 0x2 /* Tx Endpoint 1 interrupt */
-#define EP2_TX 0x4 /* Tx Endpoint 2 interrupt */
-#define EP3_TX 0x8 /* Tx Endpoint 3 interrupt */
-#define EP4_TX 0x10 /* Tx Endpoint 4 interrupt */
-#define EP5_TX 0x20 /* Tx Endpoint 5 interrupt */
-#define EP6_TX 0x40 /* Tx Endpoint 6 interrupt */
-#define EP7_TX 0x80 /* Tx Endpoint 7 interrupt */
-
-/* Bit masks for USB_INTRRX */
-
-#define EP1_RX 0x2 /* Rx Endpoint 1 interrupt */
-#define EP2_RX 0x4 /* Rx Endpoint 2 interrupt */
-#define EP3_RX 0x8 /* Rx Endpoint 3 interrupt */
-#define EP4_RX 0x10 /* Rx Endpoint 4 interrupt */
-#define EP5_RX 0x20 /* Rx Endpoint 5 interrupt */
-#define EP6_RX 0x40 /* Rx Endpoint 6 interrupt */
-#define EP7_RX 0x80 /* Rx Endpoint 7 interrupt */
-
-/* Bit masks for USB_INTRTXE */
-
-#define EP0_TX_E 0x1 /* Endpoint 0 interrupt Enable */
-#define EP1_TX_E 0x2 /* Tx Endpoint 1 interrupt Enable */
-#define EP2_TX_E 0x4 /* Tx Endpoint 2 interrupt Enable */
-#define EP3_TX_E 0x8 /* Tx Endpoint 3 interrupt Enable */
-#define EP4_TX_E 0x10 /* Tx Endpoint 4 interrupt Enable */
-#define EP5_TX_E 0x20 /* Tx Endpoint 5 interrupt Enable */
-#define EP6_TX_E 0x40 /* Tx Endpoint 6 interrupt Enable */
-#define EP7_TX_E 0x80 /* Tx Endpoint 7 interrupt Enable */
-
-/* Bit masks for USB_INTRRXE */
-
-#define EP1_RX_E 0x2 /* Rx Endpoint 1 interrupt Enable */
-#define EP2_RX_E 0x4 /* Rx Endpoint 2 interrupt Enable */
-#define EP3_RX_E 0x8 /* Rx Endpoint 3 interrupt Enable */
-#define EP4_RX_E 0x10 /* Rx Endpoint 4 interrupt Enable */
-#define EP5_RX_E 0x20 /* Rx Endpoint 5 interrupt Enable */
-#define EP6_RX_E 0x40 /* Rx Endpoint 6 interrupt Enable */
-#define EP7_RX_E 0x80 /* Rx Endpoint 7 interrupt Enable */
-
-/* Bit masks for USB_INTRUSB */
-
-#define SUSPEND_B 0x1 /* Suspend indicator */
-#define RESUME_B 0x2 /* Resume indicator */
-#define RESET_OR_BABLE_B 0x4 /* Reset/babble indicator */
-#define SOF_B 0x8 /* Start of frame */
-#define CONN_B 0x10 /* Connection indicator */
-#define DISCON_B 0x20 /* Disconnect indicator */
-#define SESSION_REQ_B 0x40 /* Session Request */
-#define VBUS_ERROR_B 0x80 /* Vbus threshold indicator */
-
-/* Bit masks for USB_INTRUSBE */
-
-#define SUSPEND_BE 0x1 /* Suspend indicator int enable */
-#define RESUME_BE 0x2 /* Resume indicator int enable */
-#define RESET_OR_BABLE_BE 0x4 /* Reset/babble indicator int enable */
-#define SOF_BE 0x8 /* Start of frame int enable */
-#define CONN_BE 0x10 /* Connection indicator int enable */
-#define DISCON_BE 0x20 /* Disconnect indicator int enable */
-#define SESSION_REQ_BE 0x40 /* Session Request int enable */
-#define VBUS_ERROR_BE 0x80 /* Vbus threshold indicator int enable */
-
-/* Bit masks for USB_FRAME */
-
-#define FRAME_NUMBER 0x7ff /* Frame number */
-
-/* Bit masks for USB_INDEX */
-
-#define SELECTED_ENDPOINT 0xf /* selected endpoint */
-
-/* Bit masks for USB_GLOBAL_CTL */
-
-#define GLOBAL_ENA 0x1 /* enables USB module */
-#define EP1_TX_ENA 0x2 /* Transmit endpoint 1 enable */
-#define EP2_TX_ENA 0x4 /* Transmit endpoint 2 enable */
-#define EP3_TX_ENA 0x8 /* Transmit endpoint 3 enable */
-#define EP4_TX_ENA 0x10 /* Transmit endpoint 4 enable */
-#define EP5_TX_ENA 0x20 /* Transmit endpoint 5 enable */
-#define EP6_TX_ENA 0x40 /* Transmit endpoint 6 enable */
-#define EP7_TX_ENA 0x80 /* Transmit endpoint 7 enable */
-#define EP1_RX_ENA 0x100 /* Receive endpoint 1 enable */
-#define EP2_RX_ENA 0x200 /* Receive endpoint 2 enable */
-#define EP3_RX_ENA 0x400 /* Receive endpoint 3 enable */
-#define EP4_RX_ENA 0x800 /* Receive endpoint 4 enable */
-#define EP5_RX_ENA 0x1000 /* Receive endpoint 5 enable */
-#define EP6_RX_ENA 0x2000 /* Receive endpoint 6 enable */
-#define EP7_RX_ENA 0x4000 /* Receive endpoint 7 enable */
-
-/* Bit masks for USB_OTG_DEV_CTL */
-
-#define SESSION 0x1 /* session indicator */
-#define HOST_REQ 0x2 /* Host negotiation request */
-#define HOST_MODE 0x4 /* indicates USBDRC is a host */
-#define VBUS0 0x8 /* Vbus level indicator[0] */
-#define VBUS1 0x10 /* Vbus level indicator[1] */
-#define LSDEV 0x20 /* Low-speed indicator */
-#define FSDEV 0x40 /* Full or High-speed indicator */
-#define B_DEVICE 0x80 /* 'A' or 'B' device indicator */
-
-/* Bit masks for USB_OTG_VBUS_IRQ */
-
-#define DRIVE_VBUS_ON 0x1 /* indicator to drive VBUS control circuit */
-#define DRIVE_VBUS_OFF 0x2 /* indicator to shut off charge pump */
-#define CHRG_VBUS_START 0x4 /* indicator for external circuit to start charging VBUS */
-#define CHRG_VBUS_END 0x8 /* indicator for external circuit to end charging VBUS */
-#define DISCHRG_VBUS_START 0x10 /* indicator to start discharging VBUS */
-#define DISCHRG_VBUS_END 0x20 /* indicator to stop discharging VBUS */
-
-/* Bit masks for USB_OTG_VBUS_MASK */
-
-#define DRIVE_VBUS_ON_ENA 0x1 /* enable DRIVE_VBUS_ON interrupt */
-#define DRIVE_VBUS_OFF_ENA 0x2 /* enable DRIVE_VBUS_OFF interrupt */
-#define CHRG_VBUS_START_ENA 0x4 /* enable CHRG_VBUS_START interrupt */
-#define CHRG_VBUS_END_ENA 0x8 /* enable CHRG_VBUS_END interrupt */
-#define DISCHRG_VBUS_START_ENA 0x10 /* enable DISCHRG_VBUS_START interrupt */
-#define DISCHRG_VBUS_END_ENA 0x20 /* enable DISCHRG_VBUS_END interrupt */
-
-/* Bit masks for USB_CSR0 */
-
-#define RXPKTRDY 0x1 /* data packet receive indicator */
-#define TXPKTRDY 0x2 /* data packet in FIFO indicator */
-#define STALL_SENT 0x4 /* STALL handshake sent */
-#define DATAEND 0x8 /* Data end indicator */
-#define SETUPEND 0x10 /* Setup end */
-#define SENDSTALL 0x20 /* Send STALL handshake */
-#define SERVICED_RXPKTRDY 0x40 /* used to clear the RxPktRdy bit */
-#define SERVICED_SETUPEND 0x80 /* used to clear the SetupEnd bit */
-#define FLUSHFIFO 0x100 /* flush endpoint FIFO */
-#define STALL_RECEIVED_H 0x4 /* STALL handshake received host mode */
-#define SETUPPKT_H 0x8 /* send Setup token host mode */
-#define ERROR_H 0x10 /* timeout error indicator host mode */
-#define REQPKT_H 0x20 /* Request an IN transaction host mode */
-#define STATUSPKT_H 0x40 /* Status stage transaction host mode */
-#define NAK_TIMEOUT_H 0x80 /* EP0 halted after a NAK host mode */
-
-/* Bit masks for USB_COUNT0 */
-
-#define EP0_RX_COUNT 0x7f /* number of received bytes in EP0 FIFO */
-
-/* Bit masks for USB_NAKLIMIT0 */
-
-#define EP0_NAK_LIMIT 0x1f /* number of frames/micro frames after which EP0 times out */
-
-/* Bit masks for USB_TX_MAX_PACKET */
-
-#define MAX_PACKET_SIZE_T 0x7ff /* maximum data payload in a frame */
-
-/* Bit masks for USB_RX_MAX_PACKET */
-
-#define MAX_PACKET_SIZE_R 0x7ff /* maximum data payload in a frame */
-
-/* Bit masks for USB_TXCSR */
-
-#define TXPKTRDY_T 0x1 /* data packet in FIFO indicator */
-#define FIFO_NOT_EMPTY_T 0x2 /* FIFO not empty */
-#define UNDERRUN_T 0x4 /* TxPktRdy not set for an IN token */
-#define FLUSHFIFO_T 0x8 /* flush endpoint FIFO */
-#define STALL_SEND_T 0x10 /* issue a Stall handshake */
-#define STALL_SENT_T 0x20 /* Stall handshake transmitted */
-#define CLEAR_DATATOGGLE_T 0x40 /* clear endpoint data toggle */
-#define INCOMPTX_T 0x80 /* indicates that a large packet is split */
-#define DMAREQMODE_T 0x400 /* DMA mode (0 or 1) selection */
-#define FORCE_DATATOGGLE_T 0x800 /* Force data toggle */
-#define DMAREQ_ENA_T 0x1000 /* Enable DMA request for Tx EP */
-#define ISO_T 0x4000 /* enable Isochronous transfers */
-#define AUTOSET_T 0x8000 /* allows TxPktRdy to be set automatically */
-#define ERROR_TH 0x4 /* error condition host mode */
-#define STALL_RECEIVED_TH 0x20 /* Stall handshake received host mode */
-#define NAK_TIMEOUT_TH 0x80 /* NAK timeout host mode */
-
-/* Bit masks for USB_TXCOUNT */
-
-#define TX_COUNT 0x1fff /* Number of bytes to be written to the selected endpoint Tx FIFO */
-
-/* Bit masks for USB_RXCSR */
-
-#define RXPKTRDY_R 0x1 /* data packet in FIFO indicator */
-#define FIFO_FULL_R 0x2 /* FIFO full indicator */
-#define OVERRUN_R 0x4 /* Rx FIFO overrun */
-#define DATAERROR_R 0x8 /* Out packet cannot be loaded into Rx FIFO */
-#define FLUSHFIFO_R 0x10 /* flush endpoint FIFO */
-#define STALL_SEND_R 0x20 /* issue a Stall handshake */
-#define STALL_SENT_R 0x40 /* Stall handshake transmitted */
-#define CLEAR_DATATOGGLE_R 0x80 /* clear endpoint data toggle */
-#define INCOMPRX_R 0x100 /* indicates that a large packet is split */
-#define DMAREQMODE_R 0x800 /* DMA mode (0 or 1) selection */
-#define DISNYET_R 0x1000 /* disable Nyet handshakes */
-#define DMAREQ_ENA_R 0x2000 /* Enable DMA request for Rx EP */
-#define ISO_R 0x4000 /* enable Isochronous transfers */
-#define AUTOCLEAR_R 0x8000 /* allows RxPktRdy to be cleared automatically */
-#define ERROR_RH 0x4 /* error condition host mode */
-#define REQPKT_RH 0x20 /* request an IN transaction host mode */
-#define STALL_RECEIVED_RH 0x40 /* Stall handshake received host mode */
-#define INCOMPRX_RH 0x100 /* indicates that a large packet is split host mode */
-#define DMAREQMODE_RH 0x800 /* DMA mode (0 or 1) selection host mode */
-#define AUTOREQ_RH 0x4000 /* sets ReqPkt automatically host mode */
-
-/* Bit masks for USB_RXCOUNT */
-
-#define RX_COUNT 0x1fff /* Number of received bytes in the packet in the Rx FIFO */
-
-/* Bit masks for USB_TXTYPE */
-
-#define TARGET_EP_NO_T 0xf /* EP number */
-#define PROTOCOL_T 0xc /* transfer type */
-
-/* Bit masks for USB_TXINTERVAL */
-
-#define TX_POLL_INTERVAL 0xff /* polling interval for selected Tx EP */
-
-/* Bit masks for USB_RXTYPE */
-
-#define TARGET_EP_NO_R 0xf /* EP number */
-#define PROTOCOL_R 0xc /* transfer type */
-
-/* Bit masks for USB_RXINTERVAL */
-
-#define RX_POLL_INTERVAL 0xff /* polling interval for selected Rx EP */
-
-/* Bit masks for USB_DMA_INTERRUPT */
-
-#define DMA0_INT 0x1 /* DMA0 pending interrupt */
-#define DMA1_INT 0x2 /* DMA1 pending interrupt */
-#define DMA2_INT 0x4 /* DMA2 pending interrupt */
-#define DMA3_INT 0x8 /* DMA3 pending interrupt */
-#define DMA4_INT 0x10 /* DMA4 pending interrupt */
-#define DMA5_INT 0x20 /* DMA5 pending interrupt */
-#define DMA6_INT 0x40 /* DMA6 pending interrupt */
-#define DMA7_INT 0x80 /* DMA7 pending interrupt */
-
-/* Bit masks for USB_DMAxCONTROL */
-
-#define DMA_ENA 0x1 /* DMA enable */
-#define DIRECTION 0x2 /* direction of DMA transfer */
-#define MODE 0x4 /* DMA mode (0 or 1) selection */
-#define INT_ENA 0x8 /* Interrupt enable */
-#define EPNUM 0xf0 /* EP number */
-#define BUSERROR 0x100 /* DMA Bus error */
-
-/* Bit masks for USB_DMAxADDRHIGH */
-
-#define DMA_ADDR_HIGH 0xffff /* Upper 16-bits of memory source/destination address for the DMA master channel */
-
-/* Bit masks for USB_DMAxADDRLOW */
-
-#define DMA_ADDR_LOW 0xffff /* Lower 16-bits of memory source/destination address for the DMA master channel */
-
-/* Bit masks for USB_DMAxCOUNTHIGH */
-
-#define DMA_COUNT_HIGH 0xffff /* Upper 16-bits of byte count of DMA transfer for DMA master channel */
-
-/* Bit masks for USB_DMAxCOUNTLOW */
-
-#define DMA_COUNT_LOW 0xffff /* Lower 16-bits of byte count of DMA transfer for DMA master channel */
-
-/* Bit masks for HMDMAx_CONTROL */
-
-#define HMDMAEN 0x1 /* Handshake MDMA Enable */
-#define REP 0x2 /* Handshake MDMA Request Polarity */
-#define UTE 0x8 /* Urgency Threshold Enable */
-#define OIE 0x10 /* Overflow Interrupt Enable */
-#define BDIE 0x20 /* Block Done Interrupt Enable */
-#define MBDI 0x40 /* Mask Block Done Interrupt */
-#define DRQ 0x300 /* Handshake MDMA Request Type */
-#define RBC 0x1000 /* Force Reload of BCOUNT */
-#define PS 0x2000 /* Pin Status */
-#define OI 0x4000 /* Overflow Interrupt Generated */
-#define BDI 0x8000 /* Block Done Interrupt Generated */
-
-/* ******************************************* */
-/* MULTI BIT MACRO ENUMERATIONS */
-/* ******************************************* */
-
-/* ************************ */
-/* MXVR Address Offsets */
-/* ************************ */
-
-/* Control Message Receive Buffer (CMRB) Address Offsets */
-
-#define CMRB_STRIDE 0x00000016lu
-
-#define CMRB_DST_OFFSET 0x00000000lu
-#define CMRB_SRC_OFFSET 0x00000002lu
-#define CMRB_DATA_OFFSET 0x00000005lu
-
-/* Control Message Transmit Buffer (CMTB) Address Offsets */
-
-#define CMTB_PRIO_OFFSET 0x00000000lu
-#define CMTB_DST_OFFSET 0x00000002lu
-#define CMTB_SRC_OFFSET 0x00000004lu
-#define CMTB_TYPE_OFFSET 0x00000006lu
-#define CMTB_DATA_OFFSET 0x00000007lu
-
-#define CMTB_ANSWER_OFFSET 0x0000000Alu
-
-#define CMTB_STAT_N_OFFSET 0x00000018lu
-#define CMTB_STAT_A_OFFSET 0x00000016lu
-#define CMTB_STAT_D_OFFSET 0x0000000Elu
-#define CMTB_STAT_R_OFFSET 0x00000014lu
-#define CMTB_STAT_W_OFFSET 0x00000014lu
-#define CMTB_STAT_G_OFFSET 0x00000014lu
-
-/* Asynchronous Packet Receive Buffer (APRB) Address Offsets */
-
-#define APRB_STRIDE 0x00000400lu
-
-#define APRB_DST_OFFSET 0x00000000lu
-#define APRB_LEN_OFFSET 0x00000002lu
-#define APRB_SRC_OFFSET 0x00000004lu
-#define APRB_DATA_OFFSET 0x00000006lu
-
-/* Asynchronous Packet Transmit Buffer (APTB) Address Offsets */
-
-#define APTB_PRIO_OFFSET 0x00000000lu
-#define APTB_DST_OFFSET 0x00000002lu
-#define APTB_LEN_OFFSET 0x00000004lu
-#define APTB_SRC_OFFSET 0x00000006lu
-#define APTB_DATA_OFFSET 0x00000008lu
-
-/* Remote Read Buffer (RRDB) Address Offsets */
-
-#define RRDB_WADDR_OFFSET 0x00000100lu
-#define RRDB_WLEN_OFFSET 0x00000101lu
-
-/* **************** */
-/* MXVR Macros */
-/* **************** */
-
-/* MXVR_CONFIG Macros */
-
-#define SET_MSB(x) ( ( (x) & 0xF ) << 9)
-
-/* MXVR_INT_STAT_1 Macros */
-
-#define DONEX(x) (0x00000002 << (4 * (x)))
-#define HDONEX(x) (0x00000001 << (4 * (x)))
-
-/* MXVR_INT_EN_1 Macros */
-
-#define DONEENX(x) (0x00000002 << (4 * (x)))
-#define HDONEENX(x) (0x00000001 << (4 * (x)))
-
-/* MXVR_CDRPLL_CTL Macros */
-
-#define SET_CDRSHPSEL(x) ( ( (x) & 0x3F ) << 16)
-
-/* MXVR_FMPLL_CTL Macros */
-
-#define SET_CDRCPSEL(x) ( ( (x) & 0xFF ) << 24)
-#define SET_FMCPSEL(x) ( ( (x) & 0xFF ) << 24)
-
#endif /* _DEF_BF549_H */
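
A quick illustration of the parameterized MXVR macros that close this header: DONEX(x)/HDONEX(x) and DONEENX(x)/HDONEENX(x) expand to the per-logical-channel done/half-done status and enable bits, one nibble per channel. The sketch below is not part of the patch; it assumes the MXVR_INT_EN_1/MXVR_INT_STAT_1 register-address macros from the same header family and the generic bfin_read32()/bfin_write32() MMR accessors, and it treats the status bits as write-1-to-clear, which should be checked against the BF549 hardware reference.

/* Hypothetical sketch: arm the "done" interrupt for one MXVR logical
 * channel and poll for completion.  The register-address macros and the
 * bfin_read32()/bfin_write32() accessors are assumed to come from the
 * Blackfin MMR headers in a driver context. */
static void mxvr_example_wait_done(int lchan)
{
	u32 stat;

	/* DONEENX(x) == 0x2 << (4 * x): done-interrupt enable for channel x */
	bfin_write32(MXVR_INT_EN_1,
		     bfin_read32(MXVR_INT_EN_1) | DONEENX(lchan) | HDONEENX(lchan));

	/* DONEX(x) == 0x2 << (4 * x): done flag for channel x */
	do {
		stat = bfin_read32(MXVR_INT_STAT_1);
	} while (!(stat & DONEX(lchan)));

	/* assumed write-1-to-clear semantics for the status bit */
	bfin_write32(MXVR_INT_STAT_1, DONEX(lchan));
}
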
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 8590c8c7833..ab04d137fd8 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -1609,44 +1609,6 @@
#define PINT2 0x40000000 /* Pin Interrupt 2 */
#define PINT3 0x80000000 /* Pin Interrupt 3 */
-/* Bit masks for DMAx_CONFIG, MDMA_Sx_CONFIG, MDMA_Dx_CONFIG */
-
-#define DMAEN 0x1 /* DMA Channel Enable */
-#define WNR 0x2 /* DMA Direction */
-#define WDSIZE_8 0x0 /* Transfer Word Size = 8 */
-#define WDSIZE_16 0x4 /* Transfer Word Size = 16 */
-#define WDSIZE_32 0x8 /* Transfer Word Size = 32 */
-#define DMA2D 0x10 /* DMA Mode */
-#define RESTART 0x20 /* Work Unit Transitions */
-#define DI_SEL 0x40 /* Data Interrupt Timing Select */
-#define DI_EN 0x80 /* Data Interrupt Enable */
-
-#define NDSIZE 0xf00 /* Flex Descriptor Size */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-
-#define DMAFLOW 0xf000 /* Next Operation */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
-
-/* Bit masks for DMAx_IRQ_STATUS, MDMA_Sx_IRQ_STATUS, MDMA_Dx_IRQ_STATUS */
-
-#define DMA_DONE 0x1 /* DMA Completion Interrupt Status */
-#define DMA_ERR 0x2 /* DMA Error Interrupt Status */
-#define DFETCH 0x4 /* DMA Descriptor Fetch */
-#define DMA_RUN 0x8 /* DMA Channel Running */
-
/* Bit masks for DMAx_PERIPHERAL_MAP, MDMA_Sx_IRQ_STATUS, MDMA_Dx_IRQ_STATUS */
#define CTYPE 0x40 /* DMA Channel Type */
@@ -1815,10 +1777,6 @@
#define DEB3_MERROR 0x40 /* DEB3 Error (2nd) */
#define CORE_MERROR 0x80 /* Core Error (2nd) */
-/* Bit masks for EBIU_ERRADD */
-
-#define ERROR_ADDRESS 0xffffffff /* Error Address */
-
/* Bit masks for EBIU_RSTCTL */
#define DDRSRESET 0x1 /* DDR soft reset */
@@ -1827,98 +1785,6 @@
#define SRACK 0x10 /* Self-refresh acknowledge */
#define MDDRENABLE 0x20 /* Mobile DDR enable */
-/* Bit masks for EBIU_DDRBRC0 */
-
-#define BRC0 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBRC1 */
-
-#define BRC1 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBRC2 */
-
-#define BRC2 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBRC3 */
-
-#define BRC3 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBRC4 */
-
-#define BRC4 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBRC5 */
-
-#define BRC5 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBRC6 */
-
-#define BRC6 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBRC7 */
-
-#define BRC7 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC0 */
-
-#define BWC0 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC1 */
-
-#define BWC1 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC2 */
-
-#define BWC2 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC3 */
-
-#define BWC3 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC4 */
-
-#define BWC4 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC5 */
-
-#define BWC5 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC6 */
-
-#define BWC6 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRBWC7 */
-
-#define BWC7 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRACCT */
-
-#define ACCT 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRTACT */
-
-#define TECT 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRARCT */
-
-#define ARCT 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRGC0 */
-
-#define GC0 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRGC1 */
-
-#define GC1 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRGC2 */
-
-#define GC2 0xffffffff /* Count */
-
-/* Bit masks for EBIU_DDRGC3 */
-
-#define GC3 0xffffffff /* Count */
-
/* Bit masks for EBIU_DDRMCEN */
#define B0WCENABLE 0x1 /* Bank 0 write count enable */
@@ -2092,12 +1958,6 @@
#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
-/* Bit masks for WDOG_CTL */
-
-#define WDEV 0x6 /* Watchdog Event */
-#define WDEN 0xff0 /* Watchdog Enable */
-#define WDRO 0x8000 /* Watchdog Rolled Over */
-
/* Bit masks for CNT_CONFIG */
#define CNTE 0x1 /* Counter Enable */
@@ -2149,81 +2009,6 @@
#define DPRESCALE 0xf /* Load Counter Register */
-/* Bit masks for RTC_STAT */
-
-#define SECONDS 0x3f /* Seconds */
-#define MINUTES 0xfc0 /* Minutes */
-#define HOURS 0x1f000 /* Hours */
-#define DAY_COUNTER 0xfffe0000 /* Day Counter */
-
-/* Bit masks for RTC_ICTL */
-
-#define STOPWATCH_INTERRUPT_ENABLE 0x1 /* Stopwatch Interrupt Enable */
-#define ALARM_INTERRUPT_ENABLE 0x2 /* Alarm Interrupt Enable */
-#define SECONDS_INTERRUPT_ENABLE 0x4 /* Seconds Interrupt Enable */
-#define MINUTES_INTERRUPT_ENABLE 0x8 /* Minutes Interrupt Enable */
-#define HOURS_INTERRUPT_ENABLE 0x10 /* Hours Interrupt Enable */
-#define TWENTY_FOUR_HOURS_INTERRUPT_ENABLE 0x20 /* 24 Hours Interrupt Enable */
-#define DAY_ALARM_INTERRUPT_ENABLE 0x40 /* Day Alarm Interrupt Enable */
-#define WRITE_COMPLETE_INTERRUPT_ENABLE 0x8000 /* Write Complete Interrupt Enable */
-
-/* Bit masks for RTC_ISTAT */
-
-#define STOPWATCH_EVENT_FLAG 0x1 /* Stopwatch Event Flag */
-#define ALARM_EVENT_FLAG 0x2 /* Alarm Event Flag */
-#define SECONDS_EVENT_FLAG 0x4 /* Seconds Event Flag */
-#define MINUTES_EVENT_FLAG 0x8 /* Minutes Event Flag */
-#define HOURS_EVENT_FLAG 0x10 /* Hours Event Flag */
-#define TWENTY_FOUR_HOURS_EVENT_FLAG 0x20 /* 24 Hours Event Flag */
-#define DAY_ALARM_EVENT_FLAG 0x40 /* Day Alarm Event Flag */
-#define WRITE_PENDING__STATUS 0x4000 /* Write Pending Status */
-#define WRITE_COMPLETE 0x8000 /* Write Complete */
-
-/* Bit masks for RTC_SWCNT */
-
-#define STOPWATCH_COUNT 0xffff /* Stopwatch Count */
-
-/* Bit masks for RTC_ALARM */
-
-#define SECONDS 0x3f /* Seconds */
-#define MINUTES 0xfc0 /* Minutes */
-#define HOURS 0x1f000 /* Hours */
-#define DAY 0xfffe0000 /* Day */
-
-/* Bit masks for RTC_PREN */
-
-#define PREN 0x1 /* Prescaler Enable */
-
-/* Bit masks for OTP_CONTROL */
-
-#define FUSE_FADDR 0x1ff /* OTP/Fuse Address */
-#define FIEN 0x800 /* OTP/Fuse Interrupt Enable */
-#define FTESTDEC 0x1000 /* OTP/Fuse Test Decoder */
-#define FWRTEST 0x2000 /* OTP/Fuse Write Test */
-#define FRDEN 0x4000 /* OTP/Fuse Read Enable */
-#define FWREN 0x8000 /* OTP/Fuse Write Enable */
-
-/* Bit masks for OTP_BEN */
-
-#define FBEN 0xffff /* OTP/Fuse Byte Enable */
-
-/* Bit masks for OTP_STATUS */
-
-#define FCOMP 0x1 /* OTP/Fuse Access Complete */
-#define FERROR 0x2 /* OTP/Fuse Access Error */
-#define MMRGLOAD 0x10 /* Memory Mapped Register Gasket Load */
-#define MMRGLOCK 0x20 /* Memory Mapped Register Gasket Lock */
-#define FPGMEN 0x40 /* OTP/Fuse Program Enable */
-
-/* Bit masks for OTP_TIMING */
-
-#define USECDIV 0xff /* Micro Second Divider */
-#define READACC 0x7f00 /* Read Access Time */
-#define CPUMPRL 0x38000 /* Charge Pump Release Time */
-#define CPUMPSU 0xc0000 /* Charge Pump Setup Time */
-#define CPUMPHD 0xf00000 /* Charge Pump Hold Time */
-#define PGMTIME 0xff000000 /* Program Time */
-
/* Bit masks for SECURE_SYSSWT */
#define EMUDABL 0x1 /* Emulation Disable. */
@@ -2252,26 +2037,6 @@
#define AFEXIT 0x10 /* Authentication Firmware Exit */
#define SECSTAT 0xe0 /* Secure Status */
-/* Bit masks for PLL_DIV */
-
-#define CSEL 0x30 /* Core Select */
-#define SSEL 0xf /* System Select */
-#define CSEL_DIV1 0x0000 /* CCLK = VCO / 1 */
-#define CSEL_DIV2 0x0010 /* CCLK = VCO / 2 */
-#define CSEL_DIV4 0x0020 /* CCLK = VCO / 4 */
-#define CSEL_DIV8 0x0030 /* CCLK = VCO / 8 */
-
-/* Bit masks for PLL_CTL */
-
-#define MSEL 0x7e00 /* Multiplier Select */
-#define BYPASS 0x100 /* PLL Bypass Enable */
-#define OUTPUT_DELAY 0x80 /* External Memory Output Delay Enable */
-#define INPUT_DELAY 0x40 /* External Memory Input Delay Enable */
-#define PDWN 0x20 /* Power Down */
-#define STOPCK 0x8 /* Stop Clock */
-#define PLL_OFF 0x2 /* Disable PLL */
-#define DF 0x1 /* Divide Frequency */
-
/* SWRST Masks */
#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
@@ -2279,52 +2044,6 @@
#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
-/* Bit masks for PLL_STAT */
-
-#define PLL_LOCKED 0x20 /* PLL Locked Status */
-#define ACTIVE_PLLDISABLED 0x4 /* Active Mode With PLL Disabled */
-#define FULL_ON 0x2 /* Full-On Mode */
-#define ACTIVE_PLLENABLED 0x1 /* Active Mode With PLL Enabled */
-#define RTCWS 0x400 /* RTC/Reset Wake-Up Status */
-#define CANWS 0x800 /* CAN Wake-Up Status */
-#define USBWS 0x2000 /* USB Wake-Up Status */
-#define KPADWS 0x4000 /* Keypad Wake-Up Status */
-#define ROTWS 0x8000 /* Rotary Wake-Up Status */
-#define GPWS 0x1000 /* General-Purpose Wake-Up Status */
-
-/* Bit masks for VR_CTL */
-
-#define FREQ 0x3 /* Regulator Switching Frequency */
-#define GAIN 0xc /* Voltage Output Level Gain */
-#define VLEV 0xf0 /* Internal Voltage Level */
-#define SCKELOW 0x8000 /* Drive SCKE Low During Reset Enable */
-#define WAKE 0x100 /* RTC/Reset Wake-Up Enable */
-#define CANWE 0x200 /* CAN0/1 Wake-Up Enable */
-#define GPWE 0x400 /* General-Purpose Wake-Up Enable */
-#define USBWE 0x800 /* USB Wake-Up Enable */
-#define KPADWE 0x1000 /* Keypad Wake-Up Enable */
-#define ROTWE 0x2000 /* Rotary Wake-Up Enable */
-
-#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */
-#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */
-#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */
-
-#define GAIN_5 0x0000 /* GAIN = 5 */
-#define GAIN_10 0x0004 /* GAIN = 10 */
-#define GAIN_20 0x0008 /* GAIN = 20 */
-#define GAIN_50 0x000C /* GAIN = 50 */
-
-#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
-#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
-#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
-#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
-#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
-#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
-#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
-#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
-#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */
-#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */
-
/* Bit masks for NFC_CTL */
#define WR_DLY 0xf /* Write Strobe Delay */
@@ -2489,14 +2208,6 @@
#define UCCT 0x40 /* Universal Counter CAN Trigger */
#define UCE 0x80 /* Universal Counter Enable */
-/* Bit masks for CAN0_UCCNT */
-
-#define UCCNT 0xffff /* Universal Counter Count Value */
-
-/* Bit masks for CAN0_UCRC */
-
-#define UCVAL 0xffff /* Universal Counter Reload/Capture Value */
-
/* Bit masks for CAN0_CEC */
#define RXECNT 0xff /* Receive Error Counter */
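
The DMAx_CONFIG bit masks dropped from defBF54x_base.h above (DMAEN, WNR, WDSIZE_x, DI_EN, NDSIZE_x, DMAFLOW_x) are normally OR'ed into a single 16-bit configuration word; they presumably remain available through the common Blackfin DMA definitions. A hedged sketch of such a composition follows; set_dma_config() is assumed to be the usual helper from the generic Blackfin DMA support, and the channel number is illustrative.

/* Hypothetical sketch (not part of this patch): configure a receive
 * DMA channel for 32-bit autobuffer transfers with a completion
 * interrupt, using the bit masks removed from this header. */
static void example_setup_rx_dma(unsigned int channel)
{
	unsigned short cfg;

	cfg = DMAEN		/* enable the channel */
	    | WNR		/* write to memory (receive direction) */
	    | WDSIZE_32		/* 32-bit transfer word size */
	    | DI_EN		/* interrupt when the work unit completes */
	    | DMAFLOW_AUTO;	/* autobuffer mode, no descriptor fetch */

	set_dma_config(channel, cfg);	/* assumed helper from <asm/dma.h> */
}
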
diff --git a/arch/blackfin/mach-bf561/boards/Kconfig b/arch/blackfin/mach-bf561/boards/Kconfig
index e4bc6d7c5a6..1aa529b9f8b 100644
--- a/arch/blackfin/mach-bf561/boards/Kconfig
+++ b/arch/blackfin/mach-bf561/boards/Kconfig
@@ -19,4 +19,11 @@ config BFIN561_BLUETECHNIX_CM
help
CM-BF561 support for EVAL- and DEV-Board.
+config BFIN561_ACVILON
+ bool "BF561-ACVILON"
+ help
+ BF561-ACVILON System On Module support (SO-DIMM 144).
+ For more information about Acvilon BF561 SoM
+ please go to http://www.niistt.ru/
+
endchoice
diff --git a/arch/blackfin/mach-bf561/boards/Makefile b/arch/blackfin/mach-bf561/boards/Makefile
index 3a152559e95..a5879f7857a 100644
--- a/arch/blackfin/mach-bf561/boards/Makefile
+++ b/arch/blackfin/mach-bf561/boards/Makefile
@@ -2,6 +2,7 @@
# arch/blackfin/mach-bf561/boards/Makefile
#
+obj-$(CONFIG_BFIN561_ACVILON) += acvilon.o
obj-$(CONFIG_BFIN561_BLUETECHNIX_CM) += cm_bf561.o
obj-$(CONFIG_BFIN561_EZKIT) += ezkit.o
obj-$(CONFIG_BFIN561_TEPLA) += tepla.o
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
new file mode 100644
index 00000000000..07e8dc8770d
--- /dev/null
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -0,0 +1,551 @@
+/*
+ * File: arch/blackfin/mach-bf561/acvilon.c
+ * Based on: arch/blackfin/mach-bf561/ezkit.c
+ * Author:
+ *
+ * Created:
+ * Description:
+ *
+ * Modified:
+ * Copyright 2004-2006 Analog Devices Inc.
+ * Copyright 2009 CJSC "NII STT"
+ *
+ * Bugs:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ * For more information about Acvilon BF561 SoM please
+ * go to http://www.niistt.ru/
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/plat-ram.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/i2c-pca-platform.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <asm/dma.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+#include <asm/cacheflush.h>
+#include <linux/i2c.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "Acvilon board";
+
+#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
+#include <linux/usb/isp1760.h>
+static struct resource bfin_isp1760_resources[] = {
+ [0] = {
+ .start = 0x20000000,
+ .end = 0x20000000 + 0x000fffff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_PF15,
+ .end = IRQ_PF15,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
+ },
+};
+
+static struct isp1760_platform_data isp1760_priv = {
+ .is_isp1761 = 0,
+ .port1_disable = 0,
+ .bus_width_16 = 1,
+ .port1_otg = 0,
+ .analog_oc = 0,
+ .dack_polarity_high = 0,
+ .dreq_polarity_high = 0,
+};
+
+static struct platform_device bfin_isp1760_device = {
+ .name = "isp1760-hcd",
+ .id = 0,
+ .dev = {
+ .platform_data = &isp1760_priv,
+ },
+ .num_resources = ARRAY_SIZE(bfin_isp1760_resources),
+ .resource = bfin_isp1760_resources,
+};
+#endif
+
+static struct resource bfin_i2c_pca_resources[] = {
+ {
+ .name = "pca9564-regs",
+ .start = 0x2C000000,
+ .end = 0x2C000000 + 16,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+ }, {
+
+ .start = IRQ_PF8,
+ .end = IRQ_PF8,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
+ },
+};
+
+struct i2c_pca9564_pf_platform_data pca9564_platform_data = {
+ .gpio = -1,
+ .i2c_clock_speed = 330000,
+ .timeout = 10000
+};
+
+/* PCA9564 I2C Bus driver */
+static struct platform_device bfin_i2c_pca_device = {
+ .name = "i2c-pca-platform",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_i2c_pca_resources),
+ .resource = bfin_i2c_pca_resources,
+ .dev = {
+ .platform_data = &pca9564_platform_data,
+ }
+};
+
+/* I2C devices fitted. */
+static struct i2c_board_info acvilon_i2c_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("ds1339", 0x68),
+ },
+ {
+ I2C_BOARD_INFO("tcn75", 0x49),
+ },
+};
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+static struct platdata_mtd_ram mtd_ram_data = {
+ .mapname = "rootfs(RAM)",
+ .bankwidth = 4,
+};
+
+static struct resource mtd_ram_resource = {
+ .start = 0x4000000,
+ .end = 0x5ffffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device mtd_ram_device = {
+ .name = "mtd-ram",
+ .id = 0,
+ .dev = {
+ .platform_data = &mtd_ram_data,
+ },
+ .num_resources = 1,
+ .resource = &mtd_ram_resource,
+};
+#endif
+
+#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#include <linux/smsc911x.h>
+static struct resource smsc911x_resources[] = {
+ {
+ .name = "smsc911x-memory",
+ .start = 0x28000000,
+ .end = 0x28000000 + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_PF7,
+ .end = IRQ_PF7,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
+ },
+};
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .flags = SMSC911X_USE_32BIT,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
+static struct platform_device smsc911x_device = {
+ .name = "smsc911x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smsc911x_resources),
+ .resource = smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc911x_config,
+ },
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+static struct resource bfin_uart0_resources[] = {
+ {
+ .start = BFIN_UART_THR,
+ .end = BFIN_UART_GCTL + 2,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_UART_RX,
+ .end = IRQ_UART_RX + 1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_UART_ERROR,
+ .end = IRQ_UART_ERROR,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = CH_UART_TX,
+ .end = CH_UART_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .start = CH_UART_RX,
+ .end = CH_UART_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+unsigned short bfin_uart0_peripherals[] = {
+ P_UART0_TX, P_UART0_RX, 0
+};
+
+static struct platform_device bfin_uart0_device = {
+ .name = "bfin-uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_uart0_resources),
+ .resource = bfin_uart0_resources,
+ .dev = {
+ /* Passed to driver */
+ .platform_data = &bfin_uart0_peripherals,
+ },
+};
+#endif
+#endif
+
+#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
+
+#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
+
+static struct mtd_partition bfin_plat_nand_partitions[] = {
+ {
+ .name = "params(nand)",
+ .size = 32 * 1024 * 1024,
+ .offset = 0,
+ }, {
+ .name = "userfs(nand)",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ },
+};
+#endif
+
+#define BFIN_NAND_PLAT_CLE 2
+#define BFIN_NAND_PLAT_ALE 3
+
+static void bfin_plat_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_CLE));
+ else
+ writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_ALE));
+}
+
+#define BFIN_NAND_PLAT_READY GPIO_PF10
+static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
+{
+ return gpio_get_value(BFIN_NAND_PLAT_READY);
+}
+
+static struct platform_nand_data bfin_plat_nand_data = {
+ .chip = {
+ .chip_delay = 30,
+#ifdef CONFIG_MTD_PARTITIONS
+ .part_probe_types = part_probes,
+ .partitions = bfin_plat_nand_partitions,
+ .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
+#endif
+ },
+ .ctrl = {
+ .cmd_ctrl = bfin_plat_nand_cmd_ctrl,
+ .dev_ready = bfin_plat_nand_dev_ready,
+ },
+};
+
+#define MAX(x, y) (x > y ? x : y)
+static struct resource bfin_plat_nand_resources = {
+ .start = 0x24000000,
+ .end = 0x24000000 + (1 << MAX(BFIN_NAND_PLAT_CLE, BFIN_NAND_PLAT_ALE)),
+ .flags = IORESOURCE_IO,
+};
+
+static struct platform_device bfin_async_nand_device = {
+ .name = "gen_nand",
+ .id = -1,
+ .num_resources = 1,
+ .resource = &bfin_plat_nand_resources,
+ .dev = {
+ .platform_data = &bfin_plat_nand_data,
+ },
+};
+
+static void bfin_plat_nand_init(void)
+{
+ gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
+}
+#else
+static void bfin_plat_nand_init(void)
+{
+}
+#endif
+
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+static struct mtd_partition bfin_spi_dataflash_partitions[] = {
+ {
+ .name = "bootloader",
+ .size = 0x4200,
+ .offset = 0,
+ .mask_flags = MTD_CAP_ROM},
+ {
+ .name = "u-boot",
+ .size = 0x42000,
+ .offset = MTDPART_OFS_APPEND,
+ },
+ {
+ .name = "u-boot(params)",
+ .size = 0x4200,
+ .offset = MTDPART_OFS_APPEND,
+ },
+ {
+ .name = "kernel",
+ .size = 0x294000,
+ .offset = MTDPART_OFS_APPEND,
+ },
+ {
+ .name = "params",
+ .size = 0x42000,
+ .offset = MTDPART_OFS_APPEND,
+ },
+ {
+ .name = "rootfs",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct flash_platform_data bfin_spi_dataflash_data = {
+ .name = "SPI Dataflash",
+ .parts = bfin_spi_dataflash_partitions,
+ .nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
+};
+
+/* DataFlash chip */
+static struct bfin5xx_spi_chip data_flash_chip_info = {
+ .enable_dma = 0, /* do not use dma transfer with this chip */
+ .bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+static struct bfin5xx_spi_chip spidev_chip_info = {
+ .enable_dma = 0,
+ .bits_per_word = 8,
+};
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+ [0] = {
+ .start = SPI0_REGBASE,
+ .end = SPI0_REGBASE + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = CH_SPI,
+ .end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* SPI controller data */
+static struct bfin5xx_spi_master bfin_spi0_info = {
+ .num_chipselect = 8,
+ .enable_dma = 1, /* master has the ability to do dma transfer */
+ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device bfin_spi0_device = {
+ .name = "bfin-spi",
+ .id = 0, /* Bus number */
+ .num_resources = ARRAY_SIZE(bfin_spi0_resource),
+ .resource = bfin_spi0_resource,
+ .dev = {
+ .platform_data = &bfin_spi0_info, /* Passed to driver */
+ },
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+ {
+ .modalias = "spidev",
+ .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0,
+ .chip_select = 3,
+ .controller_data = &spidev_chip_info,
+ },
+#endif
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+ { /* DataFlash chip */
+ .modalias = "mtd_dataflash",
+ .max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */
+ .bus_num = 0, /* Framework bus number */
+ .chip_select = 2, /* Framework chip select */
+ .platform_data = &bfin_spi_dataflash_data,
+ .controller_data = &data_flash_chip_info,
+ .mode = SPI_MODE_3,
+ },
+#endif
+};
+
+static struct resource bfin_gpios_resources = {
+ .start = 31,
+/* .end = MAX_BLACKFIN_GPIOS - 1, */
+ .end = 32,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct platform_device bfin_gpios_device = {
+ .name = "simple-gpio",
+ .id = -1,
+ .num_resources = 1,
+ .resource = &bfin_gpios_resources,
+};
+
+static const unsigned int cclk_vlev_datasheet[] = {
+ VRPAIR(VLEV_085, 250000000),
+ VRPAIR(VLEV_090, 300000000),
+ VRPAIR(VLEV_095, 313000000),
+ VRPAIR(VLEV_100, 350000000),
+ VRPAIR(VLEV_105, 400000000),
+ VRPAIR(VLEV_110, 444000000),
+ VRPAIR(VLEV_115, 450000000),
+ VRPAIR(VLEV_120, 475000000),
+ VRPAIR(VLEV_125, 500000000),
+ VRPAIR(VLEV_130, 600000000),
+};
+
+static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
+ .tuple_tab = cclk_vlev_datasheet,
+ .tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
+ .vr_settling_time = 25, /* us */
+};
+
+static struct platform_device bfin_dpmc = {
+ .name = "bfin dpmc",
+ .dev = {
+ .platform_data = &bfin_dmpc_vreg_data,
+ },
+};
+
+static struct platform_device *acvilon_devices[] __initdata = {
+ &bfin_dpmc,
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+ &bfin_spi0_device,
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+ &bfin_uart0_device,
+#endif
+#endif
+
+ &bfin_gpios_device,
+
+#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+ &smsc911x_device,
+#endif
+
+ &bfin_i2c_pca_device,
+
+#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
+ &bfin_async_nand_device,
+#endif
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+ &mtd_ram_device,
+#endif
+
+};
+
+static int __init acvilon_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s(): registering device resources\n", __func__);
+
+ bfin_plat_nand_init();
+ ret =
+ platform_add_devices(acvilon_devices, ARRAY_SIZE(acvilon_devices));
+ if (ret < 0)
+ return ret;
+
+ i2c_register_board_info(0, acvilon_i2c_devs,
+ ARRAY_SIZE(acvilon_i2c_devs));
+
+ bfin_write_FIO0_FLAG_C(1 << 14);
+ msleep(5);
+ bfin_write_FIO0_FLAG_S(1 << 14);
+
+ spi_register_board_info(bfin_spi_board_info,
+ ARRAY_SIZE(bfin_spi_board_info));
+ return 0;
+}
+
+arch_initcall(acvilon_init);
+
+static struct platform_device *acvilon_early_devices[] __initdata = {
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+ &bfin_uart0_device,
+#endif
+#endif
+};
+
+void __init native_machine_early_platform_add_devices(void)
+{
+ printk(KERN_INFO "register early platform devices\n");
+ early_platform_add_devices(acvilon_early_devices,
+ ARRAY_SIZE(acvilon_early_devices));
+}
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 9e2d8cfba54..ffd3e6a80d1 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -49,7 +49,7 @@ static struct isp1760_platform_data isp1760_priv = {
};
static struct platform_device bfin_isp1760_device = {
- .name = "isp1760-hcd",
+ .name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
@@ -159,28 +159,6 @@ static struct platform_device smc91x_device = {
};
#endif
-#if defined(CONFIG_AX88180) || defined(CONFIG_AX88180_MODULE)
-static struct resource ax88180_resources[] = {
- [0] = {
- .start = 0x2c000000,
- .end = 0x2c000000 + 0x8000,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_PF10,
- .end = IRQ_PF10,
- .flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL),
- },
-};
-
-static struct platform_device ax88180_device = {
- .name = "ax88180",
- .id = -1,
- .num_resources = ARRAY_SIZE(ax88180_resources),
- .resource = ax88180_resources,
-};
-#endif
-
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
static struct resource bfin_uart_resources[] = {
{
@@ -421,10 +399,6 @@ static struct platform_device *ezkit_devices[] __initdata = {
&smc91x_device,
#endif
-#if defined(CONFIG_AX88180) || defined(CONFIG_AX88180_MODULE)
- &ax88180_device,
-#endif
-
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
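
The ezkit.c rename from "isp1760-hcd" to "isp1760" matters because the platform bus binds a platform_device to a platform_driver by comparing the two name strings. A hedged fragment of the driver side for illustration only; the names here are illustrative and the real registration lives in the isp1760 driver:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int __devinit example_isp1760_probe(struct platform_device *pdev)
	{
		return 0;	/* illustrative stub */
	}

	static struct platform_driver example_isp1760_driver = {
		.probe	= example_isp1760_probe,
		.driver	= {
			.name	= "isp1760",	/* must match platform_device.name */
			.owner	= THIS_MODULE,
		},
	};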
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 1e60a92dd60..deb2271d09a 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -22,8 +22,8 @@
#define CMD_COREB_STOP 3
#define CMD_COREB_RESET 4
-static int
-coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long
+coreb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret = 0;
@@ -49,8 +49,8 @@ coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned l
}
static const struct file_operations coreb_fops = {
- .owner = THIS_MODULE,
- .ioctl = coreb_ioctl,
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = coreb_ioctl,
};
static struct miscdevice coreb_dev = {
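
The coreb.c hunk converts the old .ioctl hook, which was called with the big kernel lock held and took an inode argument, into .unlocked_ioctl, which drops the inode parameter and returns long. A minimal hedged sketch of the same conversion on a hypothetical driver, with the locking the handler may still need made explicit:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);	/* illustrative; not in the patch */

	static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
					   unsigned long arg)
	{
		long ret = 0;

		/* The BKL is no longer taken on entry, so shared state the
		 * handler touches must be serialized explicitly. */
		mutex_lock(&example_lock);
		switch (cmd) {
		default:
			ret = -EINVAL;
			break;
		}
		mutex_unlock(&example_lock);
		return ret;
	}

	static const struct file_operations example_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= example_unlocked_ioctl,
	};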
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index a31e509553f..4c8e36b7fb3 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -884,65 +884,11 @@
/* System MMR Register Bits */
/******************************************************************************* */
-/* ********************* PLL AND RESET MASKS ************************ */
-
-/* PLL_CTL Masks */
-#define PLL_CLKIN 0x00000000 /* Pass CLKIN to PLL */
-#define PLL_CLKIN_DIV2 0x00000001 /* Pass CLKIN/2 to PLL */
-#define PLL_OFF 0x00000002 /* Shut off PLL clocks */
-#define STOPCK_OFF 0x00000008 /* Core clock off */
-#define PDWN 0x00000020 /* Put the PLL in a Deep Sleep state */
-#define BYPASS 0x00000100 /* Bypass the PLL */
-
/* CHIPID Masks */
#define CHIPID_VERSION 0xF0000000
#define CHIPID_FAMILY 0x0FFFF000
#define CHIPID_MANUFACTURE 0x00000FFE
-/* VR_CTL Masks */
-#define FREQ 0x0003 /* Switching Oscillator Frequency For Regulator */
-#define HIBERNATE 0x0000 /* Powerdown/Bypass On-Board Regulation */
-#define FREQ_333 0x0001 /* Switching Frequency Is 333 kHz */
-#define FREQ_667 0x0002 /* Switching Frequency Is 667 kHz */
-#define FREQ_1000 0x0003 /* Switching Frequency Is 1 MHz */
-
-#define GAIN 0x000C /* Voltage Level Gain */
-#define GAIN_5 0x0000 /* GAIN = 5*/
-#define GAIN_10 0x0004 /* GAIN = 1*/
-#define GAIN_20 0x0008 /* GAIN = 2*/
-#define GAIN_50 0x000C /* GAIN = 5*/
-
-#define VLEV 0x00F0 /* Internal Voltage Level */
-#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */
-#define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */
-#define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */
-#define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */
-#define VLEV_105 0x00A0 /* VLEV = 1.05 V (-5% - +10% Accuracy) */
-#define VLEV_110 0x00B0 /* VLEV = 1.10 V (-5% - +10% Accuracy) */
-#define VLEV_115 0x00C0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */
-#define VLEV_120 0x00D0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */
-#define VLEV_125 0x00E0 /* VLEV = 1.25 V (-5% - +10% Accuracy) */
-#define VLEV_130 0x00F0 /* VLEV = 1.30 V (-5% - +10% Accuracy) */
-
-#define WAKE 0x0100 /* Enable RTC/Reset Wakeup From Hibernate */
-#define SCKELOW 0x8000 /* Do Not Drive SCKE High During Reset After Hibernate */
-
-/* PLL_DIV Masks */
-#define SCLK_DIV(x) (x) /* SCLK = VCO / x */
-
-#define CSEL 0x30 /* Core Select */
-#define SSEL 0xf /* System Select */
-#define CCLK_DIV1 0x00000000 /* CCLK = VCO / 1 */
-#define CCLK_DIV2 0x00000010 /* CCLK = VCO / 2 */
-#define CCLK_DIV4 0x00000020 /* CCLK = VCO / 4 */
-#define CCLK_DIV8 0x00000030 /* CCLK = VCO / 8 */
-
-/* PLL_STAT Masks */
-#define ACTIVE_PLLENABLED 0x0001 /* Processor In Active Mode With PLL Enabled */
-#define FULL_ON 0x0002 /* Processor In Full On Mode */
-#define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */
-#define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */
-
/* SICA_SYSCR Masks */
#define COREB_SRAM_INIT 0x0020
@@ -1150,53 +1096,6 @@
/* ********** DMA CONTROLLER MASKS *********************8 */
-/* DMAx_CONFIG, MDMA_yy_CONFIG, IMDMA_yy_CONFIG Masks */
-#define DMAEN 0x00000001 /* Channel Enable */
-#define WNR 0x00000002 /* Channel Direction (W/R*) */
-#define WDSIZE_8 0x00000000 /* Word Size 8 bits */
-#define WDSIZE_16 0x00000004 /* Word Size 16 bits */
-#define WDSIZE_32 0x00000008 /* Word Size 32 bits */
-#define DMA2D 0x00000010 /* 2D/1D* Mode */
-#define RESTART 0x00000020 /* Restart */
-#define DI_SEL 0x00000040 /* Data Interrupt Select */
-#define DI_EN 0x00000080 /* Data Interrupt Enable */
-#define NDSIZE_0 0x0000 /* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1 0x0100 /* Next Descriptor Size = 1 */
-#define NDSIZE_2 0x0200 /* Next Descriptor Size = 2 */
-#define NDSIZE_3 0x0300 /* Next Descriptor Size = 3 */
-#define NDSIZE_4 0x0400 /* Next Descriptor Size = 4 */
-#define NDSIZE_5 0x0500 /* Next Descriptor Size = 5 */
-#define NDSIZE_6 0x0600 /* Next Descriptor Size = 6 */
-#define NDSIZE_7 0x0700 /* Next Descriptor Size = 7 */
-#define NDSIZE_8 0x0800 /* Next Descriptor Size = 8 */
-#define NDSIZE_9 0x0900 /* Next Descriptor Size = 9 */
-#define NDSIZE 0x00000900 /* Next Descriptor Size */
-#define DMAFLOW 0x00007000 /* Flow Control */
-#define DMAFLOW_STOP 0x0000 /* Stop Mode */
-#define DMAFLOW_AUTO 0x1000 /* Autobuffer Mode */
-#define DMAFLOW_ARRAY 0x4000 /* Descriptor Array Mode */
-#define DMAFLOW_SMALL 0x6000 /* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE 0x7000 /* Large Model Descriptor List Mode */
-
-#define DMAEN_P 0 /* Channel Enable */
-#define WNR_P 1 /* Channel Direction (W/R*) */
-#define DMA2D_P 4 /* 2D/1D* Mode */
-#define RESTART_P 5 /* Restart */
-#define DI_SEL_P 6 /* Data Interrupt Select */
-#define DI_EN_P 7 /* Data Interrupt Enable */
-
-/* DMAx_IRQ_STATUS, MDMA_yy_IRQ_STATUS, IMDMA_yy_IRQ_STATUS Masks */
-
-#define DMA_DONE 0x00000001 /* DMA Done Indicator */
-#define DMA_ERR 0x00000002 /* DMA Error Indicator */
-#define DFETCH 0x00000004 /* Descriptor Fetch Indicator */
-#define DMA_RUN 0x00000008 /* DMA Running Indicator */
-
-#define DMA_DONE_P 0 /* DMA Done Indicator */
-#define DMA_ERR_P 1 /* DMA Error Indicator */
-#define DFETCH_P 2 /* Descriptor Fetch Indicator */
-#define DMA_RUN_P 3 /* DMA Running Indicator */
-
/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP, IMDMA_yy_PERIPHERAL_MAP Masks */
#define CTYPE 0x00000040 /* DMA Channel Type Indicator */
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index 510f5764149..0192532e96a 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -52,8 +52,6 @@ int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
- local_irq_disable();
-
/* Clone setup for peripheral interrupt sources from CoreA. */
bfin_write_SICB_IMASK0(bfin_read_SICA_IMASK0());
bfin_write_SICB_IMASK1(bfin_read_SICA_IMASK1());
@@ -70,11 +68,6 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
bfin_write_SICB_IAR7(bfin_read_SICA_IAR7());
SSYNC();
- local_irq_enable();
-
- /* Calibrate loops per jiffy value. */
- calibrate_delay();
-
/* Store CPU-private information to the cpu_data array. */
bfin_setup_cpudata(cpu);
@@ -108,9 +101,13 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
barrier();
}
- spin_unlock(&boot_lock);
-
- return cpu_isset(cpu, cpu_callin_map) ? 0 : -ENOSYS;
+ if (cpu_isset(cpu, cpu_callin_map)) {
+ cpu_set(cpu, cpu_online_map);
+ /* release the lock and let coreb run */
+ spin_unlock(&boot_lock);
+ return 0;
+ } else
+ panic("CPU%u: processor failed to boot\n", cpu);
}
void __init platform_request_ipi(irq_handler_t handler)
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index ef6870e9eea..d5cfe611b77 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -13,6 +13,7 @@
#include <asm/dma.h>
#include <asm/clocks.h>
#include <asm/mem_init.h>
+#include <asm/dpmc.h>
#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
#define PLL_CTL_VAL \
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 01506504e6d..77758289725 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -13,7 +13,7 @@
#include <linux/fs.h>
#include <asm/blackfin.h>
#include <asm/time.h>
-
+#include <asm/dpmc.h>
/* this is the table of CCLK frequencies, in Hz */
/* .index is the entry in the auxiliary dpm_state_table[] */
@@ -138,7 +138,8 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
dpm_state_table[index].tscale);
}
- policy->cpuinfo.transition_latency = (bfin_read_PLL_LOCKCNT() / (sclk / 1000000)) * 1000;
+ policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
+
/* Now, only support one cpu */
policy->cur = cclk;
cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
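
cpuinfo.transition_latency is specified in nanoseconds, so the hardcoded 50000 above is exactly the "50us assumed" from the comment (50 * 1000 ns). A small hedged helper making the unit conversion explicit; the helper name is illustrative and not part of the patch:

	#include <linux/cpufreq.h>

	static void example_set_transition_latency(struct cpufreq_policy *policy,
						   unsigned int latency_us)
	{
		/* the cpufreq core expects nanoseconds here */
		policy->cpuinfo.transition_latency = latency_us * 1000;
	}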
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 8009a512fb1..b0371689605 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -404,6 +404,21 @@ ENTRY(_do_hibernate)
PM_SYS_PUSH(EBIU_FCTL)
#endif
+#ifdef PORTCIO_FER
+ PM_SYS_PUSH16(PORTCIO_DIR)
+ PM_SYS_PUSH16(PORTCIO_INEN)
+ PM_SYS_PUSH16(PORTCIO)
+ PM_SYS_PUSH16(PORTCIO_FER)
+ PM_SYS_PUSH16(PORTDIO_DIR)
+ PM_SYS_PUSH16(PORTDIO_INEN)
+ PM_SYS_PUSH16(PORTDIO)
+ PM_SYS_PUSH16(PORTDIO_FER)
+ PM_SYS_PUSH16(PORTEIO_DIR)
+ PM_SYS_PUSH16(PORTEIO_INEN)
+ PM_SYS_PUSH16(PORTEIO)
+ PM_SYS_PUSH16(PORTEIO_FER)
+#endif
+
PM_SYS_PUSH16(SYSCR)
/* Save Core MMRs */
@@ -716,6 +731,21 @@ ENTRY(_do_hibernate)
P0.L = lo(PLL_CTL);
PM_SYS_POP16(SYSCR)
+#ifdef PORTCIO_FER
+ PM_SYS_POP16(PORTEIO_FER)
+ PM_SYS_POP16(PORTEIO)
+ PM_SYS_POP16(PORTEIO_INEN)
+ PM_SYS_POP16(PORTEIO_DIR)
+ PM_SYS_POP16(PORTDIO_FER)
+ PM_SYS_POP16(PORTDIO)
+ PM_SYS_POP16(PORTDIO_INEN)
+ PM_SYS_POP16(PORTDIO_DIR)
+ PM_SYS_POP16(PORTCIO_FER)
+ PM_SYS_POP16(PORTCIO)
+ PM_SYS_POP16(PORTCIO_INEN)
+ PM_SYS_POP16(PORTCIO_DIR)
+#endif
+
#ifdef EBIU_FCTL
PM_SYS_POP(EBIU_FCTL)
PM_SYS_POP(EBIU_MODE)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a50637a8b9b..b0ed0b487ff 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -713,6 +713,8 @@ ENTRY(_system_call)
cc = BITTST(r7, TIF_RESTORE_SIGMASK);
if cc jump .Lsyscall_do_signals;
cc = BITTST(r7, TIF_SIGPENDING);
+ if cc jump .Lsyscall_do_signals;
+ cc = BITTST(r7, TIF_NOTIFY_RESUME);
if !cc jump .Lsyscall_really_exit;
.Lsyscall_do_signals:
/* Reenable interrupts. */
@@ -721,7 +723,7 @@ ENTRY(_system_call)
r0 = sp;
SP += -12;
- call _do_signal;
+ call _do_notify_resume;
SP += 12;
.Lsyscall_really_exit:
@@ -1422,7 +1424,7 @@ ENTRY(_sys_call_table)
.long _sys_ni_syscall /* streams2 */
.long _sys_vfork /* 190 */
.long _sys_getrlimit
- .long _sys_mmap2
+ .long _sys_mmap_pgoff
.long _sys_truncate64
.long _sys_ftruncate64
.long _sys_stat64 /* 195 */
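
The entry.S hunk makes the syscall-exit path branch to _do_notify_resume whenever TIF_SIGPENDING, TIF_RESTORE_SIGMASK, or TIF_NOTIFY_RESUME is set, instead of calling _do_signal for signals only. A simplified sketch of what such a C entry point typically looks like in this series; the Blackfin version lives in arch/blackfin/kernel/signal.c, and the body below is an approximation, not a verbatim copy:

	#include <linux/ptrace.h>
	#include <linux/tracehook.h>

	static void do_signal(struct pt_regs *regs);	/* the arch's existing handler */

	asmlinkage void do_notify_resume(struct pt_regs *regs)
	{
		if (test_thread_flag(TIF_SIGPENDING) ||
		    test_thread_flag(TIF_RESTORE_SIGMASK))
			do_signal(regs);

		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);	/* deferred non-signal work */
		}
	}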
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 660ea1bec54..1873b2c1fed 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -25,11 +25,20 @@
#include <asm/blackfin.h>
#include <asm/gpio.h>
#include <asm/irq_handler.h>
+#include <asm/dpmc.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/bfin_sport.h>
#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
#ifdef BF537_FAMILY
# define BF537_GENERIC_ERROR_INT_DEMUX
+# define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE) /* SPI_STAT */
+# define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF) /* SPORT_STAT */
+# define PPI_ERR_MASK (0xFFFF & ~FLD) /* PPI_STATUS */
+# define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE) /* EMAC_SYSTAT */
+# define UART_ERR_MASK (0x6) /* UART_IIR */
+# define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF) /* CAN_GIF */
#else
# undef BF537_GENERIC_ERROR_INT_DEMUX
#endif
@@ -324,11 +333,9 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
irq = IRQ_CAN_ERROR;
else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
irq = IRQ_SPI_ERROR;
- else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) &&
- (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0))
+ else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
irq = IRQ_UART0_ERROR;
- else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) &&
- (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0))
+ else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
irq = IRQ_UART1_ERROR;
if (irq) {
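
The UART error test above changes from two separate IIR reads ANDed together to a single read compared against UART_ERR_MASK (0x6), i.e. "both status bits set". A small illustration of the mask-and-compare idiom; the helper name is illustrative:

	#define EXAMPLE_UART_ERR_MASK 0x6	/* same value as UART_ERR_MASK above */

	static inline int example_uart_iir_is_error(unsigned int iir)
	{
		/* true only when *both* mask bits are set:
		 * iir = 0x6 -> 1, iir = 0x2 -> 0, iir = 0x4 -> 0 */
		return (iir & EXAMPLE_UART_ERR_MASK) == EXAMPLE_UART_ERR_MASK;
	}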
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index d92b168c832..369e687582b 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -336,13 +336,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
ret = platform_boot_secondary(cpu, idle);
- if (ret) {
- cpu_clear(cpu, cpu_present_map);
- printk(KERN_CRIT "CPU%u: processor failed to boot (%d)\n", cpu, ret);
- free_task(idle);
- } else
- cpu_set(cpu, cpu_online_map);
-
secondary_stack = NULL;
return ret;
@@ -418,9 +411,16 @@ void __cpuinit secondary_start_kernel(void)
setup_secondary(cpu);
+ platform_secondary_init(cpu);
+
local_irq_enable();
- platform_secondary_init(cpu);
+ /*
+ * Calibrate loops per jiffy value.
+ * IRQs need to be enabled here - D-cache can be invalidated
+ * in timer irq handler, so core B can read correct jiffies.
+ */
+ calibrate_delay();
cpu_idle();
}
diff --git a/arch/cris/arch-v32/kernel/head.S b/arch/cris/arch-v32/kernel/head.S
index 3db478eb515..76266f80a5f 100644
--- a/arch/cris/arch-v32/kernel/head.S
+++ b/arch/cris/arch-v32/kernel/head.S
@@ -10,7 +10,6 @@
* The macros found in mmu_defs_asm.h uses the ## concatenation operator, so
* -traditional must not be used when assembling this file.
*/
-#include <linux/autoconf.h>
#include <arch/memmap.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/intr_vect.h>
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c..f171a6600fb 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
: "memory");
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
/*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
*
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
rw->lock++;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock != 0) {
rw->lock--;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
rw->lock = 0;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return 1;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_ARCH_SPINLOCK_H */
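
The spinlock.h hunk is part of the tree-wide rename from __raw_spin_*/raw_spinlock_t to arch_spin_*/arch_spinlock_t, after which the generic locking layer owns the raw_ names. A condensed sketch of how the layers stack once the rename is in place; this is a simplification of the generic headers in this series, not a verbatim copy:

	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;	/* the arch-provided lock word */
		/* lockdep/debug fields elided */
	} raw_spinlock_t;

	static inline void do_raw_spin_lock(raw_spinlock_t *lock)
	{
		arch_spin_lock(&lock->raw_lock);	/* arch_* is the arch contract */
	}

	static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
	{
		arch_spin_unlock(&lock->raw_lock);
	}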
diff --git a/arch/cris/include/asm/asm-offsets.h b/arch/cris/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/cris/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h
index 0f51b10b9f4..8a3d8e2b33c 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/asm/elf.h
@@ -64,8 +64,6 @@ typedef unsigned long elf_fpregset_t;
#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
/* End of excerpt from {binutils}/include/elf/cris.h. */
-#define USE_ELF_CORE_DUMP
-
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/cris/kernel/asm-offsets.c b/arch/cris/kernel/asm-offsets.c
index ddd6fbbe75d..dd7b8e98322 100644
--- a/arch/cris/kernel/asm-offsets.c
+++ b/arch/cris/kernel/asm-offsets.c
@@ -1,6 +1,5 @@
#include <linux/sched.h>
#include <asm/thread_info.h>
-#include <linux/autoconf.h>
/*
* Generate definitions needed by assembly language modules.
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 0ca7d9892cc..b5ce0724a88 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c
index 2ad962c7e88..c2bbb1ac98a 100644
--- a/arch/cris/kernel/sys_cris.c
+++ b/arch/cris/kernel/sys_cris.c
@@ -26,31 +26,6 @@
#include <asm/uaccess.h>
#include <asm/segment.h>
-/* common code for old and new mmaps */
-static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
asmlinkage unsigned long old_mmap(unsigned long __user *args)
{
unsigned long buffer[6];
@@ -63,7 +38,7 @@ asmlinkage unsigned long old_mmap(unsigned long __user *args)
if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */
goto out;
- err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3],
+ err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3],
buffer[4], buffer[5] >> PAGE_SHIFT);
out:
return err;
@@ -73,7 +48,8 @@ asmlinkage long
sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd, unsigned long pgoff)
{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
+ /* bug(?): 8Kb pages here */
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/*
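
This and the following per-arch hunks delete each architecture's private do_mmap2()/sys_mmap2() helper and route the call through the generic sys_mmap_pgoff() added to the core mm code in this series. A simplified sketch of what that common entry point does, with the hugetlb special case omitted; an approximation, not a verbatim copy:

	#include <linux/file.h>
	#include <linux/mm.h>
	#include <linux/mman.h>
	#include <linux/syscalls.h>

	SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		struct file *file = NULL;
		unsigned long retval = -EBADF;

		if (!(flags & MAP_ANONYMOUS)) {
			file = fget(fd);
			if (!file)
				goto out;
		}

		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);

		if (file)
			fput(file);
	out:
		return retval;
	}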
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index bbfda67d290..d49d17d2a14 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -8,7 +8,6 @@
* the kernel has booted.
*/
-#include <linux/autoconf.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
diff --git a/arch/frv/include/asm/asm-offsets.h b/arch/frv/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/frv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/frv/include/asm/elf.h b/arch/frv/include/asm/elf.h
index 7bbf6e47f8c..c3819804a74 100644
--- a/arch/frv/include/asm/elf.h
+++ b/arch/frv/include/asm/elf.h
@@ -115,7 +115,6 @@ do { \
__kernel_frame0_ptr->gr29 = 0; \
} while(0)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
#define ELF_EXEC_PAGESIZE 16384
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index af3e824b91b..62d1aba615d 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
seq_printf(p, "%3d: ", i);
@@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
}
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index 55e4fab7c0b..75cf7f4b2fa 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
index 2b6b5289cdc..1d3d4c9e252 100644
--- a/arch/frv/kernel/sys_frv.c
+++ b/arch/frv/kernel/sys_frv.c
@@ -31,9 +31,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
- int error = -EBADF;
- struct file * file = NULL;
-
/* As with sparc32, make sure the shift for mmap2 is constant
(12), no matter what PAGE_SIZE we have.... */
@@ -41,69 +38,10 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
trying to map something we can't */
if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
return -EINVAL;
- pgoff >>= PAGE_SHIFT - 12;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
-#if 0 /* DAVIDM - do we want this */
-struct mmap_arg_struct64 {
- __u32 addr;
- __u32 len;
- __u32 prot;
- __u32 flags;
- __u64 offset; /* 64 bits */
- __u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
- int error = -EFAULT;
- struct file * file = NULL;
- struct mmap_arg_struct64 a;
- unsigned long pgoff;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
-
- if ((long)a.offset & ~PAGE_MASK)
- return -EINVAL;
-
- pgoff = a.offset >> PAGE_SHIFT;
- if ((a.offset >> PAGE_SHIFT) != pgoff)
- return -EINVAL;
-
- if (!(a.flags & MAP_ANONYMOUS)) {
- error = -EBADF;
- file = fget(a.fd);
- if (!file)
- goto out;
- }
- a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
-out:
- return error;
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
}
-#endif
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 9420648352b..53cc669e6d5 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -10,6 +10,10 @@ config H8300
default y
select HAVE_IDE
+config SYMBOL_PREFIX
+ string
+ default "_"
+
config MMU
bool
default n
diff --git a/arch/h8300/include/asm/asm-offsets.h b/arch/h8300/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/h8300/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h
index 94e2284c881..c24fa250d65 100644
--- a/arch/h8300/include/asm/elf.h
+++ b/arch/h8300/include/asm/elf.h
@@ -34,7 +34,6 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLAT_INIT(_r) _r->er1 = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/h8300/include/asm/module.h b/arch/h8300/include/asm/module.h
index de23231f319..8e46724b7c0 100644
--- a/arch/h8300/include/asm/module.h
+++ b/arch/h8300/include/asm/module.h
@@ -8,6 +8,4 @@ struct mod_arch_specific { };
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
-#define MODULE_SYMBOL_PREFIX "_"
-
#endif /* _ASM_H8/300_MODULE_H */
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 5c913d47211..c25dc2c2b1d 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_puts(p, " CPU0");
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
@@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
index 8cb5d73a0e3..b5969db0ca1 100644
--- a/arch/h8300/kernel/sys_h8300.c
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -26,39 +26,6 @@
#include <asm/traps.h>
#include <asm/unistd.h>
-/* common code for old and new mmaps */
-static inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
@@ -87,57 +54,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
if (a.offset & ~PAGE_MASK)
goto out;
- a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
-out:
- return error;
-}
-
-#if 0 /* DAVIDM - do we want this */
-struct mmap_arg_struct64 {
- __u32 addr;
- __u32 len;
- __u32 prot;
- __u32 flags;
- __u64 offset; /* 64 bits */
- __u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
- int error = -EFAULT;
- struct file * file = NULL;
- struct mmap_arg_struct64 a;
- unsigned long pgoff;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
-
- if ((long)a.offset & ~PAGE_MASK)
- return -EINVAL;
-
- pgoff = a.offset >> PAGE_SHIFT;
- if ((a.offset >> PAGE_SHIFT) != pgoff)
- return -EINVAL;
-
- if (!(a.flags & MAP_ANONYMOUS)) {
- error = -EBADF;
- file = fget(a.fd);
- if (!file)
- goto out;
- }
- a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
out:
return error;
}
-#endif
struct sel_arg_struct {
unsigned long n;
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index 4eb67faac63..2d69881eda6 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -206,7 +206,7 @@ SYMBOL_NAME_LABEL(sys_call_table)
.long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
.long SYMBOL_NAME(sys_vfork) /* 190 */
.long SYMBOL_NAME(sys_getrlimit)
- .long SYMBOL_NAME(sys_mmap2)
+ .long SYMBOL_NAME(sys_mmap_pgoff)
.long SYMBOL_NAME(sys_truncate64)
.long SYMBOL_NAME(sys_ftruncate64)
.long SYMBOL_NAME(sys_stat64) /* 195 */
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
index b9e24907e6e..03d356d96e5 100644
--- a/arch/h8300/kernel/vmlinux.lds.S
+++ b/arch/h8300/kernel/vmlinux.lds.S
@@ -1,4 +1,3 @@
-#define VMLINUX_SYMBOL(_sym_) _##_sym_
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942..2d7f56a98e0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
bool
default y
-config HAVE_LEGACY_PER_CPU_AREA
- def_bool y
-
config HAVE_SETUP_PER_CPU_AREA
def_bool y
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index e7cbaa02cd0..475e2725fbd 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -103,4 +103,4 @@ archprepare: make_nr_irqs_h FORCE
PHONY += make_nr_irqs_h FORCE
make_nr_irqs_h: FORCE
- $(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h
+ $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index f332e3fe423..e14c492a8a9 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
spin_unlock_irqrestore(&ioc->saved_lock, flags);
pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
- if (unlikely(pide >= (ioc->res_size << 3)))
- panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
- ioc->ioc_hpa);
+ if (unlikely(pide >= (ioc->res_size << 3))) {
+ printk(KERN_WARNING "%s: I/O MMU @ %p is "
+ "out of mapping resources, %u %u %lx\n",
+ __func__, ioc->ioc_hpa, ioc->res_size,
+ pages_needed, dma_get_seg_boundary(dev));
+ return -1;
+ }
#else
- panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
- ioc->ioc_hpa);
+ printk(KERN_WARNING "%s: I/O MMU @ %p is "
+ "out of mapping resources, %u %u %lx\n",
+ __func__, ioc->ioc_hpa, ioc->res_size,
+ pages_needed, dma_get_seg_boundary(dev));
+ return -1;
#endif
}
}
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
#endif
pide = sba_alloc_range(ioc, dev, size);
+ if (pide < 0)
+ return 0;
iovp = (dma_addr_t) pide << iovp_shift;
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
+ int idx;
while (nents > 0) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
vcontig_sg->dma_length = vcontig_len;
dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
ASSERT(dma_len <= DMA_CHUNK_SIZE);
- dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
- | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
- | dma_offset);
+ idx = sba_alloc_range(ioc, dev, dma_len);
+ if (idx < 0) {
+ dma_sg->dma_length = 0;
+ return -1;
+ }
+ dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+ | dma_offset);
n_mappings++;
}
return n_mappings;
}
-
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
/**
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
** Access to the virtual address is what forces a two pass algorithm.
*/
coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+ if (coalesced < 0) {
+ sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+ return 0;
+ }
/*
** Program the I/O Pdir
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 9a3abf58cea..65772574261 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -11,8 +11,6 @@
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
-#define USE_ELF_CORE_DUMP 1
-
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 429ec968c9e..045b746b980 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -858,6 +858,9 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
prot = get_prot32(prot);
+ if (flags & MAP_HUGETLB)
+ return -ENOMEM;
+
#if PAGE_SHIFT > IA32_PAGE_SHIFT
mutex_lock(&ia32_mmap_mutex);
{
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9f..6ebc229a1c5 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
* @addr: Address to start counting from
*
* Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
*/
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e..7d09a09cdaa 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
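
The "- 1" added above fixes an off-by-one: dma_capable() must test the address of the last byte actually accessed, not one past the end of the buffer. A small standalone worked example with illustrative values:

	#include <stdbool.h>
	#include <stdint.h>

	static bool capable_old(uint64_t addr, uint64_t size, uint64_t mask)
	{
		return addr + size <= mask;		/* one past the end */
	}

	static bool capable_new(uint64_t addr, uint64_t size, uint64_t mask)
	{
		return addr + size - 1 <= mask;		/* last byte accessed */
	}

	/* addr = 0xfffff000, size = 0x1000, mask = 0xffffffff (32-bit device):
	 *   old: 0x100000000 <= 0xffffffff -> false, buffer wrongly rejected
	 *   new: 0x0ffffffff <= 0xffffffff -> true,  buffer is reachable
	 */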
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029c..e14108b19c0 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b31dbf..bf2e37493e0 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
extern int ia64_first_device_vector;
extern int ia64_last_device_vector;
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31
+#else
#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
+#endif
#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e2d94..cc8335eb311 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
/*
* String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 5282546cdf8..91b920fd7d5 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <linux/cpumask.h>
-#include <asm-ia64/nr-irqs.h>
+#include <generated/nr-irqs.h>
static __inline__ int
irq_canonicalize (int irq)
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf0a78..43f96ab18fa 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
unsigned long os_status; /* OS status to SAL, enum below */
unsigned long context; /* 0 if return to same context
1 if return to new context */
+
+ /* I-resources */
+ unsigned long iip;
+ unsigned long ipsr;
+ unsigned long ifs;
};
enum {
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017..61c7b1750b1 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
- extern unsigned long vmalloc_end;
+ extern unsigned long VMALLOC_END;
extern struct page *vmem_map;
extern int find_largest_hole(u64 start, u64 end, void *arg);
extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf4..6a8a27cfae3 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
#include <asm/mmzone.h>
-#define NUMA_NO_NODE -1
-
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e..69bf13857a9 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END vmalloc_end
- extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef..7fa90f73f6b 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
#endif
};
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
/*
* The "local" data variable. It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
* Do not use the address of local_cpu_data, since it will be different from
* cpu_data(smp_processor_id())!
*/
-#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
+#define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu))
extern void print_cpu_info (struct cpuinfo_ia64 *);
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b1578..e8762688e8e 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516..1a91c9121d1 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
#include <asm/intrinsics.h>
#include <asm/system.h>
-#define __raw_spin_lock_init(x) ((x)->lock = 0)
+#define arch_spin_lock_init(x) ((x)->lock = 0)
/*
* Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
#define TICKET_BITS 15
#define TICKET_MASK ((1 << TICKET_BITS) - 1)
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket, serve;
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
}
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->lock);
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return 0;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
{
int *p = (int *)&lock->lock, ticket;
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
}
}
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
__ticket_spin_unlock_wait(lock);
}
-#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "p6", "p7", "r2", "memory");
}
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -188,16 +188,16 @@ do { \
#endif /* !ASM_SUPPORTED */
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
register long result; \
\
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
(result == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#else /* !ASM_SUPPORTED */
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
} while (ia64_val); \
})
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
__u64 ia64_val; \
__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
{
union {
- raw_rwlock_t lock;
+ arch_rwlock_t lock;
__u32 word;
} old, new;
old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_IA64_SPINLOCK_H */
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4..e2b42a52a6d 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
#endif
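The __raw_*/raw_* to arch_* renames in the two headers above track the core locking split between the generic raw spinlock wrapper and the architecture-provided arch_spinlock_t it embeds. A simplified sketch of that layering, as assumed here (example_* types are made up; this is not the actual generic header):

/* Illustrative sketch only: how a generic wrapper is expected to sit on top of
 * the renamed arch_spinlock_t / arch_spin_*() primitives. */
typedef struct example_raw_spinlock {
	arch_spinlock_t raw_lock;	/* starts as __ARCH_SPIN_LOCK_UNLOCKED */
} example_raw_spinlock_t;

static inline void example_raw_spin_lock(example_raw_spinlock_t *lock)
{
	arch_spin_lock(&lock->raw_lock);	/* formerly __raw_spin_lock() */
}

static inline void example_raw_spin_unlock(example_raw_spinlock_t *lock)
{
	arch_spin_unlock(&lock->raw_lock);	/* formerly __raw_spin_unlock() */
}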
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54501e..67455c2ed2b 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
#include <xen/interface/xen.h>
#include <xen/interface/version.h> /* to compile feature.c */
#include <xen/features.h> /* to compile xen-netfront.c */
+#include <xen/xen.h>
#include <asm/xen/hypercall.h>
-/* xen_domain_type is set before executing any C code by early_xen_setup */
-enum xen_domain_type {
- XEN_NATIVE, /* running on bare hardware */
- XEN_PV_DOMAIN, /* running in a PV domain */
- XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type XEN_NATIVE
-#endif
-
-#define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain() && \
- xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain() (xen_domain() && \
- xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#define xen_initial_domain() (xen_pv_domain() && \
- (xen_start_info->flags & SIF_INITDOMAIN))
-#else
-#define xen_initial_domain() (0)
-#endif
-
-
#ifdef CONFIG_XEN
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 6b7edcab0cb..2a75e937ae8 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -81,17 +81,14 @@ define cmd_nr_irqs
endef
# We use internal kbuild rules to avoid the "is up to date" message from make
-arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \
- $(wildcard $(srctree)/include/asm-ia64/*/irq.h)
+arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
-include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
+include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
$(Q)mkdir -p $(dir $@)
$(call cmd,nr_irqs)
-clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
-
#
# native ivt.S, entry.S and fsys.S
#
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f..40574ae1140 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
printk(KERN_ERR PREFIX
"Error parsing MADT - no LAPIC entries\n");
+#ifdef CONFIG_SMP
+ if (available_cpus == 0) {
+ printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+ printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+ smp_boot_data.cpu_phys_id[available_cpus] =
+ hard_smp_processor_id();
+ available_cpus = 1; /* We've got at least one of these, no? */
+ }
+ smp_boot_data.cpu_count = available_cpus;
+#endif
+ /* Make boot-up look pretty */
+ printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+ total_cpus);
+
return 0;
}
-
-
int __init acpi_boot_init(void)
{
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
printk(KERN_ERR PREFIX "Can't find FADT\n");
+#ifdef CONFIG_ACPI_NUMA
#ifdef CONFIG_SMP
- if (available_cpus == 0) {
- printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
- printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
- smp_boot_data.cpu_phys_id[available_cpus] =
- hard_smp_processor_id();
- available_cpus = 1; /* We've got at least one of these, no? */
- }
- smp_boot_data.cpu_count = available_cpus;
-
- smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
if (srat_num_cpus == 0) {
int cpu, i = 1;
for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
node_cpuid[i++].phys_id =
smp_boot_data.cpu_phys_id[cpu];
}
-# endif
#endif
-#ifdef CONFIG_ACPI_NUMA
build_cpu_to_node_map();
#endif
- /* Make boot-up look pretty */
- printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
- total_cpus);
return 0;
}
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c..17a9fba3893 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
* intermediate precision so that we can produce a full 64-bit result.
*/
GLOBAL_ENTRY(ia64_native_sched_clock)
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
;;
ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
;;
ldf8 f8=[r8]
;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e30062..461b99902bf 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
#endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908..95ac77aeae9 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
goto unlock_iosapic_lock;
}
- spin_lock(&irq_desc[irq].lock);
+ raw_spin_lock(&irq_desc[irq].lock);
dest = get_target_cpu(gsi, irq);
dmode = choose_dmode();
err = register_intr(gsi, irq, dmode, polarity, trigger);
if (err < 0) {
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
irq = err;
goto unlock_iosapic_lock;
}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7..94ee9d067cb 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a..d4093a173a3 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu)
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
-#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
static enum vector_domain_type {
VECTOR_DOMAIN_NONE,
@@ -345,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
desc = irq_desc + irq;
cfg = irq_cfg + irq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
@@ -358,7 +357,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
return IRQ_HANDLED;
}
@@ -659,11 +658,8 @@ init_IRQ (void)
register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
- if (vector_domain_type != VECTOR_DOMAIN_NONE) {
- BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
- IA64_FIRST_DEVICE_VECTOR++;
+ if (vector_domain_type != VECTOR_DOMAIN_NONE)
register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
- }
#endif
#endif
#ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 496ac7a9948..32f2639e9b0 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -888,9 +888,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
}
static void
-finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
+finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
unsigned long *nat)
{
+ const pal_min_state_area_t *ms = sos->pal_min_state;
const u64 *bank;
/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
@@ -904,6 +905,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
regs->cr_iip = ms->pmsa_xip;
regs->cr_ipsr = ms->pmsa_xpsr;
regs->cr_ifs = ms->pmsa_xfs;
+
+ sos->iip = ms->pmsa_iip;
+ sos->ipsr = ms->pmsa_ipsr;
+ sos->ifs = ms->pmsa_ifs;
}
regs->pr = ms->pmsa_pr;
regs->b0 = ms->pmsa_br0;
@@ -1079,7 +1084,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
memcpy(old_regs, regs, sizeof(*regs));
old_regs->loadrs = loadrs;
old_unat = old_regs->ar_unat;
- finish_pt_regs(old_regs, ms, &old_unat);
+ finish_pt_regs(old_regs, sos, &old_unat);
/* Next stack a struct switch_stack. mca_asm.S built a partial
* switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1150,7 +1155,7 @@ no_mod:
mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
smp_processor_id(), type, msg);
old_unat = regs->ar_unat;
- finish_pt_regs(regs, ms, &old_unat);
+ finish_pt_regs(regs, sos, &old_unat);
return previous_current;
}
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d4..d5bdf9de36b 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 599b233bef7..5246285a95f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2200,7 +2200,7 @@ pfm_alloc_file(pfm_context_t *ctx)
{
struct file *file;
struct inode *inode;
- struct dentry *dentry;
+ struct path path;
char name[32];
struct qstr this;
@@ -2225,18 +2225,19 @@ pfm_alloc_file(pfm_context_t *ctx)
/*
* allocate a new dcache entry
*/
- dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
- if (!dentry) {
+ path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+ if (!path.dentry) {
iput(inode);
return ERR_PTR(-ENOMEM);
}
+ path.mnt = mntget(pfmfs_mnt);
- dentry->d_op = &pfmfs_dentry_operations;
- d_add(dentry, inode);
+ path.dentry->d_op = &pfmfs_dentry_operations;
+ d_add(path.dentry, inode);
- file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops);
+ file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
if (!file) {
- dput(dentry);
+ path_put(&path);
return ERR_PTR(-ENFILE);
}
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fb..c370e02f006 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
// purge all TC entries
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801..a1ea8791977 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+# ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
-#endif
+# endif
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
32 : cpus_weight(early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
# endif
-#else
-# ifdef CONFIG_SMP
- smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
-# endif
#endif /* CONFIG_APCI_BOOT */
+#ifdef CONFIG_SMP
+ smp_build_cpu_map();
+#endif
find_memory();
/* process SAL system table: */
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
}
/*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
- /* start_kernel() requires this... */
-}
-#endif
-
-/*
* Do the following calculations:
*
* 1. the max. cache line size.
@@ -980,7 +967,7 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() through the canonical per-CPU address.
*/
- cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 92ed83f3403..609d50056a6 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -100,51 +100,7 @@ sys_getpagesize (void)
asmlinkage unsigned long
ia64_brk (unsigned long brk)
{
- unsigned long rlim, retval, newbrk, oldbrk;
- struct mm_struct *mm = current->mm;
-
- /*
- * Most of this replicates the code in sys_brk() except for an additional safety
- * check and the clearing of r8. However, we can't call sys_brk() because we need
- * to acquire the mmap_sem before we can do the test...
- */
- down_write(&mm->mmap_sem);
-
- if (brk < mm->end_code)
- goto out;
- newbrk = PAGE_ALIGN(brk);
- oldbrk = PAGE_ALIGN(mm->brk);
- if (oldbrk == newbrk)
- goto set_brk;
-
- /* Always allow shrinking brk. */
- if (brk <= mm->brk) {
- if (!do_munmap(mm, newbrk, oldbrk-newbrk))
- goto set_brk;
- goto out;
- }
-
- /* Check against unimplemented/unmapped addresses: */
- if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
- goto out;
-
- /* Check against rlimit.. */
- rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
- if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
- goto out;
-
- /* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
- goto out;
-
- /* Ok, looks good - let it rip. */
- if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
- goto out;
-set_brk:
- mm->brk = brk;
-out:
- retval = mm->brk;
- up_write(&mm->mmap_sem);
+ unsigned long retval = sys_brk(brk);
force_successful_syscall_return();
return retval;
}
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
return 0;
}
-static inline unsigned long
-do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
-{
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- return -EBADF;
-
- if (!file->f_op || !file->f_op->mmap) {
- addr = -ENODEV;
- goto out;
- }
- }
-
- /* Careful about overflows.. */
- len = PAGE_ALIGN(len);
- if (!len || len > TASK_SIZE) {
- addr = -EINVAL;
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
-out: if (file)
- fput(file);
- return addr;
-}
-
/*
* mmap2() is like mmap() except that the offset is expressed in units
* of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces
@@ -226,7 +149,7 @@ out: if (file)
asmlinkage unsigned long
sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
{
- addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
+ addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo
if (offset_in_page(off) != 0)
return -EINVAL;
- addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
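With the open-coded do_mmap2() helper removed, the arch wrappers above only translate a byte offset into page units and then defer to the common sys_mmap_pgoff() entry point. A hedged sketch of the resulting pattern (example_sys_mmap is a made-up name mirroring the hunks above):

/* Illustrative sketch only: an arch mmap wrapper after the conversion to
 * the common sys_mmap_pgoff() entry point. */
asmlinkage unsigned long
example_sys_mmap(unsigned long addr, unsigned long len, int prot,
		 int flags, int fd, long off)
{
	if (offset_in_page(off) != 0)		/* offset must be page aligned */
		return -EINVAL;
	return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}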
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c98..1295ba327f6 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
}
#endif
+#ifdef CONFIG_SMP
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ __cpu0_per_cpu = .;
+ . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
+#endif
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
@@ -198,11 +204,6 @@ SECTIONS
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
{
-#ifdef CONFIG_SMP
- . = ALIGN(PERCPU_PAGE_SIZE);
- __cpu0_per_cpu = .;
- . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
-#endif
INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
index 0c3564a7a03..9324c875caf 100644
--- a/arch/ia64/kvm/asm-offsets.c
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -22,7 +22,6 @@
*
*/
-#include <linux/autoconf.h>
#include <linux/kvm_host.h>
#include <linux/kbuild.h>
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf29..54bf5405981 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
void * __cpuinit
per_cpu_init (void)
{
- int cpu;
- static int first_time=1;
+ static bool first_time = true;
+ void *cpu0_data = __cpu0_per_cpu;
+ unsigned int cpu;
+
+ if (!first_time)
+ goto skip;
+ first_time = false;
/*
- * get_free_pages() cannot be used before cpu_init() done. BSP
- * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
- * get_zeroed_page().
+ * get_free_pages() cannot be used before cpu_init() done.
+ * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+ * to avoid that AP calls get_zeroed_page().
*/
- if (first_time) {
- void *cpu0_data = __cpu0_per_cpu;
+ for_each_possible_cpu(cpu) {
+ void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
- first_time=0;
+ memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+ per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
- per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+ /*
+ * percpu area for cpu0 is moved from the __init area
+	 * which is set up by head.S and used until this point.
+	 * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and its
+ * virtual address isn't insanely far from other
+ * percpu areas which is important for congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+ (unsigned long)__per_cpu_start);
- for (cpu = 1; cpu < NR_CPUS; cpu++) {
- memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- }
+ cpu_data += PERCPU_PAGE_SIZE;
}
+skip:
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+ cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *gi;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int rc;
+
+ ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ gi = &ai->groups[0];
+
+ /* units are assigned consecutively to possible cpus */
+ for_each_possible_cpu(cpu)
+ gi->cpu_map[gi->nr_units++] = cpu;
+
+ /* set parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmalloc_end -= map_size;
- vmem_map = (struct page *) vmalloc_end;
+ VMALLOC_END -= map_size;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
/*
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d900..19c4b2195dc 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
int cpu;
for_each_possible_early_cpu(cpu) {
- if (cpu == 0) {
- void *cpu0_data = __cpu0_per_cpu;
- __per_cpu_offset[cpu] = (char*)cpu0_data -
- __per_cpu_start;
- } else if (node == node_cpuid[cpu].nid) {
- memcpy(__va(cpu_data), __phys_per_cpu_start,
- __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
- __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- }
+ void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+ if (node != node_cpuid[cpu].nid)
+ continue;
+
+ memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+ __per_cpu_start;
+
+ /*
+ * percpu area for cpu0 is moved from the __init area
+	 * which is set up by head.S and used until this point.
+	 * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and its
+ * virtual address isn't insanely far from other
+ * percpu areas which is important for congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA,
+ (unsigned long)cpu_data -
+ (unsigned long)__per_cpu_start);
+
+ cpu_data += PERCPU_PAGE_SIZE;
}
#endif
return cpu_data;
}
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *uninitialized_var(gi);
+ unsigned int *cpu_map;
+ void *base;
+ unsigned long base_offset;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int node, prev_node, unit, nr_units, rc;
+
+ ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ cpu_map = ai->groups[0].cpu_map;
+
+ /* determine base */
+ base = (void *)ULONG_MAX;
+ for_each_possible_cpu(cpu)
+ base = min(base,
+ (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+ base_offset = (void *)__per_cpu_start - base;
+
+ /* build cpu_map, units are grouped by node */
+ unit = 0;
+ for_each_node(node)
+ for_each_possible_cpu(cpu)
+ if (node == node_cpuid[cpu].nid)
+ cpu_map[unit++] = cpu;
+ nr_units = unit;
+
+ /* set basic parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ /*
+ * CPUs are put into groups according to node. Walk cpu_map
+ * and create new groups at node boundaries.
+ */
+ prev_node = -1;
+ ai->nr_groups = 0;
+ for (unit = 0; unit < nr_units; unit++) {
+ cpu = cpu_map[unit];
+ node = node_cpuid[cpu].nid;
+
+ if (node == prev_node) {
+ gi->nr_units++;
+ continue;
+ }
+ prev_node = node;
+
+ gi = &ai->groups[ai->nr_groups++];
+ gi->nr_units = 1;
+ gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+ gi->cpu_map = &cpu_map[unit];
+ }
+
+ rc = pcpu_setup_first_chunk(ai, base);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
+#endif
+
/**
* fill_pernode - initialize pernode data.
* @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
/* Set the node_data pointer for each per-cpu struct */
for_each_possible_early_cpu(cpu) {
node = node_cpuid[cpu].nid;
- per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+ per_cpu(ia64_cpu_info, cpu).node_data =
+ mem_data[node].node_data;
}
#else
{
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__cpu_info - __per_cpu_start));
+ ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
sparse_init();
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmem_map = (struct page *) vmalloc_end;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c..b9609c69343 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 2a140627dfd..3dccdd8eb27 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -22,6 +22,12 @@ __ioremap (unsigned long phys_addr)
}
void __iomem *
+early_ioremap (unsigned long phys_addr, unsigned long size)
+{
+ return __ioremap(phys_addr);
+}
+
+void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
void __iomem *addr;
@@ -102,6 +108,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
EXPORT_SYMBOL(ioremap_nocache);
void
+early_iounmap (volatile void __iomem *addr, unsigned long size)
+{
+}
+
+void
iounmap (volatile void __iomem *addr)
{
if (REGION_NUMBER(addr) == RGN_GATE)
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index c0fca2c1c85..df639db779f 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -131,6 +131,7 @@ alloc_pci_controller (int seg)
}
struct pci_root_info {
+ struct acpi_device *bridge;
struct pci_controller *controller;
char *name;
};
@@ -297,9 +298,20 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
window->offset = offset;
if (insert_resource(root, &window->resource)) {
- printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n",
- window->resource.start, window->resource.end,
- root->name, info->name);
+ dev_err(&info->bridge->dev,
+ "can't allocate host bridge window %pR\n",
+ &window->resource);
+ } else {
+ if (offset)
+ dev_info(&info->bridge->dev, "host bridge window %pR "
+ "(PCI address [%#llx-%#llx])\n",
+ &window->resource,
+ window->resource.start - offset,
+ window->resource.end - offset);
+ else
+ dev_info(&info->bridge->dev,
+ "host bridge window %pR\n",
+ &window->resource);
}
return AE_OK;
@@ -319,8 +331,9 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
(res->end - res->start < 16))
continue;
if (j >= PCI_BUS_NUM_RESOURCES) {
- printk("Ignoring range [%#llx-%#llx] (%lx)\n",
- res->start, res->end, res->flags);
+ dev_warn(&bus->dev,
+ "ignoring host bridge window %pR (no space)\n",
+ res);
continue;
}
bus->resource[j++] = res;
@@ -364,6 +377,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
goto out3;
sprintf(name, "PCI Bus %04x:%02x", domain, bus);
+ info.bridge = device;
info.controller = controller;
info.name = name;
acpi_walk_resources(device->handle, METHOD_NAME__CRS,
@@ -720,9 +734,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
return ret;
}
-/* It's defined in drivers/pci/pci.c */
-extern u8 pci_cache_line_size;
-
/**
* set_pci_cacheline_size - determine cacheline size for PCI devices
*
@@ -731,7 +742,7 @@ extern u8 pci_cache_line_size;
*
* Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
*/
-static void __init set_pci_cacheline_size(void)
+static void __init set_pci_dfl_cacheline_size(void)
{
unsigned long levels, unique_caches;
long status;
@@ -751,7 +762,7 @@ static void __init set_pci_cacheline_size(void)
"(status=%ld)\n", __func__, status);
return;
}
- pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
+ pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
u64 ia64_dma_get_required_mask(struct device *dev)
@@ -782,7 +793,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
- set_pci_cacheline_size();
+ set_pci_dfl_cacheline_size();
return 0;
}
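The message conversions above lean on two conventions: dev_err()/dev_info()/dev_warn() prefix the message with the owning device, and the %pR format extension prints a struct resource with its range and flags. A small hedged sketch (example_report_window is a made-up helper):

/* Illustrative sketch only: logging a bridge window with the %pR extension. */
static void example_report_window(struct device *dev, struct resource *res,
				  int failed)
{
	if (failed)
		dev_err(dev, "can't allocate host bridge window %pR\n", res);
	else
		dev_info(dev, "host bridge window %pR\n", res);
}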
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2ba..e884ba4e031 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
- 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
stat->deadlocks2,
stat->shub_ipi_flushes,
- 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+ 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
return 0;
}
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 35b2a27d2e7..efb454534e5 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/bitmap.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
- int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+ int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
- entry = find_first_zero_bit(map, mapsize);
- while (entry < mapsize) {
- last_entry = find_next_bit(map, mapsize, entry);
-
- if (last_entry - entry >= entries)
- break;
-
- entry = find_next_zero_bit(map, mapsize, last_entry);
- }
-
- if (entry > mapsize) {
+ entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+ if (entry >= mapsize) {
kfree(ca_dmamap);
goto map_return;
}
- for (i = 0; i < entries; i++)
- set_bit(entry + i, map);
+ bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
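The replacement above uses the linux/bitmap.h helpers instead of an open-coded scan: bitmap_find_next_zero_area() searches for a run of clear bits of the requested length (returning a value >= the map size when no run exists) and bitmap_set() then marks the run as allocated. A hedged usage sketch (example_alloc_run is a made-up helper):

/* Illustrative sketch only: grabbing a run of 'entries' free slots. */
#include <linux/bitmap.h>

static int example_alloc_run(unsigned long *map, unsigned long mapsize,
			     unsigned int entries)
{
	unsigned long entry;

	/* args: map, size, search start, run length, alignment mask */
	entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
	if (entry >= mapsize)
		return -ENOSPC;			/* no free run large enough */
	bitmap_set(map, entry, entries);	/* reserve the run */
	return entry;
}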
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2f..a3fb7cf9ae1 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
}
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE 15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE
struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
if (xen_slab_ready) {
switch (vec) {
case IA64_TIMER_VECTOR:
- snprintf(per_cpu(timer_name, cpu),
- sizeof(per_cpu(timer_name, cpu)),
+ snprintf(per_cpu(xen_timer_name, cpu),
+ sizeof(per_cpu(xen_timer_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
action->handler, action->flags,
- per_cpu(timer_name, cpu), action->dev_id);
- per_cpu(timer_irq, cpu) = irq;
+ per_cpu(xen_timer_name, cpu), action->dev_id);
+ per_cpu(xen_timer_irq, cpu) = irq;
break;
case IA64_IPI_RESCHEDULE:
- snprintf(per_cpu(resched_name, cpu),
- sizeof(per_cpu(resched_name, cpu)),
+ snprintf(per_cpu(xen_resched_name, cpu),
+ sizeof(per_cpu(xen_resched_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
action->handler, action->flags,
- per_cpu(resched_name, cpu), action->dev_id);
- per_cpu(resched_irq, cpu) = irq;
+ per_cpu(xen_resched_name, cpu), action->dev_id);
+ per_cpu(xen_resched_irq, cpu) = irq;
break;
case IA64_IPI_VECTOR:
- snprintf(per_cpu(ipi_name, cpu),
- sizeof(per_cpu(ipi_name, cpu)),
+ snprintf(per_cpu(xen_ipi_name, cpu),
+ sizeof(per_cpu(xen_ipi_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
action->handler, action->flags,
- per_cpu(ipi_name, cpu), action->dev_id);
- per_cpu(ipi_irq, cpu) = irq;
+ per_cpu(xen_ipi_name, cpu), action->dev_id);
+ per_cpu(xen_ipi_irq, cpu) = irq;
break;
case IA64_CMC_VECTOR:
- snprintf(per_cpu(cmc_name, cpu),
- sizeof(per_cpu(cmc_name, cpu)),
+ snprintf(per_cpu(xen_cmc_name, cpu),
+ sizeof(per_cpu(xen_cmc_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
- action->handler,
- action->flags,
- per_cpu(cmc_name, cpu),
- action->dev_id);
- per_cpu(cmc_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cmc_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cmc_irq, cpu) = irq;
break;
case IA64_CMCP_VECTOR:
- snprintf(per_cpu(cmcp_name, cpu),
- sizeof(per_cpu(cmcp_name, cpu)),
+ snprintf(per_cpu(xen_cmcp_name, cpu),
+ sizeof(per_cpu(xen_cmcp_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(cmcp_name, cpu),
- action->dev_id);
- per_cpu(cmcp_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cmcp_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cmcp_irq, cpu) = irq;
break;
case IA64_CPEP_VECTOR:
- snprintf(per_cpu(cpep_name, cpu),
- sizeof(per_cpu(cpep_name, cpu)),
+ snprintf(per_cpu(xen_cpep_name, cpu),
+ sizeof(per_cpu(xen_cpep_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(cpep_name, cpu),
- action->dev_id);
- per_cpu(cpep_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cpep_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cpep_irq, cpu) = irq;
break;
case IA64_CPE_VECTOR:
case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
if (action == CPU_DEAD) {
/* Unregister evtchn. */
- if (per_cpu(cpep_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
- per_cpu(cpep_irq, cpu) = -1;
+ if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+ NULL);
+ per_cpu(xen_cpep_irq, cpu) = -1;
}
- if (per_cpu(cmcp_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
- per_cpu(cmcp_irq, cpu) = -1;
+ if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+ NULL);
+ per_cpu(xen_cmcp_irq, cpu) = -1;
}
- if (per_cpu(cmc_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
- per_cpu(cmc_irq, cpu) = -1;
+ if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+ per_cpu(xen_cmc_irq, cpu) = -1;
}
- if (per_cpu(ipi_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
- per_cpu(ipi_irq, cpu) = -1;
+ if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+ per_cpu(xen_ipi_irq, cpu) = -1;
}
- if (per_cpu(resched_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(resched_irq, cpu),
- NULL);
- per_cpu(resched_irq, cpu) = -1;
+ if (per_cpu(xen_resched_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
+ NULL);
+ per_cpu(xen_resched_irq, cpu) = -1;
}
- if (per_cpu(timer_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
- per_cpu(timer_irq, cpu) = -1;
+ if (per_cpu(xen_timer_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+ NULL);
+ per_cpu(xen_timer_irq, cpu) = -1;
}
}
return NOTIFY_OK;
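The xen_ prefixes above exist because per-CPU variables end up in the ordinary symbol namespace once the per_cpu__ name mangling goes away, so short generic names like timer_irq would collide with other users; declaration and access are otherwise unchanged. A hedged sketch of the pattern (xen_example_irq and the helpers are made up):

/* Illustrative sketch only: a prefixed per-CPU variable and its accessors. */
static DEFINE_PER_CPU(int, xen_example_irq) = -1;

static void example_record_irq(unsigned int cpu, int irq)
{
	per_cpu(xen_example_irq, cpu) = irq;	/* instance belonging to 'cpu' */
}

static void example_forget_irq(unsigned int cpu)
{
	if (per_cpu(xen_example_irq, cpu) >= 0)
		per_cpu(xen_example_irq, cpu) = -1;
}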
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e2..c1c544513e8 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -34,15 +34,15 @@
#include "../kernel/fsyscall_gtod_data.h"
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
/* taken from i386/kernel/time-xen.c */
static void xen_init_missing_ticks_accounting(int cpu)
{
struct vcpu_register_runstate_memory_area area;
- struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+ struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
int rc;
memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
&area);
WARN_ON(rc && rc != -ENOSYS);
- per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
- per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+ per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ runstate->time[RUNSTATE_offline];
}
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
BUG_ON(preemptible());
- state = &__get_cpu_var(runstate);
+ state = &__get_cpu_var(xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
* This function just checks and reject this effect.
*/
if (!time_after_eq(runstate.time[RUNSTATE_blocked],
- per_cpu(processed_blocked_time, cpu)))
+ per_cpu(xen_blocked_time, cpu)))
blocked = 0;
if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
runstate.time[RUNSTATE_offline],
- per_cpu(processed_stolen_time, cpu)))
+ per_cpu(xen_stolen_time, cpu)))
stolen = 0;
if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
} else {
local_cpu_data->itm_next = delta_itm + new_itm;
}
- per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
- per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+ per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+ per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
}
return delta_itm;
}
diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h
index 0cc34c94bf2..2f85412ef73 100644
--- a/arch/m32r/include/asm/elf.h
+++ b/arch/m32r/include/asm/elf.h
@@ -102,7 +102,6 @@ typedef elf_fpreg_t elf_fpregset_t;
*/
#define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index dded923883b..179a06489b1 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -24,19 +24,19 @@
* We make no fairness assumptions. They have a cost.
*/
-#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
/**
- * __raw_spin_trylock - Try spin lock and return a result
+ * arch_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
*
- * __raw_spin_trylock() tries to get the lock and returns a result.
+ * arch_spin_trylock() tries to get the lock and returns a result.
* On the m32r, the result value is 1 (= Success) or 0 (= Failure).
*/
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
int oldval;
unsigned long tmp1, tmp2;
@@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
* }
*/
__asm__ __volatile__ (
- "# __raw_spin_trylock \n\t"
+ "# arch_spin_trylock \n\t"
"ldi %1, #0; \n\t"
"mvfc %2, psw; \n\t"
"clrpsw #0x40 -> nop; \n\t"
@@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (oldval > 0);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp0, tmp1;
@@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
* }
*/
__asm__ __volatile__ (
- "# __raw_spin_lock \n\t"
+ "# arch_spin_lock \n\t"
".fillinsn \n"
"1: \n\t"
"mvfc %1, psw; \n\t"
@@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
mb();
lock->slock = 1;
@@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1;
@@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
unsigned long tmp0, tmp1, tmp2;
@@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t*)lock;
if (atomic_dec_return(count) >= 0)
@@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
return 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_M32R_SPINLOCK_H */
diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h
index 83f52105c0e..92e27672661 100644
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
@@ -7,17 +7,17 @@
typedef struct {
volatile int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct {
volatile int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_M32R_SPINLOCK_TYPES_H */
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 8dfd31e87c4..3c71f776872 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index 305ac852bbe..d3c865c5a6b 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -76,30 +76,6 @@ asmlinkage int sys_tas(int __user *addr)
return oldval;
}
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index aa3bf4cfab3..60536e27123 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap2
+ .long sys_mmap_pgoff
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
diff --git a/arch/m68k/include/asm/asm-offsets.h b/arch/m68k/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/m68k/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h
index 0b0f49eb876..01c193d9141 100644
--- a/arch/m68k/include/asm/elf.h
+++ b/arch/m68k/include/asm/elf.h
@@ -59,7 +59,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
is actually used on ASV. */
#define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
-#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN3
#define ELF_EXEC_PAGESIZE 4096
#else
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index fe60e1abaee..aca0e28581c 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -83,9 +83,9 @@
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END KMAP_START
#else
-extern unsigned long vmalloc_end;
+extern unsigned long m68k_vmalloc_end;
#define VMALLOC_START 0x0f800000
-#define VMALLOC_END vmalloc_end
+#define VMALLOC_END m68k_vmalloc_end
#endif /* CONFIG_SUN3 */
/* zero page used for uninitialized stuff */
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index 86edb5fbcfc..ef54128baa0 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -196,7 +196,7 @@
* for them and trying to understand what they mean.
*
* CONFIG_xxx: These are the obvious machine configuration defines created
- * during configuration. These are defined in include/linux/autoconf.h.
+ * during configuration. These are defined in autoconf.h.
*
* CONSOLE: There is support for head.S console in this file. This
* console can talk to a Mac frame buffer, but could easily be extrapolated
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 7deb402bfc7..218f441de66 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -29,37 +29,16 @@
#include <asm/page.h>
#include <asm/unistd.h>
-/* common code for old and new mmaps */
-static inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
+ /*
+ * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
+ * so we need to shift the argument down by 1; m68k mmap64(3)
+ * (in libc) expects the last argument of mmap2 in 4Kb units.
+ */
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/*
@@ -90,57 +69,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
if (a.offset & ~PAGE_MASK)
goto out;
- a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
-out:
- return error;
-}
-
-#if 0
-struct mmap_arg_struct64 {
- __u32 addr;
- __u32 len;
- __u32 prot;
- __u32 flags;
- __u64 offset; /* 64 bits */
- __u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
- int error = -EFAULT;
- struct file * file = NULL;
- struct mmap_arg_struct64 a;
- unsigned long pgoff;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
-
- if ((long)a.offset & ~PAGE_MASK)
- return -EINVAL;
-
- pgoff = a.offset >> PAGE_SHIFT;
- if ((a.offset >> PAGE_SHIFT) != pgoff)
- return -EINVAL;
-
- if (!(a.flags & MAP_ANONYMOUS)) {
- error = -EBADF;
- file = fget(a.fd);
- if (!file)
- goto out;
- }
- a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
out:
return error;
}
-#endif
struct sel_arg_struct {
unsigned long n;
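
The m32r and m68k hunks above all delete the same boilerplate: a private do_mmap2()/sys_mmap2() wrapper that grabbed the file, masked MAP_EXECUTABLE/MAP_DENYWRITE and called do_mmap_pgoff() under mmap_sem, and the syscall tables now point straight at the generic sys_mmap_pgoff(). A rough sketch of what that shared helper has to do, reconstructed from the removed per-arch code (not the exact generic implementation added in the mm/ core):

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched.h>

/* Sketch only: mirrors the per-arch wrappers deleted above. */
static long mmap_pgoff_sketch(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	long error = -EBADF;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);			/* hold a reference while mapping */
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);		/* do_mmap_pgoff() wants mmap_sem held */
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

The m68k sys_mmap64() variant removed above was already dead code under #if 0, so nothing replaces it.
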
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 3cd19390aae..94f81ecfe3f 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -45,8 +45,8 @@
** Globals
*/
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long m68k_vmalloc_end;
+EXPORT_SYMBOL(m68k_vmalloc_end);
unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end)
#endif
// the lowest mapping here is the end of our
// vmalloc region
- if(!vmalloc_end)
- vmalloc_end = seg;
+ if (!m68k_vmalloc_end)
+ m68k_vmalloc_end = seg;
// mark the segmap alloc'd, and reserve any
// of the first 0xbff pages the hardware is
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index efdd090778a..b67cbc735a9 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -27,39 +27,6 @@
#include <asm/cacheflush.h>
#include <asm/unistd.h>
-/* common code for old and new mmaps */
-static inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
@@ -88,9 +55,8 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
if (a.offset & ~PAGE_MASK)
goto out;
- a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
out:
return error;
}
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 23535cc415a..486837efa3d 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -210,7 +210,7 @@ ENTRY(sys_call_table)
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap2
+ .long sys_mmap_pgoff
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index bbd8327f189..fd53e500be6 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,8 +6,15 @@ mainmenu "Linux/Microblaze Kernel Configuration"
config MICROBLAZE
def_bool y
select HAVE_LMB
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
select USB_ARCH_HAS_EHCI
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select HAVE_OPROFILE
+ select TRACING_SUPPORT
config SWAP
def_bool n
@@ -57,12 +64,24 @@ config GENERIC_GPIO
config GENERIC_CSUM
def_bool y
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
config PCI
def_bool n
config NO_DMA
def_bool y
+config DTC
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index 242cd35bdb4..9dc708a7f70 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -3,6 +3,9 @@
menu "Kernel hacking"
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
source "lib/Kconfig.debug"
config EARLY_PRINTK
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 34187354304..d2d6cfcb1a3 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -51,6 +51,8 @@ core-y += arch/microblaze/kernel/
core-y += arch/microblaze/mm/
core-y += arch/microblaze/platform/
+drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
+
boot := arch/microblaze/boot
# Are we making a simpleImage.<boardname> target? If so, crack out the boardname
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 21f13322a4c..902cf9846c3 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -2,11 +2,13 @@
# arch/microblaze/boot/Makefile
#
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
obj-y += linked_dtb.o
targets := linux.bin linux.bin.gz simpleImage.%
-OBJCOPYFLAGS_linux.bin := -O binary
+OBJCOPYFLAGS := -O binary
# Where the DTS files live
dtstree := $(srctree)/$(src)/dts
@@ -24,6 +26,7 @@ $(obj)/linux.bin: vmlinux FORCE
[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
$(call if_changed,objcopy)
+ $(call if_changed,uimage)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
@@ -36,8 +39,16 @@ quiet_cmd_cp = CP $< $@$2
quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -K _start -K _end -K __log_buf -K _fdt_start vmlinux -o $@
+quiet_cmd_uimage = UIMAGE $@.ub
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \
+ -C none -n 'Linux-$(KERNELRELEASE)' \
+ -a $(CONFIG_KERNEL_BASE_ADDR) -e $(CONFIG_KERNEL_BASE_ADDR) \
+ -d $@ $@.ub
+
$(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,cp,.unstrip)
+ $(call if_changed,objcopy)
+ $(call if_changed,uimage)
$(call if_changed,strip)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -53,4 +64,4 @@ $(obj)/%.dtb: $(dtstree)/%.dts FORCE
clean-kernel += linux.bin linux.bin.gz simpleImage.*
-clean-files += *.dtb
+clean-files += *.dtb simpleImage.*.unstrip
diff --git a/arch/microblaze/include/asm/asm-offsets.h b/arch/microblaze/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/microblaze/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
index c209c47509d..e52210891d7 100644
--- a/arch/microblaze/include/asm/cache.h
+++ b/arch/microblaze/include/asm/cache.h
@@ -21,20 +21,4 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-void _enable_icache(void);
-void _disable_icache(void);
-void _invalidate_icache(unsigned int addr);
-
-#define __enable_icache() _enable_icache()
-#define __disable_icache() _disable_icache()
-#define __invalidate_icache(addr) _invalidate_icache(addr)
-
-void _enable_dcache(void);
-void _disable_dcache(void);
-void _invalidate_dcache(unsigned int addr);
-
-#define __enable_dcache() _enable_dcache()
-#define __disable_dcache() _disable_dcache()
-#define __invalidate_dcache(addr) _invalidate_dcache(addr)
-
#endif /* _ASM_MICROBLAZE_CACHE_H */
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 088076e657b..a6edd356cd0 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -18,6 +18,8 @@
/* Somebody depends on this; sigh... */
#include <linux/mm.h>
+/* Look at Documentation/cachetlb.txt */
+
/*
* Cache handling functions.
* Microblaze has a write-through data cache, meaning that the data cache
@@ -27,78 +29,81 @@
* instruction cache to make sure we don't fetch old, bad code.
*/
+/* struct scache: d = dcache, i = icache, fl = flush, in = invalidate,
+ * suffix r = range */
+struct scache {
+ /* icache */
+ void (*ie)(void); /* enable */
+ void (*id)(void); /* disable */
+ void (*ifl)(void); /* flush */
+ void (*iflr)(unsigned long a, unsigned long b);
+ void (*iin)(void); /* invalidate */
+ void (*iinr)(unsigned long a, unsigned long b);
+ /* dcache */
+ void (*de)(void); /* enable */
+ void (*dd)(void); /* disable */
+ void (*dfl)(void); /* flush */
+ void (*dflr)(unsigned long a, unsigned long b);
+ void (*din)(void); /* invalidate */
+ void (*dinr)(unsigned long a, unsigned long b);
+};
+
+/* microblaze cache */
+extern struct scache *mbc;
+
+void microblaze_cache_init(void);
+
+#define enable_icache() mbc->ie();
+#define disable_icache() mbc->id();
+#define flush_icache() mbc->ifl();
+#define flush_icache_range(start, end) mbc->iflr(start, end);
+#define invalidate_icache() mbc->iin();
+#define invalidate_icache_range(start, end) mbc->iinr(start, end);
+
+
+#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
+#define flush_icache_page(vma, pg) do { } while (0)
+
+#define enable_dcache() mbc->de();
+#define disable_dcache() mbc->dd();
/* FIXME for LL-temac driver */
-#define invalidate_dcache_range(start, end) \
- __invalidate_dcache_range(start, end)
-
-#define flush_cache_all() __invalidate_cache_all()
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) __invalidate_cache_all()
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define invalidate_dcache() mbc->din();
+#define invalidate_dcache_range(start, end) mbc->dinr(start, end);
+#define flush_dcache() mbc->dfl();
+#define flush_dcache_range(start, end) mbc->dflr(start, end);
-#define flush_dcache_range(start, end) __invalidate_dcache_range(start, end)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+/* D-cache aliasing problem can't happen - cache is between MMU and ram */
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, len) __invalidate_icache_range(start, len)
-#define flush_icache_page(vma, pg) do { } while (0)
-
-#ifndef CONFIG_MMU
-# define flush_icache_user_range(start, len) do { } while (0)
-#else
-# define flush_icache_user_range(vma, pg, adr, len) __invalidate_icache_all()
-
-# define flush_page_to_ram(page) do { } while (0)
-# define flush_icache() __invalidate_icache_all()
-# define flush_cache_sigtramp(vaddr) \
- __invalidate_icache_range(vaddr, vaddr + 8)
-
-# define flush_dcache_mmap_lock(mapping) do { } while (0)
-# define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-# define flush_cache_dup_mm(mm) do { } while (0)
+/* MS: kgdb code uses this macro; wrong len with FLASH */
+#if 0
+#define flush_cache_range(vma, start, len) { \
+ flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
+ flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
+}
#endif
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-struct page;
-struct mm_struct;
-struct vm_area_struct;
-
-/* see arch/microblaze/kernel/cache.c */
-extern void __invalidate_icache_all(void);
-extern void __invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __invalidate_icache_page(struct vm_area_struct *vma,
- struct page *page);
-extern void __invalidate_icache_user_range(struct vm_area_struct *vma,
- struct page *page,
- unsigned long adr, int len);
-extern void __invalidate_cache_sigtramp(unsigned long addr);
-
-extern void __invalidate_dcache_all(void);
-extern void __invalidate_dcache_range(unsigned long start, unsigned long end);
-extern void __invalidate_dcache_page(struct vm_area_struct *vma,
- struct page *page);
-extern void __invalidate_dcache_user_range(struct vm_area_struct *vma,
- struct page *page,
- unsigned long adr, int len);
-
-extern inline void __invalidate_cache_all(void)
-{
- __invalidate_icache_all();
- __invalidate_dcache_all();
-}
+#define flush_cache_range(vma, start, len) do { } while (0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy((dst), (src), (len)); \
- flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy((dst), (src), (len)); \
+ flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy((dst), (src), (len))
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy((dst), (src), (len)); \
+} while (0)
#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
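
The header now routes every cache operation through the mbc function-pointer table filled in by microblaze_cache_init(), so the MSR/no-MSR and write-back/write-through variants are picked once at boot instead of at compile time, while callers keep using the familiar macros. A minimal usage sketch (the function and buffer here are made up purely for illustration):

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Illustration only: keep a DMA buffer coherent around a device transfer. */
static void example_dma_sync(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;
	unsigned long end = start + len;

	flush_dcache_range(start, end);		/* expands to mbc->dflr(start, end) */
	/* ... device reads the buffer and writes its reply ... */
	invalidate_dcache_range(start, end);	/* expands to mbc->dinr(start, end) */
}
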
diff --git a/arch/microblaze/include/asm/cpuinfo.h b/arch/microblaze/include/asm/cpuinfo.h
index 52f28f6dc4e..b4f5ca33aeb 100644
--- a/arch/microblaze/include/asm/cpuinfo.h
+++ b/arch/microblaze/include/asm/cpuinfo.h
@@ -43,7 +43,7 @@ struct cpuinfo {
u32 use_icache;
u32 icache_tagbits;
u32 icache_write;
- u32 icache_line;
+ u32 icache_line_length;
u32 icache_size;
unsigned long icache_base;
unsigned long icache_high;
@@ -51,8 +51,9 @@ struct cpuinfo {
u32 use_dcache;
u32 dcache_tagbits;
u32 dcache_write;
- u32 dcache_line;
+ u32 dcache_line_length;
u32 dcache_size;
+ u32 dcache_wb;
unsigned long dcache_base;
unsigned long dcache_high;
diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h
index 30286db27c1..78a038452c0 100644
--- a/arch/microblaze/include/asm/device.h
+++ b/arch/microblaze/include/asm/device.h
@@ -19,6 +19,18 @@ struct dev_archdata {
struct pdev_archdata {
};
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+ struct device_node *np)
+{
+ ad->of_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+ return ad->of_node;
+}
+
#endif /* _ASM_MICROBLAZE_DEVICE_H */
diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h
index f92fc0dda00..7d4acf2b278 100644
--- a/arch/microblaze/include/asm/elf.h
+++ b/arch/microblaze/include/asm/elf.h
@@ -77,7 +77,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_DATA ELFDATA2MSB
#endif
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
index 8b137891791..fd2fa2eca62 100644
--- a/arch/microblaze/include/asm/ftrace.h
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -1 +1,26 @@
+#ifndef _ASM_MICROBLAZE_FTRACE
+#define _ASM_MICROBLAZE_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((long)(_mcount))
+#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+extern void ftrace_call_graph(void);
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* relocation of the mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* _ASM_MICROBLAZE_FTRACE */
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 0b745828f42..8dbb6e7a03a 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -1 +1,126 @@
-#include <asm-generic/futex.h>
+#ifndef _ASM_MICROBLAZE_FUTEX_H
+#define _ASM_MICROBLAZE_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+({ \
+ __asm__ __volatile__ ( \
+ "1: lwx %0, %2, r0; " \
+ insn \
+ "2: swx %1, %2, r0; \
+ addic %1, r0, 0; \
+ bnei %1, 1b; \
+ 3: \
+ .section .fixup,\"ax\"; \
+ 4: brid 3b; \
+ addik %1, r0, %3; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,4b,2b,4b; \
+ .previous;" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+ ); \
+})
+
+static inline int
+futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("or %1,%4,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1,%0,%4;", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ int prev, cmp;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ __asm__ __volatile__ ("1: lwx %0, %2, r0; \
+ cmp %1, %0, %3; \
+ beqi %1, 3f; \
+ 2: swx %4, %2, r0; \
+ addic %1, r0, 0; \
+ bnei %1, 1b; \
+ 3: \
+ .section .fixup,\"ax\"; \
+ 4: brid 3b; \
+ addik %0, r0, %5; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,4b,2b,4b; \
+ .previous;" \
+ : "=&r" (prev), "=&r"(cmp) \
+ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
+
+ return prev;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
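
futex_atomic_op_inuser() above starts by unpacking the operation word that callers such as glibc build with FUTEX_OP(): the opcode sits in bits 31-28, the comparison in 27-24, and two signed 12-bit arguments below that. A small user-space decode of one such word, mirroring the kernel's shift-based unpacking exactly (including its reliance on arithmetic right shifts of a signed int):

#include <linux/futex.h>
#include <stdio.h>

int main(void)
{
	/* FUTEX_OP(op, oparg, cmp, cmparg): op<<28 | cmp<<24 | oparg<<12 | cmparg */
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

	int op     = (encoded_op >> 28) & 7;	/* FUTEX_OP_ADD -> 1 */
	int cmp    = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_GT -> 4 */
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extended 12-bit arg -> 1 */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extended 12-bit arg -> 0 */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}

The lwx/swx pair in __futex_atomic_op() then retries the read-modify-write until the reservation set up by lwx survives to the swx, which is what makes the update atomic with respect to other users of the same word.
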
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h
index dea65645a4f..2c38c6d8017 100644
--- a/arch/microblaze/include/asm/irqflags.h
+++ b/arch/microblaze/include/asm/irqflags.h
@@ -10,78 +10,73 @@
#define _ASM_MICROBLAZE_IRQFLAGS_H
#include <linux/irqflags.h>
+#include <asm/registers.h>
# if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
-# define local_irq_save(flags) \
+# define raw_local_irq_save(flags) \
do { \
- asm volatile ("# local_irq_save \n\t" \
- "msrclr %0, %1 \n\t" \
- "nop \n\t" \
+ asm volatile (" msrclr %0, %1; \
+ nop;" \
: "=r"(flags) \
: "i"(MSR_IE) \
: "memory"); \
} while (0)
-# define local_irq_disable() \
- do { \
- asm volatile ("# local_irq_disable \n\t" \
- "msrclr r0, %0 \n\t" \
- "nop \n\t" \
- : \
- : "i"(MSR_IE) \
- : "memory"); \
+# define raw_local_irq_disable() \
+ do { \
+ asm volatile (" msrclr r0, %0; \
+ nop;" \
+ : \
+ : "i"(MSR_IE) \
+ : "memory"); \
} while (0)
-# define local_irq_enable() \
- do { \
- asm volatile ("# local_irq_enable \n\t" \
- "msrset r0, %0 \n\t" \
- "nop \n\t" \
- : \
- : "i"(MSR_IE) \
- : "memory"); \
+# define raw_local_irq_enable() \
+ do { \
+ asm volatile (" msrset r0, %0; \
+ nop;" \
+ : \
+ : "i"(MSR_IE) \
+ : "memory"); \
} while (0)
# else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */
-# define local_irq_save(flags) \
+# define raw_local_irq_save(flags) \
do { \
register unsigned tmp; \
- asm volatile ("# local_irq_save \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- "andi %1, %0, %2 \n\t" \
- "mts rmsr, %1 \n\t" \
- "nop \n\t" \
+ asm volatile (" mfs %0, rmsr; \
+ nop; \
+ andi %1, %0, %2; \
+ mts rmsr, %1; \
+ nop;" \
: "=r"(flags), "=r" (tmp) \
: "i"(~MSR_IE) \
: "memory"); \
} while (0)
-# define local_irq_disable() \
+# define raw_local_irq_disable() \
do { \
register unsigned tmp; \
- asm volatile ("# local_irq_disable \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- "andi %0, %0, %1 \n\t" \
- "mts rmsr, %0 \n\t" \
- "nop \n\t" \
+ asm volatile (" mfs %0, rmsr; \
+ nop; \
+ andi %0, %0, %1; \
+ mts rmsr, %0; \
+ nop;" \
: "=r"(tmp) \
: "i"(~MSR_IE) \
: "memory"); \
} while (0)
-# define local_irq_enable() \
+# define raw_local_irq_enable() \
do { \
register unsigned tmp; \
- asm volatile ("# local_irq_enable \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- "ori %0, %0, %1 \n\t" \
- "mts rmsr, %0 \n\t" \
- "nop \n\t" \
+ asm volatile (" mfs %0, rmsr; \
+ nop; \
+ ori %0, %0, %1; \
+ mts rmsr, %0; \
+ nop;" \
: "=r"(tmp) \
: "i"(MSR_IE) \
: "memory"); \
@@ -89,35 +84,28 @@
# endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */
-#define local_save_flags(flags) \
+#define raw_local_irq_restore(flags) \
do { \
- asm volatile ("# local_save_flags \n\t" \
- "mfs %0, rmsr \n\t" \
- "nop \n\t" \
- : "=r"(flags) \
+ asm volatile (" mts rmsr, %0; \
+ nop;" \
: \
+ : "r"(flags) \
: "memory"); \
} while (0)
-#define local_irq_restore(flags) \
- do { \
- asm volatile ("# local_irq_restore \n\t"\
- "mts rmsr, %0 \n\t" \
- "nop \n\t" \
- : \
- : "r"(flags) \
- : "memory"); \
- } while (0)
-
-static inline int irqs_disabled(void)
+static inline unsigned long get_msr(void)
{
unsigned long flags;
-
- local_save_flags(flags);
- return ((flags & MSR_IE) == 0);
+ asm volatile (" mfs %0, rmsr; \
+ nop;" \
+ : "=r"(flags) \
+ : \
+ : "memory"); \
+ return flags;
}
-#define raw_irqs_disabled irqs_disabled
-#define raw_irqs_disabled_flags(flags) ((flags) == 0)
+#define raw_local_save_flags(flags) ((flags) = get_msr())
+#define raw_irqs_disabled() ((get_msr() & MSR_IE) == 0)
+#define raw_irqs_disabled_flags(flags) ((flags & MSR_IE) == 0)
#endif /* _ASM_MICROBLAZE_IRQFLAGS_H */
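
Renaming the primitives to raw_local_irq_* plugs the MicroBlaze MSR accessors into the generic include/linux/irqflags.h layer, so with irq tracing enabled (see the TRACE_IRQFLAGS_SUPPORT hunk earlier in this merge) the non-raw wrappers can add lockdep/trace hooks around them. Callers are unchanged; a minimal sketch with a made-up counter:

#include <linux/irqflags.h>

static unsigned long demo_event_count;	/* illustrative shared state */

static void demo_note_event(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* wraps raw_local_irq_save(): msrclr or mfs/mts */
	demo_event_count++;		/* cannot be interrupted by a local IRQ handler here */
	local_irq_restore(flags);	/* raw_local_irq_restore(): mts rmsr, restoring MSR_IE */
}
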
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 880c988c223..9b66c0fa9a3 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -164,7 +164,8 @@ extern int page_is_ram(unsigned long pfn);
# endif /* CONFIG_MMU */
# ifndef CONFIG_MMU
-# define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) <= max_mapnr)
+# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && \
+ ((pfn) <= (min_low_pfn + max_mapnr)))
# define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
# else /* CONFIG_MMU */
# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index b0131da1387..7547f506456 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -106,9 +106,6 @@ extern inline void free_pgd_slow(pgd_t *pgd)
*/
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
-/* FIXME two definition - look below */
-#define pmd_free(mm, x) do { } while (0)
-#define pgd_populate(mm, pmd, pte) BUG()
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
@@ -192,14 +189,14 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
* the pgd will always be present..
*/
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
-/*#define pmd_free(mm, x) do { } while (0)*/
-#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
+#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
#define pgd_populate(mm, pmd, pte) BUG()
extern int do_check_pgt_cache(int, int);
#endif /* CONFIG_MMU */
-#define check_pgt_cache() do {} while (0)
+#define check_pgt_cache() do { } while (0)
#endif /* _ASM_MICROBLAZE_PGALLOC_H */
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 66f1b30dd09..e38abc7714b 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -76,20 +76,23 @@ struct pvr_s {
#define PVR3_FSL_LINKS_MASK 0x00000380
/* ICache config PVR masks */
-#define PVR4_USE_ICACHE_MASK 0x80000000
-#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000
-#define PVR4_ICACHE_USE_FSL_MASK 0x02000000
-#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000
-#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000
-#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000
+#define PVR4_USE_ICACHE_MASK 0x80000000 /* ICU */
+#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* ICTS */
+#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000 /* ICW */
+#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000 /* ICLL */
+#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000 /* ICBS */
+#define PVR4_ICACHE_ALWAYS_USED 0x00008000 /* IAU */
+#define PVR4_ICACHE_INTERFACE 0x00002000 /* ICI */
/* DCache config PVR masks */
-#define PVR5_USE_DCACHE_MASK 0x80000000
-#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000
-#define PVR5_DCACHE_USE_FSL_MASK 0x02000000
-#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000
-#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000
-#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000
+#define PVR5_USE_DCACHE_MASK 0x80000000 /* DCU */
+#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000 /* DCTS */
+#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000 /* DCW */
+#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000 /* DCLL */
+#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000 /* DCBS */
+#define PVR5_DCACHE_ALWAYS_USED 0x00008000 /* DAU */
+#define PVR5_DCACHE_USE_WRITEBACK 0x00004000 /* DWB */
+#define PVR5_DCACHE_INTERFACE 0x00002000 /* DCI */
/* ICache base address PVR mask */
#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF
@@ -178,11 +181,14 @@ struct pvr_s {
((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
#define PVR_DCACHE_USE_FSL(pvr) (pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
#define PVR_DCACHE_ALLOW_WR(pvr) (pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
+/* FIXME: two shifts on one line need a comment */
#define PVR_DCACHE_LINE_LEN(pvr) \
(1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
#define PVR_DCACHE_BYTE_SIZE(pvr) \
(1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_DCACHE_USE_WRITEBACK(pvr) \
+ ((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
#define PVR_ICACHE_BASEADDR(pvr) (pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
#define PVR_ICACHE_HIGHADDR(pvr) (pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
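
On the FIXME about two shifts: the DCLL field of PVR5 holds an encoded line length, PVR_DCACHE_LINE_LEN() turns it into a word count with 1 << field, and the cpuinfo code below shifts that by 2 to convert 32-bit words to bytes. A worked example, assuming a PVR5 value whose DCLL field reads back as 2:

#include <stdio.h>

#define PVR5_DCACHE_LINE_LEN_MASK	0x00E00000	/* DCLL field, bits 23:21 */

int main(void)
{
	unsigned int pvr5 = 2u << 21;		/* assume only DCLL is set, encoded value 2 */

	unsigned int words = 1u << ((pvr5 & PVR5_DCACHE_LINE_LEN_MASK) >> 21);
	unsigned int bytes = words << 2;	/* words -> bytes, the second shift */

	printf("dcache line: %u words = %u bytes\n", words, bytes);	/* 4 words = 16 bytes */
	return 0;
}
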
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index ed67c9ed15b..7f31394985e 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -35,6 +35,8 @@ extern void mmu_reset(void);
extern void early_console_reg_tlb_alloc(unsigned int addr);
# endif /* CONFIG_MMU */
+extern void of_platform_reset_gpio_probe(void);
+
void time_init(void);
void init_IRQ(void);
void machine_early_init(const char *cmdline, unsigned int ram,
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index b1ed6159066..157970688b2 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -16,6 +16,8 @@
#include <asm-generic/cmpxchg.h>
#include <asm-generic/cmpxchg-local.h>
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+
struct task_struct;
struct thread_info;
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 5431b4631a7..371bd6e56d9 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -272,8 +272,9 @@ static inline int clear_user(char *to, int size)
return size;
}
-extern unsigned long __copy_tofrom_user(void __user *to,
- const void __user *from, unsigned long size);
+#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
+#define __copy_from_user_inatomic(to, from, n) \
+ copy_from_user((to), (from), (n))
#define copy_to_user(to, from, n) \
(access_ok(VERIFY_WRITE, (to), (n)) ? \
@@ -290,10 +291,6 @@ extern unsigned long __copy_tofrom_user(void __user *to,
(void __user *)(from), (n)) \
: -EFAULT)
-#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
- copy_from_user((to), (from), (n))
-
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);
@@ -305,6 +302,9 @@ extern int __strnlen_user(const char __user *sstr, int len);
#endif /* CONFIG_MMU */
+extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index d487729683d..b07594eccf9 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -2,12 +2,22 @@
# Makefile
#
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early boot code and low level code
+CFLAGS_REMOVE_timer.o = -pg
+CFLAGS_REMOVE_intc.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_selfmod.o = -pg
+CFLAGS_REMOVE_heartbeat.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
extra-y := head.o vmlinux.lds
obj-y += exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
of_platform.o process.o prom.o prom_parse.o ptrace.o \
- setup.o signal.o sys_microblaze.o timer.o traps.o
+ setup.o signal.o sys_microblaze.o timer.o traps.o reset.o
obj-y += cpu/
@@ -16,5 +26,7 @@ obj-$(CONFIG_SELFMOD) += selfmod.o
obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile
index 20646e54927..59cc7bceaf8 100644
--- a/arch/microblaze/kernel/cpu/Makefile
+++ b/arch/microblaze/kernel/cpu/Makefile
@@ -2,6 +2,10 @@
# Build the appropriate CPU version support
#
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cache.o = -pg
+endif
+
EXTRA_CFLAGS += -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \
-DCPU_REV=$(CPU_REV)
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index af866a45012..d9d63831cc2 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
@@ -13,243 +13,534 @@
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
-/* Exported functions */
+static inline void __invalidate_flush_icache(unsigned int addr)
+{
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (addr));
+}
+
+static inline void __flush_dcache(unsigned int addr)
+{
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (addr));
+}
+
+static inline void __invalidate_dcache(unsigned int baseaddr,
+ unsigned int offset)
+{
+ __asm__ __volatile__ ("wdc.clear %0, %1;" \
+ : : "r" (baseaddr), "r" (offset));
+}
-void _enable_icache(void)
+static inline void __enable_icache_msr(void)
{
- if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrset r0, %0; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
+ __asm__ __volatile__ (" msrset r0, %0; \
+ nop; " \
+ : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+ __asm__ __volatile__ (" msrclr r0, %0; \
+ nop; " \
+ : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+ __asm__ __volatile__ (" msrset r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
- : "memory", "r12");
-#endif
- }
}
-void _disable_icache(void)
+static inline void __disable_dcache_msr(void)
{
- if (cpuinfo.use_icache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrclr r0, %0; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
+ __asm__ __volatile__ (" msrclr r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_ICE) \
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
: "memory", "r12");
-#endif
- }
}
-void _invalidate_icache(unsigned int addr)
+static inline void __disable_icache_nomsr(void)
{
- if (cpuinfo.use_icache) {
- __asm__ __volatile__ (" \
- wic %0, r0" \
- : \
- : "r" (addr));
- }
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory", "r12");
}
-void _enable_dcache(void)
+static inline void __enable_dcache_nomsr(void)
{
- if (cpuinfo.use_dcache) {
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrset r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- ori r12, r12, %0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory", "r12");
-#endif
- }
}
-void _disable_dcache(void)
+static inline void __disable_dcache_nomsr(void)
{
-#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
- __asm__ __volatile__ (" \
- msrclr r0, %0; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
- : "memory");
-#else
- __asm__ __volatile__ (" \
- mfs r12, rmsr; \
- nop; \
- andi r12, r12, ~%0; \
- mts rmsr, r12; \
- nop; " \
- : \
- : "i" (MSR_DCE) \
+ __asm__ __volatile__ (" mfs r12, rmsr; \
+ nop; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
: "memory", "r12");
-#endif
}
-void _invalidate_dcache(unsigned int addr)
+
+/* Helper macro for computing the limits of cache range loops */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
+do { \
+ int align = ~(cache_line_length - 1); \
+ end = min(start + cache_size, end); \
+ start &= align; \
+ end = ((end & align) + cache_line_length); \
+} while (0);
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on that cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op) \
+do { \
+ unsigned int len = cache_size; \
+ int step = -line_length; \
+ BUG_ON(step >= 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " %0, r0; \
+ bgtid %0, 1b; \
+ addk %0, %0, %1; \
+ " : : "r" (len), "r" (step) \
+ : "memory"); \
+} while (0);
+
+
+#define CACHE_ALL_LOOP2(cache_size, line_length, op) \
+do { \
+ unsigned int len = cache_size; \
+ int step = -line_length; \
+ BUG_ON(step >= 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " r0, %0; \
+ bgtid %0, 1b; \
+ addk %0, %0, %1; \
+ " : : "r" (len), "r" (step) \
+ : "memory"); \
+} while (0);
+
+/* for wdc.flush/clear */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
+do { \
+ int step = -line_length; \
+ int count = end - start; \
+ BUG_ON(count <= 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " %0, %1; \
+ bgtid %1, 1b; \
+ addk %1, %1, %2; \
+ " : : "r" (start), "r" (count), \
+ "r" (step) : "memory"); \
+} while (0);
+
+/* Only the first parameter is used by OP - for wic, wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
+do { \
+ int step = -line_length; \
+ int count = end - start; \
+ BUG_ON(count <= 0); \
+ \
+ __asm__ __volatile__ (" 1: addk %0, %0, %1; \
+ " #op " %0, r0; \
+ bgtid %1, 1b; \
+ addk %1, %1, %2; \
+ " : : "r" (start), "r" (count), \
+ "r" (step) : "memory"); \
+} while (0);
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
- __asm__ __volatile__ (" \
- wdc %0, r0" \
- : \
- : "r" (addr));
+ unsigned long flags;
+
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+ local_irq_save(flags);
+ __disable_icache_msr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_msr();
+ local_irq_restore(flags);
}
-void __invalidate_icache_all(void)
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+ unsigned long end)
{
- unsigned int i;
- unsigned flags;
+ unsigned long flags;
- if (cpuinfo.use_icache) {
- local_irq_save(flags);
- __disable_icache();
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
- /* Just loop through cache size and invalidate, no need to add
- CACHE_BASE address */
- for (i = 0; i < cpuinfo.icache_size;
- i += cpuinfo.icache_line)
- __invalidate_icache(i);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
- __enable_icache();
- local_irq_restore(flags);
- }
+ local_irq_save(flags);
+ __disable_icache_nomsr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_nomsr();
+ local_irq_restore(flags);
}
-void __invalidate_icache_range(unsigned long start, unsigned long end)
+static void __flush_icache_range_noirq(unsigned long start,
+ unsigned long end)
{
- unsigned int i;
- unsigned flags;
- unsigned int align;
-
- if (cpuinfo.use_icache) {
- /*
- * No need to cover entire cache range,
- * just cover cache footprint
- */
- end = min(start + cpuinfo.icache_size, end);
- align = ~(cpuinfo.icache_line - 1);
- start &= align; /* Make sure we are aligned */
- /* Push end up to the next cache line */
- end = ((end & align) + cpuinfo.icache_line);
-
- local_irq_save(flags);
- __disable_icache();
-
- for (i = start; i < end; i += cpuinfo.icache_line)
- __invalidate_icache(i);
-
- __enable_icache();
- local_irq_restore(flags);
- }
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_icache_msr();
+
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_msr();
+ local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_icache_nomsr();
+
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+
+ __enable_icache_nomsr();
+ local_irq_restore(flags);
}
-void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
+static void __flush_icache_all_noirq(void)
{
- __invalidate_icache_all();
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
}
-void __invalidate_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long adr,
- int len)
+static void __invalidate_dcache_all_msr_irq(void)
{
- __invalidate_icache_all();
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_dcache_msr();
+
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_msr();
+ local_irq_restore(flags);
}
-void __invalidate_cache_sigtramp(unsigned long addr)
+static void __invalidate_dcache_all_nomsr_irq(void)
{
- __invalidate_icache_range(addr, addr + 8);
+ unsigned long flags;
+
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_dcache_nomsr();
+
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_nomsr();
+ local_irq_restore(flags);
}
-void __invalidate_dcache_all(void)
+static void __invalidate_dcache_all_noirq_wt(void)
{
- unsigned int i;
- unsigned flags;
-
- if (cpuinfo.use_dcache) {
- local_irq_save(flags);
- __disable_dcache();
-
- /*
- * Just loop through cache size and invalidate,
- * no need to add CACHE_BASE address
- */
- for (i = 0; i < cpuinfo.dcache_size;
- i += cpuinfo.dcache_line)
- __invalidate_dcache(i);
-
- __enable_dcache();
- local_irq_restore(flags);
- }
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
}
-void __invalidate_dcache_range(unsigned long start, unsigned long end)
+/* FIXME this is weird - it should need only wdc, but that does not work.
+ * MS: I am getting bus errors and other weird things */
+static void __invalidate_dcache_all_wb(void)
{
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ wdc.clear)
+
+#if 0
unsigned int i;
- unsigned flags;
- unsigned int align;
-
- if (cpuinfo.use_dcache) {
- /*
- * No need to cover entire cache range,
- * just cover cache footprint
- */
- end = min(start + cpuinfo.dcache_size, end);
- align = ~(cpuinfo.dcache_line - 1);
- start &= align; /* Make sure we are aligned */
- /* Push end up to the next cache line */
- end = ((end & align) + cpuinfo.dcache_line);
- local_irq_save(flags);
- __disable_dcache();
-
- for (i = start; i < end; i += cpuinfo.dcache_line)
- __invalidate_dcache(i);
-
- __enable_dcache();
- local_irq_restore(flags);
- }
+
+ pr_debug("%s\n", __func__);
+
+ /* Just loop through cache size and invalidate it */
+ for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
+ __invalidate_dcache(0, i);
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+ unsigned long end)
+{
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+ unsigned long end)
+{
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
}
-void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+ unsigned long end)
{
- __invalidate_dcache_all();
+ unsigned long flags;
+
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ local_irq_save(flags);
+ __disable_dcache_msr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_msr();
+ local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ local_irq_save(flags);
+ __disable_dcache_nomsr();
+
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+
+ __enable_dcache_nomsr();
+ local_irq_restore(flags);
+}
+
+static void __flush_dcache_all_wb(void)
+{
+ pr_debug("%s\n", __func__);
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ wdc.flush);
}
-void __invalidate_dcache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long adr,
- int len)
+static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
- __invalidate_dcache_all();
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+}
+
+/* struct for wb caches and for wt caches */
+struct scache *mbc;
+
+/* new wb cache model */
+const struct scache wb_msr = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __flush_dcache_all_wb,
+ .dflr = __flush_dcache_range_wb,
+ .din = __invalidate_dcache_all_wb,
+ .dinr = __invalidate_dcache_range_wb,
+};
+
+/* The only difference from wb_msr is in the ie, id, de and dd functions */
+const struct scache wb_nomsr = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __flush_dcache_all_wb,
+ .dflr = __flush_dcache_range_wb,
+ .din = __invalidate_dcache_all_wb,
+ .dinr = __invalidate_dcache_range_wb,
+};
+
+/* Old wt cache model which disables irqs and turns the cache off */
+const struct scache wt_msr = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_msr_irq,
+ .iflr = __flush_icache_range_msr_irq,
+ .iin = __flush_icache_all_msr_irq,
+ .iinr = __flush_icache_range_msr_irq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __invalidate_dcache_all_msr_irq,
+ .dflr = __invalidate_dcache_range_msr_irq_wt,
+ .din = __invalidate_dcache_all_msr_irq,
+ .dinr = __invalidate_dcache_range_msr_irq_wt,
+};
+
+const struct scache wt_nomsr = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_nomsr_irq,
+ .iflr = __flush_icache_range_nomsr_irq,
+ .iin = __flush_icache_all_nomsr_irq,
+ .iinr = __flush_icache_range_nomsr_irq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __invalidate_dcache_all_nomsr_irq,
+ .dflr = __invalidate_dcache_range_nomsr_irq,
+ .din = __invalidate_dcache_all_nomsr_irq,
+ .dinr = __invalidate_dcache_range_nomsr_irq,
+};
+
+/* New wt cache model for newer Microblaze versions */
+const struct scache wt_msr_noirq = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __invalidate_dcache_all_noirq_wt,
+ .dflr = __invalidate_dcache_range_nomsr_wt,
+ .din = __invalidate_dcache_all_noirq_wt,
+ .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+const struct scache wt_nomsr_noirq = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __invalidate_dcache_all_noirq_wt,
+ .dflr = __invalidate_dcache_range_nomsr_wt,
+ .din = __invalidate_dcache_all_noirq_wt,
+ .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* CPU version codes for 7.20.a and 7.20.d - see arch/microblaze/kernel/cpu/cpuinfo.c */
+#define CPUVER_7_20_A 0x0c
+#define CPUVER_7_20_D 0x0f
+
+#define INFO(s) printk(KERN_INFO "cache: " s " \n");
+
+void microblaze_cache_init(void)
+{
+ if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
+ if (cpuinfo.dcache_wb) {
+ INFO("wb_msr");
+ mbc = (struct scache *)&wb_msr;
+ if (cpuinfo.ver_code < CPUVER_7_20_D) {
+ /* MS: problem with signal handling - hw bug */
+ INFO("WB won't work properly");
+ }
+ } else {
+ if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ INFO("wt_msr_noirq");
+ mbc = (struct scache *)&wt_msr_noirq;
+ } else {
+ INFO("wt_msr");
+ mbc = (struct scache *)&wt_msr;
+ }
+ }
+ } else {
+ if (cpuinfo.dcache_wb) {
+ INFO("wb_nomsr");
+ mbc = (struct scache *)&wb_nomsr;
+ if (cpuinfo.ver_code < CPUVER_7_20_D) {
+ /* MS: problem with signal handling - hw bug */
+ INFO("WB won't work properly");
+ }
+ } else {
+ if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ INFO("wt_nomsr_noirq");
+ mbc = (struct scache *)&wt_nomsr_noirq;
+ } else {
+ INFO("wt_nomsr");
+ mbc = (struct scache *)&wt_nomsr;
+ }
+ }
+ }
}
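
CACHE_LOOP_LIMITS() above clamps the range to at most one cache worth of data and aligns both ends to the line size before the wic/wdc loops run. A worked example of that arithmetic with an assumed 16-byte line and 8 KB cache (typical values, chosen only for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x1007, end = 0x1063;	/* arbitrary unaligned range */
	unsigned long line = 16, size = 8192;		/* assumed line length and cache size */
	unsigned long align = ~(line - 1);

	if (end > start + size)		/* same effect as end = min(start + size, end) */
		end = start + size;
	start &= align;			/* 0x1000: round start down to a line boundary */
	end = (end & align) + line;	/* 0x1070: round end up past the last touched line */

	printf("loop 0x%lx..0x%lx, %lu lines\n",
	       start, end, (end - start) / line);	/* 7 lines */
	return 0;
}

The loops themselves then walk downwards: CACHE_RANGE_LOOP_1/2 preload the byte count, issue one wic/wdc per line and decrement in bgtid's delay slot, which is why the step is negative and BUG_ON() rejects a non-positive count.
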
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index c259786e7fa..f72dbd66c84 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -21,8 +21,14 @@
*/
#define CI(c, p) { ci->c = PVR_##p(pvr); }
+
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
#define err_printk(x) \
early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
+#else
+#define err_printk(x) \
+ printk(KERN_INFO "ERROR: Microblaze " x "-different for PVR and DTS\n");
+#endif
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
{
@@ -70,7 +76,7 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
CI(use_icache, USE_ICACHE);
CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
CI(icache_write, ICACHE_ALLOW_WR);
- CI(icache_line, ICACHE_LINE_LEN);
+ ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
CI(icache_size, ICACHE_BYTE_SIZE);
CI(icache_base, ICACHE_BASEADDR);
CI(icache_high, ICACHE_HIGHADDR);
@@ -78,11 +84,16 @@ void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
CI(use_dcache, USE_DCACHE);
CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
CI(dcache_write, DCACHE_ALLOW_WR);
- CI(dcache_line, DCACHE_LINE_LEN);
+ ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
CI(dcache_size, DCACHE_BYTE_SIZE);
CI(dcache_base, DCACHE_BASEADDR);
CI(dcache_high, DCACHE_HIGHADDR);
+ temp = PVR_DCACHE_USE_WRITEBACK(pvr);
+ if (ci->dcache_wb != temp)
+ err_printk("DCACHE WB");
+ ci->dcache_wb = temp;
+
CI(use_dopb, D_OPB);
CI(use_iopb, I_OPB);
CI(use_dlmb, D_LMB);
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index adb448f93d5..6095aa6b5c8 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -72,12 +72,12 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
ci->use_icache = fcpu(cpu, "xlnx,use-icache");
ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits");
ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr");
- ci->icache_line = fcpu(cpu, "xlnx,icache-line-len") << 2;
- if (!ci->icache_line) {
+ ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2;
+ if (!ci->icache_line_length) {
if (fcpu(cpu, "xlnx,icache-use-fsl"))
- ci->icache_line = 4 << 2;
+ ci->icache_line_length = 4 << 2;
else
- ci->icache_line = 1 << 2;
+ ci->icache_line_length = 1 << 2;
}
ci->icache_size = fcpu(cpu, "i-cache-size");
ci->icache_base = fcpu(cpu, "i-cache-baseaddr");
@@ -86,16 +86,17 @@ void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
ci->use_dcache = fcpu(cpu, "xlnx,use-dcache");
ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag");
ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr");
- ci->dcache_line = fcpu(cpu, "xlnx,dcache-line-len") << 2;
- if (!ci->dcache_line) {
+ ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2;
+ if (!ci->dcache_line_length) {
if (fcpu(cpu, "xlnx,dcache-use-fsl"))
- ci->dcache_line = 4 << 2;
+ ci->dcache_line_length = 4 << 2;
else
- ci->dcache_line = 1 << 2;
+ ci->dcache_line_length = 1 << 2;
}
ci->dcache_size = fcpu(cpu, "d-cache-size");
ci->dcache_base = fcpu(cpu, "d-cache-baseaddr");
ci->dcache_high = fcpu(cpu, "d-cache-highaddr");
+ ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback");
ci->use_dopb = fcpu(cpu, "xlnx,d-opb");
ci->use_iopb = fcpu(cpu, "xlnx,i-opb");
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 3539babc1c1..991d71311b0 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -29,11 +29,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"7.20.a", 0x0c},
{"7.20.b", 0x0d},
{"7.20.c", 0x0e},
- /* FIXME There is no keycode defined in MBV for these versions */
- {"2.10.a", 0x10},
- {"3.00.a", 0x20},
- {"4.00.a", 0x30},
- {"4.00.b", 0x40},
+ {"7.20.d", 0x0f},
+ {"7.30.a", 0x10},
{NULL, 0},
};
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
index 4dcfccdbc36..0c912b2a8e0 100644
--- a/arch/microblaze/kernel/cpu/mb.c
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -103,11 +103,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
else
count += seq_printf(m, "Icache:\t\tno\n");
- if (cpuinfo.use_dcache)
+ if (cpuinfo.use_dcache) {
count += seq_printf(m,
"Dcache:\t\t%ukB\n",
cpuinfo.dcache_size >> 10);
- else
+ if (cpuinfo.dcache_wb)
+ count += seq_printf(m, "\t\twrite-back\n");
+ else
+ count += seq_printf(m, "\t\twrite-through\n");
+ } else
count += seq_printf(m, "Dcache:\t\tno\n");
count += seq_printf(m,
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index c9a4340ddd5..9bee9382bf7 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -45,7 +45,7 @@
int cpu_has_pvr(void)
{
- unsigned flags;
+ unsigned long flags;
unsigned pvr0;
local_save_flags(flags);
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 9083d85376a..95b0855802d 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -208,8 +208,6 @@ ENTRY(_user_exception)
lwi r1, r1, TS_THREAD_INFO /* get the thread info */
/* calculate kernel stack pointer */
addik r1, r1, THREAD_SIZE - PT_SIZE
- swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
- lwi r11, r0, PER_CPU(KM) /* load mode indicator */
2:
swi r11, r1, PT_MODE /* store the mode */
lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index e3ecb36dd55..3bad4ff4947 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -31,6 +31,8 @@
#include <linux/errno.h>
#include <asm/signal.h>
+#undef DEBUG
+
/* The size of a state save frame. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
@@ -352,10 +354,12 @@ C_ENTRY(_user_exception):
add r12, r12, r12; /* convert num -> ptr */
add r12, r12, r12;
+#ifdef DEBUG
	/* Trace syscalls and store them to r0_ram */
lwi r3, r12, 0x400 + r0_ram
addi r3, r3, 1
swi r3, r12, 0x400 + r0_ram
+#endif
# Find and jump into the syscall handler.
lwi r12, r12, sys_call_table
@@ -496,17 +500,6 @@ C_ENTRY(sys_execve):
brid microblaze_execve; /* Do real work (tail-call).*/
nop;
-C_ENTRY(sys_rt_sigsuspend_wrapper):
- swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- swi r4, r1, PTO+PT_R4;
- la r7, r1, PTO; /* add user context as 3rd arg */
- brlid r15, sys_rt_sigsuspend; /* Do real work.*/
- nop;
- lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- lwi r4, r1, PTO+PT_R4;
- bri ret_from_trap /* fall through will not work here due to align */
- nop;
-
C_ENTRY(sys_rt_sigreturn_wrapper):
swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
swi r4, r1, PTO+PT_R4;
@@ -711,15 +704,11 @@ C_ENTRY(ret_from_exc):
* (in a possibly modified form) after do_signal returns.
 * store return registers separately because this macro is used
 * for other exceptions */
- swi r3, r1, PTO + PT_R3;
- swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
- lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
- lwi r4, r1, PTO+PT_R4;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
new file mode 100644
index 00000000000..388b31ca65a
--- /dev/null
+++ b/arch/microblaze/kernel/ftrace.c
@@ -0,0 +1,237 @@
+/*
+ * Ftrace support for Microblaze.
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * Based on MIPS and PowerPC ftrace code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted, err;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against a fault, even though it shouldn't
+ * happen. This tool is too intrusive to
+ * run without such a protection.
+ */
+ asm volatile(" 1: lwi %0, %2, 0; \
+ 2: swi %3, %2, 0; \
+ addik %1, r0, 0; \
+ 3: \
+ .section .fixup, \"ax\"; \
+ 4: brid 3b; \
+ addik %1, r0, 1; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,4b; \
+ .word 2b,4b; \
+ .previous;" \
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
+ }
+
+ trace.func = self_addr;
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent = old;
+ }
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* save value to addr - it is safe to do it in asm */
+static int ftrace_modify_code(unsigned long addr, unsigned int value)
+{
+ int faulted = 0;
+
+ __asm__ __volatile__(" 1: swi %2, %1, 0; \
+ addik %0, r0, 0; \
+ 2: \
+ .section .fixup, \"ax\"; \
+ 3: brid 2b; \
+ addik %0, r0, 1; \
+ .previous; \
+ .section __ex_table,\"a\"; \
+ .word 1b,3b; \
+ .previous;" \
+ : "=r" (faulted)
+ : "r" (addr), "r" (value)
+ );
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define MICROBLAZE_NOP 0x80000000
+#define MICROBLAZE_BRI 0xb800000C
+
+static unsigned int recorded; /* whether the original instructions were saved */
+static unsigned int imm; /* saved whole imm instruction */
+
+/* There are two approaches to implementing the ftrace_make_nop function - see below */
+#undef USE_FTRACE_NOP
+
+#ifdef USE_FTRACE_NOP
+static unsigned int bralid; /* saving whole bralid instruction */
+#endif
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ /* this is the code sequence we are working with
+ * b000c000 imm -16384
+ * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
+ * 80000000 or r0, r0, r0
+ *
+ * The first solution (!USE_FTRACE_NOP-could be called branch solution)
+ * b000c000 bri 12 (0xC - jump to any other instruction)
+ * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
+ * 80000000 or r0, r0, r0
+ * any other instruction
+ *
+ * The second solution (USE_FTRACE_NOP) - no jump just nops
+ * 80000000 or r0, r0, r0
+ * 80000000 or r0, r0, r0
+ * 80000000 or r0, r0, r0
+ */
+ int ret = 0;
+
+ if (recorded == 0) {
+ recorded = 1;
+ imm = *(unsigned int *)rec->ip;
+ pr_debug("%s: imm:0x%x\n", __func__, imm);
+#ifdef USE_FTRACE_NOP
+ bralid = *(unsigned int *)(rec->ip + 4);
+ pr_debug("%s: bralid 0x%x\n", __func__, bralid);
+#endif /* USE_FTRACE_NOP */
+ }
+
+#ifdef USE_FTRACE_NOP
+ ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
+ ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
+#else /* USE_FTRACE_NOP */
+ ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
+#endif /* USE_FTRACE_NOP */
+ return ret;
+}
+
+static int ret_addr; /* initialized as 0 by default */
+
+/* ftrace_make_nop is expected to have been called before this function */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ int ret;
+ ret_addr = addr; /* saving where the barrier jump is */
+ pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
+ __func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
+ ret = ftrace_modify_code(rec->ip, imm);
+#ifdef USE_FTRACE_NOP
+ pr_debug("%s: bralid:0x%x\n", __func__, bralid);
+ ret += ftrace_modify_code(rec->ip + 4, bralid);
+#endif /* USE_FTRACE_NOP */
+ return ret;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ /* The return code is returned via data */
+ *(unsigned long *)data = 0;
+
+ return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned int upper = (unsigned int)func;
+ unsigned int lower = (unsigned int)func;
+ int ret = 0;
+
+ /* create the instruction pair to be stored at the ftrace_call site */
+ upper = 0xb0000000 + (upper >> 16); /* imm func_upper */
+ lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */
+
+ pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
+ __func__, (unsigned int)func, (unsigned int)ip, upper, lower);
+
+ /* save upper and lower code */
+ ret = ftrace_modify_code(ip, upper);
+ ret += ftrace_modify_code(ip + 4, lower);
+
+ /* We just need to replace the rtsd r15, 8 with a NOP */
+ BUG_ON(!ret_addr);
+ if (ret_addr)
+ ret += ftrace_modify_code(ret_addr, MICROBLAZE_NOP);
+ else
+ ret = 1; /* fault */
+
+ /* All changes are done - make the caches consistent */
+ flush_icache();
+ return ret;
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+unsigned int old_jump; /* saving place for jump instruction */
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned int ret;
+ unsigned long ip = (unsigned long)(&ftrace_call_graph);
+
+ old_jump = *(unsigned int *)ip; /* save jump over instruction */
+ ret = ftrace_modify_code(ip, MICROBLAZE_NOP);
+ flush_icache();
+
+ pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
+ return ret;
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned int ret;
+ unsigned long ip = (unsigned long)(&ftrace_call_graph);
+
+ ret = ftrace_modify_code(ip, old_jump);
+ flush_icache();
+
+ pr_debug("%s\n", __func__);
+ return ret;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_DYNAMIC_FTRACE */
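
The patching above relies on a fixed MicroBlaze instruction encoding: ftrace_update_ftrace_func() writes an imm instruction carrying the upper 16 bits of the handler address and an addik r20, r0, ... carrying the lower 16 bits. As a reading aid (not part of the patch), here is a small user-space sketch that reproduces that arithmetic for an assumed handler address; the address is only an example taken from the comments above.

	#include <stdio.h>

	int main(void)
	{
		/* assumed example handler address; the comments above mention c0008e30 <_mcount> */
		unsigned int func  = 0xc0008e30;
		/* same arithmetic as ftrace_update_ftrace_func() */
		unsigned int upper = 0xb0000000 + (func >> 16);    /* imm   func_upper          */
		unsigned int lower = 0x32800000 + (func & 0xFFFF); /* addik r20, r0, func_lower */

		printf("imm:   0x%08x\n", upper);  /* 0xb000c000 */
		printf("addik: 0x%08x\n", lower);  /* 0x32808e30 */
		return 0;
	}
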
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
index 1bdf20222b9..522751737cf 100644
--- a/arch/microblaze/kernel/heartbeat.c
+++ b/arch/microblaze/kernel/heartbeat.c
@@ -45,6 +45,7 @@ void heartbeat(void)
void setup_heartbeat(void)
{
struct device_node *gpio = NULL;
+ int *prop;
int j;
char *gpio_list[] = {
"xlnx,xps-gpio-1.00.a",
@@ -58,10 +59,14 @@ void setup_heartbeat(void)
break;
}
- base_addr = *(int *) of_get_property(gpio, "reg", NULL);
- base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
- printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
+ if (gpio) {
+ base_addr = *(int *) of_get_property(gpio, "reg", NULL);
+ base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
+ printk(KERN_NOTICE "Heartbeat GPIO at 0x%x\n", base_addr);
- if (*(int *) of_get_property(gpio, "xlnx,is-bidir", NULL))
- out_be32(base_addr + 4, 0); /* GPIO is configured as output */
+ /* GPIO is configured as output */
+ prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
+ if (prop)
+ out_be32(base_addr + 4, 0);
+ }
}
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 6eea6f92b84..03172c1da77 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -42,8 +42,16 @@ unsigned int nr_irq;
static void intc_enable_or_unmask(unsigned int irq)
{
+ unsigned long mask = 1 << irq;
pr_debug("enable_or_unmask: %d\n", irq);
- out_be32(INTC_BASE + SIE, 1 << irq);
+ out_be32(INTC_BASE + SIE, mask);
+
+ /* ack level irqs here because they cannot be acked in the
+ * ack function, since handle_level_irq acks the irq
+ * before calling the interrupt handler
+ */
+ if (irq_desc[irq].status & IRQ_LEVEL)
+ out_be32(INTC_BASE + IAR, mask);
}
static void intc_disable_or_mask(unsigned int irq)
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 7d5ddd62d4d..0f06034d1fe 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < nr_irq) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
new file mode 100644
index 00000000000..e7eaa7a8cbd
--- /dev/null
+++ b/arch/microblaze/kernel/mcount.S
@@ -0,0 +1,170 @@
+/*
+ * Low-level ftrace handling
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/linkage.h>
+
+#define NOALIGN_ENTRY(name) .globl name; name:
+
+/* FIXME MS: I think that I don't need to save all regs */
+#define SAVE_REGS \
+ addik r1, r1, -120; \
+ swi r2, r1, 4; \
+ swi r3, r1, 8; \
+ swi r4, r1, 12; \
+ swi r5, r1, 116; \
+ swi r6, r1, 16; \
+ swi r7, r1, 20; \
+ swi r8, r1, 24; \
+ swi r9, r1, 28; \
+ swi r10, r1, 32; \
+ swi r11, r1, 36; \
+ swi r12, r1, 40; \
+ swi r13, r1, 44; \
+ swi r14, r1, 48; \
+ swi r16, r1, 52; \
+ swi r17, r1, 56; \
+ swi r18, r1, 60; \
+ swi r19, r1, 64; \
+ swi r20, r1, 68; \
+ swi r21, r1, 72; \
+ swi r22, r1, 76; \
+ swi r23, r1, 80; \
+ swi r24, r1, 84; \
+ swi r25, r1, 88; \
+ swi r26, r1, 92; \
+ swi r27, r1, 96; \
+ swi r28, r1, 100; \
+ swi r29, r1, 104; \
+ swi r30, r1, 108; \
+ swi r31, r1, 112;
+
+#define RESTORE_REGS \
+ lwi r2, r1, 4; \
+ lwi r3, r1, 8; \
+ lwi r4, r1, 12; \
+ lwi r5, r1, 116; \
+ lwi r6, r1, 16; \
+ lwi r7, r1, 20; \
+ lwi r8, r1, 24; \
+ lwi r9, r1, 28; \
+ lwi r10, r1, 32; \
+ lwi r11, r1, 36; \
+ lwi r12, r1, 40; \
+ lwi r13, r1, 44; \
+ lwi r14, r1, 48; \
+ lwi r16, r1, 52; \
+ lwi r17, r1, 56; \
+ lwi r18, r1, 60; \
+ lwi r19, r1, 64; \
+ lwi r20, r1, 68; \
+ lwi r21, r1, 72; \
+ lwi r22, r1, 76; \
+ lwi r23, r1, 80; \
+ lwi r24, r1, 84; \
+ lwi r25, r1, 88; \
+ lwi r26, r1, 92; \
+ lwi r27, r1, 96; \
+ lwi r28, r1, 100; \
+ lwi r29, r1, 104; \
+ lwi r30, r1, 108; \
+ lwi r31, r1, 112; \
+ addik r1, r1, 120;
+
+ENTRY(ftrace_stub)
+ rtsd r15, 8;
+ nop;
+
+ENTRY(_mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+ /* MS: It is just barrier which is removed from C code */
+ rtsd r15, 8
+ nop
+#endif /* CONFIG_DYNAMIC_FTRACE */
+ SAVE_REGS
+ swi r15, r1, 0;
+ /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */
+ lwi r5, r0, function_trace_stop;
+ bneid r5, end;
+ nop;
+ /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef CONFIG_DYNAMIC_FTRACE
+ lwi r5, r0, ftrace_graph_return;
+ addik r6, r0, ftrace_stub; /* asm implementation */
+ cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+
+ lwi r6, r0, ftrace_graph_entry;
+ addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */
+ cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+#else /* CONFIG_DYNAMIC_FTRACE */
+NOALIGN_ENTRY(ftrace_call_graph)
+ /* MS: jump over graph function - replaced from C code */
+ bri end_graph_tracer
+#endif /* CONFIG_DYNAMIC_FTRACE */
+ addik r5, r1, 120; /* MS: load parent addr */
+ addik r6, r15, 0; /* MS: load current function addr */
+ bralid r15, prepare_ftrace_return;
+ nop;
+ /* MS: the graph path was taken, so skip the function trace */
+ brid end;
+ nop;
+end_graph_tracer:
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifndef CONFIG_DYNAMIC_FTRACE
+ /* MS: test whether the function tracer is enabled */
+ lwi r20, r0, ftrace_trace_function;
+ addik r6, r0, ftrace_stub;
+ cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */
+ beqid r5, end; /* MS: not taken -> jump over */
+ nop;
+#else /* CONFIG_DYNAMIC_FTRACE */
+NOALIGN_ENTRY(ftrace_call)
+/* placeholder for: imm FUNC_part1; addik r20, r0, FUNC_part2 (patched in from C) */
+ nop
+ nop
+#endif /* CONFIG_DYNAMIC_FTRACE */
+/* static normal trace */
+ lwi r6, r1, 120; /* MS: load parent addr */
+ addik r5, r15, 0; /* MS: load current function addr */
+ /* MS: here is dependency on previous code */
+ brald r15, r20; /* MS: jump to ftrace handler */
+ nop;
+end:
+ lwi r15, r1, 0;
+ RESTORE_REGS
+
+ rtsd r15, 8; /* MS: jump back */
+ nop;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(return_to_handler)
+ nop; /* MS: just barrier for rtsd r15, 8 */
+ nop;
+ SAVE_REGS
+ swi r15, r1, 0;
+
+ /* MS: find out returning address */
+ bralid r15, ftrace_return_to_handler;
+ nop;
+
+ /* MS: the return value from ftrace_return_to_handler is the address to
+ * return to; read it before RESTORE_REGS because r3 gets restored there */
+ addik r15, r3, 0;
+ RESTORE_REGS
+
+ rtsd r15, 8; /* MS: jump back */
+ nop;
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
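
For orientation, the decision flow that the hand-written _mcount above implements can be restated as a self-contained C model. This is a reading aid, not code from the patch: the tracer and the addresses in main() are made-up stand-ins for the kernel symbols the assembly loads, and the function-graph branch is omitted.

	#include <stdio.h>

	typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

	/* stand-ins for the kernel symbols referenced by the assembly */
	static void ftrace_stub(unsigned long ip, unsigned long parent_ip) { }
	static void demo_tracer(unsigned long ip, unsigned long parent_ip)
	{
		printf("traced 0x%lx, called from 0x%lx\n", ip, parent_ip);
	}
	static int function_trace_stop;                      /* MCOUNT_TEST flag */
	static ftrace_func_t ftrace_trace_function = demo_tracer;

	/* what _mcount does after SAVE_REGS, minus the graph-tracer branch */
	static void mcount_model(unsigned long self, unsigned long parent)
	{
		if (function_trace_stop)                     /* bneid r5, end       */
			return;
		if (ftrace_trace_function != ftrace_stub)    /* cmpu ...; beqid end */
			ftrace_trace_function(self, parent);
	}

	int main(void)
	{
		mcount_model(0xc0008e30UL, 0xc0001234UL);    /* assumed addresses */
		return 0;
	}
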
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index 59ff20e33e0..bc4dcb7d386 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <asm/page.h>
#include <asm/system.h>
+#include <linux/ftrace.h>
#include <linux/uaccess.h>
/*
@@ -47,3 +48,7 @@ extern void __umodsi3(void);
EXPORT_SYMBOL(__umodsi3);
extern char *_ebss;
EXPORT_SYMBOL_GPL(_ebss);
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index c592d475b3d..812f1bf06c9 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
void show_regs(struct pt_regs *regs)
{
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
new file mode 100644
index 00000000000..a1721a33042
--- /dev/null
+++ b/arch/microblaze/kernel/reset.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <asm/prom.h>
+
+/* Trigger specific functions */
+#ifdef CONFIG_GPIOLIB
+
+#include <linux/of_gpio.h>
+
+static int handle; /* reset pin handle */
+static unsigned int reset_val;
+
+static int of_reset_gpio_handle(void)
+{
+ int ret; /* stores the handle of the reset gpio pin */
+ struct device_node *root; /* root node */
+ struct device_node *gpio; /* gpio node */
+ struct of_gpio_chip *of_gc = NULL;
+ enum of_gpio_flags flags ;
+ const void *gpio_spec;
+
+ /* find out root node */
+ root = of_find_node_by_path("/");
+
+ /* get a handle to the gpio node so the pin can be allocated */
+ ret = of_parse_phandles_with_args(root, "hard-reset-gpios",
+ "#gpio-cells", 0, &gpio, &gpio_spec);
+ if (ret) {
+ pr_debug("%s: can't parse gpios property\n", __func__);
+ goto err0;
+ }
+
+ of_gc = gpio->data;
+ if (!of_gc) {
+ pr_debug("%s: gpio controller %s isn't registered\n",
+ root->full_name, gpio->full_name);
+ ret = -ENODEV;
+ goto err1;
+ }
+
+ ret = of_gc->xlate(of_gc, root, gpio_spec, &flags);
+ if (ret < 0)
+ goto err1;
+
+ ret += of_gc->gc.base;
+err1:
+ of_node_put(gpio);
+err0:
+ pr_debug("%s exited with status %d\n", __func__, ret);
+ return ret;
+}
+
+void of_platform_reset_gpio_probe(void)
+{
+ int ret;
+ handle = of_reset_gpio_handle();
+
+ if (!gpio_is_valid(handle)) {
+ printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n",
+ handle, "reset");
+ return;
+ }
+
+ ret = gpio_request(handle, "reset");
+ if (ret < 0) {
+ printk(KERN_INFO "GPIO pin is already allocated\n");
+ return;
+ }
+
+ /* get current setup value */
+ reset_val = gpio_get_value(handle);
+ /* FIXME maybe worth performing some action here */
+ pr_debug("Reset: Gpio output state: 0x%x\n", reset_val);
+
+ /* Setup GPIO as output */
+ ret = gpio_direction_output(handle, 0);
+ if (ret < 0)
+ goto err;
+
+ /* Setup output direction */
+ gpio_set_value(handle, 0);
+
+ printk(KERN_INFO "RESET: Registered gpio device: %d, current val: %d\n",
+ handle, reset_val);
+ return;
+err:
+ gpio_free(handle);
+ return;
+}
+
+
+static void gpio_system_reset(void)
+{
+ gpio_set_value(handle, 1 - reset_val);
+}
+#else
+#define gpio_system_reset() do {} while (0)
+void of_platform_reset_gpio_probe(void)
+{
+ return;
+}
+#endif
+
+void machine_restart(char *cmd)
+{
+ printk(KERN_NOTICE "Machine restart...\n");
+ gpio_system_reset();
+ dump_stack();
+ while (1)
+ ;
+}
+
+void machine_shutdown(void)
+{
+ printk(KERN_NOTICE "Machine shutdown...\n");
+ while (1)
+ ;
+}
+
+void machine_halt(void)
+{
+ printk(KERN_NOTICE "Machine halt...\n");
+ while (1)
+ ;
+}
+
+void machine_power_off(void)
+{
+ printk(KERN_NOTICE "Machine power off...\n");
+ while (1)
+ ;
+}
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 8c1e0f4dcf1..5372b24ad04 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -52,13 +52,12 @@ void __init setup_arch(char **cmdline_p)
/* irq_early_init(); */
setup_cpuinfo();
- __invalidate_icache_all();
- __enable_icache();
+ microblaze_cache_init();
- __invalidate_dcache_all();
- __enable_dcache();
+ enable_dcache();
- panic_timeout = 120;
+ invalidate_icache();
+ enable_icache();
setup_memory();
@@ -131,6 +130,8 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
strlcpy(cmd_line, cmdline, COMMAND_LINE_SIZE);
#endif
+ lockdep_init();
+
/* initialize device tree for usage in early_printk */
early_init_devtree((void *)_fdt_start);
@@ -186,32 +187,3 @@ static int microblaze_debugfs_init(void)
}
arch_initcall(microblaze_debugfs_init);
#endif
-
-void machine_restart(char *cmd)
-{
- printk(KERN_NOTICE "Machine restart...\n");
- dump_stack();
- while (1)
- ;
-}
-
-void machine_shutdown(void)
-{
- printk(KERN_NOTICE "Machine shutdown...\n");
- while (1)
- ;
-}
-
-void machine_halt(void)
-{
- printk(KERN_NOTICE "Machine halt...\n");
- while (1)
- ;
-}
-
-void machine_power_off(void)
-{
- printk(KERN_NOTICE "Machine power off...\n");
- while (1)
- ;
-}
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 1c80e4fc40c..d8d3bb396cd 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -44,7 +44,6 @@
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset, int in_sycall);
-
asmlinkage long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs)
@@ -176,6 +175,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
int err = 0;
int signal;
+ unsigned long address = 0;
+#ifdef CONFIG_MMU
+ pmd_t *pmdp;
+ pte_t *ptep;
+#endif
frame = get_sigframe(ka, regs, sizeof(*frame));
@@ -216,8 +220,29 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
Negative 8 offset because return is rtsd r15, 8 */
regs->r15 = ((unsigned long)frame->tramp)-8;
- __invalidate_cache_sigtramp((unsigned long)frame->tramp);
-
+ address = ((unsigned long)frame->tramp);
+#ifdef CONFIG_MMU
+ pmdp = pmd_offset(pud_offset(
+ pgd_offset(current->mm, address),
+ address), address);
+
+ preempt_disable();
+ ptep = pte_offset_map(pmdp, address);
+ if (pte_present(*ptep)) {
+ address = (unsigned long) page_address(pte_page(*ptep));
+ /* MS: add the offset within the page */
+ address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
+ /* MS address is virtual */
+ address = virt_to_phys(address);
+ invalidate_icache_range(address, address + 8);
+ flush_dcache_range(address, address + 8);
+ }
+ pte_unmap(ptep);
+ preempt_enable();
+#else
+ flush_icache_range(address, address + 8);
+ flush_dcache_range(address, address + 8);
+#endif
if (err)
goto give_sigsegv;
@@ -233,6 +258,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
set_fs(USER_DS);
+ /* the tracer may want to single-step inside the handler */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
#ifdef DEBUG_SIG
printk(KERN_INFO "SIG deliver (%s:%d): sp=%p pc=%08lx\n",
current->comm, current->pid, frame, regs->pc);
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c
new file mode 100644
index 00000000000..123692f2264
--- /dev/null
+++ b/arch/microblaze/kernel/stacktrace.c
@@ -0,0 +1,65 @@
+/*
+ * Stack trace support for Microblaze.
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+
+/* FIXME initial support */
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long *sp;
+ unsigned long addr;
+ asm("addik %0, r1, 0" : "=r" (sp));
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ unsigned int *sp;
+ unsigned long addr;
+
+ struct thread_info *ti = task_thread_info(tsk);
+
+ if (tsk == current)
+ asm("addik %0, r1, 0" : "=r" (sp));
+ else
+ sp = (unsigned int *)ti->cpu_context.r1;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
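
Callers of the new save_stack_trace() are expected to fill in a struct stack_trace first. A minimal kernel-context usage sketch (not part of the patch; the buffer depth is arbitrary):

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	static void dump_current_stack_example(void)
	{
		unsigned long entries[16];                /* arbitrary depth  */
		struct stack_trace trace = {
			.nr_entries  = 0,
			.max_entries = ARRAY_SIZE(entries),
			.entries     = entries,
			.skip        = 0,                 /* keep every frame */
		};

		save_stack_trace(&trace);
		/* entries[0 .. trace.nr_entries-1] now hold kernel text addresses */
	}
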
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 07cabed4b94..9f3c205fb75 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -62,46 +62,14 @@ out:
return error;
}
-asmlinkage long
-sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- struct file *file = NULL;
- int ret = -EBADF;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file) {
- printk(KERN_INFO "no fd in mmap\r\n");
- goto out;
- }
- }
-
- down_write(&current->mm->mmap_sem);
- ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
-out:
- return ret;
-}
-
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t pgoff)
{
- int err = -EINVAL;
-
- if (pgoff & ~PAGE_MASK) {
- printk(KERN_INFO "no pagemask in mmap\r\n");
- goto out;
- }
+ if (pgoff & ~PAGE_MASK)
+ return -EINVAL;
- err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
-out:
- return err;
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
}
/*
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index c1ab1dc1089..4088be7d4e2 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -183,7 +183,7 @@ ENTRY(sys_call_table)
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend_wrapper
+ .long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown
@@ -196,7 +196,7 @@ ENTRY(sys_call_table)
.long sys_ni_syscall /* reserved for streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap2 /* mmap2 */
+ .long sys_mmap_pgoff /* mmap2 */
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
@@ -303,7 +303,7 @@ ENTRY(sys_call_table)
.long sys_mkdirat
.long sys_mknodat
.long sys_fchownat
- .long sys_ni_syscall
+ .long sys_futimesat
.long sys_fstatat64 /* 300 */
.long sys_unlinkat
.long sys_renameat
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 5499deae7fa..ed61b2f1771 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -183,6 +183,31 @@ static cycle_t microblaze_read(struct clocksource *cs)
return (cycle_t) (in_be32(TIMER_BASE + TCR1));
}
+static struct timecounter microblaze_tc = {
+ .cc = NULL,
+};
+
+static cycle_t microblaze_cc_read(const struct cyclecounter *cc)
+{
+ return microblaze_read(NULL);
+}
+
+static struct cyclecounter microblaze_cc = {
+ .read = microblaze_cc_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .shift = 24,
+};
+
+int __init init_microblaze_timecounter(void)
+{
+ microblaze_cc.mult = div_sc(cpuinfo.cpu_clock_freq, NSEC_PER_SEC,
+ microblaze_cc.shift);
+
+ timecounter_init(&microblaze_tc, &microblaze_cc, sched_clock());
+
+ return 0;
+}
+
static struct clocksource clocksource_microblaze = {
.name = "microblaze_clocksource",
.rating = 300,
@@ -204,6 +229,9 @@ static int __init microblaze_clocksource_init(void)
out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT);
/* start timer1 - up counting without interrupt */
out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT);
+
+ /* register timecounter - for ftrace support */
+ init_microblaze_timecounter();
return 0;
}
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index e704188d785..5ef619aad63 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -26,11 +26,12 @@ SECTIONS {
_stext = . ;
*(.text .text.*)
*(.fixup)
- EXIT_TEXT
- EXIT_CALL
+ EXIT_TEXT
+ EXIT_CALL
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
. = ALIGN (4) ;
_etext = . ;
}
@@ -86,6 +87,7 @@ SECTIONS {
_KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ;
}
+ . = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
index 8eb9df5a26c..a853fe089c4 100644
--- a/arch/microblaze/lib/uaccess.c
+++ b/arch/microblaze/lib/uaccess.c
@@ -39,3 +39,10 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
__do_strncpy_from_user(dst, src, count, res);
return res;
}
+
+unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size)
+{
+ memcpy(to, from, size);
+ return 0;
+}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a44892e7cd5..a57cedf3671 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -41,6 +41,7 @@ char *klimit = _end;
* have available.
*/
unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 46c4ca5d15c..2820081b21a 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -144,7 +144,6 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
- /* spin_lock(&init_mm.page_table_lock); */
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va);
/* Use middle 10 bits of VA to index the second-level map */
@@ -158,9 +157,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
if (mem_init_done)
flush_HPTE(0, va, pmd_val(*pd));
/* flush_HPTE(0, va, pg); */
-
}
- /* spin_unlock(&init_mm.page_table_lock); */
return err;
}
@@ -182,12 +179,6 @@ void __init adjust_total_lowmem(void)
#endif
}
-static void show_tmem(unsigned long tmem)
-{
- volatile unsigned long a;
- a = a + tmem;
-}
-
/*
* Map in all of physical memory starting at CONFIG_KERNEL_START.
*/
@@ -197,7 +188,6 @@ void __init mapin_ram(void)
v = CONFIG_KERNEL_START;
p = memory_start;
- show_tmem(memory_size);
for (s = 0; s < memory_size; s += PAGE_SIZE) {
f = _PAGE_PRESENT | _PAGE_ACCESSED |
_PAGE_SHARED | _PAGE_HWEXEC;
diff --git a/arch/microblaze/oprofile/Makefile b/arch/microblaze/oprofile/Makefile
new file mode 100644
index 00000000000..0d0348c8af9
--- /dev/null
+++ b/arch/microblaze/oprofile/Makefile
@@ -0,0 +1,13 @@
+#
+# arch/microblaze/oprofile/Makefile
+#
+
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) microblaze_oprofile.o
diff --git a/arch/microblaze/oprofile/microblaze_oprofile.c b/arch/microblaze/oprofile/microblaze_oprofile.c
new file mode 100644
index 00000000000..def17e59888
--- /dev/null
+++ b/arch/microblaze/oprofile/microblaze_oprofile.c
@@ -0,0 +1,22 @@
+/*
+ * Microblaze oprofile code
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ return -1;
+}
+
+void oprofile_arch_exit(void)
+{
+}
diff --git a/arch/microblaze/platform/Kconfig.platform b/arch/microblaze/platform/Kconfig.platform
index 8e9b4752d3f..669c7eec293 100644
--- a/arch/microblaze/platform/Kconfig.platform
+++ b/arch/microblaze/platform/Kconfig.platform
@@ -53,31 +53,12 @@ config OPT_LIB_FUNCTION
config OPT_LIB_ASM
bool "Optimized lib function ASM"
- depends on OPT_LIB_FUNCTION
+ depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
default n
help
Allows turning on optimized library functions (memcpy and memmove).
The functions are written in asm code.
-# This is still a bit broken - disabling for now JW 20070504
-config ALLOW_EDIT_AUTO
- bool "Permit Display/edit of Kconfig.auto platform settings"
- default n
- help
- Allows the editing of auto-generated platform settings from
- the Kconfig.auto file. Obviously this does not change the
- underlying hardware, so be very careful if you go editing
- these settings.
-
- Also, if you enable this, and edit various Kconfig.auto
- settings, YOUR CHANGES WILL BE LOST if you then disable it
- again. You have been warned!
-
- If unsure, say no.
-
-comment "Automatic platform settings from Kconfig.auto"
- depends on ALLOW_EDIT_AUTO
-
if PLATFORM_GENERIC=y
source "arch/microblaze/platform/generic/Kconfig.auto"
endif
diff --git a/arch/microblaze/platform/generic/Kconfig.auto b/arch/microblaze/platform/generic/Kconfig.auto
index fbca22d9c8b..5d86fc19029 100644
--- a/arch/microblaze/platform/generic/Kconfig.auto
+++ b/arch/microblaze/platform/generic/Kconfig.auto
@@ -21,7 +21,6 @@
# Definitions for MICROBLAZE0
comment "Definitions for MICROBLAZE0"
- depends on ALLOW_EDIT_AUTO
config KERNEL_BASE_ADDR
hex "Physical address where Linux Kernel is"
@@ -30,33 +29,33 @@ config KERNEL_BASE_ADDR
BASE Address for kernel
config XILINX_MICROBLAZE0_FAMILY
- string "Targetted FPGA family" if ALLOW_EDIT_AUTO
+ string "Targeted FPGA family"
default "virtex5"
config XILINX_MICROBLAZE0_USE_MSR_INSTR
- int "USE_MSR_INSTR range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_MSR_INSTR range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_PCMP_INSTR
- int "USE_PCMP_INSTR range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_PCMP_INSTR range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_BARREL
- int "USE_BARREL range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_BARREL range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_DIV
- int "USE_DIV range (0:1)" if ALLOW_EDIT_AUTO
- default 1
+ int "USE_DIV range (0:1)"
+ default 0
config XILINX_MICROBLAZE0_USE_HW_MUL
- int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)" if ALLOW_EDIT_AUTO
- default 2
+ int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)"
+ default 0
config XILINX_MICROBLAZE0_USE_FPU
- int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)" if ALLOW_EDIT_AUTO
- default 2
+ int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)"
+ default 0
config XILINX_MICROBLAZE0_HW_VER
- string "Core version number" if ALLOW_EDIT_AUTO
+ string "Core version number"
default 7.10.d
diff --git a/arch/microblaze/platform/generic/system.dts b/arch/microblaze/platform/generic/system.dts
index 29993f62b30..2d5c41767cd 100644
--- a/arch/microblaze/platform/generic/system.dts
+++ b/arch/microblaze/platform/generic/system.dts
@@ -32,11 +32,16 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,microblaze";
+ hard-reset-gpios = <&LEDs_8Bit 2 1>;
model = "testing";
DDR2_SDRAM: memory@90000000 {
device_type = "memory";
reg = < 0x90000000 0x10000000 >;
} ;
+ aliases {
+ ethernet0 = &Hard_Ethernet_MAC;
+ serial0 = &RS232_Uart_1;
+ } ;
chosen {
bootargs = "console=ttyUL0,115200 highres=on";
linux,stdout-path = "/plb@0/serial@84000000";
@@ -127,7 +132,7 @@
mb_plb: plb@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
+ compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
ranges ;
FLASH: flash@a0000000 {
bank-width = <2>;
@@ -214,12 +219,12 @@
#size-cells = <1>;
compatible = "xlnx,compound";
ethernet@81c00000 {
- compatible = "xlnx,xps-ll-temac-1.01.b";
+ compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
device_type = "network";
interrupt-parent = <&xps_intc_0>;
interrupts = < 5 2 >;
llink-connected = <&PIM3>;
- local-mac-address = [ 02 00 00 00 00 00 ];
+ local-mac-address = [ 00 0a 35 00 00 00 ];
reg = < 0x81c00000 0x40 >;
xlnx,bus2core-clk-ratio = <0x1>;
xlnx,phy-type = <0x1>;
@@ -261,6 +266,33 @@
xlnx,is-dual = <0x0>;
xlnx,tri-default = <0xffffffff>;
xlnx,tri-default-2 = <0xffffffff>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ } ;
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ heartbeat {
+ label = "Heartbeat";
+ gpios = <&LEDs_8Bit 4 1>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ yellow {
+ label = "Yellow";
+ gpios = <&LEDs_8Bit 5 1>;
+ };
+
+ red {
+ label = "Red";
+ gpios = <&LEDs_8Bit 6 1>;
+ };
+
+ green {
+ label = "Green";
+ gpios = <&LEDs_8Bit 7 1>;
+ };
} ;
RS232_Uart_1: serial@84000000 {
clock-frequency = <125000000>;
diff --git a/arch/microblaze/platform/platform.c b/arch/microblaze/platform/platform.c
index 56e0234fa34..5b89b58c5ae 100644
--- a/arch/microblaze/platform/platform.c
+++ b/arch/microblaze/platform/platform.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/prom.h>
+#include <asm/setup.h>
static struct of_device_id xilinx_of_bus_ids[] __initdata = {
{ .compatible = "simple-bus", },
@@ -26,6 +27,7 @@ static struct of_device_id xilinx_of_bus_ids[] __initdata = {
static int __init microblaze_device_probe(void)
{
of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
+ of_platform_reset_gpio_probe();
return 0;
}
device_initcall(microblaze_device_probe);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index fd7620f025f..9541171f122 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -5,9 +5,12 @@ config MIPS
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_ARCH_KGDB
- # Horrible source of confusion. Die, die, die ...
- select EMBEDDED
- select RTC_LIB if !LEMOTE_FULOONG2E
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select RTC_LIB if !MACH_LOONGSON
mainmenu "Linux/MIPS Kernel Configuration"
@@ -22,6 +25,7 @@ choice
config MACH_ALCHEMY
bool "Alchemy processor based machines"
+ select SYS_SUPPORTS_ZBOOT
config AR7
bool "Texas Instruments AR7"
@@ -36,6 +40,7 @@ config AR7
select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_ZBOOT_UART16550
select GENERIC_GPIO
select GCD
select VLYNQ
@@ -43,23 +48,6 @@ config AR7
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
-config BASLER_EXCITE
- bool "Basler eXcite smart camera"
- select CEVT_R4K
- select CSRC_R4K
- select DMA_COHERENT
- select HW_HAS_PCI
- select IRQ_CPU
- select IRQ_CPU_RM7K
- select IRQ_CPU_RM9K
- select MIPS_RM9122
- select SYS_HAS_CPU_RM9000
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_BIG_ENDIAN
- help
- The eXcite is a smart camera platform manufactured by
- Basler Vision Technologies AG.
-
config BCM47XX
bool "BCM47XX based boards"
select CEVT_R4K
@@ -192,6 +180,7 @@ config LASAT
config MACH_LOONGSON
bool "Loongson family of machines"
+ select SYS_SUPPORTS_ZBOOT_UART16550
help
This enables the support of Loongson family of machines.
@@ -233,6 +222,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_MIPS_CMP
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_SMARTMIPS
+ select SYS_SUPPORTS_ZBOOT
help
This enables support for the MIPS Technologies Malta evaluation
board.
@@ -334,6 +324,24 @@ config PMC_YOSEMITE
Yosemite is an evaluation board for the RM9000x2 processor
manufactured by PMC-Sierra.
+config POWERTV
+ bool "Cisco PowerTV"
+ select BOOT_ELF32
+ select CEVT_R4K
+ select CPU_MIPSR2_IRQ_VI
+ select CPU_MIPSR2_IRQ_EI
+ select CSRC_POWERTV
+ select DMA_NONCOHERENT
+ select HW_HAS_PCI
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select USB_OHCI_LITTLE_ENDIAN
+ help
+ This enables support for the Cisco PowerTV Platform.
+
config SGI_IP22
bool "SGI IP22 (Indy/Indigo2)"
select ARC
@@ -674,11 +682,11 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
endchoice
source "arch/mips/alchemy/Kconfig"
-source "arch/mips/basler/excite/Kconfig"
source "arch/mips/bcm63xx/Kconfig"
source "arch/mips/jazz/Kconfig"
source "arch/mips/lasat/Kconfig"
source "arch/mips/pmc-sierra/Kconfig"
+source "arch/mips/powertv/Kconfig"
source "arch/mips/sgi-ip27/Kconfig"
source "arch/mips/sibyte/Kconfig"
source "arch/mips/txx9/Kconfig"
@@ -778,6 +786,9 @@ config CSRC_BCM1480
config CSRC_IOASIC
bool
+config CSRC_POWERTV
+ bool
+
config CSRC_R4K_LIB
bool
@@ -806,20 +817,6 @@ config DMA_NONCOHERENT
config DMA_NEED_PCI_MAP_STATE
bool
-config EARLY_PRINTK
- bool "Early printk" if EMBEDDED && DEBUG_KERNEL
- depends on SYS_HAS_EARLY_PRINTK
- default y
- help
- This option enables special console drivers which allow the kernel
- to print messages very early in the bootup process.
-
- This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized. For normal operation,
- it is not recommended because it looks ugly on some machines and
- doesn't cooperate with an X server. You should normally say N here,
- unless you want to debug such a crash.
-
config SYS_HAS_EARLY_PRINTK
bool
@@ -1069,6 +1066,21 @@ config CPU_LOONGSON2E
The Loongson 2E processor implements the MIPS III instruction set
with many extensions.
+ It has an internal FPGA northbridge, which is compatible with
+ bonito64.
+
+config CPU_LOONGSON2F
+ bool "Loongson 2F"
+ depends on SYS_HAS_CPU_LOONGSON2F
+ select CPU_LOONGSON2
+ help
+ The Loongson 2F processor implements the MIPS III instruction set
+ with many extensions.
+
+ The Loongson2F has a built-in DDR2 and PCIX controller. The PCIX
+ controller has a programming interface similar to the FPGA northbridge
+ used in the Loongson2E.
+
config CPU_MIPS32_R1
bool "MIPS32 Release 1"
depends on SYS_HAS_CPU_MIPS32_R1
@@ -1294,6 +1306,16 @@ config CPU_CAVIUM_OCTEON
endchoice
+config SYS_SUPPORTS_ZBOOT
+ bool
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZMA
+
+config SYS_SUPPORTS_ZBOOT_UART16550
+ bool
+ select SYS_SUPPORTS_ZBOOT
+
config CPU_LOONGSON2
bool
select CPU_SUPPORTS_32BIT_KERNEL
@@ -1303,6 +1325,12 @@ config CPU_LOONGSON2
config SYS_HAS_CPU_LOONGSON2E
bool
+config SYS_HAS_CPU_LOONGSON2F
+ bool
+ select CPU_SUPPORTS_CPUFREQ
+ select CPU_SUPPORTS_ADDRWINCFG if 64BIT
+ select CPU_SUPPORTS_UNCACHED_ACCELERATED
+
config SYS_HAS_CPU_MIPS32_R1
bool
@@ -1411,8 +1439,17 @@ config CPU_SUPPORTS_32BIT_KERNEL
bool
config CPU_SUPPORTS_64BIT_KERNEL
bool
+config CPU_SUPPORTS_CPUFREQ
+ bool
+config CPU_SUPPORTS_ADDRWINCFG
+ bool
config CPU_SUPPORTS_HUGEPAGES
bool
+config CPU_SUPPORTS_UNCACHED_ACCELERATED
+ bool
+config MIPS_PGD_C0_CONTEXT
+ bool
+ default y if 64BIT && CPU_MIPSR2
#
# Set to y for ptrace access to watch registers.
@@ -2024,15 +2061,6 @@ config STACKTRACE_SUPPORT
source "init/Kconfig"
-config PROBE_INITRD_HEADER
- bool "Probe initrd header created by addinitrd"
- depends on BLK_DEV_INITRD
- help
- Probe initrd header at the last page of kernel image.
- Say Y here if you are using arch/mips/boot/addinitrd.c to
- add initrd or initramfs image to the kernel image.
- Otherwise, say N.
-
source "kernel/Kconfig.freezer"
menu "Bus options (PCI, PCMCIA, EISA, ISA, TC)"
@@ -2104,6 +2132,7 @@ config MMU
config I8253
bool
+ select MIPS_EXTERNAL_TIMER
config ZONE_DMA32
bool
@@ -2180,6 +2209,8 @@ source "kernel/power/Kconfig"
endmenu
+source "arch/mips/kernel/cpufreq/Kconfig"
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 364ca893880..d2b88a0be51 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -6,15 +6,66 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
+config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ depends on SYS_HAS_EARLY_PRINTK
+ default y
+ help
+ This option enables special console drivers which allow the kernel
+ to print messages very early in the bootup process.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation,
+ it is not recommended because it looks ugly on some machines and
+ doesn't cooperate with an X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config CMDLINE_BOOL
+ bool "Built-in kernel command line"
+ default n
+ help
+ For most systems, it is the firmware or the second-stage bootloader
+ that specifies the kernel command line options by default. However,
+ it might be necessary or advantageous to either override the
+ default kernel command line or add a few extra options to it.
+ For such cases, this option allows you to hardcode your own
+ command line options directly into the kernel. For that, you
+ should choose 'Y' here, and fill in the extra boot arguments
+ in CONFIG_CMDLINE.
+
+ The built-in options will be concatenated to the default command
+ line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
+ command line will be ignored and replaced by the built-in string.
+
+ Most MIPS systems will normally expect 'N' here and rely upon
+ the command line from the firmware or the second-stage bootloader.
+
config CMDLINE
string "Default kernel command string"
+ depends on CMDLINE_BOOL
default ""
help
On some platforms, there is currently no way for the boot loader to
- pass arguments to the kernel. For these platforms, you can supply
- some command-line options at build time by entering them here. In
- other cases you can specify kernel args so that you don't have
- to set them up in board prom initialization routines.
+ pass arguments to the kernel. For these platforms, and for the cases
+ when you want to add some extra options to the command line or ignore
+ the default command line, you can supply some command-line options at
+ build time by entering them here. In other cases you can specify
+ kernel args so that you don't have to set them up in board prom
+ initialization routines.
+
+ For more information, see the CMDLINE_BOOL and CMDLINE_OVERRIDE
+ options.
+
+config CMDLINE_OVERRIDE
+ bool "Built-in command line overrides firmware arguments"
+ default n
+ depends on CMDLINE_BOOL
+ help
+ By setting this option to 'Y' you will have your kernel ignore
+ command line arguments from firmware or second stage bootloader.
+ Instead, the built-in command line will be used exclusively.
+
+ Normally, you will choose 'N' here.
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 77f5021218d..1893efd43fc 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -48,7 +48,16 @@ ifneq ($(SUBARCH),$(ARCH))
endif
endif
+ifndef CONFIG_FUNCTION_TRACER
cflags-y := -ffunction-sections
+endif
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ifndef KBUILD_MCOUNT_RA_ADDRESS
+ ifeq ($(call cc-option-yn,-mmcount-ra-address), y)
+ cflags-y += -mmcount-ra-address -DKBUILD_MCOUNT_RA_ADDRESS
+ endif
+ endif
+endif
cflags-y += $(call cc-option, -mno-check-zero-division)
ifdef CONFIG_32BIT
@@ -69,6 +78,7 @@ endif
all-$(CONFIG_BOOT_ELF32) := $(vmlinux-32)
all-$(CONFIG_BOOT_ELF64) := $(vmlinux-64)
+all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlinuz
#
# GCC uses -G 0 -mabicalls -fpic as default. We don't want PIC in the kernel
@@ -124,6 +134,8 @@ cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2) += -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2E) += \
$(call cc-option,-march=loongson2e,-march=r4600)
+cflags-$(CONFIG_CPU_LOONGSON2F) += \
+ $(call cc-option,-march=loongson2f,-march=r4600)
cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32 -Wa,--trap
@@ -324,6 +336,7 @@ core-$(CONFIG_MACH_LOONGSON) +=arch/mips/loongson/
cflags-$(CONFIG_MACH_LOONGSON) += -I$(srctree)/arch/mips/include/asm/mach-loongson \
-mno-branch-likely
load-$(CONFIG_LEMOTE_FULOONG2E) +=0xffffffff80100000
+load-$(CONFIG_LEMOTE_MACH2F) +=0xffffffff80200000
#
# MIPS Malta board
@@ -331,7 +344,7 @@ load-$(CONFIG_LEMOTE_FULOONG2E) +=0xffffffff80100000
core-$(CONFIG_MIPS_MALTA) += arch/mips/mti-malta/
cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta
load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
-all-$(CONFIG_MIPS_MALTA) := vmlinux.bin
+all-$(CONFIG_MIPS_MALTA) := vmlinuz.bin
#
# MIPS SIM
@@ -356,13 +369,6 @@ cflags-$(CONFIG_PMC_YOSEMITE) += -I$(srctree)/arch/mips/include/asm/mach-yosemit
load-$(CONFIG_PMC_YOSEMITE) += 0xffffffff80100000
#
-# Basler eXcite
-#
-core-$(CONFIG_BASLER_EXCITE) += arch/mips/basler/excite/
-cflags-$(CONFIG_BASLER_EXCITE) += -I$(srctree)/arch/mips/include/asm/mach-excite
-load-$(CONFIG_BASLER_EXCITE) += 0x80100000
-
-#
# LASAT platforms
#
core-$(CONFIG_LASAT) += arch/mips/lasat/
@@ -441,6 +447,13 @@ core-$(CONFIG_NEC_MARKEINS) += arch/mips/emma/markeins/
load-$(CONFIG_NEC_MARKEINS) += 0xffffffff88100000
#
+# Cisco PowerTV Platform
+#
+core-$(CONFIG_POWERTV) += arch/mips/powertv/
+cflags-$(CONFIG_POWERTV) += -I$(srctree)/arch/mips/include/asm/mach-powertv
+load-$(CONFIG_POWERTV) += 0xffffffff90800000
+
+#
# SGI IP22 (Indy/Indigo2)
#
# Set the load address to >= 0xffffffff88069000 if you want to leave space for
@@ -581,7 +594,7 @@ load-$(CONFIG_SNI_RM) += 0xffffffff80600000
else
load-$(CONFIG_SNI_RM) += 0xffffffff80030000
endif
-all-$(CONFIG_SNI_RM) := vmlinux.ecoff
+all-$(CONFIG_SNI_RM) := vmlinuz.ecoff
#
# Common TXx9
@@ -699,9 +712,23 @@ vmlinux.64: vmlinux
$(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
makeboot =$(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) $(1)
+makezboot =$(Q)$(MAKE) $(build)=arch/mips/boot/compressed \
+ VMLINUX_LOAD_ADDRESS=$(load-y) 32bit-bfd=$(32bit-bfd) $(1)
all: $(all-y)
+vmlinuz: vmlinux FORCE
+ +@$(call makezboot,$@)
+
+vmlinuz.bin: vmlinux
+ +@$(call makezboot,$@)
+
+vmlinuz.ecoff: vmlinux
+ +@$(call makezboot,$@)
+
+vmlinuz.srec: vmlinux
+ +@$(call makezboot,$@)
+
vmlinux.bin: $(vmlinux-32)
+@$(call makeboot,$@)
@@ -726,11 +753,13 @@ endif
install:
$(Q)install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE)
+ $(Q)install -D -m 755 vmlinuz $(INSTALL_PATH)/vmlinuz-$(KERNELRELEASE)
$(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE)
$(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE)
archclean:
@$(MAKE) $(clean)=arch/mips/boot
+ @$(MAKE) $(clean)=arch/mips/boot/compressed
@$(MAKE) $(clean)=arch/mips/lasat
define archhelp
@@ -738,10 +767,18 @@ define archhelp
echo ' vmlinux.ecoff - ECOFF boot image'
echo ' vmlinux.bin - Raw binary boot image'
echo ' vmlinux.srec - SREC boot image'
+ echo ' vmlinuz - Compressed boot(zboot) image'
+ echo ' vmlinuz.ecoff - ECOFF zboot image'
+ echo ' vmlinuz.bin - Raw binary zboot image'
+ echo ' vmlinuz.srec - SREC zboot image'
echo
echo ' These will be default as appropriate for a configured platform.'
endef
CLEAN_FILES += vmlinux.32 \
vmlinux.64 \
- vmlinux.ecoff
+ vmlinux.ecoff \
+ vmlinuz \
+ vmlinuz.ecoff \
+ vmlinuz.bin \
+ vmlinuz.srec
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 835f3f0319c..85169c08d8d 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -505,7 +505,7 @@ static int __init ar7_register_devices(void)
int res;
u32 *bootcr, val;
#ifdef CONFIG_SERIAL_8250
- static struct uart_port uart_port[2];
+ static struct uart_port uart_port[2] __initdata;
memset(uart_port, 0, sizeof(struct uart_port) * 2);
diff --git a/arch/mips/basler/excite/Kconfig b/arch/mips/basler/excite/Kconfig
deleted file mode 100644
index ba506075608..00000000000
--- a/arch/mips/basler/excite/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config BASLER_EXCITE_PROTOTYPE
- bool "Support for pre-release units"
- depends on BASLER_EXCITE
- default n
- help
- Pre-series (prototype) units are different from later ones in
- some ways. Select this option if you have one of these. Please
- note that a kernel built with this option selected will not be
- able to run on normal units.
diff --git a/arch/mips/basler/excite/Makefile b/arch/mips/basler/excite/Makefile
deleted file mode 100644
index cff29cf46d0..00000000000
--- a/arch/mips/basler/excite/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for Basler eXcite
-#
-
-obj-$(CONFIG_BASLER_EXCITE) += excite_irq.o excite_prom.o excite_setup.o \
- excite_device.o excite_procfs.o
-
-obj-m += excite_iodev.o
diff --git a/arch/mips/basler/excite/excite_device.c b/arch/mips/basler/excite/excite_device.c
deleted file mode 100644
index e00bc2d7f30..00000000000
--- a/arch/mips/basler/excite/excite_device.c
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright (C) 2004 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/err.h>
-#include <linux/jiffies.h>
-#include <linux/sched.h>
-#include <asm/types.h>
-#include <asm/rm9k-ocd.h>
-
-#include <excite.h>
-#include <rm9k_eth.h>
-#include <rm9k_wdt.h>
-#include <rm9k_xicap.h>
-#include <excite_nandflash.h>
-
-#include "excite_iodev.h"
-
-#define RM9K_GE_UNIT 0
-#define XICAP_UNIT 0
-#define NAND_UNIT 0
-
-#define DLL_TIMEOUT 3 /* seconds */
-
-
-#define RINIT(__start__, __end__, __name__, __parent__) { \
- .name = __name__ "_0", \
- .start = (__start__), \
- .end = (__end__), \
- .flags = 0, \
- .parent = (__parent__) \
-}
-
-#define RINIT_IRQ(__irq__, __name__) { \
- .name = __name__ "_0", \
- .start = (__irq__), \
- .end = (__irq__), \
- .flags = IORESOURCE_IRQ, \
- .parent = NULL \
-}
-
-
-
-enum {
- slice_xicap,
- slice_eth
-};
-
-
-
-static struct resource
- excite_ctr_resource __maybe_unused = {
- .name = "GPI counters",
- .start = 0,
- .end = 5,
- .flags = 0,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- },
- excite_gpislice_resource __maybe_unused = {
- .name = "GPI slices",
- .start = 0,
- .end = 1,
- .flags = 0,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- },
- excite_mdio_channel_resource __maybe_unused = {
- .name = "MDIO channels",
- .start = 0,
- .end = 1,
- .flags = 0,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- },
- excite_fifomem_resource __maybe_unused = {
- .name = "FIFO memory",
- .start = 0,
- .end = 767,
- .flags = 0,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- },
- excite_scram_resource __maybe_unused = {
- .name = "Scratch RAM",
- .start = EXCITE_PHYS_SCRAM,
- .end = EXCITE_PHYS_SCRAM + EXCITE_SIZE_SCRAM - 1,
- .flags = IORESOURCE_MEM,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- },
- excite_fpga_resource __maybe_unused = {
- .name = "System FPGA",
- .start = EXCITE_PHYS_FPGA,
- .end = EXCITE_PHYS_FPGA + EXCITE_SIZE_FPGA - 1,
- .flags = IORESOURCE_MEM,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- },
- excite_nand_resource __maybe_unused = {
- .name = "NAND flash control",
- .start = EXCITE_PHYS_NAND,
- .end = EXCITE_PHYS_NAND + EXCITE_SIZE_NAND - 1,
- .flags = IORESOURCE_MEM,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- },
- excite_titan_resource __maybe_unused = {
- .name = "TITAN registers",
- .start = EXCITE_PHYS_TITAN,
- .end = EXCITE_PHYS_TITAN + EXCITE_SIZE_TITAN - 1,
- .flags = IORESOURCE_MEM,
- .parent = NULL,
- .sibling = NULL,
- .child = NULL
- };
-
-
-
-static void adjust_resources(struct resource *res, unsigned int n)
-{
- struct resource *p;
- const unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM
- | IORESOURCE_IRQ | IORESOURCE_DMA;
-
- for (p = res; p < res + n; p++) {
- const struct resource * const parent = p->parent;
- if (parent) {
- p->start += parent->start;
- p->end += parent->start;
- p->flags = parent->flags & mask;
- }
- }
-}
-
-
-
-#if defined(CONFIG_EXCITE_FCAP_GPI) || defined(CONFIG_EXCITE_FCAP_GPI_MODULE)
-static struct resource xicap_rsrc[] = {
- RINIT(0x4840, 0x486f, XICAP_RESOURCE_FIFO_RX, &excite_titan_resource),
- RINIT(0x4940, 0x494b, XICAP_RESOURCE_FIFO_TX, &excite_titan_resource),
- RINIT(0x5040, 0x5127, XICAP_RESOURCE_XDMA, &excite_titan_resource),
- RINIT(0x1000, 0x112f, XICAP_RESOURCE_PKTPROC, &excite_titan_resource),
- RINIT(0x1100, 0x110f, XICAP_RESOURCE_PKT_STREAM, &excite_fpga_resource),
- RINIT(0x0800, 0x0bff, XICAP_RESOURCE_DMADESC, &excite_scram_resource),
- RINIT(slice_xicap, slice_xicap, XICAP_RESOURCE_GPI_SLICE, &excite_gpislice_resource),
- RINIT(0x0100, 0x02ff, XICAP_RESOURCE_FIFO_BLK, &excite_fifomem_resource),
- RINIT_IRQ(TITAN_IRQ, XICAP_RESOURCE_IRQ)
-};
-
-static struct platform_device xicap_pdev = {
- .name = XICAP_NAME,
- .id = XICAP_UNIT,
- .num_resources = ARRAY_SIZE(xicap_rsrc),
- .resource = xicap_rsrc
-};
-
-/*
- * Create a platform device for the GPI port that receives the
- * image data from the embedded camera.
- */
-static int __init xicap_devinit(void)
-{
- unsigned long tend;
- u32 reg;
- int retval;
-
- adjust_resources(xicap_rsrc, ARRAY_SIZE(xicap_rsrc));
-
- /* Power up the slice and configure it. */
- reg = titan_readl(CPTC1R);
- reg &= ~(0x11100 << slice_xicap);
- titan_writel(reg, CPTC1R);
-
- /* Enable slice & DLL. */
-	reg = titan_readl(CPRR);
- reg &= ~(0x00030003 << (slice_xicap * 2));
- titan_writel(reg, CPRR);
-
- /* Wait for DLLs to lock */
- tend = jiffies + DLL_TIMEOUT * HZ;
- while (time_before(jiffies, tend)) {
- if (!(~titan_readl(CPDSR) & (0x1 << (slice_xicap * 4))))
- break;
- yield();
- }
-
- if (~titan_readl(CPDSR) & (0x1 << (slice_xicap * 4))) {
- printk(KERN_ERR "%s: DLL not locked after %u seconds\n",
- xicap_pdev.name, DLL_TIMEOUT);
- retval = -ETIME;
- } else {
- /* Register platform device */
- retval = platform_device_register(&xicap_pdev);
- }
-
- return retval;
-}
-
-device_initcall(xicap_devinit);
-#endif /* defined(CONFIG_EXCITE_FCAP_GPI) || defined(CONFIG_EXCITE_FCAP_GPI_MODULE) */
-
-
-
-#if defined(CONFIG_WDT_RM9K_GPI) || defined(CONFIG_WDT_RM9K_GPI_MODULE)
-static struct resource wdt_rsrc[] = {
- RINIT(0, 0, WDT_RESOURCE_COUNTER, &excite_ctr_resource),
- RINIT(0x0084, 0x008f, WDT_RESOURCE_REGS, &excite_titan_resource),
- RINIT_IRQ(TITAN_IRQ, WDT_RESOURCE_IRQ)
-};
-
-static struct platform_device wdt_pdev = {
- .name = WDT_NAME,
- .id = -1,
- .num_resources = ARRAY_SIZE(wdt_rsrc),
- .resource = wdt_rsrc
-};
-
-/*
- * Create a platform device for the on-chip watchdog timer
- * (RM9K GPI watchdog).
- */
-static int __init wdt_devinit(void)
-{
- adjust_resources(wdt_rsrc, ARRAY_SIZE(wdt_rsrc));
- return platform_device_register(&wdt_pdev);
-}
-
-device_initcall(wdt_devinit);
-#endif /* defined(CONFIG_WDT_RM9K_GPI) || defined(CONFIG_WDT_RM9K_GPI_MODULE) */
-
-
-
-static struct resource excite_nandflash_rsrc[] = {
- RINIT(0x2000, 0x201f, EXCITE_NANDFLASH_RESOURCE_REGS, &excite_nand_resource)
-};
-
-static struct platform_device excite_nandflash_pdev = {
- .name = "excite_nand",
- .id = NAND_UNIT,
- .num_resources = ARRAY_SIZE(excite_nandflash_rsrc),
- .resource = excite_nandflash_rsrc
-};
-
-/*
- * Create a platform device for access to the NAND flash
- * port.
- */
-static int __init excite_nandflash_devinit(void)
-{
- adjust_resources(excite_nandflash_rsrc, ARRAY_SIZE(excite_nandflash_rsrc));
-
- /* nothing to be done here */
-
- /* Register platform device */
- return platform_device_register(&excite_nandflash_pdev);
-}
-
-device_initcall(excite_nandflash_devinit);
-
-
-
-static struct resource iodev_rsrc[] = {
- RINIT_IRQ(FPGA1_IRQ, IODEV_RESOURCE_IRQ)
-};
-
-static struct platform_device io_pdev = {
- .name = IODEV_NAME,
- .id = -1,
- .num_resources = ARRAY_SIZE(iodev_rsrc),
- .resource = iodev_rsrc
-};
-
-/*
- * Create a platform device for the external I/O ports.
- */
-static int __init io_devinit(void)
-{
- adjust_resources(iodev_rsrc, ARRAY_SIZE(iodev_rsrc));
- return platform_device_register(&io_pdev);
-}
-
-device_initcall(io_devinit);
-
-
-
-
-#if defined(CONFIG_RM9K_GE) || defined(CONFIG_RM9K_GE_MODULE)
-static struct resource rm9k_ge_rsrc[] = {
- RINIT(0x2200, 0x27ff, RM9K_GE_RESOURCE_MAC, &excite_titan_resource),
- RINIT(0x1800, 0x1fff, RM9K_GE_RESOURCE_MSTAT, &excite_titan_resource),
- RINIT(0x2000, 0x212f, RM9K_GE_RESOURCE_PKTPROC, &excite_titan_resource),
- RINIT(0x5140, 0x5227, RM9K_GE_RESOURCE_XDMA, &excite_titan_resource),
- RINIT(0x4870, 0x489f, RM9K_GE_RESOURCE_FIFO_RX, &excite_titan_resource),
- RINIT(0x494c, 0x4957, RM9K_GE_RESOURCE_FIFO_TX, &excite_titan_resource),
- RINIT(0x0000, 0x007f, RM9K_GE_RESOURCE_FIFOMEM_RX, &excite_fifomem_resource),
- RINIT(0x0080, 0x00ff, RM9K_GE_RESOURCE_FIFOMEM_TX, &excite_fifomem_resource),
- RINIT(0x0180, 0x019f, RM9K_GE_RESOURCE_PHY, &excite_titan_resource),
- RINIT(0x0000, 0x03ff, RM9K_GE_RESOURCE_DMADESC_RX, &excite_scram_resource),
- RINIT(0x0400, 0x07ff, RM9K_GE_RESOURCE_DMADESC_TX, &excite_scram_resource),
- RINIT(slice_eth, slice_eth, RM9K_GE_RESOURCE_GPI_SLICE, &excite_gpislice_resource),
- RINIT(0, 0, RM9K_GE_RESOURCE_MDIO_CHANNEL, &excite_mdio_channel_resource),
- RINIT_IRQ(TITAN_IRQ, RM9K_GE_RESOURCE_IRQ_MAIN),
- RINIT_IRQ(PHY_IRQ, RM9K_GE_RESOURCE_IRQ_PHY)
-};
-
-static struct platform_device rm9k_ge_pdev = {
- .name = RM9K_GE_NAME,
- .id = RM9K_GE_UNIT,
- .num_resources = ARRAY_SIZE(rm9k_ge_rsrc),
- .resource = rm9k_ge_rsrc
-};
-
-
-
-/*
- * Create a platform device for the Ethernet port.
- */
-static int __init rm9k_ge_devinit(void)
-{
- u32 reg;
-
- adjust_resources(rm9k_ge_rsrc, ARRAY_SIZE(rm9k_ge_rsrc));
-
- /* Power up the slice and configure it. */
- reg = titan_readl(CPTC1R);
- reg &= ~(0x11000 << slice_eth);
- reg |= 0x100 << slice_eth;
- titan_writel(reg, CPTC1R);
-
- /* Take the MAC out of reset, reset the DLLs. */
- reg = titan_readl(CPRR);
- reg &= ~(0x00030000 << (slice_eth * 2));
- reg |= 0x3 << (slice_eth * 2);
- titan_writel(reg, CPRR);
-
- return platform_device_register(&rm9k_ge_pdev);
-}
-
-device_initcall(rm9k_ge_devinit);
-#endif /* defined(CONFIG_RM9K_GE) || defined(CONFIG_RM9K_GE_MODULE) */
-
-
-
-static int __init excite_setup_devs(void)
-{
- int res;
- u32 reg;
-
- /* Enable xdma and fifo interrupts */
- reg = titan_readl(0x0050);
- titan_writel(reg | 0x18000000, 0x0050);
-
- res = request_resource(&iomem_resource, &excite_titan_resource);
- if (res)
- return res;
- res = request_resource(&iomem_resource, &excite_scram_resource);
- if (res)
- return res;
- res = request_resource(&iomem_resource, &excite_fpga_resource);
- if (res)
- return res;
- res = request_resource(&iomem_resource, &excite_nand_resource);
- if (res)
- return res;
- excite_fpga_resource.flags = excite_fpga_resource.parent->flags &
- ( IORESOURCE_IO | IORESOURCE_MEM
- | IORESOURCE_IRQ | IORESOURCE_DMA);
- excite_nand_resource.flags = excite_nand_resource.parent->flags &
- ( IORESOURCE_IO | IORESOURCE_MEM
- | IORESOURCE_IRQ | IORESOURCE_DMA);
-
- return 0;
-}
-
-arch_initcall(excite_setup_devs);
-
diff --git a/arch/mips/basler/excite/excite_iodev.c b/arch/mips/basler/excite/excite_iodev.c
deleted file mode 100644
index 938b1d0b765..00000000000
--- a/arch/mips/basler/excite/excite_iodev.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2005 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/compiler.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/poll.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
-
-#include "excite_iodev.h"
-
-
-
-static const struct resource *iodev_get_resource(struct platform_device *, const char *, unsigned int);
-static int __init iodev_probe(struct platform_device *);
-static int __exit iodev_remove(struct platform_device *);
-static int iodev_open(struct inode *, struct file *);
-static int iodev_release(struct inode *, struct file *);
-static ssize_t iodev_read(struct file *, char __user *, size_t s, loff_t *);
-static unsigned int iodev_poll(struct file *, struct poll_table_struct *);
-static irqreturn_t iodev_irqhdl(int, void *);
-
-
-
-static const char iodev_name[] = "iodev";
-static unsigned int iodev_irq;
-static DECLARE_WAIT_QUEUE_HEAD(wq);
-
-
-
-static const struct file_operations fops =
-{
- .owner = THIS_MODULE,
- .open = iodev_open,
- .release = iodev_release,
- .read = iodev_read,
- .poll = iodev_poll
-};
-
-static struct miscdevice miscdev =
-{
- .minor = MISC_DYNAMIC_MINOR,
- .name = iodev_name,
- .fops = &fops
-};
-
-static struct platform_driver iodev_driver = {
- .driver = {
- .name = iodev_name,
- .owner = THIS_MODULE,
- },
- .probe = iodev_probe,
- .remove = __devexit_p(iodev_remove),
-};
-
-
-
-static const struct resource *
-iodev_get_resource(struct platform_device *pdv, const char *name,
- unsigned int type)
-{
- char buf[80];
- if (snprintf(buf, sizeof buf, "%s_0", name) >= sizeof buf)
- return NULL;
- return platform_get_resource_byname(pdv, type, buf);
-}
-
-
-
-/* No hotplugging on the platform bus - use __init */
-static int __init iodev_probe(struct platform_device *dev)
-{
- const struct resource * const ri =
- iodev_get_resource(dev, IODEV_RESOURCE_IRQ, IORESOURCE_IRQ);
-
- if (unlikely(!ri))
- return -ENXIO;
-
- iodev_irq = ri->start;
- return misc_register(&miscdev);
-}
-
-
-
-static int __exit iodev_remove(struct platform_device *dev)
-{
- return misc_deregister(&miscdev);
-}
-
-static int iodev_open(struct inode *i, struct file *f)
-{
- int ret;
-
- ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
- iodev_name, &miscdev);
-
- return ret;
-}
-
-static int iodev_release(struct inode *i, struct file *f)
-{
- free_irq(iodev_irq, &miscdev);
- return 0;
-}
-
-
-
-
-static ssize_t
-iodev_read(struct file *f, char __user *d, size_t s, loff_t *o)
-{
- ssize_t ret;
- DEFINE_WAIT(w);
-
- prepare_to_wait(&wq, &w, TASK_INTERRUPTIBLE);
- if (!signal_pending(current))
- schedule();
- ret = signal_pending(current) ? -ERESTARTSYS : 0;
- finish_wait(&wq, &w);
- return ret;
-}
-
-
-static unsigned int iodev_poll(struct file *f, struct poll_table_struct *p)
-{
- poll_wait(f, &wq, p);
- return POLLOUT | POLLWRNORM;
-}
-
-static irqreturn_t iodev_irqhdl(int irq, void *ctxt)
-{
- wake_up(&wq);
-
- return IRQ_HANDLED;
-}
-
-static int __init iodev_init_module(void)
-{
- return platform_driver_register(&iodev_driver);
-}
-
-
-
-static void __exit iodev_cleanup_module(void)
-{
- platform_driver_unregister(&iodev_driver);
-}
-
-module_init(iodev_init_module);
-module_exit(iodev_cleanup_module);
-
-
-
-MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
-MODULE_DESCRIPTION("Basler eXcite i/o interrupt handler");
-MODULE_VERSION("0.0");
-MODULE_LICENSE("GPL");
diff --git a/arch/mips/basler/excite/excite_iodev.h b/arch/mips/basler/excite/excite_iodev.h
deleted file mode 100644
index cbfbb5d2ee6..00000000000
--- a/arch/mips/basler/excite/excite_iodev.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __EXCITE_IODEV_H__
-#define __EXCITE_IODEV_H__
-
-/* Device name */
-#define IODEV_NAME "iodev"
-
-/* Resource names */
-#define IODEV_RESOURCE_IRQ "excite_iodev_irq"
-
-#endif /* __EXCITE_IODEV_H__ */
diff --git a/arch/mips/basler/excite/excite_irq.c b/arch/mips/basler/excite/excite_irq.c
deleted file mode 100644
index 934e0a6b101..00000000000
--- a/arch/mips/basler/excite/excite_irq.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslereb.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/bitops.h>
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
-#include <asm/rm9k-ocd.h>
-
-#include <excite.h>
-
-extern asmlinkage void excite_handle_int(void);
-
-/*
- * Initialize the interrupt handler
- */
-void __init arch_init_irq(void)
-{
- mips_cpu_irq_init();
- rm7k_cpu_irq_init();
- rm9k_cpu_irq_init();
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- const u32
- interrupts = read_c0_cause() >> 8,
- mask = ((read_c0_status() >> 8) & 0x000000ff) |
- (read_c0_intcontrol() & 0x0000ff00),
- pending = interrupts & mask;
- u32 msgintflags, msgintmask, msgint;
-
- /* process timer interrupt */
- if (pending & (1 << TIMER_IRQ)) {
- do_IRQ(TIMER_IRQ);
- return;
- }
-
- /* Process PCI interrupts */
-#if USB_IRQ < 10
- msgintflags = ocd_readl(INTP0Status0 + (USB_MSGINT / 0x20 * 0x10));
- msgintmask = ocd_readl(INTP0Mask0 + (USB_MSGINT / 0x20 * 0x10));
- msgint = msgintflags & msgintmask & (0x1 << (USB_MSGINT % 0x20));
- if ((pending & (1 << USB_IRQ)) && msgint) {
-#else
- if (pending & (1 << USB_IRQ)) {
-#endif
- do_IRQ(USB_IRQ);
- return;
- }
-
- /* Process TITAN interrupts */
- msgintflags = ocd_readl(INTP0Status0 + (TITAN_MSGINT / 0x20 * 0x10));
- msgintmask = ocd_readl(INTP0Mask0 + (TITAN_MSGINT / 0x20 * 0x10));
- msgint = msgintflags & msgintmask & (0x1 << (TITAN_MSGINT % 0x20));
- if ((pending & (1 << TITAN_IRQ)) && msgint) {
- ocd_writel(msgint, INTP0Clear0 + (TITAN_MSGINT / 0x20 * 0x10));
- do_IRQ(TITAN_IRQ);
- return;
- }
-
- /* Process FPGA line #0 interrupts */
- msgintflags = ocd_readl(INTP0Status0 + (FPGA0_MSGINT / 0x20 * 0x10));
- msgintmask = ocd_readl(INTP0Mask0 + (FPGA0_MSGINT / 0x20 * 0x10));
- msgint = msgintflags & msgintmask & (0x1 << (FPGA0_MSGINT % 0x20));
- if ((pending & (1 << FPGA0_IRQ)) && msgint) {
- do_IRQ(FPGA0_IRQ);
- return;
- }
-
- /* Process FPGA line #1 interrupts */
- msgintflags = ocd_readl(INTP0Status0 + (FPGA1_MSGINT / 0x20 * 0x10));
- msgintmask = ocd_readl(INTP0Mask0 + (FPGA1_MSGINT / 0x20 * 0x10));
- msgint = msgintflags & msgintmask & (0x1 << (FPGA1_MSGINT % 0x20));
- if ((pending & (1 << FPGA1_IRQ)) && msgint) {
- do_IRQ(FPGA1_IRQ);
- return;
- }
-
- /* Process PHY interrupts */
- msgintflags = ocd_readl(INTP0Status0 + (PHY_MSGINT / 0x20 * 0x10));
- msgintmask = ocd_readl(INTP0Mask0 + (PHY_MSGINT / 0x20 * 0x10));
- msgint = msgintflags & msgintmask & (0x1 << (PHY_MSGINT % 0x20));
- if ((pending & (1 << PHY_IRQ)) && msgint) {
- do_IRQ(PHY_IRQ);
- return;
- }
-
- /* Process spurious interrupts */
- spurious_interrupt();
-}
diff --git a/arch/mips/basler/excite/excite_procfs.c b/arch/mips/basler/excite/excite_procfs.c
deleted file mode 100644
index 08923e6825b..00000000000
--- a/arch/mips/basler/excite/excite_procfs.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2004, 2005 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- * Procfs support for Basler eXcite
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/stat.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/rm9k-ocd.h>
-
-#include <excite.h>
-
-static int excite_unit_id_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%06x", unit_id);
- return 0;
-}
-
-static int excite_unit_id_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, excite_unit_id_proc_show, NULL);
-}
-
-static const struct file_operations excite_unit_id_proc_fops = {
- .owner = THIS_MODULE,
- .open = excite_unit_id_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int
-excite_bootrom_read(char *page, char **start, off_t off, int count,
- int *eof, void *data)
-{
- void __iomem * src;
-
- if (off >= EXCITE_SIZE_BOOTROM) {
- *eof = 1;
- return 0;
- }
-
- if ((off + count) > EXCITE_SIZE_BOOTROM)
- count = EXCITE_SIZE_BOOTROM - off;
-
- src = ioremap(EXCITE_PHYS_BOOTROM + off, count);
- if (src) {
- memcpy_fromio(page, src, count);
- iounmap(src);
- *start = page;
- } else {
- count = -ENOMEM;
- }
-
- return count;
-}
-
-void excite_procfs_init(void)
-{
- /* Create & populate /proc/excite */
- struct proc_dir_entry * const pdir = proc_mkdir("excite", NULL);
- if (pdir) {
- struct proc_dir_entry * e;
-
- e = proc_create("unit_id", S_IRUGO, pdir,
- &excite_unit_id_proc_fops);
- if (e) e->size = 6;
-
- e = create_proc_read_entry("bootrom", S_IRUGO, pdir,
- excite_bootrom_read, NULL);
- if (e) e->size = EXCITE_SIZE_BOOTROM;
- }
-}
diff --git a/arch/mips/basler/excite/excite_prom.c b/arch/mips/basler/excite/excite_prom.c
deleted file mode 100644
index 68d8bc597e3..00000000000
--- a/arch/mips/basler/excite/excite_prom.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (C) 2004, 2005 by Thomas Koeller (thomas.koeller@baslerweb.com)
- * Based on the PMC-Sierra Yosemite board support by Ralf Baechle and
- * Manish Lachwani.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <asm/system.h>
-#include <asm/bootinfo.h>
-#include <asm/string.h>
-
-#include <excite.h>
-
-/* This struct is used by Redboot to pass arguments to the kernel */
-typedef struct
-{
- char *name;
- char *val;
-} t_env_var;
-
-struct parmblock {
- t_env_var memsize;
- t_env_var modetty0;
- t_env_var ethaddr;
- t_env_var env_end;
- char *argv[2];
- char text[0];
-};
-
-static unsigned int prom_argc;
-static const char ** prom_argv;
-static const t_env_var * prom_env;
-
-static void prom_halt(void) __attribute__((noreturn));
-static void prom_exit(void) __attribute__((noreturn));
-
-
-
-const char *get_system_type(void)
-{
- return "Basler eXcite";
-}
-
-/*
- * Halt the system
- */
-static void prom_halt(void)
-{
- printk(KERN_NOTICE "\n** System halted.\n");
- while (1)
- asm volatile (
- "\t.set\tmips3\n"
- "\twait\n"
- "\t.set\tmips0\n"
- );
-}
-
-/*
- * Reset the CPU and re-enter Redboot
- */
-static void prom_exit(void)
-{
- unsigned int i;
- volatile unsigned char * const flg =
- (volatile unsigned char *) (EXCITE_ADDR_FPGA + EXCITE_FPGA_DPR);
-
- /* Clear the watchdog reset flag, set the reboot flag */
- *flg &= ~0x01;
- *flg |= 0x80;
-
- for (i = 0; i < 10; i++) {
- *(volatile unsigned char *) (EXCITE_ADDR_FPGA + EXCITE_FPGA_SYSCTL) = 0x02;
- iob();
- mdelay(1000);
- }
-
- printk(KERN_NOTICE "Reset failed\n");
- prom_halt();
-}
-
-static const char __init *prom_getenv(char *name)
-{
- const t_env_var * p;
- for (p = prom_env; p->name != NULL; p++)
- if(strcmp(name, p->name) == 0)
- break;
- return p->val;
-}
-
-/*
- * Init routine which accepts the variables from Redboot
- */
-void __init prom_init(void)
-{
- const struct parmblock * const pb = (struct parmblock *) fw_arg2;
-
- prom_argc = fw_arg0;
- prom_argv = (const char **) fw_arg1;
- prom_env = &pb->memsize;
-
- /* Callbacks for halt, restart */
- _machine_restart = (void (*)(char *)) prom_exit;
- _machine_halt = prom_halt;
-
-#ifdef CONFIG_32BIT
- /* copy command line */
- strcpy(arcs_cmdline, prom_argv[1]);
- memsize = simple_strtol(prom_getenv("memsize"), NULL, 16);
- strcpy(modetty, prom_getenv("modetty0"));
-#endif /* CONFIG_32BIT */
-
-#ifdef CONFIG_64BIT
-# error 64 bit support not implemented
-#endif /* CONFIG_64BIT */
-}
-
-/* This is called from free_initmem(), so we need to provide it */
-void __init prom_free_prom_memory(void)
-{
- /* Nothing to do */
-}
diff --git a/arch/mips/basler/excite/excite_setup.c b/arch/mips/basler/excite/excite_setup.c
deleted file mode 100644
index d66b3b8edf2..00000000000
--- a/arch/mips/basler/excite/excite_setup.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2004, 2005 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- * Based on the PMC-Sierra Yosemite board support by Ralf Baechle and
- * Manish Lachwani.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/serial_8250.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#include <asm/bootinfo.h>
-#include <asm/mipsregs.h>
-#include <asm/pgtable-32.h>
-#include <asm/io.h>
-#include <asm/time.h>
-#include <asm/rm9k-ocd.h>
-
-#include <excite.h>
-
-#define TITAN_UART_CLK 25000000
-
-#if 1
-/* normal serial port assignment */
-#define REGBASE_SER0 0x0208
-#define REGBASE_SER1 0x0238
-#define MASK_SER0 0x1
-#define MASK_SER1 0x2
-#else
-/* serial ports swapped */
-#define REGBASE_SER0 0x0238
-#define REGBASE_SER1 0x0208
-#define MASK_SER0 0x2
-#define MASK_SER1 0x1
-#endif
-
-unsigned long memsize;
-char modetty[30];
-unsigned int titan_irq = TITAN_IRQ;
-static void __iomem * ctl_regs;
-u32 unit_id;
-
-volatile void __iomem * const ocd_base = (void *) (EXCITE_ADDR_OCD);
-volatile void __iomem * const titan_base = (void *) (EXCITE_ADDR_TITAN);
-
-/* Protect access to shared GPI registers */
-DEFINE_SPINLOCK(titan_lock);
-int titan_irqflags;
-
-
-/*
- * The eXcite platform uses the alternate timer interrupt
- *
- * FIXME: At the time of this writing cevt-r4k.c doesn't yet know how
- * to handle the alternate timer interrupt of the RM9000.
- */
-void __init plat_time_init(void)
-{
- const u32 modebit5 = ocd_readl(0x00e4);
- unsigned int mult = ((modebit5 >> 11) & 0x1f) + 2;
- unsigned int div = ((modebit5 >> 16) & 0x1f) + 2;
-
- if (div == 33)
- div = 1;
- mips_hpt_frequency = EXCITE_CPU_EXT_CLOCK * mult / div / 2;
-}
-
-static int __init excite_init_console(void)
-{
-#if defined(CONFIG_SERIAL_8250)
- static __initdata char serr[] =
- KERN_ERR "Serial port #%u setup failed\n";
- struct uart_port up;
-
- /* Take the DUART out of reset */
- titan_writel(0x00ff1cff, CPRR);
-
-#if (CONFIG_SERIAL_8250_NR_UARTS > 1)
- /* Enable both ports */
- titan_writel(MASK_SER0 | MASK_SER1, UACFG);
-#else
- /* Enable port #0 only */
- titan_writel(MASK_SER0, UACFG);
-#endif
-
- /*
- * Set up serial port #0. Do not use autodetection; the result is
- * not what we want.
- */
- memset(&up, 0, sizeof(up));
- up.membase = (char *) titan_addr(REGBASE_SER0);
- up.irq = TITAN_IRQ;
- up.uartclk = TITAN_UART_CLK;
- up.regshift = 0;
- up.iotype = UPIO_RM9000;
- up.type = PORT_RM9000;
- up.flags = UPF_SHARE_IRQ;
- up.line = 0;
- if (early_serial_setup(&up))
- printk(serr, up.line);
-
-#if CONFIG_SERIAL_8250_NR_UARTS > 1
- /* And now for port #1. */
- up.membase = (char *) titan_addr(REGBASE_SER1);
- up.line = 1;
- if (early_serial_setup(&up))
- printk(serr, up.line);
-#endif /* CONFIG_SERIAL_8250_NR_UARTS > 1 */
-#else
- /* Leave the DUART in reset */
- titan_writel(0x00ff3cff, CPRR);
-#endif /* defined(CONFIG_SERIAL_8250) */
-
- return 0;
-}
-
-static int __init excite_platform_init(void)
-{
- unsigned int i;
- unsigned char buf[3];
- u8 reg;
- void __iomem * dpr;
-
- /* BIU buffer allocations */
- ocd_writel(8, CPURSLMT); /* CPU */
- titan_writel(4, CPGRWL); /* GPI / Ethernet */
-
- /* Map control registers located in FPGA */
- ctl_regs = ioremap_nocache(EXCITE_PHYS_FPGA + EXCITE_FPGA_SYSCTL, 16);
- if (!ctl_regs)
- panic("eXcite: failed to map platform control registers\n");
- memcpy_fromio(buf, ctl_regs + 2, ARRAY_SIZE(buf));
- unit_id = buf[0] | (buf[1] << 8) | (buf[2] << 16);
-
- /* Clear the reboot flag */
- dpr = ioremap_nocache(EXCITE_PHYS_FPGA + EXCITE_FPGA_DPR, 1);
- reg = __raw_readb(dpr);
- __raw_writeb(reg & 0x7f, dpr);
- iounmap(dpr);
-
- /* Interrupt controller setup */
- for (i = INTP0Status0; i < INTP0Status0 + 0x80; i += 0x10) {
- ocd_writel(0x00000000, i + 0x04);
- ocd_writel(0xffffffff, i + 0x0c);
- }
- ocd_writel(0x2, NMICONFIG);
-
- ocd_writel(0x1 << (TITAN_MSGINT % 0x20),
- INTP0Mask0 + (0x10 * (TITAN_MSGINT / 0x20)));
- ocd_writel((0x1 << (FPGA0_MSGINT % 0x20))
- | ocd_readl(INTP0Mask0 + (0x10 * (FPGA0_MSGINT / 0x20))),
- INTP0Mask0 + (0x10 * (FPGA0_MSGINT / 0x20)));
- ocd_writel((0x1 << (FPGA1_MSGINT % 0x20))
- | ocd_readl(INTP0Mask0 + (0x10 * (FPGA1_MSGINT / 0x20))),
- INTP0Mask0 + (0x10 * (FPGA1_MSGINT / 0x20)));
- ocd_writel((0x1 << (PHY_MSGINT % 0x20))
- | ocd_readl(INTP0Mask0 + (0x10 * (PHY_MSGINT / 0x20))),
- INTP0Mask0 + (0x10 * (PHY_MSGINT / 0x20)));
-#if USB_IRQ < 10
- ocd_writel((0x1 << (USB_MSGINT % 0x20))
- | ocd_readl(INTP0Mask0 + (0x10 * (USB_MSGINT / 0x20))),
- INTP0Mask0 + (0x10 * (USB_MSGINT / 0x20)));
-#endif
- /* Enable the packet FIFO, XDMA and XDMA arbiter */
- titan_writel(0x00ff18ff, CPRR);
-
- /*
-	 * Set up the PADMUX. Power down all Ethernet slices;
-	 * they will be powered up and configured at device startup.
- */
- titan_writel(0x00878206, CPTC1R);
- titan_writel(0x00001100, CPTC0R); /* latch PADMUX, enable WCIMODE */
-
- /* Reset and enable the FIFO block */
- titan_writel(0x00000001, SDRXFCIE);
- titan_writel(0x00000001, SDTXFCIE);
- titan_writel(0x00000100, SDRXFCIE);
- titan_writel(0x00000000, SDTXFCIE);
-
- /*
- * Initialize the common interrupt shared by all components of
- * the GPI/Ethernet subsystem.
- */
- titan_writel((EXCITE_PHYS_OCD >> 12), CPCFG0);
- titan_writel(TITAN_MSGINT, CPCFG1);
-
- /*
- * XDMA configuration.
- * In order for the XDMA to be sharable among multiple drivers,
- * the setup must be done here in the platform. The reason is that
- * this setup can only be done while the XDMA is in reset. If this
- * were done in a driver, it would interrupt all other drivers
- * using the XDMA.
- */
- titan_writel(0x80021dff, GXCFG); /* XDMA reset */
- titan_writel(0x00000000, CPXCISRA);
- titan_writel(0x00000000, CPXCISRB); /* clear pending interrupts */
-#if defined(CONFIG_HIGHMEM)
-# error change for HIGHMEM support!
-#else
- titan_writel(0x00000000, GXDMADRPFX); /* buffer address prefix */
-#endif
- titan_writel(0, GXDMA_DESCADR);
-
- for (i = 0x5040; i <= 0x5300; i += 0x0040)
- titan_writel(0x80080000, i); /* reset channel */
-
- titan_writel((0x1 << 29) /* no sparse tx descr. */
- | (0x1 << 28) /* no sparse rx descr. */
- | (0x1 << 23) | (0x1 << 24) /* descriptor coherency */
- | (0x1 << 21) | (0x1 << 22) /* data coherency */
- | (0x1 << 17)
- | 0x1dff,
- GXCFG);
-
-#if defined(CONFIG_SMP)
-# error No SMP support
-#else
- /* All interrupts go to core #0 only. */
- titan_writel(0x1f007fff, CPDST0A);
- titan_writel(0x00000000, CPDST0B);
- titan_writel(0x0000ff3f, CPDST1A);
- titan_writel(0x00000000, CPDST1B);
- titan_writel(0x00ffffff, CPXDSTA);
- titan_writel(0x00000000, CPXDSTB);
-#endif
-
- /* Enable DUART interrupts, disable everything else. */
- titan_writel(0x04000000, CPGIG0ER);
- titan_writel(0x000000c0, CPGIG1ER);
-
- excite_procfs_init();
- return 0;
-}
-
-void __init plat_mem_setup(void)
-{
- volatile u32 * const boot_ocd_base = (u32 *) 0xbf7fc000;
-
- /* Announce RAM to system */
- add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
-
- /* Set up the peripheral address map */
- *(boot_ocd_base + (LKB9 / sizeof(u32))) = 0;
- *(boot_ocd_base + (LKB10 / sizeof(u32))) = 0;
- *(boot_ocd_base + (LKB11 / sizeof(u32))) = 0;
- *(boot_ocd_base + (LKB12 / sizeof(u32))) = 0;
- wmb();
- *(boot_ocd_base + (LKB0 / sizeof(u32))) = EXCITE_PHYS_OCD >> 4;
- wmb();
-
- ocd_writel((EXCITE_PHYS_TITAN >> 4) | 0x1UL, LKB5);
- ocd_writel(((EXCITE_SIZE_TITAN >> 4) & 0x7fffff00) - 0x100, LKM5);
- ocd_writel((EXCITE_PHYS_SCRAM >> 4) | 0x1UL, LKB13);
- ocd_writel(((EXCITE_SIZE_SCRAM >> 4) & 0xffffff00) - 0x100, LKM13);
-
- /* Local bus slot #0 */
- ocd_writel(0x00040510, LDP0);
- ocd_writel((EXCITE_PHYS_BOOTROM >> 4) | 0x1UL, LKB9);
- ocd_writel(((EXCITE_SIZE_BOOTROM >> 4) & 0x03ffff00) - 0x100, LKM9);
-
- /* Local bus slot #2 */
- ocd_writel(0x00000330, LDP2);
- ocd_writel((EXCITE_PHYS_FPGA >> 4) | 0x1, LKB11);
- ocd_writel(((EXCITE_SIZE_FPGA >> 4) - 0x100) & 0x03ffff00, LKM11);
-
- /* Local bus slot #3 */
- ocd_writel(0x00123413, LDP3);
- ocd_writel((EXCITE_PHYS_NAND >> 4) | 0x1, LKB12);
- ocd_writel(((EXCITE_SIZE_NAND >> 4) - 0x100) & 0x03ffff00, LKM12);
-}
-
-
-
-console_initcall(excite_init_console);
-arch_initcall(excite_platform_init);
-
-EXPORT_SYMBOL(titan_lock);
-EXPORT_SYMBOL(titan_irqflags);
-EXPORT_SYMBOL(titan_irq);
-EXPORT_SYMBOL(ocd_base);
-EXPORT_SYMBOL(titan_base);
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index fb284c3b2cf..c51405e5792 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -100,11 +100,11 @@ static __init void prom_init_console(void)
static __init void prom_init_cmdline(void)
{
- static char buf[CL_SIZE] __initdata;
+ static char buf[COMMAND_LINE_SIZE] __initdata;
/* Get the kernel command line from CFE */
- if (cfe_getenv("LINUX_CMDLINE", buf, CL_SIZE) >= 0) {
- buf[CL_SIZE-1] = 0;
+ if (cfe_getenv("LINUX_CMDLINE", buf, COMMAND_LINE_SIZE) >= 0) {
+ buf[COMMAND_LINE_SIZE - 1] = 0;
strcpy(arcs_cmdline, buf);
}
@@ -112,13 +112,13 @@ static __init void prom_init_cmdline(void)
* as CFE is not available anymore later in the boot process. */
if ((strstr(arcs_cmdline, "console=")) == NULL) {
/* Try to read the default serial port used by CFE */
- if ((cfe_getenv("BOOT_CONSOLE", buf, CL_SIZE) < 0)
+ if ((cfe_getenv("BOOT_CONSOLE", buf, COMMAND_LINE_SIZE) < 0)
|| (strncmp("uart", buf, 4)))
/* Default to uart0 */
strcpy(buf, "uart0");
/* Compute the new command line */
- snprintf(arcs_cmdline, CL_SIZE, "%s console=ttyS%c,115200",
+ snprintf(arcs_cmdline, COMMAND_LINE_SIZE, "%s console=ttyS%c,115200",
arcs_cmdline, buf[4]);
}
}
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2a209d74f0b..094bc84765a 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -25,7 +25,7 @@ strip-flags = $(addprefix --remove-section=,$(drop-sections))
VMLINUX = vmlinux
-all: vmlinux.ecoff vmlinux.srec addinitrd
+all: vmlinux.ecoff vmlinux.srec
vmlinux.ecoff: $(obj)/elf2ecoff $(VMLINUX)
$(obj)/elf2ecoff $(VMLINUX) vmlinux.ecoff $(E2EFLAGS)
@@ -39,11 +39,7 @@ vmlinux.bin: $(VMLINUX)
vmlinux.srec: $(VMLINUX)
$(OBJCOPY) -S -O srec $(strip-flags) $(VMLINUX) $(obj)/vmlinux.srec
-$(obj)/addinitrd: $(obj)/addinitrd.c
- $(HOSTCC) -o $@ $^
-
-clean-files += addinitrd \
- elf2ecoff \
+clean-files += elf2ecoff \
vmlinux.bin \
vmlinux.ecoff \
vmlinux.srec
diff --git a/arch/mips/boot/addinitrd.c b/arch/mips/boot/addinitrd.c
deleted file mode 100644
index b5b3febc10c..00000000000
--- a/arch/mips/boot/addinitrd.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * addinitrd - program to add an initrd image to an ECOFF kernel
- *
- * (C) 1999 Thomas Bogendoerfer
- * minor modifications, cleanup: Guido Guenther <agx@sigxcpu.org>
- * further cleanup: Maciej W. Rozycki
- */
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <netinet/in.h>
-
-#include "ecoff.h"
-
-#define MIPS_PAGE_SIZE 4096
-#define MIPS_PAGE_MASK (MIPS_PAGE_SIZE-1)
-
-#define swab16(x) \
- ((unsigned short)( \
- (((unsigned short)(x) & (unsigned short)0x00ffU) << 8) | \
- (((unsigned short)(x) & (unsigned short)0xff00U) >> 8) ))
-
-#define swab32(x) \
- ((unsigned int)( \
- (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
- (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
- (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
- (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
-
-#define SWAB(a) (swab ? swab32(a) : (a))
-
-void die(char *s)
-{
- perror(s);
- exit(1);
-}
-
-int main(int argc, char *argv[])
-{
- int fd_vmlinux, fd_initrd, fd_outfile;
- FILHDR efile;
- AOUTHDR eaout;
- SCNHDR esecs[3];
- struct stat st;
- char buf[1024];
- unsigned long loadaddr;
- unsigned long initrd_header[2];
- int i, cnt;
- int swab = 0;
-
- if (argc != 4) {
- printf("Usage: %s <vmlinux> <initrd> <outfile>\n", argv[0]);
- exit(1);
- }
-
- if ((fd_vmlinux = open (argv[1], O_RDONLY)) < 0)
- die("open vmlinux");
- if (read (fd_vmlinux, &efile, sizeof efile) != sizeof efile)
- die("read file header");
- if (read (fd_vmlinux, &eaout, sizeof eaout) != sizeof eaout)
- die("read aout header");
- if (read (fd_vmlinux, esecs, sizeof esecs) != sizeof esecs)
- die("read section headers");
- /*
- * check whether the file is good for us
- */
- /* TBD */
-
- /*
-	 * check whether we have to swab words
- */
- if (ntohs(0xaa55) == 0xaa55) {
- if (efile.f_magic == swab16(MIPSELMAGIC))
- swab = 1;
- } else {
- if (efile.f_magic == swab16(MIPSEBMAGIC))
- swab = 1;
- }
-
- /* make sure we have an empty data segment for the initrd */
- if (eaout.dsize || esecs[1].s_size) {
- fprintf(stderr, "Data segment not empty. Giving up!\n");
- exit(1);
- }
- if ((fd_initrd = open (argv[2], O_RDONLY)) < 0)
- die("open initrd");
- if (fstat (fd_initrd, &st) < 0)
- die("fstat initrd");
- loadaddr = ((SWAB(esecs[2].s_vaddr) + SWAB(esecs[2].s_size)
- + MIPS_PAGE_SIZE-1) & ~MIPS_PAGE_MASK) - 8;
- if (loadaddr < (SWAB(esecs[2].s_vaddr) + SWAB(esecs[2].s_size)))
- loadaddr += MIPS_PAGE_SIZE;
- initrd_header[0] = SWAB(0x494E5244);
- initrd_header[1] = SWAB(st.st_size);
- eaout.dsize = esecs[1].s_size = initrd_header[1] = SWAB(st.st_size+8);
- eaout.data_start = esecs[1].s_vaddr = esecs[1].s_paddr = SWAB(loadaddr);
-
- if ((fd_outfile = open (argv[3], O_RDWR|O_CREAT|O_TRUNC, 0666)) < 0)
- die("open outfile");
- if (write (fd_outfile, &efile, sizeof efile) != sizeof efile)
- die("write file header");
- if (write (fd_outfile, &eaout, sizeof eaout) != sizeof eaout)
- die("write aout header");
- if (write (fd_outfile, esecs, sizeof esecs) != sizeof esecs)
- die("write section headers");
- /* skip padding */
- if(lseek(fd_vmlinux, SWAB(esecs[0].s_scnptr), SEEK_SET) == (off_t)-1)
- die("lseek vmlinux");
- if(lseek(fd_outfile, SWAB(esecs[0].s_scnptr), SEEK_SET) == (off_t)-1)
- die("lseek outfile");
- /* copy text segment */
- cnt = SWAB(eaout.tsize);
- while (cnt) {
- if ((i = read (fd_vmlinux, buf, sizeof buf)) <= 0)
- die("read vmlinux");
- if (write (fd_outfile, buf, i) != i)
- die("write vmlinux");
- cnt -= i;
- }
- if (write (fd_outfile, initrd_header, sizeof initrd_header) != sizeof initrd_header)
- die("write initrd header");
- while ((i = read (fd_initrd, buf, sizeof buf)) > 0)
- if (write (fd_outfile, buf, i) != i)
- die("write initrd");
- close(fd_vmlinux);
- close(fd_initrd);
- return 0;
-}
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
new file mode 100644
index 00000000000..e27f40bbd4e
--- /dev/null
+++ b/arch/mips/boot/compressed/Makefile
@@ -0,0 +1,100 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.
+#
+# Adapted for MIPS by Pete Popov, Dan Malek
+#
+# Copyright (C) 1994 by Linus Torvalds
+# Adapted for PowerPC by Gary Thomas
+# modified by Cort (cort@cs.nmt.edu)
+#
+# Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University
+# Author: Wu Zhangjin <wuzj@lemote.com>
+#
+
+# compressed kernel load addr: VMLINUZ_LOAD_ADDRESS > VMLINUX_LOAD_ADDRESS + VMLINUX_SIZE
+VMLINUX_SIZE := $(shell wc -c $(objtree)/$(KBUILD_IMAGE) 2>/dev/null | cut -d' ' -f1)
+VMLINUX_SIZE := $(shell [ -n "$(VMLINUX_SIZE)" ] && echo $$(($(VMLINUX_SIZE) + (65536 - $(VMLINUX_SIZE) % 65536))))
+VMLINUZ_LOAD_ADDRESS := 0x$(shell [ -n "$(VMLINUX_SIZE)" ] && printf %x $$(($(VMLINUX_LOAD_ADDRESS) + $(VMLINUX_SIZE))))
+
+# set the default size of the malloc area used for decompression
+BOOT_HEAP_SIZE := 0x400000
+
+# Disable Function Tracer
+KBUILD_CFLAGS := $(shell echo $(KBUILD_CFLAGS) | sed -e "s/-pg//")
+
+KBUILD_CFLAGS := $(LINUXINCLUDE) $(KBUILD_CFLAGS) -D__KERNEL__ \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull" \
+
+KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ -DKERNEL_ENTRY=0x$(shell $(NM) $(objtree)/$(KBUILD_IMAGE) 2>/dev/null | grep " kernel_entry" | cut -f1 -d \ ) \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE)
+
+obj-y := $(obj)/head.o $(obj)/decompress.o $(obj)/dbg.o
+
+obj-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
+
+OBJCOPYFLAGS_vmlinux.bin := $(OBJCOPYFLAGS) -O binary -R .comment -S
+$(obj)/vmlinux.bin: $(KBUILD_IMAGE)
+ $(call if_changed,objcopy)
+
+suffix_$(CONFIG_KERNEL_GZIP) = gz
+suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+suffix_$(CONFIG_KERNEL_LZMA) = lzma
+tool_$(CONFIG_KERNEL_GZIP) = gzip
+tool_$(CONFIG_KERNEL_BZIP2) = bzip2
+tool_$(CONFIG_KERNEL_LZMA) = lzma
+$(obj)/vmlinux.$(suffix_y): $(obj)/vmlinux.bin
+ $(call if_changed,$(tool_y))
+
+$(obj)/piggy.o: $(obj)/vmlinux.$(suffix_y) $(obj)/dummy.o
+ $(Q)$(OBJCOPY) $(OBJCOPYFLAGS) \
+ --add-section=.image=$< \
+ --set-section-flags=.image=contents,alloc,load,readonly,data \
+ $(obj)/dummy.o $@
+
+LDFLAGS_vmlinuz := $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T
+vmlinuz: $(src)/ld.script $(obj-y) $(obj)/piggy.o
+ $(call if_changed,ld)
+ $(Q)$(OBJCOPY) $(OBJCOPYFLAGS) -R .comment -R .stab -R .stabstr -R .initrd -R .sysmap $@
+
+#
+# Some DECstations need all possible sections of an ECOFF executable
+#
+ifdef CONFIG_MACH_DECSTATION
+ E2EFLAGS = -a
+else
+ E2EFLAGS =
+endif
+
+# elf2ecoff can only handle 32-bit images
+
+ifdef CONFIG_32BIT
+ VMLINUZ = vmlinuz
+else
+ VMLINUZ = vmlinuz.32
+endif
+
+vmlinuz.32: vmlinuz
+ $(Q)$(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
+
+vmlinuz.ecoff: $(obj)/../elf2ecoff $(VMLINUZ)
+ $(Q)$(obj)/../elf2ecoff $(VMLINUZ) vmlinuz.ecoff $(E2EFLAGS)
+
+$(obj)/../elf2ecoff: $(src)/../elf2ecoff.c
+ $(Q)$(HOSTCC) -o $@ $^
+
+drop-sections = .reginfo .mdebug .comment .note .pdr .options .MIPS.options
+strip-flags = $(addprefix --remove-section=,$(drop-sections))
+
+OBJCOPYFLAGS_vmlinuz.bin := $(OBJCOPYFLAGS) -O binary $(strip-flags)
+vmlinuz.bin: vmlinuz
+ $(call if_changed,objcopy)
+
+OBJCOPYFLAGS_vmlinuz.srec := $(OBJCOPYFLAGS) -S -O srec $(strip-flags)
+vmlinuz.srec: vmlinuz
+ $(call if_changed,objcopy)
+
+clean:
+clean-files += *.o \
+ vmlinu*
diff --git a/arch/mips/boot/compressed/dbg.c b/arch/mips/boot/compressed/dbg.c
new file mode 100644
index 00000000000..ff4dc7a33a9
--- /dev/null
+++ b/arch/mips/boot/compressed/dbg.c
@@ -0,0 +1,37 @@
+/*
+ * MIPS-specific debug support for pre-boot environment
+ *
+ * NOTE: putc() is board specific. If your board has a 16550-compatible UART,
+ * please select SYS_SUPPORTS_ZBOOT_UART16550 for your machine; otherwise, you
+ * need to implement your own putc().
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+void __attribute__ ((weak)) putc(char c)
+{
+}
+
+void puts(const char *s)
+{
+ char c;
+ while ((c = *s++) != '\0') {
+ putc(c);
+ if (c == '\n')
+ putc('\r');
+ }
+}
+
+void puthex(unsigned long long val)
+{
+
+ unsigned char buf[10];
+ int i;
+ for (i = 7; i >= 0; i--) {
+ buf[i] = "0123456789ABCDEF"[val & 0x0F];
+ val >>= 4;
+ }
+ buf[8] = '\0';
+ puts(buf);
+}
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
new file mode 100644
index 00000000000..67330c2f731
--- /dev/null
+++ b/arch/mips/boot/compressed/decompress.c
@@ -0,0 +1,126 @@
+/*
+ * Misc. bootloader code for many machines.
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Matt Porter <mporter@mvista.com> Derived from
+ * arch/ppc/boot/prep/misc.c
+ *
+ * Copyright (C) 2009 Lemote, Inc. & Institute of Computing Technology
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <asm/addrspace.h>
+
+/* These two variables specify the free mem region
+ * that can be used as a temporary malloc area
+ */
+unsigned long free_mem_ptr;
+unsigned long free_mem_end_ptr;
+char *zimage_start;
+
+/* The linker tells us where the image is. */
+extern unsigned char __image_begin, __image_end;
+extern unsigned char __ramdisk_begin, __ramdisk_end;
+unsigned long initrd_size;
+
+/* debug interfaces */
+extern void puts(const char *s);
+extern void puthex(unsigned long long val);
+
+void error(char *x)
+{
+ puts("\n\n");
+ puts(x);
+ puts("\n\n -- System halted");
+
+ while (1)
+ ; /* Halt */
+}
+
+/* activate the code for pre-boot environment */
+#define STATIC static
+
+#ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ int i;
+ const char *s = src;
+ char *d = dest;
+
+ for (i = 0; i < n; i++)
+ d[i] = s[i];
+ return dest;
+}
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+void *memset(void *s, int c, size_t n)
+{
+ int i;
+ char *ss = s;
+
+ for (i = 0; i < n; i++)
+ ss[i] = c;
+ return s;
+}
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+void decompress_kernel(unsigned long boot_heap_start)
+{
+ int zimage_size;
+
+ /*
+	 * We link ourselves to an arbitrary low address. When we run, we
+	 * relocate ourselves to that address. __image_begin points to
+	 * the part of the image where the zImage is. -- Tom
+ */
+ zimage_start = (char *)(unsigned long)(&__image_begin);
+ zimage_size = (unsigned long)(&__image_end) -
+ (unsigned long)(&__image_begin);
+
+ /*
+ * The zImage and initrd will be between start and _end, so they've
+ * already been moved once. We're good to go now. -- Tom
+ */
+ puts("zimage at: ");
+ puthex((unsigned long)zimage_start);
+ puts(" ");
+ puthex((unsigned long)(zimage_size + zimage_start));
+ puts("\n");
+
+ if (initrd_size) {
+ puts("initrd at: ");
+ puthex((unsigned long)(&__ramdisk_begin));
+ puts(" ");
+ puthex((unsigned long)(&__ramdisk_end));
+ puts("\n");
+ }
+
+	/* This area is reserved for malloc use during decompression */
+ free_mem_ptr = boot_heap_start;
+ free_mem_end_ptr = boot_heap_start + BOOT_HEAP_SIZE;
+
+ /* Display standard Linux/MIPS boot prompt for kernel args */
+ puts("Uncompressing Linux at load address ");
+ puthex(VMLINUX_LOAD_ADDRESS_ULL);
+ puts("\n");
+	/* Decompress the kernel with the selected algorithm */
+ decompress(zimage_start, zimage_size, 0, 0,
+ (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
+ /* FIXME: is there a need to flush cache here? */
+ puts("Now, booting the kernel...\n");
+}
diff --git a/arch/mips/boot/compressed/dummy.c b/arch/mips/boot/compressed/dummy.c
new file mode 100644
index 00000000000..31dbf45bf99
--- /dev/null
+++ b/arch/mips/boot/compressed/dummy.c
@@ -0,0 +1,4 @@
+int main(void)
+{
+ return 0;
+}
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
new file mode 100644
index 00000000000..4e65a8420be
--- /dev/null
+++ b/arch/mips/boot/compressed/head.S
@@ -0,0 +1,56 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995 Waldorf Electronics
+ * Written by Ralf Baechle and Andreas Busse
+ * Copyright (C) 1995 - 1999 Ralf Baechle
+ * Copyright (C) 1996 Paul M. Antoine
+ * Modified for DECStation and hence R3000 support by Paul M. Antoine
+ * Further modifications by David S. Miller and Harald Koerfgen
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+ .cprestore
+ LEAF(start)
+start:
+ /* Save boot rom start args */
+ move s0, a0
+ move s1, a1
+ move s2, a2
+ move s3, a3
+
+ /* Clear BSS */
+ PTR_LA a0, _edata
+ PTR_LA a2, _end
+1: sw zero, 0(a0)
+ bne a2, a0, 1b
+ addiu a0, a0, 4
+
+ PTR_LA a0, (.heap) /* heap address */
+ PTR_LA sp, (.stack + 8192) /* stack address */
+
+ PTR_LA ra, 2f
+ PTR_LA k0, decompress_kernel
+ jr k0
+ nop
+2:
+ move a0, s0
+ move a1, s1
+ move a2, s2
+ move a3, s3
+ PTR_LI k0, KERNEL_ENTRY
+ jr k0
+ nop
+3:
+ b 3b
+ nop
+ END(start)
+
+ .comm .heap,BOOT_HEAP_SIZE,4
+ .comm .stack,4096*2,4
diff --git a/arch/mips/boot/compressed/ld.script b/arch/mips/boot/compressed/ld.script
new file mode 100644
index 00000000000..29e9f4c0d5d
--- /dev/null
+++ b/arch/mips/boot/compressed/ld.script
@@ -0,0 +1,150 @@
+OUTPUT_ARCH(mips)
+ENTRY(start)
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ .init : { *(.init) } =0
+ .text :
+ {
+ _ftext = . ;
+ *(.text)
+ *(.rodata)
+ *(.rodata1)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ } =0
+ .kstrtab : { *(.kstrtab) }
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ __start___dbe_table = .; /* Exception table for data bus errors */
+ __dbe_table : { *(__dbe_table) }
+ __stop___dbe_table = .;
+
+ __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : { *(__ksymtab) }
+ __stop___ksymtab = .;
+
+ _etext = .;
+
+ . = ALIGN(8192);
+ .data.init_task : { *(.data.init_task) }
+
+ /* Startup code */
+ . = ALIGN(4096);
+ __init_begin = .;
+ .text.init : { *(.text.init) }
+ .data.init : { *(.data.init) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .setup.init : { *(.setup.init) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : { *(.initcall.init) }
+ __initcall_end = .;
+ . = ALIGN(4096); /* Align double page for init_task_union */
+ __init_end = .;
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ .fini : { *(.fini) } =0
+ .reginfo : { *(.reginfo) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. It would
+ be more correct to do this:
+ . = .;
+ The current expression does not correctly handle the case of a
+ text segment ending precisely at the end of a page; it causes the
+ data segment to skip a page. The above expression does not have
+ this problem, but it will currently (2/95) cause BFD to allocate
+ a single segment, combining both text and data, for this case.
+ This will prevent the text segment from being shared among
+ multiple executions of the program; I think that is more
+ important than losing a page of the virtual address space (note
+ that no actual memory is lost; the page which is skipped can not
+ be referenced). */
+ . = .;
+ .data :
+ {
+ _fdata = . ;
+ *(.data)
+
+ /* Put the compressed image here, so bss is on the end. */
+ __image_begin = .;
+ *(.image)
+ __image_end = .;
+ /* Align the initial ramdisk image (INITRD) on page boundaries. */
+ . = ALIGN(4096);
+ __ramdisk_begin = .;
+ *(.initrd)
+ __ramdisk_end = .;
+ . = ALIGN(4096);
+
+ CONSTRUCTORS
+ }
+ .data1 : { *(.data1) }
+ _gp = . + 0x8000;
+ .lit8 : { *(.lit8) }
+ .lit4 : { *(.lit4) }
+ .ctors : { *(.ctors) }
+ .dtors : { *(.dtors) }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata : { *(.sdata) }
+ . = ALIGN(4);
+ _edata = .;
+ PROVIDE (edata = .);
+
+ __bss_start = .;
+ _fbss = .;
+ .sbss : { *(.sbss) *(.scommon) }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ . = ALIGN(4);
+ _end = . ;
+ PROVIDE (end = .);
+ }
+
+ /* Sections to be discarded */
+ /DISCARD/ :
+ {
+ *(.text.exit)
+ *(.data.exit)
+ *(.exitcall.exit)
+ }
+
+ /* This is the MIPS specific mdebug section. */
+ .mdebug : { *(.mdebug) }
+ /* These are needed for ELF backends which have not yet been
+ converted to the new style linker. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ /* DWARF debug sections.
+ Symbols in the .debug DWARF section are relative to the beginning of the
+ section so we begin .debug at 0. It's not clear yet what needs to happen
+ for the others. */
+ .debug 0 : { *(.debug) }
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ .line 0 : { *(.line) }
+ /* These must appear regardless of . */
+ .gptab.sdata : { *(.gptab.data) *(.gptab.sdata) }
+ .gptab.sbss : { *(.gptab.bss) *(.gptab.sbss) }
+ .comment : { *(.comment) }
+ .note : { *(.note) }
+}
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
new file mode 100644
index 00000000000..c9caaf4fbf6
--- /dev/null
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -0,0 +1,43 @@
+/*
+ * 16550 compatible uart based serial debug support for zboot
+ */
+
+#include <linux/types.h>
+#include <linux/serial_reg.h>
+#include <linux/init.h>
+
+#include <asm/addrspace.h>
+
+#if defined(CONFIG_MACH_LOONGSON) || defined(CONFIG_MIPS_MALTA)
+#define UART_BASE 0x1fd003f8
+#define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))
+#endif
+
+#ifdef CONFIG_AR7
+#include <ar7.h>
+#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
+#endif
+
+#ifndef PORT
+#error please define the serial port address for your own machine
+#endif
+
+static inline unsigned int serial_in(int offset)
+{
+ return *((char *)PORT(offset));
+}
+
+static inline void serial_out(int offset, int value)
+{
+ *((char *)PORT(offset)) = value;
+}
+
+void putc(char c)
+{
+ int timeout = 1024;
+
+ while (((serial_in(UART_LSR) & UART_LSR_THRE) == 0) && (timeout-- > 0))
+ ;
+
+ serial_out(UART_TX, c);
+}
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 13943628052..3e9876317e6 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -9,7 +9,7 @@
# Copyright (C) 2005-2009 Cavium Networks
#
-obj-y := setup.o serial.o octeon-platform.o octeon-irq.o csrc-octeon.o
+obj-y := cpu.o setup.o serial.o octeon-platform.o octeon-irq.o csrc-octeon.o
obj-y += dma-octeon.o flash_setup.o
obj-y += octeon-memcpy.o
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
new file mode 100644
index 00000000000..b6df5387e85
--- /dev/null
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -0,0 +1,52 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/notifier.h>
+#include <linux/prefetch.h>
+#include <linux/sched.h>
+
+#include <asm/cop2.h>
+#include <asm/current.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/octeon/octeon.h>
+
+static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ unsigned long flags;
+ unsigned int status;
+
+ switch (action) {
+ case CU2_EXCEPTION:
+ prefetch(&current->thread.cp2);
+ local_irq_save(flags);
+ KSTK_STATUS(current) |= ST0_CU2;
+ status = read_c0_status();
+ write_c0_status(status | ST0_CU2);
+ octeon_cop2_restore(&(current->thread.cp2));
+ write_c0_status(status & ~ST0_CU2);
+ local_irq_restore(flags);
+
+ return NOTIFY_BAD; /* Don't call default notifier */
+ }
+
+ return NOTIFY_OK; /* Let default notifier send signals */
+}
+
+static struct notifier_block cnmips_cu2_notifier = {
+ .notifier_call = cnmips_cu2_call,
+};
+
+static int cnmips_cu2_setup(void)
+{
+ return register_cu2_notifier(&cnmips_cu2_notifier);
+}
+early_initcall(cnmips_cu2_setup);
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index be711dd2d91..cfdb4c2ac5c 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -159,6 +159,94 @@ out:
}
device_initcall(octeon_rng_device_init);
+/* Octeon SMI/MDIO interface. */
+static int __init octeon_mdiobus_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+
+ if (octeon_is_simulation())
+ return 0; /* No mdio in the simulator. */
+
+ /* The bus number is the platform_device id. */
+ pd = platform_device_alloc("mdio-octeon", 0);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+
+out:
+ return ret;
+
+}
+device_initcall(octeon_mdiobus_device_init);
+
+/* Octeon mgmt port Ethernet interface. */
+static int __init octeon_mgmt_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+ int port, num_ports;
+
+ struct resource mgmt_port_resource = {
+ .flags = IORESOURCE_IRQ,
+ .start = -1,
+ .end = -1
+ };
+
+ if (!OCTEON_IS_MODEL(OCTEON_CN56XX) && !OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+ num_ports = 1;
+ else
+ num_ports = 2;
+
+ for (port = 0; port < num_ports; port++) {
+ pd = platform_device_alloc("octeon_mgmt", port);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ switch (port) {
+ case 0:
+ mgmt_port_resource.start = OCTEON_IRQ_MII0;
+ break;
+ case 1:
+ mgmt_port_resource.start = OCTEON_IRQ_MII1;
+ break;
+ default:
+ BUG();
+ }
+ mgmt_port_resource.end = mgmt_port_resource.start;
+
+ ret = platform_device_add_resources(pd, &mgmt_port_resource, 1);
+
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+ }
+ return ret;
+fail:
+ platform_device_put(pd);
+
+out:
+ return ret;
+
+}
+device_initcall(octeon_mgmt_device_init);
+
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Platform driver for Octeon SOC");
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 35648302f7c..5a5b6ba7514 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -10,7 +10,6 @@ CONFIG_MIPS=y
#
# CONFIG_MACH_ALCHEMY is not set
CONFIG_AR7=y
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -265,7 +264,6 @@ CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
-CONFIG_PROBE_INITRD_HEADER=y
# CONFIG_FREEZER is not set
#
@@ -1053,7 +1051,9 @@ CONFIG_TRACING_SUPPORT=y
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 94b7d57f906..267bd46120b 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
CONFIG_BCM47XX=y
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1853,7 +1852,7 @@ CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index ea00c18d1f7..7fee0273c82 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
CONFIG_BCM63XX=y
# CONFIG_MIPS_COBALT is not set
@@ -942,7 +941,9 @@ CONFIG_TRACING_SUPPORT=y
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 13d9eb4736c..c2f06e38c85 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1237,7 +1236,7 @@ CONFIG_DEBUG_MUTEXES=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_SAMPLES is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_SB1XXX_CORELIS is not set
# CONFIG_RUNTIME_DEBUG is not set
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
index 185df23fd46..72b7e456916 100644
--- a/arch/mips/configs/capcella_defconfig
+++ b/arch/mips/configs/capcella_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -783,7 +782,9 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="mem=32M console=ttyVR0,38400"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/cavium-octeon_defconfig b/arch/mips/configs/cavium-octeon_defconfig
index 7afaa28a376..c8507bc8e92 100644
--- a/arch/mips/configs/cavium-octeon_defconfig
+++ b/arch/mips/configs/cavium-octeon_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -269,7 +268,6 @@ CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_CLASSIC_RCU=y
-# CONFIG_PROBE_INITRD_HEADER is not set
# CONFIG_FREEZER is not set
#
@@ -822,7 +820,7 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_RUNTIME_DEBUG is not set
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index 6c8cca8589b..49e61312e00 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
CONFIG_MIPS_COBALT=y
# CONFIG_MACH_DECSTATION is not set
@@ -1126,7 +1125,7 @@ CONFIG_FRAME_WARN=1024
# CONFIG_SLUB_STATS is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_SAMPLES is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
index dbdf3bb1a34..68e90cd6b2d 100644
--- a/arch/mips/configs/db1000_defconfig
+++ b/arch/mips/configs/db1000_defconfig
@@ -23,7 +23,6 @@ CONFIG_MIPS_DB1000=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1090,7 +1089,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/db1100_defconfig b/arch/mips/configs/db1100_defconfig
index fa681447589..90812830e94 100644
--- a/arch/mips/configs/db1100_defconfig
+++ b/arch/mips/configs/db1100_defconfig
@@ -23,7 +23,6 @@ CONFIG_MIPS_DB1100=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1090,7 +1089,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/db1200_defconfig b/arch/mips/configs/db1200_defconfig
index d73f1de43b5..dabf03032e0 100644
--- a/arch/mips/configs/db1200_defconfig
+++ b/arch/mips/configs/db1200_defconfig
@@ -23,7 +23,6 @@ CONFIG_MACH_ALCHEMY=y
# CONFIG_MIPS_DB1550 is not set
CONFIG_MIPS_DB1200=y
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1172,7 +1171,9 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="mem=48M"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/db1500_defconfig b/arch/mips/configs/db1500_defconfig
index ec3e028a5b2..a1513137313 100644
--- a/arch/mips/configs/db1500_defconfig
+++ b/arch/mips/configs/db1500_defconfig
@@ -23,7 +23,6 @@ CONFIG_MIPS_DB1500=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1390,7 +1389,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/db1550_defconfig b/arch/mips/configs/db1550_defconfig
index 7631dae51be..6b64339c001 100644
--- a/arch/mips/configs/db1550_defconfig
+++ b/arch/mips/configs/db1550_defconfig
@@ -23,7 +23,6 @@ CONFIG_MACH_ALCHEMY=y
CONFIG_MIPS_DB1550=y
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1207,7 +1206,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 9e65e6a2dcb..cbb4d86f291 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
CONFIG_MACH_DECSTATION=y
# CONFIG_MACH_JAZZ is not set
@@ -882,7 +881,7 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
index 1bd84d42b14..52968c46c80 100644
--- a/arch/mips/configs/e55_defconfig
+++ b/arch/mips/configs/e55_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -561,7 +560,9 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x1f0,0x3f6,40 mem=8M"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/excite_defconfig b/arch/mips/configs/excite_defconfig
deleted file mode 100644
index 1995d43a2ed..00000000000
--- a/arch/mips/configs/excite_defconfig
+++ /dev/null
@@ -1,1335 +0,0 @@
-#
-# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20
-# Tue Feb 20 21:47:31 2007
-#
-CONFIG_MIPS=y
-
-#
-# Machine selection
-#
-CONFIG_ZONE_DMA=y
-# CONFIG_MIPS_MTX1 is not set
-# CONFIG_MIPS_BOSPORUS is not set
-# CONFIG_MIPS_PB1000 is not set
-# CONFIG_MIPS_PB1100 is not set
-# CONFIG_MIPS_PB1500 is not set
-# CONFIG_MIPS_PB1550 is not set
-# CONFIG_MIPS_PB1200 is not set
-# CONFIG_MIPS_DB1000 is not set
-# CONFIG_MIPS_DB1100 is not set
-# CONFIG_MIPS_DB1500 is not set
-# CONFIG_MIPS_DB1550 is not set
-# CONFIG_MIPS_DB1200 is not set
-# CONFIG_MIPS_MIRAGE is not set
-CONFIG_BASLER_EXCITE=y
-# CONFIG_BASLER_EXCITE_PROTOTYPE is not set
-# CONFIG_MIPS_COBALT is not set
-# CONFIG_MACH_DECSTATION is not set
-# CONFIG_MACH_JAZZ is not set
-# CONFIG_MIPS_MALTA is not set
-# CONFIG_WR_PPMC is not set
-# CONFIG_MIPS_SIM is not set
-# CONFIG_MOMENCO_JAGUAR_ATX is not set
-# CONFIG_MIPS_XXS1500 is not set
-# CONFIG_PNX8550_JBS is not set
-# CONFIG_PNX8550_STB810 is not set
-# CONFIG_MACH_VR41XX is not set
-# CONFIG_PMC_YOSEMITE is not set
-# CONFIG_MARKEINS is not set
-# CONFIG_SGI_IP22 is not set
-# CONFIG_SGI_IP27 is not set
-# CONFIG_SGI_IP32 is not set
-# CONFIG_SIBYTE_BIGSUR is not set
-# CONFIG_SIBYTE_SWARM is not set
-# CONFIG_SIBYTE_SENTOSA is not set
-# CONFIG_SIBYTE_RHONE is not set
-# CONFIG_SIBYTE_CARMEL is not set
-# CONFIG_SIBYTE_LITTLESUR is not set
-# CONFIG_SIBYTE_CRHINE is not set
-# CONFIG_SIBYTE_CRHONE is not set
-# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_GENERIC_FIND_NEXT_BIT=y
-CONFIG_GENERIC_HWEIGHT=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
-CONFIG_DMA_COHERENT=y
-CONFIG_CPU_BIG_ENDIAN=y
-# CONFIG_CPU_LITTLE_ENDIAN is not set
-CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
-CONFIG_IRQ_CPU=y
-CONFIG_IRQ_CPU_RM7K=y
-CONFIG_IRQ_CPU_RM9K=y
-CONFIG_MIPS_RM9122=y
-CONFIG_SERIAL_RM9000=y
-CONFIG_GPI_RM9000=y
-CONFIG_WDT_RM9000=y
-CONFIG_MIPS_L1_CACHE_SHIFT=5
-
-#
-# CPU selection
-#
-# CONFIG_CPU_MIPS32_R1 is not set
-# CONFIG_CPU_MIPS32_R2 is not set
-# CONFIG_CPU_MIPS64_R1 is not set
-# CONFIG_CPU_MIPS64_R2 is not set
-# CONFIG_CPU_R3000 is not set
-# CONFIG_CPU_TX39XX is not set
-# CONFIG_CPU_VR41XX is not set
-# CONFIG_CPU_R4300 is not set
-# CONFIG_CPU_R4X00 is not set
-# CONFIG_CPU_TX49XX is not set
-# CONFIG_CPU_R5000 is not set
-# CONFIG_CPU_R5432 is not set
-# CONFIG_CPU_R6000 is not set
-# CONFIG_CPU_NEVADA is not set
-# CONFIG_CPU_R8000 is not set
-# CONFIG_CPU_R10000 is not set
-# CONFIG_CPU_RM7000 is not set
-CONFIG_CPU_RM9000=y
-# CONFIG_CPU_SB1 is not set
-CONFIG_SYS_HAS_CPU_RM9000=y
-CONFIG_WEAK_ORDERING=y
-CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
-CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
-CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
-CONFIG_CPU_SUPPORTS_64BIT_KERNEL=y
-
-#
-# Kernel type
-#
-CONFIG_32BIT=y
-# CONFIG_64BIT is not set
-CONFIG_PAGE_SIZE_4KB=y
-# CONFIG_PAGE_SIZE_8KB is not set
-# CONFIG_PAGE_SIZE_16KB is not set
-# CONFIG_PAGE_SIZE_64KB is not set
-CONFIG_CPU_HAS_PREFETCH=y
-CONFIG_MIPS_MT_DISABLED=y
-# CONFIG_MIPS_MT_SMP is not set
-# CONFIG_MIPS_MT_SMTC is not set
-# CONFIG_MIPS_VPE_LOADER is not set
-# CONFIG_64BIT_PHYS_ADDR is not set
-CONFIG_CPU_HAS_SYNC=y
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_GENERIC_IRQ_PROBE=y
-CONFIG_CPU_SUPPORTS_HIGHMEM=y
-CONFIG_ARCH_FLATMEM_ENABLE=y
-CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
-# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
-# CONFIG_HZ_48 is not set
-# CONFIG_HZ_100 is not set
-# CONFIG_HZ_128 is not set
-# CONFIG_HZ_250 is not set
-# CONFIG_HZ_256 is not set
-CONFIG_HZ_1000=y
-# CONFIG_HZ_1024 is not set
-CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
-CONFIG_HZ=1000
-# CONFIG_PREEMPT_NONE is not set
-# CONFIG_PREEMPT_VOLUNTARY is not set
-CONFIG_PREEMPT=y
-CONFIG_PREEMPT_BKL=y
-# CONFIG_KEXEC is not set
-CONFIG_LOCKDEP_SUPPORT=y
-CONFIG_STACKTRACE_SUPPORT=y
-CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
-
-#
-# Code maturity level options
-#
-CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
-CONFIG_LOCK_KERNEL=y
-CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
-CONFIG_LOCALVERSION=""
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
-CONFIG_SYSVIPC_SYSCTL=y
-CONFIG_POSIX_MQUEUE=y
-# CONFIG_BSD_PROCESS_ACCT is not set
-# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
-# CONFIG_AUDIT is not set
-# CONFIG_IKCONFIG is not set
-CONFIG_SYSFS_DEPRECATED=y
-# CONFIG_RELAY is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL=y
-CONFIG_EMBEDDED=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
-CONFIG_HOTPLUG=y
-CONFIG_PRINTK=y
-CONFIG_BUG=y
-CONFIG_ELF_CORE=y
-CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_SHMEM=y
-CONFIG_SLAB=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
-CONFIG_BASE_SMALL=0
-# CONFIG_SLOB is not set
-
-#
-# Loadable module support
-#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
-
-#
-# Block layer
-#
-CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
-# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
-
-#
-# Bus options (PCI, PCMCIA, EISA, ISA, TC)
-#
-CONFIG_HW_HAS_PCI=y
-CONFIG_PCI=y
-CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
-# CONFIG_PCCARD is not set
-
-#
-# PCI Hotplug Support
-#
-# CONFIG_HOTPLUG_PCI is not set
-
-#
-# Executable file formats
-#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-CONFIG_TRAD_SIGNALS=y
-
-#
-# Power management options
-#
-CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
-# CONFIG_PM_DEBUG is not set
-# CONFIG_PM_SYSFS_DEPRECATED is not set
-
-#
-# Networking
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-# CONFIG_NETDEBUG is not set
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-CONFIG_UNIX=y
-CONFIG_XFRM=y
-# CONFIG_XFRM_USER is not set
-# CONFIG_XFRM_SUB_POLICY is not set
-CONFIG_XFRM_MIGRATE=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_FIB_HASH=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=y
-CONFIG_INET_TCP_DIAG=y
-# CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_CUBIC=y
-CONFIG_DEFAULT_TCP_CONG="cubic"
-CONFIG_TCP_MD5SIG=y
-# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
-CONFIG_NETWORK_SECMARK=y
-# CONFIG_NETFILTER is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_DCCP is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
-# CONFIG_TIPC is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
-# CONFIG_IEEE80211 is not set
-
-#
-# Device Drivers
-#
-
-#
-# Generic Driver Options
-#
-CONFIG_STANDALONE=y
-CONFIG_PREVENT_FIRMWARE_BUILD=y
-CONFIG_FW_LOADER=m
-# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
-# CONFIG_CONNECTOR is not set
-
-#
-# Memory Technology Devices (MTD)
-#
-CONFIG_MTD=y
-# CONFIG_MTD_DEBUG is not set
-# CONFIG_MTD_CONCAT is not set
-CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
-
-#
-# User Modules And Translation Layers
-#
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLKDEVS=y
-CONFIG_MTD_BLOCK=y
-# CONFIG_FTL is not set
-# CONFIG_NFTL is not set
-# CONFIG_INFTL is not set
-# CONFIG_RFD_FTL is not set
-# CONFIG_SSFDC is not set
-
-#
-# RAM/ROM/Flash chip drivers
-#
-# CONFIG_MTD_CFI is not set
-# CONFIG_MTD_JEDECPROBE is not set
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
-# CONFIG_MTD_CFI_I4 is not set
-# CONFIG_MTD_CFI_I8 is not set
-# CONFIG_MTD_RAM is not set
-# CONFIG_MTD_ROM is not set
-# CONFIG_MTD_ABSENT is not set
-# CONFIG_MTD_OBSOLETE_CHIPS is not set
-
-#
-# Mapping drivers for chip access
-#
-# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-# CONFIG_MTD_PLATRAM is not set
-
-#
-# Self-contained MTD device drivers
-#
-# CONFIG_MTD_PMC551 is not set
-# CONFIG_MTD_SLRAM is not set
-# CONFIG_MTD_PHRAM is not set
-# CONFIG_MTD_MTDRAM is not set
-# CONFIG_MTD_BLOCK2MTD is not set
-
-#
-# Disk-On-Chip Device Drivers
-#
-# CONFIG_MTD_DOC2000 is not set
-# CONFIG_MTD_DOC2001 is not set
-# CONFIG_MTD_DOC2001PLUS is not set
-
-#
-# NAND Flash Device Drivers
-#
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_VERIFY_WRITE=y
-# CONFIG_MTD_NAND_ECC_SMC is not set
-CONFIG_MTD_NAND_IDS=y
-# CONFIG_MTD_NAND_DISKONCHIP is not set
-# CONFIG_MTD_NAND_BASLER_EXCITE is not set
-# CONFIG_MTD_NAND_CAFE is not set
-# CONFIG_MTD_NAND_NANDSIM is not set
-
-#
-# OneNAND Flash Device Drivers
-#
-# CONFIG_MTD_ONENAND is not set
-
-#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
-#
-# CONFIG_PNPACPI is not set
-
-#
-# Block devices
-#
-# CONFIG_BLK_CPQ_DA is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BLK_DEV_COW_COMMON is not set
-CONFIG_BLK_DEV_LOOP=m
-# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_SX8 is not set
-# CONFIG_BLK_DEV_UB is not set
-# CONFIG_BLK_DEV_RAM is not set
-# CONFIG_BLK_DEV_INITRD is not set
-# CONFIG_CDROM_PKTCDVD is not set
-# CONFIG_ATA_OVER_ETH is not set
-
-#
-# Misc devices
-#
-CONFIG_SGI_IOC4=m
-# CONFIG_TIFM_CORE is not set
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
-# CONFIG_IDE is not set
-
-#
-# SCSI device support
-#
-# CONFIG_RAID_ATTRS is not set
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-# CONFIG_SCSI_NETLINK is not set
-# CONFIG_SCSI_PROC_FS is not set
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
-# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-CONFIG_SCSI_SCAN_ASYNC=y
-
-#
-# SCSI Transports
-#
-# CONFIG_SCSI_SPI_ATTRS is not set
-# CONFIG_SCSI_FC_ATTRS is not set
-# CONFIG_SCSI_ISCSI_ATTRS is not set
-CONFIG_SCSI_SAS_ATTRS=m
-CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_ISCSI_TCP is not set
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
-# CONFIG_SCSI_AIC79XX is not set
-CONFIG_SCSI_AIC94XX=m
-# CONFIG_AIC94XX_DEBUG is not set
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_ARCMSR is not set
-# CONFIG_MEGARAID_NEWGEN is not set
-# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_SAS is not set
-# CONFIG_SCSI_HPTIOP is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_STEX is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_QLA_FC is not set
-# CONFIG_SCSI_QLA_ISCSI is not set
-# CONFIG_SCSI_LPFC is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DC390T is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_SCSI_SRP is not set
-
-#
-# Serial ATA (prod) and Parallel ATA (experimental) drivers
-#
-# CONFIG_ATA is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
-# CONFIG_FUSION_FC is not set
-# CONFIG_FUSION_SAS is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Network device support
-#
-CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
-# CONFIG_BONDING is not set
-# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
-
-#
-# ARCnet devices
-#
-# CONFIG_ARCNET is not set
-
-#
-# PHY device support
-#
-
-#
-# Ethernet (10 or 100Mbit)
-#
-# CONFIG_NET_ETHERNET is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-# CONFIG_ACENIC is not set
-# CONFIG_DL2K is not set
-# CONFIG_E1000 is not set
-# CONFIG_NS83820 is not set
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
-# CONFIG_R8169 is not set
-# CONFIG_SIS190 is not set
-# CONFIG_SKGE is not set
-# CONFIG_SKY2 is not set
-# CONFIG_SK98LIN is not set
-# CONFIG_TIGON3 is not set
-# CONFIG_BNX2 is not set
-CONFIG_QLA3XXX=m
-# CONFIG_ATL1 is not set
-
-#
-# Ethernet (10000 Mbit)
-#
-# CONFIG_CHELSIO_T1 is not set
-CONFIG_CHELSIO_T3=m
-# CONFIG_IXGB is not set
-# CONFIG_S2IO is not set
-# CONFIG_MYRI10GE is not set
-CONFIG_NETXEN_NIC=m
-
-#
-# Token Ring devices
-#
-# CONFIG_TR is not set
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
-#
-# CONFIG_WAN is not set
-# CONFIG_FDDI is not set
-# CONFIG_HIPPI is not set
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
-# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
-# CONFIG_NETCONSOLE is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
-# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
-# CONFIG_PHONE is not set
-
-#
-# Input device support
-#
-CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
-
-#
-# Userland interfaces
-#
-CONFIG_INPUT_MOUSEDEV=m
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-CONFIG_INPUT_EVDEV=m
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
-
-#
-# Hardware I/O ports
-#
-# CONFIG_SERIO is not set
-# CONFIG_GAMEPORT is not set
-
-#
-# Character devices
-#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_SERIAL_NONSTANDARD is not set
-
-#
-# Serial drivers
-#
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_PCI=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-# CONFIG_SERIAL_8250_RSA is not set
-
-#
-# Non-8250 serial port support
-#
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_JSM is not set
-CONFIG_UNIX98_PTYS=y
-# CONFIG_LEGACY_PTYS is not set
-
-#
-# IPMI
-#
-# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
-
-#
-# Watchdog Device Drivers
-#
-# CONFIG_SOFT_WATCHDOG is not set
-CONFIG_WDT_RM9K_GPI=m
-
-#
-# PCI-based Watchdog Cards
-#
-# CONFIG_PCIPCWATCHDOG is not set
-# CONFIG_WDTPCI is not set
-
-#
-# USB-based Watchdog Cards
-#
-# CONFIG_USBPCWATCHDOG is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
-# CONFIG_R3964 is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_DRM is not set
-# CONFIG_RAW_DRIVER is not set
-
-#
-# TPM devices
-#
-# CONFIG_TCG_TPM is not set
-
-#
-# I2C support
-#
-# CONFIG_I2C is not set
-
-#
-# SPI support
-#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
-
-#
-# Dallas's 1-wire bus
-#
-# CONFIG_W1 is not set
-
-#
-# Hardware Monitoring support
-#
-# CONFIG_HWMON is not set
-# CONFIG_HWMON_VID is not set
-
-#
-# Multimedia devices
-#
-# CONFIG_VIDEO_DEV is not set
-
-#
-# Digital Video Broadcasting Devices
-#
-# CONFIG_DVB is not set
-# CONFIG_USB_DABUSB is not set
-
-#
-# Graphics support
-#
-# CONFIG_FIRMWARE_EDID is not set
-CONFIG_FB=y
-# CONFIG_FB_CFB_FILLRECT is not set
-# CONFIG_FB_CFB_COPYAREA is not set
-# CONFIG_FB_CFB_IMAGEBLIT is not set
-# CONFIG_FB_SVGALIB is not set
-# CONFIG_FB_MACMODES is not set
-# CONFIG_FB_BACKLIGHT is not set
-# CONFIG_FB_MODE_HELPERS is not set
-# CONFIG_FB_TILEBLITTING is not set
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_S1D13XXX is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_SMIVGX is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_VIRTUAL is not set
-
-#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=m
-# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
-# CONFIG_FONTS is not set
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-
-#
-# Logo configuration
-#
-# CONFIG_LOGO is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Sound
-#
-# CONFIG_SOUND is not set
-
-#
-# HID Devices
-#
-CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
-
-#
-# USB support
-#
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
-CONFIG_USB_ARCH_HAS_EHCI=y
-CONFIG_USB=y
-# CONFIG_USB_DEBUG is not set
-
-#
-# Miscellaneous USB options
-#
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DYNAMIC_MINORS is not set
-# CONFIG_USB_SUSPEND is not set
-# CONFIG_USB_OTG is not set
-
-#
-# USB Host Controller Drivers
-#
-CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_SPLIT_ISO is not set
-# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
-# CONFIG_USB_ISP116X_HCD is not set
-CONFIG_USB_OHCI_HCD=y
-# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
-# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
-# CONFIG_USB_UHCI_HCD is not set
-# CONFIG_USB_SL811_HCD is not set
-
-#
-# USB Device Class drivers
-#
-# CONFIG_USB_ACM is not set
-# CONFIG_USB_PRINTER is not set
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
-#
-
-#
-# may also be needed; see USB_STORAGE Help for more information
-#
-CONFIG_USB_STORAGE=y
-# CONFIG_USB_STORAGE_DEBUG is not set
-# CONFIG_USB_STORAGE_DATAFAB is not set
-# CONFIG_USB_STORAGE_FREECOM is not set
-# CONFIG_USB_STORAGE_DPCM is not set
-# CONFIG_USB_STORAGE_USBAT is not set
-# CONFIG_USB_STORAGE_SDDR09 is not set
-# CONFIG_USB_STORAGE_SDDR55 is not set
-# CONFIG_USB_STORAGE_JUMPSHOT is not set
-# CONFIG_USB_STORAGE_ALAUDA is not set
-# CONFIG_USB_STORAGE_KARMA is not set
-# CONFIG_USB_LIBUSUAL is not set
-
-#
-# USB Input Devices
-#
-CONFIG_USB_HID=m
-# CONFIG_USB_HIDINPUT_POWERBOOK is not set
-# CONFIG_HID_FF is not set
-# CONFIG_USB_HIDDEV is not set
-
-#
-# USB HID Boot Protocol drivers
-#
-# CONFIG_USB_KBD is not set
-# CONFIG_USB_MOUSE is not set
-# CONFIG_USB_AIPTEK is not set
-# CONFIG_USB_WACOM is not set
-# CONFIG_USB_ACECAD is not set
-# CONFIG_USB_KBTAB is not set
-# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_TOUCHSCREEN is not set
-# CONFIG_USB_YEALINK is not set
-# CONFIG_USB_XPAD is not set
-# CONFIG_USB_ATI_REMOTE is not set
-# CONFIG_USB_ATI_REMOTE2 is not set
-# CONFIG_USB_KEYSPAN_REMOTE is not set
-# CONFIG_USB_APPLETOUCH is not set
-# CONFIG_USB_GTCO is not set
-
-#
-# USB Imaging devices
-#
-# CONFIG_USB_MDC800 is not set
-# CONFIG_USB_MICROTEK is not set
-
-#
-# USB Network Adapters
-#
-# CONFIG_USB_CATC is not set
-# CONFIG_USB_KAWETH is not set
-# CONFIG_USB_PEGASUS is not set
-# CONFIG_USB_RTL8150 is not set
-# CONFIG_USB_USBNET_MII is not set
-# CONFIG_USB_USBNET is not set
-# CONFIG_USB_MON is not set
-
-#
-# USB port drivers
-#
-
-#
-# USB Serial Converter support
-#
-# CONFIG_USB_SERIAL is not set
-
-#
-# USB Miscellaneous drivers
-#
-# CONFIG_USB_EMI62 is not set
-# CONFIG_USB_EMI26 is not set
-# CONFIG_USB_ADUTUX is not set
-# CONFIG_USB_AUERSWALD is not set
-# CONFIG_USB_RIO500 is not set
-# CONFIG_USB_LEGOTOWER is not set
-# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
-# CONFIG_USB_LED is not set
-# CONFIG_USB_CYPRESS_CY7C63 is not set
-# CONFIG_USB_CYTHERM is not set
-# CONFIG_USB_PHIDGET is not set
-# CONFIG_USB_IDMOUSE is not set
-# CONFIG_USB_FTDI_ELAN is not set
-# CONFIG_USB_APPLEDISPLAY is not set
-# CONFIG_USB_SISUSBVGA is not set
-# CONFIG_USB_LD is not set
-# CONFIG_USB_TRANCEVIBRATOR is not set
-# CONFIG_USB_TEST is not set
-
-#
-# USB DSL modem support
-#
-
-#
-# USB Gadget Support
-#
-# CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
-# CONFIG_MMC is not set
-
-#
-# LED devices
-#
-# CONFIG_NEW_LEDS is not set
-
-#
-# LED drivers
-#
-
-#
-# LED Triggers
-#
-
-#
-# InfiniBand support
-#
-# CONFIG_INFINIBAND is not set
-
-#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
-#
-
-#
-# Real Time Clock
-#
-# CONFIG_RTC_CLASS is not set
-
-#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
-
-#
-# Auxiliary Display support
-#
-
-#
-# Virtualization
-#
-
-#
-# File systems
-#
-CONFIG_EXT2_FS=y
-# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT2_FS_XIP is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
-# CONFIG_REISERFS_FS is not set
-# CONFIG_JFS_FS is not set
-CONFIG_FS_POSIX_ACL=y
-# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
-# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_INOTIFY=y
-CONFIG_INOTIFY_USER=y
-# CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
-# CONFIG_AUTOFS_FS is not set
-# CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
-CONFIG_GENERIC_ACL=y
-
-#
-# CD-ROM/DVD Filesystems
-#
-# CONFIG_ISO9660_FS is not set
-# CONFIG_UDF_FS is not set
-
-#
-# DOS/FAT/NT Filesystems
-#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
-
-#
-# Pseudo filesystems
-#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_PROC_SYSCTL=y
-CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_ADFS_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_BEFS_FS is not set
-# CONFIG_BFS_FS is not set
-# CONFIG_EFS_FS is not set
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=0
-CONFIG_JFFS2_FS_WRITEBUFFER=y
-# CONFIG_JFFS2_SUMMARY is not set
-# CONFIG_JFFS2_FS_XATTR is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
-CONFIG_JFFS2_ZLIB=y
-CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-# CONFIG_NFS_V3_ACL is not set
-# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
-CONFIG_ROOT_NFS=y
-CONFIG_LOCKD=y
-CONFIG_LOCKD_V4=y
-CONFIG_NFS_COMMON=y
-CONFIG_SUNRPC=y
-# CONFIG_RPCSEC_GSS_KRB5 is not set
-# CONFIG_RPCSEC_GSS_SPKM3 is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
-
-#
-# Partition Types
-#
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_ACORN_PARTITION is not set
-# CONFIG_OSF_PARTITION is not set
-# CONFIG_AMIGA_PARTITION is not set
-# CONFIG_ATARI_PARTITION is not set
-# CONFIG_MAC_PARTITION is not set
-CONFIG_MSDOS_PARTITION=y
-# CONFIG_BSD_DISKLABEL is not set
-# CONFIG_MINIX_SUBPARTITION is not set
-# CONFIG_SOLARIS_X86_PARTITION is not set
-# CONFIG_UNIXWARE_DISKLABEL is not set
-# CONFIG_LDM_PARTITION is not set
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_ULTRIX_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
-# CONFIG_KARMA_PARTITION is not set
-# CONFIG_EFI_PARTITION is not set
-
-#
-# Native Language Support
-#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-CONFIG_NLS_CODEPAGE_850=m
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
-# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ASCII is not set
-CONFIG_NLS_ISO8859_1=m
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Distributed Lock Manager
-#
-CONFIG_DLM=m
-CONFIG_DLM_TCP=y
-# CONFIG_DLM_SCTP is not set
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
-# Kernel hacking
-#
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-# CONFIG_PRINTK_TIME is not set
-CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
-# CONFIG_UNUSED_SYMBOLS is not set
-# CONFIG_DEBUG_FS is not set
-# CONFIG_HEADERS_CHECK is not set
-# CONFIG_DEBUG_KERNEL is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
-
-#
-# Security options
-#
-# CONFIG_KEYS is not set
-# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_BLKCIPHER=m
-CONFIG_CRYPTO_HASH=m
-CONFIG_CRYPTO_MANAGER=m
-# CONFIG_CRYPTO_HMAC is not set
-CONFIG_CRYPTO_XCBC=m
-# CONFIG_CRYPTO_NULL is not set
-# CONFIG_CRYPTO_MD4 is not set
-CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA256 is not set
-# CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
-# CONFIG_CRYPTO_TGR192 is not set
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-# CONFIG_CRYPTO_DES is not set
-CONFIG_CRYPTO_FCRYPT=m
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
-# CONFIG_CRYPTO_AES is not set
-# CONFIG_CRYPTO_CAST5 is not set
-# CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
-# CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-CONFIG_CRYPTO_CAMELLIA=m
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
-
-#
-# Library routines
-#
-CONFIG_BITREVERSE=y
-# CONFIG_CRC_CCITT is not set
-# CONFIG_CRC16 is not set
-CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
-CONFIG_PLIST=y
-CONFIG_HAS_IOMEM=y
-CONFIG_HAS_IOPORT=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 0197f0de6b3..a09dd03aa8c 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31-rc1
-# Thu Jul 2 22:37:00 2009
+# Linux kernel version: 2.6.32-rc4
+# Fri Oct 16 13:18:01 2009
#
CONFIG_MIPS=y
@@ -10,8 +10,8 @@ CONFIG_MIPS=y
#
# CONFIG_MACH_ALCHEMY is not set
# CONFIG_AR7 is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -105,6 +105,8 @@ CONFIG_CPU_LOONGSON2E=y
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
+CONFIG_SYS_SUPPORTS_ZBOOT_UART16550=y
CONFIG_CPU_LOONGSON2=y
CONFIG_SYS_HAS_CPU_LOONGSON2E=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
@@ -135,12 +137,16 @@ CONFIG_SYS_SUPPORTS_HIGHMEM=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
+# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_SPARSEMEM_STATIC=y
+
+#
+# Memory hotplug is currently incompatible with Software Suspend
+#
CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_PHYS_ADDR_T_64BIT=y
@@ -148,6 +154,7 @@ CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
@@ -180,6 +187,12 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION="-fuloong2e"
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@@ -193,11 +206,12 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
-# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=64
+# CONFIG_RCU_FANOUT_EXACT is not set
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -235,18 +249,16 @@ CONFIG_SHMEM=y
CONFIG_AIO=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
-# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-CONFIG_MARKERS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_SYSCALL_WRAPPERS=y
@@ -255,8 +267,8 @@ CONFIG_HAVE_SYSCALL_WRAPPERS=y
# GCOV-based kernel profiling
#
# CONFIG_GCOV_KERNEL is not set
-# CONFIG_SLOW_WORK is not set
-# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLOW_WORK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
@@ -283,7 +295,7 @@ CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -321,9 +333,14 @@ CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
# CONFIG_SUSPEND is not set
-# CONFIG_HIBERNATION is not set
+CONFIG_HIBERNATION_NVS=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/hda3"
+# CONFIG_PM_RUNTIME is not set
CONFIG_NET=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
#
# Networking options
@@ -442,6 +459,7 @@ CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -473,6 +491,7 @@ CONFIG_NET_CLS_ROUTE=y
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
CONFIG_WIRELESS_OLD_REGULATORY=y
CONFIG_WIRELESS_EXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
@@ -481,7 +500,6 @@ CONFIG_WIRELESS_EXT_SYSFS=y
#
# CFG80211 needs to be enabled for MAC80211
#
-CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
CONFIG_NET_9P=m
@@ -495,6 +513,7 @@ CONFIG_NET_9P=m
# Generic Driver Options
#
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=m
@@ -504,9 +523,9 @@ CONFIG_EXTRA_FIRMWARE=""
# CONFIG_CONNECTOR is not set
CONFIG_MTD=m
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
# CONFIG_MTD_PARTITIONS is not set
-# CONFIG_MTD_TESTS is not set
#
# User Modules And Translation Layers
@@ -820,6 +839,7 @@ CONFIG_8139TOO=y
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
@@ -867,10 +887,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
+CONFIG_WLAN=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
@@ -886,6 +903,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_USBNET is not set
+# CONFIG_USB_CDC_PHONET is not set
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -933,12 +951,16 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_QT2160 is not set
# CONFIG_KEYBOARD_LKKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_PS2_ALPS=y
@@ -946,6 +968,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
CONFIG_MOUSE_SERIAL=y
# CONFIG_MOUSE_APPLETOUCH is not set
@@ -1015,6 +1038,7 @@ CONFIG_RTC=y
CONFIG_DEVPORT=y
CONFIG_I2C=m
CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_HELPER_AUTO=y
@@ -1070,9 +1094,6 @@ CONFIG_I2C_VIAPRO=m
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
-# CONFIG_SENSORS_PCF8574 is not set
-# CONFIG_PCF8575 is not set
-# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -1088,7 +1109,6 @@ CONFIG_I2C_VIAPRO=m
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -1105,6 +1125,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
@@ -1114,6 +1135,7 @@ CONFIG_SSB_POSSIBLE=y
#
# Graphics support
#
+CONFIG_VGA_ARB=y
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
@@ -1198,6 +1220,7 @@ CONFIG_FONT_8x16=y
# CONFIG_LOGO is not set
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@@ -1304,7 +1327,6 @@ CONFIG_SND_USB=y
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
CONFIG_HIDRAW=y
#
@@ -1356,6 +1378,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_ISP1760_HCD=m
+# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
@@ -1453,6 +1476,7 @@ CONFIG_UIO_CIF=m
# CONFIG_UIO_SMX is not set
# CONFIG_UIO_AEC is not set
# CONFIG_UIO_SERCOS3 is not set
+# CONFIG_UIO_PCI_GENERIC is not set
#
# TI VLYNQ
@@ -1469,10 +1493,10 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT4_FS=m
-CONFIG_EXT4DEV_COMPAT=y
CONFIG_EXT4_FS_XATTR=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
CONFIG_FS_XIP=y
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
@@ -1489,6 +1513,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
@@ -1557,7 +1582,6 @@ CONFIG_OMFS_FS=m
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
@@ -1666,6 +1690,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=2048
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -1678,13 +1703,14 @@ CONFIG_NOP_TRACER=y
CONFIG_RING_BUFFER=y
CONFIG_EVENT_TRACING=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
# CONFIG_FTRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
@@ -1742,11 +1768,13 @@ CONFIG_CRYPTO_XTS=m
#
CONFIG_CRYPTO_HMAC=y
# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
+CONFIG_CRYPTO_GHASH=m
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
# CONFIG_CRYPTO_MICHAEL_MIC is not set
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index f14d38ba603..222d7eca2fe 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1188,7 +1187,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DYNAMIC_PRINTK_DEBUG=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 1fc73aa7b50..ed84b4cb3c8 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -940,7 +939,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index 539dccb0345..dab2e5aaada 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -816,7 +815,7 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index d934bdefb39..1841c88d3d2 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1126,7 +1125,7 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index d22df61833a..14c2ab3b267 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
CONFIG_MACH_JAZZ=y
@@ -1374,7 +1373,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
index 5380f1f582d..4d66c44cced 100644
--- a/arch/mips/configs/jmr3927_defconfig
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -835,7 +834,7 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig
index 044074db7e5..08d481e3d42 100644
--- a/arch/mips/configs/lasat_defconfig
+++ b/arch/mips/configs/lasat_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -798,7 +797,7 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
new file mode 100644
index 00000000000..b71a0a4fb95
--- /dev/null
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -0,0 +1,1835 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc6
+# Mon Nov 9 23:42:42 2009
+#
+CONFIG_MIPS=y
+
+#
+# Machine selection
+#
+# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
+# CONFIG_MIPS_COBALT is not set
+# CONFIG_MACH_DECSTATION is not set
+# CONFIG_MACH_JAZZ is not set
+# CONFIG_LASAT is not set
+CONFIG_MACH_LOONGSON=y
+# CONFIG_MIPS_MALTA is not set
+# CONFIG_MIPS_SIM is not set
+# CONFIG_NEC_MARKEINS is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
+# CONFIG_PNX8550_JBS is not set
+# CONFIG_PNX8550_STB810 is not set
+# CONFIG_PMC_MSP is not set
+# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_SGI_IP22 is not set
+# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
+# CONFIG_SGI_IP32 is not set
+# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CARMEL is not set
+# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SWARM is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_BIGSUR is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+# CONFIG_LEMOTE_FULOONG2E is not set
+CONFIG_LEMOTE_MACH2F=y
+CONFIG_CS5536=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
+CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K_LIB=y
+CONFIG_CSRC_R4K=y
+CONFIG_DMA_NONCOHERENT=y
+CONFIG_DMA_NEED_PCI_MAP_STATE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+CONFIG_I8259=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_ISA_DMA_SUPPORT_BROKEN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+CONFIG_IRQ_CPU=y
+CONFIG_BOOT_ELF32=y
+CONFIG_MIPS_L1_CACHE_SHIFT=5
+
+#
+# CPU selection
+#
+# CONFIG_CPU_LOONGSON2E is not set
+CONFIG_CPU_LOONGSON2F=y
+# CONFIG_CPU_MIPS32_R1 is not set
+# CONFIG_CPU_MIPS32_R2 is not set
+# CONFIG_CPU_MIPS64_R1 is not set
+# CONFIG_CPU_MIPS64_R2 is not set
+# CONFIG_CPU_R3000 is not set
+# CONFIG_CPU_TX39XX is not set
+# CONFIG_CPU_VR41XX is not set
+# CONFIG_CPU_R4300 is not set
+# CONFIG_CPU_R4X00 is not set
+# CONFIG_CPU_TX49XX is not set
+# CONFIG_CPU_R5000 is not set
+# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
+# CONFIG_CPU_R6000 is not set
+# CONFIG_CPU_NEVADA is not set
+# CONFIG_CPU_R8000 is not set
+# CONFIG_CPU_R10000 is not set
+# CONFIG_CPU_RM7000 is not set
+# CONFIG_CPU_RM9000 is not set
+# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
+CONFIG_SYS_SUPPORTS_ZBOOT_UART16550=y
+CONFIG_CPU_LOONGSON2=y
+CONFIG_SYS_HAS_CPU_LOONGSON2F=y
+CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
+CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_64BIT_KERNEL=y
+
+#
+# Kernel type
+#
+# CONFIG_32BIT is not set
+CONFIG_64BIT=y
+# CONFIG_PAGE_SIZE_4KB is not set
+# CONFIG_PAGE_SIZE_8KB is not set
+CONFIG_PAGE_SIZE_16KB=y
+# CONFIG_PAGE_SIZE_32KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_BOARD_SCACHE=y
+CONFIG_MIPS_MT_DISABLED=y
+# CONFIG_MIPS_MT_SMP is not set
+# CONFIG_MIPS_MT_SMTC is not set
+CONFIG_CPU_HAS_WB=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_SYS_SUPPORTS_HIGHMEM=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_STATIC=y
+
+#
+# Memory hotplug is currently incompatible with Software Suspend
+#
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_48 is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_128 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_HZ=250
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_KEXEC is not set
+# CONFIG_SECCOMP is not set
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+CONFIG_KERNEL_LZMA=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+CONFIG_AUDIT=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=64
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_SYSCALL_WRAPPERS=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_BLK_DEV_BSG=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLOCK_COMPAT=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# Bus options (PCI, PCMCIA, EISA, ISA, TC)
+#
+CONFIG_HW_HAS_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
+CONFIG_ISA=y
+CONFIG_MMU=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MIPS32_COMPAT=y
+CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_BINFMT_ELF32=y
+
+#
+# Power management options
+#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HIBERNATION_NVS=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/hda3"
+# CONFIG_PM_RUNTIME is not set
+CONFIG_NET=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+# CONFIG_TCP_CONG_HSTCP is not set
+# CONFIG_TCP_CONG_HYBLA is not set
+# CONFIG_TCP_CONG_VEGAS is not set
+# CONFIG_TCP_CONG_SCALABLE is not set
+# CONFIG_TCP_CONG_LP is not set
+# CONFIG_TCP_CONG_VENO is not set
+# CONFIG_TCP_CONG_YEAH is not set
+# CONFIG_TCP_CONG_ILLINOIS is not set
+CONFIG_DEFAULT_BIC=y
+# CONFIG_DEFAULT_CUBIC is not set
+# CONFIG_DEFAULT_HTCP is not set
+# CONFIG_DEFAULT_VEGAS is not set
+# CONFIG_DEFAULT_WESTWOOD is not set
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="bic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_INGRESS is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+# CONFIG_NET_EMATCH_CMP is not set
+# CONFIG_NET_EMATCH_NBYTE is not set
+# CONFIG_NET_EMATCH_U32 is not set
+# CONFIG_NET_EMATCH_META is not set
+# CONFIG_NET_EMATCH_TEXT is not set
+CONFIG_NET_CLS_ACT=y
+# CONFIG_NET_ACT_POLICE is not set
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_CFG80211_DEFAULT_PS_VALUE=0
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=m
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+# CONFIG_PARPORT is not set
+# CONFIG_PNP is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_PHANTOM is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
+CONFIG_HAVE_IDE=y
+CONFIG_IDE=y
+
+#
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
+#
+CONFIG_IDE_XFER_MODE=y
+CONFIG_IDE_TIMINGS=y
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+CONFIG_IDE_TASK_IOCTL=y
+CONFIG_IDE_PROC_FS=y
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_BLK_DEV_PLATFORM is not set
+CONFIG_BLK_DEV_IDEDMA_SFF=y
+
+#
+# PCI IDE chipsets support
+#
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+CONFIG_BLK_DEV_AMD74XX=y
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT8172 is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+
+#
+# Other IDE chipsets support
+#
+
+#
+# Note: most of these also require special kernel boot parameters
+#
+# CONFIG_BLK_DEV_4DRIVES is not set
+# CONFIG_BLK_DEV_ALI14XX is not set
+# CONFIG_BLK_DEV_DTC2278 is not set
+# CONFIG_BLK_DEV_HT6560B is not set
+# CONFIG_BLK_DEV_QD65XX is not set
+# CONFIG_BLK_DEV_UMC8672 is not set
+CONFIG_BLK_DEV_IDEDMA=y
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=m
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+CONFIG_NETDEVICES=y
+# CONFIG_IFB is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_SMC is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_NET_VENDOR_RACAL is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_AT1700 is not set
+# CONFIG_DEPCA is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_ISA is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_AC3200 is not set
+# CONFIG_APRICOT is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_CS89x0 is not set
+# CONFIG_TC35815 is not set
+# CONFIG_E100 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_8139TOO_TUNE_TWISTER=y
+# CONFIG_8139TOO_8129 is not set
+# CONFIG_8139_OLD_RX_RESET is not set
+# CONFIG_R6040 is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_R8169=y
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+CONFIG_WLAN=y
+CONFIG_WLAN_PRE80211=y
+# CONFIG_STRIP is not set
+# CONFIG_WAVELAN is not set
+CONFIG_WLAN_80211=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_PS2_ALPS is not set
+# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+# CONFIG_MOUSE_PS2_TRACKPOINT is not set
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_INPORT is not set
+# CONFIG_MOUSE_LOGIBM is not set
+# CONFIG_MOUSE_PC110PAD is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_COMPUTONE is not set
+# CONFIG_ROCKETPORT is not set
+# CONFIG_CYCLADES is not set
+# CONFIG_DIGIEPCA is not set
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+# CONFIG_ISI is not set
+# CONFIG_SYNCLINKMP is not set
+# CONFIG_SYNCLINK_GT is not set
+# CONFIG_N_HDLC is not set
+# CONFIG_RISCOM8 is not set
+# CONFIG_SPECIALIX is not set
+# CONFIG_STALDRV is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_FOURPORT=y
+# CONFIG_SERIAL_8250_ACCENT is not set
+# CONFIG_SERIAL_8250_BOCA is not set
+# CONFIG_SERIAL_8250_EXAR_ST16C554 is not set
+# CONFIG_SERIAL_8250_HUB6 is not set
+# CONFIG_SERIAL_8250_SHARE_IRQ is not set
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+# CONFIG_SERIAL_8250_RSA is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+CONFIG_MEDIA_SUPPORT=m
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+# CONFIG_DVB_CORE is not set
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_PMS is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_STRADIS is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GL860 is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_JEILINJ is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SN9C20X is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+# CONFIG_USB_GSPCA_ZC3XX is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_QUICKCAM_MESSENGER is not set
+# CONFIG_USB_ET61X251 is not set
+# CONFIG_USB_OV511 is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_SN9C102 is not set
+# CONFIG_USB_STV680 is not set
+# CONFIG_USB_ZC0301 is not set
+# CONFIG_USB_PWC is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+CONFIG_VGA_ARB=y
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB_DDC is not set
+CONFIG_FB_BOOT_VESA_SUPPORT=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+CONFIG_FB_SIS=y
+CONFIG_FB_SIS_300=y
+CONFIG_FB_SIS_315=y
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_6x11=y
+CONFIG_FONT_7x14=y
+CONFIG_FONT_PEARL_8x8=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_FONT_SUN8x16=y
+CONFIG_FONT_SUN12x22=y
+CONFIG_FONT_10x18=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=m
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_RTCTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=m
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+CONFIG_SND_CS5535AUDIO=m
+# CONFIG_SND_CTXFI is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SND_MIPS is not set
+# CONFIG_SND_USB is not set
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_SUSPEND=y
+# CONFIG_USB_OTG is not set
+CONFIG_USB_OTG_WHITELIST=y
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_UHCI_HCD=m
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+# CONFIG_USB_PRINTER is not set
+CONFIG_USB_WDM=m
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP210X is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_ET131X is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_ALTERA_PCIE_CHDMA is not set
+# CONFIG_RTL8187SE is not set
+# CONFIG_RTL8192SU is not set
+# CONFIG_RTL8192E is not set
+# CONFIG_INPUT_MIMIO is not set
+# CONFIG_TRANZPORT is not set
+
+#
+# Android
+#
+
+#
+# Qualcomm MSM Camera And Video
+#
+
+#
+# Camera Sensor Selection
+#
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_DST is not set
+# CONFIG_POHMELFS is not set
+# CONFIG_B3DFG is not set
+# CONFIG_PLAN9AUTH is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_USB_SERIAL_QUATECH2 is not set
+# CONFIG_USB_SERIAL_QUATECH_USB2 is not set
+# CONFIG_VT6655 is not set
+# CONFIG_VT6656 is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_VME_BUS is not set
+
+#
+# RAR Register Driver
+#
+# CONFIG_RAR_REGISTER is not set
+# CONFIG_IIO is not set
+CONFIG_FB_SM7XX=y
+CONFIG_FB_SM7XX_ACCEL=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QFMT_V1 is not set
+# CONFIG_QFMT_V2 is not set
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf-8"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC_T10DIF=y
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=m
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 3f01870b4d6..d3c601206db 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -10,7 +10,6 @@ CONFIG_MIPS=y
#
CONFIG_ZONE_DMA=y
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1591,7 +1590,7 @@ CONFIG_FRAME_WARN=1024
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index d001f7e8741..6a325c02b63 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1366,7 +1365,9 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200 mem=192m ip=bootp root=/dev/nfs rw"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/mipssim_defconfig b/arch/mips/configs/mipssim_defconfig
index 7358454deaa..f77a34e0f93 100644
--- a/arch/mips/configs/mipssim_defconfig
+++ b/arch/mips/configs/mipssim_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -635,7 +634,9 @@ CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="nfsroot=192.168.192.169:/u1/mipsel,timeo=20 ip=dhcp"
+# CONFIG_CMDLINE_OVERRIDE is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_RUNTIME_DEBUG is not set
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
index 8c720e51795..17203056b22 100644
--- a/arch/mips/configs/mpc30x_defconfig
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -817,7 +816,9 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="mem=32M console=ttyVR0,19200 ide0=0x170,0x376,73"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig
index ecbc030b7b6..000d185ddf4 100644
--- a/arch/mips/configs/msp71xx_defconfig
+++ b/arch/mips/configs/msp71xx_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1412,7 +1411,7 @@ CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_RUNTIME_DEBUG is not set
# CONFIG_MIPS_UNCACHED is not set
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 9477f040796..144b94d9a6a 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
CONFIG_MACH_ALCHEMY=y
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -3018,7 +3017,7 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/pb1100_defconfig b/arch/mips/configs/pb1100_defconfig
index be8091ef0a7..ddf67f63919 100644
--- a/arch/mips/configs/pb1100_defconfig
+++ b/arch/mips/configs/pb1100_defconfig
@@ -23,7 +23,6 @@ CONFIG_MIPS_PB1100=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1083,7 +1082,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/pb1500_defconfig b/arch/mips/configs/pb1500_defconfig
index e74ba794c78..5ec60836b64 100644
--- a/arch/mips/configs/pb1500_defconfig
+++ b/arch/mips/configs/pb1500_defconfig
@@ -23,7 +23,6 @@ CONFIG_MIPS_PB1500=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1200,7 +1199,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/pb1550_defconfig b/arch/mips/configs/pb1550_defconfig
index 1d896fd830d..6647642b5d9 100644
--- a/arch/mips/configs/pb1550_defconfig
+++ b/arch/mips/configs/pb1550_defconfig
@@ -23,7 +23,6 @@ CONFIG_MIPS_PB1550=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1193,7 +1192,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/pnx8335-stb225_defconfig b/arch/mips/configs/pnx8335-stb225_defconfig
index fef4d31c205..848344d588d 100644
--- a/arch/mips/configs/pnx8335-stb225_defconfig
+++ b/arch/mips/configs/pnx8335-stb225_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1034,7 +1033,7 @@ CONFIG_FRAME_WARN=1024
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
# CONFIG_KERNEL_TESTS is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/pnx8550-jbs_defconfig b/arch/mips/configs/pnx8550-jbs_defconfig
index e10c7116c3c..9d721fdccb3 100644
--- a/arch/mips/configs/pnx8550-jbs_defconfig
+++ b/arch/mips/configs/pnx8550-jbs_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1215,7 +1214,9 @@ CONFIG_DEBUG_MUTEXES=y
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
+# CONFIG_CMDLINE_OVERRIDE is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_RUNTIME_DEBUG is not set
diff --git a/arch/mips/configs/pnx8550-stb810_defconfig b/arch/mips/configs/pnx8550-stb810_defconfig
index 5ed3c8dfa0a..ab07ec08c6f 100644
--- a/arch/mips/configs/pnx8550-stb810_defconfig
+++ b/arch/mips/configs/pnx8550-stb810_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1205,7 +1204,9 @@ CONFIG_DEBUG_SLAB=y
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
+# CONFIG_CMDLINE_OVERRIDE is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_RUNTIME_DEBUG is not set
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig
new file mode 100644
index 00000000000..7291633d81c
--- /dev/null
+++ b/arch/mips/configs/powertv_defconfig
@@ -0,0 +1,1550 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.31-rc5
+# Fri Aug 28 14:49:33 2009
+#
+CONFIG_MIPS=y
+
+#
+# Machine selection
+#
+# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_MIPS_COBALT is not set
+# CONFIG_MACH_DECSTATION is not set
+# CONFIG_MACH_JAZZ is not set
+# CONFIG_LASAT is not set
+# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MIPS_MALTA is not set
+# CONFIG_MIPS_SIM is not set
+# CONFIG_NEC_MARKEINS is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
+# CONFIG_PNX8550_JBS is not set
+# CONFIG_PNX8550_STB810 is not set
+# CONFIG_PMC_MSP is not set
+# CONFIG_PMC_YOSEMITE is not set
+CONFIG_POWERTV=y
+# CONFIG_SGI_IP22 is not set
+# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
+# CONFIG_SGI_IP32 is not set
+# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CARMEL is not set
+# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SWARM is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_BIGSUR is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+# CONFIG_MIN_RUNTIME_RESOURCES is not set
+# CONFIG_BOOTLOADER_DRIVER is not set
+CONFIG_BOOTLOADER_FAMILY="R2"
+CONFIG_CSRC_POWERTV=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
+CONFIG_CEVT_R4K=y
+CONFIG_DMA_NONCOHERENT=y
+CONFIG_DMA_NEED_PCI_MAP_STATE=y
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_CPU_BIG_ENDIAN=y
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_BOOT_ELF32=y
+CONFIG_MIPS_L1_CACHE_SHIFT=5
+
+#
+# CPU selection
+#
+# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_MIPS32_R1 is not set
+CONFIG_CPU_MIPS32_R2=y
+# CONFIG_CPU_MIPS64_R1 is not set
+# CONFIG_CPU_MIPS64_R2 is not set
+# CONFIG_CPU_R3000 is not set
+# CONFIG_CPU_TX39XX is not set
+# CONFIG_CPU_VR41XX is not set
+# CONFIG_CPU_R4300 is not set
+# CONFIG_CPU_R4X00 is not set
+# CONFIG_CPU_TX49XX is not set
+# CONFIG_CPU_R5000 is not set
+# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
+# CONFIG_CPU_R6000 is not set
+# CONFIG_CPU_NEVADA is not set
+# CONFIG_CPU_R8000 is not set
+# CONFIG_CPU_R10000 is not set
+# CONFIG_CPU_RM7000 is not set
+# CONFIG_CPU_RM9000 is not set
+# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
+CONFIG_SYS_HAS_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32=y
+CONFIG_CPU_MIPSR2=y
+CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
+
+#
+# Kernel type
+#
+CONFIG_32BIT=y
+# CONFIG_64BIT is not set
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_MIPS_MT_DISABLED=y
+# CONFIG_MIPS_MT_SMP is not set
+# CONFIG_MIPS_MT_SMTC is not set
+CONFIG_CPU_HAS_LLSC=y
+CONFIG_CPU_MIPSR2_IRQ_VI=y
+CONFIG_CPU_MIPSR2_IRQ_EI=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+# CONFIG_HIGHMEM is not set
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_SYS_SUPPORTS_HIGHMEM=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_48 is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_128 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_256 is not set
+CONFIG_HZ_1000=y
+# CONFIG_HZ_1024 is not set
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_HZ=1000
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_KEXEC is not set
+# CONFIG_SECCOMP is not set
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_PCSPKR_PLATFORM is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+CONFIG_TIMERFD=y
+# CONFIG_EVENTFD is not set
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Performance Counters
+#
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_PCI_QUIRKS=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_PROBE_INITRD_HEADER is not set
+# CONFIG_FREEZER is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, ISA, TC)
+#
+CONFIG_HW_HAS_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCI_LEGACY is not set
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
+CONFIG_MMU=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_TRAD_SIGNALS=y
+
+#
+# Power management options
+#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_PM is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_MULTIPLE_TABLES is not set
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+# CONFIG_IPV6_MIP6 is not set
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+# CONFIG_IPV6_SIT is not set
+CONFIG_IPV6_TUNNEL=y
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+# CONFIG_BRIDGE_NETFILTER is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+CONFIG_NETFILTER_XTABLES=y
+# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set
+# CONFIG_NETFILTER_XT_TARGET_MARK is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+# CONFIG_NETFILTER_XT_TARGET_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+# CONFIG_NETFILTER_XT_MATCH_MARK is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_OWNER is not set
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+# CONFIG_IP_NF_TARGET_REJECT is not set
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_MANGLE is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_RAW is not set
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+# CONFIG_IP_NF_ARP_MANGLE is not set
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=y
+# CONFIG_IP6_NF_MATCH_AH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_MH is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_TARGET_HL is not set
+# CONFIG_IP6_NF_TARGET_LOG is not set
+CONFIG_IP6_NF_FILTER=y
+# CONFIG_IP6_NF_TARGET_REJECT is not set
+# CONFIG_IP6_NF_MANGLE is not set
+# CONFIG_IP6_NF_RAW is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_CBQ is not set
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+# CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFQ is not set
+# CONFIG_NET_SCH_TEQL is not set
+CONFIG_NET_SCH_TBF=y
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_DSMARK is not set
+# CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
+
+#
+# Classification
+#
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_TCINDEX is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+# CONFIG_NET_CLS_U32 is not set
+# CONFIG_NET_CLS_RSVP is not set
+# CONFIG_NET_CLS_RSVP6 is not set
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_EMATCH is not set
+# CONFIG_NET_CLS_ACT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_CAFE is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_SATA_PMP=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# See the help texts for more information.
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_ATL2 is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_JME is not set
+CONFIG_NETDEV_10000=y
+# CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
+# CONFIG_BE2NET is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+CONFIG_USB_RTL8150=y
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+# CONFIG_USB_EZUSB is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+CONFIG_USB_SERIAL_CP210X=y
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+# CONFIG_USB_SERIAL_SPCP8X5 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_UWB is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_KMEMCHECK is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="rw dhash_entries=1024 ihash_entries=1024 ip=10.0.1.3:10.0.1.1:10.0.1.1:255.255.255.0:zeus:eth0: root=/dev/nfs nfsroot=/nfsroot/cramfs,wsize=512,rsize=512,tcp nokgdb console=ttyUSB0,115200 memsize=252M"
+# CONFIG_CMDLINE_OVERRIDE is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_RUNTIME_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index f40c3a04739..57a50483abd 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1204,7 +1203,7 @@ CONFIG_FRAME_WARN=1024
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 6c6a19aebe1..21c2022d46e 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -10,7 +10,6 @@ CONFIG_MIPS=y
#
# CONFIG_MACH_ALCHEMY is not set
# CONFIG_AR7 is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
@@ -284,7 +283,6 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
-# CONFIG_PROBE_INITRD_HEADER is not set
# CONFIG_FREEZER is not set
#
@@ -1063,7 +1061,7 @@ CONFIG_TRACING_SUPPORT=y
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index e53b8d096cf..79036289003 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -1694,7 +1693,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
#
# Security options
diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig
index 7f38c0b956f..7f07bf02b83 100644
--- a/arch/mips/configs/sb1250-swarm_defconfig
+++ b/arch/mips/configs/sb1250-swarm_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -961,7 +960,7 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
# CONFIG_SB1XXX_CORELIS is not set
#
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
index b5059881bc7..c54d1128f9a 100644
--- a/arch/mips/configs/tb0219_defconfig
+++ b/arch/mips/configs/tb0219_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -892,7 +891,9 @@ CONFIG_FRAME_WARN=1024
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
index b06a716bf23..e7c5cd32a2b 100644
--- a/arch/mips/configs/tb0226_defconfig
+++ b/arch/mips/configs/tb0226_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -895,7 +894,9 @@ CONFIG_FRAME_WARN=1024
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="cca=3 mem=32M console=ttyVR0,115200"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
index 46512cf7ce0..b50032ba4d0 100644
--- a/arch/mips/configs/tb0287_defconfig
+++ b/arch/mips/configs/tb0287_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_BCM47XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
@@ -1077,7 +1076,9 @@ CONFIG_FRAME_WARN=1024
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_SAMPLES is not set
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
index b437eb7f867..c02ba08b69a 100644
--- a/arch/mips/configs/workpad_defconfig
+++ b/arch/mips/configs/workpad_defconfig
@@ -9,7 +9,6 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -755,7 +754,9 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x170,0x376,49 mem=16M"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/wrppmc_defconfig b/arch/mips/configs/wrppmc_defconfig
index 06acc7482e4..a35bc41389e 100644
--- a/arch/mips/configs/wrppmc_defconfig
+++ b/arch/mips/configs/wrppmc_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -887,7 +886,9 @@ CONFIG_ENABLE_MUST_CHECK=y
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CROSSCOMPILE=y
+CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200n8"
+# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
diff --git a/arch/mips/configs/yosemite_defconfig b/arch/mips/configs/yosemite_defconfig
index 69feaf88b51..e3d68d651e7 100644
--- a/arch/mips/configs/yosemite_defconfig
+++ b/arch/mips/configs/yosemite_defconfig
@@ -22,7 +22,6 @@ CONFIG_ZONE_DMA=y
# CONFIG_MIPS_DB1550 is not set
# CONFIG_MIPS_DB1200 is not set
# CONFIG_MIPS_MIRAGE is not set
-# CONFIG_BASLER_EXCITE is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
@@ -824,7 +823,7 @@ CONFIG_DEBUG_MUTEXES=y
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_CROSSCOMPILE=y
-CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_BOOL is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_RUNTIME_DEBUG is not set
diff --git a/arch/mips/fw/arc/cmdline.c b/arch/mips/fw/arc/cmdline.c
index 4ca4eef934a..5c8603c85f2 100644
--- a/arch/mips/fw/arc/cmdline.c
+++ b/arch/mips/fw/arc/cmdline.c
@@ -16,11 +16,6 @@
#undef DEBUG_CMDLINE
-char * __init prom_getcmdline(void)
-{
- return arcs_cmdline;
-}
-
static char *ignored[] = {
"ConsoleIn=",
"ConsoleOut=",
diff --git a/arch/mips/include/asm/asm-offsets.h b/arch/mips/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/mips/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index f5dfaf6a160..09eee09780f 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -67,9 +67,9 @@
#define MACH_LEMOTE_ML2F7 3
#define MACH_LEMOTE_YL2F89 4
#define MACH_DEXXON_GDIUM2F10 5
-#define MACH_LOONGSON_END 6
-
-#define CL_SIZE COMMAND_LINE_SIZE
+#define MACH_LEMOTE_NAS 6
+#define MACH_LEMOTE_LL2F 7
+#define MACH_LOONGSON_END 8
extern char *system_type;
const char *get_system_type(void);
@@ -107,7 +107,7 @@ extern void free_init_pages(const char *what,
/*
* Initial kernel command line, usually setup by prom_init()
*/
-extern char arcs_cmdline[CL_SIZE];
+extern char arcs_cmdline[COMMAND_LINE_SIZE];
/*
* Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware
diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h
new file mode 100644
index 00000000000..83894aa7932
--- /dev/null
+++ b/arch/mips/include/asm/clock.h
@@ -0,0 +1,64 @@
+#ifndef __ASM_MIPS_CLOCK_H
+#define __ASM_MIPS_CLOCK_H
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/clk.h>
+
+extern void (*cpu_wait) (void);
+
+struct clk;
+
+struct clk_ops {
+ void (*init) (struct clk *clk);
+ void (*enable) (struct clk *clk);
+ void (*disable) (struct clk *clk);
+ void (*recalc) (struct clk *clk);
+ int (*set_rate) (struct clk *clk, unsigned long rate, int algo_id);
+ long (*round_rate) (struct clk *clk, unsigned long rate);
+};
+
+struct clk {
+ struct list_head node;
+ const char *name;
+ int id;
+ struct module *owner;
+
+ struct clk *parent;
+ struct clk_ops *ops;
+
+ struct kref kref;
+
+ unsigned long rate;
+ unsigned long flags;
+};
+
+#define CLK_ALWAYS_ENABLED (1 << 0)
+#define CLK_RATE_PROPAGATES (1 << 1)
+
+/* Should be defined by processor-specific code */
+void arch_init_clk_ops(struct clk_ops **, int type);
+
+int clk_init(void);
+
+int __clk_enable(struct clk *);
+void __clk_disable(struct clk *);
+
+void clk_recalc_rate(struct clk *);
+
+int clk_register(struct clk *);
+void clk_unregister(struct clk *);
+
+/* the exported API, in addition to clk_set_rate */
+/**
+ * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ * @algo_id: algorithm id to be passed down to ops->set_rate
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id);
+
+#endif /* __ASM_MIPS_CLOCK_H */
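
A small sketch of how platform code might register a clock against this interface and drive it through clk_set_rate_ex(); the clock name, rates and algorithm id are made up, and it assumes the common pattern where clk_set_rate_ex() ends up in ops->set_rate:

#include <linux/init.h>
#include <asm/clock.h>

static int example_set_rate(struct clk *clk, unsigned long rate, int algo_id)
{
	clk->rate = rate;	/* a real driver would program the hardware here */
	return 0;
}

static struct clk_ops example_clk_ops = {
	.set_rate = example_set_rate,
};

static struct clk example_clk = {
	.name  = "example_bus_clk",
	.ops   = &example_clk_ops,
	.rate  = 100000000,
	.flags = CLK_ALWAYS_ENABLED,
};

static int __init example_clk_setup(void)
{
	int err;

	err = clk_register(&example_clk);
	if (err)
		return err;

	/* Ask for 50 MHz, handing algorithm id 0 down to ops->set_rate. */
	return clk_set_rate_ex(&example_clk, 50000000, 0);
}
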
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
new file mode 100644
index 00000000000..6b04c98b7fa
--- /dev/null
+++ b/arch/mips/include/asm/cop2.h
@@ -0,0 +1,23 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_COP2_H
+#define __ASM_COP2_H
+
+enum cu2_ops {
+ CU2_EXCEPTION,
+ CU2_LWC2_OP,
+ CU2_LDC2_OP,
+ CU2_SWC2_OP,
+ CU2_SDC2_OP,
+};
+
+extern int register_cu2_notifier(struct notifier_block *nb);
+extern int cu2_notifier_call_chain(unsigned long val, void *v);
+
+#endif /* __ASM_COP2_H */
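
A sketch of how a coprocessor-2 owner might hook the new cu2 notifier chain; the handler body and return values are illustrative and assume the standard notifier_block calling convention:

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/cop2.h>

static int example_cu2_call(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	switch (action) {
	case CU2_EXCEPTION:
		/* Restore/enable the coprocessor here and claim the event. */
		return NOTIFY_STOP;
	default:
		/* LWC2/LDC2/SWC2/SDC2 emulation not handled by this sketch. */
		return NOTIFY_OK;
	}
}

static struct notifier_block example_cu2_notifier = {
	.notifier_call = example_cu2_call,
};

static int __init example_cu2_init(void)
{
	return register_cu2_notifier(&example_cu2_notifier);
}
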
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 4b96d1a3605..cf373a95fe4 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -154,6 +154,8 @@
#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
#define PRID_REV_VR4130 0x0080
#define PRID_REV_34K_V1_0_2 0x0022
+#define PRID_REV_LOONGSON2E 0x0002
+#define PRID_REV_LOONGSON2F 0x0003
/*
* Older processors used to encode processor version and revision in two
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 7990694cda2..7a6a35dbe52 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -326,7 +326,6 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
diff --git a/arch/mips/include/asm/fcntl.h b/arch/mips/include/asm/fcntl.h
index 2a52333a062..e482fe90fe8 100644
--- a/arch/mips/include/asm/fcntl.h
+++ b/arch/mips/include/asm/fcntl.h
@@ -10,7 +10,7 @@
#define O_APPEND 0x0008
-#define O_SYNC 0x0010
+#define O_DSYNC 0x0010 /* used to be O_SYNC, see below */
#define O_NONBLOCK 0x0080
#define O_CREAT 0x0100 /* not fcntl */
#define O_TRUNC 0x0200 /* not fcntl */
@@ -18,6 +18,21 @@
#define O_NOCTTY 0x0800 /* not fcntl */
#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
#define O_LARGEFILE 0x2000 /* allow large file opens */
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 0x4000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define O_DIRECT 0x8000 /* direct disk access hint */
#define F_GETLK 14
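
The encoding described in the comment above means a plain O_DSYNC test catches both flavours, while a full O_SYNC request must have both bits set. A small userspace-style sketch of distinguishing them, assuming headers that already define the split flags; the function is illustrative:

#include <fcntl.h>

static int sync_mode(int flags)
{
	if ((flags & O_SYNC) == O_SYNC)
		return 2;	/* full POSIX O_SYNC: data and metadata */
	if (flags & O_DSYNC)
		return 1;	/* O_DSYNC only: data, not all metadata */
	return 0;		/* neither requested */
}
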
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 8a3ef247659..7fcef8ef3fa 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -28,15 +28,7 @@
struct sigcontext;
struct sigcontext32;
-extern asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
-extern asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
-extern asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
extern void fpu_emulator_init_fpu(void);
-extern int fpu_emulator_save_context(struct sigcontext __user *sc);
-extern int fpu_emulator_restore_context(struct sigcontext __user *sc);
extern void _init_fpu(void);
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index e5189572956..aecada6f611 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -25,17 +25,27 @@
#include <asm/break.h>
#include <asm/inst.h>
+#include <asm/local.h>
+
+#ifdef CONFIG_DEBUG_FS
struct mips_fpu_emulator_stats {
- unsigned int emulated;
- unsigned int loads;
- unsigned int stores;
- unsigned int cp1ops;
- unsigned int cp1xops;
- unsigned int errors;
+ local_t emulated;
+ local_t loads;
+ local_t stores;
+ local_t cp1ops;
+ local_t cp1xops;
+ local_t errors;
};
-extern struct mips_fpu_emulator_stats fpuemustats;
+DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
+
+#define MIPS_FPU_EMU_INC_STATS(M) \
+ cpu_local_wrap(__local_inc(&__get_cpu_var(fpuemustats).M))
+
+#else
+#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
+#endif /* CONFIG_DEBUG_FS */
extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
unsigned long cpc);
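
With the statistics switched to per-CPU local_t counters, emulator paths are expected to bump them through the macro instead of writing to a shared struct. A sketch of the intended call pattern; the surrounding function is illustrative:

#include <asm/fpu_emulator.h>

static int example_emulate_one(void)
{
	MIPS_FPU_EMU_INC_STATS(emulated);	/* one more emulated instruction */
	MIPS_FPU_EMU_INC_STATS(cp1ops);		/* ...which was a CP1 operation */
	return 0;				/* compiles to no-ops without DEBUG_FS */
}
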
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index 40a8c178f10..3986cd8704f 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -1 +1,90 @@
-/* empty */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ */
+
+#ifndef _ASM_MIPS_FTRACE_H
+#define _ASM_MIPS_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#define mcount _mcount
+
+#define safe_load(load, src, dst, error) \
+do { \
+ asm volatile ( \
+ "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
+ " li %[" STR(error) "], 0\n" \
+ "2:\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[" STR(error) "], 1\n" \
+ " j 2b\n" \
+ ".previous\n" \
+ \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR) "\t1b, 3b\n\t" \
+ ".previous\n" \
+ \
+ : [dst] "=&r" (dst), [error] "=r" (error)\
+ : [src] "r" (src) \
+ : "memory" \
+ ); \
+} while (0)
+
+#define safe_store(store, src, dst, error) \
+do { \
+ asm volatile ( \
+ "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
+ " li %[" STR(error) "], 0\n" \
+ "2:\n" \
+ \
+ ".section .fixup, \"ax\"\n" \
+ "3: li %[" STR(error) "], 1\n" \
+ " j 2b\n" \
+ ".previous\n" \
+ \
+ ".section\t__ex_table,\"a\"\n\t"\
+ STR(PTR) "\t1b, 3b\n\t" \
+ ".previous\n" \
+ \
+ : [error] "=r" (error) \
+ : [dst] "r" (dst), [src] "r" (src)\
+ : "memory" \
+ ); \
+} while (0)
+
+#define safe_load_code(dst, src, error) \
+ safe_load(STR(lw), src, dst, error)
+#define safe_store_code(src, dst, error) \
+ safe_store(STR(sw), src, dst, error)
+
+#define safe_load_stack(dst, src, error) \
+ safe_load(STR(PTR_L), src, dst, error)
+
+#define safe_store_stack(src, dst, error) \
+ safe_store(STR(PTR_S), src, dst, error)
+
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* _ASM_MIPS_FTRACE_H */
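The safe_load/safe_store macros above wrap a single load or store in an exception-table fixup so that a fault sets the error flag instead of oopsing the kernel. A sketch of how a dynamic-ftrace code-patching path might use them (helper name and error handling are illustrative, not taken from this patch):

/* Illustrative: replace one instruction word at 'ip', reporting a fault
 * instead of crashing if the address turns out to be unwritable. */
static int patch_text_word(unsigned long ip, unsigned int new_code)
{
	int faulted;

	safe_store_code(new_code, ip, faulted);
	if (faulted)
		return -EFAULT;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
	return 0;
}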
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 09b08d05ff7..06960364c96 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -113,36 +113,11 @@ do { \
#endif
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * Ideally there should be away to get this into kernel/irq/handle.c to
- * avoid the overhead of a call for just a tiny function ...
- */
-#define do_IRQ(irq) \
-do { \
- irq_enter(); \
- __DO_IRQ_SMTC_HOOK(irq); \
- generic_handle_irq(irq); \
- irq_exit(); \
-} while (0)
+extern void do_IRQ(unsigned int irq);
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-/*
- * To avoid inefficient and in some cases pathological re-checking of
- * IRQ affinity, we have this variant that skips the affinity check.
- */
-
-#define do_IRQ_no_affinity(irq) \
-do { \
- irq_enter(); \
- __NO_AFFINITY_IRQ_SMTC_HOOK(irq); \
- generic_handle_irq(irq); \
- irq_exit(); \
-} while (0)
+extern void do_IRQ_no_affinity(unsigned int irq);
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
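Both helpers move from header macros to out-of-line functions; based on the removed macro body, the C definition behind the new do_IRQ() prototype would look roughly like this (a sketch of the out-of-line form, not quoted from the patch):

void do_IRQ(unsigned int irq)
{
	irq_enter();
	__DO_IRQ_SMTC_HOOK(irq);
	generic_handle_irq(irq);
	irq_exit();
}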
diff --git a/arch/mips/include/asm/mach-excite/cpu-feature-overrides.h b/arch/mips/include/asm/mach-excite/cpu-feature-overrides.h
deleted file mode 100644
index 107104c3cd1..00000000000
--- a/arch/mips/include/asm/mach-excite/cpu-feature-overrides.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 Thomas Koeller <thomas.koeller@baslerweb.com>
- * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
- */
-#ifndef __ASM_MACH_EXCITE_CPU_FEATURE_OVERRIDES_H
-#define __ASM_MACH_EXCITE_CPU_FEATURE_OVERRIDES_H
-
-/*
- * Basler eXcite has an RM9122 processor.
- */
-#define cpu_has_watch 1
-#define cpu_has_mips16 0
-#define cpu_has_divec 0
-#define cpu_has_vce 0
-#define cpu_has_cache_cdex_p 0
-#define cpu_has_cache_cdex_s 0
-#define cpu_has_prefetch 1
-#define cpu_has_mcheck 0
-#define cpu_has_ejtag 0
-
-#define cpu_has_llsc 1
-#define cpu_has_vtag_icache 0
-#define cpu_has_dc_aliases 0
-#define cpu_has_ic_fills_f_dc 0
-#define cpu_has_dsp 0
-#define cpu_icache_snoops_remote_store 0
-#define cpu_has_mipsmt 0
-#define cpu_has_userlocal 0
-
-#define cpu_has_nofpuex 0
-#define cpu_has_64bits 1
-
-#define cpu_has_mips32r1 0
-#define cpu_has_mips32r2 0
-#define cpu_has_mips64r1 0
-#define cpu_has_mips64r2 0
-
-#define cpu_has_inclusive_pcaches 0
-
-#define cpu_dcache_line_size() 32
-#define cpu_icache_line_size() 32
-#define cpu_scache_line_size() 32
-
-#endif /* __ASM_MACH_EXCITE_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-excite/excite.h b/arch/mips/include/asm/mach-excite/excite.h
deleted file mode 100644
index 4c29ba44992..00000000000
--- a/arch/mips/include/asm/mach-excite/excite.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef __EXCITE_H__
-#define __EXCITE_H__
-
-#include <linux/init.h>
-#include <asm/addrspace.h>
-#include <asm/types.h>
-
-#define EXCITE_CPU_EXT_CLOCK 100000000
-
-#if !defined(__ASSEMBLY__)
-void __init excite_kgdb_init(void);
-void excite_procfs_init(void);
-extern unsigned long memsize;
-extern char modetty[];
-extern u32 unit_id;
-#endif
-
-/* Base name for XICAP devices */
-#define XICAP_NAME "xicap_gpi"
-
-/* OCD register offsets */
-#define LKB0 0x0038
-#define LKB5 0x0128
-#define LKM5 0x012C
-#define LKB7 0x0138
-#define LKM7 0x013c
-#define LKB8 0x0140
-#define LKM8 0x0144
-#define LKB9 0x0148
-#define LKM9 0x014c
-#define LKB10 0x0150
-#define LKM10 0x0154
-#define LKB11 0x0158
-#define LKM11 0x015c
-#define LKB12 0x0160
-#define LKM12 0x0164
-#define LKB13 0x0168
-#define LKM13 0x016c
-#define LDP0 0x0200
-#define LDP1 0x0210
-#define LDP2 0x0220
-#define LDP3 0x0230
-#define INTPIN0 0x0A40
-#define INTPIN1 0x0A44
-#define INTPIN2 0x0A48
-#define INTPIN3 0x0A4C
-#define INTPIN4 0x0A50
-#define INTPIN5 0x0A54
-#define INTPIN6 0x0A58
-#define INTPIN7 0x0A5C
-
-
-
-
-/* TITAN register offsets */
-#define CPRR 0x0004
-#define CPDSR 0x0008
-#define CPTC0R 0x000c
-#define CPTC1R 0x0010
-#define CPCFG0 0x0020
-#define CPCFG1 0x0024
-#define CPDST0A 0x0028
-#define CPDST0B 0x002c
-#define CPDST1A 0x0030
-#define CPDST1B 0x0034
-#define CPXDSTA 0x0038
-#define CPXDSTB 0x003c
-#define CPXCISRA 0x0048
-#define CPXCISRB 0x004c
-#define CPGIG0ER 0x0050
-#define CPGIG1ER 0x0054
-#define CPGRWL 0x0068
-#define CPURSLMT 0x00f8
-#define UACFG 0x0200
-#define UAINTS 0x0204
-#define SDRXFCIE 0x4828
-#define SDTXFCIE 0x4928
-#define INTP0Status0 0x1B00
-#define INTP0Mask0 0x1B04
-#define INTP0Set0 0x1B08
-#define INTP0Clear0 0x1B0C
-#define GXCFG 0x5000
-#define GXDMADRPFX 0x5018
-#define GXDMA_DESCADR 0x501c
-#define GXCH0TDESSTRT 0x5054
-
-/* IRQ definitions */
-#define NMICONFIG 0xac0
-#define TITAN_MSGINT 0xc4
-#define TITAN_IRQ ((TITAN_MSGINT / 0x20) + 2)
-#define FPGA0_MSGINT 0x5a
-#define FPGA0_IRQ ((FPGA0_MSGINT / 0x20) + 2)
-#define FPGA1_MSGINT 0x7b
-#define FPGA1_IRQ ((FPGA1_MSGINT / 0x20) + 2)
-#define PHY_MSGINT 0x9c
-#define PHY_IRQ ((PHY_MSGINT / 0x20) + 2)
-
-#if defined(CONFIG_BASLER_EXCITE_PROTOTYPE)
-/* Pre-release units used interrupt pin #9 */
-#define USB_IRQ 11
-#else
-/* Re-designed units use interrupt pin #1 */
-#define USB_MSGINT 0x39
-#define USB_IRQ ((USB_MSGINT / 0x20) + 2)
-#endif
-#define TIMER_IRQ 12
-
-
-/* Device address ranges */
-#define EXCITE_OFFS_OCD 0x1fffc000
-#define EXCITE_SIZE_OCD (16 * 1024)
-#define EXCITE_PHYS_OCD CPHYSADDR(EXCITE_OFFS_OCD)
-#define EXCITE_ADDR_OCD CKSEG1ADDR(EXCITE_OFFS_OCD)
-
-#define EXCITE_OFFS_SCRAM 0x1fffa000
-#define EXCITE_SIZE_SCRAM (8 << 10)
-#define EXCITE_PHYS_SCRAM CPHYSADDR(EXCITE_OFFS_SCRAM)
-#define EXCITE_ADDR_SCRAM CKSEG1ADDR(EXCITE_OFFS_SCRAM)
-
-#define EXCITE_OFFS_PCI_IO 0x1fff8000
-#define EXCITE_SIZE_PCI_IO (8 << 10)
-#define EXCITE_PHYS_PCI_IO CPHYSADDR(EXCITE_OFFS_PCI_IO)
-#define EXCITE_ADDR_PCI_IO CKSEG1ADDR(EXCITE_OFFS_PCI_IO)
-
-#define EXCITE_OFFS_TITAN 0x1fff0000
-#define EXCITE_SIZE_TITAN (32 << 10)
-#define EXCITE_PHYS_TITAN CPHYSADDR(EXCITE_OFFS_TITAN)
-#define EXCITE_ADDR_TITAN CKSEG1ADDR(EXCITE_OFFS_TITAN)
-
-#define EXCITE_OFFS_PCI_MEM 0x1ffe0000
-#define EXCITE_SIZE_PCI_MEM (64 << 10)
-#define EXCITE_PHYS_PCI_MEM CPHYSADDR(EXCITE_OFFS_PCI_MEM)
-#define EXCITE_ADDR_PCI_MEM CKSEG1ADDR(EXCITE_OFFS_PCI_MEM)
-
-#define EXCITE_OFFS_FPGA 0x1ffdc000
-#define EXCITE_SIZE_FPGA (16 << 10)
-#define EXCITE_PHYS_FPGA CPHYSADDR(EXCITE_OFFS_FPGA)
-#define EXCITE_ADDR_FPGA CKSEG1ADDR(EXCITE_OFFS_FPGA)
-
-#define EXCITE_OFFS_NAND 0x1ffd8000
-#define EXCITE_SIZE_NAND (16 << 10)
-#define EXCITE_PHYS_NAND CPHYSADDR(EXCITE_OFFS_NAND)
-#define EXCITE_ADDR_NAND CKSEG1ADDR(EXCITE_OFFS_NAND)
-
-#define EXCITE_OFFS_BOOTROM 0x1f000000
-#define EXCITE_SIZE_BOOTROM (8 << 20)
-#define EXCITE_PHYS_BOOTROM CPHYSADDR(EXCITE_OFFS_BOOTROM)
-#define EXCITE_ADDR_BOOTROM CKSEG1ADDR(EXCITE_OFFS_BOOTROM)
-
-/* FPGA address offsets */
-#define EXCITE_FPGA_DPR 0x0104 /* dual-ported ram */
-#define EXCITE_FPGA_SYSCTL 0x0200 /* system control register block */
-
-#endif /* __EXCITE_H__ */
diff --git a/arch/mips/include/asm/mach-excite/excite_fpga.h b/arch/mips/include/asm/mach-excite/excite_fpga.h
deleted file mode 100644
index 0a1ef69bece..00000000000
--- a/arch/mips/include/asm/mach-excite/excite_fpga.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef EXCITE_FPGA_H_INCLUDED
-#define EXCITE_FPGA_H_INCLUDED
-
-
-/**
- * Address alignment of the individual FPGA bytes.
- * The address arrangement of the individual bytes of the FPGA is two
- * byte aligned at the embedded MK2 platform.
- */
-#ifdef EXCITE_CCI_FPGA_MK2
-typedef unsigned char excite_cci_fpga_align_t __attribute__ ((aligned(2)));
-#else
-typedef unsigned char excite_cci_fpga_align_t;
-#endif
-
-
-/**
- * Size of Dual Ported RAM.
- */
-#define EXCITE_DPR_SIZE 263
-
-
-/**
- * Size of Reserved Status Fields in Dual Ported RAM.
- */
-#define EXCITE_DPR_STATUS_SIZE 7
-
-
-
-/**
- * FPGA.
- * Hardware register layout of the FPGA interface. The FPGA must accessed
- * byte wise solely.
- * @see EXCITE_CCI_DPR_MK2
- */
-typedef struct excite_fpga {
-
- /**
- * Dual Ported RAM.
- */
- excite_cci_fpga_align_t dpr[EXCITE_DPR_SIZE];
-
- /**
- * Status.
- */
- excite_cci_fpga_align_t status[EXCITE_DPR_STATUS_SIZE];
-
-#ifdef EXCITE_CCI_FPGA_MK2
- /**
- * RM9000 Interrupt.
- * Write access initiates interrupt at the RM9000 (MIPS) processor of the eXcite.
- */
- excite_cci_fpga_align_t rm9k_int;
-#else
- /**
- * MK2 Interrupt.
- * Write access initiates interrupt at the ARM processor of the MK2.
- */
- excite_cci_fpga_align_t mk2_int;
-
- excite_cci_fpga_align_t gap[0x1000-0x10f];
-
- /**
- * IRQ Source/Acknowledge.
- */
- excite_cci_fpga_align_t rm9k_irq_src;
-
- /**
- * IRQ Mask.
- * Set bits enable the related interrupt.
- */
- excite_cci_fpga_align_t rm9k_irq_mask;
-#endif
-
-
-} excite_fpga;
-
-
-
-#endif /* ndef EXCITE_FPGA_H_INCLUDED */
diff --git a/arch/mips/include/asm/mach-excite/excite_nandflash.h b/arch/mips/include/asm/mach-excite/excite_nandflash.h
deleted file mode 100644
index c4cf6140622..00000000000
--- a/arch/mips/include/asm/mach-excite/excite_nandflash.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __EXCITE_NANDFLASH_H__
-#define __EXCITE_NANDFLASH_H__
-
-/* Resource names */
-#define EXCITE_NANDFLASH_RESOURCE_REGS "excite_nandflash_regs"
-
-#endif /* __EXCITE_NANDFLASH_H__ */
diff --git a/arch/mips/include/asm/mach-excite/rm9k_eth.h b/arch/mips/include/asm/mach-excite/rm9k_eth.h
deleted file mode 100644
index 94705a46f72..00000000000
--- a/arch/mips/include/asm/mach-excite/rm9k_eth.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#if !defined(__RM9K_ETH_H__)
-#define __RM9K_ETH_H__
-
-#define RM9K_GE_NAME "rm9k_ge"
-
-/* Resource names */
-#define RM9K_GE_RESOURCE_MAC "rm9k_ge_mac"
-#define RM9K_GE_RESOURCE_MSTAT "rm9k_ge_mstat"
-#define RM9K_GE_RESOURCE_PKTPROC "rm9k_ge_pktproc"
-#define RM9K_GE_RESOURCE_XDMA "rm9k_ge_xdma"
-#define RM9K_GE_RESOURCE_FIFO_RX "rm9k_ge_fifo_rx"
-#define RM9K_GE_RESOURCE_FIFO_TX "rm9k_ge_fifo_tx"
-#define RM9K_GE_RESOURCE_FIFOMEM_RX "rm9k_ge_fifo_memory_rx"
-#define RM9K_GE_RESOURCE_FIFOMEM_TX "rm9k_ge_fifo_memory_tx"
-#define RM9K_GE_RESOURCE_PHY "rm9k_ge_phy"
-#define RM9K_GE_RESOURCE_DMADESC_RX "rm9k_ge_dmadesc_rx"
-#define RM9K_GE_RESOURCE_DMADESC_TX "rm9k_ge_dmadesc_tx"
-#define RM9K_GE_RESOURCE_IRQ_MAIN "rm9k_ge_irq_main"
-#define RM9K_GE_RESOURCE_IRQ_PHY "rm9k_ge_irq_phy"
-#define RM9K_GE_RESOURCE_GPI_SLICE "rm9k_ge_gpi_slice"
-#define RM9K_GE_RESOURCE_MDIO_CHANNEL "rm9k_ge_mdio_channel"
-
-#endif /* !defined(__RM9K_ETH_H__) */
diff --git a/arch/mips/include/asm/mach-excite/rm9k_wdt.h b/arch/mips/include/asm/mach-excite/rm9k_wdt.h
deleted file mode 100644
index 3fa3c08d2da..00000000000
--- a/arch/mips/include/asm/mach-excite/rm9k_wdt.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef __RM9K_WDT_H__
-#define __RM9K_WDT_H__
-
-/* Device name */
-#define WDT_NAME "wdt_gpi"
-
-/* Resource names */
-#define WDT_RESOURCE_REGS "excite_watchdog_regs"
-#define WDT_RESOURCE_IRQ "excite_watchdog_irq"
-#define WDT_RESOURCE_COUNTER "excite_watchdog_counter"
-
-#endif /* __RM9K_WDT_H__ */
diff --git a/arch/mips/include/asm/mach-excite/rm9k_xicap.h b/arch/mips/include/asm/mach-excite/rm9k_xicap.h
deleted file mode 100644
index 009577734a8..00000000000
--- a/arch/mips/include/asm/mach-excite/rm9k_xicap.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __EXCITE_XICAP_H__
-#define __EXCITE_XICAP_H__
-
-
-/* Resource names */
-#define XICAP_RESOURCE_FIFO_RX "xicap_fifo_rx"
-#define XICAP_RESOURCE_FIFO_TX "xicap_fifo_tx"
-#define XICAP_RESOURCE_XDMA "xicap_xdma"
-#define XICAP_RESOURCE_DMADESC "xicap_dmadesc"
-#define XICAP_RESOURCE_PKTPROC "xicap_pktproc"
-#define XICAP_RESOURCE_IRQ "xicap_irq"
-#define XICAP_RESOURCE_GPI_SLICE "xicap_gpi_slice"
-#define XICAP_RESOURCE_FIFO_BLK "xicap_fifo_blocks"
-#define XICAP_RESOURCE_PKT_STREAM "xicap_pkt_stream"
-
-#endif /* __EXCITE_XICAP_H__ */
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
new file mode 100644
index 00000000000..021f77ca59e
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
@@ -0,0 +1,305 @@
+/*
+ * The header file of the cs5536 south bridge.
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu <liujl@lemote.com>
+ */
+
+#ifndef _CS5536_H
+#define _CS5536_H
+
+#include <linux/types.h>
+
+extern void _rdmsr(u32 msr, u32 *hi, u32 *lo);
+extern void _wrmsr(u32 msr, u32 hi, u32 lo);
+
+/*
+ * MSR module base
+ */
+#define CS5536_SB_MSR_BASE (0x00000000)
+#define CS5536_GLIU_MSR_BASE (0x10000000)
+#define CS5536_ILLEGAL_MSR_BASE (0x20000000)
+#define CS5536_USB_MSR_BASE (0x40000000)
+#define CS5536_IDE_MSR_BASE (0x60000000)
+#define CS5536_DIVIL_MSR_BASE (0x80000000)
+#define CS5536_ACC_MSR_BASE (0xa0000000)
+#define CS5536_UNUSED_MSR_BASE (0xc0000000)
+#define CS5536_GLCP_MSR_BASE (0xe0000000)
+
+#define SB_MSR_REG(offset) (CS5536_SB_MSR_BASE | (offset))
+#define GLIU_MSR_REG(offset) (CS5536_GLIU_MSR_BASE | (offset))
+#define ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
+#define USB_MSR_REG(offset) (CS5536_USB_MSR_BASE | (offset))
+#define IDE_MSR_REG(offset) (CS5536_IDE_MSR_BASE | (offset))
+#define DIVIL_MSR_REG(offset) (CS5536_DIVIL_MSR_BASE | (offset))
+#define ACC_MSR_REG(offset) (CS5536_ACC_MSR_BASE | (offset))
+#define UNUSED_MSR_REG(offset) (CS5536_UNUSED_MSR_BASE | (offset))
+#define GLCP_MSR_REG(offset) (CS5536_GLCP_MSR_BASE | (offset))
+
+/*
+ * BAR SPACE OF VIRTUAL PCI :
+ * range for pci probe use, length is the actual size.
+ */
+/* IO space for all DIVIL modules */
+#define CS5536_IRQ_RANGE 0xffffffe0 /* USED FOR PCI PROBE */
+#define CS5536_IRQ_LENGTH 0x20 /* THE REGS ACTUAL LENGTH */
+#define CS5536_SMB_RANGE 0xfffffff8
+#define CS5536_SMB_LENGTH 0x08
+#define CS5536_GPIO_RANGE 0xffffff00
+#define CS5536_GPIO_LENGTH 0x100
+#define CS5536_MFGPT_RANGE 0xffffffc0
+#define CS5536_MFGPT_LENGTH 0x40
+#define CS5536_ACPI_RANGE 0xffffffe0
+#define CS5536_ACPI_LENGTH 0x20
+#define CS5536_PMS_RANGE 0xffffff80
+#define CS5536_PMS_LENGTH 0x80
+/* IO space for IDE */
+#define CS5536_IDE_RANGE 0xfffffff0
+#define CS5536_IDE_LENGTH 0x10
+/* IO space for ACC */
+#define CS5536_ACC_RANGE 0xffffff80
+#define CS5536_ACC_LENGTH 0x80
+/* MEM space for ALL USB modules */
+#define CS5536_OHCI_RANGE 0xfffff000
+#define CS5536_OHCI_LENGTH 0x1000
+#define CS5536_EHCI_RANGE 0xfffff000
+#define CS5536_EHCI_LENGTH 0x1000
+
+/*
+ * PCI MSR ACCESS
+ */
+#define PCI_MSR_CTRL 0xF0
+#define PCI_MSR_ADDR 0xF4
+#define PCI_MSR_DATA_LO 0xF8
+#define PCI_MSR_DATA_HI 0xFC
+
+/**************** MSR *****************************/
+
+/*
+ * GLIU STANDARD MSR
+ */
+#define GLIU_CAP 0x00
+#define GLIU_CONFIG 0x01
+#define GLIU_SMI 0x02
+#define GLIU_ERROR 0x03
+#define GLIU_PM 0x04
+#define GLIU_DIAG 0x05
+
+/*
+ * GLIU SPEC. MSR
+ */
+#define GLIU_P2D_BM0 0x20
+#define GLIU_P2D_BM1 0x21
+#define GLIU_P2D_BM2 0x22
+#define GLIU_P2D_BMK0 0x23
+#define GLIU_P2D_BMK1 0x24
+#define GLIU_P2D_BM3 0x25
+#define GLIU_P2D_BM4 0x26
+#define GLIU_COH 0x80
+#define GLIU_PAE 0x81
+#define GLIU_ARB 0x82
+#define GLIU_ASMI 0x83
+#define GLIU_AERR 0x84
+#define GLIU_DEBUG 0x85
+#define GLIU_PHY_CAP 0x86
+#define GLIU_NOUT_RESP 0x87
+#define GLIU_NOUT_WDATA 0x88
+#define GLIU_WHOAMI 0x8B
+#define GLIU_SLV_DIS 0x8C
+#define GLIU_IOD_BM0 0xE0
+#define GLIU_IOD_BM1 0xE1
+#define GLIU_IOD_BM2 0xE2
+#define GLIU_IOD_BM3 0xE3
+#define GLIU_IOD_BM4 0xE4
+#define GLIU_IOD_BM5 0xE5
+#define GLIU_IOD_BM6 0xE6
+#define GLIU_IOD_BM7 0xE7
+#define GLIU_IOD_BM8 0xE8
+#define GLIU_IOD_BM9 0xE9
+#define GLIU_IOD_SC0 0xEA
+#define GLIU_IOD_SC1 0xEB
+#define GLIU_IOD_SC2 0xEC
+#define GLIU_IOD_SC3 0xED
+#define GLIU_IOD_SC4 0xEE
+#define GLIU_IOD_SC5 0xEF
+#define GLIU_IOD_SC6 0xF0
+#define GLIU_IOD_SC7 0xF1
+
+/*
+ * SB STANDARD
+ */
+#define SB_CAP 0x00
+#define SB_CONFIG 0x01
+#define SB_SMI 0x02
+#define SB_ERROR 0x03
+#define SB_MAR_ERR_EN 0x00000001
+#define SB_TAR_ERR_EN 0x00000002
+#define SB_RSVD_BIT1 0x00000004
+#define SB_EXCEP_ERR_EN 0x00000008
+#define SB_SYSE_ERR_EN 0x00000010
+#define SB_PARE_ERR_EN 0x00000020
+#define SB_TAS_ERR_EN 0x00000040
+#define SB_MAR_ERR_FLAG 0x00010000
+#define SB_TAR_ERR_FLAG 0x00020000
+#define SB_RSVD_BIT2 0x00040000
+#define SB_EXCEP_ERR_FLAG 0x00080000
+#define SB_SYSE_ERR_FLAG 0x00100000
+#define SB_PARE_ERR_FLAG 0x00200000
+#define SB_TAS_ERR_FLAG 0x00400000
+#define SB_PM 0x04
+#define SB_DIAG 0x05
+
+/*
+ * SB SPEC.
+ */
+#define SB_CTRL 0x10
+#define SB_R0 0x20
+#define SB_R1 0x21
+#define SB_R2 0x22
+#define SB_R3 0x23
+#define SB_R4 0x24
+#define SB_R5 0x25
+#define SB_R6 0x26
+#define SB_R7 0x27
+#define SB_R8 0x28
+#define SB_R9 0x29
+#define SB_R10 0x2A
+#define SB_R11 0x2B
+#define SB_R12 0x2C
+#define SB_R13 0x2D
+#define SB_R14 0x2E
+#define SB_R15 0x2F
+
+/*
+ * GLCP STANDARD
+ */
+#define GLCP_CAP 0x00
+#define GLCP_CONFIG 0x01
+#define GLCP_SMI 0x02
+#define GLCP_ERROR 0x03
+#define GLCP_PM 0x04
+#define GLCP_DIAG 0x05
+
+/*
+ * GLCP SPEC.
+ */
+#define GLCP_CLK_DIS_DELAY 0x08
+#define GLCP_PM_CLK_DISABLE 0x09
+#define GLCP_GLB_PM 0x0B
+#define GLCP_DBG_OUT 0x0C
+#define GLCP_RSVD1 0x0D
+#define GLCP_SOFT_COM 0x0E
+#define SOFT_BAR_SMB_FLAG 0x00000001
+#define SOFT_BAR_GPIO_FLAG 0x00000002
+#define SOFT_BAR_MFGPT_FLAG 0x00000004
+#define SOFT_BAR_IRQ_FLAG 0x00000008
+#define SOFT_BAR_PMS_FLAG 0x00000010
+#define SOFT_BAR_ACPI_FLAG 0x00000020
+#define SOFT_BAR_IDE_FLAG 0x00000400
+#define SOFT_BAR_ACC_FLAG 0x00000800
+#define SOFT_BAR_OHCI_FLAG 0x00001000
+#define SOFT_BAR_EHCI_FLAG 0x00002000
+#define GLCP_RSVD2 0x0F
+#define GLCP_CLK_OFF 0x10
+#define GLCP_CLK_ACTIVE 0x11
+#define GLCP_CLK_DISABLE 0x12
+#define GLCP_CLK4ACK 0x13
+#define GLCP_SYS_RST 0x14
+#define GLCP_RSVD3 0x15
+#define GLCP_DBG_CLK_CTRL 0x16
+#define GLCP_CHIP_REV_ID 0x17
+
+/* PIC */
+#define PIC_YSEL_LOW 0x20
+#define PIC_YSEL_LOW_USB_SHIFT 8
+#define PIC_YSEL_LOW_ACC_SHIFT 16
+#define PIC_YSEL_LOW_FLASH_SHIFT 24
+#define PIC_YSEL_HIGH 0x21
+#define PIC_ZSEL_LOW 0x22
+#define PIC_ZSEL_HIGH 0x23
+#define PIC_IRQM_PRIM 0x24
+#define PIC_IRQM_LPC 0x25
+#define PIC_XIRR_STS_LOW 0x26
+#define PIC_XIRR_STS_HIGH 0x27
+#define PCI_SHDW 0x34
+
+/*
+ * DIVIL STANDARD
+ */
+#define DIVIL_CAP 0x00
+#define DIVIL_CONFIG 0x01
+#define DIVIL_SMI 0x02
+#define DIVIL_ERROR 0x03
+#define DIVIL_PM 0x04
+#define DIVIL_DIAG 0x05
+
+/*
+ * DIVIL SPEC.
+ */
+#define DIVIL_LBAR_IRQ 0x08
+#define DIVIL_LBAR_KEL 0x09
+#define DIVIL_LBAR_SMB 0x0B
+#define DIVIL_LBAR_GPIO 0x0C
+#define DIVIL_LBAR_MFGPT 0x0D
+#define DIVIL_LBAR_ACPI 0x0E
+#define DIVIL_LBAR_PMS 0x0F
+#define DIVIL_LEG_IO 0x14
+#define DIVIL_BALL_OPTS 0x15
+#define DIVIL_SOFT_IRQ 0x16
+#define DIVIL_SOFT_RESET 0x17
+
+/* MFGPT */
+#define MFGPT_IRQ 0x28
+
+/*
+ * IDE STANDARD
+ */
+#define IDE_CAP 0x00
+#define IDE_CONFIG 0x01
+#define IDE_SMI 0x02
+#define IDE_ERROR 0x03
+#define IDE_PM 0x04
+#define IDE_DIAG 0x05
+
+/*
+ * IDE SPEC.
+ */
+#define IDE_IO_BAR 0x08
+#define IDE_CFG 0x10
+#define IDE_DTC 0x12
+#define IDE_CAST 0x13
+#define IDE_ETC 0x14
+#define IDE_INTERNAL_PM 0x15
+
+/*
+ * ACC STANDARD
+ */
+#define ACC_CAP 0x00
+#define ACC_CONFIG 0x01
+#define ACC_SMI 0x02
+#define ACC_ERROR 0x03
+#define ACC_PM 0x04
+#define ACC_DIAG 0x05
+
+/*
+ * USB STANDARD
+ */
+#define USB_CAP 0x00
+#define USB_CONFIG 0x01
+#define USB_SMI 0x02
+#define USB_ERROR 0x03
+#define USB_PM 0x04
+#define USB_DIAG 0x05
+
+/*
+ * USB SPEC.
+ */
+#define USB_OHCI 0x08
+#define USB_EHCI 0x09
+
+/****************** NATIVE ***************************/
+/* GPIO : I/O SPACE; REG : 32BITS */
+#define GPIOL_OUT_VAL 0x00
+#define GPIOL_OUT_EN 0x04
+
+#endif /* _CS5536_H */
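The _rdmsr()/_wrmsr() helpers plus the *_MSR_REG() base macros above are the entire access model for the south bridge: a module base OR'd with a register index. A hedged usage sketch (the helper below is hypothetical; only the macro and function names come from this header):

/* Illustrative: read the DIVIL GPIO LBAR MSR and return its low word.
 * Treating the low word as the interesting part is an assumption made
 * for the example, not something this header states. */
static u32 cs5536_read_gpio_lbar_lo(void)
{
	u32 hi, lo;

	_rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_GPIO), &hi, &lo);
	return lo;
}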
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
new file mode 100644
index 00000000000..4b493d6772c
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
@@ -0,0 +1,35 @@
+/*
+ * cs5536 mfgpt header file
+ */
+
+#ifndef _CS5536_MFGPT_H
+#define _CS5536_MFGPT_H
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+#ifdef CONFIG_CS5536_MFGPT
+extern void setup_mfgpt0_timer(void);
+extern void disable_mfgpt0_counter(void);
+extern void enable_mfgpt0_counter(void);
+#else
+static inline void __maybe_unused setup_mfgpt0_timer(void)
+{
+}
+static inline void __maybe_unused disable_mfgpt0_counter(void)
+{
+}
+static inline void __maybe_unused enable_mfgpt0_counter(void)
+{
+}
+#endif
+
+#define MFGPT_TICK_RATE 14318000
+#define COMPARE ((MFGPT_TICK_RATE + HZ/2) / HZ)
+
+#define MFGPT_BASE mfgpt_base
+#define MFGPT0_CMP2 (MFGPT_BASE + 2)
+#define MFGPT0_CNT (MFGPT_BASE + 4)
+#define MFGPT0_SETUP (MFGPT_BASE + 6)
+
+#endif /*!_CS5536_MFGPT_H */
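COMPARE above is simply the 14.318 MHz MFGPT clock divided by HZ, rounded to the nearest tick. A standalone check of that arithmetic (the HZ value is chosen only for illustration):

#include <stdio.h>

int main(void)
{
	const unsigned long mfgpt_tick_rate = 14318000;	/* 14.318 MHz */
	const unsigned long hz = 250;			/* assumed kernel HZ */
	unsigned long compare = (mfgpt_tick_rate + hz / 2) / hz;

	printf("ticks per jiffy at HZ=%lu: %lu\n", hz, compare);	/* 57272 */
	return 0;
}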
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
new file mode 100644
index 00000000000..0dca9c89ee7
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
@@ -0,0 +1,153 @@
+/*
+ * The definition file of the cs5536 Virtual Support Module (VSM).
+ * PCI configuration space can be accessed through the VSM, so
+ * direct MSR reads/writes are no longer needed, except for the special
+ * MSR registers that are not implemented yet.
+ *
+ * Copyright (C) 2007 Lemote Inc.
+ * Author : jlliu, liujl@lemote.com
+ */
+
+#ifndef _CS5536_PCI_H
+#define _CS5536_PCI_H
+
+#include <linux/types.h>
+#include <linux/pci_regs.h>
+
+extern void cs5536_pci_conf_write4(int function, int reg, u32 value);
+extern u32 cs5536_pci_conf_read4(int function, int reg);
+
+#define CS5536_ACC_INTR 9
+#define CS5536_IDE_INTR 14
+#define CS5536_USB_INTR 11
+#define CS5536_MFGPT_INTR 5
+#define CS5536_UART1_INTR 4
+#define CS5536_UART2_INTR 3
+
+/************** PCI BUS DEVICE FUNCTION ***************/
+
+/*
+ * PCI bus device function
+ */
+#define PCI_BUS_CS5536 0
+#define PCI_IDSEL_CS5536 14
+
+/********** STANDARD PCI-2.2 EXPANSION ****************/
+
+/*
+ * PCI configuration space
+ * we have to virtualize the PCI configure space head, so we should
+ * define the necessary IDs and some others.
+ */
+
+/* CONFIG of PCI VENDOR ID*/
+#define CFG_PCI_VENDOR_ID(mod_dev_id, sys_vendor_id) \
+ (((mod_dev_id) << 16) | (sys_vendor_id))
+
+/* VENDOR ID */
+#define CS5536_VENDOR_ID 0x1022
+
+/* DEVICE ID */
+#define CS5536_ISA_DEVICE_ID 0x2090
+#define CS5536_IDE_DEVICE_ID 0x209a
+#define CS5536_ACC_DEVICE_ID 0x2093
+#define CS5536_OHCI_DEVICE_ID 0x2094
+#define CS5536_EHCI_DEVICE_ID 0x2095
+
+/* CLASS CODE : CLASS SUB-CLASS INTERFACE */
+#define CS5536_ISA_CLASS_CODE 0x060100
+#define CS5536_IDE_CLASS_CODE 0x010180
+#define CS5536_ACC_CLASS_CODE 0x040100
+#define CS5536_OHCI_CLASS_CODE 0x0C0310
+#define CS5536_EHCI_CLASS_CODE 0x0C0320
+
+/* BHLC : BIST HEADER-TYPE LATENCY-TIMER CACHE-LINE-SIZE */
+
+#define CFG_PCI_CACHE_LINE_SIZE(header_type, latency_timer) \
+ ((PCI_NONE_BIST << 24) | ((header_type) << 16) \
+ | ((latency_timer) << 8) | PCI_NORMAL_CACHE_LINE_SIZE);
+
+#define PCI_NONE_BIST 0x00 /* RO not implemented yet. */
+#define PCI_BRIDGE_HEADER_TYPE 0x80 /* RO */
+#define PCI_NORMAL_HEADER_TYPE 0x00
+#define PCI_NORMAL_LATENCY_TIMER 0x00
+#define PCI_NORMAL_CACHE_LINE_SIZE 0x08 /* RW */
+
+/* BAR */
+#define PCI_BAR0_REG 0x10
+#define PCI_BAR1_REG 0x14
+#define PCI_BAR2_REG 0x18
+#define PCI_BAR3_REG 0x1c
+#define PCI_BAR4_REG 0x20
+#define PCI_BAR5_REG 0x24
+#define PCI_BAR_COUNT 6
+#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
+
+/* CARDBUS CIS POINTER */
+#define PCI_CARDBUS_CIS_POINTER 0x00000000
+
+/* SUBSYSTEM VENDOR ID */
+#define CS5536_SUB_VENDOR_ID CS5536_VENDOR_ID
+
+/* SUBSYSTEM ID */
+#define CS5536_ISA_SUB_ID CS5536_ISA_DEVICE_ID
+#define CS5536_IDE_SUB_ID CS5536_IDE_DEVICE_ID
+#define CS5536_ACC_SUB_ID CS5536_ACC_DEVICE_ID
+#define CS5536_OHCI_SUB_ID CS5536_OHCI_DEVICE_ID
+#define CS5536_EHCI_SUB_ID CS5536_EHCI_DEVICE_ID
+
+/* EXPANSION ROM BAR */
+#define PCI_EXPANSION_ROM_BAR 0x00000000
+
+/* CAPABILITIES POINTER */
+#define PCI_CAPLIST_POINTER 0x00000000
+#define PCI_CAPLIST_USB_POINTER 0x40
+/* INTERRUPT */
+
+#define CFG_PCI_INTERRUPT_LINE(pin, mod_intr) \
+ ((PCI_MAX_LATENCY << 24) | (PCI_MIN_GRANT << 16) | \
+ ((pin) << 8) | (mod_intr))
+
+#define PCI_MAX_LATENCY 0x40
+#define PCI_MIN_GRANT 0x00
+#define PCI_DEFAULT_PIN 0x01
+
+/*********** EXPANSION PCI REG ************************/
+
+/*
+ * ISA EXPANSION
+ */
+#define PCI_UART1_INT_REG 0x50
+#define PCI_UART2_INT_REG 0x54
+#define PCI_ISA_FIXUP_REG 0x58
+
+/*
+ * IDE EXPANSION
+ */
+#define PCI_IDE_CFG_REG 0x40
+#define CS5536_IDE_FLASH_SIGNATURE 0xDEADBEEF
+#define PCI_IDE_DTC_REG 0x48
+#define PCI_IDE_CAST_REG 0x4C
+#define PCI_IDE_ETC_REG 0x50
+#define PCI_IDE_PM_REG 0x54
+#define PCI_IDE_INT_REG 0x60
+
+/*
+ * ACC EXPANSION
+ */
+#define PCI_ACC_INT_REG 0x50
+
+/*
+ * OHCI EXPANSION : INTERRUPT IS IMPLEMENTED BY THE OHCI
+ */
+#define PCI_OHCI_PM_REG 0x40
+#define PCI_OHCI_INT_REG 0x50
+
+/*
+ * EHCI EXPANSION
+ */
+#define PCI_EHCI_LEGSMIEN_REG 0x50
+#define PCI_EHCI_LEGSMISTS_REG 0x54
+#define PCI_EHCI_FLADJ_REG 0x60
+
+#endif /* _CS5536_PCI_H_ */
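Because the configuration header is virtualized, the values a reader of this config space sees are assembled from the composite-word macros above. Two worked examples for the IDE function (the values are illustrative arithmetic, not code from the patch):

/* Vendor/device dword:
 *   CFG_PCI_VENDOR_ID(CS5536_IDE_DEVICE_ID, CS5536_VENDOR_ID)
 *     = (0x209a << 16) | 0x1022 = 0x209a1022
 *
 * Interrupt dword (max-latency, min-grant, pin, line):
 *   CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_IDE_INTR)
 *     = (0x40 << 24) | (0x00 << 16) | (0x01 << 8) | 14 = 0x4000010e
 */
u32 ide_id  = CFG_PCI_VENDOR_ID(CS5536_IDE_DEVICE_ID, CS5536_VENDOR_ID);
u32 ide_int = CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_IDE_INTR);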
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
new file mode 100644
index 00000000000..6305bea7e18
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
@@ -0,0 +1,31 @@
+/*
+ * The read/write interfaces for the Virtual Support Module (VSM).
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ */
+
+#ifndef _CS5536_VSM_H
+#define _CS5536_VSM_H
+
+#include <linux/types.h>
+
+typedef void (*cs5536_pci_vsm_write)(int reg, u32 value);
+typedef u32 (*cs5536_pci_vsm_read)(int reg);
+
+#define DECLARE_CS5536_MODULE(name) \
+extern void pci_##name##_write_reg(int reg, u32 value); \
+extern u32 pci_##name##_read_reg(int reg);
+
+/* ide module */
+DECLARE_CS5536_MODULE(ide)
+/* acc module */
+DECLARE_CS5536_MODULE(acc)
+/* ohci module */
+DECLARE_CS5536_MODULE(ohci)
+/* isa module */
+DECLARE_CS5536_MODULE(isa)
+/* ehci module */
+DECLARE_CS5536_MODULE(ehci)
+
+#endif /* _CS5536_VSM_H */
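DECLARE_CS5536_MODULE(ide) expands to the pair of externs pci_ide_write_reg()/pci_ide_read_reg(); the function-pointer typedefs exist so the generic config-space accessors can dispatch to whichever module owns a given PCI function. A sketch of such a dispatch (the function-number mapping is an assumption for illustration, not this header's contents):

/* Hypothetical dispatch table keyed by PCI function number. */
static const cs5536_pci_vsm_read vsm_conf_readers[] = {
	[0] = pci_isa_read_reg,		/* assumed: ISA bridge  */
	[2] = pci_ide_read_reg,		/* assumed: IDE         */
	[3] = pci_acc_read_reg,		/* assumed: audio (ACC) */
	[4] = pci_ohci_read_reg,	/* assumed: USB OHCI    */
	[5] = pci_ehci_read_reg,	/* assumed: USB EHCI    */
};

static u32 vsm_conf_read(int function, int reg)
{
	if (function < 0 || function >= ARRAY_SIZE(vsm_conf_readers) ||
	    !vsm_conf_readers[function])
		return 0xffffffff;	/* master-abort value */

	return vsm_conf_readers[function](reg);
}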
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index 71a6851ba83..981c75f91a7 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -28,7 +28,11 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
+#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
+ return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
+#else
return dma_addr & 0x7fffffff;
+#endif
}
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
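The new Loongson2F/64-bit branch above passes high bus addresses through unchanged and masks addresses inside the low DMA window down to the first 256 MB of RAM. A tiny standalone check of that arithmetic (the test values are illustrative):

#include <assert.h>
#include <stdint.h>

/* Mirror of the conversion introduced above, for a quick sanity check. */
static uint64_t loongson2f_dma_to_phys(uint64_t dma_addr)
{
	return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
}

int main(void)
{
	assert(loongson2f_dma_to_phys(0x80001000ULL) == 0x00001000);	/* windowed */
	assert(loongson2f_dma_to_phys(0x90000000ULL) == 0x90000000ULL);	/* passthrough */
	return 0;
}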
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index da70bcf2304..ee8bc837697 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Lemote, Inc. & Institute of Computing Technology
+ * Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzj@lemote.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -15,9 +15,6 @@
#include <linux/io.h>
#include <linux/init.h>
-/* there is an internal bonito64-compatiable northbridge in loongson2e/2f */
-#include <asm/mips-boards/bonito64.h>
-
/* loongson internal northbridge initialization */
extern void bonito_irq_init(void);
@@ -32,7 +29,19 @@ extern unsigned long memsize, highmemsize;
/* loongson-specific command line, env and memory initialization */
extern void __init prom_init_memory(void);
extern void __init prom_init_cmdline(void);
+extern void __init prom_init_machtype(void);
extern void __init prom_init_env(void);
+#ifdef CONFIG_LOONGSON_UART_BASE
+extern unsigned long _loongson_uart_base, loongson_uart_base;
+extern void prom_init_loongson_uart_base(void);
+#endif
+
+static inline void prom_init_uart_base(void)
+{
+#ifdef CONFIG_LOONGSON_UART_BASE
+ prom_init_loongson_uart_base();
+#endif
+}
/* irq operation functions */
extern void bonito_irqdispatch(void);
@@ -40,25 +49,276 @@ extern void __init bonito_irq_init(void);
extern void __init set_irq_trigger_mode(void);
extern void __init mach_init_irq(void);
extern void mach_irq_dispatch(unsigned int pending);
+extern int mach_i8259_irq(void);
+
+/* We need this in some places... */
+#define delay() ({ \
+ int x; \
+ for (x = 0; x < 100000; x++) \
+ __asm__ __volatile__(""); \
+})
+
+#define LOONGSON_REG(x) \
+ (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON_IRQ_BASE 32
+#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
+
+#define LOONGSON_FLASH_BASE 0x1c000000
+#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
+#define LOONGSON_FLASH_TOP (LOONGSON_FLASH_BASE+LOONGSON_FLASH_SIZE-1)
+
+#define LOONGSON_LIO0_BASE 0x1e000000
+#define LOONGSON_LIO0_SIZE 0x01C00000 /* 28M */
+#define LOONGSON_LIO0_TOP (LOONGSON_LIO0_BASE+LOONGSON_LIO0_SIZE-1)
+
+#define LOONGSON_BOOT_BASE 0x1fc00000
+#define LOONGSON_BOOT_SIZE 0x00100000 /* 1M */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+
+#define LOONGSON_LIO1_BASE 0x1ff00000
+#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO1_TOP (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
+
+#define LOONGSON_PCILO0_BASE 0x10000000
+#define LOONGSON_PCILO1_BASE 0x14000000
+#define LOONGSON_PCILO2_BASE 0x18000000
+#define LOONGSON_PCILO_BASE LOONGSON_PCILO0_BASE
+#define LOONGSON_PCILO_SIZE 0x0c000000 /* 64M * 3 */
+#define LOONGSON_PCILO_TOP (LOONGSON_PCILO0_BASE+LOONGSON_PCILO_SIZE-1)
+
+#define LOONGSON_PCICFG_BASE 0x1fe80000
+#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
+#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
+#define LOONGSON_PCIIO_BASE 0x1fd00000
+#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
+#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
+
+/* Loongson Register Bases */
+
+#define LOONGSON_PCICONFIGBASE 0x00
+#define LOONGSON_REGBASE 0x100
/* PCI Configuration Registers */
-#define LOONGSON_PCI_ISR4C BONITO_PCI_REG(0x4c)
+
+#define LOONGSON_PCI_REG(x) LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
+#define LOONGSON_PCIDID LOONGSON_PCI_REG(0x00)
+#define LOONGSON_PCICMD LOONGSON_PCI_REG(0x04)
+#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCILTIMER LOONGSON_PCI_REG(0x0c)
+#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIEXPRBASE LOONGSON_PCI_REG(0x30)
+#define LOONGSON_PCIINT LOONGSON_PCI_REG(0x3c)
+
+#define LOONGSON_PCI_ISR4C LOONGSON_PCI_REG(0x4c)
+
+#define LOONGSON_PCICMD_PERR_CLR 0x80000000
+#define LOONGSON_PCICMD_SERR_CLR 0x40000000
+#define LOONGSON_PCICMD_MABORT_CLR 0x20000000
+#define LOONGSON_PCICMD_MTABORT_CLR 0x10000000
+#define LOONGSON_PCICMD_TABORT_CLR 0x08000000
+#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
+#define LOONGSON_PCICMD_PERRRESPEN 0x00000040
+#define LOONGSON_PCICMD_ASTEPEN 0x00000080
+#define LOONGSON_PCICMD_SERREN 0x00000100
+#define LOONGSON_PCILTIMER_BUSLATENCY 0x0000ff00
+#define LOONGSON_PCILTIMER_BUSLATENCY_SHIFT 8
+
+/* Loongson h/w Configuration */
+
+#define LOONGSON_GENCFG_OFFSET 0x4
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+
+#define LOONGSON_GENCFG_DEBUGMODE 0x00000001
+#define LOONGSON_GENCFG_SNOOPEN 0x00000002
+#define LOONGSON_GENCFG_CPUSELFRESET 0x00000004
+
+#define LOONGSON_GENCFG_FORCE_IRQA 0x00000008
+#define LOONGSON_GENCFG_IRQA_ISOUT 0x00000010
+#define LOONGSON_GENCFG_IRQA_FROM_INT1 0x00000020
+#define LOONGSON_GENCFG_BYTESWAP 0x00000040
+
+#define LOONGSON_GENCFG_UNCACHED 0x00000080
+#define LOONGSON_GENCFG_PREFETCHEN 0x00000100
+#define LOONGSON_GENCFG_WBEHINDEN 0x00000200
+#define LOONGSON_GENCFG_CACHEALG 0x00000c00
+#define LOONGSON_GENCFG_CACHEALG_SHIFT 10
+#define LOONGSON_GENCFG_PCIQUEUE 0x00001000
+#define LOONGSON_GENCFG_CACHESTOP 0x00002000
+#define LOONGSON_GENCFG_MSTRBYTESWAP 0x00004000
+#define LOONGSON_GENCFG_BUSERREN 0x00008000
+#define LOONGSON_GENCFG_NORETRYTIMEOUT 0x00010000
+#define LOONGSON_GENCFG_SHORTCOPYTIMEOUT 0x00020000
+
+/* PCI address map control */
+
+#define LOONGSON_PCIMAP LOONGSON_REG(LOONGSON_REGBASE + 0x10)
+#define LOONGSON_PCIMEMBASECFG LOONGSON_REG(LOONGSON_REGBASE + 0x14)
+#define LOONGSON_PCIMAP_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x18)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIOIE LOONGSON_REG(LOONGSON_REGBASE + 0x20)
+
+/* ICU Configuration Regs - r/w */
+
+#define LOONGSON_INTEDGE LOONGSON_REG(LOONGSON_REGBASE + 0x24)
+#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTPOL LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
+
+/* ICU Enable Regs - IntEn & IntISR are r/o. */
+
+#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTEN LOONGSON_REG(LOONGSON_REGBASE + 0x38)
+#define LOONGSON_INTISR LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
+
+/* ICU */
+#define LOONGSON_ICU_MBOXES 0x0000000f
+#define LOONGSON_ICU_MBOXES_SHIFT 0
+#define LOONGSON_ICU_DMARDY 0x00000010
+#define LOONGSON_ICU_DMAEMPTY 0x00000020
+#define LOONGSON_ICU_COPYRDY 0x00000040
+#define LOONGSON_ICU_COPYEMPTY 0x00000080
+#define LOONGSON_ICU_COPYERR 0x00000100
+#define LOONGSON_ICU_PCIIRQ 0x00000200
+#define LOONGSON_ICU_MASTERERR 0x00000400
+#define LOONGSON_ICU_SYSTEMERR 0x00000800
+#define LOONGSON_ICU_DRAMPERR 0x00001000
+#define LOONGSON_ICU_RETRYERR 0x00002000
+#define LOONGSON_ICU_GPIOS 0x01ff0000
+#define LOONGSON_ICU_GPIOS_SHIFT 16
+#define LOONGSON_ICU_GPINS 0x7e000000
+#define LOONGSON_ICU_GPINS_SHIFT 25
+#define LOONGSON_ICU_MBOX(N) (1<<(LOONGSON_ICU_MBOXES_SHIFT+(N)))
+#define LOONGSON_ICU_GPIO(N) (1<<(LOONGSON_ICU_GPIOS_SHIFT+(N)))
+#define LOONGSON_ICU_GPIN(N) (1<<(LOONGSON_ICU_GPINS_SHIFT+(N)))
+
+/* PCI prefetch window base & mask */
+
+#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
/* PCI_Hit*_Sel_* */
-#define LOONGSON_PCI_HIT0_SEL_L BONITO(BONITO_REGBASE + 0x50)
-#define LOONGSON_PCI_HIT0_SEL_H BONITO(BONITO_REGBASE + 0x54)
-#define LOONGSON_PCI_HIT1_SEL_L BONITO(BONITO_REGBASE + 0x58)
-#define LOONGSON_PCI_HIT1_SEL_H BONITO(BONITO_REGBASE + 0x5c)
-#define LOONGSON_PCI_HIT2_SEL_L BONITO(BONITO_REGBASE + 0x60)
-#define LOONGSON_PCI_HIT2_SEL_H BONITO(BONITO_REGBASE + 0x64)
+#define LOONGSON_PCI_HIT0_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x50)
+#define LOONGSON_PCI_HIT0_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x54)
+#define LOONGSON_PCI_HIT1_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x58)
+#define LOONGSON_PCI_HIT1_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x5c)
+#define LOONGSON_PCI_HIT2_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x60)
+#define LOONGSON_PCI_HIT2_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x64)
/* PXArb Config & Status */
-#define LOONGSON_PXARB_CFG BONITO(BONITO_REGBASE + 0x68)
-#define LOONGSON_PXARB_STATUS BONITO(BONITO_REGBASE + 0x6c)
+#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
+#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
+
+/* pcimap */
+
+#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
+#define LOONGSON_PCIMAP_PCIMAP_LO0_SHIFT 0
+#define LOONGSON_PCIMAP_PCIMAP_LO1 0x00000fc0
+#define LOONGSON_PCIMAP_PCIMAP_LO1_SHIFT 6
+#define LOONGSON_PCIMAP_PCIMAP_LO2 0x0003f000
+#define LOONGSON_PCIMAP_PCIMAP_LO2_SHIFT 12
+#define LOONGSON_PCIMAP_PCIMAP_2 0x00040000
+#define LOONGSON_PCIMAP_WIN(WIN, ADDR) \
+ ((((ADDR)>>26) & LOONGSON_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
+
+#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
+#include <linux/cpufreq.h>
+extern void loongson2_cpu_wait(void);
+extern struct cpufreq_frequency_table loongson2_clockmod_table[];
+
+/* Chip Config */
+#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80)
+#endif
+
+/*
+ * address windows configuration module
+ *
+ * loongson2e do not have this module
+ */
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/* address window config module base address */
+#define LOONGSON_ADDRWINCFG_BASE 0x3ff00000ul
+#define LOONGSON_ADDRWINCFG_SIZE 0x180
+
+extern unsigned long _loongson_addrwincfg_base;
+#define LOONGSON_ADDRWINCFG(offset) \
+ (*(volatile u64 *)(_loongson_addrwincfg_base + (offset)))
+
+#define CPU_WIN0_BASE LOONGSON_ADDRWINCFG(0x00)
+#define CPU_WIN1_BASE LOONGSON_ADDRWINCFG(0x08)
+#define CPU_WIN2_BASE LOONGSON_ADDRWINCFG(0x10)
+#define CPU_WIN3_BASE LOONGSON_ADDRWINCFG(0x18)
+
+#define CPU_WIN0_MASK LOONGSON_ADDRWINCFG(0x20)
+#define CPU_WIN1_MASK LOONGSON_ADDRWINCFG(0x28)
+#define CPU_WIN2_MASK LOONGSON_ADDRWINCFG(0x30)
+#define CPU_WIN3_MASK LOONGSON_ADDRWINCFG(0x38)
+
+#define CPU_WIN0_MMAP LOONGSON_ADDRWINCFG(0x40)
+#define CPU_WIN1_MMAP LOONGSON_ADDRWINCFG(0x48)
+#define CPU_WIN2_MMAP LOONGSON_ADDRWINCFG(0x50)
+#define CPU_WIN3_MMAP LOONGSON_ADDRWINCFG(0x58)
+
+#define PCIDMA_WIN0_BASE LOONGSON_ADDRWINCFG(0x60)
+#define PCIDMA_WIN1_BASE LOONGSON_ADDRWINCFG(0x68)
+#define PCIDMA_WIN2_BASE LOONGSON_ADDRWINCFG(0x70)
+#define PCIDMA_WIN3_BASE LOONGSON_ADDRWINCFG(0x78)
+
+#define PCIDMA_WIN0_MASK LOONGSON_ADDRWINCFG(0x80)
+#define PCIDMA_WIN1_MASK LOONGSON_ADDRWINCFG(0x88)
+#define PCIDMA_WIN2_MASK LOONGSON_ADDRWINCFG(0x90)
+#define PCIDMA_WIN3_MASK LOONGSON_ADDRWINCFG(0x98)
+
+#define PCIDMA_WIN0_MMAP LOONGSON_ADDRWINCFG(0xa0)
+#define PCIDMA_WIN1_MMAP LOONGSON_ADDRWINCFG(0xa8)
+#define PCIDMA_WIN2_MMAP LOONGSON_ADDRWINCFG(0xb0)
+#define PCIDMA_WIN3_MMAP LOONGSON_ADDRWINCFG(0xb8)
+
+#define ADDRWIN_WIN0 0
+#define ADDRWIN_WIN1 1
+#define ADDRWIN_WIN2 2
+#define ADDRWIN_WIN3 3
+
+#define ADDRWIN_MAP_DST_DDR 0
+#define ADDRWIN_MAP_DST_PCI 1
+#define ADDRWIN_MAP_DST_LIO 1
+
+/*
+ * s: CPU, PCIDMA
+ * d: DDR, PCI, LIO
+ * win: 0, 1, 2, 3
+ * src: map source
+ * dst: map destination
+ * size: ~mask + 1
+ */
+#define LOONGSON_ADDRWIN_CFG(s, d, w, src, dst, size) do {\
+ s##_WIN##w##_BASE = (src); \
+ s##_WIN##w##_MMAP = (src) | ADDRWIN_MAP_DST_##d; \
+ s##_WIN##w##_MASK = ~(size-1); \
+} while (0)
+
+#define LOONGSON_ADDRWIN_CPUTOPCI(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, PCI, win, src, dst, size)
+#define LOONGSON_ADDRWIN_CPUTODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, DDR, win, src, dst, size)
+#define LOONGSON_ADDRWIN_PCITODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(PCIDMA, DDR, win, src, dst, size)
-/* loongson2-specific perf counter IRQ */
-#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6)
+#endif /* ! CONFIG_CPU_SUPPORTS_ADDRWINCFG */
#endif /* __ASM_MACH_LOONGSON_LOONGSON_H */
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 206ea206791..acf8359cb13 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -13,10 +13,15 @@
#ifdef CONFIG_LEMOTE_FULOONG2E
-#define LOONGSON_UART_BASE (BONITO_PCIIO_BASE + 0x3f8)
-
#define LOONGSON_MACHTYPE MACH_LEMOTE_FL2E
#endif
+/* use fuloong2f as the default machine of LEMOTE_MACH2F */
+#ifdef CONFIG_LEMOTE_MACH2F
+
+#define LOONGSON_MACHTYPE MACH_LEMOTE_FL2F
+
+#endif
+
#endif /* __ASM_MACH_LOONGSON_MACHINE_H */
diff --git a/arch/mips/include/asm/mach-loongson/mem.h b/arch/mips/include/asm/mach-loongson/mem.h
index bd7b3cba7e3..e9960f341b9 100644
--- a/arch/mips/include/asm/mach-loongson/mem.h
+++ b/arch/mips/include/asm/mach-loongson/mem.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Lemote, Inc. & Institute of Computing Technology
+ * Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzj@lemote.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -12,19 +12,30 @@
#define __ASM_MACH_LOONGSON_MEM_H
/*
- * On Lemote Loongson 2e
+ * high memory space
*
- * the high memory space starts from 512M.
- * the peripheral registers reside between 0x1000:0000 and 0x2000:0000.
+ * in loongson2e, starts from 512M
+ * in loongson2f, starts from 2G 256M
*/
+#ifdef CONFIG_CPU_LOONGSON2E
+#define LOONGSON_HIGHMEM_START 0x20000000
+#else
+#define LOONGSON_HIGHMEM_START 0x90000000
+#endif
-#ifdef CONFIG_LEMOTE_FULOONG2E
-
-#define LOONGSON_HIGHMEM_START 0x20000000
+/*
+ * the peripheral registers (MMIO):
+ *
+ * On the Lemote Loongson 2e system, they reside between 0x1000:0000 and 0x2000:0000.
+ * On the Lemote Loongson 2f system, they reside between 0x1000:0000 and 0x8000:0000.
+ */
#define LOONGSON_MMIO_MEM_START 0x10000000
-#define LOONGSON_MMIO_MEM_END 0x20000000
+#ifdef CONFIG_CPU_LOONGSON2E
+#define LOONGSON_MMIO_MEM_END 0x20000000
+#else
+#define LOONGSON_MMIO_MEM_END 0x80000000
#endif
#endif /* __ASM_MACH_LOONGSON_MEM_H */
diff --git a/arch/mips/include/asm/mach-loongson/pci.h b/arch/mips/include/asm/mach-loongson/pci.h
index f1663ca81da..a199a4f6de4 100644
--- a/arch/mips/include/asm/mach-loongson/pci.h
+++ b/arch/mips/include/asm/mach-loongson/pci.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2008 Zhang Le <r0bertz@gentoo.org>
+ * Copyright (c) 2009 Wu Zhangjin <wuzj@lemote.com>
*
* This program is free software; you can redistribute it
* and/or modify it under the terms of the GNU General
@@ -22,16 +23,39 @@
#ifndef __ASM_MACH_LOONGSON_PCI_H_
#define __ASM_MACH_LOONGSON_PCI_H_
-extern struct pci_ops bonito64_pci_ops;
+extern struct pci_ops loongson_pci_ops;
-#ifdef CONFIG_LEMOTE_FULOONG2E
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/*
+ * we use address window2 to map cpu address space to pci space
+ * window2: cpu [1G, 2G] -> pci [1G, 2G]
+ * why not use window 0 & 1? because they are used by cpu when booting.
+ * window0: cpu [0, 256M] -> ddr [0, 256M]
+ * window1: cpu [256M, 512M] -> pci [256M, 512M]
+ */
+
+/* the smallest LOONGSON_CPU_MEM_SRC can be 512M */
+#define LOONGSON_CPU_MEM_SRC 0x40000000ul /* 1G */
+#define LOONGSON_PCI_MEM_DST LOONGSON_CPU_MEM_SRC
+
+#define LOONGSON_PCI_MEM_START LOONGSON_PCI_MEM_DST
+#define LOONGSON_PCI_MEM_END (0x80000000ul-1) /* 2G */
+
+#define MMAP_CPUTOPCI_SIZE (LOONGSON_PCI_MEM_END - \
+ LOONGSON_PCI_MEM_START + 1)
+
+#else /* loongson2f/32bit & loongson2e */
/* this pci memory space is mapped by pcimap in pci.c */
-#define LOONGSON_PCI_MEM_START BONITO_PCILO1_BASE
-#define LOONGSON_PCI_MEM_END (BONITO_PCILO1_BASE + 0x04000000 * 2)
+#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
+#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
/* this is an offset from mips_io_port_base */
#define LOONGSON_PCI_IO_START 0x00004000UL
-#endif
+#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
#endif /* !__ASM_MACH_LOONGSON_PCI_H_ */
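The window comment above pairs with the LOONGSON_ADDRWIN_CFG helpers added to loongson.h earlier in this patch. A sketch of how a board setup path might program CPU window 2 for the [1G, 2G) PCI mapping described here (the wrapper function is hypothetical; only the macros and constants come from these headers):

/* Expands, via LOONGSON_ADDRWIN_CPUTOPCI, to:
 *   CPU_WIN2_BASE = LOONGSON_CPU_MEM_SRC;
 *   CPU_WIN2_MMAP = LOONGSON_CPU_MEM_SRC | ADDRWIN_MAP_DST_PCI;
 *   CPU_WIN2_MASK = ~(MMAP_CPUTOPCI_SIZE - 1);
 */
static void __init setup_cpu_to_pci_window(void)
{
	LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC,
				  LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE);
}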
diff --git a/arch/mips/include/asm/mach-powertv/asic.h b/arch/mips/include/asm/mach-powertv/asic.h
new file mode 100644
index 00000000000..bcad43a93eb
--- /dev/null
+++ b/arch/mips/include/asm/mach-powertv/asic.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ASM_MACH_POWERTV_ASIC_H
+#define _ASM_MACH_POWERTV_ASIC_H
+
+#include <linux/ioport.h>
+#include <asm/mach-powertv/asic_regs.h>
+
+#define DVR_CAPABLE (1<<0)
+#define PCIE_CAPABLE (1<<1)
+#define FFS_CAPABLE (1<<2)
+#define DISPLAY_CAPABLE (1<<3)
+
+/* Platform Family types
+ * For compatibility, new values must be added at the end */
+enum family_type {
+ FAMILY_8500,
+ FAMILY_8500RNG,
+ FAMILY_4500,
+ FAMILY_1500,
+ FAMILY_8600,
+ FAMILY_4600,
+ FAMILY_4600VZA,
+ FAMILY_8600VZB,
+ FAMILY_1500VZE,
+ FAMILY_1500VZF,
+ FAMILIES
+};
+
+/* Register maps for each ASIC */
+extern const struct register_map calliope_register_map;
+extern const struct register_map cronus_register_map;
+extern const struct register_map zeus_register_map;
+
+extern struct resource dvr_cronus_resources[];
+extern struct resource dvr_zeus_resources[];
+extern struct resource non_dvr_calliope_resources[];
+extern struct resource non_dvr_cronus_resources[];
+extern struct resource non_dvr_cronuslite_resources[];
+extern struct resource non_dvr_vz_calliope_resources[];
+extern struct resource non_dvr_vze_calliope_resources[];
+extern struct resource non_dvr_vzf_calliope_resources[];
+extern struct resource non_dvr_zeus_resources[];
+
+extern void powertv_platform_init(void);
+extern void platform_alloc_bootmem(void);
+extern enum asic_type platform_get_asic(void);
+extern enum family_type platform_get_family(void);
+extern int platform_supports_dvr(void);
+extern int platform_supports_ffs(void);
+extern int platform_supports_pcie(void);
+extern int platform_supports_display(void);
+extern void configure_platform(void);
+extern void platform_configure_usb_ehci(void);
+extern void platform_unconfigure_usb_ehci(void);
+extern void platform_configure_usb_ohci(void);
+extern void platform_unconfigure_usb_ohci(void);
+
+/* Platform Resources */
+#define ASIC_RESOURCE_GET_EXISTS 1
+extern struct resource *asic_resource_get(const char *name);
+extern void platform_release_memory(void *baddr, int size);
+
+/* Reboot Cause */
+extern void set_reboot_cause(char code, unsigned int data, unsigned int data2);
+extern void set_locked_reboot_cause(char code, unsigned int data,
+ unsigned int data2);
+
+enum sys_reboot_type {
+ sys_unknown_reboot = 0x00, /* Unknown reboot cause */
+ sys_davic_change = 0x01, /* Reboot due to change in DAVIC
+ * mode */
+ sys_user_reboot = 0x02, /* Reboot initiated by user */
+ sys_system_reboot = 0x03, /* Reboot initiated by OS */
+ sys_trap_reboot = 0x04, /* Reboot due to a CPU trap */
+ sys_silent_reboot = 0x05, /* Silent reboot */
+ sys_boot_ldr_reboot = 0x06, /* Bootloader reboot */
+ sys_power_up_reboot = 0x07, /* Power on bootup. Older
+ * drivers may report as
+ * userReboot. */
+ sys_code_change = 0x08, /* Reboot to take code change.
+ * Older drivers may report as
+ * userReboot. */
+ sys_hardware_reset = 0x09, /* HW watchdog or front-panel
+ * reset button reset. Older
+ * drivers may report as
+ * userReboot. */
+ sys_watchdogInterrupt = 0x0A /* Pre-watchdog interrupt */
+};
+
+#endif /* _ASM_MACH_POWERTV_ASIC_H */
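The capability bits and platform_supports_*() helpers above let per-family drivers bail out cleanly on hardware they cannot serve. A hedged sketch of a probe using them (driver name and messages are illustrative):

static int powertv_dvr_probe(struct platform_device *pdev)
{
	/* Refuse to bind on families without DVR hardware. */
	if (!platform_supports_dvr())
		return -ENODEV;

	if (platform_supports_pcie())
		dev_info(&pdev->dev, "PCIe-capable ASIC detected\n");

	return 0;
}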
diff --git a/arch/mips/include/asm/mach-powertv/asic_regs.h b/arch/mips/include/asm/mach-powertv/asic_regs.h
new file mode 100644
index 00000000000..9a65c93782f
--- /dev/null
+++ b/arch/mips/include/asm/mach-powertv/asic_regs.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ASM_MACH_POWERTV_ASIC_H_
+#define __ASM_MACH_POWERTV_ASIC_H_
+#include <linux/io.h>
+
+/* ASIC types */
+enum asic_type {
+ ASIC_UNKNOWN,
+ ASIC_ZEUS,
+ ASIC_CALLIOPE,
+ ASIC_CRONUS,
+ ASIC_CRONUSLITE,
+ ASICS
+};
+
+/* hardcoded values read from Chip Version registers */
+#define CRONUS_10 0x0B4C1C20
+#define CRONUS_11 0x0B4C1C21
+#define CRONUSLITE_10 0x0B4C1C40
+
+#define NAND_FLASH_BASE 0x03000000
+#define ZEUS_IO_BASE 0x09000000
+#define CALLIOPE_IO_BASE 0x08000000
+#define CRONUS_IO_BASE 0x09000000
+#define ASIC_IO_SIZE 0x01000000
+
+/* Definitions for backward compatibility */
+#define UART1_INTSTAT uart1_intstat
+#define UART1_INTEN uart1_inten
+#define UART1_CONFIG1 uart1_config1
+#define UART1_CONFIG2 uart1_config2
+#define UART1_DIVISORHI uart1_divisorhi
+#define UART1_DIVISORLO uart1_divisorlo
+#define UART1_DATA uart1_data
+#define UART1_STATUS uart1_status
+
+/* ASIC register enumeration */
+struct register_map {
+ u32 eic_slow0_strt_add;
+ u32 eic_cfg_bits;
+ u32 eic_ready_status;
+
+ u32 chipver3;
+ u32 chipver2;
+ u32 chipver1;
+ u32 chipver0;
+
+ u32 uart1_intstat;
+ u32 uart1_inten;
+ u32 uart1_config1;
+ u32 uart1_config2;
+ u32 uart1_divisorhi;
+ u32 uart1_divisorlo;
+ u32 uart1_data;
+ u32 uart1_status;
+
+ u32 int_stat_3;
+ u32 int_stat_2;
+ u32 int_stat_1;
+ u32 int_stat_0;
+ u32 int_config;
+ u32 int_int_scan;
+ u32 ien_int_3;
+ u32 ien_int_2;
+ u32 ien_int_1;
+ u32 ien_int_0;
+ u32 int_level_3_3;
+ u32 int_level_3_2;
+ u32 int_level_3_1;
+ u32 int_level_3_0;
+ u32 int_level_2_3;
+ u32 int_level_2_2;
+ u32 int_level_2_1;
+ u32 int_level_2_0;
+ u32 int_level_1_3;
+ u32 int_level_1_2;
+ u32 int_level_1_1;
+ u32 int_level_1_0;
+ u32 int_level_0_3;
+ u32 int_level_0_2;
+ u32 int_level_0_1;
+ u32 int_level_0_0;
+ u32 int_docsis_en;
+
+ u32 mips_pll_setup;
+ u32 usb_fs;
+ u32 test_bus;
+ u32 crt_spare;
+ u32 usb2_ohci_int_mask;
+ u32 usb2_strap;
+ u32 ehci_hcapbase;
+ u32 ohci_hc_revision;
+ u32 bcm1_bs_lmi_steer;
+ u32 usb2_control;
+ u32 usb2_stbus_obc;
+ u32 usb2_stbus_mess_size;
+ u32 usb2_stbus_chunk_size;
+
+ u32 pcie_regs;
+ u32 tim_ch;
+ u32 tim_cl;
+ u32 gpio_dout;
+ u32 gpio_din;
+ u32 gpio_dir;
+ u32 watchdog;
+ u32 front_panel;
+
+ u32 register_maps;
+};
+
+extern enum asic_type asic;
+extern const struct register_map *register_map;
+extern unsigned long asic_phy_base; /* Physical address of ASIC */
+extern unsigned long asic_base; /* Virtual address of ASIC */
+
+/*
+ * Macros to interface to registers through their ioremapped address
+ * asic_reg_offset Returns the offset of a given register from the start
+ * of the ASIC address space
+ * asic_reg_phys_addr Returns the physical address of the given register
+ * asic_reg_addr Returns the iomapped virtual address of the given
+ * register.
+ */
+#define asic_reg_offset(x) (register_map->x)
+#define asic_reg_phys_addr(x) (asic_phy_base + asic_reg_offset(x))
+#define asic_reg_addr(x) \
+ ((unsigned int *) (asic_base + asic_reg_offset(x)))
+
+/*
+ * The asic_reg macro is gone. It should be replaced by either asic_read or
+ * asic_write, as appropriate.
+ */
+
+#define asic_read(x) readl(asic_reg_addr(x))
+#define asic_write(v, x) writel(v, asic_reg_addr(x))
+
+extern void asic_irq_init(void);
+#endif
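asic_read()/asic_write() go through the per-ASIC register_map indirection so the same driver code works on Zeus, Calliope and Cronus. A hedged usage sketch (the TX-ready bit and the helper itself are assumptions made for illustration):

/* Illustrative: busy-wait until the UART can accept a byte, then send it. */
static void powertv_uart1_putc(char c)
{
	while (!(asic_read(uart1_status) & 0x200))	/* assumed TX-ready bit */
		cpu_relax();

	asic_write(c, uart1_data);
}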
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
new file mode 100644
index 00000000000..5b8d5ebeb83
--- /dev/null
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -0,0 +1,119 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Version from mach-generic modified to support PowerTV port
+ * Portions Copyright (C) 2009 Cisco Systems, Inc.
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ *
+ */
+
+#ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
+#define __ASM_MACH_POWERTV_DMA_COHERENCE_H
+
+#include <linux/sched.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <asm/mach-powertv/asic.h>
+
+static inline bool is_kseg2(void *addr)
+{
+ return (unsigned long)addr >= KSEG2;
+}
+
+static inline unsigned long virt_to_phys_from_pte(void *addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ unsigned long virt_addr = (unsigned long)addr;
+ unsigned long phys_addr = 0UL;
+
+ /* get the page global directory. */
+ pgd = pgd_offset_k(virt_addr);
+
+ if (!pgd_none(*pgd)) {
+ /* get the page upper directory */
+ pud = pud_offset(pgd, virt_addr);
+ if (!pud_none(*pud)) {
+ /* get the page middle directory */
+ pmd = pmd_offset(pud, virt_addr);
+ if (!pmd_none(*pmd)) {
+ /* get a pointer to the page table entry */
+ ptep = pte_offset(pmd, virt_addr);
+ pte = *ptep;
+ /* check for a valid page */
+ if (pte_present(pte)) {
+ /* get the physical address the page is
+ * referring to */
+ phys_addr = (unsigned long)
+ page_to_phys(pte_page(pte));
+ /* add the offset within the page */
+ phys_addr |= (virt_addr & ~PAGE_MASK);
+ }
+ }
+ }
+ }
+
+ return phys_addr;
+}
+
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
+ size_t size)
+{
+ if (is_kseg2(addr))
+ return phys_to_bus(virt_to_phys_from_pte(addr));
+ else
+ return phys_to_bus(virt_to_phys(addr));
+}
+
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
+{
+ return phys_to_bus(page_to_phys(page));
+}
+
+static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return bus_to_phys(dma_addr);
+}
+
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction)
+{
+}
+
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int plat_device_is_coherent(struct device *dev)
+{
+ return 0;
+}
+
+#endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-powertv/interrupts.h b/arch/mips/include/asm/mach-powertv/interrupts.h
new file mode 100644
index 00000000000..629a5741365
--- /dev/null
+++ b/arch/mips/include/asm/mach-powertv/interrupts.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
+#define _ASM_MACH_POWERTV_INTERRUPTS_H_
+
+/*
+ * Defines for all of the interrupt lines
+ */
+
+/* Definitions for backward compatibility */
+#define kIrq_Uart1 irq_uart1
+
+#define ibase 0
+
+/*------------- Register: int_stat_3 */
+/* 126 unused (bit 31) */
+#define irq_asc2video (ibase+126) /* ASC 2 Video Interrupt */
+#define irq_asc1video (ibase+125) /* ASC 1 Video Interrupt */
+#define irq_comms_block_wd (ibase+124) /* ASC 1 Video Interrupt */
+#define irq_fdma_mailbox (ibase+123) /* FDMA Mailbox Output */
+#define irq_fdma_gp (ibase+122) /* FDMA GP Output */
+#define irq_mips_pic (ibase+121) /* MIPS Performance Counter
+ * Interrupt */
+#define irq_mips_timer (ibase+120) /* MIPS Timer Interrupt */
+#define irq_memory_protect (ibase+119) /* Memory Protection Interrupt
+ * -- Ored by glue logic inside
+ * SPARC ILC (see
+ * INT_MEM_PROT_STAT, below,
+ * for individual interrupts)
+ */
+/* 118 unused (bit 22) */
+#define irq_sbag (ibase+117) /* SBAG Interrupt -- Ored by
+ * glue logic inside SPARC ILC
+ * (see INT_SBAG_STAT, below,
+ * for individual interrupts) */
+#define irq_qam_b_fec (ibase+116) /* QAM B FEC Interrupt */
+#define irq_qam_a_fec (ibase+115) /* QAM A FEC Interrupt */
+/* 114 unused (bit 18) */
+#define irq_mailbox (ibase+113) /* Mailbox Debug Interrupt --
+ * Ored by glue logic inside
+ * SPARC ILC (see
+ * INT_MAILBOX_STAT, below, for
+ * individual interrupts) */
+#define irq_fuse_stat1 (ibase+112) /* Fuse Status 1 */
+#define irq_fuse_stat2 (ibase+111) /* Fuse Status 2 */
+#define irq_fuse_stat3 (ibase+110) /* Blitter Interrupt / Fuse
+ * Status 3 */
+#define irq_blitter (ibase+110) /* Blitter Interrupt / Fuse
+ * Status 3 */
+#define irq_avc1_pp0 (ibase+109) /* AVC Decoder #1 PP0
+ * Interrupt */
+#define irq_avc1_pp1 (ibase+108) /* AVC Decoder #1 PP1
+ * Interrupt */
+#define irq_avc1_mbe (ibase+107) /* AVC Decoder #1 MBE
+ * Interrupt */
+#define irq_avc2_pp0 (ibase+106) /* AVC Decoder #2 PP0
+ * Interrupt */
+#define irq_avc2_pp1 (ibase+105) /* AVC Decoder #2 PP1
+ * Interrupt */
+#define irq_avc2_mbe (ibase+104) /* AVC Decoder #2 MBE
+ * Interrupt */
+#define irq_zbug_spi (ibase+103) /* Zbug SPI Slave Interrupt */
+#define irq_qam_mod2 (ibase+102) /* QAM Modulator 2 DMA
+ * Interrupt */
+#define irq_ir_rx (ibase+101) /* IR RX 2 Interrupt */
+#define irq_aud_dsp2 (ibase+100) /* Audio DSP #2 Interrupt */
+#define irq_aud_dsp1 (ibase+99) /* Audio DSP #1 Interrupt */
+#define irq_docsis (ibase+98) /* DOCSIS Debug Interrupt */
+#define irq_sd_dvp1 (ibase+97) /* SD DVP #1 Interrupt */
+#define irq_sd_dvp2 (ibase+96) /* SD DVP #2 Interrupt */
+/*------------- Register: int_stat_2 */
+#define irq_hd_dvp (ibase+95) /* HD DVP Interrupt */
+#define kIrq_Prewatchdog (ibase+94) /* watchdog Pre-Interrupt */
+#define irq_timer2 (ibase+93) /* Programmable Timer
+ * Interrupt 2 */
+#define irq_1394 (ibase+92) /* 1394 Firewire Interrupt */
+#define irq_usbohci (ibase+91) /* USB 2.0 OHCI Interrupt */
+#define irq_usbehci (ibase+90) /* USB 2.0 EHCI Interrupt */
+#define irq_pciexp (ibase+89) /* PCI Express 0 Interrupt */
+#define irq_pciexp0 (ibase+89) /* PCI Express 0 Interrupt */
+#define irq_afe1 (ibase+88) /* AFE 1 Interrupt */
+#define irq_sata (ibase+87) /* SATA 1 Interrupt */
+#define irq_sata1 (ibase+87) /* SATA 1 Interrupt */
+#define irq_dtcp (ibase+86) /* DTCP Interrupt */
+#define irq_pciexp1 (ibase+85) /* PCI Express 1 Interrupt */
+/* 84 unused (bit 20) */
+/* 83 unused (bit 19) */
+/* 82 unused (bit 18) */
+#define irq_sata2 (ibase+81) /* SATA2 Interrupt */
+#define irq_uart2 (ibase+80) /* UART2 Interrupt */
+#define irq_legacy_usb (ibase+79) /* Legacy USB Host ISR (1.1
+ * Host module) */
+#define irq_pod (ibase+78) /* POD Interrupt */
+#define irq_slave_usb (ibase+77) /* Slave USB */
+#define irq_denc1 (ibase+76) /* DENC #1 VTG Interrupt */
+#define irq_vbi_vtg (ibase+75) /* VBI VTG Interrupt */
+#define irq_afe2 (ibase+74) /* AFE 2 Interrupt */
+#define irq_denc2 (ibase+73) /* DENC #2 VTG Interrupt */
+#define irq_asc2 (ibase+72) /* ASC #2 Interrupt */
+#define irq_asc1 (ibase+71) /* ASC #1 Interrupt */
+#define irq_mod_dma (ibase+70) /* Modulator DMA Interrupt */
+#define irq_byte_eng1 (ibase+69) /* Byte Engine Interrupt [1] */
+#define irq_byte_eng0 (ibase+68) /* Byte Engine Interrupt [0] */
+/* 67 unused (bit 03) */
+/* 66 unused (bit 02) */
+/* 65 unused (bit 01) */
+/* 64 unused (bit 00) */
+/*------------- Register: int_stat_1 */
+/* 63 unused (bit 31) */
+/* 62 unused (bit 30) */
+/* 61 unused (bit 29) */
+/* 60 unused (bit 28) */
+/* 59 unused (bit 27) */
+/* 58 unused (bit 26) */
+/* 57 unused (bit 25) */
+/* 56 unused (bit 24) */
+#define irq_buf_dma_mem2mem (ibase+55) /* BufDMA Memory to Memory
+ * Interrupt */
+#define irq_buf_dma_usbtransmit (ibase+54) /* BufDMA USB Transmit
+ * Interrupt */
+#define irq_buf_dma_qpskpodtransmit (ibase+53) /* BufDMA QPSK/POD Transmit
+ * Interrupt */
+#define irq_buf_dma_transmit_error (ibase+52) /* BufDMA Transmit Error
+ * Interrupt */
+#define irq_buf_dma_usbrecv (ibase+51) /* BufDMA USB Receive
+ * Interrupt */
+#define irq_buf_dma_qpskpodrecv (ibase+50) /* BufDMA QPSK/POD Receive
+ * Interrupt */
+#define irq_buf_dma_recv_error (ibase+49) /* BufDMA Receive Error
+ * Interrupt */
+#define irq_qamdma_transmit_play (ibase+48) /* QAMDMA Transmit/Play
+ * Interrupt */
+#define irq_qamdma_transmit_error (ibase+47) /* QAMDMA Transmit Error
+ * Interrupt */
+#define irq_qamdma_recv2high (ibase+46) /* QAMDMA Receive 2 High
+ * (Chans 63-32) */
+#define irq_qamdma_recv2low (ibase+45) /* QAMDMA Receive 2 Low
+ * (Chans 31-0) */
+#define irq_qamdma_recv1high (ibase+44) /* QAMDMA Receive 1 High
+ * (Chans 63-32) */
+#define irq_qamdma_recv1low (ibase+43) /* QAMDMA Receive 1 Low
+ * (Chans 31-0) */
+#define irq_qamdma_recv_error (ibase+42) /* QAMDMA Receive Error
+ * Interrupt */
+#define irq_mpegsplice (ibase+41) /* MPEG Splice Interrupt */
+#define irq_deinterlace_rdy (ibase+40) /* Deinterlacer Frame Ready
+ * Interrupt */
+#define irq_ext_in0 (ibase+39) /* External Interrupt irq_in0 */
+#define irq_gpio3 (ibase+38) /* GP I/O IRQ 3 - From GP I/O
+ * Module */
+#define irq_gpio2 (ibase+37) /* GP I/O IRQ 2 - From GP I/O
+ * Module (ABE_intN) */
+#define irq_pcrcmplt1 (ibase+36) /* PCR Capture Complete or
+ * Discontinuity 1 */
+#define irq_pcrcmplt2 (ibase+35) /* PCR Capture Complete or
+ * Discontinuity 2 */
+#define irq_parse_peierr (ibase+34) /* PID Parser Error Detect
+ * (PEI) */
+#define irq_parse_cont_err (ibase+33) /* PID Parser continuity error
+ * detect */
+#define irq_ds1framer (ibase+32) /* DS1 Framer Interrupt */
+/*------------- Register: int_stat_0 */
+#define irq_gpio1 (ibase+31) /* GP I/O IRQ 1 - From GP I/O
+ * Module */
+#define irq_gpio0 (ibase+30) /* GP I/O IRQ 0 - From GP I/O
+ * Module */
+#define irq_qpsk_out_aloha (ibase+29) /* QPSK Output Slotted Aloha
+ * (chan 3) Transmission
+ * Completed OK */
+#define irq_qpsk_out_tdma (ibase+28) /* QPSK Output TDMA (chan 2)
+ * Transmission Completed OK */
+#define irq_qpsk_out_reserve (ibase+27) /* QPSK Output Reservation
+ * (chan 1) Transmission
+ * Completed OK */
+#define irq_qpsk_out_aloha_err (ibase+26) /* QPSK Output Slotted Aloha
+ * (chan 3) Transmission
+ * completed with Errors. */
+#define irq_qpsk_out_tdma_err (ibase+25) /* QPSK Output TDMA (chan 2)
+ * Transmission completed with
+ * Errors. */
+#define irq_qpsk_out_rsrv_err (ibase+24) /* QPSK Output Reservation
+ * (chan 1) Transmission
+ * completed with Errors */
+#define irq_aloha_fail (ibase+23) /* Unsuccessful Resend of Aloha
+ * for N times. Aloha retry
+ * timeout for channel 3. */
+#define irq_timer1 (ibase+22) /* Programmable Timer
+ * Interrupt */
+#define irq_keyboard (ibase+21) /* Keyboard Module Interrupt */
+#define irq_i2c (ibase+20) /* I2C Module Interrupt */
+#define irq_spi (ibase+19) /* SPI Module Interrupt */
+#define irq_irblaster (ibase+18) /* IR Blaster Interrupt */
+#define irq_splice_detect (ibase+17) /* PID Key Change Interrupt or
+ * Splice Detect Interrupt */
+#define irq_se_micro (ibase+16) /* Secure Micro I/F Module
+ * Interrupt */
+#define irq_uart1 (ibase+15) /* UART Interrupt */
+#define irq_irrecv (ibase+14) /* IR Receiver Interrupt */
+#define irq_host_int1 (ibase+13) /* Host-to-Host Interrupt 1 */
+#define irq_host_int0 (ibase+12) /* Host-to-Host Interrupt 0 */
+#define irq_qpsk_hecerr (ibase+11) /* QPSK HEC Error Interrupt */
+#define irq_qpsk_crcerr (ibase+10) /* QPSK AAL-5 CRC Error
+ * Interrupt */
+/* 9 unused (bit 09) */
+/* 8 unused (bit 08) */
+#define irq_psicrcerr (ibase+7) /* QAM PSI CRC Error
+ * Interrupt */
+#define irq_psilength_err (ibase+6) /* QAM PSI Length Error
+ * Interrupt */
+#define irq_esfforward (ibase+5) /* ESF Interrupt Mark From
+ * Forward Path Reference -
+ * every 3ms when forward Mbits
+ * and forward slot control
+ * bytes are updated. */
+#define irq_esfreverse (ibase+4) /* ESF Interrupt Mark from
+ * Reverse Path Reference -
+ * delayed from forward mark by
+ * the ranging delay plus a
+ * fixed amount. When reverse
+ * Mbits and reverse slot
+ * control bytes are updated.
+ * Occurs every 3ms for 3.0M and
+ * 1.554 M upstream rates and
+ * every 6 ms for 256K upstream
+ * rate. */
+#define irq_aloha_timeout (ibase+3) /* Slotted-Aloha timeout on
+ * Channel 1. */
+#define irq_reservation (ibase+2) /* Partial (or Incremental)
+ * Reservation Message Completed
+ * or Slotted aloha verify for
+ * channel 1. */
+#define irq_aloha3 (ibase+1) /* Slotted-Aloha Message Verify
+ * Interrupt or Reservation
+ * increment completed for
+ * channel 3. */
+#define irq_mpeg_d (ibase+0) /* MPEG Decoder Interrupt */
+#endif /* _ASM_MACH_POWERTV_INTERRUPTS_H_ */
+
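These are ordinary Linux IRQ numbers (offsets from ibase), so platform drivers consume them through the usual request_irq() interface. A hedged sketch with a hypothetical handler and dev_id, purely to show how the defines above would be used:

	static irqreturn_t uart1_isr(int irq, void *dev_id)
	{
		/* acknowledge and service the UART here */
		return IRQ_HANDLED;
	}

	/* in the driver's probe/init path: */
	err = request_irq(irq_uart1, uart1_isr, 0, "powertv-uart1", dev);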
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
new file mode 100644
index 00000000000..e6276d5146e
--- /dev/null
+++ b/arch/mips/include/asm/mach-powertv/ioremap.h
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Portions Copyright (C) Cisco Systems, Inc.
+ */
+#ifndef __ASM_MACH_POWERTV_IOREMAP_H
+#define __ASM_MACH_POWERTV_IOREMAP_H
+
+#include <linux/types.h>
+
+#define LOW_MEM_BOUNDARY_PHYS 0x20000000
+#define LOW_MEM_BOUNDARY_MASK (~(LOW_MEM_BOUNDARY_PHYS - 1))
+
+/*
+ * The bus addresses differ from the physical addresses seen by the
+ * processor by an offset that varies with the ASIC version. Define a
+ * variable to hold the offset and some macros to make the
+ * conversion simpler. */
+extern unsigned long phys_to_bus_offset;
+
+#ifdef CONFIG_HIGHMEM
+#define MEM_GAP_PHYS 0x60000000
+/*
+ * TODO: Use hard-coded values for the conversion between physical and
+ * bus addresses until the bootloader provides a device tree.
+ */
+#define phys_to_bus(x) (((x) < LOW_MEM_BOUNDARY_PHYS) ? \
+ ((x) + phys_to_bus_offset) : (x))
+#define bus_to_phys(x) (((x) < MEM_GAP_PHYS) ? \
+ ((x) - phys_to_bus_offset) : (x))
+#else
+#define phys_to_bus(x) ((x) + phys_to_bus_offset)
+#define bus_to_phys(x) ((x) - phys_to_bus_offset)
+#endif
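A quick worked example of the conversion, using a hypothetical phys_to_bus_offset of 0x10000000 (the real value is set at runtime from the ASIC version): a physical address below LOW_MEM_BOUNDARY_PHYS is shifted up onto the bus and bus_to_phys() undoes it, while higher addresses pass through unchanged in the HIGHMEM configuration.

	phys_to_bus_offset = 0x10000000;               /* illustration only            */
	/* phys 0x01234000 < 0x20000000   ->  bus  0x11234000                          */
	/* bus  0x11234000 < MEM_GAP_PHYS ->  phys 0x01234000 again via bus_to_phys()  */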
+
+/*
+ * Determine whether the address we are given is for an ASIC device
+ * Params: addr Address to check
+ * Returns: Zero if the address is not for ASIC devices, non-zero
+ * if it is.
+ */
+static inline int asic_is_device_addr(phys_t addr)
+{
+ return !((phys_t)addr & (phys_t) LOW_MEM_BOUNDARY_MASK);
+}
+
+/*
+ * Determine whether the address we are given is external RAM mappable
+ * into KSEG1.
+ * Params: addr Address to check
+ * Returns: Zero if the address is not for low external RAM, non-zero if it is.
+ */
+static inline int asic_is_lowmem_ram_addr(phys_t addr)
+{
+ /*
+ * The RAM always starts at the following address in the processor's
+ * physical address space
+ */
+ static const phys_t phys_ram_base = 0x10000000;
+ phys_t bus_ram_base;
+
+ bus_ram_base = phys_to_bus_offset + phys_ram_base;
+
+ return addr >= bus_ram_base &&
+ addr < (bus_ram_base + (LOW_MEM_BOUNDARY_PHYS - phys_ram_base));
+}
+
+/*
+ * Allow physical addresses to be fixed up to help peripherals located
+ * outside the low 32-bit range -- generic pass-through version.
+ */
+static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
+{
+ return phys_addr;
+}
+
+static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
+ unsigned long flags)
+{
+ return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return 0;
+}
+#endif /* __ASM_MACH_POWERTV_IOREMAP_H */
diff --git a/arch/mips/include/asm/mach-powertv/irq.h b/arch/mips/include/asm/mach-powertv/irq.h
new file mode 100644
index 00000000000..4bd5d0c61a9
--- /dev/null
+++ b/arch/mips/include/asm/mach-powertv/irq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ASM_MACH_POWERTV_IRQ_H
+#define _ASM_MACH_POWERTV_IRQ_H
+#include <asm/mach-powertv/interrupts.h>
+
+#define MIPS_CPU_IRQ_BASE ibase
+#define NR_IRQS 127
+#endif
diff --git a/arch/mips/include/asm/mach-powertv/powertv-clock.h b/arch/mips/include/asm/mach-powertv/powertv-clock.h
new file mode 100644
index 00000000000..6f3e9a0fcf8
--- /dev/null
+++ b/arch/mips/include/asm/mach-powertv/powertv-clock.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+/*
+ * Local definitions for the powertv PCI code
+ */
+
+#ifndef _POWERTV_PCI_POWERTV_PCI_H_
+#define _POWERTV_PCI_POWERTV_PCI_H_
+extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+extern int asic_pcie_init(void);
+
+extern int log_level;
+#endif
diff --git a/arch/mips/include/asm/mach-excite/war.h b/arch/mips/include/asm/mach-powertv/war.h
index 1f82180c159..7ac05ecc512 100644
--- a/arch/mips/include/asm/mach-excite/war.h
+++ b/arch/mips/include/asm/mach-powertv/war.h
@@ -3,10 +3,13 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * This version, for the PowerTV platform, was copied from the Malta version.
+ *
* Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
*/
-#ifndef __ASM_MIPS_MACH_EXCITE_WAR_H
-#define __ASM_MIPS_MACH_EXCITE_WAR_H
+#ifndef __ASM_MACH_POWERTV_WAR_H
+#define __ASM_MACH_POWERTV_WAR_H
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
@@ -14,12 +17,12 @@
#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 1
+#define MIPS_CACHE_SYNC_WAR 1
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define RM9000_CDEX_SMP_WAR 1
-#define ICACHE_REFILLS_WORKAROUND_WAR 1
+#define RM9000_CDEX_SMP_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
-#endif /* __ASM_MIPS_MACH_EXCITE_WAR_H */
+#endif /* __ASM_MACH_POWERTV_WAR_H */
diff --git a/arch/mips/include/asm/mips-boards/bonito64.h b/arch/mips/include/asm/mips-boards/bonito64.h
index a576ce044c3..d14e2adc4be 100644
--- a/arch/mips/include/asm/mips-boards/bonito64.h
+++ b/arch/mips/include/asm/mips-boards/bonito64.h
@@ -26,11 +26,6 @@
/* offsets from base register */
#define BONITO(x) (x)
-#elif defined(CONFIG_LEMOTE_FULOONG2E)
-
-#define BONITO(x) (*(volatile u32 *)((char *)CKSEG1ADDR(BONITO_REG_BASE) + (x)))
-#define BONITO_IRQ_BASE 32
-
#else
/*
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 6083db58650..145bb81ccaa 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -24,6 +24,33 @@
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+ tlbmiss_handler_setup_pgd((unsigned long)(pgd))
+
+static inline void tlbmiss_handler_setup_pgd(unsigned long pgd)
+{
+ /* Check for swapper_pg_dir and convert to physical address. */
+ if ((pgd & CKSEG3) == CKSEG0)
+ pgd = CPHYSADDR(pgd);
+ write_c0_context(pgd << 11);
+}
+
+#define TLBMISS_HANDLER_SETUP() \
+ do { \
+ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
+ write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
+ } while (0)
+
+
+static inline unsigned long get_current_pgd(void)
+{
+ return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
+}
+
+#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
+
/*
* For the fast tlb miss handlers, we keep a per cpu array of pointers
* to the current pgd for each processor. Also, the proc. id is stuffed
@@ -46,7 +73,7 @@ extern unsigned long pgd_current[];
back_to_back_c0_hazard(); \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
-
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define ASID_INC 0x40
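The CONFIG_MIPS_PGD_C0_CONTEXT variant added above stashes the current PGD pointer in the CP0 Context register so the TLB refill handler can recover it without a memory load. A small worked example, assuming a hypothetical swapper_pg_dir at the CKSEG0 address 0x81234000, just to illustrate the encoding:

	/* setup: CPHYSADDR(0x81234000) == 0x01234000, stored as 0x01234000 << 11   */
	/* recover: (read_c0_context() >> 11) & ~0xfffUL == 0x01234000, which       */
	/* PHYS_TO_XKSEG_CACHED() then maps back to a cached XKPHYS pointer         */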
diff --git a/arch/mips/include/asm/octeon/cvmx-agl-defs.h b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
new file mode 100644
index 00000000000..ec94b9ab7be
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
@@ -0,0 +1,1194 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_AGL_DEFS_H__
+#define __CVMX_AGL_DEFS_H__
+
+#define CVMX_AGL_GMX_BAD_REG \
+ CVMX_ADD_IO_SEG(0x00011800E0000518ull)
+#define CVMX_AGL_GMX_BIST \
+ CVMX_ADD_IO_SEG(0x00011800E0000400ull)
+#define CVMX_AGL_GMX_DRV_CTL \
+ CVMX_ADD_IO_SEG(0x00011800E00007F0ull)
+#define CVMX_AGL_GMX_INF_MODE \
+ CVMX_ADD_IO_SEG(0x00011800E00007F8ull)
+#define CVMX_AGL_GMX_PRTX_CFG(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000010ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CAM0(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000180ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CAM1(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000188ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CAM2(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000190ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CAM3(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000198ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CAM4(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00001A0ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CAM5(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00001A8ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CAM_EN(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000108ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_ADR_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000100ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_DECISION(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000040ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_FRM_CHK(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000020ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_FRM_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000018ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_FRM_MAX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000030ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_FRM_MIN(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000028ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_IFG(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000058ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_INT_EN(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000008ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_INT_REG(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000000ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_JABBER(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000038ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_PAUSE_DROP_TIME(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000068ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000050ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_OCTS(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000088ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000098ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DMAC(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00000A8ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_OCTS_DRP(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00000B8ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_PKTS(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000080ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00000C0ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000090ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DMAC(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00000A0ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00000B0ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RXX_UDD_SKP(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000048ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_RX_BP_DROPX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000420ull + (((offset) & 1) * 8))
+#define CVMX_AGL_GMX_RX_BP_OFFX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000460ull + (((offset) & 1) * 8))
+#define CVMX_AGL_GMX_RX_BP_ONX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000440ull + (((offset) & 1) * 8))
+#define CVMX_AGL_GMX_RX_PRT_INFO \
+ CVMX_ADD_IO_SEG(0x00011800E00004E8ull)
+#define CVMX_AGL_GMX_RX_TX_STATUS \
+ CVMX_ADD_IO_SEG(0x00011800E00007E8ull)
+#define CVMX_AGL_GMX_SMACX(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000230ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_STAT_BP \
+ CVMX_ADD_IO_SEG(0x00011800E0000520ull)
+#define CVMX_AGL_GMX_TXX_APPEND(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000218ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000270ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_MIN_PKT(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000240ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_INTERVAL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000248ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_PAUSE_PKT_TIME(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000238ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_PAUSE_TOGO(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000258ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_PAUSE_ZERO(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000260ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_SOFT_PAUSE(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000250ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT0(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000280ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT1(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000288ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT2(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000290ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT3(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000298ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT4(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00002A0ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT5(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00002A8ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT6(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00002B0ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT7(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00002B8ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT8(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00002C0ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STAT9(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E00002C8ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_STATS_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000268ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TXX_THRESH(offset) \
+ CVMX_ADD_IO_SEG(0x00011800E0000210ull + (((offset) & 1) * 2048))
+#define CVMX_AGL_GMX_TX_BP \
+ CVMX_ADD_IO_SEG(0x00011800E00004D0ull)
+#define CVMX_AGL_GMX_TX_COL_ATTEMPT \
+ CVMX_ADD_IO_SEG(0x00011800E0000498ull)
+#define CVMX_AGL_GMX_TX_IFG \
+ CVMX_ADD_IO_SEG(0x00011800E0000488ull)
+#define CVMX_AGL_GMX_TX_INT_EN \
+ CVMX_ADD_IO_SEG(0x00011800E0000508ull)
+#define CVMX_AGL_GMX_TX_INT_REG \
+ CVMX_ADD_IO_SEG(0x00011800E0000500ull)
+#define CVMX_AGL_GMX_TX_JAM \
+ CVMX_ADD_IO_SEG(0x00011800E0000490ull)
+#define CVMX_AGL_GMX_TX_LFSR \
+ CVMX_ADD_IO_SEG(0x00011800E00004F8ull)
+#define CVMX_AGL_GMX_TX_OVR_BP \
+ CVMX_ADD_IO_SEG(0x00011800E00004C8ull)
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_DMAC \
+ CVMX_ADD_IO_SEG(0x00011800E00004A0ull)
+#define CVMX_AGL_GMX_TX_PAUSE_PKT_TYPE \
+ CVMX_ADD_IO_SEG(0x00011800E00004A8ull)
+
+union cvmx_agl_gmx_bad_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_bad_reg_s {
+ uint64_t reserved_38_63:26;
+ uint64_t txpsh1:1;
+ uint64_t txpop1:1;
+ uint64_t ovrflw1:1;
+ uint64_t txpsh:1;
+ uint64_t txpop:1;
+ uint64_t ovrflw:1;
+ uint64_t reserved_27_31:5;
+ uint64_t statovr:1;
+ uint64_t reserved_23_25:3;
+ uint64_t loststat:1;
+ uint64_t reserved_4_21:18;
+ uint64_t out_ovr:2;
+ uint64_t reserved_0_1:2;
+ } s;
+ struct cvmx_agl_gmx_bad_reg_s cn52xx;
+ struct cvmx_agl_gmx_bad_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_bad_reg_cn56xx {
+ uint64_t reserved_35_63:29;
+ uint64_t txpsh:1;
+ uint64_t txpop:1;
+ uint64_t ovrflw:1;
+ uint64_t reserved_27_31:5;
+ uint64_t statovr:1;
+ uint64_t reserved_23_25:3;
+ uint64_t loststat:1;
+ uint64_t reserved_3_21:19;
+ uint64_t out_ovr:1;
+ uint64_t reserved_0_1:2;
+ } cn56xx;
+ struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_bist {
+ uint64_t u64;
+ struct cvmx_agl_gmx_bist_s {
+ uint64_t reserved_10_63:54;
+ uint64_t status:10;
+ } s;
+ struct cvmx_agl_gmx_bist_s cn52xx;
+ struct cvmx_agl_gmx_bist_s cn52xxp1;
+ struct cvmx_agl_gmx_bist_s cn56xx;
+ struct cvmx_agl_gmx_bist_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_drv_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_drv_ctl_s {
+ uint64_t reserved_49_63:15;
+ uint64_t byp_en1:1;
+ uint64_t reserved_45_47:3;
+ uint64_t pctl1:5;
+ uint64_t reserved_37_39:3;
+ uint64_t nctl1:5;
+ uint64_t reserved_17_31:15;
+ uint64_t byp_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+ } s;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xx;
+ struct cvmx_agl_gmx_drv_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx {
+ uint64_t reserved_17_63:47;
+ uint64_t byp_en:1;
+ uint64_t reserved_13_15:3;
+ uint64_t pctl:5;
+ uint64_t reserved_5_7:3;
+ uint64_t nctl:5;
+ } cn56xx;
+ struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_inf_mode {
+ uint64_t u64;
+ struct cvmx_agl_gmx_inf_mode_s {
+ uint64_t reserved_2_63:62;
+ uint64_t en:1;
+ uint64_t reserved_0_0:1;
+ } s;
+ struct cvmx_agl_gmx_inf_mode_s cn52xx;
+ struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
+ struct cvmx_agl_gmx_inf_mode_s cn56xx;
+ struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_prtx_cfg_s {
+ uint64_t reserved_6_63:58;
+ uint64_t tx_en:1;
+ uint64_t rx_en:1;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } s;
+ struct cvmx_agl_gmx_prtx_cfg_s cn52xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn52xxp1;
+ struct cvmx_agl_gmx_prtx_cfg_s cn56xx;
+ struct cvmx_agl_gmx_prtx_cfg_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam0 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam1 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam2 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam3 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam4 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam5 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s {
+ uint64_t adr:64;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_cam_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s {
+ uint64_t reserved_8_63:56;
+ uint64_t en:8;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s {
+ uint64_t reserved_4_63:60;
+ uint64_t cam_mode:1;
+ uint64_t mcst:2;
+ uint64_t bcst:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_decision {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_decision_s {
+ uint64_t reserved_5_63:59;
+ uint64_t cnt:5;
+ } s;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xx;
+ struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_frm_chk {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_chk_s {
+ uint64_t reserved_9_63:55;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_chk_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s {
+ uint64_t reserved_10_63:54;
+ uint64_t pre_align:1;
+ uint64_t pad_len:1;
+ uint64_t vlan_len:1;
+ uint64_t pre_free:1;
+ uint64_t ctl_smac:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_drp:1;
+ uint64_t pre_strp:1;
+ uint64_t pre_chk:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_frm_max {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_max_s {
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_frm_min {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_frm_min_s {
+ uint64_t reserved_16_63:48;
+ uint64_t len:16;
+ } s;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
+ struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_ifg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_ifg_s {
+ uint64_t reserved_4_63:60;
+ uint64_t ifg:4;
+ } s;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_int_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_en_s {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_int_en_s cn52xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_en_s cn56xx;
+ struct cvmx_agl_gmx_rxx_int_en_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_int_reg_s {
+ uint64_t reserved_20_63:44;
+ uint64_t pause_drp:1;
+ uint64_t reserved_16_18:3;
+ uint64_t ifgerr:1;
+ uint64_t coldet:1;
+ uint64_t falerr:1;
+ uint64_t rsverr:1;
+ uint64_t pcterr:1;
+ uint64_t ovrerr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t skperr:1;
+ uint64_t rcverr:1;
+ uint64_t lenerr:1;
+ uint64_t alnerr:1;
+ uint64_t fcserr:1;
+ uint64_t jabber:1;
+ uint64_t maxerr:1;
+ uint64_t reserved_1_1:1;
+ uint64_t minerr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn52xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn56xx;
+ struct cvmx_agl_gmx_rxx_int_reg_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_jabber {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_jabber_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt:16;
+ } s;
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
+ struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_pause_drop_time {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s {
+ uint64_t reserved_16_63:48;
+ uint64_t status:16;
+ } s;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
+ struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s {
+ uint64_t reserved_1_63:63;
+ uint64_t rd_clr:1;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_octs_drp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s {
+ uint64_t reserved_48_63:16;
+ uint64_t cnt:48;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_bad {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_stats_pkts_drp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s {
+ uint64_t reserved_32_63:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rxx_udd_skp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rxx_udd_skp_s {
+ uint64_t reserved_9_63:55;
+ uint64_t fcssel:1;
+ uint64_t reserved_7_7:1;
+ uint64_t len:7;
+ } s;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
+ struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rx_bp_dropx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_dropx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mark:6;
+ } s;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rx_bp_offx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_offx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t mark:6;
+ } s;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rx_bp_onx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_bp_onx_s {
+ uint64_t reserved_9_63:55;
+ uint64_t mark:9;
+ } s;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
+ struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_rx_prt_info {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_prt_info_s {
+ uint64_t reserved_18_63:46;
+ uint64_t drop:2;
+ uint64_t reserved_2_15:14;
+ uint64_t commit:2;
+ } s;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
+ struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx {
+ uint64_t reserved_17_63:47;
+ uint64_t drop:1;
+ uint64_t reserved_1_15:15;
+ uint64_t commit:1;
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_rx_tx_status {
+ uint64_t u64;
+ struct cvmx_agl_gmx_rx_tx_status_s {
+ uint64_t reserved_6_63:58;
+ uint64_t tx:2;
+ uint64_t reserved_2_3:2;
+ uint64_t rx:2;
+ } s;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
+ struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx {
+ uint64_t reserved_5_63:59;
+ uint64_t tx:1;
+ uint64_t reserved_1_3:3;
+ uint64_t rx:1;
+ } cn56xx;
+ struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_smacx {
+ uint64_t u64;
+ struct cvmx_agl_gmx_smacx_s {
+ uint64_t reserved_48_63:16;
+ uint64_t smac:48;
+ } s;
+ struct cvmx_agl_gmx_smacx_s cn52xx;
+ struct cvmx_agl_gmx_smacx_s cn52xxp1;
+ struct cvmx_agl_gmx_smacx_s cn56xx;
+ struct cvmx_agl_gmx_smacx_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_stat_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_stat_bp_s {
+ uint64_t reserved_17_63:47;
+ uint64_t bp:1;
+ uint64_t cnt:16;
+ } s;
+ struct cvmx_agl_gmx_stat_bp_s cn52xx;
+ struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_stat_bp_s cn56xx;
+ struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_append {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_append_s {
+ uint64_t reserved_4_63:60;
+ uint64_t force_fcs:1;
+ uint64_t fcs:1;
+ uint64_t pad:1;
+ uint64_t preamble:1;
+ } s;
+ struct cvmx_agl_gmx_txx_append_s cn52xx;
+ struct cvmx_agl_gmx_txx_append_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_append_s cn56xx;
+ struct cvmx_agl_gmx_txx_append_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_ctl_s {
+ uint64_t reserved_2_63:62;
+ uint64_t xsdef_en:1;
+ uint64_t xscol_en:1;
+ } s;
+ struct cvmx_agl_gmx_txx_ctl_s cn52xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_min_pkt {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_min_pkt_s {
+ uint64_t reserved_8_63:56;
+ uint64_t min_size:8;
+ } s;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
+ struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_pause_pkt_interval {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s {
+ uint64_t reserved_16_63:48;
+ uint64_t interval:16;
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_pause_pkt_time {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s {
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+ } s;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_pause_togo {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_togo_s {
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+ } s;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_pause_zero {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_pause_zero_s {
+ uint64_t reserved_1_63:63;
+ uint64_t send:1;
+ } s;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
+ struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_soft_pause {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_soft_pause_s {
+ uint64_t reserved_16_63:48;
+ uint64_t time:16;
+ } s;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
+ struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat0 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat0_s {
+ uint64_t xsdef:32;
+ uint64_t xscol:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat1 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat1_s {
+ uint64_t scol:32;
+ uint64_t mcol:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat2 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat2_s {
+ uint64_t reserved_48_63:16;
+ uint64_t octs:48;
+ } s;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat3 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat3_s {
+ uint64_t reserved_32_63:32;
+ uint64_t pkts:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat4 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat4_s {
+ uint64_t hist1:32;
+ uint64_t hist0:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat5 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat5_s {
+ uint64_t hist3:32;
+ uint64_t hist2:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat6 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat6_s {
+ uint64_t hist5:32;
+ uint64_t hist4:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat7 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat7_s {
+ uint64_t hist7:32;
+ uint64_t hist6:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat8 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat8_s {
+ uint64_t mcst:32;
+ uint64_t bcst:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stat9 {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stat9_s {
+ uint64_t undflw:32;
+ uint64_t ctl:32;
+ } s;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xx;
+ struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_stats_ctl {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_stats_ctl_s {
+ uint64_t reserved_1_63:63;
+ uint64_t rd_clr:1;
+ } s;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
+ struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_txx_thresh {
+ uint64_t u64;
+ struct cvmx_agl_gmx_txx_thresh_s {
+ uint64_t reserved_6_63:58;
+ uint64_t cnt:6;
+ } s;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xx;
+ struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_bp_s {
+ uint64_t reserved_2_63:62;
+ uint64_t bp:2;
+ } s;
+ struct cvmx_agl_gmx_tx_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_bp_cn56xx {
+ uint64_t reserved_1_63:63;
+ uint64_t bp:1;
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_col_attempt {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_col_attempt_s {
+ uint64_t reserved_5_63:59;
+ uint64_t limit:5;
+ } s;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
+ struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_ifg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ifg_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ifg2:4;
+ uint64_t ifg1:4;
+ } s;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xx;
+ struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_int_en {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_en_s {
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } s;
+ struct cvmx_agl_gmx_tx_int_en_s cn52xx;
+ struct cvmx_agl_gmx_tx_int_en_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx {
+ uint64_t reserved_17_63:47;
+ uint64_t late_col:1;
+ uint64_t reserved_13_15:3;
+ uint64_t xsdef:1;
+ uint64_t reserved_9_11:3;
+ uint64_t xscol:1;
+ uint64_t reserved_3_7:5;
+ uint64_t undflw:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_int_reg {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_int_reg_s {
+ uint64_t reserved_18_63:46;
+ uint64_t late_col:2;
+ uint64_t reserved_14_15:2;
+ uint64_t xsdef:2;
+ uint64_t reserved_10_11:2;
+ uint64_t xscol:2;
+ uint64_t reserved_4_7:4;
+ uint64_t undflw:2;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } s;
+ struct cvmx_agl_gmx_tx_int_reg_s cn52xx;
+ struct cvmx_agl_gmx_tx_int_reg_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx {
+ uint64_t reserved_17_63:47;
+ uint64_t late_col:1;
+ uint64_t reserved_13_15:3;
+ uint64_t xsdef:1;
+ uint64_t reserved_9_11:3;
+ uint64_t xscol:1;
+ uint64_t reserved_3_7:5;
+ uint64_t undflw:1;
+ uint64_t reserved_1_1:1;
+ uint64_t pko_nxa:1;
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_jam {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_jam_s {
+ uint64_t reserved_8_63:56;
+ uint64_t jam:8;
+ } s;
+ struct cvmx_agl_gmx_tx_jam_s cn52xx;
+ struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_jam_s cn56xx;
+ struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_lfsr {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_lfsr_s {
+ uint64_t reserved_16_63:48;
+ uint64_t lfsr:16;
+ } s;
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
+ struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_ovr_bp {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_ovr_bp_s {
+ uint64_t reserved_10_63:54;
+ uint64_t en:2;
+ uint64_t reserved_6_7:2;
+ uint64_t bp:2;
+ uint64_t reserved_2_3:2;
+ uint64_t ign_full:2;
+ } s;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx {
+ uint64_t reserved_9_63:55;
+ uint64_t en:1;
+ uint64_t reserved_5_7:3;
+ uint64_t bp:1;
+ uint64_t reserved_1_3:3;
+ uint64_t ign_full:1;
+ } cn56xx;
+ struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_pause_pkt_dmac {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s {
+ uint64_t reserved_48_63:16;
+ uint64_t dmac:48;
+ } s;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
+};
+
+union cvmx_agl_gmx_tx_pause_pkt_type {
+ uint64_t u64;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s {
+ uint64_t reserved_16_63:48;
+ uint64_t type:16;
+ } s;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
+ struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
+};
+
+#endif
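Each CSR in this file is described twice: an address macro built with CVMX_ADD_IO_SEG() (the low bit of offset selects one of the two AGL/MII ports, with a 2048-byte stride) and a union that overlays named bit fields on the raw 64-bit register value. The usual access pattern, sketched on the assumption that the standard OCTEON cvmx_read_csr()/cvmx_write_csr() helpers are in scope -- an editorial illustration, not part of this patch:

	union cvmx_agl_gmx_prtx_cfg prt_cfg;

	prt_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(0)); /* read port 0 config */
	prt_cfg.s.en    = 1;                                   /* enable the port    */
	prt_cfg.s.tx_en = 1;
	prt_cfg.s.rx_en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(0), prt_cfg.u64); /* write it back      */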
diff --git a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
new file mode 100644
index 00000000000..dab6dca492f
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
@@ -0,0 +1,248 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_MIXX_DEFS_H__
+#define __CVMX_MIXX_DEFS_H__
+
+#define CVMX_MIXX_BIST(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100078ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_CTL(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100020ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_INTENA(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100050ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_IRCNT(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100030ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_IRHWM(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100028ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_IRING1(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100010ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_IRING2(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100018ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_ISR(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100048ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_ORCNT(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100040ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_ORHWM(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100038ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_ORING1(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100000ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_ORING2(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100008ull + (((offset) & 1) * 2048))
+#define CVMX_MIXX_REMCNT(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000100058ull + (((offset) & 1) * 2048))
+
+union cvmx_mixx_bist {
+ uint64_t u64;
+ struct cvmx_mixx_bist_s {
+ uint64_t reserved_4_63:60;
+ uint64_t mrqdat:1;
+ uint64_t ipfdat:1;
+ uint64_t irfdat:1;
+ uint64_t orfdat:1;
+ } s;
+ struct cvmx_mixx_bist_s cn52xx;
+ struct cvmx_mixx_bist_s cn52xxp1;
+ struct cvmx_mixx_bist_s cn56xx;
+ struct cvmx_mixx_bist_s cn56xxp1;
+};
+
+union cvmx_mixx_ctl {
+ uint64_t u64;
+ struct cvmx_mixx_ctl_s {
+ uint64_t reserved_8_63:56;
+ uint64_t crc_strip:1;
+ uint64_t busy:1;
+ uint64_t en:1;
+ uint64_t reset:1;
+ uint64_t lendian:1;
+ uint64_t nbtarb:1;
+ uint64_t mrq_hwm:2;
+ } s;
+ struct cvmx_mixx_ctl_s cn52xx;
+ struct cvmx_mixx_ctl_s cn52xxp1;
+ struct cvmx_mixx_ctl_s cn56xx;
+ struct cvmx_mixx_ctl_s cn56xxp1;
+};
+
+union cvmx_mixx_intena {
+ uint64_t u64;
+ struct cvmx_mixx_intena_s {
+ uint64_t reserved_7_63:57;
+ uint64_t orunena:1;
+ uint64_t irunena:1;
+ uint64_t data_drpena:1;
+ uint64_t ithena:1;
+ uint64_t othena:1;
+ uint64_t ivfena:1;
+ uint64_t ovfena:1;
+ } s;
+ struct cvmx_mixx_intena_s cn52xx;
+ struct cvmx_mixx_intena_s cn52xxp1;
+ struct cvmx_mixx_intena_s cn56xx;
+ struct cvmx_mixx_intena_s cn56xxp1;
+};
+
+union cvmx_mixx_ircnt {
+ uint64_t u64;
+ struct cvmx_mixx_ircnt_s {
+ uint64_t reserved_20_63:44;
+ uint64_t ircnt:20;
+ } s;
+ struct cvmx_mixx_ircnt_s cn52xx;
+ struct cvmx_mixx_ircnt_s cn52xxp1;
+ struct cvmx_mixx_ircnt_s cn56xx;
+ struct cvmx_mixx_ircnt_s cn56xxp1;
+};
+
+union cvmx_mixx_irhwm {
+ uint64_t u64;
+ struct cvmx_mixx_irhwm_s {
+ uint64_t reserved_40_63:24;
+ uint64_t ibplwm:20;
+ uint64_t irhwm:20;
+ } s;
+ struct cvmx_mixx_irhwm_s cn52xx;
+ struct cvmx_mixx_irhwm_s cn52xxp1;
+ struct cvmx_mixx_irhwm_s cn56xx;
+ struct cvmx_mixx_irhwm_s cn56xxp1;
+};
+
+union cvmx_mixx_iring1 {
+ uint64_t u64;
+ struct cvmx_mixx_iring1_s {
+ uint64_t reserved_60_63:4;
+ uint64_t isize:20;
+ uint64_t reserved_36_39:4;
+ uint64_t ibase:33;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mixx_iring1_s cn52xx;
+ struct cvmx_mixx_iring1_s cn52xxp1;
+ struct cvmx_mixx_iring1_s cn56xx;
+ struct cvmx_mixx_iring1_s cn56xxp1;
+};
+
+union cvmx_mixx_iring2 {
+ uint64_t u64;
+ struct cvmx_mixx_iring2_s {
+ uint64_t reserved_52_63:12;
+ uint64_t itlptr:20;
+ uint64_t reserved_20_31:12;
+ uint64_t idbell:20;
+ } s;
+ struct cvmx_mixx_iring2_s cn52xx;
+ struct cvmx_mixx_iring2_s cn52xxp1;
+ struct cvmx_mixx_iring2_s cn56xx;
+ struct cvmx_mixx_iring2_s cn56xxp1;
+};
+
+union cvmx_mixx_isr {
+ uint64_t u64;
+ struct cvmx_mixx_isr_s {
+ uint64_t reserved_7_63:57;
+ uint64_t orun:1;
+ uint64_t irun:1;
+ uint64_t data_drp:1;
+ uint64_t irthresh:1;
+ uint64_t orthresh:1;
+ uint64_t idblovf:1;
+ uint64_t odblovf:1;
+ } s;
+ struct cvmx_mixx_isr_s cn52xx;
+ struct cvmx_mixx_isr_s cn52xxp1;
+ struct cvmx_mixx_isr_s cn56xx;
+ struct cvmx_mixx_isr_s cn56xxp1;
+};
+
+union cvmx_mixx_orcnt {
+ uint64_t u64;
+ struct cvmx_mixx_orcnt_s {
+ uint64_t reserved_20_63:44;
+ uint64_t orcnt:20;
+ } s;
+ struct cvmx_mixx_orcnt_s cn52xx;
+ struct cvmx_mixx_orcnt_s cn52xxp1;
+ struct cvmx_mixx_orcnt_s cn56xx;
+ struct cvmx_mixx_orcnt_s cn56xxp1;
+};
+
+union cvmx_mixx_orhwm {
+ uint64_t u64;
+ struct cvmx_mixx_orhwm_s {
+ uint64_t reserved_20_63:44;
+ uint64_t orhwm:20;
+ } s;
+ struct cvmx_mixx_orhwm_s cn52xx;
+ struct cvmx_mixx_orhwm_s cn52xxp1;
+ struct cvmx_mixx_orhwm_s cn56xx;
+ struct cvmx_mixx_orhwm_s cn56xxp1;
+};
+
+union cvmx_mixx_oring1 {
+ uint64_t u64;
+ struct cvmx_mixx_oring1_s {
+ uint64_t reserved_60_63:4;
+ uint64_t osize:20;
+ uint64_t reserved_36_39:4;
+ uint64_t obase:33;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mixx_oring1_s cn52xx;
+ struct cvmx_mixx_oring1_s cn52xxp1;
+ struct cvmx_mixx_oring1_s cn56xx;
+ struct cvmx_mixx_oring1_s cn56xxp1;
+};
+
+union cvmx_mixx_oring2 {
+ uint64_t u64;
+ struct cvmx_mixx_oring2_s {
+ uint64_t reserved_52_63:12;
+ uint64_t otlptr:20;
+ uint64_t reserved_20_31:12;
+ uint64_t odbell:20;
+ } s;
+ struct cvmx_mixx_oring2_s cn52xx;
+ struct cvmx_mixx_oring2_s cn52xxp1;
+ struct cvmx_mixx_oring2_s cn56xx;
+ struct cvmx_mixx_oring2_s cn56xxp1;
+};
+
+union cvmx_mixx_remcnt {
+ uint64_t u64;
+ struct cvmx_mixx_remcnt_s {
+ uint64_t reserved_52_63:12;
+ uint64_t iremcnt:20;
+ uint64_t reserved_20_31:12;
+ uint64_t oremcnt:20;
+ } s;
+ struct cvmx_mixx_remcnt_s cn52xx;
+ struct cvmx_mixx_remcnt_s cn52xxp1;
+ struct cvmx_mixx_remcnt_s cn56xx;
+ struct cvmx_mixx_remcnt_s cn56xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
new file mode 100644
index 00000000000..9ae45fcbe3e
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
@@ -0,0 +1,178 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_SMIX_DEFS_H__
+#define __CVMX_SMIX_DEFS_H__
+
+#define CVMX_SMIX_CLK(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_CMD(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_EN(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_RD_DAT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256))
+#define CVMX_SMIX_WR_DAT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256))
+
+union cvmx_smix_clk {
+ uint64_t u64;
+ struct cvmx_smix_clk_s {
+ uint64_t reserved_25_63:39;
+ uint64_t mode:1;
+ uint64_t reserved_21_23:3;
+ uint64_t sample_hi:5;
+ uint64_t sample_mode:1;
+ uint64_t reserved_14_14:1;
+ uint64_t clk_idle:1;
+ uint64_t preamble:1;
+ uint64_t sample:4;
+ uint64_t phase:8;
+ } s;
+ struct cvmx_smix_clk_cn30xx {
+ uint64_t reserved_21_63:43;
+ uint64_t sample_hi:5;
+ uint64_t reserved_14_15:2;
+ uint64_t clk_idle:1;
+ uint64_t preamble:1;
+ uint64_t sample:4;
+ uint64_t phase:8;
+ } cn30xx;
+ struct cvmx_smix_clk_cn30xx cn31xx;
+ struct cvmx_smix_clk_cn30xx cn38xx;
+ struct cvmx_smix_clk_cn30xx cn38xxp2;
+ struct cvmx_smix_clk_cn50xx {
+ uint64_t reserved_25_63:39;
+ uint64_t mode:1;
+ uint64_t reserved_21_23:3;
+ uint64_t sample_hi:5;
+ uint64_t reserved_14_15:2;
+ uint64_t clk_idle:1;
+ uint64_t preamble:1;
+ uint64_t sample:4;
+ uint64_t phase:8;
+ } cn50xx;
+ struct cvmx_smix_clk_s cn52xx;
+ struct cvmx_smix_clk_cn50xx cn52xxp1;
+ struct cvmx_smix_clk_s cn56xx;
+ struct cvmx_smix_clk_cn50xx cn56xxp1;
+ struct cvmx_smix_clk_cn30xx cn58xx;
+ struct cvmx_smix_clk_cn30xx cn58xxp1;
+};
+
+union cvmx_smix_cmd {
+ uint64_t u64;
+ struct cvmx_smix_cmd_s {
+ uint64_t reserved_18_63:46;
+ uint64_t phy_op:2;
+ uint64_t reserved_13_15:3;
+ uint64_t phy_adr:5;
+ uint64_t reserved_5_7:3;
+ uint64_t reg_adr:5;
+ } s;
+ struct cvmx_smix_cmd_cn30xx {
+ uint64_t reserved_17_63:47;
+ uint64_t phy_op:1;
+ uint64_t reserved_13_15:3;
+ uint64_t phy_adr:5;
+ uint64_t reserved_5_7:3;
+ uint64_t reg_adr:5;
+ } cn30xx;
+ struct cvmx_smix_cmd_cn30xx cn31xx;
+ struct cvmx_smix_cmd_cn30xx cn38xx;
+ struct cvmx_smix_cmd_cn30xx cn38xxp2;
+ struct cvmx_smix_cmd_s cn50xx;
+ struct cvmx_smix_cmd_s cn52xx;
+ struct cvmx_smix_cmd_s cn52xxp1;
+ struct cvmx_smix_cmd_s cn56xx;
+ struct cvmx_smix_cmd_s cn56xxp1;
+ struct cvmx_smix_cmd_cn30xx cn58xx;
+ struct cvmx_smix_cmd_cn30xx cn58xxp1;
+};
+
+union cvmx_smix_en {
+ uint64_t u64;
+ struct cvmx_smix_en_s {
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+ } s;
+ struct cvmx_smix_en_s cn30xx;
+ struct cvmx_smix_en_s cn31xx;
+ struct cvmx_smix_en_s cn38xx;
+ struct cvmx_smix_en_s cn38xxp2;
+ struct cvmx_smix_en_s cn50xx;
+ struct cvmx_smix_en_s cn52xx;
+ struct cvmx_smix_en_s cn52xxp1;
+ struct cvmx_smix_en_s cn56xx;
+ struct cvmx_smix_en_s cn56xxp1;
+ struct cvmx_smix_en_s cn58xx;
+ struct cvmx_smix_en_s cn58xxp1;
+};
+
+union cvmx_smix_rd_dat {
+ uint64_t u64;
+ struct cvmx_smix_rd_dat_s {
+ uint64_t reserved_18_63:46;
+ uint64_t pending:1;
+ uint64_t val:1;
+ uint64_t dat:16;
+ } s;
+ struct cvmx_smix_rd_dat_s cn30xx;
+ struct cvmx_smix_rd_dat_s cn31xx;
+ struct cvmx_smix_rd_dat_s cn38xx;
+ struct cvmx_smix_rd_dat_s cn38xxp2;
+ struct cvmx_smix_rd_dat_s cn50xx;
+ struct cvmx_smix_rd_dat_s cn52xx;
+ struct cvmx_smix_rd_dat_s cn52xxp1;
+ struct cvmx_smix_rd_dat_s cn56xx;
+ struct cvmx_smix_rd_dat_s cn56xxp1;
+ struct cvmx_smix_rd_dat_s cn58xx;
+ struct cvmx_smix_rd_dat_s cn58xxp1;
+};
+
+union cvmx_smix_wr_dat {
+ uint64_t u64;
+ struct cvmx_smix_wr_dat_s {
+ uint64_t reserved_18_63:46;
+ uint64_t pending:1;
+ uint64_t val:1;
+ uint64_t dat:16;
+ } s;
+ struct cvmx_smix_wr_dat_s cn30xx;
+ struct cvmx_smix_wr_dat_s cn31xx;
+ struct cvmx_smix_wr_dat_s cn38xx;
+ struct cvmx_smix_wr_dat_s cn38xxp2;
+ struct cvmx_smix_wr_dat_s cn50xx;
+ struct cvmx_smix_wr_dat_s cn52xx;
+ struct cvmx_smix_wr_dat_s cn52xxp1;
+ struct cvmx_smix_wr_dat_s cn56xx;
+ struct cvmx_smix_wr_dat_s cn56xxp1;
+ struct cvmx_smix_wr_dat_s cn58xx;
+ struct cvmx_smix_wr_dat_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index cac9b1a206f..4d0a8c61fc3 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -47,6 +47,7 @@ struct octeon_cop2_state;
extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
extern void octeon_crypto_disable(struct octeon_cop2_state *state,
unsigned long flags);
+extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
extern void octeon_init_cvmcount(void);
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index d6eb6134abe..1854336e56a 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -390,6 +390,19 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
#include <asm-generic/pgtable.h>
/*
+ * uncached accelerated TLB map for video memory access
+ */
+#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+struct file;
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+#endif
+
+/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
*/
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index bfce5c786f1..63741ca1e42 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -85,8 +85,7 @@ extern void prom_identify_arch(void);
extern PCHAR ArcGetEnvironmentVariable(PCHAR name);
extern LONG ArcSetEnvironmentVariable(PCHAR name, PCHAR value);
-/* ARCS command line acquisition and parsing. */
-extern char *prom_getcmdline(void);
+/* ARCS command line parsing. */
extern void prom_init_cmdline(void);
/* Acquiring info about the current time, etc. */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f0..21ef9efbde4 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
 * becomes equal to the initial value of the tail.
*/
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return ((counters >> 14) ^ counters) & 0x1fff;
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- while (__raw_spin_is_locked(x)) { cpu_relax(); }
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ while (arch_spin_is_locked(x)) { cpu_relax(); }
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return (((counters >> 14) - counters) & 0x1fff) > 1;
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
[my_ticket] "=&r" (my_ticket));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_llsc_mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
int tmp;
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " # __raw_spin_unlock \n"
+ " # arch_spin_unlock \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addiu %[ticket], %[ticket], 1 \n"
" ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
[ticket] "=&r" (tmp));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_unlock \n"
+ " .set push # arch_spin_unlock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
}
}
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
[now_serving] "=&r" (tmp3));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
/*
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- "1: ll %1, %2 # __raw_read_unlock \n"
+ "1: ll %1, %2 # arch_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_unlock \n"
+ " .set noreorder # arch_read_unlock \n"
"1: ll %1, %2 \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_llsc_mb();
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
__asm__ __volatile__(
- " # __raw_write_unlock \n"
+ " # arch_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
: "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return ret;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index adeedaa116c..ee197c2f9c9 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -12,14 +12,14 @@ typedef struct {
* bits 15..28: ticket
*/
unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index db0fa7b5aea..3b6da3330e3 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -51,9 +51,6 @@
LONG_S v1, PT_ACX(sp)
#else
mfhi v1
- LONG_S v1, PT_HI(sp)
- mflo v1
- LONG_S v1, PT_LO(sp)
#endif
#ifdef CONFIG_32BIT
LONG_S $8, PT_R8(sp)
@@ -62,10 +59,17 @@
LONG_S $10, PT_R10(sp)
LONG_S $11, PT_R11(sp)
LONG_S $12, PT_R12(sp)
+#ifndef CONFIG_CPU_HAS_SMARTMIPS
+ LONG_S v1, PT_HI(sp)
+ mflo v1
+#endif
LONG_S $13, PT_R13(sp)
LONG_S $14, PT_R14(sp)
LONG_S $15, PT_R15(sp)
LONG_S $24, PT_R24(sp)
+#ifndef CONFIG_CPU_HAS_SMARTMIPS
+ LONG_S v1, PT_LO(sp)
+#endif
.endm
.macro SAVE_STATIC
@@ -83,15 +87,19 @@
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT 19 /* TCBIND */
+#define CPU_ID_REG CP0_TCBIND
+#define CPU_ID_MFC0 mfc0
+#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+#define PTEBASE_SHIFT 48 /* XCONTEXT */
+#define CPU_ID_REG CP0_XCONTEXT
+#define CPU_ID_MFC0 MFC0
#else
#define PTEBASE_SHIFT 23 /* CONTEXT */
+#define CPU_ID_REG CP0_CONTEXT
+#define CPU_ID_MFC0 MFC0
#endif
.macro get_saved_sp /* SMP variation */
-#ifdef CONFIG_MIPS_MT_SMTC
- mfc0 k0, CP0_TCBIND
-#else
- MFC0 k0, CP0_CONTEXT
-#endif
+ CPU_ID_MFC0 k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
lui k1, %hi(kernelsp)
#else
@@ -107,11 +115,7 @@
.endm
.macro set_saved_sp stackp temp temp2
-#ifdef CONFIG_MIPS_MT_SMTC
- mfc0 \temp, CP0_TCBIND
-#else
- MFC0 \temp, CP0_CONTEXT
-#endif
+ CPU_ID_MFC0 \temp, CPU_ID_REG
LONG_SRL \temp, PTEBASE_SHIFT
LONG_S \stackp, kernelsp(\temp)
.endm
@@ -166,7 +170,6 @@
LONG_S $0, PT_R0(sp)
mfc0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp)
- LONG_S v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Ideally, these instructions would be shuffled in
@@ -178,20 +181,21 @@
LONG_S v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
- mfc0 v1, CP0_CAUSE
LONG_S $5, PT_R5(sp)
- LONG_S v1, PT_CAUSE(sp)
+ LONG_S v1, PT_STATUS(sp)
+ mfc0 v1, CP0_CAUSE
LONG_S $6, PT_R6(sp)
- MFC0 v1, CP0_EPC
LONG_S $7, PT_R7(sp)
+ LONG_S v1, PT_CAUSE(sp)
+ MFC0 v1, CP0_EPC
#ifdef CONFIG_64BIT
LONG_S $8, PT_R8(sp)
LONG_S $9, PT_R9(sp)
#endif
- LONG_S v1, PT_EPC(sp)
LONG_S $25, PT_R25(sp)
LONG_S $28, PT_R28(sp)
LONG_S $31, PT_R31(sp)
+ LONG_S v1, PT_EPC(sp)
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index eecd2a9f155..9326af5186f 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,14 +2,17 @@
# Makefile for the Linux/MIPS kernel.
#
-CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
-
extra-y := head.o init_task.o vmlinux.lds
obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
ptrace.o reset.o setup.o signal.o syscall.o \
time.o topology.o traps.o unaligned.o watch.o
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+endif
+
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
@@ -19,6 +22,7 @@ obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
+obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
@@ -26,6 +30,8 @@ obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+
obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
@@ -92,4 +98,8 @@ CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/n
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
+obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/
+
EXTRA_CFLAGS += -Werror
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 7a51866068a..80e202eca05 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -16,6 +16,7 @@
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/stddef.h>
+#include <linux/module.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
@@ -32,6 +33,7 @@
* the CPU very much.
*/
void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
static void r3081_wait(void)
{
diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig
new file mode 100644
index 00000000000..58c601eee6f
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/Kconfig
@@ -0,0 +1,41 @@
+#
+# CPU Frequency scaling
+#
+
+config MIPS_EXTERNAL_TIMER
+ bool
+
+config MIPS_CPUFREQ
+ bool
+ default y
+ depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
+
+if MIPS_CPUFREQ
+
+menu "CPU Frequency scaling"
+
+source "drivers/cpufreq/Kconfig"
+
+if CPU_FREQ
+
+comment "CPUFreq processor drivers"
+
+config LOONGSON2_CPUFREQ
+ tristate "Loongson2 CPUFreq Driver"
+ select CPU_FREQ_TABLE
+ depends on MIPS_CPUFREQ
+ help
+ This option adds a CPUFreq driver for Loongson processors which
+ support software-configurable CPU frequency.
+
+ Loongson2F and its successors support this feature.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+endif # CPU_FREQ
+
+endmenu
+
+endif # MIPS_CPUFREQ
diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile
new file mode 100644
index 00000000000..c3479a432ef
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Linux/MIPS cpufreq.
+#
+
+obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/kernel/cpufreq/loongson2_clock.c
new file mode 100644
index 00000000000..d7ca256e33e
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/loongson2_clock.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/platform_device.h>
+
+#include <asm/clock.h>
+
+#include <loongson.h>
+
+static LIST_HEAD(clock_list);
+static DEFINE_SPINLOCK(clock_lock);
+static DEFINE_MUTEX(clock_list_sem);
+
+/* Minimum CLK support */
+enum {
+ DC_ZERO, DC_25PT = 2, DC_37PT, DC_50PT, DC_62PT, DC_75PT,
+ DC_87PT, DC_DISABLE, DC_RESV
+};
+
+struct cpufreq_frequency_table loongson2_clockmod_table[] = {
+ {DC_RESV, CPUFREQ_ENTRY_INVALID},
+ {DC_ZERO, CPUFREQ_ENTRY_INVALID},
+ {DC_25PT, 0},
+ {DC_37PT, 0},
+ {DC_50PT, 0},
+ {DC_62PT, 0},
+ {DC_75PT, 0},
+ {DC_87PT, 0},
+ {DC_DISABLE, 0},
+ {DC_RESV, CPUFREQ_TABLE_END},
+};
+EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
+
+static struct clk cpu_clk = {
+ .name = "cpu_clk",
+ .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
+ .rate = 800000000,
+};
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ return &cpu_clk;
+}
+EXPORT_SYMBOL(clk_get);
+
+static void propagate_rate(struct clk *clk)
+{
+ struct clk *clkp;
+
+ list_for_each_entry(clkp, &clock_list, node) {
+ if (likely(clkp->parent != clk))
+ continue;
+ if (likely(clkp->ops && clkp->ops->recalc))
+ clkp->ops->recalc(clkp);
+ if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
+ propagate_rate(clkp);
+ }
+}
+
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return (unsigned long)clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_set_rate_ex(clk, rate, 0);
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
+
+int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
+{
+ int ret = 0;
+ int regval;
+ int i;
+
+ if (likely(clk->ops && clk->ops->set_rate)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ ret = clk->ops->set_rate(clk, rate, algo_id);
+ spin_unlock_irqrestore(&clock_lock, flags);
+ }
+
+ if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
+ propagate_rate(clk);
+
+ for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END;
+ i++) {
+ if (loongson2_clockmod_table[i].frequency ==
+ CPUFREQ_ENTRY_INVALID)
+ continue;
+ if (rate == loongson2_clockmod_table[i].frequency)
+ break;
+ }
+ if (rate != loongson2_clockmod_table[i].frequency)
+ return -ENOTSUPP;
+
+ clk->rate = rate;
+
+ regval = LOONGSON_CHIPCFG0;
+ regval = (regval & ~0x7) | (loongson2_clockmod_table[i].index - 1);
+ LOONGSON_CHIPCFG0 = regval;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate_ex);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (likely(clk->ops && clk->ops->round_rate)) {
+ unsigned long flags, rounded;
+
+ spin_lock_irqsave(&clock_lock, flags);
+ rounded = clk->ops->round_rate(clk, rate);
+ spin_unlock_irqrestore(&clock_lock, flags);
+
+ return rounded;
+ }
+
+ return rate;
+}
+EXPORT_SYMBOL_GPL(clk_round_rate);
+
+/*
+ * This is the simple version of the Loongson-2 wait; maybe we need to do
+ * this in interrupt-disabled context.
+ */
+
+DEFINE_SPINLOCK(loongson2_wait_lock);
+void loongson2_cpu_wait(void)
+{
+ u32 cpu_freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&loongson2_wait_lock, flags);
+ cpu_freq = LOONGSON_CHIPCFG0;
+ LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
+ spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+}
+EXPORT_SYMBOL_GPL(loongson2_cpu_wait);
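
The clk_set_rate_ex() path above reduces to a table lookup: entries 2..8 of the
clockmod table run the core at 2/8 .. 8/8 of the base clock, and the matching
index minus one is written into the low three bits of LOONGSON_CHIPCFG0. A
minimal userspace sketch of that lookup follows; the 800000 kHz base clock and
the 600000 kHz request are example values only (the real table is filled from
cpu_clock_freq by the cpufreq driver below).

/* Illustration only: mirrors the table fill in loongson2_cpufreq_cpu_init()
 * and the lookup in clk_set_rate_ex(); base_khz and rate are example values. */
#include <stdio.h>

#define ENTRY_INVALID	(~0u)
#define TABLE_END	(~1u)

struct freq_entry {
	unsigned int index;		/* duty-cycle selector, DC_xxPT */
	unsigned int frequency;		/* kHz */
};

int main(void)
{
	struct freq_entry table[] = {
		{ 9, ENTRY_INVALID },		/* DC_RESV */
		{ 0, ENTRY_INVALID },		/* DC_ZERO */
		{ 2, 0 }, { 3, 0 }, { 4, 0 },	/* DC_25PT, DC_37PT, DC_50PT */
		{ 5, 0 }, { 6, 0 }, { 7, 0 },	/* DC_62PT, DC_75PT, DC_87PT */
		{ 8, 0 },			/* DC_DISABLE = full speed  */
		{ 9, TABLE_END },
	};
	unsigned int base_khz = 800000;		/* assumed cpu_clock_freq / 1000 */
	unsigned int rate = 600000;		/* requested rate, kHz */
	int i;

	/* same fill as loongson2_cpufreq_cpu_init(): entry i runs at i/8 of base */
	for (i = 2; table[i].frequency != TABLE_END; i++)
		table[i].frequency = (base_khz * i) / 8;

	/* same search as clk_set_rate_ex() */
	for (i = 0; table[i].frequency != TABLE_END; i++) {
		if (table[i].frequency == ENTRY_INVALID)
			continue;
		if (table[i].frequency == rate)
			break;
	}
	if (table[i].frequency != rate) {
		printf("%u kHz is not an exact table entry\n", rate);
		return 1;
	}

	/* clk_set_rate_ex() then writes index - 1 into CHIPCFG0 bits 2..0 */
	printf("%u kHz -> index %u, CHIPCFG0[2:0] = %u\n",
	       rate, table[i].index, table[i].index - 1);
	return 0;
}

With these example values the program prints "600000 kHz -> index 6,
CHIPCFG0[2:0] = 5", i.e. the DC_75PT duty cycle.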
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
new file mode 100644
index 00000000000..2f6a0b147ab
--- /dev/null
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -0,0 +1,227 @@
+/*
+ * Cpufreq driver for the Loongson-2 processors
+ *
+ * The 2E revision of the Loongson processor does not support this feature.
+ *
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/sched.h> /* set_cpus_allowed() */
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <asm/clock.h>
+
+#include <loongson.h>
+
+static uint nowait;
+
+static struct clk *cpuclk;
+
+static void (*saved_cpu_wait) (void);
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data);
+
+static struct notifier_block loongson2_cpufreq_notifier_block = {
+ .notifier_call = loongson2_cpu_freq_notifier
+};
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ current_cpu_data.udelay_val = loops_per_jiffy;
+
+ return 0;
+}
+
+static unsigned int loongson2_cpufreq_get(unsigned int cpu)
+{
+ return clk_get_rate(cpuclk);
+}
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned int newstate = 0;
+ cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+ unsigned int freq;
+
+ if (!cpu_online(cpu))
+ return -ENODEV;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+ if (cpufreq_frequency_table_target
+ (policy, &loongson2_clockmod_table[0], target_freq, relation,
+ &newstate))
+ return -EINVAL;
+
+ freq =
+ ((cpu_clock_freq / 1000) *
+ loongson2_clockmod_table[newstate].index) / 8;
+ if (freq < policy->min || freq > policy->max)
+ return -EINVAL;
+
+ pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
+
+ freqs.cpu = cpu;
+ freqs.old = loongson2_cpufreq_get(cpu);
+ freqs.new = freq;
+ freqs.flags = 0;
+
+ if (freqs.new == freqs.old)
+ return 0;
+
+ /* notifiers */
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ set_cpus_allowed(current, cpus_allowed);
+
+ /* setting the cpu frequency */
+ clk_set_rate(cpuclk, freq);
+
+ /* notifiers */
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ pr_debug("cpufreq: set frequency %u kHz\n", freq);
+
+ return 0;
+}
+
+static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int i;
+
+ if (!cpu_online(policy->cpu))
+ return -ENODEV;
+
+ cpuclk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(cpuclk)) {
+ printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ cpuclk->rate = cpu_clock_freq / 1000;
+ if (!cpuclk->rate)
+ return -EINVAL;
+
+ /* clock table init */
+ for (i = 2;
+ (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
+ i++)
+ loongson2_clockmod_table[i].frequency = (cpuclk->rate * i) / 8;
+
+ policy->cur = loongson2_cpufreq_get(policy->cpu);
+
+ cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
+ policy->cpu);
+
+ return cpufreq_frequency_table_cpuinfo(policy,
+ &loongson2_clockmod_table[0]);
+}
+
+static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy,
+ &loongson2_clockmod_table[0]);
+}
+
+static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ clk_put(cpuclk);
+ return 0;
+}
+
+static struct freq_attr *loongson2_table_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver loongson2_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .name = "loongson2",
+ .init = loongson2_cpufreq_cpu_init,
+ .verify = loongson2_cpufreq_verify,
+ .target = loongson2_cpufreq_target,
+ .get = loongson2_cpufreq_get,
+ .exit = loongson2_cpufreq_exit,
+ .attr = loongson2_table_attr,
+};
+
+static struct platform_device_id platform_device_ids[] = {
+ {
+ .name = "loongson2_cpufreq",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(platform, platform_device_ids);
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "loongson2_cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .id_table = platform_device_ids,
+};
+
+static int __init cpufreq_init(void)
+{
+ int ret;
+
+ /* Register platform stuff */
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ return ret;
+
+ pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
+
+ cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
+
+ if (!ret && !nowait) {
+ saved_cpu_wait = cpu_wait;
+ cpu_wait = loongson2_cpu_wait;
+ }
+
+ return ret;
+}
+
+static void __exit cpufreq_exit(void)
+{
+ if (!nowait && saved_cpu_wait)
+ cpu_wait = saved_cpu_wait;
+ cpufreq_unregister_driver(&loongson2_cpufreq_driver);
+ cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ platform_driver_unregister(&platform_driver);
+}
+
+module_init(cpufreq_init);
+module_exit(cpufreq_exit);
+
+module_param(nowait, uint, 0644);
+MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
+
+MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
+MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
new file mode 100644
index 00000000000..a27c16c8690
--- /dev/null
+++ b/arch/mips/kernel/csrc-powertv.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2008 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * This file is derived from kernel/csrc-r4k.c
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h> /* Not included in linux/time.h */
+
+#include <asm/mach-powertv/asic_regs.h>
+#include "powertv-clock.h"
+
+/* MIPS PLL Register Definitions */
+#define PLL_GET_M(x) (((x) >> 8) & 0x000000FF)
+#define PLL_GET_N(x) (((x) >> 16) & 0x000000FF)
+#define PLL_GET_P(x) (((x) >> 24) & 0x00000007)
+
+/*
+ * returns: Clock frequency in kHz
+ */
+unsigned int __init mips_get_pll_freq(void)
+{
+ unsigned int pll_reg, m, n, p;
+ unsigned int fin = 54000; /* Base frequency in kHz */
+ unsigned int fout;
+
+ /* Read PLL register setting */
+ pll_reg = asic_read(mips_pll_setup);
+ m = PLL_GET_M(pll_reg);
+ n = PLL_GET_N(pll_reg);
+ p = PLL_GET_P(pll_reg);
+ pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p);
+
+ /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
+ fout = ((2 * n * fin) / (m * (0x01 << p)));
+
+ pr_info("MIPS Clock Freq=%d kHz\n", fout);
+
+ return fout;
+}
+
+static cycle_t c0_hpt_read(struct clocksource *cs)
+{
+ return read_c0_count();
+}
+
+static struct clocksource clocksource_mips = {
+ .name = "powertv-counter",
+ .read = c0_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init powertv_c0_hpt_clocksource_init(void)
+{
+ unsigned int pll_freq = mips_get_pll_freq();
+
+ pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000,
+ (pll_freq % 1000) * 100 / 1000);
+
+ mips_hpt_frequency = pll_freq / 2 * 1000;
+
+ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
+
+ clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
+
+ clocksource_register(&clocksource_mips);
+}
+
+/**
+ * struct tim_c - free running counter
+ * @hi: High 16 bits of the counter
+ * @lo: Low 32 bits of the counter
+ *
+ * Lays out the structure of the free running counter in memory. This counter
+ * increments at a rate of 27 MHz/8 on all platforms.
+ */
+struct tim_c {
+ unsigned int hi;
+ unsigned int lo;
+};
+
+static struct tim_c *tim_c;
+
+static cycle_t tim_c_read(struct clocksource *cs)
+{
+ unsigned int hi;
+ unsigned int next_hi;
+ unsigned int lo;
+
+ hi = readl(&tim_c->hi);
+
+ for (;;) {
+ lo = readl(&tim_c->lo);
+ next_hi = readl(&tim_c->hi);
+ if (next_hi == hi)
+ break;
+ hi = next_hi;
+ }
+
+pr_crit("%s: read %llx\n", __func__, ((u64) hi << 32) | lo);
+ return ((u64) hi << 32) | lo;
+}
+
+#define TIM_C_SIZE 48 /* # bits in the timer */
+
+static struct clocksource clocksource_tim_c = {
+ .name = "powertv-tim_c",
+ .read = tim_c_read,
+ .mask = CLOCKSOURCE_MASK(TIM_C_SIZE),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/**
+ * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock
+ *
+ * The hard part here is coming up with a constant k and shift s such that
+ * the 48-bit TIM_C value multiplied by k doesn't overflow and that value,
+ * when shifted right by s, yields the corresponding number of nanoseconds.
+ * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to
+ * 1 / (27,000,000/8) seconds. Multiply that by a billion and you get the
+ * number of nanoseconds. Since the TIM_C value has 48 bits and the math is
+ * done in 64 bits, avoiding an overflow means that k must fit in fewer
+ * than 64 - 48 = 16 bits.
+ */
+static void __init powertv_tim_c_clocksource_init(void)
+{
+ int prescale;
+ unsigned long dividend;
+ unsigned long k;
+ int s;
+ const int max_k_bits = (64 - 48) - 1;
+ const unsigned long billion = 1000000000;
+ const unsigned long counts_per_second = 27000000 / 8;
+
+ prescale = BITS_PER_LONG - ilog2(billion) - 1;
+ dividend = billion << prescale;
+ k = dividend / counts_per_second;
+ s = ilog2(k) - max_k_bits;
+
+ if (s < 0) {
+ s = prescale;
+ } else {
+ k >>= s;
+ s += prescale;
+ }
+
+ clocksource_tim_c.mult = k;
+ clocksource_tim_c.shift = s;
+ clocksource_tim_c.rating = 200;
+
+ clocksource_register(&clocksource_tim_c);
+ tim_c = (struct tim_c *) asic_reg_addr(tim_ch);
+}
+
+/**
+ * powertv_clocksource_init - initialize all clocksources
+ */
+void __init powertv_clocksource_init(void)
+{
+ powertv_c0_hpt_clocksource_init();
+ powertv_tim_c_clocksource_init();
+}
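
The mult/shift derivation described above powertv_tim_c_clocksource_init() can
be checked with a standalone calculation. A minimal sketch, assuming
BITS_PER_LONG == 32 (these set-top SoCs run a 32-bit kernel): it arrives at
mult = 1185 and shift = 2, i.e. about 296.25 ns per TIM_C tick versus the
exact 10^9 / (27000000 / 8), roughly 296.3 ns.

/* Illustration only: reproduces the mult/shift computation from
 * powertv_tim_c_clocksource_init(), assuming BITS_PER_LONG == 32. */
#include <stdio.h>
#include <stdint.h>

static int ilog2_u32(uint32_t v)
{
	int l = -1;

	while (v) {			/* position of the highest set bit */
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	const int bits_per_long = 32;
	const int max_k_bits = (64 - 48) - 1;			/* 15 */
	const uint32_t billion = 1000000000;
	const uint32_t counts_per_second = 27000000 / 8;	/* 3375000 */
	int prescale, s;
	uint32_t dividend, k;

	prescale = bits_per_long - ilog2_u32(billion) - 1;	/* 32 - 29 - 1 = 2 */
	dividend = billion << prescale;				/* 4000000000 */
	k = dividend / counts_per_second;			/* 1185 */
	s = ilog2_u32(k) - max_k_bits;				/* 10 - 15 = -5 */

	if (s < 0) {
		s = prescale;
	} else {
		k >>= s;
		s += prescale;
	}

	/* cyc2ns(cycles) = (cycles * mult) >> shift = (cycles * 1185) >> 2 */
	printf("mult = %u, shift = %d, ns per tick = %.2f (exact %.3f)\n",
	       k, s, (double)k / (1u << s), 1e9 / counts_per_second);
	return 0;
}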
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
new file mode 100644
index 00000000000..68b067040d8
--- /dev/null
+++ b/arch/mips/kernel/ftrace.c
@@ -0,0 +1,275 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2009 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ *
+ * Thanks go to Steven Rostedt for writing the original x86 version.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
+#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
+#define jump_insn_encode(op_code, addr) \
+ ((unsigned int)((op_code) | (((addr) >> 2) & ADDR_MASK)))
+
+static unsigned int ftrace_nop = 0x00000000;
+
+static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
+{
+ int faulted;
+
+ /* *(unsigned int *)ip = new_code; */
+ safe_store_code(new_code, ip, faulted);
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+
+static int lui_v1;
+static int jal_mcount;
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ int faulted;
+ unsigned long ip = rec->ip;
+
+ /* Modules are compiled with -mlong-calls while the kernel is not,
+ * so the two cases have to be handled differently. */
+ if (ip & 0x40000000) {
+ /* record it for ftrace_make_call */
+ if (lui_v1 == 0) {
+ /* lui_v1 = *(unsigned int *)ip; */
+ safe_load_code(lui_v1, ip, faulted);
+
+ if (unlikely(faulted))
+ return -EFAULT;
+ }
+
+ /* lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ * nop
+ * 1f: (ip + 12)
+ */
+ new = 0x10000004;
+ } else {
+ /* record/calculate it for ftrace_make_call */
+ if (jal_mcount == 0) {
+ /* We could record it directly like this:
+ * jal_mcount = *(unsigned int *)ip;
+ * but instead we construct a jal that jumps over the first two
+ * instructions of _mcount. */
+ jal_mcount = jump_insn_encode(JAL, (MCOUNT_ADDR + 8));
+ }
+
+ /* move at, ra
+ * jalr v1 --> nop
+ */
+ new = ftrace_nop;
+ }
+ return ftrace_modify_code(ip, new);
+}
+
+static int modified; /* initialized as 0 by default */
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ /* We only need to remove the "b ftrace_stub" the first time. */
+ if (modified == 0) {
+ modified = 1;
+ ftrace_modify_code(addr, ftrace_nop);
+ }
+ /* ip, module: 0xc0000000, kernel: 0x80000000 */
+ new = (ip & 0x40000000) ? lui_v1 : jal_mcount;
+
+ return ftrace_modify_code(ip, new);
+}
+
+#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned int new;
+
+ new = jump_insn_encode(JAL, (unsigned long)func);
+
+ return ftrace_modify_code(FTRACE_CALL_IP, new);
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ /* The return code is returned via data */
+ *(unsigned long *)data = 0;
+
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+#define JMP 0x08000000 /* jump to target directly */
+#define CALL_FTRACE_GRAPH_CALLER \
+ jump_insn_encode(JMP, (unsigned long)(&ftrace_graph_caller))
+#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
+ CALL_FTRACE_GRAPH_CALLER);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, ftrace_nop);
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
+#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
+
+unsigned long ftrace_get_parent_addr(unsigned long self_addr,
+ unsigned long parent,
+ unsigned long parent_addr,
+ unsigned long fp)
+{
+ unsigned long sp, ip, ra;
+ unsigned int code;
+ int faulted;
+
+ /* in module or kernel? */
+ if (self_addr & 0x40000000) {
+ /* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
+ ip = self_addr - 20;
+ } else {
+ /* kernel: move to the instruction "move ra, at" */
+ ip = self_addr - 12;
+ }
+
+ /* Search the text backwards until we find a non-store instruction or
+ * the "s{d,w} ra, offset(sp)" instruction. */
+ do {
+ ip -= 4;
+
+ /* get the code at "ip": code = *(unsigned int *)ip; */
+ safe_load_code(code, ip, faulted);
+
+ if (unlikely(faulted))
+ return 0;
+
+ /* If we hit the non-store instruction before finding where the
+ * ra is stored, then this is a leaf function and it does not
+ * store the ra on the stack. */
+ if ((code & S_R_SP) != S_R_SP)
+ return parent_addr;
+
+ } while (((code & S_RA_SP) != S_RA_SP));
+
+ sp = fp + (code & OFFSET_MASK);
+
+ /* ra = *(unsigned long *)sp; */
+ safe_load_stack(ra, sp, faulted);
+ if (unlikely(faulted))
+ return 0;
+
+ if (ra == parent)
+ return sp;
+ return 0;
+}
+
+#endif
+
+/*
+ * Hook the return address and push it onto the stack of return
+ * addresses in the current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long fp)
+{
+ unsigned long old;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+ int faulted;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /* "parent" is the stack address saved the return address of the caller
+ * of _mcount.
+ *
+ * if the gcc < 4.5, a leaf function does not save the return address
+ * in the stack address, so, we "emulate" one in _mcount's stack space,
+ * and hijack it directly, but for a non-leaf function, it save the
+ * return address to the its own stack space, we can not hijack it
+ * directly, but need to find the real stack address,
+ * ftrace_get_parent_addr() does it!
+ *
+ * if gcc>= 4.5, with the new -mmcount-ra-address option, for a
+ * non-leaf function, the location of the return address will be saved
+ * to $12 for us, and for a leaf function, only put a zero into $12. we
+ * do it in ftrace_graph_caller of mcount.S.
+ */
+
+ /* old = *parent; */
+ safe_load_stack(old, parent, faulted);
+ if (unlikely(faulted))
+ goto out;
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+ parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
+ (unsigned long)parent,
+ fp);
+ /* If getting the stack address of the non-leaf function's ra fails,
+ * stop the function graph tracer and return. */
+ if (parent == 0)
+ goto out;
+#endif
+ /* *parent = return_hooker; */
+ safe_store_stack(return_hooker, parent, faulted);
+ if (unlikely(faulted))
+ goto out;
+
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
+ -EBUSY) {
+ *parent = old;
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent = old;
+ }
+ return;
+out:
+ ftrace_graph_stop();
+ WARN_ON(1);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
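
The patching above hinges on jump_insn_encode(): a MIPS jal carries target bits
27..2 in its 26-bit index field, so the encoded call can only reach addresses
in the same 256 MB segment as the call site. That is why ftrace_make_call()
treats module addresses (long calls via lui/addiu/jalr) and kernel addresses
(a direct jal to _mcount) differently. A minimal sketch of the encoding, using
an arbitrary example address rather than a real symbol:

/* Illustration only: the jal encoding used by jump_insn_encode() in ftrace.c;
 * the target address below is a made-up example, not a real symbol address. */
#include <stdio.h>
#include <stdint.h>

#define JAL		0x0c000000	/* jump-and-link opcode in bits 31..26 */
#define ADDR_MASK	0x03ffffff	/* 26-bit instruction index field */

static uint32_t jump_insn_encode(uint32_t op_code, uint32_t addr)
{
	return op_code | ((addr >> 2) & ADDR_MASK);
}

int main(void)
{
	uint32_t target = 0x80123458;	/* hypothetical KSEG0 _mcount address */
	uint32_t insn = jump_insn_encode(JAL, target);

	/* 0x80123458 >> 2 = 0x20048d16, masked to 0x00048d16, or'ed with the
	 * opcode: prints "jal 0x80123458 -> 0x0c048d16" */
	printf("jal 0x%08x -> 0x%08x\n", target, insn);
	return 0;
}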
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 7b845ba9dff..981f86c2616 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
+#include <linux/ftrace.h>
#include <asm/atomic.h>
#include <asm/system.h>
@@ -99,7 +100,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -118,7 +119,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_putc(p, '\n');
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -150,3 +151,32 @@ void __init init_IRQ(void)
kgdb_early_setup = 1;
#endif
}
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+void __irq_entry do_IRQ(unsigned int irq)
+{
+ irq_enter();
+ __DO_IRQ_SMTC_HOOK(irq);
+ generic_handle_irq(irq);
+ irq_exit();
+}
+
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * To avoid inefficient and in some cases pathological re-checking of
+ * IRQ affinity, we have this variant that skips the affinity check.
+ */
+
+void __irq_entry do_IRQ_no_affinity(unsigned int irq)
+{
+ irq_enter();
+ __NO_AFFINITY_IRQ_SMTC_HOOK(irq);
+ generic_handle_irq(irq);
+ irq_exit();
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index ad4e017ed2f..80e2ba694ba 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -82,6 +82,7 @@ static int sp_stopping;
#define MTSP_O_SHLOCK 0x0010
#define MTSP_O_EXLOCK 0x0020
#define MTSP_O_ASYNC 0x0040
+/* XXX: check which of these is actually O_SYNC vs O_DSYNC */
#define MTSP_O_FSYNC O_SYNC
#define MTSP_O_NOFOLLOW 0x0100
#define MTSP_O_SYNC 0x0080
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 1a2793efdc4..f042563c924 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -67,28 +67,13 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, pgoff)
{
- struct file * file = NULL;
unsigned long error;
error = -EINVAL;
if (pgoff & (~PAGE_MASK >> 12))
goto out;
- pgoff >>= PAGE_SHIFT-12;
-
- if (!(flags & MAP_ANONYMOUS)) {
- error = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
- }
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
-
+ error = sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT-12));
out:
return error;
}
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
new file mode 100644
index 00000000000..0a9cfdb271d
--- /dev/null
+++ b/arch/mips/kernel/mcount.S
@@ -0,0 +1,189 @@
+/*
+ * MIPS specific _mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ */
+
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/ftrace.h>
+
+ .text
+ .set noreorder
+ .set noat
+
+ .macro MCOUNT_SAVE_REGS
+ PTR_SUBU sp, PT_SIZE
+ PTR_S ra, PT_R31(sp)
+ PTR_S AT, PT_R1(sp)
+ PTR_S a0, PT_R4(sp)
+ PTR_S a1, PT_R5(sp)
+ PTR_S a2, PT_R6(sp)
+ PTR_S a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_S a4, PT_R8(sp)
+ PTR_S a5, PT_R9(sp)
+ PTR_S a6, PT_R10(sp)
+ PTR_S a7, PT_R11(sp)
+#endif
+ .endm
+
+ .macro MCOUNT_RESTORE_REGS
+ PTR_L ra, PT_R31(sp)
+ PTR_L AT, PT_R1(sp)
+ PTR_L a0, PT_R4(sp)
+ PTR_L a1, PT_R5(sp)
+ PTR_L a2, PT_R6(sp)
+ PTR_L a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_L a4, PT_R8(sp)
+ PTR_L a5, PT_R9(sp)
+ PTR_L a6, PT_R10(sp)
+ PTR_L a7, PT_R11(sp)
+#endif
+#ifdef CONFIG_64BIT
+ PTR_ADDIU sp, PT_SIZE
+#else
+ PTR_ADDIU sp, (PT_SIZE + 8)
+#endif
+ .endm
+
+ .macro RETURN_BACK
+ jr ra
+ move ra, AT
+ .endm
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+NESTED(ftrace_caller, PT_SIZE, ra)
+ .globl _mcount
+_mcount:
+ b ftrace_stub
+ nop
+ lw t1, function_trace_stop
+ bnez t1, ftrace_stub
+ nop
+
+ MCOUNT_SAVE_REGS
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+ PTR_S t0, PT_R12(sp) /* t0 holds the location of the return address (at), set by -mmcount-ra-address */
+#endif
+
+ move a0, ra /* arg1: next ip, selfaddr */
+ .globl ftrace_call
+ftrace_call:
+ nop /* a placeholder for the call to a real tracing function */
+ move a1, AT /* arg2: the caller's next ip, parent */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ nop
+ nop
+#endif
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+NESTED(_mcount, PT_SIZE, ra)
+ lw t1, function_trace_stop
+ bnez t1, ftrace_stub
+ nop
+ PTR_LA t1, ftrace_stub
+ PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
+ bne t1, t2, static_trace
+ nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_L t3, ftrace_graph_return
+ bne t1, t3, ftrace_graph_caller
+ nop
+ PTR_LA t1, ftrace_graph_entry_stub
+ PTR_L t3, ftrace_graph_entry
+ bne t1, t3, ftrace_graph_caller
+ nop
+#endif
+ b ftrace_stub
+ nop
+
+static_trace:
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: next ip, selfaddr */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: the caller's next ip, parent */
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(_mcount)
+
+#endif /* ! CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+NESTED(ftrace_graph_caller, PT_SIZE, ra)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a1, PT_R31(sp) /* load the original ra from the stack */
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+ PTR_L t0, PT_R12(sp) /* load the original t0 from the stack */
+#endif
+#else
+ MCOUNT_SAVE_REGS
+ move a1, ra /* arg2: next ip, selfaddr */
+#endif
+
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+ bnez t0, 1f /* non-leaf func: t0 saved the location of the return address */
+ nop
+ PTR_LA t0, PT_R1(sp) /* leaf func: get the location of at(old ra) from our own stack */
+1: move a0, t0 /* arg1: the location of the return address */
+#else
+ PTR_LA a0, PT_R1(sp) /* arg1: &AT -> a0 */
+#endif
+ jal prepare_ftrace_return
+#ifdef CONFIG_FRAME_POINTER
+ move a2, fp /* arg3: frame pointer */
+#else
+#ifdef CONFIG_64BIT
+ PTR_LA a2, PT_SIZE(sp)
+#else
+ PTR_LA a2, (PT_SIZE+8)(sp)
+#endif
+#endif
+
+ MCOUNT_RESTORE_REGS
+ RETURN_BACK
+ END(ftrace_graph_caller)
+
+ .align 2
+ .globl return_to_handler
+return_to_handler:
+ PTR_SUBU sp, PT_SIZE
+ PTR_S v0, PT_R2(sp)
+
+ jal ftrace_return_to_handler
+ PTR_S v1, PT_R3(sp)
+
+ /* restore the real parent address: v0 -> ra */
+ move ra, v0
+
+ PTR_L v0, PT_R2(sp)
+ PTR_L v1, PT_R3(sp)
+ jr ra
+ PTR_ADDIU sp, PT_SIZE
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ .set at
+ .set reorder
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 225755d0c1f..1d04807874d 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -13,6 +13,7 @@
#include <asm/checksum.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
+#include <asm/ftrace.h>
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_user_nocheck_asm(char *__to,
@@ -51,3 +52,7 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_user);
EXPORT_SYMBOL(invalid_pte_table);
+#ifdef CONFIG_FUNCTION_TRACER
+/* _mcount is defined in arch/mips/kernel/mcount.S */
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2b290d70083..f9513f9e61d 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -58,8 +58,12 @@ EXPORT_SYMBOL(mips_machtype);
struct boot_mem_map boot_mem_map;
-static char command_line[CL_SIZE];
- char arcs_cmdline[CL_SIZE]=CONFIG_CMDLINE;
+static char __initdata command_line[COMMAND_LINE_SIZE];
+char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+#endif
/*
* mips_io_port_base is the begin of the address space to which x86 style
@@ -166,26 +170,8 @@ static unsigned long __init init_initrd(void)
* already set up initrd_start and initrd_end. In these cases
 * perform sanity checks and use them if all looks good.
*/
- if (!initrd_start || initrd_end <= initrd_start) {
-#ifdef CONFIG_PROBE_INITRD_HEADER
- u32 *initrd_header;
-
- /*
- * See if initrd has been added to the kernel image by
- * arch/mips/boot/addinitrd.c. In that case a header is
- * prepended to initrd and is made up by 8 bytes. The first
- * word is a magic number and the second one is the size of
- * initrd. Initrd start must be page aligned in any cases.
- */
- initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
- if (initrd_header[0] != 0x494E5244)
- goto disable;
- initrd_start = (unsigned long)(initrd_header + 2);
- initrd_end = initrd_start + initrd_header[1];
-#else
+ if (!initrd_start || initrd_end <= initrd_start)
goto disable;
-#endif
- }
if (initrd_start & ~PAGE_MASK) {
pr_err("initrd start must be page aligned\n");
@@ -476,8 +462,20 @@ static void __init arch_mem_init(char **cmdline_p)
pr_info("Determined physical RAM map:\n");
print_memory_map();
- strlcpy(command_line, arcs_cmdline, sizeof(command_line));
- strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0]) {
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+ strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+#endif
+#else
+ strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+#endif
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
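
The setup.c hunk above either overrides the firmware (arcs) command line with the built-in one or appends the built-in line to it. A user-space sketch of that decision, with an assumed buffer size and illustrative names:

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 256

static void build_cmdline(char *out, const char *firmware,
			  const char *builtin, int override)
{
	if (override) {				/* CONFIG_CMDLINE_OVERRIDE */
		snprintf(out, COMMAND_LINE_SIZE, "%s", builtin);
		return;
	}
	snprintf(out, COMMAND_LINE_SIZE, "%s", firmware);
	if (builtin[0]) {			/* CONFIG_CMDLINE_BOOL: append */
		strncat(out, " ", COMMAND_LINE_SIZE - strlen(out) - 1);
		strncat(out, builtin, COMMAND_LINE_SIZE - strlen(out) - 1);
	}
}

int main(void)
{
	char cmdline[COMMAND_LINE_SIZE];

	build_cmdline(cmdline, "console=ttyS0", "root=/dev/hda1", 0);
	printf("append:   %s\n", cmdline);	/* console=ttyS0 root=/dev/hda1 */
	build_cmdline(cmdline, "console=ttyS0", "root=/dev/hda1", 1);
	printf("override: %s\n", cmdline);	/* root=/dev/hda1 */
	return 0;
}
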
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6254041b942..d0c68b5d717 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -35,6 +35,15 @@
#include "signal-common.h"
+static int (*save_fp_context)(struct sigcontext __user *sc);
+static int (*restore_fp_context)(struct sigcontext __user *sc);
+
+extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
+
+extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
+extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
+
/*
* Horribly complicated - with the bloody RM9000 workarounds enabled
* the signal trampolines is moving to the end of the structure so we can
@@ -709,3 +718,40 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
key_replace_session_keyring();
}
}
+
+#ifdef CONFIG_SMP
+static int smp_save_fp_context(struct sigcontext __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? _save_fp_context(sc)
+ : fpu_emulator_save_context(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? _restore_fp_context(sc)
+ : fpu_emulator_restore_context(sc);
+}
+#endif
+
+static int signal_setup(void)
+{
+#ifdef CONFIG_SMP
+ /* For now just do the cpu_has_fpu check when the functions are invoked */
+ save_fp_context = smp_save_fp_context;
+ restore_fp_context = smp_restore_fp_context;
+#else
+ if (cpu_has_fpu) {
+ save_fp_context = _save_fp_context;
+ restore_fp_context = _restore_fp_context;
+ } else {
+ save_fp_context = fpu_emulator_save_context;
+ restore_fp_context = fpu_emulator_restore_context;
+ }
+#endif
+
+ return 0;
+}
+
+arch_initcall(signal_setup);
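
signal_setup() above replaces the old signal_init() from traps.c with an arch_initcall that binds save_fp_context/restore_fp_context once at boot. A tiny user-space sketch of that pick-once-then-call-through-a-pointer pattern; the FPU test is faked with a plain variable and all names are illustrative:

#include <stdio.h>

static int have_fpu = 1;			/* stand-in for cpu_has_fpu */

static int hw_save(void)  { puts("hardware save");  return 0; }
static int emu_save(void) { puts("emulator save");  return 0; }

static int (*save_context)(void);		/* like save_fp_context */

static void signal_setup_like(void)
{
	save_context = have_fpu ? hw_save : emu_save;
}

int main(void)
{
	signal_setup_like();			/* decided once, at "boot" */
	return save_context();			/* callers never re-test the feature */
}

On SMP the patch keeps the per-call raw_cpu_has_fpu test (smp_save_fp_context) because the CPU that runs the initcall need not be the one that later delivers the signal.
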
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 2e74075ac0c..03abaf048f0 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -35,6 +35,15 @@
#include "signal-common.h"
+static int (*save_fp_context32)(struct sigcontext32 __user *sc);
+static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
+
+extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
+
+extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
+
/*
* Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
*/
@@ -828,3 +837,18 @@ SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid,
info.si_code |= __SI_CHLD;
return copy_siginfo_to_user32(uinfo, &info);
}
+
+static int signal32_init(void)
+{
+ if (cpu_has_fpu) {
+ save_fp_context32 = _save_fp_context32;
+ restore_fp_context32 = _restore_fp_context32;
+ } else {
+ save_fp_context32 = fpu_emulator_save_context32;
+ restore_fp_context32 = fpu_emulator_restore_context32;
+ }
+
+ return 0;
+}
+
+arch_initcall(signal32_init);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index e72e6844d13..6cdca1956b7 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -32,6 +32,7 @@
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
+#include <linux/ftrace.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
@@ -130,7 +131,7 @@ asmlinkage __cpuinit void start_secondary(void)
/*
* Call into both interrupt handlers, as we share the IPI for them
*/
-void smp_call_function_interrupt(void)
+void __irq_entry smp_call_function_interrupt(void)
{
irq_enter();
generic_smp_call_function_single_interrupt();
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a38e3ee9551..23499b5bd9c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
+#include <linux/ftrace.h>
#include <asm/cpu.h>
#include <asm/processor.h>
@@ -939,23 +940,29 @@ static void ipi_call_interrupt(void)
DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-void ipi_decode(struct smtc_ipi *pipi)
+static void __irq_entry smtc_clock_tick_interrupt(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
+ int irq = MIPS_CPU_IRQ_BASE + 1;
+
+ irq_enter();
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ irq_exit();
+}
+
+void ipi_decode(struct smtc_ipi *pipi)
+{
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
- int irq = MIPS_CPU_IRQ_BASE + 1;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
- irq_enter();
- kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
- cd = &per_cpu(mips_clockevent_device, cpu);
- cd->event_handler(cd);
- irq_exit();
+ smtc_clock_tick_interrupt();
break;
case LINUX_SMP_IPI:
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index fe0d7980560..3f7f466190b 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -93,7 +93,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
return -EINVAL;
return addr;
}
@@ -129,31 +130,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
}
}
-/* common code for old and new mmaps */
-static inline unsigned long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
- unsigned long error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long,
fd, off_t, offset)
@@ -164,7 +140,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
if (offset & ~PAGE_MASK)
goto out;
- result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
out:
return result;
@@ -177,7 +153,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
if (pgoff & (~PAGE_MASK >> 12))
return -EINVAL;
- return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
save_static_function(sys_fork);
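
The one-line change to arch_get_unmapped_area above matters for VIPT cache aliasing: a MAP_SHARED hint is only usable if the hint address and the file offset land on the same cache color, so the check has to look at addr minus the byte offset, not addr alone. A user-space sketch with assumed values (4K pages, 16K aliasing granularity):

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHM_ALIGN_MASK	0x3fffUL	/* assumed 16K alias granularity */

static int shared_hint_ok(unsigned long addr, unsigned long pgoff)
{
	/* new check: compare the color of addr with the color of the offset */
	return ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK) == 0;
}

int main(void)
{
	/* the old "addr & shm_align_mask" test would have rejected this hint: */
	printf("%d\n", shared_hint_ok(0x1000, 1));	/* 1: colors match */
	printf("%d\n", shared_hint_ok(0x1000, 0));	/* 0: colors differ */
	return 0;
}
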
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0a18b4c62af..308e4346086 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -25,10 +25,12 @@
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
+#include <linux/notifier.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
+#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
@@ -79,10 +81,6 @@ extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu);
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
-#endif
-
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
@@ -857,6 +855,44 @@ static void mt_ase_fp_affinity(void)
#endif /* CONFIG_MIPS_MT_FPAFF */
}
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(cu2_chain);
+
+int __ref register_cu2_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&cu2_chain, nb);
+}
+
+int cu2_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&cu2_chain, val, v);
+}
+
+static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ struct pt_regs *regs = data;
+
+ switch (action) {
+ default:
+ die_if_kernel("Unhandled kernel unaligned access or invalid "
+ "instruction", regs);
+ /* Fall through */
+
+ case CU2_EXCEPTION:
+ force_sig(SIGILL, current);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block default_cu2_notifier = {
+ .notifier_call = default_cu2_call,
+ .priority = 0x80000000, /* Run last */
+};
+
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
@@ -920,17 +956,9 @@ asmlinkage void do_cpu(struct pt_regs *regs)
return;
case 2:
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
- prefetch(&current->thread.cp2);
- local_irq_save(flags);
- KSTK_STATUS(current) |= ST0_CU2;
- status = read_c0_status();
- write_c0_status(status | ST0_CU2);
- octeon_cop2_restore(&(current->thread.cp2));
- write_c0_status(status & ~ST0_CU2);
- local_irq_restore(flags);
- return;
-#endif
+ raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
+ break;
+
case 3:
break;
}
@@ -1367,77 +1395,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
return set_vi_srs_handler(n, addr, 0);
}
-/*
- * This is used by native signal handling
- */
-asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
-asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
-
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
-
-#ifdef CONFIG_SMP
-static int smp_save_fp_context(struct sigcontext __user *sc)
-{
- return raw_cpu_has_fpu
- ? _save_fp_context(sc)
- : fpu_emulator_save_context(sc);
-}
-
-static int smp_restore_fp_context(struct sigcontext __user *sc)
-{
- return raw_cpu_has_fpu
- ? _restore_fp_context(sc)
- : fpu_emulator_restore_context(sc);
-}
-#endif
-
-static inline void signal_init(void)
-{
-#ifdef CONFIG_SMP
- /* For now just do the cpu_has_fpu check when the functions are invoked */
- save_fp_context = smp_save_fp_context;
- restore_fp_context = smp_restore_fp_context;
-#else
- if (cpu_has_fpu) {
- save_fp_context = _save_fp_context;
- restore_fp_context = _restore_fp_context;
- } else {
- save_fp_context = fpu_emulator_save_context;
- restore_fp_context = fpu_emulator_restore_context;
- }
-#endif
-}
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * This is used by 32-bit signal stuff on the 64-bit kernel
- */
-asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
-asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
-extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
-
-static inline void signal32_init(void)
-{
- if (cpu_has_fpu) {
- save_fp_context32 = _save_fp_context32;
- restore_fp_context32 = _restore_fp_context32;
- } else {
- save_fp_context32 = fpu_emulator_save_context32;
- restore_fp_context32 = fpu_emulator_restore_context32;
- }
-}
-#endif
-
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
@@ -1751,13 +1708,10 @@ void __init trap_init(void)
else
memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
- signal_init();
-#ifdef CONFIG_MIPS32_COMPAT
- signal32_init();
-#endif
-
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
sort_extable(__start___dbe_table, __stop___dbe_table);
+
+ register_cu2_notifier(&default_cu2_notifier);
}
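
With the Octeon #ifdef gone from do_cpu(), a platform that actually uses COP2 is expected to hook the new notifier chain instead. A hedged sketch of what such a registration could look like; my_cop2_restore() and the other my_* names are placeholders, not part of this patch. Returning NOTIFY_STOP keeps the default notifier, which runs last and raises SIGILL, from firing:

static int my_cu2_call(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct pt_regs *regs = data;

	if (action != CU2_EXCEPTION)
		return NOTIFY_DONE;		/* not our event */

	my_cop2_restore(regs);			/* placeholder: enable and restore CP2 state */
	return NOTIFY_STOP;			/* handled; skip the default SIGILL */
}

static struct notifier_block my_cu2_notifier = {
	.notifier_call = my_cu2_call,		/* default priority 0 runs before the INT_MIN default */
};

static int __init my_cu2_init(void)
{
	return register_cu2_notifier(&my_cu2_notifier);
}
arch_initcall(my_cu2_init);
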
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 67bd626942a..69b039ca8d8 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -81,6 +81,7 @@
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
+#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -451,17 +452,27 @@ static void emulate_load_store_insn(struct pt_regs *regs,
*/
goto sigbus;
+ /*
+	 * COP2 is available to the implementor for application-specific use.
+	 * It's up to applications to register a notifier chain and do
+	 * whatever they have to do, possibly including sending signals.
+ */
case lwc2_op:
+ cu2_notifier_call_chain(CU2_LWC2_OP, regs);
+ break;
+
case ldc2_op:
+ cu2_notifier_call_chain(CU2_LDC2_OP, regs);
+ break;
+
case swc2_op:
+ cu2_notifier_call_chain(CU2_SWC2_OP, regs);
+ break;
+
case sdc2_op:
- /*
- * These are the coprocessor 2 load/stores. The current
- * implementations don't use cp2 and cp2 should always be
- * disabled in c0_status. So send SIGILL.
- * (No longer true: The Sony Praystation uses cp2 for
- * 3D matrix operations. Dunno if that thingy has a MMU ...)
- */
+ cu2_notifier_call_chain(CU2_SDC2_OP, regs);
+ break;
+
default:
/*
 * Pheeee... We encountered an as yet unknown instruction or
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 162b29954ba..f25df73db92 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -46,6 +46,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
*(.text.*)
*(.fixup)
*(.gnu.warning)
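
The IRQENTRY_TEXT line added to vmlinux.lds.S collects functions marked __irq_entry (as done above for smp_call_function_interrupt and smtc_clock_tick_interrupt) into the .irqentry.text section, which lets the function graph tracer tell hardirq entry points apart from ordinary calls. A hedged sketch of how a handler opts in; the handler name is illustrative:

#include <linux/ftrace.h>	/* __irq_entry: places the function in .irqentry.text */
#include <linux/hardirq.h>	/* irq_enter() / irq_exit() */

void __irq_entry my_ipi_interrupt(void)
{
	irq_enter();
	/* ... acknowledge and dispatch the IPI here ... */
	irq_exit();
}
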
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index 0bb6037afba..8e388da1926 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -4,12 +4,14 @@
* Brian Murphy <brian.murphy@eicon.com>
*
*/
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -38,12 +40,9 @@ static void pvc_display(unsigned long data)
static DECLARE_TASKLET(pvc_display_tasklet, &pvc_display, 0);
-static int pvc_proc_read_line(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
+static int pvc_line_proc_show(struct seq_file *m, void *v)
{
- char *origpage = page;
- int lineno = *(int *)data;
+ int lineno = *(int *)m->private;
if (lineno < 0 || lineno > PVC_NLINES) {
printk(KERN_WARNING "proc_read_line: invalid lineno %d\n", lineno);
@@ -51,45 +50,66 @@ static int pvc_proc_read_line(char *page, char **start,
}
mutex_lock(&pvc_mutex);
- page += sprintf(page, "%s\n", pvc_lines[lineno]);
+ seq_printf(m, "%s\n", pvc_lines[lineno]);
mutex_unlock(&pvc_mutex);
- return page - origpage;
+ return 0;
}
-static int pvc_proc_write_line(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int pvc_line_proc_open(struct inode *inode, struct file *file)
{
- int origcount = count;
- int lineno = *(int *)data;
+ return single_open(file, pvc_line_proc_show, PDE(inode)->data);
+}
- if (lineno < 0 || lineno > PVC_NLINES) {
- printk(KERN_WARNING "proc_write_line: invalid lineno %d\n",
- lineno);
- return origcount;
- }
+static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int lineno = *(int *)PDE(file->f_path.dentry->d_inode)->data;
+ char kbuf[PVC_LINELEN];
+ size_t len;
+
+ BUG_ON(lineno < 0 || lineno > PVC_NLINES);
- if (count > PVC_LINELEN)
- count = PVC_LINELEN;
+ len = min(count, sizeof(kbuf) - 1);
+ if (copy_from_user(kbuf, buf, len))
+ return -EFAULT;
+ kbuf[len] = '\0';
- if (buffer[count-1] == '\n')
- count--;
+ if (len > 0 && kbuf[len - 1] == '\n')
+ len--;
mutex_lock(&pvc_mutex);
- strncpy(pvc_lines[lineno], buffer, count);
- pvc_lines[lineno][count] = '\0';
+ strncpy(pvc_lines[lineno], kbuf, len);
+ pvc_lines[lineno][len] = '\0';
mutex_unlock(&pvc_mutex);
tasklet_schedule(&pvc_display_tasklet);
- return origcount;
+ return count;
}
-static int pvc_proc_write_scroll(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static const struct file_operations pvc_line_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pvc_line_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pvc_line_proc_write,
+};
+
+static ssize_t pvc_scroll_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
- int origcount = count;
- int cmd = simple_strtol(buffer, NULL, 10);
+ char kbuf[42];
+ size_t len;
+ int cmd;
+
+ len = min(count, sizeof(kbuf) - 1);
+ if (copy_from_user(kbuf, buf, len))
+ return -EFAULT;
+ kbuf[len] = '\0';
+
+ cmd = simple_strtol(kbuf, NULL, 10);
mutex_lock(&pvc_mutex);
if (scroll_interval != 0)
@@ -110,22 +130,31 @@ static int pvc_proc_write_scroll(struct file *file, const char *buffer,
}
mutex_unlock(&pvc_mutex);
- return origcount;
+ return count;
}
-static int pvc_proc_read_scroll(char *page, char **start,
- off_t off, int count,
- int *eof, void *data)
+static int pvc_scroll_proc_show(struct seq_file *m, void *v)
{
- char *origpage = page;
-
mutex_lock(&pvc_mutex);
- page += sprintf(page, "%d\n", scroll_dir * scroll_interval);
+ seq_printf(m, "%d\n", scroll_dir * scroll_interval);
mutex_unlock(&pvc_mutex);
- return page - origpage;
+ return 0;
}
+static int pvc_scroll_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pvc_scroll_proc_show, NULL);
+}
+
+static const struct file_operations pvc_scroll_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pvc_scroll_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pvc_scroll_proc_write,
+};
void pvc_proc_timerfunc(unsigned long data)
{
@@ -163,22 +192,16 @@ static int __init pvc_proc_init(void)
pvc_linedata[i] = i;
}
for (i = 0; i < PVC_NLINES; i++) {
- proc_entry = create_proc_entry(pvc_linename[i], 0644,
- pvc_display_dir);
+ proc_entry = proc_create_data(pvc_linename[i], 0644, pvc_display_dir,
+ &pvc_line_proc_fops, &pvc_linedata[i]);
if (proc_entry == NULL)
goto error;
-
- proc_entry->read_proc = pvc_proc_read_line;
- proc_entry->write_proc = pvc_proc_write_line;
- proc_entry->data = &pvc_linedata[i];
}
- proc_entry = create_proc_entry("scroll", 0644, pvc_display_dir);
+ proc_entry = proc_create("scroll", 0644, pvc_display_dir,
+ &pvc_scroll_proc_fops);
if (proc_entry == NULL)
goto error;
- proc_entry->write_proc = pvc_proc_write_scroll;
- proc_entry->read_proc = pvc_proc_read_scroll;
-
init_timer(&timer);
timer.function = pvc_proc_timerfunc;
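
The picvue conversion above moves from the old read_proc/write_proc hooks to proc_create_data() plus the seq_file single_open() helpers. A minimal sketch of that read-side pattern as the API looked around 2.6.32; the entry name, my_show() and the string passed as private data are illustrative:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *m, void *v)
{
	/* m->private carries the pointer handed to proc_create_data() */
	seq_printf(m, "%s\n", (char *)m->private);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, PDE(inode)->data);
}

static const struct file_operations my_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = my_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

/* registration, e.g. from an __init function:
 *	proc_create_data("example", 0444, NULL, &my_proc_fops, "hello");
 */
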
diff --git a/arch/mips/lasat/prom.c b/arch/mips/lasat/prom.c
index 6acc6cb85f0..20fde19a5fb 100644
--- a/arch/mips/lasat/prom.c
+++ b/arch/mips/lasat/prom.c
@@ -100,8 +100,8 @@ void __init prom_init(void)
/* Get the command line */
if (argc > 0) {
- strncpy(arcs_cmdline, argv[0], CL_SIZE-1);
- arcs_cmdline[CL_SIZE-1] = '\0';
+ strncpy(arcs_cmdline, argv[0], COMMAND_LINE_SIZE-1);
+ arcs_cmdline[COMMAND_LINE_SIZE-1] = '\0';
}
/* Set the I/O base address */
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index 14b9a28a4ae..d87ffd04cb0 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -204,7 +204,7 @@ static ctl_table lasat_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_lasat_prid,
-. },
+ },
#ifdef CONFIG_INET
{
.procname = "ipaddr",
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index d45092505fa..3df1967dea0 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -1,31 +1,85 @@
choice
- prompt "Machine Type"
- depends on MACH_LOONGSON
+ prompt "Machine Type"
+ depends on MACH_LOONGSON
config LEMOTE_FULOONG2E
- bool "Lemote Fuloong(2e) mini-PC"
- select ARCH_SPARSEMEM_ENABLE
- select CEVT_R4K
- select CSRC_R4K
- select SYS_HAS_CPU_LOONGSON2E
- select DMA_NONCOHERENT
- select BOOT_ELF32
- select BOARD_SCACHE
- select HW_HAS_PCI
- select I8259
- select ISA
- select IRQ_CPU
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_64BIT_KERNEL
- select SYS_SUPPORTS_LITTLE_ENDIAN
- select SYS_SUPPORTS_HIGHMEM
- select SYS_HAS_EARLY_PRINTK
- select GENERIC_HARDIRQS_NO__DO_IRQ
- select GENERIC_ISA_DMA_SUPPORT_BROKEN
- select CPU_HAS_WB
- help
- Lemote Fuloong(2e) mini-PC board based on the Chinese Loongson-2E CPU and
- an FPGA northbridge
-
- Lemote Fuloong(2e) mini PC have a VIA686B south bridge.
+ bool "Lemote Fuloong(2e) mini-PC"
+ select ARCH_SPARSEMEM_ENABLE
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYS_HAS_CPU_LOONGSON2E
+ select DMA_NONCOHERENT
+ select BOOT_ELF32
+ select BOARD_SCACHE
+ select HW_HAS_PCI
+ select I8259
+ select ISA
+ select IRQ_CPU
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_EARLY_PRINTK
+ select GENERIC_HARDIRQS_NO__DO_IRQ
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select CPU_HAS_WB
+ help
+ Lemote Fuloong(2e) mini-PC board based on the Chinese Loongson-2E CPU and
+ an FPGA northbridge
+
+ The Lemote Fuloong(2e) mini-PC has a VIA686B south bridge.
+
+config LEMOTE_MACH2F
+ bool "Lemote Loongson 2F family machines"
+ select ARCH_SPARSEMEM_ENABLE
+ select BOARD_SCACHE
+ select BOOT_ELF32
+ select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
+ select CPU_HAS_WB
+ select CS5536
+ select CSRC_R4K if ! MIPS_EXTERNAL_TIMER
+ select DMA_NONCOHERENT
+ select GENERIC_HARDIRQS_NO__DO_IRQ
+ select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select HW_HAS_PCI
+ select I8259
+ select IRQ_CPU
+ select ISA
+ select SYS_HAS_CPU_LOONGSON2F
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ help
+ Lemote Loongson 2F family machines use the 2F revision of the
+ Loongson processor and the AMD CS5536 south bridge.
+
+ These machines include the fuloong2f mini PC, the yeeloong2f notebook,
+ the LingLoong all-in-one PC and so forth.
endchoice
+
+config CS5536
+ bool
+
+config CS5536_MFGPT
+ bool "CS5536 MFGPT Timer"
+ depends on CS5536
+ select MIPS_EXTERNAL_TIMER
+ help
+ This option enables the MFGPT0 timer of the AMD CS5536.
+
+ If you want to enable the Loongson2 CPUFreq driver, please enable
+ this option first; otherwise, you will get a wrong system time.
+
+ If unsure, say Yes.
+
+config LOONGSON_SUSPEND
+ bool
+ default y
+ depends on CPU_SUPPORTS_CPUFREQ && SUSPEND
+
+config LOONGSON_UART_BASE
+ bool
+ default y
+ depends on EARLY_PRINTK || SERIAL_8250
diff --git a/arch/mips/loongson/Makefile b/arch/mips/loongson/Makefile
index 39048c455d7..2b76cb0fb07 100644
--- a/arch/mips/loongson/Makefile
+++ b/arch/mips/loongson/Makefile
@@ -9,3 +9,9 @@ obj-$(CONFIG_MACH_LOONGSON) += common/
#
obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
+
+#
+# Lemote loongson2f family machines
+#
+
+obj-$(CONFIG_LEMOTE_MACH2F) += lemote-2f/
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 656b3cc0a2a..7668c4de115 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -3,9 +3,23 @@
#
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
- pci.o bonito-irq.o mem.o machtype.o
+ pci.o bonito-irq.o mem.o machtype.o platform.o
#
-# Early printk support
+# Serial port support
#
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_SERIAL_8250) += serial.o
+obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
+
+#
+# Enable the CS5536 Virtual Support Module (VSM) to virtualize the PCI
+# configuration space
+#
+obj-$(CONFIG_CS5536) += cs5536/
+
+#
+# Suspend Support
+#
+
+obj-$(CONFIG_LOONGSON_SUSPEND) += pm.o
diff --git a/arch/mips/loongson/common/bonito-irq.c b/arch/mips/loongson/common/bonito-irq.c
index 3e31e7ad713..2dc2a4cc632 100644
--- a/arch/mips/loongson/common/bonito-irq.c
+++ b/arch/mips/loongson/common/bonito-irq.c
@@ -12,18 +12,19 @@
* option) any later version.
*/
#include <linux/interrupt.h>
+#include <linux/compiler.h>
#include <loongson.h>
static inline void bonito_irq_enable(unsigned int irq)
{
- BONITO_INTENSET = (1 << (irq - BONITO_IRQ_BASE));
+ LOONGSON_INTENSET = (1 << (irq - LOONGSON_IRQ_BASE));
mmiowb();
}
static inline void bonito_irq_disable(unsigned int irq)
{
- BONITO_INTENCLR = (1 << (irq - BONITO_IRQ_BASE));
+ LOONGSON_INTENCLR = (1 << (irq - LOONGSON_IRQ_BASE));
mmiowb();
}
@@ -35,7 +36,7 @@ static struct irq_chip bonito_irq_type = {
.unmask = bonito_irq_enable,
};
-static struct irqaction dma_timeout_irqaction = {
+static struct irqaction __maybe_unused dma_timeout_irqaction = {
.handler = no_action,
.name = "dma_timeout",
};
@@ -44,8 +45,10 @@ void bonito_irq_init(void)
{
u32 i;
- for (i = BONITO_IRQ_BASE; i < BONITO_IRQ_BASE + 32; i++)
+ for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++)
set_irq_chip_and_handler(i, &bonito_irq_type, handle_level_irq);
- setup_irq(BONITO_IRQ_BASE + 10, &dma_timeout_irqaction);
+#ifdef CONFIG_CPU_LOONGSON2E
+ setup_irq(LOONGSON_IRQ_BASE + 10, &dma_timeout_irqaction);
+#endif
}
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 75f1b243ee4..7ad47f22747 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -9,7 +9,7 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * Copyright (C) 2009 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzj@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
@@ -49,4 +49,6 @@ void __init prom_init_cmdline(void)
strcat(arcs_cmdline, " console=ttyS0,115200");
if ((strstr(arcs_cmdline, "root=")) == NULL)
strcat(arcs_cmdline, " root=/dev/hda1");
+
+ prom_init_machtype();
}
diff --git a/arch/mips/loongson/common/cs5536/Makefile b/arch/mips/loongson/common/cs5536/Makefile
new file mode 100644
index 00000000000..510d4cdc237
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for CS5536 support.
+#
+
+obj-$(CONFIG_CS5536) += cs5536_pci.o cs5536_ide.o cs5536_acc.o cs5536_ohci.o \
+ cs5536_isa.o cs5536_ehci.o
+
+#
+# Enable cs5536 mfgpt Timer
+#
+obj-$(CONFIG_CS5536_MFGPT) += cs5536_mfgpt.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/loongson/common/cs5536/cs5536_acc.c b/arch/mips/loongson/common/cs5536/cs5536_acc.c
new file mode 100644
index 00000000000..b49485f187e
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/cs5536_acc.c
@@ -0,0 +1,140 @@
+/*
+ * the ACC Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_acc_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ lo |= (0x03 << 8);
+ else
+ lo &= ~(0x03 << 8);
+ _wrmsr(GLIU_MSR_REG(GLIU_PAE), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_BAR0_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_ACC_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if (value & 0x01) {
+ value &= 0xfffffffc;
+ hi = 0xA0000000 | ((value & 0x000ff000) >> 12);
+ lo = 0x000fff80 | ((value & 0x00000fff) << 20);
+ _wrmsr(GLIU_MSR_REG(GLIU_IOD_BM1), hi, lo);
+ }
+ break;
+ case PCI_ACC_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
+ /* disable all the usb interrupt in PIC */
+ lo &= ~(0xf << PIC_YSEL_LOW_ACC_SHIFT);
+ if (value) /* enable all the acc interrupt in PIC */
+ lo |= (CS5536_ACC_INTR << PIC_YSEL_LOW_ACC_SHIFT);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_acc_read_reg(int reg)
+{
+ u32 hi, lo;
+ u32 conf_data = 0;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ACC_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(GLIU_MSR_REG(GLIU_IOD_BM1), &hi, &lo);
+ if (((lo & 0xfff00000) || (hi & 0x000000ff))
+ && ((hi & 0xf0000000) == 0xa0000000))
+ conf_data |= PCI_COMMAND_IO;
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if ((lo & 0x300) == 0x300)
+ conf_data |= PCI_COMMAND_MASTER;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(ACC_MSR_REG(ACC_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_ACC_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ conf_data =
+ CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
+ PCI_NORMAL_LATENCY_TIMER);
+ break;
+ case PCI_BAR0_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_ACC_FLAG) {
+ conf_data = CS5536_ACC_RANGE |
+ PCI_BASE_ADDRESS_SPACE_IO;
+ lo &= ~SOFT_BAR_ACC_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(GLIU_MSR_REG(GLIU_IOD_BM1), &hi, &lo);
+ conf_data = (hi & 0x000000ff) << 12;
+ conf_data |= (lo & 0xfff00000) >> 20;
+ conf_data |= 0x01;
+ conf_data &= ~0x02;
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ACC_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_USB_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_ACC_INTR);
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
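
A recurring trick in these VSM files is BAR sizing emulation: a config write of all ones (PCI_BAR_RANGE_MASK) arms a soft flag in GLCP_SOFT_COM, and the following read of the BAR returns the range mask instead of the programmed address, which is exactly what a PCI BAR size probe expects. A user-space sketch of that handshake with illustrative values:

#include <stdio.h>
#include <stdint.h>

#define BAR_RANGE_MASK	0xffffffffu
#define MY_RANGE	0xffffc001u	/* pretend 16K I/O range, bit 0 = I/O space */

static uint32_t bar_addr = 0x2001;	/* currently programmed I/O BAR */
static int soft_flag;			/* plays the role of SOFT_BAR_xxx_FLAG */

static void bar_write(uint32_t val)
{
	if (val == BAR_RANGE_MASK)
		soft_flag = 1;			/* next read reports the size */
	else if (val & 1)
		bar_addr = (val & ~0x3u) | 1;	/* reprogram the I/O BAR */
}

static uint32_t bar_read(void)
{
	if (soft_flag) {
		soft_flag = 0;
		return MY_RANGE;
	}
	return bar_addr;
}

int main(void)
{
	bar_write(BAR_RANGE_MASK);
	printf("size probe: %#x\n", (unsigned)bar_read());	/* 0xffffc001 */
	printf("address:    %#x\n", (unsigned)bar_read());	/* 0x2001 */
	return 0;
}
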
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ehci.c b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
new file mode 100644
index 00000000000..74f9c59d36a
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
@@ -0,0 +1,158 @@
+/*
+ * the EHCI Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_ehci_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ hi |= PCI_COMMAND_MASTER;
+ else
+ hi &= ~PCI_COMMAND_MASTER;
+
+ if (value & PCI_COMMAND_MEMORY)
+ hi |= PCI_COMMAND_MEMORY;
+ else
+ hi &= ~PCI_COMMAND_MEMORY;
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_BAR0_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_EHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if ((value & 0x01) == 0x00) {
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+
+ value &= 0xfffffff0;
+ hi = 0x40000000 | ((value & 0xff000000) >> 24);
+ lo = 0x000fffff | ((value & 0x00fff000) << 8);
+ _wrmsr(GLIU_MSR_REG(GLIU_P2D_BM4), hi, lo);
+ }
+ break;
+ case PCI_EHCI_LEGSMIEN_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ hi &= 0x003f0000;
+ hi |= (value & 0x3f) << 16;
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+ break;
+ case PCI_EHCI_FLADJ_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ hi &= ~0x00003f00;
+ hi |= value & 0x00003f00;
+ _wrmsr(USB_MSR_REG(USB_EHCI), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_ehci_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_EHCI_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ if (hi & PCI_COMMAND_MASTER)
+ conf_data |= PCI_COMMAND_MASTER;
+ if (hi & PCI_COMMAND_MEMORY)
+ conf_data |= PCI_COMMAND_MEMORY;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(USB_MSR_REG(USB_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_EHCI_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ conf_data =
+ CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
+ PCI_NORMAL_LATENCY_TIMER);
+ break;
+ case PCI_BAR0_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_EHCI_FLAG) {
+ conf_data = CS5536_EHCI_RANGE |
+ PCI_BASE_ADDRESS_SPACE_MEMORY;
+ lo &= ~SOFT_BAR_EHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = lo & 0xfffff000;
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_EHCI_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_USB_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_USB_INTR);
+ break;
+ case PCI_EHCI_LEGSMIEN_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = (hi & 0x003f0000) >> 16;
+ break;
+ case PCI_EHCI_LEGSMISTS_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = (hi & 0x3f000000) >> 24;
+ break;
+ case PCI_EHCI_FLADJ_REG:
+ _rdmsr(USB_MSR_REG(USB_EHCI), &hi, &lo);
+ conf_data = hi & 0x00003f00;
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ide.c b/arch/mips/loongson/common/cs5536/cs5536_ide.c
new file mode 100644
index 00000000000..3f61594b388
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/cs5536_ide.c
@@ -0,0 +1,179 @@
+/*
+ * the IDE Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_ide_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ lo |= (0x03 << 4);
+ else
+ lo &= ~(0x03 << 4);
+ _wrmsr(GLIU_MSR_REG(GLIU_PAE), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ value &= 0x0000ff00;
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0xffffff00;
+ hi |= (value >> 8);
+ _wrmsr(SB_MSR_REG(SB_CTRL), hi, lo);
+ break;
+ case PCI_BAR4_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_IDE_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if (value & 0x01) {
+ lo = (value & 0xfffffff0) | 0x1;
+ _wrmsr(IDE_MSR_REG(IDE_IO_BAR), hi, lo);
+
+ value &= 0xfffffffc;
+ hi = 0x60000000 | ((value & 0x000ff000) >> 12);
+ lo = 0x000ffff0 | ((value & 0x00000fff) << 20);
+ _wrmsr(GLIU_MSR_REG(GLIU_IOD_BM2), hi, lo);
+ }
+ break;
+ case PCI_IDE_CFG_REG:
+ if (value == CS5536_IDE_FLASH_SIGNATURE) {
+ _rdmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), &hi, &lo);
+ lo |= 0x01;
+ _wrmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), hi, lo);
+ } else
+ _wrmsr(IDE_MSR_REG(IDE_CFG), hi, lo);
+ break;
+ case PCI_IDE_DTC_REG:
+ _wrmsr(IDE_MSR_REG(IDE_DTC), hi, lo);
+ break;
+ case PCI_IDE_CAST_REG:
+ _wrmsr(IDE_MSR_REG(IDE_CAST), hi, lo);
+ break;
+ case PCI_IDE_ETC_REG:
+ _wrmsr(IDE_MSR_REG(IDE_ETC), hi, lo);
+ break;
+ case PCI_IDE_PM_REG:
+ _wrmsr(IDE_MSR_REG(IDE_INTERNAL_PM), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_ide_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_IDE_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
+ if (lo & 0xfffffff0)
+ conf_data |= PCI_COMMAND_IO;
+ _rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
+ if ((lo & 0x30) == 0x30)
+ conf_data |= PCI_COMMAND_MASTER;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(IDE_MSR_REG(IDE_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_IDE_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0x000000f8;
+ conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE, hi);
+ break;
+ case PCI_BAR4_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_IDE_FLAG) {
+ conf_data = CS5536_IDE_RANGE |
+ PCI_BASE_ADDRESS_SPACE_IO;
+ lo &= ~SOFT_BAR_IDE_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
+ conf_data = lo & 0xfffffff0;
+ conf_data |= 0x01;
+ conf_data &= ~0x02;
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_IDE_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_IDE_INTR);
+ break;
+ case PCI_IDE_CFG_REG:
+ _rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
+ conf_data = lo;
+ break;
+ case PCI_IDE_DTC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
+ conf_data = lo;
+ break;
+ case PCI_IDE_CAST_REG:
+ _rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
+ conf_data = lo;
+ break;
+ case PCI_IDE_ETC_REG:
+ _rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
+ conf_data = lo;
+ break; /* do not fall through into PCI_IDE_PM_REG */
+ case PCI_IDE_PM_REG:
+ _rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
+ conf_data = lo;
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
diff --git a/arch/mips/loongson/common/cs5536/cs5536_isa.c b/arch/mips/loongson/common/cs5536/cs5536_isa.c
new file mode 100644
index 00000000000..b6f17f538e4
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/cs5536_isa.c
@@ -0,0 +1,316 @@
+/*
+ * the ISA Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+/* common variables for PCI_ISA_READ/WRITE_BAR */
+static const u32 divil_msr_reg[6] = {
+ DIVIL_MSR_REG(DIVIL_LBAR_SMB), DIVIL_MSR_REG(DIVIL_LBAR_GPIO),
+ DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), DIVIL_MSR_REG(DIVIL_LBAR_IRQ),
+ DIVIL_MSR_REG(DIVIL_LBAR_PMS), DIVIL_MSR_REG(DIVIL_LBAR_ACPI),
+};
+
+static const u32 soft_bar_flag[6] = {
+ SOFT_BAR_SMB_FLAG, SOFT_BAR_GPIO_FLAG, SOFT_BAR_MFGPT_FLAG,
+ SOFT_BAR_IRQ_FLAG, SOFT_BAR_PMS_FLAG, SOFT_BAR_ACPI_FLAG,
+};
+
+static const u32 sb_msr_reg[6] = {
+ SB_MSR_REG(SB_R0), SB_MSR_REG(SB_R1), SB_MSR_REG(SB_R2),
+ SB_MSR_REG(SB_R3), SB_MSR_REG(SB_R4), SB_MSR_REG(SB_R5),
+};
+
+static const u32 bar_space_range[6] = {
+ CS5536_SMB_RANGE, CS5536_GPIO_RANGE, CS5536_MFGPT_RANGE,
+ CS5536_IRQ_RANGE, CS5536_PMS_RANGE, CS5536_ACPI_RANGE,
+};
+
+static const int bar_space_len[6] = {
+ CS5536_SMB_LENGTH, CS5536_GPIO_LENGTH, CS5536_MFGPT_LENGTH,
+ CS5536_IRQ_LENGTH, CS5536_PMS_LENGTH, CS5536_ACPI_LENGTH,
+};
+
+/*
+ * enable the divil module bar space.
+ *
+ * For each DIVIL module LBAR, you have to program both the DIVIL LBAR reg
+ * and the RCONFx (0~5) reg to use the module.
+ */
+static void divil_lbar_enable(void)
+{
+ u32 hi, lo;
+ int offset;
+
+ /*
+ * The DIVIL IRQ is not used yet, and RCONF0 is kept reserved.
+ */
+
+ for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
+ _rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
+ hi |= 0x01;
+ _wrmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), hi, lo);
+ }
+}
+
+/*
+ * disable the divil module bar space.
+ */
+static void divil_lbar_disable(void)
+{
+ u32 hi, lo;
+ int offset;
+
+ for (offset = DIVIL_LBAR_SMB; offset <= DIVIL_LBAR_PMS; offset++) {
+ _rdmsr(DIVIL_MSR_REG(offset), &hi, &lo);
+ hi &= ~0x01;
+ _wrmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), hi, lo);
+ }
+}
+
+/*
+ * BAR write: write value to the n BAR
+ */
+
+void pci_isa_write_bar(int n, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= soft_bar_flag[n];
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if (value & 0x01) {
+ /* NATIVE reg */
+ hi = 0x0000f001;
+ lo &= bar_space_range[n];
+ _wrmsr(divil_msr_reg[n], hi, lo);
+
+ /* RCONFx is 4bytes in units for I/O space */
+ hi = ((value & 0x000ffffc) << 12) |
+ ((bar_space_len[n] - 4) << 12) | 0x01;
+ lo = ((value & 0x000ffffc) << 12) | 0x01;
+ _wrmsr(sb_msr_reg[n], hi, lo);
+ }
+}
+
+/*
+ * BAR read: read the n BAR
+ */
+
+u32 pci_isa_read_bar(int n)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & soft_bar_flag[n]) {
+ conf_data = bar_space_range[n] | PCI_BASE_ADDRESS_SPACE_IO;
+ lo &= ~soft_bar_flag[n];
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(divil_msr_reg[n], &hi, &lo);
+ conf_data = lo & bar_space_range[n];
+ conf_data |= 0x01;
+ conf_data &= ~0x02;
+ }
+ return conf_data;
+}
+
+/*
+ * isa_write: ISA write transfer
+ *
+ * We assume that this is not a bus master transfer.
+ */
+void pci_isa_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+ u32 temp;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ if (value & PCI_COMMAND_IO)
+ divil_lbar_enable();
+ else
+ divil_lbar_disable();
+ break;
+ case PCI_STATUS:
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ temp = lo & 0x0000ffff;
+ if ((value & PCI_STATUS_SIG_TARGET_ABORT) &&
+ (lo & SB_TAS_ERR_EN))
+ temp |= SB_TAS_ERR_FLAG;
+
+ if ((value & PCI_STATUS_REC_TARGET_ABORT) &&
+ (lo & SB_TAR_ERR_EN))
+ temp |= SB_TAR_ERR_FLAG;
+
+ if ((value & PCI_STATUS_REC_MASTER_ABORT)
+ && (lo & SB_MAR_ERR_EN))
+ temp |= SB_MAR_ERR_FLAG;
+
+ if ((value & PCI_STATUS_DETECTED_PARITY)
+ && (lo & SB_PARE_ERR_EN))
+ temp |= SB_PARE_ERR_FLAG;
+
+ lo = temp;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ value &= 0x0000ff00;
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0xffffff00;
+ hi |= (value >> 8);
+ _wrmsr(SB_MSR_REG(SB_CTRL), hi, lo);
+ break;
+ case PCI_BAR0_REG:
+ pci_isa_write_bar(0, value);
+ break;
+ case PCI_BAR1_REG:
+ pci_isa_write_bar(1, value);
+ break;
+ case PCI_BAR2_REG:
+ pci_isa_write_bar(2, value);
+ break;
+ case PCI_BAR3_REG:
+ pci_isa_write_bar(3, value);
+ break;
+ case PCI_BAR4_REG:
+ pci_isa_write_bar(4, value);
+ break;
+ case PCI_BAR5_REG:
+ pci_isa_write_bar(5, value);
+ break;
+ case PCI_UART1_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo);
+ /* disable uart1 interrupt in PIC */
+ lo &= ~(0xf << 24);
+ if (value) /* enable uart1 interrupt in PIC */
+ lo |= (CS5536_UART1_INTR << 24);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo);
+ break;
+ case PCI_UART2_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), &hi, &lo);
+ /* disable uart2 interrupt in PIC */
+ lo &= ~(0xf << 28);
+ if (value) /* enable uart2 interrupt in PIC */
+ lo |= (CS5536_UART2_INTR << 28);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_HIGH), hi, lo);
+ break;
+ case PCI_ISA_FIXUP_REG:
+ if (value) {
+ /* enable the TARGET ABORT/MASTER ABORT etc. */
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ lo |= 0x00000063;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+
+ default:
+ /* ALL OTHER PCI CONFIG SPACE HEADER IS NOT IMPLEMENTED. */
+ break;
+ }
+}
+
+/*
+ * isa_read: ISA read transfers
+ *
+ * We assume that this is not a bus master transfer.
+ */
+u32 pci_isa_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ISA_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ /* We just check the first LBAR for the I/O enable bit; */
+ /* this may need to be changed later. */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_SMB), &hi, &lo);
+ if (hi & 0x01)
+ conf_data |= PCI_COMMAND_IO;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ conf_data |= PCI_STATUS_FAST_BACK;
+
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_TAS_ERR_FLAG)
+ conf_data |= PCI_STATUS_SIG_TARGET_ABORT;
+ if (lo & SB_TAR_ERR_FLAG)
+ conf_data |= PCI_STATUS_REC_TARGET_ABORT;
+ if (lo & SB_MAR_ERR_FLAG)
+ conf_data |= PCI_STATUS_REC_MASTER_ABORT;
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_DETECTED_PARITY;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(GLCP_MSR_REG(GLCP_CHIP_REV_ID), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_ISA_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ _rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
+ hi &= 0x000000f8;
+ conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_BRIDGE_HEADER_TYPE, hi);
+ break;
+ /*
+ * We only use the LBARs of DIVIL; no RCONF is used.
+ * All of them are I/O space.
+ */
+ case PCI_BAR0_REG:
+ return pci_isa_read_bar(0);
+ break;
+ case PCI_BAR1_REG:
+ return pci_isa_read_bar(1);
+ break;
+ case PCI_BAR2_REG:
+ return pci_isa_read_bar(2);
+ break;
+ case PCI_BAR3_REG:
+ break;
+ case PCI_BAR4_REG:
+ return pci_isa_read_bar(4);
+ break;
+ case PCI_BAR5_REG:
+ return pci_isa_read_bar(5);
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_ISA_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ /* no interrupt used here */
+ conf_data = CFG_PCI_INTERRUPT_LINE(0x00, 0x00);
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
new file mode 100644
index 00000000000..6cb44dbaeec
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
@@ -0,0 +1,217 @@
+/*
+ * CS5536 General timer functions
+ *
+ * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu zhangjin, wuzj@lemote.com
+ *
+ * Reference: AMD Geode(TM) CS5536 Companion Device Data Book
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+
+#include <asm/time.h>
+
+#include <cs5536/cs5536_mfgpt.h>
+
+DEFINE_SPINLOCK(mfgpt_lock);
+EXPORT_SYMBOL(mfgpt_lock);
+
+static u32 mfgpt_base;
+
+/*
+ * Initialize the MFGPT timer.
+ *
+ * This is also called after resume to bring the MFGPT into operation again.
+ */
+
+/* disable counter */
+void disable_mfgpt0_counter(void)
+{
+ outw(inw(MFGPT0_SETUP) & 0x7fff, MFGPT0_SETUP);
+}
+EXPORT_SYMBOL(disable_mfgpt0_counter);
+
+/* enable counter, comparator2 to event mode, 14.318MHz clock */
+void enable_mfgpt0_counter(void)
+{
+ outw(0xe310, MFGPT0_SETUP);
+}
+EXPORT_SYMBOL(enable_mfgpt0_counter);
+
+static void init_mfgpt_timer(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ spin_lock(&mfgpt_lock);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ outw(COMPARE, MFGPT0_CMP2); /* set comparator2 */
+ outw(0, MFGPT0_CNT); /* set counter to 0 */
+ enable_mfgpt0_counter();
+ break;
+
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
+ evt->mode == CLOCK_EVT_MODE_ONESHOT)
+ disable_mfgpt0_counter();
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Oneshot mode has a very high deviation; do not use it! */
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do here */
+ break;
+ }
+ spin_unlock(&mfgpt_lock);
+}
+
+static struct clock_event_device mfgpt_clockevent = {
+ .name = "mfgpt",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = init_mfgpt_timer,
+ .irq = CS5536_MFGPT_INTR,
+};
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ u32 basehi;
+
+ /*
+ * get MFGPT base address
+ *
+ * NOTE: do not remove this; it is needed because the value of
+ * mfgpt_base is variable.
+ */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);
+
+ /* ack */
+ outw(inw(MFGPT0_SETUP) | 0x4000, MFGPT0_SETUP);
+
+ mfgpt_clockevent.event_handler(&mfgpt_clockevent);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq5 = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
+ .name = "timer"
+};
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init setup_mfgpt0_timer(void)
+{
+ u32 basehi;
+ struct clock_event_device *cd = &mfgpt_clockevent;
+ unsigned int cpu = smp_processor_id();
+
+ cd->cpumask = cpumask_of(cpu);
+ clockevent_set_clock(cd, MFGPT_TICK_RATE);
+ cd->max_delta_ns = clockevent_delta2ns(0xffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
+
+ /* Enable MFGPT0 Comparator 2 Output to the Interrupt Mapper */
+ _wrmsr(DIVIL_MSR_REG(MFGPT_IRQ), 0, 0x100);
+
+ /* Enable Interrupt Gate 5 */
+ _wrmsr(DIVIL_MSR_REG(PIC_ZSEL_LOW), 0, 0x50000);
+
+ /* get MFGPT base address */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_MFGPT), &basehi, &mfgpt_base);
+
+ clockevents_register_device(cd);
+
+ setup_irq(CS5536_MFGPT_INTR, &irq5);
+}
+
+/*
+ * Since the MFGPT overflows every tick, it's not very useful
+ * to read it by itself. So use jiffies to emulate a free
+ * running counter:
+ */
+static cycle_t mfgpt_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ int count;
+ u32 jifs;
+ static int old_count;
+ static u32 old_jifs;
+
+ spin_lock_irqsave(&mfgpt_lock, flags);
+ /*
+ * Although our caller may have the read side of xtime_lock,
+ * this is now a seqlock, and we are cheating in this routine
+ * by having side effects on state that we cannot undo if
+ * there is a collision on the seqlock and our caller has to
+ * retry. (Namely, old_jifs and old_count.) So we must treat
+ * jiffies as volatile despite the lock. We read jiffies
+ * before latching the timer count to guarantee that although
+ * the jiffies value might be older than the count (that is,
+ * the counter may underflow between the last point where
+ * jiffies was incremented and the point where we latch the
+ * count), it cannot be newer.
+ */
+ jifs = jiffies;
+ /* read the count */
+ count = inw(MFGPT0_CNT);
+
+ /*
+ * It's possible for count to appear to go the wrong way for this
+ * reason:
+ *
+ * The timer counter underflows, but we haven't handled the resulting
+ * interrupt and incremented jiffies yet.
+ *
+ * Previous attempts to handle these cases intelligently were buggy, so
+ * we just do the simple thing now.
+ */
+ if (count < old_count && jifs == old_jifs)
+ count = old_count;
+
+ old_count = count;
+ old_jifs = jifs;
+
+ spin_unlock_irqrestore(&mfgpt_lock, flags);
+
+ return (cycle_t) (jifs * COMPARE) + count;
+}
+
+static struct clocksource clocksource_mfgpt = {
+ .name = "mfgpt",
+ .rating = 120, /* Functional for real use, but not desired */
+ .read = mfgpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .mult = 0,
+ .shift = 22,
+};
+
+int __init init_mfgpt_clocksource(void)
+{
+ if (num_possible_cpus() > 1) /* MFGPT does not scale! */
+ return 0;
+
+ clocksource_mfgpt.mult = clocksource_hz2mult(MFGPT_TICK_RATE, 22);
+ return clocksource_register(&clocksource_mfgpt);
+}
+
+arch_initcall(init_mfgpt_clocksource);
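
The MFGPT clocksource above leaves .mult at 0 and fills it in with clocksource_hz2mult(MFGPT_TICK_RATE, 22) at registration time; the timekeeping core then converts counts to nanoseconds as (cycles * mult) >> shift. A user-space arithmetic sketch of that conversion; the 14.318 MHz tick rate is an assumption about MFGPT_TICK_RATE, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define MFGPT_TICK_RATE	14318000ULL	/* assumed tick rate, ~14.318 MHz */
#define SHIFT		22

int main(void)
{
	/* essentially clocksource_hz2mult(), ignoring rounding: mult = (1e9 << shift) / hz */
	uint64_t mult = (NSEC_PER_SEC << SHIFT) / MFGPT_TICK_RATE;
	uint64_t cycles = MFGPT_TICK_RATE;	/* one second worth of counts */

	printf("mult = %llu\n", (unsigned long long)mult);
	printf("1s of counts -> %llu ns\n",
	       (unsigned long long)((cycles * mult) >> SHIFT));
	return 0;
}

The low rating of 120 keeps this clocksource below better ones, and init_mfgpt_clocksource() bails out on SMP because the counter does not scale across CPUs.
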
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ohci.c b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
new file mode 100644
index 00000000000..8fdb02b6e90
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
@@ -0,0 +1,147 @@
+/*
+ * the OHCI Virtual Support Module of AMD CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+void pci_ohci_write_reg(int reg, u32 value)
+{
+ u32 hi = 0, lo = value;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ if (value & PCI_COMMAND_MASTER)
+ hi |= PCI_COMMAND_MASTER;
+ else
+ hi &= ~PCI_COMMAND_MASTER;
+
+ if (value & PCI_COMMAND_MEMORY)
+ hi |= PCI_COMMAND_MEMORY;
+ else
+ hi &= ~PCI_COMMAND_MEMORY;
+ _wrmsr(USB_MSR_REG(USB_OHCI), hi, lo);
+ break;
+ case PCI_STATUS:
+ if (value & PCI_STATUS_PARITY) {
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG) {
+ lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
+ _wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
+ }
+ }
+ break;
+ case PCI_BAR0_REG:
+ if (value == PCI_BAR_RANGE_MASK) {
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ lo |= SOFT_BAR_OHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else if ((value & 0x01) == 0x00) {
+ _wrmsr(USB_MSR_REG(USB_OHCI), hi, lo);
+
+ value &= 0xfffffff0;
+ hi = 0x40000000 | ((value & 0xff000000) >> 24);
+ lo = 0x000fffff | ((value & 0x00fff000) << 8);
+ _wrmsr(GLIU_MSR_REG(GLIU_P2D_BM3), hi, lo);
+ }
+ break;
+ case PCI_OHCI_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
+ lo &= ~(0xf << PIC_YSEL_LOW_USB_SHIFT);
+ if (value) /* enable all the usb interrupt in PIC */
+ lo |= (CS5536_USB_INTR << PIC_YSEL_LOW_USB_SHIFT);
+ _wrmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), hi, lo);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 pci_ohci_read_reg(int reg)
+{
+ u32 conf_data = 0;
+ u32 hi, lo;
+
+ switch (reg) {
+ case PCI_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_OHCI_DEVICE_ID, CS5536_VENDOR_ID);
+ break;
+ case PCI_COMMAND:
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ if (hi & PCI_COMMAND_MASTER)
+ conf_data |= PCI_COMMAND_MASTER;
+ if (hi & PCI_COMMAND_MEMORY)
+ conf_data |= PCI_COMMAND_MEMORY;
+ break;
+ case PCI_STATUS:
+ conf_data |= PCI_STATUS_66MHZ;
+ conf_data |= PCI_STATUS_FAST_BACK;
+ _rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
+ if (lo & SB_PARE_ERR_FLAG)
+ conf_data |= PCI_STATUS_PARITY;
+ conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
+ break;
+ case PCI_CLASS_REVISION:
+ _rdmsr(USB_MSR_REG(USB_CAP), &hi, &lo);
+ conf_data = lo & 0x000000ff;
+ conf_data |= (CS5536_OHCI_CLASS_CODE << 8);
+ break;
+ case PCI_CACHE_LINE_SIZE:
+ conf_data =
+ CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE,
+ PCI_NORMAL_LATENCY_TIMER);
+ break;
+ case PCI_BAR0_REG:
+ _rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
+ if (lo & SOFT_BAR_OHCI_FLAG) {
+ conf_data = CS5536_OHCI_RANGE |
+ PCI_BASE_ADDRESS_SPACE_MEMORY;
+ lo &= ~SOFT_BAR_OHCI_FLAG;
+ _wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
+ } else {
+ _rdmsr(USB_MSR_REG(USB_OHCI), &hi, &lo);
+ conf_data = lo & 0xffffff00;
+ conf_data &= ~0x0000000f; /* 32bit mem */
+ }
+ break;
+ case PCI_CARDBUS_CIS:
+ conf_data = PCI_CARDBUS_CIS_POINTER;
+ break;
+ case PCI_SUBSYSTEM_VENDOR_ID:
+ conf_data =
+ CFG_PCI_VENDOR_ID(CS5536_OHCI_SUB_ID, CS5536_SUB_VENDOR_ID);
+ break;
+ case PCI_ROM_ADDRESS:
+ conf_data = PCI_EXPANSION_ROM_BAR;
+ break;
+ case PCI_CAPABILITY_LIST:
+ conf_data = PCI_CAPLIST_USB_POINTER;
+ break;
+ case PCI_INTERRUPT_LINE:
+ conf_data =
+ CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_USB_INTR);
+ break;
+ case PCI_OHCI_INT_REG:
+ _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
+ if ((lo & 0x00000f00) == CS5536_USB_INTR)
+ conf_data = 1;
+ break;
+ default:
+ break;
+ }
+
+ return conf_data;
+}
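The PCI_BAR0_REG write and read cases above jointly emulate the standard BAR sizing handshake: the caller writes all-ones, reads back a range mask (CS5536_OHCI_RANGE), then restores the BAR. A hedged sketch of what such a probe looks like from the caller's side; the accessor types are placeholders, not functions from this patch:

#include <stdint.h>

typedef uint32_t (*cfg_read_t)(int reg);
typedef void (*cfg_write_t)(int reg, uint32_t val);

/* size-probe a 32-bit memory BAR the classic PCI way (illustrative) */
static uint32_t probe_mem_bar_size(cfg_read_t rd, cfg_write_t wr, int bar_reg)
{
	uint32_t orig, mask;

	orig = rd(bar_reg);		/* remember the current mapping */
	wr(bar_reg, 0xffffffff);	/* the VSM latches SOFT_BAR_*_FLAG */
	mask = rd(bar_reg);		/* read back e.g. CS5536_OHCI_RANGE */
	wr(bar_reg, orig);		/* restore the original BAR */

	return ~(mask & 0xfffffff0u) + 1;	/* decoded window size */
}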
diff --git a/arch/mips/loongson/common/cs5536/cs5536_pci.c b/arch/mips/loongson/common/cs5536/cs5536_pci.c
new file mode 100644
index 00000000000..e23f3d7d2c1
--- /dev/null
+++ b/arch/mips/loongson/common/cs5536/cs5536_pci.c
@@ -0,0 +1,87 @@
+/*
+ * read/write operation to the PCI config space of CS5536
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author : jlliu, liujl@lemote.com
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The Virtual Support Modules (VSMs) that virtualize the PCI
+ * configuration space are defined in the cs5536_<module>.c files.
+ *
+ * With this virtualization in place, users can access the PCI
+ * configuration space directly, as for a normal multi-function
+ * PCI device that follows the PCI 2.2 spec.
+ */
+
+#include <linux/types.h>
+#include <cs5536/cs5536_vsm.h>
+
+enum {
+ CS5536_FUNC_START = -1,
+ CS5536_ISA_FUNC,
+ reserved_func,
+ CS5536_IDE_FUNC,
+ CS5536_ACC_FUNC,
+ CS5536_OHCI_FUNC,
+ CS5536_EHCI_FUNC,
+ CS5536_FUNC_END,
+};
+
+static const cs5536_pci_vsm_write vsm_conf_write[] = {
+ [CS5536_ISA_FUNC] pci_isa_write_reg,
+ [reserved_func] NULL,
+ [CS5536_IDE_FUNC] pci_ide_write_reg,
+ [CS5536_ACC_FUNC] pci_acc_write_reg,
+ [CS5536_OHCI_FUNC] pci_ohci_write_reg,
+ [CS5536_EHCI_FUNC] pci_ehci_write_reg,
+};
+
+static const cs5536_pci_vsm_read vsm_conf_read[] = {
+ [CS5536_ISA_FUNC] pci_isa_read_reg,
+ [reserved_func] NULL,
+ [CS5536_IDE_FUNC] pci_ide_read_reg,
+ [CS5536_ACC_FUNC] pci_acc_read_reg,
+ [CS5536_OHCI_FUNC] pci_ohci_read_reg,
+ [CS5536_EHCI_FUNC] pci_ehci_read_reg,
+};
+
+/*
+ * write to PCI config space and transfer it to MSR write.
+ */
+void cs5536_pci_conf_write4(int function, int reg, u32 value)
+{
+ if ((function <= CS5536_FUNC_START) || (function >= CS5536_FUNC_END))
+ return;
+ if ((reg < 0) || (reg > 0x100) || ((reg & 0x03) != 0))
+ return;
+
+ if (vsm_conf_write[function] != NULL)
+ vsm_conf_write[function](reg, value);
+}
+
+/*
+ * read PCI config space and transfer it to MSR access.
+ */
+u32 cs5536_pci_conf_read4(int function, int reg)
+{
+ u32 data = 0;
+
+ if ((function <= CS5536_FUNC_START) || (function >= CS5536_FUNC_END))
+ return 0;
+ if ((reg < 0) || ((reg & 0x03) != 0))
+ return 0;
+ if (reg > 0x100)
+ return 0xffffffff;
+
+ if (vsm_conf_read[function] != NULL)
+ data = vsm_conf_read[function](reg);
+
+ return data;
+}
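cs5536_pci_conf_read4()/cs5536_pci_conf_write4() only see a function number and a register offset; the glue that derives those from a PCI devfn lives in the board's pci_ops, outside this file. A rough sketch of that routing, assuming cs5536_pci.h exports the prototype; the function name and error convention here are made up:

#include <linux/types.h>
#include <cs5536/cs5536_pci.h>	/* assumed to declare cs5536_pci_conf_read4() */

/* illustrative only: decode the function number and delegate to the VSM layer */
static int cs5536_cfg_read_example(unsigned int devfn, int where, u32 *val)
{
	int function = devfn & 0x7;	/* PCI function number */

	if (where & 3)			/* only aligned 32-bit accesses are handled */
		return -1;

	*val = cs5536_pci_conf_read4(function, where);
	return 0;
}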
diff --git a/arch/mips/loongson/common/early_printk.c b/arch/mips/loongson/common/early_printk.c
index bc73edc0cfd..23e7a8f8897 100644
--- a/arch/mips/loongson/common/early_printk.c
+++ b/arch/mips/loongson/common/early_printk.c
@@ -1,7 +1,7 @@
/* early printk support
*
* Copyright (c) 2009 Philippe Vachon <philippe@cowpig.ca>
- * Copyright (C) 2009 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (c) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzj@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
@@ -12,26 +12,29 @@
#include <linux/serial_reg.h>
#include <loongson.h>
-#include <machine.h>
#define PORT(base, offset) (u8 *)(base + offset)
-static inline unsigned int serial_in(phys_addr_t base, int offset)
+static inline unsigned int serial_in(unsigned char *base, int offset)
{
return readb(PORT(base, offset));
}
-static inline void serial_out(phys_addr_t base, int offset, int value)
+static inline void serial_out(unsigned char *base, int offset, int value)
{
writeb(value, PORT(base, offset));
}
void prom_putchar(char c)
{
- phys_addr_t uart_base =
- (phys_addr_t) ioremap_nocache(LOONGSON_UART_BASE, 8);
+ int timeout;
+ unsigned char *uart_base;
- while ((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0)
+ uart_base = (unsigned char *)_loongson_uart_base;
+ timeout = 1024;
+
+ while (((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0) &&
+ (timeout-- > 0))
;
serial_out(uart_base, UART_TX, c);
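prom_putchar() above emits a single byte once the transmitter is idle (or the 1024-iteration timeout expires). A small assumed helper showing how an early-console write path would typically drain a buffer through it:

/* illustrative early-console write helper built on prom_putchar() */
static void prom_write(const char *s, unsigned int n)
{
	while (n--) {
		if (*s == '\n')
			prom_putchar('\r');	/* keep serial terminals happy */
		prom_putchar(*s++);
	}
}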
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index b9ef5038554..196d947d929 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -17,11 +17,14 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/module.h>
+
#include <asm/bootinfo.h>
#include <loongson.h>
unsigned long bus_clock, cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
unsigned long memsize, highmemsize;
/* pmon passes arguments in 32bit pointers */
diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c
index 3abe927422a..a2abd935573 100644
--- a/arch/mips/loongson/common/init.c
+++ b/arch/mips/loongson/common/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzj@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
@@ -10,19 +10,28 @@
#include <linux/bootmem.h>
-#include <asm/bootinfo.h>
-
#include <loongson.h>
+/* Loongson CPU address windows config space base address */
+unsigned long __maybe_unused _loongson_addrwincfg_base;
+
void __init prom_init(void)
{
- /* init base address of io space */
+ /* init base address of io space */
set_io_port_base((unsigned long)
- ioremap(BONITO_PCIIO_BASE, BONITO_PCIIO_SIZE));
+ ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+ _loongson_addrwincfg_base = (unsigned long)
+ ioremap(LOONGSON_ADDRWINCFG_BASE, LOONGSON_ADDRWINCFG_SIZE);
+#endif
prom_init_cmdline();
prom_init_env();
prom_init_memory();
+
+ /* init the uart base address */
+ prom_init_uart_base();
}
void __init prom_free_prom_memory(void)
diff --git a/arch/mips/loongson/common/irq.c b/arch/mips/loongson/common/irq.c
index b32b4a3e513..20e73283197 100644
--- a/arch/mips/loongson/common/irq.c
+++ b/arch/mips/loongson/common/irq.c
@@ -20,21 +20,21 @@ void bonito_irqdispatch(void)
int i;
/* workaround the IO dma problem: let cpu looping to allow DMA finish */
- int_status = BONITO_INTISR;
+ int_status = LOONGSON_INTISR;
if (int_status & (1 << 10)) {
while (int_status & (1 << 10)) {
udelay(1);
- int_status = BONITO_INTISR;
+ int_status = LOONGSON_INTISR;
}
}
/* Get pending sources, masked by current enables */
- int_status = BONITO_INTISR & BONITO_INTEN;
+ int_status = LOONGSON_INTISR & LOONGSON_INTEN;
if (int_status != 0) {
i = __ffs(int_status);
int_status &= ~(1 << i);
- do_IRQ(BONITO_IRQ_BASE + i);
+ do_IRQ(LOONGSON_IRQ_BASE + i);
}
}
@@ -60,13 +60,13 @@ void __init arch_init_irq(void)
set_irq_trigger_mode();
/* no steer */
- BONITO_INTSTEER = 0;
+ LOONGSON_INTSTEER = 0;
/*
* Mask out all interrupt by writing "1" to all bit position in
* the interrupt reset reg.
*/
- BONITO_INTENCLR = ~0;
+ LOONGSON_INTENCLR = ~0;
/* machine specific irq init */
mach_init_irq();
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 7b348248de7..0ed52b3f531 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -15,6 +15,9 @@
#include <loongson.h>
#include <machine.h>
+/* please ensure the length of the machtype string is less than 50 */
+#define MACHTYPE_LEN 50
+
static const char *system_types[] = {
[MACH_LOONGSON_UNKNOWN] "unknown loongson machine",
[MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box",
@@ -22,29 +25,35 @@ static const char *system_types[] = {
[MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches",
[MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches",
[MACH_DEXXON_GDIUM2F10] "dexxon-gidum-2f-10inches",
+ [MACH_LEMOTE_NAS] "lemote-nas-2f",
+ [MACH_LEMOTE_LL2F] "lemote-lynloong-2f",
[MACH_LOONGSON_END] NULL,
};
const char *get_system_type(void)
{
- if (mips_machtype == MACH_UNKNOWN)
- mips_machtype = LOONGSON_MACHTYPE;
-
return system_types[mips_machtype];
}
-static __init int machtype_setup(char *str)
+void __init prom_init_machtype(void)
{
+ char *p, str[MACHTYPE_LEN];
int machtype = MACH_LEMOTE_FL2E;
- if (!str)
- return -EINVAL;
+ mips_machtype = LOONGSON_MACHTYPE;
+
+ p = strstr(arcs_cmdline, "machtype=");
+ if (!p)
+ return;
+ p += strlen("machtype=");
+ strncpy(str, p, MACHTYPE_LEN);
+ p = strstr(str, " ");
+ if (p)
+ *p = '\0';
for (; system_types[machtype]; machtype++)
if (strstr(system_types[machtype], str)) {
mips_machtype = machtype;
break;
}
- return 0;
}
-__setup("machtype=", machtype_setup);
diff --git a/arch/mips/loongson/common/mem.c b/arch/mips/loongson/common/mem.c
index 7c92f79b648..ceacd092b44 100644
--- a/arch/mips/loongson/common/mem.c
+++ b/arch/mips/loongson/common/mem.c
@@ -12,24 +12,107 @@
#include <loongson.h>
#include <mem.h>
+#include <pci.h>
void __init prom_init_memory(void)
{
add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
+
+ add_memory_region(memsize << 20, LOONGSON_PCI_MEM_START - (memsize <<
+ 20), BOOT_MEM_RESERVED);
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+ {
+ int bit;
+
+ bit = fls(memsize + highmemsize);
+ if (bit != ffs(memsize + highmemsize))
+ bit += 20;
+ else
+ bit = bit + 20 - 1;
+
+ /* set cpu window3 to map CPU to DDR: 2G -> 2G */
+ LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
+ 0x80000000ul, (1 << bit));
+ mmiowb();
+ }
+#endif /* CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
#ifdef CONFIG_64BIT
- if (highmemsize > 0)
- add_memory_region(LOONGSON_HIGHMEM_START,
- highmemsize << 20, BOOT_MEM_RAM);
-#endif /* CONFIG_64BIT */
+ if (highmemsize > 0)
+ add_memory_region(LOONGSON_HIGHMEM_START,
+ highmemsize << 20, BOOT_MEM_RAM);
+
+ add_memory_region(LOONGSON_PCI_MEM_END + 1, LOONGSON_HIGHMEM_START -
+ LOONGSON_PCI_MEM_END - 1, BOOT_MEM_RESERVED);
+
+#endif /* CONFIG_64BIT */
}
/* override of arch/mips/mm/cache.c: __uncached_access */
int __uncached_access(struct file *file, unsigned long addr)
{
- if (file->f_flags & O_SYNC)
+ if (file->f_flags & O_DSYNC)
return 1;
return addr >= __pa(high_memory) ||
((addr >= LOONGSON_MMIO_MEM_START) &&
(addr < LOONGSON_MMIO_MEM_END));
}
+
+#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+
+static unsigned long uca_start, uca_end;
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+ unsigned long offset = pfn << PAGE_SHIFT;
+ unsigned long end = offset + size;
+
+ if (__uncached_access(file, offset)) {
+ if (uca_start && (offset >= uca_start) &&
+ (end <= uca_end))
+ return __pgprot((pgprot_val(vma_prot) &
+ ~_CACHE_MASK) |
+ _CACHE_UNCACHED_ACCELERATED);
+ else
+ return pgprot_noncached(vma_prot);
+ }
+ return vma_prot;
+}
+
+static int __init find_vga_mem_init(void)
+{
+ struct pci_dev *dev = 0;
+ struct resource *r;
+ int idx;
+
+ if (uca_start)
+ return 0;
+
+ for_each_pci_dev(dev) {
+ if ((dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
+ for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
+ r = &dev->resource[idx];
+ if (!r->start && r->end)
+ continue;
+ if (r->flags & IORESOURCE_IO)
+ continue;
+ if (r->flags & IORESOURCE_MEM) {
+ uca_start = r->start;
+ uca_end = r->end;
+ return 0;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+late_initcall(find_vga_mem_init);
+#endif /* CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */
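The fls()/ffs() comparison inside the CONFIG_CPU_SUPPORTS_ADDRWINCFG block above rounds the total memory size (given in MB) up to a power-of-two number of bytes so the CPU-to-DDR window covers all of it. A standalone sketch of that arithmetic with made-up sizes:

#include <stdio.h>

/* plain-C stand-ins for the kernel's fls()/ffs() bit helpers */
static int my_fls(unsigned int x) { int n = 0; while (x) { n++; x >>= 1; } return n; }
static int my_ffs(unsigned int x) { int n = 1; if (!x) return 0; while (!(x & 1)) { n++; x >>= 1; } return n; }

int main(void)
{
	unsigned int total_mb = 256 + 768;	/* memsize + highmemsize, example values */
	int bit = my_fls(total_mb);

	if (bit != my_ffs(total_mb))
		bit += 20;			/* not a power of two: round up */
	else
		bit = bit + 20 - 1;		/* already a power of two */

	printf("window size = 1 << %d bytes\n", bit);	/* 1 << 30, i.e. 1 GiB */
	return 0;
}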
diff --git a/arch/mips/loongson/common/pci.c b/arch/mips/loongson/common/pci.c
index a3a4abfb6c9..31d8c5ecd16 100644
--- a/arch/mips/loongson/common/pci.c
+++ b/arch/mips/loongson/common/pci.c
@@ -27,7 +27,7 @@ static struct resource loongson_pci_io_resource = {
};
static struct pci_controller loongson_pci_controller = {
- .pci_ops = &bonito64_pci_ops,
+ .pci_ops = &loongson_pci_ops,
.io_resource = &loongson_pci_io_resource,
.mem_resource = &loongson_pci_mem_resource,
.mem_offset = 0x00000000UL,
@@ -44,15 +44,15 @@ static void __init setup_pcimap(void)
* pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0
* [<2G] [384M,448M] [320M,384M] [0M,64M]
*/
- BONITO_PCIMAP = BONITO_PCIMAP_PCIMAP_2 |
- BONITO_PCIMAP_WIN(2, BONITO_PCILO2_BASE) |
- BONITO_PCIMAP_WIN(1, BONITO_PCILO1_BASE) |
- BONITO_PCIMAP_WIN(0, 0);
+ LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 |
+ LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) |
+ LOONGSON_PCIMAP_WIN(1, LOONGSON_PCILO1_BASE) |
+ LOONGSON_PCIMAP_WIN(0, 0);
/*
* PCI-DMA to local mapping: [2G,2G+256M] -> [0M,256M]
*/
- BONITO_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */
+ LOONGSON_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */
/* size: 256M, burst transmission, pre-fetch enable, 64bit */
LOONGSON_PCI_HIT0_SEL_L = 0xc000000cul;
LOONGSON_PCI_HIT0_SEL_H = 0xfffffffful;
@@ -67,6 +67,14 @@ static void __init setup_pcimap(void)
/* can not change gnt to break pci transfer when device's gnt not
deassert for some broken device */
LOONGSON_PXARB_CFG = 0x00fe0105ul;
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+ /*
+ * set cpu addr window2 to map CPU address space to PCI address space
+ */
+ LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC,
+ LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE);
+#endif
}
static int __init pcibios_init(void)
diff --git a/arch/mips/loongson/common/platform.c b/arch/mips/loongson/common/platform.c
new file mode 100644
index 00000000000..be81777eb94
--- /dev/null
+++ b/arch/mips/loongson/common/platform.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+static struct platform_device loongson2_cpufreq_device = {
+ .name = "loongson2_cpufreq",
+ .id = -1,
+};
+
+static int __init loongson2_cpufreq_init(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ /* Only the 2F revision and its successors support CPUFreq */
+ if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON2F)
+ return platform_device_register(&loongson2_cpufreq_device);
+
+ return -ENODEV;
+}
+
+arch_initcall(loongson2_cpufreq_init);
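Registering the bare "loongson2_cpufreq" platform device above is only half of the binding; a driver with a matching .name has to claim it. A hedged skeleton of what that registration could look like (the probe body is a stub, and the real driver is not part of this file):

#include <linux/platform_device.h>
#include <linux/module.h>

static int loongson2_cpufreq_probe(struct platform_device *pdev)
{
	/* the real driver would register with the cpufreq core here */
	return 0;
}

static struct platform_driver loongson2_cpufreq_driver = {
	.driver = {
		.name = "loongson2_cpufreq",	/* must match the device name above */
		.owner = THIS_MODULE,
	},
	.probe = loongson2_cpufreq_probe,
};

static int __init loongson2_cpufreq_driver_init(void)
{
	return platform_driver_register(&loongson2_cpufreq_driver);
}
late_initcall(loongson2_cpufreq_driver_init);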
diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c
new file mode 100644
index 00000000000..b625fec8a4d
--- /dev/null
+++ b/arch/mips/loongson/common/pm.c
@@ -0,0 +1,161 @@
+/*
+ * loongson-specific suspend support
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+
+#include <loongson.h>
+
+static unsigned int __maybe_unused cached_master_mask; /* i8259A */
+static unsigned int __maybe_unused cached_slave_mask;
+static unsigned int __maybe_unused cached_bonito_irq_mask; /* bonito */
+
+void arch_suspend_disable_irqs(void)
+{
+ /* disable all mips events */
+ local_irq_disable();
+
+#ifdef CONFIG_I8259
+ /* disable all events of i8259A */
+ cached_slave_mask = inb(PIC_SLAVE_IMR);
+ cached_master_mask = inb(PIC_MASTER_IMR);
+
+ outb(0xff, PIC_SLAVE_IMR);
+ inb(PIC_SLAVE_IMR);
+ outb(0xff, PIC_MASTER_IMR);
+ inb(PIC_MASTER_IMR);
+#endif
+ /* disable all events of bonito */
+ cached_bonito_irq_mask = LOONGSON_INTEN;
+ LOONGSON_INTENCLR = 0xffff;
+ (void)LOONGSON_INTENCLR;
+}
+
+void arch_suspend_enable_irqs(void)
+{
+ /* enable all mips events */
+ local_irq_enable();
+#ifdef CONFIG_I8259
+ /* only enable the cached events of i8259A */
+ outb(cached_slave_mask, PIC_SLAVE_IMR);
+ outb(cached_master_mask, PIC_MASTER_IMR);
+#endif
+ /* enable all cached events of bonito */
+ LOONGSON_INTENSET = cached_bonito_irq_mask;
+ (void)LOONGSON_INTENSET;
+}
+
+/*
+ * Set up the board-specific events for waking up loongson from wait mode
+ */
+void __weak setup_wakeup_events(void)
+{
+}
+
+/*
+ * Check wakeup events
+ */
+int __weak wakeup_loongson(void)
+{
+ return 1;
+}
+
+/*
+ * If the events are really ones we want to wake the CPU for, wake it up;
+ * otherwise put the CPU back to sleep.
+ */
+static void wait_for_wakeup_events(void)
+{
+ while (!wakeup_loongson())
+ LOONGSON_CHIPCFG0 &= ~0x7;
+}
+
+/*
+ * Stop all perf counters
+ *
+ * $24 is the control register of Loongson perf counter
+ */
+static inline void stop_perf_counters(void)
+{
+ __write_64bit_c0_register($24, 0, 0);
+}
+
+
+static void loongson_suspend_enter(void)
+{
+ static unsigned int cached_cpu_freq;
+
+ /* setup wakeup events via enabling the IRQs */
+ setup_wakeup_events();
+
+ stop_perf_counters();
+
+ cached_cpu_freq = LOONGSON_CHIPCFG0;
+
+ /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG0 &= ~0x7;
+
+ /* wait for the given events to wakeup cpu from wait mode */
+ wait_for_wakeup_events();
+
+ LOONGSON_CHIPCFG0 = cached_cpu_freq;
+ mmiowb();
+}
+
+void __weak mach_suspend(void)
+{
+}
+
+void __weak mach_resume(void)
+{
+}
+
+static int loongson_pm_enter(suspend_state_t state)
+{
+ mach_suspend();
+
+ /* processor specific suspend */
+ loongson_suspend_enter();
+
+ mach_resume();
+
+ return 0;
+}
+
+static int loongson_pm_valid_state(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static struct platform_suspend_ops loongson_pm_ops = {
+ .valid = loongson_pm_valid_state,
+ .enter = loongson_pm_enter,
+};
+
+static int __init loongson_pm_init(void)
+{
+ suspend_set_ops(&loongson_pm_ops);
+
+ return 0;
+}
+arch_initcall(loongson_pm_init);
diff --git a/arch/mips/loongson/common/reset.c b/arch/mips/loongson/common/reset.c
index 97e918251ed..d57f1719da9 100644
--- a/arch/mips/loongson/common/reset.c
+++ b/arch/mips/loongson/common/reset.c
@@ -22,7 +22,7 @@ static void loongson_restart(char *command)
mach_prepare_reboot();
/* reboot via jumping to boot base address */
- ((void (*)(void))ioremap_nocache(BONITO_BOOT_BASE, 4)) ();
+ ((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4)) ();
}
static void loongson_halt(void)
diff --git a/arch/mips/loongson/common/serial.c b/arch/mips/loongson/common/serial.c
new file mode 100644
index 00000000000..23b66a5f88c
--- /dev/null
+++ b/arch/mips/loongson/common/serial.c
@@ -0,0 +1,76 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Yan hua (yanhua@lemote.com)
+ * Author: Wu Zhangjin (wuzj@lemote.com)
+ */
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+#include <machine.h>
+
+#define PORT(int) \
+{ \
+ .irq = int, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+#define PORT_M(int) \
+{ \
+ .irq = MIPS_CPU_IRQ_BASE + (int), \
+ .uartclk = 3686400, \
+ .iotype = UPIO_MEM, \
+ .membase = (void __iomem *)NULL, \
+ .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
+ .regshift = 0, \
+}
+
+static struct plat_serial8250_port uart8250_data[][2] = {
+ [MACH_LOONGSON_UNKNOWN] {},
+ [MACH_LEMOTE_FL2E] {PORT(4), {} },
+ [MACH_LEMOTE_FL2F] {PORT(3), {} },
+ [MACH_LEMOTE_ML2F7] {PORT_M(3), {} },
+ [MACH_LEMOTE_YL2F89] {PORT_M(3), {} },
+ [MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} },
+ [MACH_LEMOTE_NAS] {PORT_M(3), {} },
+ [MACH_LEMOTE_LL2F] {PORT(3), {} },
+ [MACH_LOONGSON_END] {},
+};
+
+static struct platform_device uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+};
+
+static int __init serial_init(void)
+{
+ unsigned char iotype;
+
+ iotype = uart8250_data[mips_machtype][0].iotype;
+
+ if (UPIO_MEM == iotype)
+ uart8250_data[mips_machtype][0].membase =
+ (void __iomem *)_loongson_uart_base;
+ else if (UPIO_PORT == iotype)
+ uart8250_data[mips_machtype][0].iobase =
+ loongson_uart_base - LOONGSON_PCIIO_BASE;
+
+ uart8250_device.dev.platform_data = uart8250_data[mips_machtype];
+
+ return platform_device_register(&uart8250_device);
+}
+
+device_initcall(serial_init);
diff --git a/arch/mips/loongson/common/time.c b/arch/mips/loongson/common/time.c
index 6e08c8270ab..35f0b66a94f 100644
--- a/arch/mips/loongson/common/time.c
+++ b/arch/mips/loongson/common/time.c
@@ -14,11 +14,14 @@
#include <asm/time.h>
#include <loongson.h>
+#include <cs5536/cs5536_mfgpt.h>
void __init plat_time_init(void)
{
/* setup mips r4k timer */
mips_hpt_frequency = cpu_clock_freq / 2;
+
+ setup_mfgpt0_timer();
}
void read_persistent_clock(struct timespec *ts)
diff --git a/arch/mips/loongson/common/uart_base.c b/arch/mips/loongson/common/uart_base.c
new file mode 100644
index 00000000000..78ff66ae749
--- /dev/null
+++ b/arch/mips/loongson/common/uart_base.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+/* ioremapped */
+unsigned long _loongson_uart_base;
+EXPORT_SYMBOL(_loongson_uart_base);
+/* raw */
+unsigned long loongson_uart_base;
+EXPORT_SYMBOL(loongson_uart_base);
+
+void prom_init_loongson_uart_base(void)
+{
+ switch (mips_machtype) {
+ case MACH_LEMOTE_FL2E:
+ loongson_uart_base = LOONGSON_PCIIO_BASE + 0x3f8;
+ break;
+ case MACH_LEMOTE_FL2F:
+ case MACH_LEMOTE_LL2F:
+ loongson_uart_base = LOONGSON_PCIIO_BASE + 0x2f8;
+ break;
+ case MACH_LEMOTE_ML2F7:
+ case MACH_LEMOTE_YL2F89:
+ case MACH_DEXXON_GDIUM2F10:
+ case MACH_LEMOTE_NAS:
+ default:
+ /* The CPU provided serial port */
+ loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8;
+ break;
+ }
+
+ _loongson_uart_base =
+ (unsigned long)ioremap_nocache(loongson_uart_base, 8);
+}
diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c
index 7888cf69424..320e9379bdd 100644
--- a/arch/mips/loongson/fuloong-2e/irq.c
+++ b/arch/mips/loongson/fuloong-2e/irq.c
@@ -47,8 +47,8 @@ static struct irqaction cascade_irqaction = {
void __init set_irq_trigger_mode(void)
{
/* most bonito irq should be level triggered */
- BONITO_INTEDGE = BONITO_ICU_SYSTEMERR | BONITO_ICU_MASTERERR |
- BONITO_ICU_RETRYERR | BONITO_ICU_MBOXES;
+ LOONGSON_INTEDGE = LOONGSON_ICU_SYSTEMERR | LOONGSON_ICU_MASTERERR |
+ LOONGSON_ICU_RETRYERR | LOONGSON_ICU_MBOXES;
}
void __init mach_init_irq(void)
diff --git a/arch/mips/loongson/fuloong-2e/reset.c b/arch/mips/loongson/fuloong-2e/reset.c
index 677fe186db9..fc16c677d47 100644
--- a/arch/mips/loongson/fuloong-2e/reset.c
+++ b/arch/mips/loongson/fuloong-2e/reset.c
@@ -14,8 +14,8 @@
void mach_prepare_reboot(void)
{
- BONITO_BONGENCFG &= ~(1 << 2);
- BONITO_BONGENCFG |= (1 << 2);
+ LOONGSON_GENCFG &= ~(1 << 2);
+ LOONGSON_GENCFG |= (1 << 2);
}
void mach_prepare_shutdown(void)
diff --git a/arch/mips/loongson/lemote-2f/Makefile b/arch/mips/loongson/lemote-2f/Makefile
new file mode 100644
index 00000000000..4d84b27dc41
--- /dev/null
+++ b/arch/mips/loongson/lemote-2f/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for lemote loongson2f family machines
+#
+
+obj-y += irq.o reset.o ec_kb3310b.o
+
+#
+# Suspend Support
+#
+
+obj-$(CONFIG_LOONGSON_SUSPEND) += pm.o
diff --git a/arch/mips/loongson/lemote-2f/ec_kb3310b.c b/arch/mips/loongson/lemote-2f/ec_kb3310b.c
new file mode 100644
index 00000000000..4d84111a2cd
--- /dev/null
+++ b/arch/mips/loongson/lemote-2f/ec_kb3310b.c
@@ -0,0 +1,130 @@
+/*
+ * Basic KB3310B Embedded Controller support for the YeeLoong 2F netbook
+ *
+ * Copyright (C) 2008 Lemote Inc.
+ * Author: liujl <liujl@lemote.com>, 2008-04-20
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "ec_kb3310b.h"
+
+static DEFINE_SPINLOCK(index_access_lock);
+static DEFINE_SPINLOCK(port_access_lock);
+
+unsigned char ec_read(unsigned short addr)
+{
+ unsigned char value;
+ unsigned long flags;
+
+ spin_lock_irqsave(&index_access_lock, flags);
+ outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH);
+ outb((addr & 0x00ff), EC_IO_PORT_LOW);
+ value = inb(EC_IO_PORT_DATA);
+ spin_unlock_irqrestore(&index_access_lock, flags);
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(ec_read);
+
+void ec_write(unsigned short addr, unsigned char val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&index_access_lock, flags);
+ outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH);
+ outb((addr & 0x00ff), EC_IO_PORT_LOW);
+ outb(val, EC_IO_PORT_DATA);
+ /* flush the write action */
+ inb(EC_IO_PORT_DATA);
+ spin_unlock_irqrestore(&index_access_lock, flags);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(ec_write);
+
+/*
+ * This function is used for EC command writes and corresponding status queries.
+ */
+int ec_query_seq(unsigned char cmd)
+{
+ int timeout;
+ unsigned char status;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&port_access_lock, flags);
+
+ /* send the command to the EC */
+ udelay(EC_REG_DELAY);
+ outb(cmd, EC_CMD_PORT);
+ udelay(EC_REG_DELAY);
+
+ /* check if the command is received by ec */
+ timeout = EC_CMD_TIMEOUT;
+ status = inb(EC_STS_PORT);
+ while (timeout-- && (status & (1 << 1))) {
+ status = inb(EC_STS_PORT);
+ udelay(EC_REG_DELAY);
+ }
+
+ if (timeout <= 0) {
+ printk(KERN_ERR "%s: deadable error : timeout...\n", __func__);
+ ret = -EINVAL;
+ } else
+ printk(KERN_INFO
+ "(%x/%d)ec issued command %d status : 0x%x\n",
+ timeout, EC_CMD_TIMEOUT - timeout, cmd, status);
+
+ spin_unlock_irqrestore(&port_access_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ec_query_seq);
+
+/*
+ * Send query command to EC to get the proper event number
+ */
+int ec_query_event_num(void)
+{
+ return ec_query_seq(CMD_GET_EVENT_NUM);
+}
+EXPORT_SYMBOL(ec_query_event_num);
+
+/*
+ * Get event number from EC
+ *
+ * NOTE: This routine must be called after ec_query_event_num() in the
+ * interrupt handler.
+ */
+int ec_get_event_num(void)
+{
+ int timeout = 100;
+ unsigned char value;
+ unsigned char status;
+
+ udelay(EC_REG_DELAY);
+ status = inb(EC_STS_PORT);
+ udelay(EC_REG_DELAY);
+ while (timeout-- && !(status & (1 << 0))) {
+ status = inb(EC_STS_PORT);
+ udelay(EC_REG_DELAY);
+ }
+ if (timeout <= 0) {
+ pr_info("%s: get event number timeout.\n", __func__);
+
+ return -EINVAL;
+ }
+ value = inb(EC_DAT_PORT);
+ udelay(EC_REG_DELAY);
+
+ return value;
+}
+EXPORT_SYMBOL(ec_get_event_num);
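ec_read()/ec_write() give byte-wide access through the EC index ports; 16-bit quantities are split across the *_HIGH/*_LOW registers listed in ec_kb3310b.h. A usage sketch, assumed rather than taken from the patch, for combining such a pair:

#include "ec_kb3310b.h"

/* combine a high/low EC register pair into one 16-bit value (illustrative) */
static unsigned int ec_read_u16(unsigned short hi_addr, unsigned short lo_addr)
{
	return (ec_read(hi_addr) << 8) | ec_read(lo_addr);
}

/* e.g. a platform driver could read the battery voltage as:
 *	ec_read_u16(REG_BAT_VOLTAGE_HIGH, REG_BAT_VOLTAGE_LOW);
 */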
diff --git a/arch/mips/loongson/lemote-2f/ec_kb3310b.h b/arch/mips/loongson/lemote-2f/ec_kb3310b.h
new file mode 100644
index 00000000000..1595a21b315
--- /dev/null
+++ b/arch/mips/loongson/lemote-2f/ec_kb3310b.h
@@ -0,0 +1,188 @@
+/*
+ * KB3310B Embedded Controller
+ *
+ * Copyright (C) 2008 Lemote Inc.
+ * Author: liujl <liujl@lemote.com>, 2008-03-14
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _EC_KB3310B_H
+#define _EC_KB3310B_H
+
+extern unsigned char ec_read(unsigned short addr);
+extern void ec_write(unsigned short addr, unsigned char val);
+extern int ec_query_seq(unsigned char cmd);
+extern int ec_query_event_num(void);
+extern int ec_get_event_num(void);
+
+typedef int (*sci_handler) (int status);
+extern sci_handler yeeloong_report_lid_status;
+
+#define SCI_IRQ_NUM 0x0A
+
+/*
+ * The following registers are determined by the EC index configuration.
+ * 1, fill the PORT_HIGH as EC register high part.
+ * 2, fill the PORT_LOW as EC register low part.
+ * 3, fill the PORT_DATA as EC register write data or get the data from it.
+ */
+#define EC_IO_PORT_HIGH 0x0381
+#define EC_IO_PORT_LOW 0x0382
+#define EC_IO_PORT_DATA 0x0383
+
+/*
+ * EC delay time is 500us for register and status access
+ */
+#define EC_REG_DELAY 500 /* unit : us */
+#define EC_CMD_TIMEOUT 0x1000
+
+/*
+ * EC access port for SCI communication
+ */
+#define EC_CMD_PORT 0x66
+#define EC_STS_PORT 0x66
+#define EC_DAT_PORT 0x62
+#define CMD_INIT_IDLE_MODE 0xdd
+#define CMD_EXIT_IDLE_MODE 0xdf
+#define CMD_INIT_RESET_MODE 0xd8
+#define CMD_REBOOT_SYSTEM 0x8c
+#define CMD_GET_EVENT_NUM 0x84
+#define CMD_PROGRAM_PIECE 0xda
+
+/* temperature & fan registers */
+#define REG_TEMPERATURE_VALUE 0xF458
+#define REG_FAN_AUTO_MAN_SWITCH 0xF459
+#define BIT_FAN_AUTO 0
+#define BIT_FAN_MANUAL 1
+#define REG_FAN_CONTROL 0xF4D2
+#define BIT_FAN_CONTROL_ON (1 << 0)
+#define BIT_FAN_CONTROL_OFF (0 << 0)
+#define REG_FAN_STATUS 0xF4DA
+#define BIT_FAN_STATUS_ON (1 << 0)
+#define BIT_FAN_STATUS_OFF (0 << 0)
+#define REG_FAN_SPEED_HIGH 0xFE22
+#define REG_FAN_SPEED_LOW 0xFE23
+#define REG_FAN_SPEED_LEVEL 0xF4CC
+/* fan speed divider */
+#define FAN_SPEED_DIVIDER 480000 /* (60*1000*1000/62.5/2)*/
+
+/* battery registers */
+#define REG_BAT_DESIGN_CAP_HIGH 0xF77D
+#define REG_BAT_DESIGN_CAP_LOW 0xF77E
+#define REG_BAT_FULLCHG_CAP_HIGH 0xF780
+#define REG_BAT_FULLCHG_CAP_LOW 0xF781
+#define REG_BAT_DESIGN_VOL_HIGH 0xF782
+#define REG_BAT_DESIGN_VOL_LOW 0xF783
+#define REG_BAT_CURRENT_HIGH 0xF784
+#define REG_BAT_CURRENT_LOW 0xF785
+#define REG_BAT_VOLTAGE_HIGH 0xF786
+#define REG_BAT_VOLTAGE_LOW 0xF787
+#define REG_BAT_TEMPERATURE_HIGH 0xF788
+#define REG_BAT_TEMPERATURE_LOW 0xF789
+#define REG_BAT_RELATIVE_CAP_HIGH 0xF492
+#define REG_BAT_RELATIVE_CAP_LOW 0xF493
+#define REG_BAT_VENDOR 0xF4C4
+#define FLAG_BAT_VENDOR_SANYO 0x01
+#define FLAG_BAT_VENDOR_SIMPLO 0x02
+#define REG_BAT_CELL_COUNT 0xF4C6
+#define FLAG_BAT_CELL_3S1P 0x03
+#define FLAG_BAT_CELL_3S2P 0x06
+#define REG_BAT_CHARGE 0xF4A2
+#define FLAG_BAT_CHARGE_DISCHARGE 0x01
+#define FLAG_BAT_CHARGE_CHARGE 0x02
+#define FLAG_BAT_CHARGE_ACPOWER 0x00
+#define REG_BAT_STATUS 0xF4B0
+#define BIT_BAT_STATUS_LOW (1 << 5)
+#define BIT_BAT_STATUS_DESTROY (1 << 2)
+#define BIT_BAT_STATUS_FULL (1 << 1)
+#define BIT_BAT_STATUS_IN (1 << 0)
+#define REG_BAT_CHARGE_STATUS 0xF4B1
+#define BIT_BAT_CHARGE_STATUS_OVERTEMP (1 << 2)
+#define BIT_BAT_CHARGE_STATUS_PRECHG (1 << 1)
+#define REG_BAT_STATE 0xF482
+#define BIT_BAT_STATE_CHARGING (1 << 1)
+#define BIT_BAT_STATE_DISCHARGING (1 << 0)
+#define REG_BAT_POWER 0xF440
+#define BIT_BAT_POWER_S3 (1 << 2)
+#define BIT_BAT_POWER_ON (1 << 1)
+#define BIT_BAT_POWER_ACIN (1 << 0)
+
+/* other registers */
+/* Audio: rd/wr */
+#define REG_AUDIO_VOLUME 0xF46C
+#define REG_AUDIO_MUTE 0xF4E7
+#define REG_AUDIO_BEEP 0xF4D0
+/* USB port power or not: rd/wr */
+#define REG_USB0_FLAG 0xF461
+#define REG_USB1_FLAG 0xF462
+#define REG_USB2_FLAG 0xF463
+#define BIT_USB_FLAG_ON 1
+#define BIT_USB_FLAG_OFF 0
+/* LID */
+#define REG_LID_DETECT 0xF4BD
+#define BIT_LID_DETECT_ON 1
+#define BIT_LID_DETECT_OFF 0
+/* CRT */
+#define REG_CRT_DETECT 0xF4AD
+#define BIT_CRT_DETECT_PLUG 1
+#define BIT_CRT_DETECT_UNPLUG 0
+/* LCD backlight brightness adjust: 9 levels */
+#define REG_DISPLAY_BRIGHTNESS 0xF4F5
+/* Black screen Status */
+#define BIT_DISPLAY_LCD_ON 1
+#define BIT_DISPLAY_LCD_OFF 0
+/* LCD backlight control: off/restore */
+#define REG_BACKLIGHT_CTRL 0xF7BD
+#define BIT_BACKLIGHT_ON 1
+#define BIT_BACKLIGHT_OFF 0
+/* Reset the machine auto-clear: rd/wr */
+#define REG_RESET 0xF4EC
+#define BIT_RESET_ON 1
+/* Light the led: rd/wr */
+#define REG_LED 0xF4C8
+#define BIT_LED_RED_POWER (1 << 0)
+#define BIT_LED_ORANGE_POWER (1 << 1)
+#define BIT_LED_GREEN_CHARGE (1 << 2)
+#define BIT_LED_RED_CHARGE (1 << 3)
+#define BIT_LED_NUMLOCK (1 << 4)
+/* Test led mode, all led on/off */
+#define REG_LED_TEST 0xF4C2
+#define BIT_LED_TEST_IN 1
+#define BIT_LED_TEST_OUT 0
+/* Camera on/off */
+#define REG_CAMERA_STATUS 0xF46A
+#define BIT_CAMERA_STATUS_ON 1
+#define BIT_CAMERA_STATUS_OFF 0
+#define REG_CAMERA_CONTROL 0xF7B7
+#define BIT_CAMERA_CONTROL_OFF 0
+#define BIT_CAMERA_CONTROL_ON 1
+/* Wlan Status */
+#define REG_WLAN 0xF4FA
+#define BIT_WLAN_ON 1
+#define BIT_WLAN_OFF 0
+#define REG_DISPLAY_LCD 0xF79F
+
+/* SCI Event Number from EC */
+enum {
+ EVENT_LID = 0x23, /* LID open/close */
+ EVENT_DISPLAY_TOGGLE, /* Fn+F3 for display switch */
+ EVENT_SLEEP, /* Fn+F1 for entering sleep mode */
+ EVENT_OVERTEMP, /* Over-temperature happened */
+ EVENT_CRT_DETECT, /* CRT is connected */
+ EVENT_CAMERA, /* Camera on/off */
+ EVENT_USB_OC2, /* USB2 Over Current occurred */
+ EVENT_USB_OC0, /* USB0 Over Current occurred */
+ EVENT_BLACK_SCREEN, /* Turn on/off backlight */
+ EVENT_AUDIO_MUTE, /* Mute on/off */
+ EVENT_DISPLAY_BRIGHTNESS,/* LCD backlight brightness adjust */
+ EVENT_AC_BAT, /* AC & Battery relative issue */
+ EVENT_AUDIO_VOLUME, /* Volume adjust */
+ EVENT_WLAN, /* Wlan on/off */
+ EVENT_END
+};
+
+#endif /* !_EC_KB3310B_H */
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
new file mode 100644
index 00000000000..77d32f9cf31
--- /dev/null
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2007 Lemote Inc.
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+
+#include <loongson.h>
+#include <machine.h>
+
+#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
+#define LOONGSON_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
+#define LOONGSON_NORTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 6) /* bonito */
+#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
+#define LOONGSON_SOUTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
+
+#define LOONGSON_INT_BIT_INT0 (1 << 11)
+#define LOONGSON_INT_BIT_INT1 (1 << 12)
+
+/*
+ * The generic i8259_irq() makes the kernel hang during boot. Since we cannot
+ * get the irq via the IRR directly, we access the ISR instead.
+ */
+int mach_i8259_irq(void)
+{
+ int irq, isr;
+
+ irq = -1;
+
+ if ((LOONGSON_INTISR & LOONGSON_INTEN) & LOONGSON_INT_BIT_INT0) {
+ spin_lock(&i8259A_lock);
+ isr = inb(PIC_MASTER_CMD) &
+ ~inb(PIC_MASTER_IMR) & ~(1 << PIC_CASCADE_IR);
+ if (!isr)
+ isr = (inb(PIC_SLAVE_CMD) & ~inb(PIC_SLAVE_IMR)) << 8;
+ irq = ffs(isr) - 1;
+ if (unlikely(irq == 7)) {
+ /*
+ * This may be a spurious interrupt.
+ *
+ * Read the interrupt status register (ISR). If the most
+ * significant bit is not set then there is no valid
+ * interrupt.
+ */
+ outb(0x0B, PIC_MASTER_ISR); /* ISR register */
+ if (~inb(PIC_MASTER_ISR) & 0x80)
+ irq = -1;
+ }
+ spin_unlock(&i8259A_lock);
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL(mach_i8259_irq);
+
+static void i8259_irqdispatch(void)
+{
+ int irq;
+
+ irq = mach_i8259_irq();
+ if (irq >= 0)
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+}
+
+void mach_irq_dispatch(unsigned int pending)
+{
+ if (pending & CAUSEF_IP7)
+ do_IRQ(LOONGSON_TIMER_IRQ);
+ else if (pending & CAUSEF_IP6) { /* North Bridge, Perf counter */
+#ifdef CONFIG_OPROFILE
+ do_IRQ(LOONGSON2_PERFCNT_IRQ);
+#endif
+ bonito_irqdispatch();
+ } else if (pending & CAUSEF_IP3) /* CPU UART */
+ do_IRQ(LOONGSON_UART_IRQ);
+ else if (pending & CAUSEF_IP2) /* South Bridge */
+ i8259_irqdispatch();
+ else
+ spurious_interrupt();
+}
+
+void __init set_irq_trigger_mode(void)
+{
+ /* setup cs5536 as high level trigger */
+ LOONGSON_INTPOL = LOONGSON_INT_BIT_INT0 | LOONGSON_INT_BIT_INT1;
+ LOONGSON_INTEDGE &= ~(LOONGSON_INT_BIT_INT0 | LOONGSON_INT_BIT_INT1);
+}
+
+static irqreturn_t ip6_action(int cpl, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+struct irqaction ip6_irqaction = {
+ .handler = ip6_action,
+ .name = "cascade",
+ .flags = IRQF_SHARED,
+};
+
+struct irqaction cascade_irqaction = {
+ .handler = no_action,
+ .name = "cascade",
+};
+
+void __init mach_init_irq(void)
+{
+ /* init all controllers:
+ * 0-15 ------> i8259 interrupt
+ * 16-23 ------> mips cpu interrupt
+ * 32-63 ------> bonito irq
+ */
+
+ /* Sets the first-level interrupt dispatcher. */
+ mips_cpu_irq_init();
+ init_i8259_irqs();
+ bonito_irq_init();
+
+ /* setup north bridge irq (bonito) */
+ setup_irq(LOONGSON_NORTH_BRIDGE_IRQ, &ip6_irqaction);
+ /* setup south bridge irq (i8259) */
+ setup_irq(LOONGSON_SOUTH_BRIDGE_IRQ, &cascade_irqaction);
+}
diff --git a/arch/mips/loongson/lemote-2f/pm.c b/arch/mips/loongson/lemote-2f/pm.c
new file mode 100644
index 00000000000..d7af2e61659
--- /dev/null
+++ b/arch/mips/loongson/lemote-2f/pm.c
@@ -0,0 +1,149 @@
+/*
+ * Lemote loongson2f family machines' specific suspend support
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/suspend.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/i8042.h>
+#include <linux/module.h>
+
+#include <asm/i8259.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+#include <cs5536/cs5536_mfgpt.h>
+#include "ec_kb3310b.h"
+
+#define I8042_KBD_IRQ 1
+#define I8042_CTR_KBDINT 0x01
+#define I8042_CTR_KBDDIS 0x10
+
+static unsigned char i8042_ctr;
+
+static int i8042_enable_kbd_port(void)
+{
+ if (i8042_command(&i8042_ctr, I8042_CMD_CTL_RCTR)) {
+ pr_err("i8042.c: Can't read CTR while enabling i8042 kbd port."
+ "\n");
+ return -EIO;
+ }
+
+ i8042_ctr &= ~I8042_CTR_KBDDIS;
+ i8042_ctr |= I8042_CTR_KBDINT;
+
+ if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
+ i8042_ctr &= ~I8042_CTR_KBDINT;
+ i8042_ctr |= I8042_CTR_KBDDIS;
+ pr_err("i8042.c: Failed to enable KBD port.\n");
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+void setup_wakeup_events(void)
+{
+ int irq_mask;
+
+ switch (mips_machtype) {
+ case MACH_LEMOTE_ML2F7:
+ case MACH_LEMOTE_YL2F89:
+ /* open the keyboard irq in i8259A */
+ outb((0xff & ~(1 << I8042_KBD_IRQ)), PIC_MASTER_IMR);
+ irq_mask = inb(PIC_MASTER_IMR);
+
+ /* enable keyboard port */
+ i8042_enable_kbd_port();
+
+ /* Wakeup CPU via SCI lid open event */
+ outb(irq_mask & ~(1 << PIC_CASCADE_IR), PIC_MASTER_IMR);
+ inb(PIC_MASTER_IMR);
+ outb(0xff & ~(1 << (SCI_IRQ_NUM - 8)), PIC_SLAVE_IMR);
+ inb(PIC_SLAVE_IMR);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+static struct delayed_work lid_task;
+static int initialized;
+/* yeeloong_report_lid_status will be implemented in yeeloong_laptop.c */
+sci_handler yeeloong_report_lid_status;
+EXPORT_SYMBOL(yeeloong_report_lid_status);
+static void yeeloong_lid_update_task(struct work_struct *work)
+{
+ if (yeeloong_report_lid_status)
+ yeeloong_report_lid_status(BIT_LID_DETECT_ON);
+}
+
+int wakeup_loongson(void)
+{
+ int irq;
+
+ /* query the interrupt number */
+ irq = mach_i8259_irq();
+ if (irq < 0)
+ return 0;
+
+ printk(KERN_INFO "%s: irq = %d\n", __func__, irq);
+
+ if (irq == I8042_KBD_IRQ)
+ return 1;
+ else if (irq == SCI_IRQ_NUM) {
+ int ret, sci_event;
+ /* query the event number */
+ ret = ec_query_seq(CMD_GET_EVENT_NUM);
+ if (ret < 0)
+ return 0;
+ sci_event = ec_get_event_num();
+ if (sci_event < 0)
+ return 0;
+ if (sci_event == EVENT_LID) {
+ int lid_status;
+ /* check the LID status */
+ lid_status = ec_read(REG_LID_DETECT);
+ /* wake up the cpu when the LID is opened */
+ if (lid_status == BIT_LID_DETECT_ON) {
+ /* If we called it directly here, getnstimeofday()
+ * would emit a WARNING via
+ * "WARN_ON(timekeeping_suspended);", because we
+ * cannot schedule in suspend mode; defer it instead.
+ */
+ if (initialized == 0) {
+ INIT_DELAYED_WORK(&lid_task,
+ yeeloong_lid_update_task);
+ initialized = 1;
+ }
+ schedule_delayed_work(&lid_task, 1);
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void __weak mach_suspend(void)
+{
+ disable_mfgpt0_counter();
+}
+
+void __weak mach_resume(void)
+{
+ enable_mfgpt0_counter();
+}
diff --git a/arch/mips/loongson/lemote-2f/reset.c b/arch/mips/loongson/lemote-2f/reset.c
new file mode 100644
index 00000000000..51d1a60d534
--- /dev/null
+++ b/arch/mips/loongson/lemote-2f/reset.c
@@ -0,0 +1,159 @@
+/* Board-specific reboot/shutdown routines
+ *
+ * Copyright (c) 2009 Philippe Vachon <philippe@cowpig.ca>
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin, wuzj@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+
+#include <asm/bootinfo.h>
+
+#include <loongson.h>
+
+#include <cs5536/cs5536.h>
+#include "ec_kb3310b.h"
+
+static void reset_cpu(void)
+{
+ /*
+ * reset the cpu to full speed; this is needed when cpu frequency
+ * scaling is enabled
+ */
+ LOONGSON_CHIPCFG0 |= 0x7;
+}
+
+/* reset support for fuloong2f */
+
+static void fl2f_reboot(void)
+{
+ reset_cpu();
+
+ /* send a reset signal to the south bridge.
+ *
+ * NOTE: if "Power Management" is enabled in the kernel, the rtl8169
+ * does not reset properly with this operation and will not work in
+ * PMON; type the halt command and then reboot instead. The hardware
+ * reset logic does not seem to work correctly here.
+ */
+ {
+ u32 hi, lo;
+ _rdmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), &hi, &lo);
+ lo |= 0x00000001;
+ _wrmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), hi, lo);
+ }
+}
+
+static void fl2f_shutdown(void)
+{
+ u32 hi, lo, val;
+ int gpio_base;
+
+ /* get gpio base */
+ _rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_GPIO), &hi, &lo);
+ gpio_base = lo & 0xff00;
+
+ /* make cs5536 gpio13 output enable */
+ val = inl(gpio_base + GPIOL_OUT_EN);
+ val &= ~(1 << (16 + 13));
+ val |= (1 << 13);
+ outl(val, gpio_base + GPIOL_OUT_EN);
+ mmiowb();
+ /* make cs5536 gpio13 output low level voltage. */
+ val = inl(gpio_base + GPIOL_OUT_VAL) & ~(1 << (13));
+ val |= (1 << (16 + 13));
+ outl(val, gpio_base + GPIOL_OUT_VAL);
+ mmiowb();
+}
+
+/* reset support for yeeloong2f and mengloong2f notebook */
+
+void ml2f_reboot(void)
+{
+ reset_cpu();
+
+ /* send a reset signal to the EC (embedded controller) */
+ ec_write(REG_RESET, BIT_RESET_ON);
+}
+
+#define yl2f89_reboot ml2f_reboot
+
+/* the menglong (7-inch) laptop has different shutdown logic from the 8.9-inch one */
+#define EC_SHUTDOWN_IO_PORT_HIGH 0xff2d
+#define EC_SHUTDOWN_IO_PORT_LOW 0xff2e
+#define EC_SHUTDOWN_IO_PORT_DATA 0xff2f
+#define REG_SHUTDOWN_HIGH 0xFC
+#define REG_SHUTDOWN_LOW 0x29
+#define BIT_SHUTDOWN_ON (1 << 1)
+
+static void ml2f_shutdown(void)
+{
+ u8 val;
+ u64 i;
+
+ outb(REG_SHUTDOWN_HIGH, EC_SHUTDOWN_IO_PORT_HIGH);
+ outb(REG_SHUTDOWN_LOW, EC_SHUTDOWN_IO_PORT_LOW);
+ mmiowb();
+ val = inb(EC_SHUTDOWN_IO_PORT_DATA);
+ outb(val & (~BIT_SHUTDOWN_ON), EC_SHUTDOWN_IO_PORT_DATA);
+ mmiowb();
+ /* need to wait long enough here; how many microseconds are needed? */
+ for (i = 0; i < 0x10000; i++)
+ delay();
+ outb(val | BIT_SHUTDOWN_ON, EC_SHUTDOWN_IO_PORT_DATA);
+ mmiowb();
+}
+
+static void yl2f89_shutdown(void)
+{
+ /* cpu-gpio0 output low */
+ LOONGSON_GPIODATA &= ~0x00000001;
+ /* cpu-gpio0 as output */
+ LOONGSON_GPIOIE &= ~0x00000001;
+}
+
+void mach_prepare_reboot(void)
+{
+ switch (mips_machtype) {
+ case MACH_LEMOTE_FL2F:
+ case MACH_LEMOTE_NAS:
+ case MACH_LEMOTE_LL2F:
+ fl2f_reboot();
+ break;
+ case MACH_LEMOTE_ML2F7:
+ ml2f_reboot();
+ break;
+ case MACH_LEMOTE_YL2F89:
+ yl2f89_reboot();
+ break;
+ default:
+ break;
+ }
+}
+
+void mach_prepare_shutdown(void)
+{
+ switch (mips_machtype) {
+ case MACH_LEMOTE_FL2F:
+ case MACH_LEMOTE_NAS:
+ case MACH_LEMOTE_LL2F:
+ fl2f_shutdown();
+ break;
+ case MACH_LEMOTE_ML2F7:
+ ml2f_shutdown();
+ break;
+ case MACH_LEMOTE_YL2F89:
+ yl2f89_shutdown();
+ break;
+ default:
+ break;
+ }
+}
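fl2f_shutdown() above drives CS5536 GPIO13 by touching both halves of a GPIO register: the low 16 bits carry the per-pin "set" bits and the high 16 bits the matching "clear" bits (this split is assumed from the CS5536 GPIO programming model, it is not spelled out elsewhere in the patch). A small helper expressing the same read-modify-write pattern:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Mirror of the GPIO13 manipulation done in fl2f_shutdown(): the low half
 * of the register holds the per-pin "set" bits, the high half the matching
 * "clear" bits (assumed CS5536 convention).
 */
static void cs5536_gpio_out(int gpio_base, int reg, int pin, int assert)
{
	u32 val = inl(gpio_base + reg);

	if (assert) {
		val |= 1 << pin;		/* set the pin bit */
		val &= ~(1 << (pin + 16));	/* drop its clear bit */
	} else {
		val &= ~(1 << pin);
		val |= 1 << (pin + 16);
	}
	outl(val, gpio_base + reg);
	mmiowb();
}

/* e.g. enabling GPIO13 as an output: cs5536_gpio_out(gpio_base, GPIOL_OUT_EN, 13, 1); */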
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 454b5392449..8f2f8e9d8b2 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -35,6 +35,7 @@
* better performance by compiling with -msoft-float!
*/
#include <linux/sched.h>
+#include <linux/module.h>
#include <linux/debugfs.h>
#include <asm/inst.h>
@@ -68,7 +69,9 @@ static int fpux_emu(struct pt_regs *,
/* Further private data for which no space exists in mips_fpu_struct */
-struct mips_fpu_emulator_stats fpuemustats;
+#ifdef CONFIG_DEBUG_FS
+DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
+#endif
/* Control registers */
@@ -209,7 +212,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
unsigned int cond;
if (get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
@@ -240,7 +243,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
return SIGILL;
}
if (get_user(ir, (mips_instruction __user *) emulpc)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
/* __compute_return_epc() will have updated cp0_epc */
@@ -253,16 +256,16 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
}
emul:
- fpuemustats.emulated++;
+ MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{
u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u64 val;
- fpuemustats.loads++;
+ MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
DITOREG(val, MIPSInst_RT(ir));
@@ -274,10 +277,10 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
MIPSInst_SIMM(ir));
u64 val;
- fpuemustats.stores++;
+ MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_RT(ir));
if (put_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
break;
@@ -288,9 +291,9 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
MIPSInst_SIMM(ir));
u32 val;
- fpuemustats.loads++;
+ MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
SITOREG(val, MIPSInst_RT(ir));
@@ -302,10 +305,10 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
MIPSInst_SIMM(ir));
u32 val;
- fpuemustats.stores++;
+ MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_RT(ir));
if (put_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
break;
@@ -429,7 +432,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx)
if (get_user(ir,
(mips_instruction __user *) xcp->cp0_epc)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
@@ -595,7 +598,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
{
unsigned rcsr = 0; /* resulting csr */
- fpuemustats.cp1xops++;
+ MIPS_FPU_EMU_INC_STATS(cp1xops);
switch (MIPSInst_FMA_FFMT(ir)) {
case s_fmt:{ /* 0 */
@@ -610,9 +613,9 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
- fpuemustats.loads++;
+ MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
SITOREG(val, MIPSInst_FD(ir));
@@ -622,11 +625,11 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
- fpuemustats.stores++;
+ MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_FS(ir));
if (put_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
break;
@@ -687,9 +690,9 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
- fpuemustats.loads++;
+ MIPS_FPU_EMU_INC_STATS(loads);
if (get_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
DITOREG(val, MIPSInst_FD(ir));
@@ -699,10 +702,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
- fpuemustats.stores++;
+ MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_FS(ir));
if (put_user(val, va)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
break;
@@ -769,7 +772,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
#endif
} rv; /* resulting value */
- fpuemustats.cp1ops++;
+ MIPS_FPU_EMU_INC_STATS(cp1ops);
switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
case s_fmt:{ /* 0 */
union {
@@ -1240,7 +1243,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
prevepc = xcp->cp0_epc;
if (get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
if (insn == 0)
@@ -1276,33 +1279,50 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
}
#ifdef CONFIG_DEBUG_FS
+
+static int fpuemu_stat_get(void *data, u64 *val)
+{
+ int cpu;
+ unsigned long sum = 0;
+ for_each_online_cpu(cpu) {
+ struct mips_fpu_emulator_stats *ps;
+ local_t *pv;
+ ps = &per_cpu(fpuemustats, cpu);
+ pv = (void *)ps + (unsigned long)data;
+ sum += local_read(pv);
+ }
+ *val = sum;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
+
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_fpuemu(void)
{
struct dentry *d, *dir;
- int i;
- static struct {
- const char *name;
- unsigned int *v;
- } vars[] __initdata = {
- { "emulated", &fpuemustats.emulated },
- { "loads", &fpuemustats.loads },
- { "stores", &fpuemustats.stores },
- { "cp1ops", &fpuemustats.cp1ops },
- { "cp1xops", &fpuemustats.cp1xops },
- { "errors", &fpuemustats.errors },
- };
if (!mips_debugfs_dir)
return -ENODEV;
dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir);
if (!dir)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(vars); i++) {
- d = debugfs_create_u32(vars[i].name, S_IRUGO, dir, vars[i].v);
- if (!d)
- return -ENOMEM;
- }
+
+#define FPU_STAT_CREATE(M) \
+ do { \
+ d = debugfs_create_file(#M , S_IRUGO, dir, \
+ (void *)offsetof(struct mips_fpu_emulator_stats, M), \
+ &fops_fpuemu_stat); \
+ if (!d) \
+ return -ENOMEM; \
+ } while (0)
+
+ FPU_STAT_CREATE(emulated);
+ FPU_STAT_CREATE(loads);
+ FPU_STAT_CREATE(stores);
+ FPU_STAT_CREATE(cp1ops);
+ FPU_STAT_CREATE(cp1xops);
+ FPU_STAT_CREATE(errors);
+
return 0;
}
__initcall(debugfs_fpuemu);
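The MIPS_FPU_EMU_INC_STATS() macro used throughout the hunks above is defined in a header outside this diff; with the counters now being per-CPU local_t values, the increment presumably looks something like the sketch below:

/* assumed shape of the increment macro; the real definition lives in
 * asm/fpu_emulator.h, outside this hunk */
#define MIPS_FPU_EMU_INC_STATS(M)					\
do {									\
	preempt_disable();						\
	__local_inc(&__get_cpu_var(fpuemustats).M);			\
	preempt_enable();						\
} while (0)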
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index df7b9d928ef..36d975ae08f 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -98,7 +98,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
err |= __put_user(cpc, &fr->epc);
if (unlikely(err)) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return SIGBUS;
}
@@ -136,7 +136,7 @@ int do_dsemulret(struct pt_regs *xcp)
err |= __get_user(cookie, &fr->cookie);
if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
- fpuemustats.errors++;
+ MIPS_FPU_EMU_INC_STATS(errors);
return 0;
}
diff --git a/arch/mips/mipssim/Makefile b/arch/mips/mipssim/Makefile
index 57f43c1c788..41b96571315 100644
--- a/arch/mips/mipssim/Makefile
+++ b/arch/mips/mipssim/Makefile
@@ -17,8 +17,7 @@
# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
#
-obj-y := sim_platform.o sim_setup.o sim_mem.o sim_time.o sim_int.o \
- sim_cmdline.o
+obj-y := sim_platform.o sim_setup.o sim_mem.o sim_time.o sim_int.o
obj-$(CONFIG_EARLY_PRINTK) += sim_console.o
obj-$(CONFIG_MIPS_MT_SMTC) += sim_smtc.o
diff --git a/arch/mips/mipssim/sim_setup.c b/arch/mips/mipssim/sim_setup.c
index 2877675c5f0..0824f6af477 100644
--- a/arch/mips/mipssim/sim_setup.c
+++ b/arch/mips/mipssim/sim_setup.c
@@ -61,7 +61,6 @@ void __init prom_init(void)
set_io_port_base(0xbfd00000);
pr_info("\nLINUX started...\n");
- prom_init_cmdline();
prom_meminit();
#ifdef CONFIG_MIPS_MT_SMP
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 694d51f523d..102b2dfa542 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -194,7 +194,7 @@ void __devinit cpu_cache_init(void)
int __weak __uncached_access(struct file *file, unsigned long addr)
{
- if (file->f_flags & O_SYNC)
+ if (file->f_flags & O_DSYNC)
return 1;
return addr >= __pa(high_memory);
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 1bd1f18ac23..3571090ba17 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -567,13 +567,10 @@ static uint32_t extract_dc(unsigned short addr, int data)
datalo = ((unsigned long long)datalohi << 32) | datalolo;
ecc = dc_ecc(datalo);
if (ecc != datahi) {
- int bits = 0;
+ int bits;
bad_ecc |= 1 << (3-offset);
ecc ^= datahi;
- while (ecc) {
- if (ecc & 1) bits++;
- ecc >>= 1;
- }
+ bits = hweight8(ecc);
res |= (bits == 1) ? CP0_CERRD_DATA_SBE : CP0_CERRD_DATA_DBE;
}
printk(" %02X-%016llX", datahi, datalo);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 8d1f4f36304..9e8d00389ee 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -462,7 +462,9 @@ void __init_refok free_initmem(void)
__pa_symbol(&__init_end));
}
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
+#endif
/*
* On 64-bit we've got three-level pagetables with a slightly
* different layout ...
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index bb1719a55d2..3d0baa4a842 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -160,6 +160,12 @@ static u32 tlb_handler[128] __cpuinitdata;
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+/*
+ * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and the absence of pgd_current,
+ * so we cannot do R3000 under these circumstances.
+ */
+
/*
* The R3000 TLB handler is simple.
*/
@@ -199,6 +205,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
dump_handler((u32 *)ebase, 32);
}
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
* The R4000 TLB handler is much more complicated. We have two
@@ -497,8 +504,9 @@ static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr)
{
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
long pgdc = (long)pgd_current;
-
+#endif
/*
* The vmalloc handling is not in the hotpath.
*/
@@ -506,7 +514,15 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_il_bltz(p, r, tmp, label_vmalloc);
/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+ /*
+ * &pgd << 11 stored in CONTEXT [23..63].
+ */
+ UASM_i_MFC0(p, ptr, C0_CONTEXT);
+ uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
+ uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */
+ uasm_i_drotr(p, ptr, ptr, 11);
+#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
@@ -520,7 +536,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
*/
uasm_i_dmfc0(p, ptr, C0_CONTEXT);
uasm_i_dsrl(p, ptr, ptr, 23);
-#endif
+# endif
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_daddu(p, ptr, ptr, tmp);
uasm_i_dmfc0(p, tmp, C0_BADVADDR);
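
The new CONFIG_MIPS_PGD_C0_CONTEXT path keeps the page-table pointer as (&pgd << 11) in bits 23..63 of the CP0 CONTEXT register, as the comment above says. The dins/ori/drotr sequence clears the hardware-written low bits, drops in the 0x540 pattern and rotates it into the top bits, recreating what the comment calls an xkphys-cached pointer. A standalone sketch of that bit manipulation, assuming a 4 KiB-aligned pgd:

#include <stdint.h>
#include <stdio.h>

static uint64_t rotr64(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));
}

int main(void)
{
	uint64_t pgd_phys = 0x0000000001234000ULL;	/* 4 KiB aligned */
	uint64_t context = pgd_phys << 11;	/* written at context-switch time */
	uint64_t ptr;

	context |= 0x1ffff0;		/* pretend hardware filled BadVPN2 bits */

	ptr = context & ~((1ULL << 23) - 1);	/* uasm_i_dins(p, ptr, 0, 0, 23) */
	ptr |= 0x540;				/* uasm_i_ori(p, ptr, ptr, 0x540) */
	ptr = rotr64(ptr, 11);			/* uasm_i_drotr(p, ptr, ptr, 11) */

	/* prints 0xa800000001234000: pgd_phys seen through the xkphys window */
	printf("0x%016llx\n", (unsigned long long)ptr);
	return 0;
}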
@@ -1033,6 +1049,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
iPTE_LW(p, pte, ptr);
}
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
* R3000 style TLB load/store/modify handlers.
*/
@@ -1184,6 +1201,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
+#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
* R4000 style TLB load/store/modify handlers.
@@ -1400,6 +1418,7 @@ void __cpuinit build_tlb_refill_handler(void)
case CPU_TX3912:
case CPU_TX3922:
case CPU_TX3927:
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
build_r3000_tlb_refill_handler();
if (!run_once) {
build_r3000_tlb_load_handler();
@@ -1407,6 +1426,9 @@ void __cpuinit build_tlb_refill_handler(void)
build_r3000_tlb_modify_handler();
run_once++;
}
+#else
+ panic("No R3000 TLB refill handler");
+#endif
break;
case CPU_R6000:
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index f467199676a..0a165c5179a 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -60,11 +60,11 @@ enum opcode {
insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
- insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr,
- insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
+ insn_dsrl32, insn_drotr, insn_dsubu, insn_eret, insn_j, insn_jal,
+ insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
- insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
+ insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, insn_dins
};
struct insn {
@@ -104,6 +104,7 @@ static struct insn insn_table[] __cpuinitdata = {
{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
@@ -132,6 +133,7 @@ static struct insn insn_table[] __cpuinitdata = {
{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
{ insn_invalid, 0, 0 }
};
@@ -304,6 +306,12 @@ Ip_u2u1s3(op) \
build_insn(buf, insn##op, b, a, c); \
}
+#define I_u2u1msbu3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+ build_insn(buf, insn##op, b, a, c+d-1, c); \
+}
+
#define I_u1u2(op) \
Ip_u1u2(op) \
{ \
@@ -349,6 +357,7 @@ I_u2u1u3(_dsll32)
I_u2u1u3(_dsra)
I_u2u1u3(_dsrl)
I_u2u1u3(_dsrl32)
+I_u2u1u3(_drotr)
I_u3u1u2(_dsubu)
I_0(_eret)
I_u1(_j)
@@ -377,6 +386,7 @@ I_0(_tlbwi)
I_0(_tlbwr)
I_u3u1u2(_xor)
I_u2u1u3(_xori)
+I_u2u1msbu3(_dins);
/* Handle labels. */
void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
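
The I_u2u1msbu3 builder added above passes build_insn() the pair (c+d-1, c) because the new dins instruction is encoded with msb/lsb fields, while the uasm_i_dins() caller supplies a position and a size. A software model of the insert semantics, with the (pos, size) to (msb, lsb) conversion noted in comments; this is an illustration, not the uasm encoder:

#include <assert.h>
#include <stdint.h>

/* dins rt, rs, pos, size inserts the low 'size' bits of rs into rt at
 * bit 'pos'; the instruction word carries msb = pos + size - 1 and
 * lsb = pos, which is why build_insn() is handed c+d-1 and c. */
static uint64_t dins_model(uint64_t rt, uint64_t rs,
			   unsigned int pos, unsigned int size)
{
	uint64_t mask = (size < 64 ? (1ULL << size) : 0) - 1;

	return (rt & ~(mask << pos)) | ((rs & mask) << pos);
}

int main(void)
{
	/* Clear the low 23 bits, as in "uasm_i_dins(p, ptr, 0, 0, 23)". */
	assert(dins_model(0xffffffffffffffffULL, 0, 0, 23) ==
	       0xffffffffff800000ULL);
	return 0;
}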
diff --git a/arch/mips/mm/uasm.h b/arch/mips/mm/uasm.h
index c6d1e3dd82d..3d153edaa51 100644
--- a/arch/mips/mm/uasm.h
+++ b/arch/mips/mm/uasm.h
@@ -34,6 +34,11 @@ uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+#define Ip_u2u1msbu3(op) \
+void __cpuinit \
+uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
+ unsigned int d)
+
#define Ip_u1u2(op) \
void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
@@ -65,6 +70,7 @@ Ip_u2u1u3(_dsll32);
Ip_u2u1u3(_dsra);
Ip_u2u1u3(_dsrl);
Ip_u2u1u3(_dsrl32);
+Ip_u2u1u3(_drotr);
Ip_u3u1u2(_dsubu);
Ip_0(_eret);
Ip_u1(_j);
@@ -93,6 +99,7 @@ Ip_0(_tlbwi);
Ip_0(_tlbwr);
Ip_u3u1u2(_xor);
Ip_u2u1u3(_xori);
+Ip_u2u1msbu3(_dins);
/* Handle labels. */
struct uasm_label {
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 9035c64bc5e..b27419c8491 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -55,7 +55,7 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
char *memsize_str;
unsigned int memsize;
char *ptr;
- static char cmdline[CL_SIZE] __initdata;
+ static char cmdline[COMMAND_LINE_SIZE] __initdata;
/* otherwise look in the environment */
memsize_str = prom_getenv("memsize");
diff --git a/arch/mips/nxp/pnx833x/common/interrupts.c b/arch/mips/nxp/pnx833x/common/interrupts.c
index 30533ba200e..3a467c04f81 100644
--- a/arch/mips/nxp/pnx833x/common/interrupts.c
+++ b/arch/mips/nxp/pnx833x/common/interrupts.c
@@ -295,7 +295,7 @@ static int pnx833x_set_type_gpio_irq(unsigned int irq, unsigned int flow_type)
}
static struct irq_chip pnx833x_pic_irq_type = {
- .typename = "PNX-PIC",
+ .name = "PNX-PIC",
.startup = pnx833x_startup_pic_irq,
.shutdown = pnx833x_shutdown_pic_irq,
.enable = pnx833x_enable_pic_irq,
@@ -305,7 +305,7 @@ static struct irq_chip pnx833x_pic_irq_type = {
};
static struct irq_chip pnx833x_gpio_irq_type = {
- .typename = "PNX-GPIO",
+ .name = "PNX-GPIO",
.startup = pnx833x_startup_gpio_irq,
.shutdown = pnx833x_disable_gpio_irq,
.enable = pnx833x_enable_gpio_irq,
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index 575cd147347..475ff46712a 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -1,7 +1,7 @@
/*
* Loongson2 performance counter driver for oprofile
*
- * Copyright (C) 2009 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2009 Lemote Inc.
* Author: Yanhua <yanh@lemote.com>
* Author: Wu Zhangjin <wuzj@lemote.com>
*
@@ -125,6 +125,9 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
*/
/* Check whether the irq belongs to me */
+ enabled = read_c0_perfcnt() & LOONGSON2_PERFCNT_INT_EN;
+ if (!enabled)
+ return IRQ_NONE;
enabled = reg.cnt1_enabled | reg.cnt2_enabled;
if (!enabled)
return IRQ_NONE;
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 91bfe73a7f6..c9209ca6c8e 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -22,13 +22,13 @@ obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
#
# These are still pretty much in the old state, watch, go blind.
#
-obj-$(CONFIG_BASLER_EXCITE) += ops-titan.o pci-excite.o fixup-excite.o
obj-$(CONFIG_LASAT) += pci-lasat.o
obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o
obj-$(CONFIG_SOC_AU1500) += fixup-au1000.o ops-au1000.o
obj-$(CONFIG_SOC_AU1550) += fixup-au1000.o ops-au1000.o
obj-$(CONFIG_SOC_PNX8550) += fixup-pnx8550.o ops-pnx8550.o
-obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-bonito64.o
+obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
+obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o
obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o
obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
diff --git a/arch/mips/pci/fixup-excite.c b/arch/mips/pci/fixup-excite.c
deleted file mode 100644
index cd64d9f177c..00000000000
--- a/arch/mips/pci/fixup-excite.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2004 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <excite.h>
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- if (pin == 0)
- return -1;
-
- return USB_IRQ; /* USB controller is the only PCI device */
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c
index 0c4c7a81213..4f6d8da07f9 100644
--- a/arch/mips/pci/fixup-fuloong2e.c
+++ b/arch/mips/pci/fixup-fuloong2e.c
@@ -13,7 +13,8 @@
*/
#include <linux/init.h>
#include <linux/pci.h>
-#include <asm/mips-boards/bonito64.h>
+
+#include <loongson.h>
/* South bridge slot number is set by the pci probe process */
static u8 sb_slot = 5;
@@ -35,7 +36,7 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
break;
}
} else {
- irq = BONITO_IRQ_BASE + 25 + pin;
+ irq = LOONGSON_IRQ_BASE + 25 + pin;
}
return irq;
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c
new file mode 100644
index 00000000000..caf2edeb02f
--- /dev/null
+++ b/arch/mips/pci/fixup-lemote2f.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2008 Lemote Technology
+ * Copyright (C) 2004 ICT CAS
+ * Author: Li xiaoyu, lixy@ict.ac.cn
+ *
+ * Copyright (C) 2007 Lemote, Inc.
+ * Author: Fuxin Zhang, zhangfx@lemote.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <loongson.h>
+#include <cs5536/cs5536.h>
+#include <cs5536/cs5536_pci.h>
+
+/* PCI interrupt pins
+ *
+ * These should not be changed unless you also account for the loongson2f
+ * interrupt register and the way your PCI cards are dispatched.
+ */
+
+#define PCIA 4
+#define PCIB 5
+#define PCIC 6
+#define PCID 7
+
+/* All the PCI devices have the PCIA pin; check the datasheet. */
+static char irq_tab[][5] __initdata = {
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0}, /* 11: Unused */
+ {0, 0, 0, 0, 0}, /* 12: Unused */
+ {0, 0, 0, 0, 0}, /* 13: Unused */
+ {0, 0, 0, 0, 0}, /* 14: Unused */
+ {0, 0, 0, 0, 0}, /* 15: Unused */
+ {0, 0, 0, 0, 0}, /* 16: Unused */
+ {0, PCIA, 0, 0, 0}, /* 17: RTL8110-0 */
+ {0, PCIB, 0, 0, 0}, /* 18: RTL8110-1 */
+ {0, PCIC, 0, 0, 0}, /* 19: SiI3114 */
+ {0, PCID, 0, 0, 0}, /* 20: 3-ports nec usb */
+ {0, PCIA, PCIB, PCIC, PCID}, /* 21: PCI-SLOT */
+ {0, 0, 0, 0, 0}, /* 22: Unused */
+ {0, 0, 0, 0, 0}, /* 23: Unused */
+ {0, 0, 0, 0, 0}, /* 24: Unused */
+ {0, 0, 0, 0, 0}, /* 25: Unused */
+ {0, 0, 0, 0, 0}, /* 26: Unused */
+ {0, 0, 0, 0, 0}, /* 27: Unused */
+};
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int virq;
+
+ if ((PCI_SLOT(dev->devfn) != PCI_IDSEL_CS5536)
+ && (PCI_SLOT(dev->devfn) < 32)) {
+ virq = irq_tab[slot][pin];
+ printk(KERN_INFO "slot: %d, pin: %d, irq: %d\n", slot, pin,
+ virq + LOONGSON_IRQ_BASE);
+ if (virq != 0)
+ return LOONGSON_IRQ_BASE + virq;
+ else
+ return 0;
+ } else if (PCI_SLOT(dev->devfn) == PCI_IDSEL_CS5536) { /* cs5536 */
+ switch (PCI_FUNC(dev->devfn)) {
+ case 2:
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ CS5536_IDE_INTR);
+ return CS5536_IDE_INTR; /* for IDE */
+ case 3:
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ CS5536_ACC_INTR);
+ return CS5536_ACC_INTR; /* for AUDIO */
+ case 4: /* for OHCI */
+ case 5: /* for EHCI */
+ case 6: /* for UDC */
+ case 7: /* for OTG */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
+ CS5536_USB_INTR);
+ return CS5536_USB_INTR;
+ }
+ return dev->irq;
+ } else {
+ printk(KERN_INFO " strange pci slot number.\n");
+ return 0;
+ }
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/* CS5536 SPEC. fixup */
+static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
+{
+ /* the uart1 and uart2 interrupts in the PIC are enabled by default */
+ pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
+ pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
+}
+
+static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
+{
+ /* setting the mutex pin as IDE function */
+ pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
+ CS5536_IDE_FLASH_SIGNATURE);
+}
+
+static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
+{
+ /* enable the AUDIO interrupt in PIC */
+ pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
+}
+
+static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
+{
+ /* enable the OHCI interrupt in PIC */
+ /* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
+ pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
+}
+
+static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
+{
+ u32 hi, lo;
+
+ /* Serial short detect enable */
+ _rdmsr(USB_MSR_REG(USB_CONFIG), &hi, &lo);
+ _wrmsr(USB_MSR_REG(USB_CONFIG), (1 << 1) | (1 << 2) | (1 << 3), lo);
+
+ /* setting the USB2.0 micro frame length */
+ pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
+}
+
+static void __init loongson_nec_fixup(struct pci_dev *pdev)
+{
+ unsigned int val;
+
+ pci_read_config_dword(pdev, 0xe0, &val);
+ /* Only 2 ports are used */
+ pci_write_config_dword(pdev, 0xe0, (val & ~3) | 0x2);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA,
+ loongson_cs5536_isa_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_OHC,
+ loongson_cs5536_ohci_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_EHC,
+ loongson_cs5536_ehci_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_AUDIO,
+ loongson_cs5536_acc_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE,
+ loongson_cs5536_ide_fixup);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
+ loongson_nec_fixup);
diff --git a/arch/mips/pci/ops-bonito64.c b/arch/mips/pci/ops-bonito64.c
index 54e55e7a243..1b3e03f20c5 100644
--- a/arch/mips/pci/ops-bonito64.c
+++ b/arch/mips/pci/ops-bonito64.c
@@ -29,13 +29,8 @@
#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
-#ifdef CONFIG_LEMOTE_FULOONG2E
-#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(BONITO_PCICFG_BASE | (offset))
-#define ID_SEL_BEGIN 11
-#else
#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset))
#define ID_SEL_BEGIN 10
-#endif
#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)
@@ -77,10 +72,8 @@ static int bonito64_pcibios_config_access(unsigned char access_type,
addrp = CFG_SPACE_REG(addr & 0xffff);
if (access_type == PCI_ACCESS_WRITE) {
writel(cpu_to_le32(*data), addrp);
-#ifndef CONFIG_LEMOTE_FULOONG2E
/* Wait till done */
while (BONITO_PCIMSTAT & 0xF);
-#endif
} else {
*data = le32_to_cpu(readl(addrp));
}
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
new file mode 100644
index 00000000000..aa5d3da2721
--- /dev/null
+++ b/arch/mips/pci/ops-loongson2.c
@@ -0,0 +1,208 @@
+/*
+ * fuloong2e specific PCI support.
+ *
+ * Copyright (C) 1999, 2000, 2004 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ *
+ * Copyright (C) 2009 Lemote Inc.
+ * Author: Wu Zhangjin <wuzj@lemote.com>
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <loongson.h>
+
+#ifdef CONFIG_CS5536
+#include <cs5536/cs5536_pci.h>
+#include <cs5536/cs5536.h>
+#endif
+
+#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_WRITE 1
+
+#define CFG_SPACE_REG(offset) \
+ (void *)CKSEG1ADDR(LOONGSON_PCICFG_BASE | (offset))
+#define ID_SEL_BEGIN 11
+#define MAX_DEV_NUM (31 - ID_SEL_BEGIN)
+
+
+static int loongson_pcibios_config_access(unsigned char access_type,
+ struct pci_bus *bus,
+ unsigned int devfn, int where,
+ u32 *data)
+{
+ u32 busnum = bus->number;
+ u32 addr, type;
+ u32 dummy;
+ void *addrp;
+ int device = PCI_SLOT(devfn);
+ int function = PCI_FUNC(devfn);
+ int reg = where & ~3;
+
+ if (busnum == 0) {
+ /* Board-specific part: currently only fuloong2f and yeeloong2f
+ * use the CS5536; fuloong2e uses a via686b and gdium has no
+ * south bridge
+ */
+#ifdef CONFIG_CS5536
+ /* cs5536_pci_conf_read4/write4() will call _rdmsr/_wrmsr() to
+ * access the registers PCI_MSR_ADDR, PCI_MSR_DATA_LO and
+ * PCI_MSR_DATA_HI, which are all bigger than PCI_MSR_CTRL, so those
+ * accesses take the other branch rather than this one; there is no
+ * recursive dead loop here.
+ */
+ if ((PCI_IDSEL_CS5536 == device) && (reg < PCI_MSR_CTRL)) {
+ switch (access_type) {
+ case PCI_ACCESS_READ:
+ *data = cs5536_pci_conf_read4(function, reg);
+ break;
+ case PCI_ACCESS_WRITE:
+ cs5536_pci_conf_write4(function, reg, *data);
+ break;
+ }
+ return 0;
+ }
+#endif
+ /* Type 0 configuration for onboard PCI bus */
+ if (device > MAX_DEV_NUM)
+ return -1;
+
+ addr = (1 << (device + ID_SEL_BEGIN)) | (function << 8) | reg;
+ type = 0;
+ } else {
+ /* Type 1 configuration for offboard PCI bus */
+ addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
+ type = 0x10000;
+ }
+
+ /* Clear aborts */
+ LOONGSON_PCICMD |= LOONGSON_PCICMD_MABORT_CLR | \
+ LOONGSON_PCICMD_MTABORT_CLR;
+
+ LOONGSON_PCIMAP_CFG = (addr >> 16) | type;
+
+ /* Flush Bonito register block */
+ dummy = LOONGSON_PCIMAP_CFG;
+ mmiowb();
+
+ addrp = CFG_SPACE_REG(addr & 0xffff);
+ if (access_type == PCI_ACCESS_WRITE)
+ writel(cpu_to_le32(*data), addrp);
+ else
+ *data = le32_to_cpu(readl(addrp));
+
+ /* Detect Master/Target abort */
+ if (LOONGSON_PCICMD & (LOONGSON_PCICMD_MABORT_CLR |
+ LOONGSON_PCICMD_MTABORT_CLR)) {
+ /* Error occurred */
+
+ /* Clear bits */
+ LOONGSON_PCICMD |= (LOONGSON_PCICMD_MABORT_CLR |
+ LOONGSON_PCICMD_MTABORT_CLR);
+
+ return -1;
+ }
+
+ return 0;
+
+}
+
+
+/*
+ * We can't address 8 and 16 bit words directly. Instead we have to
+ * read/write a 32bit word and mask/modify the data we actually want.
+ */
+static int loongson_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
+ &data))
+ return -1;
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int loongson_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data = 0;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (size == 4)
+ data = val;
+ else {
+ if (loongson_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
+ where, &data))
+ return -1;
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ }
+
+ if (loongson_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn, where,
+ &data))
+ return -1;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops loongson_pci_ops = {
+ .read = loongson_pcibios_read,
+ .write = loongson_pcibios_write
+};
+
+#ifdef CONFIG_CS5536
+void _rdmsr(u32 msr, u32 *hi, u32 *lo)
+{
+ struct pci_bus bus = {
+ .number = PCI_BUS_CS5536
+ };
+ u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
+ loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
+ loongson_pcibios_read(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
+}
+EXPORT_SYMBOL(_rdmsr);
+
+void _wrmsr(u32 msr, u32 hi, u32 lo)
+{
+ struct pci_bus bus = {
+ .number = PCI_BUS_CS5536
+ };
+ u32 devfn = PCI_DEVFN(PCI_IDSEL_CS5536, 0);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_ADDR, 4, msr);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_LO, 4, lo);
+ loongson_pcibios_write(&bus, devfn, PCI_MSR_DATA_HI, 4, hi);
+}
+EXPORT_SYMBOL(_wrmsr);
+#endif
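
The loongson_pcibios_read/write() helpers above emulate 8- and 16-bit config accesses with a shift/mask read-modify-write on the containing 32-bit word, since the hardware only moves aligned dwords. A standalone sketch of that arithmetic, with an in-memory variable standing in for real config space:

#include <assert.h>
#include <stdint.h>

static uint32_t config_dword = 0x11223344;	/* pretend register at 'where & ~3' */

static uint32_t cfg_read(int where, int size)
{
	uint32_t data = config_dword;
	unsigned int shift = (where & 3) << 3;

	if (size == 1)
		return (data >> shift) & 0xff;
	if (size == 2)
		return (data >> shift) & 0xffff;
	return data;
}

static void cfg_write(int where, int size, uint32_t val)
{
	unsigned int shift = (where & 3) << 3;
	uint32_t data = config_dword;

	if (size == 1)
		data = (data & ~(0xffu << shift)) | ((val & 0xff) << shift);
	else if (size == 2)
		data = (data & ~(0xffffu << shift)) | ((val & 0xffff) << shift);
	else
		data = val;
	config_dword = data;
}

int main(void)
{
	assert(cfg_read(1, 1) == 0x33);		/* byte 1 of 0x11223344 */
	cfg_write(2, 2, 0xbeef);
	assert(config_dword == 0xbeef3344);
	return 0;
}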
diff --git a/arch/mips/pci/pci-excite.c b/arch/mips/pci/pci-excite.c
deleted file mode 100644
index 8a56876afcc..00000000000
--- a/arch/mips/pci/pci-excite.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2004 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- * Based on the PMC-Sierra Yosemite board support by Ralf Baechle.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/bitops.h>
-#include <asm/rm9k-ocd.h>
-#include <excite.h>
-
-
-extern struct pci_ops titan_pci_ops;
-
-
-static struct resource
- mem_resource = {
- .name = "PCI memory",
- .start = EXCITE_PHYS_PCI_MEM,
- .end = EXCITE_PHYS_PCI_MEM + EXCITE_SIZE_PCI_MEM - 1,
- .flags = IORESOURCE_MEM
- },
- io_resource = {
- .name = "PCI I/O",
- .start = EXCITE_PHYS_PCI_IO,
- .end = EXCITE_PHYS_PCI_IO + EXCITE_SIZE_PCI_IO - 1,
- .flags = IORESOURCE_IO
- };
-
-
-static struct pci_controller bx_controller = {
- .pci_ops = &titan_pci_ops,
- .mem_resource = &mem_resource,
- .mem_offset = 0x00000000UL,
- .io_resource = &io_resource,
- .io_offset = 0x00000000UL
-};
-
-
-static char
- iopage_failed[] __initdata = "Cannot allocate PCI I/O page",
- modebits_no_pci[] __initdata = "PCI is not configured in mode bits";
-
-#define RM9000x2_OCD_HTSC 0x0604
-#define RM9000x2_OCD_HTBHL 0x060c
-#define RM9000x2_OCD_PCIHRST 0x078c
-
-#define RM9K_OCD_MODEBIT1 0x00d4 /* (MODEBIT1) Mode Bit 1 */
-#define RM9K_OCD_CPHDCR 0x00f4 /* CPU-PCI/HT Data Control. */
-
-#define PCISC_FB2B 0x00000200
-#define PCISC_MWICG 0x00000010
-#define PCISC_EMC 0x00000004
-#define PCISC_ERMA 0x00000002
-
-
-
-static int __init basler_excite_pci_setup(void)
-{
- const unsigned int fullbars = memsize / (256 << 20);
- unsigned int i;
-
- /* Check modebits to see if PCI is really enabled. */
- if (!((ocd_readl(RM9K_OCD_MODEBIT1) >> (47-32)) & 0x1))
- panic(modebits_no_pci);
-
- if (NULL == request_mem_region(EXCITE_PHYS_PCI_IO, EXCITE_SIZE_PCI_IO,
- "Memory-mapped PCI I/O page"))
- panic(iopage_failed);
-
- /* Enable PCI 0 as master for config cycles */
- ocd_writel(PCISC_EMC | PCISC_ERMA, RM9000x2_OCD_HTSC);
-
-
- /* Set up latency timer */
- ocd_writel(0x8008, RM9000x2_OCD_HTBHL);
-
- /* Setup host IO and Memory space */
- ocd_writel((EXCITE_PHYS_PCI_IO >> 4) | 1, LKB7);
- ocd_writel(((EXCITE_SIZE_PCI_IO >> 4) & 0x7fffff00) - 0x100, LKM7);
- ocd_writel((EXCITE_PHYS_PCI_MEM >> 4) | 1, LKB8);
- ocd_writel(((EXCITE_SIZE_PCI_MEM >> 4) & 0x7fffff00) - 0x100, LKM8);
-
- /* Set up PCI BARs to map all installed memory */
- for (i = 0; i < 6; i++) {
- const unsigned int bar = 0x610 + i * 4;
-
- if (i < fullbars) {
- ocd_writel(0x10000000 * i, bar);
- ocd_writel(0x01000000 * i, bar + 0x140);
- ocd_writel(0x0ffff029, bar + 0x100);
- continue;
- }
-
- if (i == fullbars) {
- int o;
- u32 mask;
-
- const unsigned long rem = memsize - i * 0x10000000;
- if (!rem) {
- ocd_writel(0x00000000, bar + 0x100);
- continue;
- }
-
- o = ffs(rem) - 1;
- if (rem & ~(0x1 << o))
- o++;
- mask = ((0x1 << o) & 0x0ffff000) - 0x1000;
- ocd_writel(0x10000000 * i, bar);
- ocd_writel(0x01000000 * i, bar + 0x140);
- ocd_writel(0x00000029 | mask, bar + 0x100);
- continue;
- }
-
- ocd_writel(0x00000000, bar + 0x100);
- }
-
- /* Finally, enable the PCI interrupt */
-#if USB_IRQ > 7
- set_c0_intcontrol(1 << USB_IRQ);
-#else
- set_c0_status(1 << (USB_IRQ + 8));
-#endif
-
- ioport_resource.start = EXCITE_PHYS_PCI_IO;
- ioport_resource.end = EXCITE_PHYS_PCI_IO + EXCITE_SIZE_PCI_IO - 1;
- set_io_port_base((unsigned long) ioremap_nocache(EXCITE_PHYS_PCI_IO, EXCITE_SIZE_PCI_IO));
- register_pci_controller(&bx_controller);
- return 0;
-}
-
-
-arch_initcall(basler_excite_pci_setup);
diff --git a/arch/mips/powertv/Kconfig b/arch/mips/powertv/Kconfig
new file mode 100644
index 00000000000..ff0e7e3e695
--- /dev/null
+++ b/arch/mips/powertv/Kconfig
@@ -0,0 +1,21 @@
+source "arch/mips/powertv/asic/Kconfig"
+
+config BOOTLOADER_DRIVER
+ bool "PowerTV Bootloader Driver Support"
+ default n
+ depends on POWERTV
+ help
+ Use this option if you want to load the bootloader driver.
+
+config BOOTLOADER_FAMILY
+ string "POWERTV Bootloader Family string"
+ default "85"
+ depends on POWERTV && !BOOTLOADER_DRIVER
+ help
+ This value should be specified when the bootloader driver is disabled
+ and must be exactly two characters long. Families supported are:
+ R1 - RNG-100 R2 - RNG-200
+ A1 - Class A B1 - Class B
+ E1 - Class E F1 - Class F
+ 44 - 45xx 46 - 46xx
+ 85 - 85xx 86 - 86xx
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
new file mode 100644
index 00000000000..2c516718aff
--- /dev/null
+++ b/arch/mips/powertv/Makefile
@@ -0,0 +1,28 @@
+#
+# Carsten Langgaard, carstenl@mips.com
+# Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+#
+# Carsten Langgaard, carstenl@mips.com
+# Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+# Portions copyright (C) 2009 Cisco Systems, Inc.
+#
+# This program is free software; you can distribute it and/or modify it
+# under the terms of the GNU General Public License (Version 2) as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+# Makefile for the Cisco PowerTV-specific kernel interface routines
+# under Linux.
+#
+
+obj-y += cmdline.o init.o memory.o reset.o time.o powertv_setup.o asic/ pci/
+
+EXTRA_CFLAGS += -Wall -Werror
diff --git a/arch/mips/powertv/asic/Kconfig b/arch/mips/powertv/asic/Kconfig
new file mode 100644
index 00000000000..2016bfe94d6
--- /dev/null
+++ b/arch/mips/powertv/asic/Kconfig
@@ -0,0 +1,28 @@
+config MIN_RUNTIME_RESOURCES
+ bool "Support for minimum runtime resources"
+ default n
+ depends on POWERTV
+ help
+ Enables support for minimizing the number of (SA asic) runtime
+ resources that are preallocated by the kernel.
+
+config MIN_RUNTIME_DOCSIS
+ bool "Support for minimum DOCSIS resource"
+ default y
+ depends on MIN_RUNTIME_RESOURCES
+ help
+ Enables support for the preallocated DOCSIS resource.
+
+config MIN_RUNTIME_PMEM
+ bool "Support for minimum PMEM resource"
+ default y
+ depends on MIN_RUNTIME_RESOURCES
+ help
+ Enables support for the preallocated Memory resource.
+
+config MIN_RUNTIME_TFTP
+ bool "Support for minimum TFTP resource"
+ default y
+ depends on MIN_RUNTIME_RESOURCES
+ help
+ Enables support for the preallocated TFTP resource.
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
new file mode 100644
index 00000000000..bebfdcff044
--- /dev/null
+++ b/arch/mips/powertv/asic/Makefile
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2009 Scientific-Atlanta, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+obj-y += asic-calliope.o asic-cronus.o asic-zeus.o asic_devices.o asic_int.o \
+ irq_asic.o prealloc-calliope.o prealloc-cronus.o \
+ prealloc-cronuslite.o prealloc-zeus.o
+
+EXTRA_CFLAGS += -Wall -Werror
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
new file mode 100644
index 00000000000..03d3884c627
--- /dev/null
+++ b/arch/mips/powertv/asic/asic-calliope.c
@@ -0,0 +1,98 @@
+/*
+ * Locations of devices in the Calliope ASIC.
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ *
+ * Description: Defines the platform resources for the SA settop.
+ */
+
+#include <asm/mach-powertv/asic.h>
+
+const struct register_map calliope_register_map = {
+ .eic_slow0_strt_add = 0x800000,
+ .eic_cfg_bits = 0x800038,
+ .eic_ready_status = 0x80004c,
+
+ .chipver3 = 0xA00800,
+ .chipver2 = 0xA00804,
+ .chipver1 = 0xA00808,
+ .chipver0 = 0xA0080c,
+
+ /* The registers of IRBlaster */
+ .uart1_intstat = 0xA01800,
+ .uart1_inten = 0xA01804,
+ .uart1_config1 = 0xA01808,
+ .uart1_config2 = 0xA0180C,
+ .uart1_divisorhi = 0xA01810,
+ .uart1_divisorlo = 0xA01814,
+ .uart1_data = 0xA01818,
+ .uart1_status = 0xA0181C,
+
+ .int_stat_3 = 0xA02800,
+ .int_stat_2 = 0xA02804,
+ .int_stat_1 = 0xA02808,
+ .int_stat_0 = 0xA0280c,
+ .int_config = 0xA02810,
+ .int_int_scan = 0xA02818,
+ .ien_int_3 = 0xA02830,
+ .ien_int_2 = 0xA02834,
+ .ien_int_1 = 0xA02838,
+ .ien_int_0 = 0xA0283c,
+ .int_level_3_3 = 0xA02880,
+ .int_level_3_2 = 0xA02884,
+ .int_level_3_1 = 0xA02888,
+ .int_level_3_0 = 0xA0288c,
+ .int_level_2_3 = 0xA02890,
+ .int_level_2_2 = 0xA02894,
+ .int_level_2_1 = 0xA02898,
+ .int_level_2_0 = 0xA0289c,
+ .int_level_1_3 = 0xA028a0,
+ .int_level_1_2 = 0xA028a4,
+ .int_level_1_1 = 0xA028a8,
+ .int_level_1_0 = 0xA028ac,
+ .int_level_0_3 = 0xA028b0,
+ .int_level_0_2 = 0xA028b4,
+ .int_level_0_1 = 0xA028b8,
+ .int_level_0_0 = 0xA028bc,
+ .int_docsis_en = 0xA028F4,
+
+ .mips_pll_setup = 0x980000,
+ .usb_fs = 0x980030, /* -default 72800028- */
+ .test_bus = 0x9800CC,
+ .crt_spare = 0x9800d4,
+ .usb2_ohci_int_mask = 0x9A000c,
+ .usb2_strap = 0x9A0014,
+ .ehci_hcapbase = 0x9BFE00,
+ .ohci_hc_revision = 0x9BFC00,
+ .bcm1_bs_lmi_steer = 0x9E0004,
+ .usb2_control = 0x9E0054,
+ .usb2_stbus_obc = 0x9BFF00,
+ .usb2_stbus_mess_size = 0x9BFF04,
+ .usb2_stbus_chunk_size = 0x9BFF08,
+
+ .pcie_regs = 0x000000, /* -doesn't exist- */
+ .tim_ch = 0xA02C10,
+ .tim_cl = 0xA02C14,
+ .gpio_dout = 0xA02c20,
+ .gpio_din = 0xA02c24,
+ .gpio_dir = 0xA02c2C,
+ .watchdog = 0xA02c30,
+ .front_panel = 0x000000, /* -not used- */
+};
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
new file mode 100644
index 00000000000..5f4589c9f83
--- /dev/null
+++ b/arch/mips/powertv/asic/asic-cronus.c
@@ -0,0 +1,98 @@
+/*
+ * Locations of devices in the Cronus ASIC
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ *
+ * Description: Defines the platform resources for the SA settop.
+ */
+
+#include <asm/mach-powertv/asic.h>
+
+const struct register_map cronus_register_map = {
+ .eic_slow0_strt_add = 0x000000,
+ .eic_cfg_bits = 0x000038,
+ .eic_ready_status = 0x00004C,
+
+ .chipver3 = 0x2A0800,
+ .chipver2 = 0x2A0804,
+ .chipver1 = 0x2A0808,
+ .chipver0 = 0x2A080C,
+
+ /* The registers of IRBlaster */
+ .uart1_intstat = 0x2A1800,
+ .uart1_inten = 0x2A1804,
+ .uart1_config1 = 0x2A1808,
+ .uart1_config2 = 0x2A180C,
+ .uart1_divisorhi = 0x2A1810,
+ .uart1_divisorlo = 0x2A1814,
+ .uart1_data = 0x2A1818,
+ .uart1_status = 0x2A181C,
+
+ .int_stat_3 = 0x2A2800,
+ .int_stat_2 = 0x2A2804,
+ .int_stat_1 = 0x2A2808,
+ .int_stat_0 = 0x2A280C,
+ .int_config = 0x2A2810,
+ .int_int_scan = 0x2A2818,
+ .ien_int_3 = 0x2A2830,
+ .ien_int_2 = 0x2A2834,
+ .ien_int_1 = 0x2A2838,
+ .ien_int_0 = 0x2A283C,
+ .int_level_3_3 = 0x2A2880,
+ .int_level_3_2 = 0x2A2884,
+ .int_level_3_1 = 0x2A2888,
+ .int_level_3_0 = 0x2A288C,
+ .int_level_2_3 = 0x2A2890,
+ .int_level_2_2 = 0x2A2894,
+ .int_level_2_1 = 0x2A2898,
+ .int_level_2_0 = 0x2A289C,
+ .int_level_1_3 = 0x2A28A0,
+ .int_level_1_2 = 0x2A28A4,
+ .int_level_1_1 = 0x2A28A8,
+ .int_level_1_0 = 0x2A28AC,
+ .int_level_0_3 = 0x2A28B0,
+ .int_level_0_2 = 0x2A28B4,
+ .int_level_0_1 = 0x2A28B8,
+ .int_level_0_0 = 0x2A28BC,
+ .int_docsis_en = 0x2A28F4,
+
+ .mips_pll_setup = 0x1C0000,
+ .usb_fs = 0x1C0018,
+ .test_bus = 0x1C00CC,
+ .crt_spare = 0x1c00d4,
+ .usb2_ohci_int_mask = 0x20000C,
+ .usb2_strap = 0x200014,
+ .ehci_hcapbase = 0x21FE00,
+ .ohci_hc_revision = 0x1E0000,
+ .bcm1_bs_lmi_steer = 0x2E0008,
+ .usb2_control = 0x2E004C,
+ .usb2_stbus_obc = 0x21FF00,
+ .usb2_stbus_mess_size = 0x21FF04,
+ .usb2_stbus_chunk_size = 0x21FF08,
+
+ .pcie_regs = 0x220000,
+ .tim_ch = 0x2A2C10,
+ .tim_cl = 0x2A2C14,
+ .gpio_dout = 0x2A2C20,
+ .gpio_din = 0x2A2C24,
+ .gpio_dir = 0x2A2C2C,
+ .watchdog = 0x2A2C30,
+ .front_panel = 0x2A3800,
+};
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
new file mode 100644
index 00000000000..1469daab920
--- /dev/null
+++ b/arch/mips/powertv/asic/asic-zeus.c
@@ -0,0 +1,98 @@
+/*
+ * Locations of devices in the Zeus ASIC
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ *
+ * Description: Defines the platform resources for the SA settop.
+ */
+
+#include <asm/mach-powertv/asic.h>
+
+const struct register_map zeus_register_map = {
+ .eic_slow0_strt_add = 0x000000,
+ .eic_cfg_bits = 0x000038,
+ .eic_ready_status = 0x00004c,
+
+ .chipver3 = 0x280800,
+ .chipver2 = 0x280804,
+ .chipver1 = 0x280808,
+ .chipver0 = 0x28080c,
+
+ /* The registers of IRBlaster */
+ .uart1_intstat = 0x281800,
+ .uart1_inten = 0x281804,
+ .uart1_config1 = 0x281808,
+ .uart1_config2 = 0x28180C,
+ .uart1_divisorhi = 0x281810,
+ .uart1_divisorlo = 0x281814,
+ .uart1_data = 0x281818,
+ .uart1_status = 0x28181C,
+
+ .int_stat_3 = 0x282800,
+ .int_stat_2 = 0x282804,
+ .int_stat_1 = 0x282808,
+ .int_stat_0 = 0x28280c,
+ .int_config = 0x282810,
+ .int_int_scan = 0x282818,
+ .ien_int_3 = 0x282830,
+ .ien_int_2 = 0x282834,
+ .ien_int_1 = 0x282838,
+ .ien_int_0 = 0x28283c,
+ .int_level_3_3 = 0x282880,
+ .int_level_3_2 = 0x282884,
+ .int_level_3_1 = 0x282888,
+ .int_level_3_0 = 0x28288c,
+ .int_level_2_3 = 0x282890,
+ .int_level_2_2 = 0x282894,
+ .int_level_2_1 = 0x282898,
+ .int_level_2_0 = 0x28289c,
+ .int_level_1_3 = 0x2828a0,
+ .int_level_1_2 = 0x2828a4,
+ .int_level_1_1 = 0x2828a8,
+ .int_level_1_0 = 0x2828ac,
+ .int_level_0_3 = 0x2828b0,
+ .int_level_0_2 = 0x2828b4,
+ .int_level_0_1 = 0x2828b8,
+ .int_level_0_0 = 0x2828bc,
+ .int_docsis_en = 0x2828F4,
+
+ .mips_pll_setup = 0x1a0000,
+ .usb_fs = 0x1a0018,
+ .test_bus = 0x1a0238,
+ .crt_spare = 0x1a0090,
+ .usb2_ohci_int_mask = 0x1e000c,
+ .usb2_strap = 0x1e0014,
+ .ehci_hcapbase = 0x1FFE00,
+ .ohci_hc_revision = 0x1FFC00,
+ .bcm1_bs_lmi_steer = 0x2C0008,
+ .usb2_control = 0x2c01a0,
+ .usb2_stbus_obc = 0x1FFF00,
+ .usb2_stbus_mess_size = 0x1FFF04,
+ .usb2_stbus_chunk_size = 0x1FFF08,
+
+ .pcie_regs = 0x200000,
+ .tim_ch = 0x282C10,
+ .tim_cl = 0x282C14,
+ .gpio_dout = 0x282c20,
+ .gpio_din = 0x282c24,
+ .gpio_dir = 0x282c2C,
+ .watchdog = 0x282c30,
+ .front_panel = 0x283800,
+};
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
new file mode 100644
index 00000000000..bae82880b6b
--- /dev/null
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -0,0 +1,787 @@
+/*
+ * ASIC Device List Initialization
+ *
+ * Description: Defines the platform resources for the SA settop.
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ *
+ * Description: Defines the platform resources for the SA settop.
+ *
+ * NOTE: The bootloader allocates persistent memory at an address which is
+ * 16 MiB below the end of the highest address in KSEG0. All fixed
+ * address memory reservations must avoid this region.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/resource.h>
+#include <linux/serial_reg.h>
+#include <linux/io.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <linux/swap.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach-powertv/asic.h>
+#include <asm/mach-powertv/asic_regs.h>
+#include <asm/mach-powertv/interrupts.h>
+
+#ifdef CONFIG_BOOTLOADER_DRIVER
+#include <asm/mach-powertv/kbldr.h>
+#endif
+#include <asm/bootinfo.h>
+
+#define BOOTLDRFAMILY(byte1, byte0) (((byte1) << 8) | (byte0))
+
+/*
+ * Forward Prototypes
+ */
+static void pmem_setup_resource(void);
+
+/*
+ * Global Variables
+ */
+enum asic_type asic;
+
+unsigned int platform_features;
+unsigned int platform_family;
+const struct register_map *register_map;
+EXPORT_SYMBOL(register_map); /* Exported for testing */
+unsigned long asic_phy_base;
+unsigned long asic_base;
+EXPORT_SYMBOL(asic_base); /* Exported for testing */
+struct resource *gp_resources;
+static bool usb_configured;
+
+/*
+ * Not recommended for direct use; it is normally used by the kernel internally.
+ * Portable code should use interfaces such as ioremap, dma_map_single, etc.
+ */
+unsigned long phys_to_bus_offset;
+EXPORT_SYMBOL(phys_to_bus_offset);
+
+/*
+ *
+ * IO Resource Definition
+ *
+ */
+
+struct resource asic_resource = {
+ .name = "ASIC Resource",
+ .start = 0,
+ .end = ASIC_IO_SIZE,
+ .flags = IORESOURCE_MEM,
+};
+
+/*
+ *
+ * USB Host Resource Definition
+ *
+ */
+
+static struct resource ehci_resources[] = {
+ {
+ .parent = &asic_resource,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = irq_usbehci,
+ .end = irq_usbehci,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 ehci_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ehci_device = {
+ .name = "powertv-ehci",
+ .id = 0,
+ .num_resources = 2,
+ .resource = ehci_resources,
+ .dev = {
+ .dma_mask = &ehci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct resource ohci_resources[] = {
+ {
+ .parent = &asic_resource,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = irq_usbohci,
+ .end = irq_usbohci,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 ohci_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device ohci_device = {
+ .name = "powertv-ohci",
+ .id = 0,
+ .num_resources = 2,
+ .resource = ohci_resources,
+ .dev = {
+ .dma_mask = &ohci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+};
+
+static struct platform_device *platform_devices[] = {
+ &ehci_device,
+ &ohci_device,
+};
+
+/*
+ *
+ * Platform Configuration and Device Initialization
+ *
+ */
+static void __init fs_update(int pe, int md, int sdiv, int disable_div_by_3)
+{
+ int en_prg, byp, pwr, nsb, val;
+ int sout;
+
+ sout = 1;
+ en_prg = 1;
+ byp = 0;
+ nsb = 1;
+ pwr = 1;
+
+ val = ((sdiv << 29) | (md << 24) | (pe<<8) | (sout<<3) | (byp<<2) |
+ (nsb<<1) | (disable_div_by_3<<5));
+
+ asic_write(val, usb_fs);
+ asic_write(val | (en_prg<<4), usb_fs);
+ asic_write(val | (en_prg<<4) | pwr, usb_fs);
+}
+
+/*
+ * Allow override of bootloader-specified model
+ */
+static char __initdata cmdline[COMMAND_LINE_SIZE];
+
+#define FORCEFAMILY_PARAM "forcefamily"
+
+static __init int check_forcefamily(unsigned char forced_family[2])
+{
+ const char *p;
+
+ forced_family[0] = '\0';
+ forced_family[1] = '\0';
+
+ /* Check the command line for a forcefamily directive */
+ strncpy(cmdline, arcs_cmdline, COMMAND_LINE_SIZE - 1);
+ p = strstr(cmdline, FORCEFAMILY_PARAM);
+ if (p && (p != cmdline) && (*(p - 1) != ' '))
+ p = strstr(p, " " FORCEFAMILY_PARAM "=");
+
+ if (p) {
+ p += strlen(FORCEFAMILY_PARAM "=");
+
+ if (*p == '\0' || *(p + 1) == '\0' ||
+ (*(p + 2) != '\0' && *(p + 2) != ' '))
+ pr_err(FORCEFAMILY_PARAM " must be exactly two "
+ "characters long, ignoring value\n");
+
+ else {
+ forced_family[0] = *p;
+ forced_family[1] = *(p + 1);
+ }
+ }
+
+ return 0;
+}
+
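
check_forcefamily() above scans the kernel command line for a two-character forcefamily= value, and BOOTLDRFAMILY() packs the two characters into the 16-bit family code used by the switch below. A minimal standalone sketch of that parsing and packing; the command line here is a made-up example:

#include <stdio.h>
#include <string.h>

#define BOOTLDRFAMILY(byte1, byte0) (((byte1) << 8) | (byte0))

int main(void)
{
	const char *cmdline = "console=ttyS0 forcefamily=85 ro";
	const char *p = strstr(cmdline, "forcefamily=");
	char family[2] = { '\0', '\0' };

	if (p) {
		p += strlen("forcefamily=");
		/* accept exactly two characters, as the kernel code requires */
		if (p[0] != '\0' && p[1] != '\0' &&
		    (p[2] == '\0' || p[2] == ' ')) {
			family[0] = p[0];
			family[1] = p[1];
		}
	}

	/* prints 0x3835 for "85", matching the Kconfig default above */
	printf("family code: 0x%04X\n", BOOTLDRFAMILY(family[0], family[1]));
	return 0;
}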
+/*
+ * platform_set_family - determine major platform family type.
+ *
+ * Sets platform_family to the family type, or -1 if none is recognized.
+ *
+ */
+static __init noinline void platform_set_family(void)
+{
+#define BOOTLDRFAMILY(byte1, byte0) (((byte1) << 8) | (byte0))
+
+ unsigned char forced_family[2];
+ unsigned short bootldr_family;
+
+ check_forcefamily(forced_family);
+
+ if (forced_family[0] != '\0' && forced_family[1] != '\0')
+ bootldr_family = BOOTLDRFAMILY(forced_family[0],
+ forced_family[1]);
+ else {
+
+#ifdef CONFIG_BOOTLOADER_DRIVER
+ bootldr_family = (unsigned short) kbldr_GetSWFamily();
+#else
+#if defined(CONFIG_BOOTLOADER_FAMILY)
+ bootldr_family = (unsigned short) BOOTLDRFAMILY(
+ CONFIG_BOOTLOADER_FAMILY[0],
+ CONFIG_BOOTLOADER_FAMILY[1]);
+#else
+#error "Unknown Bootloader Family"
+#endif
+#endif
+ }
+
+ pr_info("Bootloader Family = 0x%04X\n", bootldr_family);
+
+ switch (bootldr_family) {
+ case BOOTLDRFAMILY('R', '1'):
+ platform_family = FAMILY_1500;
+ break;
+ case BOOTLDRFAMILY('4', '4'):
+ platform_family = FAMILY_4500;
+ break;
+ case BOOTLDRFAMILY('4', '6'):
+ platform_family = FAMILY_4600;
+ break;
+ case BOOTLDRFAMILY('A', '1'):
+ platform_family = FAMILY_4600VZA;
+ break;
+ case BOOTLDRFAMILY('8', '5'):
+ platform_family = FAMILY_8500;
+ break;
+ case BOOTLDRFAMILY('R', '2'):
+ platform_family = FAMILY_8500RNG;
+ break;
+ case BOOTLDRFAMILY('8', '6'):
+ platform_family = FAMILY_8600;
+ break;
+ case BOOTLDRFAMILY('B', '1'):
+ platform_family = FAMILY_8600VZB;
+ break;
+ case BOOTLDRFAMILY('E', '1'):
+ platform_family = FAMILY_1500VZE;
+ break;
+ case BOOTLDRFAMILY('F', '1'):
+ platform_family = FAMILY_1500VZF;
+ break;
+ default:
+ platform_family = -1;
+ }
+}
+
+unsigned int platform_get_family(void)
+{
+ return platform_family;
+}
+EXPORT_SYMBOL(platform_get_family);
+
+/*
+ * \brief usb_eye_configure() for optimizing the USB eye on Calliope.
+ *
+ * \param unsigned int value saved to the register.
+ *
+ * \return none
+ *
+ */
+static void __init usb_eye_configure(unsigned int value)
+{
+ asic_write(asic_read(crt_spare) | value, crt_spare);
+}
+
+/*
+ * platform_get_asic - determine the ASIC type.
+ *
+ * \param none
+ *
+ * \return ASIC type; ASIC_UNKNOWN if none
+ *
+ */
+enum asic_type platform_get_asic(void)
+{
+ return asic;
+}
+EXPORT_SYMBOL(platform_get_asic);
+
+/*
+ * platform_configure_usb - usb configuration based on platform type.
+ * @bcm1_usb2_ctl: value for the BCM1_USB2_CTL register, which is
+ * quirky
+ */
+static void __init platform_configure_usb(void)
+{
+ u32 bcm1_usb2_ctl;
+
+ if (usb_configured)
+ return;
+
+ switch (asic) {
+ case ASIC_ZEUS:
+ fs_update(0x0000, 0x11, 0x02, 0);
+ bcm1_usb2_ctl = 0x803;
+ break;
+
+ case ASIC_CRONUS:
+ case ASIC_CRONUSLITE:
+ fs_update(0x0000, 0x11, 0x02, 0);
+ bcm1_usb2_ctl = 0x803;
+ break;
+
+ case ASIC_CALLIOPE:
+ fs_update(0x0000, 0x11, 0x02, 1);
+
+ switch (platform_family) {
+ case FAMILY_1500VZE:
+ break;
+
+ case FAMILY_1500VZF:
+ usb_eye_configure(0x003c0000);
+ break;
+
+ default:
+ usb_eye_configure(0x00300000);
+ break;
+ }
+
+ bcm1_usb2_ctl = 0x803;
+ break;
+
+ default:
+ pr_err("Unknown ASIC type: %d\n", asic);
+ break;
+ }
+
+ /* turn on USB power */
+ asic_write(0, usb2_strap);
+ /* Enable all OHCI interrupts */
+ asic_write(bcm1_usb2_ctl, usb2_control);
+ /* USB2_STBUS_OBC store32/load32 */
+ asic_write(3, usb2_stbus_obc);
+ /* USB2_STBUS_MESS_SIZE 2 packets */
+ asic_write(1, usb2_stbus_mess_size);
+ /* USB2_STBUS_CHUNK_SIZE 2 packets */
+ asic_write(1, usb2_stbus_chunk_size);
+
+ usb_configured = true;
+}
+
+/*
+ * Set up the USB EHCI interface
+ */
+void platform_configure_usb_ehci()
+{
+ platform_configure_usb();
+}
+
+/*
+ * Set up the USB OHCI interface
+ */
+void platform_configure_usb_ohci()
+{
+ platform_configure_usb();
+}
+
+/*
+ * Shut the USB EHCI interface down--currently a NOP
+ */
+void platform_unconfigure_usb_ehci()
+{
+}
+
+/*
+ * Shut the USB OHCI interface down--currently a NOP
+ */
+void platform_unconfigure_usb_ohci()
+{
+}
+
+/**
+ * configure_platform - configuration based on platform type.
+ */
+void __init configure_platform(void)
+{
+ platform_set_family();
+
+ switch (platform_family) {
+ case FAMILY_1500:
+ case FAMILY_1500VZE:
+ case FAMILY_1500VZF:
+ platform_features = FFS_CAPABLE;
+ asic = ASIC_CALLIOPE;
+ asic_phy_base = CALLIOPE_IO_BASE;
+ register_map = &calliope_register_map;
+ asic_base = (unsigned long)ioremap_nocache(asic_phy_base,
+ ASIC_IO_SIZE);
+
+ if (platform_family == FAMILY_1500VZE) {
+ gp_resources = non_dvr_vze_calliope_resources;
+ pr_info("Platform: 1500/Vz Class E - "
+ "CALLIOPE, NON_DVR_CAPABLE\n");
+ } else if (platform_family == FAMILY_1500VZF) {
+ gp_resources = non_dvr_vzf_calliope_resources;
+ pr_info("Platform: 1500/Vz Class F - "
+ "CALLIOPE, NON_DVR_CAPABLE\n");
+ } else {
+ gp_resources = non_dvr_calliope_resources;
+ pr_info("Platform: 1500/RNG100 - CALLIOPE, "
+ "NON_DVR_CAPABLE\n");
+ }
+ break;
+
+ case FAMILY_4500:
+ platform_features = FFS_CAPABLE | PCIE_CAPABLE |
+ DISPLAY_CAPABLE;
+ asic = ASIC_ZEUS;
+ asic_phy_base = ZEUS_IO_BASE;
+ register_map = &zeus_register_map;
+ asic_base = (unsigned long)ioremap_nocache(asic_phy_base,
+ ASIC_IO_SIZE);
+ gp_resources = non_dvr_zeus_resources;
+
+ pr_info("Platform: 4500 - ZEUS, NON_DVR_CAPABLE\n");
+ break;
+
+ case FAMILY_4600:
+ {
+ unsigned int chipversion = 0;
+
+ /* The settop has PCIE but it isn't used, so don't advertise
+ * it */
+ platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
+ asic_phy_base = CRONUS_IO_BASE; /* same as Cronus */
+ register_map = &cronus_register_map; /* same as Cronus */
+ asic_base = (unsigned long)ioremap_nocache(asic_phy_base,
+ ASIC_IO_SIZE);
+ gp_resources = non_dvr_cronuslite_resources;
+
+ /* ASIC version will determine if this is a real CronusLite or
+ * Castrati(Cronus) */
+ chipversion = asic_read(chipver3) << 24;
+ chipversion |= asic_read(chipver2) << 16;
+ chipversion |= asic_read(chipver1) << 8;
+ chipversion |= asic_read(chipver0);
+
+ if ((chipversion == CRONUS_10) || (chipversion == CRONUS_11))
+ asic = ASIC_CRONUS;
+ else
+ asic = ASIC_CRONUSLITE;
+
+ pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
+ "chipversion=0x%08X\n",
+ (asic == ASIC_CRONUS) ? "CRONUS" : "CRONUS LITE",
+ chipversion);
+ break;
+ }
+ case FAMILY_4600VZA:
+ platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
+ asic = ASIC_CRONUS;
+ asic_phy_base = CRONUS_IO_BASE;
+ register_map = &cronus_register_map;
+ asic_base = (unsigned long)ioremap_nocache(asic_phy_base,
+ ASIC_IO_SIZE);
+ gp_resources = non_dvr_cronus_resources;
+
+ pr_info("Platform: Vz Class A - CRONUS, NON_DVR_CAPABLE\n");
+ break;
+
+ case FAMILY_8500:
+ case FAMILY_8500RNG:
+ platform_features = DVR_CAPABLE | PCIE_CAPABLE |
+ DISPLAY_CAPABLE;
+ asic = ASIC_ZEUS;
+ asic_phy_base = ZEUS_IO_BASE;
+ register_map = &zeus_register_map;
+ asic_base = (unsigned long)ioremap_nocache(asic_phy_base,
+ ASIC_IO_SIZE);
+ gp_resources = dvr_zeus_resources;
+
+ pr_info("Platform: 8500/RNG200 - ZEUS, DVR_CAPABLE\n");
+ break;
+
+ case FAMILY_8600:
+ case FAMILY_8600VZB:
+ platform_features = DVR_CAPABLE | PCIE_CAPABLE |
+ DISPLAY_CAPABLE;
+ asic = ASIC_CRONUS;
+ asic_phy_base = CRONUS_IO_BASE;
+ register_map = &cronus_register_map;
+ asic_base = (unsigned long)ioremap_nocache(asic_phy_base,
+ ASIC_IO_SIZE);
+ gp_resources = dvr_cronus_resources;
+
+ pr_info("Platform: 8600/Vz Class B - CRONUS, "
+ "DVR_CAPABLE\n");
+ break;
+
+ default:
+ pr_crit("Platform: UNKNOWN PLATFORM\n");
+ break;
+ }
+
+ switch (asic) {
+ case ASIC_ZEUS:
+ phys_to_bus_offset = 0x30000000;
+ break;
+ case ASIC_CALLIOPE:
+ phys_to_bus_offset = 0x10000000;
+ break;
+ case ASIC_CRONUSLITE:
+ /* Fall through */
+ case ASIC_CRONUS:
+ /*
+ * TODO: We suppose 0x10000000 aliases into 0x20000000-
+ * 0x2XXXXXXX. If 0x10000000 aliases into 0x60000000-
+ * 0x6XXXXXXX, the offset should be 0x50000000, not 0x10000000.
+ */
+ phys_to_bus_offset = 0x10000000;
+ break;
+ default:
+ phys_to_bus_offset = 0x00000000;
+ break;
+ }
+}
+
+/**
+ * platform_devices_init - sets up USB device resources.
+ */
+static int __init platform_devices_init(void)
+{
+ pr_notice("%s: ----- Initializing USB resources -----\n", __func__);
+
+ asic_resource.start = asic_phy_base;
+ asic_resource.end += asic_resource.start;
+
+ ehci_resources[0].start = asic_reg_phys_addr(ehci_hcapbase);
+ ehci_resources[0].end += ehci_resources[0].start;
+
+ ohci_resources[0].start = asic_reg_phys_addr(ohci_hc_revision);
+ ohci_resources[0].end += ohci_resources[0].start;
+
+ set_io_port_base(0);
+
+ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+
+ return 0;
+}
+
+arch_initcall(platform_devices_init);
+
+/*
+ *
+ * BOOTMEM ALLOCATION
+ *
+ */
+/*
+ * Allocates/reserves the Platform memory resources early in the boot process.
+ * This ignores any resources that are designated IORESOURCE_IO
+ */
+void __init platform_alloc_bootmem(void)
+{
+ int i;
+ int total = 0;
+
+ /* Get persistent memory data from command line before allocating
+ * resources. This needs to happen before normal command line parsing
+ * has been done. */
+ pmem_setup_resource();
+
+ /* Loop through looking for resources that want a particular address */
+ for (i = 0; gp_resources[i].flags != 0; i++) {
+ int size = gp_resources[i].end - gp_resources[i].start + 1;
+ if ((gp_resources[i].start != 0) &&
+ ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
+ reserve_bootmem(bus_to_phys(gp_resources[i].start),
+ size, 0);
+ total += gp_resources[i].end -
+ gp_resources[i].start + 1;
+ pr_info("reserve resource %s at %08x (%u bytes)\n",
+ gp_resources[i].name, gp_resources[i].start,
+ gp_resources[i].end -
+ gp_resources[i].start + 1);
+ }
+ }
+
+ /* Loop through assigning addresses for those that are left */
+ for (i = 0; gp_resources[i].flags != 0; i++) {
+ int size = gp_resources[i].end - gp_resources[i].start + 1;
+ if ((gp_resources[i].start == 0) &&
+ ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
+ void *mem = alloc_bootmem_pages(size);
+
+ if (mem == NULL)
+ pr_err("Unable to allocate bootmem pages "
+ "for %s\n", gp_resources[i].name);
+
+ else {
+ gp_resources[i].start =
+ phys_to_bus(virt_to_phys(mem));
+ gp_resources[i].end =
+ gp_resources[i].start + size - 1;
+ total += size;
+ pr_info("allocate resource %s at %08x "
+ "(%u bytes)\n",
+ gp_resources[i].name,
+ gp_resources[i].start, size);
+ }
+ }
+ }
+
+ pr_info("Total Platform driver memory allocation: 0x%08x\n", total);
+
+ /* indicate resources that are platform I/O related */
+ for (i = 0; gp_resources[i].flags != 0; i++) {
+ if ((gp_resources[i].start != 0) &&
+ ((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
+ pr_info("reserved platform resource %s at %08x\n",
+ gp_resources[i].name, gp_resources[i].start);
+ }
+ }
+}
+
+/*
+ *
+ * PERSISTENT MEMORY (PMEM) CONFIGURATION
+ *
+ */
+static unsigned long pmemaddr __initdata;
+
+static int __init early_param_pmemaddr(char *p)
+{
+ pmemaddr = (unsigned long)simple_strtoul(p, NULL, 0);
+ return 0;
+}
+early_param("pmemaddr", early_param_pmemaddr);
+
+static long pmemlen __initdata;
+
+static int __init early_param_pmemlen(char *p)
+{
+/* TODO: we can use this code when and if the bootloader ever changes this */
+#if 0
+ pmemlen = (unsigned long)simple_strtoul(p, NULL, 0);
+#else
+ pmemlen = 0x20000;
+#endif
+ return 0;
+}
+early_param("pmemlen", early_param_pmemlen);
+
+/*
+ * Set up persistent memory. If we were given values, we patch the array of
+ * resources. Otherwise, persistent memory may be allocated anywhere at all.
+ */
+static void __init pmem_setup_resource(void)
+{
+ struct resource *resource;
+ resource = asic_resource_get("DiagPersistentMemory");
+
+ if (resource && pmemaddr && pmemlen) {
+ /* The address provided by the bootloader is in kseg0. Convert it
+ * to a bus address. */
+ resource->start = phys_to_bus(pmemaddr - 0x80000000);
+ resource->end = resource->start + pmemlen - 1;
+
+ pr_info("persistent memory: start=0x%x end=0x%x\n",
+ resource->start, resource->end);
+ }
+}
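
The - 0x80000000 above is the standard MIPS KSEG0 virtual-to-physical conversion. Purely as an illustration (not a suggested change), the same line could be written with the CPHYSADDR() helper from <asm/addrspace.h>:

    /* equivalent, assuming pmemaddr really is a KSEG0 address */
    resource->start = phys_to_bus(CPHYSADDR(pmemaddr));
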
+
+/*
+ *
+ * RESOURCE ACCESS FUNCTIONS
+ *
+ */
+
+/**
+ * asic_resource_get - retrieves parameters for a platform resource.
+ * @name: string to match resource
+ *
+ * Returns a pointer to a struct resource corresponding to the given name.
+ *
+ * CANNOT BE NAMED platform_resource_get, which would be the obvious choice,
+ * as this function name is already declared
+ */
+struct resource *asic_resource_get(const char *name)
+{
+ int i;
+
+ for (i = 0; gp_resources[i].flags != 0; i++) {
+ if (strcmp(gp_resources[i].name, name) == 0)
+ return &gp_resources[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(asic_resource_get);
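
A driver would use this export to find its preallocated region by name. Hypothetical usage, for illustration only (the resource name is one of those defined in the prealloc tables added by this series):

    struct resource *res = asic_resource_get("AVMEMPartition0");

    if (res == NULL || res->start == 0)
            return -ENODEV;  /* not preallocated on this platform */
    pr_info("%s at %08x-%08x\n", res->name, res->start, res->end);
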
+
+/**
+ * platform_release_memory - release pre-allocated memory
+ * @ptr: pointer to memory to release
+ * @size: size of resource
+ *
+ * This must only be called for memory allocated or reserved via the boot
+ * memory allocator.
+ */
+void platform_release_memory(void *ptr, int size)
+{
+ unsigned long addr;
+ unsigned long end;
+
+ addr = ((unsigned long)ptr + (PAGE_SIZE - 1)) & PAGE_MASK;
+ end = ((unsigned long)ptr + size) & PAGE_MASK;
+
+ for (; addr < end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(__va(addr)));
+ init_page_count(virt_to_page(__va(addr)));
+ free_page((unsigned long)__va(addr));
+ }
+}
+EXPORT_SYMBOL(platform_release_memory);
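
Note that only whole pages are freed: the start of the region is rounded up to a page boundary and the end rounded down, so partial pages at either edge stay reserved. A worked example (assuming 4 KiB pages):

    /*
     * ptr = (void *)0x10000100, size = 0x3000
     *   addr = (0x10000100 + 0xfff) & PAGE_MASK = 0x10001000
     *   end  = (0x10000100 + 0x3000) & PAGE_MASK = 0x10003000
     * => the pages at 0x10001000 and 0x10002000 are freed; the
     *    partial pages at each end remain reserved.
     */
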
+
+/*
+ *
+ * FEATURE AVAILABILITY FUNCTIONS
+ *
+ */
+int platform_supports_dvr(void)
+{
+ return (platform_features & DVR_CAPABLE) != 0;
+}
+
+int platform_supports_ffs(void)
+{
+ return (platform_features & FFS_CAPABLE) != 0;
+}
+
+int platform_supports_pcie(void)
+{
+ return (platform_features & PCIE_CAPABLE) != 0;
+}
+
+int platform_supports_display(void)
+{
+ return (platform_features & DISPLAY_CAPABLE) != 0;
+}
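
Platform and driver code can then gate optional hardware on these helpers; a hypothetical probe-time check might look like:

    if (!platform_supports_pcie())
            return -ENODEV;  /* no PCIe on this box, skip bring-up */
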
diff --git a/arch/mips/powertv/asic/asic_int.c b/arch/mips/powertv/asic/asic_int.c
new file mode 100644
index 00000000000..80b2eed21ac
--- /dev/null
+++ b/arch/mips/powertv/asic/asic_int.c
@@ -0,0 +1,125 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
+ * Copyright (C) 2001 Ralf Baechle
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Routines for generic manipulation of the interrupts found on the PowerTV
+ * platform.
+ *
+ * The interrupt controller is located in the South Bridge, a PIIX4 device
+ * with two internal 82C95 interrupt controllers.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+
+#include <asm/irq_cpu.h>
+#include <linux/io.h>
+#include <asm/irq_regs.h>
+#include <asm/mips-boards/generic.h>
+
+#include <asm/mach-powertv/asic_regs.h>
+
+static DEFINE_SPINLOCK(asic_irq_lock);
+
+static inline int get_int(void)
+{
+ unsigned long flags;
+ int irq;
+
+ spin_lock_irqsave(&asic_irq_lock, flags);
+
+ irq = (asic_read(int_int_scan) >> 4) - 1;
+
+ if (irq == 0 || irq >= NR_IRQS)
+ irq = -1;
+
+ spin_unlock_irqrestore(&asic_irq_lock, flags);
+
+ return irq;
+}
+
+static void asic_irqdispatch(void)
+{
+ int irq;
+
+ irq = get_int();
+ if (irq < 0)
+ return; /* interrupt has already been cleared */
+
+ do_IRQ(irq);
+}
+
+static inline int clz(unsigned long x)
+{
+ __asm__(
+ " .set push \n"
+ " .set mips32 \n"
+ " clz %0, %1 \n"
+ " .set pop \n"
+ : "=r" (x)
+ : "r" (x));
+
+ return x;
+}
+
+/*
+ * Version of ffs that only looks at bits 12..15.
+ */
+static inline unsigned int irq_ffs(unsigned int pending)
+{
+ return fls(pending) - 1 + CAUSEB_IP;
+}
+
+/*
+ * TODO: check how it works under EIC mode.
+ */
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
+ int irq;
+
+ irq = irq_ffs(pending);
+
+ if (irq == CAUSEF_IP3)
+ asic_irqdispatch();
+ else if (irq >= 0)
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+}
+
+void __init arch_init_irq(void)
+{
+ int i;
+
+ asic_irq_init();
+
+ /*
+ * Initialize interrupt exception vectors.
+ */
+ if (cpu_has_veic || cpu_has_vint) {
+ int nvec = cpu_has_veic ? 64 : 8;
+ for (i = 0; i < nvec; i++)
+ set_vi_handler(i, asic_irqdispatch);
+ }
+}
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c
new file mode 100644
index 00000000000..b54d24499b0
--- /dev/null
+++ b/arch/mips/powertv/asic/irq_asic.c
@@ -0,0 +1,116 @@
+/*
+ * Portions copyright (C) 2005-2009 Scientific Atlanta
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * Modified from arch/mips/kernel/irq-rm7000.c:
+ * Copyright (C) 2003 Ralf Baechle
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/system.h>
+
+#include <asm/mach-powertv/asic_regs.h>
+
+static inline void unmask_asic_irq(unsigned int irq)
+{
+ unsigned long enable_bit;
+
+ enable_bit = (1 << (irq & 0x1f));
+
+ switch (irq >> 5) {
+ case 0:
+ asic_write(asic_read(ien_int_0) | enable_bit, ien_int_0);
+ break;
+ case 1:
+ asic_write(asic_read(ien_int_1) | enable_bit, ien_int_1);
+ break;
+ case 2:
+ asic_write(asic_read(ien_int_2) | enable_bit, ien_int_2);
+ break;
+ case 3:
+ asic_write(asic_read(ien_int_3) | enable_bit, ien_int_3);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void mask_asic_irq(unsigned int irq)
+{
+ unsigned long disable_mask;
+
+ disable_mask = ~(1 << (irq & 0x1f));
+
+ switch (irq >> 5) {
+ case 0:
+ asic_write(asic_read(ien_int_0) & disable_mask, ien_int_0);
+ break;
+ case 1:
+ asic_write(asic_read(ien_int_1) & disable_mask, ien_int_1);
+ break;
+ case 2:
+ asic_write(asic_read(ien_int_2) & disable_mask, ien_int_2);
+ break;
+ case 3:
+ asic_write(asic_read(ien_int_3) & disable_mask, ien_int_3);
+ break;
+ default:
+ BUG();
+ }
+}
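
Both helpers split the Linux irq number into an enable-register bank (irq >> 5) and a bit within that bank (irq & 0x1f). For example:

    /*
     * irq = 37:
     *   irq >> 5   = 1       -> ien_int_1
     *   irq & 0x1f = 5       -> bit 5
     *   enable_bit = 1 << 5  = 0x20
     */
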
+
+static struct irq_chip asic_irq_chip = {
+ .name = "ASIC Level",
+ .ack = mask_asic_irq,
+ .mask = mask_asic_irq,
+ .mask_ack = mask_asic_irq,
+ .unmask = unmask_asic_irq,
+ .eoi = unmask_asic_irq,
+};
+
+void __init asic_irq_init(void)
+{
+ int i;
+
+ /* set priority to 0 */
+ write_c0_status(read_c0_status() & ~(0x0000fc00));
+
+ asic_write(0, ien_int_0);
+ asic_write(0, ien_int_1);
+ asic_write(0, ien_int_2);
+ asic_write(0, ien_int_3);
+
+ asic_write(0x0fffffff, int_level_3_3);
+ asic_write(0xffffffff, int_level_3_2);
+ asic_write(0xffffffff, int_level_3_1);
+ asic_write(0xffffffff, int_level_3_0);
+ asic_write(0xffffffff, int_level_2_3);
+ asic_write(0xffffffff, int_level_2_2);
+ asic_write(0xffffffff, int_level_2_1);
+ asic_write(0xffffffff, int_level_2_0);
+ asic_write(0xffffffff, int_level_1_3);
+ asic_write(0xffffffff, int_level_1_2);
+ asic_write(0xffffffff, int_level_1_1);
+ asic_write(0xffffffff, int_level_1_0);
+ asic_write(0xffffffff, int_level_0_3);
+ asic_write(0xffffffff, int_level_0_2);
+ asic_write(0xffffffff, int_level_0_1);
+ asic_write(0xffffffff, int_level_0_0);
+
+ asic_write(0xf, int_int_scan);
+
+ /*
+ * Initialize interrupt handlers.
+ */
+ for (i = 0; i < NR_IRQS; i++)
+ set_irq_chip_and_handler(i, &asic_irq_chip, handle_level_irq);
+}
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
new file mode 100644
index 00000000000..cd5b76a1c95
--- /dev/null
+++ b/arch/mips/powertv/asic/prealloc-calliope.c
@@ -0,0 +1,620 @@
+/*
+ * Memory pre-allocations for Calliope boxes.
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ */
+
+#include <linux/init.h>
+#include <asm/mach-powertv/asic.h>
+
+/*
+ * NON_DVR_CAPABLE CALLIOPE RESOURCES
+ */
+struct resource non_dvr_calliope_resources[] __initdata =
+{
+ /*
+ * VIDEO / LX1
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x24200000 - 1, /*2MiB */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ST231aMonitor", /*8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24202000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ .end = 0x26700000 - 1, /*~36.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Sysaudio Driver
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * STAVEM driver/STAPI
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x00000000,
+ .end = 0x00600000 - 1, /* 6 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * DOCSIS Subsystem
+ */
+ {
+ .name = "Docsis",
+ .start = 0x22000000,
+ .end = 0x22700000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * GHW HAL Driver
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x22700000,
+ .end = 0x23500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * multi com buffer area
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x23700000,
+ .end = 0x23720000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * DMA Ring buffer (don't need recording buffers)
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Display bins buffer for unit0
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * AVFS: player HAL memory
+ *
+ *
+ */
+ {
+ .name = "AvfsDmaMem",
+ .start = 0x00000000,
+ .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * PMEM
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Smartcard
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x00000000,
+ .end = 0x2800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * NAND Flash
+ */
+ {
+ .name = "NandFlash",
+ .start = NAND_FLASH_BASE,
+ .end = NAND_FLASH_BASE + 0x400 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ * Synopsys GMAC Memory Region
+ */
+ {
+ .name = "GMAC",
+ .start = 0x00000000,
+ .end = 0x00010000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Add other resources here
+ *
+ */
+ { },
+};
+
+struct resource non_dvr_vz_calliope_resources[] __initdata =
+{
+ /*
+ * VIDEO / LX1
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x24200000 - 1, /*2 Meg */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ST231aMonitor", /* 8k block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24202000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x22202000,
+ .end = 0x22C20B85 - 1, /* 10.12 Meg */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Sysaudio Driver
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * STAVEM driver/STAPI
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x20300000,
+ .end = 0x20620000-1, /*3.125 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * GHW HAL Driver
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x20100000,
+ .end = 0x20300000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * multi com buffer area
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x23900000,
+ .end = 0x23920000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * DMA Ring buffer
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Display bins buffer for unit0
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * PMEM
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Smartcard
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x00000000,
+ .end = 0x2800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * NAND Flash
+ */
+ {
+ .name = "NandFlash",
+ .start = NAND_FLASH_BASE,
+ .end = NAND_FLASH_BASE+0x400 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ * Synopsys GMAC Memory Region
+ */
+ {
+ .name = "GMAC",
+ .start = 0x00000000,
+ .end = 0x00010000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Add other resources here
+ */
+ { },
+};
+
+struct resource non_dvr_vze_calliope_resources[] __initdata =
+{
+ /*
+ * VIDEO / LX1
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x22000000,
+ .end = 0x22200000 - 1, /*2 Meg */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ST231aMonitor", /* 8k block ST231a monitor */
+ .start = 0x22200000,
+ .end = 0x22202000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x22202000,
+ .end = 0x22C20B85 - 1, /* 10.12 Meg */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Sysaudio Driver
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * STAVEM driver/STAPI
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x20396000,
+ .end = 0x206B6000 - 1, /* 3.125 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * GHW HAL Driver
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x20100000,
+ .end = 0x20396000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * multi com buffer area
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x206B6000,
+ .end = 0x206D6000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * DMA Ring buffer
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Display bins buffer for unit0
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * PMEM
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Smartcard
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x00000000,
+ .end = 0x2800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * NAND Flash
+ */
+ {
+ .name = "NandFlash",
+ .start = NAND_FLASH_BASE,
+ .end = NAND_FLASH_BASE+0x400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Synopsys GMAC Memory Region
+ */
+ {
+ .name = "GMAC",
+ .start = 0x00000000,
+ .end = 0x00010000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Add other resources here
+ */
+ { },
+};
+
+struct resource non_dvr_vzf_calliope_resources[] __initdata =
+{
+ /*
+ * VIDEO / LX1
+ */
+ {
+ .name = "ST231aImage", /*Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x24200000 - 1, /*2MiB */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ST231aMonitor", /*8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24202000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ /* ~19.4MiB (21.5MiB - (2MiB + 8KiB)) */
+ .end = 0x25580000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Sysaudio Driver
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * STAVEM driver/STAPI
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x00000000,
+ .end = 0x00480000 - 1, /* 4.5 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * GHW HAL Driver
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x22700000,
+ .end = 0x23500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * multi com buffer area
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x23700000,
+ .end = 0x23720000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * DMA Ring buffer (don't need recording buffers)
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Display bins buffer for unit0
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Display bins buffer for unit1
+ */
+ {
+ .name = "DisplayBins1",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * AVFS: player HAL memory
+ *
+ *
+ */
+ {
+ .name = "AvfsDmaMem",
+ .start = 0x00000000,
+ .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * PMEM
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Smartcard
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x00000000,
+ .end = 0x2800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * NAND Flash
+ */
+ {
+ .name = "NandFlash",
+ .start = NAND_FLASH_BASE,
+ .end = NAND_FLASH_BASE + 0x400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Synopsys GMAC Memory Region
+ */
+ {
+ .name = "GMAC",
+ .start = 0x00000000,
+ .end = 0x00010000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Add other resources here
+ */
+ { },
+};
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
new file mode 100644
index 00000000000..45a5c3ea718
--- /dev/null
+++ b/arch/mips/powertv/asic/prealloc-cronus.c
@@ -0,0 +1,608 @@
+/*
+ * Memory pre-allocations for Cronus boxes.
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ */
+
+#include <linux/init.h>
+#include <asm/mach-powertv/asic.h>
+
+/*
+ * DVR_CAPABLE CRONUS RESOURCES
+ */
+struct resource dvr_cronus_resources[] __initdata =
+{
+ /*
+ *
+ * VIDEO1 / LX1
+ *
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x241FFFFF, /* 2MiB */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24201FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * VIDEO2 / LX2
+ *
+ */
+ {
+ .name = "ST231bImage", /* Delta-Mu 2 image and ram */
+ .start = 0x60000000,
+ .end = 0x601FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x60200000,
+ .end = 0x60201FFF,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "MediaMemory2",
+ .start = 0x60202000,
+ .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * Sysaudio Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * DSP_Image_Buff - DSP code and data images (1MB)
+ * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
+ * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
+ * ADSC_Main_Buff - ADSC Main buffer (16KB)
+ *
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * STAVEM driver/STAPI
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * This memory area is used for allocating buffers for Video decoding
+ * purposes. Allocation/De-allocation within this buffer is managed
+ * by the STAVMEM driver of the STAPI. They could be Decimated
+ * Picture Buffers, Intermediate Buffers, as deemed necessary for
+ * video decoding purposes, for any video decoders on Zeus.
+ *
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x63580000,
+ .end = 0x64180000 - 1, /* 12 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * DOCSIS Subsystem
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "Docsis",
+ .start = 0x62000000,
+ .end = 0x62700000 - 1, /* 7 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * GHW HAL Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * GraphicsHeap - PowerTV Graphics Heap
+ *
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x62700000,
+ .end = 0x63500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * multi com buffer area
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x26000000,
+ .end = 0x26020000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * DMA Ring buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x00280000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer for unit0
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit0
+ *
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit1
+ *
+ */
+ {
+ .name = "DisplayBins1",
+ .start = 0x64AD4000,
+ .end = 0x64AD5000 - 1, /* 4 KB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * ITFS
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "ITFS",
+ .start = 0x64180000,
+ /* 815,104 bytes each for 2 ITFS partitions. */
+ .end = 0x6430DFFF,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * AVFS
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "AvfsDmaMem",
+ .start = 0x6430E000,
+ /* (945K * 8) = (128K *3) 5 playbacks / 3 server */
+ .end = 0x64AD0000 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "AvfsFileSys",
+ .start = 0x64AD0000,
+ .end = 0x64AD1000 - 1, /* 4K */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * PMEM
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Persistent memory for diagnostics.
+ *
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Smartcard
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Read and write buffers for Internal/External cards
+ *
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x64AD1000,
+ .end = 0x64AD3800 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * KAVNET
+ * NP Reset Vector - must be of the form xxCxxxxx
+ * NP Image - must be video bank 1
+ * NP IPC - must be video bank 2
+ */
+ {
+ .name = "NP_Reset_Vector",
+ .start = 0x27c00000,
+ .end = 0x27c01000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "NP_Image",
+ .start = 0x27020000,
+ .end = 0x27060000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "NP_IPC",
+ .start = 0x63500000,
+ .end = 0x63580000 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ * Add other resources here
+ */
+ { },
+};
+
+/*
+ * NON_DVR_CAPABLE CRONUS RESOURCES
+ */
+struct resource non_dvr_cronus_resources[] __initdata =
+{
+ /*
+ *
+ * VIDEO1 / LX1
+ *
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x241FFFFF, /* 2MiB */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24201FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * VIDEO2 / LX2
+ *
+ */
+ {
+ .name = "ST231bImage", /* Delta-Mu 2 image and ram */
+ .start = 0x60000000,
+ .end = 0x601FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x60200000,
+ .end = 0x60201FFF,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "MediaMemory2",
+ .start = 0x60202000,
+ .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * Sysaudio Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * DSP_Image_Buff - DSP code and data images (1MB)
+ * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
+ * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
+ * ADSC_Main_Buff - ADSC Main buffer (16KB)
+ *
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * STAVEM driver/STAPI
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * This memory area is used for allocating buffers for Video decoding
+ * purposes. Allocation/De-allocation within this buffer is managed
+ * by the STAVMEM driver of the STAPI. They could be Decimated
+ * Picture Buffers, Intermediate Buffers, as deemed necessary for
+ * video decoding purposes, for any video decoders on Zeus.
+ *
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x63580000,
+ .end = 0x64180000 - 1, /* 12 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * DOCSIS Subsystem
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "Docsis",
+ .start = 0x62000000,
+ .end = 0x62700000 - 1, /* 7 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * GHW HAL Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * GraphicsHeap - PowerTV Graphics Heap
+ *
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x62700000,
+ .end = 0x63500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * multi com buffer area
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x26000000,
+ .end = 0x26020000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * DMA Ring buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer for unit0
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit0
+ *
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit1
+ *
+ */
+ {
+ .name = "DisplayBins1",
+ .start = 0x64AD4000,
+ .end = 0x64AD5000 - 1, /* 4 KB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * AVFS: player HAL memory
+ *
+ *
+ */
+ {
+ .name = "AvfsDmaMem",
+ .start = 0x6430E000,
+ .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * PMEM
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Persistent memory for diagnostics.
+ *
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Smartcard
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Read and write buffers for Internal/External cards
+ *
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x64AD1000,
+ .end = 0x64AD3800 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * KAVNET
+ * NP Reset Vector - must be of the form xxCxxxxx
+ * NP Image - must be video bank 1
+ * NP IPC - must be video bank 2
+ */
+ {
+ .name = "NP_Reset_Vector",
+ .start = 0x27c00000,
+ .end = 0x27c01000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "NP_Image",
+ .start = 0x27020000,
+ .end = 0x27060000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "NP_IPC",
+ .start = 0x63500000,
+ .end = 0x63580000 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ { },
+};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
new file mode 100644
index 00000000000..23a905613c0
--- /dev/null
+++ b/arch/mips/powertv/asic/prealloc-cronuslite.c
@@ -0,0 +1,290 @@
+/*
+ * Memory pre-allocations for Cronus Lite boxes.
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ */
+
+#include <linux/init.h>
+#include <asm/mach-powertv/asic.h>
+
+/*
+ * NON_DVR_CAPABLE CRONUSLITE RESOURCES
+ */
+struct resource non_dvr_cronuslite_resources[] __initdata =
+{
+ /*
+ *
+ * VIDEO2 / LX2
+ *
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 2 image and ram */
+ .start = 0x60000000,
+ .end = 0x601FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "ST231aMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x60200000,
+ .end = 0x60201FFF,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x60202000,
+ .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * Sysaudio Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * DSP_Image_Buff - DSP code and data images (1MB)
+ * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
+ * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
+ * ADSC_Main_Buff - ADSC Main buffer (16KB)
+ *
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * STAVEM driver/STAPI
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * This memory area is used for allocating buffers for Video decoding
+ * purposes. Allocation/De-allocation within this buffer is managed
+ * by the STAVMEM driver of the STAPI. They could be Decimated
+ * Picture Buffers, Intermediate Buffers, as deemed necessary for
+ * video decoding purposes, for any video decoders on Zeus.
+ *
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x63580000,
+ .end = 0x63B80000 - 1, /* 6 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * DOCSIS Subsystem
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "Docsis",
+ .start = 0x62000000,
+ .end = 0x62700000 - 1, /* 7 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * GHW HAL Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * GraphicsHeap - PowerTV Graphics Heap
+ *
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x62700000,
+ .end = 0x63500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * multi com buffer area
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x26000000,
+ .end = 0x26020000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * DMA Ring buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer for unit0
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit0
+ *
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit1
+ *
+ */
+ {
+ .name = "DisplayBins1",
+ .start = 0x63B83000,
+ .end = 0x63B84000 - 1, /* 4 KB total */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * AVFS: player HAL memory
+ *
+ *
+ */
+ {
+ .name = "AvfsDmaMem",
+ .start = 0x63B84000,
+ .end = 0x63E48C00 - 1, /* 945K * 3 for playback */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * PMEM
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Persistent memory for diagnostics.
+ *
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Smartcard
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Read and write buffers for Internal/External cards
+ *
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x63B80000,
+ .end = 0x63B82800 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * KAVNET
+ * NP Reset Vector - must be of the form xxCxxxxx
+ * NP Image - must be video bank 1
+ * NP IPC - must be video bank 2
+ */
+ {
+ .name = "NP_Reset_Vector",
+ .start = 0x27c00000,
+ .end = 0x27c01000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "NP_Image",
+ .start = 0x27020000,
+ .end = 0x27060000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "NP_IPC",
+ .start = 0x63500000,
+ .end = 0x63580000 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ * NAND Flash
+ */
+ {
+ .name = "NandFlash",
+ .start = NAND_FLASH_BASE,
+ .end = NAND_FLASH_BASE + 0x400 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ * Add other resources here
+ */
+ { },
+};
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
new file mode 100644
index 00000000000..018d4514dbe
--- /dev/null
+++ b/arch/mips/powertv/asic/prealloc-zeus.c
@@ -0,0 +1,459 @@
+/*
+ * Memory pre-allocations for Zeus boxes.
+ *
+ * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
+ */
+
+#include <linux/init.h>
+#include <asm/mach-powertv/asic.h>
+
+/*
+ * DVR_CAPABLE RESOURCES
+ */
+struct resource dvr_zeus_resources[] __initdata =
+{
+ /*
+ *
+ * VIDEO1 / LX1
+ *
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x20000000,
+ .end = 0x201FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x20200000,
+ .end = 0x20201FFF,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x20202000,
+ .end = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * VIDEO2 / LX2
+ *
+ */
+ {
+ .name = "ST231bImage", /* Delta-Mu 2 image and ram */
+ .start = 0x30000000,
+ .end = 0x301FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x30200000,
+ .end = 0x30201FFF,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "MediaMemory2",
+ .start = 0x30202000,
+ .end = 0x31FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ *
+ * Sysaudio Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * DSP_Image_Buff - DSP code and data images (1MB)
+ * ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
+ * ADSC_AUX_Buff - ADSC AUX buffer (16KB)
+ * ADSC_Main_Buff - ADSC Main buffer (16KB)
+ *
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * STAVEM driver/STAPI
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * This memory area is used for allocating buffers for Video decoding
+ * purposes. Allocation/De-allocation within this buffer is managed
+ * by the STAVMEM driver of the STAPI. They could be Decimated
+ * Picture Buffers, Intermediate Buffers, as deemed necessary for
+ * video decoding purposes, for any video decoders on Zeus.
+ *
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x00000000,
+ .end = 0x00c00000 - 1, /* 12 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * DOCSIS Subsystem
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "Docsis",
+ .start = 0x40100000,
+ .end = 0x407fffff,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * GHW HAL Driver
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * GraphicsHeap - PowerTV Graphics Heap
+ *
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x46900000,
+ .end = 0x47700000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * multi com buffer area
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x47900000,
+ .end = 0x47920000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * DMA Ring buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x00280000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer for unit0
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit0
+ *
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Display bins buffer
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Display Bins for unit1
+ *
+ */
+ {
+ .name = "DisplayBins1",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * ITFS
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "ITFS",
+ .start = 0x00000000,
+ /* 815,104 bytes each for 2 ITFS partitions. */
+ .end = 0x0018DFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * AVFS
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Docsis -
+ *
+ */
+ {
+ .name = "AvfsDmaMem",
+ .start = 0x00000000,
+ /* (945K * 8) = (128K * 3) 5 playbacks / 3 server */
+ .end = 0x007c2000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "AvfsFileSys",
+ .start = 0x00000000,
+ .end = 0x00001000 - 1, /* 4K */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * PMEM
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Persistent memory for diagnostics.
+ *
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * Smartcard
+ *
+ * This driver requires:
+ *
+ * Arbitrary Based Buffers:
+ * Read and write buffers for Internal/External cards
+ *
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x00000000,
+ .end = 0x2800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Add other resources here
+ */
+ { },
+};
+
+/*
+ * NON_DVR_CAPABLE ZEUS RESOURCES
+ */
+struct resource non_dvr_zeus_resources[] __initdata =
+{
+ /*
+ * VIDEO1 / LX1
+ */
+ {
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x20000000,
+ .end = 0x201FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x20200000,
+ .end = 0x20201FFF,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "MediaMemory1",
+ .start = 0x20202000,
+ .end = 0x21FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ * Sysaudio Driver
+ */
+ {
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * STAVEM driver/STAPI
+ */
+ {
+ .name = "AVMEMPartition0",
+ .start = 0x00000000,
+ .end = 0x00600000 - 1, /* 6 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * DOCSIS Subsystem
+ */
+ {
+ .name = "Docsis",
+ .start = 0x40100000,
+ .end = 0x407fffff,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * GHW HAL Driver
+ */
+ {
+ .name = "GraphicsHeap",
+ .start = 0x46900000,
+ .end = 0x47700000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * multi com buffer area
+ */
+ {
+ .name = "MulticomSHM",
+ .start = 0x47900000,
+ .end = 0x47920000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * DMA Ring buffer
+ */
+ {
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x00280000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Display bins buffer for unit0
+ */
+ {
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ *
+ * AVFS: player HAL memory
+ *
+ *
+ */
+ {
+ .name = "AvfsDmaMem",
+ .start = 0x00000000,
+ .end = 0x002c4c00 - 1, /* 945K * 3 for playback */
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * PMEM
+ */
+ {
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * Smartcard
+ */
+ {
+ .name = "SmartCardInfo",
+ .start = 0x00000000,
+ .end = 0x2800 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ /*
+ * NAND Flash
+ */
+ {
+ .name = "NandFlash",
+ .start = NAND_FLASH_BASE,
+ .end = NAND_FLASH_BASE + 0x400 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ /*
+ * Add other resources here
+ */
+ { },
+};
diff --git a/arch/mips/powertv/cmdline.c b/arch/mips/powertv/cmdline.c
new file mode 100644
index 00000000000..98d73cb0d45
--- /dev/null
+++ b/arch/mips/powertv/cmdline.c
@@ -0,0 +1,52 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Kernel command line creation using the prom monitor (YAMON) argc/argv.
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+
+#include "init.h"
+
+/*
+ * YAMON (32-bit PROM) passes arguments and environment as 32-bit pointers.
+ * This macro takes care of sign extension.
+ */
+#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
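
To make the sign-extension point concrete (example value only):

    /*
     * Example on a 64-bit kernel: _prom_argv[i] == (int)0x80002000.
     * (long)_prom_argv[i] sign-extends to 0xffffffff80002000,
     * the matching CKSEG0 address.
     */
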
+
+char * __init prom_getcmdline(void)
+{
+ return &(arcs_cmdline[0]);
+}
+
+void __init prom_init_cmdline(void)
+{
+ int len;
+
+ if (prom_argc != 1)
+ return;
+
+ len = strlen(arcs_cmdline);
+
+ arcs_cmdline[len] = ' ';
+
+ strlcpy(arcs_cmdline + len + 1, (char *)_prom_argv,
+ COMMAND_LINE_SIZE - len - 1);
+}
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
new file mode 100644
index 00000000000..5f4e4c304e4
--- /dev/null
+++ b/arch/mips/powertv/init.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * PROM library initialisation code.
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include <asm/bootinfo.h>
+#include <linux/io.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+
+#include <asm/mips-boards/prom.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mach-powertv/asic.h>
+
+#include "init.h"
+
+int prom_argc;
+int *_prom_argv, *_prom_envp;
+unsigned long _prom_memsize;
+
+/*
+ * YAMON (32-bit PROM) passes arguments and environment as 32-bit pointers.
+ * This macro takes care of sign extension if running in 64-bit mode.
+ */
+#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
+
+char *prom_getenv(char *envname)
+{
+ char *result = NULL;
+
+ if (_prom_envp != NULL) {
+ /*
+ * Return a pointer to the given environment variable.
+ * We're using 64-bit pointers, but all pointers in the PROM
+ * structures are only 32-bit, so some workarounds are needed
+ * when running in 64-bit mode.
+ */
+ int i, index = 0;
+
+ i = strlen(envname);
+
+ while (prom_envp(index)) {
+ if (strncmp(envname, prom_envp(index), i) == 0) {
+ result = prom_envp(index + 1);
+ break;
+ }
+ index += 2;
+ }
+ }
+
+ return result;
+}
+
+/* TODO: Verify on linux-mips mailing list that the following two */
+/* functions are correct */
+/* TODO: Copy NMI and EJTAG exception vectors to memory from the */
+/* BootROM exception vectors. Flush their cache entries. test it. */
+
+static void __init mips_nmi_setup(void)
+{
+ void *base;
+#if defined(CONFIG_CPU_MIPS32_R1)
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa80) :
+ (void *)(CAC_BASE + 0x380);
+#elif defined(CONFIG_CPU_MIPS32_R2)
+ base = (void *)0xbfc00000;
+#else
+#error NMI exception handler address not defined
+#endif
+}
+
+static void __init mips_ejtag_setup(void)
+{
+ void *base;
+
+#if defined(CONFIG_CPU_MIPS32_R1)
+ base = cpu_has_veic ?
+ (void *)(CAC_BASE + 0xa00) :
+ (void *)(CAC_BASE + 0x300);
+#elif defined(CONFIG_CPU_MIPS32_R2)
+ base = (void *)0xbfc00480;
+#else
+#error EJTAG exception handler address not defined
+#endif
+}
+
+void __init prom_init(void)
+{
+ prom_argc = fw_arg0;
+ _prom_argv = (int *) fw_arg1;
+ _prom_envp = (int *) fw_arg2;
+ _prom_memsize = (unsigned long) fw_arg3;
+
+ board_nmi_handler_setup = mips_nmi_setup;
+ board_ejtag_handler_setup = mips_ejtag_setup;
+
+ pr_info("\nLINUX started...\n");
+ prom_init_cmdline();
+ configure_platform();
+ prom_meminit();
+
+#ifndef CONFIG_BOOTLOADER_DRIVER
+ pr_info("\nBootloader driver isn't loaded...\n");
+#endif
+}
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
new file mode 100644
index 00000000000..7af6bf25008
--- /dev/null
+++ b/arch/mips/powertv/init.h
@@ -0,0 +1,28 @@
+/*
+ * Definitions from powertv init.c file
+ *
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: David VomLehn
+ */
+
+#ifndef _POWERTV_INIT_H
+#define _POWERTV_INIT_H
+extern int prom_argc;
+extern int *_prom_argv;
+extern unsigned long _prom_memsize;
+#endif
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
new file mode 100644
index 00000000000..28d06605fff
--- /dev/null
+++ b/arch/mips/powertv/memory.c
@@ -0,0 +1,186 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Apparently originally from arch/mips/malta-memory.c. Modified to work
+ * with the PowerTV bootloader.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+#include <asm/mips-boards/prom.h>
+
+#include "init.h"
+
+/* Memory constants */
+#define KIBIBYTE(n) ((n) * 1024) /* Number of kibibytes */
+#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
+#define DEFAULT_MEMSIZE MEBIBYTE(256) /* If no memsize provided */
+#define LOW_MEM_MAX MEBIBYTE(252) /* Max usable low mem */
+#define RES_BOOTLDR_MEMSIZE MEBIBYTE(1) /* Memory reserved for bldr */
+#define BOOT_MEM_SIZE KIBIBYTE(256) /* Memory not reported by bldr */
+#define PHYS_MEM_START 0x10000000 /* Start of physical memory */
+
+unsigned long ptv_memsize;
+
+char __initdata cmdline[COMMAND_LINE_SIZE];
+
+void __init prom_meminit(void)
+{
+ char *memsize_str;
+ unsigned long memsize = 0;
+ unsigned int physend;
+ char *ptr;
+ int low_mem;
+ int high_mem;
+
+ /* Check the command line first for a memsize directive */
+ strcpy(cmdline, arcs_cmdline);
+ ptr = strstr(cmdline, "memsize=");
+ if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
+ ptr = strstr(ptr, " memsize=");
+
+ if (ptr) {
+ memsize = memparse(ptr + 8, &ptr);
+ } else {
+ /* otherwise look in the environment */
+ memsize_str = prom_getenv("memsize");
+
+ if (memsize_str != NULL) {
+ pr_info("prom memsize = %s\n", memsize_str);
+ memsize = simple_strtol(memsize_str, NULL, 0);
+ }
+
+ if (memsize == 0) {
+ if (_prom_memsize != 0) {
+ memsize = _prom_memsize;
+ pr_info("_prom_memsize = 0x%lx\n", memsize);
+ /* add in memory that the bootloader doesn't
+ * report */
+ memsize += BOOT_MEM_SIZE;
+ } else {
+ memsize = DEFAULT_MEMSIZE;
+ pr_info("Memsize not passed by bootloader, "
+ "defaulting to 0x%lx\n", memsize);
+ }
+ }
+ }
+
+ /* Store memsize for diagnostic purposes */
+ ptv_memsize = memsize;
+
+ physend = PFN_ALIGN(&_end) - 0x80000000;
+ if (memsize > LOW_MEM_MAX) {
+ low_mem = LOW_MEM_MAX;
+ high_mem = memsize - low_mem;
+ } else {
+ low_mem = memsize;
+ high_mem = 0;
+ }
+
+/*
+ * TODO: We will use a hard-coded memory configuration until
+ * the bootloader releases its device tree to us.
+ */
+ /*
+ * Add the memory reserved for use by the bootloader to the
+ * memory map.
+ */
+ add_memory_region(PHYS_MEM_START, RES_BOOTLDR_MEMSIZE,
+ BOOT_MEM_RESERVED);
+#ifdef CONFIG_HIGHMEM_256_128
+ /*
+ * Add memory in low for general use by the kernel and its friends
+ * (like drivers, applications, etc).
+ */
+ add_memory_region(PHYS_MEM_START + RES_BOOTLDR_MEMSIZE,
+ LOW_MEM_MAX - RES_BOOTLDR_MEMSIZE, BOOT_MEM_RAM);
+ /*
+ * Add the memory reserved for reset vector.
+ */
+ add_memory_region(0x1fc00000, MEBIBYTE(4), BOOT_MEM_RESERVED);
+ /*
+ * Add the reserved memory region.
+ */
+ add_memory_region(0x20000000, MEBIBYTE(1024 + 75), BOOT_MEM_RESERVED);
+ /*
+ * Add memory in high for general use by the kernel and its friends
+ * (like drivers, applications, etc).
+ *
+ * The first 75 MiB of high memory is reserved for devices.
+ */
+ add_memory_region(0x60000000 + MEBIBYTE(75), MEBIBYTE(128 - 75),
+ BOOT_MEM_RAM);
+#elif defined CONFIG_HIGHMEM_128_128
+ /*
+ * Add memory in low for general use by the kernel and its friends
+ * (like drivers, applications, etc).
+ */
+ add_memory_region(PHYS_MEM_START + RES_BOOTLDR_MEMSIZE,
+ MEBIBYTE(128) - RES_BOOTLDR_MEMSIZE, BOOT_MEM_RAM);
+ /*
+ * Add the reserved memory region.
+ */
+ add_memory_region(PHYS_MEM_START + MEBIBYTE(128),
+ MEBIBYTE(128 + 1024 + 75), BOOT_MEM_RESERVED);
+ /*
+ * Add memory in high for general use by the kernel and its friends
+ * (like drivers, applications, etc).
+ *
+ * The first 75 MiB of high memory is reserved for devices.
+ */
+ add_memory_region(0x60000000 + MEBIBYTE(75), MEBIBYTE(128 - 75),
+ BOOT_MEM_RAM);
+#else
+ /* Add low memory regions for either:
+ * - a configuration without high memory, or
+ * - the "HIGHMEM_LOWBANK_ONLY" high-memory configuration
+ */
+ /*
+ * Add memory for general use by the kernel and its friends
+ * (like drivers, applications, etc).
+ */
+ add_memory_region(PHYS_MEM_START + RES_BOOTLDR_MEMSIZE,
+ low_mem - RES_BOOTLDR_MEMSIZE, BOOT_MEM_RAM);
+ /*
+ * Add the memory reserved for reset vector.
+ */
+ add_memory_region(0x1fc00000, MEBIBYTE(4), BOOT_MEM_RESERVED);
+#endif
+}
+
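+/*
+ * Return to the kernel any memory regions that the bootloader marked as
+ * BOOT_MEM_ROM_DATA, now that the PROM is no longer needed.
+ */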
+void __init prom_free_prom_memory(void)
+{
+ unsigned long addr;
+ int i;
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
+ continue;
+
+ addr = boot_mem_map.map[i].addr;
+ free_init_pages("prom memory",
+ addr, addr + boot_mem_map.map[i].size);
+ }
+}
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
new file mode 100644
index 00000000000..f5c62462fc9
--- /dev/null
+++ b/arch/mips/powertv/pci/Makefile
@@ -0,0 +1,21 @@
+#
+# Copyright (C) 2009 Scientific-Atlanta, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+obj-$(CONFIG_PCI) += fixup-powertv.o
+
+EXTRA_CFLAGS += -Wall -Werror
diff --git a/arch/mips/powertv/pci/fixup-powertv.c b/arch/mips/powertv/pci/fixup-powertv.c
new file mode 100644
index 00000000000..726bc2e824b
--- /dev/null
+++ b/arch/mips/powertv/pci/fixup-powertv.c
@@ -0,0 +1,36 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/mach-powertv/interrupts.h>
+#include "powertv-pci.h"
+
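+/*
+ * Route every PCI device interrupt to the PCI Express IRQ; see
+ * asic_pcie_map_irq() below.
+ */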
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return asic_pcie_map_irq(dev, slot, pin);
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/*
+ * asic_pcie_map_irq
+ *
+ * Parameters:
+ * *dev - pointer to a pci_dev structure (not used)
+ * slot - slot number (not used)
+ * pin - pin number (not used)
+ *
+ * Return Value:
+ * Returns: IRQ number (always the PCI Express IRQ number)
+ *
+ * Description:
+ * asic_pcie_map_irq will return the IRQ number of the PCI Express interrupt.
+ *
+ */
+int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return irq_pciexp;
+}
+EXPORT_SYMBOL(asic_pcie_map_irq);
diff --git a/arch/mips/powertv/pci/powertv-pci.h b/arch/mips/powertv/pci/powertv-pci.h
new file mode 100644
index 00000000000..1b5886bbd75
--- /dev/null
+++ b/arch/mips/powertv/pci/powertv-pci.h
@@ -0,0 +1,31 @@
+/*
+ * powertv-pci.h
+ *
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+/*
+ * Local definitions for the powertv PCI code
+ */
+
+#ifndef _POWERTV_PCI_POWERTV_PCI_H_
+#define _POWERTV_PCI_POWERTV_PCI_H_
+extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+extern int asic_pcie_init(void);
+
+extern int log_level;
+#endif
diff --git a/arch/mips/powertv/powertv-clock.h b/arch/mips/powertv/powertv-clock.h
new file mode 100644
index 00000000000..d94c5431148
--- /dev/null
+++ b/arch/mips/powertv/powertv-clock.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: David VomLehn
+ */
+
+#ifndef _POWERTV_POWERTV_CLOCK_H
+#define _POWERTV_POWERTV_CLOCK_H
+extern int powertv_clockevent_init(void);
+extern void powertv_clocksource_init(void);
+extern unsigned int mips_get_pll_freq(void);
+#endif
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
new file mode 100644
index 00000000000..bd8ebf128f2
--- /dev/null
+++ b/arch/mips/powertv/powertv_setup.c
@@ -0,0 +1,351 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/screen_info.h>
+#include <linux/notifier.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/ctype.h>
+
+#include <linux/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/prom.h>
+#include <asm/dma.h>
+#include <linux/time.h>
+#include <asm/traps.h>
+#include <asm/asm-offsets.h>
+#include "reset.h"
+
+#define VAL(n) STR(n)
+
+/*
+ * Macros for loading addresses and storing registers:
+ * PTR_LA Load the address into a register
+ * LONG_S Store the full width of the given register.
+ * LONG_L Load the full width of the given register
+ * PTR_ADDIU Add a constant value to a register used as a pointer
+ * REG_SIZE Number of 8-bit bytes in a full width register
+ */
+#ifdef CONFIG_64BIT
+#warning TODO: 64-bit code needs to be verified
+#define PTR_LA "dla "
+#define LONG_S "sd "
+#define LONG_L "ld "
+#define PTR_ADDIU "daddiu "
+#define REG_SIZE "8" /* In bytes */
+#endif
+
+#ifdef CONFIG_32BIT
+#define PTR_LA "la "
+#define LONG_S "sw "
+#define LONG_L "lw "
+#define PTR_ADDIU "addiu "
+#define REG_SIZE "4" /* In bytes */
+#endif
+
+static struct pt_regs die_regs;
+static bool have_die_regs;
+
+static void register_panic_notifier(void);
+static int panic_handler(struct notifier_block *notifier_block,
+ unsigned long event, void *cause_string);
+
+const char *get_system_type(void)
+{
+ return "PowerTV";
+}
+
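+/*
+ * Basic platform setup: treat an oops as fatal, register the panic
+ * notifier and hook up the reboot/halt handlers.
+ */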
+void __init plat_mem_setup(void)
+{
+ panic_on_oops = 1;
+ register_panic_notifier();
+
+#if 0
+ mips_pcibios_init();
+#endif
+ mips_reboot_setup();
+}
+
+/*
+ * Install a panic notifier for platform-specific diagnostics
+ */
+static void register_panic_notifier()
+{
+ static struct notifier_block panic_notifier = {
+ .notifier_call = panic_handler,
+ .next = NULL,
+ .priority = INT_MAX
+ };
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
+}
+
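+/*
+ * Panic notifier callback.  Capture the CPU registers at the point of
+ * the panic so the platform diagnostics can report them.
+ */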
+static int panic_handler(struct notifier_block *notifier_block,
+ unsigned long event, void *cause_string)
+{
+ struct pt_regs my_regs;
+
+ /* Save all of the registers */
+ {
+ unsigned long at, v0, v1; /* Must be on the stack */
+
+ /* Start by saving $at and v0 on the stack. We use $at
+ * ourselves, but it looks like the compiler may use v0 or v1
+ * to load the address of the pt_regs structure. We'll come
+ * back later to store the registers in the pt_regs
+ * structure. */
+ __asm__ __volatile__ (
+ ".set noat\n"
+ LONG_S "$at, %[at]\n"
+ LONG_S "$2, %[v0]\n"
+ LONG_S "$3, %[v1]\n"
+ :
+ [at] "=m" (at),
+ [v0] "=m" (v0),
+ [v1] "=m" (v1)
+ :
+ : "at"
+ );
+
+ __asm__ __volatile__ (
+ ".set noat\n"
+ "move $at, %[pt_regs]\n"
+
+ /* Argument registers */
+ LONG_S "$4, " VAL(PT_R4) "($at)\n"
+ LONG_S "$5, " VAL(PT_R5) "($at)\n"
+ LONG_S "$6, " VAL(PT_R6) "($at)\n"
+ LONG_S "$7, " VAL(PT_R7) "($at)\n"
+
+ /* Temporary regs */
+ LONG_S "$8, " VAL(PT_R8) "($at)\n"
+ LONG_S "$9, " VAL(PT_R9) "($at)\n"
+ LONG_S "$10, " VAL(PT_R10) "($at)\n"
+ LONG_S "$11, " VAL(PT_R11) "($at)\n"
+ LONG_S "$12, " VAL(PT_R12) "($at)\n"
+ LONG_S "$13, " VAL(PT_R13) "($at)\n"
+ LONG_S "$14, " VAL(PT_R14) "($at)\n"
+ LONG_S "$15, " VAL(PT_R15) "($at)\n"
+
+ /* "Saved" registers */
+ LONG_S "$16, " VAL(PT_R16) "($at)\n"
+ LONG_S "$17, " VAL(PT_R17) "($at)\n"
+ LONG_S "$18, " VAL(PT_R18) "($at)\n"
+ LONG_S "$19, " VAL(PT_R19) "($at)\n"
+ LONG_S "$20, " VAL(PT_R20) "($at)\n"
+ LONG_S "$21, " VAL(PT_R21) "($at)\n"
+ LONG_S "$22, " VAL(PT_R22) "($at)\n"
+ LONG_S "$23, " VAL(PT_R23) "($at)\n"
+
+ /* Add'l temp regs */
+ LONG_S "$24, " VAL(PT_R24) "($at)\n"
+ LONG_S "$25, " VAL(PT_R25) "($at)\n"
+
+ /* Kernel temp regs */
+ LONG_S "$26, " VAL(PT_R26) "($at)\n"
+ LONG_S "$27, " VAL(PT_R27) "($at)\n"
+
+ /* Global pointer, stack pointer, frame pointer and
+ * return address */
+ LONG_S "$gp, " VAL(PT_R28) "($at)\n"
+ LONG_S "$sp, " VAL(PT_R29) "($at)\n"
+ LONG_S "$fp, " VAL(PT_R30) "($at)\n"
+ LONG_S "$ra, " VAL(PT_R31) "($at)\n"
+
+ /* Now we can get the $at and v0 registers back and
+ * store them */
+ LONG_L "$8, %[at]\n"
+ LONG_S "$8, " VAL(PT_R1) "($at)\n"
+ LONG_L "$8, %[v0]\n"
+ LONG_S "$8, " VAL(PT_R2) "($at)\n"
+ LONG_L "$8, %[v1]\n"
+ LONG_S "$8, " VAL(PT_R3) "($at)\n"
+ :
+ :
+ [at] "m" (at),
+ [v0] "m" (v0),
+ [v1] "m" (v1),
+ [pt_regs] "r" (&my_regs)
+ : "at", "t0"
+ );
+
+ /* Set the current EPC value to be the current location in this
+ * function */
+ __asm__ __volatile__ (
+ ".set noat\n"
+ "1:\n"
+ PTR_LA "$at, 1b\n"
+ LONG_S "$at, %[cp0_epc]\n"
+ :
+ [cp0_epc] "=m" (my_regs.cp0_epc)
+ :
+ : "at"
+ );
+
+ my_regs.cp0_cause = read_c0_cause();
+ my_regs.cp0_status = read_c0_status();
+ }
+
+#ifdef CONFIG_DIAGNOSTICS
+ failure_report((char *) cause_string,
+ have_die_regs ? &die_regs : &my_regs);
+ have_die_regs = false;
+#else
+ pr_crit("I'm feeling a bit sleepy. hmmmmm... perhaps a nap would... "
+ "zzzz... \n");
+#endif
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * Platform-specific handling of oops
+ * @str: Pointer to the oops string
+ * @regs: Pointer to the oops registers
+ * All we do here is to save the registers for subsequent printing through
+ * the panic notifier.
+ */
+void platform_die(const char *str, const struct pt_regs *regs)
+{
+ /* If we already have saved registers, don't overwrite them as they
+ * apply to the initial fault */
+
+ if (!have_die_regs) {
+ have_die_regs = true;
+ die_regs = *regs;
+ }
+}
+
+/* Information about the RF MAC address, if one was supplied on the
+ * command line. */
+static bool have_rfmac;
+static u8 rfmac[ETH_ALEN];
+
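+/*
+ * Parse the "rfmac=" early parameter: a string of hex digits (with an
+ * optional leading "0x") giving the six-byte RF MAC address.
+ */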
+static int rfmac_param(char *p)
+{
+ u8 *q;
+ bool is_high_nibble;
+ int c;
+
+ /* Skip a leading "0x", if present */
+ if (*p == '0' && *(p+1) == 'x')
+ p += 2;
+
+ q = rfmac;
+ is_high_nibble = true;
+
+ for (c = (unsigned char) *p++;
+ isxdigit(c) && q - rfmac < ETH_ALEN;
+ c = (unsigned char) *p++) {
+ int nibble;
+
+ nibble = (isdigit(c) ? (c - '0') :
+ (isupper(c) ? c - 'A' + 10 : c - 'a' + 10));
+
+ if (is_high_nibble)
+ *q = nibble << 4;
+ else
+ *q++ |= nibble;
+
+ is_high_nibble = !is_high_nibble;
+ }
+
+ /* If we parsed all the way to the end of the parameter value and
+ * parsed all ETH_ALEN bytes, we have a usable RF MAC address */
+ have_rfmac = (c == '\0' && q - rfmac == ETH_ALEN);
+
+ return 0;
+}
+
+early_param("rfmac", rfmac_param);
+
+/*
+ * Generate an Ethernet MAC address that has a good chance of being unique.
+ * @addr: Pointer to six-byte array containing the Ethernet address
+ * Generates an Ethernet MAC address that is highly likely to be unique for
+ * this particular system on a network with other systems of the same type.
+ *
+ * The problem we are solving is that, when random_ether_addr() is used to
+ * generate MAC addresses at startup, there isn't much entropy for the random
+ * number generator to use and the addresses it produces are fairly likely to
+ * be the same as those of other identical systems on the same local network.
+ * This is true even for relatively small numbers of systems (for the reason
+ * why, see the Wikipedia entry for "Birthday problem" at:
+ * http://en.wikipedia.org/wiki/Birthday_problem)
+ *
+ * The good news is that we already have a MAC address known to be unique, the
+ * RF MAC address. The bad news is that this address is already in use on the
+ * RF interface. Worse, the obvious trick, taking the RF MAC address and
+ * turning on the locally managed bit, has already been used for other devices.
+ * Still, this does give us something to work with.
+ *
+ * The approach we take is:
+ * 1. If we can't get the RF MAC Address, just call random_ether_addr.
+ * 2. Use the 24-bit NIC-specific bits of the RF MAC address as the last 24
+ * bits of the new address. This is very likely to be unique, except for
+ * the current box.
+ * 3. To avoid using addresses already on the current box, we set the top
+ * six bits of the address with a value different from any currently
+ * registered Scientific Atlanta organizationally unique identifier
+ * (OUI). This avoids duplication with any addresses on the system that
+ * were generated from a valid Scientific Atlanta-registered address by
+ * simply flipping the locally managed bit.
+ * 4. We aren't generating a multicast address, so we leave the multicast
+ * bit off. Since we aren't using a registered address, we have to set
+ * the locally managed bit.
+ * 5. We then randomly generate the remaining 16 bits. This does two
+ * things:
+ * a. It allows us to call this function for more than one device
+ * in this system
+ * b. It ensures that things will probably still work even if
+ * some device on the network has a locally managed
+ * address that matches the top six bits from step 2.
+ */
+void platform_random_ether_addr(u8 addr[ETH_ALEN])
+{
+ const int num_random_bytes = 2;
+ const unsigned char non_sciatl_oui_bits = 0xc0u;
+ const unsigned char mac_addr_locally_managed = (1 << 1);
+
+ if (!have_rfmac) {
+ pr_warning("rfmac not available on command line; "
+ "generating random MAC address\n");
+ random_ether_addr(addr);
+ }
+
+ else {
+ int i;
+
+ /* Set the first byte to something that won't match a Scientific
+ * Atlanta OUI, is locally managed, and isn't a multicast
+ * address */
+ addr[0] = non_sciatl_oui_bits | mac_addr_locally_managed;
+
+ /* Get some bytes of random address information */
+ get_random_bytes(&addr[1], num_random_bytes);
+
+ /* Copy over the NIC-specific bits of the RF MAC address */
+ for (i = 1 + num_random_bytes; i < ETH_ALEN; i++)
+ addr[i] = rfmac[i];
+ }
+}
diff --git a/arch/mips/powertv/reset.c b/arch/mips/powertv/reset.c
new file mode 100644
index 00000000000..494c652c984
--- /dev/null
+++ b/arch/mips/powertv/reset.c
@@ -0,0 +1,65 @@
+/*
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ */
+#include <linux/pm.h>
+
+#include <linux/io.h>
+#include <asm/reboot.h> /* Not included by linux/reboot.h */
+
+#ifdef CONFIG_BOOTLOADER_DRIVER
+#include <asm/mach-powertv/kbldr.h>
+#endif
+
+#include <asm/mach-powertv/asic_regs.h>
+#include "reset.h"
+
+static void mips_machine_restart(char *command);
+static void mips_machine_halt(void);
+
+static void mips_machine_restart(char *command)
+{
+#ifdef CONFIG_BOOTLOADER_DRIVER
+ /*
+ * Call the bootloader's reset function to ensure
+ * that persistent data is flushed before hard reset
+ */
+ kbldr_SetCauseAndReset();
+#else
+ writel(0x1, asic_reg_addr(watchdog));
+#endif
+}
+
+static void mips_machine_halt(void)
+{
+#ifdef CONFIG_BOOTLOADER_DRIVER
+ /*
+ * Call the bootloader's reset function to ensure
+ * that persistent data is flushed before hard reset
+ */
+ kbldr_SetCauseAndReset();
+#else
+ writel(0x1, asic_reg_addr(watchdog));
+#endif
+}
+
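+/*
+ * Register the PowerTV restart and halt routines with the generic MIPS
+ * reboot and power-off hooks.
+ */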
+void mips_reboot_setup(void)
+{
+ _machine_restart = mips_machine_restart;
+ _machine_halt = mips_machine_halt;
+ pm_power_off = mips_machine_halt;
+}
diff --git a/arch/mips/powertv/reset.h b/arch/mips/powertv/reset.h
new file mode 100644
index 00000000000..888fd09e262
--- /dev/null
+++ b/arch/mips/powertv/reset.h
@@ -0,0 +1,26 @@
+/*
+ * Definitions for the PowerTV reset.c file
+ *
+ * Copyright (C) 2009 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: David VomLehn
+ */
+
+#ifndef _POWERTV_POWERTV_RESET_H
+#define _POWERTV_POWERTV_RESET_H
+extern void mips_reboot_setup(void);
+#endif
diff --git a/arch/mips/mipssim/sim_cmdline.c b/arch/mips/powertv/time.c
index 74240e1ce5a..1e0a5ef4c8c 100644
--- a/arch/mips/mipssim/sim_cmdline.c
+++ b/arch/mips/powertv/time.c
@@ -1,5 +1,7 @@
/*
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
@@ -14,19 +16,22 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
+ * Setting up the clock on the MIPS boards.
*/
+
#include <linux/init.h>
-#include <linux/string.h>
-#include <asm/bootinfo.h>
+#include <asm/mach-powertv/interrupts.h>
+#include <asm/time.h>
-extern char arcs_cmdline[];
+#include "powertv-clock.h"
-char * __init prom_getcmdline(void)
+unsigned int __cpuinit get_c0_compare_int(void)
{
- return arcs_cmdline;
+ return irq_mips_timer;
}
-void __init prom_init_cmdline(void)
+void __init plat_time_init(void)
{
- /* XXX: Get boot line from environment? */
+ powertv_clocksource_init();
+ r4k_clockevent_init();
}
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index ad5bd109797..d7c26d00cfe 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -69,7 +69,7 @@ static inline unsigned long tag2ul(char *arg, const char *tag)
void __init prom_setup_cmdline(void)
{
- static char cmd_line[CL_SIZE] __initdata;
+ static char cmd_line[COMMAND_LINE_SIZE] __initdata;
char *cp, *board;
int prom_argc;
char **prom_argv, **prom_envp;
@@ -115,7 +115,7 @@ void __init prom_setup_cmdline(void)
strcpy(cp, arcs_cmdline);
cp += strlen(arcs_cmdline);
}
- cmd_line[CL_SIZE-1] = '\0';
+ cmd_line[COMMAND_LINE_SIZE - 1] = '\0';
strcpy(arcs_cmdline, cmd_line);
}
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index 1617241d273..da44ccb2082 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -50,9 +50,9 @@
static char __init *decode_eisa_sig(unsigned long addr)
{
- static char sig_str[EISA_SIG_LEN];
+ static char sig_str[EISA_SIG_LEN] __initdata;
u8 sig[4];
- u16 rev;
+ u16 rev;
int i;
for (i = 0; i < 4; i++) {
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index 0ecd5fe9486..383f11d7f44 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
+#include <linux/ftrace.h>
#include <asm/irq_cpu.h>
#include <asm/sgi/hpc3.h>
@@ -150,7 +151,7 @@ static void indy_local1_irqdispatch(void)
extern void ip22_be_interrupt(int irq);
-static void indy_buserror_irq(void)
+static void __irq_entry indy_buserror_irq(void)
{
int irq = SGI_BUSERR_IRQ;
diff --git a/arch/mips/sgi-ip22/ip22-setup.c b/arch/mips/sgi-ip22/ip22-setup.c
index b9a931358e2..5deeb68b6c9 100644
--- a/arch/mips/sgi-ip22/ip22-setup.c
+++ b/arch/mips/sgi-ip22/ip22-setup.c
@@ -67,7 +67,7 @@ void __init plat_mem_setup(void)
cserial = ArcGetEnvironmentVariable("ConsoleOut");
if ((ctype && *ctype == 'd') || (cserial && *cserial == 's')) {
- static char options[8];
+ static char options[8] __initdata;
char *baud = ArcGetEnvironmentVariable("dbaud");
if (baud)
strcpy(options, baud);
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index c8f7d2328b2..603fc91c103 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
+#include <linux/ftrace.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
@@ -115,7 +116,7 @@ __init void plat_time_init(void)
}
/* Generic SGI handler for (spurious) 8254 interrupts */
-void indy_8254timer_irq(void)
+void __irq_entry indy_8254timer_irq(void)
{
int irq = SGI_8254_0_IRQ;
ULONG cnt;
diff --git a/arch/mips/sgi-ip32/ip32-setup.c b/arch/mips/sgi-ip32/ip32-setup.c
index c5a5d4a31b4..3abd1465ec0 100644
--- a/arch/mips/sgi-ip32/ip32-setup.c
+++ b/arch/mips/sgi-ip32/ip32-setup.c
@@ -90,7 +90,7 @@ void __init plat_mem_setup(void)
{
char* con = ArcGetEnvironmentVariable("console");
if (con && *con == 'd') {
- static char options[8];
+ static char options[8] __initdata;
char *baud = ArcGetEnvironmentVariable("dbaud");
if (baud)
strcpy(options, baud);
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index eb5396cf81b..6343011e990 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -287,7 +287,7 @@ void __init prom_init(void)
* boot console
*/
cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE);
- if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, CL_SIZE) < 0) {
+ if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, COMMAND_LINE_SIZE) < 0) {
if (argc >= 0) {
/* The loader should have set the command line */
/* too early for panic to do any good */
@@ -318,7 +318,7 @@ void __init prom_init(void)
#endif /* CONFIG_BLK_DEV_INITRD */
/* Not sure this is needed, but it's the safe way. */
- arcs_cmdline[CL_SIZE-1] = 0;
+ arcs_cmdline[COMMAND_LINE_SIZE-1] = 0;
prom_meminit();
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index 7dd76fb3b64..e6980892834 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -188,7 +188,7 @@ static void end_a20r_irq(unsigned int irq)
}
static struct irq_chip a20r_irq_type = {
- .typename = "A20R",
+ .name = "A20R",
.ack = mask_a20r_irq,
.mask = mask_a20r_irq,
.mask_ack = mask_a20r_irq,
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c
index 74e6c67982f..51e62bbaa23 100644
--- a/arch/mips/sni/pcimt.c
+++ b/arch/mips/sni/pcimt.c
@@ -214,7 +214,7 @@ static void end_pcimt_irq(unsigned int irq)
}
static struct irq_chip pcimt_irq_type = {
- .typename = "PCIMT",
+ .name = "PCIMT",
.ack = disable_pcimt_irq,
.mask = disable_pcimt_irq,
.mask_ack = disable_pcimt_irq,
diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c
index 071a9573ac7..f4699d35858 100644
--- a/arch/mips/sni/pcit.c
+++ b/arch/mips/sni/pcit.c
@@ -176,7 +176,7 @@ void end_pcit_irq(unsigned int irq)
}
static struct irq_chip pcit_irq_type = {
- .typename = "PCIT",
+ .name = "PCIT",
.ack = disable_pcit_irq,
.mask = disable_pcit_irq,
.mask_ack = disable_pcit_irq,
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 5e687819cbc..46f00691f44 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -449,7 +449,7 @@ void end_rm200_irq(unsigned int irq)
}
static struct irq_chip rm200_irq_type = {
- .typename = "RM200",
+ .name = "RM200",
.ack = disable_rm200_irq,
.mask = disable_rm200_irq,
.mask_ack = disable_rm200_irq,
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index a49272ce7ef..d16b462154c 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -60,7 +60,7 @@ static void __init sni_console_setup(void)
char *cdev;
char *baud;
int port;
- static char options[8];
+ static char options[8] __initdata;
cdev = prom_getenv("console_dev");
if (strncmp(cdev, "tty", 3) == 0) {
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index d66802edebb..06e801c7e25 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -160,7 +160,7 @@ static void __init prom_init_cmdline(void)
int argc;
int *argv32;
int i; /* Always ignore the "-c" at argv[0] */
- static char builtin[CL_SIZE] __initdata;
+ static char builtin[COMMAND_LINE_SIZE] __initdata;
if (fw_arg0 >= CKSEG0 || fw_arg1 < CKSEG0) {
/*
@@ -315,7 +315,7 @@ static inline void txx9_cache_fixup(void)
static void __init preprocess_cmdline(void)
{
- static char cmdline[CL_SIZE] __initdata;
+ static char cmdline[COMMAND_LINE_SIZE] __initdata;
char *s;
strcpy(cmdline, arcs_cmdline);
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 6d39e222b17..6153b6a05cc 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MPIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MAIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask)
if (current_cpu_type() == CPU_VR4111 ||
current_cpu_type() == CPU_VR4121) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MKIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_macint);
@@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask)
struct irq_desc *desc = irq_desc + ETHERNET_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MMACINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_macint);
@@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask)
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_set(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_dsiuint);
@@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask)
struct irq_desc *desc = irq_desc + DSIU_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu1_clear(MDSIUINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_dsiuint);
@@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_enable_firint);
@@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask)
struct irq_desc *desc = irq_desc + FIR_IRQ;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MFIRINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(vr41xx_disable_firint);
@@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, PCIINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MPCIINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, SCUINT0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MSCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_set(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_clear(MCSIINTREG, mask);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, BCUINTR);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void)
if (current_cpu_type() == CPU_VR4122 ||
current_cpu_type() == CPU_VR4131 ||
current_cpu_type() == CPU_VR4133) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
icu2_write(MBCUINTREG, 0);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
pin = SYSINT1_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign0 = icu1_read(INTASSIGN0);
intassign1 = icu1_read(INTASSIGN1);
@@ -525,7 +525,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
intassign1 |= (uint16_t)assign << 9;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
icu1_write(INTASSIGN0, intassign0);
icu1_write(INTASSIGN1, intassign1);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
@@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
pin = SYSINT2_IRQ_TO_PIN(irq);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
intassign2 = icu1_read(INTASSIGN2);
intassign3 = icu1_read(INTASSIGN3);
@@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
intassign3 |= (uint16_t)assign << 12;
break;
default:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return -EINVAL;
}
@@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign)
icu1_write(INTASSIGN2, intassign2);
icu1_write(INTASSIGN3, intassign3);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
return 0;
}
diff --git a/arch/mn10300/include/asm/asm-offsets.h b/arch/mn10300/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/mn10300/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h
index 75a70aa9fd6..e5fa97cd9a1 100644
--- a/arch/mn10300/include/asm/elf.h
+++ b/arch/mn10300/include/asm/elf.h
@@ -77,7 +77,6 @@ do { \
_ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \
} while (0)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h
index 8eebf89f5ab..db5c53da73c 100644
--- a/arch/mn10300/include/asm/mman.h
+++ b/arch/mn10300/include/asm/mman.h
@@ -1 +1,6 @@
#include <asm-generic/mman.h>
+
+#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
+
+#define arch_mmap_check(addr, len, flags) \
+ (((flags) & MAP_FIXED && (addr) < MIN_MAP_ADDR) ? -EINVAL : 0)
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index a94e7ea3faa..c9ee6c009d7 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -578,7 +578,7 @@ ENTRY(sys_call_table)
.long sys_ni_syscall /* reserved for streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap2
+ .long sys_mmap_pgoff
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 4c3c58ef5cd..e2d5ed891f3 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v)
/* display information rows, one per active CPU */
case 1 ... NR_IRQS - 1:
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (action) {
@@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
break;
/* polish off with NMI and error counters */
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index dacafab00eb..67e6389d625 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
#define KPROBE_HIT_ACTIVE 0x00000001
#define KPROBE_HIT_SS 0x00000002
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_pc;
-static unsigned long current_kprobe_next_pc;
-static int current_kprobe_ss_flags;
+static struct kprobe *cur_kprobe;
+static unsigned long cur_kprobe_orig_pc;
+static unsigned long cur_kprobe_next_pc;
+static int cur_kprobe_ss_flags;
static unsigned long kprobe_status;
-static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
-static unsigned long current_kprobe_bp_addr;
+static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
+static unsigned long cur_kprobe_bp_addr;
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
unsigned long nextpc;
- current_kprobe_orig_pc = regs->pc;
- memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
- regs->pc = (unsigned long) current_kprobe_ss_buf;
+ cur_kprobe_orig_pc = regs->pc;
+ memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
+ regs->pc = (unsigned long) cur_kprobe_ss_buf;
- nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
- if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
- current_kprobe_next_pc =
- current_kprobe_orig_pc + (nextpc - regs->pc);
+ nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
+ if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
+ cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
else
- current_kprobe_next_pc = nextpc;
+ cur_kprobe_next_pc = nextpc;
/* branching instructions need special handling */
- if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
+ if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
nextpc = singlestep_branch_setup(regs);
- current_kprobe_bp_addr = nextpc;
+ cur_kprobe_bp_addr = nextpc;
*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
- mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
- sizeof(current_kprobe_ss_buf));
+ mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
+ sizeof(cur_kprobe_ss_buf));
mn10300_icache_inv();
}
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
disarm_kprobe(p, regs);
ret = 1;
} else {
- p = current_kprobe;
+ p = cur_kprobe;
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
}
kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
+ cur_kprobe = p;
if (p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
return 1;
@@ -491,8 +490,8 @@ no_kprobe:
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
/* we may need to fixup regs/stack after singlestepping a call insn */
- if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
- regs->pc = current_kprobe_orig_pc;
+ if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
+ regs->pc = cur_kprobe_orig_pc;
switch (p->ainsn.insn[0]) {
case 0xcd: /* CALL (d16,PC) */
*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
}
}
- regs->pc = current_kprobe_next_pc;
- current_kprobe_bp_addr = 0;
+ regs->pc = cur_kprobe_next_pc;
+ cur_kprobe_bp_addr = 0;
}
static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;
- if (current_kprobe->post_handler)
- current_kprobe->post_handler(current_kprobe, regs, 0);
+ if (cur_kprobe->post_handler)
+ cur_kprobe->post_handler(cur_kprobe, regs, 0);
- resume_execution(current_kprobe, regs);
+ resume_execution(cur_kprobe, regs);
reset_current_kprobe();
preempt_enable_no_resched();
return 1;
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
static inline
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
- if (current_kprobe->fault_handler &&
- current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ if (cur_kprobe->fault_handler &&
+ cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
return 1;
if (kprobe_status & KPROBE_HIT_SS) {
- resume_execution(current_kprobe, regs);
+ resume_execution(cur_kprobe, regs);
reset_current_kprobe();
preempt_enable_no_resched();
}
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
switch (val) {
case DIE_BREAKPOINT:
- if (current_kprobe_bp_addr != args->regs->pc) {
+ if (cur_kprobe_bp_addr != args->regs->pc) {
if (kprobe_handler(args->regs))
return NOTIFY_STOP;
} else {
diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c
index 8ca5af00334..17cc6ce04e8 100644
--- a/arch/mn10300/kernel/sys_mn10300.c
+++ b/arch/mn10300/kernel/sys_mn10300.c
@@ -23,47 +23,13 @@
#include <asm/uaccess.h>
-#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
-
-/*
- * memory mapping syscall
- */
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- struct file *file = NULL;
- long error = -EINVAL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
- goto out;
-
- error = -EBADF;
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
asmlinkage long old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;
- return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
struct sel_arg_struct {
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 18072e03a01..92343bd35fa 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -445,12 +445,7 @@ done:
int hpux_pipe(int *kstack_fildes)
{
- int error;
-
- lock_kernel();
- error = do_pipe_flags(kstack_fildes, 0);
- unlock_kernel();
- return error;
+ return do_pipe_flags(kstack_fildes, 0);
}
/* lies - says it works, but it really didn't lock anything */
diff --git a/arch/parisc/include/asm/asm-offsets.h b/arch/parisc/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/parisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b..716634d1f54 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,19 +27,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 8cfc553fc83..75e46c557a1 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -32,14 +32,14 @@
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
- for(;;) ; \
+ unreachable(); \
} while(0)
#else
#define BUG() \
do { \
asm volatile(PARISC_BUG_BREAK_ASM : : ); \
- for(;;) ; \
+ unreachable(); \
} while(0)
#endif
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 9c802eb4be8..19f6cb1a4a1 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h
index 1e1c824764e..f357fc693c8 100644
--- a/arch/parisc/include/asm/fcntl.h
+++ b/arch/parisc/include/asm/fcntl.h
@@ -1,14 +1,13 @@
#ifndef _PARISC_FCNTL_H
#define _PARISC_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_APPEND 000000010
#define O_BLKSEEK 000000100 /* HPUX only */
#define O_CREAT 000000400 /* not fcntl */
#define O_EXCL 000002000 /* not fcntl */
#define O_LARGEFILE 000004000
-#define O_SYNC 000100000
+#define __O_SYNC 000100000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */
#define O_NOCTTY 000400000 /* not fcntl */
#define O_DSYNC 001000000 /* HPUX only */
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 2fa05dd6aee..72c0fafaa03 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -20,6 +20,20 @@ struct ftrace_ret_stack {
* Defined in entry.S
*/
extern void return_to_handler(void);
+
+
+extern unsigned long return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 return_address(1)
+#define CALLER_ADDR2 return_address(2)
+#define CALLER_ADDR3 return_address(3)
+#define CALLER_ADDR4 return_address(4)
+#define CALLER_ADDR5 return_address(5)
+#define CALLER_ADDR6 return_address(6)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PARISC_FTRACE_H */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa..74036f436a3 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter--;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
return 0;
/* Wait until we have a realistic chance at the lock */
- while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+ while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b..8c373aa28a8 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,18 +4,18 @@
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
-} raw_spinlock_t;
+} arch_spinlock_t;
typedef struct {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index fcd3c707bf1..ec787b411e9 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -244,9 +244,6 @@ int main(void)
DEFINE(THREAD_SZ, sizeof(struct thread_info));
DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
BLANK();
- DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
- DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
- BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 2e7610cb33d..efbcee5d222 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -145,7 +145,7 @@ static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
#endif
static struct irq_chip cpu_interrupt_type = {
- .typename = "CPU",
+ .name = "CPU",
.startup = cpu_startup_irq,
.shutdown = cpu_disable_irq,
.enable = cpu_enable_irq,
@@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i < NR_IRQS) {
struct irqaction *action;
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -192,7 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
@@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index e8467e4aa8d..fb37ac52e46 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -26,7 +26,6 @@
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
-#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <asm/uaccess.h>
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 1fd0f0cec03..3f2fce8ce6b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -60,8 +60,6 @@ static int smp_debug_lvl = 0;
#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
-DEFINE_SPINLOCK(smp_lock);
-
volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
@@ -69,7 +67,7 @@ static volatile int cpu_now_booting __cpuinitdata;
static int parisc_max_cpus __cpuinitdata = 1;
-DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
+static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
IPI_NOP=0,
@@ -438,6 +436,11 @@ void __init smp_prepare_boot_cpu(void)
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(ipi_lock, cpu));
+
init_cpu_present(cpumask_of(0));
parisc_max_cpus = max_cpus;
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 71b31957c8f..9147391afb0 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -110,37 +110,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
-static unsigned long do_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags, unsigned long fd,
- unsigned long pgoff)
-{
- struct file * file = NULL;
- unsigned long error = -EBADF;
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file != NULL)
- fput(file);
-out:
- return error;
-}
-
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
- return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
@@ -148,7 +125,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long offset)
{
if (!(offset & ~PAGE_MASK)) {
- return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> PAGE_SHIFT);
} else {
return -EINVAL;
}
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 76d23ec8dfa..9779ece2b07 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -26,13 +26,7 @@
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
-#include <linux/nfs_fs.h>
#include <linux/ncp_fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index a36799e8569..d58eac1a828 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
+#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/assembly.h>
@@ -115,24 +116,18 @@ unwind_table_init(struct unwind_table *table, const char *name,
}
}
+static int cmp_unwind_table_entry(const void *a, const void *b)
+{
+ return ((const struct unwind_table_entry *)a)->region_start
+ - ((const struct unwind_table_entry *)b)->region_start;
+}
+
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
- struct unwind_table_entry el, *p, *q;
-
- for (p = start + 1; p < finish; ++p) {
- if (p[0].region_start < p[-1].region_start) {
- el = *p;
- q = p;
- do {
- q[0] = q[-1];
- --q;
- } while (q > start &&
- el.region_start < q[-1].region_start);
- *q = el;
- }
- }
+ sort(start, finish - start, sizeof(struct unwind_table_entry),
+ cmp_unwind_table_entry, NULL);
}
struct unwind_table *
@@ -417,3 +412,30 @@ int unwind_to_user(struct unwind_frame_info *info)
return ret;
}
+
+unsigned long return_address(unsigned int level)
+{
+ struct unwind_frame_info info;
+ struct pt_regs r;
+ unsigned long sp;
+
+ /* initialize unwind info */
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ memset(&r, 0, sizeof(struct pt_regs));
+ r.iaoq[0] = (unsigned long) current_text_addr();
+ r.gr[2] = (unsigned long) __builtin_return_address(0);
+ r.gr[30] = sp;
+ unwind_frame_init(&info, current, &r);
+
+ /* unwind stack */
+ ++level;
+ do {
+ if (unwind_once(&info) < 0 || info.ip == 0)
+ return 0;
+ if (!__kernel_text_address(info.ip)) {
+ return 0;
+ }
+ } while (info.ip && level--);
+
+ return info.ip;
+}
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index e3eb739fab1..353963d4205 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -12,8 +12,8 @@
#include <asm/atomic.h>
#ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
- [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2ba14e77296..ba3948c7007 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -56,6 +56,16 @@ config IRQ_PER_CPU
bool
default y
+config NR_IRQS
+ int "Number of virtual interrupt numbers"
+ range 32 512
+ default "512"
+ help
+ This defines the number of virtual interrupt numbers the kernel
+ can manage. Virtual interrupt numbers are what you see in
+ /proc/interrupts. If you configure your system to have too few,
+ drivers will fail to load or worse - handle with care.
+
config STACKTRACE_SUPPORT
bool
default y
@@ -199,24 +209,14 @@ config DEFAULT_UIMAGE
config REDBOOT
bool
-config HIBERNATE_32
- bool
- depends on (PPC_PMAC && !SMP) || BROKEN
- default y
-
-config HIBERNATE_64
- bool
- depends on BROKEN || (PPC_PMAC64 && EXPERIMENTAL)
- default y
-
config ARCH_HIBERNATION_POSSIBLE
bool
- depends on (PPC64 && HIBERNATE_64) || (PPC32 && HIBERNATE_32)
default y
config ARCH_SUSPEND_POSSIBLE
def_bool y
- depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx
+ depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
+ PPC_85xx || PPC_86xx
config PPC_DCR_NATIVE
bool
@@ -320,6 +320,10 @@ config HOTPLUG_CPU
Say N if you are unsure.
+config ARCH_CPU_PROBE_RELEASE
+ def_bool y
+ depends on HOTPLUG_CPU
+
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
@@ -378,6 +382,19 @@ config IRQ_ALL_CPUS
CPU. Generally saying Y is safe, although some problems have been
reported with SMP Power Macintoshes with this option enabled.
+config SPARSE_IRQ
+ bool "Support sparse irq numbering"
+ default y
+ help
+ This enables support for sparse irqs. This is useful for distro
+ kernels that want to define a high CONFIG_NR_CPUS value but still
+ want to have low kernel memory footprint on smaller machines.
+
+ ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
+ out the irq_desc[] array in a more NUMA-friendly way. )
+
+ If you don't know what to do here, say Y.
+
config NUMA
bool "NUMA support"
depends on PPC64
@@ -652,6 +669,14 @@ config FSL_PCI
select PPC_INDIRECT_PCI
select PCI_QUIRKS
+config FSL_PMC
+ bool
+ default y
+ depends on SUSPEND && (PPC_85xx || PPC_86xx)
+ help
+ Freescale MPC85xx/MPC86xx power management controller support
+ (suspend/resume). For MPC83xx see platforms/83xx/suspend.c
+
config 4xx_SOC
bool
@@ -679,7 +704,7 @@ config PPC_PCI_CHOICE
config PCI
bool "PCI support" if PPC_PCI_CHOICE
default y if !40x && !CPM2 && !8xx && !PPC_83xx \
- && !PPC_85xx && !PPC_86xx
+ && !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
default PCI_QSPAN if !4xx && !CPM2 && 8xx
select ARCH_SUPPORTS_MSI
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index bf3382f1904..5cdd7ed9a12 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -254,6 +254,14 @@ config PPC_EARLY_DEBUG_CPM
using a CPM-based serial port. This assumes that the bootwrapper
has run, and set up the CPM in a particular way.
+config PPC_EARLY_DEBUG_USBGECKO
+ bool "Early debugging through the USB Gecko adapter"
+ depends on GAMECUBE_COMMON
+ select USBGECKO_UDBG
+ help
+ Select this to enable early debugging for Nintendo GameCube/Wii
+ consoles via an external USB Gecko adapter.
+
endchoice
config PPC_EARLY_DEBUG_44x_PHYSLOW
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 7bfc8ad8779..bb2465bcb32 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -66,7 +66,7 @@ src-wlib := string.S crt0.S crtsavres.S stdio.c main.c \
gunzip_util.c elf_util.c $(zlib) devtree.c oflib.c ofconsole.c \
4xx.c ebony.c mv64x60.c mpsc.c mv64x60_i2c.c cuboot.c bamboo.c \
cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \
- fsl-soc.c mpc8xx.c pq2.c
+ fsl-soc.c mpc8xx.c pq2.c ugecon.c
src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \
cuboot-ebony.c cuboot-hotfoot.c treeboot-ebony.c prpmc2800.c \
ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \
@@ -76,7 +76,8 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
- cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c
+ cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
+ gamecube-head.S gamecube.c wii-head.S wii.c
src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -254,6 +255,8 @@ image-$(CONFIG_KSI8560) += cuImage.ksi8560
image-$(CONFIG_STORCENTER) += cuImage.storcenter
image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2
image-$(CONFIG_PPC_C2K) += cuImage.c2k
+image-$(CONFIG_GAMECUBE) += dtbImage.gamecube
+image-$(CONFIG_WII) += dtbImage.wii
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index c920170b7df..cd56bb5b347 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -352,6 +352,7 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
@@ -381,6 +382,7 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/eiger.dts b/arch/powerpc/boot/dts/eiger.dts
index c4a934f2e88..48bcf718792 100644
--- a/arch/powerpc/boot/dts/eiger.dts
+++ b/arch/powerpc/boot/dts/eiger.dts
@@ -316,6 +316,7 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
@@ -345,6 +346,7 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
@@ -375,6 +377,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>; /* emac2&3 only */
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII1>;
@@ -403,6 +407,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>; /* emac2&3 only */
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII1>;
diff --git a/arch/powerpc/boot/dts/gamecube.dts b/arch/powerpc/boot/dts/gamecube.dts
new file mode 100644
index 00000000000..ef3be0e58b0
--- /dev/null
+++ b/arch/powerpc/boot/dts/gamecube.dts
@@ -0,0 +1,114 @@
+/*
+ * arch/powerpc/boot/dts/gamecube.dts
+ *
+ * Nintendo GameCube platform device tree source
+ * Copyright (C) 2007-2009 The GameCube Linux Team
+ * Copyright (C) 2007,2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/ {
+ model = "nintendo,gamecube";
+ compatible = "nintendo,gamecube";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen {
+ bootargs = "root=/dev/gcnsda2 rootwait udbg-immortal";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x01800000>;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,gekko@0 {
+ device_type = "cpu";
+ reg = <0>;
+ clock-frequency = <486000000>; /* 486MHz */
+ bus-frequency = <162000000>; /* 162MHz core-to-bus 3x */
+ timebase-frequency = <40500000>; /* 162MHz / 4 */
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ };
+ };
+
+ /* devices contained in the flipper chipset */
+ flipper {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "nintendo,flipper";
+ ranges = <0x0c000000 0x0c000000 0x00010000>;
+ interrupt-parent = <&PIC>;
+
+ video@0c002000 {
+ compatible = "nintendo,flipper-vi";
+ reg = <0x0c002000 0x100>;
+ interrupts = <8>;
+ };
+
+ processor-interface@0c003000 {
+ compatible = "nintendo,flipper-pi";
+ reg = <0x0c003000 0x100>;
+
+ PIC: pic {
+ #interrupt-cells = <1>;
+ compatible = "nintendo,flipper-pic";
+ interrupt-controller;
+ };
+ };
+
+ dsp@0c005000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "nintendo,flipper-dsp";
+ reg = <0x0c005000 0x200>;
+ interrupts = <6>;
+
+ memory@0 {
+ compatible = "nintendo,flipper-aram";
+ reg = <0 0x1000000>; /* 16MB */
+ };
+ };
+
+ disk@0c006000 {
+ compatible = "nintendo,flipper-di";
+ reg = <0x0c006000 0x40>;
+ interrupts = <2>;
+ };
+
+ audio@0c006c00 {
+ compatible = "nintendo,flipper-ai";
+ reg = <0x0c006c00 0x20>;
+ interrupts = <6>;
+ };
+
+ gamepad-controller@0c006400 {
+ compatible = "nintendo,flipper-si";
+ reg = <0x0c006400 0x100>;
+ interrupts = <3>;
+ };
+
+ /* External Interface bus */
+ exi@0c006800 {
+ compatible = "nintendo,flipper-exi";
+ reg = <0x0c006800 0x40>;
+ virtual-reg = <0x0c006800>;
+ interrupts = <4>;
+ };
+ };
+};
+
diff --git a/arch/powerpc/boot/dts/gef_ppc9a.dts b/arch/powerpc/boot/dts/gef_ppc9a.dts
index 910944edd88..c86114e93f1 100644
--- a/arch/powerpc/boot/dts/gef_ppc9a.dts
+++ b/arch/powerpc/boot/dts/gef_ppc9a.dts
@@ -118,6 +118,12 @@
};
};
+ nvram@3,0 {
+ device_type = "nvram";
+ compatible = "simtek,stk14ca8";
+ reg = <0x3 0x0 0x20000>;
+ };
+
fpga@4,0 {
compatible = "gef,ppc9a-fpga-regs";
reg = <0x4 0x0 0x40>;
diff --git a/arch/powerpc/boot/dts/gef_sbc310.dts b/arch/powerpc/boot/dts/gef_sbc310.dts
index 2107d3c7cfe..820c2b355ab 100644
--- a/arch/powerpc/boot/dts/gef_sbc310.dts
+++ b/arch/powerpc/boot/dts/gef_sbc310.dts
@@ -115,6 +115,12 @@
};
};
+ nvram@3,0 {
+ device_type = "nvram";
+ compatible = "simtek,stk14ca8";
+ reg = <0x3 0x0 0x20000>;
+ };
+
fpga@4,0 {
compatible = "gef,fpga-regs";
reg = <0x4 0x0 0x40>;
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index 35a63183eec..30911adefc8 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -84,6 +84,12 @@
6 0 0xfd000000 0x00800000 // IO FPGA (8-bit)
7 0 0xfd800000 0x00800000>; // IO FPGA (32-bit)
+ nvram@3,0 {
+ device_type = "nvram";
+ compatible = "simtek,stk14ca8";
+ reg = <0x3 0x0 0x20000>;
+ };
+
fpga@4,0 {
compatible = "gef,fpga-regs";
reg = <0x4 0x0 0x40>;
diff --git a/arch/powerpc/boot/dts/glacier.dts b/arch/powerpc/boot/dts/glacier.dts
index f3787a27f63..f6f61893929 100644
--- a/arch/powerpc/boot/dts/glacier.dts
+++ b/arch/powerpc/boot/dts/glacier.dts
@@ -292,6 +292,7 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
@@ -321,6 +322,7 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
@@ -351,6 +353,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>; /* emac2&3 only */
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII1>;
@@ -379,6 +383,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>; /* emac2&3 only */
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII1>;
diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts
index 5b2a4947bf8..2b256694eca 100644
--- a/arch/powerpc/boot/dts/haleakala.dts
+++ b/arch/powerpc/boot/dts/haleakala.dts
@@ -226,6 +226,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index 077819bc3cb..8f345de960c 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -16,7 +16,7 @@
/ {
#address-cells = <2>;
- #size-cells = <1>;
+ #size-cells = <2>;
model = "amcc,katmai";
compatible = "amcc,katmai";
dcr-parent = <&{/cpus/cpu@0}>;
@@ -49,7 +49,7 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x00000000 0x00000000>; /* Filled in by zImage */
+ reg = <0x0 0x00000000 0x0 0x00000000>; /* Filled in by U-Boot */
};
UIC0: interrupt-controller0 {
@@ -108,11 +108,26 @@
dcr-reg = <0x00c 0x002>;
};
+ MQ0: mq {
+ compatible = "ibm,mq-440spe";
+ dcr-reg = <0x040 0x020>;
+ };
+
plb {
compatible = "ibm,plb-440spe", "ibm,plb-440gp", "ibm,plb4";
#address-cells = <2>;
#size-cells = <1>;
- ranges;
+ /* addr-child addr-parent size */
+ ranges = <0x4 0x00100000 0x4 0x00100000 0x00001000
+ 0x4 0x00200000 0x4 0x00200000 0x00000400
+ 0x4 0xe0000000 0x4 0xe0000000 0x20000000
+ 0xc 0x00000000 0xc 0x00000000 0x20000000
+ 0xd 0x00000000 0xd 0x00000000 0x80000000
+ 0xd 0x80000000 0xd 0x80000000 0x80000000
+ 0xe 0x00000000 0xe 0x00000000 0x80000000
+ 0xe 0x80000000 0xe 0x80000000 0x80000000
+ 0xf 0x00000000 0xf 0x00000000 0x80000000
+ 0xf 0x80000000 0xf 0x80000000 0x80000000>;
clock-frequency = <0>; /* Filled in by zImage */
SDRAM0: sdram {
@@ -245,8 +260,8 @@
ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000
0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>;
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+ /* Inbound 4GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
/* This drives busses 0 to 0xf */
bus-range = <0x0 0xf>;
@@ -289,10 +304,10 @@
ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+ /* Inbound 4GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
- /* This drives busses 10 to 0x1f */
+ /* This drives busses 0x10 to 0x1f */
bus-range = <0x10 0x1f>;
/* Legacy interrupts (note the weird polarity, the bridge seems
@@ -330,10 +345,10 @@
ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+ /* Inbound 4GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
- /* This drives busses 10 to 0x1f */
+ /* This drives busses 0x20 to 0x2f */
bus-range = <0x20 0x2f>;
/* Legacy interrupts (note the weird polarity, the bridge seems
@@ -371,10 +386,10 @@
ranges = <0x02000000 0x00000000 0x80000000 0x0000000f 0x00000000 0x00000000 0x80000000
0x01000000 0x00000000 0x00000000 0x0000000f 0x80020000 0x00000000 0x00010000>;
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
+ /* Inbound 4GB range starting at 0 */
+ dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
- /* This drives busses 10 to 0x1f */
+ /* This drives busses 0x30 to 0x3f */
bus-range = <0x30 0x3f>;
/* Legacy interrupts (note the weird polarity, the bridge seems
@@ -392,6 +407,49 @@
0x0 0x0 0x0 0x3 &UIC3 0xa 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
};
+
+ I2O: i2o@400100000 {
+ compatible = "ibm,i2o-440spe";
+ reg = <0x00000004 0x00100000 0x100>;
+ dcr-reg = <0x060 0x020>;
+ };
+
+ DMA0: dma0@400100100 {
+ compatible = "ibm,dma-440spe";
+ cell-index = <0>;
+ reg = <0x00000004 0x00100100 0x100>;
+ dcr-reg = <0x060 0x020>;
+ interrupt-parent = <&DMA0>;
+ interrupts = <0 1>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = <
+ 0 &UIC0 0x14 4
+ 1 &UIC1 0x16 4>;
+ };
+
+ DMA1: dma1@400100200 {
+ compatible = "ibm,dma-440spe";
+ cell-index = <1>;
+ reg = <0x00000004 0x00100200 0x100>;
+ dcr-reg = <0x060 0x020>;
+ interrupt-parent = <&DMA1>;
+ interrupts = <0 1>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ interrupt-map = <
+ 0 &UIC0 0x16 4
+ 1 &UIC1 0x16 4>;
+ };
+
+ xor-accel@400200000 {
+ compatible = "amcc,xor-accelerator";
+ reg = <0x00000004 0x00200000 0x400>;
+ interrupt-parent = <&UIC1>;
+ interrupts = <0x1f 4>;
+ };
};
chosen {
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index c46561456ed..083e68eeaca 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -272,6 +272,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
@@ -300,6 +302,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/kmeter1.dts b/arch/powerpc/boot/dts/kmeter1.dts
index 167044f7de1..65b8b4f27ef 100644
--- a/arch/powerpc/boot/dts/kmeter1.dts
+++ b/arch/powerpc/boot/dts/kmeter1.dts
@@ -59,6 +59,13 @@
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>; /* Filled in by U-Boot */
+ pmc: power@b00 {
+ compatible = "fsl,mpc8360-pmc", "fsl,mpc8349-pmc";
+ reg = <0xb00 0x100 0xa00 0x100>;
+ interrupts = <80 0x8>;
+ interrupt-parent = <&ipic>;
+ };
+
i2c@3000 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/powerpc/boot/dts/makalu.dts b/arch/powerpc/boot/dts/makalu.dts
index ffc246e7267..63d48b632c8 100644
--- a/arch/powerpc/boot/dts/makalu.dts
+++ b/arch/powerpc/boot/dts/makalu.dts
@@ -227,6 +227,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x0000003f>; /* Start at 6 */
rgmii-device = <&RGMII0>;
@@ -255,6 +257,8 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 32e10f588c1..8a3a4f3ef83 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -204,6 +204,7 @@
interrupt-parent = <&ipic>;
tbi-handle = <&tbi0>;
phy-handle = < &phy0 >;
+ fsl,magic-packet;
mdio@520 {
#address-cells = <1>;
@@ -246,6 +247,7 @@
interrupt-parent = <&ipic>;
tbi-handle = <&tbi1>;
phy-handle = < &phy1 >;
+ fsl,magic-packet;
mdio@520 {
#address-cells = <1>;
@@ -309,6 +311,22 @@
interrupt-parent = <&ipic>;
};
+ gtm1: timer@500 {
+ compatible = "fsl,mpc8315-gtm", "fsl,gtm";
+ reg = <0x500 0x100>;
+ interrupts = <90 8 78 8 84 8 72 8>;
+ interrupt-parent = <&ipic>;
+ clock-frequency = <133333333>;
+ };
+
+ timer@600 {
+ compatible = "fsl,mpc8315-gtm", "fsl,gtm";
+ reg = <0x600 0x100>;
+ interrupts = <91 8 79 8 85 8 73 8>;
+ interrupt-parent = <&ipic>;
+ clock-frequency = <133333333>;
+ };
+
/* IPIC
* interrupts cell = <intr #, sense>
* sense values match linux IORESOURCE_IRQ_* defines:
@@ -337,6 +355,15 @@
0x59 0x8>;
interrupt-parent = < &ipic >;
};
+
+ pmc: power@b00 {
+ compatible = "fsl,mpc8315-pmc", "fsl,mpc8313-pmc",
+ "fsl,mpc8349-pmc";
+ reg = <0xb00 0x100 0xa00 0x100>;
+ interrupts = <80 8>;
+ interrupt-parent = <&ipic>;
+ fsl,mpc8313-wakeup-timer = <&gtm1>;
+ };
};
pci0: pci@e0008500 {
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index 436c9c671dd..05ad8c98e52 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -79,6 +79,13 @@
reg = <0x200 0x100>;
};
+ pmc: power@b00 {
+ compatible = "fsl,mpc8323-pmc", "fsl,mpc8349-pmc";
+ reg = <0xb00 0x100 0xa00 0x100>;
+ interrupts = <80 0x8>;
+ interrupt-parent = <&ipic>;
+ };
+
i2c@3000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -163,6 +170,7 @@
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0x4c>;
fsl,descriptor-types-mask = <0x0122003f>;
+ sleep = <&pmc 0x03000000>;
};
ipic: pic@700 {
@@ -428,5 +436,6 @@
0xe0008300 0x8>; /* config space access registers */
compatible = "fsl,mpc8349-pci";
device_type = "pci";
+ sleep = <&pmc 0x00010000>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index 9a0952f74b8..f4fadb23ad6 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -62,6 +62,13 @@
reg = <0x200 0x100>;
};
+ pmc: power@b00 {
+ compatible = "fsl,mpc8323-pmc", "fsl,mpc8349-pmc";
+ reg = <0xb00 0x100 0xa00 0x100>;
+ interrupts = <80 0x8>;
+ interrupt-parent = <&ipic>;
+ };
+
i2c@3000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -141,6 +148,7 @@
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0x4c>;
fsl,descriptor-types-mask = <0x0122003f>;
+ sleep = <&pmc 0x03000000>;
};
ipic:pic@700 {
@@ -360,5 +368,6 @@
0xe0008300 0x8>; /* config space access registers */
compatible = "fsl,mpc8349-pci";
device_type = "pci";
+ sleep = <&pmc 0x00010000>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index feeeb7f9d60..b53d1df11e2 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -63,6 +63,24 @@
reg = <0x200 0x100>;
};
+ gpio1: gpio-controller@c00 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8349-gpio";
+ reg = <0xc00 0x100>;
+ interrupts = <74 0x8>;
+ interrupt-parent = <&ipic>;
+ gpio-controller;
+ };
+
+ gpio2: gpio-controller@d00 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8349-gpio";
+ reg = <0xd00 0x100>;
+ interrupts = <75 0x8>;
+ interrupt-parent = <&ipic>;
+ gpio-controller;
+ };
+
i2c@3000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -72,6 +90,12 @@
interrupts = <14 0x8>;
interrupt-parent = <&ipic>;
dfsrr;
+
+ eeprom: at24@50 {
+ compatible = "st-micro,24c256";
+ reg = <0x50>;
+ };
+
};
i2c@3100 {
@@ -91,6 +115,25 @@
interrupt-parent = <&ipic>;
};
+ pcf1: iexp@38 {
+ #gpio-cells = <2>;
+ compatible = "ti,pcf8574a";
+ reg = <0x38>;
+ gpio-controller;
+ };
+
+ pcf2: iexp@39 {
+ #gpio-cells = <2>;
+ compatible = "ti,pcf8574a";
+ reg = <0x39>;
+ gpio-controller;
+ };
+
+ spd: at24@51 {
+ compatible = "at24,spd";
+ reg = <0x51>;
+ };
+
mcu_pio: mcu@a {
#gpio-cells = <2>;
compatible = "fsl,mc9s08qg8-mpc8349emitx",
@@ -275,6 +318,24 @@
reg = <0x700 0x100>;
device_type = "ipic";
};
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ green {
+ label = "Green";
+ gpios = <&pcf1 0 1>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ yellow {
+ label = "Yellow";
+ gpios = <&pcf1 1 1>;
+ /* linux,default-trigger = "heartbeat"; */
+ default-state = "on";
+ };
+ };
+
};
pci0: pci@e0008500 {
@@ -331,7 +392,26 @@
compatible = "fsl,mpc8349e-localbus",
"fsl,pq2pro-localbus";
reg = <0xe0005000 0xd8>;
- ranges = <0x3 0x0 0xf0000000 0x210>;
+ ranges = <0x0 0x0 0xfe000000 0x1000000 /* flash */
+ 0x1 0x0 0xf8000000 0x20000 /* VSC 7385 */
+ 0x2 0x0 0xf9000000 0x200000 /* exp slot */
+ 0x3 0x0 0xf0000000 0x210>; /* CF slot */
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x800000>;
+ bank-width = <2>;
+ device-width = <1>;
+ };
+
+ flash@0,800000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x800000 0x800000>;
+ bank-width = <2>;
+ device-width = <1>;
+ };
pata@3,0 {
compatible = "fsl,mpc8349emitx-pata", "ata-generic";
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 39ff4c829ca..45cfa1c50a2 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -99,6 +99,13 @@
reg = <0x200 0x100>;
};
+ pmc: power@b00 {
+ compatible = "fsl,mpc8360-pmc", "fsl,mpc8349-pmc";
+ reg = <0xb00 0x100 0xa00 0x100>;
+ interrupts = <80 0x8>;
+ interrupt-parent = <&ipic>;
+ };
+
i2c@3000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -194,6 +201,7 @@
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0x7e>;
fsl,descriptor-types-mask = <0x01010ebf>;
+ sleep = <&pmc 0x03000000>;
};
ipic: pic@700 {
@@ -470,5 +478,6 @@
0xe0008300 0x8>; /* config space access registers */
compatible = "fsl,mpc8349-pci";
device_type = "pci";
+ sleep = <&pmc 0x00010000>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index 6315d6fcc58..bdf4459677b 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -71,6 +71,13 @@
reg = <0x200 0x100>;
};
+ pmc: power@b00 {
+ compatible = "fsl,mpc8360-pmc", "fsl,mpc8349-pmc";
+ reg = <0xb00 0x100 0xa00 0x100>;
+ interrupts = <80 0x8>;
+ interrupt-parent = <&ipic>;
+ };
+
i2c@3000 {
#address-cells = <1>;
#size-cells = <0>;
@@ -161,6 +168,7 @@
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0x7e>;
fsl,descriptor-types-mask = <0x01010ebf>;
+ sleep = <&pmc 0x03000000>;
};
ipic: interrupt-controller@700 {
@@ -455,6 +463,7 @@
0xa800 0 0 2 &ipic 20 8
0xa800 0 0 3 &ipic 21 8
0xa800 0 0 4 &ipic 18 8>;
+ sleep = <&pmc 0x00010000>;
/* filled by u-boot */
bus-range = <0 0>;
clock-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index 00c2bbda701..6d892ba74e5 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -40,6 +40,8 @@
i-cache-line-size = <32>; // 32 bytes
d-cache-size = <0x8000>; // L1, 32K
i-cache-size = <0x8000>; // L1, 32K
+ sleep = <&pmc 0x00008000 // core
+ &pmc 0x00004000>; // timebase
timebase-frequency = <0>;
bus-frequency = <0>;
clock-frequency = <0>;
@@ -94,31 +96,41 @@
interrupts = <16 2>;
};
- i2c@3000 {
+ i2c-sleep-nexus {
#address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000004>;
+ ranges;
- rtc@68 {
- compatible = "dallas,ds1374";
- reg = <0x68>;
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ interrupts = <3 1>;
+ interrupt-parent = <&mpic>;
+ };
};
- };
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
};
dma@21300 {
@@ -128,6 +140,8 @@
reg = <0x21300 0x4>;
ranges = <0x0 0x21100 0x200>;
cell-index = <0>;
+ sleep = <&pmc 0x00000400>;
+
dma-channel@0 {
compatible = "fsl,mpc8568-dma-channel",
"fsl,eloplus-dma-channel";
@@ -176,6 +190,7 @@
interrupt-parent = <&mpic>;
tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
+ sleep = <&pmc 0x00000080>;
mdio@520 {
#address-cells = <1>;
@@ -228,6 +243,7 @@
interrupt-parent = <&mpic>;
tbi-handle = <&tbi1>;
phy-handle = <&phy3>;
+ sleep = <&pmc 0x00000040>;
mdio@520 {
#address-cells = <1>;
@@ -242,30 +258,47 @@
};
};
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
+ duart-sleep-nexus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000002>;
+ ranges;
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
};
- global-utilities@e0000 { //global utilities block
- compatible = "fsl,mpc8548-guts";
+ global-utilities@e0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8568-guts", "fsl,mpc8548-guts";
reg = <0xe0000 0x1000>;
+ ranges = <0 0xe0000 0x1000>;
fsl,has-rstcr;
- };
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
+ pmc: power@70 {
+ compatible = "fsl,mpc8568-pmc",
+ "fsl,mpc8548-pmc";
+ reg = <0x70 0x20>;
+ };
};
crypto@30000 {
@@ -277,6 +310,7 @@
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0xfe>;
fsl,descriptor-types-mask = <0x12b0ebf>;
+ sleep = <&pmc 0x01000000>;
};
mpic: pic@40000 {
@@ -376,6 +410,7 @@
compatible = "fsl,qe";
ranges = <0x0 0xe0080000 0x40000>;
reg = <0xe0080000 0x480>;
+ sleep = <&pmc 0x00000800>;
brg-frequency = <0>;
bus-frequency = <396000000>;
fsl,qe-num-riscs = <2>;
@@ -509,6 +544,7 @@
bus-range = <0 255>;
ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
0x1000000 0x0 0x0 0xe2000000 0x0 0x800000>;
+ sleep = <&pmc 0x80000000>;
clock-frequency = <66666666>;
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -534,6 +570,7 @@
bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
0x1000000 0x0 0x0 0xe2800000 0x0 0x800000>;
+ sleep = <&pmc 0x20000000>;
clock-frequency = <33333333>;
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -570,5 +607,7 @@
55 2 /* msg2_tx */
56 2 /* msg2_rx */>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x00080000 /* controller */
+ &pmc 0x00040000>; /* message unit */
};
};
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 1e3ec8f059b..795eb362fcf 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -41,6 +41,8 @@
i-cache-line-size = <32>; // 32 bytes
d-cache-size = <0x8000>; // L1, 32K
i-cache-size = <0x8000>; // L1, 32K
+ sleep = <&pmc 0x00008000 // core
+ &pmc 0x00004000>; // timebase
timebase-frequency = <0>;
bus-frequency = <0>;
clock-frequency = <0>;
@@ -59,6 +61,7 @@
reg = <0xe0005000 0x1000>;
interrupts = <19 2>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x08000000>;
ranges = <0x0 0x0 0xfe000000 0x02000000
0x1 0x0 0xf8000000 0x00008000
@@ -158,51 +161,69 @@
interrupts = <18 2>;
};
- i2c@3000 {
+ i2c-sleep-nexus {
#address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000004>;
+ ranges;
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+
+ rtc@68 {
+ compatible = "dallas,ds1374";
+ reg = <0x68>;
+ interrupts = <3 1>;
+ interrupt-parent = <&mpic>;
+ };
+ };
- rtc@68 {
- compatible = "dallas,ds1374";
- reg = <0x68>;
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
};
};
- i2c@3100 {
+ duart-sleep-nexus {
#address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <43 2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ sleep = <&pmc 0x00000002>;
+ ranges;
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4500 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
- };
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x4600 0x100>;
- clock-frequency = <0>;
- interrupts = <42 2>;
- interrupt-parent = <&mpic>;
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
};
L2: l2-cache-controller@20000 {
@@ -260,6 +281,7 @@
reg = <0x2e000 0x1000>;
interrupts = <72 0x8>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x00200000>;
/* Filled in by U-Boot */
clock-frequency = <0>;
status = "disabled";
@@ -276,6 +298,7 @@
fsl,channel-fifo-len = <24>;
fsl,exec-units-mask = <0xbfe>;
fsl,descriptor-types-mask = <0x3ab0ebf>;
+ sleep = <&pmc 0x01000000>;
};
mpic: pic@40000 {
@@ -304,9 +327,18 @@
};
global-utilities@e0000 {
- compatible = "fsl,mpc8569-guts";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8569-guts", "fsl,mpc8548-guts";
reg = <0xe0000 0x1000>;
+ ranges = <0 0xe0000 0x1000>;
fsl,has-rstcr;
+
+ pmc: power@70 {
+ compatible = "fsl,mpc8569-pmc",
+ "fsl,mpc8548-pmc";
+ reg = <0x70 0x20>;
+ };
};
par_io@e0100 {
@@ -422,6 +454,7 @@
compatible = "fsl,qe";
ranges = <0x0 0xe0080000 0x40000>;
reg = <0xe0080000 0x480>;
+ sleep = <&pmc 0x00000800>;
brg-frequency = <0>;
bus-frequency = <0>;
fsl,qe-num-riscs = <4>;
@@ -684,6 +717,7 @@
bus-range = <0 255>;
ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
0x1000000 0x0 0x00000000 0xe2800000 0x0 0x00800000>;
+ sleep = <&pmc 0x20000000>;
clock-frequency = <33333333>;
pcie@0 {
reg = <0x0 0x0 0x0 0x0 0x0>;
@@ -714,5 +748,6 @@
55 2 /* msg2_tx */
56 2 /* msg2_rx */>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x00080000>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index f468d215f71..9535ce68caa 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -35,6 +35,8 @@
i-cache-line-size = <32>;
d-cache-size = <32768>; // L1
i-cache-size = <32768>; // L1
+ sleep = <&pmc 0x00008000 0 // core
+ &pmc 0x00004000 0>; // timebase
timebase-frequency = <0>; // From uboot
bus-frequency = <0>; // From uboot
clock-frequency = <0>; // From uboot
@@ -60,6 +62,7 @@
5 0 0xe8480000 0x00008000
6 0 0xe84c0000 0x00008000
3 0 0xe8000000 0x00000020>;
+ sleep = <&pmc 0x08000000 0>;
flash@0,0 {
compatible = "cfi-flash";
@@ -105,6 +108,8 @@
compatible = "fsl,fpga-pixis";
reg = <3 0 0x20>;
ranges = <0 3 0 0x20>;
+ interrupt-parent = <&mpic>;
+ interrupts = <8 8>;
sdcsr_pio: gpio-controller@a {
#gpio-cells = <2>;
@@ -163,6 +168,7 @@
reg = <0x3100 0x100>;
interrupts = <43 2>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x00000004 0>;
dfsrr;
};
@@ -174,6 +180,7 @@
clock-frequency = <0>;
interrupts = <42 2>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x00000002 0>;
};
serial1: serial@4600 {
@@ -184,6 +191,7 @@
clock-frequency = <0>;
interrupts = <42 2>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x00000008 0>;
};
spi@7000 {
@@ -196,6 +204,7 @@
interrupt-parent = <&mpic>;
mode = "cpu";
gpios = <&sdcsr_pio 7 0>;
+ sleep = <&pmc 0x00000800 0>;
mmc-slot@0 {
compatible = "fsl,mpc8610hpcd-mmc-slot",
@@ -213,6 +222,7 @@
reg = <0x2c000 100>;
interrupts = <72 2>;
interrupt-parent = <&mpic>;
+ sleep = <&pmc 0x04000000 0>;
};
mpic: interrupt-controller@40000 {
@@ -241,9 +251,18 @@
};
global-utilities@e0000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "fsl,mpc8610-guts";
reg = <0xe0000 0x1000>;
+ ranges = <0 0xe0000 0x1000>;
fsl,has-rstcr;
+
+ pmc: power@70 {
+ compatible = "fsl,mpc8610-pmc",
+ "fsl,mpc8641d-pmc";
+ reg = <0x70 0x20>;
+ };
};
wdt@e4000 {
@@ -262,6 +281,7 @@
fsl,playback-dma = <&dma00>;
fsl,capture-dma = <&dma01>;
fsl,fifo-depth = <8>;
+ sleep = <&pmc 0 0x08000000>;
};
ssi@16100 {
@@ -271,6 +291,7 @@
interrupt-parent = <&mpic>;
interrupts = <63 2>;
fsl,fifo-depth = <8>;
+ sleep = <&pmc 0 0x04000000>;
};
dma@21300 {
@@ -280,6 +301,7 @@
cell-index = <0>;
reg = <0x21300 0x4>; /* DMA general status register */
ranges = <0x0 0x21100 0x200>;
+ sleep = <&pmc 0x00000400 0>;
dma00: dma-channel@0 {
compatible = "fsl,mpc8610-dma-channel",
@@ -322,6 +344,7 @@
cell-index = <1>;
reg = <0xc300 0x4>; /* DMA general status register */
ranges = <0x0 0xc100 0x200>;
+ sleep = <&pmc 0x00000200 0>;
dma-channel@0 {
compatible = "fsl,mpc8610-dma-channel",
@@ -369,6 +392,7 @@
bus-range = <0 0>;
ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000
0x01000000 0x0 0x00000000 0xe1000000 0x0 0x00100000>;
+ sleep = <&pmc 0x80000000 0>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <24 2>;
@@ -398,6 +422,7 @@
bus-range = <1 3>;
ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
+ sleep = <&pmc 0x40000000 0>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <26 2>;
@@ -474,6 +499,7 @@
0x0000 0 0 4 &mpic 7 1>;
interrupt-parent = <&mpic>;
interrupts = <25 2>;
+ sleep = <&pmc 0x20000000 0>;
clock-frequency = <33333333>;
};
};
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts
new file mode 100644
index 00000000000..df5269093af
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb.dts
@@ -0,0 +1,477 @@
+/*
+ * P1020 RDB Device Tree Source
+ *
+ * Copyright 2009 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ model = "fsl,P1020";
+ compatible = "fsl,P1020RDB";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P1020@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+
+ PowerPC,P1020@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ localbus@ffe05000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus";
+ reg = <0 0xffe05000 0 0x1000>;
+ interrupts = <19 2>;
+ interrupt-parent = <&mpic>;
+
+ /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000
+ 0x1 0x0 0x0 0xffa00000 0x00040000
+ 0x2 0x0 0x0 0xffb00000 0x00020000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR (RO) Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR (RO) DTB Image";
+ read-only;
+ };
+
+ partition@80000 {
+ /* 3.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00380000>;
+ label = "NOR (RO) Linux Kernel Image";
+ read-only;
+ };
+
+ partition@400000 {
+ /* 11MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00b00000>;
+ label = "NOR (RW) JFFS2 Root File System";
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x00f00000 0x00100000>;
+ label = "NOR (RO) U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1020-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND (RO) U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND (RO) DTB Image";
+ read-only;
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND (RO) Linux Kernel Image";
+ read-only;
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND (RO) Compressed RFS Image";
+ read-only;
+ };
+
+ partition@a00000 {
+ /* 7MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00700000>;
+ label = "NAND (RW) JFFS2 Root File System";
+ };
+
+ partition@1100000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x01100000 0x00f00000>;
+ label = "NAND (RW) Writable User area";
+ };
+ };
+
+ L2switch@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "vitesse-7385";
+ reg = <0x2 0x0 0x20000>;
+ };
+
+ };
+
+ soc@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p1020-immr", "simple-bus";
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p1020-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p1020-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <42 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ spi@7000 {
+ cell-index = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,espi";
+ reg = <0x7000 0x1000>;
+ interrupts = <59 0x2>;
+ interrupt-parent = <&mpic>;
+ mode = "cpu";
+
+ fsl_m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,espi-flash";
+ reg = <0>;
+ linux,modalias = "fsl_m25p80";
+ modal = "s25sl128b";
+ spi-max-frequency = <50000000>;
+ mode = <0>;
+
+ partition@0 {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "SPI (RO) U-Boot Image";
+ read-only;
+ };
+
+ partition@80000 {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "SPI (RO) DTB Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "SPI (RO) Linux Kernel Image";
+ read-only;
+ };
+
+ partition@500000 {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "SPI (RO) Compressed RFS Image";
+ read-only;
+ };
+
+ partition@900000 {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "SPI (RW) JFFS2 RFS";
+ };
+ };
+ };
+
+ gpio: gpio-controller@f000 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8572-gpio";
+ reg = <0xf000 0x100>;
+ interrupts = <47 0x2>;
+ interrupt-parent = <&mpic>;
+ gpio-controller;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p1020-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ usb@22000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl-usb2-dr";
+ reg = <0x22000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <28 0x2>;
+ phy_type = "ulpi";
+ };
+
+ usb@23000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl-usb2-dr";
+ reg = <0x23000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <46 0x2>;
+ phy_type = "ulpi";
+ };
+
+ sdhci@2e000 {
+ compatible = "fsl,p1020-esdhc", "fsl,esdhc";
+ reg = <0x2e000 0x1000>;
+ interrupts = <72 0x2>;
+ interrupt-parent = <&mpic>;
+ /* Filled in by U-Boot */
+ clock-frequency = <0>;
+ };
+
+ crypto@30000 {
+ compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
+ "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 58 2>;
+ interrupt-parent = <&mpic>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0xbfe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ msi@41600 {
+ compatible = "fsl,p1020-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,p1020-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+ };
+
+ pci0: pcie@ffe09000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe09000 0 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe0a000 0 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
new file mode 100644
index 00000000000..0fe93d0c8b2
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
@@ -0,0 +1,363 @@
+/*
+ * P2020 RDB Core0 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
+ * can be shared, all the other devices must be assigned to one core only.
+ * This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb,
+ * eth1, eth2, sdhc, crypto, global-util, pci0.
+ *
+ * Copyright 2009 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ model = "fsl,P2020";
+ compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ serial0 = &serial0;
+ pci0 = &pci0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P2020@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p2020-immr", "simple-bus";
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,p2020-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <17 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,p2020-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ };
+
+ spi@7000 {
+ cell-index = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,espi";
+ reg = <0x7000 0x1000>;
+ interrupts = <59 0x2>;
+ interrupt-parent = <&mpic>;
+ mode = "cpu";
+
+ fsl_m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,espi-flash";
+ reg = <0>;
+ linux,modalias = "fsl_m25p80";
+ modal = "s25sl128b";
+ spi-max-frequency = <50000000>;
+ mode = <0>;
+
+ partition@0 {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "SPI (RO) U-Boot Image";
+ read-only;
+ };
+
+ partition@80000 {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "SPI (RO) DTB Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "SPI (RO) Linux Kernel Image";
+ read-only;
+ };
+
+ partition@500000 {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "SPI (RO) Compressed RFS Image";
+ read-only;
+ };
+
+ partition@900000 {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "SPI (RW) JFFS2 RFS";
+ };
+ };
+ };
+
+ gpio: gpio-controller@f000 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8572-gpio";
+ reg = <0xf000 0x100>;
+ interrupts = <47 0x2>;
+ interrupt-parent = <&mpic>;
+ gpio-controller;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p2020-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2,512K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ usb@22000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl-usb2-dr";
+ reg = <0x22000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <28 0x2>;
+ phy_type = "ulpi";
+ };
+
+ mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x24520 0x20>;
+
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <3 1>;
+ reg = <0x1>;
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ enet1: ethernet@25000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x25000 0x1000>;
+ ranges = <0x0 0x25000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <35 2 36 2 40 2>;
+ interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "sgmii";
+
+ };
+
+ enet2: ethernet@26000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <2>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x26000 0x1000>;
+ ranges = <0x0 0x26000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <31 2 32 2 33 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ sdhci@2e000 {
+ compatible = "fsl,p2020-esdhc", "fsl,esdhc";
+ reg = <0x2e000 0x1000>;
+ interrupts = <72 0x2>;
+ interrupt-parent = <&mpic>;
+ /* Filled in by U-Boot */
+ clock-frequency = <0>;
+ };
+
+ crypto@30000 {
+ compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4",
+ "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 58 2>;
+ interrupt-parent = <&mpic>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0xbfe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ protected-sources = <
+ 42 76 77 78 79 /* serial1, dma2 */
+ 29 30 34 26 /* enet0, pci1 */
+ 0xe0 0xe1 0xe2 0xe3 /* msi */
+ 0xe4 0xe5 0xe6 0xe7
+ >;
+ };
+
+ global-utilities@e0000 {
+ compatible = "fsl,p2020-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+ };
+
+ pci0: pcie@ffe09000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe09000 0 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <25 2>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
new file mode 100644
index 00000000000..e95a5128532
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts
@@ -0,0 +1,184 @@
+/*
+ * P2020 RDB Core1 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
+ * can be shared; all the other devices must be assigned to one core only.
+ * This dts allows core1 to have l2, dma2, eth0, pci1, msi.
+ *
+ * Please note that "-b 1" must be added when compiling core1's dts.
+ *
+ * Copyright 2009 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
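The "-b 1" noted above is dtc's physical-boot-CPU option. A minimal sketch of compiling both CAMP trees by hand, assuming dtc is invoked directly from the kernel source tree (output names are illustrative):

    # core0 uses the default boot CPU (0); core1 must be marked as booting on CPU 1
    dtc -I dts -O dtb -b 0 -o p2020rdb_camp_core0.dtb \
        arch/powerpc/boot/dts/p2020rdb_camp_core0.dts
    dtc -I dts -O dtb -b 1 -o p2020rdb_camp_core1.dtb \
        arch/powerpc/boot/dts/p2020rdb_camp_core1.dts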
+
+/dts-v1/;
+/ {
+ model = "fsl,P2020";
+ compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ ethernet0 = &enet0;
+ serial0 = &serial0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,P2020@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,p2020-immr", "simple-bus";
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ bus-frequency = <0>; // Filled out by uboot.
+
+ serial0: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ };
+
+ dma@c300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,eloplus-dma";
+ reg = <0xc300 0x4>;
+ ranges = <0x0 0xc100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <76 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <77 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <78 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <79 2>;
+ };
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,p2020-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2,512K
+ interrupt-parent = <&mpic>;
+ };
+
+
+ enet0: ethernet@24000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <0>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ ranges = <0x0 0x24000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <29 2 30 2 34 2>;
+ interrupt-parent = <&mpic>;
+ fixed-link = <1 1 1000 0 0>;
+ phy-connection-type = "rgmii-id";
+
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ protected-sources = <
+ 17 18 43 42 59 47 /* ecm, mem, i2c, serial0, spi, gpio */
+ 16 20 21 22 23 28 /* L2, dma1, USB */
+ 03 35 36 40 31 32 33 /* mdio, enet1, enet2 */
+ 72 45 58 25 /* sdhci, crypto, pci */
+ >;
+ };
+
+ msi@41600 {
+ compatible = "fsl,p2020-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0 0xffe0a000 0 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <26 2>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
new file mode 100644
index 00000000000..6b29eab0536
--- /dev/null
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -0,0 +1,554 @@
+/*
+ * P4080DS Device Tree Source
+ *
+ * Copyright 2009 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "fsl,P4080DS";
+ compatible = "fsl,P4080DS";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ ccsr = &soc;
+
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ dma0 = &dma0;
+ dma1 = &dma1;
+ sdhc = &sdhc;
+
+ rio0 = &rapidio0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: PowerPC,4080@0 {
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ };
+ };
+ cpu1: PowerPC,4080@1 {
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ };
+ };
+ cpu2: PowerPC,4080@2 {
+ device_type = "cpu";
+ reg = <2>;
+ next-level-cache = <&L2_2>;
+ L2_2: l2-cache {
+ };
+ };
+ cpu3: PowerPC,4080@3 {
+ device_type = "cpu";
+ reg = <3>;
+ next-level-cache = <&L2_3>;
+ L2_3: l2-cache {
+ };
+ };
+ cpu4: PowerPC,4080@4 {
+ device_type = "cpu";
+ reg = <4>;
+ next-level-cache = <&L2_4>;
+ L2_4: l2-cache {
+ };
+ };
+ cpu5: PowerPC,4080@5 {
+ device_type = "cpu";
+ reg = <5>;
+ next-level-cache = <&L2_5>;
+ L2_5: l2-cache {
+ };
+ };
+ cpu6: PowerPC,4080@6 {
+ device_type = "cpu";
+ reg = <6>;
+ next-level-cache = <&L2_6>;
+ L2_6: l2-cache {
+ };
+ };
+ cpu7: PowerPC,4080@7 {
+ device_type = "cpu";
+ reg = <7>;
+ next-level-cache = <&L2_7>;
+ L2_7: l2-cache {
+ };
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+ reg = <0xf 0xfe000000 0 0x00001000>;
+
+ corenet-law@0 {
+ compatible = "fsl,corenet-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <32>;
+ };
+
+ memory-controller@8000 {
+ compatible = "fsl,p4080-memory-controller";
+ reg = <0x8000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <0x12 2>;
+ };
+
+ memory-controller@9000 {
+ compatible = "fsl,p4080-memory-controller";
+ reg = <0x9000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <0x12 2>;
+ };
+
+ corenet-cf@18000 {
+ compatible = "fsl,corenet-cf";
+ reg = <0x18000 0x1000>;
+ fsl,ccf-num-csdids = <32>;
+ fsl,ccf-num-snoopids = <32>;
+ };
+
+ iommu@20000 {
+ compatible = "fsl,p4080-pamu";
+ reg = <0x20000 0x10000>;
+ interrupts = <24 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ };
+
+ dma0: dma@100300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
+ reg = <0x100300 0x4>;
+ ranges = <0x0 0x100100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <28 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <29 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <30 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <31 2>;
+ };
+ };
+
+ dma1: dma@101300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p4080-dma", "fsl,eloplus-dma";
+ reg = <0x101300 0x4>;
+ ranges = <0x0 0x101100 0x200>;
+ cell-index = <1>;
+ dma-channel@0 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <32 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <33 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <34 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,p4080-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <35 2>;
+ };
+ };
+
+ spi@110000 {
+ cell-index = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,espi";
+ reg = <0x110000 0x1000>;
+ interrupts = <53 0x2>;
+ interrupt-parent = <&mpic>;
+ espi,num-ss-bits = <4>;
+ mode = "cpu";
+
+ fsl_m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,espi-flash";
+ reg = <0>;
+ linux,modalias = "fsl_m25p80";
+ spi-max-frequency = <40000000>; /* input clock */
+ partition@u-boot {
+ label = "u-boot";
+ reg = <0x00000000 0x00100000>;
+ read-only;
+ };
+ partition@kernel {
+ label = "kernel";
+ reg = <0x00100000 0x00500000>;
+ read-only;
+ };
+ partition@dtb {
+ label = "dtb";
+ reg = <0x00600000 0x00100000>;
+ read-only;
+ };
+ partition@fs {
+ label = "file system";
+ reg = <0x00700000 0x00900000>;
+ };
+ };
+ };
+
+ sdhc: sdhc@114000 {
+ compatible = "fsl,p4080-esdhc", "fsl,esdhc";
+ reg = <0x114000 0x1000>;
+ interrupts = <48 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ i2c@118000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x118000 0x100>;
+ interrupts = <38 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@118100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x118100 0x100>;
+ interrupts = <38 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ eeprom@51 {
+ compatible = "at24,24c256";
+ reg = <0x51>;
+ };
+ eeprom@52 {
+ compatible = "at24,24c256";
+ reg = <0x52>;
+ };
+ rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ interrupts = <0 0x1>;
+ interrupt-parent = <&mpic>;
+ };
+ };
+
+ i2c@119000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <2>;
+ compatible = "fsl-i2c";
+ reg = <0x119000 0x100>;
+ interrupts = <39 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@119100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <3>;
+ compatible = "fsl-i2c";
+ reg = <0x119100 0x100>;
+ interrupts = <39 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ serial0: serial@11c500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial1: serial@11c600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11c600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <36 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial2: serial@11d500 {
+ cell-index = <2>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d500 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial3: serial@11d600 {
+ cell-index = <3>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x11d600 0x100>;
+ clock-frequency = <0>;
+ interrupts = <37 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ gpio0: gpio@130000 {
+ compatible = "fsl,p4080-gpio";
+ reg = <0x130000 0x1000>;
+ interrupts = <55 2>;
+ interrupt-parent = <&mpic>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+
+ usb0: usb@210000 {
+ compatible = "fsl,p4080-usb2-mph",
+ "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ reg = <0x210000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <44 0x2>;
+ phy_type = "ulpi";
+ };
+
+ usb1: usb@211000 {
+ compatible = "fsl,p4080-usb2-dr",
+ "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ reg = <0x211000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <45 0x2>;
+ dr_mode = "host";
+ phy_type = "ulpi";
+ };
+ };
+
+ rapidio0: rapidio@ffe0c0000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "fsl,rapidio-delta";
+ reg = <0xf 0xfe0c0000 0 0x20000>;
+ ranges = <0 0 0xf 0xf5000000 0 0x01000000>;
+ interrupt-parent = <&mpic>;
+ /* err_irq bell_outb_irq bell_inb_irq
+ msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */
+ interrupts = <16 2 56 2 57 2 60 2 61 2 62 2 63 2>;
+ };
+
+ localbus@ffe124000 {
+ compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
+ reg = <0xf 0xfe124000 0 0x1000>;
+ interrupts = <25 2>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+
+ ranges = <0 0 0xf 0xe8000000 0x08000000>;
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x08000000>;
+ bank-width = <2>;
+ device-width = <2>;
+ };
+ };
+
+ pci0: pcie@ffe200000 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xf 0xfe200000 0 0x1000>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
+ clock-frequency = <0x1fca055>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 40 1
+ 0000 0 0 2 &mpic 1 1
+ 0000 0 0 3 &mpic 2 1
+ 0000 0 0 4 &mpic 3 1
+ >;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci1: pcie@ffe201000 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xf 0xfe201000 0 0x1000>;
+ bus-range = <0 0xff>;
+ ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
+ clock-frequency = <0x1fca055>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 41 1
+ 0000 0 0 2 &mpic 5 1
+ 0000 0 0 3 &mpic 6 1
+ 0000 0 0 4 &mpic 7 1
+ >;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+ pci2: pcie@ffe202000 {
+ compatible = "fsl,p4080-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xf 0xfe202000 0 0x1000>;
+ bus-range = <0x0 0xff>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x40000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ clock-frequency = <0x1fca055>;
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ interrupt-map-mask = <0xf800 0 0 7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0 0 1 &mpic 42 1
+ 0000 0 0 2 &mpic 9 1
+ 0000 0 0 3 &mpic 10 1
+ 0000 0 0 4 &mpic 11 1
+ >;
+ pcie@0 {
+ reg = <0 0 0 0 0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x20000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
+ };
+
+};
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts
index ad402c48874..d2af32e2bf7 100644
--- a/arch/powerpc/boot/dts/redwood.dts
+++ b/arch/powerpc/boot/dts/redwood.dts
@@ -226,6 +226,7 @@
max-frame-size = <9000>;
rx-fifo-size = <4096>;
tx-fifo-size = <2048>;
+ rx-fifo-size-gige = <16384>;
phy-mode = "rgmii";
phy-map = <0x00000000>;
rgmii-device = <&RGMII0>;
diff --git a/arch/powerpc/boot/dts/warp.dts b/arch/powerpc/boot/dts/warp.dts
index 31605ee4afb..e576ee85c42 100644
--- a/arch/powerpc/boot/dts/warp.dts
+++ b/arch/powerpc/boot/dts/warp.dts
@@ -146,7 +146,7 @@
fpga@2,4000 {
compatible = "pika,fpga-sd";
- reg = <0x00000002 0x00004000 0x00000A00>;
+ reg = <0x00000002 0x00004000 0x00004000>;
};
nor@0,0 {
diff --git a/arch/powerpc/boot/dts/wii.dts b/arch/powerpc/boot/dts/wii.dts
new file mode 100644
index 00000000000..77528c9a8db
--- /dev/null
+++ b/arch/powerpc/boot/dts/wii.dts
@@ -0,0 +1,218 @@
+/*
+ * arch/powerpc/boot/dts/wii.dts
+ *
+ * Nintendo Wii platform device tree source
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/*
+ * This is commented out for now.
+ * Until a later patch is merged, the kernel can use only the first
+ * contiguous RAM range and will BUG() if the memreserve is outside
+ * that range.
+ */
+/*/memreserve/ 0x10000000 0x0004000;*/ /* DSP RAM */
+
+/ {
+ model = "nintendo,wii";
+ compatible = "nintendo,wii";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ chosen {
+ bootargs = "root=/dev/mmcblk0p2 rootwait udbg-immortal";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x01800000 /* MEM1 24MB 1T-SRAM */
+ 0x10000000 0x04000000>; /* MEM2 64MB GDDR3 */
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,broadway@0 {
+ device_type = "cpu";
+ reg = <0>;
+ clock-frequency = <729000000>; /* 729MHz */
+ bus-frequency = <243000000>; /* 243MHz core-to-bus 3x */
+ timebase-frequency = <60750000>; /* 243MHz / 4 */
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ i-cache-size = <32768>;
+ d-cache-size = <32768>;
+ };
+ };
+
+ /* devices contained in the hollywood chipset */
+ hollywood {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "nintendo,hollywood";
+ ranges = <0x0c000000 0x0c000000 0x01000000
+ 0x0d000000 0x0d000000 0x00800000
+ 0x0d800000 0x0d800000 0x00800000>;
+ interrupt-parent = <&PIC0>;
+
+ video@0c002000 {
+ compatible = "nintendo,hollywood-vi",
+ "nintendo,flipper-vi";
+ reg = <0x0c002000 0x100>;
+ interrupts = <8>;
+ };
+
+ processor-interface@0c003000 {
+ compatible = "nintendo,hollywood-pi",
+ "nintendo,flipper-pi";
+ reg = <0x0c003000 0x100>;
+
+ PIC0: pic0 {
+ #interrupt-cells = <1>;
+ compatible = "nintendo,flipper-pic";
+ interrupt-controller;
+ };
+ };
+
+ dsp@0c005000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "nintendo,hollywood-dsp",
+ "nintendo,flipper-dsp";
+ reg = <0x0c005000 0x200>;
+ interrupts = <6>;
+ };
+
+ gamepad-controller@0d006400 {
+ compatible = "nintendo,hollywood-si",
+ "nintendo,flipper-si";
+ reg = <0x0d006400 0x100>;
+ interrupts = <3>;
+ };
+
+ audio@0c006c00 {
+ compatible = "nintendo,hollywood-ai",
+ "nintendo,flipper-ai";
+ reg = <0x0d006c00 0x20>;
+ interrupts = <6>;
+ };
+
+ /* External Interface bus */
+ exi@0d006800 {
+ compatible = "nintendo,hollywood-exi",
+ "nintendo,flipper-exi";
+ reg = <0x0d006800 0x40>;
+ virtual-reg = <0x0d006800>;
+ interrupts = <4>;
+ };
+
+ usb@0d040000 {
+ compatible = "nintendo,hollywood-usb-ehci",
+ "usb-ehci";
+ reg = <0x0d040000 0x100>;
+ interrupts = <4>;
+ interrupt-parent = <&PIC1>;
+ };
+
+ usb@0d050000 {
+ compatible = "nintendo,hollywood-usb-ohci",
+ "usb-ohci";
+ reg = <0x0d050000 0x100>;
+ interrupts = <5>;
+ interrupt-parent = <&PIC1>;
+ };
+
+ usb@0d060000 {
+ compatible = "nintendo,hollywood-usb-ohci",
+ "usb-ohci";
+ reg = <0x0d060000 0x100>;
+ interrupts = <6>;
+ interrupt-parent = <&PIC1>;
+ };
+
+ sd@0d070000 {
+ compatible = "nintendo,hollywood-sdhci",
+ "sdhci";
+ reg = <0x0d070000 0x200>;
+ interrupts = <7>;
+ interrupt-parent = <&PIC1>;
+ };
+
+ sdio@0d080000 {
+ compatible = "nintendo,hollywood-sdhci",
+ "sdhci";
+ reg = <0x0d080000 0x200>;
+ interrupts = <8>;
+ interrupt-parent = <&PIC1>;
+ };
+
+ ipc@0d000000 {
+ compatible = "nintendo,hollywood-ipc";
+ reg = <0x0d000000 0x10>;
+ interrupts = <30>;
+ interrupt-parent = <&PIC1>;
+ };
+
+ PIC1: pic1@0d800030 {
+ #interrupt-cells = <1>;
+ compatible = "nintendo,hollywood-pic";
+ reg = <0x0d800030 0x10>;
+ interrupt-controller;
+ interrupts = <14>;
+ };
+
+ GPIO: gpio@0d8000c0 {
+ #gpio-cells = <2>;
+ compatible = "nintendo,hollywood-gpio";
+ reg = <0x0d8000c0 0x40>;
+ gpio-controller;
+
+ /*
+ * This is commented out until a standard binding
+ * for i2c over gpio is defined.
+ */
+ /*
+ i2c-video {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "i2c-gpio";
+
+ gpios = <&GPIO 15 0
+ &GPIO 14 0>;
+ clock-frequency = <250000>;
+ no-clock-stretching;
+ scl-is-open-drain;
+ sda-is-open-drain;
+ sda-enforce-dir;
+
+ AVE: audio-video-encoder@70 {
+ compatible = "nintendo,wii-audio-video-encoder";
+ reg = <0x70>;
+ };
+ };
+ */
+ };
+
+ control@0d800100 {
+ compatible = "nintendo,hollywood-control";
+ reg = <0x0d800100 0x300>;
+ };
+
+ disk@0d806000 {
+ compatible = "nintendo,hollywood-di";
+ reg = <0x0d806000 0x40>;
+ interrupts = <2>;
+ };
+ };
+};
+
diff --git a/arch/powerpc/boot/dts/yosemite.dts b/arch/powerpc/boot/dts/yosemite.dts
index 1fa3cb4c4eb..64923245f0e 100644
--- a/arch/powerpc/boot/dts/yosemite.dts
+++ b/arch/powerpc/boot/dts/yosemite.dts
@@ -282,20 +282,10 @@
/* Inbound 2GB range starting at 0 */
dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
- /* Bamboo has all 4 IRQ pins tied together per slot */
interrupt-map-mask = <0xf800 0x0 0x0 0x0>;
interrupt-map = <
- /* IDSEL 1 */
- 0x800 0x0 0x0 0x0 &UIC0 0x1c 0x8
-
- /* IDSEL 2 */
- 0x1000 0x0 0x0 0x0 &UIC0 0x1b 0x8
-
- /* IDSEL 3 */
- 0x1800 0x0 0x0 0x0 &UIC0 0x1a 0x8
-
- /* IDSEL 4 */
- 0x2000 0x0 0x0 0x0 &UIC0 0x19 0x8
+ /* IDSEL 12 */
+ 0x6000 0x0 0x0 0x0 &UIC0 0x19 0x8
>;
};
};
diff --git a/arch/powerpc/boot/gamecube-head.S b/arch/powerpc/boot/gamecube-head.S
new file mode 100644
index 00000000000..65a9b2a3bf3
--- /dev/null
+++ b/arch/powerpc/boot/gamecube-head.S
@@ -0,0 +1,111 @@
+/*
+ * arch/powerpc/boot/gamecube-head.S
+ *
+ * Nintendo GameCube bootwrapper entry.
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include "ppc_asm.h"
+
+/*
+ * The entry code makes no assumptions regarding:
+ * - whether the data and instruction caches are enabled
+ * - whether the MMU is enabled
+ *
+ * We enable the caches if not already enabled, enable the MMU with an
+ * identity mapping scheme and jump to the start code.
+ */
+
+ .text
+
+ .globl _zimage_start
+_zimage_start:
+
+ /* turn the MMU off */
+ mfmsr 9
+ rlwinm 9, 9, 0, ~((1<<4)|(1<<5)) /* MSR_DR|MSR_IR */
+ bcl 20, 31, 1f
+1:
+ mflr 8
+ clrlwi 8, 8, 3 /* convert to a real address */
+ addi 8, 8, _mmu_off - 1b
+ mtsrr0 8
+ mtsrr1 9
+ rfi
+_mmu_off:
+ /* MMU disabled */
+
+ /* setup BATs */
+ isync
+ li 8, 0
+ mtspr 0x210, 8 /* IBAT0U */
+ mtspr 0x212, 8 /* IBAT1U */
+ mtspr 0x214, 8 /* IBAT2U */
+ mtspr 0x216, 8 /* IBAT3U */
+ mtspr 0x218, 8 /* DBAT0U */
+ mtspr 0x21a, 8 /* DBAT1U */
+ mtspr 0x21c, 8 /* DBAT2U */
+ mtspr 0x21e, 8 /* DBAT3U */
+
+ li 8, 0x01ff /* first 16MiB */
+ li 9, 0x0002 /* rw */
+ mtspr 0x211, 9 /* IBAT0L */
+ mtspr 0x210, 8 /* IBAT0U */
+ mtspr 0x219, 9 /* DBAT0L */
+ mtspr 0x218, 8 /* DBAT0U */
+
+ lis 8, 0x0c00 /* I/O mem */
+ ori 8, 8, 0x3ff /* 32MiB */
+ lis 9, 0x0c00
+ ori 9, 9, 0x002a /* uncached, guarded, rw */
+ mtspr 0x21b, 9 /* DBAT1L */
+ mtspr 0x21a, 8 /* DBAT1U */
+
+ lis 8, 0x0100 /* next 8MiB */
+ ori 8, 8, 0x00ff /* 8MiB */
+ lis 9, 0x0100
+ ori 9, 9, 0x0002 /* rw */
+ mtspr 0x215, 9 /* IBAT2L */
+ mtspr 0x214, 8 /* IBAT2U */
+ mtspr 0x21d, 9 /* DBAT2L */
+ mtspr 0x21c, 8 /* DBAT2U */
+
+ /* enable and invalidate the caches if not already enabled */
+ mfspr 8, 0x3f0 /* HID0 */
+ andi. 0, 8, (1<<15) /* HID0_ICE */
+ bne 1f
+ ori 8, 8, (1<<15)|(1<<11) /* HID0_ICE|HID0_ICFI*/
+1:
+ andi. 0, 8, (1<<14) /* HID0_DCE */
+ bne 1f
+ ori 8, 8, (1<<14)|(1<<10) /* HID0_DCE|HID0_DCFI*/
+1:
+ mtspr 0x3f0, 8 /* HID0 */
+ isync
+
+ /* initialize arguments */
+ li 3, 0
+ li 4, 0
+ li 5, 0
+
+ /* turn the MMU on */
+ bcl 20, 31, 1f
+1:
+ mflr 8
+ addi 8, 8, _mmu_on - 1b
+ mfmsr 9
+ ori 9, 9, (1<<4)|(1<<5) /* MSR_DR|MSR_IR */
+ mtsrr0 8
+ mtsrr1 9
+ sync
+ rfi
+_mmu_on:
+ b _zimage_start_lib
+
diff --git a/arch/powerpc/boot/gamecube.c b/arch/powerpc/boot/gamecube.c
new file mode 100644
index 00000000000..28ae7057be5
--- /dev/null
+++ b/arch/powerpc/boot/gamecube.c
@@ -0,0 +1,35 @@
+/*
+ * arch/powerpc/boot/gamecube.c
+ *
+ * Nintendo GameCube bootwrapper support
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stddef.h>
+#include "stdio.h"
+#include "types.h"
+#include "io.h"
+#include "ops.h"
+
+#include "ugecon.h"
+
+BSS_STACK(8192);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ u32 heapsize = 16*1024*1024 - (u32)_end;
+
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+
+ if (ug_probe())
+ console_ops.write = ug_console_write;
+}
+
diff --git a/arch/powerpc/boot/ugecon.c b/arch/powerpc/boot/ugecon.c
new file mode 100644
index 00000000000..8f2a6b31153
--- /dev/null
+++ b/arch/powerpc/boot/ugecon.c
@@ -0,0 +1,147 @@
+/*
+ * arch/powerpc/boot/ugecon.c
+ *
+ * USB Gecko bootwrapper console.
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stddef.h>
+#include "stdio.h"
+#include "types.h"
+#include "io.h"
+#include "ops.h"
+
+
+#define EXI_CLK_32MHZ 5
+
+#define EXI_CSR 0x00
+#define EXI_CSR_CLKMASK (0x7<<4)
+#define EXI_CSR_CLK_32MHZ (EXI_CLK_32MHZ<<4)
+#define EXI_CSR_CSMASK (0x7<<7)
+#define EXI_CSR_CS_0 (0x1<<7) /* Chip Select 001 */
+
+#define EXI_CR 0x0c
+#define EXI_CR_TSTART (1<<0)
+#define EXI_CR_WRITE (1<<2)
+#define EXI_CR_READ_WRITE (2<<2)
+#define EXI_CR_TLEN(len) (((len)-1)<<4)
+
+#define EXI_DATA 0x10
+
+
+/* virtual address base for input/output, retrieved from device tree */
+static void *ug_io_base;
+
+
+static u32 ug_io_transaction(u32 in)
+{
+ u32 *csr_reg = ug_io_base + EXI_CSR;
+ u32 *data_reg = ug_io_base + EXI_DATA;
+ u32 *cr_reg = ug_io_base + EXI_CR;
+ u32 csr, data, cr;
+
+ /* select */
+ csr = EXI_CSR_CLK_32MHZ | EXI_CSR_CS_0;
+ out_be32(csr_reg, csr);
+
+ /* read/write */
+ data = in;
+ out_be32(data_reg, data);
+ cr = EXI_CR_TLEN(2) | EXI_CR_READ_WRITE | EXI_CR_TSTART;
+ out_be32(cr_reg, cr);
+
+ while (in_be32(cr_reg) & EXI_CR_TSTART)
+ barrier();
+
+ /* deselect */
+ out_be32(csr_reg, 0);
+
+ data = in_be32(data_reg);
+ return data;
+}
+
+static int ug_is_txfifo_ready(void)
+{
+ return ug_io_transaction(0xc0000000) & 0x04000000;
+}
+
+static void ug_raw_putc(char ch)
+{
+ ug_io_transaction(0xb0000000 | (ch << 20));
+}
+
+static void ug_putc(char ch)
+{
+ int count = 16;
+
+ if (!ug_io_base)
+ return;
+
+ while (!ug_is_txfifo_ready() && count--)
+ barrier();
+ if (count >= 0)
+ ug_raw_putc(ch);
+}
+
+void ug_console_write(const char *buf, int len)
+{
+ char *b = (char *)buf;
+
+ while (len--) {
+ if (*b == '\n')
+ ug_putc('\r');
+ ug_putc(*b++);
+ }
+}
+
+static int ug_is_adapter_present(void)
+{
+ if (!ug_io_base)
+ return 0;
+ return ug_io_transaction(0x90000000) == 0x04700000;
+}
+
+static void *ug_grab_exi_io_base(void)
+{
+ u32 v;
+ void *devp;
+
+ devp = find_node_by_compatible(NULL, "nintendo,flipper-exi");
+ if (devp == NULL)
+ goto err_out;
+ if (getprop(devp, "virtual-reg", &v, sizeof(v)) != sizeof(v))
+ goto err_out;
+
+ return (void *)v;
+
+err_out:
+ return NULL;
+}
+
+void *ug_probe(void)
+{
+ void *exi_io_base;
+ int i;
+
+ exi_io_base = ug_grab_exi_io_base();
+ if (!exi_io_base)
+ return NULL;
+
+ /* look for a usbgecko on memcard slots A and B */
+ for (i = 0; i < 2; i++) {
+ ug_io_base = exi_io_base + 0x14 * i;
+ if (ug_is_adapter_present())
+ break;
+ }
+ if (i == 2)
+ ug_io_base = NULL;
+ return ug_io_base;
+}
+
diff --git a/arch/powerpc/boot/ugecon.h b/arch/powerpc/boot/ugecon.h
new file mode 100644
index 00000000000..43737539169
--- /dev/null
+++ b/arch/powerpc/boot/ugecon.h
@@ -0,0 +1,24 @@
+/*
+ * arch/powerpc/boot/ugecon.h
+ *
+ * USB Gecko early bootwrapper console.
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef __UGECON_H
+#define __UGECON_H
+
+extern void *ug_probe(void);
+
+extern void ug_putc(char ch);
+extern void ug_console_write(const char *buf, int len);
+
+#endif /* __UGECON_H */
+
diff --git a/arch/powerpc/boot/wii-head.S b/arch/powerpc/boot/wii-head.S
new file mode 100644
index 00000000000..edd79b836fc
--- /dev/null
+++ b/arch/powerpc/boot/wii-head.S
@@ -0,0 +1,142 @@
+/*
+ * arch/powerpc/boot/wii-head.S
+ *
+ * Nintendo Wii bootwrapper entry.
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include "ppc_asm.h"
+
+/*
+ * The entry code makes no assumptions regarding:
+ * - whether the data and instruction caches are enabled
+ * - whether the MMU is enabled
+ * - whether the high BATs are enabled
+ *
+ * We enable the high BATs, enable the caches if not already enabled,
+ * enable the MMU with an identity mapping scheme and jump to the start code.
+ */
+
+ .text
+
+ .globl _zimage_start
+_zimage_start:
+
+ /* turn the MMU off */
+ mfmsr 9
+ rlwinm 9, 9, 0, ~((1<<4)|(1<<5)) /* MSR_DR|MSR_IR */
+ bcl 20, 31, 1f
+1:
+ mflr 8
+ clrlwi 8, 8, 3 /* convert to a real address */
+ addi 8, 8, _mmu_off - 1b
+ mtsrr0 8
+ mtsrr1 9
+ rfi
+_mmu_off:
+ /* MMU disabled */
+
+ /* setup BATs */
+ isync
+ li 8, 0
+ mtspr 0x210, 8 /* IBAT0U */
+ mtspr 0x212, 8 /* IBAT1U */
+ mtspr 0x214, 8 /* IBAT2U */
+ mtspr 0x216, 8 /* IBAT3U */
+ mtspr 0x218, 8 /* DBAT0U */
+ mtspr 0x21a, 8 /* DBAT1U */
+ mtspr 0x21c, 8 /* DBAT2U */
+ mtspr 0x21e, 8 /* DBAT3U */
+
+ mtspr 0x230, 8 /* IBAT4U */
+ mtspr 0x232, 8 /* IBAT5U */
+ mtspr 0x234, 8 /* IBAT6U */
+ mtspr 0x236, 8 /* IBAT7U */
+ mtspr 0x238, 8 /* DBAT4U */
+ mtspr 0x23a, 8 /* DBAT5U */
+ mtspr 0x23c, 8 /* DBAT6U */
+ mtspr 0x23e, 8 /* DBAT7U */
+
+ li 8, 0x01ff /* first 16MiB */
+ li 9, 0x0002 /* rw */
+ mtspr 0x211, 9 /* IBAT0L */
+ mtspr 0x210, 8 /* IBAT0U */
+ mtspr 0x219, 9 /* DBAT0L */
+ mtspr 0x218, 8 /* DBAT0U */
+
+ lis 8, 0x0c00 /* I/O mem */
+ ori 8, 8, 0x3ff /* 32MiB */
+ lis 9, 0x0c00
+ ori 9, 9, 0x002a /* uncached, guarded, rw */
+ mtspr 0x21b, 9 /* DBAT1L */
+ mtspr 0x21a, 8 /* DBAT1U */
+
+ lis 8, 0x0100 /* next 8MiB */
+ ori 8, 8, 0x00ff /* 8MiB */
+ lis 9, 0x0100
+ ori 9, 9, 0x0002 /* rw */
+ mtspr 0x215, 9 /* IBAT2L */
+ mtspr 0x214, 8 /* IBAT2U */
+ mtspr 0x21d, 9 /* DBAT2L */
+ mtspr 0x21c, 8 /* DBAT2U */
+
+ lis 8, 0x1000 /* MEM2 */
+ ori 8, 8, 0x07ff /* 64MiB */
+ lis 9, 0x1000
+ ori 9, 9, 0x0002 /* rw */
+ mtspr 0x216, 8 /* IBAT3U */
+ mtspr 0x217, 9 /* IBAT3L */
+ mtspr 0x21e, 8 /* DBAT3U */
+ mtspr 0x21f, 9 /* DBAT3L */
+
+ /* enable the high BATs */
+ mfspr 8, 0x3f3 /* HID4 */
+ oris 8, 8, 0x0200
+ mtspr 0x3f3, 8 /* HID4 */
+
+ /* enable and invalidate the caches if not already enabled */
+ mfspr 8, 0x3f0 /* HID0 */
+ andi. 0, 8, (1<<15) /* HID0_ICE */
+ bne 1f
+ ori 8, 8, (1<<15)|(1<<11) /* HID0_ICE|HID0_ICFI*/
+1:
+ andi. 0, 8, (1<<14) /* HID0_DCE */
+ bne 1f
+ ori 8, 8, (1<<14)|(1<<10) /* HID0_DCE|HID0_DCFI*/
+1:
+ mtspr 0x3f0, 8 /* HID0 */
+ isync
+
+ /* initialize arguments */
+ li 3, 0
+ li 4, 0
+ li 5, 0
+
+ /* turn the MMU on */
+ bcl 20, 31, 1f
+1:
+ mflr 8
+ addi 8, 8, _mmu_on - 1b
+ mfmsr 9
+ ori 9, 9, (1<<4)|(1<<5) /* MSR_DR|MSR_IR */
+ mtsrr0 8
+ mtsrr1 9
+ sync
+ rfi
+_mmu_on:
+ /* turn on the front blue led (aka: yay! we got here!) */
+ lis 8, 0x0d00
+ ori 8, 8, 0x00c0
+ lwz 9, 0(8)
+ ori 9, 9, 0x20
+ stw 9, 0(8)
+
+ b _zimage_start_lib
+
diff --git a/arch/powerpc/boot/wii.c b/arch/powerpc/boot/wii.c
new file mode 100644
index 00000000000..2ebaec0344d
--- /dev/null
+++ b/arch/powerpc/boot/wii.c
@@ -0,0 +1,158 @@
+/*
+ * arch/powerpc/boot/wii.c
+ *
+ * Nintendo Wii bootwrapper support
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include <stddef.h>
+#include "stdio.h"
+#include "types.h"
+#include "io.h"
+#include "ops.h"
+
+#include "ugecon.h"
+
+BSS_STACK(8192);
+
+#define HW_REG(x) ((void *)(x))
+
+#define EXI_CTRL HW_REG(0x0d800070)
+#define EXI_CTRL_ENABLE (1<<0)
+
+#define MEM2_TOP (0x10000000 + 64*1024*1024)
+#define FIRMWARE_DEFAULT_SIZE (12*1024*1024)
+
+
+struct mipc_infohdr {
+ char magic[3];
+ u8 version;
+ u32 mem2_boundary;
+ u32 ipc_in;
+ size_t ipc_in_size;
+ u32 ipc_out;
+ size_t ipc_out_size;
+};
+
+static int mipc_check_address(u32 pa)
+{
+ /* only MEM2 addresses */
+ if (pa < 0x10000000 || pa > 0x14000000)
+ return -EINVAL;
+ return 0;
+}
+
+static struct mipc_infohdr *mipc_get_infohdr(void)
+{
+ struct mipc_infohdr **hdrp, *hdr;
+
+ /* 'mini' header pointer is the last word of MEM2 memory */
+ hdrp = (struct mipc_infohdr **)0x13fffffc;
+ if (mipc_check_address((u32)hdrp)) {
+ printf("mini: invalid hdrp %08X\n", (u32)hdrp);
+ hdr = NULL;
+ goto out;
+ }
+
+ hdr = *hdrp;
+ if (mipc_check_address((u32)hdr)) {
+ printf("mini: invalid hdr %08X\n", (u32)hdr);
+ hdr = NULL;
+ goto out;
+ }
+ if (memcmp(hdr->magic, "IPC", 3)) {
+ printf("mini: invalid magic\n");
+ hdr = NULL;
+ goto out;
+ }
+
+out:
+ return hdr;
+}
+
+static int mipc_get_mem2_boundary(u32 *mem2_boundary)
+{
+ struct mipc_infohdr *hdr;
+ int error;
+
+ hdr = mipc_get_infohdr();
+ if (!hdr) {
+ error = -1;
+ goto out;
+ }
+
+ if (mipc_check_address(hdr->mem2_boundary)) {
+ printf("mini: invalid mem2_boundary %08X\n",
+ hdr->mem2_boundary);
+ error = -EINVAL;
+ goto out;
+ }
+ *mem2_boundary = hdr->mem2_boundary;
+ error = 0;
+out:
+ return error;
+
+}
+
+static void platform_fixups(void)
+{
+ void *mem;
+ u32 reg[4];
+ u32 mem2_boundary;
+ int len;
+ int error;
+
+ mem = finddevice("/memory");
+ if (!mem)
+ fatal("Can't find memory node\n");
+
+ /* two ranges of (address, size) words */
+ len = getprop(mem, "reg", reg, sizeof(reg));
+ if (len != sizeof(reg)) {
+ /* nothing to do */
+ goto out;
+ }
+
+ /* retrieve MEM2 boundary from 'mini' */
+ error = mipc_get_mem2_boundary(&mem2_boundary);
+ if (error) {
+ /* if that fails use a sane value */
+ mem2_boundary = MEM2_TOP - FIRMWARE_DEFAULT_SIZE;
+ }
+
+ if (mem2_boundary > reg[2] && mem2_boundary < reg[2] + reg[3]) {
+ reg[3] = mem2_boundary - reg[2];
+ printf("top of MEM2 @ %08X\n", reg[2] + reg[3]);
+ setprop(mem, "reg", reg, sizeof(reg));
+ }
+
+out:
+ return;
+}
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ u32 heapsize = 24*1024*1024 - (u32)_end;
+
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+
+ /*
+ * 'mini' boots the Broadway processor with EXI disabled.
+ * We need it enabled before probing for the USB Gecko.
+ */
+ out_be32(EXI_CTRL, in_be32(EXI_CTRL) | EXI_CTRL_ENABLE);
+
+ if (ug_probe())
+ console_ops.write = ug_console_write;
+
+ platform_ops.fixups = platform_fixups;
+}
+
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ac9e9a58b2b..390512ae7f8 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -230,6 +230,10 @@ xpedite52*)
link_address='0x1400000'
platformo=$object/cuboot-85xx.o
;;
+gamecube|wii)
+ link_address='0x600000'
+ platformo="$object/$platform-head.o $object/$platform.o"
+ ;;
esac
vmz="$tmpdir/`basename \"$kernel\"`.$ext"
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 28980738776..6cd2cd65c2c 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -218,7 +218,7 @@ CONFIG_MPIC=y
# CONFIG_MPIC_WEIRD is not set
# CONFIG_PPC_I8259 is not set
# CONFIG_PPC_RTAS is not set
-# CONFIG_MMIO_NVRAM is not set
+CONFIG_MMIO_NVRAM=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
# CONFIG_PPC_INDIRECT_IO is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index e199d1cacba..a6a3768f730 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -218,7 +218,7 @@ CONFIG_MPIC=y
# CONFIG_MPIC_WEIRD is not set
# CONFIG_PPC_I8259 is not set
# CONFIG_PPC_RTAS is not set
-# CONFIG_MMIO_NVRAM is not set
+CONFIG_MMIO_NVRAM=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
# CONFIG_PPC_INDIRECT_IO is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 3b0fbfb28ef..1975d41e076 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -219,7 +219,7 @@ CONFIG_MPIC=y
# CONFIG_MPIC_WEIRD is not set
# CONFIG_PPC_I8259 is not set
# CONFIG_PPC_RTAS is not set
-# CONFIG_MMIO_NVRAM is not set
+CONFIG_MMIO_NVRAM=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_PPC_970_NAP is not set
# CONFIG_PPC_INDIRECT_IO is not set
@@ -1124,7 +1124,7 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_TIMERIOMEM is not set
-# CONFIG_NVRAM is not set
+CONFIG_NVRAM=y
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index fc905924c02..826a65d3f00 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -757,7 +757,7 @@ CONFIG_SUNGEM=y
# CONFIG_B44 is not set
# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
-CONFIG_ACENIC=y
+CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
@@ -794,8 +794,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_BNX2X is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
-CONFIG_TR=y
-CONFIG_IBMOL=y
+# CONFIG_TR is not set
+# CONFIG_IBMOL is not set
# CONFIG_3C359 is not set
# CONFIG_TMS380TR is not set
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
new file mode 100644
index 00000000000..942e1193e9e
--- /dev/null
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -0,0 +1,1061 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc8
+# Sun Nov 22 21:07:30 2009
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+CONFIG_PPC_BOOK3S_32=y
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_PPC_BOOK3S=y
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_PPC_HAVE_PMU_SUPPORT=y
+CONFIG_PPC_PERF_CTRS=y
+# CONFIG_SMP is not set
+CONFIG_NOT_COHERENT_CACHE=y
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DTC=y
+# CONFIG_DEFAULT_UIMAGE is not set
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="-gcn"
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+CONFIG_PERF_COUNTERS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+# CONFIG_FREEZER is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_CHRP is not set
+# CONFIG_MPC5121_ADS is not set
+# CONFIG_MPC5121_GENERIC is not set
+# CONFIG_PPC_MPC52xx is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_86xx is not set
+CONFIG_EMBEDDED6xx=y
+# CONFIG_LINKSTATION is not set
+# CONFIG_STORCENTER is not set
+# CONFIG_MPC7448HPC2 is not set
+# CONFIG_PPC_HOLLY is not set
+# CONFIG_PPC_PRPMC2800 is not set
+# CONFIG_PPC_C2K is not set
+CONFIG_GAMECUBE_COMMON=y
+CONFIG_USBGECKO_UDBG=y
+CONFIG_FLIPPER_PIC=y
+CONFIG_GAMECUBE=y
+# CONFIG_WII is not set
+# CONFIG_AMIGAONE is not set
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_FSL_ULI1575 is not set
+# CONFIG_SIMPLE_GPIO is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=m
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_SWIOTLB is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+CONFIG_MAX_ACTIVE_REGIONS=32
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_MIGRATION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_EXTRA_TARGETS=""
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+CONFIG_ADVANCED_OPTIONS=y
+# CONFIG_LOWMEM_SIZE_BOOL is not set
+CONFIG_LOWMEM_SIZE=0x30000000
+# CONFIG_PAGE_OFFSET_BOOL is not set
+CONFIG_PAGE_OFFSET=0xc0000000
+# CONFIG_KERNEL_START_BOOL is not set
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+# CONFIG_TASK_SIZE_BOOL is not set
+CONFIG_TASK_SIZE=0xc0000000
+# CONFIG_CONSISTENT_SIZE_BOOL is not set
+CONFIG_CONSISTENT_SIZE=0x00200000
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+# CONFIG_STANDALONE is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+CONFIG_OF_DEVICE=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_XILINX_EMACLITE is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_XILINX_XPS_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_HVC_UDBG is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_OF is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_PPC=y
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_GENERIC=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=y
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_LATENCYTOP=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+CONFIG_SCHED_TRACER=y
+CONFIG_BOOT_TRACER=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+CONFIG_DMA_API_DEBUG=y
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_PPC_DISABLE_WERROR is not set
+CONFIG_PPC_WERROR=y
+CONFIG_PRINT_STACK_DEPTH=64
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_PPC_EMULATED_STATS is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+# CONFIG_XMON is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+# CONFIG_PPC_EARLY_DEBUG_44x is not set
+# CONFIG_PPC_EARLY_DEBUG_40x is not set
+# CONFIG_PPC_EARLY_DEBUG_CPM is not set
+CONFIG_PPC_EARLY_DEBUG_USBGECKO=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/iseries_defconfig b/arch/powerpc/configs/iseries_defconfig
index f925c555508..76982c51a4c 100644
--- a/arch/powerpc/configs/iseries_defconfig
+++ b/arch/powerpc/configs/iseries_defconfig
@@ -714,8 +714,8 @@ CONFIG_NETDEV_10000=y
# CONFIG_BNX2X is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
-CONFIG_TR=y
-CONFIG_IBMOL=y
+# CONFIG_TR is not set
+# CONFIG_IBMOL is not set
# CONFIG_3C359 is not set
# CONFIG_TMS380TR is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 25240182457..7b3804a6e36 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -304,11 +304,11 @@ CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
-CONFIG_HZ=250
+CONFIG_HZ=100
CONFIG_SCHED_HRTICK=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
@@ -980,7 +980,7 @@ CONFIG_E100=y
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
-CONFIG_ACENIC=y
+CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
@@ -1023,8 +1023,8 @@ CONFIG_PASEMI_MAC=y
# CONFIG_BNX2X is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
-CONFIG_TR=y
-CONFIG_IBMOL=y
+# CONFIG_TR is not set
+# CONFIG_IBMOL is not set
# CONFIG_3C359 is not set
# CONFIG_TMS380TR is not set
@@ -1863,7 +1863,7 @@ CONFIG_HFSPLUS_FS=m
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-CONFIG_CRAMFS=y
+CONFIG_CRAMFS=m
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 18af4603625..8195f1650cb 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -1008,8 +1008,8 @@ CONFIG_IXGB=m
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
# CONFIG_BE2NET is not set
-CONFIG_TR=y
-CONFIG_IBMOL=y
+# CONFIG_TR is not set
+# CONFIG_IBMOL is not set
# CONFIG_3C359 is not set
# CONFIG_TMS380TR is not set
CONFIG_WLAN=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index c568329723b..ca9ff9aad74 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -230,11 +230,11 @@ CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
-CONFIG_HZ=250
+CONFIG_HZ=100
CONFIG_SCHED_HRTICK=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
@@ -796,7 +796,7 @@ CONFIG_E100=y
# CONFIG_NET_POCKET is not set
# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
-CONFIG_ACENIC=y
+CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
@@ -834,8 +834,8 @@ CONFIG_S2IO=m
# CONFIG_BNX2X is not set
# CONFIG_QLGE is not set
# CONFIG_SFC is not set
-CONFIG_TR=y
-CONFIG_IBMOL=y
+# CONFIG_TR is not set
+# CONFIG_IBMOL is not set
# CONFIG_3C359 is not set
# CONFIG_TMS380TR is not set
@@ -1494,7 +1494,7 @@ CONFIG_CONFIGFS_FS=m
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-CONFIG_CRAMFS=y
+CONFIG_CRAMFS=m
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_OMFS_FS is not set
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
new file mode 100644
index 00000000000..c386828c639
--- /dev/null
+++ b/arch/powerpc/configs/wii_defconfig
@@ -0,0 +1,1406 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc8
+# Sun Nov 22 20:37:21 2009
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+CONFIG_PPC_BOOK3S_32=y
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_PPC_BOOK3S=y
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_PPC_MM_SLICES is not set
+CONFIG_PPC_HAVE_PMU_SUPPORT=y
+CONFIG_PPC_PERF_CTRS=y
+# CONFIG_SMP is not set
+CONFIG_NOT_COHERENT_CACHE=y
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DTC=y
+# CONFIG_DEFAULT_UIMAGE is not set
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="-wii"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+CONFIG_EVENT_PROFILE=y
+CONFIG_PERF_COUNTERS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_TRACEPOINTS=y
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_SLOW_WORK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+# CONFIG_FREEZER is not set
+
+#
+# Platform support
+#
+# CONFIG_PPC_CHRP is not set
+# CONFIG_MPC5121_ADS is not set
+# CONFIG_MPC5121_GENERIC is not set
+# CONFIG_PPC_MPC52xx is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_86xx is not set
+CONFIG_EMBEDDED6xx=y
+# CONFIG_LINKSTATION is not set
+# CONFIG_STORCENTER is not set
+# CONFIG_MPC7448HPC2 is not set
+# CONFIG_PPC_HOLLY is not set
+# CONFIG_PPC_PRPMC2800 is not set
+# CONFIG_PPC_C2K is not set
+CONFIG_GAMECUBE_COMMON=y
+CONFIG_USBGECKO_UDBG=y
+CONFIG_FLIPPER_PIC=y
+# CONFIG_GAMECUBE is not set
+CONFIG_HLWD_PIC=y
+CONFIG_STARLET_MINI=y
+CONFIG_WII=y
+# CONFIG_AMIGAONE is not set
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_FSL_ULI1575 is not set
+# CONFIG_SIMPLE_GPIO is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_BINFMT_ELF=y
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=m
+# CONFIG_IOMMU_HELPER is not set
+# CONFIG_SWIOTLB is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_KEXEC=y
+# CONFIG_CRASH_DUMP is not set
+CONFIG_MAX_ACTIVE_REGIONS=32
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_MIGRATION is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
+# CONFIG_PPC_256K_PAGES is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_EXTRA_TARGETS=""
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+CONFIG_ADVANCED_OPTIONS=y
+# CONFIG_LOWMEM_SIZE_BOOL is not set
+CONFIG_LOWMEM_SIZE=0x30000000
+# CONFIG_PAGE_OFFSET_BOOL is not set
+CONFIG_PAGE_OFFSET=0xc0000000
+# CONFIG_KERNEL_START_BOOL is not set
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+# CONFIG_TASK_SIZE_BOOL is not set
+CONFIG_TASK_SIZE=0xc0000000
+# CONFIG_CONSISTENT_SIZE_BOOL is not set
+CONFIG_CONSISTENT_SIZE=0x00200000
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=y
+CONFIG_BT_L2CAP=y
+# CONFIG_BT_SCO is not set
+CONFIG_BT_RFCOMM=y
+# CONFIG_BT_RFCOMM_TTY is not set
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+# CONFIG_BT_BNEP_PROTO_FILTER is not set
+CONFIG_BT_HIDP=y
+
+#
+# Bluetooth device drivers
+#
+# CONFIG_BT_HCIBTSDIO is not set
+# CONFIG_BT_HCIUART is not set
+# CONFIG_BT_HCIVHCI is not set
+# CONFIG_BT_MRVL is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEFAULT_PS_VALUE=1
+# CONFIG_CFG80211_DEBUGFS is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+CONFIG_MAC80211=y
+# CONFIG_MAC80211_RC_PID is not set
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+# CONFIG_STANDALONE is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+CONFIG_OF_DEVICE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_I2C=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_XILINX_EMACLITE is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_HOSTAP is not set
+CONFIG_B43=y
+CONFIG_B43_SDIO=y
+CONFIG_B43_PIO=y
+# CONFIG_B43_PHY_LP is not set
+CONFIG_B43_DEBUG=y
+# CONFIG_B43_FORCE_PIO is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
+# CONFIG_IWM is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_LIBPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_XILINX_XPS_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_HVC_UDBG is not set
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_NVRAM=y
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_MPC is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_XILINX is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=y
+CONFIG_SSB_BLOCKIO=y
+CONFIG_SSB_SDIOHOST_POSSIBLE=y
+CONFIG_SSB_SDIOHOST=y
+# CONFIG_SSB_SILENT is not set
+CONFIG_SSB_DEBUG=y
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_OF is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_SEQUENCER=y
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_PPC=y
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_APPLE=m
+CONFIG_HID_WACOM=m
+CONFIG_USB_SUPPORT=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+
+#
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+# CONFIG_MMC_SDHCI_OF is not set
+# CONFIG_MMC_SDHCI_PLTFM is not set
+# CONFIG_MMC_WBSD is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_GENERIC=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+# CONFIG_CUSE is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+# CONFIG_CIFS_WEAK_PW_HASH is not set
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_LATENCYTOP=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+CONFIG_SCHED_TRACER=y
+CONFIG_BOOT_TRACER=y
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+CONFIG_DMA_API_DEBUG=y
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_PPC_DISABLE_WERROR is not set
+CONFIG_PPC_WERROR=y
+CONFIG_PRINT_STACK_DEPTH=64
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_PPC_EMULATED_STATS is not set
+# CONFIG_CODE_PATCHING_SELFTEST is not set
+# CONFIG_FTR_FIXUP_SELFTEST is not set
+# CONFIG_MSI_BITMAP_SELFTEST is not set
+# CONFIG_XMON is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_VIRQ_DEBUG is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+# CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE is not set
+# CONFIG_PPC_EARLY_DEBUG_BEAT is not set
+# CONFIG_PPC_EARLY_DEBUG_44x is not set
+# CONFIG_PPC_EARLY_DEBUG_40x is not set
+# CONFIG_PPC_EARLY_DEBUG_CPM is not set
+CONFIG_PPC_EARLY_DEBUG_USBGECKO=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_PPC_CLOCK is not set
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/include/asm/asm-offsets.h b/arch/powerpc/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/powerpc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/powerpc/include/asm/async_tx.h b/arch/powerpc/include/asm/async_tx.h
new file mode 100644
index 00000000000..8b2dc55d01a
--- /dev/null
+++ b/arch/powerpc/include/asm/async_tx.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008-2009 DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef _ASM_POWERPC_ASYNC_TX_H_
+#define _ASM_POWERPC_ASYNC_TX_H_
+
+#if defined(CONFIG_440SPe) || defined(CONFIG_440SP)
+extern struct dma_chan *
+ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
+ struct page **dst_lst, int dst_cnt, struct page **src_lst,
+ int src_cnt, size_t src_sz);
+
+#define async_tx_find_channel(dep, cap, dst_lst, dst_cnt, src_lst, \
+ src_cnt, src_sz) \
+ ppc440spe_async_tx_find_best_channel(cap, dst_lst, dst_cnt, src_lst, \
+ src_cnt, src_sz)
+#else
+
+#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
+ __async_tx_find_channel(dep, type)
+
+struct dma_chan *
+__async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type);
+
+#endif
+
+#endif
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 64e1fdca233..2c15212e170 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -68,7 +68,7 @@
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry))); \
- for(;;) ; \
+ unreachable(); \
} while (0)
#define BUG_ON(x) do { \
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index 24d79e3abd8..0835eb977ba 100644
--- a/arch/powerpc/include/asm/cpm.h
+++ b/arch/powerpc/include/asm/cpm.h
@@ -3,8 +3,47 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/errno.h>
#include <linux/of.h>
+/*
+ * USB Controller pram common to QE and CPM.
+ */
+struct usb_ctlr {
+ u8 usb_usmod;
+ u8 usb_usadr;
+ u8 usb_uscom;
+ u8 res1[1];
+ __be16 usb_usep[4];
+ u8 res2[4];
+ __be16 usb_usber;
+ u8 res3[2];
+ __be16 usb_usbmr;
+ u8 res4[1];
+ u8 usb_usbs;
+ /* Fields down below are QE-only */
+ __be16 usb_ussft;
+ u8 res5[2];
+ __be16 usb_usfrn;
+ u8 res6[0x22];
+} __attribute__ ((packed));
+
+/*
+ * Function code bits, usually generic to devices.
+ */
+#ifdef CONFIG_CPM1
+#define CPMFCR_GBL ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#define CPMFCR_TC2 ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#define CPMFCR_DTB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#define CPMFCR_BDB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#else
+#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
+#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
+#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
+#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
+#endif
+#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
+
/* Opcodes common to CPM1 and CPM2
*/
#define CPM_CR_INIT_TRX ((ushort)0x0000)
@@ -93,13 +132,56 @@ typedef struct cpm_buf_desc {
#define BD_I2C_START (0x0400)
int cpm_muram_init(void);
+
+#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
int cpm_muram_free(unsigned long offset);
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
+#else
+static inline unsigned long cpm_muram_alloc(unsigned long size,
+ unsigned long align)
+{
+ return -ENOSYS;
+}
+
+static inline int cpm_muram_free(unsigned long offset)
+{
+ return -ENOSYS;
+}
+
+static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
+ unsigned long size)
+{
+ return -ENOSYS;
+}
+
+static inline void __iomem *cpm_muram_addr(unsigned long offset)
+{
+ return NULL;
+}
+
+static inline unsigned long cpm_muram_offset(void __iomem *addr)
+{
+ return -ENOSYS;
+}
+
+static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+ return 0;
+}
+#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
+
+#ifdef CONFIG_CPM
int cpm_command(u32 command, u8 opcode);
+#else
+static inline int cpm_command(u32 command, u8 opcode)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_CPM */
int cpm2_gpiochip_add32(struct device_node *np);
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 7685ffde882..81b01192f44 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -478,51 +478,6 @@ typedef struct iic {
char res2[2]; /* Reserved */
} iic_t;
-/* SPI parameter RAM.
-*/
-typedef struct spi {
- ushort spi_rbase; /* Rx Buffer descriptor base address */
- ushort spi_tbase; /* Tx Buffer descriptor base address */
- u_char spi_rfcr; /* Rx function code */
- u_char spi_tfcr; /* Tx function code */
- ushort spi_mrblr; /* Max receive buffer length */
- uint spi_rstate; /* Internal */
- uint spi_rdp; /* Internal */
- ushort spi_rbptr; /* Internal */
- ushort spi_rbc; /* Internal */
- uint spi_rxtmp; /* Internal */
- uint spi_tstate; /* Internal */
- uint spi_tdp; /* Internal */
- ushort spi_tbptr; /* Internal */
- ushort spi_tbc; /* Internal */
- uint spi_txtmp; /* Internal */
- uint spi_res;
- ushort spi_rpbase; /* Relocation pointer */
- ushort spi_res2;
-} spi_t;
-
-/* SPI Mode register.
-*/
-#define SPMODE_LOOP ((ushort)0x4000) /* Loopback */
-#define SPMODE_CI ((ushort)0x2000) /* Clock Invert */
-#define SPMODE_CP ((ushort)0x1000) /* Clock Phase */
-#define SPMODE_DIV16 ((ushort)0x0800) /* BRG/16 mode */
-#define SPMODE_REV ((ushort)0x0400) /* Reversed Data */
-#define SPMODE_MSTR ((ushort)0x0200) /* SPI Master */
-#define SPMODE_EN ((ushort)0x0100) /* Enable */
-#define SPMODE_LENMSK ((ushort)0x00f0) /* character length */
-#define SPMODE_LEN4 ((ushort)0x0030) /* 4 bits per char */
-#define SPMODE_LEN8 ((ushort)0x0070) /* 8 bits per char */
-#define SPMODE_LEN16 ((ushort)0x00f0) /* 16 bits per char */
-#define SPMODE_PMMSK ((ushort)0x000f) /* prescale modulus */
-
-/* SPIE fields */
-#define SPIE_MME 0x20
-#define SPIE_TXE 0x10
-#define SPIE_BSY 0x04
-#define SPIE_TXB 0x02
-#define SPIE_RXB 0x01
-
/*
 * RISC Controller Configuration Register definitions
*/
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 990ff191da8..f42e9baf3a4 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -124,14 +124,6 @@ static inline void cpm2_fastbrg(uint brg, uint rate, int div16)
__cpm2_setbrg(brg, rate, CPM2_BRG_INT_CLK, div16, CPM_BRG_EXTC_INT);
}
-/* Function code bits, usually generic to devices.
-*/
-#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
-#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
-#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
-#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
-#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
-
/* Parameter RAM offsets from the base.
*/
#define PROFF_SCC1 ((uint)0x8000)
@@ -654,45 +646,6 @@ typedef struct iic {
uint iic_txtmp; /* Internal */
} iic_t;
-/* SPI parameter RAM.
-*/
-typedef struct spi {
- ushort spi_rbase; /* Rx Buffer descriptor base address */
- ushort spi_tbase; /* Tx Buffer descriptor base address */
- u_char spi_rfcr; /* Rx function code */
- u_char spi_tfcr; /* Tx function code */
- ushort spi_mrblr; /* Max receive buffer length */
- uint spi_rstate; /* Internal */
- uint spi_rdp; /* Internal */
- ushort spi_rbptr; /* Internal */
- ushort spi_rbc; /* Internal */
- uint spi_rxtmp; /* Internal */
- uint spi_tstate; /* Internal */
- uint spi_tdp; /* Internal */
- ushort spi_tbptr; /* Internal */
- ushort spi_tbc; /* Internal */
- uint spi_txtmp; /* Internal */
- uint spi_res; /* Tx temp. */
- uint spi_res1[4]; /* SDMA temp. */
-} spi_t;
-
-/* SPI Mode register.
-*/
-#define SPMODE_LOOP ((ushort)0x4000) /* Loopback */
-#define SPMODE_CI ((ushort)0x2000) /* Clock Invert */
-#define SPMODE_CP ((ushort)0x1000) /* Clock Phase */
-#define SPMODE_DIV16 ((ushort)0x0800) /* BRG/16 mode */
-#define SPMODE_REV ((ushort)0x0400) /* Reversed Data */
-#define SPMODE_MSTR ((ushort)0x0200) /* SPI Master */
-#define SPMODE_EN ((ushort)0x0100) /* Enable */
-#define SPMODE_LENMSK ((ushort)0x00f0) /* character length */
-#define SPMODE_PMMSK ((ushort)0x000f) /* prescale modulus */
-
-#define SPMODE_LEN(x) ((((x)-1)&0xF)<<4)
-#define SPMODE_PM(x) ((x) &0xF)
-
-#define SPI_EB ((u_char)0x10) /* big endian byte order */
-
/* IDMA parameter RAM
*/
typedef struct idma {
diff --git a/arch/powerpc/include/asm/dcr-regs.h b/arch/powerpc/include/asm/dcr-regs.h
index 828e3aa1f2f..380274de429 100644
--- a/arch/powerpc/include/asm/dcr-regs.h
+++ b/arch/powerpc/include/asm/dcr-regs.h
@@ -157,4 +157,27 @@
#define L2C_SNP_SSR_32G 0x0000f000
#define L2C_SNP_ESR 0x00000800
+/*
+ * DCR register offsets for 440SP/440SPe I2O/DMA controller.
+ * The base address is configured in the device tree.
+ */
+#define DCRN_I2O0_IBAL 0x006
+#define DCRN_I2O0_IBAH 0x007
+#define I2O_REG_ENABLE 0x00000001 /* Enable I2O/DMA access */
+
+/* 440SP/440SPe Software Reset DCR */
+#define DCRN_SDR0_SRST 0x0200
+#define DCRN_SDR0_SRST_I2ODMA (0x80000000 >> 15) /* Reset I2O/DMA */
+
+/* 440SP/440SPe Memory Queue DCR offsets */
+#define DCRN_MQ0_XORBA 0x04
+#define DCRN_MQ0_CF2H 0x06
+#define DCRN_MQ0_CFBHL 0x0f
+#define DCRN_MQ0_BAUH 0x10
+
+/* HB/LL Paths Configuration Register */
+#define MQ0_CFBHL_TPLM 28
+#define MQ0_CFBHL_HBCL 23
+#define MQ0_CFBHL_POLY 15
+
#endif /* __DCR_REGS_H__ */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e281daebddc..80a973bb9e7 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -197,7 +197,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 014a624f4c8..17828ad411e 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -170,7 +170,6 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index a98653b2623..57c40007199 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -147,6 +147,7 @@
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
+ DO_KVM n; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
@@ -170,6 +171,7 @@ label##_pSeries: \
.globl label##_pSeries; \
label##_pSeries: \
HMT_MEDIUM; \
+ DO_KVM n; \
mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \
std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index f1f4e23a84e..5c2c0233175 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -44,6 +44,9 @@
*/
enum fixed_addresses {
FIX_HOLE,
+ /* reserve the top 128K for early debugging purposes */
+ FIX_EARLY_DEBUG_TOP = FIX_HOLE,
+ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+((128*1024)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h
index ea04632399d..38762edb5e5 100644
--- a/arch/powerpc/include/asm/gpio.h
+++ b/arch/powerpc/include/asm/gpio.h
@@ -38,12 +38,9 @@ static inline int gpio_cansleep(unsigned int gpio)
return __gpio_cansleep(gpio);
}
-/*
- * Not implemented, yet.
- */
static inline int gpio_to_irq(unsigned int gpio)
{
- return -ENOSYS;
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index b1dafb6a974..5856a66ab40 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -3,6 +3,10 @@
#include <asm/page.h>
+pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
+ unsigned long addr, unsigned *shift);
+
+void flush_dcache_icache_hugepage(struct page *page);
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len);
@@ -11,12 +15,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
unsigned long ceiling);
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep);
-
/*
* The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
* to override the version in mm/hugetlb.c
@@ -42,9 +40,26 @@ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
+ return __pte(old);
+}
+
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+ pte_t pte;
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_tlb_page(vma, addr);
}
static inline int huge_pte_none(pte_t pte)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index c27caac47ad..f0275818b95 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -212,6 +212,19 @@
#define H_QUERY_INT_STATE 0x1E4
#define H_POLL_PENDING 0x1D8
#define H_ILLAN_ATTRIBUTES 0x244
+#define H_MODIFY_HEA_QP 0x250
+#define H_QUERY_HEA_QP 0x254
+#define H_QUERY_HEA 0x258
+#define H_QUERY_HEA_PORT 0x25C
+#define H_MODIFY_HEA_PORT 0x260
+#define H_REG_BCMC 0x264
+#define H_DEREG_BCMC 0x268
+#define H_REGISTER_HEA_RPAGES 0x26C
+#define H_DISABLE_AND_GET_HEA 0x270
+#define H_GET_HEA_INFO 0x274
+#define H_ALLOC_HEA_RESOURCE 0x278
+#define H_ADD_CONN 0x284
+#define H_DEL_CONN 0x288
#define H_JOIN 0x298
#define H_VASI_STATE 0x2A4
#define H_ENABLE_CRQ 0x2B0
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index abbc2aaaced..9f4c9d4f580 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -64,11 +64,6 @@ extern void iseries_handle_interrupts(void);
get_paca()->hard_enabled = 0; \
} while(0)
-static inline int irqs_disabled_flags(unsigned long flags)
-{
- return flags == 0;
-}
-
#else
#if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/include/asm/immap_cpm2.h b/arch/powerpc/include/asm/immap_cpm2.h
index d4f069bf0e5..7c64fda5357 100644
--- a/arch/powerpc/include/asm/immap_cpm2.h
+++ b/arch/powerpc/include/asm/immap_cpm2.h
@@ -549,7 +549,7 @@ typedef struct comm_proc {
/* USB Controller.
*/
-typedef struct usb_ctlr {
+typedef struct cpm_usb_ctlr {
u8 usb_usmod;
u8 usb_usadr;
u8 usb_uscom;
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index c346d0bcd23..4e10f508570 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -210,7 +210,7 @@ struct sir {
} __attribute__ ((packed));
/* USB Controller */
-struct usb_ctlr {
+struct qe_usb_ctlr {
u8 usb_usmod;
u8 usb_usadr;
u8 usb_uscom;
@@ -229,7 +229,7 @@ struct usb_ctlr {
} __attribute__ ((packed));
/* MCC */
-struct mcc {
+struct qe_mcc {
__be32 mcce; /* MCC event register */
__be32 mccm; /* MCC mask register */
__be32 mccf; /* MCC configuration register */
@@ -431,9 +431,9 @@ struct qe_immap {
struct qe_mux qmx; /* QE Multiplexer */
struct qe_timers qet; /* QE Timers */
struct spi spi[0x2]; /* spi */
- struct mcc mcc; /* mcc */
+ struct qe_mcc mcc; /* mcc */
struct qe_brg brg; /* brg */
- struct usb_ctlr usb; /* USB */
+ struct qe_usb_ctlr usb; /* USB */
struct si1 si1; /* SI */
u8 res11[0x800];
struct sir sir; /* SI Routing Tables */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index bbcd1aaf3df..e054baef184 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -17,8 +17,6 @@
#include <asm/atomic.h>
-#define get_irq_desc(irq) (&irq_desc[(irq)])
-
/* Define a way to iterate across irqs. */
#define for_each_irq(i) \
for ((i) = 0; (i) < NR_IRQS; ++(i))
@@ -34,12 +32,15 @@ extern atomic_t ppc_n_lost_interrupts;
*/
#define NO_IRQ_IGNORE ((unsigned int)-1)
-/* Total number of virq in the platform (make it a CONFIG_* option ? */
-#define NR_IRQS 512
+/* Total number of virq in the platform */
+#define NR_IRQS CONFIG_NR_IRQS
/* Number of irqs reserved for the legacy controller */
#define NUM_ISA_INTERRUPTS 16
+/* Same thing, used by the generic IRQ code */
+#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
+
/* This type is the placeholder for a hardware interrupt number. It has to
* be big enough to enclose whatever representation is used by a given
* platform.
@@ -99,7 +100,7 @@ struct irq_host_ops {
* interrupt controller has for that line)
*/
int (*xlate)(struct irq_host *h, struct device_node *ctrler,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type);
};
@@ -313,7 +314,7 @@ extern void irq_free_virt(unsigned int virq, unsigned int count);
* of the of_irq_map_*() functions.
*/
extern unsigned int irq_create_of_mapping(struct device_node *controller,
- u32 *intspec, unsigned int intsize);
+ const u32 *intspec, unsigned int intsize);
/**
* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index bb2de6aa5ce..81f3b0b5601 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -46,6 +46,24 @@ struct kvm_regs {
};
struct kvm_sregs {
+ __u32 pvr;
+ union {
+ struct {
+ __u64 sdr1;
+ struct {
+ struct {
+ __u64 slbe;
+ __u64 slbv;
+ } slb[64];
+ } ppc64;
+ struct {
+ __u32 sr[16];
+ __u64 ibat[8];
+ __u64 dbat[8];
+ } ppc32;
+ } s;
+ __u8 pad[1020];
+ } u;
};
struct kvm_fpu {
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 56bfae59837..af2abe74f54 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -49,6 +49,46 @@
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+/* book3s */
+
+#define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100
+#define BOOK3S_INTERRUPT_MACHINE_CHECK 0x200
+#define BOOK3S_INTERRUPT_DATA_STORAGE 0x300
+#define BOOK3S_INTERRUPT_DATA_SEGMENT 0x380
+#define BOOK3S_INTERRUPT_INST_STORAGE 0x400
+#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
+#define BOOK3S_INTERRUPT_EXTERNAL 0x500
+#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
+#define BOOK3S_INTERRUPT_PROGRAM 0x700
+#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
+#define BOOK3S_INTERRUPT_DECREMENTER 0x900
+#define BOOK3S_INTERRUPT_SYSCALL 0xc00
+#define BOOK3S_INTERRUPT_TRACE 0xd00
+#define BOOK3S_INTERRUPT_PERFMON 0xf00
+#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
+#define BOOK3S_INTERRUPT_VSX 0xf40
+
+#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
+#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
+#define BOOK3S_IRQPRIO_INST_SEGMENT 2
+#define BOOK3S_IRQPRIO_DATA_STORAGE 3
+#define BOOK3S_IRQPRIO_INST_STORAGE 4
+#define BOOK3S_IRQPRIO_ALIGNMENT 5
+#define BOOK3S_IRQPRIO_PROGRAM 6
+#define BOOK3S_IRQPRIO_FP_UNAVAIL 7
+#define BOOK3S_IRQPRIO_ALTIVEC 8
+#define BOOK3S_IRQPRIO_VSX 9
+#define BOOK3S_IRQPRIO_SYSCALL 10
+#define BOOK3S_IRQPRIO_MACHINE_CHECK 11
+#define BOOK3S_IRQPRIO_DEBUG 12
+#define BOOK3S_IRQPRIO_EXTERNAL 13
+#define BOOK3S_IRQPRIO_DECREMENTER 14
+#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15
+#define BOOK3S_IRQPRIO_MAX 16
+
+#define BOOK3S_HFLAG_DCBZ32 0x1
+#define BOOK3S_HFLAG_SLB 0x2
+
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
new file mode 100644
index 00000000000..74b7369770d
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -0,0 +1,139 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_H__
+#define __ASM_KVM_BOOK3S_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_ppc.h>
+
+struct kvmppc_slb {
+ u64 esid;
+ u64 vsid;
+ u64 orige;
+ u64 origv;
+ bool valid;
+ bool Ks;
+ bool Kp;
+ bool nx;
+ bool large;
+ bool class;
+};
+
+struct kvmppc_sr {
+ u32 raw;
+ u32 vsid;
+ bool Ks;
+ bool Kp;
+ bool nx;
+};
+
+struct kvmppc_bat {
+ u64 raw;
+ u32 bepi;
+ u32 bepi_mask;
+ bool vs;
+ bool vp;
+ u32 brpn;
+ u8 wimg;
+ u8 pp;
+};
+
+struct kvmppc_sid_map {
+ u64 guest_vsid;
+ u64 guest_esid;
+ u64 host_vsid;
+ bool valid;
+};
+
+#define SID_MAP_BITS 9
+#define SID_MAP_NUM (1 << SID_MAP_BITS)
+#define SID_MAP_MASK (SID_MAP_NUM - 1)
+
+struct kvmppc_vcpu_book3s {
+ struct kvm_vcpu vcpu;
+ struct kvmppc_sid_map sid_map[SID_MAP_NUM];
+ struct kvmppc_slb slb[64];
+ struct {
+ u64 esid;
+ u64 vsid;
+ } slb_shadow[64];
+ u8 slb_shadow_max;
+ struct kvmppc_sr sr[16];
+ struct kvmppc_bat ibat[8];
+ struct kvmppc_bat dbat[8];
+ u64 hid[6];
+ int slb_nr;
+ u64 sdr1;
+ u64 dsisr;
+ u64 hior;
+ u64 msr_mask;
+ u64 vsid_first;
+ u64 vsid_next;
+ u64 vsid_max;
+ int context_id;
+};
+
+#define CONTEXT_HOST 0
+#define CONTEXT_GUEST 1
+#define CONTEXT_GUEST_END 2
+
+#define VSID_REAL 0xfffffffffff00000
+#define VSID_REAL_DR 0xffffffffffe00000
+#define VSID_REAL_IR 0xffffffffffd00000
+#define VSID_BAT 0xffffffffffc00000
+#define VSID_PR 0x8000000000000000
+
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
+extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
+extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
+extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr);
+extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
+extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
+ bool upper, u32 val);
+
+extern u32 kvmppc_trampoline_lowmem;
+extern u32 kvmppc_trampoline_enter;
+
+static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+}
+
+static inline ulong dsisr(void)
+{
+ ulong r;
+ asm ( "mfdsisr %0 " : "=r" (r) );
+ return r;
+}
+
+extern void kvm_return_point(void);
+
+#define INS_DCBZ 0x7c0007ec
+
+#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
new file mode 100644
index 00000000000..2e06ee8184e
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -0,0 +1,58 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_ASM_H__
+#define __ASM_KVM_BOOK3S_ASM_H__
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+
+#include <asm/kvm_asm.h>
+
+.macro DO_KVM intno
+ .if (\intno == BOOK3S_INTERRUPT_SYSTEM_RESET) || \
+ (\intno == BOOK3S_INTERRUPT_MACHINE_CHECK) || \
+ (\intno == BOOK3S_INTERRUPT_DATA_STORAGE) || \
+ (\intno == BOOK3S_INTERRUPT_INST_STORAGE) || \
+ (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
+ (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
+ (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
+ (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
+ (\intno == BOOK3S_INTERRUPT_PROGRAM) || \
+ (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
+ (\intno == BOOK3S_INTERRUPT_DECREMENTER) || \
+ (\intno == BOOK3S_INTERRUPT_SYSCALL) || \
+ (\intno == BOOK3S_INTERRUPT_TRACE) || \
+ (\intno == BOOK3S_INTERRUPT_PERFMON) || \
+ (\intno == BOOK3S_INTERRUPT_ALTIVEC) || \
+ (\intno == BOOK3S_INTERRUPT_VSX)
+
+ b kvmppc_trampoline_\intno
+kvmppc_resume_\intno:
+
+ .endif
+.endm
+
+#else
+
+.macro DO_KVM intno
+.endm
+
+#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
+
+#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c9c930ed11d..1201f62d0d7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -21,7 +21,8 @@
#define __POWERPC_KVM_HOST_H__
#include <linux/mutex.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm_asm.h>
@@ -37,6 +38,8 @@
#define KVM_NR_PAGE_SIZES 1
#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
+#define HPTEG_CACHE_NUM 1024
+
struct kvm;
struct kvm_run;
struct kvm_vcpu;
@@ -63,6 +66,17 @@ struct kvm_vcpu_stat {
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_wakeup;
+#ifdef CONFIG_PPC64
+ u32 pf_storage;
+ u32 pf_instruc;
+ u32 sp_storage;
+ u32 sp_instruc;
+ u32 queue_intr;
+ u32 ld;
+ u32 ld_slow;
+ u32 st;
+ u32 st_slow;
+#endif
};
enum kvm_exit_types {
@@ -109,9 +123,53 @@ struct kvmppc_exit_timing {
struct kvm_arch {
};
+struct kvmppc_pte {
+ u64 eaddr;
+ u64 vpage;
+ u64 raddr;
+ bool may_read;
+ bool may_write;
+ bool may_execute;
+};
+
+struct kvmppc_mmu {
+ /* book3s_64 only */
+ void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
+ u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
+ u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+ void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
+ void (*slbia)(struct kvm_vcpu *vcpu);
+ /* book3s */
+ void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
+ u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
+ int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+ void (*reset_msr)(struct kvm_vcpu *vcpu);
+ void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
+ int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
+ u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
+ bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
+};
+
+struct hpte_cache {
+ u64 host_va;
+ u64 pfn;
+ ulong slot;
+ struct kvmppc_pte pte;
+};
+
struct kvm_vcpu_arch {
- u32 host_stack;
+ ulong host_stack;
u32 host_pid;
+#ifdef CONFIG_PPC64
+ ulong host_msr;
+ ulong host_r2;
+ void *host_retip;
+ ulong trampoline_lowmem;
+ ulong trampoline_enter;
+ ulong highmem_handler;
+ ulong host_paca_phys;
+ struct kvmppc_mmu mmu;
+#endif
u64 fpr[32];
ulong gpr[32];
@@ -123,6 +181,10 @@ struct kvm_vcpu_arch {
ulong xer;
ulong msr;
+#ifdef CONFIG_PPC64
+ ulong shadow_msr;
+ ulong hflags;
+#endif
u32 mmucr;
ulong sprg0;
ulong sprg1;
@@ -149,6 +211,7 @@ struct kvm_vcpu_arch {
u32 ivor[64];
ulong ivpr;
u32 pir;
+ u32 pvr;
u32 shadow_pid;
u32 pid;
@@ -174,6 +237,9 @@ struct kvm_vcpu_arch {
#endif
u32 last_inst;
+#ifdef CONFIG_PPC64
+ ulong fault_dsisr;
+#endif
ulong fault_dear;
ulong fault_esr;
gpa_t paddr_accessed;
@@ -185,8 +251,15 @@ struct kvm_vcpu_arch {
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
- struct timer_list dec_timer;
+ struct hrtimer dec_timer;
+ struct tasklet_struct tasklet;
+ u64 dec_jiffies;
unsigned long pending_exceptions;
+
+#ifdef CONFIG_PPC64
+ struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
+ int hpte_cache_offset;
+#endif
};
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 2c6ee349df5..269ee46ab02 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -39,6 +39,7 @@ enum emulation_result {
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern char kvmppc_handlers_start[];
extern unsigned long kvmppc_handler_len;
+extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index f78f65c38f0..14b592dfb4e 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -100,7 +100,14 @@ struct lppaca {
// Used to pass parms from the OS to PLIC for SetAsrAndRfid
u64 saved_gpr3; // Saved GPR3 x20-x27
u64 saved_gpr4; // Saved GPR4 x28-x2F
- u64 saved_gpr5; // Saved GPR5 x30-x37
+ union {
+ u64 saved_gpr5; /* Saved GPR5 x30-x37 */
+ struct {
+ u8 cede_latency_hint; /* x30 */
+ u8 reserved[7]; /* x31-x36 */
+ } fields;
+ } gpr5_dword;
+
u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38
u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 9efa2be7833..9f0fc9e6ce0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -266,6 +266,11 @@ struct machdep_calls {
void (*suspend_disable_irqs)(void);
void (*suspend_enable_irqs)(void);
#endif
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ ssize_t (*cpu_probe)(const char *, size_t);
+ ssize_t (*cpu_release)(const char *, size_t);
+#endif
};
extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 079c06eae44..a062c57696d 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -39,6 +39,7 @@ struct macio_dev
struct macio_bus *bus; /* macio bus this device is on */
struct macio_dev *media_bay; /* Device is part of a media bay */
struct of_device ofdev;
+ struct device_dma_parameters dma_parms; /* ide needs that */
int n_resources;
struct resource resource[MACIO_DEV_COUNT_RESOURCES];
int n_interrupts;
@@ -78,6 +79,8 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour
return res->end - res->start + 1;
}
+extern int macio_enable_devres(struct macio_dev *dev);
+
extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name);
extern void macio_release_resource(struct macio_dev *dev, int resource_no);
extern int macio_request_resources(struct macio_dev *dev, const char *name);
@@ -131,6 +134,9 @@ struct macio_driver
int (*resume)(struct macio_dev* dev);
int (*shutdown)(struct macio_dev* dev);
+#ifdef CONFIG_PMAC_MEDIABAY
+ void (*mediabay_event)(struct macio_dev* dev, int mb_state);
+#endif
struct device_driver driver;
};
#define to_macio_driver(drv) container_of(drv,struct macio_driver, driver)
diff --git a/arch/powerpc/include/asm/mediabay.h b/arch/powerpc/include/asm/mediabay.h
index b2efb332580..11037a4133e 100644
--- a/arch/powerpc/include/asm/mediabay.h
+++ b/arch/powerpc/include/asm/mediabay.h
@@ -17,26 +17,31 @@
#define MB_POWER 6 /* media bay contains a Power device (???) */
#define MB_NO 7 /* media bay contains nothing */
-/* Number of bays in the machine or 0 */
-extern int media_bay_count;
+struct macio_dev;
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
-#include <linux/ide.h>
+#ifdef CONFIG_PMAC_MEDIABAY
-int check_media_bay_by_base(unsigned long base, int what);
-/* called by IDE PMAC host driver to register IDE controller for media bay */
-int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base,
- int irq, ide_hwif_t *hwif);
+/* Check the content type of the bay; returns MB_NO if the bay is still
+ * transitioning
+ */
+extern int check_media_bay(struct macio_dev *bay);
-int check_media_bay(struct device_node *which_bay, int what);
+/* The ATA driver uses the calls below to temporarily hold on the
+ * media bay callbacks while initializing the interface
+ */
+extern void lock_media_bay(struct macio_dev *bay);
+extern void unlock_media_bay(struct macio_dev *bay);
#else
-static inline int check_media_bay(struct device_node *which_bay, int what)
+static inline int check_media_bay(struct macio_dev *bay)
{
- return -ENODEV;
+ return MB_NO;
}
+static inline void lock_media_bay(struct macio_dev *bay) { }
+static inline void unlock_media_bay(struct macio_dev *bay) { }
+
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index bebe31c2e90..2102b214a87 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -173,14 +173,6 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
*/
extern int mmu_ci_restrictions;
-#ifdef CONFIG_HUGETLB_PAGE
-/*
- * The page size indexes of the huge pages for use by hugetlbfs
- */
-extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];
-
-#endif /* CONFIG_HUGETLB_PAGE */
-
/*
* This function sets the AVPN and L fields of the HPTE appropriately
* for the page size
@@ -253,10 +245,11 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
unsigned long vsid, pte_t *ptep, unsigned long trap,
unsigned int local, int ssize);
struct mm_struct;
+unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
-extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
- unsigned long ea, unsigned long vsid, int local,
- unsigned long trap);
+int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
+ pte_t *ptep, unsigned long trap, int local, int ssize,
+ unsigned int shift, unsigned int mmu_psize);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long pstart, unsigned long prot,
@@ -380,6 +373,38 @@ extern void slb_set_size(u16 size);
#ifndef __ASSEMBLY__
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these. Basically we have a 3-level tree, with the top level being
+ * the protptrs array. To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k). For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+ unsigned long maxaddr; /* only addresses < this are protected */
+ unsigned int **protptrs[2];
+ unsigned int *low_prot[4];
+};
+
+#define SBP_L1_BITS (PAGE_SHIFT - 2)
+#define SBP_L2_BITS (PAGE_SHIFT - 3)
+#define SBP_L1_COUNT (1 << SBP_L1_BITS)
+#define SBP_L2_COUNT (1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
+
+extern void subpage_prot_free(struct mm_struct *mm);
+extern void subpage_prot_init_new_context(struct mm_struct *mm);
+#else
+static inline void subpage_prot_free(struct mm_struct *mm) {}
+static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+
typedef unsigned long mm_context_id_t;
typedef struct {
@@ -393,6 +418,9 @@ typedef struct {
u16 sllp; /* SLB page size encoding */
#endif
unsigned long vdso_base;
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+ struct subpage_prot_table spt;
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
} mm_context_t;
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b34e94d9443..26383e0778a 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -23,6 +23,8 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
+extern int __init_new_context(void);
+extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void mmu_context_init(void);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 08454880a2c..0192a4ee2bc 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -87,5 +87,10 @@ struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+
+extern const unsigned long reloc_start[];
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 1b4f697abbd..b664ce79a17 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -276,6 +276,53 @@ extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
extern void mpc52xx_restart(char *cmd);
+/* mpc52xx_gpt.c */
+struct mpc52xx_gpt_priv;
+extern struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq);
+extern int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
+ int continuous);
+extern u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt);
+extern int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt);
+
+/* mpc52xx_lpbfifo.c */
+#define MPC52XX_LPBFIFO_FLAG_READ (0)
+#define MPC52XX_LPBFIFO_FLAG_WRITE (1<<0)
+#define MPC52XX_LPBFIFO_FLAG_NO_INCREMENT (1<<1)
+#define MPC52XX_LPBFIFO_FLAG_NO_DMA (1<<2)
+#define MPC52XX_LPBFIFO_FLAG_POLL_DMA (1<<3)
+
+struct mpc52xx_lpbfifo_request {
+ struct list_head list;
+
+ /* localplus bus address */
+ unsigned int cs;
+ size_t offset;
+
+ /* Memory address */
+ void *data;
+ phys_addr_t data_phys;
+
+ /* Details of transfer */
+ size_t size;
+ size_t pos; /* current position of transfer */
+ int flags;
+
+ /* What to do when finished */
+ void (*callback)(struct mpc52xx_lpbfifo_request *);
+
+ void *priv; /* Driver private data */
+
+ /* statistics */
+ int irq_count;
+ int irq_ticks;
+ u8 last_byte;
+ int buffer_not_done_cnt;
+};
+
+extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
+extern void mpc52xx_lpbfifo_poll(void);
+
/* mpc52xx_pic.c */
extern void mpc52xx_init_irq(void);
extern unsigned int mpc52xx_get_irq(void);
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 6c587eddee5..850b72f2744 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -73,7 +73,6 @@ extern int nvram_write_error_log(char * buff, int length,
extern int nvram_read_error_log(char * buff, int length,
unsigned int * err_type, unsigned int *err_seq);
extern int nvram_clear_error_log(void);
-extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
extern int pSeries_nvram_init(void);
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index e482e5352e6..d4b4bfa26fb 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -17,6 +17,7 @@
#ifdef CONFIG_PPC_PSERIES
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
+extern struct blocking_notifier_head pSeries_reconfig_chain;
#else /* !CONFIG_PPC_PSERIES */
static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 7d8514cecea..5e9b4ef7141 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -129,6 +129,15 @@ struct paca_struct {
u64 system_time; /* accumulated system TB ticks */
u64 startpurr; /* PURR/TB value snapshot */
u64 startspurr; /* SPURR value snapshot */
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ struct {
+ u64 esid;
+ u64 vsid;
+ } kvm_slb[64]; /* guest SLB */
+ u8 kvm_slb_max; /* highest used guest slb entry */
+ u8 kvm_in_guest; /* are we inside the guest? */
+#endif
};
extern struct paca_struct paca[];
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index ff24254990e..e96d52a516b 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -229,6 +229,20 @@ typedef unsigned long pgprot_t;
#endif
+typedef struct { signed long pd; } hugepd_t;
+#define HUGEPD_SHIFT_MASK 0x3f
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int hugepd_ok(hugepd_t hpd)
+{
+ return (hpd.pd > 0);
+}
+
+#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep))))
+#else /* CONFIG_HUGETLB_PAGE */
+#define is_hugepd(pdep) 0
+#endif /* CONFIG_HUGETLB_PAGE */
+
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 3f17b83f55a..bfc4e027e2a 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -90,7 +90,7 @@ extern unsigned int HPAGE_SHIFT;
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define HUGE_MAX_HSTATE 3
+#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index c9500d666a1..580cf73b96e 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,7 +3,8 @@
#include <linux/threads.h>
-#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */
+/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
+#define MAX_PGTABLE_INDEX_SIZE 0
extern void __bad_pte(pmd_t *pmd);
@@ -36,11 +37,10 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-static inline void pgtable_free(pgtable_free_t pgf)
+static inline void pgtable_free(void *table, unsigned index_size)
{
- void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
-
- free_page((unsigned long)p);
+ BUG_ON(index_size); /* 32-bit doesn't use this */
+ free_page((unsigned long)table);
}
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index e6f069c4f71..605f5c5398d 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -11,27 +11,34 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
-#ifndef CONFIG_PPC_SUBPAGE_PROT
-static inline void subpage_prot_free(pgd_t *pgd) {}
-#endif
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
-
-#define PGD_CACHE_NUM 0
-#define PUD_CACHE_NUM 1
-#define PMD_CACHE_NUM 1
-#define HUGEPTE_CACHE_NUM 2
-#define PTE_NONCACHE_NUM 7 /* from GFP rather than kmem_cache */
+#define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- subpage_prot_free(pgd);
- kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
#ifndef CONFIG_PPC_64K_PAGES
@@ -40,13 +47,13 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
GFP_KERNEL|__GFP_REPEAT);
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
- kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
+ kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -78,13 +85,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+ return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
GFP_KERNEL|__GFP_REPEAT);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
- kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
+ kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -107,24 +114,22 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
return page;
}
-static inline void pgtable_free(pgtable_free_t pgf)
+static inline void pgtable_free(void *table, unsigned index_size)
{
- void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
- int cachenum = pgf.val & PGF_CACHENUM_MASK;
-
- if (cachenum == PTE_NONCACHE_NUM)
- free_page((unsigned long)p);
- else
- kmem_cache_free(pgtable_cache[cachenum], p);
+ if (!index_size)
+ free_page((unsigned long)table);
+ else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
}
-#define __pmd_free_tlb(tlb, pmd,addr) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
- PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd, addr) \
+ pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
- PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+ pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+
#endif /* CONFIG_PPC_64K_PAGES */
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index f2e812de7c3..abe8532bd14 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -24,25 +24,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
__free_page(ptepage);
}
-typedef struct pgtable_free {
- unsigned long val;
-} pgtable_free_t;
-
-/* This needs to be big enough to allow for MMU_PAGE_COUNT + 2 to be stored
- * and small enough to fit in the low bits of any naturally aligned page
- * table cache entry. Arbitrarily set to 0x1f, that should give us some
- * room to grow
- */
-#define PGF_CACHENUM_MASK 0x1f
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
- unsigned long mask)
-{
- BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
- return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
@@ -50,12 +31,12 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
#endif
#ifdef CONFIG_SMP
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
extern void pte_free_finish(void);
#else /* CONFIG_SMP */
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
- pgtable_free(pgf);
+ pgtable_free(table, shift);
}
static inline void pte_free_finish(void) { }
#endif /* !CONFIG_SMP */
@@ -63,12 +44,9 @@ static inline void pte_free_finish(void) { }
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
unsigned long address)
{
- pgtable_free_t pgf = pgtable_free_cache(page_address(ptepage),
- PTE_NONCACHE_NUM,
- PTE_TABLE_SIZE-1);
tlb_flush_pgtable(tlb, address);
pgtable_page_dtor(ptepage);
- pgtable_free_tlb(tlb, pgf);
+ pgtable_free_tlb(tlb, page_address(ptepage), 0);
}
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 806abe7a3fa..49865045d56 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -354,6 +354,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
/*
@@ -378,7 +379,18 @@ void pgtable_cache_init(void);
return pt;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long address);
+#ifdef CONFIG_HUGETLB_PAGE
+pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+ unsigned *shift);
+#else
+static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
+ unsigned *shift)
+{
+ if (shift)
+ *shift = 0;
+ return find_linux_pte(pgdir, ea);
+}
+#endif /* !CONFIG_HUGETLB_PAGE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2a5da069714..21207e54825 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -211,6 +211,9 @@ extern void paging_init(void);
*/
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index dd5ea95fe61..d44826e4ff9 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -33,21 +33,21 @@
#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
#define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
+#define _PAGE_DIRTY 0x0100 /* C: page changed */
-/* These five software bits must be masked out when the entry is loaded
- * into the TLB.
+/* These 4 software bits must be masked out when the entry is loaded
+ * into the TLB; 1 SW bit is left (0x0080).
*/
#define _PAGE_GUARDED 0x0010 /* software: guarded access */
-#define _PAGE_DIRTY 0x0020 /* software: page changed */
-#define _PAGE_RW 0x0040 /* software: user write access allowed */
-#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+#define _PAGE_ACCESSED 0x0020 /* software: page referenced */
+#define _PAGE_WRITETHRU 0x0040 /* software: caching is write through */
/* Setting any bits in the nibble with the following two controls will
* require a TLB exception handler change. It is assumed unused bits
* are always zero.
*/
-#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
-#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
+#define _PAGE_RW 0x0400 /* lsb PP bits, inverted in HW */
+#define _PAGE_USER 0x0800 /* msb PP bits */
#define _PMD_PRESENT 0x0001
#define _PMD_BAD 0x0ff0
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 82b72207c51..c4490f9c67c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -76,41 +76,4 @@
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
-
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-/*
- * For the sub-page protection option, we extend the PGD with one of
- * these. Basically we have a 3-level tree, with the top level being
- * the protptrs array. To optimize speed and memory consumption when
- * only addresses < 4GB are being protected, pointers to the first
- * four pages of sub-page protection words are stored in the low_prot
- * array.
- * Each page of sub-page protection words protects 1GB (4 bytes
- * protects 64k). For the 3-level tree, each page of pointers then
- * protects 8TB.
- */
-struct subpage_prot_table {
- unsigned long maxaddr; /* only addresses < this are protected */
- unsigned int **protptrs[2];
- unsigned int *low_prot[4];
-};
-
-#undef PGD_TABLE_SIZE
-#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
- sizeof(struct subpage_prot_table))
-
-#define SBP_L1_BITS (PAGE_SHIFT - 2)
-#define SBP_L2_BITS (PAGE_SHIFT - 3)
-#define SBP_L1_COUNT (1 << SBP_L1_BITS)
-#define SBP_L2_COUNT (1 << SBP_L2_BITS)
-#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
-#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
-
-extern void subpage_prot_free(pgd_t *pgd);
-
-static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
-{
- return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
-}
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 8c341490cfc..cbd759e3cd7 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -140,6 +140,8 @@ extern void user_enable_single_step(struct task_struct *);
extern void user_enable_block_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index f388f0ab193..0947b36e534 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -87,7 +87,7 @@ extern spinlock_t cmxgcr_lock;
/* Export QE common operations */
#ifdef CONFIG_QUICC_ENGINE
-extern void __init qe_reset(void);
+extern void qe_reset(void);
#else
static inline void qe_reset(void) {}
#endif
@@ -145,8 +145,17 @@ static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
#endif /* CONFIG_QE_GPIO */
-/* QE internal API */
+#ifdef CONFIG_QUICC_ENGINE
int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
+#else
+static inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
+ u32 cmd_input)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_QUICC_ENGINE */
+
+/* QE internal API */
enum qe_clock qe_clock_source(const char *source);
unsigned int qe_get_brg_clk(void);
int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier);
@@ -154,7 +163,28 @@ int qe_get_snum(void);
void qe_put_snum(u8 snum);
unsigned int qe_get_num_of_risc(void);
unsigned int qe_get_num_of_snums(void);
-int qe_alive_during_sleep(void);
+
+static inline int qe_alive_during_sleep(void)
+{
+ /*
+ * MPC8568E reference manual says:
+ *
+ * "...power down sequence waits for all I/O interfaces to become idle.
+ * In some applications this may happen eventually without actively
+ * shutting down interfaces, but most likely, software will have to
+ * take steps to shut down the eTSEC, QUICC Engine Block, and PCI
+ * interfaces before issuing the command (either the write to the core
+ * MSR[WE] as described above or writing to POWMGTCSR) to put the
+ * device into sleep state."
+ *
+ * MPC8569E reference manual has a similar paragraph.
+ */
+#ifdef CONFIG_PPC_85xx
+ return 0;
+#else
+ return 1;
+#endif
+}
/* we actually use cpm_muram implementation, define this for convenience */
#define qe_muram_init cpm_muram_init
@@ -210,8 +240,15 @@ struct qe_firmware_info {
u64 extended_modes; /* Extended modes */
};
+#ifdef CONFIG_QUICC_ENGINE
/* Upload a firmware to the QE */
int qe_upload_firmware(const struct qe_firmware *firmware);
+#else
+static inline int qe_upload_firmware(const struct qe_firmware *firmware)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_QUICC_ENGINE */
/* Obtain information on the uploaded firmware */
struct qe_firmware_info *qe_get_firmware_info(void);
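
The hunks above compile the QE calls out to static inline stubs that return -ENOSYS when CONFIG_QUICC_ENGINE is not set, so callers build unchanged either way and the #ifdef lives once in the header rather than at every call site. A minimal user-space sketch of that pattern (foo_issue_cmd and CONFIG_FOO_ENGINE are made-up names, not part of the patch):

#include <errno.h>
#include <stdio.h>

/* Toggle this to simulate enabling the Kconfig option. */
/* #define CONFIG_FOO_ENGINE 1 */

#ifdef CONFIG_FOO_ENGINE
int foo_issue_cmd(unsigned int cmd)
{
        /* a real driver implementation would live elsewhere */
        (void)cmd;
        return 0;
}
#else
/* compiled-out stub: callers still build and get a clean error at run time */
static inline int foo_issue_cmd(unsigned int cmd)
{
        (void)cmd;
        return -ENOSYS;
}
#endif

int main(void)
{
        int ret = foo_issue_cmd(1);

        if (ret == -ENOSYS)
                printf("engine not built in, caller handles -ENOSYS\n");
        return 0;
}
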
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 168fce72620..20de73c3668 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@ struct rtas_t {
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d9ea8d39c34..1d3b270d308 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -37,7 +37,7 @@ extern void cpu_die(void);
extern void smp_send_debugger_break(int cpu);
extern void smp_message_recv(int);
-DECLARE_PER_CPU(unsigned int, pvr);
+DECLARE_PER_CPU(unsigned int, cpu_pvr);
#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(cpumask_t map);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 198266cf9e2..764094cff68 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,7 +28,7 @@
#include <asm/asm-compat.h>
#include <asm/synch.h>
-#define __raw_spin_is_locked(x) ((x)->slock != 0)
+#define arch_spin_is_locked(x) ((x)->slock != 0)
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
@@ -54,7 +54,7 @@
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
*/
-static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, token;
@@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
return tmp;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
- return arch_spin_trylock(lock) == 0;
+ return __arch_spin_trylock(lock) == 0;
}
/*
@@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
-extern void __spin_yield(raw_spinlock_t *lock);
-extern void __rw_yield(raw_rwlock_t *lock);
+extern void __spin_yield(arch_spinlock_t *lock);
+extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
#define SHARED_PROCESSOR 0
#endif
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
CLEAR_IO_SYNC;
while (1) {
- if (likely(arch_spin_trylock(lock) == 0))
+ if (likely(__arch_spin_trylock(lock) == 0))
break;
do {
HMT_low();
@@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
}
static inline
-void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long flags_dis;
CLEAR_IO_SYNC;
while (1) {
- if (likely(arch_spin_trylock(lock) == 0))
+ if (likely(__arch_spin_trylock(lock) == 0))
break;
local_save_flags(flags_dis);
local_irq_restore(flags);
@@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
}
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
SYNC_IO;
- __asm__ __volatile__("# __raw_spin_unlock\n\t"
+ __asm__ __volatile__("# arch_spin_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
lock->slock = 0;
}
#ifdef CONFIG_PPC64
-extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
+extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
/*
@@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
* read-locks.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_write_can_lock(rw) (!(rw)->lock)
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND "extsw %0,%0\n"
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
* This returns the old value in the lock + 1,
* so we got a read lock if the return value is > 0.
*/
-static inline long arch_read_trylock(raw_rwlock_t *rw)
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
long tmp;
@@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw)
* This returns the old value in the lock,
* so we got the write lock if the return value is 0.
*/
-static inline long arch_write_trylock(raw_rwlock_t *rw)
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
long tmp, token;
@@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw)
return tmp;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_read_trylock(rw) > 0))
+ if (likely(__arch_read_trylock(rw) > 0))
break;
do {
HMT_low();
@@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
}
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
while (1) {
- if (likely(arch_write_trylock(rw) == 0))
+ if (likely(__arch_write_trylock(rw) == 0))
break;
do {
HMT_low();
@@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
}
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- return arch_read_trylock(rw) > 0;
+ return __arch_read_trylock(rw) > 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- return arch_write_trylock(rw) == 0;
+ return __arch_write_trylock(rw) == 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
long tmp;
@@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "cr0", "xer", "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
LWSYNC_ON_SMP: : :"memory");
rw->lock = 0;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) __spin_yield(lock)
-#define _raw_read_relax(lock) __rw_yield(lock)
-#define _raw_write_relax(lock) __rw_yield(lock)
+#define arch_spin_relax(lock) __spin_yield(lock)
+#define arch_read_relax(lock) __rw_yield(lock)
+#define arch_write_relax(lock) __rw_yield(lock)
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 74236c9f05b..2351adc4fdc 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile signed int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
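
The renames above reserve the arch_ prefix for the architecture-level primitives (arch_spinlock_t, arch_spin_lock(), ...) that the generic locking layer then wraps. A rough user-space sketch of that two-layer split, using C11 atomics as a stand-in for the real lwarx/stwcx. implementation; the raw_spin_* wrappers here are simplified analogues for illustration, not the kernel's actual definitions:

#include <stdatomic.h>
#include <stdio.h>

/* architecture layer: the lock word and the low-level acquire/release */
typedef struct {
        atomic_flag slock;
} arch_spinlock_t;

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (atomic_flag_test_and_set_explicit(&lock->slock,
                                                 memory_order_acquire))
                ;       /* spin */
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        atomic_flag_clear_explicit(&lock->slock, memory_order_release);
}

/* generic layer: builds on the arch primitive and keeps the raw_ name */
typedef struct {
        arch_spinlock_t raw_lock;
} raw_spinlock_t;

static inline void raw_spin_lock(raw_spinlock_t *lock)
{
        arch_spin_lock(&lock->raw_lock);
}

static inline void raw_spin_unlock(raw_spinlock_t *lock)
{
        arch_spin_unlock(&lock->raw_lock);
}

int main(void)
{
        raw_spinlock_t lock;

        atomic_flag_clear(&lock.raw_lock.slock);        /* unlocked state */
        raw_spin_lock(&lock);
        printf("critical section\n");
        raw_spin_unlock(&lock);
        return 0;
}
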
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index c7d671a7d9a..07d2d19ab5e 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -145,7 +145,7 @@ SYSCALL_SPU(setfsuid)
SYSCALL_SPU(setfsgid)
SYSCALL_SPU(llseek)
COMPAT_SYS_SPU(getdents)
-SYSX_SPU(sys_select,ppc32_select,ppc_select)
+SYSX_SPU(sys_select,ppc32_select,sys_select)
SYSCALL_SPU(flock)
SYSCALL_SPU(msync)
COMPAT_SYS_SPU(readv)
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index cd21e5e6b04..11ae699135b 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -51,6 +51,7 @@ extern void __init udbg_init_btext(void);
extern void __init udbg_init_44x_as1(void);
extern void __init udbg_init_40x_realmode(void);
extern void __init udbg_init_cpm(void);
+extern void __init udbg_init_usbgecko(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b23664a0b86..c002b041021 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -42,10 +42,11 @@ obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
obj-$(CONFIG_PPC_CLOCK) += clock.o
-procfs-$(CONFIG_PPC64) := proc_ppc64.o
+procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
+obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 3839839f83c..b876e989220 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -642,10 +642,14 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
*/
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
unsigned int areg, struct pt_regs *regs,
- unsigned int flags, unsigned int length)
+ unsigned int flags, unsigned int length,
+ unsigned int elsize)
{
char *ptr;
+ unsigned long *lptr;
int ret = 0;
+ int sw = 0;
+ int i, j;
flush_vsx_to_thread(current);
@@ -654,19 +658,35 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
else
ptr = (char *) &current->thread.vr[reg - 32];
- if (flags & ST)
- ret = __copy_to_user(addr, ptr, length);
- else {
- if (flags & SPLT){
- ret = __copy_from_user(ptr, addr, length);
- ptr += length;
+ lptr = (unsigned long *) ptr;
+
+ if (flags & SW)
+ sw = elsize-1;
+
+ for (j = 0; j < length; j += elsize) {
+ for (i = 0; i < elsize; ++i) {
+ if (flags & ST)
+ ret |= __put_user(ptr[i^sw], addr + i);
+ else
+ ret |= __get_user(ptr[i^sw], addr + i);
}
- ret |= __copy_from_user(ptr, addr, length);
+ ptr += elsize;
+ addr += elsize;
}
- if (flags & U)
- regs->gpr[areg] = regs->dar;
- if (ret)
+
+ if (!ret) {
+ if (flags & U)
+ regs->gpr[areg] = regs->dar;
+
+ /* Splat load copies the same data to top and bottom 8 bytes */
+ if (flags & SPLT)
+ lptr[1] = lptr[0];
+ /* For 8 byte loads, zero the top 8 bytes */
+ else if (!(flags & ST) && (8 == length))
+ lptr[1] = 0;
+ } else
return -EFAULT;
+
return 1;
}
#endif
@@ -767,16 +787,25 @@ int fix_alignment(struct pt_regs *regs)
#ifdef CONFIG_VSX
if ((instruction & 0xfc00003e) == 0x7c000018) {
- /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */
+ unsigned int elsize;
+
+ /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
reg |= (instruction & 0x1) << 5;
/* Simple inline decoder instead of a table */
+ /* VSX has only 8 and 16 byte memory accesses */
+ nb = 8;
if (instruction & 0x200)
nb = 16;
- else if (instruction & 0x080)
- nb = 8;
- else
- nb = 4;
+
+ /* Vector stores in little-endian mode swap individual
+ elements, so process them separately */
+ elsize = 4;
+ if (instruction & 0x80)
+ elsize = 8;
+
flags = 0;
+ if (regs->msr & MSR_LE)
+ flags |= SW;
if (instruction & 0x100)
flags |= ST;
if (instruction & 0x040)
@@ -787,7 +816,7 @@ int fix_alignment(struct pt_regs *regs)
nb = 8;
}
PPC_WARN_ALIGNMENT(vsx, regs);
- return emulate_vsx(addr, reg, areg, regs, flags, nb);
+ return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
}
#endif
/* A size of 0 indicates an instruction we don't support, with
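
The rewritten emulate_vsx() loop above swaps bytes within each element by XOR-ing the byte index with sw = elsize - 1 when the SW flag is set. A small stand-alone C illustration of that indexing trick (copy_elements() is an invented helper, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Copy 'length' bytes element by element, reversing byte order inside each
 * 'elsize'-byte element when 'swap' is set -- the same i^sw indexing as the
 * emulate_vsx() loop above, with sw = elsize - 1. */
static void copy_elements(uint8_t *dst, const uint8_t *src,
                          unsigned length, unsigned elsize, int swap)
{
        unsigned sw = swap ? elsize - 1 : 0;
        unsigned i, j;

        for (j = 0; j < length; j += elsize)
                for (i = 0; i < elsize; i++)
                        dst[j + i] = src[j + (i ^ sw)];
}

int main(void)
{
        uint8_t src[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
        uint8_t dst[8];

        copy_elements(dst, src, 8, 4, 1);       /* two 4-byte elements, swapped */
        for (int k = 0; k < 8; k++)
                printf("%02x ", dst[k]);
        printf("\n");                           /* prints: 33 22 11 00 77 66 55 44 */
        return 0;
}

With elsize = 4 each 4-byte element comes out byte-reversed while the element order is preserved, which matches the little-endian swap behaviour described in the fix_alignment() comment above.
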
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0812b0f414b..a6c2b63227b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -190,6 +190,11 @@ int main(void)
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
+ DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
+ DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
+#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -398,14 +403,24 @@ int main(void)
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+
+ /* book3s_64 */
+#ifdef CONFIG_PPC64
+ DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+ DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
+ DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
+ DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
+ DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+ DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
+ DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
+ DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+ DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+#endif
#endif
#ifdef CONFIG_44x
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
-#ifdef CONFIG_FSL_BOOKE
- DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
-#endif
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 03c862b6a9c..2fc82bac3bb 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -697,9 +697,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc750",
},
- { /* 750CL */
- .pvr_mask = 0xfffff0f0,
- .pvr_value = 0x00087010,
+ { /* 750CL (and "Broadway") */
+ .pvr_mask = 0xfffff0e0,
+ .pvr_value = 0x00087000,
.cpu_name = "750CL",
.cpu_features = CPU_FTRS_750CL,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 0a8439aafdd..6f4613dd05e 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -373,7 +373,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
hard_irq_disable();
for_each_irq(i) {
- struct irq_desc *desc = irq_desc + i;
+ struct irq_desc *desc = irq_to_desc(i);
if (desc->status & IRQ_INPROGRESS)
desc->chip->eoi(i);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index e96cbbd9b44..59c928564a0 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -21,7 +21,6 @@
#include <asm/dma.h>
#include <asm/abs_addr.h>
-int swiotlb __read_mostly;
unsigned int ppc_swiotlb_enable;
/*
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index c7eb4e0eb86..e3be98ffe2a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -41,6 +41,7 @@ __start_interrupts:
. = 0x200
_machine_check_pSeries:
HMT_MEDIUM
+ DO_KVM 0x200
mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
@@ -48,6 +49,7 @@ _machine_check_pSeries:
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
+ DO_KVM 0x300
mtspr SPRN_SPRG_SCRATCH0,r13
BEGIN_FTR_SECTION
mfspr r13,SPRN_SPRG_PACA
@@ -77,6 +79,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
+ DO_KVM 0x380
mtspr SPRN_SPRG_SCRATCH0,r13
mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
@@ -115,6 +118,7 @@ data_access_slb_pSeries:
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
+ DO_KVM 0x480
mtspr SPRN_SPRG_SCRATCH0,r13
mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
@@ -154,6 +158,7 @@ instruction_access_slb_pSeries:
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
+ DO_KVM 0xc00
BEGIN_FTR_SECTION
cmpdi r0,0x1ebe
beq- 1f
@@ -187,14 +192,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
*/
performance_monitor_pSeries_1:
. = 0xf00
+ DO_KVM 0xf00
b performance_monitor_pSeries
altivec_unavailable_pSeries_1:
. = 0xf20
+ DO_KVM 0xf20
b altivec_unavailable_pSeries
vsx_unavailable_pSeries_1:
. = 0xf40
+ DO_KVM 0xf40
b vsx_unavailable_pSeries
#ifdef CONFIG_CBE_RAS
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 829c3fe7c5a..e025e89fe93 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -164,6 +164,9 @@ __after_mmu_off:
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
bl setup_cpm_bat
#endif
+#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
+ bl setup_usbgecko_bat
+#endif
/*
* Call setup_cpu for CPU 0 and initialize 6xx Idle
@@ -1203,6 +1206,28 @@ setup_cpm_bat:
blr
#endif
+#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
+setup_usbgecko_bat:
+ /* prepare a BAT for early io */
+#if defined(CONFIG_GAMECUBE)
+ lis r8, 0x0c00
+#elif defined(CONFIG_WII)
+ lis r8, 0x0d00
+#else
+#error Invalid platform for USB Gecko based early debugging.
+#endif
+ /*
+ * The virtual address used must match the virtual address
+ * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
+ */
+ lis r11, 0xfffe /* top 128K */
+ ori r8, r8, 0x002a /* uncached, guarded, rw */
+ ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
+ mtspr SPRN_DBAT1L, r8
+ mtspr SPRN_DBAT1U, r11
+ blr
+#endif
+
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
* We first disable the MMU, and then jump to the ROM reset address.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index c38afdb45d7..92580748802 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,6 +37,7 @@
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
+#include <asm/kvm_book3s_64_asm.h>
/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -165,6 +166,12 @@ exception_marker:
#include "exceptions-64s.S"
#endif
+/* KVM trampoline code needs to be close to the interrupt handlers */
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include "../kvm/book3s_64_rmhandlers.S"
+#endif
+
_GLOBAL(generic_secondary_thread_init)
mr r24,r3
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6ded19d0189..678f98cd5e6 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -206,6 +206,8 @@ MachineCheck:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
+ li r5,0x00f0
+ mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -222,6 +224,8 @@ DataAccess:
stw r10,_DSISR(r11)
mr r5,r10
mfspr r4,SPRN_DAR
+ li r10,0x00f0
+ mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
EXC_XFER_EE_LITE(0x300, handle_page_fault)
/* Instruction access exception.
@@ -244,6 +248,8 @@ Alignment:
EXCEPTION_PROLOG
mfspr r4,SPRN_DAR
stw r4,_DAR(r11)
+ li r5,0x00f0
+ mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -333,26 +339,20 @@ InstructionTLBMiss:
mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
lwz r10, 0(r11) /* Get the pte */
-#ifdef CONFIG_SWAP
- /* do not set the _PAGE_ACCESSED bit of a non-present page */
- andi. r11, r10, _PAGE_PRESENT
- beq 4f
- ori r10, r10, _PAGE_ACCESSED
- mfspr r11, SPRN_MD_TWC /* get the pte address again */
- stw r10, 0(r11)
-4:
-#else
- ori r10, r10, _PAGE_ACCESSED
- stw r10, 0(r11)
-#endif
+ andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
+ cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
+ bne- cr0, 2f
+
+ /* Clear PP lsb, 0x400 */
+ rlwinm r10, r10, 0, 22, 20
/* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 21, 22 and 28 must be clear.
+ * Software indicator bits 22 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
-2: li r11, 0x00f0
+ li r11, 0x00f0
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x2d80, r3)
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
@@ -365,6 +365,22 @@ InstructionTLBMiss:
lwz r3, 8(r0)
#endif
rfi
+2:
+ mfspr r11, SPRN_SRR1
+ /* clear all error bits, as the TLB Miss handler
+ * sets a few unconditionally
+ */
+ rlwinm r11, r11, 0, 0xffff
+ mtspr SPRN_SRR1, r11
+
+ mfspr r10, SPRN_M_TW /* Restore registers */
+ lwz r11, 0(r0)
+ mtcr r11
+ lwz r11, 4(r0)
+#ifdef CONFIG_8xx_CPU6
+ lwz r3, 8(r0)
+#endif
+ b InstructionAccess
. = 0x1200
DataStoreTLBMiss:
@@ -406,29 +422,45 @@ DataStoreTLBMiss:
* above.
*/
rlwimi r11, r10, 0, 27, 27
+ /* Insert the WriteThru flag into the TWC from the Linux PTE.
+ * It is bit 25 in the Linux PTE and bit 30 in the TWC
+ */
+ rlwimi r11, r10, 32-5, 30, 30
DO_8xx_CPU6(0x3b80, r3)
mtspr SPRN_MD_TWC, r11
-#ifdef CONFIG_SWAP
- /* do not set the _PAGE_ACCESSED bit of a non-present page */
- andi. r11, r10, _PAGE_PRESENT
- beq 4f
- ori r10, r10, _PAGE_ACCESSED
-4:
- /* and update pte in table */
-#else
- ori r10, r10, _PAGE_ACCESSED
-#endif
- mfspr r11, SPRN_MD_TWC /* get the pte address again */
- stw r10, 0(r11)
+ /* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set.
+ * We also need to know if the insn is a load/store, so:
+ * clear _PAGE_PRESENT and load that, which will
+ * trap into DTLB Error with the store bit set accordingly.
+ */
+ /* PRESENT=0x1, ACCESSED=0x20
+ * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+ * r10 = (r10 & ~PRESENT) | r11;
+ */
+ rlwinm r11, r10, 32-5, _PAGE_PRESENT
+ and r11, r11, r10
+ rlwimi r10, r11, 0, _PAGE_PRESENT
+
+ /* Honour kernel RO, User NA */
+ /* 0x200 == Extended encoding, bit 22 */
+ /* r11 = (r10 & _PAGE_USER) >> 2 */
+ rlwinm r11, r10, 32-2, 0x200
+ or r10, r11, r10
+ /* r11 = (r10 & _PAGE_RW) >> 1 */
+ rlwinm r11, r10, 32-1, 0x200
+ or r10, r11, r10
+ /* invert RW and 0x200 bits */
+ xori r10, r10, _PAGE_RW | 0x200
/* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 21, 22 and 28 must be clear.
+ * Software indicator bits 22 and 28 must be clear.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
2: li r11, 0x00f0
+ mtspr SPRN_DAR,r11 /* Tag DAR */
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
DO_8xx_CPU6(0x3d80, r3)
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
@@ -469,97 +501,10 @@ DataTLBError:
stw r10, 0(r0)
stw r11, 4(r0)
- /* First, make sure this was a store operation.
- */
- mfspr r10, SPRN_DSISR
- andis. r11, r10, 0x0200 /* If set, indicates store op */
- beq 2f
-
- /* The EA of a data TLB miss is automatically stored in the MD_EPN
- * register. The EA of a data TLB error is automatically stored in
- * the DAR, but not the MD_EPN register. We must copy the 20 most
- * significant bits of the EA from the DAR to MD_EPN before we
- * start walking the page tables. We also need to copy the CASID
- * value from the M_CASID register.
- * Addendum: The EA of a data TLB error is _supposed_ to be stored
- * in DAR, but it seems that this doesn't happen in some cases, such
- * as when the error is due to a dcbi instruction to a page with a
- * TLB that doesn't have the changed bit set. In such cases, there
- * does not appear to be any way to recover the EA of the error
- * since it is neither in DAR nor MD_EPN. As a workaround, the
- * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
- * are initialized in mapin_ram(). This will avoid the problem,
- * assuming we only use the dcbi instruction on kernel addresses.
- */
mfspr r10, SPRN_DAR
- rlwinm r11, r10, 0, 0, 19
- ori r11, r11, MD_EVALID
- mfspr r10, SPRN_M_CASID
- rlwimi r11, r10, 0, 28, 31
- DO_8xx_CPU6(0x3780, r3)
- mtspr SPRN_MD_EPN, r11
-
- mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
-
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
- andi. r11, r10, 0x0800
- beq 3f
- lis r11, swapper_pg_dir@h
- ori r11, r11, swapper_pg_dir@l
- rlwimi r10, r11, 0, 2, 19
-3:
- lwz r11, 0(r10) /* Get the level 1 entry */
- rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
- beq 2f /* If zero, bail */
-
- /* We have a pte table, so fetch the pte from the table.
- */
- ori r11, r11, 1 /* Set valid bit in physical L2 page */
- DO_8xx_CPU6(0x3b80, r3)
- mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
- mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
- lwz r10, 0(r11) /* Get the pte */
-
- andi. r11, r10, _PAGE_RW /* Is it writeable? */
- beq 2f /* Bail out if not */
-
- /* Update 'changed', among others.
- */
-#ifdef CONFIG_SWAP
- ori r10, r10, _PAGE_DIRTY|_PAGE_HWWRITE
- /* do not set the _PAGE_ACCESSED bit of a non-present page */
- andi. r11, r10, _PAGE_PRESENT
- beq 4f
- ori r10, r10, _PAGE_ACCESSED
-4:
-#else
- ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
-#endif
- mfspr r11, SPRN_MD_TWC /* Get pte address again */
- stw r10, 0(r11) /* and update pte in table */
-
- /* The Linux PTE won't go exactly into the MMU TLB.
- * Software indicator bits 21, 22 and 28 must be clear.
- * Software indicator bits 24, 25, 26, and 27 must be
- * set. All other Linux PTE bits control the behavior
- * of the MMU.
- */
- li r11, 0x00f0
- rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
- DO_8xx_CPU6(0x3d80, r3)
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
- mfspr r10, SPRN_M_TW /* Restore registers */
- lwz r11, 0(r0)
- mtcr r11
- lwz r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
- lwz r3, 8(r0)
-#endif
- rfi
-2:
+ cmpwi cr0, r10, 0x00f0
+ beq- FixupDAR /* must be a buggy dcbX, icbi insn. */
+DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR */
mfspr r10, SPRN_M_TW /* Restore registers */
lwz r11, 0(r0)
mtcr r11
@@ -588,6 +533,140 @@ DataTLBError:
. = 0x2000
+/* This is the procedure to calculate the data EA for buggy dcbx/dcbi instructions
+ * by decoding the registers used by the dcbx instruction and adding them.
+ * DAR is set to the calculated address and r10 also holds the EA on exit.
+ */
+ /* define if you don't want to use self modifying code */
+#define NO_SELF_MODIFYING_CODE
+FixupDAR:/* Entry point for dcbx workaround. */
+ /* fetch instruction from memory. */
+ mfspr r10, SPRN_SRR0
+ DO_8xx_CPU6(0x3780, r3)
+ mtspr SPRN_MD_EPN, r10
+ mfspr r11, SPRN_M_TWB /* Get level 1 table entry address */
+ cmplwi cr0, r11, 0x0800
+ blt- 3f /* Branch if user space */
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
+ ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
+ rlwimi r11, r10, 32-20, 0xffc /* r11 = r11&~0xffc|(r10>>20)&0xffc */
+3: lwz r11, 0(r11) /* Get the level 1 entry */
+ DO_8xx_CPU6(0x3b80, r3)
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
+ lwz r11, 0(r11) /* Get the pte */
+ /* concat physical page address(r11) and page offset(r10) */
+ rlwimi r11, r10, 0, 20, 31
+ lwz r11,0(r11)
+/* Check if it really is a dcbx instruction. */
+/* dcbt and dcbtst do not generate DTLB Misses/Errors,
+ * so there is no need to include them here */
+ srwi r10, r11, 26 /* check if major OP code is 31 */
+ cmpwi cr0, r10, 31
+ bne- 141f
+ rlwinm r10, r11, 0, 21, 30
+ cmpwi cr0, r10, 2028 /* Is dcbz? */
+ beq+ 142f
+ cmpwi cr0, r10, 940 /* Is dcbi? */
+ beq+ 142f
+ cmpwi cr0, r10, 108 /* Is dcbst? */
+ beq+ 144f /* Fix up store bit! */
+ cmpwi cr0, r10, 172 /* Is dcbf? */
+ beq+ 142f
+ cmpwi cr0, r10, 1964 /* Is icbi? */
+ beq+ 142f
+141: mfspr r10, SPRN_DAR /* r10 must hold DAR at exit */
+ b DARFixed /* Nope, go back to normal TLB processing */
+
+144: mfspr r10, SPRN_DSISR
+ rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
+ mtspr SPRN_DSISR, r10
+142: /* continue, it was a dcbx, dcbi instruction. */
+#ifdef CONFIG_8xx_CPU6
+ lwz r3, 8(r0) /* restore r3 from memory */
+#endif
+#ifndef NO_SELF_MODIFYING_CODE
+ andis. r10,r11,0x1f /* test if reg RA is r0 */
+ li r10,modified_instr@l
+ dcbtst r0,r10 /* touch for store */
+ rlwinm r11,r11,0,0,20 /* Zero lower 10 bits */
+ oris r11,r11,640 /* Transform instr. to a "add r10,RA,RB" */
+ ori r11,r11,532
+ stw r11,0(r10) /* store add/and instruction */
+ dcbf 0,r10 /* flush new instr. to memory. */
+ icbi 0,r10 /* invalidate instr. cache line */
+ lwz r11, 4(r0) /* restore r11 from memory */
+ mfspr r10, SPRN_M_TW /* restore r10 from M_TW */
+ isync /* Wait until new instr is loaded from memory */
+modified_instr:
+ .space 4 /* this is where the add instr. is stored */
+ bne+ 143f
+ subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */
+143: mtdar r10 /* store faulting EA in DAR */
+ b DARFixed /* Go back to normal TLB handling */
+#else
+ mfctr r10
+ mtdar r10 /* save ctr reg in DAR */
+ rlwinm r10, r11, 24, 24, 28 /* offset into jump table for reg RB */
+ addi r10, r10, 150f@l /* add start of table */
+ mtctr r10 /* load ctr with jump address */
+ xor r10, r10, r10 /* sum starts at zero */
+ bctr /* jump into table */
+150:
+ add r10, r10, r0 ;b 151f
+ add r10, r10, r1 ;b 151f
+ add r10, r10, r2 ;b 151f
+ add r10, r10, r3 ;b 151f
+ add r10, r10, r4 ;b 151f
+ add r10, r10, r5 ;b 151f
+ add r10, r10, r6 ;b 151f
+ add r10, r10, r7 ;b 151f
+ add r10, r10, r8 ;b 151f
+ add r10, r10, r9 ;b 151f
+ mtctr r11 ;b 154f /* r10 needs special handling */
+ mtctr r11 ;b 153f /* r11 needs special handling */
+ add r10, r10, r12 ;b 151f
+ add r10, r10, r13 ;b 151f
+ add r10, r10, r14 ;b 151f
+ add r10, r10, r15 ;b 151f
+ add r10, r10, r16 ;b 151f
+ add r10, r10, r17 ;b 151f
+ add r10, r10, r18 ;b 151f
+ add r10, r10, r19 ;b 151f
+ add r10, r10, r20 ;b 151f
+ add r10, r10, r21 ;b 151f
+ add r10, r10, r22 ;b 151f
+ add r10, r10, r23 ;b 151f
+ add r10, r10, r24 ;b 151f
+ add r10, r10, r25 ;b 151f
+ add r10, r10, r26 ;b 151f
+ add r10, r10, r27 ;b 151f
+ add r10, r10, r28 ;b 151f
+ add r10, r10, r29 ;b 151f
+ add r10, r10, r30 ;b 151f
+ add r10, r10, r31
+151:
+ rlwinm. r11,r11,19,24,28 /* offset into jump table for reg RA */
+ beq 152f /* if reg RA is zero, don't add it */
+ addi r11, r11, 150b@l /* add start of table */
+ mtctr r11 /* load ctr with jump address */
+ rlwinm r11,r11,0,16,10 /* make sure we don't execute this more than once */
+ bctr /* jump into table */
+152:
+ mfdar r11
+ mtctr r11 /* restore ctr reg from DAR */
+ mtdar r10 /* save fault EA to DAR */
+ b DARFixed /* Go back to normal TLB handling */
+
+ /* special handling for r10,r11 since these are modified already */
+153: lwz r11, 4(r0) /* load r11 from memory */
+ b 155f
+154: mfspr r11, SPRN_M_TW /* load r10 from M_TW */
+155: add r10, r10, r11 /* add it */
+ mfctr r11 /* restore r11 */
+ b 151b
+#endif
+
.globl giveup_fpu
giveup_fpu:
blr
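
FixupDAR above reconstructs the effective address of a buggy dcbx/icbi instruction from its RA/RB register fields, i.e. EA = (RA ? GPR[RA] : 0) + GPR[RB]. A hedged user-space sketch of the same decode; dcbx_ea() and the gpr[] array are illustrative, and the dcbf r4,r5 encoding below is my own worked example:

#include <stdio.h>
#include <stdint.h>

/* Compute the effective address of an X-form dcbx/icbi instruction the way
 * the FixupDAR workaround does: EA = (RA ? GPR[RA] : 0) + GPR[RB].
 * The array is just a stand-in for the saved general-purpose registers. */
static uint32_t dcbx_ea(uint32_t insn, const uint32_t gpr[32])
{
        uint32_t ra = (insn >> 16) & 0x1f;      /* bits 11-15, IBM numbering */
        uint32_t rb = (insn >> 11) & 0x1f;      /* bits 16-20 */

        return (ra ? gpr[ra] : 0) + gpr[rb];
}

int main(void)
{
        uint32_t gpr[32] = { 0 };
        /* dcbf r4,r5: opcode 31, RA=4, RB=5, extended opcode 86 -> 0x7c0428ac */
        uint32_t insn = 0x7c0428ac;

        gpr[4] = 0x1000;
        gpr[5] = 0x0230;
        printf("EA = 0x%08x\n", (unsigned)dcbx_ea(insn, gpr));  /* EA = 0x00001230 */
        return 0;
}
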
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 975788ca05d..7f4bd7f3b6a 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -944,28 +944,6 @@ _GLOBAL(__setup_e500mc_ivors)
blr
/*
- * extern void loadcam_entry(unsigned int index)
- *
- * Load TLBCAM[index] entry in to the L2 CAM MMU
- */
-_GLOBAL(loadcam_entry)
- lis r4,TLBCAM@ha
- addi r4,r4,TLBCAM@l
- mulli r5,r3,TLBCAM_SIZE
- add r3,r5,r4
- lwz r4,0(r3)
- mtspr SPRN_MAS0,r4
- lwz r4,4(r3)
- mtspr SPRN_MAS1,r4
- lwz r4,8(r3)
- mtspr SPRN_MAS2,r4
- lwz r4,12(r3)
- mtspr SPRN_MAS3,r4
- tlbwe
- isync
- blr
-
-/*
* extern void giveup_altivec(struct task_struct *prev)
*
* The e500 core does not have an AltiVec unit.
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 1882bf419fa..8dc7547c237 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -161,7 +161,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
dest++;
n--;
}
- while(n > 4) {
+ while(n >= 4) {
*((u32 *)dest) = *((volatile u32 *)vsrc);
eieio();
vsrc += 4;
@@ -190,7 +190,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
vdest++;
n--;
}
- while(n > 4) {
+ while(n >= 4) {
*((volatile u32 *)vdest) = *((volatile u32 *)src);
src += 4;
vdest += 4;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index fd51578e29d..5547ae6e6b0 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -30,7 +30,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
@@ -251,7 +251,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
}
ppc_md.tce_free(tbl, entry, npages);
- iommu_area_free(tbl->it_map, free_entry, npages);
+ bitmap_clear(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 02a334662cc..9040330b053 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -87,7 +87,10 @@ extern int tau_interrupts(int);
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
+
+#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
+#endif
int distribute_irqs = 1;
@@ -189,33 +192,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "CPU%d ", j);
seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
- action = desc->action;
- if (!action || !action->handler)
- goto skip;
- seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#else
- seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
- if (desc->chip)
- seq_printf(p, " %s ", desc->chip->typename);
- else
- seq_puts(p, " None ");
- seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
- seq_printf(p, " %s", action->name);
- for (action = action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
- seq_putc(p, '\n');
-skip:
- spin_unlock_irqrestore(&desc->lock, flags);
- } else if (i == NR_IRQS) {
+ } else if (i == nr_irqs) {
#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
if (tau_initialized){
seq_puts(p, "TAU: ");
@@ -225,30 +202,68 @@ skip:
}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/
seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
+
+ return 0;
}
+
+ desc = irq_to_desc(i);
+ if (!desc)
+ return 0;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ action = desc->action;
+ if (!action || !action->handler)
+ goto skip;
+
+ seq_printf(p, "%3d: ", i);
+#ifdef CONFIG_SMP
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+#else
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#endif /* CONFIG_SMP */
+
+ if (desc->chip)
+ seq_printf(p, " %s ", desc->chip->name);
+ else
+ seq_puts(p, " None ");
+
+ seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
+ seq_printf(p, " %s", action->name);
+
+ for (action = action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+ seq_putc(p, '\n');
+
+skip:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
+ struct irq_desc *desc;
unsigned int irq;
static int warned;
for_each_irq(irq) {
cpumask_t mask;
- if (irq_desc[irq].status & IRQ_PER_CPU)
+ desc = irq_to_desc(irq);
+ if (desc && desc->status & IRQ_PER_CPU)
continue;
- cpumask_and(&mask, irq_desc[irq].affinity, &map);
+ cpumask_and(&mask, desc->affinity, &map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
}
- if (irq_desc[irq].chip->set_affinity)
- irq_desc[irq].chip->set_affinity(irq, &mask);
- else if (irq_desc[irq].action && !(warned++))
+ if (desc->chip->set_affinity)
+ desc->chip->set_affinity(irq, &mask);
+ else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
@@ -275,7 +290,7 @@ static inline void handle_one_irq(unsigned int irq)
return;
}
- desc = irq_desc + irq;
+ desc = irq_to_desc(irq);
saved_sp_limit = current->thread.ksp_limit;
irqtp->task = curtp->task;
@@ -541,7 +556,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
smp_wmb();
/* Clear norequest flags */
- get_irq_desc(i)->status &= ~IRQ_NOREQUEST;
+ irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
/* Legacy flags are left to default at this point,
* one can then use irq_create_mapping() to
@@ -607,8 +622,16 @@ void irq_set_virq_count(unsigned int count)
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
irq_hw_number_t hwirq)
{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc_alloc_node(virq, 0);
+ if (!desc) {
+ pr_debug("irq: -> allocating desc failed\n");
+ goto error;
+ }
+
/* Clear IRQ_NOREQUEST flag */
- get_irq_desc(virq)->status &= ~IRQ_NOREQUEST;
+ desc->status &= ~IRQ_NOREQUEST;
/* map it */
smp_wmb();
@@ -617,11 +640,14 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
if (host->ops->map(host, virq, hwirq)) {
pr_debug("irq: -> mapping failed, freeing\n");
- irq_free_virt(virq, 1);
- return -1;
+ goto error;
}
return 0;
+
+error:
+ irq_free_virt(virq, 1);
+ return -1;
}
unsigned int irq_create_direct_mapping(struct irq_host *host)
@@ -705,7 +731,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
EXPORT_SYMBOL_GPL(irq_create_mapping);
unsigned int irq_create_of_mapping(struct device_node *controller,
- u32 *intspec, unsigned int intsize)
+ const u32 *intspec, unsigned int intsize)
{
struct irq_host *host;
irq_hw_number_t hwirq;
@@ -738,7 +764,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
/* Set type if specified and different than the current one */
if (type != IRQ_TYPE_NONE &&
- type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
+ type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
set_irq_type(virq, type);
return virq;
}
@@ -810,7 +836,7 @@ void irq_dispose_mapping(unsigned int virq)
irq_map[virq].hwirq = host->inval_irq;
/* Set some flags */
- get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+ irq_to_desc(virq)->status |= IRQ_NOREQUEST;
/* Free it */
irq_free_virt(virq, 1);
@@ -1002,12 +1028,24 @@ void irq_free_virt(unsigned int virq, unsigned int count)
spin_unlock_irqrestore(&irq_big_lock, flags);
}
-void irq_early_init(void)
+int arch_early_irq_init(void)
{
- unsigned int i;
+ struct irq_desc *desc;
+ int i;
+
+ for (i = 0; i < NR_IRQS; i++) {
+ desc = irq_to_desc(i);
+ if (desc)
+ desc->status |= IRQ_NOREQUEST;
+ }
- for (i = 0; i < NR_IRQS; i++)
- get_irq_desc(i)->status |= IRQ_NOREQUEST;
+ return 0;
+}
+
+int arch_init_chip_data(struct irq_desc *desc, int node)
+{
+ desc->status |= IRQ_NOREQUEST;
+ return 0;
}
/* We need to create the radix trees late */
@@ -1069,16 +1107,19 @@ static int virq_debug_show(struct seq_file *m, void *private)
seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq",
"chip name", "host name");
- for (i = 1; i < NR_IRQS; i++) {
- desc = get_irq_desc(i);
- spin_lock_irqsave(&desc->lock, flags);
+ for (i = 1; i < nr_irqs; i++) {
+ desc = irq_to_desc(i);
+ if (!desc)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action && desc->action->handler) {
seq_printf(m, "%5d ", i);
seq_printf(m, "0x%05lx ", virq_to_hw(i));
- if (desc->chip && desc->chip->typename)
- p = desc->chip->typename;
+ if (desc->chip && desc->chip->name)
+ p = desc->chip->name;
else
p = none;
seq_printf(m, "%-15s ", p);
@@ -1090,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
seq_printf(m, "%s\n", p);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
return 0;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index ed0ac4e4b8d..79a00bb9c64 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -781,9 +781,9 @@ static int __init lparcfg_init(void)
!firmware_has_feature(FW_FEATURE_ISERIES))
mode |= S_IWUSR;
- ent = proc_create("ppc64/lparcfg", mode, NULL, &lparcfg_fops);
+ ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
if (!ent) {
- printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
+ printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
return -EIO;
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index da9c0c4c10f..8649f536f8d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -502,15 +502,7 @@ _GLOBAL(clear_pages)
li r0,PAGE_SIZE/L1_CACHE_BYTES
slw r0,r0,r4
mtctr r0
-#ifdef CONFIG_8xx
- li r4, 0
-1: stw r4, 0(r3)
- stw r4, 4(r3)
- stw r4, 8(r3)
- stw r4, 12(r3)
-#else
1: dcbz 0,r3
-#endif
addi r3,r3,L1_CACHE_BYTES
bdnz 1b
blr
@@ -535,15 +527,6 @@ _GLOBAL(copy_page)
addi r3,r3,-4
addi r4,r4,-4
-#ifdef CONFIG_8xx
- /* don't use prefetch on 8xx */
- li r0,4096/L1_CACHE_BYTES
- mtctr r0
-1: COPY_16_BYTES
- bdnz 1b
- blr
-
-#else /* not 8xx, we can prefetch */
li r5,4
#if MAX_COPY_PREFETCH > 1
@@ -584,7 +567,6 @@ _GLOBAL(copy_page)
li r0,MAX_COPY_PREFETCH
li r11,4
b 2b
-#endif /* CONFIG_8xx */
/*
* void atomic_clear_mask(atomic_t mask, atomic_t *addr)
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 0ed31f22048..ad461e735ae 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -139,8 +139,8 @@ out:
}
-static int dev_nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long dev_nvram_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
switch(cmd) {
#ifdef CONFIG_PPC_PMAC
@@ -169,11 +169,11 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file,
}
const struct file_operations nvram_fops = {
- .owner = THIS_MODULE,
- .llseek = dev_nvram_llseek,
- .read = dev_nvram_read,
- .write = dev_nvram_write,
- .ioctl = dev_nvram_ioctl,
+ .owner = THIS_MODULE,
+ .llseek = dev_nvram_llseek,
+ .read = dev_nvram_read,
+ .write = dev_nvram_write,
+ .unlocked_ioctl = dev_nvram_ioctl,
};
static struct miscdevice nvram_dev = {
@@ -184,7 +184,7 @@ static struct miscdevice nvram_dev = {
#ifdef DEBUG_NVRAM
-static void nvram_print_partitions(char * label)
+static void __init nvram_print_partitions(char * label)
{
struct list_head * p;
struct nvram_partition * tmp_part;
@@ -202,7 +202,7 @@ static void nvram_print_partitions(char * label)
#endif
-static int nvram_write_header(struct nvram_partition * part)
+static int __init nvram_write_header(struct nvram_partition * part)
{
loff_t tmp_index;
int rc;
@@ -214,7 +214,7 @@ static int nvram_write_header(struct nvram_partition * part)
}
-static unsigned char nvram_checksum(struct nvram_header *p)
+static unsigned char __init nvram_checksum(struct nvram_header *p)
{
unsigned int c_sum, c_sum2;
unsigned short *sp = (unsigned short *)p->name; /* assume 6 shorts */
@@ -228,32 +228,7 @@ static unsigned char nvram_checksum(struct nvram_header *p)
return c_sum;
}
-
-/*
- * Find an nvram partition, sig can be 0 for any
- * partition or name can be NULL for any name, else
- * tries to match both
- */
-struct nvram_partition *nvram_find_partition(int sig, const char *name)
-{
- struct nvram_partition * part;
- struct list_head * p;
-
- list_for_each(p, &nvram_part->partition) {
- part = list_entry(p, struct nvram_partition, partition);
-
- if (sig && part->header.signature != sig)
- continue;
- if (name && 0 != strncmp(name, part->header.name, 12))
- continue;
- return part;
- }
- return NULL;
-}
-EXPORT_SYMBOL(nvram_find_partition);
-
-
-static int nvram_remove_os_partition(void)
+static int __init nvram_remove_os_partition(void)
{
struct list_head *i;
struct list_head *j;
@@ -319,7 +294,7 @@ static int nvram_remove_os_partition(void)
* Will create a partition starting at the first free
* space found if space has enough room.
*/
-static int nvram_create_os_partition(void)
+static int __init nvram_create_os_partition(void)
{
struct nvram_partition *part;
struct nvram_partition *new_part;
@@ -422,7 +397,7 @@ static int nvram_create_os_partition(void)
* 5.) If the max chunk cannot be allocated then try finding a chunk
* that will satisfy the minimum needed (NVRAM_MIN_REQ).
*/
-static int nvram_setup_partition(void)
+static int __init nvram_setup_partition(void)
{
struct list_head * p;
struct nvram_partition * part;
@@ -480,7 +455,7 @@ static int nvram_setup_partition(void)
}
-static int nvram_scan_partitions(void)
+static int __init nvram_scan_partitions(void)
{
loff_t cur_index = 0;
struct nvram_header phead;
@@ -706,6 +681,9 @@ int nvram_clear_error_log(void)
int clear_word = ERR_FLAG_ALREADY_LOGGED;
int rc;
+ if (nvram_error_log_index == -1)
+ return -1;
+
tmp_index = nvram_error_log_index;
rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 0a03cf70d24..a3c11cac3d7 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -119,13 +119,6 @@ static void perf_callchain_kernel(struct pt_regs *regs,
}
#ifdef CONFIG_PPC64
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize])
-#else
-#define is_huge_psize(pagesize) 0
-#endif
-
/*
* On 64-bit we don't want to invoke hash_page on user addresses from
* interrupt context, so if the access faults, we read the page tables
@@ -135,7 +128,7 @@ static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
pgd_t *pgdir;
pte_t *ptep, pte;
- int pagesize;
+ unsigned shift;
unsigned long addr = (unsigned long) ptr;
unsigned long offset;
unsigned long pfn;
@@ -145,17 +138,14 @@ static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
if (!pgdir)
return -EFAULT;
- pagesize = get_slice_psize(current->mm, addr);
+ ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
+ if (!shift)
+ shift = PAGE_SHIFT;
/* align address to page boundary */
- offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
+ offset = addr & ((1UL << shift) - 1);
addr -= offset;
- if (is_huge_psize(pagesize))
- ptep = huge_pte_offset(current->mm, addr);
- else
- ptep = find_linux_pte(pgdir, addr);
-
if (ptep == NULL)
return -EFAULT;
pte = *ptep;
@@ -497,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
* Since we can't get PMU interrupts inside a PMU interrupt handler,
* we don't need separate irq and nmi entries here.
*/
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
- struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+ struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
entry->nr = 0;
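
read_user_stack_slow() above now takes the page-size shift straight from find_linux_pte_or_hugepte() and aligns the address with offset = addr & ((1UL << shift) - 1). A short worked example of that mask arithmetic (the values are arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long addr   = 0x10012345UL;
        unsigned int  shift  = 16;                      /* e.g. a 64K page */
        unsigned long offset = addr & ((1UL << shift) - 1);

        printf("base = 0x%lx, offset = 0x%lx\n", addr - offset, offset);
        /* base = 0x10010000, offset = 0x2345 */
        return 0;
}
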
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c8b27bb4dbd..425451453e9 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -96,8 +96,6 @@ EXPORT_SYMBOL(copy_4K_page);
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(isa_mem_base);
EXPORT_SYMBOL(pci_dram_offset);
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL(start_thread);
@@ -162,7 +160,6 @@ EXPORT_SYMBOL(screen_info);
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(timer_interrupt);
-EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
EXPORT_SYMBOL(cacheable_memcpy);
EXPORT_SYMBOL(cacheable_memzero);
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_powerpc.c
index c647ddef40d..1ed3b8d7981 100644
--- a/arch/powerpc/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -28,55 +28,7 @@
#include <asm/uaccess.h>
#include <asm/prom.h>
-static loff_t page_map_seek( struct file *file, loff_t off, int whence);
-static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos);
-static int page_map_mmap( struct file *file, struct vm_area_struct *vma );
-
-static const struct file_operations page_map_fops = {
- .llseek = page_map_seek,
- .read = page_map_read,
- .mmap = page_map_mmap
-};
-
-/*
- * Create the ppc64 and ppc64/rtas directories early. This allows us to
- * assume that they have been previously created in drivers.
- */
-static int __init proc_ppc64_create(void)
-{
- struct proc_dir_entry *root;
-
- root = proc_mkdir("ppc64", NULL);
- if (!root)
- return 1;
-
- if (!of_find_node_by_path("/rtas"))
- return 0;
-
- if (!proc_mkdir("rtas", root))
- return 1;
-
- if (!proc_symlink("rtas", NULL, "ppc64/rtas"))
- return 1;
-
- return 0;
-}
-core_initcall(proc_ppc64_create);
-
-static int __init proc_ppc64_init(void)
-{
- struct proc_dir_entry *pde;
-
- pde = proc_create_data("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL,
- &page_map_fops, vdso_data);
- if (!pde)
- return 1;
- pde->size = PAGE_SIZE;
-
- return 0;
-}
-__initcall(proc_ppc64_init);
+#ifdef CONFIG_PPC64
static loff_t page_map_seek( struct file *file, loff_t off, int whence)
{
@@ -120,3 +72,55 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
return 0;
}
+static const struct file_operations page_map_fops = {
+ .llseek = page_map_seek,
+ .read = page_map_read,
+ .mmap = page_map_mmap
+};
+
+
+static int __init proc_ppc64_init(void)
+{
+ struct proc_dir_entry *pde;
+
+ pde = proc_create_data("powerpc/systemcfg", S_IFREG|S_IRUGO, NULL,
+ &page_map_fops, vdso_data);
+ if (!pde)
+ return 1;
+ pde->size = PAGE_SIZE;
+
+ return 0;
+}
+__initcall(proc_ppc64_init);
+
+#endif /* CONFIG_PPC64 */
+
+/*
+ * Create the ppc64 and ppc64/rtas directories early. This allows us to
+ * assume that they have been previously created in drivers.
+ */
+static int __init proc_ppc64_create(void)
+{
+ struct proc_dir_entry *root;
+
+ root = proc_mkdir("powerpc", NULL);
+ if (!root)
+ return 1;
+
+#ifdef CONFIG_PPC64
+ if (!proc_symlink("ppc64", NULL, "powerpc"))
+ pr_err("Failed to create link /proc/ppc64 -> /proc/powerpc\n");
+#endif
+
+ if (!of_find_node_by_path("/rtas"))
+ return 0;
+
+ if (!proc_mkdir("rtas", root))
+ return 1;
+
+ if (!proc_symlink("rtas", NULL, "powerpc/rtas"))
+ return 1;
+
+ return 0;
+}
+core_initcall(proc_ppc64_create);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index bf90361bb70..fd0d29493fd 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -42,7 +42,7 @@
#include <asm/mmu.h>
struct rtas_t rtas = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED
+ .lock = __ARCH_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
@@ -80,13 +80,13 @@ static unsigned long lock_rtas(void)
local_irq_save(flags);
preempt_disable();
- __raw_spin_lock_flags(&rtas.lock, flags);
+ arch_spin_lock_flags(&rtas.lock, flags);
return flags;
}
static void unlock_rtas(unsigned long flags)
{
- __raw_spin_unlock(&rtas.lock);
+ arch_spin_unlock(&rtas.lock);
local_irq_restore(flags);
preempt_enable();
}
@@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node,
return 1;
}
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static u64 timebase = 0;
void __cpuinit rtas_give_timebase(void)
@@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void)
local_irq_save(flags);
hard_irq_disable();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
timebase = get_tb();
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
while (timebase)
barrier();
@@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void)
{
while (!timebase)
barrier();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
}
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 13011a96a97..a85117d5c9a 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -6,7 +6,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * /proc/ppc64/rtas/firmware_flash interface
+ * /proc/powerpc/rtas/firmware_flash interface
*
* This file implements a firmware_flash interface to pump a firmware
* image into the kernel. At reboot time rtas_restart() will see the
@@ -740,7 +740,7 @@ static int __init rtas_flash_init(void)
return 1;
}
- firmware_flash_pde = create_flash_pde("ppc64/rtas/"
+ firmware_flash_pde = create_flash_pde("powerpc/rtas/"
FIRMWARE_FLASH_NAME,
&rtas_flash_operations);
if (firmware_flash_pde == NULL) {
@@ -754,7 +754,7 @@ static int __init rtas_flash_init(void)
if (rc != 0)
goto cleanup;
- firmware_update_pde = create_flash_pde("ppc64/rtas/"
+ firmware_update_pde = create_flash_pde("powerpc/rtas/"
FIRMWARE_UPDATE_NAME,
&rtas_flash_operations);
if (firmware_update_pde == NULL) {
@@ -768,7 +768,7 @@ static int __init rtas_flash_init(void)
if (rc != 0)
goto cleanup;
- validate_pde = create_flash_pde("ppc64/rtas/" VALIDATE_FLASH_NAME,
+ validate_pde = create_flash_pde("powerpc/rtas/" VALIDATE_FLASH_NAME,
&validate_flash_operations);
if (validate_pde == NULL) {
rc = -ENOMEM;
@@ -781,7 +781,7 @@ static int __init rtas_flash_init(void)
if (rc != 0)
goto cleanup;
- manage_pde = create_flash_pde("ppc64/rtas/" MANAGE_FLASH_NAME,
+ manage_pde = create_flash_pde("powerpc/rtas/" MANAGE_FLASH_NAME,
&manage_flash_operations);
if (manage_pde == NULL) {
rc = -ENOMEM;
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/kernel/rtasd.c
index b3cbac85592..2e4832ab210 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -39,6 +39,7 @@ static unsigned long rtas_log_start;
static unsigned long rtas_log_size;
static int surveillance_timeout = -1;
+
static unsigned int rtas_error_log_max;
static unsigned int rtas_error_log_buffer_max;
@@ -213,9 +214,11 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
return;
}
+#ifdef CONFIG_PPC64
/* Write error to NVRAM */
if (logging_enabled && !(err_type & ERR_FLAG_BOOT))
nvram_write_error_log(buf, len, err_type, error_log_cnt);
+#endif /* CONFIG_PPC64 */
/*
* rtas errors can occur during boot, and we do want to capture
@@ -264,7 +267,6 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
}
-
static int rtas_log_open(struct inode * inode, struct file * file)
{
return 0;
@@ -300,6 +302,7 @@ static ssize_t rtas_log_read(struct file * file, char __user * buf,
return -ENOMEM;
spin_lock_irqsave(&rtasd_log_lock, s);
+
/* if it's 0, then we know we got the last one (the one in NVRAM) */
while (rtas_log_size == 0) {
if (file->f_flags & O_NONBLOCK) {
@@ -313,7 +316,9 @@ static ssize_t rtas_log_read(struct file * file, char __user * buf,
error = -ENODATA;
goto out;
}
+#ifdef CONFIG_PPC64
nvram_clear_error_log();
+#endif /* CONFIG_PPC64 */
spin_unlock_irqrestore(&rtasd_log_lock, s);
error = wait_event_interruptible(rtas_log_wait, rtas_log_size);
@@ -427,14 +432,11 @@ static void rtas_event_scan(struct work_struct *w)
put_online_cpus();
}
-static void start_event_scan(void)
+#ifdef CONFIG_PPC64
+static void retreive_nvram_error_log(void)
{
- unsigned int err_type;
- int rc;
-
- printk(KERN_DEBUG "RTAS daemon started\n");
- pr_debug("rtasd: will sleep for %d milliseconds\n",
- (30000 / rtas_event_scan_rate));
+	unsigned int err_type;
+	int rc;
/* See if we have any error stored in NVRAM */
memset(logdata, 0, rtas_error_log_max);
@@ -442,12 +444,26 @@ static void start_event_scan(void)
&err_type, &error_log_cnt);
/* We can use rtas_log_buf now */
logging_enabled = 1;
-
if (!rc) {
if (err_type != ERR_FLAG_ALREADY_LOGGED) {
pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0);
}
}
+}
+#else /* CONFIG_PPC64 */
+static void retreive_nvram_error_log(void)
+{
+}
+#endif /* CONFIG_PPC64 */
+
+static void start_event_scan(void)
+{
+ printk(KERN_DEBUG "RTAS daemon started\n");
+ pr_debug("rtasd: will sleep for %d milliseconds\n",
+ (30000 / rtas_event_scan_rate));
+
+	/* Retrieve errors from NVRAM, if any */
+ retreive_nvram_error_log();
schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work,
event_scan_delay);
@@ -457,13 +473,13 @@ static int __init rtas_init(void)
{
struct proc_dir_entry *entry;
- if (!machine_is(pseries))
+ if (!machine_is(pseries) && !machine_is(chrp))
return 0;
/* No RTAS */
event_scan = rtas_token("event-scan");
if (event_scan == RTAS_UNKNOWN_SERVICE) {
- printk(KERN_DEBUG "rtasd: no event-scan on system\n");
+ printk(KERN_INFO "rtasd: No event-scan on system\n");
return -ENODEV;
}
@@ -483,7 +499,7 @@ static int __init rtas_init(void)
return -ENOMEM;
}
- entry = proc_create("ppc64/rtas/error_log", S_IRUSR, NULL,
+ entry = proc_create("powerpc/rtas/error_log", S_IRUSR, NULL,
&proc_rtas_log_operations);
if (!entry)
printk(KERN_ERR "Failed to create error_log proc entry\n");
@@ -492,11 +508,16 @@ static int __init rtas_init(void)
return 0;
}
+__initcall(rtas_init);
static int __init surveillance_setup(char *str)
{
int i;
+ /* We only do surveillance on pseries */
+ if (!machine_is(pseries))
+ return 0;
+
if (get_option(&str,&i)) {
if (i >= 0 && i <= 255)
surveillance_timeout = i;
@@ -504,6 +525,7 @@ static int __init surveillance_setup(char *str)
return 1;
}
+__setup("surveillance=", surveillance_setup);
static int __init rtasmsgs_setup(char *str)
{
@@ -514,6 +536,4 @@ static int __init rtasmsgs_setup(char *str)
return 1;
}
-__initcall(rtas_init);
-__setup("surveillance=", surveillance_setup);
__setup("rtasmsgs=", rtasmsgs_setup);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 845c72ab735..03dd6a24819 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */
#ifdef CONFIG_SMP
-DEFINE_PER_CPU(unsigned int, pvr);
+DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif
static int show_cpuinfo(struct seq_file *m, void *v)
@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
#ifdef CONFIG_SMP
- pvr = per_cpu(pvr, cpu_id);
+ pvr = per_cpu(cpu_pvr, cpu_id);
#else
pvr = mfspr(SPRN_PVR);
#endif
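
The per-CPU variable is renamed from pvr to cpu_pvr, presumably because show_cpuinfo() also has a local variable called pvr and the per-CPU symbol no longer carries a distinguishing prefix after the per-CPU accessor rework, so per_cpu(pvr, cpu_id) would pick up the wrong name. A minimal sketch of the per-CPU pattern used here, with a hypothetical variable name:

#include <linux/percpu.h>
#include <linux/smp.h>

DEFINE_PER_CPU(unsigned int, demo_value);

static void record_value_on_this_cpu(unsigned int v)
{
	/* __get_cpu_var() touches the copy owned by the running CPU. */
	__get_cpu_var(demo_value) = v;
}

static unsigned int read_value_of(int cpu)
{
	/* per_cpu() reads another CPU's copy by index, as show_cpuinfo()
	 * does with cpu_pvr above. */
	return per_cpu(demo_value, cpu);
}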
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index df2c9e932b3..6568406b2a3 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -356,11 +356,6 @@ void __init setup_system(void)
*/
initialize_cache_info();
- /*
- * Initialize irq remapping subsystem
- */
- irq_early_init();
-
#ifdef CONFIG_PPC_RTAS
/*
* Initialize RTAS if available
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9b86a74d281..a521fb8a40e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -218,6 +218,9 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
static void stop_this_cpu(void *dummy)
{
+ /* Remove this CPU */
+ set_cpu_online(smp_processor_id(), false);
+
local_irq_disable();
while (1)
;
@@ -232,7 +235,7 @@ struct thread_info *current_set[NR_CPUS];
static void __devinit smp_store_cpu_info(int id)
{
- per_cpu(pvr, id) = mfspr(SPRN_PVR);
+ per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}
static void __init smp_create_idle(unsigned int cpu)
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index c04832c4a02..3370e62e43d 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -140,7 +140,6 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long off, int shift)
{
- struct file * file = NULL;
unsigned long ret = -EINVAL;
if (!arch_validate_prot(prot))
@@ -151,20 +150,8 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
goto out;
off >>= shift;
}
-
- ret = -EBADF;
- if (!(flags & MAP_ANONYMOUS)) {
- if (!(file = fget(fd)))
- goto out;
- }
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- down_write(&current->mm->mmap_sem);
- ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
+ ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off);
out:
return ret;
}
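
do_mmap2() now funnels into the generic sys_mmap_pgoff(), which handles the fget()/mmap_sem/do_mmap_pgoff() sequence itself; the only powerpc-specific work left is converting the caller's offset units into page units via shift. A worked example of that conversion, assuming a 64K page configuration (so shift = PAGE_SHIFT - 12 = 4); the tiny program is illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long off = 32;		/* sys_mmap2() units: 32 * 4096 = 128K */
	unsigned int shift = 4;		/* 64K pages: PAGE_SHIFT(16) - 12 */

	if (off & ((1UL << shift) - 1)) {
		fprintf(stderr, "offset not aligned to a full page\n");
		return 1;
	}
	off >>= shift;
	printf("pgoff = %lu (in 64K pages)\n", off);	/* prints 2 */
	return 0;
}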
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 956ab33fd73..e235e52dc4f 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -461,6 +461,25 @@ static void unregister_cpu_online(unsigned int cpu)
cacheinfo_cpu_offline(cpu);
}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ssize_t arch_cpu_probe(const char *buf, size_t count)
+{
+ if (ppc_md.cpu_probe)
+ return ppc_md.cpu_probe(buf, count);
+
+ return -EINVAL;
+}
+
+ssize_t arch_cpu_release(const char *buf, size_t count)
+{
+ if (ppc_md.cpu_release)
+ return ppc_md.cpu_release(buf, count);
+
+ return -EINVAL;
+}
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
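
arch_cpu_probe()/arch_cpu_release() simply forward to ppc_md hooks, so a platform opts into the sysfs probe/release files by filling those in. A hedged sketch of what such wiring could look like; the demo_* callbacks and initcall are hypothetical, only the ppc_md field names come from the hunk above:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/machdep.h>

/* Hypothetical platform callbacks: a real implementation would parse the
 * string written to /sys/devices/system/cpu/probe (or .../release) and
 * add or remove the named CPU. */
static ssize_t demo_cpu_probe(const char *buf, size_t count)
{
	pr_info("cpu probe request: %s\n", buf);
	return count;
}

static ssize_t demo_cpu_release(const char *buf, size_t count)
{
	pr_info("cpu release request: %s\n", buf);
	return count;
}

static int __init demo_wire_cpu_hooks(void)
{
	ppc_md.cpu_probe = demo_cpu_probe;
	ppc_md.cpu_release = demo_cpu_release;
	return 0;
}
device_initcall(demo_wire_cpu_hooks);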
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 674800b242d..9ba2cc88591 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -269,6 +269,7 @@ void account_system_vtime(struct task_struct *tsk)
per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
local_irq_restore(flags);
}
+EXPORT_SYMBOL_GPL(account_system_vtime);
/*
* Transfer the user and system times accumulated in the paca
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9d1f9354d6c..d069ff8a7e0 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -174,6 +174,15 @@ int die(const char *str, struct pt_regs *regs, long err)
return 0;
}
+void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = TRAP_TRACE;
+ info->si_addr = (void __user *)regs->nip;
+}
+
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
siginfo_t info;
@@ -198,28 +207,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
info.si_code = code;
info.si_addr = (void __user *) addr;
force_sig_info(signr, &info, current);
-
- /*
- * Init gets no signals that it doesn't have a handler for.
- * That's all very well, but if it has caused a synchronous
- * exception and we ignore the resulting signal, it will just
- * generate the same exception over and over again and we get
- * nowhere. Better to kill it and let the kernel panic.
- */
- if (is_global_init(current)) {
- __sighandler_t handler;
-
- spin_lock_irq(&current->sighand->siglock);
- handler = current->sighand->action[signr-1].sa.sa_handler;
- spin_unlock_irq(&current->sighand->siglock);
- if (handler == SIG_DFL) {
- /* init has generated a synchronous exception
- and it doesn't have a handler for the signal */
- printk(KERN_CRIT "init has generated signal %d "
- "but has no handler for it\n", signr);
- do_exit(signr);
- }
- }
}
#ifdef CONFIG_PPC64
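
user_single_step_siginfo() fills in the siginfo a tracer sees after a single step: SIGTRAP with si_code TRAP_TRACE and si_addr pointing at the interrupted NIP. A small illustrative tracer that observes exactly that from user space:

#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	siginfo_t si;
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the parent take control */
		return 0;
	}

	waitpid(child, NULL, 0);		/* child stopped itself */
	ptrace(PTRACE_SINGLESTEP, child, NULL, NULL);
	waitpid(child, NULL, 0);		/* stopped after one instruction */

	if (ptrace(PTRACE_GETSIGINFO, child, NULL, &si) == 0)
		printf("signo=%d code=%d (TRAP_TRACE=%d) addr=%p\n",
		       si.si_signo, si.si_code, TRAP_TRACE, si.si_addr);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	return 0;
}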
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index fc9af47e212..e39cad83c88 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -60,6 +60,8 @@ void __init udbg_early_init(void)
udbg_init_40x_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_CPM)
udbg_init_cpm();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
+ udbg_init_usbgecko();
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 67b6916f0e9..fe460482fa6 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -58,7 +58,7 @@ _GLOBAL(load_up_altivec)
* all 1's
*/
mfspr r4,SPRN_VRSAVE
- cmpdi 0,r4,0
+ cmpwi 0,r4,0
bne+ 1f
li r4,-1
mtspr SPRN_VRSAVE,r4
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 27735a7ac12..dcd01c82e70 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,6 +38,9 @@ jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
+ . = 0;
+ reloc_start = .;
+
. = KERNELBASE;
/*
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index c2992684661..07703f72330 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -21,6 +21,23 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
+config KVM_BOOK3S_64_HANDLER
+ bool
+
+config KVM_BOOK3S_64
+ tristate "KVM support for PowerPC book3s_64 processors"
+ depends on EXPERIMENTAL && PPC64
+ select KVM
+ select KVM_BOOK3S_64_HANDLER
+ ---help---
+ Support running unmodified book3s_64 and book3s_32 guest kernels
+ in virtual machines on book3s_64 host processors.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
+
config KVM_440
bool "KVM support for PowerPC 440 processors"
depends on EXPERIMENTAL && 44x
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 37655fe19f2..56484d65237 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -12,26 +12,45 @@ CFLAGS_44x_tlb.o := -I.
CFLAGS_e500_tlb.o := -I.
CFLAGS_emulate.o := -I.
-kvm-objs := $(common-objs-y) powerpc.o emulate.o
+common-objs-y += powerpc.o emulate.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
-obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o
AFLAGS_booke_interrupts.o := -I$(obj)
kvm-440-objs := \
+ $(common-objs-y) \
booke.o \
booke_emulate.o \
booke_interrupts.o \
44x.o \
44x_tlb.o \
44x_emulate.o
-obj-$(CONFIG_KVM_440) += kvm-440.o
+kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs)
kvm-e500-objs := \
+ $(common-objs-y) \
booke.o \
booke_emulate.o \
booke_interrupts.o \
e500.o \
e500_tlb.o \
e500_emulate.o
-obj-$(CONFIG_KVM_E500) += kvm-e500.o
+kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
+
+kvm-book3s_64-objs := \
+ $(common-objs-y) \
+ book3s.o \
+ book3s_64_emulate.o \
+ book3s_64_interrupts.o \
+ book3s_64_mmu_host.o \
+ book3s_64_mmu.o \
+ book3s_32_mmu.o
+kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs)
+
+kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
+
+obj-$(CONFIG_KVM_440) += kvm.o
+obj-$(CONFIG_KVM_E500) += kvm.o
+obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
+
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
new file mode 100644
index 00000000000..3e294bd9b8c
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.c
@@ -0,0 +1,974 @@
+/*
+ * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ *
+ * Description:
+ * This file is derived from arch/powerpc/kvm/44x.c,
+ * by Hollis Blanchard <hollisb@us.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu_context.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+/* #define EXIT_DEBUG */
+/* #define EXIT_DEBUG_SIMPLE */
+
+/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
+ * When set, we retrigger a DEC interrupt after that if DEC <= 0.
+ * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
+
+/* #define AGGRESSIVE_DEC */
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "exits", VCPU_STAT(sum_exits) },
+ { "mmio", VCPU_STAT(mmio_exits) },
+ { "sig", VCPU_STAT(signal_exits) },
+ { "sysc", VCPU_STAT(syscall_exits) },
+ { "inst_emu", VCPU_STAT(emulated_inst_exits) },
+ { "dec", VCPU_STAT(dec_exits) },
+ { "ext_intr", VCPU_STAT(ext_intr_exits) },
+ { "queue_intr", VCPU_STAT(queue_intr) },
+ { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+ { "pf_storage", VCPU_STAT(pf_storage) },
+ { "sp_storage", VCPU_STAT(sp_storage) },
+ { "pf_instruc", VCPU_STAT(pf_instruc) },
+ { "sp_instruc", VCPU_STAT(sp_instruc) },
+ { "ld", VCPU_STAT(ld) },
+ { "ld_slow", VCPU_STAT(ld_slow) },
+ { "st", VCPU_STAT(st) },
+ { "st_slow", VCPU_STAT(st_slow) },
+ { NULL }
+};
+
+void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+ get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+ to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+}
+
+#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
+static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
+{
+ u64 jd = mftb() - vcpu->arch.dec_jiffies;
+ return vcpu->arch.dec - jd;
+}
+#endif
+
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+{
+ ulong old_msr = vcpu->arch.msr;
+
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
+#endif
+ msr &= to_book3s(vcpu)->msr_mask;
+ vcpu->arch.msr = msr;
+ vcpu->arch.shadow_msr = msr | MSR_USER32;
+ vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
+ MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
+ MSR_FE1);
+
+ if (msr & (MSR_WE|MSR_POW)) {
+ if (!vcpu->arch.pending_exceptions) {
+ kvm_vcpu_block(vcpu);
+ vcpu->stat.halt_wakeup++;
+ }
+ }
+
+ if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
+ (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
+ kvmppc_mmu_flush_segments(vcpu);
+ kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+ }
+}
+
+void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
+{
+ vcpu->arch.srr0 = vcpu->arch.pc;
+ vcpu->arch.srr1 = vcpu->arch.msr | flags;
+ vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
+ vcpu->arch.mmu.reset_msr(vcpu);
+}
+
+void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+{
+ unsigned int prio;
+
+ vcpu->stat.queue_intr++;
+ switch (vec) {
+ case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
+ case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
+ case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
+ case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
+ case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
+ case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
+ case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
+ case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
+ case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
+ case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
+ case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
+ case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
+ case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
+ case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
+ case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
+ default: prio = BOOK3S_IRQPRIO_MAX; break;
+ }
+
+ set_bit(prio, &vcpu->arch.pending_exceptions);
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "Queueing interrupt %x\n", vec);
+#endif
+}
+
+
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+{
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
+}
+
+void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
+{
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
+}
+
+int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
+{
+ return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+{
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
+}
+
+int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
+{
+ int deliver = 1;
+ int vec = 0;
+
+ switch (priority) {
+ case BOOK3S_IRQPRIO_DECREMENTER:
+ deliver = vcpu->arch.msr & MSR_EE;
+ vec = BOOK3S_INTERRUPT_DECREMENTER;
+ break;
+ case BOOK3S_IRQPRIO_EXTERNAL:
+ deliver = vcpu->arch.msr & MSR_EE;
+ vec = BOOK3S_INTERRUPT_EXTERNAL;
+ break;
+ case BOOK3S_IRQPRIO_SYSTEM_RESET:
+ vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
+ break;
+ case BOOK3S_IRQPRIO_MACHINE_CHECK:
+ vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
+ break;
+ case BOOK3S_IRQPRIO_DATA_STORAGE:
+ vec = BOOK3S_INTERRUPT_DATA_STORAGE;
+ break;
+ case BOOK3S_IRQPRIO_INST_STORAGE:
+ vec = BOOK3S_INTERRUPT_INST_STORAGE;
+ break;
+ case BOOK3S_IRQPRIO_DATA_SEGMENT:
+ vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
+ break;
+ case BOOK3S_IRQPRIO_INST_SEGMENT:
+ vec = BOOK3S_INTERRUPT_INST_SEGMENT;
+ break;
+ case BOOK3S_IRQPRIO_ALIGNMENT:
+ vec = BOOK3S_INTERRUPT_ALIGNMENT;
+ break;
+ case BOOK3S_IRQPRIO_PROGRAM:
+ vec = BOOK3S_INTERRUPT_PROGRAM;
+ break;
+ case BOOK3S_IRQPRIO_VSX:
+ vec = BOOK3S_INTERRUPT_VSX;
+ break;
+ case BOOK3S_IRQPRIO_ALTIVEC:
+ vec = BOOK3S_INTERRUPT_ALTIVEC;
+ break;
+ case BOOK3S_IRQPRIO_FP_UNAVAIL:
+ vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
+ break;
+ case BOOK3S_IRQPRIO_SYSCALL:
+ vec = BOOK3S_INTERRUPT_SYSCALL;
+ break;
+ case BOOK3S_IRQPRIO_DEBUG:
+ vec = BOOK3S_INTERRUPT_TRACE;
+ break;
+ case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
+ vec = BOOK3S_INTERRUPT_PERFMON;
+ break;
+ default:
+ deliver = 0;
+ printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
+ break;
+ }
+
+#if 0
+ printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
+#endif
+
+ if (deliver)
+ kvmppc_inject_interrupt(vcpu, vec, 0ULL);
+
+ return deliver;
+}
+
+void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
+{
+ unsigned long *pending = &vcpu->arch.pending_exceptions;
+ unsigned int priority;
+
+ /* XXX be more clever here - no need to mftb() on every entry */
+ /* Issue DEC again if it's still active */
+#ifdef AGGRESSIVE_DEC
+ if (vcpu->arch.msr & MSR_EE)
+ if (kvmppc_get_dec(vcpu) & 0x80000000)
+ kvmppc_core_queue_dec(vcpu);
+#endif
+
+#ifdef EXIT_DEBUG
+ if (vcpu->arch.pending_exceptions)
+ printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
+#endif
+ priority = __ffs(*pending);
+ while (priority <= (sizeof(unsigned int) * 8)) {
+ if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ break;
+ }
+
+ priority = find_next_bit(pending,
+ BITS_PER_BYTE * sizeof(*pending),
+ priority + 1);
+ }
+}
+
+void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+{
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
+ vcpu->arch.pvr = pvr;
+ if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
+ kvmppc_mmu_book3s_64_init(vcpu);
+ to_book3s(vcpu)->hior = 0xfff00000;
+ to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
+ } else {
+ kvmppc_mmu_book3s_32_init(vcpu);
+ to_book3s(vcpu)->hior = 0;
+ to_book3s(vcpu)->msr_mask = 0xffffffffULL;
+ }
+
+ /* If we are in hypervisor level on 970, we can tell the CPU to
+ * treat DCBZ as 32 bytes store */
+ vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
+ !strcmp(cur_cpu_spec->platform, "ppc970"))
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+
+}
+
+/* Book3s_32 CPUs always have a 32 byte cache line size, which Linux assumes. To
+ * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
+ * emulate a 32 byte dcbz length.
+ *
+ * The Book3s_64 inventors also realized this case and implemented a special bit
+ * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
+ *
+ * My approach here is to patch the dcbz instruction on executing pages.
+ */
+static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+ bool touched = false;
+ hva_t hpage;
+ u32 *page;
+ int i;
+
+ hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+ if (kvm_is_error_hva(hpage))
+ return;
+
+ hpage |= pte->raddr & ~PAGE_MASK;
+ hpage &= ~0xFFFULL;
+
+ page = vmalloc(HW_PAGE_SIZE);
+
+ if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
+ goto out;
+
+ for (i=0; i < HW_PAGE_SIZE / 4; i++)
+ if ((page[i] & 0xff0007ff) == INS_DCBZ) {
+ page[i] &= 0xfffffff7; // reserved instruction, so we trap
+ touched = true;
+ }
+
+ if (touched)
+ copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);
+
+out:
+ vfree(page);
+}
+
+static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
+ struct kvmppc_pte *pte)
+{
+ int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+ int r;
+
+ if (relocated) {
+ r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
+ } else {
+ pte->eaddr = eaddr;
+ pte->raddr = eaddr & 0xffffffff;
+ pte->vpage = eaddr >> 12;
+		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+		case 0:
+			pte->vpage |= VSID_REAL;
+			break;
+		case MSR_DR:
+			pte->vpage |= VSID_REAL_DR;
+			break;
+		case MSR_IR:
+			pte->vpage |= VSID_REAL_IR;
+			break;
+		}
+ pte->may_read = true;
+ pte->may_write = true;
+ pte->may_execute = true;
+ r = 0;
+ }
+
+ return r;
+}
+
+static hva_t kvmppc_bad_hva(void)
+{
+ return PAGE_OFFSET;
+}
+
+static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+ bool read)
+{
+ hva_t hpage;
+
+ if (read && !pte->may_read)
+ goto err;
+
+ if (!read && !pte->may_write)
+ goto err;
+
+ hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
+ if (kvm_is_error_hva(hpage))
+ goto err;
+
+ return hpage | (pte->raddr & ~PAGE_MASK);
+err:
+ return kvmppc_bad_hva();
+}
+
+int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
+{
+ struct kvmppc_pte pte;
+ hva_t hva = eaddr;
+
+ vcpu->stat.st++;
+
+ if (kvmppc_xlate(vcpu, eaddr, false, &pte))
+ goto err;
+
+ hva = kvmppc_pte_to_hva(vcpu, &pte, false);
+ if (kvm_is_error_hva(hva))
+ goto err;
+
+ if (copy_to_user((void __user *)hva, ptr, size)) {
+ printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -ENOENT;
+}
+
+int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
+ bool data)
+{
+ struct kvmppc_pte pte;
+ hva_t hva = eaddr;
+
+ vcpu->stat.ld++;
+
+ if (kvmppc_xlate(vcpu, eaddr, data, &pte))
+ goto err;
+
+ hva = kvmppc_pte_to_hva(vcpu, &pte, true);
+ if (kvm_is_error_hva(hva))
+ goto err;
+
+ if (copy_from_user(ptr, (void __user *)hva, size)) {
+ printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return -ENOENT;
+}
+
+static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+ return kvm_is_visible_gfn(vcpu->kvm, gfn);
+}
+
+int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ ulong eaddr, int vec)
+{
+ bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
+ int r = RESUME_GUEST;
+ int relocated;
+ int page_found = 0;
+ struct kvmppc_pte pte;
+ bool is_mmio = false;
+
+ if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
+ relocated = (vcpu->arch.msr & MSR_DR);
+ } else {
+ relocated = (vcpu->arch.msr & MSR_IR);
+ }
+
+ /* Resolve real address if translation turned on */
+ if (relocated) {
+ page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
+ } else {
+ pte.may_execute = true;
+ pte.may_read = true;
+ pte.may_write = true;
+ pte.raddr = eaddr & 0xffffffff;
+ pte.eaddr = eaddr;
+ pte.vpage = eaddr >> 12;
+		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+		case 0:
+			pte.vpage |= VSID_REAL;
+			break;
+		case MSR_DR:
+			pte.vpage |= VSID_REAL_DR;
+			break;
+		case MSR_IR:
+			pte.vpage |= VSID_REAL_IR;
+			break;
+		}
+ }
+
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * If we do the dcbz hack, we have to NX on every execution,
+ * so we can patch the executing code. This renders our guest
+ * NX-less.
+ */
+ pte.may_execute = !data;
+ }
+
+ if (page_found == -ENOENT) {
+ /* Page not found in guest PTE entries */
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+ vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EPERM) {
+ /* Storage protection */
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
+ to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
+ vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+ kvmppc_book3s_queue_irqprio(vcpu, vec);
+ } else if (page_found == -EINVAL) {
+ /* Page not found in guest SLB */
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
+ } else if (!is_mmio &&
+ kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+ /* The guest's PTE is not mapped yet. Map on the host */
+ kvmppc_mmu_map_page(vcpu, &pte);
+ if (data)
+ vcpu->stat.sp_storage++;
+ else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
+ kvmppc_patch_dcbz(vcpu, &pte);
+ } else {
+ /* MMIO */
+ vcpu->stat.mmio_exits++;
+ vcpu->arch.paddr_accessed = pte.raddr;
+ r = kvmppc_emulate_mmio(run, vcpu);
+ if ( r == RESUME_HOST_NV )
+ r = RESUME_HOST;
+ if ( r == RESUME_GUEST_NV )
+ r = RESUME_GUEST;
+ }
+
+ return r;
+}
+
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ int r = RESUME_HOST;
+
+ vcpu->stat.sum_exits++;
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+#ifdef EXIT_DEBUG
+ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
+ exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+ kvmppc_get_dec(vcpu), vcpu->arch.msr);
+#elif defined (EXIT_DEBUG_SIMPLE)
+ if ((exit_nr != 0x900) && (exit_nr != 0x500))
+ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
+ exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
+ vcpu->arch.msr);
+#endif
+ kvm_resched(vcpu);
+ switch (exit_nr) {
+ case BOOK3S_INTERRUPT_INST_STORAGE:
+ vcpu->stat.pf_instruc++;
+ /* only care about PTEG not found errors, but leave NX alone */
+ if (vcpu->arch.shadow_msr & 0x40000000) {
+ r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
+ vcpu->stat.sp_instruc++;
+ } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
+ /*
+ * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
+			 * so we can't use the NX bit inside the guest. Let's cross our
+			 * fingers that no guest that needs the dcbz hack uses NX.
+ */
+ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+ } else {
+ vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_DATA_STORAGE:
+ vcpu->stat.pf_storage++;
+ /* The only case we need to handle is missing shadow PTEs */
+ if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
+ r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
+ } else {
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+ r = RESUME_GUEST;
+ }
+ break;
+ case BOOK3S_INTERRUPT_DATA_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_DATA_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_INST_SEGMENT:
+ if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_INST_SEGMENT);
+ }
+ r = RESUME_GUEST;
+ break;
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_PROGRAM:
+ {
+ enum emulation_result er;
+
+ if (vcpu->arch.msr & MSR_PR) {
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
+#endif
+ if ((vcpu->arch.last_inst & 0xff0007ff) !=
+ (INS_DCBZ & 0xfffffff7)) {
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ break;
+ }
+ }
+
+ vcpu->stat.emulated_inst_exits++;
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ r = RESUME_GUEST;
+ break;
+ case EMULATE_FAIL:
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+ __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ break;
+ default:
+ BUG();
+ }
+ break;
+ }
+ case BOOK3S_INTERRUPT_SYSCALL:
+#ifdef EXIT_DEBUG
+ printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+#endif
+ vcpu->stat.syscall_exits++;
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ case BOOK3S_INTERRUPT_FP_UNAVAIL:
+ case BOOK3S_INTERRUPT_TRACE:
+ case BOOK3S_INTERRUPT_ALTIVEC:
+ case BOOK3S_INTERRUPT_VSX:
+ kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ r = RESUME_GUEST;
+ break;
+ default:
+ /* Ugh - bork here! What did we get? */
+ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+ r = RESUME_HOST;
+ BUG();
+ break;
+ }
+
+
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(current)) {
+#ifdef EXIT_DEBUG
+ printk(KERN_EMERG "KVM: Going back to host\n");
+#endif
+ vcpu->stat.signal_exits++;
+ run->exit_reason = KVM_EXIT_INTR;
+ r = -EINTR;
+ } else {
+ /* In case an interrupt came in that was triggered
+ * from userspace (like DEC), we need to check what
+ * to inject now! */
+ kvmppc_core_deliver_interrupts(vcpu);
+ }
+ }
+
+#ifdef EXIT_DEBUG
+ printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
+#endif
+
+ return r;
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ regs->pc = vcpu->arch.pc;
+ regs->cr = vcpu->arch.cr;
+ regs->ctr = vcpu->arch.ctr;
+ regs->lr = vcpu->arch.lr;
+ regs->xer = vcpu->arch.xer;
+ regs->msr = vcpu->arch.msr;
+ regs->srr0 = vcpu->arch.srr0;
+ regs->srr1 = vcpu->arch.srr1;
+ regs->pid = vcpu->arch.pid;
+ regs->sprg0 = vcpu->arch.sprg0;
+ regs->sprg1 = vcpu->arch.sprg1;
+ regs->sprg2 = vcpu->arch.sprg2;
+ regs->sprg3 = vcpu->arch.sprg3;
+	regs->sprg4 = vcpu->arch.sprg4;
+	regs->sprg5 = vcpu->arch.sprg5;
+	regs->sprg6 = vcpu->arch.sprg6;
+	regs->sprg7 = vcpu->arch.sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ regs->gpr[i] = vcpu->arch.gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ vcpu->arch.pc = regs->pc;
+ vcpu->arch.cr = regs->cr;
+ vcpu->arch.ctr = regs->ctr;
+ vcpu->arch.lr = regs->lr;
+ vcpu->arch.xer = regs->xer;
+ kvmppc_set_msr(vcpu, regs->msr);
+ vcpu->arch.srr0 = regs->srr0;
+ vcpu->arch.srr1 = regs->srr1;
+ vcpu->arch.sprg0 = regs->sprg0;
+ vcpu->arch.sprg1 = regs->sprg1;
+ vcpu->arch.sprg2 = regs->sprg2;
+ vcpu->arch.sprg3 = regs->sprg3;
+	vcpu->arch.sprg4 = regs->sprg4;
+	vcpu->arch.sprg5 = regs->sprg5;
+	vcpu->arch.sprg6 = regs->sprg6;
+	vcpu->arch.sprg7 = regs->sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
+ vcpu->arch.gpr[i] = regs->gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ sregs->pvr = vcpu->arch.pvr;
+
+ sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
+ sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
+ }
+ for (i = 0; i < 8; i++) {
+ sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
+ sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
+ }
+ }
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
+ int i;
+
+ kvmppc_set_pvr(vcpu, sregs->pvr);
+
+ vcpu3s->sdr1 = sregs->u.s.sdr1;
+ if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
+ for (i = 0; i < 64; i++) {
+ vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
+ sregs->u.s.ppc64.slb[i].slbe);
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
+ }
+ for (i = 0; i < 8; i++) {
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
+ (u32)sregs->u.s.ppc32.ibat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
+ (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
+ (u32)sregs->u.s.ppc32.dbat[i]);
+ kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
+ (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
+ }
+ }
+
+ /* Flush the MMU after messing with the segments */
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ return 0;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ struct kvm_memory_slot *memslot;
+ struct kvm_vcpu *vcpu;
+ ulong ga, ga_end;
+ int is_dirty = 0;
+ int r, n;
+
+ down_write(&kvm->slots_lock);
+
+ r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ if (r)
+ goto out;
+
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (is_dirty) {
+ memslot = &kvm->memslots[log->slot];
+
+ ga = memslot->base_gfn << PAGE_SHIFT;
+ ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+ kvm_for_each_vcpu(n, vcpu, kvm)
+ kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
+
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+
+ r = 0;
+out:
+ up_write(&kvm->slots_lock);
+ return r;
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s;
+ struct kvm_vcpu *vcpu;
+ int err;
+
+ vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO,
+ get_order(sizeof(struct kvmppc_vcpu_book3s)));
+ if (!vcpu_book3s) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ vcpu = &vcpu_book3s->vcpu;
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ vcpu->arch.host_retip = kvm_return_point;
+ vcpu->arch.host_msr = mfmsr();
+ /* default to book3s_64 (970fx) */
+ vcpu->arch.pvr = 0x3C0301;
+ kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ vcpu_book3s->slb_nr = 64;
+
+ /* remember where some real-mode handlers are */
+ vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
+ vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
+ vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+
+ vcpu->arch.shadow_msr = MSR_USER64;
+
+ err = __init_new_context();
+ if (err < 0)
+ goto free_vcpu;
+ vcpu_book3s->context_id = err;
+
+ vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
+ vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
+ vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+
+ return vcpu;
+
+free_vcpu:
+ free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+
+ __destroy_context(vcpu_book3s->context_id);
+ kvm_vcpu_uninit(vcpu);
+ free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
+}
+
+extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ /* No need to go into the guest when all we do is going out */
+ if (signal_pending(current)) {
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+ /* XXX we get called with irq disabled - change that! */
+ local_irq_enable();
+
+ ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
+
+ local_irq_disable();
+
+ return ret;
+}
+
+static int kvmppc_book3s_init(void)
+{
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
+}
+
+static void kvmppc_book3s_exit(void)
+{
+ kvm_exit();
+}
+
+module_init(kvmppc_book3s_init);
+module_exit(kvmppc_book3s_exit);
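
kvmppc_core_deliver_interrupts() above walks the pending_exceptions bitmap from the lowest (highest-priority) set bit upwards and clears the first priority it manages to deliver. A stand-alone user-space analogue of that bit-scanning pattern, with plain shifts instead of the kernel's __ffs()/find_next_bit() helpers so it compiles outside the kernel; the masked priority is made up for the example:

#include <stdio.h>

static int try_deliver(unsigned int prio)
{
	/* Pretend priority 3 is currently masked (e.g. MSR_EE clear). */
	return prio != 3;
}

int main(void)
{
	unsigned long pending = (1UL << 3) | (1UL << 5);	/* two queued */
	unsigned int prio;

	for (prio = 0; prio < 8 * sizeof(pending); prio++) {
		if (!(pending & (1UL << prio)))
			continue;
		if (try_deliver(prio)) {
			pending &= ~(1UL << prio);	/* clear_bit() equivalent */
			printf("delivered priority %u\n", prio);
			break;
		}
	}
	printf("still pending: 0x%lx\n", pending);	/* bit 3 remains set */
	return 0;
}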
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
new file mode 100644
index 00000000000..faf99f20d99
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -0,0 +1,372 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+
+/* #define DEBUG_MMU */
+/* #define DEBUG_MMU_PTE */
+/* #define DEBUG_MMU_PTE_IP 0xfff14c40 */
+
+#ifdef DEBUG_MMU
+#define dprintk(X...) printk(KERN_INFO X)
+#else
+#define dprintk(X...) do { } while(0)
+#endif
+
+#ifdef DEBUG_PTE
+#define dprintk_pte(X...) printk(KERN_INFO X)
+#else
+#define dprintk_pte(X...) do { } while(0)
+#endif
+
+#define PTEG_FLAG_ACCESSED 0x00000100
+#define PTEG_FLAG_DIRTY 0x00000080
+
+static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
+{
+#ifdef DEBUG_MMU_PTE_IP
+ return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
+#else
+ return true;
+#endif
+}
+
+static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data);
+
+static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
+{
+ return &vcpu_book3s->sr[(eaddr >> 28) & 0xf];
+}
+
+static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
+ bool data)
+{
+ struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr);
+ struct kvmppc_pte pte;
+
+ if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
+ return pte.vpage;
+
+ return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16);
+}
+
+static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
+{
+ kvmppc_set_msr(vcpu, 0);
+}
+
+static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s,
+ struct kvmppc_sr *sre, gva_t eaddr,
+ bool primary)
+{
+ u32 page, hash, pteg, htabmask;
+ hva_t r;
+
+ page = (eaddr & 0x0FFFFFFF) >> 12;
+ htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0;
+
+ hash = ((sre->vsid ^ page) << 6);
+ if (!primary)
+ hash = ~hash;
+ hash &= htabmask;
+
+ pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;
+
+ dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
+ vcpu_book3s->vcpu.arch.pc, eaddr, vcpu_book3s->sdr1, pteg,
+ sre->vsid);
+
+ r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+ if (kvm_is_error_hva(r))
+ return r;
+ return r | (pteg & ~PAGE_MASK);
+}
+
+static u32 kvmppc_mmu_book3s_32_get_ptem(struct kvmppc_sr *sre, gva_t eaddr,
+ bool primary)
+{
+ return ((eaddr & 0x0fffffff) >> 22) | (sre->vsid << 7) |
+ (primary ? 0 : 0x40) | 0x80000000;
+}
+
+static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_bat *bat;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (data)
+ bat = &vcpu_book3s->dbat[i];
+ else
+ bat = &vcpu_book3s->ibat[i];
+
+ if (vcpu->arch.msr & MSR_PR) {
+ if (!bat->vp)
+ continue;
+ } else {
+ if (!bat->vs)
+ continue;
+ }
+
+ if (check_debug_ip(vcpu))
+ {
+ dprintk_pte("%cBAT %02d: 0x%lx - 0x%x (0x%x)\n",
+ data ? 'd' : 'i', i, eaddr, bat->bepi,
+ bat->bepi_mask);
+ }
+ if ((eaddr & bat->bepi_mask) == bat->bepi) {
+ pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
+ pte->vpage = (eaddr >> 12) | VSID_BAT;
+ pte->may_read = bat->pp;
+ pte->may_write = bat->pp > 1;
+ pte->may_execute = true;
+ if (!pte->may_read) {
+ printk(KERN_INFO "BAT is not readable!\n");
+ continue;
+ }
+ if (!pte->may_write) {
+ /* let's treat r/o BATs as not-readable for now */
+ dprintk_pte("BAT is read-only!\n");
+ continue;
+ }
+
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data,
+ bool primary)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_sr *sre;
+ hva_t ptegp;
+ u32 pteg[16];
+ u64 ptem = 0;
+ int i;
+ int found = 0;
+
+ sre = find_sr(vcpu_book3s, eaddr);
+
+ dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28,
+ sre->vsid, sre->raw);
+
+ pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
+
+ ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary);
+ if (kvm_is_error_hva(ptegp)) {
+ printk(KERN_INFO "KVM: Invalid PTEG!\n");
+ goto no_page_found;
+ }
+
+ ptem = kvmppc_mmu_book3s_32_get_ptem(sre, eaddr, primary);
+
+ if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
+ printk(KERN_ERR "KVM: Can't copy data from 0x%lx!\n", ptegp);
+ goto no_page_found;
+ }
+
+ for (i=0; i<16; i+=2) {
+ if (ptem == pteg[i]) {
+ u8 pp;
+
+ pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
+ pp = pteg[i+1] & 3;
+
+ if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
+ (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+ pp |= 4;
+
+ pte->may_write = false;
+ pte->may_read = false;
+ pte->may_execute = true;
+ switch (pp) {
+ case 0:
+ case 1:
+ case 2:
+ case 6:
+ pte->may_write = true;
+ case 3:
+ case 5:
+ case 7:
+ pte->may_read = true;
+ break;
+ }
+
+ if ( !pte->may_read )
+ continue;
+
+ dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
+ pteg[i], pteg[i+1], pp);
+ found = 1;
+ break;
+ }
+ }
+
+ /* Update PTE C and A bits, so the guest's swapper knows we used the
+ page */
+ if (found) {
+ u32 oldpte = pteg[i+1];
+
+ if (pte->may_read)
+ pteg[i+1] |= PTEG_FLAG_ACCESSED;
+ if (pte->may_write)
+ pteg[i+1] |= PTEG_FLAG_DIRTY;
+ else
+ dprintk_pte("KVM: Mapping read-only page!\n");
+
+ /* Write back into the PTEG */
+ if (pteg[i+1] != oldpte)
+ copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+
+ return 0;
+ }
+
+no_page_found:
+
+ if (check_debug_ip(vcpu)) {
+ dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n",
+ to_book3s(vcpu)->sdr1, ptegp);
+ for (i=0; i<16; i+=2) {
+ dprintk_pte(" %02d: 0x%x - 0x%x (0x%llx)\n",
+ i, pteg[i], pteg[i+1], ptem);
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *pte, bool data)
+{
+ int r;
+
+ pte->eaddr = eaddr;
+ r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
+ if (r < 0)
+ r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
+ if (r < 0)
+ r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false);
+
+ return r;
+}
+
+
+static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
+{
+ return to_book3s(vcpu)->sr[srnum].raw;
+}
+
+static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
+ ulong value)
+{
+ struct kvmppc_sr *sre;
+
+ sre = &to_book3s(vcpu)->sr[srnum];
+
+ /* Flush any left-over shadows from the previous SR */
+
+ /* XXX Not necessary? */
+ /* kvmppc_mmu_pte_flush(vcpu, ((u64)sre->vsid) << 28, 0xf0000000ULL); */
+
+ /* And then put in the new SR */
+ sre->raw = value;
+ sre->vsid = (value & 0x0fffffff);
+ sre->Ks = (value & 0x40000000) ? true : false;
+ sre->Kp = (value & 0x20000000) ? true : false;
+ sre->nx = (value & 0x10000000) ? true : false;
+
+ /* Map the new segment */
+ kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
+}
+
+static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
+{
+ kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL);
+}
+
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+ u64 *vsid)
+{
+ /* In case we only have one of MSR_IR or MSR_DR set, let's put
+ that in the real-mode context (and hope RM doesn't access
+ high memory) */
+ switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ case 0:
+ *vsid = (VSID_REAL >> 16) | esid;
+ break;
+ case MSR_IR:
+ *vsid = (VSID_REAL_IR >> 16) | esid;
+ break;
+ case MSR_DR:
+ *vsid = (VSID_REAL_DR >> 16) | esid;
+ break;
+ case MSR_DR|MSR_IR:
+ {
+ ulong ea;
+ ea = esid << SID_SHIFT;
+ *vsid = find_sr(to_book3s(vcpu), ea)->vsid;
+ break;
+ }
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+static bool kvmppc_mmu_book3s_32_is_dcbz32(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
+
+void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
+
+ mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;
+ mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;
+ mmu->xlate = kvmppc_mmu_book3s_32_xlate;
+ mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;
+ mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;
+ mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;
+ mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;
+ mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32;
+
+ mmu->slbmte = NULL;
+ mmu->slbmfee = NULL;
+ mmu->slbmfev = NULL;
+ mmu->slbie = NULL;
+ mmu->slbia = NULL;
+}
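
kvmppc_mmu_book3s_32_mtsrin() above caches the fields of a 32-bit segment register: the VSID in the low bits plus the Ks/Kp/N protection bits. A small stand-alone decoder that reuses the same masks as the code above, for illustration only; the example value is arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned int sr = 0x60001234;	/* example SR value */

	unsigned int vsid = sr & 0x0fffffff;
	int ks = !!(sr & 0x40000000);
	int kp = !!(sr & 0x20000000);
	int nx = !!(sr & 0x10000000);

	printf("vsid=0x%x Ks=%d Kp=%d N=%d\n", vsid, ks, kp, nx);
	return 0;
}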
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
new file mode 100644
index 00000000000..1027eac6d47
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -0,0 +1,345 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/kvm_ppc.h>
+#include <asm/disassemble.h>
+#include <asm/kvm_book3s.h>
+#include <asm/reg.h>
+
+#define OP_19_XOP_RFID 18
+#define OP_19_XOP_RFI 50
+
+#define OP_31_XOP_MFMSR 83
+#define OP_31_XOP_MTMSR 146
+#define OP_31_XOP_MTMSRD 178
+#define OP_31_XOP_MTSRIN 242
+#define OP_31_XOP_TLBIEL 274
+#define OP_31_XOP_TLBIE 306
+#define OP_31_XOP_SLBMTE 402
+#define OP_31_XOP_SLBIE 434
+#define OP_31_XOP_SLBIA 498
+#define OP_31_XOP_MFSRIN 659
+#define OP_31_XOP_SLBMFEV 851
+#define OP_31_XOP_EIOIO 854
+#define OP_31_XOP_SLBMFEE 915
+
+/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
+#define OP_31_XOP_DCBZ 1010
+
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
+{
+ int emulated = EMULATE_DONE;
+
+ switch (get_op(inst)) {
+ case 19:
+ switch (get_xop(inst)) {
+ case OP_19_XOP_RFID:
+ case OP_19_XOP_RFI:
+ vcpu->arch.pc = vcpu->arch.srr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+ *advance = 0;
+ break;
+
+ default:
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+ case 31:
+ switch (get_xop(inst)) {
+ case OP_31_XOP_MFMSR:
+ vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
+ break;
+ case OP_31_XOP_MTMSRD:
+ {
+ ulong rs = vcpu->arch.gpr[get_rs(inst)];
+ if (inst & 0x10000) {
+ vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
+ vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
+ } else
+ kvmppc_set_msr(vcpu, rs);
+ break;
+ }
+ case OP_31_XOP_MTMSR:
+ kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
+ break;
+ case OP_31_XOP_MFSRIN:
+ {
+ int srnum;
+
+ srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
+ if (vcpu->arch.mmu.mfsrin) {
+ u32 sr;
+ sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
+ vcpu->arch.gpr[get_rt(inst)] = sr;
+ }
+ break;
+ }
+ case OP_31_XOP_MTSRIN:
+ vcpu->arch.mmu.mtsrin(vcpu,
+ (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
+ vcpu->arch.gpr[get_rs(inst)]);
+ break;
+ case OP_31_XOP_TLBIE:
+ case OP_31_XOP_TLBIEL:
+ {
+ bool large = (inst & 0x00200000) ? true : false;
+ ulong addr = vcpu->arch.gpr[get_rb(inst)];
+ vcpu->arch.mmu.tlbie(vcpu, addr, large);
+ break;
+ }
+ case OP_31_XOP_EIOIO:
+ break;
+ case OP_31_XOP_SLBMTE:
+ if (!vcpu->arch.mmu.slbmte)
+ return EMULATE_FAIL;
+
+ vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
+ vcpu->arch.gpr[get_rb(inst)]);
+ break;
+ case OP_31_XOP_SLBIE:
+ if (!vcpu->arch.mmu.slbie)
+ return EMULATE_FAIL;
+
+ vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
+ break;
+ case OP_31_XOP_SLBIA:
+ if (!vcpu->arch.mmu.slbia)
+ return EMULATE_FAIL;
+
+ vcpu->arch.mmu.slbia(vcpu);
+ break;
+ case OP_31_XOP_SLBMFEE:
+ if (!vcpu->arch.mmu.slbmfee) {
+ emulated = EMULATE_FAIL;
+ } else {
+ ulong t, rb;
+
+ rb = vcpu->arch.gpr[get_rb(inst)];
+ t = vcpu->arch.mmu.slbmfee(vcpu, rb);
+ vcpu->arch.gpr[get_rt(inst)] = t;
+ }
+ break;
+ case OP_31_XOP_SLBMFEV:
+ if (!vcpu->arch.mmu.slbmfev) {
+ emulated = EMULATE_FAIL;
+ } else {
+ ulong t, rb;
+
+ rb = vcpu->arch.gpr[get_rb(inst)];
+ t = vcpu->arch.mmu.slbmfev(vcpu, rb);
+ vcpu->arch.gpr[get_rt(inst)] = t;
+ }
+ break;
+ case OP_31_XOP_DCBZ:
+ {
+ ulong rb = vcpu->arch.gpr[get_rb(inst)];
+ ulong ra = 0;
+ ulong addr;
+ u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ if (get_ra(inst))
+ ra = vcpu->arch.gpr[get_ra(inst)];
+
+ addr = (ra + rb) & ~31ULL;
+ if (!(vcpu->arch.msr & MSR_SF))
+ addr &= 0xffffffff;
+
+ if (kvmppc_st(vcpu, addr, 32, zeros)) {
+ vcpu->arch.dear = addr;
+ vcpu->arch.fault_dear = addr;
+ to_book3s(vcpu)->dsisr = DSISR_PROTFAULT |
+ DSISR_ISSTORE;
+ kvmppc_book3s_queue_irqprio(vcpu,
+ BOOK3S_INTERRUPT_DATA_STORAGE);
+ kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL);
+ }
+
+ break;
+ }
+ default:
+ emulated = EMULATE_FAIL;
+ }
+ break;
+ default:
+ emulated = EMULATE_FAIL;
+ }
+
+ return emulated;
+}
+
+void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
+ u32 val)
+{
+ if (upper) {
+ /* Upper BAT */
+ u32 bl = (val >> 2) & 0x7ff;
+ bat->bepi_mask = (~bl << 17);
+ bat->bepi = val & 0xfffe0000;
+ bat->vs = (val & 2) ? 1 : 0;
+ bat->vp = (val & 1) ? 1 : 0;
+ bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
+ } else {
+ /* Lower BAT */
+ bat->brpn = val & 0xfffe0000;
+ bat->wimg = (val >> 3) & 0xf;
+ bat->pp = val & 3;
+ bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
+ }
+}
+
+static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_bat *bat;
+
+ switch (sprn) {
+ case SPRN_IBAT0U ... SPRN_IBAT3L:
+ bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
+ break;
+ case SPRN_IBAT4U ... SPRN_IBAT7L:
+ bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2];
+ break;
+ case SPRN_DBAT0U ... SPRN_DBAT3L:
+ bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
+ break;
+ case SPRN_DBAT4U ... SPRN_DBAT7L:
+ bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2];
+ break;
+ default:
+ BUG();
+ }
+
+ kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
+}
+
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+ int emulated = EMULATE_DONE;
+
+ switch (sprn) {
+ case SPRN_SDR1:
+ to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_DSISR:
+ to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_DAR:
+ vcpu->arch.dear = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_HIOR:
+ to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IBAT0U ... SPRN_IBAT3L:
+ case SPRN_IBAT4U ... SPRN_IBAT7L:
+ case SPRN_DBAT0U ... SPRN_DBAT3L:
+ case SPRN_DBAT4U ... SPRN_DBAT7L:
+ kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
+ /* BAT writes happen so rarely that we're ok to flush
+ * everything here */
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ break;
+ case SPRN_HID0:
+ to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_HID1:
+ to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_HID2:
+ to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_HID4:
+ to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_HID5:
+ to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
+ /* guest HID5 set can change is_dcbz32 */
+ if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+ (mfmsr() & MSR_HV))
+ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+ break;
+ case SPRN_ICTC:
+ case SPRN_THRM1:
+ case SPRN_THRM2:
+ case SPRN_THRM3:
+ case SPRN_CTRLF:
+ case SPRN_CTRLT:
+ break;
+ default:
+ printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
+#ifndef DEBUG_SPR
+ emulated = EMULATE_FAIL;
+#endif
+ break;
+ }
+
+ return emulated;
+}
+
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+ int emulated = EMULATE_DONE;
+
+ switch (sprn) {
+ case SPRN_SDR1:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
+ break;
+ case SPRN_DSISR:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
+ break;
+ case SPRN_DAR:
+ vcpu->arch.gpr[rt] = vcpu->arch.dear;
+ break;
+ case SPRN_HIOR:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
+ break;
+ case SPRN_HID0:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
+ break;
+ case SPRN_HID1:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
+ break;
+ case SPRN_HID2:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
+ break;
+ case SPRN_HID4:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
+ break;
+ case SPRN_HID5:
+ vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
+ break;
+ case SPRN_THRM1:
+ case SPRN_THRM2:
+ case SPRN_THRM3:
+ case SPRN_CTRLF:
+ case SPRN_CTRLT:
+ vcpu->arch.gpr[rt] = 0;
+ break;
+ default:
+ printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
+#ifndef DEBUG_SPR
+ emulated = EMULATE_FAIL;
+#endif
+ break;
+ }
+
+ return emulated;
+}
+
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
new file mode 100644
index 00000000000..5b2db38ed86
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_exports.c
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <linux/module.h>
+#include <asm/kvm_book3s.h>
+
+EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
+EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
new file mode 100644
index 00000000000..7b55d8094c8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -0,0 +1,392 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+
+#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
+#define ULONG_SIZE 8
+#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+
+.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
+ ld \tmp_reg, (PACA_EXMC+\offset)(r13)
+ std \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
+.endm
+
+.macro DISABLE_INTERRUPTS
+	mfmsr	r0			/* Get current MSR */
+	rldicl	r0,r0,48,1		/* Rotate MSR_EE into bit 0 and clear it */
+	rotldi	r0,r0,16		/* Rotate the remaining bits back into place */
+	mtmsrd	r0,1			/* Write back EE/RI only (L=1) */
+.endm
+
+/*****************************************************************************
+ * *
+ * Guest entry / exit code that is in kernel module memory (highmem) *
+ * *
+ ****************************************************************************/
+
+/* Registers:
+ * r3: kvm_run pointer
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_entry)
+
+kvm_start_entry:
+ /* Write correct stack frame */
+ mflr r0
+ std r0,16(r1)
+
+ /* Save host state to the stack */
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+
+ /* Save r3 (kvm_run) and r4 (vcpu) */
+ SAVE_2GPRS(3, r1)
+
+ /* Save non-volatile registers (r14 - r31) */
+ SAVE_NVGPRS(r1)
+
+ /* Save LR */
+ mflr r14
+ std r14, _LINK(r1)
+
+/* XXX optimize non-volatile loading away */
+kvm_start_lightweight:
+
+ DISABLE_INTERRUPTS
+
+ /* Save R1/R2 in the PACA */
+ std r1, PACAR1(r13)
+ std r2, (PACA_EXMC+EX_SRR0)(r13)
+ ld r3, VCPU_HIGHMEM_HANDLER(r4)
+ std r3, PACASAVEDMSR(r13)
+
+ /* Load non-volatile guest state from the vcpu */
+ ld r14, VCPU_GPR(r14)(r4)
+ ld r15, VCPU_GPR(r15)(r4)
+ ld r16, VCPU_GPR(r16)(r4)
+ ld r17, VCPU_GPR(r17)(r4)
+ ld r18, VCPU_GPR(r18)(r4)
+ ld r19, VCPU_GPR(r19)(r4)
+ ld r20, VCPU_GPR(r20)(r4)
+ ld r21, VCPU_GPR(r21)(r4)
+ ld r22, VCPU_GPR(r22)(r4)
+ ld r23, VCPU_GPR(r23)(r4)
+ ld r24, VCPU_GPR(r24)(r4)
+ ld r25, VCPU_GPR(r25)(r4)
+ ld r26, VCPU_GPR(r26)(r4)
+ ld r27, VCPU_GPR(r27)(r4)
+ ld r28, VCPU_GPR(r28)(r4)
+ ld r29, VCPU_GPR(r29)(r4)
+ ld r30, VCPU_GPR(r30)(r4)
+ ld r31, VCPU_GPR(r31)(r4)
+
+ ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
+ ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
+
+ ld r3, VCPU_TRAMPOLINE_ENTER(r4)
+ mtsrr0 r3
+
+ LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+ mtsrr1 r3
+
+ /* Load guest state in the respective registers */
+ lwz r3, VCPU_CR(r4) /* r3 = vcpu->arch.cr */
+ stw r3, (PACA_EXMC + EX_CCR)(r13)
+
+ ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */
+ mtctr r3 /* CTR = r3 */
+
+ ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
+ mtlr r3 /* LR = r3 */
+
+ ld r3, VCPU_XER(r4) /* r3 = vcpu->arch.xer */
+ std r3, (PACA_EXMC + EX_R3)(r13)
+
+ /* Some guests may need to have dcbz set to 32 byte length.
+ *
+ * Usually we ensure that by patching the guest's instructions
+ * to trap on dcbz and emulate it in the hypervisor.
+ *
+ * If we can, we should tell the CPU to use 32 byte dcbz though,
+ * because that's a lot faster.
+ */
+
+ ld r3, VCPU_HFLAGS(r4)
+ rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
+ beq no_dcbz32_on
+
+ mfspr r3,SPRN_HID5
+ ori r3, r3, 0x80 /* XXX HID5_dcbz32 = 0x80 */
+ mtspr SPRN_HID5,r3
+
+no_dcbz32_on:
+ /* Load guest GPRs */
+
+ ld r3, VCPU_GPR(r9)(r4)
+ std r3, (PACA_EXMC + EX_R9)(r13)
+ ld r3, VCPU_GPR(r10)(r4)
+ std r3, (PACA_EXMC + EX_R10)(r13)
+ ld r3, VCPU_GPR(r11)(r4)
+ std r3, (PACA_EXMC + EX_R11)(r13)
+ ld r3, VCPU_GPR(r12)(r4)
+ std r3, (PACA_EXMC + EX_R12)(r13)
+ ld r3, VCPU_GPR(r13)(r4)
+ std r3, (PACA_EXMC + EX_R13)(r13)
+
+ ld r0, VCPU_GPR(r0)(r4)
+ ld r1, VCPU_GPR(r1)(r4)
+ ld r2, VCPU_GPR(r2)(r4)
+ ld r3, VCPU_GPR(r3)(r4)
+ ld r5, VCPU_GPR(r5)(r4)
+ ld r6, VCPU_GPR(r6)(r4)
+ ld r7, VCPU_GPR(r7)(r4)
+ ld r8, VCPU_GPR(r8)(r4)
+ ld r4, VCPU_GPR(r4)(r4)
+
+ /* This sets the Magic value for the trampoline */
+
+ li r11, 1
+ stb r11, PACA_KVM_IN_GUEST(r13)
+
+	/* Jump to SLB patching handler and into our guest */
+ RFI
+
+/*
+ * This is the handler in module memory. It gets jumped to from the
+ * lowmem trampoline code, so it's basically the guest exit code.
+ *
+ */
+
+.global kvmppc_handler_highmem
+kvmppc_handler_highmem:
+
+ /*
+ * Register usage at this point:
+ *
+ * R00 = guest R13
+ * R01 = host R1
+ * R02 = host R2
+ * R10 = guest PC
+ * R11 = guest MSR
+ * R12 = exit handler id
+ * R13 = PACA
+ * PACA.exmc.R9 = guest R1
+ * PACA.exmc.R10 = guest R10
+ * PACA.exmc.R11 = guest R11
+ * PACA.exmc.R12 = guest R12
+ * PACA.exmc.R13 = guest R2
+ * PACA.exmc.DAR = guest DAR
+ * PACA.exmc.DSISR = guest DSISR
+ * PACA.exmc.LR = guest instruction
+ * PACA.exmc.CCR = guest CR
+ * PACA.exmc.SRR0 = guest R0
+ *
+ */
+
+ std r3, (PACA_EXMC+EX_R3)(r13)
+
+ /* save the exit id in R3 */
+ mr r3, r12
+
+ /* R12 = vcpu */
+ ld r12, GPR4(r1)
+
+ /* Now save the guest state */
+
+ std r0, VCPU_GPR(r13)(r12)
+ std r4, VCPU_GPR(r4)(r12)
+ std r5, VCPU_GPR(r5)(r12)
+ std r6, VCPU_GPR(r6)(r12)
+ std r7, VCPU_GPR(r7)(r12)
+ std r8, VCPU_GPR(r8)(r12)
+ std r9, VCPU_GPR(r9)(r12)
+
+ /* get registers from PACA */
+ mfpaca r5, r0, EX_SRR0, r12
+ mfpaca r5, r3, EX_R3, r12
+ mfpaca r5, r1, EX_R9, r12
+ mfpaca r5, r10, EX_R10, r12
+ mfpaca r5, r11, EX_R11, r12
+ mfpaca r5, r12, EX_R12, r12
+ mfpaca r5, r2, EX_R13, r12
+
+ lwz r5, (PACA_EXMC+EX_LR)(r13)
+ stw r5, VCPU_LAST_INST(r12)
+
+ lwz r5, (PACA_EXMC+EX_CCR)(r13)
+ stw r5, VCPU_CR(r12)
+
+ ld r5, VCPU_HFLAGS(r12)
+ rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
+ beq no_dcbz32_off
+
+ mfspr r5,SPRN_HID5
+ rldimi r5,r5,6,56
+ mtspr SPRN_HID5,r5
+
+no_dcbz32_off:
+
+ /* XXX maybe skip on lightweight? */
+ std r14, VCPU_GPR(r14)(r12)
+ std r15, VCPU_GPR(r15)(r12)
+ std r16, VCPU_GPR(r16)(r12)
+ std r17, VCPU_GPR(r17)(r12)
+ std r18, VCPU_GPR(r18)(r12)
+ std r19, VCPU_GPR(r19)(r12)
+ std r20, VCPU_GPR(r20)(r12)
+ std r21, VCPU_GPR(r21)(r12)
+ std r22, VCPU_GPR(r22)(r12)
+ std r23, VCPU_GPR(r23)(r12)
+ std r24, VCPU_GPR(r24)(r12)
+ std r25, VCPU_GPR(r25)(r12)
+ std r26, VCPU_GPR(r26)(r12)
+ std r27, VCPU_GPR(r27)(r12)
+ std r28, VCPU_GPR(r28)(r12)
+ std r29, VCPU_GPR(r29)(r12)
+ std r30, VCPU_GPR(r30)(r12)
+ std r31, VCPU_GPR(r31)(r12)
+
+ /* Restore non-volatile host registers (r14 - r31) */
+ REST_NVGPRS(r1)
+
+ /* Save guest PC (R10) */
+ std r10, VCPU_PC(r12)
+
+ /* Save guest msr (R11) */
+ std r11, VCPU_SHADOW_MSR(r12)
+
+ /* Save guest CTR (in R12) */
+ mfctr r5
+ std r5, VCPU_CTR(r12)
+
+ /* Save guest LR */
+ mflr r5
+ std r5, VCPU_LR(r12)
+
+ /* Save guest XER */
+ mfxer r5
+ std r5, VCPU_XER(r12)
+
+ /* Save guest DAR */
+ ld r5, (PACA_EXMC+EX_DAR)(r13)
+ std r5, VCPU_FAULT_DEAR(r12)
+
+ /* Save guest DSISR */
+ lwz r5, (PACA_EXMC+EX_DSISR)(r13)
+ std r5, VCPU_FAULT_DSISR(r12)
+
+ /* Restore host msr -> SRR1 */
+ ld r7, VCPU_HOST_MSR(r12)
+ mtsrr1 r7
+
+ /* Restore host IP -> SRR0 */
+ ld r6, VCPU_HOST_RETIP(r12)
+ mtsrr0 r6
+
+ /*
+ * For some interrupts, we need to call the real Linux
+ * handler, so it can do work for us. This has to happen
+ * as if the interrupt arrived from the kernel though,
+ * so let's fake it here where most state is restored.
+ *
+ * Call Linux for hardware interrupts/decrementer
+ * r3 = address of interrupt handler (exit reason)
+ */
+
+ cmpwi r3, BOOK3S_INTERRUPT_EXTERNAL
+ beq call_linux_handler
+ cmpwi r3, BOOK3S_INTERRUPT_DECREMENTER
+ beq call_linux_handler
+
+	/* Back to Interruptible Mode! (goto kvm_return_point) */
+ RFI
+
+call_linux_handler:
+
+ /*
+ * If we land here we need to jump back to the handler we
+ * came from.
+ *
+ * We have a page that we can access from real mode, so let's
+ * jump back to that and use it as a trampoline to get back into the
+ * interrupt handler!
+ *
+ * R3 still contains the exit code,
+ * R6 VCPU_HOST_RETIP and
+ * R7 VCPU_HOST_MSR
+ */
+
+ mtlr r3
+
+ ld r5, VCPU_TRAMPOLINE_LOWMEM(r12)
+ mtsrr0 r5
+ LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+ mtsrr1 r5
+
+ RFI
+
+.global kvm_return_point
+kvm_return_point:
+
+ /* Jump back to lightweight entry if we're supposed to */
+ /* go back into the guest */
+ mr r5, r3
+ /* Restore r3 (kvm_run) and r4 (vcpu) */
+ REST_2GPRS(3, r1)
+ bl KVMPPC_HANDLE_EXIT
+
+#if 0 /* XXX get lightweight exits back */
+ cmpwi r3, RESUME_GUEST
+ bne kvm_exit_heavyweight
+
+ /* put VCPU and KVM_RUN back into place and roll again! */
+ REST_2GPRS(3, r1)
+ b kvm_start_lightweight
+
+kvm_exit_heavyweight:
+ /* Restore non-volatile host registers */
+ ld r14, _LINK(r1)
+ mtlr r14
+ REST_NVGPRS(r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+#else
+ ld r4, _LINK(r1)
+ mtlr r4
+
+ cmpwi r3, RESUME_GUEST
+ bne kvm_exit_heavyweight
+
+ REST_2GPRS(3, r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+
+ b kvm_start_entry
+
+kvm_exit_heavyweight:
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+#endif
+
+ blr
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
new file mode 100644
index 00000000000..5598f88f142
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -0,0 +1,478 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+
+/* #define DEBUG_MMU */
+
+#ifdef DEBUG_MMU
+#define dprintk(X...) printk(KERN_INFO X)
+#else
+#define dprintk(X...) do { } while(0)
+#endif
+
+static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
+{
+ kvmppc_set_msr(vcpu, MSR_SF);
+}
+
+static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
+ struct kvmppc_vcpu_book3s *vcpu_book3s,
+ gva_t eaddr)
+{
+ int i;
+ u64 esid = GET_ESID(eaddr);
+ u64 esid_1t = GET_ESID_1T(eaddr);
+
+ for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+ u64 cmp_esid = esid;
+
+ if (!vcpu_book3s->slb[i].valid)
+ continue;
+
+ if (vcpu_book3s->slb[i].large)
+ cmp_esid = esid_1t;
+
+ if (vcpu_book3s->slb[i].esid == cmp_esid)
+ return &vcpu_book3s->slb[i];
+ }
+
+ dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
+ eaddr, esid, esid_1t);
+ for (i = 0; i < vcpu_book3s->slb_nr; i++) {
+ if (vcpu_book3s->slb[i].vsid)
+ dprintk(" %d: %c%c %llx %llx\n", i,
+ vcpu_book3s->slb[i].valid ? 'v' : ' ',
+ vcpu_book3s->slb[i].large ? 'l' : ' ',
+ vcpu_book3s->slb[i].esid,
+ vcpu_book3s->slb[i].vsid);
+ }
+
+ return NULL;
+}
+
+static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
+ bool data)
+{
+ struct kvmppc_slb *slb;
+
+ slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr);
+ if (!slb)
+ return 0;
+
+ if (slb->large)
+ return (((u64)eaddr >> 12) & 0xfffffff) |
+ (((u64)slb->vsid) << 28);
+
+ return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
+}
+
+static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
+{
+ return slbe->large ? 24 : 12;
+}
+
+static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
+{
+ int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
+ return ((eaddr & 0xfffffff) >> p);
+}
+
+static hva_t kvmppc_mmu_book3s_64_get_pteg(
+ struct kvmppc_vcpu_book3s *vcpu_book3s,
+ struct kvmppc_slb *slbe, gva_t eaddr,
+ bool second)
+{
+ u64 hash, pteg, htabsize;
+ u32 page;
+ hva_t r;
+
+ page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
+ htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);
+
+ hash = slbe->vsid ^ page;
+ if (second)
+ hash = ~hash;
+ hash &= ((1ULL << 39ULL) - 1ULL);
+ hash &= htabsize;
+ hash <<= 7ULL;
+
+ pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
+ pteg |= hash;
+
+ dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
+ page, vcpu_book3s->sdr1, pteg, slbe->vsid);
+
+ r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
+ if (kvm_is_error_hva(r))
+ return r;
+ return r | (pteg & ~PAGE_MASK);
+}
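For readers following the hash arithmetic, this is a minimal standalone sketch of the PTEG offset computation above; the SDR1, VSID and page values are invented for illustration only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sdr1 = 0x0000000001000005ULL;	/* HTABORG at 16 MB, HTABSIZE=5 */
	uint64_t vsid = 0x123456ULL;
	uint64_t page = 0x0abcULL;		/* (eaddr & 0xfffffff) >> 12 */
	uint64_t htabsize = (1ULL << ((sdr1 & 0x1f) + 11)) - 1;
	uint64_t hash = vsid ^ page;

	hash &= (1ULL << 39) - 1;	/* primary hash is 39 bits wide */
	hash &= htabsize;		/* mask to the configured table size */
	hash <<= 7;			/* each PTEG is 128 bytes */

	printf("pteg = 0x%llx\n",
	       (unsigned long long)((sdr1 & 0xfffffffffffc0000ULL) | hash));
	return 0;
}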
+
+static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
+{
+ int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
+ u64 avpn;
+
+ avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
+ avpn |= slbe->vsid << (28 - p);
+
+ if (p < 24)
+ avpn >>= ((80 - p) - 56) - 8;
+ else
+ avpn <<= 8;
+
+ return avpn;
+}
+
+static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_slb *slbe;
+ hva_t ptegp;
+ u64 pteg[16];
+ u64 avpn = 0;
+ int i;
+ u8 key = 0;
+ bool found = false;
+ bool perm_err = false;
+ int second = 0;
+
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
+ if (!slbe)
+ goto no_seg_found;
+
+do_second:
+ ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
+ if (kvm_is_error_hva(ptegp))
+ goto no_page_found;
+
+ avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
+
+ if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
+ printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
+ goto no_page_found;
+ }
+
+ if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
+ key = 4;
+ else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
+ key = 4;
+
+ for (i=0; i<16; i+=2) {
+ u64 v = pteg[i];
+ u64 r = pteg[i+1];
+
+ /* Valid check */
+ if (!(v & HPTE_V_VALID))
+ continue;
+ /* Hash check */
+ if ((v & HPTE_V_SECONDARY) != second)
+ continue;
+
+ /* AVPN compare */
+ if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) {
+ u8 pp = (r & HPTE_R_PP) | key;
+ int eaddr_mask = 0xFFF;
+
+ gpte->eaddr = eaddr;
+ gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
+ eaddr,
+ data);
+ if (slbe->large)
+ eaddr_mask = 0xFFFFFF;
+ gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
+ gpte->may_execute = ((r & HPTE_R_N) ? false : true);
+ gpte->may_read = false;
+ gpte->may_write = false;
+
+ switch (pp) {
+ case 0:
+ case 1:
+ case 2:
+ case 6:
+ gpte->may_write = true;
+ /* fall through */
+ case 3:
+ case 5:
+ case 7:
+ gpte->may_read = true;
+ break;
+ }
+
+ if (!gpte->may_read) {
+ perm_err = true;
+ continue;
+ }
+
+ dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
+ "-> 0x%llx\n",
+ eaddr, avpn, gpte->vpage, gpte->raddr);
+ found = true;
+ break;
+ }
+ }
+
+ /* Update PTE R and C bits, so the guest's swapper knows we used the
+ * page */
+ if (found) {
+ u32 oldr = pteg[i+1];
+
+ if (gpte->may_read) {
+ /* Set the accessed flag */
+ pteg[i+1] |= HPTE_R_R;
+ }
+ if (gpte->may_write) {
+ /* Set the dirty flag */
+ pteg[i+1] |= HPTE_R_C;
+ } else {
+ dprintk("KVM: Mapping read-only page!\n");
+ }
+
+ /* Write back into the PTEG */
+ if (pteg[i+1] != oldr)
+ copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+
+ return 0;
+ } else {
+ dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
+ "ptegp=0x%lx)\n",
+ eaddr, to_book3s(vcpu)->sdr1, ptegp);
+ for (i = 0; i < 16; i += 2)
+ dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n",
+ i, pteg[i], pteg[i+1], avpn);
+
+ if (!second) {
+ second = HPTE_V_SECONDARY;
+ goto do_second;
+ }
+ }
+
+
+no_page_found:
+
+
+ if (perm_err)
+ return -EPERM;
+
+ return -ENOENT;
+
+no_seg_found:
+
+ dprintk("KVM MMU: Trigger segment fault\n");
+ return -EINVAL;
+}
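A small standalone sketch of the pp|key to access-rights mapping that the switch statement above implements; the key value of 4 corresponds to the Ks/Kp handling earlier in the function:

#include <stdio.h>

int main(void)
{
	int pp;

	for (pp = 0; pp < 8; pp++) {
		/* pp values 0, 1, 2 and 6 allow writing (and therefore reading) */
		int may_write = (pp == 0 || pp == 1 || pp == 2 || pp == 6);
		/* pp values 3, 5 and 7 are read-only; pp == 4 allows no access */
		int may_read  = may_write || pp == 3 || pp == 5 || pp == 7;

		printf("pp|key=%d read=%d write=%d\n", pp, may_read, may_write);
	}
	return 0;
}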
+
+static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s;
+ u64 esid, esid_1t;
+ int slb_nr;
+ struct kvmppc_slb *slbe;
+
+ dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
+
+ vcpu_book3s = to_book3s(vcpu);
+
+ esid = GET_ESID(rb);
+ esid_1t = GET_ESID_1T(rb);
+ slb_nr = rb & 0xfff;
+
+ if (slb_nr > vcpu_book3s->slb_nr)
+ return;
+
+ slbe = &vcpu_book3s->slb[slb_nr];
+
+ slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
+ slbe->esid = slbe->large ? esid_1t : esid;
+ slbe->vsid = rs >> 12;
+ slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
+ slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
+ slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0;
+ slbe->nx = (rs & SLB_VSID_N) ? 1 : 0;
+ slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
+
+ slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
+ slbe->origv = rs;
+
+ /* Map the new segment */
+ kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
+}
+
+static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_slb *slbe;
+
+ if (slb_nr > vcpu_book3s->slb_nr)
+ return 0;
+
+ slbe = &vcpu_book3s->slb[slb_nr];
+
+ return slbe->orige;
+}
+
+static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_slb *slbe;
+
+ if (slb_nr > vcpu_book3s->slb_nr)
+ return 0;
+
+ slbe = &vcpu_book3s->slb[slb_nr];
+
+ return slbe->origv;
+}
+
+static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ struct kvmppc_slb *slbe;
+
+ dprintk("KVM MMU: slbie(0x%llx)\n", ea);
+
+ slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea);
+
+ if (!slbe)
+ return;
+
+ dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);
+
+ slbe->valid = false;
+
+ kvmppc_mmu_map_segment(vcpu, ea);
+}
+
+static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ int i;
+
+ dprintk("KVM MMU: slbia()\n");
+
+ for (i = 1; i < vcpu_book3s->slb_nr; i++)
+ vcpu_book3s->slb[i].valid = false;
+
+ if (vcpu->arch.msr & MSR_IR) {
+ kvmppc_mmu_flush_segments(vcpu);
+ kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
+ }
+}
+
+static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
+ ulong value)
+{
+ u64 rb = 0, rs = 0;
+
+ /* ESID = srnum */
+ rb |= (srnum & 0xf) << 28;
+ /* Set the valid bit */
+ rb |= 1 << 27;
+ /* Index = ESID */
+ rb |= srnum;
+
+ /* VSID = VSID */
+ rs |= (value & 0xfffffff) << 12;
+ /* flags = flags */
+ rs |= ((value >> 27) & 0xf) << 9;
+
+ kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
+}
+
+static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
+ bool large)
+{
+ u64 mask = 0xFFFFFFFFFULL;
+
+ dprintk("KVM MMU: tlbie(0x%lx)\n", va);
+
+ if (large)
+ mask = 0xFFFFFF000ULL;
+ kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
+}
+
+static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+ u64 *vsid)
+{
+ switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ case 0:
+ *vsid = (VSID_REAL >> 16) | esid;
+ break;
+ case MSR_IR:
+ *vsid = (VSID_REAL_IR >> 16) | esid;
+ break;
+ case MSR_DR:
+ *vsid = (VSID_REAL_DR >> 16) | esid;
+ break;
+ case MSR_DR|MSR_IR:
+ {
+ ulong ea;
+ struct kvmppc_slb *slb;
+ ea = esid << SID_SHIFT;
+ slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ if (slb)
+ *vsid = slb->vsid;
+ else
+ return -ENOENT;
+
+ break;
+ }
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
+{
+ return (to_book3s(vcpu)->hid[5] & 0x80);
+}
+
+void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
+
+ mmu->mfsrin = NULL;
+ mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
+ mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
+ mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
+ mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
+ mmu->slbie = kvmppc_mmu_book3s_64_slbie;
+ mmu->slbia = kvmppc_mmu_book3s_64_slbia;
+ mmu->xlate = kvmppc_mmu_book3s_64_xlate;
+ mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
+ mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
+ mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
+ mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
+ mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;
+
+ vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
+}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
new file mode 100644
index 00000000000..f2899b297ff
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ * Kevin Wolf <mail@kevin-wolf.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/machdep.h>
+#include <asm/mmu_context.h>
+#include <asm/hw_irq.h>
+
+#define PTE_SIZE 12
+#define VSID_ALL 0
+
+/* #define DEBUG_MMU */
+/* #define DEBUG_SLB */
+
+#ifdef DEBUG_MMU
+#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_mmu(a, ...) do { } while(0)
+#endif
+
+#ifdef DEBUG_SLB
+#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
+#else
+#define dprintk_slb(a, ...) do { } while(0)
+#endif
+
+static void invalidate_pte(struct hpte_cache *pte)
+{
+	dprintk_mmu("KVM: Flushing SPT: 0x%llx (0x%llx) -> 0x%llx\n",
+		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
+
+ ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+ MMU_PAGE_4K, MMU_SEGSIZE_256M,
+ false);
+ pte->host_va = 0;
+ kvm_release_pfn_dirty(pte->pfn);
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
+ vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ guest_ea &= ea_mask;
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.eaddr & ea_mask) == guest_ea) {
+ invalidate_pte(pte);
+ }
+ }
+
+ /* Doing a complete flush -> start from scratch */
+ if (!ea_mask)
+ vcpu->arch.hpte_cache_offset = 0;
+}
+
+void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
+{
+ int i;
+
+ dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
+ vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ guest_vp &= vp_mask;
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.vpage & vp_mask) == guest_vp) {
+ invalidate_pte(pte);
+ }
+ }
+}
+
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+{
+ int i;
+
+	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx - 0x%llx\n",
+		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
+ BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
+
+ for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if ((pte->pte.raddr >= pa_start) &&
+ (pte->pte.raddr < pa_end)) {
+ invalidate_pte(pte);
+ }
+ }
+}
+
+struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
+{
+ int i;
+ u64 guest_vp;
+
+ guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
+ for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
+ struct hpte_cache *pte;
+
+ pte = &vcpu->arch.hpte_cache[i];
+ if (!pte->host_va)
+ continue;
+
+ if (pte->pte.vpage == guest_vp)
+ return &pte->pte;
+ }
+
+ return NULL;
+}
+
+static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+
+ return vcpu->arch.hpte_cache_offset++;
+}
+
+/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
+ * a hash, so we don't waste cycles on looping */
+static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
+ ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+}
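A standalone sketch of the VSID fold above; SID_MAP_BITS = 9 is an assumption derived from the "512 gvsid->hvsid entries" comment, not a value taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define SID_MAP_BITS	9
#define SID_MAP_MASK	((1 << SID_MAP_BITS) - 1)

static uint16_t sid_hash(uint64_t gvsid)
{
	uint16_t h = 0;
	int i;

	/* XOR together eight 9-bit slices of the guest VSID */
	for (i = 0; i < 8; i++)
		h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return h;
}

int main(void)
{
	printf("map index: 0x%x\n", sid_hash(0x123456789abcdefULL));
	return 0;
}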
+
+
+static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ struct kvmppc_sid_map *map;
+ u16 sid_map_mask;
+
+ if (vcpu->arch.msr & MSR_PR)
+ gvsid |= VSID_PR;
+
+ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
+ map = &to_book3s(vcpu)->sid_map[sid_map_mask];
+ if (map->guest_vsid == gvsid) {
+ dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
+ gvsid, map->host_vsid);
+ return map;
+ }
+
+ map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
+ if (map->guest_vsid == gvsid) {
+ dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
+ gvsid, map->host_vsid);
+ return map;
+ }
+
+ dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid);
+ return NULL;
+}
+
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+{
+ pfn_t hpaddr;
+ ulong hash, hpteg, va;
+ u64 vsid;
+ int ret;
+ int rflags = 0x192;
+ int vflags = 0;
+ int attempt = 0;
+ struct kvmppc_sid_map *map;
+
+ /* Get host physical address for gpa */
+ hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+ if (kvm_is_error_hva(hpaddr)) {
+ printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr);
+ return -EINVAL;
+ }
+ hpaddr <<= PAGE_SHIFT;
+#if PAGE_SHIFT == 12
+#elif PAGE_SHIFT == 16
+ hpaddr |= orig_pte->raddr & 0xf000;
+#else
+#error Unknown page size
+#endif
+
+ /* and write the mapping ea -> hpa into the pt */
+ vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
+ map = find_sid_vsid(vcpu, vsid);
+ if (!map) {
+ kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
+ map = find_sid_vsid(vcpu, vsid);
+ }
+ BUG_ON(!map);
+
+ vsid = map->host_vsid;
+ va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+
+ if (!orig_pte->may_write)
+ rflags |= HPTE_R_PP;
+ else
+ mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+
+ if (!orig_pte->may_execute)
+ rflags |= HPTE_R_N;
+
+ hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
+
+map_again:
+ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+
+ /* In case we tried normal mapping already, let's nuke old entries */
+ if (attempt > 1)
+ if (ppc_md.hpte_remove(hpteg) < 0)
+ return -1;
+
+ ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+
+ if (ret < 0) {
+ /* If we couldn't map a primary PTE, try a secondary */
+#ifdef USE_SECONDARY
+ hash = ~hash;
+ attempt++;
+ if (attempt % 2)
+ vflags = HPTE_V_SECONDARY;
+ else
+ vflags = 0;
+#else
+ attempt = 2;
+#endif
+ goto map_again;
+ } else {
+ int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
+ struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
+
+ dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n",
+ ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
+ (rflags & HPTE_R_N) ? '-' : 'x',
+ orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);
+
+ pte->slot = hpteg + (ret & 7);
+ pte->host_va = va;
+ pte->pte = *orig_pte;
+ pte->pfn = hpaddr >> PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
+static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
+{
+ struct kvmppc_sid_map *map;
+ struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+ u16 sid_map_mask;
+ static int backwards_map = 0;
+
+ if (vcpu->arch.msr & MSR_PR)
+ gvsid |= VSID_PR;
+
+	/* Colliding guest VSIDs would keep evicting each other from the
+	 * same slot, so alternate between the two candidate slots */
+
+ sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
+ if (backwards_map)
+ sid_map_mask = SID_MAP_MASK - sid_map_mask;
+
+ map = &to_book3s(vcpu)->sid_map[sid_map_mask];
+
+ /* Make sure we're taking the other map next time */
+ backwards_map = !backwards_map;
+
+ /* Uh-oh ... out of mappings. Let's flush! */
+ if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
+ vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+ memset(vcpu_book3s->sid_map, 0,
+ sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+ kvmppc_mmu_flush_segments(vcpu);
+ }
+ map->host_vsid = vcpu_book3s->vsid_next++;
+
+ map->guest_vsid = gvsid;
+ map->valid = true;
+
+ return map;
+}
+
+static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
+{
+ int i;
+ int max_slb_size = 64;
+ int found_inval = -1;
+ int r;
+
+ if (!get_paca()->kvm_slb_max)
+ get_paca()->kvm_slb_max = 1;
+
+ /* Are we overwriting? */
+ for (i = 1; i < get_paca()->kvm_slb_max; i++) {
+ if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
+ found_inval = i;
+ else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
+ return i;
+ }
+
+ /* Found a spare entry that was invalidated before */
+ if (found_inval > 0)
+ return found_inval;
+
+ /* No spare invalid entry, so create one */
+
+ if (mmu_slb_size < 64)
+ max_slb_size = mmu_slb_size;
+
+ /* Overflowing -> purge */
+ if ((get_paca()->kvm_slb_max) == max_slb_size)
+ kvmppc_mmu_flush_segments(vcpu);
+
+ r = get_paca()->kvm_slb_max;
+ get_paca()->kvm_slb_max++;
+
+ return r;
+}
+
+int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
+{
+ u64 esid = eaddr >> SID_SHIFT;
+ u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
+ u64 slb_vsid = SLB_VSID_USER;
+ u64 gvsid;
+ int slb_index;
+ struct kvmppc_sid_map *map;
+
+ slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
+
+ if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
+ /* Invalidate an entry */
+ get_paca()->kvm_slb[slb_index].esid = 0;
+ return -ENOENT;
+ }
+
+ map = find_sid_vsid(vcpu, gvsid);
+ if (!map)
+ map = create_sid_map(vcpu, gvsid);
+
+ map->guest_esid = esid;
+
+ slb_vsid |= (map->host_vsid << 12);
+ slb_vsid &= ~SLB_VSID_KP;
+ slb_esid |= slb_index;
+
+ get_paca()->kvm_slb[slb_index].esid = slb_esid;
+ get_paca()->kvm_slb[slb_index].vsid = slb_vsid;
+
+ dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);
+
+ return 0;
+}
+
+void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
+{
+ get_paca()->kvm_slb_max = 1;
+ get_paca()->kvm_slb[0].esid = 0;
+}
+
+void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvmppc_mmu_pte_flush(vcpu, 0, 0);
+}
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
new file mode 100644
index 00000000000..fb7dd2e9ac8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -0,0 +1,131 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+
+/*****************************************************************************
+ * *
+ * Real Mode handlers that need to be in low physical memory *
+ * *
+ ****************************************************************************/
+
+
+.macro INTERRUPT_TRAMPOLINE intno
+
+.global kvmppc_trampoline_\intno
+kvmppc_trampoline_\intno:
+
+ mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
+
+ /*
+ * First thing to do is to find out if we're coming
+ * from a KVM guest or a Linux process.
+ *
+ * To distinguish, we check a magic byte in the PACA
+ */
+ mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */
+ std r12, (PACA_EXMC + EX_R12)(r13)
+ mfcr r12
+ stw r12, (PACA_EXMC + EX_CCR)(r13)
+ lbz r12, PACA_KVM_IN_GUEST(r13)
+ cmpwi r12, 0
+ bne ..kvmppc_handler_hasmagic_\intno
+ /* No KVM guest? Then jump back to the Linux handler! */
+ lwz r12, (PACA_EXMC + EX_CCR)(r13)
+ mtcr r12
+ ld r12, (PACA_EXMC + EX_R12)(r13)
+ mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
+ b kvmppc_resume_\intno /* Get back original handler */
+
+ /* Now we know we're handling a KVM guest */
+..kvmppc_handler_hasmagic_\intno:
+ /* Unset guest state */
+ li r12, 0
+ stb r12, PACA_KVM_IN_GUEST(r13)
+
+ std r1, (PACA_EXMC+EX_R9)(r13)
+ std r10, (PACA_EXMC+EX_R10)(r13)
+ std r11, (PACA_EXMC+EX_R11)(r13)
+ std r2, (PACA_EXMC+EX_R13)(r13)
+
+ mfsrr0 r10
+ mfsrr1 r11
+
+ /* Restore R1/R2 so we can handle faults */
+ ld r1, PACAR1(r13)
+ ld r2, (PACA_EXMC+EX_SRR0)(r13)
+
+ /* Let's store which interrupt we're handling */
+ li r12, \intno
+
+ /* Jump into the SLB exit code that goes to the highmem handler */
+ b kvmppc_handler_trampoline_exit
+
+.endm
+
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DECREMENTER
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
+INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
+
+/*
+ * This trampoline brings us back to a real mode handler
+ *
+ * Input Registers:
+ *
+ * R6 = SRR0
+ * R7 = SRR1
+ * LR = real-mode IP
+ *
+ */
+.global kvmppc_handler_lowmem_trampoline
+kvmppc_handler_lowmem_trampoline:
+
+ mtsrr0 r6
+ mtsrr1 r7
+ blr
+kvmppc_handler_lowmem_trampoline_end:
+
+.global kvmppc_trampoline_lowmem
+kvmppc_trampoline_lowmem:
+ .long kvmppc_handler_lowmem_trampoline - _stext
+
+.global kvmppc_trampoline_enter
+kvmppc_trampoline_enter:
+ .long kvmppc_handler_trampoline_enter - _stext
+
+#include "book3s_64_slb.S"
+
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
new file mode 100644
index 00000000000..ecd237a03fd
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -0,0 +1,262 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
+#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
+#define UNBOLT_SLB_ENTRY(num) \
+ ld r9, SHADOW_SLB_ESID(num)(r12); \
+ /* Invalid? Skip. */; \
+ rldicl. r0, r9, 37, 63; \
+ beq slb_entry_skip_ ## num; \
+ xoris r9, r9, SLB_ESID_V@h; \
+ std r9, SHADOW_SLB_ESID(num)(r12); \
+ slb_entry_skip_ ## num:
+
+#define REBOLT_SLB_ENTRY(num) \
+ ld r10, SHADOW_SLB_ESID(num)(r11); \
+ cmpdi r10, 0; \
+	beq	slb_exit_skip_ ## num;		\
+ oris r10, r10, SLB_ESID_V@h; \
+ ld r9, SHADOW_SLB_VSID(num)(r11); \
+ slbmte r9, r10; \
+ std r10, SHADOW_SLB_ESID(num)(r11); \
+slb_exit_skip_ ## num:
+
+/******************************************************************************
+ * *
+ * Entry code *
+ * *
+ *****************************************************************************/
+
+.global kvmppc_handler_trampoline_enter
+kvmppc_handler_trampoline_enter:
+
+ /* Required state:
+ *
+ * MSR = ~IR|DR
+ * R13 = PACA
+ * R9 = guest IP
+ * R10 = guest MSR
+ * R11 = free
+ * R12 = free
+ * PACA[PACA_EXMC + EX_R9] = guest R9
+ * PACA[PACA_EXMC + EX_R10] = guest R10
+ * PACA[PACA_EXMC + EX_R11] = guest R11
+ * PACA[PACA_EXMC + EX_R12] = guest R12
+ * PACA[PACA_EXMC + EX_R13] = guest R13
+ * PACA[PACA_EXMC + EX_CCR] = guest CR
+ * PACA[PACA_EXMC + EX_R3] = guest XER
+ */
+
+ mtsrr0 r9
+ mtsrr1 r10
+
+ mtspr SPRN_SPRG_SCRATCH0, r0
+
+ /* Remove LPAR shadow entries */
+
+#if SLB_NUM_BOLTED == 3
+
+ ld r12, PACA_SLBSHADOWPTR(r13)
+
+ /* Save off the first entry so we can slbie it later */
+ ld r10, SHADOW_SLB_ESID(0)(r12)
+ ld r11, SHADOW_SLB_VSID(0)(r12)
+
+ /* Remove bolted entries */
+ UNBOLT_SLB_ENTRY(0)
+ UNBOLT_SLB_ENTRY(1)
+ UNBOLT_SLB_ENTRY(2)
+
+#else
+#error unknown number of bolted entries
+#endif
+
+ /* Flush SLB */
+
+ slbia
+
+	/* r10 = esid & ESID_MASK */
+	rldicr	r10, r10, 0, 35
+	/* r10 |= CLASS_BIT(VSID) */
+	rldic	r12, r11, 56 - 36, 36
+	or	r10, r10, r12
+ slbie r10
+
+ isync
+
+ /* Fill SLB with our shadow */
+
+ lbz r12, PACA_KVM_SLB_MAX(r13)
+ mulli r12, r12, 16
+ addi r12, r12, PACA_KVM_SLB
+ add r12, r12, r13
+
+ /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
+ li r11, PACA_KVM_SLB
+ add r11, r11, r13
+
+slb_loop_enter:
+
+ ld r10, 0(r11)
+
+ rldicl. r0, r10, 37, 63
+ beq slb_loop_enter_skip
+
+ ld r9, 8(r11)
+ slbmte r9, r10
+
+slb_loop_enter_skip:
+ addi r11, r11, 16
+ cmpd cr0, r11, r12
+ blt slb_loop_enter
+
+slb_do_enter:
+
+ /* Enter guest */
+
+ mfspr r0, SPRN_SPRG_SCRATCH0
+
+ ld r9, (PACA_EXMC+EX_R9)(r13)
+ ld r10, (PACA_EXMC+EX_R10)(r13)
+ ld r12, (PACA_EXMC+EX_R12)(r13)
+
+ lwz r11, (PACA_EXMC+EX_CCR)(r13)
+ mtcr r11
+
+ ld r11, (PACA_EXMC+EX_R3)(r13)
+ mtxer r11
+
+ ld r11, (PACA_EXMC+EX_R11)(r13)
+ ld r13, (PACA_EXMC+EX_R13)(r13)
+
+ RFI
+kvmppc_handler_trampoline_enter_end:
+
+
+
+/******************************************************************************
+ * *
+ * Exit code *
+ * *
+ *****************************************************************************/
+
+.global kvmppc_handler_trampoline_exit
+kvmppc_handler_trampoline_exit:
+
+ /* Register usage at this point:
+ *
+ * SPRG_SCRATCH0 = guest R13
+ * R01 = host R1
+ * R02 = host R2
+ * R10 = guest PC
+ * R11 = guest MSR
+ * R12 = exit handler id
+ * R13 = PACA
+ * PACA.exmc.CCR = guest CR
+ * PACA.exmc.R9 = guest R1
+ * PACA.exmc.R10 = guest R10
+ * PACA.exmc.R11 = guest R11
+ * PACA.exmc.R12 = guest R12
+ * PACA.exmc.R13 = guest R2
+ *
+ */
+
+ /* Save registers */
+
+ std r0, (PACA_EXMC+EX_SRR0)(r13)
+ std r9, (PACA_EXMC+EX_R3)(r13)
+ std r10, (PACA_EXMC+EX_LR)(r13)
+ std r11, (PACA_EXMC+EX_DAR)(r13)
+
+ /*
+	 * To easily fetch the last instruction, the one we took the
+	 * #vmexit on, we exploit the fact that the virtual layout is
+	 * still the same here, so we can just load from the guest's
+	 * PC address.
+ */
+
+ /* We only load the last instruction when it's safe */
+ cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE
+ beq ld_last_inst
+ cmpwi r12, BOOK3S_INTERRUPT_PROGRAM
+ beq ld_last_inst
+
+ b no_ld_last_inst
+
+ld_last_inst:
+ /* Save off the guest instruction we're at */
+ /* 1) enable paging for data */
+ mfmsr r9
+ ori r11, r9, MSR_DR /* Enable paging for data */
+ mtmsr r11
+ /* 2) fetch the instruction */
+ lwz r0, 0(r10)
+ /* 3) disable paging again */
+ mtmsr r9
+
+no_ld_last_inst:
+
+ /* Restore bolted entries from the shadow and fix it along the way */
+
+ /* We don't store anything in entry 0, so we don't need to take care of it */
+ slbia
+ isync
+
+#if SLB_NUM_BOLTED == 3
+
+ ld r11, PACA_SLBSHADOWPTR(r13)
+
+ REBOLT_SLB_ENTRY(0)
+ REBOLT_SLB_ENTRY(1)
+ REBOLT_SLB_ENTRY(2)
+
+#else
+#error unknown number of bolted entries
+#endif
+
+slb_do_exit:
+
+ /* Restore registers */
+
+ ld r11, (PACA_EXMC+EX_DAR)(r13)
+ ld r10, (PACA_EXMC+EX_LR)(r13)
+ ld r9, (PACA_EXMC+EX_R3)(r13)
+
+ /* Save last inst */
+ stw r0, (PACA_EXMC+EX_LR)(r13)
+
+ /* Save DAR and DSISR before going to paged mode */
+ mfdar r0
+ std r0, (PACA_EXMC+EX_DAR)(r13)
+ mfdsisr r0
+ stw r0, (PACA_EXMC+EX_DSISR)(r13)
+
+ /* RFI into the highmem handler */
+ mfmsr r0
+ ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
+ mtsrr1 r0
+ ld r0, PACASAVEDMSR(r13) /* Highmem handler address */
+ mtsrr0 r0
+
+ mfspr r0, SPRN_SPRG_SCRATCH0
+
+ RFI
+kvmppc_handler_trampoline_exit_end:
+
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index e7bf4d02948..06f5a9ecc42 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -520,6 +520,11 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return kvmppc_core_vcpu_translate(vcpu, tr);
}
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return -ENOTSUPP;
+}
+
int __init kvmppc_booke_init(void)
{
unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 7737146af3f..4a9ac6640fa 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -18,7 +18,7 @@
*/
#include <linux/jiffies.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
@@ -32,6 +32,7 @@
#include "trace.h"
#define OP_TRAP 3
+#define OP_TRAP_64 2
#define OP_31_XOP_LWZX 23
#define OP_31_XOP_LBZX 87
@@ -64,19 +65,45 @@
#define OP_STH 44
#define OP_STHU 45
+#ifdef CONFIG_PPC64
+static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+#else
+static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.tcr & TCR_DIE;
+}
+#endif
+
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.tcr & TCR_DIE) {
+ unsigned long dec_nsec;
+
+ pr_debug("mtDEC: %x\n", vcpu->arch.dec);
+#ifdef CONFIG_PPC64
+ /* POWER4+ triggers a dec interrupt if the value is < 0 */
+ if (vcpu->arch.dec & 0x80000000) {
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+ kvmppc_core_queue_dec(vcpu);
+ return;
+ }
+#endif
+ if (kvmppc_dec_enabled(vcpu)) {
/* The decrementer ticks at the same rate as the timebase, so
* that's how we convert the guest DEC value to the number of
* host ticks. */
- unsigned long nr_jiffies;
- nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
- mod_timer(&vcpu->arch.dec_timer,
- get_jiffies_64() + nr_jiffies);
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+ dec_nsec = vcpu->arch.dec;
+ dec_nsec *= 1000;
+ dec_nsec /= tb_ticks_per_usec;
+ hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
+ HRTIMER_MODE_REL);
+ vcpu->arch.dec_jiffies = get_tb();
} else {
- del_timer(&vcpu->arch.dec_timer);
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
}
}
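A standalone sketch of the DEC-to-nanoseconds conversion introduced above; the 512 MHz timebase (tb_ticks_per_usec = 512) and the guest DEC value are assumptions chosen only to show the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long tb_ticks_per_usec = 512;	/* assumed 512 MHz timebase */
	unsigned long dec = 0x1000000;		/* guest DEC value */
	unsigned long long dec_nsec = (unsigned long long)dec * 1000
				      / tb_ticks_per_usec;

	/* 16777216 ticks at 512 ticks/us -> 32768000 ns */
	printf("%lu timebase ticks ~= %llu ns\n", dec, dec_nsec);
	return 0;
}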
@@ -111,9 +138,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
+	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
+
switch (get_op(inst)) {
case OP_TRAP:
+#ifdef CONFIG_PPC64
+ case OP_TRAP_64:
+#else
vcpu->arch.esr |= ESR_PTR;
+#endif
kvmppc_core_queue_program(vcpu);
advance = 0;
break;
@@ -188,17 +221,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_SRR1:
vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
case SPRN_PVR:
- vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break;
+ vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
case SPRN_PIR:
- vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break;
+ vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
+ case SPRN_MSSSR0:
+ vcpu->arch.gpr[rt] = 0; break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
- vcpu->arch.gpr[rt] = mftbl(); break;
+ vcpu->arch.gpr[rt] = get_tb() >> 32; break;
case SPRN_TBWU:
- vcpu->arch.gpr[rt] = mftbu(); break;
+ vcpu->arch.gpr[rt] = get_tb(); break;
case SPRN_SPRG0:
vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
@@ -211,6 +246,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
+ case SPRN_DEC:
+ {
+ u64 jd = get_tb() - vcpu->arch.dec_jiffies;
+ vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
+			pr_debug("mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
+ break;
+ }
default:
emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
if (emulated == EMULATE_FAIL) {
@@ -260,6 +302,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_TBWL: break;
case SPRN_TBWU: break;
+ case SPRN_MSSSR0: break;
+
case SPRN_DEC:
vcpu->arch.dec = vcpu->arch.gpr[rs];
kvmppc_emulate_dec(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 5902bbc2411..f06cf93b178 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -23,6 +23,7 @@
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
@@ -144,6 +145,9 @@ int kvm_dev_ioctl_check_extension(long ext)
int r;
switch (ext) {
+ case KVM_CAP_PPC_SEGSTATE:
+ r = 1;
+ break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
@@ -209,10 +213,25 @@ static void kvmppc_decrementer_func(unsigned long data)
}
}
+/*
+ * low level hrtimer wake routine. Because this runs in hardirq context
+ * we schedule a tasklet to do the real work.
+ */
+enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
+{
+ struct kvm_vcpu *vcpu;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
+ tasklet_schedule(&vcpu->arch.tasklet);
+
+ return HRTIMER_NORESTART;
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
- setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
- (unsigned long)vcpu);
+ hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
+ vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
return 0;
}
@@ -410,11 +429,6 @@ out:
return r;
}
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-{
- return -ENOTSUPP;
-}
-
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 2aa371e3007..70378551c0c 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -23,6 +23,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <asm/time.h>
#include <asm-generic/div64.h>
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 67f219de045..a8e84001805 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -12,8 +12,8 @@
* Tracepoint for guest mode entry.
*/
TRACE_EVENT(kvm_ppc_instr,
- TP_PROTO(unsigned int inst, unsigned long pc, unsigned int emulate),
- TP_ARGS(inst, pc, emulate),
+ TP_PROTO(unsigned int inst, unsigned long _pc, unsigned int emulate),
+ TP_ARGS(inst, _pc, emulate),
TP_STRUCT__entry(
__field( unsigned int, inst )
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_ppc_instr,
TP_fast_assign(
__entry->inst = inst;
- __entry->pc = pc;
+ __entry->pc = _pc;
__entry->emulate = emulate;
),
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index c657de59abc..74a7f4130b4 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -98,20 +98,7 @@ _GLOBAL(cacheable_memzero)
bdnz 4b
3: mtctr r9
li r7,4
-#if !defined(CONFIG_8xx)
10: dcbz r7,r6
-#else
-10: stw r4, 4(r6)
- stw r4, 8(r6)
- stw r4, 12(r6)
- stw r4, 16(r6)
-#if CACHE_LINE_SIZE >= 32
- stw r4, 20(r6)
- stw r4, 24(r6)
- stw r4, 28(r6)
- stw r4, 32(r6)
-#endif /* CACHE_LINE_SIZE */
-#endif
addi r6,r6,CACHELINE_BYTES
bdnz 10b
clrlwi r5,r8,32-LG_CACHELINE_BYTES
@@ -200,9 +187,7 @@ _GLOBAL(cacheable_memcpy)
mtctr r0
beq 63f
53:
-#if !defined(CONFIG_8xx)
dcbz r11,r6
-#endif
COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
COPY_16_BYTES
@@ -356,14 +341,6 @@ _GLOBAL(__copy_tofrom_user)
li r11,4
beq 63f
-#ifdef CONFIG_8xx
- /* Don't use prefetch on 8xx */
- mtctr r0
- li r0,0
-53: COPY_16_BYTES_WITHEX(0)
- bdnz 53b
-
-#else /* not CONFIG_8xx */
/* Here we decide how far ahead to prefetch the source */
li r3,4
cmpwi r0,1
@@ -416,7 +393,6 @@ _GLOBAL(__copy_tofrom_user)
li r3,4
li r7,0
bne 114b
-#endif /* CONFIG_8xx */
63: srwi. r0,r5,2
mtctr r0
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 79d0fa3a470..58e14fba11b 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -25,7 +25,7 @@
#include <asm/smp.h>
#include <asm/firmware.h>
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock)
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
@@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw)
}
#endif
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (lock->slock) {
HMT_low();
@@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
HMT_medium();
}
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index f5e7b9ce63d..08dfa8e6d86 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -91,7 +91,7 @@ void __init MMU_init_hw(void)
#define LARGE_PAGE_SIZE_16M (1<<24)
#define LARGE_PAGE_SIZE_4M (1<<22)
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
{
unsigned long v, s, mapped;
phys_addr_t p;
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 98052ac9658..3986264b099 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -88,7 +88,7 @@ void __init MMU_init_hw(void)
flush_instruction_cache();
}
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
{
unsigned long addr;
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 6fb8fc8d2fe..ce68708bbad 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -28,7 +28,10 @@ obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ifeq ($(CONFIG_HUGETLB_PAGE),y)
+obj-y += hugetlbpage.o
+obj-$(CONFIG_PPC_STD_MMU_64) += hugetlbpage-hash64.o
+endif
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e7dae82c128..26fb6b990b0 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -40,7 +40,7 @@
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
-
+#include <mm/mmu_decl.h>
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
@@ -246,6 +246,12 @@ good_area:
goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
+	/* The 8xx sometimes needs to load invalid/non-present TLB entries.
+	 * These must be invalidated separately, as the Linux mm code does
+	 * not do it itself.
+ */
+ if (error_code & 0x40000000) /* no translation? */
+ _tlbil_va(address, 0, 0, 0);
+
/* The MPC8xx seems to always set 0x80000000, which is
* "undefined". Of those that can be set, this is the only
* one which seems bad.
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index dc93e95b256..c5394728bf2 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -54,26 +54,35 @@
#include "mmu_decl.h"
-extern void loadcam_entry(unsigned int index);
unsigned int tlbcam_index;
-static unsigned long cam[CONFIG_LOWMEM_CAM_NUM];
-#define NUM_TLBCAMS (16)
+#define NUM_TLBCAMS (64)
#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
#endif
-struct tlbcam TLBCAM[NUM_TLBCAMS];
+struct tlbcam {
+ u32 MAS0;
+ u32 MAS1;
+ unsigned long MAS2;
+ u32 MAS3;
+ u32 MAS7;
+} TLBCAM[NUM_TLBCAMS];
struct tlbcamrange {
- unsigned long start;
+ unsigned long start;
unsigned long limit;
phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
extern unsigned int tlbcam_index;
+unsigned long tlbcam_sz(int idx)
+{
+ return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
+}
+
/*
* Return PA for this VA if it is mapped by a CAM, or 0
*/
@@ -94,23 +103,36 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
int b;
for (b = 0; b < tlbcam_index; ++b)
if (pa >= tlbcam_addrs[b].phys
- && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+ && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
+tlbcam_addrs[b].phys)
return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
return 0;
}
+void loadcam_entry(int idx)
+{
+ mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
+ mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
+ mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
+ mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
+
+ if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS)
+ mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
+
+ asm volatile("isync;tlbwe;isync" : : : "memory");
+}
+
/*
* Set up one of the I/D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
* of 4 between 4k and 256M.
*/
-void settlbcam(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags, unsigned int pid)
+static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
+ unsigned long size, unsigned long flags, unsigned int pid)
{
unsigned int tsize, lz;
- asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+ asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (size));
tsize = 21 - lz;
#ifdef CONFIG_SMP
@@ -128,8 +150,10 @@ void settlbcam(int index, unsigned long virt, phys_addr_t phys,
TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
- TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
+ TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SX | MAS3_SR;
TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+ if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS)
+ TLBCAM[index].MAS7 = (u64)phys >> 32;
#ifndef CONFIG_KGDB /* want user access for breakpoints */
if (flags & _PAGE_USER) {
@@ -148,27 +172,44 @@ void settlbcam(int index, unsigned long virt, phys_addr_t phys,
loadcam_entry(index);
}
-void invalidate_tlbcam_entry(int index)
-{
- TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
- TLBCAM[index].MAS1 = ~MAS1_VALID;
-
- loadcam_entry(index);
-}
-
-unsigned long __init mmu_mapin_ram(void)
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx)
{
+ int i;
unsigned long virt = PAGE_OFFSET;
phys_addr_t phys = memstart_addr;
+ unsigned long amount_mapped = 0;
+ unsigned long max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf;
+
+ /* Convert (4^max) kB to (2^max) bytes */
+ max_cam = max_cam * 2 + 10;
- while (tlbcam_index < ARRAY_SIZE(cam) && cam[tlbcam_index]) {
- settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], PAGE_KERNEL_X, 0);
- virt += cam[tlbcam_index];
- phys += cam[tlbcam_index];
- tlbcam_index++;
+ /* Calculate CAM values */
+ for (i = 0; ram && i < max_cam_idx; i++) {
+ unsigned int camsize = __ilog2(ram) & ~1U;
+ unsigned int align = __ffs(virt | phys) & ~1U;
+ unsigned long cam_sz;
+
+ if (camsize > align)
+ camsize = align;
+ if (camsize > max_cam)
+ camsize = max_cam;
+
+ cam_sz = 1UL << camsize;
+ settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);
+
+ ram -= cam_sz;
+ amount_mapped += cam_sz;
+ virt += cam_sz;
+ phys += cam_sz;
}
+ tlbcam_index = i;
- return virt - PAGE_OFFSET;
+ return amount_mapped;
+}
+
+unsigned long __init mmu_mapin_ram(unsigned long top)
+{
+ return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
}
/*
@@ -179,46 +220,21 @@ void __init MMU_init_hw(void)
flush_instruction_cache();
}
-void __init
-adjust_total_lowmem(void)
+void __init adjust_total_lowmem(void)
{
- phys_addr_t ram;
- unsigned int max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xff;
- char buf[ARRAY_SIZE(cam) * 5 + 1], *p = buf;
+ unsigned long ram;
int i;
- unsigned long virt = PAGE_OFFSET & 0xffffffffUL;
- unsigned long phys = memstart_addr & 0xffffffffUL;
-
- /* Convert (4^max) kB to (2^max) bytes */
- max_cam = max_cam * 2 + 10;
/* adjust lowmem size to __max_low_memory */
ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
- /* Calculate CAM values */
- __max_low_memory = 0;
- for (i = 0; ram && i < ARRAY_SIZE(cam); i++) {
- unsigned int camsize = __ilog2(ram) & ~1U;
- unsigned int align = __ffs(virt | phys) & ~1U;
-
- if (camsize > align)
- camsize = align;
- if (camsize > max_cam)
- camsize = max_cam;
-
- cam[i] = 1UL << camsize;
- ram -= cam[i];
- __max_low_memory += cam[i];
- virt += cam[i];
- phys += cam[i];
+ __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
- p += sprintf(p, "%lu/", cam[i] >> 20);
- }
- for (; i < ARRAY_SIZE(cam); i++)
- p += sprintf(p, "0/");
- p[-1] = '\0';
-
- pr_info("Memory CAM mapping: %s Mb, residual: %dMb\n", buf,
+ pr_info("Memory CAM mapping: ");
+ for (i = 0; i < tlbcam_index - 1; i++)
+ pr_cont("%lu/", tlbcam_sz(i) >> 20);
+ pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
(unsigned int)((total_lowmem - __max_low_memory) >> 20));
+
__initial_memory_limit_addr = memstart_addr + __max_low_memory;
}
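
The reworked fsl_booke code above folds the CAM sizing loop into map_mem_in_cams(): each entry is the largest even power of two that fits the remaining RAM, keeps virt/phys naturally aligned, and respects the hardware maximum read from TLB1CFG (a field value of N meaning 4^N KB, hence the "* 2 + 10" conversion). A minimal standalone sketch of that arithmetic follows; values and helper names are illustrative, not kernel code.

#include <stdio.h>

static unsigned ilog2(unsigned long x)
{
	unsigned r = 0;

	while (x >>= 1)
		r++;
	return r;
}

static unsigned lowbit(unsigned long x)
{
	unsigned r = 0;

	while (!(x & 1)) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long ram  = 192UL << 20;	/* 192 MB left to cover */
	unsigned long virt = 0xc0000000UL;	/* kernel virtual base */
	unsigned long phys = 0x00000000UL;	/* physical base */
	unsigned tlb1cfg_sz = 9;		/* hypothetical TLB1CFG size field */
	unsigned max_cam = tlb1cfg_sz * 2 + 10;	/* 4^9 KB = 256 MB => shift 28 */
	int i;

	for (i = 0; ram && i < 4; i++) {
		unsigned camsize = ilog2(ram) & ~1U;		/* even power of two */
		unsigned align   = lowbit(virt | phys) & ~1U;	/* alignment limit */
		unsigned long sz;

		if (camsize > align)
			camsize = align;
		if (camsize > max_cam)
			camsize = max_cam;

		sz = 1UL << camsize;
		printf("CAM%d: %lu MB at virt %#lx / phys %#lx\n",
		       i, sz >> 20, virt, phys);

		ram  -= sz;
		virt += sz;
		phys += sz;
	}
	return 0;
}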
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index bc122a120bf..d7efdbf640c 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -55,57 +55,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
return 1;
}
-#ifdef CONFIG_HUGETLB_PAGE
-static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
- unsigned long *addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long mask;
- unsigned long pte_end;
- struct page *head, *page;
- pte_t pte;
- int refs;
-
- pte_end = (*addr + huge_page_size(hstate)) & huge_page_mask(hstate);
- if (pte_end < end)
- end = pte_end;
-
- pte = *ptep;
- mask = _PAGE_PRESENT|_PAGE_USER;
- if (write)
- mask |= _PAGE_RW;
- if ((pte_val(pte) & mask) != mask)
- return 0;
- /* hugepages are never "special" */
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
- refs = 0;
- head = pte_page(pte);
- page = head + ((*addr & ~huge_page_mask(hstate)) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (*addr += PAGE_SIZE, *addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- /* Could be optimized better */
- while (*nr) {
- put_page(page);
- (*nr)--;
- }
- }
-
- return 1;
-}
-#endif /* CONFIG_HUGETLB_PAGE */
-
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
@@ -119,7 +68,11 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
if (pmd_none(pmd))
return 0;
- if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+ if (is_hugepd(pmdp)) {
+ if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
+ addr, next, write, pages, nr))
+ return 0;
+ } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
return 0;
} while (pmdp++, addr = next, addr != end);
@@ -139,7 +92,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
- if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+ if (is_hugepd(pudp)) {
+ if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
+ addr, next, write, pages, nr))
+ return 0;
+ } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
} while (pudp++, addr = next, addr != end);
@@ -154,10 +111,6 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
unsigned long next;
pgd_t *pgdp;
int nr = 0;
-#ifdef CONFIG_PPC64
- unsigned int shift;
- int psize;
-#endif
pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
@@ -172,25 +125,6 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
pr_devel(" aligned: %lx .. %lx\n", start, end);
-#ifdef CONFIG_HUGETLB_PAGE
- /* We bail out on slice boundary crossing when hugetlb is
- * enabled in order to not have to deal with two different
- * page table formats
- */
- if (addr < SLICE_LOW_TOP) {
- if (end > SLICE_LOW_TOP)
- goto slow_irqon;
-
- if (unlikely(GET_LOW_SLICE_INDEX(addr) !=
- GET_LOW_SLICE_INDEX(end - 1)))
- goto slow_irqon;
- } else {
- if (unlikely(GET_HIGH_SLICE_INDEX(addr) !=
- GET_HIGH_SLICE_INDEX(end - 1)))
- goto slow_irqon;
- }
-#endif /* CONFIG_HUGETLB_PAGE */
-
/*
* XXX: batch / limit 'nr', to avoid large irq off latency
* needs some instrumenting to determine the common sizes used by
@@ -210,54 +144,23 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
*/
local_irq_disable();
-#ifdef CONFIG_PPC64
- /* Those bits are related to hugetlbfs implementation and only exist
- * on 64-bit for now
- */
- psize = get_slice_psize(mm, addr);
- shift = mmu_psize_defs[psize].shift;
-#endif /* CONFIG_PPC64 */
-
-#ifdef CONFIG_HUGETLB_PAGE
- if (unlikely(mmu_huge_psizes[psize])) {
- pte_t *ptep;
- unsigned long a = addr;
- unsigned long sz = ((1UL) << shift);
- struct hstate *hstate = size_to_hstate(sz);
-
- BUG_ON(!hstate);
- /*
- * XXX: could be optimized to avoid hstate
- * lookup entirely (just use shift)
- */
-
- do {
- VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
- ptep = huge_pte_offset(mm, a);
- pr_devel(" %016lx: huge ptep %p\n", a, ptep);
- if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write, pages,
- &nr))
- goto slow;
- } while (a != end);
- } else
-#endif /* CONFIG_HUGETLB_PAGE */
- {
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
-#ifdef CONFIG_PPC64
- VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
-#endif
- pr_devel(" %016lx: normal pgd %p\n", addr,
- (void *)pgd_val(pgd));
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ pr_devel(" %016lx: normal pgd %p\n", addr,
+ (void *)pgd_val(pgd));
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ goto slow;
+ if (is_hugepd(pgdp)) {
+ if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
+ addr, next, write, pages, &nr))
goto slow;
- } while (pgdp++, addr = next, addr != end);
- }
+ } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ goto slow;
+ } while (pgdp++, addr = next, addr != end);
+
local_irq_enable();
VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
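
With the hugepd rework, get_user_pages_fast() no longer special-cases hugetlb up front; instead each directory level checks is_hugepd() and hands the whole range to gup_hugepd() when the entry is really a huge-page directory. A toy standalone illustration of that dispatch (the encoding and names below are made up for the sketch, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define HUGEPD_FLAG	0x1UL		/* toy marker: "this slot is a hugepd" */

static int is_hugepd(uintptr_t slot)
{
	return slot & HUGEPD_FLAG;
}

static unsigned hugepd_shift(uintptr_t slot)
{
	return (slot >> 1) & 0x3f;	/* toy encoding of the huge page shift */
}

static void walk_slot(uintptr_t slot, unsigned long addr, unsigned long end)
{
	if (is_hugepd(slot))
		printf("[%#lx..%#lx) one huge mapping, page shift %u\n",
		       addr, end, hugepd_shift(slot));
	else
		printf("[%#lx..%#lx) descend to the next table level\n",
		       addr, end);
}

int main(void)
{
	walk_slot(0x0, 0x00000000UL, 0x00200000UL);	/* normal directory */
	walk_slot(HUGEPD_FLAG | (24UL << 1),		/* 16M huge pages */
		  0x01000000UL, 0x02000000UL);
	return 0;
}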
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1ade7eb6ae0..3ecdcec0a39 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,6 +92,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
+EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
@@ -102,6 +103,7 @@ int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
+EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_HUGETLB_PAGE
unsigned int HPAGE_SHIFT;
#endif
@@ -338,7 +340,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
else
def->tlbiel = 0;
- DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
+ DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
"tlbiel=%d, penc=%d\n",
idx, shift, def->sllp, def->avpnm, def->tlbiel,
def->penc);
@@ -481,16 +483,6 @@ static void __init htab_init_page_sizes(void)
#ifdef CONFIG_HUGETLB_PAGE
/* Reserve 16G huge page memory sections for huge pages */
of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
-
-/* Set default large page size. Currently, we pick 16M or 1M depending
- * on what is available
- */
- if (mmu_psize_defs[MMU_PAGE_16M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
- /* With 4k/4level pagetables, we can't (for now) cope with a
- * huge page size < PMD_SIZE */
- else if (mmu_psize_defs[MMU_PAGE_1M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
#endif /* CONFIG_HUGETLB_PAGE */
}
@@ -671,7 +663,7 @@ static void __init htab_initialize(void)
base = (unsigned long)__va(lmb.memory.region[i].base);
size = lmb.memory.region[i].size;
- DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
+ DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
base, size, prot);
#ifdef CONFIG_U3_DART
@@ -785,7 +777,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
/* page is dirty */
if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
if (trap == 0x400) {
- __flush_dcache_icache(page_address(page));
+ flush_dcache_icache_page(page);
set_bit(PG_arch_1, &page->flags);
} else
pp |= HPTE_R_N;
@@ -843,9 +835,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
* Result is 0: full permissions, _PAGE_RW: read-only,
* _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
*/
-static int subpage_protection(pgd_t *pgdir, unsigned long ea)
+static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
- struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
+ struct subpage_prot_table *spt = &mm->context.spt;
u32 spp = 0;
u32 **sbpm, *sbpp;
@@ -873,7 +865,7 @@ static int subpage_protection(pgd_t *pgdir, unsigned long ea)
}
#else /* CONFIG_PPC_SUBPAGE_PROT */
-static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
+static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
return 0;
}
@@ -887,10 +879,11 @@ static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
*/
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
- void *pgdir;
+ pgd_t *pgdir;
unsigned long vsid;
struct mm_struct *mm;
pte_t *ptep;
+ unsigned hugeshift;
const struct cpumask *tmp;
int rc, user_region = 0, local = 0;
int psize, ssize;
@@ -943,30 +936,31 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
local = 1;
-#ifdef CONFIG_HUGETLB_PAGE
- /* Handle hugepage regions */
- if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
- DBG_LOW(" -> huge page !\n");
- return hash_huge_page(mm, access, ea, vsid, local, trap);
- }
-#endif /* CONFIG_HUGETLB_PAGE */
-
#ifndef CONFIG_PPC_64K_PAGES
- /* If we use 4K pages and our psize is not 4K, then we are hitting
- * a special driver mapping, we need to align the address before
- * we fetch the PTE
+ /* If we use 4K pages and our psize is not 4K, then we might
+ * be hitting a special driver mapping, and need to align the
+ * address before we fetch the PTE.
+ *
+ * It could also be a hugepage mapping, in which case this is
+ * not necessary, but it's not harmful, either.
*/
if (psize != MMU_PAGE_4K)
ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */
/* Get PTE and page size from page tables */
- ptep = find_linux_pte(pgdir, ea);
+ ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
if (ptep == NULL || !pte_present(*ptep)) {
DBG_LOW(" no PTE !\n");
return 1;
}
+#ifdef CONFIG_HUGETLB_PAGE
+ if (hugeshift)
+ return __hash_page_huge(ea, access, vsid, ptep, trap, local,
+ ssize, hugeshift, psize);
+#endif /* CONFIG_HUGETLB_PAGE */
+
#ifndef CONFIG_PPC_64K_PAGES
DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
@@ -1031,7 +1025,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
else
#endif /* CONFIG_PPC_HAS_HASH_64K */
{
- int spp = subpage_protection(pgdir, ea);
+ int spp = subpage_protection(mm, ea);
if (access & spp)
rc = -2;
else
@@ -1121,7 +1115,7 @@ void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
{
unsigned long hash, index, shift, hidx, slot;
- DBG_LOW("flush_hash_page(va=%016x)\n", va);
+ DBG_LOW("flush_hash_page(va=%016lx)\n", va);
pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
hash = hpt_hash(va, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
@@ -1129,7 +1123,7 @@ void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
+ DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
} pte_iterate_hashed_end();
}
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
new file mode 100644
index 00000000000..199539882f9
--- /dev/null
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -0,0 +1,139 @@
+/*
+ * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
+ *
+ * Copyright (C) 2003 David Gibson, IBM Corporation.
+ *
+ * Based on the IA-32 version:
+ * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/machdep.h>
+
+int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
+ pte_t *ptep, unsigned long trap, int local, int ssize,
+ unsigned int shift, unsigned int mmu_psize)
+{
+ unsigned long old_pte, new_pte;
+ unsigned long va, rflags, pa, sz;
+ long slot;
+ int err = 1;
+
+ BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
+
+ /* Search the Linux page table for a match with va */
+ va = hpt_va(ea, vsid, ssize);
+
+ /*
+ * Check the user's access rights to the page. If access should be
+ * prevented then send the problem up to do_page_fault.
+ */
+ if (unlikely(access & ~pte_val(*ptep)))
+ goto out;
+ /*
+ * At this point, we have a pte (old_pte) which can be used to build
+ * or update an HPTE. There are 2 cases:
+ *
+ * 1. There is a valid (present) pte with no associated HPTE (this is
+ * the most common case)
+ * 2. There is a valid (present) pte with an associated HPTE. The
+ * current values of the pp bits in the HPTE prevent access
+ * because we are doing software DIRTY bit management and the
+ * page is currently not DIRTY.
+ */
+
+
+ do {
+ old_pte = pte_val(*ptep);
+ if (old_pte & _PAGE_BUSY)
+ goto out;
+ new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+ } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
+ old_pte, new_pte));
+
+ rflags = 0x2 | (!(new_pte & _PAGE_RW));
+ /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
+ rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+ sz = ((1UL) << shift);
+ if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ /* No CPU that supports hugepages lacks no-execute, so we
+ * don't need to worry about that case */
+ rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+
+ /* Check if pte already has an hpte (case 2) */
+ if (unlikely(old_pte & _PAGE_HASHPTE)) {
+ /* There MIGHT be an HPTE for this pte */
+ unsigned long hash, slot;
+
+ hash = hpt_hash(va, shift, ssize);
+ if (old_pte & _PAGE_F_SECOND)
+ hash = ~hash;
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += (old_pte & _PAGE_F_GIX) >> 12;
+
+ if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
+ ssize, local) == -1)
+ old_pte &= ~_PAGE_HPTEFLAGS;
+ }
+
+ if (likely(!(old_pte & _PAGE_HASHPTE))) {
+ unsigned long hash = hpt_hash(va, shift, ssize);
+ unsigned long hpte_group;
+
+ pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+
+repeat:
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+
+ /* clear HPTE slot information in new PTE */
+#ifdef CONFIG_PPC_64K_PAGES
+ new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
+#else
+ new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+#endif
+ /* Add in WIMG bits */
+ rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+ _PAGE_COHERENT | _PAGE_GUARDED));
+
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
+ mmu_psize, ssize);
+
+ /* Primary is full, try the secondary */
+ if (unlikely(slot == -1)) {
+ hpte_group = ((~hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+ slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
+ HPTE_V_SECONDARY,
+ mmu_psize, ssize);
+ if (slot == -1) {
+ if (mftb() & 0x1)
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP)&~0x7UL;
+
+ ppc_md.hpte_remove(hpte_group);
+ goto repeat;
+ }
+ }
+
+ if (unlikely(slot == -2))
+ panic("hash_huge_page: pte_insert failed\n");
+
+ new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
+ }
+
+ /*
+ * No need to use ldarx/stdcx here
+ */
+ *ptep = __pte(new_pte & ~_PAGE_BUSY);
+
+ err = 0;
+
+ out:
+ return err;
+}
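
The new __hash_page_huge() remembers where the HPTE landed by folding the group index and the secondary-hash flag into spare PTE bits (the `(slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX)` step above), so a later fault can update the same slot instead of inserting a fresh entry. A standalone sketch of that pack/unpack, using stand-in mask values rather than the real PTE bit definitions:

#include <stdio.h>

#define F_GIX		0x7000UL	/* stand-in: 3-bit group index at bit 12 */
#define F_SECOND	0x8000UL	/* stand-in: secondary-hash flag */

int main(void)
{
	unsigned long pte = 0;
	unsigned long slot = 0xb;	/* bit 3 = secondary hash, low bits = index 3 */
	unsigned long gix;
	int secondary;

	pte |= (slot << 12) & (F_SECOND | F_GIX);	/* record where the HPTE went */

	gix = (pte & F_GIX) >> 12;			/* recover the group index */
	secondary = !!(pte & F_SECOND);			/* recover which hash was used */

	printf("slot %#lx -> gix=%lu secondary=%d\n", slot, gix, secondary);
	return 0;
}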
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 90df6ffe3a4..123f7070238 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -7,29 +7,17 @@
* Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
*/
-#include <linux/init.h>
-#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/io.h>
#include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/sysctl.h>
-#include <asm/mman.h>
+#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/spu.h>
#define PAGE_SHIFT_64K 16
#define PAGE_SHIFT_16M 24
#define PAGE_SHIFT_16G 34
-#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT)
-#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES 1024
/* Tracks the 16G pages after the device tree is scanned and before the
@@ -37,53 +25,17 @@
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
-/* Array of valid huge page sizes - non-zero value(hugepte_shift) is
- * stored for the huge page sizes that are valid.
- */
-unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
-
-#define hugepte_shift mmu_huge_psizes
-#define PTRS_PER_HUGEPTE(psize) (1 << hugepte_shift[psize])
-#define HUGEPTE_TABLE_SIZE(psize) (sizeof(pte_t) << hugepte_shift[psize])
-
-#define HUGEPD_SHIFT(psize) (mmu_psize_to_shift(psize) \
- + hugepte_shift[psize])
-#define HUGEPD_SIZE(psize) (1UL << HUGEPD_SHIFT(psize))
-#define HUGEPD_MASK(psize) (~(HUGEPD_SIZE(psize)-1))
-
-/* Subtract one from array size because we don't need a cache for 4K since
- * is not a huge page size */
-#define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1)
-#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
-
-static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
- [MMU_PAGE_64K] = "hugepte_cache_64K",
- [MMU_PAGE_1M] = "hugepte_cache_1M",
- [MMU_PAGE_16M] = "hugepte_cache_16M",
- [MMU_PAGE_16G] = "hugepte_cache_16G",
-};
-
/* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad()
* will choke on pointers to hugepte tables, which is handy for
* catching screwups early. */
-#define HUGEPD_OK 0x1
-
-typedef struct { unsigned long pd; } hugepd_t;
-
-#define hugepd_none(hpd) ((hpd).pd == 0)
static inline int shift_to_mmu_psize(unsigned int shift)
{
- switch (shift) {
-#ifndef CONFIG_PPC_64K_PAGES
- case PAGE_SHIFT_64K:
- return MMU_PAGE_64K;
-#endif
- case PAGE_SHIFT_16M:
- return MMU_PAGE_16M;
- case PAGE_SHIFT_16G:
- return MMU_PAGE_16G;
- }
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
return -1;
}
@@ -94,71 +46,126 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+#define hugepd_none(hpd) ((hpd).pd == 0)
+
static inline pte_t *hugepd_page(hugepd_t hpd)
{
- BUG_ON(!(hpd.pd & HUGEPD_OK));
- return (pte_t *)(hpd.pd & ~HUGEPD_OK);
+ BUG_ON(!hugepd_ok(hpd));
+ return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | 0xc000000000000000);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return hpd.pd & HUGEPD_SHIFT_MASK;
}
-static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
- struct hstate *hstate)
+static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift)
{
- unsigned int shift = huge_page_shift(hstate);
- int psize = shift_to_mmu_psize(shift);
- unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1));
+ unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
pte_t *dir = hugepd_page(*hpdp);
return dir + idx;
}
+pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
+{
+ pgd_t *pg;
+ pud_t *pu;
+ pmd_t *pm;
+ hugepd_t *hpdp = NULL;
+ unsigned pdshift = PGDIR_SHIFT;
+
+ if (shift)
+ *shift = 0;
+
+ pg = pgdir + pgd_index(ea);
+ if (is_hugepd(pg)) {
+ hpdp = (hugepd_t *)pg;
+ } else if (!pgd_none(*pg)) {
+ pdshift = PUD_SHIFT;
+ pu = pud_offset(pg, ea);
+ if (is_hugepd(pu))
+ hpdp = (hugepd_t *)pu;
+ else if (!pud_none(*pu)) {
+ pdshift = PMD_SHIFT;
+ pm = pmd_offset(pu, ea);
+ if (is_hugepd(pm))
+ hpdp = (hugepd_t *)pm;
+ else if (!pmd_none(*pm)) {
+ return pte_offset_map(pm, ea);
+ }
+ }
+ }
+
+ if (!hpdp)
+ return NULL;
+
+ if (shift)
+ *shift = hugepd_shift(*hpdp);
+ return hugepte_offset(hpdp, ea, pdshift);
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
+}
+
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
- unsigned long address, unsigned int psize)
+ unsigned long address, unsigned pdshift, unsigned pshift)
{
- pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
- GFP_KERNEL|__GFP_REPEAT);
+ pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift),
+ GFP_KERNEL|__GFP_REPEAT);
+
+ BUG_ON(pshift > HUGEPD_SHIFT_MASK);
+ BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
if (! new)
return -ENOMEM;
spin_lock(&mm->page_table_lock);
if (!hugepd_none(*hpdp))
- kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
+ kmem_cache_free(PGT_CACHE(pdshift - pshift), new);
else
- hpdp->pd = (unsigned long)new | HUGEPD_OK;
+ hpdp->pd = ((unsigned long)new & ~0x8000000000000000) | pshift;
spin_unlock(&mm->page_table_lock);
return 0;
}
-
-static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
-{
- if (huge_page_shift(hstate) < PUD_SHIFT)
- return pud_offset(pgd, addr);
- else
- return (pud_t *) pgd;
-}
-static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
- struct hstate *hstate)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
- if (huge_page_shift(hstate) < PUD_SHIFT)
- return pud_alloc(mm, pgd, addr);
- else
- return (pud_t *) pgd;
-}
-static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
-{
- if (huge_page_shift(hstate) < PMD_SHIFT)
- return pmd_offset(pud, addr);
- else
- return (pmd_t *) pud;
-}
-static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
- struct hstate *hstate)
-{
- if (huge_page_shift(hstate) < PMD_SHIFT)
- return pmd_alloc(mm, pud, addr);
- else
- return (pmd_t *) pud;
+ pgd_t *pg;
+ pud_t *pu;
+ pmd_t *pm;
+ hugepd_t *hpdp = NULL;
+ unsigned pshift = __ffs(sz);
+ unsigned pdshift = PGDIR_SHIFT;
+
+ addr &= ~(sz-1);
+
+ pg = pgd_offset(mm, addr);
+ if (pshift >= PUD_SHIFT) {
+ hpdp = (hugepd_t *)pg;
+ } else {
+ pdshift = PUD_SHIFT;
+ pu = pud_alloc(mm, pg, addr);
+ if (pshift >= PMD_SHIFT) {
+ hpdp = (hugepd_t *)pu;
+ } else {
+ pdshift = PMD_SHIFT;
+ pm = pmd_alloc(mm, pu, addr);
+ hpdp = (hugepd_t *)pm;
+ }
+ }
+
+ if (!hpdp)
+ return NULL;
+
+ BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
+
+ if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
+ return NULL;
+
+ return hugepte_offset(hpdp, addr, pdshift);
}
/* Build list of addresses of gigantic pages. This function is used in early
@@ -192,94 +199,38 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
return 1;
}
-
-/* Modelled after find_linux_pte() */
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
-
- unsigned int psize;
- unsigned int shift;
- unsigned long sz;
- struct hstate *hstate;
- psize = get_slice_psize(mm, addr);
- shift = mmu_psize_to_shift(psize);
- sz = ((1UL) << shift);
- hstate = size_to_hstate(sz);
-
- addr &= hstate->mask;
-
- pg = pgd_offset(mm, addr);
- if (!pgd_none(*pg)) {
- pu = hpud_offset(pg, addr, hstate);
- if (!pud_none(*pu)) {
- pm = hpmd_offset(pu, addr, hstate);
- if (!pmd_none(*pm))
- return hugepte_offset((hugepd_t *)pm, addr,
- hstate);
- }
- }
-
- return NULL;
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
- hugepd_t *hpdp = NULL;
- struct hstate *hstate;
- unsigned int psize;
- hstate = size_to_hstate(sz);
-
- psize = get_slice_psize(mm, addr);
- BUG_ON(!mmu_huge_psizes[psize]);
-
- addr &= hstate->mask;
-
- pg = pgd_offset(mm, addr);
- pu = hpud_alloc(mm, pg, addr, hstate);
-
- if (pu) {
- pm = hpmd_alloc(mm, pu, addr, hstate);
- if (pm)
- hpdp = (hugepd_t *)pm;
- }
-
- if (! hpdp)
- return NULL;
-
- if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize))
- return NULL;
-
- return hugepte_offset(hpdp, addr, hstate);
-}
-
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
-static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
- unsigned int psize)
+static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
+ unsigned long start, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
{
pte_t *hugepte = hugepd_page(*hpdp);
+ unsigned shift = hugepd_shift(*hpdp);
+ unsigned long pdmask = ~((1UL << pdshift) - 1);
+
+ start &= pdmask;
+ if (start < floor)
+ return;
+ if (ceiling) {
+ ceiling &= pdmask;
+ if (! ceiling)
+ return;
+ }
+ if (end - 1 > ceiling - 1)
+ return;
hpdp->pd = 0;
tlb->need_flush = 1;
- pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
- HUGEPTE_CACHE_NUM+psize-1,
- PGF_CACHENUM_MASK));
+ pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling,
- unsigned int psize)
+ unsigned long floor, unsigned long ceiling)
{
pmd_t *pmd;
unsigned long next;
@@ -291,7 +242,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd))
continue;
- free_hugepte_range(tlb, (hugepd_t *)pmd, psize);
+ free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
+ addr, next, floor, ceiling);
} while (pmd++, addr = next, addr != end);
start &= PUD_MASK;
@@ -317,23 +269,19 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
pud_t *pud;
unsigned long next;
unsigned long start;
- unsigned int shift;
- unsigned int psize = get_slice_psize(tlb->mm, addr);
- shift = mmu_psize_to_shift(psize);
start = addr;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
- if (shift < PMD_SHIFT) {
+ if (!is_hugepd(pud)) {
if (pud_none_or_clear_bad(pud))
continue;
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
- ceiling, psize);
+ ceiling);
} else {
- if (pud_none(*pud))
- continue;
- free_hugepte_range(tlb, (hugepd_t *)pud, psize);
+ free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
+ addr, next, floor, ceiling);
}
} while (pud++, addr = next, addr != end);
@@ -364,121 +312,56 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
{
pgd_t *pgd;
unsigned long next;
- unsigned long start;
/*
- * Comments below take from the normal free_pgd_range(). They
- * apply here too. The tests against HUGEPD_MASK below are
- * essential, because we *don't* test for this at the bottom
- * level. Without them we'll attempt to free a hugepte table
- * when we unmap just part of it, even if there are other
- * active mappings using it.
- *
- * The next few lines have given us lots of grief...
- *
- * Why are we testing HUGEPD* at this top level? Because
- * often there will be no work to do at all, and we'd prefer
- * not to go all the way down to the bottom just to discover
- * that.
- *
- * Why all these "- 1"s? Because 0 represents both the bottom
- * of the address space and the top of it (using -1 for the
- * top wouldn't help much: the masks would do the wrong thing).
- * The rule is that addr 0 and floor 0 refer to the bottom of
- * the address space, but end 0 and ceiling 0 refer to the top
- * Comparisons need to use "end - 1" and "ceiling - 1" (though
- * that end 0 case should be mythical).
+ * Because there are a number of different possible pagetable
+ * layouts for hugepage ranges, we limit knowledge of how
+ * things should be laid out to the allocation path
+ * (huge_pte_alloc(), above). Everything else works out the
+ * structure as it goes from information in the hugepd
+ * pointers. That means that we can't here use the
+ * optimization used in the normal page free_pgd_range(), of
+ * checking whether we're actually covering a large enough
+ * range to have to do anything at the top level of the walk
+ * instead of at the bottom.
*
- * Wherever addr is brought up or ceiling brought down, we
- * must be careful to reject "the opposite 0" before it
- * confuses the subsequent tests. But what about where end is
- * brought down by HUGEPD_SIZE below? no, end can't go down to
- * 0 there.
- *
- * Whereas we round start (addr) and ceiling down, by different
- * masks at different levels, in order to test whether a table
- * now has no other vmas using it, so can be freed, we don't
- * bother to round floor or end up - the tests don't need that.
+ * To make sense of this, you should probably go read the big
+ * block comment at the top of the normal free_pgd_range(),
+ * too.
*/
- unsigned int psize = get_slice_psize(tlb->mm, addr);
-
- addr &= HUGEPD_MASK(psize);
- if (addr < floor) {
- addr += HUGEPD_SIZE(psize);
- if (!addr)
- return;
- }
- if (ceiling) {
- ceiling &= HUGEPD_MASK(psize);
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
- end -= HUGEPD_SIZE(psize);
- if (addr > end - 1)
- return;
- start = addr;
pgd = pgd_offset(tlb->mm, addr);
do {
- psize = get_slice_psize(tlb->mm, addr);
- BUG_ON(!mmu_huge_psizes[psize]);
next = pgd_addr_end(addr, end);
- if (mmu_psize_to_shift(psize) < PUD_SHIFT) {
+ if (!is_hugepd(pgd)) {
if (pgd_none_or_clear_bad(pgd))
continue;
hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} else {
- if (pgd_none(*pgd))
- continue;
- free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
+ free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
+ addr, next, floor, ceiling);
}
} while (pgd++, addr = next, addr != end);
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- if (pte_present(*ptep)) {
- /* We open-code pte_clear because we need to pass the right
- * argument to hpte_need_flush (huge / !huge). Might not be
- * necessary anymore if we make hpte_need_flush() get the
- * page size from the slices
- */
- unsigned int psize = get_slice_psize(mm, addr);
- unsigned int shift = mmu_psize_to_shift(psize);
- unsigned long sz = ((1UL) << shift);
- struct hstate *hstate = size_to_hstate(sz);
- pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
- }
- *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
- return __pte(old);
-}
-
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
pte_t *ptep;
struct page *page;
- unsigned int mmu_psize = get_slice_psize(mm, address);
+ unsigned shift;
+ unsigned long mask;
+
+ ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
/* Verify it is a huge page else bail. */
- if (!mmu_huge_psizes[mmu_psize])
+ if (!ptep || !shift)
return ERR_PTR(-EINVAL);
- ptep = huge_pte_offset(mm, address);
+ mask = (1UL << shift) - 1;
page = pte_page(*ptep);
- if (page) {
- unsigned int shift = mmu_psize_to_shift(mmu_psize);
- unsigned long sz = ((1UL) << shift);
- page += (address % sz) / PAGE_SIZE;
- }
+ if (page)
+ page += (address & mask) / PAGE_SIZE;
return page;
}
@@ -501,6 +384,82 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
+static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ unsigned long mask;
+ unsigned long pte_end;
+ struct page *head, *page;
+ pte_t pte;
+ int refs;
+
+ pte_end = (addr + sz) & ~(sz-1);
+ if (pte_end < end)
+ end = pte_end;
+
+ pte = *ptep;
+ mask = _PAGE_PRESENT | _PAGE_USER;
+ if (write)
+ mask |= _PAGE_RW;
+
+ if ((pte_val(pte) & mask) != mask)
+ return 0;
+
+ /* hugepages are never "special" */
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+ refs = 0;
+ head = pte_page(pte);
+
+ page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+ (*nr)++;
+ page++;
+ refs++;
+ } while (addr += PAGE_SIZE, addr != end);
+
+ if (!page_cache_add_speculative(head, refs)) {
+ *nr -= refs;
+ return 0;
+ }
+
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ /* Could be optimized better */
+ while (*nr) {
+ put_page(page);
+ (*nr)--;
+ }
+ }
+
+ return 1;
+}
+
+static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
+ unsigned long sz)
+{
+ unsigned long __boundary = (addr + sz) & ~(sz-1);
+ return (__boundary - 1 < end - 1) ? __boundary : end;
+}
+
+int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
+ unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ pte_t *ptep;
+ unsigned long sz = 1UL << hugepd_shift(*hugepd);
+ unsigned long next;
+
+ ptep = hugepte_offset(hugepd, addr, pdshift);
+ do {
+ next = hugepte_addr_end(addr, end, sz);
+ if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+ return 0;
+ } while (ptep++, addr = next, addr != end);
+
+ return 1;
+}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
@@ -509,8 +468,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *hstate = hstate_file(file);
int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
- if (!mmu_huge_psizes[mmu_psize])
- return -EINVAL;
return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
@@ -521,229 +478,46 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
return 1UL << mmu_psize_to_shift(psize);
}
-/*
- * Called by asm hashtable.S for doing lazy icache flush
- */
-static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
- pte_t pte, int trap, unsigned long sz)
+static int __init add_huge_page_size(unsigned long long size)
{
- struct page *page;
- int i;
-
- if (!pfn_valid(pte_pfn(pte)))
- return rflags;
-
- page = pte_page(pte);
-
- /* page is dirty */
- if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
- if (trap == 0x400) {
- for (i = 0; i < (sz / PAGE_SIZE); i++)
- __flush_dcache_icache(page_address(page+i));
- set_bit(PG_arch_1, &page->flags);
- } else {
- rflags |= HPTE_R_N;
- }
- }
- return rflags;
-}
+ int shift = __ffs(size);
+ int mmu_psize;
-int hash_huge_page(struct mm_struct *mm, unsigned long access,
- unsigned long ea, unsigned long vsid, int local,
- unsigned long trap)
-{
- pte_t *ptep;
- unsigned long old_pte, new_pte;
- unsigned long va, rflags, pa, sz;
- long slot;
- int err = 1;
- int ssize = user_segment_size(ea);
- unsigned int mmu_psize;
- int shift;
- mmu_psize = get_slice_psize(mm, ea);
-
- if (!mmu_huge_psizes[mmu_psize])
- goto out;
- ptep = huge_pte_offset(mm, ea);
-
- /* Search the Linux page table for a match with va */
- va = hpt_va(ea, vsid, ssize);
+ /* Check that it is a page size supported by the hardware and
+ * that it fits within pagetable and slice limits. */
+ if (!is_power_of_2(size)
+ || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
+ return -EINVAL;
- /*
- * If no pte found or not present, send the problem up to
- * do_page_fault
- */
- if (unlikely(!ptep || pte_none(*ptep)))
- goto out;
+ if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
+ return -EINVAL;
- /*
- * Check the user's access rights to the page. If access should be
- * prevented then send the problem up to do_page_fault.
+#ifdef CONFIG_SPU_FS_64K_LS
+ /* Disable support for 64K huge pages when 64K SPU local store
+ * support is enabled as the current implementation conflicts.
*/
- if (unlikely(access & ~pte_val(*ptep)))
- goto out;
- /*
- * At this point, we have a pte (old_pte) which can be used to build
- * or update an HPTE. There are 2 cases:
- *
- * 1. There is a valid (present) pte with no associated HPTE (this is
- * the most common case)
- * 2. There is a valid (present) pte with an associated HPTE. The
- * current values of the pp bits in the HPTE prevent access
- * because we are doing software DIRTY bit management and the
- * page is currently not DIRTY.
- */
-
-
- do {
- old_pte = pte_val(*ptep);
- if (old_pte & _PAGE_BUSY)
- goto out;
- new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
- } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
- old_pte, new_pte));
-
- rflags = 0x2 | (!(new_pte & _PAGE_RW));
- /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
- rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
- shift = mmu_psize_to_shift(mmu_psize);
- sz = ((1UL) << shift);
- if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- /* No CPU has hugepages but lacks no execute, so we
- * don't need to worry about that case */
- rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
- trap, sz);
-
- /* Check if pte already has an hpte (case 2) */
- if (unlikely(old_pte & _PAGE_HASHPTE)) {
- /* There MIGHT be an HPTE for this pte */
- unsigned long hash, slot;
-
- hash = hpt_hash(va, shift, ssize);
- if (old_pte & _PAGE_F_SECOND)
- hash = ~hash;
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += (old_pte & _PAGE_F_GIX) >> 12;
-
- if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
- ssize, local) == -1)
- old_pte &= ~_PAGE_HPTEFLAGS;
- }
-
- if (likely(!(old_pte & _PAGE_HASHPTE))) {
- unsigned long hash = hpt_hash(va, shift, ssize);
- unsigned long hpte_group;
-
- pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
-
-repeat:
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
-
- /* clear HPTE slot informations in new PTE */
-#ifdef CONFIG_PPC_64K_PAGES
- new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
-#else
- new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
-#endif
- /* Add in WIMG bits */
- rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
- _PAGE_COHERENT | _PAGE_GUARDED));
-
- /* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
- mmu_psize, ssize);
-
- /* Primary is full, try the secondary */
- if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
- HPTE_V_SECONDARY,
- mmu_psize, ssize);
- if (slot == -1) {
- if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP)&~0x7UL;
-
- ppc_md.hpte_remove(hpte_group);
- goto repeat;
- }
- }
-
- if (unlikely(slot == -2))
- panic("hash_huge_page: pte_insert failed\n");
-
- new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
- }
+ if (shift == PAGE_SHIFT_64K)
+ return -EINVAL;
+#endif /* CONFIG_SPU_FS_64K_LS */
- /*
- * No need to use ldarx/stdcx here
- */
- *ptep = __pte(new_pte & ~_PAGE_BUSY);
+ BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
- err = 0;
+ /* Return if huge page size has already been setup */
+ if (size_to_hstate(size))
+ return 0;
- out:
- return err;
-}
+ hugetlb_add_hstate(shift - PAGE_SHIFT);
-static void __init set_huge_psize(int psize)
-{
- /* Check that it is a page size supported by the hardware and
- * that it fits within pagetable limits. */
- if (mmu_psize_defs[psize].shift &&
- mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
- (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
- mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
- mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
- /* Return if huge page size has already been setup or is the
- * same as the base page size. */
- if (mmu_huge_psizes[psize] ||
- mmu_psize_defs[psize].shift == PAGE_SHIFT)
- return;
- if (WARN_ON(HUGEPTE_CACHE_NAME(psize) == NULL))
- return;
- hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);
-
- switch (mmu_psize_defs[psize].shift) {
- case PAGE_SHIFT_64K:
- /* We only allow 64k hpages with 4k base page,
- * which was checked above, and always put them
- * at the PMD */
- hugepte_shift[psize] = PMD_SHIFT;
- break;
- case PAGE_SHIFT_16M:
- /* 16M pages can be at two different levels
- * of pagestables based on base page size */
- if (PAGE_SHIFT == PAGE_SHIFT_64K)
- hugepte_shift[psize] = PMD_SHIFT;
- else /* 4k base page */
- hugepte_shift[psize] = PUD_SHIFT;
- break;
- case PAGE_SHIFT_16G:
- /* 16G pages are always at PGD level */
- hugepte_shift[psize] = PGDIR_SHIFT;
- break;
- }
- hugepte_shift[psize] -= mmu_psize_defs[psize].shift;
- } else
- hugepte_shift[psize] = 0;
+ return 0;
}
static int __init hugepage_setup_sz(char *str)
{
unsigned long long size;
- int mmu_psize;
- int shift;
size = memparse(str, &str);
- shift = __ffs(size);
- mmu_psize = shift_to_mmu_psize(shift);
- if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
- set_huge_psize(mmu_psize);
- else
+ if (add_huge_page_size(size) != 0)
printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
return 1;
@@ -752,41 +526,55 @@ __setup("hugepagesz=", hugepage_setup_sz);
static int __init hugetlbpage_init(void)
{
- unsigned int psize;
+ int psize;
if (!cpu_has_feature(CPU_FTR_16M_PAGE))
return -ENODEV;
- /* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE
- * and adjust PTE_NONCACHE_NUM if the number of supported huge page
- * sizes changes.
- */
- set_huge_psize(MMU_PAGE_16M);
- set_huge_psize(MMU_PAGE_16G);
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+ unsigned shift;
+ unsigned pdshift;
- /* Temporarily disable support for 64K huge pages when 64K SPU local
- * store support is enabled as the current implementation conflicts.
- */
-#ifndef CONFIG_SPU_FS_64K_LS
- set_huge_psize(MMU_PAGE_64K);
-#endif
+ if (!mmu_psize_defs[psize].shift)
+ continue;
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
- if (mmu_huge_psizes[psize]) {
- pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
- kmem_cache_create(
- HUGEPTE_CACHE_NAME(psize),
- HUGEPTE_TABLE_SIZE(psize),
- HUGEPTE_TABLE_SIZE(psize),
- 0,
- NULL);
- if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
- panic("hugetlbpage_init(): could not create %s"\
- "\n", HUGEPTE_CACHE_NAME(psize));
- }
+ shift = mmu_psize_to_shift(psize);
+
+ if (add_huge_page_size(1ULL << shift) < 0)
+ continue;
+
+ if (shift < PMD_SHIFT)
+ pdshift = PMD_SHIFT;
+ else if (shift < PUD_SHIFT)
+ pdshift = PUD_SHIFT;
+ else
+ pdshift = PGDIR_SHIFT;
+
+ pgtable_cache_add(pdshift - shift, NULL);
+ if (!PGT_CACHE(pdshift - shift))
+ panic("hugetlbpage_init(): could not create "
+ "pgtable cache for %d bit pagesize\n", shift);
}
+ /* Set default large page size. Currently, we pick 16M or 1M
+ * depending on what is available
+ */
+ if (mmu_psize_defs[MMU_PAGE_16M].shift)
+ HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
+ else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+ HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
+
return 0;
}
module_init(hugetlbpage_init);
+
+void flush_dcache_icache_hugepage(struct page *page)
+{
+ int i;
+
+ BUG_ON(!PageCompound(page));
+
+ for (i = 0; i < (1UL << compound_order(page)); i++)
+ __flush_dcache_icache(page_address(page+i));
+}
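
The rewritten hugetlbpage.c drops the per-psize HUGEPD_OK flag and instead stores the huge page shift in the low bits of the hugepd word, with the hugepte table pointer in the rest (see hugepd_page()/hugepd_shift() and the `| pshift` in __hugepte_alloc() above). A minimal standalone sketch of that encoding; the mask value is a stand-in for HUGEPD_SHIFT_MASK and the kernel-address fixup is omitted:

#include <stdio.h>
#include <stdint.h>

#define SHIFT_MASK	0x3fUL		/* stand-in for HUGEPD_SHIFT_MASK */

typedef struct { unsigned long pd; } hugepd_t;

static hugepd_t hugepd_encode(void *table, unsigned shift)
{
	hugepd_t hpd = { ((uintptr_t)table & ~SHIFT_MASK) | shift };

	return hpd;
}

static unsigned hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & SHIFT_MASK;
}

static void *hugepd_table(hugepd_t hpd)
{
	return (void *)(hpd.pd & ~SHIFT_MASK);
}

int main(void)
{
	/* Table aligned well past SHIFT_MASK, so the low bits are free. */
	static unsigned long table[512] __attribute__((aligned(4096)));
	hugepd_t hpd = hugepd_encode(table, 24);	/* 16M pages */

	printf("shift=%u table=%p (was %p)\n",
	       hugepd_shift(hpd), hugepd_table(hpd), (void *)table);
	return 0;
}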
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 9ddcfb4dc13..4ec900af332 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -82,6 +82,11 @@ extern struct task_struct *current_set[NR_CPUS];
int __map_without_bats;
int __map_without_ltlbs;
+/*
+ * This tells the system to allow ioremapping memory marked as reserved.
+ */
+int __allow_ioremap_reserved;
+
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
@@ -131,9 +136,13 @@ void __init MMU_init(void)
MMU_setup();
if (lmb.memory.cnt > 1) {
+#ifndef CONFIG_WII
lmb.memory.cnt = 1;
lmb_analyze();
printk(KERN_WARNING "Only using first contiguous memory region");
+#else
+ wii_memory_fixups();
+#endif
}
total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 335c578b9cc..776f28d02b6 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>
+#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -119,30 +120,63 @@ static void pmd_ctor(void *addr)
memset(addr, 0, PMD_TABLE_SIZE);
}
-static const unsigned int pgtable_cache_size[2] = {
- PGD_TABLE_SIZE, PMD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-#ifdef CONFIG_PPC_64K_PAGES
- "pgd_cache", "pmd_cache",
-#else
- "pgd_cache", "pud_pmd_cache",
-#endif /* CONFIG_PPC_64K_PAGES */
-};
-
-#ifdef CONFIG_HUGETLB_PAGE
-/* Hugepages need an extra cache per hugepagesize, initialized in
- * hugetlbpage.c. We can't put into the tables above, because HPAGE_SHIFT
- * is not compile time constant. */
-struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
-#else
-struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
-#endif
+struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
+
+/*
+ * Create a kmem_cache() for pagetables. This is not used for PTE
+ * pages - they're linked to struct page, come from the normal free
+ * pages pool and have a different entry size (see real_pte_t) to
+ * everything else. Caches created by this function are used for all
+ * the higher level pagetables, and for hugepage pagetables.
+ */
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
+{
+ char *name;
+ unsigned long table_size = sizeof(void *) << shift;
+ unsigned long align = table_size;
+
+ /* When batching pgtable pointers for RCU freeing, we store
+ * the index size in the low bits. Table alignment must be
+ * big enough to fit it.
+ *
+ * Likewise, hugepage pagetable pointers contain a (different)
+ * shift value in the low bits. All tables must be aligned so
+ * as to leave enough 0 bits in the address to contain it. */
+ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
+ HUGEPD_SHIFT_MASK + 1);
+ struct kmem_cache *new;
+
+ /* It would be nice if this was a BUILD_BUG_ON(), but at the
+ * moment, gcc doesn't seem to recognize is_power_of_2 as a
+ * constant expression; so much for that. */
+ BUG_ON(!is_power_of_2(minalign));
+ BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
+
+ if (PGT_CACHE(shift))
+ return; /* Already have a cache of this size */
+
+ align = max_t(unsigned long, align, minalign);
+ name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
+ new = kmem_cache_create(name, table_size, align, 0, ctor);
+ PGT_CACHE(shift) = new;
+
+ pr_debug("Allocated pgtable cache for order %d\n", shift);
+}
+
void pgtable_cache_init(void)
{
- pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0], PGD_TABLE_SIZE, PGD_TABLE_SIZE, SLAB_PANIC, pgd_ctor);
- pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1], PMD_TABLE_SIZE, PMD_TABLE_SIZE, SLAB_PANIC, pmd_ctor);
+ pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
+ pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
+ if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
+ panic("Couldn't allocate pgtable caches");
+
+ /* In all current configs, when the PUD index exists it's the
+ * same size as either the pgd or pmd index. Verify that the
+ * initialization above has also created a PUD cache. This
+ * will need re-examination if we add new possibilities for
+ * the pagetable layout. */
+ BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
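
pgtable_cache_add() above replaces the fixed pgd/pmd cache pair with one cache per index size: a table of 2^shift pointers, aligned at least far enough that the low address bits can later carry an index size or hugepd shift. A standalone sketch of that size/alignment arithmetic, with stand-in values for MAX_PGTABLE_INDEX_SIZE and HUGEPD_SHIFT_MASK:

#include <stdio.h>

int main(void)
{
	unsigned long max_index = 0xf;		/* stand-in for MAX_PGTABLE_INDEX_SIZE */
	unsigned long hugepd_mask = 0x3f;	/* stand-in for HUGEPD_SHIFT_MASK */
	unsigned long minalign = (max_index + 1 > hugepd_mask + 1)
					? max_index + 1 : hugepd_mask + 1;
	unsigned shift;

	for (shift = 4; shift <= 12; shift += 4) {
		unsigned long size = sizeof(void *) << shift;	/* 2^shift pointers */
		unsigned long align = size > minalign ? size : minalign;

		printf("pgtable-2^%u: %lu bytes, align %lu\n", shift, size, align);
	}
	return 0;
}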
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 59736317bf0..b9b152558f9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,6 +32,7 @@
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>
+#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
@@ -417,18 +418,26 @@ EXPORT_SYMBOL(flush_dcache_page);
void flush_dcache_icache_page(struct page *page)
{
+#ifdef CONFIG_HUGETLB_PAGE
+ if (PageCompound(page)) {
+ flush_dcache_icache_hugepage(page);
+ return;
+ }
+#endif
#ifdef CONFIG_BOOKE
- void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
- __flush_dcache_icache(start);
- kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+ {
+ void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+ __flush_dcache_icache(start);
+ kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+ }
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
/* On 8xx there is no need to kmap since highmem is not supported */
__flush_dcache_icache(page_address(page));
#else
__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
-
}
+
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
clear_page(page);
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index dbeb86ac90c..b910d37aea1 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
+#include <linux/module.h>
#include <asm/mmu_context.h>
@@ -32,7 +33,7 @@ static DEFINE_IDR(mmu_context_idr);
#define NO_CONTEXT 0
#define MAX_CONTEXT ((1UL << 19) - 1)
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+int __init_new_context(void)
{
int index;
int err;
@@ -57,22 +58,41 @@ again:
return -ENOMEM;
}
+ return index;
+}
+EXPORT_SYMBOL_GPL(__init_new_context);
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int index;
+
+ index = __init_new_context();
+ if (index < 0)
+ return index;
+
/* The old code would re-promote on fork, we don't do that
* when using slices as it could cause problem promoting slices
* that have been forced down to 4K
*/
if (slice_mm_new_context(mm))
slice_set_user_psize(mm, mmu_virtual_psize);
+ subpage_prot_init_new_context(mm);
mm->context.id = index;
return 0;
}
-void destroy_context(struct mm_struct *mm)
+void __destroy_context(int context_id)
{
spin_lock(&mmu_context_lock);
- idr_remove(&mmu_context_idr, mm->context.id);
+ idr_remove(&mmu_context_idr, context_id);
spin_unlock(&mmu_context_lock);
+}
+EXPORT_SYMBOL_GPL(__destroy_context);
+void destroy_context(struct mm_struct *mm)
+{
+ __destroy_context(mm->context.id);
+ subpage_prot_free(mm);
mm->context.id = NO_CONTEXT;
}
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index be4f34c30a0..1044a634b6d 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -353,7 +353,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
read_lock(&tasklist_lock);
for_each_process(p) {
if (p->mm)
- cpu_mask_clear_cpu(cpu, mm_cpumask(p->mm));
+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
}
read_unlock(&tasklist_lock);
break;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d2e5321d5ea..d49a77503e1 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -98,23 +98,13 @@ extern void _tlbia(void);
#ifdef CONFIG_PPC32
-struct tlbcam {
- u32 MAS0;
- u32 MAS1;
- u32 MAS2;
- u32 MAS3;
- u32 MAS7;
-};
-
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, int flags);
-extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags, unsigned int pid);
-extern void invalidate_tlbcam_entry(int index);
extern int __map_without_bats;
+extern int __allow_ioremap_reserved;
extern unsigned long ioremap_base;
extern unsigned int rtas_data, rtas_size;
@@ -136,24 +126,32 @@ extern phys_addr_t total_lowmem;
extern phys_addr_t memstart_addr;
extern phys_addr_t lowmem_end_addr;
+#ifdef CONFIG_WII
+extern unsigned long wii_hole_start;
+extern unsigned long wii_hole_size;
+
+extern unsigned long wii_mmu_mapin_mem2(unsigned long top);
+extern void wii_memory_fixups(void);
+#endif
+
/* ...and now those things that may be slightly different between processor
* architectures. -- Dan
*/
#if defined(CONFIG_8xx)
#define MMU_init_hw() do { } while(0)
-#define mmu_mapin_ram() (0UL)
+#define mmu_mapin_ram(top) (0UL)
#elif defined(CONFIG_4xx)
extern void MMU_init_hw(void);
-extern unsigned long mmu_mapin_ram(void);
+extern unsigned long mmu_mapin_ram(unsigned long top);
#elif defined(CONFIG_FSL_BOOKE)
extern void MMU_init_hw(void);
-extern unsigned long mmu_mapin_ram(void);
+extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void);
#elif defined(CONFIG_PPC32)
/* anything 32-bit except 4xx or 8xx */
extern void MMU_init_hw(void);
-extern unsigned long mmu_mapin_ram(void);
+extern unsigned long mmu_mapin_ram(unsigned long top);
#endif
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 53040931de3..99df697c601 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -49,12 +49,12 @@ struct pte_freelist_batch
{
struct rcu_head rcu;
unsigned int index;
- pgtable_free_t tables[0];
+ unsigned long tables[0];
};
#define PTE_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
- / sizeof(pgtable_free_t))
+ / sizeof(unsigned long))
static void pte_free_smp_sync(void *arg)
{
@@ -64,13 +64,13 @@ static void pte_free_smp_sync(void *arg)
/* This is only called when we are critically out of memory
* (and fail to get a page in pte_free_tlb).
*/
-static void pgtable_free_now(pgtable_free_t pgf)
+static void pgtable_free_now(void *table, unsigned shift)
{
pte_freelist_forced_free++;
smp_call_function(pte_free_smp_sync, NULL, 1);
- pgtable_free(pgf);
+ pgtable_free(table, shift);
}
static void pte_free_rcu_callback(struct rcu_head *head)
@@ -79,8 +79,12 @@ static void pte_free_rcu_callback(struct rcu_head *head)
container_of(head, struct pte_freelist_batch, rcu);
unsigned int i;
- for (i = 0; i < batch->index; i++)
- pgtable_free(batch->tables[i]);
+ for (i = 0; i < batch->index; i++) {
+ void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+ }
free_page((unsigned long)batch);
}
@@ -91,25 +95,28 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
call_rcu(&batch->rcu, pte_free_rcu_callback);
}
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
/* This is safe since tlb_gather_mmu has disabled preemption */
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+ unsigned long pgf;
if (atomic_read(&tlb->mm->mm_users) < 2 ||
cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
- pgtable_free(pgf);
+ pgtable_free(table, shift);
return;
}
if (*batchp == NULL) {
*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
if (*batchp == NULL) {
- pgtable_free_now(pgf);
+ pgtable_free_now(table, shift);
return;
}
(*batchp)->index = 0;
}
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf = (unsigned long)table | shift;
(*batchp)->tables[(*batchp)->index++] = pgf;
if ((*batchp)->index == PTE_FREELIST_SIZE) {
pte_free_submit(*batchp);
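
The batch now stores the table pointer and its index shift packed into one unsigned long; this works because page-table fragments are aligned well beyond MAX_PGTABLE_INDEX_SIZE, so the low pointer bits are free, and the BUG_ON above enforces that invariant. A small sketch of the pack/unpack convention, assuming MAX_PGTABLE_INDEX_SIZE is a small all-ones mask such as 0xf (illustrative only):

/* Sketch of the encoding used in batch->tables[]: the low bits carry
 * the page-table index shift, the rest is the (aligned) pointer.
 */
static void demo_pack_unpack(void *table, unsigned int shift)
{
	unsigned long pgf = (unsigned long)table | shift;           /* pack   */
	void *t = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);           /* unpack */
	unsigned int s = pgf & MAX_PGTABLE_INDEX_SIZE;

	BUG_ON(t != table || s != shift);
}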
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index cb96cb2e17c..573b3bd1c45 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
+#include <linux/lmb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -191,7 +192,8 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
- if (mem_init_done && (p < virt_to_phys(high_memory))) {
+ if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+ !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
(unsigned long long)p, __builtin_return_address(0));
return NULL;
@@ -283,18 +285,18 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
}
/*
- * Map in a big chunk of physical memory starting at PAGE_OFFSET.
+ * Map in a chunk of physical memory starting at start.
*/
-void __init mapin_ram(void)
+void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
unsigned long v, s, f;
phys_addr_t p;
int ktext;
- s = mmu_mapin_ram();
+ s = offset;
v = PAGE_OFFSET + s;
p = memstart_addr + s;
- for (; s < total_lowmem; s += PAGE_SIZE) {
+ for (; s < top; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
map_page(v, p, f);
@@ -307,6 +309,30 @@ void __init mapin_ram(void)
}
}
+void __init mapin_ram(void)
+{
+ unsigned long s, top;
+
+#ifndef CONFIG_WII
+ top = total_lowmem;
+ s = mmu_mapin_ram(top);
+ __mapin_ram_chunk(s, top);
+#else
+ if (!wii_hole_size) {
+ s = mmu_mapin_ram(total_lowmem);
+ __mapin_ram_chunk(s, total_lowmem);
+ } else {
+ top = wii_hole_start;
+ s = mmu_mapin_ram(top);
+ __mapin_ram_chunk(s, top);
+
+ top = lmb_end_of_DRAM();
+ s = wii_mmu_mapin_mem2(top);
+ __mapin_ram_chunk(s, top);
+ }
+#endif
+}
+
/* Scan the real Linux page tables and return a PTE pointer for
* a virtual address in a context.
* Returns true (1) if PTE was found, zero otherwise. The pointer to
@@ -356,7 +382,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return 0;
if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
return -EINVAL;
- set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+ __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
wmb();
#ifdef CONFIG_PPC_STD_MMU
flush_hash_pages(0, address, pmd_val(*kpmd), 1);
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 2d2a87e1015..f11c2cdcb0f 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -72,7 +72,7 @@ unsigned long p_mapped_by_bats(phys_addr_t pa)
return 0;
}
-unsigned long __init mmu_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(unsigned long top)
{
unsigned long tot, bl, done;
unsigned long max_size = (256<<20);
@@ -86,7 +86,7 @@ unsigned long __init mmu_mapin_ram(void)
/* Make sure we don't map a block larger than the
smallest alignment of the physical address. */
- tot = total_lowmem;
+ tot = top;
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > tot)
break;
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 4cafc0c33d0..a040b81e93b 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -24,9 +24,9 @@
* Also makes sure that the subpage_prot_table structure is
* reinitialized for the next user.
*/
-void subpage_prot_free(pgd_t *pgd)
+void subpage_prot_free(struct mm_struct *mm)
{
- struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
+ struct subpage_prot_table *spt = &mm->context.spt;
unsigned long i, j, addr;
u32 **p;
@@ -51,6 +51,13 @@ void subpage_prot_free(pgd_t *pgd)
spt->maxaddr = 0;
}
+void subpage_prot_init_new_context(struct mm_struct *mm)
+{
+ struct subpage_prot_table *spt = &mm->context.spt;
+
+ memset(spt, 0, sizeof(*spt));
+}
+
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
int npages)
{
@@ -87,7 +94,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
struct mm_struct *mm = current->mm;
- struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
+ struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
int i, nw;
unsigned long next, limit;
@@ -136,7 +143,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
struct mm_struct *mm = current->mm;
- struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
+ struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
int i, nw;
unsigned long next, limit;
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 2b2f35f6985..282d9306361 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -53,11 +53,6 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
i = batch->index;
- /* We mask the address for the base page size. Huge pages will
- * have applied their own masking already
- */
- addr &= PAGE_MASK;
-
/* Get page size (maybe move back to caller).
*
* NOTE: when using special 64K mappings in 4K environment like
@@ -75,6 +70,9 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
} else
psize = pte_pagesize_index(mm, addr, pte);
+ /* Mask the address for the correct page size */
+ addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+
/* Build full vaddr */
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
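
The flush now masks the address with the page size that actually backs the PTE instead of the base PAGE_SIZE. A quick worked example of the new expression, using the usual shift values (12 for 4K, 16 for 64K) as an assumption:

/* For a 64K page, mmu_psize_defs[psize].shift == 16, so
 *     addr &= ~((1UL << 16) - 1);
 * clears the low 16 bits and aligns addr to the 64K page, whereas the
 * removed "addr &= PAGE_MASK" only aligned to the 4K base page size on
 * a 4K kernel and could feed a misaligned vaddr into the hash flush.
 */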
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index a6ce8056662..da9b20a6376 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -79,7 +79,7 @@ cpld_unmask_irq(unsigned int irq)
}
static struct irq_chip cpld_pic = {
- .typename = " CPLD PIC ",
+ .name = " CPLD PIC ",
.mask = cpld_mask_irq,
.ack = cpld_mask_irq,
.unmask = cpld_unmask_irq,
@@ -132,7 +132,7 @@ static int
cpld_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq);
return 0;
}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 8b8e9560a31..47ea1be1481 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -62,3 +62,8 @@ config PPC_MPC5200_GPIO
select GENERIC_GPIO
help
Enable gpiolib support for mpc5200 based boards
+
+config PPC_MPC5200_LPBFIFO
+ tristate "MPC5200 LocalPlus bus FIFO driver"
+ depends on PPC_MPC52xx
+ select PPC_BESTCOMM_GEN_BD
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index bfd4f52cf3d..2bc8cd0c5cf 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -15,3 +15,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
endif
obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
+obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index bcc69e1f77c..45c0cb9b67e 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -10,7 +10,7 @@
*/
#include <linux/init.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 68e4f1696d1..0bac3a3dbec 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -74,7 +74,7 @@ static void media5200_irq_mask(unsigned int virq)
}
static struct irq_chip media5200_irq_chip = {
- .typename = "Media5200 FPGA",
+ .name = "Media5200 FPGA",
.unmask = media5200_irq_unmask,
.mask = media5200_irq_mask,
.mask_ack = media5200_irq_mask,
@@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
u32 status, enable;
/* Mask off the cascaded IRQ */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->mask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
/* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
* are pending. 'ffs()' is 1 based */
@@ -104,17 +104,17 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
}
/* Processing done; can reenable the cascade now */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED))
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int media5200_irq_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
set_irq_chip_data(virq, &media5200_irq);
@@ -127,7 +127,7 @@ static int media5200_irq_map(struct irq_host *h, unsigned int virq,
}
static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index bfbcd418e69..6f8ebe1085b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -16,8 +16,14 @@
* output signals or measure input signals.
*
* This driver supports the GPIO and IRQ controller functions of the GPT
- * device. Timer functions are not yet supported, nor is the watchdog
- * timer.
+ * device. Timer functions are not yet supported.
+ *
+ * The timer gpt0 can be used as watchdog (wdt). If the wdt mode is used,
+ * this prevents the use of any gpt0 gpt function (i.e. they will fail with
+ * -EBUSY). Thus, the safety wdt function always has precedence over the gpt
+ * function. If the kernel has been compiled with CONFIG_WATCHDOG_NOWAYOUT,
+ * this means that gpt0 is locked in wdt mode until the next reboot - this
+ * may be a requirement in safety applications.
*
* To use the GPIO function, the following two properties must be added
* to the device tree node for the gpt device (typically in the .dts file
@@ -46,17 +52,24 @@
* the output mode. This driver does not change the output mode setting.
*/
+#include <linux/device.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/kernel.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <asm/div64.h>
#include <asm/mpc52xx.h>
MODULE_DESCRIPTION("Freescale MPC52xx gpt driver");
-MODULE_AUTHOR("Sascha Hauer, Grant Likely");
+MODULE_AUTHOR("Sascha Hauer, Grant Likely, Albrecht Dreß");
MODULE_LICENSE("GPL");
/**
@@ -66,18 +79,27 @@ MODULE_LICENSE("GPL");
* @lock: spinlock to coordinate between different functions.
* @of_gc: of_gpio_chip instance structure; used when GPIO is enabled
* @irqhost: Pointer to irq_host instance; used when IRQ mode is supported
+ * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
+ * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
+ * if the timer is actively used as wdt which blocks gpt functions
*/
struct mpc52xx_gpt_priv {
+ struct list_head list; /* List of all GPT devices */
struct device *dev;
struct mpc52xx_gpt __iomem *regs;
spinlock_t lock;
struct irq_host *irqhost;
+ u32 ipb_freq;
+ u8 wdt_mode;
#if defined(CONFIG_GPIOLIB)
struct of_gpio_chip of_gc;
#endif
};
+LIST_HEAD(mpc52xx_gpt_list);
+DEFINE_MUTEX(mpc52xx_gpt_list_mutex);
+
#define MPC52xx_GPT_MODE_MS_MASK (0x07)
#define MPC52xx_GPT_MODE_MS_IC (0x01)
#define MPC52xx_GPT_MODE_MS_OC (0x02)
@@ -88,15 +110,25 @@ struct mpc52xx_gpt_priv {
#define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20)
#define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30)
+#define MPC52xx_GPT_MODE_COUNTER_ENABLE (0x1000)
+#define MPC52xx_GPT_MODE_CONTINUOUS (0x0400)
+#define MPC52xx_GPT_MODE_OPEN_DRAIN (0x0200)
#define MPC52xx_GPT_MODE_IRQ_EN (0x0100)
+#define MPC52xx_GPT_MODE_WDT_EN (0x8000)
#define MPC52xx_GPT_MODE_ICT_MASK (0x030000)
#define MPC52xx_GPT_MODE_ICT_RISING (0x010000)
#define MPC52xx_GPT_MODE_ICT_FALLING (0x020000)
#define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000)
+#define MPC52xx_GPT_MODE_WDT_PING (0xa5)
+
#define MPC52xx_GPT_STATUS_IRQMASK (0x000f)
+#define MPC52xx_GPT_CAN_WDT (1 << 0)
+#define MPC52xx_GPT_IS_WDT (1 << 1)
+
+
/* ---------------------------------------------------------------------
* Cascaded interrupt controller hooks
*/
@@ -149,7 +181,7 @@ static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type)
}
static struct irq_chip mpc52xx_gpt_irq_chip = {
- .typename = "MPC52xx GPT",
+ .name = "MPC52xx GPT",
.unmask = mpc52xx_gpt_irq_unmask,
.mask = mpc52xx_gpt_irq_mask,
.ack = mpc52xx_gpt_irq_ack,
@@ -182,7 +214,7 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
}
static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
@@ -190,7 +222,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]);
- if ((intsize < 1) || (intspec[0] < 1) || (intspec[0] > 3)) {
+ if ((intsize < 1) || (intspec[0] > 3)) {
dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name);
return -EINVAL;
}
@@ -211,13 +243,11 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
{
int cascade_virq;
unsigned long flags;
-
- /* Only setup cascaded IRQ if device tree claims the GPT is
- * an interrupt controller */
- if (!of_find_property(node, "interrupt-controller", NULL))
- return;
+ u32 mode;
cascade_virq = irq_of_parse_and_map(node, 0);
+ if (!cascade_virq)
+ return;
gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1,
&mpc52xx_gpt_irq_ops, -1);
@@ -227,14 +257,16 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
}
gpt->irqhost->host_data = gpt;
-
set_irq_data(cascade_virq, gpt);
set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
- /* Set to Input Capture mode */
+ /* If the GPT is currently disabled, then change it to be in Input
+ * Capture mode. If the mode is non-zero, then the pin could be
+ * already in use for something. */
spin_lock_irqsave(&gpt->lock, flags);
- clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK,
- MPC52xx_GPT_MODE_MS_IC);
+ mode = in_be32(&gpt->regs->mode);
+ if ((mode & MPC52xx_GPT_MODE_MS_MASK) == 0)
+ out_be32(&gpt->regs->mode, mode | MPC52xx_GPT_MODE_MS_IC);
spin_unlock_irqrestore(&gpt->lock, flags);
dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq);
@@ -335,6 +367,354 @@ static void
mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { }
#endif /* defined(CONFIG_GPIOLIB) */
+/***********************************************************************
+ * Timer API
+ */
+
+/**
+ * mpc52xx_gpt_from_irq - Return the GPT device associated with an IRQ number
+ * @irq: irq of timer.
+ */
+struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq)
+{
+ struct mpc52xx_gpt_priv *gpt;
+ struct list_head *pos;
+
+ /* Iterate over the list of timers looking for a matching device */
+ mutex_lock(&mpc52xx_gpt_list_mutex);
+ list_for_each(pos, &mpc52xx_gpt_list) {
+ gpt = container_of(pos, struct mpc52xx_gpt_priv, list);
+ if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) {
+ mutex_unlock(&mpc52xx_gpt_list_mutex);
+ return gpt;
+ }
+ }
+ mutex_unlock(&mpc52xx_gpt_list_mutex);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mpc52xx_gpt_from_irq);
+
+static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
+ int continuous, int as_wdt)
+{
+ u32 clear, set;
+ u64 clocks;
+ u32 prescale;
+ unsigned long flags;
+
+ clear = MPC52xx_GPT_MODE_MS_MASK | MPC52xx_GPT_MODE_CONTINUOUS;
+ set = MPC52xx_GPT_MODE_MS_GPIO | MPC52xx_GPT_MODE_COUNTER_ENABLE;
+ if (as_wdt) {
+ clear |= MPC52xx_GPT_MODE_IRQ_EN;
+ set |= MPC52xx_GPT_MODE_WDT_EN;
+ } else if (continuous)
+ set |= MPC52xx_GPT_MODE_CONTINUOUS;
+
+ /* Determine the number of clocks in the requested period. 64 bit
+ * arithmetic is done here to preserve the precision until the value
+ * is scaled back down into the u32 range. Period is in 'ns', bus
+ * frequency is in Hz. */
+ clocks = period * (u64)gpt->ipb_freq;
+ do_div(clocks, 1000000000); /* Scale it down to ns range */
+
+ /* This device cannot handle a clock count greater than 32 bits */
+ if (clocks > 0xffffffff)
+ return -EINVAL;
+
+ /* Calculate the prescaler and count values from the clocks value.
+ * 'clocks' is the number of clock ticks in the period. The timer
+ * has 16 bit precision and a 16 bit prescaler. Prescaler is
+ * calculated by integer dividing the clocks by 0x10000 (shifting
+ * down 16 bits) to obtain the smallest possible divisor for clocks
+ * to get a 16 bit count value.
+ *
+ * Note: the prescale register is '1' based, not '0' based. i.e. a
+ * value of '1' means divide the clock by one. 0xffff divides the
+ * clock by 0xffff. '0x0000' does not divide by zero, but wraps
+ * around and divides by 0x10000. That is why prescale must be
+ * a u32 variable, not a u16, for this calculation. */
+ prescale = (clocks >> 16) + 1;
+ do_div(clocks, prescale);
+ if (clocks > 0xffff) {
+ pr_err("calculation error; prescale:%x clocks:%llx\n",
+ prescale, clocks);
+ return -EINVAL;
+ }
+
+ /* Set and enable the timer, reject an attempt to use a wdt as gpt */
+ spin_lock_irqsave(&gpt->lock, flags);
+ if (as_wdt)
+ gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
+ else if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
+ spin_unlock_irqrestore(&gpt->lock, flags);
+ return -EBUSY;
+ }
+ out_be32(&gpt->regs->count, prescale << 16 | clocks);
+ clrsetbits_be32(&gpt->regs->mode, clear, set);
+ spin_unlock_irqrestore(&gpt->lock, flags);
+
+ return 0;
+}
+
+/**
+ * mpc52xx_gpt_start_timer - Set and enable the GPT timer
+ * @gpt: Pointer to gpt private data structure
+ * @period: period of timer in ns; max. ~130s @ 33MHz IPB clock
+ * @continuous: set to 1 to make timer continuous free running
+ *
+ * An interrupt will be generated every time the timer fires
+ */
+int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
+ int continuous)
+{
+ return mpc52xx_gpt_do_start(gpt, period, continuous, 0);
+}
+EXPORT_SYMBOL(mpc52xx_gpt_start_timer);
+
+/**
+ * mpc52xx_gpt_stop_timer - Stop a gpt
+ * @gpt: Pointer to gpt private data structure
+ *
+ * Returns an error if attempting to stop a wdt
+ */
+int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt)
+{
+ unsigned long flags;
+
+ /* reject the operation if the timer is used as watchdog (gpt 0 only) */
+ spin_lock_irqsave(&gpt->lock, flags);
+ if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
+ spin_unlock_irqrestore(&gpt->lock, flags);
+ return -EBUSY;
+ }
+
+ clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_COUNTER_ENABLE);
+ spin_unlock_irqrestore(&gpt->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(mpc52xx_gpt_stop_timer);
+
+/**
+ * mpc52xx_gpt_timer_period - Read the timer period
+ * @gpt: Pointer to gpt private data structure
+ *
+ * Returns the timer period in ns
+ */
+u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
+{
+ u64 period;
+ u64 prescale;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpt->lock, flags);
+ period = in_be32(&gpt->regs->count);
+ spin_unlock_irqrestore(&gpt->lock, flags);
+
+ prescale = period >> 16;
+ period &= 0xffff;
+ if (prescale == 0)
+ prescale = 0x10000;
+ period = period * prescale * 1000000000ULL;
+ do_div(period, (u64)gpt->ipb_freq);
+ return period;
+}
+EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
+
+#if defined(CONFIG_MPC5200_WDT)
+/***********************************************************************
+ * Watchdog API for gpt0
+ */
+
+#define WDT_IDENTITY "mpc52xx watchdog on GPT0"
+
+/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
+static unsigned long wdt_is_active;
+
+/* wdt-capable gpt */
+static struct mpc52xx_gpt_priv *mpc52xx_gpt_wdt;
+
+/* low-level wdt functions */
+static inline void mpc52xx_gpt_wdt_ping(struct mpc52xx_gpt_priv *gpt_wdt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpt_wdt->lock, flags);
+ out_8((u8 *) &gpt_wdt->regs->mode, MPC52xx_GPT_MODE_WDT_PING);
+ spin_unlock_irqrestore(&gpt_wdt->lock, flags);
+}
+
+/* wdt misc device api */
+static ssize_t mpc52xx_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
+ mpc52xx_gpt_wdt_ping(gpt_wdt);
+ return 0;
+}
+
+static struct watchdog_info mpc5200_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = WDT_IDENTITY,
+};
+
+static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
+ int __user *data = (int __user *)arg;
+ int timeout;
+ u64 real_timeout;
+ int ret = 0;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user(data, &mpc5200_wdt_info,
+ sizeof(mpc5200_wdt_info));
+ if (ret)
+ ret = -EFAULT;
+ break;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, data);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ mpc52xx_gpt_wdt_ping(gpt_wdt);
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ ret = get_user(timeout, data);
+ if (ret)
+ break;
+ real_timeout = (u64) timeout * 1000000000ULL;
+ ret = mpc52xx_gpt_do_start(gpt_wdt, real_timeout, 0, 1);
+ if (ret)
+ break;
+ /* fall through and return the timeout */
+
+ case WDIOC_GETTIMEOUT:
+ /* we need to round here so as to avoid e.g. the following
+ * situation:
+ * - timeout requested is 1 second;
+ * - real timeout @33MHz is 999997090ns
+ * - the int divide by 10^9 will return 0.
+ */
+ real_timeout =
+ mpc52xx_gpt_timer_period(gpt_wdt) + 500000000ULL;
+ do_div(real_timeout, 1000000000ULL);
+ timeout = (int) real_timeout;
+ ret = put_user(timeout, data);
+ break;
+
+ default:
+ ret = -ENOTTY;
+ }
+ return ret;
+}
+
+static int mpc52xx_wdt_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ /* sanity check */
+ if (!mpc52xx_gpt_wdt)
+ return -ENODEV;
+
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &wdt_is_active))
+ return -EBUSY;
+
+ /* Set and activate the watchdog with 30 seconds timeout */
+ ret = mpc52xx_gpt_do_start(mpc52xx_gpt_wdt, 30ULL * 1000000000ULL,
+ 0, 1);
+ if (ret) {
+ clear_bit(0, &wdt_is_active);
+ return ret;
+ }
+
+ file->private_data = mpc52xx_gpt_wdt;
+ return nonseekable_open(inode, file);
+}
+
+static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
+{
+ /* note: releasing the wdt in NOWAYOUT-mode does not stop it */
+#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
+ struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpt_wdt->lock, flags);
+ clrbits32(&gpt_wdt->regs->mode,
+ MPC52xx_GPT_MODE_COUNTER_ENABLE | MPC52xx_GPT_MODE_WDT_EN);
+ gpt_wdt->wdt_mode &= ~MPC52xx_GPT_IS_WDT;
+ spin_unlock_irqrestore(&gpt_wdt->lock, flags);
+#endif
+ clear_bit(0, &wdt_is_active);
+ return 0;
+}
+
+
+static const struct file_operations mpc52xx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = mpc52xx_wdt_write,
+ .unlocked_ioctl = mpc52xx_wdt_ioctl,
+ .open = mpc52xx_wdt_open,
+ .release = mpc52xx_wdt_release,
+};
+
+static struct miscdevice mpc52xx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &mpc52xx_wdt_fops,
+};
+
+static int __devinit mpc52xx_gpt_wdt_init(void)
+{
+ int err;
+
+ /* try to register the watchdog misc device */
+ err = misc_register(&mpc52xx_wdt_miscdev);
+ if (err)
+ pr_err("%s: cannot register watchdog device\n", WDT_IDENTITY);
+ else
+ pr_info("%s: watchdog device registered\n", WDT_IDENTITY);
+ return err;
+}
+
+static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
+ const u32 *period)
+{
+ u64 real_timeout;
+
+ /* remember the gpt for the wdt operation */
+ mpc52xx_gpt_wdt = gpt;
+
+ /* configure the wdt if the device tree contained a timeout */
+ if (!period || *period == 0)
+ return 0;
+
+ real_timeout = (u64) *period * 1000000000ULL;
+ if (mpc52xx_gpt_do_start(gpt, real_timeout, 0, 1))
+ dev_warn(gpt->dev, "starting as wdt failed\n");
+ else
+ dev_info(gpt->dev, "watchdog set to %us timeout\n", *period);
+ return 0;
+}
+
+#else
+
+static int __devinit mpc52xx_gpt_wdt_init(void)
+{
+ return 0;
+}
+
+#define mpc52xx_gpt_wdt_setup(x, y) (0)
+
+#endif /* CONFIG_MPC5200_WDT */
+
/* ---------------------------------------------------------------------
* of_platform bus binding code
*/
@@ -349,6 +729,7 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
spin_lock_init(&gpt->lock);
gpt->dev = &ofdev->dev;
+ gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->node);
gpt->regs = of_iomap(ofdev->node, 0);
if (!gpt->regs) {
kfree(gpt);
@@ -360,6 +741,26 @@ static int __devinit mpc52xx_gpt_probe(struct of_device *ofdev,
mpc52xx_gpt_gpio_setup(gpt, ofdev->node);
mpc52xx_gpt_irq_setup(gpt, ofdev->node);
+ mutex_lock(&mpc52xx_gpt_list_mutex);
+ list_add(&gpt->list, &mpc52xx_gpt_list);
+ mutex_unlock(&mpc52xx_gpt_list_mutex);
+
+ /* check if this device could be a watchdog */
+ if (of_get_property(ofdev->node, "fsl,has-wdt", NULL) ||
+ of_get_property(ofdev->node, "has-wdt", NULL)) {
+ const u32 *on_boot_wdt;
+
+ gpt->wdt_mode = MPC52xx_GPT_CAN_WDT;
+ on_boot_wdt = of_get_property(ofdev->node, "fsl,wdt-on-boot",
+ NULL);
+ if (on_boot_wdt) {
+ dev_info(gpt->dev, "used as watchdog\n");
+ gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
+ } else
+ dev_info(gpt->dev, "can function as watchdog\n");
+ mpc52xx_gpt_wdt_setup(gpt, on_boot_wdt);
+ }
+
return 0;
}
@@ -394,3 +795,4 @@ static int __init mpc52xx_gpt_init(void)
/* Make sure GPIOs and IRQs get set up before anyone tries to use them */
subsys_initcall(mpc52xx_gpt_init);
+device_initcall(mpc52xx_gpt_wdt_init);
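
The timer calls added above (mpc52xx_gpt_from_irq, mpc52xx_gpt_start_timer, mpc52xx_gpt_stop_timer, mpc52xx_gpt_timer_period) are exported for other MPC5200 drivers. A hedged usage sketch with an arbitrary 10 ms period, assuming the caller already knows the GPT's cascade virq (illustrative, not part of the diff):

/* Sketch only: run a GPT as a continuous 10 ms timer, report its real
 * period, then stop it.  Period arguments are in nanoseconds.
 */
static int demo_run_gpt(int virq)
{
	struct mpc52xx_gpt_priv *gpt = mpc52xx_gpt_from_irq(virq);
	int ret;

	if (!gpt)
		return -ENODEV;

	ret = mpc52xx_gpt_start_timer(gpt, 10 * 1000 * 1000ULL, 1);
	if (ret)
		return ret;	/* e.g. -EBUSY when gpt0 is locked as wdt */

	pr_info("gpt period is %llu ns\n",
		(unsigned long long)mpc52xx_gpt_timer_period(gpt));

	return mpc52xx_gpt_stop_timer(gpt);
}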
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
new file mode 100644
index 00000000000..929d017535a
--- /dev/null
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -0,0 +1,560 @@
+/*
+ * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2009 Secret Lab Technologies Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * Todo:
+ * - Add support for multiple requests to be queued.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+#include <asm/time.h>
+
+#include <sysdev/bestcomm/bestcomm.h>
+#include <sysdev/bestcomm/bestcomm_priv.h>
+#include <sysdev/bestcomm/gen_bd.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
+MODULE_LICENSE("GPL");
+
+#define LPBFIFO_REG_PACKET_SIZE (0x00)
+#define LPBFIFO_REG_START_ADDRESS (0x04)
+#define LPBFIFO_REG_CONTROL (0x08)
+#define LPBFIFO_REG_ENABLE (0x0C)
+#define LPBFIFO_REG_BYTES_DONE_STATUS (0x14)
+#define LPBFIFO_REG_FIFO_DATA (0x40)
+#define LPBFIFO_REG_FIFO_STATUS (0x44)
+#define LPBFIFO_REG_FIFO_CONTROL (0x48)
+#define LPBFIFO_REG_FIFO_ALARM (0x4C)
+
+struct mpc52xx_lpbfifo {
+ struct device *dev;
+ phys_addr_t regs_phys;
+ void __iomem *regs;
+ int irq;
+ spinlock_t lock;
+
+ struct bcom_task *bcom_tx_task;
+ struct bcom_task *bcom_rx_task;
+ struct bcom_task *bcom_cur_task;
+
+ /* Current state data */
+ struct mpc52xx_lpbfifo_request *req;
+ int dma_irqs_enabled;
+};
+
+/* The MPC5200 has only one fifo, so we only need one instance structure */
+static struct mpc52xx_lpbfifo lpbfifo;
+
+/**
+ * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
+ */
+static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
+{
+ size_t transfer_size = req->size - req->pos;
+ struct bcom_bd *bd;
+ void __iomem *reg;
+ u32 *data;
+ int i;
+ int bit_fields;
+ int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
+ int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
+ int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
+
+ /* Set and clear the reset bits; this is good practice per the User Manual */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+
+ /* set master enable bit */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
+ if (!dma) {
+ /* While the FIFO can be setup for transfer sizes as large as
+ * 16M-1, the FIFO itself is only 512 bytes deep and it does
+ * not generate interrupts for FIFO full events (only transfer
+ * complete will raise an IRQ). Therefore when not using
+ * Bestcomm to drive the FIFO it needs to either be polled, or
+ * transfers need to be constrained to the size of the fifo.
+ *
+ * This driver restricts the size of the transfer
+ */
+ if (transfer_size > 512)
+ transfer_size = 512;
+
+ /* Load the FIFO with data */
+ if (write) {
+ reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
+ data = req->data + req->pos;
+ for (i = 0; i < transfer_size; i += 4)
+ out_be32(reg, *data++);
+ }
+
+ /* Unmask both error and completion irqs */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
+ } else {
+ /* Choose the correct direction
+ *
+ * Configure the watermarks so DMA will always complete correctly.
+ * It may be worth experimenting with the ALARM value to see if
+ * there is a performance impact. However, if it is wrong there
+ * is a risk of DMA not transferring the last chunk of data
+ */
+ if (write) {
+ out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
+ out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
+ lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
+ } else {
+ out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
+ out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
+ lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;
+
+ if (poll_dma) {
+ if (lpbfifo.dma_irqs_enabled) {
+ disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
+ lpbfifo.dma_irqs_enabled = 0;
+ }
+ } else {
+ if (!lpbfifo.dma_irqs_enabled) {
+ enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
+ lpbfifo.dma_irqs_enabled = 1;
+ }
+ }
+ }
+
+ bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
+ bd->status = transfer_size;
+ if (!write) {
+ /*
+ * In the DMA read case, the DMA doesn't complete,
+ * possibly due to incorrect watermarks in the ALARM
+ * and CONTROL regs. For now instead of trying to
+ * determine the right watermarks that will make this
+ * work, just increase the number of bytes the FIFO is
+ * expecting.
+ *
+ * When submitting another operation, the FIFO will get
+ * reset, so the condition of the FIFO waiting for a
+ * non-existent 4 bytes will get cleared.
+ */
+ transfer_size += 4; /* BLECH! */
+ }
+ bd->data[0] = req->data_phys + req->pos;
+ bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);
+
+ /* error irq & master enabled bit */
+ bit_fields = 0x00000201;
+
+ /* Unmask irqs */
+ if (write && (!poll_dma))
+ bit_fields |= 0x00000100; /* completion irq too */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
+ }
+
+ /* Set transfer size, width, chip select and READ mode */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
+ req->offset + req->pos);
+ out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);
+
+ bit_fields = req->cs << 24 | 0x000008;
+ if (!write)
+ bit_fields |= 0x010000; /* read mode */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);
+
+ /* Kick it off */
+ out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
+ if (dma)
+ bcom_enable(lpbfifo.bcom_cur_task);
+}
+
+/**
+ * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
+ *
+ * On transmit, the dma completion irq triggers before the fifo completion
+ * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
+ * task completion irq because everything is not really done until the LPB FIFO
+ * completion irq triggers.
+ *
+ * In other words:
+ * For DMA, on receive, the "Fat Lady" is the bestcomm completion irq. On
+ * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
+ * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
+ *
+ * Reasons for entering this routine:
+ * 1) PIO mode rx and tx completion irq
+ * 2) DMA interrupt mode tx completion irq
+ * 3) DMA polled mode tx
+ *
+ * Exit conditions:
+ * 1) Transfer aborted
+ * 2) FIFO complete without DMA; more data to do
+ * 3) FIFO complete without DMA; all data transferred
+ * 4) FIFO complete using DMA
+ *
+ * Condition 1 can occur regardless of whether or not DMA is used.
+ * It requires executing the callback to report the error and exiting
+ * immediately.
+ *
+ * Condition 2 requires programming the FIFO with the next block of data
+ *
+ * Condition 3 requires executing the callback to report completion
+ *
+ * Condition 4 means the same as 3, except that we also retrieve the bcom
+ * buffer so DMA doesn't get clogged up.
+ *
+ * To make things trickier, the spinlock must be dropped before
+ * executing the callback, otherwise we could end up with a deadlock
+ * or nested spinlock condition. The out path is non-trivial, so
+ * extra fiddling is done to make sure all paths lead to the same
+ * outbound code.
+ */
+static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
+{
+ struct mpc52xx_lpbfifo_request *req;
+ u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
+ void __iomem *reg;
+ u32 *data;
+ int count, i;
+ int do_callback = 0;
+ u32 ts;
+ unsigned long flags;
+ int dma, write, poll_dma;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+ ts = get_tbl();
+
+ req = lpbfifo.req;
+ if (!req) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ pr_err("bogus LPBFIFO IRQ\n");
+ return IRQ_HANDLED;
+ }
+
+ dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
+ write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
+ poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
+
+ if (dma && !write) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ pr_err("bogus LPBFIFO IRQ (dma and not writting)\n");
+ return IRQ_HANDLED;
+ }
+
+ if ((status & 0x01) == 0) {
+ goto out;
+ }
+
+ /* check abort bit */
+ if (status & 0x10) {
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+ do_callback = 1;
+ goto out;
+ }
+
+ /* Read result from hardware */
+ count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
+ count &= 0x00ffffff;
+
+ if (!dma && !write) {
+ /* copy the data out of the FIFO */
+ reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
+ data = req->data + req->pos;
+ for (i = 0; i < count; i += 4)
+ *data++ = in_be32(reg);
+ }
+
+ /* Update transfer position and count */
+ req->pos += count;
+
+ /* Decide what to do next */
+ if (req->size - req->pos)
+ mpc52xx_lpbfifo_kick(req); /* more work to do */
+ else
+ do_callback = 1;
+
+ out:
+ /* Clear the IRQ */
+ out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);
+
+ if (dma && (status & 0x11)) {
+ /*
+ * Count the DMA as complete only when the FIFO completion
+ * status or abort bits are set.
+ *
+ * (status & 0x01) should always be the case except sometimes
+ * when using polled DMA.
+ *
+ * (status & 0x10) {transfer aborted}: This case needs more
+ * testing.
+ */
+ bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
+ }
+ req->last_byte = ((u8 *)req->data)[req->size - 1];
+
+ /* When the do_callback flag is set, it means the transfer is finished
+ * so set the FIFO as idle */
+ if (do_callback)
+ lpbfifo.req = NULL;
+
+ if (irq != 0) /* don't increment on polled case */
+ req->irq_count++;
+
+ req->irq_ticks += get_tbl() - ts;
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+
+ /* Spinlock is released; it is now safe to call the callback */
+ if (do_callback && req->callback)
+ req->callback(req);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
+ *
+ * Only used when receiving data.
+ */
+static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
+{
+ struct mpc52xx_lpbfifo_request *req;
+ unsigned long flags;
+ u32 status;
+ u32 ts;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+ ts = get_tbl();
+
+ req = lpbfifo.req;
+ if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (irq != 0) /* don't increment on polled case */
+ req->irq_count++;
+
+ if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+
+ req->buffer_not_done_cnt++;
+ if ((req->buffer_not_done_cnt % 1000) == 0)
+ pr_err("transfer stalled\n");
+
+ return IRQ_HANDLED;
+ }
+
+ bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
+
+ req->last_byte = ((u8 *)req->data)[req->size - 1];
+
+ req->pos = status & 0x00ffffff;
+
+ /* Mark the FIFO as idle */
+ lpbfifo.req = NULL;
+
+ /* Release the lock before calling out to the callback. */
+ req->irq_ticks += get_tbl() - ts;
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+
+ if (req->callback)
+ req->callback(req);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpc52xx_lpbfifo_bcom_poll - Poll for DMA completion
+ */
+void mpc52xx_lpbfifo_poll(void)
+{
+ struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
+ int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
+ int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
+
+ /*
+ * For more information, see comments on the "Fat Lady"
+ */
+ if (dma && write)
+ mpc52xx_lpbfifo_irq(0, NULL);
+ else
+ mpc52xx_lpbfifo_bcom_irq(0, NULL);
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
+
+/**
+ * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
+ * @req: Pointer to request structure
+ */
+int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
+{
+ unsigned long flags;
+
+ if (!lpbfifo.regs)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+
+ /* If the req pointer is already set, then a transfer is in progress */
+ if (lpbfifo.req) {
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return -EBUSY;
+ }
+
+ /* Setup the transfer */
+ lpbfifo.req = req;
+ req->irq_count = 0;
+ req->irq_ticks = 0;
+ req->buffer_not_done_cnt = 0;
+ req->pos = 0;
+
+ mpc52xx_lpbfifo_kick(req);
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
+
+void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lpbfifo.lock, flags);
+ if (lpbfifo.req == req) {
+ /* Put it into reset and clear the state */
+ bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
+ bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+ lpbfifo.req = NULL;
+ }
+ spin_unlock_irqrestore(&lpbfifo.lock, flags);
+}
+EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
+
+static int __devinit
+mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
+{
+ struct resource res;
+ int rc = -ENOMEM;
+
+ if (lpbfifo.dev != NULL)
+ return -ENOSPC;
+
+ lpbfifo.irq = irq_of_parse_and_map(op->node, 0);
+ if (!lpbfifo.irq)
+ return -ENODEV;
+
+ if (of_address_to_resource(op->node, 0, &res))
+ return -ENODEV;
+ lpbfifo.regs_phys = res.start;
+ lpbfifo.regs = of_iomap(op->node, 0);
+ if (!lpbfifo.regs)
+ return -ENOMEM;
+
+ spin_lock_init(&lpbfifo.lock);
+
+ /* Put FIFO into reset */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+
+ /* Register the interrupt handler */
+ rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
+ "mpc52xx-lpbfifo", &lpbfifo);
+ if (rc)
+ goto err_irq;
+
+ /* Request the Bestcomm receive (fifo --> memory) task and IRQ */
+ lpbfifo.bcom_rx_task =
+ bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
+ BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
+ 16*1024*1024);
+ if (!lpbfifo.bcom_rx_task)
+ goto err_bcom_rx;
+
+ rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
+ mpc52xx_lpbfifo_bcom_irq, 0,
+ "mpc52xx-lpbfifo-rx", &lpbfifo);
+ if (rc)
+ goto err_bcom_rx_irq;
+
+ /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
+ lpbfifo.bcom_tx_task =
+ bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
+ BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
+ if (!lpbfifo.bcom_tx_task)
+ goto err_bcom_tx;
+
+ lpbfifo.dev = &op->dev;
+ return 0;
+
+ err_bcom_tx:
+ free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
+ err_bcom_rx_irq:
+ bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
+ err_bcom_rx:
+ err_irq:
+ iounmap(lpbfifo.regs);
+ lpbfifo.regs = NULL;
+
+ dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
+ return -ENODEV;
+}
+
+
+static int __devexit mpc52xx_lpbfifo_remove(struct of_device *op)
+{
+ if (lpbfifo.dev != &op->dev)
+ return 0;
+
+ /* Put FIFO in reset */
+ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
+
+ /* Release the bestcomm transmit task */
+ free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
+ bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);
+
+ /* Release the bestcomm receive task */
+ free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
+ bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
+
+ free_irq(lpbfifo.irq, &lpbfifo);
+ iounmap(lpbfifo.regs);
+ lpbfifo.regs = NULL;
+ lpbfifo.dev = NULL;
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = {
+ { .compatible = "fsl,mpc5200-lpbfifo", },
+ {},
+};
+
+static struct of_platform_driver mpc52xx_lpbfifo_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc52xx-lpbfifo",
+ .match_table = mpc52xx_lpbfifo_match,
+ .probe = mpc52xx_lpbfifo_probe,
+ .remove = __devexit_p(mpc52xx_lpbfifo_remove),
+};
+
+/***********************************************************************
+ * Module init/exit
+ */
+static int __init mpc52xx_lpbfifo_init(void)
+{
+ pr_debug("Registering LocalPlus bus FIFO driver\n");
+ return of_register_platform_driver(&mpc52xx_lpbfifo_driver);
+}
+module_init(mpc52xx_lpbfifo_init);
+
+static void __exit mpc52xx_lpbfifo_exit(void)
+{
+ pr_debug("Unregistering LocalPlus bus FIFO driver\n");
+ of_unregister_platform_driver(&mpc52xx_lpbfifo_driver);
+}
+module_exit(mpc52xx_lpbfifo_exit);
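
The FIFO driver above exposes mpc52xx_lpbfifo_submit(), mpc52xx_lpbfifo_poll() and mpc52xx_lpbfifo_abort() to client drivers. A hedged sketch of a small PIO write; struct mpc52xx_lpbfifo_request and the MPC52XX_LPBFIFO_FLAG_* values come from a header added elsewhere in this series, and the chip-select and offset values below are placeholders:

/* Sketch only: submit a PIO (non-DMA) write of 'len' bytes from 'buf'.
 * The request must stay valid until the callback runs, hence static.
 */
static void demo_lpb_done(struct mpc52xx_lpbfifo_request *req)
{
	pr_info("lpbfifo: transfer of %d bytes done\n", (int)req->size);
}

static int demo_lpb_write(void *buf, int len)
{
	static struct mpc52xx_lpbfifo_request req;

	req.cs       = 0;	/* LocalPlus chip select (placeholder) */
	req.offset   = 0;	/* offset within that chip select      */
	req.data     = buf;
	req.size     = len;
	req.flags    = MPC52XX_LPBFIFO_FLAG_WRITE |
		       MPC52XX_LPBFIFO_FLAG_NO_DMA;
	req.callback = demo_lpb_done;

	return mpc52xx_lpbfifo_submit(&req);	/* -EBUSY if one is pending */
}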
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 480f806fd0a..4bf4bf7b063 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -220,7 +220,7 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
}
static struct irq_chip mpc52xx_extirq_irqchip = {
- .typename = "MPC52xx External",
+ .name = "MPC52xx External",
.mask = mpc52xx_extirq_mask,
.unmask = mpc52xx_extirq_unmask,
.ack = mpc52xx_extirq_ack,
@@ -258,7 +258,7 @@ static void mpc52xx_main_unmask(unsigned int virq)
}
static struct irq_chip mpc52xx_main_irqchip = {
- .typename = "MPC52xx Main",
+ .name = "MPC52xx Main",
.mask = mpc52xx_main_mask,
.mask_ack = mpc52xx_main_mask,
.unmask = mpc52xx_main_unmask,
@@ -291,7 +291,7 @@ static void mpc52xx_periph_unmask(unsigned int virq)
}
static struct irq_chip mpc52xx_periph_irqchip = {
- .typename = "MPC52xx Peripherals",
+ .name = "MPC52xx Peripherals",
.mask = mpc52xx_periph_mask,
.mask_ack = mpc52xx_periph_mask,
.unmask = mpc52xx_periph_unmask,
@@ -335,7 +335,7 @@ static void mpc52xx_sdma_ack(unsigned int virq)
}
static struct irq_chip mpc52xx_sdma_irqchip = {
- .typename = "MPC52xx SDMA",
+ .name = "MPC52xx SDMA",
.mask = mpc52xx_sdma_mask,
.unmask = mpc52xx_sdma_unmask,
.ack = mpc52xx_sdma_ack,
@@ -355,7 +355,7 @@ static int mpc52xx_is_extirq(int l1, int l2)
* mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
*/
static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 7ee979f323d..9d962d7c72c 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -69,7 +69,6 @@ static void pq2ads_pci_unmask_irq(unsigned int virq)
}
static struct irq_chip pq2ads_pci_ic = {
- .typename = "PQ2 ADS PCI",
.name = "PQ2 ADS PCI",
.end = pq2ads_pci_unmask_irq,
.mask = pq2ads_pci_mask_irq,
@@ -107,7 +106,7 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_data(virq, h->host_data);
set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
return 0;
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 567ded7c3b9..17f99745f0e 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -74,7 +74,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
prop = of_get_property(np, "mode", NULL);
if (prop && !strcmp(prop, "cpu-qe"))
- pdata.qe_mode = 1;
+ pdata.flags = SPI_QE_CPU_MODE;
for (j = 0; j < num_board_infos; j++) {
if (board_infos[j].bus_num == pdata.bus_num)
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 08e65fc8b98..43805348b81 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -32,6 +32,7 @@
#define PMCCR1_NEXT_STATE 0x0C /* Next state for power management */
#define PMCCR1_NEXT_STATE_SHIFT 2
#define PMCCR1_CURR_STATE 0x03 /* Current state for power management*/
+#define IMMR_SYSCR_OFFSET 0x100
#define IMMR_RCW_OFFSET 0x900
#define RCW_PCI_HOST 0x80000000
@@ -78,6 +79,22 @@ struct mpc83xx_clock {
u32 sccr;
};
+struct mpc83xx_syscr {
+ __be32 sgprl;
+ __be32 sgprh;
+ __be32 spridr;
+ __be32 :32;
+ __be32 spcr;
+ __be32 sicrl;
+ __be32 sicrh;
+};
+
+struct mpc83xx_saved {
+ u32 sicrl;
+ u32 sicrh;
+ u32 sccr;
+};
+
struct pmc_type {
int has_deep_sleep;
};
@@ -87,6 +104,8 @@ static int has_deep_sleep, deep_sleeping;
static int pmc_irq;
static struct mpc83xx_pmc __iomem *pmc_regs;
static struct mpc83xx_clock __iomem *clock_regs;
+static struct mpc83xx_syscr __iomem *syscr_regs;
+static struct mpc83xx_saved saved_regs;
static int is_pci_agent, wake_from_pci;
static phys_addr_t immrbase;
static int pci_pm_state;
@@ -96,6 +115,7 @@ int fsl_deep_sleep(void)
{
return deep_sleeping;
}
+EXPORT_SYMBOL(fsl_deep_sleep);
static int mpc83xx_change_state(void)
{
@@ -136,6 +156,20 @@ static irqreturn_t pmc_irq_handler(int irq, void *dev_id)
return ret;
}
+static void mpc83xx_suspend_restore_regs(void)
+{
+ out_be32(&syscr_regs->sicrl, saved_regs.sicrl);
+ out_be32(&syscr_regs->sicrh, saved_regs.sicrh);
+ out_be32(&clock_regs->sccr, saved_regs.sccr);
+}
+
+static void mpc83xx_suspend_save_regs(void)
+{
+ saved_regs.sicrl = in_be32(&syscr_regs->sicrl);
+ saved_regs.sicrh = in_be32(&syscr_regs->sicrh);
+ saved_regs.sccr = in_be32(&clock_regs->sccr);
+}
+
static int mpc83xx_suspend_enter(suspend_state_t state)
{
int ret = -EAGAIN;
@@ -165,6 +199,8 @@ static int mpc83xx_suspend_enter(suspend_state_t state)
*/
if (deep_sleeping) {
+ mpc83xx_suspend_save_regs();
+
out_be32(&pmc_regs->mask, PMCER_ALL);
out_be32(&pmc_regs->config1,
@@ -178,6 +214,8 @@ static int mpc83xx_suspend_enter(suspend_state_t state)
in_be32(&pmc_regs->config1) & ~PMCCR1_POWER_OFF);
out_be32(&pmc_regs->mask, PMCER_PMCI);
+
+ mpc83xx_suspend_restore_regs();
} else {
out_be32(&pmc_regs->mask, PMCER_PMCI);
@@ -193,7 +231,7 @@ out:
return ret;
}
-static void mpc83xx_suspend_finish(void)
+static void mpc83xx_suspend_end(void)
{
deep_sleeping = 0;
}
@@ -277,7 +315,7 @@ static struct platform_suspend_ops mpc83xx_suspend_ops = {
.valid = mpc83xx_suspend_valid,
.begin = mpc83xx_suspend_begin,
.enter = mpc83xx_suspend_enter,
- .finish = mpc83xx_suspend_finish,
+ .end = mpc83xx_suspend_end,
};
static int pmc_probe(struct of_device *ofdev,
@@ -332,12 +370,23 @@ static int pmc_probe(struct of_device *ofdev,
goto out_pmc;
}
+ if (has_deep_sleep) {
+ syscr_regs = ioremap(immrbase + IMMR_SYSCR_OFFSET,
+ sizeof(*syscr_regs));
+ if (!syscr_regs) {
+ ret = -ENOMEM;
+ goto out_syscr;
+ }
+ }
+
if (is_pci_agent)
mpc83xx_set_agent();
suspend_set_ops(&mpc83xx_suspend_ops);
return 0;
+out_syscr:
+ iounmap(clock_regs);
out_pmc:
iounmap(pmc_regs);
out:
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index d3a975e8fd3..d95121894eb 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -1,6 +1,7 @@
-menuconfig MPC85xx
- bool "Machine Type"
- depends on PPC_85xx
+menuconfig FSL_SOC_BOOKE
+ bool "Freescale Book-E Machine Type"
+ depends on PPC_85xx || PPC_BOOK3E
+ select FSL_SOC
select PPC_UDBG_16550
select MPIC
select PPC_PCI_CHOICE
@@ -8,7 +9,7 @@ menuconfig MPC85xx
select SERIAL_8250_SHARE_IRQ if SERIAL_8250
default y
-if MPC85xx
+if FSL_SOC_BOOKE
config MPC8540_ADS
bool "Freescale MPC8540 ADS"
@@ -144,7 +145,19 @@ config SBC8560
help
This option enables support for the Wind River SBC8560 board
-endif # MPC85xx
+config P4080_DS
+ bool "Freescale P4080 DS"
+ select DEFAULT_UIMAGE
+ select PPC_FSL_BOOK3E
+ select PPC_E500MC
+ select PHYS_64BIT
+ select SWIOTLB
+ select MPC8xxx_GPIO
+ select HAS_RAPIDIO
+ help
+ This option enables support for the P4080 DS board
+
+endif # FSL_SOC_BOOKE
config TQM85xx
bool
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 9098aea0cf3..387c128f2c8 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MPC8536_DS) += mpc8536_ds.o
obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o
obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o
obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
+obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
obj-$(CONFIG_SBC8560) += sbc8560.o
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
new file mode 100644
index 00000000000..534c2ecc89d
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -0,0 +1,125 @@
+/*
+ * Corenet based SoC DS Setup
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * Copyright 2009 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/lmb.h>
+
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+void __init corenet_ds_pic_init(void)
+{
+ struct mpic *mpic;
+ struct resource r;
+ struct device_node *np = NULL;
+ unsigned int flags = MPIC_PRIMARY | MPIC_BIG_ENDIAN |
+ MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU;
+
+ np = of_find_node_by_type(np, "open-pic");
+
+ if (np == NULL) {
+ printk(KERN_ERR "Could not find open-pic node\n");
+ return;
+ }
+
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_ERR "Failed to map mpic register space\n");
+ of_node_put(np);
+ return;
+ }
+
+ if (ppc_md.get_irq == mpic_get_coreint_irq)
+ flags |= MPIC_ENABLE_COREINT;
+
+ mpic = mpic_alloc(np, r.start, flags, 0, 256, " OpenPIC ");
+ BUG_ON(mpic == NULL);
+
+ mpic_init(mpic);
+}
+
+#ifdef CONFIG_PCI
+static int primary_phb_addr;
+#endif
+
+/*
+ * Setup the architecture
+ */
+#ifdef CONFIG_SMP
+void __init mpc85xx_smp_init(void);
+#endif
+
+void __init corenet_ds_setup_arch(void)
+{
+#ifdef CONFIG_PCI
+ struct device_node *np;
+ struct pci_controller *hose;
+#endif
+ dma_addr_t max = 0xffffffff;
+
+#ifdef CONFIG_SMP
+ mpc85xx_smp_init();
+#endif
+
+#ifdef CONFIG_PCI
+ for_each_compatible_node(np, "pci", "fsl,p4080-pcie") {
+ struct resource rsrc;
+ of_address_to_resource(np, 0, &rsrc);
+ if ((rsrc.start & 0xfffff) == primary_phb_addr)
+ fsl_add_bridge(np, 1);
+ else
+ fsl_add_bridge(np, 0);
+
+ hose = pci_find_hose_for_OF_device(np);
+ max = min(max, hose->dma_window_base_cur +
+ hose->dma_window_size);
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (lmb_end_of_DRAM() > max) {
+ ppc_swiotlb_enable = 1;
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ }
+#endif
+ pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
+}
+
+static const struct of_device_id of_device_ids[] __devinitconst = {
+ {
+ .compatible = "simple-bus"
+ },
+ {
+ .compatible = "fsl,rapidio-delta",
+ },
+ {}
+};
+
+int __init corenet_ds_publish_devices(void)
+{
+ return of_platform_bus_probe(NULL, of_device_ids, NULL);
+}
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.h b/arch/powerpc/platforms/85xx/corenet_ds.h
new file mode 100644
index 00000000000..ddd700b2303
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/corenet_ds.h
@@ -0,0 +1,19 @@
+/*
+ * Corenet based SoC DS Setup
+ *
+ * Copyright 2009 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef CORENET_DS_H
+#define CORENET_DS_H
+
+extern void __init corenet_ds_pic_init(void);
+extern void __init corenet_ds_setup_arch(void);
+extern int __init corenet_ds_publish_devices(void);
+
+#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 3909d57b86e..21f61b8c445 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -86,7 +86,7 @@ static int mpc8568_fixup_125_clock(struct phy_device *phydev)
scr = phy_read(phydev, MV88E1111_SCR);
if (scr < 0)
- return err;
+ return scr;
err = phy_write(phydev, MV88E1111_SCR, scr | 0x0008);
@@ -301,6 +301,7 @@ static struct of_device_id mpc85xx_ids[] = {
{ .compatible = "fsl,qe", },
{ .compatible = "gianfar", },
{ .compatible = "fsl,rapidio-delta", },
+ { .compatible = "fsl,mpc8548-guts", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index c8468de4acf..088f30b0c08 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -44,6 +44,7 @@ void __init mpc85xx_rdb_pic_init(void)
struct mpic *mpic;
struct resource r;
struct device_node *np;
+ unsigned long root = of_get_flat_dt_root();
np = of_find_node_by_type(NULL, "open-pic");
if (np == NULL) {
@@ -57,11 +58,18 @@ void __init mpc85xx_rdb_pic_init(void)
return;
}
- mpic = mpic_alloc(np, r.start,
+ if (of_flat_dt_is_compatible(root, "fsl,85XXRDB-CAMP")) {
+ mpic = mpic_alloc(np, r.start,
+ MPIC_PRIMARY |
+ MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
+ 0, 256, " OpenPIC ");
+ } else {
+ mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
+ }
BUG_ON(mpic == NULL);
of_node_put(np);
@@ -113,6 +121,7 @@ static int __init mpc85xxrdb_publish_devices(void)
return of_platform_bus_probe(NULL, mpc85xxrdb_ids, NULL);
}
machine_device_initcall(p2020_rdb, mpc85xxrdb_publish_devices);
+machine_device_initcall(p1020_rdb, mpc85xxrdb_publish_devices);
/*
* Called very early, device-tree isn't unflattened
@@ -126,6 +135,15 @@ static int __init p2020_rdb_probe(void)
return 0;
}
+static int __init p1020_rdb_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "fsl,P1020RDB"))
+ return 1;
+ return 0;
+}
+
define_machine(p2020_rdb) {
.name = "P2020 RDB",
.probe = p2020_rdb_probe,
@@ -139,3 +157,17 @@ define_machine(p2020_rdb) {
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
+
+define_machine(p1020_rdb) {
+ .name = "P1020 RDB",
+ .probe = p1020_rdb_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c
new file mode 100644
index 00000000000..84170460497
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p4080_ds.c
@@ -0,0 +1,74 @@
+/*
+ * P4080 DS Setup
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * Copyright 2009 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+#include "corenet_ds.h"
+
+#ifdef CONFIG_PCI
+static int primary_phb_addr;
+#endif
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p4080_ds_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (of_flat_dt_is_compatible(root, "fsl,P4080DS")) {
+#ifdef CONFIG_PCI
+ /* treat PCIe1 as primary,
+ * shouldn't matter as we have no ISA on the board
+ */
+ primary_phb_addr = 0x0000;
+#endif
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+define_machine(p4080_ds) {
+ .name = "P4080 DS",
+ .probe = p4080_ds_probe,
+ .setup_arch = corenet_ds_setup_arch,
+ .init_IRQ = corenet_ds_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_coreint_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
+
+machine_device_initcall(p4080_ds, corenet_ds_publish_devices);
+machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 60edf63d015..e5da5f62b24 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -232,7 +232,7 @@ static int socrates_fpga_pic_set_type(unsigned int virq,
}
static struct irq_chip socrates_fpga_pic_chip = {
- .typename = " FPGA-PIC ",
+ .name = " FPGA-PIC ",
.ack = socrates_fpga_pic_ack,
.mask = socrates_fpga_pic_mask,
.mask_ack = socrates_fpga_pic_mask_ack,
@@ -245,7 +245,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip,
handle_fasteoi_irq);
@@ -253,7 +253,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
}
static int socrates_fpga_pic_host_xlate(struct irq_host *h,
- struct device_node *ct, u32 *intspec, unsigned int intsize,
+ struct device_node *ct, const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
struct socrates_fpga_irq_info *fpga_irq = &fpga_irqs[intspec[0]];
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 9c7b64a3402..2bbfd530d6d 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -35,6 +35,7 @@ config MPC8610_HPCD
config GEF_PPC9A
bool "GE Fanuc PPC9A"
select DEFAULT_UIMAGE
+ select MMIO_NVRAM
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
@@ -43,6 +44,7 @@ config GEF_PPC9A
config GEF_SBC310
bool "GE Fanuc SBC310"
select DEFAULT_UIMAGE
+ select MMIO_NVRAM
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
@@ -51,6 +53,7 @@ config GEF_SBC310
config GEF_SBC610
bool "GE Fanuc SBC610"
select DEFAULT_UIMAGE
+ select MMIO_NVRAM
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select HAS_RAPIDIO
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c
index 50d0a2b6380..0110a8736d3 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/platforms/86xx/gef_pic.c
@@ -149,7 +149,7 @@ static void gef_pic_unmask(unsigned int virq)
}
static struct irq_chip gef_pic_chip = {
- .typename = "gefp",
+ .name = "gefp",
.mask = gef_pic_mask,
.mask_ack = gef_pic_mask_ack,
.unmask = gef_pic_unmask,
@@ -163,14 +163,14 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
return 0;
}
static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 287f7bd17dd..a792e5d8581 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -33,6 +33,7 @@
#include <asm/udbg.h>
#include <asm/mpic.h>
+#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
@@ -95,6 +96,10 @@ static void __init gef_ppc9a_setup_arch(void)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
+
+#if defined(CONFIG_MMIO_NVRAM)
+ mmio_nvram_init();
+#endif
}
/* Return the PCB revision */
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 90754e752bd..6a1a613836c 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -33,6 +33,7 @@
#include <asm/udbg.h>
#include <asm/mpic.h>
+#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
@@ -95,6 +96,10 @@ static void __init gef_sbc310_setup_arch(void)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
+
+#if defined(CONFIG_MMIO_NVRAM)
+ mmio_nvram_init();
+#endif
}
/* Return the PCB revision */
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 72b31a6010a..e10688a0fc4 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -33,6 +33,7 @@
#include <asm/udbg.h>
#include <asm/mpic.h>
+#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
@@ -95,6 +96,10 @@ static void __init gef_sbc610_setup_arch(void)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
+
+#if defined(CONFIG_MMIO_NVRAM)
+ mmio_nvram_init();
+#endif
}
/* Return the PCB revision */
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 627908a4cd7..5abe137f630 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -19,6 +19,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
@@ -41,10 +42,46 @@
#include "mpc86xx.h"
+static struct device_node *pixis_node;
static unsigned char *pixis_bdcfg0, *pixis_arch;
+#ifdef CONFIG_SUSPEND
+static irqreturn_t mpc8610_sw9_irq(int irq, void *data)
+{
+ pr_debug("%s: PIXIS' event (sw9/wakeup) IRQ handled\n", __func__);
+ return IRQ_HANDLED;
+}
+
+static void __init mpc8610_suspend_init(void)
+{
+ int irq;
+ int ret;
+
+ if (!pixis_node)
+ return;
+
+ irq = irq_of_parse_and_map(pixis_node, 0);
+ if (!irq) {
+ pr_err("%s: can't map pixis event IRQ.\n", __func__);
+ return;
+ }
+
+ ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL);
+ if (ret) {
+ pr_err("%s: can't request pixis event IRQ: %d\n",
+ __func__, ret);
+ irq_dispose_mapping(irq);
+ }
+
+ enable_irq_wake(irq);
+}
+#else
+static inline void mpc8610_suspend_init(void) { }
+#endif /* CONFIG_SUSPEND */
+
static struct of_device_id __initdata mpc8610_ids[] = {
{ .compatible = "fsl,mpc8610-immr", },
+ { .compatible = "fsl,mpc8610-guts", },
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{}
@@ -55,6 +92,9 @@ static int __init mpc8610_declare_of_platform_devices(void)
/* Firstly, register PIXIS GPIOs. */
simple_gpiochip_init("fsl,fpga-pixis-gpio-bank");
+ /* Enable wakeup on PIXIS' event IRQ. */
+ mpc8610_suspend_init();
+
/* Without this call, the SSI device driver won't get probed. */
of_platform_bus_probe(NULL, mpc8610_ids, NULL);
@@ -250,10 +290,10 @@ static void __init mpc86xx_hpcd_setup_arch(void)
diu_ops.set_sysfs_monitor_port = mpc8610hpcd_set_sysfs_monitor_port;
#endif
- np = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis");
- if (np) {
- of_address_to_resource(np, 0, &r);
- of_node_put(np);
+ pixis_node = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis");
+ if (pixis_node) {
+ of_address_to_resource(pixis_node, 0, &r);
+ of_node_put(pixis_node);
pixis = ioremap(r.start, 32);
if (!pixis) {
printk(KERN_ERR "Err: can't map FPGA cfg register!\n");
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 385acfc4839..242954c4293 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -222,7 +222,7 @@ static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
int cascade_irq;
if ((cascade_irq = cpm_get_irq()) >= 0) {
- struct irq_desc *cdesc = irq_desc + cascade_irq;
+ struct irq_desc *cdesc = irq_to_desc(cascade_irq);
generic_handle_irq(cascade_irq);
cdesc->chip->eoi(cascade_irq);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 04a8061045c..d1663db7810 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -86,6 +86,11 @@ config RTAS_ERROR_LOGGING
depends on PPC_RTAS
default n
+config PPC_RTAS_DAEMON
+ bool
+ depends on PPC_RTAS
+ default n
+
config RTAS_PROC
bool "Proc interface to RTAS"
depends on PPC_RTAS
@@ -255,7 +260,7 @@ config QE_GPIO
config CPM2
bool "Enable support for the CPM2 (Communications Processor Module)"
- depends on MPC85xx || 8260
+ depends on (FSL_SOC_BOOKE && PPC32) || 8260
select CPM
select PPC_LIB_RHEAP
select PPC_PCI_CHOICE
@@ -300,7 +305,7 @@ source "arch/powerpc/sysdev/bestcomm/Kconfig"
config MPC8xxx_GPIO
bool "MPC8xxx GPIO support"
- depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || PPC_85xx || PPC_86xx
+ depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e382cae678b..fa0f690d386 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -28,8 +28,6 @@ config PPC_BOOK3S_32
config PPC_85xx
bool "Freescale 85xx"
select E500
- select FSL_SOC
- select MPC85xx
config PPC_8xx
bool "Freescale 8xx"
@@ -138,6 +136,14 @@ config PPC_FPU
bool
default y if PPC64
+config FSL_EMB_PERFMON
+ bool "Freescale Embedded Perfmon"
+ depends on E500 || PPC_83xx
+ help
+ This is the Performance Monitor support found on the e500 core
+ and some e300 cores (c3 and c4). Select this only if your
+ core supports the Embedded Performance Monitor APU
+
config 4xx
bool
depends on 40x || 44x
@@ -153,13 +159,6 @@ config FSL_BOOKE
depends on E200 || E500
default y
-config FSL_EMB_PERFMON
- bool "Freescale Embedded Perfmon"
- depends on E500 || PPC_83xx
- help
- This is the Performance Monitor support found on the e500 core
- and some e300 cores (c3 and c4). Select this only if your
- core supports the Embedded Performance Monitor APU
config PTE_64BIT
bool
@@ -312,7 +311,7 @@ config NR_CPUS
config NOT_COHERENT_CACHE
bool
- depends on 4xx || 8xx || E200 || PPC_MPC512x
+ depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
default y
config CHECK_CACHE_COHERENCY
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index a6812ee0010..fdb9f0b0d7a 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_PPC_MPC52xx) += 52xx/
obj-$(CONFIG_PPC_8xx) += 8xx/
obj-$(CONFIG_PPC_82xx) += 82xx/
obj-$(CONFIG_PPC_83xx) += 83xx/
-obj-$(CONFIG_PPC_85xx) += 85xx/
+obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
obj-$(CONFIG_PPC_86xx) += 86xx/
obj-$(CONFIG_PPC_PSERIES) += pseries/
obj-$(CONFIG_PPC_ISERIES) += iseries/
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 9290a7a442d..fb4eb0df054 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/seq_file.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index a86c34b3bb8..96fe896f6df 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -312,7 +312,7 @@ static struct irq_chip msic_irq_chip = {
.mask = mask_msi_irq,
.unmask = unmask_msi_irq,
.shutdown = unmask_msi_irq,
- .typename = "AXON-MSI",
+ .name = "AXON-MSI",
};
static int msic_host_map(struct irq_host *h, unsigned int virq,
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 72254848a22..36052a9ebcd 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -110,7 +110,7 @@ static void beatic_end_irq(unsigned int irq_plug)
}
static struct irq_chip beatic_pic = {
- .typename = " CELL-BEAT ",
+ .name = " CELL-BEAT ",
.unmask = beatic_unmask_irq,
.mask = beatic_mask_irq,
.eoi = beatic_end_irq,
@@ -136,7 +136,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
int64_t err;
err = beat_construct_and_connect_irq_plug(virq, hw);
@@ -166,11 +166,11 @@ static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq,
* Note: We have only 1 entry to translate.
*/
static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
- u64 *intspec2 = (u64 *)intspec;
+ const u64 *intspec2 = (const u64 *)intspec;
*out_hwirq = *intspec2;
*out_flags |= IRQ_TYPE_LEVEL_LOW;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 882e47080e7..6829cf7e2bd 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -54,7 +54,7 @@ struct iic {
struct device_node *node;
};
-static DEFINE_PER_CPU(struct iic, iic);
+static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT 2
static struct irq_host *iic_host;
@@ -82,13 +82,13 @@ static void iic_unmask(unsigned int irq)
static void iic_eoi(unsigned int irq)
{
- struct iic *iic = &__get_cpu_var(iic);
+ struct iic *iic = &__get_cpu_var(cpu_iic);
out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
BUG_ON(iic->eoi_ptr < 0);
}
static struct irq_chip iic_chip = {
- .typename = " CELL-IIC ",
+ .name = " CELL-IIC ",
.mask = iic_mask,
.unmask = iic_unmask,
.eoi = iic_eoi,
@@ -133,7 +133,7 @@ static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
static struct irq_chip iic_ioexc_chip = {
- .typename = " CELL-IOEX",
+ .name = " CELL-IOEX",
.mask = iic_mask,
.unmask = iic_unmask,
.eoi = iic_ioexc_eoi,
@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void)
struct iic *iic;
unsigned int virq;
- iic = &__get_cpu_var(iic);
+ iic = &__get_cpu_var(cpu_iic);
*(unsigned long *) &pending =
in_be64((u64 __iomem *) &iic->regs->pending_destr);
if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void)
void iic_setup_cpu(void)
{
- out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
+ out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
}
u8 iic_get_target_id(int cpu)
{
- return per_cpu(iic, cpu).target_id;
+ return per_cpu(cpu_iic, cpu).target_id;
}
EXPORT_SYMBOL_GPL(iic_get_target_id);
@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi)
void iic_cause_IPI(int cpu, int mesg)
{
- out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
+ out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
}
struct irq_host *iic_get_irq_host(int node)
@@ -237,7 +237,7 @@ extern int noirqdebug;
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
goto out_eoi;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_eoi:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int iic_host_map(struct irq_host *h, unsigned int virq,
@@ -297,7 +297,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
}
static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
/* XXX FIXME: should locate the linux CPU number from the HW cpu
* number properly. We are lucky for now
*/
- struct iic *iic = &per_cpu(iic, hw_cpu);
+ struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
BUG_ON(iic->regs == NULL);
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 4e5655624ae..01244f254a1 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -102,7 +102,7 @@ static void spider_ack_irq(unsigned int virq)
/* Reset edge detection logic if necessary
*/
- if (get_irq_desc(virq)->status & IRQ_LEVEL)
+ if (irq_to_desc(virq)->status & IRQ_LEVEL)
return;
/* Only interrupts 47 to 50 can be set to edge */
@@ -119,7 +119,7 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type)
struct spider_pic *pic = spider_virq_to_pic(virq);
unsigned int hw = irq_map[virq].hwirq;
void __iomem *cfg = spider_get_irq_config(pic, hw);
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
u32 old_mask;
u32 ic;
@@ -168,7 +168,7 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type)
}
static struct irq_chip spider_pic = {
- .typename = " SPIDER ",
+ .name = " SPIDER ",
.unmask = spider_unmask_irq,
.mask = spider_mask_irq,
.ack = spider_ack_irq,
@@ -187,7 +187,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq,
}
static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index b93f877ba50..b9d5d678aa4 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -13,10 +13,8 @@ SPU_CC := $(SPU_CROSS)gcc
SPU_AS := $(SPU_CROSS)gcc
SPU_LD := $(SPU_CROSS)ld
SPU_OBJCOPY := $(SPU_CROSS)objcopy
-SPU_CFLAGS := -O2 -Wall -I$(srctree)/include \
- -I$(objtree)/include2 -D__KERNEL__
-SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include \
- -I$(objtree)/include2 -D__KERNEL__
+SPU_CFLAGS := -O2 -Wall -I$(srctree)/include -D__KERNEL__
+SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include -D__KERNEL__
SPU_LDFLAGS := -N -Ttext=0x0
$(obj)/switch.o: $(obj)/spu_save_dump.h $(obj)/spu_restore_dump.h
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 884e8bcec49..64a4c2d85f7 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2494,7 +2494,7 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
int error = 0, cnt = 0;
- if (!buf || len < 0)
+ if (!buf)
return -EINVAL;
error = spu_acquire(ctx);
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index 37d438bd5b7..bc0b0efdc5f 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -5,6 +5,8 @@ config PPC_CHRP
select PPC_I8259
select PPC_INDIRECT_PCI
select PPC_RTAS
+ select PPC_RTAS_DAEMON
+ select RTAS_ERROR_LOGGING
select PPC_MPC106
select PPC_UDBG_16550
select PPC_NATIVE
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index cd4ad9aea76..8f41685d8f4 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -23,7 +23,7 @@
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -364,19 +364,6 @@ void __init chrp_setup_arch(void)
if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
}
-void
-chrp_event_scan(unsigned long unused)
-{
- unsigned char log[1024];
- int ret = 0;
-
- /* XXX: we should loop until the hardware says no more error logs -- Cort */
- rtas_call(rtas_token("event-scan"), 4, 1, &ret, 0xffffffff, 0,
- __pa(log), 1024);
- mod_timer(&__get_cpu_var(heartbeat_timer),
- jiffies + event_scan_interval);
-}
-
static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
unsigned int cascade_irq = i8259_irq();
@@ -568,9 +555,6 @@ void __init chrp_init_IRQ(void)
void __init
chrp_init2(void)
{
- struct device_node *device;
- const unsigned int *p = NULL;
-
#ifdef CONFIG_NVRAM
chrp_nvram_init();
#endif
@@ -582,40 +566,6 @@ chrp_init2(void)
request_region(0x80,0x10,"dma page reg");
request_region(0xc0,0x20,"dma2");
- /* Get the event scan rate for the rtas so we know how
- * often it expects a heartbeat. -- Cort
- */
- device = of_find_node_by_name(NULL, "rtas");
- if (device)
- p = of_get_property(device, "rtas-event-scan-rate", NULL);
- if (p && *p) {
- /*
- * Arrange to call chrp_event_scan at least *p times
- * per minute. We use 59 rather than 60 here so that
- * the rate will be slightly higher than the minimum.
- * This all assumes we don't do hotplug CPU on any
- * machine that needs the event scans done.
- */
- unsigned long interval, offset;
- int cpu, ncpus;
- struct timer_list *timer;
-
- interval = HZ * 59 / *p;
- offset = HZ;
- ncpus = num_online_cpus();
- event_scan_interval = ncpus * interval;
- for (cpu = 0; cpu < ncpus; ++cpu) {
- timer = &per_cpu(heartbeat_timer, cpu);
- setup_timer(timer, chrp_event_scan, 0);
- timer->expires = jiffies + offset;
- add_timer_on(timer, cpu);
- offset += interval;
- }
- printk("RTAS Event Scan Rate: %u (%lu jiffies)\n",
- *p, interval);
- }
- of_node_put(device);
-
if (ppc_md.progress)
ppc_md.progress(" Have fun! ", 0x7777);
}
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 291ac9d8cbe..524d971a147 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -90,3 +90,36 @@ config MPC10X_OPENPIC
config MPC10X_STORE_GATHERING
bool "Enable MPC10x store gathering"
depends on MPC10X_BRIDGE
+
+config GAMECUBE_COMMON
+ bool
+
+config USBGECKO_UDBG
+ bool "USB Gecko udbg console for the Nintendo GameCube/Wii"
+ depends on GAMECUBE_COMMON
+ help
+ If you say yes to this option, support will be included for the
+ USB Gecko adapter as an udbg console.
+	  The USB Gecko is an EXI to USB serial converter that can be plugged
+ into a memcard slot in the Nintendo GameCube/Wii.
+
+ This driver bypasses the EXI layer completely.
+
+ If in doubt, say N here.
+
+config GAMECUBE
+ bool "Nintendo-GameCube"
+ depends on EMBEDDED6xx
+ select GAMECUBE_COMMON
+ help
+ Select GAMECUBE if configuring for the Nintendo GameCube.
+ More information at: <http://gc-linux.sourceforge.net/>
+
+config WII
+ bool "Nintendo-Wii"
+ depends on EMBEDDED6xx
+ select GAMECUBE_COMMON
+ help
+ Select WII if configuring for the Nintendo Wii.
+ More information at: <http://gc-linux.sourceforge.net/>
+
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index 0773c08bd44..66c23e423f4 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -7,3 +7,7 @@ obj-$(CONFIG_STORCENTER) += storcenter.o
obj-$(CONFIG_PPC_HOLLY) += holly.o
obj-$(CONFIG_PPC_PRPMC2800) += prpmc2800.o
obj-$(CONFIG_PPC_C2K) += c2k.o
+obj-$(CONFIG_USBGECKO_UDBG) += usbgecko_udbg.o
+obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o
+obj-$(CONFIG_GAMECUBE) += gamecube.o
+obj-$(CONFIG_WII) += wii.o hlwd-pic.o
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
new file mode 100644
index 00000000000..c278bd3a8fe
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -0,0 +1,263 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/flipper-pic.c
+ *
+ * Nintendo GameCube/Wii "Flipper" interrupt controller support.
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2007,2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+#define DRV_MODULE_NAME "flipper-pic"
+#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <asm/io.h>
+
+#include "flipper-pic.h"
+
+#define FLIPPER_NR_IRQS 32
+
+/*
+ * Each interrupt has a corresponding bit in both
+ * the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers.
+ *
+ * Enabling/disabling an interrupt line involves setting/clearing
+ * the corresponding bit in IMR.
+ * Except for the RSW interrupt, all interrupts get deasserted automatically
+ * when the source deasserts the interrupt.
+ */
+#define FLIPPER_ICR 0x00
+#define FLIPPER_ICR_RSS (1<<16) /* reset switch state */
+
+#define FLIPPER_IMR 0x04
+
+#define FLIPPER_RESET 0x24
+
+
+/*
+ * IRQ chip hooks.
+ *
+ */
+
+static void flipper_pic_mask_and_ack(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+ u32 mask = 1 << irq;
+
+ clrbits32(io_base + FLIPPER_IMR, mask);
+ /* this is at least needed for RSW */
+ out_be32(io_base + FLIPPER_ICR, mask);
+}
+
+static void flipper_pic_ack(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+
+ /* this is at least needed for RSW */
+ out_be32(io_base + FLIPPER_ICR, 1 << irq);
+}
+
+static void flipper_pic_mask(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+
+ clrbits32(io_base + FLIPPER_IMR, 1 << irq);
+}
+
+static void flipper_pic_unmask(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+
+ setbits32(io_base + FLIPPER_IMR, 1 << irq);
+}
+
+
+static struct irq_chip flipper_pic = {
+ .name = "flipper-pic",
+ .ack = flipper_pic_ack,
+ .mask_ack = flipper_pic_mask_and_ack,
+ .mask = flipper_pic_mask,
+ .unmask = flipper_pic_unmask,
+};
+
+/*
+ * IRQ host hooks.
+ *
+ */
+
+static struct irq_host *flipper_irq_host;
+
+static int flipper_pic_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ set_irq_chip_data(virq, h->host_data);
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
+ set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq);
+ return 0;
+}
+
+static void flipper_pic_unmap(struct irq_host *h, unsigned int irq)
+{
+ set_irq_chip_data(irq, NULL);
+ set_irq_chip(irq, NULL);
+}
+
+static int flipper_pic_match(struct irq_host *h, struct device_node *np)
+{
+ return 1;
+}
+
+
+static struct irq_host_ops flipper_irq_host_ops = {
+ .map = flipper_pic_map,
+ .unmap = flipper_pic_unmap,
+ .match = flipper_pic_match,
+};
+
+/*
+ * Platform hooks.
+ *
+ */
+
+static void __flipper_quiesce(void __iomem *io_base)
+{
+ /* mask and ack all IRQs */
+ out_be32(io_base + FLIPPER_IMR, 0x00000000);
+ out_be32(io_base + FLIPPER_ICR, 0xffffffff);
+}
+
+struct irq_host * __init flipper_pic_init(struct device_node *np)
+{
+ struct device_node *pi;
+ struct irq_host *irq_host = NULL;
+ struct resource res;
+ void __iomem *io_base;
+ int retval;
+
+ pi = of_get_parent(np);
+ if (!pi) {
+ pr_err("no parent found\n");
+ goto out;
+ }
+ if (!of_device_is_compatible(pi, "nintendo,flipper-pi")) {
+ pr_err("unexpected parent compatible\n");
+ goto out;
+ }
+
+ retval = of_address_to_resource(pi, 0, &res);
+ if (retval) {
+ pr_err("no io memory range found\n");
+ goto out;
+ }
+ io_base = ioremap(res.start, resource_size(&res));
+
+ pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+
+ __flipper_quiesce(io_base);
+
+ irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS,
+ &flipper_irq_host_ops, -1);
+ if (!irq_host) {
+ pr_err("failed to allocate irq_host\n");
+ return NULL;
+ }
+
+ irq_host->host_data = io_base;
+
+out:
+ return irq_host;
+}
+
+unsigned int flipper_pic_get_irq(void)
+{
+ void __iomem *io_base = flipper_irq_host->host_data;
+ int irq;
+ u32 irq_status;
+
+ irq_status = in_be32(io_base + FLIPPER_ICR) &
+ in_be32(io_base + FLIPPER_IMR);
+ if (irq_status == 0)
+ return NO_IRQ; /* no more IRQs pending */
+
+ irq = __ffs(irq_status);
+ return irq_linear_revmap(flipper_irq_host, irq);
+}
+
+/*
+ * Probe function.
+ *
+ */
+
+void __init flipper_pic_probe(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-pic");
+ BUG_ON(!np);
+
+ flipper_irq_host = flipper_pic_init(np);
+ BUG_ON(!flipper_irq_host);
+
+ irq_set_default_host(flipper_irq_host);
+
+ of_node_put(np);
+}
+
+/*
+ * Misc functions related to the flipper chipset.
+ *
+ */
+
+/**
+ * flipper_quiesce() - quiesce flipper irq controller
+ *
+ * Mask and ack all interrupt sources.
+ *
+ */
+void flipper_quiesce(void)
+{
+ void __iomem *io_base = flipper_irq_host->host_data;
+
+ __flipper_quiesce(io_base);
+}
+
+/*
+ * Resets the platform.
+ */
+void flipper_platform_reset(void)
+{
+ void __iomem *io_base;
+
+ if (flipper_irq_host && flipper_irq_host->host_data) {
+ io_base = flipper_irq_host->host_data;
+ out_8(io_base + FLIPPER_RESET, 0x00);
+ }
+}
+
+/*
+ * Returns non-zero if the reset button is pressed.
+ */
+int flipper_is_reset_button_pressed(void)
+{
+ void __iomem *io_base;
+ u32 icr;
+
+ if (flipper_irq_host && flipper_irq_host->host_data) {
+ io_base = flipper_irq_host->host_data;
+ icr = in_be32(io_base + FLIPPER_ICR);
+ return !(icr & FLIPPER_ICR_RSS);
+ }
+ return 0;
+}
+
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.h b/arch/powerpc/platforms/embedded6xx/flipper-pic.h
new file mode 100644
index 00000000000..e339186b566
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.h
@@ -0,0 +1,25 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/flipper-pic.h
+ *
+ * Nintendo GameCube/Wii "Flipper" interrupt controller support.
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2007,2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef __FLIPPER_PIC_H
+#define __FLIPPER_PIC_H
+
+unsigned int flipper_pic_get_irq(void);
+void __init flipper_pic_probe(void);
+
+void flipper_quiesce(void);
+void flipper_platform_reset(void);
+int flipper_is_reset_button_pressed(void);
+
+#endif
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
new file mode 100644
index 00000000000..1106fd99627
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -0,0 +1,118 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/gamecube.c
+ *
+ * Nintendo GameCube board-specific support
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2007,2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kexec.h>
+#include <linux/seq_file.h>
+#include <linux/of_platform.h>
+
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+
+#include "flipper-pic.h"
+#include "usbgecko_udbg.h"
+
+
+static void gamecube_spin(void)
+{
+ /* spin until power button pressed */
+ for (;;)
+ cpu_relax();
+}
+
+static void gamecube_restart(char *cmd)
+{
+ local_irq_disable();
+ flipper_platform_reset();
+ gamecube_spin();
+}
+
+static void gamecube_power_off(void)
+{
+ local_irq_disable();
+ gamecube_spin();
+}
+
+static void gamecube_halt(void)
+{
+ gamecube_restart(NULL);
+}
+
+static void __init gamecube_init_early(void)
+{
+ ug_udbg_init();
+}
+
+static int __init gamecube_probe(void)
+{
+ unsigned long dt_root;
+
+ dt_root = of_get_flat_dt_root();
+ if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube"))
+ return 0;
+
+ return 1;
+}
+
+static void gamecube_shutdown(void)
+{
+ flipper_quiesce();
+}
+
+#ifdef CONFIG_KEXEC
+static int gamecube_kexec_prepare(struct kimage *image)
+{
+ return 0;
+}
+#endif /* CONFIG_KEXEC */
+
+
+define_machine(gamecube) {
+ .name = "gamecube",
+ .probe = gamecube_probe,
+ .init_early = gamecube_init_early,
+ .restart = gamecube_restart,
+ .power_off = gamecube_power_off,
+ .halt = gamecube_halt,
+ .init_IRQ = flipper_pic_probe,
+ .get_irq = flipper_pic_get_irq,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+ .machine_shutdown = gamecube_shutdown,
+#ifdef CONFIG_KEXEC
+ .machine_kexec_prepare = gamecube_kexec_prepare,
+#endif
+};
+
+
+static struct of_device_id gamecube_of_bus[] = {
+ { .compatible = "nintendo,flipper", },
+ { },
+};
+
+static int __init gamecube_device_probe(void)
+{
+ if (!machine_is(gamecube))
+ return 0;
+
+ of_platform_bus_probe(NULL, gamecube_of_bus, NULL);
+ return 0;
+}
+device_initcall(gamecube_device_probe);
+
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
new file mode 100644
index 00000000000..a771f91e215
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -0,0 +1,241 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+ *
+ * Nintendo Wii "Hollywood" interrupt controller support.
+ * Copyright (C) 2009 The GameCube Linux Team
+ * Copyright (C) 2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+#define DRV_MODULE_NAME "hlwd-pic"
+#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <asm/io.h>
+
+#include "hlwd-pic.h"
+
+#define HLWD_NR_IRQS 32
+
+/*
+ * Each interrupt has a corresponding bit in both
+ * the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers.
+ *
+ * Enabling/disabling an interrupt line involves asserting/clearing
+ * the corresponding bit in IMR. ACK'ing a request simply involves
+ * asserting the corresponding bit in ICR.
+ */
+#define HW_BROADWAY_ICR 0x00
+#define HW_BROADWAY_IMR 0x04
+
+
+/*
+ * IRQ chip hooks.
+ *
+ */
+
+static void hlwd_pic_mask_and_ack(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+ u32 mask = 1 << irq;
+
+ clrbits32(io_base + HW_BROADWAY_IMR, mask);
+ out_be32(io_base + HW_BROADWAY_ICR, mask);
+}
+
+static void hlwd_pic_ack(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+
+ out_be32(io_base + HW_BROADWAY_ICR, 1 << irq);
+}
+
+static void hlwd_pic_mask(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+
+ clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
+}
+
+static void hlwd_pic_unmask(unsigned int virq)
+{
+ int irq = virq_to_hw(virq);
+ void __iomem *io_base = get_irq_chip_data(virq);
+
+ setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
+}
+
+
+static struct irq_chip hlwd_pic = {
+ .name = "hlwd-pic",
+ .ack = hlwd_pic_ack,
+ .mask_ack = hlwd_pic_mask_and_ack,
+ .mask = hlwd_pic_mask,
+ .unmask = hlwd_pic_unmask,
+};
+
+/*
+ * IRQ host hooks.
+ *
+ */
+
+static struct irq_host *hlwd_irq_host;
+
+static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ set_irq_chip_data(virq, h->host_data);
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
+ set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq);
+ return 0;
+}
+
+static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq)
+{
+ set_irq_chip_data(irq, NULL);
+ set_irq_chip(irq, NULL);
+}
+
+static struct irq_host_ops hlwd_irq_host_ops = {
+ .map = hlwd_pic_map,
+ .unmap = hlwd_pic_unmap,
+};
+
+static unsigned int __hlwd_pic_get_irq(struct irq_host *h)
+{
+ void __iomem *io_base = h->host_data;
+ int irq;
+ u32 irq_status;
+
+ irq_status = in_be32(io_base + HW_BROADWAY_ICR) &
+ in_be32(io_base + HW_BROADWAY_IMR);
+ if (irq_status == 0)
+ return NO_IRQ; /* no more IRQs pending */
+
+ irq = __ffs(irq_status);
+ return irq_linear_revmap(h, irq);
+}
+
+static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
+ struct irq_desc *desc)
+{
+ struct irq_host *irq_host = get_irq_data(cascade_virq);
+ unsigned int virq;
+
+ raw_spin_lock(&desc->lock);
+ desc->chip->mask(cascade_virq); /* IRQ_LEVEL */
+ raw_spin_unlock(&desc->lock);
+
+ virq = __hlwd_pic_get_irq(irq_host);
+ if (virq != NO_IRQ)
+ generic_handle_irq(virq);
+ else
+ pr_err("spurious interrupt!\n");
+
+ raw_spin_lock(&desc->lock);
+ desc->chip->ack(cascade_virq); /* IRQ_LEVEL */
+ if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+ desc->chip->unmask(cascade_virq);
+ raw_spin_unlock(&desc->lock);
+}
+
+/*
+ * Platform hooks.
+ *
+ */
+
+static void __hlwd_quiesce(void __iomem *io_base)
+{
+ /* mask and ack all IRQs */
+ out_be32(io_base + HW_BROADWAY_IMR, 0);
+ out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
+}
+
+struct irq_host *hlwd_pic_init(struct device_node *np)
+{
+ struct irq_host *irq_host;
+ struct resource res;
+ void __iomem *io_base;
+ int retval;
+
+ retval = of_address_to_resource(np, 0, &res);
+ if (retval) {
+ pr_err("no io memory range found\n");
+ return NULL;
+ }
+ io_base = ioremap(res.start, resource_size(&res));
+ if (!io_base) {
+ pr_err("ioremap failed\n");
+ return NULL;
+ }
+
+ pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
+
+ __hlwd_quiesce(io_base);
+
+ irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, HLWD_NR_IRQS,
+ &hlwd_irq_host_ops, -1);
+ if (!irq_host) {
+ pr_err("failed to allocate irq_host\n");
+ return NULL;
+ }
+ irq_host->host_data = io_base;
+
+ return irq_host;
+}
+
+unsigned int hlwd_pic_get_irq(void)
+{
+ return __hlwd_pic_get_irq(hlwd_irq_host);
+}
+
+/*
+ * Probe function.
+ *
+ */
+
+void hlwd_pic_probe(void)
+{
+ struct irq_host *host;
+ struct device_node *np;
+ const u32 *interrupts;
+ int cascade_virq;
+
+ for_each_compatible_node(np, NULL, "nintendo,hollywood-pic") {
+ interrupts = of_get_property(np, "interrupts", NULL);
+ if (interrupts) {
+ host = hlwd_pic_init(np);
+ BUG_ON(!host);
+ cascade_virq = irq_of_parse_and_map(np, 0);
+ set_irq_data(cascade_virq, host);
+ set_irq_chained_handler(cascade_virq,
+ hlwd_pic_irq_cascade);
+ hlwd_irq_host = host;
+ break;
+ }
+ }
+}
+
+/**
+ * hlwd_quiesce() - quiesce hollywood irq controller
+ *
+ * Mask and ack all interrupt sources.
+ *
+ */
+void hlwd_quiesce(void)
+{
+ void __iomem *io_base = hlwd_irq_host->host_data;
+
+ __hlwd_quiesce(io_base);
+}
+
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.h b/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
new file mode 100644
index 00000000000..d2e5a092761
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
@@ -0,0 +1,22 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/hlwd-pic.h
+ *
+ * Nintendo Wii "Hollywood" interrupt controller support.
+ * Copyright (C) 2009 The GameCube Linux Team
+ * Copyright (C) 2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef __HLWD_PIC_H
+#define __HLWD_PIC_H
+
+extern unsigned int hlwd_pic_get_irq(void);
+extern void hlwd_pic_probe(void);
+extern void hlwd_quiesce(void);
+
+#endif
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
new file mode 100644
index 00000000000..20a8ed91962
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
@@ -0,0 +1,328 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
+ *
+ * udbg serial input/output routines for the USB Gecko adapter.
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include <mm/mmu_decl.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/fixmap.h>
+
+#include "usbgecko_udbg.h"
+
+
+#define EXI_CLK_32MHZ 5
+
+#define EXI_CSR 0x00
+#define EXI_CSR_CLKMASK (0x7<<4)
+#define EXI_CSR_CLK_32MHZ (EXI_CLK_32MHZ<<4)
+#define EXI_CSR_CSMASK (0x7<<7)
+#define EXI_CSR_CS_0 (0x1<<7) /* Chip Select 001 */
+
+#define EXI_CR 0x0c
+#define EXI_CR_TSTART (1<<0)
+#define EXI_CR_WRITE (1<<2)
+#define EXI_CR_READ_WRITE (2<<2)
+#define EXI_CR_TLEN(len) (((len)-1)<<4)
+
+#define EXI_DATA 0x10
+
+#define UG_READ_ATTEMPTS 100
+#define UG_WRITE_ATTEMPTS 100
+
+
+static void __iomem *ug_io_base;
+
+/*
+ * Performs one input/output transaction between the exi host and the usbgecko.
+ */
+static u32 ug_io_transaction(u32 in)
+{
+ u32 __iomem *csr_reg = ug_io_base + EXI_CSR;
+ u32 __iomem *data_reg = ug_io_base + EXI_DATA;
+ u32 __iomem *cr_reg = ug_io_base + EXI_CR;
+ u32 csr, data, cr;
+
+ /* select */
+ csr = EXI_CSR_CLK_32MHZ | EXI_CSR_CS_0;
+ out_be32(csr_reg, csr);
+
+ /* read/write */
+ data = in;
+ out_be32(data_reg, data);
+ cr = EXI_CR_TLEN(2) | EXI_CR_READ_WRITE | EXI_CR_TSTART;
+ out_be32(cr_reg, cr);
+
+ while (in_be32(cr_reg) & EXI_CR_TSTART)
+ barrier();
+
+ /* deselect */
+ out_be32(csr_reg, 0);
+
+ /* result */
+ data = in_be32(data_reg);
+
+ return data;
+}
+
+/*
+ * Returns true if a usbgecko adapter is found.
+ */
+static int ug_is_adapter_present(void)
+{
+ if (!ug_io_base)
+ return 0;
+
+ return ug_io_transaction(0x90000000) == 0x04700000;
+}
+
+/*
+ * Returns true if the TX fifo is ready for transmission.
+ */
+static int ug_is_txfifo_ready(void)
+{
+ return ug_io_transaction(0xc0000000) & 0x04000000;
+}
+
+/*
+ * Tries to transmit a character.
+ * If the TX fifo is not ready the result is undefined.
+ */
+static void ug_raw_putc(char ch)
+{
+ ug_io_transaction(0xb0000000 | (ch << 20));
+}
+
+/*
+ * Transmits a character.
+ * It silently fails if the TX fifo is not ready after a number of retries.
+ */
+static void ug_putc(char ch)
+{
+ int count = UG_WRITE_ATTEMPTS;
+
+ if (!ug_io_base)
+ return;
+
+ if (ch == '\n')
+ ug_putc('\r');
+
+ while (!ug_is_txfifo_ready() && count--)
+ barrier();
+ if (count >= 0)
+ ug_raw_putc(ch);
+}
+
+/*
+ * Returns true if the RX fifo has data ready to be read.
+ */
+static int ug_is_rxfifo_ready(void)
+{
+ return ug_io_transaction(0xd0000000) & 0x04000000;
+}
+
+/*
+ * Tries to receive a character.
+ * If a character is unavailable the function returns -1.
+ */
+static int ug_raw_getc(void)
+{
+ u32 data = ug_io_transaction(0xa0000000);
+ if (data & 0x08000000)
+ return (data >> 16) & 0xff;
+ else
+ return -1;
+}
+
+/*
+ * Receives a character.
+ * It fails if the RX fifo is not ready after a number of retries.
+ */
+static int ug_getc(void)
+{
+ int count = UG_READ_ATTEMPTS;
+
+ if (!ug_io_base)
+ return -1;
+
+ while (!ug_is_rxfifo_ready() && count--)
+ barrier();
+ return ug_raw_getc();
+}
+
+/*
+ * udbg functions.
+ *
+ */
+
+/*
+ * Transmits a character.
+ */
+void ug_udbg_putc(char ch)
+{
+ ug_putc(ch);
+}
+
+/*
+ * Receives a character. Waits until a character is available.
+ */
+static int ug_udbg_getc(void)
+{
+ int ch;
+
+ while ((ch = ug_getc()) == -1)
+ barrier();
+ return ch;
+}
+
+/*
+ * Receives a character. If a character is not available, returns -1.
+ */
+static int ug_udbg_getc_poll(void)
+{
+ if (!ug_is_rxfifo_ready())
+ return -1;
+ return ug_getc();
+}
+
+/*
+ * Retrieves and prepares the virtual address needed to access the hardware.
+ */
+static void __iomem *ug_udbg_setup_exi_io_base(struct device_node *np)
+{
+ void __iomem *exi_io_base = NULL;
+ phys_addr_t paddr;
+ const unsigned int *reg;
+
+ reg = of_get_property(np, "reg", NULL);
+ if (reg) {
+ paddr = of_translate_address(np, reg);
+ if (paddr)
+ exi_io_base = ioremap(paddr, reg[1]);
+ }
+ return exi_io_base;
+}
+
+/*
+ * Checks if a USB Gecko adapter is inserted in any memory card slot.
+ */
+static void __iomem *ug_udbg_probe(void __iomem *exi_io_base)
+{
+ int i;
+
+ /* look for a usbgecko on memcard slots A and B */
+ for (i = 0; i < 2; i++) {
+ ug_io_base = exi_io_base + 0x14 * i;
+ if (ug_is_adapter_present())
+ break;
+ }
+ if (i == 2)
+ ug_io_base = NULL;
+ return ug_io_base;
+
+}
+
+/*
+ * USB Gecko udbg support initialization.
+ */
+void __init ug_udbg_init(void)
+{
+ struct device_node *np;
+ void __iomem *exi_io_base;
+
+ if (ug_io_base)
+ udbg_printf("%s: early -> final\n", __func__);
+
+ np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-exi");
+ if (!np) {
+ udbg_printf("%s: EXI node not found\n", __func__);
+ goto done;
+ }
+
+ exi_io_base = ug_udbg_setup_exi_io_base(np);
+ if (!exi_io_base) {
+ udbg_printf("%s: failed to setup EXI io base\n", __func__);
+ goto done;
+ }
+
+ if (!ug_udbg_probe(exi_io_base)) {
+ udbg_printf("usbgecko_udbg: not found\n");
+ iounmap(exi_io_base);
+ } else {
+ udbg_putc = ug_udbg_putc;
+ udbg_getc = ug_udbg_getc;
+ udbg_getc_poll = ug_udbg_getc_poll;
+ udbg_printf("usbgecko_udbg: ready\n");
+ }
+
+done:
+ if (np)
+ of_node_put(np);
+ return;
+}
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
+
+static phys_addr_t __init ug_early_grab_io_addr(void)
+{
+#if defined(CONFIG_GAMECUBE)
+ return 0x0c000000;
+#elif defined(CONFIG_WII)
+ return 0x0d000000;
+#else
+#error Invalid platform for USB Gecko based early debugging.
+#endif
+}
+
+/*
+ * USB Gecko early debug support initialization for udbg.
+ */
+void __init udbg_init_usbgecko(void)
+{
+ void __iomem *early_debug_area;
+ void __iomem *exi_io_base;
+
+ /*
+ * At this point we have a BAT already setup that enables I/O
+ * to the EXI hardware.
+ *
+ * The BAT uses a virtual address range reserved at the fixmap.
+ * This must match the virtual address configured in
+ * head_32.S:setup_usbgecko_bat().
+ */
+ early_debug_area = (void __iomem *)__fix_to_virt(FIX_EARLY_DEBUG_BASE);
+ exi_io_base = early_debug_area + 0x00006800;
+
+ /* try to detect a USB Gecko */
+ if (!ug_udbg_probe(exi_io_base))
+ return;
+
+ /* we found a USB Gecko, load udbg hooks */
+ udbg_putc = ug_udbg_putc;
+ udbg_getc = ug_udbg_getc;
+ udbg_getc_poll = ug_udbg_getc_poll;
+
+ /*
+ * Prepare again the same BAT for MMU_init.
+ * This allows udbg I/O to continue working after the MMU is
+ * turned on for real.
+ * It is safe to continue using the same virtual address as it is
+ * a reserved fixmap area.
+ */
+ setbat(1, (unsigned long)early_debug_area,
+ ug_early_grab_io_addr(), 128*1024, PAGE_KERNEL_NCG);
+}
+
+#endif /* CONFIG_PPC_EARLY_DEBUG_USBGECKO */
+
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.h b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.h
new file mode 100644
index 00000000000..bb6cde4ad76
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.h
@@ -0,0 +1,32 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/usbgecko_udbg.h
+ *
+ * udbg serial input/output routines for the USB Gecko adapter.
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef __USBGECKO_UDBG_H
+#define __USBGECKO_UDBG_H
+
+#ifdef CONFIG_USBGECKO_UDBG
+
+extern void __init ug_udbg_init(void);
+
+#else
+
+static inline void __init ug_udbg_init(void)
+{
+}
+
+#endif /* CONFIG_USBGECKO_UDBG */
+
+void __init udbg_init_usbgecko(void);
+
+#endif /* __USBGECKO_UDBG_H */
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
new file mode 100644
index 00000000000..57e5b608fa1
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -0,0 +1,268 @@
+/*
+ * arch/powerpc/platforms/embedded6xx/wii.c
+ *
+ * Nintendo Wii board-specific support
+ * Copyright (C) 2008-2009 The GameCube Linux Team
+ * Copyright (C) 2008,2009 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+#define DRV_MODULE_NAME "wii"
+#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/kexec.h>
+#include <linux/of_platform.h>
+#include <linux/lmb.h>
+#include <mm/mmu_decl.h>
+
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+
+#include "flipper-pic.h"
+#include "hlwd-pic.h"
+#include "usbgecko_udbg.h"
+
+/* control block */
+#define HW_CTRL_COMPATIBLE "nintendo,hollywood-control"
+
+#define HW_CTRL_RESETS 0x94
+#define HW_CTRL_RESETS_SYS (1<<0)
+
+/* gpio */
+#define HW_GPIO_COMPATIBLE "nintendo,hollywood-gpio"
+
+#define HW_GPIO_BASE(idx) (idx * 0x20)
+#define HW_GPIO_OUT(idx) (HW_GPIO_BASE(idx) + 0)
+#define HW_GPIO_DIR(idx) (HW_GPIO_BASE(idx) + 4)
+
+#define HW_GPIO_SHUTDOWN (1<<1)
+#define HW_GPIO_SLOT_LED (1<<5)
+#define HW_GPIO_SENSOR_BAR (1<<8)
+
+
+static void __iomem *hw_ctrl;
+static void __iomem *hw_gpio;
+
+unsigned long wii_hole_start;
+unsigned long wii_hole_size;
+
+
+static int __init page_aligned(unsigned long x)
+{
+ return !(x & (PAGE_SIZE-1));
+}
+
+void __init wii_memory_fixups(void)
+{
+ struct lmb_property *p = lmb.memory.region;
+
+ /*
+ * This is part of a workaround to allow the use of two
+ * discontiguous RAM ranges on the Wii, even if this is
+ * currently unsupported on 32-bit PowerPC Linux.
+ *
+ * We coalesce the two memory ranges of the Wii into a
+ * single range, then create a reservation for the "hole"
+ * between both ranges.
+ */
+
+ BUG_ON(lmb.memory.cnt != 2);
+ BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
+
+ p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
+ p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);
+
+ wii_hole_start = p[0].base + p[0].size;
+ wii_hole_size = p[1].base - wii_hole_start;
+
+ pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
+ pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
+ pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
+
+ p[0].size += wii_hole_size + p[1].size;
+
+ lmb.memory.cnt = 1;
+ lmb_analyze();
+
+ /* reserve the hole */
+ lmb_reserve(wii_hole_start, wii_hole_size);
+
+ /* allow ioremapping the address space in the hole */
+ __allow_ioremap_reserved = 1;
+}
+
+unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
+{
+ unsigned long delta, size, bl;
+ unsigned long max_size = (256<<20);
+
+ /* MEM2 64MB@0x10000000 */
+ delta = wii_hole_start + wii_hole_size;
+ size = top - delta;
+ for (bl = 128<<10; bl < max_size; bl <<= 1) {
+ if (bl * 2 > size)
+ break;
+ }
+ setbat(4, PAGE_OFFSET+delta, delta, bl, PAGE_KERNEL_X);
+ return delta + bl;
+}
+
+static void wii_spin(void)
+{
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+}
+
+static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible)
+{
+ void __iomem *hw_regs = NULL;
+ struct device_node *np;
+ struct resource res;
+ int error = -ENODEV;
+
+ np = of_find_compatible_node(NULL, NULL, compatible);
+ if (!np) {
+ pr_err("no compatible node found for %s\n", compatible);
+ goto out;
+ }
+ error = of_address_to_resource(np, 0, &res);
+ if (error) {
+ pr_err("no valid reg found for %s\n", np->name);
+ goto out_put;
+ }
+
+ hw_regs = ioremap(res.start, resource_size(&res));
+ if (hw_regs) {
+ pr_info("%s at 0x%08x mapped to 0x%p\n", name,
+ res.start, hw_regs);
+ }
+
+out_put:
+ of_node_put(np);
+out:
+ return hw_regs;
+}
+
+static void __init wii_setup_arch(void)
+{
+ hw_ctrl = wii_ioremap_hw_regs("hw_ctrl", HW_CTRL_COMPATIBLE);
+ hw_gpio = wii_ioremap_hw_regs("hw_gpio", HW_GPIO_COMPATIBLE);
+ if (hw_gpio) {
+ /* turn off the front blue led and IR light */
+ clrbits32(hw_gpio + HW_GPIO_OUT(0),
+ HW_GPIO_SLOT_LED | HW_GPIO_SENSOR_BAR);
+ }
+}
+
+static void wii_restart(char *cmd)
+{
+ local_irq_disable();
+
+ if (hw_ctrl) {
+ /* clear the system reset pin to cause a reset */
+ clrbits32(hw_ctrl + HW_CTRL_RESETS, HW_CTRL_RESETS_SYS);
+ }
+ wii_spin();
+}
+
+static void wii_power_off(void)
+{
+ local_irq_disable();
+
+ if (hw_gpio) {
+ /* make sure that the poweroff GPIO is configured as output */
+ setbits32(hw_gpio + HW_GPIO_DIR(1), HW_GPIO_SHUTDOWN);
+
+ /* drive the poweroff GPIO high */
+ setbits32(hw_gpio + HW_GPIO_OUT(1), HW_GPIO_SHUTDOWN);
+ }
+ wii_spin();
+}
+
+static void wii_halt(void)
+{
+ if (ppc_md.restart)
+ ppc_md.restart(NULL);
+ wii_spin();
+}
+
+static void __init wii_init_early(void)
+{
+ ug_udbg_init();
+}
+
+static void __init wii_pic_probe(void)
+{
+ flipper_pic_probe();
+ hlwd_pic_probe();
+}
+
+static int __init wii_probe(void)
+{
+ unsigned long dt_root;
+
+ dt_root = of_get_flat_dt_root();
+ if (!of_flat_dt_is_compatible(dt_root, "nintendo,wii"))
+ return 0;
+
+ return 1;
+}
+
+static void wii_shutdown(void)
+{
+ hlwd_quiesce();
+ flipper_quiesce();
+}
+
+#ifdef CONFIG_KEXEC
+static int wii_machine_kexec_prepare(struct kimage *image)
+{
+ return 0;
+}
+#endif /* CONFIG_KEXEC */
+
+define_machine(wii) {
+ .name = "wii",
+ .probe = wii_probe,
+ .init_early = wii_init_early,
+ .setup_arch = wii_setup_arch,
+ .restart = wii_restart,
+ .power_off = wii_power_off,
+ .halt = wii_halt,
+ .init_IRQ = wii_pic_probe,
+ .get_irq = flipper_pic_get_irq,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+ .machine_shutdown = wii_shutdown,
+#ifdef CONFIG_KEXEC
+ .machine_kexec_prepare = wii_machine_kexec_prepare,
+#endif
+};
+
+static struct of_device_id wii_of_bus[] = {
+ { .compatible = "nintendo,hollywood", },
+ { },
+};
+
+static int __init wii_device_probe(void)
+{
+ if (!machine_is(wii))
+ return 0;
+
+ of_platform_bus_probe(NULL, wii_of_bus, NULL);
+ return 0;
+}
+device_initcall(wii_device_probe);
+
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index f99c6c4b698..3ae66ab9d5e 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -19,8 +19,7 @@
#include "call_hpt.h"
-static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
- { [0 ... 63] = SPIN_LOCK_UNLOCKED};
+static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;
/*
* Very primitive algorithm for picking up a lock
@@ -245,6 +244,11 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
void __init hpte_init_iSeries(void)
{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
+ spin_lock_init(&iSeries_hlocks[i]);
+
ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index 94f44475883..86c4b29eea8 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -214,12 +214,12 @@ void __init iSeries_activate_IRQs()
unsigned long flags;
for_each_irq (irq) {
- struct irq_desc *desc = get_irq_desc(irq);
+ struct irq_desc *desc = irq_to_desc(irq);
if (desc && desc->chip && desc->chip->startup) {
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip->startup(irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
}
@@ -273,7 +273,7 @@ static void iseries_end_IRQ(unsigned int irq)
}
static struct irq_chip iseries_pic = {
- .typename = "iSeries irq controller",
+ .name = "iSeries irq controller",
.startup = iseries_startup_IRQ,
.shutdown = iseries_shutdown_IRQ,
.unmask = iseries_enable_IRQ,
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 0d9343df35b..6617915bcb1 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -855,59 +855,58 @@ static int mf_get_boot_rtc(struct rtc_time *tm)
}
#ifdef CONFIG_PROC_FS
-
-static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int mf_cmdline_proc_show(struct seq_file *m, void *v)
{
- int len;
- char *p;
+ char *page, *p;
struct vsp_cmd_data vsp_cmd;
int rc;
dma_addr_t dma_addr;
/* The HV appears to return no more than 256 bytes of command line */
- if (off >= 256)
- return 0;
- if ((off + count) > 256)
- count = 256 - off;
+ page = kmalloc(256, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
- dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
- if (dma_addr == DMA_ERROR_CODE)
+ dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE);
+ if (dma_addr == DMA_ERROR_CODE) {
+ kfree(page);
return -ENOMEM;
- memset(page, 0, off + count);
+ }
+ memset(page, 0, 256);
memset(&vsp_cmd, 0, sizeof(vsp_cmd));
vsp_cmd.cmd = 33;
vsp_cmd.sub_data.kern.token = dma_addr;
vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
- vsp_cmd.sub_data.kern.side = (u64)data;
- vsp_cmd.sub_data.kern.length = off + count;
+ vsp_cmd.sub_data.kern.side = (u64)m->private;
+ vsp_cmd.sub_data.kern.length = 256;
mb();
rc = signal_vsp_instruction(&vsp_cmd);
- iseries_hv_unmap(dma_addr, off + count, DMA_FROM_DEVICE);
- if (rc)
+ iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE);
+ if (rc) {
+ kfree(page);
return rc;
- if (vsp_cmd.result_code != 0)
+ }
+ if (vsp_cmd.result_code != 0) {
+ kfree(page);
return -ENOMEM;
+ }
p = page;
- len = 0;
- while (len < (off + count)) {
- if ((*p == '\0') || (*p == '\n')) {
- if (*p == '\0')
- *p = '\n';
- p++;
- len++;
- *eof = 1;
+ while (p - page < 256) {
+ if (*p == '\0' || *p == '\n') {
+ *p = '\n';
break;
}
p++;
- len++;
- }
- if (len < off) {
- *eof = 1;
- len = 0;
}
- return len;
+ seq_write(m, page, p - page);
+ kfree(page);
+ return 0;
+}
+
+static int mf_cmdline_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mf_cmdline_proc_show, PDE(inode)->data);
}
#if 0
@@ -962,10 +961,8 @@ static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
}
#endif
-static int proc_mf_dump_side(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int mf_side_proc_show(struct seq_file *m, void *v)
{
- int len;
char mf_current_side = ' ';
struct vsp_cmd_data vsp_cmd;
@@ -989,21 +986,17 @@ static int proc_mf_dump_side(char *page, char **start, off_t off,
}
}
- len = sprintf(page, "%c\n", mf_current_side);
+ seq_printf(m, "%c\n", mf_current_side);
+ return 0;
+}
- if (len <= (off + count))
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
+static int mf_side_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mf_side_proc_show, NULL);
}
-static int proc_mf_change_side(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char side;
u64 newSide;
@@ -1041,6 +1034,15 @@ static int proc_mf_change_side(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations mf_side_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mf_side_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mf_side_proc_write,
+};
+
#if 0
static void mf_getSrcHistory(char *buffer, int size)
{
@@ -1087,8 +1089,7 @@ static void mf_getSrcHistory(char *buffer, int size)
}
#endif
-static int proc_mf_dump_src(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int mf_src_proc_show(struct seq_file *m, void *v)
{
#if 0
int len;
@@ -1109,8 +1110,13 @@ static int proc_mf_dump_src(char *page, char **start, off_t off,
#endif
}
-static int proc_mf_change_src(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static int mf_src_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mf_src_proc_show, NULL);
+}
+
+static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char stkbuf[10];
@@ -1135,9 +1141,19 @@ static int proc_mf_change_src(struct file *file, const char __user *buffer,
return count;
}
-static int proc_mf_change_cmdline(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static const struct file_operations mf_src_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mf_src_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mf_src_proc_write,
+};
+
+static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
+ void *data = PDE(file->f_path.dentry->d_inode)->data;
struct vsp_cmd_data vsp_cmd;
dma_addr_t dma_addr;
char *page;
@@ -1172,6 +1188,15 @@ out:
return ret;
}
+static const struct file_operations mf_cmdline_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mf_cmdline_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = mf_cmdline_proc_write,
+};
+
static ssize_t proc_mf_change_vmlinux(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
@@ -1246,12 +1271,10 @@ static int __init mf_proc_init(void)
if (!mf)
return 1;
- ent = create_proc_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, mf);
+ ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf,
+ &mf_cmdline_proc_fops, (void *)(long)i);
if (!ent)
return 1;
- ent->data = (void *)(long)i;
- ent->read_proc = proc_mf_dump_cmdline;
- ent->write_proc = proc_mf_change_cmdline;
if (i == 3) /* no vmlinux entry for 'D' */
continue;
@@ -1263,19 +1286,15 @@ static int __init mf_proc_init(void)
return 1;
}
- ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
+ ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
+ &mf_side_proc_fops);
if (!ent)
return 1;
- ent->data = (void *)0;
- ent->read_proc = proc_mf_dump_side;
- ent->write_proc = proc_mf_change_side;
- ent = create_proc_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
+ ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
+ &mf_src_proc_fops);
if (!ent)
return 1;
- ent->data = (void *)0;
- ent->read_proc = proc_mf_dump_src;
- ent->write_proc = proc_mf_change_src;
return 0;
}
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 49ff4dc422b..5aea94f3083 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -116,7 +116,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
u16 vlanMap;
dma_addr_t handle;
HvLpEvent_Rc hvrc;
- DECLARE_COMPLETION(done);
+ DECLARE_COMPLETION_ONSTACK(done);
struct device_node *node;
const char *sysid;
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index a4619347aa7..242f8095c2d 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd)
}
#ifdef CONFIG_SMP
-static raw_spinlock_t timebase_lock;
+static arch_spinlock_t timebase_lock;
static unsigned long timebase;
static void __devinit pas_give_timebase(void)
@@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void)
local_irq_save(flags);
hard_irq_disable();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
mtspr(SPRN_TBCTL, TBCTL_FREEZE);
isync();
timebase = get_tb();
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
while (timebase)
barrier();
@@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void)
while (!timebase)
smp_rmb();
- __raw_spin_lock(&timebase_lock);
+ arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- __raw_spin_unlock(&timebase_lock);
+ arch_spin_unlock(&timebase_lock);
}
struct smp_ops_t pas_smp_ops = {
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index cf660916ae0..9dd789a7370 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/page.h>
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index d212006a5b3..09e82729627 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -152,12 +152,12 @@ static unsigned int pmac_startup_irq(unsigned int virq)
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
- spin_lock_irqsave(&pmac_pic_lock, flags);
- if ((irq_desc[virq].status & IRQ_LEVEL) == 0)
+ spin_lock_irqsave(&pmac_pic_lock, flags);
+ if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0)
out_le32(&pmac_irq_hw[i]->ack, bit);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 0;
}
@@ -195,7 +195,7 @@ static int pmac_retrigger(unsigned int virq)
}
static struct irq_chip pmac_pic = {
- .typename = " PMAC-PIC ",
+ .name = " PMAC-PIC ",
.startup = pmac_startup_irq,
.mask = pmac_mask_irq,
.ack = pmac_ack_irq,
@@ -285,7 +285,7 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node)
static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
int level;
if (hw >= max_irqs)
@@ -303,7 +303,7 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
}
static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 8ec5ccf76b1..59d9712d736 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -152,7 +152,7 @@ static void ps3_chip_eoi(unsigned int virq)
*/
static struct irq_chip ps3_irq_chip = {
- .typename = "ps3",
+ .name = "ps3",
.mask = ps3_chip_mask,
.unmask = ps3_chip_unmask,
.eoi = ps3_chip_eoi,
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 189a25b8073..e81b028a2a4 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -34,7 +34,7 @@
#if defined(DEBUG)
#define DBG udbg_printf
#else
-#define DBG pr_debug
+#define DBG pr_devel
#endif
enum {
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index f0e6f28427b..c667f0f02c3 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -2,8 +2,11 @@ config PPC_PSERIES
depends on PPC64 && PPC_BOOK3S
bool "IBM pSeries & new (POWER5-based) iSeries"
select MPIC
+ select PCI_MSI
+ select XICS
select PPC_I8259
select PPC_RTAS
+ select PPC_RTAS_DAEMON
select RTAS_ERROR_LOGGING
select PPC_UDBG_16550
select PPC_NATIVE
@@ -59,7 +62,7 @@ config PPC_SMLPAR
config CMM
tristate "Collaborative memory management"
- depends on PPC_SMLPAR && !CRASH_DUMP
+ depends on PPC_SMLPAR
default y
help
Select this option, if you want to enable the kernel interface
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 790c0b872d4..0ff5174ae4f 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -7,8 +7,8 @@ EXTRA_CFLAGS += -DDEBUG
endif
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
- setup.o iommu.o ras.o rtasd.o \
- firmware.o power.o
+ setup.o iommu.o ras.o \
+ firmware.o power.o dlpar.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 6567439fe78..a277f2e28db 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -38,19 +38,28 @@
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
+#include <linux/memory.h>
#include "plpar_wrappers.h"
#define CMM_DRIVER_VERSION "1.0.0"
#define CMM_DEFAULT_DELAY 1
+#define CMM_HOTPLUG_DELAY 5
#define CMM_DEBUG 0
#define CMM_DISABLE 0
#define CMM_OOM_KB 1024
#define CMM_MIN_MEM_MB 256
#define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
+/*
+ * The priority level tries to ensure that this notifier is called as
+ * late as possible to reduce thrashing in the shared memory pool.
+ */
+#define CMM_MEM_HOTPLUG_PRI 1
+#define CMM_MEM_ISOLATE_PRI 15
static unsigned int delay = CMM_DEFAULT_DELAY;
+static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
@@ -65,6 +74,10 @@ MODULE_VERSION(CMM_DRIVER_VERSION);
module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
"[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
+module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(delay, "Delay (in seconds) after memory hotplug remove "
+ "before loaning resumes. "
+ "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
"[Default=" __stringify(CMM_OOM_KB) "]");
@@ -92,6 +105,9 @@ static unsigned long oom_freed_pages;
static struct cmm_page_array *cmm_page_list;
static DEFINE_SPINLOCK(cmm_lock);
+static DEFINE_MUTEX(hotplug_mutex);
+static int hotplug_occurred; /* protected by the hotplug mutex */
+
static struct task_struct *cmm_thread_ptr;
/**
@@ -110,6 +126,17 @@ static long cmm_alloc_pages(long nr)
cmm_dbg("Begin request for %ld pages\n", nr);
while (nr) {
+ /* Exit if a hotplug operation is in progress or occurred */
+ if (mutex_trylock(&hotplug_mutex)) {
+ if (hotplug_occurred) {
+ mutex_unlock(&hotplug_mutex);
+ break;
+ }
+ mutex_unlock(&hotplug_mutex);
+ } else {
+ break;
+ }
+
addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
__GFP_NORETRY | __GFP_NOMEMALLOC);
if (!addr)
@@ -119,8 +146,9 @@ static long cmm_alloc_pages(long nr)
if (!pa || pa->index >= CMM_NR_PAGES) {
/* Need a new page for the page list. */
spin_unlock(&cmm_lock);
- npa = (struct cmm_page_array *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
- __GFP_NORETRY | __GFP_NOMEMALLOC);
+ npa = (struct cmm_page_array *)__get_free_page(
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_NORETRY | __GFP_NOMEMALLOC);
if (!npa) {
pr_info("%s: Can not allocate new page list\n", __func__);
free_page(addr);
@@ -229,8 +257,9 @@ static void cmm_get_mpp(void)
{
int rc;
struct hvcall_mpp_data mpp_data;
- unsigned long active_pages_target;
- signed long page_loan_request;
+ signed long active_pages_target, page_loan_request, target;
+ signed long total_pages = totalram_pages + loaned_pages;
+ signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
rc = h_get_mpp(&mpp_data);
@@ -238,17 +267,25 @@ static void cmm_get_mpp(void)
return;
page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
- loaned_pages_target = page_loan_request + loaned_pages;
- if (loaned_pages_target > oom_freed_pages)
- loaned_pages_target -= oom_freed_pages;
+ target = page_loan_request + (signed long)loaned_pages;
+
+ if (target < 0 || total_pages < min_mem_pages)
+ target = 0;
+
+ if (target > oom_freed_pages)
+ target -= oom_freed_pages;
else
- loaned_pages_target = 0;
+ target = 0;
+
+ active_pages_target = total_pages - target;
- active_pages_target = totalram_pages + loaned_pages - loaned_pages_target;
+ if (min_mem_pages > active_pages_target)
+ target = total_pages - min_mem_pages;
- if ((min_mem_mb * 1024 * 1024) > (active_pages_target * PAGE_SIZE))
- loaned_pages_target = totalram_pages + loaned_pages -
- ((min_mem_mb * 1024 * 1024) / PAGE_SIZE);
+ if (target < 0)
+ target = 0;
+
+ loaned_pages_target = target;
cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
page_loan_request, loaned_pages, loaned_pages_target,
@@ -273,9 +310,28 @@ static int cmm_thread(void *dummy)
while (1) {
timeleft = msleep_interruptible(delay * 1000);
- if (kthread_should_stop() || timeleft) {
- loaned_pages_target = loaned_pages;
+ if (kthread_should_stop() || timeleft)
break;
+
+ if (mutex_trylock(&hotplug_mutex)) {
+ if (hotplug_occurred) {
+ hotplug_occurred = 0;
+ mutex_unlock(&hotplug_mutex);
+ cmm_dbg("Hotplug operation has occurred, "
+ "loaning activity suspended "
+ "for %d seconds.\n",
+ hotplug_delay);
+ timeleft = msleep_interruptible(hotplug_delay *
+ 1000);
+ if (kthread_should_stop() || timeleft)
+ break;
+ continue;
+ }
+ mutex_unlock(&hotplug_mutex);
+ } else {
+ cmm_dbg("Hotplug operation in progress, activity "
+ "suspended\n");
+ continue;
}
cmm_get_mpp();
@@ -405,6 +461,193 @@ static struct notifier_block cmm_reboot_nb = {
};
/**
+ * cmm_count_pages - Count the number of pages loaned in a particular range.
+ *
+ * @arg: memory_isolate_notify structure with address range and count
+ *
+ * Return value:
+ * 0 on success
+ **/
+static unsigned long cmm_count_pages(void *arg)
+{
+ struct memory_isolate_notify *marg = arg;
+ struct cmm_page_array *pa;
+ unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
+ unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
+ unsigned long idx;
+
+ spin_lock(&cmm_lock);
+ pa = cmm_page_list;
+ while (pa) {
+ if ((unsigned long)pa >= start && (unsigned long)pa < end)
+ marg->pages_found++;
+ for (idx = 0; idx < pa->index; idx++)
+ if (pa->page[idx] >= start && pa->page[idx] < end)
+ marg->pages_found++;
+ pa = pa->next;
+ }
+ spin_unlock(&cmm_lock);
+ return 0;
+}
+
+/**
+ * cmm_memory_isolate_cb - Handle memory isolation notifier calls
+ * @self: notifier block struct
+ * @action: action to take
+ * @arg: struct memory_isolate_notify data for handler
+ *
+ * Return value:
+ * NOTIFY_OK or notifier error based on subfunction return value
+ **/
+static int cmm_memory_isolate_cb(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ int ret = 0;
+
+ if (action == MEM_ISOLATE_COUNT)
+ ret = cmm_count_pages(arg);
+
+ if (ret)
+ ret = notifier_from_errno(ret);
+ else
+ ret = NOTIFY_OK;
+
+ return ret;
+}
+
+static struct notifier_block cmm_mem_isolate_nb = {
+ .notifier_call = cmm_memory_isolate_cb,
+ .priority = CMM_MEM_ISOLATE_PRI
+};
+
+/**
+ * cmm_mem_going_offline - Unloan pages where memory is to be removed
+ * @arg: memory_notify structure with page range to be offlined
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int cmm_mem_going_offline(void *arg)
+{
+ struct memory_notify *marg = arg;
+ unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
+ unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
+ struct cmm_page_array *pa_curr, *pa_last, *npa;
+ unsigned long idx;
+ unsigned long freed = 0;
+
+ cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
+ start_page, marg->nr_pages);
+ spin_lock(&cmm_lock);
+
+ /* Search the page list for pages in the range to be offlined */
+ pa_last = pa_curr = cmm_page_list;
+ while (pa_curr) {
+ for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
+ if ((pa_curr->page[idx] < start_page) ||
+ (pa_curr->page[idx] >= end_page))
+ continue;
+
+ plpar_page_set_active(__pa(pa_curr->page[idx]));
+ free_page(pa_curr->page[idx]);
+ freed++;
+ loaned_pages--;
+ totalram_pages++;
+ pa_curr->page[idx] = pa_last->page[--pa_last->index];
+ if (pa_last->index == 0) {
+ if (pa_curr == pa_last)
+ pa_curr = pa_last->next;
+ pa_last = pa_last->next;
+ free_page((unsigned long)cmm_page_list);
+ cmm_page_list = pa_last;
+ continue;
+ }
+ }
+ pa_curr = pa_curr->next;
+ }
+
+ /* Search for page list structures in the range to be offlined */
+ pa_last = NULL;
+ pa_curr = cmm_page_list;
+ while (pa_curr) {
+ if (((unsigned long)pa_curr >= start_page) &&
+ ((unsigned long)pa_curr < end_page)) {
+ npa = (struct cmm_page_array *)__get_free_page(
+ GFP_NOIO | __GFP_NOWARN |
+ __GFP_NORETRY | __GFP_NOMEMALLOC);
+ if (!npa) {
+ spin_unlock(&cmm_lock);
+ cmm_dbg("Failed to allocate memory for list "
+ "management. Memory hotplug "
+ "failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(npa, pa_curr, PAGE_SIZE);
+ if (pa_curr == cmm_page_list)
+ cmm_page_list = npa;
+ if (pa_last)
+ pa_last->next = npa;
+ free_page((unsigned long) pa_curr);
+ freed++;
+ pa_curr = npa;
+ }
+
+ pa_last = pa_curr;
+ pa_curr = pa_curr->next;
+ }
+
+ spin_unlock(&cmm_lock);
+ cmm_dbg("Released %ld pages in the search range.\n", freed);
+
+ return 0;
+}
+
+/**
+ * cmm_memory_cb - Handle memory hotplug notifier calls
+ * @self: notifier block struct
+ * @action: action to take
+ * @arg: struct memory_notify data for handler
+ *
+ * Return value:
+ * NOTIFY_OK or notifier error based on subfunction return value
+ *
+ **/
+static int cmm_memory_cb(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ int ret = 0;
+
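+ /*
+  * The hotplug mutex is taken on MEM_GOING_OFFLINE and only released
+  * on MEM_OFFLINE/MEM_CANCEL_OFFLINE, so cmm_thread and cmm_alloc_pages
+  * will not loan pages while the memory operation is in flight.
+  */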
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ mutex_lock(&hotplug_mutex);
+ hotplug_occurred = 1;
+ ret = cmm_mem_going_offline(arg);
+ break;
+ case MEM_OFFLINE:
+ case MEM_CANCEL_OFFLINE:
+ mutex_unlock(&hotplug_mutex);
+ cmm_dbg("Memory offline operation complete.\n");
+ break;
+ case MEM_GOING_ONLINE:
+ case MEM_ONLINE:
+ case MEM_CANCEL_ONLINE:
+ break;
+ }
+
+ if (ret)
+ ret = notifier_from_errno(ret);
+ else
+ ret = NOTIFY_OK;
+
+ return ret;
+}
+
+static struct notifier_block cmm_mem_nb = {
+ .notifier_call = cmm_memory_cb,
+ .priority = CMM_MEM_HOTPLUG_PRI
+};
+
+/**
* cmm_init - Module initialization
*
* Return value:
@@ -426,18 +669,24 @@ static int cmm_init(void)
if ((rc = cmm_sysfs_register(&cmm_sysdev)))
goto out_reboot_notifier;
+ if (register_memory_notifier(&cmm_mem_nb) ||
+ register_memory_isolate_notifier(&cmm_mem_isolate_nb))
+ goto out_unregister_notifier;
+
if (cmm_disabled)
return rc;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (IS_ERR(cmm_thread_ptr)) {
rc = PTR_ERR(cmm_thread_ptr);
- goto out_unregister_sysfs;
+ goto out_unregister_notifier;
}
return rc;
-out_unregister_sysfs:
+out_unregister_notifier:
+ unregister_memory_notifier(&cmm_mem_nb);
+ unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
cmm_unregister_sysfs(&cmm_sysdev);
out_reboot_notifier:
unregister_reboot_notifier(&cmm_reboot_nb);
@@ -458,6 +707,8 @@ static void cmm_exit(void)
kthread_stop(cmm_thread_ptr);
unregister_oom_notifier(&cmm_oom_nb);
unregister_reboot_notifier(&cmm_reboot_nb);
+ unregister_memory_notifier(&cmm_mem_nb);
+ unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
cmm_free_pages(loaned_pages);
cmm_unregister_sysfs(&cmm_sysdev);
}
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
new file mode 100644
index 00000000000..67b7a10f9fc
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -0,0 +1,560 @@
+/*
+ * Support for dynamic reconfiguration for PCI, Memory, and CPU
+ * Hotplug and Dynamic Logical Partitioning on RPA platforms.
+ *
+ * Copyright (C) 2009 Nathan Fontenot
+ * Copyright (C) 2009 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/notifier.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include "offline_states.h"
+
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/uaccess.h>
+#include <asm/rtas.h>
+#include <asm/pSeries_reconfig.h>
+
+struct cc_workarea {
+ u32 drc_index;
+ u32 zero;
+ u32 name_offset;
+ u32 prop_length;
+ u32 prop_offset;
+};
+
+static void dlpar_free_cc_property(struct property *prop)
+{
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+}
+
+static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
+{
+ struct property *prop;
+ char *name;
+ char *value;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return NULL;
+
+ name = (char *)ccwa + ccwa->name_offset;
+ prop->name = kstrdup(name, GFP_KERNEL);
+
+ prop->length = ccwa->prop_length;
+ value = (char *)ccwa + ccwa->prop_offset;
+ prop->value = kzalloc(prop->length, GFP_KERNEL);
+ if (!prop->value) {
+ dlpar_free_cc_property(prop);
+ return NULL;
+ }
+
+ memcpy(prop->value, value, prop->length);
+ return prop;
+}
+
+static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
+{
+ struct device_node *dn;
+ char *name;
+
+ dn = kzalloc(sizeof(*dn), GFP_KERNEL);
+ if (!dn)
+ return NULL;
+
+ /* The configure connector reported name does not contain a
+ * preceding '/', so we allocate a buffer large enough to
+ * prepend this to the full_name.
+ */
+ name = (char *)ccwa + ccwa->name_offset;
+ dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+ if (!dn->full_name) {
+ kfree(dn);
+ return NULL;
+ }
+
+ sprintf(dn->full_name, "/%s", name);
+ return dn;
+}
+
+static void dlpar_free_one_cc_node(struct device_node *dn)
+{
+ struct property *prop;
+
+ while (dn->properties) {
+ prop = dn->properties;
+ dn->properties = prop->next;
+ dlpar_free_cc_property(prop);
+ }
+
+ kfree(dn->full_name);
+ kfree(dn);
+}
+
+static void dlpar_free_cc_nodes(struct device_node *dn)
+{
+ if (dn->child)
+ dlpar_free_cc_nodes(dn->child);
+
+ if (dn->sibling)
+ dlpar_free_cc_nodes(dn->sibling);
+
+ dlpar_free_one_cc_node(dn);
+}
+
+#define NEXT_SIBLING 1
+#define NEXT_CHILD 2
+#define NEXT_PROPERTY 3
+#define PREV_PARENT 4
+#define MORE_MEMORY 5
+#define CALL_AGAIN -2
+#define ERR_CFG_USE -9003
+
+struct device_node *dlpar_configure_connector(u32 drc_index)
+{
+ struct device_node *dn;
+ struct device_node *first_dn = NULL;
+ struct device_node *last_dn = NULL;
+ struct property *property;
+ struct property *last_property = NULL;
+ struct cc_workarea *ccwa;
+ int cc_token;
+ int rc;
+
+ cc_token = rtas_token("ibm,configure-connector");
+ if (cc_token == RTAS_UNKNOWN_SERVICE)
+ return NULL;
+
+ spin_lock(&rtas_data_buf_lock);
+ ccwa = (struct cc_workarea *)&rtas_data_buf[0];
+ ccwa->drc_index = drc_index;
+ ccwa->zero = 0;
+
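+ /*
+  * Walk the configure-connector state machine: each call reports the
+  * next sibling, child or property of the sub-tree being configured,
+  * or asks us to back up to the parent node.
+  */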
+ rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+ while (rc) {
+ switch (rc) {
+ case NEXT_SIBLING:
+ dn = dlpar_parse_cc_node(ccwa);
+ if (!dn)
+ goto cc_error;
+
+ dn->parent = last_dn->parent;
+ last_dn->sibling = dn;
+ last_dn = dn;
+ break;
+
+ case NEXT_CHILD:
+ dn = dlpar_parse_cc_node(ccwa);
+ if (!dn)
+ goto cc_error;
+
+ if (!first_dn)
+ first_dn = dn;
+ else {
+ dn->parent = last_dn;
+ if (last_dn)
+ last_dn->child = dn;
+ }
+
+ last_dn = dn;
+ break;
+
+ case NEXT_PROPERTY:
+ property = dlpar_parse_cc_property(ccwa);
+ if (!property)
+ goto cc_error;
+
+ if (!last_dn->properties)
+ last_dn->properties = property;
+ else
+ last_property->next = property;
+
+ last_property = property;
+ break;
+
+ case PREV_PARENT:
+ last_dn = last_dn->parent;
+ break;
+
+ case CALL_AGAIN:
+ break;
+
+ case MORE_MEMORY:
+ case ERR_CFG_USE:
+ default:
+ printk(KERN_ERR "Unexpected Error (%d) "
+ "returned from configure-connector\n", rc);
+ goto cc_error;
+ }
+
+ rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+ }
+
+ spin_unlock(&rtas_data_buf_lock);
+ return first_dn;
+
+cc_error:
+ if (first_dn)
+ dlpar_free_cc_nodes(first_dn);
+ spin_unlock(&rtas_data_buf_lock);
+ return NULL;
+}
+
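+/* Look up the device_node for the parent of @path, i.e. the path up to its last '/'. */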
+static struct device_node *derive_parent(const char *path)
+{
+ struct device_node *parent;
+ char *last_slash;
+
+ last_slash = strrchr(path, '/');
+ if (last_slash == path) {
+ parent = of_find_node_by_path("/");
+ } else {
+ char *parent_path;
+ int parent_path_len = last_slash - path + 1;
+ parent_path = kmalloc(parent_path_len, GFP_KERNEL);
+ if (!parent_path)
+ return NULL;
+
+ strlcpy(parent_path, path, parent_path_len);
+ parent = of_find_node_by_path(parent_path);
+ kfree(parent_path);
+ }
+
+ return parent;
+}
+
+int dlpar_attach_node(struct device_node *dn)
+{
+ struct proc_dir_entry *ent;
+ int rc;
+
+ of_node_set_flag(dn, OF_DYNAMIC);
+ kref_init(&dn->kref);
+ dn->parent = derive_parent(dn->full_name);
+ if (!dn->parent)
+ return -ENOMEM;
+
+ rc = blocking_notifier_call_chain(&pSeries_reconfig_chain,
+ PSERIES_RECONFIG_ADD, dn);
+ if (rc == NOTIFY_BAD) {
+ printk(KERN_ERR "Failed to add device node %s\n",
+ dn->full_name);
+ return -ENOMEM; /* For now, safe to assume kmalloc failure */
+ }
+
+ of_attach_node(dn);
+
+#ifdef CONFIG_PROC_DEVICETREE
+ ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
+ if (ent)
+ proc_device_tree_add_node(dn, ent);
+#endif
+
+ of_node_put(dn->parent);
+ return 0;
+}
+
+int dlpar_detach_node(struct device_node *dn)
+{
+ struct device_node *parent = dn->parent;
+ struct property *prop = dn->properties;
+
+#ifdef CONFIG_PROC_DEVICETREE
+ while (prop) {
+ remove_proc_entry(prop->name, dn->pde);
+ prop = prop->next;
+ }
+
+ if (dn->pde)
+ remove_proc_entry(dn->pde->name, parent->pde);
+#endif
+
+ blocking_notifier_call_chain(&pSeries_reconfig_chain,
+ PSERIES_RECONFIG_REMOVE, dn);
+ of_detach_node(dn);
+ of_node_put(dn); /* Must decrement the refcount */
+
+ return 0;
+}
+
+#define DR_ENTITY_SENSE 9003
+#define DR_ENTITY_PRESENT 1
+#define DR_ENTITY_UNUSABLE 2
+#define ALLOCATION_STATE 9003
+#define ALLOC_UNUSABLE 0
+#define ALLOC_USABLE 1
+#define ISOLATION_STATE 9001
+#define ISOLATE 0
+#define UNISOLATE 1
+
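+/*
+ * Acquiring a DRC marks it usable and then un-isolates it; releasing
+ * reverses the order, isolating first and then marking it unusable.
+ */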
+int dlpar_acquire_drc(u32 drc_index)
+{
+ int dr_status, rc;
+
+ rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
+ DR_ENTITY_SENSE, drc_index);
+ if (rc || dr_status != DR_ENTITY_UNUSABLE)
+ return -1;
+
+ rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
+ if (rc)
+ return rc;
+
+ rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
+ if (rc) {
+ rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
+ return rc;
+ }
+
+ return 0;
+}
+
+int dlpar_release_drc(u32 drc_index)
+{
+ int dr_status, rc;
+
+ rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
+ DR_ENTITY_SENSE, drc_index);
+ if (rc || dr_status != DR_ENTITY_PRESENT)
+ return -1;
+
+ rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
+ if (rc)
+ return rc;
+
+ rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
+ if (rc) {
+ rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
+ return rc;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+
+static DEFINE_MUTEX(pseries_cpu_hotplug_mutex);
+
+void cpu_hotplug_driver_lock(void)
+__acquires(pseries_cpu_hotplug_mutex)
+{
+ mutex_lock(&pseries_cpu_hotplug_mutex);
+}
+
+void cpu_hotplug_driver_unlock(void)
+__releases(pseries_cpu_hotplug_mutex)
+{
+ mutex_unlock(&pseries_cpu_hotplug_mutex);
+}
+
+static int dlpar_online_cpu(struct device_node *dn)
+{
+ int rc = 0;
+ unsigned int cpu;
+ int len, nthreads, i;
+ const u32 *intserv;
+
+ intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+ if (!intserv)
+ return -EINVAL;
+
+ nthreads = len / sizeof(u32);
+
+ cpu_maps_update_begin();
+ for (i = 0; i < nthreads; i++) {
+ for_each_present_cpu(cpu) {
+ if (get_hard_smp_processor_id(cpu) != intserv[i])
+ continue;
+ BUG_ON(get_cpu_current_state(cpu)
+ != CPU_STATE_OFFLINE);
+ cpu_maps_update_done();
+ rc = cpu_up(cpu);
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+
+ break;
+ }
+ if (cpu == num_possible_cpus())
+ printk(KERN_WARNING "Could not find cpu to online "
+ "with physical id 0x%x\n", intserv[i]);
+ }
+ cpu_maps_update_done();
+
+out:
+ return rc;
+
+}
+
+static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
+{
+ struct device_node *dn;
+ unsigned long drc_index;
+ char *cpu_name;
+ int rc;
+
+ cpu_hotplug_driver_lock();
+ rc = strict_strtoul(buf, 0, &drc_index);
+ if (rc) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dn = dlpar_configure_connector(drc_index);
+ if (!dn) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* configure-connector reports cpus as living in the base
+ * directory of the device tree. CPUs actually live in the
+ * cpus directory so we need to fixup the full_name.
+ */
+ cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
+ GFP_KERNEL);
+ if (!cpu_name) {
+ dlpar_free_cc_nodes(dn);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sprintf(cpu_name, "/cpus%s", dn->full_name);
+ kfree(dn->full_name);
+ dn->full_name = cpu_name;
+
+ rc = dlpar_acquire_drc(drc_index);
+ if (rc) {
+ dlpar_free_cc_nodes(dn);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = dlpar_attach_node(dn);
+ if (rc) {
+ dlpar_release_drc(drc_index);
+ dlpar_free_cc_nodes(dn);
+ }
+
+ rc = dlpar_online_cpu(dn);
+out:
+ cpu_hotplug_driver_unlock();
+
+ return rc ? rc : count;
+}
+
+static int dlpar_offline_cpu(struct device_node *dn)
+{
+ int rc = 0;
+ unsigned int cpu;
+ int len, nthreads, i;
+ const u32 *intserv;
+
+ intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+ if (!intserv)
+ return -EINVAL;
+
+ nthreads = len / sizeof(u32);
+
+ cpu_maps_update_begin();
+ for (i = 0; i < nthreads; i++) {
+ for_each_present_cpu(cpu) {
+ if (get_hard_smp_processor_id(cpu) != intserv[i])
+ continue;
+
+ if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
+ break;
+
+ if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+ cpu_maps_update_done();
+ rc = cpu_down(cpu);
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+ break;
+
+ }
+
+ /*
+ * The cpu is in CPU_STATE_INACTIVE.
+ * Upgrade its state to CPU_STATE_OFFLINE.
+ */
+ set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+ BUG_ON(plpar_hcall_norets(H_PROD, intserv[i])
+ != H_SUCCESS);
+ __cpu_die(cpu);
+ break;
+ }
+ if (cpu == num_possible_cpus())
+ printk(KERN_WARNING "Could not find cpu to offline "
+ "with physical id 0x%x\n", intserv[i]);
+ }
+ cpu_maps_update_done();
+
+out:
+ return rc;
+
+}
+
+static ssize_t dlpar_cpu_release(const char *buf, size_t count)
+{
+ struct device_node *dn;
+ const u32 *drc_index;
+ int rc;
+
+ dn = of_find_node_by_path(buf);
+ if (!dn)
+ return -EINVAL;
+
+ drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
+ if (!drc_index) {
+ of_node_put(dn);
+ return -EINVAL;
+ }
+
+ cpu_hotplug_driver_lock();
+ rc = dlpar_offline_cpu(dn);
+ if (rc) {
+ of_node_put(dn);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = dlpar_release_drc(*drc_index);
+ if (rc) {
+ of_node_put(dn);
+ goto out;
+ }
+
+ rc = dlpar_detach_node(dn);
+ if (rc) {
+ dlpar_acquire_drc(*drc_index);
+ goto out;
+ }
+
+ of_node_put(dn);
+out:
+ cpu_hotplug_driver_unlock();
+ return rc ? rc : count;
+}
+
+static int __init pseries_dlpar_init(void)
+{
+ ppc_md.cpu_probe = dlpar_cpu_probe;
+ ppc_md.cpu_release = dlpar_cpu_release;
+
+ return 0;
+}
+machine_device_initcall(pseries, pseries_dlpar_init);
+
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 937a544a236..c5f3116b6ca 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -54,7 +54,7 @@ struct dtl {
int buf_entries;
u64 last_idx;
};
-static DEFINE_PER_CPU(struct dtl, dtl);
+static DEFINE_PER_CPU(struct dtl, cpu_dtl);
/*
* Dispatch trace log event mask:
@@ -261,7 +261,7 @@ static int dtl_init(void)
/* set up the per-cpu log structures */
for_each_possible_cpu(i) {
- struct dtl *dtl = &per_cpu(dtl, i);
+ struct dtl *dtl = &per_cpu(cpu_dtl, i);
dtl->cpu = i;
rc = dtl_setup_file(dtl);
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 0e8db677125..ef8e4544848 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -63,22 +63,6 @@ static void print_device_node_tree(struct pci_dn *pdn, int dent)
}
#endif
-/**
- * irq_in_use - return true if this irq is being used
- */
-static int irq_in_use(unsigned int irq)
-{
- int rc = 0;
- unsigned long flags;
- struct irq_desc *desc = irq_desc + irq;
-
- spin_lock_irqsave(&desc->lock, flags);
- if (desc->action)
- rc = 1;
- spin_unlock_irqrestore(&desc->lock, flags);
- return rc;
-}
-
/**
* eeh_disable_irq - disable interrupt for the recovering device
*/
@@ -93,7 +77,7 @@ static void eeh_disable_irq(struct pci_dev *dev)
if (dev->msi_enabled || dev->msix_enabled)
return;
- if (!irq_in_use(dev->irq))
+ if (!irq_has_action(dev->irq))
return;
PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index ebff6d9a4e3..6ea4698d917 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -30,6 +30,7 @@
#include <asm/pSeries_reconfig.h>
#include "xics.h"
#include "plpar_wrappers.h"
+#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
@@ -39,6 +40,55 @@ static struct rtas_args rtas_stop_self_args = {
.rets = &rtas_stop_self_args.args[0],
};
+static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
+ CPU_STATE_OFFLINE;
+static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;
+
+static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;
+
+static int cede_offline_enabled __read_mostly = 1;
+
+/*
+ * Enable/disable cede_offline when available.
+ */
+static int __init setup_cede_offline(char *str)
+{
+ if (!strcmp(str, "off"))
+ cede_offline_enabled = 0;
+ else if (!strcmp(str, "on"))
+ cede_offline_enabled = 1;
+ else
+ return 0;
+ return 1;
+}
+
+__setup("cede_offline=", setup_cede_offline);
+
+enum cpu_state_vals get_cpu_current_state(int cpu)
+{
+ return per_cpu(current_state, cpu);
+}
+
+void set_cpu_current_state(int cpu, enum cpu_state_vals state)
+{
+ per_cpu(current_state, cpu) = state;
+}
+
+enum cpu_state_vals get_preferred_offline_state(int cpu)
+{
+ return per_cpu(preferred_offline_state, cpu);
+}
+
+void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
+{
+ per_cpu(preferred_offline_state, cpu) = state;
+}
+
+void set_default_offline_state(int cpu)
+{
+ per_cpu(preferred_offline_state, cpu) = default_offline_state;
+}
+
static void rtas_stop_self(void)
{
struct rtas_args *args = &rtas_stop_self_args;
@@ -56,11 +106,61 @@ static void rtas_stop_self(void)
static void pseries_mach_cpu_die(void)
{
+ unsigned int cpu = smp_processor_id();
+ unsigned int hwcpu = hard_smp_processor_id();
+ u8 cede_latency_hint = 0;
+
local_irq_disable();
idle_task_exit();
xics_teardown_cpu();
- unregister_slb_shadow(hard_smp_processor_id(), __pa(get_slb_shadow()));
- rtas_stop_self();
+
+ if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
+ set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
+ cede_latency_hint = 2;
+
+ get_lppaca()->idle = 1;
+ if (!get_lppaca()->shared_proc)
+ get_lppaca()->donate_dedicated_cpu = 1;
+
+ printk(KERN_INFO
+ "cpu %u (hwid %u) ceding for offline with hint %d\n",
+ cpu, hwcpu, cede_latency_hint);
+ while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
+ extended_cede_processor(cede_latency_hint);
+ printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n",
+ cpu, hwcpu);
+ printk(KERN_INFO
+ "Decrementer value = %x Timebase value = %llx\n",
+ get_dec(), get_tb());
+ }
+
+ printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n",
+ cpu, hwcpu);
+
+ if (!get_lppaca()->shared_proc)
+ get_lppaca()->donate_dedicated_cpu = 0;
+ get_lppaca()->idle = 0;
+ }
+
+ if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
+ unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
+
+ /*
+ * NOTE: Calling start_secondary() here for now to
+ * start a new context.
+ * However, this should be done cleanly by resetting
+ * the stack pointer first.
+ */
+ start_secondary();
+
+ } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
+
+ set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
+ unregister_slb_shadow(hard_smp_processor_id(),
+ __pa(get_slb_shadow()));
+ rtas_stop_self();
+ }
+
/* Should never get here... */
BUG();
for(;;);
@@ -106,18 +206,43 @@ static int pseries_cpu_disable(void)
return 0;
}
+/*
+ * pseries_cpu_die: Wait for the cpu to die.
+ * @cpu: logical processor id of the CPU whose death we're awaiting.
+ *
+ * This function is called from the context of the thread which is performing
+ * the cpu-offline. Here we wait for long enough to allow the cpu in question
+ * to self-destruct so that the cpu-offline thread can send the CPU_DEAD
+ * notifications.
+ *
+ * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
+ * self-destruct.
+ */
static void pseries_cpu_die(unsigned int cpu)
{
int tries;
- int cpu_status;
+ int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
- for (tries = 0; tries < 25; tries++) {
- cpu_status = query_cpu_stopped(pcpu);
- if (cpu_status == 0 || cpu_status == -1)
- break;
- cpu_relax();
+ if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
+ cpu_status = 1;
+ for (tries = 0; tries < 1000; tries++) {
+ if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
+ cpu_status = 0;
+ break;
+ }
+ cpu_relax();
+ }
+ } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
+
+ for (tries = 0; tries < 25; tries++) {
+ cpu_status = query_cpu_stopped(pcpu);
+ if (cpu_status == 0 || cpu_status == -1)
+ break;
+ cpu_relax();
+ }
}
+
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
cpu, pcpu, cpu_status);
@@ -252,10 +377,41 @@ static struct notifier_block pseries_smp_nb = {
.notifier_call = pseries_smp_notifier,
};
+#define MAX_CEDE_LATENCY_LEVELS 4
+#define CEDE_LATENCY_PARAM_LENGTH 10
+#define CEDE_LATENCY_PARAM_MAX_LENGTH \
+ (MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
+#define CEDE_LATENCY_TOKEN 45
+
+static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];
+
+static int parse_cede_parameters(void)
+{
+ int call_status;
+
+ memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
+ call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
+ NULL,
+ CEDE_LATENCY_TOKEN,
+ __pa(cede_parameters),
+ CEDE_LATENCY_PARAM_MAX_LENGTH);
+
+ if (call_status != 0)
+ printk(KERN_INFO "CEDE_LATENCY: \
+ %s %s Error calling get-system-parameter(0x%x)\n",
+ __FILE__, __func__, call_status);
+ else
+ printk(KERN_INFO "CEDE_LATENCY: \
+ get-system-parameter successful.\n");
+
+ return call_status;
+}
+
static int __init pseries_cpu_hotplug_init(void)
{
struct device_node *np;
const char *typep;
+ int cpu;
for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL);
@@ -283,8 +439,16 @@ static int __init pseries_cpu_hotplug_init(void)
smp_ops->cpu_die = pseries_cpu_die;
/* Processors can be added/removed only on LPAR */
- if (firmware_has_feature(FW_FEATURE_LPAR))
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
pSeries_reconfig_notifier_register(&pseries_smp_nb);
+ cpu_maps_update_begin();
+ if (cede_offline_enabled && parse_cede_parameters() == 0) {
+ default_offline_state = CPU_STATE_INACTIVE;
+ for_each_online_cpu(cpu)
+ set_default_offline_state(cpu);
+ }
+ cpu_maps_update_done();
+ }
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
new file mode 100644
index 00000000000..22574e0d9d9
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -0,0 +1,18 @@
+#ifndef _OFFLINE_STATES_H_
+#define _OFFLINE_STATES_H_
+
+/* Cpu offline states go here */
+enum cpu_state_vals {
+ CPU_STATE_OFFLINE,
+ CPU_STATE_INACTIVE,
+ CPU_STATE_ONLINE,
+ CPU_MAX_OFFLINE_STATES
+};
+
+extern enum cpu_state_vals get_cpu_current_state(int cpu);
+extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
+extern enum cpu_state_vals get_preferred_offline_state(int cpu);
+extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
+extern void set_default_offline_state(int cpu);
+extern int start_secondary(void);
+#endif
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a24a6b2333b..0603c91538a 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -9,11 +9,33 @@ static inline long poll_pending(void)
return plpar_hcall_norets(H_POLL_PENDING);
}
+static inline u8 get_cede_latency_hint(void)
+{
+ return get_lppaca()->gpr5_dword.fields.cede_latency_hint;
+}
+
+static inline void set_cede_latency_hint(u8 latency_hint)
+{
+ get_lppaca()->gpr5_dword.fields.cede_latency_hint = latency_hint;
+}
+
static inline long cede_processor(void)
{
return plpar_hcall_norets(H_CEDE);
}
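+/*
+ * Cede the processor with a temporary latency hint: the hint is written
+ * into the lppaca, H_CEDE is issued, and the previous hint is restored.
+ */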
+static inline long extended_cede_processor(unsigned long latency_hint)
+{
+ long rc;
+ u8 old_latency_hint = get_cede_latency_hint();
+
+ set_cede_latency_hint(latency_hint);
+ rc = cede_processor();
+ set_cede_latency_hint(old_latency_hint);
+
+ return rc;
+}
+
static inline long vpa_call(unsigned long flags, unsigned long cpu,
unsigned long vpa)
{
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 2e2bbe120b9..a2305d29bbb 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -96,7 +96,7 @@ static struct device_node *derive_parent(const char *path)
return parent;
}
-static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
+BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -184,7 +184,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
}
/*
- * /proc/ppc64/ofdt - yucky binary interface for adding and removing
+ * /proc/powerpc/ofdt - yucky binary interface for adding and removing
* OF device nodes. Should be deprecated as soon as we get an
* in-kernel wrapper for the RTAS ibm,configure-connector call.
*/
@@ -543,7 +543,7 @@ static const struct file_operations ofdt_fops = {
.write = ofdt_write
};
-/* create /proc/ppc64/ofdt write-only by root */
+/* create /proc/powerpc/ofdt write-only by root */
static int proc_ppc64_create_ofdt(void)
{
struct proc_dir_entry *ent;
@@ -551,7 +551,7 @@ static int proc_ppc64_create_ofdt(void)
if (!machine_is(pseries))
return 0;
- ent = proc_create("ppc64/ofdt", S_IWUSR, NULL, &ofdt_fops);
+ ent = proc_create("powerpc/ofdt", S_IWUSR, NULL, &ofdt_fops);
if (ent)
ent->size = 0;
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 417eca79df6..1b45c458f95 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -13,7 +13,7 @@
* of this data using this driver. A dump exists if the device-tree
* /chosen/ibm,scan-log-data property exists.
*
- * This driver exports /proc/ppc64/scan-log-dump which can be read.
+ * This driver exports /proc/powerpc/scan-log-dump which can be read.
* The driver supports only sequential reads.
*
* The driver looks at a write to the driver for the single word "reset".
@@ -186,7 +186,7 @@ static int __init scanlog_init(void)
if (!data)
goto err;
- ent = proc_create_data("ppc64/rtas/scan-log-dump", S_IRUSR, NULL,
+ ent = proc_create_data("powerpc/rtas/scan-log-dump", S_IRUSR, NULL,
&scanlog_fops, data);
if (!ent)
goto err;
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 440000cc713..b4886635972 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -48,6 +48,7 @@
#include "plpar_wrappers.h"
#include "pseries.h"
#include "xics.h"
+#include "offline_states.h"
/*
@@ -84,6 +85,9 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
/* Fixup atomic count: it exited inside IRQ handler. */
task_thread_info(paca[lcpu].__current)->preempt_count = 0;
+ if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE)
+ goto out;
+
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
@@ -98,6 +102,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
return 0;
}
+out:
return 1;
}
@@ -111,12 +116,16 @@ static void __devinit smp_xics_setup_cpu(int cpu)
vpa_init(cpu);
cpu_clear(cpu, of_spin_map);
+ set_cpu_current_state(cpu, CPU_STATE_ONLINE);
+ set_default_offline_state(cpu);
}
#endif /* CONFIG_XICS */
static void __devinit smp_pSeries_kick_cpu(int nr)
{
+ long rc;
+ unsigned long hcpuid;
BUG_ON(nr < 0 || nr >= NR_CPUS);
if (!smp_startup_cpu(nr))
@@ -128,6 +137,16 @@ static void __devinit smp_pSeries_kick_cpu(int nr)
* the processor will continue on to secondary_start
*/
paca[nr].cpu_start = 1;
+
+ set_preferred_offline_state(nr, CPU_STATE_ONLINE);
+
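+ /* a ceded (inactive) cpu must be prodded out of the hypervisor so it can notice the online request and call start_secondary() */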
+ if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) {
+ hcpuid = get_hard_smp_processor_id(nr);
+ rc = plpar_hcall_norets(H_PROD, hcpuid);
+ if (rc != H_SUCCESS)
+ printk(KERN_ERR "Error: Prod to wake up processor %d\
+ Ret= %ld\n", nr, rc);
+ }
}
static int smp_pSeries_cpu_bootable(unsigned int nr)
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index b9bf0eedccf..b9b9e11609e 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -20,6 +20,7 @@
#include <linux/cpu.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/percpu.h>
#include <asm/firmware.h>
#include <asm/io.h>
@@ -46,6 +47,12 @@ static struct irq_host *xics_host;
*/
#define IPI_PRIORITY 4
+/* The least favored priority */
+#define LOWEST_PRIORITY 0xFF
+
+/* The number of priorities defined above */
+#define MAX_NUM_PRIORITIES 3
+
static unsigned int default_server = 0xFF;
static unsigned int default_distrib_server = 0;
static unsigned int interrupt_server_size = 8;
@@ -56,6 +63,12 @@ static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
+struct xics_cppr {
+ unsigned char stack[MAX_NUM_PRIORITIES];
+ int index;
+};
+
+static DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
/* Direct hardware low level accessors */
@@ -157,7 +170,7 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
cpumask_t cpumask;
cpumask_t tmp = CPU_MASK_NONE;
- cpumask_copy(&cpumask, irq_desc[virq].affinity);
+ cpumask_copy(&cpumask, irq_to_desc(virq)->affinity);
if (!distribute_irqs)
return default_server;
@@ -284,6 +297,19 @@ static inline unsigned int xics_xirr_vector(unsigned int xirr)
return xirr & 0x00ffffff;
}
+static void push_cppr(unsigned int vec)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
+ return;
+
+ if (vec == XICS_IPI)
+ os_cppr->stack[++os_cppr->index] = IPI_PRIORITY;
+ else
+ os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY;
+}
+
static unsigned int xics_get_irq_direct(void)
{
unsigned int xirr = direct_xirr_info_get();
@@ -294,8 +320,10 @@ static unsigned int xics_get_irq_direct(void)
return NO_IRQ;
irq = irq_radix_revmap_lookup(xics_host, vec);
- if (likely(irq != NO_IRQ))
+ if (likely(irq != NO_IRQ)) {
+ push_cppr(vec);
return irq;
+ }
/* We don't have a linux mapping, so have rtas mask it. */
xics_mask_unknown_vec(vec);
@@ -315,8 +343,10 @@ static unsigned int xics_get_irq_lpar(void)
return NO_IRQ;
irq = irq_radix_revmap_lookup(xics_host, vec);
- if (likely(irq != NO_IRQ))
+ if (likely(irq != NO_IRQ)) {
+ push_cppr(vec);
return irq;
+ }
/* We don't have a linux mapping, so have RTAS mask it. */
xics_mask_unknown_vec(vec);
@@ -326,12 +356,22 @@ static unsigned int xics_get_irq_lpar(void)
return NO_IRQ;
}
+static unsigned char pop_cppr(void)
+{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ if (WARN_ON(os_cppr->index < 1))
+ return LOWEST_PRIORITY;
+
+ return os_cppr->stack[--os_cppr->index];
+}
+
static void xics_eoi_direct(unsigned int virq)
{
unsigned int irq = (unsigned int)irq_map[virq].hwirq;
iosync();
- direct_xirr_info_set((0xff << 24) | irq);
+ direct_xirr_info_set((pop_cppr() << 24) | irq);
}
static void xics_eoi_lpar(unsigned int virq)
@@ -339,7 +379,7 @@ static void xics_eoi_lpar(unsigned int virq)
unsigned int irq = (unsigned int)irq_map[virq].hwirq;
iosync();
- lpar_xirr_info_set((0xff << 24) | irq);
+ lpar_xirr_info_set((pop_cppr() << 24) | irq);
}
static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
@@ -388,7 +428,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
}
static struct irq_chip xics_pic_direct = {
- .typename = " XICS ",
+ .name = " XICS ",
.startup = xics_startup,
.mask = xics_mask_irq,
.unmask = xics_unmask_irq,
@@ -397,7 +437,7 @@ static struct irq_chip xics_pic_direct = {
};
static struct irq_chip xics_pic_lpar = {
- .typename = " XICS ",
+ .name = " XICS ",
.startup = xics_startup,
.mask = xics_mask_irq,
.unmask = xics_unmask_irq,
@@ -428,13 +468,13 @@ static int xics_host_map(struct irq_host *h, unsigned int virq,
/* Insert the interrupt mapping into the radix tree for fast lookup */
irq_radix_revmap_insert(xics_host, virq, hw);
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
return 0;
}
static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -746,6 +786,12 @@ void __init xics_init_IRQ(void)
static void xics_set_cpu_priority(unsigned char cppr)
{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+
+ BUG_ON(os_cppr->index != 0);
+
+ os_cppr->stack[os_cppr->index] = cppr;
+
if (firmware_has_feature(FW_FEATURE_LPAR))
lpar_cppr_info(cppr);
else
@@ -772,7 +818,7 @@ static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
void xics_setup_cpu(void)
{
- xics_set_cpu_priority(0xff);
+ xics_set_cpu_priority(LOWEST_PRIORITY);
xics_set_cpu_giq(default_distrib_server, 1);
}
@@ -852,7 +898,7 @@ void xics_migrate_irqs_away(void)
/* We need to get IPIs still. */
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
continue;
- desc = get_irq_desc(virq);
+ desc = irq_to_desc(virq);
/* We only need to migrate enabled IRQS */
if (desc == NULL || desc->chip == NULL
@@ -860,7 +906,7 @@ void xics_migrate_irqs_away(void)
|| desc->chip->set_affinity == NULL)
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
@@ -881,10 +927,10 @@ void xics_migrate_irqs_away(void)
virq, cpu);
/* Reset affinity to all cpus */
- cpumask_setall(irq_desc[virq].affinity);
+ cpumask_setall(irq_to_desc(virq)->affinity);
desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
#endif
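
The xics changes above keep a small per-CPU stack of processor priorities (CPPR): taking an interrupt pushes the priority it will run at, and the EOI pops the previous priority back into the XIRR write instead of unconditionally restoring 0xff. A minimal userspace sketch of that stack discipline — simplified types, an illustrative DEFAULT_PRIORITY value, and none of the kernel's per-CPU machinery — might look like:

#include <stdio.h>

#define MAX_NUM_PRIORITIES 3    /* base, device, IPI - mirrors the patch */
#define LOWEST_PRIORITY    0xff
#define DEFAULT_PRIORITY   5    /* illustrative value, not from the patch */
#define IPI_PRIORITY       4

struct cppr_stack {
	unsigned char stack[MAX_NUM_PRIORITIES];
	int index;
};

/* Push the priority we are about to run at when an interrupt is taken. */
static void push_cppr(struct cppr_stack *s, int is_ipi)
{
	if (s->index >= MAX_NUM_PRIORITIES - 1)
		return;                 /* stack full: mirrors the WARN_ON case */
	s->stack[++s->index] = is_ipi ? IPI_PRIORITY : DEFAULT_PRIORITY;
}

/* Pop back to the priority that was in effect before the interrupt. */
static unsigned char pop_cppr(struct cppr_stack *s)
{
	if (s->index < 1)
		return LOWEST_PRIORITY;
	return s->stack[--s->index];
}

int main(void)
{
	struct cppr_stack s = { .stack = { LOWEST_PRIORITY }, .index = 0 };

	push_cppr(&s, 0);                       /* device interrupt arrives */
	push_cppr(&s, 1);                       /* IPI preempts it */
	printf("EOI IPI    -> cppr 0x%02x\n", pop_cppr(&s));  /* back to 5 */
	printf("EOI device -> cppr 0x%02x\n", pop_cppr(&s));  /* back to 0xff */
	return 0;
}
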
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 9d4b17462f1..5642924fb9f 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o
obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
+obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 82424cd7e12..a4b41dbde12 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -77,7 +77,7 @@ static void cpm_end_irq(unsigned int irq)
}
static struct irq_chip cpm_pic = {
- .typename = " CPM PIC ",
+ .name = " CPM PIC ",
.mask = cpm_mask_irq,
.unmask = cpm_unmask_irq,
.eoi = cpm_end_irq,
@@ -102,7 +102,7 @@ static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
{
pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
return 0;
}
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 78f1f7cca0a..1709ac5aac7 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -115,11 +115,13 @@ static void cpm2_ack(unsigned int virq)
static void cpm2_end_irq(unsigned int virq)
{
+ struct irq_desc *desc;
int bit, word;
unsigned int irq_nr = virq_to_hw(virq);
- if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
- && irq_desc[irq_nr].action) {
+ desc = irq_to_desc(irq_nr);
+ if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))
+ && desc->action) {
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
@@ -138,16 +140,26 @@ static void cpm2_end_irq(unsigned int virq)
static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
{
unsigned int src = virq_to_hw(virq);
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
unsigned int vold, vnew, edibit;
- if (flow_type == IRQ_TYPE_NONE)
- flow_type = IRQ_TYPE_LEVEL_LOW;
-
- if (flow_type & IRQ_TYPE_EDGE_RISING) {
- printk(KERN_ERR "CPM2 PIC: sense type 0x%x not supported\n",
- flow_type);
- return -EINVAL;
+ /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
+ * IRQ_TYPE_EDGE_BOTH (default). All others are IRQ_TYPE_EDGE_FALLING
+ * or IRQ_TYPE_LEVEL_LOW (default)
+ */
+ if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) {
+ if (flow_type == IRQ_TYPE_NONE)
+ flow_type = IRQ_TYPE_EDGE_BOTH;
+
+ if (flow_type != IRQ_TYPE_EDGE_BOTH &&
+ flow_type != IRQ_TYPE_EDGE_FALLING)
+ goto err_sense;
+ } else {
+ if (flow_type == IRQ_TYPE_NONE)
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+
+ if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+ goto err_sense;
}
desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
@@ -179,10 +191,14 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
if (vold != vnew)
out_be32(&cpm2_intctl->ic_siexr, vnew);
return 0;
+
+err_sense:
+ pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
+ return -EINVAL;
}
static struct irq_chip cpm2_pic = {
- .typename = " CPM2 SIU ",
+ .name = " CPM2 SIU ",
.mask = cpm2_mask_irq,
.unmask = cpm2_unmask_irq,
.ack = cpm2_ack,
@@ -210,13 +226,13 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
{
pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
return 0;
}
static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
*out_hwirq = intspec[0];
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index e4b6d66d93d..9de72c96e6d 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -72,7 +72,7 @@ static phys_addr_t muram_pbase;
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
-int __init cpm_muram_init(void)
+int cpm_muram_init(void)
{
struct device_node *np;
struct resource r;
@@ -81,6 +81,9 @@ int __init cpm_muram_init(void)
int i = 0;
int ret = 0;
+ if (muram_pbase)
+ return 0;
+
spin_lock_init(&cpm_muram_lock);
/* initialize the info header */
rh_init(&cpm_muram_info, 1,
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index da38a1ff97b..c6e11b07710 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -47,7 +47,7 @@ static struct irq_chip fsl_msi_chip = {
.mask = mask_msi_irq,
.unmask = unmask_msi_irq,
.ack = fsl_msi_end_irq,
- .typename = " FSL-MSI ",
+ .name = " FSL-MSI ",
};
static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
@@ -55,7 +55,7 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
{
struct irq_chip *chip = &fsl_msi_chip;
- get_irq_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
+ irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING;
set_irq_chip_and_handler(virq, chip, handle_edge_irq);
@@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
u32 intr_index;
u32 have_shift = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) {
if (desc->chip->mask_ack)
desc->chip->mask_ack(irq);
@@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
break;
}
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static int __devinit fsl_of_msi_probe(struct of_device *dev,
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index ae88b144801..e1a028c1f18 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -56,7 +56,7 @@ static int __init fsl_pcie_check_link(struct pci_controller *hose)
return 0;
}
-#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static int __init setup_one_atmu(struct ccsr_pci __iomem *pci,
unsigned int index, const struct resource *res,
resource_size_t offset)
@@ -392,9 +392,23 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header);
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header);
-#endif /* CONFIG_PPC_85xx || CONFIG_PPC_86xx */
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header);
+#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8314E, quirk_fsl_pcie_header);
@@ -450,8 +464,7 @@ static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct mpc83xx_pcie_priv *pcie = hose->dn->data;
- u8 bus_no = bus->number - hose->first_busno;
- u32 dev_base = bus_no << 24 | devfn << 16;
+ u32 dev_base = bus->number << 24 | devfn << 16;
int ret;
ret = mpc83xx_pcie_exclude_device(bus, devfn);
@@ -501,12 +514,17 @@ static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
+ struct pci_controller *hose = pci_bus_to_host(bus);
void __iomem *cfg_addr;
cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
if (!cfg_addr)
return PCIBIOS_DEVICE_NOT_FOUND;
+ /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
+ if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
+ val &= 0xffffff00;
+
switch (len) {
case 1:
out_8(cfg_addr, val);
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
new file mode 100644
index 00000000000..a7635a993dc
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -0,0 +1,88 @@
+/*
+ * Suspend/resume support
+ *
+ * Copyright 2009 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+
+struct pmc_regs {
+ __be32 devdisr;
+ __be32 devdisr2;
+ __be32 :32;
+ __be32 :32;
+ __be32 pmcsr;
+#define PMCSR_SLP (1 << 17)
+};
+
+static struct device *pmc_dev;
+static struct pmc_regs __iomem *pmc_regs;
+
+static int pmc_suspend_enter(suspend_state_t state)
+{
+ int ret;
+
+ setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
+ /* At this point, the CPU is asleep. */
+
+ /* Upon resume, wait for SLP bit to be clear. */
+ ret = spin_event_timeout((in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
+ 10000, 10) ? 0 : -ETIMEDOUT;
+ if (ret)
+ dev_err(pmc_dev, "tired waiting for SLP bit to clear\n");
+ return ret;
+}
+
+static int pmc_suspend_valid(suspend_state_t state)
+{
+ if (state != PM_SUSPEND_STANDBY)
+ return 0;
+ return 1;
+}
+
+static struct platform_suspend_ops pmc_suspend_ops = {
+ .valid = pmc_suspend_valid,
+ .enter = pmc_suspend_enter,
+};
+
+static int pmc_probe(struct of_device *ofdev, const struct of_device_id *id)
+{
+ pmc_regs = of_iomap(ofdev->node, 0);
+ if (!pmc_regs)
+ return -ENOMEM;
+
+ pmc_dev = &ofdev->dev;
+ suspend_set_ops(&pmc_suspend_ops);
+ return 0;
+}
+
+static const struct of_device_id pmc_ids[] = {
+ { .compatible = "fsl,mpc8548-pmc", },
+ { .compatible = "fsl,mpc8641d-pmc", },
+ { },
+};
+
+static struct of_platform_driver pmc_driver = {
+ .driver.name = "fsl-pmc",
+ .match_table = pmc_ids,
+ .probe = pmc_probe,
+};
+
+static int __init pmc_init(void)
+{
+ return of_register_platform_driver(&pmc_driver);
+}
+device_initcall(pmc_init);
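
pmc_suspend_enter() above relies on the poll-until-condition-or-timeout pattern (spin_event_timeout in the kernel) to wait for the SLP bit to clear on resume. A rough userspace sketch of that pattern, with a toy register read standing in for in_be32(&pmc_regs->pmcsr) and the timing done with clock_gettime, could be:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Toy "register" read; the SLP bit clears on the fourth poll. */
static unsigned int read_pmcsr(void)
{
	static int calls;
	return (++calls < 4) ? (1u << 17) : 0;
}

static bool slp_cleared(void)
{
	return (read_pmcsr() & (1u << 17)) == 0;
}

/* Poll until cond() is true or roughly max_us microseconds have elapsed. */
static bool poll_event_timeout(bool (*cond)(void), long max_us, long delay_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed = (now.tv_sec - start.tv_sec) * 1000000L +
			       (now.tv_nsec - start.tv_nsec) / 1000L;
		if (elapsed > max_us)
			return false;
		nanosleep(&(struct timespec){ .tv_nsec = delay_us * 1000L }, NULL);
	}
}

int main(void)
{
	printf(poll_event_timeout(slp_cleared, 10000, 10) ?
	       "SLP cleared\n" : "timed out waiting for SLP\n");
	return 0;
}
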
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index adca4affcf1..b91f7acdda6 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -372,7 +372,7 @@ err:
arch_initcall(fsl_usb_of_init);
-#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static __be32 __iomem *rstcr;
static int __init setup_rstcr(void)
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index a96584ab33d..0a55db8a5a2 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -135,7 +135,7 @@ static void i8259_unmask_irq(unsigned int irq_nr)
}
static struct irq_chip i8259_pic = {
- .typename = " i8259 ",
+ .name = " i8259 ",
.mask = i8259_mask_irq,
.disable = i8259_mask_irq,
.unmask = i8259_unmask_irq,
@@ -175,12 +175,12 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
/* We block the internal cascade */
if (hw == 2)
- get_irq_desc(virq)->status |= IRQ_NOREQUEST;
+ irq_to_desc(virq)->status |= IRQ_NOREQUEST;
/* We use the level handler only for now, we might want to
* be more cautious here but that works for now
*/
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
return 0;
}
@@ -198,7 +198,7 @@ static void i8259_host_unmap(struct irq_host *h, unsigned int virq)
}
static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
static unsigned char map_isa_senses[4] = {
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index cb7689c4bfb..28cdddd2f89 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -605,7 +605,7 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
{
struct ipic *ipic = ipic_from_irq(virq);
unsigned int src = ipic_irq_to_hw(virq);
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
unsigned int vold, vnew, edibit;
if (flow_type == IRQ_TYPE_NONE)
@@ -660,7 +660,7 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
- .typename = " IPIC ",
+ .name = " IPIC ",
.unmask = ipic_unmask_irq,
.mask = ipic_mask_irq,
.mask_ack = ipic_mask_irq,
@@ -668,7 +668,7 @@ static struct irq_chip ipic_level_irq_chip = {
};
static struct irq_chip ipic_edge_irq_chip = {
- .typename = " IPIC ",
+ .name = " IPIC ",
.unmask = ipic_unmask_irq,
.mask = ipic_mask_irq,
.mask_ack = ipic_mask_irq_and_ack,
@@ -697,7 +697,7 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
}
static int ipic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 5d2d5522ef4..69bd6f4dff8 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -72,7 +72,7 @@ static void mpc8xx_end_irq(unsigned int virq)
static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
{
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
@@ -94,7 +94,7 @@ static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type)
}
static struct irq_chip mpc8xx_pic = {
- .typename = " MPC8XX SIU ",
+ .name = " MPC8XX SIU ",
.unmask = mpc8xx_unmask_irq,
.mask = mpc8xx_mask_irq,
.ack = mpc8xx_ack,
@@ -130,7 +130,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
static unsigned char map_pic_senses[4] = {
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index 103eace3619..ee1c0e1cf4a 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -54,6 +54,22 @@ static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)
mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT);
}
+/* Workaround for a GPIO erratum on MPC8572/MPC8536: the status of GPIOs
+ * configured as outputs cannot be determined by reading the GPDAT register,
+ * so we use a shadow data register instead. The status of input pins is
+ * still determined by reading GPDAT.
+ */
+static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ u32 val;
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
+
+ val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR);
+
+ return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio);
+}
+
static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -136,7 +152,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
gc->direction_output = mpc8xxx_gpio_dir_out;
- gc->get = mpc8xxx_gpio_get;
+ if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
+ gc->get = mpc8572_gpio_get;
+ else
+ gc->get = mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
ret = of_mm_gpiochip_add(np, mm_gc);
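
The erratum handled above means GPDAT cannot be trusted for pins configured as outputs, so the new mpc8572_gpio_get() masks GPDAT down to the input pins and ORs in the shadow copy of the last value written. A self-contained sketch of that merge, with plain integers in place of the MMIO accessors and a simplified register layout:

#include <stdio.h>
#include <stdint.h>

/* Toy register file: GPDIR bit set = output, GPDAT = what the pins read back. */
struct gpio_regs {
	uint32_t gpdir;
	uint32_t gpdat;
};

/* Value of a pin: inputs come from GPDAT, outputs from the shadow copy. */
static int gpio_get(const struct gpio_regs *regs, uint32_t shadow, unsigned pin)
{
	uint32_t mask = 1u << (31 - pin);          /* big-endian bit numbering */
	uint32_t inputs = regs->gpdat & ~regs->gpdir;

	return !!((inputs | shadow) & mask);
}

int main(void)
{
	/* Pin 0 is an output we drove high; GPDAT erroneously reads it as 0. */
	struct gpio_regs regs = { .gpdir = 0x80000000u, .gpdat = 0x00000000u };
	uint32_t shadow = 0x80000000u;             /* last value written out */

	printf("pin 0 (output) = %d\n", gpio_get(&regs, shadow, 0));  /* 1 */
	printf("pin 1 (input)  = %d\n", gpio_get(&regs, shadow, 1));  /* 0 */
	return 0;
}
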
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 30c44e6b041..470dc6c11d5 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -567,13 +567,11 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
#endif /* CONFIG_MPIC_U3_HT_IRQS */
#ifdef CONFIG_SMP
-static int irq_choose_cpu(unsigned int virt_irq)
+static int irq_choose_cpu(const cpumask_t *mask)
{
- cpumask_t mask;
int cpuid;
- cpumask_copy(&mask, irq_desc[virt_irq].affinity);
- if (cpus_equal(mask, CPU_MASK_ALL)) {
+ if (cpumask_equal(mask, cpu_all_mask)) {
static int irq_rover;
static DEFINE_SPINLOCK(irq_rover_lock);
unsigned long flags;
@@ -594,20 +592,15 @@ static int irq_choose_cpu(unsigned int virt_irq)
spin_unlock_irqrestore(&irq_rover_lock, flags);
} else {
- cpumask_t tmp;
-
- cpus_and(tmp, cpu_online_map, mask);
-
- if (cpus_empty(tmp))
+ cpuid = cpumask_first_and(mask, cpu_online_mask);
+ if (cpuid >= nr_cpu_ids)
goto do_round_robin;
-
- cpuid = first_cpu(tmp);
}
return get_hard_smp_processor_id(cpuid);
}
#else
-static int irq_choose_cpu(unsigned int virt_irq)
+static int irq_choose_cpu(const cpumask_t *mask)
{
return hard_smp_processor_id();
}
@@ -621,7 +614,7 @@ static struct mpic *mpic_find(unsigned int irq)
if (irq < NUM_ISA_INTERRUPTS)
return NULL;
- return irq_desc[irq].chip_data;
+ return irq_to_desc(irq)->chip_data;
}
/* Determine if the linux irq is an IPI */
@@ -648,14 +641,14 @@ static inline u32 mpic_physmask(u32 cpumask)
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(unsigned int ipi)
{
- return irq_desc[ipi].chip_data;
+ return irq_to_desc(ipi)->chip_data;
}
#endif
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
- return irq_desc[irq].chip_data;
+ return irq_to_desc(irq)->chip_data;
}
/* Send an EOI */
@@ -735,7 +728,7 @@ static void mpic_unmask_ht_irq(unsigned int irq)
mpic_unmask_irq(irq);
- if (irq_desc[irq].status & IRQ_LEVEL)
+ if (irq_to_desc(irq)->status & IRQ_LEVEL)
mpic_ht_end_irq(mpic, src);
}
@@ -745,7 +738,7 @@ static unsigned int mpic_startup_ht_irq(unsigned int irq)
unsigned int src = mpic_irq_to_hw(irq);
mpic_unmask_irq(irq);
- mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
+ mpic_startup_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
return 0;
}
@@ -755,7 +748,7 @@ static void mpic_shutdown_ht_irq(unsigned int irq)
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = mpic_irq_to_hw(irq);
- mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
+ mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(irq)->status);
mpic_mask_irq(irq);
}
@@ -772,7 +765,7 @@ static void mpic_end_ht_irq(unsigned int irq)
* latched another edge interrupt coming in anyway
*/
- if (irq_desc[irq].status & IRQ_LEVEL)
+ if (irq_to_desc(irq)->status & IRQ_LEVEL)
mpic_ht_end_irq(mpic, src);
mpic_eoi(mpic);
}
@@ -816,7 +809,7 @@ int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
unsigned int src = mpic_irq_to_hw(irq);
if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
- int cpuid = irq_choose_cpu(irq);
+ int cpuid = irq_choose_cpu(cpumask);
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
} else {
@@ -856,7 +849,7 @@ int mpic_set_irq_type(unsigned int virq, unsigned int flow_type)
{
struct mpic *mpic = mpic_from_irq(virq);
unsigned int src = mpic_irq_to_hw(virq);
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
unsigned int vecpri, vold, vnew;
DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
@@ -994,7 +987,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
}
static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
@@ -1062,19 +1055,19 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->name = name;
mpic->hc_irq = mpic_irq_chip;
- mpic->hc_irq.typename = name;
+ mpic->hc_irq.name = name;
if (flags & MPIC_PRIMARY)
mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
mpic->hc_ht_irq = mpic_irq_ht_chip;
- mpic->hc_ht_irq.typename = name;
+ mpic->hc_ht_irq.name = name;
if (flags & MPIC_PRIMARY)
mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */
#ifdef CONFIG_SMP
mpic->hc_ipi = mpic_ipi_chip;
- mpic->hc_ipi.typename = name;
+ mpic->hc_ipi.name = name;
#endif /* CONFIG_SMP */
mpic->flags = flags;
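
irq_choose_cpu() now receives the affinity mask directly: an "all CPUs" mask is spread round-robin over online CPUs, anything narrower takes the first online CPU in the mask and falls back to round-robin if none is online. A small sketch of the same selection over plain 32-bit masks (assumed 32-CPU limit, no locking around the rover):

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 32

static const uint32_t all_cpus = 0xffffffffu;   /* NR_CPUS == 32 in this sketch */
static uint32_t online_mask = 0x0000000f;       /* CPUs 0-3 online */
static int irq_rover;                           /* round-robin state */

static int choose_cpu(uint32_t affinity)
{
	if (affinity == all_cpus) {
		/* "all CPUs" affinity: spread interrupts round-robin. */
		do {
			irq_rover = (irq_rover + 1) % NR_CPUS;
		} while (!(online_mask & (1u << irq_rover)));
		return irq_rover;
	}

	/* Otherwise pick the first online CPU in the requested mask. */
	uint32_t usable = affinity & online_mask;
	if (!usable)
		return choose_cpu(all_cpus);     /* fall back to round-robin */
	return __builtin_ctz(usable);
}

int main(void)
{
	printf("all -> cpu %d\n", choose_cpu(all_cpus));
	printf("all -> cpu %d\n", choose_cpu(all_cpus));
	printf("mask 0x4 -> cpu %d\n", choose_cpu(0x4));
	printf("offline mask 0x10 -> cpu %d\n", choose_cpu(0x10));
	return 0;
}
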
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 1d44eee80fa..0f67cd79d48 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -39,7 +39,12 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
pr_debug("mpic: found U3, guessing msi allocator setup\n");
- /* Reserve source numbers we know are reserved in the HW */
+ /* Reserve source numbers we know are reserved in the HW.
+ *
+ * This is a bit of a mix of U3 and U4 reserves but that's going
+ * to work fine, we have plenty enough numbers left so let's just
+ * mark anything we don't like reserved.
+ */
for (i = 0; i < 8; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
@@ -49,6 +54,10 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
for (i = 100; i < 105; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
+ for (i = 124; i < mpic->irq_count; i++)
+ msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
+
+
np = NULL;
while ((np = of_find_all_nodes(np))) {
pr_debug("mpic: mapping hwirqs for %s\n", np->full_name);
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 656cb772b69..0f6ab06f847 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -60,7 +60,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
.eoi = mpic_end_irq,
.set_type = mpic_set_irq_type,
.set_affinity = mpic_set_affinity,
- .typename = "PASEMI-MSI ",
+ .name = "PASEMI-MSI ",
};
static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 0a8f5a9e87c..bcbfe79c704 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -42,7 +42,7 @@ static struct irq_chip mpic_u3msi_chip = {
.eoi = mpic_end_irq,
.set_type = mpic_set_irq_type,
.set_affinity = mpic_set_affinity,
- .typename = "MPIC-U3MSI",
+ .name = "MPIC-U3MSI",
};
static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos)
@@ -64,12 +64,12 @@ static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos)
return addr;
}
-static u64 find_ht_magic_addr(struct pci_dev *pdev)
+static u64 find_ht_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
{
struct pci_bus *bus;
unsigned int pos;
- for (bus = pdev->bus; bus; bus = bus->parent) {
+ for (bus = pdev->bus; bus && bus->self; bus = bus->parent) {
pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING);
if (pos)
return read_ht_magic_addr(bus->self, pos);
@@ -78,13 +78,41 @@ static u64 find_ht_magic_addr(struct pci_dev *pdev)
return 0;
}
+static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+
+ /* U4 PCIe MSIs need to write to the special register in
+ * the bridge that generates interrupts. There should be
+ * theoretically a register at 0xf8005000 where you just write
+ * the MSI number and that triggers the right interrupt, but
+ * unfortunately, this is busted in HW, the bridge endian swaps
+ * the value and hits the wrong nibble in the register.
+ *
+ * So instead we use another register set which is used normally
+ * for converting HT interrupts to MPIC interrupts, which decodes
+ * the interrupt number as part of the low address bits
+ *
+ * This will not work if we ever use more than one legacy MSI in
+ * a block but we never do. For one MSI or multiple MSI-X where
+ * each interrupt address can be specified separately, it works
+ * just fine.
+ */
+ if (of_device_is_compatible(hose->dn, "u4-pcie") ||
+ of_device_is_compatible(hose->dn, "U4-pcie"))
+ return 0xf8004000 | (hwirq << 4);
+
+ return 0;
+}
+
static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
if (type == PCI_CAP_ID_MSIX)
pr_debug("u3msi: MSI-X untested, trying anyway.\n");
/* If we can't find a magic address then MSI ain't gonna work */
- if (find_ht_magic_addr(pdev) == 0) {
+ if (find_ht_magic_addr(pdev, 0) == 0 &&
+ find_u4_magic_addr(pdev, 0) == 0) {
pr_debug("u3msi: no magic address found for %s\n",
pci_name(pdev));
return -ENXIO;
@@ -118,10 +146,6 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
u64 addr;
int hwirq;
- addr = find_ht_magic_addr(pdev);
- msg.address_lo = addr & 0xFFFFFFFF;
- msg.address_hi = addr >> 32;
-
list_for_each_entry(entry, &pdev->msi_list, list) {
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
if (hwirq < 0) {
@@ -129,6 +153,12 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return hwirq;
}
+ addr = find_ht_magic_addr(pdev, hwirq);
+ if (addr == 0)
+ addr = find_u4_magic_addr(pdev, hwirq);
+ msg.address_lo = addr & 0xFFFFFFFF;
+ msg.address_hi = addr >> 32;
+
virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
if (virq == NO_IRQ) {
pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq);
@@ -143,6 +173,8 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
virq, hwirq, (unsigned long)addr);
+ printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
+ virq, hwirq, (unsigned long)addr);
msg.data = hwirq;
write_msi_msg(virq, &msg);
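
With the U4 workaround above, each MSI gets its own doorbell address: the hwirq is shifted into the low bits of the 0xf8004000 decode window rather than written as data to one shared register, and the address is split into the 32-bit halves the MSI message carries. A tiny sketch of that per-interrupt address composition (constants taken from the patch, message struct simplified):

#include <stdio.h>
#include <stdint.h>

struct msi_msg { uint32_t address_lo, address_hi, data; };

/* U4 PCIe: the interrupt number is decoded from the low address bits. */
static uint64_t u4_magic_addr(unsigned int hwirq)
{
	return 0xf8004000ull | ((uint64_t)hwirq << 4);
}

static struct msi_msg compose_msi(unsigned int hwirq)
{
	uint64_t addr = u4_magic_addr(hwirq);

	return (struct msi_msg){
		.address_lo = (uint32_t)(addr & 0xffffffffu),
		.address_hi = (uint32_t)(addr >> 32),
		.data       = hwirq,
	};
}

int main(void)
{
	for (unsigned int hwirq = 124; hwirq < 127; hwirq++) {
		struct msi_msg m = compose_msi(hwirq);
		printf("hwirq %u -> addr 0x%08x%08x data %u\n",
		       hwirq, m.address_hi, m.address_lo, m.data);
	}
	return 0;
}
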
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 2aa4ed066db..485b92477d7 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -213,7 +213,7 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
{
int level1;
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
BUG_ON(level1 > MV64x60_LEVEL1_GPP);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 464271bea6c..149393c02c3 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -65,19 +67,6 @@ static unsigned int qe_num_of_snum;
static phys_addr_t qebase = -1;
-int qe_alive_during_sleep(void)
-{
- static int ret = -1;
-
- if (ret != -1)
- return ret;
-
- ret = !of_find_compatible_node(NULL, NULL, "fsl,mpc8569-pmc");
-
- return ret;
-}
-EXPORT_SYMBOL(qe_alive_during_sleep);
-
phys_addr_t get_qe_base(void)
{
struct device_node *qe;
@@ -104,7 +93,7 @@ phys_addr_t get_qe_base(void)
EXPORT_SYMBOL(get_qe_base);
-void __init qe_reset(void)
+void qe_reset(void)
{
if (qe_immr == NULL)
qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
@@ -330,16 +319,18 @@ EXPORT_SYMBOL(qe_put_snum);
static int qe_sdma_init(void)
{
struct sdma __iomem *sdma = &qe_immr->sdma;
- unsigned long sdma_buf_offset;
+ static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
if (!sdma)
return -ENODEV;
/* allocate 2 internal temporary buffers (512 bytes size each) for
* the SDMA */
- sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
- if (IS_ERR_VALUE(sdma_buf_offset))
- return -ENOMEM;
+ if (IS_ERR_VALUE(sdma_buf_offset)) {
+ sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+ if (IS_ERR_VALUE(sdma_buf_offset))
+ return -ENOMEM;
+ }
out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
@@ -349,7 +340,7 @@ static int qe_sdma_init(void)
}
/* The maximum number of RISCs we support */
-#define MAX_QE_RISC 2
+#define MAX_QE_RISC 4
/* Firmware information stored here for qe_get_firmware_info() */
static struct qe_firmware_info qe_firmware_info;
@@ -658,3 +649,35 @@ unsigned int qe_get_num_of_snums(void)
return num_of_snums;
}
EXPORT_SYMBOL(qe_get_num_of_snums);
+
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct of_device *ofdev)
+{
+ if (!qe_alive_during_sleep())
+ qe_reset();
+ return 0;
+}
+
+static int qe_probe(struct of_device *ofdev, const struct of_device_id *id)
+{
+ return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+ { .compatible = "fsl,qe", },
+ { },
+};
+
+static struct of_platform_driver qe_driver = {
+ .driver.name = "fsl-qe",
+ .match_table = qe_ids,
+ .probe = qe_probe,
+ .resume = qe_resume,
+};
+
+static int __init qe_drv_init(void)
+{
+ return of_register_platform_driver(&qe_driver);
+}
+device_initcall(qe_drv_init);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 3faa42e03a8..2acc928d192 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -189,7 +189,7 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
- return irq_desc[virq].chip_data;
+ return irq_to_desc(virq)->chip_data;
}
#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
@@ -237,7 +237,7 @@ static void qe_ic_mask_irq(unsigned int virq)
}
static struct irq_chip qe_ic_irq_chip = {
- .typename = " QEIC ",
+ .name = " QEIC ",
.unmask = qe_ic_unmask_irq,
.mask = qe_ic_mask_irq,
.mask_ack = qe_ic_mask_irq,
@@ -263,7 +263,7 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
chip = &qe_ic->hc_irq;
set_irq_chip_data(virq, qe_ic);
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_to_desc(virq)->status |= IRQ_LEVEL;
set_irq_chip_and_handler(virq, chip, handle_level_irq);
@@ -271,7 +271,7 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
}
static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 * intspec, unsigned int intsize,
+ const u32 * intspec, unsigned int intsize,
irq_hw_number_t * out_hwirq,
unsigned int *out_flags)
{
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index cf244a419e9..595034cfb85 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -376,7 +376,7 @@ static void tsi108_pci_irq_end(u_int irq)
*/
static struct irq_chip tsi108_pci_irq = {
- .typename = "tsi108_PCI_int",
+ .name = "tsi108_PCI_int",
.mask = tsi108_pci_irq_disable,
.ack = tsi108_pci_irq_ack,
.end = tsi108_pci_irq_end,
@@ -384,7 +384,7 @@ static struct irq_chip tsi108_pci_irq = {
};
static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
*out_hwirq = intspec[0];
@@ -398,7 +398,7 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
if ((virq >= 1) && (virq <= 4)){
irq = virq + IRQ_PCI_INTAD_BASE - 1;
- get_irq_desc(irq)->status |= IRQ_LEVEL;
+ irq_to_desc(irq)->status |= IRQ_LEVEL;
set_irq_chip(irq, &tsi108_pci_irq);
}
return 0;
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 466ce9ace12..6f220a913e4 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -57,7 +57,7 @@ struct uic {
static void uic_unmask_irq(unsigned int virq)
{
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
struct uic *uic = get_irq_chip_data(virq);
unsigned int src = uic_irq_to_hw(virq);
unsigned long flags;
@@ -101,7 +101,7 @@ static void uic_ack_irq(unsigned int virq)
static void uic_mask_ack_irq(unsigned int virq)
{
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
struct uic *uic = get_irq_chip_data(virq);
unsigned int src = uic_irq_to_hw(virq);
unsigned long flags;
@@ -129,7 +129,7 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
{
struct uic *uic = get_irq_chip_data(virq);
unsigned int src = uic_irq_to_hw(virq);
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
unsigned long flags;
int trigger, polarity;
u32 tr, pr, mask;
@@ -177,7 +177,7 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type)
}
static struct irq_chip uic_irq_chip = {
- .typename = " UIC ",
+ .name = " UIC ",
.unmask = uic_unmask_irq,
.mask = uic_mask_irq,
.mask_ack = uic_mask_ack_irq,
@@ -202,7 +202,7 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
}
static int uic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
@@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
int src;
int subvirq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->mask(virq);
else
desc->chip->mask_ack(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
if (!msr) /* spurious interrupt */
@@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
generic_handle_irq(subvirq);
uic_irq_ret:
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->status & IRQ_LEVEL)
desc->chip->ack(virq);
if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(virq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
static struct uic * __init uic_init_one(struct device_node *node)
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 40edad52077..1e0ccfaf403 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -79,7 +79,7 @@ static void xilinx_intc_mask(unsigned int virq)
static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type)
{
- struct irq_desc *desc = get_irq_desc(virq);
+ struct irq_desc *desc = irq_to_desc(virq);
desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
@@ -106,7 +106,7 @@ static void xilinx_intc_level_unmask(unsigned int virq)
}
static struct irq_chip xilinx_intc_level_irqchip = {
- .typename = "Xilinx Level INTC",
+ .name = "Xilinx Level INTC",
.mask = xilinx_intc_mask,
.mask_ack = xilinx_intc_mask,
.unmask = xilinx_intc_level_unmask,
@@ -133,7 +133,7 @@ static void xilinx_intc_edge_ack(unsigned int virq)
}
static struct irq_chip xilinx_intc_edge_irqchip = {
- .typename = "Xilinx Edge INTC",
+ .name = "Xilinx Edge INTC",
.mask = xilinx_intc_mask,
.unmask = xilinx_intc_edge_unmask,
.ack = xilinx_intc_edge_ack,
@@ -148,7 +148,7 @@ static struct irq_chip xilinx_intc_edge_irqchip = {
* xilinx_intc_xlate - translate virq# from device tree interrupts property
*/
static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
- u32 *intspec, unsigned int intsize,
+ const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index bdbe96c8a7e..4e6152c1376 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1641,7 +1641,8 @@ static void super_regs(void)
ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n",
ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
- printf(" Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);
+ printf(" Saved Gpr5=%.16lx \n",
+ ptrLpPaca->gpr5_dword.saved_gpr5);
}
#endif
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 495589950dc..5c91995b74e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -551,7 +551,7 @@ static int appldata_thaw(struct device *dev)
return appldata_restore(dev);
}
-static struct dev_pm_ops appldata_pm_ops = {
+static const struct dev_pm_ops appldata_pm_ops = {
.freeze = appldata_freeze,
.thaw = appldata_thaw,
.restore = appldata_restore,
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 6118890c946..6be4503201a 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -174,7 +174,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
if (IS_ERR(sctx->fallback.cip)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
name);
- return PTR_ERR(sctx->fallback.blk);
+ return PTR_ERR(sctx->fallback.cip);
}
return 0;
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 77df726180b..2b92d501425 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -164,7 +164,7 @@ static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
LPAR_NAME_LEN);
EBCASC(name, LPAR_NAME_LEN);
name[LPAR_NAME_LEN] = 0;
- strstrip(name);
+ strim(name);
}
struct cpu_info {
@@ -523,7 +523,7 @@ static int diag224_idx2name(int index, char *name)
memcpy(name, diag224_cpu_names + ((index + 1) * CPU_NAME_LEN),
CPU_NAME_LEN);
name[CPU_NAME_LEN] = 0;
- strstrip(name);
+ strim(name);
return 0;
}
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index d01fc8f799f..f0b0d31f0b4 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -124,7 +124,7 @@ static int hpyfs_vm_create_guest(struct super_block *sb,
/* guest dir */
memcpy(guest_name, data->guest_name, NAME_LEN);
EBCASC(guest_name, NAME_LEN);
- strstrip(guest_name);
+ strim(guest_name);
guest_dir = hypfs_mkdir(sb, systems_dir, guest_name);
if (IS_ERR(guest_dir))
return PTR_ERR(guest_dir);
diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/s390/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index e885442c1df..354d42616c7 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -155,7 +155,6 @@ extern unsigned int vdso_enabled;
} while (0)
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7a..a587907d77f 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
* (the type definitions are in asm/spinlock_types.h)
*/
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) \
- _raw_spin_relax(lock); } while (0)
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) \
+ arch_spin_relax(lock); } while (0)
-extern void _raw_spin_lock_wait(raw_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *);
-extern void _raw_spin_relax(raw_spinlock_t *lock);
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
-static inline void __raw_spin_lock(raw_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait(lp);
+ arch_spin_lock_wait(lp);
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return;
- _raw_spin_lock_wait_flags(lp, flags);
+ arch_spin_lock_wait_flags(lp, flags);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
int old;
old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
if (likely(old == 0))
return 1;
- return _raw_spin_trylock_retry(lp);
+ return arch_spin_trylock_retry(lp);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
@@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)
+#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) ((x)->lock == 0)
-extern void _raw_read_lock_wait(raw_rwlock_t *lp);
-extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait(raw_rwlock_t *lp);
-extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags);
-extern int _raw_write_trylock_retry(raw_rwlock_t *lp);
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
+extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
_raw_read_lock_wait(rw);
}
-static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_read_lock_wait_flags(rw, flags);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int old, cmp;
@@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
} while (cmp != old);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait(rw);
}
-static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags)
+static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
_raw_write_lock_wait_flags(rw, flags);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
_raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int old;
old = rw->lock & 0x7fffffffU;
@@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return _raw_read_trylock_retry(rw);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
return 1;
return _raw_write_trylock_retry(rw);
}
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
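
The s390 spinlock rename keeps the underlying protocol: the lock word holds the owner's id, acquisition is a single compare-and-swap from 0, and contention drops into a wait loop that yields toward the owner. A portable sketch of that protocol using C11 atomics and pthreads — a plain thread tag instead of ~smp_processor_id(), and sched_yield() instead of the directed hypervisor yield:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { atomic_uint owner; } sketch_spinlock_t;   /* 0 == unlocked */

static void sketch_spin_lock(sketch_spinlock_t *lp, unsigned int me)
{
	unsigned int expected = 0;

	/* Fast path: one compare-and-swap from "unlocked" to "owned by me". */
	while (!atomic_compare_exchange_weak(&lp->owner, &expected, me)) {
		expected = 0;        /* CAS wrote back the current owner; retry */
		sched_yield();       /* stand-in for _raw_yield_cpu(~owner) */
	}
}

static void sketch_spin_unlock(sketch_spinlock_t *lp)
{
	atomic_store(&lp->owner, 0);
}

static sketch_spinlock_t lock;
static long counter;

static void *worker(void *arg)
{
	unsigned int me = (unsigned int)(uintptr_t)arg;

	for (int i = 0; i < 100000; i++) {
		sketch_spin_lock(&lock, me);
		counter++;                       /* protected by the lock */
		sketch_spin_unlock(&lock);
	}
	return NULL;
}

int main(void)   /* build with: cc -pthread */
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)(uintptr_t)1);
	pthread_create(&b, NULL, worker, (void *)(uintptr_t)2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("counter = %ld (expect 200000)\n", counter);
	return 0;
}
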
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 654abc40de0..9c76656a0af 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned int owner_cpu;
-} __attribute__ ((aligned (4))) raw_spinlock_t;
+} __attribute__ ((aligned (4))) arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index cb5232df151..192a7203a14 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -269,7 +269,8 @@
#define __NR_pwritev 329
#define __NR_rt_tgsigqueueinfo 330
#define __NR_perf_event_open 331
-#define NR_syscalls 332
+#define __NR_recvmmsg 332
+#define NR_syscalls 333
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 25c31d68140..22c9e557bb2 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -624,38 +624,6 @@ struct mmap_arg_struct_emu31 {
u32 offset;
};
-/* common code for old and new mmaps */
-static inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- struct file * file = NULL;
- unsigned long error = -EBADF;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
- /* Result is out of bounds. */
- do_munmap(current->mm, addr, len);
- error = -ENOMEM;
- }
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
-
asmlinkage unsigned long
old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
{
@@ -669,7 +637,8 @@ old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
if (a.offset & ~PAGE_MASK)
goto out;
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
out:
return error;
}
@@ -682,7 +651,7 @@ sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return error;
}
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 30de2d0e52b..faeaccc7d7d 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1853,3 +1853,12 @@ sys32_execve_wrapper:
llgtr %r3,%r3 # compat_uptr_t *
llgtr %r4,%r4 # compat_uptr_t *
jg sys32_execve # branch to system call
+
+ .globl compat_sys_recvmmsg_wrapper
+compat_sys_recvmmsg_wrapper:
+ lgfr %r2,%r2 # int
+ llgtr %r3,%r3 # struct compat_mmsghdr *
+ llgfr %r4,%r4 # unsigned int
+ llgfr %r5,%r5 # unsigned int
+ llgtr %r6,%r6 # struct compat_timespec *
+ jg compat_sys_recvmmsg
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 071c81f179e..0168472b2fd 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -1178,7 +1179,7 @@ debug_get_uint(char *buf)
{
int rc;
- for(; isspace(*buf); buf++);
+ buf = skip_spaces(buf);
rc = simple_strtoul(buf, &buf, 10);
if(*buf){
rc = -EINVAL;
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 4890ac6d7fa..4d73296fed7 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -221,7 +221,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
const char *buf, size_t len) \
{ \
strncpy(_value, buf, sizeof(_value) - 1); \
- strstrip(_value); \
+ strim(_value); \
return len; \
} \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
@@ -472,7 +472,7 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
return sprintf(page, "#unknown#\n");
memcpy(loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
- strstrip(loadparm);
+ strim(loadparm);
return sprintf(page, "%s\n", loadparm);
}
@@ -776,7 +776,7 @@ static void reipl_get_ascii_loadparm(char *loadparm,
memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
- strstrip(loadparm);
+ strim(loadparm);
}
static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 653c6a17874..13815d39f7d 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -959,7 +959,7 @@ static const struct user_regset s390_compat_regsets[] = {
.set = s390_fpregs_set,
},
[REGSET_GENERAL_EXTENDED] = {
- .core_note_type = NT_PRXSTATUS,
+ .core_note_type = NT_S390_HIGH_GPRS,
.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
.size = sizeof(compat_long_t),
.align = sizeof(compat_long_t),
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index e9d94f61d50..86a74c9c9e6 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -32,32 +32,6 @@
#include <asm/uaccess.h>
#include "entry.h"
-/* common code for old and new mmaps */
-static inline long do_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- long error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux for S/390 isn't able to handle more than 5
@@ -81,7 +55,7 @@ SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg)
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
return error;
}
@@ -98,7 +72,7 @@ SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
if (a.offset & ~PAGE_MASK)
goto out;
- error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+ error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
return error;
}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 30eca070d42..4f292c93687 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -340,3 +340,4 @@ SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
+SYSCALL(sys_recvmmsg,sys_recvmmsg,compat_sys_recvmmsg_wrapper)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c2e42cc65ce..6e7ad63854c 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -18,7 +18,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -382,7 +382,7 @@ void __kprobes do_single_step(struct pt_regs *regs)
SIGTRAP) == NOTIFY_STOP){
return;
}
- if ((current->ptrace & PT_PTRACED) != 0)
+ if (tracehook_consider_fatal_signal(current, SIGTRAP))
force_sig(SIGTRAP, current);
}
@@ -483,7 +483,7 @@ static void illegal_op(struct pt_regs * regs, long interruption_code)
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
- if (current->ptrace & PT_PTRACED)
+ if (tracehook_consider_fatal_signal(current, SIGTRAP))
force_sig(SIGTRAP, current);
else
signal = SIGILL;
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f7e0d30250b..10754a37566 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
_raw_yield();
}
-void _raw_spin_lock_wait(raw_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp)
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return;
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
-void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int count = spin_retry;
unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
_raw_yield_cpu(~owner);
count = spin_retry;
}
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags)
local_irq_restore(flags);
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
-int _raw_spin_trylock_retry(raw_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
unsigned int cpu = ~smp_processor_id();
int count;
for (count = spin_retry; count > 0; count--) {
- if (__raw_spin_is_locked(lp))
+ if (arch_spin_is_locked(lp))
continue;
if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
return 1;
}
return 0;
}
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
-void _raw_spin_relax(raw_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
{
unsigned int cpu = lock->owner_cpu;
if (cpu != 0)
_raw_yield_cpu(~cpu);
}
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
-void _raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
@@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_lock_wait);
-void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
unsigned int old;
int count = spin_retry;
@@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_yield();
count = spin_retry;
}
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
local_irq_disable();
@@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
-int _raw_read_trylock_retry(raw_rwlock_t *rw)
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_read_can_lock(rw))
+ if (!arch_read_can_lock(rw))
continue;
old = rw->lock & 0x7fffffffU;
if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
@@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void _raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
int count = spin_retry;
@@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return;
@@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw)
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
int count = spin_retry;
@@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
_raw_yield();
count = spin_retry;
}
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
local_irq_disable();
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
@@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags)
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
-int _raw_write_trylock_retry(raw_rwlock_t *rw)
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
int count = spin_retry;
while (count-- > 0) {
- if (!__raw_write_can_lock(rw))
+ if (!arch_write_can_lock(rw))
continue;
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
return 1;
diff --git a/arch/score/include/asm/asm-offsets.h b/arch/score/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/score/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h
index caaba24036e..1d545d0ce20 100644
--- a/arch/score/include/asm/cacheflush.h
+++ b/arch/score/include/asm/cacheflush.h
@@ -14,10 +14,12 @@ extern void flush_cache_sigtramp(unsigned long addr);
extern void flush_icache_all(void);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_dcache_range(unsigned long start, unsigned long end);
+extern void flush_dcache_page(struct page *page);
+
+#define PG_dcache_dirty PG_arch_1
#define flush_cache_dup_mm(mm) do {} while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
#define flush_dcache_mmap_lock(mapping) do {} while (0)
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
#define flush_cache_vmap(start, end) do {} while (0)
diff --git a/arch/score/include/asm/delay.h b/arch/score/include/asm/delay.h
index 6726ec199dc..529e494712a 100644
--- a/arch/score/include/asm/delay.h
+++ b/arch/score/include/asm/delay.h
@@ -1,6 +1,8 @@
#ifndef _ASM_SCORE_DELAY_H
#define _ASM_SCORE_DELAY_H
+#include <asm-generic/param.h>
+
static inline void __delay(unsigned long loops)
{
/* 3 cycles per loop. */
diff --git a/arch/score/include/asm/elf.h b/arch/score/include/asm/elf.h
index 43526d9fda9..f478ce94181 100644
--- a/arch/score/include/asm/elf.h
+++ b/arch/score/include/asm/elf.h
@@ -61,7 +61,6 @@ struct task_struct;
struct pt_regs;
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This yields a mask that user programs can use to figure out what
diff --git a/arch/score/include/asm/page.h b/arch/score/include/asm/page.h
index d92a5a2d36d..1e9ade8e77e 100644
--- a/arch/score/include/asm/page.h
+++ b/arch/score/include/asm/page.h
@@ -74,7 +74,7 @@ extern unsigned long max_pfn;
#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
-#define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_mapnr)
+#define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn))
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
diff --git a/arch/score/kernel/setup.c b/arch/score/kernel/setup.c
index 6a2503c75c4..6f898c05787 100644
--- a/arch/score/kernel/setup.c
+++ b/arch/score/kernel/setup.c
@@ -49,6 +49,7 @@ static void __init bootmem_init(void)
min_low_pfn = PFN_UP(MEMORY_START);
max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);
+ max_mapnr = max_low_pfn - min_low_pfn;
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c
index 00124946986..856ed68a58e 100644
--- a/arch/score/kernel/sys_score.c
+++ b/arch/score/kernel/sys_score.c
@@ -36,34 +36,16 @@ asmlinkage long
sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd, unsigned long pgoff)
{
- int error = -EBADF;
- struct file *file = NULL;
-
- if (pgoff & (~PAGE_MASK >> 12))
- return -EINVAL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- return error;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-
- return error;
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
asmlinkage long
sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, off_t pgoff)
+ unsigned long flags, unsigned long fd, off_t offset)
{
- return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
+ if (unlikely(offset & ~PAGE_MASK))
+ return -EINVAL;
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
asmlinkage long
diff --git a/arch/score/mm/cache.c b/arch/score/mm/cache.c
index dbac9d9dfdd..b25e9574360 100644
--- a/arch/score/mm/cache.c
+++ b/arch/score/mm/cache.c
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/fs.h>
#include <asm/mmu_context.h>
@@ -51,6 +52,27 @@ static void flush_data_cache_page(unsigned long addr)
}
}
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ unsigned long addr;
+
+ if (PageHighMem(page))
+ return;
+ if (mapping && !mapping_mapped(mapping)) {
+ set_bit(PG_dcache_dirty, &(page)->flags);
+ return;
+ }
+
+ /*
+ * We could delay the flush for the !page_mapping case too. But that
+ * case is for exec env/arg pages and those are 99% certain to get
+ * faulted into the tlb (and thus flushed) anyway.
+ */
+ addr = (unsigned long) page_address(page);
+ flush_data_cache_page(addr);
+}
+
/* called by update_mmu_cache. */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
@@ -63,11 +85,11 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
if (unlikely(!pfn_valid(pfn)))
return;
page = pfn_to_page(pfn);
- if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
addr = (unsigned long) page_address(page);
if (exec)
flush_data_cache_page(addr);
- clear_bit(PG_arch_1, &page->flags);
+ clear_bit(PG_dcache_dirty, &(page)->flags);
}
}
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 4e3dcd0c471..8c15b2c85d5 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -83,7 +83,6 @@ void __init mem_init(void)
unsigned long codesize, reservedpages, datasize, initsize;
unsigned long tmp, ram = 0;
- max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
totalram_pages -= setup_zero_page(); /* Setup zeroed pages. */
@@ -101,10 +100,6 @@ void __init mem_init(void)
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
- kclist_add(&kcore_vmalloc, (void *) VMALLOC_START,
- VMALLOC_END - VMALLOC_START);
-
printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
"%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 55907af1dc2..12fec72fec5 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -19,50 +19,6 @@ config SH_STANDARD_BIOS
mask ROM and no flash (WindowsCE machines fall in this category).
If unsure, say N.
-config EARLY_SCIF_CONSOLE
- bool "Use early SCIF console"
- help
- This enables an early console using a fixed SCIF port. This can
- be used by platforms that are either not running the SH
- standard BIOS, or do not wish to use the BIOS callbacks for the
- serial I/O.
-
-config EARLY_SCIF_CONSOLE_PORT
- hex
- depends on EARLY_SCIF_CONSOLE
- default "0xa4400000" if CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7705
- default "0xa4430000" if CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721
- default "0xf8420000" if CPU_SUBTYPE_SH7619
- default "0xff804000" if CPU_SUBTYPE_MXG
- default "0xffc30000" if CPU_SUBTYPE_SHX3
- default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763 || \
- CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366 || \
- CPU_SUBTYPE_SH7343
- default "0xfe4c0000" if CPU_SUBTYPE_SH7757
- default "0xffeb0000" if CPU_SUBTYPE_SH7785
- default "0xffeb0000" if CPU_SUBTYPE_SH7786
- default "0xfffe8000" if CPU_SUBTYPE_SH7203
- default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263
- default "0xffe80000" if CPU_SH4
- default "0xa4000150" if CPU_SH3
- default "0x00000000"
-
-config EARLY_PRINTK
- bool "Early printk support"
- depends on SH_STANDARD_BIOS || EARLY_SCIF_CONSOLE
- help
- Say Y here to redirect kernel printk messages to the serial port
- used by the SH-IPL bootloader, starting very early in the boot
- process and ending when the kernel's serial console is initialised.
- This option is only useful porting the kernel to a new machine,
- when the kernel may crash or hang before the serial console is
- initialised. If unsure, say N.
-
- On devices that are running SH-IPL and want to keep the port
- initialization consistent while not using the BIOS callbacks,
- select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using
- the kernel command line option to toggle back and forth.
-
config STACK_DEBUG
bool "Check for stack overflows"
depends on DEBUG_KERNEL && SUPERH32
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index ac17c5ac550..db91925c79d 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -205,10 +205,7 @@ libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.srec uImage.bin \
zImage vmlinux.srec romImage
-PHONY += maketools $(BOOT_TARGETS) FORCE
-
-maketools: include/linux/version.h FORCE
- $(Q)$(MAKE) $(build)=arch/sh/tools include/asm-sh/machtypes.h
+PHONY += $(BOOT_TARGETS)
all: $(KBUILD_IMAGE)
@@ -217,7 +214,8 @@ $(BOOT_TARGETS): vmlinux
compressed: zImage
-archprepare: maketools
+archprepare:
+ $(Q)$(MAKE) $(build)=arch/sh/tools include/generated/machtypes.h
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
@@ -234,5 +232,3 @@ define archhelp
@echo ' uImage.bz2 - Kernel-only image for U-Boot (bzip2)'
@echo ' uImage.lzma - Kernel-only image for U-Boot (lzma)'
endef
-
-CLEAN_FILES += include/asm-sh/machtypes.h
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index cf9dc12dfeb..1f5fa5c44f6 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -316,20 +316,24 @@ static struct soc_camera_platform_info camera_info = {
.format_name = "UYVY",
.format_depth = 16,
.format = {
- .pixelformat = V4L2_PIX_FMT_UYVY,
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_BE,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
+ .field = V4L2_FIELD_NONE,
.width = 640,
.height = 480,
},
.bus_param = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
- SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8,
+ SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
+ SOCAM_DATA_ACTIVE_HIGH,
.set_capture = camera_set_capture,
- .link = {
- .bus_id = 0,
- .add_device = ap325rxa_camera_add,
- .del_device = ap325rxa_camera_del,
- .module_name = "soc_camera_platform",
- },
+};
+
+struct soc_camera_link camera_link = {
+ .bus_id = 0,
+ .add_device = ap325rxa_camera_add,
+ .del_device = ap325rxa_camera_del,
+ .module_name = "soc_camera_platform",
+ .priv = &camera_info,
};
static void dummy_release(struct device *dev)
@@ -347,7 +351,7 @@ static struct platform_device camera_device = {
static int ap325rxa_camera_add(struct soc_camera_link *icl,
struct device *dev)
{
- if (icl != &camera_info.link || camera_probe() <= 0)
+ if (icl != &camera_link || camera_probe() <= 0)
return -ENODEV;
camera_info.dev = dev;
@@ -357,7 +361,7 @@ static int ap325rxa_camera_add(struct soc_camera_link *icl,
static void ap325rxa_camera_del(struct soc_camera_link *icl)
{
- if (icl != &camera_info.link)
+ if (icl != &camera_link)
return;
platform_device_unregister(&camera_device);
@@ -470,13 +474,15 @@ static struct ov772x_camera_info ov7725_info = {
.buswidth = SOCAM_DATAWIDTH_8,
.flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
.edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0),
- .link = {
- .bus_id = 0,
- .power = ov7725_power,
- .board_info = &ap325rxa_i2c_camera[0],
- .i2c_adapter_id = 0,
- .module_name = "ov772x",
- },
+};
+
+static struct soc_camera_link ov7725_link = {
+ .bus_id = 0,
+ .power = ov7725_power,
+ .board_info = &ap325rxa_i2c_camera[0],
+ .i2c_adapter_id = 0,
+ .module_name = "ov772x",
+ .priv = &ov7725_info,
};
static struct platform_device ap325rxa_camera[] = {
@@ -484,13 +490,13 @@ static struct platform_device ap325rxa_camera[] = {
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
- .platform_data = &ov7725_info.link,
+ .platform_data = &ov7725_link,
},
}, {
.name = "soc-camera-pdrv",
.id = 1,
.dev = {
- .platform_data = &camera_info.link,
+ .platform_data = &camera_link,
},
},
};
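
Illustrative sketch, not part of the patch: after the hunks above, the board file publishes a standalone soc_camera_link as platform_data and hangs the sensor-specific info off its .priv member. A sensor driver handed such a link would recover its board data roughly as below; the function name and the way the link arrives are assumptions of the example, while the .priv wiring itself is what the board code above sets up.

/* Sketch only: recovering sensor info from the link's .priv pointer. */
static int sketch_sensor_probe(struct soc_camera_link *icl)
{
	struct ov772x_camera_info *info;

	if (!icl || !icl->priv)
		return -EINVAL;

	info = icl->priv;	/* e.g. &ov7725_info from the board file */
	/* ... use info->buswidth, info->flags, info->edgectrl ... */
	return 0;
}
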
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 826e62326d5..194aaca22d4 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -19,11 +19,18 @@
#include <linux/usb/r8a66597.h>
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/sh_msiof.h>
+#include <linux/spi/mmc_spi.h>
+#include <linux/mmc/host.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <video/sh_mobile_lcdc.h>
+#include <sound/sh_fsi.h>
#include <media/sh_mobile_ceu.h>
+#include <media/tw9910.h>
+#include <media/mt9t112.h>
#include <asm/heartbeat.h>
#include <asm/sh_eth.h>
#include <asm/clock.h>
@@ -338,6 +345,12 @@ static struct platform_device ceu1_device = {
};
/* I2C device */
+static struct i2c_board_info i2c0_devices[] = {
+ {
+ I2C_BOARD_INFO("da7210", 0x1a),
+ },
+};
+
static struct i2c_board_info i2c1_devices[] = {
{
I2C_BOARD_INFO("r2025sd", 0x32),
@@ -421,6 +434,7 @@ static struct i2c_board_info ts_i2c_clients = {
.irq = IRQ0,
};
+#ifdef CONFIG_MFD_SH_MOBILE_SDHI
/* SHDI0 */
static void sdhi0_set_pwr(struct platform_device *pdev, int state)
{
@@ -493,6 +507,248 @@ static struct platform_device sdhi1_device = {
},
};
+#else
+
+static int mmc_spi_get_ro(struct device *dev)
+{
+ return gpio_get_value(GPIO_PTY6);
+}
+
+static int mmc_spi_get_cd(struct device *dev)
+{
+ return !gpio_get_value(GPIO_PTY7);
+}
+
+static void mmc_spi_setpower(struct device *dev, unsigned int maskval)
+{
+ gpio_set_value(GPIO_PTB6, maskval ? 1 : 0);
+}
+
+static struct mmc_spi_platform_data mmc_spi_info = {
+ .get_ro = mmc_spi_get_ro,
+ .get_cd = mmc_spi_get_cd,
+ .caps = MMC_CAP_NEEDS_POLL,
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* 3.3V only */
+ .setpower = mmc_spi_setpower,
+};
+
+static struct spi_board_info spi_bus[] = {
+ {
+ .modalias = "mmc_spi",
+ .platform_data = &mmc_spi_info,
+ .max_speed_hz = 5000000,
+ .mode = SPI_MODE_0,
+ .controller_data = (void *) GPIO_PTM4,
+ },
+};
+
+static struct sh_msiof_spi_info msiof0_data = {
+ .num_chipselect = 1,
+};
+
+static struct resource msiof0_resources[] = {
+ [0] = {
+ .name = "MSIOF0",
+ .start = 0xa4c40000,
+ .end = 0xa4c40063,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 84,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device msiof0_device = {
+ .name = "spi_sh_msiof",
+ .id = 0, /* MSIOF0 */
+ .dev = {
+ .platform_data = &msiof0_data,
+ },
+ .num_resources = ARRAY_SIZE(msiof0_resources),
+ .resource = msiof0_resources,
+ .archdata = {
+ .hwblk_id = HWBLK_MSIOF0,
+ },
+};
+
+#endif
+
+/* I2C Video/Camera */
+static struct i2c_board_info i2c_camera[] = {
+ {
+ I2C_BOARD_INFO("tw9910", 0x45),
+ },
+ {
+ /* 1st camera */
+ I2C_BOARD_INFO("mt9t112", 0x3c),
+ },
+ {
+ /* 2nd camera */
+ I2C_BOARD_INFO("mt9t112", 0x3c),
+ },
+};
+
+/* tw9910 */
+static int tw9910_power(struct device *dev, int mode)
+{
+ int val = mode ? 0 : 1;
+
+ gpio_set_value(GPIO_PTU2, val);
+ if (mode)
+ mdelay(100);
+
+ return 0;
+}
+
+static struct tw9910_video_info tw9910_info = {
+ .buswidth = SOCAM_DATAWIDTH_8,
+ .mpout = TW9910_MPO_FIELD,
+};
+
+static struct soc_camera_link tw9910_link = {
+ .i2c_adapter_id = 0,
+ .bus_id = 1,
+ .power = tw9910_power,
+ .board_info = &i2c_camera[0],
+ .module_name = "tw9910",
+ .priv = &tw9910_info,
+};
+
+/* mt9t112 */
+static int mt9t112_power1(struct device *dev, int mode)
+{
+ gpio_set_value(GPIO_PTA3, mode);
+ if (mode)
+ mdelay(100);
+
+ return 0;
+}
+
+static struct mt9t112_camera_info mt9t112_info1 = {
+ .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8,
+ .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */
+};
+
+static struct soc_camera_link mt9t112_link1 = {
+ .i2c_adapter_id = 0,
+ .power = mt9t112_power1,
+ .bus_id = 0,
+ .board_info = &i2c_camera[1],
+ .module_name = "mt9t112",
+ .priv = &mt9t112_info1,
+};
+
+static int mt9t112_power2(struct device *dev, int mode)
+{
+ gpio_set_value(GPIO_PTA4, mode);
+ if (mode)
+ mdelay(100);
+
+ return 0;
+}
+
+static struct mt9t112_camera_info mt9t112_info2 = {
+ .flags = MT9T112_FLAG_PCLK_RISING_EDGE | MT9T112_FLAG_DATAWIDTH_8,
+ .divider = { 0x49, 0x6, 0, 6, 0, 9, 9, 6, 0 }, /* for 24MHz */
+};
+
+static struct soc_camera_link mt9t112_link2 = {
+ .i2c_adapter_id = 1,
+ .power = mt9t112_power2,
+ .bus_id = 1,
+ .board_info = &i2c_camera[2],
+ .module_name = "mt9t112",
+ .priv = &mt9t112_info2,
+};
+
+static struct platform_device camera_devices[] = {
+ {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &tw9910_link,
+ },
+ },
+ {
+ .name = "soc-camera-pdrv",
+ .id = 1,
+ .dev = {
+ .platform_data = &mt9t112_link1,
+ },
+ },
+ {
+ .name = "soc-camera-pdrv",
+ .id = 2,
+ .dev = {
+ .platform_data = &mt9t112_link2,
+ },
+ },
+};
+
+/* FSI */
+/*
+ * FSI-B use external clock which came from da7210.
+ * So, we should change parent of fsi
+ */
+#define FCLKBCR 0xa415000c
+static void fsimck_init(struct clk *clk)
+{
+ u32 status = ctrl_inl(clk->enable_reg);
+
+ /* use external clock */
+ status &= ~0x000000ff;
+ status |= 0x00000080;
+
+ ctrl_outl(status, clk->enable_reg);
+}
+
+static struct clk_ops fsimck_clk_ops = {
+ .init = fsimck_init,
+};
+
+static struct clk fsimckb_clk = {
+ .name = "fsimckb_clk",
+ .id = -1,
+ .ops = &fsimck_clk_ops,
+ .enable_reg = (void __iomem *)FCLKBCR,
+ .rate = 0, /* unknown */
+};
+
+struct sh_fsi_platform_info fsi_info = {
+ .portb_flags = SH_FSI_BRS_INV |
+ SH_FSI_OUT_SLAVE_MODE |
+ SH_FSI_IN_SLAVE_MODE |
+ SH_FSI_OFMT(I2S) |
+ SH_FSI_IFMT(I2S),
+};
+
+static struct resource fsi_resources[] = {
+ [0] = {
+ .name = "FSI",
+ .start = 0xFE3C0000,
+ .end = 0xFE3C021d,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 108,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device fsi_device = {
+ .name = "sh_fsi",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(fsi_resources),
+ .resource = fsi_resources,
+ .dev = {
+ .platform_data = &fsi_info,
+ },
+ .archdata = {
+ .hwblk_id = HWBLK_SPU, /* FSI needs SPU hwblk */
+ },
+};
+
static struct platform_device *ecovec_devices[] __initdata = {
&heartbeat_device,
&nor_flash_device,
@@ -503,8 +759,16 @@ static struct platform_device *ecovec_devices[] __initdata = {
&ceu0_device,
&ceu1_device,
&keysc_device,
+#ifdef CONFIG_MFD_SH_MOBILE_SDHI
&sdhi0_device,
&sdhi1_device,
+#else
+ &msiof0_device,
+#endif
+ &camera_devices[0],
+ &camera_devices[1],
+ &camera_devices[2],
+ &fsi_device,
};
#define EEPROM_ADDR 0x50
@@ -560,6 +824,8 @@ extern char ecovec24_sdram_leave_end;
static int __init arch_setup(void)
{
+ struct clk *clk;
+
/* register board specific self-refresh code */
sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
&ecovec24_sdram_enter_start,
@@ -773,7 +1039,8 @@ static int __init arch_setup(void)
gpio_direction_input(GPIO_PTR5);
gpio_direction_input(GPIO_PTR6);
- /* enable SDHI0 (needs DS2.4 set to ON) */
+#ifdef CONFIG_MFD_SH_MOBILE_SDHI
+ /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0WP, NULL);
gpio_request(GPIO_FN_SDHI0CMD, NULL);
@@ -785,7 +1052,7 @@ static int __init arch_setup(void)
gpio_request(GPIO_PTB6, NULL);
gpio_direction_output(GPIO_PTB6, 0);
- /* enable SDHI1 (needs DS2.6,7 set to ON,OFF) */
+ /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
gpio_request(GPIO_FN_SDHI1CD, NULL);
gpio_request(GPIO_FN_SDHI1WP, NULL);
gpio_request(GPIO_FN_SDHI1CMD, NULL);
@@ -799,8 +1066,59 @@ static int __init arch_setup(void)
/* I/O buffer drive ability is high for SDHI1 */
ctrl_outw((ctrl_inw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
+#else
+ /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */
+ gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
+ gpio_request(GPIO_FN_MSIOF0_RXD, NULL);
+ gpio_request(GPIO_FN_MSIOF0_TSCK, NULL);
+ gpio_request(GPIO_PTM4, NULL); /* software CS control of TSYNC pin */
+ gpio_direction_output(GPIO_PTM4, 1); /* active low CS */
+ gpio_request(GPIO_PTB6, NULL); /* 3.3V power control */
+ gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
+ gpio_request(GPIO_PTY6, NULL); /* write protect */
+ gpio_direction_input(GPIO_PTY6);
+ gpio_request(GPIO_PTY7, NULL); /* card detect */
+ gpio_direction_input(GPIO_PTY7);
+
+ spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
+#endif
+
+ /* enable Video */
+ gpio_request(GPIO_PTU2, NULL);
+ gpio_direction_output(GPIO_PTU2, 1);
+
+ /* enable Camera */
+ gpio_request(GPIO_PTA3, NULL);
+ gpio_request(GPIO_PTA4, NULL);
+ gpio_direction_output(GPIO_PTA3, 0);
+ gpio_direction_output(GPIO_PTA4, 0);
+
+ /* enable FSI */
+ gpio_request(GPIO_FN_FSIMCKB, NULL);
+ gpio_request(GPIO_FN_FSIIBSD, NULL);
+ gpio_request(GPIO_FN_FSIOBSD, NULL);
+ gpio_request(GPIO_FN_FSIIBBCK, NULL);
+ gpio_request(GPIO_FN_FSIIBLRCK, NULL);
+ gpio_request(GPIO_FN_FSIOBBCK, NULL);
+ gpio_request(GPIO_FN_FSIOBLRCK, NULL);
+ gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
+
+ /* change parent of FSI B */
+ clk = clk_get(NULL, "fsib_clk");
+ clk_register(&fsimckb_clk);
+ clk_set_parent(clk, &fsimckb_clk);
+ clk_set_rate(clk, 11000);
+ clk_set_rate(&fsimckb_clk, 11000);
+ clk_put(clk);
+
+ gpio_request(GPIO_PTU0, NULL);
+ gpio_direction_output(GPIO_PTU0, 0);
+ mdelay(20);
/* enable I2C device */
+ i2c_register_board_info(0, i2c0_devices,
+ ARRAY_SIZE(i2c0_devices));
+
i2c_register_board_info(1, i2c1_devices,
ARRAY_SIZE(i2c1_devices));
diff --git a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
index 8ccb1cc8b58..e9b970846c4 100644
--- a/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
+++ b/arch/sh/boards/mach-kfr2r09/lcd_wqvga.c
@@ -273,6 +273,12 @@ int kfr2r09_lcd_setup(void *board_data, void *sohandle,
return 0;
}
+void kfr2r09_lcd_start(void *board_data, void *sohandle,
+ struct sh_mobile_lcdc_sys_bus_ops *so)
+{
+ write_memory_start(sohandle, so);
+}
+
#define CTRL_CKSW 0x10
#define CTRL_C10 0x20
#define CTRL_CPSW 0x80
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 87438d6603d..5d7b5d92475 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -19,6 +19,7 @@
#include <linux/input/sh_keysc.h>
#include <linux/i2c.h>
#include <linux/usb/r8a66597.h>
+#include <media/rj54n1cb0c.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
#include <video/sh_mobile_lcdc.h>
@@ -149,6 +150,7 @@ static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
},
.board_cfg = {
.setup_sys = kfr2r09_lcd_setup,
+ .start_transfer = kfr2r09_lcd_start,
.display_on = kfr2r09_lcd_on,
.display_off = kfr2r09_lcd_off,
},
@@ -255,6 +257,9 @@ static struct i2c_board_info kfr2r09_i2c_camera = {
static struct clk *camera_clk;
+/* set VIO_CKO clock to 25MHz */
+#define CEU_MCLK_FREQ 25000000
+
#define DRVCRB 0xA405018C
static int camera_power(struct device *dev, int mode)
{
@@ -267,8 +272,7 @@ static int camera_power(struct device *dev, int mode)
if (IS_ERR(camera_clk))
return PTR_ERR(camera_clk);
- /* set VIO_CKO clock to 25MHz */
- rate = clk_round_rate(camera_clk, 25000000);
+ rate = clk_round_rate(camera_clk, CEU_MCLK_FREQ);
ret = clk_set_rate(camera_clk, rate);
if (ret < 0)
goto eclkrate;
@@ -318,11 +322,17 @@ eclkrate:
return ret;
}
+static struct rj54n1_pdata rj54n1_priv = {
+ .mclk_freq = CEU_MCLK_FREQ,
+ .ioctl_high = false,
+};
+
static struct soc_camera_link rj54n1_link = {
.power = camera_power,
.board_info = &kfr2r09_i2c_camera,
.i2c_adapter_id = 1,
.module_name = "rj54n1cb0c",
+ .priv = &rj54n1_priv,
};
static struct platform_device kfr2r09_camera = {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 9099b6da995..507c77be476 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -432,23 +432,27 @@ static struct i2c_board_info migor_i2c_camera[] = {
static struct ov772x_camera_info ov7725_info = {
.buswidth = SOCAM_DATAWIDTH_8,
- .link = {
- .power = ov7725_power,
- .board_info = &migor_i2c_camera[0],
- .i2c_adapter_id = 0,
- .module_name = "ov772x",
- },
+};
+
+static struct soc_camera_link ov7725_link = {
+ .power = ov7725_power,
+ .board_info = &migor_i2c_camera[0],
+ .i2c_adapter_id = 0,
+ .module_name = "ov772x",
+ .priv = &ov7725_info,
};
static struct tw9910_video_info tw9910_info = {
.buswidth = SOCAM_DATAWIDTH_8,
.mpout = TW9910_MPO_FIELD,
- .link = {
- .power = tw9910_power,
- .board_info = &migor_i2c_camera[1],
- .i2c_adapter_id = 0,
- .module_name = "tw9910",
- }
+};
+
+static struct soc_camera_link tw9910_link = {
+ .power = tw9910_power,
+ .board_info = &migor_i2c_camera[1],
+ .i2c_adapter_id = 0,
+ .module_name = "tw9910",
+ .priv = &tw9910_info,
};
static struct platform_device migor_camera[] = {
@@ -456,13 +460,13 @@ static struct platform_device migor_camera[] = {
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
- .platform_data = &ov7725_info.link,
+ .platform_data = &ov7725_link,
},
}, {
.name = "soc-camera-pdrv",
.id = 1,
.dev = {
- .platform_data = &tw9910_info.link,
+ .platform_data = &tw9910_link,
},
},
};
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 4eb31acfafe..b221b6842b0 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -57,15 +57,16 @@ static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
*/
void __init init_se7722_IRQ(void)
{
- int i;
+ int i, irq;
ctrl_outw(0, IRQ01_MASK); /* disable all irqs */
ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */
for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
- se7722_fpga_irq[i] = create_irq();
- if (se7722_fpga_irq[i] < 0)
+ irq = create_irq();
+ if (irq < 0)
return;
+ se7722_fpga_irq[i] = irq;
set_irq_chip_and_handler_name(se7722_fpga_irq[i],
&se7722_irq_chip,
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 4b0f0c0dc2b..5d0f70b46c9 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -514,6 +514,13 @@ static struct platform_device *ms7724se_devices[] __initdata = {
&sdhi1_cn8_device,
};
+/* I2C device */
+static struct i2c_board_info i2c0_devices[] = {
+ {
+ I2C_BOARD_INFO("ak4642", 0x12),
+ },
+};
+
#define EEPROM_OP 0xBA206000
#define EEPROM_ADR 0xBA206004
#define EEPROM_DATA 0xBA20600C
@@ -575,6 +582,16 @@ extern char ms7724se_sdram_enter_end;
extern char ms7724se_sdram_leave_start;
extern char ms7724se_sdram_leave_end;
+
+static int __init arch_setup(void)
+{
+ /* enable I2C device */
+ i2c_register_board_info(0, i2c0_devices,
+ ARRAY_SIZE(i2c0_devices));
+ return 0;
+}
+arch_initcall(arch_setup);
+
static int __init devices_setup(void)
{
u16 sw = ctrl_inw(SW4140); /* select camera, monitor */
diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig
index 0774924623c..46874704e4e 100644
--- a/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/arch/sh/configs/ecovec24-romimage_defconfig
@@ -203,7 +203,7 @@ CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
-CONFIG_MEMORY_SIZE=0x08000000
+CONFIG_MEMORY_SIZE=0x10000000
CONFIG_29BIT=y
# CONFIG_X2TLB is not set
CONFIG_VSYSCALL=y
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index ac6469718a2..cad918437ca 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -204,7 +204,7 @@ CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_MEMORY_START=0x08000000
-CONFIG_MEMORY_SIZE=0x08000000
+CONFIG_MEMORY_SIZE=0x10000000
CONFIG_29BIT=y
# CONFIG_X2TLB is not set
CONFIG_VSYSCALL=y
diff --git a/arch/sh/configs/rts7751r2d1_defconfig b/arch/sh/configs/rts7751r2d1_defconfig
index f521e82cc19..6f1126b3e48 100644
--- a/arch/sh/configs/rts7751r2d1_defconfig
+++ b/arch/sh/configs/rts7751r2d1_defconfig
@@ -324,7 +324,7 @@ CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
CONFIG_CMDLINE_OVERWRITE=y
# CONFIG_CMDLINE_EXTEND is not set
-CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
#
# Bus options
diff --git a/arch/sh/configs/rts7751r2dplus_defconfig b/arch/sh/configs/rts7751r2dplus_defconfig
index a156cd1e061..9215bbb13d6 100644
--- a/arch/sh/configs/rts7751r2dplus_defconfig
+++ b/arch/sh/configs/rts7751r2dplus_defconfig
@@ -324,7 +324,7 @@ CONFIG_ENTRY_OFFSET=0x00001000
# CONFIG_UBC_WAKEUP is not set
CONFIG_CMDLINE_OVERWRITE=y
# CONFIG_CMDLINE_EXTEND is not set
-CONFIG_CMDLINE="console=tty0 console=ttySC0,115200 root=/dev/sda1 earlyprintk=serial"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 root=/dev/sda1"
#
# Bus options
diff --git a/arch/sh/drivers/pci/fixups-rts7751r2d.c b/arch/sh/drivers/pci/fixups-rts7751r2d.c
index 052b354236d..7898f14d664 100644
--- a/arch/sh/drivers/pci/fixups-rts7751r2d.c
+++ b/arch/sh/drivers/pci/fixups-rts7751r2d.c
@@ -15,7 +15,7 @@
#include <mach/lboxre2.h>
#include <mach/r2d.h>
#include "pci-sh4.h"
-#include <asm/machtypes.h>
+#include <generated/machtypes.h>
#define PCIMCR_MRSET_OFF 0xBFFFFFFF
#define PCIMCR_RFSH_OFF 0xFFFFFFFB
diff --git a/arch/sh/include/asm/.gitignore b/arch/sh/include/asm/.gitignore
deleted file mode 100644
index 378db779fb6..00000000000
--- a/arch/sh/include/asm/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-machtypes.h
diff --git a/arch/sh/include/asm/asm-offsets.h b/arch/sh/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/sh/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index ccb1d93bb04..ac04255022b 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
*/
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 512cd3e9d0c..026dd659a64 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -233,11 +233,17 @@ unsigned long long poke_real_address_q(unsigned long long addr,
* doesn't exist, so everything must go through page tables.
*/
#ifdef CONFIG_MMU
-void __iomem *__ioremap(unsigned long offset, unsigned long size,
- unsigned long flags);
+void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+ unsigned long flags, void *caller);
void __iounmap(void __iomem *addr);
static inline void __iomem *
+__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
+{
+ return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
+}
+
+static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
{
#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
@@ -271,6 +277,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
return __ioremap(offset, size, flags);
}
#else
+#define __ioremap(offset, size, flags) ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset))
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
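
The io.h hunk above turns __ioremap() into a thin inline that forwards to __ioremap_caller() together with __builtin_return_address(0), so each mapping can be attributed to its call site. A small standalone sketch of the same caller-tracking idiom, with names invented for the example and written as ordinary userspace C:

#include <stdio.h>

/* The worker takes an extra "caller" cookie purely for bookkeeping. */
static void *map_region_caller(unsigned long phys, unsigned long size,
			       void *caller)
{
	printf("map %#lx (+%lu bytes) requested from %p\n", phys, size, caller);
	return (void *)phys;			/* placeholder mapping */
}

/* The old entry point survives as an inline that records its call site. */
static inline void *map_region(unsigned long phys, unsigned long size)
{
	return map_region_caller(phys, size, __builtin_return_address(0));
}

int main(void)
{
	map_region(0xa4400000UL, 0x100);
	return 0;
}
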
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index 84dd37761f5..9c30955630f 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
#include <linux/time.h>
-#include <asm/machtypes.h>
+#include <generated/machtypes.h>
struct sh_machine_vector {
void (*mv_setup)(char **cmdline_p);
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index b3543551620..5003ee86f67 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -344,7 +344,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
#ifdef CONFIG_X2TLB
-#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
+#define pte_write(pte) \
+ ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
#else
#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
#endif
@@ -358,7 +359,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
* individually toggled (and user permissions are entirely decoupled from
* kernel permissions), we attempt to couple them a bit more sanely here.
*/
-PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
+PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index a28c9f0053f..bdc0f3b6c56 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -23,10 +23,10 @@
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
-#define __raw_spin_is_locked(x) ((x)->lock <= 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
+#define arch_spin_is_locked(x) ((x)->lock <= 0)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
@@ -34,14 +34,14 @@
*
* We make no fairness assumptions. They have a cost.
*/
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
unsigned long oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_spin_lock \n\t"
+ "movli.l @%2, %0 ! arch_spin_lock \n\t"
"mov %0, %1 \n\t"
"mov #0, %0 \n\t"
"movco.l %0, @%2 \n\t"
@@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__ (
- "mov #1, %0 ! __raw_spin_unlock \n\t"
+ "mov #1, %0 ! arch_spin_unlock \n\t"
"mov.l %0, @%1 \n\t"
: "=&z" (tmp)
: "r" (&lock->lock)
@@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
);
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_spin_trylock \n\t"
+ "movli.l @%2, %0 ! arch_spin_trylock \n\t"
"mov %0, %1 \n\t"
"mov #0, %0 \n\t"
"movco.l %0, @%2 \n\t"
@@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((x)->lock > 0)
+#define arch_read_can_lock(x) ((x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_lock \n\t"
+ "movli.l @%1, %0 ! arch_read_lock \n\t"
"cmp/pl %0 \n\t"
"bf 1b \n\t"
"add #-1, %0 \n\t"
@@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_read_unlock \n\t"
+ "movli.l @%1, %0 ! arch_read_unlock \n\t"
"add #1, %0 \n\t"
"movco.l %0, @%1 \n\t"
"bf 1b \n\t"
@@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%1, %0 ! __raw_write_lock \n\t"
+ "movli.l @%1, %0 ! arch_write_lock \n\t"
"cmp/hs %2, %0 \n\t"
"bf 1b \n\t"
"sub %2, %0 \n\t"
@@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__ (
- "mov.l %1, @%0 ! __raw_write_unlock \n\t"
+ "mov.l %1, @%0 ! arch_write_unlock \n\t"
:
: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
: "t", "memory"
);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_read_trylock \n\t"
+ "movli.l @%2, %0 ! arch_read_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/pl %0 \n\t"
"bf 2f \n\t"
@@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return (oldval > 0);
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, oldval;
__asm__ __volatile__ (
"1: \n\t"
- "movli.l @%2, %0 ! __raw_write_trylock \n\t"
+ "movli.l @%2, %0 ! arch_write_trylock \n\t"
"mov %0, %1 \n\t"
"cmp/hs %3, %0 \n\t"
"bf 2f \n\t"
@@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (oldval > (RW_LOCK_BIAS - 1));
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SH_SPINLOCK_H */
diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
index b4d244e7b60..9b7560db06c 100644
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
typedef struct {
volatile unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
#define RW_LOCK_BIAS 0x01000000
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif
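
The renames above are mechanical: the arch-level lock types and their static initializers gain an arch_/__ARCH_ prefix. A before/after sketch of a static declaration using the macros touched in this hunk (variable names invented for the example):

/* before this series */
raw_spinlock_t  demo_lock = __RAW_SPIN_LOCK_UNLOCKED;
raw_rwlock_t    demo_rw   = __RAW_RW_LOCK_UNLOCKED;

/* after this series */
arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_rwlock_t   demo_rw   = __ARCH_RW_LOCK_UNLOCKED;
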
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index f3fd1b9eb6b..f18c4f9baf2 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -345,8 +345,9 @@
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
+#define __NR_recvmmsg 337
-#define NR_syscalls 337
+#define NR_syscalls 338
#ifdef __KERNEL__
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 343ce8f073e..3e7645d1113 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -385,10 +385,11 @@
#define __NR_pwritev 362
#define __NR_rt_tgsigqueueinfo 363
#define __NR_perf_event_open 364
+#define __NR_recvmmsg 365
#ifdef __KERNEL__
-#define NR_syscalls 365
+#define NR_syscalls 366
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h b/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
index 174374e1954..484ef42c2fb 100644
--- a/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
+++ b/arch/sh/include/mach-kfr2r09/mach/kfr2r09.h
@@ -8,6 +8,8 @@ void kfr2r09_lcd_on(void *board_data);
void kfr2r09_lcd_off(void *board_data);
int kfr2r09_lcd_setup(void *board_data, void *sys_ops_handle,
struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
+void kfr2r09_lcd_start(void *board_data, void *sys_ops_handle,
+ struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
#else
static inline void kfr2r09_lcd_on(void *board_data) {}
static inline void kfr2r09_lcd_off(void *board_data) {}
@@ -16,6 +18,10 @@ static inline int kfr2r09_lcd_setup(void *board_data, void *sys_ops_handle,
{
return -ENODEV;
}
+static inline void kfr2r09_lcd_start(void *board_data, void *sys_ops_handle,
+ struct sh_mobile_lcdc_sys_bus_ops *sys_ops)
+{
+}
#endif
#endif /* __ASM_SH_KFR2R09_H */
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 0471a3eb25e..0d587da1ef1 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -22,11 +22,10 @@ obj-y := debugtraps.o dma-nommu.o dumpstack.o \
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o early_printk.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index c1508a90fc6..9282d965a1b 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -17,16 +17,17 @@
* for more details.
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
#include <linux/topology.h>
static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
{
struct irq_chip *chip = get_irq_chip(irq);
- return (void *)((char *)chip - offsetof(struct ipr_desc, chip));
+ return container_of(chip, struct ipr_desc, chip);
}
static void disable_ipr_irq(unsigned int irq)
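
The get_ipr_desc() change above replaces open-coded offsetof() pointer arithmetic with container_of(), which performs the same "embedded member back to enclosing structure" step. A self-contained userspace sketch of the idiom; the struct names are invented, and container_of is re-defined locally so the example compiles on its own:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct chip { int id; };

struct desc {
	int		priority;
	struct chip	chip;	/* embedded member handed out to callers */
};

int main(void)
{
	struct desc d = { .priority = 3, .chip = { .id = 42 } };
	struct chip *c = &d.chip;

	/* Recover the enclosing desc from the embedded chip pointer. */
	struct desc *back = container_of(c, struct desc, chip);

	printf("priority=%d id=%d\n", back->priority, back->chip.id);
	return 0;
}
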
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 8555c05e866..114c7cee718 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -59,32 +59,48 @@ static struct intc_prio_reg prio_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL,
NULL, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xf8400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 88, 88, 88, 88 },
- }, {
- .mapbase = 0xf8410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 92, 92, 92, 92 },
- }, {
- .mapbase = 0xf8420000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 96, 96, 96, 96 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xf8400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 88, 88, 88, 88 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xf8410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 92, 92, 92, 92 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xf8420000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 96, 96, 96, 96 },
+};
+
+static struct platform_device scif2_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 2,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif2_platform_data,
},
};
@@ -176,7 +192,9 @@ static struct platform_device cmt1_device = {
};
static struct platform_device *sh7619_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
&eth_device,
&cmt0_device,
&cmt1_device,
@@ -195,6 +213,9 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7619_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
&cmt0_device,
&cmt1_device,
};
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index b6737644531..8f669dc9b0d 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -207,27 +207,23 @@ static struct platform_device mtu2_2_device = {
.num_resources = ARRAY_SIZE(mtu2_2_resources),
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xff804000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 220, 220, 220, 220 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xff804000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 220, 220, 220, 220 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif0_platform_data,
},
};
static struct platform_device *mxg_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
@@ -246,6 +242,7 @@ void __init plat_irq_setup(void)
}
static struct platform_device *mxg_early_devices[] __initdata = {
+ &scif0_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index fbde5b75deb..4ccfeb59eb1 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -177,57 +177,123 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 180, 180, 180, 180 }
- }, {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 184, 184, 184, 184 }
- }, {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 188, 188, 188, 188 }
- }, {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 192, 192, 192, 192 }
- }, {
- .mapbase = 0xfffea000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 196, 196, 196, 196 }
- }, {
- .mapbase = 0xfffea800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 200, 200, 200, 200 }
- }, {
- .mapbase = 0xfffeb000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 204, 204, 204, 204 }
- }, {
- .mapbase = 0xfffeb800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 208, 208, 208, 208 }
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 180, 180, 180, 180 }
+};
+
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 184, 184, 184, 184 }
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 188, 188, 188, 188 }
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 192, 192, 192, 192 }
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xfffea000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 196, 196, 196, 196 }
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xfffea800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 200, 200, 200, 200 }
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .mapbase = 0xfffeb000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 204, 204, 204, 204 }
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .mapbase = 0xfffeb800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 208, 208, 208, 208 }
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif7_platform_data,
},
};
@@ -345,7 +411,14 @@ static struct platform_device mtu2_2_device = {
};
static struct platform_device *sh7201_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
&rtc_device,
&mtu2_0_device,
&mtu2_1_device,
@@ -365,6 +438,14 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7201_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index d3fd536c9a8..3136966cc9b 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -173,37 +173,63 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 192, 192, 192, 192 },
- }, {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 196, 196, 196, 196 },
- }, {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 200, 200, 200, 200 },
- }, {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 204, 204, 204, 204 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 192, 192, 192, 192 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 196, 196, 196, 196 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 200, 200, 200, 200 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 204, 204, 204, 204 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif3_platform_data,
},
};
@@ -354,7 +380,10 @@ static struct platform_device rtc_device = {
};
static struct platform_device *sh7203_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
@@ -375,6 +404,10 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7203_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index a9ccc5e8d9e..064873585a8 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -133,37 +133,63 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 240, 240, 240, 240 },
- }, {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 244, 244, 244, 244 },
- }, {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 248, 248, 248, 248 },
- }, {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 252, 252, 252, 252 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffe8000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 240, 240, 240, 240 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfffe8800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 244, 244, 244, 244 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfffe9000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 248, 248, 248, 248 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfffe9800,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 252, 252, 252, 252 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif3_platform_data,
},
};
@@ -325,7 +351,10 @@ static struct platform_device mtu2_2_device = {
};
static struct platform_device *sh7206_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
@@ -346,6 +375,10 @@ void __init plat_irq_setup(void)
}
static struct platform_device *sh7206_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&cmt0_device,
&cmt1_device,
&mtu2_0_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index c2310598387..7b892d60e3a 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -67,27 +67,33 @@ static struct intc_prio_reg prio_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
NULL, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xa4410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 56, 56 },
- }, {
- .mapbase = 0xa4400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 52, 52 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xa4410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56 },
+};
+
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif1_platform_data,
},
};
@@ -210,10 +216,11 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh7705_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
- &sci_device,
&rtc_device,
};
@@ -225,6 +232,8 @@ static int __init sh7705_devices_setup(void)
arch_initcall(sh7705_devices_setup);
static struct platform_device *sh7705_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 347ab35d069..bc0c4f68c7c 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -106,44 +106,55 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfffffe80,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 23, 23, 23, 0 },
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfffffe80,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 23, 23, 23, 0 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
},
+};
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
- {
- .mapbase = 0xa4000150,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 56, 56, 56 },
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4000150,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56, 56 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
},
+};
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
- {
- .mapbase = 0xa4000140,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_IRDA,
- .irqs = { 52, 52, 52, 52 },
- },
-#endif
- {
- .flags = 0,
- }
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xa4000140,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_IRDA,
+ .irqs = { 52, 52, 52, 52 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif2_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 2,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif2_platform_data,
},
};
+#endif
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
@@ -238,10 +249,19 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh770x_devices[] __initdata = {
+ &scif0_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif1_device,
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif2_device,
+#endif
&tmu0_device,
&tmu1_device,
&tmu2_device,
- &sci_device,
&rtc_device,
};
@@ -253,6 +273,16 @@ static int __init sh770x_devices_setup(void)
arch_initcall(sh770x_devices_setup);
static struct platform_device *sh770x_early_devices[] __initdata = {
+ &scif0_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif1_device,
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif2_device,
+#endif
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 717e90ae109..0845a3ad006 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -96,28 +96,33 @@ static struct platform_device rtc_device = {
},
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xa4400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 52, 52, 52 },
- }, {
- .mapbase = 0xa4410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 56, 56, 56 },
- }, {
-
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52, 52 },
+};
+
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 56, 56, 56 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif1_platform_data,
},
};
@@ -214,10 +219,11 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh7710_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
- &sci_device,
&rtc_device,
};
@@ -229,6 +235,8 @@ static int __init sh7710_devices_setup(void)
arch_initcall(sh7710_devices_setup);
static struct platform_device *sh7710_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 74d8baaf8e9..a718a623109 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -48,28 +48,33 @@ static struct platform_device rtc_device = {
},
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xa4430000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- }, {
- .mapbase = 0xa4438000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- }, {
-
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xa4430000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+};
+
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xa4438000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif1_platform_data,
},
};
@@ -369,6 +374,8 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh7720_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&cmt0_device,
&cmt1_device,
&cmt2_device,
@@ -378,7 +385,6 @@ static struct platform_device *sh7720_devices[] __initdata = {
&tmu1_device,
&tmu2_device,
&rtc_device,
- &sci_device,
&usb_ohci_device,
&usbf_device,
};
@@ -391,6 +397,8 @@ static int __init sh7720_devices_setup(void)
arch_initcall(sh7720_devices_setup);
static struct platform_device *sh7720_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&cmt0_device,
&cmt1_device,
&cmt2_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index de4827df19a..4b733715cdb 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -15,22 +15,18 @@
#include <linux/sh_timer.h>
#include <linux/io.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe80000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif0_platform_data,
},
};
@@ -127,7 +123,7 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh4202_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -141,6 +137,7 @@ static int __init sh4202_devices_setup(void)
arch_initcall(sh4202_devices_setup);
static struct platform_device *sh4202_early_devices[] __initdata = {
+ &scif0_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 1b8b122e8f3..b2a9df1af64 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -35,29 +35,33 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
-#ifndef CONFIG_SH_RTS7751R2D
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 23, 23, 23, 0 },
- }, {
-#endif
- .mapbase = 0xffe80000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 23, 23, 23, 0 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif1_platform_data,
},
};
@@ -221,8 +225,9 @@ static struct platform_device tmu4_device = {
#endif
static struct platform_device *sh7750_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&rtc_device,
- &sci_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -242,6 +247,8 @@ static int __init sh7750_devices_setup(void)
arch_initcall(sh7750_devices_setup);
static struct platform_device *sh7750_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 7fbb7be9284..5b74cc0b43d 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -126,37 +126,63 @@ static struct intc_vect vectors_irq[] __initdata = {
static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfe600000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55, 54 },
- }, {
- .mapbase = 0xfe610000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 72, 73, 75, 74 },
- }, {
- .mapbase = 0xfe620000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 77, 79, 78 },
- }, {
- .mapbase = 0xfe480000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 80, 81, 82, 0 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xfe600000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xfe610000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 72, 73, 75, 74 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfe620000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 77, 79, 78 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfe480000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 80, 81, 82, 0 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif3_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 3,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif3_platform_data,
},
};
@@ -254,7 +280,10 @@ static struct platform_device tmu2_device = {
static struct platform_device *sh7760_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -268,6 +297,10 @@ static int __init sh7760_devices_setup(void)
arch_initcall(sh7760_devices_setup);
static struct platform_device *sh7760_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index ac4d5672ec1..45eb1bfd42c 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -15,6 +15,71 @@
#include <linux/sh_timer.h>
#include <asm/clock.h>
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xffe30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 83, 83, 83, 83 },
+ .clk = "scif3",
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
static struct resource iic0_resources[] = {
[0] = {
.name = "IIC0",
@@ -265,52 +330,17 @@ static struct platform_device tmu2_device = {
.num_resources = ARRAY_SIZE(tmu2_resources),
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
- }, {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
- }, {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
- }, {
- .mapbase = 0xffe30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 83, 83, 83, 83 },
- .clk = "scif3",
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct platform_device *sh7343_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&iic0_device,
&iic1_device,
- &sci_device,
&vpu_device,
&veu_device,
&jpu_device,
@@ -328,6 +358,10 @@ static int __init sh7343_devices_setup(void)
arch_initcall(sh7343_devices_setup);
static struct platform_device *sh7343_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 4a9010bf4fd..c494c193e3b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -18,6 +18,22 @@
#include <linux/usb/r8a66597.h>
#include <asm/clock.h>
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
static struct resource iic_resources[] = {
[0] = {
.name = "IIC",
@@ -276,33 +292,13 @@ static struct platform_device tmu2_device = {
.num_resources = ARRAY_SIZE(tmu2_resources),
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct platform_device *sh7366_devices[] __initdata = {
+ &scif0_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&iic_device,
- &sci_device,
&usb_host_device,
&vpu_device,
&veu0_device,
@@ -321,6 +317,7 @@ static int __init sh7366_devices_setup(void)
arch_initcall(sh7366_devices_setup);
static struct platform_device *sh7366_early_devices[] __initdata = {
+ &scif0_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 5491b094cf0..b5335b5e309 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -20,6 +20,55 @@
#include <asm/dma-sh.h>
#include <cpu/sh7722.h>
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa465fec0,
@@ -339,41 +388,6 @@ static struct platform_device tmu2_device = {
},
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
- },
- {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
- },
- {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
- },
- {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct sh_dmae_pdata dma_platform_data = {
.mode = 0,
};
@@ -387,6 +401,9 @@ static struct platform_device dma_device = {
};
static struct platform_device *sh7722_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
@@ -394,7 +411,6 @@ static struct platform_device *sh7722_devices[] __initdata = {
&rtc_device,
&usbf_device,
&iic_device,
- &sci_device,
&vpu_device,
&veu_device,
&jpu_device,
@@ -413,6 +429,9 @@ static int __init sh7722_devices_setup(void)
arch_initcall(sh7722_devices_setup);
static struct platform_device *sh7722_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 4caa5a7ca86..772b9265d0e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -20,6 +20,103 @@
#include <asm/mmzone.h>
#include <cpu/sh7723.h>
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xa4e30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 56, 56, 56, 56 },
+ .clk = "scif3",
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xa4e40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 88, 88, 88, 88 },
+ .clk = "scif4",
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xa4e50000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 109, 109, 109, 109 },
+ .clk = "scif5",
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
static struct uio_info vpu_platform_data = {
.name = "VPU5",
.version = "0",
@@ -348,56 +445,6 @@ static struct platform_device tmu5_device = {
},
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
- },{
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
- },{
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
- },{
- .mapbase = 0xa4e30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
- },{
- .mapbase = 0xa4e40000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
- },{
- .mapbase = 0xa4e50000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa465fec0,
@@ -488,6 +535,12 @@ static struct platform_device iic_device = {
};
static struct platform_device *sh7723_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
@@ -495,7 +548,6 @@ static struct platform_device *sh7723_devices[] __initdata = {
&tmu3_device,
&tmu4_device,
&tmu5_device,
- &sci_device,
&rtc_device,
&iic_device,
&sh7723_usb_host_device,
@@ -516,6 +568,12 @@ static int __init sh7723_devices_setup(void)
arch_initcall(sh7723_devices_setup);
static struct platform_device *sh7723_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 845e89c936e..a52f35117e8 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -27,53 +27,99 @@
#include <cpu/sh7724.h>
/* Serial */
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- .clk = "scif0",
- }, {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- .clk = "scif1",
- }, {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
- .clk = "scif2",
- }, {
- .mapbase = 0xa4e30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 56, 56, 56, 56 },
- .clk = "scif3",
- }, {
- .mapbase = 0xa4e40000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 88, 88, 88, 88 },
- .clk = "scif4",
- }, {
- .mapbase = 0xa4e50000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIFA,
- .irqs = { 109, 109, 109, 109 },
- .clk = "scif5",
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 80, 80, 80 },
+ .clk = "scif0",
+};
+
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 81, 81, 81, 81 },
+ .clk = "scif1",
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe20000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 82, 82, 82, 82 },
+ .clk = "scif2",
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xa4e30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 56, 56, 56, 56 },
+ .clk = "scif3",
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xa4e40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 88, 88, 88, 88 },
+ .clk = "scif4",
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xa4e50000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIFA,
+ .irqs = { 109, 109, 109, 109 },
+ .clk = "scif5",
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif5_platform_data,
},
};
@@ -590,6 +636,12 @@ static struct platform_device spu1_device = {
};
static struct platform_device *sh7724_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
@@ -597,7 +649,6 @@ static struct platform_device *sh7724_devices[] __initdata = {
&tmu3_device,
&tmu4_device,
&tmu5_device,
- &sci_device,
&rtc_device,
&iic0_device,
&iic1_device,
@@ -624,6 +675,12 @@ static int __init sh7724_devices_setup(void)
arch_initcall(sh7724_devices_setup);
static struct platform_device *sh7724_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
&cmt_device,
&tmu0_device,
&tmu1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index c470e15f2e0..37e32efbbaa 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -17,6 +17,51 @@
#include <linux/mm.h>
#include <linux/sh_timer.h>
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xfe4b0000, /* SCIF2 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xfe4c0000, /* SCIF3 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xfe4d0000, /* SCIF4 */
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 104, 104, 104, 104 },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
@@ -79,39 +124,12 @@ static struct platform_device tmu1_device = {
.num_resources = ARRAY_SIZE(tmu1_resources),
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfe4b0000, /* SCIF2 */
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
- }, {
- .mapbase = 0xfe4c0000, /* SCIF3 */
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 76, 76, 76 },
- }, {
- .mapbase = 0xfe4d0000, /* SCIF4 */
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 104, 104, 104, 104 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct platform_device *sh7757_devices[] __initdata = {
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
&tmu0_device,
&tmu1_device,
- &sci_device,
};
static int __init sh7757_devices_setup(void)
@@ -121,6 +139,20 @@ static int __init sh7757_devices_setup(void)
}
arch_initcall(sh7757_devices_setup);
+static struct platform_device *sh7757_early_devices[] __initdata = {
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &tmu0_device,
+ &tmu1_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7757_early_devices,
+ ARRAY_SIZE(sh7757_early_devices));
+}
+
enum {
UNUSED = 0,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 4659fff6b84..6aba26fec41 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -16,6 +16,51 @@
#include <linux/io.h>
#include <linux/serial_sci.h>
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe08000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 104, 104, 104, 104 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffe80000,
@@ -36,35 +81,6 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
- }, {
- .mapbase = 0xffe08000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 76, 76, 76 },
- }, {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 104, 104, 104, 104 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xffec8000,
@@ -297,6 +313,9 @@ static struct platform_device tmu5_device = {
};
static struct platform_device *sh7763_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -304,7 +323,6 @@ static struct platform_device *sh7763_devices[] __initdata = {
&tmu4_device,
&tmu5_device,
&rtc_device,
- &sci_device,
&usb_ohci_device,
&usbf_device,
};
@@ -317,6 +335,9 @@ static int __init sh7763_devices_setup(void)
arch_initcall(sh7763_devices_setup);
static struct platform_device *sh7763_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index eead08d89d3..c1643bc9590 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -14,67 +14,153 @@
#include <linux/sh_timer.h>
#include <linux/io.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xff923000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 61, 61, 61, 61 },
- }, {
- .mapbase = 0xff924000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 62, 62, 62, 62 },
- }, {
- .mapbase = 0xff925000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 63, 63, 63, 63 },
- }, {
- .mapbase = 0xff926000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 64, 64, 64, 64 },
- }, {
- .mapbase = 0xff927000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 65, 65, 65, 65 },
- }, {
- .mapbase = 0xff928000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 66, 66, 66, 66 },
- }, {
- .mapbase = 0xff929000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 67, 67, 67, 67 },
- }, {
- .mapbase = 0xff92a000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 68, 68, 68, 68 },
- }, {
- .mapbase = 0xff92b000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 69, 69, 69, 69 },
- }, {
- .mapbase = 0xff92c000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 70, 70, 70, 70 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xff923000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 61, 61, 61, 61 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xff924000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 62, 62, 62, 62 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xff925000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 63, 63, 63, 63 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xff926000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 64, 64, 64, 64 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xff927000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 65, 65, 65, 65 },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xff928000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 66, 66, 66, 66 },
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .mapbase = 0xff929000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 67, 67, 67, 67 },
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .mapbase = 0xff92a000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 68, 68, 68, 68 },
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct plat_sci_port scif8_platform_data = {
+ .mapbase = 0xff92b000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 69, 69, 69, 69 },
+};
+
+static struct platform_device scif8_device = {
+ .name = "sh-sci",
+ .id = 8,
+ .dev = {
+ .platform_data = &scif8_platform_data,
+ },
+};
+
+static struct plat_sci_port scif9_platform_data = {
+ .mapbase = 0xff92c000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 70, 70, 70, 70 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif9_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 9,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif9_platform_data,
},
};
@@ -351,6 +437,16 @@ static struct platform_device tmu8_device = {
};
static struct platform_device *sh7770_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &scif8_device,
+ &scif9_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -360,7 +456,6 @@ static struct platform_device *sh7770_devices[] __initdata = {
&tmu6_device,
&tmu7_device,
&tmu8_device,
- &sci_device,
};
static int __init sh7770_devices_setup(void)
@@ -371,6 +466,16 @@ static int __init sh7770_devices_setup(void)
arch_initcall(sh7770_devices_setup);
static struct platform_device *sh7770_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &scif8_device,
+ &scif9_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 12ff56f19c5..c310558490d 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -15,6 +15,36 @@
#include <linux/sh_timer.h>
#include <asm/dma-sh.h>
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 76, 76, 76 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
@@ -217,30 +247,6 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
- }, {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 76, 76, 76 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct sh_dmae_pdata dma_platform_data = {
.mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1),
};
@@ -254,6 +260,8 @@ static struct platform_device dma_device = {
};
static struct platform_device *sh7780_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -261,7 +269,6 @@ static struct platform_device *sh7780_devices[] __initdata = {
&tmu4_device,
&tmu5_device,
&rtc_device,
- &sci_device,
&dma_device,
};
@@ -271,8 +278,9 @@ static int __init sh7780_devices_setup(void)
ARRAY_SIZE(sh7780_devices));
}
arch_initcall(sh7780_devices_setup);
-
static struct platform_device *sh7780_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 7f6c718b6c3..ef26ebda6e8 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -16,6 +16,102 @@
#include <linux/sh_timer.h>
#include <asm/mmzone.h>
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffea0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 40, 40, 40 },
+ .clk = "scif_fck",
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffeb0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 44, 44, 44, 44 },
+ .clk = "scif_fck",
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffec0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 60, 60, 60, 60 },
+ .clk = "scif_fck",
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xffed0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 61, 61, 61, 61 },
+ .clk = "scif_fck",
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xffee0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 62, 62, 62, 62 },
+ .clk = "scif_fck",
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xffef0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 63, 63, 63, 63 },
+ .clk = "scif_fck",
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
static struct sh_timer_config tmu0_platform_data = {
.name = "TMU0",
.channel_offset = 0x04,
@@ -198,64 +294,19 @@ static struct platform_device tmu5_device = {
.num_resources = ARRAY_SIZE(tmu5_resources),
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffea0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 40, 40, 40 },
- .clk = "scif_fck",
- }, {
- .mapbase = 0xffeb0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 44, 44, 44 },
- .clk = "scif_fck",
- }, {
- .mapbase = 0xffec0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 60, 60, 60, 60 },
- .clk = "scif_fck",
- }, {
- .mapbase = 0xffed0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 61, 61, 61, 61 },
- .clk = "scif_fck",
- }, {
- .mapbase = 0xffee0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 62, 62, 62, 62 },
- .clk = "scif_fck",
- }, {
- .mapbase = 0xffef0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 63, 63, 63, 63 },
- .clk = "scif_fck",
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct platform_device *sh7785_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&tmu3_device,
&tmu4_device,
&tmu5_device,
- &sci_device,
};
static int __init sh7785_devices_setup(void)
@@ -266,6 +317,12 @@ static int __init sh7785_devices_setup(void)
arch_initcall(sh7785_devices_setup);
static struct platform_device *sh7785_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 0104a8ec536..71673487ace 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -23,51 +23,96 @@
#include <linux/sh_timer.h>
#include <asm/mmzone.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffea0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffea0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
},
- /*
- * The rest of these all have multiplexed IRQs
- */
- {
- .mapbase = 0xffeb0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 44, 44, 44 },
- }, {
- .mapbase = 0xffec0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 50, 50, 50, 50 },
- }, {
- .mapbase = 0xffed0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 51, 51, 51, 51 },
- }, {
- .mapbase = 0xffee0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 52, 52, 52 },
- }, {
- .mapbase = 0xffef0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 53, 53, 53, 53 },
- }, {
- .flags = 0,
- }
};
-static struct platform_device sci_device = {
+/*
+ * The rest of these all have multiplexed IRQs
+ */
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffeb0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 44, 44, 44, 44 },
+};
+
+static struct platform_device scif1_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffec0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 50, 50, 50, 50 },
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .mapbase = 0xffed0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 51, 51, 51, 51 },
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .mapbase = 0xffee0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 52, 52, 52 },
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .mapbase = 0xffef0000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 53, 53, 53, 53 },
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif5_platform_data,
},
};
@@ -459,6 +504,12 @@ static struct platform_device usb_ohci_device = {
};
static struct platform_device *sh7786_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -474,7 +525,6 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
};
static struct platform_device *sh7786_devices[] __initdata = {
- &sci_device,
&usb_ohci_device,
};
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index c7ba9166e18..780ba17a559 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -24,32 +24,48 @@
* silicon in the first place, we just refuse to deal with the port at
* all rather than adding infrastructure to hack around it.
*/
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffc30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
- }, {
- .mapbase = 0xffc40000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 45, 47, 46 },
- }, {
- .mapbase = 0xffc60000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55, 54 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = 0xffc30000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .mapbase = 0xffc40000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 44, 45, 47, 46 },
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .mapbase = 0xffc60000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif2_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 2,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif2_platform_data,
},
};
@@ -236,6 +252,9 @@ static struct platform_device tmu5_device = {
};
static struct platform_device *shx3_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
@@ -244,21 +263,10 @@ static struct platform_device *shx3_early_devices[] __initdata = {
&tmu5_device,
};
-static struct platform_device *shx3_devices[] __initdata = {
- &sci_device,
-};
-
static int __init shx3_devices_setup(void)
{
- int ret;
-
- ret = platform_add_devices(shx3_early_devices,
+ return platform_add_devices(shx3_early_devices,
ARRAY_SIZE(shx3_early_devices));
- if (unlikely(ret != 0))
- return ret;
-
- return platform_add_devices(shx3_devices,
- ARRAY_SIZE(shx3_devices));
}
arch_initcall(shx3_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index dd4f51ffb50..4648ccee6c4 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -34,7 +34,7 @@ static union sh_fpu_union init_fpuregs = {
}
};
-void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk)
{
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
@@ -153,7 +153,7 @@ do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
enable_fpu();
if (last_task_used_math != NULL)
/* Other processes fpu state, save away */
- save_fpu(last_task_used_math, regs);
+ save_fpu(last_task_used_math);
last_task_used_math = current;
if (used_math()) {
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index 6a0f82f7003..e7a3c1e4b60 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -16,22 +16,18 @@
#include <linux/sh_timer.h>
#include <asm/addrspace.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000,
- .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
- .type = PORT_SCIF,
- .irqs = { 39, 40, 42, 0 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .mapbase = PHYS_PERIPHERAL_BLOCK + 0x01030000,
+ .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
+ .type = PORT_SCIF,
+ .irqs = { 39, 40, 42, 0 },
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif0_platform_data,
},
};
@@ -164,13 +160,13 @@ static struct platform_device tmu2_device = {
};
static struct platform_device *sh5_early_devices[] __initdata = {
+ &scif0_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
static struct platform_device *sh5_devices[] __initdata = {
- &sci_device,
&rtc_device,
};
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 81a46145ffa..f8bb50c6e05 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/delay.h>
-#ifdef CONFIG_SH_STANDARD_BIOS
#include <asm/sh_bios.h>
/*
@@ -57,149 +56,8 @@ static struct console bios_console = {
.flags = CON_PRINTBUFFER,
.index = -1,
};
-#endif
-#ifdef CONFIG_EARLY_SCIF_CONSOLE
-#include <linux/serial_core.h>
-#include "../../../drivers/serial/sh-sci.h"
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721)
-#define EPK_SCSMR_VALUE 0x000
-#define EPK_SCBRR_VALUE 0x00C
-#define EPK_FIFO_SIZE 64
-#define EPK_FIFO_BITS (0x7f00 >> 8)
-#else
-#define EPK_FIFO_SIZE 16
-#define EPK_FIFO_BITS (0x1f00 >> 8)
-#endif
-
-static struct uart_port scif_port = {
- .type = PORT_SCIF,
- .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT,
- .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
-};
-
-static void scif_sercon_putc(int c)
-{
- while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE))
- ;
-
- sci_in(&scif_port, SCxSR);
- sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40));
- sci_out(&scif_port, SCxTDR, c);
-
- while ((sci_in(&scif_port, SCxSR) & 0x40) == 0)
- ;
-
- if (c == '\n')
- scif_sercon_putc('\r');
-}
-
-static void scif_sercon_write(struct console *con, const char *s,
- unsigned count)
-{
- while (count-- > 0)
- scif_sercon_putc(*s++);
-}
-
-static int __init scif_sercon_setup(struct console *con, char *options)
-{
- con->cflag = CREAD | HUPCL | CLOCAL | B115200 | CS8;
-
- return 0;
-}
-
-static struct console scif_console = {
- .name = "sercon",
- .write = scif_sercon_write,
- .setup = scif_sercon_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-#if !defined(CONFIG_SH_STANDARD_BIOS)
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721)
-static void scif_sercon_init(char *s)
-{
- sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */
- sci_out(&scif_port, SCFCR, 0x4006); /* reset */
- sci_out(&scif_port, SCSCR, 0x0000); /* select internal clock */
- sci_out(&scif_port, SCSMR, EPK_SCSMR_VALUE);
- sci_out(&scif_port, SCBRR, EPK_SCBRR_VALUE);
-
- mdelay(1); /* wait 1-bit time */
-
- sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */
- sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */
-}
-#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
-#define DEFAULT_BAUD 115200
-/*
- * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
- * devices that aren't using sh-ipl+g.
- */
-static void scif_sercon_init(char *s)
-{
- struct uart_port *port = &scif_port;
- unsigned baud = DEFAULT_BAUD;
- unsigned int status;
- char *e;
-
- if (*s == ',')
- ++s;
-
- if (*s) {
- /* ignore ioport/device name */
- s += strcspn(s, ",");
- if (*s == ',')
- s++;
- }
-
- if (*s) {
- baud = simple_strtoul(s, &e, 0);
- if (baud == 0 || s == e)
- baud = DEFAULT_BAUD;
- }
-
- do {
- status = sci_in(port, SCxSR);
- } while (!(status & SCxSR_TEND(port)));
-
- sci_out(port, SCSCR, 0); /* TE=0, RE=0 */
- sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
- sci_out(port, SCSMR, 0);
-
- /* Set baud rate */
- sci_out(port, SCBRR, (CONFIG_SH_PCLK_FREQ + 16 * baud) /
- (32 * baud) - 1);
- udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */
-
- sci_out(port, SCSPTR, 0);
- sci_out(port, SCxSR, 0x60);
- sci_out(port, SCLSR, 0);
-
- sci_out(port, SCFCR, 0);
- sci_out(port, SCSCR, 0x30); /* TE=1, RE=1 */
-}
-#endif /* defined(CONFIG_CPU_SUBTYPE_SH7720) */
-#endif /* !defined(CONFIG_SH_STANDARD_BIOS) */
-#endif /* CONFIG_EARLY_SCIF_CONSOLE */
-
-/*
- * Setup a default console, if more than one is compiled in, rely on the
- * earlyprintk= parsing to give priority.
- */
-static struct console *early_console =
-#ifdef CONFIG_SH_STANDARD_BIOS
- &bios_console
-#elif defined(CONFIG_EARLY_SCIF_CONSOLE)
- &scif_console
-#else
- NULL
-#endif
- ;
+static struct console *early_console;
static int __init setup_early_printk(char *buf)
{
@@ -211,21 +69,8 @@ static int __init setup_early_printk(char *buf)
if (strstr(buf, "keep"))
keep_early = 1;
-#ifdef CONFIG_SH_STANDARD_BIOS
if (!strncmp(buf, "bios", 4))
early_console = &bios_console;
-#endif
-#if defined(CONFIG_EARLY_SCIF_CONSOLE)
- if (!strncmp(buf, "serial", 6)) {
- early_console = &scif_console;
-
-#if !defined(CONFIG_SH_STANDARD_BIOS)
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3)
- scif_sercon_init(buf + 6);
-#endif
-#endif
- }
-#endif
if (likely(early_console)) {
if (keep_early)
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index b6f41c109be..a48cdedc73b 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -401,82 +401,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
-
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;
-static struct syscall_metadata **syscalls_metadata;
-
-static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
-{
- struct syscall_metadata *start;
- struct syscall_metadata *stop;
- char str[KSYM_SYMBOL_LEN];
-
-
- start = (struct syscall_metadata *)__start_syscalls_metadata;
- stop = (struct syscall_metadata *)__stop_syscalls_metadata;
- kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);
-
- for ( ; start < stop; start++) {
- if (start->name && !strcmp(start->name, str))
- return start;
- }
-
- return NULL;
-}
-
-struct syscall_metadata *syscall_nr_to_meta(int nr)
-{
- if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
- return NULL;
-
- return syscalls_metadata[nr];
-}
-
-int syscall_name_to_nr(char *name)
-{
- int i;
-
- if (!syscalls_metadata)
- return -1;
- for (i = 0; i < NR_syscalls; i++)
- if (syscalls_metadata[i])
- if (!strcmp(syscalls_metadata[i]->name, name))
- return i;
- return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
- syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
-{
- syscalls_metadata[num]->exit_id = id;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
+unsigned long __init arch_syscall_addr(int nr)
{
- int i;
- struct syscall_metadata *meta;
- unsigned long **psys_syscall_table = &sys_call_table;
-
- syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
- FTRACE_SYSCALL_MAX, GFP_KERNEL);
- if (!syscalls_metadata) {
- WARN_ON(1);
- return -ENOMEM;
- }
-
- for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
- meta = find_syscall_meta(psys_syscall_table[i]);
- syscalls_metadata[i] = meta;
- }
-
- return 0;
+ return (unsigned long)sys_call_table[nr];
}
-arch_initcall(arch_init_ftrace_syscalls);
#endif /* CONFIG_FTRACE_SYSCALLS */
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index e1913f28f41..d2d41d04665 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
#endif
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 359b8a2f4d2..31f80c61b03 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -404,7 +404,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
if (fpvalid) {
if (current == last_task_used_math) {
enable_fpu();
- save_fpu(tsk, regs);
+ save_fpu(tsk);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
@@ -431,7 +431,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_SH_FPU
if(last_task_used_math == current) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 952da83903d..873ebdc4f98 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -82,7 +82,7 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
if (last_task_used_math == task) {
enable_fpu();
- save_fpu(task, regs);
+ save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
@@ -118,7 +118,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
set_stopped_child_used_math(task);
} else if (last_task_used_math == task) {
enable_fpu();
- save_fpu(task, regs);
+ save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 5a947a2567e..8b0e69792cf 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -423,6 +423,9 @@ void __init setup_arch(char **cmdline_p)
plat_early_device_setup();
+ /* Let earlyprintk output early console messages */
+ early_platform_driver_probe("earlyprintk", 1, 1);
+
sh_mv_setup();
/*
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index feb3dddd319..ce76dbdef29 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -314,7 +314,7 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
if (current == last_task_used_math) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 8aa5d1ceaf1..71399cde03b 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -28,37 +28,13 @@
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
-static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, int fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off)
{
if (off & ~PAGE_MASK)
return -EINVAL;
- return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
@@ -74,7 +50,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
pgoff >>= PAGE_SHIFT - 12;
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/*
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 19fd11dd987..4bd5a114695 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -353,3 +353,4 @@ ENTRY(sys_call_table)
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
+ .long sys_recvmmsg
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 3da5a125d88..86639beac3a 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -452,12 +452,18 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
rm = regs->regs[index];
/* shout about fixups */
- if (!expected && printk_ratelimit())
- printk(KERN_NOTICE "Fixing up unaligned %s access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- user_mode(regs) ? "userspace" : "kernel",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
+ if (!expected) {
+ if (user_mode(regs) && (se_usermode & 1) && printk_ratelimit())
+ pr_notice("Fixing up unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, task_pid_nr(current),
+ (void *)regs->pc, instruction);
+ else if (se_kernmode_warn && printk_ratelimit())
+ pr_notice("Fixing up unaligned kernel access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ current->comm, task_pid_nr(current),
+ (void *)regs->pc, instruction);
+ }
ret = -EFAULT;
switch (instruction&0xF000) {
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 75c0cbe2eda..d86f5315a0c 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -600,7 +600,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
@@ -673,7 +673,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index f36a08bf3d5..560ddb6bc8a 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -256,8 +256,7 @@ static void sh4_flush_cache_page(void *args)
address = (unsigned long)vaddr;
}
- if (pages_do_alias(address, phys))
- flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+ flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
(address & shm_align_mask), phys);
if (vma->vm_flags & VM_EXEC)
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index a86eaa9d75a..2141befb4f9 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -33,10 +33,10 @@
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
- unsigned long flags)
+void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
+ unsigned long flags, void *caller)
{
- struct vm_struct * area;
+ struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr;
pgprot_t pgprot;
@@ -67,7 +67,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
/*
* Ok, go for it..
*/
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
area->phys_addr = phys_addr;
@@ -103,7 +103,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
return (void __iomem *)(offset + (char *)orig_addr);
}
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_caller);
void __iounmap(void __iomem *addr)
{
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
index b16843d02b7..ef434657d42 100644
--- a/arch/sh/mm/ioremap_64.c
+++ b/arch/sh/mm/ioremap_64.c
@@ -258,15 +258,15 @@ static void shmedia_unmapioaddr(unsigned long vaddr)
pte_clear(&init_mm, vaddr, ptep);
}
-void __iomem *__ioremap(unsigned long offset, unsigned long size,
- unsigned long flags)
+void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+ unsigned long flags, void *caller)
{
char name[14];
sprintf(name, "phys_%08x", (u32)offset);
return shmedia_alloc_io(offset, size, name, flags);
}
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__ioremap_caller);
void __iounmap(void __iomem *virtual)
{
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index d2984fa42d3..afeb710ec5c 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -54,7 +54,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) && (addr & shm_align_mask))
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
return -EINVAL;
return addr;
}
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 6c524446c0f..422e9272187 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -28,7 +28,7 @@ void __init setup_memory(void)
{
unsigned long free_pfn = PFN_UP(__pa(_end));
u64 base = min_low_pfn << PAGE_SHIFT;
- u64 size = (max_low_pfn << PAGE_SHIFT) - min_low_pfn;
+ u64 size = (max_low_pfn << PAGE_SHIFT) - base;
lmb_add(base, size);
@@ -38,6 +38,15 @@ void __init setup_memory(void)
(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
/*
+ * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+ */
+ if (CONFIG_ZERO_PAGE_OFFSET != 0)
+ lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+ lmb_analyze();
+ lmb_dump_all();
+
+ /*
* Node 0 sets up its pgdat at the first available pfn,
* and bumps it up before setting up the bootmem allocator.
*/
@@ -71,7 +80,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Node-local pgdat */
NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
- SMP_CACHE_BYTES, end_pfn));
+ SMP_CACHE_BYTES, end));
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
@@ -81,7 +90,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Node-local bootmap */
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
- PAGE_SIZE, end_pfn);
+ PAGE_SIZE, end);
init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
start_pfn, end_pfn);
diff --git a/arch/sh/tools/Makefile b/arch/sh/tools/Makefile
index 567516b58ac..558a56bcc7c 100644
--- a/arch/sh/tools/Makefile
+++ b/arch/sh/tools/Makefile
@@ -10,7 +10,7 @@
# Shamelessly cloned from ARM.
#
-include/asm-sh/machtypes.h: $(src)/gen-mach-types $(src)/mach-types
+include/generated/machtypes.h: $(src)/gen-mach-types $(src)/mach-types
@echo ' Generating $@'
- $(Q)if [ ! -d include/asm-sh ]; then mkdir -p include/asm-sh; fi
+ $(Q)mkdir -p $(dir $@)
$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
diff --git a/arch/sh/tools/gen-mach-types b/arch/sh/tools/gen-mach-types
index 65161e36835..f5ff7c5d891 100644
--- a/arch/sh/tools/gen-mach-types
+++ b/arch/sh/tools/gen-mach-types
@@ -1,6 +1,6 @@
#!/bin/awk
#
-# Awk script to generate include/asm-sh/machtypes.h
+# Awk script to generate include/generated/machtypes.h
# Heavily based on arch/arm/tools/gen-mach-types
#
BEGIN { nr = 0 }
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 33ac1a9ac88..108197ac0d5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC64
select HAVE_SYSCALL_WRAPPERS
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_SYSCALL_TRACEPOINTS
select USE_GENERIC_SMP_HELPERS if SMP
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 90d5fe223a7..9d3c889718a 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -33,4 +33,18 @@ config FRAME_POINTER
depends on MCOUNT
default y
+config DEBUG_STRICT_USER_COPY_CHECKS
+ bool "Strict copy size checks"
+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+ ---help---
+ Enabling this option turns a certain set of sanity checks for user
+ copy operations into compile time failures.
+
+ The copy_from_user() etc checks are there to help test if there
+ are sufficient security checks on the length argument of
+ the copy operation, by having gcc prove that the argument is
+ within bounds.
+
+ If unsure, or if you run an older (pre 4.4) gcc, say N.
+
endmenu
diff --git a/arch/sparc/include/asm/asm-offsets.h b/arch/sparc/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/sparc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
index 381a1b5256d..4269ca6ad18 100644
--- a/arch/sparc/include/asm/elf_32.h
+++ b/arch/sparc/include/asm/elf_32.h
@@ -104,8 +104,6 @@ typedef struct {
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
-#define USE_ELF_CORE_DUMP
-
#define ELF_EXEC_PAGESIZE 4096
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index d42e393078c..ff66bb88537 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -152,7 +152,6 @@ typedef struct {
(x)->e_machine == EM_SPARC32PLUS)
#define compat_start_thread start_thread32
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/asm/fcntl.h
index d4d9c9d852c..38f37b333cc 100644
--- a/arch/sparc/include/asm/fcntl.h
+++ b/arch/sparc/include/asm/fcntl.h
@@ -1,14 +1,12 @@
#ifndef _SPARC_FCNTL_H
#define _SPARC_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_APPEND 0x0008
#define FASYNC 0x0040 /* fcntl, for BSD compatibility */
#define O_CREAT 0x0200 /* not fcntl */
#define O_TRUNC 0x0400 /* not fcntl */
#define O_EXCL 0x0800 /* not fcntl */
-#define O_SYNC 0x2000
+#define O_DSYNC 0x2000 /* used to be O_SYNC, see below */
#define O_NONBLOCK 0x4000
#if defined(__sparc__) && defined(__arch64__)
#define O_NDELAY 0x0004
@@ -20,6 +18,21 @@
#define O_DIRECT 0x100000 /* direct disk access hint */
#define O_NOATIME 0x200000
#define O_CLOEXEC 0x400000
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 0x800000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define F_GETOWN 5 /* for sockets. */
#define F_SETOWN 6 /* for sockets. */
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index b63e51c3c3e..b0576df6ec8 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -16,8 +16,6 @@
#define PCI_IRQ_NONE 0xffffffff
-#define PCI_CACHE_LINE_BYTES 64
-
static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 857630cff63..7f9b9dba38a 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -10,12 +10,12 @@
#include <asm/psr.h>
-#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
+#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+#define arch_spin_unlock_wait(lock) \
+ do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
"\n1:\n\t"
@@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "g2", "memory", "cc");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int result;
__asm__ __volatile__("ldstub [%1], %0"
@@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
@@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* Sort of like atomic_t's on Sparc, but even more clever.
*
* ------------------------------------
- * | 24-bit counter | wlock | raw_rwlock_t
+ * | 24-bit counter | wlock | arch_rwlock_t
* ------------------------------------
* 31 8 7 0
*
@@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
*
* Unfortunately this scheme limits us to ~16,000,000 cpus.
*/
-static inline void arch_read_lock(raw_rwlock_t *rw)
+static inline void __arch_read_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw)
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_lock(lock) \
+#define arch_read_lock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_lock(lock); \
+ __arch_read_lock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void arch_read_unlock(raw_rwlock_t *rw)
+static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw)
: "g2", "g4", "memory", "cc");
}
-#define __raw_read_unlock(lock) \
+#define arch_read_unlock(lock) \
do { unsigned long flags; \
local_irq_save(flags); \
- arch_read_unlock(lock); \
+ __arch_read_unlock(lock); \
local_irq_restore(flags); \
} while(0)
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
lp = rw;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
@@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
*(volatile __u32 *)&lp->lock = ~0U;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val;
@@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return (val == 0);
}
-static inline int arch_read_trylock(raw_rwlock_t *rw)
+static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
- register raw_rwlock_t *lp asm("g1");
+ register arch_rwlock_t *lp asm("g1");
register int res asm("o0");
lp = rw;
__asm__ __volatile__(
@@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw)
return res;
}
-#define __raw_read_trylock(lock) \
+#define arch_read_trylock(lock) \
({ unsigned long flags; \
int res; \
local_irq_save(flags); \
- res = arch_read_trylock(lock); \
+ res = __arch_read_trylock(lock); \
local_irq_restore(flags); \
res; \
})
-#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
+#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
-#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
+#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
+#define arch_write_can_lock(rw) (!(rw)->lock)
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 43e51478358..073936a8b27 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -21,13 +21,13 @@
* the spinner sections must be pre-V9 branches.
*/
-#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
+#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-#define __raw_spin_unlock_wait(lp) \
+#define arch_spin_unlock_wait(lp) \
do { rmb(); \
} while((lp)->lock)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
@@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
: "memory");
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
@@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return (result == 0UL);
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
" stb %%g0, [%0]"
@@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
: "memory");
}
-static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long tmp1, tmp2;
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-static void inline arch_read_lock(raw_rwlock_t *lock)
+static void inline arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_read_trylock(raw_rwlock_t *lock)
+static int inline arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
@@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
return tmp1;
}
-static void inline arch_read_unlock(raw_rwlock_t *lock)
+static void inline arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_lock(raw_rwlock_t *lock)
+static void inline arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
@@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
: "memory");
}
-static void inline arch_write_unlock(raw_rwlock_t *lock)
+static void inline arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
: "memory");
}
-static int inline arch_write_trylock(raw_rwlock_t *lock)
+static int inline arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
@@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock)
return result;
}
-#define __raw_read_lock(p) arch_read_lock(p)
-#define __raw_read_lock_flags(p, f) arch_read_lock(p)
-#define __raw_read_trylock(p) arch_read_trylock(p)
-#define __raw_read_unlock(p) arch_read_unlock(p)
-#define __raw_write_lock(p) arch_write_lock(p)
-#define __raw_write_lock_flags(p, f) arch_write_lock(p)
-#define __raw_write_unlock(p) arch_write_unlock(p)
-#define __raw_write_trylock(p) arch_write_trylock(p)
-
-#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define __raw_write_can_lock(rw) (!(rw)->lock)
-
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_read_lock(p) arch_read_lock(p)
+#define arch_read_lock_flags(p, f) arch_read_lock(p)
+#define arch_read_trylock(p) arch_read_trylock(p)
+#define arch_read_unlock(p) arch_read_unlock(p)
+#define arch_write_lock(p) arch_write_lock(p)
+#define arch_write_lock_flags(p, f) arch_write_lock(p)
+#define arch_write_unlock(p) arch_write_unlock(p)
+#define arch_write_trylock(p) arch_write_trylock(p)
+
+#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
+#define arch_write_can_lock(rw) (!(rw)->lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 37cbe01c585..9c454fdeaad 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -7,14 +7,14 @@
typedef struct {
volatile unsigned char lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 6c5fddb7e6b..edf196ee4ef 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -16,8 +16,6 @@
#ifdef __KERNEL__
extern void __memmove(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
-extern __kernel_size_t __memset(void *,int,__kernel_size_t);
#ifndef EXPORT_SYMTAB_STROPS
@@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t);
})
#define __HAVE_ARCH_MEMCPY
-
-static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- extern void __copy_1page(void *, const void *);
-
- if(n <= 32) {
- __builtin_memcpy(to, from, n);
- } else if (((unsigned int) to & 7) != 0) {
- /* Destination is not aligned on the double-word boundary */
- __memcpy(to, from, n);
- } else {
- switch(n) {
- case PAGE_SIZE:
- __copy_1page(to, from);
- break;
- default:
- __memcpy(to, from, n);
- break;
- }
- }
- return to;
-}
-
-static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
-{
- __memcpy(to, from, n);
- return to;
-}
-
-#undef memcpy
-#define memcpy(t, f, n) \
-(__builtin_constant_p(n) ? \
- __constant_memcpy((t),(f),(n)) : \
- __nonconstant_memcpy((t),(f),(n)))
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
-
-static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
-{
- extern void bzero_1page(void *);
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if(!c) {
- if(count == PAGE_SIZE)
- bzero_1page(s);
- else
- __bzero(s, count);
- } else {
- __memset(s, c, count);
- }
- return s;
-}
-
-static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
-{
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if(!c)
- __bzero(s, count);
- else
- __memset(s, c, count);
- return s;
-}
-
-static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
-{
- __memset(s, c, count);
- return s;
-}
-
-#undef memset
-#define memset(s, c, count) \
-(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
- __constant_c_and_count_memset((s), (c), (count)) : \
- __constant_c_memset((s), (c), (count))) \
- : __nonconstant_memset((s), (c), (count)))
+#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 43161f2d17e..9623bc21315 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -15,8 +15,6 @@
#include <asm/asi.h>
-extern void *__memset(void *,int,__kernel_size_t);
-
#ifndef EXPORT_SYMTAB_STROPS
/* First the mem*() things. */
@@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *, const void *, __kernel_size_t);
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#define __HAVE_ARCH_MEMSET
-extern void *__builtin_memset(void *,int,__kernel_size_t);
-
-static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
-{
- extern __kernel_size_t __bzero(void *, __kernel_size_t);
-
- if (!c) {
- __bzero(s, count);
- return s;
- } else
- return __memset(s, c, count);
-}
-
-#undef memset
-#define memset(s, c, count) \
-((__builtin_constant_p(count) && (count) <= 32) ? \
- __builtin_memset((s), (c), (count)) : \
- (__builtin_constant_p(c) ? \
- __constant_memset((s), (c), (count)) : \
- __memset((s), (c), (count))))
+#define memset(s, c, count) __builtin_memset(s, c, count)
#define __HAVE_ARCH_MEMSCAN
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 1b45a7bbe40..7257ebb8f39 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* flag bit 8 is available */
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
/* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as
@@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_FREEZE (1<<TIF_FREEZE)
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 8303ac48103..489d2ba92bc 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un
return __copy_user(to, (__force void __user *) from, n);
}
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ int sz = __compiletime_object_size(to);
+
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return -EFAULT;
+ }
+
if (n && __access_ok((unsigned long) from, n))
return __copy_user((__force void __user *) to, from, n);
else
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 9ea271e19c7..dbc14166099 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -6,6 +6,7 @@
*/
#ifdef __KERNEL__
+#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -204,6 +205,14 @@ __asm__ __volatile__( \
extern int __get_user_bad(void);
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+ __compiletime_error("copy_from_user() buffer size is not provably correct")
+#else
+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
extern unsigned long __must_check ___copy_from_user(void *to,
const void __user *from,
unsigned long size);
@@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long ret = ___copy_from_user(to, from, size);
-
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
+ unsigned long ret = (unsigned long) -EFAULT;
+ int sz = __compiletime_object_size(to);
+
+ if (likely(sz == -1 || sz >= size)) {
+ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+ } else {
+ copy_from_user_overflow();
+ }
return ret;
}
#define __copy_from_user copy_from_user
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index d8d25bd9712..cb4b9bfd0d8 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -398,7 +398,7 @@
#define __NR_perf_event_open 327
#define __NR_recvmmsg 328
-#define NR_SYSCALLS 329
+#define NR_syscalls 329
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index ec9c7bc67d2..1504df8ddf7 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1294,7 +1294,7 @@ linux_sparc_syscall:
sethi %hi(PSR_SYSCALL), %l4
or %l0, %l4, %l0
/* Direct access to user regs, must faster. */
- cmp %g1, NR_SYSCALLS
+ cmp %g1, NR_syscalls
bgeu linux_sparc_ni_syscall
sll %g1, 2, %l4
ld [%l7 + %l4], %l7
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d3b1a307656..29973daa993 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -4,6 +4,7 @@
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <trace/syscall.h>
#include <asm/ftrace.h>
@@ -91,3 +92,13 @@ int __init ftrace_dyn_arch_init(void *data)
}
#endif
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+extern unsigned int sys_call_table[];
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr];
+}
+
+#endif
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 7690cc219ec..5fad94950e7 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -11,6 +11,7 @@
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
+#include <linux/bitmap.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
@@ -169,7 +170,7 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
- iommu_area_free(arena->map, entry, npages);
+ bitmap_clear(arena->map, entry, npages);
}
int iommu_table_init(struct iommu *iommu, int tsbsize,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index ce996f97855..8d6882bb480 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
@@ -785,14 +785,14 @@ void fixup_irqs(void)
for (irq = 0; irq < NR_IRQS; irq++) {
unsigned long flags;
- spin_lock_irqsave(&irq_desc[irq].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
irq_desc[irq].affinity);
}
- spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
tick_ops->disable_irq();
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 3bc6527c95a..6716584e48a 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -46,6 +46,9 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
+ if ((unsigned long) p->addr & 0x3UL)
+ return -EILSEQ;
+
p->ainsn.insn[0] = *p->addr;
flushi(&p->ainsn.insn[0]);
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index cb3c72c45aa..df39a0f0d27 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
+#include <linux/bitmap.h>
#include <asm/hypervisor.h>
#include <asm/iommu.h>
@@ -1242,13 +1243,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->rx_irq_name, lp);
if (err)
return err;
err = request_irq(lp->cfg.tx_irq, ldc_tx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+ IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
@@ -1875,7 +1876,7 @@ EXPORT_SYMBOL(ldc_read);
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
- unsigned long n, i, start, end, limit;
+ unsigned long n, start, end, limit;
int pass;
limit = arena->limit;
@@ -1883,7 +1884,7 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
pass = 0;
again:
- n = find_next_zero_bit(arena->map, limit, start);
+ n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
end = n + npages;
if (unlikely(end >= limit)) {
if (likely(pass < 1)) {
@@ -1896,16 +1897,7 @@ again:
return -1;
}
}
-
- for (i = n; i < end; i++) {
- if (test_bit(i, arena->map)) {
- start = i + 1;
- goto again;
- }
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, arena->map);
+ bitmap_set(arena->map, n, npages);
arena->hint = end;
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 938da19dc06..cdc91d919e9 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
+#include <linux/bootmem.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
@@ -108,25 +109,15 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
static void mdesc_lmb_free(struct mdesc_handle *hp)
{
- unsigned int alloc_size, handle_size = hp->handle_size;
- unsigned long start, end;
+ unsigned int alloc_size;
+ unsigned long start;
BUG_ON(atomic_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
- alloc_size = PAGE_ALIGN(handle_size);
-
- start = (unsigned long) hp;
- end = start + alloc_size;
-
- while (start < end) {
- struct page *p;
-
- p = virt_to_page(start);
- ClearPageReserved(p);
- __free_page(p);
- start += PAGE_SIZE;
- }
+ alloc_size = PAGE_ALIGN(hp->handle_size);
+ start = __pa(hp);
+ free_bootmem_late(start, alloc_size);
}
static struct mdesc_mem_ops lmb_mdesc_ops = {
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b129611590a..f30f4a1ead2 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static DEFINE_PER_CPU(unsigned int, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
- local_inc(&__get_cpu_var(alert_counter));
- if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
+ __this_cpu_inc(per_cpu_var(alert_counter));
+ if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- local_set(&__get_cpu_var(alert_counter), 0);
+ __this_cpu_write(per_cpu_var(alert_counter), 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 881947e59e9..0a6f2d1798d 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -104,9 +104,19 @@ static int of_bus_pci_map(u32 *addr, const u32 *range,
int i;
/* Check address type match */
- if ((addr[0] ^ range[0]) & 0x03000000)
- return -EINVAL;
+ if (!((addr[0] ^ range[0]) & 0x03000000))
+ goto type_match;
+
+ /* Special exception, we can map a 64-bit address into
+ * a 32-bit range.
+ */
+ if ((addr[0] & 0x03000000) == 0x03000000 &&
+ (range[0] & 0x03000000) == 0x02000000)
+ goto type_match;
+
+ return -EINVAL;
+type_match:
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index c6864866280..539e83f8e08 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1064,7 +1064,6 @@ int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
return (device_mask & dma_addr_mask) == dma_addr_mask;
}
-EXPORT_SYMBOL(pci_dma_supported);
void pci_resource_to_user(const struct pci_dev *pdev, int bar,
const struct resource *rp, resource_size_t *start,
@@ -1081,3 +1080,10 @@ void pci_resource_to_user(const struct pci_dev *pdev, int bar,
*start = rp->start - offset;
*end = rp->end - offset;
}
+
+static int __init pcibios_init(void)
+{
+ pci_dfl_cache_line_size = 64 >> 2;
+ return 0;
+}
+subsys_initcall(pcibios_init);
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 4ae91dc2feb..2f6524d1a81 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
+#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>
@@ -37,6 +38,9 @@
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
#include "entry.h"
/* #define ALLOW_INIT_TRACING */
@@ -1059,6 +1063,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACE))
ret = tracehook_report_syscall_entry(regs);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->u_regs[UREG_G1]);
+
if (unlikely(current->audit_context) && !ret)
audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
AUDIT_ARCH_SPARC :
@@ -1084,6 +1091,9 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->u_regs[UREG_G1]);
+
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
}
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
index 0f26066a08d..372ad59c4cb 100644
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ b/arch/sparc/kernel/sparc_ksyms_64.c
@@ -38,17 +38,5 @@ EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
-#ifdef CONFIG_PCI
-/* inline functions in asm/pci_64.h */
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-EXPORT_SYMBOL(pci_map_single);
-EXPORT_SYMBOL(pci_unmap_single);
-EXPORT_SYMBOL(pci_map_sg);
-EXPORT_SYMBOL(pci_unmap_sg);
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
-#endif
-
/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 00abe87e5b5..dc0ac197e7e 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -564,28 +564,6 @@ asmlinkage long sparc32_open(const char __user *filename,
return do_sys_open(AT_FDCWD, filename, flags, mode);
}
-extern unsigned long do_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr);
-
-asmlinkage unsigned long sys32_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, u32 __new_addr)
-{
- unsigned long ret = -EINVAL;
- unsigned long new_addr = __new_addr;
-
- if (unlikely(sparc_mmap_check(addr, old_len)))
- goto out;
- if (unlikely(sparc_mmap_check(new_addr, new_len)))
- goto out;
- down_write(&current->mm->mmap_sem);
- ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
-out:
- return ret;
-}
-
long sys32_lookup_dcookie(unsigned long cookie_high,
unsigned long cookie_low,
char __user *buf, size_t len)
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 03035c852a4..3a82e65d8db 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -45,7 +45,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
@@ -79,15 +80,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
}
}
-asmlinkage unsigned long sparc_brk(unsigned long brk)
-{
- if(ARCH_SUN4C) {
- if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
- return current->mm->brk;
- }
- return sys_brk(brk);
-}
-
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
@@ -234,31 +226,6 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
}
/* Linux version of mmap */
-static unsigned long do_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags, unsigned long fd,
- unsigned long pgoff)
-{
- struct file * file = NULL;
- unsigned long retval = -EBADF;
-
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- len = PAGE_ALIGN(len);
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return retval;
-}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
@@ -266,14 +233,16 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
- return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long off)
{
- return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ /* no alignment check? */
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
long sparc_remap_file_pages(unsigned long start, unsigned long size,
@@ -287,27 +256,6 @@ long sparc_remap_file_pages(unsigned long start, unsigned long size,
(pgoff >> (PAGE_SHIFT - 12)), flags);
}
-extern unsigned long do_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr);
-
-asmlinkage unsigned long sparc_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr)
-{
- unsigned long ret = -EINVAL;
-
- if (unlikely(sparc_mmap_check(addr, old_len)))
- goto out;
- if (unlikely(sparc_mmap_check(new_addr, new_len)))
- goto out;
- down_write(&current->mm->mmap_sem);
- ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
-out:
- return ret;
-}
-
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long
c_sys_nis_syscall (struct pt_regs *regs)
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index e2d102447a4..cfa0e19abe3 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -317,10 +317,14 @@ bottomup:
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
unsigned long align_goal, addr = -ENOMEM;
+ unsigned long (*get_area)(struct file *, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+
+ get_area = current->mm->get_unmapped_area;
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+ return get_area(NULL, orig_addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
@@ -333,7 +337,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
align_goal = (64UL * 1024);
do {
- addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
@@ -351,7 +355,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+ addr = get_area(NULL, orig_addr, len, pgoff, flags);
return addr;
}
@@ -399,18 +403,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
}
}
-SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
-{
- /* People could try to be nasty and use ta 0x6d in 32bit programs */
- if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
- return current->mm->brk;
-
- if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
- return current->mm->brk;
-
- return sys_brk(brk);
-}
-
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
@@ -568,23 +560,13 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
- struct file * file = NULL;
- unsigned long retval = -EBADF;
-
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- len = PAGE_ALIGN(len);
+ unsigned long retval = -EINVAL;
- down_write(&current->mm->mmap_sem);
- retval = do_mmap(file, addr, len, prot, flags, off);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
+ if ((off + PAGE_ALIGN(len)) < off)
+ goto out;
+ if (off & ~PAGE_MASK)
+ goto out;
+ retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return retval;
}
@@ -614,12 +596,6 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
if (test_thread_flag(TIF_32BIT))
goto out;
- if (unlikely(new_len >= VA_EXCLUDE_START))
- goto out;
- if (unlikely(sparc_mmap_check(addr, old_len)))
- goto out;
- if (unlikely(sparc_mmap_check(new_addr, new_len)))
- goto out;
down_write(&current->mm->mmap_sem);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
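
On the 64-bit side the same consolidation applies, and the surviving sys_mmap wrapper is reduced to two sanity checks before delegating to sys_mmap_pgoff: the byte offset must be page aligned, and off + PAGE_ALIGN(len) must not wrap around. A small sketch of that wraparound guard (not from this commit; page size and offset are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t page_size = 8192;	/* illustrative page size */
	uint64_t off = UINT64_MAX - page_size;	/* nearly at the top of the space */
	uint64_t len = 4 * page_size;
	uint64_t aligned = (len + page_size - 1) & ~(page_size - 1);

	if (off + aligned < off)
		puts("EINVAL: offset + length wraps around");
	else
		puts("range is representable");
	return 0;
}
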
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d150c2aa98d..dc4a458f74d 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
#endif
.align 32
1: ldx [%g6 + TI_FLAGS], %l5
- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
be,pt %icc, rtrap
nop
call syscall_trace_leave
@@ -187,7 +187,7 @@ linux_syscall_trace:
.globl linux_sparc_syscall32
linux_sparc_syscall32:
/* Direct access to user regs, much faster. */
- cmp %g1, NR_SYSCALLS ! IEU1 Group
+ cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
srl %i0, 0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
@@ -198,7 +198,7 @@ linux_sparc_syscall32:
srl %i5, 0, %o5 ! IEU1
srl %i2, 0, %o2 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
call %l7 ! CTI Group brk forced
@@ -210,7 +210,7 @@ linux_sparc_syscall32:
.globl linux_sparc_syscall
linux_sparc_syscall:
/* Direct access to user regs, much faster. */
- cmp %g1, NR_SYSCALLS ! IEU1 Group
+ cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
mov %i0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
@@ -221,7 +221,7 @@ linux_sparc_syscall:
mov %i3, %o3 ! IEU1
mov %i4, %o4 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace ! CTI Group
mov %i0, %l5 ! IEU0
2: call %l7 ! CTI Group brk forced
@@ -245,7 +245,7 @@ ret_sys_call:
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
80:
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
@@ -260,7 +260,7 @@ ret_sys_call:
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
sub %g0, %o0, %o0
or %g3, %g2, %g3
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index a63c5d2d984..d2f999ae2b8 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -9,7 +9,6 @@
struct new_utsname;
extern asmlinkage unsigned long sys_getpagesize(void);
-extern asmlinkage unsigned long sparc_brk(unsigned long brk);
extern asmlinkage long sparc_pipe(struct pt_regs *regs);
extern asmlinkage long sys_ipc(unsigned int call, int first,
unsigned long second,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index ceb1530f8aa..801fc8e5a0e 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -19,7 +19,7 @@ sys_call_table:
/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek
+/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
@@ -67,7 +67,7 @@ sys_call_table:
/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
-/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index cc8e7862e95..e575b46bd7a 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -21,7 +21,7 @@ sys_call_table32:
/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek
+/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
@@ -68,7 +68,7 @@ sys_call_table32:
.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/ .word sys32_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+/*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
@@ -96,7 +96,7 @@ sys_call_table:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek
+/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 63f73ae8a89..67e16510288 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -774,26 +774,9 @@ void __devinit setup_sparc64_timer(void)
static struct clocksource clocksource_tick = {
.rating = 100,
.mask = CLOCKSOURCE_MASK(64),
- .shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init setup_clockevent_multiplier(unsigned long hz)
-{
- unsigned long mult, shift = 32;
-
- while (1) {
- mult = div_sc(hz, NSEC_PER_SEC, shift);
- if (mult && (mult >> 32UL) == 0UL)
- break;
-
- shift--;
- }
-
- sparc64_clockevent.shift = shift;
- sparc64_clockevent.mult = mult;
-}
-
static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
@@ -828,9 +811,7 @@ void __init time_init(void)
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
- clocksource_tick.mult =
- clocksource_hz2mult(freq,
- clocksource_tick.shift);
+ clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
clocksource_tick.read = clocksource_tick_read;
printk("clocksource: mult[%x] shift[%d]\n",
@@ -839,15 +820,14 @@ void __init time_init(void)
clocksource_register(&clocksource_tick);
sparc64_clockevent.name = tick_ops->name;
-
- setup_clockevent_multiplier(freq);
+ clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent);
sparc64_clockevent.min_delta_ns =
clockevent_delta2ns(0xF, &sparc64_clockevent);
- printk("clockevent: mult[%ux] shift[%d]\n",
+ printk("clockevent: mult[%x] shift[%d]\n",
sparc64_clockevent.mult, sparc64_clockevent.shift);
setup_sparc64_timer();
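
The time_64.c hunks drop the fixed clocksource shift of 16 and the hand-rolled shift search in favour of the generic clocksource_calc_mult_shift()/clockevents_calc_mult_shift() helpers, which pick a suitable mult/shift pair for the given frequency. For a clocksource the scaling is ns = (cycles * mult) >> shift with mult roughly (NSEC_PER_SEC << shift) / freq (clock event devices use the inverse direction). A minimal userspace sketch of that arithmetic (not from this commit; the frequency and shift are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t NSEC_PER_SEC = 1000000000ULL;
	uint64_t freq = 1000000000ULL;		/* assume a 1 GHz tick source */
	unsigned int shift = 24;
	uint64_t mult = (NSEC_PER_SEC << shift) / freq;
	uint64_t cycles = 12345;

	printf("mult=%llu, %llu cycles -> %llu ns\n",
	       (unsigned long long)mult,
	       (unsigned long long)cycles,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}
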
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 6b1e6cde6ff..f8514e291e1 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -17,8 +17,7 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
-
-/* #define DEBUG_MNA */
+#include <linux/perf_event.h>
enum direction {
load, /* ld, ldd, ldh, ldsh */
@@ -29,12 +28,6 @@ enum direction {
invalid,
};
-#ifdef DEBUG_MNA
-static char *dirstrings[] = {
- "load", "store", "both", "fpload", "fpstore", "invalid"
-};
-#endif
-
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
@@ -255,10 +248,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
unsigned long addr = compute_effective_address(regs, insn);
int err;
-#ifdef DEBUG_MNA
- printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
- regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
-#endif
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -350,6 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
}
addr = compute_effective_address(regs, insn);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch(dir) {
case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 379209982a0..378ca82b9cc 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -20,10 +20,9 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
+#include <linux/perf_event.h>
#include <asm/fpumacro.h>
-/* #define DEBUG_MNA */
-
enum direction {
load, /* ld, ldd, ldh, ldsh */
store, /* st, std, sth, stsh */
@@ -33,12 +32,6 @@ enum direction {
invalid,
};
-#ifdef DEBUG_MNA
-static char *dirstrings[] = {
- "load", "store", "both", "fpload", "fpstore", "invalid"
-};
-#endif
-
static inline enum direction decode_direction(unsigned int insn)
{
unsigned long tmp = (insn >> 21) & 1;
@@ -327,12 +320,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
-#ifdef DEBUG_MNA
- printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
- "retpc[%016lx]\n",
- regs->tpc, dirstrings[dir], addr, size,
- regs->u_regs[UREG_RETPC]);
-#endif
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
switch (asi) {
case ASI_NL:
case ASI_AIUPL:
@@ -399,6 +387,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
@@ -445,6 +434,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000;
if (freg & 3) {
@@ -566,6 +557,8 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs);
if (from_kernel || rd < 16) {
@@ -596,6 +589,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -657,6 +651,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index d231cbd5c52..9dfd2ebcb15 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
+#include <linux/perf_event.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
@@ -801,6 +802,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
BUG_ON(regs->tstate & TSTATE_PRIV);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index e75faf0e59a..c4b5e03af11 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -44,3 +44,4 @@ obj-y += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
obj-y += ksyms.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
+obj-y += usercopy.o
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index b6557297440..615f401edf6 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -6,10 +6,6 @@
.text
- .globl __memset
- .type __memset, #function
-__memset: /* %o0=buf, %o1=pat, %o2=len */
-
.globl memset
.type memset, #function
memset: /* %o0=buf, %o1=pat, %o2=len */
@@ -83,7 +79,6 @@ __bzero_done:
retl
mov %o3, %o0
.size __bzero, .-__bzero
- .size __memset, .-__memset
.size memset, .-memset
#define EX_ST(x,y) \
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 77f228533d4..3632cb34e91 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -560,7 +560,7 @@ __csum_partial_copy_end:
mov %i0, %o1
mov %i1, %o0
5:
- call __memcpy
+ call memcpy
mov %i2, %o2
tst %o0
bne,a 2f
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 704b1266838..1b30bb3bfdb 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -30,7 +30,6 @@ EXPORT_SYMBOL(__memscan_generic);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__bzero);
@@ -81,7 +80,6 @@ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
/* Special internal versions of library functions. */
EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(bzero_1page);
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 7ce9c65f359..24b8b12deed 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -64,8 +64,9 @@ mcount:
2: sethi %hi(softirq_stack), %g3
or %g3, %lo(softirq_stack), %g3
ldx [%g3 + %g1], %g7
+ sub %g7, STACK_BIAS, %g7
cmp %sp, %g7
- bleu,pt %xcc, 2f
+ bleu,pt %xcc, 3f
sethi %hi(THREAD_SIZE), %g3
add %g7, %g3, %g7
cmp %sp, %g7
@@ -75,7 +76,7 @@ mcount:
* again, we are already trying to output the stack overflow
* message.
*/
- sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
+3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough
or %g7, %lo(ovstack), %g7
add %g7, OVSTACKSIZE, %g3
sub %g3, STACK_BIAS + 192, %g3
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index ce10bc869af..34fe6575173 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -543,9 +543,6 @@ FUNC(memmove)
b 3f
add %o0, 2, %o0
-#ifdef __KERNEL__
-FUNC(__memcpy)
-#endif
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
sub %o0, %o1, %o4
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index 1c37ea892de..99c017be871 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -60,11 +60,10 @@
.globl __bzero_begin
__bzero_begin:
- .globl __bzero, __memset,
+ .globl __bzero
.globl memset
.globl __memset_start, __memset_end
__memset_start:
-__memset:
memset:
and %o1, 0xff, %g3
sll %g3, 8, %g2
diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c
new file mode 100644
index 00000000000..14b363fec8a
--- /dev/null
+++ b/arch/sparc/lib/usercopy.c
@@ -0,0 +1,8 @@
+#include <linux/module.h>
+#include <linux/bug.h>
+
+void copy_from_user_overflow(void)
+{
+ WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index e13f65da17d..a3fccde894e 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -67,6 +67,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include "sfp-util_32.h"
@@ -163,6 +164,8 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
int retcode = 0; /* assume all succeed */
unsigned long insn;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+
#ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc);
printk("fpqdepth is %ld\n", fpt->thread.fpqdepth);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 6863c9bde25..56d2c44747b 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/perf_event.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
@@ -183,6 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (tstate & TSTATE_PRIV)
die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (test_thread_flag(TIF_32BIT))
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
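
The perf_sw_event() calls added throughout the unaligned-access, VIS and FPU emulation paths above feed the generic software counters PERF_COUNT_SW_ALIGNMENT_FAULTS and PERF_COUNT_SW_EMULATION_FAULTS, so these slow paths become visible to ordinary perf tooling. A minimal userspace sketch of reading one of these counters through perf_event_open() (not from this commit; the workload placeholder is illustrative):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;	/* or ..._EMULATION_FAULTS */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload of interest here ... */

	read(fd, &count, sizeof(count));
	printf("alignment faults: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
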
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 43b0da96a4f..6081936bf03 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -31,13 +31,12 @@
#include <asm/sections.h>
#include <asm/mmu_context.h>
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs)
+static inline __kprobes int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
- if (!user_mode(regs)) {
+ if (kprobes_built_in() && !user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 0))
ret = 1;
@@ -45,12 +44,6 @@ static inline int notify_page_fault(struct pt_regs *regs)
}
return ret;
}
-#else
-static inline int notify_page_fault(struct pt_regs *regs)
-{
- return 0;
-}
-#endif
static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
@@ -73,7 +66,7 @@ static void __kprobes unhandled_fault(unsigned long address,
die_if_kernel("Oops", regs);
}
-static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
regs->tpc);
@@ -170,8 +163,9 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
return insn;
}
-static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
- unsigned int insn, unsigned long address)
+static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
+ int fault_code, unsigned int insn,
+ unsigned long address)
{
unsigned char asi = ASI_P;
@@ -225,7 +219,7 @@ cannot_handle:
unhandled_fault (address, current, regs);
}
-static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
+static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
static int times;
@@ -237,8 +231,8 @@ static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs)
show_regs(regs);
}
-static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
- unsigned long addr)
+static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
+ unsigned long addr)
{
static int times;
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index 2ffacd67c42..a89baf0d875 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
#include <asm/sections.h>
#include <asm/page.h>
@@ -1021,20 +1022,12 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size)
npages = (((unsigned long)vaddr & ~PAGE_MASK) +
size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
- scan = 0;
local_irq_save(flags);
- for (;;) {
- scan = find_next_zero_bit(sun4c_iobuffer_map,
- iobuffer_map_size, scan);
- if ((base = scan) + npages > iobuffer_map_size) goto abend;
- for (;;) {
- if (scan >= base + npages) goto found;
- if (test_bit(scan, sun4c_iobuffer_map)) break;
- scan++;
- }
- }
+ base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size,
+ 0, npages, 0);
+ if (base >= iobuffer_map_size)
+ goto abend;
-found:
high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
high = SUN4C_REAL_PGDIR_ALIGN(high);
while (high > sun4c_iobuffer_high) {
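
The sun4c hunk replaces an open-coded scan of sun4c_iobuffer_map with bitmap_find_next_zero_area(), which returns the first index of a run of npages clear bits, or a value at or beyond the bitmap size when no such hole exists. A simplified sketch of that operation (not from this commit; it uses a plain byte map instead of the kernel's packed bitmap):

#include <stddef.h>
#include <stdio.h>

static size_t find_zero_area(const unsigned char *map, size_t size,
			     size_t start, size_t nr)
{
	size_t run = 0, i;

	for (i = start; i < size; i++) {
		run = map[i] ? 0 : run + 1;
		if (run == nr)
			return i - nr + 1;	/* first slot of the hole */
	}
	return size;				/* >= size means "no hole" */
}

int main(void)
{
	unsigned char map[8] = { 1, 1, 0, 1, 0, 0, 0, 1 };

	printf("3-slot hole starts at %zu\n", find_zero_area(map, 8, 0, 3));
	return 0;
}
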
diff --git a/arch/um/Makefile b/arch/um/Makefile
index fc633dbacf8..fab8121d2b3 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -149,6 +149,6 @@ $(SHARED_HEADERS)/user_constants.h: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.s
$(SHARED_HEADERS)/kern_constants.h:
$(Q)mkdir -p $(dir $@)
- $(Q)echo '#include "../../../../include/asm/asm-offsets.h"' >$@
+ $(Q)echo '#include "../../../../include/generated/asm-offsets.h"' >$@
export SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS HEADER_ARCH DEV_NULL_PATH
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e14629c87de..51069245b79 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -6,6 +6,7 @@
#include <linux/console.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
@@ -131,7 +132,7 @@ void mconsole_proc(struct mc_request *req)
char *ptr = req->request.data, *buf;
ptr += strlen("proc");
- while (isspace(*ptr)) ptr++;
+ ptr = skip_spaces(ptr);
proc = get_fs_type("proc");
if (proc == NULL) {
@@ -212,8 +213,7 @@ void mconsole_proc(struct mc_request *req)
char *ptr = req->request.data;
ptr += strlen("proc");
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
snprintf(path, sizeof(path), "/proc/%s", ptr);
fd = sys_open(path, 0, 0);
@@ -560,8 +560,7 @@ void mconsole_config(struct mc_request *req)
int err;
ptr += strlen("config");
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
dev = mconsole_find_dev(ptr);
if (dev == NULL) {
mconsole_reply(req, "Bad configuration option", 1, 0);
@@ -588,7 +587,7 @@ void mconsole_remove(struct mc_request *req)
int err, start, end, n;
ptr += strlen("remove");
- while (isspace(*ptr)) ptr++;
+ ptr = skip_spaces(ptr);
dev = mconsole_find_dev(ptr);
if (dev == NULL) {
mconsole_reply(req, "Bad remove option", 1, 0);
@@ -712,7 +711,7 @@ void mconsole_sysrq(struct mc_request *req)
char *ptr = req->request.data;
ptr += strlen("sysrq");
- while (isspace(*ptr)) ptr++;
+ ptr = skip_spaces(ptr);
/*
* With 'b', the system will shut down without a chance to reply,
@@ -757,8 +756,7 @@ void mconsole_stack(struct mc_request *req)
*/
ptr += strlen("stack");
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
/*
* Should really check for multiple pids or reject bad args here
@@ -833,8 +831,8 @@ static int __init mconsole_init(void)
__initcall(mconsole_init);
-static int write_proc_mconsole(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t mconsole_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char *buf;
@@ -855,6 +853,11 @@ static int write_proc_mconsole(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations mconsole_proc_fops = {
+ .owner = THIS_MODULE,
+ .write = mconsole_proc_write,
+};
+
static int create_proc_mconsole(void)
{
struct proc_dir_entry *ent;
@@ -862,15 +865,12 @@ static int create_proc_mconsole(void)
if (notify_socket == NULL)
return 0;
- ent = create_proc_entry("mconsole", S_IFREG | 0200, NULL);
+ ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_fops);
if (ent == NULL) {
printk(KERN_INFO "create_proc_mconsole : create_proc_entry "
"failed\n");
return 0;
}
-
- ent->read_proc = NULL;
- ent->write_proc = write_proc_mconsole;
return 0;
}
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 635d16d90a8..5ff554677f4 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -27,6 +27,7 @@
#include "linux/init.h"
#include "linux/cdrom.h"
#include "linux/proc_fs.h"
+#include "linux/seq_file.h"
#include "linux/ctype.h"
#include "linux/capability.h"
#include "linux/mm.h"
@@ -200,23 +201,25 @@ static void make_proc_ide(void)
proc_ide = proc_mkdir("ide0", proc_ide_root);
}
-static int proc_ide_read_media(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int fake_ide_media_proc_show(struct seq_file *m, void *v)
{
- int len;
-
- strcpy(page, "disk\n");
- len = strlen("disk\n");
- len -= off;
- if (len < count){
- *eof = 1;
- if (len <= 0) return 0;
- }
- else len = count;
- *start = page + off;
- return len;
+ seq_puts(m, "disk\n");
+ return 0;
+}
+
+static int fake_ide_media_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fake_ide_media_proc_show, NULL);
}
+static const struct file_operations fake_ide_media_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = fake_ide_media_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void make_ide_entries(const char *dev_name)
{
struct proc_dir_entry *dir, *ent;
@@ -227,11 +230,8 @@ static void make_ide_entries(const char *dev_name)
dir = proc_mkdir(dev_name, proc_ide);
if(!dir) return;
- ent = create_proc_entry("media", S_IFREG|S_IRUGO, dir);
+ ent = proc_create("media", S_IRUGO, dir, &fake_ide_media_proc_fops);
if(!ent) return;
- ent->data = NULL;
- ent->read_proc = proc_ide_read_media;
- ent->write_proc = NULL;
snprintf(name, sizeof(name), "ide0/%s", dev_name);
proc_symlink(dev_name, proc_ide_root, name);
}
diff --git a/arch/um/include/asm/asm-offsets.h b/arch/um/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/um/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index 6540d2c9fbb..829df49dee9 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -6,7 +6,9 @@
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/uaccess.h>
@@ -16,30 +18,26 @@
*/
int uml_exitcode = 0;
-static int read_proc_exitcode(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int exitcode_proc_show(struct seq_file *m, void *v)
{
- int len, val;
+ int val;
/*
* Save uml_exitcode in a local so that we don't need to guarantee
* that sprintf accesses it atomically.
*/
val = uml_exitcode;
- len = sprintf(page, "%d\n", val);
- len -= off;
- if (len <= off+count)
- *eof = 1;
- *start = page + off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
- return len;
+ seq_printf(m, "%d\n", val);
+ return 0;
+}
+
+static int exitcode_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, exitcode_proc_show, NULL);
}
-static int write_proc_exitcode(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t exitcode_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
int tmp;
@@ -55,20 +53,25 @@ static int write_proc_exitcode(struct file *file, const char __user *buffer,
return count;
}
+static const struct file_operations exitcode_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = exitcode_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = exitcode_proc_write,
+};
+
static int make_proc_exitcode(void)
{
struct proc_dir_entry *ent;
- ent = create_proc_entry("exitcode", 0600, NULL);
+ ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
if (ent == NULL) {
printk(KERN_WARNING "make_proc_exitcode : Failed to register "
"/proc/exitcode\n");
return 0;
}
-
- ent->read_proc = read_proc_exitcode;
- ent->write_proc = write_proc_exitcode;
-
return 0;
}
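
The UML hunks above (mconsole, ubd, exitcode, and process.c just below) all follow the same conversion: the old create_proc_entry() with read_proc/write_proc hooks becomes a proc_create() call backed by a file_operations built on the seq_file single_open() helpers. Consolidated into one minimal module-style sketch (not from this commit; the names and /proc entry are illustrative, and the file_operations-based proc_create() matches this kernel generation):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", 42);	/* emit the whole file in one go */
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = example_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	proc_create("example", 0444, NULL, &example_proc_fops);
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
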
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 039270b9b73..89474ba0741 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_putc(p, '\n');
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 4a28a1568d8..2f910a1b745 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -9,11 +9,13 @@
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
@@ -336,16 +338,19 @@ int get_using_sysemu(void)
return atomic_read(&using_sysemu);
}
-static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,int *eof, void *data)
+static int sysemu_proc_show(struct seq_file *m, void *v)
{
- if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size)
- /* No overflow */
- *eof = 1;
+ seq_printf(m, "%d\n", get_using_sysemu());
+ return 0;
+}
- return strlen(buf);
+static int sysemu_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sysemu_proc_show, NULL);
}
-static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned long count,void *data)
+static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
char tmp[2];
@@ -358,13 +363,22 @@ static int proc_write_sysemu(struct file *file,const char __user *buf, unsigned
return count;
}
+static const struct file_operations sysemu_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = sysemu_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = sysemu_proc_write,
+};
+
int __init make_proc_sysemu(void)
{
struct proc_dir_entry *ent;
if (!sysemu_supported)
return 0;
- ent = create_proc_entry("sysemu", 0600, NULL);
+ ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
if (ent == NULL)
{
@@ -372,9 +386,6 @@ int __init make_proc_sysemu(void)
return 0;
}
- ent->read_proc = proc_read_sysemu;
- ent->write_proc = proc_write_sysemu;
-
return 0;
}
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index a4625c7b2bf..cccab850c27 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -8,6 +8,7 @@
#include "linux/mm.h"
#include "linux/sched.h"
#include "linux/utsname.h"
+#include "linux/syscalls.h"
#include "asm/current.h"
#include "asm/mman.h"
#include "asm/uaccess.h"
@@ -37,31 +38,6 @@ long sys_vfork(void)
return ret;
}
-/* common code for old and new mmaps */
-long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- long error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
- out:
- return error;
-}
-
long old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long offset)
@@ -70,7 +46,7 @@ long old_mmap(unsigned long addr, unsigned long len,
if (offset & ~PAGE_MASK)
goto out;
- err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
out:
return err;
}
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
index d0da9d7c537..770885472ed 100644
--- a/arch/um/sys-i386/asm/elf.h
+++ b/arch/um/sys-i386/asm/elf.h
@@ -48,7 +48,6 @@ typedef struct user_i387_struct elf_fpregset_t;
PT_REGS_EAX(regs) = 0; \
} while (0)
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h
index 905698197e3..e7787679e31 100644
--- a/arch/um/sys-i386/shared/sysdep/syscalls.h
+++ b/arch/um/sys-i386/shared/sysdep/syscalls.h
@@ -20,7 +20,3 @@ extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
((long (*)(struct syscall_args)) \
(*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
-
-extern long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
diff --git a/arch/um/sys-ppc/asm/elf.h b/arch/um/sys-ppc/asm/elf.h
index af9463cd8ce..8aacaf56508 100644
--- a/arch/um/sys-ppc/asm/elf.h
+++ b/arch/um/sys-ppc/asm/elf.h
@@ -17,8 +17,6 @@ extern long elf_aux_hwcap;
#define ELF_CLASS ELFCLASS32
#endif
-#define USE_ELF_CORE_DUMP
-
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index 04b9e87c8da..49655c83efd 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -104,7 +104,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
clear_thread_flag(TIF_IA32);
#endif
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 32a1918e1b8..55298e89157 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -50,6 +50,8 @@ config X86
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
select HAVE_HW_BREAKPOINT
+ select PERF_EVENTS
+ select ANON_INODES
select HAVE_ARCH_KMEMCHECK
select HAVE_USER_RETURN_NOTIFIER
@@ -2012,18 +2014,9 @@ config SCx200HR_TIMER
processor goes idle (as is done by the scheduler). The
other workaround is idle=poll boot option.
-config GEODE_MFGPT_TIMER
- def_bool y
- prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
- depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
- ---help---
- This driver provides a clock event source based on the MFGPT
- timer(s) in the CS5535 and CS5536 companion chip for the geode.
- MFGPTs have a better resolution and max interval than the
- generic PIT, and are suitable for use as high-res timers.
-
config OLPC
bool "One Laptop Per Child support"
+ select GPIOLIB
default n
---help---
Add support for detecting the unique features of the OLPC
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 731318e5ac1..bc01e3ebfeb 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -187,8 +187,8 @@ config HAVE_MMIOTRACE_SUPPORT
def_bool y
config X86_DECODER_SELFTEST
- bool "x86 instruction decoder selftest"
- depends on DEBUG_KERNEL
+ bool "x86 instruction decoder selftest"
+ depends on DEBUG_KERNEL && KPROBES
---help---
Perform x86 instruction decoder selftests at build time.
This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index bbeb0c3fbd9..89bbf4e4d05 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -9,6 +9,9 @@
#include <byteswap.h>
#define USE_BSD
#include <endian.h>
+#include <regex.h>
+
+static void die(char *fmt, ...);
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
static Elf32_Ehdr ehdr;
@@ -30,25 +33,47 @@ static struct section *secs;
* the address for which it has been compiled. Don't warn user about
* absolute relocations present w.r.t these symbols.
*/
-static const char* safe_abs_relocs[] = {
- "xen_irq_disable_direct_reloc",
- "xen_save_fl_direct_reloc",
-};
+static const char abs_sym_regex[] =
+ "^(xen_irq_disable_direct_reloc$|"
+ "xen_save_fl_direct_reloc$|"
+ "VDSO|"
+ "__crc_)";
+static regex_t abs_sym_regex_c;
+static int is_abs_reloc(const char *sym_name)
+{
+ return !regexec(&abs_sym_regex_c, sym_name, 0, NULL, 0);
+}
-static int is_safe_abs_reloc(const char* sym_name)
+/*
+ * These symbols are known to be relative, even if the linker marks them
+ * as absolute (typically defined outside any section in the linker script.)
+ */
+static const char rel_sym_regex[] =
+ "^_end$";
+static regex_t rel_sym_regex_c;
+static int is_rel_reloc(const char *sym_name)
{
- int i;
+ return !regexec(&rel_sym_regex_c, sym_name, 0, NULL, 0);
+}
- for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
- if (!strcmp(sym_name, safe_abs_relocs[i]))
- /* Match found */
- return 1;
- }
- if (strncmp(sym_name, "VDSO", 4) == 0)
- return 1;
- if (strncmp(sym_name, "__crc_", 6) == 0)
- return 1;
- return 0;
+static void regex_init(void)
+{
+ char errbuf[128];
+ int err;
+
+ err = regcomp(&abs_sym_regex_c, abs_sym_regex,
+ REG_EXTENDED|REG_NOSUB);
+ if (err) {
+ regerror(err, &abs_sym_regex_c, errbuf, sizeof errbuf);
+ die("%s", errbuf);
+ }
+
+ err = regcomp(&rel_sym_regex_c, rel_sym_regex,
+ REG_EXTENDED|REG_NOSUB);
+ if (err) {
+ regerror(err, &rel_sym_regex_c, errbuf, sizeof errbuf);
+ die("%s", errbuf);
+ }
}
static void die(char *fmt, ...)
@@ -131,7 +156,7 @@ static const char *rel_type(unsigned type)
#undef REL_TYPE
};
const char *name = "unknown type rel type name";
- if (type < ARRAY_SIZE(type_name)) {
+ if (type < ARRAY_SIZE(type_name) && type_name[type]) {
name = type_name[type];
}
return name;
@@ -448,7 +473,7 @@ static void print_absolute_relocs(void)
* Before warning check if this absolute symbol
* relocation is harmless.
*/
- if (is_safe_abs_reloc(name))
+ if (is_abs_reloc(name) || is_rel_reloc(name))
continue;
if (!printed) {
@@ -501,21 +526,26 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
r_type = ELF32_R_TYPE(rel->r_info);
/* Don't visit relocations to absolute symbols */
- if (sym->st_shndx == SHN_ABS) {
+ if (sym->st_shndx == SHN_ABS &&
+ !is_rel_reloc(sym_name(sym_strtab, sym))) {
continue;
}
- if (r_type == R_386_NONE || r_type == R_386_PC32) {
+ switch (r_type) {
+ case R_386_NONE:
+ case R_386_PC32:
/*
* NONE can be ignored and and PC relative
* relocations don't need to be adjusted.
*/
- }
- else if (r_type == R_386_32) {
+ break;
+ case R_386_32:
/* Visit relocations that need to be adjusted */
visit(rel, sym);
- }
- else {
- die("Unsupported relocation type: %d\n", r_type);
+ break;
+ default:
+ die("Unsupported relocation type: %s (%d)\n",
+ rel_type(r_type), r_type);
+ break;
}
}
}
@@ -571,16 +601,15 @@ static void emit_relocs(int as_text)
}
else {
unsigned char buf[4];
- buf[0] = buf[1] = buf[2] = buf[3] = 0;
/* Print a stop */
- printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+ fwrite("\0\0\0\0", 4, 1, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++) {
buf[0] = (relocs[i] >> 0) & 0xff;
buf[1] = (relocs[i] >> 8) & 0xff;
buf[2] = (relocs[i] >> 16) & 0xff;
buf[3] = (relocs[i] >> 24) & 0xff;
- printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+ fwrite(buf, 4, 1, stdout);
}
}
}
@@ -598,6 +627,8 @@ int main(int argc, char **argv)
FILE *fp;
int i;
+ regex_init();
+
show_absolute_syms = 0;
show_absolute_relocs = 0;
as_text = 0;
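
The relocs change swaps the hard-coded whitelist of safe absolute symbols for two POSIX extended regular expressions compiled once at startup, so adding a new safe-absolute or known-relative symbol only needs a pattern edit. A minimal userspace sketch of the regcomp()/regexec() pattern it adopts (not from this commit; the pattern and test string are illustrative):

#include <regex.h>
#include <stdio.h>

int main(void)
{
	regex_t re;
	const char *pat = "^(VDSO|__crc_)";

	if (regcomp(&re, pat, REG_EXTENDED | REG_NOSUB))
		return 1;
	printf("VDSO32_sigreturn matches: %d\n",
	       regexec(&re, "VDSO32_sigreturn", 0, NULL, 0) == 0);
	regfree(&re);
	return 0;
}
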
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index b31cc54b464..93e689f4bd8 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -16,7 +16,7 @@
*/
#include <asm/segment.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <asm/boot.h>
#include <asm/e820.h>
#include <asm/page_types.h>
diff --git a/arch/x86/boot/version.c b/arch/x86/boot/version.c
index 2723d9b5ce4..2b15aa488ff 100644
--- a/arch/x86/boot/version.c
+++ b/arch/x86/boot/version.c
@@ -13,8 +13,8 @@
*/
#include "boot.h"
-#include <linux/utsrelease.h>
-#include <linux/compile.h>
+#include <generated/utsrelease.h>
+#include <generated/compile.h>
const char kernel_version[] =
UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") "
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 4eefdca9832..53147ad85b9 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -696,7 +696,7 @@ ia32_sys_call_table:
.quad quiet_ni_syscall /* streams2 */
.quad stub32_vfork /* 190 */
.quad compat_sys_getrlimit
- .quad sys32_mmap2
+ .quad sys_mmap_pgoff
.quad sys32_truncate64
.quad sys32_ftruncate64
.quad sys32_stat64 /* 195 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index df82c0e48de..422572c7792 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -155,9 +155,6 @@ struct mmap_arg_struct {
asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
{
struct mmap_arg_struct a;
- struct file *file = NULL;
- unsigned long retval;
- struct mm_struct *mm ;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
@@ -165,22 +162,8 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
if (a.offset & ~PAGE_MASK)
return -EINVAL;
- if (!(a.flags & MAP_ANONYMOUS)) {
- file = fget(a.fd);
- if (!file)
- return -EBADF;
- }
-
- mm = current->mm;
- down_write(&mm->mmap_sem);
- retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset>>PAGE_SHIFT);
- if (file)
- fput(file);
-
- up_write(&mm->mmap_sem);
-
- return retval;
}
asmlinkage long sys32_mprotect(unsigned long start, size_t len,
@@ -483,30 +466,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
return ret;
}
-asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- struct mm_struct *mm = current->mm;
- unsigned long error;
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- return -EBADF;
- }
-
- down_write(&mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&mm->mmap_sem);
-
- if (file)
- fput(file);
- return error;
-}
-
asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
{
char *arch = "x86_64";
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index 84786fb9a23..4d817f9e6e7 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -28,7 +28,9 @@ extern void amd_iommu_flush_all_domains(void);
extern void amd_iommu_flush_all_devices(void);
extern void amd_iommu_apply_erratum_63(u16 devid);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
#ifndef CONFIG_AMD_IOMMU_STATS
static inline void amd_iommu_stats_init(void) { }
diff --git a/arch/x86/include/asm/asm-offsets.h b/arch/x86/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/x86/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 613700f27a4..637e1ec963c 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -153,6 +153,7 @@
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
+#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
/*
* Auxiliary flags: Linux defined - For features scattered in various
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 0f6c02f3b7d..ac91eed2106 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
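
The dma_capable() hunk is an off-by-one fix: a buffer whose last byte sits exactly at the mask limit is addressable, yet comparing addr + size against the mask rejects it (and the sum can even wrap). Comparing the address of the last byte instead accepts exactly the reachable ranges; a small sketch (not from this commit; the mask and addresses are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = 0xffffffffULL;		/* a 32-bit DMA mask */
	uint64_t addr = 0xfffff000ULL;		/* last 4 KiB below 4 GiB */
	uint64_t size = 0x1000;

	printf("addr + size     <= mask: %d\n", addr + size <= mask);	  /* 0: wrongly rejected */
	printf("addr + size - 1 <= mask: %d\n", addr + size - 1 <= mask); /* 1: accepted */
	return 0;
}
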
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 8a024babe5e..b4501ee223a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -239,7 +239,6 @@ extern int force_personality32;
#endif /* !CONFIG_X86_32 */
#define CORE_DUMP_USE_REGSET
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/x86/include/asm/geode.h b/arch/x86/include/asm/geode.h
index ad3c2ed7548..7cd73552a4e 100644
--- a/arch/x86/include/asm/geode.h
+++ b/arch/x86/include/asm/geode.h
@@ -12,160 +12,7 @@
#include <asm/processor.h>
#include <linux/io.h>
-
-/* Generic southbridge functions */
-
-#define GEODE_DEV_PMS 0
-#define GEODE_DEV_ACPI 1
-#define GEODE_DEV_GPIO 2
-#define GEODE_DEV_MFGPT 3
-
-extern int geode_get_dev_base(unsigned int dev);
-
-/* Useful macros */
-#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
-#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
-#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
-#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
-
-/* MSRS */
-
-#define MSR_GLIU_P2D_RO0 0x10000029
-
-#define MSR_LX_GLD_MSR_CONFIG 0x48002001
-#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
- * sheet has the wrong value */
-#define MSR_GLCP_SYS_RSTPLL 0x4C000014
-#define MSR_GLCP_DOTPLL 0x4C000015
-
-#define MSR_LBAR_SMB 0x5140000B
-#define MSR_LBAR_GPIO 0x5140000C
-#define MSR_LBAR_MFGPT 0x5140000D
-#define MSR_LBAR_ACPI 0x5140000E
-#define MSR_LBAR_PMS 0x5140000F
-
-#define MSR_DIVIL_SOFT_RESET 0x51400017
-
-#define MSR_PIC_YSEL_LOW 0x51400020
-#define MSR_PIC_YSEL_HIGH 0x51400021
-#define MSR_PIC_ZSEL_LOW 0x51400022
-#define MSR_PIC_ZSEL_HIGH 0x51400023
-#define MSR_PIC_IRQM_LPC 0x51400025
-
-#define MSR_MFGPT_IRQ 0x51400028
-#define MSR_MFGPT_NR 0x51400029
-#define MSR_MFGPT_SETUP 0x5140002B
-
-#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
-
-#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
-#define MSR_GX_MSR_PADSEL 0xC0002011
-
-/* Resource Sizes */
-
-#define LBAR_GPIO_SIZE 0xFF
-#define LBAR_MFGPT_SIZE 0x40
-#define LBAR_ACPI_SIZE 0x40
-#define LBAR_PMS_SIZE 0x80
-
-/* ACPI registers (PMS block) */
-
-/*
- * PM1_EN is only valid when VSA is enabled for 16 bit reads.
- * When VSA is not enabled, *always* read both PM1_STS and PM1_EN
- * with a 32 bit read at offset 0x0
- */
-
-#define PM1_STS 0x00
-#define PM1_EN 0x02
-#define PM1_CNT 0x08
-#define PM2_CNT 0x0C
-#define PM_TMR 0x10
-#define PM_GPE0_STS 0x18
-#define PM_GPE0_EN 0x1C
-
-/* PMC registers (PMS block) */
-
-#define PM_SSD 0x00
-#define PM_SCXA 0x04
-#define PM_SCYA 0x08
-#define PM_OUT_SLPCTL 0x0C
-#define PM_SCLK 0x10
-#define PM_SED 0x1
-#define PM_SCXD 0x18
-#define PM_SCYD 0x1C
-#define PM_IN_SLPCTL 0x20
-#define PM_WKD 0x30
-#define PM_WKXD 0x34
-#define PM_RD 0x38
-#define PM_WKXA 0x3C
-#define PM_FSD 0x40
-#define PM_TSD 0x44
-#define PM_PSD 0x48
-#define PM_NWKD 0x4C
-#define PM_AWKD 0x50
-#define PM_SSC 0x54
-
-/* VSA2 magic values */
-
-#define VSA_VRC_INDEX 0xAC1C
-#define VSA_VRC_DATA 0xAC1E
-#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
-#define VSA_VR_SIGNATURE 0x0003
-#define VSA_VR_MEM_SIZE 0x0200
-#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
-#define GSW_VSA_SIG 0x534d /* General Software signature */
-/* GPIO */
-
-#define GPIO_OUTPUT_VAL 0x00
-#define GPIO_OUTPUT_ENABLE 0x04
-#define GPIO_OUTPUT_OPEN_DRAIN 0x08
-#define GPIO_OUTPUT_INVERT 0x0C
-#define GPIO_OUTPUT_AUX1 0x10
-#define GPIO_OUTPUT_AUX2 0x14
-#define GPIO_PULL_UP 0x18
-#define GPIO_PULL_DOWN 0x1C
-#define GPIO_INPUT_ENABLE 0x20
-#define GPIO_INPUT_INVERT 0x24
-#define GPIO_INPUT_FILTER 0x28
-#define GPIO_INPUT_EVENT_COUNT 0x2C
-#define GPIO_READ_BACK 0x30
-#define GPIO_INPUT_AUX1 0x34
-#define GPIO_EVENTS_ENABLE 0x38
-#define GPIO_LOCK_ENABLE 0x3C
-#define GPIO_POSITIVE_EDGE_EN 0x40
-#define GPIO_NEGATIVE_EDGE_EN 0x44
-#define GPIO_POSITIVE_EDGE_STS 0x48
-#define GPIO_NEGATIVE_EDGE_STS 0x4C
-
-#define GPIO_MAP_X 0xE0
-#define GPIO_MAP_Y 0xE4
-#define GPIO_MAP_Z 0xE8
-#define GPIO_MAP_W 0xEC
-
-static inline u32 geode_gpio(unsigned int nr)
-{
- BUG_ON(nr > 28);
- return 1 << nr;
-}
-
-extern void geode_gpio_set(u32, unsigned int);
-extern void geode_gpio_clear(u32, unsigned int);
-extern int geode_gpio_isset(u32, unsigned int);
-extern void geode_gpio_setup_event(unsigned int, int, int);
-extern void geode_gpio_set_irq(unsigned int, unsigned int);
-
-static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
-{
- geode_gpio_setup_event(gpio, pair, 0);
-}
-
-static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
-{
- geode_gpio_setup_event(gpio, pair, 1);
-}
-
-/* Specific geode tests */
+#include <linux/cs5535.h>
static inline int is_geode_gx(void)
{
@@ -186,68 +33,4 @@ static inline int is_geode(void)
return (is_geode_gx() || is_geode_lx());
}
-#ifdef CONFIG_MGEODE_LX
-extern int geode_has_vsa2(void);
-#else
-static inline int geode_has_vsa2(void)
-{
- return 0;
-}
-#endif
-
-/* MFGPTs */
-
-#define MFGPT_MAX_TIMERS 8
-#define MFGPT_TIMER_ANY (-1)
-
-#define MFGPT_DOMAIN_WORKING 1
-#define MFGPT_DOMAIN_STANDBY 2
-#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
-
-#define MFGPT_CMP1 0
-#define MFGPT_CMP2 1
-
-#define MFGPT_EVENT_IRQ 0
-#define MFGPT_EVENT_NMI 1
-#define MFGPT_EVENT_RESET 3
-
-#define MFGPT_REG_CMP1 0
-#define MFGPT_REG_CMP2 2
-#define MFGPT_REG_COUNTER 4
-#define MFGPT_REG_SETUP 6
-
-#define MFGPT_SETUP_CNTEN (1 << 15)
-#define MFGPT_SETUP_CMP2 (1 << 14)
-#define MFGPT_SETUP_CMP1 (1 << 13)
-#define MFGPT_SETUP_SETUP (1 << 12)
-#define MFGPT_SETUP_STOPEN (1 << 11)
-#define MFGPT_SETUP_EXTEN (1 << 10)
-#define MFGPT_SETUP_REVEN (1 << 5)
-#define MFGPT_SETUP_CLKSEL (1 << 4)
-
-static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
- outw(value, base + reg + (timer * 8));
-}
-
-static inline u16 geode_mfgpt_read(int timer, u16 reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
- return inw(base + reg + (timer * 8));
-}
-
-extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
-extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
-extern int geode_mfgpt_alloc_timer(int timer, int domain);
-
-#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
-#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
-
-#ifdef CONFIG_GEODE_MFGPT_TIMER
-extern int __init mfgpt_timer_setup(void);
-#else
-static inline int mfgpt_timer_setup(void) { return 0; }
-#endif
-
#endif /* _ASM_X86_GEODE_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 08c48a81841..eeac829a0f4 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -103,7 +103,8 @@ extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
extern void send_cleanup_vector(struct irq_cfg *);
struct irq_desc;
-extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *);
+extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *,
+ unsigned int *dest_id);
extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
extern void setup_ioapic_dest(void);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6a635bd3986..4611f085cd4 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -113,7 +113,7 @@
*/
#define LOCAL_PENDING_VECTOR 0xec
-#define UV_BAU_MESSAGE 0xec
+#define UV_BAU_MESSAGE 0xea
/*
* Self IPI vector for machine checks
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4ffe09b2ad7..1cd58cdbc03 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -12,6 +12,7 @@
#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
+#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
/* EFER bits: */
#define _EFER_SCE 0 /* SYSCALL/SYSRET */
@@ -123,6 +124,7 @@
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
+#define MSR_FAM10H_NODE_ID 0xc001100c
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 5bef931f8b1..c5bc4c2d33f 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -27,6 +27,18 @@ struct msr {
};
};
+struct msr_info {
+ u32 msr_no;
+ struct msr reg;
+ struct msr *msrs;
+ int err;
+};
+
+struct msr_regs_info {
+ u32 *regs;
+ int err;
+};
+
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
@@ -240,9 +252,12 @@ do { \
#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
(u32)((val) >> 32))
-#define write_tsc(val1, val2) wrmsr(0x10, (val1), (val2))
+#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
+
+#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
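
A minimal usage sketch (editorial addition, not part of the patch): the msrs_alloc()/msrs_free() helpers declared above pair naturally with the existing rdmsr_on_cpus() bulk reader to sample one MSR on every online CPU, using the 64-bit 'q' view of struct msr. The function name and surrounding logic below are hypothetical and assume an SMP build.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

static int demo_sample_msr(u32 msr_no)
{
	struct msr *msrs;
	int cpu;

	msrs = msrs_alloc();		/* one struct msr slot per possible CPU */
	if (!msrs)
		return -ENOMEM;

	rdmsr_on_cpus(cpu_online_mask, msr_no, msrs);	/* cross-CPU bulk read */

	for_each_online_cpu(cpu)
		pr_info("cpu%d: MSR %#x = %#llx\n", cpu, msr_no,
			(unsigned long long)msrs[cpu].q);

	msrs_free(msrs);
	return 0;
}
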
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 834a30295fa..3a57385d9fa 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -120,7 +120,7 @@ extern int olpc_ec_mask_unset(uint8_t bits);
/* GPIO assignments */
-#define OLPC_GPIO_MIC_AC geode_gpio(1)
+#define OLPC_GPIO_MIC_AC 1
#define OLPC_GPIO_DCON_IRQ geode_gpio(7)
#define OLPC_GPIO_THRM_ALRM geode_gpio(10)
#define OLPC_GPIO_SMB_CLK geode_gpio(14)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index efb38994859..dd59a85a918 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}
-static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}
-static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
unsigned long flags)
{
PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}
-static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}
-static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9357473c8da..b1e70d51e40 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -318,14 +318,14 @@ struct pv_mmu_ops {
phys_addr_t phys, pgprot_t flags);
};
-struct raw_spinlock;
+struct arch_spinlock;
struct pv_lock_ops {
- int (*spin_is_locked)(struct raw_spinlock *lock);
- int (*spin_is_contended)(struct raw_spinlock *lock);
- void (*spin_lock)(struct raw_spinlock *lock);
- void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct raw_spinlock *lock);
- void (*spin_unlock)(struct raw_spinlock *lock);
+ int (*spin_is_locked)(struct arch_spinlock *lock);
+ int (*spin_is_contended)(struct arch_spinlock *lock);
+ void (*spin_lock)(struct arch_spinlock *lock);
+ void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct arch_spinlock *lock);
+ void (*spin_unlock)(struct arch_spinlock *lock);
};
/* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b399988eee3..b4bf9a942ed 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -118,11 +118,27 @@ extern int __init pcibios_init(void);
/* pci-mmconfig.c */
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+ struct list_head list;
+ struct resource res;
+ u64 address;
+ char __iomem *virt;
+ u16 segment;
+ u8 start_bus;
+ u8 end_bus;
+ char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
extern int __init pci_mmcfg_arch_init(void);
extern void __init pci_mmcfg_arch_free(void);
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
+
+extern struct list_head pci_mmcfg_list;
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-extern int pci_mmcfg_config_num;
+#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20)
/*
* AMD Fam10h CPUs are buggy, and cannot access MMIO config space
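
An illustrative sketch (editorial addition, not part of the patch): with MMCONFIG regions now kept on pci_mmcfg_list instead of the old pci_mmcfg_config array, arch code can walk them with the ordinary list helpers. The dump function below is hypothetical.

#include <linux/kernel.h>
#include <linux/list.h>
#include <asm/pci_x86.h>

static void demo_dump_mmcfg_regions(void)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		pr_info("%s: segment %04x, buses %02x-%02x, base %#llx\n",
			cfg->name, cfg->segment, cfg->start_bus, cfg->end_bus,
			(unsigned long long)cfg->address);
}
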
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b65a36defeb..0c44196b78a 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
#define percpu_to_op(op, var, val) \
do { \
- typedef typeof(var) T__; \
+ typedef typeof(var) pto_T__; \
if (0) { \
- T__ tmp__; \
- tmp__ = (val); \
+ pto_T__ pto_tmp__; \
+ pto_tmp__ = (val); \
} \
switch (sizeof(var)) { \
case 1: \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
- : "qi" ((T__)(val))); \
+ : "qi" ((pto_T__)(val))); \
break; \
case 2: \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)(val))); \
+ : "ri" ((pto_T__)(val))); \
break; \
case 4: \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)(val))); \
+ : "ri" ((pto_T__)(val))); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
- : "re" ((T__)(val))); \
+ : "re" ((pto_T__)(val))); \
break; \
default: __bad_percpu_size(); \
} \
@@ -106,31 +106,31 @@ do { \
#define percpu_from_op(op, var, constraint) \
({ \
- typeof(var) ret__; \
+ typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(1)",%0" \
- : "=q" (ret__) \
+ : "=q" (pfo_ret__) \
: constraint); \
break; \
case 2: \
asm(op "w "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
case 4: \
asm(op "l "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
case 8: \
asm(op "q "__percpu_arg(1)",%0" \
- : "=r" (ret__) \
+ : "=r" (pfo_ret__) \
: constraint); \
break; \
default: __bad_percpu_size(); \
} \
- ret__; \
+ pfo_ret__; \
})
/*
@@ -153,6 +153,84 @@ do { \
#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
+
+/*
+ * Per cpu atomic 64 bit operations are only available under 64 bit.
+ * 32 bit must fall back to generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
+#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
+
+#endif
+
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
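
An illustrative sketch (editorial addition, not part of the patch): the size-specific backends added above are what the generic this_cpu_*()/__this_cpu_*() accessors from <linux/percpu.h> expand to on x86, so a per-CPU counter update becomes a single segment-relative instruction. The counter below is hypothetical; as in the nmi.c hunk later in this patch, the __this_cpu_*() forms assume preemption is already disabled (for example, in interrupt context).

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_event_count);

/* called with preemption disabled, e.g. from an interrupt handler */
static void demo_count_event(void)
{
	/* expands to a single segment-relative "addl $1, ..." via __this_cpu_add_4() */
	__this_cpu_inc(per_cpu_var(demo_event_count));
}
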
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6f8ec1c37e0..fc801bab1b3 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -181,7 +181,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
- asm("cpuid"
+ asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 3d11fd0f44c..9d369f68032 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -292,6 +292,8 @@ extern void user_enable_block_step(struct task_struct *);
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4e77853321d..3089f70c0c5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -58,7 +58,7 @@
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
short inc = 0x0100;
@@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
: "memory", "cc");
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp, new;
@@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return tmp;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
: "+m" (lock->slock)
@@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
#else
#define TICKET_SHIFT 16
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
int inc = 0x00010000;
int tmp;
@@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
: "memory", "cc");
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
int tmp;
int new;
@@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
return tmp;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
: "+m" (lock->slock)
@@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
}
#endif
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
@@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (int)(lock)->lock > 0;
}
@@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return (lock)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
"jns 1f\n"
@@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
::LOCK_PTR_REG (rw) : "memory");
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
"jz 1f\n"
@@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
+static inline int arch_read_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline int __raw_write_trylock(raw_rwlock_t *lock)
+static inline int arch_write_trylock(arch_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
@@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
return 0;
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
asm volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 845f81c8709..dcb48b2edc1 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -5,16 +5,16 @@
# error "please don't include this file directly"
#endif
-typedef struct raw_spinlock {
+typedef struct arch_spinlock {
unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
#endif /* _ASM_X86_SPINLOCK_TYPES_H */
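
A minimal sketch (editorial addition, not part of the patch): code that previously declared a low-level lock as raw_spinlock_t with __RAW_SPIN_LOCK_UNLOCKED now uses the arch_* names; the renamed arch_spin_*() primitives do no preemption or IRQ handling of their own, so this is only for the rare callers that manage that themselves (normal code keeps using spinlock_t and spin_lock()).

#include <linux/spinlock.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
	arch_spin_lock(&demo_lock);	/* raw ticket lock, no preempt/irq handling */
	/* ... touch data protected by demo_lock ... */
	arch_spin_unlock(&demo_lock);
}
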
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index cf86a5e7381..35e89122a42 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -5,6 +5,29 @@ extern int kstack_depth_to_print;
int x86_is_stack_id(int id, char *name);
+struct thread_info;
+struct stacktrace_ops;
+
+typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+ unsigned long *stack,
+ unsigned long bp,
+ const struct stacktrace_ops *ops,
+ void *data,
+ unsigned long *end,
+ int *graph);
+
+extern unsigned long
+print_context_stack(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph);
+
+extern unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph);
+
/* Generic stack tracer with callbacks */
struct stacktrace_ops {
@@ -14,6 +37,7 @@ struct stacktrace_ops {
void (*address)(void *data, unsigned long address, int reliable);
/* On negative return stop dumping */
int (*stack)(void *data, char *name);
+ walk_stack_t walk_stack;
};
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index 87ffcb12a1b..8085277e1b8 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -5,13 +5,17 @@
#ifdef CONFIG_SWIOTLB
extern int swiotlb;
-extern int pci_swiotlb_init(void);
+extern int __init pci_swiotlb_detect(void);
+extern void __init pci_swiotlb_init(void);
#else
#define swiotlb 0
-static inline int pci_swiotlb_init(void)
+static inline int pci_swiotlb_detect(void)
{
return 0;
}
+static inline void pci_swiotlb_init(void)
+{
+}
#endif
static inline void dma_mark_clean(void *addr, size_t size) {}
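
A hypothetical ordering sketch (editorial addition, not part of the patch): splitting pci_swiotlb_init() into a detect step and an init step lets DMA setup code decide whether a hardware IOMMU will take over before committing the swiotlb bounce-buffer allocation. The function and surrounding flow below are assumptions for illustration only.

#include <asm/swiotlb.h>

static void __init demo_dma_setup(void)
{
	int need_swiotlb = pci_swiotlb_detect();	/* cheap: only decides, allocates nothing */

	/* ... give hardware IOMMU drivers a chance to claim the machine ... */

	if (need_swiotlb)
		pci_swiotlb_init();	/* allocate bounce buffers only if still required */
}
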
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 9af9decb38c..d5f69045c10 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -30,7 +30,6 @@ struct mmap_arg_struct;
asmlinkage long sys32_mmap(struct mmap_arg_struct __user *);
asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long);
-asmlinkage long sys32_pipe(int __user *);
struct sigaction32;
struct old_sigaction32;
asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
@@ -57,9 +56,6 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32);
asmlinkage long sys32_personality(unsigned long);
asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
-asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long, unsigned long);
-
struct oldold_utsname;
struct old_utsname;
asmlinkage long sys32_olduname(struct oldold_utsname __user *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 372b76edd63..8868b9420b0 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -18,16 +18,24 @@
/* Common in X86_32 and X86_64 */
/* kernel/ioport.c */
asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
+long sys_iopl(unsigned int, struct pt_regs *);
/* kernel/process.c */
int sys_fork(struct pt_regs *);
int sys_vfork(struct pt_regs *);
+long sys_execve(char __user *, char __user * __user *,
+ char __user * __user *, struct pt_regs *);
+long sys_clone(unsigned long, unsigned long, void __user *,
+ void __user *, struct pt_regs *);
/* kernel/ldt.c */
asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
long sys_rt_sigreturn(struct pt_regs *);
+long sys_sigaltstack(const stack_t __user *, stack_t __user *,
+ struct pt_regs *);
+
/* kernel/tls.c */
asmlinkage int sys_set_thread_area(struct user_desc __user *);
@@ -35,18 +43,11 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
/* X86_32 only */
#ifdef CONFIG_X86_32
-/* kernel/ioport.c */
-long sys_iopl(struct pt_regs *);
-
-/* kernel/process_32.c */
-int sys_clone(struct pt_regs *);
-int sys_execve(struct pt_regs *);
/* kernel/signal.c */
asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
struct old_sigaction __user *);
-int sys_sigaltstack(struct pt_regs *);
unsigned long sys_sigreturn(struct pt_regs *);
/* kernel/sys_i386_32.c */
@@ -55,8 +56,6 @@ struct sel_arg_struct;
struct oldold_utsname;
struct old_utsname;
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long, unsigned long);
asmlinkage int old_mmap(struct mmap_arg_struct __user *);
asmlinkage int old_select(struct sel_arg_struct __user *);
asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
@@ -64,28 +63,15 @@ asmlinkage int sys_uname(struct old_utsname __user *);
asmlinkage int sys_olduname(struct oldold_utsname __user *);
/* kernel/vm86_32.c */
-int sys_vm86old(struct pt_regs *);
-int sys_vm86(struct pt_regs *);
+int sys_vm86old(struct vm86_struct __user *, struct pt_regs *);
+int sys_vm86(unsigned long, unsigned long, struct pt_regs *);
#else /* CONFIG_X86_32 */
/* X86_64 only */
-/* kernel/ioport.c */
-asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
-
/* kernel/process_64.c */
-asmlinkage long sys_clone(unsigned long, unsigned long,
- void __user *, void __user *,
- struct pt_regs *);
-asmlinkage long sys_execve(char __user *, char __user * __user *,
- char __user * __user *,
- struct pt_regs *);
long sys_arch_prctl(int, unsigned long);
-/* kernel/signal.c */
-asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
- struct pt_regs *);
-
/* kernel/sys_x86_64.c */
struct new_utsname;
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 022a84386de..ecb544e6538 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -23,6 +23,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss);
+extern void show_regs_common(void);
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 40e37b10c6c..c5087d79658 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -35,11 +35,16 @@
# endif
#endif
-/* Node not present */
-#define NUMA_NO_NODE (-1)
+/*
+ * to preserve the visibility of NUMA_NO_NODE definition,
+ * moved to there from here. May be used independent of
+ * CONFIG_NUMA.
+ */
+#include <linux/numa.h>
#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
+
#include <asm/mpspec.h>
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 90f06c25221..cb507bb05d7 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -16,7 +16,6 @@ extern unsigned long initial_code;
extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
-#define TRAMPOLINE_BASE 0x6000
extern unsigned long setup_trampoline(void);
extern void __init reserve_trampoline_memory(void);
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 7ed17ff502b..2751f3075d8 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -76,15 +76,6 @@ union partition_info_u {
};
};
-union uv_watchlist_u {
- u64 val;
- struct {
- u64 blade : 16,
- size : 32,
- filler : 16;
- };
-};
-
enum uv_memprotect {
UV_MEMPROT_RESTRICT_ACCESS,
UV_MEMPROT_ALLOW_AMO,
@@ -100,7 +91,7 @@ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
extern s64 uv_bios_freq_base(u64, u64 *);
-extern int uv_bios_mq_watchlist_alloc(int, unsigned long, unsigned int,
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
unsigned long *);
extern int uv_bios_mq_watchlist_free(int, int);
extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index d1414af9855..811bfabc80b 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -172,6 +172,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
+#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
+
#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
@@ -232,6 +234,26 @@ static inline unsigned long uv_gpa(void *v)
return uv_soc_phys_ram_to_gpa(__pa(v));
}
+/* Top two bits indicate the requested address is in MMR space. */
+static inline int
+uv_gpa_in_mmr_space(unsigned long gpa)
+{
+ return (gpa >> 62) == 0x3UL;
+}
+
+/* UV global physical address --> socket phys RAM */
+static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+{
+ unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+ unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+ unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+
+ if (paddr >= remap_base && paddr < remap_base + remap_top)
+ paddr -= remap_base;
+ return paddr;
+}
+
+
/* gnode -> pnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
@@ -308,6 +330,15 @@ static inline unsigned long uv_read_global_mmr64(int pnode,
}
/*
+ * Global MMR space addresses when referenced by the GRU. (GRU does
+ * NOT use socket addressing).
+ */
+static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
+{
+ return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
+}
+
+/*
* Access hub local MMRs. Faster than using global space but only local MMRs
* are accessible.
*/
@@ -434,6 +465,14 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
}
}
+static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
+{
+ return (1UL << UVH_IPI_INT_SEND_SHFT) |
+ ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+ (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+ (vector << UVH_IPI_INT_VECTOR_SHFT);
+}
+
static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
unsigned long val;
@@ -442,10 +481,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
if (vector == NMI_VECTOR)
dmode = dest_NMI;
- val = (1UL << UVH_IPI_INT_SEND_SHFT) |
- ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
- (dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
- (vector << UVH_IPI_INT_VECTOR_SHFT);
+ val = uv_hub_ipi_value(apicid, vector, dmode);
uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index d5b7e90c0ed..396ff4cc8ed 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -37,31 +37,4 @@
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
-enum xen_domain_type {
- XEN_NATIVE, /* running on bare hardware */
- XEN_PV_DOMAIN, /* running in a PV domain */
- XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type XEN_NATIVE
-#endif
-
-#define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain() && \
- xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain() (xen_domain() && \
- xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#include <xen/interface/xen.h>
-
-#define xen_initial_domain() (xen_pv_domain() && \
- xen_start_info->flags & SIF_INITDOMAIN)
-#else /* !CONFIG_XEN_DOM0 */
-#define xen_initial_domain() (0)
-#endif /* CONFIG_XEN_DOM0 */
-
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4f2e66e29ec..d87f09bc5a5 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_HPET_TIMER) += hpet.o
obj-$(CONFIG_K8_NB) += k8.o
-obj-$(CONFIG_MGEODE_LX) += geode_32.o mfgpt_32.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += test_rodata.o
obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 59cdfa4686b..2e837f5080f 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
* P4, Core and beyond CPUs
*/
if (c->x86_vendor == X86_VENDOR_INTEL &&
- (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14)))
+ (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
flags->bm_control = 0;
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 1c0fb4d4ad5..23824fef789 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -19,7 +19,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
@@ -166,6 +166,43 @@ static void iommu_uninit_device(struct device *dev)
{
kfree(dev->archdata.iommu);
}
+
+void __init amd_iommu_uninit_devices(void)
+{
+ struct pci_dev *pdev = NULL;
+
+ for_each_pci_dev(pdev) {
+
+ if (!check_device(&pdev->dev))
+ continue;
+
+ iommu_uninit_device(&pdev->dev);
+ }
+}
+
+int __init amd_iommu_init_devices(void)
+{
+ struct pci_dev *pdev = NULL;
+ int ret = 0;
+
+ for_each_pci_dev(pdev) {
+
+ if (!check_device(&pdev->dev))
+ continue;
+
+ ret = iommu_init_device(&pdev->dev);
+ if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+
+ amd_iommu_uninit_devices();
+
+ return ret;
+}
#ifdef CONFIG_AMD_IOMMU_STATS
/*
@@ -1125,7 +1162,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
- iommu_area_free(range->bitmap, address, pages);
+ bitmap_clear(range->bitmap, address, pages);
}
@@ -1587,6 +1624,11 @@ static struct notifier_block device_nb = {
.notifier_call = device_change_notifier,
};
+void amd_iommu_init_notifier(void)
+{
+ bus_register_notifier(&pci_bus_type, &device_nb);
+}
+
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
@@ -2145,8 +2187,6 @@ static void prealloc_protection_domains(void)
if (!check_device(&dev->dev))
continue;
- iommu_init_device(&dev->dev);
-
/* Is there already any domain for it? */
if (domain_for_device(&dev->dev))
continue;
@@ -2215,8 +2255,6 @@ int __init amd_iommu_init_dma_ops(void)
register_iommu(&amd_iommu_ops);
- bus_register_notifier(&pci_bus_type, &device_nb);
-
amd_iommu_stats_init();
return 0;
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 7ffc3996523..1dca9c34eae 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1274,6 +1274,10 @@ static int __init amd_iommu_init(void)
if (ret)
goto free;
+ ret = amd_iommu_init_devices();
+ if (ret)
+ goto free;
+
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
else
@@ -1281,6 +1285,8 @@ static int __init amd_iommu_init(void)
if (ret)
goto free;
+ amd_iommu_init_notifier();
+
enable_iommus();
if (iommu_pass_through)
@@ -1296,6 +1302,9 @@ out:
return ret;
free:
+
+ amd_iommu_uninit_devices();
+
free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
get_order(MAX_DOMAIN_ID/8));
@@ -1336,6 +1345,9 @@ void __init amd_iommu_detect(void)
iommu_detected = 1;
amd_iommu_detected = 1;
x86_init.iommu.iommu_init = amd_iommu_init;
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
}
}
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index e0dfb6856aa..3704997e8b2 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -280,7 +280,8 @@ void __init early_gart_iommu_check(void)
* or BIOS forget to put that in reserved.
* try to update e820 to make that region as reserved.
*/
- int i, fix, slot;
+ u32 agp_aper_base = 0, agp_aper_order = 0;
+ int i, fix, slot, valid_agp = 0;
u32 ctl;
u32 aper_size = 0, aper_order = 0, last_aper_order = 0;
u64 aper_base = 0, last_aper_base = 0;
@@ -290,6 +291,8 @@ void __init early_gart_iommu_check(void)
return;
/* This is mostly duplicate of iommu_hole_init */
+ agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp);
+
fix = 0;
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
@@ -342,10 +345,10 @@ void __init early_gart_iommu_check(void)
}
}
- if (!fix)
+ if (valid_agp)
return;
- /* different nodes have different setting, disable them all at first*/
+ /* disable them all at first */
for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
int bus;
int dev_base, dev_limit;
@@ -458,8 +461,6 @@ out:
if (aper_alloc) {
/* Got the aperture from the AGP bridge */
- } else if (!valid_agp) {
- /* Do nothing */
} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index efb2b9cd132..aa57c079c98 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1341,7 +1341,7 @@ void enable_x2apic(void)
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (!(msr & X2APIC_ENABLE)) {
- pr_info("Enabling x2apic\n");
+ printk_once(KERN_INFO "Enabling x2apic\n");
wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
}
}
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index d0c99abc26c..eacbd2b31d2 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -306,10 +306,7 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
}
- if (cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_apicid, cpu);
-
- return BAD_APICID;
+ return per_cpu(x86_cpu_to_apicid, cpu);
}
struct apic apic_physflat = {
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index d9acc3bee0f..e31b9ffe25f 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -127,7 +127,7 @@ static u32 noop_apic_read(u32 reg)
static void noop_apic_write(u32 reg, u32 v)
{
- WARN_ON_ONCE((cpu_has_apic || !disable_apic));
+ WARN_ON_ONCE(cpu_has_apic && !disable_apic);
}
struct apic apic_noop = {
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 38dcecfa581..cb804c5091b 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -131,10 +131,7 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
}
- if (cpu < nr_cpu_ids)
- return bigsmp_cpu_to_logical_apicid(cpu);
-
- return BAD_APICID;
+ return bigsmp_cpu_to_logical_apicid(cpu);
}
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index e85f8fb7f8e..dd2b5f26464 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -27,6 +27,9 @@
*
* http://www.unisys.com
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
@@ -223,9 +226,9 @@ static int parse_unisys_oem(char *oemptr)
mip_addr = val;
mip = (struct mip_reg *)val;
mip_reg = __va(mip);
- pr_debug("es7000_mipcfg: host_reg = 0x%lx \n",
+ pr_debug("host_reg = 0x%lx\n",
(unsigned long)host_reg);
- pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n",
+ pr_debug("mip_reg = 0x%lx\n",
(unsigned long)mip_reg);
success++;
break;
@@ -401,7 +404,7 @@ static void es7000_enable_apic_mode(void)
if (!es7000_plat)
return;
- printk(KERN_INFO "ES7000: Enabling APIC mode.\n");
+ pr_info("Enabling APIC mode.\n");
memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
es7000_mip_reg.off_0x00 = MIP_SW_APIC;
es7000_mip_reg.off_0x38 = MIP_VALID;
@@ -514,8 +517,7 @@ static void es7000_setup_apic_routing(void)
{
int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
- printk(KERN_INFO
- "Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
+ pr_info("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster",
nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d5d498fbee4..de00c4619a5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2276,26 +2276,28 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
/*
* Either sets desc->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
* leaves desc->affinity untouched.
*/
unsigned int
-set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask,
+ unsigned int *dest_id)
{
struct irq_cfg *cfg;
unsigned int irq;
if (!cpumask_intersects(mask, cpu_online_mask))
- return BAD_APICID;
+ return -1;
irq = desc->irq;
cfg = desc->chip_data;
if (assign_irq_vector(irq, cfg, mask))
- return BAD_APICID;
+ return -1;
cpumask_copy(desc->affinity, mask);
- return apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
+ *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
+ return 0;
}
static int
@@ -2311,12 +2313,11 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
cfg = desc->chip_data;
spin_lock_irqsave(&ioapic_lock, flags);
- dest = set_desc_affinity(desc, mask);
- if (dest != BAD_APICID) {
+ ret = set_desc_affinity(desc, mask, &dest);
+ if (!ret) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
__target_IO_APIC_irq(irq, dest, cfg);
- ret = 0;
}
spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -2431,7 +2432,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
continue;
cfg = irq_cfg(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
goto unlock;
@@ -2450,7 +2451,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
}
__get_cpu_var(vector_irq)[vector] = -1;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
irq_exit();
@@ -3351,8 +3352,7 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
struct msi_msg msg;
unsigned int dest;
- dest = set_desc_affinity(desc, mask);
- if (dest == BAD_APICID)
+ if (set_desc_affinity(desc, mask, &dest))
return -1;
cfg = desc->chip_data;
@@ -3384,8 +3384,7 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
if (get_irte(irq, &irte))
return -1;
- dest = set_desc_affinity(desc, mask);
- if (dest == BAD_APICID)
+ if (set_desc_affinity(desc, mask, &dest))
return -1;
irte.vector = cfg->vector;
@@ -3567,8 +3566,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
struct msi_msg msg;
unsigned int dest;
- dest = set_desc_affinity(desc, mask);
- if (dest == BAD_APICID)
+ if (set_desc_affinity(desc, mask, &dest))
return -1;
cfg = desc->chip_data;
@@ -3623,8 +3621,7 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
struct msi_msg msg;
unsigned int dest;
- dest = set_desc_affinity(desc, mask);
- if (dest == BAD_APICID)
+ if (set_desc_affinity(desc, mask, &dest))
return -1;
cfg = desc->chip_data;
@@ -3730,8 +3727,7 @@ static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
struct irq_cfg *cfg;
unsigned int dest;
- dest = set_desc_affinity(desc, mask);
- if (dest == BAD_APICID)
+ if (set_desc_affinity(desc, mask, &dest))
return -1;
cfg = desc->chip_data;
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 6389432a9db..0159a69396c 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
*/
static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
- local_inc(&__get_cpu_var(alert_counter));
- if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+ __this_cpu_inc(per_cpu_var(alert_counter));
+ if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
/*
* die_nmi will return ONLY if NOTIFY_STOP happens..
*/
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- local_set(&__get_cpu_var(alert_counter), 0);
+ __this_cpu_write(per_cpu_var(alert_counter), 0);
}
/* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index a5371ec3677..cf69c59f491 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,10 +148,7 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
break;
}
- if (cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_logical_apicid, cpu);
-
- return BAD_APICID;
+ return per_cpu(x86_cpu_to_logical_apicid, cpu);
}
static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index a8989aadc99..8972f38c5ce 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -146,10 +146,7 @@ x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
break;
}
- if (cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_apicid, cpu);
-
- return BAD_APICID;
+ return per_cpu(x86_cpu_to_apicid, cpu);
}
static unsigned int x2apic_phys_get_apic_id(unsigned long x)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b684bb303cb..d56b0efb205 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -225,10 +225,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
}
- if (cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_apicid, cpu);
-
- return BAD_APICID;
+ return per_cpu(x86_cpu_to_apicid, cpu);
}
static unsigned int x2apic_get_apic_id(unsigned long x)
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index 63a88e1f987..b0206a211b0 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -101,21 +101,17 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
}
int
-uv_bios_mq_watchlist_alloc(int blade, unsigned long addr, unsigned int mq_size,
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
unsigned long *intr_mmr_offset)
{
- union uv_watchlist_u size_blade;
u64 watchlist;
s64 ret;
- size_blade.size = mq_size;
- size_blade.blade = blade;
-
/*
* bios returns watchlist number or negative error number.
*/
ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
- size_blade.val, (u64)intr_mmr_offset,
+ mq_size, (u64)intr_mmr_offset,
(u64)&watchlist, 0);
if (ret < BIOS_STATUS_SUCCESS)
return ret;
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index c965e521271..468489b57aa 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -74,6 +74,7 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
unsigned int eax, ebx, ecx, edx, sub_index;
unsigned int ht_mask_width, core_plus_mask_width;
unsigned int core_select_mask, core_level_siblings;
+ static bool printed;
if (c->cpuid_level < 0xb)
return;
@@ -127,12 +128,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
-
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- c->phys_proc_id);
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- c->cpu_core_id);
+ if (!printed) {
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ c->phys_proc_id);
+ if (c->x86_max_cores > 1)
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ c->cpu_core_id);
+ printed = 1;
+ }
return;
#endif
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7128b3799ce..e485825130d 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -254,59 +254,36 @@ static int __cpuinit nearby_node(int apicid)
/*
* Fixup core topology information for AMD multi-node processors.
- * Assumption 1: Number of cores in each internal node is the same.
- * Assumption 2: Mixed systems with both single-node and dual-node
- * processors are not supported.
+ * Assumption: Number of cores in each internal node is the same.
*/
#ifdef CONFIG_X86_HT
static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_PCI
- u32 t, cpn;
- u8 n, n_id;
+ unsigned long long value;
+ u32 nodes, cores_per_node;
int cpu = smp_processor_id();
+ if (!cpu_has(c, X86_FEATURE_NODEID_MSR))
+ return;
+
/* fixup topology information only once for a core */
if (cpu_has(c, X86_FEATURE_AMD_DCM))
return;
- /* check for multi-node processor on boot cpu */
- t = read_pci_config(0, 24, 3, 0xe8);
- if (!(t & (1 << 29)))
+ rdmsrl(MSR_FAM10H_NODE_ID, value);
+
+ nodes = ((value >> 3) & 7) + 1;
+ if (nodes == 1)
return;
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+ cores_per_node = c->x86_max_cores / nodes;
- /* cores per node: each internal node has half the number of cores */
- cpn = c->x86_max_cores >> 1;
-
- /* even-numbered NB_id of this dual-node processor */
- n = c->phys_proc_id << 1;
-
- /*
- * determine internal node id and assign cores fifty-fifty to
- * each node of the dual-node processor
- */
- t = read_pci_config(0, 24 + n, 3, 0xe8);
- n = (t>>30) & 0x3;
- if (n == 0) {
- if (c->cpu_core_id < cpn)
- n_id = 0;
- else
- n_id = 1;
- } else {
- if (c->cpu_core_id < cpn)
- n_id = 1;
- else
- n_id = 0;
- }
-
- /* compute entire NodeID, use llc_shared_map to store sibling info */
- per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
+ /* store NodeID, use llc_shared_map to store sibling info */
+ per_cpu(cpu_llc_id, cpu) = value & 7;
- /* fixup core id to be in range from 0 to cpn */
- c->cpu_core_id = c->cpu_core_id % cpn;
-#endif
+ /* fixup core id to be in range from 0 to (cores_per_node - 1) */
+ c->cpu_core_id = c->cpu_core_id % cores_per_node;
}
#endif
@@ -375,8 +352,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
node = nearby_node(apicid);
}
numa_set_node(cpu, node);
-
- printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c1afa990a6c..4868e4a951e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -427,6 +427,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_HT
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
+ static bool printed;
if (!cpu_has(c, X86_FEATURE_HT))
return;
@@ -442,7 +443,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
- printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
goto out;
}
@@ -469,11 +470,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
((1 << core_bits) - 1);
out:
- if ((c->x86_max_cores * smp_num_siblings) > 1) {
+ if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
c->phys_proc_id);
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
c->cpu_core_id);
+ printed = 1;
}
#endif
}
@@ -1093,7 +1095,7 @@ static void clear_all_debug_regs(void)
void __cpuinit cpu_init(void)
{
- struct orig_ist *orig_ist;
+ struct orig_ist *oist;
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
@@ -1102,7 +1104,7 @@ void __cpuinit cpu_init(void)
cpu = stack_smp_processor_id();
t = &per_cpu(init_tss, cpu);
- orig_ist = &per_cpu(orig_ist, cpu);
+ oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
@@ -1115,7 +1117,7 @@ void __cpuinit cpu_init(void)
if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
panic("CPU#%d already initialized!\n", cpu);
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+ pr_debug("Initializing CPU#%d\n", cpu);
clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
@@ -1143,12 +1145,12 @@ void __cpuinit cpu_init(void)
/*
* set up and load the per-CPU TSS
*/
- if (!orig_ist->ist[0]) {
+ if (!oist->ist[0]) {
char *estacks = per_cpu(exception_stacks, cpu);
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
estacks += exception_stack_sizes[v];
- orig_ist->ist[v] = t->x86_tss.ist[v] =
+ oist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
}
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index dca325c0399..b368cd86299 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,9 +30,9 @@
#include <asm/apic.h>
#include <asm/desc.h>
-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
-static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
+static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
+static DEFINE_PER_CPU(int, cpud_priv_count);
static DEFINE_MUTEX(cpu_debug_lock);
@@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
/* Already intialized */
if (file == CPU_INDEX_BIT)
- if (per_cpu(cpu_arr[type].init, cpu))
+ if (per_cpu(cpud_arr[type].init, cpu))
return 0;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
priv->reg = reg;
priv->file = file;
mutex_lock(&cpu_debug_lock);
- per_cpu(priv_arr[type], cpu) = priv;
- per_cpu(cpu_priv_count, cpu)++;
+ per_cpu(cpud_priv_arr[type], cpu) = priv;
+ per_cpu(cpud_priv_count, cpu)++;
mutex_unlock(&cpu_debug_lock);
if (file)
@@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
dentry, (void *)priv, &cpu_fops);
else {
debugfs_create_file(cpu_base[type].name, S_IRUGO,
- per_cpu(cpu_arr[type].dentry, cpu),
+ per_cpu(cpud_arr[type].dentry, cpu),
(void *)priv, &cpu_fops);
mutex_lock(&cpu_debug_lock);
- per_cpu(cpu_arr[type].init, cpu) = 1;
+ per_cpu(cpud_arr[type].init, cpu) = 1;
mutex_unlock(&cpu_debug_lock);
}
@@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
if (!is_typeflag_valid(cpu, cpu_base[type].flag))
continue;
cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
- per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+ per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
if (type < CPU_TSS_BIT)
err = cpu_init_msr(cpu, type, cpu_dentry);
@@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
err = cpu_init_allreg(cpu, cpu_dentry);
pr_info("cpu%d(%d) debug files %d\n",
- cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
- if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+ cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
+ if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
pr_err("Register files count %d exceeds limit %d\n",
- per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
- per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+ per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
+ per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
err = -ENFILE;
}
if (err)
@@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
debugfs_remove_recursive(cpu_debugfs_dir);
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
- for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
- kfree(per_cpu(priv_arr[i], cpu));
+ for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
+ kfree(per_cpu(cpud_priv_arr[i], cpu));
}
module_init(cpu_debug_init);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 8b581d3905c..f28decf8dde 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
unsigned int cpu_feature;
};
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-static DEFINE_PER_CPU(struct aperfmperf, old_perf);
+static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance *acpi_perf_data;
@@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
if (unlikely(cpumask_empty(mask)))
return 0;
- switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
+ switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
- perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
+ perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
cmd.addr.io.port = perf->control_register.address;
cmd.addr.io.bit_width = perf->control_register.bit_width;
break;
@@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
return 0;
- ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
- per_cpu(old_perf, cpu) = perf;
+ ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+ per_cpu(acfreq_old_perf, cpu) = perf;
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
@@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
unsigned int freq;
unsigned int cached_freq;
@@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
struct drv_cmd cmd;
@@ -416,7 +416,7 @@ out:
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_verify\n");
@@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM;
data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
- per_cpu(drv_data, cpu) = data;
+ per_cpu(acfreq_data, cpu) = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
@@ -725,20 +725,20 @@ err_unreg:
acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
- per_cpu(drv_data, cpu) = NULL;
+ per_cpu(acfreq_data, cpu) = NULL;
return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_cpu_exit\n");
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
- per_cpu(drv_data, policy->cpu) = NULL;
+ per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
kfree(data);
@@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
- struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
dprintk("acpi_cpufreq_resume\n");
@@ -764,14 +764,15 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
};
static struct cpufreq_driver acpi_cpufreq_driver = {
- .verify = acpi_cpufreq_verify,
- .target = acpi_cpufreq_target,
- .init = acpi_cpufreq_cpu_init,
- .exit = acpi_cpufreq_cpu_exit,
- .resume = acpi_cpufreq_resume,
- .name = "acpi-cpufreq",
- .owner = THIS_MODULE,
- .attr = acpi_cpufreq_attr,
+ .verify = acpi_cpufreq_verify,
+ .target = acpi_cpufreq_target,
+ .bios_limit = acpi_processor_get_bios_limit,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .resume = acpi_cpufreq_resume,
+ .name = "acpi-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = acpi_cpufreq_attr,
};
static int __init acpi_cpufreq_init(void)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index f10dea409f4..cb01dac267d 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -164,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
}
/* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cpuinfo.transition_latency = 200000;
policy->cur = busfreq * max_multiplier;
result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index d47c775eb0a..9a97116f89e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -714,14 +714,17 @@ static struct freq_attr *powernow_table_attr[] = {
};
static struct cpufreq_driver powernow_driver = {
- .verify = powernow_verify,
- .target = powernow_target,
- .get = powernow_get,
- .init = powernow_cpu_init,
- .exit = powernow_cpu_exit,
- .name = "powernow-k7",
- .owner = THIS_MODULE,
- .attr = powernow_table_attr,
+ .verify = powernow_verify,
+ .target = powernow_target,
+ .get = powernow_get,
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+ .bios_limit = acpi_processor_get_bios_limit,
+#endif
+ .init = powernow_cpu_init,
+ .exit = powernow_cpu_exit,
+ .name = "powernow-k7",
+ .owner = THIS_MODULE,
+ .attr = powernow_table_attr,
};
static int __init powernow_init(void)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 3f12dabeab5..f125e5c551c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1118,7 +1118,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
static int powernowk8_target(struct cpufreq_policy *pol,
unsigned targfreq, unsigned relation)
{
- cpumask_t oldmask;
+ cpumask_var_t oldmask;
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
u32 checkfid;
u32 checkvid;
@@ -1131,9 +1131,13 @@ static int powernowk8_target(struct cpufreq_policy *pol,
checkfid = data->currfid;
checkvid = data->currvid;
- /* only run on specific CPU from here on */
- oldmask = current->cpus_allowed;
- set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
+ /* only run on specific CPU from here on. */
+ /* This is poor form: use a workqueue or smp_call_function_single */
+ if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(oldmask, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
@@ -1193,7 +1197,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
ret = 0;
err_out:
- set_cpus_allowed_ptr(current, &oldmask);
+ set_cpus_allowed_ptr(current, oldmask);
+ free_cpumask_var(oldmask);
return ret;
}
@@ -1393,14 +1398,15 @@ static struct freq_attr *powernow_k8_attr[] = {
};
static struct cpufreq_driver cpufreq_amd64_driver = {
- .verify = powernowk8_verify,
- .target = powernowk8_target,
- .init = powernowk8_cpu_init,
- .exit = __devexit_p(powernowk8_cpu_exit),
- .get = powernowk8_get,
- .name = "powernow-k8",
- .owner = THIS_MODULE,
- .attr = powernow_k8_attr,
+ .verify = powernowk8_verify,
+ .target = powernowk8_target,
+ .bios_limit = acpi_processor_get_bios_limit,
+ .init = powernowk8_cpu_init,
+ .exit = __devexit_p(powernowk8_cpu_exit),
+ .get = powernowk8_get,
+ .name = "powernow-k8",
+ .owner = THIS_MODULE,
+ .attr = powernow_k8_attr,
};
/* driver entry point for init */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 3ae5a7a3a50..2ce8e0b5cc5 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -39,7 +39,7 @@ static struct pci_dev *speedstep_chipset_dev;
/* speedstep_processor
*/
-static unsigned int speedstep_processor;
+static enum speedstep_processor speedstep_processor;
static u32 pmbase;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index f4c290b8482..ad0083abfa2 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -34,7 +34,7 @@ static int relaxed_check;
* GET PROCESSOR CORE SPEED IN KHZ *
*********************************************************************/
-static unsigned int pentium3_get_frequency(unsigned int processor)
+static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
{
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
struct {
@@ -227,7 +227,7 @@ static unsigned int pentium4_get_frequency(void)
/* Warning: may get called from smp_call_function_single. */
-unsigned int speedstep_get_frequency(unsigned int processor)
+unsigned int speedstep_get_frequency(enum speedstep_processor processor)
{
switch (processor) {
case SPEEDSTEP_CPU_PCORE:
@@ -380,7 +380,7 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(unsigned int processor,
+unsigned int speedstep_get_freqs(enum speedstep_processor processor,
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
index 2b6c04e5a30..70d9cea1219 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.h
@@ -11,18 +11,18 @@
/* processors */
-
-#define SPEEDSTEP_CPU_PIII_C_EARLY 0x00000001 /* Coppermine core */
-#define SPEEDSTEP_CPU_PIII_C 0x00000002 /* Coppermine core */
-#define SPEEDSTEP_CPU_PIII_T 0x00000003 /* Tualatin core */
-#define SPEEDSTEP_CPU_P4M 0x00000004 /* P4-M */
-
+enum speedstep_processor {
+ SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
+ SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */
+ SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */
+ SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */
/* the following processors are not speedstep-capable and are not auto-detected
* in speedstep_detect_processor(). However, their speed can be detected using
* the speedstep_get_frequency() call. */
-#define SPEEDSTEP_CPU_PM 0xFFFFFF03 /* Pentium M */
-#define SPEEDSTEP_CPU_P4D 0xFFFFFF04 /* desktop P4 */
-#define SPEEDSTEP_CPU_PCORE 0xFFFFFF05 /* Core */
+ SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */
+ SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */
+ SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */
+};
/* speedstep states -- only two of them */
@@ -31,10 +31,10 @@
/* detect a speedstep-capable processor */
-extern unsigned int speedstep_detect_processor (void);
+extern enum speedstep_processor speedstep_detect_processor(void);
/* detect the current speed (in khz) of the processor */
-extern unsigned int speedstep_get_frequency(unsigned int processor);
+extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
/* detect the low and high speeds of the processor. The callback
@@ -42,7 +42,7 @@ extern unsigned int speedstep_get_frequency(unsigned int processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(unsigned int processor,
+extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
unsigned int *low_speed,
unsigned int *high_speed,
unsigned int *transition_latency,
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
index befea088e4f..04d73c114e4 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
@@ -35,7 +35,7 @@ static int smi_cmd;
static unsigned int smi_sig;
/* info about the processor */
-static unsigned int speedstep_processor;
+static enum speedstep_processor speedstep_processor;
/*
* There are only two frequency states for each processor. Values
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c900b73f922..879666f4d87 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -70,7 +70,6 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
sched_clock_stable = 1;
}
@@ -270,8 +269,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
node = cpu_to_node(cpu);
}
numa_set_node(cpu, node);
-
- printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 6c40f6b5b34..fc6c8ef92dc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,26 +499,27 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
#ifdef CONFIG_SYSFS
/* pointer to _cpuid4_info array (for each cache leaf) */
-static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
-#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
+static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
- int index_msb, i;
+ int index_msb, i, sibling;
struct cpuinfo_x86 *c = &cpu_data(cpu);
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
- struct cpuinfo_x86 *d;
- for_each_online_cpu(i) {
- if (!per_cpu(cpuid4_info, i))
+ for_each_cpu(i, c->llc_shared_map) {
+ if (!per_cpu(ici_cpuid4_info, i))
continue;
- d = &cpu_data(i);
this_leaf = CPUID4_INFO_IDX(i, index);
- cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
- d->llc_shared_map);
+ for_each_cpu(sibling, c->llc_shared_map) {
+ if (!cpu_online(sibling))
+ continue;
+ set_bit(sibling, this_leaf->shared_cpu_map);
+ }
}
return;
}
@@ -535,7 +536,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
c->apicid >> index_msb) {
cpumask_set_cpu(i,
to_cpumask(this_leaf->shared_cpu_map));
- if (i != cpu && per_cpu(cpuid4_info, i)) {
+ if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
sibling_leaf =
CPUID4_INFO_IDX(i, index);
cpumask_set_cpu(cpu, to_cpumask(
@@ -574,8 +575,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
for (i = 0; i < num_cache_leaves; i++)
cache_remove_shared_cpu_map(cpu, i);
- kfree(per_cpu(cpuid4_info, cpu));
- per_cpu(cpuid4_info, cpu) = NULL;
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
static int
@@ -614,15 +615,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
if (num_cache_leaves == 0)
return -ENOENT;
- per_cpu(cpuid4_info, cpu) = kzalloc(
+ per_cpu(ici_cpuid4_info, cpu) = kzalloc(
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
- if (per_cpu(cpuid4_info, cpu) == NULL)
+ if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return -ENOMEM;
smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
if (retval) {
- kfree(per_cpu(cpuid4_info, cpu));
- per_cpu(cpuid4_info, cpu) = NULL;
+ kfree(per_cpu(ici_cpuid4_info, cpu));
+ per_cpu(ici_cpuid4_info, cpu) = NULL;
}
return retval;
@@ -634,7 +635,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
-static DEFINE_PER_CPU(struct kobject *, cache_kobject);
+static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
struct _index_kobject {
struct kobject kobj;
@@ -643,8 +644,8 @@ struct _index_kobject {
};
/* pointer to array of kobjects for cpuX/cache/indexY */
-static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
-#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
+static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
+#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
@@ -863,10 +864,10 @@ static struct kobj_type ktype_percpu_entry = {
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
- kfree(per_cpu(cache_kobject, cpu));
- kfree(per_cpu(index_kobject, cpu));
- per_cpu(cache_kobject, cpu) = NULL;
- per_cpu(index_kobject, cpu) = NULL;
+ kfree(per_cpu(ici_cache_kobject, cpu));
+ kfree(per_cpu(ici_index_kobject, cpu));
+ per_cpu(ici_cache_kobject, cpu) = NULL;
+ per_cpu(ici_index_kobject, cpu) = NULL;
free_cache_attributes(cpu);
}
@@ -882,14 +883,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
return err;
/* Allocate all required memory */
- per_cpu(cache_kobject, cpu) =
+ per_cpu(ici_cache_kobject, cpu) =
kzalloc(sizeof(struct kobject), GFP_KERNEL);
- if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
+ if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
goto err_out;
- per_cpu(index_kobject, cpu) = kzalloc(
+ per_cpu(ici_index_kobject, cpu) = kzalloc(
sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
- if (unlikely(per_cpu(index_kobject, cpu) == NULL))
+ if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
goto err_out;
return 0;
@@ -913,7 +914,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
if (unlikely(retval < 0))
return retval;
- retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
+ retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
&ktype_percpu_entry,
&sys_dev->kobj, "%s", "cache");
if (retval < 0) {
@@ -927,12 +928,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
this_object->index = i;
retval = kobject_init_and_add(&(this_object->kobj),
&ktype_cache,
- per_cpu(cache_kobject, cpu),
+ per_cpu(ici_cache_kobject, cpu),
"index%1lu", i);
if (unlikely(retval)) {
for (j = 0; j < i; j++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
- kobject_put(per_cpu(cache_kobject, cpu));
+ kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
return retval;
}
@@ -940,7 +941,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
- kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
+ kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
return 0;
}
@@ -949,7 +950,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i;
- if (per_cpu(cpuid4_info, cpu) == NULL)
+ if (per_cpu(ici_cpuid4_info, cpu) == NULL)
return;
if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
return;
@@ -957,7 +958,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
for (i = 0; i < num_cache_leaves; i++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
- kobject_put(per_cpu(cache_kobject, cpu));
+ kobject_put(per_cpu(ici_cache_kobject, cpu));
cpuid4_cache_sysfs_exit(cpu);
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 472763d9209..73734baa50f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -74,7 +74,7 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
m->finished = 0;
}
-static cpumask_t mce_inject_cpumask;
+static cpumask_var_t mce_inject_cpumask;
static int mce_raise_notify(struct notifier_block *self,
unsigned long val, void *data)
@@ -82,9 +82,9 @@ static int mce_raise_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int cpu = smp_processor_id();
struct mce *m = &__get_cpu_var(injectm);
- if (val != DIE_NMI_IPI || !cpu_isset(cpu, mce_inject_cpumask))
+ if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
return NOTIFY_DONE;
- cpu_clear(cpu, mce_inject_cpumask);
+ cpumask_clear_cpu(cpu, mce_inject_cpumask);
if (m->inject_flags & MCJ_EXCEPTION)
raise_exception(m, args->regs);
else if (m->status)
@@ -148,22 +148,22 @@ static void raise_mce(struct mce *m)
unsigned long start;
int cpu;
get_online_cpus();
- mce_inject_cpumask = cpu_online_map;
- cpu_clear(get_cpu(), mce_inject_cpumask);
+ cpumask_copy(mce_inject_cpumask, cpu_online_mask);
+ cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
for_each_online_cpu(cpu) {
struct mce *mcpu = &per_cpu(injectm, cpu);
if (!mcpu->finished ||
MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
- cpu_clear(cpu, mce_inject_cpumask);
+ cpumask_clear_cpu(cpu, mce_inject_cpumask);
}
- if (!cpus_empty(mce_inject_cpumask))
- apic->send_IPI_mask(&mce_inject_cpumask, NMI_VECTOR);
+ if (!cpumask_empty(mce_inject_cpumask))
+ apic->send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
start = jiffies;
- while (!cpus_empty(mce_inject_cpumask)) {
+ while (!cpumask_empty(mce_inject_cpumask)) {
if (!time_before(jiffies, start + 2*HZ)) {
printk(KERN_ERR
"Timeout waiting for mce inject NMI %lx\n",
- *cpus_addr(mce_inject_cpumask));
+ *cpumask_bits(mce_inject_cpumask));
break;
}
cpu_relax();
@@ -210,6 +210,8 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
static int inject_init(void)
{
+ if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
+ return -ENOMEM;
printk(KERN_INFO "Machine check injector initialized\n");
mce_chrdev_ops.write = mce_write;
register_die_notifier(&mce_raise_nb);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d7ebf25d10e..a8aacd4b513 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1388,13 +1388,14 @@ static void __mcheck_cpu_init_timer(void)
struct timer_list *t = &__get_cpu_var(mce_timer);
int *n = &__get_cpu_var(mce_next_interval);
+ setup_timer(t, mce_start_timer, smp_processor_id());
+
if (mce_ignore_ce)
return;
*n = check_interval * HZ;
if (!*n)
return;
- setup_timer(t, mce_start_timer, smp_processor_id());
t->expires = round_jiffies(jiffies + *n);
add_timer_on(t, smp_processor_id());
}
@@ -1928,7 +1929,7 @@ error2:
sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr);
error:
while (--i >= 0)
- sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr);
+ sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
sysdev_unregister(&per_cpu(mce_dev, cpu));
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4fef985fc22..81c499eceb2 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -256,6 +256,16 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
ack_APIC_irq();
}
+/* Thermal monitoring depends on APIC, ACPI and clock modulation */
+static int intel_thermal_supported(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has_apic)
+ return 0;
+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+ return 0;
+ return 1;
+}
+
void __init mcheck_intel_therm_init(void)
{
/*
@@ -263,8 +273,7 @@ void __init mcheck_intel_therm_init(void)
* LVT value on BSP and use that value to restore APs' thermal LVT
* entry BIOS programmed later
*/
- if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
- cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
+ if (intel_thermal_supported(&boot_cpu_data))
lvtthmr_init = apic_read(APIC_LVTTHMR);
}
@@ -274,8 +283,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
int tm2 = 0;
u32 l, h;
- /* Thermal monitoring depends on ACPI and clock modulation*/
- if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+ if (!intel_thermal_supported(c))
return;
/*
@@ -339,8 +347,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
l = apic_read(APIC_LVTTHMR);
apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
- printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
- cpu, tm2 ? "TM2" : "TM1");
+ printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
+ tm2 ? "TM2" : "TM1");
/* enable thermal throttle processing */
atomic_set(&therm_throt_en, 1);
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 3c1b12d461d..e006e56f699 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -4,6 +4,7 @@
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/init.h>
#define LINE_SIZE 80
@@ -133,8 +134,7 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
return -EINVAL;
base = simple_strtoull(line + 5, &ptr, 0);
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
if (strncmp(ptr, "size=", 5))
return -EINVAL;
@@ -142,14 +142,11 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
size = simple_strtoull(ptr + 5, &ptr, 0);
if ((base & 0xfff) || (size & 0xfff))
return -EINVAL;
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr);
if (strncmp(ptr, "type=", 5))
return -EINVAL;
- ptr += 5;
- while (isspace(*ptr))
- ptr++;
+ ptr = skip_spaces(ptr + 5);
for (i = 0; i < MTRR_NUM_TYPES; ++i) {
if (strcmp(ptr, mtrr_strings[i]))
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index ab1a8a89b98..c223b7e895d 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1632,6 +1632,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
data.period = event->hw.last_period;
data.addr = 0;
+ data.raw = NULL;
regs.ip = 0;
/*
@@ -1749,6 +1750,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
u64 val;
data.addr = 0;
+ data.raw = NULL;
cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1794,6 +1796,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
u64 ack, status;
data.addr = 0;
+ data.raw = NULL;
cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1857,6 +1860,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
u64 val;
data.addr = 0;
+ data.raw = NULL;
cpuc = &__get_cpu_var(cpu_hw_events);
@@ -2062,12 +2066,6 @@ static __init int p6_pmu_init(void)
x86_pmu = p6_pmu;
- if (!cpu_has_apic) {
- pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
- pr_info("no hardware sampling interrupt available.\n");
- x86_pmu.apic = 0;
- }
-
return 0;
}
@@ -2159,6 +2157,16 @@ static __init int amd_pmu_init(void)
return 0;
}
+static void __init pmu_check_apic(void)
+{
+ if (cpu_has_apic)
+ return;
+
+ x86_pmu.apic = 0;
+ pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+ pr_info("no hardware sampling interrupt available.\n");
+}
+
void __init init_hw_perf_events(void)
{
int err;
@@ -2180,6 +2188,8 @@ void __init init_hw_perf_events(void)
return;
}
+ pmu_check_apic();
+
pr_cont("%s PMU driver.\n", x86_pmu.name);
if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
@@ -2287,7 +2297,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
-static DEFINE_PER_CPU(int, in_nmi_frame);
+static DEFINE_PER_CPU(int, in_ignored_frame);
static void
@@ -2303,8 +2313,9 @@ static void backtrace_warning(void *data, char *msg)
static int backtrace_stack(void *data, char *name)
{
- per_cpu(in_nmi_frame, smp_processor_id()) =
- x86_is_stack_id(NMI_STACK, name);
+ per_cpu(in_ignored_frame, smp_processor_id()) =
+ x86_is_stack_id(NMI_STACK, name) ||
+ x86_is_stack_id(DEBUG_STACK, name);
return 0;
}
@@ -2313,7 +2324,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry *entry = data;
- if (per_cpu(in_nmi_frame, smp_processor_id()))
+ if (per_cpu(in_ignored_frame, smp_processor_id()))
return;
if (reliable)
@@ -2325,6 +2336,7 @@ static const struct stacktrace_ops backtrace_ops = {
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
+ .walk_stack = print_context_stack_bp,
};
#include "../dumpstack.h"
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 7ef24a79699..cb27fd6136c 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -187,7 +187,8 @@ static int __init cpuid_init(void)
int i, err = 0;
i = 0;
- if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
+ if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
+ "cpu/cpuid", &cpuid_fops)) {
printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
CPUID_MAJOR);
err = -EBUSY;
@@ -216,7 +217,7 @@ out_class:
}
class_destroy(cpuid_class);
out_chrdev:
- unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
+ __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
out:
return err;
}
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index ef42a038f1a..1c47390dd0e 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -265,13 +265,13 @@ struct ds_context {
int cpu;
};
-static DEFINE_PER_CPU(struct ds_context *, cpu_context);
+static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
{
struct ds_context **p_context =
- (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
+ (task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
struct ds_context *context = NULL;
struct ds_context *new_context = NULL;
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b8ce165dde5..c56bc287303 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -109,6 +109,30 @@ print_context_stack(struct thread_info *tinfo,
}
return bp;
}
+EXPORT_SYMBOL_GPL(print_context_stack);
+
+unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+{
+ struct stack_frame *frame = (struct stack_frame *)bp;
+ unsigned long *ret_addr = &frame->return_address;
+
+ while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+ unsigned long addr = *ret_addr;
+
+ if (__kernel_text_address(addr)) {
+ ops->address(data, addr, 1);
+ frame = frame->next_frame;
+ ret_addr = &frame->return_address;
+ print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ }
+ }
+ return (unsigned long)frame;
+}
+EXPORT_SYMBOL_GPL(print_context_stack_bp);
static void
@@ -141,10 +165,11 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
}
static const struct stacktrace_ops print_trace_ops = {
- .warning = print_trace_warning,
- .warning_symbol = print_trace_warning_symbol,
- .stack = print_trace_stack,
- .address = print_trace_address,
+ .warning = print_trace_warning,
+ .warning_symbol = print_trace_warning_symbol,
+ .stack = print_trace_stack,
+ .address = print_trace_address,
+ .walk_stack = print_context_stack,
};
void
@@ -188,7 +213,7 @@ void dump_stack(void)
}
EXPORT_SYMBOL(dump_stack);
-static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
@@ -207,11 +232,11 @@ unsigned __kprobes long oops_begin(void)
/* racy, but better than risking deadlock. */
raw_local_irq_save(flags);
cpu = smp_processor_id();
- if (!__raw_spin_trylock(&die_lock)) {
+ if (!arch_spin_trylock(&die_lock)) {
if (cpu == die_owner)
/* nested oops. should stop eventually */;
else
- __raw_spin_lock(&die_lock);
+ arch_spin_lock(&die_lock);
}
die_nest_count++;
die_owner = cpu;
@@ -231,7 +256,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
die_nest_count--;
if (!die_nest_count)
/* Nest count reaches zero, release the lock. */
- __raw_spin_unlock(&die_lock);
+ arch_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
oops_exit();
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 81086c227ab..4fd1420faff 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -14,12 +14,6 @@
#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
#endif
-extern unsigned long
-print_context_stack(struct thread_info *tinfo,
- unsigned long *stack, unsigned long bp,
- const struct stacktrace_ops *ops, void *data,
- unsigned long *end, int *graph);
-
extern void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp, char *log_lvl);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index e0ed4c7abb6..ae775ca47b2 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -58,7 +58,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
context = (struct thread_info *)
((unsigned long)stack & (~(THREAD_SIZE - 1)));
- bp = print_context_stack(context, stack, bp, ops, data, NULL, &graph);
+ bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
stack = (unsigned long *)context->previous_esp;
if (!stack)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 8e740934bd1..0ad9597073f 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -103,6 +103,35 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
return NULL;
}
+static inline int
+in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
+ unsigned long *irq_stack_end)
+{
+ return (stack >= irq_stack && stack < irq_stack_end);
+}
+
+/*
+ * We are returning from the irq stack to the previous one.
+ * If the previous stack is also in the irq stack, then bp in the first
+ * frame of the irq stack points to the previous, interrupted one.
+ * Otherwise we have another level of indirection: We first save
+ * the bp of the previous stack, then we switch the stack to the irq one
+ * and save a new bp that links to the previous one.
+ * (See save_args())
+ */
+static inline unsigned long
+fixup_bp_irq_link(unsigned long bp, unsigned long *stack,
+ unsigned long *irq_stack, unsigned long *irq_stack_end)
+{
+#ifdef CONFIG_FRAME_POINTER
+ struct stack_frame *frame = (struct stack_frame *)bp;
+
+ if (!in_irq_stack(stack, irq_stack, irq_stack_end))
+ return (unsigned long)frame->next_frame;
+#endif
+ return bp;
+}
+
/*
* x86-64 can have up to three kernel stacks:
* process stack
@@ -159,8 +188,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (ops->stack(data, id) < 0)
break;
- bp = print_context_stack(tinfo, stack, bp, ops,
- data, estack_end, &graph);
+ bp = ops->walk_stack(tinfo, stack, bp, ops,
+ data, estack_end, &graph);
ops->stack(data, "<EOE>");
/*
* We link to the next stack via the
@@ -175,7 +204,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
irq_stack = irq_stack_end -
(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
- if (stack >= irq_stack && stack < irq_stack_end) {
+ if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
if (ops->stack(data, "IRQ") < 0)
break;
bp = print_context_stack(tinfo, stack, bp,
@@ -186,6 +215,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* pointer (index -1 to end) in the IRQ stack:
*/
stack = (unsigned long *) (irq_stack_end[-1]);
+ bp = fixup_bp_irq_link(bp, stack, irq_stack,
+ irq_stack_end);
irq_stack_end = NULL;
ops->stack(data, "EOI");
continue;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d17d482a04f..05ed7ab2ca4 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -724,7 +724,7 @@ core_initcall(e820_mark_nvs_memory);
/*
* Early reserved memory areas.
*/
-#define MAX_EARLY_RES 20
+#define MAX_EARLY_RES 32
struct early_res {
u64 start, end;
@@ -732,7 +732,16 @@ struct early_res {
char overlap_ok;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
- { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
+ { 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
+#ifdef CONFIG_X86_32
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+#endif
+
{}
};
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 50b9c220e12..44a8e0dc673 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -725,22 +725,61 @@ END(syscall_badsys)
/*
* System calls that need a pt_regs pointer.
*/
-#define PTREGSCALL(name) \
+#define PTREGSCALL0(name) \
ALIGN; \
ptregs_##name: \
leal 4(%esp),%eax; \
jmp sys_##name;
-PTREGSCALL(iopl)
-PTREGSCALL(fork)
-PTREGSCALL(clone)
-PTREGSCALL(vfork)
-PTREGSCALL(execve)
-PTREGSCALL(sigaltstack)
-PTREGSCALL(sigreturn)
-PTREGSCALL(rt_sigreturn)
-PTREGSCALL(vm86)
-PTREGSCALL(vm86old)
+#define PTREGSCALL1(name) \
+ ALIGN; \
+ptregs_##name: \
+ leal 4(%esp),%edx; \
+ movl (PT_EBX+4)(%esp),%eax; \
+ jmp sys_##name;
+
+#define PTREGSCALL2(name) \
+ ALIGN; \
+ptregs_##name: \
+ leal 4(%esp),%ecx; \
+ movl (PT_ECX+4)(%esp),%edx; \
+ movl (PT_EBX+4)(%esp),%eax; \
+ jmp sys_##name;
+
+#define PTREGSCALL3(name) \
+ ALIGN; \
+ptregs_##name: \
+ leal 4(%esp),%eax; \
+ pushl %eax; \
+ movl PT_EDX(%eax),%ecx; \
+ movl PT_ECX(%eax),%edx; \
+ movl PT_EBX(%eax),%eax; \
+ call sys_##name; \
+ addl $4,%esp; \
+ ret
+
+PTREGSCALL1(iopl)
+PTREGSCALL0(fork)
+PTREGSCALL0(vfork)
+PTREGSCALL3(execve)
+PTREGSCALL2(sigaltstack)
+PTREGSCALL0(sigreturn)
+PTREGSCALL0(rt_sigreturn)
+PTREGSCALL2(vm86)
+PTREGSCALL1(vm86old)
+
+/* Clone is an oddball. The 4th arg is in %edi */
+	ALIGN;
+ptregs_clone:
+ leal 4(%esp),%eax
+ pushl %eax
+ pushl PT_EDI(%eax)
+ movl PT_EDX(%eax),%ecx
+ movl PT_ECX(%eax),%edx
+ movl PT_EBX(%eax),%eax
+ call sys_clone
+ addl $8,%esp
+ ret
.macro FIXUP_ESPFIX_STACK
/*
@@ -1008,12 +1047,8 @@ END(spurious_interrupt_bug)
ENTRY(kernel_thread_helper)
pushl $0 # fake return address for unwinder
CFI_STARTPROC
- movl %edx,%eax
- push %edx
- CFI_ADJUST_CFA_OFFSET 4
- call *%ebx
- push %eax
- CFI_ADJUST_CFA_OFFSET 4
+ movl %edi,%eax
+ call *%esi
call do_exit
ud2 # padding for call trace
CFI_ENDPROC
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 63bca794c8f..0697ff13983 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1076,10 +1076,10 @@ ENTRY(\sym)
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
- PER_CPU(init_tss, %rbp)
- subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+ PER_CPU(init_tss, %r12)
+ subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
call \do_sym
- addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+ addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
jmp paranoid_exit /* %ebx: no swapgs flag */
CFI_ENDPROC
END(\sym)
@@ -1166,63 +1166,20 @@ bad_gs:
jmp 2b
.previous
-/*
- * Create a kernel thread.
- *
- * C extern interface:
- * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
- *
- * asm input arguments:
- * rdi: fn, rsi: arg, rdx: flags
- */
-ENTRY(kernel_thread)
- CFI_STARTPROC
- FAKE_STACK_FRAME $child_rip
- SAVE_ALL
-
- # rdi: flags, rsi: usp, rdx: will be &pt_regs
- movq %rdx,%rdi
- orq kernel_thread_flags(%rip),%rdi
- movq $-1, %rsi
- movq %rsp, %rdx
-
- xorl %r8d,%r8d
- xorl %r9d,%r9d
-
- # clone now
- call do_fork
- movq %rax,RAX(%rsp)
- xorl %edi,%edi
-
- /*
- * It isn't worth to check for reschedule here,
- * so internally to the x86_64 port you can rely on kernel_thread()
- * not to reschedule the child before returning, this avoids the need
- * of hacks for example to fork off the per-CPU idle tasks.
- * [Hopefully no generic code relies on the reschedule -AK]
- */
- RESTORE_ALL
- UNFAKE_STACK_FRAME
- ret
- CFI_ENDPROC
-END(kernel_thread)
-
-ENTRY(child_rip)
+ENTRY(kernel_thread_helper)
pushq $0 # fake return address
CFI_STARTPROC
/*
* Here we are in the child and the registers are set as they were
* at kernel_thread() invocation in the parent.
*/
- movq %rdi, %rax
- movq %rsi, %rdi
- call *%rax
+ call *%rsi
# exit
mov %eax, %edi
call do_exit
ud2 # padding for call trace
CFI_ENDPROC
-END(child_rip)
+END(kernel_thread_helper)
/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
deleted file mode 100644
index 9b08e852fd1..00000000000
--- a/arch/x86/kernel/geode_32.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * AMD Geode southbridge support code
- * Copyright (C) 2006, Advanced Micro Devices, Inc.
- * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <asm/msr.h>
-#include <asm/geode.h>
-
-static struct {
- char *name;
- u32 msr;
- int size;
- u32 base;
-} lbars[] = {
- { "geode-pms", MSR_LBAR_PMS, LBAR_PMS_SIZE, 0 },
- { "geode-acpi", MSR_LBAR_ACPI, LBAR_ACPI_SIZE, 0 },
- { "geode-gpio", MSR_LBAR_GPIO, LBAR_GPIO_SIZE, 0 },
- { "geode-mfgpt", MSR_LBAR_MFGPT, LBAR_MFGPT_SIZE, 0 }
-};
-
-static void __init init_lbars(void)
-{
- u32 lo, hi;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lbars); i++) {
- rdmsr(lbars[i].msr, lo, hi);
- if (hi & 0x01)
- lbars[i].base = lo & 0x0000ffff;
-
- if (lbars[i].base == 0)
- printk(KERN_ERR "geode: Couldn't initialize '%s'\n",
- lbars[i].name);
- }
-}
-
-int geode_get_dev_base(unsigned int dev)
-{
- BUG_ON(dev >= ARRAY_SIZE(lbars));
- return lbars[dev].base;
-}
-EXPORT_SYMBOL_GPL(geode_get_dev_base);
-
-/* === GPIO API === */
-
-void geode_gpio_set(u32 gpio, unsigned int reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
-
- if (!base)
- return;
-
- /* low bank register */
- if (gpio & 0xFFFF)
- outl(gpio & 0xFFFF, base + reg);
- /* high bank register */
- gpio >>= 16;
- if (gpio)
- outl(gpio, base + 0x80 + reg);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_set);
-
-void geode_gpio_clear(u32 gpio, unsigned int reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
-
- if (!base)
- return;
-
- /* low bank register */
- if (gpio & 0xFFFF)
- outl((gpio & 0xFFFF) << 16, base + reg);
- /* high bank register */
- gpio &= (0xFFFF << 16);
- if (gpio)
- outl(gpio, base + 0x80 + reg);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_clear);
-
-int geode_gpio_isset(u32 gpio, unsigned int reg)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
- u32 val;
-
- if (!base)
- return 0;
-
- /* low bank register */
- if (gpio & 0xFFFF) {
- val = inl(base + reg) & (gpio & 0xFFFF);
- if ((gpio & 0xFFFF) == val)
- return 1;
- }
- /* high bank register */
- gpio >>= 16;
- if (gpio) {
- val = inl(base + 0x80 + reg) & gpio;
- if (gpio == val)
- return 1;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(geode_gpio_isset);
-
-void geode_gpio_set_irq(unsigned int group, unsigned int irq)
-{
- u32 lo, hi;
-
- if (group > 7 || irq > 15)
- return;
-
- rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
-
- lo &= ~(0xF << (group * 4));
- lo |= (irq & 0xF) << (group * 4);
-
- wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_set_irq);
-
-void geode_gpio_setup_event(unsigned int gpio, int pair, int pme)
-{
- u32 base = geode_get_dev_base(GEODE_DEV_GPIO);
- u32 offset, shift, val;
-
- if (gpio >= 24)
- offset = GPIO_MAP_W;
- else if (gpio >= 16)
- offset = GPIO_MAP_Z;
- else if (gpio >= 8)
- offset = GPIO_MAP_Y;
- else
- offset = GPIO_MAP_X;
-
- shift = (gpio % 8) * 4;
-
- val = inl(base + offset);
-
- /* Clear whatever was there before */
- val &= ~(0xF << shift);
-
- /* And set the new value */
-
- val |= ((pair & 7) << shift);
-
- /* Set the PME bit if this is a PME event */
-
- if (pme)
- val |= (1 << (shift + 3));
-
- outl(val, base + offset);
-}
-EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
-
-int geode_has_vsa2(void)
-{
- static int has_vsa2 = -1;
-
- if (has_vsa2 == -1) {
- u16 val;
-
- /*
- * The VSA has virtual registers that we can query for a
- * signature.
- */
- outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
- outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
-
- val = inw(VSA_VRC_DATA);
- has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
- }
-
- return has_vsa2;
-}
-EXPORT_SYMBOL_GPL(geode_has_vsa2);
-
-static int __init geode_southbridge_init(void)
-{
- if (!is_geode())
- return -ENODEV;
-
- init_lbars();
- (void) mfgpt_timer_setup();
- return 0;
-}
-
-postcore_initcall(geode_southbridge_init);
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 4f8e2507e8f..5051b94c906 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,8 +29,6 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
- reserve_trampoline_memory();
-
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 0b06cd778fd..b5a9896ca1e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -98,8 +98,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
{
copy_bootdata(__va(real_mode_data));
- reserve_trampoline_memory();
-
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index d42f65ac492..05d5fec64a9 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -362,8 +362,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
return ret;
}
- if (bp->callback)
- ret = arch_store_info(bp);
+ ret = arch_store_info(bp);
if (ret < 0)
return ret;
@@ -519,7 +518,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
break;
}
- (bp->callback)(bp, args->regs);
+ perf_bp_event(bp, args->regs);
rcu_read_unlock();
}
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 99c4d308f16..8eec0ec59af 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -103,9 +103,10 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
* on system-call entry - see also fork() and the signal handling
* code.
*/
-static int do_iopl(unsigned int level, struct pt_regs *regs)
+long sys_iopl(unsigned int level, struct pt_regs *regs)
{
unsigned int old = (regs->flags >> 12) & 3;
+ struct thread_struct *t = &current->thread;
if (level > 3)
return -EINVAL;
@@ -115,29 +116,8 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
return -EPERM;
}
regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
-
- return 0;
-}
-
-#ifdef CONFIG_X86_32
-long sys_iopl(struct pt_regs *regs)
-{
- unsigned int level = regs->bx;
- struct thread_struct *t = &current->thread;
- int rc;
-
- rc = do_iopl(level, regs);
- if (rc < 0)
- goto out;
-
t->iopl = level << 12;
set_iopl_mask(t->iopl);
-out:
- return rc;
-}
-#else
-asmlinkage long sys_iopl(unsigned int level, struct pt_regs *regs)
-{
- return do_iopl(level, regs);
+
+ return 0;
}
-#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 664bcb7384a..91fd0c70a18 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
@@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
out:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -294,12 +294,12 @@ void fixup_irqs(void)
continue;
/* interrupts are disabled at this point */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
continue;
}
@@ -326,7 +326,7 @@ void fixup_irqs(void)
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
desc->chip->unmask(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
printk("Broke affinity for irq %i\n", irq);
@@ -356,10 +356,10 @@ void fixup_irqs(void)
irq = __get_cpu_var(vector_irq)[vector];
desc = irq_to_desc(irq);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->retrigger)
desc->chip->retrigger(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
}
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 20a5b368946..dd74fe7273b 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -86,9 +86,15 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
gdb_regs[GDB_DS] = regs->ds;
gdb_regs[GDB_ES] = regs->es;
gdb_regs[GDB_CS] = regs->cs;
- gdb_regs[GDB_SS] = __KERNEL_DS;
gdb_regs[GDB_FS] = 0xFFFF;
gdb_regs[GDB_GS] = 0xFFFF;
+ if (user_mode_vm(regs)) {
+ gdb_regs[GDB_SS] = regs->ss;
+ gdb_regs[GDB_SP] = regs->sp;
+ } else {
+ gdb_regs[GDB_SS] = __KERNEL_DS;
+ gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
+ }
#else
gdb_regs[GDB_R8] = regs->r8;
gdb_regs[GDB_R9] = regs->r9;
@@ -101,8 +107,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
gdb_regs32[GDB_PS] = regs->flags;
gdb_regs32[GDB_CS] = regs->cs;
gdb_regs32[GDB_SS] = regs->ss;
-#endif
gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
+#endif
}
/**
@@ -220,8 +226,7 @@ static void kgdb_correct_hw_break(void)
dr7 |= ((breakinfo[breakno].len << 2) |
breakinfo[breakno].type) <<
((breakno << 2) + 16);
- if (breakno >= 0 && breakno <= 3)
- set_debugreg(breakinfo[breakno].addr, breakno);
+ set_debugreg(breakinfo[breakno].addr, breakno);
} else {
if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
@@ -395,7 +400,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
/* set the trace bit if we're stepping */
if (remcomInBuffer[0] == 's') {
linux_regs->flags |= X86_EFLAGS_TF;
- kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
deleted file mode 100644
index 2a62d843f01..00000000000
--- a/arch/x86/kernel/mfgpt_32.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT)
- *
- * Copyright (C) 2006, Advanced Micro Devices, Inc.
- * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
- */
-
-/*
- * We are using the 32.768kHz input clock - it's the only one that has the
- * ranges we find desirable. The following table lists the suitable
- * divisors and the associated Hz, minimum interval and the maximum interval:
- *
- * Divisor Hz Min Delta (s) Max Delta (s)
- * 1 32768 .00048828125 2.000
- * 2 16384 .0009765625 4.000
- * 4 8192 .001953125 8.000
- * 8 4096 .00390625 16.000
- * 16 2048 .0078125 32.000
- * 32 1024 .015625 64.000
- * 64 512 .03125 128.000
- * 128 256 .0625 256.000
- * 256 128 .125 512.000
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <asm/geode.h>
-
-#define MFGPT_DEFAULT_IRQ 7
-
-static struct mfgpt_timer_t {
- unsigned int avail:1;
-} mfgpt_timers[MFGPT_MAX_TIMERS];
-
-/* Selected from the table above */
-
-#define MFGPT_DIVISOR 16
-#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
-#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
-#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
-
-/* Allow for disabling of MFGPTs */
-static int disable;
-static int __init mfgpt_disable(char *s)
-{
- disable = 1;
- return 1;
-}
-__setup("nomfgpt", mfgpt_disable);
-
-/* Reset the MFGPT timers. This is required by some broken BIOSes which already
- * do the same and leave the system in an unstable state. TinyBIOS 0.98 is
- * affected at least (0.99 is OK with MFGPT workaround left to off).
- */
-static int __init mfgpt_fix(char *s)
-{
- u32 val, dummy;
-
- /* The following udocumented bit resets the MFGPT timers */
- val = 0xFF; dummy = 0;
- wrmsr(MSR_MFGPT_SETUP, val, dummy);
- return 1;
-}
-__setup("mfgptfix", mfgpt_fix);
-
-/*
- * Check whether any MFGPTs are available for the kernel to use. In most
- * cases, firmware that uses AMD's VSA code will claim all timers during
- * bootup; we certainly don't want to take them if they're already in use.
- * In other cases (such as with VSAless OpenFirmware), the system firmware
- * leaves timers available for us to use.
- */
-
-
-static int timers = -1;
-
-static void geode_mfgpt_detect(void)
-{
- int i;
- u16 val;
-
- timers = 0;
-
- if (disable) {
- printk(KERN_INFO "geode-mfgpt: MFGPT support is disabled\n");
- goto done;
- }
-
- if (!geode_get_dev_base(GEODE_DEV_MFGPT)) {
- printk(KERN_INFO "geode-mfgpt: MFGPT LBAR is not set up\n");
- goto done;
- }
-
- for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
- val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
- if (!(val & MFGPT_SETUP_SETUP)) {
- mfgpt_timers[i].avail = 1;
- timers++;
- }
- }
-
-done:
- printk(KERN_INFO "geode-mfgpt: %d MFGPT timers available.\n", timers);
-}
-
-int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
-{
- u32 msr, mask, value, dummy;
- int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
-
- if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
- return -EIO;
-
- /*
- * The register maps for these are described in sections 6.17.1.x of
- * the AMD Geode CS5536 Companion Device Data Book.
- */
- switch (event) {
- case MFGPT_EVENT_RESET:
- /*
- * XXX: According to the docs, we cannot reset timers above
- * 6; that is, resets for 7 and 8 will be ignored. Is this
- * a problem? -dilinger
- */
- msr = MSR_MFGPT_NR;
- mask = 1 << (timer + 24);
- break;
-
- case MFGPT_EVENT_NMI:
- msr = MSR_MFGPT_NR;
- mask = 1 << (timer + shift);
- break;
-
- case MFGPT_EVENT_IRQ:
- msr = MSR_MFGPT_IRQ;
- mask = 1 << (timer + shift);
- break;
-
- default:
- return -EIO;
- }
-
- rdmsr(msr, value, dummy);
-
- if (enable)
- value |= mask;
- else
- value &= ~mask;
-
- wrmsr(msr, value, dummy);
- return 0;
-}
-EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
-
-int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable)
-{
- u32 zsel, lpc, dummy;
- int shift;
-
- if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
- return -EIO;
-
- /*
- * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
- * is using the same CMP of the timer's Siamese twin, the IRQ is set to
- * 2, and we mustn't use nor change it.
- * XXX: Likewise, 2 Linux drivers might clash if the 2nd overwrites the
- * IRQ of the 1st. This can only happen if forcing an IRQ, calling this
- * with *irq==0 is safe. Currently there _are_ no 2 drivers.
- */
- rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
- shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer % 4) * 4;
- if (((zsel >> shift) & 0xF) == 2)
- return -EIO;
-
- /* Choose IRQ: if none supplied, keep IRQ already set or use default */
- if (!*irq)
- *irq = (zsel >> shift) & 0xF;
- if (!*irq)
- *irq = MFGPT_DEFAULT_IRQ;
-
- /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
- if (*irq < 1 || *irq == 2 || *irq > 15)
- return -EIO;
- rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
- if (lpc & (1 << *irq))
- return -EIO;
-
- /* All chosen and checked - go for it */
- if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
- return -EIO;
- if (enable) {
- zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
- wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
- }
-
- return 0;
-}
-
-static int mfgpt_get(int timer)
-{
- mfgpt_timers[timer].avail = 0;
- printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
- return timer;
-}
-
-int geode_mfgpt_alloc_timer(int timer, int domain)
-{
- int i;
-
- if (timers == -1) {
- /* timers haven't been detected yet */
- geode_mfgpt_detect();
- }
-
- if (!timers)
- return -1;
-
- if (timer >= MFGPT_MAX_TIMERS)
- return -1;
-
- if (timer < 0) {
- /* Try to find an available timer */
- for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
- if (mfgpt_timers[i].avail)
- return mfgpt_get(i);
-
- if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
- break;
- }
- } else {
- /* If they requested a specific timer, try to honor that */
- if (mfgpt_timers[timer].avail)
- return mfgpt_get(timer);
- }
-
- /* No timers available - too bad */
- return -1;
-}
-EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
-
-
-#ifdef CONFIG_GEODE_MFGPT_TIMER
-
-/*
- * The MFPGT timers on the CS5536 provide us with suitable timers to use
- * as clock event sources - not as good as a HPET or APIC, but certainly
- * better than the PIT. This isn't a general purpose MFGPT driver, but
- * a simplified one designed specifically to act as a clock event source.
- * For full details about the MFGPT, please consult the CS5536 data sheet.
- */
-
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-
-static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
-static u16 mfgpt_event_clock;
-
-static int irq;
-static int __init mfgpt_setup(char *str)
-{
- get_option(&str, &irq);
- return 1;
-}
-__setup("mfgpt_irq=", mfgpt_setup);
-
-static void mfgpt_disable_timer(u16 clock)
-{
- /* avoid races by clearing CMP1 and CMP2 unconditionally */
- geode_mfgpt_write(clock, MFGPT_REG_SETUP, (u16) ~MFGPT_SETUP_CNTEN |
- MFGPT_SETUP_CMP1 | MFGPT_SETUP_CMP2);
-}
-
-static int mfgpt_next_event(unsigned long, struct clock_event_device *);
-static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *);
-
-static struct clock_event_device mfgpt_clockevent = {
- .name = "mfgpt-timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = mfgpt_set_mode,
- .set_next_event = mfgpt_next_event,
- .rating = 250,
- .cpumask = cpu_all_mask,
- .shift = 32
-};
-
-static void mfgpt_start_timer(u16 delta)
-{
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
-
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
- MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
-}
-
-static void mfgpt_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- mfgpt_disable_timer(mfgpt_event_clock);
-
- if (mode == CLOCK_EVT_MODE_PERIODIC)
- mfgpt_start_timer(MFGPT_PERIODIC);
-
- mfgpt_tick_mode = mode;
-}
-
-static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
-{
- mfgpt_start_timer(delta);
- return 0;
-}
-
-static irqreturn_t mfgpt_tick(int irq, void *dev_id)
-{
- u16 val = geode_mfgpt_read(mfgpt_event_clock, MFGPT_REG_SETUP);
-
- /* See if the interrupt was for us */
- if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
- return IRQ_NONE;
-
- /* Turn off the clock (and clear the event) */
- mfgpt_disable_timer(mfgpt_event_clock);
-
- if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
- return IRQ_HANDLED;
-
- /* Clear the counter */
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
-
- /* Restart the clock in periodic mode */
-
- if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) {
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
- MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
- }
-
- mfgpt_clockevent.event_handler(&mfgpt_clockevent);
- return IRQ_HANDLED;
-}
-
-static struct irqaction mfgptirq = {
- .handler = mfgpt_tick,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
- .name = "mfgpt-timer"
-};
-
-int __init mfgpt_timer_setup(void)
-{
- int timer, ret;
- u16 val;
-
- timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
- if (timer < 0) {
- printk(KERN_ERR
- "mfgpt-timer: Could not allocate a MFPGT timer\n");
- return -ENODEV;
- }
-
- mfgpt_event_clock = timer;
-
- /* Set up the IRQ on the MFGPT side */
- if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, &irq)) {
- printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
- return -EIO;
- }
-
- /* And register it with the kernel */
- ret = setup_irq(irq, &mfgptirq);
-
- if (ret) {
- printk(KERN_ERR
- "mfgpt-timer: Unable to set up the interrupt.\n");
- goto err;
- }
-
- /* Set the clock scale and enable the event mode for CMP2 */
- val = MFGPT_SCALE | (3 << 8);
-
- geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
-
- /* Set up the clock event */
- mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
- mfgpt_clockevent.shift);
- mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
- &mfgpt_clockevent);
- mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
- &mfgpt_clockevent);
-
- printk(KERN_INFO
- "mfgpt-timer: Registering MFGPT timer %d as a clock event, using IRQ %d\n",
- timer, irq);
- clockevents_register_device(&mfgpt_clockevent);
-
- return 0;
-
-err:
- geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, &irq);
- printk(KERN_ERR
- "mfgpt-timer: Unable to set up the MFGPT clock source\n");
- return -EIO;
-}
-
-#endif
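
For reference, the clock-event half of the code deleted above follows the generic clockevents registration pattern of this era: compute a fixed-point mult from the timer frequency with div_sc(), translate the hardware counter limits into nanoseconds with clockevent_delta2ns(), then hand the device to clockevents_register_device(). A minimal sketch of that pattern, with made-up my_timer_* names and frequency (not part of this patch):

#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/cpumask.h>

#define MY_TIMER_HZ	32768		/* hypothetical input clock */

static void my_timer_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	/* program or stop the hardware according to 'mode' */
}

static int my_timer_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	/* arm a one-shot compare 'delta' ticks from now */
	return 0;
}

static struct clock_event_device my_clockevent = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= my_timer_set_mode,
	.set_next_event	= my_timer_next_event,
	.rating		= 250,
	.cpumask	= cpu_all_mask,
	.shift		= 32,
};

static void __init my_timer_register(void)
{
	my_clockevent.mult = div_sc(MY_TIMER_HZ, NSEC_PER_SEC, my_clockevent.shift);
	my_clockevent.min_delta_ns = clockevent_delta2ns(0xF, &my_clockevent);
	my_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE, &my_clockevent);
	clockevents_register_device(&my_clockevent);
}
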
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 63123d90210..37542b67c57 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -13,6 +13,9 @@
* Licensed under the terms of the GNU General Public
* License version 2. See file COPYING for details.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/firmware.h>
#include <linux/pci_ids.h>
#include <linux/uaccess.h>
@@ -81,7 +84,7 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
memset(csig, 0, sizeof(*csig));
rdmsr(MSR_AMD64_PATCH_LEVEL, csig->rev, dummy);
- pr_info("microcode: CPU%d: patch_level=0x%x\n", cpu, csig->rev);
+ pr_info("CPU%d: patch_level=0x%x\n", cpu, csig->rev);
return 0;
}
@@ -111,8 +114,8 @@ static int get_matching_microcode(int cpu, void *mc, int rev)
/* ucode might be chipset specific -- currently we don't support this */
if (mc_header->nb_dev_id || mc_header->sb_dev_id) {
- pr_err(KERN_ERR "microcode: CPU%d: loading of chipset "
- "specific code not yet supported\n", cpu);
+ pr_err("CPU%d: loading of chipset specific code not yet supported\n",
+ cpu);
return 0;
}
@@ -141,12 +144,12 @@ static int apply_microcode_amd(int cpu)
/* check current patch id and patch's id for match */
if (rev != mc_amd->hdr.patch_id) {
- pr_err("microcode: CPU%d: update failed "
- "(for patch_level=0x%x)\n", cpu, mc_amd->hdr.patch_id);
+ pr_err("CPU%d: update failed (for patch_level=0x%x)\n",
+ cpu, mc_amd->hdr.patch_id);
return -1;
}
- pr_info("microcode: CPU%d: updated (new patch_level=0x%x)\n", cpu, rev);
+ pr_info("CPU%d: updated (new patch_level=0x%x)\n", cpu, rev);
uci->cpu_sig.rev = rev;
return 0;
@@ -169,15 +172,14 @@ get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size)
return NULL;
if (section_hdr[0] != UCODE_UCODE_TYPE) {
- pr_err("microcode: error: invalid type field in "
- "container file section header\n");
+ pr_err("error: invalid type field in container file section header\n");
return NULL;
}
total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8));
if (total_size > size || total_size > UCODE_MAX_SIZE) {
- pr_err("microcode: error: size mismatch\n");
+ pr_err("error: size mismatch\n");
return NULL;
}
@@ -206,14 +208,13 @@ static int install_equiv_cpu_table(const u8 *buf)
size = buf_pos[2];
if (buf_pos[1] != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
- pr_err("microcode: error: invalid type field in "
- "container file section header\n");
+ pr_err("error: invalid type field in container file section header\n");
return 0;
}
equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
if (!equiv_cpu_table) {
- pr_err("microcode: failed to allocate equivalent CPU table\n");
+ pr_err("failed to allocate equivalent CPU table\n");
return 0;
}
@@ -246,7 +247,7 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
offset = install_equiv_cpu_table(ucode_ptr);
if (!offset) {
- pr_err("microcode: failed to create equivalent cpu table\n");
+ pr_err("failed to create equivalent cpu table\n");
return UCODE_ERROR;
}
@@ -277,8 +278,7 @@ generic_load_microcode(int cpu, const u8 *data, size_t size)
if (!leftover) {
vfree(uci->mc);
uci->mc = new_mc;
- pr_debug("microcode: CPU%d found a matching microcode "
- "update with version 0x%x (current=0x%x)\n",
+ pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
} else {
vfree(new_mc);
@@ -300,7 +300,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
return UCODE_NFOUND;
if (*(u32 *)firmware->data != UCODE_MAGIC) {
- pr_err("microcode: invalid UCODE_MAGIC (0x%08x)\n",
+ pr_err("invalid UCODE_MAGIC (0x%08x)\n",
*(u32 *)firmware->data);
return UCODE_ERROR;
}
@@ -313,8 +313,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
- pr_info("microcode: AMD microcode update via "
- "/dev/cpu/microcode not supported\n");
+ pr_info("AMD microcode update via /dev/cpu/microcode not supported\n");
return UCODE_ERROR;
}
@@ -334,14 +333,13 @@ void init_microcode_amd(struct device *device)
WARN_ON(c->x86_vendor != X86_VENDOR_AMD);
if (c->x86 < 0x10) {
- pr_warning("microcode: AMD CPU family 0x%x not supported\n",
- c->x86);
+ pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
return;
}
supported_cpu = 1;
if (request_firmware(&firmware, fw_name, device))
- pr_err("microcode: failed to load file %s\n", fw_name);
+ pr_err("failed to load file %s\n", fw_name);
}
void fini_microcode_amd(void)
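
All of the message changes in this file (and in the other microcode and KVM files below) rely on the same printk convention: defining pr_fmt() before the first #include makes every pr_err()/pr_info()/pr_debug() in the translation unit prepend that prefix, so the literal "microcode: " strings become redundant. A minimal sketch of the mechanism, using a hypothetical mydrv module built through kbuild (which supplies KBUILD_MODNAME):

/* must appear before any include so <linux/kernel.h> picks it up */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init mydrv_init(void)
{
	/* expands to printk(KERN_INFO KBUILD_MODNAME ": loaded\n") */
	pr_info("loaded\n");
	return 0;
}
module_init(mydrv_init);

MODULE_LICENSE("GPL");
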
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index e68aae39786..844c02c65fc 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -70,6 +70,9 @@
* Fix sigmatch() macro to handle old CPUs with pf == 0.
* Thanks to Stuart Swales for pointing out this bug.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
@@ -209,7 +212,7 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
ssize_t ret = -EINVAL;
if ((len >> PAGE_SHIFT) > totalram_pages) {
- pr_err("microcode: too much data (max %ld pages)\n", totalram_pages);
+ pr_err("too much data (max %ld pages)\n", totalram_pages);
return ret;
}
@@ -244,7 +247,7 @@ static int __init microcode_dev_init(void)
error = misc_register(&microcode_dev);
if (error) {
- pr_err("microcode: can't misc_register on minor=%d\n", MICROCODE_MINOR);
+ pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
return error;
}
@@ -359,7 +362,7 @@ static enum ucode_state microcode_resume_cpu(int cpu)
if (!uci->mc)
return UCODE_NFOUND;
- pr_debug("microcode: CPU%d updated upon resume\n", cpu);
+ pr_debug("CPU%d updated upon resume\n", cpu);
apply_microcode_on_target(cpu);
return UCODE_OK;
@@ -379,7 +382,7 @@ static enum ucode_state microcode_init_cpu(int cpu)
ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev);
if (ustate == UCODE_OK) {
- pr_debug("microcode: CPU%d updated upon init\n", cpu);
+ pr_debug("CPU%d updated upon init\n", cpu);
apply_microcode_on_target(cpu);
}
@@ -406,7 +409,7 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
if (!cpu_online(cpu))
return 0;
- pr_debug("microcode: CPU%d added\n", cpu);
+ pr_debug("CPU%d added\n", cpu);
err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
if (err)
@@ -425,7 +428,7 @@ static int mc_sysdev_remove(struct sys_device *sys_dev)
if (!cpu_online(cpu))
return 0;
- pr_debug("microcode: CPU%d removed\n", cpu);
+ pr_debug("CPU%d removed\n", cpu);
microcode_fini_cpu(cpu);
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
return 0;
@@ -473,15 +476,15 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
microcode_update_cpu(cpu);
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- pr_debug("microcode: CPU%d added\n", cpu);
+ pr_debug("CPU%d added\n", cpu);
if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group))
- pr_err("microcode: Failed to create group for CPU%d\n", cpu);
+ pr_err("Failed to create group for CPU%d\n", cpu);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/* Suspend is in progress, only remove the interface */
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
- pr_debug("microcode: CPU%d removed\n", cpu);
+ pr_debug("CPU%d removed\n", cpu);
break;
case CPU_DEAD:
case CPU_UP_CANCELED_FROZEN:
@@ -507,7 +510,7 @@ static int __init microcode_init(void)
microcode_ops = init_amd_microcode();
if (!microcode_ops) {
- pr_err("microcode: no support for this CPU vendor\n");
+ pr_err("no support for this CPU vendor\n");
return -ENODEV;
}
@@ -541,8 +544,7 @@ static int __init microcode_init(void)
register_hotcpu_notifier(&mc_cpu_notifier);
pr_info("Microcode Update Driver: v" MICROCODE_VERSION
- " <tigran@aivazian.fsnet.co.uk>,"
- " Peter Oruba\n");
+ " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
return 0;
}
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index 0d334ddd0a9..ebd193e476c 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -70,6 +70,9 @@
* Fix sigmatch() macro to handle old CPUs with pf == 0.
* Thanks to Stuart Swales for pointing out this bug.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
@@ -146,8 +149,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
cpu_has(c, X86_FEATURE_IA64)) {
- printk(KERN_ERR "microcode: CPU%d not a capable Intel "
- "processor\n", cpu_num);
+ pr_err("CPU%d not a capable Intel processor\n", cpu_num);
return -1;
}
@@ -165,8 +167,8 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
/* get the current revision from MSR 0x8B */
rdmsr(MSR_IA32_UCODE_REV, val[0], csig->rev);
- printk(KERN_INFO "microcode: CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
- cpu_num, csig->sig, csig->pf, csig->rev);
+ pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
+ cpu_num, csig->sig, csig->pf, csig->rev);
return 0;
}
@@ -194,28 +196,24 @@ static int microcode_sanity_check(void *mc)
data_size = get_datasize(mc_header);
if (data_size + MC_HEADER_SIZE > total_size) {
- printk(KERN_ERR "microcode: error! "
- "Bad data size in microcode data file\n");
+ pr_err("error! Bad data size in microcode data file\n");
return -EINVAL;
}
if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
- printk(KERN_ERR "microcode: error! "
- "Unknown microcode update format\n");
+ pr_err("error! Unknown microcode update format\n");
return -EINVAL;
}
ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
if (ext_table_size) {
if ((ext_table_size < EXT_HEADER_SIZE)
|| ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
- printk(KERN_ERR "microcode: error! "
- "Small exttable size in microcode data file\n");
+ pr_err("error! Small exttable size in microcode data file\n");
return -EINVAL;
}
ext_header = mc + MC_HEADER_SIZE + data_size;
if (ext_table_size != exttable_size(ext_header)) {
- printk(KERN_ERR "microcode: error! "
- "Bad exttable size in microcode data file\n");
+ pr_err("error! Bad exttable size in microcode data file\n");
return -EFAULT;
}
ext_sigcount = ext_header->count;
@@ -230,8 +228,7 @@ static int microcode_sanity_check(void *mc)
while (i--)
ext_table_sum += ext_tablep[i];
if (ext_table_sum) {
- printk(KERN_WARNING "microcode: aborting, "
- "bad extended signature table checksum\n");
+ pr_warning("aborting, bad extended signature table checksum\n");
return -EINVAL;
}
}
@@ -242,7 +239,7 @@ static int microcode_sanity_check(void *mc)
while (i--)
orig_sum += ((int *)mc)[i];
if (orig_sum) {
- printk(KERN_ERR "microcode: aborting, bad checksum\n");
+ pr_err("aborting, bad checksum\n");
return -EINVAL;
}
if (!ext_table_size)
@@ -255,7 +252,7 @@ static int microcode_sanity_check(void *mc)
- (mc_header->sig + mc_header->pf + mc_header->cksum)
+ (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
if (sum) {
- printk(KERN_ERR "microcode: aborting, bad checksum\n");
+ pr_err("aborting, bad checksum\n");
return -EINVAL;
}
}
@@ -327,13 +324,11 @@ static int apply_microcode(int cpu)
rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
if (val[1] != mc_intel->hdr.rev) {
- printk(KERN_ERR "microcode: CPU%d update "
- "to revision 0x%x failed\n",
- cpu_num, mc_intel->hdr.rev);
+ pr_err("CPU%d update to revision 0x%x failed\n",
+ cpu_num, mc_intel->hdr.rev);
return -1;
}
- printk(KERN_INFO "microcode: CPU%d updated to revision "
- "0x%x, date = %04x-%02x-%02x \n",
+ pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x \n",
cpu_num, val[1],
mc_intel->hdr.date & 0xffff,
mc_intel->hdr.date >> 24,
@@ -362,8 +357,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
mc_size = get_totalsize(&mc_header);
if (!mc_size || mc_size > leftover) {
- printk(KERN_ERR "microcode: error!"
- "Bad data in microcode data file\n");
+ pr_err("error! Bad data in microcode data file\n");
break;
}
@@ -405,9 +399,8 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
- pr_debug("microcode: CPU%d found a matching microcode update with"
- " version 0x%x (current=0x%x)\n",
- cpu, new_rev, uci->cpu_sig.rev);
+ pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
+ cpu, new_rev, uci->cpu_sig.rev);
out:
return state;
}
@@ -429,7 +422,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
c->x86, c->x86_model, c->x86_mask);
if (request_firmware(&firmware, name, device)) {
- pr_debug("microcode: data file %s load failed\n", name);
+ pr_debug("data file %s load failed\n", name);
return UCODE_NFOUND;
}
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 35a57c963df..40b54ceb68b 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -945,9 +945,6 @@ void __init early_reserve_e820_mpc_new(void)
{
if (enable_update_mptable && alloc_mptable) {
u64 startt = 0;
-#ifdef CONFIG_X86_TRAMPOLINE
- startt = TRAMPOLINE_BASE;
-#endif
mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
}
}
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 553449951b8..4bd93c9b2b2 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -172,11 +172,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
static int msr_open(struct inode *inode, struct file *file)
{
- unsigned int cpu = iminor(file->f_path.dentry->d_inode);
- struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int cpu;
+ struct cpuinfo_x86 *c;
cpu = iminor(file->f_path.dentry->d_inode);
-
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
return -ENXIO; /* No such CPU */
@@ -247,7 +246,7 @@ static int __init msr_init(void)
int i, err = 0;
i = 0;
- if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
+ if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
printk(KERN_ERR "msr: unable to get major %d for msr\n",
MSR_MAJOR);
err = -EBUSY;
@@ -275,7 +274,7 @@ out_class:
msr_device_destroy(i);
class_destroy(msr_class);
out_chrdev:
- unregister_chrdev(MSR_MAJOR, "cpu/msr");
+ __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
out:
return err;
}
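
The msr driver now registers only the minor range it actually uses: __register_chrdev()/__unregister_chrdev() take a base minor and a count instead of implicitly claiming all 256 minors of the major. A small illustration with hypothetical names (not from this patch):

#include <linux/fs.h>

#define MYDEV_MAJOR	240	/* hypothetical, from the local/experimental range */
#define MYDEV_MINORS	16

static const struct file_operations mydev_fops;	/* stub for the sketch */

static int mydev_register(void)
{
	/* claim only minors 0..15 of MYDEV_MAJOR */
	return __register_chrdev(MYDEV_MAJOR, 0, MYDEV_MINORS, "mydev", &mydev_fops);
}

static void mydev_unregister(void)
{
	__unregister_chrdev(MYDEV_MAJOR, 0, MYDEV_MINORS, "mydev");
}
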
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
index 4006c522adc..9d1d263f786 100644
--- a/arch/x86/kernel/olpc.c
+++ b/arch/x86/kernel/olpc.c
@@ -212,7 +212,7 @@ static int __init olpc_init(void)
unsigned char *romsig;
/* The ioremap check is dangerous; limit what we run it on */
- if (!is_geode() || geode_has_vsa2())
+ if (!is_geode() || cs5535_has_vsa2())
return 0;
spin_lock_init(&ec_lock);
@@ -244,7 +244,7 @@ static int __init olpc_init(void)
(unsigned char *) &olpc_platform_info.ecver, 1);
/* check to see if the VSA exists */
- if (geode_has_vsa2())
+ if (cs5535_has_vsa2())
olpc_platform_info.flags |= OLPC_F_VSA;
printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 3a7c5a44082..676b8c77a97 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,9 +8,9 @@
#include <asm/paravirt.h>
static inline void
-default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
struct pv_lock_ops pv_lock_ops = {
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index c563e4c8ff3..2bbde607814 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -31,7 +31,7 @@
#include <linux/string.h>
#include <linux/crash_dump.h>
#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -212,7 +212,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
spin_lock_irqsave(&tbl->it_lock, flags);
- iommu_area_reserve(tbl->it_map, index, npages);
+ bitmap_set(tbl->it_map, index, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
@@ -303,7 +303,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_lock_irqsave(&tbl->it_lock, flags);
- iommu_area_free(tbl->it_map, entry, npages);
+ bitmap_clear(tbl->it_map, entry, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
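
Both this driver and the GART code below drop the iommu-helper wrappers in favour of the generic bitmap helpers; bitmap_set() marks a run of bits used and bitmap_clear() releases it again. A self-contained sketch of that API with hypothetical names (the real drivers do this under their own spinlocks):

#include <linux/bitmap.h>

#define MY_TABLE_ENTRIES	1024

static DECLARE_BITMAP(my_table_map, MY_TABLE_ENTRIES);

static void my_table_reserve(unsigned int index, unsigned int npages)
{
	/* mark npages consecutive entries starting at index as in use */
	bitmap_set(my_table_map, index, npages);
}

static void my_table_free(unsigned int index, unsigned int npages)
{
	/* clear the same run when the mapping is torn down */
	bitmap_clear(my_table_map, index, npages);
}
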
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index afcc58b69c7..75e14e21f61 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -124,8 +124,8 @@ void __init pci_iommu_alloc(void)
/* free the range so iommu could get some range less than 4G */
dma32_free_bootmem();
#endif
- if (pci_swiotlb_init())
- return;
+ if (pci_swiotlb_detect())
+ goto out;
gart_iommu_hole_init();
@@ -135,6 +135,8 @@ void __init pci_iommu_alloc(void)
/* needs to be called after gart_iommu_hole_init */
amd_iommu_detect();
+out:
+ pci_swiotlb_init();
}
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index e6a0d402f17..34de53b46f8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
@@ -126,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
unsigned long flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- iommu_area_free(iommu_gart_bitmap, offset, size);
+ bitmap_clear(iommu_gart_bitmap, offset, size);
if (offset >= next_bit)
next_bit = offset + size;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
@@ -710,7 +710,8 @@ static void gart_iommu_shutdown(void)
struct pci_dev *dev;
int i;
- if (no_agp)
+ /* don't shutdown it if there is AGP installed */
+ if (!no_agp)
return;
for (i = 0; i < num_k8_northbridges; i++) {
@@ -791,7 +792,7 @@ int __init gart_iommu_init(void)
* Out of IOMMU space handling.
* Reserve some invalid pages at the beginning of the GART.
*/
- iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+ bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index e3c0a66b9e7..7d2829dde20 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -43,12 +43,12 @@ static struct dma_map_ops swiotlb_dma_ops = {
};
/*
- * pci_swiotlb_init - initialize swiotlb if necessary
+ * pci_swiotlb_detect - set swiotlb to 1 if necessary
*
* This returns non-zero if we are forced to use swiotlb (by the boot
* option).
*/
-int __init pci_swiotlb_init(void)
+int __init pci_swiotlb_detect(void)
{
int use_swiotlb = swiotlb | swiotlb_force;
@@ -60,10 +60,13 @@ int __init pci_swiotlb_init(void)
if (swiotlb_force)
swiotlb = 1;
+ return use_swiotlb;
+}
+
+void __init pci_swiotlb_init(void)
+{
if (swiotlb) {
swiotlb_init(0);
dma_ops = &swiotlb_dma_ops;
}
-
- return use_swiotlb;
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5e2ba634ea1..98c2cdeb599 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -10,6 +10,8 @@
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
+#include <linux/dmi.h>
+#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/system.h>
@@ -90,6 +92,25 @@ void exit_thread(void)
}
}
+void show_regs_common(void)
+{
+ const char *board, *product;
+
+ board = dmi_get_system_info(DMI_BOARD_NAME);
+ if (!board)
+ board = "";
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!product)
+ product = "";
+
+ printk("\n");
+ printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+ current->pid, current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version, board, product);
+}
+
void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -234,6 +255,76 @@ int sys_vfork(struct pt_regs *regs)
NULL, NULL);
}
+long
+sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->sp;
+ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
+}
+
+/*
+ * This gets run with %si containing the
+ * function to call, and %di containing
+ * the "args".
+ */
+extern void kernel_thread_helper(void);
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.si = (unsigned long) fn;
+ regs.di = (unsigned long) arg;
+
+#ifdef CONFIG_X86_32
+ regs.ds = __USER_DS;
+ regs.es = __USER_DS;
+ regs.fs = __KERNEL_PERCPU;
+ regs.gs = __KERNEL_STACK_CANARY;
+#endif
+
+ regs.orig_ax = -1;
+ regs.ip = (unsigned long) kernel_thread_helper;
+ regs.cs = __KERNEL_CS | get_kernel_rpl();
+ regs.flags = X86_EFLAGS_IF | 0x2;
+
+ /* Ok, create the new process.. */
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+/*
+ * sys_execve() executes a new program.
+ */
+long sys_execve(char __user *name, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs)
+{
+ long error;
+ char *filename;
+
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ return error;
+ error = do_execve(filename, argv, envp, regs);
+
+#ifdef CONFIG_X86_32
+ if (error == 0) {
+ /* Make sure we don't return using sysenter.. */
+ set_thread_flag(TIF_IRET);
+ }
+#endif
+
+ putname(filename);
+ return error;
+}
/*
* Idle related variables and functions
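
With this change kernel_thread(), sys_clone() and sys_execve() live once in process.c instead of in per-bitness copies; the helper keeps its historical signature, and callers pass only the extra clone flags they want, since CLONE_VM | CLONE_UNTRACED is ORed in unconditionally. A sketch of a caller, with a hypothetical my_worker thread function:

#include <linux/kernel.h>
#include <linux/sched.h>

static int my_worker(void *data)
{
	/* runs in its own kernel thread; data is kernel_thread()'s second argument */
	return 0;
}

static void my_start_worker(void)
{
	int pid;

	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
	if (pid < 0)
		pr_err("my_worker: failed to start (%d)\n", pid);
}
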
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 075580b3568..9c517b5858f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
-#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
@@ -35,7 +34,6 @@
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
-#include <linux/dmi.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -128,7 +126,6 @@ void __show_regs(struct pt_regs *regs, int all)
unsigned long d0, d1, d2, d3, d6, d7;
unsigned long sp;
unsigned short ss, gs;
- const char *board;
if (user_mode_vm(regs)) {
sp = regs->sp;
@@ -140,16 +137,7 @@ void __show_regs(struct pt_regs *regs, int all)
savesegment(gs, gs);
}
- printk("\n");
-
- board = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (!board)
- board = "";
- printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
- task_pid_nr(current), current->comm,
- print_tainted(), init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version, board);
+ show_regs_common();
printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
(u16)regs->cs, regs->ip, regs->flags,
@@ -192,39 +180,6 @@ void show_regs(struct pt_regs *regs)
show_trace(NULL, regs, &regs->sp, regs->bp);
}
-/*
- * This gets run with %bx containing the
- * function to call, and %dx containing
- * the "args".
- */
-extern void kernel_thread_helper(void);
-
-/*
- * Create a kernel thread
- */
-int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
-
- regs.bx = (unsigned long) fn;
- regs.dx = (unsigned long) arg;
-
- regs.ds = __USER_DS;
- regs.es = __USER_DS;
- regs.fs = __KERNEL_PERCPU;
- regs.gs = __KERNEL_STACK_CANARY;
- regs.orig_ax = -1;
- regs.ip = (unsigned long) kernel_thread_helper;
- regs.cs = __KERNEL_CS | get_kernel_rpl();
- regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
-
- /* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
void release_thread(struct task_struct *dead_task)
{
BUG_ON(dead_task->mm);
@@ -436,46 +391,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
return prev_p;
}
-int sys_clone(struct pt_regs *regs)
-{
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
-
- clone_flags = regs->bx;
- newsp = regs->cx;
- parent_tidptr = (int __user *)regs->dx;
- child_tidptr = (int __user *)regs->di;
- if (!newsp)
- newsp = regs->sp;
- return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-int sys_execve(struct pt_regs *regs)
-{
- int error;
- char *filename;
-
- filename = getname((char __user *) regs->bx);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename,
- (char __user * __user *) regs->cx,
- (char __user * __user *) regs->dx,
- regs);
- if (error == 0) {
- /* Make sure we don't return using sysenter.. */
- set_thread_flag(TIF_IRET);
- }
- putname(filename);
-out:
- return error;
-}
-
#define top_esp (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c95c8f4e790..52fbd0c6019 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -26,7 +26,6 @@
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
-#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
@@ -38,7 +37,6 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
-#include <linux/dmi.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -59,8 +57,6 @@ asmlinkage extern void ret_from_fork(void);
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);
-unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
-
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
void idle_notifier_register(struct notifier_block *n)
@@ -163,18 +159,8 @@ void __show_regs(struct pt_regs *regs, int all)
unsigned long d0, d1, d2, d3, d6, d7;
unsigned int fsindex, gsindex;
unsigned int ds, cs, es;
- const char *board;
-
- printk("\n");
- print_modules();
- board = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (!board)
- board = "";
- printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
- current->pid, current->comm, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version, board);
+
+ show_regs_common();
printk(KERN_INFO "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
printk_address(regs->ip, 1);
printk(KERN_INFO "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
@@ -285,8 +271,9 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
*childregs = *regs;
childregs->ax = 0;
- childregs->sp = sp;
- if (sp == ~0UL)
+ if (user_mode(regs))
+ childregs->sp = sp;
+ else
childregs->sp = (unsigned long)childregs;
p->thread.sp = (unsigned long) childregs;
@@ -520,25 +507,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
return prev_p;
}
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage
-long sys_execve(char __user *name, char __user * __user *argv,
- char __user * __user *envp, struct pt_regs *regs)
-{
- long error;
- char *filename;
-
- filename = getname(name);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- return error;
- error = do_execve(filename, argv, envp, regs);
- putname(filename);
- return error;
-}
-
void set_personality_64bit(void)
{
/* inherit personality from parent */
@@ -553,15 +521,6 @@ void set_personality_64bit(void)
current->personality &= ~READ_IMPLIES_EXEC;
}
-asmlinkage long
-sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
-{
- if (!newsp)
- newsp = regs->sp;
- return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
-}
-
unsigned long get_wchan(struct task_struct *p)
{
unsigned long stack;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 04d182a7cfd..017d937639f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -509,14 +509,14 @@ static int genregs_get(struct task_struct *target,
{
if (kbuf) {
unsigned long *k = kbuf;
- while (count > 0) {
+ while (count >= sizeof(*k)) {
*k++ = getreg(target, pos);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
unsigned long __user *u = ubuf;
- while (count > 0) {
+ while (count >= sizeof(*u)) {
if (__put_user(getreg(target, pos), u++))
return -EFAULT;
count -= sizeof(*u);
@@ -535,14 +535,14 @@ static int genregs_set(struct task_struct *target,
int ret = 0;
if (kbuf) {
const unsigned long *k = kbuf;
- while (count > 0 && !ret) {
+ while (count >= sizeof(*k) && !ret) {
ret = putreg(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const unsigned long __user *u = ubuf;
- while (count > 0 && !ret) {
+ while (count >= sizeof(*u) && !ret) {
unsigned long word;
ret = __get_user(word, u++);
if (ret)
@@ -555,7 +555,9 @@ static int genregs_set(struct task_struct *target,
return ret;
}
-static void ptrace_triggered(struct perf_event *bp, void *data)
+static void ptrace_triggered(struct perf_event *bp, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
int i;
struct thread_struct *thread = &(current->thread);
@@ -593,13 +595,13 @@ static unsigned long ptrace_get_dr7(struct perf_event *bp[])
return dr7;
}
-static struct perf_event *
+static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
struct task_struct *tsk, int disabled)
{
int err;
int gen_len, gen_type;
- DEFINE_BREAKPOINT_ATTR(attr);
+ struct perf_event_attr attr;
/*
	 * We should have at least an inactive breakpoint at this
@@ -607,18 +609,18 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
* written the address register first
*/
if (!bp)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
if (err)
- return ERR_PTR(err);
+ return err;
attr = bp->attr;
attr.bp_len = gen_len;
attr.bp_type = gen_type;
attr.disabled = disabled;
- return modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk);
+ return modify_user_hw_breakpoint(bp, &attr);
}
/*
@@ -656,28 +658,17 @@ restore:
if (!second_pass)
continue;
- thread->ptrace_bps[i] = NULL;
- bp = ptrace_modify_breakpoint(bp, len, type,
+ rc = ptrace_modify_breakpoint(bp, len, type,
tsk, 1);
- if (IS_ERR(bp)) {
- rc = PTR_ERR(bp);
- thread->ptrace_bps[i] = NULL;
+ if (rc)
break;
- }
- thread->ptrace_bps[i] = bp;
}
continue;
}
- bp = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
-
- /* Incorrect bp, or we have a bug in bp API */
- if (IS_ERR(bp)) {
- rc = PTR_ERR(bp);
- thread->ptrace_bps[i] = NULL;
+ rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
+ if (rc)
break;
- }
- thread->ptrace_bps[i] = bp;
}
/*
* Make a second pass to free the remaining unused breakpoints
@@ -721,9 +712,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
{
struct perf_event *bp;
struct thread_struct *t = &tsk->thread;
- DEFINE_BREAKPOINT_ATTR(attr);
+ struct perf_event_attr attr;
if (!t->ptrace_bps[nr]) {
+ hw_breakpoint_init(&attr);
/*
* Put stub len and type to register (reserve) an inactive but
* correct bp
@@ -734,26 +726,32 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
attr.disabled = 1;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
+
+ /*
+ * CHECKME: the previous code returned -EIO if the addr wasn't
+ * a valid task virtual addr. The new one will return -EINVAL in
+ * this case.
+ * -EINVAL may be what we want for in-kernel breakpoints users,
+ * but -EIO looks better for ptrace, since we refuse a register
+ * writing for the user. And anyway this is the previous
+ * behaviour.
+ */
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ t->ptrace_bps[nr] = bp;
} else {
+ int err;
+
bp = t->ptrace_bps[nr];
- t->ptrace_bps[nr] = NULL;
attr = bp->attr;
attr.bp_addr = addr;
- bp = modify_user_hw_breakpoint(bp, &attr, bp->callback, tsk);
+ err = modify_user_hw_breakpoint(bp, &attr);
+ if (err)
+ return err;
}
- /*
- * CHECKME: the previous code returned -EIO if the addr wasn't a
- * valid task virtual addr. The new one will return -EINVAL in this
- * case.
- * -EINVAL may be what we want for in-kernel breakpoints users, but
- * -EIO looks better for ptrace, since we refuse a register writing
- * for the user. And anyway this is the previous behaviour.
- */
- if (IS_ERR(bp))
- return PTR_ERR(bp);
- t->ptrace_bps[nr] = bp;
return 0;
}
@@ -1460,14 +1458,14 @@ static int genregs32_get(struct task_struct *target,
{
if (kbuf) {
compat_ulong_t *k = kbuf;
- while (count > 0) {
+ while (count >= sizeof(*k)) {
getreg32(target, pos, k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
compat_ulong_t __user *u = ubuf;
- while (count > 0) {
+ while (count >= sizeof(*u)) {
compat_ulong_t word;
getreg32(target, pos, &word);
if (__put_user(word, u++))
@@ -1488,14 +1486,14 @@ static int genregs32_set(struct task_struct *target,
int ret = 0;
if (kbuf) {
const compat_ulong_t *k = kbuf;
- while (count > 0 && !ret) {
+ while (count >= sizeof(*k) && !ret) {
ret = putreg32(target, pos, *k++);
count -= sizeof(*k);
pos += sizeof(*k);
}
} else {
const compat_ulong_t __user *u = ubuf;
- while (count > 0 && !ret) {
+ while (count >= sizeof(*u) && !ret) {
compat_ulong_t word;
ret = __get_user(word, u++);
if (ret)
@@ -1678,21 +1676,33 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
- int error_code, int si_code)
+static void fill_sigtrap_info(struct task_struct *tsk,
+ struct pt_regs *regs,
+ int error_code, int si_code,
+ struct siginfo *info)
{
- struct siginfo info;
-
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
- memset(&info, 0, sizeof(info));
- info.si_signo = SIGTRAP;
- info.si_code = si_code;
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+ info->si_code = si_code;
+ info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
+}
+
+void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs,
+ struct siginfo *info)
+{
+ fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
+}
- /* User-mode ip? */
- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code, int si_code)
+{
+ struct siginfo info;
+ fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
/* Send us the fake SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
@@ -1757,29 +1767,22 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
+ bool step;
+
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->ax);
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
-
/*
* If TIF_SYSCALL_EMU is set, we only get here because of
* TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
* We already reported this syscall instruction in
- * syscall_trace_enter(), so don't do any more now.
- */
- if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
- return;
-
- /*
- * If we are single-stepping, synthesize a trap to follow the
- * system call instruction.
+ * syscall_trace_enter().
*/
- if (test_thread_flag(TIF_SINGLESTEP) &&
- tracehook_consider_fatal_signal(current, SIGTRAP))
- send_sigtrap(current, regs, 0, TRAP_BRKPT);
+ step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
+ !test_thread_flag(TIF_SYSCALL_EMU);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
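
The ptrace rework above follows the updated hw_breakpoint API: callbacks now take the full perf overflow-handler signature, attributes are seeded with hw_breakpoint_init(), registration returns a perf_event (or an ERR_PTR), and later changes go through modify_user_hw_breakpoint(bp, &attr). A condensed sketch of that flow with hypothetical names:

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

static void my_bp_handler(struct perf_event *bp, int nmi,
			  struct perf_sample_data *data, struct pt_regs *regs)
{
	/* called when the watched address is written */
}

static struct perf_event *my_set_watchpoint(struct task_struct *tsk,
					    unsigned long addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);	/* breakpoint-type perf attr with sane defaults */
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_W;

	/* returns a perf_event pointer or an ERR_PTR() on failure */
	return register_user_hw_breakpoint(&attr, my_bp_handler, tsk);
}
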
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2b97fc5b124..1545bc0c984 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -259,6 +259,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
},
},
+ { /* Handle problems with rebooting on ASUS P4S800 */
+ .callback = set_bios_reboot,
+ .ident = "ASUS P4S800",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
+ },
+ },
{ }
};
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 201eab63b05..fda313ebbb0 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <asm/reboot_fixups.h>
#include <asm/msr.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
static void cs5530a_warm_reset(struct pci_dev *dev)
{
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 946a311a25c..f7b8b9894b2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -73,6 +73,7 @@
#include <asm/mtrr.h>
#include <asm/apic.h>
+#include <asm/trampoline.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
@@ -875,6 +876,13 @@ void __init setup_arch(char **cmdline_p)
reserve_brk();
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+
+ reserve_trampoline_memory();
+
#ifdef CONFIG_ACPI_SLEEP
/*
* Reserve low memory region for sleep support.
@@ -921,11 +929,6 @@ void __init setup_arch(char **cmdline_p)
early_acpi_boot_init();
- /*
- * Find and reserve possible boot-time SMP configuration:
- */
- find_smp_config();
-
#ifdef CONFIG_ACPI_NUMA
/*
* Parse SRAT to discover nodes.
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index d559af913e1..35abcb8b00e 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -20,9 +22,9 @@
#include <asm/stackprotector.h>
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(x...) printk(KERN_DEBUG x)
+# define DBG(fmt, ...) pr_dbg(fmt, ##__VA_ARGS__)
#else
-# define DBG(x...)
+# define DBG(fmt, ...) do { if (0) pr_dbg(fmt, ##__VA_ARGS__); } while (0)
#endif
DEFINE_PER_CPU(int, cpu_number);
@@ -116,8 +118,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
} else {
ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
size, align, goal);
- pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
- "%016lx\n", cpu, size, node, __pa(ptr));
+ pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
+ cpu, size, node, __pa(ptr));
}
return ptr;
#else
@@ -198,8 +200,7 @@ void __init setup_per_cpu_areas(void)
pcpu_cpu_distance,
pcpu_fc_alloc, pcpu_fc_free);
if (rc < 0)
- pr_warning("PERCPU: %s allocator failed (%d), "
- "falling back to page size\n",
+ pr_warning("%s allocator failed (%d), falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 74fe6d86dc5..4fd173cd8e5 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -545,22 +545,12 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
}
#endif /* CONFIG_X86_32 */
-#ifdef CONFIG_X86_32
-int sys_sigaltstack(struct pt_regs *regs)
-{
- const stack_t __user *uss = (const stack_t __user *)regs->bx;
- stack_t __user *uoss = (stack_t __user *)regs->cx;
-
- return do_sigaltstack(uss, uoss, regs->sp);
-}
-#else /* !CONFIG_X86_32 */
-asmlinkage long
+long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
struct pt_regs *regs)
{
return do_sigaltstack(uss, uoss, regs->sp);
}
-#endif /* CONFIG_X86_32 */
/*
* Do a signal return; undo the signal stack.
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 29e6744f51e..678d0b8c26f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -671,6 +671,26 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
complete(&c_idle->done);
}
+/* reduce the number of lines printed when booting a large cpu count system */
+static void __cpuinit announce_cpu(int cpu, int apicid)
+{
+ static int current_node = -1;
+ int node = cpu_to_node(cpu);
+
+ if (system_state == SYSTEM_BOOTING) {
+ if (node != current_node) {
+ if (current_node > (-1))
+ pr_cont(" Ok.\n");
+ current_node = node;
+ pr_info("Booting Node %3d, Processors ", node);
+ }
+ pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+ return;
+ } else
+ pr_info("Booting Node %d Processor %d APIC 0x%x\n",
+ node, cpu, apicid);
+}
+
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -737,9 +757,8 @@ do_rest:
/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();
- /* So we see what's up */
- printk(KERN_INFO "Booting processor %d APIC 0x%x ip 0x%lx\n",
- cpu, apicid, start_ip);
+ /* So we see what's up */
+ announce_cpu(cpu, apicid);
/*
* This grunge runs the startup process for
@@ -788,21 +807,17 @@ do_rest:
udelay(100);
}
- if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
- /* number CPUs logically, starting from 1 (BSP is 0) */
- pr_debug("OK.\n");
- printk(KERN_INFO "CPU%d: ", cpu);
- print_cpu_info(&cpu_data(cpu));
- pr_debug("CPU has booted.\n");
- } else {
+ if (cpumask_test_cpu(cpu, cpu_callin_mask))
+ pr_debug("CPU%d: has booted.\n", cpu);
+ else {
boot_error = 1;
if (*((volatile unsigned char *)trampoline_base)
== 0xA5)
/* trampoline started but...? */
- printk(KERN_ERR "Stuck ??\n");
+ pr_err("CPU%d: Stuck ??\n", cpu);
else
/* trampoline code not run */
- printk(KERN_ERR "Not responding.\n");
+ pr_err("CPU%d: Not responding.\n", cpu);
if (apic->inquire_remote_apic)
apic->inquire_remote_apic(apicid);
}
@@ -1293,14 +1308,16 @@ void native_cpu_die(unsigned int cpu)
for (i = 0; i < 10; i++) {
/* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
- printk(KERN_INFO "CPU %d is now offline\n", cpu);
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+
if (1 == num_online_cpus())
alternatives_smp_switch(0);
return;
}
msleep(100);
}
- printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+ pr_err("CPU %u didn't die...\n", cpu);
}
void play_dead_common(void)
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index c3eb207181f..922eefbb3f6 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -53,17 +53,19 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable)
}
static const struct stacktrace_ops save_stack_ops = {
- .warning = save_stack_warning,
- .warning_symbol = save_stack_warning_symbol,
- .stack = save_stack_stack,
- .address = save_stack_address,
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
+ .stack = save_stack_stack,
+ .address = save_stack_address,
+ .walk_stack = print_context_stack,
};
static const struct stacktrace_ops save_stack_ops_nosched = {
- .warning = save_stack_warning,
- .warning_symbol = save_stack_warning_symbol,
- .stack = save_stack_stack,
- .address = save_stack_address_nosched,
+ .warning = save_stack_warning,
+ .warning_symbol = save_stack_warning_symbol,
+ .stack = save_stack_stack,
+ .address = save_stack_address_nosched,
+ .walk_stack = print_context_stack,
};
/*
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 1884a8d12bf..dee1ff7cba5 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -24,31 +24,6 @@
#include <asm/syscalls.h>
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file *file = NULL;
- struct mm_struct *mm = current->mm;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
/*
* Perform the select(nd, in, out, ex, tv) and mmap() system
* calls. Linux/i386 didn't use to be able to handle more than
@@ -77,7 +52,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
if (a.offset & ~PAGE_MASK)
goto out;
- err = sys_mmap2(a.addr, a.len, a.prot, a.flags,
+ err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
a.fd, a.offset >> PAGE_SHIFT);
out:
return err;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 45e00eb09c3..8aa2057efd1 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -23,26 +23,11 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, fd, unsigned long, off)
{
long error;
- struct file *file;
-
error = -EINVAL;
if (off & ~PAGE_MASK)
goto out;
- error = -EBADF;
- file = NULL;
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
+ error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return error;
}
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 70c2125d55b..15228b5d3eb 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
.long sys_ni_syscall /* reserved for streams2 */
.long ptregs_vfork /* 190 */
.long sys_getrlimit
- .long sys_mmap2
+ .long sys_mmap_pgoff
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
index cd022121cab..c652ef62742 100644
--- a/arch/x86/kernel/trampoline.c
+++ b/arch/x86/kernel/trampoline.c
@@ -12,21 +12,19 @@
#endif
/* ready for x86_64 and x86 */
-unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base;
void __init reserve_trampoline_memory(void)
{
-#ifdef CONFIG_X86_32
- /*
- * But first pinch a few for the stack/trampoline stuff
- * FIXME: Don't need the extra page at 4K, but need to fix
- * trampoline before removing it. (see the GDT stuff)
- */
- reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
-#endif
+ unsigned long mem;
+
/* Has to be in very low memory so we can execute real-mode AP code. */
- reserve_early(TRAMPOLINE_BASE, TRAMPOLINE_BASE + TRAMPOLINE_SIZE,
- "TRAMPOLINE");
+ mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE);
+ if (mem == -1L)
+ panic("Cannot allocate trampoline\n");
+
+ trampoline_base = __va(mem);
+ reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE");
}
/*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index cd982f48e23..597683aa5ba 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -763,6 +763,7 @@ void mark_tsc_unstable(char *reason)
{
if (!tsc_unstable) {
tsc_unstable = 1;
+ sched_clock_stable = 0;
printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult)
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index eed156851f5..0aa5fed8b9e 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count;
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
-static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
@@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void)
* previous TSC that was measured (possibly on
* another CPU) and update the previous TSC timestamp.
*/
- __raw_spin_lock(&sync_lock);
+ arch_spin_lock(&sync_lock);
prev = last_tsc;
rdtsc_barrier();
now = get_cycles();
rdtsc_barrier();
last_tsc = now;
- __raw_spin_unlock(&sync_lock);
+ arch_spin_unlock(&sync_lock);
/*
* Be nice every now and then (and also check whether
@@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void)
* we saw a time-warp of the TSC going backwards:
*/
if (unlikely(prev > now)) {
- __raw_spin_lock(&sync_lock);
+ arch_spin_lock(&sync_lock);
max_warp = max(max_warp, prev - now);
nr_warps++;
- __raw_spin_unlock(&sync_lock);
+ arch_spin_unlock(&sync_lock);
}
}
WARN(!(now-start),
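
The rename visible here is purely mechanical: the lowest-level lock type is now arch_spinlock_t with arch_spin_lock()/arch_spin_unlock(), freeing the raw_* names for other use. The usage pattern stays what the warp check shows; a minimal sketch:

#include <linux/spinlock.h>

/* architecture-level lock: no lockdep, no debug checks; only for code
 * that must stay outside the normal spinlock machinery */
static arch_spinlock_t my_low_level_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void my_critical_section(void)
{
	arch_spin_lock(&my_low_level_lock);
	/* touch the shared state */
	arch_spin_unlock(&my_low_level_lock);
}
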
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 61d805df4c9..ece73d8e324 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -215,8 +215,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
unsigned long mmr_offset;
unsigned mmr_pnode;
- dest = set_desc_affinity(desc, mask);
- if (dest == BAD_APICID)
+ if (set_desc_affinity(desc, mask, &dest))
return -1;
mmr_value = 0;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 9c4e6253905..5ffb5622f79 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -197,9 +197,8 @@ out:
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
-int sys_vm86old(struct pt_regs *regs)
+int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
{
- struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs->bx;
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
* This remains on the stack until we
@@ -227,7 +226,7 @@ out:
}
-int sys_vm86(struct pt_regs *regs)
+int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
@@ -239,12 +238,12 @@ int sys_vm86(struct pt_regs *regs)
struct vm86plus_struct __user *v86;
tsk = current;
- switch (regs->bx) {
+ switch (cmd) {
case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ:
case VM86_GET_IRQ_BITS:
case VM86_GET_AND_RESET_IRQ:
- ret = do_vm86_irq_handling(regs->bx, (int)regs->cx);
+ ret = do_vm86_irq_handling(cmd, (int)arg);
goto out;
case VM86_PLUS_INSTALL_CHECK:
/*
@@ -261,7 +260,7 @@ int sys_vm86(struct pt_regs *regs)
ret = -EPERM;
if (tsk->thread.saved_sp0)
goto out;
- v86 = (struct vm86plus_struct __user *)regs->cx;
+ v86 = (struct vm86plus_struct __user *)arg;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
offsetof(struct kernel_vm86_struct, regs32) -
sizeof(info.regs));
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index f3f2104408d..f92a0da608c 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -319,9 +319,7 @@ SECTIONS
__brk_limit = .;
}
- .end : AT(ADDR(.end) - LOAD_OFFSET) {
- _end = .;
- }
+ _end = .;
STABS_DEBUG
DWARF_DEBUG
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index a1029769b6f..619f7f88b8c 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -17,8 +17,6 @@
EXPORT_SYMBOL(mcount);
#endif
-EXPORT_SYMBOL(kernel_thread);
-
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
@@ -56,4 +54,6 @@ EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(init_level4_pgt);
-EXPORT_SYMBOL(load_gs_index);
+#ifndef CONFIG_PARAVIRT
+EXPORT_SYMBOL(native_load_gs_index);
+#endif
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index fab7440c9bb..296aba49472 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -29,6 +29,8 @@
* Based on QEMU and Xen.
*/
+#define pr_fmt(fmt) "pit: " fmt
+
#include <linux/kvm_host.h>
#include "irq.h"
@@ -262,7 +264,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
static void destroy_pit_timer(struct kvm_timer *pt)
{
- pr_debug("pit: execute del timer!\n");
+ pr_debug("execute del timer!\n");
hrtimer_cancel(&pt->timer);
}
@@ -284,7 +286,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
- pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
+ pr_debug("create pit timer, interval is %llu nsec\n", interval);
 	/* TODO: The new value only takes effect after the counter is retriggered */
hrtimer_cancel(&pt->timer);
@@ -309,7 +311,7 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
WARN_ON(!mutex_is_locked(&ps->lock));
- pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);
+ pr_debug("load_count val is %d, channel is %d\n", val, channel);
/*
* The largest possible initial count is 0; this is equivalent
@@ -395,8 +397,8 @@ static int pit_ioport_write(struct kvm_io_device *this,
mutex_lock(&pit_state->lock);
if (val != 0)
- pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
- (unsigned int)addr, len, val);
+ pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
+ (unsigned int)addr, len, val);
if (addr == 3) {
channel = val >> 6;
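
The pr_fmt() definition added at the top of i8254.c is what lets the pr_debug() calls below drop their hand-written "pit: " prefix: the kernel's pr_* macros expand pr_fmt(fmt) around the caller's format string at compile time. A minimal userspace sketch of the same macro trick (illustration only, not kernel code):

#include <stdio.h>

/* Mimic the kernel's pr_fmt mechanism: the pr_* wrappers wrap the
 * caller's format string in pr_fmt(), so one #define prefixes every
 * message emitted from this file. */
#define pr_fmt(fmt) "pit: " fmt
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_debug("create pit timer, interval is %llu nsec\n", 1000000ULL);
	return 0;	/* prints: pit: create pit timer, interval is 1000000 nsec */
}
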
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3de0b37ec03..1d9b33843c8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
static int svm_hardware_enable(void *garbage)
{
- struct svm_cpu_data *svm_data;
+ struct svm_cpu_data *sd;
uint64_t efer;
struct descriptor_table gdt_descr;
struct desc_struct *gdt;
@@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
me);
return -EINVAL;
}
- svm_data = per_cpu(svm_data, me);
+ sd = per_cpu(svm_data, me);
- if (!svm_data) {
+ if (!sd) {
printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
me);
return -EINVAL;
}
- svm_data->asid_generation = 1;
- svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
- svm_data->next_asid = svm_data->max_asid + 1;
+ sd->asid_generation = 1;
+ sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+ sd->next_asid = sd->max_asid + 1;
kvm_get_gdt(&gdt_descr);
gdt = (struct desc_struct *)gdt_descr.base;
- svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+ sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
wrmsrl(MSR_EFER, efer | EFER_SVME);
- wrmsrl(MSR_VM_HSAVE_PA,
- page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
+ wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
return 0;
}
static void svm_cpu_uninit(int cpu)
{
- struct svm_cpu_data *svm_data
- = per_cpu(svm_data, raw_smp_processor_id());
+ struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
- if (!svm_data)
+ if (!sd)
return;
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
- __free_page(svm_data->save_area);
- kfree(svm_data);
+ __free_page(sd->save_area);
+ kfree(sd);
}
static int svm_cpu_init(int cpu)
{
- struct svm_cpu_data *svm_data;
+ struct svm_cpu_data *sd;
int r;
- svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
- if (!svm_data)
+ sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
+ if (!sd)
return -ENOMEM;
- svm_data->cpu = cpu;
- svm_data->save_area = alloc_page(GFP_KERNEL);
+ sd->cpu = cpu;
+ sd->save_area = alloc_page(GFP_KERNEL);
r = -ENOMEM;
- if (!svm_data->save_area)
+ if (!sd->save_area)
goto err_1;
- per_cpu(svm_data, cpu) = svm_data;
+ per_cpu(svm_data, cpu) = sd;
return 0;
err_1:
- kfree(svm_data);
+ kfree(sd);
return r;
}
@@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
#endif
}
-static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
+static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
- if (svm_data->next_asid > svm_data->max_asid) {
- ++svm_data->asid_generation;
- svm_data->next_asid = 1;
+ if (sd->next_asid > sd->max_asid) {
+ ++sd->asid_generation;
+ sd->next_asid = 1;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
- svm->asid_generation = svm_data->asid_generation;
- svm->vmcb->control.asid = svm_data->next_asid++;
+ svm->asid_generation = sd->asid_generation;
+ svm->vmcb->control.asid = sd->next_asid++;
}
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
@@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
{
int cpu = raw_smp_processor_id();
- struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
- svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+ sd->tss_desc->type = 9; /* available 32/64-bit TSS */
load_TR_desc();
}
@@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
{
int cpu = raw_smp_processor_id();
- struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
/* FIXME: handle wraparound of asid_generation */
- if (svm->asid_generation != svm_data->asid_generation)
- new_asid(svm, svm_data);
+ if (svm->asid_generation != sd->asid_generation)
+ new_asid(svm, sd);
}
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index a2d6472895f..cffd754f303 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -5,7 +5,7 @@
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
- cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
+ cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
$(call cmd,inat_tables)
@@ -14,15 +14,15 @@ $(obj)/inat.o: $(obj)/inat-tables.c
clean-files := inat-tables.c
-obj-$(CONFIG_SMP) := msr.o
+obj-$(CONFIG_SMP) += msr-smp.o
lib-y := delay.o
lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-lib-y += insn.o inat.o
+lib-$(CONFIG_KPROBES) += insn.o inat.o
-obj-y += msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
new file mode 100644
index 00000000000..a6b1b86d225
--- /dev/null
+++ b/arch/x86/lib/msr-smp.c
@@ -0,0 +1,204 @@
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/smp.h>
+#include <asm/msr.h>
+
+static void __rdmsr_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+ struct msr *reg;
+ int this_cpu = raw_smp_processor_id();
+
+ if (rv->msrs)
+ reg = per_cpu_ptr(rv->msrs, this_cpu);
+ else
+ reg = &rv->reg;
+
+ rdmsr(rv->msr_no, reg->l, reg->h);
+}
+
+static void __wrmsr_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+ struct msr *reg;
+ int this_cpu = raw_smp_processor_id();
+
+ if (rv->msrs)
+ reg = per_cpu_ptr(rv->msrs, this_cpu);
+ else
+ reg = &rv->reg;
+
+ wrmsr(rv->msr_no, reg->l, reg->h);
+}
+
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+ *l = rv.reg.l;
+ *h = rv.reg.h;
+
+ return err;
+}
+EXPORT_SYMBOL(rdmsr_on_cpu);
+
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ rv.reg.l = l;
+ rv.reg.h = h;
+ err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+ return err;
+}
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
+ struct msr *msrs,
+ void (*msr_func) (void *info))
+{
+ struct msr_info rv;
+ int this_cpu;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msrs = msrs;
+ rv.msr_no = msr_no;
+
+ this_cpu = get_cpu();
+
+ if (cpumask_test_cpu(this_cpu, mask))
+ msr_func(&rv);
+
+ smp_call_function_many(mask, msr_func, &rv, 1);
+ put_cpu();
+}
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask: which CPUs
+ * @msr_no: which MSR
+ * @msrs: array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+{
+ __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask: which CPUs
+ * @msr_no: which MSR
+ * @msrs: array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+{
+ __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);
+
+/* These "safe" variants are slower and should be used when the target MSR
+ may not actually exist. */
+static void __rdmsr_safe_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+
+ rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+}
+
+static void __wrmsr_safe_on_cpu(void *info)
+{
+ struct msr_info *rv = info;
+
+ rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
+}
+
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+ *l = rv.reg.l;
+ *h = rv.reg.h;
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);
+
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+ int err;
+ struct msr_info rv;
+
+ memset(&rv, 0, sizeof(rv));
+
+ rv.msr_no = msr_no;
+ rv.reg.l = l;
+ rv.reg.h = h;
+ err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_on_cpu);
+
+/*
+ * These variants are significantly slower, but allow control over
+ * the entire 32-bit GPR set.
+ */
+static void __rdmsr_safe_regs_on_cpu(void *info)
+{
+ struct msr_regs_info *rv = info;
+
+ rv->err = rdmsr_safe_regs(rv->regs);
+}
+
+static void __wrmsr_safe_regs_on_cpu(void *info)
+{
+ struct msr_regs_info *rv = info;
+
+ rv->err = wrmsr_safe_regs(rv->regs);
+}
+
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+ int err;
+ struct msr_regs_info rv;
+
+ rv.regs = regs;
+ rv.err = -EIO;
+ err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
+
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+{
+ int err;
+ struct msr_regs_info rv;
+
+ rv.regs = regs;
+ rv.err = -EIO;
+ err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
+
+ return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
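
The helpers above bounce the MSR access to the target CPU with smp_call_function_single(), so they have to be called from a context that can send and wait for IPIs (not with interrupts disabled). A minimal sketch of a caller (not part of this patch; MSR 0x10, IA32_TSC, is used purely as an illustration):

#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/msr.h>

/* Sketch only: read MSR 0x10 (IA32_TSC) on CPU 1 via the cross-CPU
 * helper; rdmsr_on_cpu() runs the actual rdmsr on the target CPU. */
static int example_read_remote_tsc(void)
{
	u32 lo, hi;
	int err;

	err = rdmsr_on_cpu(1, 0x10, &lo, &hi);
	if (err)
		return err;

	pr_info("TSC on CPU1: %#llx\n", ((u64)hi << 32) | lo);
	return 0;
}
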
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 41628b104b9..8f8eebdca7d 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -1,218 +1,23 @@
#include <linux/module.h>
#include <linux/preempt.h>
-#include <linux/smp.h>
#include <asm/msr.h>
-struct msr_info {
- u32 msr_no;
- struct msr reg;
- struct msr *msrs;
- int off;
- int err;
-};
-
-static void __rdmsr_on_cpu(void *info)
-{
- struct msr_info *rv = info;
- struct msr *reg;
- int this_cpu = raw_smp_processor_id();
-
- if (rv->msrs)
- reg = &rv->msrs[this_cpu - rv->off];
- else
- reg = &rv->reg;
-
- rdmsr(rv->msr_no, reg->l, reg->h);
-}
-
-static void __wrmsr_on_cpu(void *info)
-{
- struct msr_info *rv = info;
- struct msr *reg;
- int this_cpu = raw_smp_processor_id();
-
- if (rv->msrs)
- reg = &rv->msrs[this_cpu - rv->off];
- else
- reg = &rv->reg;
-
- wrmsr(rv->msr_no, reg->l, reg->h);
-}
-
-int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
- *l = rv.reg.l;
- *h = rv.reg.h;
-
- return err;
-}
-EXPORT_SYMBOL(rdmsr_on_cpu);
-
-int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- rv.reg.l = l;
- rv.reg.h = h;
- err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
-
- return err;
-}
-EXPORT_SYMBOL(wrmsr_on_cpu);
-
-static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
- struct msr *msrs,
- void (*msr_func) (void *info))
-{
- struct msr_info rv;
- int this_cpu;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.off = cpumask_first(mask);
- rv.msrs = msrs;
- rv.msr_no = msr_no;
-
- this_cpu = get_cpu();
-
- if (cpumask_test_cpu(this_cpu, mask))
- msr_func(&rv);
-
- smp_call_function_many(mask, msr_func, &rv, 1);
- put_cpu();
-}
-
-/* rdmsr on a bunch of CPUs
- *
- * @mask: which CPUs
- * @msr_no: which MSR
- * @msrs: array of MSR values
- *
- */
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
-{
- __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
-}
-EXPORT_SYMBOL(rdmsr_on_cpus);
-
-/*
- * wrmsr on a bunch of CPUs
- *
- * @mask: which CPUs
- * @msr_no: which MSR
- * @msrs: array of MSR values
- *
- */
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+struct msr *msrs_alloc(void)
{
- __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
-}
-EXPORT_SYMBOL(wrmsr_on_cpus);
-
-/* These "safe" variants are slower and should be used when the target MSR
- may not actually exist. */
-static void __rdmsr_safe_on_cpu(void *info)
-{
- struct msr_info *rv = info;
-
- rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
-}
-
-static void __wrmsr_safe_on_cpu(void *info)
-{
- struct msr_info *rv = info;
-
- rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
-}
-
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
- int err;
- struct msr_info rv;
-
- memset(&rv, 0, sizeof(rv));
-
- rv.msr_no = msr_no;
- err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
- *l = rv.reg.l;
- *h = rv.reg.h;
-
- return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
-
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
- int err;
- struct msr_info rv;
+ struct msr *msrs = NULL;
- memset(&rv, 0, sizeof(rv));
+ msrs = alloc_percpu(struct msr);
+ if (!msrs) {
+ pr_warning("%s: error allocating msrs\n", __func__);
+ return NULL;
+ }
- rv.msr_no = msr_no;
- rv.reg.l = l;
- rv.reg.h = h;
- err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-
- return err ? err : rv.err;
+ return msrs;
}
-EXPORT_SYMBOL(wrmsr_safe_on_cpu);
-
-/*
- * These variants are significantly slower, but allows control over
- * the entire 32-bit GPR set.
- */
-struct msr_regs_info {
- u32 *regs;
- int err;
-};
+EXPORT_SYMBOL(msrs_alloc);
-static void __rdmsr_safe_regs_on_cpu(void *info)
+void msrs_free(struct msr *msrs)
{
- struct msr_regs_info *rv = info;
-
- rv->err = rdmsr_safe_regs(rv->regs);
-}
-
-static void __wrmsr_safe_regs_on_cpu(void *info)
-{
- struct msr_regs_info *rv = info;
-
- rv->err = wrmsr_safe_regs(rv->regs);
-}
-
-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
- int err;
- struct msr_regs_info rv;
-
- rv.regs = regs;
- rv.err = -EIO;
- err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
-
- return err ? err : rv.err;
-}
-EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
-
-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
-{
- int err;
- struct msr_regs_info rv;
-
- rv.regs = regs;
- rv.err = -EIO;
- err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
-
- return err ? err : rv.err;
+ free_percpu(msrs);
}
-EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
+EXPORT_SYMBOL(msrs_free);
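
msrs_alloc()/msrs_free() pair with the rdmsr_on_cpus()/wrmsr_on_cpus() helpers that moved to msr-smp.c above: the buffer is now per-CPU storage, which is why __rdmsr_on_cpu() indexes it with per_cpu_ptr() instead of the old "this_cpu - rv->off" array offset. A sketch of how a caller might combine them (assumed usage, not part of this patch):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/msr.h>

/* Sketch only: collect one MSR from every online CPU into the per-CPU
 * buffer returned by msrs_alloc(), then look at this CPU's copy.
 * msr_no is whatever MSR the caller cares about. */
static int example_gather_msr(u32 msr_no)
{
	struct msr *msrs, *m;
	int cpu;

	msrs = msrs_alloc();
	if (!msrs)
		return -ENOMEM;

	rdmsr_on_cpus(cpu_online_mask, msr_no, msrs);

	cpu = get_cpu();
	m = per_cpu_ptr(msrs, cpu);
	pr_info("MSR %#x on CPU%d: %#llx\n", msr_no, cpu,
		((u64)m->h << 32) | m->l);
	put_cpu();

	msrs_free(msrs);
	return 0;
}
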
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 07bcc309cfd..c0f6198565e 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -5,6 +5,8 @@
* 2008 Pekka Paalanen <pq@iki.fi>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
@@ -136,7 +138,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
pte_t *pte = lookup_address(f->page, &level);
if (!pte) {
- pr_err("kmmio: no pte for page 0x%08lx\n", f->page);
+ pr_err("no pte for page 0x%08lx\n", f->page);
return -1;
}
@@ -148,7 +150,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
clear_pte_presence(pte, clear, &f->old_presence);
break;
default:
- pr_err("kmmio: unexpected page level 0x%x.\n", level);
+ pr_err("unexpected page level 0x%x.\n", level);
return -1;
}
@@ -170,13 +172,14 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
int ret;
- WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+ WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
if (f->armed) {
- pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
- f->page, f->count, !!f->old_presence);
+ pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+ f->page, f->count, !!f->old_presence);
}
ret = clear_page_presence(f, true);
- WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+ f->page);
f->armed = true;
return ret;
}
@@ -240,24 +243,21 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
* condition needs handling by do_page_fault(), the
* page really not being present is the most common.
*/
- pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
- addr, smp_processor_id());
+ pr_debug("secondary hit for 0x%08lx CPU %d.\n",
+ addr, smp_processor_id());
if (!faultpage->old_presence)
- pr_info("kmmio: unexpected secondary hit for "
- "address 0x%08lx on CPU %d.\n", addr,
- smp_processor_id());
+ pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
+ addr, smp_processor_id());
} else {
/*
* Prevent overwriting already in-flight context.
* This should not happen, let's hope disarming at
* least prevents a panic.
*/
- pr_emerg("kmmio: recursive probe hit on CPU %d, "
- "for address 0x%08lx. Ignoring.\n",
- smp_processor_id(), addr);
- pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
- ctx->addr);
+ pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
+ smp_processor_id(), addr);
+ pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
disarm_kmmio_fault_page(faultpage);
}
goto no_kmmio_ctx;
@@ -316,8 +316,8 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
* something external causing them (f.e. using a debugger while
* mmio tracing enabled), or erroneous behaviour
*/
- pr_warning("kmmio: unexpected debug trap on CPU %d.\n",
- smp_processor_id());
+ pr_warning("unexpected debug trap on CPU %d.\n",
+ smp_processor_id());
goto out;
}
@@ -425,7 +425,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
if (add_kmmio_fault_page(p->addr + size))
- pr_err("kmmio: Unable to set page fault.\n");
+ pr_err("Unable to set page fault.\n");
size += PAGE_SIZE;
}
out:
@@ -490,7 +490,7 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
* 2. remove_kmmio_fault_pages()
* Remove the pages from kmmio_page_table.
* 3. rcu_free_kmmio_fault_pages()
- * Actally free the kmmio_fault_page structs as with RCU.
+ * Actually free the kmmio_fault_page structs as with RCU.
*/
void unregister_kmmio_probe(struct kmmio_probe *p)
{
@@ -511,7 +511,7 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
if (!drelease) {
- pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
+ pr_crit("leaking kmmio_fault_page objects.\n");
return;
}
drelease->release_list = release_list;
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 132772a8ec5..34a3291ca10 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -19,6 +19,9 @@
*
* Derived from the read-mod example from relay-examples by Tom Zanussi.
*/
+
+#define pr_fmt(fmt) "mmiotrace: " fmt
+
#define DEBUG 1
#include <linux/module.h>
@@ -36,8 +39,6 @@
#include "pf_in.h"
-#define NAME "mmiotrace: "
-
struct trap_reason {
unsigned long addr;
unsigned long ip;
@@ -96,17 +97,18 @@ static void print_pte(unsigned long address)
pte_t *pte = lookup_address(address, &level);
if (!pte) {
- pr_err(NAME "Error in %s: no pte for page 0x%08lx\n",
- __func__, address);
+ pr_err("Error in %s: no pte for page 0x%08lx\n",
+ __func__, address);
return;
}
if (level == PG_LEVEL_2M) {
- pr_emerg(NAME "4MB pages are not currently supported: "
- "0x%08lx\n", address);
+ pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
+ address);
BUG();
}
- pr_info(NAME "pte for 0x%lx: 0x%llx 0x%llx\n", address,
+ pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
+ address,
(unsigned long long)pte_val(*pte),
(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}
@@ -118,22 +120,21 @@ static void print_pte(unsigned long address)
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
- pr_emerg(NAME "unexpected fault for address: 0x%08lx, "
- "last fault for address: 0x%08lx\n",
- addr, my_reason->addr);
+ pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
+ addr, my_reason->addr);
print_pte(addr);
print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
#ifdef __i386__
pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
- regs->ax, regs->bx, regs->cx, regs->dx);
+ regs->ax, regs->bx, regs->cx, regs->dx);
pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
- regs->si, regs->di, regs->bp, regs->sp);
+ regs->si, regs->di, regs->bp, regs->sp);
#else
pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
- regs->ax, regs->cx, regs->dx);
+ regs->ax, regs->cx, regs->dx);
pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
- regs->si, regs->di, regs->bp, regs->sp);
+ regs->si, regs->di, regs->bp, regs->sp);
#endif
put_cpu_var(pf_reason);
BUG();
@@ -213,7 +214,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
/* this should always return the active_trace count to 0 */
my_reason->active_traces--;
if (my_reason->active_traces) {
- pr_emerg(NAME "unexpected post handler");
+ pr_emerg("unexpected post handler");
BUG();
}
@@ -244,7 +245,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
};
if (!trace) {
- pr_err(NAME "kmalloc failed in ioremap\n");
+ pr_err("kmalloc failed in ioremap\n");
return;
}
@@ -282,8 +283,8 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
if (!is_enabled()) /* recheck and proper locking in *_core() */
return;
- pr_debug(NAME "ioremap_*(0x%llx, 0x%lx) = %p\n",
- (unsigned long long)offset, size, addr);
+ pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
+ (unsigned long long)offset, size, addr);
if ((filter_offset) && (offset != filter_offset))
return;
ioremap_trace_core(offset, size, addr);
@@ -301,7 +302,7 @@ static void iounmap_trace_core(volatile void __iomem *addr)
struct remap_trace *tmp;
struct remap_trace *found_trace = NULL;
- pr_debug(NAME "Unmapping %p.\n", addr);
+ pr_debug("Unmapping %p.\n", addr);
spin_lock_irq(&trace_lock);
if (!is_enabled())
@@ -363,9 +364,8 @@ static void clear_trace_list(void)
* Caller also ensures is_enabled() cannot change.
*/
list_for_each_entry(trace, &trace_list, list) {
- pr_notice(NAME "purging non-iounmapped "
- "trace @0x%08lx, size 0x%lx.\n",
- trace->probe.addr, trace->probe.len);
+ pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
+ trace->probe.addr, trace->probe.len);
if (!nommiotrace)
unregister_kmmio_probe(&trace->probe);
}
@@ -387,7 +387,7 @@ static void enter_uniprocessor(void)
if (downed_cpus == NULL &&
!alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
- pr_notice(NAME "Failed to allocate mask\n");
+ pr_notice("Failed to allocate mask\n");
goto out;
}
@@ -395,20 +395,19 @@ static void enter_uniprocessor(void)
cpumask_copy(downed_cpus, cpu_online_mask);
cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
if (num_online_cpus() > 1)
- pr_notice(NAME "Disabling non-boot CPUs...\n");
+ pr_notice("Disabling non-boot CPUs...\n");
put_online_cpus();
for_each_cpu(cpu, downed_cpus) {
err = cpu_down(cpu);
if (!err)
- pr_info(NAME "CPU%d is down.\n", cpu);
+ pr_info("CPU%d is down.\n", cpu);
else
- pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
+ pr_err("Error taking CPU%d down: %d\n", cpu, err);
}
out:
if (num_online_cpus() > 1)
- pr_warning(NAME "multiple CPUs still online, "
- "may miss events.\n");
+ pr_warning("multiple CPUs still online, may miss events.\n");
}
/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
@@ -420,13 +419,13 @@ static void __ref leave_uniprocessor(void)
if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
return;
- pr_notice(NAME "Re-enabling CPUs...\n");
+ pr_notice("Re-enabling CPUs...\n");
for_each_cpu(cpu, downed_cpus) {
err = cpu_up(cpu);
if (!err)
- pr_info(NAME "enabled CPU%d.\n", cpu);
+ pr_info("enabled CPU%d.\n", cpu);
else
- pr_err(NAME "cannot re-enable CPU%d: %d\n", cpu, err);
+ pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
}
}
@@ -434,8 +433,8 @@ static void __ref leave_uniprocessor(void)
static void enter_uniprocessor(void)
{
if (num_online_cpus() > 1)
- pr_warning(NAME "multiple CPUs are online, may miss events. "
- "Suggest booting with maxcpus=1 kernel argument.\n");
+ pr_warning("multiple CPUs are online, may miss events. "
+ "Suggest booting with maxcpus=1 kernel argument.\n");
}
static void leave_uniprocessor(void)
@@ -450,13 +449,13 @@ void enable_mmiotrace(void)
goto out;
if (nommiotrace)
- pr_info(NAME "MMIO tracing disabled.\n");
+ pr_info("MMIO tracing disabled.\n");
kmmio_init();
enter_uniprocessor();
spin_lock_irq(&trace_lock);
atomic_inc(&mmiotrace_enabled);
spin_unlock_irq(&trace_lock);
- pr_info(NAME "enabled.\n");
+ pr_info("enabled.\n");
out:
mutex_unlock(&mmiotrace_mutex);
}
@@ -475,7 +474,7 @@ void disable_mmiotrace(void)
clear_trace_list(); /* guarantees: no more kmmio callbacks */
leave_uniprocessor();
kmmio_cleanup();
- pr_info(NAME "disabled.\n");
+ pr_info("disabled.\n");
out:
mutex_unlock(&mmiotrace_mutex);
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 66b55d6e69e..ae9648eb1c7 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -704,9 +704,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (!range_is_allowed(pfn, size))
return 0;
- if (file->f_flags & O_SYNC) {
+ if (file->f_flags & O_DSYNC)
flags = _PAGE_CACHE_UC_MINUS;
- }
#ifdef CONFIG_X86_32
/*
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 6f8aa33031c..9324f13492d 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -267,6 +267,8 @@ int __init get_memcfg_from_srat(void)
e820_register_active_regions(chunk->nid, chunk->start_pfn,
min(chunk->end_pfn, max_pfn));
}
+ /* sort to handle out-of-order entries in the SRAT */
+ sort_node_map();
for_each_online_node(nid) {
unsigned long start = node_start_pfn[nid];
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index d8907548966..a27124185fc 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -317,7 +317,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
unsigned long s = nodes[i].start >> PAGE_SHIFT;
unsigned long e = nodes[i].end >> PAGE_SHIFT;
pxmram += e - s;
- pxmram -= absent_pages_in_range(s, e);
+ pxmram -= __absent_pages_in_range(i, s, e);
if ((long)pxmram < 0)
pxmram = 0;
}
@@ -373,6 +373,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
for_each_node_mask(i, nodes_parsed)
e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
nodes[i].end >> PAGE_SHIFT);
+ /* sort to handle out-of-order entries in the SRAT */
+ sort_node_map();
if (!nodes_cover_memory(nodes)) {
bad_srat();
return -1;
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 044897be021..3855096c59b 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -41,10 +41,11 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
}
static struct stacktrace_ops backtrace_ops = {
- .warning = backtrace_warning,
- .warning_symbol = backtrace_warning_symbol,
- .stack = backtrace_stack,
- .address = backtrace_address,
+ .warning = backtrace_warning,
+ .warning_symbol = backtrace_warning_symbol,
+ .stack = backtrace_stack,
+ .address = backtrace_address,
+ .walk_stack = print_context_stack,
};
struct frame_head {
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index d49202e740e..564b008a51c 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -15,3 +15,8 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-y += common.o early.o
obj-y += amd_bus.o
+obj-$(CONFIG_X86_64) += bus_numa.o intel_bus.o
+
+ifeq ($(CONFIG_PCI_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 1014eb4bfc3..959e548a703 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -7,6 +7,7 @@
#include <asm/pci_x86.h>
struct pci_root_info {
+ struct acpi_device *bridge;
char *name;
unsigned int res_num;
struct resource *res;
@@ -58,6 +59,30 @@ bus_has_transparent_bridge(struct pci_bus *bus)
return false;
}
+static void
+align_resource(struct acpi_device *bridge, struct resource *res)
+{
+ int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;
+
+ /*
+ * Host bridge windows are not BARs, but the decoders on the PCI side
+ * that claim this address space have starting alignment and length
+ * constraints, so fix any obvious BIOS goofs.
+ */
+ if (!IS_ALIGNED(res->start, align)) {
+ dev_printk(KERN_DEBUG, &bridge->dev,
+ "host bridge window %pR invalid; "
+ "aligning start to %d-byte boundary\n", res, align);
+ res->start &= ~(align - 1);
+ }
+ if (!IS_ALIGNED(res->end + 1, align)) {
+ dev_printk(KERN_DEBUG, &bridge->dev,
+ "host bridge window %pR invalid; "
+ "aligning end to %d-byte boundary\n", res, align);
+ res->end = ALIGN(res->end, align) - 1;
+ }
+}
+
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -91,11 +116,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
start = addr.minimum + addr.translation_offset;
end = start + addr.address_length - 1;
if (info->res_num >= max_root_bus_resources) {
- printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
- "from %s for %s due to _CRS returning more than "
- "%d resource descriptors\n", (unsigned long) start,
- (unsigned long) end, root->name, info->name,
- max_root_bus_resources);
+ if (pci_probe & PCI_USE__CRS)
+ printk(KERN_WARNING "PCI: Failed to allocate "
+ "0x%lx-0x%lx from %s for %s due to _CRS "
+ "returning more than %d resource descriptors\n",
+ (unsigned long) start, (unsigned long) end,
+ root->name, info->name, max_root_bus_resources);
return AE_OK;
}
@@ -105,14 +131,28 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
res->start = start;
res->end = end;
res->child = NULL;
+ align_resource(info->bridge, res);
+
+ if (!(pci_probe & PCI_USE__CRS)) {
+ dev_printk(KERN_DEBUG, &info->bridge->dev,
+ "host bridge window %pR (ignored)\n", res);
+ return AE_OK;
+ }
if (insert_resource(root, res)) {
- printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
- "from %s for %s\n", (unsigned long) res->start,
- (unsigned long) res->end, root->name, info->name);
+ dev_err(&info->bridge->dev,
+ "can't allocate host bridge window %pR\n", res);
} else {
info->bus->resource[info->res_num] = res;
info->res_num++;
+ if (addr.translation_offset)
+ dev_info(&info->bridge->dev, "host bridge window %pR "
+ "(PCI address [%#llx-%#llx])\n",
+ res, res->start - addr.translation_offset,
+ res->end - addr.translation_offset);
+ else
+ dev_info(&info->bridge->dev,
+ "host bridge window %pR\n", res);
}
return AE_OK;
}
@@ -124,6 +164,12 @@ get_current_resources(struct acpi_device *device, int busnum,
struct pci_root_info info;
size_t size;
+ if (!(pci_probe & PCI_USE__CRS))
+ dev_info(&device->dev,
+ "ignoring host bridge windows from ACPI; "
+ "boot with \"pci=use_crs\" to use them\n");
+
+ info.bridge = device;
info.bus = bus;
info.res_num = 0;
acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
@@ -163,8 +209,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
#endif
if (domain && !pci_domains_supported) {
- printk(KERN_WARNING "PCI: Multiple domains not supported "
- "(dom %d, bus %d)\n", domain, busnum);
+ printk(KERN_WARNING "pci_bus %04x:%02x: "
+ "ignored (multiple domains not supported)\n",
+ domain, busnum);
return NULL;
}
@@ -188,7 +235,8 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
*/
sd = kzalloc(sizeof(*sd), GFP_KERNEL);
if (!sd) {
- printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
+ printk(KERN_WARNING "pci_bus %04x:%02x: "
+ "ignored (out of memory)\n", domain, busnum);
return NULL;
}
@@ -209,9 +257,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
} else {
bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
if (bus) {
- if (pci_probe & PCI_USE__CRS)
- get_current_resources(device, busnum, domain,
- bus);
+ get_current_resources(device, busnum, domain, bus);
bus->subordinate = pci_scan_child_bus(bus);
}
}
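
For a concrete feel of the align_resource() fixup above (the window values are assumed, not taken from any real BIOS): with the 16-byte alignment used for MEM windows, a window reported as [0x1001, 0x1ffe] is widened to [0x1000, 0x1fff]. A standalone illustration of the same rounding:

#include <stdio.h>
#include <stdint.h>

/* Reproduce the rounding done by align_resource() for a hypothetical
 * mis-aligned MEM window [0x1001, 0x1ffe] with align = 16. */
int main(void)
{
	uint64_t start = 0x1001, end = 0x1ffe, align = 16;

	if (start & (align - 1))
		start &= ~(align - 1);				/* round start down */
	if ((end + 1) & (align - 1))
		end = ((end + align - 1) & ~(align - 1)) - 1;	/* ALIGN(end, align) - 1 */

	printf("[%#llx, %#llx]\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;	/* prints: [0x1000, 0x1fff] */
}
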
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 572ee9782f2..95ecbd49595 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -6,10 +6,10 @@
#ifdef CONFIG_X86_64
#include <asm/pci-direct.h>
-#include <asm/mpspec.h>
-#include <linux/cpumask.h>
#endif
+#include "bus_numa.h"
+
/*
* This discovers the pcibus <-> node mapping on AMD K8.
* also get peer root bus resource for io,mmio
@@ -17,67 +17,6 @@
#ifdef CONFIG_X86_64
-/*
- * sub bus (transparent) will use entres from 3 to store extra from root,
- * so need to make sure have enought slot there, increase PCI_BUS_NUM_RESOURCES?
- */
-#define RES_NUM 16
-struct pci_root_info {
- char name[12];
- unsigned int res_num;
- struct resource res[RES_NUM];
- int bus_min;
- int bus_max;
- int node;
- int link;
-};
-
-/* 4 at this time, it may become to 32 */
-#define PCI_ROOT_NR 4
-static int pci_root_num;
-static struct pci_root_info pci_root_info[PCI_ROOT_NR];
-
-void x86_pci_root_bus_res_quirks(struct pci_bus *b)
-{
- int i;
- int j;
- struct pci_root_info *info;
-
- /* don't go for it if _CRS is used already */
- if (b->resource[0] != &ioport_resource ||
- b->resource[1] != &iomem_resource)
- return;
-
- /* if only one root bus, don't need to anything */
- if (pci_root_num < 2)
- return;
-
- for (i = 0; i < pci_root_num; i++) {
- if (pci_root_info[i].bus_min == b->number)
- break;
- }
-
- if (i == pci_root_num)
- return;
-
- printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
- b->number);
-
- info = &pci_root_info[i];
- for (j = 0; j < info->res_num; j++) {
- struct resource *res;
- struct resource *root;
-
- res = &info->res[j];
- b->resource[j] = res;
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
- else
- root = &iomem_resource;
- insert_resource(root, res);
- }
-}
-
#define RANGE_NUM 16
struct res_range {
@@ -130,52 +69,6 @@ static void __init update_range(struct res_range *range, size_t start,
}
}
-static void __init update_res(struct pci_root_info *info, size_t start,
- size_t end, unsigned long flags, int merge)
-{
- int i;
- struct resource *res;
-
- if (!merge)
- goto addit;
-
- /* try to merge it with old one */
- for (i = 0; i < info->res_num; i++) {
- size_t final_start, final_end;
- size_t common_start, common_end;
-
- res = &info->res[i];
- if (res->flags != flags)
- continue;
-
- common_start = max((size_t)res->start, start);
- common_end = min((size_t)res->end, end);
- if (common_start > common_end + 1)
- continue;
-
- final_start = min((size_t)res->start, start);
- final_end = max((size_t)res->end, end);
-
- res->start = final_start;
- res->end = final_end;
- return;
- }
-
-addit:
-
- /* need to add that */
- if (info->res_num >= RES_NUM)
- return;
-
- res = &info->res[info->res_num];
- res->name = info->name;
- res->flags = flags;
- res->start = start;
- res->end = end;
- res->child = NULL;
- info->res_num++;
-}
-
struct pci_hostbridge_probe {
u32 bus;
u32 slot;
@@ -230,7 +123,6 @@ static int __init early_fill_mp_bus_info(void)
int j;
unsigned bus;
unsigned slot;
- int found;
int node;
int link;
int def_node;
@@ -247,7 +139,7 @@ static int __init early_fill_mp_bus_info(void)
if (!early_pci_allowed())
return -1;
- found = 0;
+ found_all_numa_early = 0;
for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
u32 id;
u16 device;
@@ -261,12 +153,12 @@ static int __init early_fill_mp_bus_info(void)
device = (id>>16) & 0xffff;
if (pci_probes[i].vendor == vendor &&
pci_probes[i].device == device) {
- found = 1;
+ found_all_numa_early = 1;
break;
}
}
- if (!found)
+ if (!found_all_numa_early)
return 0;
pci_root_num = 0;
@@ -488,7 +380,7 @@ static int __init early_fill_mp_bus_info(void)
info = &pci_root_info[i];
res_num = info->res_num;
busnum = info->bus_min;
- printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n",
+ printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
info->bus_min, info->bus_max, info->node, info->link);
for (j = 0; j < res_num; j++) {
res = &info->res[j];
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
new file mode 100644
index 00000000000..145df00e038
--- /dev/null
+++ b/arch/x86/pci/bus_numa.c
@@ -0,0 +1,101 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include "bus_numa.h"
+
+int pci_root_num;
+struct pci_root_info pci_root_info[PCI_ROOT_NR];
+int found_all_numa_early;
+
+void x86_pci_root_bus_res_quirks(struct pci_bus *b)
+{
+ int i;
+ int j;
+ struct pci_root_info *info;
+
+ /* don't go for it if _CRS is used already */
+ if (b->resource[0] != &ioport_resource ||
+ b->resource[1] != &iomem_resource)
+ return;
+
+ if (!pci_root_num)
+ return;
+
+ /* for AMD, if there is only one root bus, nothing needs to be done */
+ if (pci_root_num < 2 && found_all_numa_early)
+ return;
+
+ for (i = 0; i < pci_root_num; i++) {
+ if (pci_root_info[i].bus_min == b->number)
+ break;
+ }
+
+ if (i == pci_root_num)
+ return;
+
+ printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
+ b->number);
+
+ info = &pci_root_info[i];
+ for (j = 0; j < info->res_num; j++) {
+ struct resource *res;
+ struct resource *root;
+
+ res = &info->res[j];
+ b->resource[j] = res;
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ else
+ root = &iomem_resource;
+ insert_resource(root, res);
+ }
+}
+
+void __init update_res(struct pci_root_info *info, size_t start,
+ size_t end, unsigned long flags, int merge)
+{
+ int i;
+ struct resource *res;
+
+ if (start > end)
+ return;
+
+ if (!merge)
+ goto addit;
+
+ /* try to merge it with old one */
+ for (i = 0; i < info->res_num; i++) {
+ size_t final_start, final_end;
+ size_t common_start, common_end;
+
+ res = &info->res[i];
+ if (res->flags != flags)
+ continue;
+
+ common_start = max((size_t)res->start, start);
+ common_end = min((size_t)res->end, end);
+ if (common_start > common_end + 1)
+ continue;
+
+ final_start = min((size_t)res->start, start);
+ final_end = max((size_t)res->end, end);
+
+ res->start = final_start;
+ res->end = final_end;
+ return;
+ }
+
+addit:
+
+ /* need to add that */
+ if (info->res_num >= RES_NUM)
+ return;
+
+ res = &info->res[info->res_num];
+ res->name = info->name;
+ res->flags = flags;
+ res->start = start;
+ res->end = end;
+ res->child = NULL;
+ info->res_num++;
+}
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
new file mode 100644
index 00000000000..adbc23fe82a
--- /dev/null
+++ b/arch/x86/pci/bus_numa.h
@@ -0,0 +1,27 @@
+#ifdef CONFIG_X86_64
+
+/*
+ * A sub bus (transparent) will use entries from index 3 onwards to store
+ * extra ranges from the root, so we need to make sure there are enough
+ * slots there. Should we increase PCI_BUS_NUM_RESOURCES?
+ */
+#define RES_NUM 16
+struct pci_root_info {
+ char name[12];
+ unsigned int res_num;
+ struct resource res[RES_NUM];
+ int bus_min;
+ int bus_max;
+ int node;
+ int link;
+};
+
+/* 4 at this time; it may grow to 32 */
+#define PCI_ROOT_NR 4
+extern int pci_root_num;
+extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
+extern int found_all_numa_early;
+
+extern void update_res(struct pci_root_info *info, size_t start,
+ size_t end, unsigned long flags, int merge);
+#endif
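
update_res() with merge=1 coalesces a new window into an existing resource of the same flags whenever the two ranges overlap or touch. A sketch of a caller (assumed usage, not from this patch): two back-to-back MEM windows collapse into a single [0x1000, 0x2fff] entry in info->res[].

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>

#include "bus_numa.h"

/* Sketch only: the second call merges with the first because
 * 0x2000 == 0x1fff + 1, leaving one resource [0x1000, 0x2fff]. */
static void __init example_fill_info(struct pci_root_info *info)
{
	update_res(info, 0x1000, 0x1fff, IORESOURCE_MEM, 1);
	update_res(info, 0x2000, 0x2fff, IORESOURCE_MEM, 1);
}
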
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 1331fcf2614..d2552c68e94 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -410,8 +410,6 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
return bus;
}
-extern u8 pci_cache_line_size;
-
int __init pcibios_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -422,15 +420,19 @@ int __init pcibios_init(void)
}
/*
- * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
- * and P4. It's also good for 386/486s (which actually have 16)
+ * Set PCI cacheline size to that of the CPU if the CPU has reported it.
+ * (For older CPUs that don't support cpuid, we set it to 32 bytes.)
+ * It's also good for 386/486s (which actually have 16)
* as quite a few PCI devices do not support smaller values.
*/
- pci_cache_line_size = 32 >> 2;
- if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
- pci_cache_line_size = 64 >> 2; /* K7 & K8 */
- else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
- pci_cache_line_size = 128 >> 2; /* P4 */
+ if (c->x86_clflush_size > 0) {
+ pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
+ printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
+ pci_dfl_cache_line_size << 2);
+ } else {
+ pci_dfl_cache_line_size = 32 >> 2;
+ printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
+ }
pcibios_resource_survey();
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index aaf26ae58cd..d1067d539be 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -12,8 +12,6 @@ u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
u32 v;
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
v = inl(0xcfc);
- if (v != 0xffffffff)
- pr_debug("%x reading 4 from %x: %x\n", slot, offset, v);
return v;
}
@@ -22,7 +20,6 @@ u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
u8 v;
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
v = inb(0xcfc + (offset&3));
- pr_debug("%x reading 1 from %x: %x\n", slot, offset, v);
return v;
}
@@ -31,28 +28,24 @@ u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
u16 v;
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
v = inw(0xcfc + (offset&2));
- pr_debug("%x reading 2 from %x: %x\n", slot, offset, v);
return v;
}
void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
u32 val)
{
- pr_debug("%x writing to %x: %x\n", slot, offset, val);
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
outl(val, 0xcfc);
}
void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
{
- pr_debug("%x writing to %x: %x\n", slot, offset, val);
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
outb(val, 0xcfc + (offset&3));
}
void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
{
- pr_debug("%x writing to %x: %x\n", slot, offset, val);
outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
outw(val, 0xcfc + (offset&2));
}
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index b22d13b0c71..5dc9e8c63fc 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -129,7 +129,9 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
continue;
if (!r->start ||
pci_claim_resource(dev, idx) < 0) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+ dev_info(&dev->dev,
+ "can't reserve window %pR\n",
+ r);
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
@@ -144,16 +146,29 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
}
}
+struct pci_check_idx_range {
+ int start;
+ int end;
+};
+
static void __init pcibios_allocate_resources(int pass)
{
struct pci_dev *dev = NULL;
- int idx, disabled;
+ int idx, disabled, i;
u16 command;
struct resource *r;
+ struct pci_check_idx_range idx_range[] = {
+ { PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
+#ifdef CONFIG_PCI_IOV
+ { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
+#endif
+ };
+
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
- for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+ for (i = 0; i < ARRAY_SIZE(idx_range); i++)
+ for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
r = &dev->resource[idx];
if (r->parent) /* Already allocated */
continue;
@@ -164,12 +179,12 @@ static void __init pcibios_allocate_resources(int pass)
else
disabled = !(command & PCI_COMMAND_MEMORY);
if (pass == disabled) {
- dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n",
- (unsigned long long) r->start,
- (unsigned long long) r->end,
- r->flags, disabled, pass);
+ dev_dbg(&dev->dev,
+ "BAR %d: reserving %pr (d=%d, p=%d)\n",
+ idx, r, disabled, pass);
if (pci_claim_resource(dev, idx) < 0) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+ dev_info(&dev->dev,
+ "can't reserve %pR\n", r);
/* We'll assign a new address later */
r->end -= r->start;
r->start = 0;
@@ -182,7 +197,7 @@ static void __init pcibios_allocate_resources(int pass)
/* Turn the ROM off, leave the resource region,
* but keep it unregistered. */
u32 reg;
- dev_dbg(&dev->dev, "disabling ROM\n");
+ dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
r->flags &= ~IORESOURCE_ROM_ENABLE;
pci_read_config_dword(dev,
dev->rom_base_reg, &reg);
@@ -282,6 +297,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return -EINVAL;
prot = pgprot_val(vma->vm_page_prot);
+
+ /*
+ * Return error if pat is not enabled and write_combine is requested.
+ * Caller can followup with UC MINUS request and add a WC mtrr if there
+ * is a free mtrr slot.
+ */
+ if (!pat_enabled && write_combine)
+ return -EINVAL;
+
if (pat_enabled && write_combine)
prot |= _PAGE_CACHE_WC;
else if (pat_enabled || boot_cpu_data.x86 > 3)
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
new file mode 100644
index 00000000000..b7a55dc55d1
--- /dev/null
+++ b/arch/x86/pci/intel_bus.c
@@ -0,0 +1,90 @@
+/*
+ * Read the IO ranges from the IOH PCI config space; this has to be done after mmconfig is available
+ */
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/pci_x86.h>
+
+#include "bus_numa.h"
+
+static inline void print_ioh_resources(struct pci_root_info *info)
+{
+ int res_num;
+ int busnum;
+ int i;
+
+ printk(KERN_DEBUG "IOH bus: [%02x, %02x]\n",
+ info->bus_min, info->bus_max);
+ res_num = info->res_num;
+ busnum = info->bus_min;
+ for (i = 0; i < res_num; i++) {
+ struct resource *res;
+
+ res = &info->res[i];
+ printk(KERN_DEBUG "IOH bus: %02x index %x %s: [%llx, %llx]\n",
+ busnum, i,
+ (res->flags & IORESOURCE_IO) ? "io port" :
+ "mmio",
+ res->start, res->end);
+ }
+}
+
+#define IOH_LIO 0x108
+#define IOH_LMMIOL 0x10c
+#define IOH_LMMIOH 0x110
+#define IOH_LMMIOH_BASEU 0x114
+#define IOH_LMMIOH_LIMITU 0x118
+#define IOH_LCFGBUS 0x11c
+
+static void __devinit pci_root_bus_res(struct pci_dev *dev)
+{
+ u16 word;
+ u32 dword;
+ struct pci_root_info *info;
+ u16 io_base, io_end;
+ u32 mmiol_base, mmiol_end;
+ u64 mmioh_base, mmioh_end;
+ int bus_base, bus_end;
+
+ if (pci_root_num >= PCI_ROOT_NR) {
+ printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
+ return;
+ }
+
+ info = &pci_root_info[pci_root_num];
+ pci_root_num++;
+
+ pci_read_config_word(dev, IOH_LCFGBUS, &word);
+ bus_base = (word & 0xff);
+ bus_end = (word & 0xff00) >> 8;
+ sprintf(info->name, "PCI Bus #%02x", bus_base);
+ info->bus_min = bus_base;
+ info->bus_max = bus_end;
+
+ pci_read_config_word(dev, IOH_LIO, &word);
+ io_base = (word & 0xf0) << (12 - 4);
+ io_end = (word & 0xf000) | 0xfff;
+ update_res(info, io_base, io_end, IORESOURCE_IO, 0);
+
+ pci_read_config_dword(dev, IOH_LMMIOL, &dword);
+ mmiol_base = (dword & 0xff00) << (24 - 8);
+ mmiol_end = (dword & 0xff000000) | 0xffffff;
+ update_res(info, mmiol_base, mmiol_end, IORESOURCE_MEM, 0);
+
+ pci_read_config_dword(dev, IOH_LMMIOH, &dword);
+ mmioh_base = ((u64)(dword & 0xfc00)) << (26 - 10);
+ mmioh_end = ((u64)(dword & 0xfc000000) | 0x3ffffff);
+ pci_read_config_dword(dev, IOH_LMMIOH_BASEU, &dword);
+ mmioh_base |= ((u64)(dword & 0x7ffff)) << 32;
+ pci_read_config_dword(dev, IOH_LMMIOH_LIMITU, &dword);
+ mmioh_end |= ((u64)(dword & 0x7ffff)) << 32;
+ update_res(info, mmioh_base, mmioh_end, IORESOURCE_MEM, 0);
+
+ print_ioh_resources(info);
+}
+
+/* intel IOH */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, pci_root_bus_res);
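
The IOH_LIO decode above packs a 4KB-granular I/O window into one 16-bit register: bits 7:4 hold the base and bits 15:12 the limit. With a hypothetical register value of 0x2010 (illustration only), that yields the window [0x1000, 0x2fff]:

#include <stdio.h>
#include <stdint.h>

/* Decode IOH_LIO the same way pci_root_bus_res() does, with an
 * assumed register value of 0x2010: base field 0x1 -> 0x1000,
 * limit field 0x2 -> 0x2fff. */
int main(void)
{
	uint16_t word = 0x2010;
	unsigned int io_base = (word & 0xf0) << (12 - 4);
	unsigned int io_end  = (word & 0xf000) | 0xfff;

	printf("io window: [%#x, %#x]\n", io_base, io_end);
	return 0;	/* prints: io window: [0x1000, 0x2fff] */
}
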
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 602c172d3bd..b19d1e54201 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -15,48 +15,98 @@
#include <linux/acpi.h>
#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
-#include <linux/sort.h>
+#include <linux/dmi.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>
#define PREFIX "PCI: "
-/* aperture is up to 256MB but BIOS may reserve less */
-#define MMCONFIG_APER_MIN (2 * 1024*1024)
-#define MMCONFIG_APER_MAX (256 * 1024*1024)
-
/* Indicate if the mmcfg resources have been placed into the resource table. */
static int __initdata pci_mmcfg_resources_inserted;
-static __init int extend_mmcfg(int num)
+LIST_HEAD(pci_mmcfg_list);
+
+static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
{
- struct acpi_mcfg_allocation *new;
- int new_num = pci_mmcfg_config_num + num;
+ if (cfg->res.parent)
+ release_resource(&cfg->res);
+ list_del(&cfg->list);
+ kfree(cfg);
+}
- new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL);
- if (!new)
- return -1;
+static __init void free_all_mmcfg(void)
+{
+ struct pci_mmcfg_region *cfg, *tmp;
- if (pci_mmcfg_config) {
- memcpy(new, pci_mmcfg_config,
- sizeof(pci_mmcfg_config[0]) * new_num);
- kfree(pci_mmcfg_config);
+ pci_mmcfg_arch_free();
+ list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
+ pci_mmconfig_remove(cfg);
+}
+
+static __init void list_add_sorted(struct pci_mmcfg_region *new)
+{
+ struct pci_mmcfg_region *cfg;
+
+ /* keep list sorted by segment and starting bus number */
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ if (cfg->segment > new->segment ||
+ (cfg->segment == new->segment &&
+ cfg->start_bus >= new->start_bus)) {
+ list_add_tail(&new->list, &cfg->list);
+ return;
+ }
}
- pci_mmcfg_config = new;
+ list_add_tail(&new->list, &pci_mmcfg_list);
+}
- return 0;
+static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+ int end, u64 addr)
+{
+ struct pci_mmcfg_region *new;
+ int num_buses;
+ struct resource *res;
+
+ if (addr == 0)
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ new->address = addr;
+ new->segment = segment;
+ new->start_bus = start;
+ new->end_bus = end;
+
+ list_add_sorted(new);
+
+ num_buses = end - start + 1;
+ res = &new->res;
+ res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
+ res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+ "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
+ res->name = new->name;
+
+ printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at "
+ "%pR (base %#lx)\n", segment, start, end, &new->res,
+ (unsigned long) addr);
+
+ return new;
}
-static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end)
+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
- int i = pci_mmcfg_config_num;
+ struct pci_mmcfg_region *cfg;
- pci_mmcfg_config_num++;
- pci_mmcfg_config[i].address = addr;
- pci_mmcfg_config[i].pci_segment = segment;
- pci_mmcfg_config[i].start_bus_number = start;
- pci_mmcfg_config[i].end_bus_number = end;
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
+ if (cfg->segment == segment &&
+ cfg->start_bus <= bus && bus <= cfg->end_bus)
+ return cfg;
+
+ return NULL;
}
static const char __init *pci_mmcfg_e7520(void)
@@ -68,11 +118,9 @@ static const char __init *pci_mmcfg_e7520(void)
if (win == 0x0000 || win == 0xf000)
return NULL;
- if (extend_mmcfg(1) == -1)
+ if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
return NULL;
- fill_one_mmcfg(win << 16, 0, 0, 255);
-
return "Intel Corporation E7520 Memory Controller Hub";
}
@@ -114,11 +162,9 @@ static const char __init *pci_mmcfg_intel_945(void)
if ((pciexbar & mask) >= 0xf0000000U)
return NULL;
- if (extend_mmcfg(1) == -1)
+ if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
return NULL;
- fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1);
-
return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
}
@@ -127,7 +173,7 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
u32 low, high, address;
u64 base, msr;
int i;
- unsigned segnbits = 0, busnbits;
+ unsigned segnbits = 0, busnbits, end_bus;
if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
return NULL;
@@ -161,11 +207,13 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
busnbits = 8;
}
- if (extend_mmcfg(1 << segnbits) == -1)
- return NULL;
-
+ end_bus = (1 << busnbits) - 1;
for (i = 0; i < (1 << segnbits); i++)
- fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1);
+ if (pci_mmconfig_add(i, 0, end_bus,
+ base + (1<<28) * i) == NULL) {
+ free_all_mmcfg();
+ return NULL;
+ }
return "AMD Family 10h NB";
}
@@ -190,7 +238,7 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
/*
* do check if amd fam10h already took over
*/
- if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked)
+ if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
return NULL;
mcp55_checked = true;
@@ -213,16 +261,14 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
if (!(extcfg & extcfg_enable_mask))
continue;
- if (extend_mmcfg(1) == -1)
- continue;
-
size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
base = extcfg & extcfg_base_mask[size_index];
/* base could > 4G */
base <<= extcfg_base_lshift;
start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
end = start + extcfg_sizebus[size_index] - 1;
- fill_one_mmcfg(base, 0, start, end);
+ if (pci_mmconfig_add(0, start, end, base) == NULL)
+ continue;
mcp55_mmconf_found++;
}
@@ -253,45 +299,27 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
0x0369, pci_mmcfg_nvidia_mcp55 },
};
-static int __init cmp_mmcfg(const void *x1, const void *x2)
-{
- const typeof(pci_mmcfg_config[0]) *m1 = x1;
- const typeof(pci_mmcfg_config[0]) *m2 = x2;
- int start1, start2;
-
- start1 = m1->start_bus_number;
- start2 = m2->start_bus_number;
-
- return start1 - start2;
-}
-
static void __init pci_mmcfg_check_end_bus_number(void)
{
- int i;
- typeof(pci_mmcfg_config[0]) *cfg, *cfgx;
-
- /* sort them at first */
- sort(pci_mmcfg_config, pci_mmcfg_config_num,
- sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL);
+ struct pci_mmcfg_region *cfg, *cfgx;
/* last one*/
- if (pci_mmcfg_config_num > 0) {
- i = pci_mmcfg_config_num - 1;
- cfg = &pci_mmcfg_config[i];
- if (cfg->end_bus_number < cfg->start_bus_number)
- cfg->end_bus_number = 255;
- }
+ cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
+ if (cfg)
+ if (cfg->end_bus < cfg->start_bus)
+ cfg->end_bus = 255;
- /* don't overlap please */
- for (i = 0; i < pci_mmcfg_config_num - 1; i++) {
- cfg = &pci_mmcfg_config[i];
- cfgx = &pci_mmcfg_config[i+1];
+ if (list_is_singular(&pci_mmcfg_list))
+ return;
- if (cfg->end_bus_number < cfg->start_bus_number)
- cfg->end_bus_number = 255;
+ /* don't overlap please */
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ if (cfg->end_bus < cfg->start_bus)
+ cfg->end_bus = 255;
- if (cfg->end_bus_number >= cfgx->start_bus_number)
- cfg->end_bus_number = cfgx->start_bus_number - 1;
+ cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
+ if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
+ cfg->end_bus = cfgx->start_bus - 1;
}
}
@@ -306,8 +334,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
if (!raw_pci_ops)
return 0;
- pci_mmcfg_config_num = 0;
- pci_mmcfg_config = NULL;
+ free_all_mmcfg();
for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
bus = pci_mmcfg_probes[i].bus;
@@ -322,45 +349,22 @@ static int __init pci_mmcfg_check_hostbridge(void)
name = pci_mmcfg_probes[i].probe();
if (name)
- printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n",
+ printk(KERN_INFO PREFIX "%s with MMCONFIG support\n",
name);
}
/* some end_bus_number is crazy, fix it */
pci_mmcfg_check_end_bus_number();
- return pci_mmcfg_config_num != 0;
+ return !list_empty(&pci_mmcfg_list);
}
static void __init pci_mmcfg_insert_resources(void)
{
-#define PCI_MMCFG_RESOURCE_NAME_LEN 24
- int i;
- struct resource *res;
- char *names;
- unsigned num_buses;
-
- res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
- pci_mmcfg_config_num, GFP_KERNEL);
- if (!res) {
- printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
- return;
- }
+ struct pci_mmcfg_region *cfg;
- names = (void *)&res[pci_mmcfg_config_num];
- for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
- struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i];
- num_buses = cfg->end_bus_number - cfg->start_bus_number + 1;
- res->name = names;
- snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN,
- "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment,
- cfg->start_bus_number, cfg->end_bus_number);
- res->start = cfg->address + (cfg->start_bus_number << 20);
- res->end = res->start + (num_buses << 20) - 1;
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- insert_resource(&iomem_resource, res);
- names += PCI_MMCFG_RESOURCE_NAME_LEN;
- }
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
+ insert_resource(&iomem_resource, &cfg->res);
/* Mark that the resources have been inserted. */
pci_mmcfg_resources_inserted = 1;
@@ -437,11 +441,12 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);
static int __init is_mmconf_reserved(check_reserved_t is_reserved,
- u64 addr, u64 size, int i,
- typeof(pci_mmcfg_config[0]) *cfg, int with_e820)
+ struct pci_mmcfg_region *cfg, int with_e820)
{
+ u64 addr = cfg->res.start;
+ u64 size = resource_size(&cfg->res);
u64 old_size = size;
- int valid = 0;
+ int valid = 0, num_buses;
while (!is_reserved(addr, addr + size, E820_RESERVED)) {
size >>= 1;
@@ -450,19 +455,25 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
}
if (size >= (16UL<<20) || size == old_size) {
- printk(KERN_NOTICE
- "PCI: MCFG area at %Lx reserved in %s\n",
- addr, with_e820?"E820":"ACPI motherboard resources");
+ printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n",
+ &cfg->res,
+ with_e820 ? "E820" : "ACPI motherboard resources");
valid = 1;
if (old_size != size) {
- /* update end_bus_number */
- cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1);
- printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx "
- "segment %hu buses %u - %u\n",
- i, (unsigned long)cfg->address, cfg->pci_segment,
- (unsigned int)cfg->start_bus_number,
- (unsigned int)cfg->end_bus_number);
+ /* update end_bus */
+ cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
+ num_buses = cfg->end_bus - cfg->start_bus + 1;
+ cfg->res.end = cfg->res.start +
+ PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+ snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+ "PCI MMCONFIG %04x [bus %02x-%02x]",
+ cfg->segment, cfg->start_bus, cfg->end_bus);
+ printk(KERN_INFO PREFIX
+ "MMCONFIG for %04x [bus%02x-%02x] "
+ "at %pR (base %#lx) (size reduced!)\n",
+ cfg->segment, cfg->start_bus, cfg->end_bus,
+ &cfg->res, (unsigned long) cfg->address);
}
}
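
(Aside, not part of the patch: the reservation check above keeps halving the MMCONFIG window until the remaining range is covered by a reserved region, and only accepts the result if at least 16 MB survives or nothing was trimmed. A minimal user-space sketch of that loop follows; is_reserved(), the 0xe0000000 base and the "64 MB reserved" assumption are all made up for illustration and merely stand in for the E820/ACPI lookups.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for is_acpi_reserved()/e820_all_mapped(); pretend only the
 * first 64 MB above the base are actually reserved in this example. */
static bool is_reserved(uint64_t start, uint64_t end)
{
	return end <= 0xe0000000ULL + (64ULL << 20);
}

int main(void)
{
	uint64_t addr = 0xe0000000ULL;	/* hypothetical MMCONFIG base */
	uint64_t size = 256ULL << 20;	/* 256 buses, 1 MB of config space each */
	uint64_t old_size = size;

	/* Shrink the window until the whole [addr, addr + size) range is reserved. */
	while (!is_reserved(addr, addr + size)) {
		size >>= 1;
		if (size < (16ULL << 20))
			break;
	}

	if (size >= (16ULL << 20) || size == old_size)
		printf("usable MMCONFIG: %llu buses\n",
		       (unsigned long long)(size >> 20));
	else
		printf("MMCONFIG rejected\n");
	return 0;
}
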
@@ -471,45 +482,26 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
static void __init pci_mmcfg_reject_broken(int early)
{
- typeof(pci_mmcfg_config[0]) *cfg;
- int i;
+ struct pci_mmcfg_region *cfg;
- if ((pci_mmcfg_config_num == 0) ||
- (pci_mmcfg_config == NULL) ||
- (pci_mmcfg_config[0].address == 0))
- return;
-
- for (i = 0; i < pci_mmcfg_config_num; i++) {
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
int valid = 0;
- u64 addr, size;
-
- cfg = &pci_mmcfg_config[i];
- addr = cfg->start_bus_number;
- addr <<= 20;
- addr += cfg->address;
- size = cfg->end_bus_number + 1 - cfg->start_bus_number;
- size <<= 20;
- printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx "
- "segment %hu buses %u - %u\n",
- i, (unsigned long)cfg->address, cfg->pci_segment,
- (unsigned int)cfg->start_bus_number,
- (unsigned int)cfg->end_bus_number);
if (!early && !acpi_disabled)
- valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0);
+ valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);
if (valid)
continue;
if (!early)
- printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not"
- " reserved in ACPI motherboard resources\n",
- cfg->address);
+ printk(KERN_ERR FW_BUG PREFIX
+ "MMCONFIG at %pR not reserved in "
+ "ACPI motherboard resources\n", &cfg->res);
/* Don't try to do this check unless configuration
type 1 is available. how about type 2 ?*/
if (raw_pci_ops)
- valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1);
+ valid = is_mmconf_reserved(e820_all_mapped, cfg, 1);
if (!valid)
goto reject;
@@ -518,34 +510,41 @@ static void __init pci_mmcfg_reject_broken(int early)
return;
reject:
- printk(KERN_INFO "PCI: Not using MMCONFIG.\n");
- pci_mmcfg_arch_free();
- kfree(pci_mmcfg_config);
- pci_mmcfg_config = NULL;
- pci_mmcfg_config_num = 0;
+ printk(KERN_INFO PREFIX "not using MMCONFIG\n");
+ free_all_mmcfg();
}
static int __initdata known_bridge;
-static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
+ struct acpi_mcfg_allocation *cfg)
+{
+ int year;
-/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
-struct acpi_mcfg_allocation *pci_mmcfg_config;
-int pci_mmcfg_config_num;
+ if (cfg->address < 0xFFFFFFFF)
+ return 0;
-static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
-{
if (!strcmp(mcfg->header.oem_id, "SGI"))
- acpi_mcfg_64bit_base_addr = TRUE;
+ return 0;
- return 0;
+ if (mcfg->header.revision >= 1) {
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
+ year >= 2010)
+ return 0;
+ }
+
+ printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
+ "is above 4GB, ignored\n", cfg->pci_segment,
+ cfg->start_bus_number, cfg->end_bus_number, cfg->address);
+ return -EINVAL;
}
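
(Aside: the new acpi_mcfg_check_entry() only trusts MCFG base addresses above 4 GB when the OEM is SGI or the firmware looks recent, i.e. MCFG revision >= 1 and a BIOS date of 2010 or later. Below is a rough stand-alone sketch of that decision; the struct, field names and sample values are invented for the example and replace the real ACPI table and DMI lookups.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical inputs standing in for the MCFG header and DMI data. */
struct mcfg_info {
	char oem_id[7];
	int revision;
	int bios_year;		/* 0 if the BIOS date is unknown */
};

static int check_entry(const struct mcfg_info *m, uint64_t address)
{
	if (address < 0xFFFFFFFFULL)
		return 0;		/* below 4 GB: always accepted */
	if (!strcmp(m->oem_id, "SGI"))
		return 0;		/* SGI systems are known to be OK */
	if (m->revision >= 1 && m->bios_year >= 2010)
		return 0;		/* recent firmware: trust the address */
	return -1;			/* otherwise the entry is ignored */
}

int main(void)
{
	struct mcfg_info older  = { "ACME", 1, 2008 };
	struct mcfg_info recent = { "ACME", 1, 2011 };

	printf("old BIOS, base above 4G: %d\n", check_entry(&older, 0x380000000ULL));
	printf("new BIOS, base above 4G: %d\n", check_entry(&recent, 0x380000000ULL));
	return 0;
}
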
static int __init pci_parse_mcfg(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
+ struct acpi_mcfg_allocation *cfg_table, *cfg;
unsigned long i;
- int config_size;
+ int entries;
if (!header)
return -EINVAL;
@@ -553,38 +552,33 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
mcfg = (struct acpi_table_mcfg *)header;
/* how many config structures do we have */
- pci_mmcfg_config_num = 0;
+ free_all_mmcfg();
+ entries = 0;
i = header->length - sizeof(struct acpi_table_mcfg);
while (i >= sizeof(struct acpi_mcfg_allocation)) {
- ++pci_mmcfg_config_num;
+ entries++;
i -= sizeof(struct acpi_mcfg_allocation);
};
- if (pci_mmcfg_config_num == 0) {
+ if (entries == 0) {
printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
return -ENODEV;
}
- config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
- pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
- if (!pci_mmcfg_config) {
- printk(KERN_WARNING PREFIX
- "No memory for MCFG config tables\n");
- return -ENOMEM;
- }
-
- memcpy(pci_mmcfg_config, &mcfg[1], config_size);
-
- acpi_mcfg_oem_check(mcfg);
-
- for (i = 0; i < pci_mmcfg_config_num; ++i) {
- if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
- !acpi_mcfg_64bit_base_addr) {
- printk(KERN_ERR PREFIX
- "MMCONFIG not in low 4GB of memory\n");
- kfree(pci_mmcfg_config);
- pci_mmcfg_config_num = 0;
+ cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
+ for (i = 0; i < entries; i++) {
+ cfg = &cfg_table[i];
+ if (acpi_mcfg_check_entry(mcfg, cfg)) {
+ free_all_mmcfg();
return -ENODEV;
}
+
+ if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
+ cfg->end_bus_number, cfg->address) == NULL) {
+ printk(KERN_WARNING PREFIX
+ "no memory for MCFG entries\n");
+ free_all_mmcfg();
+ return -ENOMEM;
+ }
}
return 0;
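
(Aside: the pci_parse_mcfg() rewrite no longer copies the MCFG entries into a kmalloc'd array; each entry is handed to pci_mmconfig_add(), which elsewhere in this series allocates a struct pci_mmcfg_region and links it into pci_mmcfg_list. A simplified user-space sketch of that add/lookup pattern is shown below; it uses a plain singly linked list instead of the kernel's list_head, and the names are illustrative only.)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct pci_mmcfg_region. */
struct region {
	int segment;
	int start_bus, end_bus;
	uint64_t address;
	struct region *next;
};

static struct region *region_list;

static struct region *region_add(int seg, int start, int end, uint64_t addr)
{
	struct region *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->segment = seg;
	r->start_bus = start;
	r->end_bus = end;
	r->address = addr;
	r->next = region_list;
	region_list = r;
	return r;
}

static struct region *region_lookup(int seg, int bus)
{
	struct region *r;

	for (r = region_list; r; r = r->next)
		if (r->segment == seg && r->start_bus <= bus && bus <= r->end_bus)
			return r;
	return NULL;
}

int main(void)
{
	region_add(0, 0, 255, 0xe0000000ULL);

	struct region *r = region_lookup(0, 3);

	printf("bus 3 -> base %#llx\n",
	       r ? (unsigned long long)r->address : 0ULL);
	return 0;
}
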
@@ -614,9 +608,7 @@ static void __init __pci_mmcfg_init(int early)
pci_mmcfg_reject_broken(early);
- if ((pci_mmcfg_config_num == 0) ||
- (pci_mmcfg_config == NULL) ||
- (pci_mmcfg_config[0].address == 0))
+ if (list_empty(&pci_mmcfg_list))
return;
if (pci_mmcfg_arch_init())
@@ -648,9 +640,7 @@ static int __init pci_mmcfg_late_insert_resources(void)
*/
if ((pci_mmcfg_resources_inserted == 1) ||
(pci_probe & PCI_PROBE_MMCONF) == 0 ||
- (pci_mmcfg_config_num == 0) ||
- (pci_mmcfg_config == NULL) ||
- (pci_mmcfg_config[0].address == 0))
+ list_empty(&pci_mmcfg_list))
return 1;
/*
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index f10a7e94a84..90d5fd476ed 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -27,18 +27,10 @@ static int mmcfg_last_accessed_cpu;
*/
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
- struct acpi_mcfg_allocation *cfg;
- int cfg_num;
-
- for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
- cfg = &pci_mmcfg_config[cfg_num];
- if (cfg->pci_segment == seg &&
- (cfg->start_bus_number <= bus) &&
- (cfg->end_bus_number >= bus))
- return cfg->address;
- }
+ struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
- /* Fall back to type 0 */
+ if (cfg)
+ return cfg->address;
return 0;
}
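
(Aside: get_base_addr() now simply asks pci_mmconfig_lookup() for the region covering the bus; the per-device offset is still built from PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12) plus the register number. A small sketch of that ECAM address arithmetic follows; the base address and device numbers are arbitrary example values.)

#include <stdint.h>
#include <stdio.h>

/* 1 MB of config space per bus, i.e. bus << 20, as PCI_MMCFG_BUS_OFFSET does. */
#define BUS_OFFSET(bus)	((uint64_t)(bus) << 20)

static uint64_t ecam_address(uint64_t base, int bus, int dev, int fn, int reg)
{
	int devfn = (dev << 3) | fn;	/* 5-bit device, 3-bit function */

	return base + (BUS_OFFSET(bus) | (devfn << 12) | reg);
}

int main(void)
{
	/* Hypothetical MMCONFIG base; bus 2, device 3, function 1, register 0x10. */
	printf("cfg addr = %#llx\n",
	       (unsigned long long)ecam_address(0xe0000000ULL, 2, 3, 1, 0x10));
	return 0;
}
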
@@ -47,7 +39,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
*/
static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
{
- u32 dev_base = base | (bus << 20) | (devfn << 12);
+ u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
int cpu = smp_processor_id();
if (dev_base != mmcfg_last_accessed_device ||
cpu != mmcfg_last_accessed_cpu) {
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 94349f8b2f9..e783841bd1d 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -12,38 +12,15 @@
#include <asm/e820.h>
#include <asm/pci_x86.h>
-/* Static virtual mapping of the MMCONFIG aperture */
-struct mmcfg_virt {
- struct acpi_mcfg_allocation *cfg;
- char __iomem *virt;
-};
-static struct mmcfg_virt *pci_mmcfg_virt;
-
-static char __iomem *get_virt(unsigned int seg, unsigned bus)
-{
- struct acpi_mcfg_allocation *cfg;
- int cfg_num;
-
- for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
- cfg = pci_mmcfg_virt[cfg_num].cfg;
- if (cfg->pci_segment == seg &&
- (cfg->start_bus_number <= bus) &&
- (cfg->end_bus_number >= bus))
- return pci_mmcfg_virt[cfg_num].virt;
- }
-
- /* Fall back to type 0 */
- return NULL;
-}
+#define PREFIX "PCI: "
static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
- char __iomem *addr;
+ struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
- addr = get_virt(seg, bus);
- if (!addr)
- return NULL;
- return addr + ((bus << 20) | (devfn << 12));
+ if (cfg && cfg->virt)
+ return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
+ return NULL;
}
static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
@@ -109,42 +86,30 @@ static struct pci_raw_ops pci_mmcfg = {
.write = pci_mmcfg_write,
};
-static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
+static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg)
{
void __iomem *addr;
u64 start, size;
+ int num_buses;
- start = cfg->start_bus_number;
- start <<= 20;
- start += cfg->address;
- size = cfg->end_bus_number + 1 - cfg->start_bus_number;
- size <<= 20;
+ start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
+ num_buses = cfg->end_bus - cfg->start_bus + 1;
+ size = PCI_MMCFG_BUS_OFFSET(num_buses);
addr = ioremap_nocache(start, size);
- if (addr) {
- printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n",
- start, start + size - 1);
- addr -= cfg->start_bus_number << 20;
- }
+ if (addr)
+ addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
return addr;
}
int __init pci_mmcfg_arch_init(void)
{
- int i;
- pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) *
- pci_mmcfg_config_num, GFP_KERNEL);
- if (pci_mmcfg_virt == NULL) {
- printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
- return 0;
- }
+ struct pci_mmcfg_region *cfg;
- for (i = 0; i < pci_mmcfg_config_num; ++i) {
- pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
- pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]);
- if (!pci_mmcfg_virt[i].virt) {
- printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
- "segment %d\n",
- pci_mmcfg_config[i].pci_segment);
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ cfg->virt = mcfg_ioremap(cfg);
+ if (!cfg->virt) {
+ printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n",
+ &cfg->res);
pci_mmcfg_arch_free();
return 0;
}
@@ -155,19 +120,12 @@ int __init pci_mmcfg_arch_init(void)
void __init pci_mmcfg_arch_free(void)
{
- int i;
-
- if (pci_mmcfg_virt == NULL)
- return;
+ struct pci_mmcfg_region *cfg;
- for (i = 0; i < pci_mmcfg_config_num; ++i) {
- if (pci_mmcfg_virt[i].virt) {
- iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20));
- pci_mmcfg_virt[i].virt = NULL;
- pci_mmcfg_virt[i].cfg = NULL;
+ list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ if (cfg->virt) {
+ iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
+ cfg->virt = NULL;
}
}
-
- kfree(pci_mmcfg_virt);
- pci_mmcfg_virt = NULL;
}
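
(Aside: mcfg_ioremap() above maps only the buses the region actually covers and then biases the returned pointer backwards by the start-bus offset, so later lookups can keep adding the absolute bus offset. The arithmetic is sketched below without any real ioremap; the base address and bus range are invented for the example.)

#include <stdint.h>
#include <stdio.h>

#define BUS_OFFSET(bus)	((uint64_t)(bus) << 20)

int main(void)
{
	uint64_t address = 0xe0000000ULL;	/* hypothetical region base */
	int start_bus = 0x40, end_bus = 0x7f;	/* region covers buses 0x40-0x7f */
	int num_buses = end_bus - start_bus + 1;

	/* Physical span that actually gets mapped. */
	uint64_t start = address + BUS_OFFSET(start_bus);
	uint64_t size  = BUS_OFFSET(num_buses);

	/*
	 * The mapping is "rewound" by BUS_OFFSET(start_bus) so callers can index
	 * it with the absolute bus number; here we just print the offset into the
	 * mapping that a config access on bus 0x41 would use.
	 */
	uint64_t access = BUS_OFFSET(0x41) - BUS_OFFSET(start_bus);

	printf("map %#llx + %llu MB, bus 0x41 at mapping offset %#llx\n",
	       (unsigned long long)start,
	       (unsigned long long)(size >> 20),
	       (unsigned long long)access);
	return 0;
}
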
diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk
index 0d13cd9fdcf..5bbb5a33f22 100644
--- a/arch/x86/tools/chkobjdump.awk
+++ b/arch/x86/tools/chkobjdump.awk
@@ -9,7 +9,7 @@ BEGIN {
}
/^GNU/ {
- split($4, ver, ".");
+ split($3, ver, ".");
if (ver[1] > od_ver ||
(ver[1] == od_ver && ver[2] >= od_sver)) {
exit 1;
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index e34e92a28eb..eaf11f52fc0 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -6,8 +6,6 @@
# Awk implementation sanity check
function check_awk_implement() {
- if (!match("abc", "[[:lower:]]+"))
- return "Your awk doesn't support charactor-class."
if (sprintf("%x", 0) != "0")
return "Your awk has a printf-format problem."
return ""
@@ -44,12 +42,12 @@ BEGIN {
delete gtable
delete atable
- opnd_expr = "^[[:alpha:]/]"
+ opnd_expr = "^[A-Za-z/]"
ext_expr = "^\\("
sep_expr = "^\\|$"
- group_expr = "^Grp[[:alnum:]]+"
+ group_expr = "^Grp[0-9A-Za-z]+"
- imm_expr = "^[IJAO][[:lower:]]"
+ imm_expr = "^[IJAO][a-z]"
imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
@@ -62,7 +60,7 @@ BEGIN {
imm_flag["Ob"] = "INAT_MOFFSET"
imm_flag["Ov"] = "INAT_MOFFSET"
- modrm_expr = "^([CDEGMNPQRSUVW/][[:lower:]]+|NTA|T[012])"
+ modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
rex_expr = "^REX(\\.[XRWB]+)*"
fpu_expr = "^ESC" # TODO
@@ -226,12 +224,12 @@ function add_flags(old,new) {
}
# convert operands to flags.
-function convert_operands(opnd, i,imm,mod)
+function convert_operands(count,opnd, i,j,imm,mod)
{
imm = null
mod = null
- for (i in opnd) {
- i = opnd[i]
+ for (j = 1; j <= count; j++) {
+ i = opnd[j]
if (match(i, imm_expr) == 1) {
if (!imm_flag[i])
semantic_error("Unknown imm opnd: " i)
@@ -282,8 +280,8 @@ function convert_operands(opnd, i,imm,mod)
# parse one opcode
if (match($i, opnd_expr)) {
opnd = $i
- split($(i++), opnds, ",")
- flags = convert_operands(opnds)
+ count = split($(i++), opnds, ",")
+ flags = convert_operands(count, opnds)
}
if (match($i, ext_expr))
ext = $(i++)
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c
index d8214dc03fa..bee8d6ac269 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/test_get_len.c
@@ -113,7 +113,7 @@ int main(int argc, char **argv)
char line[BUFSIZE], sym[BUFSIZE] = "<unknown>";
unsigned char insn_buf[16];
struct insn insn;
- int insns = 0, c;
+ int insns = 0;
int warnings = 0;
parse_args(argc, argv);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b8e45f164e2..2b26dd5930c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -27,7 +27,9 @@
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
+#include <linux/pci.h>
+#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
@@ -1175,7 +1177,11 @@ asmlinkage void __init xen_start_kernel(void)
add_preferred_console("xenboot", 0, NULL);
add_preferred_console("tty", 0, NULL);
add_preferred_console("hvc", 0, NULL);
+ } else {
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
}
+
xen_raw_console_write("about to get started...\n");
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 64757c0ba5f..563d2050498 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -35,10 +35,10 @@
cpumask_var_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, callfuncsingle_irq);
-static DEFINE_PER_CPU(int, debug_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq);
+static DEFINE_PER_CPU(int, xen_callfunc_irq);
+static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
@@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(resched_irq, cpu) = rc;
+ per_cpu(xen_resched_irq, cpu) = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
@@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(callfunc_irq, cpu) = rc;
+ per_cpu(xen_callfunc_irq, cpu) = rc;
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
@@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
debug_name, NULL);
if (rc < 0)
goto fail;
- per_cpu(debug_irq, cpu) = rc;
+ per_cpu(xen_debug_irq, cpu) = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
@@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
NULL);
if (rc < 0)
goto fail;
- per_cpu(callfuncsingle_irq, cpu) = rc;
+ per_cpu(xen_callfuncsingle_irq, cpu) = rc;
return 0;
fail:
- if (per_cpu(resched_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- if (per_cpu(callfunc_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- if (per_cpu(debug_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
- if (per_cpu(callfuncsingle_irq, cpu) >= 0)
- unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+ if (per_cpu(xen_resched_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+ if (per_cpu(xen_callfunc_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ if (per_cpu(xen_debug_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
+ NULL);
return rc;
}
@@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ/10);
}
- unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
- unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 36a5141108d..24ded31b5ae 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -120,14 +120,14 @@ struct xen_spinlock {
unsigned short spinners; /* count of waiting cpus */
};
-static int xen_spin_is_locked(struct raw_spinlock *lock)
+static int xen_spin_is_locked(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
return xl->lock != 0;
}
-static int xen_spin_is_contended(struct raw_spinlock *lock)
+static int xen_spin_is_contended(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
@@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock)
return xl->spinners != 0;
}
-static int xen_spin_trylock(struct raw_spinlock *lock)
+static int xen_spin_trylock(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
u8 old = 1;
@@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
__get_cpu_var(lock_spinners) = prev;
}
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
+static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
struct xen_spinlock *prev;
@@ -254,7 +254,7 @@ out:
return ret;
}
-static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
+static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
unsigned timeout;
@@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
spin_time_accum_total(start_spin);
}
-static void xen_spin_lock(struct raw_spinlock *lock)
+static void xen_spin_lock(struct arch_spinlock *lock)
{
__xen_spin_lock(lock, false);
}
-static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
{
__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
}
@@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
}
}
-static void xen_spin_unlock(struct raw_spinlock *lock)
+static void xen_spin_unlock(struct arch_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9d1f853120d..0d3f07cd1b5 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -31,14 +31,14 @@
#define NS_PER_TICK (1000000000LL / HZ)
/* runstate info updated by Xen */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* snapshots of runstate info */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
/* unused ns of stolen and blocked time */
-static DEFINE_PER_CPU(u64, residual_stolen);
-static DEFINE_PER_CPU(u64, residual_blocked);
+static DEFINE_PER_CPU(u64, xen_residual_stolen);
+static DEFINE_PER_CPU(u64, xen_residual_blocked);
/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
@@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
BUG_ON(preemptible());
- state = &__get_cpu_var(runstate);
+ state = &__get_cpu_var(xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
@@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
- return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+ return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}
void xen_setup_runstate_info(int cpu)
{
struct vcpu_register_runstate_memory_area area;
- area.addr.v = &per_cpu(runstate, cpu);
+ area.addr.v = &per_cpu(xen_runstate, cpu);
if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
cpu, &area))
@@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
WARN_ON(state.state != RUNSTATE_running);
- snap = &__get_cpu_var(runstate_snapshot);
+ snap = &__get_cpu_var(xen_runstate_snapshot);
/* work out how much time the VCPU has not been runn*ing* */
blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
@@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
/* Add the appropriate number of ticks of stolen time,
including any left-overs from last time. */
- stolen = runnable + offline + __get_cpu_var(residual_stolen);
+ stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
if (stolen < 0)
stolen = 0;
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
- __get_cpu_var(residual_stolen) = stolen;
+ __get_cpu_var(xen_residual_stolen) = stolen;
account_steal_ticks(ticks);
/* Add the appropriate number of ticks of blocked time,
including any left-overs from last time. */
- blocked += __get_cpu_var(residual_blocked);
+ blocked += __get_cpu_var(xen_residual_blocked);
if (blocked < 0)
blocked = 0;
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
- __get_cpu_var(residual_blocked) = blocked;
+ __get_cpu_var(xen_residual_blocked) = blocked;
account_idle_ticks(ticks);
}
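
(Aside: do_stolen_accounting() converts accumulated nanoseconds into whole ticks and carries the sub-tick remainder over in the now xen_-prefixed per-CPU residual variables. A minimal sketch of that divide-and-carry step is below, using a plain remainder in place of iter_div_u64_rem() and assuming HZ = 250 purely for the example.)

#include <stdint.h>
#include <stdio.h>

#define NS_PER_TICK	(1000000000ULL / 250)	/* assume HZ = 250 here */

static uint64_t residual_stolen;		/* leftover ns from the last pass */

/* Return how many whole ticks of stolen time to account this pass. */
static uint64_t account_stolen(uint64_t stolen_ns)
{
	uint64_t total = stolen_ns + residual_stolen;
	uint64_t ticks = total / NS_PER_TICK;

	residual_stolen = total % NS_PER_TICK;	/* carry the rest forward */
	return ticks;
}

int main(void)
{
	/* Two passes of 6 ms of stolen time each: 1 tick, then 2 ticks. */
	printf("pass 1: %llu ticks\n", (unsigned long long)account_stolen(6000000));
	printf("pass 2: %llu ticks\n", (unsigned long long)account_stolen(6000000));
	return 0;
}
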
diff --git a/arch/xtensa/include/asm/asm-offsets.h b/arch/xtensa/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/xtensa/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index c3f53e755ca..5eb6d695e98 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -123,7 +123,6 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 05cebf8f62b..efcf33b92e4 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -12,9 +12,6 @@ struct pt_regs;
struct sigaction;
asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
-asmlinkage long xtensa_pipe(int __user *);
-asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long, unsigned long);
asmlinkage long xtensa_ptrace(long, long, long, long);
asmlinkage long xtensa_sigreturn(struct pt_regs*);
asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 4e55dc76302..528042c2951 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -94,7 +94,7 @@ __SYSCALL( 35, sys_readlink, 3)
#define __NR_mknod 36
__SYSCALL( 36, sys_mknod, 3)
#define __NR_pipe 37
-__SYSCALL( 37, xtensa_pipe, 1)
+__SYSCALL( 37, sys_pipe, 1)
#define __NR_unlink 38
__SYSCALL( 38, sys_unlink, 1)
#define __NR_rmdir 39
@@ -189,7 +189,7 @@ __SYSCALL( 79, sys_fremovexattr, 2)
/* File Map / Shared Memory Operations */
#define __NR_mmap2 80
-__SYSCALL( 80, xtensa_mmap2, 6)
+__SYSCALL( 80, sys_mmap_pgoff, 6)
#define __NR_munmap 81
__SYSCALL( 81, sys_munmap, 2)
#define __NR_mprotect 82
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a1badb32fcd..8cd38484e13 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index ac15ecbdf91..816e6d0d686 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -39,49 +39,6 @@ syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
#include <asm/unistd.h>
};
-/*
- * xtensa_pipe() is the normal C calling standard for creating a pipe. It's not
- * the way unix traditional does this, though.
- */
-
-asmlinkage long xtensa_pipe(int __user *userfds)
-{
- int fd[2];
- int error;
-
- error = do_pipe_flags(fd, 0);
- if (!error) {
- if (copy_to_user(userfds, fd, 2 * sizeof(int)))
- error = -EFAULT;
- }
- return error;
-}
-
-
-asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
unsigned long ret;
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 4c559cf7da2..e60a1f57022 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -196,7 +196,7 @@ static const struct file_operations rs_proc_fops = {
.release = single_release,
};
-static struct tty_operations serial_ops = {
+static const struct tty_operations serial_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd1f1e0e196..6ae118d6e19 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -554,11 +554,18 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
ret = -1;
}
+ /*
+ * Temporarily disable discard granularity. It's currently buggy
+ * since we default to 0 for discard_granularity, hence this
+ * "failure" will always trigger for non-zero offsets.
+ */
+#if 0
if (offset &&
(offset & (b->discard_granularity - 1)) != b->discard_alignment) {
t->discard_misaligned = 1;
ret = -1;
}
+#endif
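
(Aside: the disabled check misfires because discard_granularity still defaults to 0: with granularity 0, (offset & (granularity - 1)) masks with all ones, so any non-zero offset compares unequal to discard_alignment and the stack is flagged misaligned. A small illustration of the arithmetic, with example values:)

#include <stdio.h>

int main(void)
{
	unsigned int offset = 4096;		/* non-zero partition offset */
	unsigned int granularity = 0;		/* current default */
	unsigned int alignment = 0;

	/* With granularity 0 this masks with 0xffffffff, so it "fails" for
	 * every non-zero offset even though nothing is actually misaligned. */
	int misaligned = offset &&
		(offset & (granularity - 1)) != alignment;

	printf("granularity 0   -> misaligned = %d\n", misaligned);

	granularity = 512;			/* a sane granularity */
	misaligned = offset && (offset & (granularity - 1)) != alignment;
	printf("granularity 512 -> misaligned = %d\n", misaligned);
	return 0;
}
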
/* If top has no alignment offset, inherit from bottom */
if (!t->alignment_offset)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cfb0b2f5f63..e2f80463ed0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
*/
struct cfq_queue oom_cfqq;
- unsigned long last_end_sync_rq;
+ unsigned long last_delayed_sync;
/* List of cfq groups being managed on this device*/
struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
- CFQ_CFQQ_FLAG_wait_busy_done, /* Got new request. Expire the queue */
};
#define CFQ_CFQQ_FNS(name) \
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
#undef CFQ_CFQQ_FNS
#ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_wait_busy(cfqq);
- cfq_clear_cfqq_wait_busy_done(cfqq);
/*
* store what was left of this slice, if the queue idled/timed out
@@ -1750,6 +1747,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
return NULL;
/*
+ * Don't search priority tree if it's the only queue in the group.
+ */
+ if (cur_cfqq->cfqg->nr_cfqq == 1)
+ return NULL;
+
+ /*
* We should notice if some of the queues are cooperating, eg
* working closely on the same area of the disk. In that case,
* we can group them together and don't waste time idling.
@@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
cfqd->serving_type = cfqg->saved_workload;
cfqd->serving_prio = cfqg->saved_serving_prio;
- }
+ } else
+ cfqd->workload_expires = jiffies - 1;
+
choose_service_tree(cfqd, cfqg);
}
@@ -2128,14 +2133,35 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
if (!cfqd->rq_queued)
return NULL;
+
/*
- * The active queue has run out of time, expire it and select new.
+ * We were waiting for group to get backlogged. Expire the queue
*/
- if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
- && !cfq_cfqq_must_dispatch(cfqq))
+ if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
goto expire;
/*
+ * The active queue has run out of time, expire it and select new.
+ */
+ if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+ /*
+ * If slice had not expired at the completion of last request
+ * we might not have turned on wait_busy flag. Don't expire
+ * the queue yet. Allow the group to get backlogged.
+ *
+ * The very fact that we have used the slice, that means we
+	 * The very fact that we have used the slice means we have
+	 * been idling all along on this queue and it should be
+	 * OK to wait for this request to complete.
+ */
+ if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+ && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+ cfqq = NULL;
+ goto keep_queue;
+ } else
+ goto expire;
+ }
+
+ /*
* The active queue has requests and isn't expired, allow it to
* dispatch.
*/
@@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* based on the last sync IO we serviced
*/
if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
- unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+ unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
unsigned int depth;
depth = last_sync / cfqd->cfq_slice[1];
@@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (cfqq == cfqd->active_queue) {
- if (cfq_cfqq_wait_busy(cfqq)) {
- cfq_clear_cfqq_wait_busy(cfqq);
- cfq_mark_cfqq_wait_busy_done(cfqq);
- }
/*
* Remember that we saw a request from this process, but
* don't start queuing just yet. Otherwise we risk seeing lots
@@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
del_timer(&cfqd->idle_slice_timer);
+ cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else
cfq_mark_cfqq_must_dispatch(cfqq);
@@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
cfqd->hw_tag = 0;
}
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct cfq_io_context *cic = cfqd->active_cic;
+
+ /* If there are other queues in the group, don't wait */
+ if (cfqq->cfqg->nr_cfqq > 1)
+ return false;
+
+ if (cfq_slice_used(cfqq))
+ return true;
+
+ /* if slice left is less than think time, wait busy */
+ if (cic && sample_valid(cic->ttime_samples)
+ && (cfqq->slice_end - jiffies < cic->ttime_mean))
+ return true;
+
+ /*
+	 * If the think time is less than a jiffy, then ttime_mean=0 and the
+	 * check above will not be true. It might happen that the slice has
+	 * not expired yet but will expire soon (4-5 ns) during
+	 * select_queue(). To cover the
+ * case where think time is less than a jiffy, mark the queue wait
+ * busy if only 1 jiffy is left in the slice.
+ */
+ if (cfqq->slice_end - jiffies == 1)
+ return true;
+
+ return false;
+}
+
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (sync) {
RQ_CIC(rq)->last_end_request = now;
- cfqd->last_end_sync_rq = now;
+ if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+ cfqd->last_delayed_sync = now;
}
/*
@@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}
/*
- * If this queue consumed its slice and this is last queue
- * in the group, wait for next request before we expire
- * the queue
+ * Should we wait for next request to come in before we expire
+ * the queue.
*/
- if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+ if (cfq_should_wait_busy(cfqd, cfqq)) {
cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
cfq_mark_cfqq_wait_busy(cfqq);
}
@@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->cfq_latency = 1;
cfqd->cfq_group_isolation = 0;
cfqd->hw_tag = -1;
- cfqd->last_end_sync_rq = jiffies;
+ /*
+ * we optimistically start assuming sync ops weren't delayed in last
+ * second, in order to have larger depth for async operations.
+ */
+ cfqd->last_delayed_sync = jiffies - HZ;
INIT_RCU_HEAD(&cfqd->rcu);
return cfqd;
}
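
(Aside: the new cfq_should_wait_busy() keeps a queue alive when it is the only one in its group and either its slice is already used up, the remaining slice is shorter than the process's mean think time, or exactly one jiffy of slice is left, covering sub-jiffy think times. A stand-alone sketch of that decision with hypothetical numbers follows; the struct and values are illustrative, not the kernel's.)

#include <stdbool.h>
#include <stdio.h>

/* All times in jiffies; names are illustrative only. */
struct queue_state {
	int queues_in_group;
	bool slice_used;
	long slice_end;
	long ttime_mean;	/* mean think time, 0 if below one jiffy */
};

static bool should_wait_busy(const struct queue_state *q, long now)
{
	if (q->queues_in_group > 1)
		return false;		/* other queues keep the group busy */
	if (q->slice_used)
		return true;
	if (q->ttime_mean && q->slice_end - now < q->ttime_mean)
		return true;		/* remaining slice < think time */
	if (q->slice_end - now == 1)
		return true;		/* sub-jiffy think time case */
	return false;
}

int main(void)
{
	struct queue_state q = { 1, false, 105, 10 };

	printf("5 jiffies left, think time 10: wait = %d\n",
	       should_wait_busy(&q, 100));
	q.ttime_mean = 2;
	printf("5 jiffies left, think time 2:  wait = %d\n",
	       should_wait_busy(&q, 100));
	return 0;
}
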
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index f8ae0d94a64..704c1411532 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct cryptd_cpu_queue *cpu_queue;
cpu = get_cpu();
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
put_cpu();
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 26e434ad373..8a07363417e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -96,6 +96,8 @@ source "drivers/edac/Kconfig"
source "drivers/rtc/Kconfig"
+source "drivers/clocksource/Kconfig"
+
source "drivers/dma/Kconfig"
source "drivers/dca/Kconfig"
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d672cfe7ca5..cb423f5aef2 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -21,7 +21,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7702118509a..c7b10b4298e 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,6 +19,7 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
+acpi-y += hest.o
# sleep related files
acpi-y += wakeup.o
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 0d2cdb86158..97991ac6f5f 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -100,7 +100,8 @@ static void round_robin_cpu(unsigned int tsk_index)
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
cpumask_var_t tmp;
int cpu;
- unsigned long min_weight = -1, preferred_cpu;
+ unsigned long min_weight = -1;
+ unsigned long uninitialized_var(preferred_cpu);
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index ab83919dda6..61edb156e8d 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -296,6 +296,11 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
acpi_status validate_status,
union acpi_operand_object **return_object_ptr);
+void
+acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
+ u8 package_type,
+ union acpi_operand_object *obj_desc);
+
/*
* nssearch - Namespace searching and entry
*/
@@ -354,9 +359,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
const char *internal_name,
u32 * converted_name_length, char **converted_name);
-struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle);
-
-acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node);
+struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle);
void acpi_ns_terminate(void);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index b39d682a214..64062b1be3e 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -180,7 +180,11 @@ struct acpi_object_method {
u8 sync_level;
union acpi_operand_object *mutex;
u8 *aml_start;
- ACPI_INTERNAL_METHOD implementation;
+ union {
+ ACPI_INTERNAL_METHOD implementation;
+ union acpi_operand_object *handler;
+ } extra;
+
u32 aml_length;
u8 thread_count;
acpi_owner_id owner_id;
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 567a4899a01..e786f9fd767 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -414,7 +414,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
/* Invoke an internal method if necessary */
if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
- status = obj_desc->method.implementation(next_walk_state);
+ status = obj_desc->method.extra.implementation(next_walk_state);
if (status == AE_OK) {
status = AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 10fc7851784..b40513dd6a6 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -212,18 +212,19 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
case ACPI_TYPE_BUFFER:
/*
- * These types we will allow, but we will change the type. This
- * enables some existing code of the form:
+ * These types we will allow, but we will change the type.
+ * This enables some existing code of the form:
*
* Name (DEB, 0)
* Scope (DEB) { ... }
*
- * Note: silently change the type here. On the second pass, we will report
- * a warning
+ * Note: silently change the type here. On the second pass,
+ * we will report a warning
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n",
- path,
+ "Type override - [%4.4s] had invalid type (%s) "
+ "for Scope operator, changed to type ANY\n",
+ acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
@@ -235,8 +236,10 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* All other types are an error */
ACPI_ERROR((AE_INFO,
- "Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)",
- acpi_ut_get_type_name(node->type), path));
+ "Invalid type (%s) for target of "
+ "Scope operator [%4.4s] (Cannot override)",
+ acpi_ut_get_type_name(node->type),
+ acpi_ut_get_node_name(node)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
@@ -697,15 +700,16 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
case ACPI_TYPE_BUFFER:
/*
- * These types we will allow, but we will change the type. This
- * enables some existing code of the form:
+ * These types we will allow, but we will change the type.
+ * This enables some existing code of the form:
*
* Name (DEB, 0)
* Scope (DEB) { ... }
*/
ACPI_WARNING((AE_INFO,
- "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)",
- buffer_ptr,
+ "Type override - [%4.4s] had invalid type (%s) "
+ "for Scope operator, changed to type ANY\n",
+ acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
@@ -717,9 +721,10 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* All other types are an error */
ACPI_ERROR((AE_INFO,
- "Invalid type (%s) for target of Scope operator [%4.4s]",
+ "Invalid type (%s) for target of "
+ "Scope operator [%4.4s] (Cannot override)",
acpi_ut_get_type_name(node->type),
- buffer_ptr));
+ acpi_ut_get_node_name(node)));
return (AE_AML_OPERAND_TYPE);
}
@@ -1047,9 +1052,22 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
/*
- * If we are executing a method, initialize the region
+ * The op_region is not fully parsed at this time. The only valid
+ * argument is the space_id. (We must save the address of the
+ * AML of the address and length operands)
+ *
+ * If we have a valid region, initialize it. The namespace is
+ * unlocked at this point.
+ *
+ * Need to unlock interpreter if it is locked (if we are running
+ * a control method), in order to allow _REG methods to be run
+ * during acpi_ev_initialize_region.
*/
if (walk_state->method_node) {
+ /*
+ * Executing a method: initialize the region and unlock
+ * the interpreter
+ */
status =
acpi_ex_create_region(op->named.data,
op->named.length,
@@ -1058,21 +1076,17 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
return (status);
}
- }
- /*
- * The op_region is not fully parsed at this time. Only valid
- * argument is the space_id. (We must save the address of the
- * AML of the address and length operands)
- */
+ acpi_ex_exit_interpreter();
+ }
- /*
- * If we have a valid region, initialize it
- * Namespace is NOT locked at this point.
- */
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
+ if (walk_state->method_node) {
+ acpi_ex_enter_interpreter();
+ }
+
if (ACPI_FAILURE(status)) {
/*
* If AE_NOT_EXIST is returned, it is not fatal
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 0bc807c33a5..5336d911fbf 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -718,7 +718,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
return (AE_BAD_PARAMETER);
}
@@ -1087,7 +1087,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
return (AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index cf29c495302..ff168052a33 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -575,6 +575,21 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
handler_obj = obj_desc->thermal_zone.handler;
break;
+ case ACPI_TYPE_METHOD:
+ /*
+ * If we are executing module level code, the original
+ * Node's object was replaced by this Method object and we
+ * saved the handler in the method object.
+ *
+ * See acpi_ns_exec_module_code
+ */
+ if (obj_desc->method.
+ flags & AOPOBJ_MODULE_LEVEL) {
+ handler_obj =
+ obj_desc->method.extra.handler;
+ }
+ break;
+
default:
/* Ignore other objects */
break;
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 10b8543dd46..2fe0809d4eb 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -259,7 +259,7 @@ acpi_install_notify_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -425,7 +425,7 @@ acpi_remove_notify_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 4721f58fe42..eed7a38d25f 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -610,7 +610,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
return (status);
}
- node = acpi_ns_map_handle_to_node(gpe_device);
+ node = acpi_ns_validate_handle(gpe_device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -698,7 +698,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
return (status);
}
- node = acpi_ns_map_handle_to_node(gpe_device);
+ node = acpi_ns_validate_handle(gpe_device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 7c3d2d356ff..c98aa7c2d67 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -89,7 +89,7 @@ acpi_install_address_space_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -155,7 +155,7 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Convert and validate the device handle */
- node = acpi_ns_map_handle_to_node(device);
+ node = acpi_ns_validate_handle(device);
if (!node ||
((node->type != ACPI_TYPE_DEVICE) &&
(node->type != ACPI_TYPE_PROCESSOR) &&
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 2f0114202b0..3c456bd575d 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -375,6 +375,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
+ /* Must have a valid thread ID */
+
+ if (!walk_state->thread) {
+ ACPI_ERROR((AE_INFO,
+ "Cannot release Mutex [%4.4s], null thread info",
+ acpi_ut_get_node_name(obj_desc->mutex.node)));
+ return_ACPI_STATUS(AE_AML_INTERNAL);
+ }
+
/*
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
@@ -392,15 +401,6 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
- /* Must have a valid thread ID */
-
- if (!walk_state->thread) {
- ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], null thread info",
- acpi_ut_get_node_name(obj_desc->mutex.node)));
- return_ACPI_STATUS(AE_AML_INTERNAL);
- }
-
/*
* The sync level of the mutex must be equal to the current sync level. In
* other words, the current level means that at least one mutex at that
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 9c3cdbe2d82..d622ba77000 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -165,7 +165,7 @@ acpi_status acpi_ns_root_initialize(void)
obj_desc->method.method_flags =
AML_METHOD_INTERNAL_ONLY;
- obj_desc->method.implementation =
+ obj_desc->method.extra.implementation =
acpi_ut_osi_implementation;
#endif
break;
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 2deb986861c..e37836e27e2 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -180,7 +180,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
return (AE_OK);
}
- this_node = acpi_ns_map_handle_to_node(obj_handle);
+ this_node = acpi_ns_validate_handle(obj_handle);
if (!this_node) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid object handle %p\n",
obj_handle));
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f771e978c40..af9fe910373 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -381,6 +381,18 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
method_obj->method.next_object);
type = acpi_ns_get_type(parent_node);
+ /*
+ * Get the region handler and save it in the method object. We may need
+ * this if an operation region declaration causes a _REG method to be run.
+ *
+ * We can't do this in acpi_ps_link_module_code because
+ * acpi_gbl_root_node->Object is NULL at PASS1.
+ */
+ if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
+ method_obj->method.extra.handler =
+ parent_node->object->device.handler;
+ }
+
/* Must clear next_object (acpi_ns_attach_object needs the field) */
method_obj->method.next_object = NULL;
@@ -415,6 +427,12 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n",
method_obj->method.aml_start));
+ /* Delete a possible implicit return value (in slack mode) */
+
+ if (info->return_object) {
+ acpi_ut_remove_reference(info->return_object);
+ }
+
/* Detach the temporary method object */
acpi_ns_detach_object(parent_node);
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index af8e6bcee07..8f9a4875ce2 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -232,7 +232,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
- node = acpi_ns_map_handle_to_node(target_handle);
+ node = acpi_ns_validate_handle(target_handle);
if (!node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index b05f42903c8..d34fa59548f 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -216,29 +216,38 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
data->pathname = pathname;
/*
- * Check that the type of the return object is what is expected for
- * this predefined name
+ * Check that the type of the main return object is what is expected
+ * for this predefined name
*/
status = acpi_ns_check_object_type(data, return_object_ptr,
predefined->info.expected_btypes,
ACPI_NOT_PACKAGE_ELEMENT);
if (ACPI_FAILURE(status)) {
- goto check_validation_status;
+ goto exit;
}
- /* For returned Package objects, check the type of all sub-objects */
-
- if (return_object->common.type == ACPI_TYPE_PACKAGE) {
+ /*
+ * For returned Package objects, check the type of all sub-objects.
+ * Note: Package may have been newly created by call above.
+ */
+ if ((*return_object_ptr)->common.type == ACPI_TYPE_PACKAGE) {
status = acpi_ns_check_package(data, return_object_ptr);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
}
/*
- * Perform additional, more complicated repairs on a per-name
- * basis.
+ * The return object was OK, or it was successfully repaired above.
+ * Now make some additional checks such as verifying that package
+ * objects are sorted correctly (if required) or buffer objects have
+ * the correct data width (bytes vs. dwords). These repairs are
+ * performed on a per-name basis, i.e., the code is specific to
+ * particular predefined names.
*/
status = acpi_ns_complex_repairs(data, node, status, return_object_ptr);
-check_validation_status:
+exit:
/*
* If the object validation failed or if we successfully repaired one
* or more objects, mark the parent node to suppress further warning
@@ -427,6 +436,13 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
data->pathname, package->ret_info.type,
return_object->package.count));
+ /*
+ * For variable-length Packages, we can safely remove all embedded
+ * and trailing NULL package elements
+ */
+ acpi_ns_remove_null_elements(data, package->ret_info.type,
+ return_object);
+
/* Extract package count and elements array */
elements = return_object->package.elements;
@@ -461,11 +477,11 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
if (count < expected_count) {
goto package_too_small;
} else if (count > expected_count) {
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
- data->node_flags,
- "Return Package is larger than needed - "
- "found %u, expected %u", count,
- expected_count));
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u\n",
+ data->pathname, count,
+ expected_count));
}
/* Validate all elements of the returned package */
@@ -680,53 +696,18 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
union acpi_operand_object *sub_package;
union acpi_operand_object **sub_elements;
acpi_status status;
- u8 non_trailing_null = FALSE;
u32 expected_count;
u32 i;
u32 j;
- /* Validate each sub-Package in the parent Package */
-
+ /*
+ * Validate each sub-Package in the parent Package
+ *
+ * NOTE: assumes list of sub-packages contains no NULL elements.
+ * Any NULL elements should have been removed by earlier call
+ * to acpi_ns_remove_null_elements.
+ */
for (i = 0; i < count; i++) {
- /*
- * Handling for NULL package elements. For now, we will simply allow
- * a parent package with trailing NULL elements. This can happen if
- * the package was defined to be longer than the initializer list.
- * This is legal as per the ACPI specification. It is often used
- * to allow for dynamic initialization of a Package.
- *
- * A future enhancement may be to simply truncate the package to
- * remove the trailing NULL elements.
- */
- if (!(*elements)) {
- if (!non_trailing_null) {
-
- /* Ensure the remaining elements are all NULL */
-
- for (j = 1; j < (count - i + 1); j++) {
- if (elements[j]) {
- non_trailing_null = TRUE;
- }
- }
-
- if (!non_trailing_null) {
-
- /* Ignore the trailing NULL elements */
-
- return (AE_OK);
- }
- }
-
- /* There are trailing non-null elements, issue warning */
-
- ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
- data->node_flags,
- "Found NULL element at package index %u",
- i));
- elements++;
- continue;
- }
-
sub_package = *elements;
sub_elements = sub_package->package.elements;
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d563f1a564a..4fd1bdb056b 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -45,13 +45,52 @@
#include "accommon.h"
#include "acnamesp.h"
#include "acinterp.h"
-#include "acpredef.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsrepair")
/*******************************************************************************
*
+ * This module attempts to repair or convert objects returned by the
+ * predefined methods to an object type that is expected, as per the ACPI
+ * specification. The need for this code is dictated by the many machines that
+ * return incorrect types for the standard predefined methods. Performing these
+ * conversions here, in one place, eliminates the need for individual ACPI
+ * device drivers to do the same. Note: Most of these conversions are different
+ * than the internal object conversion routines used for implicit object
+ * conversion.
+ *
+ * The following conversions can be performed as necessary:
+ *
+ * Integer -> String
+ * Integer -> Buffer
+ * String -> Integer
+ * String -> Buffer
+ * Buffer -> Integer
+ * Buffer -> String
+ * Buffer -> Package of Integers
+ * Package -> Package of one Package
+ *
+ ******************************************************************************/
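
(Aside: the repair path introduced here tries each conversion allowed by the caller's expected-type mask, in a fixed order, and bails out with AE_AML_OPERAND_TYPE if none applies. Below is a much-simplified user-space sketch of that "try conversions until one fits" structure; it handles only a string-to-integer case, the WANT_* mask values are invented, and it does not model ACPI operand objects at all.)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Expected-type mask, loosely modelled on ACPI_RTYPE_*; values are made up. */
#define WANT_INTEGER	0x01
#define WANT_STRING	0x02

/* Try to "repair" a string result into what the caller expected. */
static int repair_string(const char *in, unsigned mask,
			 long *out_int, char *out_str, size_t len)
{
	if (mask & WANT_STRING) {		/* already the right type */
		snprintf(out_str, len, "%s", in);
		return 0;
	}
	if (mask & WANT_INTEGER) {		/* String -> Integer conversion */
		char *end;
		long v = strtol(in, &end, 0);

		if (end != in && *end == '\0') {
			*out_int = v;
			return 0;
		}
	}
	return -1;				/* no applicable repair */
}

int main(void)
{
	long v = 0;
	char buf[32];

	if (repair_string("0x10", WANT_INTEGER, &v, buf, sizeof(buf)) == 0)
		printf("repaired to integer %ld\n", v);
	if (repair_string("abc", WANT_INTEGER, &v, buf, sizeof(buf)) != 0)
		printf("cannot repair \"abc\" to an integer\n");
	return 0;
}
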
+/* Local prototypes */
+static acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+static acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+static acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+static acpi_status
+acpi_ns_convert_to_package(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object);
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_repair_object
*
* PARAMETERS: Data - Pointer to validation data structure
@@ -68,6 +107,7 @@ ACPI_MODULE_NAME("nsrepair")
* not expected.
*
******************************************************************************/
+
acpi_status
acpi_ns_repair_object(struct acpi_predefined_data *data,
u32 expected_btypes,
@@ -76,32 +116,206 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
{
union acpi_operand_object *return_object = *return_object_ptr;
union acpi_operand_object *new_object;
- acpi_size length;
acpi_status status;
+ ACPI_FUNCTION_NAME(ns_repair_object);
+
/*
* At this point, we know that the type of the returned object was not
* one of the expected types for this predefined name. Attempt to
- * repair the object. Only a limited number of repairs are possible.
+ * repair the object by converting it to one of the expected object
+ * types for this predefined name.
*/
- switch (return_object->common.type) {
+ if (expected_btypes & ACPI_RTYPE_INTEGER) {
+ status = acpi_ns_convert_to_integer(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+ if (expected_btypes & ACPI_RTYPE_STRING) {
+ status = acpi_ns_convert_to_string(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+ if (expected_btypes & ACPI_RTYPE_BUFFER) {
+ status = acpi_ns_convert_to_buffer(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+ if (expected_btypes & ACPI_RTYPE_PACKAGE) {
+ status = acpi_ns_convert_to_package(return_object, &new_object);
+ if (ACPI_SUCCESS(status)) {
+ goto object_repaired;
+ }
+ }
+
+ /* We cannot repair this object */
+
+ return (AE_AML_OPERAND_TYPE);
+
+ object_repaired:
+
+ /* Object was successfully repaired */
+
+ /*
+ * If the original object is a package element, we need to:
+ * 1. Set the reference count of the new object to match the
+ * reference count of the old object.
+ * 2. Decrement the reference count of the original object.
+ */
+ if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ new_object->common.reference_count =
+ return_object->common.reference_count;
+
+ if (return_object->common.reference_count > 1) {
+ return_object->common.reference_count--;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Converted %s to expected %s at index %u\n",
+ data->pathname,
+ acpi_ut_get_object_type_name(return_object),
+ acpi_ut_get_object_type_name(new_object),
+ package_index));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Converted %s to expected %s\n",
+ data->pathname,
+ acpi_ut_get_object_type_name(return_object),
+ acpi_ut_get_object_type_name(new_object)));
+ }
+
+ /* Delete old object, install the new return object */
+
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_object;
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_integer
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_status status;
+ u64 value = 0;
+ u32 i;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_STRING:
+
+ /* String-to-Integer conversion */
+
+ status = acpi_ut_strtoul64(original_object->string.pointer,
+ ACPI_ANY_BASE, &value);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
case ACPI_TYPE_BUFFER:
- /* Does the method/object legally return a string? */
+ /* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
- if (!(expected_btypes & ACPI_RTYPE_STRING)) {
+ if (original_object->buffer.length > 8) {
return (AE_AML_OPERAND_TYPE);
}
+ /* Extract each buffer byte to create the integer */
+
+ for (i = 0; i < original_object->buffer.length; i++) {
+ value |=
+ ((u64) original_object->buffer.
+ pointer[i] << (i * 8));
+ }
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ new_object = acpi_ut_create_integer_object(value);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_string
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert an Integer/Buffer object to a String.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_size length;
+ acpi_status status;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_INTEGER:
+ /*
+ * Integer-to-String conversion. Commonly, convert
+ * an integer of value 0 to a NULL string. The last element of
+ * _BIF and _BIX packages occasionally need this fix.
+ */
+ if (original_object->integer.value == 0) {
+
+ /* Allocate a new NULL string object */
+
+ new_object = acpi_ut_create_string_object(0);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
+ }
+ } else {
+ status =
+ acpi_ex_convert_to_string(original_object,
+ &new_object,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ break;
+
+ case ACPI_TYPE_BUFFER:
/*
- * Have a Buffer, expected a String, convert. Use a to_string
+ * Buffer-to-String conversion. Use a to_string
* conversion, no transform performed on the buffer data. The best
* example of this is the _BIF method, where the string data from
* the battery is often (incorrectly) returned as buffer object(s).
*/
length = 0;
- while ((length < return_object->buffer.length) &&
- (return_object->buffer.pointer[length])) {
+ while ((length < original_object->buffer.length) &&
+ (original_object->buffer.pointer[length])) {
length++;
}
@@ -117,94 +331,176 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
* terminated at Length+1.
*/
ACPI_MEMCPY(new_object->string.pointer,
- return_object->buffer.pointer, length);
+ original_object->buffer.pointer, length);
break;
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ *return_object = new_object;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_buffer
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert an Integer/String/Package object to a Buffer.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ acpi_status status;
+ union acpi_operand_object **elements;
+ u32 *dword_buffer;
+ u32 count;
+ u32 i;
+
+ switch (original_object->common.type) {
case ACPI_TYPE_INTEGER:
+ /*
+ * Integer-to-Buffer conversion.
+ * Convert the Integer to a packed-byte buffer. _MAT and other
+ * objects need this sometimes, if a read has been performed on a
+ * Field object that is less than or equal to the global integer
+ * size (32 or 64 bits).
+ */
+ status =
+ acpi_ex_convert_to_buffer(original_object, &new_object);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
- /* 1) Does the method/object legally return a buffer? */
+ case ACPI_TYPE_STRING:
- if (expected_btypes & ACPI_RTYPE_BUFFER) {
- /*
- * Convert the Integer to a packed-byte buffer. _MAT needs
- * this sometimes, if a read has been performed on a Field
- * object that is less than or equal to the global integer
- * size (32 or 64 bits).
- */
- status =
- acpi_ex_convert_to_buffer(return_object,
- &new_object);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ /* String-to-Buffer conversion. Simple data copy */
+
+ new_object =
+ acpi_ut_create_buffer_object(original_object->string.
+ length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
}
- /* 2) Does the method/object legally return a string? */
+ ACPI_MEMCPY(new_object->buffer.pointer,
+ original_object->string.pointer,
+ original_object->string.length);
+ break;
+
+ case ACPI_TYPE_PACKAGE:
+ /*
+ * This case is often seen for predefined names that must return a
+ * Buffer object with multiple DWORD integers within. For example,
+ * _FDE and _GTM. The Package can be converted to a Buffer.
+ */
+
+ /* All elements of the Package must be integers */
- else if (expected_btypes & ACPI_RTYPE_STRING) {
- /*
- * The only supported Integer-to-String conversion is to convert
- * an integer of value 0 to a NULL string. The last element of
- * _BIF and _BIX packages occasionally need this fix.
- */
- if (return_object->integer.value != 0) {
+ elements = original_object->package.elements;
+ count = original_object->package.count;
+
+ for (i = 0; i < count; i++) {
+ if ((!*elements) ||
+ ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
return (AE_AML_OPERAND_TYPE);
}
+ elements++;
+ }
- /* Allocate a new NULL string object */
+ /* Create the new buffer object to replace the Package */
- new_object = acpi_ut_create_string_object(0);
- if (!new_object) {
- return (AE_NO_MEMORY);
- }
- } else {
- return (AE_AML_OPERAND_TYPE);
+ new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
+ if (!new_object) {
+ return (AE_NO_MEMORY);
}
- break;
- default:
+ /* Copy the package elements (integers) to the buffer as DWORDs */
- /* We cannot repair this object */
+ elements = original_object->package.elements;
+ dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
+
+ for (i = 0; i < count; i++) {
+ *dword_buffer = (u32) (*elements)->integer.value;
+ dword_buffer++;
+ elements++;
+ }
+ break;
+ default:
return (AE_AML_OPERAND_TYPE);
}
- /* Object was successfully repaired */
+ *return_object = new_object;
+ return (AE_OK);
+}
- /*
- * If the original object is a package element, we need to:
- * 1. Set the reference count of the new object to match the
- * reference count of the old object.
- * 2. Decrement the reference count of the original object.
- */
- if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
- new_object->common.reference_count =
- return_object->common.reference_count;
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_convert_to_package
+ *
+ * PARAMETERS: original_object - Object to be converted
+ * return_object - Where the new converted object is returned
+ *
+ * RETURN: Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a Buffer object to a Package. Each byte of
+ * the buffer is converted to a single integer package element.
+ *
+ ******************************************************************************/
- if (return_object->common.reference_count > 1) {
- return_object->common.reference_count--;
+static acpi_status
+acpi_ns_convert_to_package(union acpi_operand_object *original_object,
+ union acpi_operand_object **return_object)
+{
+ union acpi_operand_object *new_object;
+ union acpi_operand_object **elements;
+ u32 length;
+ u8 *buffer;
+
+ switch (original_object->common.type) {
+ case ACPI_TYPE_BUFFER:
+
+ /* Buffer-to-Package conversion */
+
+ length = original_object->buffer.length;
+ new_object = acpi_ut_create_package_object(length);
+ if (!new_object) {
+ return (AE_NO_MEMORY);
}
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Converted %s to expected %s at index %u",
- acpi_ut_get_object_type_name
- (return_object),
- acpi_ut_get_object_type_name(new_object),
- package_index));
- } else {
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Converted %s to expected %s",
- acpi_ut_get_object_type_name
- (return_object),
- acpi_ut_get_object_type_name
- (new_object)));
- }
+ /* Convert each buffer byte to an integer package element */
- /* Delete old object, install the new return object */
+ elements = new_object->package.elements;
+ buffer = original_object->buffer.pointer;
- acpi_ut_remove_reference(return_object);
- *return_object_ptr = new_object;
- data->flags |= ACPI_OBJECT_REPAIRED;
+ while (length--) {
+ *elements =
+ acpi_ut_create_integer_object((u64) *buffer);
+ if (!*elements) {
+ acpi_ut_remove_reference(new_object);
+ return (AE_NO_MEMORY);
+ }
+ elements++;
+ buffer++;
+ }
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ *return_object = new_object;
return (AE_OK);
}
@@ -238,6 +534,8 @@ acpi_ns_repair_package_list(struct acpi_predefined_data *data,
{
union acpi_operand_object *pkg_obj_desc;
+ ACPI_FUNCTION_NAME(ns_repair_package_list);
+
/*
* Create the new outer package and populate it. The new package will
* have a single element, the lone subpackage.
@@ -254,8 +552,9 @@ acpi_ns_repair_package_list(struct acpi_predefined_data *data,
*obj_desc_ptr = pkg_obj_desc;
data->flags |= ACPI_OBJECT_REPAIRED;
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "Repaired Incorrectly formed Package"));
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Repaired incorrectly formed Package\n",
+ data->pathname));
return (AE_OK);
}
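
Editor's note (not part of the patch): the repair path above tries each conversion that the predefined name allows, and the Buffer-to-Integer case packs up to eight buffer bytes into a 64-bit value, least-significant byte first. A minimal standalone sketch of that packing follows; pack_bytes_le64() is a hypothetical name used only for illustration and does not exist in ACPICA.

#include <stdint.h>
#include <stddef.h>

/* Pack up to 8 buffer bytes into a little-endian 64-bit integer,
 * mirroring the loop in acpi_ns_convert_to_integer(). Returns 0 on
 * success, -1 if the buffer cannot fit in 64 bits. */
static int pack_bytes_le64(const uint8_t *buf, size_t len, uint64_t *out)
{
	uint64_t value = 0;
	size_t i;

	if (len > 8)
		return -1;

	for (i = 0; i < len; i++)
		value |= (uint64_t)buf[i] << (i * 8);

	*out = value;
	return 0;
}

For example, a 4-byte buffer { 0x78, 0x56, 0x34, 0x12 } yields the integer 0x12345678.
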
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d07b6861381..f13691c1cca 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -45,6 +45,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
+#include "acpredef.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsrepair2")
@@ -74,6 +75,10 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
static acpi_status
+acpi_ns_repair_FDE(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr);
+
+static acpi_status
acpi_ns_repair_PSS(struct acpi_predefined_data *data,
union acpi_operand_object **return_object_ptr);
@@ -89,9 +94,6 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
u8 sort_direction, char *sort_key_name);
static acpi_status
-acpi_ns_remove_null_elements(union acpi_operand_object *package);
-
-static acpi_status
acpi_ns_sort_list(union acpi_operand_object **elements,
u32 count, u32 index, u8 sort_direction);
@@ -104,17 +106,27 @@ acpi_ns_sort_list(union acpi_operand_object **elements,
* This table contains the names of the predefined methods for which we can
* perform more complex repairs.
*
- * _ALR: Sort the list ascending by ambient_illuminance if necessary
- * _PSS: Sort the list descending by Power if necessary
- * _TSS: Sort the list descending by Power if necessary
+ * As necessary:
+ *
+ * _ALR: Sort the list ascending by ambient_illuminance
+ * _FDE: Convert Buffer of BYTEs to a Buffer of DWORDs
+ * _GTM: Convert Buffer of BYTEs to a Buffer of DWORDs
+ * _PSS: Sort the list descending by Power
+ * _TSS: Sort the list descending by Power
*/
static const struct acpi_repair_info acpi_ns_repairable_names[] = {
{"_ALR", acpi_ns_repair_ALR},
+ {"_FDE", acpi_ns_repair_FDE},
+ {"_GTM", acpi_ns_repair_FDE}, /* _GTM has same repair as _FDE */
{"_PSS", acpi_ns_repair_PSS},
{"_TSS", acpi_ns_repair_TSS},
{{0, 0, 0, 0}, NULL} /* Table terminator */
};
+#define ACPI_FDE_FIELD_COUNT 5
+#define ACPI_FDE_BYTE_BUFFER_SIZE 5
+#define ACPI_FDE_DWORD_BUFFER_SIZE (ACPI_FDE_FIELD_COUNT * sizeof (u32))
+
/******************************************************************************
*
* FUNCTION: acpi_ns_complex_repairs
@@ -215,6 +227,94 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
/******************************************************************************
*
+ * FUNCTION: acpi_ns_repair_FDE
+ *
+ * PARAMETERS: Data - Pointer to validation data structure
+ * return_object_ptr - Pointer to the object returned from the
+ * evaluation of a method or object
+ *
+ * RETURN: Status. AE_OK if object is OK or was repaired successfully
+ *
+ * DESCRIPTION: Repair for the _FDE and _GTM objects. The expected return
+ * value is a Buffer of 5 DWORDs. This function repairs a common
+ * problem where the return value is a Buffer of BYTEs, not
+ * DWORDs.
+ *
+ *****************************************************************************/
+
+static acpi_status
+acpi_ns_repair_FDE(struct acpi_predefined_data *data,
+ union acpi_operand_object **return_object_ptr)
+{
+ union acpi_operand_object *return_object = *return_object_ptr;
+ union acpi_operand_object *buffer_object;
+ u8 *byte_buffer;
+ u32 *dword_buffer;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ns_repair_FDE);
+
+ switch (return_object->common.type) {
+ case ACPI_TYPE_BUFFER:
+
+ /* This is the expected type. Length should be (at least) 5 DWORDs */
+
+ if (return_object->buffer.length >= ACPI_FDE_DWORD_BUFFER_SIZE) {
+ return (AE_OK);
+ }
+
+ /* We can only repair if we have exactly 5 BYTEs */
+
+ if (return_object->buffer.length != ACPI_FDE_BYTE_BUFFER_SIZE) {
+ ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+ data->node_flags,
+ "Incorrect return buffer length %u, expected %u",
+ return_object->buffer.length,
+ ACPI_FDE_DWORD_BUFFER_SIZE));
+
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /* Create the new (larger) buffer object */
+
+ buffer_object =
+ acpi_ut_create_buffer_object(ACPI_FDE_DWORD_BUFFER_SIZE);
+ if (!buffer_object) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Expand each byte to a DWORD */
+
+ byte_buffer = return_object->buffer.pointer;
+ dword_buffer =
+ ACPI_CAST_PTR(u32, buffer_object->buffer.pointer);
+
+ for (i = 0; i < ACPI_FDE_FIELD_COUNT; i++) {
+ *dword_buffer = (u32) *byte_buffer;
+ dword_buffer++;
+ byte_buffer++;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s Expanded Byte Buffer to expected DWord Buffer\n",
+ data->pathname));
+ break;
+
+ default:
+ return (AE_AML_OPERAND_TYPE);
+ }
+
+ /* Delete the original return object, return the new buffer object */
+
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = buffer_object;
+
+ data->flags |= ACPI_OBJECT_REPAIRED;
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_ns_repair_TSS
*
* PARAMETERS: Data - Pointer to validation data structure
@@ -345,6 +445,8 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
u32 previous_value;
acpi_status status;
+ ACPI_FUNCTION_NAME(ns_check_sorted_list);
+
/* The top-level object must be a package */
if (return_object->common.type != ACPI_TYPE_PACKAGE) {
@@ -352,24 +454,10 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
}
/*
- * Detect any NULL package elements and remove them from the
- * package.
- *
- * TBD: We may want to do this for all predefined names that
- * return a variable-length package of packages.
+ * NOTE: assumes list of sub-packages contains no NULL elements.
+ * Any NULL elements should have been removed by an earlier call
+ * to acpi_ns_remove_null_elements.
*/
- status = acpi_ns_remove_null_elements(return_object);
- if (status == AE_NULL_ENTRY) {
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
- "NULL elements removed from package"));
-
- /* Exit if package is now zero length */
-
- if (!return_object->package.count) {
- return (AE_NULL_ENTRY);
- }
- }
-
outer_elements = return_object->package.elements;
outer_element_count = return_object->package.count;
if (!outer_element_count) {
@@ -422,10 +510,9 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
data->flags |= ACPI_OBJECT_REPAIRED;
- ACPI_INFO_PREDEFINED((AE_INFO, data->pathname,
- data->node_flags,
- "Repaired unsorted list - now sorted by %s",
- sort_key_name));
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Repaired unsorted list - now sorted by %s\n",
+ data->pathname, sort_key_name));
return (AE_OK);
}
@@ -440,36 +527,63 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_remove_null_elements
*
- * PARAMETERS: obj_desc - A Package object
+ * PARAMETERS: Data - Pointer to validation data structure
+ * package_type - An acpi_return_package_types value
+ * obj_desc - A Package object
*
- * RETURN: Status. AE_NULL_ENTRY means that one or more elements were
- * removed.
+ * RETURN: None.
*
- * DESCRIPTION: Remove all NULL package elements and update the package count.
+ * DESCRIPTION: Remove all NULL package elements from packages that contain
+ * a variable number of sub-packages.
*
*****************************************************************************/
-static acpi_status
-acpi_ns_remove_null_elements(union acpi_operand_object *obj_desc)
+void
+acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
+ u8 package_type,
+ union acpi_operand_object *obj_desc)
{
union acpi_operand_object **source;
union acpi_operand_object **dest;
- acpi_status status = AE_OK;
u32 count;
u32 new_count;
u32 i;
+ ACPI_FUNCTION_NAME(ns_remove_null_elements);
+
+ /*
+ * PTYPE1 packages contain no subpackages.
+ * PTYPE2 packages contain a variable number of sub-packages. We can
+ * safely remove all NULL elements from the PTYPE2 packages.
+ */
+ switch (package_type) {
+ case ACPI_PTYPE1_FIXED:
+ case ACPI_PTYPE1_VAR:
+ case ACPI_PTYPE1_OPTION:
+ return;
+
+ case ACPI_PTYPE2:
+ case ACPI_PTYPE2_COUNT:
+ case ACPI_PTYPE2_PKG_COUNT:
+ case ACPI_PTYPE2_FIXED:
+ case ACPI_PTYPE2_MIN:
+ case ACPI_PTYPE2_REV_FIXED:
+ break;
+
+ default:
+ return;
+ }
+
count = obj_desc->package.count;
new_count = count;
source = obj_desc->package.elements;
dest = source;
- /* Examine all elements of the package object */
+ /* Examine all elements of the package object, remove nulls */
for (i = 0; i < count; i++) {
if (!*source) {
- status = AE_NULL_ENTRY;
new_count--;
} else {
*dest = *source;
@@ -478,15 +592,18 @@ acpi_ns_remove_null_elements(union acpi_operand_object *obj_desc)
source++;
}
- if (status == AE_NULL_ENTRY) {
+ /* Update parent package if any null elements were removed */
+
+ if (new_count < count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Found and removed %u NULL elements\n",
+ data->pathname, (count - new_count)));
/* NULL terminate list and update the package count */
*dest = NULL;
obj_desc->package.count = new_count;
}
-
- return (status);
}
/******************************************************************************
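
Editor's note (not part of the patch): acpi_ns_remove_null_elements() above compacts the package in place with a pair of source/destination pointers, preserving element order and updating the count once at the end; the _FDE/_GTM repair in the same file uses a similar single pass to widen five BYTEs into five DWORDs. A minimal sketch of the compaction over a plain pointer array, with hypothetical names and void pointers standing in for ACPICA operand objects:

#include <stddef.h>

/* Remove NULL entries from an array of pointers in place, preserving
 * the order of the remaining entries; returns the new count. */
static size_t compact_non_null(void **elements, size_t count)
{
	void **source = elements;
	void **dest = elements;
	size_t new_count = count;
	size_t i;

	for (i = 0; i < count; i++) {
		if (!*source) {
			new_count--;		/* drop the NULL entry */
		} else {
			*dest = *source;	/* keep, shifting down */
			dest++;
		}
		source++;
	}

	if (new_count < count)
		*dest = NULL;			/* re-terminate the list */

	return new_count;
}
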
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index ea55ab4f984..47d91e668a1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -671,24 +671,25 @@ acpi_ns_externalize_name(u32 internal_name_length,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_map_handle_to_node
+ * FUNCTION: acpi_ns_validate_handle
*
- * PARAMETERS: Handle - Handle to be converted to an Node
+ * PARAMETERS: Handle - Handle to be validated and typecast to a
+ * namespace node.
*
- * RETURN: A Name table entry pointer
+ * RETURN: A pointer to a namespace node
*
- * DESCRIPTION: Convert a namespace handle to a real Node
+ * DESCRIPTION: Convert a namespace handle to a namespace node. Handles special
+ * cases for the root node.
*
- * Note: Real integer handles would allow for more verification
+ * NOTE: Real integer handles would allow for more verification
* and keep all pointers within this subsystem - however this introduces
- * more (and perhaps unnecessary) overhead.
- *
- * The current implemenation is basically a placeholder until such time comes
- * that it is needed.
+ * more overhead and has not been necessary to this point. Drivers
+ * holding handles are typically notified before a node becomes invalid
+ * due to a table unload.
*
******************************************************************************/
-struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
+struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
{
ACPI_FUNCTION_ENTRY();
@@ -710,42 +711,6 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
/*******************************************************************************
*
- * FUNCTION: acpi_ns_convert_entry_to_handle
- *
- * PARAMETERS: Node - Node to be converted to a Handle
- *
- * RETURN: A user handle
- *
- * DESCRIPTION: Convert a real Node to a namespace handle
- *
- ******************************************************************************/
-
-acpi_handle acpi_ns_convert_entry_to_handle(struct acpi_namespace_node *node)
-{
-
- /*
- * Simple implementation for now;
- */
- return ((acpi_handle) node);
-
-/* Example future implementation ---------------------
-
- if (!Node)
- {
- return (NULL);
- }
-
- if (Node == acpi_gbl_root_node)
- {
- return (ACPI_ROOT_OBJECT);
- }
-
- return ((acpi_handle) Node);
-------------------------------------------------------*/
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_terminate
*
* PARAMETERS: none
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index f2bd1da7700..f0c0892bc7e 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -190,7 +190,7 @@ acpi_evaluate_object(acpi_handle handle,
/* Convert and validate the device handle */
- info->prefix_node = acpi_ns_map_handle_to_node(handle);
+ info->prefix_node = acpi_ns_validate_handle(handle);
if (!info->prefix_node) {
status = AE_BAD_PARAMETER;
goto cleanup;
@@ -552,7 +552,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
return (status);
}
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return (status);
@@ -729,7 +729,7 @@ acpi_attach_data(acpi_handle obj_handle,
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -775,7 +775,7 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -822,7 +822,7 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(obj_handle);
+ node = acpi_ns_validate_handle(obj_handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index ddc84af6336..e611dd961b2 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -93,7 +93,7 @@ acpi_get_handle(acpi_handle parent,
/* Convert a parent handle to a prefix node */
if (parent) {
- prefix_node = acpi_ns_map_handle_to_node(parent);
+ prefix_node = acpi_ns_validate_handle(parent);
if (!prefix_node) {
return (AE_BAD_PARAMETER);
}
@@ -114,7 +114,7 @@ acpi_get_handle(acpi_handle parent,
if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
*ret_handle =
- acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
+ ACPI_CAST_PTR(acpi_handle, acpi_gbl_root_node);
return (AE_OK);
}
} else if (!prefix_node) {
@@ -129,7 +129,7 @@ acpi_get_handle(acpi_handle parent,
status =
acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
if (ACPI_SUCCESS(status)) {
- *ret_handle = acpi_ns_convert_entry_to_handle(node);
+ *ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
return (status);
@@ -186,7 +186,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
return (status);
}
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -291,7 +291,7 @@ acpi_get_object_info(acpi_handle handle,
goto cleanup;
}
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 4071bad4458..0cc6ba01a49 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -79,7 +79,7 @@ acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_BAD_PARAMETER);
@@ -132,7 +132,7 @@ acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (AE_BAD_PARAMETER);
@@ -182,7 +182,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
/* Convert and validate the handle */
- node = acpi_ns_map_handle_to_node(handle);
+ node = acpi_ns_validate_handle(handle);
if (!node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -191,7 +191,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
/* Get the parent entry */
parent_node = acpi_ns_get_parent_node(node);
- *ret_handle = acpi_ns_convert_entry_to_handle(parent_node);
+ *ret_handle = ACPI_CAST_PTR(acpi_handle, parent_node);
/* Return exception if parent is null */
@@ -251,7 +251,7 @@ acpi_get_next_object(acpi_object_type type,
/* Start search at the beginning of the specified scope */
- parent_node = acpi_ns_map_handle_to_node(parent);
+ parent_node = acpi_ns_validate_handle(parent);
if (!parent_node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -260,7 +260,7 @@ acpi_get_next_object(acpi_object_type type,
/* Non-null handle, ignore the parent */
/* Convert and validate the handle */
- child_node = acpi_ns_map_handle_to_node(child);
+ child_node = acpi_ns_validate_handle(child);
if (!child_node) {
status = AE_BAD_PARAMETER;
goto unlock_and_exit;
@@ -276,7 +276,7 @@ acpi_get_next_object(acpi_object_type type,
}
if (ret_handle) {
- *ret_handle = acpi_ns_convert_entry_to_handle(node);
+ *ret_handle = ACPI_CAST_PTR(acpi_handle, node);
}
unlock_and_exit:
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 12934ad6da8..d0c1b91eb8c 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -287,7 +287,8 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
/* Invoke an internal method if necessary */
if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
- status = info->obj_desc->method.implementation(walk_state);
+ status =
+ info->obj_desc->method.extra.implementation(walk_state);
info->return_object = walk_state->return_desc;
/* Cleanup states */
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 395212bcd19..f27feb4772f 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -104,7 +104,7 @@ acpi_rs_validate_parameters(acpi_handle device_handle,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- node = acpi_ns_map_handle_to_node(device_handle);
+ node = acpi_ns_validate_handle(device_handle);
if (!node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 0f0c64bf8ac..f857c5efb79 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -323,11 +323,11 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type,
* RETURN: Status
*
* DESCRIPTION: This function is called to place a package object in a user
- * buffer. A package object by definition contains other objects.
+ * buffer. A package object by definition contains other objects.
*
* The buffer is assumed to have sufficient space for the object.
- * The caller must have verified the buffer length needed using the
- * acpi_ut_get_object_size function before calling this function.
+ * The caller must have verified the buffer length needed using
+ * the acpi_ut_get_object_size function before calling this function.
*
******************************************************************************/
@@ -382,12 +382,12 @@ acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
* FUNCTION: acpi_ut_copy_iobject_to_eobject
*
* PARAMETERS: internal_object - The internal object to be converted
- * buffer_ptr - Where the object is returned
+ * ret_buffer - Where the object is returned
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to build an API object to be returned to
- * the caller.
+ * DESCRIPTION: This function is called to build an API object to be returned
+ * to the caller.
*
******************************************************************************/
@@ -626,7 +626,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
* PARAMETERS: external_object - The external object to be converted
* internal_object - Where the internal object is returned
*
- * RETURN: Status - the status of the call
+ * RETURN: Status
*
* DESCRIPTION: Converts an external object to an internal object.
*
@@ -665,7 +665,7 @@ acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
*
* RETURN: Status
*
- * DESCRIPTION: Simple copy of one internal object to another. Reference count
+ * DESCRIPTION: Simple copy of one internal object to another. Reference count
* of the destination object is preserved.
*
******************************************************************************/
@@ -897,10 +897,11 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
*
* FUNCTION: acpi_ut_copy_ipackage_to_ipackage
*
- * PARAMETERS: *source_obj - Pointer to the source package object
- * *dest_obj - Where the internal object is returned
+ * PARAMETERS: source_obj - Pointer to the source package object
+ * dest_obj - Where the internal object is returned
+ * walk_state - Current Walk state descriptor
*
- * RETURN: Status - the status of the call
+ * RETURN: Status
*
* DESCRIPTION: This function is called to copy an internal package object
* into another internal package object.
@@ -953,9 +954,9 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
*
* FUNCTION: acpi_ut_copy_iobject_to_iobject
*
- * PARAMETERS: walk_state - Current walk state
- * source_desc - The internal object to be copied
+ * PARAMETERS: source_desc - The internal object to be copied
* dest_desc - Where the copied object is returned
+ * walk_state - Current walk state
*
* RETURN: Status
*
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 3f4602b8f28..cada73ffdfa 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -831,7 +831,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
dev_name(&device->dev), event,
acpi_battery_present(battery));
#ifdef CONFIG_ACPI_SYSFS_POWER
- /* acpi_batter_update could remove power_supply object */
+ /* acpi_battery_update could remove power_supply object */
if (battery->bat.dev)
kobject_uevent(&battery->bat.dev->kobj, KOBJ_CHANGE);
#endif
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 74119152435..65f7e335f12 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -344,6 +344,152 @@ bool acpi_bus_can_wakeup(acpi_handle handle)
EXPORT_SYMBOL(acpi_bus_can_wakeup);
+static void acpi_print_osc_error(acpi_handle handle,
+ struct acpi_osc_context *context, char *error)
+{
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
+ int i;
+
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
+ printk(KERN_DEBUG "%s\n", error);
+ else {
+ printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error);
+ kfree(buffer.pointer);
+ }
+ printk(KERN_DEBUG"_OSC request data:");
+ for (i = 0; i < context->cap.length; i += sizeof(u32))
+ printk("%x ", *((u32 *)(context->cap.pointer + i)));
+ printk("\n");
+}
+
+static u8 hex_val(unsigned char c)
+{
+ return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
+}
+
+static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
+{
+ int i;
+ static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
+ 24, 26, 28, 30, 32, 34};
+
+ if (strlen(str) != 36)
+ return AE_BAD_PARAMETER;
+ for (i = 0; i < 36; i++) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ if (str[i] != '-')
+ return AE_BAD_PARAMETER;
+ } else if (!isxdigit(str[i]))
+ return AE_BAD_PARAMETER;
+ }
+ for (i = 0; i < 16; i++) {
+ uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4;
+ uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]);
+ }
+ return AE_OK;
+}
+
+acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
+{
+ acpi_status status;
+ struct acpi_object_list input;
+ union acpi_object in_params[4];
+ union acpi_object *out_obj;
+ u8 uuid[16];
+ u32 errors;
+
+ if (!context)
+ return AE_ERROR;
+ if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
+ return AE_ERROR;
+ context->ret.length = ACPI_ALLOCATE_BUFFER;
+ context->ret.pointer = NULL;
+
+ /* Setting up input parameters */
+ input.count = 4;
+ input.pointer = in_params;
+ in_params[0].type = ACPI_TYPE_BUFFER;
+ in_params[0].buffer.length = 16;
+ in_params[0].buffer.pointer = uuid;
+ in_params[1].type = ACPI_TYPE_INTEGER;
+ in_params[1].integer.value = context->rev;
+ in_params[2].type = ACPI_TYPE_INTEGER;
+ in_params[2].integer.value = context->cap.length/sizeof(u32);
+ in_params[3].type = ACPI_TYPE_BUFFER;
+ in_params[3].buffer.length = context->cap.length;
+ in_params[3].buffer.pointer = context->cap.pointer;
+
+ status = acpi_evaluate_object(handle, "_OSC", &input, &context->ret);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ /* return buffer should have the same length as cap buffer */
+ if (context->ret.length != context->cap.length)
+ return AE_NULL_OBJECT;
+
+ out_obj = context->ret.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ acpi_print_osc_error(handle, context,
+ "_OSC evaluation returned wrong type");
+ status = AE_TYPE;
+ goto out_kfree;
+ }
+ /* Need to ignore the bit0 in result code */
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ if (errors & OSC_REQUEST_ERROR)
+ acpi_print_osc_error(handle, context,
+ "_OSC request failed");
+ if (errors & OSC_INVALID_UUID_ERROR)
+ acpi_print_osc_error(handle, context,
+ "_OSC invalid UUID");
+ if (errors & OSC_INVALID_REVISION_ERROR)
+ acpi_print_osc_error(handle, context,
+ "_OSC invalid revision");
+ if (errors & OSC_CAPABILITIES_MASK_ERROR) {
+ if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
+ & OSC_QUERY_ENABLE)
+ goto out_success;
+ status = AE_SUPPORT;
+ goto out_kfree;
+ }
+ status = AE_ERROR;
+ goto out_kfree;
+ }
+out_success:
+ return AE_OK;
+
+out_kfree:
+ kfree(context->ret.pointer);
+ context->ret.pointer = NULL;
+ return status;
+}
+EXPORT_SYMBOL(acpi_run_osc);
+
+static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
+static void acpi_bus_osc_support(void)
+{
+ u32 capbuf[2];
+ struct acpi_osc_context context = {
+ .uuid_str = sb_uuid_str,
+ .rev = 1,
+ .cap.length = 8,
+ .cap.pointer = capbuf,
+ };
+ acpi_handle handle;
+
+ capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
+#ifdef CONFIG_ACPI_PROCESSOR_AGGREGATOR
+ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
+#endif
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
+ return;
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
+ kfree(context.ret.pointer);
+ /* do we need to check the returned cap? Sounds like no */
+}
+
/* --------------------------------------------------------------------------
Event Management
-------------------------------------------------------------------------- */
@@ -734,6 +880,8 @@ static int __init acpi_bus_init(void)
status = acpi_ec_ecdt_probe();
/* Ignore result. Not having an ECDT is not fatal. */
+ acpi_bus_osc_support();
+
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
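
Editor's note (not part of the patch): acpi_str_to_uuid() above turns the canonical 36-character UUID string into the 16-byte, partially byte-swapped layout that _OSC expects, using a fixed table of string indices. A standalone sketch of the same idea follows; uuid_str_to_bytes() is a hypothetical name, and the index table is copied from the patch.

#include <ctype.h>
#include <stdint.h>
#include <string.h>

/* Index of the first hex digit of each output byte within the UUID
 * string; the first three fields are byte-swapped, as _OSC expects. */
static const int uuid_index[16] = {
	6, 4, 2, 0, 11, 9, 16, 14, 19, 21, 24, 26, 28, 30, 32, 34
};

static uint8_t hex_val(char c)
{
	return isdigit((unsigned char)c) ?
		c - '0' : toupper((unsigned char)c) - 'A' + 10;
}

/* Convert "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" to 16 bytes.
 * Returns 0 on success, -1 on a malformed string. */
static int uuid_str_to_bytes(const char *str, uint8_t uuid[16])
{
	int i;

	if (strlen(str) != 36)
		return -1;

	for (i = 0; i < 36; i++) {
		if (i == 8 || i == 13 || i == 18 || i == 23) {
			if (str[i] != '-')
				return -1;
		} else if (!isxdigit((unsigned char)str[i])) {
			return -1;
		}
	}

	for (i = 0; i < 16; i++)
		uuid[i] = (hex_val(str[uuid_index[i]]) << 4) |
			  hex_val(str[uuid_index[i] + 1]);

	return 0;
}
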
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0c9c6a9a002..8a95e8329df 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -282,6 +282,13 @@ static int acpi_lid_send_state(struct acpi_device *device)
if (ret == NOTIFY_DONE)
ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
device);
+ if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
+ /*
+ * It is also regarded as success if the notifier_chain
+ * returns NOTIFY_OK or NOTIFY_DONE.
+ */
+ ret = 0;
+ }
return ret;
}
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 8a690c3b8e2..cc421b7ae16 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
#include <asm/uaccess.h>
#include <acpi/acpi_drivers.h>
@@ -196,6 +197,80 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
NULL, 0644);
/* --------------------------------------------------------------------------
+ DebugFS Interface
+ -------------------------------------------------------------------------- */
+
+static ssize_t cm_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char *buf;
+ static int uncopied_bytes;
+ struct acpi_table_header table;
+ acpi_status status;
+
+ if (!(*ppos)) {
+ /* parse the table header to get the table length */
+ if (count <= sizeof(struct acpi_table_header))
+ return -EINVAL;
+ if (copy_from_user(&table, user_buf,
+ sizeof(struct acpi_table_header)))
+ return -EFAULT;
+ uncopied_bytes = table.length;
+ buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ if (uncopied_bytes < count) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ uncopied_bytes -= count;
+ *ppos += count;
+
+ if (!uncopied_bytes) {
+ status = acpi_install_method(buf);
+ kfree(buf);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
+
+ return count;
+}
+
+static const struct file_operations cm_fops = {
+ .write = cm_write,
+};
+
+static int acpi_debugfs_init(void)
+{
+ struct dentry *acpi_dir, *cm_dentry;
+
+ acpi_dir = debugfs_create_dir("acpi", NULL);
+ if (!acpi_dir)
+ goto err;
+
+ cm_dentry = debugfs_create_file("custom_method", S_IWUGO,
+ acpi_dir, NULL, &cm_fops);
+ if (!cm_dentry)
+ goto err;
+
+ return 0;
+
+err:
+ if (acpi_dir)
+ debugfs_remove(acpi_dir);
+ return -EINVAL;
+}
+
+/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_PROCFS
@@ -286,7 +361,7 @@ static const struct file_operations acpi_system_debug_proc_fops = {
};
#endif
-int __init acpi_debug_init(void)
+int __init acpi_procfs_init(void)
{
#ifdef CONFIG_ACPI_PROCFS
struct proc_dir_entry *entry;
@@ -321,3 +396,10 @@ int __init acpi_debug_init(void)
return 0;
#endif
}
+
+int __init acpi_debug_init(void)
+{
+ acpi_debugfs_init();
+ acpi_procfs_init();
+ return 0;
+}
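
Editor's note (not part of the patch): the new custom_method debugfs file accepts a complete AML table (for example a Method definition block built with iasl) and may receive it across several write() calls; the first write must include the ACPI table header so the total length can be read from it. A hypothetical user-space sketch of feeding a table to the file, shown only to illustrate the expected usage:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in = open("method.aml", O_RDONLY);
	int out = open("/sys/kernel/debug/acpi/custom_method", O_WRONLY);

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}

	/* Each chunk is larger than the 36-byte table header, so the
	 * kernel can learn the total length from the first write. */
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, n) != n) {
			perror("write");
			return 1;
		}
	}

	close(in);
	close(out);
	return 0;
}
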
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 30be3c148f7..bbc2c1315c4 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
" before undocking");
static struct atomic_notifier_head dock_notifier_list;
-static char dock_device_name[] = "dock";
static const struct acpi_device_id dock_device_ids[] = {
{"LNXDOCK", 0},
@@ -93,40 +92,30 @@ struct dock_dependent_device {
* Dock Dependent device functions *
*****************************************************************************/
/**
- * alloc_dock_dependent_device - allocate and init a dependent device
- * @handle: the acpi_handle of the dependent device
+ * add_dock_dependent_device - associate a device with the dock station
+ * @ds: The dock station
+ * @handle: handle of the dependent device
*
- * Allocate memory for a dependent device structure for a device referenced
- * by the acpi handle
+ * Add the dependent device to the dock's dependent device list.
*/
-static struct dock_dependent_device *
-alloc_dock_dependent_device(acpi_handle handle)
+static int
+add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
- if (dd) {
- dd->handle = handle;
- INIT_LIST_HEAD(&dd->list);
- INIT_LIST_HEAD(&dd->hotplug_list);
- }
- return dd;
-}
+ if (!dd)
+ return -ENOMEM;
+
+ dd->handle = handle;
+ INIT_LIST_HEAD(&dd->list);
+ INIT_LIST_HEAD(&dd->hotplug_list);
-/**
- * add_dock_dependent_device - associate a device with the dock station
- * @ds: The dock station
- * @dd: The dependent device
- *
- * Add the dependent device to the dock's dependent device list.
- */
-static void
-add_dock_dependent_device(struct dock_station *ds,
- struct dock_dependent_device *dd)
-{
spin_lock(&ds->dd_lock);
list_add_tail(&dd->list, &ds->dependent_devices);
spin_unlock(&ds->dd_lock);
+
+ return 0;
}
/**
@@ -249,6 +238,7 @@ static int is_battery(acpi_handle handle)
static int is_ejectable_bay(acpi_handle handle)
{
acpi_handle phandle;
+
if (!is_ejectable(handle))
return 0;
if (is_battery(handle) || is_ata(handle))
@@ -275,14 +265,13 @@ int is_dock_device(acpi_handle handle)
if (is_dock(handle))
return 1;
- list_for_each_entry(dock_station, &dock_stations, sibling) {
+
+ list_for_each_entry(dock_station, &dock_stations, sibling)
if (find_dock_dependent_device(dock_station, handle))
return 1;
- }
return 0;
}
-
EXPORT_SYMBOL_GPL(is_dock_device);
/**
@@ -305,8 +294,6 @@ static int dock_present(struct dock_station *ds)
return 0;
}
-
-
/**
* dock_create_acpi_device - add new devices to acpi
* @handle - handle of the device to add
@@ -320,7 +307,7 @@ static int dock_present(struct dock_station *ds)
*/
static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
{
- struct acpi_device *device = NULL;
+ struct acpi_device *device;
struct acpi_device *parent_device;
acpi_handle parent;
int ret;
@@ -337,8 +324,7 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
ret = acpi_bus_add(&device, parent_device, handle,
ACPI_BUS_TYPE_DEVICE);
if (ret) {
- pr_debug("error adding bus, %x\n",
- -ret);
+ pr_debug("error adding bus, %x\n", -ret);
return NULL;
}
}
@@ -364,7 +350,6 @@ static void dock_remove_acpi_device(acpi_handle handle)
}
}
-
/**
* hotplug_dock_devices - insert or remove devices on the dock station
* @ds: the dock station
@@ -384,10 +369,9 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
/*
* First call driver specific hotplug functions
*/
- list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list) {
+ list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->handler)
dd->ops->handler(dd->handle, event, dd->context);
- }
/*
* Now make sure that an acpi_device is created for each
@@ -426,6 +410,7 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
if (dd->ops && dd->ops->uevent)
dd->ops->uevent(dd->handle, event, dd->context);
+
if (num != DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
@@ -456,8 +441,8 @@ static void eject_dock(struct dock_station *ds)
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = 1;
- if (ACPI_FAILURE(acpi_evaluate_object(ds->handle, "_EJ0",
- &arg_list, NULL)))
+ status = acpi_evaluate_object(ds->handle, "_EJ0", &arg_list, NULL);
+ if (ACPI_FAILURE(status))
pr_debug("Failed to evaluate _EJ0!\n");
}
@@ -577,7 +562,6 @@ int register_dock_notifier(struct notifier_block *nb)
return atomic_notifier_chain_register(&dock_notifier_list, nb);
}
-
EXPORT_SYMBOL_GPL(register_dock_notifier);
/**
@@ -591,7 +575,6 @@ void unregister_dock_notifier(struct notifier_block *nb)
atomic_notifier_chain_unregister(&dock_notifier_list, nb);
}
-
EXPORT_SYMBOL_GPL(unregister_dock_notifier);
/**
@@ -636,7 +619,6 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
return ret;
}
-
EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
/**
@@ -657,7 +639,6 @@ void unregister_hotplug_dock_device(acpi_handle handle)
dock_del_hotplug_device(dock_station, dd);
}
}
-
EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
/**
@@ -772,7 +753,7 @@ struct dock_data {
static void acpi_dock_deferred_cb(void *context)
{
- struct dock_data *data = (struct dock_data *)context;
+ struct dock_data *data = context;
dock_notify(data->handle, data->event, data->ds);
kfree(data);
@@ -782,23 +763,22 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
unsigned long event, void *data)
{
struct dock_station *dock_station;
- acpi_handle handle = (acpi_handle)data;
+ acpi_handle handle = data;
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
return 0;
list_for_each_entry(dock_station, &dock_stations, sibling) {
if (dock_station->handle == handle) {
- struct dock_data *dock_data;
+ struct dock_data *dd;
- dock_data = kmalloc(sizeof(*dock_data), GFP_KERNEL);
- if (!dock_data)
+ dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
return 0;
- dock_data->handle = handle;
- dock_data->event = event;
- dock_data->ds = dock_station;
- acpi_os_hotplug_execute(acpi_dock_deferred_cb,
- dock_data);
+ dd->handle = handle;
+ dd->event = event;
+ dd->ds = dock_station;
+ acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
return 0 ;
}
}
@@ -826,7 +806,6 @@ find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
acpi_status status;
acpi_handle tmp, parent;
struct dock_station *ds = context;
- struct dock_dependent_device *dd;
status = acpi_bus_get_ejd(handle, &tmp);
if (ACPI_FAILURE(status)) {
@@ -840,11 +819,9 @@ find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
goto fdd_out;
}
- if (tmp == ds->handle) {
- dd = alloc_dock_dependent_device(handle);
- if (dd)
- add_dock_dependent_device(ds, dd);
- }
+ if (tmp == ds->handle)
+ add_dock_dependent_device(ds, handle);
+
fdd_out:
return AE_OK;
}
@@ -857,8 +834,7 @@ static ssize_t show_docked(struct device *dev,
{
struct acpi_device *tmp;
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
if (ACPI_SUCCESS(acpi_bus_get_device(dock_station->handle, &tmp)))
return snprintf(buf, PAGE_SIZE, "1\n");
@@ -872,8 +848,7 @@ static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
static ssize_t show_flags(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
@@ -886,8 +861,7 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
if (!count)
return -EINVAL;
@@ -905,8 +879,7 @@ static ssize_t show_dock_uid(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
acpi_status status = acpi_evaluate_integer(dock_station->handle,
"_UID", NULL, &lbuf);
if (ACPI_FAILURE(status))
@@ -919,8 +892,7 @@ static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
static ssize_t show_dock_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dock_station *dock_station = *((struct dock_station **)
- dev->platform_data);
+ struct dock_station *dock_station = dev->platform_data;
char *type;
if (dock_station->flags & DOCK_IS_DOCK)
@@ -936,6 +908,19 @@ static ssize_t show_dock_type(struct device *dev,
}
static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
+static struct attribute *dock_attributes[] = {
+ &dev_attr_docked.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_undock.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_type.attr,
+ NULL
+};
+
+static struct attribute_group dock_attribute_group = {
+ .attrs = dock_attributes
+};
+
/**
* dock_add - add a new dock station
* @handle: the dock station handle
@@ -945,39 +930,30 @@ static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
*/
static int dock_add(acpi_handle handle)
{
- int ret;
- struct dock_dependent_device *dd;
- struct dock_station *dock_station;
- struct platform_device *dock_device;
+ int ret, id;
+ struct dock_station ds, *dock_station;
+ struct platform_device *dd;
+
+ id = dock_station_count;
+ dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
+ if (IS_ERR(dd))
+ return PTR_ERR(dd);
+
+ dock_station = dd->dev.platform_data;
- /* allocate & initialize the dock_station private data */
- dock_station = kzalloc(sizeof(*dock_station), GFP_KERNEL);
- if (!dock_station)
- return -ENOMEM;
dock_station->handle = handle;
+ dock_station->dock_device = dd;
dock_station->last_dock_time = jiffies - HZ;
- INIT_LIST_HEAD(&dock_station->dependent_devices);
- INIT_LIST_HEAD(&dock_station->hotplug_devices);
- INIT_LIST_HEAD(&dock_station->sibling);
- spin_lock_init(&dock_station->dd_lock);
+
mutex_init(&dock_station->hp_lock);
+ spin_lock_init(&dock_station->dd_lock);
+ INIT_LIST_HEAD(&dock_station->sibling);
+ INIT_LIST_HEAD(&dock_station->hotplug_devices);
ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
-
- /* initialize platform device stuff */
- dock_station->dock_device =
- platform_device_register_simple(dock_device_name,
- dock_station_count, NULL, 0);
- dock_device = dock_station->dock_device;
- if (IS_ERR(dock_device)) {
- kfree(dock_station);
- dock_station = NULL;
- return PTR_ERR(dock_device);
- }
- platform_device_add_data(dock_device, &dock_station,
- sizeof(struct dock_station *));
+ INIT_LIST_HEAD(&dock_station->dependent_devices);
/* we want the dock device to send uevents */
- dev_set_uevent_suppress(&dock_device->dev, 0);
+ dev_set_uevent_suppress(&dd->dev, 0);
if (is_dock(handle))
dock_station->flags |= DOCK_IS_DOCK;
@@ -986,47 +962,9 @@ static int dock_add(acpi_handle handle)
if (is_battery(handle))
dock_station->flags |= DOCK_IS_BAT;
- ret = device_create_file(&dock_device->dev, &dev_attr_docked);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_undock);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_uid);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_flags);
- if (ret) {
- printk(KERN_ERR "Error %d adding sysfs file\n", ret);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- device_remove_file(&dock_device->dev, &dev_attr_uid);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
- return ret;
- }
- ret = device_create_file(&dock_device->dev, &dev_attr_type);
+ ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
if (ret)
- printk(KERN_ERR"Error %d adding sysfs file\n", ret);
+ goto err_unregister;
/* Find dependent devices */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
@@ -1034,58 +972,43 @@ static int dock_add(acpi_handle handle)
dock_station, NULL);
/* add the dock station as a device dependent on itself */
- dd = alloc_dock_dependent_device(handle);
- if (!dd) {
- kfree(dock_station);
- dock_station = NULL;
- ret = -ENOMEM;
- goto dock_add_err_unregister;
- }
- add_dock_dependent_device(dock_station, dd);
+ ret = add_dock_dependent_device(dock_station, handle);
+ if (ret)
+ goto err_rmgroup;
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
return 0;
-dock_add_err_unregister:
- device_remove_file(&dock_device->dev, &dev_attr_type);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- device_remove_file(&dock_device->dev, &dev_attr_uid);
- device_remove_file(&dock_device->dev, &dev_attr_flags);
- platform_device_unregister(dock_device);
- kfree(dock_station);
- dock_station = NULL;
+err_rmgroup:
+ sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
+err_unregister:
+ platform_device_unregister(dd);
+ printk(KERN_ERR "%s encountered error %d\n", __func__, ret);
return ret;
}
/**
* dock_remove - free up resources related to the dock station
*/
-static int dock_remove(struct dock_station *dock_station)
+static int dock_remove(struct dock_station *ds)
{
struct dock_dependent_device *dd, *tmp;
- struct platform_device *dock_device = dock_station->dock_device;
+ struct platform_device *dock_device = ds->dock_device;
if (!dock_station_count)
return 0;
/* remove dependent devices */
- list_for_each_entry_safe(dd, tmp, &dock_station->dependent_devices,
- list)
- kfree(dd);
+ list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
+ kfree(dd);
+
+ list_del(&ds->sibling);
/* cleanup sysfs */
- device_remove_file(&dock_device->dev, &dev_attr_type);
- device_remove_file(&dock_device->dev, &dev_attr_docked);
- device_remove_file(&dock_device->dev, &dev_attr_undock);
- device_remove_file(&dock_device->dev, &dev_attr_uid);
- device_remove_file(&dock_device->dev, &dev_attr_flags);
+ sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
platform_device_unregister(dock_device);
- /* free dock station memory */
- kfree(dock_station);
- dock_station = NULL;
return 0;
}
@@ -1103,11 +1026,10 @@ find_dock(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status = AE_OK;
- if (is_dock(handle)) {
- if (dock_add(handle) >= 0) {
+ if (is_dock(handle))
+ if (dock_add(handle) >= 0)
status = AE_CTRL_TERMINATE;
- }
- }
+
return status;
}
@@ -1145,8 +1067,7 @@ static int __init dock_init(void)
static void __exit dock_exit(void)
{
- struct dock_station *dock_station;
- struct dock_station *tmp;
+ struct dock_station *tmp, *dock_station;
unregister_acpi_bus_notifier(&dock_acpi_notifier);
list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
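
The dock.c rework above replaces a chain of device_create_file()/device_remove_file() calls with a single sysfs attribute group, and hands allocation of the private data to platform_device_register_data(). A minimal sketch of the attribute-group idiom, using a hypothetical attribute name rather than the dock driver's own, could look like this:

#include <linux/device.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 1);         /* placeholder value */
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

static struct attribute *example_attributes[] = {
        &dev_attr_example.attr,
        NULL
};

static struct attribute_group example_attribute_group = {
        .attrs = example_attributes
};

/* One call replaces the whole device_create_file() chain... */
static int example_add_sysfs(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &example_attribute_group);
}

/* ...and one call undoes it on the error and removal paths. */
static void example_remove_sysfs(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &example_attribute_group);
}

The group is created once on the success path and removed once on teardown, which is what lets dock_add() collapse its five hand-rolled error branches into the err_rmgroup/err_unregister labels.
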
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f419849a0d3..acf2ab24984 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -267,7 +267,7 @@ static int acpi_fan_add(struct acpi_device *device)
goto end;
}
- dev_info(&device->dev, "registered as cooling_device%d\n", cdev->id);
+ dev_dbg(&device->dev, "registered as cooling_device%d\n", cdev->id);
device->driver_data = cdev;
result = sysfs_create_link(&device->dev.kobj,
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
new file mode 100644
index 00000000000..4bb18c980ac
--- /dev/null
+++ b/drivers/acpi/hest.c
@@ -0,0 +1,135 @@
+#include <linux/acpi.h>
+#include <linux/pci.h>
+
+#define PREFIX "ACPI: "
+
+static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
+{
+ return sizeof(*p) +
+ (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
+}
+
+static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
+{
+ return sizeof(*p) +
+ (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
+}
+
+static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
+{
+ return sizeof(*p);
+}
+
+static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
+{
+ return sizeof(*p);
+}
+
+static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
+{
+ return (0 == pci_domain_nr(pci->bus) &&
+ p->bus == pci->bus->number &&
+ p->device == PCI_SLOT(pci->devfn) &&
+ p->function == PCI_FUNC(pci->devfn));
+}
+
+static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
+{
+ struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
+ unsigned long rc=0;
+ u8 pcie_type = 0;
+ u8 bridge = 0;
+ switch (type) {
+ case ACPI_HEST_TYPE_AER_ROOT_PORT:
+ rc = sizeof(struct acpi_hest_aer_root);
+ pcie_type = PCI_EXP_TYPE_ROOT_PORT;
+ break;
+ case ACPI_HEST_TYPE_AER_ENDPOINT:
+ rc = sizeof(struct acpi_hest_aer);
+ pcie_type = PCI_EXP_TYPE_ENDPOINT;
+ break;
+ case ACPI_HEST_TYPE_AER_BRIDGE:
+ rc = sizeof(struct acpi_hest_aer_bridge);
+ if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ bridge = 1;
+ break;
+ }
+
+ if (p->flags & ACPI_HEST_GLOBAL) {
+ if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
+ *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ }
+ else
+ if (hest_match_pci(p, pci))
+ *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+ return rc;
+}
+
+static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
+{
+ struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
+ void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
+ struct acpi_hest_header *hdr = p;
+
+ int i;
+ int firmware_first = 0;
+ static unsigned char printed_unused = 0;
+ static unsigned char printed_reserved = 0;
+
+ for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
+ switch (hdr->type) {
+ case ACPI_HEST_TYPE_IA32_CHECK:
+ p += parse_acpi_hest_ia_machine_check(p);
+ break;
+ case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
+ p += parse_acpi_hest_ia_corrected(p);
+ break;
+ case ACPI_HEST_TYPE_IA32_NMI:
+ p += parse_acpi_hest_ia_nmi(p);
+ break;
+ /* These three should never appear */
+ case ACPI_HEST_TYPE_NOT_USED3:
+ case ACPI_HEST_TYPE_NOT_USED4:
+ case ACPI_HEST_TYPE_NOT_USED5:
+ if (!printed_unused) {
+ printk(KERN_DEBUG PREFIX
+ "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
+ printed_unused = 1;
+ }
+ break;
+ case ACPI_HEST_TYPE_AER_ROOT_PORT:
+ case ACPI_HEST_TYPE_AER_ENDPOINT:
+ case ACPI_HEST_TYPE_AER_BRIDGE:
+ p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
+ break;
+ case ACPI_HEST_TYPE_GENERIC_ERROR:
+ p += parse_acpi_hest_generic(p);
+ break;
+ /* These should never appear either */
+ case ACPI_HEST_TYPE_RESERVED:
+ default:
+ if (!printed_reserved) {
+ printk(KERN_DEBUG PREFIX
+ "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
+ printed_reserved = 1;
+ }
+ break;
+ }
+ }
+ return firmware_first;
+}
+
+int acpi_hest_firmware_first_pci(struct pci_dev *pci)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct acpi_table_header *hest = NULL;
+ status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
+
+ if (ACPI_SUCCESS(status)) {
+ if (acpi_hest_firmware_first(hest, pci)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
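
The new hest.c file walks the ACPI Health Error Source Table and exports acpi_hest_firmware_first_pci() so PCI error-handling code can ask whether firmware claims "firmware first" ownership of a device's errors. A hedged sketch of such a caller (the probe helper and the header providing the prototype are assumptions, not part of this patch):

#include <linux/pci.h>
#include <linux/acpi.h>

/* Hypothetical consumer: back off from native AER setup when the HEST
 * marks this device as firmware-first. */
static int example_native_aer_allowed(struct pci_dev *dev)
{
        if (acpi_hest_firmware_first_pci(dev))
                return 0;       /* firmware owns error reporting */
        return 1;               /* the OS may program AER itself */
}
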
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 2be2fb66204..7ad48dfc12d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
+#include <linux/numa.h>
#include <acpi/acpi_bus.h>
#define PREFIX "ACPI: "
@@ -40,14 +41,14 @@ static nodemask_t nodes_found_map = NODE_MASK_NONE;
/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
- = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
- = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
int pxm_to_node(int pxm)
{
if (pxm < 0)
- return NID_INVAL;
+ return NUMA_NO_NODE;
return pxm_to_node_map[pxm];
}
@@ -68,9 +69,9 @@ int acpi_map_pxm_to_node(int pxm)
{
int node = pxm_to_node_map[pxm];
- if (node < 0){
+ if (node < 0) {
if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
- return NID_INVAL;
+ return NUMA_NO_NODE;
node = first_unset_node(nodes_found_map);
__acpi_map_pxm_to_node(pxm, node);
node_set(node, nodes_found_map);
@@ -79,16 +80,6 @@ int acpi_map_pxm_to_node(int pxm)
return node;
}
-#if 0
-void __cpuinit acpi_unmap_pxm_to_node(int node)
-{
- int pxm = node_to_pxm_map[node];
- pxm_to_node_map[pxm] = NID_INVAL;
- node_to_pxm_map[node] = PXM_INVAL;
- node_clear(node, nodes_found_map);
-}
-#endif /* 0 */
-
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
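
With NID_INVAL gone from numa.c, callers of the proximity-domain helpers test against NUMA_NO_NODE instead. A small illustrative wrapper (not from this patch), assuming the prototype is picked up via acpi/acpi_numa.h:

#include <linux/errno.h>
#include <linux/numa.h>
#include <acpi/acpi_numa.h>

/* Map a firmware proximity domain to a logical node id, treating
 * NUMA_NO_NODE as "no mapping available". */
static int example_pxm_to_nid(int pxm)
{
        int node = acpi_map_pxm_to_node(pxm);

        return (node == NUMA_NO_NODE) ? -ENODEV : node;
}
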
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7c1c59ea9ec..02e8464e480 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1118,7 +1118,7 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
/* Check for resource conflicts between ACPI OperationRegions and native
* drivers */
-int acpi_check_resource_conflict(struct resource *res)
+int acpi_check_resource_conflict(const struct resource *res)
{
struct acpi_res_list *res_list_elem;
int ioport;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1af808171d4..101cce3681d 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -202,72 +202,24 @@ static void acpi_pci_bridge_scan(struct acpi_device *device)
}
}
-static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
- 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
+static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static acpi_status acpi_pci_run_osc(acpi_handle handle,
const u32 *capbuf, u32 *retval)
{
+ struct acpi_osc_context context = {
+ .uuid_str = pci_osc_uuid_str,
+ .rev = 1,
+ .cap.length = 12,
+ .cap.pointer = (void *)capbuf,
+ };
acpi_status status;
- struct acpi_object_list input;
- union acpi_object in_params[4];
- struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
- union acpi_object *out_obj;
- u32 errors;
-
- /* Setting up input parameters */
- input.count = 4;
- input.pointer = in_params;
- in_params[0].type = ACPI_TYPE_BUFFER;
- in_params[0].buffer.length = 16;
- in_params[0].buffer.pointer = OSC_UUID;
- in_params[1].type = ACPI_TYPE_INTEGER;
- in_params[1].integer.value = 1;
- in_params[2].type = ACPI_TYPE_INTEGER;
- in_params[2].integer.value = 3;
- in_params[3].type = ACPI_TYPE_BUFFER;
- in_params[3].buffer.length = 12;
- in_params[3].buffer.pointer = (u8 *)capbuf;
-
- status = acpi_evaluate_object(handle, "_OSC", &input, &output);
- if (ACPI_FAILURE(status))
- return status;
- if (!output.length)
- return AE_NULL_OBJECT;
-
- out_obj = output.pointer;
- if (out_obj->type != ACPI_TYPE_BUFFER) {
- printk(KERN_DEBUG "_OSC evaluation returned wrong type\n");
- status = AE_TYPE;
- goto out_kfree;
- }
- /* Need to ignore the bit0 in result code */
- errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
- if (errors) {
- if (errors & OSC_REQUEST_ERROR)
- printk(KERN_DEBUG "_OSC request failed\n");
- if (errors & OSC_INVALID_UUID_ERROR)
- printk(KERN_DEBUG "_OSC invalid UUID\n");
- if (errors & OSC_INVALID_REVISION_ERROR)
- printk(KERN_DEBUG "_OSC invalid revision\n");
- if (errors & OSC_CAPABILITIES_MASK_ERROR) {
- if (capbuf[OSC_QUERY_TYPE] & OSC_QUERY_ENABLE)
- goto out_success;
- printk(KERN_DEBUG
- "Firmware did not grant requested _OSC control\n");
- status = AE_SUPPORT;
- goto out_kfree;
- }
- status = AE_ERROR;
- goto out_kfree;
+ status = acpi_run_osc(handle, &context);
+ if (ACPI_SUCCESS(status)) {
+ *retval = *((u32 *)(context.ret.pointer + 8));
+ kfree(context.ret.pointer);
}
-out_success:
- *retval = *((u32 *)(out_obj->buffer.pointer + 8));
- status = AE_OK;
-
-out_kfree:
- kfree(output.pointer);
return status;
}
@@ -277,10 +229,10 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
u32 support_set, result, capbuf[3];
/* do _OSC query for all possible controls */
- support_set = root->osc_support_set | (flags & OSC_SUPPORT_MASKS);
+ support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_TYPE] = support_set;
- capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
if (ACPI_SUCCESS(status)) {
@@ -427,7 +379,7 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
if (ACPI_FAILURE(status))
return status;
- control_req = (flags & OSC_CONTROL_MASKS);
+ control_req = (flags & OSC_PCI_CONTROL_MASKS);
if (!control_req)
return AE_TYPE;
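
The pci_root.c rewrite delegates the _OSC plumbing to the generic acpi_run_osc() helper: the caller fills an acpi_osc_context with a UUID string, revision and capability buffer, then frees context.ret.pointer on success. A stripped-down sketch of that calling convention (the wrapper name is illustrative; the capability layout mirrors the three-word query/support/control buffer used above):

#include <linux/acpi.h>
#include <linux/slab.h>

static acpi_status example_run_osc(acpi_handle handle, u32 *capbuf,
                                   u32 *result)
{
        struct acpi_osc_context ctx = {
                .uuid_str    = "33DB4D5B-1FF7-401C-9657-7441C03DD766",
                .rev         = 1,
                .cap.length  = 3 * sizeof(u32), /* query, support, control */
                .cap.pointer = capbuf,
        };
        acpi_status status = acpi_run_osc(handle, &ctx);

        if (ACPI_SUCCESS(status)) {
                /* the returned buffer mirrors capbuf; word 2 is the result */
                *result = *((u32 *)ctx.ret.pointer + 2);
                kfree(ctx.ret.pointer);
        }
        return status;
}
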
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index cb4283f5a79..41731236f9a 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -353,7 +353,7 @@ static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
PDE(inode)->data);
}
-static int acpi_processor_add_fs(struct acpi_device *device)
+static int __cpuinit acpi_processor_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *entry = NULL;
@@ -722,7 +722,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
saved = pr->performance_platform_limit;
- acpi_processor_ppc_has_changed(pr);
+ acpi_processor_ppc_has_changed(pr, 1);
if (saved == pr->performance_platform_limit)
break;
acpi_bus_generate_proc_event(device, event,
@@ -758,7 +758,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
struct acpi_processor *pr = per_cpu(processors, cpu);
if (action == CPU_ONLINE && pr) {
- acpi_processor_ppc_has_changed(pr);
+ acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_cst_has_changed(pr);
acpi_processor_tstate_has_changed(pr);
}
@@ -830,7 +830,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
arch_acpi_processor_cleanup_pdc(pr);
#ifdef CONFIG_CPU_FREQ
- acpi_processor_ppc_has_changed(pr);
+ acpi_processor_ppc_has_changed(pr, 0);
#endif
acpi_processor_get_throttling_info(pr);
acpi_processor_get_limit_info(pr);
@@ -845,7 +845,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
goto err_power_exit;
}
- dev_info(&device->dev, "registered as cooling_device%d\n",
+ dev_dbg(&device->dev, "registered as cooling_device%d\n",
pr->cdev->id);
result = sysfs_create_link(&device->dev.kobj,
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index bbd066e7f85..d1676b1754d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -164,7 +164,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
pr->power.timer_broadcast_on_state = state;
}
-static void lapic_timer_propagate_broadcast(void *arg)
+static void __lapic_timer_propagate_broadcast(void *arg)
{
struct acpi_processor *pr = (struct acpi_processor *) arg;
unsigned long reason;
@@ -175,6 +175,12 @@ static void lapic_timer_propagate_broadcast(void *arg)
clockevents_notify(reason, &pr->id);
}
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
+{
+ smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
+ (void *)pr, 1);
+}
+
/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
@@ -638,8 +644,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
working++;
}
- smp_call_function_single(pr->id, lapic_timer_propagate_broadcast,
- pr, 1);
+ lapic_timer_propagate_broadcast(pr);
return (working);
}
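
The processor_idle.c change folds the smp_call_function_single() call into a wrapper so callers no longer need to know that the clockevents notification has to run on the target CPU. A minimal, self-contained sketch of that wrapper pattern (names are illustrative):

#include <linux/smp.h>

static void __example_on_target_cpu(void *info)
{
        int *where = info;

        /* Runs on the CPU named in the smp_call_function_single() call. */
        *where = smp_processor_id();
}

static int example_run_on_cpu(int cpu)
{
        int where = -1;

        /* wait == 1: do not return until the remote function has finished. */
        smp_call_function_single(cpu, __example_on_target_cpu, &where, 1);
        return where;
}
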
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 8ba0ed0b9dd..2cabadcc4d8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -152,21 +152,78 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
return 0;
}
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
+/*
+ * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
+ * @handle: ACPI processor handle
+ * @status: the status code of _PPC evaluation
+ * 0: success. OSPM is now using the performance state specified.
+ * 1: failure. OSPM has not changed the number of P-states in use
+ */
+static void acpi_processor_ppc_ost(acpi_handle handle, int status)
+{
+ union acpi_object params[2] = {
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ };
+ struct acpi_object_list arg_list = {2, params};
+ acpi_handle temp;
+
+ params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
+ params[1].integer.value = status;
+
+ /* when there is no _OST, skip it */
+ if (ACPI_FAILURE(acpi_get_handle(handle, "_OST", &temp)))
+ return;
+
+ acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+ return;
+}
+
+int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
int ret;
- if (ignore_ppc)
+ if (ignore_ppc) {
+ /*
+ * The _OST object is evaluated only for a notification
+ * event; otherwise it is skipped.
+ */
+ if (event_flag)
+ acpi_processor_ppc_ost(pr->handle, 1);
return 0;
+ }
ret = acpi_processor_get_platform_limit(pr);
-
+ /*
+ * The _OST object is evaluated only for a notification
+ * event; otherwise it is skipped.
+ */
+ if (event_flag) {
+ if (ret < 0)
+ acpi_processor_ppc_ost(pr->handle, 1);
+ else
+ acpi_processor_ppc_ost(pr->handle, 0);
+ }
if (ret < 0)
return (ret);
else
return cpufreq_update_policy(pr->id);
}
+int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
+{
+ struct acpi_processor *pr;
+
+ pr = per_cpu(processors, cpu);
+ if (!pr || !pr->performance || !pr->performance->state_count)
+ return -ENODEV;
+ *limit = pr->performance->states[pr->performance_platform_limit].
+ core_frequency * 1000;
+ return 0;
+}
+EXPORT_SYMBOL(acpi_processor_get_bios_limit);
+
void acpi_processor_ppc_init(void)
{
if (!cpufreq_register_notifier
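
acpi_processor_get_bios_limit() above lets cpufreq code read the BIOS-imposed (_PPC) frequency ceiling in kHz. A hedged sketch of a consumer, assuming the prototype is exposed through acpi/processor.h:

#include <acpi/processor.h>

/* Return the platform frequency ceiling for a CPU in kHz,
 * or 0 when the platform does not impose one. */
static unsigned int example_bios_limit_khz(int cpu)
{
        unsigned int limit;

        if (acpi_processor_get_bios_limit(cpu, &limit))
                return 0;
        return limit;
}
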
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 65f67815902..9073ada8883 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1052,6 +1052,13 @@ static int acpi_thermal_trip_seq_show(struct seq_file *seq, void *offset)
acpi_device_bid(device));
}
seq_puts(seq, "\n");
+ } else {
+ seq_printf(seq, "passive (forced):");
+ if (tz->thermal_zone->forced_passive)
+ seq_printf(seq, " %i C\n",
+ tz->thermal_zone->forced_passive / 1000);
+ else
+ seq_printf(seq, "<not set>\n");
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 676f08b004b..85844d05384 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -790,5 +790,15 @@ config PATA_BF54X
If unsure, say N.
+config PATA_MACIO
+ tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
+ depends on PPC_PMAC
+ help
+ Most IDE capable PowerMacs have IDE busses driven by a variant
+ of this controller which is part of the Apple chipset used on
+ most PowerMac models. Some models have multiple busses using
+ different chipsets, though generally, MacIO is one of them.
+
+
endif # ATA_SFF
endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d909435e9d8..fc936d4471d 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SATA_MV) += sata_mv.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+obj-$(CONFIG_PATA_MACIO) += pata_macio.o
obj-$(CONFIG_PATA_ALI) += pata_ali.o
obj-$(CONFIG_PATA_AMD) += pata_amd.o
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1683ebda900..f4ea5a8c325 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3022,7 +3022,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case WRITE_16:
return ata_scsi_rw_xlat;
- case 0x93 /*WRITE_SAME_16*/:
+ case WRITE_SAME_16:
return ata_scsi_write_same_xlat;
case SYNCHRONIZE_CACHE:
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index efa8773bef5..741065c9da6 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2275,7 +2275,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
ap = qc->ap;
/* Drain up to 64K of data before we give up this recovery method */
for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
- && count < 32768; count++)
+ && count < 65536; count += 2)
ioread16(ap->ioaddr.data_addr);
/* Can become DEBUG later */
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index c4b47a3e544..02c81f12c70 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1557,6 +1557,25 @@ static unsigned short atapi_io_port[] = {
P_ATAPI_DMARQ,
P_ATAPI_INTRQ,
P_ATAPI_IORDY,
+ P_ATAPI_D0A,
+ P_ATAPI_D1A,
+ P_ATAPI_D2A,
+ P_ATAPI_D3A,
+ P_ATAPI_D4A,
+ P_ATAPI_D5A,
+ P_ATAPI_D6A,
+ P_ATAPI_D7A,
+ P_ATAPI_D8A,
+ P_ATAPI_D9A,
+ P_ATAPI_D10A,
+ P_ATAPI_D11A,
+ P_ATAPI_D12A,
+ P_ATAPI_D13A,
+ P_ATAPI_D14A,
+ P_ATAPI_D15A,
+ P_ATAPI_A0A,
+ P_ATAPI_A1A,
+ P_ATAPI_A2A,
0
};
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index dadfc358ba1..0efb1f58f25 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -31,7 +31,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_cmd64x"
-#define DRV_VERSION "0.3.1"
+#define DRV_VERSION "0.2.5"
/*
* CMD64x specific registers definition.
@@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
/* Merge the control bits */
regU |= 1 << adev->devno; /* UDMA on */
- if (adev->dma_mode > 2) /* 15nS timing */
+ if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */
regU |= 4 << adev->devno;
} else {
regU &= ~ (1 << adev->devno); /* UDMA off */
@@ -254,109 +254,17 @@ static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
}
/**
- * cmd64x_bmdma_stop - DMA stop callback
+ * cmd646r1_dma_stop - DMA stop callback
* @qc: Command in progress
*
- * Track the completion of live DMA commands and clear the
- * host->private_data DMA tracking flag as we do.
+ * Stub for now while investigating the r1 quirk in the old driver.
*/
-static void cmd64x_bmdma_stop(struct ata_queued_cmd *qc)
+static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
{
- struct ata_port *ap = qc->ap;
ata_bmdma_stop(qc);
- WARN_ON(ap->host->private_data != ap);
- ap->host->private_data = NULL;
-}
-
-/**
- * cmd64x_qc_defer - Defer logic for chip limits
- * @qc: queued command
- *
- * Decide whether we can issue the command. Called under the host lock.
- */
-
-static int cmd64x_qc_defer(struct ata_queued_cmd *qc)
-{
- struct ata_host *host = qc->ap->host;
- struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
- int rc;
- int dma = 0;
-
- /* Apply the ATA rules first */
- rc = ata_std_qc_defer(qc);
- if (rc)
- return rc;
-
- if (qc->tf.protocol == ATAPI_PROT_DMA ||
- qc->tf.protocol == ATA_PROT_DMA)
- dma = 1;
-
- /* If the other port is not live then issue the command */
- if (alt == NULL || !alt->qc_active) {
- if (dma)
- host->private_data = qc->ap;
- return 0;
- }
- /* If there is a live DMA command then wait */
- if (host->private_data != NULL)
- return ATA_DEFER_PORT;
- if (dma)
- /* Cannot overlap our DMA command */
- return ATA_DEFER_PORT;
- return 0;
}
-/**
- * cmd64x_interrupt - ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
- *
- * Our interrupt handler for PCI IDE devices. Calls
- * ata_sff_host_intr() for each port that is flagging an IRQ. We cannot
- * use the defaults as we need to avoid touching status/altstatus during
- * a DMA.
- *
- * LOCKING:
- * Obtains host lock during operation.
- *
- * RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
- */
-irqreturn_t cmd64x_interrupt(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct pci_dev *pdev = to_pci_dev(host->dev);
- unsigned int i;
- unsigned int handled = 0;
- unsigned long flags;
- static const u8 irq_reg[2] = { CFR, ARTTIM23 };
- static const u8 irq_mask[2] = { 1 << 2, 1 << 4 };
-
- /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
- spin_lock_irqsave(&host->lock, flags);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
- u8 reg;
-
- pci_read_config_byte(pdev, irq_reg[i], &reg);
- ap = host->ports[i];
- if (ap && (reg & irq_mask[i]) &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
-
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE))
- handled |= ata_sff_host_intr(ap, qc);
- }
- }
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- return IRQ_RETVAL(handled);
-}
static struct scsi_host_template cmd64x_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -365,8 +273,6 @@ static const struct ata_port_operations cmd64x_base_ops = {
.inherits = &ata_bmdma_port_ops,
.set_piomode = cmd64x_set_piomode,
.set_dmamode = cmd64x_set_dmamode,
- .bmdma_stop = cmd64x_bmdma_stop,
- .qc_defer = cmd64x_qc_defer,
};
static struct ata_port_operations cmd64x_port_ops = {
@@ -376,6 +282,7 @@ static struct ata_port_operations cmd64x_port_ops = {
static struct ata_port_operations cmd646r1_port_ops = {
.inherits = &cmd64x_base_ops,
+ .bmdma_stop = cmd646r1_bmdma_stop,
.cable_detect = ata_cable_40wire,
};
@@ -383,7 +290,6 @@ static struct ata_port_operations cmd648_port_ops = {
.inherits = &cmd64x_base_ops,
.bmdma_stop = cmd648_bmdma_stop,
.cable_detect = cmd648_cable_detect,
- .qc_defer = ata_std_qc_defer
};
static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -432,7 +338,6 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
u8 mrdmode;
int rc;
- struct ata_host *host;
rc = pcim_enable_device(pdev);
if (rc)
@@ -450,25 +355,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[0] = &cmd_info[3];
}
-
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pdev, MRDMODE, &mrdmode);
mrdmode &= ~ 0x30; /* IRQ set up */
mrdmode |= 0x02; /* Memory read line enable */
pci_write_config_byte(pdev, MRDMODE, mrdmode);
+ /* Force PIO 0 here.. */
+
/* PPC specific fixup copied from old driver */
#ifdef CONFIG_PPC
pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
#endif
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
- if (rc)
- return rc;
- /* We use this pointer to track the AP which has DMA running */
- host->private_data = NULL;
- pci_set_master(pdev);
- return ata_pci_sff_activate_host(host, cmd64x_interrupt, &cmd64x_sht);
+ return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL);
}
#ifdef CONFIG_PM
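
With the DMA-tracking qc_defer logic and the private interrupt handler removed, cmd64x probing collapses back to the stock SFF helper. The general shape of such a minimal init_one() (names and transfer modes here are illustrative, error handling trimmed) is:

#include <linux/pci.h>
#include <linux/libata.h>

static struct scsi_host_template example_sht = {
        ATA_BMDMA_SHT("example"),
};

static struct ata_port_operations example_port_ops = {
        .inherits = &ata_bmdma_port_ops,
};

static const struct ata_port_info example_info = {
        .flags      = ATA_FLAG_SLAVE_POSS,
        .pio_mask   = ATA_PIO4,
        .mwdma_mask = ATA_MWDMA2,
        .port_ops   = &example_port_ops,
};

static int example_init_one(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        const struct ata_port_info *ppi[] = { &example_info, NULL };
        int rc = pcim_enable_device(pdev);

        if (rc)
                return rc;
        /* Registers the host and wires up the default SFF interrupt handler. */
        return ata_pci_sff_init_one(pdev, ppi, &example_sht, NULL);
}
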
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 9a09a1b11ca..dd26bc73bd9 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -8,7 +8,7 @@
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
*
*
* TODO
@@ -25,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.7"
+#define DRV_VERSION "0.3.8"
enum {
HPT_PCI_FAST = (1 << 31),
@@ -264,7 +264,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
static void hpt3x2n_set_clock(struct ata_port *ap, int source)
{
- void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+ void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8;
/* Tristate the bus */
iowrite8(0x80, bmdma+0x73);
@@ -274,9 +274,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
iowrite8(source, bmdma+0x7B);
iowrite8(0xC0, bmdma+0x79);
- /* Reset state machines */
- iowrite8(0x37, bmdma+0x70);
- iowrite8(0x37, bmdma+0x74);
+ /* Reset state machines, avoid enabling the disabled channels */
+ iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70);
+ iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74);
/* Complete reset */
iowrite8(0x00, bmdma+0x79);
@@ -286,21 +286,10 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source)
iowrite8(0x00, bmdma+0x77);
}
-/* Check if our partner interface is busy */
-
-static int hpt3x2n_pair_idle(struct ata_port *ap)
-{
- struct ata_host *host = ap->host;
- struct ata_port *pair = host->ports[ap->port_no ^ 1];
-
- if (pair->hsm_task_state == HSM_ST_IDLE)
- return 1;
- return 0;
-}
-
static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
{
long flags = (long)ap->host->private_data;
+
/* See if we should use the DPLL */
if (writing)
return USE_DPLL; /* Needed for write */
@@ -309,20 +298,35 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing)
return 0;
}
+static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_port *alt = ap->host->ports[ap->port_no ^ 1];
+ int rc, flags = (long)ap->host->private_data;
+ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
+
+ /* First apply the usual rules */
+ rc = ata_std_qc_defer(qc);
+ if (rc != 0)
+ return rc;
+
+ if ((flags & USE_DPLL) != dpll && alt->qc_active)
+ return ATA_DEFER_PORT;
+ return 0;
+}
+
static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc)
{
- struct ata_taskfile *tf = &qc->tf;
struct ata_port *ap = qc->ap;
int flags = (long)ap->host->private_data;
+ int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE);
- if (hpt3x2n_pair_idle(ap)) {
- int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
- if ((flags & USE_DPLL) != dpll) {
- if (dpll == 1)
- hpt3x2n_set_clock(ap, 0x21);
- else
- hpt3x2n_set_clock(ap, 0x23);
- }
+ if ((flags & USE_DPLL) != dpll) {
+ flags &= ~USE_DPLL;
+ flags |= dpll;
+ ap->host->private_data = (void *)(long)flags;
+
+ hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23);
}
return ata_sff_qc_issue(qc);
}
@@ -339,6 +343,8 @@ static struct ata_port_operations hpt3x2n_port_ops = {
.inherits = &ata_bmdma_port_ops,
.bmdma_stop = hpt3x2n_bmdma_stop,
+
+ .qc_defer = hpt3x2n_qc_defer,
.qc_issue = hpt3x2n_qc_issue,
.cable_detect = hpt3x2n_cable_detect,
@@ -454,7 +460,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
unsigned int f_low, f_high;
int adjust;
unsigned long iobase = pci_resource_start(dev, 4);
- void *hpriv = NULL;
+ void *hpriv = (void *)USE_DPLL;
int rc;
rc = pcim_enable_device(dev);
@@ -539,7 +545,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* Set our private data up. We only need a few flags so we use
it directly */
if (pci_mhz > 60) {
- hpriv = (void *)PCI66;
+ hpriv = (void *)(PCI66 | USE_DPLL);
/*
* On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
* the MISC. register to stretch the UltraDMA Tss timing.
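
The hpt3x2n changes stop peeking at the partner port's state machine and instead record the currently selected clock source as a flag word stashed in host->private_data, returning ATA_DEFER_PORT from qc_defer whenever issuing a command would need a clock switch while the other port is active. A tiny sketch of the flag-in-private_data idiom (illustrative only):

#include <linux/libata.h>

/* Keep a small flag word in host->private_data by casting through
 * long, as the hpt3x2n patch does for its PCI66/USE_DPLL flags. */
static void example_set_host_flags(struct ata_host *host, int flags)
{
        host->private_data = (void *)(long)flags;
}

static int example_get_host_flags(struct ata_host *host)
{
        return (int)(long)host->private_data;
}
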
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
new file mode 100644
index 00000000000..4cc7bbd10ec
--- /dev/null
+++ b/drivers/ata/pata_macio.c
@@ -0,0 +1,1427 @@
+/*
+ * Libata based driver for Apple "macio" family of PATA controllers
+ *
+ * Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
+ * <benh@kernel.crashing.org>
+ *
+ * Some bits and pieces from drivers/ide/ppc/pmac.c
+ *
+ */
+
+#undef DEBUG
+#undef DEBUG_DMA
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+
+#include <asm/macio.h>
+#include <asm/io.h>
+#include <asm/dbdma.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/mediabay.h>
+
+#ifdef DEBUG_DMA
+#define dev_dbgdma(dev, format, arg...) \
+ dev_printk(KERN_DEBUG , dev , format , ## arg)
+#else
+#define dev_dbgdma(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
+#endif
+
+#define DRV_NAME "pata_macio"
+#define DRV_VERSION "0.9"
+
+/* Models of macio ATA controller */
+enum {
+ controller_ohare, /* OHare based */
+ controller_heathrow, /* Heathrow/Paddington */
+ controller_kl_ata3, /* KeyLargo ATA-3 */
+ controller_kl_ata4, /* KeyLargo ATA-4 */
+ controller_un_ata6, /* UniNorth2 ATA-6 */
+ controller_k2_ata6, /* K2 ATA-6 */
+ controller_sh_ata6, /* Shasta ATA-6 */
+};
+
+static const char* macio_ata_names[] = {
+ "OHare ATA", /* OHare based */
+ "Heathrow ATA", /* Heathrow/Paddington */
+ "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
+ "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
+ "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
+ "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
+ "Shasta ATA-6", /* Shasta ATA-6 (UDMA/133) */
+};
+
+/*
+ * Extra registers, both 32-bit little-endian
+ */
+#define IDE_TIMING_CONFIG 0x200
+#define IDE_INTERRUPT 0x300
+
+/* Kauai (U2) ATA has different register setup */
+#define IDE_KAUAI_PIO_CONFIG 0x200
+#define IDE_KAUAI_ULTRA_CONFIG 0x210
+#define IDE_KAUAI_POLL_CONFIG 0x220
+
+/*
+ * Timing configuration register definitions
+ */
+
+/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
+#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
+#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
+#define IDE_SYSCLK_NS 30 /* 33Mhz cell */
+#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
+
+/* 133Mhz cell, found in shasta.
+ * See comments about 100 Mhz Uninorth 2...
+ * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
+ * weird and I don't know why... at this stage
+ */
+#define TR_133_PIOREG_PIO_MASK 0xff000fff
+#define TR_133_PIOREG_MDMA_MASK 0x00fff800
+#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
+#define TR_133_UDMAREG_UDMA_EN 0x00000001
+
+/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
+ * (106b/0033) on the uninorth or K2 internal PCI bus and its clock is
+ * controlled like gem or fw. It appears to be an evolution of keylargo
+ * ATA4 with a timing register extended to 2x32bit registers (one
+ * for PIO & MWDMA and one for UDMA), and a similar DBDMA channel.
+ * It has its own local feature control register as well.
+ *
+ * After scratching my head over the timing values, at least for PIO
+ * and MDMA, I think I've figured the format of the timing register,
+ * though I use pre-calculated tables for UDMA as usual...
+ */
+#define TR_100_PIO_ADDRSETUP_MASK 0xff000000 /* Size of field unknown */
+#define TR_100_PIO_ADDRSETUP_SHIFT 24
+#define TR_100_MDMA_MASK 0x00fff000
+#define TR_100_MDMA_RECOVERY_MASK 0x00fc0000
+#define TR_100_MDMA_RECOVERY_SHIFT 18
+#define TR_100_MDMA_ACCESS_MASK 0x0003f000
+#define TR_100_MDMA_ACCESS_SHIFT 12
+#define TR_100_PIO_MASK 0xff000fff
+#define TR_100_PIO_RECOVERY_MASK 0x00000fc0
+#define TR_100_PIO_RECOVERY_SHIFT 6
+#define TR_100_PIO_ACCESS_MASK 0x0000003f
+#define TR_100_PIO_ACCESS_SHIFT 0
+
+#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
+#define TR_100_UDMAREG_UDMA_EN 0x00000001
+
+
+/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
+ * 40 connector cable and to 4 on 80 connector one.
+ * Clock unit is 15ns (66Mhz)
+ *
+ * 3 Values can be programmed:
+ * - Write data setup, which appears to match the cycle time. They
+ * also call it DIOW setup.
+ * - Ready to pause time (from spec)
+ * - Address setup. That one is weird. I don't see where exactly
+ * it fits in UDMA cycles, I got its name from an obscure piece
+ * of commented out code in Darwin. They leave it at 0, we do as
+ * well, despite a comment that would lead one to think it has a
+ * min value of 45ns.
+ * Apple also adds 60ns to the write data setup (or cycle time ?) on
+ * reads.
+ */
+#define TR_66_UDMA_MASK 0xfff00000
+#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
+#define TR_66_PIO_ADDRSETUP_MASK 0xe0000000 /* Address setup */
+#define TR_66_PIO_ADDRSETUP_SHIFT 29
+#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
+#define TR_66_UDMA_RDY2PAUS_SHIFT 25
+#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
+#define TR_66_UDMA_WRDATASETUP_SHIFT 21
+#define TR_66_MDMA_MASK 0x000ffc00
+#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
+#define TR_66_MDMA_RECOVERY_SHIFT 15
+#define TR_66_MDMA_ACCESS_MASK 0x00007c00
+#define TR_66_MDMA_ACCESS_SHIFT 10
+#define TR_66_PIO_MASK 0xe00003ff
+#define TR_66_PIO_RECOVERY_MASK 0x000003e0
+#define TR_66_PIO_RECOVERY_SHIFT 5
+#define TR_66_PIO_ACCESS_MASK 0x0000001f
+#define TR_66_PIO_ACCESS_SHIFT 0
+
+/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
+ * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
+ *
+ * The access time and recovery time can be programmed. Some older
+ * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
+ * the same here for safety against broken old hardware ;)
+ * The HalfTick bit, when set, adds half a clock (15ns) to the access
+ * time and removes one from recovery. It's not supported on KeyLargo
+ * implementation afaik. The E bit appears to be set for PIO mode 0 and
+ * is used to reach long timings used in this mode.
+ */
+#define TR_33_MDMA_MASK 0x003ff800
+#define TR_33_MDMA_RECOVERY_MASK 0x001f0000
+#define TR_33_MDMA_RECOVERY_SHIFT 16
+#define TR_33_MDMA_ACCESS_MASK 0x0000f800
+#define TR_33_MDMA_ACCESS_SHIFT 11
+#define TR_33_MDMA_HALFTICK 0x00200000
+#define TR_33_PIO_MASK 0x000007ff
+#define TR_33_PIO_E 0x00000400
+#define TR_33_PIO_RECOVERY_MASK 0x000003e0
+#define TR_33_PIO_RECOVERY_SHIFT 5
+#define TR_33_PIO_ACCESS_MASK 0x0000001f
+#define TR_33_PIO_ACCESS_SHIFT 0
+
+/*
+ * Interrupt register definitions. Only present on newer cells
+ * (Keylargo and later afaik) so we don't use it.
+ */
+#define IDE_INTR_DMA 0x80000000
+#define IDE_INTR_DEVICE 0x40000000
+
+/*
+ * FCR Register on Kauai. Not sure what bit 0x4 is ...
+ */
+#define KAUAI_FCR_UATA_MAGIC 0x00000004
+#define KAUAI_FCR_UATA_RESET_N 0x00000002
+#define KAUAI_FCR_UATA_ENABLE 0x00000001
+
+
+/* Allow up to 256 DBDMA commands per xfer */
+#define MAX_DCMDS 256
+
+/* Don't let a DMA segment go all the way to 64K */
+#define MAX_DBDMA_SEG 0xff00
+
+
+/*
+ * Wait 1s for the disk to answer on the IDE bus after a hard reset
+ * of the device (via GPIO/FCR).
+ *
+ * Some devices seem to "pollute" the bus even after dropping
+ * the BSY bit (typically some combo drives slave on the UDMA
+ * bus) after a hard reset. Since we hard reset all drives on
+ * KeyLargo ATA66, we have to keep that delay around. I may end
+ * up not hard resetting anymore on these and keep the delay only
+ * for older interfaces instead (we have to reset when coming
+ * from MacOS...) --BenH.
+ */
+#define IDE_WAKEUP_DELAY_MS 1000
+
+struct pata_macio_timing;
+
+struct pata_macio_priv {
+ int kind;
+ int aapl_bus_id;
+ int mediabay : 1;
+ struct device_node *node;
+ struct macio_dev *mdev;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int irq;
+ u32 treg[2][2];
+ void __iomem *tfregs;
+ void __iomem *kauai_fcr;
+ struct dbdma_cmd * dma_table_cpu;
+ dma_addr_t dma_table_dma;
+ struct ata_host *host;
+ const struct pata_macio_timing *timings;
+};
+
+/* Previous variants of this driver used to calculate timings
+ * for various variants of the chip and use tables for others.
+ *
+ * Not only was this confusing, but in addition, it isn't clear
+ * whether our calculation code was correct. It didn't entirely
+ * match the darwin code and whatever documentation I could find
+ * on these cells
+ *
+ * I decided to entirely rely on a table instead for this version
+ * of the driver. Also, because I don't really care about derated
+ * modes and really old HW other than making it work, I'm not going
+ * to calculate / snoop timing values for something else than the
+ * standard modes.
+ */
+struct pata_macio_timing {
+ int mode;
+ u32 reg1; /* Bits to set in first timing reg */
+ u32 reg2; /* Bits to set in second timing reg */
+};
+
+static const struct pata_macio_timing pata_macio_ohare_timings[] = {
+ { XFER_PIO_0, 0x00000526, 0, },
+ { XFER_PIO_1, 0x00000085, 0, },
+ { XFER_PIO_2, 0x00000025, 0, },
+ { XFER_PIO_3, 0x00000025, 0, },
+ { XFER_PIO_4, 0x00000025, 0, },
+ { XFER_MW_DMA_0, 0x00074000, 0, },
+ { XFER_MW_DMA_1, 0x00221000, 0, },
+ { XFER_MW_DMA_2, 0x00211000, 0, },
+ { -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
+ { XFER_PIO_0, 0x00000526, 0, },
+ { XFER_PIO_1, 0x00000085, 0, },
+ { XFER_PIO_2, 0x00000025, 0, },
+ { XFER_PIO_3, 0x00000025, 0, },
+ { XFER_PIO_4, 0x00000025, 0, },
+ { XFER_MW_DMA_0, 0x00074000, 0, },
+ { XFER_MW_DMA_1, 0x00221000, 0, },
+ { XFER_MW_DMA_2, 0x00211000, 0, },
+ { -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_kl33_timings[] = {
+ { XFER_PIO_0, 0x00000526, 0, },
+ { XFER_PIO_1, 0x00000085, 0, },
+ { XFER_PIO_2, 0x00000025, 0, },
+ { XFER_PIO_3, 0x00000025, 0, },
+ { XFER_PIO_4, 0x00000025, 0, },
+ { XFER_MW_DMA_0, 0x00084000, 0, },
+ { XFER_MW_DMA_1, 0x00021800, 0, },
+ { XFER_MW_DMA_2, 0x00011800, 0, },
+ { -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_kl66_timings[] = {
+ { XFER_PIO_0, 0x0000038c, 0, },
+ { XFER_PIO_1, 0x0000020a, 0, },
+ { XFER_PIO_2, 0x00000127, 0, },
+ { XFER_PIO_3, 0x000000c6, 0, },
+ { XFER_PIO_4, 0x00000065, 0, },
+ { XFER_MW_DMA_0, 0x00084000, 0, },
+ { XFER_MW_DMA_1, 0x00029800, 0, },
+ { XFER_MW_DMA_2, 0x00019400, 0, },
+ { XFER_UDMA_0, 0x19100000, 0, },
+ { XFER_UDMA_1, 0x14d00000, 0, },
+ { XFER_UDMA_2, 0x10900000, 0, },
+ { XFER_UDMA_3, 0x0c700000, 0, },
+ { XFER_UDMA_4, 0x0c500000, 0, },
+ { -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_kauai_timings[] = {
+ { XFER_PIO_0, 0x08000a92, 0, },
+ { XFER_PIO_1, 0x0800060f, 0, },
+ { XFER_PIO_2, 0x0800038b, 0, },
+ { XFER_PIO_3, 0x05000249, 0, },
+ { XFER_PIO_4, 0x04000148, 0, },
+ { XFER_MW_DMA_0, 0x00618000, 0, },
+ { XFER_MW_DMA_1, 0x00209000, 0, },
+ { XFER_MW_DMA_2, 0x00148000, 0, },
+ { XFER_UDMA_0, 0, 0x000070c1, },
+ { XFER_UDMA_1, 0, 0x00005d81, },
+ { XFER_UDMA_2, 0, 0x00004a61, },
+ { XFER_UDMA_3, 0, 0x00003a51, },
+ { XFER_UDMA_4, 0, 0x00002a31, },
+ { XFER_UDMA_5, 0, 0x00002921, },
+ { -1, 0, 0 }
+};
+
+static const struct pata_macio_timing pata_macio_shasta_timings[] = {
+ { XFER_PIO_0, 0x0a000c97, 0, },
+ { XFER_PIO_1, 0x07000712, 0, },
+ { XFER_PIO_2, 0x040003cd, 0, },
+ { XFER_PIO_3, 0x0500028b, 0, },
+ { XFER_PIO_4, 0x0400010a, 0, },
+ { XFER_MW_DMA_0, 0x00820800, 0, },
+ { XFER_MW_DMA_1, 0x0028b000, 0, },
+ { XFER_MW_DMA_2, 0x001ca000, 0, },
+ { XFER_UDMA_0, 0, 0x00035901, },
+ { XFER_UDMA_1, 0, 0x000348b1, },
+ { XFER_UDMA_2, 0, 0x00033881, },
+ { XFER_UDMA_3, 0, 0x00033861, },
+ { XFER_UDMA_4, 0, 0x00033841, },
+ { XFER_UDMA_5, 0, 0x00033031, },
+ { XFER_UDMA_6, 0, 0x00033021, },
+ { -1, 0, 0 }
+};
+
+static const struct pata_macio_timing *pata_macio_find_timing(
+ struct pata_macio_priv *priv,
+ int mode)
+{
+ int i;
+
+ for (i = 0; priv->timings[i].mode > 0; i++) {
+ if (priv->timings[i].mode == mode)
+ return &priv->timings[i];
+ }
+ return NULL;
+}
+
+
+static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
+{
+ struct pata_macio_priv *priv = ap->private_data;
+ void __iomem *rbase = ap->ioaddr.cmd_addr;
+
+ if (priv->kind == controller_sh_ata6 ||
+ priv->kind == controller_un_ata6 ||
+ priv->kind == controller_k2_ata6) {
+ writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
+ writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
+ } else
+ writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
+}
+
+static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
+{
+ ata_sff_dev_select(ap, device);
+
+ /* Apply timings */
+ pata_macio_apply_timings(ap, device);
+}
+
+static void pata_macio_set_timings(struct ata_port *ap,
+ struct ata_device *adev)
+{
+ struct pata_macio_priv *priv = ap->private_data;
+ const struct pata_macio_timing *t;
+
+ dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
+ adev->devno,
+ adev->pio_mode,
+ ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
+ adev->dma_mode,
+ ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));
+
+ /* First clear timings */
+ priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;
+
+ /* Now get the PIO timings */
+ t = pata_macio_find_timing(priv, adev->pio_mode);
+ if (t == NULL) {
+ dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
+ adev->pio_mode);
+ t = pata_macio_find_timing(priv, XFER_PIO_0);
+ }
+ BUG_ON(t == NULL);
+
+ /* PIO timings only ever use the first treg */
+ priv->treg[adev->devno][0] |= t->reg1;
+
+ /* Now get DMA timings */
+ t = pata_macio_find_timing(priv, adev->dma_mode);
+ if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
+ dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
+ t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
+ }
+ BUG_ON(t == NULL);
+
+ /* DMA timings can use both tregs */
+ priv->treg[adev->devno][0] |= t->reg1;
+ priv->treg[adev->devno][1] |= t->reg2;
+
+ dev_dbg(priv->dev, " -> %08x %08x\n",
+ priv->treg[adev->devno][0],
+ priv->treg[adev->devno][1]);
+
+ /* Apply to hardware */
+ pata_macio_apply_timings(ap, adev->devno);
+}
+
+/*
+ * Blast some well known "safe" values to the timing registers at init or
+ * wakeup from sleep time, before we do real calculation
+ */
+static void pata_macio_default_timings(struct pata_macio_priv *priv)
+{
+ unsigned int value, value2 = 0;
+
+ switch(priv->kind) {
+ case controller_sh_ata6:
+ value = 0x0a820c97;
+ value2 = 0x00033031;
+ break;
+ case controller_un_ata6:
+ case controller_k2_ata6:
+ value = 0x08618a92;
+ value2 = 0x00002921;
+ break;
+ case controller_kl_ata4:
+ value = 0x0008438c;
+ break;
+ case controller_kl_ata3:
+ value = 0x00084526;
+ break;
+ case controller_heathrow:
+ case controller_ohare:
+ default:
+ value = 0x00074526;
+ break;
+ }
+ priv->treg[0][0] = priv->treg[1][0] = value;
+ priv->treg[0][1] = priv->treg[1][1] = value2;
+}
+
+static int pata_macio_cable_detect(struct ata_port *ap)
+{
+ struct pata_macio_priv *priv = ap->private_data;
+
+ /* Get cable type from device-tree */
+ if (priv->kind == controller_kl_ata4 ||
+ priv->kind == controller_un_ata6 ||
+ priv->kind == controller_k2_ata6 ||
+ priv->kind == controller_sh_ata6) {
+ const char* cable = of_get_property(priv->node, "cable-type",
+ NULL);
+ struct device_node *root = of_find_node_by_path("/");
+ const char *model = of_get_property(root, "model", NULL);
+
+ if (cable && !strncmp(cable, "80-", 3)) {
+ /* Some drives fail to detect the 80c cable in PowerBooks.
+ * These machines use a proprietary short IDE cable
+ * anyway.
+ */
+ if (!strncmp(model, "PowerBook", 9))
+ return ATA_CBL_PATA40_SHORT;
+ else
+ return ATA_CBL_PATA80;
+ }
+ }
+
+ /* G5s seem to have an incorrect cable type in the device-tree.
+ * Let's assume they always have an 80 conductor cable; this seems to
+ * always be the case unless the user mucked around.
+ */
+ if (of_device_is_compatible(priv->node, "K2-UATA") ||
+ of_device_is_compatible(priv->node, "shasta-ata"))
+ return ATA_CBL_PATA80;
+
+ /* Anything else is 40 connectors */
+ return ATA_CBL_PATA40;
+}
+
+static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+{
+ unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ struct ata_port *ap = qc->ap;
+ struct pata_macio_priv *priv = ap->private_data;
+ struct scatterlist *sg;
+ struct dbdma_cmd *table;
+ unsigned int si, pi;
+
+ dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
+ __func__, qc, qc->flags, write, qc->dev->devno);
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ table = (struct dbdma_cmd *) priv->dma_table_cpu;
+
+ pi = 0;
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ u32 addr, sg_len, len;
+
+ /* determine if physical DMA addr spans 64K boundary.
+ * Note h/w doesn't support 64-bit, so we unconditionally
+ * truncate dma_addr_t to u32.
+ */
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ while (sg_len) {
+ /* table overflow should never happen */
+ BUG_ON (pi++ >= MAX_DCMDS);
+
+ len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
+ st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE);
+ st_le16(&table->req_count, len);
+ st_le32(&table->phy_addr, addr);
+ table->cmd_dep = 0;
+ table->xfer_status = 0;
+ table->res_count = 0;
+ addr += len;
+ sg_len -= len;
+ ++table;
+ }
+ }
+
+ /* Should never happen according to Tejun */
+ BUG_ON(!pi);
+
+ /* Convert the last command to an input/output */
+ table--;
+ st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST);
+ table++;
+
+ /* Add the stop command to the end of the list */
+ memset(table, 0, sizeof(struct dbdma_cmd));
+ st_le16(&table->command, DBDMA_STOP);
+
+ dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+}
+
+
+static void pata_macio_freeze(struct ata_port *ap)
+{
+ struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+
+ if (dma_regs) {
+ unsigned int timeout = 1000000;
+
+ /* Make sure DMA controller is stopped */
+ writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
+ while (--timeout && (readl(&dma_regs->status) & RUN))
+ udelay(1);
+ }
+
+ ata_sff_freeze(ap);
+}
+
+
+static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pata_macio_priv *priv = ap->private_data;
+ struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+ int dev = qc->dev->devno;
+
+ dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+ /* Make sure DMA commands updates are visible */
+ writel(priv->dma_table_dma, &dma_regs->cmdptr);
+
+ /* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
+ * UDMA reads
+ */
+ if (priv->kind == controller_kl_ata4 &&
+ (priv->treg[dev][0] & TR_66_UDMA_EN)) {
+ void __iomem *rbase = ap->ioaddr.cmd_addr;
+ u32 reg = priv->treg[dev][0];
+
+ if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+ reg += 0x00800000;
+ writel(reg, rbase + IDE_TIMING_CONFIG);
+ }
+
+ /* issue r/w command */
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pata_macio_priv *priv = ap->private_data;
+ struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+
+ dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+ writel((RUN << 16) | RUN, &dma_regs->control);
+ /* Make sure it gets to the controller right now */
+ (void)readl(&dma_regs->control);
+}
+
+static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct pata_macio_priv *priv = ap->private_data;
+ struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+ unsigned int timeout = 1000000;
+
+ dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);
+
+ /* Stop the DMA engine and wait for it to full halt */
+ writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
+ while (--timeout && (readl(&dma_regs->status) & RUN))
+ udelay(1);
+}
+
+static u8 pata_macio_bmdma_status(struct ata_port *ap)
+{
+ struct pata_macio_priv *priv = ap->private_data;
+ struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
+ u32 dstat, rstat = ATA_DMA_INTR;
+ unsigned long timeout = 0;
+
+ dstat = readl(&dma_regs->status);
+
+ dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);
+
+ /* We have two things to deal with here:
+ *
+ * - The dbdma won't stop if the command was started
+ * but completed with an error without transferring all
+ * the data. This happens when bad blocks are met during
+ * a multi-block transfer.
+ *
+ * - The dbdma fifo hasn't yet finished flushing to
+ * system memory when the disk interrupt occurs.
+ *
+ */
+
+ /* First check for errors */
+ if ((dstat & (RUN|DEAD)) != RUN)
+ rstat |= ATA_DMA_ERR;
+
+ /* If ACTIVE is cleared, the STOP command has been hit and
+ * the transfer is complete. If not, we have to flush the
+ * channel.
+ */
+ if ((dstat & ACTIVE) == 0)
+ return rstat;
+
+ dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);
+
+ /* If dbdma didn't execute the STOP command yet, the
+ * active bit is still set. We consider that we aren't
+ * sharing interrupts (which is hopefully the case with
+ * those controllers) and so we just try to flush the
+ * channel for pending data in the fifo
+ */
+ udelay(1);
+ writel((FLUSH << 16) | FLUSH, &dma_regs->control);
+ for (;;) {
+ udelay(1);
+ dstat = readl(&dma_regs->status);
+ if ((dstat & FLUSH) == 0)
+ break;
+ if (++timeout > 1000) {
+ dev_warn(priv->dev, "timeout flushing DMA\n");
+ rstat |= ATA_DMA_ERR;
+ break;
+ }
+ }
+ return rstat;
+}
+
+/* port_start is when we allocate the DMA command list */
+static int pata_macio_port_start(struct ata_port *ap)
+{
+ struct pata_macio_priv *priv = ap->private_data;
+
+ if (ap->ioaddr.bmdma_addr == NULL)
+ return 0;
+
+ /* Allocate space for the DBDMA commands.
+ *
+ * The +2 is +1 for the stop command and +1 to allow for
+ * aligning the start address to a multiple of 16 bytes.
+ */
+ priv->dma_table_cpu =
+ dmam_alloc_coherent(priv->dev,
+ (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
+ &priv->dma_table_dma, GFP_KERNEL);
+ if (priv->dma_table_cpu == NULL) {
+ dev_err(priv->dev, "Unable to allocate DMA command list\n");
+ ap->ioaddr.bmdma_addr = NULL;
+ }
+ return 0;
+}
+
+static void pata_macio_irq_clear(struct ata_port *ap)
+{
+ struct pata_macio_priv *priv = ap->private_data;
+
+ /* Nothing to do here */
+
+ dev_dbgdma(priv->dev, "%s\n", __func__);
+}
+
+static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
+{
+ dev_dbg(priv->dev, "Enabling & resetting... \n");
+
+ if (priv->mediabay)
+ return;
+
+ if (priv->kind == controller_ohare && !resume) {
+ /* The code below is having trouble on some ohare machines
+ * (timing related ?). Until I can get my hands on one of these
+ * units, I'll keep the old way.
+ */
+ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
+ } else {
+ int rc;
+
+ /* Reset and enable controller */
+ rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
+ priv->node, priv->aapl_bus_id, 1);
+ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
+ priv->node, priv->aapl_bus_id, 1);
+ msleep(10);
+ /* Only bother waiting if there's a reset control */
+ if (rc == 0) {
+ ppc_md.feature_call(PMAC_FTR_IDE_RESET,
+ priv->node, priv->aapl_bus_id, 0);
+ msleep(IDE_WAKEUP_DELAY_MS);
+ }
+ }
+
+ /* If resuming a PCI device, restore the config space here */
+ if (priv->pdev && resume) {
+ int rc;
+
+ pci_restore_state(priv->pdev);
+ rc = pcim_enable_device(priv->pdev);
+ if (rc)
+ dev_printk(KERN_ERR, &priv->pdev->dev,
+ "Failed to enable device after resume (%d)\n", rc);
+ else
+ pci_set_master(priv->pdev);
+ }
+
+ /* On Kauai, initialize the FCR. We don't perform a reset; it doesn't
+ * really seem necessary, and skipping it speeds up the boot process.
+ */
+ if (priv->kauai_fcr)
+ writel(KAUAI_FCR_UATA_MAGIC |
+ KAUAI_FCR_UATA_RESET_N |
+ KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
+}
+
+/* Hook the standard slave config to fixup some HW related alignment
+ * restrictions
+ */
+static int pata_macio_slave_config(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct pata_macio_priv *priv = ap->private_data;
+ struct ata_device *dev;
+ u16 cmd;
+ int rc;
+
+ /* First call original */
+ rc = ata_scsi_slave_config(sdev);
+ if (rc)
+ return rc;
+
+ /* This is lifted from sata_nv */
+ dev = &ap->link.device[sdev->id];
+
+ /* OHare has issues with non cache aligned DMA on some chipsets */
+ if (priv->kind == controller_ohare) {
+ blk_queue_update_dma_alignment(sdev->request_queue, 31);
+ blk_queue_update_dma_pad(sdev->request_queue, 31);
+
+ /* Tell the world about it */
+ ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n");
+ return 0;
+ }
+
+ /* We only have issues with ATAPI */
+ if (dev->class != ATA_DEV_ATAPI)
+ return 0;
+
+ /* Shasta and K2 seem to have "issues" with reads ... */
+ if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
+ /* All right, these are bad; apply restrictions */
+ blk_queue_update_dma_alignment(sdev->request_queue, 15);
+ blk_queue_update_dma_pad(sdev->request_queue, 15);
+
+ /* We enable MWI and hack the cache line size directly here; these
+ * are chipset-specific rather than normal values. We happen to somewhat
+ * know what we are doing here (which is basically to do the same as
+ * Apple does and pray they did not get it wrong :-)
+ */
+ BUG_ON(!priv->pdev);
+ pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
+ pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(priv->pdev, PCI_COMMAND,
+ cmd | PCI_COMMAND_INVALIDATE);
+
+ /* Tell the world about it */
+ ata_dev_printk(dev, KERN_INFO,
+ "K2/Shasta alignment limits applied\n");
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
+{
+ int rc;
+
+ /* First, core libata suspend to do most of the work */
+ rc = ata_host_suspend(priv->host, mesg);
+ if (rc)
+ return rc;
+
+ /* Restore to default timings */
+ pata_macio_default_timings(priv);
+
+ /* Mask the interrupt. Not strictly necessary, but the old driver
+ * did it and I'd rather not change that here */
+ disable_irq(priv->irq);
+
+ /* The media bay will handle itself just fine */
+ if (priv->mediabay)
+ return 0;
+
+ /* Kauai has bus control FCRs directly here */
+ if (priv->kauai_fcr) {
+ u32 fcr = readl(priv->kauai_fcr);
+ fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
+ writel(fcr, priv->kauai_fcr);
+ }
+
+ /* For PCI, save state and disable DMA. No need to call
+ * pci_set_power_state(), the HW doesn't do D states that
+ * way, the platform code will take care of suspending the
+ * ASIC properly
+ */
+ if (priv->pdev) {
+ pci_save_state(priv->pdev);
+ pci_disable_device(priv->pdev);
+ }
+
+ /* Disable the bus on older machines and the cell on kauai */
+ ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
+ priv->aapl_bus_id, 0);
+
+ return 0;
+}
+
+static int pata_macio_do_resume(struct pata_macio_priv *priv)
+{
+ /* Reset and re-enable the HW */
+ pata_macio_reset_hw(priv, 1);
+
+ /* Sanitize drive timings */
+ pata_macio_apply_timings(priv->host->ports[0], 0);
+
+ /* We want our IRQ back ! */
+ enable_irq(priv->irq);
+
+ /* Let the libata core take it from there */
+ ata_host_resume(priv->host);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
+static struct scsi_host_template pata_macio_sht = {
+ ATA_BASE_SHT(DRV_NAME),
+ .sg_tablesize = MAX_DCMDS,
+ /* We may not need one that strict */
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = pata_macio_slave_config,
+};
+
+static struct ata_port_operations pata_macio_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .freeze = pata_macio_freeze,
+ .set_piomode = pata_macio_set_timings,
+ .set_dmamode = pata_macio_set_timings,
+ .cable_detect = pata_macio_cable_detect,
+ .sff_dev_select = pata_macio_dev_select,
+ .qc_prep = pata_macio_qc_prep,
+ .mode_filter = ata_bmdma_mode_filter,
+ .bmdma_setup = pata_macio_bmdma_setup,
+ .bmdma_start = pata_macio_bmdma_start,
+ .bmdma_stop = pata_macio_bmdma_stop,
+ .bmdma_status = pata_macio_bmdma_status,
+ .port_start = pata_macio_port_start,
+ .sff_irq_clear = pata_macio_irq_clear,
+};
+
+static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
+{
+ const int *bidp;
+
+ /* Identify the type of controller */
+ if (of_device_is_compatible(priv->node, "shasta-ata")) {
+ priv->kind = controller_sh_ata6;
+ priv->timings = pata_macio_shasta_timings;
+ } else if (of_device_is_compatible(priv->node, "kauai-ata")) {
+ priv->kind = controller_un_ata6;
+ priv->timings = pata_macio_kauai_timings;
+ } else if (of_device_is_compatible(priv->node, "K2-UATA")) {
+ priv->kind = controller_k2_ata6;
+ priv->timings = pata_macio_kauai_timings;
+ } else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
+ if (strcmp(priv->node->name, "ata-4") == 0) {
+ priv->kind = controller_kl_ata4;
+ priv->timings = pata_macio_kl66_timings;
+ } else {
+ priv->kind = controller_kl_ata3;
+ priv->timings = pata_macio_kl33_timings;
+ }
+ } else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
+ priv->kind = controller_heathrow;
+ priv->timings = pata_macio_heathrow_timings;
+ } else {
+ priv->kind = controller_ohare;
+ priv->timings = pata_macio_ohare_timings;
+ }
+
+ /* XXX FIXME --- setup priv->mediabay here */
+
+ /* Get Apple bus ID (for clock and ASIC control) */
+ bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
+ priv->aapl_bus_id = bidp ? *bidp : 0;
+
+ /* Fixup missing Apple bus ID in case of media-bay */
+ if (priv->mediabay && bidp == 0)
+ priv->aapl_bus_id = 1;
+}
+
+static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
+ void __iomem * base,
+ void __iomem * dma)
+{
+ /* cmd_addr is the base of regs for that port */
+ ioaddr->cmd_addr = base;
+
+ /* taskfile registers */
+ ioaddr->data_addr = base + (ATA_REG_DATA << 4);
+ ioaddr->error_addr = base + (ATA_REG_ERR << 4);
+ ioaddr->feature_addr = base + (ATA_REG_FEATURE << 4);
+ ioaddr->nsect_addr = base + (ATA_REG_NSECT << 4);
+ ioaddr->lbal_addr = base + (ATA_REG_LBAL << 4);
+ ioaddr->lbam_addr = base + (ATA_REG_LBAM << 4);
+ ioaddr->lbah_addr = base + (ATA_REG_LBAH << 4);
+ ioaddr->device_addr = base + (ATA_REG_DEVICE << 4);
+ ioaddr->status_addr = base + (ATA_REG_STATUS << 4);
+ ioaddr->command_addr = base + (ATA_REG_CMD << 4);
+ ioaddr->altstatus_addr = base + 0x160;
+ ioaddr->ctl_addr = base + 0x160;
+ ioaddr->bmdma_addr = dma;
+}
+
+static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
+ struct ata_port_info *pinfo)
+{
+ int i = 0;
+
+ pinfo->pio_mask = 0;
+ pinfo->mwdma_mask = 0;
+ pinfo->udma_mask = 0;
+
+ while (priv->timings[i].mode > 0) {
+ unsigned int mask = 1U << (priv->timings[i].mode & 0x0f);
+ switch(priv->timings[i].mode & 0xf0) {
+ case 0x00: /* PIO */
+ pinfo->pio_mask |= (mask >> 8);
+ break;
+ case 0x20: /* MWDMA */
+ pinfo->mwdma_mask |= mask;
+ break;
+ case 0x40: /* UDMA */
+ pinfo->udma_mask |= mask;
+ break;
+ }
+ i++;
+ }
+ dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
+ pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
+}
+
+static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
+ resource_size_t tfregs,
+ resource_size_t dmaregs,
+ resource_size_t fcregs,
+ unsigned long irq)
+{
+ struct ata_port_info pinfo;
+ const struct ata_port_info *ppi[] = { &pinfo, NULL };
+ void __iomem *dma_regs = NULL;
+
+ /* Fill up privates with various invariants collected from the
+ * device-tree
+ */
+ pata_macio_invariants(priv);
+
+ /* Make sure we have sane initial timings in the cache */
+ pata_macio_default_timings(priv);
+
+ /* Not sure what the real max is but we know it's less than 64K, let's
+ * use 64K minus 256
+ */
+ dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
+
+ /* Allocate libata host for 1 port */
+ memset(&pinfo, 0, sizeof(struct ata_port_info));
+ pmac_macio_calc_timing_masks(priv, &pinfo);
+ pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO |
+ ATA_FLAG_NO_LEGACY;
+ pinfo.port_ops = &pata_macio_ops;
+ pinfo.private_data = priv;
+
+ priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
+ if (priv->host == NULL) {
+ dev_err(priv->dev, "Failed to allocate ATA port structure\n");
+ return -ENOMEM;
+ }
+
+ /* Setup the private data in host too */
+ priv->host->private_data = priv;
+
+ /* Map base registers */
+ priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
+ if (priv->tfregs == NULL) {
+ dev_err(priv->dev, "Failed to map ATA ports\n");
+ return -ENOMEM;
+ }
+ priv->host->iomap = &priv->tfregs;
+
+ /* Map DMA regs */
+ if (dmaregs != 0) {
+ dma_regs = devm_ioremap(priv->dev, dmaregs,
+ sizeof(struct dbdma_regs));
+ if (dma_regs == NULL)
+ dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
+ }
+
+ /* If chip has local feature control, map those regs too */
+ if (fcregs != 0) {
+ priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
+ if (priv->kauai_fcr == NULL) {
+ dev_err(priv->dev, "Failed to map ATA FCR register\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Setup port data structure */
+ pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
+ priv->tfregs, dma_regs);
+ priv->host->ports[0]->private_data = priv;
+
+ /* hard-reset the controller */
+ pata_macio_reset_hw(priv, 0);
+ pata_macio_apply_timings(priv->host->ports[0], 0);
+
+ /* Enable bus master if necessary */
+ if (priv->pdev && dma_regs)
+ pci_set_master(priv->pdev);
+
+ dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
+ macio_ata_names[priv->kind], priv->aapl_bus_id);
+
+ /* Start it up */
+ priv->irq = irq;
+ return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0,
+ &pata_macio_sht);
+}
+
+static int __devinit pata_macio_attach(struct macio_dev *mdev,
+ const struct of_device_id *match)
+{
+ struct pata_macio_priv *priv;
+ resource_size_t tfregs, dmaregs = 0;
+ unsigned long irq;
+ int rc;
+
+ /* Check for broken device-trees */
+ if (macio_resource_count(mdev) == 0) {
+ dev_err(&mdev->ofdev.dev,
+ "No addresses for controller\n");
+ return -ENXIO;
+ }
+
+ /* Enable managed resources */
+ macio_enable_devres(mdev);
+
+ /* Allocate and init private data structure */
+ priv = devm_kzalloc(&mdev->ofdev.dev,
+ sizeof(struct pata_macio_priv), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&mdev->ofdev.dev,
+ "Failed to allocate private memory\n");
+ return -ENOMEM;
+ }
+ priv->node = of_node_get(mdev->ofdev.node);
+ priv->mdev = mdev;
+ priv->dev = &mdev->ofdev.dev;
+
+ /* Request memory resource for taskfile registers */
+ if (macio_request_resource(mdev, 0, "pata-macio")) {
+ dev_err(&mdev->ofdev.dev,
+ "Cannot obtain taskfile resource\n");
+ return -EBUSY;
+ }
+ tfregs = macio_resource_start(mdev, 0);
+
+ /* Request resources for DMA registers if any */
+ if (macio_resource_count(mdev) >= 2) {
+ if (macio_request_resource(mdev, 1, "pata-macio-dma"))
+ dev_err(&mdev->ofdev.dev,
+ "Cannot obtain DMA resource\n");
+ else
+ dmaregs = macio_resource_start(mdev, 1);
+ }
+
+ /*
+ * Fixup missing IRQ for some old implementations with broken
+ * device-trees.
+ *
+ * This is a bit bogus, it should be fixed in the device-tree itself,
+ * via the existing macio fixups, based on the type of interrupt
+ * controller in the machine. However, I have no test HW for this case,
+ * and this trick works well enough on those old machines...
+ */
+ if (macio_irq_count(mdev) == 0) {
+ dev_warn(&mdev->ofdev.dev,
+ "No interrupts for controller, using 13\n");
+ irq = irq_create_mapping(NULL, 13);
+ } else
+ irq = macio_irq(mdev, 0);
+
+ /* Prevent media bay callbacks until fully registered */
+ lock_media_bay(priv->mdev->media_bay);
+
+ /* Get register addresses and call common initialization */
+ rc = pata_macio_common_init(priv,
+ tfregs, /* Taskfile regs */
+ dmaregs, /* DBDMA regs */
+ 0, /* Feature control */
+ irq);
+ unlock_media_bay(priv->mdev->media_bay);
+
+ return rc;
+}
+
+static int __devexit pata_macio_detach(struct macio_dev *mdev)
+{
+ struct ata_host *host = macio_get_drvdata(mdev);
+ struct pata_macio_priv *priv = host->private_data;
+
+ lock_media_bay(priv->mdev->media_bay);
+
+ /* Make sure the mediabay callback doesn't try to access
+ * dead stuff
+ */
+ priv->host->private_data = NULL;
+
+ ata_host_detach(host);
+
+ unlock_media_bay(priv->mdev->media_bay);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
+{
+ struct ata_host *host = macio_get_drvdata(mdev);
+
+ return pata_macio_do_suspend(host->private_data, mesg);
+}
+
+static int pata_macio_resume(struct macio_dev *mdev)
+{
+ struct ata_host *host = macio_get_drvdata(mdev);
+
+ return pata_macio_do_resume(host->private_data);
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PMAC_MEDIABAY
+static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct ata_host *host = macio_get_drvdata(mdev);
+ struct ata_port *ap;
+ struct ata_eh_info *ehi;
+ struct ata_device *dev;
+ unsigned long flags;
+
+ if (!host || !host->private_data)
+ return;
+ ap = host->ports[0];
+ spin_lock_irqsave(ap->lock, flags);
+ ehi = &ap->link.eh_info;
+ if (mb_state == MB_CD) {
+ ata_ehi_push_desc(ehi, "mediabay plug");
+ ata_ehi_hotplugged(ehi);
+ ata_port_freeze(ap);
+ } else {
+ ata_ehi_push_desc(ehi, "mediabay unplug");
+ ata_for_each_dev(dev, &ap->link, ALL)
+ dev->flags |= ATA_DFLAG_DETACH;
+ ata_port_abort(ap);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
+
+}
+#endif /* CONFIG_PMAC_MEDIABAY */
+
+
+static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pata_macio_priv *priv;
+ struct device_node *np;
+ resource_size_t rbase;
+
+ /* We cannot use a MacIO controller without its OF device node */
+ np = pci_device_to_OF_node(pdev);
+ if (np == NULL) {
+ dev_err(&pdev->dev,
+ "Cannot find OF device node for controller\n");
+ return -ENODEV;
+ }
+
+ /* Check that it can be enabled */
+ if (pcim_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot enable controller PCI device\n");
+ return -ENXIO;
+ }
+
+ /* Allocate and init private data structure */
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct pata_macio_priv), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to allocate private memory\n");
+ return -ENOMEM;
+ }
+ priv->node = of_node_get(np);
+ priv->pdev = pdev;
+ priv->dev = &pdev->dev;
+
+ /* Get MMIO regions */
+ if (pci_request_regions(pdev, "pata-macio")) {
+ dev_err(&pdev->dev,
+ "Cannot obtain PCI resources\n");
+ return -EBUSY;
+ }
+
+ /* Get register addresses and call common initialization */
+ rbase = pci_resource_start(pdev, 0);
+ if (pata_macio_common_init(priv,
+ rbase + 0x2000, /* Taskfile regs */
+ rbase + 0x1000, /* DBDMA regs */
+ rbase, /* Feature control */
+ pdev->irq))
+ return -ENXIO;
+
+ return 0;
+}
+
+static void __devexit pata_macio_pci_detach(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+
+ ata_host_detach(host);
+}
+
+#ifdef CONFIG_PM
+
+static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+
+ return pata_macio_do_suspend(host->private_data, mesg);
+}
+
+static int pata_macio_pci_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+
+ return pata_macio_do_resume(host->private_data);
+}
+
+#endif /* CONFIG_PM */
+
+static struct of_device_id pata_macio_match[] =
+{
+ {
+ .name = "IDE",
+ },
+ {
+ .name = "ATA",
+ },
+ {
+ .type = "ide",
+ },
+ {
+ .type = "ata",
+ },
+ {},
+};
+
+static struct macio_driver pata_macio_driver =
+{
+ .name = "pata-macio",
+ .match_table = pata_macio_match,
+ .probe = pata_macio_attach,
+ .remove = pata_macio_detach,
+#ifdef CONFIG_PM
+ .suspend = pata_macio_suspend,
+ .resume = pata_macio_resume,
+#endif
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = pata_macio_mb_event,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+};
+
+static const struct pci_device_id pata_macio_pci_match[] = {
+ { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA), 0 },
+ { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100), 0 },
+ { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100), 0 },
+ { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA), 0 },
+ { PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA), 0 },
+ {},
+};
+
+static struct pci_driver pata_macio_pci_driver = {
+ .name = "pata-pci-macio",
+ .id_table = pata_macio_pci_match,
+ .probe = pata_macio_pci_attach,
+ .remove = pata_macio_pci_detach,
+#ifdef CONFIG_PM
+ .suspend = pata_macio_pci_suspend,
+ .resume = pata_macio_pci_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+};
+MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
+
+
+static int __init pata_macio_init(void)
+{
+ int rc;
+
+ if (!machine_is(powermac))
+ return -ENODEV;
+
+ rc = pci_register_driver(&pata_macio_pci_driver);
+ if (rc)
+ return rc;
+ rc = macio_register_driver(&pata_macio_driver);
+ if (rc) {
+ pci_unregister_driver(&pata_macio_pci_driver);
+ return rc;
+ }
+ return 0;
+}
+
+static void __exit pata_macio_exit(void)
+{
+ macio_unregister_driver(&pata_macio_driver);
+ pci_unregister_driver(&pata_macio_pci_driver);
+}
+
+module_init(pata_macio_init);
+module_exit(pata_macio_exit);
+
+MODULE_AUTHOR("Benjamin Herrenschmidt");
+MODULE_DESCRIPTION("Apple MacIO PATA driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index d6f69561dc8..37ef416c124 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -853,7 +853,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
return -EINVAL;
cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
- res_cs0->end - res_cs1->start + 1);
+ resource_size(res_cs1));
if (!cs1)
return -ENOMEM;
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index a8a7be0d06f..df8ee325d3c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -59,6 +59,7 @@
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
+#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
@@ -538,6 +539,7 @@ struct mv_port_signal {
struct mv_host_priv {
u32 hp_flags;
+ unsigned int board_idx;
u32 main_irq_mask;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
@@ -548,6 +550,10 @@ struct mv_host_priv {
u32 irq_cause_offset;
u32 irq_mask_offset;
u32 unmask_all_irqs;
+
+#if defined(CONFIG_HAVE_CLK)
+ struct clk *clk;
+#endif
/*
* These consistent DMA memory pools give us guaranteed
* alignment for hardware-accessed data structures,
@@ -2775,7 +2781,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
struct mv_port_priv *pp;
int edma_was_enabled;
- if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
+ if (ap->flags & ATA_FLAG_DISABLED) {
mv_unexpected_intr(ap, 0);
return;
}
@@ -3393,7 +3399,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
ZERO(0x024); /* respq outp */
ZERO(0x020); /* respq inp */
ZERO(0x02c); /* test control */
- writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+ writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
@@ -3854,7 +3860,6 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
/**
* mv_init_host - Perform some early initialization of the host.
* @host: ATA host to initialize
- * @board_idx: controller index
*
* If possible, do an early global reset of the host. Then do
* our port init and clear/unmask all/relevant host interrupts.
@@ -3862,13 +3867,13 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
* LOCKING:
* Inherited from caller.
*/
-static int mv_init_host(struct ata_host *host, unsigned int board_idx)
+static int mv_init_host(struct ata_host *host)
{
int rc = 0, n_hc, port, hc;
struct mv_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->base;
- rc = mv_chip_id(host, board_idx);
+ rc = mv_chip_id(host, hpriv->board_idx);
if (rc)
goto done;
@@ -3905,14 +3910,6 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
void __iomem *port_mmio = mv_port_base(mmio, port);
mv_port_init(&ap->ioaddr, port_mmio);
-
-#ifdef CONFIG_PCI
- if (!IS_SOC(hpriv)) {
- unsigned int offset = port_mmio - mmio;
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
- }
-#endif
}
for (hc = 0; hc < n_hc; hc++) {
@@ -4035,12 +4032,21 @@ static int mv_platform_probe(struct platform_device *pdev)
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
+ hpriv->board_idx = chip_soc;
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
hpriv->base -= SATAHC0_REG_BASE;
+#if defined(CONFIG_HAVE_CLK)
+ hpriv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hpriv->clk))
+ dev_notice(&pdev->dev, "cannot get clkdev\n");
+ else
+ clk_enable(hpriv->clk);
+#endif
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@@ -4049,12 +4055,12 @@ static int mv_platform_probe(struct platform_device *pdev)
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
- return rc;
+ goto err;
/* initialize adapter */
- rc = mv_init_host(host, chip_soc);
+ rc = mv_init_host(host);
if (rc)
- return rc;
+ goto err;
dev_printk(KERN_INFO, &pdev->dev,
"slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
@@ -4062,6 +4068,15 @@ static int mv_platform_probe(struct platform_device *pdev)
return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
IRQF_SHARED, &mv6_sht);
+err:
+#if defined(CONFIG_HAVE_CLK)
+ if (!IS_ERR(hpriv->clk)) {
+ clk_disable(hpriv->clk);
+ clk_put(hpriv->clk);
+ }
+#endif
+
+ return rc;
}
/*
@@ -4076,14 +4091,66 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
-
+#if defined(CONFIG_HAVE_CLK)
+ struct mv_host_priv *hpriv = host->private_data;
+#endif
ata_host_detach(host);
+
+#if defined(CONFIG_HAVE_CLK)
+ if (!IS_ERR(hpriv->clk)) {
+ clk_disable(hpriv->clk);
+ clk_put(hpriv->clk);
+ }
+#endif
return 0;
}
+#ifdef CONFIG_PM
+static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ if (host)
+ return ata_host_suspend(host, state);
+ else
+ return 0;
+}
+
+static int mv_platform_resume(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int ret;
+
+ if (host) {
+ struct mv_host_priv *hpriv = host->private_data;
+ const struct mv_sata_platform_data *mv_platform_data = \
+ pdev->dev.platform_data;
+ /*
+ * (Re-)program MBUS remapping windows if we are asked to.
+ */
+ if (mv_platform_data->dram != NULL)
+ mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+
+ /* initialize adapter */
+ ret = mv_init_host(host);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ return ret;
+ }
+ ata_host_resume(host);
+ }
+
+ return 0;
+}
+#else
+#define mv_platform_suspend NULL
+#define mv_platform_resume NULL
+#endif
+
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
.remove = __devexit_p(mv_platform_remove),
+ .suspend = mv_platform_suspend,
+ .resume = mv_platform_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -4094,6 +4161,9 @@ static struct platform_driver mv_platform_driver = {
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
+#ifdef CONFIG_PM
+static int mv_pci_device_resume(struct pci_dev *pdev);
+#endif
static struct pci_driver mv_pci_driver = {
@@ -4101,6 +4171,11 @@ static struct pci_driver mv_pci_driver = {
.id_table = mv_pci_tbl,
.probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = mv_pci_device_resume,
+#endif
+
};
/* move to PCI layer or libata core? */
@@ -4194,7 +4269,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
- int n_ports, rc;
+ int n_ports, port, rc;
if (!printed_version++)
dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
@@ -4208,6 +4283,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
return -ENOMEM;
host->private_data = hpriv;
hpriv->n_ports = n_ports;
+ hpriv->board_idx = board_idx;
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -4230,8 +4306,17 @@ static int mv_pci_init_one(struct pci_dev *pdev,
if (rc)
return rc;
+ for (port = 0; port < host->n_ports; port++) {
+ struct ata_port *ap = host->ports[port];
+ void __iomem *port_mmio = mv_port_base(hpriv->base, port);
+ unsigned int offset = port_mmio - hpriv->base;
+
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ }
+
/* initialize adapter */
- rc = mv_init_host(host, board_idx);
+ rc = mv_init_host(host);
if (rc)
return rc;
@@ -4247,6 +4332,27 @@ static int mv_pci_init_one(struct pci_dev *pdev,
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
+
+#ifdef CONFIG_PM
+static int mv_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ /* initialize adapter */
+ rc = mv_init_host(host);
+ if (rc)
+ return rc;
+
+ ata_host_resume(host);
+
+ return 0;
+}
+#endif
#endif
static int mv_platform_probe(struct platform_device *pdev);
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index e90665876c4..e8c6529dc36 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2505,7 +2505,7 @@ he_close(struct atm_vcc *vcc)
* TBRQ, the host issues the close command to the adapter.
*/
- while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
+ while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
(retry < MAX_RETRY)) {
msleep(sleep);
if (sleep < 250)
@@ -2514,7 +2514,7 @@ he_close(struct atm_vcc *vcc)
++retry;
}
- if (tx_inuse)
+ if (tx_inuse > 1)
hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
/* 2.3.1.1 generic close operations with flush */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index f734b345ac7..25a4c86f839 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -557,7 +557,7 @@ static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
} /* while */
// Move this VCI number into this location of the CBR Sched table.
- memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex,sizeof(TstSchedTbl));
+ memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
dev->CbrRemEntries--;
toBeAssigned--;
} /* while */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6bee6af8d8e..f1290cbd135 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -56,7 +56,14 @@ static inline int device_is_not_partition(struct device *dev)
*/
const char *dev_driver_string(const struct device *dev)
{
- return dev->driver ? dev->driver->name :
+ struct device_driver *drv;
+
+ /* dev->driver can change to NULL underneath us because of unbinding,
+ * so be careful about accessing it. dev->bus and dev->class should
+ * never change once they are set, so they don't need special care.
+ */
+ drv = ACCESS_ONCE(dev->driver);
+ return drv ? drv->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
}
@@ -987,6 +994,8 @@ done:
device_remove_class_symlinks(dev);
SymlinkError:
if (MAJOR(dev->devt))
+ devtmpfs_delete_node(dev);
+ if (MAJOR(dev->devt))
device_remove_sys_dev_entry(dev);
devtattrError:
if (MAJOR(dev->devt))
@@ -1728,8 +1737,5 @@ void device_shutdown(void)
dev->driver->shutdown(dev);
}
}
- kobject_put(sysfs_dev_char_kobj);
- kobject_put(sysfs_dev_block_kobj);
- kobject_put(dev_kobj);
async_synchronize_full();
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e62a4ccea54..958bd1540c3 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
struct cpu *cpu = container_of(dev, struct cpu, sysdev);
ssize_t ret;
+ cpu_hotplug_driver_lock();
switch (buf[0]) {
case '0':
ret = cpu_down(cpu->sysdev.id);
@@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
default:
ret = -EINVAL;
}
+ cpu_hotplug_driver_unlock();
if (ret >= 0)
ret = count;
@@ -72,6 +74,38 @@ void unregister_cpu(struct cpu *cpu)
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+static ssize_t cpu_probe_store(struct class *class, const char *buf,
+ size_t count)
+{
+ return arch_cpu_probe(buf, count);
+}
+
+static ssize_t cpu_release_store(struct class *class, const char *buf,
+ size_t count)
+{
+ return arch_cpu_release(buf, count);
+}
+
+static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+
+int __init cpu_probe_release_init(void)
+{
+ int rc;
+
+ rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+ &class_attr_probe.attr);
+ if (!rc)
+ rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+ &class_attr_release.attr);
+
+ return rc;
+}
+device_initcall(cpu_probe_release_init);
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
@@ -97,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
* boot up and this data does not change there after. Hence this
* operation should be safe. No locking required.
*/
- addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+ addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index a1cb5afe680..50375bb8e51 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -32,6 +32,8 @@ static int dev_mount = 1;
static int dev_mount;
#endif
+static rwlock_t dirlock;
+
static int __init mount_param(char *str)
{
dev_mount = simple_strtoul(str, NULL, 0);
@@ -74,47 +76,35 @@ static int dev_mkdir(const char *name, mode_t mode)
dentry = lookup_create(&nd, 1);
if (!IS_ERR(dentry)) {
err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
+ if (!err)
+ /* mark as kernel-created inode */
+ dentry->d_inode->i_private = &dev_mnt;
dput(dentry);
} else {
err = PTR_ERR(dentry);
}
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
path_put(&nd.path);
return err;
}
static int create_path(const char *nodepath)
{
- char *path;
- struct nameidata nd;
- int err = 0;
-
- path = kstrdup(nodepath, GFP_KERNEL);
- if (!path)
- return -ENOMEM;
-
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- path, LOOKUP_PARENT, &nd);
- if (err == 0) {
- struct dentry *dentry;
-
- /* create directory right away */
- dentry = lookup_create(&nd, 1);
- if (!IS_ERR(dentry)) {
- err = vfs_mkdir(nd.path.dentry->d_inode,
- dentry, 0755);
- dput(dentry);
- }
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ int err;
- path_put(&nd.path);
- } else if (err == -ENOENT) {
+ read_lock(&dirlock);
+ err = dev_mkdir(nodepath, 0755);
+ if (err == -ENOENT) {
+ char *path;
char *s;
/* parent directories do not exist, create them */
+ path = kstrdup(nodepath, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
s = path;
- while (1) {
+ for (;;) {
s = strchr(s, '/');
if (!s)
break;
@@ -125,9 +115,9 @@ static int create_path(const char *nodepath)
s[0] = '/';
s++;
}
+ kfree(path);
}
-
- kfree(path);
+ read_unlock(&dirlock);
return err;
}
@@ -156,34 +146,40 @@ int devtmpfs_create_node(struct device *dev)
mode |= S_IFCHR;
curr_cred = override_creds(&init_cred);
+
err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
nodename, LOOKUP_PARENT, &nd);
if (err == -ENOENT) {
- /* create missing parent directories */
create_path(nodename);
err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
nodename, LOOKUP_PARENT, &nd);
- if (err)
- goto out;
}
+ if (err)
+ goto out;
dentry = lookup_create(&nd, 0);
if (!IS_ERR(dentry)) {
- int umask;
-
- umask = sys_umask(0000);
err = vfs_mknod(nd.path.dentry->d_inode,
dentry, mode, dev->devt);
- sys_umask(umask);
- /* mark as kernel created inode */
- if (!err)
+ if (!err) {
+ struct iattr newattrs;
+
+ /* fixup possibly umasked mode */
+ newattrs.ia_mode = mode;
+ newattrs.ia_valid = ATTR_MODE;
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ /* mark as kernel-created inode */
dentry->d_inode->i_private = &dev_mnt;
+ }
dput(dentry);
} else {
err = PTR_ERR(dentry);
}
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
path_put(&nd.path);
out:
kfree(tmp);
@@ -205,16 +201,21 @@ static int dev_rmdir(const char *name)
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
if (!IS_ERR(dentry)) {
- if (dentry->d_inode)
- err = vfs_rmdir(nd.path.dentry->d_inode, dentry);
- else
+ if (dentry->d_inode) {
+ if (dentry->d_inode->i_private == &dev_mnt)
+ err = vfs_rmdir(nd.path.dentry->d_inode,
+ dentry);
+ else
+ err = -EPERM;
+ } else {
err = -ENOENT;
+ }
dput(dentry);
} else {
err = PTR_ERR(dentry);
}
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
path_put(&nd.path);
return err;
}
@@ -228,7 +229,8 @@ static int delete_path(const char *nodepath)
if (!path)
return -ENOMEM;
- while (1) {
+ write_lock(&dirlock);
+ for (;;) {
char *base;
base = strrchr(path, '/');
@@ -239,6 +241,7 @@ static int delete_path(const char *nodepath)
if (err)
break;
}
+ write_unlock(&dirlock);
kfree(path);
return err;
@@ -322,9 +325,8 @@ out:
* If configured, or requested by the commandline, devtmpfs will be
* auto-mounted after the kernel mounted the root filesystem.
*/
-int devtmpfs_mount(const char *mountpoint)
+int devtmpfs_mount(const char *mntdir)
{
- struct path path;
int err;
if (!dev_mount)
@@ -333,15 +335,11 @@ int devtmpfs_mount(const char *mountpoint)
if (!dev_mnt)
return 0;
- err = kern_path(mountpoint, LOOKUP_FOLLOW, &path);
- if (err)
- return err;
- err = do_add_mount(dev_mnt, &path, 0, NULL);
+ err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
printk(KERN_INFO "devtmpfs: mounted\n");
- path_put(&path);
return err;
}
@@ -354,6 +352,8 @@ int __init devtmpfs_init(void)
int err;
struct vfsmount *mnt;
+ rwlock_init(&dirlock);
+
err = register_filesystem(&dev_fs_type);
if (err) {
printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
@@ -361,7 +361,7 @@ int __init devtmpfs_init(void)
return err;
}
- mnt = kern_mount(&dev_fs_type);
+ mnt = kern_mount_data(&dev_fs_type, "mode=0755");
if (IS_ERR(mnt)) {
err = PTR_ERR(mnt);
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 7376367bcb8..a95024166b6 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -601,12 +601,9 @@ request_firmware_work_func(void *arg)
}
ret = _request_firmware(&fw, fw_work->name, fw_work->device,
fw_work->uevent);
- if (ret < 0)
- fw_work->cont(NULL, fw_work->context);
- else {
- fw_work->cont(fw, fw_work->context);
- release_firmware(fw);
- }
+
+ fw_work->cont(fw, fw_work->context);
+
module_put(fw_work->module);
kfree(fw_work);
return ret;
@@ -619,6 +616,7 @@ request_firmware_work_func(void *arg)
* is non-zero else the firmware copy must be done manually.
* @name: name of firmware file
* @device: device for which firmware is being loaded
+ * @gfp: allocation flags
* @context: will be passed over to @cont, and
* @fw may be %NULL if firmware request fails.
* @cont: function will be called asynchronously when the firmware
@@ -631,12 +629,12 @@ request_firmware_work_func(void *arg)
int
request_firmware_nowait(
struct module *module, int uevent,
- const char *name, struct device *device, void *context,
+ const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
struct task_struct *task;
struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work),
- GFP_ATOMIC);
+ gfp);
if (!fw_work)
return -ENOMEM;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 989429cfed8..d7d77d4a402 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_memory_notifier);
+static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
+
+int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_isolate_notifier);
+
+void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+
/*
* register_memory - Setup a sysfs device for a memory block
*/
@@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v)
return blocking_notifier_call_chain(&memory_chain, val, v);
}
+int memory_isolate_notify(unsigned long val, void *v)
+{
+ return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
+}
+
/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
@@ -341,6 +360,64 @@ static inline int memory_probe_init(void)
}
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for offlining pages of memory
+ */
+
+/* Soft offline a page */
+static ssize_t
+store_soft_offline_page(struct class *class, const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (strict_strtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+ ret = soft_offline_page(pfn_to_page(pfn), 0);
+ return ret == 0 ? count : ret;
+}
+
+/* Forcibly offline a page, including killing processes. */
+static ssize_t
+store_hard_offline_page(struct class *class, const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (strict_strtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ ret = __memory_failure(pfn, 0, 0);
+ return ret ? ret : count;
+}
+
+static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+
+static __init int memory_fail_init(void)
+{
+ int err;
+
+ err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+ &class_attr_soft_offline_page.attr);
+ if (!err)
+ err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+ &class_attr_hard_offline_page.attr);
+ return err;
+}
+#else
+static inline int memory_fail_init(void)
+{
+ return 0;
+}
+#endif
+
/*
* Note that phys_device is optional. It is here to allow for
* differentiation between which *physical* devices each
@@ -473,6 +550,9 @@ int __init memory_dev_init(void)
err = memory_probe_init();
if (!ret)
ret = err;
+ err = memory_fail_init();
+ if (!ret)
+ ret = err;
err = block_size_init();
if (!ret)
ret = err;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1fe5536d404..70122791683 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -173,6 +173,47 @@ static ssize_t node_read_distance(struct sys_device * dev,
}
static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+#ifdef CONFIG_HUGETLBFS
+/*
+ * hugetlbfs per node attributes registration interface:
+ * When/if the hugetlb[fs] subsystem initializes [sometime after this module],
+ * it will register its per node attributes for all online nodes with
+ * memory. It will also call register_hugetlbfs_with_node(), below, to
+ * register its attribute registration functions with this node driver.
+ * Once these hooks have been initialized, the node driver will call into
+ * the hugetlb module to [un]register attributes for hot-plugged nodes.
+ */
+static node_registration_func_t __hugetlb_register_node;
+static node_registration_func_t __hugetlb_unregister_node;
+
+static inline bool hugetlb_register_node(struct node *node)
+{
+ if (__hugetlb_register_node &&
+ node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+ __hugetlb_register_node(node);
+ return true;
+ }
+ return false;
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+ if (__hugetlb_unregister_node)
+ __hugetlb_unregister_node(node);
+}
+
+void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister)
+{
+ __hugetlb_register_node = doregister;
+ __hugetlb_unregister_node = unregister;
+}
+#else
+static inline void hugetlb_register_node(struct node *node) {}
+
+static inline void hugetlb_unregister_node(struct node *node) {}
+#endif
+
/*
* register_node - Setup a sysfs device for a node.
@@ -196,6 +237,8 @@ int register_node(struct node *node, int num, struct node *parent)
sysdev_create_file(&node->sysdev, &attr_distance);
scan_unevictable_register_node(node);
+
+ hugetlb_register_node(node);
}
return error;
}
@@ -216,6 +259,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_distance);
scan_unevictable_unregister_node(node);
+ hugetlb_unregister_node(node); /* no-op, if memoryless node */
sysdev_unregister(&node->sysdev);
}
@@ -227,26 +271,43 @@ struct node node_devices[MAX_NUMNODES];
*/
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
- if (node_online(nid)) {
- struct sys_device *obj = get_cpu_sysdev(cpu);
- if (!obj)
- return 0;
- return sysfs_create_link(&node_devices[nid].sysdev.kobj,
- &obj->kobj,
- kobject_name(&obj->kobj));
- }
+ int ret;
+ struct sys_device *obj;
- return 0;
+ if (!node_online(nid))
+ return 0;
+
+ obj = get_cpu_sysdev(cpu);
+ if (!obj)
+ return 0;
+
+ ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+ &obj->kobj,
+ kobject_name(&obj->kobj));
+ if (ret)
+ return ret;
+
+ return sysfs_create_link(&obj->kobj,
+ &node_devices[nid].sysdev.kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
- if (node_online(nid)) {
- struct sys_device *obj = get_cpu_sysdev(cpu);
- if (obj)
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
- kobject_name(&obj->kobj));
- }
+ struct sys_device *obj;
+
+ if (!node_online(nid))
+ return 0;
+
+ obj = get_cpu_sysdev(cpu);
+ if (!obj)
+ return 0;
+
+ sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+ kobject_name(&obj->kobj));
+ sysfs_remove_link(&obj->kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
+
return 0;
}
@@ -268,6 +329,7 @@ static int get_nid_for_pfn(unsigned long pfn)
/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
{
+ int ret;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
if (!mem_blk)
@@ -284,9 +346,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
if (page_nid != nid)
continue;
- return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
+ ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
&mem_blk->sysdev.kobj,
kobject_name(&mem_blk->sysdev.kobj));
+ if (ret)
+ return ret;
+
+ return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
+ &node_devices[nid].sysdev.kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
}
/* mem section does not span the specified node */
return 0;
@@ -295,12 +363,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
/* unregister memory section under all nodes that it spans */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
{
- nodemask_t unlinked_nodes;
+ NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
unsigned long pfn, sect_start_pfn, sect_end_pfn;
- if (!mem_blk)
+ if (!mem_blk) {
+ NODEMASK_FREE(unlinked_nodes);
return -EFAULT;
- nodes_clear(unlinked_nodes);
+ }
+ if (!unlinked_nodes)
+ return -ENOMEM;
+ nodes_clear(*unlinked_nodes);
sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
@@ -311,11 +383,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
continue;
if (!node_online(nid))
continue;
- if (node_test_and_set(nid, unlinked_nodes))
+ if (node_test_and_set(nid, *unlinked_nodes))
continue;
sysfs_remove_link(&node_devices[nid].sysdev.kobj,
kobject_name(&mem_blk->sysdev.kobj));
+ sysfs_remove_link(&mem_blk->sysdev.kobj,
+ kobject_name(&node_devices[nid].sysdev.kobj));
}
+ NODEMASK_FREE(unlinked_nodes);
return 0;
}
@@ -345,9 +420,77 @@ static int link_mem_sections(int nid)
}
return err;
}
-#else
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * Handle per node hstate attribute [un]registration on transitions
+ * to/from memoryless state.
+ */
+static void node_hugetlb_work(struct work_struct *work)
+{
+ struct node *node = container_of(work, struct node, node_work);
+
+ /*
+ * We only get here when a node transitions to/from memoryless state.
+ * We can detect which transition occurred by examining whether the
+ * node has memory now. hugetlb_register_node() already checks this,
+ * so we try to register the attributes. If that fails, then the
+ * node has transitioned to memoryless, so try to unregister the
+ * attributes.
+ */
+ if (!hugetlb_register_node(node))
+ hugetlb_unregister_node(node);
+}
+
+static void init_node_hugetlb_work(int nid)
+{
+ INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+}
+
+static int node_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mnb = arg;
+ int nid = mnb->status_change_nid;
+
+ switch (action) {
+ case MEM_ONLINE:
+ case MEM_OFFLINE:
+ /*
+ * offload per node hstate [un]registration to a work thread
+ * when transitioning to/from memoryless state.
+ */
+ if (nid != NUMA_NO_NODE)
+ schedule_work(&node_devices[nid].node_work);
+ break;
+
+ case MEM_GOING_ONLINE:
+ case MEM_GOING_OFFLINE:
+ case MEM_CANCEL_ONLINE:
+ case MEM_CANCEL_OFFLINE:
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_HUGETLBFS */
+#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
+
static int link_mem_sections(int nid) { return 0; }
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
+ !defined(CONFIG_HUGETLBFS)
+static inline int node_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ return NOTIFY_OK;
+}
+
+static void init_node_hugetlb_work(int nid) { }
+
+#endif
int register_one_node(int nid)
{
@@ -371,6 +514,9 @@ int register_one_node(int nid)
/* link memory sections under this node */
error = link_mem_sections(nid);
+
+ /* initialize work queue for memory hot plug */
+ init_node_hugetlb_work(nid);
}
return error;
@@ -460,13 +606,17 @@ static int node_states_init(void)
return err;
}
+#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
int ret;
ret = sysdev_class_register(&node_class);
- if (!ret)
+ if (!ret) {
ret = node_states_init();
+ hotplug_memory_notifier(node_memory_callback,
+ NODE_CALLBACK_PRI);
+ }
/*
* Note: we're not going to unregister the node class if we fail
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4fa954b07ac..9d2ee25deaf 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1000,7 +1000,7 @@ static __initdata LIST_HEAD(early_platform_device_list);
int __init early_platform_driver_register(struct early_platform_driver *epdrv,
char *buf)
{
- unsigned long index;
+ char *tmp;
int n;
/* Simply add the driver to the end of the global list.
@@ -1019,13 +1019,28 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv,
if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
list_move(&epdrv->list, &early_platform_driver_list);
- if (!strcmp(buf, epdrv->pdrv->driver.name))
+ /* Allow passing parameters after device name */
+ if (buf[n] == '\0' || buf[n] == ',')
epdrv->requested_id = -1;
- else if (buf[n] == '.' && strict_strtoul(&buf[n + 1], 10,
- &index) == 0)
- epdrv->requested_id = index;
- else
- epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
+ else {
+ epdrv->requested_id = simple_strtoul(&buf[n + 1],
+ &tmp, 10);
+
+ if (buf[n] != '.' || (tmp == &buf[n + 1])) {
+ epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
+ n = 0;
+ } else
+ n += strcspn(&buf[n + 1], ",") + 1;
+ }
+
+ if (buf[n] == ',')
+ n++;
+
+ if (epdrv->bufsize) {
+ memcpy(epdrv->buffer, &buf[n],
+ min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
+ epdrv->buffer[epdrv->bufsize - 1] = '\0';
+ }
}
return 0;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8aa2443182d..48adf80926a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -23,8 +23,8 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
-#include <linux/rwsem.h>
#include <linux/interrupt.h>
+#include <linux/sched.h>
#include "../base.h"
#include "power.h"
@@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev)
list_move_tail(&dev->power.entry, &dpm_list);
}
+static ktime_t initcall_debug_start(struct device *dev)
+{
+ ktime_t calltime = ktime_set(0, 0);
+
+ if (initcall_debug) {
+ pr_info("calling %s+ @ %i\n",
+ dev_name(dev), task_pid_nr(current));
+ calltime = ktime_get();
+ }
+
+ return calltime;
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+ int error)
+{
+ ktime_t delta, rettime;
+
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
+ error, (unsigned long long)ktime_to_ns(delta) >> 10);
+ }
+}
+
/**
* pm_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
@@ -172,6 +198,9 @@ static int pm_op(struct device *dev,
pm_message_t state)
{
int error = 0;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
switch (state.event) {
#ifdef CONFIG_SUSPEND
@@ -219,6 +248,9 @@ static int pm_op(struct device *dev,
default:
error = -EINVAL;
}
+
+ initcall_debug_report(dev, calltime, error);
+
return error;
}
@@ -236,6 +268,13 @@ static int pm_noirq_op(struct device *dev,
pm_message_t state)
{
int error = 0;
+ ktime_t calltime, delta, rettime;
+
+ if (initcall_debug) {
+ pr_info("calling %s_i+ @ %i\n",
+ dev_name(dev), task_pid_nr(current));
+ calltime = ktime_get();
+ }
switch (state.event) {
#ifdef CONFIG_SUSPEND
@@ -283,6 +322,15 @@ static int pm_noirq_op(struct device *dev,
default:
error = -EINVAL;
}
+
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ printk("initcall %s_i+ returned %d after %Ld usecs\n",
+ dev_name(dev), error,
+ (unsigned long long)ktime_to_ns(delta) >> 10);
+ }
+
return error;
}
@@ -324,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
+static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+{
+ ktime_t calltime;
+ s64 usecs64;
+ int usecs;
+
+ calltime = ktime_get();
+ usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs = usecs64;
+ if (usecs == 0)
+ usecs = 1;
+ pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -341,14 +406,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (!dev->bus)
- goto End;
-
- if (dev->bus->pm) {
+ if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "EARLY ");
error = pm_noirq_op(dev, dev->bus->pm, state);
}
- End:
+
TRACE_RESUME(error);
return error;
}
@@ -363,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
+ ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
transition_started = false;
@@ -376,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state)
pm_dev_err(dev, state, " early", error);
}
mutex_unlock(&dpm_list_mtx);
+ dpm_show_time(starttime, state, "early");
resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
+ * legacy_resume - Execute a legacy (bus or class) resume callback for device.
+ * @dev: Device to resume.
+ * @cb: Resume callback to execute.
+ */
+static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
+
+ error = cb(dev);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
+/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -400,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->resume) {
pm_dev_dbg(dev, state, "legacy ");
- error = dev->bus->resume(dev);
+ error = legacy_resume(dev, dev->bus->resume);
}
if (error)
goto End;
@@ -421,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->resume) {
pm_dev_dbg(dev, state, "legacy class ");
- error = dev->class->resume(dev);
+ error = legacy_resume(dev, dev->class->resume);
}
}
End:
@@ -441,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state)
static void dpm_resume(pm_message_t state)
{
struct list_head list;
+ ktime_t starttime = ktime_get();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -469,6 +554,7 @@ static void dpm_resume(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ dpm_show_time(starttime, state, NULL);
}
/**
@@ -521,7 +607,7 @@ static void dpm_complete(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
device_complete(dev, state);
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
mutex_lock(&dpm_list_mtx);
}
@@ -584,10 +670,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
- if (!dev->bus)
- return 0;
-
- if (dev->bus->pm) {
+ if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
}
@@ -604,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
int dpm_suspend_noirq(pm_message_t state)
{
struct device *dev;
+ ktime_t starttime = ktime_get();
int error = 0;
suspend_device_irqs();
@@ -619,11 +703,34 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume_noirq(resume_event(state));
+ else
+ dpm_show_time(starttime, state, "late");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * @dev: Device to suspend.
+ * @state: PM transition of the system being carried out.
+ * @cb: Suspend callback to execute.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+ int (*cb)(struct device *dev, pm_message_t state))
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev);
+
+ error = cb(dev, state);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
+/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -640,8 +747,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
- error = dev->class->suspend(dev, state);
- suspend_report_result(dev->class->suspend, error);
+ error = legacy_suspend(dev, state, dev->class->suspend);
}
if (error)
goto End;
@@ -662,8 +768,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = pm_op(dev, dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy ");
- error = dev->bus->suspend(dev, state);
- suspend_report_result(dev->bus->suspend, error);
+ error = legacy_suspend(dev, state, dev->bus->suspend);
}
}
End:
@@ -679,6 +784,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
static int dpm_suspend(pm_message_t state)
{
struct list_head list;
+ ktime_t starttime = ktime_get();
int error = 0;
INIT_LIST_HEAD(&list);
@@ -704,6 +810,8 @@ static int dpm_suspend(pm_message_t state)
}
list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
+ if (!error)
+ dpm_show_time(starttime, state, NULL);
return error;
}
@@ -772,7 +880,7 @@ static int dpm_prepare(pm_message_t state)
pm_runtime_get_noresume(dev);
if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
/* Wake-up requested during system sleep transition. */
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
error = -EBUSY;
} else {
error = device_prepare(dev, state);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 5a01ecef4af..f8b044e8aef 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev)
dev->bus->pm->runtime_idle(dev);
spin_lock_irq(&dev->power.lock);
+ } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
+ spin_unlock_irq(&dev->power.lock);
+
+ dev->type->pm->runtime_idle(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ } else if (dev->class && dev->class->pm
+ && dev->class->pm->runtime_idle) {
+ spin_unlock_irq(&dev->power.lock);
+
+ dev->class->pm->runtime_idle(dev);
+
+ spin_lock_irq(&dev->power.lock);
}
dev->power.idle_notification = false;
@@ -194,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
spin_lock_irq(&dev->power.lock);
dev->power.runtime_error = retval;
+ } else if (dev->type && dev->type->pm
+ && dev->type->pm->runtime_suspend) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->type->pm->runtime_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
+ } else if (dev->class && dev->class->pm
+ && dev->class->pm->runtime_suspend) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->class->pm->runtime_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
} else {
retval = -ENOSYS;
}
@@ -359,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
spin_lock_irq(&dev->power.lock);
dev->power.runtime_error = retval;
+ } else if (dev->type && dev->type->pm
+ && dev->type->pm->runtime_resume) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->type->pm->runtime_resume(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
+ } else if (dev->class && dev->class->pm
+ && dev->class->pm->runtime_resume) {
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = dev->class->pm->runtime_resume(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.runtime_error = retval;
} else {
retval = -ENOSYS;
}
@@ -701,15 +746,15 @@ EXPORT_SYMBOL_GPL(pm_request_resume);
* @dev: Device to handle.
* @sync: If set and the device is suspended, resume it synchronously.
*
- * Increment the usage count of the device and if it was zero previously,
- * resume it or submit a resume request for it, depending on the value of @sync.
+ * Increment the usage count of the device and resume it or submit a resume
+ * request for it, depending on the value of @sync.
*/
int __pm_runtime_get(struct device *dev, bool sync)
{
- int retval = 1;
+ int retval;
- if (atomic_add_return(1, &dev->power.usage_count) == 1)
- retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+ atomic_inc(&dev->power.usage_count);
+ retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
return retval;
}
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 436a090b532..4e0726aa53b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1271,8 +1271,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
- != CRYPTO_ALG_TYPE_HASH) {
+ if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
retcode = ERR_AUTH_ALG_ND;
goto fail;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 5c01f747571..3266b4f65da 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3497,6 +3497,9 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
+ if (WARN_ON(size < 0 || size > sizeof(inparam)))
+ return -EINVAL;
+
/* copyin */
CLEARSTRUCT(&inparam);
if (_IOC_DIR(cmd) & _IOC_WRITE)
@@ -4162,7 +4165,7 @@ static int floppy_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops floppy_pm_ops = {
+static const struct dev_pm_ops floppy_pm_ops = {
.resume = floppy_resume,
.restore = floppy_resume,
};
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 6380ad8d91b..59ca2b77b57 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -200,7 +200,7 @@ struct floppy_state {
int ejected;
wait_queue_head_t wait;
int wanted;
- struct device_node* media_bay; /* NULL when not in bay */
+ struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
};
@@ -303,14 +303,13 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
static void do_fd_request(struct request_queue * q)
{
int i;
- for(i=0;i<floppy_count;i++)
- {
-#ifdef CONFIG_PMAC_MEDIABAY
- if (floppy_states[i].media_bay &&
- check_media_bay(floppy_states[i].media_bay, MB_FD))
+
+ for(i=0; i<floppy_count; i++) {
+ struct floppy_state *fs = &floppy_states[i];
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD)
continue;
-#endif /* CONFIG_PMAC_MEDIABAY */
- start_request(&floppy_states[i]);
+ start_request(fs);
}
}
@@ -849,10 +848,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
return -EPERM;
-#ifdef CONFIG_PMAC_MEDIABAY
- if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
-#endif
switch (cmd) {
case FDEJECT:
@@ -876,10 +874,9 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
int n, err = 0;
if (fs->ref_count == 0) {
-#ifdef CONFIG_PMAC_MEDIABAY
- if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
-#endif
out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
out_8(&sw->control_bic, 0xff);
out_8(&sw->mode, 0x95);
@@ -963,10 +960,9 @@ static int floppy_revalidate(struct gendisk *disk)
struct swim3 __iomem *sw;
int ret, n;
-#ifdef CONFIG_PMAC_MEDIABAY
- if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD)
return -ENXIO;
-#endif
sw = fs->swim3;
grab_drive(fs, revalidating, 0);
@@ -1009,7 +1005,6 @@ static const struct block_device_operations floppy_fops = {
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.node;
- struct device_node *mediabay;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
@@ -1036,9 +1031,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
}
dev_set_drvdata(&mdev->ofdev.dev, fs);
- mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ?
- swim->parent : NULL;
- if (mediabay == NULL)
+ if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
memset(fs, 0, sizeof(*fs));
@@ -1068,7 +1061,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->media_bay = mediabay;
+ fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
@@ -1093,7 +1086,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
init_timer(&fs->timeout);
printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
- mediabay ? "in media bay" : "");
+ mdev->media_bay ? "in media bay" : "");
return 0;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0877d3628fd..d1fd032e751 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -169,13 +169,6 @@ static int __init xd_init(void)
init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
- if (!xd_dma_buffer)
- xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
- if (!xd_dma_buffer) {
- printk(KERN_ERR "xd: Out of memory.\n");
- return -ENOMEM;
- }
-
err = -EBUSY;
if (register_blkdev(XT_DISK_MAJOR, "xd"))
goto out1;
@@ -202,6 +195,19 @@ static int __init xd_init(void)
xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
}
+ /*
+ * With the drive detected, xd_maxsectors should now be known.
+ * If xd_maxsectors is 0, nothing was detected and we fall through
+ * to return -ENODEV
+ */
+ if (!xd_dma_buffer && xd_maxsectors) {
+ xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+ if (!xd_dma_buffer) {
+ printk(KERN_ERR "xd: Out of memory.\n");
+ goto out3;
+ }
+ }
+
err = -ENODEV;
if (!xd_drives)
goto out3;
@@ -249,15 +255,17 @@ out4:
for (i = 0; i < xd_drives; i++)
put_disk(xd_gendisk[i]);
out3:
- release_region(xd_iobase,4);
+ if (xd_maxsectors)
+ release_region(xd_iobase,4);
+
+ if (xd_dma_buffer)
+ xd_dma_mem_free((unsigned long)xd_dma_buffer,
+ xd_maxsectors * 0x200);
out2:
blk_cleanup_queue(xd_queue);
out1a:
unregister_blkdev(XT_DISK_MAJOR, "xd");
out1:
- if (xd_dma_buffer)
- xd_dma_mem_free((unsigned long)xd_dma_buffer,
- xd_maxsectors * 0x200);
return err;
Enomem:
err = -ENOMEM;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b8578bb3f4c..05a31e55d27 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -42,6 +42,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 44bc8bbabf5..a699f09ddf7 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -307,6 +307,7 @@ static void btusb_bulk_complete(struct urb *urb)
return;
usb_anchor_urb(urb, &data->bulk_anchor);
+ usb_mark_last_busy(data->udev);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
@@ -1066,7 +1067,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
spin_lock_irq(&data->txlock);
- if (!(interface_to_usbdev(intf)->auto_pm && data->tx_in_flight)) {
+ if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) {
set_bit(BTUSB_SUSPENDING, &data->flags);
spin_unlock_irq(&data->txlock);
} else {
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 6aad99ec4e0..31be3ac2e21 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -201,19 +201,6 @@ config DIGIEPCA
To compile this driver as a module, choose M here: the
module will be called epca.
-config ESPSERIAL
- tristate "Hayes ESP serial port support"
- depends on SERIAL_NONSTANDARD && ISA && ISA_DMA_API && BROKEN
- help
- This is a driver which supports Hayes ESP serial ports. Both single
- port cards and multiport cards are supported. Make sure to read
- <file:Documentation/hayes-esp.txt>.
-
- To compile this driver as a module, choose M here: the
- module will be called esp.
-
- If unsure, say N.
-
config MOXA_INTELLIO
tristate "Moxa Intellio support"
depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
@@ -515,7 +502,7 @@ config BRIQ_PANEL
config BFIN_OTP
tristate "Blackfin On-Chip OTP Memory Support"
- depends on BLACKFIN && (BF52x || BF54x)
+ depends on BLACKFIN && (BF51x || BF52x || BF54x)
default y
help
If you say Y here, you will get support for a character device
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 19a79dd79ee..f957edf7e45 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
obj-$(CONFIG_AUDIT) += tty_audit.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
-obj-$(CONFIG_ESPSERIAL) += esp.o
obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3cb56a049e2..30c36ac2cd0 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -36,10 +36,10 @@
#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
-#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
-#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
-#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
-#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
@@ -50,20 +50,20 @@
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
-#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
-#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
-#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
-#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB 0x006a
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -83,22 +83,22 @@
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
extern int agp_memory_reserved;
@@ -178,6 +178,7 @@ static struct _intel_private {
* popup and for the GTT.
*/
int gtt_entries; /* i830+ */
+ int gtt_total_size;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
@@ -653,7 +654,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_IGD) {
+ } else if (IS_G33 && !IS_PINEVIEW) {
/* G33's GTT size defined in gmch_ctrl */
switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
case G33_PGETBL_SIZE_1M:
@@ -669,7 +670,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4;
- } else if (IS_G4X || IS_IGD) {
+ } else if (IS_G4X || IS_PINEVIEW) {
/* On 4 series hardware, GTT stolen is separate from graphics
* stolen, ignore it in stolen gtt entries counting. However,
* 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -1153,7 +1154,7 @@ static int intel_i915_configure(void)
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+ for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
writel(agp_bridge->scratch_page, intel_private.gtt+i);
}
readl(intel_private.gtt+i-1); /* PCI Posting. */
@@ -1308,6 +1309,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
if (!intel_private.gtt)
return -ENOMEM;
+ intel_private.gtt_total_size = gtt_map_size / 4;
+
temp &= 0xfff80000;
intel_private.registers = ioremap(temp, 128 * 4096);
@@ -1352,15 +1355,15 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
switch (agp_bridge->dev->device) {
case PCI_DEVICE_ID_INTEL_GM45_HB:
- case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+ case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
case PCI_DEVICE_ID_INTEL_Q45_HB:
case PCI_DEVICE_ID_INTEL_G45_HB:
case PCI_DEVICE_ID_INTEL_G41_HB:
case PCI_DEVICE_ID_INTEL_B43_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
- case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+ case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@@ -1395,6 +1398,8 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
if (!intel_private.gtt)
return -ENOMEM;
+ intel_private.gtt_total_size = gtt_size / 4;
+
intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
iounmap(intel_private.gtt);
@@ -2340,14 +2345,14 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
- "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
- "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+ "GM45", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+ "Eaglelake", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
"Q45/Q43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
@@ -2356,14 +2361,14 @@ static const struct intel_driver_description {
"B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
- "IGDNG/D", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
- "IGDNG/M", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
- "IGDNG/MA", NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
- "IGDNG/MC2", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+ "Ironlake/D", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ "Ironlake/M", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ "Ironlake/MA", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+ "Ironlake/MC2", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -2545,8 +2550,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
ID(PCI_DEVICE_ID_INTEL_82G35_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
@@ -2557,15 +2562,15 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
ID(PCI_DEVICE_ID_INTEL_GM45_HB),
- ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+ ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_B43_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
- ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
{ }
};
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 703959eba45..d89da4ac061 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -144,16 +144,13 @@ static int uninorth_configure(void)
return 0;
}
-static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
- int type)
+static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
- int i, j, num_entries;
+ int i, num_entries;
void *temp;
+ u32 *gp;
int mask_type;
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_32(temp)->num_entries;
-
if (type != mem->type)
return -EINVAL;
@@ -163,49 +160,12 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
return -EINVAL;
}
- if ((pg_start + mem->page_count) > num_entries)
- return -EINVAL;
-
- j = pg_start;
-
- while (j < (pg_start + mem->page_count)) {
- if (agp_bridge->gatt_table[j])
- return -EBUSY;
- j++;
- }
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- agp_bridge->gatt_table[j] =
- cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL);
- flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
- (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
- }
- (void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
- mb();
-
- uninorth_tlbflush(mem);
- return 0;
-}
-
-static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
-{
- int i, num_entries;
- void *temp;
- u32 *gp;
- int mask_type;
+ if (mem->page_count == 0)
+ return 0;
temp = agp_bridge->current_size;
num_entries = A_SIZE_32(temp)->num_entries;
- if (type != mem->type)
- return -EINVAL;
-
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
- if (mask_type != 0) {
- /* We know nothing of memory types */
- return -EINVAL;
- }
-
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
@@ -213,14 +173,18 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
for (i = 0; i < mem->page_count; ++i) {
if (gp[i]) {
dev_info(&agp_bridge->dev->dev,
- "u3_insert_memory: entry 0x%x occupied (%x)\n",
+ "uninorth_insert_memory: entry 0x%x occupied (%x)\n",
i, gp[i]);
return -EBUSY;
}
}
for (i = 0; i < mem->page_count; i++) {
- gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
+ if (is_u3)
+ gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
+ else
+ gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
+ 0x1UL);
flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
(unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
}
@@ -230,14 +194,23 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
return 0;
}
-int u3_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
size_t i;
u32 *gp;
+ int mask_type;
+
+ if (type != mem->type)
+ return -EINVAL;
- if (type != 0 || mem->type != 0)
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+ if (mask_type != 0) {
/* We know nothing of memory types */
return -EINVAL;
+ }
+
+ if (mem->page_count == 0)
+ return 0;
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i)
@@ -536,7 +509,7 @@ const struct agp_bridge_driver uninorth_agp_driver = {
.create_gatt_table = uninorth_create_gatt_table,
.free_gatt_table = uninorth_free_gatt_table,
.insert_memory = uninorth_insert_memory,
- .remove_memory = agp_generic_remove_memory,
+ .remove_memory = uninorth_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
@@ -562,8 +535,8 @@ const struct agp_bridge_driver u3_agp_driver = {
.agp_enable = uninorth_agp_enable,
.create_gatt_table = uninorth_create_gatt_table,
.free_gatt_table = uninorth_free_gatt_table,
- .insert_memory = u3_insert_memory,
- .remove_memory = u3_remove_memory,
+ .insert_memory = uninorth_insert_memory,
+ .remove_memory = uninorth_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index 1d7c34c73b2..2628c7415ea 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -226,7 +226,7 @@ bfin_jc_wait_until_sent(struct tty_struct *tty, int timeout)
}
}
-static struct tty_operations bfin_jc_ops = {
+static const struct tty_operations bfin_jc_ops = {
.open = bfin_jc_open,
.close = bfin_jc_close,
.write = bfin_jc_write,
diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c
index 26a47dc88f6..53c524e7b82 100644
--- a/drivers/char/efirtc.c
+++ b/drivers/char/efirtc.c
@@ -285,6 +285,7 @@ static const struct file_operations efi_rtc_fops = {
.unlocked_ioctl = efi_rtc_ioctl,
.open = efi_rtc_open,
.release = efi_rtc_close,
+ .llseek = no_llseek,
};
static struct miscdevice efi_rtc_dev= {
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index dde5134713e..17b044a71e0 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -935,7 +935,7 @@ static int info_open(struct tty_struct *tty, struct file *filp)
return 0;
}
-static struct tty_operations info_ops = {
+static const struct tty_operations info_ops = {
.open = info_open,
.ioctl = info_ioctl,
};
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
deleted file mode 100644
index b19d43cd954..00000000000
--- a/drivers/char/esp.c
+++ /dev/null
@@ -1,2533 +0,0 @@
-/*
- * esp.c - driver for Hayes ESP serial cards
- *
- * --- Notices from serial.c, upon which this driver is based ---
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * Extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92. Now
- * much more extensible to support other serial cards based on the
- * 16450/16550A UART's. Added support for the AST FourPort and the
- * Accent Async board.
- *
- * set_serial_info fixed to set the flags, custom divisor, and uart
- * type fields. Fix suggested by Michael K. Johnson 12/12/92.
- *
- * 11/95: TIOCMIWAIT, TIOCGICOUNT by Angelo Haritsis <ah@doc.ic.ac.uk>
- *
- * 03/96: Modularised by Angelo Haritsis <ah@doc.ic.ac.uk>
- *
- * rs_set_termios fixed to look also for changes of the input
- * flags INPCK, BRKINT, PARMRK, IGNPAR and IGNBRK.
- * Bernd Anhäupl 05/17/96.
- *
- * --- End of notices from serial.c ---
- *
- * Support for the ESP serial card by Andrew J. Robinson
- * <arobinso@nyx.net> (Card detection routine taken from a patch
- * by Dennis J. Boylan). Patches to allow use with 2.1.x contributed
- * by Chris Faylor.
- *
- * Most recent changes: (Andrew J. Robinson)
- * Support for PIO mode. This allows the driver to work properly with
- * multiport cards.
- *
- * Arnaldo Carvalho de Melo <acme@conectiva.com.br> -
- * several cleanups, use module_init/module_exit, etc
- *
- * This module exports the following rs232 io functions:
- *
- * int espserial_init(void);
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/serialP.h>
-#include <linux/serial_reg.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <linux/hayesesp.h>
-
-#define NR_PORTS 64 /* maximum number of ports */
-#define NR_PRIMARY 8 /* maximum number of primary ports */
-#define REGION_SIZE 8 /* size of io region to request */
-
-/* The following variables can be set by giving module options */
-static int irq[NR_PRIMARY]; /* IRQ for each base port */
-static unsigned int divisor[NR_PRIMARY]; /* custom divisor for each port */
-static unsigned int dma = ESP_DMA_CHANNEL; /* DMA channel */
-static unsigned int rx_trigger = ESP_RX_TRIGGER;
-static unsigned int tx_trigger = ESP_TX_TRIGGER;
-static unsigned int flow_off = ESP_FLOW_OFF;
-static unsigned int flow_on = ESP_FLOW_ON;
-static unsigned int rx_timeout = ESP_RX_TMOUT;
-static unsigned int pio_threshold = ESP_PIO_THRESHOLD;
-
-MODULE_LICENSE("GPL");
-
-module_param_array(irq, int, NULL, 0);
-module_param_array(divisor, uint, NULL, 0);
-module_param(dma, uint, 0);
-module_param(rx_trigger, uint, 0);
-module_param(tx_trigger, uint, 0);
-module_param(flow_off, uint, 0);
-module_param(flow_on, uint, 0);
-module_param(rx_timeout, uint, 0);
-module_param(pio_threshold, uint, 0);
-
-/* END */
-
-static char *dma_buffer;
-static int dma_bytes;
-static struct esp_pio_buffer *free_pio_buf;
-
-#define DMA_BUFFER_SZ 1024
-
-#define WAKEUP_CHARS 1024
-
-static char serial_name[] __initdata = "ESP serial driver";
-static char serial_version[] __initdata = "2.2";
-
-static struct tty_driver *esp_driver;
-
-/*
- * Serial driver configuration section. Here are the various options:
- *
- * SERIAL_PARANOIA_CHECK
- * Check the magic number for the esp_structure where
- * ever possible.
- */
-
-#undef SERIAL_PARANOIA_CHECK
-#define SERIAL_DO_RESTART
-
-#undef SERIAL_DEBUG_INTR
-#undef SERIAL_DEBUG_OPEN
-#undef SERIAL_DEBUG_FLOW
-
-#if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT)
-#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
- tty->name, info->port.flags, \
- serial_driver.refcount, \
- info->port.count, tty->count, s)
-#else
-#define DBG_CNT(s)
-#endif
-
-static struct esp_struct *ports;
-
-static void change_speed(struct esp_struct *info);
-static void rs_wait_until_sent(struct tty_struct *, int);
-
-/*
- * The ESP card has a clock rate of 14.7456 MHz (that is, 2**ESPC_SCALE
- * times the normal 1.8432 Mhz clock of most serial boards).
- */
-#define BASE_BAUD ((1843200 / 16) * (1 << ESPC_SCALE))
-
-/* Standard COM flags (except for COM4, because of the 8514 problem) */
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-
-static inline int serial_paranoia_check(struct esp_struct *info,
- char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
- static const char badmagic[] = KERN_WARNING
- "Warning: bad magic number for serial struct (%s) in %s\n";
- static const char badinfo[] = KERN_WARNING
- "Warning: null esp_struct for (%s) in %s\n";
-
- if (!info) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (info->magic != ESP_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-static inline unsigned int serial_in(struct esp_struct *info, int offset)
-{
- return inb(info->io_port + offset);
-}
-
-static inline void serial_out(struct esp_struct *info, int offset,
- unsigned char value)
-{
- outb(value, info->io_port+offset);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_stop() and rs_start()
- *
- * This routines are called before setting or resetting tty->stopped.
- * They enable or disable transmitter interrupts, as necessary.
- * ------------------------------------------------------------
- */
-static void rs_stop(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_stop"))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
- if (info->IER & UART_IER_THRI) {
- info->IER &= ~UART_IER_THRI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- }
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-static void rs_start(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_start"))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
- if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) {
- info->IER |= UART_IER_THRI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- }
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-/*
- * ----------------------------------------------------------------------
- *
- * Here starts the interrupt handling routines. All of the following
- * subroutines are declared as inline and are folded into
- * rs_interrupt(). They were separated out for readability's sake.
- *
- * Note: rs_interrupt() is a "fast" interrupt, which means that it
- * runs with interrupts turned off. People who may want to modify
- * rs_interrupt() should try to keep the interrupt handler as fast as
- * possible. After you are done making modifications, it is not a bad
- * idea to do:
- *
- * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
- *
- * and look at the resulting assemble code in serial.s.
- *
- * - Ted Ts'o (tytso@mit.edu), 7-Mar-93
- * -----------------------------------------------------------------------
- */
-
-static DEFINE_SPINLOCK(pio_lock);
-
-static inline struct esp_pio_buffer *get_pio_buffer(void)
-{
- struct esp_pio_buffer *buf;
- unsigned long flags;
-
- spin_lock_irqsave(&pio_lock, flags);
- if (free_pio_buf) {
- buf = free_pio_buf;
- free_pio_buf = buf->next;
- } else {
- buf = kmalloc(sizeof(struct esp_pio_buffer), GFP_ATOMIC);
- }
- spin_unlock_irqrestore(&pio_lock, flags);
- return buf;
-}
-
-static inline void release_pio_buffer(struct esp_pio_buffer *buf)
-{
- unsigned long flags;
- spin_lock_irqsave(&pio_lock, flags);
- buf->next = free_pio_buf;
- free_pio_buf = buf;
- spin_unlock_irqrestore(&pio_lock, flags);
-}
-
-static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
-{
- struct tty_struct *tty = info->port.tty;
- int i;
- struct esp_pio_buffer *pio_buf;
- struct esp_pio_buffer *err_buf;
- unsigned char status_mask;
-
- pio_buf = get_pio_buffer();
-
- if (!pio_buf)
- return;
-
- err_buf = get_pio_buffer();
-
- if (!err_buf) {
- release_pio_buffer(pio_buf);
- return;
- }
-
- status_mask = (info->read_status_mask >> 2) & 0x07;
-
- for (i = 0; i < num_bytes - 1; i += 2) {
- *((unsigned short *)(pio_buf->data + i)) =
- inw(info->io_port + UART_ESI_RX);
- err_buf->data[i] = serial_in(info, UART_ESI_RWS);
- err_buf->data[i + 1] = (err_buf->data[i] >> 3) & status_mask;
- err_buf->data[i] &= status_mask;
- }
-
- if (num_bytes & 0x0001) {
- pio_buf->data[num_bytes - 1] = serial_in(info, UART_ESI_RX);
- err_buf->data[num_bytes - 1] =
- (serial_in(info, UART_ESI_RWS) >> 3) & status_mask;
- }
-
- /* make sure everything is still ok since interrupts were enabled */
- tty = info->port.tty;
-
- if (!tty) {
- release_pio_buffer(pio_buf);
- release_pio_buffer(err_buf);
- info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
- return;
- }
-
- status_mask = (info->ignore_status_mask >> 2) & 0x07;
-
- for (i = 0; i < num_bytes; i++) {
- if (!(err_buf->data[i] & status_mask)) {
- int flag = 0;
-
- if (err_buf->data[i] & 0x04) {
- flag = TTY_BREAK;
- if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
- } else if (err_buf->data[i] & 0x02)
- flag = TTY_FRAME;
- else if (err_buf->data[i] & 0x01)
- flag = TTY_PARITY;
- tty_insert_flip_char(tty, pio_buf->data[i], flag);
- }
- }
-
- tty_schedule_flip(tty);
-
- info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
- release_pio_buffer(pio_buf);
- release_pio_buffer(err_buf);
-}
-
-static void program_isa_dma(int dma, int dir, unsigned long addr, int len)
-{
- unsigned long flags;
-
- flags = claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma, dir);
- set_dma_addr(dma, addr);
- set_dma_count(dma, len);
- enable_dma(dma);
- release_dma_lock(flags);
-}
-
-static void receive_chars_dma(struct esp_struct *info, int num_bytes)
-{
- info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
- dma_bytes = num_bytes;
- info->stat_flags |= ESP_STAT_DMA_RX;
-
- program_isa_dma(dma, DMA_MODE_READ, isa_virt_to_bus(dma_buffer),
- dma_bytes);
- serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX);
-}
-
-static inline void receive_chars_dma_done(struct esp_struct *info,
- int status)
-{
- struct tty_struct *tty = info->port.tty;
- int num_bytes;
- unsigned long flags;
-
- flags = claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
-
- info->stat_flags &= ~ESP_STAT_DMA_RX;
- num_bytes = dma_bytes - get_dma_residue(dma);
- release_dma_lock(flags);
-
- info->icount.rx += num_bytes;
-
- if (num_bytes > 0) {
- tty_insert_flip_string(tty, dma_buffer, num_bytes - 1);
-
- status &= (0x1c & info->read_status_mask);
-
- /* Is the status significant or do we throw the last byte ? */
- if (!(status & info->ignore_status_mask)) {
- int statflag = 0;
-
- if (status & 0x10) {
- statflag = TTY_BREAK;
- (info->icount.brk)++;
- if (info->port.flags & ASYNC_SAK)
- do_SAK(tty);
- } else if (status & 0x08) {
- statflag = TTY_FRAME;
- info->icount.frame++;
- } else if (status & 0x04) {
- statflag = TTY_PARITY;
- info->icount.parity++;
- }
- tty_insert_flip_char(tty, dma_buffer[num_bytes - 1],
- statflag);
- }
- tty_schedule_flip(tty);
- }
-
- if (dma_bytes != num_bytes) {
- num_bytes = dma_bytes - num_bytes;
- dma_bytes = 0;
- receive_chars_dma(info, num_bytes);
- } else
- dma_bytes = 0;
-}
-
-/* Caller must hold info->lock */
-
-static inline void transmit_chars_pio(struct esp_struct *info,
- int space_avail)
-{
- int i;
- struct esp_pio_buffer *pio_buf;
-
- pio_buf = get_pio_buffer();
-
- if (!pio_buf)
- return;
-
- while (space_avail && info->xmit_cnt) {
- if (info->xmit_tail + space_avail <= ESP_XMIT_SIZE) {
- memcpy(pio_buf->data,
- &(info->xmit_buf[info->xmit_tail]),
- space_avail);
- } else {
- i = ESP_XMIT_SIZE - info->xmit_tail;
- memcpy(pio_buf->data,
- &(info->xmit_buf[info->xmit_tail]), i);
- memcpy(&(pio_buf->data[i]), info->xmit_buf,
- space_avail - i);
- }
-
- info->xmit_cnt -= space_avail;
- info->xmit_tail = (info->xmit_tail + space_avail) &
- (ESP_XMIT_SIZE - 1);
-
- for (i = 0; i < space_avail - 1; i += 2) {
- outw(*((unsigned short *)(pio_buf->data + i)),
- info->io_port + UART_ESI_TX);
- }
-
- if (space_avail & 0x0001)
- serial_out(info, UART_ESI_TX,
- pio_buf->data[space_avail - 1]);
-
- if (info->xmit_cnt) {
- serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
- serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL);
- space_avail = serial_in(info, UART_ESI_STAT1) << 8;
- space_avail |= serial_in(info, UART_ESI_STAT2);
-
- if (space_avail > info->xmit_cnt)
- space_avail = info->xmit_cnt;
- }
- }
-
- if (info->xmit_cnt < WAKEUP_CHARS) {
- if (info->port.tty)
- tty_wakeup(info->port.tty);
-
-#ifdef SERIAL_DEBUG_INTR
- printk("THRE...");
-#endif
-
- if (info->xmit_cnt <= 0) {
- info->IER &= ~UART_IER_THRI;
- serial_out(info, UART_ESI_CMD1,
- ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- }
- }
-
- release_pio_buffer(pio_buf);
-}
-
-/* Caller must hold info->lock */
-static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes)
-{
- dma_bytes = num_bytes;
-
- if (info->xmit_tail + dma_bytes <= ESP_XMIT_SIZE) {
- memcpy(dma_buffer, &(info->xmit_buf[info->xmit_tail]),
- dma_bytes);
- } else {
- int i = ESP_XMIT_SIZE - info->xmit_tail;
- memcpy(dma_buffer, &(info->xmit_buf[info->xmit_tail]),
- i);
- memcpy(&(dma_buffer[i]), info->xmit_buf, dma_bytes - i);
- }
-
- info->xmit_cnt -= dma_bytes;
- info->xmit_tail = (info->xmit_tail + dma_bytes) & (ESP_XMIT_SIZE - 1);
-
- if (info->xmit_cnt < WAKEUP_CHARS) {
- if (info->port.tty)
- tty_wakeup(info->port.tty);
-
-#ifdef SERIAL_DEBUG_INTR
- printk("THRE...");
-#endif
-
- if (info->xmit_cnt <= 0) {
- info->IER &= ~UART_IER_THRI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- }
- }
-
- info->stat_flags |= ESP_STAT_DMA_TX;
-
- program_isa_dma(dma, DMA_MODE_WRITE, isa_virt_to_bus(dma_buffer),
- dma_bytes);
- serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
-}
-
-static inline void transmit_chars_dma_done(struct esp_struct *info)
-{
- int num_bytes;
- unsigned long flags;
-
- flags = claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
-
- num_bytes = dma_bytes - get_dma_residue(dma);
- info->icount.tx += dma_bytes;
- release_dma_lock(flags);
-
- if (dma_bytes != num_bytes) {
- dma_bytes -= num_bytes;
- memmove(dma_buffer, dma_buffer + num_bytes, dma_bytes);
-
- program_isa_dma(dma, DMA_MODE_WRITE,
- isa_virt_to_bus(dma_buffer), dma_bytes);
-
- serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
- } else {
- dma_bytes = 0;
- info->stat_flags &= ~ESP_STAT_DMA_TX;
- }
-}
-
-static void check_modem_status(struct esp_struct *info)
-{
- int status;
-
- serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT);
- status = serial_in(info, UART_ESI_STAT2);
-
- if (status & UART_MSR_ANY_DELTA) {
- /* update input line counters */
- if (status & UART_MSR_TERI)
- info->icount.rng++;
- if (status & UART_MSR_DDSR)
- info->icount.dsr++;
- if (status & UART_MSR_DDCD)
- info->icount.dcd++;
- if (status & UART_MSR_DCTS)
- info->icount.cts++;
- wake_up_interruptible(&info->port.delta_msr_wait);
- }
-
- if ((info->port.flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
-#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
- printk("ttys%d CD now %s...", info->line,
- (status & UART_MSR_DCD) ? "on" : "off");
-#endif
- if (status & UART_MSR_DCD)
- wake_up_interruptible(&info->port.open_wait);
- else {
-#ifdef SERIAL_DEBUG_OPEN
- printk("scheduling hangup...");
-#endif
- tty_hangup(info->port.tty);
- }
- }
-}
-
-/*
- * This is the serial driver's interrupt routine
- */
-static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
-{
- struct esp_struct *info;
- unsigned err_status;
- unsigned int scratch;
-
-#ifdef SERIAL_DEBUG_INTR
- printk("rs_interrupt_single(%d)...", irq);
-#endif
- info = (struct esp_struct *)dev_id;
- err_status = 0;
- scratch = serial_in(info, UART_ESI_SID);
-
- spin_lock(&info->lock);
-
- if (!info->port.tty) {
- spin_unlock(&info->lock);
- return IRQ_NONE;
- }
-
- if (scratch & 0x04) { /* error */
- serial_out(info, UART_ESI_CMD1, ESI_GET_ERR_STAT);
- err_status = serial_in(info, UART_ESI_STAT1);
- serial_in(info, UART_ESI_STAT2);
-
- if (err_status & 0x01)
- info->stat_flags |= ESP_STAT_RX_TIMEOUT;
-
- if (err_status & 0x20) /* UART status */
- check_modem_status(info);
-
- if (err_status & 0x80) /* Start break */
- wake_up_interruptible(&info->break_wait);
- }
-
- if ((scratch & 0x88) || /* DMA completed or timed out */
- (err_status & 0x1c) /* receive error */) {
- if (info->stat_flags & ESP_STAT_DMA_RX)
- receive_chars_dma_done(info, err_status);
- else if (info->stat_flags & ESP_STAT_DMA_TX)
- transmit_chars_dma_done(info);
- }
-
- if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) &&
- ((scratch & 0x01) || (info->stat_flags & ESP_STAT_RX_TIMEOUT)) &&
- (info->IER & UART_IER_RDI)) {
- int num_bytes;
-
- serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
- serial_out(info, UART_ESI_CMD1, ESI_GET_RX_AVAIL);
- num_bytes = serial_in(info, UART_ESI_STAT1) << 8;
- num_bytes |= serial_in(info, UART_ESI_STAT2);
-
- num_bytes = tty_buffer_request_room(info->port.tty, num_bytes);
-
- if (num_bytes) {
- if (dma_bytes ||
- (info->stat_flags & ESP_STAT_USE_PIO) ||
- (num_bytes <= info->config.pio_threshold))
- receive_chars_pio(info, num_bytes);
- else
- receive_chars_dma(info, num_bytes);
- }
- }
-
- if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) &&
- (scratch & 0x02) && (info->IER & UART_IER_THRI)) {
- if ((info->xmit_cnt <= 0) || info->port.tty->stopped) {
- info->IER &= ~UART_IER_THRI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- } else {
- int num_bytes;
-
- serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
- serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL);
- num_bytes = serial_in(info, UART_ESI_STAT1) << 8;
- num_bytes |= serial_in(info, UART_ESI_STAT2);
-
- if (num_bytes > info->xmit_cnt)
- num_bytes = info->xmit_cnt;
-
- if (num_bytes) {
- if (dma_bytes ||
- (info->stat_flags & ESP_STAT_USE_PIO) ||
- (num_bytes <= info->config.pio_threshold))
- transmit_chars_pio(info, num_bytes);
- else
- transmit_chars_dma(info, num_bytes);
- }
- }
- }
-
- info->last_active = jiffies;
-
-#ifdef SERIAL_DEBUG_INTR
- printk("end.\n");
-#endif
- spin_unlock(&info->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * -------------------------------------------------------------------
- * Here ends the serial interrupt routines.
- * -------------------------------------------------------------------
- */
-
-/*
- * ---------------------------------------------------------------
- * Low level utility subroutines for the serial driver: routines to
- * figure out the appropriate timeout for an interrupt chain, routines
- * to initialize and startup a serial port, and routines to shutdown a
- * serial port. Useful stuff like that.
- *
- * Caller should hold lock
- * ---------------------------------------------------------------
- */
-
-static void esp_basic_init(struct esp_struct *info)
-{
- /* put ESPC in enhanced mode */
- serial_out(info, UART_ESI_CMD1, ESI_SET_MODE);
-
- if (info->stat_flags & ESP_STAT_NEVER_DMA)
- serial_out(info, UART_ESI_CMD2, 0x01);
- else
- serial_out(info, UART_ESI_CMD2, 0x31);
-
- /* disable interrupts for now */
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, 0x00);
-
- /* set interrupt and DMA channel */
- serial_out(info, UART_ESI_CMD1, ESI_SET_IRQ);
-
- if (info->stat_flags & ESP_STAT_NEVER_DMA)
- serial_out(info, UART_ESI_CMD2, 0x01);
- else
- serial_out(info, UART_ESI_CMD2, (dma << 4) | 0x01);
-
- serial_out(info, UART_ESI_CMD1, ESI_SET_ENH_IRQ);
-
- if (info->line % 8) /* secondary port */
- serial_out(info, UART_ESI_CMD2, 0x0d); /* shared */
- else if (info->irq == 9)
- serial_out(info, UART_ESI_CMD2, 0x02);
- else
- serial_out(info, UART_ESI_CMD2, info->irq);
-
- /* set error status mask (check this) */
- serial_out(info, UART_ESI_CMD1, ESI_SET_ERR_MASK);
-
- if (info->stat_flags & ESP_STAT_NEVER_DMA)
- serial_out(info, UART_ESI_CMD2, 0xa1);
- else
- serial_out(info, UART_ESI_CMD2, 0xbd);
-
- serial_out(info, UART_ESI_CMD2, 0x00);
-
- /* set DMA timeout */
- serial_out(info, UART_ESI_CMD1, ESI_SET_DMA_TMOUT);
- serial_out(info, UART_ESI_CMD2, 0xff);
-
- /* set FIFO trigger levels */
- serial_out(info, UART_ESI_CMD1, ESI_SET_TRIGGER);
- serial_out(info, UART_ESI_CMD2, info->config.rx_trigger >> 8);
- serial_out(info, UART_ESI_CMD2, info->config.rx_trigger);
- serial_out(info, UART_ESI_CMD2, info->config.tx_trigger >> 8);
- serial_out(info, UART_ESI_CMD2, info->config.tx_trigger);
-
- /* Set clock scaling and wait states */
- serial_out(info, UART_ESI_CMD1, ESI_SET_PRESCALAR);
- serial_out(info, UART_ESI_CMD2, 0x04 | ESPC_SCALE);
-
- /* set reinterrupt pacing */
- serial_out(info, UART_ESI_CMD1, ESI_SET_REINTR);
- serial_out(info, UART_ESI_CMD2, 0xff);
-}
-
-static int startup(struct esp_struct *info)
-{
- unsigned long flags;
- int retval = 0;
- unsigned int num_chars;
-
- spin_lock_irqsave(&info->lock, flags);
-
- if (info->port.flags & ASYNC_INITIALIZED)
- goto out;
-
- if (!info->xmit_buf) {
- info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_ATOMIC);
- retval = -ENOMEM;
- if (!info->xmit_buf)
- goto out;
- }
-
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "starting up ttys%d (irq %d)...",
- info->line, info->irq);
-#endif
-
- /* Flush the RX buffer. Using the ESI flush command may cause */
- /* wild interrupts, so read all the data instead. */
-
- serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
- serial_out(info, UART_ESI_CMD1, ESI_GET_RX_AVAIL);
- num_chars = serial_in(info, UART_ESI_STAT1) << 8;
- num_chars |= serial_in(info, UART_ESI_STAT2);
-
- while (num_chars > 1) {
- inw(info->io_port + UART_ESI_RX);
- num_chars -= 2;
- }
-
- if (num_chars)
- serial_in(info, UART_ESI_RX);
-
- /* set receive character timeout */
- serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT);
- serial_out(info, UART_ESI_CMD2, info->config.rx_timeout);
-
- /* clear all flags except the "never DMA" flag */
- info->stat_flags &= ESP_STAT_NEVER_DMA;
-
- if (info->stat_flags & ESP_STAT_NEVER_DMA)
- info->stat_flags |= ESP_STAT_USE_PIO;
-
- spin_unlock_irqrestore(&info->lock, flags);
-
- /*
- * Allocate the IRQ
- */
-
- retval = request_irq(info->irq, rs_interrupt_single, IRQF_SHARED,
- "esp serial", info);
-
- if (retval) {
- if (capable(CAP_SYS_ADMIN)) {
- if (info->port.tty)
- set_bit(TTY_IO_ERROR,
- &info->port.tty->flags);
- retval = 0;
- }
- goto out_unlocked;
- }
-
- if (!(info->stat_flags & ESP_STAT_USE_PIO) && !dma_buffer) {
- dma_buffer = (char *)__get_dma_pages(
- GFP_KERNEL, get_order(DMA_BUFFER_SZ));
-
- /* use PIO mode if DMA buf/chan cannot be allocated */
- if (!dma_buffer)
- info->stat_flags |= ESP_STAT_USE_PIO;
- else if (request_dma(dma, "esp serial")) {
- free_pages((unsigned long)dma_buffer,
- get_order(DMA_BUFFER_SZ));
- dma_buffer = NULL;
- info->stat_flags |= ESP_STAT_USE_PIO;
- }
-
- }
-
- info->MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
-
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- serial_out(info, UART_ESI_CMD2, info->MCR);
-
- /*
- * Finally, enable interrupts
- */
- /* info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI; */
- info->IER = UART_IER_RLSI | UART_IER_RDI | UART_IER_DMA_TMOUT |
- UART_IER_DMA_TC;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
-
- if (info->port.tty)
- clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- spin_unlock_irqrestore(&info->lock, flags);
-
- /*
- * Set up the tty->alt_speed kludge
- */
- if (info->port.tty) {
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- info->port.tty->alt_speed = 57600;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- info->port.tty->alt_speed = 115200;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- info->port.tty->alt_speed = 230400;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- info->port.tty->alt_speed = 460800;
- }
-
- /*
- * set the speed of the serial port
- */
- change_speed(info);
- info->port.flags |= ASYNC_INITIALIZED;
- return 0;
-
-out:
- spin_unlock_irqrestore(&info->lock, flags);
-out_unlocked:
- return retval;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void shutdown(struct esp_struct *info)
-{
- unsigned long flags, f;
-
- if (!(info->port.flags & ASYNC_INITIALIZED))
- return;
-
-#ifdef SERIAL_DEBUG_OPEN
- printk("Shutting down serial port %d (irq %d)....", info->line,
- info->irq);
-#endif
-
- spin_lock_irqsave(&info->lock, flags);
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
- * here so the queue might never be waken up
- */
- wake_up_interruptible(&info->port.delta_msr_wait);
- wake_up_interruptible(&info->break_wait);
-
- /* stop a DMA transfer on the port being closed */
- /* DMA lock is higher priority always */
- if (info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) {
- f = claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- release_dma_lock(f);
-
- dma_bytes = 0;
- }
-
- /*
- * Free the IRQ
- */
- free_irq(info->irq, info);
-
- if (dma_buffer) {
- struct esp_struct *current_port = ports;
-
- while (current_port) {
- if ((current_port != info) &&
- (current_port->port.flags & ASYNC_INITIALIZED))
- break;
-
- current_port = current_port->next_port;
- }
-
- if (!current_port) {
- free_dma(dma);
- free_pages((unsigned long)dma_buffer,
- get_order(DMA_BUFFER_SZ));
- dma_buffer = NULL;
- }
- }
-
- if (info->xmit_buf) {
- free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
- }
-
- info->IER = 0;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, 0x00);
-
- if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL))
- info->MCR &= ~(UART_MCR_DTR|UART_MCR_RTS);
-
- info->MCR &= ~UART_MCR_OUT2;
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- serial_out(info, UART_ESI_CMD2, info->MCR);
-
- if (info->port.tty)
- set_bit(TTY_IO_ERROR, &info->port.tty->flags);
-
- info->port.flags &= ~ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static void change_speed(struct esp_struct *info)
-{
- unsigned short port;
- int quot = 0;
- unsigned cflag, cval;
- int baud, bits;
- unsigned char flow1 = 0, flow2 = 0;
- unsigned long flags;
-
- if (!info->port.tty || !info->port.tty->termios)
- return;
- cflag = info->port.tty->termios->c_cflag;
- port = info->io_port;
-
- /* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5: cval = 0x00; bits = 7; break;
- case CS6: cval = 0x01; bits = 8; break;
- case CS7: cval = 0x02; bits = 9; break;
- case CS8: cval = 0x03; bits = 10; break;
- default: cval = 0x00; bits = 7; break;
- }
- if (cflag & CSTOPB) {
- cval |= 0x04;
- bits++;
- }
- if (cflag & PARENB) {
- cval |= UART_LCR_PARITY;
- bits++;
- }
- if (!(cflag & PARODD))
- cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
- if (cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
-#endif
- baud = tty_get_baud_rate(info->port.tty);
- if (baud == 38400 &&
- ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
- quot = info->custom_divisor;
- else {
- if (baud == 134) /* Special case since 134 is really 134.5 */
- quot = (2*BASE_BAUD / 269);
- else if (baud)
- quot = BASE_BAUD / baud;
- }
- /* If the quotient is ever zero, default to 9600 bps */
- if (!quot)
- quot = BASE_BAUD / 9600;
-
- if (baud) {
- /* Actual rate */
- baud = BASE_BAUD/quot;
- tty_encode_baud_rate(info->port.tty, baud, baud);
- }
- info->timeout = ((1024 * HZ * bits * quot) / BASE_BAUD) + (HZ / 50);
-
- /* CTS flow control flag and modem status interrupts */
- /* info->IER &= ~UART_IER_MSI; */
- if (cflag & CRTSCTS) {
- info->port.flags |= ASYNC_CTS_FLOW;
- /* info->IER |= UART_IER_MSI; */
- flow1 = 0x04;
- flow2 = 0x10;
- } else
- info->port.flags &= ~ASYNC_CTS_FLOW;
- if (cflag & CLOCAL)
- info->port.flags &= ~ASYNC_CHECK_CD;
- else
- info->port.flags |= ASYNC_CHECK_CD;
-
- /*
- * Set up parity check flag
- */
- info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (I_INPCK(info->port.tty))
- info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
- info->read_status_mask |= UART_LSR_BI;
-
- info->ignore_status_mask = 0;
-#if 0
- /* This should be safe, but for some broken bits of hardware... */
- if (I_IGNPAR(info->port.tty)) {
- info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
- info->read_status_mask |= UART_LSR_PE | UART_LSR_FE;
- }
-#endif
- if (I_IGNBRK(info->port.tty)) {
- info->ignore_status_mask |= UART_LSR_BI;
- info->read_status_mask |= UART_LSR_BI;
- /*
- * If we're ignore parity and break indicators, ignore
- * overruns too. (For real raw support).
- */
- if (I_IGNPAR(info->port.tty)) {
- info->ignore_status_mask |= UART_LSR_OE | \
- UART_LSR_PE | UART_LSR_FE;
- info->read_status_mask |= UART_LSR_OE | \
- UART_LSR_PE | UART_LSR_FE;
- }
- }
-
- if (I_IXOFF(info->port.tty))
- flow1 |= 0x81;
-
- spin_lock_irqsave(&info->lock, flags);
- /* set baud */
- serial_out(info, UART_ESI_CMD1, ESI_SET_BAUD);
- serial_out(info, UART_ESI_CMD2, quot >> 8);
- serial_out(info, UART_ESI_CMD2, quot & 0xff);
-
- /* set data bits, parity, etc. */
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_LCR);
- serial_out(info, UART_ESI_CMD2, cval);
-
- /* Enable flow control */
- serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_CNTL);
- serial_out(info, UART_ESI_CMD2, flow1);
- serial_out(info, UART_ESI_CMD2, flow2);
-
- /* set flow control characters (XON/XOFF only) */
- if (I_IXOFF(info->port.tty)) {
- serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_CHARS);
- serial_out(info, UART_ESI_CMD2, START_CHAR(info->port.tty));
- serial_out(info, UART_ESI_CMD2, STOP_CHAR(info->port.tty));
- serial_out(info, UART_ESI_CMD2, 0x10);
- serial_out(info, UART_ESI_CMD2, 0x21);
- switch (cflag & CSIZE) {
- case CS5:
- serial_out(info, UART_ESI_CMD2, 0x1f);
- break;
- case CS6:
- serial_out(info, UART_ESI_CMD2, 0x3f);
- break;
- case CS7:
- case CS8:
- serial_out(info, UART_ESI_CMD2, 0x7f);
- break;
- default:
- serial_out(info, UART_ESI_CMD2, 0xff);
- break;
- }
- }
-
- /* Set high/low water */
- serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_LVL);
- serial_out(info, UART_ESI_CMD2, info->config.flow_off >> 8);
- serial_out(info, UART_ESI_CMD2, info->config.flow_off);
- serial_out(info, UART_ESI_CMD2, info->config.flow_on >> 8);
- serial_out(info, UART_ESI_CMD2, info->config.flow_on);
-
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-static int rs_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
- int ret = 0;
-
- if (serial_paranoia_check(info, tty->name, "rs_put_char"))
- return 0;
-
- if (!info->xmit_buf)
- return 0;
-
- spin_lock_irqsave(&info->lock, flags);
- if (info->xmit_cnt < ESP_XMIT_SIZE - 1) {
- info->xmit_buf[info->xmit_head++] = ch;
- info->xmit_head &= ESP_XMIT_SIZE-1;
- info->xmit_cnt++;
- ret = 1;
- }
- spin_unlock_irqrestore(&info->lock, flags);
- return ret;
-}
-
-static void rs_flush_chars(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
-
- if (info->xmit_cnt <= 0 || tty->stopped || !info->xmit_buf)
- goto out;
-
- if (!(info->IER & UART_IER_THRI)) {
- info->IER |= UART_IER_THRI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- }
-out:
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-static int rs_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
-{
- int c, t, ret = 0;
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_write"))
- return 0;
-
- if (!info->xmit_buf)
- return 0;
-
- while (1) {
- /* Thanks to R. Wolff for suggesting how to do this with */
- /* interrupts enabled */
-
- c = count;
- t = ESP_XMIT_SIZE - info->xmit_cnt - 1;
-
- if (t < c)
- c = t;
-
- t = ESP_XMIT_SIZE - info->xmit_head;
-
- if (t < c)
- c = t;
-
- if (c <= 0)
- break;
-
- memcpy(info->xmit_buf + info->xmit_head, buf, c);
-
- info->xmit_head = (info->xmit_head + c) & (ESP_XMIT_SIZE-1);
- info->xmit_cnt += c;
- buf += c;
- count -= c;
- ret += c;
- }
-
- spin_lock_irqsave(&info->lock, flags);
-
- if (info->xmit_cnt && !tty->stopped && !(info->IER & UART_IER_THRI)) {
- info->IER |= UART_IER_THRI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- }
-
- spin_unlock_irqrestore(&info->lock, flags);
- return ret;
-}
-
-static int rs_write_room(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
- int ret;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_write_room"))
- return 0;
-
- spin_lock_irqsave(&info->lock, flags);
-
- ret = ESP_XMIT_SIZE - info->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- spin_unlock_irqrestore(&info->lock, flags);
- return ret;
-}
-
-static int rs_chars_in_buffer(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
- return 0;
- return info->xmit_cnt;
-}
-
-static void rs_flush_buffer(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
- return;
- spin_lock_irqsave(&info->lock, flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- spin_unlock_irqrestore(&info->lock, flags);
- tty_wakeup(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_throttle()
- *
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void rs_throttle(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-#ifdef SERIAL_DEBUG_THROTTLE
- char buf[64];
-
- printk("throttle %s: %d....\n", tty_name(tty, buf),
- tty_chars_in_buffer(tty));
-#endif
-
- if (serial_paranoia_check(info, tty->name, "rs_throttle"))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
- info->IER &= ~UART_IER_RDI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT);
- serial_out(info, UART_ESI_CMD2, 0x00);
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-static void rs_unthrottle(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-#ifdef SERIAL_DEBUG_THROTTLE
- char buf[64];
-
- printk(KERN_DEBUG "unthrottle %s: %d....\n", tty_name(tty, buf),
- tty_chars_in_buffer(tty));
-#endif
-
- if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
- info->IER |= UART_IER_RDI;
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
- serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT);
- serial_out(info, UART_ESI_CMD2, info->config.rx_timeout);
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-static int get_serial_info(struct esp_struct *info,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
-
- lock_kernel();
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = PORT_16550A;
- tmp.line = info->line;
- tmp.port = info->io_port;
- tmp.irq = info->irq;
- tmp.flags = info->port.flags;
- tmp.xmit_fifo_size = 1024;
- tmp.baud_base = BASE_BAUD;
- tmp.close_delay = info->close_delay;
- tmp.closing_wait = info->closing_wait;
- tmp.custom_divisor = info->custom_divisor;
- tmp.hub6 = 0;
- unlock_kernel();
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
-
-static int get_esp_config(struct esp_struct *info,
- struct hayes_esp_config __user *retinfo)
-{
- struct hayes_esp_config tmp;
-
- if (!retinfo)
- return -EFAULT;
-
- memset(&tmp, 0, sizeof(tmp));
- lock_kernel();
- tmp.rx_timeout = info->config.rx_timeout;
- tmp.rx_trigger = info->config.rx_trigger;
- tmp.tx_trigger = info->config.tx_trigger;
- tmp.flow_off = info->config.flow_off;
- tmp.flow_on = info->config.flow_on;
- tmp.pio_threshold = info->config.pio_threshold;
- tmp.dma_channel = (info->stat_flags & ESP_STAT_NEVER_DMA ? 0 : dma);
- unlock_kernel();
-
- return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
-}
-
-static int set_serial_info(struct esp_struct *info,
- struct serial_struct __user *new_info)
-{
- struct serial_struct new_serial;
- struct esp_struct old_info;
- unsigned int change_irq;
- int retval = 0;
- struct esp_struct *current_async;
-
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
- old_info = *info;
-
- if ((new_serial.type != PORT_16550A) ||
- (new_serial.hub6) ||
- (info->io_port != new_serial.port) ||
- (new_serial.baud_base != BASE_BAUD) ||
- (new_serial.irq > 15) ||
- (new_serial.irq < 2) ||
- (new_serial.irq == 6) ||
- (new_serial.irq == 8) ||
- (new_serial.irq == 13))
- return -EINVAL;
-
- change_irq = new_serial.irq != info->irq;
-
- if (change_irq && (info->line % 8))
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN)) {
- if (change_irq ||
- (new_serial.close_delay != info->close_delay) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) !=
- (info->port.flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- info->port.flags = ((info->port.flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- info->custom_divisor = new_serial.custom_divisor;
- } else {
- if (new_serial.irq == 2)
- new_serial.irq = 9;
-
- if (change_irq) {
- current_async = ports;
-
- while (current_async) {
- if ((current_async->line >= info->line) &&
- (current_async->line < (info->line + 8))) {
- if (current_async == info) {
- if (current_async->port.count > 1)
- return -EBUSY;
- } else if (current_async->port.count)
- return -EBUSY;
- }
-
- current_async = current_async->next_port;
- }
- }
-
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
-
- info->port.flags = ((info->port.flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- info->custom_divisor = new_serial.custom_divisor;
- info->close_delay = new_serial.close_delay * HZ/100;
- info->closing_wait = new_serial.closing_wait * HZ/100;
-
- if (change_irq) {
- /*
- * We need to shutdown the serial port at the old
- * port/irq combination.
- */
- shutdown(info);
-
- current_async = ports;
-
- while (current_async) {
- if ((current_async->line >= info->line) &&
- (current_async->line < (info->line + 8)))
- current_async->irq = new_serial.irq;
-
- current_async = current_async->next_port;
- }
-
- serial_out(info, UART_ESI_CMD1, ESI_SET_ENH_IRQ);
- if (info->irq == 9)
- serial_out(info, UART_ESI_CMD2, 0x02);
- else
- serial_out(info, UART_ESI_CMD2, info->irq);
- }
- }
-
- if (info->port.flags & ASYNC_INITIALIZED) {
- if (((old_info.port.flags & ASYNC_SPD_MASK) !=
- (info->port.flags & ASYNC_SPD_MASK)) ||
- (old_info.custom_divisor != info->custom_divisor)) {
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- info->port.tty->alt_speed = 57600;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- info->port.tty->alt_speed = 115200;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- info->port.tty->alt_speed = 230400;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- info->port.tty->alt_speed = 460800;
- change_speed(info);
- }
- } else
- retval = startup(info);
-
- return retval;
-}
-
-static int set_esp_config(struct esp_struct *info,
- struct hayes_esp_config __user *new_info)
-{
- struct hayes_esp_config new_config;
- unsigned int change_dma;
- int retval = 0;
- struct esp_struct *current_async;
- unsigned long flags;
-
- /* Perhaps a non-sysadmin user should be able to do some of these */
- /* operations. I haven't decided yet. */
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&new_config, new_info, sizeof(new_config)))
- return -EFAULT;
-
- if ((new_config.flow_on >= new_config.flow_off) ||
- (new_config.rx_trigger < 1) ||
- (new_config.tx_trigger < 1) ||
- (new_config.flow_off < 1) ||
- (new_config.flow_on < 1) ||
- (new_config.rx_trigger > 1023) ||
- (new_config.tx_trigger > 1023) ||
- (new_config.flow_off > 1023) ||
- (new_config.flow_on > 1023) ||
- (new_config.pio_threshold < 0) ||
- (new_config.pio_threshold > 1024))
- return -EINVAL;
-
- if ((new_config.dma_channel != 1) && (new_config.dma_channel != 3))
- new_config.dma_channel = 0;
-
- if (info->stat_flags & ESP_STAT_NEVER_DMA)
- change_dma = new_config.dma_channel;
- else
- change_dma = (new_config.dma_channel != dma);
-
- if (change_dma) {
- if (new_config.dma_channel) {
- /* PIO mode to DMA mode transition OR */
- /* change current DMA channel */
- current_async = ports;
-
- while (current_async) {
- if (current_async == info) {
- if (current_async->port.count > 1)
- return -EBUSY;
- } else if (current_async->port.count)
- return -EBUSY;
-
- current_async = current_async->next_port;
- }
-
- shutdown(info);
- dma = new_config.dma_channel;
- info->stat_flags &= ~ESP_STAT_NEVER_DMA;
-
- /* all ports must use the same DMA channel */
-
- spin_lock_irqsave(&info->lock, flags);
- current_async = ports;
-
- while (current_async) {
- esp_basic_init(current_async);
- current_async = current_async->next_port;
- }
- spin_unlock_irqrestore(&info->lock, flags);
- } else {
- /* DMA mode to PIO mode only */
- if (info->port.count > 1)
- return -EBUSY;
-
- shutdown(info);
- spin_lock_irqsave(&info->lock, flags);
- info->stat_flags |= ESP_STAT_NEVER_DMA;
- esp_basic_init(info);
- spin_unlock_irqrestore(&info->lock, flags);
- }
- }
-
- info->config.pio_threshold = new_config.pio_threshold;
-
- if ((new_config.flow_off != info->config.flow_off) ||
- (new_config.flow_on != info->config.flow_on)) {
- info->config.flow_off = new_config.flow_off;
- info->config.flow_on = new_config.flow_on;
-
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_SET_FLOW_LVL);
- serial_out(info, UART_ESI_CMD2, new_config.flow_off >> 8);
- serial_out(info, UART_ESI_CMD2, new_config.flow_off);
- serial_out(info, UART_ESI_CMD2, new_config.flow_on >> 8);
- serial_out(info, UART_ESI_CMD2, new_config.flow_on);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-
- if ((new_config.rx_trigger != info->config.rx_trigger) ||
- (new_config.tx_trigger != info->config.tx_trigger)) {
- info->config.rx_trigger = new_config.rx_trigger;
- info->config.tx_trigger = new_config.tx_trigger;
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_SET_TRIGGER);
- serial_out(info, UART_ESI_CMD2,
- new_config.rx_trigger >> 8);
- serial_out(info, UART_ESI_CMD2, new_config.rx_trigger);
- serial_out(info, UART_ESI_CMD2,
- new_config.tx_trigger >> 8);
- serial_out(info, UART_ESI_CMD2, new_config.tx_trigger);
- spin_unlock_irqrestore(&info->lock, flags);
- }
-
- if (new_config.rx_timeout != info->config.rx_timeout) {
- info->config.rx_timeout = new_config.rx_timeout;
- spin_lock_irqsave(&info->lock, flags);
-
- if (info->IER & UART_IER_RDI) {
- serial_out(info, UART_ESI_CMD1,
- ESI_SET_RX_TIMEOUT);
- serial_out(info, UART_ESI_CMD2,
- new_config.rx_timeout);
- }
-
- spin_unlock_irqrestore(&info->lock, flags);
- }
-
- if (!(info->port.flags & ASYNC_INITIALIZED))
- retval = startup(info);
-
- return retval;
-}
-
-/*
- * get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- *	    the transmit shift register is empty, not when the
- * transmit holding register is empty. This functionality
- * allows an RS485 driver to be written in user space.
- */
-static int get_lsr_info(struct esp_struct *info, unsigned int __user *value)
-{
- unsigned char status;
- unsigned int result;
- unsigned long flags;
-
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT);
- status = serial_in(info, UART_ESI_STAT1);
- spin_unlock_irqrestore(&info->lock, flags);
- result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- return put_user(result, value);
-}
-
-
-static int esp_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned char control, status;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, __func__))
- return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- control = info->MCR;
-
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT);
- status = serial_in(info, UART_ESI_STAT2);
- spin_unlock_irqrestore(&info->lock, flags);
-
- return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
- | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
- | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
- | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
- | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
- | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
-}
-
-static int esp_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, __func__))
- return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- spin_lock_irqsave(&info->lock, flags);
-
- if (set & TIOCM_RTS)
- info->MCR |= UART_MCR_RTS;
- if (set & TIOCM_DTR)
- info->MCR |= UART_MCR_DTR;
-
- if (clear & TIOCM_RTS)
- info->MCR &= ~UART_MCR_RTS;
- if (clear & TIOCM_DTR)
- info->MCR &= ~UART_MCR_DTR;
-
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- serial_out(info, UART_ESI_CMD2, info->MCR);
-
- spin_unlock_irqrestore(&info->lock, flags);
- return 0;
-}
-
-/*
- * rs_break() --- routine which turns the break handling on or off
- */
-static int esp_break(struct tty_struct *tty, int break_state)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "esp_break"))
- return -EINVAL;
-
- if (break_state == -1) {
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_ISSUE_BREAK);
- serial_out(info, UART_ESI_CMD2, 0x01);
- spin_unlock_irqrestore(&info->lock, flags);
-
- /* FIXME - new style wait needed here */
- interruptible_sleep_on(&info->break_wait);
- } else {
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_ISSUE_BREAK);
- serial_out(info, UART_ESI_CMD2, 0x00);
- spin_unlock_irqrestore(&info->lock, flags);
- }
- return 0;
-}
-
-static int rs_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct esp_struct *info = tty->driver_data;
- struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser; /* user space */
- void __user *argp = (void __user *)arg;
- unsigned long flags;
- int ret;
-
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
-
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
- (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT) &&
- (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT) &&
- (cmd != TIOCGHAYESESP) && (cmd != TIOCSHAYESESP)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
- }
-
- switch (cmd) {
- case TIOCGSERIAL:
- return get_serial_info(info, argp);
- case TIOCSSERIAL:
- lock_kernel();
- ret = set_serial_info(info, argp);
- unlock_kernel();
- return ret;
- case TIOCSERGWILD:
- return put_user(0L, (unsigned long __user *)argp);
- case TIOCSERGETLSR: /* Get line status register */
- return get_lsr_info(info, argp);
- case TIOCSERSWILD:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
- /*
- * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
- * - mask passed in arg for lines of interest
- * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
- * Caller should use TIOCGICOUNT to see which one it was
- */
- case TIOCMIWAIT:
- spin_lock_irqsave(&info->lock, flags);
- cprev = info->icount; /* note the counters on entry */
- spin_unlock_irqrestore(&info->lock, flags);
- while (1) {
- /* FIXME: convert to new style wakeup */
- interruptible_sleep_on(&info->port.delta_msr_wait);
- /* see if a signal did it */
- if (signal_pending(current))
- return -ERESTARTSYS;
- spin_lock_irqsave(&info->lock, flags);
- cnow = info->icount; /* atomic copy */
- spin_unlock_irqrestore(&info->lock, flags);
- if (cnow.rng == cprev.rng &&
- cnow.dsr == cprev.dsr &&
- cnow.dcd == cprev.dcd &&
- cnow.cts == cprev.cts)
- return -EIO; /* no change => error */
- if (((arg & TIOCM_RNG) &&
- (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) &&
- (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) &&
- (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) &&
- (cnow.cts != cprev.cts))) {
- return 0;
- }
- cprev = cnow;
- }
- /* NOTREACHED */
- /*
- * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
- * Return: write counters to the user passed counter struct
- * NB: both 1->0 and 0->1 transitions are counted except for
- * RI where only 0->1 is counted.
- */
- case TIOCGICOUNT:
- spin_lock_irqsave(&info->lock, flags);
- cnow = info->icount;
- spin_unlock_irqrestore(&info->lock, flags);
- p_cuser = argp;
- if (put_user(cnow.cts, &p_cuser->cts) ||
- put_user(cnow.dsr, &p_cuser->dsr) ||
- put_user(cnow.rng, &p_cuser->rng) ||
- put_user(cnow.dcd, &p_cuser->dcd))
- return -EFAULT;
- return 0;
- case TIOCGHAYESESP:
- return get_esp_config(info, argp);
- case TIOCSHAYESESP:
- lock_kernel();
- ret = set_esp_config(info, argp);
- unlock_kernel();
- return ret;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- change_speed(info);
-
- spin_lock_irqsave(&info->lock, flags);
-
- /* Handle transition to B0 status */
- if ((old_termios->c_cflag & CBAUD) &&
- !(tty->termios->c_cflag & CBAUD)) {
- info->MCR &= ~(UART_MCR_DTR|UART_MCR_RTS);
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- serial_out(info, UART_ESI_CMD2, info->MCR);
- }
-
- /* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) &&
- (tty->termios->c_cflag & CBAUD)) {
- info->MCR |= (UART_MCR_DTR | UART_MCR_RTS);
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- serial_out(info, UART_ESI_CMD2, info->MCR);
- }
-
- spin_unlock_irqrestore(&info->lock, flags);
-
-	/* Handle turning off CRTSCTS */
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- rs_start(tty);
- }
-}
-
-/*
- * ------------------------------------------------------------
- * rs_close()
- *
- * This routine is called when the serial port gets closed. First, we
- * wait for the last remaining data to be sent. Then, we unlink its
- * async structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- * ------------------------------------------------------------
- */
-static void rs_close(struct tty_struct *tty, struct file *filp)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long flags;
-
- if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
-
- if (tty_hung_up_p(filp)) {
- DBG_CNT("before DEC-hung");
- goto out;
- }
-
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "rs_close ttys%d, count = %d\n",
- info->line, info->port.count);
-#endif
- if (tty->count == 1 && info->port.count != 1) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_DEBUG "rs_close: bad serial port count; tty->count is 1, info->port.count is %d\n", info->port.count);
- info->port.count = 1;
- }
- if (--info->port.count < 0) {
- printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n",
- info->line, info->port.count);
- info->port.count = 0;
- }
- if (info->port.count) {
- DBG_CNT("before DEC-2");
- goto out;
- }
- info->port.flags |= ASYNC_CLOSING;
-
- spin_unlock_irqrestore(&info->lock, flags);
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, info->closing_wait);
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- /* info->IER &= ~UART_IER_RLSI; */
- info->IER &= ~UART_IER_RDI;
- info->read_status_mask &= ~UART_LSR_DR;
- if (info->port.flags & ASYNC_INITIALIZED) {
-
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
- serial_out(info, UART_ESI_CMD2, info->IER);
-
- /* disable receive timeout */
- serial_out(info, UART_ESI_CMD1, ESI_SET_RX_TIMEOUT);
- serial_out(info, UART_ESI_CMD2, 0x00);
-
- spin_unlock_irqrestore(&info->lock, flags);
-
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- rs_wait_until_sent(tty, info->timeout);
- }
- shutdown(info);
- rs_flush_buffer(tty);
- tty_ldisc_flush(tty);
- tty->closing = 0;
- info->port.tty = NULL;
-
- if (info->port.blocked_open) {
- if (info->close_delay)
- msleep_interruptible(jiffies_to_msecs(info->close_delay));
- wake_up_interruptible(&info->port.open_wait);
- }
- info->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
- wake_up_interruptible(&info->port.close_wait);
- return;
-
-out:
- spin_unlock_irqrestore(&info->lock, flags);
-}
-
-static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct esp_struct *info = tty->driver_data;
- unsigned long orig_jiffies, char_time;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
- return;
-
- orig_jiffies = jiffies;
- char_time = ((info->timeout - HZ / 50) / 1024) / 5;
-
- if (!char_time)
- char_time = 1;
-
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
- serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL);
-
- while ((serial_in(info, UART_ESI_STAT1) != 0x03) ||
- (serial_in(info, UART_ESI_STAT2) != 0xff)) {
-
- spin_unlock_irqrestore(&info->lock, flags);
- msleep_interruptible(jiffies_to_msecs(char_time));
-
- if (signal_pending(current))
- return;
-
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
- return;
-
- spin_lock_irqsave(&info->lock, flags);
- serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
- serial_out(info, UART_ESI_CMD1, ESI_GET_TX_AVAIL);
- }
- spin_unlock_irqrestore(&info->lock, flags);
- set_current_state(TASK_RUNNING);
-}
-
-/*
- * esp_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-static void esp_hangup(struct tty_struct *tty)
-{
- struct esp_struct *info = tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "esp_hangup"))
- return;
-
- rs_flush_buffer(tty);
- shutdown(info);
- info->port.count = 0;
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
- info->port.tty = NULL;
- wake_up_interruptible(&info->port.open_wait);
-}
-
-static int esp_carrier_raised(struct tty_port *port)
-{
- struct esp_struct *info = container_of(port, struct esp_struct, port);
- serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT);
- if (serial_in(info, UART_ESI_STAT2) & UART_MSR_DCD)
- return 1;
- return 0;
-}
-
-/*
- * ------------------------------------------------------------
- * esp_open() and friends
- * ------------------------------------------------------------
- */
-static int block_til_ready(struct tty_struct *tty, struct file *filp,
- struct esp_struct *info)
-{
- DECLARE_WAITQUEUE(wait, current);
- int retval;
- int do_clocal = 0;
- unsigned long flags;
- int cd;
- struct tty_port *port = &info->port;
-
- /*
- * If the device is in the middle of being closed, then block
- * until it's done, and then try again.
- */
- if (tty_hung_up_p(filp) ||
- (port->flags & ASYNC_CLOSING)) {
- if (port->flags & ASYNC_CLOSING)
- interruptible_sleep_on(&port->close_wait);
-#ifdef SERIAL_DO_RESTART
- if (port->flags & ASYNC_HUP_NOTIFY)
- return -EAGAIN;
- else
- return -ERESTARTSYS;
-#else
- return -EAGAIN;
-#endif
- }
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- (tty->flags & (1 << TTY_IO_ERROR))) {
- port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
-
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, port->count is dropped by one, so that
- * rs_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&port->open_wait, &wait);
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "block_til_ready before block: ttys%d, count = %d\n",
- info->line, port->count);
-#endif
- spin_lock_irqsave(&info->lock, flags);
- if (!tty_hung_up_p(filp))
- port->count--;
- port->blocked_open++;
- while (1) {
- if ((tty->termios->c_cflag & CBAUD)) {
- unsigned int scratch;
-
- serial_out(info, UART_ESI_CMD1, ESI_READ_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- scratch = serial_in(info, UART_ESI_STAT1);
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- serial_out(info, UART_ESI_CMD2,
- scratch | UART_MCR_DTR | UART_MCR_RTS);
- }
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(port->flags & ASYNC_INITIALIZED)) {
-#ifdef SERIAL_DO_RESTART
- if (port->flags & ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
-#else
- retval = -EAGAIN;
-#endif
- break;
- }
-
- cd = tty_port_carrier_raised(port);
-
- if (!(port->flags & ASYNC_CLOSING) &&
- (do_clocal))
- break;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "block_til_ready blocking: ttys%d, count = %d\n",
- info->line, port->count);
-#endif
- spin_unlock_irqrestore(&info->lock, flags);
- schedule();
- spin_lock_irqsave(&info->lock, flags);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&port->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- port->count++;
- port->blocked_open--;
- spin_unlock_irqrestore(&info->lock, flags);
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "block_til_ready after blocking: ttys%d, count = %d\n",
- info->line, port->count);
-#endif
- if (retval)
- return retval;
- port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
-}
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for a serial port, linking its async structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int esp_open(struct tty_struct *tty, struct file *filp)
-{
- struct esp_struct *info;
- int retval, line;
- unsigned long flags;
-
- line = tty->index;
- if ((line < 0) || (line >= NR_PORTS))
- return -ENODEV;
-
- /* find the port in the chain */
-
- info = ports;
-
- while (info && (info->line != line))
- info = info->next_port;
-
- if (!info) {
- serial_paranoia_check(info, tty->name, "esp_open");
- return -ENODEV;
- }
-
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "esp_open %s, count = %d\n", tty->name, info->port.count);
-#endif
- spin_lock_irqsave(&info->lock, flags);
- info->port.count++;
- tty->driver_data = info;
- info->port.tty = tty;
-
- spin_unlock_irqrestore(&info->lock, flags);
-
- /*
- * Start up serial port
- */
- retval = startup(info);
- if (retval)
- return retval;
-
- retval = block_til_ready(tty, filp, info);
- if (retval) {
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "esp_open returning after block_til_ready with %d\n",
- retval);
-#endif
- return retval;
- }
-#ifdef SERIAL_DEBUG_OPEN
- printk(KERN_DEBUG "esp_open %s successful...", tty->name);
-#endif
- return 0;
-}
-
-/*
- * ---------------------------------------------------------------------
- * espserial_init() and friends
- *
- * espserial_init() is called at boot-time to initialize the serial driver.
- * ---------------------------------------------------------------------
- */
-
-/*
- * This routine prints out the appropriate serial driver version
- * number, and identifies which options were configured into this
- * driver.
- */
-
-static void __init show_serial_version(void)
-{
- printk(KERN_INFO "%s version %s (DMA %u)\n",
- serial_name, serial_version, dma);
-}
-
-/*
- * This routine is called by espserial_init() to initialize a specific serial
- * port.
- */
-static int autoconfig(struct esp_struct *info)
-{
- int port_detected = 0;
- unsigned long flags;
-
- if (!request_region(info->io_port, REGION_SIZE, "esp serial"))
- return -EIO;
-
- spin_lock_irqsave(&info->lock, flags);
- /*
- * Check for ESP card
- */
-
- if (serial_in(info, UART_ESI_BASE) == 0xf3) {
- serial_out(info, UART_ESI_CMD1, 0x00);
- serial_out(info, UART_ESI_CMD1, 0x01);
-
- if ((serial_in(info, UART_ESI_STAT2) & 0x70) == 0x20) {
- port_detected = 1;
-
- if (!(info->irq)) {
- serial_out(info, UART_ESI_CMD1, 0x02);
-
- if (serial_in(info, UART_ESI_STAT1) & 0x01)
- info->irq = 3;
- else
- info->irq = 4;
- }
-
-
- /* put card in enhanced mode */
- /* this prevents access through */
- /* the "old" IO ports */
- esp_basic_init(info);
-
- /* clear out MCR */
- serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
- serial_out(info, UART_ESI_CMD2, UART_MCR);
- serial_out(info, UART_ESI_CMD2, 0x00);
- }
- }
- if (!port_detected)
- release_region(info->io_port, REGION_SIZE);
-
- spin_unlock_irqrestore(&info->lock, flags);
- return (port_detected);
-}
-
-static const struct tty_operations esp_ops = {
- .open = esp_open,
- .close = rs_close,
- .write = rs_write,
- .put_char = rs_put_char,
- .flush_chars = rs_flush_chars,
- .write_room = rs_write_room,
- .chars_in_buffer = rs_chars_in_buffer,
- .flush_buffer = rs_flush_buffer,
- .ioctl = rs_ioctl,
- .throttle = rs_throttle,
- .unthrottle = rs_unthrottle,
- .set_termios = rs_set_termios,
- .stop = rs_stop,
- .start = rs_start,
- .hangup = esp_hangup,
- .break_ctl = esp_break,
- .wait_until_sent = rs_wait_until_sent,
- .tiocmget = esp_tiocmget,
- .tiocmset = esp_tiocmset,
-};
-
-static const struct tty_port_operations esp_port_ops = {
- .esp_carrier_raised,
-};
-
-/*
- * The serial driver boot-time initialization code!
- */
-static int __init espserial_init(void)
-{
- int i, offset;
- struct esp_struct *info;
- struct esp_struct *last_primary = NULL;
- int esp[] = { 0x100, 0x140, 0x180, 0x200, 0x240, 0x280, 0x300, 0x380 };
-
- esp_driver = alloc_tty_driver(NR_PORTS);
- if (!esp_driver)
- return -ENOMEM;
-
- for (i = 0; i < NR_PRIMARY; i++) {
- if (irq[i] != 0) {
- if ((irq[i] < 2) || (irq[i] > 15) || (irq[i] == 6) ||
- (irq[i] == 8) || (irq[i] == 13))
- irq[i] = 0;
- else if (irq[i] == 2)
- irq[i] = 9;
- }
- }
-
- if ((dma != 1) && (dma != 3))
- dma = 0;
-
- if ((rx_trigger < 1) || (rx_trigger > 1023))
- rx_trigger = 768;
-
- if ((tx_trigger < 1) || (tx_trigger > 1023))
- tx_trigger = 768;
-
- if ((flow_off < 1) || (flow_off > 1023))
- flow_off = 1016;
-
- if ((flow_on < 1) || (flow_on > 1023))
- flow_on = 944;
-
- if ((rx_timeout < 0) || (rx_timeout > 255))
- rx_timeout = 128;
-
- if (flow_on >= flow_off)
- flow_on = flow_off - 1;
-
- show_serial_version();
-
- /* Initialize the tty_driver structure */
-
- esp_driver->owner = THIS_MODULE;
- esp_driver->name = "ttyP";
- esp_driver->major = ESP_IN_MAJOR;
- esp_driver->minor_start = 0;
- esp_driver->type = TTY_DRIVER_TYPE_SERIAL;
- esp_driver->subtype = SERIAL_TYPE_NORMAL;
- esp_driver->init_termios = tty_std_termios;
- esp_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- esp_driver->init_termios.c_ispeed = 9600;
- esp_driver->init_termios.c_ospeed = 9600;
- esp_driver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(esp_driver, &esp_ops);
- if (tty_register_driver(esp_driver)) {
- printk(KERN_ERR "Couldn't register esp serial driver");
- put_tty_driver(esp_driver);
- return 1;
- }
-
- info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
-
- if (!info) {
- printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
- tty_unregister_driver(esp_driver);
- put_tty_driver(esp_driver);
- return 1;
- }
-
- spin_lock_init(&info->lock);
- /* rx_trigger, tx_trigger are needed by autoconfig */
- info->config.rx_trigger = rx_trigger;
- info->config.tx_trigger = tx_trigger;
-
- i = 0;
- offset = 0;
-
- do {
- tty_port_init(&info->port);
- info->port.ops = &esp_port_ops;
- info->io_port = esp[i] + offset;
- info->irq = irq[i];
- info->line = (i * 8) + (offset / 8);
-
- if (!autoconfig(info)) {
- i++;
- offset = 0;
- continue;
- }
-
- info->custom_divisor = (divisor[i] >> (offset / 2)) & 0xf;
- info->port.flags = STD_COM_FLAGS;
- if (info->custom_divisor)
- info->port.flags |= ASYNC_SPD_CUST;
- info->magic = ESP_MAGIC;
- info->close_delay = 5*HZ/10;
- info->closing_wait = 30*HZ;
- info->config.rx_timeout = rx_timeout;
- info->config.flow_on = flow_on;
- info->config.flow_off = flow_off;
- info->config.pio_threshold = pio_threshold;
- info->next_port = ports;
- init_waitqueue_head(&info->break_wait);
- ports = info;
- printk(KERN_INFO "ttyP%d at 0x%04x (irq = %d) is an ESP ",
- info->line, info->io_port, info->irq);
-
- if (info->line % 8) {
- printk("secondary port\n");
- /* 8 port cards can't do DMA */
- info->stat_flags |= ESP_STAT_NEVER_DMA;
-
- if (last_primary)
- last_primary->stat_flags |= ESP_STAT_NEVER_DMA;
- } else {
- printk("primary port\n");
- last_primary = info;
- irq[i] = info->irq;
- }
-
- if (!dma)
- info->stat_flags |= ESP_STAT_NEVER_DMA;
-
- info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
- if (!info) {
- printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
- /* allow use of the already detected ports */
- return 0;
- }
-
- spin_lock_init(&info->lock);
- /* rx_trigger, tx_trigger are needed by autoconfig */
- info->config.rx_trigger = rx_trigger;
- info->config.tx_trigger = tx_trigger;
-
- if (offset == 56) {
- i++;
- offset = 0;
- } else {
- offset += 8;
- }
- } while (i < NR_PRIMARY);
-
- /* free the last port memory allocation */
- kfree(info);
-
- return 0;
-}
-
-static void __exit espserial_exit(void)
-{
- int e1;
- struct esp_struct *temp_async;
- struct esp_pio_buffer *pio_buf;
-
- e1 = tty_unregister_driver(esp_driver);
- if (e1)
- printk(KERN_ERR "esp: failed to unregister driver (%d)\n", e1);
- put_tty_driver(esp_driver);
-
- while (ports) {
- if (ports->io_port)
- release_region(ports->io_port, REGION_SIZE);
- temp_async = ports->next_port;
- kfree(ports);
- ports = temp_async;
- }
-
- if (dma_buffer)
- free_pages((unsigned long)dma_buffer,
- get_order(DMA_BUFFER_SZ));
-
- while (free_pio_buf) {
- pio_buf = free_pio_buf->next;
- kfree(free_pio_buf);
- free_pio_buf = pio_buf;
- }
-}
-
-module_init(espserial_init);
-module_exit(espserial_exit);
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index a632f25f144..416d3423150 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -832,6 +832,7 @@ int hvc_remove(struct hvc_struct *hp)
tty_hangup(tty);
return 0;
}
+EXPORT_SYMBOL_GPL(hvc_remove);
/* Driver initialization: called as soon as someone uses hvc_alloc(). */
static int hvc_init(void)
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index b8a5d654d3d..fe62bd0e17b 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -931,7 +931,7 @@ static struct hv_ops hvc_iucv_ops = {
};
/* Suspend / resume device operations */
-static struct dev_pm_ops hvc_iucv_pm_ops = {
+static const struct dev_pm_ops hvc_iucv_pm_ops = {
.freeze = hvc_iucv_pm_freeze,
.thaw = hvc_iucv_pm_restore_thaw,
.restore = hvc_iucv_pm_restore_thaw,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index a6ee32b599a..b1a71638c77 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -25,6 +25,8 @@
#include <linux/types.h>
#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>
#include <xen/interface/io/console.h>
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 80704875794..cf82fedae09 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -370,7 +370,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
return SI_SM_IDLE;
case KCS_START_OP:
- if (state != KCS_IDLE) {
+ if (state != KCS_IDLE_STATE) {
start_error_recovery(kcs,
"State machine not idle at start");
break;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index d2e698096ac..679cd08b80b 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -64,6 +64,7 @@
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>
+#include <linux/pnp.h>
#ifdef CONFIG_PPC_OF
#include <linux/of_device.h>
@@ -1919,7 +1920,7 @@ struct SPMITable {
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
-static __devinit int try_init_acpi(struct SPMITable *spmi)
+static __devinit int try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
u8 addr_space;
@@ -1940,7 +1941,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
return -ENOMEM;
}
- info->addr_source = "ACPI";
+ info->addr_source = "SPMI";
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -2002,7 +2003,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
return 0;
}
-static __devinit void acpi_find_bmc(void)
+static __devinit void spmi_find_bmc(void)
{
acpi_status status;
struct SPMITable *spmi;
@@ -2020,9 +2021,106 @@ static __devinit void acpi_find_bmc(void)
if (status != AE_OK)
return;
- try_init_acpi(spmi);
+ try_init_spmi(spmi);
}
}
+
+static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ struct acpi_device *acpi_dev;
+ struct smi_info *info;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+
+ acpi_dev = pnp_acpi_device(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->addr_source = "ACPI";
+
+ handle = acpi_dev->handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ goto err_free;
+
+ switch (tmp) {
+ case 1:
+ info->si_type = SI_KCS;
+ break;
+ case 2:
+ info->si_type = SI_SMIC;
+ break;
+ case 3:
+ info->si_type = SI_BT;
+ break;
+ default:
+ dev_info(&dev->dev, "unknown interface type %lld\n", tmp);
+ goto err_free;
+ }
+
+ if (pnp_port_valid(dev, 0)) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ info->io.addr_data = pnp_port_start(dev, 0);
+ } else if (pnp_mem_valid(dev, 0)) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ info->io.addr_data = pnp_mem_start(dev, 0);
+ } else {
+ dev_err(&dev->dev, "no I/O or memory address\n");
+ goto err_free;
+ }
+
+ info->io.regspacing = DEFAULT_REGSPACING;
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ info->irq = tmp;
+ info->irq_setup = acpi_gpe_irq_setup;
+ } else if (pnp_irq_valid(dev, 0)) {
+ info->irq = pnp_irq(dev, 0);
+ info->irq_setup = std_irq_setup;
+ }
+
+ info->dev = &acpi_dev->dev;
+ pnp_set_drvdata(dev, info);
+
+ return try_smi_init(info);
+
+err_free:
+ kfree(info);
+ return -EINVAL;
+}
+
+static void __devexit ipmi_pnp_remove(struct pnp_dev *dev)
+{
+ struct smi_info *info = pnp_get_drvdata(dev);
+
+ cleanup_one_si(info);
+}
+
+static const struct pnp_device_id pnp_dev_table[] = {
+ {"IPI0001", 0},
+ {"", 0},
+};
+
+static struct pnp_driver ipmi_pnp_driver = {
+ .name = DEVICE_NAME,
+ .probe = ipmi_pnp_probe,
+ .remove = __devexit_p(ipmi_pnp_remove),
+ .id_table = pnp_dev_table,
+};
#endif
#ifdef CONFIG_DMI
@@ -2202,7 +2300,6 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
int rv;
int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
struct smi_info *info;
- int first_reg_offset = 0;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -2241,9 +2338,6 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
info->addr_source_cleanup = ipmi_pci_cleanup;
info->addr_source_data = pdev;
- if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
- first_reg_offset = 1;
-
if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
@@ -3108,7 +3202,10 @@ static __devinit int init_ipmi_si(void)
#endif
#ifdef CONFIG_ACPI
- acpi_find_bmc();
+ spmi_find_bmc();
+#endif
+#ifdef CONFIG_PNP
+ pnp_register_driver(&ipmi_pnp_driver);
#endif
#ifdef CONFIG_PCI
@@ -3233,6 +3330,9 @@ static __exit void cleanup_ipmi_si(void)
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
#endif
+#ifdef CONFIG_PNP
+ pnp_unregister_driver(&ipmi_pnp_driver);
+#endif
#ifdef CONFIG_PPC_OF
of_unregister_platform_driver(&ipmi_of_platform_driver);
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 426bfdd7f3e..300d5bd6cd0 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -793,35 +793,30 @@ static inline void isicom_setup_board(struct isi_board *bp)
{
int channel;
struct isi_port *port;
- unsigned long flags;
- spin_lock_irqsave(&bp->card_lock, flags);
- if (bp->status & BOARD_ACTIVE) {
- spin_unlock_irqrestore(&bp->card_lock, flags);
- return;
+ bp->count++;
+ if (!(bp->status & BOARD_INIT)) {
+ port = bp->ports;
+ for (channel = 0; channel < bp->port_count; channel++, port++)
+ drop_dtr_rts(port);
}
- port = bp->ports;
- bp->status |= BOARD_ACTIVE;
- for (channel = 0; channel < bp->port_count; channel++, port++)
- drop_dtr_rts(port);
- spin_unlock_irqrestore(&bp->card_lock, flags);
+ bp->status |= BOARD_ACTIVE | BOARD_INIT;
}
-static int isicom_setup_port(struct tty_struct *tty)
+/* Activate and thus setup board are protected from races against shutdown
+ by the tty_port mutex */
+
+static int isicom_activate(struct tty_port *tport, struct tty_struct *tty)
{
- struct isi_port *port = tty->driver_data;
+ struct isi_port *port = container_of(tport, struct isi_port, port);
struct isi_board *card = port->card;
unsigned long flags;
- if (port->port.flags & ASYNC_INITIALIZED)
- return 0;
- if (tty_port_alloc_xmit_buf(&port->port) < 0)
+ if (tty_port_alloc_xmit_buf(tport) < 0)
return -ENOMEM;
spin_lock_irqsave(&card->card_lock, flags);
- clear_bit(TTY_IO_ERROR, &tty->flags);
- if (port->port.count == 1)
- card->count++;
+ isicom_setup_board(card);
port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
@@ -832,9 +827,7 @@ static int isicom_setup_port(struct tty_struct *tty)
outw(((ISICOM_KILLTX | ISICOM_KILLRX) << 8) | 0x06, card->base);
InterruptTheCard(card->base);
}
-
isicom_config_port(tty);
- port->port.flags |= ASYNC_INITIALIZED;
spin_unlock_irqrestore(&card->card_lock, flags);
return 0;
@@ -871,85 +864,37 @@ static struct tty_port *isicom_find_port(struct tty_struct *tty)
return &port->port;
}
-
+
static int isicom_open(struct tty_struct *tty, struct file *filp)
{
struct isi_port *port;
struct isi_board *card;
struct tty_port *tport;
- int error = 0;
tport = isicom_find_port(tty);
if (tport == NULL)
return -ENODEV;
port = container_of(tport, struct isi_port, port);
card = &isi_card[BOARD(tty->index)];
- isicom_setup_board(card);
- /* FIXME: locking on port.count etc */
- port->port.count++;
- tty->driver_data = port;
- tty_port_tty_set(&port->port, tty);
- /* FIXME: Locking on Initialized flag */
- if (!test_bit(ASYNCB_INITIALIZED, &tport->flags))
- error = isicom_setup_port(tty);
- if (error == 0)
- error = tty_port_block_til_ready(&port->port, tty, filp);
- return error;
+ return tty_port_open(tport, tty, filp);
}
/* close et all */
-static inline void isicom_shutdown_board(struct isi_board *bp)
-{
- if (bp->status & BOARD_ACTIVE)
- bp->status &= ~BOARD_ACTIVE;
-}
-
/* card->lock HAS to be held */
static void isicom_shutdown_port(struct isi_port *port)
{
struct isi_board *card = port->card;
- struct tty_struct *tty;
-
- tty = tty_port_tty_get(&port->port);
-
- if (!(port->port.flags & ASYNC_INITIALIZED)) {
- tty_kref_put(tty);
- return;
- }
-
- tty_port_free_xmit_buf(&port->port);
- port->port.flags &= ~ASYNC_INITIALIZED;
- /* 3rd October 2000 : Vinayak P Risbud */
- tty_port_tty_set(&port->port, NULL);
-
- /*Fix done by Anil .S on 30-04-2001
- remote login through isi port has dtr toggle problem
- due to which the carrier drops before the password prompt
- appears on the remote end. Now we drop the dtr only if the
- HUPCL(Hangup on close) flag is set for the tty*/
-
- if (C_HUPCL(tty))
- /* drop dtr on this port */
- drop_dtr(port);
-
- /* any other port uninits */
- if (tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
if (--card->count < 0) {
pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n",
card->base, card->count);
card->count = 0;
}
-
- /* last port was closed, shutdown that boad too */
- if (C_HUPCL(tty)) {
- if (!card->count)
- isicom_shutdown_board(card);
- }
- tty_kref_put(tty);
+ /* last port was closed, shutdown that board too */
+ if (!card->count)
+ card->status &= BOARD_ACTIVE;
}
static void isicom_flush_buffer(struct tty_struct *tty)
@@ -968,7 +913,7 @@ static void isicom_flush_buffer(struct tty_struct *tty)
tty_wakeup(tty);
}
-static void isicom_close_port(struct tty_port *port)
+static void isicom_shutdown(struct tty_port *port)
{
struct isi_port *ip = container_of(port, struct isi_port, port);
struct isi_board *card = ip->card;
@@ -977,12 +922,11 @@ static void isicom_close_port(struct tty_port *port)
/* indicate to the card that no more data can be received
on this port */
spin_lock_irqsave(&card->card_lock, flags);
- if (port->flags & ASYNC_INITIALIZED) {
- card->port_status &= ~(1 << ip->channel);
- outw(card->port_status, card->base + 0x02);
- }
+ card->port_status &= ~(1 << ip->channel);
+ outw(card->port_status, card->base + 0x02);
isicom_shutdown_port(ip);
spin_unlock_irqrestore(&card->card_lock, flags);
+ tty_port_free_xmit_buf(port);
}
static void isicom_close(struct tty_struct *tty, struct file *filp)
@@ -991,12 +935,7 @@ static void isicom_close(struct tty_struct *tty, struct file *filp)
struct tty_port *port = &ip->port;
if (isicom_paranoia_check(ip, tty->name, "isicom_close"))
return;
-
- if (tty_port_close_start(port, tty, filp) == 0)
- return;
- isicom_close_port(port);
- isicom_flush_buffer(tty);
- tty_port_close_end(port, tty);
+ tty_port_close(port, tty, filp);
}
/* write et all */
@@ -1326,15 +1265,9 @@ static void isicom_start(struct tty_struct *tty)
static void isicom_hangup(struct tty_struct *tty)
{
struct isi_port *port = tty->driver_data;
- unsigned long flags;
if (isicom_paranoia_check(port, tty->name, "isicom_hangup"))
return;
-
- spin_lock_irqsave(&port->card->card_lock, flags);
- isicom_shutdown_port(port);
- spin_unlock_irqrestore(&port->card->card_lock, flags);
-
tty_port_hangup(&port->port);
}
@@ -1367,6 +1300,8 @@ static const struct tty_operations isicom_ops = {
static const struct tty_port_operations isicom_port_ops = {
.carrier_raised = isicom_carrier_raised,
.dtr_rts = isicom_dtr_rts,
+ .activate = isicom_activate,
+ .shutdown = isicom_shutdown,
};
static int __devinit reset_card(struct pci_dev *pdev,
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 402838f4083..4cd6c527ee4 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -213,7 +213,6 @@ static int stli_shared;
* with the slave. Most of them need to be updated atomically, so always
* use the bit setting operations (unless protected by cli/sti).
*/
-#define ST_INITIALIZING 1
#define ST_OPENING 2
#define ST_CLOSING 3
#define ST_CMDING 4
@@ -621,7 +620,7 @@ static int stli_brdinit(struct stlibrd *brdp);
static int stli_startbrd(struct stlibrd *brdp);
static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp);
static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp);
-static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
+static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg);
static void stli_brdpoll(struct stlibrd *brdp, cdkhdr_t __iomem *hdrp);
static void stli_poll(unsigned long arg);
static int stli_hostcmd(struct stlibrd *brdp, struct stliport *portp);
@@ -704,7 +703,7 @@ static const struct file_operations stli_fsiomem = {
.owner = THIS_MODULE,
.read = stli_memread,
.write = stli_memwrite,
- .ioctl = stli_memioctl,
+ .unlocked_ioctl = stli_memioctl,
};
/*****************************************************************************/
@@ -783,13 +782,32 @@ static int stli_parsebrd(struct stlconf *confp, char **argp)
/*****************************************************************************/
+/*
+ * On the first open of the device setup the port hardware, and
+ * initialize the per port data structure. Since initializing the port
+ * requires several commands to the board we will need to wait for any
+ * other open that is already initializing the port.
+ *
+ * Locking: protected by the port mutex.
+ */
+
+static int stli_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct stliport *portp = container_of(port, struct stliport, port);
+ struct stlibrd *brdp = stli_brds[portp->brdnr];
+ int rc;
+
+ if ((rc = stli_initopen(tty, brdp, portp)) >= 0)
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ wake_up_interruptible(&portp->raw_wait);
+ return rc;
+}
+
static int stli_open(struct tty_struct *tty, struct file *filp)
{
struct stlibrd *brdp;
struct stliport *portp;
- struct tty_port *port;
unsigned int minordev, brdnr, portnr;
- int rc;
minordev = tty->index;
brdnr = MINOR2BRD(minordev);
@@ -809,95 +827,56 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
return -ENODEV;
if (portp->devnr < 1)
return -ENODEV;
- port = &portp->port;
-
-/*
- * On the first open of the device setup the port hardware, and
- * initialize the per port data structure. Since initializing the port
- * requires several commands to the board we will need to wait for any
- * other open that is already initializing the port.
- *
- * Review - locking
- */
- tty_port_tty_set(port, tty);
- tty->driver_data = portp;
- port->count++;
-
- wait_event_interruptible(portp->raw_wait,
- !test_bit(ST_INITIALIZING, &portp->state));
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if ((portp->port.flags & ASYNC_INITIALIZED) == 0) {
- set_bit(ST_INITIALIZING, &portp->state);
- if ((rc = stli_initopen(tty, brdp, portp)) >= 0) {
- /* Locking */
- port->flags |= ASYNC_INITIALIZED;
- clear_bit(TTY_IO_ERROR, &tty->flags);
- }
- clear_bit(ST_INITIALIZING, &portp->state);
- wake_up_interruptible(&portp->raw_wait);
- if (rc < 0)
- return rc;
- }
- return tty_port_block_til_ready(&portp->port, tty, filp);
+ return tty_port_open(&portp->port, tty, filp);
}
+
/*****************************************************************************/
-static void stli_close(struct tty_struct *tty, struct file *filp)
+static void stli_shutdown(struct tty_port *port)
{
struct stlibrd *brdp;
- struct stliport *portp;
- struct tty_port *port;
+ unsigned long ftype;
unsigned long flags;
+ struct stliport *portp = container_of(port, struct stliport, port);
- portp = tty->driver_data;
- if (portp == NULL)
+ if (portp->brdnr >= stli_nrbrds)
return;
- port = &portp->port;
-
- if (tty_port_close_start(port, tty, filp) == 0)
+ brdp = stli_brds[portp->brdnr];
+ if (brdp == NULL)
return;
-/*
- * May want to wait for data to drain before closing. The BUSY flag
- * keeps track of whether we are still transmitting or not. It is
- * updated by messages from the slave - indicating when all chars
- * really have drained.
- */
- spin_lock_irqsave(&stli_lock, flags);
- if (tty == stli_txcooktty)
- stli_flushchars(tty);
- spin_unlock_irqrestore(&stli_lock, flags);
-
- /* We end up doing this twice for the moment. This needs looking at
- eventually. Note we still use portp->closing_wait as a result */
- if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, portp->closing_wait);
+ /*
+ * May want to wait for data to drain before closing. The BUSY
+ * flag keeps track of whether we are still transmitting or not.
+ * It is updated by messages from the slave - indicating when all
+ * chars really have drained.
+ */
- /* FIXME: port locking here needs attending to */
- port->flags &= ~ASYNC_INITIALIZED;
+ if (!test_bit(ST_CLOSING, &portp->state))
+ stli_rawclose(brdp, portp, 0, 0);
- brdp = stli_brds[portp->brdnr];
- stli_rawclose(brdp, portp, 0, 0);
- if (tty->termios->c_cflag & HUPCL) {
- stli_mkasysigs(&portp->asig, 0, 0);
- if (test_bit(ST_CMDING, &portp->state))
- set_bit(ST_DOSIGS, &portp->state);
- else
- stli_sendcmd(brdp, portp, A_SETSIGNALS, &portp->asig,
- sizeof(asysigs_t), 0);
- }
+ spin_lock_irqsave(&stli_lock, flags);
clear_bit(ST_TXBUSY, &portp->state);
clear_bit(ST_RXSTOP, &portp->state);
- set_bit(TTY_IO_ERROR, &tty->flags);
- tty_ldisc_flush(tty);
- set_bit(ST_DOFLUSHRX, &portp->state);
- stli_flushbuffer(tty);
+ spin_unlock_irqrestore(&stli_lock, flags);
- tty_port_close_end(port, tty);
- tty_port_tty_set(port, NULL);
+ ftype = FLUSHTX | FLUSHRX;
+ stli_cmdwait(brdp, portp, A_FLUSH, &ftype, sizeof(u32), 0);
+}
+
+static void stli_close(struct tty_struct *tty, struct file *filp)
+{
+ struct stliport *portp = tty->driver_data;
+ unsigned long flags;
+ if (portp == NULL)
+ return;
+ spin_lock_irqsave(&stli_lock, flags);
+ /* Flush any internal buffering out first */
+ if (tty == stli_txcooktty)
+ stli_flushchars(tty);
+ spin_unlock_irqrestore(&stli_lock, flags);
+ tty_port_close(&portp->port, tty, filp);
}
/*****************************************************************************/
@@ -1724,6 +1703,7 @@ static void stli_start(struct tty_struct *tty)
/*****************************************************************************/
+
/*
* Hangup this port. This is pretty much like closing the port, only
* a little more brutal. No waiting for data to drain. Shutdown the
@@ -1733,47 +1713,8 @@ static void stli_start(struct tty_struct *tty)
static void stli_hangup(struct tty_struct *tty)
{
- struct stliport *portp;
- struct stlibrd *brdp;
- struct tty_port *port;
- unsigned long flags;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- if (portp->brdnr >= stli_nrbrds)
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return;
- port = &portp->port;
-
- spin_lock_irqsave(&port->lock, flags);
- port->flags &= ~ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&port->lock, flags);
-
- if (!test_bit(ST_CLOSING, &portp->state))
- stli_rawclose(brdp, portp, 0, 0);
-
- spin_lock_irqsave(&stli_lock, flags);
- if (tty->termios->c_cflag & HUPCL) {
- stli_mkasysigs(&portp->asig, 0, 0);
- if (test_bit(ST_CMDING, &portp->state)) {
- set_bit(ST_DOSIGS, &portp->state);
- set_bit(ST_DOFLUSHTX, &portp->state);
- set_bit(ST_DOFLUSHRX, &portp->state);
- } else {
- stli_sendcmd(brdp, portp, A_SETSIGNALSF,
- &portp->asig, sizeof(asysigs_t), 0);
- }
- }
-
- clear_bit(ST_TXBUSY, &portp->state);
- clear_bit(ST_RXSTOP, &portp->state);
- set_bit(TTY_IO_ERROR, &tty->flags);
- spin_unlock_irqrestore(&stli_lock, flags);
-
- tty_port_hangup(port);
+ struct stliport *portp = tty->driver_data;
+ tty_port_hangup(&portp->port);
}
/*****************************************************************************/
@@ -4311,7 +4252,7 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
* reset it, and start/stop it.
*/
-static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg)
+static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
struct stlibrd *brdp;
int brdnr, rc, done;
@@ -4356,7 +4297,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
* Now handle the board specific ioctls. These all depend on the
* minor number of the device they were called from.
*/
- brdnr = iminor(ip);
+ brdnr = iminor(fp->f_dentry->d_inode);
if (brdnr >= STL_MAXBRDS)
return -ENODEV;
brdp = stli_brds[brdnr];
@@ -4420,6 +4361,8 @@ static const struct tty_operations stli_ops = {
static const struct tty_port_operations stli_port_ops = {
.carrier_raised = stli_carrier_raised,
.dtr_rts = stli_dtr_rts,
+ .activate = stli_activate,
+ .shutdown = stli_shutdown,
};
/*****************************************************************************/
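The istallion hunks above move the open/close/hangup bookkeeping into the tty_port core: stli_open()/stli_close() now call tty_port_open()/tty_port_close(), and the hardware work lives in the new .activate/.shutdown callbacks of stli_port_ops. A minimal sketch of that pattern, using a hypothetical "foo" driver (illustrative only, not code from this patch):

#include <linux/tty.h>

struct foo_port {
	struct tty_port port;
	int running;			/* stand-in for real hardware state */
};

static int foo_activate(struct tty_port *port, struct tty_struct *tty)
{
	struct foo_port *fp = container_of(port, struct foo_port, port);

	fp->running = 1;		/* bring the hardware up here */
	return 0;			/* a non-zero error aborts the open */
}

static void foo_shutdown(struct tty_port *port)
{
	struct foo_port *fp = container_of(port, struct foo_port, port);

	fp->running = 0;		/* quiesce the hardware here */
}

static const struct tty_port_operations foo_port_ops = {
	.activate = foo_activate,
	.shutdown = foo_shutdown,
};

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;	/* set up elsewhere, e.g. by tty->index lookup */

	return tty_port_open(&fp->port, tty, filp);
}

static void foo_close(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;

	tty_port_close(&fp->port, tty, filp);
}

tty_port_open() calls .activate only when the port is not yet initialized and sets ASYNCB_INITIALIZED itself; tty_port_close() drains output, calls .shutdown and clears the flag, so the driver no longer juggles ASYNC_INITIALIZED by hand.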
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 5619007e7e0..f706b1dffdb 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -233,7 +233,8 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
}
/*
- * Making beeps and bells.
+ * Making beeps and bells. Note that we prefer beeps to bells, but when
+ * shutting the sound off we do both.
*/
static int kd_sound_helper(struct input_handle *handle, void *data)
@@ -242,9 +243,12 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
struct input_dev *dev = handle->dev;
if (test_bit(EV_SND, dev->evbit)) {
- if (test_bit(SND_TONE, dev->sndbit))
+ if (test_bit(SND_TONE, dev->sndbit)) {
input_inject_event(handle, EV_SND, SND_TONE, *hz);
- if (test_bit(SND_BELL, handle->dev->sndbit))
+ if (*hz)
+ return 0;
+ }
+ if (test_bit(SND_BELL, dev->sndbit))
input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0);
}
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index e444c2dba16..938a3a27388 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -127,6 +127,7 @@
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/smp_lock.h>
+#include <linux/compat.h>
#include <linux/parport.h>
#undef LP_STATS
@@ -571,13 +572,11 @@ static int lp_release(struct inode * inode, struct file * file)
return 0;
}
-static int lp_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
+ unsigned long arg, void __user *argp)
{
- unsigned int minor = iminor(inode);
int status;
int retval = 0;
- void __user *argp = (void __user *)arg;
#ifdef LP_DEBUG
printk(KERN_DEBUG "lp%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
@@ -587,9 +586,6 @@ static int lp_ioctl(struct inode *inode, struct file *file,
if ((LP_F(minor) & LP_EXIST) == 0)
return -ENODEV;
switch ( cmd ) {
- struct timeval par_timeout;
- long to_jiffies;
-
case LPTIME:
LP_TIME(minor) = arg * HZ/100;
break;
@@ -652,34 +648,101 @@ static int lp_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
break;
- case LPSETTIMEOUT:
- if (copy_from_user (&par_timeout, argp,
- sizeof (struct timeval))) {
- return -EFAULT;
- }
- /* Convert to jiffies, place in lp_table */
- if ((par_timeout.tv_sec < 0) ||
- (par_timeout.tv_usec < 0)) {
- return -EINVAL;
- }
- to_jiffies = DIV_ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
- to_jiffies += par_timeout.tv_sec * (long) HZ;
- if (to_jiffies <= 0) {
- return -EINVAL;
- }
- lp_table[minor].timeout = to_jiffies;
- break;
-
default:
retval = -EINVAL;
}
return retval;
}
+static int lp_set_timeout(unsigned int minor, struct timeval *par_timeout)
+{
+ long to_jiffies;
+
+ /* Convert to jiffies, place in lp_table */
+ if ((par_timeout->tv_sec < 0) ||
+ (par_timeout->tv_usec < 0)) {
+ return -EINVAL;
+ }
+ to_jiffies = DIV_ROUND_UP(par_timeout->tv_usec, 1000000/HZ);
+ to_jiffies += par_timeout->tv_sec * (long) HZ;
+ if (to_jiffies <= 0) {
+ return -EINVAL;
+ }
+ lp_table[minor].timeout = to_jiffies;
+ return 0;
+}
+
+static long lp_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int minor;
+ struct timeval par_timeout;
+ int ret;
+
+ minor = iminor(file->f_path.dentry->d_inode);
+ lock_kernel();
+ switch (cmd) {
+ case LPSETTIMEOUT:
+ if (copy_from_user(&par_timeout, (void __user *)arg,
+ sizeof (struct timeval))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = lp_set_timeout(minor, &par_timeout);
+ break;
+ default:
+ ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg);
+ break;
+ }
+ unlock_kernel();
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long lp_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int minor;
+ struct timeval par_timeout;
+ struct compat_timeval __user *tc;
+ int ret;
+
+ minor = iminor(file->f_path.dentry->d_inode);
+ lock_kernel();
+ switch (cmd) {
+ case LPSETTIMEOUT:
+ tc = compat_ptr(arg);
+ if (get_user(par_timeout.tv_sec, &tc->tv_sec) ||
+ get_user(par_timeout.tv_usec, &tc->tv_usec)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = lp_set_timeout(minor, &par_timeout);
+ break;
+#ifdef LP_STATS
+ case LPGETSTATS:
+ /* FIXME: add an implementation if you set LP_STATS */
+ ret = -EINVAL;
+ break;
+#endif
+ default:
+ ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg));
+ break;
+ }
+ unlock_kernel();
+
+ return ret;
+}
+#endif
+
static const struct file_operations lp_fops = {
.owner = THIS_MODULE,
.write = lp_write,
- .ioctl = lp_ioctl,
+ .unlocked_ioctl = lp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lp_compat_ioctl,
+#endif
.open = lp_open,
.release = lp_release,
#ifdef CONFIG_PARPORT_1284
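The lp.c change above is the usual BKL-pushdown conversion: the old .ioctl entry becomes .unlocked_ioctl wrapped in an explicit lock_kernel()/unlock_kernel() pair, and a .compat_ioctl entry translates the 32-bit struct timeval via compat_ptr()/get_user() before reusing the same backend. A hedged sketch of that pattern for a generic character device (the "bar" names and the BAR_SETVAL command are invented for illustration):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>

#define BAR_SETVAL	_IOW('b', 0, int)	/* hypothetical command */

static int bar_value;

static long bar_do_ioctl(unsigned int cmd, void __user *argp)
{
	int val;

	switch (cmd) {
	case BAR_SETVAL:
		if (get_user(val, (int __user *)argp))
			return -EFAULT;
		bar_value = val;
		return 0;
	default:
		return -ENOTTY;
	}
}

static long bar_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long ret;

	lock_kernel();			/* keep the old BKL coverage for now */
	ret = bar_do_ioctl(cmd, (void __user *)arg);
	unlock_kernel();
	return ret;
}

#ifdef CONFIG_COMPAT
static long bar_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = bar_do_ioctl(cmd, compat_ptr(arg));	/* 32-bit user pointer */
	unlock_kernel();
	return ret;
}
#endif

static const struct file_operations bar_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= bar_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= bar_compat_ioctl,
#endif
};

For a plain int argument the compat path only differs by compat_ptr(); lp needs the dedicated LPSETTIMEOUT branch because struct timeval has a different layout for 32-bit callers.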
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 30eff80fed6..be832b6f827 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -34,6 +34,16 @@
# include <linux/efi.h>
#endif
+static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+{
+ unsigned long sz;
+
+ sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
+
+ return min(sz, size);
+}
+
/*
* Architectures vary in how they handle caching for addresses
* outside of main memory.
@@ -43,7 +53,7 @@ static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
/*
- * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
+ * On ia64, we ignore O_DSYNC because we cannot tolerate memory attribute aliases.
*/
return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
@@ -56,9 +66,9 @@ static inline int uncached_access(struct file *file, unsigned long addr)
#else
/*
* Accessing memory above the top the kernel knows about or through a file pointer
- * that was marked O_SYNC will be done non-cached.
+ * that was marked O_DSYNC will be done non-cached.
*/
- if (file->f_flags & O_SYNC)
+ if (file->f_flags & O_DSYNC)
return 1;
return addr >= __pa(high_memory);
#endif
@@ -126,9 +136,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
- sz = PAGE_SIZE - p;
- if (sz > count)
- sz = count;
+ sz = size_inside_page(p, count);
if (sz > 0) {
if (clear_user(buf, sz))
return -EFAULT;
@@ -141,15 +149,9 @@ static ssize_t read_mem(struct file * file, char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
+ unsigned long remaining;
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
return -EPERM;
@@ -163,12 +165,10 @@ static ssize_t read_mem(struct file * file, char __user * buf,
if (!ptr)
return -EFAULT;
- if (copy_to_user(buf, ptr, sz)) {
- unxlate_dev_mem_ptr(p, ptr);
- return -EFAULT;
- }
-
+ remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
buf += sz;
p += sz;
@@ -196,9 +196,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE - p;
- if (sz > count)
- sz = count;
+ sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
@@ -208,15 +206,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
return -EPERM;
@@ -234,16 +224,14 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
}
copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
- unxlate_dev_mem_ptr(p, ptr);
if (written)
break;
return -EFAULT;
}
- unxlate_dev_mem_ptr(p, ptr);
-
buf += sz;
p += sz;
count -= sz;
@@ -417,27 +405,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE && low_count > 0) {
- size_t tmp = PAGE_SIZE - p;
- if (tmp > low_count) tmp = low_count;
- if (clear_user(buf, tmp))
+ sz = size_inside_page(p, low_count);
+ if (clear_user(buf, sz))
return -EFAULT;
- buf += tmp;
- p += tmp;
- read += tmp;
- low_count -= tmp;
- count -= tmp;
+ buf += sz;
+ p += sz;
+ read += sz;
+ low_count -= sz;
+ count -= sz;
}
#endif
while (low_count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, low_count);
+ sz = size_inside_page(p, low_count);
/*
* On ia64 if a page has been mapped somewhere as
@@ -461,21 +440,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
if (!kbuf)
return -ENOMEM;
while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len = vread(kbuf, (char *)p, len);
- if (!len)
+ sz = size_inside_page(p, count);
+ sz = vread(kbuf, (char *)p, sz);
+ if (!sz)
break;
- if (copy_to_user(buf, kbuf, len)) {
+ if (copy_to_user(buf, kbuf, sz)) {
free_page((unsigned long)kbuf);
return -EFAULT;
}
- count -= len;
- buf += len;
- read += len;
- p += len;
+ count -= sz;
+ buf += sz;
+ read += sz;
+ p += sz;
}
free_page((unsigned long)kbuf);
}
@@ -485,7 +461,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
static inline ssize_t
-do_write_kmem(void *p, unsigned long realp, const char __user * buf,
+do_write_kmem(unsigned long p, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t written, sz;
@@ -494,14 +470,11 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
- if (realp < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE - realp;
- if (sz > count)
- sz = count;
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
- realp += sz;
count -= sz;
written += sz;
}
@@ -509,22 +482,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
while (count > 0) {
char *ptr;
- /*
- * Handle first page in case it's not aligned
- */
- if (-realp & (PAGE_SIZE - 1))
- sz = -realp & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur
*/
- ptr = xlate_dev_kmem_ptr(p);
+ ptr = xlate_dev_kmem_ptr((char *)p);
copied = copy_from_user(ptr, buf, sz);
if (copied) {
@@ -535,7 +501,6 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
}
buf += sz;
p += sz;
- realp += sz;
count -= sz;
written += sz;
}
@@ -554,19 +519,14 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
unsigned long p = *ppos;
ssize_t wrote = 0;
ssize_t virtr = 0;
- ssize_t written;
char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
if (p < (unsigned long) high_memory) {
-
- wrote = count;
- if (count > (unsigned long) high_memory - p)
- wrote = (unsigned long) high_memory - p;
-
- written = do_write_kmem((void*)p, p, buf, wrote, ppos);
- if (written != wrote)
- return written;
- wrote = written;
+ unsigned long to_write = min_t(unsigned long, count,
+ (unsigned long)high_memory - p);
+ wrote = do_write_kmem(p, buf, to_write, ppos);
+ if (wrote != to_write)
+ return wrote;
p += wrote;
buf += wrote;
count -= wrote;
@@ -577,24 +537,21 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
if (!kbuf)
return wrote ? wrote : -ENOMEM;
while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- if (len) {
- written = copy_from_user(kbuf, buf, len);
- if (written) {
- if (wrote + virtr)
- break;
- free_page((unsigned long)kbuf);
- return -EFAULT;
- }
+ unsigned long sz = size_inside_page(p, count);
+ unsigned long n;
+
+ n = copy_from_user(kbuf, buf, sz);
+ if (n) {
+ if (wrote + virtr)
+ break;
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
}
- len = vwrite(kbuf, (char *)p, len);
- count -= len;
- buf += len;
- virtr += len;
- p += len;
+ sz = vwrite(kbuf, (char *)p, sz);
+ count -= sz;
+ buf += sz;
+ virtr += sz;
+ p += sz;
}
free_page((unsigned long)kbuf);
}
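size_inside_page() above replaces several open-coded "first chunk may not be page aligned" computations in the /dev/mem and /dev/kmem read/write loops. The arithmetic is PAGE_SIZE minus the offset of start within its page, capped at the remaining size. A standalone illustration of the same arithmetic (userspace, PAGE_SIZE assumed to be 4096 here; not kernel code from this patch):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
	unsigned long sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return sz < size ? sz : size;
}

int main(void)
{
	/* 0x1ff0 is 16 bytes short of a page boundary: only 16 of the
	 * requested 64 bytes belong to the current page */
	printf("%lu\n", size_inside_page(0x1ff0, 64));	/* prints 16 */
	printf("%lu\n", size_inside_page(0x2000, 64));	/* aligned: prints 64 */
	return 0;
}

Each loop then advances by the returned chunk, so no single copy ever crosses a page boundary.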
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 96f1cd086dd..94a136e96c0 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -60,9 +60,7 @@ static DEFINE_MUTEX(misc_mtx);
* Assigned numbers, used for dynamic minors
*/
#define DYNAMIC_MINORS 64 /* like dynamic majors */
-static unsigned char misc_minors[DYNAMIC_MINORS / 8];
-
-extern int pmu_device_init(void);
+static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
#ifdef CONFIG_PROC_FS
static void *misc_seq_start(struct seq_file *seq, loff_t *pos)
@@ -198,24 +196,23 @@ int misc_register(struct miscdevice * misc)
}
if (misc->minor == MISC_DYNAMIC_MINOR) {
- int i = DYNAMIC_MINORS;
- while (--i >= 0)
- if ( (misc_minors[i>>3] & (1 << (i&7))) == 0)
- break;
- if (i<0) {
+ int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
+ if (i >= DYNAMIC_MINORS) {
mutex_unlock(&misc_mtx);
return -EBUSY;
}
- misc->minor = i;
+ misc->minor = DYNAMIC_MINORS - i - 1;
+ set_bit(i, misc_minors);
}
- if (misc->minor < DYNAMIC_MINORS)
- misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
dev = MKDEV(MISC_MAJOR, misc->minor);
misc->this_device = device_create(misc_class, misc->parent, dev,
misc, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ int i = DYNAMIC_MINORS - misc->minor - 1;
+ if (i < DYNAMIC_MINORS && i >= 0)
+ clear_bit(i, misc_minors);
err = PTR_ERR(misc->this_device);
goto out;
}
@@ -242,7 +239,7 @@ int misc_register(struct miscdevice * misc)
int misc_deregister(struct miscdevice *misc)
{
- int i = misc->minor;
+ int i = DYNAMIC_MINORS - misc->minor - 1;
if (list_empty(&misc->list))
return -EINVAL;
@@ -250,9 +247,8 @@ int misc_deregister(struct miscdevice *misc)
mutex_lock(&misc_mtx);
list_del(&misc->list);
device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
- if (i < DYNAMIC_MINORS && i>0) {
- misc_minors[i>>3] &= ~(1 << (misc->minor & 7));
- }
+ if (i < DYNAMIC_MINORS && i >= 0)
+ clear_bit(i, misc_minors);
mutex_unlock(&misc_mtx);
return 0;
}
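The misc.c hunks swap a hand-rolled byte array of flag bits for DECLARE_BITMAP() plus find_first_zero_bit(), with the twist that the allocated bit i is mapped to minor DYNAMIC_MINORS - i - 1 so the externally visible minors keep counting down from 63 as before. The bare allocator pattern, sketched with hypothetical "widget" names (callers are assumed to be serialised by a lock, as misc_mtx does above):

#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/errno.h>

#define WIDGET_MAX	64

static DECLARE_BITMAP(widget_map, WIDGET_MAX);

/* returns a free index, or -EBUSY if the map is full */
static int widget_alloc_id(void)
{
	int i = find_first_zero_bit(widget_map, WIDGET_MAX);

	if (i >= WIDGET_MAX)
		return -EBUSY;
	set_bit(i, widget_map);
	return i;
}

static void widget_free_id(int i)
{
	if (i >= 0 && i < WIDGET_MAX)
		clear_bit(i, widget_map);
}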
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index dd0083bbb64..63ee3bbc1ce 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -34,7 +34,6 @@
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/major.h>
-#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
@@ -139,7 +138,7 @@ struct moxa_port {
int cflag;
unsigned long statusflags;
- u8 DCDState;
+ u8 DCDState; /* Protected by the port lock */
u8 lineCtrl;
u8 lowChkFlag;
};
@@ -151,10 +150,9 @@ struct mon_str {
};
/* statusflags */
-#define TXSTOPPED 0x1
-#define LOWWAIT 0x2
-#define EMPTYWAIT 0x4
-#define THROTTLE 0x8
+#define TXSTOPPED 1
+#define LOWWAIT 2
+#define EMPTYWAIT 3
#define SERIAL_DO_RESTART
@@ -165,6 +163,7 @@ static struct mon_str moxaLog;
static unsigned int moxaFuncTout = HZ / 2;
static unsigned int moxaLowWaterChk;
static DEFINE_MUTEX(moxa_openlock);
+static DEFINE_SPINLOCK(moxa_lock);
/* Variables for insmod */
#ifdef MODULE
static unsigned long baseaddr[MAX_BOARDS];
@@ -194,8 +193,6 @@ static int moxa_write(struct tty_struct *, const unsigned char *, int);
static int moxa_write_room(struct tty_struct *);
static void moxa_flush_buffer(struct tty_struct *);
static int moxa_chars_in_buffer(struct tty_struct *);
-static void moxa_throttle(struct tty_struct *);
-static void moxa_unthrottle(struct tty_struct *);
static void moxa_set_termios(struct tty_struct *, struct ktermios *);
static void moxa_stop(struct tty_struct *);
static void moxa_start(struct tty_struct *);
@@ -205,9 +202,9 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
static void moxa_poll(unsigned long);
static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
-static void moxa_setup_empty_event(struct tty_struct *);
-static void moxa_shut_down(struct tty_struct *);
+static void moxa_shutdown(struct tty_port *);
static int moxa_carrier_raised(struct tty_port *);
+static void moxa_dtr_rts(struct tty_port *, int);
/*
* moxa board interface functions:
*/
@@ -234,6 +231,8 @@ static void MoxaSetFifo(struct moxa_port *port, int enable);
* I/O functions
*/
+static DEFINE_SPINLOCK(moxafunc_lock);
+
static void moxa_wait_finish(void __iomem *ofsAddr)
{
unsigned long end = jiffies + moxaFuncTout;
@@ -247,9 +246,25 @@ static void moxa_wait_finish(void __iomem *ofsAddr)
static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg)
{
+ unsigned long flags;
+ spin_lock_irqsave(&moxafunc_lock, flags);
writew(arg, ofsAddr + FuncArg);
writew(cmd, ofsAddr + FuncCode);
moxa_wait_finish(ofsAddr);
+ spin_unlock_irqrestore(&moxafunc_lock, flags);
+}
+
+static int moxafuncret(void __iomem *ofsAddr, u16 cmd, u16 arg)
+{
+ unsigned long flags;
+ u16 ret;
+ spin_lock_irqsave(&moxafunc_lock, flags);
+ writew(arg, ofsAddr + FuncArg);
+ writew(cmd, ofsAddr + FuncCode);
+ moxa_wait_finish(ofsAddr);
+ ret = readw(ofsAddr + FuncArg);
+ spin_unlock_irqrestore(&moxafunc_lock, flags);
+ return ret;
}
static void moxa_low_water_check(void __iomem *ofsAddr)
@@ -299,22 +314,20 @@ static int moxa_ioctl(struct tty_struct *tty, struct file *file,
struct moxa_port *p;
unsigned int i, j;
- mutex_lock(&moxa_openlock);
for (i = 0; i < MAX_BOARDS; i++) {
p = moxa_boards[i].ports;
for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
memset(&tmp, 0, sizeof(tmp));
+ spin_lock_bh(&moxa_lock);
if (moxa_boards[i].ready) {
tmp.inq = MoxaPortRxQueue(p);
tmp.outq = MoxaPortTxQueue(p);
}
- if (copy_to_user(argm, &tmp, sizeof(tmp))) {
- mutex_unlock(&moxa_openlock);
+ spin_unlock_bh(&moxa_lock);
+ if (copy_to_user(argm, &tmp, sizeof(tmp)))
return -EFAULT;
- }
}
}
- mutex_unlock(&moxa_openlock);
break;
} case MOXA_GET_OQUEUE:
status = MoxaPortTxQueue(ch);
@@ -330,16 +343,20 @@ static int moxa_ioctl(struct tty_struct *tty, struct file *file,
struct moxa_port *p;
unsigned int i, j;
- mutex_lock(&moxa_openlock);
for (i = 0; i < MAX_BOARDS; i++) {
p = moxa_boards[i].ports;
for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
struct tty_struct *ttyp;
memset(&tmp, 0, sizeof(tmp));
- if (!moxa_boards[i].ready)
+ spin_lock_bh(&moxa_lock);
+ if (!moxa_boards[i].ready) {
+ spin_unlock_bh(&moxa_lock);
goto copy;
+ }
status = MoxaPortLineStatus(p);
+ spin_unlock_bh(&moxa_lock);
+
if (status & 1)
tmp.cts = 1;
if (status & 2)
@@ -354,24 +371,21 @@ static int moxa_ioctl(struct tty_struct *tty, struct file *file,
tmp.cflag = ttyp->termios->c_cflag;
tty_kref_put(tty);
copy:
- if (copy_to_user(argm, &tmp, sizeof(tmp))) {
- mutex_unlock(&moxa_openlock);
+ if (copy_to_user(argm, &tmp, sizeof(tmp)))
return -EFAULT;
- }
}
}
- mutex_unlock(&moxa_openlock);
break;
}
case TIOCGSERIAL:
- mutex_lock(&moxa_openlock);
+ mutex_lock(&ch->port.mutex);
ret = moxa_get_serial_info(ch, argp);
- mutex_unlock(&moxa_openlock);
+ mutex_unlock(&ch->port.mutex);
break;
case TIOCSSERIAL:
- mutex_lock(&moxa_openlock);
+ mutex_lock(&ch->port.mutex);
ret = moxa_set_serial_info(ch, argp);
- mutex_unlock(&moxa_openlock);
+ mutex_unlock(&ch->port.mutex);
break;
default:
ret = -ENOIOCTLCMD;
@@ -396,8 +410,6 @@ static const struct tty_operations moxa_ops = {
.flush_buffer = moxa_flush_buffer,
.chars_in_buffer = moxa_chars_in_buffer,
.ioctl = moxa_ioctl,
- .throttle = moxa_throttle,
- .unthrottle = moxa_unthrottle,
.set_termios = moxa_set_termios,
.stop = moxa_stop,
.start = moxa_start,
@@ -409,11 +421,12 @@ static const struct tty_operations moxa_ops = {
static const struct tty_port_operations moxa_port_ops = {
.carrier_raised = moxa_carrier_raised,
+ .dtr_rts = moxa_dtr_rts,
+ .shutdown = moxa_shutdown,
};
static struct tty_driver *moxaDriver;
static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0);
-static DEFINE_SPINLOCK(moxa_lock);
/*
* HW init
@@ -1112,14 +1125,12 @@ static void __exit moxa_exit(void)
module_init(moxa_init);
module_exit(moxa_exit);
-static void moxa_close_port(struct tty_struct *tty)
+static void moxa_shutdown(struct tty_port *port)
{
- struct moxa_port *ch = tty->driver_data;
- moxa_shut_down(tty);
+ struct moxa_port *ch = container_of(port, struct moxa_port, port);
+ MoxaPortDisable(ch);
MoxaPortFlushData(ch, 2);
- ch->port.flags &= ~ASYNC_NORMAL_ACTIVE;
- tty->driver_data = NULL;
- tty_port_tty_set(&ch->port, NULL);
+ clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
}
static int moxa_carrier_raised(struct tty_port *port)
@@ -1127,45 +1138,19 @@ static int moxa_carrier_raised(struct tty_port *port)
struct moxa_port *ch = container_of(port, struct moxa_port, port);
int dcd;
- spin_lock_bh(&moxa_lock);
+ spin_lock_irq(&port->lock);
dcd = ch->DCDState;
- spin_unlock_bh(&moxa_lock);
+ spin_unlock_irq(&port->lock);
return dcd;
}
-static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp,
- struct moxa_port *ch)
+static void moxa_dtr_rts(struct tty_port *port, int onoff)
{
- struct tty_port *port = &ch->port;
- DEFINE_WAIT(wait);
- int retval = 0;
- u8 dcd;
-
- while (1) {
- prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp)) {
-#ifdef SERIAL_DO_RESTART
- retval = -ERESTARTSYS;
-#else
- retval = -EAGAIN;
-#endif
- break;
- }
- dcd = tty_port_carrier_raised(port);
- if (dcd)
- break;
-
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- schedule();
- }
- finish_wait(&port->open_wait, &wait);
-
- return retval;
+ struct moxa_port *ch = container_of(port, struct moxa_port, port);
+ MoxaPortLineCtrl(ch, onoff, onoff);
}
+
static int moxa_open(struct tty_struct *tty, struct file *filp)
{
struct moxa_board_conf *brd;
@@ -1194,6 +1179,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
ch->port.count++;
tty->driver_data = ch;
tty_port_tty_set(&ch->port, tty);
+ mutex_lock(&ch->port.mutex);
if (!(ch->port.flags & ASYNC_INITIALIZED)) {
ch->statusflags = 0;
moxa_set_tty_param(tty, tty->termios);
@@ -1202,58 +1188,20 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
MoxaSetFifo(ch, ch->type == PORT_16550A);
ch->port.flags |= ASYNC_INITIALIZED;
}
+ mutex_unlock(&ch->port.mutex);
mutex_unlock(&moxa_openlock);
- retval = 0;
- if (!(filp->f_flags & O_NONBLOCK) && !C_CLOCAL(tty))
- retval = moxa_block_till_ready(tty, filp, ch);
- mutex_lock(&moxa_openlock);
- if (retval) {
- if (ch->port.count) /* 0 means already hung up... */
- if (--ch->port.count == 0)
- moxa_close_port(tty);
- } else
- ch->port.flags |= ASYNC_NORMAL_ACTIVE;
- mutex_unlock(&moxa_openlock);
-
+ retval = tty_port_block_til_ready(&ch->port, tty, filp);
+ if (retval == 0)
+ set_bit(ASYNCB_NORMAL_ACTIVE, &ch->port.flags);
return retval;
}
static void moxa_close(struct tty_struct *tty, struct file *filp)
{
- struct moxa_port *ch;
- int port;
-
- port = tty->index;
- if (port == MAX_PORTS || tty_hung_up_p(filp))
- return;
-
- mutex_lock(&moxa_openlock);
- ch = tty->driver_data;
- if (ch == NULL)
- goto unlock;
- if (tty->count == 1 && ch->port.count != 1) {
- printk(KERN_WARNING "moxa_close: bad serial port count; "
- "tty->count is 1, ch->port.count is %d\n", ch->port.count);
- ch->port.count = 1;
- }
- if (--ch->port.count < 0) {
- printk(KERN_WARNING "moxa_close: bad serial port count, "
- "device=%s\n", tty->name);
- ch->port.count = 0;
- }
- if (ch->port.count)
- goto unlock;
-
+ struct moxa_port *ch = tty->driver_data;
ch->cflag = tty->termios->c_cflag;
- if (ch->port.flags & ASYNC_INITIALIZED) {
- moxa_setup_empty_event(tty);
- tty_wait_until_sent(tty, 30 * HZ); /* 30 seconds timeout */
- }
-
- moxa_close_port(tty);
-unlock:
- mutex_unlock(&moxa_openlock);
+ tty_port_close(&ch->port, tty, filp);
}
static int moxa_write(struct tty_struct *tty,
@@ -1269,7 +1217,7 @@ static int moxa_write(struct tty_struct *tty,
len = MoxaPortWriteData(tty, buf, count);
spin_unlock_bh(&moxa_lock);
- ch->statusflags |= LOWWAIT;
+ set_bit(LOWWAIT, &ch->statusflags);
return len;
}
@@ -1300,40 +1248,21 @@ static int moxa_chars_in_buffer(struct tty_struct *tty)
struct moxa_port *ch = tty->driver_data;
int chars;
- /*
- * Sigh...I have to check if driver_data is NULL here, because
- * if an open() fails, the TTY subsystem eventually calls
- * tty_wait_until_sent(), which calls the driver's chars_in_buffer()
- * routine. And since the open() failed, we return 0 here. TDJ
- */
- if (ch == NULL)
- return 0;
- lock_kernel();
chars = MoxaPortTxQueue(ch);
- if (chars) {
+ if (chars)
/*
* Make it possible to wakeup anything waiting for output
* in tty_ioctl.c, etc.
*/
- if (!(ch->statusflags & EMPTYWAIT))
- moxa_setup_empty_event(tty);
- }
- unlock_kernel();
+ set_bit(EMPTYWAIT, &ch->statusflags);
return chars;
}
static int moxa_tiocmget(struct tty_struct *tty, struct file *file)
{
- struct moxa_port *ch;
+ struct moxa_port *ch = tty->driver_data;
int flag = 0, dtr, rts;
- mutex_lock(&moxa_openlock);
- ch = tty->driver_data;
- if (!ch) {
- mutex_unlock(&moxa_openlock);
- return -EINVAL;
- }
-
MoxaPortGetLineOut(ch, &dtr, &rts);
if (dtr)
flag |= TIOCM_DTR;
@@ -1346,7 +1275,6 @@ static int moxa_tiocmget(struct tty_struct *tty, struct file *file)
flag |= TIOCM_DSR;
if (dtr & 4)
flag |= TIOCM_CD;
- mutex_unlock(&moxa_openlock);
return flag;
}
@@ -1379,20 +1307,6 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
return 0;
}
-static void moxa_throttle(struct tty_struct *tty)
-{
- struct moxa_port *ch = tty->driver_data;
-
- ch->statusflags |= THROTTLE;
-}
-
-static void moxa_unthrottle(struct tty_struct *tty)
-{
- struct moxa_port *ch = tty->driver_data;
-
- ch->statusflags &= ~THROTTLE;
-}
-
static void moxa_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
@@ -1412,7 +1326,7 @@ static void moxa_stop(struct tty_struct *tty)
if (ch == NULL)
return;
MoxaPortTxDisable(ch);
- ch->statusflags |= TXSTOPPED;
+ set_bit(TXSTOPPED, &ch->statusflags);
}
@@ -1427,38 +1341,32 @@ static void moxa_start(struct tty_struct *tty)
return;
MoxaPortTxEnable(ch);
- ch->statusflags &= ~TXSTOPPED;
+ clear_bit(TXSTOPPED, &ch->statusflags);
}
static void moxa_hangup(struct tty_struct *tty)
{
- struct moxa_port *ch;
-
- mutex_lock(&moxa_openlock);
- ch = tty->driver_data;
- if (ch == NULL) {
- mutex_unlock(&moxa_openlock);
- return;
- }
- ch->port.count = 0;
- moxa_close_port(tty);
- mutex_unlock(&moxa_openlock);
-
- wake_up_interruptible(&ch->port.open_wait);
+ struct moxa_port *ch = tty->driver_data;
+ tty_port_hangup(&ch->port);
}
static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd)
{
struct tty_struct *tty;
+ unsigned long flags;
dcd = !!dcd;
+ spin_lock_irqsave(&p->port.lock, flags);
if (dcd != p->DCDState) {
+ p->DCDState = dcd;
+ spin_unlock_irqrestore(&p->port.lock, flags);
tty = tty_port_tty_get(&p->port);
if (tty && C_CLOCAL(tty) && !dcd)
tty_hangup(tty);
tty_kref_put(tty);
}
- p->DCDState = dcd;
+ else
+ spin_unlock_irqrestore(&p->port.lock, flags);
}
static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
@@ -1470,24 +1378,24 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
u16 intr;
if (tty) {
- if ((p->statusflags & EMPTYWAIT) &&
+ if (test_bit(EMPTYWAIT, &p->statusflags) &&
MoxaPortTxQueue(p) == 0) {
- p->statusflags &= ~EMPTYWAIT;
+ clear_bit(EMPTYWAIT, &p->statusflags);
tty_wakeup(tty);
}
- if ((p->statusflags & LOWWAIT) && !tty->stopped &&
+ if (test_bit(LOWWAIT, &p->statusflags) && !tty->stopped &&
MoxaPortTxQueue(p) <= WAKEUP_CHARS) {
- p->statusflags &= ~LOWWAIT;
+ clear_bit(LOWWAIT, &p->statusflags);
tty_wakeup(tty);
}
- if (inited && !(p->statusflags & THROTTLE) &&
+ if (inited && !test_bit(TTY_THROTTLED, &tty->flags) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
tty_schedule_flip(tty);
}
} else {
- p->statusflags &= ~EMPTYWAIT;
+ clear_bit(EMPTYWAIT, &p->statusflags);
MoxaPortFlushData(p, 0); /* flush RX */
}
@@ -1588,35 +1496,6 @@ static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_term
tty_encode_baud_rate(tty, baud, baud);
}
-static void moxa_setup_empty_event(struct tty_struct *tty)
-{
- struct moxa_port *ch = tty->driver_data;
-
- spin_lock_bh(&moxa_lock);
- ch->statusflags |= EMPTYWAIT;
- spin_unlock_bh(&moxa_lock);
-}
-
-static void moxa_shut_down(struct tty_struct *tty)
-{
- struct moxa_port *ch = tty->driver_data;
-
- if (!(ch->port.flags & ASYNC_INITIALIZED))
- return;
-
- MoxaPortDisable(ch);
-
- /*
- * If we're a modem control device and HUPCL is on, drop RTS & DTR.
- */
- if (C_HUPCL(tty))
- MoxaPortLineCtrl(ch, 0, 0);
-
- spin_lock_bh(&moxa_lock);
- ch->port.flags &= ~ASYNC_INITIALIZED;
- spin_unlock_bh(&moxa_lock);
-}
-
/*****************************************************************************
* Driver level functions: *
*****************************************************************************/
@@ -1918,10 +1797,12 @@ static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio,
baud = MoxaPortSetBaud(port, baud);
if (termio->c_iflag & (IXON | IXOFF | IXANY)) {
+ spin_lock_irq(&moxafunc_lock);
writeb(termio->c_cc[VSTART], ofsAddr + FuncArg);
writeb(termio->c_cc[VSTOP], ofsAddr + FuncArg1);
writeb(FC_SetXonXoff, ofsAddr + FuncCode);
moxa_wait_finish(ofsAddr);
+ spin_unlock_irq(&moxafunc_lock);
}
return baud;
@@ -1974,18 +1855,14 @@ static int MoxaPortLineStatus(struct moxa_port *port)
int val;
ofsAddr = port->tableAddr;
- if (MOXA_IS_320(port->board)) {
- moxafunc(ofsAddr, FC_LineStatus, 0);
- val = readw(ofsAddr + FuncArg);
- } else {
+ if (MOXA_IS_320(port->board))
+ val = moxafuncret(ofsAddr, FC_LineStatus, 0);
+ else
val = readw(ofsAddr + FlagStat) >> 4;
- }
val &= 0x0B;
if (val & 8)
val |= 4;
- spin_lock_bh(&moxa_lock);
moxa_new_dcdstate(port, val & 8);
- spin_unlock_bh(&moxa_lock);
val &= 7;
return val;
}
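Among the moxa.c changes, statusflags moves from OR'd mask values (0x1, 0x2, 0x4) to bit numbers manipulated with set_bit()/clear_bit()/test_bit(); the defines change value because those helpers take a bit index rather than a mask, and the atomic bit operations remove the need to hold moxa_lock around simple flag updates. A small sketch of the difference (illustrative only):

#include <linux/bitops.h>

#define LOWWAIT		2		/* bit number, i.e. mask 1 << 2 */

struct example_port {
	unsigned long statusflags;
};

static void example(struct example_port *p)
{
	/* old style was p->statusflags |= 0x2; and needed external locking */
	set_bit(LOWWAIT, &p->statusflags);	/* atomic read-modify-write */

	if (test_bit(LOWWAIT, &p->statusflags))
		clear_bit(LOWWAIT, &p->statusflags);
}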
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 5e28d39b9e8..3d923065d9a 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -23,7 +23,6 @@
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
@@ -856,9 +855,9 @@ static void mxser_check_modem_status(struct tty_struct *tty,
}
}
-static int mxser_startup(struct tty_struct *tty)
+static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
{
- struct mxser_port *info = tty->driver_data;
+ struct mxser_port *info = container_of(port, struct mxser_port, port);
unsigned long page;
unsigned long flags;
@@ -868,22 +867,13 @@ static int mxser_startup(struct tty_struct *tty)
spin_lock_irqsave(&info->slock, flags);
- if (info->port.flags & ASYNC_INITIALIZED) {
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
-
if (!info->ioaddr || !info->type) {
set_bit(TTY_IO_ERROR, &tty->flags);
free_page(page);
spin_unlock_irqrestore(&info->slock, flags);
return 0;
}
- if (info->port.xmit_buf)
- free_page(page);
- else
- info->port.xmit_buf = (unsigned char *) page;
+ info->port.xmit_buf = (unsigned char *) page;
/*
* Clear the FIFO buffers and disable them
@@ -951,24 +941,19 @@ static int mxser_startup(struct tty_struct *tty)
* and set the speed of the serial port
*/
mxser_change_speed(tty, NULL);
- info->port.flags |= ASYNC_INITIALIZED;
spin_unlock_irqrestore(&info->slock, flags);
return 0;
}
/*
- * This routine will shutdown a serial port; interrupts maybe disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
+ * This routine will shutdown a serial port
*/
-static void mxser_shutdown(struct tty_struct *tty)
+static void mxser_shutdown_port(struct tty_port *port)
{
- struct mxser_port *info = tty->driver_data;
+ struct mxser_port *info = container_of(port, struct mxser_port, port);
unsigned long flags;
- if (!(info->port.flags & ASYNC_INITIALIZED))
- return;
-
spin_lock_irqsave(&info->slock, flags);
/*
@@ -978,7 +963,7 @@ static void mxser_shutdown(struct tty_struct *tty)
wake_up_interruptible(&info->port.delta_msr_wait);
/*
- * Free the IRQ, if necessary
+ * Free the xmit buffer, if necessary
*/
if (info->port.xmit_buf) {
free_page((unsigned long) info->port.xmit_buf);
@@ -988,10 +973,6 @@ static void mxser_shutdown(struct tty_struct *tty)
info->IER = 0;
outb(0x00, info->ioaddr + UART_IER);
- if (tty->termios->c_cflag & HUPCL)
- info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
- outb(info->MCR, info->ioaddr + UART_MCR);
-
/* clear Rx/Tx FIFO's */
if (info->board->chip_flag)
outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
@@ -1004,9 +985,6 @@ static void mxser_shutdown(struct tty_struct *tty)
/* read data port to reset things */
(void) inb(info->ioaddr + UART_RX);
- set_bit(TTY_IO_ERROR, &tty->flags);
-
- info->port.flags &= ~ASYNC_INITIALIZED;
if (info->board->chip_flag)
SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr);
@@ -1023,8 +1001,7 @@ static void mxser_shutdown(struct tty_struct *tty)
static int mxser_open(struct tty_struct *tty, struct file *filp)
{
struct mxser_port *info;
- unsigned long flags;
- int retval, line;
+ int line;
line = tty->index;
if (line == MXSER_PORTS)
@@ -1035,23 +1012,7 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
if (!info->ioaddr)
return -ENODEV;
- tty->driver_data = info;
- tty_port_tty_set(&info->port, tty);
- /*
- * Start up serial port
- */
- spin_lock_irqsave(&info->port.lock, flags);
- info->port.count++;
- spin_unlock_irqrestore(&info->port.lock, flags);
- retval = mxser_startup(tty);
- if (retval)
- return retval;
-
- retval = tty_port_block_til_ready(&info->port, tty, filp);
- if (retval)
- return retval;
-
- return 0;
+ return tty_port_open(&info->port, tty, filp);
}
static void mxser_flush_buffer(struct tty_struct *tty)
@@ -1075,19 +1036,11 @@ static void mxser_flush_buffer(struct tty_struct *tty)
}
-static void mxser_close_port(struct tty_struct *tty, struct tty_port *port)
+static void mxser_close_port(struct tty_port *port)
{
struct mxser_port *info = container_of(port, struct mxser_port, port);
unsigned long timeout;
/*
- * Save the termios structure, since this port may have
- * separate termios for callout and dialin.
- *
- * FIXME: Can this go ?
- */
- if (port->flags & ASYNC_NORMAL_ACTIVE)
- info->normal_termios = *tty->termios;
- /*
* At this point we stop accepting input. To do this, we
* disable the receive line status interrupts, and tell the
* interrupt driver to stop checking the data ready bit in the
@@ -1097,22 +1050,18 @@ static void mxser_close_port(struct tty_struct *tty, struct tty_port *port)
if (info->board->chip_flag)
info->IER &= ~MOXA_MUST_RECV_ISR;
- if (port->flags & ASYNC_INITIALIZED) {
- outb(info->IER, info->ioaddr + UART_IER);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
- schedule_timeout_interruptible(5);
- if (time_after(jiffies, timeout))
- break;
- }
+ outb(info->IER, info->ioaddr + UART_IER);
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ timeout = jiffies + HZ;
+ while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
+ schedule_timeout_interruptible(5);
+ if (time_after(jiffies, timeout))
+ break;
}
- mxser_shutdown(tty);
-
}
/*
@@ -1130,8 +1079,12 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
return;
if (tty_port_close_start(port, tty, filp) == 0)
return;
- mxser_close_port(tty, port);
+ mutex_lock(&port->mutex);
+ mxser_close_port(port);
mxser_flush_buffer(tty);
+ mxser_shutdown_port(port);
+ clear_bit(ASYNCB_INITIALIZED, &port->flags);
+ mutex_unlock(&port->mutex);
/* Right now the tty_port set is done outside of the close_end helper
as we don't yet have everyone using refcounts */
tty_port_close_end(port, tty);
@@ -1275,6 +1228,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
struct serial_struct __user *new_info)
{
struct mxser_port *info = tty->driver_data;
+ struct tty_port *port = &info->port;
struct serial_struct new_serial;
speed_t baud;
unsigned long sl_flags;
@@ -1290,7 +1244,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
new_serial.port != info->ioaddr)
return -EINVAL;
- flags = info->port.flags & ASYNC_SPD_MASK;
+ flags = port->flags & ASYNC_SPD_MASK;
if (!capable(CAP_SYS_ADMIN)) {
if ((new_serial.baud_base != info->baud_base) ||
@@ -1304,16 +1258,17 @@ static int mxser_set_serial_info(struct tty_struct *tty,
* OK, past this point, all the error checking has been done.
* At this point, we start making changes.....
*/
- info->port.flags = ((info->port.flags & ~ASYNC_FLAGS) |
+ port->flags = ((port->flags & ~ASYNC_FLAGS) |
(new_serial.flags & ASYNC_FLAGS));
- info->port.close_delay = new_serial.close_delay * HZ / 100;
- info->port.closing_wait = new_serial.closing_wait * HZ / 100;
- tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY)
- ? 1 : 0;
- if ((info->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
+ port->close_delay = new_serial.close_delay * HZ / 100;
+ port->closing_wait = new_serial.closing_wait * HZ / 100;
+ tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
(new_serial.baud_base != info->baud_base ||
new_serial.custom_divisor !=
info->custom_divisor)) {
+ if (new_serial.custom_divisor == 0)
+ return -EINVAL;
baud = new_serial.baud_base / new_serial.custom_divisor;
tty_encode_baud_rate(tty, baud, baud);
}
@@ -1323,15 +1278,17 @@ static int mxser_set_serial_info(struct tty_struct *tty,
process_txrx_fifo(info);
- if (info->port.flags & ASYNC_INITIALIZED) {
- if (flags != (info->port.flags & ASYNC_SPD_MASK)) {
+ if (test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (flags != (port->flags & ASYNC_SPD_MASK)) {
spin_lock_irqsave(&info->slock, sl_flags);
mxser_change_speed(tty, NULL);
spin_unlock_irqrestore(&info->slock, sl_flags);
}
- } else
- retval = mxser_startup(tty);
-
+ } else {
+ retval = mxser_activate(port, tty);
+ if (retval == 0)
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ }
return retval;
}
@@ -1520,7 +1477,8 @@ static int __init mxser_read_register(int port, unsigned short *regs)
static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
{
- struct mxser_port *port;
+ struct mxser_port *ip;
+ struct tty_port *port;
struct tty_struct *tty;
int result, status;
unsigned int i, j;
@@ -1536,38 +1494,39 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
case MOXA_CHKPORTENABLE:
result = 0;
- lock_kernel();
for (i = 0; i < MXSER_BOARDS; i++)
for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
if (mxser_boards[i].ports[j].ioaddr)
result |= (1 << i);
- unlock_kernel();
return put_user(result, (unsigned long __user *)argp);
case MOXA_GETDATACOUNT:
- lock_kernel();
+ /* The receive side is locked by port->slock but it isn't
+ clear that an exact snapshot is worth copying here */
if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
ret = -EFAULT;
- unlock_kernel();
return ret;
case MOXA_GETMSTATUS: {
struct mxser_mstatus ms, __user *msu = argp;
- lock_kernel();
for (i = 0; i < MXSER_BOARDS; i++)
for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
- port = &mxser_boards[i].ports[j];
+ ip = &mxser_boards[i].ports[j];
+ port = &ip->port;
memset(&ms, 0, sizeof(ms));
- if (!port->ioaddr)
+ mutex_lock(&port->mutex);
+ if (!ip->ioaddr)
goto copy;
- tty = tty_port_tty_get(&port->port);
+ tty = tty_port_tty_get(port);
if (!tty || !tty->termios)
- ms.cflag = port->normal_termios.c_cflag;
+ ms.cflag = ip->normal_termios.c_cflag;
else
ms.cflag = tty->termios->c_cflag;
tty_kref_put(tty);
- status = inb(port->ioaddr + UART_MSR);
+ spin_lock_irq(&ip->slock);
+ status = inb(ip->ioaddr + UART_MSR);
+ spin_unlock_irq(&ip->slock);
if (status & UART_MSR_DCD)
ms.dcd = 1;
if (status & UART_MSR_DSR)
@@ -1575,13 +1534,11 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
if (status & UART_MSR_CTS)
ms.cts = 1;
copy:
- if (copy_to_user(msu, &ms, sizeof(ms))) {
- unlock_kernel();
+ mutex_unlock(&port->mutex);
+ if (copy_to_user(msu, &ms, sizeof(ms)))
return -EFAULT;
- }
msu++;
}
- unlock_kernel();
return 0;
}
case MOXA_ASPP_MON_EXT: {
@@ -1593,41 +1550,48 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
if (!me)
return -ENOMEM;
- lock_kernel();
for (i = 0, p = 0; i < MXSER_BOARDS; i++) {
for (j = 0; j < MXSER_PORTS_PER_BOARD; j++, p++) {
if (p >= ARRAY_SIZE(me->rx_cnt)) {
i = MXSER_BOARDS;
break;
}
- port = &mxser_boards[i].ports[j];
- if (!port->ioaddr)
+ ip = &mxser_boards[i].ports[j];
+ port = &ip->port;
+
+ mutex_lock(&port->mutex);
+ if (!ip->ioaddr) {
+ mutex_unlock(&port->mutex);
continue;
+ }
- status = mxser_get_msr(port->ioaddr, 0, p);
+ spin_lock_irq(&ip->slock);
+ status = mxser_get_msr(ip->ioaddr, 0, p);
if (status & UART_MSR_TERI)
- port->icount.rng++;
+ ip->icount.rng++;
if (status & UART_MSR_DDSR)
- port->icount.dsr++;
+ ip->icount.dsr++;
if (status & UART_MSR_DDCD)
- port->icount.dcd++;
+ ip->icount.dcd++;
if (status & UART_MSR_DCTS)
- port->icount.cts++;
+ ip->icount.cts++;
- port->mon_data.modem_status = status;
- me->rx_cnt[p] = port->mon_data.rxcnt;
- me->tx_cnt[p] = port->mon_data.txcnt;
- me->up_rxcnt[p] = port->mon_data.up_rxcnt;
- me->up_txcnt[p] = port->mon_data.up_txcnt;
+ ip->mon_data.modem_status = status;
+ me->rx_cnt[p] = ip->mon_data.rxcnt;
+ me->tx_cnt[p] = ip->mon_data.txcnt;
+ me->up_rxcnt[p] = ip->mon_data.up_rxcnt;
+ me->up_txcnt[p] = ip->mon_data.up_txcnt;
me->modem_status[p] =
- port->mon_data.modem_status;
- tty = tty_port_tty_get(&port->port);
+ ip->mon_data.modem_status;
+ spin_unlock_irq(&ip->slock);
+
+ tty = tty_port_tty_get(&ip->port);
if (!tty || !tty->termios) {
- cflag = port->normal_termios.c_cflag;
- iflag = port->normal_termios.c_iflag;
- me->baudrate[p] = tty_termios_baud_rate(&port->normal_termios);
+ cflag = ip->normal_termios.c_cflag;
+ iflag = ip->normal_termios.c_iflag;
+ me->baudrate[p] = tty_termios_baud_rate(&ip->normal_termios);
} else {
cflag = tty->termios->c_cflag;
iflag = tty->termios->c_iflag;
@@ -1646,16 +1610,15 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
if (iflag & (IXON | IXOFF))
me->flowctrl[p] |= 0x0C;
- if (port->type == PORT_16550A)
+ if (ip->type == PORT_16550A)
me->fifo[p] = 1;
- opmode = inb(port->opmode_ioaddr) >>
- ((p % 4) * 2);
+ opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
opmode &= OP_MODE_MASK;
me->iftype[p] = opmode;
+ mutex_unlock(&port->mutex);
}
}
- unlock_kernel();
if (copy_to_user(argp, me, sizeof(*me)))
ret = -EFAULT;
kfree(me);
@@ -1692,6 +1655,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct mxser_port *info = tty->driver_data;
+ struct tty_port *port = &info->port;
struct async_icount cnow;
unsigned long flags;
void __user *argp = (void __user *)arg;
@@ -1716,20 +1680,20 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
opmode != RS422_MODE &&
opmode != RS485_4WIRE_MODE)
return -EFAULT;
- lock_kernel();
mask = ModeMask[p];
shiftbit = p * 2;
+ spin_lock_irq(&info->slock);
val = inb(info->opmode_ioaddr);
val &= mask;
val |= (opmode << shiftbit);
outb(val, info->opmode_ioaddr);
- unlock_kernel();
+ spin_unlock_irq(&info->slock);
} else {
- lock_kernel();
shiftbit = p * 2;
+ spin_lock_irq(&info->slock);
opmode = inb(info->opmode_ioaddr) >> shiftbit;
+ spin_unlock_irq(&info->slock);
opmode &= OP_MODE_MASK;
- unlock_kernel();
if (put_user(opmode, (int __user *)argp))
return -EFAULT;
}
@@ -1742,14 +1706,14 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
switch (cmd) {
case TIOCGSERIAL:
- lock_kernel();
+ mutex_lock(&port->mutex);
retval = mxser_get_serial_info(tty, argp);
- unlock_kernel();
+ mutex_unlock(&port->mutex);
return retval;
case TIOCSSERIAL:
- lock_kernel();
+ mutex_lock(&port->mutex);
retval = mxser_set_serial_info(tty, argp);
- unlock_kernel();
+ mutex_unlock(&port->mutex);
return retval;
case TIOCSERGETLSR: /* Get line status register */
return mxser_get_lsr_info(info, argp);
@@ -1795,31 +1759,33 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
case MOXA_HighSpeedOn:
return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
case MOXA_SDS_RSTICOUNTER:
- lock_kernel();
+ spin_lock_irq(&info->slock);
info->mon_data.rxcnt = 0;
info->mon_data.txcnt = 0;
- unlock_kernel();
+ spin_unlock_irq(&info->slock);
return 0;
case MOXA_ASPP_OQUEUE:{
int len, lsr;
- lock_kernel();
len = mxser_chars_in_buffer(tty);
+ spin_lock(&info->slock);
lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_THRE;
+ spin_unlock_irq(&info->slock);
len += (lsr ? 0 : 1);
- unlock_kernel();
return put_user(len, (int __user *)argp);
}
case MOXA_ASPP_MON: {
int mcr, status;
- lock_kernel();
+ spin_lock(&info->slock);
status = mxser_get_msr(info->ioaddr, 1, tty->index);
mxser_check_modem_status(tty, info, status);
mcr = inb(info->ioaddr + UART_MCR);
+ spin_unlock(&info->slock);
+
if (mcr & MOXA_MUST_MCR_XON_FLAG)
info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
else
@@ -1834,7 +1800,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
else
info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
- unlock_kernel();
+
if (copy_to_user(argp, &info->mon_data,
sizeof(struct mxser_mon)))
return -EFAULT;
@@ -1993,6 +1959,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct mxser_port *info = tty->driver_data;
unsigned long orig_jiffies, char_time;
+ unsigned long flags;
int lsr;
if (info->type == PORT_UNKNOWN)
@@ -2032,19 +1999,21 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
timeout, char_time);
printk("jiff=%lu...", jiffies);
#endif
- lock_kernel();
+ spin_lock_irqsave(&info->slock, flags);
while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
#endif
+ spin_unlock_irqrestore(&info->slock, flags);
schedule_timeout_interruptible(char_time);
+ spin_lock_irqsave(&info->slock, flags);
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
+ spin_unlock_irqrestore(&info->slock, flags);
set_current_state(TASK_RUNNING);
- unlock_kernel();
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
@@ -2059,7 +2028,6 @@ static void mxser_hangup(struct tty_struct *tty)
struct mxser_port *info = tty->driver_data;
mxser_flush_buffer(tty);
- mxser_shutdown(tty);
tty_port_hangup(&info->port);
}
@@ -2363,6 +2331,8 @@ static const struct tty_operations mxser_ops = {
struct tty_port_operations mxser_port_ops = {
.carrier_raised = mxser_carrier_raised,
.dtr_rts = mxser_dtr_rts,
+ .activate = mxser_activate,
+ .shutdown = mxser_shutdown_port,
};
/*
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index d3400b20444..7d73cd43034 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -358,7 +358,7 @@ struct port {
u8 update_flow_control;
struct ctrl_ul ctrl_ul;
struct ctrl_dl ctrl_dl;
- struct kfifo *fifo_ul;
+ struct kfifo fifo_ul;
void __iomem *dl_addr[2];
u32 dl_size[2];
u8 toggle_dl;
@@ -685,8 +685,6 @@ static int nozomi_read_config_table(struct nozomi *dc)
dump_table(dc);
for (i = PORT_MDM; i < MAX_PORT; i++) {
- dc->port[i].fifo_ul =
- kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
}
@@ -798,7 +796,7 @@ static int send_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
/* Get data from tty and place in buf for now */
- size = __kfifo_get(port->fifo_ul, dc->send_buf,
+ size = kfifo_out(&port->fifo_ul, dc->send_buf,
ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
if (size == 0) {
@@ -988,11 +986,11 @@ static int receive_flow_control(struct nozomi *dc)
} else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
- if (__kfifo_len(dc->port[port].fifo_ul)) {
+ if (kfifo_len(&dc->port[port].fifo_ul)) {
DBG1("Enable interrupt (0x%04X) on port: %d",
enable_ier, port);
DBG1("Data in buffer [%d], enable transmit! ",
- __kfifo_len(dc->port[port].fifo_ul));
+ kfifo_len(&dc->port[port].fifo_ul));
enable_transmit_ul(port, dc);
} else {
DBG1("No data in buffer...");
@@ -1433,6 +1431,16 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
goto err_free_sbuf;
}
+ for (i = PORT_MDM; i < MAX_PORT; i++) {
+ if (kfifo_alloc(&dc->port[i].fifo_ul,
+ FIFO_BUFFER_SIZE_UL, GFP_ATOMIC)) {
+ dev_err(&pdev->dev,
+ "Could not allocate kfifo buffer\n");
+ ret = -ENOMEM;
+ goto err_free_kfifo;
+ }
+ }
+
spin_lock_init(&dc->spin_mutex);
nozomi_setup_private_data(dc);
@@ -1445,7 +1453,7 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
NOZOMI_NAME, dc);
if (unlikely(ret)) {
dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq);
- goto err_free_sbuf;
+ goto err_free_kfifo;
}
DBG1("base_addr: %p", dc->base_addr);
@@ -1464,13 +1472,28 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
dc->state = NOZOMI_STATE_ENABLED;
for (i = 0; i < MAX_PORT; i++) {
+ struct device *tty_dev;
+
mutex_init(&dc->port[i].tty_sem);
tty_port_init(&dc->port[i].port);
- tty_register_device(ntty_driver, dc->index_start + i,
+ tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
&pdev->dev);
+
+ if (IS_ERR(tty_dev)) {
+ ret = PTR_ERR(tty_dev);
+ dev_err(&pdev->dev, "Could not allocate tty?\n");
+ goto err_free_tty;
+ }
}
+
return 0;
+err_free_tty:
+ for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i)
+ tty_unregister_device(ntty_driver, i);
+err_free_kfifo:
+ for (i = 0; i < MAX_PORT; i++)
+ kfifo_free(&dc->port[i].fifo_ul);
err_free_sbuf:
kfree(dc->send_buf);
iounmap(dc->base_addr);
@@ -1536,8 +1559,7 @@ static void __devexit nozomi_card_exit(struct pci_dev *pdev)
free_irq(pdev->irq, dc);
for (i = 0; i < MAX_PORT; i++)
- if (dc->port[i].fifo_ul)
- kfifo_free(dc->port[i].fifo_ul);
+ kfifo_free(&dc->port[i].fifo_ul);
kfree(dc->send_buf);
@@ -1673,7 +1695,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
goto exit;
}
- rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count);
+ rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count);
/* notify card */
if (unlikely(dc == NULL)) {
@@ -1721,7 +1743,7 @@ static int ntty_write_room(struct tty_struct *tty)
if (!port->port.count)
goto exit;
- room = port->fifo_ul->size - __kfifo_len(port->fifo_ul);
+ room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
exit:
mutex_unlock(&port->tty_sem);
@@ -1878,7 +1900,7 @@ static s32 ntty_chars_in_buffer(struct tty_struct *tty)
goto exit_in_buffer;
}
- rval = __kfifo_len(port->fifo_ul);
+ rval = kfifo_len(&port->fifo_ul);
exit_in_buffer:
return rval;
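The nozomi.c hunks track the kfifo API rework in this merge: struct kfifo is now embedded by value, kfifo_alloc() initialises it in place and returns an error code, and __kfifo_put()/__kfifo_get()/__kfifo_len() become kfifo_in()/kfifo_out()/kfifo_len(), with locking left entirely to the caller. A minimal sketch of the new usage (hypothetical "chan" structure, not code from this driver):

#include <linux/kfifo.h>
#include <linux/slab.h>

struct chan {
	struct kfifo fifo;		/* was: struct kfifo *fifo */
};

static int chan_init(struct chan *c)
{
	/* allocates the internal buffer; 0 on success, -ENOMEM on failure */
	return kfifo_alloc(&c->fifo, 4096, GFP_KERNEL);
}

static void chan_exit(struct chan *c)
{
	kfifo_free(&c->fifo);
}

static unsigned int chan_queue(struct chan *c, const unsigned char *buf,
			       unsigned int len)
{
	return kfifo_in(&c->fifo, buf, len);	/* was __kfifo_put() */
}

static unsigned int chan_dequeue(struct chan *c, unsigned char *buf,
				 unsigned int len)
{
	return kfifo_out(&c->fifo, buf, len);	/* was __kfifo_get() */
}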
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 4008e2ce73c..fdbcc9fd6d3 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -264,10 +264,16 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
unsigned char contents[NVRAM_BYTES];
unsigned i = *ppos;
unsigned char *tmp;
- int len;
- len = (NVRAM_BYTES - i) < count ? (NVRAM_BYTES - i) : count;
- if (copy_from_user(contents, buf, len))
+ if (i >= NVRAM_BYTES)
+ return 0; /* Past EOF */
+
+ if (count > NVRAM_BYTES - i)
+ count = NVRAM_BYTES - i;
+ if (count > NVRAM_BYTES)
+ return -EFAULT; /* Can't happen, but prove it to gcc */
+
+ if (copy_from_user(contents, buf, count))
return -EFAULT;
spin_lock_irq(&rtc_lock);
@@ -275,7 +281,7 @@ static ssize_t nvram_write(struct file *file, const char __user *buf,
if (!__nvram_check_checksum())
goto checksum_err;
- for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp)
+ for (tmp = contents; count--; ++i, ++tmp)
__nvram_write_byte(*tmp, i);
__nvram_set_checksum();
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index 674b3ab3587..2bb7874a689 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -603,7 +603,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
}
}
-static struct tty_operations tty_ops = {
+static const struct tty_operations tty_ops = {
.open = ipw_open,
.close = ipw_close,
.hangup = ipw_hangup,
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index d86c0bc05c1..385c44b3034 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -659,7 +659,7 @@ static int __ptmx_open(struct inode *inode, struct file *filp)
if (!retval)
return 0;
out1:
- tty_release_dev(filp);
+ tty_release(inode, filp);
return retval;
out:
devpts_kill_index(inode, index);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index dcd08635cf1..8258982b49e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1245,12 +1245,8 @@ static int proc_do_uuid(ctl_table *table, int write,
if (uuid[8] == 0)
generate_random_uuid(uuid);
- sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
- "%02x%02x%02x%02x%02x%02x",
- uuid[0], uuid[1], uuid[2], uuid[3],
- uuid[4], uuid[5], uuid[6], uuid[7],
- uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15]);
+ sprintf(buf, "%pU", uuid);
+
fake_table.data = buf;
fake_table.maxlen = sizeof(buf);
@@ -1310,7 +1306,7 @@ ctl_table random_table[] = {
/********************************************************************
*
- * Random funtions for networking
+ * Random functions for networking
*
********************************************************************/
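The earlier random.c hunk relies on the %pU extension to the kernel's vsnprintf(), which formats a 16-byte UUID in the canonical 8-4-4-4-12 form in a single conversion. Illustrative use (not part of this patch):

#include <linux/kernel.h>

/* buf must hold at least 37 bytes: 36 characters plus the trailing NUL */
static void format_uuid(char *buf, const unsigned char uuid[16])
{
	sprintf(buf, "%pU", uuid);	/* e.g. "6fa459ea-ee8a-3ca4-894e-db77e160355e" */
}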
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 3cfa22d469e..0a8d1e56c99 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -793,26 +793,21 @@ static void rc_change_speed(struct tty_struct *tty, struct riscom_board *bp,
}
/* Must be called with interrupts enabled */
-static int rc_setup_port(struct tty_struct *tty, struct riscom_board *bp,
- struct riscom_port *port)
+static int rc_activate_port(struct tty_port *port, struct tty_struct *tty)
{
+ struct riscom_port *rp = container_of(port, struct riscom_port, port);
+ struct riscom_board *bp = port_Board(rp);
unsigned long flags;
- if (port->port.flags & ASYNC_INITIALIZED)
- return 0;
-
- if (tty_port_alloc_xmit_buf(&port->port) < 0)
+ if (tty_port_alloc_xmit_buf(port) < 0)
return -ENOMEM;
spin_lock_irqsave(&riscom_lock, flags);
clear_bit(TTY_IO_ERROR, &tty->flags);
- if (port->port.count == 1)
- bp->count++;
- port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- rc_change_speed(tty, bp, port);
- port->port.flags |= ASYNC_INITIALIZED;
-
+ bp->count++;
+ rp->xmit_cnt = rp->xmit_head = rp->xmit_tail = 0;
+ rc_change_speed(tty, bp, rp);
spin_unlock_irqrestore(&riscom_lock, flags);
return 0;
}
@@ -821,9 +816,6 @@ static int rc_setup_port(struct tty_struct *tty, struct riscom_board *bp,
static void rc_shutdown_port(struct tty_struct *tty,
struct riscom_board *bp, struct riscom_port *port)
{
- if (!(port->port.flags & ASYNC_INITIALIZED))
- return;
-
#ifdef RC_REPORT_OVERRUN
printk(KERN_INFO "rc%d: port %d: Total %ld overruns were detected.\n",
board_No(bp), port_No(port), port->overrun);
@@ -840,11 +832,6 @@ static void rc_shutdown_port(struct tty_struct *tty,
}
#endif
tty_port_free_xmit_buf(&port->port);
- if (C_HUPCL(tty)) {
- /* Drop DTR */
- bp->DTR |= (1u << port_No(port));
- rc_out(bp, RC_DTR, bp->DTR);
- }
/* Select port */
rc_out(bp, CD180_CAR, port_No(port));
@@ -856,7 +843,6 @@ static void rc_shutdown_port(struct tty_struct *tty,
rc_out(bp, CD180_IER, port->IER);
set_bit(TTY_IO_ERROR, &tty->flags);
- port->port.flags &= ~ASYNC_INITIALIZED;
if (--bp->count < 0) {
printk(KERN_INFO "rc%d: rc_shutdown_port: "
@@ -889,6 +875,20 @@ static int carrier_raised(struct tty_port *port)
return CD;
}
+static void dtr_rts(struct tty_port *port, int onoff)
+{
+ struct riscom_port *p = container_of(port, struct riscom_port, port);
+ struct riscom_board *bp = port_Board(p);
+ unsigned long flags;
+
+ spin_lock_irqsave(&riscom_lock, flags);
+ bp->DTR &= ~(1u << port_No(p));
+ if (onoff == 0)
+ bp->DTR |= (1u << port_No(p));
+ rc_out(bp, RC_DTR, bp->DTR);
+ spin_unlock_irqrestore(&riscom_lock, flags);
+}
+
static int rc_open(struct tty_struct *tty, struct file *filp)
{
int board;
@@ -909,14 +909,7 @@ static int rc_open(struct tty_struct *tty, struct file *filp)
if (error)
return error;
- port->port.count++;
- tty->driver_data = port;
- tty_port_tty_set(&port->port, tty);
-
- error = rc_setup_port(tty, bp, port);
- if (error == 0)
- error = tty_port_block_til_ready(&port->port, tty, filp);
- return error;
+ return tty_port_open(&port->port, tty, filp);
}
static void rc_flush_buffer(struct tty_struct *tty)
@@ -950,24 +943,23 @@ static void rc_close_port(struct tty_port *port)
spin_lock_irqsave(&riscom_lock, flags);
rp->IER &= ~IER_RXD;
- if (port->flags & ASYNC_INITIALIZED) {
- rp->IER &= ~IER_TXRDY;
- rp->IER |= IER_TXEMPTY;
- rc_out(bp, CD180_CAR, port_No(rp));
- rc_out(bp, CD180_IER, rp->IER);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (rp->IER & IER_TXEMPTY) {
- spin_unlock_irqrestore(&riscom_lock, flags);
- msleep_interruptible(jiffies_to_msecs(rp->timeout));
- spin_lock_irqsave(&riscom_lock, flags);
- if (time_after(jiffies, timeout))
- break;
- }
+
+ rp->IER &= ~IER_TXRDY;
+ rp->IER |= IER_TXEMPTY;
+ rc_out(bp, CD180_CAR, port_No(rp));
+ rc_out(bp, CD180_IER, rp->IER);
+ /*
+ * Before we drop DTR, make sure the UART transmitter
+ * has completely drained; this is especially
+ * important if there is a transmit FIFO!
+ */
+ timeout = jiffies + HZ;
+ while (rp->IER & IER_TXEMPTY) {
+ spin_unlock_irqrestore(&riscom_lock, flags);
+ msleep_interruptible(jiffies_to_msecs(rp->timeout));
+ spin_lock_irqsave(&riscom_lock, flags);
+ if (time_after(jiffies, timeout))
+ break;
}
rc_shutdown_port(port->tty, bp, rp);
spin_unlock_irqrestore(&riscom_lock, flags);
@@ -1354,7 +1346,6 @@ static void rc_hangup(struct tty_struct *tty)
if (rc_paranoia_check(port, tty->name, "rc_hangup"))
return;
- rc_shutdown_port(tty, port_Board(port), port);
tty_port_hangup(&port->port);
}
@@ -1401,7 +1392,9 @@ static const struct tty_operations riscom_ops = {
static const struct tty_port_operations riscom_port_ops = {
.carrier_raised = carrier_raised,
+ .dtr_rts = dtr_rts,
.shutdown = rc_close_port,
+ .activate = rc_activate_port,
};
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 8c262aaf7c2..0798754a607 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -487,7 +487,7 @@ static struct sonypi_device {
int camera_power;
int bluetooth_power;
struct mutex lock;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
wait_queue_head_t fifo_proc_list;
struct fasync_struct *fifo_async;
@@ -496,7 +496,7 @@ static struct sonypi_device {
struct input_dev *input_jog_dev;
struct input_dev *input_key_dev;
struct work_struct input_work;
- struct kfifo *input_fifo;
+ struct kfifo input_fifo;
spinlock_t input_fifo_lock;
} sonypi_device;
@@ -777,8 +777,9 @@ static void input_keyrelease(struct work_struct *work)
{
struct sonypi_keypress kp;
- while (kfifo_get(sonypi_device.input_fifo, (unsigned char *)&kp,
- sizeof(kp)) == sizeof(kp)) {
+ while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
+ sizeof(kp), &sonypi_device.input_fifo_lock)
+ == sizeof(kp)) {
msleep(10);
input_report_key(kp.dev, kp.key, 0);
input_sync(kp.dev);
@@ -827,8 +828,9 @@ static void sonypi_report_input_event(u8 event)
if (kp.dev) {
input_report_key(kp.dev, kp.key, 1);
input_sync(kp.dev);
- kfifo_put(sonypi_device.input_fifo,
- (unsigned char *)&kp, sizeof(kp));
+ kfifo_in_locked(&sonypi_device.input_fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sonypi_device.input_fifo_lock);
schedule_work(&sonypi_device.input_work);
}
}
@@ -880,7 +882,8 @@ found:
acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
#endif
- kfifo_put(sonypi_device.fifo, (unsigned char *)&event, sizeof(event));
+ kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
+ sizeof(event), &sonypi_device.fifo_lock);
kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_device.fifo_proc_list);
@@ -906,7 +909,7 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
mutex_lock(&sonypi_device.lock);
/* Flush input queue on first open */
if (!sonypi_device.open_count)
- kfifo_reset(sonypi_device.fifo);
+ kfifo_reset(&sonypi_device.fifo);
sonypi_device.open_count++;
mutex_unlock(&sonypi_device.lock);
unlock_kernel();
@@ -919,17 +922,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
ssize_t ret;
unsigned char c;
- if ((kfifo_len(sonypi_device.fifo) == 0) &&
+ if ((kfifo_len(&sonypi_device.fifo) == 0) &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
- kfifo_len(sonypi_device.fifo) != 0);
+ kfifo_len(&sonypi_device.fifo) != 0);
if (ret)
return ret;
while (ret < count &&
- (kfifo_get(sonypi_device.fifo, &c, sizeof(c)) == sizeof(c))) {
+ (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
+ &sonypi_device.fifo_lock) == sizeof(c))) {
if (put_user(c, buf++))
return -EFAULT;
ret++;
@@ -946,7 +950,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
- if (kfifo_len(sonypi_device.fifo))
+ if (kfifo_len(&sonypi_device.fifo))
return POLLIN | POLLRDNORM;
return 0;
}
@@ -1313,11 +1317,10 @@ static int __devinit sonypi_probe(struct platform_device *dev)
"http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
spin_lock_init(&sonypi_device.fifo_lock);
- sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
- &sonypi_device.fifo_lock);
- if (IS_ERR(sonypi_device.fifo)) {
+ error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
+ if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- return PTR_ERR(sonypi_device.fifo);
+ return error;
}
init_waitqueue_head(&sonypi_device.fifo_proc_list);
@@ -1393,12 +1396,10 @@ static int __devinit sonypi_probe(struct platform_device *dev)
}
spin_lock_init(&sonypi_device.input_fifo_lock);
- sonypi_device.input_fifo =
- kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
- &sonypi_device.input_fifo_lock);
- if (IS_ERR(sonypi_device.input_fifo)) {
+ error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
+ GFP_KERNEL);
+ if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- error = PTR_ERR(sonypi_device.input_fifo);
goto err_inpdev_unregister;
}
@@ -1423,7 +1424,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
pci_disable_device(pcidev);
err_put_pcidev:
pci_dev_put(pcidev);
- kfifo_free(sonypi_device.fifo);
+ kfifo_free(&sonypi_device.fifo);
return error;
}
@@ -1438,7 +1439,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
if (useinput) {
input_unregister_device(sonypi_device.input_key_dev);
input_unregister_device(sonypi_device.input_jog_dev);
- kfifo_free(sonypi_device.input_fifo);
+ kfifo_free(&sonypi_device.input_fifo);
}
misc_deregister(&sonypi_misc_device);
@@ -1451,7 +1452,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
pci_dev_put(sonypi_device.dev);
}
- kfifo_free(sonypi_device.fifo);
+ kfifo_free(&sonypi_device.fifo);
return 0;
}
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index db6dcfa35ba..0e511d61f54 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -407,7 +407,7 @@ static unsigned int stl_baudrates[] = {
* Declare all those functions in this driver!
*/
-static int stl_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg);
+static long stl_memioctl(struct file *fp, unsigned int cmd, unsigned long arg);
static int stl_brdinit(struct stlbrd *brdp);
static int stl_getportstats(struct tty_struct *tty, struct stlport *portp, comstats_t __user *cp);
static int stl_clrportstats(struct stlport *portp, comstats_t __user *cp);
@@ -607,7 +607,7 @@ static unsigned int sc26198_baudtable[] = {
*/
static const struct file_operations stl_fsiomem = {
.owner = THIS_MODULE,
- .ioctl = stl_memioctl,
+ .unlocked_ioctl = stl_memioctl,
};
static struct class *stallion_class;
@@ -702,6 +702,24 @@ static struct stlbrd *stl_allocbrd(void)
/*****************************************************************************/
+static int stl_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct stlport *portp = container_of(port, struct stlport, port);
+ if (!portp->tx.buf) {
+ portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
+ if (!portp->tx.buf)
+ return -ENOMEM;
+ portp->tx.head = portp->tx.buf;
+ portp->tx.tail = portp->tx.buf;
+ }
+ stl_setport(portp, tty->termios);
+ portp->sigs = stl_getsignals(portp);
+ stl_setsignals(portp, 1, 1);
+ stl_enablerxtx(portp, 1, 1);
+ stl_startrxtx(portp, 1, 0);
+ return 0;
+}
+
static int stl_open(struct tty_struct *tty, struct file *filp)
{
struct stlport *portp;
@@ -737,32 +755,8 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
if (portp == NULL)
return -ENODEV;
port = &portp->port;
+ return tty_port_open(&portp->port, tty, filp);
-/*
- * On the first open of the device setup the port hardware, and
- * initialize the per port data structure.
- */
- tty_port_tty_set(port, tty);
- tty->driver_data = portp;
- port->count++;
-
- if ((port->flags & ASYNC_INITIALIZED) == 0) {
- if (!portp->tx.buf) {
- portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
- if (!portp->tx.buf)
- return -ENOMEM;
- portp->tx.head = portp->tx.buf;
- portp->tx.tail = portp->tx.buf;
- }
- stl_setport(portp, tty->termios);
- portp->sigs = stl_getsignals(portp);
- stl_setsignals(portp, 1, 1);
- stl_enablerxtx(portp, 1, 1);
- stl_startrxtx(portp, 1, 0);
- clear_bit(TTY_IO_ERROR, &tty->flags);
- port->flags |= ASYNC_INITIALIZED;
- }
- return tty_port_block_til_ready(port, tty, filp);
}
/*****************************************************************************/
@@ -826,38 +820,12 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
/*****************************************************************************/
-static void stl_close(struct tty_struct *tty, struct file *filp)
+static void stl_shutdown(struct tty_port *port)
{
- struct stlport *portp;
- struct tty_port *port;
- unsigned long flags;
-
- pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp);
-
- portp = tty->driver_data;
- BUG_ON(portp == NULL);
-
- port = &portp->port;
-
- if (tty_port_close_start(port, tty, filp) == 0)
- return;
-/*
- * May want to wait for any data to drain before closing. The BUSY
- * flag keeps track of whether we are still sending or not - it is
- * very accurate for the cd1400, not quite so for the sc26198.
- * (The sc26198 has no "end-of-data" interrupt only empty FIFO)
- */
- stl_waituntilsent(tty, (HZ / 2));
-
- spin_lock_irqsave(&port->lock, flags);
- portp->port.flags &= ~ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&port->lock, flags);
-
+ struct stlport *portp = container_of(port, struct stlport, port);
stl_disableintrs(portp);
- if (tty->termios->c_cflag & HUPCL)
- stl_setsignals(portp, 0, 0);
stl_enablerxtx(portp, 0, 0);
- stl_flushbuffer(tty);
+ stl_flush(portp);
portp->istate = 0;
if (portp->tx.buf != NULL) {
kfree(portp->tx.buf);
@@ -865,9 +833,16 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
portp->tx.head = NULL;
portp->tx.tail = NULL;
}
+}
+
+static void stl_close(struct tty_struct *tty, struct file *filp)
+{
+	struct stlport *portp;
+ pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp);
- tty_port_close_end(port, tty);
- tty_port_tty_set(port, NULL);
+ portp = tty->driver_data;
+ BUG_ON(portp == NULL);
+ tty_port_close(&portp->port, tty, filp);
}
/*****************************************************************************/
@@ -1314,35 +1289,12 @@ static void stl_stop(struct tty_struct *tty)
static void stl_hangup(struct tty_struct *tty)
{
- struct stlport *portp;
- struct tty_port *port;
- unsigned long flags;
-
+ struct stlport *portp = tty->driver_data;
pr_debug("stl_hangup(tty=%p)\n", tty);
- portp = tty->driver_data;
if (portp == NULL)
return;
- port = &portp->port;
-
- spin_lock_irqsave(&port->lock, flags);
- port->flags &= ~ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&port->lock, flags);
-
- stl_disableintrs(portp);
- if (tty->termios->c_cflag & HUPCL)
- stl_setsignals(portp, 0, 0);
- stl_enablerxtx(portp, 0, 0);
- stl_flushbuffer(tty);
- portp->istate = 0;
- set_bit(TTY_IO_ERROR, &tty->flags);
- if (portp->tx.buf != NULL) {
- kfree(portp->tx.buf);
- portp->tx.buf = NULL;
- portp->tx.head = NULL;
- portp->tx.tail = NULL;
- }
- tty_port_hangup(port);
+ tty_port_hangup(&portp->port);
}
/*****************************************************************************/
@@ -2486,18 +2438,19 @@ static int stl_getbrdstruct(struct stlbrd __user *arg)
* collection.
*/
-static int stl_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, unsigned long arg)
+static long stl_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
int brdnr, rc;
void __user *argp = (void __user *)arg;
- pr_debug("stl_memioctl(ip=%p,fp=%p,cmd=%x,arg=%lx)\n", ip, fp, cmd,arg);
+	pr_debug("stl_memioctl(fp=%p,cmd=%x,arg=%lx)\n", fp, cmd, arg);
- brdnr = iminor(ip);
+ brdnr = iminor(fp->f_dentry->d_inode);
if (brdnr >= STL_MAXBRDS)
return -ENODEV;
rc = 0;
+ lock_kernel();
switch (cmd) {
case COM_GETPORTSTATS:
rc = stl_getportstats(NULL, NULL, argp);
@@ -2518,7 +2471,7 @@ static int stl_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, uns
rc = -ENOIOCTLCMD;
break;
}
-
+ unlock_kernel();
return rc;
}
@@ -2549,6 +2502,8 @@ static const struct tty_operations stl_ops = {
static const struct tty_port_operations stl_port_ops = {
.carrier_raised = stl_carrier_raised,
.dtr_rts = stl_dtr_rts,
+ .activate = stl_activate,
+ .shutdown = stl_shutdown,
};
/*****************************************************************************/
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 44203ff599d..1ae2de7d8b4 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -339,7 +339,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
+ out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0, NULL);
}
static DECLARE_WORK(moom_work, moom_callback);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 59499ee0fe6..f15df40bc31 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -142,7 +142,6 @@ ssize_t redirected_tty_write(struct file *, const char __user *,
size_t, loff_t *);
static unsigned int tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
-static int tty_release(struct inode *, struct file *);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long tty_compat_ioctl(struct file *file, unsigned int cmd,
@@ -506,8 +505,6 @@ static void do_tty_hangup(struct work_struct *work)
if (!tty)
return;
- /* inuse_filps is protected by the single kernel lock */
- lock_kernel();
spin_lock(&redirect_lock);
if (redirect && redirect->private_data == tty) {
@@ -516,7 +513,10 @@ static void do_tty_hangup(struct work_struct *work)
}
spin_unlock(&redirect_lock);
+ /* inuse_filps is protected by the single kernel lock */
+ lock_kernel();
check_tty_count(tty, "do_tty_hangup");
+
file_list_lock();
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
@@ -708,6 +708,8 @@ void disassociate_ctty(int on_exit)
struct tty_struct *tty;
struct pid *tty_pgrp = NULL;
+ if (!current->signal->leader)
+ return;
tty = get_current_tty();
if (tty) {
@@ -773,8 +775,7 @@ void no_tty(void)
{
struct task_struct *tsk = current;
lock_kernel();
- if (tsk->signal->leader)
- disassociate_ctty(0);
+ disassociate_ctty(0);
unlock_kernel();
proc_clear_tty(tsk);
}
@@ -1017,14 +1018,16 @@ out:
void tty_write_message(struct tty_struct *tty, char *msg)
{
- lock_kernel();
if (tty) {
mutex_lock(&tty->atomic_write_lock);
- if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags))
+ lock_kernel();
+ if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
+ unlock_kernel();
tty->ops->write(tty, msg, strlen(msg));
+ } else
+ unlock_kernel();
tty_write_unlock(tty);
}
- unlock_kernel();
return;
}
@@ -1202,14 +1205,21 @@ static int tty_driver_install_tty(struct tty_driver *driver,
struct tty_struct *tty)
{
int idx = tty->index;
+ int ret;
- if (driver->ops->install)
- return driver->ops->install(driver, tty);
+ if (driver->ops->install) {
+ lock_kernel();
+ ret = driver->ops->install(driver, tty);
+ unlock_kernel();
+ return ret;
+ }
if (tty_init_termios(tty) == 0) {
+ lock_kernel();
tty_driver_kref_get(driver);
tty->count++;
driver->ttys[idx] = tty;
+ unlock_kernel();
return 0;
}
return -ENOMEM;
@@ -1302,10 +1312,14 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
struct tty_struct *tty;
int retval;
+ lock_kernel();
/* Check if pty master is being opened multiple times */
if (driver->subtype == PTY_TYPE_MASTER &&
- (driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok)
+ (driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) {
+ unlock_kernel();
return ERR_PTR(-EIO);
+ }
+ unlock_kernel();
/*
* First time open is complex, especially for PTY devices.
@@ -1335,7 +1349,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
* If we fail here just call release_tty to clean up. No need
* to decrement the use counts, as release_tty doesn't care.
*/
-
retval = tty_ldisc_setup(tty, tty->link);
if (retval)
goto release_mem_out;
@@ -1350,7 +1363,9 @@ release_mem_out:
if (printk_ratelimit())
printk(KERN_INFO "tty_init_dev: ldisc open failed, "
"clearing slot %d\n", idx);
+ lock_kernel();
release_tty(tty, idx);
+ unlock_kernel();
return ERR_PTR(retval);
}
@@ -1464,7 +1479,17 @@ static void release_tty(struct tty_struct *tty, int idx)
tty_kref_put(tty);
}
-/*
+/**
+ * tty_release - vfs callback for close
+ * @inode: inode of tty
+ * @filp: file pointer for handle to tty
+ *
+ * Called the last time each file handle is closed that references
+ * this tty. There may however be several such references.
+ *
+ * Locking:
+ * Takes bkl. See tty_release_dev
+ *
* Even releasing the tty structures is a tricky business.. We have
* to be very careful that the structures are all released at the
* same time, as interrupts might otherwise get the wrong pointers.
@@ -1472,20 +1497,20 @@ static void release_tty(struct tty_struct *tty, int idx)
* WSH 09/09/97: rewritten to avoid some nasty race conditions that could
* lead to double frees or releasing memory still in use.
*/
-void tty_release_dev(struct file *filp)
+
+int tty_release(struct inode *inode, struct file *filp)
{
struct tty_struct *tty, *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
int devpts;
int idx;
char buf[64];
- struct inode *inode;
- inode = filp->f_path.dentry->d_inode;
tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, inode, "tty_release_dev"))
- return;
+ return 0;
+ lock_kernel();
check_tty_count(tty, "tty_release_dev");
tty_fasync(-1, filp, 0);
@@ -1500,19 +1525,22 @@ void tty_release_dev(struct file *filp)
if (idx < 0 || idx >= tty->driver->num) {
printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
"free (%s)\n", tty->name);
- return;
+ unlock_kernel();
+ return 0;
}
if (!devpts) {
if (tty != tty->driver->ttys[idx]) {
+ unlock_kernel();
printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
"for (%s)\n", idx, tty->name);
- return;
+ return 0;
}
if (tty->termios != tty->driver->termios[idx]) {
+ unlock_kernel();
printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
"for (%s)\n",
idx, tty->name);
- return;
+ return 0;
}
}
#endif
@@ -1526,26 +1554,30 @@ void tty_release_dev(struct file *filp)
if (tty->driver->other &&
!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
if (o_tty != tty->driver->other->ttys[idx]) {
+ unlock_kernel();
printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
"not o_tty for (%s)\n",
idx, tty->name);
- return;
+			return 0;
}
if (o_tty->termios != tty->driver->other->termios[idx]) {
+ unlock_kernel();
printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
"not o_termios for (%s)\n",
idx, tty->name);
- return;
+ return 0;
}
if (o_tty->link != tty) {
+ unlock_kernel();
printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
- return;
+ return 0;
}
}
#endif
if (tty->ops->close)
tty->ops->close(tty, filp);
+ unlock_kernel();
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
@@ -1568,6 +1600,7 @@ void tty_release_dev(struct file *filp)
opens on /dev/tty */
mutex_lock(&tty_mutex);
+ lock_kernel();
tty_closing = tty->count <= 1;
o_tty_closing = o_tty &&
(o_tty->count <= (pty_master ? 1 : 0));
@@ -1598,6 +1631,7 @@ void tty_release_dev(struct file *filp)
printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
"active!\n", tty_name(tty, buf));
+ unlock_kernel();
mutex_unlock(&tty_mutex);
schedule();
}
@@ -1661,8 +1695,10 @@ void tty_release_dev(struct file *filp)
mutex_unlock(&tty_mutex);
/* check whether both sides are closing ... */
- if (!tty_closing || (o_tty && !o_tty_closing))
- return;
+ if (!tty_closing || (o_tty && !o_tty_closing)) {
+ unlock_kernel();
+ return 0;
+ }
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "freeing tty structure...");
@@ -1680,10 +1716,12 @@ void tty_release_dev(struct file *filp)
/* Make this pty number available for reallocation */
if (devpts)
devpts_kill_index(inode, idx);
+ unlock_kernel();
+ return 0;
}
/**
- * __tty_open - open a tty device
+ * tty_open - open a tty device
* @inode: inode of device file
* @filp: file pointer to tty
*
@@ -1703,7 +1741,7 @@ void tty_release_dev(struct file *filp)
* ->siglock protects ->signal/->sighand
*/
-static int __tty_open(struct inode *inode, struct file *filp)
+static int tty_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty = NULL;
int noctty, retval;
@@ -1720,10 +1758,12 @@ retry_open:
retval = 0;
mutex_lock(&tty_mutex);
+ lock_kernel();
if (device == MKDEV(TTYAUX_MAJOR, 0)) {
tty = get_current_tty();
if (!tty) {
+ unlock_kernel();
mutex_unlock(&tty_mutex);
return -ENXIO;
}
@@ -1755,12 +1795,14 @@ retry_open:
goto got_driver;
}
}
+ unlock_kernel();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
driver = get_tty_driver(device, &index);
if (!driver) {
+ unlock_kernel();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
@@ -1770,6 +1812,7 @@ got_driver:
tty = tty_driver_lookup_tty(driver, inode, index);
if (IS_ERR(tty)) {
+ unlock_kernel();
mutex_unlock(&tty_mutex);
return PTR_ERR(tty);
}
@@ -1784,8 +1827,10 @@ got_driver:
mutex_unlock(&tty_mutex);
tty_driver_kref_put(driver);
- if (IS_ERR(tty))
+ if (IS_ERR(tty)) {
+ unlock_kernel();
return PTR_ERR(tty);
+ }
filp->private_data = tty;
file_move(filp, &tty->tty_files);
@@ -1813,11 +1858,15 @@ got_driver:
printk(KERN_DEBUG "error %d in opening %s...", retval,
tty->name);
#endif
- tty_release_dev(filp);
- if (retval != -ERESTARTSYS)
+ tty_release(inode, filp);
+ if (retval != -ERESTARTSYS) {
+ unlock_kernel();
return retval;
- if (signal_pending(current))
+ }
+ if (signal_pending(current)) {
+ unlock_kernel();
return retval;
+ }
schedule();
/*
* Need to reset f_op in case a hangup happened.
@@ -1826,8 +1875,11 @@ got_driver:
filp->f_op = &tty_fops;
goto retry_open;
}
+ unlock_kernel();
+
mutex_lock(&tty_mutex);
+ lock_kernel();
spin_lock_irq(&current->sighand->siglock);
if (!noctty &&
current->signal->leader &&
@@ -1835,45 +1887,14 @@ got_driver:
tty->session == NULL)
__proc_set_tty(current, tty);
spin_unlock_irq(&current->sighand->siglock);
+ unlock_kernel();
mutex_unlock(&tty_mutex);
return 0;
}
-/* BKL pushdown: scary code avoidance wrapper */
-static int tty_open(struct inode *inode, struct file *filp)
-{
- int ret;
-
- lock_kernel();
- ret = __tty_open(inode, filp);
- unlock_kernel();
- return ret;
-}
-
-
/**
- * tty_release - vfs callback for close
- * @inode: inode of tty
- * @filp: file pointer for handle to tty
- *
- * Called the last time each file handle is closed that references
- * this tty. There may however be several such references.
- *
- * Locking:
- * Takes bkl. See tty_release_dev
- */
-
-static int tty_release(struct inode *inode, struct file *filp)
-{
- lock_kernel();
- tty_release_dev(filp);
- unlock_kernel();
- return 0;
-}
-
-/**
* tty_poll - check tty status
* @filp: file being polled
* @wait: poll wait structures to update
@@ -2317,9 +2338,7 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
if (get_user(ldisc, p))
return -EFAULT;
- lock_kernel();
ret = tty_set_ldisc(tty, ldisc);
- unlock_kernel();
return ret;
}
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index feb55075819..3f653f7d849 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -34,6 +34,8 @@
#include <linux/vt_kern.h>
#include <linux/selection.h>
+#include <linux/smp_lock.h> /* For the moment */
+
#include <linux/kmod.h>
#include <linux/nsproxy.h>
@@ -443,8 +445,14 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
{
WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
- if (ld->ops->open)
- return ld->ops->open(tty);
+ if (ld->ops->open) {
+ int ret;
+		/* BKL here locks versus a hangup event */
+ lock_kernel();
+ ret = ld->ops->open(tty);
+ unlock_kernel();
+ return ret;
+ }
return 0;
}
@@ -545,6 +553,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
+ lock_kernel();
/*
* We need to look at the tty locking here for pty/tty pairs
* when both sides try to change in parallel.
@@ -558,10 +567,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
*/
if (tty->ldisc->ops->num == ldisc) {
+ unlock_kernel();
tty_ldisc_put(new_ldisc);
return 0;
}
+ unlock_kernel();
/*
* Problem: What do we do if this blocks ?
* We could deadlock here
@@ -582,6 +593,9 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
mutex_lock(&tty->ldisc_mutex);
}
+
+ lock_kernel();
+
set_bit(TTY_LDISC_CHANGING, &tty->flags);
/*
@@ -592,6 +606,8 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
tty->receive_room = 0;
o_ldisc = tty->ldisc;
+
+ unlock_kernel();
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
@@ -617,12 +633,14 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
flush_scheduled_work();
mutex_lock(&tty->ldisc_mutex);
+ lock_kernel();
if (test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
clear_bit(TTY_LDISC_CHANGING, &tty->flags);
mutex_unlock(&tty->ldisc_mutex);
tty_ldisc_put(new_ldisc);
+ unlock_kernel();
return -EIO;
}
@@ -664,6 +682,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (o_work)
schedule_delayed_work(&o_tty->buf.work, 1);
mutex_unlock(&tty->ldisc_mutex);
+ unlock_kernel();
return retval;
}
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index c63f3d33914..be492dd6643 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -25,19 +25,21 @@ void tty_port_init(struct tty_port *port)
init_waitqueue_head(&port->close_wait);
init_waitqueue_head(&port->delta_msr_wait);
mutex_init(&port->mutex);
+ mutex_init(&port->buf_mutex);
spin_lock_init(&port->lock);
port->close_delay = (50 * HZ) / 100;
port->closing_wait = (3000 * HZ) / 100;
+ kref_init(&port->kref);
}
EXPORT_SYMBOL(tty_port_init);
int tty_port_alloc_xmit_buf(struct tty_port *port)
{
/* We may sleep in get_zeroed_page() */
- mutex_lock(&port->mutex);
+ mutex_lock(&port->buf_mutex);
if (port->xmit_buf == NULL)
port->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
- mutex_unlock(&port->mutex);
+ mutex_unlock(&port->buf_mutex);
if (port->xmit_buf == NULL)
return -ENOMEM;
return 0;
@@ -46,15 +48,32 @@ EXPORT_SYMBOL(tty_port_alloc_xmit_buf);
void tty_port_free_xmit_buf(struct tty_port *port)
{
- mutex_lock(&port->mutex);
+ mutex_lock(&port->buf_mutex);
if (port->xmit_buf != NULL) {
free_page((unsigned long)port->xmit_buf);
port->xmit_buf = NULL;
}
- mutex_unlock(&port->mutex);
+ mutex_unlock(&port->buf_mutex);
}
EXPORT_SYMBOL(tty_port_free_xmit_buf);
+static void tty_port_destructor(struct kref *kref)
+{
+ struct tty_port *port = container_of(kref, struct tty_port, kref);
+ if (port->xmit_buf)
+ free_page((unsigned long)port->xmit_buf);
+ if (port->ops->destruct)
+ port->ops->destruct(port);
+ else
+ kfree(port);
+}
+
+void tty_port_put(struct tty_port *port)
+{
+ if (port)
+ kref_put(&port->kref, tty_port_destructor);
+}
+EXPORT_SYMBOL(tty_port_put);
/**
* tty_port_tty_get - get a tty reference
@@ -99,10 +118,11 @@ EXPORT_SYMBOL(tty_port_tty_set);
static void tty_port_shutdown(struct tty_port *port)
{
+ mutex_lock(&port->mutex);
if (port->ops->shutdown &&
test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags))
port->ops->shutdown(port);
-
+ mutex_unlock(&port->mutex);
}
/**
@@ -120,8 +140,10 @@ void tty_port_hangup(struct tty_port *port)
spin_lock_irqsave(&port->lock, flags);
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
- if (port->tty)
+ if (port->tty) {
+ set_bit(TTY_IO_ERROR, &port->tty->flags);
tty_kref_put(port->tty);
+ }
port->tty = NULL;
spin_unlock_irqrestore(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
@@ -198,7 +220,7 @@ EXPORT_SYMBOL(tty_port_lower_dtr_rts);
* management of these lines. Note that the dtr/rts raise is done each
* iteration as a hangup may have previously dropped them while we wait.
*/
-
+
int tty_port_block_til_ready(struct tty_port *port,
struct tty_struct *tty, struct file *filp)
{
@@ -253,7 +275,8 @@ int tty_port_block_til_ready(struct tty_port *port,
tty_port_raise_dtr_rts(port);
prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
- /* Check for a hangup or uninitialised port. Return accordingly */
+ /* Check for a hangup or uninitialised port.
+ Return accordingly */
if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
if (port->flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
@@ -285,11 +308,11 @@ int tty_port_block_til_ready(struct tty_port *port,
port->flags |= ASYNC_NORMAL_ACTIVE;
spin_unlock_irqrestore(&port->lock, flags);
return retval;
-
}
EXPORT_SYMBOL(tty_port_block_til_ready);
-int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct file *filp)
+int tty_port_close_start(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp)
{
unsigned long flags;
@@ -299,7 +322,7 @@ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct f
return 0;
}
- if( tty->count == 1 && port->count != 1) {
+ if (tty->count == 1 && port->count != 1) {
printk(KERN_WARNING
"tty_port_close_start: tty->count = 1 port count = %d.\n",
port->count);
@@ -331,12 +354,20 @@ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct f
long timeout;
if (bps > 1200)
- timeout = max_t(long, (HZ * 10 * port->drain_delay) / bps,
- HZ / 10);
+ timeout = max_t(long,
+ (HZ * 10 * port->drain_delay) / bps, HZ / 10);
else
timeout = 2 * HZ;
schedule_timeout_interruptible(timeout);
}
+ /* Flush the ldisc buffering */
+ tty_ldisc_flush(tty);
+
+ /* Drop DTR/RTS if HUPCL is set. This causes any attached modem to
+ hang up the line */
+ if (tty->termios->c_cflag & HUPCL)
+ tty_port_lower_dtr_rts(port);
+
/* Don't call port->drop for the last reference. Callers will want
to drop the last active reference in ->shutdown() or the tty
shutdown path */
@@ -348,11 +379,6 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
{
unsigned long flags;
- tty_ldisc_flush(tty);
-
- if (tty->termios->c_cflag & HUPCL)
- tty_port_lower_dtr_rts(port);
-
spin_lock_irqsave(&port->lock, flags);
tty->closing = 0;
@@ -377,7 +403,42 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty,
if (tty_port_close_start(port, tty, filp) == 0)
return;
tty_port_shutdown(port);
+ set_bit(TTY_IO_ERROR, &tty->flags);
tty_port_close_end(port, tty);
tty_port_tty_set(port, NULL);
}
EXPORT_SYMBOL(tty_port_close);
+
+int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+ struct file *filp)
+{
+ spin_lock_irq(&port->lock);
+ if (!tty_hung_up_p(filp))
+ ++port->count;
+ spin_unlock_irq(&port->lock);
+ tty_port_tty_set(port, tty);
+
+ /*
+ * Do the device-specific open only if the hardware isn't
+ * already initialized. Serialize open and shutdown using the
+ * port mutex.
+ */
+
+ mutex_lock(&port->mutex);
+
+ if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ if (port->ops->activate) {
+ int retval = port->ops->activate(port, tty);
+ if (retval) {
+ mutex_unlock(&port->mutex);
+ return retval;
+ }
+ }
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ }
+ mutex_unlock(&port->mutex);
+ return tty_port_block_til_ready(port, tty, filp);
+}
+
+EXPORT_SYMBOL(tty_port_open);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1e3d728dbf7..50faa1fb0f0 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -164,6 +164,9 @@ module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
module_param(global_cursor_default, int, S_IRUGO | S_IWUSR);
+static int cur_default = CUR_DEFAULT;
+module_param(cur_default, int, S_IRUGO | S_IWUSR);
+
/*
* ignore_poke: don't unblank the screen when things are typed. This is
* mainly for the privacy of braille terminal users.
@@ -184,12 +187,10 @@ static DECLARE_WORK(console_work, console_callback);
* fg_console is the current virtual console,
* last_console is the last used one,
* want_console is the console we want to switch to,
- * kmsg_redirect is the console for kernel messages,
*/
int fg_console;
int last_console;
int want_console = -1;
-int kmsg_redirect;
/*
* For each existing display, we have a pointer to console currently visible
@@ -1638,7 +1639,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
/* do not do set_leds here because this causes an endless tasklet loop
when the keyboard hasn't been initialized yet */
- vc->vc_cursor_type = CUR_DEFAULT;
+ vc->vc_cursor_type = cur_default;
vc->vc_complement_mask = vc->vc_s_complement_mask;
default_attr(vc);
@@ -1840,7 +1841,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
if (vc->vc_par[0])
vc->vc_cursor_type = vc->vc_par[0] | (vc->vc_par[1] << 8) | (vc->vc_par[2] << 16);
else
- vc->vc_cursor_type = CUR_DEFAULT;
+ vc->vc_cursor_type = cur_default;
return;
}
break;
@@ -2434,6 +2435,37 @@ struct tty_driver *console_driver;
#ifdef CONFIG_VT_CONSOLE
+/**
+ * vt_kmsg_redirect() - Sets/gets the kernel message console
+ * @new: The new virtual terminal number or -1 if the console should stay
+ * unchanged
+ *
+ * By default, the kernel messages are always printed on the current virtual
+ * console. However, the user may modify that default with the
+ * TIOCL_SETKMSGREDIRECT ioctl call.
+ *
+ * This function sets the kernel message console to be @new. It returns the old
+ * virtual console number. The virtual terminal number 0 (both as parameter and
+ * return value) means no redirection (i.e. always printed on the currently
+ * active console).
+ *
+ * The parameter -1 means that only the current console is returned, but the
+ * value is not modified. You may use the macro vt_get_kmsg_redirect() in that
+ * case to make the code more understandable.
+ *
+ * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
+ * the parameter and always returns 0.
+ */
+int vt_kmsg_redirect(int new)
+{
+ static int kmsg_con;
+
+ if (new != -1)
+ return xchg(&kmsg_con, new);
+ else
+ return kmsg_con;
+}
+
/*
* Console on virtual terminal
*
@@ -2448,6 +2480,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
const ushort *start;
ushort cnt = 0;
ushort myx;
+ int kmsg_console;
/* console busy or not yet initialized */
if (!printable)
@@ -2455,8 +2488,9 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
if (!spin_trylock(&printing_lock))
return;
- if (kmsg_redirect && vc_cons_allocated(kmsg_redirect - 1))
- vc = vc_cons[kmsg_redirect - 1].d;
+ kmsg_console = vt_get_kmsg_redirect();
+ if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
+ vc = vc_cons[kmsg_console - 1].d;
/* read `x' only after setting currcons properly (otherwise
the `x' macro will read the x of the foreground console). */
@@ -2613,7 +2647,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
ret = set_vesa_blanking(p);
break;
case TIOCL_GETKMSGREDIRECT:
- data = kmsg_redirect;
+ data = vt_get_kmsg_redirect();
ret = __put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
@@ -2623,7 +2657,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
if (get_user(data, p+1))
ret = -EFAULT;
else
- kmsg_redirect = data;
+ vt_kmsg_redirect(data);
}
break;
case TIOCL_GETFGCONSOLE:
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
new file mode 100644
index 00000000000..08f726c5fee
--- /dev/null
+++ b/drivers/clocksource/Kconfig
@@ -0,0 +1,9 @@
+config CS5535_CLOCK_EVENT_SRC
+ tristate "CS5535/CS5536 high-res timer (MFGPT) events"
+ depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT
+ help
+ This driver provides a clock event source based on the MFGPT
+ timer(s) in the CS5535 and CS5536 companion chips.
+ MFGPTs have a better resolution and max interval than the
+ generic PIT, and are suitable for use as high-res timers.
+
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index eef216f7f61..be61ece6330 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_CYCLONE_TIMER) += cyclone.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
+obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
new file mode 100644
index 00000000000..27d20fac19d
--- /dev/null
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -0,0 +1,197 @@
+/*
+ * Clock event driver for the CS5535/CS5536
+ *
+ * Copyright (C) 2006, Advanced Micro Devices, Inc.
+ * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/cs5535.h>
+#include <linux/clockchips.h>
+
+#define DRV_NAME "cs5535-clockevt"
+
+static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+module_param_named(irq, timer_irq, int, 0644);
+MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
+
+/*
+ * We are using the 32.768kHz input clock - it's the only one that has the
+ * ranges we find desirable. The following table lists the suitable
+ * divisors and the associated Hz, minimum interval and the maximum interval:
+ *
+ * Divisor Hz Min Delta (s) Max Delta (s)
+ * 1 32768 .00048828125 2.000
+ * 2 16384 .0009765625 4.000
+ * 4 8192 .001953125 8.000
+ * 8 4096 .00390625 16.000
+ * 16 2048 .0078125 32.000
+ * 32 1024 .015625 64.000
+ * 64 512 .03125 128.000
+ * 128 256 .0625 256.000
+ * 256 128 .125 512.000
+ */
+
+static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
+static struct cs5535_mfgpt_timer *cs5535_event_clock;
+
+/* Selected from the table above */
+
+#define MFGPT_DIVISOR 16
+#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
+#define MFGPT_HZ (32768 / MFGPT_DIVISOR)
+#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
+
+/*
+ * The MFGPT timers on the CS5536 provide us with suitable timers to use
+ * as clock event sources - not as good as a HPET or APIC, but certainly
+ * better than the PIT. This isn't a general purpose MFGPT driver, but
+ * a simplified one designed specifically to act as a clock event source.
+ * For full details about the MFGPT, please consult the CS5536 data sheet.
+ */
+
+static void disable_timer(struct cs5535_mfgpt_timer *timer)
+{
+ /* avoid races by clearing CMP1 and CMP2 unconditionally */
+ cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
+ (uint16_t) ~MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP1 |
+ MFGPT_SETUP_CMP2);
+}
+
+static void start_timer(struct cs5535_mfgpt_timer *timer, uint16_t delta)
+{
+ cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, delta);
+ cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
+
+ cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
+ MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
+}
+
+static void mfgpt_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ disable_timer(cs5535_event_clock);
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC)
+ start_timer(cs5535_event_clock, MFGPT_PERIODIC);
+
+ cs5535_tick_mode = mode;
+}
+
+static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ start_timer(cs5535_event_clock, delta);
+ return 0;
+}
+
+static struct clock_event_device cs5535_clockevent = {
+ .name = DRV_NAME,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = mfgpt_set_mode,
+ .set_next_event = mfgpt_next_event,
+ .rating = 250,
+ .cpumask = cpu_all_mask,
+ .shift = 32
+};
+
+static irqreturn_t mfgpt_tick(int irq, void *dev_id)
+{
+ uint16_t val = cs5535_mfgpt_read(cs5535_event_clock, MFGPT_REG_SETUP);
+
+ /* See if the interrupt was for us */
+ if (!(val & (MFGPT_SETUP_SETUP | MFGPT_SETUP_CMP2 | MFGPT_SETUP_CMP1)))
+ return IRQ_NONE;
+
+ /* Turn off the clock (and clear the event) */
+ disable_timer(cs5535_event_clock);
+
+ if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
+ return IRQ_HANDLED;
+
+ /* Clear the counter */
+ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_COUNTER, 0);
+
+ /* Restart the clock in periodic mode */
+
+ if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC)
+ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP,
+ MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
+
+ cs5535_clockevent.event_handler(&cs5535_clockevent);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mfgptirq = {
+ .handler = mfgpt_tick,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
+ .name = DRV_NAME,
+};
+
+static int __init cs5535_mfgpt_init(void)
+{
+ struct cs5535_mfgpt_timer *timer;
+ int ret;
+ uint16_t val;
+
+ timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+ if (!timer) {
+		printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
+ return -ENODEV;
+ }
+ cs5535_event_clock = timer;
+
+ /* Set up the IRQ on the MFGPT side */
+ if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
+ printk(KERN_ERR DRV_NAME ": Could not set up IRQ %d\n",
+ timer_irq);
+ return -EIO;
+ }
+
+ /* And register it with the kernel */
+ ret = setup_irq(timer_irq, &mfgptirq);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME ": Unable to set up the interrupt.\n");
+ goto err;
+ }
+
+ /* Set the clock scale and enable the event mode for CMP2 */
+ val = MFGPT_SCALE | (3 << 8);
+
+ cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP, val);
+
+ /* Set up the clock event */
+ cs5535_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC,
+ cs5535_clockevent.shift);
+ cs5535_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
+ &cs5535_clockevent);
+ cs5535_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
+ &cs5535_clockevent);
+
+ printk(KERN_INFO DRV_NAME
+ ": Registering MFGPT timer as a clock event, using IRQ %d\n",
+ timer_irq);
+ clockevents_register_device(&cs5535_clockevent);
+
+ return 0;
+
+err:
+ cs5535_mfgpt_release_irq(cs5535_event_clock, MFGPT_CMP2, &timer_irq);
+ printk(KERN_ERR DRV_NAME ": Unable to set up the MFGPT clock source\n");
+ return -EIO;
+}
+
+module_init(cs5535_mfgpt_init);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_DESCRIPTION("CS5535/CS5536 MFGPT clock event driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ff57c40e9b8..67bc2ece7b4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* - Lock should not be held across
* __cpufreq_governor(data, CPUFREQ_GOV_STOP);
*/
-static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
int lock_policy_rwsem_##mode \
(int cpu) \
{ \
- int policy_cpu = per_cpu(policy_cpu, cpu); \
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
BUG_ON(policy_cpu == -1); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
if (unlikely(!cpu_online(cpu))) { \
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
void unlock_policy_rwsem_read(int cpu)
{
- int policy_cpu = per_cpu(policy_cpu, cpu);
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
void unlock_policy_rwsem_write(int cpu)
{
- int policy_cpu = per_cpu(policy_cpu, cpu);
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
BUG_ON(policy_cpu == -1);
up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
@@ -647,6 +647,21 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
return policy->governor->show_setspeed(policy, buf);
}
+/**
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
+ */
+static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int limit;
+ int ret;
+ if (cpufreq_driver->bios_limit) {
+ ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+ if (!ret)
+ return sprintf(buf, "%u\n", limit);
+ }
+ return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+}
+
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
@@ -666,6 +681,7 @@ define_one_ro(cpuinfo_transition_latency);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
+define_one_ro(bios_limit);
define_one_ro(related_cpus);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
@@ -767,8 +783,9 @@ static struct kobj_type ktype_cpufreq = {
* 0: Success
* Positive: When we have a managed CPU and the sysfs got symlinked
*/
-int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+static int cpufreq_add_dev_policy(unsigned int cpu,
+ struct cpufreq_policy *policy,
+ struct sys_device *sys_dev)
{
int ret = 0;
#ifdef CONFIG_SMP
@@ -801,7 +818,7 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
/* Set proper policy_cpu */
unlock_policy_rwsem_write(cpu);
- per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+ per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
if (lock_policy_rwsem_write(cpu) < 0) {
/* Should not go through policy unlock path */
@@ -842,7 +859,8 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
/* symlink affected CPUs */
-int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
+static int cpufreq_add_dev_symlink(unsigned int cpu,
+ struct cpufreq_policy *policy)
{
unsigned int j;
int ret = 0;
@@ -869,8 +887,9 @@ int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
return ret;
}
-int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+static int cpufreq_add_dev_interface(unsigned int cpu,
+ struct cpufreq_policy *policy,
+ struct sys_device *sys_dev)
{
struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
@@ -902,13 +921,18 @@ int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
if (ret)
goto err_out_kobj_put;
}
+ if (cpufreq_driver->bios_limit) {
+ ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ if (ret)
+ goto err_out_kobj_put;
+ }
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
if (!cpu_online(j))
continue;
per_cpu(cpufreq_cpu_data, j) = policy;
- per_cpu(policy_cpu, j) = policy->cpu;
+ per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -996,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* Initially set CPU itself as the policy_cpu */
- per_cpu(policy_cpu, cpu) = cpu;
+ per_cpu(cpufreq_policy_cpu, cpu) = cpu;
ret = (lock_policy_rwsem_write(cpu) < 0);
WARN_ON(ret);
@@ -1978,7 +2002,7 @@ static int __init cpufreq_core_init(void)
int cpu;
for_each_possible_cpu(cpu) {
- per_cpu(policy_cpu, cpu) = -1;
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c7b081b839f..599a40b25cb 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -164,20 +164,22 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
};
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_max(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
"sysfs file is deprecated - used by: %s\n", current->comm);
return sprintf(buf, "%u\n", -1U);
}
-static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
#define define_one_ro(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
@@ -186,7 +188,7 @@ define_one_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
-(struct cpufreq_policy *unused, char *buf) \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
@@ -197,8 +199,40 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+/*** delete after deprecation time ***/
+#define DEPRECATION_MSG(file_name) \
+ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
+ "interface is deprecated - " #file_name "\n");
+
+#define show_one_old(file_name) \
+static ssize_t show_##file_name##_old \
+(struct cpufreq_policy *unused, char *buf) \
+{ \
+ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
+ "interface is deprecated - " #file_name "\n"); \
+ return show_##file_name(NULL, NULL, buf); \
+}
+show_one_old(sampling_rate);
+show_one_old(sampling_down_factor);
+show_one_old(up_threshold);
+show_one_old(down_threshold);
+show_one_old(ignore_nice_load);
+show_one_old(freq_step);
+show_one_old(sampling_rate_min);
+show_one_old(sampling_rate_max);
+
+#define define_one_ro_old(object, _name) \
+static struct freq_attr object = \
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
+define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+
+/*** delete after deprecation time ***/
+
+static ssize_t store_sampling_down_factor(struct kobject *a,
+ struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -214,8 +248,8 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -231,8 +265,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -251,8 +285,8 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -272,8 +306,8 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -308,8 +342,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
return count;
}
-static ssize_t store_freq_step(struct cpufreq_policy *policy,
- const char *buf, size_t count)
+static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -331,7 +365,7 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
}
#define define_one_rw(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
@@ -358,6 +392,53 @@ static struct attribute_group dbs_attr_group = {
.name = "conservative",
};
+/*** delete after deprecation time ***/
+
+#define write_one_old(file_name) \
+static ssize_t store_##file_name##_old \
+(struct cpufreq_policy *unused, const char *buf, size_t count) \
+{ \
+ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
+ "interface is deprecated - " #file_name "\n"); \
+ return store_##file_name(NULL, NULL, buf, count); \
+}
+write_one_old(sampling_rate);
+write_one_old(sampling_down_factor);
+write_one_old(up_threshold);
+write_one_old(down_threshold);
+write_one_old(ignore_nice_load);
+write_one_old(freq_step);
+
+#define define_one_rw_old(object, _name) \
+static struct freq_attr object = \
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+define_one_rw_old(sampling_rate_old, sampling_rate);
+define_one_rw_old(sampling_down_factor_old, sampling_down_factor);
+define_one_rw_old(up_threshold_old, up_threshold);
+define_one_rw_old(down_threshold_old, down_threshold);
+define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
+define_one_rw_old(freq_step_old, freq_step);
+
+static struct attribute *dbs_attributes_old[] = {
+ &sampling_rate_max_old.attr,
+ &sampling_rate_min_old.attr,
+ &sampling_rate_old.attr,
+ &sampling_down_factor_old.attr,
+ &up_threshold_old.attr,
+ &down_threshold_old.attr,
+ &ignore_nice_load_old.attr,
+ &freq_step_old.attr,
+ NULL
+};
+
+static struct attribute_group dbs_attr_group_old = {
+ .attrs = dbs_attributes_old,
+ .name = "conservative",
+};
+
+/*** delete after deprecation time ***/
+
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -530,7 +611,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+ rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
if (rc) {
mutex_unlock(&dbs_mutex);
return rc;
@@ -564,6 +645,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (latency == 0)
latency = 1;
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &dbs_attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_mutex);
+ return rc;
+ }
+
/*
* conservative does not implement micro-accounting like the ondemand
* governor, thus we are bound to jiffies/HZ
@@ -591,7 +679,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+ sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
dbs_enable--;
mutex_destroy(&this_dbs_info->timer_mutex);
@@ -605,6 +693,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
CPUFREQ_TRANSITION_NOTIFIER);
mutex_unlock(&dbs_mutex);
+ if (!dbs_enable)
+ sysfs_remove_group(cpufreq_global_kobject,
+ &dbs_attr_group);
break;
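
The conservative-governor hunks above move the tunables from per-policy freq_attr entries to a single global_attr group registered on cpufreq_global_kobject, keeping a deprecated per-policy group that only forwards writes and prints a one-time warning. Below is a minimal sketch of that global-attribute pattern for one read/write tunable; it is not part of the patch, and every example_* name is invented for illustration.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>

static unsigned int example_threshold = 80;

static ssize_t show_example_threshold(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", example_threshold);
}

static ssize_t store_example_threshold(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        unsigned int input;

        if (sscanf(buf, "%u", &input) != 1 || input > 100)
                return -EINVAL;
        example_threshold = input;
        return count;
}

/* struct global_attr pairs show/store callbacks that take a kobject,
 * matching the new store_*() signatures introduced above. */
static struct global_attr example_threshold_attr =
        __ATTR(example_threshold, 0644,
               show_example_threshold, store_example_threshold);

static struct attribute *example_attrs[] = {
        &example_threshold_attr.attr,
        NULL
};

static struct attribute_group example_attr_group = {
        .attrs = example_attrs,
        .name = "conservative_example",
};

/* Registered once on the first GOV_START and removed when the last policy
 * stops, mirroring the patch:
 *        sysfs_create_group(cpufreq_global_kobject, &example_attr_group);
 *        sysfs_remove_group(cpufreq_global_kobject, &example_attr_group);
 */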
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a9bd3a05a68..05432216e22 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
/**
* show_available_freqs - show available frequencies for the specified CPU
*/
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
ssize_t count = 0;
struct cpufreq_frequency_table *table;
- if (!per_cpu(show_table, cpu))
+ if (!per_cpu(cpufreq_show_table, cpu))
return -ENODEV;
- table = per_cpu(show_table, cpu);
+ table = per_cpu(cpufreq_show_table, cpu);
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu)
{
dprintk("setting show_table for cpu %u to %p\n", cpu, table);
- per_cpu(show_table, cpu) = table;
+ per_cpu(cpufreq_show_table, cpu) = table;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
void cpufreq_frequency_table_put_attr(unsigned int cpu)
{
dprintk("clearing show_table for cpu %u\n", cpu);
- per_cpu(show_table, cpu) = NULL;
+ per_cpu(cpufreq_show_table, cpu) = NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
- return per_cpu(show_table, cpu);
+ return per_cpu(cpufreq_show_table, cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
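
The show_table rename in freq_table.c above (and the last_cword rename in padlock-aes just below) is needed because DEFINE_PER_CPU symbols share one global namespace, so generic names get a subsystem prefix to avoid clashes. A minimal sketch of the convention, with a hypothetical my_driver_freq_table symbol:

#include <linux/cpufreq.h>
#include <linux/percpu.h>

/* Prefixed per-CPU symbol; a bare name like "show_table" could collide
 * with an identically named per-CPU variable in another subsystem. */
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, my_driver_freq_table);

static struct cpufreq_frequency_table *my_driver_get_table(unsigned int cpu)
{
        /* per_cpu() picks the given CPU's instance of the variable */
        return per_cpu(my_driver_freq_table, cpu);
}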
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index a4bec3f919a..1c1ceb4f218 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -69,9 +69,6 @@ static int ladder_select_state(struct cpuidle_device *dev)
int last_residency, last_idx = ldev->last_state_idx;
int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
- if (unlikely(!ldev))
- return 0;
-
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
ladder_do_selection(ldev, last_idx, 0);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 84c51e17726..8c2f3703ec8 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -64,7 +64,7 @@ struct aes_ctx {
u32 *D;
};
-static DEFINE_PER_CPU(struct cword *, last_cword);
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
/* Tells whether the ACE is capable of generating
the extended key for a given key_len. */
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
ok:
for_each_online_cpu(cpu)
- if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
- &ctx->cword.decrypt == per_cpu(last_cword, cpu))
- per_cpu(last_cword, cpu) = NULL;
+ if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+ &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+ per_cpu(paes_last_cword, cpu) = NULL;
return 0;
}
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
{
int cpu = raw_smp_processor_id();
- if (cword != per_cpu(last_cword, cpu))
+ if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
asm volatile ("pushfl; popfl");
#else
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)
static inline void padlock_store_cword(struct cword *cword)
{
- per_cpu(last_cword, raw_smp_processor_id()) = cword;
+ per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
/*
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index eb140ff38c2..e02d74b1e89 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -111,6 +111,24 @@ config SH_DMAE
help
Enable support for the Renesas SuperH DMA controllers.
+config COH901318
+ bool "ST-Ericsson COH901318 DMA support"
+ select DMA_ENGINE
+ depends on ARCH_U300
+ help
+ Enable support for ST-Ericsson COH 901 318 DMA.
+
+config AMCC_PPC440SPE_ADMA
+ tristate "AMCC PPC440SPe ADMA support"
+ depends on 440SPe || 440SP
+ select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ help
+ Enable support for the AMCC PPC440SPe RAID engines.
+
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ bool
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index eca71ba78ae..807053d4823 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -10,3 +10,5 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c52ac9efd0b..f15112569c1 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1188,7 +1188,7 @@ static int at_dma_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops at_dma_dev_pm_ops = {
+static const struct dev_pm_ops at_dma_dev_pm_ops = {
.suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq,
};
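
The at_hdmac change above (and the identical dw_dmac change further down) constifies the dev_pm_ops table; it is only read by the PM core, so it can live in read-only data. A minimal sketch with hypothetical example_* callbacks:

#include <linux/device.h>
#include <linux/pm.h>

static int example_suspend_noirq(struct device *dev)
{
        return 0;
}

static int example_resume_noirq(struct device *dev)
{
        return 0;
}

/* const: the PM core never writes to this table */
static const struct dev_pm_ops example_pm_ops = {
        .suspend_noirq = example_suspend_noirq,
        .resume_noirq  = example_resume_noirq,
};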
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
new file mode 100644
index 00000000000..4a99cd94536
--- /dev/null
+++ b/drivers/dma/coh901318.c
@@ -0,0 +1,1325 @@
+/*
+ * drivers/dma/coh901318.c
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * DMA driver for COH 901 318
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/fs.h> /* everything... */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <mach/coh901318.h>
+
+#include "coh901318_lli.h"
+
+#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
+
+#ifdef VERBOSE_DEBUG
+#define COH_DBG(x) ({ if (1) x; 0; })
+#else
+#define COH_DBG(x) ({ if (0) x; 0; })
+#endif
+
+struct coh901318_desc {
+ struct dma_async_tx_descriptor desc;
+ struct list_head node;
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ struct coh901318_lli *data;
+ enum dma_data_direction dir;
+ int pending_irqs;
+ unsigned long flags;
+};
+
+struct coh901318_base {
+ struct device *dev;
+ void __iomem *virtbase;
+ struct coh901318_pool pool;
+ struct powersave pm;
+ struct dma_device dma_slave;
+ struct dma_device dma_memcpy;
+ struct coh901318_chan *chans;
+ struct coh901318_platform *platform;
+};
+
+struct coh901318_chan {
+ spinlock_t lock;
+ int allocated;
+ int completed;
+ int id;
+ int stopped;
+
+ struct work_struct free_work;
+ struct dma_chan chan;
+
+ struct tasklet_struct tasklet;
+
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free;
+
+ unsigned long nbr_active_done;
+ unsigned long busy;
+ int pending_irqs;
+
+ struct coh901318_base *base;
+};
+
+static void coh901318_list_print(struct coh901318_chan *cohc,
+ struct coh901318_lli *lli)
+{
+ struct coh901318_lli *l;
+ dma_addr_t addr = virt_to_phys(lli);
+ int i = 0;
+
+ while (addr) {
+ l = phys_to_virt(addr);
+ dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
+ ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+ i, l, l->control, l->src_addr, l->dst_addr,
+ l->link_addr, phys_to_virt(l->link_addr));
+ i++;
+ addr = l->link_addr;
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
+
+static struct coh901318_base *debugfs_dma_base;
+static struct dentry *dma_dentry;
+
+static int coh901318_debugfs_open(struct inode *inode, struct file *file)
+{
+
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static int coh901318_debugfs_read(struct file *file, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ u64 started_channels = debugfs_dma_base->pm.started_channels;
+ int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
+ int i;
+ int ret = 0;
+ char *dev_buf;
+ char *tmp;
+ int dev_size;
+
+ dev_buf = kmalloc(4*1024, GFP_KERNEL);
+ if (dev_buf == NULL)
+ goto err_kmalloc;
+ tmp = dev_buf;
+
+ tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+
+ for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
+ if (started_channels & (1 << i))
+ tmp += sprintf(tmp, "channel %d\n", i);
+
+ tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
+ dev_size = tmp - dev_buf;
+
+ /* Nothing more to read once the offset is past the end */
+ if (*f_pos > dev_size)
+ goto out;
+
+ if (count > dev_size - *f_pos)
+ count = dev_size - *f_pos;
+
+ if (copy_to_user(buf, dev_buf + *f_pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = count;
+ *f_pos += count;
+
+ out:
+ kfree(dev_buf);
+ return ret;
+
+ err_kmalloc:
+ return 0;
+}
+
+static const struct file_operations coh901318_debugfs_status_operations = {
+ .owner = THIS_MODULE,
+ .open = coh901318_debugfs_open,
+ .read = coh901318_debugfs_read,
+};
+
+
+static int __init init_coh901318_debugfs(void)
+{
+
+ dma_dentry = debugfs_create_dir("dma", NULL);
+
+ (void) debugfs_create_file("status",
+ S_IFREG | S_IRUGO,
+ dma_dentry, NULL,
+ &coh901318_debugfs_status_operations);
+ return 0;
+}
+
+static void __exit exit_coh901318_debugfs(void)
+{
+ debugfs_remove_recursive(dma_dentry);
+}
+
+module_init(init_coh901318_debugfs);
+module_exit(exit_coh901318_debugfs);
+#else
+
+#define COH901318_DEBUGFS_ASSIGN(x, y)
+
+#endif /* CONFIG_DEBUG_FS */
+
+static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct coh901318_chan, chan);
+}
+
+static inline dma_addr_t
+cohc_dev_addr(struct coh901318_chan *cohc)
+{
+ return cohc->base->platform->chan_conf[cohc->id].dev_addr;
+}
+
+static inline const struct coh901318_params *
+cohc_chan_param(struct coh901318_chan *cohc)
+{
+ return &cohc->base->platform->chan_conf[cohc->id].param;
+}
+
+static inline const struct coh_dma_channel *
+cohc_chan_conf(struct coh901318_chan *cohc)
+{
+ return &cohc->base->platform->chan_conf[cohc->id];
+}
+
+static void enable_powersave(struct coh901318_chan *cohc)
+{
+ unsigned long flags;
+ struct powersave *pm = &cohc->base->pm;
+
+ spin_lock_irqsave(&pm->lock, flags);
+
+ pm->started_channels &= ~(1ULL << cohc->id);
+
+ if (!pm->started_channels) {
+ /* DMA no longer intends to access memory */
+ cohc->base->platform->access_memory_state(cohc->base->dev,
+ false);
+ }
+
+ spin_unlock_irqrestore(&pm->lock, flags);
+}
+static void disable_powersave(struct coh901318_chan *cohc)
+{
+ unsigned long flags;
+ struct powersave *pm = &cohc->base->pm;
+
+ spin_lock_irqsave(&pm->lock, flags);
+
+ if (!pm->started_channels) {
+ /* DMA intends to access memory */
+ cohc->base->platform->access_memory_state(cohc->base->dev,
+ true);
+ }
+
+ pm->started_channels |= (1ULL << cohc->id);
+
+ spin_unlock_irqrestore(&pm->lock, flags);
+}
+
+static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
+{
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ writel(control,
+ virtbase + COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * channel);
+ return 0;
+}
+
+static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
+{
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ writel(conf,
+ virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING*channel);
+ return 0;
+}
+
+
+static int coh901318_start(struct coh901318_chan *cohc)
+{
+ u32 val;
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ disable_powersave(cohc);
+
+ val = readl(virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ /* Enable channel */
+ val |= COH901318_CX_CFG_CH_ENABLE;
+ writel(val, virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ return 0;
+}
+
+static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
+ struct coh901318_lli *data)
+{
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ BUG_ON(readl(virtbase + COH901318_CX_STAT +
+ COH901318_CX_STAT_SPACING*channel) &
+ COH901318_CX_STAT_ACTIVE);
+
+ writel(data->src_addr,
+ virtbase + COH901318_CX_SRC_ADDR +
+ COH901318_CX_SRC_ADDR_SPACING * channel);
+
+ writel(data->dst_addr, virtbase +
+ COH901318_CX_DST_ADDR +
+ COH901318_CX_DST_ADDR_SPACING * channel);
+
+ writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
+ COH901318_CX_LNK_ADDR_SPACING * channel);
+
+ writel(data->control, virtbase + COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING * channel);
+
+ return 0;
+}
+static dma_cookie_t
+coh901318_assign_cookie(struct coh901318_chan *cohc,
+ struct coh901318_desc *cohd)
+{
+ dma_cookie_t cookie = cohc->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ cohc->chan.cookie = cookie;
+ cohd->desc.cookie = cookie;
+
+ return cookie;
+}
+
+static struct coh901318_desc *
+coh901318_desc_get(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *desc;
+
+ if (list_empty(&cohc->free)) {
+ /* alloc new desc because we're out of used ones
+ * TODO: alloc a pile of descs instead of just one,
+ * avoid many small allocations.
+ */
+ desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+ if (desc == NULL)
+ goto out;
+ INIT_LIST_HEAD(&desc->node);
+ } else {
+ /* Reuse an old desc. */
+ desc = list_first_entry(&cohc->free,
+ struct coh901318_desc,
+ node);
+ list_del(&desc->node);
+ }
+
+ out:
+ return desc;
+}
+
+static void
+coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
+{
+ list_add_tail(&cohd->node, &cohc->free);
+}
+
+/* call with irq lock held */
+static void
+coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
+{
+ list_add_tail(&desc->node, &cohc->active);
+
+ BUG_ON(cohc->pending_irqs != 0);
+
+ cohc->pending_irqs = desc->pending_irqs;
+}
+
+static struct coh901318_desc *
+coh901318_first_active_get(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *d;
+
+ if (list_empty(&cohc->active))
+ return NULL;
+
+ d = list_first_entry(&cohc->active,
+ struct coh901318_desc,
+ node);
+ return d;
+}
+
+static void
+coh901318_desc_remove(struct coh901318_desc *cohd)
+{
+ list_del(&cohd->node);
+}
+
+static void
+coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
+{
+ list_add_tail(&desc->node, &cohc->queue);
+}
+
+static struct coh901318_desc *
+coh901318_first_queued(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *d;
+
+ if (list_empty(&cohc->queue))
+ return NULL;
+
+ d = list_first_entry(&cohc->queue,
+ struct coh901318_desc,
+ node);
+ return d;
+}
+
+/*
+ * DMA start/stop controls
+ */
+u32 coh901318_get_bytes_left(struct dma_chan *chan)
+{
+ unsigned long flags;
+ u32 ret;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Read transfer count value */
+ ret = readl(cohc->base->virtbase +
+ COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
+ cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(coh901318_get_bytes_left);
+
+
+/* Stops a transfer without losing data. Enables power save.
+ Use this function in conjunction with coh901318_continue(..)
+*/
+void coh901318_stop(struct dma_chan *chan)
+{
+ u32 val;
+ unsigned long flags;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Disable channel in HW */
+ val = readl(virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ /* Stopping an infinite transfer */
+ if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
+ (val & COH901318_CX_CFG_CH_ENABLE))
+ cohc->stopped = 1;
+
+
+ val &= ~COH901318_CX_CFG_CH_ENABLE;
+ /* Write the disable twice, HW bug workaround */
+ writel(val, virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+ writel(val, virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ /* Spin-wait for it to actually go inactive */
+ while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
+ channel) & COH901318_CX_STAT_ACTIVE)
+ cpu_relax();
+
+ /* Check if we stopped an active job */
+ if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
+ channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
+ cohc->stopped = 1;
+
+ enable_powersave(cohc);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+EXPORT_SYMBOL(coh901318_stop);
+
+/* Continues a transfer that has been stopped via coh901318_stop(..).
+ Power save is handled.
+*/
+void coh901318_continue(struct dma_chan *chan)
+{
+ u32 val;
+ unsigned long flags;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int channel = cohc->id;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ disable_powersave(cohc);
+
+ if (cohc->stopped) {
+ /* Enable channel in HW */
+ val = readl(cohc->base->virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING * channel);
+
+ val |= COH901318_CX_CFG_CH_ENABLE;
+
+ writel(val, cohc->base->virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING*channel);
+
+ cohc->stopped = 0;
+ }
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+EXPORT_SYMBOL(coh901318_continue);
+
+bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ unsigned int ch_nr = (unsigned int) chan_id;
+
+ if (ch_nr == to_coh901318_chan(chan)->id)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(coh901318_filter_id);
+
+/*
+ * DMA channel allocation
+ */
+static int coh901318_config(struct coh901318_chan *cohc,
+ struct coh901318_params *param)
+{
+ unsigned long flags;
+ const struct coh901318_params *p;
+ int channel = cohc->id;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ if (param)
+ p = param;
+ else
+ p = &cohc->base->platform->chan_conf[channel].param;
+
+ /* Clear any pending BE or TC interrupt */
+ if (channel < 32) {
+ writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
+ writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
+ } else {
+ writel(1 << (channel - 32), virtbase +
+ COH901318_BE_INT_CLEAR2);
+ writel(1 << (channel - 32), virtbase +
+ COH901318_TC_INT_CLEAR2);
+ }
+
+ coh901318_set_conf(cohc, p->config);
+ coh901318_set_ctrl(cohc, p->ctrl_lli_last);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return 0;
+}
+
+/* must lock when calling this function
+ * start queued jobs, if any
+ * TODO: start all queued jobs in one go
+ *
+ * Returns descriptor if queued job is started otherwise NULL.
+ * If the queue is empty NULL is returned.
+ */
+static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
+{
+ struct coh901318_desc *cohd_que;
+
+ /* start queued jobs, if any
+ * TODO: transmit all queued jobs in one go
+ */
+ cohd_que = coh901318_first_queued(cohc);
+
+ if (cohd_que != NULL) {
+ /* Remove from queue */
+ coh901318_desc_remove(cohd_que);
+ /* initiate DMA job */
+ cohc->busy = 1;
+
+ coh901318_desc_submit(cohc, cohd_que);
+
+ coh901318_prep_linked_list(cohc, cohd_que->data);
+
+ /* start dma job */
+ coh901318_start(cohc);
+
+ }
+
+ return cohd_que;
+}
+
+static void dma_tasklet(unsigned long data)
+{
+ struct coh901318_chan *cohc = (struct coh901318_chan *) data;
+ struct coh901318_desc *cohd_fin;
+ unsigned long flags;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* get first active entry from list */
+ cohd_fin = coh901318_first_active_get(cohc);
+
+ if (cohd_fin == NULL)
+ goto err;
+
+ BUG_ON(cohd_fin->pending_irqs == 0);
+
+ cohd_fin->pending_irqs--;
+ cohc->completed = cohd_fin->desc.cookie;
+
+ if (cohc->nbr_active_done == 0) {
+ spin_unlock_irqrestore(&cohc->lock, flags);
+ return;
+ }
+
+ if (!cohd_fin->pending_irqs) {
+ /* release the lli allocation */
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
+ }
+
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
+ " nbr_active_done %ld\n", __func__,
+ cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+
+ /* callback to client */
+ callback = cohd_fin->desc.callback;
+ callback_param = cohd_fin->desc.callback_param;
+
+ if (!cohd_fin->pending_irqs) {
+ coh901318_desc_remove(cohd_fin);
+
+ /* return desc to free-list */
+ coh901318_desc_free(cohc, cohd_fin);
+ }
+
+ if (cohc->nbr_active_done)
+ cohc->nbr_active_done--;
+
+ if (cohc->nbr_active_done) {
+ if (cohc_chan_conf(cohc)->priority_high)
+ tasklet_hi_schedule(&cohc->tasklet);
+ else
+ tasklet_schedule(&cohc->tasklet);
+ }
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ if (callback)
+ callback(callback_param);
+
+ return;
+
+ err:
+ spin_unlock_irqrestore(&cohc->lock, flags);
+ dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
+}
+
+
+/* called from interrupt context */
+static void dma_tc_handle(struct coh901318_chan *cohc)
+{
+ BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
+ list_empty(&cohc->queue)));
+
+ if (!cohc->allocated)
+ return;
+
+ BUG_ON(cohc->pending_irqs == 0);
+
+ cohc->pending_irqs--;
+ cohc->nbr_active_done++;
+
+ if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+ cohc->busy = 0;
+
+ BUG_ON(list_empty(&cohc->active));
+
+ if (cohc_chan_conf(cohc)->priority_high)
+ tasklet_hi_schedule(&cohc->tasklet);
+ else
+ tasklet_schedule(&cohc->tasklet);
+}
+
+
+static irqreturn_t dma_irq_handler(int irq, void *dev_id)
+{
+ u32 status1;
+ u32 status2;
+ int i;
+ int ch;
+ struct coh901318_base *base = dev_id;
+ struct coh901318_chan *cohc;
+ void __iomem *virtbase = base->virtbase;
+
+ status1 = readl(virtbase + COH901318_INT_STATUS1);
+ status2 = readl(virtbase + COH901318_INT_STATUS2);
+
+ if (unlikely(status1 == 0 && status2 == 0)) {
+ dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
+ return IRQ_HANDLED;
+ }
+
+ /* TODO: consider handling the IRQ in a tasklet here to
+ * minimize interrupt latency */
+
+ /* Check the first 32 DMA channels for IRQ */
+ while (status1) {
+ /* Find first bit set, return as a number. */
+ i = ffs(status1) - 1;
+ ch = i;
+
+ cohc = &base->chans[ch];
+ spin_lock(&cohc->lock);
+
+ /* Mask off this bit */
+ status1 &= ~(1 << i);
+ /* Check the individual channel bits */
+ if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
+ dev_crit(COHC_2_DEV(cohc),
+ "DMA bus error on channel %d!\n", ch);
+ BUG_ON(1);
+ /* Clear BE interrupt */
+ __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
+ } else {
+ /* Caused by TC, really? */
+ if (unlikely(!test_bit(i, virtbase +
+ COH901318_TC_INT_STATUS1))) {
+ dev_warn(COHC_2_DEV(cohc),
+ "ignoring interrupt not caused by terminal count on channel %d\n", ch);
+ /* Clear TC interrupt */
+ BUG_ON(1);
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
+ } else {
+ /* Enable powersave if transfer has finished */
+ if (!(readl(virtbase + COH901318_CX_STAT +
+ COH901318_CX_STAT_SPACING*ch) &
+ COH901318_CX_STAT_ENABLED)) {
+ enable_powersave(cohc);
+ }
+
+ /* Must clear the TC interrupt before calling
+ * dma_tc_handle, in case dma_tc_handle initiates
+ * a new dma job
+ */
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
+
+ dma_tc_handle(cohc);
+ }
+ }
+ spin_unlock(&cohc->lock);
+ }
+
+ /* Check the remaining 32 DMA channels for IRQ */
+ while (status2) {
+ /* Find first bit set, return as a number. */
+ i = ffs(status2) - 1;
+ ch = i + 32;
+ cohc = &base->chans[ch];
+ spin_lock(&cohc->lock);
+
+ /* Mask off this bit */
+ status2 &= ~(1 << i);
+ /* Check the individual channel bits */
+ if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
+ dev_crit(COHC_2_DEV(cohc),
+ "DMA bus error on channel %d!\n", ch);
+ /* Clear BE interrupt */
+ BUG_ON(1);
+ __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
+ } else {
+ /* Caused by TC, really? */
+ if (unlikely(!test_bit(i, virtbase +
+ COH901318_TC_INT_STATUS2))) {
+ dev_warn(COHC_2_DEV(cohc),
+ "ignoring interrupt not caused by terminal count on channel %d\n", ch);
+ /* Clear TC interrupt */
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
+ BUG_ON(1);
+ } else {
+ /* Enable powersave if transfer has finished */
+ if (!(readl(virtbase + COH901318_CX_STAT +
+ COH901318_CX_STAT_SPACING*ch) &
+ COH901318_CX_STAT_ENABLED)) {
+ enable_powersave(cohc);
+ }
+ /* Must clear the TC interrupt before calling
+ * dma_tc_handle, in case dma_tc_handle initiates
+ * a new dma job
+ */
+ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
+
+ dma_tc_handle(cohc);
+ }
+ }
+ spin_unlock(&cohc->lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int coh901318_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
+ __func__, cohc->id);
+
+ if (chan->client_count > 1)
+ return -EBUSY;
+
+ coh901318_config(cohc, NULL);
+
+ cohc->allocated = 1;
+ cohc->completed = chan->cookie = 1;
+
+ return 1;
+}
+
+static void
+coh901318_free_chan_resources(struct dma_chan *chan)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int channel = cohc->id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Disable HW */
+ writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
+ COH901318_CX_CFG_SPACING*channel);
+ writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
+ COH901318_CX_CTRL_SPACING*channel);
+
+ cohc->allocated = 0;
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ chan->device->device_terminate_all(chan);
+}
+
+
+static dma_cookie_t
+coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
+ desc);
+ struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ tx->cookie = coh901318_assign_cookie(cohc, cohd);
+
+ coh901318_desc_queue(cohc, cohd);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+
+ return tx->cookie;
+}
+
+static struct dma_async_tx_descriptor *
+coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t size, unsigned long flags)
+{
+ struct coh901318_lli *data;
+ struct coh901318_desc *cohd;
+ unsigned long flg;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ int lli_len;
+ u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+
+ spin_lock_irqsave(&cohc->lock, flg);
+
+ dev_vdbg(COHC_2_DEV(cohc),
+ "[%s] channel %d src 0x%x dest 0x%x size %d\n",
+ __func__, cohc->id, src, dest, size);
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last lli */
+ ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
+
+ lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
+ if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
+ lli_len++;
+
+ data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
+
+ if (data == NULL)
+ goto err;
+
+ cohd = coh901318_desc_get(cohc);
+ cohd->sg = NULL;
+ cohd->sg_len = 0;
+ cohd->data = data;
+
+ cohd->pending_irqs =
+ coh901318_lli_fill_memcpy(
+ &cohc->base->pool, data, src, size, dest,
+ cohc_chan_param(cohc)->ctrl_lli_chained,
+ ctrl_last);
+ cohd->flags = flags;
+
+ COH_DBG(coh901318_list_print(cohc, data));
+
+ dma_async_tx_descriptor_init(&cohd->desc, chan);
+
+ cohd->desc.tx_submit = coh901318_tx_submit;
+
+ spin_unlock_irqrestore(&cohc->lock, flg);
+
+ return &cohd->desc;
+ err:
+ spin_unlock_irqrestore(&cohc->lock, flg);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction direction,
+ unsigned long flags)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_lli *data;
+ struct coh901318_desc *cohd;
+ struct scatterlist *sg;
+ int len = 0;
+ int size;
+ int i;
+ u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
+ u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
+ u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ unsigned long flg;
+
+ if (!sgl)
+ goto out;
+ if (sgl->length == 0)
+ goto out;
+
+ spin_lock_irqsave(&cohc->lock, flg);
+
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
+ __func__, sg_len, direction);
+
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Trigger interrupt after last lli */
+ ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
+
+ cohd = coh901318_desc_get(cohc);
+ cohd->sg = NULL;
+ cohd->sg_len = 0;
+ cohd->dir = direction;
+
+ if (direction == DMA_TO_DEVICE) {
+ u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
+ COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
+
+ ctrl_chained |= tx_flags;
+ ctrl_last |= tx_flags;
+ ctrl |= tx_flags;
+ } else if (direction == DMA_FROM_DEVICE) {
+ u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
+ COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
+
+ ctrl_chained |= rx_flags;
+ ctrl_last |= rx_flags;
+ ctrl |= rx_flags;
+ } else
+ goto err_direction;
+
+ dma_async_tx_descriptor_init(&cohd->desc, chan);
+
+ cohd->desc.tx_submit = coh901318_tx_submit;
+
+
+ /* The dma only supports transferring packets of up to
+ * MAX_DMA_PACKET_SIZE. Calculate the total number of
+ * dma elements required to send the entire sg list.
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ unsigned int factor;
+ size = sg_dma_len(sg);
+
+ if (size <= MAX_DMA_PACKET_SIZE) {
+ len++;
+ continue;
+ }
+
+ factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
+ if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
+ factor++;
+
+ len += factor;
+ }
+
+ data = coh901318_lli_alloc(&cohc->base->pool, len);
+
+ if (data == NULL)
+ goto err_dma_alloc;
+
+ /* fill in the allocated lli list */
+ cohd->pending_irqs =
+ coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ cohc_dev_addr(cohc),
+ ctrl_chained,
+ ctrl,
+ ctrl_last,
+ direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+ cohd->data = data;
+
+ cohd->flags = flags;
+
+ COH_DBG(coh901318_list_print(cohc, data));
+
+ spin_unlock_irqrestore(&cohc->lock, flg);
+
+ return &cohd->desc;
+ err_dma_alloc:
+ err_direction:
+ coh901318_desc_remove(cohd);
+ coh901318_desc_free(cohc, cohd);
+ spin_unlock_irqrestore(&cohc->lock, flg);
+ out:
+ return NULL;
+}
+
+static enum dma_status
+coh901318_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *done,
+ dma_cookie_t *used)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ int ret;
+
+ last_complete = cohc->completed;
+ last_used = chan->cookie;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return ret;
+}
+
+static void
+coh901318_issue_pending(struct dma_chan *chan)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Busy means that pending jobs are already being processed */
+ if (!cohc->busy)
+ coh901318_queue_start(cohc);
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+
+static void
+coh901318_terminate_all(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+ struct coh901318_desc *cohd;
+ void __iomem *virtbase = cohc->base->virtbase;
+
+ coh901318_stop(chan);
+
+ spin_lock_irqsave(&cohc->lock, flags);
+
+ /* Clear any pending BE or TC interrupt */
+ if (cohc->id < 32) {
+ writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
+ writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
+ } else {
+ writel(1 << (cohc->id - 32), virtbase +
+ COH901318_BE_INT_CLEAR2);
+ writel(1 << (cohc->id - 32), virtbase +
+ COH901318_TC_INT_CLEAR2);
+ }
+
+ enable_powersave(cohc);
+
+ while ((cohd = coh901318_first_active_get(cohc))) {
+ /* release the lli allocation */
+ coh901318_lli_free(&cohc->base->pool, &cohd->data);
+
+ coh901318_desc_remove(cohd);
+
+ /* return desc to free-list */
+ coh901318_desc_free(cohc, cohd);
+ }
+
+ while ((cohd = coh901318_first_queued(cohc))) {
+ /* release the lli allocation */
+ coh901318_lli_free(&cohc->base->pool, &cohd->data);
+
+ coh901318_desc_remove(cohd);
+
+ /* return desc to free-list */
+ coh901318_desc_free(cohc, cohd);
+ }
+
+
+ cohc->nbr_active_done = 0;
+ cohc->busy = 0;
+ cohc->pending_irqs = 0;
+
+ spin_unlock_irqrestore(&cohc->lock, flags);
+}
+void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
+ struct coh901318_base *base)
+{
+ int chans_i;
+ int i = 0;
+ struct coh901318_chan *cohc;
+
+ INIT_LIST_HEAD(&dma->channels);
+
+ for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
+ for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
+ cohc = &base->chans[i];
+
+ cohc->base = base;
+ cohc->chan.device = dma;
+ cohc->id = i;
+
+ /* TODO: do we really need this lock if only one
+ * client is connected to each channel?
+ */
+
+ spin_lock_init(&cohc->lock);
+
+ cohc->pending_irqs = 0;
+ cohc->nbr_active_done = 0;
+ cohc->busy = 0;
+ INIT_LIST_HEAD(&cohc->free);
+ INIT_LIST_HEAD(&cohc->active);
+ INIT_LIST_HEAD(&cohc->queue);
+
+ tasklet_init(&cohc->tasklet, dma_tasklet,
+ (unsigned long) cohc);
+
+ list_add_tail(&cohc->chan.device_node,
+ &dma->channels);
+ }
+ }
+}
+
+static int __init coh901318_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct coh901318_platform *pdata;
+ struct coh901318_base *base;
+ int irq;
+ struct resource *io;
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io)
+ goto err_get_resource;
+
+ /* Map DMA controller registers to virtual memory */
+ if (request_mem_region(io->start,
+ resource_size(io),
+ pdev->dev.driver->name) == NULL) {
+ err = -EBUSY;
+ goto err_request_mem;
+ }
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ goto err_no_platformdata;
+
+ base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
+ pdata->max_channels *
+ sizeof(struct coh901318_chan),
+ GFP_KERNEL);
+ if (!base)
+ goto err_alloc_coh_dma_channels;
+
+ base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
+
+ base->virtbase = ioremap(io->start, resource_size(io));
+ if (!base->virtbase) {
+ err = -ENOMEM;
+ goto err_no_ioremap;
+ }
+
+ base->dev = &pdev->dev;
+ base->platform = pdata;
+ spin_lock_init(&base->pm.lock);
+ base->pm.started_channels = 0;
+
+ COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
+
+ platform_set_drvdata(pdev, base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err_no_irq;
+
+ err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
+ "coh901318", base);
+ if (err) {
+ dev_crit(&pdev->dev,
+ "Cannot allocate IRQ for DMA controller!\n");
+ goto err_request_irq;
+ }
+
+ err = coh901318_pool_create(&base->pool, &pdev->dev,
+ sizeof(struct coh901318_lli),
+ 32);
+ if (err)
+ goto err_pool_create;
+
+ /* init channels for device transfers */
+ coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
+ base);
+
+ dma_cap_zero(base->dma_slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
+
+ base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
+ base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
+ base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
+ base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_slave.device_issue_pending = coh901318_issue_pending;
+ base->dma_slave.device_terminate_all = coh901318_terminate_all;
+ base->dma_slave.dev = &pdev->dev;
+
+ err = dma_async_device_register(&base->dma_slave);
+
+ if (err)
+ goto err_register_slave;
+
+ /* init channels for memcpy */
+ coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
+ base);
+
+ dma_cap_zero(base->dma_memcpy.cap_mask);
+ dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
+
+ base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
+ base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
+ base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
+ base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
+ base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
+ base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+ base->dma_memcpy.dev = &pdev->dev;
+ err = dma_async_device_register(&base->dma_memcpy);
+
+ if (err)
+ goto err_register_memcpy;
+
+ dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+ (u32) base->virtbase);
+
+ return err;
+
+ err_register_memcpy:
+ dma_async_device_unregister(&base->dma_slave);
+ err_register_slave:
+ coh901318_pool_destroy(&base->pool);
+ err_pool_create:
+ free_irq(platform_get_irq(pdev, 0), base);
+ err_request_irq:
+ err_no_irq:
+ iounmap(base->virtbase);
+ err_no_ioremap:
+ kfree(base);
+ err_alloc_coh_dma_channels:
+ err_no_platformdata:
+ release_mem_region(pdev->resource->start,
+ resource_size(pdev->resource));
+ err_request_mem:
+ err_get_resource:
+ return err;
+}
+
+static int __exit coh901318_remove(struct platform_device *pdev)
+{
+ struct coh901318_base *base = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&base->dma_memcpy);
+ dma_async_device_unregister(&base->dma_slave);
+ coh901318_pool_destroy(&base->pool);
+ free_irq(platform_get_irq(pdev, 0), base);
+ kfree(base);
+ iounmap(base->virtbase);
+ release_mem_region(pdev->resource->start,
+ resource_size(pdev->resource));
+ return 0;
+}
+
+
+static struct platform_driver coh901318_driver = {
+ .remove = __exit_p(coh901318_remove),
+ .driver = {
+ .name = "coh901318",
+ },
+};
+
+int __init coh901318_init(void)
+{
+ return platform_driver_probe(&coh901318_driver, coh901318_probe);
+}
+subsys_initcall(coh901318_init);
+
+void __exit coh901318_exit(void)
+{
+ platform_driver_unregister(&coh901318_driver);
+}
+module_exit(coh901318_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Per Friden");
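
coh901318_filter_id() is exported above so a client can pin a specific physical channel through the generic dmaengine allocator. A minimal sketch, not from the patch, of requesting a channel that way and submitting one memcpy descriptor; the channel number and the example_* helpers are hypothetical, and the filter prototype is assumed to come from <mach/coh901318.h>:

#include <linux/dmaengine.h>
#include <mach/coh901318.h>

static struct dma_chan *example_get_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* Ask for channel 3; the filter compares the cookie against
         * cohc->id exactly as coh901318_filter_id() does above. */
        return dma_request_channel(mask, coh901318_filter_id, (void *)3);
}

static dma_cookie_t example_memcpy(struct dma_chan *chan, dma_addr_t dst,
                                   dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                   DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        cookie = tx->tx_submit(tx);     /* lands in coh901318_tx_submit() */
        dma_async_issue_pending(chan);  /* kicks coh901318_issue_pending() */
        return cookie;
}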
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
new file mode 100644
index 00000000000..f5120f238a4
--- /dev/null
+++ b/drivers/dma/coh901318_lli.c
@@ -0,0 +1,318 @@
+/*
+ * drivers/dma/coh901318_lli.c
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Support functions for handling lli for dma
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/dmapool.h>
+#include <linux/memory.h>
+#include <mach/coh901318.h>
+
+#include "coh901318_lli.h"
+
+#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
+#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
+#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
+#else
+#define DEBUGFS_POOL_COUNTER_RESET(pool)
+#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
+#endif
+
+static struct coh901318_lli *
+coh901318_lli_next(struct coh901318_lli *data)
+{
+ if (data == NULL || data->link_addr == 0)
+ return NULL;
+
+ return (struct coh901318_lli *) data->virt_link_addr;
+}
+
+int coh901318_pool_create(struct coh901318_pool *pool,
+ struct device *dev,
+ size_t size, size_t align)
+{
+ spin_lock_init(&pool->lock);
+ pool->dev = dev;
+ pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
+
+ DEBUGFS_POOL_COUNTER_RESET(pool);
+ return 0;
+}
+
+int coh901318_pool_destroy(struct coh901318_pool *pool)
+{
+
+ dma_pool_destroy(pool->dmapool);
+ return 0;
+}
+
+struct coh901318_lli *
+coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
+{
+ int i;
+ struct coh901318_lli *head;
+ struct coh901318_lli *lli;
+ struct coh901318_lli *lli_prev;
+ dma_addr_t phy;
+
+ if (len == 0)
+ goto err;
+
+ spin_lock(&pool->lock);
+
+ head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
+
+ if (head == NULL)
+ goto err;
+
+ DEBUGFS_POOL_COUNTER_ADD(pool, 1);
+
+ lli = head;
+ lli->phy_this = phy;
+
+ for (i = 1; i < len; i++) {
+ lli_prev = lli;
+
+ lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
+
+ if (lli == NULL)
+ goto err_clean_up;
+
+ DEBUGFS_POOL_COUNTER_ADD(pool, 1);
+ lli->phy_this = phy;
+
+ lli_prev->link_addr = phy;
+ lli_prev->virt_link_addr = lli;
+ }
+
+ lli->link_addr = 0x00000000U;
+
+ spin_unlock(&pool->lock);
+
+ return head;
+
+ err:
+ spin_unlock(&pool->lock);
+ return NULL;
+
+ err_clean_up:
+ lli_prev->link_addr = 0x00000000U;
+ spin_unlock(&pool->lock);
+ coh901318_lli_free(pool, &head);
+ return NULL;
+}
+
+void coh901318_lli_free(struct coh901318_pool *pool,
+ struct coh901318_lli **lli)
+{
+ struct coh901318_lli *l;
+ struct coh901318_lli *next;
+
+ if (lli == NULL)
+ return;
+
+ l = *lli;
+
+ if (l == NULL)
+ return;
+
+ spin_lock(&pool->lock);
+
+ while (l->link_addr) {
+ next = l->virt_link_addr;
+ dma_pool_free(pool->dmapool, l, l->phy_this);
+ DEBUGFS_POOL_COUNTER_ADD(pool, -1);
+ l = next;
+ }
+ dma_pool_free(pool->dmapool, l, l->phy_this);
+ DEBUGFS_POOL_COUNTER_ADD(pool, -1);
+
+ spin_unlock(&pool->lock);
+ *lli = NULL;
+}
+
+int
+coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t source, unsigned int size,
+ dma_addr_t destination, u32 ctrl_chained,
+ u32 ctrl_eom)
+{
+ int s = size;
+ dma_addr_t src = source;
+ dma_addr_t dst = destination;
+
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ while (lli->link_addr) {
+ lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ s -= MAX_DMA_PACKET_SIZE;
+ lli = coh901318_lli_next(lli);
+
+ src += MAX_DMA_PACKET_SIZE;
+ dst += MAX_DMA_PACKET_SIZE;
+ }
+
+ lli->control = ctrl_eom | s;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ /* One irq per single transfer */
+ return 1;
+}
+
+int
+coh901318_lli_fill_single(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t buf, unsigned int size,
+ dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
+ enum dma_data_direction dir)
+{
+ int s = size;
+ dma_addr_t src;
+ dma_addr_t dst;
+
+
+ if (dir == DMA_TO_DEVICE) {
+ src = buf;
+ dst = dev_addr;
+
+ } else if (dir == DMA_FROM_DEVICE) {
+
+ src = dev_addr;
+ dst = buf;
+ } else {
+ return -EINVAL;
+ }
+
+ while (lli->link_addr) {
+ size_t block_size = MAX_DMA_PACKET_SIZE;
+ lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
+
+ /* If we are on the next-to-final block and there will
+ * be less than half a DMA packet left for the last
+ * block, then we want to make this block a little
+ * smaller to balance the sizes. This is meant to
+ * avoid too small transfers if the buffer size is
+ * (MAX_DMA_PACKET_SIZE*N + 1) */
+ if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
+ block_size = MAX_DMA_PACKET_SIZE/2;
+
+ s -= block_size;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ lli = coh901318_lli_next(lli);
+
+ if (dir == DMA_TO_DEVICE)
+ src += block_size;
+ else if (dir == DMA_FROM_DEVICE)
+ dst += block_size;
+ }
+
+ lli->control = ctrl_eom | s;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ /* One irq per single transfer */
+ return 1;
+}
+
+int
+coh901318_lli_fill_sg(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ struct scatterlist *sgl, unsigned int nents,
+ dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
+ u32 ctrl_last,
+ enum dma_data_direction dir, u32 ctrl_irq_mask)
+{
+ int i;
+ struct scatterlist *sg;
+ u32 ctrl_sg;
+ dma_addr_t src = 0;
+ dma_addr_t dst = 0;
+ int nbr_of_irq = 0;
+ u32 bytes_to_transfer;
+ u32 elem_size;
+
+ if (lli == NULL)
+ goto err;
+
+ spin_lock(&pool->lock);
+
+ if (dir == DMA_TO_DEVICE)
+ dst = dev_addr;
+ else if (dir == DMA_FROM_DEVICE)
+ src = dev_addr;
+ else
+ goto err;
+
+ for_each_sg(sgl, sg, nents, i) {
+ if (sg_is_chain(sg)) {
+ /* sg continues into the next sg-element; don't
+ * send ctrl_last until the last
+ * sg-element in the chain
+ */
+ ctrl_sg = ctrl_chained;
+ } else if (i == nents - 1)
+ ctrl_sg = ctrl_last;
+ else
+ ctrl_sg = ctrl ? ctrl : ctrl_last;
+
+
+ if ((ctrl_sg & ctrl_irq_mask))
+ nbr_of_irq++;
+
+ if (dir == DMA_TO_DEVICE)
+ /* set the source address from this sg entry */
+ src = sg_dma_address(sg);
+ else
+ /* set the destination address from this sg entry */
+ dst = sg_dma_address(sg);
+
+ bytes_to_transfer = sg_dma_len(sg);
+
+ while (bytes_to_transfer) {
+ u32 val;
+
+ if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
+ elem_size = MAX_DMA_PACKET_SIZE;
+ val = ctrl_chained;
+ } else {
+ elem_size = bytes_to_transfer;
+ val = ctrl_sg;
+ }
+
+ lli->control = val | elem_size;
+ lli->src_addr = src;
+ lli->dst_addr = dst;
+
+ if (dir == DMA_FROM_DEVICE)
+ dst += elem_size;
+ else
+ src += elem_size;
+
+ BUG_ON(lli->link_addr & 3);
+
+ bytes_to_transfer -= elem_size;
+ lli = coh901318_lli_next(lli);
+ }
+
+ }
+ spin_unlock(&pool->lock);
+
+ /* There can be many IRQs per sg transfer */
+ return nbr_of_irq;
+ err:
+ spin_unlock(&pool->lock);
+ return -EINVAL;
+}
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
new file mode 100644
index 00000000000..7bf713b79c6
--- /dev/null
+++ b/drivers/dma/coh901318_lli.h
@@ -0,0 +1,124 @@
+/*
+ * drivers/dma/coh901318_lli.h
+ *
+ * Copyright (C) 2007-2009 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ * Support functions for handling lli for coh901318
+ * Author: Per Friden <per.friden@stericsson.com>
+ */
+
+#ifndef COH901318_LLI_H
+#define COH901318_LLI_H
+
+#include <mach/coh901318.h>
+
+struct device;
+
+struct coh901318_pool {
+ spinlock_t lock;
+ struct dma_pool *dmapool;
+ struct device *dev;
+
+#ifdef CONFIG_DEBUG_FS
+ int debugfs_pool_counter;
+#endif
+};
+
+struct device;
+/**
+ * coh901318_pool_create() - Creates a dma pool for lli:s
+ * @pool: pool handle
+ * @dev: dma device
+ * @size: size of each lli descriptor in the pool
+ * @align: address alignment of the lli:s
+ * returns 0 on success, otherwise non-zero
+ */
+int coh901318_pool_create(struct coh901318_pool *pool,
+ struct device *dev,
+ size_t size, size_t align);
+
+/**
+ * coh901318_pool_destroy() - Destroys the dma pool
+ * @pool: pool handle
+ * returns 0 on success, otherwise non-zero
+ */
+int coh901318_pool_destroy(struct coh901318_pool *pool);
+
+/**
+ * coh901318_lli_alloc() - Allocates a linked list
+ *
+ * @pool: pool handle
+ * @len: number of lli:s in the list
+ * return: non-NULL on success, otherwise NULL
+ */
+struct coh901318_lli *
+coh901318_lli_alloc(struct coh901318_pool *pool,
+ unsigned int len);
+
+/**
+ * coh901318_lli_free() - Returns the linked list items to the pool
+ * @pool: pool handle
+ * @lli: reference to lli pointer to be freed
+ */
+void coh901318_lli_free(struct coh901318_pool *pool,
+ struct coh901318_lli **lli);
+
+/**
+ * coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @src: src address
+ * @size: transfer size
+ * @dst: destination address
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl_last: ctrl for the last lli
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t src, unsigned int size,
+ dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
+
+/**
+ * coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @buf: transfer buffer
+ * @size: transfer size
+ * @dev_addr: address of the peripheral
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl_last: ctrl for the last lli
+ * @dir: direction of transfer (to or from device)
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_single(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ dma_addr_t buf, unsigned int size,
+ dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
+ enum dma_data_direction dir);
+
+/**
+ * coh901318_lli_fill_sg() - Prepares the lli:s for a dma scatter list transfer
+ * @pool: pool handle
+ * @lli: allocated lli
+ * @sg: scatter gather list
+ * @nents: number of entries in sg
+ * @dev_addr: address of the peripheral
+ * @ctrl_chained: ctrl for chained lli
+ * @ctrl: ctrl of middle lli
+ * @ctrl_last: ctrl for the last lli
+ * @dir: direction of transfer (to or from device)
+ * @ctrl_irq_mask: ctrl mask for CPU interrupt
+ * returns number of CPU interrupts for the lli, negative on error.
+ */
+int
+coh901318_lli_fill_sg(struct coh901318_pool *pool,
+ struct coh901318_lli *lli,
+ struct scatterlist *sg, unsigned int nents,
+ dma_addr_t dev_addr, u32 ctrl_chained,
+ u32 ctrl, u32 ctrl_last,
+ enum dma_data_direction dir, u32 ctrl_irq_mask);
+
+#endif /* COH901318_LLI_H */
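
The kernel-doc above describes a small allocator: a dma_pool of fixed-size lli blocks chained through link_addr/virt_link_addr and programmed per transfer. A minimal sketch, not from the patch, of the intended call sequence for a memcpy job (essentially what coh901318_prep_memcpy() does internally); the example_* wrapper and its parameters are hypothetical:

#include <linux/device.h>
#include <mach/coh901318.h>
#include "coh901318_lli.h"

static int example_lli_memcpy(struct coh901318_pool *pool,
                              dma_addr_t src, dma_addr_t dst,
                              unsigned int size,
                              u32 ctrl_chained, u32 ctrl_last)
{
        struct coh901318_lli *lli;
        unsigned int lli_len;
        int irqs;

        /* One lli per MAX_DMA_PACKET_SIZE chunk, rounded up */
        lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
        if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
                lli_len++;

        lli = coh901318_lli_alloc(pool, lli_len);
        if (!lli)
                return -ENOMEM;

        /* Program src/dst/control into every element of the chain */
        irqs = coh901318_lli_fill_memcpy(pool, lli, src, size, dst,
                                         ctrl_chained, ctrl_last);
        if (irqs < 0) {
                coh901318_lli_free(pool, &lli);
                return irqs;
        }

        /* ... hand the chain to the channel and start it, as
         * coh901318_prep_linked_list()/coh901318_start() do ... */

        /* once the hardware is done with the chain, give it back */
        coh901318_lli_free(pool, &lli);
        return irqs;
}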
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8f99354082c..6f51a0a7a8b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init);
*/
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
- struct dma_chan *chan;
- int cpu;
-
- cpu = get_cpu();
- chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
- put_cpu();
-
- return chan;
+ return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
- int cpu;
unsigned long flags;
dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
tx->callback = NULL;
cookie = tx->tx_submit(tx);
- cpu = get_cpu();
- per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
- per_cpu_ptr(chan->local, cpu)->memcpy_count++;
- put_cpu();
+ preempt_disable();
+ __this_cpu_add(chan->local->bytes_transferred, len);
+ __this_cpu_inc(chan->local->memcpy_count);
+ preempt_enable();
return cookie;
}
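
The dmaengine.c hunks replace get_cpu()/per_cpu_ptr()/put_cpu() with the newer this_cpu accessors: a lone this_cpu_read() needs no pinning, while the two statistics updates use the cheap __this_cpu_* forms inside preempt_disable()/preempt_enable() so both counters are bumped on the same CPU. A minimal sketch of the same idiom on a hypothetical per-CPU stats structure:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

struct example_stats {
        unsigned long bytes;
        unsigned long ops;
};

static struct example_stats *example_stats;     /* from alloc_percpu() */

static int example_stats_init(void)
{
        example_stats = alloc_percpu(struct example_stats);
        return example_stats ? 0 : -ENOMEM;
}

static void example_account(size_t len)
{
        /* Both counters must be updated on the same CPU, so hold off
         * preemption around the non-atomic __this_cpu_* operations. */
        preempt_disable();
        __this_cpu_add(example_stats->bytes, len);
        __this_cpu_inc(example_stats->ops);
        preempt_enable();
}

static unsigned long example_read_ops(void)
{
        /* A single (possibly stale) read needs no pinning */
        return this_cpu_read(example_stats->ops);
}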
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a32a4cf7b1e..8b905161fbf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -298,10 +298,6 @@ static int dmatest_func(void *data)
total_tests++;
- len = dmatest_random() % test_buf_size + 1;
- src_off = dmatest_random() % (test_buf_size - len + 1);
- dst_off = dmatest_random() % (test_buf_size - len + 1);
-
/* honor alignment restrictions */
if (thread->type == DMA_MEMCPY)
align = dev->copy_align;
@@ -310,7 +306,19 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_PQ)
align = dev->pq_align;
+ if (1 << align > test_buf_size) {
+ pr_err("%u-byte buffer too small for %d-byte alignment\n",
+ test_buf_size, 1 << align);
+ break;
+ }
+
+ len = dmatest_random() % test_buf_size + 1;
len = (len >> align) << align;
+ if (!len)
+ len = 1 << align;
+ src_off = dmatest_random() % (test_buf_size - len + 1);
+ dst_off = dmatest_random() % (test_buf_size - len + 1);
+
src_off = (src_off >> align) << align;
dst_off = (dst_off >> align) << align;
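
The reordered dmatest logic first rejects buffers smaller than one aligned element, then draws a random length and rounds it down to the device alignment, forcing a minimum of one aligned unit so a zero-length transfer can never be generated. The same arithmetic on its own, as a hedged sketch (the rnd callback stands in for dmatest_random()):

#include <linux/types.h>

static size_t example_pick_len(size_t test_buf_size, unsigned int align,
                               unsigned long (*rnd)(void))
{
        size_t len;

        if ((size_t)1 << align > test_buf_size)
                return 0;                       /* buffer too small */

        len = rnd() % test_buf_size + 1;        /* 1 .. test_buf_size */
        len = (len >> align) << align;          /* round down to alignment */
        if (!len)
                len = (size_t)1 << align;       /* never a zero-length copy */

        return len;
}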
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 2eea823516a..285bed0fe17 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1427,7 +1427,7 @@ static int dw_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops dw_dev_pm_ops = {
+static const struct dev_pm_ops dw_dev_pm_ops = {
.suspend_noirq = dw_suspend_noirq,
.resume_noirq = dw_resume_noirq,
};
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 645ca8d54ec..ca6e6a0cb79 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1470,7 +1470,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start, pdev->name))
+ resource_size(res), pdev->name))
return -EBUSY;
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
@@ -1542,7 +1542,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
iop_chan->device = adev;
iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start);
+ resource_size(res));
if (!iop_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_iop_chan;
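
The iop-adma change replaces the open-coded res->end - res->start with resource_size(); resource ranges are inclusive, so the open-coded form is one byte short. A small illustration:

#include <linux/ioport.h>

/* A resource spanning 0x1000..0x1fff covers 0x1000 bytes:
 *        res->end - res->start  == 0x0fff   (one byte short)
 *        resource_size(res)     == 0x1000   (end - start + 1)
 */
static resource_size_t example_region_bytes(const struct resource *res)
{
        return resource_size(res);
}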
diff --git a/drivers/dma/ppc4xx/Makefile b/drivers/dma/ppc4xx/Makefile
new file mode 100644
index 00000000000..b3d259b3e52
--- /dev/null
+++ b/drivers/dma/ppc4xx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
new file mode 100644
index 00000000000..0a3478e910f
--- /dev/null
+++ b/drivers/dma/ppc4xx/adma.c
@@ -0,0 +1,5027 @@
+/*
+ * Copyright (C) 2006-2009 DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * Further porting to arch/powerpc by
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchronous DMA copy and RAID engines available
+ * on the AMCC PPC440SPe Processors.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D.Williams.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include "adma.h"
+
+enum ppc_adma_init_code {
+ PPC_ADMA_INIT_OK = 0,
+ PPC_ADMA_INIT_MEMRES,
+ PPC_ADMA_INIT_MEMREG,
+ PPC_ADMA_INIT_ALLOC,
+ PPC_ADMA_INIT_COHERENT,
+ PPC_ADMA_INIT_CHANNEL,
+ PPC_ADMA_INIT_IRQ1,
+ PPC_ADMA_INIT_IRQ2,
+ PPC_ADMA_INIT_REGISTER
+};
+
+static char *ppc_adma_errors[] = {
+ [PPC_ADMA_INIT_OK] = "ok",
+ [PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
+ [PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
+ [PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
+ "structure",
+ [PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
+ "hardware descriptors",
+ [PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
+ [PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
+ [PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
+ [PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
+};
+
+static enum ppc_adma_init_code
+ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];
+
+struct ppc_dma_chan_ref {
+ struct dma_chan *chan;
+ struct list_head node;
+};
+
+/* The list of channels exported by ppc440spe ADMA */
+struct list_head
+ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);
+
+/* This flag is set when we want to refetch the xor chain in the interrupt
+ * handler
+ */
+static u32 do_xor_refetch;
+
+/* Pointer to DMA0, DMA1 CP/CS FIFO */
+static void *ppc440spe_dma_fifo_buf;
+
+/* Pointers to the last CDBs submitted to DMA0, DMA1 */
+static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
+static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];
+
+/* Pointer to last linked and submitted xor CB */
+static struct ppc440spe_adma_desc_slot *xor_last_linked;
+static struct ppc440spe_adma_desc_slot *xor_last_submit;
+
+/* This array is used in data-check operations for storing a pattern */
+static char ppc440spe_qword[16];
+
+static atomic_t ppc440spe_adma_err_irq_ref;
+static dcr_host_t ppc440spe_mq_dcr_host;
+static unsigned int ppc440spe_mq_dcr_len;
+
+/* Since RXOR operations use the common register (MQ0_CF2H) to set up
+ * the block size for transactions, we do not allow more than one RXOR
+ * transaction to be active at a time. This variable records whether RXOR
+ * is currently active (PPC440SPE_RXOR_RUN bit set) or not (bit clear).
+ */
+static unsigned long ppc440spe_rxor_state;
+
+/* These are used in the RAID-6 enable & check routines */
+static u32 ppc440spe_r6_enabled;
+static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
+static struct completion ppc440spe_r6_test_comp;
+
+static int ppc440spe_adma_dma2rxor_prep_src(
+ struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_rxor *cursor, int index,
+ int src_cnt, u32 addr);
+static void ppc440spe_adma_dma2rxor_set_src(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, dma_addr_t addr);
+static void ppc440spe_adma_dma2rxor_set_mult(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, u8 mult);
+
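+/* ADMA_LL_DBG(x) evaluates its argument only when ADMA_LL_DEBUG is defined;
+ * the statement expression yields 0 in either case.
+ */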
+#ifdef ADMA_LL_DEBUG
+#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
+#else
+#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
+#endif
+
+static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
+{
+ struct dma_cdb *cdb;
+ struct xor_cb *cb;
+ int i;
+
+ switch (chan->device->id) {
+ case 0:
+ case 1:
+ cdb = block;
+
+ pr_debug("CDB at %p [%d]:\n"
+ "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
+ "\t sg1u 0x%08x sg1l 0x%08x\n"
+ "\t sg2u 0x%08x sg2l 0x%08x\n"
+ "\t sg3u 0x%08x sg3l 0x%08x\n",
+ cdb, chan->device->id,
+ cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
+ le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
+ le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
+ le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
+ );
+ break;
+ case 2:
+ cb = block;
+
+ pr_debug("CB at %p [%d]:\n"
+ "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
+ "\t cbtah 0x%08x cbtal 0x%08x\n"
+ "\t cblah 0x%08x cblal 0x%08x\n",
+ cb, chan->device->id,
+ cb->cbc, cb->cbbc, cb->cbs,
+ cb->cbtah, cb->cbtal,
+ cb->cblah, cb->cblal);
+ for (i = 0; i < 16; i++) {
+ if (i && !cb->ops[i].h && !cb->ops[i].l)
+ continue;
+ pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
+ i, cb->ops[i].h, cb->ops[i].l);
+ }
+ break;
+ }
+}
+
+static void print_cb_list(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *iter)
+{
+ for (; iter; iter = iter->hw_next)
+ print_cb(chan, iter->hw_desc);
+}
+
+static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt)
+{
+ int i;
+
+ pr_debug("\n%s(%d):\nsrc: ", __func__, id);
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx ", src[i]);
+ pr_debug("dst:\n\t0x%016llx\n", dst);
+}
+
+static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt)
+{
+ int i;
+
+ pr_debug("\n%s(%d):\nsrc: ", __func__, id);
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx ", src[i]);
+ pr_debug("dst: ");
+ for (i = 0; i < 2; i++)
+ pr_debug("\t0x%016llx ", dst[i]);
+}
+
+static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
+ unsigned int src_cnt,
+ const unsigned char *scf)
+{
+ int i;
+
+ pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
+ if (scf) {
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
+ } else {
+ for (i = 0; i < src_cnt; i++)
+ pr_debug("\t0x%016llx(no) ", src[i]);
+ }
+
+ pr_debug("dst: ");
+ for (i = 0; i < 2; i++)
+ pr_debug("\t0x%016llx ", src[src_cnt + i]);
+}
+
+/******************************************************************************
+ * Command (Descriptor) Blocks low-level routines
+ ******************************************************************************/
+/**
+ * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ */
+static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ struct xor_cb *p;
+
+ switch (chan->device->id) {
+ case PPC440SPE_XOR_ID:
+ p = desc->hw_desc;
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ /* NOP with Command Block Complete Enable */
+ p->cbc = XOR_CBCR_CBCE_BIT;
+ break;
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
+ /* NOP with interrupt */
+ set_bit(PPC440SPE_DESC_INT, &desc->flags);
+ break;
+ default:
+ printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
+ __func__);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
+ * pseudo operation
+ */
+static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
+{
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ desc->hw_next = NULL;
+ desc->src_cnt = 0;
+ desc->dst_cnt = 1;
+}
+
+/**
+ * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
+ */
+static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
+ int src_cnt, unsigned long flags)
+{
+ struct xor_cb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ desc->hw_next = NULL;
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = 1;
+
+ hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Enable interrupt on completion */
+ hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
+/**
+ * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
+ * operation in DMA2 controller
+ */
+static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
+ int dst_cnt, int src_cnt, unsigned long flags)
+{
+ struct xor_cb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct xor_cb));
+ desc->hw_next = NULL;
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = dst_cnt;
+ memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
+ desc->descs_per_op = 0;
+
+ hw_desc->cbc = XOR_CBCR_TGT_BIT;
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Enable interrupt on completion */
+ hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
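+/* Driver-private prep flags: allocate them right above the last generic
+ * DMA_PREP_* flag (DMA_PREP_FENCE) so they do not collide with the flags
+ * defined by the dmaengine core.
+ */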
+#define DMA_CTRL_FLAGS_LAST DMA_PREP_FENCE
+#define DMA_PREP_ZERO_P (DMA_CTRL_FLAGS_LAST << 1)
+#define DMA_PREP_ZERO_Q (DMA_PREP_ZERO_P << 1)
+
+/**
+ * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
+ * with DMA0/1
+ */
+static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
+ int dst_cnt, int src_cnt, unsigned long flags,
+ unsigned long op)
+{
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_desc_slot *iter;
+ u8 dopc;
+
+ /* Common initialization of a PQ descriptors chain */
+ set_bits(op, &desc->flags);
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = dst_cnt;
+
+ /* WXOR MULTICAST if both P and Q are being computed
+ * MV_SG1_SG2 if Q only
+ */
+ dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
+ DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
+
+ list_for_each_entry(iter, &desc->group_list, chain_node) {
+ hw_desc = iter->hw_desc;
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+
+ if (likely(!list_is_last(&iter->chain_node,
+ &desc->group_list))) {
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ } else {
+ /* this is the last descriptor.
+ * this slot will be pasted from ADMA level
+ * each time it wants to configure parameters
+ * of the transaction (src, dst, ...)
+ */
+ iter->hw_next = NULL;
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ }
+ }
+
+ /* Set OPS depending on WXOR/RXOR type of operation */
+ if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
+		/* This is a WXOR-only chain:
+		 * - the first descriptors are for zeroing destinations
+		 *   if PPC440SPE_ZERO_P/Q is set;
+		 * - the remaining descriptors are for GF-XOR operations.
+ */
+ iter = list_first_entry(&desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+
+ if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ list_for_each_entry_from(iter, &desc->group_list, chain_node) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = dopc;
+ }
+ } else {
+ /* This is either RXOR-only or mixed RXOR/WXOR */
+
+		/* The first 1 or 2 slots in the chain are always RXOR:
+		 * if we need to calculate both P & Q, then there are two
+		 * RXOR slots; if only P or only Q, then there is one
+ */
+ iter = list_first_entry(&desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+
+ if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ }
+
+ /* The remaining descs (if any) are WXORs */
+ if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ list_for_each_entry_from(iter, &desc->group_list,
+ chain_node) {
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = dopc;
+ }
+ }
+ }
+}
+
+/**
+ * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
+ * for PQ_ZERO_SUM operation
+ */
+static void ppc440spe_desc_init_dma01pqzero_sum(
+ struct ppc440spe_adma_desc_slot *desc,
+ int dst_cnt, int src_cnt)
+{
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_desc_slot *iter;
+ int i = 0;
+ u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
+ DMA_CDB_OPC_MV_SG1_SG2;
+ /*
+	 * Initialize starting from the 2nd or 3rd descriptor, depending
+	 * on dst_cnt. The first one or two slots are for cloning P
+ * and/or Q to chan->pdest and/or chan->qdest as we have
+ * to preserve original P/Q.
+ */
+ iter = list_first_entry(&desc->group_list,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+
+ if (dst_cnt > 1) {
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ }
+ /* initialize each source descriptor in chain */
+ list_for_each_entry_from(iter, &desc->group_list, chain_node) {
+ hw_desc = iter->hw_desc;
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->src_cnt = 0;
+ iter->dst_cnt = 0;
+
+ /* This is a ZERO_SUM operation:
+ * - <src_cnt> descriptors starting from 2nd or 3rd
+ * descriptor are for GF-XOR operations;
+ * - remaining <dst_cnt> descriptors are for checking the result
+ */
+ if (i++ < src_cnt)
+ /* MV_SG1_SG2 if only Q is being verified
+ * MULTICAST if both P and Q are being verified
+ */
+ hw_desc->opc = dopc;
+ else
+ /* DMA_CDB_OPC_DCHECK128 operation */
+ hw_desc->opc = DMA_CDB_OPC_DCHECK128;
+
+ if (likely(!list_is_last(&iter->chain_node,
+ &desc->group_list))) {
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ } else {
+ /* this is the last descriptor.
+ * this slot will be pasted from ADMA level
+ * each time it wants to configure parameters
+ * of the transaction (src, dst, ...)
+ */
+ iter->hw_next = NULL;
+ /* always enable interrupt generation since we get
+ * the status of pqzero from the handler
+ */
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ }
+ }
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = dst_cnt;
+}
+
+/**
+ * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ */
+static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
+ unsigned long flags)
+{
+ struct dma_cdb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &desc->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &desc->flags);
+
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+}
+
+/**
+ * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
+ */
+static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
+ int value, unsigned long flags)
+{
+ struct dma_cdb *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &desc->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &desc->flags);
+
+ hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
+ hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
+ hw_desc->opc = DMA_CDB_OPC_DFILL128;
+}
+
+/**
+ * ppc440spe_desc_set_src_addr - set source address into the descriptor
+ */
+static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ int src_idx, dma_addr_t addrh,
+ dma_addr_t addrl)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+ phys_addr_t addr64, tmplow, tmphi;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
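+		/* A zero addrh means the caller passed the whole address in
+		 * addrl; split it into high and low 32-bit words here.
+		 */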
+ if (!addrh) {
+ addr64 = addrl;
+ tmphi = (addr64 >> 32);
+ tmplow = (addr64 & 0xFFFFFFFF);
+ } else {
+ tmphi = addrh;
+ tmplow = addrl;
+ }
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
+ dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->ops[src_idx].l = addrl;
+ xor_hw_desc->ops[src_idx].h |= addrh;
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
+ */
+static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, u32 mult_index,
+ int sg_index, unsigned char mult_value)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+ u32 *psgu;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+
+ switch (sg_index) {
+ /* for RXOR operations set multiplier
+ * into source cued address
+ */
+ case DMA_CDB_SG_SRC:
+ psgu = &dma_hw_desc->sg1u;
+ break;
+ /* for WXOR operations set multiplier
+ * into destination cued address(es)
+ */
+ case DMA_CDB_SG_DST1:
+ psgu = &dma_hw_desc->sg2u;
+ break;
+ case DMA_CDB_SG_DST2:
+ psgu = &dma_hw_desc->sg3u;
+ break;
+ default:
+ BUG();
+ }
+
+ *psgu |= cpu_to_le32(mult_value << mult_index);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
+ */
+static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ dma_addr_t addrh, dma_addr_t addrl,
+ u32 dst_idx)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+ phys_addr_t addr64, tmphi, tmplow;
+ u32 *psgu, *psgl;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ if (!addrh) {
+ addr64 = addrl;
+ tmphi = (addr64 >> 32);
+ tmplow = (addr64 & 0xFFFFFFFF);
+ } else {
+ tmphi = addrh;
+ tmplow = addrl;
+ }
+ dma_hw_desc = desc->hw_desc;
+
+ psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
+ psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;
+
+ *psgl = cpu_to_le32((u32)tmplow);
+ *psgu |= cpu_to_le32((u32)tmphi);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbtal = addrl;
+ xor_hw_desc->cbtah |= addrh;
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_set_byte_count - set number of data bytes involved
+ * into the operation
+ */
+static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ u32 byte_count)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->cnt = cpu_to_le32(byte_count);
+ break;
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbbc = byte_count;
+ break;
+ }
+}
+
+/**
+ * ppc440spe_desc_set_rxor_block_size - set RXOR block size
+ */
+static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
+{
+	/* assume that byte_count is aligned on a 512-byte boundary;
+ * thus write it directly to the register (bits 23:31 are
+ * reserved there).
+ */
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
+}
+
+/**
+ * ppc440spe_desc_set_dcheck - set CHECK pattern
+ */
+static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, u8 *qword)
+{
+ struct dma_cdb *dma_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+ iowrite32(qword[0], &dma_hw_desc->sg3l);
+ iowrite32(qword[4], &dma_hw_desc->sg3u);
+ iowrite32(qword[8], &dma_hw_desc->sg2l);
+ iowrite32(qword[12], &dma_hw_desc->sg2u);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/**
+ * ppc440spe_xor_set_link - set link address in xor CB
+ */
+static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
+ struct ppc440spe_adma_desc_slot *next_desc)
+{
+ struct xor_cb *xor_hw_desc = prev_desc->hw_desc;
+
+ if (unlikely(!next_desc || !(next_desc->phys))) {
+ printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
+ __func__, next_desc,
+ next_desc ? next_desc->phys : 0);
+ BUG();
+ }
+
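+	/* Chain the previous CB to the next one: clear its status word,
+	 * point the link address at the next CB and set the link bit.
+	 */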
+ xor_hw_desc->cbs = 0;
+ xor_hw_desc->cblal = next_desc->phys;
+ xor_hw_desc->cblah = 0;
+ xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
+}
+
+/**
+ * ppc440spe_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ */
+static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *prev_desc,
+ struct ppc440spe_adma_desc_slot *next_desc)
+{
+ unsigned long flags;
+ struct ppc440spe_adma_desc_slot *tail = next_desc;
+
+ if (unlikely(!prev_desc || !next_desc ||
+ (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
+		/* If the previous 'next' pointer is being overwritten,
+		 * something is wrong; though we may refetch from append to
+		 * initiate list processing, in which case it's ok.
+ */
+ printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
+ "prev->hw_next=0x%p\n", __func__, prev_desc,
+ next_desc, prev_desc ? prev_desc->hw_next : 0);
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ /* do s/w chaining both for DMA and XOR descriptors */
+ prev_desc->hw_next = next_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ break;
+ case PPC440SPE_XOR_ID:
+ /* bind descriptor to the chain */
+ while (tail->hw_next)
+ tail = tail->hw_next;
+ xor_last_linked = tail;
+
+ if (prev_desc == xor_last_submit)
+ /* do not link to the last submitted CB */
+ break;
+ ppc440spe_xor_set_link(prev_desc, next_desc);
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
+ */
+static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, int src_idx)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+ /* May have 0, 1, 2, or 3 sources */
+ switch (dma_hw_desc->opc) {
+ case DMA_CDB_OPC_NO_OP:
+ case DMA_CDB_OPC_DFILL128:
+ return 0;
+ case DMA_CDB_OPC_DCHECK128:
+ if (unlikely(src_idx)) {
+ printk(KERN_ERR "%s: try to get %d source for"
+ " DCHECK128\n", __func__, src_idx);
+ BUG();
+ }
+ return le32_to_cpu(dma_hw_desc->sg1l);
+ case DMA_CDB_OPC_MULTICAST:
+ case DMA_CDB_OPC_MV_SG1_SG2:
+ if (unlikely(src_idx > 2)) {
+ printk(KERN_ERR "%s: try to get %d source from"
+ " DMA descr\n", __func__, src_idx);
+ BUG();
+ }
+ if (src_idx) {
+ if (le32_to_cpu(dma_hw_desc->sg1u) &
+ DMA_CUED_XOR_WIN_MSK) {
+ u8 region;
+
+ if (src_idx == 1)
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ desc->unmap_len;
+
+ region = (le32_to_cpu(
+ dma_hw_desc->sg1u)) >>
+ DMA_CUED_REGION_OFF;
+
+ region &= DMA_CUED_REGION_MSK;
+ switch (region) {
+ case DMA_RXOR123:
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ (desc->unmap_len << 1);
+ case DMA_RXOR124:
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ (desc->unmap_len * 3);
+ case DMA_RXOR125:
+ return le32_to_cpu(
+ dma_hw_desc->sg1l) +
+ (desc->unmap_len << 2);
+ default:
+ printk(KERN_ERR
+ "%s: try to"
+ " get src3 for region %02x"
+						" PPC440SPE_DESC_RXOR12?\n",
+ __func__, region);
+ BUG();
+ }
+ } else {
+ printk(KERN_ERR
+ "%s: try to get %d"
+ " source for non-cued descr\n",
+ __func__, src_idx);
+ BUG();
+ }
+ }
+ return le32_to_cpu(dma_hw_desc->sg1l);
+ default:
+ printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+ __func__, dma_hw_desc->opc);
+ BUG();
+ }
+ return le32_to_cpu(dma_hw_desc->sg1l);
+ case PPC440SPE_XOR_ID:
+ /* May have up to 16 sources */
+ xor_hw_desc = desc->hw_desc;
+ return xor_hw_desc->ops[src_idx].l;
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_dest_addr - extract the destination address from the
+ * descriptor
+ */
+static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan, int idx)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+
+ if (likely(!idx))
+ return le32_to_cpu(dma_hw_desc->sg2l);
+ return le32_to_cpu(dma_hw_desc->sg3l);
+ case PPC440SPE_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ return xor_hw_desc->cbtal;
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_src_num - extract the number of source addresses from
+ * the descriptor
+ */
+static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ struct dma_cdb *dma_hw_desc;
+ struct xor_cb *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_hw_desc = desc->hw_desc;
+
+ switch (dma_hw_desc->opc) {
+ case DMA_CDB_OPC_NO_OP:
+ case DMA_CDB_OPC_DFILL128:
+ return 0;
+ case DMA_CDB_OPC_DCHECK128:
+ return 1;
+ case DMA_CDB_OPC_MV_SG1_SG2:
+ case DMA_CDB_OPC_MULTICAST:
+ /*
+			 * Only RXOR operations have more than
+			 * one source
+ */
+ if (le32_to_cpu(dma_hw_desc->sg1u) &
+ DMA_CUED_XOR_WIN_MSK) {
+ /* RXOR op, there are 2 or 3 sources */
+ if (((le32_to_cpu(dma_hw_desc->sg1u) >>
+ DMA_CUED_REGION_OFF) &
+ DMA_CUED_REGION_MSK) == DMA_RXOR12) {
+ /* RXOR 1-2 */
+ return 2;
+ } else {
+ /* RXOR 1-2-3/1-2-4/1-2-5 */
+ return 3;
+ }
+ }
+ return 1;
+ default:
+ printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+ __func__, dma_hw_desc->opc);
+ BUG();
+ }
+ case PPC440SPE_XOR_ID:
+ /* up to 16 sources */
+ xor_hw_desc = desc->hw_desc;
+ return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_dst_num - get the number of destination addresses in
+ * this descriptor
+ */
+static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ struct dma_cdb *dma_hw_desc;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* May be 1 or 2 destinations */
+ dma_hw_desc = desc->hw_desc;
+ switch (dma_hw_desc->opc) {
+ case DMA_CDB_OPC_NO_OP:
+ case DMA_CDB_OPC_DCHECK128:
+ return 0;
+ case DMA_CDB_OPC_MV_SG1_SG2:
+ case DMA_CDB_OPC_DFILL128:
+ return 1;
+ case DMA_CDB_OPC_MULTICAST:
+ if (desc->dst_cnt == 2)
+ return 2;
+ else
+ return 1;
+ default:
+ printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+ __func__, dma_hw_desc->opc);
+ BUG();
+ }
+ case PPC440SPE_XOR_ID:
+ /* Always only 1 destination */
+ return 1;
+ default:
+ BUG();
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_desc_get_link - get the address of the descriptor that
+ * follows this one
+ */
+static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ if (!desc->hw_next)
+ return 0;
+
+ return desc->hw_next->phys;
+}
+
+/**
+ * ppc440spe_desc_is_aligned - check alignment
+ */
+static inline int ppc440spe_desc_is_aligned(
+ struct ppc440spe_adma_desc_slot *desc, int num_slots)
+{
+ return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+/**
+ * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
+ * XOR operation
+ */
+static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt;
+
+ /* each XOR descriptor provides up to 16 source operands */
+ slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
+
+ if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
+ return slot_cnt;
+
+ printk(KERN_ERR "%s: len %d > max %d !!\n",
+ __func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
+ BUG();
+ return slot_cnt;
+}
+
+/**
+ * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
+ * DMA2 PQ operation
+ */
+static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
+ int src_cnt, size_t len)
+{
+ signed long long order = 0;
+ int state = 0;
+ int addr_count = 0;
+ int i;
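+
+	/* Walk the source list with a small state machine:
+	 * state 0 - looking for the start of an RXOR region,
+	 * state 1 - the first pair of a region matched (direct or reverse),
+	 * state 2 - a third contiguous source extended the region,
+	 * state 3 - the sources do not form a valid RXOR chain.
+	 * addr_count counts the resulting RXOR regions.
+	 */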
+ for (i = 1; i < src_cnt; i++) {
+ dma_addr_t cur_addr = srcs[i];
+ dma_addr_t old_addr = srcs[i-1];
+ switch (state) {
+ case 0:
+ if (cur_addr == old_addr + len) {
+ /* direct RXOR */
+ order = 1;
+ state = 1;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else if (old_addr == cur_addr + len) {
+ /* reverse RXOR */
+ order = -1;
+ state = 1;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else {
+ state = 3;
+ }
+ break;
+ case 1:
+ if (i == src_cnt-2 || (order == -1
+ && cur_addr != old_addr - len)) {
+ order = 0;
+ state = 0;
+ addr_count++;
+ } else if (cur_addr == old_addr + len*order) {
+ state = 2;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else if (cur_addr == old_addr + 2*len) {
+ state = 2;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else if (cur_addr == old_addr + 3*len) {
+ state = 2;
+ if (i == src_cnt-1)
+ addr_count++;
+ } else {
+ order = 0;
+ state = 0;
+ addr_count++;
+ }
+ break;
+ case 2:
+ order = 0;
+ state = 0;
+ addr_count++;
+ break;
+ }
+ if (state == 3)
+ break;
+ }
+ if (src_cnt <= 1 || (state != 1 && state != 2)) {
+ pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
+ __func__, src_cnt, state, addr_count, order);
+ for (i = 0; i < src_cnt; i++)
+ pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
+ BUG();
+ }
+
+ return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
+}
+
+
+/******************************************************************************
+ * ADMA channel low-level routines
+ ******************************************************************************/
+
+static u32
+ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
+static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);
+
+/**
+ * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ */
+static void ppc440spe_adma_device_clear_eot_status(
+ struct ppc440spe_adma_chan *chan)
+{
+ struct dma_regs *dma_reg;
+ struct xor_regs *xor_reg;
+ u8 *p = chan->device->dma_desc_pool_virt;
+ struct dma_cdb *cdb;
+ u32 rv, i;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* read FIFO to ack */
+ dma_reg = chan->device->dma_reg;
+ while ((rv = ioread32(&dma_reg->csfpl))) {
+ i = rv & DMA_CDB_ADDR_MSK;
+ cdb = (struct dma_cdb *)&p[i -
+ (u32)chan->device->dma_desc_pool];
+
+ /* Clear opcode to ack. This is necessary for
+ * ZeroSum operations only
+ */
+ cdb->opc = 0;
+
+ if (test_bit(PPC440SPE_RXOR_RUN,
+ &ppc440spe_rxor_state)) {
+ /* probably this is a completed RXOR op,
+ * get pointer to CDB using the fact that
+ * physical and virtual addresses of CDB
+ * in pools have the same offsets
+ */
+ if (le32_to_cpu(cdb->sg1u) &
+ DMA_CUED_XOR_BASE) {
+ /* this is a RXOR */
+ clear_bit(PPC440SPE_RXOR_RUN,
+ &ppc440spe_rxor_state);
+ }
+ }
+
+ if (rv & DMA_CDB_STATUS_MSK) {
+ /* ZeroSum check failed
+ */
+ struct ppc440spe_adma_desc_slot *iter;
+ dma_addr_t phys = rv & ~DMA_CDB_MSK;
+
+ /*
+ * Update the status of corresponding
+ * descriptor.
+ */
+ list_for_each_entry(iter, &chan->chain,
+ chain_node) {
+ if (iter->phys == phys)
+ break;
+ }
+ /*
+				 * if we cannot find the corresponding
+				 * slot, it's a bug
+ */
+ BUG_ON(&iter->chain_node == &chan->chain);
+
+ if (iter->xor_check_result) {
+ if (test_bit(PPC440SPE_DESC_PCHECK,
+ &iter->flags)) {
+ *iter->xor_check_result |=
+ SUM_CHECK_P_RESULT;
+ } else
+ if (test_bit(PPC440SPE_DESC_QCHECK,
+ &iter->flags)) {
+ *iter->xor_check_result |=
+ SUM_CHECK_Q_RESULT;
+ } else
+ BUG();
+ }
+ }
+ }
+
+ rv = ioread32(&dma_reg->dsts);
+ if (rv) {
+ pr_err("DMA%d err status: 0x%x\n",
+ chan->device->id, rv);
+ /* write back to clear */
+ iowrite32(rv, &dma_reg->dsts);
+ }
+ break;
+ case PPC440SPE_XOR_ID:
+ /* reset status bits to ack */
+ xor_reg = chan->device->xor_reg;
+ rv = ioread32be(&xor_reg->sr);
+ iowrite32be(rv, &xor_reg->sr);
+
+ if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
+ if (rv & XOR_IE_RPTIE_BIT) {
+ /* Read PLB Timeout Error.
+ * Try to resubmit the CB
+ */
+ u32 val = ioread32be(&xor_reg->ccbalr);
+
+ iowrite32be(val, &xor_reg->cblalr);
+
+ val = ioread32be(&xor_reg->crsr);
+ iowrite32be(val | XOR_CRSR_XAE_BIT,
+ &xor_reg->crsr);
+ } else
+ pr_err("XOR ERR 0x%x status\n", rv);
+ break;
+ }
+
+ /* if the XORcore is idle, but there are unprocessed CBs
+ * then refetch the s/w chain here
+ */
+ if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
+ do_xor_refetch)
+ ppc440spe_chan_append(chan);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_chan_is_busy - get the channel status
+ */
+static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
+{
+ struct dma_regs *dma_reg;
+ struct xor_regs *xor_reg;
+ int busy = 0;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_reg = chan->device->dma_reg;
+ /* if command FIFO's head and tail pointers are equal and
+ * status tail is the same as command, then channel is free
+ */
+ if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
+ ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
+ busy = 1;
+ break;
+ case PPC440SPE_XOR_ID:
+ /* use the special status bit for the XORcore
+ */
+ xor_reg = chan->device->xor_reg;
+ busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
+ break;
+ }
+
+ return busy;
+}
+
+/**
+ * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
+ */
+static void ppc440spe_chan_set_first_xor_descriptor(
+ struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *next_desc)
+{
+ struct xor_regs *xor_reg = chan->device->xor_reg;
+
+ if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
+ printk(KERN_INFO "%s: Warn: XORcore is running "
+ "when try to set the first CDB!\n",
+ __func__);
+
+ xor_last_submit = xor_last_linked = next_desc;
+
+ iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);
+
+ iowrite32be(next_desc->phys, &xor_reg->cblalr);
+ iowrite32be(0, &xor_reg->cblahr);
+ iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
+ &xor_reg->cbcr);
+
+ chan->hw_chain_inited = 1;
+}
+
+/**
+ * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
+ * called with irqs disabled
+ */
+static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *desc)
+{
+ u32 pcdb;
+ struct dma_regs *dma_reg = chan->device->dma_reg;
+
+ pcdb = desc->phys;
+ if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
+ pcdb |= DMA_CDB_NO_INT;
+
+ chan_last_sub[chan->device->id] = desc;
+
+ ADMA_LL_DBG(print_cb(chan, desc->hw_desc));
+
+ iowrite32(pcdb, &dma_reg->cpfpl);
+}
+
+/**
+ * ppc440spe_chan_append - update the h/w chain in the channel
+ */
+static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
+{
+ struct xor_regs *xor_reg;
+ struct ppc440spe_adma_desc_slot *iter;
+ struct xor_cb *xcb;
+ u32 cur_desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ cur_desc = ppc440spe_chan_get_current_descriptor(chan);
+
+ if (likely(cur_desc)) {
+ iter = chan_last_sub[chan->device->id];
+ BUG_ON(!iter);
+ } else {
+ /* first peer */
+ iter = chan_first_cdb[chan->device->id];
+ BUG_ON(!iter);
+ ppc440spe_dma_put_desc(chan, iter);
+ chan->hw_chain_inited = 1;
+ }
+
+ /* is there something new to append */
+ if (!iter->hw_next)
+ break;
+
+ /* flush descriptors from the s/w queue to fifo */
+ list_for_each_entry_continue(iter, &chan->chain, chain_node) {
+ ppc440spe_dma_put_desc(chan, iter);
+ if (!iter->hw_next)
+ break;
+ }
+ break;
+ case PPC440SPE_XOR_ID:
+ /* update h/w links and refetch */
+ if (!xor_last_submit->hw_next)
+ break;
+
+ xor_reg = chan->device->xor_reg;
+		/* the last linked CDB has to generate an interrupt so
+		 * that we are able to append the next lists to h/w
+		 * regardless of the XOR engine state at the moment
+		 * these lists are appended
+ */
+ xcb = xor_last_linked->hw_desc;
+ xcb->cbc |= XOR_CBCR_CBCE_BIT;
+
+ if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
+ /* XORcore is idle. Refetch now */
+ do_xor_refetch = 0;
+ ppc440spe_xor_set_link(xor_last_submit,
+ xor_last_submit->hw_next);
+
+ ADMA_LL_DBG(print_cb_list(chan,
+ xor_last_submit->hw_next));
+
+ xor_last_submit = xor_last_linked;
+ iowrite32be(ioread32be(&xor_reg->crsr) |
+ XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
+ &xor_reg->crsr);
+ } else {
+ /* XORcore is running. Refetch later in the handler */
+ do_xor_refetch = 1;
+ }
+
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
+ */
+static u32
+ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
+{
+ struct dma_regs *dma_reg;
+ struct xor_regs *xor_reg;
+
+ if (unlikely(!chan->hw_chain_inited))
+ /* h/w descriptor chain is not initialized yet */
+ return 0;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_reg = chan->device->dma_reg;
+ return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
+ case PPC440SPE_XOR_ID:
+ xor_reg = chan->device->xor_reg;
+ return ioread32be(&xor_reg->ccbalr);
+ }
+ return 0;
+}
+
+/**
+ * ppc440spe_chan_run - enable the channel
+ */
+static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
+{
+ struct xor_regs *xor_reg;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* DMAs are always enabled, do nothing */
+ break;
+ case PPC440SPE_XOR_ID:
+ /* drain write buffer */
+ xor_reg = chan->device->xor_reg;
+
+ /* fetch descriptor pointed to in <link> */
+ iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
+ &xor_reg->crsr);
+ break;
+ }
+}
+
+/******************************************************************************
+ * ADMA device level
+ ******************************************************************************/
+
+static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
+static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);
+
+static dma_cookie_t
+ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);
+
+static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t addr, int index);
+static void
+ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t addr, int index);
+
+static void
+ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t *paddr, unsigned long flags);
+static void
+ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t addr, int index);
+static void
+ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
+ unsigned char mult, int index, int dst_pos);
+static void
+ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
+ dma_addr_t paddr, dma_addr_t qaddr);
+
+static struct page *ppc440spe_rxor_srcs[32];
+
+/**
+ * ppc440spe_can_rxor - check if the operands may be processed with RXOR
+ */
+static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
+{
+ int i, order = 0, state = 0;
+ int idx = 0;
+
+ if (unlikely(!(src_cnt > 1)))
+ return 0;
+
+ BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));
+
+ /* Skip holes in the source list before checking */
+ for (i = 0; i < src_cnt; i++) {
+ if (!srcs[i])
+ continue;
+ ppc440spe_rxor_srcs[idx++] = srcs[i];
+ }
+ src_cnt = idx;
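+
+	/* Run the same contiguity state machine as
+	 * ppc440spe_dma2_pq_slot_count(), but over the virtual addresses
+	 * of the source pages.
+	 */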
+
+ for (i = 1; i < src_cnt; i++) {
+ char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
+ char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);
+
+ switch (state) {
+ case 0:
+ if (cur_addr == old_addr + len) {
+ /* direct RXOR */
+ order = 1;
+ state = 1;
+ } else if (old_addr == cur_addr + len) {
+ /* reverse RXOR */
+ order = -1;
+ state = 1;
+ } else
+ goto out;
+ break;
+ case 1:
+ if ((i == src_cnt - 2) ||
+ (order == -1 && cur_addr != old_addr - len)) {
+ order = 0;
+ state = 0;
+ } else if ((cur_addr == old_addr + len * order) ||
+ (cur_addr == old_addr + 2 * len) ||
+ (cur_addr == old_addr + 3 * len)) {
+ state = 2;
+ } else {
+ order = 0;
+ state = 0;
+ }
+ break;
+ case 2:
+ order = 0;
+ state = 0;
+ break;
+ }
+ }
+
+out:
+ if (state == 1 || state == 2)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ppc440spe_adma_estimate - estimate the efficiency of processing
+ *	the given operation on this channel. It's assumed that 'chan' is
+ *	capable of processing the 'cap' type of operation.
+ * @chan: channel to use
+ * @cap: type of transaction
+ * @dst_lst: array of destination pointers
+ * @dst_cnt: number of destination operands
+ * @src_lst: array of source pointers
+ * @src_cnt: number of source operands
+ * @src_sz: size of each source operand
+ */
+static int ppc440spe_adma_estimate(struct dma_chan *chan,
+ enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
+ struct page **src_lst, int src_cnt, size_t src_sz)
+{
+ int ef = 1;
+
+ if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
+ /* If RAID-6 capabilities were not activated don't try
+ * to use them
+ */
+ if (unlikely(!ppc440spe_r6_enabled))
+ return -1;
+ }
+	/* In the current implementation of the ppc440spe ADMA driver it
+	 * only makes sense to special-case the pq operation, because it
+	 * may be processed:
+	 * (1) either using the Biskup method on DMA2;
+	 * (2) or on DMA0/1.
+	 * Thus we favour (1) if the sources are suitable; otherwise let
+	 * the operation be processed on one of the DMA0/1 engines.
+	 * In the sum_product case, where the destination is also a
+	 * source, process it on DMA0/1 only.
+ */
+ if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {
+
+ if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
+ ef = 0; /* sum_product case, process on DMA0/1 */
+ else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
+ ef = 3; /* override (DMA0/1 + idle) */
+ else
+ ef = 0; /* can't process on DMA2 if !rxor */
+ }
+
+ /* channel idleness increases the priority */
+ if (likely(ef) &&
+ !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
+ ef++;
+
+ return ef;
+}
+
+struct dma_chan *
+ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
+ struct page **dst_lst, int dst_cnt, struct page **src_lst,
+ int src_cnt, size_t src_sz)
+{
+ struct dma_chan *best_chan = NULL;
+ struct ppc_dma_chan_ref *ref;
+ int best_rank = -1;
+
+ if (unlikely(!src_sz))
+ return NULL;
+ if (src_sz > PAGE_SIZE) {
+ /*
+		 * should a user of the API ever pass > PAGE_SIZE requests,
+		 * we sort out the cases where temporary page-sized buffers
+ * are used.
+ */
+ switch (cap) {
+ case DMA_PQ:
+ if (src_cnt == 1 && dst_lst[1] == src_lst[0])
+ return NULL;
+ if (src_cnt == 2 && dst_lst[1] == src_lst[1])
+ return NULL;
+ break;
+ case DMA_PQ_VAL:
+ case DMA_XOR_VAL:
+ return NULL;
+ default:
+ break;
+ }
+ }
+
+ list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
+ if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
+ int rank;
+
+ rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
+ dst_cnt, src_lst, src_cnt, src_sz);
+ if (rank > best_rank) {
+ best_rank = rank;
+ best_chan = ref->chan;
+ }
+ }
+ }
+
+ return best_chan;
+}
+EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
+
+/**
+ * ppc440spe_get_group_entry - get the group entry with index entry_idx
+ * @tdesc: the last allocated slot in the group.
+ */
+static struct ppc440spe_adma_desc_slot *
+ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
+{
+ struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
+ int i = 0;
+
+ if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
+ printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
+ __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
+ BUG();
+ }
+
+ list_for_each_entry(iter, &tdesc->group_list, chain_node) {
+ if (i++ == entry_idx)
+ break;
+ }
+ return iter;
+}
+
+/**
+ * ppc440spe_adma_free_slots - flags descriptor slots for reuse
+ * @slot: Slot to free
+ * Caller must hold &ppc440spe_chan->lock while calling this function
+ */
+static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
+ struct ppc440spe_adma_chan *chan)
+{
+ int stride = slot->slots_per_op;
+
+ while (stride--) {
+ slot->slots_per_op = 0;
+ slot = list_entry(slot->slot_node.next,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ }
+}
+
+static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *desc)
+{
+ u32 src_cnt, dst_cnt;
+ dma_addr_t addr;
+
+ /*
+	 * get the number of sources & destinations
+ * included in this descriptor and unmap
+ * them all
+ */
+ src_cnt = ppc440spe_desc_get_src_num(desc, chan);
+ dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
+
+ /* unmap destinations */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ while (dst_cnt--) {
+ addr = ppc440spe_desc_get_dest_addr(
+ desc, chan, dst_cnt);
+ dma_unmap_page(chan->device->dev,
+ addr, desc->unmap_len,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* unmap sources */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ while (src_cnt--) {
+ addr = ppc440spe_desc_get_src_addr(
+ desc, chan, src_cnt);
+ dma_unmap_page(chan->device->dev,
+ addr, desc->unmap_len,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+/**
+ * ppc440spe_adma_run_tx_complete_actions - call functions to be called
+ * upon completion
+ */
+static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
+ struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan,
+ dma_cookie_t cookie)
+{
+ int i;
+
+ BUG_ON(desc->async_tx.cookie < 0);
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ *
+ * actually, ppc's dma_unmap_page() functions are empty, so
+ * the following code is just for the sake of completeness
+ */
+ if (chan && chan->needs_unmap && desc->group_head &&
+ desc->unmap_len) {
+ struct ppc440spe_adma_desc_slot *unmap =
+ desc->group_head;
+ /* assume 1 slot per op always */
+ u32 slot_count = unmap->slot_cnt;
+
+ /* Run through the group list and unmap addresses */
+ for (i = 0; i < slot_count; i++) {
+ BUG_ON(!unmap);
+ ppc440spe_adma_unmap(chan, unmap);
+ unmap = unmap->hw_next;
+ }
+ }
+ }
+
+ /* run dependent operations */
+ dma_run_dependencies(&desc->async_tx);
+
+ return cookie;
+}
+
+/**
+ * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
+ */
+static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_adma_chan *chan)
+{
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx))
+ return 0;
+
+ /* leave the last descriptor in the chain
+ * so we can append to it
+ */
+ if (list_is_last(&desc->chain_node, &chan->chain) ||
+ desc->phys == ppc440spe_chan_get_current_descriptor(chan))
+ return 1;
+
+ if (chan->device->id != PPC440SPE_XOR_ID) {
+		/* our DMA interrupt handler clears the opc field of
+		 * each processed descriptor. For all types of
+		 * operations except ZeroSum we do not actually need
+		 * an ack from the interrupt handler. ZeroSum is a
+		 * special case since its result is available from
+		 * the handler only, so if we see such a descriptor
+		 * (which has not been processed yet) then leave it
+		 * in the chain.
+ */
+ struct dma_cdb *cdb = desc->hw_desc;
+ if (cdb->opc == DMA_CDB_OPC_DCHECK128)
+ return 1;
+ }
+
+ dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
+ desc->phys, desc->idx, desc->slots_per_op);
+
+ list_del(&desc->chain_node);
+ ppc440spe_adma_free_slots(desc, chan);
+ return 0;
+}
+
+/**
+ * __ppc440spe_adma_slot_cleanup - this is the common clean-up routine
+ *	which runs through the channel CDBs list until it reaches the
+ *	descriptor currently being processed. When the routine determines
+ *	that all CDBs of a group have completed, the corresponding callbacks
+ *	(if any) are called and the slots are freed.
+ */
+static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
+{
+ struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
+ dma_cookie_t cookie = 0;
+ u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
+ int busy = ppc440spe_chan_is_busy(chan);
+ int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+ dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
+ chan->device->id, __func__);
+
+ if (!current_desc) {
+ /* There were no transactions yet, so
+ * nothing to clean
+ */
+ return;
+ }
+
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+ list_for_each_entry_safe(iter, _iter, &chan->chain,
+ chain_node) {
+ dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
+ "busy: %d this_desc: %#llx next_desc: %#x "
+ "cur: %#x ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy, iter->phys,
+ ppc440spe_desc_get_link(iter, chan), current_desc,
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+		 * hardware channel; subsequent descriptors are either in process
+ * or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy, or if it appears that the current descriptor
+ * needs to be re-read (i.e. has been appended to)
+ */
+ if (iter->phys == current_desc) {
+ BUG_ON(seen_current++);
+ if (busy || ppc440spe_desc_get_link(iter, chan)) {
+ /* not all descriptors of the group have
+ * been completed; exit.
+ */
+ break;
+ }
+ }
+
+ /* detect the start of a group transaction */
+ if (!slot_cnt && !slots_per_op) {
+ slot_cnt = iter->slot_cnt;
+ slots_per_op = iter->slots_per_op;
+ if (slot_cnt <= slots_per_op) {
+ slot_cnt = 0;
+ slots_per_op = 0;
+ }
+ }
+
+ if (slot_cnt) {
+ if (!group_start)
+ group_start = iter;
+ slot_cnt -= slots_per_op;
+ }
+
+ /* all the members of a group are complete */
+ if (slots_per_op != 0 && slot_cnt == 0) {
+ struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
+ int end_of_chain = 0;
+
+ /* clean up the group */
+ slot_cnt = group_start->slot_cnt;
+ grp_iter = group_start;
+ list_for_each_entry_safe_from(grp_iter, _grp_iter,
+ &chan->chain, chain_node) {
+
+ cookie = ppc440spe_adma_run_tx_complete_actions(
+ grp_iter, chan, cookie);
+
+ slot_cnt -= slots_per_op;
+ end_of_chain = ppc440spe_adma_clean_slot(
+ grp_iter, chan);
+ if (end_of_chain && slot_cnt) {
+ /* Should wait for ZeroSum completion */
+ if (cookie > 0)
+ chan->completed_cookie = cookie;
+ return;
+ }
+
+ if (slot_cnt == 0 || end_of_chain)
+ break;
+ }
+
+ /* the group should be complete at this point */
+ BUG_ON(slot_cnt);
+
+ slots_per_op = 0;
+ group_start = NULL;
+ if (end_of_chain)
+ break;
+ else
+ continue;
+ } else if (slots_per_op) /* wait for group completion */
+ continue;
+
+ cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
+ cookie);
+
+ if (ppc440spe_adma_clean_slot(iter, chan))
+ break;
+ }
+
+ BUG_ON(!seen_current);
+
+ if (cookie > 0) {
+ chan->completed_cookie = cookie;
+ pr_debug("\tcompleted cookie %d\n", cookie);
+ }
+
+}
+
+/**
+ * ppc440spe_adma_tasklet - clean up watch-dog initiator
+ */
+static void ppc440spe_adma_tasklet(unsigned long data)
+{
+ struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;
+
+ spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
+ __ppc440spe_adma_slot_cleanup(chan);
+ spin_unlock(&chan->lock);
+}
+
+/**
+ * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
+ */
+static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
+{
+ spin_lock_bh(&chan->lock);
+ __ppc440spe_adma_slot_cleanup(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * ppc440spe_adma_alloc_slots - allocate free slots (if any)
+ */
+static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
+ struct ppc440spe_adma_chan *chan, int num_slots,
+ int slots_per_op)
+{
+ struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
+ struct ppc440spe_adma_desc_slot *alloc_start = NULL;
+ struct list_head chain = LIST_HEAD_INIT(chain);
+ int slots_found, retry = 0;
+
+
+ BUG_ON(!num_slots || !slots_per_op);
+	/* start the search from the last allocated descriptor;
+	 * if a contiguous allocation cannot be found, start searching
+ * from the beginning of the list
+ */
+retry:
+ slots_found = 0;
+ if (retry == 0)
+ iter = chan->last_used;
+ else
+ iter = list_entry(&chan->all_slots,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
+ slot_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+ if (iter->slots_per_op) {
+ slots_found = 0;
+ continue;
+ }
+
+ /* start the allocation if the slot is correctly aligned */
+ if (!slots_found++)
+ alloc_start = iter;
+
+ if (slots_found == num_slots) {
+ struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
+ struct ppc440spe_adma_desc_slot *last_used = NULL;
+
+ iter = alloc_start;
+ while (num_slots) {
+ int i;
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op)
+ async_tx_ack(&iter->async_tx);
+
+ list_add_tail(&iter->chain_node, &chain);
+ alloc_tail = iter;
+ iter->async_tx.cookie = 0;
+ iter->hw_next = NULL;
+ iter->flags = 0;
+ iter->slot_cnt = num_slots;
+ iter->xor_check_result = NULL;
+ for (i = 0; i < slots_per_op; i++) {
+ iter->slots_per_op = slots_per_op - i;
+ last_used = iter;
+ iter = list_entry(iter->slot_node.next,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ }
+ num_slots -= slots_per_op;
+ }
+ alloc_tail->group_head = alloc_start;
+ alloc_tail->async_tx.cookie = -EBUSY;
+ list_splice(&chain, &alloc_tail->group_list);
+ chan->last_used = last_used;
+ return alloc_tail;
+ }
+ }
+ if (!retry++)
+ goto retry;
+
+ /* try to free some slots if the allocation fails */
+ tasklet_schedule(&chan->irq_tasklet);
+ return NULL;
+}
+
+/**
+ * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
+ */
+static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *slot = NULL;
+ char *hw_desc;
+ int i, db_sz;
+ int init;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ init = ppc440spe_chan->slots_allocated ? 0 : 1;
+ chan->chan_id = ppc440spe_chan->device->id;
+
+ /* Allocate descriptor slots */
+ i = ppc440spe_chan->slots_allocated;
+ if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
+ db_sz = sizeof(struct dma_cdb);
+ else
+ db_sz = sizeof(struct xor_cb);
+
+ for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
+ slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
+ GFP_KERNEL);
+ if (!slot) {
+ printk(KERN_INFO "SPE ADMA Channel only initialized"
+ " %d descriptor slots", i--);
+ break;
+ }
+
+ hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[i * db_sz];
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->group_list);
+ slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
+ slot->idx = i;
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ ppc440spe_chan->slots_allocated++;
+ list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ }
+
+ if (i && !ppc440spe_chan->last_used) {
+ ppc440spe_chan->last_used =
+ list_entry(ppc440spe_chan->all_slots.next,
+ struct ppc440spe_adma_desc_slot,
+ slot_node);
+ }
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: allocated %d descriptor slots\n",
+ ppc440spe_chan->device->id, i);
+
+ /* initialize the channel and the chain with a null operation */
+ if (init) {
+ switch (ppc440spe_chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ ppc440spe_chan->hw_chain_inited = 0;
+ /* Use WXOR for self-testing */
+ if (!ppc440spe_r6_tchan)
+ ppc440spe_r6_tchan = ppc440spe_chan;
+ break;
+ case PPC440SPE_XOR_ID:
+ ppc440spe_chan_start_null_xor(ppc440spe_chan);
+ break;
+ default:
+ BUG();
+ }
+ ppc440spe_chan->needs_unmap = 1;
+ }
+
+ return (i > 0) ? i : -ENOMEM;
+}
+
+/**
+ * ppc440spe_desc_assign_cookie - assign a cookie
+ */
+static dma_cookie_t ppc440spe_desc_assign_cookie(
+ struct ppc440spe_adma_chan *chan,
+ struct ppc440spe_adma_desc_slot *desc)
+{
+ dma_cookie_t cookie = chan->common.cookie;
+
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ chan->common.cookie = desc->async_tx.cookie = cookie;
+ return cookie;
+}
+
+/**
+ * ppc440spe_rxor_set_region - set the RXOR region bits in the xor CB operand
+ */
+static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
+ u8 xor_arg_no, u32 mask)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+
+ xcb->ops[xor_arg_no].h |= mask;
+}
+
+/**
+ * ppc440spe_rxor_set_src - set the RXOR source address in the xor CB operand
+ */
+static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
+ u8 xor_arg_no, dma_addr_t addr)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+
+ xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
+ xcb->ops[xor_arg_no].l = addr;
+}
+
+/**
+ * ppc440spe_rxor_set_mult - set the RXOR multiplier in the xor CB operand
+ */
+static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
+ u8 xor_arg_no, u8 idx, u8 mult)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+
+ xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
+}
+
+/**
+ * ppc440spe_adma_check_threshold - append CDBs to h/w chain if threshold
+ * has been reached
+ */
+static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
+{
+ dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
+ chan->device->id, chan->pending);
+
+ if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
+ chan->pending = 0;
+ ppc440spe_chan_append(chan);
+ }
+}
+
+/**
+ * ppc440spe_adma_tx_submit - submit a new descriptor group to the channel
+ * (the descriptors are not necessarily appended to the h/w chain
+ * right away)
+ */
+static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc;
+ struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
+ struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+
+ sw_desc = tx_to_ppc440spe_adma_slot(tx);
+
+ group_start = sw_desc->group_head;
+ slot_cnt = group_start->slot_cnt;
+ slots_per_op = group_start->slots_per_op;
+
+ spin_lock_bh(&chan->lock);
+
+ cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
+
+ if (unlikely(list_empty(&chan->chain))) {
+ /* first peer */
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ chan_first_cdb[chan->device->id] = group_start;
+ } else {
+ /* isn't first peer, bind CDBs to chain */
+ old_chain_tail = list_entry(chan->chain.prev,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ list_splice_init(&sw_desc->group_list,
+ &old_chain_tail->chain_node);
+ /* fix up the hardware chain */
+ ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
+ }
+
+ /* increment the pending count by the number of operations */
+ chan->pending += slot_cnt / slots_per_op;
+ ppc440spe_adma_check_threshold(chan);
+ spin_unlock_bh(&chan->lock);
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
+ chan->device->id, __func__,
+ sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
+
+ return cookie;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
+ __func__);
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
+ group_start->unmap_len = 0;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ if (unlikely(!len))
+ return NULL;
+
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s len: %u int_en %d\n",
+ ppc440spe_chan->device->id, __func__, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_memcpy(group_start, flags);
+ ppc440spe_adma_set_dest(group_start, dma_dest, 0);
+ ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
+ ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset(
+ struct dma_chan *chan, dma_addr_t dma_dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ if (unlikely(!len))
+ return NULL;
+
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n",
+ ppc440spe_chan->device->id, __func__, value, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_memset(group_start, value, flags);
+ ppc440spe_adma_set_dest(group_start, dma_dest, 0);
+ ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t *dma_src, u32 src_cnt, size_t len,
+ unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
+ dma_dest, dma_src, src_cnt));
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
+ ppc440spe_chan->device->id, __func__, src_cnt, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc440spe_desc_init_xor(group_start, src_cnt, flags);
+ ppc440spe_adma_set_dest(group_start, dma_dest, 0);
+ while (src_cnt--)
+ ppc440spe_adma_memcpy_xor_set_src(group_start,
+ dma_src[src_cnt], src_cnt);
+ ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static inline void
+ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
+ int src_cnt);
+static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
+
+/**
+ * ppc440spe_adma_init_dma2rxor_slot - initialize the CDB sources of a
+ * DMA2 RXOR slot
+ */
+static void ppc440spe_adma_init_dma2rxor_slot(
+ struct ppc440spe_adma_desc_slot *desc,
+ dma_addr_t *src, int src_cnt)
+{
+ int i;
+
+ /* initialize CDB */
+ for (i = 0; i < src_cnt; i++) {
+ ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
+ desc->src_cnt, (u32)src[i]);
+ }
+}
+
+/**
+ * ppc440spe_dma01_prep_mult - prepare descriptors for a Q-only multiply
+ * operation where the destination is also a source
+ */
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL;
+ unsigned long op = 0;
+ int slot_cnt;
+
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+ slot_cnt = 2;
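+ /* Two chained CDBs: a MULTICAST that zeroes the destination and
+ * copies the source into the spare q page, then an MV_SG1_SG2 that
+ * multiplies the q page data by scf[0] into the real destination.
+ */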
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ /* use WXOR, each descriptor occupies one slot */
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ struct ppc440spe_adma_chan *chan;
+ struct ppc440spe_adma_desc_slot *iter;
+ struct dma_cdb *hw_desc;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+ set_bits(op, &sw_desc->flags);
+ sw_desc->src_cnt = src_cnt;
+ sw_desc->dst_cnt = dst_cnt;
+ /* First descriptor, zero data in the destination and copy it
+ * to q page using MULTICAST transfer.
+ */
+ iter = list_first_entry(&sw_desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MULTICAST;
+
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, dst[0], 0);
+ ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ src[0]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+
+ /*
+ * Second descriptor, multiply data from the q page
+ * and store the result in real destination.
+ */
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = NULL;
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ ppc440spe_desc_set_src_addr(iter, chan, 0,
+ DMA_CUED_XOR_HB, dst[1]);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, dst[0], 0);
+
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, scf[0]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc;
+}
+
+/**
+ * ppc440spe_dma01_prep_sum_product - prepare descriptors for the
+ * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where the destination is also
+ * a source.
+ */
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL;
+ unsigned long op = 0;
+ int slot_cnt;
+
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+ slot_cnt = 3;
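+ /* Three chained CDBs: copy src[1] to the q page and zero the
+ * destination, multiply the q page data by scf[1] into the
+ * destination, then multiply src[0] by scf[0] and XOR it into the
+ * destination.
+ */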
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+
+ /* WXOR, each descriptor occupies one slot */
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ struct ppc440spe_adma_chan *chan;
+ struct ppc440spe_adma_desc_slot *iter;
+ struct dma_cdb *hw_desc;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+ set_bits(op, &sw_desc->flags);
+ sw_desc->src_cnt = src_cnt;
+ sw_desc->dst_cnt = 1;
+ /* 1st descriptor, src[1] data to q page and zero destination */
+ iter = list_first_entry(&sw_desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MULTICAST;
+
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ *dst, 0);
+ ppc440spe_desc_set_dest_addr(iter, chan, 0,
+ ppc440spe_chan->qdest, 1);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ src[1]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+
+ /* 2nd descriptor, multiply src[1] data and store the
+ * result in destination */
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ /* set 'next' pointer */
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ ppc440spe_chan->qdest);
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ *dst, 0);
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, scf[1]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+
+ /*
+ * 3rd descriptor, multiply src[0] data and xor it
+ * with destination
+ */
+ iter = list_first_entry(&iter->chain_node,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = NULL;
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(PPC440SPE_DESC_INT, &iter->flags);
+ else
+ clear_bit(PPC440SPE_DESC_INT, &iter->flags);
+
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
+ src[0]);
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ *dst, 0);
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, scf[0]);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
+ iter->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc;
+}
+
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ int slot_cnt;
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
+ unsigned long op = 0;
+ unsigned char mult = 1;
+
+ pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
+ __func__, dst_cnt, src_cnt, len);
+ /* select WXOR/RXOR operation depending on the
+ * source addresses of the operators and the number
+ * of destinations (RXOR supports only Q-parity calculations)
+ */
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+ if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
+ /* no active RXOR;
+ * do RXOR if:
+ * - there is more than one source,
+ * - len is aligned on a 512-byte boundary,
+ * - the source addresses fit one of the 4 possible regions.
+ */
+ if (src_cnt > 1 &&
+ !(len & MQ0_CF2H_RXOR_BS_MASK) &&
+ (src[0] + len) == src[1]) {
+ /* may do RXOR R1 R2 */
+ set_bit(PPC440SPE_DESC_RXOR, &op);
+ if (src_cnt != 2) {
+ /* may try to enhance region of RXOR */
+ if ((src[1] + len) == src[2]) {
+ /* do RXOR R1 R2 R3 */
+ set_bit(PPC440SPE_DESC_RXOR123,
+ &op);
+ } else if ((src[1] + len * 2) == src[2]) {
+ /* do RXOR R1 R2 R4 */
+ set_bit(PPC440SPE_DESC_RXOR124, &op);
+ } else if ((src[1] + len * 3) == src[2]) {
+ /* do RXOR R1 R2 R5 */
+ set_bit(PPC440SPE_DESC_RXOR125,
+ &op);
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC440SPE_DESC_RXOR12,
+ &op);
+ }
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC440SPE_DESC_RXOR12, &op);
+ }
+ }
+
+ if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
+ /* can not do this operation with RXOR */
+ clear_bit(PPC440SPE_RXOR_RUN,
+ &ppc440spe_rxor_state);
+ } else {
+ /* can do; set block size right now */
+ ppc440spe_desc_set_rxor_block_size(len);
+ }
+ }
+
+ /* Number of necessary slots depends on operation type selected */
+ if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
+ /* This is a WXOR-only chain. We need descriptors for each
+ * source to GF-XOR them with WXOR, and descriptors
+ * for each destination to zero them with WXOR
+ */
+ slot_cnt = src_cnt;
+
+ if (flags & DMA_PREP_ZERO_P) {
+ slot_cnt++;
+ set_bit(PPC440SPE_ZERO_P, &op);
+ }
+ if (flags & DMA_PREP_ZERO_Q) {
+ slot_cnt++;
+ set_bit(PPC440SPE_ZERO_Q, &op);
+ }
+ } else {
+ /* Need 1 or 2 descriptors for the RXOR operation, and
+ * (src_cnt - (2 or 3)) descriptors for WXOR of the
+ * remaining sources (if any)
+ */
+ slot_cnt = dst_cnt;
+
+ if (flags & DMA_PREP_ZERO_P)
+ set_bit(PPC440SPE_ZERO_P, &op);
+ if (flags & DMA_PREP_ZERO_Q)
+ set_bit(PPC440SPE_ZERO_Q, &op);
+
+ if (test_bit(PPC440SPE_DESC_RXOR12, &op))
+ slot_cnt += src_cnt - 2;
+ else
+ slot_cnt += src_cnt - 3;
+
+ /* Thus we have either an RXOR-only chain or a
+ * mixed RXOR/WXOR chain
+ */
+ if (slot_cnt == dst_cnt)
+ /* RXOR only chain */
+ clear_bit(PPC440SPE_DESC_WXOR, &op);
+ }
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ /* for both RXOR/WXOR each descriptor occupies one slot */
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
+ flags, op);
+
+ /* setup dst/src/mult */
+ pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
+ __func__, dst[0], dst[1]);
+ ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
+ while (src_cnt--) {
+ ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
+ src_cnt);
+
+ /* NOTE: "Multi = 0 is equivalent to = 1" as it
+ * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf
+ * doesn't work for RXOR with DMA0/1! Instead, multi=0
+ * leads to zeroing source data after RXOR.
+ * So, for P case set-up mult=1 explicitly.
+ */
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ mult = scf[src_cnt];
+ ppc440spe_adma_pq_set_src_mult(sw_desc,
+ mult, src_cnt, dst_cnt - 1);
+ }
+
+ /* Set up the byte count for each slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list,
+ chain_node) {
+ ppc440spe_desc_set_byte_count(iter,
+ ppc440spe_chan, len);
+ iter->unmap_len = len;
+ }
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ return sw_desc;
+}
+
+static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
+ struct ppc440spe_adma_chan *ppc440spe_chan,
+ dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
+ const unsigned char *scf, size_t len, unsigned long flags)
+{
+ int slot_cnt, descs_per_op;
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
+ unsigned long op = 0;
+ unsigned char mult = 1;
+
+ BUG_ON(!dst_cnt);
+ /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
+ __func__, dst_cnt, src_cnt, len);*/
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
+ if (descs_per_op < 0) {
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ return NULL;
+ }
+
+ /* depending on number of sources we have 1 or 2 RXOR chains */
+ slot_cnt = descs_per_op * dst_cnt;
+
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
+ if (sw_desc) {
+ op = slot_cnt;
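+ /* 'op' is reused as a countdown so that only the last descriptor
+ * in the group is initialized with the caller's flags
+ */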
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
+ --op ? 0 : flags);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = len;
+
+ ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
+ iter->rxor_cursor.len = len;
+ iter->descs_per_op = descs_per_op;
+ }
+ op = 0;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ op++;
+ if (op % descs_per_op == 0)
+ ppc440spe_adma_init_dma2rxor_slot(iter, src,
+ src_cnt);
+ if (likely(!list_is_last(&iter->chain_node,
+ &sw_desc->group_list))) {
+ /* set 'next' pointer */
+ iter->hw_next =
+ list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ ppc440spe_xor_set_link(iter, iter->hw_next);
+ } else {
+ /* this is the last descriptor. */
+ iter->hw_next = NULL;
+ }
+ }
+
+ /* fixup head descriptor */
+ sw_desc->dst_cnt = dst_cnt;
+ if (flags & DMA_PREP_ZERO_P)
+ set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
+ if (flags & DMA_PREP_ZERO_Q)
+ set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
+
+ /* setup dst/src/mult */
+ ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
+
+ while (src_cnt--) {
+ /* handle descriptors (if dst_cnt == 2) inside
+ * the ppc440spe_adma_pq_set_srcxxx() functions
+ */
+ ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
+ src_cnt);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ mult = scf[src_cnt];
+ ppc440spe_adma_pq_set_src_mult(sw_desc,
+ mult, src_cnt, dst_cnt - 1);
+ }
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ ppc440spe_desc_set_rxor_block_size(len);
+ return sw_desc;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc = NULL;
+ int dst_cnt = 0;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
+ dst, src, src_cnt));
+ BUG_ON(!len);
+ BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(!src_cnt);
+
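+ /* Special cases below: the Q destination overlaps one of the
+ * sources, so an intermediate page (ppc440spe_chan->qdest) is used
+ * for the multiply.
+ */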
+ if (src_cnt == 1 && dst[1] == src[0]) {
+ dma_addr_t dest[2];
+
+ /* dst[1] is real destination (Q) */
+ dest[0] = dst[1];
+ /* this is the page to multicast source data to */
+ dest[1] = ppc440spe_chan->qdest;
+ sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
+ dest, 2, src, src_cnt, scf, len, flags);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+ }
+
+ if (src_cnt == 2 && dst[1] == src[1]) {
+ sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
+ &dst[1], src, 2, scf, len, flags);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
+ BUG_ON(!dst[0]);
+ dst_cnt++;
+ flags |= DMA_PREP_ZERO_P;
+ }
+
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
+ BUG_ON(!dst[1]);
+ dst_cnt++;
+ flags |= DMA_PREP_ZERO_Q;
+ }
+
+ BUG_ON(!dst_cnt);
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
+ ppc440spe_chan->device->id, __func__, src_cnt, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ switch (ppc440spe_chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
+ dst, dst_cnt, src, src_cnt, scf,
+ len, flags);
+ break;
+
+ case PPC440SPE_XOR_ID:
+ sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
+ dst, dst_cnt, src, src_cnt, scf,
+ len, flags);
+ break;
+ }
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
+ * a PQ_ZERO_SUM operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
+ struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *sw_desc, *iter;
+ dma_addr_t pdest, qdest;
+ int slot_cnt, slots_per_op, idst, dst_cnt;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ pdest = 0;
+ else
+ pdest = pq[0];
+
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ qdest = 0;
+ else
+ qdest = pq[1];
+
+ ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
+ src, src_cnt, scf));
+
+ /* Always use WXOR for P/Q calculations (two destinations).
+ * Need 1 or 2 extra slots to verify results are zero.
+ */
+ idst = dst_cnt = (pdest && qdest) ? 2 : 1;
+
+ /* One additional slot per destination to clone P/Q
+ * before calculation (we have to preserve destinations).
+ */
+ slot_cnt = src_cnt + dst_cnt * 2;
+ slots_per_op = 1;
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
+
+ /* Setup byte count for each slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = len;
+ }
+
+ if (pdest) {
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_chan *chan;
+
+ iter = sw_desc->group_head;
+ chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter->src_cnt = 0;
+ iter->dst_cnt = 0;
+ ppc440spe_desc_set_dest_addr(iter, chan, 0,
+ ppc440spe_chan->pdest, 0);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = 0;
+ /* override pdest to preserve original P */
+ pdest = ppc440spe_chan->pdest;
+ }
+ if (qdest) {
+ struct dma_cdb *hw_desc;
+ struct ppc440spe_adma_chan *chan;
+
+ iter = list_first_entry(&sw_desc->group_list,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
+
+ if (pdest) {
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
+ iter->hw_next = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ hw_desc = iter->hw_desc;
+ hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+ iter->src_cnt = 0;
+ iter->dst_cnt = 0;
+ ppc440spe_desc_set_dest_addr(iter, chan, 0,
+ ppc440spe_chan->qdest, 0);
+ ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
+ ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
+ len);
+ iter->unmap_len = 0;
+ /* override qdest to preserve original Q */
+ qdest = ppc440spe_chan->qdest;
+ }
+
+ /* Setup destinations for P/Q ops */
+ ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
+
+ /* Setup zero QWORDs into DCHECK CDBs */
+ idst = dst_cnt;
+ list_for_each_entry_reverse(iter, &sw_desc->group_list,
+ chain_node) {
+ /*
+ * The last CDB corresponds to the Q-parity check,
+ * the one before the last CDB corresponds to
+ * the P-parity check
+ */
+ if (idst == DMA_DEST_MAX_NUM) {
+ if (idst == dst_cnt) {
+ set_bit(PPC440SPE_DESC_QCHECK,
+ &iter->flags);
+ } else {
+ set_bit(PPC440SPE_DESC_PCHECK,
+ &iter->flags);
+ }
+ } else {
+ if (qdest) {
+ set_bit(PPC440SPE_DESC_QCHECK,
+ &iter->flags);
+ } else {
+ set_bit(PPC440SPE_DESC_PCHECK,
+ &iter->flags);
+ }
+ }
+ iter->xor_check_result = pqres;
+
+ /*
+ * set it to zero; if the check fails, the result
+ * will be updated
+ */
+ *iter->xor_check_result = 0;
+ ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
+ ppc440spe_qword);
+
+ if (!(--dst_cnt))
+ break;
+ }
+
+ /* Setup sources and mults for P/Q ops */
+ list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
+ chain_node) {
+ struct ppc440spe_adma_chan *chan;
+ u32 mult_dst;
+
+ chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
+ ppc440spe_desc_set_src_addr(iter, chan, 0,
+ DMA_CUED_XOR_HB,
+ src[src_cnt - 1]);
+ if (qdest) {
+ mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
+ DMA_CDB_SG_DST1;
+ ppc440spe_desc_set_src_mult(iter, chan,
+ DMA_CUED_MULT1_OFF,
+ mult_dst,
+ scf[src_cnt - 1]);
+ }
+ if (!(--src_cnt))
+ break;
+ }
+ }
+ spin_unlock_bh(&ppc440spe_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
+ * XOR ZERO_SUM operation
+ */
+static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, enum sum_check_flags *result, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t pq[2];
+
+ /* validate P, disable Q */
+ pq[0] = src[0];
+ pq[1] = 0;
+ flags |= DMA_PREP_PQ_DISABLE_Q;
+
+ tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
+ src_cnt - 1, 0, len,
+ result, flags);
+ return tx;
+}
+
+/**
+ * ppc440spe_adma_set_dest - set destination address into descriptor
+ */
+static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t addr, int index)
+{
+ struct ppc440spe_adma_chan *chan;
+
+ BUG_ON(index >= sw_desc->dst_cnt);
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* TODO: support transfer lengths >
+ * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
+ */
+ ppc440spe_desc_set_dest_addr(sw_desc->group_head,
+ chan, 0, addr, index);
+ break;
+ case PPC440SPE_XOR_ID:
+ sw_desc = ppc440spe_get_group_entry(sw_desc, index);
+ ppc440spe_desc_set_dest_addr(sw_desc,
+ chan, 0, addr, index);
+ break;
+ }
+}
+
+static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
+ struct ppc440spe_adma_chan *chan, dma_addr_t addr)
+{
+ /* To clear the destination, update the descriptor
+ * (P or Q depending on the index) as follows:
+ * addr is the destination (0 corresponds to SG2):
+ */
+ ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
+
+ /* ... and the addr is source: */
+ ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
+
+ /* addr is always SG2 then the mult is always DST1 */
+ ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ DMA_CDB_SG_DST1, 1);
+}
+
+/**
+ * ppc440spe_adma_pq_set_dest - set destination address into descriptor
+ * for the PQXOR operation
+ */
+static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t *addrs, unsigned long flags)
+{
+ struct ppc440spe_adma_desc_slot *iter;
+ struct ppc440spe_adma_chan *chan;
+ dma_addr_t paddr, qaddr;
+ dma_addr_t addr = 0, ppath, qpath;
+ int index = 0, i;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ paddr = 0;
+ else
+ paddr = addrs[0];
+
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ qaddr = 0;
+ else
+ qaddr = addrs[1];
+
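+ /* addr stays zero when both P and Q are enabled; otherwise it
+ * holds the single enabled destination
+ */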
+ if (!paddr || !qaddr)
+ addr = paddr ? paddr : qaddr;
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot:
+ */
+ if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
+ /* This is a WXOR-only chain; it may have 1 or 2 zeroing descs */
+ if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
+ index++;
+ if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
+ index++;
+
+ iter = ppc440spe_get_group_entry(sw_desc, index);
+ if (addr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node)
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, 0);
+ } else {
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, paddr, 0);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, qaddr, 1);
+ }
+ }
+
+ if (index) {
+ /* To clear the destinations, update the descriptor
+ * (1st, 2nd, or both, depending on flags)
+ */
+ index = 0;
+ if (test_bit(PPC440SPE_ZERO_P,
+ &sw_desc->flags)) {
+ iter = ppc440spe_get_group_entry(
+ sw_desc, index++);
+ ppc440spe_adma_pq_zero_op(iter, chan,
+ paddr);
+ }
+
+ if (test_bit(PPC440SPE_ZERO_Q,
+ &sw_desc->flags)) {
+ iter = ppc440spe_get_group_entry(
+ sw_desc, index++);
+ ppc440spe_adma_pq_zero_op(iter, chan,
+ qaddr);
+ }
+
+ return;
+ }
+ } else {
+ /* This is RXOR-only or RXOR/WXOR mixed chain */
+
+ /* If we want to include destination into calculations,
+ * then make dest addresses cued with mult=1 (XOR).
+ */
+ ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+ qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ /* Setup destination(s) in RXOR slot(s) */
+ iter = ppc440spe_get_group_entry(sw_desc, index++);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ paddr ? ppath : qpath,
+ paddr ? paddr : qaddr, 0);
+ if (!addr) {
+ /* two destinations */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index++);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ qpath, qaddr, 0);
+ }
+
+ if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
+ /* Setup destination(s) in remaining WXOR
+ * slots
+ */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index);
+ if (addr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node)
+ ppc440spe_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ addr, 0);
+
+ } else {
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node) {
+ ppc440spe_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ paddr, 0);
+ ppc440spe_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ qaddr, 1);
+ }
+ }
+ }
+
+ }
+ break;
+
+ case PPC440SPE_XOR_ID:
+ /* DMA2 descriptors have only 1 destination, so there are
+ * two chains - one for each dest.
+ * If we want to include destination into calculations,
+ * then make dest addresses cued with mult=1 (XOR).
+ */
+ ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ iter = ppc440spe_get_group_entry(sw_desc, 0);
+ for (i = 0; i < sw_desc->descs_per_op; i++) {
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ paddr ? ppath : qpath,
+ paddr ? paddr : qaddr, 0);
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
+ if (!addr) {
+ /* Two destinations; setup Q here */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->descs_per_op);
+ for (i = 0; i < sw_desc->descs_per_op; i++) {
+ ppc440spe_desc_set_dest_addr(iter,
+ chan, qpath, qaddr, 0);
+ iter = list_entry(iter->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+ }
+
+ break;
+ }
+}
+
+/**
+ * ppc440spe_adma_pqzero_sum_set_dest - set destination addresses into the
+ * descriptors for the PQ_ZERO_SUM operation
+ */
+static void ppc440spe_adma_pqzero_sum_set_dest(
+ struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t paddr, dma_addr_t qaddr)
+{
+ struct ppc440spe_adma_desc_slot *iter, *end;
+ struct ppc440spe_adma_chan *chan;
+ dma_addr_t addr = 0;
+ int idx;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot
+ */
+ idx = (paddr && qaddr) ? 2 : 1;
+ /* set end */
+ list_for_each_entry_reverse(end, &sw_desc->group_list,
+ chain_node) {
+ if (!(--idx))
+ break;
+ }
+ /* set start */
+ idx = (paddr && qaddr) ? 2 : 1;
+ iter = ppc440spe_get_group_entry(sw_desc, idx);
+
+ if (paddr && qaddr) {
+ /* two destinations */
+ list_for_each_entry_from(iter, &sw_desc->group_list,
+ chain_node) {
+ if (unlikely(iter == end))
+ break;
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, paddr, 0);
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, qaddr, 1);
+ }
+ } else {
+ /* one destination */
+ addr = paddr ? paddr : qaddr;
+ list_for_each_entry_from(iter, &sw_desc->group_list,
+ chain_node) {
+ if (unlikely(iter == end))
+ break;
+ ppc440spe_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, 0);
+ }
+ }
+
+ /* The remaining descriptors are DATACHECK. They do not need a
+ * destination; the destination addresses are actually used as
+ * sources for the check operation. So set addr as a source.
+ */
+ ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
+
+ if (!addr) {
+ end = list_entry(end->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
+ }
+}
+
+/**
+ * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
+ */
+static inline void ppc440spe_desc_set_xor_src_cnt(
+ struct ppc440spe_adma_desc_slot *desc,
+ int src_cnt)
+{
+ struct xor_cb *hw_desc = desc->hw_desc;
+
+ hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
+ hw_desc->cbc |= src_cnt;
+}
+
+/**
+ * ppc440spe_adma_pq_set_src - set source address into descriptor
+ */
+static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t addr, int index)
+{
+ struct ppc440spe_adma_chan *chan;
+ dma_addr_t haddr = 0;
+ struct ppc440spe_adma_desc_slot *iter = NULL;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ /* DMA0/1 may do: WXOR, RXOR, or an RXOR+WXOR chain
+ */
+ if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
+ /* RXOR-only or RXOR/WXOR operation */
+ int iskip = test_bit(PPC440SPE_DESC_RXOR12,
+ &sw_desc->flags) ? 2 : 3;
+
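+ /* the first 'iskip' sources are consumed by the single RXOR
+ * slot; any remaining sources go into separate WXOR slots
+ */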
+ if (index == 0) {
+ /* 1st slot (RXOR) */
+ /* setup sources region (R1-2-3, R1-2-4,
+ * or R1-2-5)
+ */
+ if (test_bit(PPC440SPE_DESC_RXOR12,
+ &sw_desc->flags))
+ haddr = DMA_RXOR12 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC440SPE_DESC_RXOR123,
+ &sw_desc->flags))
+ haddr = DMA_RXOR123 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC440SPE_DESC_RXOR124,
+ &sw_desc->flags))
+ haddr = DMA_RXOR124 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC440SPE_DESC_RXOR125,
+ &sw_desc->flags))
+ haddr = DMA_RXOR125 <<
+ DMA_CUED_REGION_OFF;
+ else
+ BUG();
+ haddr |= DMA_CUED_XOR_BASE;
+ iter = ppc440spe_get_group_entry(sw_desc, 0);
+ } else if (index < iskip) {
+ /* 1st slot (RXOR):
+ * the source address shall actually be set only once
+ * instead of for each of the first <iskip> sources
+ */
+ iter = NULL;
+ } else {
+ /* 2nd/3rd and subsequent slots (WXOR);
+ * skip the first slot, which holds the RXOR
+ */
+ haddr = DMA_CUED_XOR_HB;
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index - iskip + sw_desc->dst_cnt);
+ }
+ } else {
+ int znum = 0;
+
+ /* WXOR-only operation; skip first slots with
+ * zeroing destinations
+ */
+ if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
+ znum++;
+ if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
+ znum++;
+
+ haddr = DMA_CUED_XOR_HB;
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index + znum);
+ }
+
+ if (likely(iter)) {
+ ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
+
+ if (!index &&
+ test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
+ sw_desc->dst_cnt == 2) {
+ /* if we have two destinations for RXOR, then
+ * set up the source in the second descriptor too
+ */
+ iter = ppc440spe_get_group_entry(sw_desc, 1);
+ ppc440spe_desc_set_src_addr(iter, chan, 0,
+ haddr, addr);
+ }
+ }
+ break;
+
+ case PPC440SPE_XOR_ID:
+ /* DMA2 may do Biskup */
+ iter = sw_desc->group_head;
+ if (iter->dst_cnt == 2) {
+ /* both P & Q calculations required; set P src here */
+ ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
+
+ /* this is for Q */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->descs_per_op);
+ }
+ ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
+ */
+static void ppc440spe_adma_memcpy_xor_set_src(
+ struct ppc440spe_adma_desc_slot *sw_desc,
+ dma_addr_t addr, int index)
+{
+ struct ppc440spe_adma_chan *chan;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+ sw_desc = sw_desc->group_head;
+
+ if (likely(sw_desc))
+ ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_inc_addr - advance the RXOR cursor to the next
+ * operand and, when the current CDB is full, to the next descriptor
+ */
+static void ppc440spe_adma_dma2rxor_inc_addr(
+ struct ppc440spe_adma_desc_slot *desc,
+ struct ppc440spe_rxor *cursor, int index, int src_cnt)
+{
+ cursor->addr_count++;
+ if (index == src_cnt - 1) {
+ ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
+ } else if (cursor->addr_count == XOR_MAX_OPS) {
+ ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
+ cursor->addr_count = 0;
+ cursor->desc_count++;
+ }
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
+ */
+static int ppc440spe_adma_dma2rxor_prep_src(
+ struct ppc440spe_adma_desc_slot *hdesc,
+ struct ppc440spe_rxor *cursor, int index,
+ int src_cnt, u32 addr)
+{
+ int rval = 0;
+ u32 sign;
+ struct ppc440spe_adma_desc_slot *desc = hdesc;
+ int i;
+
+ for (i = 0; i < cursor->desc_count; i++) {
+ desc = list_entry(hdesc->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ }
+
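+ /* cursor->state: 0 - first operand of a group recorded, waiting
+ * for an adjacent second operand; 1 - two operands seen, classify
+ * the region as R1-2, R1-2-3, R1-2-4 or R1-2-5; 2 - group closed,
+ * the next address starts a new group
+ */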
+ switch (cursor->state) {
+ case 0:
+ if (addr == cursor->addrl + cursor->len) {
+ /* direct RXOR */
+ cursor->state = 1;
+ cursor->xor_count++;
+ if (index == src_cnt-1) {
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else if (cursor->addrl == addr + cursor->len) {
+ /* reverse RXOR */
+ cursor->state = 1;
+ cursor->xor_count++;
+ set_bit(cursor->addr_count, &desc->reverse_flags[0]);
+ if (index == src_cnt-1) {
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else {
+ printk(KERN_ERR "Cannot build "
+ "DMA2 RXOR command block.\n");
+ BUG();
+ }
+ break;
+ case 1:
+ sign = test_bit(cursor->addr_count,
+ desc->reverse_flags)
+ ? -1 : 1;
+ if (index == src_cnt-2 || (sign == -1
+ && addr != cursor->addrl - 2*cursor->len)) {
+ cursor->state = 0;
+ cursor->xor_count = 1;
+ cursor->addrl = addr;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ } else if (addr == cursor->addrl + 2*sign*cursor->len) {
+ cursor->state = 2;
+ cursor->xor_count = 0;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR123 << DMA_CUED_REGION_OFF);
+ if (index == src_cnt-1) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else if (addr == cursor->addrl + 3*cursor->len) {
+ cursor->state = 2;
+ cursor->xor_count = 0;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR124 << DMA_CUED_REGION_OFF);
+ if (index == src_cnt-1) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else if (addr == cursor->addrl + 4*cursor->len) {
+ cursor->state = 2;
+ cursor->xor_count = 0;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR125 << DMA_CUED_REGION_OFF);
+ if (index == src_cnt-1) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ } else {
+ cursor->state = 0;
+ cursor->xor_count = 1;
+ cursor->addrl = addr;
+ ppc440spe_rxor_set_region(desc,
+ cursor->addr_count,
+ DMA_RXOR12 << DMA_CUED_REGION_OFF);
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ break;
+ case 2:
+ cursor->state = 0;
+ cursor->addrl = addr;
+ cursor->xor_count++;
+ if (index) {
+ ppc440spe_adma_dma2rxor_inc_addr(
+ desc, cursor, index, src_cnt);
+ }
+ break;
+ }
+
+ return rval;
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
+ * ppc440spe_adma_dma2rxor_prep_src() has already been called before this
+ */
+static void ppc440spe_adma_dma2rxor_set_src(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, dma_addr_t addr)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+ int k = 0, op = 0, lop = 0;
+
+ /* get the RXOR operand which corresponds to index addr */
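+ /* an RXOR12 operand covers two source addresses, the
+ * RXOR123/124/125 operands cover three
+ */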
+ while (op <= index) {
+ lop = op;
+ if (k == XOR_MAX_OPS) {
+ k = 0;
+ desc = list_entry(desc->chain_node.next,
+ struct ppc440spe_adma_desc_slot, chain_node);
+ xcb = desc->hw_desc;
+
+ }
+ if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
+ (DMA_RXOR12 << DMA_CUED_REGION_OFF))
+ op += 2;
+ else
+ op += 3;
+ }
+
+ BUG_ON(k < 1);
+
+ if (test_bit(k-1, desc->reverse_flags)) {
+ /* reverse operand order; put last op in RXOR group */
+ if (index == op - 1)
+ ppc440spe_rxor_set_src(desc, k - 1, addr);
+ } else {
+ /* direct operand order; put first op in RXOR group */
+ if (index == lop)
+ ppc440spe_rxor_set_src(desc, k - 1, addr);
+ }
+}
+
+/**
+ * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
+ * ppc440spe_adma_dma2rxor_prep_src() has already been called before this
+ */
+static void ppc440spe_adma_dma2rxor_set_mult(
+ struct ppc440spe_adma_desc_slot *desc,
+ int index, u8 mult)
+{
+ struct xor_cb *xcb = desc->hw_desc;
+ int k = 0, op = 0, lop = 0;
+
+ /* get the RXOR operand which corresponds to index mult */
+ while (op <= index) {
+ lop = op;
+ if (k == XOR_MAX_OPS) {
+ k = 0;
+ desc = list_entry(desc->chain_node.next,
+ struct ppc440spe_adma_desc_slot,
+ chain_node);
+ xcb = desc->hw_desc;
+
+ }
+ if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
+ (DMA_RXOR12 << DMA_CUED_REGION_OFF))
+ op += 2;
+ else
+ op += 3;
+ }
+
+ BUG_ON(k < 1);
+ if (test_bit(k-1, desc->reverse_flags)) {
+ /* reverse order */
+ ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
+ } else {
+ /* direct order */
+ ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
+ }
+}
+
+/**
+ * ppc440spe_init_rxor_cursor - reset the RXOR cursor to its initial state
+ */
+static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
+{
+ memset(cursor, 0, sizeof(struct ppc440spe_rxor));
+ cursor->state = 2;
+}
+
+/**
+ * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
+ * descriptor for the PQXOR operation
+ */
+static void ppc440spe_adma_pq_set_src_mult(
+ struct ppc440spe_adma_desc_slot *sw_desc,
+ unsigned char mult, int index, int dst_pos)
+{
+ struct ppc440spe_adma_chan *chan;
+ u32 mult_idx, mult_dst;
+ struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
+
+ chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
+
+ switch (chan->device->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
+ int region = test_bit(PPC440SPE_DESC_RXOR12,
+ &sw_desc->flags) ? 2 : 3;
+
+ if (index < region) {
+ /* RXOR multipliers */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->dst_cnt - 1);
+ if (sw_desc->dst_cnt == 2)
+ iter1 = ppc440spe_get_group_entry(
+ sw_desc, 0);
+
+ mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
+ mult_dst = DMA_CDB_SG_SRC;
+ } else {
+ /* WXOR multiplier */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ index - region +
+ sw_desc->dst_cnt);
+ mult_idx = DMA_CUED_MULT1_OFF;
+ mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
+ DMA_CDB_SG_DST1;
+ }
+ } else {
+ int znum = 0;
+
+ /* WXOR-only;
+ * skip the first slots, which hold the destinations
+ * (if zeroing of destinations takes place)
+ */
+ if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
+ znum++;
+ if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
+ znum++;
+
+ iter = ppc440spe_get_group_entry(sw_desc, index + znum);
+ mult_idx = DMA_CUED_MULT1_OFF;
+ mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
+ }
+
+ if (likely(iter)) {
+ ppc440spe_desc_set_src_mult(iter, chan,
+ mult_idx, mult_dst, mult);
+
+ if (unlikely(iter1)) {
+ /* if we have two destinations for RXOR, then
+ * we've just set the Q mult. Set up P now.
+ */
+ ppc440spe_desc_set_src_mult(iter1, chan,
+ mult_idx, mult_dst, 1);
+ }
+
+ }
+ break;
+
+ case PPC440SPE_XOR_ID:
+ iter = sw_desc->group_head;
+ if (sw_desc->dst_cnt == 2) {
+ /* both P & Q calculations required; set P mult here */
+ ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
+
+ /* and then set Q mult */
+ iter = ppc440spe_get_group_entry(sw_desc,
+ sw_desc->descs_per_op);
+ }
+ ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
+ break;
+ }
+}
+
+/**
+ * ppc440spe_adma_free_chan_resources - free the resources allocated to the channel
+ */
+static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ struct ppc440spe_adma_desc_slot *iter, *_iter;
+ int in_use_descs = 0;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ ppc440spe_adma_slot_cleanup(ppc440spe_chan);
+
+ spin_lock_bh(&ppc440spe_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(iter, _iter,
+ &ppc440spe_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ ppc440spe_chan->slots_allocated--;
+ }
+ ppc440spe_chan->last_used = NULL;
+
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d %s slots_allocated %d\n",
+ ppc440spe_chan->device->id,
+ __func__, ppc440spe_chan->slots_allocated);
+ spin_unlock_bh(&ppc440spe_chan->lock);
+
+ /* one is ok since we left it on there on purpose */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ */
+static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ last_used = chan->cookie;
+ last_complete = ppc440spe_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
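+ /* not complete yet: reap finished descriptors and check again */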
+ ppc440spe_adma_slot_cleanup(ppc440spe_chan);
+
+ last_used = chan->cookie;
+ last_complete = ppc440spe_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/**
+ * ppc440spe_adma_eot_handler - end of transfer interrupt handler
+ */
+static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
+{
+ struct ppc440spe_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", chan->device->id, __func__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+ ppc440spe_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ppc440spe_adma_err_handler - DMA error interrupt handler;
+ * does the same things as the EOT handler
+ */
+static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
+{
+ struct ppc440spe_adma_chan *chan = data;
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", chan->device->id, __func__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+ ppc440spe_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ppc440spe_test_callback - called when the test operation has completed
+ */
+static void ppc440spe_test_callback(void *unused)
+{
+ complete(&ppc440spe_r6_test_comp);
+}
+
+/**
+ * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
+ */
+static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
+{
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ dev_dbg(ppc440spe_chan->device->common.dev,
+ "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
+ __func__, ppc440spe_chan->pending);
+
+ if (ppc440spe_chan->pending) {
+ ppc440spe_chan->pending = 0;
+ ppc440spe_chan_append(ppc440spe_chan);
+ }
+}
+
+/**
+ * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
+ * use FIFOs, as opposed to the chains used by the XOR engine, so this is an
+ * XOR-specific operation)
+ */
+static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
+ dma_cookie_t cookie;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(chan->device->common.dev,
+ "ppc440spe adma%d: %s\n", chan->device->id, __func__);
+
+ spin_lock_bh(&chan->lock);
+ slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
+ sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ async_tx_ack(&sw_desc->async_tx);
+ ppc440spe_desc_init_null_xor(group_start);
+
+ cookie = chan->common.cookie;
+ cookie++;
+ if (cookie <= 1)
+ cookie = 2;
+
+ /* initialize the completed cookie to be less than
+ * the most recently used cookie
+ */
+ chan->completed_cookie = cookie - 1;
+ chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+ /* channel should not be busy */
+ BUG_ON(ppc440spe_chan_is_busy(chan));
+
+ /* set the descriptor address */
+ ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
+
+ /* run the descriptor */
+ ppc440spe_chan_run(chan);
+ } else
+ printk(KERN_ERR "ppc440spe adma%d"
+ " failed to allocate null descriptor\n",
+ chan->device->id);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * ppc440spe_test_raid6 - test whether the RAID-6 capabilities were enabled
+ * successfully. For this we just perform one WXOR operation with the same
+ * source and destination address and a GF-multiplier of 1; if the RAID-6
+ * capabilities are enabled then we'll get src/dst filled with zeros.
+ */
+static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
+{
+ struct ppc440spe_adma_desc_slot *sw_desc, *iter;
+ struct page *pg;
+ char *a;
+ dma_addr_t dma_addr, addrs[2];
+ unsigned long op = 0;
+ int rval = 0;
+
+ set_bit(PPC440SPE_DESC_WXOR, &op);
+
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ spin_lock_bh(&chan->lock);
+ sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
+ if (sw_desc) {
+ /* 1 src, 1 dst, int_ena, WXOR */
+ ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
+ iter->unmap_len = PAGE_SIZE;
+ }
+ } else {
+ rval = -EFAULT;
+ spin_unlock_bh(&chan->lock);
+ goto exit;
+ }
+ spin_unlock_bh(&chan->lock);
+
+ /* Fill the test page with ones */
+ memset(page_address(pg), 0xFF, PAGE_SIZE);
+ dma_addr = dma_map_page(chan->device->dev, pg, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ /* Setup addresses */
+ ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
+ ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
+ addrs[0] = dma_addr;
+ addrs[1] = 0;
+ ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
+
+ async_tx_ack(&sw_desc->async_tx);
+ sw_desc->async_tx.callback = ppc440spe_test_callback;
+ sw_desc->async_tx.callback_param = NULL;
+
+ init_completion(&ppc440spe_r6_test_comp);
+
+ ppc440spe_adma_tx_submit(&sw_desc->async_tx);
+ ppc440spe_adma_issue_pending(&chan->common);
+
+ wait_for_completion(&ppc440spe_r6_test_comp);
+
+ /* Now check if the test page is zeroed */
+ a = page_address(pg);
+ if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
+ /* page is zero - RAID-6 enabled */
+ rval = 0;
+ } else {
+ /* RAID-6 was not enabled */
+ rval = -EINVAL;
+ }
+exit:
+ __free_page(pg);
+ return rval;
+}
+
+static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
+{
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ case PPC440SPE_DMA1_ID:
+ dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
+ dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
+ dma_cap_set(DMA_PQ, adev->common.cap_mask);
+ dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
+ dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
+ break;
+ case PPC440SPE_XOR_ID:
+ dma_cap_set(DMA_XOR, adev->common.cap_mask);
+ dma_cap_set(DMA_PQ, adev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
+ break;
+ }
+
+ /* Set base routines */
+ adev->common.device_alloc_chan_resources =
+ ppc440spe_adma_alloc_chan_resources;
+ adev->common.device_free_chan_resources =
+ ppc440spe_adma_free_chan_resources;
+ adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
+ adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
+
+ /* Set prep routines based on capability */
+ if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_memcpy =
+ ppc440spe_adma_prep_dma_memcpy;
+ }
+ if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_memset =
+ ppc440spe_adma_prep_dma_memset;
+ }
+ if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
+ adev->common.max_xor = XOR_MAX_OPS;
+ adev->common.device_prep_dma_xor =
+ ppc440spe_adma_prep_dma_xor;
+ }
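+ /* the maximum PQ/XOR operand counts below are bounded by the CDB
+ * FIFO depth on DMA0/1 and derived from XOR_MAX_OPS on the XOR engine
+ */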
+ if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ dma_set_maxpq(&adev->common,
+ DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
+ break;
+ case PPC440SPE_DMA1_ID:
+ dma_set_maxpq(&adev->common,
+ DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
+ break;
+ case PPC440SPE_XOR_ID:
+ adev->common.max_pq = XOR_MAX_OPS * 3;
+ break;
+ }
+ adev->common.device_prep_dma_pq =
+ ppc440spe_adma_prep_dma_pq;
+ }
+ if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ adev->common.max_pq = DMA0_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ case PPC440SPE_DMA1_ID:
+ adev->common.max_pq = DMA1_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ }
+ adev->common.device_prep_dma_pq_val =
+ ppc440spe_adma_prep_dma_pqzero_sum;
+ }
+ if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
+ switch (adev->id) {
+ case PPC440SPE_DMA0_ID:
+ adev->common.max_xor = DMA0_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ case PPC440SPE_DMA1_ID:
+ adev->common.max_xor = DMA1_FIFO_SIZE /
+ sizeof(struct dma_cdb);
+ break;
+ }
+ adev->common.device_prep_dma_xor_val =
+ ppc440spe_adma_prep_dma_xor_zero_sum;
+ }
+ if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_interrupt =
+ ppc440spe_adma_prep_dma_interrupt;
+ }
+ pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
+ "( %s%s%s%s%s%s%s)\n",
+ dev_name(adev->dev),
+ dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
+ dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
+ dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
+ dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
+ dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
+ dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
+}
+
+static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
+ struct ppc440spe_adma_chan *chan,
+ int *initcode)
+{
+ struct device_node *np;
+ int ret;
+
+ np = container_of(adev->dev, struct of_device, dev)->node;
+ if (adev->id != PPC440SPE_XOR_ID) {
+ adev->err_irq = irq_of_parse_and_map(np, 1);
+ if (adev->err_irq == NO_IRQ) {
+ dev_warn(adev->dev, "no err irq resource?\n");
+ *initcode = PPC_ADMA_INIT_IRQ2;
+ adev->err_irq = -ENXIO;
+ } else
+ atomic_inc(&ppc440spe_adma_err_irq_ref);
+ } else {
+ adev->err_irq = -ENXIO;
+ }
+
+ adev->irq = irq_of_parse_and_map(np, 0);
+ if (adev->irq == NO_IRQ) {
+ dev_err(adev->dev, "no irq resource\n");
+ *initcode = PPC_ADMA_INIT_IRQ1;
+ ret = -ENXIO;
+ goto err_irq_map;
+ }
+ dev_dbg(adev->dev, "irq %d, err irq %d\n",
+ adev->irq, adev->err_irq);
+
+ ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
+ 0, dev_driver_string(adev->dev), chan);
+ if (ret) {
+ dev_err(adev->dev, "can't request irq %d\n",
+ adev->irq);
+ *initcode = PPC_ADMA_INIT_IRQ1;
+ ret = -EIO;
+ goto err_req1;
+ }
+
+ /* Only the DMA engines have a separate error IRQ,
+ * so it is OK if err_irq < 0 in the XOR engine case.
+ */
+ if (adev->err_irq > 0) {
+ /* both DMA engines share common error IRQ */
+ ret = request_irq(adev->err_irq,
+ ppc440spe_adma_err_handler,
+ IRQF_SHARED,
+ dev_driver_string(adev->dev),
+ chan);
+ if (ret) {
+ dev_err(adev->dev, "can't request irq %d\n",
+ adev->err_irq);
+ *initcode = PPC_ADMA_INIT_IRQ2;
+ ret = -EIO;
+ goto err_req2;
+ }
+ }
+
+ if (adev->id == PPC440SPE_XOR_ID) {
+ /* enable XOR engine interrupts */
+ iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
+ XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
+ &adev->xor_reg->ier);
+ } else {
+ u32 mask, enable;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
+ if (!np) {
+ pr_err("%s: can't find I2O device tree node\n",
+ __func__);
+ ret = -ENODEV;
+ goto err_req2;
+ }
+ adev->i2o_reg = of_iomap(np, 0);
+ if (!adev->i2o_reg) {
+ pr_err("%s: failed to map I2O registers\n", __func__);
+ of_node_put(np);
+ ret = -EINVAL;
+ goto err_req2;
+ }
+ of_node_put(np);
+ /* Unmask 'CS FIFO Attention' interrupts and
+ * enable generating interrupts on errors
+ */
+ enable = (adev->id == PPC440SPE_DMA0_ID) ?
+ ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
+ ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
+ mask = ioread32(&adev->i2o_reg->iopim) & enable;
+ iowrite32(mask, &adev->i2o_reg->iopim);
+ }
+ return 0;
+
+err_req2:
+ free_irq(adev->irq, chan);
+err_req1:
+ irq_dispose_mapping(adev->irq);
+err_irq_map:
+ if (adev->err_irq > 0) {
+ if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
+ irq_dispose_mapping(adev->err_irq);
+ }
+ return ret;
+}
+
+static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
+ struct ppc440spe_adma_chan *chan)
+{
+ u32 mask, disable;
+
+ if (adev->id == PPC440SPE_XOR_ID) {
+ /* disable XOR engine interrupts */
+ mask = ioread32be(&adev->xor_reg->ier);
+ mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
+ XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
+ iowrite32be(mask, &adev->xor_reg->ier);
+ } else {
+ /* disable DMAx engine interrupts */
+ disable = (adev->id == PPC440SPE_DMA0_ID) ?
+ (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
+ (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
+ mask = ioread32(&adev->i2o_reg->iopim) | disable;
+ iowrite32(mask, &adev->i2o_reg->iopim);
+ }
+ free_irq(adev->irq, chan);
+ irq_dispose_mapping(adev->irq);
+ if (adev->err_irq > 0) {
+ free_irq(adev->err_irq, chan);
+ if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
+ irq_dispose_mapping(adev->err_irq);
+ iounmap(adev->i2o_reg);
+ }
+ }
+}
+
+/**
+ * ppc440spe_adma_probe - probe the asynch device
+ */
+static int __devinit ppc440spe_adma_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct device_node *np = ofdev->node;
+ struct resource res;
+ struct ppc440spe_adma_device *adev;
+ struct ppc440spe_adma_chan *chan;
+ struct ppc_dma_chan_ref *ref, *_ref;
+ int ret = 0, initcode = PPC_ADMA_INIT_OK;
+ const u32 *idx;
+ int len;
+ void *regs;
+ u32 id, pool_size;
+
+ if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
+ id = PPC440SPE_XOR_ID;
+ /* The XOR engine does not use FIFOs but a linked list,
+ * so the pool size to allocate does not depend on the
+ * engine configuration.
+ */
+ pool_size = PAGE_SIZE << 1;
+ } else {
+ /* it is DMA0 or DMA1 */
+ idx = of_get_property(np, "cell-index", &len);
+ if (!idx || (len != sizeof(u32))) {
+ dev_err(&ofdev->dev, "Device node %s has missing "
+ "or invalid cell-index property\n",
+ np->full_name);
+ return -EINVAL;
+ }
+ id = *idx;
+ /* The DMA0/1 engines use a FIFO to maintain CDBs, so the
+ * pool should be sized according to the FIFO depth: the
+ * pool must provide as many CDBs as the FIFO can hold
+ * pointers to.
+ * That is
+ * CDB size = 32B;
+ * CDBs number = (DMA0_FIFO_SIZE >> 3);
+ * Pool size = CDBs number * CDB size =
+ * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
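+ * For example, with the DMA0_FIFO_SIZE of 0x1000 defined in dma.h
+ * this gives 512 CDB pointers in the FIFO and a 512 * 32B = 16KB
+ * pool, i.e. 0x1000 << 2 = 0x4000 bytes.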
+ */
+ pool_size = (id == PPC440SPE_DMA0_ID) ?
+ DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
+ pool_size <<= 2;
+ }
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(&ofdev->dev, "failed to get memory resource\n");
+ initcode = PPC_ADMA_INIT_MEMRES;
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ dev_driver_string(&ofdev->dev))) {
+ dev_err(&ofdev->dev, "failed to request memory region "
+ "(0x%016llx-0x%016llx)\n",
+ (u64)res.start, (u64)res.end);
+ initcode = PPC_ADMA_INIT_MEMREG;
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* create a device */
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev) {
+ dev_err(&ofdev->dev, "failed to allocate device\n");
+ initcode = PPC_ADMA_INIT_ALLOC;
+ ret = -ENOMEM;
+ goto err_adev_alloc;
+ }
+
+ adev->id = id;
+ adev->pool_size = pool_size;
+ /* allocate coherent memory for hardware descriptors */
+ adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
+ adev->pool_size, &adev->dma_desc_pool,
+ GFP_KERNEL);
+ if (adev->dma_desc_pool_virt == NULL) {
+ dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
+ "memory for hardware descriptors\n",
+ adev->pool_size);
+ initcode = PPC_ADMA_INIT_COHERENT;
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+ dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
+ adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
+
+ regs = ioremap(res.start, resource_size(&res));
+ if (!regs) {
+ dev_err(&ofdev->dev, "failed to ioremap regs!\n");
+ goto err_regs_alloc;
+ }
+
+ if (adev->id == PPC440SPE_XOR_ID) {
+ adev->xor_reg = regs;
+ /* Reset XOR */
+ iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
+ iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
+ } else {
+ size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
+ DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
+ adev->dma_reg = regs;
+ /* DMAx_FIFO_SIZE is defined in bytes;
+ * <fsiz> is defined in number of CDB pointers (8 bytes each).
+ * DMA FIFO Length = CSlength + CPlength, where
+ * CSlength = CPlength = (fsiz + 1) * 8.
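+ * E.g. a 0x1000 byte FIFO programs fsiz = (4096 >> 3) - 2 = 510, so
+ * CSlength = CPlength = (510 + 1) * 8 = 4088 bytes per FIFO.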
+ */
+ iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
+ &adev->dma_reg->fsiz);
+ /* Configure DMA engine */
+ iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
+ &adev->dma_reg->cfg);
+ /* Clear Status */
+ iowrite32(~0, &adev->dma_reg->dsts);
+ }
+
+ adev->dev = &ofdev->dev;
+ adev->common.dev = &ofdev->dev;
+ INIT_LIST_HEAD(&adev->common.channels);
+ dev_set_drvdata(&ofdev->dev, adev);
+
+ /* create a channel */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(&ofdev->dev, "can't allocate channel structure\n");
+ initcode = PPC_ADMA_INIT_CHANNEL;
+ ret = -ENOMEM;
+ goto err_chan_alloc;
+ }
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->chain);
+ INIT_LIST_HEAD(&chan->all_slots);
+ chan->device = adev;
+ chan->common.device = &adev->common;
+ list_add_tail(&chan->common.device_node, &adev->common.channels);
+ tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
+ (unsigned long)chan);
+
+ /* allocate and map helper pages for async validation or
+ * async_mult/async_sum_product operations on DMA0/1.
+ */
+ if (adev->id != PPC440SPE_XOR_ID) {
+ chan->pdest_page = alloc_page(GFP_KERNEL);
+ chan->qdest_page = alloc_page(GFP_KERNEL);
+ if (!chan->pdest_page ||
+ !chan->qdest_page) {
+ if (chan->pdest_page)
+ __free_page(chan->pdest_page);
+ if (chan->qdest_page)
+ __free_page(chan->qdest_page);
+ ret = -ENOMEM;
+ goto err_page_alloc;
+ }
+ chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+ if (ref) {
+ ref->chan = &chan->common;
+ INIT_LIST_HEAD(&ref->node);
+ list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
+ } else {
+ dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
+ ret = -ENOMEM;
+ goto err_ref_alloc;
+ }
+
+ ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
+ if (ret)
+ goto err_irq;
+
+ ppc440spe_adma_init_capabilities(adev);
+
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ initcode = PPC_ADMA_INIT_REGISTER;
+ dev_err(&ofdev->dev, "failed to register dma device\n");
+ goto err_dev_reg;
+ }
+
+ goto out;
+
+err_dev_reg:
+ ppc440spe_adma_release_irqs(adev, chan);
+err_irq:
+ list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
+ if (chan == to_ppc440spe_adma_chan(ref->chan)) {
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ }
+err_ref_alloc:
+ if (adev->id != PPC440SPE_XOR_ID) {
+ dma_unmap_page(&ofdev->dev, chan->pdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&ofdev->dev, chan->qdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(chan->pdest_page);
+ __free_page(chan->qdest_page);
+ }
+err_page_alloc:
+ kfree(chan);
+err_chan_alloc:
+ if (adev->id == PPC440SPE_XOR_ID)
+ iounmap(adev->xor_reg);
+ else
+ iounmap(adev->dma_reg);
+err_regs_alloc:
+ dma_free_coherent(adev->dev, adev->pool_size,
+ adev->dma_desc_pool_virt,
+ adev->dma_desc_pool);
+err_dma_alloc:
+ kfree(adev);
+err_adev_alloc:
+ release_mem_region(res.start, resource_size(&res));
+out:
+ if (id < PPC440SPE_ADMA_ENGINES_NUM)
+ ppc440spe_adma_devices[id] = initcode;
+
+ return ret;
+}
+
+/**
+ * ppc440spe_adma_remove - remove the asynch device
+ */
+static int __devexit ppc440spe_adma_remove(struct of_device *ofdev)
+{
+ struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev);
+ struct device_node *np = ofdev->node;
+ struct resource res;
+ struct dma_chan *chan, *_chan;
+ struct ppc_dma_chan_ref *ref, *_ref;
+ struct ppc440spe_adma_chan *ppc440spe_chan;
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+ if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
+ ppc440spe_adma_devices[adev->id] = -1;
+
+ dma_async_device_unregister(&adev->common);
+
+ list_for_each_entry_safe(chan, _chan, &adev->common.channels,
+ device_node) {
+ ppc440spe_chan = to_ppc440spe_adma_chan(chan);
+ ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
+ tasklet_kill(&ppc440spe_chan->irq_tasklet);
+ if (adev->id != PPC440SPE_XOR_ID) {
+ dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(ppc440spe_chan->pdest_page);
+ __free_page(ppc440spe_chan->qdest_page);
+ }
+ list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
+ node) {
+ if (ppc440spe_chan ==
+ to_ppc440spe_adma_chan(ref->chan)) {
+ list_del(&ref->node);
+ kfree(ref);
+ }
+ }
+ list_del(&chan->device_node);
+ kfree(ppc440spe_chan);
+ }
+
+ dma_free_coherent(adev->dev, adev->pool_size,
+ adev->dma_desc_pool_virt, adev->dma_desc_pool);
+ if (adev->id == PPC440SPE_XOR_ID)
+ iounmap(adev->xor_reg);
+ else
+ iounmap(adev->dma_reg);
+ of_address_to_resource(np, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+ kfree(adev);
+ return 0;
+}
+
+/*
+ * /sys driver interface to enable h/w RAID-6 capabilities
+ * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/
+ * directory are "devices", "enable" and "poly".
+ * "devices" shows available engines.
+ * "enable" is used to enable RAID-6 capabilities or to check
+ * whether these has been activated.
+ * "poly" allows setting/checking used polynomial (for PPC440SPe only).
+ */
+
+static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf)
+{
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
+ if (ppc440spe_adma_devices[i] == -1)
+ continue;
+ size += snprintf(buf + size, PAGE_SIZE - size,
+ "PPC440SP(E)-ADMA.%d: %s\n", i,
+ ppc_adma_errors[ppc440spe_adma_devices[i]]);
+ }
+ return size;
+}
+
+static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
+ ppc440spe_r6_enabled ? "EN" : "DIS");
+}
+
+static ssize_t store_ppc440spe_r6enable(struct device_driver *dev,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (!count || count > 11)
+ return -EINVAL;
+
+ if (!ppc440spe_r6_tchan)
+ return -EFAULT;
+
+ /* Write a key */
+ sscanf(buf, "%lx", &val);
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
+ isync();
+
+ /* Verify whether it really works now */
+ if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
+ pr_info("PPC440SP(e) RAID-6 has been activated "
+ "successfully\n");
+ ppc440spe_r6_enabled = 1;
+ } else {
+ pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
+ " Error key ?\n");
+ ppc440spe_r6_enabled = 0;
+ }
+ return count;
+}
+
+static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf)
+{
+ ssize_t size = 0;
+ u32 reg;
+
+#ifdef CONFIG_440SP
+ /* 440SP has fixed polynomial */
+ reg = 0x4d;
+#else
+ reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
+ reg >>= MQ0_CFBHL_POLY;
+ reg &= 0xFF;
+#endif
+
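+ /* The leading "0x1" in the message below accounts for the implicit
+ * high-order bit of the 9-bit polynomial: only the low 8 bits are
+ * kept in the register (cf. store_ppc440spe_r6poly(), which masks
+ * the value with 0xFF).
+ */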
+ size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
+ "uses 0x1%02x polynomial.\n", reg);
+ return size;
+}
+
+static ssize_t store_ppc440spe_r6poly(struct device_driver *dev,
+ const char *buf, size_t count)
+{
+ unsigned long reg, val;
+
+#ifdef CONFIG_440SP
+ /* 440SP uses default 0x14D polynomial only */
+ return -EINVAL;
+#endif
+
+ if (!count || count > 6)
+ return -EINVAL;
+
+ /* e.g., 0x14D or 0x11D */
+ sscanf(buf, "%lx", &val);
+
+ if (val & ~0x1FF)
+ return -EINVAL;
+
+ val &= 0xFF;
+ reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
+ reg &= ~(0xFF << MQ0_CFBHL_POLY);
+ reg |= val << MQ0_CFBHL_POLY;
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
+
+ return count;
+}
+
+static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL);
+static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable,
+ store_ppc440spe_r6enable);
+static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly,
+ store_ppc440spe_r6poly);
+
+/*
+ * Common initialisation for RAID engines; allocate memory for
+ * DMAx FIFOs, perform configuration common for all DMA engines.
+ * Further DMA engine specific configuration is done at probe time.
+ */
+static int ppc440spe_configure_raid_devices(void)
+{
+ struct device_node *np;
+ struct resource i2o_res;
+ struct i2o_regs __iomem *i2o_reg;
+ dcr_host_t i2o_dcr_host;
+ unsigned int dcr_base, dcr_len;
+ int i, ret;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
+ if (!np) {
+ pr_err("%s: can't find I2O device tree node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(np, 0, &i2o_res)) {
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ i2o_reg = of_iomap(np, 0);
+ if (!i2o_reg) {
+ pr_err("%s: failed to map I2O registers\n", __func__);
+ of_node_put(np);
+ return -EINVAL;
+ }
+
+ /* Get I2O DCRs base */
+ dcr_base = dcr_resource_start(np, 0);
+ dcr_len = dcr_resource_len(np, 0);
+ if (!dcr_base && !dcr_len) {
+ pr_err("%s: can't get DCR registers base/len!\n",
+ np->full_name);
+ of_node_put(np);
+ iounmap(i2o_reg);
+ return -ENODEV;
+ }
+
+ i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
+ if (!DCR_MAP_OK(i2o_dcr_host)) {
+ pr_err("%s: failed to map DCRs!\n", np->full_name);
+ of_node_put(np);
+ iounmap(i2o_reg);
+ return -ENODEV;
+ }
+ of_node_put(np);
+
+ /* Provide memory regions for the DMAs' FIFOs: I2O, DMA0 and DMA1 share
+ * the base address of the FIFO memory space.
+ * We actually need twice as much physical memory as programmed in the
+ * <fsiz> register (because there are two FIFOs for each DMA: CP and CS).
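+ * With the 0x1000 FIFO sizes from dma.h this allocates
+ * (0x1000 + 0x1000) << 1 = 16KB: 8KB per DMA engine, covering both
+ * its CP and CS FIFOs.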
+ */
+ ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
+ GFP_KERNEL);
+ if (!ppc440spe_dma_fifo_buf) {
+ pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
+ iounmap(i2o_reg);
+ dcr_unmap(i2o_dcr_host, dcr_len);
+ return -ENOMEM;
+ }
+
+ /*
+ * Configure h/w
+ */
+ /* Reset I2O/DMA */
+ mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
+ mtdcri(SDR0, DCRN_SDR0_SRST, 0);
+
+ /* Setup the base address of mmaped registers */
+ dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
+ dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
+ I2O_REG_ENABLE);
+ dcr_unmap(i2o_dcr_host, dcr_len);
+
+ /* Setup FIFO memory space base address */
+ iowrite32(0, &i2o_reg->ifbah);
+ iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
+
+ /* set zero FIFO size for I2O, so the whole
+ * ppc440spe_dma_fifo_buf is used by DMAs.
+ * The DMAx FIFOs will be configured at probe time.
+ */
+ iowrite32(0, &i2o_reg->ifsiz);
+ iounmap(i2o_reg);
+
+ /* To prepare WXOR/RXOR functionality we need access to
+ * Memory Queue Module DCRs (the functionality will eventually be
+ * enabled via the /sys interface of the ppc440spe ADMA driver).
+ */
+ np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
+ if (!np) {
+ pr_err("%s: can't find MQ device tree node\n",
+ __func__);
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ /* Get MQ DCRs base */
+ dcr_base = dcr_resource_start(np, 0);
+ dcr_len = dcr_resource_len(np, 0);
+ if (!dcr_base && !dcr_len) {
+ pr_err("%s: can't get DCR registers base/len!\n",
+ np->full_name);
+ ret = -ENODEV;
+ goto out_mq;
+ }
+
+ ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
+ if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
+ pr_err("%s: failed to map DCRs!\n", np->full_name);
+ ret = -ENODEV;
+ goto out_mq;
+ }
+ of_node_put(np);
+ ppc440spe_mq_dcr_len = dcr_len;
+
+ /* Set HB alias */
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
+
+ /* Set:
+ * - LL transaction passing limit to 1;
+ * - Memory controller cycle limit to 1;
+ * - Galois Polynomial to 0x14d (default)
+ */
+ dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
+ (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
+ (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
+
+ atomic_set(&ppc440spe_adma_err_irq_ref, 0);
+ for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
+ ppc440spe_adma_devices[i] = -1;
+
+ return 0;
+
+out_mq:
+ of_node_put(np);
+out_free:
+ kfree(ppc440spe_dma_fifo_buf);
+ return ret;
+}
+
+static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+ { .compatible = "ibm,dma-440spe", },
+ { .compatible = "amcc,xor-accelerator", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
+
+static struct of_platform_driver ppc440spe_adma_driver = {
+ .match_table = ppc440spe_adma_of_match,
+ .probe = ppc440spe_adma_probe,
+ .remove = __devexit_p(ppc440spe_adma_remove),
+ .driver = {
+ .name = "PPC440SP(E)-ADMA",
+ .owner = THIS_MODULE,
+ },
+};
+
+static __init int ppc440spe_adma_init(void)
+{
+ int ret;
+
+ ret = ppc440spe_configure_raid_devices();
+ if (ret)
+ return ret;
+
+ ret = of_register_platform_driver(&ppc440spe_adma_driver);
+ if (ret) {
+ pr_err("%s: failed to register platform driver\n",
+ __func__);
+ goto out_reg;
+ }
+
+ /* Initialization status */
+ ret = driver_create_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_devices);
+ if (ret)
+ goto out_dev;
+
+ /* RAID-6 h/w enable entry */
+ ret = driver_create_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_enable);
+ if (ret)
+ goto out_en;
+
+ /* GF polynomial to use */
+ ret = driver_create_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_poly);
+ if (!ret)
+ return ret;
+
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_enable);
+out_en:
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_devices);
+out_dev:
+ /* User will not be able to enable h/w RAID-6 */
+ pr_err("%s: failed to create RAID-6 driver interface\n",
+ __func__);
+ of_unregister_platform_driver(&ppc440spe_adma_driver);
+out_reg:
+ dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
+ kfree(ppc440spe_dma_fifo_buf);
+ return ret;
+}
+
+static void __exit ppc440spe_adma_exit(void)
+{
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_poly);
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_enable);
+ driver_remove_file(&ppc440spe_adma_driver.driver,
+ &driver_attr_devices);
+ of_unregister_platform_driver(&ppc440spe_adma_driver);
+ dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
+ kfree(ppc440spe_dma_fifo_buf);
+}
+
+arch_initcall(ppc440spe_adma_init);
+module_exit(ppc440spe_adma_exit);
+
+MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
+MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
new file mode 100644
index 00000000000..8ada5a812e3
--- /dev/null
+++ b/drivers/dma/ppc4xx/adma.h
@@ -0,0 +1,195 @@
+/*
+ * 2006-2009 (C) DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#ifndef _PPC440SPE_ADMA_H
+#define _PPC440SPE_ADMA_H
+
+#include <linux/types.h>
+#include "dma.h"
+#include "xor.h"
+
+#define to_ppc440spe_adma_chan(chan) \
+ container_of(chan, struct ppc440spe_adma_chan, common)
+#define to_ppc440spe_adma_device(dev) \
+ container_of(dev, struct ppc440spe_adma_device, common)
+#define tx_to_ppc440spe_adma_slot(tx) \
+ container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
+
+/* Default polynomial (the only one available on 440SP) */
+#define PPC440SPE_DEFAULT_POLY 0x4d
+
+#define PPC440SPE_ADMA_ENGINES_NUM (XOR_ENGINES_NUM + DMA_ENGINES_NUM)
+
+#define PPC440SPE_ADMA_WATCHDOG_MSEC 3
+#define PPC440SPE_ADMA_THRESHOLD 1
+
+#define PPC440SPE_DMA0_ID 0
+#define PPC440SPE_DMA1_ID 1
+#define PPC440SPE_XOR_ID 2
+
+#define PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT 0xFFFFFFUL
+/* this is the XOR_CBBCR width */
+#define PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT (1 << 31)
+#define PPC440SPE_ADMA_ZERO_SUM_MAX_BYTE_COUNT PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT
+
+#define PPC440SPE_RXOR_RUN 0
+
+#define MQ0_CF2H_RXOR_BS_MASK 0x1FF
+
+#undef ADMA_LL_DEBUG
+
+/**
+ * struct ppc440spe_adma_device - internal representation of an ADMA device
+ * @dev: device
+ * @dma_reg: base for DMAx register access
+ * @xor_reg: base for XOR register access
+ * @i2o_reg: base for I2O register access
+ * @id: HW ADMA Device selector
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @pool_size: size of the pool
+ * @irq: DMAx or XOR irq number
+ * @err_irq: DMAx error irq number
+ * @common: embedded struct dma_device
+ */
+struct ppc440spe_adma_device {
+ struct device *dev;
+ struct dma_regs __iomem *dma_reg;
+ struct xor_regs __iomem *xor_reg;
+ struct i2o_regs __iomem *i2o_reg;
+ int id;
+ void *dma_desc_pool_virt;
+ dma_addr_t dma_desc_pool;
+ size_t pool_size;
+ int irq;
+ int err_irq;
+ struct dma_device common;
+};
+
+/**
+ * struct ppc440spe_adma_chan - internal representation of an ADMA channel
+ * @lock: serializes enqueue/dequeue operations to the slot pool
+ * @device: parent device
+ * @chain: device chain view of the descriptors
+ * @common: common dmaengine channel object members
+ * @all_slots: complete domain of slots usable by the channel
+ * @pending: allows batching of hardware operations
+ * @completed_cookie: identifier for the most recently completed operation
+ * @slots_allocated: records the actual size of the descriptor slot pool
+ * @hw_chain_inited: h/w descriptor chain initialization flag
+ * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
+ * @needs_unmap: if buffers should not be unmapped upon final processing
+ * @pdest_page: P destination page for async validate operation
+ * @qdest_page: Q destination page for async validate operation
+ * @pdest: P dma addr for async validate operation
+ * @qdest: Q dma addr for async validate operation
+ */
+struct ppc440spe_adma_chan {
+ spinlock_t lock;
+ struct ppc440spe_adma_device *device;
+ struct list_head chain;
+ struct dma_chan common;
+ struct list_head all_slots;
+ struct ppc440spe_adma_desc_slot *last_used;
+ int pending;
+ dma_cookie_t completed_cookie;
+ int slots_allocated;
+ int hw_chain_inited;
+ struct tasklet_struct irq_tasklet;
+ u8 needs_unmap;
+ struct page *pdest_page;
+ struct page *qdest_page;
+ dma_addr_t pdest;
+ dma_addr_t qdest;
+};
+
+struct ppc440spe_rxor {
+ u32 addrl;
+ u32 addrh;
+ int len;
+ int xor_count;
+ int addr_count;
+ int desc_count;
+ int state;
+};
+
+/**
+ * struct ppc440spe_adma_desc_slot - PPC440SPE-ADMA software descriptor
+ * @phys: hardware address of the hardware descriptor chain
+ * @group_head: first operation in a transaction
+ * @hw_next: pointer to the next descriptor in chain
+ * @async_tx: support for the async_tx api
+ * @slot_node: node on the ppc440spe_adma_chan.all_slots list
+ * @chain_node: node on the ppc440spe_adma_chan.chain list
+ * @group_list: list of slots that make up a multi-descriptor transaction
+ * for example transfer lengths larger than the supported hw max
+ * @unmap_len: transaction bytecount
+ * @hw_desc: virtual address of the hardware descriptor chain
+ * @stride: currently chained or not
+ * @idx: pool index
+ * @slot_cnt: total slots used in a transaction (group of operations)
+ * @src_cnt: number of sources set in this descriptor
+ * @dst_cnt: number of destinations set in the descriptor
+ * @slots_per_op: number of slots per operation
+ * @descs_per_op: number of slots per P/Q operation; see the comment
+ * for the ppc440spe_prep_dma_pqxor function
+ * @flags: desc state/type
+ * @reverse_flags: 1 if a corresponding rxor address uses reversed address order
+ * @xor_check_result: result of zero sum
+ * @crc32_result: result of the CRC calculation
+ */
+struct ppc440spe_adma_desc_slot {
+ dma_addr_t phys;
+ struct ppc440spe_adma_desc_slot *group_head;
+ struct ppc440spe_adma_desc_slot *hw_next;
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head slot_node;
+ struct list_head chain_node; /* node in channel ops list */
+ struct list_head group_list; /* list */
+ unsigned int unmap_len;
+ void *hw_desc;
+ u16 stride;
+ u16 idx;
+ u16 slot_cnt;
+ u8 src_cnt;
+ u8 dst_cnt;
+ u8 slots_per_op;
+ u8 descs_per_op;
+ unsigned long flags;
+ unsigned long reverse_flags[8];
+
+#define PPC440SPE_DESC_INT 0 /* generate interrupt on complete */
+#define PPC440SPE_ZERO_P 1 /* clear P destination */
+#define PPC440SPE_ZERO_Q 2 /* clear Q destination */
+#define PPC440SPE_COHERENT 3 /* src/dst are coherent */
+
+#define PPC440SPE_DESC_WXOR 4 /* WXORs are in chain */
+#define PPC440SPE_DESC_RXOR 5 /* RXOR is in chain */
+
+#define PPC440SPE_DESC_RXOR123 8 /* CDB for RXOR123 operation */
+#define PPC440SPE_DESC_RXOR124 9 /* CDB for RXOR124 operation */
+#define PPC440SPE_DESC_RXOR125 10 /* CDB for RXOR125 operation */
+#define PPC440SPE_DESC_RXOR12 11 /* CDB for RXOR12 operation */
+#define PPC440SPE_DESC_RXOR_REV 12 /* CDB has srcs in reversed order */
+
+#define PPC440SPE_DESC_PCHECK 13
+#define PPC440SPE_DESC_QCHECK 14
+
+#define PPC440SPE_DESC_RXOR_MSK 0x3
+
+ struct ppc440spe_rxor rxor_cursor;
+
+ union {
+ u32 *xor_check_result;
+ u32 *crc32_result;
+ };
+};
+
+#endif /* _PPC440SPE_ADMA_H */
diff --git a/drivers/dma/ppc4xx/dma.h b/drivers/dma/ppc4xx/dma.h
new file mode 100644
index 00000000000..bcde2df2f37
--- /dev/null
+++ b/drivers/dma/ppc4xx/dma.h
@@ -0,0 +1,223 @@
+/*
+ * 440SPe's DMA engines support header file
+ *
+ * 2006-2009 (C) DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _PPC440SPE_DMA_H
+#define _PPC440SPE_DMA_H
+
+#include <linux/types.h>
+
+/* Number of elements in the array with static CDBs */
+#define MAX_STAT_DMA_CDBS 16
+/* Number of DMA engines available on the controller */
+#define DMA_ENGINES_NUM 2
+
+/* Maximum h/w supported number of destinations */
+#define DMA_DEST_MAX_NUM 2
+
+/* FIFO's params */
+#define DMA0_FIFO_SIZE 0x1000
+#define DMA1_FIFO_SIZE 0x1000
+#define DMA_FIFO_ENABLE (1<<12)
+
+/* DMA Configuration Register. Data Transfer Engine PLB Priority: */
+#define DMA_CFG_DXEPR_LP (0<<26)
+#define DMA_CFG_DXEPR_HP (3<<26)
+#define DMA_CFG_DXEPR_HHP (2<<26)
+#define DMA_CFG_DXEPR_HHHP (1<<26)
+
+/* DMA Configuration Register. DMA FIFO Manager PLB Priority: */
+#define DMA_CFG_DFMPP_LP (0<<23)
+#define DMA_CFG_DFMPP_HP (3<<23)
+#define DMA_CFG_DFMPP_HHP (2<<23)
+#define DMA_CFG_DFMPP_HHHP (1<<23)
+
+/* DMA Configuration Register. Force 64-byte Alignment */
+#define DMA_CFG_FALGN (1 << 19)
+
+/*UIC0:*/
+#define D0CPF_INT (1<<12)
+#define D0CSF_INT (1<<11)
+#define D1CPF_INT (1<<10)
+#define D1CSF_INT (1<<9)
+/*UIC1:*/
+#define DMAE_INT (1<<9)
+
+/* I2O IOP Interrupt Mask Register */
+#define I2O_IOPIM_P0SNE (1<<3)
+#define I2O_IOPIM_P0EM (1<<5)
+#define I2O_IOPIM_P1SNE (1<<6)
+#define I2O_IOPIM_P1EM (1<<8)
+
+/* DMA CDB fields */
+#define DMA_CDB_MSK (0xF)
+#define DMA_CDB_64B_ADDR (1<<2)
+#define DMA_CDB_NO_INT (1<<3)
+#define DMA_CDB_STATUS_MSK (0x3)
+#define DMA_CDB_ADDR_MSK (0xFFFFFFF0)
+
+/* DMA CDB OpCodes */
+#define DMA_CDB_OPC_NO_OP (0x00)
+#define DMA_CDB_OPC_MV_SG1_SG2 (0x01)
+#define DMA_CDB_OPC_MULTICAST (0x05)
+#define DMA_CDB_OPC_DFILL128 (0x24)
+#define DMA_CDB_OPC_DCHECK128 (0x23)
+
+#define DMA_CUED_XOR_BASE (0x10000000)
+#define DMA_CUED_XOR_HB (0x00000008)
+
+#ifdef CONFIG_440SP
+#define DMA_CUED_MULT1_OFF 0
+#define DMA_CUED_MULT2_OFF 8
+#define DMA_CUED_MULT3_OFF 16
+#define DMA_CUED_REGION_OFF 24
+#define DMA_CUED_XOR_WIN_MSK (0xFC000000)
+#else
+#define DMA_CUED_MULT1_OFF 2
+#define DMA_CUED_MULT2_OFF 10
+#define DMA_CUED_MULT3_OFF 18
+#define DMA_CUED_REGION_OFF 26
+#define DMA_CUED_XOR_WIN_MSK (0xF0000000)
+#endif
+
+#define DMA_CUED_REGION_MSK 0x3
+#define DMA_RXOR123 0x0
+#define DMA_RXOR124 0x1
+#define DMA_RXOR125 0x2
+#define DMA_RXOR12 0x3
+
+/* S/G addresses */
+#define DMA_CDB_SG_SRC 1
+#define DMA_CDB_SG_DST1 2
+#define DMA_CDB_SG_DST2 3
+
+/*
+ * DMAx engines Command Descriptor Block Type
+ */
+struct dma_cdb {
+ /*
+ * Basic CDB structure (Table 20-17, p.499, 440spe_um_1_22.pdf)
+ */
+ u8 pad0[2]; /* reserved */
+ u8 attr; /* attributes */
+ u8 opc; /* opcode */
+ u32 sg1u; /* upper SG1 address */
+ u32 sg1l; /* lower SG1 address */
+ u32 cnt; /* SG count, 3B used */
+ u32 sg2u; /* upper SG2 address */
+ u32 sg2l; /* lower SG2 address */
+ u32 sg3u; /* upper SG3 address */
+ u32 sg3l; /* lower SG3 address */
+};
+
+/*
+ * DMAx hardware registers (p.515 in 440SPe UM 1.22)
+ */
+struct dma_regs {
+ u32 cpfpl;
+ u32 cpfph;
+ u32 csfpl;
+ u32 csfph;
+ u32 dsts;
+ u32 cfg;
+ u8 pad0[0x8];
+ u16 cpfhp;
+ u16 cpftp;
+ u16 csfhp;
+ u16 csftp;
+ u8 pad1[0x8];
+ u32 acpl;
+ u32 acph;
+ u32 s1bpl;
+ u32 s1bph;
+ u32 s2bpl;
+ u32 s2bph;
+ u32 s3bpl;
+ u32 s3bph;
+ u8 pad2[0x10];
+ u32 earl;
+ u32 earh;
+ u8 pad3[0x8];
+ u32 seat;
+ u32 sead;
+ u32 op;
+ u32 fsiz;
+};
+
+/*
+ * I2O hardware registers (p.528 in 440SPe UM 1.22)
+ */
+struct i2o_regs {
+ u32 ists;
+ u32 iseat;
+ u32 isead;
+ u8 pad0[0x14];
+ u32 idbel;
+ u8 pad1[0xc];
+ u32 ihis;
+ u32 ihim;
+ u8 pad2[0x8];
+ u32 ihiq;
+ u32 ihoq;
+ u8 pad3[0x8];
+ u32 iopis;
+ u32 iopim;
+ u32 iopiq;
+ u8 iopoq;
+ u8 pad4[3];
+ u16 iiflh;
+ u16 iiflt;
+ u16 iiplh;
+ u16 iiplt;
+ u16 ioflh;
+ u16 ioflt;
+ u16 ioplh;
+ u16 ioplt;
+ u32 iidc;
+ u32 ictl;
+ u32 ifcpp;
+ u8 pad5[0x4];
+ u16 mfac0;
+ u16 mfac1;
+ u16 mfac2;
+ u16 mfac3;
+ u16 mfac4;
+ u16 mfac5;
+ u16 mfac6;
+ u16 mfac7;
+ u16 ifcfh;
+ u16 ifcht;
+ u8 pad6[0x4];
+ u32 iifmc;
+ u32 iodb;
+ u32 iodbc;
+ u32 ifbal;
+ u32 ifbah;
+ u32 ifsiz;
+ u32 ispd0;
+ u32 ispd1;
+ u32 ispd2;
+ u32 ispd3;
+ u32 ihipl;
+ u32 ihiph;
+ u32 ihopl;
+ u32 ihoph;
+ u32 iiipl;
+ u32 iiiph;
+ u32 iiopl;
+ u32 iioph;
+ u32 ifcpl;
+ u32 ifcph;
+ u8 pad7[0x8];
+ u32 iopt;
+};
+
+#endif /* _PPC440SPE_DMA_H */
diff --git a/drivers/dma/ppc4xx/xor.h b/drivers/dma/ppc4xx/xor.h
new file mode 100644
index 00000000000..daed7384daa
--- /dev/null
+++ b/drivers/dma/ppc4xx/xor.h
@@ -0,0 +1,110 @@
+/*
+ * 440SPe's XOR engines support header file
+ *
+ * 2006-2009 (C) DENX Software Engineering.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _PPC440SPE_XOR_H
+#define _PPC440SPE_XOR_H
+
+#include <linux/types.h>
+
+/* Number of XOR engines available on the controller */
+#define XOR_ENGINES_NUM 1
+
+/* Number of operands supported in the h/w */
+#define XOR_MAX_OPS 16
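+/* (matches the 16 operand address slots in struct xor_cb and xor_regs below) */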
+
+/*
+ * XOR Command Block Control Register bits
+ */
+#define XOR_CBCR_LNK_BIT (1<<31) /* link present */
+#define XOR_CBCR_TGT_BIT (1<<30) /* target present */
+#define XOR_CBCR_CBCE_BIT (1<<29) /* command block complete enable */
+#define XOR_CBCR_RNZE_BIT (1<<28) /* result not zero enable */
+#define XOR_CBCR_XNOR_BIT (1<<15) /* XOR/XNOR */
+#define XOR_CDCR_OAC_MSK (0x7F) /* operand address count */
+
+/*
+ * XORCore Status Register bits
+ */
+#define XOR_SR_XCP_BIT (1<<31) /* core processing */
+#define XOR_SR_ICB_BIT (1<<17) /* invalid CB */
+#define XOR_SR_IC_BIT (1<<16) /* invalid command */
+#define XOR_SR_IPE_BIT (1<<15) /* internal parity error */
+#define XOR_SR_RNZ_BIT (1<<2) /* result not Zero */
+#define XOR_SR_CBC_BIT (1<<1) /* CB complete */
+#define XOR_SR_CBLC_BIT (1<<0) /* CB list complete */
+
+/*
+ * XORCore Control Set and Reset Register bits
+ */
+#define XOR_CRSR_XASR_BIT (1<<31) /* soft reset */
+#define XOR_CRSR_XAE_BIT (1<<30) /* enable */
+#define XOR_CRSR_RCBE_BIT (1<<29) /* refetch CB enable */
+#define XOR_CRSR_PAUS_BIT (1<<28) /* pause */
+#define XOR_CRSR_64BA_BIT (1<<27) /* 64/32 CB format */
+#define XOR_CRSR_CLP_BIT (1<<25) /* continue list processing */
+
+/*
+ * XORCore Interrupt Enable Register
+ */
+#define XOR_IE_ICBIE_BIT (1<<17) /* Invalid Command Block IRQ Enable */
+#define XOR_IE_ICIE_BIT (1<<16) /* Invalid Command IRQ Enable */
+#define XOR_IE_RPTIE_BIT (1<<14) /* Read PLB Timeout Error IRQ Enable */
+#define XOR_IE_CBCIE_BIT (1<<1) /* CB complete interrupt enable */
+#define XOR_IE_CBLCI_BIT (1<<0) /* CB list complete interrupt enable */
+
+/*
+ * XOR Accelerator engine Command Block Type
+ */
+struct xor_cb {
+ /*
+ * Basic 64-bit format XOR CB (Table 19-1, p.463, 440spe_um_1_22.pdf)
+ */
+ u32 cbc; /* control */
+ u32 cbbc; /* byte count */
+ u32 cbs; /* status */
+ u8 pad0[4]; /* reserved */
+ u32 cbtah; /* target address high */
+ u32 cbtal; /* target address low */
+ u32 cblah; /* link address high */
+ u32 cblal; /* link address low */
+ struct {
+ u32 h;
+ u32 l;
+ } __attribute__ ((packed)) ops[16];
+} __attribute__ ((packed));
+
+/*
+ * XOR hardware registers Table 19-3, UM 1.22
+ */
+struct xor_regs {
+ u32 op_ar[16][2]; /* operand address[0]-high,[1]-low registers */
+ u8 pad0[352]; /* reserved */
+ u32 cbcr; /* CB control register */
+ u32 cbbcr; /* CB byte count register */
+ u32 cbsr; /* CB status register */
+ u8 pad1[4]; /* reserved */
+ u32 cbtahr; /* operand target address high register */
+ u32 cbtalr; /* operand target address low register */
+ u32 cblahr; /* CB link address high register */
+ u32 cblalr; /* CB link address low register */
+ u32 crsr; /* control set register */
+ u32 crrr; /* control reset register */
+ u32 ccbahr; /* current CB address high register */
+ u32 ccbalr; /* current CB address low register */
+ u32 plbr; /* PLB configuration register */
+ u32 ier; /* interrupt enable register */
+ u32 pecr; /* parity error count register */
+ u32 sr; /* status register */
+ u32 revidr; /* revision ID register */
+};
+
+#endif /* _PPC440SPE_XOR_H */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 034ecf0ace0..2e4a54c8afe 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -80,17 +80,17 @@ static int sh_dmae_rst(int id)
unsigned short dmaor;
sh_dmae_ctl_stop(id);
- dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+ dmaor = dmaor_read_reg(id) | DMAOR_INIT;
dmaor_write_reg(id, dmaor);
- if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+ if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
return -EINVAL;
}
return 0;
}
-static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
if (chcr & CHCR_DE) {
@@ -110,15 +110,14 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
{
sh_dmae_writel(sh_chan, hw.sar, SAR);
sh_dmae_writel(sh_chan, hw.dar, DAR);
- sh_dmae_writel(sh_chan,
- (hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+ sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
}
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
u32 chcr = sh_dmae_readl(sh_chan, CHCR);
- chcr |= (CHCR_DE|CHCR_IE);
+ chcr |= CHCR_DE | CHCR_IE;
sh_dmae_writel(sh_chan, chcr, CHCR);
}
@@ -132,7 +131,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
- int ret = dmae_is_idle(sh_chan);
+ int ret = dmae_is_busy(sh_chan);
/* When DMA was working, can not set data to CHCR */
if (ret)
return ret;
@@ -149,7 +148,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
u32 addr;
int shift = 0;
- int ret = dmae_is_idle(sh_chan);
+ int ret = dmae_is_busy(sh_chan);
if (ret)
return ret;
@@ -307,7 +306,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
new = sh_dmae_get_desc(sh_chan);
if (!new) {
dev_err(sh_chan->dev,
- "No free memory for link descriptor\n");
+ "No free memory for link descriptor\n");
goto err_get_desc;
}
@@ -388,7 +387,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
struct sh_dmae_regs hw;
/* DMA work check */
- if (dmae_is_idle(sh_chan))
+ if (dmae_is_busy(sh_chan))
return;
/* Find the first un-transfer desciptor */
@@ -497,8 +496,9 @@ static void dmae_do_tasklet(unsigned long data)
struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
struct sh_desc *desc, *_desc, *cur_desc = NULL;
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+
list_for_each_entry_safe(desc, _desc,
- &sh_chan->ld_queue, node) {
+ &sh_chan->ld_queue, node) {
if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
cur_desc = desc;
break;
@@ -543,8 +543,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
/* alloc channel */
new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
if (!new_sh_chan) {
- dev_err(shdev->common.dev, "No free memory for allocating "
- "dma channels!\n");
+ dev_err(shdev->common.dev,
+ "No free memory for allocating dma channels!\n");
return -ENOMEM;
}
@@ -586,8 +586,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
"sh-dmae%d", new_sh_chan->id);
/* set up channel irq */
- err = request_irq(irq, &sh_dmae_interrupt,
- irqflags, new_sh_chan->dev_id, new_sh_chan);
+ err = request_irq(irq, &sh_dmae_interrupt, irqflags,
+ new_sh_chan->dev_id, new_sh_chan);
if (err) {
dev_err(shdev->common.dev, "DMA channel %d request_irq error "
"with return %d\n", id, err);
@@ -676,6 +676,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_is_tx_complete = sh_dmae_is_complete;
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
shdev->common.dev = &pdev->dev;
+ /* Default transfer size of 32 bytes requires 32-byte alignment */
+ shdev->common.copy_align = 5;
#if defined(CONFIG_CPU_SH4)
/* Non Mix IRQ mode SH7722/SH7730 etc... */
@@ -688,8 +690,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
}
for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
- err = request_irq(eirq[ecnt], sh_dmae_err,
- irqflags, "DMAC Address Error", shdev);
+ err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
if (err) {
dev_err(&pdev->dev, "DMA device request_irq"
"error (irq %d) with return %d\n",
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b4bc15a2c0..60b81e529b4 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -35,15 +35,15 @@ struct sh_desc {
struct sh_dmae_chan {
dma_cookie_t completed_cookie; /* The maximum cookie completed */
- spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_queue; /* Link descriptors queue */
- struct list_head ld_free; /* Link descriptors free */
- struct dma_chan common; /* DMA common channel */
- struct device *dev; /* Channel device */
+ spinlock_t desc_lock; /* Descriptor operation lock */
+ struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_free; /* Link descriptors free */
+ struct dma_chan common; /* DMA common channel */
+ struct device *dev; /* Channel device */
struct tasklet_struct tasklet; /* Tasklet */
- int descs_allocated; /* desc count */
+ int descs_allocated; /* desc count */
int id; /* Raw id of this channel */
- char dev_id[16]; /* unique name per DMAC of channel */
+ char dev_id[16]; /* unique name per DMAC of channel */
/* Set chcr */
int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index fb6bb64e886..3ebc61067e5 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1313,7 +1313,7 @@ static int txx9dmac_resume_noirq(struct device *dev)
}
-static struct dev_pm_ops txx9dmac_dev_pm_ops = {
+static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
.suspend_noirq = txx9dmac_suspend_noirq,
.resume_noirq = txx9dmac_resume_noirq,
};
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index a38831c8264..df5b68433f3 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,32 +13,56 @@ module_param(report_gart_errors, int, 0644);
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
+static struct msr *msrs;
+
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
/*
- * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
- * for DDR2 DRAM mapping.
+ * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
+ * later.
*/
-u32 revf_quad_ddr2_shift[] = {
- 0, /* 0000b NULL DIMM (128mb) */
- 28, /* 0001b 256mb */
- 29, /* 0010b 512mb */
- 29, /* 0011b 512mb */
- 29, /* 0100b 512mb */
- 30, /* 0101b 1gb */
- 30, /* 0110b 1gb */
- 31, /* 0111b 2gb */
- 31, /* 1000b 2gb */
- 32, /* 1001b 4gb */
- 32, /* 1010b 4gb */
- 33, /* 1011b 8gb */
- 0, /* 1100b future */
- 0, /* 1101b future */
- 0, /* 1110b future */
- 0 /* 1111b future */
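+/* The tables below map each DBAM cs_mode value to a chip-select size in
+ * MB; e.g. entry 1 of ddr2_dbam (256 MB) corresponds to the shift of 28
+ * in the table being removed above.
+ */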
+static int ddr2_dbam_revCG[] = {
+ [0] = 32,
+ [1] = 64,
+ [2] = 128,
+ [3] = 256,
+ [4] = 512,
+ [5] = 1024,
+ [6] = 2048,
+};
+
+static int ddr2_dbam_revD[] = {
+ [0] = 32,
+ [1] = 64,
+ [2 ... 3] = 128,
+ [4] = 256,
+ [5] = 512,
+ [6] = 256,
+ [7] = 512,
+ [8 ... 9] = 1024,
+ [10] = 2048,
+};
+
+static int ddr2_dbam[] = { [0] = 128,
+ [1] = 256,
+ [2 ... 4] = 512,
+ [5 ... 6] = 1024,
+ [7 ... 8] = 2048,
+ [9 ... 10] = 4096,
+ [11] = 8192,
+};
+
+static int ddr3_dbam[] = { [0] = -1,
+ [1] = 256,
+ [2] = 512,
+ [3 ... 4] = -1,
+ [5 ... 6] = 1024,
+ [7 ... 8] = 2048,
+ [9 ... 10] = 4096,
+ [11] = 8192,
};
/*
@@ -164,11 +188,9 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 scrubval = 0;
- int status = -1, i, ret = 0;
+ int status = -1, i;
- ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
- if (ret)
- debugf0("Reading K8_SCRCTRL failed\n");
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
scrubval = scrubval & 0x001F;
@@ -189,7 +211,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
return csrow;
else
return csrow >> 1;
@@ -437,7 +459,7 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 base;
/* only revE and later have the DRAM Hole Address Register */
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
debugf1(" revision %d for node %d does not support DHAR\n",
pvt->ext_model, pvt->mc_node_id);
return 1;
@@ -743,21 +765,6 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
*input_addr_max = base | mask | pvt->dcs_mask_notused;
}
-/*
- * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
- * Address High (section 3.6.4.6) register values and return the result. Address
- * is located in the info structure (nbeah and nbeal), the encoding is device
- * specific.
- */
-static u64 extract_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
-
- return pvt->ops->get_error_address(mci, info);
-}
-
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
u32 *page, u32 *offset)
@@ -787,7 +794,7 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
return csrow;
}
-static int get_channel_from_ecc_syndrome(unsigned short syndrome);
+static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
static void amd64_cpu_display_info(struct amd64_pvt *pvt)
{
@@ -797,7 +804,7 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
else if (boot_cpu_data.x86 == 0xf)
edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
- (pvt->ext_model >= OPTERON_CPU_REV_F) ?
+ (pvt->ext_model >= K8_REV_F) ?
"Rev F or later" : "Rev E or earlier");
else
/* we'll hardly ever ever get here */
@@ -813,7 +820,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
int bit;
enum dev_type edac_cap = EDAC_FLAG_NONE;
- bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+ bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
? 19
: 17;
@@ -824,111 +831,86 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
}
-static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
- int ganged);
+static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+
+static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+{
+ debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
+
+ debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
+ (dclr & BIT(16)) ? "un" : "",
+ (dclr & BIT(19)) ? "yes" : "no");
+
+ debugf1(" PAR/ERR parity: %s\n",
+ (dclr & BIT(8)) ? "enabled" : "disabled");
+
+ debugf1(" DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
+
+ debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
+ (dclr & BIT(12)) ? "yes" : "no",
+ (dclr & BIT(13)) ? "yes" : "no",
+ (dclr & BIT(14)) ? "yes" : "no",
+ (dclr & BIT(15)) ? "yes" : "no");
+}
/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
{
int ganged;
- debugf1(" nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
- pvt->nbcap,
- (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
- (pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
- (pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
- debugf1(" ECC Capable=%s ChipKill Capable=%s\n",
- (pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
- (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
- debugf1(" DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
- pvt->dclr0,
- (pvt->dclr0 & BIT(19)) ? "Enabled" : "Disabled",
- (pvt->dclr0 & BIT(8)) ? "Enabled" : "Disabled",
- (pvt->dclr0 & BIT(11)) ? "128b" : "64b");
- debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s DIMM Type=%s\n",
- (pvt->dclr0 & BIT(12)) ? "Y" : "N",
- (pvt->dclr0 & BIT(13)) ? "Y" : "N",
- (pvt->dclr0 & BIT(14)) ? "Y" : "N",
- (pvt->dclr0 & BIT(15)) ? "Y" : "N",
- (pvt->dclr0 & BIT(16)) ? "UN-Buffered" : "Buffered");
-
-
- debugf1(" online-spare: 0x%8.08x\n", pvt->online_spare);
+ debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
- if (boot_cpu_data.x86 == 0xf) {
- debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
- pvt->dhar, dhar_base(pvt->dhar),
- k8_dhar_offset(pvt->dhar));
- debugf1(" DramHoleValid=%s\n",
- (pvt->dhar & DHAR_VALID) ? "True" : "False");
+ debugf1(" NB two channel DRAM capable: %s\n",
+ (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
- debugf1(" dbam-dkt: 0x%8.08x\n", pvt->dbam0);
+ debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
+ (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
- /* everything below this point is Fam10h and above */
- return;
+ amd64_dump_dramcfg_low(pvt->dclr0, 0);
- } else {
- debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
- pvt->dhar, dhar_base(pvt->dhar),
- f10_dhar_offset(pvt->dhar));
- debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
- (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
- "True" : "False",
- (pvt->dhar & DHAR_VALID) ?
- "True" : "False");
- }
+ debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
- /* Only if NOT ganged does dcl1 have valid info */
- if (!dct_ganging_enabled(pvt)) {
- debugf1(" DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
- "Width=%s\n", pvt->dclr1,
- (pvt->dclr1 & BIT(19)) ? "Enabled" : "Disabled",
- (pvt->dclr1 & BIT(8)) ? "Enabled" : "Disabled",
- (pvt->dclr1 & BIT(11)) ? "128b" : "64b");
- debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
- "DIMM Type=%s\n",
- (pvt->dclr1 & BIT(12)) ? "Y" : "N",
- (pvt->dclr1 & BIT(13)) ? "Y" : "N",
- (pvt->dclr1 & BIT(14)) ? "Y" : "N",
- (pvt->dclr1 & BIT(15)) ? "Y" : "N",
- (pvt->dclr1 & BIT(16)) ? "UN-Buffered" : "Buffered");
+ debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
+ "offset: 0x%08x\n",
+ pvt->dhar,
+ dhar_base(pvt->dhar),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
+ : f10_dhar_offset(pvt->dhar));
+
+ debugf1(" DramHoleValid: %s\n",
+ (pvt->dhar & DHAR_VALID) ? "yes" : "no");
+
+ /* everything below this point is Fam10h and above */
+ if (boot_cpu_data.x86 == 0xf) {
+ amd64_debug_display_dimm_sizes(0, pvt);
+ return;
}
+ /* Only if NOT ganged does dclr1 have valid info */
+ if (!dct_ganging_enabled(pvt))
+ amd64_dump_dramcfg_low(pvt->dclr1, 1);
+
/*
* Determine if ganged and then dump memory sizes for first controller,
* and if NOT ganged dump info for 2nd controller.
*/
ganged = dct_ganging_enabled(pvt);
- f10_debug_display_dimm_sizes(0, pvt, ganged);
+ amd64_debug_display_dimm_sizes(0, pvt);
if (!ganged)
- f10_debug_display_dimm_sizes(1, pvt, ganged);
+ amd64_debug_display_dimm_sizes(1, pvt);
}
/* Read in both of DBAM registers */
static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
{
- int err = 0;
- unsigned int reg;
-
- reg = DBAM0;
- err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
- if (err)
- goto err_reg;
-
- if (boot_cpu_data.x86 >= 0x10) {
- reg = DBAM1;
- err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
-
- if (err)
- goto err_reg;
- }
-
- return;
+ amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
-err_reg:
- debugf0("Error reading F2x%03x.\n", reg);
+ if (boot_cpu_data.x86 >= 0x10)
+ amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
}
/*
@@ -963,7 +945,7 @@ err_reg:
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
@@ -991,28 +973,21 @@ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
*/
static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs, reg, err = 0;
+ int cs, reg;
amd64_set_dct_base_and_mask(pvt);
for (cs = 0; cs < pvt->cs_count; cs++) {
reg = K8_DCSB0 + (cs * 4);
- err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
- &pvt->dcsb0[cs]);
- if (unlikely(err))
- debugf0("Reading K8_DCSB0[%d] failed\n", cs);
- else
+ if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
cs, pvt->dcsb0[cs], reg);
/* If DCT are NOT ganged, then read in DCT1's base */
if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
reg = F10_DCSB1 + (cs * 4);
- err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
- &pvt->dcsb1[cs]);
- if (unlikely(err))
- debugf0("Reading F10_DCSB1[%d] failed\n", cs);
- else
+ if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+ &pvt->dcsb1[cs]))
debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
cs, pvt->dcsb1[cs], reg);
} else {
@@ -1022,26 +997,20 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
for (cs = 0; cs < pvt->num_dcsm; cs++) {
reg = K8_DCSM0 + (cs * 4);
- err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
- &pvt->dcsm0[cs]);
- if (unlikely(err))
- debugf0("Reading K8_DCSM0 failed\n");
- else
+ if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
cs, pvt->dcsm0[cs], reg);
/* If DCT are NOT ganged, then read in DCT1's mask */
if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
reg = F10_DCSM1 + (cs * 4);
- err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
- &pvt->dcsm1[cs]);
- if (unlikely(err))
- debugf0("Reading F10_DCSM1[%d] failed\n", cs);
- else
+ if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+ &pvt->dcsm1[cs]))
debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
cs, pvt->dcsm1[cs], reg);
- } else
+ } else {
pvt->dcsm1[cs] = 0;
+ }
}
}
@@ -1049,18 +1018,16 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
{
enum mem_type type;
- if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
- /* Rev F and later */
- type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+ if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+ if (pvt->dchr0 & DDR3_MODE)
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+ else
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
} else {
- /* Rev E and earlier */
type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
}
- debugf1(" Memory type is: %s\n",
- (type == MEM_DDR2) ? "MEM_DDR2" :
- (type == MEM_RDDR2) ? "MEM_RDDR2" :
- (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
+ debugf1(" Memory type is: %s\n", edac_mem_types[type]);
return type;
}
@@ -1078,11 +1045,11 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
{
int flag, err = 0;
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
if (err)
return err;
- if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
+ if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
/* RevF (NPT) and later */
flag = pvt->dclr0 & F10_WIDTH_128;
} else {
@@ -1114,22 +1081,15 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
{
u32 low;
u32 off = dram << 3; /* 8 bytes between DRAM entries */
- int err;
- err = pci_read_config_dword(pvt->addr_f1_ctl,
- K8_DRAM_BASE_LOW + off, &low);
- if (err)
- debugf0("Reading K8_DRAM_BASE_LOW failed\n");
+ amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
/* Extract parts into separate data entries */
pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
pvt->dram_rw_en[dram] = (low & 0x3);
- err = pci_read_config_dword(pvt->addr_f1_ctl,
- K8_DRAM_LIMIT_LOW + off, &low);
- if (err)
- debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
+ amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
/*
* Extract parts into separate data entries. Limit is the HIGHEST memory
@@ -1142,7 +1102,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
struct err_regs *info,
- u64 SystemAddress)
+ u64 sys_addr)
{
struct mem_ctl_info *src_mci;
unsigned short syndrome;
@@ -1155,7 +1115,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
/* CHIPKILL enabled */
if (info->nbcfg & K8_NBCFG_CHIPKILL) {
- channel = get_channel_from_ecc_syndrome(syndrome);
+ channel = get_channel_from_ecc_syndrome(mci, syndrome);
if (channel < 0) {
/*
* Syndrome didn't map, so we don't know which of the
@@ -1177,64 +1137,46 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
* was obtained from email communication with someone at AMD.
* (Wish the email was placed in this comment - norsk)
*/
- channel = ((SystemAddress & BIT(3)) != 0);
+ channel = ((sys_addr & BIT(3)) != 0);
}
/*
* Find out which node the error address belongs to. This may be
* different from the node that detected the error.
*/
- src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+ src_mci = find_mc_by_sys_addr(mci, sys_addr);
if (!src_mci) {
amd64_mc_printk(mci, KERN_ERR,
"failed to map error address 0x%lx to a node\n",
- (unsigned long)SystemAddress);
+ (unsigned long)sys_addr);
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
return;
}
- /* Now map the SystemAddress to a CSROW */
- csrow = sys_addr_to_csrow(src_mci, SystemAddress);
+ /* Now map the sys_addr to a CSROW */
+ csrow = sys_addr_to_csrow(src_mci, sys_addr);
if (csrow < 0) {
edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
} else {
- error_address_to_page_and_offset(SystemAddress, &page, &offset);
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
channel, EDAC_MOD_STR);
}
}
-/*
- * determrine the number of PAGES in for this DIMM's size based on its DRAM
- * Address Mapping.
- *
- * First step is to calc the number of bits to shift a value of 1 left to
- * indicate show many pages. Start with the DBAM value as the starting bits,
- * then proceed to adjust those shift bits, based on CPU rev and the table.
- * See BKDG on the DBAM
- */
-static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
- int nr_pages;
+ int *dbam_map;
- if (pvt->ext_model >= OPTERON_CPU_REV_F) {
- nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
- } else {
- /*
- * RevE and less section; this line is tricky. It collapses the
- * table used by RevD and later to one that matches revisions CG
- * and earlier.
- */
- dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
- (dram_map > 8 ? 4 : (dram_map > 5 ?
- 3 : (dram_map > 2 ? 1 : 0))) : 0;
-
- /* 25 shift is 32MiB minimum DIMM size in RevE and prior */
- nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
- }
+ if (pvt->ext_model >= K8_REV_F)
+ dbam_map = ddr2_dbam;
+ else if (pvt->ext_model >= K8_REV_D)
+ dbam_map = ddr2_dbam_revD;
+ else
+ dbam_map = ddr2_dbam_revCG;
- return nr_pages;
+ return dbam_map[cs_mode];
}
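
With the table lookup in place, a caller no longer converts DBAM nibbles to page counts itself; it asks the per-family op for the chip-select size in megabytes and does any further conversion at the call site. A hedged usage sketch (the cs_mode value is illustrative and the tables themselves are defined elsewhere in the driver):

        /* sketch: DBAM nibble 0x5 on a rev-F or later K8 indexes ddr2_dbam[5] */
        int size_mb = pvt->ops->dbam_to_cs(pvt, 0x5);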
/*
@@ -1248,34 +1190,24 @@ static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
static int f10_early_channel_count(struct amd64_pvt *pvt)
{
int dbams[] = { DBAM0, DBAM1 };
- int err = 0, channels = 0;
- int i, j;
+ int i, j, channels = 0;
u32 dbam;
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
- if (err)
- goto err_reg;
-
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
- if (err)
- goto err_reg;
-
/* If we are in 128 bit mode, then we are using 2 channels */
if (pvt->dclr0 & F10_WIDTH_128) {
- debugf0("Data WIDTH is 128 bits - 2 channels\n");
channels = 2;
return channels;
}
/*
- * Need to check if in UN-ganged mode: In such, there are 2 channels,
- * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit
- * will be OFF.
+ * Need to check if we are in unganged mode: in that case there are 2
+ * channels, but they are not in 128-bit mode and thus the above 'dclr0'
+ * status bit will be OFF.
*
* Need to check DCT0[0] and DCT1[0] to see if only one of them has
* their CSEnable bit on. If so, then SINGLE DIMM case.
*/
- debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
+ debugf0("Data width is not 128 bits - need more decoding\n");
/*
* Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1283,8 +1215,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
* both controllers since DIMMs can be placed in either one.
*/
for (i = 0; i < ARRAY_SIZE(dbams); i++) {
- err = pci_read_config_dword(pvt->dram_f2_ctl, dbams[i], &dbam);
- if (err)
+ if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
goto err_reg;
for (j = 0; j < 4; j++) {
@@ -1295,6 +1226,9 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
}
}
+ if (channels > 2)
+ channels = 2;
+
debugf0("MCT channel count: %d\n", channels);
return channels;
@@ -1304,9 +1238,16 @@ err_reg:
}
-static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
- return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
+ int *dbam_map;
+
+ if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+ dbam_map = ddr3_dbam;
+ else
+ dbam_map = ddr2_dbam;
+
+ return dbam_map[cs_mode];
}
/* Enable extended configuration access via 0xCF8 feature */
@@ -1314,7 +1255,7 @@ static void amd64_setup(struct amd64_pvt *pvt)
{
u32 reg;
- pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
@@ -1326,7 +1267,7 @@ static void amd64_teardown(struct amd64_pvt *pvt)
{
u32 reg;
- pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
if (pvt->flags.cf8_extcfg)
@@ -1355,10 +1296,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
/* read the 'raw' DRAM BASE Address register */
- pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
+ amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
/* Read from the ECS data register */
- pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
+ amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
/* Extract parts into separate data entries */
pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1375,13 +1316,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
/* read the 'raw' LIMIT registers */
- pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
+ amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
/* Read from the ECS data register for the HIGH portion */
- pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
-
- debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
- high_base, low_base, high_limit, low_limit);
+ amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
pvt->dram_DstNode[dram] = (low_limit & 0x7);
pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
@@ -1397,32 +1335,35 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
{
- int err = 0;
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
- &pvt->dram_ctl_select_low);
- if (err) {
- debugf0("Reading F10_DCTL_SEL_LOW failed\n");
- } else {
- debugf0("DRAM_DCTL_SEL_LOW=0x%x DctSelBaseAddr=0x%x\n",
- pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt));
-
- debugf0(" DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-"
- "sel-hi-range=%s\n",
- (dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"),
- (dct_dram_enabled(pvt) ? "Enabled" : "Disabled"),
- (dct_high_range_enabled(pvt) ? "Enabled" : "Disabled"));
-
- debugf0(" DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
- (dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
- (dct_memory_cleared(pvt) ? "True " : "False "),
+ if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
+ &pvt->dram_ctl_select_low)) {
+ debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
+ "High range addresses at: 0x%x\n",
+ pvt->dram_ctl_select_low,
+ dct_sel_baseaddr(pvt));
+
+ debugf0(" DCT mode: %s, All DCTs on: %s\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
+ (dct_dram_enabled(pvt) ? "yes" : "no"));
+
+ if (!dct_ganging_enabled(pvt))
+ debugf0(" Address range split per DCT: %s\n",
+ (dct_high_range_enabled(pvt) ? "yes" : "no"));
+
+ debugf0(" DCT data interleave for ECC: %s, "
+ "DRAM cleared since last warm reset: %s\n",
+ (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
+ (dct_memory_cleared(pvt) ? "yes" : "no"));
+
+ debugf0(" DCT channel interleave: %s, "
+ "DCT interleave bits selector: 0x%x\n",
+ (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
dct_sel_interleave_addr(pvt));
}
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
- &pvt->dram_ctl_select_high);
- if (err)
- debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
+ amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
+ &pvt->dram_ctl_select_high);
}
/*
@@ -1706,10 +1647,11 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
}
/*
- * This the F10h reference code from AMD to map a @sys_addr to NodeID,
- * CSROW, Channel.
+ * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
+ * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
*
- * The @sys_addr is usually an error address received from the hardware.
+ * The @sys_addr is usually an error address received from the hardware
+ * (MCX_ADDR).
*/
static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
struct err_regs *info,
@@ -1722,133 +1664,76 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
- if (csrow >= 0) {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- syndrome = HIGH_SYNDROME(info->nbsl) << 8;
- syndrome |= LOW_SYNDROME(info->nbsh);
-
- /*
- * Is CHIPKILL on? If so, then we can attempt to use the
- * syndrome to isolate which channel the error was on.
- */
- if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
- chan = get_channel_from_ecc_syndrome(syndrome);
-
- if (chan >= 0) {
- edac_mc_handle_ce(mci, page, offset, syndrome,
- csrow, chan, EDAC_MOD_STR);
- } else {
- /*
- * Channel unknown, report all channels on this
- * CSROW as failed.
- */
- for (chan = 0; chan < mci->csrows[csrow].nr_channels;
- chan++) {
- edac_mc_handle_ce(mci, page, offset,
- syndrome,
- csrow, chan,
- EDAC_MOD_STR);
- }
- }
-
- } else {
+ if (csrow < 0) {
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ return;
}
-}
-/*
- * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
- * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
- * indicates an empty DIMM slot, as reported by Hardware on empty slots.
- *
- * Normalize to 128MB by subracting 27 bit shift.
- */
-static int map_dbam_to_csrow_size(int index)
-{
- int mega_bytes = 0;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
- if (index > 0 && index <= DBAM_MAX_VALUE)
- mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
+ syndrome = HIGH_SYNDROME(info->nbsl) << 8;
+ syndrome |= LOW_SYNDROME(info->nbsh);
+
+ /*
+ * We need the syndromes for channel detection only when we're
+ * ganged. Otherwise @chan should already contain the channel at
+ * this point.
+ */
+ if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
+ chan = get_channel_from_ecc_syndrome(mci, syndrome);
- return mega_bytes;
+ if (chan >= 0)
+ edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
+ EDAC_MOD_STR);
+ else
+ /*
+ * Channel unknown, report all channels on this CSROW as failed.
+ */
+ for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
+ edac_mc_handle_ce(mci, page, offset, syndrome,
+ csrow, chan, EDAC_MOD_STR);
}
/*
- * debug routine to display the memory sizes of a DIMM (ganged or not) and it
+ * debug routine to display the memory sizes of all logical DIMMs and its
* CSROWs as well
*/
-static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
- int ganged)
+static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
int dimm, size0, size1;
u32 dbam;
u32 *dcsb;
- debugf1(" dbam%d: 0x%8.08x CSROW is %s\n", ctrl,
- ctrl ? pvt->dbam1 : pvt->dbam0,
- ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
+ if (boot_cpu_data.x86 == 0xf) {
+ /* K8 families < revF not supported yet */
+ if (pvt->ext_model < K8_REV_F)
+ return;
+ else
+ WARN_ON(ctrl != 0);
+ }
+
+ debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+ ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+ edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
+
/* Dump memory sizes for DIMM and its CSROWs */
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
- size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
+ size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
size1 = 0;
if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
- size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
-
- debugf1(" CTRL-%d DIMM-%d=%5dMB CSROW-%d=%5dMB "
- "CSROW-%d=%5dMB\n",
- ctrl,
- dimm,
- size0 + size1,
- dimm * 2,
- size0,
- dimm * 2 + 1,
- size1);
- }
-}
-
-/*
- * Very early hardware probe on pci_probe thread to determine if this module
- * supports the hardware.
- *
- * Return:
- * 0 for OK
- * 1 for error
- */
-static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
-{
- int ret = 0;
-
- /*
- * If we are on a DDR3 machine, we don't know yet if
- * we support that properly at this time
- */
- if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
- (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
-
- amd64_printk(KERN_WARNING,
- "%s() This machine is running with DDR3 memory. "
- "This is not currently supported. "
- "DCHR0=0x%x DCHR1=0x%x\n",
- __func__, pvt->dchr0, pvt->dchr1);
-
- amd64_printk(KERN_WARNING,
- " Contact '%s' module MAINTAINER to help add"
- " support.\n",
- EDAC_MOD_STR);
-
- ret = 1;
+ size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
+ dimm * 2, size0, dimm * 2 + 1, size1);
}
- return ret;
}
/*
@@ -1868,11 +1753,11 @@ static struct amd64_family_type amd64_family_types[] = {
.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
.ops = {
- .early_channel_count = k8_early_channel_count,
- .get_error_address = k8_get_error_address,
- .read_dram_base_limit = k8_read_dram_base_limit,
- .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
- .dbam_map_to_pages = k8_dbam_map_to_pages,
+ .early_channel_count = k8_early_channel_count,
+ .get_error_address = k8_get_error_address,
+ .read_dram_base_limit = k8_read_dram_base_limit,
+ .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
+ .dbam_to_cs = k8_dbam_to_chip_select,
}
},
[F10_CPUS] = {
@@ -1880,13 +1765,12 @@ static struct amd64_family_type amd64_family_types[] = {
.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
.ops = {
- .probe_valid_hardware = f10_probe_valid_hardware,
- .early_channel_count = f10_early_channel_count,
- .get_error_address = f10_get_error_address,
- .read_dram_base_limit = f10_read_dram_base_limit,
- .read_dram_ctl_register = f10_read_dram_ctl_register,
- .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
- .dbam_map_to_pages = f10_dbam_map_to_pages,
+ .early_channel_count = f10_early_channel_count,
+ .get_error_address = f10_get_error_address,
+ .read_dram_base_limit = f10_read_dram_base_limit,
+ .read_dram_ctl_register = f10_read_dram_ctl_register,
+ .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .dbam_to_cs = f10_dbam_to_chip_select,
}
},
[F11_CPUS] = {
@@ -1894,13 +1778,12 @@ static struct amd64_family_type amd64_family_types[] = {
.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
.ops = {
- .probe_valid_hardware = f10_probe_valid_hardware,
- .early_channel_count = f10_early_channel_count,
- .get_error_address = f10_get_error_address,
- .read_dram_base_limit = f10_read_dram_base_limit,
- .read_dram_ctl_register = f10_read_dram_ctl_register,
- .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
- .dbam_map_to_pages = f10_dbam_map_to_pages,
+ .early_channel_count = f10_early_channel_count,
+ .get_error_address = f10_get_error_address,
+ .read_dram_base_limit = f10_read_dram_base_limit,
+ .read_dram_ctl_register = f10_read_dram_ctl_register,
+ .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .dbam_to_cs = f10_dbam_to_chip_select,
}
},
};
@@ -1923,142 +1806,170 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
}
/*
- * syndrome mapping table for ECC ChipKill devices
- *
- * The comment in each row is the token (nibble) number that is in error.
- * The least significant nibble of the syndrome is the mask for the bits
- * that are in error (need to be toggled) for the particular nibble.
- *
- * Each row contains 16 entries.
- * The first entry (0th) is the channel number for that row of syndromes.
- * The remaining 15 entries are the syndromes for the respective Error
- * bit mask index.
+ * These are tables of eigenvectors (one per line) which can be used for the
+ * construction of the syndrome tables. The modified syndrome search algorithm
+ * uses those to find the symbol in error and thus the DIMM.
*
- * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the
- * bit in error.
- * The 2nd index entry is 0x0010 that the second bit is damaged.
- * The 3rd index entry is 0x0011 indicating that the rightmost 2 bits
- * are damaged.
- * Thus so on until index 15, 0x1111, whose entry has the syndrome
- * indicating that all 4 bits are damaged.
- *
- * A search is performed on this table looking for a given syndrome.
- *
- * See the AMD documentation for ECC syndromes. This ECC table is valid
- * across all the versions of the AMD64 processors.
- *
- * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
- * COLUMN index, then search all ROWS of that column, looking for a match
- * with the input syndrome. The ROW value will be the token number.
- *
- * The 0'th entry on that row, can be returned as the CHANNEL (0 or 1) of this
- * error.
+ * Algorithm courtesy of Ross LaFetra from AMD.
*/
-#define NUMBER_ECC_ROWS 36
-static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
- /* Channel 0 syndromes */
- {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
- 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
- {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
- 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
- {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
- 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
- {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
- 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
- {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
- 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
- {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
- 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
- {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
- 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
- {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
- 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
- {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
- 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
- {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
- 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
- {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
- 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
- {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
- 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
- {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
- 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
- {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
- 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
- {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
- 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
- {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
- 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
-
- /* Channel 1 syndromes */
- {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
- 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
- {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
- 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
- {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
- 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
- {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
- 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
- {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
- 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
- {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
- 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
- {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
- 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
- {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
- 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
- {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
- 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
- {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
- 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
- {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
- 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
- {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
- 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
- {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
- 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
- {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
- 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
- {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
- 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
- {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
- 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
-
- /* ECC bits are also in the set of tokens and they too can go bad
- * first 2 cover channel 0, while the second 2 cover channel 1
- */
- {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
- 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
- {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
- 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
- {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
- 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
- {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
- 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
+static u16 x4_vectors[] = {
+ 0x2f57, 0x1afe, 0x66cc, 0xdd88,
+ 0x11eb, 0x3396, 0x7f4c, 0xeac8,
+ 0x0001, 0x0002, 0x0004, 0x0008,
+ 0x1013, 0x3032, 0x4044, 0x8088,
+ 0x106b, 0x30d6, 0x70fc, 0xe0a8,
+ 0x4857, 0xc4fe, 0x13cc, 0x3288,
+ 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
+ 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
+ 0x15c1, 0x2a42, 0x89ac, 0x4758,
+ 0x2b03, 0x1602, 0x4f0c, 0xca08,
+ 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
+ 0x8ba7, 0x465e, 0x244c, 0x1cc8,
+ 0x2b87, 0x164e, 0x642c, 0xdc18,
+ 0x40b9, 0x80de, 0x1094, 0x20e8,
+ 0x27db, 0x1eb6, 0x9dac, 0x7b58,
+ 0x11c1, 0x2242, 0x84ac, 0x4c58,
+ 0x1be5, 0x2d7a, 0x5e34, 0xa718,
+ 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
+ 0x4c97, 0xc87e, 0x11fc, 0x33a8,
+ 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
+ 0x16b3, 0x3d62, 0x4f34, 0x8518,
+ 0x1e2f, 0x391a, 0x5cac, 0xf858,
+ 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
+ 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
+ 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
+ 0x4397, 0xc27e, 0x17fc, 0x3ea8,
+ 0x1617, 0x3d3e, 0x6464, 0xb8b8,
+ 0x23ff, 0x12aa, 0xab6c, 0x56d8,
+ 0x2dfb, 0x1ba6, 0x913c, 0x7328,
+ 0x185d, 0x2ca6, 0x7914, 0x9e28,
+ 0x171b, 0x3e36, 0x7d7c, 0xebe8,
+ 0x4199, 0x82ee, 0x19f4, 0x2e58,
+ 0x4807, 0xc40e, 0x130c, 0x3208,
+ 0x1905, 0x2e0a, 0x5804, 0xac08,
+ 0x213f, 0x132a, 0xadfc, 0x5ba8,
+ 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
-/*
- * Given the syndrome argument, scan each of the channel tables for a syndrome
- * match. Depending on which table it is found, return the channel number.
- */
-static int get_channel_from_ecc_syndrome(unsigned short syndrome)
+static u16 x8_vectors[] = {
+ 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
+ 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
+ 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
+ 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
+ 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
+ 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
+ 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
+ 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
+ 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
+ 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
+ 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
+ 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
+ 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
+ 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
+ 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
+ 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
+ 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
+ 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
+ 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
+};
+
+static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
+ int v_dim)
{
- int row;
- int column;
+ unsigned int i, err_sym;
- /* Determine column to scan */
- column = syndrome & 0xF;
+ for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
+ u16 s = syndrome;
+ int v_idx = err_sym * v_dim;
+ int v_end = (err_sym + 1) * v_dim;
- /* Scan all rows, looking for syndrome, or end of table */
- for (row = 0; row < NUMBER_ECC_ROWS; row++) {
- if (ecc_chipkill_syndromes[row][column] == syndrome)
- return ecc_chipkill_syndromes[row][0];
+ /* walk over all 16 bits of the syndrome */
+ for (i = 1; i < (1U << 16); i <<= 1) {
+
+ /* if bit is set in that eigenvector... */
+ if (v_idx < v_end && vectors[v_idx] & i) {
+ u16 ev_comp = vectors[v_idx++];
+
+ /* ... and bit set in the modified syndrome, */
+ if (s & i) {
+ /* remove it. */
+ s ^= ev_comp;
+
+ if (!s)
+ return err_sym;
+ }
+
+ } else if (s & i)
+ /* can't get to zero, move to next symbol */
+ break;
+ }
}
debugf0("syndrome(%x) not found\n", syndrome);
return -1;
}
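
As a worked example of the search above (hand-stepped here, not part of the patch): the third group of x4_vectors[] is { 0x0001, 0x0002, 0x0004, 0x0008 }, i.e. err_sym 2. A syndrome of 0x0003 has bits 0 and 1 set, each matching exactly one eigenvector of that group, so the XOR cancellation drives the working syndrome to zero and decode_syndrome() returns 2:

        /* sketch: decode_syndrome(0x0003, x4_vectors, ARRAY_SIZE(x4_vectors), 4) */
        u16 s = 0x0003;
        s ^= 0x0001;            /* bit 0 set in both s and x4_vectors[8] */
        s ^= 0x0002;            /* bit 1 set in both s and x4_vectors[9] */
        /* s == 0, so error symbol 2 is reported */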
+static int map_err_sym_to_channel(int err_sym, int sym_size)
+{
+ if (sym_size == 4)
+ switch (err_sym) {
+ case 0x20:
+ case 0x21:
+ return 0;
+ break;
+ case 0x22:
+ case 0x23:
+ return 1;
+ break;
+ default:
+ return err_sym >> 4;
+ break;
+ }
+ /* x8 symbols */
+ else
+ switch (err_sym) {
+ /* imaginary bits not in a DIMM */
+ case 0x10:
+ WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
+ err_sym);
+ return -1;
+ break;
+
+ case 0x11:
+ return 0;
+ break;
+ case 0x12:
+ return 1;
+ break;
+ default:
+ return err_sym >> 3;
+ break;
+ }
+ return -1;
+}
+
+static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 value = 0;
+ int err_sym = 0;
+
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
+
+ /* F3x180[EccSymbolSize]=1, x8 symbols */
+ if (boot_cpu_data.x86 == 0x10 &&
+ boot_cpu_data.x86_model > 7 &&
+ value & BIT(25)) {
+ err_sym = decode_syndrome(syndrome, x8_vectors,
+ ARRAY_SIZE(x8_vectors), 8);
+ return map_err_sym_to_channel(err_sym, 8);
+ } else {
+ err_sym = decode_syndrome(syndrome, x4_vectors,
+ ARRAY_SIZE(x4_vectors), 4);
+ return map_err_sym_to_channel(err_sym, 4);
+ }
+}
+
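
Continuing that example: an x4 err_sym of 2 takes the default branch of map_err_sym_to_channel() and lands on channel 2 >> 4 == 0; only the ECC check symbols 0x20/0x21 and 0x22/0x23 are pinned to channels 0 and 1 explicitly, mirroring the last four rows of the old syndrome table. Whether the x4 or x8 vector table is searched is decided per node from F3x180 bit 25, as the function above shows.

        /* sketch, continuing the decode_syndrome() example */
        int chan = map_err_sym_to_channel(2, 4);        /* default case: 2 >> 4 == 0 */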
/*
* Check for valid error in the NB Status High register. If so, proceed to read
* NB Status Low, NB Address Low and NB Address High registers and store data
@@ -2073,40 +1984,24 @@ static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
{
struct amd64_pvt *pvt;
struct pci_dev *misc_f3_ctl;
- int err = 0;
pvt = mci->pvt_info;
misc_f3_ctl = pvt->misc_f3_ctl;
- err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh);
- if (err)
- goto err_reg;
+ if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
+ return 0;
if (!(regs->nbsh & K8_NBSH_VALID_BIT))
return 0;
/* valid error, read remaining error information registers */
- err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl);
- if (err)
- goto err_reg;
-
- err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal);
- if (err)
- goto err_reg;
-
- err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
- if (err)
- goto err_reg;
-
- err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
- if (err)
- goto err_reg;
+ if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
+ amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
+ amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
+ amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
+ return 0;
return 1;
-
-err_reg:
- debugf0("Reading error info register failed\n");
- return 0;
}
/*
@@ -2184,7 +2079,7 @@ static void amd64_handle_ce(struct mem_ctl_info *mci,
struct err_regs *info)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u64 SystemAddress;
+ u64 sys_addr;
/* Ensure that the Error Address is VALID */
if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
@@ -2194,22 +2089,23 @@ static void amd64_handle_ce(struct mem_ctl_info *mci,
return;
}
- SystemAddress = extract_error_address(mci, info);
+ sys_addr = pvt->ops->get_error_address(mci, info);
amd64_mc_printk(mci, KERN_ERR,
- "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
+ "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
- pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
+ pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
}
/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
struct err_regs *info)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ struct mem_ctl_info *log_mci, *src_mci = NULL;
int csrow;
- u64 SystemAddress;
+ u64 sys_addr;
u32 page, offset;
- struct mem_ctl_info *log_mci, *src_mci = NULL;
log_mci = mci;
@@ -2220,31 +2116,31 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
return;
}
- SystemAddress = extract_error_address(mci, info);
+ sys_addr = pvt->ops->get_error_address(mci, info);
/*
* Find out which node the error address belongs to. This may be
* different from the node that detected the error.
*/
- src_mci = find_mc_by_sys_addr(mci, SystemAddress);
+ src_mci = find_mc_by_sys_addr(mci, sys_addr);
if (!src_mci) {
amd64_mc_printk(mci, KERN_CRIT,
"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
- (unsigned long)SystemAddress);
+ (unsigned long)sys_addr);
edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
return;
}
log_mci = src_mci;
- csrow = sys_addr_to_csrow(log_mci, SystemAddress);
+ csrow = sys_addr_to_csrow(log_mci, sys_addr);
if (csrow < 0) {
amd64_mc_printk(mci, KERN_CRIT,
"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
- (unsigned long)SystemAddress);
+ (unsigned long)sys_addr);
edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
} else {
- error_address_to_page_and_offset(SystemAddress, &page, &offset);
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
}
}
@@ -2384,30 +2280,26 @@ static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
{
u64 msr_val;
- int dram, err = 0;
+ int dram;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero
*/
- rdmsrl(MSR_K8_TOP_MEM1, msr_val);
- pvt->top_mem = msr_val >> 23;
- debugf0(" TOP_MEM=0x%08llx\n", pvt->top_mem);
+ rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+ debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* check first whether TOP_MEM2 is enabled */
rdmsrl(MSR_K8_SYSCFG, msr_val);
if (msr_val & (1U << 21)) {
- rdmsrl(MSR_K8_TOP_MEM2, msr_val);
- pvt->top_mem2 = msr_val >> 23;
- debugf0(" TOP_MEM2=0x%08llx\n", pvt->top_mem2);
+ rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+ debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else
debugf0(" TOP_MEM2 disabled.\n");
amd64_cpu_display_info(pvt);
- err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
- if (err)
- goto err_reg;
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
if (pvt->ops->read_dram_ctl_register)
pvt->ops->read_dram_ctl_register(pvt);
@@ -2425,13 +2317,12 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
* debug output block away.
*/
if (pvt->dram_rw_en[dram] != 0) {
- debugf1(" DRAM_BASE[%d]: 0x%8.08x-%8.08x "
- "DRAM_LIMIT: 0x%8.08x-%8.08x\n",
+ debugf1(" DRAM-BASE[%d]: 0x%016llx "
+ "DRAM-LIMIT: 0x%016llx\n",
dram,
- (u32)(pvt->dram_base[dram] >> 32),
- (u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
- (u32)(pvt->dram_limit[dram] >> 32),
- (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
+ pvt->dram_base[dram],
+ pvt->dram_limit[dram]);
+
debugf1(" IntlvEn=%s %s %s "
"IntlvSel=%d DstNode=%d\n",
pvt->dram_IntlvEn[dram] ?
@@ -2445,44 +2336,20 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
amd64_read_dct_base_mask(pvt);
- err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
- if (err)
- goto err_reg;
-
+ amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
amd64_read_dbam_reg(pvt);
- err = pci_read_config_dword(pvt->misc_f3_ctl,
- F10_ONLINE_SPARE, &pvt->online_spare);
- if (err)
- goto err_reg;
+ amd64_read_pci_cfg(pvt->misc_f3_ctl,
+ F10_ONLINE_SPARE, &pvt->online_spare);
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
- if (err)
- goto err_reg;
-
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
- if (err)
- goto err_reg;
+ amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
+ amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
if (!dct_ganging_enabled(pvt)) {
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1,
- &pvt->dclr1);
- if (err)
- goto err_reg;
-
- err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
- &pvt->dchr1);
- if (err)
- goto err_reg;
+ amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
+ amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
}
-
amd64_dump_misc_regs(pvt);
-
- return;
-
-err_reg:
- debugf0("Reading an MC register failed\n");
-
}
/*
@@ -2521,7 +2388,7 @@ err_reg:
*/
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
{
- u32 dram_map, nr_pages;
+ u32 cs_mode, nr_pages;
/*
* The math on this doesn't look right on the surface because x/2*4 can
@@ -2530,9 +2397,9 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
* number of bits to shift the DBAM register to extract the proper CSROW
* field.
*/
- dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
+ cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
- nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
+ nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
/*
* If dual channel then double the memory size of single channel.
@@ -2540,7 +2407,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
*/
nr_pages <<= (pvt->channel_count - 1);
- debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
+ debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
debugf0(" nr_pages= %u channel-count = %d\n",
nr_pages, pvt->channel_count);
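
The page-count conversion here is simple arithmetic: dbam_to_cs() now returns the chip-select size in megabytes, and shifting by 20 - PAGE_SHIFT turns megabytes into pages (with the common 4 KiB pages that shift is 8, i.e. 256 pages per MiB). A quick worked example with illustrative values:

        /* sketch, assuming PAGE_SHIFT == 12 */
        cs_mode  = (dbam0 >> ((5 / 2) * 4)) & 0xF;      /* csrow 5 -> DBAM0 nibble 2 */
        nr_pages = 1024 << (20 - 12);                   /* a 1024 MB cs -> 262144 pages */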
@@ -2556,13 +2423,11 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
struct csrow_info *csrow;
struct amd64_pvt *pvt;
u64 input_addr_min, input_addr_max, sys_addr;
- int i, err = 0, empty = 1;
+ int i, empty = 1;
pvt = mci->pvt_info;
- err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
- if (err)
- debugf0("Reading K8_NBCFG failed\n");
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2618,6 +2483,90 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
return empty;
}
+/* get all cores on this DCT */
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ if (amd_get_nb_id(cpu) == nid)
+ cpumask_set_cpu(cpu, mask);
+}
+
+/* check MCG_CTL on all the cpus on this node */
+static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+{
+ cpumask_var_t mask;
+ int cpu, nbe;
+ bool ret = false;
+
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+ __func__);
+ return false;
+ }
+
+ get_cpus_on_this_dct_cpumask(mask, nid);
+
+ rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
+
+ for_each_cpu(cpu, mask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ nbe = reg->l & K8_MSR_MCGCTL_NBE;
+
+ debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+ cpu, reg->q,
+ (nbe ? "enabled" : "disabled"));
+
+ if (!nbe)
+ goto out;
+ }
+ ret = true;
+
+out:
+ free_cpumask_var(mask);
+ return ret;
+}
+
+static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
+{
+ cpumask_var_t cmask;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
+ amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
+ __func__);
+ return false;
+ }
+
+ get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
+
+ rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+ for_each_cpu(cpu, cmask) {
+
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+
+ if (on) {
+ if (reg->l & K8_MSR_MCGCTL_NBE)
+ pvt->flags.ecc_report = 1;
+
+ reg->l |= K8_MSR_MCGCTL_NBE;
+ } else {
+ /*
+ * Turn off ECC reporting only when it was off before
+ */
+ if (!pvt->flags.ecc_report)
+ reg->l &= ~K8_MSR_MCGCTL_NBE;
+ }
+ }
+ wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
+
+ free_cpumask_var(cmask);
+
+ return 0;
+}
+
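
Both helpers above follow the same per-node MSR pattern: collect the cores that sit behind this node's memory controller into a cpumask, read MCG_CTL from all of them at once into the module-wide msrs buffer (allocated with msrs_alloc() at init time, see the amd64_edac_init() hunk further down), then inspect or flip the NB machine-check enable bit per core. Condensed, the toggle's read-modify-write step looks like this (sketch, assuming mask and msrs are already set up):

        rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
        for_each_cpu(cpu, mask)
                per_cpu_ptr(msrs, cpu)->l |= K8_MSR_MCGCTL_NBE;
        wrmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);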
/*
* Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
* enable it.
@@ -2625,24 +2574,16 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
- int cpu, idx = 0, err = 0;
- struct msr msrs[cpumask_weight(cpumask)];
- u32 value;
- u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
if (!ecc_enable_override)
return;
- memset(msrs, 0, sizeof(msrs));
-
amd64_printk(KERN_WARNING,
"'ecc_enable_override' parameter is active, "
"Enabling AMD ECC hardware now: CAUTION\n");
- err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
- if (err)
- debugf0("Reading K8_NBCTL failed\n");
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
/* turn on UECCn and CECCEn bits */
pvt->old_nbctl = value & mask;
@@ -2651,20 +2592,11 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
value |= mask;
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
- rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
+ if (amd64_toggle_ecc_err_reporting(pvt, ON))
+ amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
+ "MCGCTL!\n");
- for_each_cpu(cpu, cpumask) {
- if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
- set_bit(idx, &pvt->old_mcgctl);
-
- msrs[idx].l |= K8_MSR_MCGCTL_NBE;
- idx++;
- }
- wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
- err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
- if (err)
- debugf0("Reading K8_NBCFG failed\n");
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2679,9 +2611,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
value |= K8_NBCFG_ECC_ENABLE;
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
- err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
- if (err)
- debugf0("Reading K8_NBCFG failed\n");
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
if (!(value & K8_NBCFG_ECC_ENABLE)) {
amd64_printk(KERN_WARNING,
@@ -2701,86 +2631,21 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
- const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id);
- int cpu, idx = 0, err = 0;
- struct msr msrs[cpumask_weight(cpumask)];
- u32 value;
- u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
if (!pvt->nbctl_mcgctl_saved)
return;
- memset(msrs, 0, sizeof(msrs));
-
- err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
- if (err)
- debugf0("Reading K8_NBCTL failed\n");
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
value &= ~mask;
value |= pvt->old_nbctl;
/* restore the NB Enable MCGCTL bit */
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
- rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-
- for_each_cpu(cpu, cpumask) {
- msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
- msrs[idx].l |=
- test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
- idx++;
- }
-
- wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
-}
-
-/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
-{
- int cpu;
-
- for_each_online_cpu(cpu)
- if (amd_get_nb_id(cpu) == nid)
- cpumask_set_cpu(cpu, mask);
-}
-
-/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
-{
- cpumask_t mask;
- struct msr *msrs;
- int cpu, nbe, idx = 0;
- bool ret = false;
-
- cpumask_clear(&mask);
-
- get_cpus_on_this_dct_cpumask(&mask, nid);
-
- msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
- if (!msrs) {
- amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
- __func__);
- return false;
- }
-
- rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
-
- for_each_cpu(cpu, &mask) {
- nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
-
- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, msrs[idx].q,
- (nbe ? "enabled" : "disabled"));
-
- if (!nbe)
- goto out;
-
- idx++;
- }
- ret = true;
-
-out:
- kfree(msrs);
- return ret;
+ if (amd64_toggle_ecc_err_reporting(pvt, OFF))
+ amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
+ "MCGCTL!\n");
}
/*
@@ -2797,13 +2662,10 @@ static const char *ecc_warning =
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
u32 value;
- int err = 0;
u8 ecc_enabled = 0;
bool nb_mce_en = false;
- err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
- if (err)
- debugf0("Reading K8_NBCTL failed\n");
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
if (!ecc_enabled)
@@ -2909,7 +2771,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->mc_type_index = mc_type_index;
pvt->ops = family_ops(mc_type_index);
- pvt->old_mcgctl = 0;
/*
* We have the dram_f2_ctl device as an argument, now go reserve its
@@ -2959,17 +2820,10 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
int node_id = pvt->mc_node_id;
struct mem_ctl_info *mci;
- int ret, err = 0;
+ int ret = -ENODEV;
amd64_read_mc_registers(pvt);
- ret = -ENODEV;
- if (pvt->ops->probe_valid_hardware) {
- err = pvt->ops->probe_valid_hardware(pvt);
- if (err)
- goto err_exit;
- }
-
/*
* We need to determine how many memory channels there are. Then use
* that information for calculating the size of the dynamic instance
@@ -3165,6 +3019,8 @@ static int __init amd64_edac_init(void)
if (cache_k8_northbridges() < 0)
return err;
+ msrs = msrs_alloc();
+
err = pci_register_driver(&amd64_pci_driver);
if (err)
return err;
@@ -3200,6 +3056,9 @@ static void __exit amd64_edac_exit(void)
edac_pci_release_generic_ctl(amd64_ctl_pci);
pci_unregister_driver(&amd64_pci_driver);
+
+ msrs_free(msrs);
+ msrs = NULL;
}
module_init(amd64_edac_init);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c6f359a8520..41bc561e598 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -129,24 +129,22 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
+#define EDAC_AMD64_VERSION " Ver: 3.3.0 " __DATE__
#define EDAC_MOD_STR "amd64_edac"
#define EDAC_MAX_NUMNODES 8
/* Extended Model from CPUID, for CPU Revision numbers */
-#define OPTERON_CPU_LE_REV_C 0
-#define OPTERON_CPU_REV_D 1
-#define OPTERON_CPU_REV_E 2
-
-/* NPT processors have the following Extended Models */
-#define OPTERON_CPU_REV_F 4
-#define OPTERON_CPU_REV_FA 5
+#define K8_REV_D 1
+#define K8_REV_E 2
+#define K8_REV_F 4
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define MAX_CS_COUNT 8
#define DRAM_REG_COUNT 8
+#define ON true
+#define OFF false
/*
* PCI-defined configuration space registers
@@ -241,7 +239,7 @@
#define F10_DCHR_1 0x194
#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
-#define F10_DCHR_Ddr3Mode BIT(8)
+#define DDR3_MODE BIT(8)
#define F10_DCHR_MblMode BIT(6)
@@ -382,14 +380,9 @@ enum {
#define K8_NBCAP_CORES (BIT(12)|BIT(13))
#define K8_NBCAP_CHIPKILL BIT(4)
#define K8_NBCAP_SECDED BIT(3)
-#define K8_NBCAP_8_NODE BIT(2)
-#define K8_NBCAP_DUAL_NODE BIT(1)
#define K8_NBCAP_DCT_DUAL BIT(0)
-/*
- * MSR Regs
- */
-#define K8_MSR_MCGCTL 0x017b
+/* MSRs */
#define K8_MSR_MCGCTL_NBE BIT(4)
#define K8_MSR_MC4CTL 0x0410
@@ -487,7 +480,6 @@ struct amd64_pvt {
/* Save old hw registers' values before we modified them */
u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
u32 old_nbctl;
- unsigned long old_mcgctl; /* per core on this node */
/* MC Type Index value: socket F vs Family 10h */
u32 mc_type_index;
@@ -495,6 +487,7 @@ struct amd64_pvt {
/* misc settings */
struct flags {
unsigned long cf8_extcfg:1;
+ unsigned long ecc_report:1;
} flags;
};
@@ -504,7 +497,6 @@ struct scrubrate {
};
extern struct scrubrate scrubrates[23];
-extern u32 revf_quad_ddr2_shift[16];
extern const char *tt_msgs[4];
extern const char *ll_msgs[4];
extern const char *rrrr_msgs[16];
@@ -534,17 +526,15 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
* functions and per device encoding/decoding logic.
*/
struct low_ops {
- int (*probe_valid_hardware)(struct amd64_pvt *pvt);
- int (*early_channel_count)(struct amd64_pvt *pvt);
-
- u64 (*get_error_address)(struct mem_ctl_info *mci,
- struct err_regs *info);
- void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
- void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
- struct err_regs *info,
- u64 SystemAddr);
- int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
+ int (*early_channel_count) (struct amd64_pvt *pvt);
+
+ u64 (*get_error_address) (struct mem_ctl_info *mci,
+ struct err_regs *info);
+ void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
+ void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
+ void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
+ struct err_regs *info, u64 SystemAddr);
+ int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
};
struct amd64_family_type {
@@ -566,6 +556,22 @@ static inline struct low_ops *family_ops(int index)
return &amd64_family_types[index].ops;
}
+static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pdev, offset, val);
+ if (err)
+ amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+#define amd64_read_pci_cfg(pdev, offset, val) \
+ amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+
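
This small wrapper is what enables the bulk of the .c conversions above: the failure path (an amd64_printk() warning naming the PCI function and register offset, tagged with the caller via __func__) lives in one place, so call sites shrink to a single test of the return value, and the many sites that previously only logged the error can drop the check entirely. Roughly, the recurring before/after pattern in this patch is:

        /* before */
        err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &val);
        if (err)
                debugf0("Reading ... failed\n");
        else
                /* use val */;

        /* after */
        if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &val))
                /* use val */;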
/*
* For future CPU versions, verify the following as new 'slow' rates appear and
* modify the necessary skip values for the supported CPU.
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 12f355cafdb..001b2e797fb 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -74,6 +74,7 @@
#ifdef CONFIG_EDAC_DEBUG
extern int edac_debug_level;
+extern const char *edac_mem_types[];
#ifndef CONFIG_EDAC_DEBUG_VERBOSE
#define edac_debug_printk(level, fmt, arg...) \
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index b629c41756f..3630308e7b8 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -76,6 +76,30 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}
+/*
+ * keep those in sync with the enum mem_type
+ */
+const char *edac_mem_types[] = {
+ "Empty csrow",
+ "Reserved csrow type",
+ "Unknown csrow type",
+ "Fast page mode RAM",
+ "Extended data out RAM",
+ "Burst Extended data out RAM",
+ "Single data rate SDRAM",
+ "Registered single data rate SDRAM",
+ "Double data rate SDRAM",
+ "Registered Double data rate SDRAM",
+ "Rambus DRAM",
+ "Unbuffered DDR2 RAM",
+ "Fully buffered DDR2",
+ "Registered DDR2 RAM",
+ "Rambus XDR",
+ "Unbuffered DDR3 RAM",
+ "Registered DDR3 RAM",
+};
+EXPORT_SYMBOL_GPL(edac_mem_types);
+
#endif /* CONFIG_EDAC_DEBUG */
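
The array is indexed directly by enum mem_type, which is why the amd64_edac hunk above can print edac_mem_types[type] instead of open-coding the name strings; any new enum entry needs a matching string appended here (hence the "keep in sync" comment). Assuming MEM_RDDR3 keeps its usual position at the end of that enum, a debug line like the one in amd64_determine_memory_type() would resolve it to "Registered DDR3 RAM":

        /* sketch -- valid only while the enum and the table stay in sync */
        debugf1(" Memory type is: %s\n", edac_mem_types[MEM_RDDR3]);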
/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 689cc6a6214..8fc91a01962 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -299,6 +299,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
if (!handle_errors)
return;
+ /*
+ * GART TLB error reporting is disabled by default. Bail out early.
+ */
+ if (TLB_ERROR(ec) && !report_gart_errors)
+ return;
+
pr_emerg(" Northbridge Error, node %d", node_id);
/*
@@ -306,14 +312,13 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
* value encoding has changed so interpret those differently
*/
if ((boot_cpu_data.x86 == 0x10) &&
- (boot_cpu_data.x86_model > 8)) {
+ (boot_cpu_data.x86_model > 7)) {
if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
} else {
- pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
+ pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1));
}
-
pr_emerg("%s.\n", EXT_ERR_MSG(xec));
if (BUS_ERROR(ec) && nb_bus_decoder)
@@ -333,21 +338,6 @@ static void amd_decode_fr_mce(u64 mc5_status)
static inline void amd_decode_err_code(unsigned int ec)
{
if (TLB_ERROR(ec)) {
- /*
- * GART errors are intended to help graphics driver developers
- * to detect bad GART PTEs. It is recommended by AMD to disable
- * GART table walk error reporting by default[1] (currently
- * being disabled in mce_cpu_quirks()) and according to the
- * comment in mce_cpu_quirks(), such GART errors can be
- * incorrectly triggered. We may see these errors anyway and
- * unless requested by the user, they won't be reported.
- *
- * [1] section 13.10.1 on BIOS and Kernel Developers Guide for
- * AMD NPT family 0Fh processors
- */
- if (!report_gart_errors)
- return;
-
pr_emerg(" Transaction: %s, Cache Level %s\n",
TT_MSG(ec), LL_MSG(ec));
} else if (MEM_ERROR(ec)) {
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 22db05a67bf..7785d8ffa40 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -9,6 +9,11 @@
* Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
* http://download.intel.com/design/chipsets/datashts/318378.pdf
*
+ * The Intel 5100 has two independent channels. The EDAC core currently
+ * cannot reflect this configuration, so instead the chip-select
+ * rows for each respective channel are laid out one after another,
+ * the first half belonging to channel 0, the second half belonging
+ * to channel 1.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -25,6 +30,8 @@
/* device 16, func 1 */
#define I5100_MC 0x40 /* Memory Control Register */
+#define I5100_MC_SCRBEN_MASK (1 << 7)
+#define I5100_MC_SCRBDONE_MASK (1 << 4)
#define I5100_MS 0x44 /* Memory Status Register */
#define I5100_SPDDATA 0x48 /* Serial Presence Detect Status Reg */
#define I5100_SPDCMD 0x4c /* Serial Presence Detect Command Reg */
@@ -72,11 +79,21 @@
/* bit field accessors */
+static inline u32 i5100_mc_scrben(u32 mc)
+{
+ return mc >> 7 & 1;
+}
+
static inline u32 i5100_mc_errdeten(u32 mc)
{
return mc >> 5 & 1;
}
+static inline u32 i5100_mc_scrbdone(u32 mc)
+{
+ return mc >> 4 & 1;
+}
+
static inline u16 i5100_spddata_rdo(u16 a)
{
return a >> 15 & 1;
@@ -265,42 +282,43 @@ static inline u32 i5100_recmemb_ras(u32 a)
}
/* some generic limits */
-#define I5100_MAX_RANKS_PER_CTLR 6
-#define I5100_MAX_CTLRS 2
+#define I5100_MAX_RANKS_PER_CHAN 6
+#define I5100_CHANNELS 2
#define I5100_MAX_RANKS_PER_DIMM 4
#define I5100_DIMM_ADDR_LINES (6 - 3) /* 64 bits / 8 bits per byte */
-#define I5100_MAX_DIMM_SLOTS_PER_CTLR 4
+#define I5100_MAX_DIMM_SLOTS_PER_CHAN 4
#define I5100_MAX_RANK_INTERLEAVE 4
#define I5100_MAX_DMIRS 5
+#define I5100_SCRUB_REFRESH_RATE (5 * 60 * HZ)
struct i5100_priv {
/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
- int dimm_numrank[I5100_MAX_CTLRS][I5100_MAX_DIMM_SLOTS_PER_CTLR];
+ int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
/*
* mainboard chip select map -- maps i5100 chip selects to
* DIMM slot chip selects. In the case of only 4 ranks per
- * controller, the mapping is fairly obvious but not unique.
- * we map -1 -> NC and assume both controllers use the same
+ * channel, the mapping is fairly obvious but not unique.
+ * we map -1 -> NC and assume both channels use the same
* map...
*
*/
- int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CTLR][I5100_MAX_RANKS_PER_DIMM];
+ int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
/* memory interleave range */
struct {
u64 limit;
unsigned way[2];
- } mir[I5100_MAX_CTLRS];
+ } mir[I5100_CHANNELS];
/* adjusted memory interleave range register */
- unsigned amir[I5100_MAX_CTLRS];
+ unsigned amir[I5100_CHANNELS];
/* dimm interleave range */
struct {
unsigned rank[I5100_MAX_RANK_INTERLEAVE];
u64 limit;
- } dmir[I5100_MAX_CTLRS][I5100_MAX_DMIRS];
+ } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
/* memory technology registers... */
struct {
@@ -310,30 +328,33 @@ struct i5100_priv {
unsigned numbank; /* 2 or 3 lines */
unsigned numrow; /* 13 .. 16 lines */
unsigned numcol; /* 11 .. 12 lines */
- } mtr[I5100_MAX_CTLRS][I5100_MAX_RANKS_PER_CTLR];
+ } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
u64 tolm; /* top of low memory in bytes */
- unsigned ranksperctlr; /* number of ranks per controller */
+ unsigned ranksperchan; /* number of ranks per channel */
struct pci_dev *mc; /* device 16 func 1 */
struct pci_dev *ch0mm; /* device 21 func 0 */
struct pci_dev *ch1mm; /* device 22 func 0 */
+
+ struct delayed_work i5100_scrubbing;
+ int scrub_enable;
};
-/* map a rank/ctlr to a slot number on the mainboard */
+/* map a rank/chan to a slot number on the mainboard */
static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
- const int numrank = priv->dimm_numrank[ctlr][i];
+ const int numrank = priv->dimm_numrank[chan][i];
for (j = 0; j < numrank; j++)
if (priv->dimm_csmap[i][j] == rank)
- return i * 2 + ctlr;
+ return i * 2 + chan;
}
return -1;
@@ -374,32 +395,32 @@ static const char *i5100_err_msg(unsigned err)
return "none";
}
-/* convert csrow index into a rank (per controller -- 0..5) */
+/* convert csrow index into a rank (per channel -- 0..5) */
static int i5100_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow % priv->ranksperctlr;
+ return csrow % priv->ranksperchan;
}
-/* convert csrow index into a controller (0..1) */
-static int i5100_csrow_to_cntlr(const struct mem_ctl_info *mci, int csrow)
+/* convert csrow index into a channel (0..1) */
+static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
{
const struct i5100_priv *priv = mci->pvt_info;
- return csrow / priv->ranksperctlr;
+ return csrow / priv->ranksperchan;
}
static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int ctlr, int rank)
+ int chan, int rank)
{
const struct i5100_priv *priv = mci->pvt_info;
- return ctlr * priv->ranksperctlr + rank;
+ return chan * priv->ranksperchan + rank;
}
static void i5100_handle_ce(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -407,12 +428,12 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "CE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ce_count++;
@@ -421,7 +442,7 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
- int ctlr,
+ int chan,
unsigned bank,
unsigned rank,
unsigned long syndrome,
@@ -429,23 +450,23 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, ctlr, rank);
+ const int csrow = i5100_rank_to_csrow(mci, chan, rank);
printk(KERN_ERR
- "UE ctlr %d, bank %u, rank %u, syndrome 0x%lx, "
+ "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
"cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- ctlr, bank, rank, syndrome, cas, ras,
+ chan, bank, rank, syndrome, cas, ras,
csrow, mci->csrows[csrow].channels[0].label, msg);
mci->ue_count++;
mci->csrows[csrow].ue_count++;
}
-static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
+static void i5100_read_log(struct mem_ctl_info *mci, int chan,
u32 ferr, u32 nerr)
{
struct i5100_priv *priv = mci->pvt_info;
- struct pci_dev *pdev = (ctlr) ? priv->ch1mm : priv->ch0mm;
+ struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
u32 dw;
u32 dw2;
unsigned syndrome = 0;
@@ -484,7 +505,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ce(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
if (i5100_validlog_nrecmemvalid(dw)) {
@@ -506,7 +527,7 @@ static void i5100_read_log(struct mem_ctl_info *mci, int ctlr,
else
msg = i5100_err_msg(nerr);
- i5100_handle_ue(mci, ctlr, bank, rank, syndrome, cas, ras, msg);
+ i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
}
pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
@@ -534,6 +555,80 @@ static void i5100_check_error(struct mem_ctl_info *mci)
}
}
+/* The i5100 chipset will scrub the entire memory once, then
+ * set a done bit. Continuous scrubbing is achieved by enqueueing
+ * delayed work to a workqueue, checking every few minutes if
+ * the scrubbing has completed and if so reinitiating it.
+ */
+
+static void i5100_refresh_scrubbing(struct work_struct *work)
+{
+ struct delayed_work *i5100_scrubbing = container_of(work,
+ struct delayed_work,
+ work);
+ struct i5100_priv *priv = container_of(i5100_scrubbing,
+ struct i5100_priv,
+ i5100_scrubbing);
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (priv->scrub_enable) {
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ if (i5100_mc_scrbdone(dw)) {
+ dw |= I5100_MC_SCRBEN_MASK;
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ }
+
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+}
+/*
+ * The bandwidth is based on experimentation, feel free to refine it.
+ */
+static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+ if (*bandwidth) {
+ priv->scrub_enable = 1;
+ dw |= I5100_MC_SCRBEN_MASK;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ } else {
+ priv->scrub_enable = 0;
+ dw &= ~I5100_MC_SCRBEN_MASK;
+ cancel_delayed_work(&(priv->i5100_scrubbing));
+ }
+ pci_write_config_dword(priv->mc, I5100_MC, dw);
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
+
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
+ u32 *bandwidth)
+{
+ struct i5100_priv *priv = mci->pvt_info;
+ u32 dw;
+
+ pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+ *bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+ return 0;
+}
+
static struct pci_dev *pci_get_device_func(unsigned vendor,
unsigned device,
unsigned func)
@@ -557,19 +652,19 @@ static unsigned long __devinit i5100_npages(struct mem_ctl_info *mci,
int csrow)
{
struct i5100_priv *priv = mci->pvt_info;
- const unsigned ctlr_rank = i5100_csrow_to_rank(mci, csrow);
- const unsigned ctlr = i5100_csrow_to_cntlr(mci, csrow);
+ const unsigned chan_rank = i5100_csrow_to_rank(mci, csrow);
+ const unsigned chan = i5100_csrow_to_chan(mci, csrow);
unsigned addr_lines;
/* dimm present? */
- if (!priv->mtr[ctlr][ctlr_rank].present)
+ if (!priv->mtr[chan][chan_rank].present)
return 0ULL;
addr_lines =
I5100_DIMM_ADDR_LINES +
- priv->mtr[ctlr][ctlr_rank].numcol +
- priv->mtr[ctlr][ctlr_rank].numrow +
- priv->mtr[ctlr][ctlr_rank].numbank;
+ priv->mtr[chan][chan_rank].numcol +
+ priv->mtr[chan][chan_rank].numrow +
+ priv->mtr[chan][chan_rank].numbank;
return (unsigned long)
((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
@@ -581,11 +676,11 @@ static void __devinit i5100_init_mtr(struct mem_ctl_info *mci)
struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
struct pci_dev *pdev = mms[i];
- for (j = 0; j < I5100_MAX_RANKS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
const unsigned addr =
(j < 4) ? I5100_MTR_0 + j * 2 :
I5100_MTR_4 + (j - 4) * 2;
@@ -644,7 +739,6 @@ static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
* fill dimm chip select map
*
* FIXME:
- * o only valid for 4 ranks per controller
 * o not the only way to map chip selects to dimm slots
* o investigate if there is some way to obtain this map from the bios
*/
@@ -653,9 +747,7 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
struct i5100_priv *priv = mci->pvt_info;
int i;
- WARN_ON(priv->ranksperctlr != 4);
-
- for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CTLR; i++) {
+ for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
int j;
for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
@@ -663,12 +755,21 @@ static void __devinit i5100_init_dimm_csmap(struct mem_ctl_info *mci)
}
/* only 2 chip selects per slot... */
- priv->dimm_csmap[0][0] = 0;
- priv->dimm_csmap[0][1] = 3;
- priv->dimm_csmap[1][0] = 1;
- priv->dimm_csmap[1][1] = 2;
- priv->dimm_csmap[2][0] = 2;
- priv->dimm_csmap[3][0] = 3;
+ if (priv->ranksperchan == 4) {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 3;
+ priv->dimm_csmap[1][0] = 1;
+ priv->dimm_csmap[1][1] = 2;
+ priv->dimm_csmap[2][0] = 2;
+ priv->dimm_csmap[3][0] = 3;
+ } else {
+ priv->dimm_csmap[0][0] = 0;
+ priv->dimm_csmap[0][1] = 1;
+ priv->dimm_csmap[1][0] = 2;
+ priv->dimm_csmap[1][1] = 3;
+ priv->dimm_csmap[2][0] = 4;
+ priv->dimm_csmap[2][1] = 5;
+ }
}
static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
@@ -677,10 +778,10 @@ static void __devinit i5100_init_dimm_layout(struct pci_dev *pdev,
struct i5100_priv *priv = mci->pvt_info;
int i;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
- for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CTLR; j++) {
+ for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
u8 rank;
if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
@@ -720,7 +821,7 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
pci_read_config_word(pdev, I5100_AMIR_1, &w);
priv->amir[1] = w;
- for (i = 0; i < I5100_MAX_CTLRS; i++) {
+ for (i = 0; i < I5100_CHANNELS; i++) {
int j;
for (j = 0; j < 5; j++) {
@@ -747,7 +848,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
for (i = 0; i < mci->nr_csrows; i++) {
const unsigned long npages = i5100_npages(mci, i);
- const unsigned cntlr = i5100_csrow_to_cntlr(mci, i);
+ const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
if (!npages)
@@ -765,7 +866,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].grain = 32;
mci->csrows[i].csrow_idx = i;
mci->csrows[i].dtype =
- (priv->mtr[cntlr][rank].width == 4) ? DEV_X4 : DEV_X8;
+ (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
mci->csrows[i].ue_count = 0;
mci->csrows[i].ce_count = 0;
mci->csrows[i].mtype = MEM_RDDR2;
@@ -777,7 +878,7 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
mci->csrows[i].channels[0].csrow = mci->csrows + i;
snprintf(mci->csrows[i].channels[0].label,
sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, cntlr, rank));
+ "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
total_pages += npages;
}
@@ -815,13 +916,6 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
pci_read_config_dword(pdev, I5100_MS, &dw);
ranksperch = !!(dw & (1 << 8)) * 2 + 4;
- if (ranksperch != 4) {
- /* FIXME: get 6 ranks / controller to work - need hw... */
- printk(KERN_INFO "i5100_edac: unsupported configuration.\n");
- ret = -ENODEV;
- goto bail_pdev;
- }
-
/* enable error reporting... */
pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
@@ -864,11 +958,21 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->dev = &pdev->dev;
priv = mci->pvt_info;
- priv->ranksperctlr = ranksperch;
+ priv->ranksperchan = ranksperch;
priv->mc = pdev;
priv->ch0mm = ch0mm;
priv->ch1mm = ch1mm;
+ INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
+
+ /* If scrubbing was already enabled by the bios, start maintaining it */
+ pci_read_config_dword(pdev, I5100_MC, &dw);
+ if (i5100_mc_scrben(dw)) {
+ priv->scrub_enable = 1;
+ schedule_delayed_work(&(priv->i5100_scrubbing),
+ I5100_SCRUB_REFRESH_RATE);
+ }
+
i5100_init_dimm_layout(pdev, mci);
i5100_init_interleaving(pdev, mci);
@@ -882,6 +986,8 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
mci->ctl_page_to_phys = NULL;
mci->edac_check = i5100_check_error;
+ mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
+ mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
i5100_init_csrows(mci);
@@ -897,12 +1003,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
if (edac_mc_add_mc(mci)) {
ret = -ENODEV;
- goto bail_mc;
+ goto bail_scrub;
}
return ret;
-bail_mc:
+bail_scrub:
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
bail_disable_ch1:
@@ -935,6 +1043,10 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
return;
priv = mci->pvt_info;
+
+ priv->scrub_enable = 0;
+ cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+
pci_disable_device(pdev);
pci_disable_device(priv->ch0mm);
pci_disable_device(priv->ch1mm);
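
The scrub maintenance added above follows the usual self-rescheduling delayed-work pattern: the work function checks whether the hardware finished its pass, re-arms it if so, and queues itself again; enable/disable simply schedule or cancel the work. A minimal sketch of that pattern, with hypothetical names and the i5100 register access left out:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EXAMPLE_POLL_INTERVAL	(5 * 60 * HZ)	/* mirrors I5100_SCRUB_REFRESH_RATE */

struct example_scrub {
	struct delayed_work work;
	int enabled;
};

static void example_scrub_poll(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	struct example_scrub *s = container_of(dwork, struct example_scrub, work);

	if (!s->enabled)
		return;

	/* query the hardware here; if the previous pass completed,
	 * set the "start scrub" bit again, then poll again later */
	schedule_delayed_work(&s->work, EXAMPLE_POLL_INTERVAL);
}

static void example_scrub_start(struct example_scrub *s)
{
	INIT_DELAYED_WORK(&s->work, example_scrub_poll);
	s->enabled = 1;
	schedule_delayed_work(&s->work, EXAMPLE_POLL_INTERVAL);
}

static void example_scrub_stop(struct example_scrub *s)
{
	s->enabled = 0;
	cancel_delayed_work_sync(&s->work);
}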
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ae4556f0c0c..96768e16086 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2218,6 +2218,13 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
page = payload >> PAGE_SHIFT;
offset = payload & ~PAGE_MASK;
rest = p->payload_length;
+ /*
+ * The controllers I've tested have not worked correctly when
+ * second_req_count is zero. Rather than do something we know won't
+ * work, return an error
+ */
+ if (rest == 0)
+ return -EINVAL;
/* FIXME: make packet-per-buffer/dual-buffer a context option */
while (rest > 0) {
@@ -2271,7 +2278,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
- struct descriptor *d = NULL, *pd = NULL;
+ struct descriptor *d, *pd;
struct fw_iso_packet *p = packet;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
@@ -2309,8 +2316,9 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
rest = payload_per_buffer;
+ pd = d;
for (j = 1; j < z; j++) {
- pd = d + j;
+ pd++;
pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_MORE);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index ebb9e51deb0..1b03ba1d083 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -7,7 +7,7 @@ menu "Firmware Drivers"
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
- depends on !IA64
+ depends on X86
help
Say Y or M here if you want to enable BIOS Enhanced Disk Drive
Services real mode BIOS calls to determine which disk
@@ -28,7 +28,7 @@ config EDD_OFF
config FIRMWARE_MEMMAP
bool "Add firmware-provided memory map to sysfs" if EMBEDDED
- default (X86_64 || X86_32)
+ default X86
help
Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
That memory map is used for example by kexec to set up parameter area
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index b4704e150b2..b3a0cf57442 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -544,9 +544,12 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
{
rbu_data.entry_created = 0;
- if (!fw || !fw->size)
+ if (!fw)
return;
+ if (!fw->size)
+ goto out;
+
spin_lock(&rbu_data.lock);
if (!strcmp(image_type, "mono")) {
if (!img_update_realloc(fw->size))
@@ -568,6 +571,8 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
} else
pr_debug("invalid image type specified.\n");
spin_unlock(&rbu_data.lock);
+ out:
+ release_firmware(fw);
}
static ssize_t read_rbu_image_type(struct kobject *kobj,
@@ -615,7 +620,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj,
spin_unlock(&rbu_data.lock);
req_firm_rc = request_firmware_nowait(THIS_MODULE,
FW_ACTION_NOHOTPLUG, "dell_rbu",
- &rbu_device->dev, &context,
+ &rbu_device->dev, GFP_KERNEL, &context,
callbackfn_rbu);
if (req_firm_rc) {
printk(KERN_ERR
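
Two things change in the callback path above: the firmware is now released on every non-NULL result (a zero-length image previously leaked it), and request_firmware_nowait() gains a GFP argument. A small sketch of the expected shape of an asynchronous firmware consumer, with a hypothetical device and firmware file name:

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/device.h>

static void example_fw_done(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw)
		return;		/* request failed; nothing to release */

	if (fw->size)
		dev_info(dev, "loaded %zu firmware bytes\n", fw->size);

	release_firmware(fw);	/* always balance a successful request */
}

static int example_fw_request(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
				       "example.fw", dev, GFP_KERNEL,
				       dev, example_fw_done);
}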
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 3a2ccb09e2f..31b983d9462 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -169,10 +169,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
if (!s)
return;
- sprintf(s,
- "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
- d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7],
- d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]);
+ sprintf(s, "%pUB", d);
dmi_ident[slot] = s;
}
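
The dmi_scan change relies on the %pU printk extension present in this tree, which formats a 16-byte buffer as a UUID; the "B" suffix selects big-endian byte order and upper case, matching the string the open-coded sprintf used to build. A trivial, hypothetical use:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_print_uuid(const u8 uuid[16])
{
	/* prints e.g. "00112233-4455-6677-8899-AABBCCDDEEFF" */
	pr_info("board uuid: %pUB\n", uuid);
}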
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2ad0128c63c..a019b49ecc9 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -174,6 +174,16 @@ config GPIO_ADP5520
comment "PCI GPIO expanders:"
+config GPIO_CS5535
+ tristate "AMD CS5535/CS5536 GPIO support"
+ depends on PCI && !CS5535_GPIO
+ help
+ The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
+ can be used for quite a number of things. The CS5535/6 is found on
+ AMD Geode and Lemote Yeeloong devices.
+
+ If unsure, say N.
+
config GPIO_BT8XX
tristate "BT8XX GPIO abuser"
depends on PCI && VIDEO_BT848=n
@@ -196,6 +206,12 @@ config GPIO_LANGWELL
help
Say Y here to support Intel Moorestown platform GPIO.
+config GPIO_TIMBERDALE
+ bool "Support for timberdale GPIO IP"
+ depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
+ ---help---
+ Add support for the GPIO IP in the timberdale FPGA.
+
comment "SPI GPIO expanders:"
config GPIO_MAX7301
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 00a532c9a1e..52fe4cf734c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -13,9 +13,11 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
obj-$(CONFIG_GPIO_PL061) += pl061.o
+obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
+obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
diff --git a/drivers/gpio/adp5520-gpio.c b/drivers/gpio/adp5520-gpio.c
index ad05bbc7ffd..0f93105873c 100644
--- a/drivers/gpio/adp5520-gpio.c
+++ b/drivers/gpio/adp5520-gpio.c
@@ -34,9 +34,9 @@ static int adp5520_gpio_get_value(struct gpio_chip *chip, unsigned off)
*/
if (test_bit(off, &dev->output))
- adp5520_read(dev->master, GPIO_OUT, &reg_val);
+ adp5520_read(dev->master, ADP5520_GPIO_OUT, &reg_val);
else
- adp5520_read(dev->master, GPIO_IN, &reg_val);
+ adp5520_read(dev->master, ADP5520_GPIO_IN, &reg_val);
return !!(reg_val & dev->lut[off]);
}
@@ -48,9 +48,9 @@ static void adp5520_gpio_set_value(struct gpio_chip *chip,
dev = container_of(chip, struct adp5520_gpio, gpio_chip);
if (val)
- adp5520_set_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ adp5520_set_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
else
- adp5520_clr_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
}
static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
@@ -60,7 +60,8 @@ static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
clear_bit(off, &dev->output);
- return adp5520_clr_bits(dev->master, GPIO_CFG_2, dev->lut[off]);
+ return adp5520_clr_bits(dev->master, ADP5520_GPIO_CFG_2,
+ dev->lut[off]);
}
static int adp5520_gpio_direction_output(struct gpio_chip *chip,
@@ -73,18 +74,21 @@ static int adp5520_gpio_direction_output(struct gpio_chip *chip,
set_bit(off, &dev->output);
if (val)
- ret |= adp5520_set_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
else
- ret |= adp5520_clr_bits(dev->master, GPIO_OUT, dev->lut[off]);
+ ret |= adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
- ret |= adp5520_set_bits(dev->master, GPIO_CFG_2, dev->lut[off]);
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_CFG_2,
+ dev->lut[off]);
return ret;
}
static int __devinit adp5520_gpio_probe(struct platform_device *pdev)
{
- struct adp5520_gpio_platfrom_data *pdata = pdev->dev.platform_data;
+ struct adp5520_gpio_platform_data *pdata = pdev->dev.platform_data;
struct adp5520_gpio *dev;
struct gpio_chip *gc;
int ret, i, gpios;
@@ -129,20 +133,20 @@ static int __devinit adp5520_gpio_probe(struct platform_device *pdev)
gc->label = pdev->name;
gc->owner = THIS_MODULE;
- ret = adp5520_clr_bits(dev->master, GPIO_CFG_1,
+ ret = adp5520_clr_bits(dev->master, ADP5520_GPIO_CFG_1,
pdata->gpio_en_mask);
- if (pdata->gpio_en_mask & GPIO_C3)
- ctl_mask |= C3_MODE;
+ if (pdata->gpio_en_mask & ADP5520_GPIO_C3)
+ ctl_mask |= ADP5520_C3_MODE;
- if (pdata->gpio_en_mask & GPIO_R3)
- ctl_mask |= R3_MODE;
+ if (pdata->gpio_en_mask & ADP5520_GPIO_R3)
+ ctl_mask |= ADP5520_R3_MODE;
if (ctl_mask)
- ret = adp5520_set_bits(dev->master, LED_CONTROL,
+ ret = adp5520_set_bits(dev->master, ADP5520_LED_CONTROL,
ctl_mask);
- ret |= adp5520_set_bits(dev->master, GPIO_PULLUP,
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_PULLUP,
pdata->gpio_pullup_mask);
if (ret) {
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
new file mode 100644
index 00000000000..0fdbe94f24a
--- /dev/null
+++ b/drivers/gpio/cs5535-gpio.c
@@ -0,0 +1,355 @@
+/*
+ * AMD CS5535/CS5536 GPIO driver
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2007-2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/cs5535.h>
+
+#define DRV_NAME "cs5535-gpio"
+#define GPIO_BAR 1
+
+/*
+ * Some GPIO pins
+ * 31-29,23 : reserved (always mask out)
+ * 28 : Power Button
+ * 26 : PME#
+ * 22-16 : LPC
+ * 14,15 : SMBus
+ * 9,8 : UART1
+ * 7 : PCI INTB
+ * 3,4 : UART2/DDC
+ * 2 : IDE_IRQ0
+ * 1 : AC_BEEP
+ * 0 : PCI INTA
+ *
+ * If a mask was not specified, allow all except
+ * reserved and Power Button
+ */
+#define GPIO_DEFAULT_MASK 0x0F7FFFFF
+
+static ulong mask = GPIO_DEFAULT_MASK;
+module_param_named(mask, mask, ulong, 0444);
+MODULE_PARM_DESC(mask, "GPIO channel mask.");
+
+static struct cs5535_gpio_chip {
+ struct gpio_chip chip;
+ resource_size_t base;
+
+ struct pci_dev *pdev;
+ spinlock_t lock;
+} cs5535_gpio_chip;
+
+/*
+ * The CS5535/CS5536 GPIOs support a number of extra features not defined
+ * by the gpio_chip API, so these are exported. For a full list of the
+ * registers, see include/linux/cs5535.h.
+ */
+
+static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
+ unsigned int reg)
+{
+ if (offset < 16)
+ /* low bank register */
+ outl(1 << offset, chip->base + reg);
+ else
+ /* high bank register */
+ outl(1 << (offset - 16), chip->base + 0x80 + reg);
+}
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg)
+{
+ struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ __cs5535_gpio_set(chip, offset, reg);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_set);
+
+static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
+ unsigned int reg)
+{
+ if (offset < 16)
+ /* low bank register */
+ outl(1 << (offset + 16), chip->base + reg);
+ else
+ /* high bank register */
+ outl(1 << offset, chip->base + 0x80 + reg);
+}
+
+void cs5535_gpio_clear(unsigned offset, unsigned int reg)
+{
+ struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ __cs5535_gpio_clear(chip, offset, reg);
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_clear);
+
+int cs5535_gpio_isset(unsigned offset, unsigned int reg)
+{
+ struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+ unsigned long flags;
+ long val;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ if (offset < 16)
+ /* low bank register */
+ val = inl(chip->base + reg);
+ else {
+ /* high bank register */
+ val = inl(chip->base + 0x80 + reg);
+ offset -= 16;
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return (val & (1 << offset)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
+
+/*
+ * Generic gpio_chip API support.
+ */
+
+static int chip_gpio_request(struct gpio_chip *c, unsigned offset)
+{
+ struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+
+ /* check if this pin is available */
+ if ((mask & (1 << offset)) == 0) {
+ dev_info(&chip->pdev->dev,
+ "pin %u is not available (check mask)\n", offset);
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return -EINVAL;
+ }
+
+ /* disable output aux 1 & 2 on this pin */
+ __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX1);
+ __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX2);
+
+ /* disable input aux 1 on this pin */
+ __cs5535_gpio_clear(chip, offset, GPIO_INPUT_AUX1);
+
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return cs5535_gpio_isset(offset, GPIO_OUTPUT_VAL);
+}
+
+static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ if (val)
+ cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
+ else
+ cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
+}
+
+static int chip_direction_input(struct gpio_chip *c, unsigned offset)
+{
+ struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ __cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
+{
+ struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+
+ __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE);
+ if (val)
+ __cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL);
+ else
+ __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_VAL);
+
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
+}
+
+static char *cs5535_gpio_names[] = {
+ "GPIO0", "GPIO1", "GPIO2", "GPIO3",
+ "GPIO4", "GPIO5", "GPIO6", "GPIO7",
+ "GPIO8", "GPIO9", "GPIO10", "GPIO11",
+ "GPIO12", "GPIO13", "GPIO14", "GPIO15",
+ "GPIO16", "GPIO17", "GPIO18", "GPIO19",
+ "GPIO20", "GPIO21", "GPIO22", NULL,
+ "GPIO24", "GPIO25", "GPIO26", "GPIO27",
+ "GPIO28", NULL, NULL, NULL,
+};
+
+static struct cs5535_gpio_chip cs5535_gpio_chip = {
+ .chip = {
+ .owner = THIS_MODULE,
+ .label = DRV_NAME,
+
+ .base = 0,
+ .ngpio = 32,
+ .names = cs5535_gpio_names,
+ .request = chip_gpio_request,
+
+ .get = chip_gpio_get,
+ .set = chip_gpio_set,
+
+ .direction_input = chip_direction_input,
+ .direction_output = chip_direction_output,
+ },
+};
+
+static int __init cs5535_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int err;
+ ulong mask_orig = mask;
+
+ /* There are two ways to get the GPIO base address; one is by
+ * fetching it from MSR_LBAR_GPIO, the other is by reading the
+ * PCI BAR info. The latter method is easier (especially across
+ * different architectures), so we'll stick with that for now. If
+ * it turns out to be unreliable in the face of crappy BIOSes, we
+ * can always go back to using MSRs.. */
+
+ err = pci_enable_device_io(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "can't enable device IO\n");
+ goto done;
+ }
+
+ err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
+ goto done;
+ }
+
+ /* set up the driver-specific struct */
+ cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
+ cs5535_gpio_chip.pdev = pdev;
+ spin_lock_init(&cs5535_gpio_chip.lock);
+
+ dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
+ (unsigned long long) cs5535_gpio_chip.base);
+
+ /* mask out reserved pins */
+ mask &= 0x1F7FFFFF;
+
+ /* do not allow pin 28, the Power Button, as it needs special
+ * handling in the PMC. (note 12, p. 48) */
+ mask &= ~(1 << 28);
+
+ if (mask_orig != mask)
+ dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n",
+ mask_orig, mask);
+
+ /* finally, register with the generic GPIO API */
+ err = gpiochip_add(&cs5535_gpio_chip.chip);
+ if (err)
+ goto release_region;
+
+ dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
+ return 0;
+
+release_region:
+ pci_release_region(pdev, GPIO_BAR);
+done:
+ return err;
+}
+
+static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
+{
+ int err;
+
+ err = gpiochip_remove(&cs5535_gpio_chip.chip);
+ if (err) {
+ /* uhh? */
+ dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
+ }
+ pci_release_region(pdev, GPIO_BAR);
+}
+
+static struct pci_device_id cs5535_gpio_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
+
+/*
+ * We can't use the standard PCI driver registration stuff here, since
+ * that allows only one driver to bind to each PCI device (and we want
+ * multiple drivers to be able to bind to the device). Instead, manually
+ * scan for the PCI device, request a single region, and keep track of the
+ * devices that we're using.
+ */
+
+static int __init cs5535_gpio_scan_pci(void)
+{
+ struct pci_dev *pdev;
+ int err = -ENODEV;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
+ pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
+ cs5535_gpio_pci_tbl[i].device, NULL);
+ if (pdev) {
+ err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
+ if (err)
+ pci_dev_put(pdev);
+
+ /* we only support a single CS5535/6 southbridge */
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void __exit cs5535_gpio_free_pci(void)
+{
+ cs5535_gpio_remove(cs5535_gpio_chip.pdev);
+ pci_dev_put(cs5535_gpio_chip.pdev);
+}
+
+static int __init cs5535_gpio_init(void)
+{
+ return cs5535_gpio_scan_pci();
+}
+
+static void __exit cs5535_gpio_exit(void)
+{
+ cs5535_gpio_free_pci();
+}
+
+module_init(cs5535_gpio_init);
+module_exit(cs5535_gpio_exit);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 50de0f5750d..a25ad284a27 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -53,6 +53,7 @@ struct gpio_desc {
#define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */
#define FLAG_TRIG_FALL 5 /* trigger on falling edge */
#define FLAG_TRIG_RISE 6 /* trigger on rising edge */
+#define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */
#define PDESC_ID_SHIFT 16 /* add new flags before this one */
@@ -210,6 +211,11 @@ static DEFINE_MUTEX(sysfs_lock);
* * configures behavior of poll(2) on /value
* * available only if pin can generate IRQs on input
* * is read/write as "none", "falling", "rising", or "both"
+ * /active_low
+ * * configures polarity of /value
+ * * is read/write as zero/nonzero
+ * * also affects existing and subsequent "falling" and "rising"
+ * /edge configuration
*/
static ssize_t gpio_direction_show(struct device *dev,
@@ -255,7 +261,7 @@ static ssize_t gpio_direction_store(struct device *dev,
return status ? : size;
}
-static const DEVICE_ATTR(direction, 0644,
+static /* const */ DEVICE_ATTR(direction, 0644,
gpio_direction_show, gpio_direction_store);
static ssize_t gpio_value_show(struct device *dev,
@@ -267,10 +273,17 @@ static ssize_t gpio_value_show(struct device *dev,
mutex_lock(&sysfs_lock);
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
status = -EIO;
- else
- status = sprintf(buf, "%d\n", !!gpio_get_value_cansleep(gpio));
+ } else {
+ int value;
+
+ value = !!gpio_get_value_cansleep(gpio);
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+
+ status = sprintf(buf, "%d\n", value);
+ }
mutex_unlock(&sysfs_lock);
return status;
@@ -294,6 +307,8 @@ static ssize_t gpio_value_store(struct device *dev,
status = strict_strtol(buf, 0, &value);
if (status == 0) {
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
gpio_set_value_cansleep(gpio, value != 0);
status = size;
}
@@ -303,7 +318,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
-static /*const*/ DEVICE_ATTR(value, 0644,
+static const DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -352,9 +367,11 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
irq_flags = IRQF_SHARED;
if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
- irq_flags |= IRQF_TRIGGER_FALLING;
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
- irq_flags |= IRQF_TRIGGER_RISING;
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (!pdesc) {
pdesc = kmalloc(sizeof(*pdesc), GFP_KERNEL);
@@ -475,9 +492,79 @@ found:
static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
+static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
+ int value)
+{
+ int status = 0;
+
+ if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ return 0;
+
+ if (value)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ else
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
+ /* reconfigure poll(2) support if enabled on one edge only */
+ if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
+ !!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
+ unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
+
+ gpio_setup_irq(desc, dev, 0);
+ status = gpio_setup_irq(desc, dev, trigger_flags);
+ }
+
+ return status;
+}
+
+static ssize_t gpio_active_low_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else
+ status = sprintf(buf, "%d\n",
+ !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
+
+ mutex_unlock(&sysfs_lock);
+
+ return status;
+}
+
+static ssize_t gpio_active_low_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
+ status = -EIO;
+ } else {
+ long value;
+
+ status = strict_strtol(buf, 0, &value);
+ if (status == 0)
+ status = sysfs_set_active_low(desc, dev, value != 0);
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ return status ? : size;
+}
+
+static const DEVICE_ATTR(active_low, 0644,
+ gpio_active_low_show, gpio_active_low_store);
+
static const struct attribute *gpio_attrs[] = {
- &dev_attr_direction.attr,
&dev_attr_value.attr,
+ &dev_attr_active_low.attr,
NULL,
};
@@ -662,12 +749,12 @@ int gpio_export(unsigned gpio, bool direction_may_change)
dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
desc, ioname ? ioname : "gpio%d", gpio);
if (!IS_ERR(dev)) {
- if (direction_may_change)
- status = sysfs_create_group(&dev->kobj,
+ status = sysfs_create_group(&dev->kobj,
&gpio_attr_group);
- else
+
+ if (!status && direction_may_change)
status = device_create_file(dev,
- &dev_attr_value);
+ &dev_attr_direction);
if (!status && gpio_to_irq(gpio) >= 0
&& (direction_may_change
@@ -744,6 +831,55 @@ done:
}
EXPORT_SYMBOL_GPL(gpio_export_link);
+
+/**
+ * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
+ * @gpio: gpio to change
+ * @value: non-zero to use active low, i.e. inverted values
+ *
+ * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute.
+ * The GPIO does not have to be exported yet. If poll(2) support has
+ * been enabled for either rising or falling edge, it will be
+ * reconfigured to follow the new polarity.
+ *
+ * Returns zero on success, else an error.
+ */
+int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ struct gpio_desc *desc;
+ struct device *dev = NULL;
+ int status = -EINVAL;
+
+ if (!gpio_is_valid(gpio))
+ goto done;
+
+ mutex_lock(&sysfs_lock);
+
+ desc = &gpio_desc[gpio];
+
+ if (test_bit(FLAG_EXPORT, &desc->flags)) {
+ struct device *dev;
+
+ dev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (dev == NULL) {
+ status = -ENODEV;
+ goto unlock;
+ }
+ }
+
+ status = sysfs_set_active_low(desc, dev, value);
+
+unlock:
+ mutex_unlock(&sysfs_lock);
+
+done:
+ if (status)
+ pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
+
/**
* gpio_unexport - reverse effect of gpio_export()
* @gpio: gpio to make unavailable
@@ -1094,6 +1230,7 @@ void gpio_free(unsigned gpio)
}
desc_set_label(desc, NULL);
module_put(desc->chip->owner);
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_REQUESTED, &desc->flags);
} else
WARN_ON(extra_checks);
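
The new gpio_sysfs_set_active_low() lets board or driver code invert the polarity seen through /sys/class/gpio/gpioN/value (and any configured edge triggers) without involving userspace. A sketch of exporting an active-low line from board code; the GPIO number and label are made up:

#include <linux/gpio.h>

static int example_export_led(void)
{
	unsigned gpio = 42;	/* hypothetical */
	int err;

	err = gpio_request(gpio, "status-led");
	if (err)
		return err;

	gpio_direction_output(gpio, 0);

	err = gpio_export(gpio, false);
	if (err)
		goto free;

	/* the LED is wired active low: invert /sys/.../value */
	err = gpio_sysfs_set_active_low(gpio, 1);
	if (err)
		goto unexport;

	return 0;

unexport:
	gpio_unexport(gpio);
free:
	gpio_free(gpio);
	return err;
}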
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 4baf3d7d0f8..6c0ebbdc659 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -123,7 +123,7 @@ static int lnw_irq_type(unsigned irq, unsigned type)
void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]);
void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]);
- if (gpio < 0 || gpio > lnw->chip.ngpio)
+ if (gpio >= lnw->chip.ngpio)
return -EINVAL;
spin_lock_irqsave(&lnw->lock, flags);
if (type & IRQ_TYPE_EDGE_RISING)
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
new file mode 100644
index 00000000000..a4d344ba8e5
--- /dev/null
+++ b/drivers/gpio/timbgpio.c
@@ -0,0 +1,342 @@
+/*
+ * timbgpio.c timberdale FPGA GPIO driver
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA GPIO
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/timb_gpio.h>
+#include <linux/interrupt.h>
+
+#define DRIVER_NAME "timb-gpio"
+
+#define TGPIOVAL 0x00
+#define TGPIODIR 0x04
+#define TGPIO_IER 0x08
+#define TGPIO_ISR 0x0c
+#define TGPIO_IPR 0x10
+#define TGPIO_ICR 0x14
+#define TGPIO_FLR 0x18
+#define TGPIO_LVR 0x1c
+
+struct timbgpio {
+ void __iomem *membase;
+ spinlock_t lock; /* mutual exclusion */
+ struct gpio_chip gpio;
+ int irq_base;
+};
+
+static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
+ unsigned offset, bool enabled)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+ u32 reg;
+
+ spin_lock(&tgpio->lock);
+ reg = ioread32(tgpio->membase + offset);
+
+ if (enabled)
+ reg |= (1 << index);
+ else
+ reg &= ~(1 << index);
+
+ iowrite32(reg, tgpio->membase + offset);
+ spin_unlock(&tgpio->lock);
+
+ return 0;
+}
+
+static int timbgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ return timbgpio_update_bit(gpio, nr, TGPIODIR, true);
+}
+
+static int timbgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+ u32 value;
+
+ value = ioread32(tgpio->membase + TGPIOVAL);
+ return (value & (1 << nr)) ? 1 : 0;
+}
+
+static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
+ unsigned nr, int val)
+{
+ return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
+}
+
+static void timbgpio_gpio_set(struct gpio_chip *gpio,
+ unsigned nr, int val)
+{
+ timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
+}
+
+static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct timbgpio *tgpio = container_of(gpio, struct timbgpio, gpio);
+
+ if (tgpio->irq_base <= 0)
+ return -EINVAL;
+
+ return tgpio->irq_base + offset;
+}
+
+/*
+ * GPIO IRQ
+ */
+static void timbgpio_irq_disable(unsigned irq)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+
+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
+}
+
+static void timbgpio_irq_enable(unsigned irq)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+
+ timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
+}
+
+static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+{
+ struct timbgpio *tgpio = get_irq_chip_data(irq);
+ int offset = irq - tgpio->irq_base;
+ unsigned long flags;
+ u32 lvr, flr;
+
+ if (offset < 0 || offset > tgpio->gpio.ngpio)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tgpio->lock, flags);
+
+ lvr = ioread32(tgpio->membase + TGPIO_LVR);
+ flr = ioread32(tgpio->membase + TGPIO_FLR);
+
+ if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+ flr &= ~(1 << offset);
+ if (trigger & IRQ_TYPE_LEVEL_HIGH)
+ lvr |= 1 << offset;
+ else
+ lvr &= ~(1 << offset);
+ }
+
+ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+ else {
+ flr |= 1 << offset;
+ /* opposite compared to the datasheet, but it mirrors the
+ * reality
+ */
+ if (trigger & IRQ_TYPE_EDGE_FALLING)
+ lvr |= 1 << offset;
+ else
+ lvr &= ~(1 << offset);
+ }
+
+ iowrite32(lvr, tgpio->membase + TGPIO_LVR);
+ iowrite32(flr, tgpio->membase + TGPIO_FLR);
+ iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+
+ return 0;
+}
+
+static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct timbgpio *tgpio = get_irq_data(irq);
+ unsigned long ipr;
+ int offset;
+
+ desc->chip->ack(irq);
+ ipr = ioread32(tgpio->membase + TGPIO_IPR);
+ iowrite32(ipr, tgpio->membase + TGPIO_ICR);
+
+ for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
+ generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
+}
+
+static struct irq_chip timbgpio_irqchip = {
+ .name = "GPIO",
+ .enable = timbgpio_irq_enable,
+ .disable = timbgpio_irq_disable,
+ .set_type = timbgpio_irq_type,
+};
+
+static int __devinit timbgpio_probe(struct platform_device *pdev)
+{
+ int err, i;
+ struct gpio_chip *gc;
+ struct timbgpio *tgpio;
+ struct resource *iomem;
+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ int irq = platform_get_irq(pdev, 0);
+
+ if (!pdata || pdata->nr_pins > 32) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+
+ tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL);
+ if (!tgpio) {
+ err = -EINVAL;
+ goto err_mem;
+ }
+ tgpio->irq_base = pdata->irq_base;
+
+ spin_lock_init(&tgpio->lock);
+
+ if (!request_mem_region(iomem->start, resource_size(iomem),
+ DRIVER_NAME)) {
+ err = -EBUSY;
+ goto err_request;
+ }
+
+ tgpio->membase = ioremap(iomem->start, resource_size(iomem));
+ if (!tgpio->membase) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ gc = &tgpio->gpio;
+
+ gc->label = dev_name(&pdev->dev);
+ gc->owner = THIS_MODULE;
+ gc->dev = &pdev->dev;
+ gc->direction_input = timbgpio_gpio_direction_input;
+ gc->get = timbgpio_gpio_get;
+ gc->direction_output = timbgpio_gpio_direction_output;
+ gc->set = timbgpio_gpio_set;
+ gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
+ gc->dbg_show = NULL;
+ gc->base = pdata->gpio_base;
+ gc->ngpio = pdata->nr_pins;
+ gc->can_sleep = 0;
+
+ err = gpiochip_add(gc);
+ if (err)
+ goto err_chipadd;
+
+ platform_set_drvdata(pdev, tgpio);
+
+ /* make sure to disable interrupts */
+ iowrite32(0x0, tgpio->membase + TGPIO_IER);
+
+ if (irq < 0 || tgpio->irq_base <= 0)
+ return 0;
+
+ for (i = 0; i < pdata->nr_pins; i++) {
+ set_irq_chip_and_handler_name(tgpio->irq_base + i,
+ &timbgpio_irqchip, handle_simple_irq, "mux");
+ set_irq_chip_data(tgpio->irq_base + i, tgpio);
+#ifdef CONFIG_ARM
+ set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
+#endif
+ }
+
+ set_irq_data(irq, tgpio);
+ set_irq_chained_handler(irq, timbgpio_irq);
+
+ return 0;
+
+err_chipadd:
+ iounmap(tgpio->membase);
+err_ioremap:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_request:
+ kfree(tgpio);
+err_mem:
+ printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err);
+
+ return err;
+}
+
+static int __devexit timbgpio_remove(struct platform_device *pdev)
+{
+ int err;
+ struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timbgpio *tgpio = platform_get_drvdata(pdev);
+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0 && tgpio->irq_base > 0) {
+ int i;
+ for (i = 0; i < pdata->nr_pins; i++) {
+ set_irq_chip(tgpio->irq_base + i, NULL);
+ set_irq_chip_data(tgpio->irq_base + i, NULL);
+ }
+
+ set_irq_handler(irq, NULL);
+ set_irq_data(irq, NULL);
+ }
+
+ err = gpiochip_remove(&tgpio->gpio);
+ if (err)
+ printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
+
+ iounmap(tgpio->membase);
+ release_mem_region(iomem->start, resource_size(iomem));
+ kfree(tgpio);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver timbgpio_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = timbgpio_probe,
+ .remove = timbgpio_remove,
+};
+
+/*--------------------------------------------------------------------------*/
+
+static int __init timbgpio_init(void)
+{
+ return platform_driver_register(&timbgpio_platform_driver);
+}
+
+static void __exit timbgpio_exit(void)
+{
+ platform_driver_unregister(&timbgpio_platform_driver);
+}
+
+module_init(timbgpio_init);
+module_exit(timbgpio_exit);
+
+MODULE_DESCRIPTION("Timberdale GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mocean Laboratories");
+MODULE_ALIAS("platform:"DRIVER_NAME);
+
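
The timbgpio driver is probed as a platform device and takes everything it needs from struct timbgpio_platform_data (pin count, GPIO base, optional IRQ base). A hypothetical registration from the parent MFD or board code, with made-up numbers; the real MFD parent would also pass MEM and IRQ resources:

#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

static struct timbgpio_platform_data example_timbgpio_pdata = {
	.gpio_base	= 192,	/* made up */
	.nr_pins	= 32,
	.irq_base	= 200,	/* <= 0 disables the IRQ support */
};

static struct platform_device example_timbgpio_device = {
	.name	= "timb-gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &example_timbgpio_pdata,
	},
};

/* platform_device_register(&example_timbgpio_device); */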
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c
index 49384a7c549..7fe881e2bdf 100644
--- a/drivers/gpio/twl4030-gpio.c
+++ b/drivers/gpio/twl4030-gpio.c
@@ -34,7 +34,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
@@ -80,7 +80,7 @@ static unsigned int gpio_usage_count;
*/
static inline int gpio_twl4030_write(u8 address, u8 data)
{
- return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
+ return twl_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
}
/*----------------------------------------------------------------------*/
@@ -117,7 +117,7 @@ static inline int gpio_twl4030_read(u8 address)
u8 data;
int ret = 0;
- ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
+ ret = twl_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
return (ret < 0) ? ret : data;
}
@@ -142,7 +142,7 @@ static void twl4030_led_set_value(int led, int value)
cached_leden &= ~mask;
else
cached_leden |= mask;
- status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
TWL4030_LED_LEDEN);
mutex_unlock(&gpio_lock);
}
@@ -223,23 +223,23 @@ static int twl_request(struct gpio_chip *chip, unsigned offset)
}
/* initialize PWM to always-drive */
- status = twl4030_i2c_write_u8(module, 0x7f,
+ status = twl_i2c_write_u8(module, 0x7f,
TWL4030_PWMx_PWMxOFF);
if (status < 0)
goto done;
- status = twl4030_i2c_write_u8(module, 0x7f,
+ status = twl_i2c_write_u8(module, 0x7f,
TWL4030_PWMx_PWMxON);
if (status < 0)
goto done;
/* init LED to not-driven (high) */
module = TWL4030_MODULE_LED;
- status = twl4030_i2c_read_u8(module, &cached_leden,
+ status = twl_i2c_read_u8(module, &cached_leden,
TWL4030_LED_LEDEN);
if (status < 0)
goto done;
cached_leden &= ~ledclr_mask;
- status = twl4030_i2c_write_u8(module, cached_leden,
+ status = twl_i2c_write_u8(module, cached_leden,
TWL4030_LED_LEDEN);
if (status < 0)
goto done;
@@ -370,7 +370,7 @@ static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
message[i] = bit_mask;
}
- return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
+ return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIOPUPDCTR1, 5);
}
@@ -387,7 +387,7 @@ static int __devinit gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
debounce >>= 8;
message[3] = (debounce & 0x03);
- return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
+ return twl_i2c_write(TWL4030_MODULE_GPIO, message,
REG_GPIO_DEBEN1, 3);
}
diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/wm831x-gpio.c
index f9c09a54ec7..b4468b61689 100644
--- a/drivers/gpio/wm831x-gpio.c
+++ b/drivers/gpio/wm831x-gpio.c
@@ -22,8 +22,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/gpio.h>
-
-#define WM831X_GPIO_MAX 16
+#include <linux/mfd/wm831x/irq.h>
struct wm831x_gpio {
struct wm831x *wm831x;
@@ -80,6 +79,17 @@ static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
value << offset);
}
+static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
+ struct wm831x *wm831x = wm831x_gpio->wm831x;
+
+ if (!wm831x->irq_base)
+ return -EINVAL;
+
+ return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
+}
+
#ifdef CONFIG_DEBUG_FS
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
@@ -175,6 +185,7 @@ static struct gpio_chip template_chip = {
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
.set = wm831x_gpio_set,
+ .to_irq = wm831x_gpio_to_irq,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = 1,
};
@@ -192,7 +203,7 @@ static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
wm831x_gpio->wm831x = wm831x;
wm831x_gpio->gpio_chip = template_chip;
- wm831x_gpio->gpio_chip.ngpio = WM831X_GPIO_MAX;
+ wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio;
wm831x_gpio->gpio_chip.dev = &pdev->dev;
if (pdata && pdata->gpio_base)
wm831x_gpio->gpio_chip.base = pdata->gpio_base;
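
With a .to_irq hook in place, a consumer can translate a GPIO number into a usable interrupt line. A generic, hypothetical example of that pattern (not WM831x-specific):

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_button_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_hook_button(unsigned gpio, void *data)
{
	int irq = gpio_to_irq(gpio);

	if (irq < 0)
		return irq;

	return request_irq(irq, example_button_isr, IRQF_TRIGGER_FALLING,
			   "example-button", data);
}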
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3c8827a7aab..39c5aa75b8f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm-$(CONFIG_COMPAT) += drm_ioc32.o
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -30,4 +30,7 @@ obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
+obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3f7c500b211..5124401f266 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+ { DRM_MODE_DIRTY_OFF, "Off" },
+ { DRM_MODE_DIRTY_ON, "On" },
+ { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+ drm_dirty_info_enum_list)
+
struct drm_conn_prop_enum_list {
int type;
char *name;
@@ -247,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev,
mutex_unlock(&dev->mode_config.idr_mutex);
}
-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
@@ -802,6 +812,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_dithering_property);
/**
+ * drm_mode_create_dirty_property - create dirty property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+ struct drm_property *dirty_info;
+ int i;
+
+ if (dev->mode_config.dirty_info_property)
+ return 0;
+
+ dirty_info =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM |
+ DRM_MODE_PROP_IMMUTABLE,
+ "dirty",
+ ARRAY_SIZE(drm_dirty_info_enum_list));
+ for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+ drm_property_add_enum(dirty_info, i,
+ drm_dirty_info_enum_list[i].type,
+ drm_dirty_info_enum_list[i].name);
+ dev->mode_config.dirty_info_property = dirty_info;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
@@ -1753,6 +1793,71 @@ out:
return ret;
}
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_clip_rect __user *clips_ptr;
+ struct drm_clip_rect *clips = NULL;
+ struct drm_mode_fb_dirty_cmd *r = data;
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb;
+ unsigned flags;
+ int num_clips;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out_err1;
+ }
+ fb = obj_to_fb(obj);
+
+ num_clips = r->num_clips;
+ clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+ if (!num_clips != !clips_ptr) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+ /* If userspace annotates copy, clips must come in pairs */
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+ ret = -EINVAL;
+ goto out_err1;
+ }
+
+ if (num_clips && clips_ptr) {
+ clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ if (!clips) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = copy_from_user(clips, clips_ptr,
+ num_clips * sizeof(*clips));
+ if (ret)
+ goto out_err2;
+ }
+
+ if (fb->funcs->dirty) {
+ ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+ } else {
+ ret = -ENOSYS;
+ goto out_err2;
+ }
+
+out_err2:
+ kfree(clips);
+out_err1:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+
/**
* drm_fb_release - remove and free the FBs on this file
* @filp: file * from the ioctl
@@ -2478,3 +2583,72 @@ out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_crtc_page_flip *page_flip = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_pending_vblank_event *e = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ page_flip->reserved != 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj)
+ goto out;
+ crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->page_flip == NULL)
+ goto out;
+
+ obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj)
+ goto out;
+ fb = obj_to_fb(obj);
+
+ if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ ret = -ENOMEM;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+ file_priv->event_space -= sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ goto out;
+ }
+
+ e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = page_flip->user_data;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy =
+ (void (*) (struct drm_pending_event *)) kfree;
+ }
+
+ ret = crtc->funcs->page_flip(crtc, fb, e);
+ if (ret) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ file_priv->event_space += sizeof e->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+ }
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bbfd110a716..4231d6db72e 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
count = (*connector_funcs->get_modes)(connector);
if (!count) {
- count = drm_add_modes_noedid(connector, 800, 600);
+ count = drm_add_modes_noedid(connector, 1024, 768);
if (!count)
return 0;
}
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
{
int count = 0;
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
drm_fb_helper_parse_command_line(dev);
count = drm_helper_probe_connector_modes(dev,
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index a63b6f57d2d..548887c8506 100644
--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -28,84 +28,20 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
#include "drmP.h"
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-
-#define MODE_I2C_START 1
-#define MODE_I2C_WRITE 2
-#define MODE_I2C_READ 4
-#define MODE_I2C_STOP 8
-
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- uint16_t address = algo_data->address;
- uint8_t msg[5];
- uint8_t reply[2];
- int msg_bytes;
- int reply_bytes;
int ret;
-
- /* Set up the command byte */
- if (mode & MODE_I2C_READ)
- msg[0] = AUX_I2C_READ << 4;
- else
- msg[0] = AUX_I2C_WRITE << 4;
-
- if (!(mode & MODE_I2C_STOP))
- msg[0] |= AUX_I2C_MOT << 4;
-
- msg[1] = address >> 8;
- msg[2] = address;
-
- switch (mode) {
- case MODE_I2C_WRITE:
- msg[3] = 0;
- msg[4] = write_byte;
- msg_bytes = 5;
- reply_bytes = 1;
- break;
- case MODE_I2C_READ:
- msg[3] = 0;
- msg_bytes = 4;
- reply_bytes = 2;
- break;
- default:
- msg_bytes = 3;
- reply_bytes = 1;
- break;
- }
-
- for (;;) {
- ret = (*algo_data->aux_ch)(adapter,
- msg, msg_bytes,
- reply, reply_bytes);
- if (ret < 0) {
- DRM_DEBUG("aux_ch failed %d\n", ret);
- return ret;
- }
- switch (reply[0] & AUX_I2C_REPLY_MASK) {
- case AUX_I2C_REPLY_ACK:
- if (mode == MODE_I2C_READ) {
- *read_byte = reply[1];
- }
- return reply_bytes - 1;
- case AUX_I2C_REPLY_NACK:
- DRM_DEBUG("aux_ch nack\n");
- return -EREMOTEIO;
- case AUX_I2C_REPLY_DEFER:
- DRM_DEBUG("aux_ch defer\n");
- udelay(100);
- break;
- default:
- DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
- return -EREMOTEIO;
- }
- }
+
+ ret = (*algo_data->aux_ch)(adapter, mode,
+ write_byte, read_byte);
+ return ret;
}
/*
@@ -224,7 +160,7 @@ i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
if (ret >= 0)
ret = num;
i2c_algo_dp_aux_stop(adapter, reading);
- DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+ DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return ret;
}
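
Since the helper above now defers all AUX message packing to the driver, the driver-side aux_ch hook receives the raw mode/byte arguments directly. A minimal sketch of its new shape follows; the body is a placeholder (the MODE_I2C_* constants are assumed here to be available via drm_dp_helper.h), and a real implementation would drive the hardware AUX channel and return the number of bytes transferred or a negative errno, just as the removed loop expected.

    #include "drmP.h"
    #include "drm_dp_helper.h"

    static int example_aux_ch(struct i2c_adapter *adapter, int mode,
                              uint8_t write_byte, uint8_t *read_byte)
    {
            struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;

            /* mode is one of MODE_I2C_START/WRITE/READ/STOP;
             * algo_data->address is the I2C-over-AUX target. */
            (void)algo_data;
            (void)write_byte;
            (void)read_byte;

            return -EIO;    /* placeholder: no hardware behind this sketch */
    }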
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a75ca63deea..766c46875a2 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -366,6 +368,29 @@ module_init(drm_core_init);
module_exit(drm_core_exit);
/**
+ * Copy an IOCTL return string to user space

+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know exact length of driver value (which could be
+ * larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
* Get version information
*
* \param inode device inode.
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_version *version = data;
- int len;
+ int err;
version->version_major = dev->driver->major;
version->version_minor = dev->driver->minor;
version->version_patchlevel = dev->driver->patchlevel;
- DRM_COPY(version->name, dev->driver->name);
- DRM_COPY(version->date, dev->driver->date);
- DRM_COPY(version->desc, dev->driver->desc);
-
- return 0;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
}
/**
@@ -404,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data,
* Looks up the ioctl function in the ::ioctls table, checking for root
* previleges if so required, and dispatches to the respective function.
*/
-int drm_ioctl(struct inode *inode, struct file *filp,
+long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
+ struct drm_device *dev;
struct drm_ioctl_desc *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -416,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
char stack_kdata[128];
char *kdata = NULL;
+ dev = file_priv->minor->dev;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
@@ -471,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp,
goto err_i1;
}
}
- retcode = func(dev, kdata, file_priv);
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ lock_kernel();
+ retcode = func(dev, kdata, file_priv);
+ unlock_kernel();
+ }
if (cmd & IOC_OUT) {
if (copy_to_user((void __user *)arg, kdata,
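
The two-call pattern that drm_copy_field() enables on the userspace side of DRM_IOCTL_VERSION looks roughly like this (a sketch only; error handling trimmed):

    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static char *query_driver_name(int fd)
    {
            struct drm_version v;
            char *name;

            memset(&v, 0, sizeof v);
            if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0)      /* pass 1: lengths only */
                    return NULL;

            name = calloc(1, v.name_len + 1);
            if (!name)
                    return NULL;

            v.name = name;                                 /* pass 2: fill the buffer */
            if (ioctl(fd, DRM_IOCTL_VERSION, &v) < 0) {
                    free(name);
                    return NULL;
            }
            return name;                                   /* e.g. "i915", "radeon" */
    }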
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b54ba63d506..5c9f79877cb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
*/
static bool edid_is_valid(struct edid *edid)
{
- int i;
+ int i, score = 0;
u8 csum = 0;
u8 *raw_edid = (u8 *)edid;
- if (memcmp(edid->header, edid_header, sizeof(edid_header)))
- goto bad;
- if (edid->version != 1) {
- DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ if (score == 8) ;
+ else if (score >= 6) {
+ DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+ memcpy(raw_edid, edid_header, sizeof(edid_header));
+ } else
goto bad;
- }
- if (edid->revision > 4)
- DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
goto bad;
}
+ if (edid->version != 1) {
+ DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+ goto bad;
+ }
+
+ if (edid->revision > 4)
+ DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
return 1;
bad:
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
+static const int drm_num_dmt_modes =
+ sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
- int i, count;
+ int i;
struct drm_display_mode *ptr, *mode;
- count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
mode = NULL;
- for (i = 0; i < count; i++) {
+ for (i = 0; i < drm_num_dmt_modes; i++) {
ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
return modes;
}
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+ struct detailed_data_monitor_range *range;
+ int hsync, vrefresh;
+
+ range = &timing->data.other_data.data.range;
+
+ hsync = drm_mode_hsync(mode);
+ vrefresh = drm_mode_vrefresh(mode);
+
+ if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+ return false;
+
+ if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+ return false;
+
+ if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+ /* be forgiving since it's in units of 10MHz */
+ int max_clock = range->pixel_clock_mhz * 10 + 9;
+ max_clock *= 1000;
+ if (mode->clock > max_clock)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ for (i = 0; i < drm_num_dmt_modes; i++) {
+ if (mode_in_range(drm_dmt_modes + i, timing)) {
+ newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+
+ for (i = 0; i < 4; i++) {
+ int uninitialized_var(width), height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
+ switch (cvt->code[1] & 0xc0) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x40:
+ width = height * 16 / 9;
+ break;
+ case 0x80:
+ width = height * 16 / 10;
+ break;
+ case 0xc0:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+ struct detailed_timing *timing,
+ struct edid *edid, u32 quirks, int preferred)
+{
+ int i, modes = 0;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ int timing_level = standard_timing_level(edid);
+ int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(dev, edid, timing, quirks);
+ if (!newmode)
+ return 0;
+
+ if (preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, newmode);
+ return 1;
+ }
+
+ /* other timing types */
+ switch (data->type) {
+ case EDID_DETAIL_MONITOR_RANGE:
+ if (gtf)
+ modes += drm_gtf_modes_for_range(connector, timing);
+ break;
+ case EDID_DETAIL_STD_MODES:
+ /* Six modes per detailed section */
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std;
+ struct drm_display_mode *newmode;
+
+ std = &data->data.timings[i];
+ newmode = drm_mode_std(dev, std, edid->revision,
+ timing_level);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ break;
+ case EDID_DETAIL_CVT_3BYTE:
+ modes += drm_cvt_modes(connector, timing);
+ break;
+ default:
+ break;
+ }
+
+ return modes;
+}
+
/**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
* @connector: attached connector
* @edid: EDID block to scan
* @quirks: quirks to apply
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
static int add_detailed_info(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
- int timing_level;
-
- timing_level = standard_timing_level(edid);
+ int i, modes = 0;
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
struct detailed_timing *timing = &edid->detailed_timings[i];
- struct detailed_non_pixel *data = &timing->data.other_data;
- struct drm_display_mode *newmode;
-
- /* X server check is version 1.1 or higher */
- if (edid->version == 1 && edid->revision >= 1 &&
- !timing->pixel_clock) {
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- for (j = 0; j < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
- } else {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
+ int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
- /* First detailed mode is preferred */
- if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, newmode);
+ /* In 1.0, only timings are allowed */
+ if (!timing->pixel_clock && edid->version == 1 &&
+ edid->revision == 0)
+ continue;
- modes++;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks,
+ preferred);
}
return modes;
}
+
/**
* add_detailed_mode_eedid - get detailed mode info from addtional timing
* EDID block
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
static int add_detailed_info_eedid(struct drm_connector *connector,
struct edid *edid, u32 quirks)
{
- struct drm_device *dev = connector->dev;
- int i, j, modes = 0;
+ int i, modes = 0;
char *edid_ext = NULL;
struct detailed_timing *timing;
- struct detailed_non_pixel *data;
- struct drm_display_mode *newmode;
int edid_ext_num;
int start_offset, end_offset;
int timing_level;
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
for (i = start_offset; i < end_offset;
i += sizeof(struct detailed_timing)) {
timing = (struct detailed_timing *)(edid_ext + i);
- data = &timing->data.other_data;
- /* Detailed mode timing */
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- continue;
-
- drm_mode_probed_add(connector, newmode);
-
- modes++;
- continue;
- }
-
- /* Other timing or info */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_SERIAL:
- break;
- case EDID_DETAIL_MONITOR_STRING:
- break;
- case EDID_DETAIL_MONITOR_RANGE:
- /* Get monitor range data */
- break;
- case EDID_DETAIL_MONITOR_NAME:
- break;
- case EDID_DETAIL_MONITOR_CPDATA:
- break;
- case EDID_DETAIL_STD_MODES:
- /* Five modes per detailed section */
- for (j = 0; j < 5; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
-
- std = &data->data.timings[j];
- newmode = drm_mode_std(dev, std,
- edid->revision,
- timing_level);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- break;
- default:
- break;
- }
+ modes += add_detailed_modes(connector, timing, edid, quirks, 0);
}
return modes;
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
struct i2c_adapter *adapter,
char *buf, int len)
{
- int ret;
+ int i;
- ret = drm_do_probe_ddc_edid(adapter, buf, len);
- if (ret != 0) {
- goto end;
- }
- if (!edid_is_valid((struct edid *)buf)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
- drm_get_connector_name(connector));
- ret = -1;
+ for (i = 0; i < 4; i++) {
+ if (drm_do_probe_ddc_edid(adapter, buf, len))
+ return -1;
+ if (edid_is_valid((struct edid *)buf))
+ return 0;
}
-end:
- return ret;
+
+ /* repeated checksum failures; warn, but carry on */
+ dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+ drm_get_connector_name(connector));
+ return -1;
}
/**
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
ptr->vdisplay > vdisplay)
continue;
}
+ if (drm_mode_vrefresh(ptr) > 61)
+ continue;
mode = drm_mode_duplicate(dev, ptr);
if (mode) {
drm_mode_probed_add(connector, mode);
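
As a worked example of the forgiving pixel-clock check in mode_in_range() above (numbers are illustrative): a range descriptor byte of 17 advertises a 170 MHz limit in units of 10 MHz, and the check allows mode clocks up to (17 * 10 + 9) * 1000 = 179000 kHz, i.e. anything that would still round down to the advertised 170 MHz class.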
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 65ef011fa8b..1b49fa055f4 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
mutex_unlock(&dev->mode_config.mutex);
}
}
- if (dpms_mode == DRM_MODE_DPMS_OFF) {
- mutex_lock(&dev->mode_config.mutex);
- crtc_funcs->dpms(crtc, dpms_mode);
- mutex_unlock(&dev->mode_config.mutex);
- }
+ mutex_lock(&dev->mode_config.mutex);
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&dev->mode_config.mutex);
}
}
}
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
+ /* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
drm_fb_helper_on(info);
break;
+ /* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
- drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
break;
+ /* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
break;
+ /* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
break;
+ /* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
break;
@@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
if (new_fb) {
info->var.pixclock = 0;
- if (register_framebuffer(info) < 0)
+ ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
+ if (ret)
+ return ret;
+ if (register_framebuffer(info) < 0) {
+ fb_dealloc_cmap(&info->cmap);
return -EINVAL;
+ }
} else {
drm_fb_helper_set_par(info);
}
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
drm_fb_helper_crtc_free(helper);
+ fb_dealloc_cmap(&helper->fb->fbdev->cmap);
}
EXPORT_SYMBOL(drm_fb_helper_free);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 251bc0e3b5e..08d14df3bb4 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
+ INIT_LIST_HEAD(&priv->event_list);
+ init_waitqueue_head(&priv->event_wait);
+ priv->event_space = 4096; /* set aside 4k for event buffer */
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
}
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, priv, true);
+ if (ret) {
+ /* drop both references if this fails */
+ drm_master_put(&priv->minor->master);
+ drm_master_put(&priv->master);
+ mutex_unlock(&dev->struct_mutex);
+ goto out_free;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
} else {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
}
}
+static void drm_events_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e, *et;
+ struct drm_pending_vblank_event *v, *vt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Remove pending flips */
+ list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+ if (v->base.file_priv == file_priv) {
+ list_del(&v->base.link);
+ drm_vblank_put(dev, v->pipe);
+ v->base.destroy(&v->base);
+ }
+
+ /* Remove unconsumed events */
+ list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+ e->destroy(e);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* Release file.
*
@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master)
drm_master_release(dev, filp);
+ drm_events_release(file_priv);
+
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held my the minor */
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, true);
drm_master_put(&file_priv->minor->master);
}
}
@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
-/** No-op. */
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+ size_t total, size_t max, struct drm_pending_event **out)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_pending_event *e;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ *out = NULL;
+ if (list_empty(&file_priv->event_list))
+ goto out;
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length + total > max)
+ goto out;
+
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
+ *out = e;
+ ret = true;
+
+out:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_pending_event *e;
+ size_t total;
+ ssize_t ret;
+
+ ret = wait_event_interruptible(file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0)
+ return ret;
+
+ total = 0;
+ while (drm_dequeue_event(file_priv, total, count, &e)) {
+ if (copy_to_user(buffer + total,
+ e->event, e->event->length)) {
+ total = -EFAULT;
+ break;
+ }
+
+ total += e->event->length;
+ e->destroy(e);
+ }
+
+ return total;
+}
+EXPORT_SYMBOL(drm_read);
+
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
- return 0;
+ struct drm_file *file_priv = filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &file_priv->event_wait, wait);
+
+ if (!list_empty(&file_priv->event_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
}
EXPORT_SYMBOL(drm_poll);
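
Taken together, drm_poll() and drm_read() give userspace a conventional event loop. A sketch of the consumer side (assumes the UAPI event structs from this series; one read() may return several packed events, so they are walked by base.length):

    #include <poll.h>
    #include <stddef.h>
    #include <string.h>
    #include <unistd.h>
    #include <drm/drm.h>

    static void drain_drm_events(int fd)
    {
            char buf[4096];          /* mirrors the 4k event_space reserved above */
            struct pollfd pfd = { .fd = fd, .events = POLLIN };
            ssize_t len;
            size_t off = 0;

            if (poll(&pfd, 1, -1) <= 0)
                    return;

            len = read(fd, buf, sizeof buf);
            if (len <= 0)
                    return;

            while (off + sizeof(struct drm_event) <= (size_t)len) {
                    struct drm_event ev;

                    memcpy(&ev, buf + off, sizeof ev);
                    if (ev.length < sizeof ev)
                            break;
                    if (ev.type == DRM_EVENT_VBLANK ||
                        ev.type == DRM_EVENT_FLIP_COMPLETE) {
                            struct drm_event_vblank vbl;

                            memcpy(&vbl, buf + off, sizeof vbl);
                            /* vbl.user_data, vbl.sequence and vbl.tv_sec/tv_usec
                             * are valid here */
                    }
                    off += ev.length;
            }
    }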
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 282d9fdf9f4..d61d185cf04 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
&version->desc))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
+ err = drm_ioctl(file,
DRM_IOCTL_VERSION, (unsigned long)version);
if (err)
return err;
@@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
&u->unique))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+ err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
if (err)
return err;
@@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
&u->unique))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+ return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
}
typedef struct drm_map32 {
@@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
if (__put_user(idx, &map->offset))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_MAP, (unsigned long)map);
+ err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
if (err)
return err;
@@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
|| __put_user(m32.flags, &map->flags))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_ADD_MAP, (unsigned long)map);
+ err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
if (err)
return err;
@@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
if (__put_user((void *)(unsigned long)handle, &map->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RM_MAP, (unsigned long)map);
+ return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
}
typedef struct drm_client32 {
@@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (__put_user(idx, &client->idx))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+ err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
if (err)
return err;
@@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_STATS, (unsigned long)stats);
+ err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
if (err)
return err;
@@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
|| __put_user(agp_start, &buf->agp_start))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+ err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
if (err)
return err;
@@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
|| __put_user(b32.high_mark, &buf->high_mark))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+ return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
}
typedef struct drm_buf_info32 {
@@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
|| __put_user(list, &request->list))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
if (err)
return err;
@@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
|| __put_user(list, &request->list))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
if (err)
return err;
@@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
&request->list))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
}
typedef struct drm_ctx_priv_map32 {
@@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
&request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
}
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
@@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
if (__put_user(ctx_id, &request->ctx_id))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
if (err)
return err;
@@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
&res->contexts))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RES_CTX, (unsigned long)res);
+ err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
if (err)
return err;
@@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
&d->request_sizes))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_DMA, (unsigned long)d);
+ err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
if (err)
return err;
@@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
if (put_user(m32.mode, &mode->mode))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+ return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
}
typedef struct drm_agp_info32 {
@@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_INFO, (unsigned long)info);
+ err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
if (err)
return err;
@@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
|| __put_user(req32.type, &request->type))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
if (err)
return err;
if (__get_user(req32.handle, &request->handle)
|| __get_user(req32.physical, &request->physical)
|| copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
return -EFAULT;
}
@@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
|| __put_user(handle, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
}
typedef struct drm_agp_binding32 {
@@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
|| __put_user(req32.offset, &request->offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_BIND, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
}
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
@@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
|| __put_user(handle, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
}
#endif /* __OS_HAS_AGP */
@@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
|| __put_user(x, &request->size))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
if (err)
return err;
@@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
|| __put_user(x << PAGE_SHIFT, &request->handle))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_SG_FREE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
}
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
@@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
__put_user(update32.data, &request->data))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
return err;
}
#endif
@@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
|| __put_user(req32.request.signal, &request->request.signal))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+ err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
if (err)
return err;
@@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
* than always failing.
*/
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
- return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ return drm_ioctl(filp, cmd, arg);
fn = drm_compat_ioctls[nr];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0a6f0b3bdc7..7998ee66b31 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
- !dev->vblank_enabled[crtc]) {
- ret = dev->driver->enable_vblank(dev, crtc);
- DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
- if (ret)
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ if (!dev->vblank_enabled[crtc]) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ else {
+ dev->vblank_enabled[crtc] = 1;
+ drm_update_vblank_count(dev, crtc);
+ }
+ }
+ } else {
+ if (!dev->vblank_enabled[crtc]) {
atomic_dec(&dev->vblank_refcount[crtc]);
- else {
- dev->vblank_enabled[crtc] = 1;
- drm_update_vblank_count(dev, crtc);
+ ret = -EINVAL;
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
}
EXPORT_SYMBOL(drm_vblank_put);
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ dev->vblank_enabled[crtc] = 0;
+ dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
@@ -550,6 +568,63 @@ out:
return ret;
}
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+ union drm_wait_vblank *vblwait,
+ struct drm_file *file_priv)
+{
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ e = kzalloc(sizeof *e, GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ e->pipe = pipe;
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof e->event;
+ e->event.user_data = vblwait->request.signal;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file_priv;
+ e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+ do_gettimeofday(&now);
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ if (file_priv->event_space < sizeof e->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+ return -ENOMEM;
+ }
+
+ file_priv->event_space -= sizeof e->event;
+ seq = drm_vblank_count(dev, pipe);
+ if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ vblwait->reply.sequence = vblwait->request.sequence;
+ }
+
+ DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+ vblwait->request.sequence, seq, pipe);
+
+ e->event.sequence = vblwait->request.sequence;
+ if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ } else {
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return 0;
+}
+
/**
* Wait for VBLANK.
*
@@ -609,6 +684,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
+ if (flags & _DRM_VBLANK_EVENT)
+ return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1<<23)) {
vblwait->request.sequence = seq + 1;
@@ -641,6 +719,38 @@ done:
return ret;
}
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ unsigned int seq;
+
+ do_gettimeofday(&now);
+ seq = drm_vblank_count(dev, crtc);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != crtc)
+ continue;
+ if ((seq - e->event.sequence) > (1<<23))
+ continue;
+
+ DRM_DEBUG("vblank event on %d, current %d\n",
+ e->event.sequence, seq);
+
+ e->event.sequence = seq;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ drm_vblank_put(dev, e->pipe);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
@@ -651,7 +761,11 @@ done:
*/
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ if (!dev->num_crtcs)
+ return;
+
atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ drm_handle_vblank_events(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
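
The new _DRM_VBLANK_EVENT flag lets userspace schedule a vblank notification without blocking inside the ioctl; the completion then arrives through the read()/poll() interface above. A sketch (fd is an open DRM fd; the other names are placeholders):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static int queue_vblank_event(int fd, unsigned int frames_ahead,
                                  unsigned long cookie)
    {
            union drm_wait_vblank vbl;

            memset(&vbl, 0, sizeof vbl);
            vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
            vbl.request.sequence = frames_ahead;   /* e.g. 1 = the next vblank */
            vbl.request.signal = cookie;           /* comes back as event user_data */

            /* returns immediately; DRM_EVENT_VBLANK is queued when the count hits */
            return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
    }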
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 97dc5a4f0de..cdec3297712 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *align_splitoff = NULL;
+ unsigned tmp = 0;
+ unsigned wasted = 0;
+
+ if (node->start < start)
+ wasted += start - node->start;
+ if (alignment)
+ tmp = ((node->start + wasted) % alignment);
+
+ if (tmp)
+ wasted += alignment - tmp;
+ if (wasted) {
+ align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+ if (unlikely(align_splitoff == NULL))
+ return NULL;
+ }
+
+ if (node->size == size) {
+ list_del_init(&node->fl_entry);
+ node->free = 0;
+ } else {
+ node = drm_mm_split_at_start(node, size, atomic);
+ }
+
+ if (align_splitoff)
+ drm_mm_put_block(align_splitoff);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
@@ -320,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
- if (size < best_size) {
+ if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
}
EXPORT_SYMBOL(drm_mm_search_free);
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match)
+{
+ struct list_head *list;
+ const struct list_head *free_stack = &mm->fl_entry;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
+ unsigned long best_size;
+ unsigned wasted;
+
+ best = NULL;
+ best_size = ~0UL;
+
+ list_for_each(list, free_stack) {
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
+ wasted = 0;
+
+ if (entry->size < size)
+ continue;
+
+ if (entry->start > end || (entry->start+entry->size) < start)
+ continue;
+
+ if (entry->start < start)
+ wasted += start - entry->start;
+
+ if (alignment) {
+ register unsigned tmp = (entry->start + wasted) % alignment;
+ if (tmp)
+ wasted += alignment - tmp;
+ }
+
+ if (entry->size >= size + wasted) {
+ if (!best_match)
+ return entry;
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
+ }
+ }
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
}
EXPORT_SYMBOL(drm_mm_takedown);
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+ struct drm_mm_node *entry;
+ int total_used = 0, total_free = 0, total = 0;
+
+ list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ prefix, entry->start, entry->start + entry->size,
+ entry->size, entry->free ? "free" : "used");
+ total += entry->size;
+ if (entry->free)
+ total_free += entry->size;
+ else
+ total_used += entry->size;
+ }
+ printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
@@ -395,7 +503,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
else
total_used += entry->size;
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
+ seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
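
A driver would typically pair the two new range helpers as below, for example to keep an allocation inside the CPU-visible window of VRAM. This is only a sketch; the manager setup and the start/end bounds are assumed to come from the driver.

    #include "drmP.h"
    #include "drm_mm.h"

    static struct drm_mm_node *alloc_in_window(struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long start,
                                               unsigned long end)
    {
            struct drm_mm_node *node;

            node = drm_mm_search_free_in_range(mm, size, alignment,
                                               start, end, 1 /* best fit */);
            if (!node)
                    return NULL;

            /* splits the free node so the returned block lies within [start, end) */
            return drm_mm_get_block_range_generic(node, size, alignment,
                                                  start, end, 0 /* may sleep */);
    }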
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 51f677215f1..6d81a02463a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_mode_height);
+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @mode's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+ unsigned int calc_val;
+
+ if (mode->hsync)
+ return mode->hsync;
+
+ if (mode->htotal < 0)
+ return 0;
+
+ calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+ calc_val += 500; /* round to 1000Hz */
+ calc_val /= 1000; /* truncate to kHz */
+
+ return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height);
* LOCKING:
* None.
*
- * Return @mode's vrefresh rate or calculate it if necessary.
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
*
* FIXME: why is this needed? shouldn't vrefresh be set already?
*
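
As a worked check of the arithmetic in drm_mode_hsync() above (using the standard 1024x768@60 DMT timings as an illustration): a 65000 kHz clock and an htotal of 1344 give (65000 * 1000) / 1344 = 48363 Hz, and (48363 + 500) / 1000 truncates to 48 kHz.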
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 55bb8a82d61..ad73e141afd 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
kref_get(&master->refcount);
return master;
}
+EXPORT_SYMBOL(drm_master_get);
static void drm_master_destroy(struct kref *kref)
{
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master)
kref_put(&(*master)->refcount, drm_master_destroy);
*master = NULL;
}
+EXPORT_SYMBOL(drm_master_put);
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ int ret = 0;
+
if (file_priv->is_master)
return 0;
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
+ }
+ }
mutex_unlock(&dev->struct_mutex);
}
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
+ spin_lock_init(&dev->event_lock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
new file mode 100644
index 00000000000..6d2abaf35ba
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -0,0 +1,4 @@
+ccflags-y := -Iinclude/drm
+
+ch7006-y := ch7006_drv.o ch7006_mode.o
+obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
new file mode 100644
index 00000000000..81681a07a80
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ch7006_priv.h"
+
+/* DRM encoder functions */
+
+static void ch7006_encoder_set_config(struct drm_encoder *encoder,
+ void *params)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ priv->params = params;
+}
+
+static void ch7006_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ drm_property_destroy(encoder->dev, priv->scale_property);
+
+ kfree(priv);
+ to_encoder_slave(encoder)->slave_priv = NULL;
+
+ drm_i2c_encoder_destroy(encoder);
+}
+
+static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+
+ ch7006_dbg(client, "\n");
+
+ if (mode == priv->last_dpms)
+ return;
+ priv->last_dpms = mode;
+
+ ch7006_setup_power_state(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POWER);
+}
+
+static void ch7006_encoder_save(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ ch7006_dbg(client, "\n");
+
+ ch7006_state_save(client, &priv->saved_state);
+}
+
+static void ch7006_encoder_restore(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ ch7006_dbg(client, "\n");
+
+ ch7006_state_load(client, &priv->saved_state);
+}
+
+static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+
+ /* The ch7006 is painfully picky with the input timings so no
+ * custom modes for now... */
+
+ priv->mode = ch7006_lookup_mode(encoder, mode);
+
+ return !!priv->mode;
+}
+
+static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ if (ch7006_lookup_mode(encoder, mode))
+ return MODE_OK;
+ else
+ return MODE_BAD;
+}
+
+static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_encoder_params *params = priv->params;
+ struct ch7006_state *state = &priv->state;
+ uint8_t *regs = state->regs;
+ struct ch7006_mode *mode = priv->mode;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ int start_active;
+
+ ch7006_dbg(client, "\n");
+
+ regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode;
+ regs[CH7006_BWIDTH] = 0;
+ regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT,
+ params->input_format);
+
+ regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK
+ | bitf(CH7006_CLKMODE_XCM, params->xcm)
+ | bitf(CH7006_CLKMODE_PCM, params->pcm);
+ if (params->clock_mode)
+ regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER;
+ if (params->clock_edge)
+ regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE;
+
+ start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7);
+ regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active);
+ regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active);
+
+ regs[CH7006_INPUT_SYNC] = 0;
+ if (params->sync_direction)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT;
+ if (params->sync_encoding)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED;
+ if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC;
+ if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC;
+
+ regs[CH7006_DETECT] = 0;
+ regs[CH7006_BCLKOUT] = 0;
+
+ regs[CH7006_SUBC_INC3] = 0;
+ if (params->pout_level)
+ regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V;
+
+ regs[CH7006_SUBC_INC4] = 0;
+ if (params->active_detect)
+ regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT;
+
+ regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL];
+
+ ch7006_setup_levels(encoder);
+ ch7006_setup_subcarrier(encoder);
+ ch7006_setup_pll(encoder);
+ ch7006_setup_power_state(encoder);
+ ch7006_setup_properties(encoder);
+
+ ch7006_state_load(client, state);
+}
+
+static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ int det;
+
+ ch7006_dbg(client, "\n");
+
+ ch7006_save_reg(client, state, CH7006_DETECT);
+ ch7006_save_reg(client, state, CH7006_POWER);
+ ch7006_save_reg(client, state, CH7006_CLKMODE);
+
+ ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET |
+ bitfs(CH7006_POWER_LEVEL, NORMAL));
+ ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER);
+
+ ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE);
+
+ ch7006_write(client, CH7006_DETECT, 0);
+
+ det = ch7006_read(client, CH7006_DETECT);
+
+ ch7006_load_reg(client, state, CH7006_CLKMODE);
+ ch7006_load_reg(client, state, CH7006_POWER);
+ ch7006_load_reg(client, state, CH7006_DETECT);
+
+ if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+ CH7006_DETECT_SVIDEO_C_TEST|
+ CH7006_DETECT_CVBS_TEST)) == 0)
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+ else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
+ CH7006_DETECT_SVIDEO_C_TEST)) == 0)
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+ else if ((det & CH7006_DETECT_CVBS_TEST) == 0)
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+ else
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ drm_connector_property_set_value(connector,
+ encoder->dev->mode_config.tv_subconnector_property,
+ priv->subconnector);
+
+ return priv->subconnector ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_mode *mode;
+ int n = 0;
+
+ for (mode = ch7006_modes; mode->mode.clock; mode++) {
+ if (~mode->valid_scales & 1<<priv->scale ||
+ ~mode->valid_norms & 1<<priv->norm)
+ continue;
+
+ drm_mode_probed_add(connector,
+ drm_mode_duplicate(encoder->dev, &mode->mode));
+
+ n++;
+ }
+
+ return n;
+}
+
+static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_mode_config *conf = &dev->mode_config;
+
+ drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+
+ priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "scale", 2);
+ priv->scale_property->values[0] = 0;
+ priv->scale_property->values[1] = 2;
+
+ drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ priv->select_subconnector);
+ drm_connector_attach_property(connector, conf->tv_subconnector_property,
+ priv->subconnector);
+ drm_connector_attach_property(connector, conf->tv_left_margin_property,
+ priv->hmargin);
+ drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+ priv->vmargin);
+ drm_connector_attach_property(connector, conf->tv_mode_property,
+ priv->norm);
+ drm_connector_attach_property(connector, conf->tv_brightness_property,
+ priv->brightness);
+ drm_connector_attach_property(connector, conf->tv_contrast_property,
+ priv->contrast);
+ drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+ priv->flicker);
+ drm_connector_attach_property(connector, priv->scale_property,
+ priv->scale);
+
+ return 0;
+}
+
+static int ch7006_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ struct drm_mode_config *conf = &encoder->dev->mode_config;
+ struct drm_crtc *crtc = encoder->crtc;
+ bool modes_changed = false;
+
+ ch7006_dbg(client, "\n");
+
+ if (property == conf->tv_select_subconnector_property) {
+ priv->select_subconnector = val;
+
+ ch7006_setup_power_state(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POWER);
+
+ } else if (property == conf->tv_left_margin_property) {
+ priv->hmargin = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POV);
+ ch7006_load_reg(client, state, CH7006_HPOS);
+
+ } else if (property == conf->tv_bottom_margin_property) {
+ priv->vmargin = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_POV);
+ ch7006_load_reg(client, state, CH7006_VPOS);
+
+ } else if (property == conf->tv_mode_property) {
+ if (connector->dpms != DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ priv->norm = val;
+
+ modes_changed = true;
+
+ } else if (property == conf->tv_brightness_property) {
+ priv->brightness = val;
+
+ ch7006_setup_levels(encoder);
+
+ ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+
+ } else if (property == conf->tv_contrast_property) {
+ priv->contrast = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_CONTRAST);
+
+ } else if (property == conf->tv_flicker_reduction_property) {
+ priv->flicker = val;
+
+ ch7006_setup_properties(encoder);
+
+ ch7006_load_reg(client, state, CH7006_FFILTER);
+
+ } else if (property == priv->scale_property) {
+ if (connector->dpms != DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ priv->scale = val;
+
+ modes_changed = true;
+
+ } else {
+ return -EINVAL;
+ }
+
+ if (modes_changed) {
+ drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+ /* Disable the crtc to ensure a full modeset is
+ * performed whenever it's turned on again. */
+ if (crtc) {
+ struct drm_mode_set modeset = {
+ .crtc = crtc,
+ };
+
+ crtc->funcs->set_config(&modeset);
+ }
+ }
+
+ return 0;
+}
+
+static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
+ .set_config = ch7006_encoder_set_config,
+ .destroy = ch7006_encoder_destroy,
+ .dpms = ch7006_encoder_dpms,
+ .save = ch7006_encoder_save,
+ .restore = ch7006_encoder_restore,
+ .mode_fixup = ch7006_encoder_mode_fixup,
+ .mode_valid = ch7006_encoder_mode_valid,
+ .mode_set = ch7006_encoder_mode_set,
+ .detect = ch7006_encoder_detect,
+ .get_modes = ch7006_encoder_get_modes,
+ .create_resources = ch7006_encoder_create_resources,
+ .set_property = ch7006_encoder_set_property,
+};
+
+
+/* I2C driver functions */
+
+static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ uint8_t addr = CH7006_VERSION_ID;
+ uint8_t val;
+ int ret;
+
+ ch7006_dbg(client, "\n");
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ ch7006_info(client, "Detected version ID: %x\n", val);
+
+ /* I don't know what this is for, but otherwise I get no
+ * signal.
+ */
+ ch7006_write(client, 0x3d, 0x0);
+
+ return 0;
+
+fail:
+ ch7006_err(client, "Error %d reading version ID\n", ret);
+
+ return -ENODEV;
+}
+
+static int ch7006_remove(struct i2c_client *client)
+{
+ ch7006_dbg(client, "\n");
+
+ return 0;
+}
+
+static int ch7006_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct ch7006_priv *priv;
+ int i;
+
+ ch7006_dbg(client, "\n");
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ encoder->slave_priv = priv;
+ encoder->slave_funcs = &ch7006_encoder_funcs;
+
+ priv->norm = TV_NORM_PAL;
+ priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+ priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+ priv->scale = 1;
+ priv->contrast = 50;
+ priv->brightness = 50;
+ priv->flicker = 50;
+ priv->hmargin = 50;
+ priv->vmargin = 50;
+ priv->last_dpms = -1;
+
+ if (ch7006_tv_norm) {
+ for (i = 0; i < NUM_TV_NORMS; i++) {
+ if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) {
+ priv->norm = i;
+ break;
+ }
+ }
+
+ if (i == NUM_TV_NORMS)
+ ch7006_err(client, "Invalid TV norm setting \"%s\".\n",
+ ch7006_tv_norm);
+ }
+
+ if (ch7006_scale >= 0 && ch7006_scale <= 2)
+ priv->scale = ch7006_scale;
+ else
+ ch7006_err(client, "Invalid scale setting \"%d\".\n",
+ ch7006_scale);
+
+ return 0;
+}
+
+static struct i2c_device_id ch7006_ids[] = {
+ { "ch7006", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ch7006_ids);
+
+static struct drm_i2c_encoder_driver ch7006_driver = {
+ .i2c_driver = {
+ .probe = ch7006_probe,
+ .remove = ch7006_remove,
+
+ .driver = {
+ .name = "ch7006",
+ },
+
+ .id_table = ch7006_ids,
+ },
+
+ .encoder_init = ch7006_encoder_init,
+};
+
+
+/* Module initialization */
+
+static int __init ch7006_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
+}
+
+static void __exit ch7006_exit(void)
+{
+ drm_i2c_encoder_unregister(&ch7006_driver);
+}
+
+int ch7006_debug;
+module_param_named(debug, ch7006_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Enable debug output.");
+
+char *ch7006_tv_norm;
+module_param_named(tv_norm, ch7006_tv_norm, charp, 0600);
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+ "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n"
+ "\t\tDefault: PAL");
+
+int ch7006_scale = 1;
+module_param_named(scale, ch7006_scale, int, 0600);
+MODULE_PARM_DESC(scale, "Default scale.\n"
+ "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n"
+ "\t\t\t1 -> Select default video modes.\n"
+ "\t\t\t2 -> Select video modes with a lower blanking ratio.");
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(ch7006_init);
+module_exit(ch7006_exit);
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
new file mode 100644
index 00000000000..e447dfb6389
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ch7006_priv.h"
+
+char *ch7006_tv_norm_names[] = {
+ [TV_NORM_PAL] = "PAL",
+ [TV_NORM_PAL_M] = "PAL-M",
+ [TV_NORM_PAL_N] = "PAL-N",
+ [TV_NORM_PAL_NC] = "PAL-Nc",
+ [TV_NORM_PAL_60] = "PAL-60",
+ [TV_NORM_NTSC_M] = "NTSC-M",
+ [TV_NORM_NTSC_J] = "NTSC-J",
+};
+
+#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001, \
+ .vdisplay = 480, \
+ .vtotal = 525, \
+ .hvirtual = 660
+
+#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1, \
+ .vdisplay = 576, \
+ .vtotal = 625, \
+ .hvirtual = 810
+
+struct ch7006_tv_norm_info ch7006_tv_norms[] = {
+ [TV_NORM_NTSC_M] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.339 * fixed1,
+ .subc_freq = 3579545 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC),
+ .voffset = 0,
+ },
+ [TV_NORM_NTSC_J] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.286 * fixed1,
+ .subc_freq = 3579545 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL] = {
+ PAL_LIKE_TIMINGS,
+ .black_level = 0.3 * fixed1,
+ .subc_freq = 4433618.75 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL_M] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.339 * fixed1,
+ .subc_freq = 3575611.433 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+ .voffset = 16,
+ },
+
+ /* The following modes seem to work right but they're
+ * undocumented */
+
+ [TV_NORM_PAL_N] = {
+ PAL_LIKE_TIMINGS,
+ .black_level = 0.339 * fixed1,
+ .subc_freq = 4433618.75 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL_NC] = {
+ PAL_LIKE_TIMINGS,
+ .black_level = 0.3 * fixed1,
+ .subc_freq = 3582056.25 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
+ .voffset = 0,
+ },
+ [TV_NORM_PAL_60] = {
+ NTSC_LIKE_TIMINGS,
+ .black_level = 0.3 * fixed1,
+ .subc_freq = 4433618.75 * fixed1,
+ .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
+ .voffset = 16,
+ },
+};
+
+#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
+ subc, scale, scale_mask, norm_mask, e_hd, e_vd) { \
+ .mode = { \
+ .name = #hd "x" #vd, \
+ .status = 0, \
+ .type = DRM_MODE_TYPE_DRIVER, \
+ .clock = f, \
+ .hdisplay = hd, \
+ .hsync_start = e_hd + 16, \
+ .hsync_end = e_hd + 80, \
+ .htotal = ht, \
+ .hskew = 0, \
+ .vdisplay = vd, \
+ .vsync_start = vd + 10, \
+ .vsync_end = vd + 26, \
+ .vtotal = vt, \
+ .vscan = 0, \
+ .flags = DRM_MODE_FLAG_##hsynp##HSYNC | \
+ DRM_MODE_FLAG_##vsynp##VSYNC, \
+ .vrefresh = 0, \
+ }, \
+ .enc_hdisp = e_hd, \
+ .enc_vdisp = e_vd, \
+ .subc_coeff = subc * fixed1, \
+ .dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \
+ bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \
+ .valid_scales = scale_mask, \
+ .valid_norms = norm_mask \
+ }
+
+#define MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
+ subc, scale, scale_mask, norm_mask) \
+ __MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale, \
+ scale_mask, norm_mask, hd, vd)
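+
+/* For reference, the MODE() arguments below are, in order: pixel clock in
+ * kHz, hdisplay, vdisplay, htotal, vtotal, hsync polarity, vsync polarity,
+ * subcarrier coefficient, scaling ratio, a bitmask of scale settings the
+ * mode is valid for, and a bitmask of TV norms it is valid for.  __MODE()
+ * additionally takes the encoder-side input resolution when it differs
+ * from the DRM mode (e.g. the 720x576 mode fed to the chip as 800x600).
+ */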
+
+#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J | \
+ 1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60)
+
+#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
+
+struct ch7006_mode ch7006_modes[] = {
+ MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
+ MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
+ MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
+ MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE),
+ MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE),
+ MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE),
+ MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE),
+ MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE),
+ MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE),
+ MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE),
+ MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE),
+ MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE),
+ MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE),
+ MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE),
+ MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE),
+ MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE),
+ MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE),
+ MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE),
+ MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE),
+ __MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600),
+ MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE),
+ MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE),
+ MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE),
+ MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE),
+ MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE),
+ {}
+};
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_mode *mode;
+
+ for (mode = ch7006_modes; mode->mode.clock; mode++) {
+
+ if (~mode->valid_norms & 1<<priv->norm)
+ continue;
+
+ if (mode->mode.hdisplay != drm_mode->hdisplay ||
+ mode->mode.vdisplay != drm_mode->vdisplay ||
+ mode->mode.vtotal != drm_mode->vtotal ||
+ mode->mode.htotal != drm_mode->htotal ||
+ mode->mode.clock != drm_mode->clock)
+ continue;
+
+ return mode;
+ }
+
+ return NULL;
+}
+
+/* Some common HW state calculation code */
+
+void ch7006_setup_levels(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ uint8_t *regs = priv->state.regs;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ int gain;
+ int black_level;
+
+ /* Set DAC_GAIN if the voltage drop between white and black is
+ * high enough. */
+ if (norm->black_level < 339*fixed1/1000) {
+ gain = 76;
+
+ regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN;
+ } else {
+ gain = 71;
+
+ regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN;
+ }
+
+ black_level = round_fixed(norm->black_level*26625)/gain;
+
+ /* Correct it with the specified brightness. */
+ black_level = interpolate(90, black_level, 208, priv->brightness);
+
+ regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level);
+
+ ch7006_dbg(client, "black level: %d\n", black_level);
+}
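+
+/* Worked example (approximate, PAL): norm->black_level is 0.3 in 32.32
+ * fixed point, below the 0.339 threshold, so DAC_GAIN gets set and
+ * gain = 76.  black_level then comes out around 0.3 * 26625 / 76 ~= 105,
+ * and interpolate(90, 105, 208, brightness) maps the 0..100 brightness
+ * setting to 90 at 0, ~105 at 50 and 208 at 100.
+ */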
+
+void ch7006_setup_subcarrier(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ struct ch7006_mode *mode = priv->mode;
+ uint32_t subc_inc;
+
+ subc_inc = round_fixed((mode->subc_coeff >> 8)
+ * (norm->subc_freq >> 24));
+
+ setbitf(state, CH7006_SUBC_INC0, 28, subc_inc);
+ setbitf(state, CH7006_SUBC_INC1, 24, subc_inc);
+ setbitf(state, CH7006_SUBC_INC2, 20, subc_inc);
+ setbitf(state, CH7006_SUBC_INC3, 16, subc_inc);
+ setbitf(state, CH7006_SUBC_INC4, 12, subc_inc);
+ setbitf(state, CH7006_SUBC_INC5, 8, subc_inc);
+ setbitf(state, CH7006_SUBC_INC6, 4, subc_inc);
+ setbitf(state, CH7006_SUBC_INC7, 0, subc_inc);
+
+ ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc);
+}
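+
+/* The multiply above is plain 32.32 fixed-point arithmetic: dropping 8
+ * bits of the coefficient and 24 bits of the frequency keeps the 64-bit
+ * product in range, and the resulting 32-bit increment is then written
+ * out as eight 4-bit nibbles, SUBC_INC0 holding bits 31:28 down to
+ * SUBC_INC7 holding bits 3:0.
+ */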
+
+void ch7006_setup_pll(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ uint8_t *regs = priv->state.regs;
+ struct ch7006_mode *mode = priv->mode;
+ int n, best_n = 0;
+ int m, best_m = 0;
+ int freq, best_freq = 0;
+
+ for (n = 0; n < CH7006_MAXN; n++) {
+ for (m = 0; m < CH7006_MAXM; m++) {
+ freq = CH7006_FREQ0*(n+2)/(m+2);
+
+ if (abs(freq - mode->mode.clock) <
+ abs(best_freq - mode->mode.clock)) {
+ best_freq = freq;
+ best_n = n;
+ best_m = m;
+ }
+ }
+ }
+
+ regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) |
+ bitf(CH7006_PLLOV_M_8, best_m);
+
+ regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m);
+ regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n);
+
+ if (best_n < 108)
+ regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR;
+ else
+ regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR;
+
+ ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n",
+ best_n, best_m, best_freq, best_n < 108);
+}
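+
+/* The loop above is a brute-force search: with the 14318 kHz reference,
+ * the synthesized clock is CH7006_FREQ0 * (n + 2) / (m + 2), and every
+ * (n, m) pair below CH7006_MAXN/CH7006_MAXM is tried to find the output
+ * closest to the requested mode clock (e.g. a 21000 kHz target wants a
+ * ratio of roughly 21000 / 14318 ~= 1.47).
+ */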
+
+void ch7006_setup_power_state(struct drm_encoder *encoder)
+{
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ uint8_t *power = &priv->state.regs[CH7006_POWER];
+ int subconnector;
+
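+ /* select_subconnector == DRM_MODE_SUBCONNECTOR_Automatic (0) means
+  * fall back to the detected subconnector. */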
+ subconnector = priv->select_subconnector ? priv->select_subconnector :
+ priv->subconnector;
+
+ *power = CH7006_POWER_RESET;
+
+ if (priv->last_dpms == DRM_MODE_DPMS_ON) {
+ switch (subconnector) {
+ case DRM_MODE_SUBCONNECTOR_SVIDEO:
+ *power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF);
+ break;
+ case DRM_MODE_SUBCONNECTOR_Composite:
+ *power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF);
+ break;
+ case DRM_MODE_SUBCONNECTOR_SCART:
+ *power |= bitfs(CH7006_POWER_LEVEL, NORMAL) |
+ CH7006_POWER_SCART;
+ break;
+ }
+
+ } else {
+ *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ }
+}
+
+void ch7006_setup_properties(struct drm_encoder *encoder)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ struct ch7006_priv *priv = to_ch7006_priv(encoder);
+ struct ch7006_state *state = &priv->state;
+ struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
+ struct ch7006_mode *ch_mode = priv->mode;
+ struct drm_display_mode *mode = &ch_mode->mode;
+ uint8_t *regs = state->regs;
+ int flicker, contrast, hpos, vpos;
+ uint64_t scale, aspect;
+
+ flicker = interpolate(0, 2, 3, priv->flicker);
+ regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) |
+ bitf(CH7006_FFILTER_LUMA, flicker) |
+ bitf(CH7006_FFILTER_CHROMA, 1);
+
+ contrast = interpolate(0, 5, 7, priv->contrast);
+ regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast);
+
+ scale = norm->vtotal*fixed1;
+ do_div(scale, mode->vtotal);
+
+ aspect = ch_mode->enc_hdisp*fixed1;
+ do_div(aspect, ch_mode->enc_vdisp);
+
+ hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale)
+ * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4;
+
+ setbitf(state, CH7006_POV, HPOS_8, hpos);
+ setbitf(state, CH7006_HPOS, 0, hpos);
+
+ vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale)
+ + norm->voffset) * priv->vmargin / 100 / 2;
+
+ setbitf(state, CH7006_POV, VPOS_8, vpos);
+ setbitf(state, CH7006_VPOS, 0, vpos);
+
+ ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos);
+}
+
+/* HW access functions */
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ ch7006_err(client, "Error %d writing to subaddress 0x%x\n",
+ ret, addr);
+}
+
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr)
+{
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ ch7006_err(client, "Error %d reading from subaddress 0x%x\n",
+ ret, addr);
+ return 0;
+}
+
+void ch7006_state_load(struct i2c_client *client,
+ struct ch7006_state *state)
+{
+ ch7006_load_reg(client, state, CH7006_POWER);
+
+ ch7006_load_reg(client, state, CH7006_DISPMODE);
+ ch7006_load_reg(client, state, CH7006_FFILTER);
+ ch7006_load_reg(client, state, CH7006_BWIDTH);
+ ch7006_load_reg(client, state, CH7006_INPUT_FORMAT);
+ ch7006_load_reg(client, state, CH7006_CLKMODE);
+ ch7006_load_reg(client, state, CH7006_START_ACTIVE);
+ ch7006_load_reg(client, state, CH7006_POV);
+ ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
+ ch7006_load_reg(client, state, CH7006_HPOS);
+ ch7006_load_reg(client, state, CH7006_VPOS);
+ ch7006_load_reg(client, state, CH7006_INPUT_SYNC);
+ ch7006_load_reg(client, state, CH7006_DETECT);
+ ch7006_load_reg(client, state, CH7006_CONTRAST);
+ ch7006_load_reg(client, state, CH7006_PLLOV);
+ ch7006_load_reg(client, state, CH7006_PLLM);
+ ch7006_load_reg(client, state, CH7006_PLLN);
+ ch7006_load_reg(client, state, CH7006_BCLKOUT);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC0);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC1);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC2);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC3);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC4);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC5);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC6);
+ ch7006_load_reg(client, state, CH7006_SUBC_INC7);
+ ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
+ ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
+}
+
+void ch7006_state_save(struct i2c_client *client,
+ struct ch7006_state *state)
+{
+ ch7006_save_reg(client, state, CH7006_POWER);
+
+ ch7006_save_reg(client, state, CH7006_DISPMODE);
+ ch7006_save_reg(client, state, CH7006_FFILTER);
+ ch7006_save_reg(client, state, CH7006_BWIDTH);
+ ch7006_save_reg(client, state, CH7006_INPUT_FORMAT);
+ ch7006_save_reg(client, state, CH7006_CLKMODE);
+ ch7006_save_reg(client, state, CH7006_START_ACTIVE);
+ ch7006_save_reg(client, state, CH7006_POV);
+ ch7006_save_reg(client, state, CH7006_BLACK_LEVEL);
+ ch7006_save_reg(client, state, CH7006_HPOS);
+ ch7006_save_reg(client, state, CH7006_VPOS);
+ ch7006_save_reg(client, state, CH7006_INPUT_SYNC);
+ ch7006_save_reg(client, state, CH7006_DETECT);
+ ch7006_save_reg(client, state, CH7006_CONTRAST);
+ ch7006_save_reg(client, state, CH7006_PLLOV);
+ ch7006_save_reg(client, state, CH7006_PLLM);
+ ch7006_save_reg(client, state, CH7006_PLLN);
+ ch7006_save_reg(client, state, CH7006_BCLKOUT);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC0);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC1);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC2);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC3);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC4);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC5);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC6);
+ ch7006_save_reg(client, state, CH7006_SUBC_INC7);
+ ch7006_save_reg(client, state, CH7006_PLL_CONTROL);
+ ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0);
+
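+ /* Swap bits 3:2 and 1:0 of the value read back from FFILTER, keeping
+  * the upper nibble: the luma and chroma filter fields apparently come
+  * back in exchanged positions. */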
+ state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) |
+ (state->regs[CH7006_FFILTER] & 0x0c) >> 2 |
+ (state->regs[CH7006_FFILTER] & 0x03) << 2;
+}
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
new file mode 100644
index 00000000000..b06d3d93d8a
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_CH7006_PRIV_H__
+#define __DRM_I2C_CH7006_PRIV_H__
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "drm_encoder_slave.h"
+#include "i2c/ch7006.h"
+
+typedef int64_t fixed;
+#define fixed1 (1LL << 32)
+
+enum ch7006_tv_norm {
+ TV_NORM_PAL,
+ TV_NORM_PAL_M,
+ TV_NORM_PAL_N,
+ TV_NORM_PAL_NC,
+ TV_NORM_PAL_60,
+ TV_NORM_NTSC_M,
+ TV_NORM_NTSC_J,
+ NUM_TV_NORMS
+};
+
+struct ch7006_tv_norm_info {
+ fixed vrefresh;
+ int vdisplay;
+ int vtotal;
+ int hvirtual;
+
+ fixed subc_freq;
+ fixed black_level;
+
+ uint32_t dispmode;
+ int voffset;
+};
+
+struct ch7006_mode {
+ struct drm_display_mode mode;
+
+ int enc_hdisp;
+ int enc_vdisp;
+
+ fixed subc_coeff;
+ uint32_t dispmode;
+
+ uint32_t valid_scales;
+ uint32_t valid_norms;
+};
+
+struct ch7006_state {
+ uint8_t regs[0x26];
+};
+
+struct ch7006_priv {
+ struct ch7006_encoder_params *params;
+ struct ch7006_mode *mode;
+
+ struct ch7006_state state;
+ struct ch7006_state saved_state;
+
+ struct drm_property *scale_property;
+
+ int select_subconnector;
+ int subconnector;
+ int hmargin;
+ int vmargin;
+ enum ch7006_tv_norm norm;
+ int brightness;
+ int contrast;
+ int flicker;
+ int scale;
+
+ int last_dpms;
+};
+
+#define to_ch7006_priv(x) \
+ ((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
+
+extern int ch7006_debug;
+extern char *ch7006_tv_norm;
+extern int ch7006_scale;
+
+extern char *ch7006_tv_norm_names[];
+extern struct ch7006_tv_norm_info ch7006_tv_norms[];
+extern struct ch7006_mode ch7006_modes[];
+
+struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode);
+
+void ch7006_setup_levels(struct drm_encoder *encoder);
+void ch7006_setup_subcarrier(struct drm_encoder *encoder);
+void ch7006_setup_pll(struct drm_encoder *encoder);
+void ch7006_setup_power_state(struct drm_encoder *encoder);
+void ch7006_setup_properties(struct drm_encoder *encoder);
+
+void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val);
+uint8_t ch7006_read(struct i2c_client *client, uint8_t addr);
+
+void ch7006_state_load(struct i2c_client *client,
+ struct ch7006_state *state);
+void ch7006_state_save(struct i2c_client *client,
+ struct ch7006_state *state);
+
+/* Some helper macros */
+
+#define ch7006_dbg(client, format, ...) do { \
+ if (ch7006_debug) \
+ dev_printk(KERN_DEBUG, &client->dev, \
+ "%s: " format, __func__, ## __VA_ARGS__); \
+ } while (0)
+#define ch7006_info(client, format, ...) \
+ dev_info(&client->dev, format, __VA_ARGS__)
+#define ch7006_err(client, format, ...) \
+ dev_err(&client->dev, format, __VA_ARGS__)
+
+#define __mask(src, bitfield) \
+ (((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
+#define mask(bitfield) __mask(bitfield)
+
+#define __bitf(src, bitfield, x) \
+ (((x) >> (src) << (0 ? bitfield)) & __mask(src, bitfield))
+#define bitf(bitfield, x) __bitf(bitfield, x)
+#define bitfs(bitfield, s) __bitf(bitfield, bitfield##_##s)
+#define setbitf(state, reg, bitfield, x) \
+ state->regs[reg] = (state->regs[reg] & ~mask(reg##_##bitfield)) \
+ | bitf(reg##_##bitfield, x)
+
+#define __unbitf(src, bitfield, x) \
+ ((x & __mask(src, bitfield)) >> (0 ? bitfield) << (src))
+#define unbitf(bitfield, x) __unbitf(bitfield, x)
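+
+/* Example of the "shift, high:low" field notation used below: with
+ * CH7006_DISPMODE_SCALING_RATIO defined as "0, 2:0", the "2:0" part is
+ * consumed by the ?: trick above, so mask() yields 0x07 and
+ * bitfs(CH7006_DISPMODE_SCALING_RATIO, 5_4) places the 5_4 value in bits
+ * 2:0.  Fields split across registers use the leading shift, e.g.
+ * CH7006_POV_HPOS_8 is "8, 1:1", so setbitf(state, CH7006_POV, HPOS_8, x)
+ * stores bit 8 of x in bit 1 of the POV register.
+ */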
+
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+ return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
+
+static inline int32_t round_fixed(fixed x)
+{
+ return (x + fixed1/2) >> 32;
+}
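+
+/* The "fixed" type above is 32.32 fixed point: fixed1 represents 1.0, so
+ * e.g. 0.339 * fixed1 in the norm table encodes 0.339, and round_fixed()
+ * rounds back to the nearest integer.  interpolate(y0, y1, y2, x) maps a
+ * 0..100 user setting onto a piecewise-linear curve: x = 0 gives y0,
+ * x = 50 gives y1 and x = 100 gives y2, which is how the 0..100 TV
+ * properties become register values.
+ */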
+
+#define ch7006_load_reg(client, state, reg) ch7006_write(client, reg, state->regs[reg])
+#define ch7006_save_reg(client, state, reg) state->regs[reg] = ch7006_read(client, reg)
+
+/* Fixed hardware specs */
+
+#define CH7006_FREQ0 14318
+#define CH7006_MAXN 650
+#define CH7006_MAXM 315
+
+/* Register definitions */
+
+#define CH7006_DISPMODE 0x00
+#define CH7006_DISPMODE_INPUT_RES 0, 7:5
+#define CH7006_DISPMODE_INPUT_RES_512x384 0x0
+#define CH7006_DISPMODE_INPUT_RES_720x400 0x1
+#define CH7006_DISPMODE_INPUT_RES_640x400 0x2
+#define CH7006_DISPMODE_INPUT_RES_640x480 0x3
+#define CH7006_DISPMODE_INPUT_RES_800x600 0x4
+#define CH7006_DISPMODE_INPUT_RES_NATIVE 0x5
+#define CH7006_DISPMODE_OUTPUT_STD 0, 4:3
+#define CH7006_DISPMODE_OUTPUT_STD_PAL 0x0
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC 0x1
+#define CH7006_DISPMODE_OUTPUT_STD_PAL_M 0x2
+#define CH7006_DISPMODE_OUTPUT_STD_NTSC_J 0x3
+#define CH7006_DISPMODE_SCALING_RATIO 0, 2:0
+#define CH7006_DISPMODE_SCALING_RATIO_5_4 0x0
+#define CH7006_DISPMODE_SCALING_RATIO_1_1 0x1
+#define CH7006_DISPMODE_SCALING_RATIO_7_8 0x2
+#define CH7006_DISPMODE_SCALING_RATIO_5_6 0x3
+#define CH7006_DISPMODE_SCALING_RATIO_3_4 0x4
+#define CH7006_DISPMODE_SCALING_RATIO_7_10 0x5
+
+#define CH7006_FFILTER 0x01
+#define CH7006_FFILTER_TEXT 0, 5:4
+#define CH7006_FFILTER_LUMA 0, 3:2
+#define CH7006_FFILTER_CHROMA 0, 1:0
+#define CH7006_FFILTER_CHROMA_NO_DCRAWL 0x3
+
+#define CH7006_BWIDTH 0x03
+#define CH7006_BWIDTH_5L_FFILER (1 << 7)
+#define CH7006_BWIDTH_CVBS_NO_CHROMA (1 << 6)
+#define CH7006_BWIDTH_CHROMA 0, 5:4
+#define CH7006_BWIDTH_SVIDEO_YPEAK (1 << 3)
+#define CH7006_BWIDTH_SVIDEO_LUMA 0, 2:1
+#define CH7006_BWIDTH_CVBS_LUMA 0, 0:0
+
+#define CH7006_INPUT_FORMAT 0x04
+#define CH7006_INPUT_FORMAT_DAC_GAIN (1 << 6)
+#define CH7006_INPUT_FORMAT_RGB_PASS_THROUGH (1 << 5)
+#define CH7006_INPUT_FORMAT_FORMAT 0, 3:0
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16 0x0
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m16 0x1
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m16 0x2
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15 0x3
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12C 0x4
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12I 0x5
+#define CH7006_INPUT_FORMAT_FORMAT_RGB24m8 0x6
+#define CH7006_INPUT_FORMAT_FORMAT_RGB16m8 0x7
+#define CH7006_INPUT_FORMAT_FORMAT_RGB15m8 0x8
+#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m8 0x9
+
+#define CH7006_CLKMODE 0x06
+#define CH7006_CLKMODE_SUBC_LOCK (1 << 7)
+#define CH7006_CLKMODE_MASTER (1 << 6)
+#define CH7006_CLKMODE_POS_EDGE (1 << 4)
+#define CH7006_CLKMODE_XCM 0, 3:2
+#define CH7006_CLKMODE_PCM 0, 1:0
+
+#define CH7006_START_ACTIVE 0x07
+#define CH7006_START_ACTIVE_0 0, 7:0
+
+#define CH7006_POV 0x08
+#define CH7006_POV_START_ACTIVE_8 8, 2:2
+#define CH7006_POV_HPOS_8 8, 1:1
+#define CH7006_POV_VPOS_8 8, 0:0
+
+#define CH7006_BLACK_LEVEL 0x09
+#define CH7006_BLACK_LEVEL_0 0, 7:0
+
+#define CH7006_HPOS 0x0a
+#define CH7006_HPOS_0 0, 7:0
+
+#define CH7006_VPOS 0x0b
+#define CH7006_VPOS_0 0, 7:0
+
+#define CH7006_INPUT_SYNC 0x0d
+#define CH7006_INPUT_SYNC_EMBEDDED (1 << 3)
+#define CH7006_INPUT_SYNC_OUTPUT (1 << 2)
+#define CH7006_INPUT_SYNC_PVSYNC (1 << 1)
+#define CH7006_INPUT_SYNC_PHSYNC (1 << 0)
+
+#define CH7006_POWER 0x0e
+#define CH7006_POWER_SCART (1 << 4)
+#define CH7006_POWER_RESET (1 << 3)
+#define CH7006_POWER_LEVEL 0, 2:0
+#define CH7006_POWER_LEVEL_CVBS_OFF 0x0
+#define CH7006_POWER_LEVEL_POWER_OFF 0x1
+#define CH7006_POWER_LEVEL_SVIDEO_OFF 0x2
+#define CH7006_POWER_LEVEL_NORMAL 0x3
+#define CH7006_POWER_LEVEL_FULL_POWER_OFF 0x4
+
+#define CH7006_DETECT 0x10
+#define CH7006_DETECT_SVIDEO_Y_TEST (1 << 3)
+#define CH7006_DETECT_SVIDEO_C_TEST (1 << 2)
+#define CH7006_DETECT_CVBS_TEST (1 << 1)
+#define CH7006_DETECT_SENSE (1 << 0)
+
+#define CH7006_CONTRAST 0x11
+#define CH7006_CONTRAST_0 0, 2:0
+
+#define CH7006_PLLOV 0x13
+#define CH7006_PLLOV_N_8 8, 2:1
+#define CH7006_PLLOV_M_8 8, 0:0
+
+#define CH7006_PLLM 0x14
+#define CH7006_PLLM_0 0, 7:0
+
+#define CH7006_PLLN 0x15
+#define CH7006_PLLN_0 0, 7:0
+
+#define CH7006_BCLKOUT 0x17
+
+#define CH7006_SUBC_INC0 0x18
+#define CH7006_SUBC_INC0_28 28, 3:0
+
+#define CH7006_SUBC_INC1 0x19
+#define CH7006_SUBC_INC1_24 24, 3:0
+
+#define CH7006_SUBC_INC2 0x1a
+#define CH7006_SUBC_INC2_20 20, 3:0
+
+#define CH7006_SUBC_INC3 0x1b
+#define CH7006_SUBC_INC3_GPIO1_VAL (1 << 7)
+#define CH7006_SUBC_INC3_GPIO0_VAL (1 << 6)
+#define CH7006_SUBC_INC3_POUT_3_3V (1 << 5)
+#define CH7006_SUBC_INC3_POUT_INV (1 << 4)
+#define CH7006_SUBC_INC3_16 16, 3:0
+
+#define CH7006_SUBC_INC4 0x1c
+#define CH7006_SUBC_INC4_GPIO1_IN (1 << 7)
+#define CH7006_SUBC_INC4_GPIO0_IN (1 << 6)
+#define CH7006_SUBC_INC4_DS_INPUT (1 << 4)
+#define CH7006_SUBC_INC4_12 12, 3:0
+
+#define CH7006_SUBC_INC5 0x1d
+#define CH7006_SUBC_INC5_8 8, 3:0
+
+#define CH7006_SUBC_INC6 0x1e
+#define CH7006_SUBC_INC6_4 4, 3:0
+
+#define CH7006_SUBC_INC7 0x1f
+#define CH7006_SUBC_INC7_0 0, 3:0
+
+#define CH7006_PLL_CONTROL 0x20
+#define CH7006_PLL_CONTROL_CPI (1 << 5)
+#define CH7006_PLL_CONTROL_CAPACITOR (1 << 4)
+#define CH7006_PLL_CONTROL_7STAGES (1 << 3)
+#define CH7006_PLL_CONTROL_DIGITAL_5V (1 << 2)
+#define CH7006_PLL_CONTROL_ANALOG_5V (1 << 1)
+#define CH7006_PLL_CONTROL_MEMORY_5V (1 << 0)
+
+#define CH7006_CALC_SUBC_INC0 0x21
+#define CH7006_CALC_SUBC_INC0_24 24, 4:3
+#define CH7006_CALC_SUBC_INC0_HYST 0, 2:1
+#define CH7006_CALC_SUBC_INC0_AUTO (1 << 0)
+
+#define CH7006_CALC_SUBC_INC1 0x22
+#define CH7006_CALC_SUBC_INC1_16 16, 7:0
+
+#define CH7006_CALC_SUBC_INC2 0x23
+#define CH7006_CALC_SUBC_INC2_8 8, 7:0
+
+#define CH7006_CALC_SUBC_INC3 0x24
+#define CH7006_CALC_SUBC_INC3_0 0, 7:0
+
+#define CH7006_VERSION_ID 0x25
+
+#endif
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 7d1d88cdf2d..de32d22a8c3 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
};
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fabb9a81796..c1e02752e02 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 877bf6cb14a..06bd732e646 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i830_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i830_mmap_buffers,
.fasync = drm_fasync,
};
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 389597e4a62..44f990bed8f 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -70,7 +70,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fa7b9be096b..9929f84ec3e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_lvds.o \
intel_bios.o \
intel_dp.o \
- intel_dp_i2c.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_fb.o \
intel_tv.o \
intel_dvo.o \
+ intel_overlay.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b531d..1184c14ba87 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
if (val != CH7017_DEVICE_ID_VALUE &&
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
- DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+ "Slave %d.\n",
val, i2cbus->adapter.name,dvo->slave_addr);
goto fail;
}
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
uint8_t horizontal_active_pixel_output, vertical_active_line_output;
uint8_t active_input_line_output;
- DRM_DEBUG("Registers before mode setting\n");
+ DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
/* Turn the LVDS back on with new settings. */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
- DRM_DEBUG("Registers after mode setting\n");
+ DRM_DEBUG_KMS("Registers after mode setting\n");
ch7017_dump_regs(dvo);
}
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
#define DUMP(reg) \
do { \
ch7017_read(dvo, reg, &val); \
- DRM_DEBUG(#reg ": %02x\n", val); \
+ DRM_DEBUG_KMS(#reg ": %02x\n", val); \
} while (0)
DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b89628968..d56ff5cc22b 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!ch7xxx->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!ch7xxx->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
name = ch7xxx_get_id(vendor);
if (!name) {
- DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
goto out;
if (device != CH7xxx_DID) {
- DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+ DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+ "slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
ch7xxx->quiet = false;
- DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
name, vendor, device);
return true;
out:
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
if ((i % 8) == 0 )
- DRM_DEBUG("\n %02X: ", i);
- DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+ DRM_LOG_KMS("\n %02X: ", i);
+ DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
}
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9921f..24169e528f0 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
};
if (!priv->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+ "%s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
return true;
if (!priv->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
* the address it's responding on.
*/
if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
- DRM_DEBUG("ivch detect failed due to address mismatch "
+ DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
"(%d vs %d)\n",
(temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
goto out;
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
uint16_t val;
ivch_read(dvo, VR00, &val);
- DRM_DEBUG("VR00: 0x%04x\n", val);
+ DRM_LOG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
- DRM_DEBUG("VR01: 0x%04x\n", val);
+ DRM_LOG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
- DRM_DEBUG("VR30: 0x%04x\n", val);
+ DRM_LOG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
- DRM_DEBUG("VR40: 0x%04x\n", val);
+ DRM_LOG_KMS("VR40: 0x%04x\n", val);
/* GPIO registers */
ivch_read(dvo, VR80, &val);
- DRM_DEBUG("VR80: 0x%04x\n", val);
+ DRM_LOG_KMS("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
- DRM_DEBUG("VR81: 0x%04x\n", val);
+ DRM_LOG_KMS("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
- DRM_DEBUG("VR82: 0x%04x\n", val);
+ DRM_LOG_KMS("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
- DRM_DEBUG("VR83: 0x%04x\n", val);
+ DRM_LOG_KMS("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
- DRM_DEBUG("VR84: 0x%04x\n", val);
+ DRM_LOG_KMS("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
- DRM_DEBUG("VR85: 0x%04x\n", val);
+ DRM_LOG_KMS("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
- DRM_DEBUG("VR86: 0x%04x\n", val);
+ DRM_LOG_KMS("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
- DRM_DEBUG("VR87: 0x%04x\n", val);
+ DRM_LOG_KMS("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
- DRM_DEBUG("VR88: 0x%04x\n", val);
+ DRM_LOG_KMS("VR88: 0x%04x\n", val);
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
- DRM_DEBUG("VR8E: 0x%04x\n", val);
+ DRM_LOG_KMS("VR8E: 0x%04x\n", val);
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
- DRM_DEBUG("VR8F: 0x%04x\n", val);
+ DRM_LOG_KMS("VR8F: 0x%04x\n", val);
}
static void ivch_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f7341e5..0001c13f0a8 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!sil->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!sil->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_VID & 0xff)) {
- DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
goto out;
if (ch != (SIL164_DID & 0xff)) {
- DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
- DRM_DEBUG("init sil164 dvo controller successfully!\n");
+ DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
return true;
out:
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
uint8_t val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
- DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
- DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
- DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
- DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
- DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+ DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907384e..c7c391bc116 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
};
if (!tfp->quiet) {
- DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+ DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
return true;
if (!tfp->quiet) {
- DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+ DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, i2cbus->adapter.name, dvo->slave_addr);
}
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
- DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+ "Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
- DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+ DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+ "Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
uint8_t val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
- DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
- DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
- DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
- DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
- DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
- DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
- DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
- DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+ DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
- DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
- DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
- DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
- DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+ DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf0552b3c..18476bf0b58 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,7 @@
*/
#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -96,13 +97,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_gem_object *obj = obj_priv->obj;
- seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
+ seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
obj,
get_pin_flag(obj_priv),
obj->size,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno,
- obj_priv->dirty ? "dirty" : "");
+ obj_priv->dirty ? " dirty" : "",
+ obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -412,6 +414,109 @@ static int i915_registers_info(struct seq_file *m, void *data) {
return 0;
}
+static int
+i915_wedged_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_wedged_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof (buf),
+ "wedged : %d\n",
+ atomic_read(&dev_priv->mm.wedged));
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_wedged_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_INFO("Manually setting wedged to %d\n", val);
+
+ atomic_set(&dev_priv->mm.wedged, val);
+ if (val) {
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ queue_work(dev_priv->wq, &dev_priv->error_work);
+ }
+
+ return cnt;
+}
+
+static const struct file_operations i915_wedged_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_wedged_open,
+ .read = i915_wedged_read,
+ .write = i915_wedged_write,
+};
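+
+/* Rough usage sketch: this shows up as <debugfs>/dri/<minor>/i915_wedged
+ * (typically under /sys/kernel/debug); reading it reports the current
+ * wedged state, and writing a non-zero value forces the GPU-hang recovery
+ * path by waking irq_queue and queueing error_work, which is useful for
+ * exercising the error handling without a real hang.
+ */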
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent,
+ const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+ list_add(&node->list, &minor->debugfs_nodes.list);
+
+ return 0;
+}
+
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_wedged",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_wedged_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
static struct drm_info_list i915_debugfs_list[] = {
{"i915_regs", i915_registers_info, 0},
@@ -432,6 +537,12 @@ static struct drm_info_list i915_debugfs_list[] = {
int i915_debugfs_init(struct drm_minor *minor)
{
+ int ret;
+
+ ret = i915_wedged_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
@@ -441,7 +552,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
-
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138be45f..701bfeac7f5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -807,6 +807,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -962,7 +968,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
* Some of the preallocated space is taken by the GTT
* and popup. GTT is 1K per MB of aperture size, and popup is 4K.
*/
- if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
+ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
overhead = 4096;
else
overhead = (*aperture_size / 1024) + 4096;
@@ -1048,7 +1054,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
int gtt_offset, gtt_size;
if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1070,7 +1076,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
- DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+ DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
/* Mask out these reserved bits on this hardware. */
if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1102,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
phys =(entry & PTE_ADDRESS_MASK) |
((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
- DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+ DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
return phys;
}
@@ -1306,7 +1312,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
- if (!IS_IGD(dev))
+ if (!IS_PINEVIEW(dev))
return;
tmp = I915_READ(CLKCFG);
@@ -1413,7 +1419,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_iomapfree;
- dev_priv->wq = create_workqueue("i915");
+ dev_priv->wq = create_singlethread_workqueue("i915");
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
@@ -1434,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IGDNG(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
@@ -1489,9 +1495,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- /* FIXME: verify on IGDNG */
- if (!IS_IGDNG(dev))
- intel_opregion_init(dev, 0);
+ intel_opregion_init(dev, 0);
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
@@ -1525,6 +1529,15 @@ int i915_driver_unload(struct drm_device *dev)
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /*
+ * free the memory space allocated for the child device
+ * config parsed from VBT
+ */
+ if (dev_priv->child_dev && dev_priv->child_dev_num) {
+ kfree(dev_priv->child_dev);
+ dev_priv->child_dev = NULL;
+ dev_priv->child_dev_num = 0;
+ }
drm_irq_uninstall(dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1535,8 +1548,7 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
- if (!IS_IGDNG(dev))
- intel_opregion_free(dev, 0);
+ intel_opregion_free(dev, 0);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_modeset_cleanup(dev);
@@ -1548,6 +1560,8 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
+
+ intel_cleanup_overlay(dev);
}
pci_dev_put(dev_priv->bridge_dev);
@@ -1656,6 +1670,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f436ec075f..24286ca168f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -329,10 +329,11 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
+ .read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f659119..fbecac72f5b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -170,6 +170,8 @@ struct drm_i915_display_funcs {
/* clock gating init */
};
+struct intel_overlay;
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -187,6 +189,7 @@ typedef struct drm_i915_private {
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
+ struct drm_gem_object *pwrctx;
struct resource mch_res;
@@ -206,11 +209,13 @@ typedef struct drm_i915_private {
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask_reg;
u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on IGDNG,
+ /** split irq regs for graphics and display engine on Ironlake,
irq_mask_reg is still used for display irq. */
u32 gt_irq_mask_reg;
u32 gt_irq_enable_reg;
u32 de_irq_enable_reg;
+ u32 pch_irq_mask_reg;
+ u32 pch_irq_enable_reg;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -240,6 +245,9 @@ typedef struct drm_i915_private {
struct intel_opregion opregion;
+ /* overlay */
+ struct intel_overlay *overlay;
+
/* LVDS info */
int backlight_duty_cycle; /* restore backlight to this value */
bool panel_wants_dither;
@@ -258,7 +266,7 @@ typedef struct drm_i915_private {
struct notifier_block lid_notifier;
- int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+ int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -280,6 +288,7 @@ typedef struct drm_i915_private {
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveRENDERSTANDBY;
+ u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
@@ -374,8 +383,6 @@ typedef struct drm_i915_private {
u32 saveFDI_RXA_IMR;
u32 saveFDI_RXB_IMR;
u32 saveCACHE_MODE_0;
- u32 saveD_STATE;
- u32 saveDSPCLK_GATE_D;
u32 saveMI_ARB_STATE;
u32 saveSWF0[16];
u32 saveSWF1[16];
@@ -539,13 +546,21 @@ typedef struct drm_i915_private {
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
+ struct drm_crtc *plane_to_crtc_mapping[2];
+ struct drm_crtc *pipe_to_crtc_mapping[2];
+ wait_queue_head_t pending_flip_queue;
+
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
+ /* indicates the reduced downclock for LVDS*/
+ int lvds_downclock;
struct work_struct idle_work;
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
+ int child_dev_num;
+ struct child_device_config *child_dev;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -638,6 +653,13 @@ struct drm_i915_gem_object {
* Advice: are the backing pages purgeable?
*/
int madv;
+
+ /**
+ * Number of crtcs where this object is currently the fb, but
+ * will be page flipped away on the next vblank. When it
+ * reaches 0, dev_priv->pending_flip_queue will be woken up.
+ */
+ atomic_t pending_flip;
};
/**
@@ -738,6 +760,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+void intel_enable_asle (struct drm_device *dev);
+
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -813,6 +837,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
int i915_gem_idle(struct drm_device *dev);
+uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t flush_domains);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
int write);
@@ -824,6 +851,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
@@ -863,11 +891,13 @@ extern int i915_restore_state(struct drm_device *dev);
extern int intel_opregion_init(struct drm_device *dev, int resume);
extern void intel_opregion_free(struct drm_device *dev, int suspend);
extern void opregion_asle_intr(struct drm_device *dev);
+extern void ironlake_opregion_gse_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
#else
static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
static inline void opregion_enable_asle(struct drm_device *dev) { return; }
#endif
@@ -955,8 +985,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
@@ -990,47 +1020,51 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E42 || \
IS_GM45(dev))
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2 || \
- (IS_IGD(dev)))
+ (IS_PINEVIEW(dev)))
-#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IGDNG(dev))
+ IS_IRONLAKE(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_IGD(dev) || IS_IGDNG_M(dev))
+ IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IGDNG(dev))
+ IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
+ !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
(IS_I9XX(dev) || IS_GM45(dev)) && \
- !IS_IGD(dev) && \
- !IS_IGDNG(dev))
+ !IS_PINEVIEW(dev) && \
+ !IS_IRONLAKE(dev))
+#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2a3fa59992..8c463cf2050 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1288,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
DRM_ERROR("failed to add to map hash\n");
+ ret = -ENOMEM;
goto out_free_mm;
}
@@ -1583,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
*
* Returned sequence numbers are nonzero on success.
*/
-static uint32_t
+uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t flush_domains)
{
@@ -1617,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
- DRM_DEBUG("%d\n", seqno);
+ DRM_DEBUG_DRIVER("%d\n", seqno);
request->seqno = seqno;
request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 ier;
@@ -1837,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
return -EIO;
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
dev_priv->mm.waiting_gem_seqno = seqno;
i915_user_irq_get(dev);
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged));
+ if (interruptible)
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+ atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+ atomic_read(&dev_priv->mm.wedged));
+
i915_user_irq_put(dev);
dev_priv->mm.waiting_gem_seqno = 0;
@@ -1879,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
return ret;
}
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+ return i915_do_wait_request(dev, seqno, 1);
+}
+
static void
i915_gem_flush(struct drm_device *dev,
uint32_t invalidate_domains,
@@ -1947,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
#endif
BEGIN_LP_RING(2);
OUT_RING(cmd);
- OUT_RING(0); /* noop */
+ OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
}
@@ -2760,6 +2772,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+ switch (obj->write_domain) {
+ case I915_GEM_DOMAIN_GTT:
+ i915_gem_object_flush_gtt_write_domain(obj);
+ break;
+ case I915_GEM_DOMAIN_CPU:
+ i915_gem_object_flush_cpu_write_domain(obj);
+ break;
+ default:
+ i915_gem_object_flush_gpu_write_domain(obj);
+ break;
+ }
+}
+
/**
* Moves a single object to the GTT read, and possibly write domain.
*
@@ -3525,6 +3553,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
return 0;
}
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+ struct drm_gem_object **object_list,
+ int count)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ DEFINE_WAIT(wait);
+ int i, ret = 0;
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->pending_flip_queue,
+ &wait, TASK_INTERRUPTIBLE);
+ for (i = 0; i < count; i++) {
+ obj_priv = object_list[i]->driver_private;
+ if (atomic_read(&obj_priv->pending_flip) > 0)
+ break;
+ }
+ if (i == count)
+ break;
+
+ if (!signal_pending(current)) {
+ mutex_unlock(&dev->struct_mutex);
+ schedule();
+ mutex_lock(&dev->struct_mutex);
+ continue;
+ }
+ ret = -ERESTARTSYS;
+ break;
+ }
+ finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+ return ret;
+}
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -3540,7 +3603,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int ret, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
- int pin_tries;
+ int pin_tries, flips;
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
/* Copy in the exec list from userland */
- exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
- object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
if (exec_list == NULL || object_list == NULL) {
DRM_ERROR("Failed to allocate exec or object list "
"for %d buffers\n",
@@ -3598,20 +3661,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_verify_inactive(dev, __FILE__, __LINE__);
if (atomic_read(&dev_priv->mm.wedged)) {
- DRM_ERROR("Execbuf while wedged\n");
mutex_unlock(&dev->struct_mutex);
ret = -EIO;
goto pre_mutex_err;
}
if (dev_priv->mm.suspended) {
- DRM_ERROR("Execbuf while VT-switched.\n");
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
/* Look up object handles */
+ flips = 0;
for (i = 0; i < args->buffer_count; i++) {
object_list[i] = drm_gem_object_lookup(dev, file_priv,
exec_list[i].handle);
@@ -3630,6 +3692,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
goto err;
}
obj_priv->in_execbuffer = true;
+ flips += atomic_read(&obj_priv->pending_flip);
+ }
+
+ if (flips > 0) {
+ ret = i915_gem_wait_for_pending_flip(dev, object_list,
+ args->buffer_count);
+ if (ret)
+ goto err;
}
/* Pin and relocate */
@@ -4356,7 +4426,7 @@ i915_gem_init_hws(struct drm_device *dev)
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
I915_READ(HWS_PGA); /* posting read */
- DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+ DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
}
@@ -4614,8 +4684,8 @@ i915_gem_load(struct drm_device *dev)
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
}
-
i915_gem_detect_bit_6_swizzle(dev);
+ init_waitqueue_head(&dev_priv->pending_flip_queue);
}
/*
@@ -4790,7 +4860,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
- DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+ DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
ret = copy_from_user(obj_addr, user_data, args->size);
if (ret)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 200e398453c..30d6af6c09b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
- DRM_DEBUG("failed bus alloc: %d\n", ret);
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
goto out;
}
@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
bool need_disable;
- if (IS_IGDNG(dev)) {
- /* On IGDNG whatever DRAM config, GPU always do
+ if (IS_IRONLAKE(dev)) {
+ /* On Ironlake, regardless of DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a251b7..13b028994b2 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
&batchbuffer->cliprects))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_BATCHBUFFER,
+ return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long)batchbuffer);
}
@@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
&cmdbuffer->cliprects))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
+ return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
+ (unsigned long)cmdbuffer);
}
typedef struct drm_i915_irq_emit32 {
@@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
&request->irq_seq))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
+ (unsigned long)request);
}
typedef struct drm_i915_getparam32 {
int param;
@@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
+ (unsigned long)request);
}
typedef struct drm_i915_mem_alloc32 {
@@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
&request->region_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_I915_ALLOC, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
+ (unsigned long)request);
}
drm_ioctl_compat_t *i915_compat_ioctls[] = {
@@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aa7fd82aa6e..85f4c5de97e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -43,10 +43,13 @@
* we leave them always unmasked in IMR and then control enabling them through
* PIPESTAT alone.
*/
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX \
+ (I915_ASLE_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+ I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
+ I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -61,7 +64,7 @@
DRM_I915_VBLANK_PIPE_B)
void
-igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
}
static inline void
-igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
/* For display hotplug interrupt */
void
-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
static inline void
-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
dev_priv->irq_mask_reg |= mask;
@@ -157,6 +160,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
}
/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle (struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_display_irq(dev_priv, DE_GSE);
+ else
+ i915_enable_pipestat(dev_priv, 1,
+ I915_LEGACY_BLC_EVENT_ENABLE);
+}
+
+/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %d\n", pipe);
return 0;
}
@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
if (!i915_pipe_enabled(dev, pipe)) {
- DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+ DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+ "pipe %d\n", pipe);
return 0;
}
@@ -250,12 +269,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_sysfs_hotplug_event(dev);
}
-irqreturn_t igdng_irq_handler(struct drm_device *dev)
+irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier;
- u32 new_de_iir, new_gt_iir;
+ u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 new_de_iir, new_gt_iir, new_pch_iir;
struct drm_i915_master_private *master_priv;
/* disable master interrupt before clearing iir */
@@ -265,13 +284,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
+ pch_iir = I915_READ(SDEIIR);
for (;;) {
- if (de_iir == 0 && gt_iir == 0)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
break;
ret = IRQ_HANDLED;
+ /* clear PCH hotplug events before clearing the CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ new_pch_iir = I915_READ(SDEIIR);
+
I915_WRITE(DEIIR, de_iir);
new_de_iir = I915_READ(DEIIR);
I915_WRITE(GTIIR, gt_iir);
@@ -291,8 +315,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
DRM_WAKEUP(&dev_priv->irq_queue);
}
+ if (de_iir & DE_GSE)
+ ironlake_opregion_gse_intr(dev);
+
+ /* check event from PCH */
+ if ((de_iir & DE_PCH_EVENT) &&
+ (pch_iir & SDE_HOTPLUG_MASK)) {
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+ }
+
de_iir = new_de_iir;
gt_iir = new_gt_iir;
+ pch_iir = new_pch_iir;
}
I915_WRITE(DEIER, de_ier);
@@ -317,19 +351,19 @@ static void i915_error_work_func(struct work_struct *work)
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
- DRM_DEBUG("generating error event\n");
+ DRM_DEBUG_DRIVER("generating error event\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
if (IS_I965G(dev)) {
- DRM_DEBUG("resetting chip\n");
+ DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
if (!i965_reset(dev, GDRST_RENDER)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
}
} else {
- printk("reboot required\n");
+ DRM_DEBUG_DRIVER("reboot required\n");
}
}
}
@@ -355,7 +389,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
- DRM_DEBUG("out ot memory, not capturing error state\n");
+ DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
goto out;
}
@@ -512,7 +546,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- printk("i915: Waking up sleeping processes\n");
DRM_WAKEUP(&dev_priv->irq_queue);
}
@@ -535,8 +568,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&dev_priv->irq_received);
- if (IS_IGDNG(dev))
- return igdng_irq_handler(dev);
+ if (IS_IRONLAKE(dev))
+ return ironlake_irq_handler(dev);
iir = I915_READ(IIR);
@@ -568,14 +601,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
*/
if (pipea_stats & 0x8000ffff) {
if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG("pipe a underrun\n");
+ DRM_DEBUG_DRIVER("pipe a underrun\n");
I915_WRITE(PIPEASTAT, pipea_stats);
irq_received = 1;
}
if (pipeb_stats & 0x8000ffff) {
if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG("pipe b underrun\n");
+ DRM_DEBUG_DRIVER("pipe b underrun\n");
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
@@ -591,7 +624,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- DRM_DEBUG("hotplug event received, stat 0x%08x\n",
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
if (hotplug_status & dev_priv->hotplug_supported_mask)
queue_work(dev_priv->wq,
@@ -599,27 +632,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
-
- /* EOS interrupts occurs */
- if (IS_IGD(dev) &&
- (hotplug_status & CRT_EOS_INT_STATUS)) {
- u32 temp;
-
- DRM_DEBUG("EOS interrupt occurs\n");
- /* status is already cleared */
- temp = I915_READ(ADPA);
- temp &= ~ADPA_DAC_ENABLE;
- I915_WRITE(ADPA, temp);
-
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp &= ~CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
-
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- }
}
I915_WRITE(IIR, iir);
@@ -641,14 +653,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
+ if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 0);
+
+ if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+ intel_prepare_page_flip(dev, 1);
+
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
+ intel_finish_page_flip(dev, 0);
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
+ intel_finish_page_flip(dev, 1);
}
if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@@ -684,7 +704,7 @@ static int i915_emit_irq(struct drm_device * dev)
i915_kernel_lost_context(dev);
- DRM_DEBUG("\n");
+ DRM_DEBUG_DRIVER("\n");
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +729,8 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (IS_IGDNG(dev))
- igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -725,8 +745,8 @@ void i915_user_irq_put(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IGDNG(dev))
- igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ if (IS_IRONLAKE(dev))
+ ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
}
@@ -749,7 +769,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,7 +852,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return 0;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -854,7 +874,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -868,7 +888,7 @@ void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -976,7 +996,7 @@ void i915_hangcheck_elapsed(unsigned long data)
/* drm_dma.h hooks
*/
-static void igdng_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -992,14 +1012,21 @@ static void igdng_irq_preinstall(struct drm_device *dev)
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
(void) I915_READ(GTIER);
+
+ /* south display irq */
+ I915_WRITE(SDEIMR, 0xffffffff);
+ I915_WRITE(SDEIER, 0x0);
+ (void) I915_READ(SDEIER);
}
-static int igdng_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
u32 render_mask = GT_USER_INTERRUPT;
+ u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
dev_priv->de_irq_enable_reg = display_mask;
@@ -1019,6 +1046,14 @@ static int igdng_irq_postinstall(struct drm_device *dev)
I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
(void) I915_READ(GTIER);
+ dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+ dev_priv->pch_irq_enable_reg = hotplug_mask;
+
+ I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
+ I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
+ (void) I915_READ(SDEIER);
+
return 0;
}
@@ -1031,8 +1066,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_IGDNG(dev)) {
- igdng_irq_preinstall(dev);
+ if (IS_IRONLAKE(dev)) {
+ ironlake_irq_preinstall(dev);
return;
}
@@ -1059,8 +1094,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- if (IS_IGDNG(dev))
- return igdng_irq_postinstall(dev);
+ if (IS_IRONLAKE(dev))
+ return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -1120,7 +1155,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void igdng_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1178,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
- if (IS_IGDNG(dev)) {
- igdng_irq_uninstall(dev);
+ if (IS_IRONLAKE(dev)) {
+ ironlake_irq_uninstall(dev);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 2d5193556d3..7cc8410239c 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -118,6 +118,10 @@ struct opregion_asle {
#define ASLE_BACKLIGHT_FAIL (2<<12)
#define ASLE_PFIT_FAIL (2<<14)
#define ASLE_PWM_FREQ_FAIL (2<<16)
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
else {
- if (IS_IGD(dev)) {
+ if (IS_PINEVIEW(dev)) {
blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
asle_req = asle->aslc & ASLE_REQ_MSK;
if (!asle_req) {
- DRM_DEBUG("non asle set request??\n");
+ DRM_DEBUG_DRIVER("non asle set request??\n");
return;
}
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
+static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 cpu_pwm_ctl, pch_pwm_ctl2;
+ u32 max_backlight, level;
+
+ if (!(bclp & ASLE_BCLP_VALID))
+ return ASLE_BACKLIGHT_FAILED;
+
+ bclp &= ASLE_BCLP_MSK;
+ if (bclp < 0 || bclp > 255)
+ return ASLE_BACKLIGHT_FAILED;
+
+ cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
+ pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ /* get the max PWM frequency */
+ max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
+ /* calculate the expected PWM frequency */
+ level = (bclp * max_backlight) / 255;
+ /* preserve the high 16 bits */
+ cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
+ /* write the updated PWM frequency */
+ I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
+
+ asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+ return 0;
+}
+
+void ironlake_opregion_gse_intr(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct opregion_asle *asle = dev_priv->opregion.asle;
+ u32 asle_stat = 0;
+ u32 asle_req;
+
+ if (!asle)
+ return;
+
+ asle_req = asle->aslc & ASLE_REQ_MSK;
+
+ if (!asle_req) {
+ DRM_DEBUG_DRIVER("non asle set request??\n");
+ return;
+ }
+
+ if (asle_req & ASLE_SET_ALS_ILLUM) {
+ DRM_DEBUG_DRIVER("Illum is not supported\n");
+ asle_stat |= ASLE_ALS_ILLUM_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_BACKLIGHT)
+ asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+
+ if (asle_req & ASLE_SET_PFIT) {
+ DRM_DEBUG_DRIVER("Pfit is not supported\n");
+ asle_stat |= ASLE_PFIT_FAILED;
+ }
+
+ if (asle_req & ASLE_SET_PWM_FREQ) {
+ DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+ asle_stat |= ASLE_PWM_FREQ_FAILED;
+ }
+
+ asle->aslc = asle_stat;
+}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 1,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ intel_enable_asle(dev);
spin_unlock_irqrestore(&dev_priv->user_irq_lock,
irqflags);
}
@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
- DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+ DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
- DRM_DEBUG("ACPI OpRegion not supported!\n");
+ DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
opregion->header = base;
if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG("opregion signature mismatch\n");
+ DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- DRM_DEBUG("Public ACPI methods supported\n");
+ DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_didl_outputs(dev);
} else {
- DRM_DEBUG("Public ACPI methods not supported\n");
+ DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
err = -ENOTSUPP;
goto err_out;
}
opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG("SWSCI supported\n");
+ DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
}
if (mboxes & MBOX_ASLE) {
- DRM_DEBUG("ASLE supported\n");
+ DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion_enable_asle(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1687edf6879..974b3cf7061 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -140,6 +140,7 @@
#define MI_NOOP MI_INSTR(0, 0)
#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +152,13 @@
#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -260,6 +267,8 @@
#define HWS_PGA 0x02080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
+#define PWRCTXA 0x2088 /* 965GM+ only */
+#define PWRCTX_EN (1<<0)
#define IPEIR 0x02088
#define IPEHR 0x0208c
#define INSTDONE 0x02090
@@ -405,6 +414,13 @@
# define GPIO_DATA_VAL_IN (1 << 12)
# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+#define GMBUS0 0x5100
+#define GMBUS1 0x5104
+#define GMBUS2 0x5108
+#define GMBUS3 0x510c
+#define GMBUS4 0x5110
+#define GMBUS5 0x5120
+
/*
* Clock control & power management
*/
@@ -435,7 +451,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -512,7 +528,7 @@
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
/* i830, required in DVO non-gang */
#define PLL_P2_DIVIDE_BY_4 (1 << 23)
#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -522,7 +538,7 @@
#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
#define PLL_REF_INPUT_MASK (3 << 13)
#define PLL_LOAD_PULSE_PHASE_SHIFT 9
-/* IGDNG */
+/* Ironlake */
# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
@@ -586,12 +602,12 @@
#define FPB0 0x06048
#define FPB1 0x0604c
#define FP_N_DIV_MASK 0x003f0000
-#define FP_N_IGD_DIV_MASK 0x00ff0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
#define FP_M1_DIV_MASK 0x00003f00
#define FP_M1_DIV_SHIFT 8
#define FP_M2_DIV_MASK 0x0000003f
-#define FP_M2_IGD_DIV_MASK 0x000000ff
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST 0x606c
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -769,7 +785,8 @@
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
-
+#define RCX_SW_EXIT (1<<23)
+#define RSX_STATUS_MASK 0x00700000
#define PEG_BAND_GAP_DATA 0x14d68
/*
@@ -844,7 +861,6 @@
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
-#define CRT_EOS_INT_EN (1 << 10)
#define CRT_HOTPLUG_INT_EN (1 << 9)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
@@ -868,7 +884,6 @@
HDMID_HOTPLUG_INT_EN | \
SDVOB_HOTPLUG_INT_EN | \
SDVOC_HOTPLUG_INT_EN | \
- TV_HOTPLUG_INT_EN | \
CRT_HOTPLUG_INT_EN)
@@ -879,7 +894,6 @@
#define DPC_HOTPLUG_INT_STATUS (1 << 28)
#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (1 << 27)
-#define CRT_EOS_INT_STATUS (1 << 12)
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -1620,7 +1634,7 @@
#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
#define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
/** limit RGB values to avoid confusing TVs */
#define DP_COLOR_RANGE_16_235 (1 << 8)
@@ -1808,7 +1822,7 @@
#define DSPFW3 0x7003c
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
-#define IGD_SELF_REFRESH_EN (1<<30)
+#define PINEVIEW_SELF_REFRESH_EN (1<<30)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
@@ -1824,16 +1838,16 @@
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
-#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
-#define IGD_FIFO_LINE_SIZE 64
-#define IGD_MAX_WM 0x1ff
-#define IGD_DFT_WM 0x3f
-#define IGD_DFT_HPLLOFF_WM 0
-#define IGD_GUARD_WM 10
-#define IGD_CURSOR_FIFO 64
-#define IGD_CURSOR_MAX_WM 0x3f
-#define IGD_CURSOR_DFT_WM 0
-#define IGD_CURSOR_GUARD_WM 5
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
/*
* The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +1921,7 @@
#define DISPPLANE_16BPP (0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_MASK (1<<24)
@@ -1918,7 +1933,7 @@
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define DSPAADDR 0x70184
#define DSPASTRIDE 0x70188
@@ -1971,7 +1986,7 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
-/* IGDNG */
+/* Ironlake */
#define CPU_VGACNTRL 0x41000
@@ -2117,6 +2132,7 @@
#define SDE_PORTC_HOTPLUG (1 << 9)
#define SDE_PORTB_HOTPLUG (1 << 8)
#define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (0xf << 8)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
@@ -2157,6 +2173,13 @@
#define PCH_GPIOE 0xc5020
#define PCH_GPIOF 0xc5024
+#define PCH_GMBUS0 0xc5100
+#define PCH_GMBUS1 0xc5104
+#define PCH_GMBUS2 0xc5108
+#define PCH_GMBUS3 0xc510c
+#define PCH_GMBUS4 0xc5110
+#define PCH_GMBUS5 0xc5120
+
#define PCH_DPLL_A 0xc6014
#define PCH_DPLL_B 0xc6018
@@ -2292,7 +2315,7 @@
#define FDI_DP_PORT_WIDTH_X3 (2<<19)
#define FDI_DP_PORT_WIDTH_X4 (3<<19)
#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
-/* IGDNG: hardwired to 1 */
+/* Ironlake: hardwired to 1 */
#define FDI_TX_PLL_ENABLE (1<<14)
/* both Tx and Rx */
#define FDI_SCRAMBLING_ENABLE (0<<7)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6eec8171a44..d5ebb00a9d4 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -27,14 +27,14 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
-#include "i915_drv.h"
+#include "intel_drv.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
} else {
dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
if (!i915_pipe_enabled(dev, pipe))
return;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
fpb1_reg = FPB1;
}
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
}
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
/* CRT state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
} else {
dev_priv->saveADPA = I915_READ(ADPA);
}
/* LVDS state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveLVDS = I915_READ(LVDS);
}
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
/* CRT state */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
else
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
} else if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev)
}
/* VGA state */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -733,8 +733,10 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
+ if (I915_HAS_RC6(dev)) {
dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+ dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
+ }
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -742,7 +744,7 @@ int i915_save_state(struct drm_device *dev)
i915_save_display(dev);
/* Interrupt state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
dev_priv->saveDEIER = I915_READ(DEIER);
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
@@ -754,10 +756,6 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- /* Clock gating state */
- dev_priv->saveD_STATE = I915_READ(D_STATE);
- dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
-
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -796,8 +794,10 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
/* Render Standby */
- if (IS_I965G(dev) && IS_MOBILE(dev))
+ if (I915_HAS_RC6(dev)) {
I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+ I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
+ }
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -817,7 +817,7 @@ int i915_restore_state(struct drm_device *dev)
i915_restore_display(dev);
/* Interrupt state */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(DEIER, dev_priv->saveDEIER);
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +830,7 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
- I915_WRITE (D_STATE, dev_priv->saveD_STATE);
- I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+ intel_init_clock_gating(dev);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +845,9 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ /* I2C state */
+ intel_i2c_reset_gmbus(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96cd256e60e..f2756774758 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,6 +114,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
int lfp_data_size, dvo_timing_offset;
+ int i, temp_downclock;
+ struct drm_display_mode *temp_mode;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
@@ -159,9 +161,49 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+ DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
+ temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
+ temp_downclock = panel_fixed_mode->clock;
+ /*
+ * enumerate the LVDS panel timing info entries in the VBT to check
+ * whether a lower-clock (downclock) mode is available.
+ */
+ for (i = 0; i < 16; i++) {
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
+ dvo_timing = (struct lvds_dvo_timing *)
+ ((unsigned char *)entry + dvo_timing_offset);
+
+ fill_detail_timing_data(temp_mode, dvo_timing);
+
+ if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
+ temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
+ temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
+ temp_mode->htotal == panel_fixed_mode->htotal &&
+ temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
+ temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
+ temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
+ temp_mode->vtotal == panel_fixed_mode->vtotal &&
+ temp_mode->clock < temp_downclock) {
+ /*
+ * a matching mode with a lower clock was found; keep
+ * the lowest clock seen so far.
+ */
+ temp_downclock = temp_mode->clock;
+ }
+ /* clear it to zero */
+ memset(temp_mode, 0, sizeof(*temp_mode));
+ }
+ kfree(temp_mode);
+ if (temp_downclock < panel_fixed_mode->clock) {
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ temp_downclock, panel_fixed_mode->clock);
+ }
return;
}
@@ -217,7 +259,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IGDNG(dev_priv->dev))
+ else if (IS_IRONLAKE(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
@@ -241,22 +283,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
GPIOF,
};
- /* Set sensible defaults in case we can't find the general block
- or it is the wrong chipset */
- dev_priv->crt_ddc_bus = -1;
-
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
u16 block_size = get_blocksize(general);
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
- DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+ DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if ((bus_pin >= 1) && (bus_pin <= 6)) {
dev_priv->crt_ddc_bus =
crt_bus_map_table[bus_pin-1];
}
} else {
- DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+ DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
}
}
@@ -274,7 +312,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
- DRM_DEBUG("No general definition block is found\n");
+ DRM_DEBUG_KMS("No general definition block is found\n");
return;
}
/* judge whether the size of child device meets the requirements.
@@ -284,7 +322,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
*/
if (p_defs->child_dev_size != sizeof(*p_child)) {
/* different child dev size . Ignore it */
- DRM_DEBUG("different child size is found. Invalid.\n");
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
return;
}
/* get the block size of general definitions */
@@ -310,11 +348,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
if (p_child->dvo_port != DEVICE_PORT_DVOB &&
p_child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG("Incorrect SDVO port. Skip it \n");
+ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
continue;
}
- DRM_DEBUG("the SDVO device with slave addr %2x is found on "
- "%s port\n",
+ DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+ " %s port\n",
p_child->slave_addr,
(p_child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
@@ -325,21 +363,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->initialized = 1;
} else {
- DRM_DEBUG("Maybe one SDVO port is shared by "
+ DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
}
if (p_child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG("there exists the slave2_addr. Maybe this "
- "is a SDVO device with multiple inputs.\n");
+ DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
}
count++;
}
if (!count) {
/* No SDVO device info is found */
- DRM_DEBUG("No SDVO device info is found in VBT\n");
+ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
}
return;
}
@@ -366,6 +404,70 @@ parse_driver_features(struct drm_i915_private *dev_priv,
dev_priv->render_reclock_avail = true;
}
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+ struct bdb_header *bdb)
+{
+ struct bdb_general_definitions *p_defs;
+ struct child_device_config *p_child, *child_dev_ptr;
+ int i, child_device_num, count;
+ u16 block_size;
+
+ p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ if (!p_defs) {
+ DRM_DEBUG_KMS("No general definition block is found\n");
+ return;
+ }
+ /* judge whether the size of child device meets the requirements.
+ * If the child device size obtained from general definition block
+ * is different with sizeof(struct child_device_config), skip the
+ * parsing of sdvo device info
+ */
+ if (p_defs->child_dev_size != sizeof(*p_child)) {
+ /* different child dev size . Ignore it */
+ DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ return;
+ }
+ /* get the block size of general definitions */
+ block_size = get_blocksize(p_defs);
+ /* get the number of child device */
+ child_device_num = (block_size - sizeof(*p_defs)) /
+ sizeof(*p_child);
+ count = 0;
+ /* get the number of child device that is present */
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ count++;
+ }
+ if (!count) {
+ DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+ return;
+ }
+ dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+ if (!dev_priv->child_dev) {
+ DRM_DEBUG_KMS("No memory space for child device\n");
+ return;
+ }
+
+ dev_priv->child_dev_num = count;
+ count = 0;
+ for (i = 0; i < child_device_num; i++) {
+ p_child = &(p_defs->devices[i]);
+ if (!p_child->device_type) {
+ /* skip the device block if device type is invalid */
+ continue;
+ }
+ child_dev_ptr = dev_priv->child_dev + count;
+ count++;
+ memcpy((void *)child_dev_ptr, (void *)p_child,
+ sizeof(*p_child));
+ }
+ return;
+}
/**
* intel_init_bios - initialize VBIOS settings & find VBT
* @dev: DRM device
@@ -417,6 +519,7 @@ intel_init_bios(struct drm_device *dev)
parse_lfp_panel_data(dev_priv, bdb);
parse_sdvo_panel_data(dev_priv, bdb);
parse_sdvo_device_mapping(dev_priv, bdb);
+ parse_device_mapping(dev_priv, bdb);
parse_driver_features(dev_priv, bdb);
pci_unmap_rom(pdev, bios);
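parse_device_mapping(), added above, walks the VBT general-definitions block twice: a first pass counts child devices with a non-zero device_type, a second pass copies those entries into a freshly allocated array. A hedged sketch of the same count-then-copy pattern, with plain ints standing in for struct child_device_config and calloc standing in for kzalloc:

#include <stdio.h>
#include <stdlib.h>

/* Copy the non-zero ("present") entries of src into a new array.
 * Returns the number copied and stores the allocation in *out.
 * Illustrative only: the kernel code copies whole child_device_config
 * structs and allocates with kzalloc(GFP_KERNEL). */
static int copy_present(const int *src, int n, int **out)
{
    int i, count = 0;

    for (i = 0; i < n; i++)       /* pass 1: count present entries */
        if (src[i])
            count++;
    if (!count) {
        *out = NULL;
        return 0;
    }
    *out = calloc(count, sizeof(**out));
    if (!*out)
        return -1;
    count = 0;
    for (i = 0; i < n; i++)       /* pass 2: copy present entries */
        if (src[i])
            (*out)[count++] = src[i];
    return count;
}

int main(void)
{
    int devices[] = { 0x1022, 0, 0x60D2, 0, 0 };
    int *present;
    int n = copy_present(devices, 5, &present);

    printf("%d child devices kept\n", n);
    free(present);
    return 0;
}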
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 0f8e5f69ac7..425ac9d7f72 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *dev);
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_eDP 0x78C6
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* define the PORT for DP output type */
+#define PORT_IDPB 7
+#define PORT_IDPC 8
+#define PORT_IDPD 9
+
#endif /* _I830_BIOS_H_ */
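The new DEVICE_TYPE_* values give later consumers of the child-device table a way to tell the internal panel, TV-out, HDMI, DP and eDP apart by their device_type field. A purely illustrative classifier over the constants listed above; the driver's actual consumer of these defines is not part of this patch:

#include <stdio.h>

#define DEVICE_TYPE_INT_LFP 0x1022
#define DEVICE_TYPE_INT_TV  0x1009
#define DEVICE_TYPE_HDMI    0x60D2
#define DEVICE_TYPE_DP      0x68C6
#define DEVICE_TYPE_eDP     0x78C6

static const char *describe(unsigned short device_type)
{
    switch (device_type) {
    case DEVICE_TYPE_INT_LFP: return "integrated LVDS panel";
    case DEVICE_TYPE_INT_TV:  return "integrated TV-out";
    case DEVICE_TYPE_HDMI:    return "HDMI";
    case DEVICE_TYPE_DP:      return "DisplayPort";
    case DEVICE_TYPE_eDP:     return "embedded DisplayPort";
    default:                  return "unknown/absent";
    }
}

int main(void)
{
    printf("0x60D2 -> %s\n", describe(0x60D2));
    return 0;
}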
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e5051446c48..9f3d3e56341 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = PCH_ADPA;
else
reg = ADPA;
@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
}
I915_WRITE(reg, temp);
-
- if (IS_IGD(dev)) {
- if (mode == DRM_MODE_DPMS_OFF) {
- /* turn off DAC */
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp &= ~CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
-
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- } else {
- /* turn on DAC. EOS interrupt must be enabled after DAC
- * is enabled, so it sounds not good to enable it in
- * i915_driver_irq_postinstall()
- * wait 12.5ms after DAC is enabled
- */
- msleep(13);
- temp = I915_READ(PORT_HOTPLUG_STAT);
- if (temp & CRT_EOS_INT_STATUS)
- I915_WRITE(PORT_HOTPLUG_STAT,
- CRT_EOS_INT_STATUS);
- temp = I915_READ(PORT_HOTPLUG_EN);
- temp |= CRT_EOS_INT_EN;
- I915_WRITE(PORT_HOTPLUG_EN, temp);
- }
- }
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
else
dpll_md_reg = DPLL_B_MD;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
I915_WRITE(BCLRPAT_B, 0);
}
I915_WRITE(adpa_reg, adpa);
}
-static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,7 +166,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
ADPA_CRT_HOTPLUG_ENABLE |
ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
- DRM_DEBUG("pch crt adpa 0x%x", adpa);
+ DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
I915_WRITE(PCH_ADPA, adpa);
while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +199,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
u32 hotplug_en;
int i, tries = 0;
- if (IS_IGDNG(dev))
- return intel_igdng_crt_detect_hotplug(connector);
+ if (IS_IRONLAKE(dev))
+ return intel_ironlake_crt_detect_hotplug(connector);
/*
* On 4 series desktop, CRT detect sequence need to be done twice
@@ -549,12 +521,12 @@ void intel_crt_init(struct drm_device *dev)
&intel_output->enc);
/* Set up the DDC bus. */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
i2c_reg = PCH_GPIOA;
else {
i2c_reg = GPIOA;
/* Use VBT information for CRT DDC if available */
- if (dev_priv->crt_ddc_bus != -1)
+ if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}
intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 099f420de57..52cd9b006da 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -32,7 +32,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
@@ -102,32 +102,32 @@ struct intel_limit {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
-#define IGD_VCO_MIN 1700000
-#define IGD_VCO_MAX 3500000
+#define PINEVIEW_VCO_MIN 1700000
+#define PINEVIEW_VCO_MAX 3500000
#define I9XX_N_MIN 1
#define I9XX_N_MAX 6
-/* IGD's Ncounter is a ring counter */
-#define IGD_N_MIN 3
-#define IGD_N_MAX 6
+/* Pineview's Ncounter is a ring counter */
+#define PINEVIEW_N_MIN 3
+#define PINEVIEW_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
-#define IGD_M_MIN 2
-#define IGD_M_MAX 256
+#define PINEVIEW_M_MIN 2
+#define PINEVIEW_M_MAX 256
#define I9XX_M1_MIN 10
#define I9XX_M1_MAX 22
#define I9XX_M2_MIN 5
#define I9XX_M2_MAX 9
-/* IGD M1 is reserved, and must be 0 */
-#define IGD_M1_MIN 0
-#define IGD_M1_MAX 0
-#define IGD_M2_MIN 0
-#define IGD_M2_MAX 254
+/* Pineview M1 is reserved, and must be 0 */
+#define PINEVIEW_M1_MIN 0
+#define PINEVIEW_M1_MAX 0
+#define PINEVIEW_M2_MIN 0
+#define PINEVIEW_M2_MAX 254
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
#define I9XX_P_LVDS_MAX 98
-#define IGD_P_LVDS_MIN 7
-#define IGD_P_LVDS_MAX 112
+#define PINEVIEW_P_LVDS_MIN 7
+#define PINEVIEW_P_LVDS_MAX 112
#define I9XX_P1_MIN 1
#define I9XX_P1_MAX 8
#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -234,33 +234,33 @@ struct intel_limit {
#define G4X_P2_DISPLAY_PORT_FAST 10
#define G4X_P2_DISPLAY_PORT_LIMIT 0
-/* IGDNG */
+/* Ironlake */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
*/
-#define IGDNG_DOT_MIN 25000
-#define IGDNG_DOT_MAX 350000
-#define IGDNG_VCO_MIN 1760000
-#define IGDNG_VCO_MAX 3510000
-#define IGDNG_N_MIN 1
-#define IGDNG_N_MAX 5
-#define IGDNG_M_MIN 79
-#define IGDNG_M_MAX 118
-#define IGDNG_M1_MIN 12
-#define IGDNG_M1_MAX 23
-#define IGDNG_M2_MIN 5
-#define IGDNG_M2_MAX 9
-#define IGDNG_P_SDVO_DAC_MIN 5
-#define IGDNG_P_SDVO_DAC_MAX 80
-#define IGDNG_P_LVDS_MIN 28
-#define IGDNG_P_LVDS_MAX 112
-#define IGDNG_P1_MIN 1
-#define IGDNG_P1_MAX 8
-#define IGDNG_P2_SDVO_DAC_SLOW 10
-#define IGDNG_P2_SDVO_DAC_FAST 5
-#define IGDNG_P2_LVDS_SLOW 14 /* single channel */
-#define IGDNG_P2_LVDS_FAST 7 /* double channel */
-#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */
+#define IRONLAKE_DOT_MIN 25000
+#define IRONLAKE_DOT_MAX 350000
+#define IRONLAKE_VCO_MIN 1760000
+#define IRONLAKE_VCO_MAX 3510000
+#define IRONLAKE_N_MIN 1
+#define IRONLAKE_N_MAX 5
+#define IRONLAKE_M_MIN 79
+#define IRONLAKE_M_MAX 118
+#define IRONLAKE_M1_MIN 12
+#define IRONLAKE_M1_MAX 23
+#define IRONLAKE_M2_MIN 5
+#define IRONLAKE_M2_MAX 9
+#define IRONLAKE_P_SDVO_DAC_MIN 5
+#define IRONLAKE_P_SDVO_DAC_MAX 80
+#define IRONLAKE_P_LVDS_MIN 28
+#define IRONLAKE_P_LVDS_MAX 112
+#define IRONLAKE_P1_MIN 1
+#define IRONLAKE_P1_MAX 8
+#define IRONLAKE_P2_SDVO_DAC_SLOW 10
+#define IRONLAKE_P2_SDVO_DAC_FAST 5
+#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
+#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
+#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
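Per the comment above, the IRONLAKE_* N/M1/M2 limits are register-coded, i.e. the actual divider minus two. A standalone worked example of turning register-coded values into a dot clock; the m derivation matches intel_clock() later in this patch, while the vco/dot step assumes the usual refclk*m/(n+2) and vco/(p1*p2) relation, which is not reproduced in these hunks:

#include <stdio.h>

int main(void)
{
    int refclk = 120000;          /* kHz, the Ironlake refclk used in this patch */
    int n = 1, m1 = 13, m2 = 5;   /* register-coded values, made up for illustration */
    int p1 = 2, p2 = 10;

    int m = 5 * (m1 + 2) + (m2 + 2);    /* register + 2 -> actual dividers */
    long vco = (long)refclk * m / (n + 2);
    long dot = vco / (p1 * p2);

    printf("m=%d vco=%ld kHz dot=%ld kHz\n", m, vco, dot);
    return 0;
}

With these made-up values m lands at 82, within the IRONLAKE_M range above, and the dot clock at 164 MHz, within IRONLAKE_DOT_MIN/MAX.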
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -272,15 +272,15 @@ static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
-intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
+intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -453,13 +453,13 @@ static const intel_limit_t intel_limits_g4x_display_port = {
.find_pll = intel_find_pll_g4x_dp,
};
-static const intel_limit_t intel_limits_igd_sdvo = {
+static const intel_limit_t intel_limits_pineview_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
- .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
- .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
- .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
- .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
- .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
+ .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
+ .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
+ .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
+ .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
+ .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
.p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
@@ -468,59 +468,59 @@ static const intel_limit_t intel_limits_igd_sdvo = {
.find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_igd_lvds = {
+static const intel_limit_t intel_limits_pineview_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
- .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
- .n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
- .m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
- .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
- .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
- .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
+ .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
+ .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
+ .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
+ .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
+ .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
+ .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
- /* IGD only supports single-channel mode. */
+ /* Pineview only supports single-channel mode. */
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
.find_reduced_pll = intel_find_best_reduced_PLL,
};
-static const intel_limit_t intel_limits_igdng_sdvo = {
- .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
- .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
- .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
- .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
- .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
- .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
- .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX },
- .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
- .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
- .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
- .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
- .find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_sdvo = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
+ .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
+ .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+ .find_pll = intel_ironlake_find_best_PLL,
};
-static const intel_limit_t intel_limits_igdng_lvds = {
- .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
- .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
- .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
- .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
- .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
- .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
- .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX },
- .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
- .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
- .p2_slow = IGDNG_P2_LVDS_SLOW,
- .p2_fast = IGDNG_P2_LVDS_FAST },
- .find_pll = intel_igdng_find_best_PLL,
+static const intel_limit_t intel_limits_ironlake_lvds = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
+ .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_P2_LVDS_SLOW,
+ .p2_fast = IRONLAKE_P2_LVDS_FAST },
+ .find_pll = intel_ironlake_find_best_PLL,
};
-static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_igdng_lvds;
+ limit = &intel_limits_ironlake_lvds;
else
- limit = &intel_limits_igdng_sdvo;
+ limit = &intel_limits_ironlake_sdvo;
return limit;
}
@@ -557,20 +557,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_IGDNG(dev))
- limit = intel_igdng_limit(crtc);
+ if (IS_IRONLAKE(dev))
+ limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
- } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
+ } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
- } else if (IS_IGD(dev)) {
+ } else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_igd_lvds;
+ limit = &intel_limits_pineview_lvds;
else
- limit = &intel_limits_igd_sdvo;
+ limit = &intel_limits_pineview_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
@@ -580,8 +580,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
return limit;
}
-/* m1 is reserved as 0 in IGD, n is a ring counter */
-static void igd_clock(int refclk, intel_clock_t *clock)
+/* m1 is reserved as 0 in Pineview, n is a ring counter */
+static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -591,8 +591,8 @@ static void igd_clock(int refclk, intel_clock_t *clock)
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
- if (IS_IGD(dev)) {
- igd_clock(refclk, clock);
+ if (IS_PINEVIEW(dev)) {
+ pineview_clock(refclk, clock);
return;
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
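pineview_clock() and the generic path above differ only in how m is derived: Pineview keeps m1 reserved at 0 and uses m2 + 2 with n acting as a ring counter, while the i9xx path uses 5*(m1+2) + (m2+2). A standalone sketch contrasting the two; the vco/dot tails are assumptions, since intel_clock() is cut off at this hunk boundary:

#include <stdio.h>

struct clk { int n, m1, m2, p1, p2, m, p, vco, dot; };

/* Pineview: m1 is reserved as 0, n behaves as a ring counter. */
static void pineview_clock_sketch(int refclk, struct clk *c)
{
    c->m = c->m2 + 2;
    c->p = c->p1 * c->p2;
    c->vco = refclk * c->m / c->n;        /* assumed tail of the helper */
    c->dot = c->vco / c->p;
}

/* Generic i9xx-style derivation of m. */
static void i9xx_clock_sketch(int refclk, struct clk *c)
{
    c->m = 5 * (c->m1 + 2) + (c->m2 + 2);
    c->p = c->p1 * c->p2;
    c->vco = refclk * c->m / (c->n + 2);  /* assumed tail of the helper */
    c->dot = c->vco / c->p;
}

int main(void)
{
    struct clk pnv = { .n = 3, .m2 = 100, .p1 = 2, .p2 = 14 };
    struct clk gen = { .n = 4, .m1 = 14, .m2 = 6, .p1 = 2, .p2 = 7 };

    pineview_clock_sketch(96000, &pnv);
    i9xx_clock_sketch(96000, &gen);
    printf("pineview dot %d kHz, i9xx dot %d kHz\n", pnv.dot, gen.dot);
    return 0;
}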
@@ -657,7 +657,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid ("m1 out of range\n");
- if (clock->m1 <= clock->m2 && !IS_IGD(dev))
+ if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid ("m out of range\n");
@@ -706,16 +706,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
memset (best_clock, 0, sizeof (*best_clock));
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in IGD */
- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
- break;
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+ clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
+ break;
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
intel_clock(dev, refclk, &clock);
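The reordered loops above still implement the same brute-force PLL search: try every legal m1/m2/n/p1 combination, compute the resulting dot clock, and keep the candidate closest to the target. A hedged, self-contained sketch of that selection strategy, with made-up limits and the same assumed clock formula as in the earlier sketches:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

struct clk { int n, m1, m2, p1, m, dot; };

/* Exhaustively search a toy parameter space for the dot clock closest
 * to target (kHz). Simplified: fixed p2, made-up limits, i9xx-style m. */
static int find_best(int refclk, int target, struct clk *best)
{
    int err = INT_MAX;
    struct clk c;
    const int p2 = 10;

    for (c.m1 = 10; c.m1 <= 22; c.m1++)
        for (c.m2 = 5; c.m2 <= 9; c.m2++) {
            if (c.m2 >= c.m1)   /* mirrors the m1 > m2 rule above */
                break;
            for (c.n = 1; c.n <= 6; c.n++)
                for (c.p1 = 1; c.p1 <= 8; c.p1++) {
                    c.m = 5 * (c.m1 + 2) + (c.m2 + 2);
                    int vco = refclk * c.m / (c.n + 2);
                    c.dot = vco / (c.p1 * p2);
                    int this_err = abs(c.dot - target);
                    if (this_err < err) {
                        err = this_err;
                        *best = c;
                    }
                }
        }
    return err != INT_MAX;
}

int main(void)
{
    struct clk best;

    if (find_best(96000, 108000, &best))
        printf("best dot %d kHz (n=%d m1=%d m2=%d p1=%d)\n",
               best.dot, best.n, best.m1, best.m2, best.p1);
    return 0;
}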
@@ -751,8 +752,8 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
- /* m1 is always 0 in IGD */
- if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+ /* m1 is always 0 in Pineview */
+ if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
break;
for (clock.n = limit->n.min; clock.n <= limit->n.max;
clock.n++) {
@@ -833,8 +834,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
@@ -857,8 +858,8 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
}
static bool
-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
+intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -871,7 +872,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- return intel_find_pll_igdng_dp(limit, crtc, target,
+ return intel_find_pll_ironlake_dp(limit, crtc, target,
refclk, best_clock);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -949,7 +950,7 @@ void
intel_wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
+ msleep(20);
}
/* Parameters have changed, update FBC info */
@@ -994,7 +995,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
@@ -1017,7 +1018,7 @@ void i8xx_disable_fbc(struct drm_device *dev)
intel_wait_for_vblank(dev);
- DRM_DEBUG("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
}
static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
@@ -1062,7 +1063,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
- DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void g4x_disable_fbc(struct drm_device *dev)
@@ -1076,7 +1077,7 @@ void g4x_disable_fbc(struct drm_device *dev)
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
intel_wait_for_vblank(dev);
- DRM_DEBUG("disabled FBC\n");
+ DRM_DEBUG_KMS("disabled FBC\n");
}
static bool g4x_fbc_enabled(struct drm_crtc *crtc)
@@ -1141,25 +1142,27 @@ static void intel_update_fbc(struct drm_crtc *crtc,
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
if (intel_fb->obj->size > dev_priv->cfb_size) {
- DRM_DEBUG("framebuffer too large, disabling compression\n");
+ DRM_DEBUG_KMS("framebuffer too large, disabling "
+ "compression\n");
goto out_disable;
}
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG("mode incompatible with compression, disabling\n");
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
goto out_disable;
}
if ((mode->hdisplay > 2048) ||
(mode->vdisplay > 1536)) {
- DRM_DEBUG("mode too large for compression, disabling\n");
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
- DRM_DEBUG("plane not 0, disabling compression\n");
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
goto out_disable;
}
if (obj_priv->tiling_mode != I915_TILING_X) {
- DRM_DEBUG("framebuffer not tiled, disabling compression\n");
+ DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
goto out_disable;
}
@@ -1181,13 +1184,57 @@ static void intel_update_fbc(struct drm_crtc *crtc,
return;
out_disable:
- DRM_DEBUG("unsupported config, disabling FBC\n");
+ DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
/* Multiple disables should be harmless */
if (dev_priv->display.fbc_enabled(crtc))
dev_priv->display.disable_fbc(dev);
}
static int
+intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ u32 alignment;
+ int ret;
+
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_NONE:
+ alignment = 64 * 1024;
+ break;
+ case I915_TILING_X:
+ /* pin() will align the object as required by fence */
+ alignment = 0;
+ break;
+ case I915_TILING_Y:
+ /* FIXME: Is this true? */
+ DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+ return -EINVAL;
+ default:
+ BUG();
+ }
+
+ ret = i915_gem_object_pin(obj, alignment);
+ if (ret != 0)
+ return ret;
+
+ /* Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always install
+ * a fence as the cost is not that onerous.
+ */
+ if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+ obj_priv->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
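intel_pin_and_fence_fb_obj(), added in the hunk above, centralizes what intel_pipe_set_base() used to do inline: choose a GTT alignment from the buffer's tiling mode, pin the object, and install a fence register for tiled scan-out. A hedged sketch of just the alignment decision; the pin and fence calls themselves are kernel-internal and not modeled here:

#include <stdio.h>

enum tiling { TILING_NONE, TILING_X, TILING_Y };

/* Returns the required GTT alignment in bytes, or -1 for an unsupported
 * scan-out tiling mode. 0 means "let pin() pick the alignment the fence
 * needs", as in the helper above. */
static long scanout_alignment(enum tiling mode)
{
    switch (mode) {
    case TILING_NONE:
        return 64 * 1024;
    case TILING_X:
        return 0;   /* pin() aligns as required by the fence */
    case TILING_Y:
        return -1;  /* Y-tiled scan-out is rejected */
    }
    return -1;
}

int main(void)
{
    printf("linear: %ld bytes, X-tiled: %ld\n",
           scanout_alignment(TILING_NONE), scanout_alignment(TILING_X));
    return 0;
}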
@@ -1206,12 +1253,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
- u32 dspcntr, alignment;
+ u32 dspcntr;
int ret;
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -1228,24 +1275,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
obj = intel_fb->obj;
obj_priv = obj->driver_private;
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- alignment = 64 * 1024;
- break;
- case I915_TILING_X:
- /* pin() will align the object as required by fence */
- alignment = 0;
- break;
- case I915_TILING_Y:
- /* FIXME: Is this true? */
- DRM_ERROR("Y tiled not allowed for scan out buffers\n");
- return -EINVAL;
- default:
- BUG();
- }
-
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, alignment);
+ ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1258,20 +1289,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
- * whereas 965+ only requires a fence if using framebuffer compression.
- * For simplicity, we always install a fence as the cost is not that onerous.
- */
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
-
dspcntr = I915_READ(dspcntr_reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -1287,7 +1304,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
break;
case 24:
case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ if (crtc->fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
DRM_ERROR("Unknown color depth\n");
@@ -1302,7 +1322,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1311,7 +1331,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
Start = obj_priv->gtt_offset;
Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
- DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
I915_WRITE(dspstride, crtc->fb->pitch);
if (IS_I965G(dev)) {
I915_WRITE(dspbase, Offset);
@@ -1363,7 +1383,7 @@ static void i915_disable_vga (struct drm_device *dev)
u8 sr1;
u32 vga_reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
@@ -1379,19 +1399,19 @@ static void i915_disable_vga (struct drm_device *dev)
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
}
-static void igdng_disable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
}
-static void igdng_enable_pll_edp (struct drm_crtc *crtc)
+static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1404,13 +1424,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
}
-static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
+static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
- DRM_DEBUG("eDP PLL enable for clock %d\n", clock);
+ DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
@@ -1440,7 +1460,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
udelay(500);
}
-static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1481,10 +1501,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
- DRM_DEBUG("crtc %d dpms on\n", pipe);
+ DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0) {
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ POSTING_READ(PCH_LVDS);
+ }
+ }
+
if (HAS_eDP) {
/* enable eDP PLL */
- igdng_enable_pll_edp(crtc);
+ ironlake_enable_pll_edp(crtc);
} else {
/* enable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
@@ -1501,7 +1530,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_rx_reg);
udelay(200);
- /* Enable CPU FDI TX PLL, always on for IGDNG */
+ /* Enable CPU FDI TX PLL, always on for Ironlake */
temp = I915_READ(fdi_tx_reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
@@ -1568,12 +1597,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK) == 0) {
for (j = 0; j < tries; j++) {
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+ temp);
if (temp & FDI_RX_BIT_LOCK)
break;
udelay(200);
@@ -1582,11 +1612,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_BIT_LOCK);
else
- DRM_DEBUG("train 1 fail\n");
+ DRM_DEBUG_KMS("train 1 fail\n");
} else {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG("train 1 ok 2!\n");
+ DRM_DEBUG_KMS("train 1 ok 2!\n");
}
temp = I915_READ(fdi_tx_reg);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1601,12 +1631,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
for (j = 0; j < tries; j++) {
temp = I915_READ(fdi_rx_iir_reg);
- DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
+ temp);
if (temp & FDI_RX_SYMBOL_LOCK)
break;
udelay(200);
@@ -1614,15 +1645,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
if (j != tries) {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG("train 2 ok 1!\n");
+ DRM_DEBUG_KMS("train 2 ok 1!\n");
} else
- DRM_DEBUG("train 2 fail\n");
+ DRM_DEBUG_KMS("train 2 fail\n");
} else {
I915_WRITE(fdi_rx_iir_reg,
temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG("train 2 ok 2!\n");
+ DRM_DEBUG_KMS("train 2 ok 2!\n");
}
- DRM_DEBUG("train done\n");
+ DRM_DEBUG_KMS("train done\n");
/* set transcoder timing */
I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
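The FDI training sequence above programs training pattern 1, polls FDI_RX_IIR for the bit-lock flag a bounded number of times, then repeats with pattern 2 and the symbol-lock flag. A standalone sketch of that bounded polling pattern, with a stub standing in for I915_READ() and made-up flag values:

#include <stdio.h>

#define BIT_LOCK    (1u << 0)   /* illustrative flag values only */
#define SYMBOL_LOCK (1u << 1)

/* Stub: pretend the receiver locks after a few reads. */
static unsigned int fake_read_iir(void)
{
    static int reads;
    return ++reads >= 3 ? (BIT_LOCK | SYMBOL_LOCK) : 0;
}

/* Poll for a status flag a bounded number of times; returns 1 on lock. */
static int wait_for_lock(unsigned int flag, int tries)
{
    int j;

    for (j = 0; j < tries; j++) {
        if (fake_read_iir() & flag)
            return 1;
        /* the driver udelay()s between reads; omitted here */
    }
    return 0;
}

int main(void)
{
    printf("train 1 %s\n", wait_for_lock(BIT_LOCK, 5) ? "ok" : "fail");
    printf("train 2 %s\n", wait_for_lock(SYMBOL_LOCK, 5) ? "ok" : "fail");
    return 0;
}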
@@ -1664,9 +1695,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
case DRM_MODE_DPMS_OFF:
- DRM_DEBUG("crtc %d dpms off\n", pipe);
-
- i915_disable_vga(dev);
+ DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
@@ -1677,6 +1706,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
+ i915_disable_vga(dev);
+
/* disable cpu pipe, disable after all planes disabled */
temp = I915_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -1690,16 +1721,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(500);
continue;
} else {
- DRM_DEBUG("pipe %d off delay\n", pipe);
+ DRM_DEBUG_KMS("pipe %d off delay\n",
+ pipe);
break;
}
}
} else
- DRM_DEBUG("crtc %d is disabled\n", pipe);
+ DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
- if (HAS_eDP) {
- igdng_disable_pll_edp(crtc);
+ udelay(100);
+
+ /* Disable PF */
+ temp = I915_READ(pf_ctl_reg);
+ if ((temp & PF_ENABLE) != 0) {
+ I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+ I915_READ(pf_ctl_reg);
}
+ I915_WRITE(pf_win_size, 0);
/* disable CPU FDI tx and PCH FDI rx */
temp = I915_READ(fdi_tx_reg);
@@ -1725,6 +1763,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(100);
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
+ I915_READ(PCH_LVDS);
+ udelay(100);
+ }
+
/* disable PCH transcoder */
temp = I915_READ(transconf_reg);
if ((temp & TRANS_ENABLE) != 0) {
@@ -1738,12 +1783,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(500);
continue;
} else {
- DRM_DEBUG("transcoder %d off delay\n", pipe);
+ DRM_DEBUG_KMS("transcoder %d off "
+ "delay\n", pipe);
break;
}
}
}
+ udelay(100);
+
/* disable PCH DPLL */
temp = I915_READ(pch_dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -1751,14 +1799,20 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(pch_dpll_reg);
}
- temp = I915_READ(fdi_rx_reg);
- if ((temp & FDI_RX_PLL_ENABLE) != 0) {
- temp &= ~FDI_SEL_PCDCLK;
- temp &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(fdi_rx_reg, temp);
- I915_READ(fdi_rx_reg);
+ if (HAS_eDP) {
+ ironlake_disable_pll_edp(crtc);
}
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_SEL_PCDCLK;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+
+ temp = I915_READ(fdi_rx_reg);
+ temp &= ~FDI_RX_PLL_ENABLE;
+ I915_WRITE(fdi_rx_reg, temp);
+ I915_READ(fdi_rx_reg);
+
/* Disable CPU FDI TX PLL */
temp = I915_READ(fdi_tx_reg);
if ((temp & FDI_TX_PLL_ENABLE) != 0) {
@@ -1767,20 +1821,43 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(100);
}
- /* Disable PF */
- temp = I915_READ(pf_ctl_reg);
- if ((temp & PF_ENABLE) != 0) {
- I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
- I915_READ(pf_ctl_reg);
- }
- I915_WRITE(pf_win_size, 0);
-
/* Wait for the clocks to turn off. */
- udelay(150);
+ udelay(100);
break;
}
}
+static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+{
+ struct intel_overlay *overlay;
+ int ret;
+
+ if (!enable && intel_crtc->overlay) {
+ overlay = intel_crtc->overlay;
+ mutex_lock(&overlay->dev->struct_mutex);
+ for (;;) {
+ ret = intel_overlay_switch_off(overlay);
+ if (ret == 0)
+ break;
+
+ ret = intel_overlay_recover_from_interrupt(overlay, 0);
+ if (ret != 0) {
+ /* overlay doesn't react anymore. Usually
+ * results in a black screen and an unkillable
+ * X server. */
+ BUG();
+ overlay->hw_wedged = HW_WEDGED;
+ break;
+ }
+ }
+ mutex_unlock(&overlay->dev->struct_mutex);
+ }
+ /* Let userspace switch the overlay on again. In most cases userspace
+ * has to recompute where to put it anyway. */
+
+ return;
+}
+
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -1839,12 +1916,14 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_update_fbc(crtc, &crtc->mode);
/* Give the overlay scaler a chance to enable if it's on this pipe */
- //intel_crtc_dpms_video(crtc, true); TODO
+ intel_crtc_dpms_overlay(intel_crtc, true);
break;
case DRM_MODE_DPMS_OFF:
intel_update_watermarks(dev);
+
/* Give the overlay scaler a chance to disable if it's on this pipe */
- //intel_crtc_dpms_video(crtc, FALSE); TODO
+ intel_crtc_dpms_overlay(intel_crtc, false);
+ drm_vblank_off(dev, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
@@ -1963,7 +2042,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
@@ -2039,7 +2118,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int intel_panel_fitter_pipe (struct drm_device *dev)
+int intel_panel_fitter_pipe (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control;
@@ -2083,9 +2162,8 @@ fdi_reduce_ratio(u32 *num, u32 *den)
#define LINK_N 0x80000
static void
-igdng_compute_m_n(int bits_per_pixel, int nlanes,
- int pixel_clock, int link_clock,
- struct fdi_m_n *m_n)
+ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
+ int link_clock, struct fdi_m_n *m_n)
{
u64 temp;
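ironlake_compute_m_n(), renamed above, produces the data and link M/N ratios the FDI link needs: data M relates payload bytes per pixel clock to lane capacity, link M relates pixel clock to link clock, both expressed against fixed N constants such as the LINK_N define above. A hedged sketch of the arithmetic; the exact scaling and constants are illustrative, since the function body sits outside these hunks:

#include <stdio.h>
#include <stdint.h>

/* Hedged sketch of the FDI M/N idea: both ratios are reduced to a
 * fixed-point fraction with a constant denominator. The real helper
 * also calls fdi_reduce_ratio() to keep the values in range; the
 * constant below is reused for both ratios purely for illustration. */
#define SKETCH_LINK_N 0x80000

static void compute_m_n(int bits_per_pixel, int nlanes,
                        int pixel_clock, int link_clock,
                        uint64_t *data_m, uint64_t *link_m)
{
    /* data M/N: payload bytes per pixel clock vs. lane capacity */
    *data_m = (uint64_t)bits_per_pixel * pixel_clock * SKETCH_LINK_N /
              ((uint64_t)link_clock * nlanes * 8);

    /* link M/N: pixel clock vs. link symbol clock */
    *link_m = (uint64_t)pixel_clock * SKETCH_LINK_N / link_clock;
}

int main(void)
{
    uint64_t data_m, link_m;

    /* 154 MHz pixel clock, 4 FDI lanes, 2.7 GHz link, 24 bpp */
    compute_m_n(24, 4, 154000, 270000, &data_m, &link_m);
    printf("data_m=0x%llx link_m=0x%llx (N=0x%x)\n",
           (unsigned long long)data_m, (unsigned long long)link_m,
           SKETCH_LINK_N);
    return 0;
}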
@@ -2113,34 +2191,34 @@ struct intel_watermark_params {
unsigned long cacheline_size;
};
-/* IGD has different values for various configs */
-static struct intel_watermark_params igd_display_wm = {
- IGD_DISPLAY_FIFO,
- IGD_MAX_WM,
- IGD_DFT_WM,
- IGD_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+/* Pineview has different values for various configs */
+static struct intel_watermark_params pineview_display_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params igd_display_hplloff_wm = {
- IGD_DISPLAY_FIFO,
- IGD_MAX_WM,
- IGD_DFT_HPLLOFF_WM,
- IGD_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_display_hplloff_wm = {
+ PINEVIEW_DISPLAY_FIFO,
+ PINEVIEW_MAX_WM,
+ PINEVIEW_DFT_HPLLOFF_WM,
+ PINEVIEW_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params igd_cursor_wm = {
- IGD_CURSOR_FIFO,
- IGD_CURSOR_MAX_WM,
- IGD_CURSOR_DFT_WM,
- IGD_CURSOR_GUARD_WM,
- IGD_FIFO_LINE_SIZE,
+static struct intel_watermark_params pineview_cursor_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params igd_cursor_hplloff_wm = {
- IGD_CURSOR_FIFO,
- IGD_CURSOR_MAX_WM,
- IGD_CURSOR_DFT_WM,
- IGD_CURSOR_GUARD_WM,
- IGD_FIFO_LINE_SIZE
+static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+ PINEVIEW_CURSOR_FIFO,
+ PINEVIEW_CURSOR_MAX_WM,
+ PINEVIEW_CURSOR_DFT_WM,
+ PINEVIEW_CURSOR_GUARD_WM,
+ PINEVIEW_FIFO_LINE_SIZE
};
static struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
@@ -2213,11 +2291,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1000;
entries_required /= wm->cacheline_size;
- DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
+ DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
wm_size = wm->fifo_size - (entries_required + wm->guard_size);
- DRM_DEBUG("FIFO watermark level: %d\n", wm_size);
+ DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
@@ -2279,50 +2357,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
return latency;
}
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
return NULL;
}
-static void igd_disable_cxsr(struct drm_device *dev)
+static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
/* deactivate cxsr */
reg = I915_READ(DSPFW3);
- reg &= ~(IGD_SELF_REFRESH_EN);
+ reg &= ~(PINEVIEW_SELF_REFRESH_EN);
I915_WRITE(DSPFW3, reg);
DRM_INFO("Big FIFO is disabled\n");
}
-static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
- int pixel_size)
+static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
unsigned long wm;
struct cxsr_latency *latency;
- latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq,
+ latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
dev_priv->mem_freq);
if (!latency) {
- DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
- igd_disable_cxsr(dev);
+ DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ pineview_disable_cxsr(dev);
return;
}
/* Display SR */
- wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= 0x7fffff;
reg |= wm << 23;
I915_WRITE(DSPFW1, reg);
- DRM_DEBUG("DSPFW1 register is %x\n", reg);
+ DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~(0x3f << 24);
@@ -2330,7 +2408,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(clock, &igd_display_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
reg = I915_READ(DSPFW3);
reg &= 0xfffffe00;
@@ -2338,17 +2416,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size,
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~(0x3f << 16);
reg |= (wm & 0x3f) << 16;
I915_WRITE(DSPFW3, reg);
- DRM_DEBUG("DSPFW3 register is %x\n", reg);
+ DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
reg = I915_READ(DSPFW3);
- reg |= IGD_SELF_REFRESH_EN;
+ reg |= PINEVIEW_SELF_REFRESH_EN;
I915_WRITE(DSPFW3, reg);
DRM_INFO("Big FIFO is enabled\n");
@@ -2384,8 +2462,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
(dsparb & 0x7f);
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2403,8 +2481,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
(dsparb & 0x1ff);
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2418,7 +2496,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A",
size);
return size;
@@ -2433,8 +2512,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
size = dsparb & 0x7f;
size >>= 1; /* Convert to cachelines */
- DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
- size);
+ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
+ plane ? "B" : "A", size);
return size;
}
@@ -2509,15 +2588,39 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
- int unused3, int unused4)
+static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ int planeb_clock, int sr_hdisplay, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long line_time_us;
+ int sr_clock, sr_entries, srwm = 1;
+
+ /* Calc sr entries for one plane configs */
+ if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ /* self-refresh has much higher latency */
+ const static int sr_latency_ns = 12000;
+
+ sr_clock = planea_clock ? planea_clock : planeb_clock;
+ line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+
+ /* Use ns/us then divide to preserve precision */
+ sr_entries = (((sr_latency_ns / line_time_us) + 1) *
+ pixel_size * sr_hdisplay) / 1000;
+ sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
+ DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ srwm = I945_FIFO_SIZE - sr_entries;
+ if (srwm < 0)
+ srwm = 1;
+ srwm &= 0x3f;
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ }
- DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n");
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
+ srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0));
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
+ (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
}
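The single-plane path added to i965_update_wm() sizes the self-refresh watermark from how much data the display drains during the self-refresh exit latency: line time from hdisplay and pixel clock, latency divided by line time, times bytes per line, converted to FIFO cache lines, and subtracted from the FIFO size. A standalone worked example of the same arithmetic; the FIFO constants are assumed for illustration since their values are not shown in this hunk:

#include <stdio.h>

#define I915_FIFO_LINE_SIZE 64   /* bytes per FIFO cache line, assumed */
#define I945_FIFO_SIZE      127  /* FIFO size in cache lines, assumed */

int main(void)
{
    const int sr_latency_ns = 12000;  /* self-refresh exit latency, as in the hunk */
    int sr_hdisplay = 1280;           /* active pixels per line */
    int sr_clock = 108000;            /* pixel clock, kHz */
    int pixel_size = 4;               /* bytes per pixel */

    /* line time in microseconds: pixels / kHz gives us per line */
    long line_time_us = (sr_hdisplay * 1000L) / sr_clock;

    /* FIFO entries drained while exiting self-refresh, then cache lines */
    long sr_entries = ((sr_latency_ns / line_time_us) + 1) *
                      pixel_size * sr_hdisplay / 1000;
    sr_entries = sr_entries / I915_FIFO_LINE_SIZE;

    long srwm = I945_FIFO_SIZE - sr_entries;
    if (srwm < 0)
        srwm = 1;

    printf("entries=%ld srwm=%ld\n", sr_entries, srwm & 0x3f);
    return 0;
}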
@@ -2553,7 +2656,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
pixel_size, latency_ns);
planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
pixel_size, latency_ns);
- DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/*
* Overlay gets an aggressive default since video jitter is bad.
@@ -2573,14 +2676,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = (((sr_latency_ns / line_time_us) + 1) *
pixel_size * sr_hdisplay) / 1000;
sr_entries = roundup(sr_entries / cacheline_size, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
}
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
@@ -2607,7 +2710,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
pixel_size, latency_ns);
fwater_lo |= (3<<8) | planea_wm;
- DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm);
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
I915_WRITE(FW_BLC, fwater_lo);
}
@@ -2661,11 +2764,11 @@ static void intel_update_watermarks(struct drm_device *dev)
if (crtc->enabled) {
enabled++;
if (intel_crtc->plane == 0) {
- DRM_DEBUG("plane A (pipe %d) clock: %d\n",
+ DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
intel_crtc->pipe, crtc->mode.clock);
planea_clock = crtc->mode.clock;
} else {
- DRM_DEBUG("plane B (pipe %d) clock: %d\n",
+ DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
intel_crtc->pipe, crtc->mode.clock);
planeb_clock = crtc->mode.clock;
}
@@ -2682,10 +2785,10 @@ static void intel_update_watermarks(struct drm_device *dev)
return;
/* Single plane configs can enable self refresh */
- if (enabled == 1 && IS_IGD(dev))
- igd_enable_cxsr(dev, sr_clock, pixel_size);
- else if (IS_IGD(dev))
- igd_disable_cxsr(dev);
+ if (enabled == 1 && IS_PINEVIEW(dev))
+ pineview_enable_cxsr(dev, sr_clock, pixel_size);
+ else if (IS_PINEVIEW(dev))
+ pineview_disable_cxsr(dev);
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
sr_hdisplay, pixel_size);
@@ -2779,10 +2882,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
- DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
+ DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+ refclk / 1000);
} else if (IS_I9XX(dev)) {
refclk = 96000;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
refclk = 120000; /* 120Mhz refclk */
} else {
refclk = 48000;
@@ -2802,14 +2906,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
- if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
+ if (is_lvds && limit->find_reduced_pll &&
+ dev_priv->lvds_downclock_avail) {
memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
has_reduced_clock = limit->find_reduced_pll(limit, crtc,
- (adjusted_mode->clock*3/4),
+ dev_priv->lvds_downclock,
refclk,
&reduced_clock);
+ if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+ /*
+ * If the different P is found, it means that we can't
+ * switch the display clock by using the FP0/FP1.
+ * In such case we will disable the LVDS downclock
+ * feature.
+ */
+ DRM_DEBUG_KMS("Different P is found for "
+ "LVDS clock/downclock\n");
+ has_reduced_clock = 0;
+ }
}
-
/* SDVO TV has fixed PLL values depend on its clock range,
this mirrors vbios setting. */
if (is_sdvo && is_tv) {
@@ -2831,7 +2946,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* FDI link */
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
int lane, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -2873,8 +2988,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bpp = 24;
}
- igdng_compute_m_n(bpp, lane, target_clock,
- link_bw, &m_n);
+ ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
/* Ironlake: try to setup display ref clock before DPLL
@@ -2882,7 +2996,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -2917,7 +3031,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
}
- if (IS_IGD(dev)) {
+ if (IS_PINEVIEW(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = (1 << reduced_clock.n) << 16 |
@@ -2929,7 +3043,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
- if (!IS_IGDNG(dev))
+ if (!IS_IRONLAKE(dev))
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@@ -2942,19 +3056,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (IS_IGDNG(dev))
+ else if (IS_IRONLAKE(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
if (is_dp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_IGD(dev))
- dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
+ if (IS_PINEVIEW(dev))
+ dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -2973,7 +3087,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !IS_IGDNG(dev))
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3005,9 +3119,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
- /* IGDNG's plane is forced to pipe, bit 24 is to
+ /* Ironlake's plane is forced to pipe, bit 24 is to
enable color space conversion */
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -3034,20 +3148,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Disable the panel fitter if it was on our pipe */
- if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
+ if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
- DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- /* assign to IGDNG registers */
- if (IS_IGDNG(dev)) {
+ /* assign to Ironlake registers */
+ if (IS_IRONLAKE(dev)) {
fp_reg = pch_fp_reg;
dpll_reg = pch_dpll_reg;
}
if (is_edp) {
- igdng_disable_pll_edp(crtc);
+ ironlake_disable_pll_edp(crtc);
} else if ((dpll & DPLL_VCO_ENABLE)) {
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3062,7 +3176,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
u32 lvds;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
@@ -3095,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
udelay(150);
- if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+ if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
if (is_sdvo) {
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3115,14 +3229,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(fp_reg + 4, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG("enabling CxSR downclocking\n");
+ DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(fp_reg + 4, fp);
intel_crtc->lowfreq_avail = false;
if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG("disabling CxSR downclocking\n");
+ DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
@@ -3142,21 +3256,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* pipesrc and dspsize control the size that is scaled from, which should
* always be the user's requested size.
*/
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(dsppos_reg, 0);
}
I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
I915_WRITE(link_m1_reg, m_n.link_m);
I915_WRITE(link_n1_reg, m_n.link_n);
if (is_edp) {
- igdng_set_pll_edp(crtc, adjusted_mode->clock);
+ ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} else {
/* enable FDI RX PLL too */
temp = I915_READ(fdi_rx_reg);
@@ -3170,7 +3284,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_wait_for_vblank(dev);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3204,8 +3318,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- /* use legacy palette for IGDNG */
- if (IS_IGDNG(dev))
+ /* use legacy palette for Ironlake */
+ if (IS_IRONLAKE(dev))
palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
LGC_PALETTE_B;
@@ -3234,11 +3348,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
size_t addr;
int ret;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
- DRM_DEBUG("cursor off\n");
+ DRM_DEBUG_KMS("cursor off\n");
if (IS_MOBILE(dev) || IS_I9XX(dev)) {
temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
temp |= CURSOR_MODE_DISABLE;
@@ -3546,18 +3660,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_IGD(dev)) {
- clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
- clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ if (IS_PINEVIEW(dev)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
if (IS_I9XX(dev)) {
- if (IS_IGD(dev))
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
+ if (IS_PINEVIEW(dev))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -3572,7 +3686,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
7 : 14;
break;
default:
- DRM_DEBUG("Unknown DPLL mode %08x in programmed "
+ DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return 0;
}
@@ -3658,7 +3772,7 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("idle timer fired, downclocking\n");
+ DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
dev_priv->busy = false;
@@ -3669,11 +3783,11 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG("not reclocking render clock\n");
+ DRM_DEBUG_DRIVER("not reclocking render clock\n");
return;
}
@@ -3682,7 +3796,7 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
else if (IS_I85X(dev))
pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
- DRM_DEBUG("increasing render clock frequency\n");
+ DRM_DEBUG_DRIVER("increasing render clock frequency\n");
/* Schedule downclock */
if (schedule)
@@ -3694,11 +3808,11 @@ void intel_decrease_renderclock(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG("not reclocking render clock\n");
+ DRM_DEBUG_DRIVER("not reclocking render clock\n");
return;
}
@@ -3758,7 +3872,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
}
- DRM_DEBUG("decreasing render clock frequency\n");
+ DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
}
/* Note that no increase function is needed for this - increase_renderclock()
@@ -3766,7 +3880,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
*/
void intel_decrease_displayclock(struct drm_device *dev)
{
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
@@ -3792,7 +3906,7 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
- DRM_DEBUG("idle timer fired, downclocking\n");
+ DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
intel_crtc->busy = false;
@@ -3808,14 +3922,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->lvds_downclock_avail)
return;
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
- DRM_DEBUG("upclocking LVDS\n");
+ DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3826,7 +3940,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
- DRM_DEBUG("failed to upclock LVDS!\n");
+ DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3847,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3858,7 +3972,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
- DRM_DEBUG("downclocking LVDS\n");
+ DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3869,7 +3983,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
intel_wait_for_vblank(dev);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
- DRM_DEBUG("failed to downclock LVDS!\n");
+ DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3936,8 +4050,13 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- dev_priv->busy = true;
- intel_increase_renderclock(dev, true);
+ if (!dev_priv->busy) {
+ dev_priv->busy = true;
+ intel_increase_renderclock(dev, true);
+ } else {
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -3967,6 +4086,158 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
+struct intel_unpin_work {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct drm_pending_vblank_event *event;
+ int pending;
+};
+
+static void intel_unpin_work_fn(struct work_struct *__work)
+{
+ struct intel_unpin_work *work =
+ container_of(__work, struct intel_unpin_work, work);
+
+ mutex_lock(&work->dev->struct_mutex);
+ i915_gem_object_unpin(work->obj);
+ drm_gem_object_unreference(work->obj);
+ mutex_unlock(&work->dev->struct_mutex);
+ kfree(work);
+}
+
+void intel_finish_page_flip(struct drm_device *dev, int pipe)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_pending_vblank_event *e;
+ struct timeval now;
+ unsigned long flags;
+
+ /* Ignore early vblank irqs */
+ if (intel_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+ if (work == NULL || !work->pending) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
+ intel_crtc->unpin_work = NULL;
+ drm_vblank_put(dev, intel_crtc->pipe);
+
+ if (work->event) {
+ e = work->event;
+ do_gettimeofday(&now);
+ e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ list_add_tail(&e->base.link,
+ &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ obj_priv = work->obj->driver_private;
+ if (atomic_dec_and_test(&obj_priv->pending_flip))
+ DRM_WAKEUP(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+}
+
+void intel_prepare_page_flip(struct drm_device *dev, int plane)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work)
+ intel_crtc->unpin_work->pending = 1;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+ int ret;
+ RING_LOCALS;
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL)
+ return -ENOMEM;
+
+ mutex_lock(&dev->struct_mutex);
+
+ work->event = event;
+ work->dev = crtc->dev;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ work->obj = intel_fb->obj;
+ INIT_WORK(&work->work, intel_unpin_work_fn);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (intel_crtc->unpin_work) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(work);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+ intel_crtc->unpin_work = work;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj);
+ if (ret != 0) {
+ kfree(work);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* Reference the old fb object for the scheduled work. */
+ drm_gem_object_reference(work->obj);
+
+ crtc->fb = fb;
+ i915_gem_object_flush_write_domain(obj);
+ drm_vblank_get(dev, intel_crtc->pipe);
+ obj_priv = obj->driver_private;
+ atomic_inc(&obj_priv->pending_flip);
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ if (IS_I965G(dev)) {
+ OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING((fb->width << 16) | fb->height);
+ } else {
+ OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(MI_NOOP);
+ }
+ ADVANCE_LP_RING();
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
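For context, the new .page_flip hook added below is driven from userspace through libdrm; the following is a minimal, hypothetical sketch (fd, crtc_id and fb_id are placeholders supplied by the caller) of how the vblank completion event queued by intel_finish_page_flip() would be consumed:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void page_flip_done(int fd, unsigned int sequence,
                           unsigned int tv_sec, unsigned int tv_usec,
                           void *user_data)
{
        /* the new framebuffer is now being scanned out */
}

/* Request a flip of crtc_id to fb_id and wait for the vblank event.
 * drmHandleEvent() reads the event from fd, so it is normally called
 * once poll()/select() reports the fd readable. */
static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
        drmEventContext evctx = {
                .version = DRM_EVENT_CONTEXT_VERSION,
                .page_flip_handler = page_flip_done,
        };
        int ret;

        ret = drmModePageFlip(fd, crtc_id, fb_id,
                              DRM_MODE_PAGE_FLIP_EVENT, NULL);
        if (ret)
                return ret;     /* e.g. -EBUSY while a flip is still pending */

        return drmHandleEvent(fd, &evctx);
}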
+
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
@@ -3983,11 +4254,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = intel_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
};
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
@@ -4010,10 +4283,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
- DRM_DEBUG("swapping pipes & planes for FBC\n");
+ DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = ((pipe == 0) ? 1 : 0);
}
+ BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
+
intel_crtc->cursor_addr = 0;
intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -4090,7 +4368,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
int found;
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4118,7 +4396,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D);
- } else if (IS_I9XX(dev)) {
+ } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
@@ -4145,10 +4423,10 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev, DP_D);
- } else
+ } else if (IS_I8XX(dev))
intel_dvo_init(dev);
- if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
+ if (SUPPORTS_TV(dev))
intel_tv_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -4257,7 +4535,7 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4291,11 +4569,52 @@ void intel_init_clock_gating(struct drm_device *dev)
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
- } else if (IS_I855(dev) || IS_I865G(dev)) {
+ } else if (IS_I85X(dev) || IS_I865G(dev)) {
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
} else if (IS_I830(dev)) {
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
+
+ /*
+ * GPU can automatically power down the render unit if given a page
+ * to save state.
+ */
+ if (I915_HAS_RC6(dev)) {
+ struct drm_gem_object *pwrctx;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ if (dev_priv->pwrctx) {
+ obj_priv = dev_priv->pwrctx->driver_private;
+ } else {
+ pwrctx = drm_gem_object_alloc(dev, 4096);
+ if (!pwrctx) {
+ DRM_DEBUG("failed to alloc power context, "
+ "RC6 disabled\n");
+ goto out;
+ }
+
+ ret = i915_gem_object_pin(pwrctx, 4096);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n",
+ ret);
+ drm_gem_object_unreference(pwrctx);
+ goto out;
+ }
+
+ i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+
+ dev_priv->pwrctx = pwrctx;
+ obj_priv = pwrctx->driver_private;
+ }
+
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ }
+
+out:
+ return;
}
/* Set up chip specific display functions */
@@ -4304,8 +4623,8 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_IGDNG(dev))
- dev_priv->display.dpms = igdng_crtc_dpms;
+ if (IS_IRONLAKE(dev))
+ dev_priv->display.dpms = ironlake_crtc_dpms;
else
dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4324,13 +4643,13 @@ static void intel_init_display(struct drm_device *dev)
}
/* Returns the core display clock speed */
- if (IS_I945G(dev))
+ if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_I915GM(dev))
@@ -4339,7 +4658,7 @@ static void intel_init_display(struct drm_device *dev)
else if (IS_I865G(dev))
dev_priv->display.get_display_clock_speed =
i865_get_display_clock_speed;
- else if (IS_I855(dev))
+ else if (IS_I85X(dev))
dev_priv->display.get_display_clock_speed =
i855_get_display_clock_speed;
else /* 852, 830 */
@@ -4347,7 +4666,7 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
dev_priv->display.update_wm = NULL;
else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
@@ -4403,7 +4722,7 @@ void intel_modeset_init(struct drm_device *dev)
num_pipe = 2;
else
num_pipe = 1;
- DRM_DEBUG("%d display pipe%s available.\n",
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
num_pipe, num_pipe > 1 ? "s" : "");
if (IS_I85X(dev))
@@ -4422,6 +4741,15 @@ void intel_modeset_init(struct drm_device *dev)
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
+
+ intel_setup_overlay(dev);
+
+ if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
+ dev_priv->fsb_freq,
+ dev_priv->mem_freq))
+ DRM_INFO("failed to find known CxSR latency "
+ "(found fsb freq %d, mem freq %d), disabling CxSR\n",
+ dev_priv->fsb_freq, dev_priv->mem_freq);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -4445,11 +4773,21 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
- mutex_unlock(&dev->struct_mutex);
-
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
+ if (dev_priv->pwrctx) {
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = dev_priv->pwrctx->driver_private;
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
+ I915_READ(PWRCTXA);
+ i915_gem_object_unpin(dev_priv->pwrctx);
+ drm_gem_object_unreference(dev_priv->pwrctx);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
drm_mode_config_cleanup(dev);
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d83447557f9..4e7aa8b7b93 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -33,7 +33,8 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
+
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -223,8 +224,8 @@ intel_dp_aux_ch(struct intel_output *intel_output,
*/
if (IS_eDP(intel_output))
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (IS_IGDNG(dev))
- aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */
+ else if (IS_IRONLAKE(dev))
+ aux_clock_divider = 62; /* Ironlake: input clock fixed at 125MHz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -282,7 +283,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
+ DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
return -ETIMEDOUT;
}
@@ -382,17 +383,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
}
static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_bytes)
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct intel_dp_priv *dp_priv = container_of(adapter,
struct intel_dp_priv,
adapter);
struct intel_output *intel_output = dp_priv->intel_output;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
- return intel_dp_aux_ch(intel_output,
- send, send_bytes, recv, recv_bytes);
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+ return ret;
+ }
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ DRM_DEBUG_KMS("aux_ch defer\n");
+ udelay(100);
+ break;
+ default:
+ DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
}
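To make the AUX message layout above concrete, here is a hypothetical helper (not part of the driver) that builds the header of an AUX-over-I2C read, e.g. for the EDID address 0x50, using the AUX_I2C_* defines from drm_dp_helper.h; the 4-byte header is then sent with reply_bytes = 2, and reply[1] carries the data on AUX_I2C_REPLY_ACK:

static int build_aux_i2c_read_hdr(uint8_t msg[4], uint16_t address, int mot)
{
        msg[0] = AUX_I2C_READ << 4;          /* command nibble */
        if (mot)
                msg[0] |= AUX_I2C_MOT << 4;  /* middle-of-transaction */
        msg[1] = address >> 8;               /* I2C address, bits 15:8 */
        msg[2] = address;                    /* I2C address, bits 7:0 */
        msg[3] = 0;                          /* transfer length - 1 */
        return 4;                            /* msg_bytes for a read */
}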
static int
@@ -435,7 +496,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
dp_priv->link_bw = bws[clock];
dp_priv->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
- DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
+ DRM_DEBUG_KMS("Display port link bw %02x lane "
+ "count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
@@ -514,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(3, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -606,23 +668,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
-static void igdng_edp_backlight_on (struct drm_device *dev)
+static void ironlake_edp_backlight_on (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
}
-static void igdng_edp_backlight_off (struct drm_device *dev)
+static void ironlake_edp_backlight_off (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
@@ -641,13 +703,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_output, dp_priv->DP);
if (IS_eDP(intel_output))
- igdng_edp_backlight_off(dev);
+ ironlake_edp_backlight_off(dev);
}
} else {
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
if (IS_eDP(intel_output))
- igdng_edp_backlight_on(dev);
+ ironlake_edp_backlight_on(dev);
}
}
dp_priv->dpms_mode = mode;
@@ -1010,7 +1072,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (IS_eDP(intel_output)) {
DP &= ~DP_PLL_ENABLE;
@@ -1071,7 +1133,7 @@ intel_dp_check_link_status(struct intel_output *intel_output)
}
static enum drm_connector_status
-igdng_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_dp_priv *dp_priv = intel_output->dev_priv;
@@ -1106,8 +1168,8 @@ intel_dp_detect(struct drm_connector *connector)
dp_priv->has_audio = false;
- if (IS_IGDNG(dev))
- return igdng_dp_detect(connector);
+ if (IS_IRONLAKE(dev))
+ return ironlake_dp_detect(connector);
temp = I915_READ(PORT_HOTPLUG_EN);
@@ -1227,7 +1289,53 @@ intel_dp_hot_plug(struct intel_output *intel_output)
if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
intel_dp_check_link_status(intel_output);
}
-
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the given DP is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the given
+ * DP is present.
+ */
+static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, dp_port, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ dp_port = 0;
+ if (dp_reg == DP_B || dp_reg == PCH_DP_B)
+ dp_port = PORT_IDPB;
+ else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
+ dp_port = PORT_IDPC;
+ else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
+ dp_port = PORT_IDPD;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not DP, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_DP &&
+ p_child->device_type != DEVICE_TYPE_eDP)
+ continue;
+ /* Find the eDP port */
+ if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
+ ret = 1;
+ break;
+ }
+ /* Find the DP port */
+ if (p_child->dvo_port == dp_port) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1237,6 +1345,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
struct intel_dp_priv *dp_priv;
const char *name = NULL;
+ if (!dp_is_present_in_vbt(dev, output_reg)) {
+ DRM_DEBUG_KMS("DP is not present. Ignore it\n");
+ return;
+ }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
if (!intel_output)
@@ -1254,11 +1366,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
else
intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
- if (output_reg == DP_B)
+ if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
- else if (output_reg == DP_C)
+ else if (output_reg == DP_C || output_reg == PCH_DP_C)
intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
- else if (output_reg == DP_D)
+ else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
if (IS_eDP(intel_output)) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ef61fe9507e..a51573da1ff 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,32 @@ struct intel_output {
int clone_mask;
};
+struct intel_crtc;
+struct intel_overlay {
+ struct drm_device *dev;
+ struct intel_crtc *crtc;
+ struct drm_i915_gem_object *vid_bo;
+ struct drm_i915_gem_object *old_vid_bo;
+ int active;
+ int pfit_active;
+ u32 pfit_vscale_ratio; /* fixed-point number, (1<<12) == 1.0 */
+ u32 color_key;
+ u32 brightness, contrast, saturation;
+ u32 old_xscale, old_yscale;
+ /* register access */
+ u32 flip_addr;
+ struct drm_i915_gem_object *reg_bo;
+ void *virt_addr;
+ /* flip handling */
+ uint32_t last_flip_req;
+ int hw_wedged;
+#define HW_WEDGED 1
+#define NEEDS_WAIT_FOR_FLIP 2
+#define RELEASE_OLD_VID 3
+#define SWITCH_OFF_STAGE_1 4
+#define SWITCH_OFF_STAGE_2 5
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -121,6 +147,8 @@ struct intel_crtc {
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
+ struct intel_overlay *overlay;
+ struct intel_unpin_work *unpin_work;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -134,6 +162,8 @@ void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
+void intel_i2c_reset_gmbus(struct drm_device *dev);
+
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -148,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern void intel_edp_link_config (struct intel_output *, int *, int *);
+extern int intel_panel_fitter_pipe (struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -177,10 +208,23 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
+extern void intel_init_clock_gating(struct drm_device *dev);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_framebuffer **fb,
struct drm_gem_object *obj);
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ int interruptible);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 40fcf6fdef3..371d753e362 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
par->intel_fb = intel_fb;
/* To allow resizing without swapping buffers */
- DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
- intel_fb->base.height, obj_priv->gtt_offset, fbo);
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ intel_fb->base.width, intel_fb->base.height,
+ obj_priv->gtt_offset, fbo);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
{
int ret;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c33451aec1b..f04dbbe7d40 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway as it proves more stable in testing.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to write this twice to work around an issue
* that may result in the first write getting masked.
*/
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -225,7 +225,52 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
.destroy = intel_hdmi_enc_destroy,
};
-
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the given HDMI is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the given
+ * HDMI is present.
+ */
+static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, hdmi_port, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ if (hdmi_reg == SDVOB)
+ hdmi_port = DVO_B;
+ else if (hdmi_reg == SDVOC)
+ hdmi_port = DVO_C;
+ else if (hdmi_reg == HDMIB)
+ hdmi_port = DVO_B;
+ else if (hdmi_reg == HDMIC)
+ hdmi_port = DVO_C;
+ else if (hdmi_reg == HDMID)
+ hdmi_port = DVO_D;
+ else
+ return 0;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not HDMI, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_HDMI)
+ continue;
+ /* Find the HDMI port */
+ if (p_child->dvo_port == hdmi_port) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -233,6 +278,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_output *intel_output;
struct intel_hdmi_priv *hdmi_priv;
+ if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
+ DRM_DEBUG_KMS("HDMI is not present. Ignore it\n");
+ return;
+ }
intel_output = kcalloc(sizeof(struct intel_output) +
sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c7eab724c41..8673c735b8a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
struct drm_i915_private *dev_priv = dev->dev_private;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_IGD(dev))
+ if (!IS_PINEVIEW(dev))
return;
if (enable)
I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +118,23 @@ static void set_data(void *data, int state_high)
udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
}
+/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
+ * engine, but if the BIOS leaves it enabled, then that can break our use
+ * of the bit-banging I2C interfaces. This is notably the case with the
+ * Mac Mini in EFI mode.
+ */
+void
+intel_i2c_reset_gmbus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_IRONLAKE(dev)) {
+ I915_WRITE(PCH_GMBUS0, 0);
+ } else {
+ I915_WRITE(GMBUS0, 0);
+ }
+}
+
/**
* intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
* @dev: DRM device
@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
if(i2c_bit_add_bus(&chan->adapter))
goto out_free;
+ intel_i2c_reset_gmbus(dev);
+
/* JJJ: raise SCL and SDA? */
intel_i2c_quirk_set(dev, true);
set_data(chan, 1);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index eb365021bb5..3118ce274e6 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl, reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_PCH_CTL2;
else
reg = BLC_PWM_CTL;
@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_status, ctl_reg, status_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
ctl_reg = PCH_PP_CONTROL;
status_reg = PCH_PP_STATUS;
} else {
@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* full screen scale for now */
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
goto out;
/* 965+ wants fuzzy fitting */
@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!IS_IGDNG(dev)) {
+ if (!IS_IRONLAKE(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- if (IS_IGDNG(dev))
+ if (IS_IRONLAKE(dev))
return;
/*
@@ -914,6 +914,101 @@ static int intel_lid_present(void)
#endif
/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *scan, *panel_fixed_mode;
+ int temp_downclock;
+
+ panel_fixed_mode = dev_priv->panel_fixed_mode;
+ temp_downclock = panel_fixed_mode->clock;
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ /*
+ * If a probed mode has the same resolution as the fixed panel
+ * mode but a different (lower) refresh rate, it can serve as the
+ * reduced downclock for the LVDS. In that case we can program
+ * different FPx0/1 values to dynamically select between the low
+ * and high frequencies.
+ */
+ if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+ scan->hsync_start == panel_fixed_mode->hsync_start &&
+ scan->hsync_end == panel_fixed_mode->hsync_end &&
+ scan->htotal == panel_fixed_mode->htotal &&
+ scan->vdisplay == panel_fixed_mode->vdisplay &&
+ scan->vsync_start == panel_fixed_mode->vsync_start &&
+ scan->vsync_end == panel_fixed_mode->vsync_end &&
+ scan->vtotal == panel_fixed_mode->vtotal) {
+ if (scan->clock < temp_downclock) {
+ /*
+ * A downclock has already been found, but keep
+ * looking for the lowest one.
+ */
+ temp_downclock = scan->clock;
+ }
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ if (temp_downclock < panel_fixed_mode->clock) {
+ /* We found the downclock for LVDS. */
+ dev_priv->lvds_downclock_avail = 1;
+ dev_priv->lvds_downclock = temp_downclock;
+ DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+ "Normal clock %dKhz, downclock %dKhz\n",
+ panel_fixed_mode->clock, temp_downclock);
+ }
+ return;
+}
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the LVDS is present.
+ * Note: the addin_offset should also be checked for the LVDS panel;
+ * only when it is non-zero is the panel regarded as present.
+ */
+static int lvds_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not LFP, continue.
+ * If the device type is 0x22, it is also regarded as LFP.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+ p_child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ /* The addin_offset should be checked. Only when it is
+ * non-zero, it is regarded as present.
+ */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
*
@@ -936,21 +1031,20 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- /* Assume that any device without an ACPI LID device also doesn't
- * have an integrated LVDS. We would be better off parsing the BIOS
- * to get a reliable indicator, but that code isn't written yet.
- *
- * In the case of all-in-one desktops using LVDS that we've seen,
- * they're using SDVO LVDS.
+ /*
+ * Assume LVDS is present if there's an ACPI lid device or if the
+ * device is present in the VBT.
*/
- if (!intel_lid_present())
+ if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
return;
+ }
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
if (dev_priv->edp_support) {
- DRM_DEBUG("disable LVDS for eDP support\n");
+ DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
gpio = PCH_GPIOC;
@@ -1023,6 +1117,7 @@ void intel_lvds_init(struct drm_device *dev)
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
mutex_unlock(&dev->mode_config.mutex);
+ intel_find_lvds_downclock(dev, connector);
goto out;
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1142,8 @@ void intel_lvds_init(struct drm_device *dev)
* correct mode.
*/
- /* IGDNG: FIXME if still fail, not try pipe mode now */
- if (IS_IGDNG(dev))
+ /* Ironlake: FIXME if still fail, not try pipe mode now */
+ if (IS_IRONLAKE(dev))
goto failed;
lvds = I915_READ(LVDS);
@@ -1069,7 +1164,7 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
- if (IS_IGDNG(dev)) {
+ if (IS_IRONLAKE(dev)) {
u32 pwm;
/* make sure PWM is enabled */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,7 +1177,7 @@ out:
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
- DRM_DEBUG("lid notifier registration failed\n");
+ DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
drm_sysfs_connector_add(connector);
@@ -1093,5 +1188,6 @@ failed:
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
+ drm_encoder_cleanup(encoder);
kfree(intel_output);
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 00000000000..2639591c72e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1416 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to intel doc, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But the xorg code assumes 2048 for height and width. Use
+ * the minimum of both. */
+#define IMAGE_MAX_WIDTH 2048
+#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY 1024
+#define IMAGE_MAX_HEIGHT_LEGACY 1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE (0x1<<19)
+#define OCMD_MIRROR_MASK (0x3<<17)
+#define OCMD_MIRROR_MODE (0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
+#define OCMD_MIRROR_VERTICAL (0x2<<17)
+#define OCMD_MIRROR_BOTH (0x3<<17)
+#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED (0x8<<10)
+#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR (0xc<<10)
+#define OCMD_YUV_422_PLANAR (0xd<<10)
+#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
+#define OCMD_BUF_TYPE_FRAME (0x0<<5)
+#define OCMD_BUF_TYPE_FIELD (0x1<<5)
+#define OCMD_TEST_MODE (0x1<<4)
+#define OCMD_BUFFER_SELECT (0x3<<2)
+#define OCMD_BUFFER0 (0x0<<2)
+#define OCMD_BUFFER1 (0x1<<2)
+#define OCMD_FIELD_SELECT (0x1<<2)
+#define OCMD_FIELD0 (0x0<<1)
+#define OCMD_FIELD1 (0x1<<1)
+#define OCMD_ENABLE (0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK (0x1<<18)
+#define OCONF_PIPE_A (0x0<<18)
+#define OCONF_PIPE_B (0x1<<18)
+#define OCONF_GAMMA2_ENABLE (0x1<<16)
+#define OCONF_CSC_MODE_BT601 (0x0<<5)
+#define OCONF_CSC_MODE_BT709 (0x1<<5)
+#define OCONF_CSC_BYPASS (0x1<<4)
+#define OCONF_CC_OUT_8BIT (0x1<<3)
+#define OCONF_TEST_MODE (0x1<<2)
+#define OCONF_THREE_LINE_BUFFER (0x1<<0)
+#define OCONF_TWO_LINE_BUFFER (0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE (0x1<<31)
+#define CLK_RGB24_MASK 0x0
+#define CLK_RGB16_MASK 0x070307
+#define CLK_RGB15_MASK 0x070707
+#define CLK_RGB8I_MASK 0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+ (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+ (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
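A quick worked example of the macro above (illustration only, not driver code): an RGB565 key of 0xF81F expands as

/*
 *   (0xF81F & 0xF800) << 8 = 0x00F80000   red   -> bits 23:16
 *   (0xF81F & 0x07E0) << 5 = 0x00000000   green -> bits 15:8
 *   (0xF81F & 0x001F) << 3 = 0x000000F8   blue  -> bits 7:0
 *
 *   RGB16_TO_COLORKEY(0xF81F) == 0x00F800F8
 */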
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS 5
+#define N_VERT_Y_TAPS 3
+#define N_HORIZ_UV_TAPS 3
+#define N_VERT_UV_TAPS 3
+#define N_PHASES 17
+#define MAX_TAPS 5
+
+/* memory buffered overlay registers */
+struct overlay_registers {
+ u32 OBUF_0Y;
+ u32 OBUF_1Y;
+ u32 OBUF_0U;
+ u32 OBUF_0V;
+ u32 OBUF_1U;
+ u32 OBUF_1V;
+ u32 OSTRIDE;
+ u32 YRGB_VPH;
+ u32 UV_VPH;
+ u32 HORZ_PH;
+ u32 INIT_PHS;
+ u32 DWINPOS;
+ u32 DWINSZ;
+ u32 SWIDTH;
+ u32 SWIDTHSW;
+ u32 SHEIGHT;
+ u32 YRGBSCALE;
+ u32 UVSCALE;
+ u32 OCLRC0;
+ u32 OCLRC1;
+ u32 DCLRKV;
+ u32 DCLRKM;
+ u32 SCLRKVH;
+ u32 SCLRKVL;
+ u32 SCLRKEN;
+ u32 OCONFIG;
+ u32 OCMD;
+ u32 RESERVED1; /* 0x6C */
+ u32 OSTART_0Y;
+ u32 OSTART_1Y;
+ u32 OSTART_0U;
+ u32 OSTART_0V;
+ u32 OSTART_1U;
+ u32 OSTART_1V;
+ u32 OTILEOFF_0Y;
+ u32 OTILEOFF_1Y;
+ u32 OTILEOFF_0U;
+ u32 OTILEOFF_0V;
+ u32 OTILEOFF_1U;
+ u32 OTILEOFF_1V;
+ u32 FASTHSCALE; /* 0xA0 */
+ u32 UVSCALEV; /* 0xA4 */
+ u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+ u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+ u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+ u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+ u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+ u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+ u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+ u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+ u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+
+
+static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ struct overlay_registers *regs;
+
+ /* no recursive mappings */
+ BUG_ON(overlay->virt_addr);
+
+ if (OVERLAY_NONPHYSICAL(overlay->dev)) {
+ regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ overlay->reg_bo->gtt_offset);
+
+ if (!regs) {
+ DRM_ERROR("failed to map overlay regs in GTT\n");
+ return NULL;
+ }
+ } else
+ regs = overlay->reg_bo->phys_obj->handle->vaddr;
+
+ return overlay->virt_addr = regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (OVERLAY_NONPHYSICAL(overlay->dev))
+ io_mapping_unmap_atomic(overlay->virt_addr);
+
+ overlay->virt_addr = NULL;
+
+ I915_READ(OVADD); /* flush wc caches */
+
+ return;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ RING_LOCALS;
+
+ BUG_ON(overlay->active);
+
+ overlay->active = 1;
+ overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ OUT_RING(overlay->flip_addr | OFC_UPDATE);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static void intel_overlay_continue(struct intel_overlay *overlay,
+ bool load_polyphase_filter)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 flip_addr = overlay->flip_addr;
+ u32 tmp;
+ RING_LOCALS;
+
+ BUG_ON(!overlay->active);
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ /* check for underruns */
+ tmp = I915_READ(DOVSTA);
+ if (tmp & (1 << 17))
+ DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+}
+
+static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ u32 tmp;
+ RING_LOCALS;
+
+ if (overlay->last_flip_req != 0) {
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret == 0) {
+ overlay->last_flip_req = 0;
+
+ tmp = I915_READ(ISR);
+
+ if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
+ return 0;
+ }
+ }
+
+ /* synchronous slowpath */
+ overlay->hw_wedged = RELEASE_OLD_VID;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+ u32 flip_addr = overlay->flip_addr;
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ RING_LOCALS;
+
+ BUG_ON(!overlay->active);
+
+ /* According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases */
+ flip_addr |= OFC_UPDATE;
+
+ /* wait for overlay to go idle */
+ overlay->hw_wedged = SWITCH_OFF_STAGE_1;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ /* turn overlay off */
+ overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+ if (ret != 0)
+ return ret;
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return ret;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+ struct drm_gem_object *obj;
+
+ /* never have the overlay hw on without showing a frame */
+ BUG_ON(!overlay->vid_bo);
+ obj = overlay->vid_bo->obj;
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->vid_bo = NULL;
+
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+ overlay->active = 0;
+}
+
+/* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and make forward progress. */
+int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+ int interruptible)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ u32 flip_addr;
+ int ret;
+ RING_LOCALS;
+
+ if (overlay->hw_wedged == HW_WEDGED)
+ return -EIO;
+
+ if (overlay->last_flip_req == 0) {
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+ }
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+ if (ret != 0)
+ return ret;
+
+ switch (overlay->hw_wedged) {
+ case RELEASE_OLD_VID:
+ obj = overlay->old_vid_bo->obj;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->old_vid_bo = NULL;
+ break;
+ case SWITCH_OFF_STAGE_1:
+ flip_addr = overlay->flip_addr;
+ flip_addr |= OFC_UPDATE;
+
+ overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(MI_FLUSH);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+
+ overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+ if (overlay->last_flip_req == 0)
+ return -ENOMEM;
+
+ ret = i915_do_wait_request(dev, overlay->last_flip_req,
+ interruptible);
+ if (ret != 0)
+ return ret;
+
+ case SWITCH_OFF_STAGE_2:
+ intel_overlay_off_tail(overlay);
+ break;
+ default:
+ BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
+ }
+
+ overlay->hw_wedged = 0;
+ overlay->last_flip_req = 0;
+ return 0;
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs_atomic */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+ int ret;
+ struct drm_gem_object *obj;
+
+ /* only wait if there is actually an old frame to release to
+ * guarantee forward progress */
+ if (!overlay->old_vid_bo)
+ return 0;
+
+ ret = intel_overlay_wait_flip(overlay);
+ if (ret != 0)
+ return ret;
+
+ obj = overlay->old_vid_bo->obj;
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(obj);
+ overlay->old_vid_bo = NULL;
+
+ return 0;
+}
+
+struct put_image_params {
+ int format;
+ short dst_x;
+ short dst_y;
+ short dst_w;
+ short dst_h;
+ short src_w;
+ short src_scan_h;
+ short src_scan_w;
+ short src_h;
+ short stride_Y;
+ short stride_UV;
+ int offset_Y;
+ int offset_U;
+ int offset_V;
+};
+
+static int packed_depth_bytes(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return 4;
+ case I915_OVERLAY_YUV411:
+ /* return 6; not implemented */
+ default:
+ return -EINVAL;
+ }
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ return width << 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_hsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV420:
+ return 2;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uv_vsubsampling(u32 format)
+{
+ switch (format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV420:
+ case I915_OVERLAY_YUV410:
+ return 2;
+ case I915_OVERLAY_YUV422:
+ case I915_OVERLAY_YUV411:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+ u32 mask, shift, ret;
+ if (IS_I9XX(dev)) {
+ mask = 0x3f;
+ shift = 6;
+ } else {
+ mask = 0x1f;
+ shift = 5;
+ }
+ ret = ((offset + width + mask) >> shift) - (offset >> shift);
+ if (IS_I9XX(dev))
+ ret <<= 1;
+ ret -= 1;
+ return ret << 2;
+}
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+ 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+ 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+ 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+ 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+ 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+ 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+ 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+ 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+ 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+ 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+ 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+ 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+ 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+ 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+ 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+ 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+ 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+ 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+ 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+ 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+ 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+ 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+ 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+ 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+ 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+ 0x3000, 0x0800, 0x3000};
+
+static void update_polyphase_filter(struct overlay_registers *regs)
+{
+ memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+ memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+ struct overlay_registers *regs,
+ struct put_image_params *params)
+{
+ /* fixed point with a 12 bit shift */
+ u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+ bool scale_changed = false;
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+
+ if (params->dst_w > 1)
+ xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+ /(params->dst_w);
+ else
+ xscale = 1 << FP_SHIFT;
+
+ if (params->dst_h > 1)
+ yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+ /(params->dst_h);
+ else
+ yscale = 1 << FP_SHIFT;
+
+ /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+ xscale_UV = xscale/uv_hscale;
+ yscale_UV = yscale/uv_vscale;
+ /* make the Y scale to UV scale ratio an exact multiple */
+ xscale = xscale_UV * uv_hscale;
+ yscale = yscale_UV * uv_vscale;
+ /*} else {
+ xscale_UV = 0;
+ yscale_UV = 0;
+ }*/
+
+ if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+ scale_changed = true;
+ overlay->old_xscale = xscale;
+ overlay->old_yscale = yscale;
+
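+ /* YRGBSCALE and UVSCALE pack the vertical fraction at bit 20, the
+ * horizontal integer part at bit 16 and the horizontal fraction at
+ * bit 3; UVSCALEV carries the two vertical integer parts. */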
+ regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
+ | ((xscale >> FP_SHIFT) << 16)
+ | ((xscale & FRACT_MASK) << 3);
+ regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
+ | ((xscale_UV >> FP_SHIFT) << 16)
+ | ((xscale_UV & FRACT_MASK) << 3);
+ regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
+ | ((yscale_UV >> FP_SHIFT) << 0);
+
+ if (scale_changed)
+ update_polyphase_filter(regs);
+
+ return scale_changed;
+}
+
+static void update_colorkey(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ u32 key = overlay->color_key;
+ switch (overlay->crtc->base.fb->bits_per_pixel) {
+ case 8:
+ regs->DCLRKV = 0;
+ regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+ break;
+ case 16:
+ if (overlay->crtc->base.fb->depth == 15) {
+ regs->DCLRKV = RGB15_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+ } else {
+ regs->DCLRKV = RGB16_TO_COLORKEY(key);
+ regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+ }
+ break;
+ case 24:
+ case 32:
+ regs->DCLRKV = key;
+ regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+ }
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+ u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PLANAR;
+ break;
+ case I915_OVERLAY_YUV420:
+ cmd |= OCMD_YUV_420_PLANAR;
+ break;
+ case I915_OVERLAY_YUV411:
+ case I915_OVERLAY_YUV410:
+ cmd |= OCMD_YUV_410_PLANAR;
+ break;
+ }
+ } else { /* YUV packed */
+ switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+ case I915_OVERLAY_YUV422:
+ cmd |= OCMD_YUV_422_PACKED;
+ break;
+ case I915_OVERLAY_YUV411:
+ cmd |= OCMD_YUV_411_PACKED;
+ break;
+ }
+
+ switch (params->format & I915_OVERLAY_SWAP_MASK) {
+ case I915_OVERLAY_NO_SWAP:
+ break;
+ case I915_OVERLAY_UV_SWAP:
+ cmd |= OCMD_UV_SWAP;
+ break;
+ case I915_OVERLAY_Y_SWAP:
+ cmd |= OCMD_Y_SWAP;
+ break;
+ case I915_OVERLAY_Y_AND_UV_SWAP:
+ cmd |= OCMD_Y_AND_UV_SWAP;
+ break;
+ }
+ }
+
+ return cmd;
+}
+
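+/* Program the overlay for a new frame: pin the new buffer, switch the
+ * overlay on if it is not active yet, then write window position, source
+ * size, strides, buffer offsets, scaling and colorkey before queueing the
+ * flip and keeping the old buffer around for later release. */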
+int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ struct drm_gem_object *new_bo,
+ struct put_image_params *params)
+{
+ int ret, tmp_width;
+ struct overlay_registers *regs;
+ bool scale_changed = false;
+ struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+ struct drm_device *dev = overlay->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ BUG_ON(!overlay);
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
+ if (ret != 0)
+ goto out_unpin;
+
+ if (!overlay->active) {
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+ regs->OCONFIG = OCONF_CC_OUT_8BIT;
+ if (IS_I965GM(overlay->dev))
+ regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+ regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+ OCONF_PIPE_A : OCONF_PIPE_B;
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ ret = intel_overlay_on(overlay);
+ if (ret != 0)
+ goto out_unpin;
+ }
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unpin;
+ }
+
+ regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+ regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+ if (params->format & I915_OVERLAY_YUV_PACKED)
+ tmp_width = packed_width_bytes(params->format, params->src_w);
+ else
+ tmp_width = params->src_w;
+
+ regs->SWIDTH = params->src_w;
+ regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+ params->offset_Y, tmp_width);
+ regs->SHEIGHT = params->src_h;
+ regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
+ regs->OSTRIDE = params->stride_Y;
+
+ if (params->format & I915_OVERLAY_YUV_PLANAR) {
+ int uv_hscale = uv_hsubsampling(params->format);
+ int uv_vscale = uv_vsubsampling(params->format);
+ u32 tmp_U, tmp_V;
+ regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+ tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ params->src_w/uv_hscale);
+ tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ params->src_w/uv_hscale);
+ regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
+ regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+ regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
+ regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+ regs->OSTRIDE |= params->stride_UV << 16;
+ }
+
+ scale_changed = update_scaling_factors(overlay, regs, params);
+
+ update_colorkey(overlay, regs);
+
+ regs->OCMD = overlay_cmd_reg(params);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ intel_overlay_continue(overlay, scale_changed);
+
+ overlay->old_vid_bo = overlay->vid_bo;
+ overlay->vid_bo = new_bo->driver_private;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin(new_bo);
+ return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+ int ret;
+ struct overlay_registers *regs;
+ struct drm_device *dev = overlay->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+ if (overlay->hw_wedged) {
+ ret = intel_overlay_recover_from_interrupt(overlay, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (!overlay->active)
+ return 0;
+
+ ret = intel_overlay_release_old_vid(overlay);
+ if (ret != 0)
+ return ret;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ regs->OCMD = 0;
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ ret = intel_overlay_off(overlay);
+ if (ret != 0)
+ return ret;
+
+ intel_overlay_off_tail(overlay);
+
+ return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+ struct intel_crtc *crtc)
+{
+ drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+ u32 pipeconf;
+ int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+
+ if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+ return -EINVAL;
+
+ pipeconf = I915_READ(pipeconf_reg);
+
+ /* can't use the overlay with double wide pipe */
+ if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+ struct drm_device *dev = overlay->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 ratio;
+ u32 pfit_control = I915_READ(PFIT_CONTROL);
+
+ /* XXX: This is not the same logic as in the xorg driver, but more in
+ * line with the intel documentation for the i965 */
+ if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
+ ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
+ } else { /* on i965 use the PGM reg to read out the autoscaler values */
+ ratio = I915_READ(PFIT_PGM_RATIOS);
+ if (IS_I965G(dev))
+ ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+ else
+ ratio >>= PFIT_VERT_SCALE_SHIFT;
+ }
+
+ overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+ struct drm_intel_overlay_put_image *rec)
+{
+ struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+ if ((rec->dst_x < mode->crtc_hdisplay)
+ && (rec->dst_x + rec->dst_width
+ <= mode->crtc_hdisplay)
+ && (rec->dst_y < mode->crtc_vdisplay)
+ && (rec->dst_y + rec->dst_height
+ <= mode->crtc_vdisplay))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+ u32 tmp;
+
+ /* downscaling limit is 8.0 */
+ tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+ tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+ if (tmp > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
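+/* Validate the source image: dimension limits, per-format alignment rules,
+ * stride constraints and that all planes fit inside the buffer object. */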
+static int check_overlay_src(struct drm_device *dev,
+ struct drm_intel_overlay_put_image *rec,
+ struct drm_gem_object *new_bo)
+{
+ u32 stride_mask;
+ int depth;
+ int uv_hscale = uv_hsubsampling(rec->flags);
+ int uv_vscale = uv_vsubsampling(rec->flags);
+ size_t tmp;
+
+ /* check src dimensions */
+ if (IS_845G(dev) || IS_I830(dev)) {
+ if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
+ || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+ return -EINVAL;
+ } else {
+ if (rec->src_height > IMAGE_MAX_HEIGHT
+ || rec->src_width > IMAGE_MAX_WIDTH)
+ return -EINVAL;
+ }
+ /* better safe than sorry, use 4 as the maximal subsampling ratio */
+ if (rec->src_height < N_VERT_Y_TAPS*4
+ || rec->src_width < N_HORIZ_Y_TAPS*4)
+ return -EINVAL;
+
+ /* check alignment constraints */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ /* not implemented */
+ return -EINVAL;
+ case I915_OVERLAY_YUV_PACKED:
+ depth = packed_depth_bytes(rec->flags);
+ if (uv_vscale != 1)
+ return -EINVAL;
+ if (depth < 0)
+ return depth;
+ /* ignore UV planes */
+ rec->stride_UV = 0;
+ rec->offset_U = 0;
+ rec->offset_V = 0;
+ /* check pixel alignment */
+ if (rec->offset_Y % depth)
+ return -EINVAL;
+ break;
+ case I915_OVERLAY_YUV_PLANAR:
+ if (uv_vscale < 0 || uv_hscale < 0)
+ return -EINVAL;
+ /* no offset restrictions for planar formats */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rec->src_width % uv_hscale)
+ return -EINVAL;
+
+ /* stride checking */
+ stride_mask = 63;
+
+ if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+ return -EINVAL;
+ if (IS_I965G(dev) && rec->stride_Y < 512)
+ return -EINVAL;
+
+ tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+ 4 : 8;
+ if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+ return -EINVAL;
+
+ /* check buffer dimensions */
+ switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+ case I915_OVERLAY_RGB:
+ case I915_OVERLAY_YUV_PACKED:
+ /* always 4 Y values per depth pixels */
+ if (packed_width_bytes(rec->flags, rec->src_width)
+ > rec->stride_Y)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+ case I915_OVERLAY_YUV_PLANAR:
+ if (rec->src_width > rec->stride_Y)
+ return -EINVAL;
+ if (rec->src_width/uv_hscale > rec->stride_UV)
+ return -EINVAL;
+
+ tmp = rec->stride_Y*rec->src_height;
+ if (rec->offset_Y + tmp > new_bo->size)
+ return -EINVAL;
+ tmp = rec->stride_UV*rec->src_height;
+ tmp /= uv_vscale;
+ if (rec->offset_U + tmp > new_bo->size
+ || rec->offset_V + tmp > new_bo->size)
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_put_image *put_image_rec = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_mode_object *drmmode_obj;
+ struct intel_crtc *crtc;
+ struct drm_gem_object *new_bo;
+ struct put_image_params *params;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_overlay_switch_off(overlay);
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+ }
+
+ params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!drmmode_obj) {
+ kfree(params);
+ return -ENOENT;
+ }
+ crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+ new_bo = drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle);
+ if (!new_bo) {
+ kfree(params);
+ return -ENOENT;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (overlay->hw_wedged) {
+ ret = intel_overlay_recover_from_interrupt(overlay, 1);
+ if (ret != 0)
+ goto out_unlock;
+ }
+
+ if (overlay->crtc != crtc) {
+ struct drm_display_mode *mode = &crtc->base.mode;
+ ret = intel_overlay_switch_off(overlay);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = check_overlay_possible_on_crtc(overlay, crtc);
+ if (ret != 0)
+ goto out_unlock;
+
+ overlay->crtc = crtc;
+ crtc->overlay = overlay;
+
+ if (intel_panel_fitter_pipe(dev) == crtc->pipe
+ /* and line too wide, i.e. one-line-mode */
+ && mode->hdisplay > 1024) {
+ overlay->pfit_active = 1;
+ update_pfit_vscale_ratio(overlay);
+ } else
+ overlay->pfit_active = 0;
+ }
+
+ ret = check_overlay_dst(overlay, put_image_rec);
+ if (ret != 0)
+ goto out_unlock;
+
+ if (overlay->pfit_active) {
+ params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+ overlay->pfit_vscale_ratio);
+ /* shifting right rounds downwards, so add 1 */
+ params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+ overlay->pfit_vscale_ratio) + 1;
+ } else {
+ params->dst_y = put_image_rec->dst_y;
+ params->dst_h = put_image_rec->dst_height;
+ }
+ params->dst_x = put_image_rec->dst_x;
+ params->dst_w = put_image_rec->dst_width;
+
+ params->src_w = put_image_rec->src_width;
+ params->src_h = put_image_rec->src_height;
+ params->src_scan_w = put_image_rec->src_scan_width;
+ params->src_scan_h = put_image_rec->src_scan_height;
+ if (params->src_scan_h > params->src_h
+ || params->src_scan_w > params->src_w) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = check_overlay_src(dev, put_image_rec, new_bo);
+ if (ret != 0)
+ goto out_unlock;
+ params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+ params->stride_Y = put_image_rec->stride_Y;
+ params->stride_UV = put_image_rec->stride_UV;
+ params->offset_Y = put_image_rec->offset_Y;
+ params->offset_U = put_image_rec->offset_U;
+ params->offset_V = put_image_rec->offset_V;
+
+ /* Check scaling after src size to prevent a divide-by-zero. */
+ ret = check_overlay_scaling(params);
+ if (ret != 0)
+ goto out_unlock;
+
+ ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ if (ret != 0)
+ goto out_unlock;
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ kfree(params);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_gem_object_unreference(new_bo);
+ kfree(params);
+
+ return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+ struct overlay_registers *regs)
+{
+ regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+ regs->OCLRC1 = overlay->saturation;
+}
+
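+/* Each gamma value packs three 8-bit channels; the top byte must be zero
+ * and every channel must increase strictly from gamma1 to gamma2. */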
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+ int i;
+
+ if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+ return false;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma1 >> i*8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (((gamma5 >> i*8) & 0xff) == 0x80)
+ return false;
+ }
+
+ return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+ if (!check_gamma_bounds(0, attrs->gamma0)
+ || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
+ || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
+ || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
+ || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
+ || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
+ || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+ return -EINVAL;
+ if (!check_gamma5_errata(attrs->gamma5))
+ return -EINVAL;
+ return 0;
+}
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_overlay_attrs *attrs = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ overlay = dev_priv->overlay;
+ if (!overlay) {
+ DRM_DEBUG("userspace bug: no overlay\n");
+ return -ENODEV;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+ attrs->color_key = overlay->color_key;
+ attrs->brightness = overlay->brightness;
+ attrs->contrast = overlay->contrast;
+ attrs->saturation = overlay->saturation;
+
+ if (IS_I9XX(dev)) {
+ attrs->gamma0 = I915_READ(OGAMC0);
+ attrs->gamma1 = I915_READ(OGAMC1);
+ attrs->gamma2 = I915_READ(OGAMC2);
+ attrs->gamma3 = I915_READ(OGAMC3);
+ attrs->gamma4 = I915_READ(OGAMC4);
+ attrs->gamma5 = I915_READ(OGAMC5);
+ }
+ ret = 0;
+ } else {
+ overlay->color_key = attrs->color_key;
+ if (attrs->brightness >= -128 && attrs->brightness <= 127) {
+ overlay->brightness = attrs->brightness;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (attrs->contrast <= 255) {
+ overlay->contrast = attrs->contrast;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (attrs->saturation <= 1023) {
+ overlay->saturation = attrs->saturation;
+ } else {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+ if (!IS_I9XX(dev)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (overlay->active) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ ret = check_gamma(attrs);
+ if (ret != 0)
+ goto out_unlock;
+
+ I915_WRITE(OGAMC0, attrs->gamma0);
+ I915_WRITE(OGAMC1, attrs->gamma1);
+ I915_WRITE(OGAMC2, attrs->gamma2);
+ I915_WRITE(OGAMC3, attrs->gamma3);
+ I915_WRITE(OGAMC4, attrs->gamma4);
+ I915_WRITE(OGAMC5, attrs->gamma5);
+ }
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_overlay *overlay;
+ struct drm_gem_object *reg_bo;
+ struct overlay_registers *regs;
+ int ret;
+
+ if (!OVERLAY_EXISTS(dev))
+ return;
+
+ overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+ if (!overlay)
+ return;
+ overlay->dev = dev;
+
+ reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+ if (!reg_bo)
+ goto out_free;
+ overlay->reg_bo = reg_bo->driver_private;
+
+ if (OVERLAY_NONPHYSICAL(dev)) {
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ if (ret) {
+ DRM_ERROR("failed to pin overlay register bo\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ } else {
+ ret = i915_gem_attach_phys_object(dev, reg_bo,
+ I915_GEM_PHYS_OVERLAY_REGS);
+ if (ret) {
+ DRM_ERROR("failed to attach phys overlay regs\n");
+ goto out_free_bo;
+ }
+ overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ }
+
+ /* init all values */
+ overlay->color_key = 0x0101fe;
+ overlay->brightness = -19;
+ overlay->contrast = 75;
+ overlay->saturation = 146;
+
+ regs = intel_overlay_map_regs_atomic(overlay);
+ if (!regs)
+ goto out_free_bo;
+
+ memset(regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(regs);
+
+ update_reg_attrs(overlay, regs);
+
+ intel_overlay_unmap_regs_atomic(overlay);
+
+ dev_priv->overlay = overlay;
+ DRM_INFO("initialized overlay support\n");
+ return;
+
+out_free_bo:
+ drm_gem_object_unreference(reg_bo);
+out_free:
+ kfree(overlay);
+ return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->overlay) {
+ /* The bos should already have been freed by the generic code.
+ * Furthermore, modesetting teardown happens beforehand, so the
+ * hardware should already be off. */
+ BUG_ON(dev_priv->overlay->active);
+
+ kfree(dev_priv->overlay);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7fa3279e2f..24a3dc99716 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,8 +36,6 @@
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
-#undef SDVO_DEBUG
-
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
"PAL_B" , "PAL_D" , "PAL_G" ,
@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name {
#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
-#ifdef SDVO_DEBUG
static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
void *args, int args_len)
{
@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
DRM_LOG_KMS("(%02X)", cmd);
DRM_LOG_KMS("\n");
}
-#else
-#define intel_sdvo_debug_write(o, c, a, l)
-#endif
static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
void *args, int args_len)
@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
}
-#ifdef SDVO_DEBUG
static const char *cmd_status_names[] = {
"Power on",
"Success",
@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
DRM_LOG_KMS("(??? %d)", status);
DRM_LOG_KMS("\n");
}
-#else
-#define intel_sdvo_debug_response(o, r, l, s)
-#endif
static u8 intel_sdvo_read_response(struct intel_output *intel_output,
void *response, int response_len)
@@ -1627,6 +1617,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
intel_sdvo_write_cmd(intel_output,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+ if (sdvo_priv->is_tv) {
+ /* add 30ms delay when the output type is SDVO-TV */
+ mdelay(30);
+ }
status = intel_sdvo_read_response(intel_output, &response, 2);
DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9ca917931af..552ec110b74 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
tv_ctl |= TV_TRILEVEL_SYNC;
if (tv_mode->pal_burst)
tv_ctl |= TV_PAL_BURST;
+
scctl1 = 0;
- /* dda1 implies valid video levels */
- if (tv_mode->dda1_inc) {
+ if (tv_mode->dda1_inc)
scctl1 |= TV_SC_DDA1_EN;
- }
-
if (tv_mode->dda2_inc)
scctl1 |= TV_SC_DDA2_EN;
-
if (tv_mode->dda3_inc)
scctl1 |= TV_SC_DDA3_EN;
-
scctl1 |= tv_mode->sc_reset;
- scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+ if (video_levels)
+ scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG("Detected Composite TV connection\n");
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG("Detected S-Video TV connection\n");
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG("Detected Component TV connection\n");
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- DRM_DEBUG("No TV connection detected\n");
+ DRM_DEBUG_KMS("No TV connection detected\n");
type = -1;
}
@@ -1702,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_tv_enc_destroy,
};
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, assume that the TV is present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i, ret;
+
+ if (!dev_priv->child_dev_num)
+ return 1;
+
+ ret = 0;
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+ p_child->device_type != DEVICE_TYPE_TV)
+ continue;
+ /* Regard the device as present only when the addin_offset
+ * is non-zero.
+ */
+ if (p_child->addin_offset) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
void
intel_tv_init(struct drm_device *dev)
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
+ if (!tv_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ return;
+ }
/* Even if we have an encoder we may not have a connector */
if (!dev_priv->int_tv_support)
return;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 97ee566ef74..ddfe16197b5 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -68,7 +68,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 30d00478dde..c1f877b7bac 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
if (err)
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
}
typedef struct drm_mga_getparam32 {
@@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
&getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
}
typedef struct drm_mga_drm_bootstrap32 {
@@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
|| __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
return -EFAULT;
- err = drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+ err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
(unsigned long)dma_bootstrap);
if (err)
return err;
@@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 00000000000..b1bc1ea182b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,45 @@
+config DRM_NOUVEAU
+ tristate "Nouveau (nVidia) cards"
+ depends on DRM
+ select FW_LOADER
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB
+ select FRAMEBUFFER_CONSOLE if !EMBEDDED
+ select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ help
+ Choose this option for open-source nVidia support.
+
+config DRM_NOUVEAU_BACKLIGHT
+ bool "Support for backlight control"
+ depends on DRM_NOUVEAU
+ default y
+ help
+ Say Y here if you want to control the backlight of your display
+ (e.g. a laptop panel).
+
+config DRM_NOUVEAU_DEBUG
+ bool "Build in Nouveau's debugfs support"
+ depends on DRM_NOUVEAU && DEBUG_FS
+ default y
+ help
+ Say Y here if you want Nouveau to output debugging information
+ via debugfs.
+
+menu "I2C encoder or helper chips"
+ depends on DRM && I2C
+
+config DRM_I2C_CH7006
+ tristate "Chrontel ch7006 TV encoder"
+ depends on DRM_NOUVEAU
+ default m
+ help
+ Support for Chrontel ch7006 and similar TV encoders, found
+ on some nVidia video cards.
+
+ This driver is currently only useful if you're also using
+ the nouveau driver.
+endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 00000000000..48c290b5da8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,32 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
+ nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+ nouveau_sgdma.o nouveau_dma.o \
+ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
+ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
+ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+ nouveau_dp.o nouveau_grctx.o \
+ nv04_timer.o \
+ nv04_mc.o nv40_mc.o nv50_mc.o \
+ nv04_fb.o nv10_fb.o nv40_fb.o \
+ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv04_graph.o nv10_graph.o nv20_graph.o \
+ nv40_graph.o nv50_graph.o \
+ nv40_grctx.o \
+ nv04_instmem.o nv50_instmem.o \
+ nv50_crtc.o nv50_dac.o nv50_sor.o \
+ nv50_cursor.o nv50_display.o nv50_fbcon.o \
+ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
+ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+ nv17_gpio.o
+
+nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+
+obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
new file mode 100644
index 00000000000..1cf488247a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -0,0 +1,125 @@
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nv50_display.h"
+
+#define NOUVEAU_DSM_SUPPORTED 0x00
+#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
+
+#define NOUVEAU_DSM_ACTIVE 0x01
+#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
+
+#define NOUVEAU_DSM_LED 0x02
+#define NOUVEAU_DSM_LED_STATE 0x00
+#define NOUVEAU_DSM_LED_OFF 0x10
+#define NOUVEAU_DSM_LED_STAMINA 0x11
+#define NOUVEAU_DSM_LED_SPEED 0x12
+
+#define NOUVEAU_DSM_POWER 0x03
+#define NOUVEAU_DSM_POWER_STATE 0x00
+#define NOUVEAU_DSM_POWER_SPEED 0x01
+#define NOUVEAU_DSM_POWER_STAMINA 0x02
+
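+/* Evaluate the _DSM method on the GPU's ACPI handle with the given function
+ * and argument; a 4-byte buffer reply is assembled little-endian into
+ * *result when the caller asks for one. */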
+static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
+{
+ static char muid[] = {
+ 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+ };
+
+ struct pci_dev *pdev = dev->pdev;
+ struct acpi_handle *handle;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object params[4];
+ union acpi_object *obj;
+ int err;
+
+ handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+
+ if (!handle)
+ return -ENODEV;
+
+ input.count = 4;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_BUFFER;
+ params[0].buffer.length = sizeof(muid);
+ params[0].buffer.pointer = (char *)muid;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = 0x00000102;
+ params[2].type = ACPI_TYPE_INTEGER;
+ params[2].integer.value = func;
+ params[3].type = ACPI_TYPE_INTEGER;
+ params[3].integer.value = arg;
+
+ err = acpi_evaluate_object(handle, "_DSM", &input, &output);
+ if (err) {
+ NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
+ return err;
+ }
+
+ obj = (union acpi_object *)output.pointer;
+
+ if (obj->type == ACPI_TYPE_INTEGER)
+ if (obj->integer.value == 0x80000002)
+ return -ENODEV;
+
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ if (obj->buffer.length == 4 && result) {
+ *result = 0;
+ *result |= obj->buffer.pointer[0];
+ *result |= (obj->buffer.pointer[1] << 8);
+ *result |= (obj->buffer.pointer[2] << 16);
+ *result |= (obj->buffer.pointer[3] << 24);
+ }
+ }
+
+ kfree(output.pointer);
+ return 0;
+}
+
+int nouveau_hybrid_setup(struct drm_device *dev)
+{
+ int result;
+
+ if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+ &result))
+ return -ENODEV;
+
+ NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
+
+ if (result & 0x1) { /* Stamina mode - disable the external GPU */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
+ NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
+ NULL);
+ } else { /* Ensure that the external GPU is enabled */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+ NULL);
+ }
+
+ return 0;
+}
+
+bool nouveau_dsm_probe(struct drm_device *dev)
+{
+ int support = 0;
+
+ if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
+ return false;
+
+ if (!support)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
new file mode 100644
index 00000000000..20564f8cb0e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Matthew Garrett <mjg@redhat.com>
+ *
+ * Register locations derived from NVClock by Roderick Colenbrander
+ */
+
+#include <linux/backlight.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+
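+/* On NV40 the backlight level is read from NV40_PMC_BACKLIGHT and shifted
+ * down by 16 bits; on NV50 it is read directly from
+ * NV50_PDISPLAY_SOR_BACKLIGHT. */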
+static int nv40_get_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
+ >> 16;
+
+ return val;
+}
+
+static int nv40_set_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int val = bd->props.brightness;
+ int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
+
+ nv_wr32(dev, NV40_PMC_BACKLIGHT,
+ (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
+
+ return 0;
+}
+
+static struct backlight_ops nv40_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nv40_get_intensity,
+ .update_status = nv40_set_intensity,
+};
+
+static int nv50_get_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+
+ return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
+}
+
+static int nv50_set_intensity(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int val = bd->props.brightness;
+
+ nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
+ val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
+ return 0;
+}
+
+static struct backlight_ops nv50_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = nv50_get_intensity,
+ .update_status = nv50_set_intensity,
+};
+
+static int nouveau_nv40_backlight_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_device *bd;
+
+ if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
+ return 0;
+
+ bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ &nv40_bl_ops);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ dev_priv->backlight = bd;
+ bd->props.max_brightness = 31;
+ bd->props.brightness = nv40_get_intensity(bd);
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static int nouveau_nv50_backlight_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct backlight_device *bd;
+
+ if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
+ return 0;
+
+ bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
+ &nv50_bl_ops);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ dev_priv->backlight = bd;
+ bd->props.max_brightness = 1025;
+ bd->props.brightness = nv50_get_intensity(bd);
+ backlight_update_status(bd);
+ return 0;
+}
+
+int nouveau_backlight_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->card_type) {
+ case NV_40:
+ return nouveau_nv40_backlight_init(dev);
+ case NV_50:
+ return nouveau_nv50_backlight_init(dev);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void nouveau_backlight_exit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->backlight) {
+ backlight_device_unregister(dev_priv->backlight);
+ dev_priv->backlight = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
new file mode 100644
index 00000000000..ba143972769
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -0,0 +1,6078 @@
+/*
+ * Copyright 2005-2006 Erik Waling
+ * Copyright 2006 Stephane Marchesin
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#define NV_DEBUG_NOTRACE
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+/* these defines are made up */
+#define NV_CIO_CRE_44_HEADA 0x0
+#define NV_CIO_CRE_44_HEADB 0x3
+#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
+#define LEGACY_I2C_CRT 0x80
+#define LEGACY_I2C_PANEL 0x81
+#define LEGACY_I2C_TV 0x82
+
+#define EDID1_LEN 128
+
+#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
+#define LOG_OLD_VALUE(x)
+
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+
+struct init_exec {
+ bool execute;
+ bool repeat;
+};
+
+static bool nv_cksum(const uint8_t *data, unsigned int length)
+{
+ /*
+ * There are a few checksums in the BIOS, so here's a generic checking
+ * function.
+ */
+ int i;
+ uint8_t sum = 0;
+
+ for (i = 0; i < length; i++)
+ sum += data[i];
+
+ if (sum)
+ return true;
+
+ return false;
+}
+
+static int
+score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
+{
+ if (!(data[0] == 0x55 && data[1] == 0xAA)) {
+ NV_TRACEWARN(dev, "... BIOS signature not found\n");
+ return 0;
+ }
+
+ if (nv_cksum(data, data[2] * 512)) {
+ NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
+ /* if a ro image is somewhat bad, it's probably all rubbish */
+ return writeable ? 2 : 1;
+ } else
+ NV_TRACE(dev, "... appears to be valid\n");
+
+ return 3;
+}
+
+static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pci_nv_20, save_pci_nv_20;
+ int pcir_ptr;
+ int i;
+
+ if (dev_priv->card_type >= NV_50)
+ pci_nv_20 = 0x88050;
+ else
+ pci_nv_20 = NV_PBUS_PCI_NV_20;
+
+ /* enable ROM access */
+ save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
+ nvWriteMC(dev, pci_nv_20,
+ save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+
+ /* bail if no rom signature */
+ if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
+ nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
+ goto out;
+
+ /* additional check (see note below) - read PCI record header */
+ pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
+ nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
+ if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
+ nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
+ goto out;
+
+ /* on some 6600GT/6800LE prom reads are messed up. nvclock alleges a
+ * good read may be obtained by waiting or re-reading (cargocult: 5x)
+ * each byte. we'll hope pramin has something usable instead
+ */
+ for (i = 0; i < NV_PROM_SIZE; i++)
+ data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
+
+out:
+ /* disable ROM access */
+ nvWriteMC(dev, pci_nv_20,
+ save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+}
+
+static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t old_bar0_pramin = 0;
+ int i;
+
+ if (dev_priv->card_type >= NV_50) {
+ uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
+
+ if (!vbios_vram)
+ vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
+
+ old_bar0_pramin = nv_rd32(dev, 0x1700);
+ nv_wr32(dev, 0x1700, vbios_vram >> 16);
+ }
+
+ /* bail if no rom signature */
+ if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
+ nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
+ goto out;
+
+ for (i = 0; i < NV_PROM_SIZE; i++)
+ data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
+
+out:
+ if (dev_priv->card_type >= NV_50)
+ nv_wr32(dev, 0x1700, old_bar0_pramin);
+}
+
+static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
+{
+ void __iomem *rom = NULL;
+ size_t rom_len;
+ int ret;
+
+ ret = pci_enable_rom(dev->pdev);
+ if (ret)
+ return;
+
+ rom = pci_map_rom(dev->pdev, &rom_len);
+ if (!rom)
+ goto out;
+ memcpy_fromio(data, rom, rom_len);
+ pci_unmap_rom(dev->pdev, rom);
+
+out:
+ pci_disable_rom(dev->pdev);
+}
+
+struct methods {
+ const char desc[8];
+ void (*loadbios)(struct drm_device *, uint8_t *);
+ const bool rw;
+};
+
+static struct methods nv04_methods[] = {
+ { "PROM", load_vbios_prom, false },
+ { "PRAMIN", load_vbios_pramin, true },
+ { "PCIROM", load_vbios_pci, true },
+};
+
+static struct methods nv50_methods[] = {
+ { "PRAMIN", load_vbios_pramin, true },
+ { "PROM", load_vbios_prom, false },
+ { "PCIROM", load_vbios_pci, true },
+};
+
+#define METHODCNT 3
+
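+/* Shadow the VBIOS: honour a user-requested source first, then try each
+ * method in order, scoring the image it returns and falling back to the
+ * best partial score when no method yields a perfect one. */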
+static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct methods *methods;
+ int i;
+ int testscore = 3;
+ int scores[METHODCNT];
+
+ if (nouveau_vbios) {
+ methods = nv04_methods;
+ for (i = 0; i < METHODCNT; i++)
+ if (!strcasecmp(nouveau_vbios, methods[i].desc))
+ break;
+
+ if (i < METHODCNT) {
+ NV_INFO(dev, "Attempting to use BIOS image from %s\n",
+ methods[i].desc);
+
+ methods[i].loadbios(dev, data);
+ if (score_vbios(dev, data, methods[i].rw))
+ return true;
+ }
+
+ NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
+ }
+
+ if (dev_priv->card_type < NV_50)
+ methods = nv04_methods;
+ else
+ methods = nv50_methods;
+
+ for (i = 0; i < METHODCNT; i++) {
+ NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
+ methods[i].desc);
+ data[0] = data[1] = 0; /* avoid reuse of previous image */
+ methods[i].loadbios(dev, data);
+ scores[i] = score_vbios(dev, data, methods[i].rw);
+ if (scores[i] == testscore)
+ return true;
+ }
+
+ while (--testscore > 0) {
+ for (i = 0; i < METHODCNT; i++) {
+ if (scores[i] == testscore) {
+ NV_TRACE(dev, "Using BIOS image from %s\n",
+ methods[i].desc);
+ methods[i].loadbios(dev, data);
+ return true;
+ }
+ }
+ }
+
+ NV_ERROR(dev, "No valid BIOS image found\n");
+ return false;
+}
+
+struct init_tbl_entry {
+ char *name;
+ uint8_t id;
+ int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
+};
+
+struct bit_entry {
+ uint8_t id[2];
+ uint16_t length;
+ uint16_t offset;
+};
+
+static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+
+#define MACRO_INDEX_SIZE 2
+#define MACRO_SIZE 8
+#define CONDITION_SIZE 12
+#define IO_FLAG_CONDITION_SIZE 9
+#define IO_CONDITION_SIZE 5
+#define MEM_INIT_SIZE 66
+
+static void still_alive(void)
+{
+#if 0
+ sync();
+ msleep(2);
+#endif
+}
+
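+/* On NV50+, registers flagged with bit 30 are per-output: offset them by
+ * the OR number times 0x800 (plus 0x80 when bit 29 is set and the first
+ * SOR link bit is clear), then strip the flag bits. */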
+static uint32_t
+munge_reg(struct nvbios *bios, uint32_t reg)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct dcb_entry *dcbent = bios->display.output;
+
+ if (dev_priv->card_type < NV_50)
+ return reg;
+
+ if (reg & 0x40000000) {
+ BUG_ON(!dcbent);
+
+ reg += (ffs(dcbent->or) - 1) * 0x800;
+ if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
+ reg += 0x00000080;
+ }
+
+ reg &= ~0x60000000;
+ return reg;
+}
+
+static int
+valid_reg(struct nvbios *bios, uint32_t reg)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+
+ /* C51 has misaligned regs on purpose. Marvellous */
+ if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
+ NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
+ reg);
+ return 0;
+ }
+ /*
+ * Warn on C51 regs that have not been verified accessible in
+ * mmiotracing
+ */
+ if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
+ reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
+ NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
+ reg);
+
+ /* Trust the init scripts on G80 */
+ if (dev_priv->card_type >= NV_50)
+ return 1;
+
+ #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
+ if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
+ (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
+ WITHIN(reg, 0xc000, 0x48))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
+ if (reg == 0x00011014 || reg == 0x00020328)
+ return 1;
+ if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
+ return 1;
+ }
+ if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
+ return 1;
+ if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
+ return 1;
+ if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
+ return 1;
+ if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
+ WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
+ return 1;
+ #undef WITHIN
+
+ NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
+
+ return 0;
+}
+
+static bool
+valid_idx_port(struct nvbios *bios, uint16_t port)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+
+ /*
+ * If adding more ports here, the read/write functions below will need
+ * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
+ * used for the port in question
+ */
+ if (dev_priv->card_type < NV_50) {
+ if (port == NV_CIO_CRX__COLOR)
+ return true;
+ if (port == NV_VIO_SRX)
+ return true;
+ } else {
+ if (port == NV_CIO_CRX__COLOR)
+ return true;
+ }
+
+ NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
+ port);
+
+ return false;
+}
+
+static bool
+valid_port(struct nvbios *bios, uint16_t port)
+{
+ struct drm_device *dev = bios->dev;
+
+ /*
+ * If adding more ports here, the read/write functions below will need
+ * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
+ * used for the port in question
+ */
+ if (port == NV_VIO_VSE2)
+ return true;
+
+ NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
+
+ return false;
+}
+
+static uint32_t
+bios_rd32(struct nvbios *bios, uint32_t reg)
+{
+ uint32_t data;
+
+ reg = munge_reg(bios, reg);
+ if (!valid_reg(bios, reg))
+ return 0;
+
+ /*
+ * C51 sometimes uses regs with bit0 set in the address. For these
+ * cases there should exist a translation in a BIOS table to an IO
+ * port address which the BIOS uses for accessing the reg
+ *
+ * These only seem to appear for the power control regs to a flat panel,
+ * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs
+ * for 0x1308 and 0x1310 are used - hence the mask below. An S3
+ * suspend-resume mmio trace from a C51 will be required to see if this
+ * is true for the power microcode in 0x14.., or whether the direct IO
+ * port access method is needed
+ */
+ if (reg & 0x1)
+ reg &= ~0x1;
+
+ data = nv_rd32(bios->dev, reg);
+
+ BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
+
+ return data;
+}
+
+static void
+bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+
+ reg = munge_reg(bios, reg);
+ if (!valid_reg(bios, reg))
+ return;
+
+ /* see note in bios_rd32 */
+ if (reg & 0x1)
+ reg &= 0xfffffffe;
+
+ LOG_OLD_VALUE(bios_rd32(bios, reg));
+ BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
+
+ if (dev_priv->VBIOS.execute) {
+ still_alive();
+ nv_wr32(bios->dev, reg, data);
+ }
+}
+
+static uint8_t
+bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+ uint8_t data;
+
+ if (!valid_idx_port(bios, port))
+ return 0;
+
+ if (dev_priv->card_type < NV_50) {
+ if (port == NV_VIO_SRX)
+ data = NVReadVgaSeq(dev, bios->state.crtchead, index);
+ else /* assume NV_CIO_CRX__COLOR */
+ data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
+ } else {
+ uint32_t data32;
+
+ data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
+ data = (data32 >> ((index & 3) << 3)) & 0xff;
+ }
+
+ BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, "
+ "Head: 0x%02X, Data: 0x%02X\n",
+ port, index, bios->state.crtchead, data);
+ return data;
+}
+
+static void
+bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
+{
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct drm_device *dev = bios->dev;
+
+ if (!valid_idx_port(bios, port))
+ return;
+
+ /*
+ * The current head is maintained in the nvbios member state.crtchead.
+ * We trap changes to CR44 and update the head variable and hence the
+ * register set written.
+ * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
+ * of the write, and to head1 after the write
+ */
+ if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
+ data != NV_CIO_CRE_44_HEADB)
+ bios->state.crtchead = 0;
+
+ LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
+ BIOSLOG(bios, " Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
+ "Head: 0x%02X, Data: 0x%02X\n",
+ port, index, bios->state.crtchead, data);
+
+ if (bios->execute && dev_priv->card_type < NV_50) {
+ still_alive();
+ if (port == NV_VIO_SRX)
+ NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
+ else /* assume NV_CIO_CRX__COLOR */
+ NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
+ } else
+ if (bios->execute) {
+ uint32_t data32, shift = (index & 3) << 3;
+
+ still_alive();
+
+ data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
+ data32 &= ~(0xff << shift);
+ data32 |= (data << shift);
+ bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
+ }
+
+ if (port == NV_CIO_CRX__COLOR &&
+ index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
+ bios->state.crtchead = 1;
+}
+
+static uint8_t
+bios_port_rd(struct nvbios *bios, uint16_t port)
+{
+ uint8_t data, head = bios->state.crtchead;
+
+ if (!valid_port(bios, port))
+ return 0;
+
+ data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
+
+ BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
+ port, head, data);
+
+ return data;
+}
+
+static void
+bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
+{
+ int head = bios->state.crtchead;
+
+ if (!valid_port(bios, port))
+ return;
+
+ LOG_OLD_VALUE(bios_port_rd(bios, port));
+ BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
+ port, head, data);
+
+ if (!bios->execute)
+ return;
+
+ still_alive();
+ NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
+}
+
+static bool
+io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+ /*
+ * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
+ * for the CRTC index; 1 byte for the mask to apply to the value
+ * retrieved from the CRTC; 1 byte for the shift right to apply to the
+ * masked CRTC value; 2 bytes for the offset to the flag array, to
+ * which the shifted value is added; 1 byte for the mask applied to the
+ * value read from the flag array; and 1 byte for the value to compare
+ * against the masked byte from the flag table.
+ */
+
+ uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
+ uint16_t crtcport = ROM16(bios->data[condptr]);
+ uint8_t crtcindex = bios->data[condptr + 2];
+ uint8_t mask = bios->data[condptr + 3];
+ uint8_t shift = bios->data[condptr + 4];
+ uint16_t flagarray = ROM16(bios->data[condptr + 5]);
+ uint8_t flagarraymask = bios->data[condptr + 7];
+ uint8_t cmpval = bios->data[condptr + 8];
+ uint8_t data;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
+ "Cmpval: 0x%02X\n",
+ offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
+
+ data = bios_idxprt_rd(bios, crtcport, crtcindex);
+
+ data = bios->data[flagarray + ((data & mask) >> shift)];
+ data &= flagarraymask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
+ offset, data, cmpval);
+
+ return (data == cmpval);
+}
+
+static bool
+bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+ /*
+ * The condition table entry has 4 bytes for the address of the
+ * register to check, 4 bytes for a mask to apply to the register and
+ * 4 for a test comparison value
+ */
+
+ uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
+ uint32_t reg = ROM32(bios->data[condptr]);
+ uint32_t mask = ROM32(bios->data[condptr + 4]);
+ uint32_t cmpval = ROM32(bios->data[condptr + 8]);
+ uint32_t data;
+
+ BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
+ offset, cond, reg, mask);
+
+ data = bios_rd32(bios, reg) & mask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
+ offset, data, cmpval);
+
+ return (data == cmpval);
+}
+
+static bool
+io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
+{
+ /*
+ * The IO condition entry has 2 bytes for the IO port address; 1 byte
+ * for the index to write to io_port; 1 byte for the mask to apply to
+ * the byte read from io_port+1; and 1 byte for the value to compare
+ * against the masked byte.
+ */
+
+ uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
+ uint16_t io_port = ROM16(bios->data[condptr]);
+ uint8_t port_index = bios->data[condptr + 2];
+ uint8_t mask = bios->data[condptr + 3];
+ uint8_t cmpval = bios->data[condptr + 4];
+
+ uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
+ offset, data, cmpval);
+
+ return (data == cmpval);
+}
+
+static int
+nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t reg0 = nv_rd32(dev, reg + 0);
+ uint32_t reg1 = nv_rd32(dev, reg + 4);
+ struct nouveau_pll_vals pll;
+ struct pll_lims pll_limits;
+ int ret;
+
+ ret = get_pll_limits(dev, reg, &pll_limits);
+ if (ret)
+ return ret;
+
+ clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
+ if (!clk)
+ return -ERANGE;
+
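+ /* pack log2P into bits 16:18 of reg0, and N1/M1 into the low word of reg1 */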
+ reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
+ reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
+
+ if (dev_priv->VBIOS.execute) {
+ still_alive();
+ nv_wr32(dev, reg + 4, reg1);
+ nv_wr32(dev, reg + 0, reg0);
+ }
+
+ return 0;
+}
+
+static int
+setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
+{
+ struct drm_device *dev = bios->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ /* clk in kHz */
+ struct pll_lims pll_lim;
+ struct nouveau_pll_vals pllvals;
+ int ret;
+
+ if (dev_priv->card_type >= NV_50)
+ return nv50_pll_set(dev, reg, clk);
+
+ /* high regs (such as in the mac g5 table) don't get the -4 adjustment */
+ ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
+ if (ret)
+ return ret;
+
+ clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
+ if (!clk)
+ return -ERANGE;
+
+ if (bios->execute) {
+ still_alive();
+ nouveau_hw_setpll(dev, reg, &pllvals);
+ }
+
+ return 0;
+}
+
+static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+
+ /*
+ * For the results of this function to be correct, CR44 must have been
+ * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
+ * and the DCB table parsed, before the script calling the function is
+ * run. run_digital_op_script is an example of how to do such setup.
+ */
+
+ uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
+
+ if (dcb_entry > bios->bdcb.dcb.entries) {
+ NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
+ "(%02X)\n", dcb_entry);
+ dcb_entry = 0x7f; /* unused / invalid marker */
+ }
+
+ return dcb_entry;
+}
+
+static struct nouveau_i2c_chan *
+init_i2c_device_find(struct drm_device *dev, int i2c_index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
+
+ if (i2c_index == 0xff) {
+ /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
+ int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
+ int default_indices = bdcb->i2c_default_indices;
+
+ if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
+ shift = 4;
+
+ i2c_index = (default_indices >> shift) & 0xf;
+ }
+ if (i2c_index == 0x80) /* g80+ */
+ i2c_index = bdcb->i2c_default_indices & 0xf;
+
+ return nouveau_i2c_find(dev, i2c_index);
+}
+
+static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
+{
+ /*
+ * For mlv < 0x80, it is an index into a table of TMDS base addresses.
+ * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
+ * CR58 for CR57 = 0 to index a table of offsets to the basic
+ * 0x6808b0 address.
+ * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
+ * CR58 for CR57 = 0 to index a table of offsets to the basic
+ * 0x6808b0 address, and then XOR the offset with 8.
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const int pramdac_offset[13] = {
+ 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
+ const uint32_t pramdac_table[4] = {
+ 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
+
+ if (mlv >= 0x80) {
+ int dcb_entry, dacoffset;
+
+ /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
+ dcb_entry = dcb_entry_idx_from_crtchead(dev);
+ if (dcb_entry == 0x7f)
+ return 0;
+ dacoffset = pramdac_offset[
+ dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
+ if (mlv == 0x81)
+ dacoffset ^= 8;
+ return 0x6808b0 + dacoffset;
+ } else {
+ if (mlv >= ARRAY_SIZE(pramdac_table)) {
+ NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
+ mlv);
+ return 0;
+ }
+ return pramdac_table[mlv];
+ }
+}
+
+static int
+init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): count
+ * offset + 7 (32 bit): register
+ * offset + 11 (32 bit): configuration 1
+ * ...
+ *
+ * Starting at offset + 11 there are "count" 32 bit values.
+ * To find out which value to use, read index "CRTC index" on "CRTC
+ * port", AND this value with "mask" and then bit shift it right by
+ * "shift" bits. Read the appropriate value using this index and
+ * write it to "register"
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t shift = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 6];
+ uint32_t reg = ROM32(bios->data[offset + 7]);
+ uint8_t config;
+ uint32_t configval;
+ int len = 11 + count * 4;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
+ offset, crtcport, crtcindex, mask, shift, count, reg);
+
+ config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+ if (config > count) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+ offset, config, count);
+ return 0;
+ }
+
+ configval = ROM32(bios->data[offset + 11 + config * 4]);
+
+ BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
+
+ bios_wr32(bios, reg, configval);
+
+ return len;
+}
+
+static int
+init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_REPEAT opcode: 0x33 ('3')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): count
+ *
+ * Execute script following this opcode up to INIT_REPEAT_END
+ * "count" times
+ */
+
+ uint8_t count = bios->data[offset + 1];
+ uint8_t i;
+
+ /* no iexec->execute check by design */
+
+ BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
+ offset, count);
+
+ iexec->repeat = true;
+
+ /*
+ * count - 1, as the script block will execute once when we leave this
+ * opcode -- this is compatible with bios behaviour as:
+ * a) the block is always executed at least once, even if count == 0
+ * b) the bios interpreter skips to the op following INIT_END_REPEAT,
+ * while we don't
+ */
+ for (i = 0; i < count - 1; i++)
+ parse_init_table(bios, offset + 2, iexec);
+
+ iexec->repeat = false;
+
+ return 2;
+}
+
+static int
+init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): IO flag condition index
+ * offset + 7 (8 bit): count
+ * offset + 8 (32 bit): register
+ * offset + 12 (16 bit): frequency 1
+ * ...
+ *
+ * Starting at offset + 12 there are "count" 16 bit frequencies, in units of 10 kHz.
+ * Set PLL register "register" to coefficients for frequency n,
+ * selected by reading index "CRTC index" of "CRTC port" ANDed with
+ * "mask" and shifted right by "shift".
+ *
+ * If "IO flag condition index" > 0, and condition met, double
+ * frequency before setting it.
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t shift = bios->data[offset + 5];
+ int8_t io_flag_condition_idx = bios->data[offset + 6];
+ uint8_t count = bios->data[offset + 7];
+ uint32_t reg = ROM32(bios->data[offset + 8]);
+ uint8_t config;
+ uint16_t freq;
+ int len = 12 + count * 2;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
+ "Count: 0x%02X, Reg: 0x%08X\n",
+ offset, crtcport, crtcindex, mask, shift,
+ io_flag_condition_idx, count, reg);
+
+ config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+ if (config > count) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+ offset, config, count);
+ return 0;
+ }
+
+ freq = ROM16(bios->data[offset + 12 + config * 2]);
+
+ if (io_flag_condition_idx > 0) {
+ if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
+ "frequency doubled\n", offset);
+ freq *= 2;
+ } else
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
+ "frequency unchanged\n", offset);
+ }
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
+ offset, reg, config, freq);
+
+ setPLL(bios, reg, freq * 10);
+
+ return len;
+}
+
+static int
+init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_END_REPEAT opcode: 0x36 ('6')
+ *
+ * offset (8 bit): opcode
+ *
+ * Marks the end of the block for INIT_REPEAT to repeat
+ */
+
+ /* no iexec->execute check by design */
+
+ /*
+ * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
+ * we're not in repeat mode
+ */
+ if (iexec->repeat)
+ return 0;
+
+ return 1;
+}
+
+static int
+init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COPY opcode: 0x37 ('7')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): srcmask
+ * offset + 7 (16 bit): CRTC port
+ * offset + 9 (8 bit): CRTC index
+ * offset + 10 (8 bit): mask
+ *
+ * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
+ * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
+ * port
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint8_t shift = bios->data[offset + 5];
+ uint8_t srcmask = bios->data[offset + 6];
+ uint16_t crtcport = ROM16(bios->data[offset + 7]);
+ uint8_t crtcindex = bios->data[offset + 9];
+ uint8_t mask = bios->data[offset + 10];
+ uint32_t data;
+ uint8_t crtcdata;
+
+ if (!iexec->execute)
+ return 11;
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
+ "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
+ offset, reg, shift, srcmask, crtcport, crtcindex, mask);
+
+ data = bios_rd32(bios, reg);
+
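+ /* a shift of 0x80 or more encodes a left shift by (0x100 - shift) */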
+ if (shift < 0x80)
+ data >>= shift;
+ else
+ data <<= (0x100 - shift);
+
+ data &= srcmask;
+
+ crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
+ crtcdata |= (uint8_t)data;
+ bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
+
+ return 11;
+}
+
+static int
+init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_NOT opcode: 0x38 ('8')
+ *
+ * offset (8 bit): opcode
+ *
+ * Invert the current execute / no-execute condition (i.e. "else")
+ */
+ if (iexec->execute)
+ BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset);
+ else
+ BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
+
+ iexec->execute = !iexec->execute;
+ return 1;
+}
+
+static int
+init_io_flag_condition(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ *
+ * Check condition "condition number" in the IO flag condition table.
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return 2;
+
+ if (io_flag_condition_met(bios, offset, cond))
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return 2;
+}
+
+static int
+init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): control register
+ * offset + 5 (32 bit): data register
+ * offset + 9 (32 bit): mask
+ * offset + 13 (32 bit): data
+ * offset + 17 (8 bit): count
+ * offset + 18 (8 bit): address 1
+ * offset + 19 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" address and data pairs, write "data n" to
+ * "data register", read the current value of "control register",
+ * and write it back once ANDed with "mask", ORed with "data",
+ * and ORed with "address n"
+ */
+
+ uint32_t controlreg = ROM32(bios->data[offset + 1]);
+ uint32_t datareg = ROM32(bios->data[offset + 5]);
+ uint32_t mask = ROM32(bios->data[offset + 9]);
+ uint32_t data = ROM32(bios->data[offset + 13]);
+ uint8_t count = bios->data[offset + 17];
+ int len = 18 + count * 2;
+ uint32_t value;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
+ "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
+ offset, controlreg, datareg, mask, data, count);
+
+ for (i = 0; i < count; i++) {
+ uint8_t instaddress = bios->data[offset + 18 + i * 2];
+ uint8_t instdata = bios->data[offset + 19 + i * 2];
+
+ BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
+ offset, instaddress, instdata);
+
+ bios_wr32(bios, datareg, instdata);
+ value = bios_rd32(bios, controlreg) & mask;
+ value |= data;
+ value |= instaddress;
+ bios_wr32(bios, controlreg, value);
+ }
+
+ return len;
+}
+
+static int
+init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): shift
+ * offset + 6 (8 bit): count
+ * offset + 7 (32 bit): register
+ * offset + 11 (32 bit): frequency 1
+ * ...
+ *
+ * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
+ * Set PLL register "register" to coefficients for frequency n,
+ * selected by reading index "CRTC index" of "CRTC port" ANDed with
+ * "mask" and shifted right by "shift".
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t shift = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 6];
+ uint32_t reg = ROM32(bios->data[offset + 7]);
+ int len = 11 + count * 4;
+ uint8_t config;
+ uint32_t freq;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
+ offset, crtcport, crtcindex, mask, shift, count, reg);
+
+ if (!reg)
+ return len;
+
+ config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
+ if (config > count) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
+ offset, config, count);
+ return 0;
+ }
+
+ freq = ROM32(bios->data[offset + 11 + config * 4]);
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
+ offset, reg, config, freq);
+
+ setPLL(bios, reg, freq);
+
+ return len;
+}
+
+static int
+init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_PLL2 opcode: 0x4B ('K')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): freq
+ *
+ * Set PLL register "register" to coefficients for frequency "freq"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t freq = ROM32(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return 9;
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %dkHz\n",
+ offset, reg, freq);
+
+ setPLL(bios, reg, freq);
+ return 9;
+}
+
+static int
+init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_I2C_BYTE opcode: 0x4C ('L')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): count
+ * offset + 4 (8 bit): I2C register 1
+ * offset + 5 (8 bit): mask 1
+ * offset + 6 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" registers given by "I2C register n" on the device
+ * addressed by "I2C slave address" on the I2C bus given by
+ * "DCB I2C table entry index", read the register, AND the result with
+ * "mask n" and OR it with "data n" before writing it back to the device
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t count = bios->data[offset + 3];
+ int len = 4 + count * 3;
+ struct nouveau_i2c_chan *chan;
+ struct i2c_msg msg;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, i2c_index, i2c_address, count);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
+ uint8_t mask = bios->data[offset + 5 + i * 3];
+ uint8_t data = bios->data[offset + 6 + i * 3];
+ uint8_t value;
+
+ msg.addr = i2c_address;
+ msg.flags = I2C_M_RD;
+ msg.len = 1;
+ msg.buf = &value;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return 0;
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, i2c_reg, value, mask, data);
+
+ value = (value & mask) | data;
+
+ if (bios->execute) {
+ msg.addr = i2c_address;
+ msg.flags = 0;
+ msg.len = 1;
+ msg.buf = &value;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return 0;
+ }
+ }
+
+ return len;
+}
+
+static int
+init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_I2C_BYTE opcode: 0x4D ('M')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): count
+ * offset + 4 (8 bit): I2C register 1
+ * offset + 5 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" registers given by "I2C register n" on the device
+ * addressed by "I2C slave address" on the I2C bus given by
+ * "DCB I2C table entry index", set the register to "data n"
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t count = bios->data[offset + 3];
+ int len = 4 + count * 2;
+ struct nouveau_i2c_chan *chan;
+ struct i2c_msg msg;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, i2c_index, i2c_address, count);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
+ uint8_t data = bios->data[offset + 5 + i * 2];
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
+ offset, i2c_reg, data);
+
+ if (bios->execute) {
+ msg.addr = i2c_address;
+ msg.flags = 0;
+ msg.len = 1;
+ msg.buf = &data;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return 0;
+ }
+ }
+
+ return len;
+}
+
+static int
+init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_I2C opcode: 0x4E ('N')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): count
+ * offset + 4 (8 bit): data 1
+ * ...
+ *
+ * Send "count" bytes ("data n") to the device addressed by "I2C slave
+ * address" on the I2C bus given by "DCB I2C table entry index"
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2];
+ uint8_t count = bios->data[offset + 3];
+ int len = 4 + count;
+ struct nouveau_i2c_chan *chan;
+ struct i2c_msg msg;
+ uint8_t data[256];
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, i2c_index, i2c_address, count);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ data[i] = bios->data[offset + 4 + i];
+
+ BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
+ }
+
+ if (bios->execute) {
+ msg.addr = i2c_address;
+ msg.flags = 0;
+ msg.len = count;
+ msg.buf = data;
+ if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
+ return 0;
+ }
+
+ return len;
+}
+
+static int
+init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_TMDS opcode: 0x4F ('O') (non-canon name)
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): magic lookup value
+ * offset + 2 (8 bit): TMDS address
+ * offset + 3 (8 bit): mask
+ * offset + 4 (8 bit): data
+ *
+ * Read the data reg for TMDS address "TMDS address", AND it with mask
+ * and OR it with data, then write it back
+ * "magic lookup value" determines which TMDS base address register is
+ * used -- see get_tmds_index_reg()
+ */
+
+ uint8_t mlv = bios->data[offset + 1];
+ uint32_t tmdsaddr = bios->data[offset + 2];
+ uint8_t mask = bios->data[offset + 3];
+ uint8_t data = bios->data[offset + 4];
+ uint32_t reg, value;
+
+ if (!iexec->execute)
+ return 5;
+
+ BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, mlv, tmdsaddr, mask, data);
+
+ reg = get_tmds_index_reg(bios->dev, mlv);
+ if (!reg)
+ return 0;
+
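+ /*
+ * Select the TMDS register with writes disabled, read-modify-write
+ * its data at reg + 4, then rewrite the address with the
+ * write-disable bit cleared.
+ */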
+ bios_wr32(bios, reg,
+ tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
+ value = (bios_rd32(bios, reg + 4) & mask) | data;
+ bios_wr32(bios, reg + 4, value);
+ bios_wr32(bios, reg, tmdsaddr);
+
+ return 5;
+}
+
+static int
+init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P') (non-canon name)
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): magic lookup value
+ * offset + 2 (8 bit): count
+ * offset + 3 (8 bit): addr 1
+ * offset + 4 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" TMDS address and data pairs write "data n" to
+ * "addr n". "magic lookup value" determines which TMDS base address
+ * register is used -- see get_tmds_index_reg()
+ */
+
+ uint8_t mlv = bios->data[offset + 1];
+ uint8_t count = bios->data[offset + 2];
+ int len = 3 + count * 2;
+ uint32_t reg;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
+ offset, mlv, count);
+
+ reg = get_tmds_index_reg(bios->dev, mlv);
+ if (!reg)
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
+ uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
+
+ bios_wr32(bios, reg + 4, tmdsdata);
+ bios_wr32(bios, reg, tmdsaddr);
+ }
+
+ return len;
+}
+
+static int
+init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): CRTC index1
+ * offset + 2 (8 bit): CRTC index2
+ * offset + 3 (8 bit): baseaddr
+ * offset + 4 (8 bit): count
+ * offset + 5 (8 bit): data 1
+ * ...
+ *
+ * For each of "count" address and data pairs, write "baseaddr + n" to
+ * "CRTC index1" and "data n" to "CRTC index2"
+ * Once complete, restore initial value read from "CRTC index1"
+ */
+ uint8_t crtcindex1 = bios->data[offset + 1];
+ uint8_t crtcindex2 = bios->data[offset + 2];
+ uint8_t baseaddr = bios->data[offset + 3];
+ uint8_t count = bios->data[offset + 4];
+ int len = 5 + count;
+ uint8_t oldaddr, data;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
+ "BaseAddr: 0x%02X, Count: 0x%02X\n",
+ offset, crtcindex1, crtcindex2, baseaddr, count);
+
+ oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
+
+ for (i = 0; i < count; i++) {
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
+ baseaddr + i);
+ data = bios->data[offset + 5 + i];
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
+ }
+
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
+
+ return len;
+}
+
+static int
+init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_CR opcode: 0x52 ('R')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): CRTC index
+ * offset + 2 (8 bit): mask
+ * offset + 3 (8 bit): data
+ *
+ * Assign the value at "CRTC index" ANDed with "mask" and ORed with
+ * "data" back to "CRTC index"
+ */
+
+ uint8_t crtcindex = bios->data[offset + 1];
+ uint8_t mask = bios->data[offset + 2];
+ uint8_t data = bios->data[offset + 3];
+ uint8_t value;
+
+ if (!iexec->execute)
+ return 4;
+
+ BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
+ offset, crtcindex, mask, data);
+
+ value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
+ value |= data;
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
+
+ return 4;
+}
+
+static int
+init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_CR opcode: 0x53 ('S')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): CRTC index
+ * offset + 2 (8 bit): value
+ *
+ * Assign "value" to CRTC register with index "CRTC index".
+ */
+
+ uint8_t crtcindex = bios->data[offset + 1];
+ uint8_t data = bios->data[offset + 2];
+
+ if (!iexec->execute)
+ return 3;
+
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
+
+ return 3;
+}
+
+static int
+init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_CR_GROUP opcode: 0x54 ('T')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): count
+ * offset + 2 (8 bit): CRTC index 1
+ * offset + 3 (8 bit): value 1
+ * ...
+ *
+ * For "count", assign "value n" to CRTC register with index
+ * "CRTC index n".
+ */
+
+ uint8_t count = bios->data[offset + 1];
+ int len = 2 + count * 2;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
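+ /* the -1 makes init_zm_cr's offset + 1 land on "CRTC index n" */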
+ for (i = 0; i < count; i++)
+ init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
+
+ return len;
+}
+
+static int
+init_condition_time(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONDITION_TIME opcode: 0x56 ('V')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ * offset + 2 (8 bit): retries / 50
+ *
+ * Check condition "condition number" in the condition table.
+ * Bios code then sleeps for 2ms if the condition is not met, and
+ * repeats up to "retries" times, but on one C51 this has proved
+ * insufficient. In mmiotraces the driver sleeps for 20ms, so we do
+ * this, and bail after "retries" times, or 2s, whichever is less.
+ * If still not met after the retries, clear the execution flag for this table.
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+ uint16_t retries = bios->data[offset + 2] * 50;
+ unsigned cnt;
+
+ if (!iexec->execute)
+ return 3;
+
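+ /* 100 iterations of 20ms gives the 2s cap mentioned above */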
+ if (retries > 100)
+ retries = 100;
+
+ BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
+ offset, cond, retries);
+
+ if (!bios->execute) /* avoid 2s delays when "faking" execution */
+ retries = 1;
+
+ for (cnt = 0; cnt < retries; cnt++) {
+ if (bios_condition_met(bios, offset, cond)) {
+ BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
+ offset);
+ break;
+ } else {
+ BIOSLOG(bios, "0x%04X: "
+ "Condition not met, sleeping for 20ms\n",
+ offset);
+ msleep(20);
+ }
+ }
+
+ if (!bios_condition_met(bios, offset, cond)) {
+ NV_WARN(bios->dev,
+ "0x%04X: Condition still not met after %dms, "
+ "skipping following opcodes\n", offset, 20 * retries);
+ iexec->execute = false;
+ }
+
+ return 3;
+}
+
+static int
+init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): base register
+ * offset + 5 (8 bit): count
+ * offset + 6 (32 bit): value 1
+ * ...
+ *
+ * Starting at offset + 6 there are "count" 32 bit values.
+ * For "count" iterations set "base register" + 4 * current_iteration
+ * to "value current_iteration"
+ */
+
+ uint32_t basereg = ROM32(bios->data[offset + 1]);
+ uint32_t count = bios->data[offset + 5];
+ int len = 6 + count * 4;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
+ offset, basereg, count);
+
+ for (i = 0; i < count; i++) {
+ uint32_t reg = basereg + i * 4;
+ uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
+
+ bios_wr32(bios, reg, data);
+ }
+
+ return len;
+}
+
+static int
+init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_SUB_DIRECT opcode: 0x5B ('[')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): subroutine offset (in bios)
+ *
+ * Calls a subroutine that will execute commands until INIT_DONE
+ * is found.
+ */
+
+ uint16_t sub_offset = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return 3;
+
+ BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
+ offset, sub_offset);
+
+ parse_init_table(bios, sub_offset, iexec);
+
+ BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
+
+ return 3;
+}
+
+static int
+init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COPY_NV_REG opcode: 0x5F ('_')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): src reg
+ * offset + 5 (8 bit): shift
+ * offset + 6 (32 bit): src mask
+ * offset + 10 (32 bit): xor
+ * offset + 14 (32 bit): dst reg
+ * offset + 18 (32 bit): dst mask
+ *
+ * Shift REGVAL("src reg") right by (signed) "shift", AND result with
+ * "src mask", then XOR with "xor". Write this OR'd with
+ * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
+ */
+
+ uint32_t srcreg = ROM32(bios->data[offset + 1]);
+ uint8_t shift = bios->data[offset + 5];
+ uint32_t srcmask = ROM32(bios->data[offset + 6]);
+ uint32_t xor = ROM32(bios->data[offset + 10]);
+ uint32_t dstreg = ROM32(bios->data[offset + 14]);
+ uint32_t dstmask = ROM32(bios->data[offset + 18]);
+ uint32_t srcvalue, dstvalue;
+
+ if (!iexec->execute)
+ return 22;
+
+ BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
+ "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
+ offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
+
+ srcvalue = bios_rd32(bios, srcreg);
+
+ if (shift < 0x80)
+ srcvalue >>= shift;
+ else
+ srcvalue <<= (0x100 - shift);
+
+ srcvalue = (srcvalue & srcmask) ^ xor;
+
+ dstvalue = bios_rd32(bios, dstreg) & dstmask;
+
+ bios_wr32(bios, dstreg, dstvalue | srcvalue);
+
+ return 22;
+}
+
+static int
+init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_INDEX_IO opcode: 0x62 ('b')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): data
+ *
+ * Write "data" to index "CRTC index" of "CRTC port"
+ */
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t data = bios->data[offset + 4];
+
+ if (!iexec->execute)
+ return 5;
+
+ bios_idxprt_wr(bios, crtcport, crtcindex, data);
+
+ return 5;
+}
+
+static int
+init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COMPUTE_MEM opcode: 0x63 ('c')
+ *
+ * offset (8 bit): opcode
+ *
+ * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
+ * that the hardware can correctly calculate how much VRAM it has
+ * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
+ *
+ * The implementation of this opcode in general consists of two parts:
+ * 1) determination of the memory bus width
+ * 2) determination of how many of the card's RAM pads have ICs attached
+ *
+ * 1) is done by a cunning combination of writes to offsets 0x1c and
+ * 0x3c in the framebuffer, and seeing whether the written values are
+ * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
+ *
+ * 2) is done by a cunning combination of writes to an offset slightly
+ * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
+ * if the test pattern can be read back. This then affects bits 12-15 of
+ * NV_PFB_CFG0
+ *
+ * In this context a "cunning combination" may include multiple reads
+ * and writes to varying locations, often alternating the test pattern
+ * and 0, doubtless to make sure buffers are filled, residual charges
+ * on tracks are removed etc.
+ *
+ * Unfortunately, the "cunning combination"s mentioned above, and the
+ * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
+ * trace I have.
+ *
+ * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
+ * we started was correct, and use that instead
+ */
+
+ /* no iexec->execute check by design */
+
+ /*
+ * This appears to be a NOP on G8x chipsets: both IO logs of the VBIOS
+ * and kmmio traces of the binary driver POSTing the card show nothing
+ * being done for this opcode. Why is it still listed in the table?!
+ */
+
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ return 1;
+
+ /*
+ * On every card I've seen, this step gets done for us earlier in
+ * the init scripts
+ uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
+ bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
+ */
+
+ /*
+ * This has probably also been done in the scripts, but an mmio trace of
+ * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
+ */
+ bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
+
+ /* write back the saved configuration value */
+ bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
+
+ return 1;
+}
+
+static int
+init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_RESET opcode: 0x65 ('e')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): value1
+ * offset + 9 (32 bit): value2
+ *
+ * Assign "value1" to "register", then assign "value2" to "register"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t value1 = ROM32(bios->data[offset + 5]);
+ uint32_t value2 = ROM32(bios->data[offset + 9]);
+ uint32_t pci_nv_19, pci_nv_20;
+
+ /* no iexec->execute check by design */
+
+ pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
+ bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
+ bios_wr32(bios, reg, value1);
+
+ udelay(10);
+
+ bios_wr32(bios, reg, value2);
+ bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
+
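+ /* clear the ROM shadow enable bit once the reset sequence is done */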
+ pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
+ pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
+ bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
+
+ return 13;
+}
+
+static int
+init_configure_mem(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONFIGURE_MEM opcode: 0x66 ('f')
+ *
+ * offset (8 bit): opcode
+ *
+ * Equivalent to INIT_DONE on bios version 3 or greater.
+ * For early bios versions, sets up the memory registers, using values
+ * taken from the memory init table
+ */
+
+ /* no iexec->execute check by design */
+
+ uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
+ uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
+ uint32_t reg, data;
+
+ if (bios->major_version > 2)
+ return 0;
+
+ bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
+ bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
+
+ if (bios->data[meminitoffs] & 1)
+ seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;
+
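+ /*
+ * Walk the register list until its 0xffffffff terminator; a few
+ * registers get fixed command values, everything else takes its
+ * data from the memory init table.
+ */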
+ for (reg = ROM32(bios->data[seqtbloffs]);
+ reg != 0xffffffff;
+ reg = ROM32(bios->data[seqtbloffs += 4])) {
+
+ switch (reg) {
+ case NV_PFB_PRE:
+ data = NV_PFB_PRE_CMD_PRECHARGE;
+ break;
+ case NV_PFB_PAD:
+ data = NV_PFB_PAD_CKE_NORMAL;
+ break;
+ case NV_PFB_REF:
+ data = NV_PFB_REF_CMD_REFRESH;
+ break;
+ default:
+ data = ROM32(bios->data[meminitdata]);
+ meminitdata += 4;
+ if (data == 0xffffffff)
+ continue;
+ }
+
+ bios_wr32(bios, reg, data);
+ }
+
+ return 1;
+}
+
+static int
+init_configure_clk(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONFIGURE_CLK opcode: 0x67 ('g')
+ *
+ * offset (8 bit): opcode
+ *
+ * Equivalent to INIT_DONE on bios version 3 or greater.
+ * For early bios versions, sets up the NVClk and MClk PLLs, using
+ * values taken from the memory init table
+ */
+
+ /* no iexec->execute check by design */
+
+ uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
+ int clock;
+
+ if (bios->major_version > 2)
+ return 0;
+
+ clock = ROM16(bios->data[meminitoffs + 4]) * 10;
+ setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
+
+ clock = ROM16(bios->data[meminitoffs + 2]) * 10;
+ if (bios->data[meminitoffs] & 1) /* DDR */
+ clock *= 2;
+ setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
+
+ return 1;
+}
+
+static int
+init_configure_preinit(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h')
+ *
+ * offset (8 bit): opcode
+ *
+ * Equivalent to INIT_DONE on bios version 3 or greater.
+ * For early bios versions, does early init, loading ram and crystal
+ * configuration from straps into CR3C
+ */
+
+ /* no iexec->execute check by design */
+
+ uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
+ uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
+
+ if (bios->major_version > 2)
+ return 0;
+
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
+ NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
+
+ return 1;
+}
+
+static int
+init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_IO opcode: 0x69 ('i')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): mask
+ * offset + 4 (8 bit): data
+ *
+ * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
+ */
+
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t mask = bios->data[offset + 3];
+ uint8_t data = bios->data[offset + 4];
+
+ if (!iexec->execute)
+ return 5;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
+ offset, crtcport, mask, data);
+
+ /*
+ * I have no idea what this does, but NVIDIA do this magic sequence
+ * in the places where this INIT_IO happens..
+ */
+ if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
+ int i;
+
+ bios_wr32(bios, 0x614100, (bios_rd32(
+ bios, 0x614100) & 0x0fffffff) | 0x00800000);
+
+ bios_wr32(bios, 0x00e18c, bios_rd32(
+ bios, 0x00e18c) | 0x00020000);
+
+ bios_wr32(bios, 0x614900, (bios_rd32(
+ bios, 0x614900) & 0x0fffffff) | 0x00800000);
+
+ bios_wr32(bios, 0x000200, bios_rd32(
+ bios, 0x000200) & ~0x40000000);
+
+ mdelay(10);
+
+ bios_wr32(bios, 0x00e18c, bios_rd32(
+ bios, 0x00e18c) & ~0x00020000);
+
+ bios_wr32(bios, 0x000200, bios_rd32(
+ bios, 0x000200) | 0x40000000);
+
+ bios_wr32(bios, 0x614100, 0x00800018);
+ bios_wr32(bios, 0x614900, 0x00800018);
+
+ mdelay(10);
+
+ bios_wr32(bios, 0x614100, 0x10000018);
+ bios_wr32(bios, 0x614900, 0x10000018);
+
+ for (i = 0; i < 3; i++)
+ bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
+ bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);
+
+ for (i = 0; i < 2; i++)
+ bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
+ bios, 0x614300 + (i*0x800)) & 0xfffff0f0);
+
+ for (i = 0; i < 3; i++)
+ bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
+ bios, 0x614380 + (i*0x800)) & 0xfffff0f0);
+
+ for (i = 0; i < 2; i++)
+ bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
+ bios, 0x614200 + (i*0x800)) & 0xfffffff0);
+
+ for (i = 0; i < 2; i++)
+ bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
+ bios, 0x614108 + (i*0x800)) & 0x0fffffff);
+ return 5;
+ }
+
+ bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
+ data);
+ return 5;
+}
+
+static int
+init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_SUB opcode: 0x6B ('k')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): script number
+ *
+ * Execute script number "script number", as a subroutine
+ */
+
+ uint8_t sub = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return 2;
+
+ BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
+
+ parse_init_table(bios,
+ ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
+ iexec);
+
+ BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
+
+ return 2;
+}
+
+static int
+init_ram_condition(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_RAM_CONDITION opcode: 0x6D ('m')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): mask
+ * offset + 2 (8 bit): cmpval
+ *
+ * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t mask = bios->data[offset + 1];
+ uint8_t cmpval = bios->data[offset + 2];
+ uint8_t data;
+
+ if (!iexec->execute)
+ return 3;
+
+ data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
+
+ BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
+ offset, data, cmpval);
+
+ if (data == cmpval)
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return 3;
+}
+
+static int
+init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_NV_REG opcode: 0x6E ('n')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): mask
+ * offset + 9 (32 bit): data
+ *
+ * Assign ((REGVAL("register") & "mask") | "data") to "register"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t mask = ROM32(bios->data[offset + 5]);
+ uint32_t data = ROM32(bios->data[offset + 9]);
+
+ if (!iexec->execute)
+ return 13;
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
+ offset, reg, mask, data);
+
+ bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
+
+ return 13;
+}
+
+static int
+init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_MACRO opcode: 0x6F ('o')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): macro number
+ *
+ * Look up macro index "macro number" in the macro index table.
+ * The macro index table entry has 1 byte for the index in the macro
+ * table, and 1 byte for the number of times to repeat the macro.
+ * The macro table entry has 4 bytes for the register address and
+ * 4 bytes for the value to write to that register
+ */
+
+ uint8_t macro_index_tbl_idx = bios->data[offset + 1];
+ uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
+ uint8_t macro_tbl_idx = bios->data[tmp];
+ uint8_t count = bios->data[tmp + 1];
+ uint32_t reg, data;
+ int i;
+
+ if (!iexec->execute)
+ return 2;
+
+ BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
+ "Count: 0x%02X\n",
+ offset, macro_index_tbl_idx, macro_tbl_idx, count);
+
+ for (i = 0; i < count; i++) {
+ uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
+
+ reg = ROM32(bios->data[macroentryptr]);
+ data = ROM32(bios->data[macroentryptr + 4]);
+
+ bios_wr32(bios, reg, data);
+ }
+
+ return 2;
+}
+
+static int
+init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_DONE opcode: 0x71 ('q')
+ *
+ * offset (8 bit): opcode
+ *
+ * End the current script
+ */
+
+ /* mild retval abuse to stop parsing this table */
+ return 0;
+}
+
+static int
+init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_RESUME opcode: 0x72 ('r')
+ *
+ * offset (8 bit): opcode
+ *
+ * End the current execute / no-execute condition
+ */
+
+ if (iexec->execute)
+ return 1;
+
+ iexec->execute = true;
+ BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
+
+ return 1;
+}
+
+static int
+init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_TIME opcode: 0x74 ('t')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): time
+ *
+ * Sleep for "time" microseconds.
+ */
+
+ unsigned time = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return 3;
+
+ BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
+ offset, time);
+
+ if (time < 1000)
+ udelay(time);
+ else
+ msleep((time + 900) / 1000);
+
+ return 3;
+}
+
+static int
+init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_CONDITION opcode: 0x75 ('u')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ *
+ * Check condition "condition number" in the condition table.
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return 2;
+
+ BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
+
+ if (bios_condition_met(bios, offset, cond))
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return 2;
+}
+
+static int
+init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_IO_CONDITION opcode: 0x76
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): condition number
+ *
+ * Check condition "condition number" in the io condition table.
+ * If condition not met skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t cond = bios->data[offset + 1];
+
+ if (!iexec->execute)
+ return 2;
+
+ BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
+
+ if (io_condition_met(bios, offset, cond))
+ BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
+ else {
+ BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
+ iexec->execute = false;
+ }
+
+ return 2;
+}
+
+static int
+init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_INDEX_IO opcode: 0x78 ('x')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): CRTC port
+ * offset + 3 (8 bit): CRTC index
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): data
+ *
+ * Read value at index "CRTC index" on "CRTC port", AND with "mask",
+ * OR with "data", write-back
+ */
+
+ uint16_t crtcport = ROM16(bios->data[offset + 1]);
+ uint8_t crtcindex = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t data = bios->data[offset + 5];
+ uint8_t value;
+
+ if (!iexec->execute)
+ return 6;
+
+ BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
+ "Data: 0x%02X\n",
+ offset, crtcport, crtcindex, mask, data);
+
+ value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
+ bios_idxprt_wr(bios, crtcport, crtcindex, value);
+
+ return 6;
+}
+
+static int
+init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_PLL opcode: 0x79 ('y')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (16 bit): freq
+ *
+ * Set PLL register "register" to coefficients for frequency "freq"
+ * (in units of 10 kHz)
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint16_t freq = ROM16(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return 7;
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
+
+ setPLL(bios, reg, freq * 10);
+
+ return 7;
+}
+
+static int
+init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_REG opcode: 0x7A ('z')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): value
+ *
+ * Assign "value" to "register"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t value = ROM32(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return 9;
+
+ if (reg == 0x000200)
+ value |= 1;
+
+ bios_wr32(bios, reg, value);
+
+ return 9;
+}
+
+static int
+init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): PLL type
+ * offset + 2 (32 bit): frequency 0
+ *
+ * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
+ * ram_restrict_table_ptr. The value read from there is used to select
+ * a frequency from the table starting at 'frequency 0' to be
+ * programmed into the PLL corresponding to 'type'.
+ *
+ * The PLL limits table on cards using this opcode has a mapping of
+ * 'type' to the relevant registers.
+ */
+
+ struct drm_device *dev = bios->dev;
+ uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
+ uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
+ uint8_t type = bios->data[offset + 1];
+ uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
+ uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
+ int len = 2 + bios->ram_restrict_group_count * 4;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
+ NV_ERROR(dev, "PLL limits table not version 3.x\n");
+ return len; /* deliberate, allow default clocks to remain */
+ }
+
+ entry = pll_limits + pll_limits[1];
+ for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
+ if (entry[0] == type) {
+ uint32_t reg = ROM32(entry[3]);
+
+ BIOSLOG(bios, "0x%04X: "
+ "Type %02x Reg 0x%08x Freq %dKHz\n",
+ offset, type, reg, freq);
+
+ setPLL(bios, reg, freq);
+ return len;
+ }
+ }
+
+ NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
+ return len;
+}
+
+static int
+init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_8C opcode: 0x8C ('')
+ *
+ * NOP so far....
+ *
+ */
+
+ return 1;
+}
+
+static int
+init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_8D opcode: 0x8D ('')
+ *
+ * NOP so far....
+ *
+ */
+
+ return 1;
+}
+
+static int
+init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_GPIO opcode: 0x8E ('')
+ *
+ * offset (8 bit): opcode
+ *
+ * Loop over all entries in the DCB GPIO table, and initialise
+ * each GPIO according to various values listed in each entry
+ */
+
+ const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+ const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
+ const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
+ const uint8_t *gpio_entry;
+ int i;
+
+ if (!iexec->execute)
+ return 1;
+
+ if (bios->bdcb.version != 0x40) {
+ NV_ERROR(bios->dev, "DCB table not version 4.0\n");
+ return 0;
+ }
+
+ if (!bios->bdcb.gpio_table_ptr) {
+ NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
+ return 0;
+ }
+
+ gpio_entry = gpio_table + gpio_table[1];
+ for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
+ uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
+ int line = (entry & 0x0000001f);
+
+ BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);
+
+ if ((entry & 0x0000ff00) == 0x0000ff00)
+ continue;
+
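+ /* nv50_gpio_reg[] packs eight GPIO lines per register, 4 bits each */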
+ r = nv50_gpio_reg[line >> 3];
+ s = (line & 0x07) << 2;
+ v = bios_rd32(bios, r) & ~(0x00000003 << s);
+ if (entry & 0x01000000)
+ v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
+ else
+ v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
+ bios_wr32(bios, r, v);
+
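+ /* nv50_gpio_ctl[] carries one bit per line in each halfword */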
+ r = nv50_gpio_ctl[line >> 4];
+ s = (line & 0x0f);
+ v = bios_rd32(bios, r) & ~(0x00010001 << s);
+ switch ((entry & 0x06000000) >> 25) {
+ case 1:
+ v |= (0x00000001 << s);
+ break;
+ case 2:
+ v |= (0x00010000 << s);
+ break;
+ default:
+ break;
+ }
+ bios_wr32(bios, r, v);
+ }
+
+ return 1;
+}
+
+static int
+init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): reg
+ * offset + 5 (8 bit): regincrement
+ * offset + 6 (8 bit): count
+ * offset + 7 (32 bit): value 1,1
+ * ...
+ *
+ * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
+ * ram_restrict_table_ptr. The value read from here is 'n', and
+ * "value 1,n" gets written to "reg". This repeats "count" times and on
+ * each iteration 'm', "reg" increases by "regincrement" and
+ * "value m,n" is used. The extent of n is limited by a number read
+ * from the 'M' BIT table, herein called "blocklen"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint8_t regincrement = bios->data[offset + 5];
+ uint8_t count = bios->data[offset + 6];
+ uint32_t strap_ramcfg, data;
+ /* previously set by 'M' BIT table */
+ uint16_t blocklen = bios->ram_restrict_group_count * 4;
+ int len = 7 + count * blocklen;
+ uint8_t index;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ if (!blocklen) {
+ NV_ERROR(bios->dev,
+ "0x%04X: Zero block length - has the M table "
+ "been parsed?\n", offset);
+ return 0;
+ }
+
+ strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
+ index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
+
+ BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
+ "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
+ offset, reg, regincrement, count, strap_ramcfg, index);
+
+ for (i = 0; i < count; i++) {
+ data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
+
+ bios_wr32(bios, reg, data);
+
+ reg += regincrement;
+ }
+
+ return len;
+}
+
+static int
+init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_COPY_ZM_REG opcode: 0x90 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): src reg
+ * offset + 5 (32 bit): dst reg
+ *
+ * Put contents of "src reg" into "dst reg"
+ */
+
+ uint32_t srcreg = ROM32(bios->data[offset + 1]);
+ uint32_t dstreg = ROM32(bios->data[offset + 5]);
+
+ if (!iexec->execute)
+ return 9;
+
+ bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
+
+ return 9;
+}
+
+static int
+init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
+ struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): dst reg
+ * offset + 5 (8 bit): count
+ * offset + 6 (32 bit): data 1
+ * ...
+ *
+ * For each of "count" values write "data n" to "dst reg"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint8_t count = bios->data[offset + 5];
+ int len = 6 + count * 4;
+ int i;
+
+ if (!iexec->execute)
+ return len;
+
+ for (i = 0; i < count; i++) {
+ uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
+ bios_wr32(bios, reg, data);
+ }
+
+ return len;
+}
+
+static int
+init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_RESERVED opcode: 0x92 ('')
+ *
+ * offset (8 bit): opcode
+ *
+ * Seemingly does nothing
+ */
+
+ return 1;
+}
+
+static int
+init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_96 opcode: 0x96 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): sreg
+ * offset + 5 (8 bit): sshift
+ * offset + 6 (8 bit): smask
+ * offset + 7 (8 bit): index
+ * offset + 8 (32 bit): reg
+ * offset + 12 (32 bit): mask
+ * offset + 16 (8 bit): shift
+ *
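+ * Operation (from the code below): read "sreg", shift by "sshift" and
+ * mask with "smask" to form an index into the xlat table selected by
+ * "index"; the looked-up byte is shifted left by "shift" and merged
+ * into "reg", keeping the bits selected by "mask".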
+ */
+
+ uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
+ uint32_t reg = ROM32(bios->data[offset + 8]);
+ uint32_t mask = ROM32(bios->data[offset + 12]);
+ uint32_t val;
+
+ val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
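+ /* sshift appears to be signed: values >= 0x80 encode a left shift of (0x100 - sshift) */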
+ if (bios->data[offset + 5] < 0x80)
+ val >>= bios->data[offset + 5];
+ else
+ val <<= (0x100 - bios->data[offset + 5]);
+ val &= bios->data[offset + 6];
+
+ val = bios->data[ROM16(bios->data[xlatptr]) + val];
+ val <<= bios->data[offset + 16];
+
+ if (!iexec->execute)
+ return 17;
+
+ bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
+ return 17;
+}
+
+static int
+init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_97 opcode: 0x97 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): register
+ * offset + 5 (32 bit): mask
+ * offset + 9 (32 bit): value
+ *
+ * Adds "value" to "register" preserving the fields specified
+ * by "mask"
+ */
+
+ uint32_t reg = ROM32(bios->data[offset + 1]);
+ uint32_t mask = ROM32(bios->data[offset + 5]);
+ uint32_t add = ROM32(bios->data[offset + 9]);
+ uint32_t val;
+
+ val = bios_rd32(bios, reg);
+ val = (val & mask) | ((val + add) & ~mask);
+
+ if (!iexec->execute)
+ return 13;
+
+ bios_wr32(bios, reg, val);
+ return 13;
+}
+
+static int
+init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_AUXCH opcode: 0x98 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): address
+ * offset + 5 (8 bit): count
+ * offset + 6 (8 bit): mask 0
+ * offset + 7 (8 bit): data 0
+ * ...
+ *
+ */
+
+ struct drm_device *dev = bios->dev;
+ struct nouveau_i2c_chan *auxch;
+ uint32_t addr = ROM32(bios->data[offset + 1]);
+ uint8_t count = bios->data[offset + 5];
+ int len = 6 + count * 2;
+ int ret, i;
+
+ if (!bios->display.output) {
+ NV_ERROR(dev, "INIT_AUXCH: no active output\n");
+ return 0;
+ }
+
+ auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
+ if (!auxch) {
+ NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
+ bios->display.output->i2c_index);
+ return 0;
+ }
+
+ if (!iexec->execute)
+ return len;
+
+ offset += 6;
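+ /* each record is a mask byte then a data byte: read the byte via the
+ * aux channel (cmd 9), apply mask and data, then write it back (cmd 8) */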
+ for (i = 0; i < count; i++, offset += 2) {
+ uint8_t data;
+
+ ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
+ if (ret) {
+ NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
+ return 0;
+ }
+
+ data &= bios->data[offset + 0];
+ data |= bios->data[offset + 1];
+
+ ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
+ if (ret) {
+ NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
+ return 0;
+ }
+ }
+
+ return len;
+}
+
+static int
+init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_ZM_AUXCH opcode: 0x99 ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (32 bit): address
+ * offset + 5 (8 bit): count
+ * offset + 6 (8 bit): data 0
+ * ...
+ *
+ */
+
+ struct drm_device *dev = bios->dev;
+ struct nouveau_i2c_chan *auxch;
+ uint32_t addr = ROM32(bios->data[offset + 1]);
+ uint8_t count = bios->data[offset + 5];
+ int len = 6 + count;
+ int ret, i;
+
+ if (!bios->display.output) {
+ NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
+ return 0;
+ }
+
+ auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
+ if (!auxch) {
+ NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
+ bios->display.output->i2c_index);
+ return 0;
+ }
+
+ if (!iexec->execute)
+ return len;
+
+ offset += 6;
+ for (i = 0; i < count; i++, offset++) {
+ ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
+ if (ret) {
+ NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
+ return 0;
+ }
+ }
+
+ return len;
+}
+
+static struct init_tbl_entry itbl_entry[] = {
+ /* command name , id , command handler */
+ /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
+ { "INIT_IO_RESTRICT_PROG" , 0x32, init_io_restrict_prog },
+ { "INIT_REPEAT" , 0x33, init_repeat },
+ { "INIT_IO_RESTRICT_PLL" , 0x34, init_io_restrict_pll },
+ { "INIT_END_REPEAT" , 0x36, init_end_repeat },
+ { "INIT_COPY" , 0x37, init_copy },
+ { "INIT_NOT" , 0x38, init_not },
+ { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
+ { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
+ { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
+ { "INIT_PLL2" , 0x4B, init_pll2 },
+ { "INIT_I2C_BYTE" , 0x4C, init_i2c_byte },
+ { "INIT_ZM_I2C_BYTE" , 0x4D, init_zm_i2c_byte },
+ { "INIT_ZM_I2C" , 0x4E, init_zm_i2c },
+ { "INIT_TMDS" , 0x4F, init_tmds },
+ { "INIT_ZM_TMDS_GROUP" , 0x50, init_zm_tmds_group },
+ { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, init_cr_idx_adr_latch },
+ { "INIT_CR" , 0x52, init_cr },
+ { "INIT_ZM_CR" , 0x53, init_zm_cr },
+ { "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
+ { "INIT_CONDITION_TIME" , 0x56, init_condition_time },
+ { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
+ /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
+ { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
+ { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
+ { "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
+ { "INIT_RESET" , 0x65, init_reset },
+ { "INIT_CONFIGURE_MEM" , 0x66, init_configure_mem },
+ { "INIT_CONFIGURE_CLK" , 0x67, init_configure_clk },
+ { "INIT_CONFIGURE_PREINIT" , 0x68, init_configure_preinit },
+ { "INIT_IO" , 0x69, init_io },
+ { "INIT_SUB" , 0x6B, init_sub },
+ { "INIT_RAM_CONDITION" , 0x6D, init_ram_condition },
+ { "INIT_NV_REG" , 0x6E, init_nv_reg },
+ { "INIT_MACRO" , 0x6F, init_macro },
+ { "INIT_DONE" , 0x71, init_done },
+ { "INIT_RESUME" , 0x72, init_resume },
+ /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
+ { "INIT_TIME" , 0x74, init_time },
+ { "INIT_CONDITION" , 0x75, init_condition },
+ { "INIT_IO_CONDITION" , 0x76, init_io_condition },
+ { "INIT_INDEX_IO" , 0x78, init_index_io },
+ { "INIT_PLL" , 0x79, init_pll },
+ { "INIT_ZM_REG" , 0x7A, init_zm_reg },
+ { "INIT_RAM_RESTRICT_PLL" , 0x87, init_ram_restrict_pll },
+ { "INIT_8C" , 0x8C, init_8c },
+ { "INIT_8D" , 0x8D, init_8d },
+ { "INIT_GPIO" , 0x8E, init_gpio },
+ { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, init_ram_restrict_zm_reg_group },
+ { "INIT_COPY_ZM_REG" , 0x90, init_copy_zm_reg },
+ { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched },
+ { "INIT_RESERVED" , 0x92, init_reserved },
+ { "INIT_96" , 0x96, init_96 },
+ { "INIT_97" , 0x97, init_97 },
+ { "INIT_AUXCH" , 0x98, init_auxch },
+ { "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
+ { NULL , 0 , NULL }
+};
+
+#define MAX_TABLE_OPS 1000
+
+static int
+parse_init_table(struct nvbios *bios, unsigned int offset,
+ struct init_exec *iexec)
+{
+ /*
+ * Parses all commands in an init table.
+ *
+ * We start out executing all commands found in the init table. Some
+ * opcodes may change the status of iexec->execute to SKIP, which will
+ * cause the following opcodes to perform no operation until the value
+ * is changed back to EXECUTE.
+ */
+
+ int count = 0, i, res;
+ uint8_t id;
+
+ /*
+ * Loop until INIT_DONE causes us to break out of the loop
+ * (or until offset > bios length just in case... )
+ * (and no more than MAX_TABLE_OPS iterations, just in case... )
+ */
+ while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
+ id = bios->data[offset];
+
+ /* Find matching id in itbl_entry */
+ for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
+ ;
+
+ if (itbl_entry[i].name) {
+ BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
+ offset, itbl_entry[i].id, itbl_entry[i].name);
+
+ /* execute eventual command handler */
+ res = (*itbl_entry[i].handler)(bios, offset, iexec);
+ if (!res)
+ break;
+ /*
+ * Advance the offset by the length of the current command, including
+ * all of its data, so that it then points at the next opcode.
+ */
+ offset += res;
+ } else {
+ NV_ERROR(bios->dev,
+ "0x%04X: Init table command not found: "
+ "0x%02X\n", offset, id);
+ return -ENOENT;
+ }
+ }
+
+ if (offset >= bios->length)
+ NV_WARN(bios->dev,
+ "Offset 0x%04X greater than known bios image length. "
+ "Corrupt image?\n", offset);
+ if (count >= MAX_TABLE_OPS)
+ NV_WARN(bios->dev,
+ "More than %d opcodes to a table is unlikely, "
+ "is the bios image corrupt?\n", MAX_TABLE_OPS);
+
+ return 0;
+}
+
+static void
+parse_init_tables(struct nvbios *bios)
+{
+ /* Loops and calls parse_init_table() for each present table. */
+
+ int i = 0;
+ uint16_t table;
+ struct init_exec iexec = {true, false};
+
+ if (bios->old_style_init) {
+ if (bios->init_script_tbls_ptr)
+ parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
+ if (bios->extra_init_script_tbl_ptr)
+ parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
+
+ return;
+ }
+
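+ /* the table at init_script_tbls_ptr is a zero-terminated list of 16-bit
+ * pointers, one per init table */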
+ while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
+ NV_INFO(bios->dev,
+ "Parsing VBIOS init table %d at offset 0x%04X\n",
+ i / 2, table);
+ BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
+
+ parse_init_table(bios, table, &iexec);
+ i += 2;
+ }
+}
+
+static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
+{
+ int compare_record_len, i = 0;
+ uint16_t compareclk, scriptptr = 0;
+
+ if (bios->major_version < 5) /* pre BIT */
+ compare_record_len = 3;
+ else
+ compare_record_len = 4;
+
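+ /*
+ * Records pair a compare clock (in 10 kHz units) with a script pointer
+ * (or, pre-BIT, an init script index), in decreasing clock order; take
+ * the first record whose clock pxclk reaches.
+ */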
+ do {
+ compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
+ if (pxclk >= compareclk * 10) {
+ if (bios->major_version < 5) {
+ uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
+ scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
+ } else
+ scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
+ break;
+ }
+ i++;
+ } while (compareclk);
+
+ return scriptptr;
+}
+
+static void
+run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
+ struct dcb_entry *dcbent, int head, bool dl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct init_exec iexec = {true, false};
+
+ NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
+ scriptptr);
+ bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
+ head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
+ /* note: if dcb entries have been merged, index may be misleading */
+ NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
+ parse_init_table(bios, scriptptr, &iexec);
+
+ nv04_dfp_bind_head(dev, dcbent, head, dl);
+}
+
+static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
+ uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
+
+ if (!bios->fp.xlated_entry || !sub || !scriptofs)
+ return -EINVAL;
+
+ run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
+
+ if (script == LVDS_PANEL_OFF) {
+ /* off-on delay in ms */
+ msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+ }
+#ifdef __powerpc__
+ /* Powerbook specific quirks */
+ if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
+ nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
+ if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
+ if (script == LVDS_PANEL_ON) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
+ }
+ if (script == LVDS_PANEL_OFF) {
+ bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
+ bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+ /*
+ * The BIT LVDS table's header has the information to setup the
+ * necessary registers. Following the standard 4 byte header are:
+ * A bitmask byte and a dual-link transition pxclk value for use in
+ * selecting the init script when not using straps; 4 script pointers
+ * for panel power, selected by output and on/off; and 8 table pointers
+ * for panel init, the needed one determined by output, and bits in the
+ * conf byte. These tables are similar to the TMDS tables, consisting
+ * of a list of pxclks and script pointers.
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
+ uint16_t scriptptr = 0, clktable;
+ uint8_t clktableptr = 0;
+
+ /*
+ * For now we assume version 3.0 table - g80 support will need some
+ * changes
+ */
+
+ switch (script) {
+ case LVDS_INIT:
+ return -ENOSYS;
+ case LVDS_BACKLIGHT_ON:
+ case LVDS_PANEL_ON:
+ scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
+ break;
+ case LVDS_BACKLIGHT_OFF:
+ case LVDS_PANEL_OFF:
+ scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
+ break;
+ case LVDS_RESET:
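+ /* pick one of the pixel clock comparison tables: +2 for dual link,
+ * +1 from a strap bit or, for EDID panels, the fallback byte */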
+ if (dcbent->lvdsconf.use_straps_for_mode) {
+ if (bios->fp.dual_link)
+ clktableptr += 2;
+ if (bios->fp.BITbit1)
+ clktableptr++;
+ } else {
+ /* using EDID */
+ uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+ int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
+
+ if (bios->fp.dual_link) {
+ clktableptr += 2;
+ fallbackcmpval *= 2;
+ }
+ if (fallbackcmpval & fallback)
+ clktableptr++;
+ }
+
+ /* adding outputset * 8 may not be correct */
+ clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
+ if (!clktable) {
+ NV_ERROR(dev, "Pixel clock comparison table not found\n");
+ return -ENOENT;
+ }
+ scriptptr = clkcmptable(bios, clktable, pxclk);
+ }
+
+ if (!scriptptr) {
+ NV_ERROR(dev, "LVDS output init script not found\n");
+ return -ENOENT;
+ }
+ run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
+
+ return 0;
+}
+
+int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+{
+ /*
+ * LVDS operations are multiplexed in an effort to present a single API
+ * which works with two vastly differing underlying structures.
+ * This acts as the demux
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+ uint32_t sel_clk_binding, sel_clk;
+ int ret;
+
+ if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
+ (lvds_ver >= 0x30 && script == LVDS_INIT))
+ return 0;
+
+ if (!bios->fp.lvds_init_run) {
+ bios->fp.lvds_init_run = true;
+ call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
+ }
+
+ if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
+ call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
+ if (script == LVDS_RESET && bios->fp.power_off_for_reset)
+ call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
+
+ NV_TRACE(dev, "Calling LVDS script %d:\n", script);
+
+ /* don't let script change pll->head binding */
+ sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+
+ if (lvds_ver < 0x30)
+ ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
+ else
+ ret = run_lvds_table(dev, dcbent, head, script, pxclk);
+
+ bios->fp.last_script_invoc = (script << 1 | head);
+
+ sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+ /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+
+ return ret;
+}
+
+struct lvdstableheader {
+ uint8_t lvds_ver, headerlen, recordlen;
+};
+
+static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
+{
+ /*
+ * BMP version (0xa) LVDS table has a simple header of version and
+ * record length. The BIT LVDS table has the typical BIT table header:
+ * version byte, header length byte, record length byte, and a byte for
+ * the maximum number of records that can be held in the table.
+ */
+
+ uint8_t lvds_ver, headerlen, recordlen;
+
+ memset(lth, 0, sizeof(struct lvdstableheader));
+
+ if (bios->fp.lvdsmanufacturerpointer == 0x0) {
+ NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
+ return -EINVAL;
+ }
+
+ lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
+
+ switch (lvds_ver) {
+ case 0x0a: /* pre NV40 */
+ headerlen = 2;
+ recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+ break;
+ case 0x30: /* NV4x */
+ headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+ if (headerlen < 0x1f) {
+ NV_ERROR(dev, "LVDS table header not understood\n");
+ return -EINVAL;
+ }
+ recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+ break;
+ case 0x40: /* G80/G90 */
+ headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
+ if (headerlen < 0x7) {
+ NV_ERROR(dev, "LVDS table header not understood\n");
+ return -EINVAL;
+ }
+ recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
+ break;
+ default:
+ NV_ERROR(dev,
+ "LVDS table revision %d.%d not currently supported\n",
+ lvds_ver >> 4, lvds_ver & 0xf);
+ return -ENOSYS;
+ }
+
+ lth->lvds_ver = lvds_ver;
+ lth->headerlen = headerlen;
+ lth->recordlen = recordlen;
+
+ return 0;
+}
+
+static int
+get_fp_strap(struct drm_device *dev, struct nvbios *bios)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+ * The fp strap is normally dictated by the "User Strap" in
+ * PEXTDEV_BOOT_0[20:16]. On BMP cards, however, when bit 2 of the
+ * Internal_Flags struct at 0x48 is set, the user strap is overridden
+ * by the PCI subsystem ID during POST - but not before the previous
+ * user strap has been committed to CR58 for CR57=0xf on head A, which
+ * may be read and used instead.
+ */
+
+ if (bios->major_version < 5 && bios->data[0x48] & 0x4)
+ return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
+
+ if (dev_priv->card_type >= NV_50)
+ return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+ else
+ return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+}
+
+static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
+{
+ uint8_t *fptable;
+ uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
+ int ret, ofs, fpstrapping;
+ struct lvdstableheader lth;
+
+ if (bios->fp.fptablepointer == 0x0) {
+ /* Apple cards don't have the fp table; the laptops use DDC */
+ /* The table is also missing on some x86 IGPs */
+#ifndef __powerpc__
+ NV_ERROR(dev, "Pointer to flat panel table invalid\n");
+#endif
+ bios->pub.digital_min_front_porch = 0x4b;
+ return 0;
+ }
+
+ fptable = &bios->data[bios->fp.fptablepointer];
+ fptable_ver = fptable[0];
+
+ switch (fptable_ver) {
+ /*
+ * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no
+ * version field, and miss one of the spread spectrum/PWM bytes.
+ * This could affect early GF2Go parts (though no appropriate ROMs have
+ * been seen). Here we assume that a version of 0x05 matches this case
+ * (combining with a BMP version check would be better), as the
+ * common case for the panel type field is 0x0005, and that is in
+ * fact what we are reading the first byte of.
+ */
+ case 0x05: /* some NV10, 11, 15, 16 */
+ recordlen = 42;
+ ofs = -1;
+ break;
+ case 0x10: /* some NV15/16, and NV11+ */
+ recordlen = 44;
+ ofs = 0;
+ break;
+ case 0x20: /* NV40+ */
+ headerlen = fptable[1];
+ recordlen = fptable[2];
+ fpentries = fptable[3];
+ /*
+ * fptable[4] is the minimum
+ * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
+ */
+ bios->pub.digital_min_front_porch = fptable[4];
+ ofs = -7;
+ break;
+ default:
+ NV_ERROR(dev,
+ "FP table revision %d.%d not currently supported\n",
+ fptable_ver >> 4, fptable_ver & 0xf);
+ return -ENOSYS;
+ }
+
+ if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
+ return 0;
+
+ ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+ if (ret)
+ return ret;
+
+ if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
+ bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
+ lth.headerlen + 1;
+ bios->fp.xlatwidth = lth.recordlen;
+ }
+ if (bios->fp.fpxlatetableptr == 0x0) {
+ NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
+ return -EINVAL;
+ }
+
+ fpstrapping = get_fp_strap(dev, bios);
+
+ fpindex = bios->data[bios->fp.fpxlatetableptr +
+ fpstrapping * bios->fp.xlatwidth];
+
+ if (fpindex > fpentries) {
+ NV_ERROR(dev, "Bad flat panel table index\n");
+ return -ENOENT;
+ }
+
+ /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
+ if (lth.lvds_ver > 0x10)
+ bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+
+ /*
+ * If either the strap or the xlated fpindex value is 0xf, there is no
+ * panel using a strap-derived bios mode present. This condition
+ * includes, but is different from, the DDC panel indicator above.
+ */
+ if (fpstrapping == 0xf || fpindex == 0xf)
+ return 0;
+
+ bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
+ recordlen * fpindex + ofs;
+
+ NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
+ ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
+ ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
+ ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
+
+ return 0;
+}
+
+bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
+
+ if (!mode) /* just checking whether we can produce a mode */
+ return bios->fp.mode_ptr;
+
+ memset(mode, 0, sizeof(struct drm_display_mode));
+ /*
+ * For version 1.0 (version in byte 0):
+ * bytes 1-2 are "panel type", including bits on whether Colour/mono,
+ * single/dual link, and type (TFT etc.)
+ * bytes 3-6 are bits per colour in RGBX
+ */
+ mode->clock = ROM16(mode_entry[7]) * 10;
+ /* bytes 9-10 is HActive */
+ mode->hdisplay = ROM16(mode_entry[11]) + 1;
+ /*
+ * bytes 13-14 is HValid Start
+ * bytes 15-16 is HValid End
+ */
+ mode->hsync_start = ROM16(mode_entry[17]) + 1;
+ mode->hsync_end = ROM16(mode_entry[19]) + 1;
+ mode->htotal = ROM16(mode_entry[21]) + 1;
+ /* bytes 23-24, 27-30 similarly, but vertical */
+ mode->vdisplay = ROM16(mode_entry[25]) + 1;
+ mode->vsync_start = ROM16(mode_entry[31]) + 1;
+ mode->vsync_end = ROM16(mode_entry[33]) + 1;
+ mode->vtotal = ROM16(mode_entry[35]) + 1;
+ mode->flags |= (mode_entry[37] & 0x10) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (mode_entry[37] & 0x1) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ /*
+ * bytes 38-39 relate to spread spectrum settings
+ * bytes 40-43 are something to do with PWM
+ */
+
+ mode->status = MODE_OK;
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ return bios->fp.mode_ptr;
+}
+
+int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
+{
+ /*
+ * The LVDS table header is (mostly) described in
+ * parse_lvds_manufacturer_table_header(): the BIT header additionally
+ * contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if
+ * straps are not being used for the panel, this specifies the frequency
+ * at which modes should be set up in the dual link style.
+ *
+ * Following the header, the BMP (ver 0xa) table has several records,
+ * indexed by a separate xlat table, indexed in turn by the fp strap in
+ * EXTDEV_BOOT. Each record has a config byte, followed by 6 script
+ * numbers for use by INIT_SUB which controlled panel init and power,
+ * and finally a dword of ms to sleep between power off and on
+ * operations.
+ *
+ * In the BIT versions, the table following the header serves as an
+ * integrated config and xlat table: the records in the table are
+ * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
+ * two bytes - the first as a config byte, the second for indexing the
+ * fp mode table pointed to by the BIT 'D' table
+ *
+ * DDC is not used until after card init, so selecting the correct table
+ * entry for EDID-equipped panels and setting their dual link flag
+ * (which requires a test against the native-mode pixel clock) cannot be
+ * done until later, when this function is called again with a non-zero
+ * pxclk.
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
+ struct lvdstableheader lth;
+ uint16_t lvdsofs;
+ int ret, chip_version = bios->pub.chip_version;
+
+ ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
+ if (ret)
+ return ret;
+
+ switch (lth.lvds_ver) {
+ case 0x0a: /* pre NV40 */
+ lvdsmanufacturerindex = bios->data[
+ bios->fp.fpxlatemanufacturertableptr +
+ fpstrapping];
+
+ /* we're done if this isn't the EDID panel case */
+ if (!pxclk)
+ break;
+
+ if (chip_version < 0x25) {
+ /* nv17 behaviour
+ *
+ * It seems the old style lvds script pointer is reused
+ * to select 18/24 bit colour depth for EDID panels.
+ */
+ lvdsmanufacturerindex =
+ (bios->legacy.lvds_single_a_script_ptr & 1) ?
+ 2 : 0;
+ if (pxclk >= bios->fp.duallink_transition_clk)
+ lvdsmanufacturerindex++;
+ } else if (chip_version < 0x30) {
+ /* nv28 behaviour (off-chip encoder)
+ *
+ * nv28 does a complex dance of first using byte 121 of
+ * the EDID to choose the lvdsmanufacturerindex, then
+ * later attempting to match the EDID manufacturer and
+ * product IDs in a table (signature 'pidt' (panel id
+ * table?)), setting an lvdsmanufacturerindex of 0 and
+ * an fp strap of the match index (or 0xf if none)
+ */
+ lvdsmanufacturerindex = 0;
+ } else {
+ /* nv31, nv34 behaviour */
+ lvdsmanufacturerindex = 0;
+ if (pxclk >= bios->fp.duallink_transition_clk)
+ lvdsmanufacturerindex = 2;
+ if (pxclk >= 140000)
+ lvdsmanufacturerindex = 3;
+ }
+
+ /*
+ * nvidia set the high nibble of (cr57=f, cr58) to
+ * lvdsmanufacturerindex in this case; we don't
+ */
+ break;
+ case 0x30: /* NV4x */
+ case 0x40: /* G80/G90 */
+ lvdsmanufacturerindex = fpstrapping;
+ break;
+ default:
+ NV_ERROR(dev, "LVDS table revision not currently supported\n");
+ return -ENOSYS;
+ }
+
+ lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
+ switch (lth.lvds_ver) {
+ case 0x0a:
+ bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
+ bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
+ bios->fp.dual_link = bios->data[lvdsofs] & 4;
+ bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
+ *if_is_24bit = bios->data[lvdsofs] & 16;
+ break;
+ case 0x30:
+ /*
+ * My money would be on there being a 24 bit interface bit in
+ * this table, but I have no example of a laptop bios with a
+ * 24 bit panel to confirm that. Hence we shout loudly if any
+ * bit other than bit 0 is set (I've not even seen bit 1)
+ */
+ if (bios->data[lvdsofs] > 1)
+ NV_ERROR(dev,
+ "You have a very unusual laptop display; please report it\n");
+ /*
+ * No sign of the "power off for reset" or "reset for panel
+ * on" bits, but it's safer to assume we should
+ */
+ bios->fp.power_off_for_reset = true;
+ bios->fp.reset_after_pclk_change = true;
+ /*
+ * It's OK that lvdsofs is wrong for the nv4x EDID case; dual_link is
+ * overwritten below, and BITbit1 isn't used.
+ */
+ bios->fp.dual_link = bios->data[lvdsofs] & 1;
+ bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
+ bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+ break;
+ case 0x40:
+ bios->fp.dual_link = bios->data[lvdsofs] & 1;
+ bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
+ bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
+ bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
+ break;
+ }
+
+ /* set dual_link flag for EDID case */
+ if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
+ bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
+
+ *dl = bios->fp.dual_link;
+
+ return 0;
+}
+
+static uint8_t *
+bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
+ uint16_t record, int record_len, int record_nr)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint32_t entry;
+ uint16_t table;
+ int i, v;
+
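+ /* each record points at an output table whose first dword encodes
+ * or/location/type; return the first one matching this DCB entry */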
+ for (i = 0; i < record_nr; i++, record += record_len) {
+ table = ROM16(bios->data[record]);
+ if (!table)
+ continue;
+ entry = ROM32(bios->data[table]);
+
+ v = (entry & 0x000f0000) >> 16;
+ if (!(v & dcbent->or))
+ continue;
+
+ v = (entry & 0x000000f0) >> 4;
+ if (v != dcbent->location)
+ continue;
+
+ v = (entry & 0x0000000f);
+ if (v != dcbent->type)
+ continue;
+
+ return &bios->data[table];
+ }
+
+ return NULL;
+}
+
+void *
+nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
+ int *length)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t *table;
+
+ if (!bios->display.dp_table_ptr) {
+ NV_ERROR(dev, "No pointer to DisplayPort table\n");
+ return NULL;
+ }
+ table = &bios->data[bios->display.dp_table_ptr];
+
+ if (table[0] != 0x21) {
+ NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
+ table[0]);
+ return NULL;
+ }
+
+ *length = table[4];
+ return bios_output_config_match(dev, dcbent,
+ bios->display.dp_table_ptr + table[1],
+ table[2], table[3]);
+}
+
+int
+nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
+ uint32_t sub, int pxclk)
+{
+ /*
+ * The display script table is located by the BIT 'U' table.
+ *
+ * It contains an array of pointers to various tables describing
+ * a particular output type. The first 32-bits of the output
+ * tables contains similar information to a DCB entry, and is
+ * used to decide whether that particular table is suitable for
+ * the output you want to access.
+ *
+ * The "record header length" field here seems to indicate the
+ * offset of the first configuration entry in the output tables.
+ * This is 10 on most cards I've seen, but 12 has been witnessed
+ * on DP cards, and there's another script pointer within the
+ * header.
+ *
+ * offset + 0 ( 8 bits): version
+ * offset + 1 ( 8 bits): header length
+ * offset + 2 ( 8 bits): record length
+ * offset + 3 ( 8 bits): number of records
+ * offset + 4 ( 8 bits): record header length
+ * offset + 5 (16 bits): pointer to first output script table
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct init_exec iexec = {true, false};
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint8_t *table = &bios->data[bios->display.script_table_ptr];
+ uint8_t *otable = NULL;
+ uint16_t script;
+ int i = 0;
+
+ if (!bios->display.script_table_ptr) {
+ NV_ERROR(dev, "No pointer to output script table\n");
+ return 1;
+ }
+
+ /*
+ * Nothing useful has been found in any of the pre-2.0 tables I've seen,
+ * so until something turns up there, we really don't need to care.
+ */
+ if (table[0] < 0x20)
+ return 1;
+
+ if (table[0] != 0x20 && table[0] != 0x21) {
+ NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
+ table[0]);
+ return 1;
+ }
+
+ /*
+ * The output script tables describing a particular output type
+ * look as follows:
+ *
+ * offset + 0 (32 bits): output this table matches (hash of DCB)
+ * offset + 4 ( 8 bits): unknown
+ * offset + 5 ( 8 bits): number of configurations
+ * offset + 6 (16 bits): pointer to some script
+ * offset + 8 (16 bits): pointer to some script
+ *
+ * headerlen == 10
+ * offset + 10 : configuration 0
+ *
+ * headerlen == 12
+ * offset + 10 : pointer to some script
+ * offset + 12 : configuration 0
+ *
+ * Each config entry is as follows:
+ *
+ * offset + 0 (16 bits): unknown, assumed to be a match value
+ * offset + 2 (16 bits): pointer to script table (clock set?)
+ * offset + 4 (16 bits): pointer to script table (reset?)
+ *
+ * There doesn't appear to be a count value to say how many
+ * entries exist in each script table, instead, a 0 value in
+ * the first 16-bit word seems to indicate both the end of the
+ * list and the default entry. The second 16-bit word in the
+ * script tables is a pointer to the script to execute.
+ */
+
+ NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
+ dcbent->type, dcbent->location, dcbent->or);
+ otable = bios_output_config_match(dev, dcbent, table[1] +
+ bios->display.script_table_ptr,
+ table[2], table[3]);
+ if (!otable) {
+ NV_ERROR(dev, "Couldn't find matching output script table\n");
+ return 1;
+ }
+
+ if (pxclk < -2 || pxclk > 0) {
+ /* Try to find matching script table entry */
+ for (i = 0; i < otable[5]; i++) {
+ if (ROM16(otable[table[4] + i*6]) == sub)
+ break;
+ }
+
+ if (i == otable[5]) {
+ NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
+ "using first\n",
+ sub, dcbent->type, dcbent->or);
+ i = 0;
+ }
+ }
+
+ bios->display.output = dcbent;
+
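+ /*
+ * pxclk selects the script: 0, -1 and -2 run output scripts 0, 1 and 2;
+ * a positive pxclk runs clock script 0 and a negative one clock script 1,
+ * both matched against the magnitude of pxclk.
+ */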
+ if (pxclk == 0) {
+ script = ROM16(otable[6]);
+ if (!script) {
+ NV_DEBUG_KMS(dev, "output script 0 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk == -1) {
+ script = ROM16(otable[8]);
+ if (!script) {
+ NV_DEBUG_KMS(dev, "output script 1 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk == -2) {
+ if (table[4] >= 12)
+ script = ROM16(otable[10]);
+ else
+ script = 0;
+ if (!script) {
+ NV_DEBUG_KMS(dev, "output script 2 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk > 0) {
+ script = ROM16(otable[table[4] + i*6 + 2]);
+ if (script)
+ script = clkcmptable(bios, script, pxclk);
+ if (!script) {
+ NV_ERROR(dev, "clock script 0 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+ parse_init_table(bios, script, &iexec);
+ } else
+ if (pxclk < 0) {
+ script = ROM16(otable[table[4] + i*6 + 4]);
+ if (script)
+ script = clkcmptable(bios, script, -pxclk);
+ if (!script) {
+ NV_DEBUG_KMS(dev, "clock script 1 not found\n");
+ return 1;
+ }
+
+ NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+ parse_init_table(bios, script, &iexec);
+ }
+
+ return 0;
+}
+
+
+int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
+{
+ /*
+ * the pxclk parameter is in kHz
+ *
+ * This runs the TMDS regs setting code found on BIT bios cards
+ *
+ * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
+ * ffs(or) == 3, use the second.
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int cv = bios->pub.chip_version;
+ uint16_t clktable = 0, scriptptr;
+ uint32_t sel_clk_binding, sel_clk;
+
+ /* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
+ if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
+ dcbent->location != DCB_LOC_ON_CHIP)
+ return 0;
+
+ switch (ffs(dcbent->or)) {
+ case 1:
+ clktable = bios->tmds.output0_script_ptr;
+ break;
+ case 2:
+ case 3:
+ clktable = bios->tmds.output1_script_ptr;
+ break;
+ }
+
+ if (!clktable) {
+ NV_ERROR(dev, "Pixel clock comparison table not found\n");
+ return -EINVAL;
+ }
+
+ scriptptr = clkcmptable(bios, clktable, pxclk);
+
+ if (!scriptptr) {
+ NV_ERROR(dev, "TMDS output init script not found\n");
+ return -ENOENT;
+ }
+
+ /* don't let script change pll->head binding */
+ sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+ run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
+ sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
+
+ return 0;
+}
+
+int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
+{
+ /*
+ * PLL limits table
+ *
+ * Version 0x10: NV30, NV31
+ * One byte header (version), one record of 24 bytes
+ * Version 0x11: NV36 - Not implemented
+ * Seems to have same record style as 0x10, but 3 records rather than 1
+ * Version 0x20: Found on Geforce 6 cards
+ * Trivial 4 byte BIT header. 31 (0x1f) byte record length
+ * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
+ * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
+ * length in general, some (integrated) have an extra configuration byte
+ * Version 0x30: Found on Geforce 8, separates the register mapping
+ * from the limits tables.
+ */
+
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int cv = bios->pub.chip_version, pllindex = 0;
+ uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
+ uint32_t crystal_strap_mask, crystal_straps;
+
+ if (!bios->pll_limit_tbl_ptr) {
+ if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+ cv >= 0x40) {
+ NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
+ return -EINVAL;
+ }
+ } else
+ pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
+
+ crystal_strap_mask = 1 << 6;
+ /* open coded dev->twoHeads test */
+ if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
+ crystal_strap_mask |= 1 << 22;
+ crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
+ crystal_strap_mask;
+
+ switch (pll_lim_ver) {
+ /*
+ * We use version 0 to indicate a pre limit table bios (single stage
+ * pll) and load the hard coded limits instead.
+ */
+ case 0:
+ break;
+ case 0x10:
+ case 0x11:
+ /*
+ * Strictly v0x11 has 3 entries, but the last two don't seem
+ * to get used.
+ */
+ headerlen = 1;
+ recordlen = 0x18;
+ entries = 1;
+ pllindex = 0;
+ break;
+ case 0x20:
+ case 0x21:
+ case 0x30:
+ case 0x40:
+ headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
+ recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
+ entries = bios->data[bios->pll_limit_tbl_ptr + 3];
+ break;
+ default:
+ NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
+ "supported\n", pll_lim_ver);
+ return -ENOSYS;
+ }
+
+ /* initialize all members to zero */
+ memset(pll_lim, 0, sizeof(struct pll_lims));
+
+ if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
+ uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
+
+ pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
+ pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
+ pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
+ pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
+ pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
+ pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
+ pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
+
+ /* these values taken from nv30/31/36 */
+ pll_lim->vco1.min_n = 0x1;
+ if (cv == 0x36)
+ pll_lim->vco1.min_n = 0x5;
+ pll_lim->vco1.max_n = 0xff;
+ pll_lim->vco1.min_m = 0x1;
+ pll_lim->vco1.max_m = 0xd;
+ pll_lim->vco2.min_n = 0x4;
+ /*
+ * On nv30, 31 and 36 (i.e. all cards with two-stage PLLs and this
+ * table version, apart from nv35), N2 is compared against both
+ * maxN2 (0x46) and 10 * maxM2 (10 * 0x4 = 0x28), so setting maxN2
+ * to 0x28 saves a comparison.
+ */
+ pll_lim->vco2.max_n = 0x28;
+ if (cv == 0x30 || cv == 0x35)
+ /* only 5 bits available for N2 on nv30/35 */
+ pll_lim->vco2.max_n = 0x1f;
+ pll_lim->vco2.min_m = 0x1;
+ pll_lim->vco2.max_m = 0x4;
+ pll_lim->max_log2p = 0x7;
+ pll_lim->max_usable_log2p = 0x6;
+ } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
+ uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
+ uint32_t reg = 0; /* default match */
+ uint8_t *pll_rec;
+ int i;
+
+ /*
+ * The first entry is the default match, used if nothing better is
+ * found. Warn if its reg field is non-zero.
+ */
+ if (ROM32(bios->data[plloffs]))
+ NV_WARN(dev, "Default PLL limit entry has non-zero "
+ "register field\n");
+
+ if (limit_match > MAX_PLL_TYPES)
+ /* we've been passed a reg as the match */
+ reg = limit_match;
+ else /* limit match is a pll type */
+ for (i = 1; i < entries && !reg; i++) {
+ uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
+
+ if (limit_match == NVPLL &&
+ (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
+ reg = cmpreg;
+ if (limit_match == MPLL &&
+ (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
+ reg = cmpreg;
+ if (limit_match == VPLL1 &&
+ (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
+ reg = cmpreg;
+ if (limit_match == VPLL2 &&
+ (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
+ reg = cmpreg;
+ }
+
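+ /* find the entry whose register field matches; if none does, pllindex
+ * stays 0 and the default entry is used */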
+ for (i = 1; i < entries; i++)
+ if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+ pllindex = i;
+ break;
+ }
+
+ pll_rec = &bios->data[plloffs + recordlen * pllindex];
+
+ BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
+ pllindex ? reg : 0);
+
+ /*
+ * Frequencies are stored in the tables in MHz; kHz are more
+ * useful to us, so we convert.
+ */
+
+ /* What output frequencies can each VCO generate? */
+ pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
+ pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
+ pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
+ pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
+
+ /* What input frequencies do they accept (past the m-divider)? */
+ pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
+ pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
+ pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
+ pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
+
+ /* What values are accepted as multiplier and divider? */
+ pll_lim->vco1.min_n = pll_rec[20];
+ pll_lim->vco1.max_n = pll_rec[21];
+ pll_lim->vco1.min_m = pll_rec[22];
+ pll_lim->vco1.max_m = pll_rec[23];
+ pll_lim->vco2.min_n = pll_rec[24];
+ pll_lim->vco2.max_n = pll_rec[25];
+ pll_lim->vco2.min_m = pll_rec[26];
+ pll_lim->vco2.max_m = pll_rec[27];
+
+ pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
+ if (pll_lim->max_log2p > 0x7)
+ /* pll decoding in nv_hw.c assumes never > 7 */
+ NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
+ pll_lim->max_log2p);
+ if (cv < 0x60)
+ pll_lim->max_usable_log2p = 0x6;
+ pll_lim->log2p_bias = pll_rec[30];
+
+ if (recordlen > 0x22)
+ pll_lim->refclk = ROM32(pll_rec[31]);
+
+ if (recordlen > 0x23 && pll_rec[35])
+ NV_WARN(dev,
+ "Bits set in PLL configuration byte (%x)\n",
+ pll_rec[35]);
+
+ /* C51 special not seen elsewhere */
+ if (cv == 0x51 && !pll_lim->refclk) {
+ uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
+
+ if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
+ ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+ if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
+ pll_lim->refclk = 200000;
+ else
+ pll_lim->refclk = 25000;
+ }
+ }
+ } else if (pll_lim_ver == 0x30) { /* ver 0x30 */
+ uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
+ uint8_t *record = NULL;
+ int i;
+
+ BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
+ limit_match);
+
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ if (ROM32(entry[3]) == limit_match) {
+ record = &bios->data[ROM16(entry[1])];
+ break;
+ }
+ }
+
+ if (!record) {
+ NV_ERROR(dev, "Register 0x%08x not found in PLL "
+ "limits table", limit_match);
+ return -ENOENT;
+ }
+
+ pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
+ pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
+ pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
+ pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
+ pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
+ pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
+ pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
+ pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
+ pll_lim->vco1.min_n = record[16];
+ pll_lim->vco1.max_n = record[17];
+ pll_lim->vco1.min_m = record[18];
+ pll_lim->vco1.max_m = record[19];
+ pll_lim->vco2.min_n = record[20];
+ pll_lim->vco2.max_n = record[21];
+ pll_lim->vco2.min_m = record[22];
+ pll_lim->vco2.max_m = record[23];
+ pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
+ pll_lim->log2p_bias = record[27];
+ pll_lim->refclk = ROM32(record[28]);
+ } else if (pll_lim_ver) { /* ver 0x40 */
+ uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
+ uint8_t *record = NULL;
+ int i;
+
+ BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
+ limit_match);
+
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ if (ROM32(entry[3]) == limit_match) {
+ record = &bios->data[ROM16(entry[1])];
+ break;
+ }
+ }
+
+ if (!record) {
+ NV_ERROR(dev, "Register 0x%08x not found in PLL "
+ "limits table", limit_match);
+ return -ENOENT;
+ }
+
+ pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
+ pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
+ pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
+ pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
+ pll_lim->vco1.min_m = record[8];
+ pll_lim->vco1.max_m = record[9];
+ pll_lim->vco1.min_n = record[10];
+ pll_lim->vco1.max_n = record[11];
+ pll_lim->min_p = record[12];
+ pll_lim->max_p = record[13];
+ /* where did this go to?? */
+ if (limit_match == 0x00614100 || limit_match == 0x00614900)
+ pll_lim->refclk = 27000;
+ else
+ pll_lim->refclk = 100000;
+ }
+
+ /*
+ * By now any valid limit table ought to have set a max frequency for
+ * vco1, so if it's zero it's either a pre limit table bios, or one
+ * with an empty limit table (seen on nv18)
+ */
+ if (!pll_lim->vco1.maxfreq) {
+ pll_lim->vco1.minfreq = bios->fminvco;
+ pll_lim->vco1.maxfreq = bios->fmaxvco;
+ pll_lim->vco1.min_inputfreq = 0;
+ pll_lim->vco1.max_inputfreq = INT_MAX;
+ pll_lim->vco1.min_n = 0x1;
+ pll_lim->vco1.max_n = 0xff;
+ pll_lim->vco1.min_m = 0x1;
+ if (crystal_straps == 0) {
+ /* nv05 does this, nv11 doesn't, nv10 unknown */
+ if (cv < 0x11)
+ pll_lim->vco1.min_m = 0x7;
+ pll_lim->vco1.max_m = 0xd;
+ } else {
+ if (cv < 0x11)
+ pll_lim->vco1.min_m = 0x8;
+ pll_lim->vco1.max_m = 0xe;
+ }
+ if (cv < 0x17 || cv == 0x1a || cv == 0x20)
+ pll_lim->max_log2p = 4;
+ else
+ pll_lim->max_log2p = 5;
+ pll_lim->max_usable_log2p = pll_lim->max_log2p;
+ }
+
+ if (!pll_lim->refclk)
+ switch (crystal_straps) {
+ case 0:
+ pll_lim->refclk = 13500;
+ break;
+ case (1 << 6):
+ pll_lim->refclk = 14318;
+ break;
+ case (1 << 22):
+ pll_lim->refclk = 27000;
+ break;
+ case (1 << 22 | 1 << 6):
+ pll_lim->refclk = 25000;
+ break;
+ }
+
+#if 0 /* for easy debugging */
+ ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
+ ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
+ ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
+ ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
+
+ ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
+ ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
+ ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
+ ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
+
+ ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
+ ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
+ ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
+ ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
+ ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
+ ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
+ ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
+ ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
+
+ ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
+ ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
+
+ ErrorF("pll.refclk: %d\n", pll_lim->refclk);
+#endif
+
+ return 0;
+}
+
+static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
+{
+ /*
+ * offset + 0 (8 bits): Micro version
+ * offset + 1 (8 bits): Minor version
+ * offset + 2 (8 bits): Chip version
+ * offset + 3 (8 bits): Major version
+ */
+
+ bios->major_version = bios->data[offset + 3];
+ bios->pub.chip_version = bios->data[offset + 2];
+ NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
+ bios->data[offset + 3], bios->data[offset + 2],
+ bios->data[offset + 1], bios->data[offset]);
+}
+
+static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
+{
+ /*
+ * Parses the init table segment for pointers used in script execution.
+ *
+ * offset + 0 (16 bits): init script tables pointer
+ * offset + 2 (16 bits): macro index table pointer
+ * offset + 4 (16 bits): macro table pointer
+ * offset + 6 (16 bits): condition table pointer
+ * offset + 8 (16 bits): io condition table pointer
+ * offset + 10 (16 bits): io flag condition table pointer
+ * offset + 12 (16 bits): init function table pointer
+ */
+
+ bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
+ bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
+ bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
+ bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
+ bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
+ bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
+ bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
+}
+
+static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the load detect values for g80 cards.
+ *
+ * offset + 0 (16 bits): loadval table pointer
+ */
+
+ uint16_t load_table_ptr;
+ uint8_t version, headerlen, entrylen, num_entries;
+
+ if (bitentry->length != 3) {
+ NV_ERROR(dev, "Do not understand BIT A table\n");
+ return -EINVAL;
+ }
+
+ load_table_ptr = ROM16(bios->data[bitentry->offset]);
+
+ if (load_table_ptr == 0x0) {
+ NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
+ return -EINVAL;
+ }
+
+ version = bios->data[load_table_ptr];
+
+ if (version != 0x10) {
+ NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
+ version >> 4, version & 0xF);
+ return -ENOSYS;
+ }
+
+ headerlen = bios->data[load_table_ptr + 1];
+ entrylen = bios->data[load_table_ptr + 2];
+ num_entries = bios->data[load_table_ptr + 3];
+
+ if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
+ NV_ERROR(dev, "Do not understand BIT loadval table\n");
+ return -EINVAL;
+ }
+
+ /* First entry is normal dac, 2nd tv-out perhaps? */
+ bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+
+ return 0;
+}
+
+static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * offset + 8 (16 bits): PLL limits table pointer
+ *
+ * There's more in here, but that's unknown.
+ */
+
+ if (bitentry->length < 10) {
+ NV_ERROR(dev, "Do not understand BIT C table\n");
+ return -EINVAL;
+ }
+
+ bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
+
+ return 0;
+}
+
+static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the flat panel table segment that the bit entry points to.
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): ??? table pointer - seems to have 18 byte
+ * records beginning with a freq.
+ * offset + 2 (16 bits): mode table pointer
+ */
+
+ if (bitentry->length != 4) {
+ NV_ERROR(dev, "Do not understand BIT display table\n");
+ return -EINVAL;
+ }
+
+ bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
+
+ return 0;
+}
+
+static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the init table segment that the bit entry points to.
+ *
+ * See parse_script_table_pointers for layout
+ */
+
+ if (bitentry->length < 14) {
+ NV_ERROR(dev, "Do not understand init table\n");
+ return -EINVAL;
+ }
+
+ parse_script_table_pointers(bios, bitentry->offset);
+
+ if (bitentry->length >= 16)
+ bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
+ if (bitentry->length >= 18)
+ bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
+
+ return 0;
+}
+
+static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * BIT 'i' (info?) table
+ *
+ * offset + 0 (32 bits): BIOS version dword (as in B table)
+ * offset + 5 (8 bits): BIOS feature byte (same as for BMP?)
+ * offset + 13 (16 bits): pointer to table containing DAC load
+ * detection comparison values
+ *
+ * There are other things in the table, their purpose unknown.
+ */
+
+ uint16_t daccmpoffset;
+ uint8_t dacver, dacheaderlen;
+
+ if (bitentry->length < 6) {
+ NV_ERROR(dev, "BIT i table too short for needed information\n");
+ return -EINVAL;
+ }
+
+ parse_bios_version(dev, bios, bitentry->offset);
+
+ /*
+ * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
+ * Quadro identity crisis), other bits possibly as for BMP feature byte
+ */
+ bios->feature_byte = bios->data[bitentry->offset + 5];
+ bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
+
+ if (bitentry->length < 15) {
+ NV_WARN(dev, "BIT i table not long enough for DAC load "
+ "detection comparison table\n");
+ return -EINVAL;
+ }
+
+ daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
+
+ /* doesn't exist on g80 */
+ if (!daccmpoffset)
+ return 0;
+
+ /*
+ * The first value in the table, following the header, is the
+ * comparison value, the second entry is a comparison value for
+ * TV load detection.
+ */
+
+ dacver = bios->data[daccmpoffset];
+ dacheaderlen = bios->data[daccmpoffset + 1];
+
+ if (dacver != 0x00 && dacver != 0x10) {
+ NV_WARN(dev, "DAC load detection comparison table version "
+ "%d.%d not known\n", dacver >> 4, dacver & 0xf);
+ return -ENOSYS;
+ }
+
+ bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+ bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+
+ return 0;
+}
+
+static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the LVDS table segment that the bit entry points to.
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): LVDS strap xlate table pointer
+ */
+
+ if (bitentry->length != 2) {
+ NV_ERROR(dev, "Do not understand BIT LVDS table\n");
+ return -EINVAL;
+ }
+
+ /*
+ * No idea if it's still called the LVDS manufacturer table, but
+ * the concept's close enough.
+ */
+ bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
+
+ return 0;
+}
+
+static int
+parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_entry *bitentry)
+{
+ /*
+ * offset + 2 (8 bits): number of options in an
+ * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
+ * offset + 3 (16 bits): pointer to strap xlate table for RAM
+ * restrict option selection
+ *
+ * There are a bunch of other bits in this table, besides the RAM
+ * restrict stuff, that we don't use - their purpose is currently
+ * unknown.
+ */
+
+ /*
+ * Older bios versions don't have a sufficiently long table for
+ * what we want
+ */
+ if (bitentry->length < 0x5)
+ return 0;
+
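+ /* entry layout appears to differ between BIT M table versions: v2 and
+ * later seem to keep these fields at the start of the entry */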
+ if (bitentry->id[1] < 2) {
+ bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
+ bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
+ } else {
+ bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
+ bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
+ }
+
+ return 0;
+}
+
+static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
+{
+ /*
+ * Parses the pointer to the TMDS table
+ *
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): TMDS table pointer
+ *
+ * The TMDS table is typically found just before the DCB table, with a
+ * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
+ * length?)
+ *
+ * At offset +7 is a pointer to a script, which I don't know how to
+ * run yet.
+ * At offset +9 is a pointer to another script, likewise
+ * Offset +11 has a pointer to a table where the first word is a pxclk
+ * frequency and the second word a pointer to a script, which should be
+ * run if the comparison pxclk frequency is less than the pxclk desired.
+ * This repeats for decreasing comparison frequencies
+ * Offset +13 has a pointer to a similar table
+ * The selection of table (and possibly +7/+9 script) is dictated by
+ * "or" from the DCB.
+ */
+
+ uint16_t tmdstableptr, script1, script2;
+
+ if (bitentry->length != 2) {
+ NV_ERROR(dev, "Do not understand BIT TMDS table\n");
+ return -EINVAL;
+ }
+
+ tmdstableptr = ROM16(bios->data[bitentry->offset]);
+
+ if (tmdstableptr == 0x0) {
+ NV_ERROR(dev, "Pointer to TMDS table invalid\n");
+ return -EINVAL;
+ }
+
+ /* nv50+ has v2.0, but we don't parse it atm */
+ if (bios->data[tmdstableptr] != 0x11) {
+ NV_WARN(dev,
+ "TMDS table revision %d.%d not currently supported\n",
+ bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+ return -ENOSYS;
+ }
+
+ /*
+ * These two scripts are odd: they don't seem to get run even when
+ * they are not stubbed.
+ */
+ script1 = ROM16(bios->data[tmdstableptr + 7]);
+ script2 = ROM16(bios->data[tmdstableptr + 9]);
+ if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
+ NV_WARN(dev, "TMDS table script pointers not stubbed\n");
+
+ bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
+ bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
+
+ return 0;
+}
+
+static int
+parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_entry *bitentry)
+{
+ /*
+ * Parses the pointer to the G80 output script tables
+ *
+ * Starting at bitentry->offset:
+ *
+ * offset + 0 (16 bits): output script table pointer
+ */
+
+ uint16_t outputscripttableptr;
+
+ if (bitentry->length != 3) {
+ NV_ERROR(dev, "Do not understand BIT U table\n");
+ return -EINVAL;
+ }
+
+ outputscripttableptr = ROM16(bios->data[bitentry->offset]);
+ bios->display.script_table_ptr = outputscripttableptr;
+ return 0;
+}
+
+static int
+parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
+ struct bit_entry *bitentry)
+{
+ bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
+ return 0;
+}
+
+struct bit_table {
+ const char id;
+ int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
+};
+
+#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
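+/* e.g. BIT_TABLE('i', i) expands to ((struct bit_table){ 'i', parse_bit_i_tbl_entry }) */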
+
+static int
+parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
+ struct bit_table *table)
+{
+ struct drm_device *dev = bios->dev;
+ uint8_t maxentries = bios->data[bitoffset + 4];
+ int i, offset;
+ struct bit_entry bitentry;
+
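+ /*
+ * Each BIT entry is 6 bytes: an id byte, a second id/version byte, a
+ * 16-bit length and a 16-bit offset of the data it points to.
+ */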
+ for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
+ bitentry.id[0] = bios->data[offset];
+
+ if (bitentry.id[0] != table->id)
+ continue;
+
+ bitentry.id[1] = bios->data[offset + 1];
+ bitentry.length = ROM16(bios->data[offset + 2]);
+ bitentry.offset = ROM16(bios->data[offset + 4]);
+
+ return table->parse_fn(dev, bios, &bitentry);
+ }
+
+ NV_INFO(dev, "BIT table '%c' not found\n", table->id);
+ return -ENOSYS;
+}
+
+static int
+parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
+{
+ int ret;
+
+ /*
+ * The only restriction on parsing order currently is having 'i' first
+ * for use of bios->*_version or bios->feature_byte while parsing;
+ * functions shouldn't be actually *doing* anything apart from pulling
+ * data from the image into the bios struct, thus no interdependencies
+ */
+ ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
+ if (ret) /* info? */
+ return ret;
+ if (bios->major_version >= 0x60) /* g80+ */
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
+ ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
+ if (ret)
+ return ret;
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
+ ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
+ if (ret)
+ return ret;
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
+ parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
+
+ return 0;
+}
+
+static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
+{
+ /*
+ * Parses the BMP structure for useful things, but does not act on them
+ *
+ * offset + 5: BMP major version
+ * offset + 6: BMP minor version
+ * offset + 9: BMP feature byte
+ * offset + 10: BCD encoded BIOS version
+ *
+ * offset + 18: init script table pointer (for bios versions < 5.10h)
+ * offset + 20: extra init script table pointer (for bios
+ * versions < 5.10h)
+ *
+ * offset + 24: memory init table pointer (used on early bios versions)
+ * offset + 26: SDR memory sequencing setup data table
+ * offset + 28: DDR memory sequencing setup data table
+ *
+ * offset + 54: index of I2C CRTC pair to use for CRT output
+ * offset + 55: index of I2C CRTC pair to use for TV output
+ * offset + 56: index of I2C CRTC pair to use for flat panel output
+ * offset + 58: write CRTC index for I2C pair 0
+ * offset + 59: read CRTC index for I2C pair 0
+ * offset + 60: write CRTC index for I2C pair 1
+ * offset + 61: read CRTC index for I2C pair 1
+ *
+ * offset + 67: maximum internal PLL frequency (single stage PLL)
+ * offset + 71: minimum internal PLL frequency (single stage PLL)
+ *
+ * offset + 75: script table pointers, as described in
+ * parse_script_table_pointers
+ *
+ * offset + 89: TMDS single link output A table pointer
+ * offset + 91: TMDS single link output B table pointer
+ * offset + 95: LVDS single link output A table pointer
+ * offset + 105: flat panel timings table pointer
+ * offset + 107: flat panel strapping translation table pointer
+ * offset + 117: LVDS manufacturer panel config table pointer
+ * offset + 119: LVDS manufacturer strapping translation table pointer
+ *
+ * offset + 142: PLL limits table pointer
+ *
+ * offset + 156: minimum pixel clock for LVDS dual link
+ */
+
+ uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
+ uint16_t bmplength;
+ uint16_t legacy_scripts_offset, legacy_i2c_offset;
+
+ /* load needed defaults in case we can't parse this info */
+ bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+ bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+ bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+ bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+ bios->pub.digital_min_front_porch = 0x4b;
+ bios->fmaxvco = 256000;
+ bios->fminvco = 128000;
+ bios->fp.duallink_transition_clk = 90000;
+
+ bmp_version_major = bmp[5];
+ bmp_version_minor = bmp[6];
+
+ NV_TRACE(dev, "BMP version %d.%d\n",
+ bmp_version_major, bmp_version_minor);
+
+ /*
+ * Make sure that 0x36 is blank and can't be mistaken for a DCB
+ * pointer on early versions
+ */
+ if (bmp_version_major < 5)
+ *(uint16_t *)&bios->data[0x36] = 0;
+
+ /*
+ * Seems that the minor version was 1 for all major versions prior
+ * to 5. Version 6 could theoretically exist, but I suspect BIT
+ * happened instead.
+ */
+ if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
+ NV_ERROR(dev, "You have an unsupported BMP version. "
+ "Please send in your bios\n");
+ return -ENOSYS;
+ }
+
+ if (bmp_version_major == 0)
+ /* nothing that's currently useful in this version */
+ return 0;
+ else if (bmp_version_major == 1)
+ bmplength = 44; /* exact for 1.01 */
+ else if (bmp_version_major == 2)
+ bmplength = 48; /* exact for 2.01 */
+ else if (bmp_version_major == 3)
+ bmplength = 54;
+ /* guessed - mem init tables added in this version */
+ else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
+ /* don't know if 5.0 exists... */
+ bmplength = 62;
+ /* guessed - BMP I2C indices added in version 4 */
+ else if (bmp_version_minor < 0x6)
+ bmplength = 67; /* exact for 5.01 */
+ else if (bmp_version_minor < 0x10)
+ bmplength = 75; /* exact for 5.06 */
+ else if (bmp_version_minor == 0x10)
+ bmplength = 89; /* exact for 5.10h */
+ else if (bmp_version_minor < 0x14)
+ bmplength = 118; /* exact for 5.11h */
+ else if (bmp_version_minor < 0x24)
+ /*
+ * Not sure of version where pll limits came in;
+ * certainly exist by 0x24 though.
+ */
+ /* length not exact: this is long enough to get lvds members */
+ bmplength = 123;
+ else if (bmp_version_minor < 0x27)
+ /*
+ * Length not exact: this is long enough to get pll limit
+ * member
+ */
+ bmplength = 144;
+ else
+ /*
+ * Length not exact: this is long enough to get dual link
+ * transition clock.
+ */
+ bmplength = 158;
+
+ /* checksum */
+ if (nv_cksum(bmp, 8)) {
+ NV_ERROR(dev, "Bad BMP checksum\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Bit 4 seems to indicate either a mobile bios or a quadro card --
+ * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl
+ * (not nv10gl), bit 5 that the flat panel tables are present, and
+ * bit 6 a tv bios.
+ */
+ bios->feature_byte = bmp[9];
+
+ parse_bios_version(dev, bios, offset + 10);
+
+ if (bmp_version_major < 5 || bmp_version_minor < 0x10)
+ bios->old_style_init = true;
+ legacy_scripts_offset = 18;
+ if (bmp_version_major < 2)
+ legacy_scripts_offset -= 4;
+ bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
+ bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);
+
+ if (bmp_version_major > 2) { /* appears in BMP 3 */
+ bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
+ bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
+ bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
+ }
+
+ legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */
+ if (bmplength > 61)
+ legacy_i2c_offset = offset + 54;
+ bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
+ bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
+ bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
+ bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+
+ if (bmplength > 74) {
+ bios->fmaxvco = ROM32(bmp[67]);
+ bios->fminvco = ROM32(bmp[71]);
+ }
+ if (bmplength > 88)
+ parse_script_table_pointers(bios, offset + 75);
+ if (bmplength > 94) {
+ bios->tmds.output0_script_ptr = ROM16(bmp[89]);
+ bios->tmds.output1_script_ptr = ROM16(bmp[91]);
+ /*
+ * Never observed in use with lvds scripts, but is reused for
+ * 18/24 bit panel interface default for EDID equipped panels
+ * (if_is_24bit not set directly to avoid any oscillation).
+ */
+ bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
+ }
+ if (bmplength > 108) {
+ bios->fp.fptablepointer = ROM16(bmp[105]);
+ bios->fp.fpxlatetableptr = ROM16(bmp[107]);
+ bios->fp.xlatwidth = 1;
+ }
+ if (bmplength > 120) {
+ bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
+ bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
+ }
+ if (bmplength > 143)
+ bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
+
+ if (bmplength > 157)
+ bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
+
+ return 0;
+}
+
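+/*
+ * Naive substring search: returns the offset of the first match of str
+ * within data, or 0 when there is no match (callers treat 0 as "not found").
+ */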
+static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
+{
+ int i, j;
+
+ for (i = 0; i <= (n - len); i++) {
+ for (j = 0; j < len; j++)
+ if (data[i + j] != str[j])
+ break;
+ if (j == len)
+ return i;
+ }
+
+ return 0;
+}
+
+static int
+read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+{
+ uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+ int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+ int recordoffset = 0, rdofs = 1, wrofs = 0;
+ uint8_t port_type = 0;
+
+ if (!i2ctable)
+ return -EINVAL;
+
+ if (dcb_version >= 0x30) {
+ if (i2ctable[0] != dcb_version) /* necessary? */
+ NV_WARN(dev,
+ "DCB I2C table version mismatch (%02X vs %02X)\n",
+ i2ctable[0], dcb_version);
+ dcb_i2c_ver = i2ctable[0];
+ headerlen = i2ctable[1];
+ if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+ i2c_entries = i2ctable[2];
+ else
+ NV_WARN(dev,
+ "DCB I2C table has more entries than indexable "
+ "(%d entries, max index 15)\n", i2ctable[2]);
+ entry_len = i2ctable[3];
+ /* [4] is i2c_default_indices, read in parse_dcb_table() */
+ }
+ /*
+ * It's your own fault if you call this function on a DCB 1.1 BIOS --
+ * the test below is for DCB 1.2
+ */
+ if (dcb_version < 0x14) {
+ recordoffset = 2;
+ rdofs = 0;
+ wrofs = 1;
+ }
+
+ if (index == 0xf)
+ return 0;
+ if (index > i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n",
+ index, i2ctable[2]);
+ return -ENOENT;
+ }
+ if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+ NV_ERROR(dev, "DCB I2C entry invalid\n");
+ return -EINVAL;
+ }
+
+ if (dcb_i2c_ver >= 0x30) {
+ port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+
+ /*
+ * Fixup for chips using same address offset for read and
+ * write.
+ */
+ if (port_type == 4) /* seen on C51 */
+ rdofs = wrofs = 1;
+ if (port_type >= 5) /* G80+ */
+ rdofs = wrofs = 0;
+ }
+
+ if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
+ NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+
+ i2c->port_type = port_type;
+ i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+ i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+
+ return 0;
+}
+
+static struct dcb_gpio_entry *
+new_gpio_entry(struct nvbios *bios)
+{
+ struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
+
+ return &gpio->entry[gpio->entries++];
+}
+
+struct dcb_gpio_entry *
+nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int i;
+
+ for (i = 0; i < bios->bdcb.gpio.entries; i++) {
+ if (bios->bdcb.gpio.entry[i].tag != tag)
+ continue;
+
+ return &bios->bdcb.gpio.entry[i];
+ }
+
+ return NULL;
+}
+
+static void
+parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
+{
+ struct dcb_gpio_entry *gpio;
+ uint16_t ent = ROM16(bios->data[offset]);
+ uint8_t line = ent & 0x1f,
+ tag = ent >> 5 & 0x3f,
+ flags = ent >> 11 & 0x1f;
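+ /* 16-bit entry: bits 4:0 line, 10:5 tag, 15:11 flags; tag 0x3f = unused */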
+
+ if (tag == 0x3f)
+ return;
+
+ gpio = new_gpio_entry(bios);
+
+ gpio->tag = tag;
+ gpio->line = line;
+ gpio->invert = flags != 4;
+}
+
+static void
+parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
+{
+ struct dcb_gpio_entry *gpio;
+ uint32_t ent = ROM32(bios->data[offset]);
+ uint8_t line = ent & 0x1f,
+ tag = ent >> 8 & 0xff;
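+ /* 32-bit entry: bits 4:0 line, 15:8 tag; tag 0xff = unused */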
+
+ if (tag == 0xff)
+ return;
+
+ gpio = new_gpio_entry(bios);
+
+ /* Currently unused, we may need more fields parsed at some
+ * point. */
+ gpio->tag = tag;
+ gpio->line = line;
+}
+
+static void
+parse_dcb_gpio_table(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
+ uint8_t *gpio_table = &bios->data[gpio_table_ptr];
+ int header_len = gpio_table[1],
+ entries = gpio_table[2],
+ entry_len = gpio_table[3];
+ void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+ int i;
+
+ if (bios->bdcb.version >= 0x40) {
+ if (gpio_table_ptr && entry_len != 4) {
+ NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
+ return;
+ }
+
+ parse_entry = parse_dcb40_gpio_entry;
+
+ } else if (bios->bdcb.version >= 0x30) {
+ if (gpio_table_ptr && entry_len != 2) {
+ NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
+ return;
+ }
+
+ parse_entry = parse_dcb30_gpio_entry;
+
+ } else if (bios->bdcb.version >= 0x22) {
+ /*
+ * DCBs older than v3.0 don't really have a GPIO
+ * table, instead they keep some GPIO info at fixed
+ * locations.
+ */
+ uint16_t dcbptr = ROM16(bios->data[0x36]);
+ uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+
+ if (tvdac_gpio[0] & 1) {
+ struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+ gpio->tag = DCB_GPIO_TVDAC0;
+ gpio->line = tvdac_gpio[1] >> 4;
+ gpio->invert = tvdac_gpio[0] & 2;
+ }
+ }
+
+ if (!gpio_table_ptr)
+ return;
+
+ if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
+ NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
+ entries = DCB_MAX_NUM_GPIO_ENTRIES;
+ }
+
+ for (i = 0; i < entries; i++)
+ parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
+}
+
+struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *dev, int index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct dcb_connector_table_entry *cte;
+
+ if (index >= bios->bdcb.connector.entries)
+ return NULL;
+
+ cte = &bios->bdcb.connector.entry[index];
+ if (cte->type == 0xff)
+ return NULL;
+
+ return cte;
+}
+
+static void
+parse_dcb_connector_table(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ struct dcb_connector_table *ct = &bios->bdcb.connector;
+ struct dcb_connector_table_entry *cte;
+ uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
+ uint8_t *entry;
+ int i;
+
+ if (!bios->bdcb.connector_table_ptr) {
+ NV_DEBUG_KMS(dev, "No DCB connector table present\n");
+ return;
+ }
+
+ NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
+ conntab[0], conntab[1], conntab[2], conntab[3]);
+ if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
+ (conntab[3] != 2 && conntab[3] != 4)) {
+ NV_ERROR(dev, " Unknown! Please report.\n");
+ return;
+ }
+
+ ct->entries = conntab[2];
+
+ entry = conntab + conntab[1];
+ cte = &ct->entry[0];
+ for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+ if (conntab[3] == 2)
+ cte->entry = ROM16(entry[0]);
+ else
+ cte->entry = ROM32(entry[0]);
+ cte->type = (cte->entry & 0x000000ff) >> 0;
+ cte->index = (cte->entry & 0x00000f00) >> 8;
+ switch (cte->entry & 0x00033000) {
+ case 0x00001000:
+ cte->gpio_tag = 0x07;
+ break;
+ case 0x00002000:
+ cte->gpio_tag = 0x08;
+ break;
+ case 0x00010000:
+ cte->gpio_tag = 0x51;
+ break;
+ case 0x00020000:
+ cte->gpio_tag = 0x52;
+ break;
+ default:
+ cte->gpio_tag = 0xff;
+ break;
+ }
+
+ if (cte->type == 0xff)
+ continue;
+
+ NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
+ i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+ }
+}
+
+static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
+{
+ struct dcb_entry *entry = &dcb->entry[dcb->entries];
+
+ memset(entry, 0, sizeof(struct dcb_entry));
+ entry->index = dcb->entries++;
+
+ return entry;
+}
+
+static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
+{
+ struct dcb_entry *entry = new_dcb_entry(dcb);
+
+ entry->type = 0;
+ entry->i2c_index = i2c;
+ entry->heads = heads;
+ entry->location = DCB_LOC_ON_CHIP;
+ /* "or" mostly unused in early gen crt modesetting, 0 is fine */
+}
+
+static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
+{
+ struct dcb_entry *entry = new_dcb_entry(dcb);
+
+ entry->type = 2;
+ entry->i2c_index = LEGACY_I2C_PANEL;
+ entry->heads = twoHeads ? 3 : 1;
+ entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+ entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
+ entry->duallink_possible = false; /* SiI164 and co. are single link */
+
+#if 0
+ /*
+ * For dvi-a either crtc probably works, but my card appears to only
+ * support dvi-d. "nvidia" still attempts to program it for dvi-a,
+ * doing the full fp output setup (program 0x6808.. fp dimension regs,
+ * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
+ * the monitor picks up the mode res ok and lights up, but no pixel
+ * data appears, so the board manufacturer probably connected up the
+ * sync lines, but missed the video traces / components
+ *
+ * with this introduction, dvi-a left as an exercise for the reader.
+ */
+ fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
+#endif
+}
+
+static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
+{
+ struct dcb_entry *entry = new_dcb_entry(dcb);
+
+ entry->type = 1;
+ entry->i2c_index = LEGACY_I2C_TV;
+ entry->heads = twoHeads ? 3 : 1;
+ entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+}
+
+static bool
+parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+ uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+{
+ entry->type = conn & 0xf;
+ entry->i2c_index = (conn >> 4) & 0xf;
+ entry->heads = (conn >> 8) & 0xf;
+ if (bdcb->version >= 0x40)
+ entry->connector = (conn >> 12) & 0xf;
+ entry->bus = (conn >> 16) & 0xf;
+ entry->location = (conn >> 20) & 0x3;
+ entry->or = (conn >> 24) & 0xf;
+ /*
+ * Normal entries consist of a single bit, but dual link has the
+ * next most significant bit set too
+ */
+ entry->duallink_possible =
+ ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
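+ /*
+ * e.g. or == 0x1 or 0x2 (a single bit) fails this test, while
+ * or == 0x3 or 0x6 (two adjacent bits) passes it, so dual link
+ * is considered possible.
+ */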
+
+ switch (entry->type) {
+ case OUTPUT_ANALOG:
+ /*
+ * Although the rest of a CRT conf dword is usually
+ * zeros, mac biosen have stuff there so we must mask
+ */
+ entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
+ (conf & 0xffff) * 10 :
+ (conf & 0xff) * 10000;
+ break;
+ case OUTPUT_LVDS:
+ {
+ uint32_t mask;
+ if (conf & 0x1)
+ entry->lvdsconf.use_straps_for_mode = true;
+ if (bdcb->version < 0x22) {
+ mask = ~0xd;
+ /*
+ * The laptop in bug 14567 lies and claims to not use
+ * straps when it does, so assume all DCB 2.0 laptops
+ * use straps, until a broken EDID using one is produced
+ */
+ entry->lvdsconf.use_straps_for_mode = true;
+ /*
+ * Both 0x4 and 0x8 show up in v2.0 tables; assume they
+ * mean the same thing (probably wrong, but might work)
+ */
+ if (conf & 0x4 || conf & 0x8)
+ entry->lvdsconf.use_power_scripts = true;
+ } else {
+ mask = ~0x5;
+ if (conf & 0x4)
+ entry->lvdsconf.use_power_scripts = true;
+ }
+ if (conf & mask) {
+ /*
+ * Until we even try to use these on G8x, it's
+ * useless reporting unknown bits. They all are.
+ */
+ if (bdcb->version >= 0x40)
+ break;
+
+ NV_ERROR(dev, "Unknown LVDS configuration bits, "
+ "please report\n");
+ }
+ break;
+ }
+ case OUTPUT_TV:
+ {
+ if (bdcb->version >= 0x30)
+ entry->tvconf.has_component_output = conf & (0x8 << 4);
+ else
+ entry->tvconf.has_component_output = false;
+
+ break;
+ }
+ case OUTPUT_DP:
+ entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
+ entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
+ switch ((conf & 0x0f000000) >> 24) {
+ case 0xf:
+ entry->dpconf.link_nr = 4;
+ break;
+ case 0x3:
+ entry->dpconf.link_nr = 2;
+ break;
+ default:
+ entry->dpconf.link_nr = 1;
+ break;
+ }
+ break;
+ case OUTPUT_TMDS:
+ entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ break;
+ case 0xe:
+ /* weird g80 mobile type that "nv" treats as a terminator */
+ bdcb->dcb.entries--;
+ return false;
+ }
+
+ /* unsure what DCB version introduces this, 3.0? */
+ if (conf & 0x100000)
+ entry->i2c_upper_default = true;
+
+ return true;
+}
+
+static bool
+parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
+ uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+{
+ if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
+ conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
+ conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
+ conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
+ conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
+ conn != 0xf2205004 && conn != 0xf2209004) {
+ NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
+
+ /* cause output setting to fail for !TV, so message is seen */
+ if ((conn & 0xf) != 0x1)
+ dcb->entries = 0;
+
+ return false;
+ }
+ /* most of the below is a "best guess" atm */
+ entry->type = conn & 0xf;
+ if (entry->type == 2)
+ /* another way of specifying straps based lvds... */
+ entry->type = OUTPUT_LVDS;
+ if (entry->type == 4) { /* digital */
+ if (conn & 0x10)
+ entry->type = OUTPUT_LVDS;
+ else
+ entry->type = OUTPUT_TMDS;
+ }
+ /* what's in bits 5-13? could be some encoder maker thing, in tv case */
+ entry->i2c_index = (conn >> 14) & 0xf;
+ /* raw heads field is in range 0-1, so move to 1-2 */
+ entry->heads = ((conn >> 18) & 0x7) + 1;
+ entry->location = (conn >> 21) & 0xf;
+ /* unused: entry->bus = (conn >> 25) & 0x7; */
+ /* set or to be same as heads -- hopefully safe enough */
+ entry->or = entry->heads;
+ entry->duallink_possible = false;
+
+ switch (entry->type) {
+ case OUTPUT_ANALOG:
+ entry->crtconf.maxfreq = (conf & 0xffff) * 10;
+ break;
+ case OUTPUT_LVDS:
+ /*
+ * This is probably buried in conn's unknown bits.
+ * This will upset EDID-ful models, if they exist
+ */
+ entry->lvdsconf.use_straps_for_mode = true;
+ entry->lvdsconf.use_power_scripts = true;
+ break;
+ case OUTPUT_TMDS:
+ /*
+ * Invent a DVI-A output, by copying the fields of the DVI-D
+ * output; reported to work by math_b on an NV20(!).
+ */
+ fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
+ break;
+ case OUTPUT_TV:
+ entry->tvconf.has_component_output = false;
+ break;
+ }
+
+ return true;
+}
+
+static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+ uint32_t conn, uint32_t conf)
+{
+ struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
+ bool ret;
+
+ if (bdcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
+ else
+ ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
+ if (!ret)
+ return ret;
+
+ read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
+ entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
+
+ return true;
+}
+
+static
+void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
+{
+ /*
+ * DCB v2.0 lists each output combination separately.
+ * Here we merge compatible entries to have fewer outputs, with
+ * more options
+ */
+
+ int i, newentries = 0;
+
+ for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *ient = &dcb->entry[i];
+ int j;
+
+ for (j = i + 1; j < dcb->entries; j++) {
+ struct dcb_entry *jent = &dcb->entry[j];
+
+ if (jent->type == 100) /* already merged entry */
+ continue;
+
+ /* merge heads field when all other fields the same */
+ if (jent->i2c_index == ient->i2c_index &&
+ jent->type == ient->type &&
+ jent->location == ient->location &&
+ jent->or == ient->or) {
+ NV_TRACE(dev, "Merging DCB entries %d and %d\n",
+ i, j);
+ ient->heads |= jent->heads;
+ jent->type = 100; /* dummy value */
+ }
+ }
+ }
+
+ /* Compact entries merged into others out of dcb */
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].type == 100)
+ continue;
+
+ if (newentries != i) {
+ dcb->entry[newentries] = dcb->entry[i];
+ dcb->entry[newentries].index = newentries;
+ }
+ newentries++;
+ }
+
+ dcb->entries = newentries;
+}
+
+static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+{
+ struct bios_parsed_dcb *bdcb = &bios->bdcb;
+ struct parsed_dcb *dcb;
+ uint16_t dcbptr, i2ctabptr = 0;
+ uint8_t *dcbtable;
+ uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
+ bool configblock = true;
+ int recordlength = 8, confofs = 4;
+ int i;
+
+ dcb = bios->pub.dcb = &bdcb->dcb;
+ dcb->entries = 0;
+
+ /* get the offset from 0x36 */
+ dcbptr = ROM16(bios->data[0x36]);
+
+ if (dcbptr == 0x0) {
+ NV_WARN(dev, "No output data (DCB) found in BIOS, "
+ "assuming a CRT output exists\n");
+ /* this situation likely means a really old card, pre DCB */
+ fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
+
+ if (nv04_tv_identify(dev,
+ bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_tv_output(dcb, twoHeads);
+
+ return 0;
+ }
+
+ dcbtable = &bios->data[dcbptr];
+
+ /* get DCB version */
+ bdcb->version = dcbtable[0];
+ NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
+ bdcb->version >> 4, bdcb->version & 0xf);
+
+ if (bdcb->version >= 0x20) { /* NV17+ */
+ uint32_t sig;
+
+ if (bdcb->version >= 0x30) { /* NV40+ */
+ headerlen = dcbtable[1];
+ entries = dcbtable[2];
+ recordlength = dcbtable[3];
+ i2ctabptr = ROM16(dcbtable[4]);
+ sig = ROM32(dcbtable[6]);
+ bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
+ bdcb->connector_table_ptr = ROM16(dcbtable[20]);
+ } else {
+ i2ctabptr = ROM16(dcbtable[2]);
+ sig = ROM32(dcbtable[4]);
+ headerlen = 8;
+ }
+
+ if (sig != 0x4edcbdcb) {
+ NV_ERROR(dev, "Bad Display Configuration Block "
+ "signature (%08X)\n", sig);
+ return -EINVAL;
+ }
+ } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
+ char sig[8] = { 0 };
+
+ strncpy(sig, (char *)&dcbtable[-7], 7);
+ i2ctabptr = ROM16(dcbtable[2]);
+ recordlength = 10;
+ confofs = 6;
+
+ if (strcmp(sig, "DEV_REC")) {
+ NV_ERROR(dev, "Bad Display Configuration Block "
+ "signature (%s)\n", sig);
+ return -EINVAL;
+ }
+ } else {
+ /*
+ * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
+ * has the same single (crt) entry, even when tv-out present, so
+ * the conclusion is this version cannot really be used.
+ * v1.2 tables (some NV6/10, and NV15+) normally have the same
+ * 5 entries, which are not specific to the card and so no use.
+ * v1.2 does have an I2C table that read_dcb_i2c_table can
+ * handle, but cards exist (nv11 in #14821) with a bad i2c table
+ * pointer, so use the indices parsed in parse_bmp_structure.
+ * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ */
+ NV_TRACEWARN(dev, "No useful information in BIOS output table; "
+ "adding all possible outputs\n");
+ fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
+
+ /*
+ * Attempt to detect TV before DVI because the test
+ * for the former is more accurate and it rules the
+ * latter out.
+ */
+ if (nv04_tv_identify(dev,
+ bios->legacy.i2c_indices.tv) >= 0)
+ fabricate_tv_output(dcb, twoHeads);
+
+ else if (bios->tmds.output0_script_ptr ||
+ bios->tmds.output1_script_ptr)
+ fabricate_dvi_i_output(dcb, twoHeads);
+
+ return 0;
+ }
+
+ if (!i2ctabptr)
+ NV_WARN(dev, "No pointer to DCB I2C port table\n");
+ else {
+ bdcb->i2c_table = &bios->data[i2ctabptr];
+ if (bdcb->version >= 0x30)
+ bdcb->i2c_default_indices = bdcb->i2c_table[4];
+ }
+
+ parse_dcb_gpio_table(bios);
+ parse_dcb_connector_table(bios);
+
+ if (entries > DCB_MAX_NUM_ENTRIES)
+ entries = DCB_MAX_NUM_ENTRIES;
+
+ for (i = 0; i < entries; i++) {
+ uint32_t connection, config = 0;
+
+ connection = ROM32(dcbtable[headerlen + recordlength * i]);
+ if (configblock)
+ config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
+
+ /* seen on an NV11 with DCB v1.5 */
+ if (connection == 0x00000000)
+ break;
+
+ /* seen on an NV17 with DCB v2.0 */
+ if (connection == 0xffffffff)
+ break;
+
+ if ((connection & 0x0000000f) == 0x0000000f)
+ continue;
+
+ NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
+ dcb->entries, connection, config);
+
+ if (!parse_dcb_entry(dev, bdcb, connection, config))
+ break;
+ }
+
+ /*
+ * apart from v2.1+ not being known to require merging, this
+ * guarantees dcbent->index is the index of the entry in the rom image
+ */
+ if (bdcb->version < 0x21)
+ merge_like_dcb_entries(dev, dcb);
+
+ return dcb->entries ? 0 : -ENXIO;
+}
+
+static void
+fixup_legacy_connector(struct nvbios *bios)
+{
+ struct bios_parsed_dcb *bdcb = &bios->bdcb;
+ struct parsed_dcb *dcb = &bdcb->dcb;
+ int high = 0, i;
+
+ /*
+ * DCB 3.0 also has the table in most cases, but there are some cards
+ * where the table is filled with stub entries, and the DCB entry
+ * indices are all 0. We don't need the connector indices on pre-G80
+ * chips (yet?) so limit the use to DCB 4.0 and above.
+ */
+ if (bdcb->version >= 0x40)
+ return;
+
+ /*
+ * No known connector info before v3.0, so make it up. The rule here
+ * is: anything on the same i2c bus is considered to be on the same
+ * connector. Any output without an associated i2c bus is assigned
+ * its own unique connector index.
+ */
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].i2c_index == 0xf)
+ continue;
+
+ /*
+ * Ignore the I2C index for on-chip TV-out, as there
+ * are cards with bogus values (nv31m in bug 23212),
+ * and it's otherwise useless.
+ */
+ if (dcb->entry[i].type == OUTPUT_TV &&
+ dcb->entry[i].location == DCB_LOC_ON_CHIP) {
+ dcb->entry[i].i2c_index = 0xf;
+ continue;
+ }
+
+ dcb->entry[i].connector = dcb->entry[i].i2c_index;
+ if (dcb->entry[i].connector > high)
+ high = dcb->entry[i].connector;
+ }
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].i2c_index != 0xf)
+ continue;
+
+ dcb->entry[i].connector = ++high;
+ }
+}
+
+static void
+fixup_legacy_i2c(struct nvbios *bios)
+{
+ struct parsed_dcb *dcb = &bios->bdcb.dcb;
+ int i;
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
+ if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
+ dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
+ }
+}
+
+static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
+{
+ /*
+ * The header following the "HWSQ" signature has the number of entries,
+ * and the entry size
+ *
+ * An entry consists of a dword to write to the sequencer control reg
+ * (0x00001304), followed by the ucode bytes, written sequentially,
+ * starting at reg 0x00001400
+ */
+
+ uint8_t bytes_to_write;
+ uint16_t hwsq_entry_offset;
+ int i;
+
+ if (bios->data[hwsq_offset] <= entry) {
+ NV_ERROR(dev, "Too few entries in HW sequencer table for "
+ "requested entry\n");
+ return -ENOENT;
+ }
+
+ bytes_to_write = bios->data[hwsq_offset + 1];
+
+ if (bytes_to_write != 36) {
+ NV_ERROR(dev, "Unknown HW sequencer entry size\n");
+ return -EINVAL;
+ }
+
+ NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");
+
+ hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
+
+ /* set sequencer control */
+ bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+ bytes_to_write -= 4;
+
+ /* write ucode */
+ for (i = 0; i < bytes_to_write; i += 4)
+ bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+
+ /* twiddle NV_PBUS_DEBUG_4 */
+ bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);
+
+ return 0;
+}
+
+static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
+ struct nvbios *bios)
+{
+ /*
+ * BMP based cards, from NV17, need a microcode loading to correctly
+ * control the GPIO etc for LVDS panels
+ *
+ * BIT based cards seem to do this directly in the init scripts
+ *
+ * The microcode entries are found by the "HWSQ" signature.
+ */
+
+ const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
+ const int sz = sizeof(hwsq_signature);
+ int hwsq_offset;
+
+ hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
+ if (!hwsq_offset)
+ return 0;
+
+ /* always use entry 0? */
+ return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
+}
+
+uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ const uint8_t edid_sig[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
+ uint16_t offset = 0;
+ uint16_t newoffset;
+ int searchlen = NV_PROM_SIZE;
+
+ if (bios->fp.edid)
+ return bios->fp.edid;
+
+ while (searchlen) {
+ newoffset = findstr(&bios->data[offset], searchlen,
+ edid_sig, 8);
+ if (!newoffset)
+ return NULL;
+ offset += newoffset;
+ if (!nv_cksum(&bios->data[offset], EDID1_LEN))
+ break;
+
+ searchlen -= offset;
+ offset++;
+ }
+
+ NV_TRACE(dev, "Found EDID in BIOS\n");
+
+ return bios->fp.edid = &bios->data[offset];
+}
+
+void
+nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
+ struct dcb_entry *dcbent)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct init_exec iexec = { true, false };
+
+ bios->display.output = dcbent;
+ parse_init_table(bios, table, &iexec);
+ bios->display.output = NULL;
+}
+
+static bool NVInitVBIOS(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+
+ memset(bios, 0, sizeof(struct nvbios));
+ bios->dev = dev;
+
+ if (!NVShadowVBIOS(dev, bios->data))
+ return false;
+
+ bios->length = NV_PROM_SIZE;
+ return true;
+}
+
+static int nouveau_parse_vbios_struct(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
+ const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
+ int offset;
+
+ offset = findstr(bios->data, bios->length,
+ bit_signature, sizeof(bit_signature));
+ if (offset) {
+ NV_TRACE(dev, "BIT BIOS found\n");
+ return parse_bit_structure(bios, offset + 6);
+ }
+
+ offset = findstr(bios->data, bios->length,
+ bmp_signature, sizeof(bmp_signature));
+ if (offset) {
+ NV_TRACE(dev, "BMP BIOS found\n");
+ return parse_bmp_structure(dev, bios, offset);
+ }
+
+ NV_ERROR(dev, "No known BIOS signature found\n");
+ return -ENODEV;
+}
+
+int
+nouveau_run_vbios_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ int i, ret = 0;
+
+ NVLockVgaCrtcs(dev, false);
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, bios->state.crtchead);
+
+ if (bios->major_version < 5) /* BMP only */
+ load_nv17_hw_sequencer_ucode(dev, bios);
+
+ if (bios->execute) {
+ bios->fp.last_script_invoc = 0;
+ bios->fp.lvds_init_run = false;
+ }
+
+ parse_init_tables(bios);
+
+ /*
+ * Runs some additional script seen on G8x VBIOSen. The VBIOS'
+ * parser will run this right after the init tables, the binary
+ * driver appears to run it at some point later.
+ */
+ if (bios->some_script_ptr) {
+ struct init_exec iexec = {true, false};
+
+ NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
+ bios->some_script_ptr);
+ parse_init_table(bios, bios->some_script_ptr, &iexec);
+ }
+
+ if (dev_priv->card_type >= NV_50) {
+ for (i = 0; i < bios->bdcb.dcb.entries; i++) {
+ nouveau_bios_run_display_table(dev,
+ &bios->bdcb.dcb.entry[i],
+ 0, 0);
+ }
+ }
+
+ NVLockVgaCrtcs(dev, true);
+
+ return ret;
+}
+
+static void
+nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ struct dcb_i2c_entry *entry;
+ int i;
+
+ entry = &bios->bdcb.dcb.i2c[0];
+ for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
+ nouveau_i2c_fini(dev, entry);
+}
+
+int
+nouveau_bios_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint32_t saved_nv_pextdev_boot_0;
+ bool was_locked;
+ int ret;
+
+ dev_priv->vbios = &bios->pub;
+
+ if (!NVInitVBIOS(dev))
+ return -ENODEV;
+
+ ret = nouveau_parse_vbios_struct(dev);
+ if (ret)
+ return ret;
+
+ ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+ if (ret)
+ return ret;
+
+ fixup_legacy_i2c(bios);
+ fixup_legacy_connector(bios);
+
+ if (!bios->major_version) /* we don't run version 0 bios */
+ return 0;
+
+ /* these will need remembering across a suspend */
+ saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
+ bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);
+
+ /* init script execution disabled */
+ bios->execute = false;
+
+ /* ... unless card isn't POSTed already */
+ if (dev_priv->card_type >= NV_10 &&
+ NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
+ NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
+ NV_INFO(dev, "Adaptor not initialised\n");
+ if (dev_priv->card_type < NV_50) {
+ NV_ERROR(dev, "Unable to POST this chipset\n");
+ return -ENODEV;
+ }
+
+ NV_INFO(dev, "Running VBIOS init tables\n");
+ bios->execute = true;
+ }
+
+ bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
+
+ ret = nouveau_run_vbios_init(dev);
+ if (ret) {
+ dev_priv->vbios = NULL;
+ return ret;
+ }
+
+ /* feature_byte on BMP is poor, but init always sets CR4B */
+ was_locked = NVLockVgaCrtcs(dev, false);
+ if (bios->major_version < 5)
+ bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
+
+ /* all BIT systems need p_f_m_t for digital_min_front_porch */
+ if (bios->is_mobile || bios->major_version >= 5)
+ ret = parse_fp_mode_table(dev, bios);
+ NVLockVgaCrtcs(dev, was_locked);
+
+ /* allow subsequent scripts to execute */
+ bios->execute = true;
+
+ return 0;
+}
+
+void
+nouveau_bios_takedown(struct drm_device *dev)
+{
+ nouveau_bios_i2c_devices_takedown(dev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
new file mode 100644
index 00000000000..058e98c76d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2007-2008 Nouveau Project
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_BIOS_H__
+#define __NOUVEAU_BIOS_H__
+
+#include "nvreg.h"
+#include "nouveau_i2c.h"
+
+#define DCB_MAX_NUM_ENTRIES 16
+#define DCB_MAX_NUM_I2C_ENTRIES 16
+#define DCB_MAX_NUM_GPIO_ENTRIES 32
+#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16
+
+#define DCB_LOC_ON_CHIP 0
+
+struct dcb_entry {
+ int index; /* may not be raw dcb index if merging has happened */
+ uint8_t type;
+ uint8_t i2c_index;
+ uint8_t heads;
+ uint8_t connector;
+ uint8_t bus;
+ uint8_t location;
+ uint8_t or;
+ bool duallink_possible;
+ union {
+ struct sor_conf {
+ int link;
+ } sorconf;
+ struct {
+ int maxfreq;
+ } crtconf;
+ struct {
+ struct sor_conf sor;
+ bool use_straps_for_mode;
+ bool use_power_scripts;
+ } lvdsconf;
+ struct {
+ bool has_component_output;
+ } tvconf;
+ struct {
+ struct sor_conf sor;
+ int link_nr;
+ int link_bw;
+ } dpconf;
+ struct {
+ struct sor_conf sor;
+ } tmdsconf;
+ };
+ bool i2c_upper_default;
+};
+
+struct dcb_i2c_entry {
+ uint8_t port_type;
+ uint8_t read, write;
+ struct nouveau_i2c_chan *chan;
+};
+
+struct parsed_dcb {
+ int entries;
+ struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
+ struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
+};
+
+enum dcb_gpio_tag {
+ DCB_GPIO_TVDAC0 = 0xc,
+ DCB_GPIO_TVDAC1 = 0x2d,
+};
+
+struct dcb_gpio_entry {
+ enum dcb_gpio_tag tag;
+ int line;
+ bool invert;
+};
+
+struct parsed_dcb_gpio {
+ int entries;
+ struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+};
+
+struct dcb_connector_table_entry {
+ uint32_t entry;
+ uint8_t type;
+ uint8_t index;
+ uint8_t gpio_tag;
+};
+
+struct dcb_connector_table {
+ int entries;
+ struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+};
+
+struct bios_parsed_dcb {
+ uint8_t version;
+
+ struct parsed_dcb dcb;
+
+ uint8_t *i2c_table;
+ uint8_t i2c_default_indices;
+
+ uint16_t gpio_table_ptr;
+ struct parsed_dcb_gpio gpio;
+ uint16_t connector_table_ptr;
+ struct dcb_connector_table connector;
+};
+
+enum nouveau_encoder_type {
+ OUTPUT_ANALOG = 0,
+ OUTPUT_TV = 1,
+ OUTPUT_TMDS = 2,
+ OUTPUT_LVDS = 3,
+ OUTPUT_DP = 6,
+ OUTPUT_ANY = -1
+};
+
+enum nouveau_or {
+ OUTPUT_A = (1 << 0),
+ OUTPUT_B = (1 << 1),
+ OUTPUT_C = (1 << 2)
+};
+
+enum LVDS_script {
+ /* Order *does* matter here */
+ LVDS_INIT = 1,
+ LVDS_RESET,
+ LVDS_BACKLIGHT_ON,
+ LVDS_BACKLIGHT_OFF,
+ LVDS_PANEL_ON,
+ LVDS_PANEL_OFF
+};
+
+/* changing these requires matching changes to reg tables in nv_get_clock */
+#define MAX_PLL_TYPES 4
+enum pll_types {
+ NVPLL,
+ MPLL,
+ VPLL1,
+ VPLL2
+};
+
+struct pll_lims {
+ struct {
+ int minfreq;
+ int maxfreq;
+ int min_inputfreq;
+ int max_inputfreq;
+
+ uint8_t min_m;
+ uint8_t max_m;
+ uint8_t min_n;
+ uint8_t max_n;
+ } vco1, vco2;
+
+ uint8_t max_log2p;
+ /*
+ * For most pre-nv50 cards, setting a log2P of 7 (the common max_log2p
+ * value) is no different from 6 (at least for vplls), so allowing the MNP
+ * calc to use 7 causes the generated clock to be out by a factor of 2.
+ * However, max_log2p cannot be fixed up during parsing, as the
+ * unmodified max_log2p value is still needed for setting mplls; hence
+ * the additional max_usable_log2p member.
+ */
+ uint8_t max_usable_log2p;
+ uint8_t log2p_bias;
+
+ uint8_t min_p;
+ uint8_t max_p;
+
+ int refclk;
+};
+
+struct nouveau_bios_info {
+ struct parsed_dcb *dcb;
+
+ uint8_t chip_version;
+
+ uint32_t dactestval;
+ uint32_t tvdactestval;
+ uint8_t digital_min_front_porch;
+ bool fp_no_ddc;
+};
+
+struct nvbios {
+ struct drm_device *dev;
+ struct nouveau_bios_info pub;
+
+ uint8_t data[NV_PROM_SIZE];
+ unsigned int length;
+ bool execute;
+
+ uint8_t major_version;
+ uint8_t feature_byte;
+ bool is_mobile;
+
+ uint32_t fmaxvco, fminvco;
+
+ bool old_style_init;
+ uint16_t init_script_tbls_ptr;
+ uint16_t extra_init_script_tbl_ptr;
+ uint16_t macro_index_tbl_ptr;
+ uint16_t macro_tbl_ptr;
+ uint16_t condition_tbl_ptr;
+ uint16_t io_condition_tbl_ptr;
+ uint16_t io_flag_condition_tbl_ptr;
+ uint16_t init_function_tbl_ptr;
+
+ uint16_t pll_limit_tbl_ptr;
+ uint16_t ram_restrict_tbl_ptr;
+ uint8_t ram_restrict_group_count;
+
+ uint16_t some_script_ptr; /* BIT I + 14 */
+ uint16_t init96_tbl_ptr; /* BIT I + 16 */
+
+ struct bios_parsed_dcb bdcb;
+
+ struct {
+ int crtchead;
+ /* these need remembering across suspend */
+ uint32_t saved_nv_pfb_cfg0;
+ } state;
+
+ struct {
+ struct dcb_entry *output;
+ uint16_t script_table_ptr;
+ uint16_t dp_table_ptr;
+ } display;
+
+ struct {
+ uint16_t fptablepointer; /* also used by tmds */
+ uint16_t fpxlatetableptr;
+ int xlatwidth;
+ uint16_t lvdsmanufacturerpointer;
+ uint16_t fpxlatemanufacturertableptr;
+ uint16_t mode_ptr;
+ uint16_t xlated_entry;
+ bool power_off_for_reset;
+ bool reset_after_pclk_change;
+ bool dual_link;
+ bool link_c_increment;
+ bool BITbit1;
+ bool if_is_24bit;
+ int duallink_transition_clk;
+ uint8_t strapless_is_24bit;
+ uint8_t *edid;
+
+ /* will need resetting after suspend */
+ int last_script_invoc;
+ bool lvds_init_run;
+ } fp;
+
+ struct {
+ uint16_t output0_script_ptr;
+ uint16_t output1_script_ptr;
+ } tmds;
+
+ struct {
+ uint16_t mem_init_tbl_ptr;
+ uint16_t sdr_seq_tbl_ptr;
+ uint16_t ddr_seq_tbl_ptr;
+
+ struct {
+ uint8_t crt, tv, panel;
+ } i2c_indices;
+
+ uint16_t lvds_single_a_script_ptr;
+ } legacy;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 00000000000..0cad6d834eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,682 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static void
+nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ ttm_bo_kunmap(&nvbo->kmap);
+
+ if (unlikely(nvbo->gem))
+ DRM_ERROR("bo %p still attached to GEM object\n", bo);
+
+ spin_lock(&dev_priv->ttm.bo_list_lock);
+ list_del(&nvbo->head);
+ spin_unlock(&dev_priv->ttm.bo_list_lock);
+ kfree(nvbo);
+}
+
+int
+nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t flags, uint32_t tile_mode,
+ uint32_t tile_flags, bool no_vm, bool mappable,
+ struct nouveau_bo **pnvbo)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_bo *nvbo;
+ int ret, n = 0;
+
+ nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
+ if (!nvbo)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&nvbo->head);
+ INIT_LIST_HEAD(&nvbo->entry);
+ nvbo->mappable = mappable;
+ nvbo->no_vm = no_vm;
+ nvbo->tile_mode = tile_mode;
+ nvbo->tile_flags = tile_flags;
+
+ /*
+ * Some of the tile_flags have a periodic structure of N*4096 bytes,
+ * align to that as well as to the page size. Overallocate memory to
+ * avoid corruption of other buffer objects.
+ */
+ switch (tile_flags) {
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7a00:
+ if (dev_priv->chipset >= 0xA0) {
+ /* This is based on high end cards with 448 bits
+ * memory bus, could be different elsewhere. */
+ size += 6 * 28672;
+ /* 8 * 28672 is the actual alignment requirement,
+ * but we must also align to page size. */
+ align = 2 * 8 * 28672;
+ } else if (dev_priv->chipset >= 0x90) {
+ size += 3 * 16384;
+ align = 12 * 16384;
+ } else {
+ size += 3 * 8192;
+ /* 12 * 8192 is the actual alignment requirement,
+ * but we must also align to page size. */
+ align = 2 * 12 * 8192;
+ }
+ break;
+ default:
+ break;
+ }
+
+ align >>= PAGE_SHIFT;
+
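+ /* Round the size up to the page size; NV50 additionally needs 64KiB
+ * granularity for both the size and the alignment (align is in pages
+ * at this point). */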
+ size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+ if (dev_priv->card_type == NV_50) {
+ size = (size + 65535) & ~65535;
+ if (align < (65536 / PAGE_SIZE))
+ align = (65536 / PAGE_SIZE);
+ }
+
+ if (flags & TTM_PL_FLAG_VRAM)
+ nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
+ if (flags & TTM_PL_FLAG_TT)
+ nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ nvbo->placement.fpfn = 0;
+ nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
+ nvbo->placement.placement = nvbo->placements;
+ nvbo->placement.busy_placement = nvbo->placements;
+ nvbo->placement.num_placement = n;
+ nvbo->placement.num_busy_placement = n;
+
+ nvbo->channel = chan;
+ nouveau_bo_placement_set(nvbo, flags);
+ ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+ ttm_bo_type_device, &nvbo->placement, align, 0,
+ false, NULL, size, nouveau_bo_del_ttm);
+ nvbo->channel = NULL;
+ if (ret) {
+ /* ttm will call nouveau_bo_del_ttm if it fails.. */
+ return ret;
+ }
+
+ spin_lock(&dev_priv->ttm.bo_list_lock);
+ list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
+ spin_unlock(&dev_priv->ttm.bo_list_lock);
+ *pnvbo = nvbo;
+ return 0;
+}
+
+void
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
+{
+ int n = 0;
+
+ if (memtype & TTM_PL_FLAG_VRAM)
+ nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
+ if (memtype & TTM_PL_FLAG_TT)
+ nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ if (memtype & TTM_PL_FLAG_SYSTEM)
+ nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
+ nvbo->placement.placement = nvbo->placements;
+ nvbo->placement.busy_placement = nvbo->placements;
+ nvbo->placement.num_placement = n;
+ nvbo->placement.num_busy_placement = n;
+
+ if (nvbo->pin_refcnt) {
+ while (n--)
+ nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
+ }
+}
+
+int
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret, i;
+
+ if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
+ NV_ERROR(nouveau_bdev(bo->bdev)->dev,
+ "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+ 1 << bo->mem.mem_type, memtype);
+ return -EINVAL;
+ }
+
+ if (nvbo->pin_refcnt++)
+ return 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ goto out;
+
+ nouveau_bo_placement_set(nvbo, memtype);
+ for (i = 0; i < nvbo->placement.num_placement; i++)
+ nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ if (ret == 0) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ dev_priv->fb_aper_free -= bo->mem.size;
+ break;
+ case TTM_PL_TT:
+ dev_priv->gart_info.aper_free -= bo->mem.size;
+ break;
+ default:
+ break;
+ }
+ }
+ ttm_bo_unreserve(bo);
+out:
+ if (unlikely(ret))
+ nvbo->pin_refcnt--;
+ return ret;
+}
+
+int
+nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret, i;
+
+ if (--nvbo->pin_refcnt)
+ return 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nvbo->placement.num_placement; i++)
+ nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ if (ret == 0) {
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ dev_priv->fb_aper_free += bo->mem.size;
+ break;
+ case TTM_PL_TT:
+ dev_priv->gart_info.aper_free += bo->mem.size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+
+int
+nouveau_bo_map(struct nouveau_bo *nvbo)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ ttm_bo_unreserve(&nvbo->bo);
+ return ret;
+}
+
+void
+nouveau_bo_unmap(struct nouveau_bo *nvbo)
+{
+ ttm_bo_kunmap(&nvbo->kmap);
+}
+
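+/*
+ * The rd/wr helpers below work on a kmapped bo regardless of whether the
+ * mapping is iomem (e.g. VRAM) or ordinary system memory, as reported by
+ * ttm_kmap_obj_virtual().
+ */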
+u16
+nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
+{
+ bool is_iomem;
+ u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ return ioread16_native((void __force __iomem *)mem);
+ else
+ return *mem;
+}
+
+void
+nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
+{
+ bool is_iomem;
+ u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ iowrite16_native(val, (void __force __iomem *)mem);
+ else
+ *mem = val;
+}
+
+u32
+nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
+{
+ bool is_iomem;
+ u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ return ioread32_native((void __force __iomem *)mem);
+ else
+ return *mem;
+}
+
+void
+nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
+{
+ bool is_iomem;
+ u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
+ mem = &mem[index];
+ if (is_iomem)
+ iowrite32_native(val, (void __force __iomem *)mem);
+ else
+ *mem = val;
+}
+
+static struct ttm_backend *
+nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ switch (dev_priv->gart_info.type) {
+#if __OS_HAS_AGP
+ case NOUVEAU_GART_AGP:
+ return ttm_agp_backend_init(bdev, dev->agp->bridge);
+#endif
+ case NOUVEAU_GART_SGDMA:
+ return nouveau_sgdma_init_ttm(dev);
+ default:
+ NV_ERROR(dev, "Unknown GART type %d\n",
+ dev_priv->gart_info.type);
+ break;
+ }
+
+ return NULL;
+}
+
+static int
+nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ /* We'll do this from user space. */
+ return 0;
+}
+
+static int
+nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+
+ man->io_addr = NULL;
+ man->io_offset = drm_get_resource_start(dev, 1);
+ man->io_size = drm_get_resource_len(dev, 1);
+ if (man->io_size > nouveau_mem_fb_amount(dev))
+ man->io_size = nouveau_mem_fb_amount(dev);
+
+ man->gpu_offset = dev_priv->vm_vram_base;
+ break;
+ case TTM_PL_TT:
+ switch (dev_priv->gart_info.type) {
+ case NOUVEAU_GART_AGP:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+ man->available_caching = TTM_PL_FLAG_UNCACHED;
+ man->default_caching = TTM_PL_FLAG_UNCACHED;
+ break;
+ case NOUVEAU_GART_SGDMA:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
+ TTM_MEMTYPE_FLAG_CMA;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ default:
+ NV_ERROR(dev, "Unknown GART type: %d\n",
+ dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+
+ man->io_offset = dev_priv->gart_info.aper_base;
+ man->io_size = dev_priv->gart_info.aper_size;
+ man->io_addr = NULL;
+ man->gpu_offset = dev_priv->vm_gart_base;
+ break;
+ default:
+ NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
+ TTM_PL_FLAG_SYSTEM);
+ break;
+ default:
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
+ break;
+ }
+
+ *pl = nvbo->placement;
+}
+
+
+/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
+ * TTM_PL_{VRAM,TT} directly.
+ */
+static int
+nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
+ struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
+ evict, no_wait, new_mem);
+ nouveau_fence_unref((void *)&fence);
+ return ret;
+}
+
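+/* Return the ctxdma handle covering a memory region: the kernel's own
+ * channel uses the global NvDmaVRAM/NvDmaGART objects, while user
+ * channels use the handles supplied when they were created.
+ */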
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
+ struct ttm_mem_reg *mem)
+{
+ if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+ if (mem->mem_type == TTM_PL_TT)
+ return NvDmaGART;
+ return NvDmaVRAM;
+ }
+
+ if (mem->mem_type == TTM_PL_TT)
+ return chan->gart_handle;
+ return chan->vram_handle;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
+ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_channel *chan;
+ uint64_t src_offset, dst_offset;
+ uint32_t page_count;
+ int ret;
+
+ chan = nvbo->channel;
+ if (!chan || nvbo->tile_flags || nvbo->no_vm)
+ chan = dev_priv->channel;
+
+ src_offset = old_mem->mm_node->start << PAGE_SHIFT;
+ dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (chan != dev_priv->channel) {
+ if (old_mem->mem_type == TTM_PL_TT)
+ src_offset += dev_priv->vm_gart_base;
+ else
+ src_offset += dev_priv->vm_vram_base;
+
+ if (new_mem->mem_type == TTM_PL_TT)
+ dst_offset += dev_priv->vm_gart_base;
+ else
+ dst_offset += dev_priv->vm_vram_base;
+ }
+
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+ OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
+ OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
+
+ if (dev_priv->card_type >= NV_50) {
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+ OUT_RING(chan, 1);
+ }
+
+ page_count = new_mem->num_pages;
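+ /* Copy in chunks; line_count is capped at 2047 lines (one page
+ * per line) per M2MF submission below.
+ */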
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ if (dev_priv->card_type >= NV_50) {
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+ OUT_RING(chan, upper_32_bits(src_offset));
+ OUT_RING(chan, upper_32_bits(dst_offset));
+ }
+ ret = RING_SPACE(chan, 11);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF,
+ NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+ OUT_RING(chan, lower_32_bits(src_offset));
+ OUT_RING(chan, lower_32_bits(dst_offset));
+ OUT_RING(chan, PAGE_SIZE); /* src_pitch */
+ OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
+ OUT_RING(chan, PAGE_SIZE); /* line_length */
+ OUT_RING(chan, line_count);
+ OUT_RING(chan, (1<<8)|(1<<0));
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ OUT_RING(chan, 0);
+
+ page_count -= line_count;
+ src_offset += (PAGE_SIZE * line_count);
+ dst_offset += (PAGE_SIZE * line_count);
+ }
+
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+}
+
+static int
+nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ struct ttm_placement placement;
+ struct ttm_mem_reg tmp_mem;
+ int ret;
+
+ placement.fpfn = placement.lpfn = 0;
+ placement.num_placement = placement.num_busy_placement = 1;
+ placement.placement = &placement_memtype;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ if (ret)
+ return ret;
+
+ ret = ttm_tt_bind(bo->ttm, &tmp_mem);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+ if (ret)
+ goto out;
+
+ ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out:
+ if (tmp_mem.mm_node) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ drm_mm_put_block(tmp_mem.mm_node);
+ spin_unlock(&bo->bdev->glob->lru_lock);
+ }
+
+ return ret;
+}
+
+static int
+nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+ struct ttm_placement placement;
+ struct ttm_mem_reg tmp_mem;
+ int ret;
+
+ placement.fpfn = placement.lpfn = 0;
+ placement.num_placement = placement.num_busy_placement = 1;
+ placement.placement = &placement_memtype;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+ if (ret)
+ goto out;
+
+out:
+ if (tmp_mem.mm_node) {
+ spin_lock(&bo->bdev->glob->lru_lock);
+ drm_mm_put_block(tmp_mem.mm_node);
+ spin_unlock(&bo->bdev->glob->lru_lock);
+ }
+
+ return ret;
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct drm_device *dev = dev_priv->dev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+
+ if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
+ !nvbo->no_vm) {
+ uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+
+ ret = nv50_mem_vm_bind_linear(dev,
+ offset + dev_priv->vm_vram_base,
+ new_mem->size, nvbo->tile_flags,
+ offset);
+ if (ret)
+ return ret;
+ }
+
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
+ !dev_priv->channel)
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+ if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+ BUG_ON(bo->mem.mm_node != NULL);
+ bo->mem = *new_mem;
+ new_mem->mm_node = NULL;
+ return 0;
+ }
+
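+ /* VRAM<->SYSTEM moves are bounced through a temporary GART
+ * placement (flipd/flips) so the M2MF engine can do the copy;
+ * fall back to a CPU memcpy if that fails.
+ */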
+ if (new_mem->mem_type == TTM_PL_SYSTEM) {
+ if (old_mem->mem_type == TTM_PL_SYSTEM)
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
+ if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else {
+ if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
+ return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+
+ return 0;
+}
+
+static int
+nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+struct ttm_bo_driver nouveau_bo_driver = {
+ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+ .invalidate_caches = nouveau_bo_invalidate_caches,
+ .init_mem_type = nouveau_bo_init_mem_type,
+ .evict_flags = nouveau_bo_evict_flags,
+ .move = nouveau_bo_move,
+ .verify_access = nouveau_bo_verify_access,
+ .sync_obj_signaled = nouveau_fence_signalled,
+ .sync_obj_wait = nouveau_fence_wait,
+ .sync_obj_flush = nouveau_fence_flush,
+ .sync_obj_unref = nouveau_fence_unref,
+ .sync_obj_ref = nouveau_fence_ref,
+};
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
new file mode 100644
index 00000000000..ee2b84504d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+/****************************************************************************\
+* *
+* The video arbitration routines calculate some "magic" numbers. They fix *
+* the snow seen when accessing the framebuffer without arbitration. *
+* It just works (I hope). *
+* *
+\****************************************************************************/
+
+struct nv_fifo_info {
+ int lwm;
+ int burst;
+};
+
+struct nv_sim_state {
+ int pclk_khz;
+ int mclk_khz;
+ int nvclk_khz;
+ int bpp;
+ int mem_page_miss;
+ int mem_latency;
+ int memory_type;
+ int memory_width;
+ int two_heads;
+};
+
+static void
+nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+ int pagemiss, cas, width, bpp;
+ int nvclks, mclks, pclks, crtpagemiss;
+ int found, mclk_extra, mclk_loop, cbs, m1, p1;
+ int mclk_freq, pclk_freq, nvclk_freq;
+ int us_m, us_n, us_p, crtc_drain_rate;
+ int cpm_us, us_crt, clwm;
+
+ pclk_freq = arb->pclk_khz;
+ mclk_freq = arb->mclk_khz;
+ nvclk_freq = arb->nvclk_khz;
+ pagemiss = arb->mem_page_miss;
+ cas = arb->mem_latency;
+ width = arb->memory_width >> 6;
+ bpp = arb->bpp;
+ cbs = 128;
+
+ pclks = 2;
+ nvclks = 10;
+ mclks = 13 + cas;
+ mclk_extra = 3;
+ found = 0;
+
+ while (!found) {
+ found = 1;
+
+ mclk_loop = mclks + mclk_extra;
+ us_m = mclk_loop * 1000 * 1000 / mclk_freq;
+ us_n = nvclks * 1000 * 1000 / nvclk_freq;
+ us_p = nvclks * 1000 * 1000 / pclk_freq;
+
+ crtc_drain_rate = pclk_freq * bpp / 8;
+ crtpagemiss = 2;
+ crtpagemiss += 1;
+ cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
+ us_crt = cpm_us + us_m + us_n + us_p;
+ clwm = us_crt * crtc_drain_rate / (1000 * 1000);
+ clwm++;
+
+ m1 = clwm + cbs - 512;
+ p1 = m1 * pclk_freq / mclk_freq;
+ p1 = p1 * bpp / 8;
+ if ((p1 < m1 && m1 > 0) || clwm > 519) {
+ found = !mclk_extra;
+ mclk_extra--;
+ }
+ if (clwm < 384)
+ clwm = 384;
+
+ fifo->lwm = clwm;
+ fifo->burst = cbs;
+ }
+}
+
+static void
+nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
+{
+ int fill_rate, drain_rate;
+ int pclks, nvclks, mclks, xclks;
+ int pclk_freq, nvclk_freq, mclk_freq;
+ int fill_lat, extra_lat;
+ int max_burst_o, max_burst_l;
+ int fifo_len, min_lwm, max_lwm;
+ const int burst_lat = 80; /* Maximum allowable latency due
+ * to the CRTC FIFO burst. (ns) */
+
+ pclk_freq = arb->pclk_khz;
+ nvclk_freq = arb->nvclk_khz;
+ mclk_freq = arb->mclk_khz;
+
+ fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
+ drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */
+
+ fifo_len = arb->two_heads ? 1536 : 1024; /* B */
+
+ /* Fixed FIFO refill latency. */
+
+ pclks = 4; /* lwm detect. */
+
+ nvclks = 3 /* lwm -> sync. */
+ + 2 /* fbi bus cycles (1 req + 1 busy) */
+ + 1 /* 2 edge sync. may be very close to edge so
+ * just put one. */
+ + 1 /* fbi_d_rdv_n */
+ + 1 /* Fbi_d_rdata */
+ + 1; /* crtfifo load */
+
+ mclks = 1 /* 2 edge sync. may be very close to edge so
+ * just put one. */
+ + 1 /* arb_hp_req */
+ + 5 /* tiling pipeline */
+ + 2 /* latency fifo */
+ + 2 /* memory request to fbio block */
+ + 7; /* data returned from fbio block */
+
+ /* Need to accumulate 256 bits for read */
+ mclks += (arb->memory_type == 0 ? 2 : 1)
+ * arb->memory_width / 32;
+
+ fill_lat = mclks * 1000 * 1000 / mclk_freq /* minimum mclk latency */
+ + nvclks * 1000 * 1000 / nvclk_freq /* nvclk latency */
+ + pclks * 1000 * 1000 / pclk_freq; /* pclk latency */
+
+ /* Conditional FIFO refill latency. */
+
+ xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
+ * the overlay. */
+ + 2 * arb->mem_page_miss /* Extra pagemiss latency. */
+ + (arb->bpp == 32 ? 8 : 4); /* Margin of error. */
+
+ extra_lat = xclks * 1000 * 1000 / mclk_freq;
+
+ if (arb->two_heads)
+ /* Account for another CRTC. */
+ extra_lat += fill_lat + extra_lat + burst_lat;
+
+ /* FIFO burst */
+
+ /* Max burst not leading to overflows. */
+ max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
+ * (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
+ fifo->burst = min(max_burst_o, 1024);
+
+ /* Max burst value with an acceptable latency. */
+ max_burst_l = burst_lat * fill_rate / (1000 * 1000);
+ fifo->burst = min(max_burst_l, fifo->burst);
+
+ fifo->burst = rounddown_pow_of_two(fifo->burst);
+
+ /* FIFO low watermark */
+
+ min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
+ max_lwm = fifo_len - fifo->burst
+ + fill_lat * drain_rate / (1000 * 1000)
+ + fifo->burst * drain_rate / fill_rate;
+
+ fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
+}
+
+static void
+nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
+ int *burst, int *lwm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv_fifo_info fifo_data;
+ struct nv_sim_state sim_data;
+ int MClk = nouveau_hw_get_clock(dev, MPLL);
+ int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+ uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
+
+ sim_data.pclk_khz = VClk;
+ sim_data.mclk_khz = MClk;
+ sim_data.nvclk_khz = NVClk;
+ sim_data.bpp = bpp;
+ sim_data.two_heads = nv_two_heads(dev);
+ if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+ (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+ uint32_t type;
+
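+ /* IGP: the DRAM configuration is read from the system chipset
+ * (PCI device 0:1) rather than the card's own PFB registers.
+ */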
+ pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
+
+ sim_data.memory_type = (type >> 12) & 1;
+ sim_data.memory_width = 64;
+ sim_data.mem_latency = 3;
+ sim_data.mem_page_miss = 10;
+ } else {
+ sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
+ sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
+ sim_data.mem_latency = cfg1 & 0xf;
+ sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
+ }
+
+ if (dev_priv->card_type == NV_04)
+ nv04_calc_arb(&fifo_data, &sim_data);
+ else
+ nv10_calc_arb(&fifo_data, &sim_data);
+
+ *burst = ilog2(fifo_data.burst >> 4);
+ *lwm = fifo_data.lwm >> 3;
+}
+
+static void
+nv30_update_arb(int *burst, int *lwm)
+{
+ unsigned int fifo_size, burst_size, graphics_lwm;
+
+ fifo_size = 2048;
+ burst_size = 512;
+ graphics_lwm = fifo_size - burst_size;
+
+ *burst = ilog2(burst_size >> 5);
+ *lwm = graphics_lwm >> 3;
+}
+
+void
+nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type < NV_30)
+ nv04_update_arb(dev, vclk, bpp, burst, lwm);
+ else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+ (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+ *burst = 128;
+ *lwm = 0x0480;
+ } else
+ nv30_update_arb(burst, lwm);
+}
+
+static int
+getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+ struct nouveau_pll_vals *bestpv)
+{
+ /* Find M, N and P for a single stage PLL
+ *
+ * Note that some bioses (NV3x) have lookup tables of precomputed MNP
+ * values, but we're too lazy to use those atm
+ *
+ * "clk" parameter in kHz
+ * returns calculated clock
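+ *
+ * The resulting frequency is effectively refclk * N / (M * P); the
+ * loops below search P, M and N exhaustively for the closest match.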
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int cv = dev_priv->vbios->chip_version;
+ int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
+ int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
+ int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
+ int minU = pll_lim->vco1.min_inputfreq;
+ int maxU = pll_lim->vco1.max_inputfreq;
+ int minP = pll_lim->max_p ? pll_lim->min_p : 0;
+ int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
+ int crystal = pll_lim->refclk;
+ int M, N, thisP, P;
+ int clkP, calcclk;
+ int delta, bestdelta = INT_MAX;
+ int bestclk = 0;
+
+ /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
+ /* possibly correlated with introduction of 27MHz crystal */
+ if (dev_priv->card_type < NV_50) {
+ if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
+ if (clk > 250000)
+ maxM = 6;
+ if (clk > 340000)
+ maxM = 2;
+ } else if (cv < 0x40) {
+ if (clk > 150000)
+ maxM = 6;
+ if (clk > 200000)
+ maxM = 4;
+ if (clk > 340000)
+ maxM = 2;
+ }
+ }
+
+ P = pll_lim->max_p ? maxP : (1 << maxP);
+ if ((clk * P) < minvco) {
+ minvco = clk * maxP;
+ maxvco = minvco * 2;
+ }
+
+ if (clk + clk/200 > maxvco) /* +0.5% */
+ maxvco = clk + clk/200;
+
+ /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
+ for (thisP = minP; thisP <= maxP; thisP++) {
+ P = pll_lim->max_p ? thisP : (1 << thisP);
+ clkP = clk * P;
+
+ if (clkP < minvco)
+ continue;
+ if (clkP > maxvco)
+ return bestclk;
+
+ for (M = minM; M <= maxM; M++) {
+ if (crystal/M < minU)
+ return bestclk;
+ if (crystal/M > maxU)
+ continue;
+
+ /* add crystal/2 to round better */
+ N = (clkP * M + crystal/2) / crystal;
+
+ if (N < minN)
+ continue;
+ if (N > maxN)
+ break;
+
+ /* more rounding additions */
+ calcclk = ((N * crystal + P/2) / P + M/2) / M;
+ delta = abs(calcclk - clk);
+ /* we do an exhaustive search rather than terminating
+ * on an optimality condition...
+ */
+ if (delta < bestdelta) {
+ bestdelta = delta;
+ bestclk = calcclk;
+ bestpv->N1 = N;
+ bestpv->M1 = M;
+ bestpv->log2P = thisP;
+ if (delta == 0) /* except this one */
+ return bestclk;
+ }
+ }
+ }
+
+ return bestclk;
+}
+
+static int
+getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+ struct nouveau_pll_vals *bestpv)
+{
+ /* Find M, N and P for a two stage PLL
+ *
+ * Note that some bioses (NV30+) have lookup tables of precomputed MNP
+ * values, but we're too lazy to use those atm
+ *
+ * "clk" parameter in kHz
+ * returns calculated clock
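+ *
+ * The resulting frequency is effectively refclk * N1 * N2 /
+ * (M1 * M2 * 2^log2P), with both VCO stages searched exhaustively.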
+ */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chip_version = dev_priv->vbios->chip_version;
+ int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
+ int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
+ int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
+ int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
+ int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
+ int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
+ int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
+ int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
+ int maxlog2P = pll_lim->max_usable_log2p;
+ int crystal = pll_lim->refclk;
+ bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
+ int M1, N1, M2, N2, log2P;
+ int clkP, calcclk1, calcclk2, calcclkout;
+ int delta, bestdelta = INT_MAX;
+ int bestclk = 0;
+
+ int vco2 = (maxvco2 - maxvco2/200) / 2;
+ for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
+ ;
+ clkP = clk << log2P;
+
+ if (maxvco2 < clk + clk/200) /* +0.5% */
+ maxvco2 = clk + clk/200;
+
+ for (M1 = minM1; M1 <= maxM1; M1++) {
+ if (crystal/M1 < minU1)
+ return bestclk;
+ if (crystal/M1 > maxU1)
+ continue;
+
+ for (N1 = minN1; N1 <= maxN1; N1++) {
+ calcclk1 = crystal * N1 / M1;
+ if (calcclk1 < minvco1)
+ continue;
+ if (calcclk1 > maxvco1)
+ break;
+
+ for (M2 = minM2; M2 <= maxM2; M2++) {
+ if (calcclk1/M2 < minU2)
+ break;
+ if (calcclk1/M2 > maxU2)
+ continue;
+
+ /* add calcclk1/2 to round better */
+ N2 = (clkP * M2 + calcclk1/2) / calcclk1;
+ if (N2 < minN2)
+ continue;
+ if (N2 > maxN2)
+ break;
+
+ if (!fixedgain2) {
+ if (chip_version < 0x60)
+ if (N2/M2 < 4 || N2/M2 > 10)
+ continue;
+
+ calcclk2 = calcclk1 * N2 / M2;
+ if (calcclk2 < minvco2)
+ break;
+ if (calcclk2 > maxvco2)
+ continue;
+ } else
+ calcclk2 = calcclk1;
+
+ calcclkout = calcclk2 >> log2P;
+ delta = abs(calcclkout - clk);
+ /* we do an exhaustive search rather than terminating
+ * on an optimality condition...
+ */
+ if (delta < bestdelta) {
+ bestdelta = delta;
+ bestclk = calcclkout;
+ bestpv->N1 = N1;
+ bestpv->M1 = M1;
+ bestpv->N2 = N2;
+ bestpv->M2 = M2;
+ bestpv->log2P = log2P;
+ if (delta == 0) /* except this one */
+ return bestclk;
+ }
+ }
+ }
+ }
+
+ return bestclk;
+}
+
+int
+nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
+ struct nouveau_pll_vals *pv)
+{
+ int outclk;
+
+ if (!pll_lim->vco2.maxfreq)
+ outclk = getMNP_single(dev, pll_lim, clk, pv);
+ else
+ outclk = getMNP_double(dev, pll_lim, clk, pv);
+
+ if (!outclk)
+ NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
+
+ return outclk;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
new file mode 100644
index 00000000000..9aaa972f882
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2005-2006 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_bo *pb = chan->pushbuf_bo;
+ struct nouveau_gpuobj *pushbuf = NULL;
+ uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+ int ret;
+
+ if (pb->bo.mem.mem_type == TTM_PL_TT) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RO, &pushbuf,
+ NULL);
+ chan->pushbuf_base = start;
+ } else
+ if (dev_priv->card_type != NV_04) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_VIDMEM, &pushbuf);
+ chan->pushbuf_base = start;
+ } else {
+ /* NV04 cmdbuf hack, from original ddx.. not sure of its
+ * exact reason for existing :) PCI access to cmdbuf in
+ * VRAM.
+ */
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ drm_get_resource_start(dev, 1),
+ dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_PCI, &pushbuf);
+ chan->pushbuf_base = start;
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
+ if (pushbuf != dev_priv->gart_info.sg_ctxdma)
+ nouveau_gpuobj_del(dev, &pushbuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct nouveau_bo *
+nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
+{
+ struct nouveau_bo *pushbuf = NULL;
+ int location, ret;
+
+ if (nouveau_vram_pushbuf)
+ location = TTM_PL_FLAG_VRAM;
+ else
+ location = TTM_PL_FLAG_TT;
+
+ ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
+ true, &pushbuf);
+ if (ret) {
+ NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
+ return NULL;
+ }
+
+ ret = nouveau_bo_pin(pushbuf, location);
+ if (ret) {
+ NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
+ nouveau_bo_ref(NULL, &pushbuf);
+ return NULL;
+ }
+
+ return pushbuf;
+}
+
+/* allocates and initializes a fifo for user space consumption */
+int
+nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ struct drm_file *file_priv,
+ uint32_t vram_handle, uint32_t tt_handle)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ int channel, user;
+ int ret;
+
+ /*
+ * Alright, here is the full story
+ * Nvidia cards have multiple hw fifo contexts (praise them for that,
+ * no complicated crash-prone context switches)
+ * We allocate a new context for each app and let it write to it
+ * directly (woo, full userspace command submission !)
+ * When there are no more contexts, you lost
+ */
+ for (channel = 0; channel < pfifo->channels; channel++) {
+ if (dev_priv->fifos[channel] == NULL)
+ break;
+ }
+
+ /* no more fifos. you lost. */
+ if (channel == pfifo->channels)
+ return -EINVAL;
+
+ dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
+ GFP_KERNEL);
+ if (!dev_priv->fifos[channel])
+ return -ENOMEM;
+ dev_priv->fifo_alloc_count++;
+ chan = dev_priv->fifos[channel];
+ INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+ INIT_LIST_HEAD(&chan->fence.pending);
+ chan->dev = dev;
+ chan->id = channel;
+ chan->file_priv = file_priv;
+ chan->vram_handle = vram_handle;
+ chan->gart_handle = tt_handle;
+
+ NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+
+ /* Allocate DMA push buffer */
+ chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
+ if (!chan->pushbuf_bo) {
+ ret = -ENOMEM;
+ NV_ERROR(dev, "pushbuf %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* Locate channel's user control regs */
+ if (dev_priv->card_type < NV_40)
+ user = NV03_USER(channel);
+ else
+ if (dev_priv->card_type < NV_50)
+ user = NV40_USER(channel);
+ else
+ user = NV50_USER(channel);
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
+ PAGE_SIZE);
+ if (!chan->user) {
+ NV_ERROR(dev, "ioremap of regs failed.\n");
+ nouveau_channel_free(chan);
+ return -ENOMEM;
+ }
+ chan->user_put = 0x40;
+ chan->user_get = 0x44;
+
+ /* Allocate space for per-channel fixed notifier memory */
+ ret = nouveau_notifier_init_channel(chan);
+ if (ret) {
+ NV_ERROR(dev, "ntfy %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* Setup channel's default objects */
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+ if (ret) {
+ NV_ERROR(dev, "gpuobj %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* Create a dma object for the push buffer */
+ ret = nouveau_channel_pushbuf_ctxdma_init(chan);
+ if (ret) {
+ NV_ERROR(dev, "pbctxdma %d\n", ret);
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* disable the fifo caches */
+ pfifo->reassign(dev, false);
+
+ /* Create a graphics context for new channel */
+ ret = pgraph->create_context(chan);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ /* Construct initial RAMFC for new channel */
+ ret = pfifo->create_context(chan);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ pfifo->reassign(dev, true);
+
+ ret = nouveau_dma_init(chan);
+ if (!ret)
+ ret = nouveau_fence_init(chan);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ nouveau_debugfs_channel_init(chan);
+
+ NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+ *chan_ret = chan;
+ return 0;
+}
+
+int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t caches;
+ int idle;
+
+ if (!chan) {
+ NV_ERROR(dev, "no channel...\n");
+ return 1;
+ }
+
+ caches = nv_rd32(dev, NV03_PFIFO_CACHES);
+ nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
+
+ if (engine->fifo.channel_id(dev) != chan->id) {
+ struct nouveau_gpuobj *ramfc =
+ chan->ramfc ? chan->ramfc->gpuobj : NULL;
+
+ if (!ramfc) {
+ NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
+ return 1;
+ }
+
+ engine->instmem.prepare_access(dev, false);
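+ /* Inactive channel: it is idle when the pointers saved in the
+ * first two RAMFC words (likely DMA put/get) match.
+ */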
+ if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
+ idle = 0;
+ else
+ idle = 1;
+ engine->instmem.finish_access(dev);
+ } else {
+ idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, caches);
+ return idle;
+}
+
+/* stops a fifo */
+void
+nouveau_channel_free(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ unsigned long flags;
+ int ret;
+
+ NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+
+ nouveau_debugfs_channel_fini(chan);
+
+ /* Give outstanding push buffers a chance to complete */
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ nouveau_fence_update(chan);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+ if (chan->fence.sequence != chan->fence.sequence_ack) {
+ struct nouveau_fence *fence = NULL;
+
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret == 0) {
+ ret = nouveau_fence_wait(fence, NULL, false, false);
+ nouveau_fence_unref((void *)&fence);
+ }
+
+ if (ret)
+ NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+ }
+
+ /* Ensure all outstanding fences are signaled. They should be if the
+ * above attempts at idling were OK, but if we failed this'll tell TTM
+ * we're done with the buffers.
+ */
+ nouveau_fence_fini(chan);
+
+ /* Ensure the channel is no longer active on the GPU */
+ pfifo->reassign(dev, false);
+
+ if (pgraph->channel(dev) == chan) {
+ pgraph->fifo_access(dev, false);
+ pgraph->unload_context(dev);
+ pgraph->fifo_access(dev, true);
+ }
+ pgraph->destroy_context(chan);
+
+ if (pfifo->channel_id(dev) == chan->id) {
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pfifo->enable(dev);
+ }
+ pfifo->destroy_context(chan);
+
+ pfifo->reassign(dev, true);
+
+ /* Release the channel's resources */
+ nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+ if (chan->pushbuf_bo) {
+ nouveau_bo_unpin(chan->pushbuf_bo);
+ nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+ }
+ nouveau_gpuobj_channel_takedown(chan);
+ nouveau_notifier_takedown_channel(chan);
+ if (chan->user)
+ iounmap(chan->user);
+
+ dev_priv->fifos[chan->id] = NULL;
+ dev_priv->fifo_alloc_count--;
+ kfree(chan);
+}
+
+/* cleans up all the fifos from file_priv */
+void
+nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ int i;
+
+ NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
+ for (i = 0; i < engine->fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->file_priv == file_priv)
+ nouveau_channel_free(chan);
+ }
+}
+
+int
+nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
+ int channel)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ if (channel >= engine->fifo.channels)
+ return 0;
+ if (dev_priv->fifos[channel] == NULL)
+ return 0;
+
+ return (dev_priv->fifos[channel]->file_priv == file_priv);
+}
+
+/***********************************
+ * ioctls wrapping the functions
+ ***********************************/
+
+static int
+nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_channel_alloc *init = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (dev_priv->engine.graph.accel_blocked)
+ return -ENODEV;
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return -EINVAL;
+
+ ret = nouveau_channel_alloc(dev, &chan, file_priv,
+ init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle);
+ if (ret)
+ return ret;
+ init->channel = chan->id;
+
+ init->subchan[0].handle = NvM2MF;
+ if (dev_priv->card_type < NV_50)
+ init->subchan[0].grclass = 0x0039;
+ else
+ init->subchan[0].grclass = 0x5039;
+ init->nr_subchan = 1;
+
+ /* Named memory object area */
+ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
+ &init->notifier_handle);
+ if (ret) {
+ nouveau_channel_free(chan);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_channel_free *cfree = data;
+ struct nouveau_channel *chan;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+
+ nouveau_channel_free(chan);
+ return 0;
+}
+
+/***********************************
+ * finally, the ioctl table
+ ***********************************/
+
+struct drm_ioctl_desc nouveau_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
+};
+
+int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
new file mode 100644
index 00000000000..5a10deb8bdb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_connector.h"
+#include "nouveau_hw.h"
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct nouveau_encoder *enc)
+{
+ return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
+}
+
+static struct nouveau_encoder *
+find_encoder_by_type(struct drm_connector *connector, int type)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_mode_object *obj;
+ int i, id;
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ id = connector->encoder_ids[i];
+ if (!id)
+ break;
+
+ obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+ nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+ if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
+ return nv_encoder;
+ }
+
+ return NULL;
+}
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
+{
+ struct drm_device *dev = to_drm_encoder(encoder)->dev;
+ struct drm_connector *drm_connector;
+
+ list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
+ if (drm_connector->encoder == to_drm_encoder(encoder))
+ return nouveau_connector(drm_connector);
+ }
+
+ return NULL;
+}
+
+
+static void
+nouveau_connector_destroy(struct drm_connector *drm_connector)
+{
+ struct nouveau_connector *connector = nouveau_connector(drm_connector);
+ struct drm_device *dev = connector->base.dev;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ if (!connector)
+ return;
+
+ drm_sysfs_connector_remove(drm_connector);
+ drm_connector_cleanup(drm_connector);
+ kfree(drm_connector);
+}
+
+static void
+nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ return;
+
+ *flags = 0;
+ if (NVLockVgaCrtcs(dev_priv->dev, false))
+ *flags |= 1;
+ if (nv_heads_tied(dev_priv->dev))
+ *flags |= 2;
+
+ if (*flags & 2)
+ NVSetOwner(dev_priv->dev, 0); /* necessary? */
+}
+
+static void
+nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50)
+ return;
+
+ if (flags & 2)
+ NVSetOwner(dev_priv->dev, 4);
+ if (flags & 1)
+ NVLockVgaCrtcs(dev_priv->dev, true);
+}
+
+static struct nouveau_i2c_chan *
+nouveau_connector_ddc_detect(struct drm_connector *connector,
+ struct nouveau_encoder **pnv_encoder)
+{
+ struct drm_device *dev = connector->dev;
+ uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
+ int ret, flags, i;
+
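+ /* 1-byte write followed by a 1-byte read at 0x50: probe for an
+ * EDID EEPROM on the standard DDC address.
+ */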
+ struct i2c_msg msgs[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ struct nouveau_i2c_chan *i2c = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_mode_object *obj;
+ int id;
+
+ id = connector->encoder_ids[i];
+ if (!id)
+ break;
+
+ obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ continue;
+ nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+
+ if (nv_encoder->dcb->i2c_index < 0xf)
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!i2c)
+ continue;
+
+ nouveau_connector_ddc_prepare(connector, &flags);
+ ret = i2c_transfer(&i2c->adapter, msgs, 2);
+ nouveau_connector_ddc_finish(connector, flags);
+
+ if (ret == 2) {
+ *pnv_encoder = nv_encoder;
+ return i2c;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+nouveau_connector_set_encoder(struct drm_connector *connector,
+ struct nouveau_encoder *nv_encoder)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct drm_device *dev = connector->dev;
+
+ if (nv_connector->detected_encoder == nv_encoder)
+ return;
+ nv_connector->detected_encoder = nv_encoder;
+
+ if (nv_encoder->dcb->type == OUTPUT_LVDS ||
+ nv_encoder->dcb->type == OUTPUT_TMDS) {
+ connector->doublescan_allowed = false;
+ connector->interlace_allowed = false;
+ } else {
+ connector->doublescan_allowed = true;
+ if (dev_priv->card_type == NV_20 ||
+ (dev_priv->card_type == NV_10 &&
+ (dev->pci_device & 0x0ff0) != 0x0100 &&
+ (dev->pci_device & 0x0ff0) != 0x0150))
+ /* HW is broken */
+ connector->interlace_allowed = false;
+ else
+ connector->interlace_allowed = true;
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ drm_connector_property_set_value(connector,
+ dev->mode_config.dvi_i_subconnector_property,
+ nv_encoder->dcb->type == OUTPUT_TMDS ?
+ DRM_MODE_SUBCONNECTOR_DVID :
+ DRM_MODE_SUBCONNECTOR_DVIA);
+ }
+}
+
+static enum drm_connector_status
+nouveau_connector_detect(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = NULL;
+ struct nouveau_i2c_chan *i2c;
+ int type, flags;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ if (nv_encoder && nv_connector->native_mode) {
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+
+ i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
+ if (i2c) {
+ nouveau_connector_ddc_prepare(connector, &flags);
+ nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+ nouveau_connector_ddc_finish(connector, flags);
+ drm_mode_connector_update_edid_property(connector,
+ nv_connector->edid);
+ if (!nv_connector->edid) {
+ NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
+ drm_get_connector_name(connector));
+ return connector_status_disconnected;
+ }
+
+ if (nv_encoder->dcb->type == OUTPUT_DP &&
+ !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
+ NV_ERROR(dev, "Detected %s, but failed init\n",
+ drm_get_connector_name(connector));
+ return connector_status_disconnected;
+ }
+
+ /* Override encoder type for DVI-I based on whether EDID
+ * says the display is digital or analog; both use the
+ * same i2c channel, so the value returned from ddc_detect
+ * isn't necessarily correct.
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
+ type = OUTPUT_TMDS;
+ else
+ type = OUTPUT_ANALOG;
+
+ nv_encoder = find_encoder_by_type(connector, type);
+ if (!nv_encoder) {
+ NV_ERROR(dev, "Detected %d encoder on %s, "
+ "but no object!\n", type,
+ drm_get_connector_name(connector));
+ return connector_status_disconnected;
+ }
+ }
+
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
+ if (!nv_encoder)
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
+ if (nv_encoder) {
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+ struct drm_encoder_helper_funcs *helper =
+ encoder->helper_private;
+
+ if (helper->detect(encoder, connector) ==
+ connector_status_connected) {
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return connector_status_connected;
+ }
+
+ }
+
+ return connector_status_disconnected;
+}
+
+static void
+nouveau_connector_force(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_encoder *nv_encoder;
+ int type;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (connector->force == DRM_FORCE_ON_DIGITAL)
+ type = OUTPUT_TMDS;
+ else
+ type = OUTPUT_ANALOG;
+ } else
+ type = OUTPUT_ANY;
+
+ nv_encoder = find_encoder_by_type(connector, type);
+ if (!nv_encoder) {
+ NV_ERROR(dev, "can't find encoder to force %s on!\n",
+ drm_get_connector_name(connector));
+ connector->status = connector_status_disconnected;
+ return;
+ }
+
+ nouveau_connector_set_encoder(connector, nv_encoder);
+}
+
+static int
+nouveau_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t value)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_device *dev = connector->dev;
+ int ret;
+
+ /* Scaling mode */
+ if (property == dev->mode_config.scaling_mode_property) {
+ struct nouveau_crtc *nv_crtc = NULL;
+ bool modeset = false;
+
+ switch (value) {
+ case DRM_MODE_SCALE_NONE:
+ case DRM_MODE_SCALE_FULLSCREEN:
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* LVDS always needs gpu scaling */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ value == DRM_MODE_SCALE_NONE)
+ return -EINVAL;
+
+ /* Changing between GPU and panel scaling requires a full
+ * modeset
+ */
+ if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
+ (value == DRM_MODE_SCALE_NONE))
+ modeset = true;
+ nv_connector->scaling_mode = value;
+
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
+ if (!nv_crtc)
+ return 0;
+
+ if (modeset || !nv_crtc->set_scale) {
+ ret = drm_crtc_helper_set_mode(&nv_crtc->base,
+ &nv_crtc->base.mode,
+ nv_crtc->base.x,
+ nv_crtc->base.y, NULL);
+ if (!ret)
+ return -EINVAL;
+ } else {
+ ret = nv_crtc->set_scale(nv_crtc, value, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ /* Dithering */
+ if (property == dev->mode_config.dithering_mode_property) {
+ struct nouveau_crtc *nv_crtc = NULL;
+
+ if (value == DRM_MODE_DITHERING_ON)
+ nv_connector->use_dithering = true;
+ else
+ nv_connector->use_dithering = false;
+
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
+ if (!nv_crtc || !nv_crtc->set_dither)
+ return 0;
+
+ return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
+ true);
+ }
+
+ if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
+ return get_slave_funcs(nv_encoder)->
+ set_property(to_drm_encoder(nv_encoder), connector, property, value);
+
+ return -EINVAL;
+}
+
+static struct drm_display_mode *
+nouveau_connector_native_mode(struct nouveau_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_display_mode *mode, *largest = NULL;
+ int high_w = 0, high_h = 0, high_v = 0;
+
+ /* Use preferred mode if there is one.. */
+ list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ NV_DEBUG_KMS(dev, "native mode from preferred\n");
+ return drm_mode_duplicate(dev, mode);
+ }
+ }
+
+ /* Otherwise, take the resolution with the largest width, then height,
+ * then vertical refresh
+ */
+ list_for_each_entry(mode, &connector->base.probed_modes, head) {
+ if (mode->hdisplay < high_w)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay < high_h)
+ continue;
+
+ if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
+ mode->vrefresh < high_v)
+ continue;
+
+ high_w = mode->hdisplay;
+ high_h = mode->vdisplay;
+ high_v = mode->vrefresh;
+ largest = mode;
+ }
+
+ NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
+ high_w, high_h, high_v);
+ return largest ? drm_mode_duplicate(dev, largest) : NULL;
+}
+
+struct moderec {
+ int hdisplay;
+ int vdisplay;
+};
+
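+/* Common resolutions offered for GPU scaling on fixed-mode panels; only
+ * entries no larger than the native mode are added by scaler_modes_add().
+ */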
+static struct moderec scaler_modes[] = {
+ { 1920, 1200 },
+ { 1920, 1080 },
+ { 1680, 1050 },
+ { 1600, 1200 },
+ { 1400, 1050 },
+ { 1280, 1024 },
+ { 1280, 960 },
+ { 1152, 864 },
+ { 1024, 768 },
+ { 800, 600 },
+ { 720, 400 },
+ { 640, 480 },
+ { 640, 400 },
+ { 640, 350 },
+ {}
+};
+
+static int
+nouveau_connector_scaler_modes_add(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_display_mode *native = nv_connector->native_mode, *m;
+ struct drm_device *dev = connector->dev;
+ struct moderec *mode = &scaler_modes[0];
+ int modes = 0;
+
+ if (!native)
+ return 0;
+
+ while (mode->hdisplay) {
+ if (mode->hdisplay <= native->hdisplay &&
+ mode->vdisplay <= native->vdisplay) {
+ m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
+ drm_mode_vrefresh(native), false,
+ false, false);
+ if (!m)
+ continue;
+
+ m->type |= DRM_MODE_TYPE_DRIVER;
+
+ drm_mode_probed_add(connector, m);
+ modes++;
+ }
+
+ mode++;
+ }
+
+ return modes;
+}
+
+static int
+nouveau_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ int ret = 0;
+
+ /* If we're not LVDS, destroy the previous native mode, the attached
+ * monitor could have changed.
+ */
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+ nv_connector->native_mode) {
+ drm_mode_destroy(dev, nv_connector->native_mode);
+ nv_connector->native_mode = NULL;
+ }
+
+ if (nv_connector->edid)
+ ret = drm_add_edid_modes(connector, nv_connector->edid);
+
+ /* Find the native mode if this is a digital panel; if we didn't
+ * find any modes through DDC previously, add the native mode to
+ * the list of modes.
+ */
+ if (!nv_connector->native_mode)
+ nv_connector->native_mode =
+ nouveau_connector_native_mode(nv_connector);
+ if (ret == 0 && nv_connector->native_mode) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+ drm_mode_probed_add(connector, mode);
+ ret = 1;
+ }
+
+ if (nv_encoder->dcb->type == OUTPUT_TV)
+ ret = get_slave_funcs(nv_encoder)->
+ get_modes(to_drm_encoder(nv_encoder), connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ ret += nouveau_connector_scaler_modes_add(connector);
+
+ return ret;
+}
+
+static int
+nouveau_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ unsigned min_clock = 25000, max_clock = min_clock;
+ unsigned clock = mode->clock;
+
+ switch (nv_encoder->dcb->type) {
+ case OUTPUT_LVDS:
+ BUG_ON(!nv_connector->native_mode);
+ if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
+ mode->vdisplay > nv_connector->native_mode->vdisplay)
+ return MODE_PANEL;
+
+ min_clock = 0;
+ max_clock = 400000;
+ break;
+ case OUTPUT_TMDS:
+ if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
+ (dev_priv->card_type < NV_50 &&
+ !nv_encoder->dcb->duallink_possible))
+ max_clock = 165000;
+ else
+ max_clock = 330000;
+ break;
+ case OUTPUT_ANALOG:
+ max_clock = nv_encoder->dcb->crtconf.maxfreq;
+ if (!max_clock)
+ max_clock = 350000;
+ break;
+ case OUTPUT_TV:
+ return get_slave_funcs(nv_encoder)->
+ mode_valid(to_drm_encoder(nv_encoder), mode);
+ case OUTPUT_DP:
+ if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
+ max_clock = nv_encoder->dp.link_nr * 270000;
+ else
+ max_clock = nv_encoder->dp.link_nr * 162000;
+
+ clock *= 3;
+ break;
+ }
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
+
+ if (clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+nouveau_connector_best_encoder(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+ if (nv_connector->detected_encoder)
+ return to_drm_encoder(nv_connector->detected_encoder);
+
+ return NULL;
+}
+
+static const struct drm_connector_helper_funcs
+nouveau_connector_helper_funcs = {
+ .get_modes = nouveau_connector_get_modes,
+ .mode_valid = nouveau_connector_mode_valid,
+ .best_encoder = nouveau_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs
+nouveau_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = NULL,
+ .restore = NULL,
+ .detect = nouveau_connector_detect,
+ .destroy = nouveau_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = nouveau_connector_set_property,
+ .force = nouveau_connector_force
+};
+
+static int
+nouveau_connector_create_lvds(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c = NULL;
+ struct nouveau_encoder *nv_encoder;
+ struct drm_display_mode native, *mode, *temp;
+ bool dummy, if_is_24bit = false;
+ int ret, flags;
+
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ if (!nv_encoder)
+ return -ENODEV;
+
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
+ if (ret) {
+ NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
+ return ret;
+ }
+ nv_connector->use_dithering = !if_is_24bit;
+
+ /* First, try getting an EDID over DDC, if allowed and an I2C channel
+ * is available.
+ */
+ if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+ if (i2c) {
+ nouveau_connector_ddc_prepare(connector, &flags);
+ nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
+ nouveau_connector_ddc_finish(connector, flags);
+ }
+
+ /* If no EDID found above, and the VBIOS indicates a hardcoded
+ * modeline is available for the panel, set it as the panel's
+ * native mode and exit.
+ */
+ if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
+ (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
+ dev_priv->VBIOS.pub.fp_no_ddc)) {
+ nv_connector->native_mode = drm_mode_duplicate(dev, &native);
+ goto out;
+ }
+
+ /* Still nothing; some VBIOS images have a hardcoded EDID block
+ * for the panel stored in them.
+ */
+ if (!nv_connector->edid && !nv_connector->native_mode &&
+ !dev_priv->VBIOS.pub.fp_no_ddc) {
+ nv_connector->edid =
+ (struct edid *)nouveau_bios_embedded_edid(dev);
+ }
+
+ if (!nv_connector->edid)
+ goto out;
+
+ /* We didn't find/use a panel mode from the VBIOS, so parse the EDID
+ * block and look for the preferred mode there.
+ */
+ ret = drm_add_edid_modes(connector, nv_connector->edid);
+ if (ret == 0)
+ goto out;
+ nv_connector->detected_encoder = nv_encoder;
+ nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
+ list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
+ drm_mode_remove(connector, mode);
+
+out:
+ if (!nv_connector->native_mode) {
+ NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
+ "determine its native mode. Disabling.\n");
+ return -ENODEV;
+ }
+
+ drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+ return 0;
+}
+
+int
+nouveau_connector_create(struct drm_device *dev, int index, int type)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_connector *nv_connector = NULL;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+ if (!nv_connector)
+ return -ENOMEM;
+ nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
+ connector = &nv_connector->base;
+
+ switch (type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ NV_INFO(dev, "Detected a VGA connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ NV_INFO(dev, "Detected a DVI-D connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ NV_INFO(dev, "Detected a DVI-I connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ NV_INFO(dev, "Detected an LVDS connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_TV:
+ NV_INFO(dev, "Detected a TV connector\n");
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ NV_INFO(dev, "Detected a DisplayPort connector\n");
+ break;
+ default:
+ NV_ERROR(dev, "Unknown connector, this is not good.\n");
+ break;
+ }
+
+ /* defaults, will get overridden in detect() */
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
+ drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
+ /* Init DVI-I specific properties */
+ if (type == DRM_MODE_CONNECTOR_DVII) {
+ drm_mode_create_dvi_i_properties(dev);
+ drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+ drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+ }
+
+ if (type != DRM_MODE_CONNECTOR_LVDS)
+ nv_connector->use_dithering = false;
+
+ if (type == DRM_MODE_CONNECTOR_DVID ||
+ type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_LVDS ||
+ type == DRM_MODE_CONNECTOR_DisplayPort) {
+ nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
+
+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
+ nv_connector->scaling_mode);
+ drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
+ nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
+ : DRM_MODE_DITHERING_OFF);
+
+ } else {
+ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+
+ if (type == DRM_MODE_CONNECTOR_VGA &&
+ dev_priv->card_type >= NV_50) {
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ nv_connector->scaling_mode);
+ }
+ }
+
+ /* attach encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->connector != index)
+ continue;
+
+ if (get_slave_funcs(nv_encoder))
+ get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ }
+
+ drm_sysfs_connector_add(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ ret = nouveau_connector_create_lvds(dev, connector);
+ if (ret) {
+ connector->funcs->destroy(connector);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
new file mode 100644
index 00000000000..728b8090e5f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CONNECTOR_H__
+#define __NOUVEAU_CONNECTOR_H__
+
+#include "drm_edid.h"
+#include "nouveau_i2c.h"
+
+struct nouveau_connector {
+ struct drm_connector base;
+
+ struct dcb_connector_table_entry *dcb;
+
+ int scaling_mode;
+ bool use_dithering;
+
+ struct nouveau_encoder *detected_encoder;
+ struct edid *edid;
+ struct drm_display_mode *native_mode;
+};
+
+static inline struct nouveau_connector *nouveau_connector(
+ struct drm_connector *con)
+{
+ return container_of(con, struct nouveau_connector, base);
+}
+
+int nouveau_connector_create(struct drm_device *dev, int index, int type);
+
+#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
new file mode 100644
index 00000000000..49fa7b2d257
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_CRTC_H__
+#define __NOUVEAU_CRTC_H__
+
+struct nouveau_crtc {
+ struct drm_crtc base;
+
+ int index;
+
+ struct drm_display_mode *mode;
+
+ uint32_t dpms_saved_fp_control;
+ uint32_t fp_users;
+ int saturation;
+ int sharpness;
+ int last_dpms;
+
+ struct {
+ int cpp;
+ bool blanked;
+ uint32_t offset;
+ uint32_t tile_flags;
+ } fb;
+
+ struct {
+ struct nouveau_bo *nvbo;
+ bool visible;
+ uint32_t offset;
+ void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
+ void (*set_pos)(struct nouveau_crtc *, int x, int y);
+ void (*hide)(struct nouveau_crtc *, bool update);
+ void (*show)(struct nouveau_crtc *, bool update);
+ } cursor;
+
+ struct {
+ struct nouveau_bo *nvbo;
+ uint16_t r[256];
+ uint16_t g[256];
+ uint16_t b[256];
+ int depth;
+ } lut;
+
+ int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
+ int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+};
+
+static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct nouveau_crtc, base);
+}
+
+static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
+{
+ return &crtc->base;
+}
+
+int nv50_crtc_create(struct drm_device *dev, int index);
+int nv50_cursor_init(struct nouveau_crtc *);
+void nv50_cursor_fini(struct nouveau_crtc *);
+int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
+ uint32_t buffer_handle, uint32_t width,
+ uint32_t height);
+int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
+
+int nv04_cursor_init(struct nouveau_crtc *);
+
+struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
+
+#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 00000000000..d79db3698f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+static int
+nouveau_debugfs_channel_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct nouveau_channel *chan = node->info_ent->data;
+
+ seq_printf(m, "channel id : %d\n", chan->id);
+
+ seq_printf(m, "cpu fifo state:\n");
+ seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
+ seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
+ seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
+ seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
+ seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
+
+ seq_printf(m, "gpu fifo state:\n");
+ seq_printf(m, " get: 0x%08x\n",
+ nvchan_rd32(chan, chan->user_get));
+ seq_printf(m, " put: 0x%08x\n",
+ nvchan_rd32(chan, chan->user_put));
+
+ seq_printf(m, "last fence : %d\n", chan->fence.sequence);
+ seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
+ return 0;
+}
+
+int
+nouveau_debugfs_channel_init(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_minor *minor = chan->dev->primary;
+ int ret;
+
+ if (!dev_priv->debugfs.channel_root) {
+ dev_priv->debugfs.channel_root =
+ debugfs_create_dir("channel", minor->debugfs_root);
+ if (!dev_priv->debugfs.channel_root)
+ return -ENOENT;
+ }
+
+ snprintf(chan->debugfs.name, 32, "%d", chan->id);
+ chan->debugfs.info.name = chan->debugfs.name;
+ chan->debugfs.info.show = nouveau_debugfs_channel_info;
+ chan->debugfs.info.driver_features = 0;
+ chan->debugfs.info.data = chan;
+
+ ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
+ dev_priv->debugfs.channel_root,
+ chan->dev->primary);
+ if (ret == 0)
+ chan->debugfs.active = true;
+ return ret;
+}
+
+void
+nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+
+ if (!chan->debugfs.active)
+ return;
+
+ drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
+ chan->debugfs.active = false;
+
+ if (chan == dev_priv->channel) {
+ debugfs_remove(dev_priv->debugfs.channel_root);
+ dev_priv->debugfs.channel_root = NULL;
+ }
+}
+
+static int
+nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t ppci_0;
+
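+ /* the chip mirrors its PCI vendor/device ID into MMIO: at 0x88000 on
+ * NV40 and later, at 0x1800 on earlier chips */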
+ ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
+
+ seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
+ seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
+ ppci_0 & 0xffff, ppci_0 >> 16);
+ return 0;
+}
+
+static int
+nouveau_debugfs_memory_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+
+ seq_printf(m, "VRAM total: %dKiB\n",
+ (int)(nouveau_mem_fb_amount(dev) >> 10));
+ return 0;
+}
+
+static struct drm_info_list nouveau_debugfs_list[] = {
+ { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
+ { "memory", nouveau_debugfs_memory_info, 0, NULL },
+};
+#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+
+int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ return 0;
+}
+
+void
+nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
+ minor);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
new file mode 100644
index 00000000000..dfc94391d71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+
+static void
+nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct drm_device *dev = drm_fb->dev;
+
+ if (drm_fb->fbdev)
+ nouveau_fbcon_remove(dev, drm_fb);
+
+ if (fb->nvbo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(fb->nvbo->gem);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ drm_framebuffer_cleanup(drm_fb);
+ kfree(fb);
+}
+
+static int
+nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+
+ return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+}
+
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+ .destroy = nouveau_user_framebuffer_destroy,
+ .create_handle = nouveau_user_framebuffer_create_handle,
+};
+
+struct drm_framebuffer *
+nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct nouveau_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
+ if (!fb)
+ return NULL;
+
+ ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ if (ret) {
+ kfree(fb);
+ return NULL;
+ }
+
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+
+ fb->nvbo = nvbo;
+ return &fb->base;
+}
+
+static struct drm_framebuffer *
+nouveau_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ if (!gem)
+ return NULL;
+
+ fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
+ if (!fb) {
+ drm_gem_object_unreference(gem);
+ return NULL;
+ }
+
+ return fb;
+}
+
+const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+ .fb_create = nouveau_user_framebuffer_create,
+ .fb_changed = nouveau_fbcon_probe,
+};
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 00000000000..703553687b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+int
+nouveau_dma_init(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *m2mf = NULL;
+ int ret, i;
+
+ /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+ ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
+ 0x0039 : 0x5039, &m2mf);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
+ if (ret)
+ return ret;
+
+ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+ if (ret)
+ return ret;
+
+ /* Map push buffer */
+ ret = nouveau_bo_map(chan->pushbuf_bo);
+ if (ret)
+ return ret;
+
+ /* Map M2MF notifier object - fbcon. */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = nouveau_bo_map(chan->notifier_bo);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialise DMA vars */
+ chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ chan->dma.put = 0;
+ chan->dma.cur = chan->dma.put;
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+
+ /* Insert NOPS for NOUVEAU_DMA_SKIPS */
+ ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(chan, 0);
+
+ /* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
+ OUT_RING(chan, NvM2MF);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
+ OUT_RING(chan, NvNotify0);
+
+ /* Sit back and pray the channel works.. */
+ FIRE_RING(chan);
+
+ return 0;
+}
+
+void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
+{
+ bool is_iomem;
+ u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
+ mem = &mem[chan->dma.cur];
+ if (is_iomem)
+ memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
+ else
+ memcpy(mem, data, nr_dwords * 4);
+ chan->dma.cur += nr_dwords;
+}
+
+static inline bool
+READ_GET(struct nouveau_channel *chan, uint32_t *get)
+{
+ uint32_t val;
+
+ val = nvchan_rd32(chan, chan->user_get);
+ if (val < chan->pushbuf_base ||
+ val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
+ /* the value is meaningless to dma_wait() except for telling
+ * whether the GPU has stalled or not
+ */
+ *get = val;
+ return false;
+ }
+
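+ /* convert the byte offset within the push buffer into a dword index */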
+ *get = (val - chan->pushbuf_base) >> 2;
+ return true;
+}
+
+int
+nouveau_dma_wait(struct nouveau_channel *chan, int size)
+{
+ uint32_t get, prev_get = 0, cnt = 0;
+ bool get_valid;
+
+ while (chan->dma.free < size) {
+ /* reset the counter as long as GET is still advancing; this
+ * avoids misdetecting a GPU lockup when the GPU is simply
+ * processing an operation that takes a long time
+ */
+ get_valid = READ_GET(chan, &get);
+ if (get != prev_get) {
+ prev_get = get;
+ cnt = 0;
+ }
+
+ if ((++cnt & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (cnt > 100000)
+ return -EBUSY;
+ }
+
+ /* loop until we have a usable GET pointer. the value
+ * we read from the GPU may be outside the main ring if
+ * PFIFO is processing a buffer called from the main ring;
+ * discard these values until something sensible is seen.
+ *
+ * the other case where we discard GET is while the GPU is
+ * fetching from the SKIPS area, so the code below doesn't have
+ * to deal with some fun corner cases.
+ */
+ if (!get_valid || get < NOUVEAU_DMA_SKIPS)
+ continue;
+
+ if (get <= chan->dma.cur) {
+ /* the engine is fetching behind us, or is completely
+ * idle (GET == PUT), so we have free space up until
+ * the end of the push buffer
+ *
+ * we can only hit this path once per call: after
+ * looping back to the beginning of the push buffer
+ * we'll hit the fetching-ahead-of-us path from that
+ * point on.
+ *
+ * the *one* exception to that rule is if we read
+ * GET==PUT, in which case the below conditional will
+ * always succeed and break us out of the wait loop.
+ */
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+ if (chan->dma.free >= size)
+ break;
+
+ /* not enough space left at the end of the push buffer,
+ * instruct the GPU to jump back to the start right
+ * after processing the currently pending commands.
+ */
+ OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+ WRITE_PUT(NOUVEAU_DMA_SKIPS);
+
+ /* we're now submitting commands at the start of
+ * the push buffer.
+ */
+ chan->dma.cur =
+ chan->dma.put = NOUVEAU_DMA_SKIPS;
+ }
+
+ /* the engine is fetching ahead of us, so we have space up until
+ * the current GET pointer. the "- 1" is to ensure there's
+ * space left to emit a jump back to the beginning of the
+ * push buffer if we require it. we can never get GET == PUT
+ * here, so this is safe.
+ */
+ chan->dma.free = get - chan->dma.cur - 1;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 00000000000..04e85d8f757
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_DMA_H__
+#define __NOUVEAU_DMA_H__
+
+#ifndef NOUVEAU_DMA_DEBUG
+#define NOUVEAU_DMA_DEBUG 0
+#endif
+
+/*
+ * There's a hw race condition where you can't jump to your PUT offset;
+ * to avoid this we jump to offset + SKIPS and fill the difference with
+ * NOPs.
+ *
+ * xf86-video-nv configures the DMA fetch size to 32 bytes, and uses
+ * a SKIPS value of 8. Assuming the race condition has to do with
+ * writing into the fetch area: since we configure a fetch size of
+ * 128 bytes, we need a larger SKIPS value.
+ */
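+/* i.e. 128 bytes of fetch area / 4 bytes per dword = 32 NOP dwords reserved
+ * at the start of the push buffer. */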
+#define NOUVEAU_DMA_SKIPS (128 / 4)
+
+/* Hardcoded object assignments to subchannels (subchannel id). */
+enum {
+ NvSubM2MF = 0,
+ NvSub2D = 1,
+ NvSubCtxSurf2D = 1,
+ NvSubGdiRect = 2,
+ NvSubImageBlit = 3
+};
+
+/* Object handles. */
+enum {
+ NvM2MF = 0x80000001,
+ NvDmaFB = 0x80000002,
+ NvDmaTT = 0x80000003,
+ NvDmaVRAM = 0x80000004,
+ NvDmaGART = 0x80000005,
+ NvNotify0 = 0x80000006,
+ Nv2D = 0x80000007,
+ NvCtxSurf2D = 0x80000008,
+ NvRop = 0x80000009,
+ NvImagePatt = 0x8000000a,
+ NvClipRect = 0x8000000b,
+ NvGdiRect = 0x8000000c,
+ NvImageBlit = 0x8000000d,
+
+ /* G80+ display objects */
+ NvEvoVRAM = 0x01000000,
+ NvEvoFB16 = 0x01000001,
+ NvEvoFB32 = 0x01000002
+};
+
+#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
+#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180
+#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
+
+#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
+
+static __must_check inline int
+RING_SPACE(struct nouveau_channel *chan, int size)
+{
+ if (chan->dma.free < size) {
+ int ret;
+
+ ret = nouveau_dma_wait(chan, size);
+ if (ret)
+ return ret;
+ }
+
+ chan->dma.free -= size;
+ return 0;
+}
+
+static inline void
+OUT_RING(struct nouveau_channel *chan, int data)
+{
+ if (NOUVEAU_DMA_DEBUG) {
+ NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
+ chan->id, chan->dma.cur << 2, data);
+ }
+
+ nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
+}
+
+extern void
+OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
+
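+/* A method header packs the subchannel, dword count and method offset into a
+ * single word: (size << 18) | (subc << 13) | mthd. For example,
+ * BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1)
+ * emits (1 << 18) | (0 << 13) | 0x180 = 0x00040180.
+ */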
+static inline void
+BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
+{
+ OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
+}
+
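+/* The read-back of the push buffer before updating PUT is presumably there to
+ * make sure the buffered commands are visible to the GPU before it is told to
+ * fetch them.
+ */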
+#define WRITE_PUT(val) do { \
+ DRM_MEMORYBARRIER(); \
+ nouveau_bo_rd32(chan->pushbuf_bo, 0); \
+ nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
+} while (0)
+
+static inline void
+FIRE_RING(struct nouveau_channel *chan)
+{
+ if (NOUVEAU_DMA_DEBUG) {
+ NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
+ chan->id, chan->dma.cur << 2);
+ }
+
+ if (chan->dma.cur == chan->dma.put)
+ return;
+ chan->accel_done = true;
+
+ WRITE_PUT(chan->dma.cur);
+ chan->dma.put = chan->dma.cur;
+}
+
+static inline void
+WIND_RING(struct nouveau_channel *chan)
+{
+ chan->dma.cur = chan->dma.put;
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
new file mode 100644
index 00000000000..9e2926c4857
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_encoder.h"
+
+static int
+auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
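+ /* 9 is the DisplayPort native AUX read request command */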
+ ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_i2c_chan *auxch;
+ int ret;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return -ENODEV;
+
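+ /* 8 is the DisplayPort native AUX write request command */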
+ ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
+ return ret;
+}
+
+static int
+nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
+ NV50_SOR_DP_CTRL_LANE_MASK);
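+ /* (1 << lane_count) - 1 gives a mask with one bit set per enabled lane */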
+ tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
+ if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+
+ return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ int reg = 0x614300 + (nv_encoder->or * 0x800);
+
+ tmp = nv_rd32(dev, reg);
+ tmp &= 0xfff3ffff;
+ if (cmd == DP_LINK_BW_2_7)
+ tmp |= 0x00040000;
+ nv_wr32(dev, reg, tmp);
+
+ return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t tmp;
+ uint8_t cmd;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+ int ret;
+
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
+ tmp |= (pattern << 24);
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+
+ ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+ if (ret)
+ return ret;
+ cmd &= ~DP_TRAINING_PATTERN_MASK;
+ cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
+ return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
+}
+
+static int
+nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int i, dpe_headerlen, max_vs = 0;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+ for (i = 0; i < dpe_headerlen; i++, dpse++) {
+ if (dpse->vs_level > max_vs)
+ max_vs = dpse->vs_level;
+ }
+
+ return max_vs;
+}
+
+static int
+nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int i, dpe_headerlen, max_pre = 0;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+ for (i = 0; i < dpe_headerlen; i++, dpse++) {
+ if (dpse->vs_level != vs)
+ continue;
+
+ if (dpse->pre_level > max_pre)
+ max_pre = dpse->pre_level;
+ }
+
+ return max_pre;
+}
+
+static bool
+nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int ret, i, dpe_headerlen, vs = 0, pre = 0;
+ uint8_t request[2];
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+ ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
+ if (ret)
+ return false;
+
+ NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
+
+ /* Keep all lanes at the same level.. */
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
+ int lane_vs = lane_req & 3;
+ int lane_pre = (lane_req >> 2) & 3;
+
+ if (lane_vs > vs)
+ vs = lane_vs;
+ if (lane_pre > pre)
+ pre = lane_pre;
+ }
+
+ if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
+ vs = nouveau_dp_max_voltage_swing(encoder);
+ vs |= 4;
+ }
+
+ if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
+ pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
+ pre |= 4;
+ }
+
+ /* Update the configuration for all lanes.. */
+ for (i = 0; i < nv_encoder->dp.link_nr; i++)
+ config[i] = (pre << 3) | vs;
+
+ return true;
+}
+
+static bool
+nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct bit_displayport_encoder_table_entry *dpse;
+ struct bit_displayport_encoder_table *dpe;
+ int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
+ int dpe_headerlen, ret, i;
+
+ NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ config[0], config[1], config[2], config[3]);
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe)
+ return false;
+ dpse = (void *)((char *)dpe + dpe_headerlen);
+
+ for (i = 0; i < dpe->record_nr; i++, dpse++) {
+ if (dpse->vs_level == (config[0] & 3) &&
+ dpse->pre_level == ((config[0] >> 3) & 3))
+ break;
+ }
+ BUG_ON(i == dpe->record_nr);
+
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ const int shift[4] = { 16, 8, 0, 24 };
+ uint32_t mask = 0xff << shift[i];
+ uint32_t reg0, reg1, reg2;
+
+ reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
+ reg0 |= (dpse->reg0 << shift[i]);
+ reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
+ reg1 |= (dpse->reg1 << shift[i]);
+ reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
+ reg2 |= (dpse->reg2 << 8);
+ nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
+ nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
+ nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
+ }
+
+ ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+bool
+nouveau_dp_link_train(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint8_t config[4];
+ uint8_t status[3];
+ bool cr_done, cr_max_vs, eq_done;
+ int ret = 0, i, tries, voltage;
+
+ NV_DEBUG_KMS(dev, "link training!!\n");
+train:
+ cr_done = eq_done = false;
+
+ /* set link configuration */
+ NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
+ nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
+
+ ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
+ if (ret)
+ return false;
+
+ config[0] = nv_encoder->dp.link_nr;
+ if (nv_encoder->dp.dpcd_version >= 0x11)
+ config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ ret = nouveau_dp_lane_count_set(encoder, config[0]);
+ if (ret)
+ return false;
+
+ /* clock recovery */
+ NV_DEBUG_KMS(dev, "\tbegin cr\n");
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto stop;
+
+ tries = 0;
+ voltage = -1;
+ memset(config, 0x00, sizeof(config));
+ for (;;) {
+ if (!nouveau_dp_link_train_commit(encoder, config))
+ break;
+
+ udelay(100);
+
+ ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
+ if (ret)
+ break;
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ status[0], status[1]);
+
+ cr_done = true;
+ cr_max_vs = false;
+ for (i = 0; i < nv_encoder->dp.link_nr; i++) {
+ int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
+ if (!(lane & DP_LANE_CR_DONE)) {
+ cr_done = false;
+ if (config[i] & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED)
+ cr_max_vs = true;
+ break;
+ }
+ }
+
+ if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
+ voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ tries = 0;
+ }
+
+ if (cr_done || cr_max_vs || (++tries == 5))
+ break;
+
+ if (!nouveau_dp_link_train_adjust(encoder, config))
+ break;
+ }
+
+ if (!cr_done)
+ goto stop;
+
+ /* channel equalisation */
+ NV_DEBUG_KMS(dev, "\tbegin eq\n");
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
+ if (ret)
+ goto stop;
+
+ for (tries = 0; tries <= 5; tries++) {
+ udelay(400);
+
+ ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
+ if (ret)
+ break;
+ NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
+ status[0], status[1]);
+
+ eq_done = true;
+ if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
+ eq_done = false;
+
+ for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
+ int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
+
+ if (!(lane & DP_LANE_CR_DONE)) {
+ cr_done = false;
+ break;
+ }
+
+ if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
+ !(lane & DP_LANE_SYMBOL_LOCKED)) {
+ eq_done = false;
+ break;
+ }
+ }
+
+ if (eq_done || !cr_done)
+ break;
+
+ if (!nouveau_dp_link_train_adjust(encoder, config) ||
+ !nouveau_dp_link_train_commit(encoder, config))
+ break;
+ }
+
+stop:
+ /* end link training */
+ ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
+ if (ret)
+ return false;
+
+ /* retry at a lower setting, if possible */
+ if (!ret && !(eq_done && cr_done)) {
+ NV_DEBUG_KMS(dev, "\twe failed\n");
+ if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
+ NV_DEBUG_KMS(dev, "retry link training at low rate\n");
+ nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+ goto train;
+ }
+ }
+
+ return eq_done;
+}
+
+bool
+nouveau_dp_detect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ uint8_t dpcd[4];
+ int ret;
+
+ ret = auxch_rd(encoder, 0x0000, dpcd, 4);
+ if (ret)
+ return false;
+
+ NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
+ "display: link_bw %d, link_nr %d version 0x%02x\n",
+ nv_encoder->dcb->dpconf.link_bw,
+ nv_encoder->dcb->dpconf.link_nr,
+ dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
+
+ nv_encoder->dp.dpcd_version = dpcd[0];
+
+ nv_encoder->dp.link_bw = dpcd[1];
+ if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
+ !nv_encoder->dcb->dpconf.link_bw)
+ nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
+
+ nv_encoder->dp.link_nr = dpcd[2] & 0xf;
+ if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
+ nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
+
+ return true;
+}
+
+int
+nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ uint8_t *data, int data_nr)
+{
+ struct drm_device *dev = auxch->dev;
+ uint32_t tmp, ctrl, stat = 0, data32[4] = {};
+ int ret = 0, i, index = auxch->rd;
+
+ NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
+
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ if (!(tmp & 0x01000000)) {
+ NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
+ ret = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < 3; i++) {
+ tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
+ if (tmp & NV50_AUXCH_STAT_STATE_READY)
+ break;
+ udelay(100);
+ }
+
+ if (i == 3) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!(cmd & 1)) {
+ memcpy(data32, data, data_nr);
+ for (i = 0; i < 4; i++) {
+ NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
+ nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
+ }
+ }
+
+ nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
+ ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
+ ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
+ ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
+ ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
+
+ for (;;) {
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
+ nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
+ if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
+ NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
+ nv_rd32(dev, NV50_AUXCH_CTRL(index)));
+ return -EBUSY;
+ }
+
+ udelay(400);
+
+ stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
+ if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
+ NV50_AUXCH_STAT_REPLY_AUX_DEFER)
+ break;
+ }
+
+ if (cmd & 1) {
+ for (i = 0; i < 4; i++) {
+ data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
+ NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
+ }
+ memcpy(data, data32, data_nr);
+ }
+
+out:
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
+ tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
+ if (tmp & 0x01000000) {
+ NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
+ ret = -EIO;
+ }
+
+ udelay(400);
+
+ return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
+}
+
+int
+nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
+ struct drm_device *dev = auxch->dev;
+ int ret = 0, cmd, addr = algo_data->address;
+ uint8_t *buf;
+
+ if (mode == MODE_I2C_READ) {
+ cmd = AUX_I2C_READ;
+ buf = read_byte;
+ } else {
+ cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
+ buf = &write_byte;
+ }
+
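+ /* keep the AUX transaction open ("middle of transaction") until the
+ * i2c STOP condition */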
+ if (!(mode & MODE_I2C_STOP))
+ cmd |= AUX_I2C_MOT;
+
+ if (mode & MODE_I2C_START)
+ return 1;
+
+ for (;;) {
+ ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
+ case NV50_AUXCH_STAT_REPLY_I2C_ACK:
+ return 1;
+ case NV50_AUXCH_STAT_REPLY_I2C_NACK:
+ return -EREMOTEIO;
+ case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
+ udelay(100);
+ break;
+ default:
+ NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
+ return -EREMOTEIO;
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
new file mode 100644
index 00000000000..06eb993e088
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+#include "nv50_display.h"
+
+#include "drm_pciids.h"
+
+MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
+int nouveau_ctxfw = 0;
+module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable AGP");
+int nouveau_noagp;
+module_param_named(noagp, nouveau_noagp, int, 0400);
+
+MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
+static int nouveau_modeset = -1; /* kms */
+module_param_named(modeset, nouveau_modeset, int, 0400);
+
+MODULE_PARM_DESC(vbios, "Override default VBIOS location");
+char *nouveau_vbios;
+module_param_named(vbios, nouveau_vbios, charp, 0400);
+
+MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
+int nouveau_vram_pushbuf;
+module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+
+MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
+int nouveau_vram_notify;
+module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
+
+MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
+int nouveau_duallink = 1;
+module_param_named(duallink, nouveau_duallink, int, 0400);
+
+MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
+int nouveau_uscript_lvds = -1;
+module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
+
+MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
+int nouveau_uscript_tmds = -1;
+module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
+
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+ "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+ "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
+ "\t\tDefault: PAL\n"
+ "\t\t*NOTE* Ignored for cards with external TV encoders.");
+char *nouveau_tv_norm;
+module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
+
+MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
+ "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
+ "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
+ "\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
+int nouveau_reg_debug;
+module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
+
+int nouveau_fbpercrtc;
+#if 0
+module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
+#endif
+
+static struct pci_device_id pciidlist[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ .class_mask = 0xff << 16,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ .class_mask = 0xff << 16,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static struct drm_driver driver;
+
+static int __devinit
+nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &driver);
+}
+
+static void
+nouveau_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static int
+nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ struct drm_crtc *crtc;
+ uint32_t fbdev_flags;
+ int ret, i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ if (pm_state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+ fbdev_flags = dev_priv->fbdev_info->flags;
+ dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ nouveau_bo_unpin(nouveau_fb->nvbo);
+ }
+
+ NV_INFO(dev, "Evicting buffers...\n");
+ ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+
+ NV_INFO(dev, "Idling channels...\n");
+ for (i = 0; i < pfifo->channels; i++) {
+ struct nouveau_fence *fence = NULL;
+
+ chan = dev_priv->fifos[i];
+ if (!chan || (dev_priv->card_type >= NV_50 &&
+ chan == dev_priv->fifos[0]))
+ continue;
+
+ ret = nouveau_fence_new(chan, &fence, true);
+ if (ret == 0) {
+ ret = nouveau_fence_wait(fence, NULL, false, false);
+ nouveau_fence_unref((void *)&fence);
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
+ chan->id);
+ }
+ }
+
+ pgraph->fifo_access(dev, false);
+ nouveau_wait_for_idle(dev);
+ pfifo->reassign(dev, false);
+ pfifo->disable(dev);
+ pfifo->unload_context(dev);
+ pgraph->unload_context(dev);
+
+ NV_INFO(dev, "Suspending GPU objects...\n");
+ ret = nouveau_gpuobj_suspend(dev);
+ if (ret) {
+ NV_ERROR(dev, "... failed: %d\n", ret);
+ goto out_abort;
+ }
+
+ ret = pinstmem->suspend(dev);
+ if (ret) {
+ NV_ERROR(dev, "... failed: %d\n", ret);
+ nouveau_gpuobj_suspend_cleanup(dev);
+ goto out_abort;
+ }
+
+ NV_INFO(dev, "And we're gone!\n");
+ pci_save_state(pdev);
+ if (pm_state.event == PM_EVENT_SUSPEND) {
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ acquire_console_sem();
+ fb_set_suspend(dev_priv->fbdev_info, 1);
+ release_console_sem();
+ dev_priv->fbdev_info->flags = fbdev_flags;
+ return 0;
+
+out_abort:
+ NV_INFO(dev, "Re-enabling acceleration..\n");
+ pfifo->enable(dev);
+ pfifo->reassign(dev, true);
+ pgraph->fifo_access(dev, true);
+ return ret;
+}
+
+static int
+nouveau_pci_resume(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ struct drm_crtc *crtc;
+ uint32_t fbdev_flags;
+ int ret, i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -ENODEV;
+
+ fbdev_flags = dev_priv->fbdev_info->flags;
+ dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+
+ NV_INFO(dev, "We're back, enabling device...\n");
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev))
+ return -1;
+ pci_set_master(dev->pdev);
+
+ NV_INFO(dev, "POSTing device...\n");
+ ret = nouveau_run_vbios_init(dev);
+ if (ret)
+ return ret;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ ret = nouveau_mem_init_agp(dev);
+ if (ret) {
+ NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
+ return ret;
+ }
+ }
+
+ NV_INFO(dev, "Reinitialising engines...\n");
+ engine->instmem.resume(dev);
+ engine->mc.init(dev);
+ engine->timer.init(dev);
+ engine->fb.init(dev);
+ engine->graph.init(dev);
+ engine->fifo.init(dev);
+
+ NV_INFO(dev, "Restoring GPU objects...\n");
+ nouveau_gpuobj_resume(dev);
+
+ nouveau_irq_postinstall(dev);
+
+ /* Re-write SKIPS, they'll have been lost over the suspend */
+ if (nouveau_vram_pushbuf) {
+ struct nouveau_channel *chan;
+ int j;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ chan = dev_priv->fifos[i];
+ if (!chan || !chan->pushbuf_bo)
+ continue;
+
+ for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+ nouveau_bo_wr32(chan->pushbuf_bo, j, 0);
+ }
+ }
+
+ NV_INFO(dev, "Restoring mode...\n");
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ }
+
+ if (dev_priv->card_type < NV_50) {
+ nv04_display_restore(dev);
+ NVLockVgaCrtcs(dev, false);
+ } else
+ nv50_display_init(dev);
+
+ /* Force CLUT to get re-loaded during modeset */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->lut.depth = 0;
+ }
+
+ acquire_console_sem();
+ fb_set_suspend(dev_priv->fbdev_info, 0);
+ release_console_sem();
+
+ nouveau_fbcon_zfill(dev);
+
+ drm_helper_resume_force_mode(dev);
+ dev_priv->fbdev_info->flags = fbdev_flags;
+ return 0;
+}
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ .load = nouveau_load,
+ .firstopen = nouveau_firstopen,
+ .lastclose = nouveau_lastclose,
+ .unload = nouveau_unload,
+ .preclose = nouveau_preclose,
+#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
+ .debugfs_init = nouveau_debugfs_init,
+ .debugfs_cleanup = nouveau_debugfs_takedown,
+#endif
+ .irq_preinstall = nouveau_irq_preinstall,
+ .irq_postinstall = nouveau_irq_postinstall,
+ .irq_uninstall = nouveau_irq_uninstall,
+ .irq_handler = nouveau_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = nouveau_ioctls,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = nouveau_ttm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = nouveau_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = nouveau_pci_probe,
+ .remove = nouveau_pci_remove,
+ .suspend = nouveau_pci_suspend,
+ .resume = nouveau_pci_resume
+ },
+
+ .gem_init_object = nouveau_gem_object_new,
+ .gem_free_object = nouveau_gem_object_del,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+#ifdef GIT_REVISION
+ .date = GIT_REVISION,
+#else
+ .date = DRIVER_DATE,
+#endif
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init nouveau_init(void)
+{
+ driver.num_ioctls = nouveau_max_ioctl;
+
+ if (nouveau_modeset == -1) {
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ nouveau_modeset = 0;
+ else
+#endif
+ nouveau_modeset = 1;
+ }
+
+ if (nouveau_modeset == 1)
+ driver.driver_features |= DRIVER_MODESET;
+
+ return drm_init(&driver);
+}
+
+static void __exit nouveau_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(nouveau_init);
+module_exit(nouveau_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
new file mode 100644
index 00000000000..5f8cbb79c49
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -0,0 +1,1303 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRV_H__
+#define __NOUVEAU_DRV_H__
+
+#define DRIVER_AUTHOR "Stephane Marchesin"
+#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
+
+#define DRIVER_NAME "nouveau"
+#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
+#define DRIVER_DATE "20090420"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 15
+
+#define NOUVEAU_FAMILY 0x0000FFFF
+#define NOUVEAU_FLAGS 0xFFFF0000
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+struct nouveau_fpriv {
+ struct ttm_object_file *tfile;
+};
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+#include "nouveau_bios.h"
+struct nouveau_grctx;
+
+#define MAX_NUM_DCB_ENTRIES 16
+
+#define NOUVEAU_MAX_CHANNEL_NR 128
+
+#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
+#define NV50_VM_BLOCK (512*1024*1024ULL)
+#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+
+struct nouveau_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ u32 placements[3];
+ struct ttm_bo_kmap_obj kmap;
+ struct list_head head;
+
+ /* protected by ttm_bo_reserve() */
+ struct drm_file *reserved_by;
+ struct list_head entry;
+ int pbbo_index;
+
+ struct nouveau_channel *channel;
+
+ bool mappable;
+ bool no_vm;
+
+ uint32_t tile_mode;
+ uint32_t tile_flags;
+
+ struct drm_gem_object *gem;
+ struct drm_file *cpu_filp;
+ int pin_refcnt;
+};
+
+static inline struct nouveau_bo *
+nouveau_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct nouveau_bo, bo);
+}
+
+static inline struct nouveau_bo *
+nouveau_gem_object(struct drm_gem_object *gem)
+{
+ return gem ? gem->driver_private : NULL;
+}
+
+/* TODO: submit equivalent to TTM generic API upstream? */
+static inline void __iomem *
+nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
+{
+ bool is_iomem;
+ void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
+ &nvbo->kmap, &is_iomem);
+ WARN_ON_ONCE(ioptr && !is_iomem);
+ return ioptr;
+}
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ uint64_t start;
+ uint64_t size;
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+enum nouveau_flags {
+ NV_NFORCE = 0x10000000,
+ NV_NFORCE2 = 0x20000000
+};
+
+#define NVOBJ_ENGINE_SW 0
+#define NVOBJ_ENGINE_GR 1
+#define NVOBJ_ENGINE_DISPLAY 2
+#define NVOBJ_ENGINE_INT 0xdeadbeef
+
+#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
+#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
+#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+#define NVOBJ_FLAG_FAKE (1 << 3)
+struct nouveau_gpuobj {
+ struct list_head list;
+
+ struct nouveau_channel *im_channel;
+ struct mem_block *im_pramin;
+ struct nouveau_bo *im_backing;
+ uint32_t im_backing_start;
+ uint32_t *im_backing_suspend;
+ int im_bound;
+
+ uint32_t flags;
+ int refcount;
+
+ uint32_t engine;
+ uint32_t class;
+
+ void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
+ void *priv;
+};
+
+struct nouveau_gpuobj_ref {
+ struct list_head list;
+
+ struct nouveau_gpuobj *gpuobj;
+ uint32_t instance;
+
+ struct nouveau_channel *channel;
+ int handle;
+};
+
+struct nouveau_channel {
+ struct drm_device *dev;
+ int id;
+
+ /* owner of this fifo */
+ struct drm_file *file_priv;
+ /* mapping of the fifo itself */
+ struct drm_local_map *map;
+
+ /* mapping of the regs controlling the fifo */
+ void __iomem *user;
+ uint32_t user_get;
+ uint32_t user_put;
+
+ /* Fencing */
+ struct {
+ /* lock protects the pending list only */
+ spinlock_t lock;
+ struct list_head pending;
+ uint32_t sequence;
+ uint32_t sequence_ack;
+ uint32_t last_sequence_irq;
+ } fence;
+
+ /* DMA push buffer */
+ struct nouveau_gpuobj_ref *pushbuf;
+ struct nouveau_bo *pushbuf_bo;
+ uint32_t pushbuf_base;
+
+ /* Notifier memory */
+ struct nouveau_bo *notifier_bo;
+ struct mem_block *notifier_heap;
+
+ /* PFIFO context */
+ struct nouveau_gpuobj_ref *ramfc;
+ struct nouveau_gpuobj_ref *cache;
+
+ /* PGRAPH context */
+ /* XXX maybe merge the two pointers as private data? */
+ struct nouveau_gpuobj_ref *ramin_grctx;
+ void *pgraph_ctx;
+
+ /* NV50 VM */
+ struct nouveau_gpuobj *vm_pd;
+ struct nouveau_gpuobj_ref *vm_gart_pt;
+ struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
+
+ /* Objects */
+ struct nouveau_gpuobj_ref *ramin; /* Private instmem */
+ struct mem_block *ramin_heap; /* Private PRAMIN heap */
+ struct nouveau_gpuobj_ref *ramht; /* Hash table */
+ struct list_head ramht_refs; /* Objects referenced by RAMHT */
+
+ /* GPU object info for stuff used in-kernel (mm_enabled) */
+ uint32_t m2mf_ntfy;
+ uint32_t vram_handle;
+ uint32_t gart_handle;
+ bool accel_done;
+
+ /* Push buffer state (only for drm's channel on !mm_enabled) */
+ struct {
+ int max;
+ int free;
+ int cur;
+ int put;
+ /* access via pushbuf_bo */
+ } dma;
+
+ uint32_t sw_subchannel[8];
+
+ struct {
+ struct nouveau_gpuobj *vblsem;
+ uint32_t vblsem_offset;
+ uint32_t vblsem_rval;
+ struct list_head vbl_wait;
+ } nvsw;
+
+ struct {
+ bool active;
+ char name[32];
+ struct drm_info_list info;
+ } debugfs;
+};
+
+struct nouveau_instmem_engine {
+ void *priv;
+
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ int (*suspend)(struct drm_device *dev);
+ void (*resume)(struct drm_device *dev);
+
+ int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+ void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+ void (*prepare_access)(struct drm_device *, bool write);
+ void (*finish_access)(struct drm_device *);
+};
+
+struct nouveau_mc_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_timer_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ uint64_t (*read)(struct drm_device *dev);
+};
+
+struct nouveau_fb_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_fifo_engine {
+ void *priv;
+
+ int channels;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ void (*disable)(struct drm_device *);
+ void (*enable)(struct drm_device *);
+ bool (*reassign)(struct drm_device *, bool enable);
+
+ int (*channel_id)(struct drm_device *);
+
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*unload_context)(struct drm_device *);
+};
+
+struct nouveau_pgraph_object_method {
+ int id;
+ int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
+ uint32_t data);
+};
+
+struct nouveau_pgraph_object_class {
+ int id;
+ bool software;
+ struct nouveau_pgraph_object_method *methods;
+};
+
+struct nouveau_pgraph_engine {
+ struct nouveau_pgraph_object_class *grclass;
+ bool accel_blocked;
+ void *ctxprog;
+ void *ctxvals;
+ int grctx_size;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ void (*fifo_access)(struct drm_device *, bool);
+
+ struct nouveau_channel *(*channel)(struct drm_device *);
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*unload_context)(struct drm_device *);
+};
+
+struct nouveau_engine {
+ struct nouveau_instmem_engine instmem;
+ struct nouveau_mc_engine mc;
+ struct nouveau_timer_engine timer;
+ struct nouveau_fb_engine fb;
+ struct nouveau_pgraph_engine graph;
+ struct nouveau_fifo_engine fifo;
+};
+
+struct nouveau_pll_vals {
+ union {
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t N1, M1, N2, M2;
+#else
+ uint8_t M1, N1, M2, N2;
+#endif
+ };
+ struct {
+ uint16_t NM1, NM2;
+ } __attribute__((packed));
+ };
+ int log2P;
+
+ int refclk;
+};
+
+enum nv04_fp_display_regs {
+ FP_DISPLAY_END,
+ FP_TOTAL,
+ FP_CRTC,
+ FP_SYNC_START,
+ FP_SYNC_END,
+ FP_VALID_START,
+ FP_VALID_END
+};
+
+struct nv04_crtc_reg {
+ unsigned char MiscOutReg; /* */
+ uint8_t CRTC[0x9f];
+ uint8_t CR58[0x10];
+ uint8_t Sequencer[5];
+ uint8_t Graphics[9];
+ uint8_t Attribute[21];
+ unsigned char DAC[768]; /* Internal color lookup table */
+
+ /* PCRTC regs */
+ uint32_t fb_start;
+ uint32_t crtc_cfg;
+ uint32_t cursor_cfg;
+ uint32_t gpio_ext;
+ uint32_t crtc_830;
+ uint32_t crtc_834;
+ uint32_t crtc_850;
+ uint32_t crtc_eng_ctrl;
+
+ /* PRAMDAC regs */
+ uint32_t nv10_cursync;
+ struct nouveau_pll_vals pllvals;
+ uint32_t ramdac_gen_ctrl;
+ uint32_t ramdac_630;
+ uint32_t ramdac_634;
+ uint32_t tv_setup;
+ uint32_t tv_vtotal;
+ uint32_t tv_vskew;
+ uint32_t tv_vsync_delay;
+ uint32_t tv_htotal;
+ uint32_t tv_hskew;
+ uint32_t tv_hsync_delay;
+ uint32_t tv_hsync_delay2;
+ uint32_t fp_horiz_regs[7];
+ uint32_t fp_vert_regs[7];
+ uint32_t dither;
+ uint32_t fp_control;
+ uint32_t dither_regs[6];
+ uint32_t fp_debug_0;
+ uint32_t fp_debug_1;
+ uint32_t fp_debug_2;
+ uint32_t fp_margin_color;
+ uint32_t ramdac_8c0;
+ uint32_t ramdac_a20;
+ uint32_t ramdac_a24;
+ uint32_t ramdac_a34;
+ uint32_t ctv_regs[38];
+};
+
+struct nv04_output_reg {
+ uint32_t output;
+ int head;
+};
+
+struct nv04_mode_state {
+ uint32_t bpp;
+ uint32_t width;
+ uint32_t height;
+ uint32_t interlace;
+ uint32_t repaint0;
+ uint32_t repaint1;
+ uint32_t screen;
+ uint32_t scale;
+ uint32_t dither;
+ uint32_t extra;
+ uint32_t fifo;
+ uint32_t pixel;
+ uint32_t horiz;
+ int arbitration0;
+ int arbitration1;
+ uint32_t pll;
+ uint32_t pllB;
+ uint32_t vpll;
+ uint32_t vpll2;
+ uint32_t vpllB;
+ uint32_t vpll2B;
+ uint32_t pllsel;
+ uint32_t sel_clk;
+ uint32_t general;
+ uint32_t crtcOwner;
+ uint32_t head;
+ uint32_t head2;
+ uint32_t cursorConfig;
+ uint32_t cursor0;
+ uint32_t cursor1;
+ uint32_t cursor2;
+ uint32_t timingH;
+ uint32_t timingV;
+ uint32_t displayV;
+ uint32_t crtcSync;
+
+ struct nv04_crtc_reg crtc_reg[2];
+};
+
+enum nouveau_card_type {
+ NV_04 = 0x00,
+ NV_10 = 0x10,
+ NV_20 = 0x20,
+ NV_30 = 0x30,
+ NV_40 = 0x40,
+ NV_50 = 0x50,
+};
+
+struct drm_nouveau_private {
+ struct drm_device *dev;
+ enum {
+ NOUVEAU_CARD_INIT_DOWN,
+ NOUVEAU_CARD_INIT_DONE,
+ NOUVEAU_CARD_INIT_FAILED
+ } init_state;
+
+ /* the card type, takes NV_* as values */
+ enum nouveau_card_type card_type;
+ /* exact chipset, derived from NV_PMC_BOOT_0 */
+ int chipset;
+ int flags;
+
+ void __iomem *mmio;
+ void __iomem *ramin;
+ uint32_t ramin_size;
+
+ struct workqueue_struct *wq;
+ struct work_struct irq_work;
+
+ struct list_head vbl_waiting;
+
+ struct {
+ struct ttm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ spinlock_t bo_list_lock;
+ struct list_head bo_list;
+ atomic_t validate_sequence;
+ } ttm;
+
+ struct fb_info *fbdev_info;
+
+ int fifo_alloc_count;
+ struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+
+ struct nouveau_engine engine;
+ struct nouveau_channel *channel;
+
+ /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
+ struct nouveau_gpuobj *ramht;
+ uint32_t ramin_rsvd_vram;
+ uint32_t ramht_offset;
+ uint32_t ramht_size;
+ uint32_t ramht_bits;
+ uint32_t ramfc_offset;
+ uint32_t ramfc_size;
+ uint32_t ramro_offset;
+ uint32_t ramro_size;
+
+ /* base physical addresses */
+ uint64_t fb_phys;
+ uint64_t fb_available_size;
+ uint64_t fb_mappable_pages;
+ uint64_t fb_aper_free;
+
+ struct {
+ enum {
+ NOUVEAU_GART_NONE = 0,
+ NOUVEAU_GART_AGP,
+ NOUVEAU_GART_SGDMA
+ } type;
+ uint64_t aper_base;
+ uint64_t aper_size;
+ uint64_t aper_free;
+
+ struct nouveau_gpuobj *sg_ctxdma;
+ struct page *sg_dummy_page;
+ dma_addr_t sg_dummy_bus;
+
+ /* no-TTM hack */
+ struct drm_ttm_backend *sg_be;
+ unsigned long sg_handle;
+ } gart_info;
+
+ /* G8x/G9x virtual address space */
+ uint64_t vm_gart_base;
+ uint64_t vm_gart_size;
+ uint64_t vm_vram_base;
+ uint64_t vm_vram_size;
+ uint64_t vm_end;
+ struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
+ int vm_vram_pt_nr;
+
+ /* the mtrr covering the FB */
+ int fb_mtrr;
+
+ struct mem_block *ramin_heap;
+
+ /* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
+ uint32_t ctx_table_size;
+ struct nouveau_gpuobj_ref *ctx_table;
+
+ struct list_head gpuobj_list;
+
+ struct nvbios VBIOS;
+ struct nouveau_bios_info *vbios;
+
+ struct nv04_mode_state mode_reg;
+ struct nv04_mode_state saved_reg;
+ uint32_t saved_vga_font[4][16384];
+ uint32_t crtc_owner;
+ uint32_t dac_users[4];
+
+ struct nouveau_suspend_resume {
+ uint32_t fifo_mode;
+ uint32_t graph_ctx_control;
+ uint32_t graph_state;
+ uint32_t *ramin_copy;
+ uint64_t ramin_size;
+ } susres;
+
+ struct backlight_device *backlight;
+ bool acpi_dsm;
+
+ struct nouveau_channel *evo;
+
+ struct {
+ struct dentry *channel_root;
+ } debugfs;
+};
+
+static inline struct drm_nouveau_private *
+nouveau_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct drm_nouveau_private, ttm.bdev);
+}
+
+static inline int
+nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *prev;
+
+ if (!pnvbo)
+ return -EINVAL;
+ prev = *pnvbo;
+
+ *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
+ if (prev) {
+ struct ttm_buffer_object *bo = &prev->bo;
+
+ ttm_bo_unref(&bo);
+ }
+
+ return 0;
+}
+
+#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
+ NV_ERROR(dev, "called without init\n"); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (!nouveau_channel_owner(dev, (cl), (id))) { \
+ NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
+ DRM_CURRENTPID, (id)); \
+ return -EPERM; \
+ } \
+ (ch) = nv->fifos[(id)]; \
+} while (0)
+
+/* nouveau_drv.c */
+extern int nouveau_noagp;
+extern int nouveau_duallink;
+extern int nouveau_uscript_lvds;
+extern int nouveau_uscript_tmds;
+extern int nouveau_vram_pushbuf;
+extern int nouveau_vram_notify;
+extern int nouveau_fbpercrtc;
+extern char *nouveau_tv_norm;
+extern int nouveau_reg_debug;
+extern char *nouveau_vbios;
+extern int nouveau_ctxfw;
+
+/* nouveau_state.c */
+extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern int nouveau_load(struct drm_device *, unsigned long flags);
+extern int nouveau_firstopen(struct drm_device *);
+extern void nouveau_lastclose(struct drm_device *);
+extern int nouveau_unload(struct drm_device *);
+extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_for_idle(struct drm_device *);
+extern int nouveau_card_init(struct drm_device *);
+extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_resume(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_mem.c */
+extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
+ uint64_t size);
+extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
+ uint64_t size, int align2,
+ struct drm_file *, int tail);
+extern void nouveau_mem_takedown(struct mem_block **heap);
+extern void nouveau_mem_free_block(struct mem_block *);
+extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
+extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
+extern int nouveau_mem_init(struct drm_device *);
+extern int nouveau_mem_init_agp(struct drm_device *);
+extern void nouveau_mem_close(struct drm_device *);
+extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
+ uint32_t size, uint32_t flags,
+ uint64_t phys);
+extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
+ uint32_t size);
+
+/* nouveau_notifier.c */
+extern int nouveau_notifier_init_channel(struct nouveau_channel *);
+extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
+extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
+ int count, uint32_t *offset);
+extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
+extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_channel.c */
+extern struct drm_ioctl_desc nouveau_ioctls[];
+extern int nouveau_max_ioctl;
+extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
+extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
+ int channel);
+extern int nouveau_channel_alloc(struct drm_device *dev,
+ struct nouveau_channel **chan,
+ struct drm_file *file_priv,
+ uint32_t fb_ctxdma, uint32_t tt_ctxdma);
+extern void nouveau_channel_free(struct nouveau_channel *);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);
+
+/* nouveau_object.c */
+extern int nouveau_gpuobj_early_init(struct drm_device *);
+extern int nouveau_gpuobj_init(struct drm_device *);
+extern void nouveau_gpuobj_takedown(struct drm_device *);
+extern void nouveau_gpuobj_late_takedown(struct drm_device *);
+extern int nouveau_gpuobj_suspend(struct drm_device *dev);
+extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
+extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
+ uint32_t vram_h, uint32_t tt_h);
+extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
+extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
+ uint32_t size, int align, uint32_t flags,
+ struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
+ uint32_t handle, struct nouveau_gpuobj *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_del(struct drm_device *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret);
+extern int nouveau_gpuobj_new_ref(struct drm_device *,
+ struct nouveau_channel *alloc_chan,
+ struct nouveau_channel *ref_chan,
+ uint32_t handle, uint32_t size, int align,
+ uint32_t flags, struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *,
+ uint32_t p_offset, uint32_t b_offset,
+ uint32_t size, uint32_t flags,
+ struct nouveau_gpuobj **,
+ struct nouveau_gpuobj_ref**);
+extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
+ uint64_t offset, uint64_t size,
+ int access, struct nouveau_gpuobj **,
+ uint32_t *o_ret);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
+ struct nouveau_gpuobj **);
+extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_irq.c */
+extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void nouveau_irq_preinstall(struct drm_device *);
+extern int nouveau_irq_postinstall(struct drm_device *);
+extern void nouveau_irq_uninstall(struct drm_device *);
+
+/* nouveau_sgdma.c */
+extern int nouveau_sgdma_init(struct drm_device *);
+extern void nouveau_sgdma_takedown(struct drm_device *);
+extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
+ uint32_t *page);
+extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+
+/* nouveau_debugfs.c */
+#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
+extern int nouveau_debugfs_init(struct drm_minor *);
+extern void nouveau_debugfs_takedown(struct drm_minor *);
+extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
+extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
+#else
+static inline int
+nouveau_debugfs_init(struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
+{
+}
+
+static inline int
+nouveau_debugfs_channel_init(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+static inline void
+nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
+{
+}
+#endif
+
+/* nouveau_dma.c */
+extern int nouveau_dma_init(struct nouveau_channel *);
+extern int nouveau_dma_wait(struct nouveau_channel *, int size);
+
+/* nouveau_acpi.c */
+#ifdef CONFIG_ACPI
+extern int nouveau_hybrid_setup(struct drm_device *dev);
+extern bool nouveau_dsm_probe(struct drm_device *dev);
+#else
+static inline int nouveau_hybrid_setup(struct drm_device *dev)
+{
+ return 0;
+}
+static inline bool nouveau_dsm_probe(struct drm_device *dev)
+{
+ return false;
+}
+#endif
+
+/* nouveau_backlight.c */
+#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+extern int nouveau_backlight_init(struct drm_device *);
+extern void nouveau_backlight_exit(struct drm_device *);
+#else
+static inline int nouveau_backlight_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void nouveau_backlight_exit(struct drm_device *dev) { }
+#endif
+
+/* nouveau_bios.c */
+extern int nouveau_bios_init(struct drm_device *);
+extern void nouveau_bios_takedown(struct drm_device *dev);
+extern int nouveau_run_vbios_init(struct drm_device *);
+extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
+ struct dcb_entry *);
+extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
+ enum dcb_gpio_tag);
+extern struct dcb_connector_table_entry *
+nouveau_bios_connector_entry(struct drm_device *, int index);
+extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
+ struct pll_lims *);
+extern int nouveau_bios_run_display_table(struct drm_device *,
+ struct dcb_entry *,
+ uint32_t script, int pxclk);
+extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
+ int *length);
+extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
+extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
+extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
+ bool *dl, bool *if_is_24bit);
+extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
+ int head, int pxclk);
+extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
+ enum LVDS_script, int pxclk);
+
+/* nouveau_ttm.c */
+int nouveau_ttm_global_init(struct drm_nouveau_private *);
+void nouveau_ttm_global_release(struct drm_nouveau_private *);
+int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+
+/* nouveau_dp.c */
+int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ uint8_t *data, int data_nr);
+bool nouveau_dp_detect(struct drm_encoder *);
+bool nouveau_dp_link_train(struct drm_encoder *);
+
+/* nv04_fb.c */
+extern int nv04_fb_init(struct drm_device *);
+extern void nv04_fb_takedown(struct drm_device *);
+
+/* nv10_fb.c */
+extern int nv10_fb_init(struct drm_device *);
+extern void nv10_fb_takedown(struct drm_device *);
+
+/* nv40_fb.c */
+extern int nv40_fb_init(struct drm_device *);
+extern void nv40_fb_takedown(struct drm_device *);
+
+/* nv04_fifo.c */
+extern int nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_disable(struct drm_device *);
+extern void nv04_fifo_enable(struct drm_device *);
+extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern int nv04_fifo_channel_id(struct drm_device *);
+extern int nv04_fifo_create_context(struct nouveau_channel *);
+extern void nv04_fifo_destroy_context(struct nouveau_channel *);
+extern int nv04_fifo_load_context(struct nouveau_channel *);
+extern int nv04_fifo_unload_context(struct drm_device *);
+
+/* nv10_fifo.c */
+extern int nv10_fifo_init(struct drm_device *);
+extern int nv10_fifo_channel_id(struct drm_device *);
+extern int nv10_fifo_create_context(struct nouveau_channel *);
+extern void nv10_fifo_destroy_context(struct nouveau_channel *);
+extern int nv10_fifo_load_context(struct nouveau_channel *);
+extern int nv10_fifo_unload_context(struct drm_device *);
+
+/* nv40_fifo.c */
+extern int nv40_fifo_init(struct drm_device *);
+extern int nv40_fifo_create_context(struct nouveau_channel *);
+extern void nv40_fifo_destroy_context(struct nouveau_channel *);
+extern int nv40_fifo_load_context(struct nouveau_channel *);
+extern int nv40_fifo_unload_context(struct drm_device *);
+
+/* nv50_fifo.c */
+extern int nv50_fifo_init(struct drm_device *);
+extern void nv50_fifo_takedown(struct drm_device *);
+extern int nv50_fifo_channel_id(struct drm_device *);
+extern int nv50_fifo_create_context(struct nouveau_channel *);
+extern void nv50_fifo_destroy_context(struct nouveau_channel *);
+extern int nv50_fifo_load_context(struct nouveau_channel *);
+extern int nv50_fifo_unload_context(struct drm_device *);
+
+/* nv04_graph.c */
+extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
+extern int nv04_graph_init(struct drm_device *);
+extern void nv04_graph_takedown(struct drm_device *);
+extern void nv04_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
+extern int nv04_graph_create_context(struct nouveau_channel *);
+extern void nv04_graph_destroy_context(struct nouveau_channel *);
+extern int nv04_graph_load_context(struct nouveau_channel *);
+extern int nv04_graph_unload_context(struct drm_device *);
+extern void nv04_graph_context_switch(struct drm_device *);
+
+/* nv10_graph.c */
+extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
+extern int nv10_graph_init(struct drm_device *);
+extern void nv10_graph_takedown(struct drm_device *);
+extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
+extern int nv10_graph_create_context(struct nouveau_channel *);
+extern void nv10_graph_destroy_context(struct nouveau_channel *);
+extern int nv10_graph_load_context(struct nouveau_channel *);
+extern int nv10_graph_unload_context(struct drm_device *);
+extern void nv10_graph_context_switch(struct drm_device *);
+
+/* nv20_graph.c */
+extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
+extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
+extern int nv20_graph_create_context(struct nouveau_channel *);
+extern void nv20_graph_destroy_context(struct nouveau_channel *);
+extern int nv20_graph_load_context(struct nouveau_channel *);
+extern int nv20_graph_unload_context(struct drm_device *);
+extern int nv20_graph_init(struct drm_device *);
+extern void nv20_graph_takedown(struct drm_device *);
+extern int nv30_graph_init(struct drm_device *);
+
+/* nv40_graph.c */
+extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
+extern int nv40_graph_init(struct drm_device *);
+extern void nv40_graph_takedown(struct drm_device *);
+extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
+extern int nv40_graph_create_context(struct nouveau_channel *);
+extern void nv40_graph_destroy_context(struct nouveau_channel *);
+extern int nv40_graph_load_context(struct nouveau_channel *);
+extern int nv40_graph_unload_context(struct drm_device *);
+extern void nv40_grctx_init(struct nouveau_grctx *);
+
+/* nv50_graph.c */
+extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
+extern int nv50_graph_init(struct drm_device *);
+extern void nv50_graph_takedown(struct drm_device *);
+extern void nv50_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
+extern int nv50_graph_create_context(struct nouveau_channel *);
+extern void nv50_graph_destroy_context(struct nouveau_channel *);
+extern int nv50_graph_load_context(struct nouveau_channel *);
+extern int nv50_graph_unload_context(struct drm_device *);
+extern void nv50_graph_context_switch(struct drm_device *);
+
+/* nouveau_grctx.c */
+extern int nouveau_grctx_prog_load(struct drm_device *);
+extern void nouveau_grctx_vals_load(struct drm_device *,
+ struct nouveau_gpuobj *);
+extern void nouveau_grctx_fini(struct drm_device *);
+
+/* nv04_instmem.c */
+extern int nv04_instmem_init(struct drm_device *);
+extern void nv04_instmem_takedown(struct drm_device *);
+extern int nv04_instmem_suspend(struct drm_device *);
+extern void nv04_instmem_resume(struct drm_device *);
+extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
+extern void nv04_instmem_finish_access(struct drm_device *);
+
+/* nv50_instmem.c */
+extern int nv50_instmem_init(struct drm_device *);
+extern void nv50_instmem_takedown(struct drm_device *);
+extern int nv50_instmem_suspend(struct drm_device *);
+extern void nv50_instmem_resume(struct drm_device *);
+extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
+extern void nv50_instmem_finish_access(struct drm_device *);
+
+/* nv04_mc.c */
+extern int nv04_mc_init(struct drm_device *);
+extern void nv04_mc_takedown(struct drm_device *);
+
+/* nv40_mc.c */
+extern int nv40_mc_init(struct drm_device *);
+extern void nv40_mc_takedown(struct drm_device *);
+
+/* nv50_mc.c */
+extern int nv50_mc_init(struct drm_device *);
+extern void nv50_mc_takedown(struct drm_device *);
+
+/* nv04_timer.c */
+extern int nv04_timer_init(struct drm_device *);
+extern uint64_t nv04_timer_read(struct drm_device *);
+extern void nv04_timer_takedown(struct drm_device *);
+
+extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+/* nv04_dac.c */
+extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+extern int nv04_dac_output_offset(struct drm_encoder *encoder);
+extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
+
+/* nv04_dfp.c */
+extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
+extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
+ int head, bool dl);
+extern void nv04_dfp_disable(struct drm_device *dev, int head);
+extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
+
+/* nv04_tv.c */
+extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
+extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+
+/* nv17_tv.c */
+extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ uint32_t pin_mask);
+
+/* nv04_display.c */
+extern int nv04_display_create(struct drm_device *);
+extern void nv04_display_destroy(struct drm_device *);
+extern void nv04_display_restore(struct drm_device *);
+
+/* nv04_crtc.c */
+extern int nv04_crtc_create(struct drm_device *, int index);
+
+/* nouveau_bo.c */
+extern struct ttm_bo_driver nouveau_bo_driver;
+extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t flags,
+ uint32_t tile_mode, uint32_t tile_flags,
+ bool no_vm, bool mappable, struct nouveau_bo **);
+extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
+extern int nouveau_bo_unpin(struct nouveau_bo *);
+extern int nouveau_bo_map(struct nouveau_bo *);
+extern void nouveau_bo_unmap(struct nouveau_bo *);
+extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype);
+extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
+extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
+extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
+extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+
+/* nouveau_fence.c */
+struct nouveau_fence;
+extern int nouveau_fence_init(struct nouveau_channel *);
+extern void nouveau_fence_fini(struct nouveau_channel *);
+extern void nouveau_fence_update(struct nouveau_channel *);
+extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
+ bool emit);
+extern int nouveau_fence_emit(struct nouveau_fence *);
+struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
+extern bool nouveau_fence_signalled(void *obj, void *arg);
+extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int nouveau_fence_flush(void *obj, void *arg);
+extern void nouveau_fence_unref(void **obj);
+extern void *nouveau_fence_ref(void *obj);
+extern void nouveau_fence_handler(struct drm_device *dev, int channel);
+
+/* nouveau_gem.c */
+extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t flags,
+ uint32_t tile_mode, uint32_t tile_flags,
+ bool no_vm, bool mappable, struct nouveau_bo **);
+extern int nouveau_gem_object_new(struct drm_gem_object *);
+extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
+ struct drm_file *);
+extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
+ struct drm_file *);
+
+/* nv17_gpio.c */
+int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+
+#ifndef ioread32_native
+#ifdef __BIG_ENDIAN
+#define ioread16_native ioread16be
+#define iowrite16_native iowrite16be
+#define ioread32_native ioread32be
+#define iowrite32_native iowrite32be
+#else /* def __BIG_ENDIAN */
+#define ioread16_native ioread16
+#define iowrite16_native iowrite16
+#define ioread32_native ioread32
+#define iowrite32_native iowrite32
+#endif /* def __BIG_ENDIAN else */
+#endif /* !ioread32_native */
+
+/* channel control reg access */
+static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
+{
+ return ioread32_native(chan->user + reg);
+}
+
+static inline void nvchan_wr32(struct nouveau_channel *chan,
+ unsigned reg, u32 val)
+{
+ iowrite32_native(val, chan->user + reg);
+}
+
+/* register access */
+static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return ioread32_native(dev_priv->mmio + reg);
+}
+
+static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ iowrite32_native(val, dev_priv->mmio + reg);
+}
+
+static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return ioread8(dev_priv->mmio + reg);
+}
+
+static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ iowrite8(val, dev_priv->mmio + reg);
+}
+
+#define nv_wait(reg, mask, val) \
+ nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+
+/* PRAMIN access */
+static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return ioread32_native(dev_priv->ramin + offset);
+}
+
+static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ iowrite32_native(val, dev_priv->ramin + offset);
+}
+
+/* object access */
+static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
+ unsigned index)
+{
+ return nv_ri32(dev, obj->im_pramin->start + index * 4);
+}
+
+static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
+ unsigned index, u32 val)
+{
+ nv_wi32(dev, obj->im_pramin->start + index * 4, val);
+}
+
+/*
+ * Logging
+ * Argument d is (struct drm_device *).
+ */
+#define NV_PRINTK(level, d, fmt, arg...) \
+ printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
+ pci_name(d->pdev), ##arg)
+#ifndef NV_DEBUG_NOTRACE
+#define NV_DEBUG(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_DRIVER) { \
+ NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
+ __LINE__, ##arg); \
+ } \
+} while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_KMS) { \
+ NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
+ __LINE__, ##arg); \
+ } \
+} while (0)
+#else
+#define NV_DEBUG(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_DRIVER) \
+ NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
+} while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+ if (drm_debug & DRM_UT_KMS) \
+ NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
+} while (0)
+#endif
+#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
+#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
+#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
+#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
+#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+
+/* nouveau_reg_debug bitmask */
+enum {
+ NOUVEAU_REG_DEBUG_MC = 0x1,
+ NOUVEAU_REG_DEBUG_VIDEO = 0x2,
+ NOUVEAU_REG_DEBUG_FB = 0x4,
+ NOUVEAU_REG_DEBUG_EXTDEV = 0x8,
+ NOUVEAU_REG_DEBUG_CRTC = 0x10,
+ NOUVEAU_REG_DEBUG_RAMDAC = 0x20,
+ NOUVEAU_REG_DEBUG_VGACRTC = 0x40,
+ NOUVEAU_REG_DEBUG_RMVIO = 0x80,
+ NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
+ NOUVEAU_REG_DEBUG_EVO = 0x200,
+};
+
+#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
+ NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
+} while (0)
+
+static inline bool
+nv_two_heads(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const int impl = dev->pci_device & 0x0ff0;
+
+ if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
+ impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
+ return true;
+
+ return false;
+}
+
+static inline bool
+nv_gf4_disp_arch(struct drm_device *dev)
+{
+ return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+}
+
+static inline bool
+nv_two_reg_pll(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const int impl = dev->pci_device & 0x0ff0;
+
+ if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
+ return true;
+ return false;
+}
+
+#define NV50_NVSW 0x0000506e
+#define NV50_NVSW_DMA_SEMAPHORE 0x00000060
+#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064
+#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068
+#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c
+#define NV50_NVSW_DMA_VBLSEM 0x0000018c
+#define NV50_NVSW_VBLSEM_OFFSET 0x00000400
+#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
+#define NV50_NVSW_VBLSEM_RELEASE 0x00000408
+
+#endif /* __NOUVEAU_DRV_H__ */
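All register access in the header above funnels through ioread32_native()/iowrite32_native() on dev_priv->mmio, and nv_wait() is a thin wrapper that polls nouveau_wait_until() with a fixed 2000000000ULL timeout (two seconds, assuming nanoseconds) until (reg & mask) == val. A minimal usage sketch of these helpers follows; the status register and busy bit are hypothetical, chosen only for illustration:

static int example_wait_engine_idle(struct drm_device *dev)
{
	const uint32_t reg = 0x00400700;	/* hypothetical status register */
	const uint32_t busy = 0x00000001;	/* hypothetical busy bit */

	/* nv_wait() expands to nouveau_wait_until(dev, 2000000000ULL, reg, mask, val) */
	if (!nv_wait(reg, busy, 0)) {
		NV_ERROR(dev, "timeout, status 0x%08x\n", nv_rd32(dev, reg));
		return -EBUSY;
	}

	return 0;
}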
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
new file mode 100644
index 00000000000..bc4a24029ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_ENCODER_H__
+#define __NOUVEAU_ENCODER_H__
+
+#include "drm_encoder_slave.h"
+#include "nouveau_drv.h"
+
+#define NV_DPMS_CLEARED 0x80
+
+struct nouveau_encoder {
+ struct drm_encoder_slave base;
+
+ struct dcb_entry *dcb;
+ int or;
+
+ struct drm_display_mode mode;
+ int last_dpms;
+
+ struct nv04_output_reg restore;
+
+ void (*disconnect)(struct nouveau_encoder *encoder);
+
+ union {
+ struct {
+ int dpcd_version;
+ int link_nr;
+ int link_bw;
+ } dp;
+ };
+};
+
+static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
+{
+ struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+ return container_of(slave, struct nouveau_encoder, base);
+}
+
+static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
+{
+ return &enc->base.base;
+}
+
+struct nouveau_connector *
+nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
+int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+
+struct bit_displayport_encoder_table {
+ uint32_t match;
+ uint8_t record_nr;
+ uint8_t unknown;
+ uint16_t script0;
+ uint16_t script1;
+ uint16_t unknown_table;
+} __attribute__ ((packed));
+
+struct bit_displayport_encoder_table_entry {
+ uint8_t vs_level;
+ uint8_t pre_level;
+ uint8_t reg0;
+ uint8_t reg1;
+ uint8_t reg2;
+} __attribute__ ((packed));
+
+#endif /* __NOUVEAU_ENCODER_H__ */
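struct nouveau_encoder embeds struct drm_encoder_slave, so the driver moves between the DRM core's view of the object and its own wrapper with to_drm_encoder() and nouveau_encoder(), the latter recovering the wrapper via container_of(). A short sketch of that round trip, assuming an already-initialised encoder:

static void example_encoder_roundtrip(struct nouveau_encoder *nv_encoder)
{
	/* hand the embedded drm_encoder to the DRM core ... */
	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);

	/* ... and recover the nouveau wrapper from what the core passes back */
	struct nouveau_encoder *again = nouveau_encoder(encoder);

	again->last_dpms = NV_DPMS_CLEARED;	/* same object as nv_encoder */
}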
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
new file mode 100644
index 00000000000..4a3f31aa194
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_FB_H__
+#define __NOUVEAU_FB_H__
+
+struct nouveau_framebuffer {
+ struct drm_framebuffer base;
+ struct nouveau_bo *nvbo;
+};
+
+static inline struct nouveau_framebuffer *
+nouveau_framebuffer(struct drm_framebuffer *fb)
+{
+ return container_of(fb, struct nouveau_framebuffer, base);
+}
+
+extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
+
+struct drm_framebuffer *
+nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
+ struct drm_mode_fb_cmd *);
+
+#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 00000000000..84af25c238b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright © 2007 David Airlie
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * David Airlie
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/screen_info.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_fbcon_sync(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int ret, i;
+
+ if (!chan || !chan->accel_done ||
+ info->state != FBINFO_STATE_RUNNING ||
+ info->flags & FBINFO_HWACCEL_DISABLED)
+ return 0;
+
+ if (RING_SPACE(chan, 4)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ return 0;
+ }
+
+ BEGIN_RING(chan, 0, 0x0104, 1);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, 0, 0x0100, 1);
+ OUT_RING(chan, 0);
+ nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+ FIRE_RING(chan);
+
+ ret = -EBUSY;
+ for (i = 0; i < 100000; i++) {
+ if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+ ret = 0;
+ break;
+ }
+ DRM_UDELAY(1);
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ return 0;
+ }
+
+ chan->accel_done = false;
+ return 0;
+}
+
+static struct fb_ops nouveau_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->lut.r[regno] = red;
+ nv_crtc->lut.g[regno] = green;
+ nv_crtc->lut.b[regno] = blue;
+}
+
+static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ *red = nv_crtc->lut.r[regno];
+ *green = nv_crtc->lut.g[regno];
+ *blue = nv_crtc->lut.b[regno];
+}
+
+static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+ .gamma_set = nouveau_fbcon_gamma_set,
+ .gamma_get = nouveau_fbcon_gamma_get
+};
+
+#if defined(__i386__) || defined(__x86_64__)
+static bool
+nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int ramin;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
+ screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return false;
+
+ if (screen_info.lfb_base < pci_resource_start(pdev, 1))
+ goto not_fb;
+
+ if (screen_info.lfb_base + screen_info.lfb_size >=
+ pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
+ goto not_fb;
+
+ return true;
+not_fb:
+ ramin = 2;
+ if (pci_resource_len(pdev, ramin) == 0) {
+ ramin = 3;
+ if (pci_resource_len(pdev, ramin) == 0)
+ return false;
+ }
+
+ if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
+ return false;
+
+ if (screen_info.lfb_base + screen_info.lfb_size >=
+ pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
+ return false;
+
+ return true;
+}
+#endif
+
+void
+nouveau_fbcon_zfill(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct fb_info *info = dev_priv->fbdev_info;
+ struct fb_fillrect rect;
+
+ /* Clear the entire fbcon. The drm will program every connector
+ * with its preferred mode. If the sizes differ, one display will
+ * quite likely have garbage around the console.
+ */
+ rect.dx = rect.dy = 0;
+ rect.width = info->var.xres_virtual;
+ rect.height = info->var.yres_virtual;
+ rect.color = 0;
+ rect.rop = ROP_COPY;
+ info->fbops->fb_fillrect(info, &rect);
+}
+
+static int
+nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
+ uint32_t fb_height, uint32_t surface_width,
+ uint32_t surface_height, uint32_t surface_depth,
+ uint32_t surface_bpp, struct drm_framebuffer **pfb)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct fb_info *info;
+ struct nouveau_fbcon_par *par;
+ struct drm_framebuffer *fb;
+ struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_bo *nvbo;
+ struct drm_mode_fb_cmd mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ int size, ret;
+
+ mode_cmd.width = surface_width;
+ mode_cmd.height = surface_height;
+
+ mode_cmd.bpp = surface_bpp;
+ mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
+ mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+ mode_cmd.depth = surface_depth;
+
+ size = mode_cmd.pitch * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nvbo);
+ if (ret) {
+ NV_ERROR(dev, "failed to allocate framebuffer\n");
+ goto out;
+ }
+
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "failed to pin fb: %d\n", ret);
+ nouveau_bo_ref(NULL, &nvbo);
+ goto out;
+ }
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ NV_ERROR(dev, "failed to map fb: %d\n", ret);
+ nouveau_bo_unpin(nvbo);
+ nouveau_bo_ref(NULL, &nvbo);
+ goto out;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
+ if (!fb) {
+ ret = -ENOMEM;
+ NV_ERROR(dev, "failed to allocate fb.\n");
+ goto out_unref;
+ }
+
+ list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
+
+ nouveau_fb = nouveau_framebuffer(fb);
+ *pfb = fb;
+
+ info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
+ par = info->par;
+ par->helper.funcs = &nouveau_fbcon_helper_funcs;
+ par->helper.dev = dev;
+ ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
+ if (ret)
+ goto out_unref;
+ dev_priv->fbdev_info = info;
+
+ strcpy(info->fix.id, "nouveaufb");
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+ info->fbops = &nouveau_fbcon_ops;
+ info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
+ dev_priv->vm_vram_base;
+ info->fix.smem_len = size;
+
+ info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
+ info->screen_size = size;
+
+ drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
+
+ /* FIXME: we really shouldn't expose mmio space at all */
+ info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
+ info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
+
+ /* Set aperture base/size for vesafb takeover */
+#if defined(__i386__) || defined(__x86_64__)
+ if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
+ /* Some NVIDIA VBIOSes are stupid and decide to put the
+ * framebuffer in the middle of the PRAMIN BAR for
+ * whatever reason. We need to know the exact lfb_base
+ * to get vesafb kicked off, and the only reliable way
+ * we have left is to find out lfb_base the same way
+ * vesafb did.
+ */
+ info->aperture_base = screen_info.lfb_base;
+ info->aperture_size = screen_info.lfb_size;
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
+ info->aperture_size *= 65536;
+ } else
+#endif
+ {
+ info->aperture_base = info->fix.mmio_start;
+ info->aperture_size = info->fix.mmio_len;
+ }
+
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+
+ fb->fbdev = info;
+
+ par->nouveau_fb = nouveau_fb;
+ par->dev = dev;
+
+ if (dev_priv->channel) {
+ switch (dev_priv->card_type) {
+ case NV_50:
+ nv50_fbcon_accel_init(info);
+ break;
+ default:
+ nv04_fbcon_accel_init(info);
+ break;
+ }
+ }
+
+ nouveau_fbcon_zfill(dev);
+
+ /* To allow resizing without swapping buffers */
+ NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
+ nouveau_fb->base.width,
+ nouveau_fb->base.height,
+ nvbo->bo.offset, nvbo);
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+
+out_unref:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ return ret;
+}
+
+int
+nouveau_fbcon_probe(struct drm_device *dev)
+{
+ NV_DEBUG_KMS(dev, "\n");
+
+ return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
+}
+
+int
+nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+{
+ struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
+ struct fb_info *info;
+
+ if (!fb)
+ return -EINVAL;
+
+ info = fb->fbdev;
+ if (info) {
+ struct nouveau_fbcon_par *par = info->par;
+
+ unregister_framebuffer(info);
+ nouveau_bo_unmap(nouveau_fb->nvbo);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(nouveau_fb->nvbo->gem);
+ nouveau_fb->nvbo = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ if (par)
+ drm_fb_helper_free(&par->helper);
+ framebuffer_release(info);
+ }
+
+ return 0;
+}
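nouveau_fbcon_create() above sizes the console surface by padding the pitch to a 256-byte multiple and the total allocation to a whole page before backing it with a pinned, kmapped VRAM buffer object. A worked sketch of that sizing for an example 1280x1024, 32 bpp console (ALIGN and PAGE_SIZE as defined by the kernel):

static unsigned long example_fbcon_surface_size(void)
{
	uint32_t width = 1280, height = 1024, bpp = 32;
	uint32_t pitch = ALIGN(width * (bpp >> 3), 256);	/* 5120 bytes */

	return ALIGN((unsigned long)pitch * height, PAGE_SIZE);	/* 5242880 bytes */
}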
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
new file mode 100644
index 00000000000..8531140fedb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_FBCON_H__
+#define __NOUVEAU_FBCON_H__
+
+#include "drm_fb_helper.h"
+
+struct nouveau_fbcon_par {
+ struct drm_fb_helper helper;
+ struct drm_device *dev;
+ struct nouveau_framebuffer *nouveau_fb;
+};
+
+int nouveau_fbcon_probe(struct drm_device *dev);
+int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+void nouveau_fbcon_restore(void);
+void nouveau_fbcon_zfill(struct drm_device *dev);
+
+int nv04_fbcon_accel_init(struct fb_info *info);
+int nv50_fbcon_accel_init(struct fb_info *info);
+
+#endif /* __NOUVEAU_FBCON_H__ */
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 00000000000..dacac9a0842
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
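+/* NV10 and later channels expose a hardware reference counter the completed
+ * fence sequence can be read back from; older chips fall back to the last
+ * sequence number seen by the IRQ handler.
+ */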
+#define USE_REFCNT (dev_priv->card_type >= NV_10)
+
+struct nouveau_fence {
+ struct nouveau_channel *channel;
+ struct kref refcount;
+ struct list_head entry;
+
+ uint32_t sequence;
+ bool signalled;
+};
+
+static inline struct nouveau_fence *
+nouveau_fence(void *sync_obj)
+{
+ return (struct nouveau_fence *)sync_obj;
+}
+
+static void
+nouveau_fence_del(struct kref *ref)
+{
+ struct nouveau_fence *fence =
+ container_of(ref, struct nouveau_fence, refcount);
+
+ kfree(fence);
+}
+
+void
+nouveau_fence_update(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct list_head *entry, *tmp;
+ struct nouveau_fence *fence;
+ uint32_t sequence;
+
+ if (USE_REFCNT)
+ sequence = nvchan_rd32(chan, 0x48);
+ else
+ sequence = chan->fence.last_sequence_irq;
+
+ if (chan->fence.sequence_ack == sequence)
+ return;
+ chan->fence.sequence_ack = sequence;
+
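+	/* Retire every pending fence up to and including the newly
+	 * acknowledged sequence number.
+	 */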
+ list_for_each_safe(entry, tmp, &chan->fence.pending) {
+ fence = list_entry(entry, struct nouveau_fence, entry);
+
+ sequence = fence->sequence;
+ fence->signalled = true;
+ list_del(&fence->entry);
+ kref_put(&fence->refcount, nouveau_fence_del);
+
+ if (sequence == chan->fence.sequence_ack)
+ break;
+ }
+}
+
+int
+nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
+ bool emit)
+{
+ struct nouveau_fence *fence;
+ int ret = 0;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return -ENOMEM;
+ kref_init(&fence->refcount);
+ fence->channel = chan;
+
+ if (emit)
+ ret = nouveau_fence_emit(fence);
+
+ if (ret)
+ nouveau_fence_unref((void *)&fence);
+ *pfence = fence;
+ return ret;
+}
+
+struct nouveau_channel *
+nouveau_fence_channel(struct nouveau_fence *fence)
+{
+ return fence ? fence->channel : NULL;
+}
+
+int
+nouveau_fence_emit(struct nouveau_fence *fence)
+{
+ struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
+ struct nouveau_channel *chan = fence->channel;
+ unsigned long flags;
+ int ret;
+
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ nouveau_fence_update(chan);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+
+ BUG_ON(chan->fence.sequence ==
+ chan->fence.sequence_ack - 1);
+ }
+
+ fence->sequence = ++chan->fence.sequence;
+
+ kref_get(&fence->refcount);
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ list_add_tail(&fence->entry, &chan->fence.pending);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+
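+	/* Write the sequence number out through the channel's M2MF subchannel;
+	 * the method differs between the hardware ref-count path (NV10+) and
+	 * the software fallback.
+	 */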
+ BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+ OUT_RING(chan, fence->sequence);
+ FIRE_RING(chan);
+
+ return 0;
+}
+
+void
+nouveau_fence_unref(void **sync_obj)
+{
+ struct nouveau_fence *fence = nouveau_fence(*sync_obj);
+
+ if (fence)
+ kref_put(&fence->refcount, nouveau_fence_del);
+ *sync_obj = NULL;
+}
+
+void *
+nouveau_fence_ref(void *sync_obj)
+{
+ struct nouveau_fence *fence = nouveau_fence(sync_obj);
+
+ kref_get(&fence->refcount);
+ return sync_obj;
+}
+
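+/* The routines below take void pointers so they can be plugged in as TTM
+ * sync object callbacks (see the bo.sync_obj handling in nouveau_gem.c).
+ */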
+bool
+nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+{
+ struct nouveau_fence *fence = nouveau_fence(sync_obj);
+ struct nouveau_channel *chan = fence->channel;
+ unsigned long flags;
+
+ if (fence->signalled)
+ return true;
+
+ spin_lock_irqsave(&chan->fence.lock, flags);
+ nouveau_fence_update(chan);
+ spin_unlock_irqrestore(&chan->fence.lock, flags);
+ return fence->signalled;
+}
+
+int
+nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+{
+ unsigned long timeout = jiffies + (3 * DRM_HZ);
+ int ret = 0;
+
+ __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+
+ while (1) {
+ if (nouveau_fence_signalled(sync_obj, sync_arg))
+ break;
+
+ if (time_after_eq(jiffies, timeout)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ if (lazy)
+ schedule_timeout(1);
+
+ if (intr && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ return ret;
+}
+
+int
+nouveau_fence_flush(void *sync_obj, void *sync_arg)
+{
+ return 0;
+}
+
+void
+nouveau_fence_handler(struct drm_device *dev, int channel)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+
+ if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
+ chan = dev_priv->fifos[channel];
+
+ if (chan) {
+ spin_lock_irq(&chan->fence.lock);
+ nouveau_fence_update(chan);
+ spin_unlock_irq(&chan->fence.lock);
+ }
+}
+
+int
+nouveau_fence_init(struct nouveau_channel *chan)
+{
+ INIT_LIST_HEAD(&chan->fence.pending);
+ spin_lock_init(&chan->fence.lock);
+ return 0;
+}
+
+void
+nouveau_fence_fini(struct nouveau_channel *chan)
+{
+ struct list_head *entry, *tmp;
+ struct nouveau_fence *fence;
+
+ list_for_each_safe(entry, tmp, &chan->fence.pending) {
+ fence = list_entry(entry, struct nouveau_fence, entry);
+
+ fence->signalled = true;
+ list_del(&fence->entry);
+ kref_put(&fence->refcount, nouveau_fence_del);
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
new file mode 100644
index 00000000000..18fd8ac9fca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -0,0 +1,985 @@
+/*
+ * Copyright (C) 2008 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+
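+/* Debugging aid: make this non-zero to stall after every submission and dump
+ * the push buffer if the fence never signals (see nouveau_gem_ioctl_pushbuf).
+ */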
+#define nouveau_gem_pushbuf_sync(chan) 0
+
+int
+nouveau_gem_object_new(struct drm_gem_object *gem)
+{
+ return 0;
+}
+
+void
+nouveau_gem_object_del(struct drm_gem_object *gem)
+{
+	struct nouveau_bo *nvbo = gem->driver_private;
+	struct ttm_buffer_object *bo;
+
+	if (!nvbo)
+		return;
+	bo = &nvbo->bo;
+	nvbo->gem = NULL;
+
+ if (unlikely(nvbo->cpu_filp))
+ ttm_bo_synccpu_write_release(bo);
+
+ if (unlikely(nvbo->pin_refcnt)) {
+ nvbo->pin_refcnt = 1;
+ nouveau_bo_unpin(nvbo);
+ }
+
+ ttm_bo_unref(&bo);
+}
+
+int
+nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t flags, uint32_t tile_mode,
+ uint32_t tile_flags, bool no_vm, bool mappable,
+ struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
+ tile_flags, no_vm, mappable, pnvbo);
+ if (ret)
+ return ret;
+ nvbo = *pnvbo;
+
+ nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
+ if (!nvbo->gem) {
+ nouveau_bo_ref(NULL, pnvbo);
+ return -ENOMEM;
+ }
+
+ nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
+ nvbo->gem->driver_private = nvbo;
+ return 0;
+}
+
+static int
+nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ rep->domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+
+ rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ rep->offset = nvbo->bo.offset;
+ rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+ rep->tile_mode = nvbo->tile_mode;
+ rep->tile_flags = nvbo->tile_flags;
+ return 0;
+}
+
+static bool
+nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
+ switch (tile_flags) {
+ case 0x0000:
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7000:
+ case 0x7400:
+ case 0x7a00:
+ case 0xe000:
+ break;
+ default:
+ NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
+ return false;
+ }
+
+ return true;
+}
+
+int
+nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_gem_new *req = data;
+ struct nouveau_bo *nvbo = NULL;
+ struct nouveau_channel *chan = NULL;
+ uint32_t flags = 0;
+ int ret = 0;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
+ dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
+
+ if (req->channel_hint) {
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
+ file_priv, chan);
+ }
+
+ if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+ if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
+ flags |= TTM_PL_FLAG_SYSTEM;
+
+ if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+ return -EINVAL;
+
+ ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
+ req->info.tile_mode, req->info.tile_flags, false,
+ (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
+ &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gem_info(nvbo->gem, &req->info);
+ if (ret)
+ goto out;
+
+ ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(nvbo->gem);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ drm_gem_object_unreference(nvbo->gem);
+ return ret;
+}
+
+static int
+nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
+ uint32_t write_domains, uint32_t valid_domains)
+{
+ struct nouveau_bo *nvbo = gem->driver_private;
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ uint64_t flags;
+
+ if (!valid_domains || (!read_domains && !write_domains))
+ return -EINVAL;
+
+ if (write_domains) {
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ (write_domains & NOUVEAU_GEM_DOMAIN_GART))
+ flags = TTM_PL_FLAG_TT;
+ else
+ return -EINVAL;
+ } else {
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ bo->mem.mem_type == TTM_PL_VRAM)
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
+ bo->mem.mem_type == TTM_PL_TT)
+ flags = TTM_PL_FLAG_TT;
+ else
+ if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ flags = TTM_PL_FLAG_TT;
+ }
+
+ nouveau_bo_placement_set(nvbo, flags);
+ return 0;
+}
+
+struct validate_op {
+ struct nouveau_fence *fence;
+ struct list_head vram_list;
+ struct list_head gart_list;
+ struct list_head both_list;
+};
+
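+/* Drop the reservations taken while validating; when a fence is supplied it
+ * is first attached to each buffer as its new sync object.
+ */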
+static void
+validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+{
+ struct list_head *entry, *tmp;
+ struct nouveau_bo *nvbo;
+
+ list_for_each_safe(entry, tmp, list) {
+ nvbo = list_entry(entry, struct nouveau_bo, entry);
+ if (likely(fence)) {
+ struct nouveau_fence *prev_fence;
+
+ spin_lock(&nvbo->bo.lock);
+ prev_fence = nvbo->bo.sync_obj;
+ nvbo->bo.sync_obj = nouveau_fence_ref(fence);
+ spin_unlock(&nvbo->bo.lock);
+ nouveau_fence_unref((void *)&prev_fence);
+ }
+
+ list_del(&nvbo->entry);
+ nvbo->reserved_by = NULL;
+ ttm_bo_unreserve(&nvbo->bo);
+ drm_gem_object_unreference(nvbo->gem);
+ }
+}
+
+static void
+validate_fini(struct validate_op *op, bool success)
+{
+ struct nouveau_fence *fence = op->fence;
+
+ if (unlikely(!success))
+ op->fence = NULL;
+
+ validate_fini_list(&op->vram_list, op->fence);
+ validate_fini_list(&op->gart_list, op->fence);
+ validate_fini_list(&op->both_list, op->fence);
+ nouveau_fence_unref((void *)&fence);
+}
+
+static int
+validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ int nr_buffers, struct validate_op *op)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t sequence;
+ int trycnt = 0;
+ int ret, i;
+
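+	/* Each validation attempt uses a fresh sequence number; if a buffer is
+	 * found already reserved we back off, wait for it, and retry the whole
+	 * list.
+	 */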
+ sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
+retry:
+ if (++trycnt > 100000) {
+ NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nr_buffers; i++) {
+ struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+
+ gem = drm_gem_object_lookup(dev, file_priv, b->handle);
+ if (!gem) {
+ NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
+ validate_fini(op, NULL);
+ return -EINVAL;
+ }
+ nvbo = gem->driver_private;
+
+ if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
+ NV_ERROR(dev, "multiple instances of buffer %d on "
+ "validation list\n", b->handle);
+ validate_fini(op, NULL);
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+ if (ret) {
+ validate_fini(op, NULL);
+ if (ret == -EAGAIN)
+ ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+ drm_gem_object_unreference(gem);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+
+ nvbo->reserved_by = file_priv;
+ nvbo->pbbo_index = i;
+ if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+ (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
+ list_add_tail(&nvbo->entry, &op->both_list);
+ else
+ if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+ list_add_tail(&nvbo->entry, &op->vram_list);
+ else
+ if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+ list_add_tail(&nvbo->entry, &op->gart_list);
+ else {
+ NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
+ b->valid_domains);
+ validate_fini(op, NULL);
+ return -EINVAL;
+ }
+
+ if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
+ validate_fini(op, NULL);
+
+ if (nvbo->cpu_filp == file_priv) {
+ NV_ERROR(dev, "bo %p mapped by process trying "
+ "to validate it!\n", nvbo);
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+ }
+
+ return 0;
+}
+
+static int
+validate_list(struct nouveau_channel *chan, struct list_head *list,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+{
+ struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
+ (void __force __user *)(uintptr_t)user_pbbo_ptr;
+ struct nouveau_bo *nvbo;
+ int ret, relocs = 0;
+
+ list_for_each_entry(nvbo, list, entry) {
+ struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+ struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+
+ if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+ b->write_domains,
+ b->valid_domains);
+ if (unlikely(ret))
+ return ret;
+
+ nvbo->channel = chan;
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+ false, false);
+ nvbo->channel = NULL;
+ if (unlikely(ret))
+ return ret;
+
+ if (nvbo->bo.offset == b->presumed_offset &&
+ ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+ continue;
+
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed_offset = nvbo->bo.offset;
+ b->presumed_ok = 0;
+ relocs++;
+
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+ return -EFAULT;
+ }
+
+ return relocs;
+}
+
+static int
+nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
+ struct drm_file *file_priv,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo,
+ uint64_t user_buffers, int nr_buffers,
+ struct validate_op *op, int *apply_relocs)
+{
+ int ret, relocs = 0;
+
+ INIT_LIST_HEAD(&op->vram_list);
+ INIT_LIST_HEAD(&op->gart_list);
+ INIT_LIST_HEAD(&op->both_list);
+
+ ret = nouveau_fence_new(chan, &op->fence, false);
+ if (ret)
+ return ret;
+
+ if (nr_buffers == 0)
+ return 0;
+
+ ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
+ if (unlikely(ret))
+ return ret;
+
+ ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+ if (unlikely(ret < 0)) {
+ validate_fini(op, NULL);
+ return ret;
+ }
+ relocs += ret;
+
+ ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+ if (unlikely(ret < 0)) {
+ validate_fini(op, NULL);
+ return ret;
+ }
+ relocs += ret;
+
+ ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+ if (unlikely(ret < 0)) {
+ validate_fini(op, NULL);
+ return ret;
+ }
+ relocs += ret;
+
+ *apply_relocs = relocs;
+ return 0;
+}
+
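+/* Copy an array of nmemb elements of the given size in from userspace into a
+ * freshly allocated kernel buffer.
+ */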
+static inline void *
+u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
+{
+ void *mem;
+ void __user *userptr = (void __force __user *)(uintptr_t)user;
+
+ mem = kmalloc(nmemb * size, GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
+ kfree(mem);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return mem;
+}
+
+static int
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
+ struct drm_nouveau_gem_pushbuf_bo *bo,
+ int nr_relocs, uint64_t ptr_relocs,
+ int nr_dwords, int first_dword,
+ uint32_t *pushbuf, bool is_iomem)
+{
+ struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
+ struct drm_device *dev = chan->dev;
+ int ret = 0, i;
+
+ reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+ if (IS_ERR(reloc))
+ return PTR_ERR(reloc);
+
+ for (i = 0; i < nr_relocs; i++) {
+ struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
+ struct drm_nouveau_gem_pushbuf_bo *b;
+ uint32_t data;
+
+ if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
+ r->reloc_index >= first_dword + nr_dwords) {
+ NV_ERROR(dev, "Bad relocation %d\n", i);
+ NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
+ NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
+ ret = -EINVAL;
+ break;
+ }
+
+ b = &bo[r->bo_index];
+ if (b->presumed_ok)
+ continue;
+
+ if (r->flags & NOUVEAU_GEM_RELOC_LOW)
+ data = b->presumed_offset + r->data;
+ else
+ if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
+ data = (b->presumed_offset + r->data) >> 32;
+ else
+ data = r->data;
+
+ if (r->flags & NOUVEAU_GEM_RELOC_OR) {
+ if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+ data |= r->tor;
+ else
+ data |= r->vor;
+ }
+
+ if (is_iomem)
+ iowrite32_native(data, (void __force __iomem *)
+ &pushbuf[r->reloc_index]);
+ else
+ pushbuf[r->reloc_index] = data;
+ }
+
+ kfree(reloc);
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_pushbuf *req = data;
+ struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+ struct nouveau_channel *chan;
+ struct validate_op op;
+ uint32_t *pushbuf = NULL;
+ int ret = 0, do_reloc = 0, i;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+
+ if (req->nr_dwords >= chan->dma.max ||
+ req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
+ req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
+ NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
+ NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
+ chan->dma.max - 1);
+ NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
+ NOUVEAU_GEM_MAX_BUFFERS);
+ NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
+ NOUVEAU_GEM_MAX_RELOCS);
+ return -EINVAL;
+ }
+
+ pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
+ if (IS_ERR(pushbuf))
+ return PTR_ERR(pushbuf);
+
+ bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+ if (IS_ERR(bo)) {
+ kfree(pushbuf);
+ return PTR_ERR(bo);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Validate buffer list */
+ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+ req->nr_buffers, &op, &do_reloc);
+ if (ret)
+ goto out;
+
+ /* Apply any relocations that are required */
+ if (do_reloc) {
+ ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
+ bo, req->nr_relocs,
+ req->relocs,
+ req->nr_dwords, 0,
+ pushbuf, false);
+ if (ret)
+ goto out;
+ }
+
+ /* Emit push buffer to the hw
+ */
+ ret = RING_SPACE(chan, req->nr_dwords);
+ if (ret)
+ goto out;
+
+ OUT_RINGp(chan, pushbuf, req->nr_dwords);
+
+ ret = nouveau_fence_emit(op.fence);
+ if (ret) {
+ NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+ WIND_RING(chan);
+ goto out;
+ }
+
+ if (nouveau_gem_pushbuf_sync(chan)) {
+ ret = nouveau_fence_wait(op.fence, NULL, false, false);
+ if (ret) {
+ for (i = 0; i < req->nr_dwords; i++)
+ NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
+ NV_ERROR(dev, "^^ above push buffer is fail :(\n");
+ }
+ }
+
+out:
+ validate_fini(&op, ret == 0);
+ mutex_unlock(&dev->struct_mutex);
+ kfree(pushbuf);
+ kfree(bo);
+ return ret;
+}
+
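+/* NV20 and later can CALL into a user push buffer and return; older chips
+ * have to JMP into it and back, which means patching the return address in.
+ */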
+#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
+
+int
+nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_gem_pushbuf_call *req = data;
+ struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+ struct nouveau_channel *chan;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *pbbo;
+ struct validate_op op;
+ int i, ret = 0, do_reloc = 0;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+
+ if (unlikely(req->handle == 0))
+ goto out_next;
+
+ if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
+ req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
+ NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
+ NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
+ NOUVEAU_GEM_MAX_BUFFERS);
+ NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
+ NOUVEAU_GEM_MAX_RELOCS);
+ return -EINVAL;
+ }
+
+ bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* Validate buffer list */
+ ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
+ req->nr_buffers, &op, &do_reloc);
+ if (ret) {
+ NV_ERROR(dev, "validate: %d\n", ret);
+ goto out;
+ }
+
+ /* Validate DMA push buffer */
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem) {
+ NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
+ ret = -EINVAL;
+ goto out;
+ }
+ pbbo = nouveau_gem_object(gem);
+
+ ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
+ chan->fence.sequence);
+ if (ret) {
+ NV_ERROR(dev, "resv pb: %d\n", ret);
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
+ nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
+ ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
+ if (ret) {
+ NV_ERROR(dev, "validate pb: %d\n", ret);
+ ttm_bo_unreserve(&pbbo->bo);
+ drm_gem_object_unreference(gem);
+ goto out;
+ }
+
+ list_add_tail(&pbbo->entry, &op.both_list);
+
+	/* If the presumed return address doesn't match, we need to map the
+	 * push buffer and fix it.
+	 */
+ if (!PUSHBUF_CAL) {
+ uint32_t retaddy;
+
+ if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
+ ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
+ if (ret) {
+ NV_ERROR(dev, "jmp_space: %d\n", ret);
+ goto out;
+ }
+ }
+
+ retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+ retaddy |= 0x20000000;
+ if (retaddy != req->suffix0) {
+ req->suffix0 = retaddy;
+ do_reloc = 1;
+ }
+ }
+
+ /* Apply any relocations that are required */
+ if (do_reloc) {
+ void *pbvirt;
+ bool is_iomem;
+ ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
+ &pbbo->kmap);
+ if (ret) {
+ NV_ERROR(dev, "kmap pb: %d\n", ret);
+ goto out;
+ }
+
+ pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
+ ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
+ req->nr_relocs,
+ req->relocs,
+ req->nr_dwords,
+ req->offset / 4,
+ pbvirt, is_iomem);
+
+ if (!PUSHBUF_CAL) {
+ nouveau_bo_wr32(pbbo,
+ req->offset / 4 + req->nr_dwords - 2,
+ req->suffix0);
+ }
+
+ ttm_bo_kunmap(&pbbo->kmap);
+ if (ret) {
+ NV_ERROR(dev, "reloc apply: %d\n", ret);
+ goto out;
+ }
+ }
+
+ if (PUSHBUF_CAL) {
+ ret = RING_SPACE(chan, 2);
+ if (ret) {
+ NV_ERROR(dev, "cal_space: %d\n", ret);
+ goto out;
+ }
+ OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
+ req->offset) | 2);
+ OUT_RING(chan, 0);
+ } else {
+ ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+ if (ret) {
+ NV_ERROR(dev, "jmp_space: %d\n", ret);
+ goto out;
+ }
+ OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
+ req->offset) | 0x20000000);
+ OUT_RING(chan, 0);
+
+ /* Space the jumps apart with NOPs. */
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(chan, 0);
+ }
+
+ ret = nouveau_fence_emit(op.fence);
+ if (ret) {
+ NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+ WIND_RING(chan);
+ goto out;
+ }
+
+out:
+ validate_fini(&op, ret == 0);
+ mutex_unlock(&dev->struct_mutex);
+ kfree(bo);
+
+out_next:
+ if (PUSHBUF_CAL) {
+ req->suffix0 = 0x00020000;
+ req->suffix1 = 0x00000000;
+ } else {
+ req->suffix0 = 0x20000000 |
+ (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
+ req->suffix1 = 0x00000000;
+ }
+
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_gem_pushbuf_call *req = data;
+
+ req->vram_available = dev_priv->fb_aper_free;
+ req->gart_available = dev_priv->gart_info.aper_free;
+
+ return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
+}
+
+static inline uint32_t
+domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
+{
+ uint32_t flags = 0;
+
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+
+ return flags;
+}
+
+int
+nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_pin *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ int ret = 0;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
+ return -EINVAL;
+ }
+
+ if (!DRM_SUSER(DRM_CURPROC))
+ return -EPERM;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -EINVAL;
+ nvbo = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
+ if (ret)
+ goto out;
+
+ req->offset = nvbo->bo.offset;
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ req->domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_pin *req = data;
+ struct drm_gem_object *gem;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -EINVAL;
+
+ ret = nouveau_bo_unpin(nouveau_gem_object(gem));
+
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_cpu_prep *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
+ int ret = -EINVAL;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return ret;
+ nvbo = nouveau_gem_object(gem);
+
+ if (nvbo->cpu_filp) {
+ if (nvbo->cpu_filp == file_priv)
+ goto out;
+
+ ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
+ if (ret)
+ goto out;
+ }
+
+ if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+ ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+ } else {
+ ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
+ if (ret == 0)
+ nvbo->cpu_filp = file_priv;
+ }
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_cpu_prep *req = data;
+ struct drm_gem_object *gem;
+ struct nouveau_bo *nvbo;
+ int ret = -EINVAL;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return ret;
+ nvbo = nouveau_gem_object(gem);
+
+ if (nvbo->cpu_filp != file_priv)
+ goto out;
+ nvbo->cpu_filp = NULL;
+
+ ttm_bo_synccpu_write_release(&nvbo->bo);
+ ret = 0;
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gem_info *req = data;
+ struct drm_gem_object *gem;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ if (!gem)
+ return -EINVAL;
+
+ ret = nouveau_gem_info(gem, req);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
new file mode 100644
index 00000000000..419f4c2b3b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+struct nouveau_ctxprog {
+ uint32_t signature;
+ uint8_t version;
+ uint16_t length;
+ uint32_t data[];
+} __attribute__ ((packed));
+
+struct nouveau_ctxvals {
+ uint32_t signature;
+ uint8_t version;
+ uint32_t length;
+ struct {
+ uint32_t offset;
+ uint32_t value;
+ } data[];
+} __attribute__ ((packed));
+
+int
+nouveau_grctx_prog_load(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ const int chipset = dev_priv->chipset;
+ const struct firmware *fw;
+ const struct nouveau_ctxprog *cp;
+ const struct nouveau_ctxvals *cv;
+ char name[32];
+ int ret, i;
+
+ if (pgraph->accel_blocked)
+ return -ENODEV;
+
+ if (!pgraph->ctxprog) {
+ sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
+ return ret;
+ }
+
+ pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
+ if (!pgraph->ctxprog) {
+ NV_ERROR(dev, "OOM copying ctxprog\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxprog, fw->data, fw->size);
+
+ cp = pgraph->ctxprog;
+ if (le32_to_cpu(cp->signature) != 0x5043564e ||
+ cp->version != 0 ||
+ le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
+ NV_ERROR(dev, "ctxprog invalid\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ if (!pgraph->ctxvals) {
+ sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
+ ret = request_firmware(&fw, name, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
+ nouveau_grctx_fini(dev);
+ return ret;
+ }
+
+ pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
+		if (!pgraph->ctxvals) {
+			NV_ERROR(dev, "OOM copying ctxvals\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -ENOMEM;
+ }
+ memcpy(pgraph->ctxvals, fw->data, fw->size);
+
+ cv = (void *)pgraph->ctxvals;
+ if (le32_to_cpu(cv->signature) != 0x5643564e ||
+ cv->version != 0 ||
+ le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
+ NV_ERROR(dev, "ctxvals invalid\n");
+ release_firmware(fw);
+ nouveau_grctx_fini(dev);
+ return -EINVAL;
+ }
+ release_firmware(fw);
+ }
+
+ cp = pgraph->ctxprog;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < le16_to_cpu(cp->length); i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
+ le32_to_cpu(cp->data[i]));
+
+ return 0;
+}
+
+void
+nouveau_grctx_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+ if (pgraph->ctxprog) {
+ kfree(pgraph->ctxprog);
+ pgraph->ctxprog = NULL;
+ }
+
+ if (pgraph->ctxvals) {
+		kfree(pgraph->ctxvals);
+ pgraph->ctxvals = NULL;
+ }
+}
+
+void
+nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_ctxvals *cv = pgraph->ctxvals;
+ int i;
+
+ if (!cv)
+ return;
+
+ for (i = 0; i < le32_to_cpu(cv->length); i++)
+ nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
+ le32_to_cpu(cv->data[i].value));
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
new file mode 100644
index 00000000000..5d39c4ce800
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
@@ -0,0 +1,133 @@
+#ifndef __NOUVEAU_GRCTX_H__
+#define __NOUVEAU_GRCTX_H__
+
+struct nouveau_grctx {
+ struct drm_device *dev;
+
+ enum {
+ NOUVEAU_GRCTX_PROG,
+ NOUVEAU_GRCTX_VALS
+ } mode;
+ void *data;
+
+ uint32_t ctxprog_max;
+ uint32_t ctxprog_len;
+ uint32_t ctxprog_reg;
+ int ctxprog_label[32];
+ uint32_t ctxvals_pos;
+ uint32_t ctxvals_base;
+};
+
+#ifdef CP_CTX
+static inline void
+cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+{
+ uint32_t *ctxprog = ctx->data;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
+ ctxprog[ctx->ctxprog_len++] = inst;
+}
+
+static inline void
+cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+{
+ cp_out(ctx, CP_LOAD_SR | val);
+}
+
+static inline void
+cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+{
+ ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
+
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+ ctx->ctxvals_pos = ctx->ctxvals_base + length;
+
+ if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
+ cp_lsr(ctx, length);
+ length = 0;
+ }
+
+ cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
+}
+
+static inline void
+cp_name(struct nouveau_grctx *ctx, int name)
+{
+ uint32_t *ctxprog = ctx->data;
+ int i;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ ctx->ctxprog_label[name] = ctx->ctxprog_len;
+ for (i = 0; i < ctx->ctxprog_len; i++) {
+ if ((ctxprog[i] & 0xfff00000) != 0xff400000)
+ continue;
+ if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
+ continue;
+ ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
+ (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
+ }
+}
+
+static inline void
+_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
+{
+ int ip = 0;
+
+ if (mod != 2) {
+ ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
+ if (ip == 0)
+ ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
+ }
+
+ cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
+ (state ? 0 : CP_BRA_IF_CLEAR));
+}
+#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#ifdef CP_BRA_MOD
+#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
+#endif
+
+static inline void
+_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
+}
+#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+_cp_set(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
+}
+#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+cp_pos(struct nouveau_grctx *ctx, int offset)
+{
+ ctx->ctxvals_pos = offset;
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+
+ cp_lsr(ctx, ctx->ctxvals_pos);
+ cp_out(ctx, CP_SET_CONTEXT_POINTER);
+}
+
+static inline void
+gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+{
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ reg = (reg - 0x00400000) / 4;
+ reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
+
+ nv_wo32(ctx->dev, ctx->data, reg, val);
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
new file mode 100644
index 00000000000..dc46792a5c9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
+#define CHIPSET_NFORCE 0x01a0
+#define CHIPSET_NFORCE2 0x01f0
+
+/*
+ * misc hw access wrappers/control functions
+ */
+
+void
+NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+ NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
+}
+
+uint8_t
+NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
+ return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
+}
+
+void
+NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+ NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
+}
+
+uint8_t
+NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
+{
+ NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
+ return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
+}
+
+/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
+ * it affects only the 8 bit vga io regs, which we access using mmio at
+ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
+ * in general, the set value of cr44 does not matter: reg access works as
+ * expected and values can be set for the appropriate head by using a 0x2000
+ * offset as required
+ * however:
+ * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
+ * cr44 must be set to 0 or 3 for accessing values on the correct head
+ * through the common 0xc03c* addresses
+ * b) in tied mode (4) head B is programmed to the values set on head A, and
+ * access using the head B addresses can have strange results, ergo we leave
+ * tied mode in init once we know to what cr44 should be restored on exit
+ *
+ * the owner parameter is slightly abused:
+ * 0 and 1 are treated as head values and so the set value is (owner * 3)
+ * other values are treated as literal values to set
+ */
+void
+NVSetOwner(struct drm_device *dev, int owner)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (owner == 1)
+ owner *= 3;
+
+ if (dev_priv->chipset == 0x11) {
+ /* This might seem stupid, but the blob does it and
+ * omitting it often locks the system up.
+ */
+ NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+ NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
+ }
+
+ /* CR44 is always changed on CRTC0 */
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
+
+ if (dev_priv->chipset == 0x11) { /* set me harder */
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
+ }
+}
+
+void
+NVBlankScreen(struct drm_device *dev, int head, bool blank)
+{
+ unsigned char seq1;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, head);
+
+ seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+ NVVgaSeqReset(dev, head, true);
+ if (blank)
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+ else
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
+ NVVgaSeqReset(dev, head, false);
+}
+
+/*
+ * PLL setting
+ */
+
+static int
+powerctrl_1_shift(int chip_version, int reg)
+{
+ int shift = -4;
+
+ if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
+ return shift;
+
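+	/* cases deliberately fall through: each successive PLL register adds
+	 * another 4 to the shift
+	 */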
+ switch (reg) {
+ case NV_RAMDAC_VPLL2:
+ shift += 4;
+ case NV_PRAMDAC_VPLL_COEFF:
+ shift += 4;
+ case NV_PRAMDAC_MPLL_COEFF:
+ shift += 4;
+ case NV_PRAMDAC_NVPLL_COEFF:
+ shift += 4;
+ }
+
+ /*
+ * the shift for vpll regs is only used for nv3x chips with a single
+ * stage pll
+ */
+ if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
+ chip_version == 0x36 || chip_version >= 0x40))
+ shift = -4;
+
+ return shift;
+}
+
+static void
+setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chip_version = dev_priv->vbios->chip_version;
+ uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
+ int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
+ uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+ uint32_t saved_powerctrl_1 = 0;
+ int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
+
+ if (oldpll == pll)
+ return; /* already set */
+
+ if (shift_powerctrl_1 >= 0) {
+ saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
+ (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+ 1 << shift_powerctrl_1);
+ }
+
+ if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
+ /* upclock -- write new post divider first */
+ NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
+ else
+ /* downclock -- write new NM first */
+ NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
+
+ if (chip_version < 0x17 && chip_version != 0x11)
+ /* wait a bit on older chips */
+ msleep(64);
+ NVReadRAMDAC(dev, 0, reg);
+
+ /* then write the other half as well */
+ NVWriteRAMDAC(dev, 0, reg, pll);
+
+ if (shift_powerctrl_1 >= 0)
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
+}
+
+static uint32_t
+new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
+{
+ bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
+
+ if (ss) /* single stage pll mode */
+ ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
+ NV_RAMDAC_580_VPLL2_ACTIVE;
+ else
+ ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
+ ~NV_RAMDAC_580_VPLL2_ACTIVE;
+
+ return ramdac580;
+}
+
+static void
+setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
+ struct nouveau_pll_vals *pv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chip_version = dev_priv->vbios->chip_version;
+ bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
+ uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
+ uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
+ uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
+ uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
+ uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
+ uint32_t oldramdac580 = 0, ramdac580 = 0;
+ bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
+ uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
+ int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
+
+ /* model specific additions to generic pll1 and pll2 set up above */
+ if (nv3035) {
+ pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
+ (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
+ pll2 = 0;
+ }
+ if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
+ oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
+ ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
+ if (oldramdac580 != ramdac580)
+ oldpll1 = ~0; /* force mismatch */
+ if (single_stage)
+ /* magic value used by nvidia in single stage mode */
+ pll2 |= 0x011f;
+ }
+ if (chip_version > 0x70)
+ /* magic bits set by the blob (but not the bios) on g71-73 */
+ pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
+
+ if (oldpll1 == pll1 && oldpll2 == pll2)
+ return; /* already set */
+
+ if (shift_powerctrl_1 >= 0) {
+ saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
+ (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
+ 1 << shift_powerctrl_1);
+ }
+
+ if (chip_version >= 0x40) {
+ int shift_c040 = 14;
+
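+		/* cases fall through, accumulating 2 per PLL register */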
+ switch (reg1) {
+ case NV_PRAMDAC_MPLL_COEFF:
+ shift_c040 += 2;
+ case NV_PRAMDAC_NVPLL_COEFF:
+ shift_c040 += 2;
+ case NV_RAMDAC_VPLL2:
+ shift_c040 += 2;
+ case NV_PRAMDAC_VPLL_COEFF:
+ shift_c040 += 2;
+ }
+
+ savedc040 = nvReadMC(dev, 0xc040);
+ if (shift_c040 != 14)
+ nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
+ }
+
+ if (oldramdac580 != ramdac580)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
+
+ if (!nv3035)
+ NVWriteRAMDAC(dev, 0, reg2, pll2);
+ NVWriteRAMDAC(dev, 0, reg1, pll1);
+
+ if (shift_powerctrl_1 >= 0)
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
+ if (chip_version >= 0x40)
+ nvWriteMC(dev, 0xc040, savedc040);
+}
+
+static void
+setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
+ struct nouveau_pll_vals *pv)
+{
+ /* When setting PLLs, there is a merry game of disabling and enabling
+ * various bits of hardware during the process. This function is a
+ * synthesis of six nv4x traces, nearly each card doing a subtly
+ * different thing. With luck all the necessary bits for each card are
+ * combined herein. Without luck it deviates from each card's formula
+ * so as to not work on any :)
+ */
+
+ uint32_t Preg = NMNMreg - 4;
+ bool mpll = Preg == 0x4020;
+ uint32_t oldPval = nvReadMC(dev, Preg);
+ uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
+ uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
+ 0xc << 28 | pv->log2P << 16;
+ uint32_t saved4600 = 0;
+ /* some cards have different maskc040s */
+ uint32_t maskc040 = ~(3 << 14), savedc040;
+ bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
+
+ if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
+ return;
+
+ if (Preg == 0x4000)
+ maskc040 = ~0x333;
+ if (Preg == 0x4058)
+ maskc040 = ~(0xc << 24);
+
+ if (mpll) {
+ struct pll_lims pll_lim;
+ uint8_t Pval2;
+
+ if (get_pll_limits(dev, Preg, &pll_lim))
+ return;
+
+ Pval2 = pv->log2P + pll_lim.log2p_bias;
+ if (Pval2 > pll_lim.max_log2p)
+ Pval2 = pll_lim.max_log2p;
+ Pval |= 1 << 28 | Pval2 << 20;
+
+ saved4600 = nvReadMC(dev, 0x4600);
+ nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
+ }
+ if (single_stage)
+ Pval |= mpll ? 1 << 12 : 1 << 8;
+
+ nvWriteMC(dev, Preg, oldPval | 1 << 28);
+ nvWriteMC(dev, Preg, Pval & ~(4 << 28));
+ if (mpll) {
+ Pval |= 8 << 20;
+ nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
+ nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
+ }
+
+ savedc040 = nvReadMC(dev, 0xc040);
+ nvWriteMC(dev, 0xc040, savedc040 & maskc040);
+
+ nvWriteMC(dev, NMNMreg, NMNM);
+ if (NMNMreg == 0x4024)
+ nvWriteMC(dev, 0x403c, NMNM);
+
+ nvWriteMC(dev, Preg, Pval);
+ if (mpll) {
+ Pval &= ~(8 << 20);
+ nvWriteMC(dev, 0x4020, Pval);
+ nvWriteMC(dev, 0x4038, Pval);
+ nvWriteMC(dev, 0x4600, saved4600);
+ }
+
+ nvWriteMC(dev, 0xc040, savedc040);
+
+ if (mpll) {
+ nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
+ nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
+ }
+}
+
+void
+nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
+ struct nouveau_pll_vals *pv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int cv = dev_priv->vbios->chip_version;
+
+ if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
+ cv >= 0x40) {
+ if (reg1 > 0x405c)
+ setPLL_double_highregs(dev, reg1, pv);
+ else
+ setPLL_double_lowregs(dev, reg1, pv);
+ } else
+ setPLL_single(dev, reg1, pv);
+}
+
+/*
+ * PLL getting
+ */
+
+static void
+nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
+ uint32_t pll2, struct nouveau_pll_vals *pllvals)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
+
+ /* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
+ pllvals->log2P = (pll1 >> 16) & 0x7;
+ pllvals->N2 = pllvals->M2 = 1;
+
+ if (reg1 <= 0x405c) {
+ pllvals->NM1 = pll2 & 0xffff;
+ /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
+ if (!(pll1 & 0x1100))
+ pllvals->NM2 = pll2 >> 16;
+ } else {
+ pllvals->NM1 = pll1 & 0xffff;
+ if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
+ pllvals->NM2 = pll2 & 0xffff;
+ else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
+ pllvals->M1 &= 0xf; /* only 4 bits */
+ if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
+ pllvals->M2 = (pll1 >> 4) & 0x7;
+ pllvals->N2 = ((pll1 >> 21) & 0x18) |
+ ((pll1 >> 19) & 0x7);
+ }
+ }
+ }
+}
+
+int
+nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
+ struct nouveau_pll_vals *pllvals)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
+ NV_PRAMDAC_MPLL_COEFF,
+ NV_PRAMDAC_VPLL_COEFF,
+ NV_RAMDAC_VPLL2 };
+ const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
+ 0x4020,
+ NV_PRAMDAC_VPLL_COEFF,
+ NV_RAMDAC_VPLL2 };
+ uint32_t reg1, pll1, pll2 = 0;
+ struct pll_lims pll_lim;
+ int ret;
+
+ if (dev_priv->card_type < NV_40)
+ reg1 = nv04_regs[plltype];
+ else
+ reg1 = nv40_regs[plltype];
+
+ pll1 = nvReadMC(dev, reg1);
+
+ if (reg1 <= 0x405c)
+ pll2 = nvReadMC(dev, reg1 + 4);
+ else if (nv_two_reg_pll(dev)) {
+ uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
+
+ pll2 = nvReadMC(dev, reg2);
+ }
+
+ if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+ uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
+
+ /* check whether vpll has been forced into single stage mode */
+ if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
+ if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
+ pll2 = 0;
+ } else
+ if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
+ pll2 = 0;
+ }
+
+ nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
+
+ ret = get_pll_limits(dev, plltype, &pll_lim);
+ if (ret)
+ return ret;
+
+ pllvals->refclk = pll_lim.refclk;
+
+ return 0;
+}
+
+int
+nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
+{
+ /* Avoid divide by zero if called at an inappropriate time */
+ if (!pv->M1 || !pv->M2)
+ return 0;
+
+ return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
+}
+
+int
+nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
+{
+ struct nouveau_pll_vals pllvals;
+
+ if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+ uint32_t mpllP;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+ if (!mpllP)
+ mpllP = 4;
+
+ return 400000 / mpllP;
+ } else
+ if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+ uint32_t clock;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+ return clock;
+ }
+
+ nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+
+ return nouveau_hw_pllvals_to_clk(&pllvals);
+}
+
+static void
+nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
+{
+ /* the vpll on an unused head can come up with a random value, way
+ * beyond the pll limits. for some reason this causes the chip to
+ * lock up when reading the dac palette regs, so set a valid pll here
+	 * when such a condition is detected.  only seen on nv11 to date
+ */
+
+ struct pll_lims pll_lim;
+ struct nouveau_pll_vals pv;
+ uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+
+ if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
+ return;
+ nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
+
+ if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
+ pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
+ pv.log2P <= pll_lim.max_log2p)
+ return;
+
+ NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);
+
+ /* set lowest clock within static limits */
+ pv.M1 = pll_lim.vco1.max_m;
+ pv.N1 = pll_lim.vco1.min_n;
+ pv.log2P = pll_lim.max_usable_log2p;
+ nouveau_hw_setpll(dev, pllreg, &pv);
+}
+
+/*
+ * vga font save/restore
+ */
+
+static void nouveau_vga_font_io(struct drm_device *dev,
+ void __iomem *iovram,
+ bool save, unsigned plane)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned i;
+
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
+ for (i = 0; i < 16384; i++) {
+ if (save) {
+ dev_priv->saved_vga_font[plane][i] =
+ ioread32_native(iovram + i * 4);
+ } else {
+ iowrite32_native(dev_priv->saved_vga_font[plane][i],
+ iovram + i * 4);
+ }
+ }
+}
+
+void
+nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
+{
+ uint8_t misc, gr4, gr5, gr6, seq2, seq4;
+ bool graphicsmode;
+ unsigned plane;
+ void __iomem *iovram;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, 0);
+
+ NVSetEnablePalette(dev, 0, true);
+ graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
+ NVSetEnablePalette(dev, 0, false);
+
+ if (graphicsmode) /* graphics mode => framebuffer => no need to save */
+ return;
+
+ NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");
+
+ /* map first 64KiB of VRAM, holds VGA fonts etc */
+ iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
+ if (!iovram) {
+ NV_ERROR(dev, "Failed to map VRAM, "
+ "cannot save/restore VGA fonts.\n");
+ return;
+ }
+
+ if (nv_two_heads(dev))
+ NVBlankScreen(dev, 1, true);
+ NVBlankScreen(dev, 0, true);
+
+ /* save control regs */
+ misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
+ seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
+ seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
+ gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
+ gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
+ gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
+
+ NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
+
+ /* store font in planes 0..3 */
+ for (plane = 0; plane < 4; plane++)
+ nouveau_vga_font_io(dev, iovram, save, plane);
+
+ /* restore control regs */
+ NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
+ NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
+
+ if (nv_two_heads(dev))
+ NVBlankScreen(dev, 1, false);
+ NVBlankScreen(dev, 0, false);
+
+ iounmap(iovram);
+}
+
+/*
+ * mode state save/load
+ */
+
+static void
+rd_cio_state(struct drm_device *dev, int head,
+ struct nv04_crtc_reg *crtcstate, int index)
+{
+ crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
+}
+
+static void
+wr_cio_state(struct drm_device *dev, int head,
+ struct nv04_crtc_reg *crtcstate, int index)
+{
+ NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
+}
+
+static void
+nv_save_state_ramdac(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ if (dev_priv->card_type >= NV_10)
+ regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
+
+ nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
+ state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
+ if (nv_two_heads(dev))
+ state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+ if (dev_priv->chipset == 0x11)
+ regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
+
+ regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
+
+ if (nv_gf4_disp_arch(dev))
+ regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
+ if (dev_priv->chipset >= 0x30)
+ regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
+
+ regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
+ regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
+ regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
+ regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
+ regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
+ regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
+ regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
+ regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
+
+ for (i = 0; i < 7; i++) {
+ uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+ regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
+ regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
+ }
+
+ if (nv_gf4_disp_arch(dev)) {
+ regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
+ for (i = 0; i < 3; i++) {
+ regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
+ regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
+ }
+ }
+
+ regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+ regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
+ if (!nv_gf4_disp_arch(dev) && head == 0) {
+ /* early chips don't allow access to PRAMDAC_TMDS_* without
+ * the head A FPCLK on (nv11 even locks up) */
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
+ ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
+ }
+ regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
+ regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
+
+ regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
+
+ if (nv_gf4_disp_arch(dev))
+ regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
+
+ if (dev_priv->card_type == NV_40) {
+ regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
+ regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
+ regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
+
+ for (i = 0; i < 38; i++)
+ regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
+ NV_PRAMDAC_CTV + 4*i);
+ }
+}
+
+static void
+nv_load_state_ramdac(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+ int i;
+
+ if (dev_priv->card_type >= NV_10)
+ NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
+
+ nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+ if (nv_two_heads(dev))
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
+ if (dev_priv->chipset == 0x11)
+ NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
+
+ if (nv_gf4_disp_arch(dev))
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
+ if (dev_priv->chipset >= 0x30)
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
+
+ for (i = 0; i < 7; i++) {
+ uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
+
+ NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
+ NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
+ }
+
+ if (nv_gf4_disp_arch(dev)) {
+ NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
+ for (i = 0; i < 3; i++) {
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
+ }
+ }
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
+
+ if (nv_gf4_disp_arch(dev))
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
+
+ if (dev_priv->card_type == NV_40) {
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
+
+ for (i = 0; i < 38; i++)
+ NVWriteRAMDAC(dev, head,
+ NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
+ }
+}
+
+static void
+nv_save_state_vga(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
+
+ for (i = 0; i < 25; i++)
+ rd_cio_state(dev, head, regp, i);
+
+ NVSetEnablePalette(dev, head, true);
+ for (i = 0; i < 21; i++)
+ regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
+ NVSetEnablePalette(dev, head, false);
+
+ for (i = 0; i < 9; i++)
+ regp->Graphics[i] = NVReadVgaGr(dev, head, i);
+
+ for (i = 0; i < 5; i++)
+ regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
+}
+
+static void
+nv_load_state_vga(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
+
+ for (i = 0; i < 5; i++)
+ NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
+
+ nv_lock_vga_crtc_base(dev, head, false);
+ for (i = 0; i < 25; i++)
+ wr_cio_state(dev, head, regp, i);
+ nv_lock_vga_crtc_base(dev, head, true);
+
+ for (i = 0; i < 9; i++)
+ NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
+
+ NVSetEnablePalette(dev, head, true);
+ for (i = 0; i < 21; i++)
+ NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
+ NVSetEnablePalette(dev, head, false);
+}
+
+static void
+nv_save_state_ext(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ int i;
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
+ if (dev_priv->card_type >= NV_30)
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+ if (dev_priv->card_type >= NV_10) {
+ regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
+ regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
+
+ if (dev_priv->card_type >= NV_30)
+ regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
+
+ if (dev_priv->card_type == NV_40)
+ regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
+
+ if (nv_two_heads(dev))
+ regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
+ regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
+ }
+
+ regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+ if (dev_priv->card_type >= NV_10) {
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+ }
+ /* NV11 and NV20 don't have this, they stop at 0x52. */
+ if (nv_gf4_disp_arch(dev)) {
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+ for (i = 0; i < 0x10; i++)
+ regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
+ rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
+ }
+
+ regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
+}
+
+static void
+nv_load_state_ext(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[head];
+ uint32_t reg900;
+ int i;
+
+ if (dev_priv->card_type >= NV_10) {
+ if (nv_two_heads(dev))
+ /* setting ENGINE_CTRL (EC) *must* come before
+ * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
+ * EC that should not be overwritten by writing stale EC
+ */
+ NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
+
+ nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
+ nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
+ nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
+ nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
+ nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+
+ NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
+ NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
+ NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
+
+ if (dev_priv->card_type >= NV_30)
+ NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
+
+ if (dev_priv->card_type == NV_40) {
+ NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
+
+ reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
+ if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
+ else
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
+ }
+ }
+
+ NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
+ if (dev_priv->card_type >= NV_30)
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+ if (dev_priv->card_type == NV_40)
+ nv_fix_nv40_hw_cursor(dev, head);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
+ if (dev_priv->card_type >= NV_10) {
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
+ }
+ /* NV11 and NV20 stop at 0x52. */
+ if (nv_gf4_disp_arch(dev)) {
+ if (dev_priv->card_type == NV_10) {
+ /* Not waiting for vertical retrace before modifying
+ CRE_53/CRE_54 causes lockups. */
+ nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+ nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+ }
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
+
+ for (i = 0; i < 0x10; i++)
+ NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
+
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
+ wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
+ }
+
+ NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
+
+ /* Setting 1 on this value gives you interrupts for every vblank period. */
+ NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+ NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
+}
+
+static void
+nv_save_state_palette(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ int head_offset = head * NV_PRMDIO_SIZE, i;
+
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+ NV_PRMDIO_PIXEL_MASK_MASK);
+ nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
+
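+	/* 256 palette entries, 3 bytes (R, G, B) each */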
+ for (i = 0; i < 768; i++) {
+ state->crtc_reg[head].DAC[i] = nv_rd08(dev,
+ NV_PRMDIO_PALETTE_DATA + head_offset);
+ }
+
+ NVSetEnablePalette(dev, head, false);
+}
+
+void
+nouveau_hw_load_state_palette(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ int head_offset = head * NV_PRMDIO_SIZE, i;
+
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
+ NV_PRMDIO_PIXEL_MASK_MASK);
+ nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
+
+ for (i = 0; i < 768; i++) {
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
+ state->crtc_reg[head].DAC[i]);
+ }
+
+ NVSetEnablePalette(dev, head, false);
+}
+
+void nouveau_hw_save_state(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset == 0x11)
+ /* NB: no attempt is made to restore the bad pll later on */
+ nouveau_hw_fix_bad_vpll(dev, head);
+ nv_save_state_ramdac(dev, head, state);
+ nv_save_state_vga(dev, head, state);
+ nv_save_state_palette(dev, head, state);
+ nv_save_state_ext(dev, head, state);
+}
+
+void nouveau_hw_load_state(struct drm_device *dev, int head,
+ struct nv04_mode_state *state)
+{
+ NVVgaProtect(dev, head, true);
+ nv_load_state_ramdac(dev, head, state);
+ nv_load_state_ext(dev, head, state);
+ nouveau_hw_load_state_palette(dev, head, state);
+ nv_load_state_vga(dev, head, state);
+ NVVgaProtect(dev, head, false);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
new file mode 100644
index 00000000000..869130f8360
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_HW_H__
+#define __NOUVEAU_HW_H__
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
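+/* MASK()/XLATE() expect 'field' written as 'highbit : lowbit', so that the
+ * ternaries (1 ? field) and (0 ? field) pick out the high and low bit numbers
+ */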
+#define MASK(field) ( \
+ (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
+
+#define XLATE(src, srclowbit, outfield) ( \
+ (((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
+
+void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
+void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
+uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
+void NVSetOwner(struct drm_device *, int owner);
+void NVBlankScreen(struct drm_device *, int head, bool blank);
+void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
+ struct nouveau_pll_vals *pv);
+int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
+ struct nouveau_pll_vals *pllvals);
+int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
+int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
+void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
+void nouveau_hw_save_state(struct drm_device *, int head,
+ struct nv04_mode_state *state);
+void nouveau_hw_load_state(struct drm_device *, int head,
+ struct nv04_mode_state *state);
+void nouveau_hw_load_state_palette(struct drm_device *, int head,
+ struct nv04_mode_state *state);
+
+/* nouveau_calc.c */
+extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
+ int *burst, int *lwm);
+extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
+ int clk, struct nouveau_pll_vals *pv);
+
+static inline uint32_t
+nvReadMC(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadVIDEO(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadFB(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t
+nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
+{
+ uint32_t val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
+ return val;
+}
+
+static inline void
+nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t NVReadCRTC(struct drm_device *dev,
+ int head, uint32_t reg)
+{
+ uint32_t val;
+ if (head)
+ reg += NV_PCRTC0_SIZE;
+ val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
+ return val;
+}
+
+static inline void NVWriteCRTC(struct drm_device *dev,
+ int head, uint32_t reg, uint32_t val)
+{
+ if (head)
+ reg += NV_PCRTC0_SIZE;
+ NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
+ int head, uint32_t reg)
+{
+ uint32_t val;
+ if (head)
+ reg += NV_PRAMDAC0_SIZE;
+ val = nv_rd32(dev, reg);
+ NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
+ head, reg, val);
+ return val;
+}
+
+static inline void NVWriteRAMDAC(struct drm_device *dev,
+ int head, uint32_t reg, uint32_t val)
+{
+ if (head)
+ reg += NV_PRAMDAC0_SIZE;
+ NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
+ head, reg, val);
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint8_t nv_read_tmds(struct drm_device *dev,
+ int or, int dl, uint8_t address)
+{
+ int ramdac = (or & OUTPUT_C) >> 2;
+
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
+ NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
+ return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
+}
+
+static inline void nv_write_tmds(struct drm_device *dev,
+ int or, int dl, uint8_t address,
+ uint8_t data)
+{
+ int ramdac = (or & OUTPUT_C) >> 2;
+
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
+}
+
+static inline void NVWriteVgaCrtc(struct drm_device *dev,
+ int head, uint8_t index, uint8_t value)
+{
+ NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, value);
+ nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
+ int head, uint8_t index)
+{
+ uint8_t val;
+ nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
+ NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, val);
+ return val;
+}
+
+/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) for CR58.
+ * I suspect they in fact do nothing, but are merely a way to carry useful
+ * per-head variables around
+ *
+ * Known uses:
+ * CR57 CR58
+ * 0x00 index to the appropriate dcb entry (or 7f for inactive)
+ * 0x02 dcb entry's "or" value (or 00 for inactive)
+ * 0x03 bit0 set for dual link (LVDS, possibly elsewhere too)
+ * 0x08 or 0x09 pxclk in MHz
+ * 0x0f laptop panel info - low nibble for PEXTDEV_BOOT_0 strap
+ * high nibble for xlat strap value
+ */
+
+static inline void
+NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
+{
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
+}
+
+static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
+{
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
+ return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
+}
+
+static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
+ int head, uint32_t reg)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint8_t val;
+
+ /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+ * NVSetOwner for the relevant head to be programmed */
+ if (head && dev_priv->card_type == NV_40)
+ reg += NV_PRMVIO_SIZE;
+
+ val = nv_rd08(dev, reg);
+ NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
+ return val;
+}
+
+static inline void NVWritePRMVIO(struct drm_device *dev,
+ int head, uint32_t reg, uint8_t value)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
+ * NVSetOwner for the relevant head to be programmed */
+ if (head && dev_priv->card_type == NV_40)
+ reg += NV_PRMVIO_SIZE;
+
+ NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
+ head, reg, value);
+ nv_wr08(dev, reg, value);
+}
+
+static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
+{
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
+}
+
+static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
+{
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
+}
+
+static inline void NVWriteVgaAttr(struct drm_device *dev,
+ int head, uint8_t index, uint8_t value)
+{
+ if (NVGetEnablePalette(dev, head))
+ index &= ~0x20;
+ else
+ index |= 0x20;
+
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, value);
+ nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
+}
+
+static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
+ int head, uint8_t index)
+{
+ uint8_t val;
+ if (NVGetEnablePalette(dev, head))
+ index &= ~0x20;
+ else
+ index |= 0x20;
+
+ nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
+ NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
+ head, index, val);
+ return val;
+}
+
+static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
+{
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
+}
+
+static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
+{
+ uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
+
+ if (protect) {
+ NVVgaSeqReset(dev, head, true);
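+		/* SR01 bit 5 blanks the screen while the registers are reprogrammed */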
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
+ } else {
+ /* Reenable sequencer, then turn on screen */
+ NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */
+ NVVgaSeqReset(dev, head, false);
+ }
+ NVSetEnablePalette(dev, head, protect);
+}
+
+static inline bool
+nv_heads_tied(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset == 0x11)
+ return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));
+
+ return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
+}
+
+/* makes cr0-7 on the specified head read-only */
+static inline bool
+nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
+{
+ uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
+ bool waslocked = cr11 & 0x80;
+
+ if (lock)
+ cr11 |= 0x80;
+ else
+ cr11 &= ~0x80;
+ NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);
+
+ return waslocked;
+}
+
+static inline void
+nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
+{
+ /* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
+ * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
+ * bit6: seems to have some effect on CR09 (double scan, VBS_9)
+ * bit5: unlocks HDE
+ * bit4: unlocks VDE
+ * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
+ * bit2: same as bit 1 of 0x60?804
+ * bit0: same as bit 0 of 0x60?804
+ */
+
+ uint8_t cr21 = lock;
+
+ if (lock < 0)
+ /* 0xfa is generic "unlock all" mask */
+ cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
+}
+
+/* renders the extended crtc regs (cr19+) on all crtcs impervious:
+ * immutable and unreadable
+ */
+static inline bool
+NVLockVgaCrtcs(struct drm_device *dev, bool lock)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
+
+ NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
+ lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
+ /* NV11 has independently lockable extended crtcs, except when tied */
+ if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
+ NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
+ lock ? NV_CIO_SR_LOCK_VALUE :
+ NV_CIO_SR_UNLOCK_RW_VALUE);
+
+ return waslocked;
+}
+
+/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
+#define NV04_CURSOR_SIZE 32
+/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
+#define NV10_CURSOR_SIZE 64
+
+static inline int nv_cursor_width(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+}
+
+static inline void
+nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
+{
+ /* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40,
+ * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS
+ * for changes to the CRTC CURCTL regs to take effect, whether changing
+ * the pixmap location, or just showing/hiding the cursor
+ */
+ uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
+}
+
+static inline void
+nv_show_cursor(struct drm_device *dev, int head, bool show)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint8_t *curctl1 =
+ &dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
+
+ if (show)
+ *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+ else
+ *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
+
+ if (dev_priv->card_type == NV_40)
+ nv_fix_nv40_hw_cursor(dev, head);
+}
+
+static inline uint32_t
+nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int mask;
+
+ if (bpp == 15)
+ bpp = 16;
+ if (bpp == 24)
+ bpp = 8;
+
+ /* Alignment requirements taken from the Haiku driver */
+ if (dev_priv->card_type == NV_04)
+ mask = 128 / bpp - 1;
+ else
+ mask = 512 / bpp - 1;
+
+ return (width + mask) & ~mask;
+}
+
+#endif /* __NOUVEAU_HW_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
new file mode 100644
index 00000000000..70e994d2812
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_hw.h"
+
+static void
+nv04_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+ NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+}
+
+static void
+nv04_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+ NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+}
+
+static int
+nv04_i2c_getscl(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+}
+
+static int
+nv04_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+}
+
+static void
+nv4e_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+ nv_wr32(dev, i2c->wr, val | 0x01);
+}
+
+static void
+nv4e_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+ uint8_t val;
+
+ val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+ nv_wr32(dev, i2c->wr, val | 0x01);
+}
+
+static int
+nv4e_i2c_getscl(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+}
+
+static int
+nv4e_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+}
+
+static int
+nv50_i2c_getscl(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(nv_rd32(dev, i2c->rd) & 1);
+}
+
+
+static int
+nv50_i2c_getsda(void *data)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ return !!(nv_rd32(dev, i2c->rd) & 2);
+}
+
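+/* nv50 GPIO-based ports: bit 0 of the port register appears to be SCL,
+ * bit 1 SDA, and bit 2 to enable software control of the pins */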
+static void
+nv50_i2c_setscl(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+}
+
+static void
+nv50_i2c_setsda(void *data, int state)
+{
+ struct nouveau_i2c_chan *i2c = data;
+ struct drm_device *dev = i2c->dev;
+
+ nv_wr32(dev, i2c->wr,
+ (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
+ i2c->data = state;
+}
+
+static const uint32_t nv50_i2c_port[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
+#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
+
+int
+nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c;
+ int ret;
+
+ if (entry->chan)
+ return -EEXIST;
+
+ if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
+ NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
+ return -EINVAL;
+ }
+
+ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+ if (i2c == NULL)
+ return -ENOMEM;
+
+ switch (entry->port_type) {
+ case 0:
+ i2c->algo.bit.setsda = nv04_i2c_setsda;
+ i2c->algo.bit.setscl = nv04_i2c_setscl;
+ i2c->algo.bit.getsda = nv04_i2c_getsda;
+ i2c->algo.bit.getscl = nv04_i2c_getscl;
+ i2c->rd = entry->read;
+ i2c->wr = entry->write;
+ break;
+ case 4:
+ i2c->algo.bit.setsda = nv4e_i2c_setsda;
+ i2c->algo.bit.setscl = nv4e_i2c_setscl;
+ i2c->algo.bit.getsda = nv4e_i2c_getsda;
+ i2c->algo.bit.getscl = nv4e_i2c_getscl;
+ i2c->rd = 0x600800 + entry->read;
+ i2c->wr = 0x600800 + entry->write;
+ break;
+ case 5:
+ i2c->algo.bit.setsda = nv50_i2c_setsda;
+ i2c->algo.bit.setscl = nv50_i2c_setscl;
+ i2c->algo.bit.getsda = nv50_i2c_getsda;
+ i2c->algo.bit.getscl = nv50_i2c_getscl;
+ i2c->rd = nv50_i2c_port[entry->read];
+ i2c->wr = i2c->rd;
+ break;
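+	/* port type 6: DisplayPort AUX, handled by the i2c-over-AUX algo below */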
+ case 6:
+ i2c->rd = entry->read;
+ i2c->wr = entry->write;
+ break;
+ default:
+ NV_ERROR(dev, "DCB I2C port type %d unknown\n",
+ entry->port_type);
+ kfree(i2c);
+ return -EINVAL;
+ }
+
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "nouveau-%s-%d", pci_name(dev->pdev), index);
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+
+ if (entry->port_type < 6) {
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.udelay = 40;
+ i2c->algo.bit.timeout = usecs_to_jiffies(5000);
+ i2c->algo.bit.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ } else {
+ i2c->adapter.algo_data = &i2c->algo.dp;
+ i2c->algo.dp.running = false;
+ i2c->algo.dp.address = 0;
+ i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
+ ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "Failed to register i2c %d\n", index);
+ kfree(i2c);
+ return ret;
+ }
+
+ entry->chan = i2c;
+ return 0;
+}
+
+void
+nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+{
+ if (!entry->chan)
+ return;
+
+ i2c_del_adapter(&entry->chan->adapter);
+ kfree(entry->chan);
+ entry->chan = NULL;
+}
+
+struct nouveau_i2c_chan *
+nouveau_i2c_find(struct drm_device *dev, int index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+
+	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
+ return NULL;
+
+ if (!bios->bdcb.dcb.i2c[index].chan) {
+ if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
+ return NULL;
+ }
+
+ return bios->bdcb.dcb.i2c[index].chan;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
new file mode 100644
index 00000000000..c8eaf7a9fcb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_I2C_H__
+#define __NOUVEAU_I2C_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-id.h>
+#include <linux/i2c-algo-bit.h>
+#include "drm_dp_helper.h"
+
+struct dcb_i2c_entry;
+
+struct nouveau_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ union {
+ struct i2c_algo_bit_data bit;
+ struct i2c_algo_dp_aux_data dp;
+ } algo;
+ unsigned rd;
+ unsigned wr;
+ unsigned data;
+};
+
+int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
+void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+
+int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
+ uint8_t *read_byte);
+
+#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 00000000000..475ba810bba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,70 @@
+/**
+ * \file nouveau_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the nouveau DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+#if 0
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
+ fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
+#endif
+ if (fn != NULL)
+ ret = (*fn)(filp, cmd, arg);
+ else
+ ret = drm_ioctl(filp, cmd, arg);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
new file mode 100644
index 00000000000..370c72c968d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_reg.h"
+#include <linux/ratelimit.h>
+
+/* needed for hotplug irq */
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+void
+nouveau_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Master disable */
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+
+ if (dev_priv->card_type == NV_50) {
+ INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ INIT_LIST_HEAD(&dev_priv->vbl_waiting);
+ }
+}
+
+int
+nouveau_irq_postinstall(struct drm_device *dev)
+{
+ /* Master enable */
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+ return 0;
+}
+
+void
+nouveau_irq_uninstall(struct drm_device *dev)
+{
+ /* Master disable */
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_pgraph_object_method *grm;
+ struct nouveau_pgraph_object_class *grc;
+
+ grc = dev_priv->engine.graph.grclass;
+ while (grc->id) {
+ if (grc->id == class)
+ break;
+ grc++;
+ }
+
+ if (grc->id != class || !grc->methods)
+ return -ENOENT;
+
+ grm = grc->methods;
+ while (grm->id) {
+ if (grm->id == mthd)
+ return grm->exec(chan, class, mthd, data);
+ grm++;
+ }
+
+ return -ENOENT;
+}
+
+static bool
+nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+
+ if (mthd == 0x0000) {
+ struct nouveau_gpuobj_ref *ref = NULL;
+
+ if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ return false;
+
+ if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
+ return false;
+
+ chan->sw_subchannel[subc] = ref->gpuobj->class;
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
+ NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
+ return true;
+ }
+
+ /* hw object */
+ if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
+ return false;
+
+ if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
+ return false;
+
+ return true;
+}
+
+static void
+nouveau_fifo_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ struct nouveau_channel *chan = NULL;
+ uint32_t chid, get;
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ chid = engine->fifo.channel_id(dev);
+ if (chid >= 0 && chid < engine->fifo.channels)
+ chan = dev_priv->fifos[chid];
+ get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+ * wrapping on my G80 chips, but CACHE1 isn't big
+ * enough for this much data.. Tests show that it
+ * wraps around to the start at GET=0x800.. No clue
+ * as to why..
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (dev_priv->card_type < NV_40) {
+ mthd = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
+ NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
+
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_DMA_PUSHER);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
+ if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
+ get + 4);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
+ nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+ nv_wr32(dev, 0x2140, 0);
+ nv_wr32(dev, 0x140, 0);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
+
+struct nouveau_bitfield_names {
+ uint32_t mask;
+ const char *name;
+};
+
+static struct nouveau_bitfield_names nstatus_names[] =
+{
+ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nstatus_names_nv10[] =
+{
+ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nsource_names[] =
+{
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
+};
+
+static void
+nouveau_print_bitfield_names_(uint32_t value,
+ const struct nouveau_bitfield_names *namelist,
+ const int namelist_len)
+{
+ /*
+ * Caller must have already printed the KERN_* log level for us.
+ * Also the caller is responsible for adding the newline.
+ */
+ int i;
+ for (i = 0; i < namelist_len; ++i) {
+ uint32_t mask = namelist[i].mask;
+ if (value & mask) {
+ printk(" %s", namelist[i].name);
+ value &= ~mask;
+ }
+ }
+ if (value)
+ printk(" (unknown bits 0x%08x)", value);
+}
+#define nouveau_print_bitfield_names(val, namelist) \
+ nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
+
+
+static int
+nouveau_graph_chid_from_grctx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int i;
+
+ if (dev_priv->card_type < NV_40)
+ return dev_priv->engine.fifo.channels;
+ else
+ if (dev_priv->card_type < NV_50) {
+ inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (!chan || !chan->ramin_grctx)
+ continue;
+
+ if (inst == chan->ramin_grctx->instance)
+ break;
+ }
+ } else {
+ inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->instance)
+ break;
+ }
+ }
+
+
+ return i;
+}
+
+static int
+nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ int channel;
+
+ if (dev_priv->card_type < NV_10)
+ channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
+ else
+ if (dev_priv->card_type < NV_40)
+ channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ else
+ channel = nouveau_graph_chid_from_grctx(dev);
+
+ if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
+ NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
+ return -EINVAL;
+ }
+
+ *channel_ret = channel;
+ return 0;
+}
+
+struct nouveau_pgraph_trap {
+ int channel;
+ int class;
+ int subc, mthd, size;
+ uint32_t data, data2;
+ uint32_t nsource, nstatus;
+};
+
+static void
+nouveau_graph_trap_info(struct drm_device *dev,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t address;
+
+ trap->nsource = trap->nstatus = 0;
+ if (dev_priv->card_type < NV_50) {
+ trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ }
+
+ if (nouveau_graph_trapped_channel(dev, &trap->channel))
+ trap->channel = -1;
+ address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+
+ trap->mthd = address & 0x1FFC;
+ trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ if (dev_priv->card_type < NV_10) {
+ trap->subc = (address >> 13) & 0x7;
+ } else {
+ trap->subc = (address >> 16) & 0x7;
+ trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
+ }
+
+ if (dev_priv->card_type < NV_10)
+ trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
+ else if (dev_priv->card_type < NV_40)
+ trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
+ else if (dev_priv->card_type < NV_50)
+ trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
+ else
+ trap->class = nv_rd32(dev, 0x400814);
+}
+
+static void
+nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
+
+ NV_INFO(dev, "%s - nSource:", id);
+ nouveau_print_bitfield_names(nsource, nsource_names);
+ printk(", nStatus:");
+ if (dev_priv->card_type < NV_10)
+ nouveau_print_bitfield_names(nstatus, nstatus_names);
+ else
+ nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
+ printk("\n");
+
+ NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
+ "Data 0x%08x:0x%08x\n",
+ id, trap->channel, trap->subc,
+ trap->class, trap->mthd,
+ trap->data2, trap->data);
+}
+
+static int
+nouveau_pgraph_intr_swmthd(struct drm_device *dev,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (trap->channel < 0 ||
+ trap->channel >= dev_priv->engine.fifo.channels ||
+ !dev_priv->fifos[trap->channel])
+ return -ENODEV;
+
+ return nouveau_call_method(dev_priv->fifos[trap->channel],
+ trap->class, trap->mthd, trap->data);
+}
+
+static inline void
+nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
+{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
+
+ nouveau_graph_trap_info(dev, &trap);
+
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (nouveau_pgraph_intr_swmthd(dev, &trap))
+ unhandled = 1;
+ } else {
+ unhandled = 1;
+ }
+
+ if (unhandled)
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
+}
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+static int nouveau_ratelimit(void)
+{
+ return __ratelimit(&nouveau_ratelimit_state);
+}
+
+
+static inline void
+nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
+{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
+
+ nouveau_graph_trap_info(dev, &trap);
+ trap.nsource = nsource;
+
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (nouveau_pgraph_intr_swmthd(dev, &trap))
+ unhandled = 1;
+ } else {
+ unhandled = 1;
+ }
+
+ if (unhandled && nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
+}
+
+static inline void
+nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ uint32_t chid;
+
+ chid = engine->fifo.channel_id(dev);
+ NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ nv04_graph_context_switch(dev);
+ break;
+ case NV_10:
+ nv10_graph_context_switch(dev);
+ break;
+ default:
+ NV_ERROR(dev, "Context switch not implemented\n");
+ break;
+ }
+}
+
+static void
+nouveau_pgraph_irq_handler(struct drm_device *dev)
+{
+ uint32_t status;
+
+ while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+
+ if (status & NV_PGRAPH_INTR_NOTIFY) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+
+ status &= ~NV_PGRAPH_INTR_NOTIFY;
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
+ }
+
+ if (status & NV_PGRAPH_INTR_ERROR) {
+ nouveau_pgraph_intr_error(dev, nsource);
+
+ status &= ~NV_PGRAPH_INTR_ERROR;
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+ }
+
+ if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nouveau_pgraph_intr_context_switch(dev);
+
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv_wr32(dev, NV03_PGRAPH_INTR,
+ NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ }
+
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
+
+ if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nv50_pgraph_irq_handler(struct drm_device *dev)
+{
+ uint32_t status, nsource;
+
+ status = nv_rd32(dev, NV03_PGRAPH_INTR);
+ nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+
+ if (status & 0x00000001) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+ status &= ~0x00000001;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+ }
+
+ if (status & 0x00000010) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+
+ status &= ~0x00000010;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+ }
+
+ if (status & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, 0x400500, 0x00010001);
+
+ nv50_graph_context_switch(dev);
+
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ }
+
+ if (status & 0x00100000) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_DATA_ERROR);
+
+ status &= ~0x00100000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+ }
+
+ if (status & 0x00200000) {
+ int r;
+
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+ NV_ERROR(dev, "magic set 1:\n");
+ for (r = 0x408900; r <= 0x408910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
+ for (r = 0x408e08; r <= 0x408e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+ NV_ERROR(dev, "magic set 2:\n");
+ for (r = 0x409900; r <= 0x409910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
+ for (r = 0x409e08; r <= 0x409e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
+ nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+ status &= ~0x00200000;
+ nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+ }
+
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
+
+ {
+ const int isb = (1 << 16) | (1 << 0);
+
+ if ((nv_rd32(dev, 0x400500) & isb) != isb)
+ nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+{
+ if (crtc & 1)
+ nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+
+ if (crtc & 2)
+ nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+}
+
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t status, fbdev_flags = 0;
+
+ status = nv_rd32(dev, NV03_PMC_INTR_0);
+ if (!status)
+ return IRQ_NONE;
+
+ if (dev_priv->fbdev_info) {
+ fbdev_flags = dev_priv->fbdev_info->flags;
+ dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
+ nouveau_fifo_irq_handler(dev);
+ status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
+ if (dev_priv->card_type >= NV_50)
+ nv50_pgraph_irq_handler(dev);
+ else
+ nouveau_pgraph_irq_handler(dev);
+
+ status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
+ nouveau_crtc_irq_handler(dev, (status>>24)&3);
+ status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
+ }
+
+ if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
+ NV_PMC_INTR_0_NV50_I2C_PENDING)) {
+ nv50_display_irq_handler(dev);
+ status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
+ NV_PMC_INTR_0_NV50_I2C_PENDING);
+ }
+
+ if (status)
+ NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
+
+ if (dev_priv->fbdev_info)
+ dev_priv->fbdev_info->flags = fbdev_flags;
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 00000000000..5158a12f784
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ * Copyright 2005 Stephane Marchesin
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "nouveau_drv.h"
+
+static struct mem_block *
+split_block(struct mem_block *p, uint64_t start, uint64_t size,
+ struct drm_file *file_priv)
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock =
+ kmalloc(sizeof(*newblock), GFP_KERNEL);
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock =
+ kmalloc(sizeof(*newblock), GFP_KERNEL);
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+out:
+ /* Our block is in the middle */
+ p->file_priv = file_priv;
+ return p;
+}
+
+struct mem_block *
+nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
+ int align2, struct drm_file *file_priv, int tail)
+{
+ struct mem_block *p;
+ uint64_t mask = (1 << align2) - 1;
+
+ if (!heap)
+ return NULL;
+
+ if (tail) {
+ list_for_each_prev(p, heap) {
+ uint64_t start = ((p->start + p->size) - size) & ~mask;
+
+ if (p->file_priv == NULL && start >= p->start &&
+ start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
+ }
+ } else {
+ list_for_each(p, heap) {
+ uint64_t start = (p->start + mask) & ~mask;
+
+ if (p->file_priv == NULL &&
+ start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
+ }
+ }
+
+ return NULL;
+}
+
+void nouveau_mem_free_block(struct mem_block *p)
+{
+ p->file_priv = NULL;
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->file_priv == NULL) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ kfree(q);
+ }
+
+ if (p->prev->file_priv == NULL) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ kfree(p);
+ }
+}
+
+/* Initialize. How to check for an uninitialized heap?
+ */
+int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
+ uint64_t size)
+{
+ struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
+
+ if (!blocks)
+ return -ENOMEM;
+
+ *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
+ if (!*heap) {
+ kfree(blocks);
+ return -ENOMEM;
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->file_priv = NULL;
+ blocks->next = blocks->prev = *heap;
+
+ memset(*heap, 0, sizeof(**heap));
+ (*heap)->file_priv = (struct drm_file *) -1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return 0;
+}
+
+/*
+ * Free all blocks associated with the releasing file_priv
+ */
+void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ list_for_each(p, heap) {
+ if (p->file_priv == file_priv)
+ p->file_priv = NULL;
+ }
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ list_for_each(p, heap) {
+ while ((p->file_priv == NULL) &&
+ (p->next->file_priv == NULL) &&
+ (p->next != heap)) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ kfree(q);
+ }
+ }
+}
+
+/*
+ * NV50 VM helpers
+ */
+int
+nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
+ uint32_t flags, uint64_t phys)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj **pgt;
+ unsigned psz, pfl, pages;
+
+ if (virt >= dev_priv->vm_gart_base &&
+ (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
+ psz = 12;
+ pgt = &dev_priv->gart_info.sg_ctxdma;
+ pfl = 0x21;
+ virt -= dev_priv->vm_gart_base;
+ } else
+ if (virt >= dev_priv->vm_vram_base &&
+ (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
+ psz = 16;
+ pgt = dev_priv->vm_vram_pt;
+ pfl = 0x01;
+ virt -= dev_priv->vm_vram_base;
+ } else {
+ NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
+ virt, virt + size - 1);
+ return -EINVAL;
+ }
+
+ pages = size >> psz;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ if (flags & 0x80000000) {
+ while (pages--) {
+ struct nouveau_gpuobj *pt = pgt[virt >> 29];
+ unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+
+ nv_wo32(dev, pt, pte++, 0x00000000);
+ nv_wo32(dev, pt, pte++, 0x00000000);
+
+ virt += (1 << psz);
+ }
+ } else {
+ while (pages--) {
+ struct nouveau_gpuobj *pt = pgt[virt >> 29];
+ unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+ unsigned offset_h = upper_32_bits(phys) & 0xff;
+ unsigned offset_l = lower_32_bits(phys);
+
+ nv_wo32(dev, pt, pte++, offset_l | pfl);
+ nv_wo32(dev, pt, pte++, offset_h | flags);
+
+ phys += (1 << psz);
+ virt += (1 << psz);
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void
+nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
+{
+ nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+}
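
To make the page-table indexing in nv50_mem_vm_bind_linear() concrete, here is a stand-alone sketch of the same arithmetic for the VRAM case (psz = 16, i.e. 64KiB pages). It is illustrative only and not part of the patch; it simply mirrors the shifts used above.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the PTE addressing used above for VRAM pages (psz = 16):
 * virt >> 29 selects the page table (each table covers a 512MiB window),
 * and every 64KiB page occupies two consecutive 32-bit PTE words.
 */
static void nv50_vram_pte_index(uint64_t virt, unsigned *pt_idx, unsigned *pte_word)
{
	const unsigned psz = 16;                           /* 64KiB VRAM pages */

	*pt_idx   = virt >> 29;                            /* which page table */
	*pte_word = ((virt & 0x1fffffffULL) >> psz) << 1;  /* first of two words */
}

int main(void)
{
	unsigned pt, pte;

	nv50_vram_pte_index(0x20010000ULL, &pt, &pte);
	printf("pt %u, pte word %u\n", pt, pte);           /* pt 1, pte word 2 */
	return 0;
}
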
+
+/*
+ * Cleanup everything
+ */
+void nouveau_mem_takedown(struct mem_block **heap)
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next; p != *heap;) {
+ struct mem_block *q = p;
+ p = p->next;
+ kfree(q);
+ }
+
+ kfree(*heap);
+ *heap = NULL;
+}
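
Taken together, the helpers above form a small first-fit block allocator over a linked list of mem_block ranges. A hedged sketch of the intended call sequence, assuming the prototypes are visible via nouveau_drv.h as in this file (error handling trimmed):

/* Sketch only: how the mem_block heap above is meant to be driven. */
static int example_heap_usage(void)
{
	struct mem_block *heap = NULL;
	struct mem_block *blk;
	int ret;

	/* describe a 1MiB region starting at offset 0 */
	ret = nouveau_mem_init_heap(&heap, 0, 1024 * 1024);
	if (ret)
		return ret;

	/* carve out 4KiB aligned to 2^12; (struct drm_file *)-2 marks the
	 * block as owned, the same marker the driver itself uses */
	blk = nouveau_mem_alloc_block(heap, 0x1000, 12,
				      (struct drm_file *)-2, 0);
	if (!blk) {
		nouveau_mem_takedown(&heap);
		return -ENOMEM;
	}

	nouveau_mem_free_block(blk);	/* merges back into free neighbours */
	nouveau_mem_takedown(&heap);	/* frees every remaining block */
	return 0;
}
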
+
+void nouveau_mem_close(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+
+ ttm_bo_device_release(&dev_priv->ttm.bdev);
+
+ nouveau_ttm_global_release(dev_priv);
+
+ if (drm_core_has_AGP(dev) && dev->agp &&
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_agp_mem *entry, *tempe;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until drv_cleanup is called. */
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+ if (entry->bound)
+ drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&dev->agp->memory);
+
+ if (dev->agp->acquired)
+ drm_agp_release(dev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+
+ if (dev_priv->fb_mtrr) {
+ drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
+ drm_get_resource_len(dev, 1), DRM_MTRR_WC);
+ dev_priv->fb_mtrr = 0;
+ }
+}
+
+/* XXX won't work on BSD because of pci_read_config_dword */

+static uint32_t
+nouveau_mem_fb_amount_igp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t mem;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ NV_ERROR(dev, "no bridge device\n");
+ return 0;
+ }
+
+ if (dev_priv->flags&NV_NFORCE) {
+ pci_read_config_dword(bridge, 0x7C, &mem);
+ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
+ } else
+ if (dev_priv->flags&NV_NFORCE2) {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+ }
+
+ NV_ERROR(dev, "impossible!\n");
+ return 0;
+}
+
+/* returns the amount of FB ram in bytes */
+uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t boot0;
+
+ switch (dev_priv->card_type) {
+ case NV_04:
+ boot0 = nv_rd32(dev, NV03_BOOT_0);
+ if (boot0 & 0x00000100)
+ return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
+
+ switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
+ case NV04_BOOT_0_RAM_AMOUNT_32MB:
+ return 32 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_16MB:
+ return 16 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_8MB:
+ return 8 * 1024 * 1024;
+ case NV04_BOOT_0_RAM_AMOUNT_4MB:
+ return 4 * 1024 * 1024;
+ }
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ case NV_50:
+ default:
+ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+ return nouveau_mem_fb_amount_igp(dev);
+ } else {
+ uint64_t mem;
+ mem = (nv_rd32(dev, NV04_FIFO_DATA) &
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
+ return mem * 1024 * 1024;
+ }
+ break;
+ }
+
+ NV_ERROR(dev,
+ "Unable to detect video ram size. Please report your setup to "
+ DRIVER_EMAIL "\n");
+ return 0;
+}
+
+#if __OS_HAS_AGP
+static void nouveau_mem_reset_agp(struct drm_device *dev)
+{
+ uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
+
+ saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
+ saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
+
+ /* clear busmaster bit */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
+ /* clear SBA and AGP bits */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
+
+ /* power cycle pgraph, if enabled */
+ pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
+ if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+ }
+
+ /* and restore (gives effect of resetting AGP) */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
+ nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
+}
+#endif
+
+int
+nouveau_mem_init_agp(struct drm_device *dev)
+{
+#if __OS_HAS_AGP
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_agp_info info;
+ struct drm_agp_mode mode;
+ int ret;
+
+ if (nouveau_noagp)
+ return 0;
+
+ nouveau_mem_reset_agp(dev);
+
+ if (!dev->agp->acquired) {
+ ret = drm_agp_acquire(dev);
+ if (ret) {
+ NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = drm_agp_info(dev, &info);
+ if (ret) {
+ NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
+ return ret;
+ }
+
+ /* see agp.h for the AGPSTAT_* modes available */
+ mode.mode = info.mode;
+ ret = drm_agp_enable(dev, mode);
+ if (ret) {
+ NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->gart_info.type = NOUVEAU_GART_AGP;
+ dev_priv->gart_info.aper_base = info.aperture_base;
+ dev_priv->gart_info.aper_size = info.aperture_size;
+#endif
+ return 0;
+}
+
+int
+nouveau_mem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ int ret, dma_bits = 32;
+
+ dev_priv->fb_phys = drm_get_resource_start(dev, 1);
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
+ if (dev_priv->card_type >= NV_50 &&
+ pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ dma_bits = 40;
+
+ ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+ if (ret) {
+ NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_ttm_global_init(dev_priv);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
+ dev_priv->ttm.bo_global_ref.ref.object,
+ &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+ dma_bits <= 32 ? true : false);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
+ spin_lock_init(&dev_priv->ttm.bo_list_lock);
+
+ dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
+
+ dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+ if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
+ dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
+ dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
+ NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
+
+ /* remove reserved space at end of vram from available amount */
+ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
+ dev_priv->fb_aper_free = dev_priv->fb_available_size;
+
+ /* mappable vram */
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ dev_priv->fb_available_size >> PAGE_SHIFT);
+ if (ret) {
+ NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
+ return ret;
+ }
+
+ /* GART */
+#if !defined(__powerpc__) && !defined(__ia64__)
+ if (drm_device_is_agp(dev) && dev->agp) {
+ ret = nouveau_mem_init_agp(dev);
+ if (ret)
+ NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
+ }
+#endif
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
+ ret = nouveau_sgdma_init(dev);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
+ return ret;
+ }
+ }
+
+ NV_INFO(dev, "%d MiB GART (aperture)\n",
+ (int)(dev_priv->gart_info.aper_size >> 20));
+ dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
+ dev_priv->gart_info.aper_size >> PAGE_SHIFT);
+ if (ret) {
+ NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
+ drm_get_resource_len(dev, 1),
+ DRM_MTRR_WC);
+ return 0;
+}
+
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
new file mode 100644
index 00000000000..6c66a34b634
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nouveau_notifier_init_channel(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_bo *ntfy = NULL;
+ int ret;
+
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
+ TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+ 0, 0x0000, false, true, &ntfy);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_bo_map(ntfy);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
+ if (ret)
+ goto out_err;
+
+ chan->notifier_bo = ntfy;
+out_err:
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(ntfy->gem);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ return ret;
+}
+
+void
+nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ if (!chan->notifier_bo)
+ return;
+
+ nouveau_bo_unmap(chan->notifier_bo);
+ mutex_lock(&dev->struct_mutex);
+ nouveau_bo_unpin(chan->notifier_bo);
+ drm_gem_object_unreference(chan->notifier_bo->gem);
+ mutex_unlock(&dev->struct_mutex);
+ nouveau_mem_takedown(&chan->notifier_heap);
+}
+
+static void
+nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
+ struct nouveau_gpuobj *gpuobj)
+{
+ NV_DEBUG(dev, "\n");
+
+ if (gpuobj->priv)
+ nouveau_mem_free_block(gpuobj->priv);
+}
+
+int
+nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
+ int size, uint32_t *b_offset)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *nobj = NULL;
+ struct mem_block *mem;
+ uint32_t offset;
+ int target, ret;
+
+ if (!chan->notifier_heap) {
+ NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
+ chan->id);
+ return -EINVAL;
+ }
+
+ mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
+ (struct drm_file *)-2, 0);
+ if (!mem) {
+ NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
+ return -ENOMEM;
+ }
+
+ offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
+ target = NV_DMA_TARGET_VIDMEM;
+ } else
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
+ dev_priv->card_type < NV_50) {
+ ret = nouveau_sgdma_get_page(dev, offset, &offset);
+ if (ret)
+ return ret;
+ target = NV_DMA_TARGET_PCI;
+ } else {
+ target = NV_DMA_TARGET_AGP;
+ }
+ } else {
+ NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
+ chan->notifier_bo->bo.mem.mem_type);
+ return -EINVAL;
+ }
+ offset += mem->start;
+
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
+ mem->size, NV_DMA_ACCESS_RW, target,
+ &nobj);
+ if (ret) {
+ nouveau_mem_free_block(mem);
+ NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
+ return ret;
+ }
+ nobj->dtor = nouveau_notifier_gpuobj_dtor;
+ nobj->priv = mem;
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &nobj);
+ nouveau_mem_free_block(mem);
+ NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ *b_offset = mem->start;
+ return 0;
+}
+
+int
+nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
+{
+ if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
+ return -EINVAL;
+
+ if (poffset) {
+ struct mem_block *mem = nobj->priv;
+
+ if (*poffset >= mem->size)
+ return false;
+
+ *poffset += mem->start;
+ }
+
+ return 0;
+}
+
+int
+nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_notifierobj_alloc *na = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 00000000000..93379bb81be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1294 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/* NVidia uses context objects to drive drawing operations.
+
+ Context objects can be selected into 8 subchannels in the FIFO,
+ and then used via DMA command buffers.
+
+ A context object is referenced by a user defined handle (CARD32). The HW
+ looks up graphics objects in a hash table in the instance RAM.
+
+ An entry in the hash table consists of 2 CARD32. The first CARD32 contains
+ the handle, the second one a bitfield that contains the address of the
+ object in instance RAM.
+
+ The format of the second CARD32 seems to be:
+
+ NV4 to NV30:
+
+ 15: 0 instance_addr >> 4
+ 17:16 engine (here uses 1 = graphics)
+ 28:24 channel id (here uses 0)
+ 31 valid (use 1)
+
+ NV40:
+
+ 15: 0 instance_addr >> 4 (maybe 19-0)
+ 21:20 engine (here uses 1 = graphics)
+ I'm unsure about the other bits, but using 0 seems to work.
+
+ The key into the hash table depends on the object handle and channel id and
+ is given as:
+*/
+static uint32_t
+nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t hash = 0;
+ int i;
+
+ NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
+
+ for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
+ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
+ handle >>= dev_priv->ramht_bits;
+ }
+
+ if (dev_priv->card_type < NV_50)
+ hash ^= channel << (dev_priv->ramht_bits - 4);
+ hash <<= 3;
+
+ NV_DEBUG(dev, "hash=0x%08x\n", hash);
+ return hash;
+}
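
For a concrete feel of the folding above, a stand-alone sketch of the same computation. The ramht_bits value is device-dependent, so 9 bits (a 4KiB RAMHT of 512 eight-byte entries) is assumed here purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same folding as nouveau_ramht_hash_handle(), with ramht_bits fixed at 9
 * for the example.  Pre-NV50 cards also mix the channel id into the hash.
 */
static uint32_t ramht_hash_example(int channel, uint32_t handle)
{
	const int ramht_bits = 9;             /* assumed: 2^9 entries */
	uint32_t hash = 0;
	int i;

	for (i = 32; i > 0; i -= ramht_bits) {
		hash ^= handle & ((1 << ramht_bits) - 1);
		handle >>= ramht_bits;
	}
	hash ^= channel << (ramht_bits - 4);  /* <NV50 mixes the channel in */
	return hash << 3;                     /* byte offset of the 8-byte entry */
}

int main(void)
{
	printf("0x%08x\n", ramht_hash_example(1, 0xbeef0201));
	return 0;
}
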
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+ uint32_t offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
+
+ if (dev_priv->card_type < NV_40)
+ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+ return (ctx != 0);
+}
+
+static int
+nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_channel *chan = ref->channel;
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+ uint32_t ctx, co, ho;
+
+ if (!ramht) {
+ NV_ERROR(dev, "No hash table!\n");
+ return -EINVAL;
+ }
+
+ if (dev_priv->card_type < NV_40) {
+ ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
+ (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ ctx = (ref->instance >> 4) |
+ (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else {
+ if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+ ctx = (ref->instance << 10) | 2;
+ } else {
+ ctx = (ref->instance >> 4) |
+ ((ref->gpuobj->engine <<
+ NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+ }
+ }
+
+ instmem->prepare_access(dev, true);
+ co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
+ do {
+ if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+ NV_DEBUG(dev,
+ "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, ref->handle, ctx);
+ nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
+ nv_wo32(dev, ramht, (co + 4)/4, ctx);
+
+ list_add_tail(&ref->list, &chan->ramht_refs);
+ instmem->finish_access(dev);
+ return 0;
+ }
+ NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+ chan->id, co, nv_ro32(dev, ramht, co/4));
+
+ co += 8;
+ if (co >= dev_priv->ramht_size)
+ co = 0;
+ } while (co != ho);
+ instmem->finish_access(dev);
+
+ NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+ return -ENOMEM;
+}
+
+static void
+nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_channel *chan = ref->channel;
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+ uint32_t co, ho;
+
+ if (!ramht) {
+ NV_ERROR(dev, "No hash table!\n");
+ return;
+ }
+
+ instmem->prepare_access(dev, true);
+ co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
+ do {
+ if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+ (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
+ NV_DEBUG(dev,
+ "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, ref->handle,
+ nv_ro32(dev, ramht, (co + 4)));
+ nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
+ nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
+
+ list_del(&ref->list);
+ instmem->finish_access(dev);
+ return;
+ }
+
+ co += 8;
+ if (co >= dev_priv->ramht_size)
+ co = 0;
+ } while (co != ho);
+ list_del(&ref->list);
+ instmem->finish_access(dev);
+
+ NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+ chan->id, ref->handle);
+}
+
+int
+nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
+ uint32_t size, int align, uint32_t flags,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_gpuobj *gpuobj;
+ struct mem_block *pramin = NULL;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
+ chan ? chan->id : -1, size, align, flags);
+
+ if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
+ return -EINVAL;
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+ gpuobj->flags = flags;
+ gpuobj->im_channel = chan;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ /* Choose between the global instmem heap and the per-channel private
+ * instmem heap. On <NV50, allow requests for private instmem
+ * to be satisfied from the global heap if no per-channel area
+ * is available.
+ */
+ if (chan) {
+ if (chan->ramin_heap) {
+ NV_DEBUG(dev, "private heap\n");
+ pramin = chan->ramin_heap;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ NV_DEBUG(dev, "global heap fallback\n");
+ pramin = dev_priv->ramin_heap;
+ }
+ } else {
+ NV_DEBUG(dev, "global heap\n");
+ pramin = dev_priv->ramin_heap;
+ }
+
+ if (!pramin) {
+ NV_ERROR(dev, "No PRAMIN heap!\n");
+ return -EINVAL;
+ }
+
+ if (!chan) {
+ ret = engine->instmem.populate(dev, gpuobj, &size);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+ }
+
+ /* Allocate a chunk of the PRAMIN aperture */
+ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
+ drm_order(align),
+ (struct drm_file *)-2, 0);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+
+ if (!chan) {
+ ret = engine->instmem.bind(dev, gpuobj);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ int i;
+
+ engine->instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ nv_wo32(dev, gpuobj, i/4, 0);
+ engine->instmem.finish_access(dev);
+ }
+
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
+int
+nouveau_gpuobj_early_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG(dev, "\n");
+
+ INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_gpuobj_new_fake(dev,
+ dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
+ &dev_priv->ramht, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG(dev, "\n");
+
+ nouveau_gpuobj_del(dev, &dev_priv->ramht);
+}
+
+void
+nouveau_gpuobj_late_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ struct list_head *entry, *tmp;
+
+ NV_DEBUG(dev, "\n");
+
+ list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
+ gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
+
+ NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
+ gpuobj, gpuobj->refcount);
+ gpuobj->refcount = 0;
+ nouveau_gpuobj_del(dev, &gpuobj);
+ }
+}
+
+int
+nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+ struct nouveau_gpuobj *gpuobj;
+ int i;
+
+ NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
+
+ if (!dev_priv || !pgpuobj || !(*pgpuobj))
+ return -EINVAL;
+ gpuobj = *pgpuobj;
+
+ if (gpuobj->refcount != 0) {
+ NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
+ return -EINVAL;
+ }
+
+ if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+ engine->instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ nv_wo32(dev, gpuobj, i/4, 0);
+ engine->instmem.finish_access(dev);
+ }
+
+ if (gpuobj->dtor)
+ gpuobj->dtor(dev, gpuobj);
+
+ if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
+ engine->instmem.clear(dev, gpuobj);
+
+ if (gpuobj->im_pramin) {
+ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+ kfree(gpuobj->im_pramin);
+ else
+ nouveau_mem_free_block(gpuobj->im_pramin);
+ }
+
+ list_del(&gpuobj->list);
+
+ *pgpuobj = NULL;
+ kfree(gpuobj);
+ return 0;
+}
+
+static int
+nouveau_gpuobj_instance_get(struct drm_device *dev,
+ struct nouveau_channel *chan,
+ struct nouveau_gpuobj *gpuobj, uint32_t *inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *cpramin;
+
+ /* <NV50 use PRAMIN address everywhere */
+ if (dev_priv->card_type < NV_50) {
+ *inst = gpuobj->im_pramin->start;
+ return 0;
+ }
+
+ if (chan && gpuobj->im_channel != chan) {
+ NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
+ gpuobj->im_channel->id, chan->id);
+ return -EINVAL;
+ }
+
+ /* NV50 channel-local instance */
+ if (chan) {
+ cpramin = chan->ramin->gpuobj;
+ *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
+ return 0;
+ }
+
+ /* NV50 global (VRAM) instance */
+ if (!gpuobj->im_channel) {
+ /* ...from global heap */
+ if (!gpuobj->im_backing) {
+ NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
+ return -EINVAL;
+ }
+ *inst = gpuobj->im_backing_start;
+ return 0;
+ } else {
+ /* ...from local heap */
+ cpramin = gpuobj->im_channel->ramin->gpuobj;
+ *inst = cpramin->im_backing_start +
+ (gpuobj->im_pramin->start - cpramin->im_pramin->start);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
+ uint32_t handle, struct nouveau_gpuobj *gpuobj,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_ref *ref;
+ uint32_t instance;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
+ chan ? chan->id : -1, handle, gpuobj);
+
+ if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
+ return -EINVAL;
+
+ if (!chan && !ref_ret)
+ return -EINVAL;
+
+ if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
+ /* sw object */
+ instance = 0x40;
+ } else {
+ ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
+ if (ret)
+ return ret;
+ }
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&ref->list);
+ ref->gpuobj = gpuobj;
+ ref->channel = chan;
+ ref->instance = instance;
+
+ if (!ref_ret) {
+ ref->handle = handle;
+
+ ret = nouveau_ramht_insert(dev, ref);
+ if (ret) {
+ kfree(ref);
+ return ret;
+ }
+ } else {
+ ref->handle = ~0;
+ *ref_ret = ref;
+ }
+
+ ref->gpuobj->refcount++;
+ return 0;
+}
+
+int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
+{
+ struct nouveau_gpuobj_ref *ref;
+
+ NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
+
+ if (!dev || !pref || *pref == NULL)
+ return -EINVAL;
+ ref = *pref;
+
+ if (ref->handle != ~0)
+ nouveau_ramht_remove(dev, ref);
+
+ if (ref->gpuobj) {
+ ref->gpuobj->refcount--;
+
+ if (ref->gpuobj->refcount == 0) {
+ if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
+ nouveau_gpuobj_del(dev, &ref->gpuobj);
+ }
+ }
+
+ *pref = NULL;
+ kfree(ref);
+ return 0;
+}
+
+int
+nouveau_gpuobj_new_ref(struct drm_device *dev,
+ struct nouveau_channel *oc, struct nouveau_channel *rc,
+ uint32_t handle, uint32_t size, int align,
+ uint32_t flags, struct nouveau_gpuobj_ref **ref)
+{
+ struct nouveau_gpuobj *gpuobj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct nouveau_gpuobj_ref *ref;
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+ if (ref->handle == handle) {
+ if (ref_ret)
+ *ref_ret = ref;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
+ uint32_t b_offset, uint32_t size,
+ uint32_t flags, struct nouveau_gpuobj **pgpuobj,
+ struct nouveau_gpuobj_ref **pref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ int i;
+
+ NV_DEBUG(dev,
+ "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
+ p_offset, b_offset, size, flags);
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
+ gpuobj->im_channel = NULL;
+ gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ if (p_offset != ~0) {
+ gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
+ GFP_KERNEL);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_pramin->start = p_offset;
+ gpuobj->im_pramin->size = size;
+ }
+
+ if (b_offset != ~0) {
+ gpuobj->im_backing = (struct nouveau_bo *)-1;
+ gpuobj->im_backing_start = b_offset;
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ nv_wo32(dev, gpuobj, i/4, 0);
+ dev_priv->engine.instmem.finish_access(dev);
+ }
+
+ if (pref) {
+ i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
+ if (i) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return i;
+ }
+ }
+
+ if (pgpuobj)
+ *pgpuobj = gpuobj;
+ return 0;
+}
+
+
+static uint32_t
+nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*XXX: dodgy hack for now */
+ if (dev_priv->card_type >= NV_50)
+ return 24;
+ if (dev_priv->card_type >= NV_40)
+ return 32;
+ return 16;
+}
+
+/*
+ DMA objects are used to reference a piece of memory in the
+ framebuffer, PCI or AGP address space. Each object is 16 bytes big
+ and looks as follows:
+
+ entry[0]
+ 11:0 class (seems like I can always use 0 here)
+ 12 page table present?
+ 13 page entry linear?
+ 15:14 access: 0 rw, 1 ro, 2 wo
+ 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
+ 31:20 dma adjust (bits 0-11 of the address)
+ entry[1]
+ dma limit (size of transfer)
+ entry[X]
+ 1 access: 0 readonly, 1 readwrite
+ 31:12 dma frame address of the page (bits 12-31 of the address)
+ entry[N]
+ page table terminator: nvidia uses the same value as the first pte,
+ rivatv uses 0xffffffff
+
+ Non-linear page tables need a list of frame addresses afterwards;
+ the rivatv project has some info on this.
+
+ The method below creates a DMA object in instance RAM and returns a handle
+ to it that can be used to set up context objects.
+*/
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
+ chan->id, class, offset, size);
+ NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+
+ switch (target) {
+ case NV_DMA_TARGET_AGP:
+ offset += dev_priv->gart_info.aper_base;
+ break;
+ default:
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan,
+ nouveau_gpuobj_class_instmem_size(dev, class),
+ 16, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+ return ret;
+ }
+
+ instmem->prepare_access(dev, true);
+
+ if (dev_priv->card_type < NV_50) {
+ uint32_t frame, adjust, pte_flags = 0;
+
+ if (access != NV_DMA_ACCESS_RO)
+ pte_flags |= (1<<1);
+ adjust = offset & 0x00000fff;
+ frame = offset & ~0x00000fff;
+
+ nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
+ (adjust << 20) |
+ (access << 14) |
+ (target << 16) |
+ class));
+ nv_wo32(dev, *gpuobj, 1, size - 1);
+ nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
+ nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
+ } else {
+ uint64_t limit = offset + size - 1;
+ uint32_t flags0, flags5;
+
+ if (target == NV_DMA_TARGET_VIDMEM) {
+ flags0 = 0x00190000;
+ flags5 = 0x00010000;
+ } else {
+ flags0 = 0x7fc00000;
+ flags5 = 0x00080000;
+ }
+
+ nv_wo32(dev, *gpuobj, 0, flags0 | class);
+ nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
+ nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
+ nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
+ (upper_32_bits(offset) & 0xff));
+ nv_wo32(dev, *gpuobj, 5, flags5);
+ }
+
+ instmem->finish_access(dev);
+
+ (*gpuobj)->engine = NVOBJ_ENGINE_SW;
+ (*gpuobj)->class = class;
+ return 0;
+}
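
To see how the pre-NV50 branch lays out the four object words described in the comment above, here is a stand-alone user-space sketch. It is illustrative only: the class/access/target numbers passed in main() are hypothetical placeholders for the driver's NV_CLASS_DMA_* and NV_DMA_* constants, not their real values.

#include <stdint.h>
#include <stdio.h>

/* Packs the four words of a pre-NV50 DMA object the same way the
 * card_type < NV_50 branch above does.  'readonly' selects the pte
 * access bit.
 */
static void pack_nv04_dma_object(uint32_t obj[4], int class, int access,
                                 int target, int readonly,
                                 uint32_t offset, uint32_t size)
{
	uint32_t adjust = offset & 0x00000fff;       /* bits 0-11 of the address */
	uint32_t frame  = offset & ~0x00000fff;      /* page-aligned base */
	uint32_t pte_flags = readonly ? 0 : (1 << 1);

	obj[0] = (1 << 12) | (1 << 13) |             /* page table present + linear */
	         (adjust << 20) |
	         (access << 14) |
	         (target << 16) |
	         class;
	obj[1] = size - 1;                           /* dma limit */
	obj[2] = frame | pte_flags;                  /* first pte */
	obj[3] = frame | pte_flags;                  /* terminator repeats the pte */
}

int main(void)
{
	uint32_t obj[4];

	/* hypothetical 64KiB read/write window at byte offset 0x1000 */
	pack_nv04_dma_object(obj, 0x3d, 0, 0, 0, 0x1000, 0x10000);
	printf("0x%08x 0x%08x 0x%08x 0x%08x\n", obj[0], obj[1], obj[2], obj[3]);
	return 0;
}
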
+
+int
+nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
+ uint64_t offset, uint64_t size, int access,
+ struct nouveau_gpuobj **gpuobj,
+ uint32_t *o_ret)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
+ (dev_priv->card_type >= NV_50 &&
+ dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ offset + dev_priv->vm_gart_base,
+ size, access, NV_DMA_TARGET_AGP,
+ gpuobj);
+ if (o_ret)
+ *o_ret = 0;
+ } else
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+ *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ if (offset & ~0xffffffffULL) {
+ NV_ERROR(dev, "obj offset exceeds 32-bits\n");
+ return -EINVAL;
+ }
+ if (o_ret)
+ *o_ret = (uint32_t)offset;
+ ret = (*gpuobj != NULL) ? 0 : -EINVAL;
+ } else {
+ NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Context objects in the instance RAM have the following structure.
+ * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.
+
+ NV4 - NV30:
+
+ entry[0]
+ 11:0 class
+ 12 chroma key enable
+ 13 user clip enable
+ 14 swizzle enable
+ 17:15 patch config:
+ scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
+ 18 synchronize enable
+ 19 endian: 1 big, 0 little
+ 21:20 dither mode
+ 23 single step enable
+ 24 patch status: 0 invalid, 1 valid
+ 25 context_surface 0: 1 valid
+ 26 context surface 1: 1 valid
+ 27 context pattern: 1 valid
+ 28 context rop: 1 valid
+ 29,30 context beta, beta4
+ entry[1]
+ 7:0 mono format
+ 15:8 color format
+ 31:16 notify instance address
+ entry[2]
+ 15:0 dma 0 instance address
+ 31:16 dma 1 instance address
+ entry[3]
+ dma method traps
+
+ NV40:
+ No idea what the exact format is. Here's what can be deduced:
+
+ entry[0]:
+ 11:0 class (maybe uses more bits here?)
+ 17 user clip enable
+ 21:19 patch config
+ 25 patch status valid ?
+ entry[1]:
+ 15:0 DMA notifier (maybe 20:0)
+ entry[2]:
+ 15:0 DMA 0 instance (maybe 20:0)
+ 24 big endian
+ entry[3]:
+ 15:0 DMA 1 instance (maybe 20:0)
+ entry[4]:
+ entry[5]:
+ set to 0?
+*/
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+
+ ret = nouveau_gpuobj_new(dev, chan,
+ nouveau_gpuobj_class_instmem_size(dev, class),
+ 16,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ if (dev_priv->card_type >= NV_50) {
+ nv_wo32(dev, *gpuobj, 0, class);
+ nv_wo32(dev, *gpuobj, 5, 0x00010000);
+ } else {
+ switch (class) {
+ case NV_CLASS_NULL:
+ nv_wo32(dev, *gpuobj, 0, 0x00001030);
+ nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
+ break;
+ default:
+ if (dev_priv->card_type >= NV_40) {
+ nv_wo32(dev, *gpuobj, 0, class);
+#ifdef __BIG_ENDIAN
+ nv_wo32(dev, *gpuobj, 2, 0x01000000);
+#endif
+ } else {
+#ifdef __BIG_ENDIAN
+ nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
+#else
+ nv_wo32(dev, *gpuobj, 0, class);
+#endif
+ }
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ (*gpuobj)->engine = NVOBJ_ENGINE_GR;
+ (*gpuobj)->class = class;
+ return 0;
+}
+
+static int
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+
+ if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
+ return -EINVAL;
+
+ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
+ if (!gpuobj)
+ return -ENOMEM;
+ gpuobj->engine = NVOBJ_ENGINE_SW;
+ gpuobj->class = class;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
+static int
+nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *pramin = NULL;
+ uint32_t size;
+ uint32_t base;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ /* Base amount for object storage (4KiB enough?) */
+ size = 0x1000;
+ base = 0;
+
+ /* PGRAPH context */
+
+ if (dev_priv->card_type == NV_50) {
+ /* Various fixed table thingos */
+ size += 0x1400; /* mostly unknown stuff */
+ size += 0x4000; /* vm pd */
+ base = 0x6000;
+ /* RAMHT, not sure about setting size yet, 32KiB to be safe */
+ size += 0x8000;
+ /* RAMFC */
+ size += 0x1000;
+ /* PGRAPH context */
+ size += 0x70000;
+ }
+
+ NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+ chan->id, size, base);
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
+ &chan->ramin);
+ if (ret) {
+ NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
+ return ret;
+ }
+ pramin = chan->ramin->gpuobj;
+
+ ret = nouveau_mem_init_heap(&chan->ramin_heap,
+ pramin->im_pramin->start + base, size);
+ if (ret) {
+ NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
+ uint32_t vram_h, uint32_t tt_h)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_gpuobj *vram = NULL, *tt = NULL;
+ int ret, i;
+
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
+ NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+
+ /* Reserve a block of PRAMIN for the channel
+ *XXX: maybe on <NV50 too at some point
+ */
+ if (0 || dev_priv->card_type == NV_50) {
+ ret = nouveau_gpuobj_channel_init_pramin(chan);
+ if (ret) {
+ NV_ERROR(dev, "init pramin\n");
+ return ret;
+ }
+ }
+
+ /* NV50 VM
+ * - Allocate per-channel page-directory
+ * - Map GART and VRAM into the channel's address space at the
+ * locations determined during init.
+ */
+ if (dev_priv->card_type >= NV_50) {
+ uint32_t vm_offset, pde;
+
+ instmem->prepare_access(dev, true);
+
+ vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
+ vm_offset += chan->ramin->gpuobj->im_pramin->start;
+
+ ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
+ 0, &chan->vm_pd, NULL);
+ if (ret) {
+ instmem->finish_access(dev);
+ return ret;
+ }
+ for (i = 0; i < 0x4000; i += 8) {
+ nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
+ nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
+ }
+
+ pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+ dev_priv->gart_info.sg_ctxdma,
+ &chan->vm_gart_pt);
+ if (ret) {
+ instmem->finish_access(dev);
+ return ret;
+ }
+ nv_wo32(dev, chan->vm_pd, pde++,
+ chan->vm_gart_pt->instance | 0x03);
+ nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+
+ pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+ dev_priv->vm_vram_pt[i],
+ &chan->vm_vram_pt[i]);
+ if (ret) {
+ instmem->finish_access(dev);
+ return ret;
+ }
+
+ nv_wo32(dev, chan->vm_pd, pde++,
+ chan->vm_vram_pt[i]->instance | 0x61);
+ nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
+ }
+
+ instmem->finish_access(dev);
+ }
+
+ /* RAMHT */
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ } else {
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
+ 0x8000, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ }
+
+ /* VRAM ctxdma */
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->vm_end,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_AGP, &vram);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &vram);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ /* TT memory ctxdma */
+ if (dev_priv->card_type >= NV_50) {
+ tt = vram;
+ } else
+ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &tt, NULL);
+ } else {
+ NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_device *dev = chan->dev;
+ struct list_head *entry, *tmp;
+ struct nouveau_gpuobj_ref *ref;
+ int i;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ if (!chan->ramht_refs.next)
+ return;
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+ nouveau_gpuobj_ref_del(dev, &ref);
+ }
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramht);
+
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
+ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+
+ if (chan->ramin_heap)
+ nouveau_mem_takedown(&chan->ramin_heap);
+ if (chan->ramin)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+
+}
+
+int
+nouveau_gpuobj_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+ int i;
+
+ if (dev_priv->card_type < NV_50) {
+ dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
+ if (!dev_priv->susres.ramin_copy)
+ return -ENOMEM;
+
+ for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
+ dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
+ return 0;
+ }
+
+ list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+ if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
+ continue;
+
+ gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
+ if (!gpuobj->im_backing_suspend) {
+ nouveau_gpuobj_resume(dev);
+ return -ENOMEM;
+ }
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+ for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
+ gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
+ dev_priv->engine.instmem.finish_access(dev);
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+
+ if (dev_priv->card_type < NV_50) {
+ vfree(dev_priv->susres.ramin_copy);
+ dev_priv->susres.ramin_copy = NULL;
+ return;
+ }
+
+ list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+ if (!gpuobj->im_backing_suspend)
+ continue;
+
+ vfree(gpuobj->im_backing_suspend);
+ gpuobj->im_backing_suspend = NULL;
+ }
+}
+
+void
+nouveau_gpuobj_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+ int i;
+
+ if (dev_priv->card_type < NV_50) {
+ for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
+ nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+ nouveau_gpuobj_suspend_cleanup(dev);
+ return;
+ }
+
+ list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
+ if (!gpuobj->im_backing_suspend)
+ continue;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
+ nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
+ dev_priv->engine.instmem.finish_access(dev);
+ }
+
+ nouveau_gpuobj_suspend_cleanup(dev);
+}
+
+int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_grobj_alloc *init = data;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_pgraph_object_class *grc;
+ struct nouveau_gpuobj *gr = NULL;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
+
+ if (init->handle == ~0)
+ return -EINVAL;
+
+ grc = pgraph->grclass;
+ while (grc->id) {
+ if (grc->id == init->class)
+ break;
+ grc++;
+ }
+
+ if (!grc->id) {
+ NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
+ return -EPERM;
+ }
+
+ if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+ return -EEXIST;
+
+ if (!grc->software)
+ ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
+ else
+ ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+
+ if (ret) {
+ NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
+ if (ret) {
+ NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ nouveau_gpuobj_del(dev, &gr);
+ return ret;
+ }
+
+ return 0;
+}
+
+int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gpuobj_free *objfree = data;
+ struct nouveau_gpuobj_ref *ref;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+
+ ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
+ if (ret)
+ return ret;
+ nouveau_gpuobj_ref_del(dev, &ref);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 00000000000..fa1b0e7165b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,836 @@
+
+
+#define NV03_BOOT_0 0x00100000
+# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
+# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
+# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
+# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
+# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
+# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
+# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
+# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
+# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
+
+#define NV04_FIFO_DATA 0x0010020c
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+
+#define NV_RAMIN 0x00700000
+
+#define NV_RAMHT_HANDLE_OFFSET 0
+#define NV_RAMHT_CONTEXT_OFFSET 4
+# define NV_RAMHT_CONTEXT_VALID (1<<31)
+# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
+# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
+# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
+# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
+# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
+# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
+# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
+# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
+
+/* DMA object defines */
+#define NV_DMA_ACCESS_RW 0
+#define NV_DMA_ACCESS_RO 1
+#define NV_DMA_ACCESS_WO 2
+#define NV_DMA_TARGET_VIDMEM 0
+#define NV_DMA_TARGET_PCI 2
+#define NV_DMA_TARGET_AGP 3
+/* The following is not a real value used by the card; it is changed by
+ * nouveau_object_dma_create */
+#define NV_DMA_TARGET_PCI_NONLINEAR 8
+
+/* Some object classes we care about in the drm */
+#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
+#define NV_CLASS_DMA_TO_MEMORY 0x00000003
+#define NV_CLASS_NULL 0x00000030
+#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
+
+#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE))
+#define NV03_USER__SIZE 16
+#define NV10_USER__SIZE 32
+#define NV03_USER_SIZE 0x00010000
+#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_PUT__SIZE 16
+#define NV10_USER_DMA_PUT__SIZE 32
+#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_GET__SIZE 16
+#define NV10_USER_DMA_GET__SIZE 32
+#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE))
+#define NV03_USER_REF_CNT__SIZE 16
+#define NV10_USER_REF_CNT__SIZE 32
+
+#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE))
+#define NV40_USER_SIZE 0x00001000
+#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_PUT__SIZE 32
+#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_GET__SIZE 32
+#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE))
+#define NV40_USER_REF_CNT__SIZE 32
+
+#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE))
+#define NV50_USER_SIZE 0x00002000
+#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_PUT__SIZE 128
+#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_GET__SIZE 128
+#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE))
+#define NV50_USER_REF_CNT__SIZE 128
+
+#define NV03_FIFO_SIZE 0x8000UL
+
+#define NV03_PMC_BOOT_0 0x00000000
+#define NV03_PMC_BOOT_1 0x00000004
+#define NV03_PMC_INTR_0 0x00000100
+# define NV_PMC_INTR_0_PFIFO_PENDING (1<<8)
+# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12)
+# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21)
+# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24)
+# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25)
+# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26)
+# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24)
+#define NV03_PMC_INTR_EN_0 0x00000140
+# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<<0)
+#define NV03_PMC_ENABLE 0x00000200
+# define NV_PMC_ENABLE_PFIFO (1<<8)
+# define NV_PMC_ENABLE_PGRAPH (1<<12)
+/* Disabling the below bit breaks newer (G7X only?) mobile chipsets;
+ * the card will hang early on in the X init process.
+ */
+# define NV_PMC_ENABLE_UNK13 (1<<13)
+#define NV40_PMC_BACKLIGHT 0x000015f0
+# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
+#define NV40_PMC_1700 0x00001700
+#define NV40_PMC_1704 0x00001704
+#define NV40_PMC_1708 0x00001708
+#define NV40_PMC_170C 0x0000170C
+
+/* probably PMC? */
+#define NV50_PUNK_BAR0_PRAMIN 0x00001700
+#define NV50_PUNK_BAR_CFG_BASE 0x00001704
+#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
+#define NV50_PUNK_BAR1_CTXDMA 0x00001708
+#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
+#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_UNK1710 0x00001710
+
+#define NV04_PBUS_PCI_NV_1 0x00001804
+#define NV04_PBUS_PCI_NV_19 0x0000184C
+#define NV04_PBUS_PCI_NV_20 0x00001850
+# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
+# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
+
+#define NV04_PTIMER_INTR_0 0x00009100
+#define NV04_PTIMER_INTR_EN_0 0x00009140
+#define NV04_PTIMER_NUMERATOR 0x00009200
+#define NV04_PTIMER_DENOMINATOR 0x00009210
+#define NV04_PTIMER_TIME_0 0x00009400
+#define NV04_PTIMER_TIME_1 0x00009410
+#define NV04_PTIMER_ALARM_0 0x00009420
+
+#define NV04_PFB_CFG0 0x00100200
+#define NV04_PFB_CFG1 0x00100204
+#define NV40_PFB_020C 0x0010020C
+#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
+#define NV10_PFB_TILE__SIZE 8
+#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
+#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
+#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
+#define NV10_PFB_CLOSE_PAGE2 0x0010033C
+#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
+#define NV40_PFB_TILE__SIZE_0 12
+#define NV40_PFB_TILE__SIZE_1 15
+#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
+#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
+#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
+#define NV40_PFB_UNK_800 0x00100800
+
+#define NV04_PGRAPH_DEBUG_0 0x00400080
+#define NV04_PGRAPH_DEBUG_1 0x00400084
+#define NV04_PGRAPH_DEBUG_2 0x00400088
+#define NV04_PGRAPH_DEBUG_3 0x0040008c
+#define NV10_PGRAPH_DEBUG_4 0x00400090
+#define NV03_PGRAPH_INTR 0x00400100
+#define NV03_PGRAPH_NSTATUS 0x00400104
+# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
+# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
+# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
+# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
+# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
+# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
+# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
+# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
+#define NV03_PGRAPH_NSOURCE 0x00400108
+# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
+# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
+# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
+# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
+# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
+# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
+# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
+# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
+# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
+# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
+# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
+# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
+# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
+# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
+# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
+# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
+# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
+#define NV03_PGRAPH_INTR_EN 0x00400140
+#define NV40_PGRAPH_INTR_EN 0x0040013C
+# define NV_PGRAPH_INTR_NOTIFY (1<<0)
+# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
+# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
+# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
+# define NV_PGRAPH_INTR_ERROR (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL 0x00400144
+#define NV10_PGRAPH_CTX_USER 0x00400148
+#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
+#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
+#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
+#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
+#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
+#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
+#define NV10_PGRAPH_CTX_CACHE1 0x00400160
+#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL 0x00400170
+#define NV04_PGRAPH_CTX_USER 0x00400174
+#define NV04_PGRAPH_CTX_CACHE1 0x00400180
+#define NV10_PGRAPH_CTX_CACHE2 0x00400180
+#define NV03_PGRAPH_CTX_CONTROL 0x00400190
+#define NV03_PGRAPH_CTX_USER 0x00400194
+#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
+#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
+#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
+#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304 0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310 0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
+#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
+#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
+#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
+#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
+#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
+#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
+#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
+#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
+#define NV03_PGRAPH_ABS_X_RAM 0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
+#define NV03_PGRAPH_X_MISC 0x00400500
+#define NV03_PGRAPH_Y_MISC 0x00400504
+#define NV04_PGRAPH_VALID1 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
+#define NV04_PGRAPH_MISC24_0 0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
+#define NV03_PGRAPH_CLIPX_0 0x00400524
+#define NV03_PGRAPH_CLIPX_1 0x00400528
+#define NV03_PGRAPH_CLIPY_0 0x0040052C
+#define NV03_PGRAPH_CLIPY_1 0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
+#define NV04_PGRAPH_MISC24_1 0x00400570
+#define NV04_PGRAPH_MISC24_2 0x00400574
+#define NV04_PGRAPH_VALID2 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1 0x00400580
+#define NV04_PGRAPH_PASSTHRU_2 0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
+#define NV04_PGRAPH_FORMAT_0 0x004005A8
+#define NV04_PGRAPH_FORMAT_1 0x004005AC
+#define NV04_PGRAPH_FILTER_0 0x004005B0
+#define NV04_PGRAPH_FILTER_1 0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0 0x00400600
+#define NV04_PGRAPH_ROP3 0x00400604
+#define NV04_PGRAPH_BETA_AND 0x00400608
+#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
+#define NV04_PGRAPH_FORMATS 0x00400618
+#define NV10_PGRAPH_DEBUG_2 0x00400620
+#define NV04_PGRAPH_BOFFSET0 0x00400640
+#define NV04_PGRAPH_BOFFSET1 0x00400644
+#define NV04_PGRAPH_BOFFSET2 0x00400648
+#define NV04_PGRAPH_BOFFSET3 0x0040064C
+#define NV04_PGRAPH_BOFFSET4 0x00400650
+#define NV04_PGRAPH_BOFFSET5 0x00400654
+#define NV04_PGRAPH_BBASE0 0x00400658
+#define NV04_PGRAPH_BBASE1 0x0040065C
+#define NV04_PGRAPH_BBASE2 0x00400660
+#define NV04_PGRAPH_BBASE3 0x00400664
+#define NV04_PGRAPH_BBASE4 0x00400668
+#define NV04_PGRAPH_BBASE5 0x0040066C
+#define NV04_PGRAPH_BPITCH0 0x00400670
+#define NV04_PGRAPH_BPITCH1 0x00400674
+#define NV04_PGRAPH_BPITCH2 0x00400678
+#define NV04_PGRAPH_BPITCH3 0x0040067C
+#define NV04_PGRAPH_BPITCH4 0x00400680
+#define NV04_PGRAPH_BLIMIT0 0x00400684
+#define NV04_PGRAPH_BLIMIT1 0x00400688
+#define NV04_PGRAPH_BLIMIT2 0x0040068C
+#define NV04_PGRAPH_BLIMIT3 0x00400690
+#define NV04_PGRAPH_BLIMIT4 0x00400694
+#define NV04_PGRAPH_BLIMIT5 0x00400698
+#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
+#define NV03_PGRAPH_STATUS 0x004006B0
+#define NV04_PGRAPH_STATUS 0x00400700
+#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
+#define NV04_PGRAPH_SURFACE 0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
+#define NV04_PGRAPH_STATE 0x00400710
+#define NV10_PGRAPH_SURFACE 0x00400710
+#define NV04_PGRAPH_NOTIFY 0x00400714
+#define NV10_PGRAPH_STATE 0x00400714
+#define NV10_PGRAPH_NOTIFY 0x00400718
+
+#define NV04_PGRAPH_FIFO 0x00400720
+
+#define NV04_PGRAPH_BPIXEL 0x00400724
+#define NV10_PGRAPH_RDI_INDEX 0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
+#define NV10_PGRAPH_RDI_DATA 0x00400754
+#define NV04_PGRAPH_DMA_PITCH 0x00400760
+#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
+#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
+#define NV10_PGRAPH_DMA_PITCH 0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
+#define NV04_PGRAPH_PATT_COLOR0 0x00400800
+#define NV04_PGRAPH_PATT_COLOR1 0x00400804
+#define NV04_PGRAPH_PATTERN 0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
+#define NV04_PGRAPH_CHROMA 0x00400814
+#define NV04_PGRAPH_CONTROL0 0x00400818
+#define NV04_PGRAPH_CONTROL1 0x0040081C
+#define NV04_PGRAPH_CONTROL2 0x00400820
+#define NV04_PGRAPH_BLEND 0x00400824
+#define NV04_PGRAPH_STORED_FMT 0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
+#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
+#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
+#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
+#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
+#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM 0x00400D00
+#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM 0x00400D40
+#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
+#define NV10_PGRAPH_XFMODE0 0x00400F40
+#define NV10_PGRAPH_XFMODE1 0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
+#define NV10_PGRAPH_PIPE_DATA 0x00400F54
+#define NV04_PGRAPH_DMA_START_0 0x00401000
+#define NV04_PGRAPH_DMA_START_1 0x00401004
+#define NV04_PGRAPH_DMA_LENGTH 0x00401008
+#define NV04_PGRAPH_DMA_MISC 0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0 0x00401020
+#define NV04_PGRAPH_DMA_DATA_1 0x00401024
+#define NV04_PGRAPH_DMA_RM 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+
+
+/* It's a guess that this works on NV03. Confirmed on NV04, though */
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+
+#define NV_CRTC0_INTSTAT 0x00600100
+#define NV_CRTC0_INTEN 0x00600140
+#define NV_CRTC1_INTSTAT 0x00602100
+#define NV_CRTC1_INTEN 0x00602140
+# define NV_CRTC_INTR_VBLANK (1<<0)
+
+#define NV04_PRAMIN 0x00700000
+
+/* FIFO commands. These are neither regs nor masks */
+#define NV03_FIFO_CMD_JUMP 0x20000000
+#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
+#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
+
+/* This is a partial import from rules-ng; a few things may be duplicated.
+ * Eventually we should completely import everything from rules-ng.
+ * For the moment check rules-ng for docs.
+ */
+
+#define NV50_PMC 0x00000000
+#define NV50_PMC__LEN 0x1
+#define NV50_PMC__ESIZE 0x2000
+# define NV50_PMC_BOOT_0 0x00000000
+# define NV50_PMC_BOOT_0_REVISION 0x000000ff
+# define NV50_PMC_BOOT_0_REVISION__SHIFT 0
+# define NV50_PMC_BOOT_0_ARCH 0x0ff00000
+# define NV50_PMC_BOOT_0_ARCH__SHIFT 20
+# define NV50_PMC_INTR_0 0x00000100
+# define NV50_PMC_INTR_0_PFIFO (1<<8)
+# define NV50_PMC_INTR_0_PGRAPH (1<<12)
+# define NV50_PMC_INTR_0_PTIMER (1<<20)
+# define NV50_PMC_INTR_0_HOTPLUG (1<<21)
+# define NV50_PMC_INTR_0_DISPLAY (1<<26)
+# define NV50_PMC_INTR_EN_0 0x00000140
+# define NV50_PMC_INTR_EN_0_MASTER (1<<0)
+# define NV50_PMC_INTR_EN_0_MASTER_DISABLED (0<<0)
+# define NV50_PMC_INTR_EN_0_MASTER_ENABLED (1<<0)
+# define NV50_PMC_ENABLE 0x00000200
+# define NV50_PMC_ENABLE_PFIFO (1<<8)
+# define NV50_PMC_ENABLE_PGRAPH (1<<12)
+
+#define NV50_PCONNECTOR 0x0000e000
+#define NV50_PCONNECTOR__LEN 0x1
+#define NV50_PCONNECTOR__ESIZE 0x1000
+# define NV50_PCONNECTOR_HOTPLUG_INTR 0x0000e050
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0 (1<<0)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1 (1<<1)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2 (1<<2)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3 (1<<3)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0 (1<<16)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1 (1<<17)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2 (1<<18)
+# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3 (1<<19)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL 0x0000e054
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0 (1<<0)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1 (1<<1)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2 (1<<2)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3 (1<<3)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0 (1<<16)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1 (1<<17)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2 (1<<18)
+# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3 (1<<19)
+# define NV50_PCONNECTOR_HOTPLUG_STATE 0x0000e104
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
+# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
+# define NV50_PCONNECTOR_I2C_PORT_0 0x0000e138
+# define NV50_PCONNECTOR_I2C_PORT_1 0x0000e150
+# define NV50_PCONNECTOR_I2C_PORT_2 0x0000e168
+# define NV50_PCONNECTOR_I2C_PORT_3 0x0000e180
+# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240
+# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258
+
+#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
+#define NV50_AUXCH_DATA_OUT__SIZE 4
+#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
+#define NV50_AUXCH_DATA_IN__SIZE 4
+#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0)
+#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4)
+#define NV50_AUXCH_CTRL_LINKSTAT 0x01000000
+#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY 0x00000000
+#define NV50_AUXCH_CTRL_LINKSTAT_READY 0x01000000
+#define NV50_AUXCH_CTRL_LINKEN 0x00100000
+#define NV50_AUXCH_CTRL_LINKEN_DISABLED 0x00000000
+#define NV50_AUXCH_CTRL_LINKEN_ENABLED 0x00100000
+#define NV50_AUXCH_CTRL_EXEC 0x00010000
+#define NV50_AUXCH_CTRL_EXEC_COMPLETE 0x00000000
+#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS 0x00010000
+#define NV50_AUXCH_CTRL_CMD 0x0000f000
+#define NV50_AUXCH_CTRL_CMD_SHIFT 12
+#define NV50_AUXCH_CTRL_LEN 0x0000000f
+#define NV50_AUXCH_CTRL_LEN_SHIFT 0
+#define NV50_AUXCH_STAT(i) ((i) * 0x50 + 0x0000e4e8)
+#define NV50_AUXCH_STAT_STATE 0x10000000
+#define NV50_AUXCH_STAT_STATE_NOT_READY 0x00000000
+#define NV50_AUXCH_STAT_STATE_READY 0x10000000
+#define NV50_AUXCH_STAT_REPLY 0x000f0000
+#define NV50_AUXCH_STAT_REPLY_AUX 0x00030000
+#define NV50_AUXCH_STAT_REPLY_AUX_ACK 0x00000000
+#define NV50_AUXCH_STAT_REPLY_AUX_NACK 0x00010000
+#define NV50_AUXCH_STAT_REPLY_AUX_DEFER 0x00020000
+#define NV50_AUXCH_STAT_REPLY_I2C 0x000c0000
+#define NV50_AUXCH_STAT_REPLY_I2C_ACK 0x00000000
+#define NV50_AUXCH_STAT_REPLY_I2C_NACK 0x00040000
+#define NV50_AUXCH_STAT_REPLY_I2C_DEFER 0x00080000
+#define NV50_AUXCH_STAT_COUNT 0x0000001f
+
+#define NV50_PBUS 0x00088000
+#define NV50_PBUS__LEN 0x1
+#define NV50_PBUS__ESIZE 0x1000
+# define NV50_PBUS_PCI_ID 0x00088000
+# define NV50_PBUS_PCI_ID_VENDOR_ID 0x0000ffff
+# define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT 0
+# define NV50_PBUS_PCI_ID_DEVICE_ID 0xffff0000
+# define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT 16
+
+#define NV50_PFB 0x00100000
+#define NV50_PFB__LEN 0x1
+#define NV50_PFB__ESIZE 0x1000
+
+#define NV50_PEXTDEV 0x00101000
+#define NV50_PEXTDEV__LEN 0x1
+#define NV50_PEXTDEV__ESIZE 0x1000
+
+#define NV50_PROM 0x00300000
+#define NV50_PROM__LEN 0x1
+#define NV50_PROM__ESIZE 0x10000
+
+#define NV50_PGRAPH 0x00400000
+#define NV50_PGRAPH__LEN 0x1
+#define NV50_PGRAPH__ESIZE 0x10000
+
+#define NV50_PDISPLAY 0x00610000
+#define NV50_PDISPLAY_OBJECTS 0x00610010
+#define NV50_PDISPLAY_INTR_0 0x00610020
+#define NV50_PDISPLAY_INTR_1 0x00610024
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC 0x0000000c
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT 2
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0 0x00000004
+#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1 0x00000008
+#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
+#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
+#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
+#define NV50_PDISPLAY_INTR_EN 0x0061002c
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
+#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
+#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
+#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
+#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
+#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
+#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
+#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
+#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
+#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
+#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
+#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
+
+#define NV50_PDISPLAY_CURSOR 0x00610270
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON 0x00000001
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
+#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
+
+#define NV50_PDISPLAY_CTRL_STATE 0x00610300
+#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
+#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
+#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
+#define NV50_PDISPLAY_CTRL_VAL 0x00610304
+#define NV50_PDISPLAY_UNK_380 0x00610380
+#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
+#define NV50_PDISPLAY_UNK_388 0x00610388
+#define NV50_PDISPLAY_UNK_38C 0x0061038c
+
+#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
+#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */ 0x00610a18
+#define NV50_PDISPLAY_CRTC_CLUT_MODE 0x00610a24
+#define NV50_PDISPLAY_CRTC_INTERLACE 0x00610a48
+#define NV50_PDISPLAY_CRTC_SCALE_CTRL 0x00610a50
+#define NV50_PDISPLAY_CRTC_CURSOR_CTRL 0x00610a58
+#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */ 0x00610a78
+#define NV50_PDISPLAY_CRTC_UNK0AB8 0x00610ab8
+#define NV50_PDISPLAY_CRTC_DEPTH 0x00610ac8
+#define NV50_PDISPLAY_CRTC_CLOCK 0x00610ad0
+#define NV50_PDISPLAY_CRTC_COLOR_CTRL 0x00610ae0
+#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END 0x00610ae8
+#define NV50_PDISPLAY_CRTC_MODE_UNK1 0x00610af0
+#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL 0x00610af8
+#define NV50_PDISPLAY_CRTC_SYNC_DURATION 0x00610b00
+#define NV50_PDISPLAY_CRTC_MODE_UNK2 0x00610b08
+#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */ 0x00610b10
+#define NV50_PDISPLAY_CRTC_FB_SIZE 0x00610b18
+#define NV50_PDISPLAY_CRTC_FB_PITCH 0x00610b20
+#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR 0x00100000
+#define NV50_PDISPLAY_CRTC_FB_POS 0x00610b28
+#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET 0x00610b38
+#define NV50_PDISPLAY_CRTC_REAL_RES 0x00610b40
+#define NV50_PDISPLAY_CRTC_SCALE_RES1 0x00610b48
+#define NV50_PDISPLAY_CRTC_SCALE_RES2 0x00610b50
+
+#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
+#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
+#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
+
+#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
+#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
+#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
+
+#define NV50_PDISPLAY_CRTC_CLK 0x00614000
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED 0x00000600
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i) ((i) * 0x800 + 0x614104)
+#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i) ((i) * 0x800 + 0x614108)
+#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i) ((i) * 0x800 + 0x614200)
+
+#define NV50_PDISPLAY_DAC_CLK 0x00614000
+#define NV50_PDISPLAY_DAC_CLK_CTRL2(i) ((i) * 0x800 + 0x614280)
+
+#define NV50_PDISPLAY_SOR_CLK 0x00614000
+#define NV50_PDISPLAY_SOR_CLK_CTRL2(i) ((i) * 0x800 + 0x614300)
+
+#define NV50_PDISPLAY_VGACRTC(r) ((r) + 0x619400)
+
+#define NV50_PDISPLAY_DAC 0x0061a000
+#define NV50_PDISPLAY_DAC_DPMS_CTRL(i) (0x0061a004 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF 0x00000001
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF 0x00000004
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED 0x00000010
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF 0x00000040
+#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL(i) (0x0061a00c + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE 0x00100000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT 0x38000000
+#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE 0x80000000
+#define NV50_PDISPLAY_DAC_CLK_CTRL1(i) (0x0061a010 + (i) * 0x800)
+#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED 0x00000600
+
+#define NV50_PDISPLAY_SOR 0x0061c000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL(i) (0x0061c004 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING 0x80000000
+#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON 0x00000001
+#define NV50_PDISPLAY_SOR_CLK_CTRL1(i) (0x0061c008 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED 0x00000600
+#define NV50_PDISPLAY_SOR_DPMS_STATE(i) (0x0061c030 + (i) * 0x800)
+#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
+#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
+#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
+#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
+#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
+#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
+#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
+#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000
+#define NV50_SOR_DP_CTRL_LANE_1_ENABLED 0x00020000
+#define NV50_SOR_DP_CTRL_LANE_2_ENABLED 0x00040000
+#define NV50_SOR_DP_CTRL_LANE_3_ENABLED 0x00080000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN 0x0f000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000
+#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
+#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
+
+#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
+#define NV50_PDISPLAY_USER_GET(i) ((i) * 0x1000 + 0x00640004)
+
+#define NV50_PDISPLAY_CURSOR_USER 0x00647000
+#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i) ((i) * 0x1000 + 0x00647080)
+#define NV50_PDISPLAY_CURSOR_USER_POS(i) ((i) * 0x1000 + 0x00647084)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 00000000000..4c7f1e403e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,321 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <linux/pagemap.h>
+
+#define NV_CTXDMA_PAGE_SHIFT 12
+#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
+#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
+
+struct nouveau_sgdma_be {
+ struct ttm_backend backend;
+ struct drm_device *dev;
+
+ dma_addr_t *pages;
+ unsigned nr_pages;
+
+ unsigned pte_start;
+ bool bound;
+};
+
+static int
+nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
+ struct page **pages, struct page *dummy_read_page)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+
+ NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
+
+ if (nvbe->pages)
+ return -EINVAL;
+
+ nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
+ if (!nvbe->pages)
+ return -ENOMEM;
+
+ nvbe->nr_pages = 0;
+ while (num_pages--) {
+ nvbe->pages[nvbe->nr_pages] =
+ pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev,
+ nvbe->pages[nvbe->nr_pages])) {
+ be->func->clear(be);
+ return -EFAULT;
+ }
+
+ nvbe->nr_pages++;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sgdma_clear(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+
+ NV_DEBUG(nvbe->dev, "\n");
+
+ if (nvbe && nvbe->pages) {
+ if (nvbe->bound)
+ be->func->unbind(be);
+
+ while (nvbe->nr_pages--) {
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ kfree(nvbe->pages);
+ nvbe->pages = NULL;
+ nvbe->nr_pages = 0;
+ }
+}
+
+static inline unsigned
+nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+
+ if (dev_priv->card_type < NV_50)
+ return pte + 2;
+
+ return pte << 1;
+}
+
+static int
+nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ unsigned i, j, pte;
+
+ NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+
+ dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
+ pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+ nvbe->pte_start = pte;
+ for (i = 0; i < nvbe->nr_pages; i++) {
+ dma_addr_t dma_offset = nvbe->pages[i];
+ uint32_t offset_l = lower_32_bits(dma_offset);
+ uint32_t offset_h = upper_32_bits(dma_offset);
+
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
+ if (dev_priv->card_type < NV_50)
+ nv_wo32(dev, gpuobj, pte++, offset_l | 3);
+ else {
+ nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
+ nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+ }
+
+ dma_offset += NV_CTXDMA_PAGE_SIZE;
+ }
+ }
+ dev_priv->engine.instmem.finish_access(nvbe->dev);
+
+ if (dev_priv->card_type == NV_50) {
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+ nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n",
+ nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+ }
+
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nouveau_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_device *dev = nvbe->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ unsigned i, j, pte;
+
+ NV_DEBUG(dev, "\n");
+
+ if (!nvbe->bound)
+ return 0;
+
+ dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
+ pte = nvbe->pte_start;
+ for (i = 0; i < nvbe->nr_pages; i++) {
+ dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
+
+ for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
+ if (dev_priv->card_type < NV_50)
+ nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
+ else {
+ nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
+ nv_wo32(dev, gpuobj, pte++, 0x00000000);
+ }
+
+ dma_offset += NV_CTXDMA_PAGE_SIZE;
+ }
+ }
+ dev_priv->engine.instmem.finish_access(nvbe->dev);
+
+ nvbe->bound = false;
+ return 0;
+}
+
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+ if (be) {
+ NV_DEBUG(nvbe->dev, "\n");
+
+ if (nvbe) {
+ if (nvbe->pages)
+ be->func->clear(be);
+ kfree(nvbe);
+ }
+ }
+}
+
+static struct ttm_backend_func nouveau_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nouveau_sgdma_bind,
+ .unbind = nouveau_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+struct ttm_backend *
+nouveau_sgdma_init_ttm(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_sgdma_be *nvbe;
+
+ if (!dev_priv->gart_info.sg_ctxdma)
+ return NULL;
+
+ nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
+ if (!nvbe)
+ return NULL;
+
+ nvbe->dev = dev;
+
+ nvbe->backend.func = &nouveau_sgdma_backend;
+
+ return &nvbe->backend;
+}
+
+int
+nouveau_sgdma_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ uint32_t aper_size, obj_size;
+ int i, ret;
+
+ if (dev_priv->card_type < NV_50) {
+ aper_size = (64 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
+ obj_size += 8; /* ctxdma header */
+ } else {
+ /* 1 entire VM page table */
+ aper_size = (512 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
+ }
+
+ ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+ NVOBJ_FLAG_ALLOW_NO_REFS |
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->gart_info.sg_dummy_page =
+ alloc_page(GFP_KERNEL|__GFP_DMA32);
+ set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
+ dev_priv->gart_info.sg_dummy_bus =
+ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ if (dev_priv->card_type < NV_50) {
+ /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA does this, and it is
+ * confirmed to work on c51. Perhaps this means NV_DMA_TARGET_PCIE
+ * on those cards? */
+ nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+ (1 << 12) /* PT present */ |
+ (0 << 13) /* PT *not* linear */ |
+ (NV_DMA_ACCESS_RW << 14) |
+ (NV_DMA_TARGET_PCI << 16));
+ nv_wo32(dev, gpuobj, 1, aper_size - 1);
+ for (i = 2; i < 2 + (aper_size >> 12); i++) {
+ nv_wo32(dev, gpuobj, i,
+ dev_priv->gart_info.sg_dummy_bus | 3);
+ }
+ } else {
+ for (i = 0; i < obj_size; i += 8) {
+ nv_wo32(dev, gpuobj, (i+0)/4,
+ dev_priv->gart_info.sg_dummy_bus | 0x21);
+ nv_wo32(dev, gpuobj, (i+4)/4, 0);
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ return 0;
+}
+
+void
+nouveau_sgdma_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->gart_info.sg_dummy_page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
+ NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ unlock_page(dev_priv->gart_info.sg_dummy_page);
+ __free_page(dev_priv->gart_info.sg_dummy_page);
+ dev_priv->gart_info.sg_dummy_page = NULL;
+ dev_priv->gart_info.sg_dummy_bus = 0;
+ }
+
+ nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+}
+
+int
+nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ int pte;
+
+ pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ if (dev_priv->card_type < NV_50) {
+ instmem->prepare_access(dev, false);
+ *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+ instmem->finish_access(dev);
+ return 0;
+ }
+
+ NV_ERROR(dev, "Unimplemented on NV50\n");
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 00000000000..e76ec2d207a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,850 @@
+/*
+ * Copyright 2005 Stephane Marchesin
+ * Copyright 2008 Stuart Bennett
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/swab.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include <linux/vgaarb.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nv50_display.h"
+
+static int nouveau_stub_init(struct drm_device *dev) { return 0; }
+static void nouveau_stub_takedown(struct drm_device *dev) {}
+
+static int nouveau_init_engine_ptrs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv04_fb_init;
+ engine->fb.takedown = nv04_fb_takedown;
+ engine->graph.grclass = nv04_graph_grclass;
+ engine->graph.init = nv04_graph_init;
+ engine->graph.takedown = nv04_graph_takedown;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.channel = nv04_graph_channel;
+ engine->graph.create_context = nv04_graph_create_context;
+ engine->graph.destroy_context = nv04_graph_destroy_context;
+ engine->graph.load_context = nv04_graph_load_context;
+ engine->graph.unload_context = nv04_graph_unload_context;
+ engine->fifo.channels = 16;
+ engine->fifo.init = nv04_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv04_fifo_channel_id;
+ engine->fifo.create_context = nv04_fifo_create_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
+ engine->fifo.load_context = nv04_fifo_load_context;
+ engine->fifo.unload_context = nv04_fifo_unload_context;
+ break;
+ case 0x10:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.grclass = nv10_graph_grclass;
+ engine->graph.init = nv10_graph_init;
+ engine->graph.takedown = nv10_graph_takedown;
+ engine->graph.channel = nv10_graph_channel;
+ engine->graph.create_context = nv10_graph_create_context;
+ engine->graph.destroy_context = nv10_graph_destroy_context;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.load_context = nv10_graph_load_context;
+ engine->graph.unload_context = nv10_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv10_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.unload_context = nv10_fifo_unload_context;
+ break;
+ case 0x20:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.grclass = nv20_graph_grclass;
+ engine->graph.init = nv20_graph_init;
+ engine->graph.takedown = nv20_graph_takedown;
+ engine->graph.channel = nv10_graph_channel;
+ engine->graph.create_context = nv20_graph_create_context;
+ engine->graph.destroy_context = nv20_graph_destroy_context;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.load_context = nv20_graph_load_context;
+ engine->graph.unload_context = nv20_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv10_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.unload_context = nv10_fifo_unload_context;
+ break;
+ case 0x30:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.grclass = nv30_graph_grclass;
+ engine->graph.init = nv30_graph_init;
+ engine->graph.takedown = nv20_graph_takedown;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.channel = nv10_graph_channel;
+ engine->graph.create_context = nv20_graph_create_context;
+ engine->graph.destroy_context = nv20_graph_destroy_context;
+ engine->graph.load_context = nv20_graph_load_context;
+ engine->graph.unload_context = nv20_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv10_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.unload_context = nv10_fifo_unload_context;
+ break;
+ case 0x40:
+ case 0x60:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown = nv04_instmem_takedown;
+ engine->instmem.suspend = nv04_instmem_suspend;
+ engine->instmem.resume = nv04_instmem_resume;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->instmem.prepare_access = nv04_instmem_prepare_access;
+ engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->mc.init = nv40_mc_init;
+ engine->mc.takedown = nv40_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv40_fb_init;
+ engine->fb.takedown = nv40_fb_takedown;
+ engine->graph.grclass = nv40_graph_grclass;
+ engine->graph.init = nv40_graph_init;
+ engine->graph.takedown = nv40_graph_takedown;
+ engine->graph.fifo_access = nv04_graph_fifo_access;
+ engine->graph.channel = nv40_graph_channel;
+ engine->graph.create_context = nv40_graph_create_context;
+ engine->graph.destroy_context = nv40_graph_destroy_context;
+ engine->graph.load_context = nv40_graph_load_context;
+ engine->graph.unload_context = nv40_graph_unload_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv40_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv40_fifo_create_context;
+ engine->fifo.destroy_context = nv40_fifo_destroy_context;
+ engine->fifo.load_context = nv40_fifo_load_context;
+ engine->fifo.unload_context = nv40_fifo_unload_context;
+ break;
+ case 0x50:
+ case 0x80: /* gotta love NVIDIA's consistency.. */
+ case 0x90:
+ case 0xA0:
+ engine->instmem.init = nv50_instmem_init;
+ engine->instmem.takedown = nv50_instmem_takedown;
+ engine->instmem.suspend = nv50_instmem_suspend;
+ engine->instmem.resume = nv50_instmem_resume;
+ engine->instmem.populate = nv50_instmem_populate;
+ engine->instmem.clear = nv50_instmem_clear;
+ engine->instmem.bind = nv50_instmem_bind;
+ engine->instmem.unbind = nv50_instmem_unbind;
+ engine->instmem.prepare_access = nv50_instmem_prepare_access;
+ engine->instmem.finish_access = nv50_instmem_finish_access;
+ engine->mc.init = nv50_mc_init;
+ engine->mc.takedown = nv50_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nouveau_stub_init;
+ engine->fb.takedown = nouveau_stub_takedown;
+ engine->graph.grclass = nv50_graph_grclass;
+ engine->graph.init = nv50_graph_init;
+ engine->graph.takedown = nv50_graph_takedown;
+ engine->graph.fifo_access = nv50_graph_fifo_access;
+ engine->graph.channel = nv50_graph_channel;
+ engine->graph.create_context = nv50_graph_create_context;
+ engine->graph.destroy_context = nv50_graph_destroy_context;
+ engine->graph.load_context = nv50_graph_load_context;
+ engine->graph.unload_context = nv50_graph_unload_context;
+ engine->fifo.channels = 128;
+ engine->fifo.init = nv50_fifo_init;
+ engine->fifo.takedown = nv50_fifo_takedown;
+ engine->fifo.disable = nv04_fifo_disable;
+ engine->fifo.enable = nv04_fifo_enable;
+ engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.channel_id = nv50_fifo_channel_id;
+ engine->fifo.create_context = nv50_fifo_create_context;
+ engine->fifo.destroy_context = nv50_fifo_destroy_context;
+ engine->fifo.load_context = nv50_fifo_load_context;
+ engine->fifo.unload_context = nv50_fifo_unload_context;
+ break;
+ default:
+ NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
+ return 1;
+ }
+
+ return 0;
+}
+
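+/* VGA arbiter callback: report which VGA resources the card decodes.  When
+ * VGA decoding is enabled we claim the legacy IO/MEM ranges as well as the
+ * normal ones, otherwise only the normal resources. */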
+static unsigned int
+nouveau_vga_set_decode(void *priv, bool state)
+{
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
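+/* Allocate the kernel's own channel and give it DMA objects covering all of
+ * VRAM (NvDmaVRAM) and the GART aperture (NvDmaGART). */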
+static int
+nouveau_card_init_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
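+	/* (struct drm_file *)-2 appears to mark this as a kernel-owned
+	 * channel rather than one belonging to a userspace client */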
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel,
+ (struct drm_file *)-2,
+ NvDmaFB, NvDmaTT);
+ if (ret)
+ return ret;
+
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+ 0, nouveau_mem_fb_amount(dev),
+ NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+ &gpuobj);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
+ gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ gpuobj = NULL;
+ ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
+ gpuobj, NULL);
+ if (ret)
+ goto out_err;
+
+ return 0;
+out_err:
+ nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ return ret;
+}
+
+int
+nouveau_card_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine;
+ int ret;
+
+ NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
+ return 0;
+
+ vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+
+ /* Initialise internal driver API hooks */
+ ret = nouveau_init_engine_ptrs(dev);
+ if (ret)
+ goto out;
+ engine = &dev_priv->engine;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+
+ /* Parse BIOS tables / Run init tables if card not POSTed */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = nouveau_bios_init(dev);
+ if (ret)
+ goto out;
+ }
+
+ ret = nouveau_gpuobj_early_init(dev);
+ if (ret)
+ goto out_bios;
+
+	/* Initialise instance memory; this must happen before mem_init so we
+ * know exactly how much VRAM we're able to use for "normal"
+ * purposes.
+ */
+ ret = engine->instmem.init(dev);
+ if (ret)
+ goto out_gpuobj_early;
+
+ /* Setup the memory manager */
+ ret = nouveau_mem_init(dev);
+ if (ret)
+ goto out_instmem;
+
+ ret = nouveau_gpuobj_init(dev);
+ if (ret)
+ goto out_mem;
+
+ /* PMC */
+ ret = engine->mc.init(dev);
+ if (ret)
+ goto out_gpuobj;
+
+ /* PTIMER */
+ ret = engine->timer.init(dev);
+ if (ret)
+ goto out_mc;
+
+ /* PFB */
+ ret = engine->fb.init(dev);
+ if (ret)
+ goto out_timer;
+
+ /* PGRAPH */
+ ret = engine->graph.init(dev);
+ if (ret)
+ goto out_fb;
+
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret)
+ goto out_graph;
+
+	/* this calls irq_preinstall, registers the irq handler and
+	 * calls irq_postinstall
+	 */
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto out_fifo;
+
+ ret = drm_vblank_init(dev, 0);
+ if (ret)
+ goto out_irq;
+
+ /* what about PVIDEO/PCRTC/PRAMDAC etc? */
+
+ if (!engine->graph.accel_blocked) {
+ ret = nouveau_card_init_channel(dev);
+ if (ret)
+ goto out_irq;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (dev_priv->card_type >= NV_50)
+ ret = nv50_display_create(dev);
+ else
+ ret = nv04_display_create(dev);
+ if (ret)
+ goto out_irq;
+ }
+
+ ret = nouveau_backlight_init(dev);
+ if (ret)
+ NV_ERROR(dev, "Error %d registering backlight\n", ret);
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_helper_initial_config(dev);
+
+ return 0;
+
+out_irq:
+ drm_irq_uninstall(dev);
+out_fifo:
+ engine->fifo.takedown(dev);
+out_graph:
+ engine->graph.takedown(dev);
+out_fb:
+ engine->fb.takedown(dev);
+out_timer:
+ engine->timer.takedown(dev);
+out_mc:
+ engine->mc.takedown(dev);
+out_gpuobj:
+ nouveau_gpuobj_takedown(dev);
+out_mem:
+ nouveau_mem_close(dev);
+out_instmem:
+ engine->instmem.takedown(dev);
+out_gpuobj_early:
+ nouveau_gpuobj_late_takedown(dev);
+out_bios:
+ nouveau_bios_takedown(dev);
+out:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ return ret;
+}
+
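+/* Tear down everything brought up by nouveau_card_init(), roughly in
+ * reverse order. */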
+static void nouveau_card_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+ nouveau_backlight_exit(dev);
+
+ if (dev_priv->channel) {
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ }
+
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->mc.takedown(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
+ mutex_unlock(&dev->struct_mutex);
+ nouveau_sgdma_takedown(dev);
+
+ nouveau_gpuobj_takedown(dev);
+ nouveau_mem_close(dev);
+ engine->instmem.takedown(dev);
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_uninstall(dev);
+
+ nouveau_gpuobj_late_takedown(dev);
+ nouveau_bios_takedown(dev);
+
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+ }
+}
+
+/* called when a client dies: release everything that was allocated for its
+ * file_priv */
+void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ nouveau_channel_cleanup(dev, file_priv);
+}
+
+/* first module load, setup the mmio/fb mapping */
+/* KMS: we need mmio at load time, not when the first drm client opens. */
+int nouveau_firstopen(struct drm_device *dev)
+{
+ return 0;
+}
+
+/* if we have an OF card, copy vbios to RAMIN */
+static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
+{
+#if defined(__powerpc__)
+ int size, i;
+ const uint32_t *bios;
+ struct device_node *dn = pci_device_to_OF_node(dev->pdev);
+ if (!dn) {
+ NV_INFO(dev, "Unable to get the OF node\n");
+ return;
+ }
+
+ bios = of_get_property(dn, "NVDA,BMP", &size);
+ if (bios) {
+ for (i = 0; i < size; i += 4)
+ nv_wi32(dev, i, bios[i/4]);
+ NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
+ } else {
+ NV_INFO(dev, "Unable to get the OF bios\n");
+ }
+#endif
+}
+
+int nouveau_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_nouveau_private *dev_priv;
+ uint32_t reg0;
+ resource_size_t mmio_start_offs;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (!dev_priv)
+ return -ENOMEM;
+ dev->dev_private = dev_priv;
+ dev_priv->dev = dev;
+
+ dev_priv->flags = flags & NOUVEAU_FLAGS;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+
+ NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
+ dev->pci_vendor, dev->pci_device, dev->pdev->class);
+
+ dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
+
+ if (dev_priv->acpi_dsm)
+ nouveau_hybrid_setup(dev);
+
+ dev_priv->wq = create_workqueue("nouveau");
+ if (!dev_priv->wq)
+ return -EINVAL;
+
+ /* resource 0 is mmio regs */
+ /* resource 1 is linear FB */
+ /* resource 2 is RAMIN (mmio regs + 0x1000000) */
+ /* resource 6 is bios */
+
+ /* map the mmio regs */
+ mmio_start_offs = pci_resource_start(dev->pdev, 0);
+ dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
+ if (!dev_priv->mmio) {
+ NV_ERROR(dev, "Unable to initialize the mmio mapping. "
+ "Please report your setup to " DRIVER_EMAIL "\n");
+ return -EINVAL;
+ }
+ NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
+ (unsigned long long)mmio_start_offs);
+
+#ifdef __BIG_ENDIAN
+ /* Put the card in BE mode if it's not */
+ if (nv_rd32(dev, NV03_PMC_BOOT_1))
+ nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
+
+ DRM_MEMORYBARRIER();
+#endif
+
+ /* Time to determine the card architecture */
+ reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
+
+ /* We're dealing with >=NV10 */
+ if ((reg0 & 0x0f000000) > 0) {
+		/* Bits 27-20 contain the architecture in hex */
+ dev_priv->chipset = (reg0 & 0xff00000) >> 20;
+ /* NV04 or NV05 */
+ } else if ((reg0 & 0xff00fff0) == 0x20004000) {
+ dev_priv->chipset = 0x04;
+ } else
+ dev_priv->chipset = 0xff;
+
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+ case 0x10:
+ case 0x20:
+ case 0x30:
+ dev_priv->card_type = dev_priv->chipset & 0xf0;
+ break;
+ case 0x40:
+ case 0x60:
+ dev_priv->card_type = NV_40;
+ break;
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ case 0xa0:
+ dev_priv->card_type = NV_50;
+ break;
+ default:
+ NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
+ return -EINVAL;
+ }
+
+ NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+ dev_priv->card_type, reg0);
+
+	/* map larger RAMIN aperture on NV40 and newer cards */
+ dev_priv->ramin = NULL;
+ if (dev_priv->card_type >= NV_40) {
+ int ramin_bar = 2;
+ if (pci_resource_len(dev->pdev, ramin_bar) == 0)
+ ramin_bar = 3;
+
+ dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
+ dev_priv->ramin = ioremap(
+ pci_resource_start(dev->pdev, ramin_bar),
+ dev_priv->ramin_size);
+ if (!dev_priv->ramin) {
+ NV_ERROR(dev, "Failed to init RAMIN mapping, "
+ "limited instance memory available\n");
+ }
+ }
+
+ /* On older cards (or if the above failed), create a map covering
+ * the BAR0 PRAMIN aperture */
+ if (!dev_priv->ramin) {
+ dev_priv->ramin_size = 1 * 1024 * 1024;
+ dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
+ dev_priv->ramin_size);
+ if (!dev_priv->ramin) {
+ NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
+ return -ENOMEM;
+ }
+ }
+
+ nouveau_OF_copy_vbios_to_ramin(dev);
+
+ /* Special flags */
+ if (dev->pci_device == 0x01a0)
+ dev_priv->flags |= NV_NFORCE;
+ else if (dev->pci_device == 0x01f0)
+ dev_priv->flags |= NV_NFORCE2;
+
+ /* For kernel modesetting, init card now and bring up fbcon */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ int ret = nouveau_card_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nouveau_close(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* In the case of an error dev_priv may not be allocated yet */
+ if (dev_priv && dev_priv->card_type)
+ nouveau_card_takedown(dev);
+}
+
+/* KMS: we need mmio at load time, not when the first drm client opens. */
+void nouveau_lastclose(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ nouveau_close(dev);
+}
+
+int nouveau_unload(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (dev_priv->card_type >= NV_50)
+ nv50_display_destroy(dev);
+ else
+ nv04_display_destroy(dev);
+ nouveau_close(dev);
+ }
+
+ iounmap(dev_priv->mmio);
+ iounmap(dev_priv->ramin);
+
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int
+nouveau_ioctl_card_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return nouveau_card_init(dev);
+}
+
+int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_getparam *getparam = data;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ switch (getparam->param) {
+ case NOUVEAU_GETPARAM_CHIPSET_ID:
+ getparam->value = dev_priv->chipset;
+ break;
+ case NOUVEAU_GETPARAM_PCI_VENDOR:
+ getparam->value = dev->pci_vendor;
+ break;
+ case NOUVEAU_GETPARAM_PCI_DEVICE:
+ getparam->value = dev->pci_device;
+ break;
+ case NOUVEAU_GETPARAM_BUS_TYPE:
+ if (drm_device_is_agp(dev))
+ getparam->value = NV_AGP;
+ else if (drm_device_is_pcie(dev))
+ getparam->value = NV_PCIE;
+ else
+ getparam->value = NV_PCI;
+ break;
+ case NOUVEAU_GETPARAM_FB_PHYSICAL:
+ getparam->value = dev_priv->fb_phys;
+ break;
+ case NOUVEAU_GETPARAM_AGP_PHYSICAL:
+ getparam->value = dev_priv->gart_info.aper_base;
+ break;
+ case NOUVEAU_GETPARAM_PCI_PHYSICAL:
+ if (dev->sg) {
+ getparam->value = (unsigned long)dev->sg->virtual;
+ } else {
+ NV_ERROR(dev, "Requested PCIGART address, "
+				"but no PCIGART was created\n");
+ return -EINVAL;
+ }
+ break;
+ case NOUVEAU_GETPARAM_FB_SIZE:
+ getparam->value = dev_priv->fb_available_size;
+ break;
+ case NOUVEAU_GETPARAM_AGP_SIZE:
+ getparam->value = dev_priv->gart_info.aper_size;
+ break;
+ case NOUVEAU_GETPARAM_VM_VRAM_BASE:
+ getparam->value = dev_priv->vm_vram_base;
+ break;
+ default:
+ NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+nouveau_ioctl_setparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_setparam *setparam = data;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ switch (setparam->param) {
+ default:
+ NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Wait until (value(reg) & mask) == val, or until the timeout expires */
+bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
+ uint32_t reg, uint32_t mask, uint32_t val)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if ((nv_rd32(dev, reg) & mask) == val)
+ return true;
+ } while (ptimer->read(dev) - start < timeout);
+
+ return false;
+}
+
+/* Waits for PGRAPH to go completely idle */
+bool nouveau_wait_for_idle(struct drm_device *dev)
+{
+ if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
+ nv_rd32(dev, NV04_PGRAPH_STATUS));
+ return false;
+ }
+
+ return true;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
new file mode 100644
index 00000000000..187eb84e4da
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+static struct vm_operations_struct nouveau_ttm_vm_ops;
+static const struct vm_operations_struct *ttm_vm_ops;
+
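+/* Fault handler installed in place of TTM's: sanity-check the buffer object
+ * and forward to the original TTM fault handler. */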
+static int
+nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ int ret;
+
+ if (unlikely(bo == NULL))
+ return VM_FAULT_NOPAGE;
+
+ ret = ttm_vm_ops->fault(vma, vmf);
+ return ret;
+}
+
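+/* mmap entry point: offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM
+ * maps and go through drm_mmap(), everything else is handled by TTM.  The
+ * TTM vm_ops are copied once so that our fault wrapper runs first. */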
+int
+nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_nouveau_private *dev_priv =
+ file_priv->minor->dev->dev_private;
+ int ret;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(ttm_vm_ops == NULL)) {
+ ttm_vm_ops = vma->vm_ops;
+ nouveau_ttm_vm_ops = *ttm_vm_ops;
+ nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
+ }
+
+ vma->vm_ops = &nouveau_ttm_vm_ops;
+ return 0;
+}
+
+static int
+nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
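+/* Take references on the driver-wide TTM memory accounting and buffer object
+ * state; dropped again in nouveau_ttm_global_release(). */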
+int
+nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
+{
+ struct ttm_global_reference *global_ref;
+ int ret;
+
+ global_ref = &dev_priv->ttm.mem_global_ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &nouveau_ttm_mem_global_init;
+ global_ref->release = &nouveau_ttm_mem_global_release;
+
+ ret = ttm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM memory accounting\n");
+ dev_priv->ttm.mem_global_ref.release = NULL;
+ return ret;
+ }
+
+ dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
+ global_ref = &dev_priv->ttm.bo_global_ref.ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+
+ ret = ttm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM BO subsystem\n");
+ ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ dev_priv->ttm.mem_global_ref.release = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
+{
+ if (dev_priv->ttm.mem_global_ref.release == NULL)
+ return;
+
+ ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
+ ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ dev_priv->ttm.mem_global_ref.release = NULL;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
new file mode 100644
index 00000000000..d2f143ed97c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -0,0 +1,1002 @@
+/*
+ * Copyright 1993-2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+ NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+ crtcstate->CRTC[index]);
+}
+
+static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+ regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
+ if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
+ regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
+ regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
+ }
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
+}
+
+static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+ nv_crtc->sharpness = level;
+ if (level < 0) /* blur is in hw range 0x3f -> 0x20 */
+ level += 0x40;
+ regp->ramdac_634 = level;
+ NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
+}
+
+#define PLLSEL_VPLL1_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
+#define PLLSEL_VPLL2_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
+#define PLLSEL_TV_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+/* NV4x 0x40.. pll notes:
+ * gpu pll: 0x4000 + 0x4004
+ * ?gpu? pll: 0x4008 + 0x400c
+ * vpll1: 0x4010 + 0x4014
+ * vpll2: 0x4018 + 0x401c
+ * mpll: 0x4020 + 0x4024
+ * mpll: 0x4038 + 0x403c
+ *
+ * the first register of each pair has some unknown details:
+ * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
+ * bits 20-23: (mpll) something to do with post divider?
+ * bits 28-31: related to single stage mode? (bit 8/12)
+ */
+
+static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
+ struct nouveau_pll_vals *pv = &regp->pllvals;
+ struct pll_lims pll_lim;
+
+ if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
+ return;
+
+ /* NM2 == 0 is used to determine single stage mode on two stage plls */
+ pv->NM2 = 0;
+
+ /* for newer nv4x the blob uses only the first stage of the vpll below a
+ * certain clock. for a certain nv4b this is 150MHz. since the max
+ * output frequency of the first stage for this card is 300MHz, it is
+ * assumed the threshold is given by vco1 maxfreq/2
+ */
+ /* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
+ * not 8, others unknown), the blob always uses both plls. no problem
+	 * has yet been observed in allowing the use of a single stage pll on all
+ * nv43 however. the behaviour of single stage use is untested on nv40
+ */
+ if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
+ memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
+
+ if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
+ return;
+
+ state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
+
+ /* The blob uses this always, so let's do the same */
+ if (dev_priv->card_type == NV_40)
+ state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
+ /* again nv40 and some nv43 act more like nv3x as described above */
+ if (dev_priv->chipset < 0x41)
+ state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
+ NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
+ state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
+
+ if (pv->NM2)
+ NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
+ pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
+ else
+ NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
+ pv->N1, pv->M1, pv->log2P);
+
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+}
+
+static void
+nv_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned char seq1 = 0, crtc17 = 0;
+ unsigned char crtc1A;
+
+ NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
+ nv_crtc->index);
+
+	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
+ return;
+
+ nv_crtc->last_dpms = mode;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, nv_crtc->index);
+
+ /* nv4ref indicates these two RPC1 bits inhibit h/v sync */
+ crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
+ NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
+ switch (mode) {
+ case DRM_MODE_DPMS_STANDBY:
+ /* Screen: Off; HSync: Off, VSync: On -- Not Supported */
+ seq1 = 0x20;
+ crtc17 = 0x80;
+ crtc1A |= 0x80;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Screen: Off; HSync: On, VSync: Off -- Not Supported */
+ seq1 = 0x20;
+ crtc17 = 0x80;
+ crtc1A |= 0x40;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Screen: Off; HSync: Off, VSync: Off */
+ seq1 = 0x20;
+ crtc17 = 0x00;
+ crtc1A |= 0xC0;
+ break;
+ case DRM_MODE_DPMS_ON:
+ default:
+ /* Screen: On; HSync: On, VSync: On */
+ seq1 = 0x00;
+ crtc17 = 0x80;
+ break;
+ }
+
+ NVVgaSeqReset(dev, nv_crtc->index, true);
+	/* Each head has its own sequencer, so we can turn it off when we want */
+ seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
+ NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
+ crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
+ mdelay(10);
+ NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
+ NVVgaSeqReset(dev, nv_crtc->index, false);
+
+ NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+}
+
+static bool
+nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void
+nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct drm_framebuffer *fb = crtc->fb;
+
+ /* Calculate our timings */
+ int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
+ int horizStart = (mode->crtc_hsync_start >> 3) - 1;
+ int horizEnd = (mode->crtc_hsync_end >> 3) - 1;
+ int horizTotal = (mode->crtc_htotal >> 3) - 5;
+ int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
+ int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
+ int vertDisplay = mode->crtc_vdisplay - 1;
+ int vertStart = mode->crtc_vsync_start - 1;
+ int vertEnd = mode->crtc_vsync_end - 1;
+ int vertTotal = mode->crtc_vtotal - 2;
+ int vertBlankStart = mode->crtc_vdisplay - 1;
+ int vertBlankEnd = mode->crtc_vtotal - 1;
+
+ struct drm_encoder *encoder;
+ bool fp_output = false;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (encoder->crtc == crtc &&
+ (nv_encoder->dcb->type == OUTPUT_LVDS ||
+ nv_encoder->dcb->type == OUTPUT_TMDS))
+ fp_output = true;
+ }
+
+ if (fp_output) {
+ vertStart = vertTotal - 3;
+ vertEnd = vertTotal - 2;
+ vertBlankStart = vertStart;
+ horizStart = horizTotal - 5;
+ horizEnd = horizTotal - 2;
+ horizBlankEnd = horizTotal + 4;
+#if 0
+ if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
+ /* This reportedly works around some video overlay bandwidth problems */
+ horizTotal += 2;
+#endif
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vertTotal |= 1;
+
+#if 0
+ ErrorF("horizDisplay: 0x%X \n", horizDisplay);
+ ErrorF("horizStart: 0x%X \n", horizStart);
+ ErrorF("horizEnd: 0x%X \n", horizEnd);
+ ErrorF("horizTotal: 0x%X \n", horizTotal);
+ ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
+ ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
+ ErrorF("vertDisplay: 0x%X \n", vertDisplay);
+ ErrorF("vertStart: 0x%X \n", vertStart);
+ ErrorF("vertEnd: 0x%X \n", vertEnd);
+ ErrorF("vertTotal: 0x%X \n", vertTotal);
+ ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
+ ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
+#endif
+
+ /*
+ * compute correct Hsync & Vsync polarity
+ */
+ if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
+ && (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
+
+ regp->MiscOutReg = 0x23;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ regp->MiscOutReg |= 0x40;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ regp->MiscOutReg |= 0x80;
+ } else {
+ int vdisplay = mode->vdisplay;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ vdisplay *= 2;
+ if (mode->vscan > 1)
+ vdisplay *= mode->vscan;
+ if (vdisplay < 400)
+ regp->MiscOutReg = 0xA3; /* +hsync -vsync */
+ else if (vdisplay < 480)
+ regp->MiscOutReg = 0x63; /* -hsync +vsync */
+ else if (vdisplay < 768)
+ regp->MiscOutReg = 0xE3; /* -hsync -vsync */
+ else
+ regp->MiscOutReg = 0x23; /* +hsync +vsync */
+ }
+
+ regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
+
+ /*
+ * Time Sequencer
+ */
+ regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
+ /* 0x20 disables the sequencer */
+ if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
+ regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
+ else
+ regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
+ regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
+ regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
+ regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
+
+ /*
+ * CRTC
+ */
+ regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
+ regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
+ regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
+ regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
+ XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
+ regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
+ regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
+ XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
+ regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
+ regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
+ XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
+ XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
+ (1 << 4) |
+ XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
+ XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
+ XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
+ XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
+ regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
+ 1 << 6 |
+ XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
+ regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
+ regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
+ regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
+ /* framebuffer can be larger than crtc scanout area. */
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+ regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
+ regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
+ regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
+ regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
+ regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
+
+ /*
+ * Some extended CRTC registers (they are not saved with the rest of the vga regs).
+ */
+
+ /* framebuffer can be larger than crtc scanout area. */
+ regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
+ MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
+ regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
+ XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
+ XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
+ XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
+ XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
+ regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
+ XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
+ XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
+ XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
+ regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
+ XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
+ XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
+ XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ horizTotal = (horizTotal >> 1) & ~1;
+ regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
+ regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
+ } else
+ regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff; /* interlace off */
+
+ /*
+ * Graphics Display Controller
+ */
+ regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
+ regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
+ regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
+ regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
+ regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
+
+ regp->Attribute[0] = 0x00; /* standard colormap translation */
+ regp->Attribute[1] = 0x01;
+ regp->Attribute[2] = 0x02;
+ regp->Attribute[3] = 0x03;
+ regp->Attribute[4] = 0x04;
+ regp->Attribute[5] = 0x05;
+ regp->Attribute[6] = 0x06;
+ regp->Attribute[7] = 0x07;
+ regp->Attribute[8] = 0x08;
+ regp->Attribute[9] = 0x09;
+ regp->Attribute[10] = 0x0A;
+ regp->Attribute[11] = 0x0B;
+ regp->Attribute[12] = 0x0C;
+ regp->Attribute[13] = 0x0D;
+ regp->Attribute[14] = 0x0E;
+ regp->Attribute[15] = 0x0F;
+ regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
+ /* Non-vga */
+ regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
+ regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
+ regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
+ regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
+}
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static void
+nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+ struct drm_encoder *encoder;
+ bool lvds_output = false, tmds_output = false, tv_output = false,
+ off_chip_digital = false;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ bool digital = false;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ digital = lvds_output = true;
+ if (nv_encoder->dcb->type == OUTPUT_TV)
+ tv_output = true;
+ if (nv_encoder->dcb->type == OUTPUT_TMDS)
+ digital = tmds_output = true;
+ if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
+ off_chip_digital = true;
+ }
+
+ /* Registers not directly related to the (s)vga mode */
+
+ /* What is the meaning of this register? */
+ /* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
+ regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
+
+ regp->crtc_eng_ctrl = 0;
+ /* Except for rare conditions I2C is enabled on the primary crtc */
+ if (nv_crtc->index == 0)
+ regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
+#if 0
+ /* Set overlay to desired crtc. */
+ if (dev->overlayAdaptor) {
+ NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
+ if (pPriv->overlayCRTC == nv_crtc->index)
+ regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
+ }
+#endif
+
+ /* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
+ regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
+ NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
+ NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
+ if (dev_priv->chipset >= 0x11)
+ regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
+
+ /* Unblock some timings */
+ regp->CRTC[NV_CIO_CRE_53] = 0;
+ regp->CRTC[NV_CIO_CRE_54] = 0;
+
+ /* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
+ if (lvds_output)
+ regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
+ else if (tmds_output)
+ regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
+ else
+ regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
+
+ /* These values seem to vary */
+ /* This register seems to be used by the bios to make certain decisions on some G70 cards? */
+ regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
+
+ nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
+
+ /* probably a scratch reg, but kept for cargo-cult purposes:
+ * bit0: crtc0?, head A
+ * bit6: lvds, head A
+ * bit7: (only in X), head A
+ */
+ if (nv_crtc->index == 0)
+ regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
+
+ /* The blob seems to take the current value from crtc 0, add 4 to that
+ * and reuse the old value for crtc 1 */
+ regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
+ if (!nv_crtc->index)
+ regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
+
+ /* the blob sometimes sets |= 0x10 (which is the same as setting |=
+ * 1 << 30 on 0x60.830), for no apparent reason */
+ regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
+
+ regp->crtc_830 = mode->crtc_vdisplay - 3;
+ regp->crtc_834 = mode->crtc_vdisplay - 1;
+
+ if (dev_priv->card_type == NV_40)
+ /* This is what the blob does */
+ regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
+
+ if (dev_priv->card_type >= NV_30)
+ regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
+
+ regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+
+ /* Some misc regs */
+ if (dev_priv->card_type == NV_40) {
+ regp->CRTC[NV_CIO_CRE_85] = 0xFF;
+ regp->CRTC[NV_CIO_CRE_86] = 0x1;
+ }
+
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
+ /* Enable slaved mode (called MODE_TV in nv4ref.h) */
+ if (lvds_output || tmds_output || tv_output)
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
+
+ /* Generic PRAMDAC regs */
+
+ if (dev_priv->card_type >= NV_10)
+ /* Only bit that bios and blob set. */
+ regp->nv10_cursync = (1 << 25);
+
+ regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+ NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
+ NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
+ if (crtc->fb->depth == 16)
+ regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ if (dev_priv->chipset >= 0x11)
+ regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
+
+ regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
+ regp->tv_setup = 0;
+
+ nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
+
+ /* Some values the blob sets */
+ regp->ramdac_8c0 = 0x100;
+ regp->ramdac_a20 = 0x0;
+ regp->ramdac_a24 = 0xfffff;
+ regp->ramdac_a34 = 0x1;
+}
+
+/**
+ * Sets up registers for the given mode/adjusted_mode pair.
+ *
+ * The clocks, CRTCs and outputs attached to this CRTC must be off.
+ *
+ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
+ * be easily turned on/off after this.
+ */
+static int
+nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	NV_DEBUG_KMS(dev, "CRTC mode on CRTC %d:\n", nv_crtc->index);
+ drm_mode_debug_printmodeline(adjusted_mode);
+
+ /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
+ nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
+
+ nv_crtc_mode_set_vga(crtc, adjusted_mode);
+ /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
+ if (dev_priv->card_type == NV_40)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+ nv_crtc_mode_set_regs(crtc, adjusted_mode);
+ nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
+ return 0;
+}
+
+static void nv_crtc_save(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
+ struct nv04_mode_state *saved = &dev_priv->saved_reg;
+ struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
+
+ if (nv_two_heads(crtc->dev))
+ NVSetOwner(crtc->dev, nv_crtc->index);
+
+ nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
+
+ /* init some state to saved value */
+ state->sel_clk = saved->sel_clk & ~(0x5 << 16);
+ crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
+ state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
+ crtc_state->gpio_ext = crtc_saved->gpio_ext;
+}
+
+static void nv_crtc_restore(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ int head = nv_crtc->index;
+ uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
+
+ if (nv_two_heads(crtc->dev))
+ NVSetOwner(crtc->dev, head);
+
+ nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
+ nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
+
+ nv_crtc->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, nv_crtc->index);
+
+ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ NVBlankScreen(dev, nv_crtc->index, true);
+
+	/* Some more preparation. */
+ NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
+ if (dev_priv->card_type == NV_40) {
+ uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
+ NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
+ }
+}
+
+static void nv_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
+ nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
+
+#ifdef __BIG_ENDIAN
+ /* turn on LFB swapping */
+ {
+ uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
+ tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
+ NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
+ }
+#endif
+
+ funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static void nv_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ NV_DEBUG_KMS(crtc->dev, "\n");
+
+ if (!nv_crtc)
+ return;
+
+ drm_crtc_cleanup(crtc);
+
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ kfree(nv_crtc);
+}
+
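+/* Push the CRTC's 256-entry gamma LUT (truncated to 8 bits per channel) into
+ * the hardware palette. */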
+static void
+nv_crtc_gamma_load(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
+ int i;
+
+ rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
+ for (i = 0; i < 256; i++) {
+ rgbs[i].r = nv_crtc->lut.r[i] >> 8;
+ rgbs[i].g = nv_crtc->lut.g[i] >> 8;
+ rgbs[i].b = nv_crtc->lut.b[i] >> 8;
+ }
+
+ nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
+}
+
+static void
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int i;
+
+ if (size != 256)
+ return;
+
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
+ }
+
+ /* We need to know the depth before we upload, but it's possible to
+ * get called before a framebuffer is bound. If this is the case,
+ * mark the lut values as dirty by setting depth==0, and it'll be
+ * uploaded on the first mode_set_base()
+ */
+ if (!nv_crtc->base.fb) {
+ nv_crtc->lut.depth = 0;
+ return;
+ }
+
+ nv_crtc_gamma_load(crtc);
+}
+
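+/* Pin the new framebuffer into VRAM and program the scanout format, pitch,
+ * start address and memory arbitration parameters for it. */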
+static int
+nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ int arb_burst, arb_lwm;
+ int ret;
+
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
+
+ nv_crtc->fb.offset = fb->nvbo->bo.offset;
+
+ if (nv_crtc->lut.depth != drm_fb->depth) {
+ nv_crtc->lut.depth = drm_fb->depth;
+ nv_crtc_gamma_load(crtc);
+ }
+
+ /* Update the framebuffer format. */
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
+ regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ if (crtc->fb->depth == 16)
+ regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
+ NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
+ regp->ramdac_gen_ctrl);
+
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+ regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+ XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
+
+ /* Update the framebuffer location. */
+ regp->fb_start = nv_crtc->fb.offset & ~3;
+ regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+ NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
+
+ /* Update the arbitration parameters. */
+ nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
+ &arb_burst, &arb_lwm);
+
+ regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
+ regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
+
+ if (dev_priv->card_type >= NV_30) {
+ regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
+ }
+
+ return 0;
+}
+
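+/* Convert a 32bpp ARGB cursor image into the 16bpp (1555) layout used by
+ * pre-NV11 hardware cursors. */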
+static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+ struct nouveau_bo *dst)
+{
+ int width = nv_cursor_width(dev);
+ uint32_t pixel;
+ int i, j;
+
+ for (i = 0; i < width; i++) {
+ for (j = 0; j < width; j++) {
+ pixel = nouveau_bo_rd32(src, i*64 + j);
+
+ nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
+ | (pixel & 0xf80000) >> 9
+ | (pixel & 0xf800) >> 6
+ | (pixel & 0xf8) >> 3);
+ }
+ }
+}
+
+static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
+ struct nouveau_bo *dst)
+{
+ uint32_t pixel;
+ int alpha, i;
+
+	/* nv11+ supports premultiplied (PM) or non-premultiplied (NPM) alpha
+ * cursors (though NPM in combination with fp dithering may not work on
+ * nv11, from "nv" driver history)
+ * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
+ * blob uses, however we get given PM cursors so we use PM mode
+ */
+ for (i = 0; i < 64 * 64; i++) {
+ pixel = nouveau_bo_rd32(src, i);
+
+ /* hw gets unhappy if alpha <= rgb values. for a PM image "less
+ * than" shouldn't happen; fix "equal to" case by adding one to
+ * alpha channel (slightly inaccurate, but so is attempting to
+ * get back to NPM images, due to limits of integer precision)
+ */
+ alpha = pixel >> 24;
+ if (alpha > 0 && alpha < 255)
+ pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);
+
+#ifdef __BIG_ENDIAN
+ {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset == 0x11) {
+ pixel = ((pixel & 0x000000ff) << 24) |
+ ((pixel & 0x0000ff00) << 8) |
+ ((pixel & 0x00ff0000) >> 8) |
+ ((pixel & 0xff000000) >> 24);
+ }
+ }
+#endif
+
+ nouveau_bo_wr32(dst, i, pixel);
+ }
+}
+
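+/* Upload a new 64x64 cursor image from the given GEM object into the
+ * per-CRTC cursor buffer (converting to the chipset's format) and show it,
+ * or hide the cursor when no buffer handle is passed. */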
+static int
+nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t buffer_handle, uint32_t width, uint32_t height)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_bo *cursor = NULL;
+ struct drm_gem_object *gem;
+ int ret = 0;
+
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ if (!buffer_handle) {
+ nv_crtc->cursor.hide(nv_crtc, true);
+ return 0;
+ }
+
+ gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+ if (!gem)
+ return -EINVAL;
+ cursor = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(cursor);
+ if (ret)
+ goto out;
+
+ if (dev_priv->chipset >= 0x11)
+ nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+ else
+ nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
+
+ nouveau_bo_unmap(cursor);
+ nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
+ nv_crtc->cursor.show(nv_crtc, true);
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int
+nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->cursor.set_pos(nv_crtc, x, y);
+ return 0;
+}
+
+static const struct drm_crtc_funcs nv04_crtc_funcs = {
+ .save = nv_crtc_save,
+ .restore = nv_crtc_restore,
+ .cursor_set = nv04_crtc_cursor_set,
+ .cursor_move = nv04_crtc_cursor_move,
+ .gamma_set = nv_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
+ .dpms = nv_crtc_dpms,
+ .prepare = nv_crtc_prepare,
+ .commit = nv_crtc_commit,
+ .mode_fixup = nv_crtc_mode_fixup,
+ .mode_set = nv_crtc_mode_set,
+ .mode_set_base = nv04_crtc_mode_set_base,
+ .load_lut = nv_crtc_gamma_load,
+};
+
+int
+nv04_crtc_create(struct drm_device *dev, int crtc_num)
+{
+ struct nouveau_crtc *nv_crtc;
+ int ret, i;
+
+ nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+ if (!nv_crtc)
+ return -ENOMEM;
+
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = i << 8;
+ nv_crtc->lut.g[i] = i << 8;
+ nv_crtc->lut.b[i] = i << 8;
+ }
+ nv_crtc->lut.depth = 0;
+
+ nv_crtc->index = crtc_num;
+ nv_crtc->last_dpms = NV_DPMS_CLEARED;
+
+ drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
+ drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
+
+ ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ }
+
+ nv04_cursor_init(nv_crtc);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
new file mode 100644
index 00000000000..89a91b9d8b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -0,0 +1,70 @@
+#include "drmP.h"
+#include "drm_mode.h"
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+
+static void
+nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
+{
+ nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
+}
+
+static void
+nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
+{
+ nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
+}
+
+static void
+nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+ NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
+ NV_PRAMDAC_CU_START_POS,
+ XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
+ XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
+}
+
+static void
+crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
+{
+ NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
+ crtcstate->CRTC[index]);
+}
+
+static void
+nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct drm_crtc *crtc = &nv_crtc->base;
+
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
+ MASK(NV_CIO_CRE_HCUR_ASI) |
+ XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
+ XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
+ if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
+ MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
+ regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;
+
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
+ crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
+ if (dev_priv->card_type == NV_40)
+ nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
+}
+
+int
+nv04_cursor_init(struct nouveau_crtc *crtc)
+{
+ crtc->cursor.set_offset = nv04_cursor_set_offset;
+ crtc->cursor.set_pos = nv04_cursor_set_pos;
+ crtc->cursor.hide = nv04_cursor_hide;
+ crtc->cursor.show = nv04_cursor_show;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
new file mode 100644
index 00000000000..d9f32879ba3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -0,0 +1,524 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+int nv04_dac_output_offset(struct drm_encoder *encoder)
+{
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ int offset = 0;
+
+ if (dcb->or & (8 | OUTPUT_C))
+ offset += 0x68;
+ if (dcb->or & (8 | OUTPUT_B))
+ offset += 0x2000;
+
+ return offset;
+}
+
+/*
+ * arbitrary limit to number of sense oscillations tolerated in one sample
+ * period (observed to be at least 13 in "nvidia")
+ */
+#define MAX_HBLANK_OSC 20
+
+/*
+ * arbitrary limit to number of conflicting sample pairs to tolerate at a
+ * voltage step (observed to be at least 5 in "nvidia")
+ */
+#define MAX_SAMPLE_PAIRS 10
+
+static int sample_load_twice(struct drm_device *dev, bool sense[2])
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ bool sense_a, sense_b, sense_b_prime;
+ int j = 0;
+
+ /*
+ * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
+ * then wait for transition 0x4->0x5->0x4: enter hblank, leave
+ * hblank again
+ * use a 10ms timeout (guards against crtc being inactive, in
+ * which case blank state would never change)
+ */
+ if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
+ return -EBUSY;
+ if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000001))
+ return -EBUSY;
+ if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+ 0x00000001, 0x00000000))
+ return -EBUSY;
+
+ udelay(100);
+ /* when level triggers, sense is _LO_ */
+ sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+
+ /* take another reading until it agrees with sense_a... */
+ do {
+ udelay(100);
+ sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+ if (sense_a != sense_b) {
+ sense_b_prime =
+ nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
+ if (sense_b == sense_b_prime) {
+ /* ... unless two consecutive subsequent
+ * samples agree; sense_a is replaced */
+ sense_a = sense_b;
+ /* force mis-match so we loop */
+ sense_b = !sense_a;
+ }
+ }
+ } while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);
+
+ if (j == MAX_HBLANK_OSC)
+ /* with so much oscillation, default to sense:LO */
+ sense[i] = false;
+ else
+ sense[i] = sense_a;
+ }
+
+ return 0;
+}
+
+static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ uint8_t saved_seq1, saved_pi, saved_rpc1;
+ uint8_t saved_palette0[3], saved_palette_mask;
+ uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
+ int i;
+ uint8_t blue;
+ bool sense = true;
+
+ /*
+ * for this detection to work, there needs to be a mode set up on the
+ * CRTC. this is presumed to be the case
+ */
+
+ if (nv_two_heads(dev))
+ /* only implemented for head A for now */
+ NVSetOwner(dev, 0);
+
+ saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
+
+ saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
+ saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+ msleep(10);
+
+ saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
+ saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
+ saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
+
+ nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
+ for (i = 0; i < 3; i++)
+ saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
+ saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);
+
+ saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
+ (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
+ NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
+ NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);
+
+ blue = 8; /* start of test range */
+
+ do {
+ bool sense_pair[2];
+
+ nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
+ /* testing blue won't find monochrome monitors. I don't care */
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);
+
+ i = 0;
+ /* take sample pairs until both samples in the pair agree */
+ do {
+ if (sample_load_twice(dev, sense_pair))
+ goto out;
+ } while ((sense_pair[0] != sense_pair[1]) &&
+ ++i < MAX_SAMPLE_PAIRS);
+
+ if (i == MAX_SAMPLE_PAIRS)
+ /* too much oscillation defaults to LO */
+ sense = false;
+ else
+ sense = sense_pair[0];
+
+ /*
+ * if sense goes LO before blue ramps to 0x18, monitor is not connected.
+ * ergo, if blue gets to 0x18, monitor must be connected
+ */
+ } while (++blue < 0x18 && sense);
+
+out:
+ nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
+ nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ for (i = 0; i < 3; i++)
+ nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
+ NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+
+ if (blue == 0x18) {
+ NV_INFO(dev, "Load detected on head A\n");
+ return connector_status_connected;
+ }
+
+ return connector_status_disconnected;
+}
+
+enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
+ saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
+ int head, present = 0;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+ if (dcb->type == OUTPUT_TV) {
+ testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
+
+ if (dev_priv->vbios->tvdactestval)
+ testval = dev_priv->vbios->tvdactestval;
+ } else {
+ testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
+
+ if (dev_priv->vbios->dactestval)
+ testval = dev_priv->vbios->dactestval;
+ }
+
+ saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
+ saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
+
+ saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);
+
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
+ if (regoffset == 0x68) {
+ saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+ }
+
+ saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+
+ msleep(4);
+
+ saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+ head = (saved_routput & 0x100) >> 8;
+#if 0
+ /* if there's a spare crtc, using it will minimise flicker for the case
+ * where the in-use crtc is in use by an off-chip tmds encoder */
+ if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
+ head ^= 1;
+#endif
+ /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
+ routput = (saved_routput & 0xfffffece) | head << 8;
+
+ if (dev_priv->card_type >= NV_40) {
+ if (dcb->type == OUTPUT_TV)
+ routput |= 0x1a << 16;
+ else
+ routput &= ~(0x1a << 16);
+ }
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
+ msleep(1);
+
+ temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
+ NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
+ temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+ temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+ msleep(5);
+
+ temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+
+ if (dcb->type == OUTPUT_TV)
+ present = (nv17_tv_detect(encoder, connector, temp)
+ == connector_status_connected);
+ else
+ present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
+
+ temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
+ temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
+
+ /* bios does something more complex for restoring, but I think this is good enough */
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
+ if (regoffset == 0x68)
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+ nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+
+ if (present) {
+ NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
+ return connector_status_connected;
+ }
+
+ return connector_status_disconnected;
+}
+
+
+static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void nv04_dac_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_disable(dev, head);
+
+ /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+ * at LCD__INDEX which we don't alter
+ */
+ if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
+ crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+}
+
+
+static void nv04_dac_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+
+ if (nv_gf4_disp_arch(dev)) {
+ struct drm_encoder *rebind;
+ uint32_t dac_offset = nv04_dac_output_offset(encoder);
+ uint32_t otherdac;
+
+ /* bit 16-19 are bits that are set on some G70 cards,
+ * but don't seem to have much effect */
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+ head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+ /* force any other vga encoders to bind to the other crtc */
+ list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
+ if (rebind == encoder
+ || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
+ continue;
+
+ dac_offset = nv04_dac_output_offset(rebind);
+ otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
+ (otherdac & ~0x0100) | (head ^ 1) << 8);
+ }
+ }
+
+ /* This could use refinement for flatpanels, but it should work this way */
+ if (dev_priv->chipset < 0x44)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+ else
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+}
+
+static void nv04_dac_commit(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+
+ if (nv_gf4_disp_arch(dev)) {
+ uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
+ int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
+ uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
+
+ if (enable) {
+ *dac_users |= 1 << dcb->index;
+ NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
+
+ } else {
+ *dac_users &= ~(1 << dcb->index);
+ if (!*dac_users)
+ NVWriteRAMDAC(dev, 0, dacclk_off,
+ dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
+ }
+ }
+}
+
+static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->last_dpms == mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+ NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv04_dac_save(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+
+ if (nv_gf4_disp_arch(dev))
+ nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder));
+}
+
+static void nv04_dac_restore(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+
+ if (nv_gf4_disp_arch(dev))
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
+ nv_encoder->restore.output);
+
+ nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dac_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ NV_DEBUG_KMS(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
+ .dpms = nv04_dac_dpms,
+ .save = nv04_dac_save,
+ .restore = nv04_dac_restore,
+ .mode_fixup = nv04_dac_mode_fixup,
+ .prepare = nv04_dac_prepare,
+ .commit = nv04_dac_commit,
+ .mode_set = nv04_dac_mode_set,
+ .detect = nv04_dac_detect
+};
+
+static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
+ .dpms = nv04_dac_dpms,
+ .save = nv04_dac_save,
+ .restore = nv04_dac_restore,
+ .mode_fixup = nv04_dac_mode_fixup,
+ .prepare = nv04_dac_prepare,
+ .commit = nv04_dac_commit,
+ .mode_set = nv04_dac_mode_set,
+ .detect = nv17_dac_detect
+};
+
+static const struct drm_encoder_funcs nv04_dac_funcs = {
+ .destroy = nv04_dac_destroy,
+};
+
+int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ const struct drm_encoder_helper_funcs *helper;
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder = NULL;
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ if (nv_gf4_disp_arch(dev))
+ helper = &nv17_dac_helper_funcs;
+ else
+ helper = &nv04_dac_helper_funcs;
+
+ drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, helper);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
new file mode 100644
index 00000000000..483f875bdb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -0,0 +1,623 @@
+/*
+ * Copyright 2003 NVIDIA, Corporation
+ * Copyright 2006 Dave Airlie
+ * Copyright 2007 Maarten Maathuis
+ * Copyright 2007-2009 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nvreg.h"
+
+#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
+ NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
+ NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
+#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE | \
+ NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE | \
+ NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
+
+static inline bool is_fpc_off(uint32_t fpc)
+{
+ return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
+ FP_TG_CONTROL_OFF);
+}
+
+int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
+{
+ /* special case of nv_read_tmds to find crtc associated with an output.
+ * this does not give a correct answer for off-chip dvi, but there's no
+ * use for such an answer anyway
+ */
+ int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+
+ NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
+ NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
+ return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
+}
+
+void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
+ int head, bool dl)
+{
+ /* The BIOS scripts don't do this for us, sadly
+ * Luckily we do know the values ;-)
+ *
+ * head < 0 indicates we wish to force a setting with the overrideval
+ * (for VT restore etc.)
+ */
+
+ int ramdac = (dcbent->or & OUTPUT_C) >> 2;
+ uint8_t tmds04 = 0x80;
+
+ if (head != ramdac)
+ tmds04 = 0x88;
+
+ if (dcbent->type == OUTPUT_LVDS)
+ tmds04 |= 0x01;
+
+ nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
+
+ if (dl) /* dual link */
+ nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
+}
+
+void nv04_dfp_disable(struct drm_device *dev, int head)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+
+ if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
+ FP_TG_CONTROL_ON) {
+ /* digital remnants must be cleaned before new crtc
+ * values are programmed. the delay gives the vga stuff time
+ * to realise it's in control again
+ */
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+ FP_TG_CONTROL_OFF);
+ msleep(50);
+ }
+ /* don't inadvertently turn it on when state written later */
+ crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
+}
+
+void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct nouveau_crtc *nv_crtc;
+ uint32_t *fpc;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ nv_crtc = nouveau_crtc(encoder->crtc);
+ fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+ if (is_fpc_off(*fpc)) {
+ /* using saved value is ok, as (is_digital && dpms_on &&
+ * fp_control==OFF) is (at present) *only* true when
+ * fpc's most recent change was by below "off" code
+ */
+ *fpc = nv_crtc->dpms_saved_fp_control;
+ }
+
+ nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
+ NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+ } else {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ nv_crtc = nouveau_crtc(crtc);
+ fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
+
+ nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
+ if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
+ nv_crtc->dpms_saved_fp_control = *fpc;
+ /* cut the FP output */
+ *fpc &= ~FP_TG_CONTROL_ON;
+ *fpc |= FP_TG_CONTROL_OFF;
+ NVWriteRAMDAC(dev, nv_crtc->index,
+ NV_PRAMDAC_FP_TG_CONTROL, *fpc);
+ }
+ }
+ }
+}
+
+static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+ /* For internal panels and gpu scaling on DVI we need the native mode */
+ if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+ if (!nv_connector->native_mode)
+ return false;
+ nv_encoder->mode = *nv_connector->native_mode;
+ adjusted_mode->clock = nv_connector->native_mode->clock;
+ } else {
+ nv_encoder->mode = *adjusted_mode;
+ }
+
+ return true;
+}
+
+static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
+ struct nouveau_encoder *nv_encoder, int head)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
+
+ if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
+ return;
+
+ /* SEL_CLK is only used on the primary ramdac
+ * It toggles spread spectrum PLL output and sets the bindings of PLLs
+ * to heads on digital outputs
+ */
+ if (head)
+ state->sel_clk |= bits1618;
+ else
+ state->sel_clk &= ~bits1618;
+
+ /* nv30:
+ * bit 0 NVClk spread spectrum on/off
+ * bit 2 MemClk spread spectrum on/off
+ * bit 4 PixClk1 spread spectrum on/off toggle
+ * bit 6 PixClk2 spread spectrum on/off toggle
+ *
+ * nv40 (observations from bios behaviour and mmio traces):
+ * bits 4&6 as for nv30
+ * bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6;
+ * maybe a different spread mode
+ * bits 8&10 seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
+ * The logic behind turning spread spectrum on/off in the first place,
+ * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
+ * entry has the necessary info)
+ */
+ if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
+ int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;
+
+ state->sel_clk &= ~0xf0;
+ state->sel_clk |= (head ? 0x40 : 0x10) << shift;
+ }
+}
+
+static void nv04_dfp_prepare(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
+ uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
+ uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
+
+ /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+ * at LCD__INDEX which we don't alter
+ */
+ if (!(*cr_lcd & 0x44)) {
+ *cr_lcd = 0x3;
+
+ if (nv_two_heads(dev)) {
+ if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
+ *cr_lcd |= head ? 0x0 : 0x8;
+ else {
+ *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ *cr_lcd |= 0x30;
+ if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
+ /* avoid being connected to both crtcs */
+ *cr_lcd_oth &= ~0x30;
+ NVWriteVgaCrtc(dev, head ^ 1,
+ NV_CIO_CRE_LCD__INDEX,
+ *cr_lcd_oth);
+ }
+ }
+ }
+ }
+}
+
+
+static void nv04_dfp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+ struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
+ struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_display_mode *output_mode = &nv_encoder->mode;
+ uint32_t mode_ratio, panel_ratio;
+
+ NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
+ drm_mode_debug_printmodeline(output_mode);
+
+ /* Initialize the FP registers in this CRTC. */
+ regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+ regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+ if (!nv_gf4_disp_arch(dev) ||
+ (output_mode->hsync_start - output_mode->hdisplay) >=
+ dev_priv->vbios->digital_min_front_porch)
+ regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
+ else
+ regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
+ regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
+ regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+ regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
+ regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
+
+ regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+ regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+ regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
+ regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
+ regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+ regp->fp_vert_regs[FP_VALID_START] = 0;
+ regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
+
+ /* bit26: a bit seen on some g7x, with no as yet discernible purpose */
+ regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+ (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
+ /* Deal with vsync/hsync polarity */
+ /* LVDS screens do set this, but modes with +ve syncs are very rare */
+ if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+ if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+ /* panel scaling first, as native would get set otherwise */
+ if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+ nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER) /* panel handles it */
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
+ else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
+ adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
+ else /* gpu needs to scale */
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
+ if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
+ regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
+ if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
+ output_mode->clock > 165000)
+ regp->fp_control |= (2 << 24);
+ if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+ bool duallink, dummy;
+
+ nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
+ clock, &duallink, &dummy);
+ if (duallink)
+ regp->fp_control |= (8 << 28);
+ } else
+ if (output_mode->clock > 165000)
+ regp->fp_control |= (8 << 28);
+
+ regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+ NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+ NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+ NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+ NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+ /* We want automatic scaling */
+ regp->fp_debug_1 = 0;
+ /* This can override HTOTAL and VTOTAL */
+ regp->fp_debug_2 = 0;
+
+ /* Use 20.12 fixed point format to avoid floats */
+ mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
+ panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
+ /* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
+ * get treated the same as SCALE_FULLSCREEN */
+ if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
+ mode_ratio != panel_ratio) {
+ uint32_t diff, scale;
+ bool divide_by_2 = nv_gf4_disp_arch(dev);
+
+ if (mode_ratio < panel_ratio) {
+ /* vertical needs to expand to glass size (automatic)
+ * horizontal needs to be scaled at vertical scale factor
+ * to maintain aspect */
+
+ scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
+ regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+ XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+ /* restrict area of screen used, horizontally */
+ diff = output_mode->hdisplay -
+ output_mode->vdisplay * mode_ratio / (1 << 12);
+ regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
+ regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
+ }
+
+ if (mode_ratio > panel_ratio) {
+ /* horizontal needs to expand to glass size (automatic)
+ * vertical needs to be scaled at horizontal scale factor
+ * to maintain aspect */
+
+ scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
+ regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+ XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
+
+ /* restrict area of screen used, vertically */
+ diff = output_mode->vdisplay -
+ (1 << 12) * output_mode->hdisplay / mode_ratio;
+ regp->fp_vert_regs[FP_VALID_START] += diff / 2;
+ regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
+ }
+ }
+
+ /* Output property. */
+ if (nv_connector->use_dithering) {
+ if (dev_priv->chipset == 0x11)
+ regp->dither = savep->dither | 0x00010000;
+ else {
+ int i;
+ regp->dither = savep->dither | 0x00000001;
+ for (i = 0; i < 3; i++) {
+ regp->dither_regs[i] = 0xe4e4e4e4;
+ regp->dither_regs[i + 3] = 0x44444444;
+ }
+ }
+ } else {
+ if (dev_priv->chipset != 0x11) {
+ /* reset them */
+ int i;
+ for (i = 0; i < 3; i++) {
+ regp->dither_regs[i] = savep->dither_regs[i];
+ regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
+ }
+ }
+ regp->dither = savep->dither;
+ }
+
+ regp->fp_margin_color = 0;
+}
+
+static void nv04_dfp_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct dcb_entry *dcbe = nv_encoder->dcb;
+ int head = nouveau_crtc(encoder->crtc)->index;
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+
+ if (dcbe->type == OUTPUT_TMDS)
+ run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
+ else if (dcbe->type == OUTPUT_LVDS)
+ call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
+
+ /* update fp_control state for any changes made by scripts,
+ * so correct value is written at DPMS on */
+ dev_priv->mode_reg.crtc_reg[head].fp_control =
+ NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+
+ /* This could use refinement for flatpanels, but it should work this way */
+ if (dev_priv->chipset < 0x44)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
+ else
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static inline bool is_powersaving_dpms(int mode)
+{
+ return (mode != DRM_MODE_DPMS_ON);
+}
+
+static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
+
+ if (nv_encoder->last_dpms == mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+ NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ if (was_powersaving && is_powersaving_dpms(mode))
+ return;
+
+ if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
+ struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
+
+ /* when removing an output, crtc may not be set, but PANEL_OFF
+ * must still be run
+ */
+ int head = crtc ? nouveau_crtc(crtc)->index :
+ nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ if (!nv_connector->native_mode) {
+ NV_ERROR(dev, "Not turning on LVDS without native mode\n");
+ return;
+ }
+ call_lvds_script(dev, nv_encoder->dcb, head,
+ LVDS_PANEL_ON, nv_connector->native_mode->clock);
+ } else
+ /* pxclk of 0 is fine for PANEL_OFF, and for a
+ * disconnected LVDS encoder there is no native_mode
+ */
+ call_lvds_script(dev, nv_encoder->dcb, head,
+ LVDS_PANEL_OFF, 0);
+ }
+
+ nv04_dfp_update_fp_control(encoder, mode);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
+ else {
+ dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
+ dev_priv->mode_reg.sel_clk &= ~0xf0;
+ }
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
+}
+
+static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->last_dpms == mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+ NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dfp_update_fp_control(encoder, mode);
+}
+
+static void nv04_dfp_save(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+
+ if (nv_two_heads(dev))
+ nv_encoder->restore.head =
+ nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
+}
+
+static void nv04_dfp_restore(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nv_encoder->restore.head;
+
+ if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+ struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
+ if (native_mode)
+ call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
+ native_mode->clock);
+ else
+ NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+
+ } else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
+ int clock = nouveau_hw_pllvals_to_clk
+ (&dev_priv->saved_reg.crtc_reg[head].pllvals);
+
+ run_tmds_table(dev, nv_encoder->dcb, head, clock);
+ }
+
+ nv_encoder->last_dpms = NV_DPMS_CLEARED;
+}
+
+static void nv04_dfp_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ NV_DEBUG_KMS(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
+ .dpms = nv04_lvds_dpms,
+ .save = nv04_dfp_save,
+ .restore = nv04_dfp_restore,
+ .mode_fixup = nv04_dfp_mode_fixup,
+ .prepare = nv04_dfp_prepare,
+ .commit = nv04_dfp_commit,
+ .mode_set = nv04_dfp_mode_set,
+ .detect = NULL,
+};
+
+static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
+ .dpms = nv04_tmds_dpms,
+ .save = nv04_dfp_save,
+ .restore = nv04_dfp_restore,
+ .mode_fixup = nv04_dfp_mode_fixup,
+ .prepare = nv04_dfp_prepare,
+ .commit = nv04_dfp_commit,
+ .mode_set = nv04_dfp_mode_set,
+ .detect = NULL,
+};
+
+static const struct drm_encoder_funcs nv04_dfp_funcs = {
+ .destroy = nv04_dfp_destroy,
+};
+
+int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ const struct drm_encoder_helper_funcs *helper;
+ struct drm_encoder *encoder;
+ struct nouveau_encoder *nv_encoder = NULL;
+ int type;
+
+ switch (entry->type) {
+ case OUTPUT_TMDS:
+ type = DRM_MODE_ENCODER_TMDS;
+ helper = &nv04_tmds_helper_funcs;
+ break;
+ case OUTPUT_LVDS:
+ type = DRM_MODE_ENCODER_LVDS;
+ helper = &nv04_lvds_helper_funcs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
+ drm_encoder_helper_add(encoder, helper);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
new file mode 100644
index 00000000000..ef77215fa5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_fb.h"
+#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+
+#define MULTIPLE_ENCODERS(e) (e & (e - 1))
+
+static void
+nv04_display_store_initial_head_owner(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset != 0x11) {
+ dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
+ goto ownerknown;
+ }
+
+ /* reading CR44 is broken on nv11, so we attempt to infer it */
+ if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */
+ dev_priv->crtc_owner = 0x4;
+ else {
+ uint8_t slaved_on_A, slaved_on_B;
+ bool tvA = false;
+ bool tvB = false;
+
+ NVLockVgaCrtcs(dev, false);
+
+ slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
+ 0x80;
+ if (slaved_on_B)
+ tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
+ MASK(NV_CIO_CRE_LCD_LCD_SELECT));
+
+ slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
+ 0x80;
+ if (slaved_on_A)
+ tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
+ MASK(NV_CIO_CRE_LCD_LCD_SELECT));
+
+ NVLockVgaCrtcs(dev, true);
+
+ if (slaved_on_A && !tvA)
+ dev_priv->crtc_owner = 0x0;
+ else if (slaved_on_B && !tvB)
+ dev_priv->crtc_owner = 0x3;
+ else if (slaved_on_A)
+ dev_priv->crtc_owner = 0x0;
+ else if (slaved_on_B)
+ dev_priv->crtc_owner = 0x3;
+ else
+ dev_priv->crtc_owner = 0x0;
+ }
+
+ownerknown:
+ NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);
+
+ /* we need to ensure the heads are not tied henceforth, or reading any
+ * 8 bit reg on head B will fail
+ * setting a single arbitrary head solves that */
+ NVSetOwner(dev, 0);
+}
+
+int
+nv04_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ uint16_t connector[16] = { 0 };
+ int i, ret;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ if (nv_two_heads(dev))
+ nv04_display_store_initial_head_owner(dev);
+ nouveau_hw_save_vga_fonts(dev, 1);
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dithering_property(dev);
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ switch (dev_priv->card_type) {
+ case NV_04:
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ break;
+ default:
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ break;
+ }
+
+ dev->mode_config.fb_base = dev_priv->fb_phys;
+
+ nv04_crtc_create(dev, 0);
+ if (nv_two_heads(dev))
+ nv04_crtc_create(dev, 1);
+
+ for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *dcbent = &dcb->entry[i];
+
+ switch (dcbent->type) {
+ case OUTPUT_ANALOG:
+ ret = nv04_dac_create(dev, dcbent);
+ break;
+ case OUTPUT_LVDS:
+ case OUTPUT_TMDS:
+ ret = nv04_dfp_create(dev, dcbent);
+ break;
+ case OUTPUT_TV:
+ if (dcbent->location == DCB_LOC_ON_CHIP)
+ ret = nv17_tv_create(dev, dcbent);
+ else
+ ret = nv04_tv_create(dev, dcbent);
+ break;
+ default:
+ NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
+ continue;
+ }
+
+ if (ret)
+ continue;
+
+ connector[dcbent->connector] |= (1 << dcbent->type);
+ }
+
+ for (i = 0; i < dcb->entries; i++) {
+ struct dcb_entry *dcbent = &dcb->entry[i];
+ uint16_t encoders;
+ int type;
+
+ encoders = connector[dcbent->connector];
+ if (!(encoders & (1 << dcbent->type)))
+ continue;
+ connector[dcbent->connector] = 0;
+
+ switch (dcbent->type) {
+ case OUTPUT_ANALOG:
+ if (!MULTIPLE_ENCODERS(encoders))
+ type = DRM_MODE_CONNECTOR_VGA;
+ else
+ type = DRM_MODE_CONNECTOR_DVII;
+ break;
+ case OUTPUT_TMDS:
+ if (!MULTIPLE_ENCODERS(encoders))
+ type = DRM_MODE_CONNECTOR_DVID;
+ else
+ type = DRM_MODE_CONNECTOR_DVII;
+ break;
+ case OUTPUT_LVDS:
+ type = DRM_MODE_CONNECTOR_LVDS;
+#if 0
+ /* don't create i2c adapter when lvds ddc not allowed */
+ if (dcbent->lvdsconf.use_straps_for_mode ||
+ dev_priv->vbios->fp_no_ddc)
+ i2c_index = 0xf;
+#endif
+ break;
+ case OUTPUT_TV:
+ type = DRM_MODE_CONNECTOR_TV;
+ break;
+ default:
+ type = DRM_MODE_CONNECTOR_Unknown;
+ continue;
+ }
+
+ nouveau_connector_create(dev, dcbent->connector, type);
+ }
+
+ /* Save previous state */
+ NVLockVgaCrtcs(dev, false);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ crtc->funcs->save(crtc);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+ func->save(encoder);
+ }
+
+ return 0;
+}
+
+void
+nv04_display_destroy(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ /* Turn every CRTC off. */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct drm_mode_set modeset = {
+ .crtc = crtc,
+ };
+
+ crtc->funcs->set_config(&modeset);
+ }
+
+ /* Restore state */
+ NVLockVgaCrtcs(dev, false);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+ func->restore(encoder);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ crtc->funcs->restore(crtc);
+
+ drm_mode_config_cleanup(dev);
+
+ nouveau_hw_save_vga_fonts(dev, 0);
+}
+
+void
+nv04_display_restore(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ NVLockVgaCrtcs(dev, false);
+
+ /* meh.. modeset apparently doesn't set up all the regs and depends
+ * on pre-existing state, for now load the state of the card *before*
+ * nouveau was loaded, and then do a modeset.
+ *
+ * the best thing to do is probably to make the save/restore routines not
+ * save/restore "pre-load" state, but be more general so we can save
+ * on suspend too.
+ */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct drm_encoder_helper_funcs *func = encoder->helper_private;
+
+ func->restore(encoder);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ crtc->funcs->restore(crtc);
+
+ if (nv_two_heads(dev)) {
+ NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
+ dev_priv->crtc_owner);
+ NVSetOwner(dev, dev_priv->crtc_owner);
+ }
+
+ NVLockVgaCrtcs(dev, true);
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
new file mode 100644
index 00000000000..638cf601c42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -0,0 +1,21 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_fb_init(struct drm_device *dev)
+{
+ /* This is what the DDX did for NV_ARCH_04, but an mmio-trace shows
+ * nvidia reading PFB_CFG_0, then writing back its original value.
+ * (which was 0x701114 in this case)
+ */
+
+ nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
+ return 0;
+}
+
+void
+nv04_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
new file mode 100644
index 00000000000..09a31071ee5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2009 Ben Skeggs
+ * Copyright 2008 Stuart Bennett
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+static void
+nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_copyarea(info, region);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
+ OUT_RING(chan, (region->sy << 16) | region->sx);
+ OUT_RING(chan, (region->dy << 16) | region->dx);
+ OUT_RING(chan, (region->height << 16) | region->width);
+ FIRE_RING(chan);
+}
+
+static void
+nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_fillrect(info, rect);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
+ BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
+ OUT_RING(chan, color);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
+ OUT_RING(chan, (rect->dx << 16) | rect->dy);
+ OUT_RING(chan, (rect->width << 16) | rect->height);
+ FIRE_RING(chan);
+}
+
+static void
+nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t fg;
+ uint32_t bg;
+ uint32_t dsize;
+ uint32_t width;
+ uint32_t *data = (uint32_t *)image->data;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (image->depth != 1) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ width = (image->width + 31) & ~31;
+ dsize = (width * image->height) >> 5;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
+ bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
+ } else {
+ fg = image->fg_color;
+ bg = image->bg_color;
+ }
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
+ OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ OUT_RING(chan, ((image->dy + image->height) << 16) |
+ ((image->dx + image->width) & 0xffff));
+ OUT_RING(chan, bg);
+ OUT_RING(chan, fg);
+ OUT_RING(chan, (image->height << 16) | image->width);
+ OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+
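+	/* Upload the bitmap data in chunks of at most 128 words per method call */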
+ while (dsize) {
+ int iter_len = dsize > 128 ? 128 : dsize;
+
+ if (RING_SPACE(chan, iter_len + 1)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
+ OUT_RINGp(chan, data, iter_len);
+ data += iter_len;
+ dsize -= iter_len;
+ }
+
+ FIRE_RING(chan);
+}
+
+static int
+nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nv04_fbcon_accel_init(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ int surface_fmt, pattern_fmt, rect_fmt;
+ int ret;
+
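+	/* Translate the framebuffer depth into the matching surface, pattern and rectangle colour formats */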
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ surface_fmt = 1;
+ pattern_fmt = 3;
+ rect_fmt = 3;
+ break;
+ case 16:
+ surface_fmt = 4;
+ pattern_fmt = 1;
+ rect_fmt = 1;
+ break;
+ case 32:
+ switch (info->var.transp.length) {
+ case 0: /* depth 24 */
+ case 8: /* depth 32 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ surface_fmt = 6;
+ pattern_fmt = 3;
+ rect_fmt = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ 0x0062 : 0x0042, NvCtxSurf2D);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+ if (ret)
+ return ret;
+
+ ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
+ 0x009f : 0x005f, NvImageBlit);
+ if (ret)
+ return ret;
+
+ if (RING_SPACE(chan, 49)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ return 0;
+ }
+
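+	/* Bind the 2D surface object on subchannel 1 and describe the framebuffer to it (DMA objects, format, pitch, offsets) */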
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvCtxSurf2D);
+ BEGIN_RING(chan, 1, 0x0184, 2);
+ OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, NvDmaFB);
+ BEGIN_RING(chan, 1, 0x0300, 4);
+ OUT_RING(chan, surface_fmt);
+ OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
+ OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+ OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
+
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvRop);
+ BEGIN_RING(chan, 1, 0x0300, 1);
+ OUT_RING(chan, 0x55);
+
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvImagePatt);
+ BEGIN_RING(chan, 1, 0x0300, 8);
+ OUT_RING(chan, pattern_fmt);
+#ifdef __BIG_ENDIAN
+ OUT_RING(chan, 2);
+#else
+ OUT_RING(chan, 1);
+#endif
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, ~0);
+ OUT_RING(chan, ~0);
+ OUT_RING(chan, ~0);
+ OUT_RING(chan, ~0);
+
+ BEGIN_RING(chan, 1, 0x0000, 1);
+ OUT_RING(chan, NvClipRect);
+ BEGIN_RING(chan, 1, 0x0300, 2);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
+
+ BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
+ OUT_RING(chan, NvImageBlit);
+ BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
+ OUT_RING(chan, NvCtxSurf2D);
+ BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
+ OUT_RING(chan, 3);
+
+ BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
+ OUT_RING(chan, NvGdiRect);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
+ OUT_RING(chan, NvCtxSurf2D);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
+ OUT_RING(chan, NvImagePatt);
+ OUT_RING(chan, NvRop);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
+ OUT_RING(chan, rect_fmt);
+ BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
+ OUT_RING(chan, 3);
+
+ FIRE_RING(chan);
+
+ info->fbops->fb_fillrect = nv04_fbcon_fillrect;
+ info->fbops->fb_copyarea = nv04_fbcon_copyarea;
+ info->fbops->fb_imageblit = nv04_fbcon_imageblit;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
new file mode 100644
index 00000000000..0c3cd53c731
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
+#define NV04_RAMFC__SIZE 32
+#define NV04_RAMFC_DMA_PUT 0x00
+#define NV04_RAMFC_DMA_GET 0x04
+#define NV04_RAMFC_DMA_INSTANCE 0x08
+#define NV04_RAMFC_DMA_STATE 0x0C
+#define NV04_RAMFC_DMA_FETCH 0x10
+#define NV04_RAMFC_ENGINE 0x14
+#define NV04_RAMFC_PULL1_ENGINE 0x18
+
+#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4)
+
+void
+nv04_fifo_disable(struct drm_device *dev)
+{
+ uint32_t tmp;
+
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
+}
+
+void
+nv04_fifo_enable(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+}
+
+bool
+nv04_fifo_reassign(struct drm_device *dev, bool enable)
+{
+ uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
+ return (reassign == 1);
+}
+
+int
+nv04_fifo_channel_id(struct drm_device *dev)
+{
+ return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv04_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
+ NV04_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
+ /* Setup initial state */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ RAMFC_WR(DMA_PUT, chan->pushbuf_base);
+ RAMFC_WR(DMA_GET, chan->pushbuf_base);
+ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
+ RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0));
+ dev_priv->engine.instmem.finish_access(dev);
+
+ /* enable the fifo dma operation */
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ return 0;
+}
+
+void
+nv04_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv04_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV04_RAMFC(chid), tmp;
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
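+	/* Restore the channel's PFIFO state from its RAMFC entry */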
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+ tmp = nv_ri32(dev, fc + 8);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv04_fifo_load_context(struct nouveau_channel *chan)
+{
+ uint32_t tmp;
+
+ nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+ nv04_fifo_do_load_context(chan->dev, chan->id);
+ nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+ tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+ nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv04_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan = NULL;
+ uint32_t tmp;
+ int chid;
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ return 0;
+
+ chan = dev_priv->fifos[chid];
+ if (!chan) {
+ NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
+ return -EINVAL;
+ }
+
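+	/* Save the active channel's PFIFO state back into its RAMFC entry */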
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
+ tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
+ RAMFC_WR(DMA_INSTANCE, tmp);
+ RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+ RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
+ RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+ RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+ dev_priv->engine.instmem.finish_access(dev);
+
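+	/* Switch PFIFO to the last channel ID so no real context remains loaded */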
+ nv04_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+ return 0;
+}
+
+static void
+nv04_fifo_init_reset(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, 0x003224, 0x000f0078);
+ nv_wr32(dev, 0x002044, 0x0101ffff);
+ nv_wr32(dev, 0x002040, 0x000000ff);
+ nv_wr32(dev, 0x002500, 0x00000000);
+ nv_wr32(dev, 0x003000, 0x00000000);
+ nv_wr32(dev, 0x003050, 0x00000000);
+ nv_wr32(dev, 0x003200, 0x00000000);
+ nv_wr32(dev, 0x003250, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000000);
+
+ nv_wr32(dev, 0x003250, 0x00000000);
+ nv_wr32(dev, 0x003270, 0x00000000);
+ nv_wr32(dev, 0x003210, 0x00000000);
+}
+
+static void
+nv04_fifo_init_ramxx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+}
+
+static void
+nv04_fifo_init_intr(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv04_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ int i;
+
+ nv04_fifo_init_reset(dev);
+ nv04_fifo_init_ramxx(dev);
+
+ nv04_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+ nv04_fifo_init_intr(dev);
+ pfifo->enable(dev);
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ if (dev_priv->fifos[i]) {
+ uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+ nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+ }
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
new file mode 100644
index 00000000000..d561d773c0f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
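+/* PGRAPH registers saved and restored on every software context switch */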
+static uint32_t nv04_graph_ctx_regs[] = {
+ NV04_PGRAPH_CTX_SWITCH1,
+ NV04_PGRAPH_CTX_SWITCH2,
+ NV04_PGRAPH_CTX_SWITCH3,
+ NV04_PGRAPH_CTX_SWITCH4,
+ NV04_PGRAPH_CTX_CACHE1,
+ NV04_PGRAPH_CTX_CACHE2,
+ NV04_PGRAPH_CTX_CACHE3,
+ NV04_PGRAPH_CTX_CACHE4,
+ 0x00400184,
+ 0x004001a4,
+ 0x004001c4,
+ 0x004001e4,
+ 0x00400188,
+ 0x004001a8,
+ 0x004001c8,
+ 0x004001e8,
+ 0x0040018c,
+ 0x004001ac,
+ 0x004001cc,
+ 0x004001ec,
+ 0x00400190,
+ 0x004001b0,
+ 0x004001d0,
+ 0x004001f0,
+ 0x00400194,
+ 0x004001b4,
+ 0x004001d4,
+ 0x004001f4,
+ 0x00400198,
+ 0x004001b8,
+ 0x004001d8,
+ 0x004001f8,
+ 0x0040019c,
+ 0x004001bc,
+ 0x004001dc,
+ 0x004001fc,
+ 0x00400174,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV04_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV04_PGRAPH_SURFACE,
+ NV04_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV04_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM+0x00,
+ NV04_PGRAPH_PATT_COLORRAM+0x01,
+ NV04_PGRAPH_PATT_COLORRAM+0x02,
+ NV04_PGRAPH_PATT_COLORRAM+0x03,
+ NV04_PGRAPH_PATT_COLORRAM+0x04,
+ NV04_PGRAPH_PATT_COLORRAM+0x05,
+ NV04_PGRAPH_PATT_COLORRAM+0x06,
+ NV04_PGRAPH_PATT_COLORRAM+0x07,
+ NV04_PGRAPH_PATT_COLORRAM+0x08,
+ NV04_PGRAPH_PATT_COLORRAM+0x09,
+ NV04_PGRAPH_PATT_COLORRAM+0x0A,
+ NV04_PGRAPH_PATT_COLORRAM+0x0B,
+ NV04_PGRAPH_PATT_COLORRAM+0x0C,
+ NV04_PGRAPH_PATT_COLORRAM+0x0D,
+ NV04_PGRAPH_PATT_COLORRAM+0x0E,
+ NV04_PGRAPH_PATT_COLORRAM+0x0F,
+ NV04_PGRAPH_PATT_COLORRAM+0x10,
+ NV04_PGRAPH_PATT_COLORRAM+0x11,
+ NV04_PGRAPH_PATT_COLORRAM+0x12,
+ NV04_PGRAPH_PATT_COLORRAM+0x13,
+ NV04_PGRAPH_PATT_COLORRAM+0x14,
+ NV04_PGRAPH_PATT_COLORRAM+0x15,
+ NV04_PGRAPH_PATT_COLORRAM+0x16,
+ NV04_PGRAPH_PATT_COLORRAM+0x17,
+ NV04_PGRAPH_PATT_COLORRAM+0x18,
+ NV04_PGRAPH_PATT_COLORRAM+0x19,
+ NV04_PGRAPH_PATT_COLORRAM+0x1A,
+ NV04_PGRAPH_PATT_COLORRAM+0x1B,
+ NV04_PGRAPH_PATT_COLORRAM+0x1C,
+ NV04_PGRAPH_PATT_COLORRAM+0x1D,
+ NV04_PGRAPH_PATT_COLORRAM+0x1E,
+ NV04_PGRAPH_PATT_COLORRAM+0x1F,
+ NV04_PGRAPH_PATT_COLORRAM+0x20,
+ NV04_PGRAPH_PATT_COLORRAM+0x21,
+ NV04_PGRAPH_PATT_COLORRAM+0x22,
+ NV04_PGRAPH_PATT_COLORRAM+0x23,
+ NV04_PGRAPH_PATT_COLORRAM+0x24,
+ NV04_PGRAPH_PATT_COLORRAM+0x25,
+ NV04_PGRAPH_PATT_COLORRAM+0x26,
+ NV04_PGRAPH_PATT_COLORRAM+0x27,
+ NV04_PGRAPH_PATT_COLORRAM+0x28,
+ NV04_PGRAPH_PATT_COLORRAM+0x29,
+ NV04_PGRAPH_PATT_COLORRAM+0x2A,
+ NV04_PGRAPH_PATT_COLORRAM+0x2B,
+ NV04_PGRAPH_PATT_COLORRAM+0x2C,
+ NV04_PGRAPH_PATT_COLORRAM+0x2D,
+ NV04_PGRAPH_PATT_COLORRAM+0x2E,
+ NV04_PGRAPH_PATT_COLORRAM+0x2F,
+ NV04_PGRAPH_PATT_COLORRAM+0x30,
+ NV04_PGRAPH_PATT_COLORRAM+0x31,
+ NV04_PGRAPH_PATT_COLORRAM+0x32,
+ NV04_PGRAPH_PATT_COLORRAM+0x33,
+ NV04_PGRAPH_PATT_COLORRAM+0x34,
+ NV04_PGRAPH_PATT_COLORRAM+0x35,
+ NV04_PGRAPH_PATT_COLORRAM+0x36,
+ NV04_PGRAPH_PATT_COLORRAM+0x37,
+ NV04_PGRAPH_PATT_COLORRAM+0x38,
+ NV04_PGRAPH_PATT_COLORRAM+0x39,
+ NV04_PGRAPH_PATT_COLORRAM+0x3A,
+ NV04_PGRAPH_PATT_COLORRAM+0x3B,
+ NV04_PGRAPH_PATT_COLORRAM+0x3C,
+ NV04_PGRAPH_PATT_COLORRAM+0x3D,
+ NV04_PGRAPH_PATT_COLORRAM+0x3E,
+ NV04_PGRAPH_PATT_COLORRAM+0x3F,
+ NV04_PGRAPH_PATTERN,
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ 0x00400600,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ NV04_PGRAPH_CONTROL0,
+ NV04_PGRAPH_CONTROL1,
+ NV04_PGRAPH_CONTROL2,
+ NV04_PGRAPH_BLEND,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400400,
+ 0x00400480,
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400534,
+ 0x00400538,
+ 0x00400514,
+ 0x00400518,
+ 0x0040051c,
+ 0x00400520,
+ 0x00400524,
+ 0x00400528,
+ 0x0040052c,
+ 0x00400530,
+ 0x00400d00,
+ 0x00400d40,
+ 0x00400d80,
+ 0x00400d04,
+ 0x00400d44,
+ 0x00400d84,
+ 0x00400d08,
+ 0x00400d48,
+ 0x00400d88,
+ 0x00400d0c,
+ 0x00400d4c,
+ 0x00400d8c,
+ 0x00400d10,
+ 0x00400d50,
+ 0x00400d90,
+ 0x00400d14,
+ 0x00400d54,
+ 0x00400d94,
+ 0x00400d18,
+ 0x00400d58,
+ 0x00400d98,
+ 0x00400d1c,
+ 0x00400d5c,
+ 0x00400d9c,
+ 0x00400d20,
+ 0x00400d60,
+ 0x00400da0,
+ 0x00400d24,
+ 0x00400d64,
+ 0x00400da4,
+ 0x00400d28,
+ 0x00400d68,
+ 0x00400da8,
+ 0x00400d2c,
+ 0x00400d6c,
+ 0x00400dac,
+ 0x00400d30,
+ 0x00400d70,
+ 0x00400db0,
+ 0x00400d34,
+ 0x00400d74,
+ 0x00400db4,
+ 0x00400d38,
+ 0x00400d78,
+ 0x00400db8,
+ 0x00400d3c,
+ 0x00400d7c,
+ 0x00400dbc,
+ 0x00400590,
+ 0x00400594,
+ 0x00400598,
+ 0x0040059c,
+ 0x004005a8,
+ 0x004005ac,
+ 0x004005b0,
+ 0x004005b4,
+ 0x004005c0,
+ 0x004005c4,
+ 0x004005c8,
+ 0x004005cc,
+ 0x004005d0,
+ 0x004005d4,
+ 0x004005d8,
+ 0x004005dc,
+ 0x004005e0,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV04_PGRAPH_DVD_COLORFMT,
+ NV04_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ 0x00400500,
+ 0x00400504,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2
+};
+
+struct graph_state {
+ int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+};
+
+struct nouveau_channel *
+nv04_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chid = dev_priv->engine.fifo.channels;
+
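+	/* If PGRAPH currently has a valid context loaded, CTX_USER holds the owning channel ID */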
+ if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
+ chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
+
+ if (chid >= dev_priv->engine.fifo.channels)
+ return NULL;
+
+ return dev_priv->fifos[chid];
+}
+
+void
+nv04_graph_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_channel *chan = NULL;
+ int chid;
+
+ pgraph->fifo_access(dev, false);
+ nouveau_wait_for_idle(dev);
+
+ /* If previous context is valid, we need to save it */
+ pgraph->unload_context(dev);
+
+ /* Load context for next channel */
+ chid = dev_priv->engine.fifo.channel_id(dev);
+ chan = dev_priv->fifos[chid];
+ if (chan)
+ nv04_graph_load_context(chan);
+
+ pgraph->fifo_access(dev, true);
+}
+
+int nv04_graph_create_context(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx;
+	NV_DEBUG(chan->dev, "nv04_graph_create_context %d\n", chan->id);
+
+ chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
+ GFP_KERNEL);
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+
+ /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
+ pgraph_ctx->nv04[0] = 0x0001ffff;
+	/* is this really needed? */
+#if 0
+ dev_priv->fifos[channel].pgraph_ctx[1] =
+ nv_rd32(dev, NV_PGRAPH_DEBUG_4);
+ dev_priv->fifos[channel].pgraph_ctx[2] =
+ nv_rd32(dev, 0x004006b0);
+#endif
+ return 0;
+}
+
+void nv04_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+
+ kfree(pgraph_ctx);
+ chan->pgraph_ctx = NULL;
+}
+
+int nv04_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ uint32_t tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
+
+ nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
+ tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
+ nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+ return 0;
+}
+
+int
+nv04_graph_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_channel *chan = NULL;
+ struct graph_state *ctx;
+ uint32_t tmp;
+ int i;
+
+ chan = pgraph->channel(dev);
+ if (!chan)
+ return 0;
+ ctx = chan->pgraph_ctx;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
+
+ nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+int nv04_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ /* Enable PGRAPH interrupts */
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
+ nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+	/* 1231C000 blob, 001 haiku */
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0xf2d91100); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
+	/* 0x72111100 blob, 01 haiku */
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+	/* haiku same */
+
+	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31); */
+	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+	/* haiku and blob 10d4 */
+
+	nv_wr32(dev, NV04_PGRAPH_STATE, 0xFFFFFFFF);
+	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= dev_priv->engine.fifo.channels << 24;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+
+ /* These don't belong here, they're part of a per-channel context */
+ nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+	nv_wr32(dev, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
+
+ return 0;
+}
+
+void nv04_graph_takedown(struct drm_device *dev)
+{
+}
+
+void
+nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+ if (enabled)
+ nv_wr32(dev, NV04_PGRAPH_FIFO,
+ nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
+ else
+ nv_wr32(dev, NV04_PGRAPH_FIFO,
+ nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
+}
+
+static int
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ chan->fence.last_sequence_irq = data;
+ nouveau_fence_handler(chan->dev, chan->id);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
+ int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+ uint32_t tmp;
+
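+	/* Patch the operation field (bits 15-17) in the object's instance and mirror it into the live PGRAPH context */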
+ tmp = nv_ri32(dev, instance);
+ tmp &= ~0x00038000;
+ tmp |= ((data & 7) << 15);
+
+ nv_wi32(dev, instance, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+ return 0;
+}
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
+ { 0x0150, nv04_graph_mthd_set_ref },
+ {}
+};
+
+static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {},
+};
+
+struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
+ { 0x0039, false, nv04_graph_mthds_m2mf },
+ { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
+ { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
+ { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
+ { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
+ { 0x0030, false, NULL }, /* null */
+ { 0x0042, false, NULL }, /* surf2d */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x0052, false, NULL }, /* swzsurf */
+ { 0x0053, false, NULL }, /* surf3d */
+ { 0x0054, false, NULL }, /* tex_tri */
+ { 0x0055, false, NULL }, /* multitex_tri */
+ {}
+};
+
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
new file mode 100644
index 00000000000..a20c206625a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -0,0 +1,208 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+/* returns the size of fifo context */
+static int
+nouveau_fifo_ctx_size(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset >= 0x40)
+ return 128;
+ else
+ if (dev_priv->chipset >= 0x17)
+ return 64;
+
+ return 32;
+}
+
+static void
+nv04_instmem_determine_amount(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Figure out how much instance memory we need */
+ if (dev_priv->card_type >= NV_40) {
+ /* We'll want more instance memory than this on some NV4x cards.
+ * There's a 16MB aperture to play with that maps onto the end
+ * of vram. For now, only reserve a small piece until we know
+ * more about what each chipset requires.
+ */
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x40:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+ break;
+ default:
+ dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+ break;
+ }
+ } else {
+		/* XXX: what *are* the limits on <NV40 cards? */
+ dev_priv->ramin_rsvd_vram = (512 * 1024);
+ }
+ NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
+
+ /* Clear all of it, except the BIOS image that's in the first 64KiB */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
+ nv_wi32(dev, i, 0x00000000);
+ dev_priv->engine.instmem.finish_access(dev);
+}
+
+static void
+nv04_instmem_configure_fixed_tables(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
+
+ /* FIFO hash table (RAMHT)
+ * use 4k hash table at RAMIN+0x10000
+ * TODO: extend the hash table
+ */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */
+ dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */
+ NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
+ dev_priv->ramht_size);
+
+	/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
+ dev_priv->ramro_offset = 0x11200;
+ dev_priv->ramro_size = 512;
+ NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
+ dev_priv->ramro_size);
+
+ /* FIFO context table (RAMFC)
+	 * NV40: Not sure exactly how to position RAMFC on some cards,
+	 *       0x30002 seems to position it at RAMIN+0x20000 on these
+	 *       cards. RAMFC is 4KiB (32 fifos, 128-byte entries).
+	 * Others: Position RAMFC at RAMIN+0x11400.
+ */
+ dev_priv->ramfc_size = engine->fifo.channels *
+ nouveau_fifo_ctx_size(dev);
+ switch (dev_priv->card_type) {
+ case NV_40:
+ dev_priv->ramfc_offset = 0x20000;
+ break;
+ case NV_30:
+ case NV_20:
+ case NV_10:
+ case NV_04:
+ default:
+ dev_priv->ramfc_offset = 0x11400;
+ break;
+ }
+ NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
+ dev_priv->ramfc_size);
+}
+
+int nv04_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t offset;
+ int ret = 0;
+
+ nv04_instmem_determine_amount(dev);
+ nv04_instmem_configure_fixed_tables(dev);
+
+	/* Create a heap to manage RAMIN allocations; we don't allocate
+ * the space that was reserved for RAMHT/FC/RO.
+ */
+ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+
+ /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
+ * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
+ * ("new style" control) the upper 16-bits of 0x2220 points at this
+ * other mysterious table that's clobbering important things.
+ *
+ * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
+	 * smashed to pieces on us, so reserve 0x30000-0x40000 too.
+ */
+ if (dev_priv->card_type >= NV_40) {
+ if (offset < 0x40000)
+ offset = 0x40000;
+ }
+
+ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ offset, dev_priv->ramin_rsvd_vram - offset);
+ if (ret) {
+ dev_priv->ramin_heap = NULL;
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ }
+
+ return ret;
+}
+
+void
+nv04_instmem_takedown(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+{
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ return 0;
+}
+
+void
+nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->engine.instmem.unbind(dev, gpuobj);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (!gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
+void
+nv04_instmem_prepare_access(struct drm_device *dev, bool write)
+{
+}
+
+void
+nv04_instmem_finish_access(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_suspend(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nv04_instmem_resume(struct drm_device *dev)
+{
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
new file mode 100644
index 00000000000..617ed1e0526
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -0,0 +1,20 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_mc_init(struct drm_device *dev)
+{
+ /* Power up everything, resetting each individual unit will
+ * be done later if needed.
+ */
+
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+ return 0;
+}
+
+void
+nv04_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
new file mode 100644
index 00000000000..1d09ddd5739
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -0,0 +1,51 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_timer_init(struct drm_device *dev)
+{
+ nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
+ nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
+
+ /* Just use the pre-existing values when possible for now; these regs
+ * are not written in nv (driver writer missed a /4 on the address), and
+ * writing 8 and 3 to the correct regs breaks the timings on the LVDS
+ * hardware sequencing microcode.
+ * A correct solution (involving calculations with the GPU PLL) can
+	 * be done once kernel modesetting lands.
+ */
+ if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
+ !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
+ nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
+ nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
+ }
+
+ return 0;
+}
+
+uint64_t
+nv04_timer_read(struct drm_device *dev)
+{
+ uint32_t low;
+ /* From kmmio dumps on nv28 this looks like how the blob does this.
+ * It reads the high dword twice, before and after.
+ * The only explanation seems to be that the 64-bit timer counter
+ * advances between high and low dword reads and may corrupt the
+ * result. Not confirmed.
+ */
+ uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ uint32_t high1;
+ do {
+ high1 = high2;
+ low = nv_rd32(dev, NV04_PTIMER_TIME_0);
+ high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ } while (high1 != high2);
+ return (((uint64_t)high2) << 32) | (uint64_t)low;
+}
+
+void
+nv04_timer_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
new file mode 100644
index 00000000000..9c63099e9c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "drm_crtc_helper.h"
+
+#include "i2c/ch7006.h"
+
+static struct {
+ struct i2c_board_info board_info;
+ struct drm_encoder_funcs funcs;
+ struct drm_encoder_helper_funcs hfuncs;
+ void *params;
+
+} nv04_tv_encoder_info[] = {
+ {
+ .board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
+ .params = &(struct ch7006_encoder_params) {
+ CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
+ 0, 0, 0,
+ CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
+ CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
+ },
+ },
+};
+
+static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
+{
+ struct i2c_msg msg = {
+ .addr = addr,
+ .len = 0,
+ };
+
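+	/* A zero-length transfer only succeeds if a device acknowledges the address */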
+ return i2c_transfer(adapter, &msg, 1) == 1;
+}
+
+int nv04_tv_identify(struct drm_device *dev, int i2c_index)
+{
+ struct nouveau_i2c_chan *i2c;
+ bool was_locked;
+ int i, ret;
+
+ NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
+
+ i2c = nouveau_i2c_find(dev, i2c_index);
+ if (!i2c)
+ return -ENODEV;
+
+ was_locked = NVLockVgaCrtcs(dev, false);
+
+ for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
+ if (probe_i2c_addr(&i2c->adapter,
+ nv04_tv_encoder_info[i].board_info.addr)) {
+ ret = i;
+ break;
+ }
+ }
+
+ if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
+ NV_TRACE(dev, "Detected TV encoder: %s\n",
+ nv04_tv_encoder_info[i].board_info.type);
+
+ } else {
+ NV_TRACE(dev, "No TV encoders found.\n");
+ i = -ENODEV;
+ }
+
+ NVLockVgaCrtcs(dev, was_locked);
+ return i;
+}
+
+#define PLLSEL_TV_CRTC1_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
+#define PLLSEL_TV_CRTC2_MASK \
+ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
+ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
+
+static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ uint8_t crtc1A;
+
+ NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
+
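+	/* Select the TV clocks for the CRTC driving this encoder only while it is powered on */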
+ if (mode == DRM_MODE_DPMS_ON) {
+ int head = nouveau_crtc(encoder->crtc)->index;
+ crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);
+
+ state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
+ PLLSEL_TV_CRTC1_MASK;
+
+ /* Inhibit hsync */
+ crtc1A |= 0x80;
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+ }
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
+
+ to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
+}
+
+static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
+
+ state->tv_setup = 0;
+
+ if (bind) {
+ state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
+ state->CRTC[NV_CIO_CRE_49] |= 0x10;
+ } else {
+ state->CRTC[NV_CIO_CRE_49] &= ~0x10;
+ }
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
+ state->CRTC[NV_CIO_CRE_LCD__INDEX]);
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
+ state->CRTC[NV_CIO_CRE_49]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
+ state->tv_setup);
+}
+
+static void nv04_tv_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_disable(dev, head);
+
+ if (nv_two_heads(dev))
+ nv04_tv_bind(dev, head ^ 1, false);
+
+ nv04_tv_bind(dev, head, true);
+}
+
+static void nv04_tv_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+
+ regp->tv_htotal = adjusted_mode->htotal;
+ regp->tv_vtotal = adjusted_mode->vtotal;
+
+	/* These delay the TV signals with respect to the VGA port;
+ * they might be useful if we ever allow a CRTC to drive
+ * multiple outputs.
+ */
+ regp->tv_hskew = 1;
+ regp->tv_hsync_delay = 1;
+ regp->tv_hsync_delay2 = 64;
+ regp->tv_vskew = 1;
+ regp->tv_vsync_delay = 1;
+
+ to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
+}
+
+static void nv04_tv_commit(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
+ '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv04_tv_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ to_encoder_slave(encoder)->slave_funcs->destroy(encoder);
+
+ drm_encoder_cleanup(encoder);
+
+ kfree(nv_encoder);
+}
+
+int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct i2c_adapter *adap;
+ struct drm_encoder_funcs *funcs = NULL;
+ struct drm_encoder_helper_funcs *hfuncs = NULL;
+ struct drm_encoder_slave_funcs *sfuncs = NULL;
+ int i2c_index = entry->i2c_index;
+ int type, ret;
+ bool was_locked;
+
+ /* Ensure that we can talk to this encoder */
+ type = nv04_tv_identify(dev, i2c_index);
+ if (type < 0)
+ return type;
+
+ /* Allocate the necessary memory */
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+
+ /* Initialize the common members */
+ encoder = to_drm_encoder(nv_encoder);
+
+ funcs = &nv04_tv_encoder_info[type].funcs;
+ hfuncs = &nv04_tv_encoder_info[type].hfuncs;
+
+ drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_helper_add(encoder, hfuncs);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ /* Run the slave-specific initialization */
+ adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
+
+ was_locked = NVLockVgaCrtcs(dev, false);
+
+ ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
+ &nv04_tv_encoder_info[type].board_info);
+
+ NVLockVgaCrtcs(dev, was_locked);
+
+ if (ret < 0)
+ goto fail;
+
+ /* Fill the function pointers */
+ sfuncs = to_encoder_slave(encoder)->slave_funcs;
+
+ *funcs = (struct drm_encoder_funcs) {
+ .destroy = nv04_tv_destroy,
+ };
+
+ *hfuncs = (struct drm_encoder_helper_funcs) {
+ .dpms = nv04_tv_dpms,
+ .save = sfuncs->save,
+ .restore = sfuncs->restore,
+ .mode_fixup = sfuncs->mode_fixup,
+ .prepare = nv04_tv_prepare,
+ .commit = nv04_tv_commit,
+ .mode_set = nv04_tv_mode_set,
+ .detect = sfuncs->detect,
+ };
+
+ /* Set the slave encoder configuration */
+ sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);
+
+ return 0;
+
+fail:
+ drm_encoder_cleanup(encoder);
+
+ kfree(nv_encoder);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
new file mode 100644
index 00000000000..79e2d104d70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -0,0 +1,24 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv10_fb_init(struct drm_device *dev)
+{
+ uint32_t fb_bar_size;
+ int i;
+
+ fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, NV10_PFB_TILE(i), 0);
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
+ }
+
+ return 0;
+}
+
+void
+nv10_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
new file mode 100644
index 00000000000..7aeabf262bc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
+#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
+
+int
+nv10_fifo_channel_id(struct drm_device *dev)
+{
+ return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv10_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_device *dev = chan->dev;
+ uint32_t fc = NV10_RAMFC(chan->id);
+ int ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
+ NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
+	/* Fill in the entries that are seen filled in dumps of the nvidia
+	 * driver just after a channel is put into DMA mode.
+ */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wi32(dev, fc + 0, chan->pushbuf_base);
+ nv_wi32(dev, fc + 4, chan->pushbuf_base);
+ nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ /* enable the fifo dma operation */
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ return 0;
+}
+
+void
+nv10_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv10_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV10_RAMFC(chid), tmp;
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
+
+ tmp = nv_ri32(dev, fc + 12);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
+
+ if (dev_priv->chipset < 0x17)
+ goto out;
+
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
+ tmp = nv_ri32(dev, fc + 36);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
+
+out:
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv10_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t tmp;
+
+ nv10_fifo_do_load_context(dev, chan->id);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv10_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ uint32_t fc, tmp;
+ int chid;
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ return 0;
+ fc = NV10_RAMFC(chid);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+
+ nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
+ tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
+ nv_wi32(dev, fc + 12, tmp);
+ nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+ nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
+ nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+ nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+
+ if (dev_priv->chipset < 0x17)
+ goto out;
+
+ nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+ tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
+ nv_wi32(dev, fc + 36, tmp);
+ nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+ nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
+ nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+
+out:
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv10_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+ return 0;
+}
+
+static void
+nv10_fifo_init_reset(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, 0x003224, 0x000f0078);
+ nv_wr32(dev, 0x002044, 0x0101ffff);
+ nv_wr32(dev, 0x002040, 0x000000ff);
+ nv_wr32(dev, 0x002500, 0x00000000);
+ nv_wr32(dev, 0x003000, 0x00000000);
+ nv_wr32(dev, 0x003050, 0x00000000);
+
+ nv_wr32(dev, 0x003258, 0x00000000);
+ nv_wr32(dev, 0x003210, 0x00000000);
+ nv_wr32(dev, 0x003270, 0x00000000);
+}
+
+static void
+nv10_fifo_init_ramxx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+
+ if (dev_priv->chipset < 0x17) {
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
+ } else {
+ nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
+			(1 << 16) /* 64-byte entries */);
+		/* XXX the nvidia blob sets bits 18, 21 and 23 for nv20 & nv30 */
+ }
+}
+
+static void
+nv10_fifo_init_intr(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv10_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ int i;
+
+ nv10_fifo_init_reset(dev);
+ nv10_fifo_init_ramxx(dev);
+
+ nv10_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+ nv10_fifo_init_intr(dev);
+ pfifo->enable(dev);
+ pfifo->reassign(dev, true);
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ if (dev_priv->fifos[i]) {
+ uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+ nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
new file mode 100644
index 00000000000..6870e0ee2e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -0,0 +1,1003 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
+#define NV10_FIFO_NUMBER 32
+
+struct pipe_state {
+ uint32_t pipe_0x0000[0x040/4];
+ uint32_t pipe_0x0040[0x010/4];
+ uint32_t pipe_0x0200[0x0c0/4];
+ uint32_t pipe_0x4400[0x080/4];
+ uint32_t pipe_0x6400[0x3b0/4];
+ uint32_t pipe_0x6800[0x2f0/4];
+ uint32_t pipe_0x6c00[0x030/4];
+ uint32_t pipe_0x7000[0x130/4];
+ uint32_t pipe_0x7400[0x0c0/4];
+ uint32_t pipe_0x7800[0x0c0/4];
+};
+
+static int nv10_graph_ctx_regs[] = {
+ NV10_PGRAPH_CTX_SWITCH1,
+ NV10_PGRAPH_CTX_SWITCH2,
+ NV10_PGRAPH_CTX_SWITCH3,
+ NV10_PGRAPH_CTX_SWITCH4,
+ NV10_PGRAPH_CTX_SWITCH5,
+ NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */
+ NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */
+ NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */
+ NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */
+ NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */
+ 0x00400164,
+ 0x00400184,
+ 0x004001a4,
+ 0x004001c4,
+ 0x004001e4,
+ 0x00400168,
+ 0x00400188,
+ 0x004001a8,
+ 0x004001c8,
+ 0x004001e8,
+ 0x0040016c,
+ 0x0040018c,
+ 0x004001ac,
+ 0x004001cc,
+ 0x004001ec,
+ 0x00400170,
+ 0x00400190,
+ 0x004001b0,
+ 0x004001d0,
+ 0x004001f0,
+ 0x00400174,
+ 0x00400194,
+ 0x004001b4,
+ 0x004001d4,
+ 0x004001f4,
+ 0x00400178,
+ 0x00400198,
+ 0x004001b8,
+ 0x004001d8,
+ 0x004001f8,
+ 0x0040017c,
+ 0x0040019c,
+ 0x004001bc,
+ 0x004001dc,
+ 0x004001fc,
+ NV10_PGRAPH_CTX_USER,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV10_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV10_PGRAPH_SURFACE,
+ NV10_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV10_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
+ 0x00400904,
+ 0x00400908,
+ 0x0040090c,
+ 0x00400910,
+ 0x00400914,
+ 0x00400918,
+ 0x0040091c,
+ 0x00400920,
+ 0x00400924,
+ 0x00400928,
+ 0x0040092c,
+ 0x00400930,
+ 0x00400934,
+ 0x00400938,
+ 0x0040093c,
+ 0x00400940,
+ 0x00400944,
+ 0x00400948,
+ 0x0040094c,
+ 0x00400950,
+ 0x00400954,
+ 0x00400958,
+ 0x0040095c,
+ 0x00400960,
+ 0x00400964,
+ 0x00400968,
+ 0x0040096c,
+ 0x00400970,
+ 0x00400974,
+ 0x00400978,
+ 0x0040097c,
+ 0x00400980,
+ 0x00400984,
+ 0x00400988,
+ 0x0040098c,
+ 0x00400990,
+ 0x00400994,
+ 0x00400998,
+ 0x0040099c,
+ 0x004009a0,
+ 0x004009a4,
+ 0x004009a8,
+ 0x004009ac,
+ 0x004009b0,
+ 0x004009b4,
+ 0x004009b8,
+ 0x004009bc,
+ 0x004009c0,
+ 0x004009c4,
+ 0x004009c8,
+ 0x004009cc,
+ 0x004009d0,
+ 0x004009d4,
+ 0x004009d8,
+ 0x004009dc,
+ 0x004009e0,
+ 0x004009e4,
+ 0x004009e8,
+ 0x004009ec,
+ 0x004009f0,
+ 0x004009f4,
+ 0x004009f8,
+ 0x004009fc,
+ NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ NV03_PGRAPH_MONO_COLOR0,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ 0x00400e70,
+ 0x00400e74,
+ 0x00400e78,
+ 0x00400e7c,
+ 0x00400e80,
+ 0x00400e84,
+ 0x00400e88,
+ 0x00400e8c,
+ 0x00400ea0,
+ 0x00400ea4,
+ 0x00400ea8,
+ 0x00400e90,
+ 0x00400e94,
+ 0x00400e98,
+ 0x00400e9c,
+ NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
+ NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
+ 0x00400f04,
+ 0x00400f24,
+ 0x00400f08,
+ 0x00400f28,
+ 0x00400f0c,
+ 0x00400f2c,
+ 0x00400f10,
+ 0x00400f30,
+ 0x00400f14,
+ 0x00400f34,
+ 0x00400f18,
+ 0x00400f38,
+ 0x00400f1c,
+ 0x00400f3c,
+ NV10_PGRAPH_XFMODE0,
+ NV10_PGRAPH_XFMODE1,
+ NV10_PGRAPH_GLOBALSTATE0,
+ NV10_PGRAPH_GLOBALSTATE1,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
+ NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ NV03_PGRAPH_ABS_UCLIP_XMIN,
+ NV03_PGRAPH_ABS_UCLIP_XMAX,
+ NV03_PGRAPH_ABS_UCLIP_YMIN,
+ NV03_PGRAPH_ABS_UCLIP_YMAX,
+ 0x00400550,
+ 0x00400558,
+ 0x00400554,
+ 0x0040055c,
+ NV03_PGRAPH_ABS_UCLIPA_XMIN,
+ NV03_PGRAPH_ABS_UCLIPA_XMAX,
+ NV03_PGRAPH_ABS_UCLIPA_YMIN,
+ NV03_PGRAPH_ABS_UCLIPA_YMAX,
+ NV03_PGRAPH_ABS_ICLIP_XMAX,
+ NV03_PGRAPH_ABS_ICLIP_YMAX,
+ NV03_PGRAPH_XY_LOGIC_MISC0,
+ NV03_PGRAPH_XY_LOGIC_MISC1,
+ NV03_PGRAPH_XY_LOGIC_MISC2,
+ NV03_PGRAPH_XY_LOGIC_MISC3,
+ NV03_PGRAPH_CLIPX_0,
+ NV03_PGRAPH_CLIPX_1,
+ NV03_PGRAPH_CLIPY_0,
+ NV03_PGRAPH_CLIPY_1,
+ NV10_PGRAPH_COMBINER0_IN_ALPHA,
+ NV10_PGRAPH_COMBINER1_IN_ALPHA,
+ NV10_PGRAPH_COMBINER0_IN_RGB,
+ NV10_PGRAPH_COMBINER1_IN_RGB,
+ NV10_PGRAPH_COMBINER_COLOR0,
+ NV10_PGRAPH_COMBINER_COLOR1,
+ NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+ NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+ NV10_PGRAPH_COMBINER0_OUT_RGB,
+ NV10_PGRAPH_COMBINER1_OUT_RGB,
+ NV10_PGRAPH_COMBINER_FINAL0,
+ NV10_PGRAPH_COMBINER_FINAL1,
+ 0x00400e00,
+ 0x00400e04,
+ 0x00400e08,
+ 0x00400e0c,
+ 0x00400e10,
+ 0x00400e14,
+ 0x00400e18,
+ 0x00400e1c,
+ 0x00400e20,
+ 0x00400e24,
+ 0x00400e28,
+ 0x00400e2c,
+ 0x00400e30,
+ 0x00400e34,
+ 0x00400e38,
+ 0x00400e3c,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV10_PGRAPH_DIMX_TEXTURE,
+ NV10_PGRAPH_WDIMX_TEXTURE,
+ NV10_PGRAPH_DVD_COLORFMT,
+ NV10_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ NV03_PGRAPH_X_MISC,
+ NV03_PGRAPH_Y_MISC,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2,
+};
+
+static int nv17_graph_ctx_regs[] = {
+ NV10_PGRAPH_DEBUG_4,
+ 0x004006b0,
+ 0x00400eac,
+ 0x00400eb0,
+ 0x00400eb4,
+ 0x00400eb8,
+ 0x00400ebc,
+ 0x00400ec0,
+ 0x00400ec4,
+ 0x00400ec8,
+ 0x00400ecc,
+ 0x00400ed0,
+ 0x00400ed4,
+ 0x00400ed8,
+ 0x00400edc,
+ 0x00400ee0,
+ 0x00400a00,
+ 0x00400a04,
+};
+
+struct graph_state {
+ int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
+ int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
+ struct pipe_state pipe_state;
+ uint32_t lma_window[4];
+};
+
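+/* PIPE_SAVE/PIPE_RESTORE dump or reload one section of 3D pipe state:
+ * PIPE_ADDRESS selects the section and the data port then steps through
+ * it, one dword per access.
+ */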
+#define PIPE_SAVE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+ } while (0)
+
+#define PIPE_RESTORE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+ } while (0)
+
+static void nv10_graph_save_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+ PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
+}
+
+static void nv10_graph_load_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+ uint32_t xfmode0, xfmode1;
+ int i;
+
+ nouveau_wait_for_idle(dev);
+ /* XXX check haiku comments */
+ xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
+ nouveau_wait_for_idle(dev);
+
+ /* restore XFMODE */
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
+ PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
+ nouveau_wait_for_idle(dev);
+}
+
+static void nv10_graph_create_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ uint32_t *fifo_pipe_state_addr;
+ int i;
+#define PIPE_INIT(addr) \
+ do { \
+ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+ } while (0)
+#define PIPE_INIT_END(addr) \
+ do { \
+ uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
+ ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
+ if (fifo_pipe_state_addr != __end_addr) \
+ NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
+ addr, fifo_pipe_state_addr, __end_addr); \
+ } while (0)
+#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
+
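+ /* Fill the software copy of each pipe section with its initial
+ * values; nv10_graph_load_pipe() uploads them to the hardware.
+ */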
+ PIPE_INIT(0x0200);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0200);
+
+ PIPE_INIT(0x6400);
+ for (i = 0; i < 211; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ PIPE_INIT_END(0x6400);
+
+ PIPE_INIT(0x6800);
+ for (i = 0; i < 162; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ for (i = 0; i < 25; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6800);
+
+ PIPE_INIT(0x6c00);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0xbf800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6c00);
+
+ PIPE_INIT(0x7000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ for (i = 0; i < 35; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7000);
+
+ PIPE_INIT(0x7400);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7400);
+
+ PIPE_INIT(0x7800);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7800);
+
+ PIPE_INIT(0x4400);
+ for (i = 0; i < 32; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x4400);
+
+ PIPE_INIT(0x0000);
+ for (i = 0; i < 16; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0000);
+
+ PIPE_INIT(0x0040);
+ for (i = 0; i < 4; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
+
+static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
+ if (nv10_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
+ return -1;
+}
+
+static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
+ if (nv17_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
+ return -1;
+}
+
+int nv10_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+ uint32_t tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+ nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
+ if (dev_priv->chipset >= 0x17) {
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+ nv_wr32(dev, nv17_graph_ctx_regs[i],
+ pgraph_ctx->nv17[i]);
+ }
+
+ nv10_graph_load_pipe(chan);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
+ tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
+ return 0;
+}
+
+int
+nv10_graph_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ struct graph_state *ctx;
+ uint32_t tmp;
+ int i;
+
+ chan = pgraph->channel(dev);
+ if (!chan)
+ return 0;
+ ctx = chan->pgraph_ctx;
+
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+ ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
+
+ if (dev_priv->chipset >= 0x17) {
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+ ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
+ }
+
+ nv10_graph_save_pipe(chan);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (pfifo->channels - 1) << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+void
+nv10_graph_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_channel *chan = NULL;
+ int chid;
+
+ pgraph->fifo_access(dev, false);
+ nouveau_wait_for_idle(dev);
+
+ /* If previous context is valid, we need to save it */
+ nv10_graph_unload_context(dev);
+
+ /* Load context for next channel */
+ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ chan = dev_priv->fifos[chid];
+ if (chan)
+ nv10_graph_load_context(chan);
+
+ pgraph->fifo_access(dev, true);
+}
+
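+/* Store an initial register value into the software context image; the
+ * index comes from the register's position in the ctx_regs tables above.
+ */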
+#define NV_WRITE_CTX(reg, val) do { \
+ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset >= 0) \
+ pgraph_ctx->nv10[offset] = val; \
+ } while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset >= 0) \
+ pgraph_ctx->nv17[offset] = val; \
+ } while (0)
+
+struct nouveau_channel *
+nv10_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chid = dev_priv->engine.fifo.channels;
+
+ if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
+ chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
+
+ if (chid >= dev_priv->engine.fifo.channels)
+ return NULL;
+
+ return dev_priv->fifos[chid];
+}
+
+int nv10_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx;
+
+ NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
+
+ chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
+ GFP_KERNEL);
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+
+
+ NV_WRITE_CTX(0x00400e88, 0x08000000);
+ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
+ NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
+ NV_WRITE_CTX(0x00400e10, 0x00001000);
+ NV_WRITE_CTX(0x00400e14, 0x00001000);
+ NV_WRITE_CTX(0x00400e30, 0x00080008);
+ NV_WRITE_CTX(0x00400e34, 0x00080008);
+ if (dev_priv->chipset >= 0x17) {
+ /* is it really needed ??? */
+ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
+ nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
+ NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
+ NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+ NV17_WRITE_CTX(0x00400ed0, 0x00000080);
+ }
+ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+
+ nv10_graph_create_pipe(chan);
+ return 0;
+}
+
+void nv10_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+
+ kfree(pgraph_ctx);
+ chan->pgraph_ctx = NULL;
+}
+
+int nv10_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+ int i;
+
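+ /* reset PGRAPH by toggling its bit in PMC_ENABLE */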
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
+ (1<<29) |
+ (1<<31));
+ if (dev_priv->chipset >= 0x17) {
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ nv_wr32(dev, 0x400a10, 0x3ff3fb6);
+ nv_wr32(dev, 0x400838, 0x2f8684);
+ nv_wr32(dev, 0x40083c, 0x115f3f);
+ nv_wr32(dev, 0x004006b0, 0x40000020);
+ } else
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, NV10_PGRAPH_TILE(i),
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
+ nv_rd32(dev, NV10_PFB_TSTATUS(i)));
+ }
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+
+ return 0;
+}
+
+void nv10_graph_takedown(struct drm_device *dev)
+{
+}
+
+static int
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct graph_state *ctx = chan->pgraph_ctx;
+ struct pipe_state *pipe = &ctx->pipe_state;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+ uint32_t xfmode0, xfmode1;
+ int i;
+
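+ /* Methods 0x1638-0x1644 carry the four LMA window parameters; buffer
+ * them and only push the window into the pipe (at 0x6790 below) once
+ * the last one arrives.
+ */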
+ ctx->lma_window[(mthd - 0x1638) / 4] = data;
+
+ if (mthd != 0x1644)
+ return 0;
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_SAVE(dev, pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+
+ PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
+
+ nouveau_wait_for_idle(dev);
+
+ xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+
+ PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nouveau_wait_for_idle(dev);
+
+ pgraph->fifo_access(dev, true);
+
+ return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
+ nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
+ nv_wr32(dev, 0x004006b0,
+ nv_rd32(dev, 0x004006b0) | 0x8 << 24);
+
+ pgraph->fifo_access(dev, true);
+
+ return 0;
+}
+
+static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
+ { 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, nv17_graph_mthd_lma_enable },
+ {}
+};
+
+struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x005f, false, NULL }, /* imageblit */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x0052, false, NULL }, /* swzsurf */
+ { 0x0093, false, NULL }, /* surf3d */
+ { 0x0094, false, NULL }, /* tex_tri */
+ { 0x0095, false, NULL }, /* multitex_tri */
+ { 0x0056, false, NULL }, /* celsius (nv10) */
+ { 0x0096, false, NULL }, /* celsius (nv11) */
+ { 0x0099, false, nv17_graph_celsius_mthds }, /* celsius (nv17) */
+ {}
+};
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c
new file mode 100644
index 00000000000..2e58c331e9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_gpio.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+
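+/* GPIO lines 0-1 live in NV_PCRTC_GPIO (16 bits apart), lines 2-9 in
+ * NV_PCRTC_GPIO_EXT and lines 10-13 in NV_PCRTC_850 (4 bits each).
+ */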
+static bool
+get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
+ uint32_t *mask)
+{
+ if (ent->line < 2) {
+ *reg = NV_PCRTC_GPIO;
+ *shift = ent->line * 16;
+ *mask = 0x11;
+
+ } else if (ent->line < 10) {
+ *reg = NV_PCRTC_GPIO_EXT;
+ *shift = (ent->line - 2) * 4;
+ *mask = 0x3;
+
+ } else if (ent->line < 14) {
+ *reg = NV_PCRTC_850;
+ *shift = (ent->line - 10) * 4;
+ *mask = 0x3;
+
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+int
+nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+{
+ struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+ uint32_t reg, shift, mask, value;
+
+ if (!ent)
+ return -ENODEV;
+
+ if (!get_gpio_location(ent, &reg, &shift, &mask))
+ return -ENODEV;
+
+ value = NVReadCRTC(dev, 0, reg) >> shift;
+
+ return (ent->invert ? 1 : 0) ^ (value & 1);
+}
+
+int
+nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+{
+ struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+ uint32_t reg, shift, mask, value;
+
+ if (!ent)
+ return -ENODEV;
+
+ if (!get_gpio_location(ent, &reg, &shift, &mask))
+ return -ENODEV;
+
+ value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
+ mask = ~(mask << shift);
+
+ NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
new file mode 100644
index 00000000000..81c01353a9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -0,0 +1,681 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nv17_tv.h"
+
+enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ uint32_t pin_mask)
+{
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
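+ /* The top nibble of the load-detect result tells which TV output
+ * pins see a load; map it to a subconnector type.
+ */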
+ tv_enc->pin_mask = pin_mask >> 28 & 0xe;
+
+ switch (tv_enc->pin_mask) {
+ case 0x2:
+ case 0x4:
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
+ break;
+ case 0xc:
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
+ break;
+ case 0xe:
+ if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
+ else
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
+ break;
+ default:
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+ break;
+ }
+
+ drm_connector_property_set_value(connector,
+ encoder->dev->mode_config.tv_subconnector_property,
+ tv_enc->subconnector);
+
+ return tv_enc->subconnector ? connector_status_connected :
+ connector_status_disconnected;
+}
+
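+/* Mode sizes offered for the CTV ("HD") encoder modes; anything larger
+ * than the native output mode is skipped.
+ */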
+static const struct {
+ int hdisplay;
+ int vdisplay;
+} modes[] = {
+ { 640, 400 },
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 800, 600 },
+ { 1024, 768 },
+ { 1280, 720 },
+ { 1280, 1024 },
+ { 1920, 1080 }
+};
+
+static int nv17_tv_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ struct drm_display_mode *mode;
+ struct drm_display_mode *output_mode;
+ int n = 0;
+ int i;
+
+ if (tv_norm->kind != CTV_ENC_MODE) {
+ struct drm_display_mode *tv_mode;
+
+ for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ mode = drm_mode_duplicate(encoder->dev, tv_mode);
+
+ mode->clock = tv_norm->tv_enc_mode.vrefresh *
+ mode->htotal / 1000 *
+ mode->vtotal / 1000;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ mode->clock *= 2;
+
+ if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
+ mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ n++;
+ }
+ return n;
+ }
+
+ /* tv_norm->kind == CTV_ENC_MODE */
+ output_mode = &tv_norm->ctv_enc_mode.mode;
+ for (i = 0; i < ARRAY_SIZE(modes); i++) {
+ if (modes[i].hdisplay > output_mode->hdisplay ||
+ modes[i].vdisplay > output_mode->vdisplay)
+ continue;
+
+ if (modes[i].hdisplay == output_mode->hdisplay &&
+ modes[i].vdisplay == output_mode->vdisplay) {
+ mode = drm_mode_duplicate(encoder->dev, output_mode);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ } else {
+ mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
+ modes[i].vdisplay, 60, false,
+ output_mode->flags & DRM_MODE_FLAG_INTERLACE,
+ false);
+ }
+
+ /* CVT modes are sometimes unsuitable... */
+ if (output_mode->hdisplay <= 720
+ || output_mode->hdisplay >= 1920) {
+ mode->htotal = output_mode->htotal;
+ mode->hsync_start = (mode->hdisplay + (mode->htotal
+ - mode->hdisplay) * 9 / 10) & ~7;
+ mode->hsync_end = mode->hsync_start + 8;
+ }
+ if (output_mode->vdisplay >= 1024) {
+ mode->vtotal = output_mode->vtotal;
+ mode->vsync_start = output_mode->vsync_start;
+ mode->vsync_end = output_mode->vsync_end;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+ drm_mode_probed_add(connector, mode);
+ n++;
+ }
+ return n;
+}
+
+static int nv17_tv_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ struct drm_display_mode *output_mode =
+ &tv_norm->ctv_enc_mode.mode;
+
+ if (mode->clock > 400000)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > output_mode->hdisplay ||
+ mode->vdisplay > output_mode->vdisplay)
+ return MODE_BAD;
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
+ (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
+ return MODE_NO_INTERLACE;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ } else {
+ const int vsync_tolerance = 600;
+
+ if (mode->clock > 70000)
+ return MODE_CLOCK_HIGH;
+
+ if (abs(drm_mode_vrefresh(mode) * 1000 -
+ tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
+ return MODE_VSYNC;
+
+ /* The encoder takes care of the actual interlacing */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+ }
+
+ return MODE_OK;
+}
+
+static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (tv_norm->kind == CTV_ENC_MODE)
+ adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
+ else
+ adjusted_mode->clock = 90000;
+
+ return true;
+}
+
+static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+
+ if (nouveau_encoder(encoder)->last_dpms == mode)
+ return;
+ nouveau_encoder(encoder)->last_dpms = mode;
+
+ NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nouveau_encoder(encoder)->dcb->index);
+
+ regs->ptv_200 &= ~1;
+
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ nv04_dfp_update_fp_control(encoder, mode);
+
+ } else {
+ nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ regs->ptv_200 |= 1;
+ }
+
+ nv_load_ptv(dev, regs, 200);
+
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+
+ nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+}
+
+static void nv17_tv_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ int head = nouveau_crtc(encoder->crtc)->index;
+ uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
+ NV_CIO_CRE_LCD__INDEX];
+ uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder);
+ uint32_t dacclk;
+
+ helper->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ nv04_dfp_disable(dev, head);
+
+ /* Unbind any FP encoders from this head if we need the FP
+ * stuff enabled. */
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ struct drm_encoder *enc;
+
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;
+
+ if ((dcb->type == OUTPUT_TMDS ||
+ dcb->type == OUTPUT_LVDS) &&
+ !enc->crtc &&
+ nv04_dfp_get_bound_head(dev, dcb) == head) {
+ nv04_dfp_bind_head(dev, dcb, head ^ 1,
+ dev_priv->VBIOS.fp.dual_link);
+ }
+ }
+
+ }
+
+ /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
+ * at LCD__INDEX which we don't alter
+ */
+ if (!(*cr_lcd & 0x44)) {
+ if (tv_norm->kind == CTV_ENC_MODE)
+ *cr_lcd = 0x1 | (head ? 0x0 : 0x8);
+ else
+ *cr_lcd = 0;
+ }
+
+ /* Set the DACCLK register */
+ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
+
+ if (dev_priv->card_type == NV_40)
+ dacclk |= 0x1a << 16;
+
+ if (tv_norm->kind == CTV_ENC_MODE) {
+ dacclk |= 0x20;
+
+ if (head)
+ dacclk |= 0x100;
+ else
+ dacclk &= ~0x100;
+
+ } else {
+ dacclk |= 0x10;
+
+ }
+
+ NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
+}
+
+static void nv17_tv_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *drm_mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+ struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ int i;
+
+ regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
+ regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
+ regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
+ regs->tv_setup = 1;
+ regs->ramdac_8c0 = 0x0;
+
+ if (tv_norm->kind == TV_ENC_MODE) {
+ tv_regs->ptv_200 = 0x13111100;
+ if (head)
+ tv_regs->ptv_200 |= 0x10;
+
+ tv_regs->ptv_20c = 0x808010;
+ tv_regs->ptv_304 = 0x2d00000;
+ tv_regs->ptv_600 = 0x0;
+ tv_regs->ptv_60c = 0x0;
+ tv_regs->ptv_610 = 0x1e00000;
+
+ if (tv_norm->tv_enc_mode.vdisplay == 576) {
+ tv_regs->ptv_508 = 0x1200000;
+ tv_regs->ptv_614 = 0x33;
+
+ } else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+ tv_regs->ptv_508 = 0xf00000;
+ tv_regs->ptv_614 = 0x13;
+ }
+
+ if (dev_priv->card_type >= NV_30) {
+ tv_regs->ptv_500 = 0xe8e0;
+ tv_regs->ptv_504 = 0x1710;
+ tv_regs->ptv_604 = 0x0;
+ tv_regs->ptv_608 = 0x0;
+ } else {
+ if (tv_norm->tv_enc_mode.vdisplay == 576) {
+ tv_regs->ptv_604 = 0x20;
+ tv_regs->ptv_608 = 0x10;
+ tv_regs->ptv_500 = 0x19710;
+ tv_regs->ptv_504 = 0x68f0;
+
+ } else if (tv_norm->tv_enc_mode.vdisplay == 480) {
+ tv_regs->ptv_604 = 0x10;
+ tv_regs->ptv_608 = 0x20;
+ tv_regs->ptv_500 = 0x4b90;
+ tv_regs->ptv_504 = 0x1b480;
+ }
+ }
+
+ for (i = 0; i < 0x40; i++)
+ tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];
+
+ } else {
+ struct drm_display_mode *output_mode =
+ &tv_norm->ctv_enc_mode.mode;
+
+ /* The registers in PRAMDAC+0xc00 control some timings and CSC
+ * parameters for the CTV encoder (it's only used for "HD" TV
+ * modes; not enough of it is working to guess exactly what
+ * they mean). It's probably connected at the output of the
+ * FP encoder, but it also needs the analog encoder in its OR
+ * enabled and routed to the head it's using. It's enabled
+ * with the DACCLK register, bits [5:4].
+ */
+ for (i = 0; i < 38; i++)
+ regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];
+
+ regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
+ regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
+ regs->fp_horiz_regs[FP_SYNC_START] =
+ output_mode->hsync_start - 1;
+ regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
+ regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
+ max((output_mode->hdisplay-600)/40 - 1, 1);
+
+ regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
+ regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
+ regs->fp_vert_regs[FP_SYNC_START] =
+ output_mode->vsync_start - 1;
+ regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
+ regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;
+
+ regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+ NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
+
+ if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
+ if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
+
+ regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
+ NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
+ NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
+ NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
+ NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
+ NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
+
+ regs->fp_debug_2 = 0;
+
+ regs->fp_margin_color = 0x801080;
+
+ }
+}
+
+static void nv17_tv_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_encoder_helper_funcs *helper = encoder->helper_private;
+
+ if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
+ nv17_tv_update_rescaler(encoder);
+ nv17_tv_update_properties(encoder);
+ } else {
+ nv17_ctv_update_rescaler(encoder);
+ }
+
+ nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
+
+ /* This could use refinement for flatpanels, but it should work */
+ if (dev_priv->chipset < 0x44)
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+ nv04_dac_output_offset(encoder),
+ 0xf0000000);
+ else
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
+ nv04_dac_output_offset(encoder),
+ 0x00100000);
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+ NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(
+ &nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+}
+
+static void nv17_tv_save(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+ nouveau_encoder(encoder)->restore.output =
+ NVReadRAMDAC(dev, 0,
+ NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder));
+
+ nv17_tv_state_save(dev, &tv_enc->saved_state);
+
+ tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
+}
+
+static void nv17_tv_restore(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
+ nv04_dac_output_offset(encoder),
+ nouveau_encoder(encoder)->restore.output);
+
+ nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+}
+
+static int nv17_tv_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_mode_config *conf = &dev->mode_config;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
+ NUM_LD_TV_NORMS;
+ int i;
+
+ if (nouveau_tv_norm) {
+ for (i = 0; i < num_tv_norms; i++) {
+ if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
+ tv_enc->tv_norm = i;
+ break;
+ }
+ }
+
+ if (i == num_tv_norms)
+ NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
+ nouveau_tv_norm);
+ }
+
+ drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
+
+ drm_connector_attach_property(connector,
+ conf->tv_select_subconnector_property,
+ tv_enc->select_subconnector);
+ drm_connector_attach_property(connector,
+ conf->tv_subconnector_property,
+ tv_enc->subconnector);
+ drm_connector_attach_property(connector,
+ conf->tv_mode_property,
+ tv_enc->tv_norm);
+ drm_connector_attach_property(connector,
+ conf->tv_flicker_reduction_property,
+ tv_enc->flicker);
+ drm_connector_attach_property(connector,
+ conf->tv_saturation_property,
+ tv_enc->saturation);
+ drm_connector_attach_property(connector,
+ conf->tv_hue_property,
+ tv_enc->hue);
+ drm_connector_attach_property(connector,
+ conf->tv_overscan_property,
+ tv_enc->overscan);
+
+ return 0;
+}
+
+static int nv17_tv_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_mode_config *conf = &encoder->dev->mode_config;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ bool modes_changed = false;
+
+ if (property == conf->tv_overscan_property) {
+ tv_enc->overscan = val;
+ if (encoder->crtc) {
+ if (tv_norm->kind == CTV_ENC_MODE)
+ nv17_ctv_update_rescaler(encoder);
+ else
+ nv17_tv_update_rescaler(encoder);
+ }
+
+ } else if (property == conf->tv_saturation_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->saturation = val;
+ nv17_tv_update_properties(encoder);
+
+ } else if (property == conf->tv_hue_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->hue = val;
+ nv17_tv_update_properties(encoder);
+
+ } else if (property == conf->tv_flicker_reduction_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->flicker = val;
+ if (encoder->crtc)
+ nv17_tv_update_rescaler(encoder);
+
+ } else if (property == conf->tv_mode_property) {
+ if (connector->dpms != DRM_MODE_DPMS_OFF)
+ return -EINVAL;
+
+ tv_enc->tv_norm = val;
+
+ modes_changed = true;
+
+ } else if (property == conf->tv_select_subconnector_property) {
+ if (tv_norm->kind != TV_ENC_MODE)
+ return -EINVAL;
+
+ tv_enc->select_subconnector = val;
+ nv17_tv_update_properties(encoder);
+
+ } else {
+ return -EINVAL;
+ }
+
+ if (modes_changed) {
+ drm_helper_probe_single_connector_modes(connector, 0, 0);
+
+ /* Disable the crtc to ensure a full modeset is
+ * performed whenever it's turned on again. */
+ if (crtc) {
+ struct drm_mode_set modeset = {
+ .crtc = crtc,
+ };
+
+ crtc->funcs->set_config(&modeset);
+ }
+ }
+
+ return 0;
+}
+
+static void nv17_tv_destroy(struct drm_encoder *encoder)
+{
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+
+ NV_DEBUG_KMS(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(tv_enc);
+}
+
+static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
+ .dpms = nv17_tv_dpms,
+ .save = nv17_tv_save,
+ .restore = nv17_tv_restore,
+ .mode_fixup = nv17_tv_mode_fixup,
+ .prepare = nv17_tv_prepare,
+ .commit = nv17_tv_commit,
+ .mode_set = nv17_tv_mode_set,
+ .detect = nv17_dac_detect,
+};
+
+static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
+ .get_modes = nv17_tv_get_modes,
+ .mode_valid = nv17_tv_mode_valid,
+ .create_resources = nv17_tv_create_resources,
+ .set_property = nv17_tv_set_property,
+};
+
+static struct drm_encoder_funcs nv17_tv_funcs = {
+ .destroy = nv17_tv_destroy,
+};
+
+int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct drm_encoder *encoder;
+ struct nv17_tv_encoder *tv_enc = NULL;
+
+ tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
+ if (!tv_enc)
+ return -ENOMEM;
+
+ tv_enc->overscan = 50;
+ tv_enc->flicker = 50;
+ tv_enc->saturation = 50;
+ tv_enc->hue = 0;
+ tv_enc->tv_norm = TV_NORM_PAL;
+ tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+ tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
+ tv_enc->pin_mask = 0;
+
+ encoder = to_drm_encoder(&tv_enc->base);
+
+ tv_enc->base.dcb = entry;
+ tv_enc->base.or = ffs(entry->or) - 1;
+
+ drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
+ to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
new file mode 100644
index 00000000000..c00977cedab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV17_TV_H__
+#define __NV17_TV_H__
+
+struct nv17_tv_state {
+ uint8_t tv_enc[0x40];
+
+ uint32_t hfilter[4][7];
+ uint32_t hfilter2[4][7];
+ uint32_t vfilter[4][7];
+
+ uint32_t ptv_200;
+ uint32_t ptv_204;
+ uint32_t ptv_208;
+ uint32_t ptv_20c;
+ uint32_t ptv_304;
+ uint32_t ptv_500;
+ uint32_t ptv_504;
+ uint32_t ptv_508;
+ uint32_t ptv_600;
+ uint32_t ptv_604;
+ uint32_t ptv_608;
+ uint32_t ptv_60c;
+ uint32_t ptv_610;
+ uint32_t ptv_614;
+};
+
+enum nv17_tv_norm {
+ TV_NORM_PAL,
+ TV_NORM_PAL_M,
+ TV_NORM_PAL_N,
+ TV_NORM_PAL_NC,
+ TV_NORM_NTSC_M,
+ TV_NORM_NTSC_J,
+ NUM_LD_TV_NORMS,
+ TV_NORM_HD480I = NUM_LD_TV_NORMS,
+ TV_NORM_HD480P,
+ TV_NORM_HD576I,
+ TV_NORM_HD576P,
+ TV_NORM_HD720P,
+ TV_NORM_HD1080I,
+ NUM_TV_NORMS
+};
+
+struct nv17_tv_encoder {
+ struct nouveau_encoder base;
+
+ struct nv17_tv_state state;
+ struct nv17_tv_state saved_state;
+
+ int overscan;
+ int flicker;
+ int saturation;
+ int hue;
+ enum nv17_tv_norm tv_norm;
+ int subconnector;
+ int select_subconnector;
+ uint32_t pin_mask;
+};
+#define to_tv_enc(x) container_of(nouveau_encoder(x), \
+ struct nv17_tv_encoder, base)
+
+extern char *nv17_tv_norm_names[NUM_TV_NORMS];
+
+extern struct nv17_tv_norm_params {
+ enum {
+ TV_ENC_MODE,
+ CTV_ENC_MODE,
+ } kind;
+
+ union {
+ struct {
+ int hdisplay;
+ int vdisplay;
+ int vrefresh; /* mHz */
+
+ uint8_t tv_enc[0x40];
+ } tv_enc_mode;
+
+ struct {
+ struct drm_display_mode mode;
+
+ uint32_t ctv_regs[38];
+ } ctv_enc_mode;
+ };
+
+} nv17_tv_norms[NUM_TV_NORMS];
+#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
+
+extern struct drm_display_mode nv17_tv_modes[];
+
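+/* Map a 0-100 property value onto the y0/y1/y2 register values given for
+ * the 0, 50 and 100 settings (piecewise linear around the midpoint).
+ */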
+static inline int interpolate(int y0, int y1, int y2, int x)
+{
+ return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
+}
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
+void nv17_tv_update_properties(struct drm_encoder *encoder);
+void nv17_tv_update_rescaler(struct drm_encoder *encoder);
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
+
+/* TV hardware access functions */
+
+static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
+{
+ nv_wr32(dev, reg, val);
+}
+
+static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
+{
+ return nv_rd32(dev, reg);
+}
+
+static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
+{
+ nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+ nv_write_ptv(dev, NV_PTV_TV_DATA, val);
+}
+
+static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
+{
+ nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
+ return nv_read_ptv(dev, NV_PTV_TV_DATA);
+}
+
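+/* These paste the hex offset token: e.g. nv_load_ptv(dev, state, 200)
+ * writes state->ptv_200 to the register at NV_PTV_OFFSET + 0x200.
+ */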
+#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
+#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
+#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
new file mode 100644
index 00000000000..d64683d97e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -0,0 +1,583 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "nouveau_drv.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_hw.h"
+#include "nv17_tv.h"
+
+char *nv17_tv_norm_names[NUM_TV_NORMS] = {
+ [TV_NORM_PAL] = "PAL",
+ [TV_NORM_PAL_M] = "PAL-M",
+ [TV_NORM_PAL_N] = "PAL-N",
+ [TV_NORM_PAL_NC] = "PAL-Nc",
+ [TV_NORM_NTSC_M] = "NTSC-M",
+ [TV_NORM_NTSC_J] = "NTSC-J",
+ [TV_NORM_HD480I] = "hd480i",
+ [TV_NORM_HD480P] = "hd480p",
+ [TV_NORM_HD576I] = "hd576i",
+ [TV_NORM_HD576P] = "hd576p",
+ [TV_NORM_HD720P] = "hd720p",
+ [TV_NORM_HD1080I] = "hd1080i"
+};
+
+/* TV standard specific parameters */
+
+struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
+ [TV_NORM_PAL] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+ 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+ 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_PAL_M] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_PAL_N] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_PAL_NC] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+ 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+ 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_NTSC_M] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_NTSC_J] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_HD480I] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 480, 59940, {
+ 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
+ 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
+ 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
+ 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_HD576I] = { TV_ENC_MODE, {
+ .tv_enc_mode = { 720, 576, 50000, {
+ 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
+ 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
+ 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
+ 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
+ 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
+ 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
+ 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
+ 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
+ } } } },
+
+
+ [TV_NORM_HD480P] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
+ 720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+ 0x354003a, 0x40000, 0x6f0344, 0x18100000,
+ 0x10160004, 0x10060005, 0x1006000c, 0x10060020,
+ 0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
+ 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
+ 0x10000fff, 0x10000fff, 0x10000fff, 0x70,
+ 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+ 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+ } } } },
+
+ [TV_NORM_HD576P] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
+ 720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
+ 0x354003a, 0x40000, 0x6f0344, 0x18100000,
+ 0x10060001, 0x10060009, 0x10060026, 0x10060027,
+ 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
+ 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
+ 0x10000fff, 0x10000fff, 0x10000fff, 0x69,
+ 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
+ 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
+ } } } },
+
+ [TV_NORM_HD720P] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
+ 1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
+ 0x66b0021, 0x6004a, 0x1210626, 0x8170000,
+ 0x70004, 0x70016, 0x70017, 0x40f0018,
+ 0x702e8, 0x81702ed, 0xfff, 0xfff,
+ 0xfff, 0xfff, 0xfff, 0xfff,
+ 0xfff, 0xfff, 0xfff, 0x0,
+ 0x2e40001, 0x58, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
+ 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
+ } } } },
+
+ [TV_NORM_HD1080I] = { CTV_ENC_MODE, {
+ .ctv_enc_mode = {
+ .mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
+ 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+ | DRM_MODE_FLAG_INTERLACE) },
+ .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
+ 0x8940028, 0x60054, 0xe80870, 0xbf70000,
+ 0xbc70004, 0x70005, 0x70012, 0x70013,
+ 0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
+ 0x1c70237, 0x70238, 0x70244, 0x70245,
+ 0x40f0246, 0x70462, 0x1f70464, 0x0,
+ 0x2e40001, 0x58, 0x2e001e, 0x258012c,
+ 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
+ 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
+ } } } }
+};
+
+/*
+ * The following is some guesswork on how the TV encoder flicker
+ * filter/rescaler works:
+ *
+ * It seems to use some sort of resampling filter, controlled through
+ * the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, which drive the
+ * horizontal and vertical stages respectively. There is also
+ * NV_PTV_HFILTER2, which the blob fills identically to NV_PTV_HFILTER,
+ * but those registers seem to do nothing. A rough guess might be that
+ * they could be used to independently control the filtering of each
+ * interlaced field, but I don't know how they are enabled. The whole
+ * filtering process seems to be disabled with bits 26:27 of PTV_200,
+ * but we aren't doing that.
+ *
+ * The layout of both register sets is the same:
+ *
+ * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
+ * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
+ *
+ * Each coefficient is stored in bits [31],[15:9] in two's complement
+ * format. They seem to be some kind of weights used in a low-pass
+ * filter. Both A and B coefficients are applied to the 14 nearest
+ * samples on each side (listed from nearest to furthest; they roughly
+ * cover 2 framebuffer pixels on each side). They are probably
+ * multiplied with some more hardwired weights before being used:
+ * B-coefficients are applied the same on both sides, while
+ * A-coefficients are inverted before being applied to the opposite
+ * side.
+ *
+ * After all the hassle, I got the following formula by empirical
+ * means...
+ */
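+
+/*
+ * Purely illustrative sketch (hypothetical helper, not used by the
+ * driver): packing a signed coefficient that already fits in 8 bits of
+ * two's complement into the bit 31 + bits [15:9] layout described above.
+ */
+#if 0
+static inline uint32_t pack_filter_coeff(int coeff)
+{
+	/* Sign bit goes to bit 31, the low 7 two's complement bits to [15:9]. */
+	return (coeff < 0 ? 1u << 31 : 0) | (((uint32_t)coeff & 0x7f) << 9);
+}
+#endif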
+
+#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
+
+#define id1 (1LL << 8)
+#define id2 (1LL << 16)
+#define id3 (1LL << 24)
+#define id4 (1LL << 32)
+#define id5 (1LL << 48)
+
+static struct filter_params {
+ int64_t k1;
+ int64_t ki;
+ int64_t ki2;
+ int64_t ki3;
+ int64_t kr;
+ int64_t kir;
+ int64_t ki2r;
+ int64_t ki3r;
+ int64_t kf;
+ int64_t kif;
+ int64_t ki2f;
+ int64_t ki3f;
+ int64_t krf;
+ int64_t kirf;
+ int64_t ki2rf;
+ int64_t ki3rf;
+} fparams[2][4] = {
+ /* Horizontal filter parameters */
+ {
+ {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
+ 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
+ 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
+ -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
+ {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
+ 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
+ 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
+ -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
+ {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
+ 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
+ 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
+ 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
+ {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
+ -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
+ -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
+ 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
+ },
+
+ /* Vertical filter parameters */
+ {
+ {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
+ -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
+ -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
+ 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
+ {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
+ 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
+ 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
+ -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
+ {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
+ 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
+ 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
+ -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
+ {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
+ 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
+ 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
+ -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
+ }
+};
+
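+/*
+ * tv_setup_filter() below evaluates, for each tap i (0..6) of each of the
+ * four coefficient banks, a value of the form
+ *
+ *   c(i) = P1(i) + P2(i) * rs + P3(i) * flicker + P4(i) * rs * flicker
+ *
+ * where every Pn(i) is a cubic polynomial in the tap index built from the
+ * fparams table above, rs is the resampling ratio derived from the CRTC
+ * and TV encoder mode sizes, and flicker comes from the user's flicker
+ * filter setting.
+ */
+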
+static void tv_setup_filter(struct drm_encoder *encoder)
+{
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ struct drm_display_mode *mode = &encoder->crtc->mode;
+ uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
+ &tv_enc->state.vfilter};
+ int i, j, k;
+ int32_t overscan = calc_overscan(tv_enc->overscan);
+ int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
+ uint64_t rs[] = {mode->hdisplay * id3,
+ mode->vdisplay * id3};
+
+ do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
+ do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
+
+ for (k = 0; k < 2; k++) {
+ rs[k] = max((int64_t)rs[k], id2);
+
+ for (j = 0; j < 4; j++) {
+ struct filter_params *p = &fparams[k][j];
+
+ for (i = 0; i < 7; i++) {
+ int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
+ + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
+ + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
+ + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
+
+				(*filters[k])[j][i] = ((c + id5/2) >> 39) & (0x1 << 31 | 0x7f << 9);
+ }
+ }
+ }
+}
+
+/* Hardware state saving/restoring */
+
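+/*
+ * The four offsets used below correspond to the two halves of the A and B
+ * coefficient banks described in the filter comment above: base and
+ * base + 0x40 hold the A coefficients, base + 0x1c and base + 0x5c hold
+ * the B coefficients, seven registers each.
+ */
+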
+static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+{
+ int i, j;
+ uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 7; j++)
+ regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
+ }
+}
+
+static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
+{
+ int i, j;
+ uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 7; j++)
+ nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
+ }
+}
+
+void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
+{
+ int i;
+
+ for (i = 0; i < 0x40; i++)
+ state->tv_enc[i] = nv_read_tv_enc(dev, i);
+
+ tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
+ tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+ tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+ nv_save_ptv(dev, state, 200);
+ nv_save_ptv(dev, state, 204);
+ nv_save_ptv(dev, state, 208);
+ nv_save_ptv(dev, state, 20c);
+ nv_save_ptv(dev, state, 304);
+ nv_save_ptv(dev, state, 500);
+ nv_save_ptv(dev, state, 504);
+ nv_save_ptv(dev, state, 508);
+ nv_save_ptv(dev, state, 600);
+ nv_save_ptv(dev, state, 604);
+ nv_save_ptv(dev, state, 608);
+ nv_save_ptv(dev, state, 60c);
+ nv_save_ptv(dev, state, 610);
+ nv_save_ptv(dev, state, 614);
+}
+
+void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
+{
+ int i;
+
+ for (i = 0; i < 0x40; i++)
+ nv_write_tv_enc(dev, i, state->tv_enc[i]);
+
+ tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
+ tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
+ tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
+
+ nv_load_ptv(dev, state, 200);
+ nv_load_ptv(dev, state, 204);
+ nv_load_ptv(dev, state, 208);
+ nv_load_ptv(dev, state, 20c);
+ nv_load_ptv(dev, state, 304);
+ nv_load_ptv(dev, state, 500);
+ nv_load_ptv(dev, state, 504);
+ nv_load_ptv(dev, state, 508);
+ nv_load_ptv(dev, state, 600);
+ nv_load_ptv(dev, state, 604);
+ nv_load_ptv(dev, state, 608);
+ nv_load_ptv(dev, state, 60c);
+ nv_load_ptv(dev, state, 610);
+ nv_load_ptv(dev, state, 614);
+
+ /* This is required for some settings to kick in. */
+ nv_write_tv_enc(dev, 0x3e, 1);
+ nv_write_tv_enc(dev, 0x3e, 0);
+}
+
+/* Timings similar to the ones the blob sets */
+
+struct drm_display_mode nv17_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
+ 320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+ | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
+ 320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
+ | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
+ 400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
+ | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
+ 640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
+ 720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
+ 720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
+ 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
+ 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ {}
+};
+
+void nv17_tv_update_properties(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_state *regs = &tv_enc->state;
+ struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ int subconnector = tv_enc->select_subconnector ?
+ tv_enc->select_subconnector :
+ tv_enc->subconnector;
+
+ switch (subconnector) {
+ case DRM_MODE_SUBCONNECTOR_Composite:
+ {
+ regs->ptv_204 = 0x2;
+
+ /* The composite connector may be found on either pin. */
+ if (tv_enc->pin_mask & 0x4)
+ regs->ptv_204 |= 0x010000;
+ else if (tv_enc->pin_mask & 0x2)
+ regs->ptv_204 |= 0x100000;
+ else
+ regs->ptv_204 |= 0x110000;
+
+ regs->tv_enc[0x7] = 0x10;
+ break;
+ }
+ case DRM_MODE_SUBCONNECTOR_SVIDEO:
+ regs->ptv_204 = 0x11012;
+ regs->tv_enc[0x7] = 0x18;
+ break;
+
+ case DRM_MODE_SUBCONNECTOR_Component:
+ regs->ptv_204 = 0x111333;
+ regs->tv_enc[0x7] = 0x14;
+ break;
+
+ case DRM_MODE_SUBCONNECTOR_SCART:
+ regs->ptv_204 = 0x111012;
+ regs->tv_enc[0x7] = 0x18;
+ break;
+ }
+
+ regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
+ tv_enc->saturation);
+ regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
+ tv_enc->saturation);
+ regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
+
+ nv_load_ptv(dev, regs, 204);
+ nv_load_tv_enc(dev, regs, 7);
+ nv_load_tv_enc(dev, regs, 20);
+ nv_load_tv_enc(dev, regs, 22);
+ nv_load_tv_enc(dev, regs, 25);
+}
+
+void nv17_tv_update_rescaler(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct nv17_tv_state *regs = &tv_enc->state;
+
+ regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
+
+ tv_setup_filter(encoder);
+
+ nv_load_ptv(dev, regs, 208);
+ tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
+ tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
+ tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
+}
+
+void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ int head = nouveau_crtc(encoder->crtc)->index;
+ struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+ struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
+ struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
+ int overscan, hmargin, vmargin, hratio, vratio;
+
+ /* The rescaler doesn't do the right thing for interlaced modes. */
+ if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ overscan = 100;
+ else
+ overscan = tv_enc->overscan;
+
+ hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
+ vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
+
+ hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
+ overscan);
+ vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
+ overscan);
+
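+	/*
+	 * hratio and vratio are fixed point scale factors with 0x800
+	 * representing 1:1; vratio is additionally rounded down to a
+	 * multiple of four by the ~3 mask.
+	 */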
+ hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
+ vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
+
+ regs->fp_horiz_regs[FP_VALID_START] = hmargin;
+ regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
+ regs->fp_vert_regs[FP_VALID_START] = vmargin;
+ regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
+
+ regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
+ XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
+ NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
+ XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
+ regs->fp_horiz_regs[FP_VALID_START]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
+ regs->fp_horiz_regs[FP_VALID_END]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
+ regs->fp_vert_regs[FP_VALID_START]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
+ regs->fp_vert_regs[FP_VALID_END]);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
new file mode 100644
index 00000000000..18ba74f1970
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -0,0 +1,780 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/*
+ * NV20
+ * -----
+ * There are 3 families:
+ * NV20 is 0x10de:0x020*
+ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
+ * NV2A is 0x10de:0x02A0
+ *
+ * NV30
+ * -----
+ * There are 3 families:
+ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
+ * NV34 is 0x10de:0x032*
+ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
+ *
+ * Not seen in the wild, no dumps (probably NV35):
+ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
+ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
+ *
+ */
+
+#define NV20_GRCTX_SIZE (3580*4)
+#define NV25_GRCTX_SIZE (3529*4)
+#define NV2A_GRCTX_SIZE (3500*4)
+
+#define NV30_31_GRCTX_SIZE (24392)
+#define NV34_GRCTX_SIZE (18140)
+#define NV35_36_GRCTX_SIZE (22396)
+
+static void
+nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ for (i = 0x04d4; i <= 0x04e0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x04f4; i <= 0x0500; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080000);
+ for (i = 0x050c; i <= 0x0518; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x051c; i <= 0x0528; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x000105b8);
+ for (i = 0x052c; i <= 0x0538; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ for (i = 0x055c; i <= 0x0598; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
+ nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ for (i = 0x1c1c; i <= 0x248c; i += 16) {
+ nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+ nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+ nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ }
+ nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
+ nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
+ nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
+ for (i = 0x355c; i <= 0x3578; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
+ for (i = 0x0510; i <= 0x051c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x0530; i <= 0x053c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080000);
+ for (i = 0x0548; i <= 0x0554; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0558; i <= 0x0564; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x000105b8);
+ for (i = 0x0568; i <= 0x0574; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ for (i = 0x0598; i <= 0x05d4; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
+ nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
+ nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
+ nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
+ for (i = 0x1b04; i <= 0x2374; i += 16) {
+ nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+ nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+ nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ }
+ nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
+ nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
+ nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
+ for (i = 0x3484; i <= 0x34a0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
+ for (i = 0x04d4; i <= 0x04e0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x04f4; i <= 0x0500; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080000);
+ for (i = 0x050c; i <= 0x0518; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x051c; i <= 0x0528; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x000105b8);
+ for (i = 0x052c; i <= 0x0538; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ for (i = 0x055c; i <= 0x0598; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
+ nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
+ for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
+ nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
+ nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
+ nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
+ }
+ nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
+ nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
+ nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
+ for (i = 0x341c; i <= 0x3438; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x001c527c);
+}
+
+static void
+nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
+ nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
+ for (i = 0x04e0; i < 0x04e8; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
+ for (i = 0x0508; i < 0x0548; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
+ nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
+ nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
+ for (i = 0x0600; i < 0x0640; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00010588);
+ for (i = 0x0640; i < 0x0680; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x06c0; i < 0x0700; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ for (i = 0x0700; i < 0x0740; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0740; i < 0x0780; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
+ for (i = 0x0864; i < 0x0874; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00040004);
+ for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+ nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+ nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+ nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ }
+ for (i = 0x30b8; i < 0x30c8; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0000ffff);
+ nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
+}
+
+static void
+nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
+ nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
+ nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
+ for (i = 0x04d4; i < 0x04dc; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
+ for (i = 0x04fc; i < 0x053c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
+ nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
+ nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
+ for (i = 0x05f0; i < 0x0630; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00010588);
+ for (i = 0x0630; i < 0x0670; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x06b0; i < 0x06f0; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ for (i = 0x06f0; i < 0x0730; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0730; i < 0x0770; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
+ for (i = 0x0858; i < 0x0868; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00040004);
+ for (i = 0x15ac; i <= 0x271c ; i += 16) {
+ nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+ nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+ nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ }
+ for (i = 0x274c; i < 0x275c; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0000ffff);
+ nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
+}
+
+static void
+nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
+ nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
+ nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
+ nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
+ nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
+ nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
+ nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
+ for (i = 0x04dc; i < 0x04e4; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0fff0000);
+ nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
+ for (i = 0x0504; i < 0x0544; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x07ff0000);
+ nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
+ nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
+ nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
+ nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
+ nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
+ nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
+ nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
+ for (i = 0x0604; i < 0x0644; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00010588);
+ for (i = 0x0644; i < 0x0684; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00030303);
+ for (i = 0x06c4; i < 0x0704; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0008aae4);
+ for (i = 0x0704; i < 0x0744; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x01012000);
+ for (i = 0x0744; i < 0x0784; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00080008);
+ nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
+ nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
+ for (i = 0x0868; i < 0x0878; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x00040004);
+ for (i = 0x1f1c; i <= 0x308c ; i += 16) {
+ nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
+ nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
+ nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
+ }
+ for (i = 0x30bc; i < 0x30cc; i += 4)
+ nv_wo32(dev, ctx, i/4, 0x0000ffff);
+ nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
+ nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
+ nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
+ nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
+ nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
+}
+
+int
+nv20_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
+ unsigned int ctx_size;
+ unsigned int idoffs = 0x28/4;
+ int ret;
+
+ switch (dev_priv->chipset) {
+ case 0x20:
+ ctx_size = NV20_GRCTX_SIZE;
+ ctx_init = nv20_graph_context_init;
+ idoffs = 0;
+ break;
+ case 0x25:
+ case 0x28:
+ ctx_size = NV25_GRCTX_SIZE;
+ ctx_init = nv25_graph_context_init;
+ break;
+ case 0x2a:
+ ctx_size = NV2A_GRCTX_SIZE;
+ ctx_init = nv2a_graph_context_init;
+ idoffs = 0;
+ break;
+ case 0x30:
+ case 0x31:
+ ctx_size = NV30_31_GRCTX_SIZE;
+ ctx_init = nv30_31_graph_context_init;
+ break;
+ case 0x34:
+ ctx_size = NV34_GRCTX_SIZE;
+ ctx_init = nv34_graph_context_init;
+ break;
+ case 0x35:
+ case 0x36:
+ ctx_size = NV35_36_GRCTX_SIZE;
+ ctx_init = nv35_36_graph_context_init;
+ break;
+ default:
+ ctx_size = 0;
+ ctx_init = nv35_36_graph_context_init;
+ NV_ERROR(dev, "Please contact the devs if you want your NV%x"
+ " card to work\n", dev_priv->chipset);
+ return -ENOSYS;
+ }
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
+ if (ret)
+ return ret;
+
+ /* Initialise default context values */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ ctx_init(dev, chan->ramin_grctx->gpuobj);
+
+ /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
+ nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
+ (chan->id << 24) | 0x1); /* CTX_USER */
+
+ nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
+ chan->ramin_grctx->instance >> 4);
+
+ dev_priv->engine.instmem.finish_access(dev);
+ return 0;
+}
+
+void
+nv20_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (chan->ramin_grctx)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
+ dev_priv->engine.instmem.finish_access(dev);
+}
+
+int
+nv20_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t inst;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+ NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+
+ nouveau_wait_for_idle(dev);
+ return 0;
+}
+
+int
+nv20_graph_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_channel *chan;
+ uint32_t inst, tmp;
+
+ chan = pgraph->channel(dev);
+ if (!chan)
+ return 0;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+ NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= (pfifo->channels - 1) << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+static void
+nv20_graph_rdi(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, writecount = 32;
+ uint32_t rdi_index = 0x2c80000;
+
+ if (dev_priv->chipset == 0x20) {
+ rdi_index = 0x3d0000;
+ writecount = 15;
+ }
+
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
+ for (i = 0; i < writecount; i++)
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
+
+ nouveau_wait_for_idle(dev);
+}
+
+int
+nv20_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv =
+ (struct drm_nouveau_private *)dev->dev_private;
+ uint32_t tmp, vramsz;
+ int ret, i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
+
+ if (!dev_priv->ctx_table) {
+ /* Create Context Pointer Table */
+ dev_priv->ctx_table_size = 32 * 4;
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table);
+ if (ret)
+ return ret;
+ }
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
+
+ nv20_graph_rdi(dev);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ nv_wr32(dev, 0x40009C , 0x00000040);
+
+ if (dev_priv->chipset >= 0x25) {
+ nv_wr32(dev, 0x400890, 0x00080000);
+ nv_wr32(dev, 0x400610, 0x304B1FB6);
+ nv_wr32(dev, 0x400B80, 0x18B82880);
+ nv_wr32(dev, 0x400B84, 0x44000000);
+ nv_wr32(dev, 0x400098, 0x40000080);
+ nv_wr32(dev, 0x400B88, 0x000000ff);
+ } else {
+ nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+ nv_wr32(dev, 0x400094, 0x00000005);
+ nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+ nv_wr32(dev, 0x400B84, 0x24000000);
+ nv_wr32(dev, 0x400098, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ }
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, 0x00400904 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ nv_wr32(dev, 0x00400908 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ /* which is NV40_PGRAPH_TSIZE0(i) ?? */
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ nv_wr32(dev, 0x00400900 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ /* which is NV40_PGRAPH_TILE0(i) ?? */
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ }
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
+ nv_rd32(dev, 0x100300 + i * 4));
+ }
+ nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+
+ tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+ nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+ tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
+ nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+
+ /* begin RAM config */
+ vramsz = drm_get_resource_len(dev, 0) - 1;
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400820, 0);
+ nv_wr32(dev, 0x400824, 0);
+ nv_wr32(dev, 0x400864, vramsz - 1);
+ nv_wr32(dev, 0x400868, vramsz - 1);
+
+ /* interesting.. the below overwrites some of the tile setup above.. */
+ nv_wr32(dev, 0x400B20, 0x00000000);
+ nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+
+ return 0;
+}
+
+void
+nv20_graph_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
+}
+
+int
+nv30_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
+
+ if (!dev_priv->ctx_table) {
+ /* Create Context Pointer Table */
+ dev_priv->ctx_table_size = 32 * 4;
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table);
+ if (ret)
+ return ret;
+ }
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(dev, 0x400890, 0x01b463ff);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+ nv_wr32(dev, 0x400B80, 0x1003d888);
+ nv_wr32(dev, 0x400B84, 0x0c000000);
+ nv_wr32(dev, 0x400098, 0x00000000);
+ nv_wr32(dev, 0x40009C, 0x0005ad00);
+ nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+ nv_wr32(dev, 0x4000a0, 0x00000000);
+ nv_wr32(dev, 0x4000a4, 0x00000008);
+ nv_wr32(dev, 0x4008a8, 0xb784a400);
+ nv_wr32(dev, 0x400ba0, 0x002f8685);
+ nv_wr32(dev, 0x400ba4, 0x00231f3f);
+ nv_wr32(dev, 0x4008a4, 0x40000020);
+
+ if (dev_priv->chipset == 0x34) {
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
+ }
+
+ nv_wr32(dev, 0x4000c0, 0x00000016);
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ nv_wr32(dev, 0x00400904 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TLIMIT(i)));
+ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+ nv_wr32(dev, 0x00400908 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TSIZE(i)));
+ /* which is NV40_PGRAPH_TSIZE0(i) ?? */
+ nv_wr32(dev, 0x00400900 + i * 0x10,
+ nv_rd32(dev, NV10_PFB_TILE(i)));
+ /* which is NV40_PGRAPH_TILE0(i) ?? */
+ }
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(dev, 0x0040075c , 0x00000001);
+
+ /* begin RAM config */
+ /* vramsz = drm_get_resource_len(dev, 0) - 1; */
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ if (dev_priv->chipset != 0x34) {
+ nv_wr32(dev, 0x400750, 0x00EA0000);
+ nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400750, 0x00EA0004);
+ nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
+ }
+
+ return 0;
+}
+
+struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x009e, false, NULL }, /* swzsurf */
+	{ 0x0096, false, NULL }, /* celsius */
+ { 0x0097, false, NULL }, /* kelvin (nv20) */
+ { 0x0597, false, NULL }, /* kelvin (nv25) */
+ {}
+};
+
+struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x038a, false, NULL }, /* ifc (nv30) */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x0389, false, NULL }, /* sifm (nv30) */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x0362, false, NULL }, /* surf2d (nv30) */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x039e, false, NULL }, /* swzsurf */
+ { 0x0397, false, NULL }, /* rankine (nv30) */
+ { 0x0497, false, NULL }, /* rankine (nv35) */
+ { 0x0697, false, NULL }, /* rankine (nv34) */
+ {}
+};
+
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
new file mode 100644
index 00000000000..ca1d27107a8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -0,0 +1,62 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fb_bar_size, tmp;
+ int num_tiles;
+ int i;
+
+	/*
+	 * This is strictly an NV4x register (don't know about NV5x). The
+	 * blob sets these to all kinds of values, and they mess up our
+	 * setup; I got the value 0x52802 instead, and for some cards the
+	 * blob even sets it back to 0x1. Note: the blob doesn't read this
+	 * value, so I'm pretty sure this is safe for all cards. Any idea
+	 * what this is?
+	 */
+ nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
+ nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
+ num_tiles = NV10_PFB_TILE__SIZE;
+ break;
+ case 0x46: /* G72 */
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ case 0x4c: /* C51 (G7X version) */
+ num_tiles = NV40_PFB_TILE__SIZE_1;
+ break;
+ default:
+ num_tiles = NV40_PFB_TILE__SIZE_0;
+ break;
+ }
+
+ fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+ switch (dev_priv->chipset) {
+ case 0x40:
+ for (i = 0; i < num_tiles; i++) {
+ nv_wr32(dev, NV10_PFB_TILE(i), 0);
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
+ }
+ break;
+ default:
+ for (i = 0; i < num_tiles; i++) {
+ nv_wr32(dev, NV40_PFB_TILE(i), 0);
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv40_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
new file mode 100644
index 00000000000..b4f19ccb8b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
+#define NV40_RAMFC__SIZE 128
+
+int
+nv40_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV40_RAMFC(chan->id);
+ int ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
+ NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
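+	/*
+	 * The RAMFC offsets written here mirror the reads in
+	 * nv40_fifo_do_load_context() below: +0/+4 are DMA_PUT/DMA_GET,
+	 * +12 is DMA_INSTANCE, +24 is DMA_FETCH, +56 is GRCTX_INSTANCE
+	 * and +60 feeds the DMA_TIMESLICE field.
+	 */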
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wi32(dev, fc + 0, chan->pushbuf_base);
+ nv_wi32(dev, fc + 4, chan->pushbuf_base);
+ nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
+ nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0x30000000 /* no idea.. */);
+ nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
+ nv_wi32(dev, fc + 60, 0x0001FFFF);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ /* enable the fifo dma operation */
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+ return 0;
+}
+
+void
+nv40_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, NV04_PFIFO_MODE,
+ nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+
+ if (chan->ramfc)
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+static void
+nv40_fifo_do_load_context(struct drm_device *dev, int chid)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
+
+ /* No idea what 0x2058 is.. */
+ tmp = nv_ri32(dev, fc + 24);
+ tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
+ tmp2 |= (tmp & 0x30000000);
+ nv_wr32(dev, 0x2058, tmp2);
+ tmp &= ~0x30000000;
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
+ tmp = nv_ri32(dev, fc + 40);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
+ nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
+ nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
+
+ /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
+ tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
+ tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
+
+ nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
+ /* NVIDIA does this next line twice... */
+ nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
+ nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
+ nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+}
+
+int
+nv40_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t tmp;
+
+ nv40_fifo_do_load_context(dev, chan->id);
+
+ /* Set channel active, and in DMA mode */
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
+
+ /* Reset DMA_CTL_AT_INFO to INVALID */
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv40_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ uint32_t fc, tmp;
+ int chid;
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
+ return 0;
+ fc = NV40_RAMFC(chid);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
+ nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
+ nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
+ nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
+ nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
+ tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
+ nv_wi32(dev, fc + 24, tmp);
+ nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
+ nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
+ nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+ tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
+ nv_wi32(dev, fc + 40, tmp);
+ nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+ nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
+	/* NVIDIA reads 0x3228 first, then writes DMA_GET here.. maybe something
+ * more involved depending on the value of 0x3228?
+ */
+ nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
+ nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
+ nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
+	/* No idea what the below is for exactly; ripped from an mmio-trace */
+ nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
+	/* NVIDIA does this next line twice.. bug? */
+ nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
+ nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
+ nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
+#if 0 /* no real idea which is PUT/GET in UNK_48.. */
+ tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
+ tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
+ nv_wi32(dev, fc + 72, tmp);
+#endif
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv40_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
+ NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
+ return 0;
+}
+
+static void
+nv40_fifo_init_reset(struct drm_device *dev)
+{
+ int i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, 0x003224, 0x000f0078);
+ nv_wr32(dev, 0x003210, 0x00000000);
+ nv_wr32(dev, 0x003270, 0x00000000);
+ nv_wr32(dev, 0x003240, 0x00000000);
+ nv_wr32(dev, 0x003244, 0x00000000);
+ nv_wr32(dev, 0x003258, 0x00000000);
+ nv_wr32(dev, 0x002504, 0x00000000);
+ for (i = 0; i < 16; i++)
+ nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
+ nv_wr32(dev, 0x00250c, 0x0000ffff);
+ nv_wr32(dev, 0x002048, 0x00000000);
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x0032e8, 0x00000000);
+ nv_wr32(dev, 0x002410, 0x00000000);
+ nv_wr32(dev, 0x002420, 0x00000000);
+ nv_wr32(dev, 0x002058, 0x00000001);
+ nv_wr32(dev, 0x00221c, 0x00000000);
+ /* something with 0x2084, read/modify/write, no change */
+ nv_wr32(dev, 0x002040, 0x000000ff);
+ nv_wr32(dev, 0x002500, 0x00000000);
+ nv_wr32(dev, 0x003200, 0x00000000);
+
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
+}
+
+static void
+nv40_fifo_init_ramxx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
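+	/*
+	 * Illustrative example: with ramht_bits = 9 and ramht_offset =
+	 * 0x10000 this would write (0x03 << 24) | (0 << 16) | 0x100 =
+	 * 0x03000100.
+	 */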
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, 0x2230, 1);
+ break;
+ default:
+ break;
+ }
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
+ break;
+ default:
+ nv_wr32(dev, 0x2230, 0);
+ nv_wr32(dev, NV40_PFIFO_RAMFC,
+ ((nouveau_mem_fb_amount(dev) - 512 * 1024 +
+ dev_priv->ramfc_offset) >> 16) | (3 << 16));
+ break;
+ }
+}
+
+static void
+nv40_fifo_init_intr(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+}
+
+int
+nv40_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ int i;
+
+ nv40_fifo_init_reset(dev);
+ nv40_fifo_init_ramxx(dev);
+
+ nv40_fifo_do_load_context(dev, pfifo->channels - 1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
+
+ nv40_fifo_init_intr(dev);
+ pfifo->enable(dev);
+ pfifo->reassign(dev, true);
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ if (dev_priv->fifos[i]) {
+ uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
+ nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
new file mode 100644
index 00000000000..2b332bb55ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -0,0 +1,428 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_grctx.h"
+
+struct nouveau_channel *
+nv40_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int i;
+
+ inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+ return NULL;
+ inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->ramin_grctx &&
+ chan->ramin_grctx->instance == inst)
+ return chan;
+ }
+
+ return NULL;
+}
+
+int
+nv40_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ int ret;
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
+ if (ret)
+ return ret;
+
+ /* Initialise default context values */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ if (!pgraph->ctxprog) {
+ struct nouveau_grctx ctx = {};
+
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv40_grctx_init(&ctx);
+ } else {
+ nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
+ }
+ nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
+ chan->ramin_grctx->gpuobj->im_pramin->start);
+ dev_priv->engine.instmem.finish_access(dev);
+ return 0;
+}
+
+void
+nv40_graph_destroy_context(struct nouveau_channel *chan)
+{
+ nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
+}
+
+static int
+nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
+{
+ uint32_t old_cp, tv = 1000, tmp;
+ int i;
+
+ old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+
+ tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
+ tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
+
+ tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
+ tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
+
+ nouveau_wait_for_idle(dev);
+
+ for (i = 0; i < tv; i++) {
+ if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
+ break;
+ }
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
+
+ if (i == tv) {
+ uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
+ NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
+ NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
+ ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
+ ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
+ NV_ERROR(dev, "0x40030C = 0x%08x\n",
+ nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/* Restore the context for a specific channel into PGRAPH */
+int
+nv40_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t inst;
+ int ret;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ ret = nv40_graph_transfer_context(dev, inst, 0);
+ if (ret)
+ return ret;
+
+ /* 0x40032C, no idea of its exact function. Could simply be a
+ * record of the currently active PGRAPH context. It's currently
+ * unknown what bit 24 does. The nv ddx has it set, so we will
+ * set it here too.
+ */
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
+ (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
+ NV40_PGRAPH_CTXCTL_CUR_LOADED);
+ /* 0x32E0 records the instance address of the active FIFO's PGRAPH
+ * context. If at any time this doesn't match 0x40032C, you will
+ * receive PGRAPH_INTR_CONTEXT_SWITCH
+ */
+ nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
+ return 0;
+}
+
+int
+nv40_graph_unload_context(struct drm_device *dev)
+{
+ uint32_t inst;
+ int ret;
+
+ inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+ return 0;
+ inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+ ret = nv40_graph_transfer_context(dev, inst, 1);
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+ return ret;
+}
+
+/*
+ * G70 0x47
+ * G71 0x49
+ * NV45 0x48
+ * G72[M] 0x46
+ * G73 0x4b
+ * C51_G7X 0x4c
+ * C51 0x4e
+ */
+int
+nv40_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv =
+ (struct drm_nouveau_private *)dev->dev_private;
+ uint32_t vramsz, tmp;
+ int i, j;
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ if (nouveau_ctxfw) {
+ nouveau_grctx_prog_load(dev);
+ dev_priv->engine.graph.grctx_size = 175 * 1024;
+ }
+
+ if (!dev_priv->engine.graph.ctxprog) {
+ struct nouveau_grctx ctx = {};
+ uint32_t cp[256];
+
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 256;
+ nv40_grctx_init(&ctx);
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ }
+
+ /* No context present currently */
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+
+ j = nv_rd32(dev, 0x1540) & 0xff;
+ if (j) {
+ for (i = 0; !(j & 1); j >>= 1, i++)
+ ;
+ nv_wr32(dev, 0x405000, i);
+ }
+
+ if (dev_priv->chipset == 0x40) {
+ nv_wr32(dev, 0x4009b0, 0x83280fff);
+ nv_wr32(dev, 0x4009b4, 0x000000a0);
+ } else {
+ nv_wr32(dev, 0x400820, 0x83280eff);
+ nv_wr32(dev, 0x400824, 0x000000a0);
+ }
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ nv_wr32(dev, 0x4009b8, 0x0078e366);
+ nv_wr32(dev, 0x4009bc, 0x0000014c);
+ break;
+ case 0x41:
+ case 0x42: /* pciid also 0x00Cx */
+ /* case 0x0120: XXX (pciid) */
+ nv_wr32(dev, 0x400828, 0x007596ff);
+ nv_wr32(dev, 0x40082c, 0x00000108);
+ break;
+ case 0x43:
+ nv_wr32(dev, 0x400828, 0x0072cb77);
+ nv_wr32(dev, 0x40082c, 0x00000108);
+ break;
+ case 0x44:
+ case 0x46: /* G72 */
+ case 0x4a:
+ case 0x4c: /* G7x-based C51 */
+ case 0x4e:
+ nv_wr32(dev, 0x400860, 0);
+ nv_wr32(dev, 0x400864, 0);
+ break;
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ nv_wr32(dev, 0x400828, 0x07830610);
+ nv_wr32(dev, 0x40082c, 0x0000016A);
+ break;
+ default:
+ break;
+ }
+
+ nv_wr32(dev, 0x400b38, 0x2ffff800);
+ nv_wr32(dev, 0x400b3c, 0x00006000);
+
+ /* copy tile info from PFB */
+ switch (dev_priv->chipset) {
+ case 0x40: /* vanilla NV40 */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ tmp = nv_rd32(dev, NV10_PFB_TILE(i));
+ nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+ tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
+ nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ case 0x44:
+ case 0x4a:
+ case 0x4e: /* NV44-based cores don't have 0x406900? */
+ for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
+ tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+ nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+ nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+ }
+ break;
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b: /* G7X-based cores */
+ for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
+ tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+ nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+ nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+ nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ default: /* everything else */
+ for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
+ tmp = nv_rd32(dev, NV40_PFB_TILE(i));
+ nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
+ nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
+ nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ }
+
+ /* begin RAM config */
+ vramsz = drm_get_resource_len(dev, 0) - 1;
+ switch (dev_priv->chipset) {
+ case 0x40:
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400820, 0);
+ nv_wr32(dev, 0x400824, 0);
+ nv_wr32(dev, 0x400864, vramsz);
+ nv_wr32(dev, 0x400868, vramsz);
+ break;
+ default:
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
+ default:
+ nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
+ }
+ nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400840, 0);
+ nv_wr32(dev, 0x400844, 0);
+ nv_wr32(dev, 0x4008A0, vramsz);
+ nv_wr32(dev, 0x4008A4, vramsz);
+ break;
+ }
+
+ return 0;
+}
+
+void nv40_graph_takedown(struct drm_device *dev)
+{
+ nouveau_grctx_fini(dev);
+}
+
+struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
+ { 0x0030, false, NULL }, /* null */
+ { 0x0039, false, NULL }, /* m2mf */
+ { 0x004a, false, NULL }, /* gdirect */
+ { 0x009f, false, NULL }, /* imageblit (nv12) */
+ { 0x008a, false, NULL }, /* ifc */
+ { 0x0089, false, NULL }, /* sifm */
+ { 0x3089, false, NULL }, /* sifm (nv40) */
+ { 0x0062, false, NULL }, /* surf2d */
+ { 0x3062, false, NULL }, /* surf2d (nv40) */
+ { 0x0043, false, NULL }, /* rop */
+ { 0x0012, false, NULL }, /* beta1 */
+ { 0x0072, false, NULL }, /* beta4 */
+ { 0x0019, false, NULL }, /* cliprect */
+ { 0x0044, false, NULL }, /* pattern */
+ { 0x309e, false, NULL }, /* swzsurf */
+ { 0x4097, false, NULL }, /* curie (nv40) */
+ { 0x4497, false, NULL }, /* curie (nv44) */
+ {}
+};
+
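The loop over register 0x1540 in nv40_graph_init() above finds the index of the lowest enabled vertex-shader unit and writes it to 0x405000; it is equivalent to ffs(mask) - 1. A small sketch of that bit scan (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Returns the index of the lowest set bit, mirroring the loop over the
 * value read from register 0x1540 in nv40_graph_init(). The mask is
 * assumed non-zero, as in the driver (the loop is guarded by "if (j)"). */
static int lowest_set_bit(uint32_t mask)
{
	int i;

	for (i = 0; !(mask & 1); mask >>= 1, i++)
		;
	return i;
}

int main(void)
{
	printf("%d\n", lowest_set_bit(0x0c));	/* prints 2 */
	return 0;
}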
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
new file mode 100644
index 00000000000..11b11c31f54
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
@@ -0,0 +1,678 @@
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* NVIDIA context programs handle a number of other conditions which are
+ * not implemented in our versions. It's not clear why NVIDIA context
+ * programs have this code, nor whether it's strictly necessary for
+ * correct operation. We'll implement additional handling if/when we
+ * discover it's necessary.
+ *
+ * - On context save, NVIDIA sets 0x400314 bit 0 to 1 if the "3D state"
+ * flag is set; this gets saved into the context.
+ * - On context save, the context programs for all cards load nsource
+ * into a flag register and check for ILLEGAL_MTHD. If it's set,
+ * opcode 0x60000d is called before resuming normal operation.
+ * - Some context programs check more conditions than the above. NV44
+ * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
+ * and calls 0x60000d before resuming normal operation.
+ * - At the very beginning of NVIDIA's context programs, flag 9 is checked
+ * and if true 0x800001 is called with count=0, pos=0, the flag is cleared
+ * and then the ctxprog is aborted. It looks like a complicated NOP;
+ * its purpose is unknown.
+ * - In the section of code that loads the per-vs state, NVIDIA checks
+ * flag 10. If it's set, they only transfer the small 0x300 byte block
+ * of state + the state for a single vs as opposed to the state for
+ * all vs units. It doesn't seem likely that it'll occur in normal
+ * operation, especially seeing as it appears NVIDIA may have screwed
+ * up the ctxprogs for some cards and have an invalid instruction
+ * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
+ * - There are a number of places where context offset 0 (where we place
+ * the PRAMIN offset of the context) is loaded into either 0x408000,
+ * 0x408004 or 0x408008. Not sure what's up there either.
+ * - The ctxprogs for some cards save 0x400a00 again during the cleanup
+ * path for auto-loadctx.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_STATUS ((3 * 32) + 0)
+#define CP_FLAG_STATUS_IDLE 0
+#define CP_FLAG_STATUS_BUSY 1
+#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_UNK54 ((3 * 32) + 6)
+#define CP_FLAG_UNK54_CLEAR 0
+#define CP_FLAG_UNK54_SET 1
+#define CP_FLAG_ALWAYS ((3 * 32) + 8)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+#define CP_FLAG_UNK57 ((3 * 32) + 9)
+#define CP_FLAG_UNK57_CLEAR 0
+#define CP_FLAG_UNK57_SET 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000fc000
+#define CP_CTX_COUNT_SHIFT 14
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0000ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEXT_TO_SWAP 0x00600007
+#define CP_NEXT_TO_CURRENT 0x00600009
+#define CP_SET_CONTEXT_POINTER 0x0060000a
+#define CP_END 0x0060000e
+#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */
+#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
+#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_grctx.h"
+
+/* TODO:
+ * - get vs count from 0x1540
+ * - document unimplemented bits compared to nvidia
+ * - nsource handling
+ * - R0 & 0x0200 handling
+ * - single-vs handling
+ * - 400314 bit 0
+ */
+
+static int
+nv40_graph_4097(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if ((dev_priv->chipset & 0xf0) == 0x60)
+ return 0;
+
+ return !!(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
+static int
+nv40_graph_vs_count(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ return 8;
+ case 0x40:
+ return 6;
+ case 0x41:
+ case 0x42:
+ return 5;
+ case 0x43:
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ return 3;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ return 1;
+ }
+}
+
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_swap_state3d_3_is_save,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void
+nv40_graph_construct_general(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x4000a4, 1);
+ gr_def(ctx, 0x4000a4, 0x00000008);
+ cp_ctx(ctx, 0x400144, 58);
+ gr_def(ctx, 0x400144, 0x00000001);
+ cp_ctx(ctx, 0x400314, 1);
+ gr_def(ctx, 0x400314, 0x00000000);
+ cp_ctx(ctx, 0x400400, 10);
+ cp_ctx(ctx, 0x400480, 10);
+ cp_ctx(ctx, 0x400500, 19);
+ gr_def(ctx, 0x400514, 0x00040000);
+ gr_def(ctx, 0x400524, 0x55555555);
+ gr_def(ctx, 0x400528, 0x55555555);
+ gr_def(ctx, 0x40052c, 0x55555555);
+ gr_def(ctx, 0x400530, 0x55555555);
+ cp_ctx(ctx, 0x400560, 6);
+ gr_def(ctx, 0x400568, 0x0000ffff);
+ gr_def(ctx, 0x40056c, 0x0000ffff);
+ cp_ctx(ctx, 0x40057c, 5);
+ cp_ctx(ctx, 0x400710, 3);
+ gr_def(ctx, 0x400710, 0x20010001);
+ gr_def(ctx, 0x400714, 0x0f73ef00);
+ cp_ctx(ctx, 0x400724, 1);
+ gr_def(ctx, 0x400724, 0x02008821);
+ cp_ctx(ctx, 0x400770, 3);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x400814, 4);
+ cp_ctx(ctx, 0x400828, 5);
+ cp_ctx(ctx, 0x400840, 5);
+ gr_def(ctx, 0x400850, 0x00000040);
+ cp_ctx(ctx, 0x400858, 4);
+ gr_def(ctx, 0x400858, 0x00000040);
+ gr_def(ctx, 0x40085c, 0x00000040);
+ gr_def(ctx, 0x400864, 0x80000000);
+ cp_ctx(ctx, 0x40086c, 9);
+ gr_def(ctx, 0x40086c, 0x80000000);
+ gr_def(ctx, 0x400870, 0x80000000);
+ gr_def(ctx, 0x400874, 0x80000000);
+ gr_def(ctx, 0x400878, 0x80000000);
+ gr_def(ctx, 0x400888, 0x00000040);
+ gr_def(ctx, 0x40088c, 0x80000000);
+ cp_ctx(ctx, 0x4009c0, 8);
+ gr_def(ctx, 0x4009cc, 0x80000000);
+ gr_def(ctx, 0x4009dc, 0x80000000);
+ } else {
+ cp_ctx(ctx, 0x400840, 20);
+ if (!nv40_graph_4097(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
+ }
+ gr_def(ctx, 0x400880, 0x00000040);
+ gr_def(ctx, 0x400884, 0x00000040);
+ gr_def(ctx, 0x400888, 0x00000040);
+ cp_ctx(ctx, 0x400894, 11);
+ gr_def(ctx, 0x400894, 0x00000040);
+ if (nv40_graph_4097(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
+ }
+ cp_ctx(ctx, 0x4008e0, 2);
+ cp_ctx(ctx, 0x4008f8, 2);
+ if (dev_priv->chipset == 0x4c ||
+ (dev_priv->chipset & 0xf0) == 0x60)
+ cp_ctx(ctx, 0x4009f8, 1);
+ }
+ cp_ctx(ctx, 0x400a00, 73);
+ gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
+ cp_ctx(ctx, 0x401000, 4);
+ cp_ctx(ctx, 0x405004, 1);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x403448, 1);
+ gr_def(ctx, 0x403448, 0x00001010);
+ break;
+ default:
+ cp_ctx(ctx, 0x403440, 1);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x403440, 0x00000010);
+ break;
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ gr_def(ctx, 0x403440, 0x00003010);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ gr_def(ctx, 0x403440, 0x00001010);
+ break;
+ }
+ break;
+ }
+}
+
+static void
+nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401880, 51);
+ gr_def(ctx, 0x401940, 0x00000100);
+ } else
+ if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
+ dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ cp_ctx(ctx, 0x401880, 32);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
+ if (dev_priv->chipset == 0x46)
+ cp_ctx(ctx, 0x401900, 16);
+ cp_ctx(ctx, 0x401940, 3);
+ }
+ cp_ctx(ctx, 0x40194c, 18);
+ gr_def(ctx, 0x401954, 0x00000111);
+ gr_def(ctx, 0x401958, 0x00080060);
+ gr_def(ctx, 0x401974, 0x00000080);
+ gr_def(ctx, 0x401978, 0xffff0000);
+ gr_def(ctx, 0x40197c, 0x00000001);
+ gr_def(ctx, 0x401990, 0x46400000);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x4019a0, 2);
+ cp_ctx(ctx, 0x4019ac, 5);
+ } else {
+ cp_ctx(ctx, 0x4019a0, 1);
+ cp_ctx(ctx, 0x4019b4, 3);
+ }
+ gr_def(ctx, 0x4019bc, 0xffff0000);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x4019c0, 18);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
+ break;
+ }
+ cp_ctx(ctx, 0x401a08, 8);
+ gr_def(ctx, 0x401a10, 0x0fff0000);
+ gr_def(ctx, 0x401a14, 0x0fff0000);
+ gr_def(ctx, 0x401a1c, 0x00011100);
+ cp_ctx(ctx, 0x401a2c, 4);
+ cp_ctx(ctx, 0x401a44, 26);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
+ gr_def(ctx, 0x401a8c, 0x4b7fffff);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401ab8, 3);
+ } else {
+ cp_ctx(ctx, 0x401ab8, 1);
+ cp_ctx(ctx, 0x401ac0, 1);
+ }
+ cp_ctx(ctx, 0x401ad0, 8);
+ gr_def(ctx, 0x401ad0, 0x30201000);
+ gr_def(ctx, 0x401ad4, 0x70605040);
+ gr_def(ctx, 0x401ad8, 0xb8a89888);
+ gr_def(ctx, 0x401adc, 0xf8e8d8c8);
+ cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+ gr_def(ctx, 0x401b10, 0x40100000);
+ cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
+ gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+ 0x00000004 : 0x00000000);
+ cp_ctx(ctx, 0x401b30, 25);
+ gr_def(ctx, 0x401b34, 0x0000ffff);
+ gr_def(ctx, 0x401b68, 0x435185d6);
+ gr_def(ctx, 0x401b6c, 0x2155b699);
+ gr_def(ctx, 0x401b70, 0xfedcba98);
+ gr_def(ctx, 0x401b74, 0x00000098);
+ gr_def(ctx, 0x401b84, 0xffffffff);
+ gr_def(ctx, 0x401b88, 0x00ff7000);
+ gr_def(ctx, 0x401b8c, 0x0000ffff);
+ if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
+ dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x401b94, 1);
+ cp_ctx(ctx, 0x401b98, 8);
+ gr_def(ctx, 0x401b9c, 0x00ff0000);
+ cp_ctx(ctx, 0x401bc0, 9);
+ gr_def(ctx, 0x401be0, 0x00ffff00);
+ cp_ctx(ctx, 0x401c00, 192);
+ for (i = 0; i < 16; i++) { /* fragment texture units */
+ gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
+ gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
+ gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
+ gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
+ gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
+ gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
+ }
+ for (i = 0; i < 4; i++) { /* vertex texture units */
+ gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
+ gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
+ gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
+ gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
+ }
+ cp_ctx(ctx, 0x400f5c, 3);
+ gr_def(ctx, 0x400f5c, 0x00000002);
+ cp_ctx(ctx, 0x400f84, 1);
+}
+
+static void
+nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x402000, 1);
+ cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402404, 0x00000001);
+ break;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ gr_def(ctx, 0x402404, 0x00000020);
+ break;
+ case 0x46:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402404, 0x00000421);
+ break;
+ default:
+ gr_def(ctx, 0x402404, 0x00000021);
+ }
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402408, 0x030c30c3);
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ cp_ctx(ctx, 0x402440, 1);
+ gr_def(ctx, 0x402440, 0x00011001);
+ break;
+ default:
+ break;
+ }
+ cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+ gr_def(ctx, 0x402488, 0x3e020200);
+ gr_def(ctx, 0x40248c, 0x00ffffff);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402490, 0x60103f00);
+ break;
+ case 0x47:
+ gr_def(ctx, 0x402490, 0x40103f00);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402490, 0x20103f00);
+ break;
+ default:
+ gr_def(ctx, 0x402490, 0x0c103f00);
+ break;
+ }
+ gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+ 0x00020000 : 0x00040000);
+ cp_ctx(ctx, 0x402500, 31);
+ gr_def(ctx, 0x402530, 0x00008100);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x40257c, 6);
+ cp_ctx(ctx, 0x402594, 16);
+ cp_ctx(ctx, 0x402800, 17);
+ gr_def(ctx, 0x402800, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402864, 1);
+ gr_def(ctx, 0x402864, 0x00001001);
+ cp_ctx(ctx, 0x402870, 3);
+ gr_def(ctx, 0x402878, 0x00000003);
+ if (dev_priv->chipset != 0x47) { /* belongs at end!! */
+ cp_ctx(ctx, 0x402900, 1);
+ cp_ctx(ctx, 0x402940, 1);
+ cp_ctx(ctx, 0x402980, 1);
+ cp_ctx(ctx, 0x4029c0, 1);
+ cp_ctx(ctx, 0x402a00, 1);
+ cp_ctx(ctx, 0x402a40, 1);
+ cp_ctx(ctx, 0x402a80, 1);
+ cp_ctx(ctx, 0x402ac0, 1);
+ }
+ break;
+ case 0x40:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00000001);
+ cp_ctx(ctx, 0x402850, 1);
+ break;
+ default:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00001001);
+ cp_ctx(ctx, 0x402850, 2);
+ gr_def(ctx, 0x402854, 0x00000003);
+ break;
+ }
+
+ cp_ctx(ctx, 0x402c00, 4);
+ gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+ 0x80800001 : 0x00888001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402c20, 40);
+ for (i = 0; i < 32; i++)
+ gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
+ cp_ctx(ctx, 0x4030b8, 13);
+ gr_def(ctx, 0x4030dc, 0x00000005);
+ gr_def(ctx, 0x4030e8, 0x0000ffff);
+ break;
+ default:
+ cp_ctx(ctx, 0x402c10, 4);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x402c20, 36);
+ else
+ if (dev_priv->chipset <= 0x42)
+ cp_ctx(ctx, 0x402c20, 24);
+ else
+ if (dev_priv->chipset <= 0x4a)
+ cp_ctx(ctx, 0x402c20, 16);
+ else
+ cp_ctx(ctx, 0x402c20, 8);
+ cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+ gr_def(ctx, 0x402cd4, 0x00000005);
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402ce0, 0x0000ffff);
+ break;
+ }
+
+ cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
+ for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+ gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
+
+ if (dev_priv->chipset != 0x40) {
+ cp_ctx(ctx, 0x403600, 1);
+ gr_def(ctx, 0x403600, 0x00000001);
+ }
+ cp_ctx(ctx, 0x403800, 1);
+
+ cp_ctx(ctx, 0x403c18, 1);
+ gr_def(ctx, 0x403c18, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x405018, 1);
+ gr_def(ctx, 0x405018, 0x08e00001);
+ cp_ctx(ctx, 0x405c24, 1);
+ gr_def(ctx, 0x405c24, 0x000e3000);
+ break;
+ }
+ if (dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x405800, 11);
+ cp_ctx(ctx, 0x407000, 1);
+}
+
+static void
+nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
+{
+ int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+
+ cp_out (ctx, 0x300000);
+ cp_lsr (ctx, len - 4);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
+ cp_lsr (ctx, len);
+ cp_name(ctx, cp_swap_state3d_3_is_save);
+ cp_out (ctx, 0x800001);
+
+ ctx->ctxvals_pos += len;
+}
+
+static void
+nv40_graph_construct_shader(struct nouveau_grctx *ctx)
+{
+ struct drm_device *dev = ctx->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = ctx->data;
+ int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
+ int offset, i;
+
+ vs_nr = nv40_graph_vs_count(ctx->dev);
+ vs_nr_b0 = 363;
+ vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
+ if (dev_priv->chipset == 0x40) {
+ b0_offset = 0x2200/4; /* 33a0 */
+ b1_offset = 0x55a0/4; /* 1500 */
+ vs_len = 0x6aa0/4;
+ } else
+ if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+ b0_offset = 0x2200/4; /* 2200 */
+ b1_offset = 0x4400/4; /* 0b00 */
+ vs_len = 0x4f00/4;
+ } else {
+ b0_offset = 0x1d40/4; /* 2200 */
+ b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
+ vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+ }
+
+ cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
+ cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+
+ offset = ctx->ctxvals_pos;
+ ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
+
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ offset += 0x0280/4;
+ for (i = 0; i < 16; i++, offset += 2)
+ nv_wo32(dev, obj, offset, 0x3f800000);
+
+ for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
+ for (i = 0; i < vs_nr_b0 * 6; i += 6)
+ nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
+ for (i = 0; i < vs_nr_b1 * 4; i += 4)
+ nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
+ }
+}
+
+void
+nv40_grctx_init(struct nouveau_grctx *ctx)
+{
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_name(ctx, cp_setup_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_out (ctx, 0x00910880); /* ?? */
+ cp_out (ctx, 0x00901ffe); /* ?? */
+ cp_out (ctx, 0x01940000); /* ?? */
+ cp_lsr (ctx, 0x20);
+ cp_out (ctx, 0x0060000b); /* ?? */
+ cp_wait(ctx, UNK57, CLEAR);
+ cp_out (ctx, 0x0060000c); /* ?? */
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_pos (ctx, 0x00020/4);
+ nv40_graph_construct_general(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 1 */
+ cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
+ nv40_graph_construct_state3d(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 2 */
+ nv40_graph_construct_state3d_2(ctx);
+
+ /* Some other block of "random" state */
+ nv40_graph_construct_state3d_3(ctx);
+
+ /* Per-vertex shader state */
+ cp_pos (ctx, ctx->ctxvals_pos);
+ nv40_graph_construct_shader(ctx);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_out (ctx, CP_END);
+}
+
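The CP_* macros at the top of nv40_grctx.c describe how ctxprog instruction words are packed. As a rough illustration only (the encode_cp_bra() helper below is hypothetical, not the driver's cp_bra() from nouveau_grctx.h), a branch word can be assembled from those masks like this:

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the CP_BRA defines above. */
#define CP_BRA			0x00400000
#define CP_BRA_IP		0x0000ff00
#define CP_BRA_IP_SHIFT		8
#define CP_BRA_IF_CLEAR		0x00000080
#define CP_BRA_FLAG		0x0000007f

/* Hypothetical illustration of packing a ctxprog branch instruction:
 * jump to ucode address "ip" if "flag" is set (or clear, if if_clear). */
static uint32_t encode_cp_bra(uint32_t ip, uint32_t flag, int if_clear)
{
	uint32_t inst = CP_BRA;

	inst |= (ip << CP_BRA_IP_SHIFT) & CP_BRA_IP;
	inst |= flag & CP_BRA_FLAG;
	if (if_clear)
		inst |= CP_BRA_IF_CLEAR;
	return inst;
}

int main(void)
{
	/* branch to ucode address 0x20 when flag bit 0 (SWAP_DIRECTION) is set */
	printf("0x%08x\n", encode_cp_bra(0x20, 0, 0));	/* 0x00402000 */
	return 0;
}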
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
new file mode 100644
index 00000000000..2a3495e848e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -0,0 +1,38 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_mc_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ /* Power up everything; resetting each individual unit will
+ * be done later if needed.
+ */
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x46: /* G72 */
+ case 0x4e:
+ case 0x4c: /* C51_G7X */
+ tmp = nv_rd32(dev, NV40_PFB_020C);
+ nv_wr32(dev, NV40_PMC_1700, tmp);
+ nv_wr32(dev, NV40_PMC_1704, 0);
+ nv_wr32(dev, NV40_PMC_1708, 0);
+ nv_wr32(dev, NV40_PMC_170C, tmp);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv40_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
new file mode 100644
index 00000000000..118d3285fd8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -0,0 +1,769 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_mode.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+#include "nouveau_fb.h"
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ int i;
+
+ NV_DEBUG_KMS(crtc->dev, "\n");
+
+ for (i = 0; i < 256; i++) {
+ writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
+ writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
+ writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
+ }
+
+ if (nv_crtc->lut.depth == 30) {
+ writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
+ writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
+ writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
+ }
+}
+
+int
+nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int index = nv_crtc->index, ret;
+
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
+
+ if (blanked) {
+ nv_crtc->cursor.hide(nv_crtc, false);
+
+ ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
+ if (ret) {
+ NV_ERROR(dev, "no space while blanking crtc\n");
+ return ret;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
+ OUT_RING(evo, 0);
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+ } else {
+ if (nv_crtc->cursor.visible)
+ nv_crtc->cursor.show(nv_crtc, false);
+ else
+ nv_crtc->cursor.hide(nv_crtc, false);
+
+ ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
+ if (ret) {
+ NV_ERROR(dev, "no space while unblanking crtc\n");
+ return ret;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
+ OUT_RING(evo, nv_crtc->lut.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF :
+ NV50_EVO_CRTC_CLUT_MODE_ON);
+ OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
+ PAGE_SHIFT) >> 8);
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
+ OUT_RING(evo, NvEvoVRAM);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
+ OUT_RING(evo, nv_crtc->fb.offset >> 8);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
+ if (dev_priv->chipset != 0x50)
+ if (nv_crtc->fb.tile_flags == 0x7a00)
+ OUT_RING(evo, NvEvoFB32);
+ else
+ if (nv_crtc->fb.tile_flags == 0x7000)
+ OUT_RING(evo, NvEvoFB16);
+ else
+ OUT_RING(evo, NvEvoVRAM);
+ else
+ OUT_RING(evo, NvEvoVRAM);
+ }
+
+ nv_crtc->fb.blanked = blanked;
+ return 0;
+}
+
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+ if (ret) {
+ NV_ERROR(dev, "no space while setting dither\n");
+ return ret;
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
+ if (on)
+ OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
+ else
+ OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ }
+
+ return 0;
+}
+
+struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+ /* The safest approach is to find an encoder with the right crtc that
+ * is also linked to a connector. */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder)
+ if (connector->encoder->crtc == crtc)
+ return nouveau_connector(connector);
+ }
+
+ return NULL;
+}
+
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+{
+ struct nouveau_connector *nv_connector =
+ nouveau_crtc_connector_get(nv_crtc);
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_display_mode *native_mode = NULL;
+ struct drm_display_mode *mode = &nv_crtc->base.mode;
+ uint32_t outX, outY, horiz, vert;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ switch (scaling_mode) {
+ case DRM_MODE_SCALE_NONE:
+ break;
+ default:
+ if (!nv_connector || !nv_connector->native_mode) {
+ NV_ERROR(dev, "No native mode, forcing panel scaling\n");
+ scaling_mode = DRM_MODE_SCALE_NONE;
+ } else {
+ native_mode = nv_connector->native_mode;
+ }
+ break;
+ }
+
+ switch (scaling_mode) {
+ case DRM_MODE_SCALE_ASPECT:
+ horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
+ vert = (native_mode->vdisplay << 19) / mode->vdisplay;
+
+ if (vert > horiz) {
+ outX = (mode->hdisplay * horiz) >> 19;
+ outY = (mode->vdisplay * horiz) >> 19;
+ } else {
+ outX = (mode->hdisplay * vert) >> 19;
+ outY = (mode->vdisplay * vert) >> 19;
+ }
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ outX = native_mode->hdisplay;
+ outY = native_mode->vdisplay;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ case DRM_MODE_SCALE_NONE:
+ default:
+ outX = mode->hdisplay;
+ outY = mode->vdisplay;
+ break;
+ }
+
+ ret = RING_SPACE(evo, update ? 7 : 5);
+ if (ret)
+ return ret;
+
+ /* Got a better name for SCALER_ACTIVE? */
+ /* One day I've got to really figure out why this is needed. */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+ if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ mode->hdisplay != outX || mode->vdisplay != outY) {
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
+ } else {
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+ OUT_RING(evo, outY << 16 | outX);
+ OUT_RING(evo, outY << 16 | outX);
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ }
+
+ return 0;
+}
+
+int
+nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
+{
+ uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct nouveau_pll_vals pll;
+ struct pll_lims limits;
+ uint32_t reg1, reg2;
+ int ret;
+
+ ret = get_pll_limits(dev, pll_reg, &limits);
+ if (ret)
+ return ret;
+
+ ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
+ if (ret <= 0)
+ return ret;
+
+ if (limits.vco2.maxfreq) {
+ reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
+ reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
+ nv_wr32(dev, pll_reg, 0x10000611);
+ nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
+ nv_wr32(dev, pll_reg + 8,
+ reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
+ } else {
+ reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
+ nv_wr32(dev, pll_reg, 0x50000610);
+ nv_wr32(dev, pll_reg + 4, reg1 |
+ (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
+ }
+
+ return 0;
+}
+
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ if (!crtc)
+ return;
+ dev = crtc->dev;
+ NV_DEBUG_KMS(dev, "\n");
+
+ drm_crtc_cleanup(&nv_crtc->base);
+
+ nv50_cursor_fini(nv_crtc);
+
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ kfree(nv_crtc->mode);
+ kfree(nv_crtc);
+}
+
+int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t buffer_handle, uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_bo *cursor = NULL;
+ struct drm_gem_object *gem;
+ int ret = 0, i;
+
+ if (width != 64 || height != 64)
+ return -EINVAL;
+
+ if (!buffer_handle) {
+ nv_crtc->cursor.hide(nv_crtc, true);
+ return 0;
+ }
+
+ gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+ if (!gem)
+ return -EINVAL;
+ cursor = nouveau_gem_object(gem);
+
+ ret = nouveau_bo_map(cursor);
+ if (ret)
+ goto out;
+
+ /* The simple approach will do for now. */
+ for (i = 0; i < 64 * 64; i++)
+ nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
+
+ nouveau_bo_unmap(cursor);
+
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
+ dev_priv->vm_vram_base);
+ nv_crtc->cursor.show(nv_crtc, true);
+
+out:
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(gem);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->cursor.set_pos(nv_crtc, x, y);
+ return 0;
+}
+
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t size)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int i;
+
+ if (size != 256)
+ return;
+
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = r[i];
+ nv_crtc->lut.g[i] = g[i];
+ nv_crtc->lut.b[i] = b[i];
+ }
+
+ /* We need to know the depth before we upload, but it's possible to
+ * get called before a framebuffer is bound. If this is the case,
+ * mark the lut values as dirty by setting depth==0, and it'll be
+ * uploaded on the first mode_set_base()
+ */
+ if (!nv_crtc->base.fb) {
+ nv_crtc->lut.depth = 0;
+ return;
+ }
+
+ nv50_crtc_lut_load(crtc);
+}
+
+static void
+nv50_crtc_save(struct drm_crtc *crtc)
+{
+ NV_ERROR(crtc->dev, "!!\n");
+}
+
+static void
+nv50_crtc_restore(struct drm_crtc *crtc)
+{
+ NV_ERROR(crtc->dev, "!!\n");
+}
+
+static const struct drm_crtc_funcs nv50_crtc_funcs = {
+ .save = nv50_crtc_save,
+ .restore = nv50_crtc_restore,
+ .cursor_set = nv50_crtc_cursor_set,
+ .cursor_move = nv50_crtc_cursor_move,
+ .gamma_set = nv50_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = nv50_crtc_destroy,
+};
+
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+ /* Disconnect all unused encoders. */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (drm_helper_encoder_in_use(encoder))
+ continue;
+
+ nv_encoder->disconnect(nv_encoder);
+ }
+
+ nv50_crtc_blank(nv_crtc, true);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc *crtc2;
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+ nv50_crtc_blank(nv_crtc, false);
+
+ /* Explicitly blank all unused crtcs. */
+ list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
+ if (!drm_helper_crtc_in_use(crtc2))
+ nv50_crtc_blank(nouveau_crtc(crtc2), true);
+ }
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while committing crtc\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb, bool update)
+{
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
+ struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ int ret, format;
+
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+ switch (drm_fb->depth) {
+ case 8:
+ format = NV50_EVO_CRTC_FB_DEPTH_8;
+ break;
+ case 15:
+ format = NV50_EVO_CRTC_FB_DEPTH_15;
+ break;
+ case 16:
+ format = NV50_EVO_CRTC_FB_DEPTH_16;
+ break;
+ case 24:
+ case 32:
+ format = NV50_EVO_CRTC_FB_DEPTH_24;
+ break;
+ case 30:
+ format = NV50_EVO_CRTC_FB_DEPTH_30;
+ break;
+ default:
+ NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
+ return -EINVAL;
+ }
+
+ ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ if (old_fb) {
+ struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
+ nouveau_bo_unpin(ofb->nvbo);
+ }
+
+ nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+ nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
+ nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
+ if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
+ ret = RING_SPACE(evo, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
+ if (nv_crtc->fb.tile_flags == 0x7a00)
+ OUT_RING(evo, NvEvoFB32);
+ else
+ if (nv_crtc->fb.tile_flags == 0x7000)
+ OUT_RING(evo, NvEvoFB16);
+ else
+ OUT_RING(evo, NvEvoVRAM);
+ }
+
+ ret = RING_SPACE(evo, 12);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
+ OUT_RING(evo, nv_crtc->fb.offset >> 8);
+ OUT_RING(evo, 0);
+ OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
+ if (!nv_crtc->fb.tile_flags) {
+ OUT_RING(evo, drm_fb->pitch | (1 << 20));
+ } else {
+ OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
+ fb->nvbo->tile_mode);
+ }
+ if (dev_priv->chipset == 0x50)
+ OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
+ else
+ OUT_RING(evo, format);
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
+ OUT_RING(evo, fb->base.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+ OUT_RING(evo, (y << 16) | x);
+
+ if (nv_crtc->lut.depth != fb->base.depth) {
+ nv_crtc->lut.depth = fb->base.depth;
+ nv50_crtc_lut_load(crtc);
+ }
+
+ if (update) {
+ ret = RING_SPACE(evo, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ }
+
+ return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nouveau_connector *nv_connector = NULL;
+ uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
+ uint32_t hunk1, vunk1, vunk2a, vunk2b;
+ int ret;
+
+ /* Find the connector attached to this CRTC */
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+
+ *nv_crtc->mode = *adjusted_mode;
+
+ NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+
+ hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+ vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+ hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
+ vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+ /* I can't give this a proper name; can anyone else? */
+ hunk1 = adjusted_mode->htotal -
+ adjusted_mode->hsync_start + adjusted_mode->hdisplay;
+ vunk1 = adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+ /* Another strange value, this time only for interlaced adjusted_modes. */
+ vunk2a = 2 * adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+ vunk2b = adjusted_mode->vtotal -
+ adjusted_mode->vsync_start + adjusted_mode->vtotal;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vsync_dur /= 2;
+ vsync_start_to_end /= 2;
+ vunk1 /= 2;
+ vunk2a /= 2;
+ vunk2b /= 2;
+ /* magic */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ vsync_start_to_end -= 1;
+ vunk1 -= 1;
+ vunk2a -= 1;
+ vunk2b -= 1;
+ }
+ }
+
+ ret = RING_SPACE(evo, 17);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
+ OUT_RING(evo, adjusted_mode->clock | 0x800000);
+ OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
+ OUT_RING(evo, 0);
+ OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
+ OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
+ OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
+ (hsync_start_to_end - 1));
+ OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
+ OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
+ } else {
+ OUT_RING(evo, 0);
+ OUT_RING(evo, 0);
+ }
+
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
+ OUT_RING(evo, 0);
+
+ /* This is the actual resolution of the mode. */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
+ OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
+
+ nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
+ nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
+
+ return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
+ .dpms = nv50_crtc_dpms,
+ .prepare = nv50_crtc_prepare,
+ .commit = nv50_crtc_commit,
+ .mode_fixup = nv50_crtc_mode_fixup,
+ .mode_set = nv50_crtc_mode_set,
+ .mode_set_base = nv50_crtc_mode_set_base,
+ .load_lut = nv50_crtc_lut_load,
+};
+
+int
+nv50_crtc_create(struct drm_device *dev, int index)
+{
+ struct nouveau_crtc *nv_crtc = NULL;
+ int ret, i;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
+ if (!nv_crtc)
+ return -ENOMEM;
+
+ nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
+ if (!nv_crtc->mode) {
+ kfree(nv_crtc);
+ return -ENOMEM;
+ }
+
+ /* Default CLUT parameters; they will be activated on the hw upon
+ * first mode set.
+ */
+ for (i = 0; i < 256; i++) {
+ nv_crtc->lut.r[i] = i << 8;
+ nv_crtc->lut.g[i] = i << 8;
+ nv_crtc->lut.b[i] = i << 8;
+ }
+ nv_crtc->lut.depth = 0;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->lut.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ }
+
+ if (ret) {
+ kfree(nv_crtc->mode);
+ kfree(nv_crtc);
+ return ret;
+ }
+
+ nv_crtc->index = index;
+
+ /* set function pointers */
+ nv_crtc->set_dither = nv50_crtc_set_dither;
+ nv_crtc->set_scale = nv50_crtc_set_scale;
+
+ drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+ drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
+
+ ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ if (!ret) {
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+ }
+
+ nv50_cursor_init(nv_crtc);
+ return 0;
+}
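The DRM_MODE_SCALE_ASPECT case in nv50_crtc_set_scale() above works in <<19 fixed point: it computes a native/mode ratio per axis and scales the mode by the smaller of the two so the result fits the panel. A standalone sketch of that arithmetic (function name and example values are illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static void aspect_scale(uint32_t native_w, uint32_t native_h,
			 uint32_t mode_w, uint32_t mode_h,
			 uint32_t *out_w, uint32_t *out_h)
{
	uint32_t horiz = (native_w << 19) / mode_w;
	uint32_t vert = (native_h << 19) / mode_h;
	/* scale by the smaller ratio so both axes fit the native mode */
	uint32_t ratio = (vert > horiz) ? horiz : vert;

	*out_w = (mode_w * ratio) >> 19;
	*out_h = (mode_h * ratio) >> 19;
}

int main(void)
{
	uint32_t w, h;

	/* a 1024x768 mode on a 1680x1050 native panel -> 1400x1050 */
	aspect_scale(1680, 1050, 1024, 768, &w, &h);
	printf("%ux%u\n", w, h);
	return 0;
}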
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
new file mode 100644
index 00000000000..753e723adb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_mode.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_device *dev = nv_crtc->base.dev;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ if (update && nv_crtc->cursor.visible)
+ return;
+
+ ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while unhiding cursor\n");
+ return;
+ }
+
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ OUT_RING(evo, NvEvoVRAM);
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
+ OUT_RING(evo, nv_crtc->cursor.offset >> 8);
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ nv_crtc->cursor.visible = true;
+ }
+}
+
+static void
+nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
+{
+ struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_device *dev = nv_crtc->base.dev;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ if (update && !nv_crtc->cursor.visible)
+ return;
+
+ ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while hiding cursor\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
+ OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
+ OUT_RING(evo, 0);
+ if (dev_priv->chipset != 0x50) {
+ BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
+ OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
+ }
+
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ nv_crtc->cursor.visible = false;
+ }
+}
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
+ ((y & 0xFFFF) << 16) | (x & 0xFFFF));
+ /* Needed to make the cursor move. */
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+ NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
+ if (offset == nv_crtc->cursor.offset)
+ return;
+
+ nv_crtc->cursor.offset = offset;
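+	/* Re-run the show path so the hardware picks up the new offset. */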
+ if (nv_crtc->cursor.visible) {
+ nv_crtc->cursor.visible = false;
+ nv_crtc->cursor.show(nv_crtc, true);
+ }
+}
+
+int
+nv50_cursor_init(struct nouveau_crtc *nv_crtc)
+{
+ nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
+ nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
+ nv_crtc->cursor.hide = nv50_cursor_hide;
+ nv_crtc->cursor.show = nv50_cursor_show;
+ return 0;
+}
+
+void
+nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
+{
+ struct drm_device *dev = nv_crtc->base.dev;
+ int idx = nv_crtc->index;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
+ if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+ NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+ NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
new file mode 100644
index 00000000000..f08f042a8e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
+{
+ struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while disconnecting DAC\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
+ OUT_RING(evo, 0);
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ enum drm_connector_status status = connector_status_disconnected;
+ uint32_t dpms_state, load_pattern, load_state;
+ int or = nv_encoder->or;
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
+ dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+ if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
+ NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+ NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+ return status;
+ }
+
+ /* Use bios provided value if possible. */
+ if (dev_priv->vbios->dactestval) {
+ load_pattern = dev_priv->vbios->dactestval;
+ NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
+ load_pattern);
+ } else {
+ load_pattern = 340;
+ NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
+ load_pattern);
+ }
+
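+	/* Apply the load pattern and read back the sense result to see if a display is attached. */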
+ nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
+ NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
+ mdelay(45); /* give it some time to process */
+ load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+
+ if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
+ NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
+ status = connector_status_connected;
+
+ if (status == connector_status_connected)
+ NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
+ else
+ NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);
+
+ return status;
+}
+
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t val;
+ int or = nv_encoder->or;
+
+ NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+
+ /* wait for it to be done */
+ if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
+ NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
+ NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
+ return;
+ }
+
+ val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
+
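+	/* Blank the output and drop syncs according to the requested DPMS mode. */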
+ if (mode != DRM_MODE_DPMS_ON)
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_STANDBY:
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
+ val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
+ break;
+ default:
+ break;
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+}
+
+static void
+nv50_dac_save(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static void
+nv50_dac_restore(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *connector;
+
+ NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
+
+ connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!connector) {
+ NV_ERROR(encoder->dev, "Encoder has no connector\n");
+ return false;
+ }
+
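+	/* Substitute the panel's native mode when scaling, keeping the original mode id. */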
+ if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+ connector->native_mode) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+
+ return true;
+}
+
+static void
+nv50_dac_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
+ uint32_t mode_ctl = 0, mode_ctl2 = 0;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
+
+ nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ if (crtc->index == 1)
+ mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
+ else
+ mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
+
+	/* Lacking a working tv-out, this is not 100% certain. */
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG)
+ mode_ctl |= 0x40;
+ else
+ if (nv_encoder->dcb->type == OUTPUT_TV)
+ mode_ctl |= 0x100;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
+
+ ret = RING_SPACE(evo, 3);
+ if (ret) {
+ NV_ERROR(dev, "no space while connecting DAC\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
+ OUT_RING(evo, mode_ctl);
+ OUT_RING(evo, mode_ctl2);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
+ .dpms = nv50_dac_dpms,
+ .save = nv50_dac_save,
+ .restore = nv50_dac_restore,
+ .mode_fixup = nv50_dac_mode_fixup,
+ .prepare = nv50_dac_prepare,
+ .commit = nv50_dac_commit,
+ .mode_set = nv50_dac_mode_set,
+ .detect = nv50_dac_detect
+};
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (!encoder)
+ return;
+
+ NV_DEBUG_KMS(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
+ .destroy = nv50_dac_destroy,
+};
+
+int
+nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct nouveau_encoder *nv_encoder;
+ struct drm_encoder *encoder;
+
+ NV_DEBUG_KMS(dev, "\n");
+ NV_INFO(dev, "Detected a DAC output\n");
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ nv_encoder->disconnect = nv50_dac_disconnect;
+
+ drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
new file mode 100644
index 00000000000..a9263d92a23
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "nv50_display.h"
+#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_fb.h"
+#include "drm_crtc_helper.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pchan)
+{
+ struct nouveau_channel *chan = *pchan;
+
+ if (!chan)
+ return;
+ *pchan = NULL;
+
+ nouveau_gpuobj_channel_takedown(chan);
+ nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+
+ if (chan->user)
+ iounmap(chan->user);
+
+ kfree(chan);
+}
+
+static int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
+ uint32_t tile_flags, uint32_t magic_flags,
+ uint32_t offset, uint32_t limit)
+{
+ struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+ struct drm_device *dev = evo->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
+ if (ret)
+ return ret;
+ obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+ ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
+ if (ret) {
+ nouveau_gpuobj_del(dev, &obj);
+ return ret;
+ }
+
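+	/* Fill in the six dwords describing the DMA object: class/flags, limit, offset, then fixed values. */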
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
+ nv_wo32(dev, obj, 1, limit);
+ nv_wo32(dev, obj, 2, offset);
+ nv_wo32(dev, obj, 3, 0x00000000);
+ nv_wo32(dev, obj, 4, 0x00000000);
+ nv_wo32(dev, obj, 5, 0x00010000);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ return 0;
+}
+
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ *pchan = chan;
+
+ chan->id = -1;
+ chan->dev = dev;
+ chan->user_get = 4;
+ chan->user_put = 0;
+
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
+ if (ret) {
+ NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
+ im_pramin->start, 32768);
+ if (ret) {
+ NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
+ 0, &chan->ramht);
+ if (ret) {
+ NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ if (dev_priv->chipset != 0x50) {
+ ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
+ 0, 0xffffffff);
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+
+ ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
+ 0, 0xffffffff);
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+ }
+
+ ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
+ 0, nouveau_mem_fb_amount(dev));
+ if (ret) {
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
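+	/* Allocate, pin and map a page of VRAM for the EVO push buffer. */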
+ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ false, true, &chan->pushbuf_bo);
+ if (ret == 0)
+ ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ ret = nouveau_bo_map(chan->pushbuf_bo);
+ if (ret) {
+ NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+ nv50_evo_channel_del(pchan);
+ return ret;
+ }
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_PDISPLAY_USER(0), PAGE_SIZE);
+ if (!chan->user) {
+ NV_ERROR(dev, "Error mapping EVO control regs.\n");
+ nv50_evo_channel_del(pchan);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct drm_connector *connector;
+ uint32_t val, ram_amount, hpd_en[2];
+ uint64_t start;
+ int ret, i;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+ /*
+ * I think the 0x006101XX range is some kind of main control area
+ * that enables things.
+ */
+ /* CRTC? */
+ for (i = 0; i < 2; i++) {
+ val = nv_rd32(dev, 0x00616100 + (i * 0x800));
+ nv_wr32(dev, 0x00610190 + (i * 0x10), val);
+ val = nv_rd32(dev, 0x00616104 + (i * 0x800));
+ nv_wr32(dev, 0x00610194 + (i * 0x10), val);
+ val = nv_rd32(dev, 0x00616108 + (i * 0x800));
+ nv_wr32(dev, 0x00610198 + (i * 0x10), val);
+ val = nv_rd32(dev, 0x0061610c + (i * 0x800));
+ nv_wr32(dev, 0x0061019c + (i * 0x10), val);
+ }
+ /* DAC */
+ for (i = 0; i < 3; i++) {
+ val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
+ nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
+ }
+ /* SOR */
+ for (i = 0; i < 4; i++) {
+ val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
+ nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
+ }
+	/* Something not yet in use; maybe tv-out. */
+ for (i = 0; i < 3; i++) {
+ val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
+ nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
+ }
+
+ for (i = 0; i < 3; i++) {
+ nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
+ NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
+ nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
+ }
+
+ /* This used to be in crtc unblank, but seems out of place there. */
+ nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
+ /* RAM is clamped to 256 MiB. */
+ ram_amount = nouveau_mem_fb_amount(dev);
+ NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
+ if (ram_amount > 256*1024*1024)
+ ram_amount = 256*1024*1024;
+ nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
+ nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
+ nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
+
+	/* The precise purpose is unknown; I suspect it has something to do
+ * with text mode.
+ */
+ if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
+ nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
+ if (!nv_wait(0x006194e8, 2, 0)) {
+ NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
+ NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
+ nv_rd32(dev, 0x6194e8));
+ return -EBUSY;
+ }
+ }
+
+	/* Taken from nv bug #12637; attempts to un-wedge the hw if it's
+ * stuck in some unspecified state
+ */
+ start = ptimer->read(dev);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
+ while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
+ if ((val & 0x9f0000) == 0x20000)
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ val | 0x800000);
+
+ if ((val & 0x3f0000) == 0x30000)
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ val | 0x200000);
+
+ if (ptimer->read(dev) - start > 1000000000ULL) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
+ if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
+ return -EBUSY;
+ }
+
+ for (i = 0; i < 2; i++) {
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
+ if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
+ NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
+ NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
+ if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
+ NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
+ NV_ERROR(dev, "timeout: "
+ "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
+ NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
+ nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
+
+ /* initialise fifo */
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
+ ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+ NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
+ NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
+ if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
+ return -EBUSY;
+ }
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
+ (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
+ NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
+ nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
+ NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
+ nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
+
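+	/* Command buffer is one page; the last two words are kept in reserve. */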
+ evo->dma.max = (4096/4) - 2;
+ evo->dma.put = 0;
+ evo->dma.cur = evo->dma.put;
+ evo->dma.free = evo->dma.max - evo->dma.cur;
+
+ ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(evo, 0);
+
+ ret = RING_SPACE(evo, 11);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+ OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+ OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
+ OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
+ OUT_RING(evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
+ OUT_RING(evo, 0);
+ FIRE_RING(evo);
+ if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
+ NV_ERROR(dev, "evo pushbuf stalled\n");
+
+ /* enable clock change interrupts. */
+ nv_wr32(dev, 0x610028, 0x00010001);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
+ NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
+ NV50_PDISPLAY_INTR_EN_CLK_UNK40));
+
+ /* enable hotplug interrupts */
+ hpd_en[0] = hpd_en[1] = 0;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ struct dcb_gpio_entry *gpio;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
+ connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
+ if (!gpio)
+ continue;
+
+ hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
+ }
+
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ nv_wr32(dev, 0xe050, hpd_en[0]);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ nv_wr32(dev, 0xe070, hpd_en[1]);
+ }
+
+ return 0;
+}
+
+static int nv50_display_disable(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_crtc *drm_crtc;
+ int ret, i;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+
+ nv50_crtc_blank(crtc, true);
+ }
+
+ ret = RING_SPACE(dev_priv->evo, 2);
+ if (ret == 0) {
+ BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(dev_priv->evo, 0);
+ }
+ FIRE_RING(dev_priv->evo);
+
+ /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
+ * cleaning up?
+ */
+ list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+ uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+
+ if (!crtc->base.enabled)
+ continue;
+
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
+ if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
+ NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
+ "0x%08x\n", mask, mask);
+ NV_ERROR(dev, "0x610024 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_INTR_1));
+ }
+ }
+
+ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
+ nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
+ if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
+ NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
+ NV_ERROR(dev, "0x610200 = 0x%08x\n",
+ nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
+ }
+
+ for (i = 0; i < 3; i++) {
+ if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
+ NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
+ NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
+ NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
+ nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
+ }
+ }
+
+ /* disable interrupts. */
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+
+ /* disable hotplug interrupts */
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ nv_wr32(dev, 0xe050, 0x00000000);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ nv_wr32(dev, 0xe070, 0x00000000);
+ }
+ return 0;
+}
+
+int nv50_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+ uint32_t connector[16] = {};
+ int ret, i;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ /* init basic kernel modesetting */
+ drm_mode_config_init(dev);
+
+ /* Initialise some optional connector properties. */
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dithering_property(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+
+ dev->mode_config.fb_base = dev_priv->fb_phys;
+
+ /* Create EVO channel */
+ ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ if (ret) {
+ NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
+ return ret;
+ }
+
+ /* Create CRTC objects */
+ for (i = 0; i < 2; i++)
+ nv50_crtc_create(dev, i);
+
+	/* We set up the encoders from the BIOS table */
+ for (i = 0 ; i < dcb->entries; i++) {
+ struct dcb_entry *entry = &dcb->entry[i];
+
+ if (entry->location != DCB_LOC_ON_CHIP) {
+ NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
+ entry->type, ffs(entry->or) - 1);
+ continue;
+ }
+
+ switch (entry->type) {
+ case OUTPUT_TMDS:
+ case OUTPUT_LVDS:
+ case OUTPUT_DP:
+ nv50_sor_create(dev, entry);
+ break;
+ case OUTPUT_ANALOG:
+ nv50_dac_create(dev, entry);
+ break;
+ default:
+ NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
+ continue;
+ }
+
+ connector[entry->connector] |= (1 << entry->type);
+ }
+
+	/* It appears that DCB 3.0+ VBIOS has a connector table; however,
+	 * I'm not 100% certain how to decode it correctly yet, so just
+ * look at what encoders are present on each connector index and
+ * attempt to derive the connector type from that.
+ */
+ for (i = 0 ; i < dcb->entries; i++) {
+ struct dcb_entry *entry = &dcb->entry[i];
+ uint16_t encoders;
+ int type;
+
+ encoders = connector[entry->connector];
+ if (!(encoders & (1 << entry->type)))
+ continue;
+ connector[entry->connector] = 0;
+
+ if (encoders & (1 << OUTPUT_DP)) {
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ } else if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ type = DRM_MODE_CONNECTOR_DVII;
+ else
+ type = DRM_MODE_CONNECTOR_DVID;
+ } else if (encoders & (1 << OUTPUT_ANALOG)) {
+ type = DRM_MODE_CONNECTOR_VGA;
+ } else if (encoders & (1 << OUTPUT_LVDS)) {
+ type = DRM_MODE_CONNECTOR_LVDS;
+ } else {
+ type = DRM_MODE_CONNECTOR_Unknown;
+ }
+
+ if (type == DRM_MODE_CONNECTOR_Unknown)
+ continue;
+
+ nouveau_connector_create(dev, entry->connector, type);
+ }
+
+ ret = nv50_display_init(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int nv50_display_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ drm_mode_config_cleanup(dev);
+
+ nv50_display_disable(dev);
+ nv50_evo_channel_del(&dev_priv->evo);
+
+ return 0;
+}
+
+static inline uint32_t
+nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t mc;
+
+ if (sor) {
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
+ else
+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
+ } else {
+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
+ }
+
+ return mc;
+}
+
+static int
+nv50_display_irq_head(struct drm_device *dev, int *phead,
+ struct dcb_entry **pdcbent)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
+ uint32_t dac = 0, sor = 0;
+ int head, i, or = 0, type = OUTPUT_ANY;
+
+ /* We're assuming that head 0 *or* head 1 will be active here,
+	 * and not both. I'm not sure the hw will ever signal both at
+	 * once, but it definitely shouldn't for us as we commit each
+ * CRTC separately, and submission will be blocked by the GPU
+ * until we handle each in turn.
+ */
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+ head = ffs((unk30 >> 9) & 3) - 1;
+ if (head < 0)
+ return -EINVAL;
+
+ /* This assumes CRTCs are never bound to multiple encoders, which
+ * should be the case.
+ */
+ for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
+ uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
+ if (!(mc & (1 << head)))
+ continue;
+
+ switch ((mc >> 8) & 0xf) {
+ case 0: type = OUTPUT_ANALOG; break;
+ case 1: type = OUTPUT_TV; break;
+ default:
+ NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
+ return -1;
+ }
+
+ or = i;
+ }
+
+ for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
+ uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
+ if (!(mc & (1 << head)))
+ continue;
+
+ switch ((mc >> 8) & 0xf) {
+ case 0: type = OUTPUT_LVDS; break;
+ case 1: type = OUTPUT_TMDS; break;
+ case 2: type = OUTPUT_TMDS; break;
+ case 5: type = OUTPUT_TMDS; break;
+ case 8: type = OUTPUT_DP; break;
+ case 9: type = OUTPUT_DP; break;
+ default:
+ NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
+ return -1;
+ }
+
+ or = i;
+ }
+
+ NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
+ if (type == OUTPUT_ANY) {
+ NV_ERROR(dev, "unknown encoder!!\n");
+ return -1;
+ }
+
+ for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
+ struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
+
+ if (dcbent->type != type)
+ continue;
+
+ if (!(dcbent->or & (1 << or)))
+ continue;
+
+ *phead = head;
+ *pdcbent = dcbent;
+ return 0;
+ }
+
+ NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
+ return 0;
+}
+
+static uint32_t
+nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
+ int pxclk)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->VBIOS;
+ uint32_t mc, script = 0, or;
+
+ or = ffs(dcbent->or) - 1;
+ mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
+ switch (dcbent->type) {
+ case OUTPUT_LVDS:
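+		/* Bit 8 of the script requests dual-link, bit 9 a 24-bit panel. */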
+ script = (mc >> 8) & 0xf;
+ if (bios->pub.fp_no_ddc) {
+ if (bios->fp.dual_link)
+ script |= 0x0100;
+ if (bios->fp.if_is_24bit)
+ script |= 0x0200;
+ } else {
+ if (pxclk >= bios->fp.duallink_transition_clk) {
+ script |= 0x0100;
+ if (bios->fp.strapless_is_24bit & 2)
+ script |= 0x0200;
+ } else
+ if (bios->fp.strapless_is_24bit & 1)
+ script |= 0x0200;
+ }
+
+ if (nouveau_uscript_lvds >= 0) {
+ NV_INFO(dev, "override script 0x%04x with 0x%04x "
+ "for output LVDS-%d\n", script,
+ nouveau_uscript_lvds, or);
+ script = nouveau_uscript_lvds;
+ }
+ break;
+ case OUTPUT_TMDS:
+ script = (mc >> 8) & 0xf;
+ if (pxclk >= 165000)
+ script |= 0x0100;
+
+ if (nouveau_uscript_tmds >= 0) {
+ NV_INFO(dev, "override script 0x%04x with 0x%04x "
+ "for output TMDS-%d\n", script,
+ nouveau_uscript_tmds, or);
+ script = nouveau_uscript_tmds;
+ }
+ break;
+ case OUTPUT_DP:
+ script = (mc >> 8) & 0xf;
+ break;
+ case OUTPUT_ANALOG:
+ script = 0xff;
+ break;
+ default:
+ NV_ERROR(dev, "modeset on unsupported output type!\n");
+ break;
+ }
+
+ return script;
+}
+
+static void
+nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
+ chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+
+ nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
+ chan->nvsw.vblsem_rval);
+ list_del(&chan->nvsw.vbl_wait);
+ }
+}
+
+static void
+nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
+{
+ intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+
+ if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
+ nv50_display_vblank_crtc_handler(dev, 0);
+
+ if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
+ nv50_display_vblank_crtc_handler(dev, 1);
+
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
+ NV50_PDISPLAY_INTR_EN) & ~intr);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+}
+
+static void
+nv50_display_unk10_handler(struct drm_device *dev)
+{
+ struct dcb_entry *dcbent;
+ int head, ret;
+
+ ret = nv50_display_irq_head(dev, &head, &dcbent);
+ if (ret)
+ goto ack;
+
+ nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
+
+ nouveau_bios_run_display_table(dev, dcbent, 0, -1);
+
+ack:
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
+ nv_wr32(dev, 0x610030, 0x80000000);
+}
+
+static void
+nv50_display_unk20_handler(struct drm_device *dev)
+{
+ struct dcb_entry *dcbent;
+ uint32_t tmp, pclk, script;
+ int head, or, ret;
+
+ ret = nv50_display_irq_head(dev, &head, &dcbent);
+ if (ret)
+ goto ack;
+ or = ffs(dcbent->or) - 1;
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
+ script = nv50_display_script_select(dev, dcbent, pclk);
+
+ NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
+
+ if (dcbent->type != OUTPUT_DP)
+ nouveau_bios_run_display_table(dev, dcbent, 0, -2);
+
+ nv50_crtc_set_clock(dev, head, pclk);
+
+ nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
+ tmp &= ~0x000000f;
+ nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
+
+ if (dcbent->type != OUTPUT_ANALOG) {
+ tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
+ tmp &= ~0x00000f0f;
+ if (script & 0x0100)
+ tmp |= 0x00000101;
+ nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
+ } else {
+ nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+ }
+
+ack:
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
+ nv_wr32(dev, 0x610030, 0x80000000);
+}
+
+static void
+nv50_display_unk40_handler(struct drm_device *dev)
+{
+ struct dcb_entry *dcbent;
+ int head, pclk, script, ret;
+
+ ret = nv50_display_irq_head(dev, &head, &dcbent);
+ if (ret)
+ goto ack;
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
+ script = nv50_display_script_select(dev, dcbent, pclk);
+
+ nouveau_bios_run_display_table(dev, dcbent, script, -pclk);
+
+ack:
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
+ nv_wr32(dev, 0x610030, 0x80000000);
+ nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
+}
+
+void
+nv50_display_irq_handler_bh(struct work_struct *work)
+{
+ struct drm_nouveau_private *dev_priv =
+ container_of(work, struct drm_nouveau_private, irq_work);
+ struct drm_device *dev = dev_priv->dev;
+
+ for (;;) {
+ uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
+ uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+
+ NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
+
+ if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
+ nv50_display_unk10_handler(dev);
+ else
+ if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
+ nv50_display_unk20_handler(dev);
+ else
+ if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
+ nv50_display_unk40_handler(dev);
+ else
+ break;
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
+}
+
+static void
+nv50_display_error_handler(struct drm_device *dev)
+{
+ uint32_t addr, data;
+
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
+ addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
+ data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
+
+ NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
+ 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+
+ nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
+}
+
+static void
+nv50_display_irq_hotplug(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+ uint32_t unplug_mask, plug_mask, change_mask;
+ uint32_t hpd0, hpd1 = 0;
+
+ hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
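+	/* The low halves of the hotplug registers report connect events, the high halves disconnects. */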
+ plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
+ unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
+ change_mask = plug_mask | unplug_mask;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct drm_encoder_helper_funcs *helper;
+ struct nouveau_connector *nv_connector =
+ nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder;
+ struct dcb_gpio_entry *gpio;
+ uint32_t reg;
+ bool plugged;
+
+ if (!nv_connector->dcb)
+ continue;
+
+ gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
+ if (!gpio || !(change_mask & (1 << gpio->line)))
+ continue;
+
+ reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
+ plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
+ NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+			drm_get_connector_name(connector));
+
+ if (!connector->encoder || !connector->encoder->crtc ||
+ !connector->encoder->crtc->enabled)
+ continue;
+ nv_encoder = nouveau_encoder(connector->encoder);
+ helper = connector->encoder->helper_private;
+
+ if (nv_encoder->dcb->type != OUTPUT_DP)
+ continue;
+
+ if (plugged)
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+ else
+ helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+ }
+
+ nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
+}
+
+void
+nv50_display_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t delayed = 0;
+
+ while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
+ nv50_display_irq_hotplug(dev);
+
+ while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
+ uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
+ uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
+ uint32_t clock;
+
+ NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+
+ if (!intr0 && !(intr1 & ~delayed))
+ break;
+
+ if (intr0 & 0x00010000) {
+ nv50_display_error_handler(dev);
+ intr0 &= ~0x00010000;
+ }
+
+ if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
+ nv50_display_vblank_handler(dev, intr1);
+ intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+ }
+
+ clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
+ NV50_PDISPLAY_INTR_1_CLK_UNK20 |
+ NV50_PDISPLAY_INTR_1_CLK_UNK40));
+ if (clock) {
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+ if (!work_pending(&dev_priv->irq_work))
+ queue_work(dev_priv->wq, &dev_priv->irq_work);
+ delayed |= clock;
+ intr1 &= ~clock;
+ }
+
+ if (intr0) {
+ NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
+ }
+
+ if (intr1) {
+ NV_ERROR(dev,
+ "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
new file mode 100644
index 00000000000..3ae8d0725f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NV50_DISPLAY_H__
+#define __NV50_DISPLAY_H__
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_reg.h"
+#include "nouveau_crtc.h"
+#include "nv50_evo.h"
+
+void nv50_display_irq_handler(struct drm_device *dev);
+void nv50_display_irq_handler_bh(struct work_struct *work);
+int nv50_display_init(struct drm_device *dev);
+int nv50_display_create(struct drm_device *dev);
+int nv50_display_destroy(struct drm_device *dev);
+int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
+int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+
+#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
new file mode 100644
index 00000000000..aae13343bce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define NV50_EVO_UPDATE 0x00000080
+#define NV50_EVO_UNK84 0x00000084
+#define NV50_EVO_UNK84_NOTIFY 0x40000000
+#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
+#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
+#define NV50_EVO_DMA_NOTIFY 0x00000088
+#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
+#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
+#define NV50_EVO_UNK8C 0x0000008C
+
+#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
+#define NV50_EVO_DAC_MODE_CTRL 0x00000400
+#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
+#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
+#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
+#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
+#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
+
+#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
+#define NV50_EVO_SOR_MODE_CTRL 0x00000600
+#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
+#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
+#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
+#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
+#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
+#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
+
+#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
+#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
+#define NV50_EVO_CRTC_UNK0800 0x00000800
+#define NV50_EVO_CRTC_CLOCK 0x00000804
+#define NV50_EVO_CRTC_INTERLACE 0x00000808
+#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
+#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
+#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
+#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
+#define NV50_EVO_CRTC_UNK0820 0x00000820
+#define NV50_EVO_CRTC_UNK0824 0x00000824
+#define NV50_EVO_CRTC_UNK082C 0x0000082c
+#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
+/* You can't have a palette in 8-bit mode (=OFF) */
+#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
+#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
+#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
+#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
+#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
+#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
+#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
+#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
+#define NV50_EVO_CRTC_FB_SIZE 0x00000868
+#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
+#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
+#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
+#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
+#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
+#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
+#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
+#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
+#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
+#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
+#define NV50_EVO_CRTC_FB_DMA 0x00000874
+#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
+#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
+#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
+#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
+#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
+#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
+#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
+#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
+#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
+#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
+#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
+#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
+#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
+#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
+#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
+#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
+#define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000
+#define NV50_EVO_CRTC_FB_POS 0x000008c0
+#define NV50_EVO_CRTC_REAL_RES 0x000008c8
+#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
+#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
+ ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
+/* Both of these are needed; otherwise nothing happens. */
+#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
+#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
new file mode 100644
index 00000000000..6bcc6d39e9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -0,0 +1,273 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_fbcon.h"
+
+static void
+nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
+ RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_fillrect(info, rect);
+ return;
+ }
+
+ if (rect->rop != ROP_COPY) {
+ BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ OUT_RING(chan, 1);
+ }
+ BEGIN_RING(chan, NvSub2D, 0x0588, 1);
+ OUT_RING(chan, rect->color);
+ BEGIN_RING(chan, NvSub2D, 0x0600, 4);
+ OUT_RING(chan, rect->dx);
+ OUT_RING(chan, rect->dy);
+ OUT_RING(chan, rect->dx + rect->width);
+ OUT_RING(chan, rect->dy + rect->height);
+ if (rect->rop != ROP_COPY) {
+ BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ OUT_RING(chan, 3);
+ }
+ FIRE_RING(chan);
+}
+
+static void
+nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_copyarea(info, region);
+ return;
+ }
+
+ BEGIN_RING(chan, NvSub2D, 0x0110, 1);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
+ OUT_RING(chan, region->dx);
+ OUT_RING(chan, region->dy);
+ OUT_RING(chan, region->width);
+ OUT_RING(chan, region->height);
+ BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, region->sx);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, region->sy);
+ FIRE_RING(chan);
+}
+
+static void
+nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ uint32_t width, dwords, *data = (uint32_t *)image->data;
+ uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ uint32_t *palette = info->pseudo_palette;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return;
+
+ if (image->depth != 1) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ cfb_imageblit(info, image);
+ return;
+ }
+
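+	/* The 1bpp source is padded to 32-pixel multiples, so each row is a whole number of dwords. */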
+ width = (image->width + 31) & ~31;
+ dwords = (width * image->height) >> 5;
+
+ BEGIN_RING(chan, NvSub2D, 0x0814, 2);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ OUT_RING(chan, palette[image->bg_color] | mask);
+ OUT_RING(chan, palette[image->fg_color] | mask);
+ } else {
+ OUT_RING(chan, image->bg_color);
+ OUT_RING(chan, image->fg_color);
+ }
+ BEGIN_RING(chan, NvSub2D, 0x0838, 2);
+ OUT_RING(chan, image->width);
+ OUT_RING(chan, image->height);
+ BEGIN_RING(chan, NvSub2D, 0x0850, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, image->dx);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, image->dy);
+
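+	/* Push the expansion data in chunks of at most 2047 dwords per method. */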
+ while (dwords) {
+ int push = dwords > 2047 ? 2047 : dwords;
+
+ if (RING_SPACE(chan, push + 1)) {
+ NV_ERROR(dev,
+ "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ dwords -= push;
+
+ BEGIN_RING(chan, NvSub2D, 0x40000860, push);
+ OUT_RINGp(chan, data, push);
+ data += push;
+ }
+
+ FIRE_RING(chan);
+}
+
+int
+nv50_fbcon_accel_init(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_gpuobj *eng2d = NULL;
+ int ret, format;
+
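+	/* Pick the 2D surface format matching the framebuffer depth. */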
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = 0xf3;
+ break;
+ case 15:
+ format = 0xf8;
+ break;
+ case 16:
+ format = 0xe8;
+ break;
+ case 32:
+ switch (info->var.transp.length) {
+ case 0: /* depth 24 */
+ case 8: /* depth 32, just use 24.. */
+ format = 0xe6;
+ break;
+ case 2: /* depth 30 */
+ format = 0xd1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
+ if (ret)
+ return ret;
+
+ ret = RING_SPACE(chan, 59);
+ if (ret) {
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ return ret;
+ }
+
+ BEGIN_RING(chan, NvSub2D, 0x0000, 1);
+ OUT_RING(chan, Nv2D);
+ BEGIN_RING(chan, NvSub2D, 0x0180, 4);
+ OUT_RING(chan, NvNotify0);
+ OUT_RING(chan, chan->vram_handle);
+ OUT_RING(chan, chan->vram_handle);
+ OUT_RING(chan, chan->vram_handle);
+ BEGIN_RING(chan, NvSub2D, 0x0290, 1);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSub2D, 0x0888, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
+ OUT_RING(chan, 3);
+ BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
+ OUT_RING(chan, 0x55);
+ BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0580, 2);
+ OUT_RING(chan, 4);
+ OUT_RING(chan, format);
+ BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
+ OUT_RING(chan, 2);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0804, 1);
+ OUT_RING(chan, format);
+ BEGIN_RING(chan, NvSub2D, 0x0800, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0808, 3);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 0);
+ BEGIN_RING(chan, NvSub2D, 0x081c, 1);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0840, 4);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0200, 2);
+ OUT_RING(chan, format);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0214, 5);
+ OUT_RING(chan, info->fix.line_length);
+ OUT_RING(chan, info->var.xres_virtual);
+ OUT_RING(chan, info->var.yres_virtual);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
+ dev_priv->vm_vram_base);
+ BEGIN_RING(chan, NvSub2D, 0x0230, 2);
+ OUT_RING(chan, format);
+ OUT_RING(chan, 1);
+ BEGIN_RING(chan, NvSub2D, 0x0244, 5);
+ OUT_RING(chan, info->fix.line_length);
+ OUT_RING(chan, info->var.xres_virtual);
+ OUT_RING(chan, info->var.yres_virtual);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
+ dev_priv->vm_vram_base);
+
+ info->fbops->fb_fillrect = nv50_fbcon_fillrect;
+ info->fbops->fb_copyarea = nv50_fbcon_copyarea;
+ info->fbops->fb_imageblit = nv50_fbcon_imageblit;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 00000000000..b7282284f08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,494 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+struct nv50_fifo_priv {
+ struct nouveau_gpuobj_ref *thingo[2];
+ int cur_thingo;
+};
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_fifo_init_thingo(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+ struct nouveau_gpuobj_ref *cur;
+ int i, nr;
+
+ NV_DEBUG(dev, "\n");
+
+ cur = priv->thingo[priv->cur_thingo];
+ priv->cur_thingo = !priv->cur_thingo;
+
+ /* We never schedule channel 0 or 127 */
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = 1, nr = 0; i < 127; i++) {
+ if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
+ nv_wo32(dev, cur->gpuobj, nr++, i);
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x32f4, cur->instance >> 12);
+ nv_wr32(dev, 0x32ec, nr);
+ nv_wr32(dev, 0x2500, 0x101);
+}
+
+static int
+nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[channel];
+ uint32_t inst;
+
+ NV_DEBUG(dev, "ch%d\n", channel);
+
+ if (!chan->ramfc)
+ return -EINVAL;
+
+ if (IS_G80)
+ inst = chan->ramfc->instance >> 12;
+ else
+ inst = chan->ramfc->instance >> 8;
+ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
+ inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+
+ if (!nt)
+ nv50_fifo_init_thingo(dev);
+ return 0;
+}
+
+static void
+nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+
+ NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);
+
+ if (IS_G80)
+ inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
+ else
+ inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
+ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
+
+ if (!nt)
+ nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_reset(struct drm_device *dev)
+{
+ uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
+}
+
+static void
+nv50_fifo_init_intr(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+}
+
+static void
+nv50_fifo_init_context_table(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ NV_DEBUG(dev, "\n");
+
+ for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
+ if (dev_priv->fifos[i])
+ nv50_fifo_channel_enable(dev, i, true);
+ else
+ nv50_fifo_channel_disable(dev, i, true);
+ }
+
+ nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_regs__nv(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, 0x250c, 0x6f3cfc34);
+}
+
+static void
+nv50_fifo_init_regs(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, 0x2500, 0);
+ nv_wr32(dev, 0x3250, 0);
+ nv_wr32(dev, 0x3220, 0);
+ nv_wr32(dev, 0x3204, 0);
+ nv_wr32(dev, 0x3210, 0);
+ nv_wr32(dev, 0x3270, 0);
+
+ /* Enable dummy channels set up by nv50_instmem.c */
+ nv50_fifo_channel_enable(dev, 0, true);
+ nv50_fifo_channel_enable(dev, 127, true);
+}
+
+int
+nv50_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv;
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ priv = dev_priv->engine.fifo.priv;
+ if (priv) {
+ priv->cur_thingo = !priv->cur_thingo;
+ goto just_reset;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.fifo.priv = priv;
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
+ if (ret) {
+ NV_ERROR(dev, "error creating thingo0: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
+ if (ret) {
+ NV_ERROR(dev, "error creating thingo1: %d\n", ret);
+ return ret;
+ }
+
+just_reset:
+ nv50_fifo_init_reset(dev);
+ nv50_fifo_init_intr(dev);
+ nv50_fifo_init_context_table(dev);
+ nv50_fifo_init_regs__nv(dev);
+ nv50_fifo_init_regs(dev);
+ dev_priv->engine.fifo.enable(dev);
+ dev_priv->engine.fifo.reassign(dev, true);
+
+ return 0;
+}
+
+void
+nv50_fifo_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+
+ NV_DEBUG(dev, "\n");
+
+ if (!priv)
+ return;
+
+ nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
+ nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
+
+ dev_priv->engine.fifo.priv = NULL;
+ kfree(priv);
+}
+
+int
+nv50_fifo_channel_id(struct drm_device *dev)
+{
+ return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
+}
+
+int
+nv50_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = NULL;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ if (IS_G80) {
+ uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
+ uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
+
+ ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
+ 0x100, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &ramfc,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
+ ramin_voffset + 0x0400, 4096,
+ 0, NULL, &chan->cache);
+ if (ret)
+ return ret;
+ } else {
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+ ramfc = chan->ramfc->gpuobj;
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
+ 0, &chan->cache);
+ if (ret)
+ return ret;
+ }
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+
+ nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
+ nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
+ nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
+ nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
+ nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
+ nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
+ nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
+ nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
+ nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
+ nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
+ nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
+
+ if (!IS_G80) {
+ nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
+ nv_wo32(dev, chan->ramin->gpuobj, 1,
+ chan->ramfc->instance >> 8);
+
+ nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
+ nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
+ }
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ ret = nv50_fifo_channel_enable(dev, chan->id, false);
+ if (ret) {
+ NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ return ret;
+ }
+
+ return 0;
+}
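[Editor's note -- not part of the patch] nv_wo32() addresses instance memory by 32-bit word index, which is why every RAMFC field above is written at "byte_offset/4" (0x48/4 is word 18, and so on). A trivial standalone illustration of that indexing convention, with a plain array standing in for the gpuobj:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int ramfc[0x100 / 4];		/* 256-byte RAMFC viewed as words */

	memset(ramfc, 0, sizeof(ramfc));

	/* Equivalent of nv_wo32(dev, ramfc, 0x48/4, value):
	 * byte offset 0x48 lands in 32-bit word 18. */
	ramfc[0x48 / 4] = 0x00001234;

	printf("word %d (byte offset 0x%x) = 0x%08x\n",
	       0x48 / 4, 0x48, ramfc[0x48 / 4]);
	return 0;
}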
+
+void
+nv50_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->cache);
+
+ nv50_fifo_channel_disable(dev, chan->id, false);
+
+ /* Dummy channel, also used on ch 127 */
+ if (chan->id == 0)
+ nv50_fifo_channel_disable(dev, 127, false);
+}
+
+int
+nv50_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
+ struct nouveau_gpuobj *cache = chan->cache->gpuobj;
+ int ptr, cnt;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ dev_priv->engine.instmem.prepare_access(dev, false);
+
+ nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
+ nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
+ nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
+ nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
+ nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
+ nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
+ nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
+ nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
+ nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
+ nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
+ nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
+ nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
+ nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
+ nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
+ nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
+ nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
+ nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
+ nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
+ nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
+ nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
+ nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
+ nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
+ nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
+ nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
+ nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
+ nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
+ nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
+ nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
+ nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
+ nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
+ nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
+ nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
+ nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
+
+ cnt = nv_ro32(dev, ramfc, 0x84/4);
+ for (ptr = 0; ptr < cnt; ptr++) {
+ nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
+ nv_ro32(dev, cache, (ptr * 2) + 0));
+ nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
+ nv_ro32(dev, cache, (ptr * 2) + 1));
+ }
+ nv_wr32(dev, 0x3210, cnt << 2);
+ nv_wr32(dev, 0x3270, 0);
+
+ /* guessing that all the 0x34xx regs aren't on NV50 */
+ if (!IS_G80) {
+ nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
+ nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
+ nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
+ nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
+ nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
+ }
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
+ return 0;
+}
+
+int
+nv50_fifo_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_gpuobj *ramfc, *cache;
+ struct nouveau_channel *chan = NULL;
+ int chid, get, put, ptr;
+
+ NV_DEBUG(dev, "\n");
+
+ chid = pfifo->channel_id(dev);
+ if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
+ return 0;
+
+ chan = dev_priv->fifos[chid];
+ if (!chan) {
+ NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
+ return -EINVAL;
+ }
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+ ramfc = chan->ramfc->gpuobj;
+ cache = chan->cache->gpuobj;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+
+ nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
+ nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
+ nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
+ nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
+ nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
+ nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
+ nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
+ nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
+ nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
+ nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
+ nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
+ nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
+ nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
+ nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
+ nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
+ nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
+ nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
+ nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
+ nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
+ nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
+ nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
+ nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
+ nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
+ nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
+ nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
+ nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
+ nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
+ nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
+ nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
+ nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
+ nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
+ nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
+ nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
+
+ put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
+ get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
+ ptr = 0;
+ while (put != get) {
+ nv_wo32(dev, cache, ptr++,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
+ nv_wo32(dev, cache, ptr++,
+ nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
+ get = (get + 1) & 0x1ff;
+ }
+
+ /* guessing that all the 0x34xx regs aren't on NV50 */
+ if (!IS_G80) {
+ nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
+ nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
+ nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
+ nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
+ nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
+ nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
+ }
+
+ dev_priv->engine.instmem.finish_access(dev);
+
+ /*XXX: probably reload ch127 (NULL) state back too */
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
+ return 0;
+}
+
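[Editor's note -- not part of the patch] The unload path above snapshots PFIFO state back into RAMFC and then drains whatever is still queued in CACHE1: it walks from GET to PUT, saving each method/data pair into the channel's cache object, with GET wrapping under a 0x1ff mask. The same drain pattern as a standalone sketch (sizes and the example GET/PUT values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CACHE_ENTRIES 512			/* GET wraps with & 0x1ff above */

struct entry { uint32_t method, data; };

int main(void)
{
	struct entry cache1[CACHE_ENTRIES];	/* stands in for PFIFO CACHE1 */
	uint32_t saved[2 * CACHE_ENTRIES];	/* stands in for chan->cache  */
	uint32_t get = 510, put = 2;		/* example: four pending entries */
	int ptr = 0, i;

	for (i = 0; i < CACHE_ENTRIES; i++)
		cache1[i] = (struct entry){ 0x0100, (uint32_t)i };

	/* Copy everything between GET and PUT, wrapping like the hardware does. */
	while (put != get) {
		saved[ptr++] = cache1[get].method;
		saved[ptr++] = cache1[get].data;
		get = (get + 1) & 0x1ff;
	}

	printf("saved %d method/data pairs\n", ptr / 2);
	return 0;
}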
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
new file mode 100644
index 00000000000..ca79f32be44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+MODULE_FIRMWARE("nouveau/nv50.ctxprog");
+MODULE_FIRMWARE("nouveau/nv50.ctxvals");
+MODULE_FIRMWARE("nouveau/nv84.ctxprog");
+MODULE_FIRMWARE("nouveau/nv84.ctxvals");
+MODULE_FIRMWARE("nouveau/nv86.ctxprog");
+MODULE_FIRMWARE("nouveau/nv86.ctxvals");
+MODULE_FIRMWARE("nouveau/nv92.ctxprog");
+MODULE_FIRMWARE("nouveau/nv92.ctxvals");
+MODULE_FIRMWARE("nouveau/nv94.ctxprog");
+MODULE_FIRMWARE("nouveau/nv94.ctxvals");
+MODULE_FIRMWARE("nouveau/nv96.ctxprog");
+MODULE_FIRMWARE("nouveau/nv96.ctxvals");
+MODULE_FIRMWARE("nouveau/nv98.ctxprog");
+MODULE_FIRMWARE("nouveau/nv98.ctxvals");
+MODULE_FIRMWARE("nouveau/nva0.ctxprog");
+MODULE_FIRMWARE("nouveau/nva0.ctxvals");
+MODULE_FIRMWARE("nouveau/nva5.ctxprog");
+MODULE_FIRMWARE("nouveau/nva5.ctxvals");
+MODULE_FIRMWARE("nouveau/nva8.ctxprog");
+MODULE_FIRMWARE("nouveau/nva8.ctxvals");
+MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
+MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
+MODULE_FIRMWARE("nouveau/nvac.ctxprog");
+MODULE_FIRMWARE("nouveau/nvac.ctxvals");
+
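[Editor's note -- not part of the patch] Each MODULE_FIRMWARE() line above advertises a ctxprog/ctxvals pair for one chipset; the loader picks the pair whose name matches the running chip. A hedged userspace sketch of deriving such a name from a chipset id -- the helper and format string are illustrative, not the driver's actual loading code:

#include <stddef.h>
#include <stdio.h>

/* Build "nouveau/nvXX.ctxprog" from a chipset id (illustrative only). */
static void ctxprog_name(unsigned int chipset, char *buf, size_t len)
{
	snprintf(buf, len, "nouveau/nv%02x.ctxprog", chipset);
}

int main(void)
{
	char name[64];

	ctxprog_name(0x50, name, sizeof(name));	/* -> nouveau/nv50.ctxprog */
	printf("%s\n", name);
	ctxprog_name(0xa5, name, sizeof(name));	/* -> nouveau/nva5.ctxprog */
	printf("%s\n", name);
	return 0;
}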
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_graph_init_reset(struct drm_device *dev)
+{
+ uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
+
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
+}
+
+static void
+nv50_graph_init_intr(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
+}
+
+static void
+nv50_graph_init_regs__nv(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x401804, 0xc0000000);
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+
+ nv_wr32(dev, 0x400824, 0x00004000);
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static void
+nv50_graph_init_regs(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
+ (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
+ nv_wr32(dev, 0x402ca8, 0x800);
+}
+
+static int
+nv50_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_DEBUG(dev, "\n");
+
+ nouveau_grctx_prog_load(dev);
+ if (!dev_priv->engine.graph.ctxprog)
+ dev_priv->engine.graph.accel_blocked = true;
+
+ nv_wr32(dev, 0x400320, 4);
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
+ return 0;
+}
+
+int
+nv50_graph_init(struct drm_device *dev)
+{
+ int ret;
+
+ NV_DEBUG(dev, "\n");
+
+ nv50_graph_init_reset(dev);
+ nv50_graph_init_regs__nv(dev);
+ nv50_graph_init_regs(dev);
+ nv50_graph_init_intr(dev);
+
+ ret = nv50_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nv50_graph_takedown(struct drm_device *dev)
+{
+ NV_DEBUG(dev, "\n");
+ nouveau_grctx_fini(dev);
+}
+
+void
+nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+ const uint32_t mask = 0x00010001;
+
+ if (enabled)
+ nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
+ else
+ nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
+}
+
+struct nouveau_channel *
+nv50_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int i;
+
+ inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+ return NULL;
+ inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
+
+ for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->ramin && chan->ramin->instance == inst)
+ return chan;
+ }
+
+ return NULL;
+}
+
+int
+nv50_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ struct nouveau_gpuobj *ctx;
+ uint32_t grctx_size = 0x70000;
+ int hdr, ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+ if (ret)
+ return ret;
+ ctx = chan->ramin_grctx->gpuobj;
+
+ hdr = IS_G80 ? 0x200 : 0x20;
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
+ nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
+ grctx_size - 1);
+ nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
+ nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
+ nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
+ nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ nouveau_grctx_vals_load(dev, ctx);
+ nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
+ if ((dev_priv->chipset & 0xf0) == 0xa0)
+ nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
+ else
+ nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ return 0;
+}
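[Editor's note -- not part of the patch] The header written at hdr+0x00..hdr+0x14 above describes the channel's graphics context to PGRAPH: word 0x04 holds the last byte of the grctx (instance + size - 1) and word 0x08 its first byte, so together they form an inclusive address range. A minimal sketch of that limit calculation, with a made-up instance address and the grctx_size used above:

#include <stdio.h>

int main(void)
{
	unsigned int grctx_instance = 0x00200000;	/* example start address       */
	unsigned int grctx_size     = 0x70000;		/* size used in the code above */

	/* The header stores an inclusive [base, limit] range for the grctx. */
	unsigned int base  = grctx_instance;
	unsigned int limit = grctx_instance + grctx_size - 1;

	printf("grctx base  = 0x%08x\n", base);
	printf("grctx limit = 0x%08x\n", limit);
	return 0;
}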
+
+void
+nv50_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, hdr = IS_G80 ? 0x200 : 0x20;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ if (!chan->ramin || !chan->ramin->gpuobj)
+ return;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ for (i = hdr; i < hdr + 24; i += 4)
+ nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+}
+
+static int
+nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
+{
+ uint32_t fifo = nv_rd32(dev, 0x400500);
+
+ nv_wr32(dev, 0x400500, fifo & ~1);
+ nv_wr32(dev, 0x400784, inst);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
+ nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
+ nv_wr32(dev, 0x400040, 0xffffffff);
+ (void)nv_rd32(dev, 0x400040);
+ nv_wr32(dev, 0x400040, 0x00000000);
+ nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
+
+ if (nouveau_wait_for_idle(dev))
+ nv_wr32(dev, 0x40032c, inst | (1<<31));
+ nv_wr32(dev, 0x400500, fifo);
+
+ return 0;
+}
+
+int
+nv50_graph_load_context(struct nouveau_channel *chan)
+{
+ uint32_t inst = chan->ramin->instance >> 12;
+
+ NV_DEBUG(chan->dev, "ch%d\n", chan->id);
+ return nv50_graph_do_load_context(chan->dev, inst);
+}
+
+int
+nv50_graph_unload_context(struct drm_device *dev)
+{
+ uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+
+ inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
+ if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
+ return 0;
+ inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+ nv_wr32(dev, 0x400500, fifo & ~1);
+ nv_wr32(dev, 0x400784, inst);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
+ nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
+ nouveau_wait_for_idle(dev);
+ nv_wr32(dev, 0x400500, fifo);
+
+ nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
+ return 0;
+}
+
+void
+nv50_graph_context_switch(struct drm_device *dev)
+{
+ uint32_t inst;
+
+ nv50_graph_unload_context(dev);
+
+ inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
+ inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
+ nv50_graph_do_load_context(dev, inst);
+
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
+}
+
+static int
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct nouveau_gpuobj_ref *ref = NULL;
+
+ if (nouveau_gpuobj_ref_find(chan, data, &ref))
+ return -ENOENT;
+
+ if (nouveau_notifier_offset(ref->gpuobj, NULL))
+ return -EINVAL;
+
+ chan->nvsw.vblsem = ref->gpuobj;
+ chan->nvsw.vblsem_offset = ~0;
+ return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
+ return -ERANGE;
+
+ chan->nvsw.vblsem_offset = data >> 2;
+ return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ chan->nvsw.vblsem_rval = data;
+ return 0;
+}
+
+static int
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
+ int mthd, uint32_t data)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
+ return -EINVAL;
+
+ if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
+ NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
+ nv_wr32(dev, NV50_PDISPLAY_INTR_1,
+ NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
+ nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
+ NV50_PDISPLAY_INTR_EN) |
+ NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
+ }
+
+ list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+ return 0;
+}
+
+static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
+ { 0x018c, nv50_graph_nvsw_dma_vblsem },
+ { 0x0400, nv50_graph_nvsw_vblsem_offset },
+ { 0x0404, nv50_graph_nvsw_vblsem_release_val },
+ { 0x0408, nv50_graph_nvsw_vblsem_release },
+ {}
+};
+
+struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
+ { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
+ { 0x0030, false, NULL }, /* null */
+ { 0x5039, false, NULL }, /* m2mf */
+ { 0x502d, false, NULL }, /* 2d */
+ { 0x50c0, false, NULL }, /* compute */
+ { 0x5097, false, NULL }, /* tesla (nv50) */
+ { 0x8297, false, NULL }, /* tesla (nv80/nv90) */
+ { 0x8397, false, NULL }, /* tesla (nva0) */
+ { 0x8597, false, NULL }, /* tesla (nva8) */
+ {}
+};
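[Editor's note -- not part of the patch] nv50_graph_grclass[] maps object class ids to optional software-method tables; only class 0x506e ("nvsw") carries handlers here, the other classes are passed straight to hardware. A hedged sketch of how such a two-level lookup (class, then method) can be walked -- the struct and function names below are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct sw_method {
	uint32_t mthd;
	int (*handler)(uint32_t data);
};

struct sw_class {
	uint32_t id;
	const struct sw_method *methods;	/* NULL: handled by hardware */
};

static int dma_vblsem(uint32_t data)    { printf("dma_vblsem(0x%x)\n", data); return 0; }
static int vblsem_offset(uint32_t data) { printf("vblsem_offset(0x%x)\n", data); return 0; }

static const struct sw_method nvsw_methods[] = {
	{ 0x018c, dma_vblsem },
	{ 0x0400, vblsem_offset },
	{ 0 }					/* terminator, like the tables above */
};

static const struct sw_class classes[] = {
	{ 0x506e, nvsw_methods },		/* nvsw: software methods */
	{ 0x5039, NULL },			/* m2mf: hardware only    */
	{ 0 }
};

/* Return the handler's result, or -1 if nothing here handles the method. */
static int call_sw_method(uint32_t class_id, uint32_t mthd, uint32_t data)
{
	const struct sw_class *c;
	const struct sw_method *m;

	for (c = classes; c->id; c++) {
		if (c->id != class_id || !c->methods)
			continue;
		for (m = c->methods; m->mthd; m++) {
			if (m->mthd == mthd)
				return m->handler(data);
		}
	}
	return -1;
}

int main(void)
{
	printf("%d\n", call_sw_method(0x506e, 0x0400, 0x10));	/* handled   */
	printf("%d\n", call_sw_method(0x5039, 0x0400, 0x10));	/* unhandled */
	return 0;
}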
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
new file mode 100644
index 00000000000..94400f777e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+struct nv50_instmem_priv {
+ uint32_t save1700[5]; /* 0x1700->0x1710 */
+
+ struct nouveau_gpuobj_ref *pramin_pt;
+ struct nouveau_gpuobj_ref *pramin_bar;
+ struct nouveau_gpuobj_ref *fb_bar;
+
+ bool last_access_wr;
+};
+
+#define NV50_INSTMEM_PAGE_SHIFT 12
+#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
+#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
+
+/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
+ */
+#define BAR0_WI32(g, o, v) do { \
+ uint32_t offset; \
+ if ((g)->im_backing) { \
+ offset = (g)->im_backing_start; \
+ } else { \
+ offset = chan->ramin->gpuobj->im_backing_start; \
+ offset += (g)->im_pramin->start; \
+ } \
+ offset += (o); \
+ nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
+} while (0)
+
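[Editor's note -- not part of the patch] BAR0_WI32() computes where an object lives in VRAM (either its own backing address, or the dummy channel's backing address plus the object's PRAMIN offset), adds the word offset, and pokes the result through the 1 MiB BAR0 PRAMIN window -- hence the & 0xfffff, which assumes NV50_PUNK_BAR0_PRAMIN (0x1700) already selects the right megabyte. A standalone sketch of that address math with example values:

#include <stdio.h>

#define NV_RAMIN 0x00700000		/* BAR0 PRAMIN window, 1 MiB wide */

int main(void)
{
	unsigned int backing_start = 0x0ff00000;	/* object's VRAM address (example) */
	unsigned int o             = 0x48;		/* byte offset within the object   */

	/* The window is only 1 MiB, so only the low 20 bits select the register. */
	unsigned int offset   = backing_start + o;
	unsigned int bar0_reg = NV_RAMIN + (offset & 0xfffff);

	printf("write lands at BAR0 offset 0x%08x\n", bar0_reg);
	return 0;
}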
+int
+nv50_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
+ struct nv50_instmem_priv *priv;
+ int ret, i;
+ uint32_t v, save_nv001700;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.instmem.priv = priv;
+
+ /* Save state, will restore at takedown. */
+ for (i = 0x1700; i <= 0x1710; i += 4)
+ priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+
+ /* Reserve the last MiB of VRAM, we should probably try to avoid
+ * setting up the below tables over the top of the VBIOS image at
+ * some point.
+ */
+ dev_priv->ramin_rsvd_vram = 1 << 20;
+ c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
+ c_size = 128 << 10;
+ c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
+ c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
+ c_base = c_vmpd + 0x4000;
+ pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
+
+ NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
+ NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
+ (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
+ NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
+ NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);
+
+ /* Determine VM layout, we need to do this first to make sure
+ * we allocate enough memory for all the page tables.
+ */
+ dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
+ dev_priv->vm_gart_size = NV50_VM_BLOCK;
+
+ dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
+ dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
+ if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
+ dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
+ dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
+ dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
+
+ dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
+
+ NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
+ dev_priv->vm_gart_base,
+ dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
+ NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
+ dev_priv->vm_vram_base,
+ dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
+
+ c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
+
+ /* Map BAR0 PRAMIN aperture over the memory we want to use */
+ save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
+ nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
+
+ /* Create a fake channel, and use it as our "dummy" channels 0/127.
+ * The main reason for creating a channel is so we can use the gpuobj
+ * code. However, it's probably worth noting that NVIDIA also set up
+ * their channels 0/127 with the same values they configure here.
+ * So, there may be some other reason for doing this.
+ *
+ * Have to create the entire channel manually, as the real channel
+ * creation code assumes we have PRAMIN access, and we don't until
+ * we're done here.
+ */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->id = 0;
+ chan->dev = dev;
+ chan->file_priv = (struct drm_file *)-2;
+ dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+
+ /* Channel's PRAMIN object + heap */
+ ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
+ NULL, &chan->ramin);
+ if (ret)
+ return ret;
+
+ if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+ return -ENOMEM;
+
+ /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
+ ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
+ 0x4000, 0, NULL, &chan->ramfc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < c_vmpd; i += 4)
+ BAR0_WI32(chan->ramin->gpuobj, i, 0);
+
+ /* VM page directory */
+ ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
+ 0x4000, 0, &chan->vm_pd, NULL);
+ if (ret)
+ return ret;
+ for (i = 0; i < 0x4000; i += 8) {
+ BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
+ BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
+ }
+
+ /* PRAMIN page table, cheat and map into VM at 0x0000000000.
+ * We map the entire fake channel into the start of the PRAMIN BAR
+ */
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
+ 0, &priv->pramin_pt);
+ if (ret)
+ return ret;
+
+ for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
+ if (v < (c_offset + c_size))
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
+ else
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ }
+
+ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
+ BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+
+ /* VRAM page table(s), mapped into VM at +1GiB */
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
+ NV50_VM_BLOCK/65536*8, 0, 0,
+ &chan->vm_vram_pt[i]);
+ if (ret) {
+ NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
+ ret);
+ dev_priv->vm_vram_pt_nr = i;
+ return ret;
+ }
+ dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
+
+ for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
+ v += 4)
+ BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
+
+ BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
+ chan->vm_vram_pt[i]->instance | 0x61);
+ BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
+ }
+
+ /* DMA object for PRAMIN BAR */
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+ &priv->pramin_bar);
+ if (ret)
+ return ret;
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
+
+ /* DMA object for FB BAR */
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+ &priv->fb_bar);
+ if (ret)
+ return ret;
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
+ drm_get_resource_len(dev, 1) - 1);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
+ BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
+
+ /* Poke the relevant regs, and pray it works :) */
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ nv_wr32(dev, NV50_PUNK_UNK1710, 0);
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ NV50_PUNK_BAR_CFG_BASE_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+ NV50_PUNK_BAR1_CTXDMA_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ NV50_PUNK_BAR3_CTXDMA_VALID);
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x1900 + (i*4), 0);
+
+ /* Assume that praying isn't enough, check that we can re-read the
+ * entire fake channel back from the PRAMIN BAR */
+ dev_priv->engine.instmem.prepare_access(dev, false);
+ for (i = 0; i < c_size; i += 4) {
+ if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
+ NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
+ i);
+ dev_priv->engine.instmem.finish_access(dev);
+ return -EINVAL;
+ }
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
+
+ /* Global PRAMIN heap */
+ if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ c_size, dev_priv->ramin_size - c_size)) {
+ dev_priv->ramin_heap = NULL;
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ }
+
+ /*XXX: incorrect, but needed to make hash func "work" */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ return 0;
+}
+
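[Editor's note -- not part of the patch] NV50_INSTMEM_PT_SIZE() above allocates one 8-byte PTE per 4 KiB page of the PRAMIN aperture, so a page table covering an N-byte aperture needs (N >> 12) * 8 bytes. A quick standalone check of that arithmetic (the aperture size is an example value):

#include <stdio.h>

#define NV50_INSTMEM_PT_SIZE(a)	(((a) >> 12) << 3)	/* 8 bytes per 4 KiB page */

int main(void)
{
	unsigned int ramin_size = 16u << 20;	/* example: 16 MiB PRAMIN aperture */
	unsigned int pt_size    = NV50_INSTMEM_PT_SIZE(ramin_size);

	printf("%u MiB aperture -> %u KiB of PTEs (%u entries)\n",
	       ramin_size >> 20, pt_size >> 10, pt_size / 8);
	return 0;
}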
+void
+nv50_instmem_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ int i;
+
+ NV_DEBUG(dev, "\n");
+
+ if (!priv)
+ return;
+
+ /* Restore state from before init */
+ for (i = 0x1700; i <= 0x1710; i += 4)
+ nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
+
+ nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+
+ /* Destroy dummy channel */
+ if (chan) {
+ for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
+ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
+ dev_priv->vm_vram_pt[i] = NULL;
+ }
+ dev_priv->vm_vram_pt_nr = 0;
+
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ nouveau_mem_takedown(&chan->ramin_heap);
+
+ dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
+ kfree(chan);
+ }
+
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
+}
+
+int
+nv50_instmem_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ int i;
+
+ ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
+ if (!ramin->im_backing_suspend)
+ return -ENOMEM;
+
+ for (i = 0; i < ramin->im_pramin->size; i += 4)
+ ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+ return 0;
+}
+
+void
+nv50_instmem_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ int i;
+
+ nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
+ for (i = 0; i < ramin->im_pramin->size; i += 4)
+ BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
+ vfree(ramin->im_backing_suspend);
+ ramin->im_backing_suspend = NULL;
+
+ /* Poke the relevant regs, and pray it works :) */
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ nv_wr32(dev, NV50_PUNK_UNK1710, 0);
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ NV50_PUNK_BAR_CFG_BASE_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
+ NV50_PUNK_BAR1_CTXDMA_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ NV50_PUNK_BAR3_CTXDMA_VALID);
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x1900 + (i*4), 0);
+}
+
+int
+nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ uint32_t *sz)
+{
+ int ret;
+
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+ if (*sz == 0)
+ return -EINVAL;
+
+ ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
+ true, false, &gpuobj->im_backing);
+ if (ret) {
+ NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ return ret;
+ }
+
+ gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
+ gpuobj->im_backing_start <<= PAGE_SHIFT;
+
+ return 0;
+}
+
+void
+nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->engine.instmem.unbind(dev, gpuobj);
+ nouveau_bo_unpin(gpuobj->im_backing);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ uint32_t pte, pte_end, vram;
+
+ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
+ gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ vram = gpuobj->im_backing_start;
+
+ NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
+ gpuobj->im_pramin->start, pte, pte_end);
+ NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pte < pte_end) {
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+
+ pte += 8;
+ vram += NV50_INSTMEM_PAGE_SIZE;
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x100c80, 0x00040001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00060001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return -EBUSY;
+ }
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
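[Editor's note -- not part of the patch] nv50_instmem_bind() walks the PRAMIN page table in 8-byte steps, one entry per 4 KiB page of the object: the low word of each valid entry is the backing VRAM page address ORed with 1, while unbound entries hold the 0x00000009 pattern used by the init and unbind paths. A standalone sketch of the PTE fill, with example addresses:

#include <stdio.h>

#define PAGE_SZ 0x1000

int main(void)
{
	unsigned int pt[64];			/* stands in for pramin_pt, as words    */
	unsigned int start = 0x3000;		/* object's start within PRAMIN (bytes) */
	unsigned int size  = 0x2000;		/* object's size (bytes)                */
	unsigned int vram  = 0x00200000;	/* backing VRAM address (example)       */

	unsigned int pte     = (start >> 12) << 3;	/* first PTE, as a byte offset */
	unsigned int pte_end = ((size >> 12) << 3) + pte;

	while (pte < pte_end) {
		pt[(pte + 0) / 4] = vram | 1;	/* low word: page address + valid bit */
		pt[(pte + 4) / 4] = 0x00000000;	/* high word unused in this path      */
		pte  += 8;
		vram += PAGE_SZ;
	}

	printf("filled PTEs for PRAMIN bytes 0x%x..0x%x\n", start, start + size - 1);
	return 0;
}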
+
+int
+nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ uint32_t pte, pte_end;
+
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pte < pte_end) {
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
+ nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+ pte += 8;
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
+void
+nv50_instmem_prepare_access(struct drm_device *dev, bool write)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+
+ priv->last_access_wr = write;
+}
+
+void
+nv50_instmem_finish_access(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+
+ if (priv->last_access_wr) {
+ nv_wr32(dev, 0x070000, 0x00000001);
+ if (!nv_wait(0x070000, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+ }
+}
+
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
new file mode 100644
index 00000000000..e0a9c3faa20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mc.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nv50_mc_init(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+ return 0;
+}
+
+void nv50_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
new file mode 100644
index 00000000000..e395c16d30f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2008 Maarten Maathuis.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+#include "nouveau_reg.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_encoder.h"
+#include "nouveau_connector.h"
+#include "nouveau_crtc.h"
+#include "nv50_display.h"
+
+static void
+nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
+{
+ struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while disconnecting SOR\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ OUT_RING(evo, 0);
+}
+
+static void
+nv50_sor_dp_link_train(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct bit_displayport_encoder_table *dpe;
+ int dpe_headerlen;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe) {
+ NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
+ return;
+ }
+
+ if (dpe->script0) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
+ nv_encoder->dcb);
+ }
+
+ if (!nouveau_dp_link_train(encoder))
+ NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
+
+ if (dpe->script1) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
+ nv_encoder->dcb);
+ }
+}
+
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ uint32_t val;
+ int or = nv_encoder->or;
+
+ NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+
+ /* wait for it to be done */
+ if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+ NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
+ NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
+ NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
+ }
+
+ val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
+
+ if (mode == DRM_MODE_DPMS_ON)
+ val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
+ else
+ val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
+
+ nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
+ NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
+ if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
+ NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
+ NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
+ NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
+ nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
+ }
+
+ if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
+ nv50_sor_dp_link_train(encoder);
+}
+
+static void
+nv50_sor_save(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static void
+nv50_sor_restore(struct drm_encoder *encoder)
+{
+ NV_ERROR(encoder->dev, "!!\n");
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *connector;
+
+ NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
+
+ connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!connector) {
+ NV_ERROR(encoder->dev, "Encoder has no connector\n");
+ return false;
+ }
+
+ if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+ connector->native_mode) {
+ int id = adjusted_mode->base.id;
+ *adjusted_mode = *connector->native_mode;
+ adjusted_mode->base.id = id;
+ }
+
+ return true;
+}
+
+static void
+nv50_sor_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
+ uint32_t mode_ctl = 0;
+ int ret;
+
+ NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
+
+ nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+ switch (nv_encoder->dcb->type) {
+ case OUTPUT_TMDS:
+ if (nv_encoder->dcb->sorconf.link & 1) {
+ if (adjusted_mode->clock < 165000)
+ mode_ctl = 0x0100;
+ else
+ mode_ctl = 0x0500;
+ } else
+ mode_ctl = 0x0200;
+ break;
+ case OUTPUT_DP:
+ mode_ctl |= 0x00050000;
+ if (nv_encoder->dcb->sorconf.link & 1)
+ mode_ctl |= 0x00000800;
+ else
+ mode_ctl |= 0x00000900;
+ break;
+ default:
+ break;
+ }
+
+ if (crtc->index == 1)
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
+ else
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
+
+ ret = RING_SPACE(evo, 2);
+ if (ret) {
+ NV_ERROR(dev, "no space while connecting SOR\n");
+ return;
+ }
+ BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ OUT_RING(evo, mode_ctl);
+}
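[Editor's note -- not part of the patch] The TMDS branch above selects single- or dual-link operation from the pixel clock: on the first SOR link, anything below 165 MHz programs 0x0100 (single link), otherwise 0x0500 (dual link), while the second link always uses 0x0200. A small sketch of just that decision, with example mode clocks:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the TMDS mode_ctl choice above (clock in kHz, as in struct drm_display_mode). */
static unsigned int tmds_mode_ctl(int clock_khz, bool first_link)
{
	if (!first_link)
		return 0x0200;			/* second SOR link  */
	return clock_khz < 165000 ? 0x0100	/* single-link TMDS */
				  : 0x0500;	/* dual-link TMDS   */
}

int main(void)
{
	printf("1080p60   (148.5 MHz): 0x%04x\n", tmds_mode_ctl(148500, true));
	printf("2560x1600 (268 MHz):   0x%04x\n", tmds_mode_ctl(268000, true));
	return 0;
}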
+
+static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
+ .dpms = nv50_sor_dpms,
+ .save = nv50_sor_save,
+ .restore = nv50_sor_restore,
+ .mode_fixup = nv50_sor_mode_fixup,
+ .prepare = nv50_sor_prepare,
+ .commit = nv50_sor_commit,
+ .mode_set = nv50_sor_mode_set,
+ .detect = NULL
+};
+
+static void
+nv50_sor_destroy(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (!encoder)
+ return;
+
+ NV_DEBUG_KMS(encoder->dev, "\n");
+
+ drm_encoder_cleanup(encoder);
+
+ kfree(nv_encoder);
+}
+
+static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
+ .destroy = nv50_sor_destroy,
+};
+
+int
+nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
+{
+ struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_encoder *encoder;
+ bool dum;
+ int type;
+
+ NV_DEBUG_KMS(dev, "\n");
+
+ switch (entry->type) {
+ case OUTPUT_TMDS:
+ NV_INFO(dev, "Detected a TMDS output\n");
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ case OUTPUT_LVDS:
+ NV_INFO(dev, "Detected a LVDS output\n");
+ type = DRM_MODE_ENCODER_LVDS;
+
+ if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
+ NV_ERROR(dev, "Failed parsing LVDS table\n");
+ return -EINVAL;
+ }
+ break;
+ case OUTPUT_DP:
+ NV_INFO(dev, "Detected a DP output\n");
+ type = DRM_MODE_ENCODER_TMDS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+ if (!nv_encoder)
+ return -ENOMEM;
+ encoder = to_drm_encoder(nv_encoder);
+
+ nv_encoder->dcb = entry;
+ nv_encoder->or = ffs(entry->or) - 1;
+
+ nv_encoder->disconnect = nv50_sor_disconnect;
+
+ drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
+ drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
+
+ encoder->possible_crtcs = entry->heads;
+ encoder->possible_clones = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
new file mode 100644
index 00000000000..5998c35237b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -0,0 +1,535 @@
+/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
+/*
+ * Copyright 1996-1997 David J. McKay
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
+ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */
+
+#ifndef __NVREG_H_
+#define __NVREG_H_
+
+#define NV_PMC_OFFSET 0x00000000
+#define NV_PMC_SIZE 0x00001000
+
+#define NV_PBUS_OFFSET 0x00001000
+#define NV_PBUS_SIZE 0x00001000
+
+#define NV_PFIFO_OFFSET 0x00002000
+#define NV_PFIFO_SIZE 0x00002000
+
+#define NV_HDIAG_OFFSET 0x00005000
+#define NV_HDIAG_SIZE 0x00001000
+
+#define NV_PRAM_OFFSET 0x00006000
+#define NV_PRAM_SIZE 0x00001000
+
+#define NV_PVIDEO_OFFSET 0x00008000
+#define NV_PVIDEO_SIZE 0x00001000
+
+#define NV_PTIMER_OFFSET 0x00009000
+#define NV_PTIMER_SIZE 0x00001000
+
+#define NV_PPM_OFFSET 0x0000A000
+#define NV_PPM_SIZE 0x00001000
+
+#define NV_PTV_OFFSET 0x0000D000
+#define NV_PTV_SIZE 0x00001000
+
+#define NV_PRMVGA_OFFSET 0x000A0000
+#define NV_PRMVGA_SIZE 0x00020000
+
+#define NV_PRMVIO0_OFFSET 0x000C0000
+#define NV_PRMVIO_SIZE 0x00002000
+#define NV_PRMVIO1_OFFSET 0x000C2000
+
+#define NV_PFB_OFFSET 0x00100000
+#define NV_PFB_SIZE 0x00001000
+
+#define NV_PEXTDEV_OFFSET 0x00101000
+#define NV_PEXTDEV_SIZE 0x00001000
+
+#define NV_PME_OFFSET 0x00200000
+#define NV_PME_SIZE 0x00001000
+
+#define NV_PROM_OFFSET 0x00300000
+#define NV_PROM_SIZE 0x00010000
+
+#define NV_PGRAPH_OFFSET 0x00400000
+#define NV_PGRAPH_SIZE 0x00010000
+
+#define NV_PCRTC0_OFFSET 0x00600000
+#define NV_PCRTC0_SIZE 0x00002000 /* empirical */
+
+#define NV_PRMCIO0_OFFSET 0x00601000
+#define NV_PRMCIO_SIZE 0x00002000
+#define NV_PRMCIO1_OFFSET 0x00603000
+
+#define NV50_DISPLAY_OFFSET 0x00610000
+#define NV50_DISPLAY_SIZE 0x0000FFFF
+
+#define NV_PRAMDAC0_OFFSET 0x00680000
+#define NV_PRAMDAC0_SIZE 0x00002000
+
+#define NV_PRMDIO0_OFFSET 0x00681000
+#define NV_PRMDIO_SIZE 0x00002000
+#define NV_PRMDIO1_OFFSET 0x00683000
+
+#define NV_PRAMIN_OFFSET 0x00700000
+#define NV_PRAMIN_SIZE 0x00100000
+
+#define NV_FIFO_OFFSET 0x00800000
+#define NV_FIFO_SIZE 0x00800000
+
+#define NV_PMC_BOOT_0 0x00000000
+#define NV_PMC_ENABLE 0x00000200
+
+#define NV_VIO_VSE2 0x000003c3
+#define NV_VIO_SRX 0x000003c4
+
+#define NV_CIO_CRX__COLOR 0x000003d4
+#define NV_CIO_CR__COLOR 0x000003d5
+
+#define NV_PBUS_DEBUG_1 0x00001084
+#define NV_PBUS_DEBUG_4 0x00001098
+#define NV_PBUS_DEBUG_DUALHEAD_CTL 0x000010f0
+#define NV_PBUS_POWERCTRL_1 0x00001584
+#define NV_PBUS_POWERCTRL_2 0x00001588
+#define NV_PBUS_POWERCTRL_4 0x00001590
+#define NV_PBUS_PCI_NV_19 0x0000184C
+#define NV_PBUS_PCI_NV_20 0x00001850
+# define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
+# define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
+
+#define NV_PFIFO_RAMHT 0x00002210
+
+#define NV_PTV_TV_INDEX 0x0000d220
+#define NV_PTV_TV_DATA 0x0000d224
+#define NV_PTV_HFILTER 0x0000d310
+#define NV_PTV_HFILTER2 0x0000d390
+#define NV_PTV_VFILTER 0x0000d510
+
+#define NV_PRMVIO_MISC__WRITE 0x000c03c2
+#define NV_PRMVIO_SRX 0x000c03c4
+#define NV_PRMVIO_SR 0x000c03c5
+# define NV_VIO_SR_RESET_INDEX 0x00
+# define NV_VIO_SR_CLOCK_INDEX 0x01
+# define NV_VIO_SR_PLANE_MASK_INDEX 0x02
+# define NV_VIO_SR_CHAR_MAP_INDEX 0x03
+# define NV_VIO_SR_MEM_MODE_INDEX 0x04
+#define NV_PRMVIO_MISC__READ 0x000c03cc
+#define NV_PRMVIO_GRX 0x000c03ce
+#define NV_PRMVIO_GX 0x000c03cf
+# define NV_VIO_GX_SR_INDEX 0x00
+# define NV_VIO_GX_SREN_INDEX 0x01
+# define NV_VIO_GX_CCOMP_INDEX 0x02
+# define NV_VIO_GX_ROP_INDEX 0x03
+# define NV_VIO_GX_READ_MAP_INDEX 0x04
+# define NV_VIO_GX_MODE_INDEX 0x05
+# define NV_VIO_GX_MISC_INDEX 0x06
+# define NV_VIO_GX_DONT_CARE_INDEX 0x07
+# define NV_VIO_GX_BIT_MASK_INDEX 0x08
+
+#define NV_PFB_BOOT_0 0x00100000
+#define NV_PFB_CFG0 0x00100200
+#define NV_PFB_CFG1 0x00100204
+#define NV_PFB_CSTATUS 0x0010020C
+#define NV_PFB_REFCTRL 0x00100210
+# define NV_PFB_REFCTRL_VALID_1 (1 << 31)
+#define NV_PFB_PAD 0x0010021C
+# define NV_PFB_PAD_CKE_NORMAL (1 << 0)
+#define NV_PFB_TILE_NV10 0x00100240
+#define NV_PFB_TILE_SIZE_NV10 0x00100244
+#define NV_PFB_REF 0x001002D0
+# define NV_PFB_REF_CMD_REFRESH (1 << 0)
+#define NV_PFB_PRE 0x001002D4
+# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0)
+#define NV_PFB_CLOSE_PAGE2 0x0010033C
+#define NV_PFB_TILE_NV40 0x00100600
+#define NV_PFB_TILE_SIZE_NV40 0x00100604
+
+#define NV_PEXTDEV_BOOT_0 0x00101000
+# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
+#define NV_PEXTDEV_BOOT_3 0x0010100c
+
+#define NV_PCRTC_INTR_0 0x00600100
+# define NV_PCRTC_INTR_0_VBLANK (1 << 0)
+#define NV_PCRTC_INTR_EN_0 0x00600140
+#define NV_PCRTC_START 0x00600800
+#define NV_PCRTC_CONFIG 0x00600804
+# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
+# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
+#define NV_PCRTC_CURSOR_CONFIG 0x00600810
+# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
+# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
+# define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM (1 << 8)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32 (1 << 12)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 (1 << 16)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32 (2 << 24)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 (4 << 24)
+# define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA (1 << 28)
+
+/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
+#define NV_PCRTC_GPIO 0x00600818
+#define NV_PCRTC_GPIO_EXT 0x0060081c
+#define NV_PCRTC_830 0x00600830
+#define NV_PCRTC_834 0x00600834
+#define NV_PCRTC_850 0x00600850
+#define NV_PCRTC_ENGINE_CTRL 0x00600860
+# define NV_CRTC_FSEL_I2C (1 << 4)
+# define NV_CRTC_FSEL_OVERLAY (1 << 12)
+
+#define NV_PRMCIO_ARX 0x006013c0
+#define NV_PRMCIO_AR__WRITE 0x006013c0
+#define NV_PRMCIO_AR__READ 0x006013c1
+# define NV_CIO_AR_MODE_INDEX 0x10
+# define NV_CIO_AR_OSCAN_INDEX 0x11
+# define NV_CIO_AR_PLANE_INDEX 0x12
+# define NV_CIO_AR_HPP_INDEX 0x13
+# define NV_CIO_AR_CSEL_INDEX 0x14
+#define NV_PRMCIO_INP0 0x006013c2
+#define NV_PRMCIO_CRX__COLOR 0x006013d4
+#define NV_PRMCIO_CR__COLOR 0x006013d5
+ /* Standard VGA CRTC registers */
+# define NV_CIO_CR_HDT_INDEX 0x00 /* horizontal display total */
+# define NV_CIO_CR_HDE_INDEX 0x01 /* horizontal display end */
+# define NV_CIO_CR_HBS_INDEX 0x02 /* horizontal blanking start */
+# define NV_CIO_CR_HBE_INDEX 0x03 /* horizontal blanking end */
+# define NV_CIO_CR_HBE_4_0 4:0
+# define NV_CIO_CR_HRS_INDEX 0x04 /* horizontal retrace start */
+# define NV_CIO_CR_HRE_INDEX 0x05 /* horizontal retrace end */
+# define NV_CIO_CR_HRE_4_0 4:0
+# define NV_CIO_CR_HRE_HBE_5 7:7
+# define NV_CIO_CR_VDT_INDEX 0x06 /* vertical display total */
+# define NV_CIO_CR_OVL_INDEX 0x07 /* overflow bits */
+# define NV_CIO_CR_OVL_VDT_8 0:0
+# define NV_CIO_CR_OVL_VDE_8 1:1
+# define NV_CIO_CR_OVL_VRS_8 2:2
+# define NV_CIO_CR_OVL_VBS_8 3:3
+# define NV_CIO_CR_OVL_VDT_9 5:5
+# define NV_CIO_CR_OVL_VDE_9 6:6
+# define NV_CIO_CR_OVL_VRS_9 7:7
+# define NV_CIO_CR_RSAL_INDEX 0x08 /* normally "preset row scan" */
+# define NV_CIO_CR_CELL_HT_INDEX 0x09 /* cell height?! normally "max scan line" */
+# define NV_CIO_CR_CELL_HT_VBS_9 5:5
+# define NV_CIO_CR_CELL_HT_SCANDBL 7:7
+# define NV_CIO_CR_CURS_ST_INDEX 0x0a /* cursor start */
+# define NV_CIO_CR_CURS_END_INDEX 0x0b /* cursor end */
+# define NV_CIO_CR_SA_HI_INDEX 0x0c /* screen start address high */
+# define NV_CIO_CR_SA_LO_INDEX 0x0d /* screen start address low */
+# define NV_CIO_CR_TCOFF_HI_INDEX 0x0e /* cursor offset high */
+# define NV_CIO_CR_TCOFF_LO_INDEX 0x0f /* cursor offset low */
+# define NV_CIO_CR_VRS_INDEX 0x10 /* vertical retrace start */
+# define NV_CIO_CR_VRE_INDEX 0x11 /* vertical retrace end */
+# define NV_CIO_CR_VRE_3_0 3:0
+# define NV_CIO_CR_VDE_INDEX 0x12 /* vertical display end */
+# define NV_CIO_CR_OFFSET_INDEX 0x13 /* sets screen pitch */
+# define NV_CIO_CR_ULINE_INDEX 0x14 /* underline location */
+# define NV_CIO_CR_VBS_INDEX 0x15 /* vertical blank start */
+# define NV_CIO_CR_VBE_INDEX 0x16 /* vertical blank end */
+# define NV_CIO_CR_MODE_INDEX 0x17 /* crtc mode control */
+# define NV_CIO_CR_LCOMP_INDEX 0x18 /* line compare */
+ /* Extended VGA CRTC registers */
+# define NV_CIO_CRE_RPC0_INDEX 0x19 /* repaint control 0 */
+# define NV_CIO_CRE_RPC0_OFFSET_10_8 7:5
+# define NV_CIO_CRE_RPC1_INDEX 0x1a /* repaint control 1 */
+# define NV_CIO_CRE_RPC1_LARGE 2:2
+# define NV_CIO_CRE_FF_INDEX 0x1b /* fifo control */
+# define NV_CIO_CRE_ENH_INDEX 0x1c /* enhanced? */
+# define NV_CIO_SR_LOCK_INDEX 0x1f /* crtc lock */
+# define NV_CIO_SR_UNLOCK_RW_VALUE 0x57
+# define NV_CIO_SR_LOCK_VALUE 0x99
+# define NV_CIO_CRE_FFLWM__INDEX 0x20 /* fifo low water mark */
+# define NV_CIO_CRE_21 0x21 /* vga shadow crtc lock */
+# define NV_CIO_CRE_LSR_INDEX 0x25 /* ? */
+# define NV_CIO_CRE_LSR_VDT_10 0:0
+# define NV_CIO_CRE_LSR_VDE_10 1:1
+# define NV_CIO_CRE_LSR_VRS_10 2:2
+# define NV_CIO_CRE_LSR_VBS_10 3:3
+# define NV_CIO_CRE_LSR_HBE_6 4:4
+# define NV_CIO_CR_ARX_INDEX 0x26 /* attribute index -- ro copy of 0x60.3c0 */
+# define NV_CIO_CRE_CHIP_ID_INDEX 0x27 /* chip revision */
+# define NV_CIO_CRE_PIXEL_INDEX 0x28
+# define NV_CIO_CRE_PIXEL_FORMAT 1:0
+# define NV_CIO_CRE_HEB__INDEX 0x2d /* horizontal extra bits? */
+# define NV_CIO_CRE_HEB_HDT_8 0:0
+# define NV_CIO_CRE_HEB_HDE_8 1:1
+# define NV_CIO_CRE_HEB_HBS_8 2:2
+# define NV_CIO_CRE_HEB_HRS_8 3:3
+# define NV_CIO_CRE_HEB_ILC_8 4:4
+# define NV_CIO_CRE_2E 0x2e /* some scratch or dummy reg to force writes to sink in */
+# define NV_CIO_CRE_HCUR_ADDR2_INDEX 0x2f /* cursor */
+# define NV_CIO_CRE_HCUR_ADDR0_INDEX 0x30 /* pixmap */
+# define NV_CIO_CRE_HCUR_ADDR0_ADR 6:0
+# define NV_CIO_CRE_HCUR_ASI 7:7
+# define NV_CIO_CRE_HCUR_ADDR1_INDEX 0x31 /* address */
+# define NV_CIO_CRE_HCUR_ADDR1_ENABLE 0:0
+# define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL 1:1
+# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2
+# define NV_CIO_CRE_LCD__INDEX 0x33
+# define NV_CIO_CRE_LCD_LCD_SELECT 0:0
+# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36
+# define NV_CIO_CRE_DDC0_WR__INDEX 0x37
+# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */
+# define NV_CIO_CRE_SCRATCH3__INDEX 0x3b
+# define NV_CIO_CRE_SCRATCH4__INDEX 0x3c
+# define NV_CIO_CRE_DDC_STATUS__INDEX 0x3e
+# define NV_CIO_CRE_DDC_WR__INDEX 0x3f
+# define NV_CIO_CRE_EBR_INDEX 0x41 /* extra bits ? (vertical) */
+# define NV_CIO_CRE_EBR_VDT_11 0:0
+# define NV_CIO_CRE_EBR_VDE_11 2:2
+# define NV_CIO_CRE_EBR_VRS_11 4:4
+# define NV_CIO_CRE_EBR_VBS_11 6:6
+# define NV_CIO_CRE_43 0x43
+# define NV_CIO_CRE_44 0x44 /* head control */
+# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
+# define NV_CIO_CRE_RCR 0x46
+# define NV_CIO_CRE_RCR_ENDIAN_BIG 7:7
+# define NV_CIO_CRE_47 0x47 /* extended fifo lwm, used on nv30+ */
+# define NV_CIO_CRE_49 0x49
+# define NV_CIO_CRE_4B 0x4b /* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
+# define NV_CIO_CRE_TVOUT_LATENCY 0x52
+# define NV_CIO_CRE_53 0x53 /* `fp_htiming' according to Haiku */
+# define NV_CIO_CRE_54 0x54 /* `fp_vtiming' according to Haiku */
+# define NV_CIO_CRE_57 0x57 /* index reg for cr58 */
+# define NV_CIO_CRE_58 0x58 /* data reg for cr57 */
+# define NV_CIO_CRE_59 0x59 /* related to on/off-chip-ness of digital outputs */
+# define NV_CIO_CRE_5B 0x5B /* newer colour saturation reg */
+# define NV_CIO_CRE_85 0x85
+# define NV_CIO_CRE_86 0x86
+#define NV_PRMCIO_INP0__COLOR 0x006013da
+
+#define NV_PRAMDAC_CU_START_POS 0x00680300
+# define NV_PRAMDAC_CU_START_POS_X 15:0
+# define NV_PRAMDAC_CU_START_POS_Y 31:16
+#define NV_RAMDAC_NV10_CURSYNC 0x00680404
+
+#define NV_PRAMDAC_NVPLL_COEFF 0x00680500
+#define NV_PRAMDAC_MPLL_COEFF 0x00680504
+#define NV_PRAMDAC_VPLL_COEFF 0x00680508
+# define NV30_RAMDAC_ENABLE_VCO2 (8 << 4)
+
+#define NV_PRAMDAC_PLL_COEFF_SELECT 0x0068050c
+# define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE (4 << 0)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL (1 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL (2 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL (4 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 (8 << 8)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 (1 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 (2 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 (4 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2 (8 << 16)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP (1 << 20)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2 (1 << 28)
+# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2 (2 << 28)
+
+#define NV_PRAMDAC_PLL_SETUP_CONTROL 0x00680510
+#define NV_RAMDAC_VPLL2 0x00680520
+#define NV_PRAMDAC_SEL_CLK 0x00680524
+#define NV_RAMDAC_DITHER_NV11 0x00680528
+#define NV_PRAMDAC_DACCLK 0x0068052c
+# define NV_PRAMDAC_DACCLK_SEL_DACCLK (1 << 0)
+
+#define NV_RAMDAC_NVPLL_B 0x00680570
+#define NV_RAMDAC_MPLL_B 0x00680574
+#define NV_RAMDAC_VPLL_B 0x00680578
+#define NV_RAMDAC_VPLL2_B 0x0068057c
+# define NV31_RAMDAC_ENABLE_VCO2 (8 << 28)
+#define NV_PRAMDAC_580 0x00680580
+# define NV_RAMDAC_580_VPLL1_ACTIVE (1 << 8)
+# define NV_RAMDAC_580_VPLL2_ACTIVE (1 << 28)
+
+#define NV_PRAMDAC_GENERAL_CONTROL 0x00680600
+# define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON (3 << 4)
+# define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL (1 << 8)
+# define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL (1 << 12)
+# define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM (2 << 16)
+# define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS (1 << 20)
+# define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG (2 << 28)
+#define NV_PRAMDAC_TEST_CONTROL 0x00680608
+# define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED (1 << 12)
+# define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF (1 << 16)
+# define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI (1 << 28)
+#define NV_PRAMDAC_TESTPOINT_DATA 0x00680610
+# define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK (8 << 28)
+#define NV_PRAMDAC_630 0x00680630
+#define NV_PRAMDAC_634 0x00680634
+
+#define NV_PRAMDAC_TV_SETUP 0x00680700
+#define NV_PRAMDAC_TV_VTOTAL 0x00680720
+#define NV_PRAMDAC_TV_VSKEW 0x00680724
+#define NV_PRAMDAC_TV_VSYNC_DELAY 0x00680728
+#define NV_PRAMDAC_TV_HTOTAL 0x0068072c
+#define NV_PRAMDAC_TV_HSKEW 0x00680730
+#define NV_PRAMDAC_TV_HSYNC_DELAY 0x00680734
+#define NV_PRAMDAC_TV_HSYNC_DELAY2 0x00680738
+
+#define NV_PRAMDAC_TV_SETUP 0x00680700
+
+#define NV_PRAMDAC_FP_VDISPLAY_END 0x00680800
+#define NV_PRAMDAC_FP_VTOTAL 0x00680804
+#define NV_PRAMDAC_FP_VCRTC 0x00680808
+#define NV_PRAMDAC_FP_VSYNC_START 0x0068080c
+#define NV_PRAMDAC_FP_VSYNC_END 0x00680810
+#define NV_PRAMDAC_FP_VVALID_START 0x00680814
+#define NV_PRAMDAC_FP_VVALID_END 0x00680818
+#define NV_PRAMDAC_FP_HDISPLAY_END 0x00680820
+#define NV_PRAMDAC_FP_HTOTAL 0x00680824
+#define NV_PRAMDAC_FP_HCRTC 0x00680828
+#define NV_PRAMDAC_FP_HSYNC_START 0x0068082c
+#define NV_PRAMDAC_FP_HSYNC_END 0x00680830
+#define NV_PRAMDAC_FP_HVALID_START 0x00680834
+#define NV_PRAMDAC_FP_HVALID_END 0x00680838
+
+#define NV_RAMDAC_FP_DITHER 0x0068083c
+#define NV_PRAMDAC_FP_TG_CONTROL 0x00680848
+# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS (1 << 0)
+# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE (2 << 0)
+# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS (1 << 4)
+# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE (2 << 4)
+# define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE (0 << 8)
+# define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER (1 << 8)
+# define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE (2 << 8)
+# define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG (1 << 20)
+# define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 (1 << 24)
+# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS (1 << 28)
+# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE (2 << 28)
+#define NV_PRAMDAC_FP_MARGIN_COLOR 0x0068084c
+#define NV_PRAMDAC_850 0x00680850
+#define NV_PRAMDAC_85C 0x0068085c
+#define NV_PRAMDAC_FP_DEBUG_0 0x00680880
+# define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE (1 << 0)
+# define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE (1 << 4)
+/* This doesn't seem to be essential for tmds, but still often set */
+# define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED (8 << 4)
+# define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR (1 << 8)
+# define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR (1 << 12)
+# define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND (1 << 20)
+# define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND (1 << 24)
+# define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK (1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_1 0x00680884
+# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE 11:0
+# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE (1 << 12)
+# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE 27:16
+# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE (1 << 28)
+#define NV_PRAMDAC_FP_DEBUG_2 0x00680888
+#define NV_PRAMDAC_FP_DEBUG_3 0x0068088C
+
+/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
+#define NV_PRAMDAC_FP_TMDS_CONTROL 0x006808b0
+# define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE (1 << 16)
+#define NV_PRAMDAC_FP_TMDS_DATA 0x006808b4
+
+#define NV_PRAMDAC_8C0 0x006808c0
+
+/* Some kind of switch */
+#define NV_PRAMDAC_900 0x00680900
+#define NV_PRAMDAC_A20 0x00680A20
+#define NV_PRAMDAC_A24 0x00680A24
+#define NV_PRAMDAC_A34 0x00680A34
+
+#define NV_PRAMDAC_CTV 0x00680c00
+
+/* names fabricated from NV_USER_DAC info */
+#define NV_PRMDIO_PIXEL_MASK 0x006813c6
+# define NV_PRMDIO_PIXEL_MASK_MASK 0xff
+#define NV_PRMDIO_READ_MODE_ADDRESS 0x006813c7
+#define NV_PRMDIO_WRITE_MODE_ADDRESS 0x006813c8
+#define NV_PRMDIO_PALETTE_DATA 0x006813c9
+
+#define NV_PGRAPH_DEBUG_0 0x00400080
+#define NV_PGRAPH_DEBUG_1 0x00400084
+#define NV_PGRAPH_DEBUG_2_NV04 0x00400088
+#define NV_PGRAPH_DEBUG_2 0x00400620
+#define NV_PGRAPH_DEBUG_3 0x0040008c
+#define NV_PGRAPH_DEBUG_4 0x00400090
+#define NV_PGRAPH_INTR 0x00400100
+#define NV_PGRAPH_INTR_EN 0x00400140
+#define NV_PGRAPH_CTX_CONTROL 0x00400144
+#define NV_PGRAPH_CTX_CONTROL_NV04 0x00400170
+#define NV_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
+#define NV_PGRAPH_ABS_UCLIP_YMIN 0x00400540
+#define NV_PGRAPH_ABS_UCLIP_XMAX 0x00400544
+#define NV_PGRAPH_ABS_UCLIP_YMAX 0x00400548
+#define NV_PGRAPH_BETA_AND 0x00400608
+#define NV_PGRAPH_LIMIT_VIOL_PIX 0x00400610
+#define NV_PGRAPH_BOFFSET0 0x00400640
+#define NV_PGRAPH_BOFFSET1 0x00400644
+#define NV_PGRAPH_BOFFSET2 0x00400648
+#define NV_PGRAPH_BLIMIT0 0x00400684
+#define NV_PGRAPH_BLIMIT1 0x00400688
+#define NV_PGRAPH_BLIMIT2 0x0040068c
+#define NV_PGRAPH_STATUS 0x00400700
+#define NV_PGRAPH_SURFACE 0x00400710
+#define NV_PGRAPH_STATE 0x00400714
+#define NV_PGRAPH_FIFO 0x00400720
+#define NV_PGRAPH_PATTERN_SHAPE 0x00400810
+#define NV_PGRAPH_TILE 0x00400b00
+
+#define NV_PVIDEO_INTR_EN 0x00008140
+#define NV_PVIDEO_BUFFER 0x00008700
+#define NV_PVIDEO_STOP 0x00008704
+#define NV_PVIDEO_UVPLANE_BASE(buff) (0x00008800+(buff)*4)
+#define NV_PVIDEO_UVPLANE_LIMIT(buff) (0x00008808+(buff)*4)
+#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff) (0x00008820+(buff)*4)
+#define NV_PVIDEO_BASE(buff) (0x00008900+(buff)*4)
+#define NV_PVIDEO_LIMIT(buff) (0x00008908+(buff)*4)
+#define NV_PVIDEO_LUMINANCE(buff) (0x00008910+(buff)*4)
+#define NV_PVIDEO_CHROMINANCE(buff) (0x00008918+(buff)*4)
+#define NV_PVIDEO_OFFSET_BUFF(buff) (0x00008920+(buff)*4)
+#define NV_PVIDEO_SIZE_IN(buff) (0x00008928+(buff)*4)
+#define NV_PVIDEO_POINT_IN(buff) (0x00008930+(buff)*4)
+#define NV_PVIDEO_DS_DX(buff) (0x00008938+(buff)*4)
+#define NV_PVIDEO_DT_DY(buff) (0x00008940+(buff)*4)
+#define NV_PVIDEO_POINT_OUT(buff) (0x00008948+(buff)*4)
+#define NV_PVIDEO_SIZE_OUT(buff) (0x00008950+(buff)*4)
+#define NV_PVIDEO_FORMAT(buff) (0x00008958+(buff)*4)
+# define NV_PVIDEO_FORMAT_PLANAR (1 << 0)
+# define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8 (1 << 16)
+# define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY (1 << 20)
+# define NV_PVIDEO_FORMAT_MATRIX_ITURBT709 (1 << 24)
+#define NV_PVIDEO_COLOR_KEY 0x00008B00
+
+/* NV04 overlay defines from VIDIX & Haiku */
+#define NV_PVIDEO_INTR_EN_0 0x00680140
+#define NV_PVIDEO_STEP_SIZE 0x00680200
+#define NV_PVIDEO_CONTROL_Y 0x00680204
+#define NV_PVIDEO_CONTROL_X 0x00680208
+#define NV_PVIDEO_BUFF0_START_ADDRESS 0x0068020c
+#define NV_PVIDEO_BUFF0_PITCH_LENGTH 0x00680214
+#define NV_PVIDEO_BUFF0_OFFSET 0x0068021c
+#define NV_PVIDEO_BUFF1_START_ADDRESS 0x00680210
+#define NV_PVIDEO_BUFF1_PITCH_LENGTH 0x00680218
+#define NV_PVIDEO_BUFF1_OFFSET 0x00680220
+#define NV_PVIDEO_OE_STATE 0x00680224
+#define NV_PVIDEO_SU_STATE 0x00680228
+#define NV_PVIDEO_RM_STATE 0x0068022c
+#define NV_PVIDEO_WINDOW_START 0x00680230
+#define NV_PVIDEO_WINDOW_SIZE 0x00680234
+#define NV_PVIDEO_FIFO_THRES_SIZE 0x00680238
+#define NV_PVIDEO_FIFO_BURST_LENGTH 0x0068023c
+#define NV_PVIDEO_KEY 0x00680240
+#define NV_PVIDEO_OVERLAY 0x00680244
+#define NV_PVIDEO_RED_CSC_OFFSET 0x00680280
+#define NV_PVIDEO_GREEN_CSC_OFFSET 0x00680284
+#define NV_PVIDEO_BLUE_CSC_OFFSET 0x00680288
+#define NV_PVIDEO_CSC_ADJUST 0x0068028c
+
+#endif
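
The register list above mixes three conventions: plain MMIO offsets, per-buffer macros such as NV_PVIDEO_UVPLANE_BASE(buff) that index the two overlay buffers at a 4-byte stride, and bare "high:low" tokens such as NV_CIO_CR_HBE_4_0 (4:0) that name a bit range inside an 8-bit VGA CRTC register rather than a value. The high:low tokens only become usable through mask/shift helpers built on the ternary trick; a minimal sketch of that trick follows (the helper names are illustrative assumptions, not necessarily the ones the driver itself defines):

/* Illustrative helpers for "high:low" field tokens.  With field == 7:5,
 * (1 ? field) evaluates to 7 and (0 ? field) to 5, so the mask and shift
 * can be derived from the token directly. */
#define MASK(field) \
	((0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
#define XLATE(src, srclowbit, outfield) \
	((((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))

/* e.g. fold bits 4..0 of the horizontal blanking end value into CR3, and its
 * overflow bit 5 into CR5 bit 7:
 *   cr3 |= XLATE(hbe, 0, NV_CIO_CR_HBE_4_0);
 *   cr5 |= XLATE(hbe, 5, NV_CIO_CR_HRE_HBE_5);
 */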
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 601f4c0e5da..b806fdcc717 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -64,7 +64,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index d3cb676eee8..51c99fc4dd3 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
&init->agp_textures_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
}
typedef struct drm_r128_depth32 {
@@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
&depth->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+ return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
}
@@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
&stipple->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+ return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
}
typedef struct drm_r128_getparam32 {
@@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
&getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
}
drm_ioctl_compat_t *r128_compat_ioctls[] = {
@@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
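
The r128 hunks above track the DRM core's move from the BKL-protected .ioctl entry point to .unlocked_ioctl: drm_ioctl() now takes the struct file directly and handles its own serialisation, so the compat wrappers no longer fetch the inode from file->f_path or bracket the call with lock_kernel()/unlock_kernel(). A condensed sketch of the resulting compat dispatch (generic names, assuming the usual DRM compat-table layout rather than r128's exact code):

/* Post-BKL compat dispatch pattern (illustrative sketch only). */
long my_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;

	if (nr >= DRM_COMMAND_BASE &&
	    nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(my_compat_ioctls))
		fn = my_compat_ioctls[nr - DRM_COMMAND_BASE];

	/* no lock_kernel() any more; drm_ioctl() locks internally as needed */
	return fn ? fn(filp, cmd, arg) : drm_ioctl(filp, cmd, arg);
}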
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5713eedd6e..b5f5fe75e6a 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d67c42555ab..388140a7e65 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -58,6 +58,7 @@ typedef struct {
} atom_exec_context;
int atom_debug = 0;
+static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
@@ -263,10 +264,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
+ val = gctx->scratch[((gctx->fb_base + idx) / 4)];
if (print)
DEBUG("FB[0x%02X]", idx);
- printk(KERN_INFO "FB access is not implemented.\n");
- return 0;
+ break;
case ATOM_ARG_IMM:
switch (align) {
case ATOM_SRC_DWORD:
@@ -488,9 +489,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
+ gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
DEBUG("FB[0x%02X]", idx);
- printk(KERN_INFO "FB access is not implemented.\n");
- return;
+ break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
@@ -573,7 +574,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -1040,7 +1041,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1092,6 +1093,13 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
kfree(ectx.ws);
}
+void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+ mutex_lock(&ctx->mutex);
+ atom_execute_table_locked(ctx, index, params);
+ mutex_unlock(&ctx->mutex);
+}
+
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
@@ -1214,3 +1222,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
*crev = CU8(idx + 3);
return;
}
+
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+ int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+ uint16_t data_offset;
+ int usage_bytes;
+ struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+ atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+ firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+ firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+
+ usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ if (usage_bytes == 0)
+ usage_bytes = 20 * 1024;
+ /* allocate some scratch memory */
+ ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+ if (!ctx->scratch)
+ return -ENOMEM;
+ return 0;
+}
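
Two independent fixes meet in the atom.c hunks: ATOM_ARG_FB source/destination operands now read and write a real scratch buffer (ctx->scratch, sized from the VRAM_UsageByFirmware table by atom_allocate_fb_scratch(), with a 20 kB fallback) instead of being stubbed out, and table execution is serialised behind a per-context mutex. The locking uses a wrapper/worker split so that nested CALL_TABLE ops (atom_op_calltable) re-enter the worker directly and never try to take the non-recursive mutex a second time. The shape of that split, with the interpreter body elided:

/* Lock-wrapper split used above (sketch; bodies elided). */
static void atom_execute_table_locked(struct atom_context *ctx, int index,
				      uint32_t *params)
{
	/* ... run the command table; nested CALL_TABLE ops call back into
	 * this function, so no second mutex_lock() is attempted ... */
}

void atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
{
	mutex_lock(&ctx->mutex);	/* one AtomBIOS table at a time */
	atom_execute_table_locked(ctx, index, params);
	mutex_unlock(&ctx->mutex);
}

The scratch buffer has to be allocated (and ctx->mutex initialised) by the caller during device init before any table that uses FB operands runs; the hunks here add only the allocation helper, not its call site.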
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index e6eb38f2bca..47fd943f6d1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -120,6 +120,7 @@ struct card_info {
struct atom_context {
struct card_info *card;
+ struct mutex mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
@@ -132,6 +133,7 @@ struct atom_context {
uint8_t shift;
int cs_equal, cs_above;
int io_mode;
+ uint32_t *scratch;
};
extern int atom_debug;
@@ -142,6 +144,7 @@ int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"
#include "ObjectID.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6643afc36ce..91ad0d1c1b1 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
typedef struct _ATOM_HPD_INT_RECORD {
ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucPluggged_PinState;
+ UCHAR ucPlugged_PinState;
} ATOM_HPD_INT_RECORD;
typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
@@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 {
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
} ATOM_POWERPLAY_INFO_V3;
+/* New PPlib */
+/**************************************************************************/
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; //the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+// remaining 3 bits are reserved
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ UCHAR ucUnused1[3];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be larger, depending on display BW requirement.
+ USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
/**************************************************************************/
/* Following definitions are for compatiblity issue in different SW components. */
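
The new PPLib power-play structures are offset-based rather than fixed-layout: ATOM_PPLIB_POWERPLAYTABLE carries entry counts and per-entry sizes, each ATOM_PPLIB_STATE pairs one non-clock-info index with a variable number of clock-info indices, and the ASIC-specific *_CLOCK_INFO records live in a separate array. A hedged sketch of walking the state array using only the fields defined above (plain pointer arithmetic; BIOS fields are little-endian, hence the le16_to_cpu):

/* Illustrative walk of the PPLib state array; "table" points at the
 * ATOM_PPLIB_POWERPLAYTABLE inside the BIOS image. */
static void walk_pplib_states(const ATOM_PPLIB_POWERPLAYTABLE *table)
{
	const UCHAR *base = (const UCHAR *)table;
	int i;

	for (i = 0; i < table->ucNumStates; i++) {
		const ATOM_PPLIB_STATE *state = (const ATOM_PPLIB_STATE *)
			(base + le16_to_cpu(table->usStateArrayOffset) +
			 i * table->ucStateEntrySize);
		const ATOM_PPLIB_NONCLOCK_INFO *nci =
			(const ATOM_PPLIB_NONCLOCK_INFO *)
			(base + le16_to_cpu(table->usNonClockInfoArrayOffset) +
			 state->ucNonClockStateIndex * table->ucNonClockSize);

		/* nci->usClassification carries the ATOM_PPLIB_CLASSIFICATION_*
		 * bits (UI class, boot/thermal/ACPI markers) for this state */
		(void)nci;
	}
}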
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c15287a590f..260fcf59f00 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 1);
atombios_blank_crtc(crtc, 0);
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, 1);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, 0);
atombios_enable_crtc(crtc, 0);
break;
}
-
- if (mode != DRM_MODE_DPMS_OFF) {
- radeon_crtc_load_lut(crtc);
- }
}
static void
@@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type !=
DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
- if (!ASIC_IS_AVIVO(rdev)
- && (encoder->encoder_type ==
- DRM_MODE_ENCODER_LVDS))
+ if (encoder->encoder_type ==
+ DRM_MODE_ENCODER_LVDS)
pll_flags |= RADEON_PLL_USE_REF_DIV;
}
radeon_encoder = to_radeon_encoder(encoder);
@@ -500,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
else
pll = &rdev->clock.p2pll;
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div, pll_flags);
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll)
+ radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+ else
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+ &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
+ } else
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div, pll_flags);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -574,21 +583,32 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_device *rdev = dev->dev_private;
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
- struct drm_radeon_gem_object *obj_priv;
+ struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
- if (!crtc->fb)
- return -EINVAL;
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
+ /* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- obj_priv = obj->driver_private;
-
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
switch (crtc->fb->bits_per_pixel) {
case 8:
@@ -618,8 +638,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
@@ -674,7 +692,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
/* Bytes per pixel may have changed */
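
The atombios_crtc_set_base() rework above switches scanout setup to the new radeon_bo API, where a buffer must be reserved before it can be pinned, queried for tiling flags, or unpinned, and the reservation is dropped again straight away (the pin, not the reservation, keeps the scanout buffer resident); the old framebuffer, if different, gets the mirror-image unpin. Compressed to its core (a sketch of the bracket only, error paths trimmed):

/* Reserve/pin bracket for the new scanout buffer (illustrative). */
rbo = radeon_fb->obj->driver_private;
r = radeon_bo_reserve(rbo, false);		/* lock the bo */
if (unlikely(r != 0))
	return r;
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
	radeon_bo_unreserve(rbo);
	return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);			/* only the pin persists */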
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 00000000000..0d63c4436e7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,790 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "drm_dp_helper.h"
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_LINK_STATUS_SIZE 6
+#define DP_DPCD_SIZE 8
+
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+static const int dp_clocks[] = {
+ 54000, /* 1 lane, 1.62 GHz */
+ 90000, /* 1 lane, 2.70 GHz */
+ 108000, /* 2 lane, 1.62 GHz */
+ 180000, /* 2 lane, 2.70 GHz */
+ 216000, /* 4 lane, 1.62 GHz */
+ 360000, /* 4 lane, 2.70 GHz */
+};
+
+static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);
+
+/* common helper functions */
+static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int i;
+ u8 max_link_bw;
+ u8 max_lane_count;
+
+ if (!dpcd)
+ return 0;
+
+ max_link_bw = dpcd[DP_MAX_LINK_RATE];
+ max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ for (i = 0; i < num_dp_clocks; i++) {
+ if (i % 2)
+ continue;
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock) {
+ if (i < 2)
+ return 1;
+ else if (i < 4)
+ return 2;
+ else
+ return 4;
+ }
+ }
+ break;
+ case DP_LINK_BW_2_7:
+ for (i = 0; i < num_dp_clocks; i++) {
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock) {
+ if (i < 2)
+ return 1;
+ else if (i < 4)
+ return 2;
+ else
+ return 4;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int i;
+ u8 max_link_bw;
+ u8 max_lane_count;
+
+ if (!dpcd)
+ return 0;
+
+ max_link_bw = dpcd[DP_MAX_LINK_RATE];
+ max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ default:
+ for (i = 0; i < num_dp_clocks; i++) {
+ if (i % 2)
+ continue;
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock)
+ return 162000;
+ }
+ break;
+ case DP_LINK_BW_2_7:
+ for (i = 0; i < num_dp_clocks; i++) {
+ switch (max_lane_count) {
+ case 1:
+ if (i > 1)
+ return 0;
+ break;
+ case 2:
+ if (i > 3)
+ return 0;
+ break;
+ case 4:
+ default:
+ break;
+ }
+ if (dp_clocks[i] > mode_clock)
+ return (i % 2) ? 270000 : 162000;
+ }
+ }
+
+ return 0;
+}
+
+int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+ int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
+ int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
+
+ if ((lanes == 0) || (bw == 0))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ u8 l = dp_link_status(link_status, i);
+ return (l >> s) & 0xf;
+}
+
+static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count)
+{
+ u8 lane_align;
+ u8 lane_status;
+ int lane;
+
+ lane_align = dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+/* XXX fix me -- chip specific */
+#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
+static u8 dp_pre_emphasis_max(u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ u8 train_set[4])
+{
+ u8 v = 0;
+ u8 p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+ DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+ lane,
+ voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= DP_VOLTAGE_MAX)
+ v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= dp_pre_emphasis_max(v))
+ p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
+ voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+ pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
+
+
+/* radeon aux chan functions */
+bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
+ int num_bytes, u8 *read_byte,
+ u8 read_buf_len, u8 delay)
+{
+ struct drm_device *dev = chan->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+ unsigned char *base;
+
+ memset(&args, 0, sizeof(args));
+
+ base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+ memcpy(base, req_bytes, num_bytes);
+
+ args.lpAuxRequest = 0;
+ args.lpDataOut = 16;
+ args.ucDataOutLen = 0;
+ args.ucChannelID = chan->rec.i2c_id;
+ args.ucDelay = delay / 10;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ if (args.ucReplyStatus) {
+ DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+ req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
+ chan->rec.i2c_id, args.ucReplyStatus);
+ return false;
+ }
+
+ if (args.ucDataOutLen && read_byte && read_buf_len) {
+ if (read_buf_len < args.ucDataOutLen) {
+ DRM_ERROR("Buffer to small for return answer %d %d\n",
+ read_buf_len, args.ucDataOutLen);
+ return false;
+ }
+ {
+ int len = min(read_buf_len, args.ucDataOutLen);
+ memcpy(read_byte, base + 16, len);
+ }
+ }
+ return true;
+}
+
+bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
+ uint8_t send_bytes, uint8_t *send)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[20];
+ u8 msg_len, dp_msg_len;
+ bool ret;
+
+ dp_msg_len = 4;
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_WRITE << 4;
+ dp_msg_len += send_bytes;
+ msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
+
+ if (send_bytes > 16)
+ return false;
+
+ memcpy(&msg[4], send, send_bytes);
+ msg_len = 4 + send_bytes;
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
+ return ret;
+}
+
+bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
+ uint8_t delay, uint8_t expected_bytes,
+ uint8_t *read_p)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[20];
+ u8 msg_len, dp_msg_len;
+ bool ret = false;
+ msg_len = 4;
+ dp_msg_len = 4;
+ msg[0] = address;
+ msg[1] = address >> 8;
+ msg[2] = AUX_NATIVE_READ << 4;
+ msg[3] = (dp_msg_len) << 4;
+ msg[3] |= expected_bytes - 1;
+
+ ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
+ return ret;
+}
+
+/* radeon dp functions */
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
+ uint8_t ucconfig, uint8_t lane_num)
+{
+ DP_ENCODER_SERVICE_PARAMETERS args;
+ int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+ memset(&args, 0, sizeof(args));
+ args.ucLinkClock = dp_clock / 10;
+ args.ucConfig = ucconfig;
+ args.ucAction = action;
+ args.ucLaneNum = lane_num;
+ args.ucStatus = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+ dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 msg[25];
+ int ret;
+
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
+ if (ret) {
+ memcpy(dig_connector->dpcd, msg, 8);
+ {
+ int i;
+ DRM_DEBUG("DPCD: ");
+ for (i = 0; i < 8; i++)
+ DRM_DEBUG("%02x ", msg[i]);
+ DRM_DEBUG("\n");
+ }
+ return true;
+ }
+ dig_connector->dpcd[0] = 0;
+ return false;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ dig_connector->dp_clock =
+ dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
+ dig_connector->dp_lane_count =
+ dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
+}
+
+int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+ return dp_mode_valid(dig_connector->dpcd, mode->clock);
+}
+
+static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
+ u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
+ DP_LINK_STATUS_SIZE, link_status);
+ if (!ret) {
+ DRM_ERROR("displayport link status failed\n");
+ return false;
+ }
+
+ DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+ return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ return false;
+ if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
+ return false;
+ return true;
+}
+
+static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+ if (dig_connector->dpcd[0] >= 0x11) {
+ radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
+ &power_state);
+ }
+}
+
+static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
+ &downspread);
+}
+
+static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
+ u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
+ link_configuration);
+}
+
+static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
+ struct drm_encoder *encoder,
+ u8 train_set[4])
+{
+ struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+ int i;
+
+ for (i = 0; i < dig_connector->dp_lane_count; i++)
+ atombios_dig_transmitter_setup(encoder,
+ ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+ i, train_set[i]);
+
+ radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
+ dig_connector->dp_lane_count, train_set);
+}
+
+static void dp_set_training(struct radeon_connector *radeon_connector,
+ u8 training)
+{
+ radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
+ 1, &training);
+}
+
+void dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+ int enc_id = 0;
+ bool clock_recovery, channel_eq;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ u8 tries, voltage;
+ u8 train_set[4];
+ int i;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (!radeon_encoder->enc_priv)
+ return;
+ dig = radeon_encoder->enc_priv;
+
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ if (ASIC_IS_DCE32(rdev)) {
+ if (dig->dig_block)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_LINK_A;
+ } else {
+ if (dig_connector->linkb)
+ enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
+ else
+ enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
+ }
+
+ memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ if (dig_connector->dp_clock == 270000)
+ link_configuration[0] = DP_LINK_BW_2_7;
+ else
+ link_configuration[0] = DP_LINK_BW_1_62;
+ link_configuration[1] = dig_connector->dp_lane_count;
+ if (dig_connector->dpcd[0] >= 0x11)
+ link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ /* power up the sink */
+ dp_set_power(radeon_connector, DP_SET_POWER_D0);
+ /* disable the training pattern on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+ /* set link bw and lanes on the sink */
+ dp_set_link_bw_lanes(radeon_connector, link_configuration);
+ /* disable downspread on the sink */
+ dp_set_downspread(radeon_connector, 0);
+ /* start training on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+ dig_connector->dp_clock, enc_id, 0);
+ /* set training pattern 1 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 0);
+
+ /* set initial vs/emph */
+ memset(train_set, 0, 4);
+ udelay(400);
+ /* set training pattern 1 on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
+
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+ /* clock recovery loop */
+ clock_recovery = false;
+ tries = 0;
+ voltage = 0xff;
+ for (;;) {
+ udelay(100);
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ break;
+
+ if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ for (i = 0; i < dig_connector->dp_lane_count; i++) {
+ if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ }
+ if (i == dig_connector->dp_lane_count) {
+ DRM_ERROR("clock recovery reached max voltage\n");
+ break;
+ }
+
+ if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5) {
+ DRM_ERROR("clock recovery tried 5 times\n");
+ break;
+ }
+ } else
+ tries = 0;
+
+ voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+ }
+ if (!clock_recovery)
+ DRM_ERROR("clock recovery failed\n");
+ else
+ DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
+ train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+
+ /* set training pattern 2 on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
+ /* set training pattern 2 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 1);
+
+ /* channel equalization loop */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+ udelay(400);
+ if (!atom_dp_get_link_status(radeon_connector, link_status))
+ break;
+
+ if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times */
+ if (tries > 5) {
+ DRM_ERROR("channel eq failed: 5 tries\n");
+ break;
+ }
+
+ /* Compute new train_set as requested by sink */
+ dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+ dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+ tries++;
+ }
+
+ if (!channel_eq)
+ DRM_ERROR("channel eq failed\n");
+ else
+ DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
+ train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+ (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+ >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+ /* disable the training pattern on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dig_connector->dp_clock, enc_id, 0);
+}
+
+int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+ int ret = 0;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_len, dp_msg_len;
+ int reply_bytes;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[2] = AUX_I2C_READ << 4;
+ else
+ msg[2] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[2] |= AUX_I2C_MOT << 4;
+
+ msg[0] = address;
+ msg[1] = address >> 8;
+
+ reply_bytes = 1;
+
+ msg_len = 4;
+ dp_msg_len = 3;
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[4] = write_byte;
+ msg_len++;
+ dp_msg_len += 2;
+ break;
+ case MODE_I2C_READ:
+ dp_msg_len += 1;
+ break;
+ default:
+ break;
+ }
+
+ msg[3] = (dp_msg_len) << 4;
+ ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
+
+ if (ret) {
+ if (read_byte)
+ *read_byte = reply[0];
+ return reply_bytes;
+ }
+ return -EREMOTEIO;
+}
+
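
The dp_clocks[] table at the top of this new file is just the 24 bpp capacity of each (lane count, link rate) pair: after 8b/10b encoding every lane delivers one data byte per link-symbol-clock cycle (162 MHz for the 1.62 Gb/s rate, 270 MHz for 2.7 Gb/s), and a 24 bpp pixel needs three bytes, so the maximum pixel clock is lane_count * link_clock / 3. An equivalent closed form, offered only as an illustration of the table (not part of the patch):

/* Closed form behind dp_clocks[] for 24 bpp modes (illustrative). */
static int dp_max_pixel_clock_khz(int lane_count, int link_clock_khz)
{
	/* one data byte per lane per link-clock cycle, three bytes per pixel */
	return lane_count * link_clock_khz / 3;
}
/* dp_max_pixel_clock_khz(1, 162000) == 54000   (first table entry)
 * dp_max_pixel_clock_khz(4, 270000) == 360000  (last table entry)  */

dp_mode_valid(), dp_lanes_for_mode_clock() and dp_link_clock_for_mode_clock() then pick the cheapest table entry whose capacity exceeds the requested mode clock, and dp_link_train() runs the standard two-phase DisplayPort training (clock recovery with pattern 1, channel equalisation with pattern 2) against the sink's DPCD over the AtomBIOS AUX-channel table.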
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9e93eabcf1..71727460968 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
*/
+/* hpd for digital panel detect/disconnect */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r100_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(RADEON_FP_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP_DETECT_INT_POL;
+ WREG32(RADEON_FP_GEN_CNTL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(RADEON_FP2_GEN_CNTL);
+ if (connected)
+ tmp &= ~RADEON_FP2_DETECT_INT_POL;
+ else
+ tmp |= RADEON_FP2_DETECT_INT_POL;
+ WREG32(RADEON_FP2_GEN_CNTL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+void r100_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ rdev->irq.hpd[1] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ r100_irq_set(rdev);
+}
+
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ rdev->irq.hpd[1] = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
/*
* PCI GART
*/
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return radeon_gart_table_ram_alloc(rdev);
}
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+}
+
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
WREG32(RADEON_AIC_HI_ADDR, tmp);
- /* Enable bus mastering */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev)
if (rdev->irq.crtc_vblank_int[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
+ if (rdev->irq.hpd[0]) {
+ tmp |= RADEON_FP_DETECT_MASK;
+ }
+ if (rdev->irq.hpd[1]) {
+ tmp |= RADEON_FP2_DETECT_MASK;
+ }
WREG32(RADEON_GEN_INT_CNTL, tmp);
return 0;
}
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev)
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
- uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
- RADEON_CRTC2_VBLANK_STAT;
+ uint32_t irq_mask = RADEON_SW_INT_TEST |
+ RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+ RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
int r100_irq_process(struct radeon_device *rdev)
{
uint32_t status, msi_rearm;
+ bool queue_hotplug = false;
status = r100_irq_ack(rdev);
if (!status) {
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev)
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
}
+ if (status & RADEON_FP_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (status & RADEON_FP2_DETECT_STAT) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
status = r100_irq_ack(rdev);
}
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+ dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
return r;
}
}
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev)
void r100_wb_fini(struct radeon_device *rdev)
{
+ int r;
+
r100_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
- radeon_object_unref(&rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+ return;
+ }
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -1250,7 +1374,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_ARGB4444:
case RADEON_TXFORMAT_VYUY422:
case RADEON_TXFORMAT_YVYU422:
- case RADEON_TXFORMAT_DXT1:
case RADEON_TXFORMAT_SHADOW16:
case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88:
@@ -1258,12 +1381,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
break;
case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888:
- case RADEON_TXFORMAT_DXT23:
- case RADEON_TXFORMAT_DXT45:
case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4;
break;
+ case RADEON_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case RADEON_TXFORMAT_DXT23:
+ case RADEON_TXFORMAT_DXT45:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
@@ -1288,17 +1418,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj)
+ struct radeon_bo *robj)
{
unsigned idx;
u32 value;
idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2);
- if ((value + 1) > radeon_object_size(robj)) {
+ if ((value + 1) > radeon_bo_size(robj)) {
DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
"(need %u have %lu) !\n",
value + 1,
- radeon_object_size(robj));
+ radeon_bo_size(robj));
return -EINVAL;
}
return 0;
@@ -1583,6 +1713,14 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
}
+void r100_hdp_flush(struct radeon_device *rdev)
+{
+ u32 tmp;
+ tmp = RREG32(RADEON_HOST_PATH_CNTL);
+ tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
+ WREG32(RADEON_HOST_PATH_CNTL, tmp);
+}
+
void r100_hdp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -1650,6 +1788,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
return 0;
}
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+ /* set these so they don't interfere with anything */
+ WREG32(RADEON_OV0_SCALE_CNTL, 0);
+ WREG32(RADEON_SUBPIC_CNTL, 0);
+ WREG32(RADEON_VIPH_CONTROL, 0);
+ WREG32(RADEON_I2C_CNTL_1, 0);
+ WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+ WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+ WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+}
/*
* VRAM info
@@ -2588,13 +2737,14 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+ DRM_ERROR("compress format %d\n", t->compress_format);
}
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
- struct radeon_object *cube_robj;
+ struct radeon_bo *cube_robj;
unsigned long size;
for (face = 0; face < 5; face++) {
@@ -2607,9 +2757,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
size += track->textures[idx].cube_info[face].offset;
- if (size > radeon_object_size(cube_robj)) {
+ if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_object_size(cube_robj));
+ size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
@@ -2617,10 +2767,40 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
return 0;
}
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+ int block_width, block_height, block_bytes;
+ int wblocks, hblocks;
+ int min_wblocks;
+ int sz;
+
+ block_width = 4;
+ block_height = 4;
+
+ switch (compress_format) {
+ case R100_TRACK_COMP_DXT1:
+ block_bytes = 8;
+ min_wblocks = 4;
+ break;
+ default:
+ case R100_TRACK_COMP_DXT35:
+ block_bytes = 16;
+ min_wblocks = 2;
+ break;
+ }
+
+ hblocks = (h + block_height - 1) / block_height;
+ wblocks = (w + block_width - 1) / block_width;
+ if (wblocks < min_wblocks)
+ wblocks = min_wblocks;
+ sz = wblocks * hblocks * block_bytes;
+ return sz;
+}
+
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h;
int ret;
@@ -2654,9 +2834,15 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
- size += w * h;
+ if (track->textures[u].compress_format) {
+ /* compressed textures are block based */
+ size += r100_track_compress_size(track->textures[u].compress_format,
+ w, h);
+ } else
+ size += w * h;
}
size *= track->textures[u].cpp;
+
switch (track->textures[u].tex_coord_type) {
case 0:
break;
@@ -2676,9 +2862,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
- if (size > radeon_object_size(robj)) {
+ if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_object_size(robj));
+ "%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
@@ -2695,15 +2881,19 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_cb; i++) {
if (track->cb[i].robj == NULL) {
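+			/* with fastfill off, no color channels enabled and blend
+			 * reads disabled, nothing touches this color buffer, so a
+			 * missing bo is harmless */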
+ if (!(track->fastfill || track->color_channel_mask ||
+ track->blend_read_enable)) {
+ continue;
+ }
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
- if (size > radeon_object_size(track->cb[i].robj)) {
+ if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
- radeon_object_size(track->cb[i].robj));
+ radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
@@ -2717,10 +2907,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
- if (size > radeon_object_size(track->zb.robj)) {
+ if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
- radeon_object_size(track->zb.robj));
+ radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
@@ -2738,11 +2928,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i,
- size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
@@ -2756,10 +2947,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
"bound\n", prim_walk, i);
return -EINVAL;
}
- if (size > radeon_object_size(track->arrays[i].robj)) {
- DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
- "have %lu dwords\n", prim_walk, i, size >> 2,
- radeon_object_size(track->arrays[i].robj) >> 2);
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
return -EINVAL;
}
}
@@ -2821,6 +3014,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
@@ -3101,6 +3295,9 @@ static int r100_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
@@ -3108,13 +3305,13 @@ static int r100_startup(struct radeon_device *rdev)
r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
+ r100_enable_bm(rdev);
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3347,8 @@ int r100_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r100_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r100_startup(rdev);
}
@@ -3174,7 +3373,7 @@ void r100_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -3242,14 +3441,14 @@ int r100_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r100_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r100_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -3264,7 +3463,7 @@ int r100_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCI) {
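As a quick sanity check of the block math in r100_track_compress_size() added above, here is a minimal standalone sketch plus a few worked values; the helper name dxt_size() is illustrative only:

	/* minimal standalone sketch mirroring r100_track_compress_size() */
	static int dxt_size(int block_bytes, int min_wblocks, int w, int h)
	{
		int wblocks = (w + 3) / 4;
		int hblocks = (h + 3) / 4;

		if (wblocks < min_wblocks)
			wblocks = min_wblocks;
		return wblocks * hblocks * block_bytes;
	}

	/* dxt_size(8, 4, 64, 64)  == 2048  -- DXT1: 16x16 blocks of 8 bytes
	 * dxt_size(16, 2, 64, 64) == 4096  -- DXT3/5: 16-byte blocks
	 * dxt_size(8, 4, 1, 1)    == 32    -- small mips still padded to min_wblocks */
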
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d76a89..b27a6999d21 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,30 @@
* CS functions
*/
struct r100_cs_track_cb {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned pitch;
unsigned cpp;
unsigned offset;
};
struct r100_cs_track_array {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
unsigned esize;
};
struct r100_cs_cube_info {
- struct radeon_object *robj;
- unsigned offset;
+ struct radeon_bo *robj;
+ unsigned offset;
unsigned width;
unsigned height;
};
+#define R100_TRACK_COMP_NONE 0
+#define R100_TRACK_COMP_DXT1 1
+#define R100_TRACK_COMP_DXT35 2
+
struct r100_cs_track_texture {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
unsigned pitch;
unsigned width;
@@ -44,6 +48,7 @@ struct r100_cs_track_texture {
bool enabled;
bool roundup_w;
bool roundup_h;
+ unsigned compress_format;
};
struct r100_cs_track_limits {
@@ -62,13 +67,15 @@ struct r100_cs_track {
unsigned immd_dwords;
unsigned num_arrays;
unsigned max_indx;
+ unsigned color_channel_mask;
struct r100_cs_track_array arrays[11];
struct r100_cs_track_cb cb[R300_MAX_CB];
struct r100_cs_track_cb zb;
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
bool z_enabled;
bool separate_cube;
-
+ bool fastfill;
+ bool blend_read_enable;
};
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index eb740fc3549..20942127c46 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -401,7 +401,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
break;
- case R200_TXFORMAT_DXT1:
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
case R200_TXFORMAT_RGB565:
@@ -418,9 +417,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_ABGR8888:
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
+ track->textures[i].cpp = 4;
+ break;
+ case R200_TXFORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
case R200_TXFORMAT_DXT23:
case R200_TXFORMAT_DXT45:
- track->textures[i].cpp = 4;
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2f43ee8e404..3f2cc9e2e8d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -681,7 +686,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= R300_TXO_MACRO_TILE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ tile_flags |= R300_TXO_MICRO_TILE;
+
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+ tmp |= tile_flags;
+ ib[idx] = tmp;
track->textures[i].robj = reloc->robj;
break;
/* Tracked registers */
@@ -847,7 +860,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_Z6Y5X5:
case R300_TX_FORMAT_W4Z4Y4X4:
case R300_TX_FORMAT_W1Z5Y5X5:
- case R300_TX_FORMAT_DXT1:
case R300_TX_FORMAT_D3DMFT_CxV8U8:
case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8:
@@ -861,8 +873,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x17:
case R300_TX_FORMAT_FL_I32:
case 0x1e:
- case R300_TX_FORMAT_DXT3:
- case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 4;
break;
case R300_TX_FORMAT_W16Z16Y16X16:
@@ -873,6 +883,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16;
break;
+ case R300_TX_FORMAT_DXT1:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+ break;
+ case R300_TX_FORMAT_ATI2N:
+ if (p->rdev->family < CHIP_R420) {
+ DRM_ERROR("Invalid texture format %u\n",
+ (idx_value & 0x1F));
+ return -EINVAL;
+ }
+ /* The same rules apply as for DXT3/5. */
+ /* Fall through. */
+ case R300_TX_FORMAT_DXT3:
+ case R300_TX_FORMAT_DXT5:
+ track->textures[i].cpp = 1;
+ track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ break;
default:
DRM_ERROR("Invalid texture format %u\n",
(idx_value & 0x1F));
@@ -932,6 +959,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].width_11 = tmp;
tmp = ((idx_value >> 16) & 1) << 11;
track->textures[i].height_11 = tmp;
+
+ /* ATI1N */
+ if (idx_value & (1 << 14)) {
+ /* The same rules apply as for DXT1. */
+ track->textures[i].compress_format =
+ R100_TRACK_COMP_DXT1;
+ }
+ } else if (idx_value & (1 << 14)) {
+ DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+ return -EINVAL;
}
break;
case 0x4480:
@@ -973,6 +1010,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
+ case 0x4e0c:
+ /* RB3D_COLOR_CHANNEL_MASK */
+ track->color_channel_mask = idx_value;
+ break;
+ case 0x4d1c:
+ /* ZB_BW_CNTL */
+ track->fastfill = !!(idx_value & (1 << 2));
+ break;
+ case 0x4e04:
+ /* RB3D_BLENDCNTL */
+ track->blend_read_enable = !!(idx_value & (1 << 2));
+ break;
case 0x4be8:
/* valid register only on RV530 */
if (p->rdev->family == CHIP_RV530)
@@ -1181,6 +1230,9 @@ static int r300_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
@@ -1193,13 +1245,18 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
}
+
+ if (rdev->family == CHIP_R300 ||
+ rdev->family == CHIP_R350 ||
+ rdev->family == CHIP_RV350)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCI) {
r = r100_pci_gart_enable(rdev);
if (r)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -1237,6 +1294,8 @@ int r300_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r300_startup(rdev);
}
@@ -1265,7 +1324,7 @@ void r300_fini(struct radeon_device *rdev)
r100_pci_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1303,14 +1362,14 @@ int r300_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Set asic errata */
r300_errata(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
r300_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -1325,7 +1384,7 @@ int r300_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index cb2e470f97d..34bffa0e4b7 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
int sz;
int addr;
int type;
- int clamp;
+ int isclamp;
int stride;
RING_LOCALS;
@@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
- clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+ isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
addr |= (type << 16);
- addr |= (clamp << 17);
+ addr |= (isclamp << 17);
stride = type ? 4 : 6;
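To make the packing above concrete, here is a worked example assuming both the type and clamp flags are set in adrhi_flags, the high address bit is clear and adrlo is 0x20; the shift positions come straight from the code above:

	/* worked example of the r500fp constant address packing above */
	addr  = ((0 & 1) << 8) | 0x20;	/* adrhi address bit clear, adrlo -> 0x20 */
	addr |= 1 << 16;		/* R500FP_CONSTANT_TYPE requested         */
	addr |= 1 << 17;		/* R500FP_CONSTANT_CLAMP requested        */
	/* addr == 0x00030020, and stride = 4 dwords per constant for this type */
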
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 4b7afef35a6..1735a2b6958 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -900,6 +900,7 @@
# define R300_TX_FORMAT_FL_I32 0x1B
# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
+# define R300_TX_FORMAT_ATI2N 0x1F
/* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1cefdbcc085..c05a7270cf0 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
{
int r;
+ /* set common regs */
+ r100_set_common_regs(rdev);
+ /* program mc */
r300_mc_program(rdev);
/* Resume clock */
r420_clock_resume(rdev);
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
}
r420_pipes_init(rdev);
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -229,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
}
/* Resume clock after posting */
r420_clock_resume(rdev);
-
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r420_startup(rdev);
}
@@ -258,7 +261,7 @@ void r420_fini(struct radeon_device *rdev)
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
if (rdev->is_atom_bios) {
radeon_atombios_fini(rdev);
} else {
@@ -301,14 +304,9 @@ int r420_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- if (rdev->is_atom_bios) {
- atom_asic_init(rdev->mode_info.atom_context);
- } else {
- radeon_combios_asic_init(rdev->ddev);
- }
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -331,10 +329,13 @@ int r420_init(struct radeon_device *rdev)
return r;
}
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r) {
return r;
}
+ if (rdev->family == CHIP_R420)
+ r100_enable_bm(rdev);
+
if (rdev->flags & RADEON_IS_PCIE) {
r = rv370_pcie_gart_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 7baa7395556..74ad89bdf2b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -716,6 +716,8 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
+#define AVIVO_DC_GPIO_HPD_A 0x7e94
+
#define AVIVO_GPIO_0 0x7e30
#define AVIVO_GPIO_1 0x7e40
#define AVIVO_GPIO_2 0x7e50
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f7435185c0a..0f3843b6dac 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -221,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return r520_startup(rdev);
}
@@ -254,6 +255,9 @@ int r520_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
if (!radeon_card_posted(rdev) && rdev->bios) {
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
@@ -277,7 +281,7 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6740ed24358..a0ac3c134b1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
+MODULE_FIRMWARE("radeon/R600_rlc.bin");
+MODULE_FIRMWARE("radeon/R700_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+/* hpd for digital panel detect/disconnect */
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_4:
+ if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_6:
+ if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ case RADEON_HPD_3:
+ if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ }
+ return connected;
+}
+
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = r600_hpd_sense(rdev, hpd);
+
+ if (ASIC_IS_DCE3(rdev)) {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_4:
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_5:
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_6:
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HPDx_INT_POLARITY;
+ else
+ tmp |= DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_3:
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ if (connected)
+ tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ else
+ tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void r600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+ if (ASIC_IS_DCE32(rdev))
+ tmp |= DC_HPDx_EN;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, tmp);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, tmp);
+ rdev->irq.hpd[2] = true;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, tmp);
+ rdev->irq.hpd[3] = true;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, tmp);
+ rdev->irq.hpd[4] = true;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, tmp);
+ rdev->irq.hpd[5] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[1] = true;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
+ rdev->irq.hpd[2] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ r600_irq_set(rdev);
+}
+
+void r600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HPD2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HPD3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ case RADEON_HPD_4:
+ WREG32(DC_HPD4_CONTROL, 0);
+ rdev->irq.hpd[3] = false;
+ break;
+ /* DCE 3.2 */
+ case RADEON_HPD_5:
+ WREG32(DC_HPD5_CONTROL, 0);
+ rdev->irq.hpd[4] = false;
+ break;
+ case RADEON_HPD_6:
+ WREG32(DC_HPD6_CONTROL, 0);
+ rdev->irq.hpd[5] = false;
+ break;
+ default:
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
+ rdev->irq.hpd[1] = false;
+ break;
+ case RADEON_HPD_3:
+ WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
+ rdev->irq.hpd[2] = false;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
/*
* R600 PCIE GART
*/
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -1101,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA);
}
+void r600_hdp_flush(struct radeon_device *rdev)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
/*
* CP & Ring
@@ -1110,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev)
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
-int r600_cp_init_microcode(struct radeon_device *rdev)
+int r600_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
- size_t pfp_req_size, me_req_size;
+ const char *rlc_chip_name;
+ size_t pfp_req_size, me_req_size, rlc_req_size;
char fw_name[30];
int err;
@@ -1128,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
}
switch (rdev->family) {
- case CHIP_R600: chip_name = "R600"; break;
- case CHIP_RV610: chip_name = "RV610"; break;
- case CHIP_RV630: chip_name = "RV630"; break;
- case CHIP_RV620: chip_name = "RV620"; break;
- case CHIP_RV635: chip_name = "RV635"; break;
- case CHIP_RV670: chip_name = "RV670"; break;
+ case CHIP_R600:
+ chip_name = "R600";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV610:
+ chip_name = "RV610";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV630:
+ chip_name = "RV630";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV620:
+ chip_name = "RV620";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV635:
+ chip_name = "RV635";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV670:
+ chip_name = "RV670";
+ rlc_chip_name = "R600";
+ break;
case CHIP_RS780:
- case CHIP_RS880: chip_name = "RS780"; break;
- case CHIP_RV770: chip_name = "RV770"; break;
+ case CHIP_RS880:
+ chip_name = "RS780";
+ rlc_chip_name = "R600";
+ break;
+ case CHIP_RV770:
+ chip_name = "RV770";
+ rlc_chip_name = "R700";
+ break;
case CHIP_RV730:
- case CHIP_RV740: chip_name = "RV730"; break;
- case CHIP_RV710: chip_name = "RV710"; break;
+ case CHIP_RV740:
+ chip_name = "RV730";
+ rlc_chip_name = "R700";
+ break;
+ case CHIP_RV710:
+ chip_name = "RV710";
+ rlc_chip_name = "R700";
+ break;
default: BUG();
}
if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
+ rlc_req_size = R700_RLC_UCODE_SIZE * 4;
} else {
pfp_req_size = PFP_UCODE_SIZE * 4;
me_req_size = PM4_UCODE_SIZE * 12;
+ rlc_req_size = RLC_UCODE_SIZE * 4;
}
- DRM_INFO("Loading %s CP Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1175,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
rdev->me_fw->size, fw_name);
err = -EINVAL;
}
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+
out:
platform_device_unregister(pdev);
@@ -1187,6 +1519,8 @@ out:
rdev->pfp_fw = NULL;
release_firmware(rdev->me_fw);
rdev->me_fw = NULL;
+ release_firmware(rdev->rlc_fw);
+ rdev->rlc_fw = NULL;
}
return err;
}
@@ -1381,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev)
void r600_wb_disable(struct radeon_device *rdev)
{
+ int r;
+
WREG32(SCRATCH_UMSK, 0);
if (rdev->wb.wb_obj) {
- radeon_object_kunmap(rdev->wb.wb_obj);
- radeon_object_unpin(rdev->wb.wb_obj);
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
}
}
@@ -1392,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev)
{
r600_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- radeon_object_unref(&rdev->wb.wb_obj);
+ radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
}
@@ -1403,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ r600_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->wb.gpu_addr);
if (r) {
- dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
- r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
if (r) {
- dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
r600_wb_fini(rdev);
return r;
}
@@ -1433,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ /* Also consider EVENT_WRITE_EOP; it handles the interrupts, timestamps and events */
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
+ /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(rdev, RB_INT_STAT);
}
int r600_copy_dma(struct radeon_device *rdev,
@@ -1459,18 +1810,6 @@ int r600_copy_blit(struct radeon_device *rdev,
return 0;
}
-int r600_irq_process(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
-int r600_irq_set(struct radeon_device *rdev)
-{
- /* FIXME: implement */
- return 0;
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -1506,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
{
int r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
@@ -1516,12 +1863,33 @@ int r600_startup(struct radeon_device *rdev)
}
r600_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ if (!rdev->r600_blit.shader_obj) {
+ r = r600_blit_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
if (r) {
- DRM_ERROR("failed to pin blit object %d\n", r);
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
return r;
}
+ r600_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -1583,13 +1951,19 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
@@ -1627,7 +2001,11 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -1650,31 +2028,25 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
r = r600_pcie_gart_init(rdev);
if (r)
return r;
rdev->accel_working = true;
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failled blitter (%d).\n", r);
- return r;
- }
-
r = r600_startup(rdev);
if (r) {
r600_suspend(rdev);
@@ -1686,15 +2058,19 @@ int r600_init(struct radeon_device *rdev)
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
+
+ r = r600_audio_init(rdev);
+ if (r)
+ return r; /* TODO error handling */
return 0;
}
@@ -1703,7 +2079,10 @@ void r600_fini(struct radeon_device *rdev)
/* Suspend operations */
r600_suspend(rdev);
+ r600_audio_fini(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
@@ -1712,7 +2091,7 @@ void r600_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -1798,8 +2177,657 @@ int r600_ib_test(struct radeon_device *rdev)
return r;
}
+/*
+ * Interrupts
+ *
+ * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
+ * the same as the CP ring buffer, but in reverse. Rather than the CPU
+ * writing to the ring and the GPU consuming, the GPU writes to the ring
+ * and host consumes. As the host irq handler processes interrupts, it
+ * increments the rptr. When the rptr catches up with the wptr, all the
+ * current interrupts have been processed.
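+ *
+ * A consumer that drains the ring looks roughly like this (sketch only;
+ * each ring entry is assumed to be 16 bytes and ptr_mask wraps the ring):
+ *	while (rptr != wptr) {
+ *		src_id = le32_to_cpu(ring[rptr / 4]) & 0xff;
+ *		... dispatch on src_id ...
+ *		rptr = (rptr + 16) & ptr_mask;
+ *	}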
+ */
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+{
+ u32 rb_bufsz;
+ /* Align ring size */
+ rb_bufsz = drm_order(ring_size / 4);
+ ring_size = (1 << rb_bufsz) * 4;
+ rdev->ih.ring_size = ring_size;
+ rdev->ih.align_mask = 4 - 1;
+}
+
+static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+{
+ int r;
+
+ rdev->ih.ring_size = ring_size;
+ /* Allocate ring buffer */
+ if (rdev->ih.ring_obj == NULL) {
+ r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ih.ring_obj,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->ih.gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->ih.ring_obj,
+ (void **)&rdev->ih.ring);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ if (r) {
+ DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
+ return r;
+ }
+ }
+ rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
+ rdev->ih.rptr = 0;
+
+ return 0;
+}
+
+static void r600_ih_ring_fini(struct radeon_device *rdev)
+{
+ int r;
+ if (rdev->ih.ring_obj) {
+ r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ih.ring_obj);
+ radeon_bo_unpin(rdev->ih.ring_obj);
+ radeon_bo_unreserve(rdev->ih.ring_obj);
+ }
+ radeon_bo_unref(&rdev->ih.ring_obj);
+ rdev->ih.ring = NULL;
+ rdev->ih.ring_obj = NULL;
+ }
+}
+
+static void r600_rlc_stop(struct radeon_device *rdev)
+{
+
+ if (rdev->family >= CHIP_RV770) {
+ /* r7xx asics need to soft reset RLC before halting */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(15000);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+ }
+
+ WREG32(RLC_CNTL, 0);
+}
+
+static void r600_rlc_start(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, RLC_ENABLE);
+}
+
+static int r600_rlc_init(struct radeon_device *rdev)
+{
+ u32 i;
+ const __be32 *fw_data;
+
+ if (!rdev->rlc_fw)
+ return -EINVAL;
+
+ r600_rlc_stop(rdev);
+
+ WREG32(RLC_HB_BASE, 0);
+ WREG32(RLC_HB_CNTL, 0);
+ WREG32(RLC_HB_RPTR, 0);
+ WREG32(RLC_HB_WPTR, 0);
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ WREG32(RLC_MC_CNTL, 0);
+ WREG32(RLC_UCODE_CNTL, 0);
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->family >= CHIP_RV770) {
+ for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else {
+ for (i = 0; i < RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ }
+ WREG32(RLC_UCODE_ADDR, 0);
+
+ r600_rlc_start(rdev);
+
+ return 0;
+}
+
+static void r600_enable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_cntl = RREG32(IH_CNTL);
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+
+ ih_cntl |= ENABLE_INTR;
+ ih_rb_cntl |= IH_RB_ENABLE;
+ WREG32(IH_CNTL, ih_cntl);
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ rdev->ih.enabled = true;
+}
+
+static void r600_disable_interrupts(struct radeon_device *rdev)
+{
+ u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
+ u32 ih_cntl = RREG32(IH_CNTL);
+
+ ih_rb_cntl &= ~IH_RB_ENABLE;
+ ih_cntl &= ~ENABLE_INTR;
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+ WREG32(IH_CNTL, ih_cntl);
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+ rdev->ih.enabled = false;
+ rdev->ih.wptr = 0;
+ rdev->ih.rptr = 0;
+}
+
+static void r600_disable_interrupt_state(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ WREG32(CP_INT_CNTL, 0);
+ WREG32(GRBM_INT_CNTL, 0);
+ WREG32(DxMODE_INT_MASK, 0);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ if (ASIC_IS_DCE32(rdev)) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ } else {
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ }
+}
+
+int r600_irq_init(struct radeon_device *rdev)
+{
+ int ret = 0;
+ int rb_bufsz;
+ u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
+
+ /* allocate ring */
+ ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+ if (ret)
+ return ret;
+
+ /* disable irqs */
+ r600_disable_interrupts(rdev);
+
+ /* init rlc */
+ ret = r600_rlc_init(rdev);
+ if (ret) {
+ r600_ih_ring_fini(rdev);
+ return ret;
+ }
+
+ /* setup interrupt control */
+ /* set dummy read address to ring address */
+ WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+ */
+ interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
+ /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
+ interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
+ WREG32(INTERRUPT_CNTL, interrupt_cntl);
+
+ WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+
+ ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
+ IH_WPTR_OVERFLOW_CLEAR |
+ (rb_bufsz << 1));
+ /* WPTR writeback, not yet */
+ /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
+ WREG32(IH_RB_WPTR_ADDR_LO, 0);
+ WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+ WREG32(IH_RB_CNTL, ih_rb_cntl);
+
+ /* set rptr, wptr to 0 */
+ WREG32(IH_RB_RPTR, 0);
+ WREG32(IH_RB_WPTR, 0);
+
+ /* Default settings for IH_CNTL (disabled at first) */
+ ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
+ /* RPTR_REARM only works if msi's are enabled */
+ if (rdev->msi_enabled)
+ ih_cntl |= RPTR_REARM;
+
+#ifdef __BIG_ENDIAN
+ ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
+#endif
+ WREG32(IH_CNTL, ih_cntl);
+
+ /* force the active interrupt state to all disabled */
+ r600_disable_interrupt_state(rdev);
+
+ /* enable irqs */
+ r600_enable_interrupts(rdev);
+
+ return ret;
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+ r600_disable_interrupts(rdev);
+ r600_rlc_stop(rdev);
+ r600_ih_ring_fini(rdev);
+}
+
+int r600_irq_set(struct radeon_device *rdev)
+{
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 mode_int = 0;
+ u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+
+ /* don't enable anything if the ih is disabled */
+ if (!rdev->ih.enabled)
+ return 0;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ if (ASIC_IS_DCE32(rdev)) {
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
+ } else {
+ hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ }
+
+ if (rdev->irq.sw_int) {
+ DRM_DEBUG("r600_irq_set: sw int\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ }
+ if (rdev->irq.crtc_vblank_int[0]) {
+ DRM_DEBUG("r600_irq_set: vblank 0\n");
+ mode_int |= D1MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.crtc_vblank_int[1]) {
+ DRM_DEBUG("r600_irq_set: vblank 1\n");
+ mode_int |= D2MODE_VBLANK_INT_MASK;
+ }
+ if (rdev->irq.hpd[0]) {
+ DRM_DEBUG("r600_irq_set: hpd 1\n");
+ hpd1 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[1]) {
+ DRM_DEBUG("r600_irq_set: hpd 2\n");
+ hpd2 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[2]) {
+ DRM_DEBUG("r600_irq_set: hpd 3\n");
+ hpd3 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[3]) {
+ DRM_DEBUG("r600_irq_set: hpd 4\n");
+ hpd4 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[4]) {
+ DRM_DEBUG("r600_irq_set: hpd 5\n");
+ hpd5 |= DC_HPDx_INT_EN;
+ }
+ if (rdev->irq.hpd[5]) {
+ DRM_DEBUG("r600_irq_set: hpd 6\n");
+ hpd6 |= DC_HPDx_INT_EN;
+ }
+
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+ WREG32(DxMODE_INT_MASK, mode_int);
+ if (ASIC_IS_DCE3(rdev)) {
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ }
+ } else {
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
+ }
+
+ return 0;
+}
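A minimal usage sketch of the mask programming above; the helper name is hypothetical and any locking the driver expects is assumed to be handled by the caller:

/* Hypothetical helper: request the D1 vblank interrupt and let
 * r600_irq_set() reprogram CP_INT_CNTL / DxMODE_INT_MASK accordingly. */
static int example_enable_d1_vblank(struct radeon_device *rdev)
{
        rdev->irq.crtc_vblank_int[0] = true;
        return r600_irq_set(rdev);
}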
+
+static inline void r600_irq_ack(struct radeon_device *rdev,
+ u32 *disp_int,
+ u32 *disp_int_cont,
+ u32 *disp_int_cont2)
+{
+ u32 tmp;
+
+ if (ASIC_IS_DCE3(rdev)) {
+ *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+ } else {
+ *disp_int = RREG32(DISP_INTERRUPT_STATUS);
+ *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+ *disp_int_cont2 = 0;
+ }
+
+ if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+ WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (*disp_int & LB_D1_VLINE_INTERRUPT)
+ WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+ WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
+ if (*disp_int & LB_D2_VLINE_INTERRUPT)
+ WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
+ if (*disp_int & DC_HPD1_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int & DC_HPD2_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+ if (ASIC_IS_DCE3(rdev)) {
+ tmp = RREG32(DC_HPD3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ } else {
+ tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
+ }
+ }
+ if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+ tmp = RREG32(DC_HPD4_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ }
+ if (ASIC_IS_DCE32(rdev)) {
+ if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ }
+ if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp |= DC_HPDx_INT_ACK;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
+ }
+ }
+}
+
+void r600_irq_disable(struct radeon_device *rdev)
+{
+ u32 disp_int, disp_int_cont, disp_int_cont2;
+
+ r600_disable_interrupts(rdev);
+ /* Wait and acknowledge irq */
+ mdelay(1);
+ r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+ r600_disable_interrupt_state(rdev);
+}
+
+static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
+{
+ u32 wptr, tmp;
+
+ /* XXX use writeback */
+ wptr = RREG32(IH_RB_WPTR);
+
+ if (wptr & RB_OVERFLOW) {
+ WARN_ON(1);
+ /* XXX deal with overflow */
+ DRM_ERROR("IH RB overflow\n");
+ tmp = RREG32(IH_RB_CNTL);
+ tmp |= IH_WPTR_OVERFLOW_CLEAR;
+ WREG32(IH_RB_CNTL, tmp);
+ }
+ wptr = wptr & WPTR_OFFSET_MASK;
+
+ return wptr;
+}
+
+/* r600 IV Ring
+ * Each IV ring entry is 128 bits:
+ * [7:0] - interrupt source id
+ * [31:8] - reserved
+ * [59:32] - interrupt source data
+ * [127:60] - reserved
+ *
+ * The basic interrupt vector entries
+ * are decoded as follows:
+ * src_id src_data description
+ * 1 0 D1 Vblank
+ * 1 1 D1 Vline
+ * 5 0 D2 Vblank
+ * 5 1 D2 Vline
+ * 19 0 FP Hot plug detection A
+ * 19 1 FP Hot plug detection B
+ * 19 2 DAC A auto-detection
+ * 19 3 DAC B auto-detection
+ * 176 - CP_INT RB
+ * 177 - CP_INT IB1
+ * 178 - CP_INT IB2
+ * 181 - EOP Interrupt
+ * 233 - GUI Idle
+ *
+ * Note, these are based on r600 and may need to be
+ * adjusted or added to on newer asics
+ */
+
+int r600_irq_process(struct radeon_device *rdev)
+{
+ u32 wptr = r600_get_ih_wptr(rdev);
+ u32 rptr = rdev->ih.rptr;
+ u32 src_id, src_data;
+ u32 last_entry = rdev->ih.ring_size - 16;
+ u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+ unsigned long flags;
+ bool queue_hotplug = false;
+
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
+ spin_lock_irqsave(&rdev->ih.lock, flags);
+
+ if (rptr == wptr) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+ if (rdev->shutdown) {
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_NONE;
+ }
+
+restart_ih:
+ /* display interrupts */
+ r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+
+ rdev->ih.wptr = wptr;
+ while (rptr != wptr) {
+ /* wptr/rptr are in bytes! */
+ ring_index = rptr / 4;
+ src_id = rdev->ih.ring[ring_index] & 0xff;
+ src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+
+ switch (src_id) {
+ case 1: /* D1 vblank/vline */
+ switch (src_data) {
+ case 0: /* D1 vblank */
+ if (disp_int & LB_D1_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 0);
+ disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
+ }
+ break;
+ case 1: /* D1 vline */
+ if (disp_int & LB_D1_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 5: /* D2 vblank/vline */
+ switch (src_data) {
+ case 0: /* D2 vblank */
+ if (disp_int & LB_D2_VBLANK_INTERRUPT) {
+ drm_handle_vblank(rdev->ddev, 1);
+ disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
+ }
+ break;
+ case 1: /* D2 vline */
+ if (disp_int & LB_D2_VLINE_INTERRUPT) {
+ disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 19: /* HPD/DAC hotplug */
+ switch (src_data) {
+ case 0:
+ if (disp_int & DC_HPD1_INTERRUPT) {
+ disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
+ break;
+ case 1:
+ if (disp_int & DC_HPD2_INTERRUPT) {
+ disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
+ break;
+ case 4:
+ if (disp_int_cont & DC_HPD3_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
+ break;
+ case 5:
+ if (disp_int_cont & DC_HPD4_INTERRUPT) {
+ disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
+ break;
+ case 10:
+ if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
+ break;
+ case 12:
+ if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+ break;
+ case 176: /* CP_INT in ring buffer */
+ case 177: /* CP_INT in IB1 */
+ case 178: /* CP_INT in IB2 */
+ DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev);
+ break;
+ case 181: /* CP EOP event */
+ DRM_DEBUG("IH: CP EOP\n");
+ break;
+ default:
+ DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+ break;
+ }
+
+ /* wptr/rptr are in bytes! */
+ if (rptr == last_entry)
+ rptr = 0;
+ else
+ rptr += 16;
+ }
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rdev->ih.wptr)
+ goto restart_ih;
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
+ rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
+ spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ return IRQ_HANDLED;
+}
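A standalone sketch of the entry decoding described in the format comment above, assuming the ring is viewed as an array of little-endian 32-bit dwords, exactly as in the processing loop; the helper name is hypothetical:

#include <stdint.h>

/* Decode one 128-bit IV ring entry (four dwords): bits [7:0] of dword 0
 * carry the source id, bits [27:0] of dword 1 carry the source data,
 * dwords 2 and 3 are reserved. */
static void iv_entry_decode(const uint32_t entry[4],
                            uint32_t *src_id, uint32_t *src_data)
{
        *src_id   = entry[0] & 0xff;       /* e.g. 1 = D1 vblank/vline   */
        *src_data = entry[1] & 0xfffffff;  /* e.g. 0 = vblank, 1 = vline */
}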
/*
* Debugfs info
@@ -1811,21 +2839,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t rdp, wdp;
unsigned count, i, j;
radeon_ring_free_size(rdev);
- rdp = RREG32(CP_RB_RPTR);
- wdp = RREG32(CP_RB_WPTR);
- count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
- seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
- seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+ seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+ seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+ seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+ seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
+ i = rdev->cp.rptr;
for (j = 0; j <= count; j++) {
- i = (rdp + j) & rdev->cp.ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (i + 1) & rdev->cp.ptr_mask;
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
new file mode 100644
index 00000000000..99e2c3891a7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_reg.h"
+#include "atom.h"
+
+#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
+
+/*
+ * check if the chipset is supported
+ */
+static int r600_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return rdev->family >= CHIP_R600
+ || rdev->family == CHIP_RS600
+ || rdev->family == CHIP_RS690
+ || rdev->family == CHIP_RS740;
+}
+
+/*
+ * current number of channels
+ */
+static int r600_audio_channels(struct radeon_device *rdev)
+{
+ return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
+}
+
+/*
+ * current bits per sample
+ */
+static int r600_audio_bits_per_sample(struct radeon_device *rdev)
+{
+ uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
+ switch (value) {
+ case 0x0: return 8;
+ case 0x1: return 16;
+ case 0x2: return 20;
+ case 0x3: return 24;
+ case 0x4: return 32;
+ }
+
+ DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
+
+ return 16;
+}
+
+/*
+ * current sampling rate in Hz
+ */
+static int r600_audio_rate(struct radeon_device *rdev)
+{
+ uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
+ uint32_t result;
+
+ if (value & 0x4000)
+ result = 44100;
+ else
+ result = 48000;
+
+ result *= ((value >> 11) & 0x7) + 1;
+ result /= ((value >> 8) & 0x7) + 1;
+
+ return result;
+}
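A self-contained restatement of the decode above, with two hypothetical register values to make the field layout explicit (bit 14 selects the 44.1 kHz base, bits [13:11] hold the multiplier minus one, bits [10:8] the divider minus one):

#include <stdint.h>

/* Same formula as r600_audio_rate(), applied to a raw register value. */
static uint32_t example_decode_rate(uint32_t value)
{
        uint32_t rate = (value & 0x4000) ? 44100 : 48000;
        rate *= ((value >> 11) & 0x7) + 1;
        rate /= ((value >> 8) & 0x7) + 1;
        return rate;
}
/* example_decode_rate(0x4000) == 44100; example_decode_rate(0x0800) == 96000 */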
+
+/*
+ * iec 60958 status bits
+ */
+static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
+{
+ return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
+}
+
+/*
+ * iec 60958 category code
+ */
+static uint8_t r600_audio_category_code(struct radeon_device *rdev)
+{
+ return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
+}
+
+/*
+ * update all hdmi interfaces with current audio parameters
+ */
+static void r600_audio_update_hdmi(unsigned long param)
+{
+ struct radeon_device *rdev = (struct radeon_device *)param;
+ struct drm_device *dev = rdev->ddev;
+
+ int channels = r600_audio_channels(rdev);
+ int rate = r600_audio_rate(rdev);
+ int bps = r600_audio_bits_per_sample(rdev);
+ uint8_t status_bits = r600_audio_status_bits(rdev);
+ uint8_t category_code = r600_audio_category_code(rdev);
+
+ struct drm_encoder *encoder;
+ int changes = 0;
+
+ changes |= channels != rdev->audio_channels;
+ changes |= rate != rdev->audio_rate;
+ changes |= bps != rdev->audio_bits_per_sample;
+ changes |= status_bits != rdev->audio_status_bits;
+ changes |= category_code != rdev->audio_category_code;
+
+ if (changes) {
+ rdev->audio_channels = channels;
+ rdev->audio_rate = rate;
+ rdev->audio_bits_per_sample = bps;
+ rdev->audio_status_bits = status_bits;
+ rdev->audio_category_code = category_code;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (changes || r600_hdmi_buffer_status_changed(encoder))
+ r600_hdmi_update_audio_settings(
+ encoder, channels,
+ rate, bps, status_bits,
+ category_code);
+ }
+
+ mod_timer(&rdev->audio_timer,
+ jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
+}
+
+/*
+ * initialize the audio vars and register the update timer
+ */
+int r600_audio_init(struct radeon_device *rdev)
+{
+ if (!r600_audio_chipset_supported(rdev))
+ return 0;
+
+ DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
+ WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
+
+ rdev->audio_channels = -1;
+ rdev->audio_rate = -1;
+ rdev->audio_bits_per_sample = -1;
+ rdev->audio_status_bits = 0;
+ rdev->audio_category_code = 0;
+
+ setup_timer(
+ &rdev->audio_timer,
+ r600_audio_update_hdmi,
+ (unsigned long)rdev);
+
+ mod_timer(&rdev->audio_timer, jiffies + 1);
+
+ return 0;
+}
+
+/*
+ * determine how the encoders and audio interface are wired together
+ */
+int r600_audio_tmds_index(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *other;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ return 0;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ /* special case: check if a TMDS1 is present */
+ list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
+ if (to_radeon_encoder(other)->encoder_id ==
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1)
+ return 1;
+ }
+ return 0;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ return 1;
+
+ default:
+ DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return -1;
+ }
+}
+
+/*
+ * attach the audio codec to the clock source of the encoder
+ */
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int base_rate = 48000;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
+ break;
+
+ default:
+ DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
+ }
+
+ switch (r600_audio_tmds_index(encoder)) {
+ case 0:
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock*100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+ break;
+
+ case 1:
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock*100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+ break;
+ }
+}
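Worked register values from the arithmetic above, for a hypothetical 74.25 MHz mode (mode->clock == 74250) routed to TMDS index 0:

/*   R600_AUDIO_PLL1_MUL   <- 48000 * 50  = 2400000
 *   R600_AUDIO_PLL1_DIV   <- 74250 * 100 = 7425000
 *   R600_AUDIO_CLK_SRCSEL <- 0
 */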
+
+/*
+ * release the audio timer
+ * TODO: How to do this correctly on SMP systems?
+ */
+void r600_audio_fini(struct radeon_device *rdev)
+{
+ if (!r600_audio_chipset_supported(rdev))
+ return;
+
+ WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
+
+ del_timer(&rdev->audio_timer);
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index dbf716e1fbf..9aecafb51b6 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_object_create(rdev, NULL, obj_size,
- true, RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->r600_blit.shader_obj);
+ r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size,
rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
- r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
if (r) {
DRM_ERROR("failed to map blit object %d\n", r);
return r;
}
-
if (rdev->family >= CHIP_RV770)
memcpy_toio(ptr + rdev->r600_blit.state_offset,
r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
-
-
memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
- radeon_object_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
return 0;
}
void r600_blit_fini(struct radeon_device *rdev)
{
- radeon_object_unpin(rdev->r600_blit.shader_obj);
- radeon_object_unref(&rdev->r600_blit.shader_obj);
+ int r;
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+ goto out_unref;
+ }
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+ radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 3; /* fence emit for VB IB */
+ ring_size += 5; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 3; /* fence emit for done copy */
+ ring_size += 5; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
WARN_ON(r);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0d820764f34..44060b92d9e 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -170,7 +170,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
idx, relocs_chunk->length_dw);
return -EINVAL;
}
- *cs_reloc = &p->relocs[0];
+ *cs_reloc = p->relocs;
(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
return 0;
@@ -717,7 +717,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
if (p->chunk_relocs_idx == -1) {
return 0;
}
- p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 00000000000..fcc949df0e5
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+/*
+ * HDMI color format
+ */
+enum r600_hdmi_color_format {
+ RGB = 0,
+ YCC_422 = 1,
+ YCC_444 = 2
+};
+
+/*
+ * IEC60958 status bits
+ */
+enum r600_hdmi_iec_status_bits {
+ AUDIO_STATUS_DIG_ENABLE = 0x01,
+ AUDIO_STATUS_V = 0x02,
+ AUDIO_STATUS_VCFG = 0x04,
+ AUDIO_STATUS_EMPHASIS = 0x08,
+ AUDIO_STATUS_COPYRIGHT = 0x10,
+ AUDIO_STATUS_NONAUDIO = 0x20,
+ AUDIO_STATUS_PROFESSIONAL = 0x40,
+ AUDIO_STATUS_LEVEL = 0x80
+};
+
+struct {
+ uint32_t Clock;
+
+ int N_32kHz;
+ int CTS_32kHz;
+
+ int N_44_1kHz;
+ int CTS_44_1kHz;
+
+ int N_48kHz;
+ int CTS_48kHz;
+
+} r600_hdmi_ACR[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+ { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+ { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+ { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+ { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
+};
+
+/*
+ * calculate CTS value if it's not found in the table
+ */
+static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
+{
+ if (*CTS == 0)
+ *CTS = clock*N/(128*freq)*1000;
+ DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+ N, *CTS, freq);
+}
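A worked instance of the fallback formula above, for a hypothetical 27.000 MHz pixel clock (clock is in kHz, as passed down from mode->clock) at 32 kHz audio:

/* CTS = clock * N / (128 * freq) * 1000
 *     = 27000 * 4096 / (128 * 32000) * 1000
 *     = (110592000 / 4096000) * 1000
 *     = 27 * 1000 = 27000, matching the 27.00 MHz table entry above. */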
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ int CTS;
+ int N;
+ int i;
+
+ for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
+
+ CTS = r600_hdmi_ACR[i].CTS_32kHz;
+ N = r600_hdmi_ACR[i].N_32kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
+ WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_32kHz_N, N);
+
+ CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
+ N = r600_hdmi_ACR[i].N_44_1kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
+ WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_44_1kHz_N, N);
+
+ CTS = r600_hdmi_ACR[i].CTS_48kHz;
+ N = r600_hdmi_ACR[i].N_48kHz;
+ r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
+ WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
+ WREG32(offset+R600_HDMI_48kHz_N, N);
+}
+
+/*
+ * calculate the checksum for a given info frame
+ */
+static void r600_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
+}
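A worked instance of the checksum above, using the all-zero AVI infoframe that r600_hdmi_setmode() emits later in this file (type 0x82, version 0x02, length 0x0D):

/*   frame[0] = 0x82 + 0x02 + 0x0D = 0x91
 *   checksum = 0x100 - 0x91       = 0x6F
 * so the byte sum of type + version + length + payload + checksum
 * is 0x100, i.e. 0 modulo 256, as the receiver expects. */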
+
+/*
+ * build an HDMI Video Info Frame
+ */
+static void r600_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ enum r600_hdmi_color_format color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
+ frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * build an Audio Info Frame
+ */
+static void r600_hdmi_audioinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t channel_count,
+ uint8_t coding_type,
+ uint8_t sample_size,
+ uint8_t sample_frequency,
+ uint8_t format,
+ uint8_t channel_allocation,
+ uint8_t level_shift,
+ int downmix_inhibit
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint8_t frame[11];
+
+ frame[0x0] = 0;
+ frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
+ frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
+ frame[0x3] = format;
+ frame[0x4] = channel_allocation;
+ frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
+ frame[0x6] = 0;
+ frame[0x7] = 0;
+ frame[0x8] = 0;
+ frame[0x9] = 0;
+ frame[0xA] = 0;
+
+ r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
+
+ WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+}
+
+/*
+ * test if audio buffer is filled enough to start playing
+ */
+static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
+}
+
+/*
+ * has the buffer status changed since the last call?
+ */
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ int status, result;
+
+ if (!radeon_encoder->hdmi_offset)
+ return 0;
+
+ status = r600_hdmi_is_audio_buffer_filled(encoder);
+ result = radeon_encoder->hdmi_buffer_status != status;
+ radeon_encoder->hdmi_buffer_status = status;
+
+ return result;
+}
+
+/*
+ * write the audio workaround status to the hardware
+ */
+void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset = radeon_encoder->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ if (r600_hdmi_is_audio_buffer_filled(encoder)) {
+ /* disable audio workaround and start delivering audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
+
+ } else if (radeon_encoder->hdmi_audio_workaround) {
+ /* enable audio workaround and start delivering audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
+
+ } else {
+ /* disable audio workaround and stop delivering audio frames */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
+ }
+}
+
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ r600_audio_set_clock(encoder, mode->clock);
+
+ WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
+ WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
+ WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
+
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
+
+ WREG32(offset+R600_HDMI_VERSION, 0x202);
+
+ r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
+ WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* audio packets per line, does anyone know how to calculate this? */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
+
+ /* update? reset? don't really know */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
+}
+
+/*
+ * update settings with current parameters from audio engine
+ */
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
+ int channels,
+ int rate,
+ int bps,
+ uint8_t status_bits,
+ uint8_t category_code)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ uint32_t iec;
+
+ if (!offset)
+ return;
+
+ DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
+ r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
+ channels, rate, bps);
+ DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
+ (int)status_bits, (int)category_code);
+
+ iec = 0;
+ if (status_bits & AUDIO_STATUS_PROFESSIONAL)
+ iec |= 1 << 0;
+ if (status_bits & AUDIO_STATUS_NONAUDIO)
+ iec |= 1 << 1;
+ if (status_bits & AUDIO_STATUS_COPYRIGHT)
+ iec |= 1 << 2;
+ if (status_bits & AUDIO_STATUS_EMPHASIS)
+ iec |= 1 << 3;
+
+ iec |= category_code << 8;
+
+ switch (rate) {
+ case 32000: iec |= 0x3 << 24; break;
+ case 44100: iec |= 0x0 << 24; break;
+ case 88200: iec |= 0x8 << 24; break;
+ case 176400: iec |= 0xc << 24; break;
+ case 48000: iec |= 0x2 << 24; break;
+ case 96000: iec |= 0xa << 24; break;
+ case 192000: iec |= 0xe << 24; break;
+ }
+
+ WREG32(offset+R600_HDMI_IEC60958_1, iec);
+
+ iec = 0;
+ switch (bps) {
+ case 16: iec |= 0x2; break;
+ case 20: iec |= 0x3; break;
+ case 24: iec |= 0xb; break;
+ }
+ if (status_bits & AUDIO_STATUS_V)
+ iec |= 0x5 << 16;
+
+ WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
+
+ /* 0x021 or 0x031 sets the audio frame length */
+ WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
+ r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+
+ r600_hdmi_audio_workaround(encoder);
+
+ /* update? reset? don't really know */
+ WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
+}
+
+/*
+ * enable/disable the HDMI engine
+ */
+void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+
+ if (!offset)
+ return;
+
+ DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
+
+ /* some versions of atombios ignore the enable HDMI flag
+ * so enabling/disabling HDMI was moved here for TMDS1+2 */
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* This part is doubtful in my opinion */
+ WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
+ break;
+
+ default:
+ DRM_ERROR("unknown HDMI output type\n");
+ break;
+ }
+}
+
+/*
+ * determine at which register offset the HDMI encoder sits
+ */
+void r600_hdmi_init(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ switch (r600_audio_tmds_index(encoder)) {
+ case 0:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ break;
+ case 1:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ break;
+ default:
+ radeon_encoder->hdmi_offset = 0;
+ break;
+ }
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ break;
+
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ radeon_encoder->hdmi_offset = R600_HDMI_DIG;
+ break;
+
+ default:
+ radeon_encoder->hdmi_offset = 0;
+ break;
+ }
+
+ DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
+ radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+
+ /* TODO: make this configurable */
+ radeon_encoder->hdmi_audio_workaround = 0;
+}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index e2d1f5f33f7..d0e28ffdeda 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -110,5 +110,79 @@
#define R600_BIOS_6_SCRATCH 0x173c
#define R600_BIOS_7_SCRATCH 0x1740
+/* Audio, these regs were reverse engineered,
+ * so the chance is high that the naming is wrong
+ * R6xx+ ??? */
+
+/* Audio clocks */
+#define R600_AUDIO_PLL1_MUL 0x0514
+#define R600_AUDIO_PLL1_DIV 0x0518
+#define R600_AUDIO_PLL2_MUL 0x0524
+#define R600_AUDIO_PLL2_DIV 0x0528
+#define R600_AUDIO_CLK_SRCSEL 0x0534
+
+/* Audio general */
+#define R600_AUDIO_ENABLE 0x7300
+#define R600_AUDIO_TIMING 0x7344
+
+/* Audio params */
+#define R600_AUDIO_VENDOR_ID 0x7380
+#define R600_AUDIO_REVISION_ID 0x7384
+#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
+#define R600_AUDIO_NID1_NODE_COUNT 0x738c
+#define R600_AUDIO_NID1_TYPE 0x7390
+#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
+#define R600_AUDIO_SUPPORTED_CODEC 0x7398
+#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
+#define R600_AUDIO_NID2_CAPS 0x73a0
+#define R600_AUDIO_NID3_CAPS 0x73a4
+#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
+
+/* Audio conn list */
+#define R600_AUDIO_CONN_LIST_LEN 0x73ac
+#define R600_AUDIO_CONN_LIST 0x73b0
+
+/* Audio verbs */
+#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
+#define R600_AUDIO_PLAYING 0x73c4
+#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
+#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
+#define R600_AUDIO_PIN_SENSE 0x73d0
+#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
+#define R600_AUDIO_STATUS_BITS 0x73d8
+
+/* HDMI base register addresses */
+#define R600_HDMI_TMDS1 0x7400
+#define R600_HDMI_TMDS2 0x7700
+#define R600_HDMI_DIG 0x7800
+
+/* HDMI registers */
+#define R600_HDMI_ENABLE 0x00
+#define R600_HDMI_STATUS 0x04
+#define R600_HDMI_CNTL 0x08
+#define R600_HDMI_UNKNOWN_0 0x0C
+#define R600_HDMI_AUDIOCNTL 0x10
+#define R600_HDMI_VIDEOCNTL 0x14
+#define R600_HDMI_VERSION 0x18
+#define R600_HDMI_UNKNOWN_1 0x28
+#define R600_HDMI_VIDEOINFOFRAME_0 0x54
+#define R600_HDMI_VIDEOINFOFRAME_1 0x58
+#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
+#define R600_HDMI_VIDEOINFOFRAME_3 0x60
+#define R600_HDMI_32kHz_CTS 0xac
+#define R600_HDMI_32kHz_N 0xb0
+#define R600_HDMI_44_1kHz_CTS 0xb4
+#define R600_HDMI_44_1kHz_N 0xb8
+#define R600_HDMI_48kHz_CTS 0xbc
+#define R600_HDMI_48kHz_N 0xc0
+#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
+#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
+#define R600_HDMI_IEC60958_1 0xd4
+#define R600_HDMI_IEC60958_2 0xd8
+#define R600_HDMI_UNKNOWN_2 0xdc
+#define R600_HDMI_AUDIO_DEBUG_0 0xe0
+#define R600_HDMI_AUDIO_DEBUG_1 0xe4
+#define R600_HDMI_AUDIO_DEBUG_2 0xe8
+#define R600_HDMI_AUDIO_DEBUG_3 0xec
#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 27ab428b149..05894edadab 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -456,7 +456,215 @@
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
#define WAIT_3D_IDLECLEAN_bit (1 << 17)
-
+#define IH_RB_CNTL 0x3e00
+# define IH_RB_ENABLE (1 << 0)
+# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
+# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
+# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
+# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
+# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
+#define IH_RB_BASE 0x3e04
+#define IH_RB_RPTR 0x3e08
+#define IH_RB_WPTR 0x3e0c
+# define RB_OVERFLOW (1 << 0)
+# define WPTR_OFFSET_MASK 0x3fffc
+#define IH_RB_WPTR_ADDR_HI 0x3e10
+#define IH_RB_WPTR_ADDR_LO 0x3e14
+#define IH_CNTL 0x3e18
+# define ENABLE_INTR (1 << 0)
+# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP_NONE 0
+# define IH_MC_SWAP_16BIT 1
+# define IH_MC_SWAP_32BIT 2
+# define IH_MC_SWAP_64BIT 3
+# define RPTR_REARM (1 << 4)
+# define MC_WRREQ_CREDIT(x) ((x) << 15)
+# define MC_WR_CLEAN_CNT(x) ((x) << 20)
+
+#define RLC_CNTL 0x3f00
+# define RLC_ENABLE (1 << 0)
+#define RLC_HB_BASE 0x3f10
+#define RLC_HB_CNTL 0x3f0c
+#define RLC_HB_RPTR 0x3f20
+#define RLC_HB_WPTR 0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR 0x3f14
+#define RLC_HB_WPTR_MSB_ADDR 0x3f18
+#define RLC_MC_CNTL 0x3f44
+#define RLC_UCODE_CNTL 0x3f48
+#define RLC_UCODE_ADDR 0x3f2c
+#define RLC_UCODE_DATA 0x3f30
+
+#define SRBM_SOFT_RESET 0xe60
+# define SOFT_RESET_RLC (1 << 13)
+
+#define CP_INT_CNTL 0xc124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define SCRATCH_INT_ENABLE (1 << 25)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+# define IB2_INT_ENABLE (1 << 29)
+# define IB1_INT_ENABLE (1 << 30)
+# define RB_INT_ENABLE (1 << 31)
+#define CP_INT_STATUS 0xc128
+# define SCRATCH_INT_STAT (1 << 25)
+# define TIME_STAMP_INT_STAT (1 << 26)
+# define IB2_INT_STAT (1 << 29)
+# define IB1_INT_STAT (1 << 30)
+# define RB_INT_STAT (1 << 31)
+
+#define GRBM_INT_CNTL 0x8060
+# define RDERR_INT_ENABLE (1 << 0)
+# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
+# define GUI_IDLE_INT_ENABLE (1 << 19)
+
+#define INTERRUPT_CNTL 0x5468
+# define IH_DUMMY_RD_OVERRIDE (1 << 0)
+# define IH_DUMMY_RD_EN (1 << 1)
+# define IH_REQ_NONSNOOP_EN (1 << 3)
+# define GEN_IH_INT_EN (1 << 8)
+#define INTERRUPT_CNTL2 0x546c
+
+#define D1MODE_VBLANK_STATUS 0x6534
+#define D2MODE_VBLANK_STATUS 0x6d34
+# define DxMODE_VBLANK_OCCURRED (1 << 0)
+# define DxMODE_VBLANK_ACK (1 << 4)
+# define DxMODE_VBLANK_STAT (1 << 12)
+# define DxMODE_VBLANK_INTERRUPT (1 << 16)
+# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
+#define D1MODE_VLINE_STATUS 0x653c
+#define D2MODE_VLINE_STATUS 0x6d3c
+# define DxMODE_VLINE_OCCURRED (1 << 0)
+# define DxMODE_VLINE_ACK (1 << 4)
+# define DxMODE_VLINE_STAT (1 << 12)
+# define DxMODE_VLINE_INTERRUPT (1 << 16)
+# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
+#define DxMODE_INT_MASK 0x6540
+# define D1MODE_VBLANK_INT_MASK (1 << 0)
+# define D1MODE_VLINE_INT_MASK (1 << 4)
+# define D2MODE_VBLANK_INT_MASK (1 << 8)
+# define D2MODE_VLINE_INT_MASK (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
+# define DC_HPD1_INTERRUPT (1 << 18)
+# define DC_HPD2_INTERRUPT (1 << 19)
+#define DISP_INTERRUPT_STATUS 0x7edc
+# define LB_D1_VLINE_INTERRUPT (1 << 2)
+# define LB_D2_VLINE_INTERRUPT (1 << 3)
+# define LB_D1_VBLANK_INTERRUPT (1 << 4)
+# define LB_D2_VBLANK_INTERRUPT (1 << 5)
+# define DACA_AUTODETECT_INTERRUPT (1 << 16)
+# define DACB_AUTODETECT_INTERRUPT (1 << 17)
+# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
+# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
+# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
+# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
+# define DC_HPD4_INTERRUPT (1 << 14)
+# define DC_HPD4_RX_INTERRUPT (1 << 15)
+# define DC_HPD3_INTERRUPT (1 << 28)
+# define DC_HPD1_RX_INTERRUPT (1 << 29)
+# define DC_HPD2_RX_INTERRUPT (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
+# define DC_HPD3_RX_INTERRUPT (1 << 0)
+# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
+# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
+# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
+# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
+# define AUX1_SW_DONE_INTERRUPT (1 << 5)
+# define AUX1_LS_DONE_INTERRUPT (1 << 6)
+# define AUX2_SW_DONE_INTERRUPT (1 << 7)
+# define AUX2_LS_DONE_INTERRUPT (1 << 8)
+# define AUX3_SW_DONE_INTERRUPT (1 << 9)
+# define AUX3_LS_DONE_INTERRUPT (1 << 10)
+# define AUX4_SW_DONE_INTERRUPT (1 << 11)
+# define AUX4_LS_DONE_INTERRUPT (1 << 12)
+# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
+# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
+/* DCE 3.2 */
+# define AUX5_SW_DONE_INTERRUPT (1 << 15)
+# define AUX5_LS_DONE_INTERRUPT (1 << 16)
+# define AUX6_SW_DONE_INTERRUPT (1 << 17)
+# define AUX6_LS_DONE_INTERRUPT (1 << 18)
+# define DC_HPD5_INTERRUPT (1 << 19)
+# define DC_HPD5_RX_INTERRUPT (1 << 20)
+# define DC_HPD6_INTERRUPT (1 << 21)
+# define DC_HPD6_RX_INTERRUPT (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL 0x7828
+#define DACB_AUTO_DETECT_CONTROL 0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128
+# define DACx_AUTODETECT_MODE(x) ((x) << 0)
+# define DACx_AUTODETECT_MODE_NONE 0
+# define DACx_AUTODETECT_MODE_CONNECT 1
+# define DACx_AUTODETECT_MODE_DISCONNECT 2
+# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
+#define DACA_AUTODETECT_INT_CONTROL 0x7838
+#define DACB_AUTODETECT_INT_CONTROL 0x7a38
+# define DACx_AUTODETECT_ACK (1 << 0)
+# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24
+# define DC_HOT_PLUG_DETECTx_EN (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28
+# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0)
+# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS 0x7d00
+#define DC_HPD2_INT_STATUS 0x7d0c
+#define DC_HPD3_INT_STATUS 0x7d18
+#define DC_HPD4_INT_STATUS 0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS 0x7dc0
+#define DC_HPD6_INT_STATUS 0x7df4
+# define DC_HPDx_INT_STATUS (1 << 0)
+# define DC_HPDx_SENSE (1 << 1)
+# define DC_HPDx_RX_INT_STATUS (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
+# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
+# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
+# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL 0x7d04
+#define DC_HPD2_INT_CONTROL 0x7d10
+#define DC_HPD3_INT_CONTROL 0x7d1c
+#define DC_HPD4_INT_CONTROL 0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL 0x7dc4
+#define DC_HPD6_INT_CONTROL 0x7df8
+# define DC_HPDx_INT_ACK (1 << 0)
+# define DC_HPDx_INT_POLARITY (1 << 8)
+# define DC_HPDx_INT_EN (1 << 16)
+# define DC_HPDx_RX_INT_ACK (1 << 20)
+# define DC_HPDx_RX_INT_EN (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL 0x7d08
+#define DC_HPD2_CONTROL 0x7d14
+#define DC_HPD3_CONTROL 0x7d20
+#define DC_HPD4_CONTROL 0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL 0x7dc8
+#define DC_HPD6_CONTROL 0x7dfc
+# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
+# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
+/* DCE 3.2 */
+# define DC_HPDx_EN (1 << 28)
/*
* PM4
@@ -500,7 +708,6 @@
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
-#define PACKET3_CP_INTERRUPT 0x40
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_TC_ACTION_ENA (1 << 23)
@@ -674,4 +881,5 @@
#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 224506a2f7b..53b55608102 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
#ifndef __RADEON_H__
#define __RADEON_H__
-#include "radeon_object.h"
-
/* TODO: Here are things that needs to be done :
* - surface allocator & initializer : (bit like scratch reg) should
* initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
#include <linux/list.h>
#include <linux/kref.h>
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
@@ -85,6 +88,8 @@ extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
+extern int radeon_new_pll;
+extern int radeon_audio;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -157,6 +162,7 @@ struct radeon_fence_driver {
struct list_head created;
struct list_head emited;
struct list_head signaled;
+ bool initialized;
};
struct radeon_fence {
@@ -186,76 +192,63 @@ void radeon_fence_unref(struct radeon_fence **fence);
* Tiling registers
*/
struct radeon_surface_reg {
- struct radeon_object *robj;
+ struct radeon_bo *bo;
};
#define RADEON_GEM_MAX_SURFACES 8
/*
- * Radeon buffer.
+ * TTM.
*/
-struct radeon_object;
+struct radeon_mman {
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_global_reference mem_global_ref;
+ struct ttm_bo_device bdev;
+ bool mem_global_referenced;
+ bool initialized;
+};
+
+struct radeon_bo {
+ /* Protected by gem.mutex */
+ struct list_head list;
+ /* Protected by tbo.reserved */
+ u32 placements[3];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ unsigned pin_count;
+ void *kptr;
+ u32 tiling_flags;
+ u32 pitch;
+ int surface_reg;
+ /* Constant after initialization */
+ struct radeon_device *rdev;
+ struct drm_gem_object *gobj;
+};
-struct radeon_object_list {
+struct radeon_bo_list {
struct list_head list;
- struct radeon_object *robj;
+ struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
- uint32_t tiling_flags;
+ u32 tiling_flags;
};
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
- struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
/*
* GEM objects.
*/
struct radeon_gem {
+ struct mutex mutex;
struct list_head objects;
};
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj);
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -271,7 +264,7 @@ struct radeon_gart_table_ram {
};
struct radeon_gart_table_vram {
- struct radeon_object *robj;
+ struct radeon_bo *robj;
volatile uint32_t *ptr;
};
@@ -352,11 +345,16 @@ struct radeon_irq {
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[2];
+ /* FIXME: use defines for max hpd/dacs */
+ bool hpd[6];
+ spinlock_t sw_lock;
+ int sw_refcount;
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
/*
* CP & ring.
@@ -376,7 +374,7 @@ struct radeon_ib {
*/
struct radeon_ib_pool {
struct mutex mutex;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
@@ -384,7 +382,7 @@ struct radeon_ib_pool {
};
struct radeon_cp {
- struct radeon_object *ring_obj;
+ struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned wptr;
@@ -399,8 +397,25 @@ struct radeon_cp {
bool ready;
};
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+ struct radeon_bo *ring_obj;
+ volatile uint32_t *ring;
+ unsigned rptr;
+ unsigned wptr;
+ unsigned wptr_old;
+ unsigned ring_size;
+ uint64_t gpu_addr;
+ uint32_t align_mask;
+ uint32_t ptr_mask;
+ spinlock_t lock;
+ bool enabled;
+};
+
struct r600_blit {
- struct radeon_object *shader_obj;
+ struct radeon_bo *shader_obj;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
@@ -430,8 +445,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
*/
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- struct radeon_object_list lobj;
+ struct radeon_bo *robj;
+ struct radeon_bo_list lobj;
uint32_t handle;
uint32_t flags;
};
@@ -527,7 +542,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
* Writeback
*/
struct radeon_wb {
- struct radeon_object *wb_obj;
+ struct radeon_bo *wb_obj;
volatile uint32_t *wb;
uint64_t gpu_addr;
};
@@ -639,6 +654,11 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
+ void (*hdp_flush)(struct radeon_device *rdev);
+ void (*hpd_init)(struct radeon_device *rdev);
+ void (*hpd_fini)(struct radeon_device *rdev);
+ bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
};
/*
@@ -751,9 +771,9 @@ struct radeon_device {
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
- struct radeon_object *stollen_vga_memory;
+ struct radeon_bo *stollen_vga_memory;
struct fb_info *fbdev_info;
- struct radeon_object *fbdev_robj;
+ struct radeon_bo *fbdev_rbo;
struct radeon_framebuffer *fbdev_rfb;
/* Register mmio */
resource_size_t rmmio_base;
@@ -791,8 +811,20 @@ struct radeon_device {
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
+ const struct firmware *rlc_fw; /* r6/700 RLC firmware */
struct r600_blit r600_blit;
int msi_enabled; /* msi enabled */
+ struct r600_ih ih; /* r6/700 interrupt ring */
+ struct workqueue_struct *wq;
+ struct work_struct hotplug_work;
+
+ /* audio stuff */
+ struct timer_list audio_timer;
+ int audio_channels;
+ int audio_rate;
+ int audio_bits_per_sample;
+ uint8_t audio_status_bits;
+ uint8_t audio_category_code;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -829,6 +861,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
}
}
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
/*
* Registers read & write functions.
@@ -965,18 +1001,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
/* Common functions */
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
@@ -984,6 +1026,8 @@ extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
+extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1021,7 +1065,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
extern void r100_vga_render_disable(struct radeon_device *rdev);
extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
- struct radeon_object *robj);
+ struct radeon_bo *robj);
extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
@@ -1029,6 +1073,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1104,7 +1150,29 @@ extern void r600_wb_disable(struct radeon_device *rdev);
extern void r600_scratch_init(struct radeon_device *rdev);
extern int r600_blit_init(struct radeon_device *rdev);
extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+extern int r600_audio_init(struct radeon_device *rdev);
+extern int r600_audio_tmds_index(struct drm_encoder *encoder);
+extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+extern void r600_audio_fini(struct radeon_device *rdev);
+extern void r600_hdmi_init(struct drm_encoder *encoder);
+extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
+extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
+ int channels,
+ int rate,
+ int bps,
+ uint8_t status_bits,
+ uint8_t category_code);
+
+#include "radeon_object.h"
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c18fbee387d..eb29217bbf1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -33,6 +33,7 @@
*/
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
@@ -76,6 +77,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
+void r100_hdp_flush(struct radeon_device *rdev);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
static struct radeon_asic r100_asic = {
.init = &r100_init,
@@ -100,13 +107,18 @@ static struct radeon_asic r100_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -155,13 +167,18 @@ static struct radeon_asic r300_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
/*
@@ -201,6 +218,11 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -238,13 +260,18 @@ static struct radeon_asic rs400_asic = {
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = NULL,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
};
@@ -263,6 +290,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+
static struct radeon_asic rs600_asic = {
.init = &rs600_init,
.fini = &rs600_fini,
@@ -291,6 +324,11 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -334,6 +372,11 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -381,6 +424,11 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
@@ -419,6 +467,11 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r100_hdp_flush,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
};
/*
@@ -455,6 +508,12 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
+void r600_hdp_flush(struct radeon_device *rdev);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -470,6 +529,7 @@ static struct radeon_asic r600_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
@@ -484,6 +544,11 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
};
/*
@@ -509,6 +574,7 @@ static struct radeon_asic rv770_asic = {
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
@@ -523,6 +589,11 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
+ .hdp_flush = &r600_hdp_flush,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2ed88a82093..321044bef71 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb, uint32_t igp_lane_info,
- uint16_t connector_object_id);
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@@ -60,16 +61,16 @@ union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
- *dev, uint8_t id)
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+ uint8_t id)
{
- struct radeon_device *rdev = dev->dev_private;
struct atom_context *ctx = rdev->mode_info.atom_context;
- ATOM_GPIO_I2C_ASSIGMENT gpio;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset;
+ int i;
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
@@ -78,34 +79,121 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
- gpio = i2c_info->asGPIO_Info[id];
-
- i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
- i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
- i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
- i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
- i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
- i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
- i2c.put_data_mask = (1 << gpio.ucDataEnShift);
- i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
- i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
- i2c.valid = true;
+
+ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
+ if (gpio->sucI2cId.ucAccess == id) {
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ i2c.valid = true;
+ }
+ }
return i2c;
}
+static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+ u8 id)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ struct radeon_gpio_rec gpio;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+ struct _ATOM_GPIO_PIN_LUT *gpio_info;
+ ATOM_GPIO_PIN_ASSIGNMENT *pin;
+ u16 data_offset, size;
+ int i, num_indices;
+
+ memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+ gpio.valid = false;
+
+ atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+
+ gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ pin = &gpio_info->asGPIO_Pin[i];
+ if (id == pin->ucGPIO_ID) {
+ gpio.id = pin->ucGPIO_ID;
+ gpio.reg = pin->usGpioPin_AIndex * 4;
+ gpio.mask = (1 << pin->ucGpioPinBitShift);
+ gpio.valid = true;
+ break;
+ }
+ }
+
+ return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+ struct radeon_gpio_rec *gpio)
+{
+ struct radeon_hpd hpd;
+ hpd.gpio = *gpio;
+ if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+ switch(gpio->mask) {
+ case (1 << 0):
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ case (1 << 8):
+ hpd.hpd = RADEON_HPD_2;
+ break;
+ case (1 << 16):
+ hpd.hpd = RADEON_HPD_3;
+ break;
+ case (1 << 24):
+ hpd.hpd = RADEON_HPD_4;
+ break;
+ case (1 << 26):
+ hpd.hpd = RADEON_HPD_5;
+ break;
+ case (1 << 28):
+ hpd.hpd = RADEON_HPD_6;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else
+ hpd.hpd = RADEON_HPD_NONE;
+ return hpd;
+}
+
static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint32_t supported_device,
int *connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t *line_mux)
+ uint16_t *line_mux,
+ struct radeon_hpd *hpd)
{
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -135,6 +223,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* HIS X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7146) &&
+ (dev->pdev->subsystem_vendor == 0x17af) &&
+ (dev->pdev->subsystem_device == 0x2058)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
+ /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+ if ((dev->pdev->device == 0x7142) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x2134)) {
+ if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+ return false;
+ }
+
+
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
@@ -172,6 +277,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+ /* Acer laptop reports DVI-D as DVI-I */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1025) &&
+ (dev->pdev->subsystem_device == 0x013c)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
return true;
}
@@ -240,16 +354,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
struct radeon_mode_info *mode_info = &rdev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
- uint16_t size, data_offset;
- uint8_t frev, crev, line_mux = 0;
+ u16 size, data_offset;
+ u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
int i, j, path_size, device_support;
int connector_type;
- uint16_t igp_lane_info, conn_id, connector_object_id;
+ u16 igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_gpio_rec gpio;
+ struct radeon_hpd hpd;
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -276,7 +392,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
path_size += le16_to_cpu(path->usSize);
linkb = false;
-
if (device_support & le16_to_cpu(path->usDeviceTag)) {
uint8_t con_obj_id, con_obj_num, con_obj_type;
@@ -377,10 +492,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
}
- /* look up gpio for ddc */
+ /* look up gpio for ddc, hpd */
if ((le16_to_cpu(path->usDeviceTag) &
- (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
- == 0) {
+ (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
if (le16_to_cpu(path->usConnObjectId) ==
le16_to_cpu(con_obj->asObjects[j].
@@ -394,21 +508,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
asObjects[j].
usRecordOffset));
ATOM_I2C_RECORD *i2c_record;
+ ATOM_HPD_INT_RECORD *hpd_record;
+ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+ hpd.hpd = RADEON_HPD_NONE;
while (record->ucRecordType > 0
&& record->
ucRecordType <=
ATOM_MAX_OBJECT_RECORD_NUMBER) {
- switch (record->
- ucRecordType) {
+ switch (record->ucRecordType) {
case ATOM_I2C_RECORD_TYPE:
i2c_record =
- (ATOM_I2C_RECORD
- *) record;
- line_mux =
- i2c_record->
- sucI2cId.
- bfI2C_LineMux;
+ (ATOM_I2C_RECORD *)
+ record;
+ i2c_config =
+ (ATOM_I2C_ID_CONFIG_ACCESS *)
+ &i2c_record->sucI2cId;
+ ddc_bus = radeon_lookup_i2c_gpio(rdev,
+ i2c_config->
+ ucAccess);
+ break;
+ case ATOM_HPD_INT_RECORD_TYPE:
+ hpd_record =
+ (ATOM_HPD_INT_RECORD *)
+ record;
+ gpio = radeon_lookup_gpio(rdev,
+ hpd_record->ucHPDIntGPIOID);
+ hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ hpd.plugged_state = hpd_record->ucPlugged_PinState;
break;
}
record =
@@ -421,24 +548,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
break;
}
}
- } else
- line_mux = 0;
-
- if ((le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_TV1_SUPPORT)
- || (le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_TV2_SUPPORT)
- || (le16_to_cpu(path->usDeviceTag) ==
- ATOM_DEVICE_CV_SUPPORT))
+ } else {
+ hpd.hpd = RADEON_HPD_NONE;
ddc_bus.valid = false;
- else
- ddc_bus = radeon_lookup_gpio(dev, line_mux);
+ }
conn_id = le16_to_cpu(path->usConnObjectId);
if (!radeon_atom_apply_quirks
(dev, le16_to_cpu(path->usDeviceTag), &connector_type,
- &ddc_bus, &conn_id))
+ &ddc_bus, &conn_id, &hpd))
continue;
radeon_add_atom_connector(dev,
@@ -447,7 +566,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
usDeviceTag),
connector_type, &ddc_bus,
linkb, igp_lane_info,
- connector_object_id);
+ connector_object_id,
+ &hpd);
}
}
@@ -502,6 +622,7 @@ struct bios_connector {
uint16_t devices;
int connector_type;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_hpd hpd;
};
bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@@ -517,7 +638,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
uint16_t device_support;
uint8_t dac;
union atom_supported_devices *supported_devices;
- int i, j;
+ int i, j, max_device;
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -527,7 +648,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ if (frev > 1)
+ max_device = ATOM_MAX_SUPPORTED_DEVICE;
+ else
+ max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+ for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
@@ -553,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
- if ((rdev->family == CHIP_RS690) ||
- (rdev->family == CHIP_RS740)) {
- if ((i == ATOM_DEVICE_DFP2_INDEX)
- && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
- else if ((i == ATOM_DEVICE_DFP3_INDEX)
- && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
- else
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux;
- } else
- bios_connectors[i].line_mux =
- ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+ bios_connectors[i].line_mux =
+ ci.sucI2cId.ucAccess;
/* give tv unique connector ids */
if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -582,8 +694,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
bios_connectors[i].line_mux = 52;
} else
bios_connectors[i].ddc_bus =
- radeon_lookup_gpio(dev,
- bios_connectors[i].line_mux);
+ radeon_lookup_i2c_gpio(rdev,
+ bios_connectors[i].line_mux);
+
+ if ((crev > 1) && (frev > 1)) {
+ u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+ switch (isb) {
+ case 0x4:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ break;
+ case 0xa:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ break;
+ default:
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+ } else {
+ if (i == ATOM_DEVICE_DFP1_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+ else if (i == ATOM_DEVICE_DFP2_INDEX)
+ bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+ else
+ bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+ }
/* Always set the connector type to VGA for CRT1/CRT2. if they are
* shared with a DVI port, we'll pick up the DVI connector when we
@@ -595,7 +729,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
if (!radeon_atom_apply_quirks
(dev, (1 << i), &bios_connectors[i].connector_type,
- &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
+ &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+ &bios_connectors[i].hpd))
continue;
bios_connectors[i].valid = true;
@@ -610,41 +745,42 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
else
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
- (1 <<
- i),
+ (1 << i),
dac),
(1 << i));
}
/* combine shared connectors */
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
- for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+ for (j = 0; j < max_device; j++) {
if (bios_connectors[j].valid && (i != j)) {
if (bios_connectors[i].line_mux ==
bios_connectors[j].line_mux) {
- if (((bios_connectors[i].
- devices &
- (ATOM_DEVICE_DFP_SUPPORT))
- && (bios_connectors[j].
- devices &
- (ATOM_DEVICE_CRT_SUPPORT)))
- ||
- ((bios_connectors[j].
- devices &
- (ATOM_DEVICE_DFP_SUPPORT))
- && (bios_connectors[i].
- devices &
- (ATOM_DEVICE_CRT_SUPPORT)))) {
- bios_connectors[i].
- devices |=
- bios_connectors[j].
- devices;
- bios_connectors[i].
- connector_type =
- DRM_MODE_CONNECTOR_DVII;
- bios_connectors[j].
- valid = false;
+ /* make sure not to combine LVDS */
+ if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[i].line_mux = 53;
+ bios_connectors[i].ddc_bus.valid = false;
+ continue;
+ }
+ if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ bios_connectors[j].line_mux = 53;
+ bios_connectors[j].ddc_bus.valid = false;
+ continue;
+ }
+ /* combine analog and digital for DVI-I */
+ if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
+ ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
+ (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
+ bios_connectors[i].devices |=
+ bios_connectors[j].devices;
+ bios_connectors[i].connector_type =
+ DRM_MODE_CONNECTOR_DVII;
+ if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
+ bios_connectors[i].hpd =
+ bios_connectors[j].hpd;
+ bios_connectors[j].valid = false;
}
}
}
@@ -653,7 +789,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
/* add the connectors */
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ for (i = 0; i < max_device; i++) {
if (bios_connectors[i].valid) {
uint16_t connector_object_id =
atombios_get_connector_object_id(dev,
@@ -666,7 +802,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
connector_type,
&bios_connectors[i].ddc_bus,
false, 0,
- connector_object_id);
+ connector_object_id,
+ &bios_connectors[i].hpd);
}
}
@@ -731,7 +868,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
* pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
* family.
*/
- p1pll->pll_out_min = 64800;
+ if (!radeon_new_pll)
+ p1pll->pll_out_min = 64800;
}
p1pll->pll_in_min =
@@ -861,6 +999,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
uint8_t frev, crev;
struct radeon_atom_ss *ss = NULL;
+ int i;
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
@@ -878,12 +1017,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
if (!ss)
return NULL;
- ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
- ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
- ss->step = ss_info->asSS_Info[id].ucSS_Step;
- ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
- ss->range = ss_info->asSS_Info[id].ucSS_Range;
- ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+ for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+ if (ss_info->asSS_Info[i].ucSS_Id == id) {
+ ss->percentage =
+ le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+ ss->step = ss_info->asSS_Info[i].ucSS_Step;
+ ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+ ss->range = ss_info->asSS_Info[i].ucSS_Range;
+ ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ }
+ }
}
return ss;
}
@@ -901,7 +1045,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
- uint16_t data_offset;
+ uint16_t data_offset, misc;
union lvds_info *lvds_info;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
@@ -940,6 +1084,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+ misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+ if (misc & ATOM_VSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+ if (misc & ATOM_HSYNC_POLARITY)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+ if (misc & ATOM_COMPOSITESYNC)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+ if (misc & ATOM_INTERLACE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+ if (misc & ATOM_DOUBLE_CLOCK_MODE)
+ lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1074,6 +1231,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
return true;
}
+enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
+ uint16_t data_offset;
+ uint8_t frev, crev;
+ struct _ATOM_ANALOG_TV_INFO *tv_info;
+ enum radeon_tv_std tv_std = TV_STD_NTSC;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+
+ tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+
+ switch (tv_info->ucTV_BootUpDefaultStandard) {
+ case ATOM_TV_NTSC:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Default TV standard: NTSC\n");
+ break;
+ case ATOM_TV_NTSCJ:
+ tv_std = TV_STD_NTSC_J;
+ DRM_INFO("Default TV standard: NTSC-J\n");
+ break;
+ case ATOM_TV_PAL:
+ tv_std = TV_STD_PAL;
+ DRM_INFO("Default TV standard: PAL\n");
+ break;
+ case ATOM_TV_PALM:
+ tv_std = TV_STD_PAL_M;
+ DRM_INFO("Default TV standard: PAL-M\n");
+ break;
+ case ATOM_TV_PALN:
+ tv_std = TV_STD_PAL_N;
+ DRM_INFO("Default TV standard: PAL-N\n");
+ break;
+ case ATOM_TV_PALCN:
+ tv_std = TV_STD_PAL_CN;
+ DRM_INFO("Default TV standard: PAL-CN\n");
+ break;
+ case ATOM_TV_PAL60:
+ tv_std = TV_STD_PAL_60;
+ DRM_INFO("Default TV standard: PAL-60\n");
+ break;
+ case ATOM_TV_SECAM:
+ tv_std = TV_STD_SECAM;
+ DRM_INFO("Default TV standard: SECAM\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
+ return tv_std;
+}
+
struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
{
@@ -1109,6 +1321,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
}
return tv_dac;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10bd50a7db8..4ddfd4b5bc5 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
unsigned sdomain, unsigned ddomain)
{
- struct radeon_object *dobj = NULL;
- struct radeon_object *sobj = NULL;
+ struct radeon_bo *dobj = NULL;
+ struct radeon_bo *sobj = NULL;
struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+ r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(sobj, sdomain, &saddr);
+ r = radeon_bo_reserve(sobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(sobj, sdomain, &saddr);
+ radeon_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+ r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
- r = radeon_object_pin(dobj, ddomain, &daddr);
+ r = radeon_bo_reserve(dobj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(dobj, ddomain, &daddr);
+ radeon_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
}
out_cleanup:
if (sobj) {
- radeon_object_unpin(sobj);
- radeon_object_unref(&sobj);
+ r = radeon_bo_reserve(sobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(sobj);
+ radeon_bo_unreserve(sobj);
+ }
+ radeon_bo_unref(&sobj);
}
if (dobj) {
- radeon_object_unpin(dobj);
- radeon_object_unref(&dobj);
+ r = radeon_bo_reserve(dobj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(dobj);
+ radeon_bo_unreserve(dobj);
+ }
+ radeon_bo_unref(&dobj);
}
if (fence) {
radeon_fence_unref(&fence);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index a8135416762..812f24dbc2a 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
sclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -58,7 +62,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
}
/* 10 khz */
-static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
{
struct radeon_pll *mpll = &rdev->clock.mpll;
uint32_t fb_div, ref_div, post_div, mclk;
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
ref_div =
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+ if (ref_div == 0)
+ return 0;
+
mclk = fb_div / ref_div;
post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
ret = radeon_combios_get_clock_info(dev);
if (ret) {
- if (p1pll->reference_div < 2)
- p1pll->reference_div = 12;
+ if (p1pll->reference_div < 2) {
+ if (!ASIC_IS_AVIVO(rdev)) {
+ u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+ if (ASIC_IS_R300(rdev))
+ p1pll->reference_div =
+ (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+ else
+ p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ } else
+ p1pll->reference_div = 12;
+ }
if (p2pll->reference_div < 2)
p2pll->reference_div = 12;
if (rdev->family < CHIP_RS600) {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5253cbf6db1..fd94dbca33a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id);
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@@ -442,38 +443,70 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
-struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+ int ddc_line)
{
struct radeon_i2c_bus_rec i2c;
- i2c.mask_clk_mask = RADEON_GPIO_EN_1;
- i2c.mask_data_mask = RADEON_GPIO_EN_0;
- i2c.a_clk_mask = RADEON_GPIO_A_1;
- i2c.a_data_mask = RADEON_GPIO_A_0;
- i2c.put_clk_mask = RADEON_GPIO_EN_1;
- i2c.put_data_mask = RADEON_GPIO_EN_0;
- i2c.get_clk_mask = RADEON_GPIO_Y_1;
- i2c.get_data_mask = RADEON_GPIO_Y_0;
- if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
- (ddc_line == RADEON_MDGPIO_EN_REG)) {
- i2c.mask_clk_reg = ddc_line;
- i2c.mask_data_reg = ddc_line;
- i2c.a_clk_reg = ddc_line;
- i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line + 4;
- i2c.get_data_reg = ddc_line + 4;
+ if (ddc_line == RADEON_GPIOPAD_MASK) {
+ i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c.a_data_reg = RADEON_GPIOPAD_A;
+ i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c.y_data_reg = RADEON_GPIOPAD_Y;
+ } else if (ddc_line == RADEON_MDGPIO_MASK) {
+ i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+ i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+ i2c.a_clk_reg = RADEON_MDGPIO_A;
+ i2c.a_data_reg = RADEON_MDGPIO_A;
+ i2c.en_clk_reg = RADEON_MDGPIO_EN;
+ i2c.en_data_reg = RADEON_MDGPIO_EN;
+ i2c.y_clk_reg = RADEON_MDGPIO_Y;
+ i2c.y_data_reg = RADEON_MDGPIO_Y;
} else {
+ i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+ i2c.mask_data_mask = RADEON_GPIO_EN_0;
+ i2c.a_clk_mask = RADEON_GPIO_A_1;
+ i2c.a_data_mask = RADEON_GPIO_A_0;
+ i2c.en_clk_mask = RADEON_GPIO_EN_1;
+ i2c.en_data_mask = RADEON_GPIO_EN_0;
+ i2c.y_clk_mask = RADEON_GPIO_Y_1;
+ i2c.y_data_mask = RADEON_GPIO_Y_0;
+
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
i2c.a_data_reg = ddc_line;
- i2c.put_clk_reg = ddc_line;
- i2c.put_data_reg = ddc_line;
- i2c.get_clk_reg = ddc_line;
- i2c.get_data_reg = ddc_line;
+ i2c.en_clk_reg = ddc_line;
+ i2c.en_data_reg = ddc_line;
+ i2c.y_clk_reg = ddc_line;
+ i2c.y_data_reg = ddc_line;
+ }
+
+ if (rdev->family < CHIP_R200)
+ i2c.hw_capable = false;
+ else {
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ case RADEON_GPIO_MONID:
+ /* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+ * reliably on some pre-r4xx hardware; not sure why.
+ */
+ i2c.hw_capable = false;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
}
+ i2c.mm_i2c = false;
+ i2c.i2c_id = 0;
if (ddc_line)
i2c.valid = true;
@@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
uint16_t sclk, mclk;
if (rdev->bios == NULL)
- return NULL;
+ return false;
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
if (pll_info) {
@@ -601,11 +634,10 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
return p_dac;
}
-static enum radeon_tv_std
-radeon_combios_get_tv_info(struct radeon_encoder *encoder)
+enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev)
{
- struct drm_device *dev = encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *dev = rdev->ddev;
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
@@ -746,7 +778,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
found = 1;
}
- tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
+ tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
}
if (!found) {
/* then check CRT table */
@@ -993,8 +1025,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */
- {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */
+ { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */
};
bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
@@ -1028,7 +1060,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
-
ver = RBIOS8(tmds_info);
DRM_INFO("DFP table revision: %d\n", ver);
if (ver == 3) {
@@ -1063,51 +1094,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
tmds->tmds_pll[i].value);
}
}
- } else
+ } else {
DRM_INFO("No TMDS info found in BIOS\n");
+ return false;
+ }
return true;
}
-struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
- struct radeon_encoder_int_tmds *tmds = NULL;
- bool ret;
-
- tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_i2c_bus_rec i2c_bus;
- if (!tmds)
- return NULL;
+ /* default for macs */
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
- if (ret == false)
- radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+ /* XXX some macs have duallink chips */
+ switch (rdev->mode_info.connector_table) {
+ case CT_POWERBOOK_EXTERNAL:
+ case CT_MINI_EXTERNAL:
+ default:
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
- return tmds;
+ return true;
}
-void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
- uint16_t ext_tmds_info;
- uint8_t ver;
+ uint16_t offset;
+ uint8_t ver, id, blocks, clk, data;
+ int i;
+ enum radeon_combios_ddc gpio;
+ struct radeon_i2c_bus_rec i2c_bus;
if (rdev->bios == NULL)
- return;
+ return false;
- ext_tmds_info =
- combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
- if (ext_tmds_info) {
- ver = RBIOS8(ext_tmds_info);
- DRM_INFO("External TMDS Table revision: %d\n", ver);
- // TODO
+ tmds->i2c_bus = NULL;
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("GPIO Table revision: %d\n", ver);
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ i2c_bus.valid = true;
+ i2c_bus.mask_clk_mask = (1 << clk);
+ i2c_bus.mask_data_mask = (1 << data);
+ i2c_bus.a_clk_mask = (1 << clk);
+ i2c_bus.a_data_mask = (1 << data);
+ i2c_bus.en_clk_mask = (1 << clk);
+ i2c_bus.en_data_mask = (1 << data);
+ i2c_bus.y_clk_mask = (1 << clk);
+ i2c_bus.y_data_mask = (1 << data);
+ i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
+ i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
+ i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
+ i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
+ i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
+ i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+ break;
+ }
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ ver = RBIOS8(offset);
+ DRM_INFO("External TMDS Table revision: %d\n", ver);
+ tmds->slave_addr = RBIOS8(offset + 4 + 2);
+ tmds->slave_addr >>= 1; /* 7 bit addressing */
+ gpio = RBIOS8(offset + 4 + 3);
+ switch (gpio) {
+ case DDC_MONID:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_DVI:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_VGA:
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_CRT2:
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if (rdev->family >= CHIP_R300)
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ else
+ i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ break;
+ case DDC_LCD: /* MM i2c */
+ DRM_ERROR("MM i2c requires hw i2c engine\n");
+ break;
+ default:
+ DRM_ERROR("Unsupported gpio %d\n", gpio);
+ break;
+ }
+ }
}
+
+ if (!tmds->i2c_bus) {
+ DRM_INFO("No valid Ext TMDS info found in BIOS\n");
+ return false;
+ }
+
+ return true;
}
bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
rdev->mode_info.connector_table = radeon_connector_table;
if (rdev->mode_info.connector_table == CT_NONE) {
@@ -1168,7 +1287,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
/* these are the most common settings */
if (rdev->flags & RADEON_SINGLE_CRTC) {
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1178,10 +1298,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
+ ddc_i2c = combios_setup_i2c_bus(rdev, 0);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1191,10 +1313,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1204,10 +1328,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else {
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1223,10 +1349,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1236,11 +1364,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
}
if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
/* TV - tv dac */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1250,14 +1381,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
}
break;
case CT_IBOOK:
DRM_INFO("Connector Table: %d (ibook)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1265,9 +1398,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - TV DAC */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1275,8 +1410,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1285,13 +1423,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_EXTERNAL:
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1299,9 +1439,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* DVI-I - primary dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
@@ -1317,8 +1459,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1327,13 +1472,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_INTERNAL:
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1341,9 +1488,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* DVI-I - primary dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1358,8 +1507,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1368,13 +1520,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_POWERBOOK_VGA:
DRM_INFO("Connector Table: %d (powerbook vga)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_LCD1_SUPPORT,
@@ -1382,9 +1536,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1392,8 +1548,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1402,13 +1561,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_MINI_EXTERNAL:
DRM_INFO("Connector Table: %d (mini external tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP2_SUPPORT,
@@ -1424,8 +1585,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP2_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1434,13 +1598,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_MINI_INTERNAL:
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1455,8 +1621,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1465,13 +1634,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_IMAC_G5_ISIGHT:
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
rdev->mode_info.connector_table);
/* DVI-D - int tmds */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_DFP1_SUPPORT,
@@ -1479,9 +1650,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1489,8 +1662,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1499,13 +1675,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
case CT_EMAC:
DRM_INFO("Connector Table: %d (emac)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
@@ -1513,9 +1691,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
@@ -1523,8 +1703,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
/* TV - TV DAC */
+ ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_TV1_SUPPORT,
@@ -1533,7 +1716,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
@@ -1550,7 +1734,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
int bios_index,
enum radeon_combios_connector
*legacy_connector,
- struct radeon_i2c_bus_rec *ddc_i2c)
+ struct radeon_i2c_bus_rec *ddc_i2c,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
@@ -1558,29 +1743,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
- *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
else if ((rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) &&
ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
- ddc_i2c->valid = true;
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
ddc_i2c->mask_clk_mask = (0x20 << 8);
ddc_i2c->mask_data_mask = 0x80;
ddc_i2c->a_clk_mask = (0x20 << 8);
ddc_i2c->a_data_mask = 0x80;
- ddc_i2c->put_clk_mask = (0x20 << 8);
- ddc_i2c->put_data_mask = 0x80;
- ddc_i2c->get_clk_mask = (0x20 << 8);
- ddc_i2c->get_data_mask = 0x80;
- ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
- ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
- ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
- ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
- ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
- ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
- ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
+ ddc_i2c->en_clk_mask = (0x20 << 8);
+ ddc_i2c->en_data_mask = 0x80;
+ ddc_i2c->y_clk_mask = (0x20 << 8);
+ ddc_i2c->y_data_mask = 0x80;
}
+ /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+ if ((rdev->family >= CHIP_R300) &&
+ ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+ *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
if (dev->pdev->device == 0x515e &&
@@ -1624,6 +1806,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
dev->pdev->subsystem_device == 0x280a)
return false;
+ /* MSI S270 has non-existent TV port */
+ if (dev->pdev->device == 0x5955 &&
+ dev->pdev->subsystem_vendor == 0x1462 &&
+ dev->pdev->subsystem_device == 0x0131)
+ return false;
+
return true;
}
@@ -1671,6 +1859,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
enum radeon_combios_connector connector;
int i = 0;
struct radeon_i2c_bus_rec ddc_i2c;
+ struct radeon_hpd hpd;
if (rdev->bios == NULL)
return false;
@@ -1691,26 +1880,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
switch (ddc_type) {
case DDC_MONID:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_MONID);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
- combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
+ combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
break;
default:
break;
}
+ switch (connector) {
+ case CONNECTOR_PROPRIETARY_LEGACY:
+ case CONNECTOR_DVI_I_LEGACY:
+ case CONNECTOR_DVI_D_LEGACY:
+ if ((tmp >> 4) & 0x1)
+ hpd.hpd = RADEON_HPD_2;
+ else
+ hpd.hpd = RADEON_HPD_1;
+ break;
+ default:
+ hpd.hpd = RADEON_HPD_NONE;
+ break;
+ }
+
if (!radeon_apply_legacy_quirks(dev, i, &connector,
- &ddc_i2c))
+ &ddc_i2c, &hpd))
continue;
switch (connector) {
@@ -1727,7 +1930,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
+ &hpd);
break;
case CONNECTOR_CRT_LEGACY:
if (tmp & 0x1) {
@@ -1753,7 +1957,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
break;
case CONNECTOR_DVI_I_LEGACY:
devices = 0;
@@ -1799,7 +2004,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- connector_object_id);
+ connector_object_id,
+ &hpd);
break;
case CONNECTOR_DVI_D_LEGACY:
if ((tmp >> 4) & 0x1) {
@@ -1817,7 +2023,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- connector_object_id);
+ connector_object_id,
+ &hpd);
break;
case CONNECTOR_CTV_LEGACY:
case CONNECTOR_STV_LEGACY:
@@ -1832,7 +2039,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
legacy_connector_convert
[connector],
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
break;
default:
DRM_ERROR("Unknown connector type: %d\n",
@@ -1858,14 +2066,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
0),
ATOM_DEVICE_DFP1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT |
ATOM_DEVICE_DFP1_SUPPORT,
DRM_MODE_CONNECTOR_DVII,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+ &hpd);
} else {
uint16_t crt_info =
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
@@ -1876,13 +2086,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_VGA);
+ CONNECTOR_OBJECT_ID_VGA,
+ &hpd);
} else {
DRM_DEBUG("No connector info found\n");
return false;
@@ -1910,27 +2122,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
case DDC_MONID:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_MONID);
+ (rdev, RADEON_GPIO_MONID);
break;
case DDC_DVI:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_DVI_DDC);
+ (rdev, RADEON_GPIO_DVI_DDC);
break;
case DDC_VGA:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_VGA_DDC);
+ (rdev, RADEON_GPIO_VGA_DDC);
break;
case DDC_CRT2:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_GPIO_CRT2_DDC);
+ (rdev, RADEON_GPIO_CRT2_DDC);
break;
case DDC_LCD:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_LCD_GPIO_MASK);
+ (rdev, RADEON_GPIOPAD_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
@@ -1939,19 +2151,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
case DDC_GPIO:
ddc_i2c =
combios_setup_i2c_bus
- (RADEON_MDGPIO_EN_REG);
+ (rdev, RADEON_MDGPIO_MASK);
ddc_i2c.mask_clk_mask =
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.mask_data_mask =
@@ -1960,13 +2172,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
RBIOS32(lcd_ddc_info + 3);
ddc_i2c.a_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.put_clk_mask =
+ ddc_i2c.en_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.put_data_mask =
+ ddc_i2c.en_data_mask =
RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.get_clk_mask =
+ ddc_i2c.y_clk_mask =
RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.get_data_mask =
+ ddc_i2c.y_data_mask =
RBIOS32(lcd_ddc_info + 7);
break;
default:
@@ -1977,12 +2189,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else
ddc_i2c.valid = false;
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
5,
ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_LVDS);
+ CONNECTOR_OBJECT_ID_LVDS,
+ &hpd);
}
}
@@ -1993,6 +2207,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
if (radeon_apply_legacy_tv_quirks(dev)) {
+ hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id
(dev,
@@ -2003,7 +2218,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
- CONNECTOR_OBJECT_ID_SVIDEO);
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
}
}
}
@@ -2014,6 +2230,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
return true;
}
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (!tmds)
+ return;
+
+ switch (tmds->dvo_chip) {
+ case DVO_SIL164:
+ /* sil 164 */
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x09, 0x00);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x3b);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ case DVO_SIL1178:
+ /* sil 1178 - untested */
+ /*
+ * 0x0f, 0x44
+ * 0x0f, 0x4c
+ * 0x0e, 0x01
+ * 0x0a, 0x80
+ * 0x09, 0x30
+ * 0x0c, 0xc9
+ * 0x0d, 0x70
+ * 0x08, 0x32
+ * 0x08, 0x33
+ */
+ break;
+ default:
+ break;
+ }
+
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ uint16_t offset;
+ uint8_t blocks, slave_addr, rev;
+ uint32_t index, id;
+ uint32_t reg, val, and_mask, or_mask;
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+ if (rdev->bios == NULL)
+ return false;
+
+ if (!tmds)
+ return false;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+ rev = RBIOS8(offset);
+ if (offset) {
+ rev = RBIOS8(offset);
+ if (rev > 1) {
+ blocks = RBIOS8(offset + 3);
+ index = offset + 4;
+ while (blocks > 0) {
+ id = RBIOS16(index);
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ index += 4;
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 3:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val * 1000);
+ break;
+ case 6:
+ slave_addr = id & 0xff;
+ slave_addr >>= 1; /* 7 bit addressing */
+ index++;
+ reg = RBIOS8(index);
+ index++;
+ val = RBIOS8(index);
+ index++;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ blocks--;
+ }
+ return true;
+ }
+ }
+ } else {
+ offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+ if (offset) {
+ index = offset + 10;
+ id = RBIOS16(index);
+ while (id != 0xffff) {
+ index += 2;
+ switch (id >> 13) {
+ case 0:
+ reg = (id & 0x1fff) * 4;
+ val = RBIOS32(index);
+ WREG32(reg, val);
+ break;
+ case 2:
+ reg = (id & 0x1fff) * 4;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32(reg, val);
+ break;
+ case 4:
+ val = RBIOS16(index);
+ index += 2;
+ udelay(val);
+ break;
+ case 5:
+ reg = id & 0x1fff;
+ and_mask = RBIOS32(index);
+ index += 4;
+ or_mask = RBIOS32(index);
+ index += 4;
+ val = RREG32_PLL(reg);
+ val = (val & and_mask) | or_mask;
+ WREG32_PLL(reg, val);
+ break;
+ case 6:
+ reg = id & 0x1fff;
+ val = RBIOS8(index);
+ index += 1;
+ radeon_i2c_do_lock(tmds->i2c_bus, 1);
+ radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
+ radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ break;
+ default:
+ DRM_ERROR("Unknown id %d\n", id >> 13);
+ break;
+ }
+ id = RBIOS16(index);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
{
struct radeon_device *rdev = dev->dev_private;
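The connector tables above all follow the registration pattern this patch extends: look up the DDC pad with combios_setup_i2c_bus(rdev, ...), pick a hot-plug detect pin for the port (RADEON_HPD_1/RADEON_HPD_2) or RADEON_HPD_NONE where none is wired, then hand both records to radeon_add_legacy_connector() together with the matching encoder. A minimal sketch of that pattern, using only calls visible in the hunks; the helper name, connector index and the choice of HPD pin are illustrative, not taken from any specific board table.

static void example_register_dvi(struct drm_device *dev,
                                 struct radeon_device *rdev)
{
        struct radeon_i2c_bus_rec ddc_i2c;
        struct radeon_hpd hpd;

        /* DDC lives on the DVI GPIO pad; HPD pin 1 is assumed wired here */
        ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
        hpd.hpd = RADEON_HPD_1;

        radeon_add_legacy_encoder(dev,
                                  radeon_get_encoder_id(dev,
                                                        ATOM_DEVICE_DFP1_SUPPORT,
                                                        0),
                                  ATOM_DEVICE_DFP1_SUPPORT);
        radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
                                    DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
                                    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
                                    &hpd);
}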
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 29763ceae3a..20161567dbf 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_needs_link_train(radeon_connector)) {
+ if (connector->encoder)
+ dp_link_train(connector->encoder, connector);
+ }
+ }
+ }
+
+}
+
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
@@ -188,6 +208,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
drm_mode_set_name(mode);
DRM_DEBUG("Adding native panel mode %s\n", mode->name);
+ } else if (native_mode->hdisplay != 0 &&
+ native_mode->vdisplay != 0) {
+ /* mac laptops without an edid */
+ /* Note that this is not necessarily the exact panel mode,
+ * but an approximation based on the cvt formula. For these
+ * systems we should ideally read the mode info out of the
+ * registers or add a mode table, but this works and is much
+ * simpler.
+ */
+ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+ DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
}
return mode;
}
@@ -445,10 +477,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
@@ -553,17 +585,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -708,17 +740,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -735,6 +767,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
ret = connector_status_disconnected;
} else
ret = connector_status_connected;
+
+ /* multiple connectors on the same encoder with the same ddc line
+ * This tends to be HDMI and DVI on the same encoder with the
+ * same ddc line. If the edid says HDMI, consider the HDMI port
+ * connected and the DVI port disconnected. If the edid doesn't
+ * say HDMI, vice versa.
+ */
+ if (radeon_connector->shared_ddc && connector_status_connected) {
+ struct drm_device *dev = connector->dev;
+ struct drm_connector *list_connector;
+ struct radeon_connector *list_radeon_connector;
+ list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+ if (connector == list_connector)
+ continue;
+ list_radeon_connector = to_radeon_connector(list_connector);
+ if (radeon_connector->devices == list_radeon_connector->devices) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ } else {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ ret = connector_status_disconnected;
+ }
+ }
+ }
+ }
+ }
}
}
@@ -863,6 +928,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
.force = radeon_dvi_force,
};
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ if (radeon_connector->ddc_bus)
+ radeon_i2c_destroy(radeon_connector->ddc_bus);
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
+ if (radeon_dig_connector->dp_i2c_bus)
+ radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+ kfree(radeon_connector->con_priv);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+ return ret;
+}
+
+static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ enum drm_connector_status ret = connector_status_disconnected;
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ u8 sink_type;
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+
+ sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dp_getdpcd(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
+ ret = connector_status_connected;
+ }
+ } else {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_ddc_probe(radeon_connector)) {
+ radeon_dig_connector->dp_sink_type = sink_type;
+ ret = connector_status_connected;
+ }
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
+
+ return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ /* XXX check mode bandwidth */
+
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return radeon_dp_mode_valid_helper(radeon_connector, mode);
+ else
+ return MODE_OK;
+}
+
+struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+ .get_modes = radeon_dp_get_modes,
+ .mode_valid = radeon_dp_mode_valid,
+ .best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = radeon_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = radeon_connector_set_property,
+ .destroy = radeon_dp_connector_destroy,
+ .force = radeon_dvi_force,
+};
+
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
@@ -871,7 +1021,8 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb,
uint32_t igp_lane_info,
- uint16_t connector_object_id)
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -911,6 +1062,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -963,10 +1115,12 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
- radeon_connector->dac_load_detect = true;
- drm_connector_attach_property(&radeon_connector->base,
- rdev->mode_info.load_detect_property,
- 1);
+ if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ radeon_connector->dac_load_detect = true;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.load_detect_property,
+ 1);
+ }
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@@ -997,16 +1151,23 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->linkb = linkb;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
- drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
- ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+ drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+ ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (ret)
goto failed;
if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (!radeon_dig_connector->dp_i2c_bus)
+ goto failed;
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
subpixel_order = SubPixelHorizontalRGB;
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.coherent_mode_property,
+ 1);
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1020,6 +1181,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_atombios_get_tv_info(rdev));
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -1038,7 +1202,6 @@ radeon_add_atom_connector(struct drm_device *dev,
if (!radeon_connector->ddc_bus)
goto failed;
}
- drm_mode_create_scaling_mode_property(dev);
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1063,7 +1226,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id)
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -1093,6 +1257,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->connector_id = connector_id;
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
+ radeon_connector->hpd = *hpd;
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1160,6 +1325,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.tv_std_property,
+ radeon_combios_get_tv_info(rdev));
}
break;
case DRM_MODE_CONNECTOR_LVDS:
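radeon_connector_hotplug() added above re-arms the HPD pin polarity for a connector and, for DisplayPort sinks that still need training, retrains the link via dp_link_train(). A sketch of how a hotplug worker might consume it: walk the connector list, give each connector a chance to re-arm and retrain, then let the DRM helper re-probe outputs and notify userspace. The worker name is hypothetical and the scheduling from the HPD interrupt is outside this excerpt.

static void example_hotplug_work(struct drm_device *dev)
{
        struct drm_connector *connector;

        /* re-arm HPD polarity and retrain DP links where needed */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                radeon_connector_hotplug(connector);

        /* re-detect all outputs and send the hotplug uevent */
        drm_helper_hpd_irq_event(dev);
}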
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 4f7afc79dd8..0b2f9c2ad2c 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
for (t = 0; t < dev_priv->usec_timeout; t++) {
u32 done_age = GET_SCRATCH(dev_priv, 1);
DRM_DEBUG("done_age = %d\n", done_age);
- for (i = start; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[start];
buf_priv = buf->dev_private;
if (buf->file_priv == NULL || (buf->pending &&
buf_priv->age <=
@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
buf->pending = 0;
return buf;
}
- start = 0;
+ if (++start >= dma->buf_count)
+ start = 0;
}
if (t) {
@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
}
}
- DRM_DEBUG("returning NULL!\n");
return NULL;
}
-#if 0
-struct drm_buf *radeon_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_buf_priv_t *buf_priv;
- struct drm_buf *buf;
- int i, t;
- int start;
- u32 done_age;
-
- done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
- if (++dev_priv->last_buf >= dma->buf_count)
- dev_priv->last_buf = 0;
-
- start = dev_priv->last_buf;
- dev_priv->stats.freelist_loops++;
-
- for (t = 0; t < 2; t++) {
- for (i = start; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
- if (buf->file_priv == 0 || (buf->pending &&
- buf_priv->age <=
- done_age)) {
- dev_priv->stats.requested_bufs++;
- buf->pending = 0;
- return buf;
- }
- }
- start = 0;
- }
-
- return NULL;
-}
-#endif
-
void radeon_freelist_reset(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
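The radeon_cp.c change above fixes the freelist scan: the old loop ran from start to buf_count and never wrapped, so buffers below the starting index were skipped for the rest of the call. The new loop always visits buf_count entries and wraps the index back to zero. The wrap-around pattern in isolation, as a self-contained sketch with a plain array instead of the DRM buffer list:

/* Scan n entries round-robin starting at *start; return the first free
 * index or -1. *start is left at the hit so the next scan resumes there
 * and no entry is starved.
 */
static int scan_round_robin(const int *busy, int n, int *start)
{
        int i, idx = *start;

        for (i = 0; i < n; i++) {
                if (!busy[idx]) {
                        *start = idx;
                        return idx;
                }
                if (++idx >= n)
                        idx = 0;
        }
        return -1;
}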
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf96a26..65590a0f1d9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
- p->relocs[i].lobj.robj = p->relocs[i].robj;
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.rdomain = r->read_domains;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].handle = r->handle;
p->relocs[i].flags = r->flags;
INIT_LIST_HEAD(&p->relocs[i].lobj.list);
- radeon_object_list_add_object(&p->relocs[i].lobj,
- &p->validated);
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
}
}
- return radeon_object_list_validate(&p->validated, p->ib->fence);
+ return radeon_bo_list_validate(&p->validated, p->ib->fence);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
if (error) {
- radeon_object_list_unvalidate(&parser->validated);
+ radeon_bo_list_unvalidate(&parser->validated,
+ parser->ib->fence);
} else {
- radeon_object_list_clean(&parser->validated);
+ radeon_bo_list_unreserve(&parser->validated);
}
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
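The radeon_cs.c hunk moves the reloc path onto the radeon_bo_* API: each reloc's buffer object is attached to lobj.bo along with its read/write domains, queued on the validated list, and the whole list is then validated against the IB's fence; on error the list is unvalidated with that same fence, otherwise only unreserved. A condensed sketch of the per-reloc step (hypothetical helper; the domains come from the user reloc entry, whose type is elided here):

static int example_queue_reloc(struct radeon_cs_parser *p, unsigned i,
                               uint32_t read_domains, uint32_t write_domain)
{
        p->relocs[i].lobj.bo = p->relocs[i].robj;     /* BO backing the reloc */
        p->relocs[i].lobj.rdomain = read_domains;     /* allowed read placement */
        p->relocs[i].lobj.wdomain = write_domain;     /* allowed write placement */
        INIT_LIST_HEAD(&p->relocs[i].lobj.list);
        radeon_bo_list_add_object(&p->relocs[i].lobj, &p->validated);

        /* validate every queued BO against the IB's fence in one pass */
        return radeon_bo_list_validate(&p->validated, p->ib->fence);
}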
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 41bb76fbe73..7c6848096bc 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
if (rdev->family < CHIP_R600) {
int i;
- for (i = 0; i < 8; i++) {
- WREG32(RADEON_SURFACE0_INFO +
- i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
- 0);
+ for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+ if (rdev->surface_regs[i].bo)
+ radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+ else
+ radeon_clear_surface_reg(rdev, i);
}
/* enable surfaces */
WREG32(RADEON_SURFACE_CNTL, 0);
@@ -208,6 +209,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+ if (radeon_card_posted(rdev))
+ return true;
+
+ if (rdev->bios) {
+ DRM_INFO("GPU not posted. posting now...\n");
+ if (rdev->is_atom_bios)
+ atom_asic_init(rdev->mode_info.atom_context);
+ else
+ radeon_combios_asic_init(rdev->ddev);
+ return true;
+ } else {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return false;
+ }
+}
+
int radeon_dummy_page_init(struct radeon_device *rdev)
{
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@@ -372,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev)
/* FIXME: not supported yet */
return -EINVAL;
}
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ rdev->asic->get_memory_clock = NULL;
+ rdev->asic->set_memory_clock = NULL;
+ }
+
return 0;
}
@@ -462,13 +487,18 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+ mutex_init(&rdev->mode_info.atom_context->mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+ atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
}
void radeon_atombios_fini(struct radeon_device *rdev)
{
- kfree(rdev->mode_info.atom_context);
+ if (rdev->mode_info.atom_context) {
+ kfree(rdev->mode_info.atom_context->scratch);
+ kfree(rdev->mode_info.atom_context);
+ }
kfree(rdev->mode_info.atom_card_info);
}
@@ -516,9 +546,72 @@ void radeon_agp_disable(struct radeon_device *rdev)
}
}
-/*
- * Radeon device.
- */
+void radeon_check_arguments(struct radeon_device *rdev)
+{
+ /* vramlimit must be a power of two */
+ switch (radeon_vram_limit) {
+ case 0:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+ radeon_vram_limit);
+ radeon_vram_limit = 0;
+ break;
+ }
+ radeon_vram_limit = radeon_vram_limit << 20;
+ /* gtt size must be power of two and greater or equal to 32M */
+ switch (radeon_gart_size) {
+ case 4:
+ case 8:
+ case 16:
+ dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+ break;
+ case 32:
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+ radeon_gart_size);
+ radeon_gart_size = 512;
+ break;
+ }
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ /* AGP mode can only be -1, 1, 2, 4, 8 */
+ switch (radeon_agpmode) {
+ case -1:
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
+ "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+ radeon_agpmode = 0;
+ break;
+ }
+}
+
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -544,16 +637,24 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
+ if (rdev->family >= CHIP_R600)
+ spin_lock_init(&rdev->ih.lock);
+ mutex_init(&rdev->gem.mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
+ /* setup workqueue */
+ rdev->wq = create_workqueue("radeon");
+ if (rdev->wq == NULL)
+ return -ENOMEM;
+
/* Set asic functions */
r = radeon_asic_init(rdev);
- if (r) {
+ if (r)
return r;
- }
+ radeon_check_arguments(rdev);
- if (radeon_agpmode == -1) {
+ if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
@@ -620,6 +721,7 @@ void radeon_device_fini(struct radeon_device *rdev)
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
radeon_fini(rdev);
+ destroy_workqueue(rdev->wq);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
@@ -633,6 +735,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *crtc;
+ int r;
if (dev == NULL || rdev == NULL) {
return -ENODEV;
@@ -643,26 +746,31 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
- struct radeon_object *robj;
+ struct radeon_bo *robj;
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
robj = rfb->obj->driver_private;
- if (robj != rdev->fbdev_robj) {
- radeon_object_unpin(robj);
+ if (robj != rdev->fbdev_rbo) {
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
}
/* evict vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
radeon_fence_wait_last(rdev);
radeon_save_bios_scratch_regs(rdev);
radeon_suspend(rdev);
+ radeon_hpd_fini(rdev);
/* evict remaining vram memory */
- radeon_object_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev);
pci_save_state(dev->pdev);
if (state.event == PM_EVENT_SUSPEND) {
@@ -695,6 +803,8 @@ int radeon_resume_kms(struct drm_device *dev)
fb_set_suspend(rdev->fbdev_info, 0);
release_console_sem();
+ /* reset hpd state */
+ radeon_hpd_init(rdev);
/* blat the mode back in */
drm_helper_resume_force_mode(dev);
return 0;
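radeon_check_arguments() above sanitizes the module parameters before use: the vram limit and gart size must be powers of two (the gart size additionally at least 32M, otherwise forced to 512M), and agpmode must be one of -1, 0, 1, 2, 4 or 8. The switch-based whitelist amounts to a power-of-two test; a compact sketch of the equivalent check (local helpers, not driver symbols):

/* a non-zero value is a power of two iff exactly one bit is set */
static inline bool is_pow2(unsigned int v)
{
        return v && !(v & (v - 1));
}

/* the gart-size rule above: power of two and >= 32 (MB), else fall back to 512 */
static unsigned int sanitize_gart_size_mb(unsigned int mb)
{
        return (is_pow2(mb) && mb >= 32) ? mb : 512;
}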
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c85df4afcb7..91d72b70abc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -250,6 +250,16 @@ static const char *connector_names[13] = {
"HDMI-B",
};
+static const char *hpd_names[7] = {
+ "NONE",
+ "HPD1",
+ "HPD2",
+ "HPD3",
+ "HPD4",
+ "HPD5",
+ "HPD6",
+};
+
static void radeon_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -264,16 +274,18 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus)
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
radeon_connector->ddc_bus->rec.a_clk_reg,
radeon_connector->ddc_bus->rec.a_data_reg,
- radeon_connector->ddc_bus->rec.put_clk_reg,
- radeon_connector->ddc_bus->rec.put_data_reg,
- radeon_connector->ddc_bus->rec.get_clk_reg,
- radeon_connector->ddc_bus->rec.get_data_reg);
+ radeon_connector->ddc_bus->rec.en_clk_reg,
+ radeon_connector->ddc_bus->rec.en_data_reg,
+ radeon_connector->ddc_bus->rec.y_clk_reg,
+ radeon_connector->ddc_bus->rec.y_data_reg);
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -324,6 +336,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_legacy_connector_info_from_table(dev);
}
if (ret) {
+ radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
@@ -336,12 +349,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
int ret = 0;
+ if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
+ }
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
if (radeon_connector->edid) {
@@ -361,9 +379,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
- radeon_i2c_do_lock(radeon_connector, 1);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector, 0);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
@@ -542,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags)
+{
+ fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+ fixed20_12 pll_out_max, pll_out_min;
+ fixed20_12 pll_in_max, pll_in_min;
+ fixed20_12 reference_freq;
+ fixed20_12 error, ffreq, a, b;
+
+ pll_out_max.full = rfixed_const(pll->pll_out_max);
+ pll_out_min.full = rfixed_const(pll->pll_out_min);
+ pll_in_max.full = rfixed_const(pll->pll_in_max);
+ pll_in_min.full = rfixed_const(pll->pll_in_min);
+ reference_freq.full = rfixed_const(pll->reference_freq);
+ do_div(freq, 10);
+ ffreq.full = rfixed_const(freq);
+ error.full = rfixed_const(100 * 100);
+
+ /* max p */
+ p.full = rfixed_div(pll_out_max, ffreq);
+ p.full = rfixed_floor(p);
+
+ /* min m */
+ m.full = rfixed_div(reference_freq, pll_in_max);
+ m.full = rfixed_ceil(m);
+
+ while (1) {
+ n.full = rfixed_div(ffreq, reference_freq);
+ n.full = rfixed_mul(n, m);
+ n.full = rfixed_mul(n, p);
+
+ f_vco.full = rfixed_div(n, m);
+ f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+ f_pclk.full = rfixed_div(f_vco, p);
+
+ if (f_pclk.full > ffreq.full)
+ error.full = f_pclk.full - ffreq.full;
+ else
+ error.full = ffreq.full - f_pclk.full;
+ error.full = rfixed_div(error, f_pclk);
+ a.full = rfixed_const(100 * 100);
+ error.full = rfixed_mul(error, a);
+
+ a.full = rfixed_mul(m, p);
+ a.full = rfixed_div(n, a);
+ best_freq.full = rfixed_mul(reference_freq, a);
+
+ if (rfixed_trunc(error) < 25)
+ break;
+
+ a.full = rfixed_const(1);
+ m.full = m.full + a.full;
+ a.full = rfixed_div(reference_freq, m);
+ if (a.full >= pll_in_min.full)
+ continue;
+
+ m.full = rfixed_div(reference_freq, pll_in_max);
+ m.full = rfixed_ceil(m);
+ a.full= rfixed_const(1);
+ p.full = p.full - a.full;
+ a.full = rfixed_mul(p, ffreq);
+ if (a.full >= pll_out_min.full)
+ continue;
+ else {
+ DRM_ERROR("Unable to find pll dividers\n");
+ break;
+ }
+ }
+
+ a.full = rfixed_const(10);
+ b.full = rfixed_mul(n, a);
+
+ frac_n.full = rfixed_floor(n);
+ frac_n.full = rfixed_mul(frac_n, a);
+ frac_n.full = b.full - frac_n.full;
+
+ *dot_clock_p = rfixed_trunc(best_freq);
+ *fb_div_p = rfixed_trunc(n);
+ *frac_fb_div_p = rfixed_trunc(frac_n);
+ *ref_div_p = rfixed_trunc(m);
+ *post_div_p = rfixed_trunc(p);
+
+ DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -629,7 +739,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{ TV_STD_SECAM, "secam" },
};
-int radeon_modeset_create_props(struct radeon_device *rdev)
+static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
@@ -642,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
return -ENOMEM;
rdev->mode_info.coherent_mode_property->values[0] = 0;
- rdev->mode_info.coherent_mode_property->values[0] = 1;
+ rdev->mode_info.coherent_mode_property->values[1] = 1;
}
if (!ASIC_IS_AVIVO(rdev)) {
@@ -666,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
rdev->mode_info.load_detect_property->values[0] = 0;
- rdev->mode_info.load_detect_property->values[0] = 1;
+ rdev->mode_info.load_detect_property->values[1] = 1;
drm_mode_create_scaling_mode_property(rdev->ddev);
@@ -723,6 +833,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
if (!ret) {
return ret;
}
+ /* initialize hpd */
+ radeon_hpd_init(rdev);
drm_helper_initial_config(rdev->ddev);
return 0;
}
@@ -730,6 +842,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.mode_config_initialized) {
+ radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
@@ -750,9 +863,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
if (first) {
- radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ /* set scaling */
+ if (radeon_encoder->rmx_type == RMX_OFF)
+ radeon_crtc->rmx_type = RMX_OFF;
+ else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+ mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+ radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+ else
+ radeon_crtc->rmx_type = RMX_OFF;
+ /* copy native mode */
memcpy(&radeon_crtc->native_mode,
- &radeon_encoder->native_mode,
+ &radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
first = false;
} else {
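radeon_compute_pll_avivo() added above works in fixed20_12 fixed point (20.12, via rfixed_const/rfixed_mul/rfixed_div) and walks the divider space so that the VCO and pixel clock follow f_vco = ref * n / m and f_pclk = f_vco / p within the PLL's input and output limits; a candidate is accepted once the relative error, scaled by 100*100, truncates below 25, i.e. under 0.25%. The same relations in plain floating point, purely as a sketch (the kernel code deliberately avoids FP):

/* Relative error of a divider choice against the target pixel clock.
 * ref, n, m and p play the same roles as in the fixed-point loop above.
 */
static double pll_relative_error(double ref, double n, double m, double p,
                                 double target)
{
        double f_vco  = ref * n / m;   /* VCO frequency */
        double f_pclk = f_vco / p;     /* pixel clock after the post divider */
        double err    = f_pclk > target ? f_pclk - target : target - f_pclk;

        return err / f_pclk;           /* accept when this drops below 0.0025 */
}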
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7f50fb864af..8ba3de7994d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -86,6 +86,8 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
+int radeon_new_pll = 1;
+int radeon_audio = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +122,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
+MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -188,7 +196,7 @@ static struct drm_driver driver_old = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
@@ -276,7 +284,7 @@ static struct drm_driver kms_driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = radeon_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
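The radeon_drv.c hunk introduces two module parameters, both read-only via sysfs (0444): new_pll selects the new PLL code for AVIVO chips (default 1) and audio enables HDMI audio (default 1). They take effect at load time; illustrative invocations, not part of the patch:

# modular driver: fall back to the old PLL code and disable HDMI audio
modprobe radeon new_pll=0 audio=0

# built-in driver: append the same settings to the kernel command line
radeon.new_pll=0 radeon.audio=0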
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 350962e0f34..e13785282a8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
# define R600_IT_WAIT_REG_MEM 0x00003C00
# define R600_IT_MEM_WRITE 0x00003D00
# define R600_IT_INDIRECT_BUFFER 0x00003200
-# define R600_IT_CP_INTERRUPT 0x00004000
# define R600_IT_SURFACE_SYNC 0x00004300
# define R600_CB0_DEST_BASE_ENA (1 << 6)
# define R600_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d42bc512d75..ccba95f83d1 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -35,6 +35,51 @@ extern int atom_debug;
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *clone_encoder;
+ uint32_t index_mask = 0;
+ int count;
+
+ /* DIG routing gets problematic */
+ if (rdev->family >= CHIP_R600)
+ return index_mask;
+ /* LVDS/TV are too wacky */
+ if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+ return index_mask;
+ /* DVO requires 2x ppll clocks depending on tmds chip */
+ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ return index_mask;
+
+ count = -1;
+ list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+ struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+ count++;
+
+ if (clone_encoder == encoder)
+ continue;
+ if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ continue;
+ if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+ continue;
+ else
+ index_mask |= (1 << count);
+ }
+ return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ encoder->possible_clones = radeon_encoder_clones(encoder);
+ }
+}
+
uint32_t
radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
{
@@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-
- if (mode->hdisplay < native_mode->hdisplay ||
- mode->vdisplay < native_mode->vdisplay) {
- int mode_id = adjusted_mode->base.id;
- *adjusted_mode = *native_mode;
- if (!ASIC_IS_AVIVO(rdev)) {
- adjusted_mode->hdisplay = mode->hdisplay;
- adjusted_mode->vdisplay = mode->vdisplay;
- }
- adjusted_mode->base.id = mode_id;
- }
-}
-
-
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -198,14 +220,26 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
-
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ if (!ASIC_IS_AVIVO(rdev)) {
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
+ }
+ adjusted_mode->base.id = mode_id;
+ }
+
+ /* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
@@ -218,6 +252,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
}
+ if (ASIC_IS_DCE3(rdev) &&
+ (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ radeon_dp_set_link_config(connector, mode);
+ }
+
return true;
}
@@ -392,7 +432,7 @@ union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
-static void
+void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
@@ -400,6 +440,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
+ int hdmi_detected = 0;
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
struct drm_connector *connector;
@@ -420,6 +461,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (!radeon_connector->con_priv)
return;
+ if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ hdmi_detected = 1;
+
dig_connector = radeon_connector->con_priv;
memset(&args, 0, sizeof(args));
@@ -449,13 +493,13 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (hdmi_detected)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & (1 << 0))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= (1 << 1);
} else {
if (dig_connector->linkb)
@@ -474,7 +518,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (hdmi_detected)
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
@@ -482,18 +526,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (dig->lvds_misc & (1 << 0))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (dig->lvds_misc & (1 << 5)) {
+ if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
- if (dig->lvds_misc & (1 << 6)) {
+ if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
- if (dig->lvds_misc & (1 << 1))
+ if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
- if (((dig->lvds_misc >> 2) & 0x3) == 2)
+ if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
@@ -514,7 +558,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
+ r600_hdmi_enable(encoder, hdmi_detected);
}
int
@@ -522,6 +566,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *radeon_dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -551,10 +596,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- /*if (radeon_output->MonType == MT_DP)
- return ATOM_ENCODER_MODE_DP;
- else*/
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ radeon_dig_connector = radeon_connector->con_priv;
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return ATOM_ENCODER_MODE_DP;
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -573,6 +618,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
}
}
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link B -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
+ */
static void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
@@ -614,10 +683,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
num = 2;
break;
@@ -652,18 +728,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
}
}
- if (radeon_encoder->pixel_clock > 165000) {
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
+ args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dig_connector->dp_clock == 270000)
+ args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.ucLaneNum = dig_connector->dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
args.ucLaneNum = 8;
- } else {
- if (dig_connector->linkb)
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ else
args.ucLaneNum = 4;
- }
- args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+ if (dig_connector->linkb)
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -674,8 +753,8 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
};
-static void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -687,6 +766,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
+ bool is_dp = false;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -704,6 +784,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
dig_connector = radeon_connector->con_priv;
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
+ is_dp = true;
+
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE32(rdev))
@@ -724,17 +807,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = radeon_connector->connector_object_id;
+ } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+ args.v1.asMode.ucLaneSel = lane_num;
+ args.v1.asMode.ucLaneSet = lane_set;
} else {
- if (radeon_encoder->pixel_clock > 165000)
+ if (is_dp)
+ args.v1.usPixelClock =
+ cpu_to_le16(dig_connector->dp_clock / 10);
+ else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE32(rdev)) {
- if (radeon_encoder->pixel_clock > 165000)
- args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
if (dig->dig_block)
args.v2.acConfig.ucEncoderSel = 1;
+ if (dig_connector->linkb)
+ args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -751,7 +840,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
break;
}
- if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (is_dp)
+ args.v2.acConfig.fCoherentMode = 1;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
}
@@ -760,17 +851,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (rdev->flags & RADEON_IS_IGP) {
if (radeon_encoder->pixel_clock > 165000) {
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B);
if (dig_connector->igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (dig_connector->igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
} else {
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
if (dig_connector->igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (dig_connector->igp_lane_info & 0x2)
@@ -780,42 +874,31 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
else if (dig_connector->igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
}
- } else {
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B |
- ATOM_TRANSMITTER_CONFIG_LANE_0_7);
- else {
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- }
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
- ATOM_TRANSMITTER_CONFIG_LINKA_B |
- ATOM_TRANSMITTER_CONFIG_LANE_0_7);
- else {
- if (dig_connector->linkb)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- }
break;
}
- if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+ if (is_dp)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
}
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
static void
@@ -918,12 +1001,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ dp_link_train(encoder, connector);
+ }
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
break;
}
} else {
@@ -1025,13 +1112,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- } else
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ } else {
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return;
+ radeon_connector = to_radeon_connector(connector);
+ if (!radeon_connector->con_priv)
+ return;
+ dig_connector = radeon_connector->con_priv;
+
+ /* XXX doesn't really matter which dig encoder we pick as long as it's
+ * not already in use
+ */
+ if (dig_connector->linkb)
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ else
+ args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ /* Only dig2 encoder can drive LVTMA */
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1060,7 +1167,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
static void
@@ -1104,11 +1210,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- if (radeon_encoder->enc_priv) {
- struct radeon_encoder_atom_dig *dig;
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+ if (radeon_encoder->enc_priv) {
+ struct radeon_encoder_atom_dig *dig;
- dig = radeon_encoder->enc_priv;
- dig->dig_block = radeon_crtc->crtc_id;
+ dig = radeon_encoder->enc_priv;
+ dig->dig_block = radeon_crtc->crtc_id;
+ }
}
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1134,14 +1243,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1160,6 +1269,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
break;
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
static bool
@@ -1354,7 +1465,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
@@ -1406,4 +1516,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
}
+
+ r600_hdmi_init(encoder);
}
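/*
 * Illustrative sketch, not part of the patch above: the DIG routing comment
 * and the encoder-table selection added in atombios_dig_encoder_setup() boil
 * down to "UNIPHY link B uses DIG2, link A uses DIG1" on DCE 3.0/3.1, with
 * LVTMA always taking DIG2. The helper name below is hypothetical; the table
 * names and the linkb flag come from the patch itself.
 */
static int example_pick_dig_encoder_table(struct radeon_connector_atom_dig *dig_connector)
{
	/* for UNIPHY, pick the encoder matching the link in use */
	if (dig_connector->linkb)
		return GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
	return GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
}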
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d10eb43645c..3ba213d1b06 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
- struct radeon_object *robj = NULL;
+ struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, ttm_bo_type_kernel,
- false, &gobj);
+ &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
- robj = gobj->driver_private;
+ rbo = gobj->driver_private;
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
}
#endif
- if (tiling_flags)
- radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+ if (tiling_flags) {
+ ret = radeon_bo_set_tiling_flags(rbo,
+ tiling_flags | RADEON_TILING_SURFACE,
+ mode_cmd.pitch);
+ if (ret)
+ dev_err(rdev->dev, "FB failed to set tiling flags\n");
+ }
mutex_lock(&rdev->ddev->struct_mutex);
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
ret = -ENOMEM;
goto out_unref;
}
- ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ ret = radeon_bo_reserve(rbo, false);
+ if (unlikely(ret != 0))
+ goto out_unref;
+ ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ radeon_bo_unreserve(rbo);
+ goto out_unref;
+ }
+ if (fb_tiled)
+ radeon_bo_check_tiling(rbo, 0, 0);
+ ret = radeon_bo_kmap(rbo, &fbptr);
+ radeon_bo_unreserve(rbo);
if (ret) {
- printk(KERN_ERR "failed to pin framebuffer\n");
- ret = -ENOMEM;
goto out_unref;
}
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
*fb_p = fb;
rfb = to_radeon_framebuffer(fb);
rdev->fbdev_rfb = rfb;
- rdev->fbdev_robj = robj;
+ rdev->fbdev_rbo = rbo;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- if (fb_tiled)
- radeon_object_check_tiling(robj, 0, 0);
-
- ret = radeon_object_kmap(robj, &fbptr);
- if (ret) {
- goto out_unref;
- }
-
- memset_io(fbptr, 0, aligned_size);
+ memset_io(fbptr, 0xff, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
return 0;
out_unref:
- if (robj) {
- radeon_object_kunmap(robj);
+ if (rbo) {
+ ret = radeon_bo_reserve(rbo, false);
+ if (likely(ret == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unreserve(rbo);
+ }
}
if (fb && ret) {
list_del(&fb->filp_head);
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
int radeonfb_probe(struct drm_device *dev)
{
- return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
+ struct radeon_device *rdev = dev->dev_private;
+ int bpp_sel = 32;
+
+ /* select 8 bpp console on RN50 or cards with 32MB or less of VRAM */
+ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 8;
+
+ return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
}
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
{
struct fb_info *info;
struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
+ int r;
if (!fb) {
return -EINVAL;
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
info = fb->fbdev;
if (info) {
struct radeon_fb_device *rfbdev = info->par;
- robj = rfb->obj->driver_private;
+ rbo = rfb->obj->driver_private;
unregister_framebuffer(info);
- radeon_object_kunmap(robj);
- radeon_object_unpin(robj);
+ r = radeon_bo_reserve(rbo, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rbo);
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
drm_fb_helper_free(&rfbdev->helper);
framebuffer_release(info);
}
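/*
 * Pattern sketch, not part of the patch: the new radeon_bo API used above
 * requires the buffer to be reserved around pin/kmap/tiling calls, unlike
 * the old radeon_object_* helpers. The function name is hypothetical; the
 * radeon_bo_* calls and the VRAM domain are the ones introduced here.
 */
static int example_pin_and_map(struct radeon_bo *rbo, u64 *gpu_addr, void **cpu_ptr)
{
	int r;

	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
	if (r) {
		radeon_bo_unreserve(rbo);
		return r;
	}
	r = radeon_bo_kmap(rbo, cpu_ptr);
	if (r)
		radeon_bo_unpin(rbo);
	radeon_bo_unreserve(rbo);
	return r;
}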
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3beb26d7471..4cdd8b4f754 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return signaled;
}
-int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
-{
- struct radeon_device *rdev;
- int ret = 0;
-
- rdev = fence->rdev;
-
- __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
- while (1) {
- if (radeon_fence_signaled(fence))
- break;
-
- if (time_after_eq(jiffies, fence->timeout)) {
- ret = -EBUSY;
- break;
- }
-
- if (lazy)
- schedule_timeout(1);
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- return ret;
-}
-
-
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
return 0;
}
- if (rdev->family >= CHIP_R600) {
- r = r600_fence_wait(fence, intr, 0);
- if (r == -ERESTARTSYS)
- return -EBUSY;
- return r;
- }
-
retry:
cur_jiffies = jiffies;
timeout = HZ / 100;
@@ -231,14 +193,17 @@ retry:
}
if (intr) {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
- if (unlikely(r == -ERESTARTSYS)) {
- return -EBUSY;
- }
+ radeon_irq_kms_sw_irq_put(rdev);
+ if (unlikely(r < 0))
+ return r;
} else {
+ radeon_irq_kms_sw_irq_get(rdev);
r = wait_event_timeout(rdev->fence_drv.queue,
radeon_fence_signaled(fence), timeout);
+ radeon_irq_kms_sw_irq_put(rdev);
}
if (unlikely(!radeon_fence_signaled(fence))) {
if (unlikely(r == 0)) {
@@ -359,7 +324,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
if (r) {
- DRM_ERROR("Fence failed to get a scratch register.");
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
@@ -370,9 +335,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
rdev->fence_drv.count_timeout = 0;
init_waitqueue_head(&rdev->fence_drv.queue);
+ rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for fence !\n");
+ dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
return 0;
}
@@ -381,11 +347,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
{
unsigned long irq_flags;
+ if (!rdev->fence_drv.initialized)
+ return;
wake_up_all(&rdev->fence_drv.queue);
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- DRM_INFO("radeon: fence finalized\n");
+ rdev->fence_drv.initialized = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
index 90187d17384..3d4d84e078a 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -38,6 +38,23 @@ typedef union rfixed {
#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12)
+static inline u32 rfixed_floor(fixed20_12 A)
+{
+ u32 non_frac = rfixed_trunc(A);
+
+ return rfixed_const(non_frac);
+}
+
+static inline u32 rfixed_ceil(fixed20_12 A)
+{
+ u32 non_frac = rfixed_trunc(A);
+
+ if (A.full > rfixed_const(non_frac))
+ return rfixed_const(non_frac + 1);
+ else
+ return rfixed_const(non_frac);
+}
+
static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
{
u64 tmp = ((u64)A.full << 13);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d7566178..e73d56e83fa 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_object_create(rdev, NULL,
- rdev->gart.table_size,
- true,
- RADEON_GEM_DOMAIN_VRAM,
- false, &rdev->gart.table.vram.robj);
+ r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->gart.table.vram.robj);
if (r) {
return r;
}
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
uint64_t gpu_addr;
int r;
- r = radeon_object_pin(rdev->gart.table.vram.robj,
- RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
- if (r) {
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (unlikely(r != 0))
return r;
- }
- r = radeon_object_kmap(rdev->gart.table.vram.robj,
- (void **)&rdev->gart.table.vram.ptr);
+ r = radeon_bo_pin(rdev->gart.table.vram.robj,
+ RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
- DRM_ERROR("radeon: failed to map gart vram table.\n");
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
return r;
}
+ r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+ (void **)&rdev->gart.table.vram.ptr);
+ if (r)
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
- return 0;
+ return r;
}
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
+ int r;
+
if (rdev->gart.table.vram.robj == NULL) {
return;
}
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
- radeon_object_unref(&rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ radeon_bo_unref(&rdev->gart.table.vram.robj);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d880edf254d..60df2d7e7e4 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_object *robj = gobj->driver_private;
+ struct radeon_bo *robj = gobj->driver_private;
gobj->driver_private = NULL;
if (robj) {
- radeon_object_unref(&robj);
+ radeon_bo_unref(&robj);
}
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
- int alignment, int initial_domain,
- bool discardable, bool kernel,
- bool interruptible,
- struct drm_gem_object **obj)
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct drm_gem_object **obj)
{
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
*obj = NULL;
@@ -65,11 +64,11 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
- interruptible, &robj);
+ r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
if (r) {
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
- size, initial_domain, alignment);
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
mutex_lock(&rdev->ddev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
@@ -83,33 +82,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_object *robj = obj->driver_private;
- uint32_t flags;
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
- switch (pin_domain) {
- case RADEON_GEM_DOMAIN_VRAM:
- flags = TTM_PL_FLAG_VRAM;
- break;
- case RADEON_GEM_DOMAIN_GTT:
- flags = TTM_PL_FLAG_TT;
- break;
- default:
- flags = TTM_PL_FLAG_SYSTEM;
- break;
- }
- return radeon_object_pin(robj, flags, gpu_addr);
+ r = radeon_bo_reserve(robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+ radeon_bo_unreserve(robj);
+ return r;
}
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_object *robj = obj->driver_private;
- radeon_object_unpin(robj);
+ struct radeon_bo *robj = obj->driver_private;
+ int r;
+
+ r = radeon_bo_reserve(robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(robj);
+ radeon_bo_unreserve(robj);
+ }
}
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
- struct radeon_object *robj;
+ struct radeon_bo *robj;
uint32_t domain;
int r;
@@ -127,11 +126,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
if (r) {
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
+ radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -144,7 +144,7 @@ int radeon_gem_init(struct radeon_device *rdev)
void radeon_gem_fini(struct radeon_device *rdev)
{
- radeon_object_force_delete(rdev);
+ radeon_bo_force_delete(rdev);
}
@@ -158,9 +158,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct drm_radeon_gem_info *args = data;
args->vram_size = rdev->mc.real_vram_size;
- /* FIXME: report somethings that makes sense */
- args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
- args->gart_size = rdev->mc.gtt_size;
+ args->vram_visible = rdev->mc.real_vram_size;
+ if (rdev->stollen_vga_memory)
+ args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+ if (rdev->fbdev_rbo)
+ args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+ args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+ RADEON_IB_POOL_SIZE*64*1024;
return 0;
}
@@ -192,8 +196,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, true, &gobj);
+ args->initial_domain, false,
+ false, &gobj);
if (r) {
return r;
}
@@ -218,7 +222,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* just validate the BO into a certain domain */
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
/* for now if someone requests domain CPU -
@@ -244,19 +248,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
- int r;
+ struct radeon_bo *robj;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_mmap(robj, &args->addr_ptr);
+ args->addr_ptr = radeon_bo_mmap_offset(robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- return r;
+ return 0;
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,16 +267,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
- uint32_t cur_placement;
+ uint32_t cur_placement = 0;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_busy_domain(robj, &cur_placement);
+ r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +300,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +308,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
robj = gobj->driver_private;
- r = radeon_object_wait(robj);
+ r = radeon_bo_wait(robj, NULL, false);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
+ radeon_hdp_flush(robj->rdev);
return r;
}
@@ -317,7 +321,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_set_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *robj;
int r = 0;
DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +329,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL)
return -EINVAL;
robj = gobj->driver_private;
- radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+ r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
@@ -337,16 +341,20 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
{
struct drm_radeon_gem_get_tiling *args = data;
struct drm_gem_object *gobj;
- struct radeon_object *robj;
+ struct radeon_bo *rbo;
int r = 0;
DRM_DEBUG("\n");
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -EINVAL;
- robj = gobj->driver_private;
- radeon_object_get_tiling_flags(robj, &args->tiling_flags,
- &args->pitch);
+ rbo = gobj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ goto out;
+ radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+ radeon_bo_unreserve(rbo);
+out:
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index dd438d32e5c..da3da1e89d0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
- struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
- struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety sake
*/
- if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
- if (rec->a_clk_reg == RADEON_GPIO_MONID) {
- WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
- R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
- } else {
- WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
- R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ if (rec->hw_capable) {
+ if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+ if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+ } else {
+ WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+ R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+ }
}
}
- if (lock_state) {
- temp = RREG32(rec->a_clk_reg);
- temp &= ~(rec->a_clk_mask);
- WREG32(rec->a_clk_reg, temp);
-
- temp = RREG32(rec->a_data_reg);
- temp &= ~(rec->a_data_mask);
- WREG32(rec->a_data_reg, temp);
- }
+ /* clear the output pin values */
+ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, temp);
+
+ temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, temp);
+
+ /* set the pins to input */
+ temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, temp);
+
+ temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, temp);
+
+ /* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg);
if (lock_state)
temp |= rec->mask_clk_mask;
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_clk_reg);
- val &= rec->get_clk_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_clk_reg);
+ val &= rec->y_clk_mask;
return (val != 0);
}
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->get_data_reg);
- val &= rec->get_data_mask;
+ /* read the value off the pin */
+ val = RREG32(rec->y_data_reg);
+ val &= rec->y_data_mask;
+
return (val != 0);
}
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
- val |= clock ? 0 : rec->put_clk_mask;
- WREG32(rec->put_clk_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+ val |= clock ? 0 : rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, val);
}
static void set_data(void *i2c_priv, int data)
@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data)
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
- val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
- val |= data ? 0 : rec->put_data_mask;
- WREG32(rec->put_data_reg, val);
+ /* set pin direction */
+ val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+ val |= data ? 0 : rec->en_data_mask;
+ WREG32(rec->en_data_reg, val);
}
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
- struct radeon_i2c_bus_rec *rec,
- const char *name)
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
{
struct radeon_i2c_chan *i2c;
int ret;
@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
return NULL;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.algo_data = &i2c->algo;
i2c->dev = dev;
- i2c->algo.setsda = set_data;
- i2c->algo.setscl = set_clock;
- i2c->algo.getsda = get_data;
- i2c->algo.getscl = get_clock;
- i2c->algo.udelay = 20;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.setsda = set_data;
+ i2c->algo.bit.setscl = set_clock;
+ i2c->algo.bit.getsda = get_data;
+ i2c->algo.bit.getscl = get_clock;
+ i2c->algo.bit.udelay = 20;
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
* make this, 2 jiffies is a lot more reliable */
- i2c->algo.timeout = 2;
- i2c->algo.data = i2c;
+ i2c->algo.bit.timeout = 2;
+ i2c->algo.bit.data = i2c;
i2c->rec = *rec;
- i2c_set_adapdata(&i2c->adapter, i2c);
-
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,6 +206,38 @@ out_free:
}
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct radeon_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+ if (i2c == NULL)
+ return NULL;
+
+ i2c->rec = *rec;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ i2c->adapter.algo_data = &i2c->algo.dp;
+ i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
+ i2c->algo.dp.address = 0;
+ ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_INFO("Failed to register i2c %s\n", name);
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+
+}
+
+
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
return NULL;
}
+
+void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
+{
+ u8 out_buf[2];
+ u8 in_buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = out_buf,
+ },
+ {
+ .addr = slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = in_buf,
+ }
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = 0;
+
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+ } else {
+ DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+ addr, *val);
+ }
+}
+
+void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
+{
+ uint8_t out_buf[2];
+ struct i2c_msg msg = {
+ .addr = slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ };
+
+ out_buf[0] = addr;
+ out_buf[1] = val;
+
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+ DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+ addr, val);
+}
+
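/*
 * Usage sketch, not part of the patch: the new radeon_i2c_sw_get_byte() and
 * radeon_i2c_sw_put_byte() helpers perform a one-byte register read/write on
 * an already registered radeon_i2c_chan. The slave address and register
 * offset below are made-up placeholders, not values from the patch.
 */
static void example_i2c_read_modify_write(struct radeon_i2c_chan *i2c_bus)
{
	u8 val = 0;

	radeon_i2c_sw_get_byte(i2c_bus, 0x50, 0x00, &val);		/* read register 0x00 */
	radeon_i2c_sw_put_byte(i2c_bus, 0x50, 0x00, val | 0x01);	/* write it back with bit 0 set */
}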
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index a1bf11de308..48b7cea31e0 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
&init->gart_textures_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
}
typedef struct drm_radeon_clear32 {
@@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
&clr->depth_boxes))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
}
typedef struct drm_radeon_stipple32 {
@@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
&request->mask))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
}
typedef struct drm_radeon_tex_image32 {
@@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
&image->data))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
}
typedef struct drm_radeon_vertex2_32 {
@@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
&request->prim))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
}
typedef struct drm_radeon_cmd_buffer32 {
@@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
&request->boxes))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
}
typedef struct drm_radeon_getparam32 {
@@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
}
typedef struct drm_radeon_mem_alloc32 {
@@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
&request->region_offset))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
}
typedef struct drm_radeon_irq_emit32 {
@@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
&request->irq_seq))
return -EFAULT;
- return drm_ioctl(file->f_path.dentry->d_inode, file,
- DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
}
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
@@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
&request->value))
return -EFAULT;
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+ return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
}
#else
#define compat_radeon_cp_setparam NULL
@@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
- lock_kernel(); /* XXX for now */
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
@@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- lock_kernel(); /* XXX for now */
- ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
+ ret = drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a0fe6232dcb..9223296fe37 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
return radeon_irq_process(rdev);
}
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+ struct radeon_device *rdev = container_of(work, struct radeon_device,
+ hotplug_work);
+ struct drm_device *dev = rdev->ddev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head)
+ radeon_connector_hotplug(connector);
+ }
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_sysfs_hotplug_event(dev);
+}
+
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
unsigned i;
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
@@ -87,17 +108,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_SINGLE_CRTC)
num_crtc = 1;
-
+ spin_lock_init(&rdev->irq.sw_lock);
r = drm_vblank_init(rdev->ddev, num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
- if (rdev->family >= CHIP_RV380) {
+ /* MSIs don't seem to work on my rs780;
+ * not sure about rs880 or other rs780s.
+ * Needs more investigation.
+ */
+ if ((rdev->family >= CHIP_RV380) &&
+ (rdev->family != CHIP_RS780) &&
+ (rdev->family != CHIP_RS880)) {
int ret = pci_enable_msi(rdev->pdev);
- if (!ret)
+ if (!ret) {
rdev->msi_enabled = 1;
+ DRM_INFO("radeon: using MSI.\n");
+ }
}
drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
@@ -114,3 +143,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
pci_disable_msi(rdev->pdev);
}
}
+
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+ rdev->irq.sw_int = true;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+ rdev->irq.sw_int = false;
+ radeon_irq_set(rdev);
+ }
+ spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
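/*
 * Usage sketch, not part of the patch: the refcounted SW interrupt helpers
 * added above are meant to bracket any wait that depends on the fence
 * interrupt, mirroring what radeon_fence_wait() now does in radeon_fence.c.
 * The function name is hypothetical; everything else comes from the patch.
 */
static void example_wait_with_sw_irq(struct radeon_device *rdev, struct radeon_fence *fence)
{
	radeon_irq_kms_sw_irq_get(rdev);	/* take a reference; enables the SW interrupt */
	wait_event_timeout(rdev->fence_drv.queue,
			   radeon_fence_signaled(fence), HZ / 100);
	radeon_irq_kms_sw_irq_put(rdev);	/* drop the reference; may disable it again */
}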
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ba128621057..f23b05606eb 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,10 +30,19 @@
#include "radeon.h"
#include "radeon_drm.h"
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (rdev == NULL)
+ return 0;
+ radeon_modeset_fini(rdev);
+ radeon_device_fini(rdev);
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+}
-/*
- * Driver load/unload
- */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
*/
r = radeon_device_init(rdev, dev, dev->pdev, flags);
if (r) {
- DRM_ERROR("Fatal error while trying to initialize radeon.\n");
- return r;
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ goto out;
}
/* Again modeset_init should fail only on fatal error
* otherwise it should provide enough functionalities
* for shadowfb to run
*/
r = radeon_modeset_init(rdev);
- if (r) {
- return r;
- }
- return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev == NULL)
- return 0;
- radeon_modeset_fini(rdev);
- radeon_device_fini(rdev);
- kfree(rdev);
- dev->dev_private = NULL;
- return 0;
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+ if (r)
+ radeon_driver_unload_kms(dev);
+ return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8d0b7aa87fa..cc27485a07a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -30,9 +30,20 @@
#include "radeon.h"
#include "atom.h"
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+ WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -292,8 +303,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
uint32_t mask;
if (radeon_crtc->crtc_id)
- mask = (RADEON_CRTC2_EN |
- RADEON_CRTC2_DISP_DIS |
+ mask = (RADEON_CRTC2_DISP_DIS |
RADEON_CRTC2_VSYNC_DIS |
RADEON_CRTC2_HSYNC_DIS |
RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +315,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +329,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
- WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+ WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +410,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *radeon_fb;
struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
uint64_t base;
uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
uint32_t crtc_pitch, pitch_pixels;
uint32_t tiling_flags;
int format;
uint32_t gen_cntl_reg, gen_cntl_val;
+ int r;
DRM_DEBUG("\n");
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
radeon_fb = to_radeon_framebuffer(crtc->fb);
@@ -431,10 +448,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return false;
}
+ /* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
return -EINVAL;
}
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+ if (tiling_flags & RADEON_TILING_MICRO)
+ DRM_ERROR("trying to scanout microtiled buffer\n");
+
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +478,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
(crtc->fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
- radeon_object_get_tiling_flags(obj->driver_private,
- &tiling_flags, NULL);
- if (tiling_flags & RADEON_TILING_MICRO)
- DRM_ERROR("trying to scanout microtiled buffer\n");
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
@@ -530,7 +555,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb && old_fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(old_fb);
- radeon_gem_object_unpin(radeon_fb->obj);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
}
/* Bytes per pixel may have changed */
@@ -642,12 +672,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
- /* check to see if TV DAC is enabled for another crtc and keep it enabled */
- if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
- crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
- else
- crtc2_gen_cntl = 0;
-
+ /* if TV DAC is enabled for another crtc, keep it enabled */
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
crtc2_gen_cntl |= ((format << 8)
| RADEON_CRTC2_VSYNC_DIS
| RADEON_CRTC2_HSYNC_DIS
@@ -676,7 +702,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc_ext_cntl;
uint32_t disp_merge_cntl;
- crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+ crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+ crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
| (format << 8)
| RADEON_CRTC_DISP_REQ_EN_B
| ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -779,15 +806,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
- if (lvds) {
- if (lvds->use_bios_dividers) {
- pll_ref_div = lvds->panel_ref_divider;
- pll_fb_post_div = (lvds->panel_fb_divider |
- (lvds->panel_post_divider << 16));
- htotal_cntl = 0;
- use_bios_divs = true;
+ if (!rdev->is_atom_bios) {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ if (lvds) {
+ if (lvds->use_bios_dividers) {
+ pll_ref_div = lvds->panel_ref_divider;
+ pll_fb_post_div = (lvds->panel_fb_divider |
+ (lvds->panel_post_divider << 16));
+ htotal_cntl = 0;
+ use_bios_divs = true;
+ }
}
}
pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1027,8 +1056,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
radeon_crtc_set_base(crtc, x, y, old_fb);
radeon_set_crtc_timing(crtc, adjusted_mode);
radeon_set_pll(crtc, adjusted_mode);
+ radeon_overscan_setup(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0) {
- radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
+ radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
} else {
if (radeon_crtc->rmx_type != RMX_OFF) {
/* FIXME: only first crtc has rmx what should we
@@ -1042,12 +1072,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * The hardware wedges sometimes if you reconfigure one CRTC
+ * whilst another is running (see fdo bug #24611).
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
}
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *crtci;
+
+ /*
+ * Reenable the CRTCs that should be running.
+ */
+ list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+ if (crtci->enabled)
+ radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+ }
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 00382122869..981508ff703 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
- if ((!rdev->is_atom_bios)) {
+ if (rdev->is_atom_bios) {
+ /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
+ * we need to call that on resume to set up the reg properly.
+ */
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+ atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+ lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ } else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
(lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
} else
lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
- } else
- lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ }
lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
lvds_gen_cntl &= ~(RADEON_LVDS_ON |
RADEON_LVDS_BLON |
@@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
-static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -194,15 +200,24 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
- if (radeon_encoder->rmx_type != RMX_OFF)
- radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ int mode_id = adjusted_mode->base.id;
+ *adjusted_mode = *native_mode;
+ adjusted_mode->hdisplay = mode->hdisplay;
+ adjusted_mode->vdisplay = mode->vdisplay;
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
+ adjusted_mode->base.id = mode_id;
+ }
return true;
}
static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.dpms = radeon_legacy_lvds_dpms,
- .mode_fixup = radeon_legacy_lvds_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_lvds_prepare,
.mode_set = radeon_legacy_lvds_mode_set,
.commit = radeon_legacy_lvds_commit,
@@ -214,17 +229,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -410,7 +414,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
.dpms = radeon_legacy_primary_dac_dpms,
- .mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_primary_dac_prepare,
.mode_set = radeon_legacy_primary_dac_mode_set,
.commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +427,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -584,7 +578,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
.dpms = radeon_legacy_tmds_int_dpms,
- .mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_int_prepare,
.mode_set = radeon_legacy_tmds_int_mode_set,
.commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +590,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
.destroy = radeon_enc_destroy,
};
-static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -697,6 +680,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
/*if (mode->clock > 165000)
fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
}
+ if (!radeon_combios_external_tmds_setup(encoder))
+ radeon_external_tmds_setup(encoder);
}
if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +709,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+ if (tmds) {
+ if (tmds->i2c_bus)
+ radeon_i2c_destroy(tmds->i2c_bus);
+ }
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+}
+
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
.dpms = radeon_legacy_tmds_ext_dpms,
- .mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tmds_ext_prepare,
.mode_set = radeon_legacy_tmds_ext_mode_set,
.commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +733,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
- .destroy = radeon_enc_destroy,
+ .destroy = radeon_ext_tmds_enc_destroy,
};
-static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* set the active encoder to connector routing */
- radeon_encoder_set_active_device(encoder);
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
- return true;
-}
-
static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -1265,7 +1252,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
.dpms = radeon_legacy_tv_dac_dpms,
- .mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+ .mode_fixup = radeon_legacy_mode_fixup,
.prepare = radeon_legacy_tv_dac_prepare,
.mode_set = radeon_legacy_tv_dac_mode_set,
.commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1289,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
return tmds;
}
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder_ext_tmds *tmds = NULL;
+ bool ret;
+
+ if (rdev->is_atom_bios)
+ return NULL;
+
+ tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+ if (!tmds)
+ return NULL;
+
+ ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+ if (ret == false)
+ radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+ return tmds;
+}
+
void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
@@ -1329,7 +1339,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
encoder->possible_crtcs = 0x1;
else
encoder->possible_crtcs = 0x3;
- encoder->possible_clones = 0;
radeon_encoder->enc_priv = NULL;
@@ -1373,7 +1382,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
if (!rdev->is_atom_bios)
- radeon_combios_get_ext_tmds_info(radeon_encoder);
+ radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
break;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ace726aa0d7..402369db5ba 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
#include <drm_crtc.h>
#include <drm_mode.h>
#include <drm_edid.h>
+#include <drm_dp_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/i2c-algo-bit.h>
@@ -87,26 +88,48 @@ enum radeon_tv_std {
TV_STD_SCART_PAL,
TV_STD_SECAM,
TV_STD_PAL_CN,
+ TV_STD_PAL_N,
};
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ * grabs the gpio pins for software use
+ * 0=not held 1=held
+ * 2. "a" reg and bits
+ * output pin value
+ * 0=low 1=high
+ * 3. "en" reg and bits
+ * sets the pin direction
+ * 0=input 1=output
+ * 4. "y" reg and bits
+ * input pin value
+ * 0=low 1=high
+ */
struct radeon_i2c_bus_rec {
bool valid;
+ /* id used by atom */
+ uint8_t i2c_id;
+ /* can be used with hw i2c engine */
+ bool hw_capable;
+ /* uses multi-media i2c engine */
+ bool mm_i2c;
+ /* regs and bits */
uint32_t mask_clk_reg;
uint32_t mask_data_reg;
uint32_t a_clk_reg;
uint32_t a_data_reg;
- uint32_t put_clk_reg;
- uint32_t put_data_reg;
- uint32_t get_clk_reg;
- uint32_t get_data_reg;
+ uint32_t en_clk_reg;
+ uint32_t en_data_reg;
+ uint32_t y_clk_reg;
+ uint32_t y_data_reg;
uint32_t mask_clk_mask;
uint32_t mask_data_mask;
- uint32_t put_clk_mask;
- uint32_t put_data_mask;
- uint32_t get_clk_mask;
- uint32_t get_data_mask;
uint32_t a_clk_mask;
uint32_t a_data_mask;
+ uint32_t en_clk_mask;
+ uint32_t en_data_mask;
+ uint32_t y_clk_mask;
+ uint32_t y_data_mask;
};
struct radeon_tmds_pll {
@@ -150,9 +173,12 @@ struct radeon_pll {
};
struct radeon_i2c_chan {
- struct drm_device *dev;
struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo;
+ struct drm_device *dev;
+ union {
+ struct i2c_algo_dp_aux_data dp;
+ struct i2c_algo_bit_data bit;
+ } algo;
struct radeon_i2c_bus_rec rec;
};
@@ -170,6 +196,11 @@ enum radeon_connector_table {
CT_EMAC,
};
+enum radeon_dvo_chip {
+ DVO_SIL164,
+ DVO_SIL1178,
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
@@ -261,6 +292,13 @@ struct radeon_encoder_int_tmds {
struct radeon_tmds_pll tmds_pll[4];
};
+struct radeon_encoder_ext_tmds {
+ /* tmds over dvo */
+ struct radeon_i2c_chan *i2c_bus;
+ uint8_t slave_addr;
+ enum radeon_dvo_chip dvo_chip;
+};
+
/* spread spectrum */
struct radeon_atom_ss {
uint16_t percentage;
@@ -297,11 +335,43 @@ struct radeon_encoder {
enum radeon_rmx_type rmx_type;
struct drm_display_mode native_mode;
void *enc_priv;
+ int hdmi_offset;
+ int hdmi_audio_workaround;
+ int hdmi_buffer_status;
};
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
bool linkb;
+ /* displayport */
+ struct radeon_i2c_chan *dp_i2c_bus;
+ u8 dpcd[8];
+ u8 dp_sink_type;
+ int dp_clock;
+ int dp_lane_count;
+};
+
+struct radeon_gpio_rec {
+ bool valid;
+ u8 id;
+ u32 reg;
+ u32 mask;
+};
+
+enum radeon_hpd_id {
+ RADEON_HPD_NONE = 0,
+ RADEON_HPD_1,
+ RADEON_HPD_2,
+ RADEON_HPD_3,
+ RADEON_HPD_4,
+ RADEON_HPD_5,
+ RADEON_HPD_6,
+};
+
+struct radeon_hpd {
+ enum radeon_hpd_id hpd;
+ u8 plugged_state;
+ struct radeon_gpio_rec gpio;
};
struct radeon_connector {
@@ -318,6 +388,7 @@ struct radeon_connector {
void *con_priv;
bool dac_load_detect;
uint16_t connector_object_id;
+ struct radeon_hpd hpd;
};
struct radeon_framebuffer {
@@ -325,10 +396,42 @@ struct radeon_framebuffer {
struct drm_gem_object *obj;
};
+extern enum radeon_tv_std
+radeon_combios_get_tv_info(struct radeon_device *rdev);
+extern enum radeon_tv_std
+radeon_atombios_get_tv_info(struct radeon_device *rdev);
+
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+ struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+extern void dp_link_train(struct drm_encoder *encoder,
+ struct drm_connector *connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+ int action, uint8_t lane_num,
+ uint8_t lane_set);
+extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte);
+
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -343,12 +446,24 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *post_div_p,
int flags);
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p,
+ int flags);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
@@ -378,12 +493,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
-bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
- struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+ struct radeon_encoder_ext_tmds *tmds);
extern struct radeon_encoder_primary_dac *
radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
@@ -395,6 +514,8 @@ extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +547,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
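[Editor's note, not part of the patch: the mask/a/en/y register comment added to radeon_i2c_bus_rec above maps directly onto software bit-banged i2c. A simplified sketch of two of the bit-algo callbacks driven from those fields; the handling of the "a" and "mask" registers and the locking are omitted here, and the real callbacks live in radeon_i2c.c.]

/* Illustrative sketch: SCL is driven low by switching the pin to output
 * (en=1 with a=0) and released by switching it back to input; the "y"
 * register reflects the current input level of the pin. */
static void sketch_set_clock(void *i2c_priv, int clock)
{
	struct radeon_i2c_chan *i2c = i2c_priv;
	struct radeon_device *rdev = i2c->dev->dev_private;
	struct radeon_i2c_bus_rec *rec = &i2c->rec;
	uint32_t val;

	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
	val |= clock ? 0 : rec->en_clk_mask;
	WREG32(rec->en_clk_reg, val);
}

static int sketch_get_data(void *i2c_priv)
{
	struct radeon_i2c_chan *i2c = i2c_priv;
	struct radeon_device *rdev = i2c->dev->dev_private;
	struct radeon_i2c_bus_rec *rec = &i2c->rec;

	return (RREG32(rec->y_data_reg) & rec->y_data_mask) != 0;
}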
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056dadc5c..d9ffe1f56e8 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,100 +34,62 @@
#include "radeon_drm.h"
#include "radeon.h"
-struct radeon_object {
- struct ttm_buffer_object tobj;
- struct list_head list;
- struct radeon_device *rdev;
- struct drm_gem_object *gobj;
- struct ttm_bo_kmap_obj kmap;
- unsigned pin_count;
- uint64_t gpu_addr;
- void *kptr;
- bool is_iomem;
- uint32_t tiling_flags;
- uint32_t pitch;
- int surface_reg;
-};
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
/*
* To exclude mutual BO access we rely on bo_reserve exclusion, as all
* function are calling it.
*/
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
- return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
-}
+ struct radeon_bo *bo;
-static void radeon_object_unreserve(struct radeon_object *robj)
-{
- ttm_bo_unreserve(&robj->tobj);
+ bo = container_of(tbo, struct radeon_bo, tbo);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_clear_surface_reg(bo);
+ kfree(bo);
}
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
+bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
- struct radeon_object *robj;
-
- robj = container_of(tobj, struct radeon_object, tobj);
- list_del_init(&robj->list);
- radeon_object_clear_surface_reg(robj);
- kfree(robj);
+ if (bo->destroy == &radeon_ttm_bo_destroy)
+ return true;
+ return false;
}
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
- /* Default gpu address */
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- if (robj->tobj.mem.mm_node == NULL) {
- return;
- }
- robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
- switch (robj->tobj.mem.mem_type) {
- case TTM_PL_VRAM:
- robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
- break;
- case TTM_PL_TT:
- robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
- robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
- return;
- }
-}
+ u32 c = 0;
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
- uint32_t flags = 0;
- if (domain & RADEON_GEM_DOMAIN_VRAM) {
- flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
- }
- if (domain & RADEON_GEM_DOMAIN_GTT) {
- flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- }
- if (domain & RADEON_GEM_DOMAIN_CPU) {
- flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
- }
- if (!flags) {
- flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
- }
- return flags;
+ rbo->placement.fpfn = 0;
+ rbo->placement.lpfn = 0;
+ rbo->placement.placement = rbo->placements;
+ rbo->placement.busy_placement = rbo->placements;
+ if (domain & RADEON_GEM_DOMAIN_VRAM)
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
+ if (domain & RADEON_GEM_DOMAIN_GTT)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ if (domain & RADEON_GEM_DOMAIN_CPU)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ rbo->placement.num_placement = c;
+ rbo->placement.num_busy_placement = c;
}
-int radeon_object_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj,
- unsigned long size,
- bool kernel,
- uint32_t domain,
- bool interruptible,
- struct radeon_object **robj_ptr)
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+ unsigned long size, bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr)
{
- struct radeon_object *robj;
+ struct radeon_bo *bo;
enum ttm_bo_type type;
- uint32_t flags;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +100,125 @@ int radeon_object_create(struct radeon_device *rdev,
} else {
type = ttm_bo_type_device;
}
- *robj_ptr = NULL;
- robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
- if (robj == NULL) {
+ *bo_ptr = NULL;
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
return -ENOMEM;
- }
- robj->rdev = rdev;
- robj->gobj = gobj;
- robj->surface_reg = -1;
- INIT_LIST_HEAD(&robj->list);
-
- flags = radeon_object_flags_from_domain(domain);
- r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
- 0, 0, false, NULL, size,
- &radeon_ttm_object_object_destroy);
+ bo->rdev = rdev;
+ bo->gobj = gobj;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocations are uninterruptible */
+ r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, 0, 0, !kernel, NULL, size,
+ &radeon_ttm_bo_destroy);
if (unlikely(r != 0)) {
- /* ttm call radeon_ttm_object_object_destroy if error happen */
- DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
- size, flags, 0);
+ if (r != -ERESTARTSYS)
+ dev_err(rdev->dev,
+ "object_init failed for (%lu, 0x%08X)\n",
+ size, domain);
return r;
}
- *robj_ptr = robj;
+ *bo_ptr = bo;
if (gobj) {
- list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_add_tail(&bo->list, &rdev->gem.objects);
+ mutex_unlock(&bo->rdev->gem.mutex);
}
return 0;
}
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
+ bool is_iomem;
int r;
- spin_lock(&robj->tobj.lock);
- if (robj->kptr) {
+ if (bo->kptr) {
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- spin_unlock(&robj->tobj.lock);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
if (r) {
return r;
}
- spin_lock(&robj->tobj.lock);
- robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
- spin_unlock(&robj->tobj.lock);
+ bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr) {
- *ptr = robj->kptr;
+ *ptr = bo->kptr;
}
- radeon_object_check_tiling(robj, 0, 0);
+ radeon_bo_check_tiling(bo, 0, 0);
return 0;
}
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
{
- spin_lock(&robj->tobj.lock);
- if (robj->kptr == NULL) {
- spin_unlock(&robj->tobj.lock);
+ if (bo->kptr == NULL)
return;
- }
- robj->kptr = NULL;
- spin_unlock(&robj->tobj.lock);
- radeon_object_check_tiling(robj, 0, 0);
- ttm_bo_kunmap(&robj->kmap);
+ bo->kptr = NULL;
+ radeon_bo_check_tiling(bo, 0, 0);
+ ttm_bo_kunmap(&bo->kmap);
}
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
{
- struct ttm_buffer_object *tobj;
+ struct ttm_buffer_object *tbo;
- if ((*robj) == NULL) {
+ if ((*bo) == NULL)
return;
- }
- tobj = &((*robj)->tobj);
- ttm_bo_unref(&tobj);
- if (tobj == NULL) {
- *robj = NULL;
- }
+ tbo = &((*bo)->tbo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
}
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
- *offset = robj->tobj.addr_space_offset;
- return 0;
-}
+ int r, i;
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
- uint64_t *gpu_addr)
-{
- uint32_t flags;
- uint32_t tmp;
- int r;
-
- flags = radeon_object_flags_from_domain(domain);
- spin_lock(&robj->tobj.lock);
- if (robj->pin_count) {
- robj->pin_count++;
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
- }
- spin_unlock(&robj->tobj.lock);
+ radeon_ttm_placement_from_domain(bo, domain);
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
return 0;
}
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
- return r;
- }
- tmp = robj->tobj.mem.placement;
- ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
- robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- radeon_object_gpu_addr(robj);
- if (gpu_addr != NULL) {
- *gpu_addr = robj->gpu_addr;
- }
- robj->pin_count = 1;
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to pin object.\n");
- }
- radeon_object_unreserve(robj);
- return r;
-}
-
-void radeon_object_unpin(struct radeon_object *robj)
-{
- uint32_t flags;
- int r;
-
- spin_lock(&robj->tobj.lock);
- if (!robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
- return;
- }
- robj->pin_count--;
- if (robj->pin_count) {
- spin_unlock(&robj->tobj.lock);
- return;
- }
- spin_unlock(&robj->tobj.lock);
- r = radeon_object_reserve(robj, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
- return;
- }
- flags = robj->tobj.mem.placement;
- robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- false, false);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to unpin buffer.\n");
- }
- radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
- int r = 0;
-
- /* FIXME: should use block reservation instead */
- r = radeon_object_reserve(robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
- return r;
- }
- spin_lock(&robj->tobj.lock);
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, false);
- }
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
+ radeon_ttm_placement_from_domain(bo, domain);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (likely(r == 0)) {
+ bo->pin_count = 1;
+ if (gpu_addr != NULL)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
+ }
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p pin failed\n", bo);
return r;
}
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
+int radeon_bo_unpin(struct radeon_bo *bo)
{
- int r = 0;
+ int r, i;
- r = radeon_object_reserve(robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object for waiting.\n");
- return r;
- }
- spin_lock(&robj->tobj.lock);
- *cur_placement = robj->tobj.mem.mem_type;
- if (robj->tobj.sync_obj) {
- r = ttm_bo_wait(&robj->tobj, true, true, true);
+ if (!bo->pin_count) {
+ dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+ return 0;
}
- spin_unlock(&robj->tobj.lock);
- radeon_object_unreserve(robj);
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
}
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP) {
/* Useless to evict on IGP chips */
@@ -346,30 +227,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
{
- struct radeon_object *robj, *n;
+ struct radeon_bo *bo, *n;
struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
}
- DRM_ERROR("Userspace still has active objects !\n");
- list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+ dev_err(rdev->dev, "Userspace still has active objects !\n");
+ list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = robj->gobj;
- DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
- gobj, robj, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
- list_del_init(&robj->list);
- radeon_object_unref(&robj);
+ gobj = bo->gobj;
+ dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+ gobj, bo, (unsigned long)gobj->size,
+ *((unsigned long *)&gobj->refcount));
+ mutex_lock(&bo->rdev->gem.mutex);
+ list_del_init(&bo->list);
+ mutex_unlock(&bo->rdev->gem.mutex);
+ radeon_bo_unref(&bo);
gobj->driver_private = NULL;
drm_gem_object_unreference(gobj);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
{
/* Add an MTRR for the VRAM */
rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +265,13 @@ int radeon_object_init(struct radeon_device *rdev)
return radeon_ttm_init(rdev);
}
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
}
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
- struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head)
{
if (lobj->wdomain) {
list_add(&lobj->list, head);
@@ -397,72 +280,62 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
}
}
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
int r;
list_for_each_entry(lobj, head, list){
- if (!lobj->robj->pin_count) {
- r = radeon_object_reserve(lobj->robj, true);
- if (unlikely(r != 0)) {
- DRM_ERROR("radeon: failed to reserve object.\n");
- return r;
- }
- } else {
- }
+ r = radeon_bo_reserve(lobj->bo, false);
+ if (unlikely(r != 0))
+ return r;
}
return 0;
}
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
{
- struct radeon_object_list *lobj;
+ struct radeon_bo_list *lobj;
list_for_each_entry(lobj, head, list) {
- if (!lobj->robj->pin_count) {
- radeon_object_unreserve(lobj->robj);
- }
+ /* only unreserve objects we successfully reserved */
+ if (radeon_bo_is_reserved(lobj->bo))
+ radeon_bo_unreserve(lobj->bo);
}
}
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_object *robj;
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
struct radeon_fence *old_fence = NULL;
int r;
- r = radeon_object_list_reserve(head);
+ r = radeon_bo_list_reserve(head);
if (unlikely(r != 0)) {
- radeon_object_list_unreserve(head);
return r;
}
list_for_each_entry(lobj, head, list) {
- robj = lobj->robj;
- if (!robj->pin_count) {
+ bo = lobj->bo;
+ if (!bo->pin_count) {
if (lobj->wdomain) {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->wdomain);
+ radeon_ttm_placement_from_domain(bo,
+ lobj->wdomain);
} else {
- robj->tobj.proposed_placement =
- radeon_object_flags_from_domain(lobj->rdomain);
+ radeon_ttm_placement_from_domain(bo,
+ lobj->rdomain);
}
- r = ttm_buffer_object_validate(&robj->tobj,
- robj->tobj.proposed_placement,
- true, false);
- if (unlikely(r)) {
- DRM_ERROR("radeon: failed to validate.\n");
+ r = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
+ if (unlikely(r))
return r;
- }
- radeon_object_gpu_addr(robj);
}
- lobj->gpu_offset = robj->gpu_addr;
- lobj->tiling_flags = robj->tiling_flags;
+ lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+ lobj->tiling_flags = bo->tiling_flags;
if (fence) {
- old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
- robj->tobj.sync_obj = radeon_fence_ref(fence);
- robj->tobj.sync_obj_arg = NULL;
+ old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+ bo->tbo.sync_obj = radeon_fence_ref(fence);
+ bo->tbo.sync_obj_arg = NULL;
}
if (old_fence) {
radeon_fence_unref(&old_fence);
@@ -471,51 +344,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
return 0;
}
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
{
- struct radeon_object_list *lobj;
- struct radeon_fence *old_fence = NULL;
+ struct radeon_bo_list *lobj;
+ struct radeon_fence *old_fence;
- list_for_each_entry(lobj, head, list) {
- old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
- lobj->robj->tobj.sync_obj = NULL;
- if (old_fence) {
- radeon_fence_unref(&old_fence);
+ if (fence)
+ list_for_each_entry(lobj, head, list) {
+ old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+ if (old_fence == fence) {
+ lobj->bo->tbo.sync_obj = NULL;
+ radeon_fence_unref(&old_fence);
+ }
}
- }
- radeon_object_list_unreserve(head);
+ radeon_bo_list_unreserve(head);
}
-void radeon_object_list_clean(struct list_head *head)
-{
- radeon_object_list_unreserve(head);
-}
-
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &robj->tobj);
-}
-
-unsigned long radeon_object_size(struct radeon_object *robj)
-{
- return robj->tobj.num_pages << PAGE_SHIFT;
+ return ttm_fbdev_mmap(vma, &bo->tbo);
}
-int radeon_object_get_surface_reg(struct radeon_object *robj)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- struct radeon_object *old_object;
+ struct radeon_bo *old_object;
int steal;
int i;
- if (!robj->tiling_flags)
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!bo->tiling_flags)
return 0;
- if (robj->surface_reg >= 0) {
- reg = &rdev->surface_regs[robj->surface_reg];
- i = robj->surface_reg;
+ if (bo->surface_reg >= 0) {
+ reg = &rdev->surface_regs[bo->surface_reg];
+ i = bo->surface_reg;
goto out;
}
@@ -523,10 +389,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
reg = &rdev->surface_regs[i];
- if (!reg->robj)
+ if (!reg->bo)
break;
- old_object = reg->robj;
+ old_object = reg->bo;
if (old_object->pin_count == 0)
steal = i;
}
@@ -537,91 +403,107 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
return -ENOMEM;
/* find someone with a surface reg and nuke their BO */
reg = &rdev->surface_regs[steal];
- old_object = reg->robj;
+ old_object = reg->bo;
/* blow away the mapping */
DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
- ttm_bo_unmap_virtual(&old_object->tobj);
+ ttm_bo_unmap_virtual(&old_object->tbo);
old_object->surface_reg = -1;
i = steal;
}
- robj->surface_reg = i;
- reg->robj = robj;
+ bo->surface_reg = i;
+ reg->bo = bo;
out:
- radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
- robj->tobj.mem.mm_node->start << PAGE_SHIFT,
- robj->tobj.num_pages << PAGE_SHIFT);
+ radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+ bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+ bo->tbo.num_pages << PAGE_SHIFT);
return 0;
}
-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
- struct radeon_device *rdev = robj->rdev;
+ struct radeon_device *rdev = bo->rdev;
struct radeon_surface_reg *reg;
- if (robj->surface_reg == -1)
+ if (bo->surface_reg == -1)
return;
- reg = &rdev->surface_regs[robj->surface_reg];
- radeon_clear_surface_reg(rdev, robj->surface_reg);
+ reg = &rdev->surface_regs[bo->surface_reg];
+ radeon_clear_surface_reg(rdev, bo->surface_reg);
- reg->robj = NULL;
- robj->surface_reg = -1;
+ reg->bo = NULL;
+ bo->surface_reg = -1;
}
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
- uint32_t tiling_flags, uint32_t pitch)
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
{
- robj->tiling_flags = tiling_flags;
- robj->pitch = pitch;
+ int r;
+
+ r = radeon_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+ bo->tiling_flags = tiling_flags;
+ bo->pitch = pitch;
+ radeon_bo_unreserve(bo);
+ return 0;
}
-void radeon_object_get_tiling_flags(struct radeon_object *robj,
- uint32_t *tiling_flags,
- uint32_t *pitch)
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ uint32_t *tiling_flags,
+ uint32_t *pitch)
{
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
if (tiling_flags)
- *tiling_flags = robj->tiling_flags;
+ *tiling_flags = bo->tiling_flags;
if (pitch)
- *pitch = robj->pitch;
+ *pitch = bo->pitch;
}
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
- bool force_drop)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop)
{
- if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+ if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
if (force_drop) {
- radeon_object_clear_surface_reg(robj);
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+ if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
if (!has_moved)
return 0;
- if (robj->surface_reg >= 0)
- radeon_object_clear_surface_reg(robj);
+ if (bo->surface_reg >= 0)
+ radeon_bo_clear_surface_reg(bo);
return 0;
}
- if ((robj->surface_reg >= 0) && !has_moved)
+ if ((bo->surface_reg >= 0) && !has_moved)
return 0;
- return radeon_object_get_surface_reg(robj);
+ return radeon_bo_get_surface_reg(bo);
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 1);
+ struct radeon_bo *rbo;
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return;
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 1);
}
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
- radeon_object_check_tiling(robj, 0, 0);
+ struct radeon_bo *rbo;
+ if (!radeon_ttm_bo_is_radeon_bo(bo))
+ return;
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 0);
}
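[Editor's note, not part of the patch: the reworked radeon_bo API above moves reservation onto the caller; pin, unpin, kmap and the tiling queries now assume the bo is already reserved. A condensed sketch of the resulting lifecycle for a kernel-owned buffer, mirroring the updated callers in radeon_ring.c and radeon_test.c further down; error unwinding is abbreviated.]

static int sketch_bo_lifecycle(struct radeon_device *rdev)
{
	struct radeon_bo *bo;
	u64 gpu_addr;
	void *cpu_ptr;
	int r;

	r = radeon_bo_create(rdev, NULL, 4096, true,
			     RADEON_GEM_DOMAIN_GTT, &bo);
	if (r)
		return r;

	/* pin and kmap must be called with the bo reserved */
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(bo);
		goto out_unref;
	}
	r = radeon_bo_kmap(bo, &cpu_ptr);
	radeon_bo_unreserve(bo);
	if (r)
		goto out_unpin;

	/* ... fill the buffer through cpu_ptr, point the GPU at gpu_addr ... */

	r = radeon_bo_reserve(bo, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(bo);
		radeon_bo_unpin(bo);
		radeon_bo_unreserve(bo);
	}
	radeon_bo_unref(&bo);
	return 0;

out_unpin:
	if (radeon_bo_reserve(bo, false) == 0) {
		radeon_bo_unpin(bo);
		radeon_bo_unreserve(bo);
	}
out_unref:
	radeon_bo_unref(&bo);
	return r;
}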
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb45..a02f18011ad 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,146 @@
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
-/*
- * TTM.
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type: ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ return RADEON_GEM_DOMAIN_VRAM;
+ case TTM_PL_TT:
+ return RADEON_GEM_DOMAIN_GTT;
+ case TTM_PL_SYSTEM:
+ return RADEON_GEM_DOMAIN_CPU;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_wait: don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
*/
-struct radeon_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
- bool mem_global_referenced;
- struct ttm_bo_device bdev;
-};
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: object should either be pinned or reserved when calling this
+ * function; it might be useful to add a check for this for debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+ return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+ return !!atomic_read(&bo->tbo.reserved);
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo: radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init thus isn't protected
+ * by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+ return bo->tbo.addr_space_offset;
+}
+
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+ bool no_wait)
+{
+ int r;
+
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+ return r;
+ }
+ spin_lock(&bo->tbo.lock);
+ if (mem_type)
+ *mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.lock);
+ ttm_bo_unreserve(&bo->tbo);
+ return r;
+}
+extern int radeon_bo_create(struct radeon_device *rdev,
+ struct drm_gem_object *gobj, unsigned long size,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head);
+extern int radeon_bo_list_reserve(struct list_head *head);
+extern void radeon_bo_list_unreserve(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, void *fence);
+extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+ u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+ bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem);
+extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
#endif
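[Editor's note, not part of the patch: as the kernel-doc above states, radeon_bo_reserve() can fail with -ERESTARTSYS when the wait for the reservation is interrupted by a signal; callers on an ioctl path are expected to release their reservations and return that code unchanged so the syscall is restarted. A minimal sketch of the expected handling:]

static int sketch_reserve_and_touch(struct radeon_bo *bo)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0)) {
		/* -ERESTARTSYS: a signal interrupted the wait; return it
		 * unchanged rather than reporting an error. */
		return r;
	}
	/* ... bo is reserved: safe to pin, kmap, or query tiling ... */
	radeon_bo_unreserve(bo);
	return 0;
}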
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 46146c6a2a0..8bce64cdc32 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
int radeon_pm_init(struct radeon_device *rdev)
{
if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for CP !\n");
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
}
return 0;
@@ -44,8 +44,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+ if (rdev->asic->get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 29ab75903ec..6d0a009dd4a 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -887,6 +887,7 @@
# define RADEON_FP_PANEL_FORMAT (1 << 3)
# define RADEON_FP_EN_TMDS (1 << 7)
# define RADEON_FP_DETECT_SENSE (1 << 8)
+# define RADEON_FP_DETECT_INT_POL (1 << 9)
# define R200_FP_SOURCE_SEL_MASK (3 << 10)
# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
@@ -894,6 +895,7 @@
# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
# define RADEON_FP_SEL_CRTC1 (0 << 13)
# define RADEON_FP_SEL_CRTC2 (1 << 13)
+# define R300_HPD_SEL(x) ((x) << 13)
# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
# define RADEON_FP2_ON (1 << 2)
# define RADEON_FP2_PANEL_FORMAT (1 << 3)
# define RADEON_FP2_DETECT_SENSE (1 << 8)
+# define RADEON_FP2_DETECT_INT_POL (1 << 9)
# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
@@ -988,14 +991,20 @@
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
+# define RADEON_FP_DETECT_MASK (1 << 4)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
+# define RADEON_FP2_DETECT_MASK (1 << 10)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_FP_DETECT_STAT (1 << 4)
+# define RADEON_FP_DETECT_STAT_ACK (1 << 4)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+# define RADEON_FP2_DETECT_STAT (1 << 10)
+# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
# define RADEON_SW_INT_FIRE (1 << 26)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -1051,20 +1060,25 @@
/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
-#define RADEON_I2C_DONE (1<<0)
-#define RADEON_I2C_NACK (1<<1)
-#define RADEON_I2C_HALT (1<<2)
-#define RADEON_I2C_SOFT_RST (1<<5)
-#define RADEON_I2C_DRIVE_EN (1<<6)
-#define RADEON_I2C_DRIVE_SEL (1<<7)
-#define RADEON_I2C_START (1<<8)
-#define RADEON_I2C_STOP (1<<9)
-#define RADEON_I2C_RECEIVE (1<<10)
-#define RADEON_I2C_ABORT (1<<11)
-#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_DONE (1 << 0)
+#define RADEON_I2C_NACK (1 << 1)
+#define RADEON_I2C_HALT (1 << 2)
+#define RADEON_I2C_SOFT_RST (1 << 5)
+#define RADEON_I2C_DRIVE_EN (1 << 6)
+#define RADEON_I2C_DRIVE_SEL (1 << 7)
+#define RADEON_I2C_START (1 << 8)
+#define RADEON_I2C_STOP (1 << 9)
+#define RADEON_I2C_RECEIVE (1 << 10)
+#define RADEON_I2C_ABORT (1 << 11)
+#define RADEON_I2C_GO (1 << 12)
+#define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
-#define RADEON_I2C_SEL (1<<16)
-#define RADEON_I2C_EN (1<<17)
+#define RADEON_I2C_DATA_COUNT_SHIFT 0
+#define RADEON_I2C_ADDR_COUNT_SHIFT 4
+#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+#define RADEON_I2C_SEL (1 << 16)
+#define RADEON_I2C_EN (1 << 17)
+#define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098
#define RADEON_DVI_I2C_CNTL_0 0x02e0
@@ -1072,7 +1086,7 @@
# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
-#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
+#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8
#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
-#define RADEON_LCD_GPIO_MASK 0x01a0
-#define RADEON_GPIOPAD_EN 0x01a0
-#define RADEON_LCD_GPIO_Y_REG 0x01a4
-#define RADEON_MDGPIO_A_REG 0x01ac
-#define RADEON_MDGPIO_EN_REG 0x01b0
-#define RADEON_MDGPIO_MASK 0x0198
+
#define RADEON_GPIOPAD_MASK 0x0198
#define RADEON_GPIOPAD_A 0x019c
-#define RADEON_MDGPIO_Y_REG 0x01b4
+#define RADEON_GPIOPAD_EN 0x01a0
+#define RADEON_GPIOPAD_Y 0x01a4
+#define RADEON_MDGPIO_MASK 0x01a8
+#define RADEON_MDGPIO_A 0x01ac
+#define RADEON_MDGPIO_EN 0x01b0
+#define RADEON_MDGPIO_Y 0x01b4
+
#define RADEON_MEM_ADDR_CONFIG 0x0148
#define RADEON_MEM_BASE 0x0f10 /* PCI */
#define RADEON_MEM_CNTL 0x0140
@@ -1360,6 +1375,9 @@
#define RADEON_OVR_CLR 0x0230
#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
+#define RADEON_OVR2_CLR 0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
/* first capture unit */
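[Editor's note, not part of the patch: the new FP/FP2 detect bits in RADEON_GEN_INT_CNTL and RADEON_GEN_INT_STATUS give the legacy chips hot-plug detect interrupts. A sketch of how they would be enabled and acknowledged, assuming the same mask/status/ack convention as the existing CRTC vblank bits; the actual wiring belongs in the r100 interrupt code, which is outside this hunk.]

static void sketch_enable_hpd_irqs(struct radeon_device *rdev)
{
	uint32_t tmp = RREG32(RADEON_GEN_INT_CNTL);

	tmp |= RADEON_FP_DETECT_MASK | RADEON_FP2_DETECT_MASK;
	WREG32(RADEON_GEN_INT_CNTL, tmp);
}

static void sketch_ack_hpd_irqs(struct radeon_device *rdev)
{
	uint32_t status = RREG32(RADEON_GEN_INT_STATUS);

	if (status & (RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT))
		WREG32(RADEON_GEN_INT_STATUS,
		       RADEON_FP_DETECT_STAT_ACK | RADEON_FP2_DETECT_STAT_ACK);
}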
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84..4d12b2d17b4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
/* Allocate 1M object buffer */
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
- r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
- true, RADEON_GEM_DOMAIN_GTT,
- false, &rdev->ib_pool.robj);
+ r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ true, RADEON_GEM_DOMAIN_GTT,
+ &rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
}
- r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
if (r) {
+ radeon_bo_unreserve(rdev->ib_pool.robj);
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
- r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+ r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
+ int r;
+
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
- radeon_object_kunmap(rdev->ib_pool.robj);
- radeon_object_unref(&rdev->ib_pool.robj);
+ r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->ib_pool.robj);
+ radeon_bo_unpin(rdev->ib_pool.robj);
+ radeon_bo_unreserve(rdev->ib_pool.robj);
+ }
+ radeon_bo_unref(&rdev->ib_pool.robj);
rdev->ib_pool.robj = NULL;
}
mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
- true,
- RADEON_GEM_DOMAIN_GTT,
- false,
- &rdev->cp.ring_obj);
+ r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+ RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_object_pin(rdev->cp.ring_obj,
- RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.gpu_addr);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->cp.gpu_addr);
if (r) {
- DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_object_kmap(rdev->cp.ring_obj,
+ r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
if (r) {
- DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
- mutex_unlock(&rdev->cp.mutex);
+ dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
void radeon_ring_fini(struct radeon_device *rdev)
{
+ int r;
+
mutex_lock(&rdev->cp.mutex);
if (rdev->cp.ring_obj) {
- radeon_object_kunmap(rdev->cp.ring_obj);
- radeon_object_unpin(rdev->cp.ring_obj);
- radeon_object_unref(&rdev->cp.ring_obj);
+ r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->cp.ring_obj);
+ radeon_bo_unpin(rdev->cp.ring_obj);
+ radeon_bo_unreserve(rdev->cp.ring_obj);
+ }
+ radeon_bo_unref(&rdev->cp.ring_obj);
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f8a465d9a1c..9f5e2f929da 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
- struct radeon_object *vram_obj = NULL;
- struct radeon_object **gtt_obj = NULL;
+ struct radeon_bo *vram_obj = NULL;
+ struct radeon_bo **gtt_obj = NULL;
struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned i, n, size;
@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffer) / test size
*/
- n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
- rdev->cp.ring_size) / size;
+ n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
+ rdev->cp.ring_size)) / size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
- false, &vram_obj);
+ r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+ &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
}
-
- r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+ r = radeon_bo_reserve(vram_obj, false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
goto out_cleanup;
}
-
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_object_create(rdev, NULL, size, true,
- RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+ r = radeon_bo_create(rdev, NULL, size, true,
+ RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+ r = radeon_bo_reserve(gtt_obj[i], false);
+ if (unlikely(r != 0))
+ goto out_cleanup;
+ r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_cleanup;
}
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object %d\n", i);
goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++)
*gtt_start = gtt_start;
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(vram_obj, &vram_map);
+ r = radeon_bo_kmap(vram_obj, &vram_map);
if (r) {
DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (GTT map 0x%p-0x%p)\n",
i, *vram_start, gtt_start, gtt_map,
gtt_end);
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
goto out_cleanup;
}
*vram_start = vram_start;
}
- radeon_object_kunmap(vram_obj);
+ radeon_bo_kunmap(vram_obj);
r = radeon_fence_create(rdev, &fence);
if (r) {
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_fence_unref(&fence);
- r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+ r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object after copy %d\n", i);
goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
"expected 0x%p (VRAM map 0x%p-0x%p)\n",
i, *gtt_start, vram_start, vram_map,
vram_end);
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
goto out_cleanup;
}
}
- radeon_object_kunmap(gtt_obj[i]);
+ radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
out_cleanup:
if (vram_obj) {
- radeon_object_unpin(vram_obj);
- radeon_object_unref(&vram_obj);
+ if (radeon_bo_is_reserved(vram_obj)) {
+ radeon_bo_unpin(vram_obj);
+ radeon_bo_unreserve(vram_obj);
+ }
+ radeon_bo_unref(&vram_obj);
}
if (gtt_obj) {
for (i = 0; i < n; i++) {
if (gtt_obj[i]) {
- radeon_object_unpin(gtt_obj[i]);
- radeon_object_unref(&gtt_obj[i]);
+ if (radeon_bo_is_reserved(gtt_obj[i])) {
+ radeon_bo_unpin(gtt_obj[i]);
+ radeon_bo_unreserve(gtt_obj[i]);
+ }
+ radeon_bo_unref(&gtt_obj[i]);
}
}
kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
-
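The hunks above convert radeon_test.c from the old radeon_object_* helpers to the radeon_bo_* API, which requires the buffer to be reserved around pin/unpin and kmap/kunmap. A minimal sketch of that create/reserve/pin sequence, using only helper names and signatures visible in this diff (error paths abbreviated, not tied to any particular kernel tree):

	static int example_create_pinned_bo(struct radeon_device *rdev,
					    unsigned long size, u32 domain,
					    struct radeon_bo **bo, u64 *gpu_addr)
	{
		int r;

		r = radeon_bo_create(rdev, NULL, size, true, domain, bo);
		if (r)
			return r;
		/* pin/unpin must now happen with the bo reserved */
		r = radeon_bo_reserve(*bo, false);
		if (r) {
			radeon_bo_unref(bo);
			return r;
		}
		r = radeon_bo_pin(*bo, domain, gpu_addr);
		radeon_bo_unreserve(*bo);
		if (r)
			radeon_bo_unref(bo);
		return r;
	}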
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index eda4ade24c3..3b0c07b444a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.gtt_location;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->gpu_offset = 0;
+ man->gpu_offset = rdev->mc.vram_location;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -197,16 +197,31 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
return 0;
}
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
{
- uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
+ struct radeon_bo *rbo;
+ static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+ placement->fpfn = 0;
+ placement->lpfn = 0;
+ placement->placement = &placements;
+ placement->busy_placement = &placements;
+ placement->num_placement = 1;
+ placement->num_busy_placement = 1;
+ return;
+ }
+ rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ break;
+ case TTM_PL_TT:
default:
- return (cur_placement & ~TTM_PL_MASK_CACHING) |
- TTM_PL_FLAG_SYSTEM |
- TTM_PL_FLAG_CACHED;
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
}
+ *placement = rbo->placement;
}
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +298,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- uint32_t proposed_placement;
+ u32 placements;
+ struct ttm_placement placement;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait);
if (unlikely(r)) {
return r;
@@ -329,15 +351,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- uint32_t proposed_flags;
+ struct ttm_placement placement;
+ u32 placements;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
- interruptible, no_wait);
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
if (unlikely(r)) {
return r;
}
@@ -407,18 +435,6 @@ memcpy:
return r;
}
-const uint32_t radeon_mem_prios[] = {
- TTM_PL_VRAM,
- TTM_PL_TT,
- TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
- TTM_PL_TT,
- TTM_PL_VRAM,
- TTM_PL_SYSTEM,
-};
-
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
@@ -446,10 +462,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
}
static struct ttm_bo_driver radeon_bo_driver = {
- .mem_type_prio = radeon_mem_prios,
- .mem_busy_prio = radeon_busy_prios,
- .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
- .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
@@ -482,27 +494,32 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
- ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+ rdev->mman.initialized = true;
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+ rdev->mc.real_vram_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_object_create(rdev, NULL, 256 * 1024, true,
- RADEON_GEM_DOMAIN_VRAM, false,
- &rdev->stollen_vga_memory);
+ r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ &rdev->stollen_vga_memory);
if (r) {
return r;
}
- r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r)
+ return r;
+ r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
if (r) {
- radeon_object_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stollen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
- r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
- ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+ r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+ rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
@@ -523,15 +540,24 @@ int radeon_ttm_init(struct radeon_device *rdev)
void radeon_ttm_fini(struct radeon_device *rdev)
{
+ int r;
+
+ if (!rdev->mman.initialized)
+ return;
if (rdev->stollen_vga_memory) {
- radeon_object_unpin(rdev->stollen_vga_memory);
- radeon_object_unref(&rdev->stollen_vga_memory);
+ r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (r == 0) {
+ radeon_bo_unpin(rdev->stollen_vga_memory);
+ radeon_bo_unreserve(rdev->stollen_vga_memory);
+ }
+ radeon_bo_unref(&rdev->stollen_vga_memory);
}
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
radeon_ttm_global_fini(rdev);
+ rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
}
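With this change radeon_ttm.c describes where a buffer may live with a struct ttm_placement instead of a single flag word. A sketch of the single-entry placement the move helpers above build, with field names as they appear in these hunks (lpfn == 0 is treated as "no upper limit" by ttm_bo_man_get_node later in this diff):

	static void example_single_placement(struct ttm_placement *placement,
					     u32 *flags)
	{
		*flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
		placement->fpfn = 0;		/* first allowed page frame */
		placement->lpfn = 0;		/* 0: no upper bound */
		placement->num_placement = 1;
		placement->placement = flags;	/* must stay valid for the call */
		placement->num_busy_placement = 1;
		placement->busy_placement = flags;
		/* then: ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait); */
	}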
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ca037160a58..368415df5f3 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
u32 tmp;
/* Setup GPU memory space */
- tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+ tmp = RREG32(R_00015C_NB_TOM);
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
rs400_gpu_init(rdev);
+ r100_enable_bm(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r = rs400_gart_enable(rdev);
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
r100_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r300_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs400_startup(rdev);
}
@@ -452,7 +454,7 @@ void rs400_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -490,12 +492,13 @@ int rs400_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- radeon_combios_asic_init(rdev->ddev);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
/* Get vram informations */
rs400_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
@@ -510,7 +513,7 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f117cd8736..4f8ea426057 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,6 +45,122 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+int rs600_mc_init(struct radeon_device *rdev)
+{
+ /* read back the MC value from the hw */
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+ rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xffffffffUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
+/* hpd for digital panel detect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = false;
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+ if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+ connected = true;
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+ if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+ connected = true;
+ break;
+ default:
+ break;
+ }
+ return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ u32 tmp;
+ bool connected = rs600_hpd_sense(rdev, hpd);
+
+ switch (hpd) {
+ case RADEON_HPD_1:
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ else
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ break;
+ case RADEON_HPD_2:
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ if (connected)
+ tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ else
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ break;
+ default:
+ break;
+ }
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+ rdev->irq.hpd[0] = true;
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+ rdev->irq.hpd[1] = true;
+ break;
+ default:
+ break;
+ }
+ }
+ rs600_irq_set(rdev);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+ S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+ rdev->irq.hpd[0] = false;
+ break;
+ case RADEON_HPD_2:
+ WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+ S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+ rdev->irq.hpd[1] = false;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
/*
* GART.
*/
@@ -100,40 +216,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
WREG32(R_00004C_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
- (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
- S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+ (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+ S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
for (i = 0; i < 19; i++) {
WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
- S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
- S_00016C_SYSTEM_ACCESS_MODE_MASK(
- V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
- S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
- V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
- S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
- S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
- S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
+ S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+ S_00016C_SYSTEM_ACCESS_MODE_MASK(
+ V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+ S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+ V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+ S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+ S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+ S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
}
-
- /* System context map to GART space */
- WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
-
/* enable first context */
- WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
- WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
- S_000102_ENABLE_PAGE_TABLE(1) |
- S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+ S_000102_ENABLE_PAGE_TABLE(1) |
+ S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
/* disable all other contexts */
- for (i = 1; i < 8; i++) {
+ for (i = 1; i < 8; i++)
WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
- }
/* setup the page table */
WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
- rdev->gart.table_addr);
+ rdev->gart.table_addr);
+ WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+ WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+ /* System context maps to VRAM space */
+ WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+ WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
/* enable page tables */
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +262,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u32 tmp;
+ int r;
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (r == 0) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -189,6 +310,10 @@ int rs600_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
uint32_t mode_int = 0;
+ u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+ ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+ ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
@@ -199,8 +324,16 @@ int rs600_irq_set(struct radeon_device *rdev)
if (rdev->irq.crtc_vblank_int[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
+ if (rdev->irq.hpd[0]) {
+ hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+ }
+ if (rdev->irq.hpd[1]) {
+ hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ }
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
return 0;
}
@@ -208,6 +341,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
{
uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
uint32_t irq_mask = ~C_000044_SW_INT;
+ u32 tmp;
if (G_000044_DISPLAY_INT_STAT(irqs)) {
*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -219,6 +353,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
WREG32(R_006D34_D2MODE_VBLANK_STATUS,
S_006D34_D2MODE_VBLANK_ACK(1));
}
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+ tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+ tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+ WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+ tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+ tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+ }
} else {
*r500_disp_int = 0;
}
@@ -244,6 +388,7 @@ int rs600_irq_process(struct radeon_device *rdev)
{
uint32_t status, msi_rearm;
uint32_t r500_disp_int;
+ bool queue_hotplug = false;
status = rs600_irq_ack(rdev, &r500_disp_int);
if (!status && !r500_disp_int) {
@@ -258,8 +403,18 @@ int rs600_irq_process(struct radeon_device *rdev)
drm_handle_vblank(rdev->ddev, 0);
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
drm_handle_vblank(rdev->ddev, 1);
+ if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD1\n");
+ }
+ if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+ queue_hotplug = true;
+ DRM_DEBUG("HPD2\n");
+ }
status = rs600_irq_ack(rdev, &r500_disp_int);
}
+ if (queue_hotplug)
+ queue_work(rdev->wq, &rdev->hotplug_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -301,9 +456,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: HDP same place on rs600 ? */
r100_hdp_reset(rdev);
- /* FIXME: is this correct ? */
r420_pipes_init(rdev);
/* Wait for mc idle */
if (rs600_mc_wait_for_idle(rdev))
@@ -312,9 +465,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
void rs600_vram_info(struct radeon_device *rdev)
{
- /* FIXME: to do or is these values sane ? */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
+
+ rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
}
void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -388,7 +552,6 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -423,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs600_startup(rdev);
}
@@ -445,7 +610,7 @@ void rs600_fini(struct radeon_device *rdev)
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -482,10 +647,9 @@ int rs600_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -493,7 +657,7 @@ int rs600_init(struct radeon_device *rdev)
/* Get vram informations */
rs600_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs600_mc_init(rdev);
if (r)
return r;
rs600_debugfs(rdev);
@@ -505,7 +669,7 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs600_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index 81308924859..c1c8f5885cb 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -30,27 +30,12 @@
/* Registers */
#define R_000040_GEN_INT_CNTL 0x000040
-#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
-#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
-#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
-#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
-#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
-#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
-#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
-#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
-#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
-#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
-#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
-#define C_000040_SNAPSHOT2 0xFFFFFF7F
-#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
-#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
-#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
-#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
-#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
-#define C_000040_FP2_DETECT 0xFFFFFBFF
-#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
-#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
-#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
+#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
+#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
+#define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF
+#define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE_MASK 0xFFF7FFFF
#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
@@ -370,7 +355,90 @@
#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
-
+#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF
+#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17)
+#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1)
+#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18)
+#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19)
+#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL 0x007828
+#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838
+#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007838_DACA_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28
+#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38
+#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007A38_DACB_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00
+#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0)
+#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1)
+#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04
+#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE
+#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10
+#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0)
+#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1)
+#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14
+#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE
+#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
/* MC registers */
#define R_000000_MC_STATUS 0x000000
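The S_/G_/C_ defines added above follow the usual radeon register-header convention: S_xxx(v) shifts a value into the field, G_xxx(reg) extracts it, and C_xxx is the mask that clears it. A small illustration using the HPD1 interrupt-enable field from this header (register and accessor names reused from the hunks above; purely a sketch):

	static void example_set_hpd1_int_en(struct radeon_device *rdev, bool enable)
	{
		u32 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);

		tmp &= C_007D08_DC_HOT_PLUG_DETECT1_INT_EN;	/* clear the field */
		tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(enable ? 1 : 0);
		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		/* read back with G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(tmp) */
	}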
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 27547175cf9..1e22f52d603 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
void rs690_vram_info(struct radeon_device *rdev)
{
- uint32_t tmp;
fixed20_12 a;
rs400_gart_adjust_size(rdev);
- /* DDR for all card after R300 & IGP */
+
rdev->mc.vram_is_ddr = true;
- /* FIXME: is this correct for RS690/RS740 ? */
- tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
- }
+ rdev->mc.vram_width = 128;
+
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+ if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
+
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev)
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}
+static int rs690_mc_init(struct radeon_device *rdev)
+{
+ int r;
+ u32 tmp;
+
+ /* Setup GPU memory space */
+ tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
+ rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ r = radeon_mc_setup(rdev);
+ if (r)
+ return r;
+ return 0;
+}
+
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2)
@@ -244,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
- request_fifo_depth.full = rfixed_div(a, c);
+ a.full = rfixed_div(b, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -374,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
+ wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
@@ -605,7 +624,6 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -640,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rs690_startup(rdev);
}
@@ -662,7 +682,7 @@ void rs690_fini(struct radeon_device *rdev)
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -700,10 +720,9 @@ int rs690_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
+
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -711,7 +730,7 @@ int rs690_init(struct radeon_device *rdev)
/* Get vram informations */
rs690_vram_info(rdev);
/* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
+ r = rs690_mc_init(rdev);
if (r)
return r;
rv515_debugfs(rdev);
@@ -723,7 +742,7 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ba68c9fe90a..59632a506b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -478,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}
/* Enable IRQ */
- rdev->irq.sw_int = true;
rs600_irq_set(rdev);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
@@ -514,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
atom_asic_init(rdev->mode_info.atom_context);
/* Resume clock after posting */
rv515_clock_startup(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
return rv515_startup(rdev);
}
@@ -540,11 +541,11 @@ void rv515_fini(struct radeon_device *rdev)
r100_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
- rv370_pcie_gart_fini(rdev);
+ rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
@@ -580,10 +581,8 @@ int rv515_init(struct radeon_device *rdev)
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
- if (!radeon_card_posted(rdev) && rdev->bios) {
- DRM_INFO("GPU not posted. posting now...\n");
- atom_asic_init(rdev->mode_info.atom_context);
- }
+ if (radeon_boot_test_post_card(rdev) == false)
+ return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
@@ -603,7 +602,7 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
r = rv370_pcie_gart_init(rdev);
@@ -892,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
b.full = rfixed_const(mode->crtc_hdisplay);
c.full = rfixed_const(256);
- a.full = rfixed_mul(wm->num_line_pair, b);
- request_fifo_depth.full = rfixed_div(a, c);
+ a.full = rfixed_div(b, c);
+ request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+ request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
if (a.full < rfixed_const(4)) {
wm->lb_request_fifo_depth = 4;
} else {
@@ -995,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
a.full = rfixed_const(16);
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+ wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
/* Determine estimated width */
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
estimated_width.full = rfixed_div(estimated_width, consumption_time);
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
- wm->priority_mark.full = rfixed_const(10);
+ wm->priority_mark.full = wm->priority_mark_max.full;
} else {
a.full = rfixed_const(16);
wm->priority_mark.full = rfixed_div(estimated_width, a);
+ wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
}
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5e06ee7076f..3bcb66e5278 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
- int i;
+ int i, r;
/* Disable all tables */
for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
- radeon_object_kunmap(rdev->gart.table.vram.robj);
- radeon_object_unpin(rdev->gart.table.vram.robj);
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
}
}
@@ -870,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
{
int r;
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
@@ -880,13 +892,34 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
+ if (!rdev->r600_blit.shader_obj) {
+ r = r600_blit_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
@@ -934,13 +967,19 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
+ int r;
+
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
r600_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
/* unpin shaders bo */
- radeon_object_unpin(rdev->r600_blit.shader_obj);
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
return 0;
}
@@ -975,7 +1014,11 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Post card if necessary */
- if (!r600_card_posted(rdev) && rdev->bios) {
+ if (!r600_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
@@ -998,31 +1041,25 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
/* Memory manager */
- r = radeon_object_init(rdev);
+ r = radeon_bo_init(rdev);
if (r)
return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
- if (!rdev->me_fw || !rdev->pfp_fw) {
- r = r600_cp_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
r = r600_pcie_gart_init(rdev);
if (r)
return r;
rdev->accel_working = true;
- r = r600_blit_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failled blitter (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rv770_startup(rdev);
if (r) {
rv770_suspend(rdev);
@@ -1034,12 +1071,12 @@ int rv770_init(struct radeon_device *rdev)
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
@@ -1051,6 +1088,8 @@ void rv770_fini(struct radeon_device *rdev)
rv770_suspend(rdev);
r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
radeon_ring_fini(rdev);
r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
@@ -1059,7 +1098,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_clocks_fini(rdev);
if (rdev->flags & RADEON_IS_AGP)
radeon_agp_fini(rdev);
- radeon_object_fini(rdev);
+ radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index eee52aa92a7..021de44c15a 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -50,7 +50,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e725cc0b115..4fd1f067d38 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -80,7 +80,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 012ff2e356b..ec5a43e6572 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c..1e138f5bae0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+ ttm_object.o ttm_lock.o ttm_execbuf_util.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d46..2920f9a279e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+/* Notes:
+ *
+ * We store the bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, thus to make
+ * sure things don't go berserk you have to access this pointer while
+ * holding the global lru lock and make sure anytime you free a node you
+ * reset the pointer to NULL.
+ */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,59 @@ static struct attribute ttm_bo_count = {
.mode = S_IRUGO
};
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+ int i;
+
+ for (i = 0; i <= TTM_PL_PRIV5; i++)
+ if (flags & (1 << i)) {
+ *mem_type = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+ printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
+ printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
+ printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
+ printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
+ printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
+ printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
+ printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
+ printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
+ man->available_caching);
+ printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
+ man->default_caching);
+ if (mem_type != TTM_PL_SYSTEM) {
+ spin_lock(&bdev->glob->lru_lock);
+ drm_mm_debug_table(&man->manager, TTM_PFX);
+ spin_unlock(&bdev->glob->lru_lock);
+ }
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ int i, ret, mem_type;
+
+ printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
+ for (i = 0; i < placement->num_placement; i++) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return;
+ printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
+ ttm_mem_type_debug(bo->bdev, mem_type);
+ }
+}
+
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
@@ -117,12 +178,13 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
ret = wait_event_interruptible(bo->event_queue,
atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
}
return 0;
}
+EXPORT_SYMBOL(ttm_bo_wait_unreserved);
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
/*
* Call bo->mutex locked.
*/
-
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
page_flags | TTM_PAGE_FLAG_USER,
glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL))
+ if (unlikely(bo->ttm == NULL)) {
ret = -ENOMEM;
- break;
+ break;
+ }
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
- struct ttm_mem_reg *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->placement;
-
- *old_mem = *mem;
+ bo->mem = *mem;
mem->mm_node = NULL;
- ttm_flag_masked(&save_flags, mem->placement,
- TTM_PL_MASK_MEMTYPE);
goto moved;
}
@@ -408,6 +464,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
spin_unlock(&bo->lock);
spin_lock(&glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+
ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
BUG_ON(ret);
if (bo->ttm)
@@ -415,19 +473,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
if (!list_empty(&bo->ddestroy)) {
list_del_init(&bo->ddestroy);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ ++put_count;
}
if (bo->mem.mm_node) {
+ bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
- put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
atomic_set(&bo->reserved, 0);
while (put_count--)
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
return 0;
}
@@ -554,24 +612,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
}
EXPORT_SYMBOL(ttm_bo_unref);
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
- bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ bool no_wait)
{
- int ret = 0;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_reg evict_mem;
- uint32_t proposed_placement;
-
- if (bo->mem.mem_type != mem_type)
- goto out;
+ struct ttm_placement placement;
+ int ret = 0;
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- if (ret != -ERESTART) {
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to expire sync object before "
"buffer eviction.\n");
@@ -584,116 +639,165 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- proposed_placement = bdev->driver->evict_flags(bo);
-
- ret = ttm_bo_mem_space(bo, proposed_placement,
- &evict_mem, interruptible, no_wait);
- if (unlikely(ret != 0 && ret != -ERESTART))
- ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
- &evict_mem, interruptible, no_wait);
-
+ placement.fpfn = 0;
+ placement.lpfn = 0;
+ placement.num_placement = 0;
+ placement.num_busy_placement = 0;
+ bdev->driver->evict_flags(bo, &placement);
+ ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+ no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
"Failed to find memory space for "
"buffer 0x%p eviction.\n", bo);
+ ttm_bo_mem_space_debug(bo, &placement);
+ }
goto out;
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait);
if (ret) {
- if (ret != -ERESTART)
+ if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ spin_lock(&glob->lru_lock);
+ if (evict_mem.mm_node) {
+ evict_mem.mm_node->private = NULL;
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ spin_unlock(&glob->lru_lock);
goto out;
}
-
- spin_lock(&glob->lru_lock);
- if (evict_mem.mm_node) {
- drm_mm_put_block(evict_mem.mm_node);
- evict_mem.mm_node = NULL;
- }
- spin_unlock(&glob->lru_lock);
bo->evicted = true;
out:
return ret;
}
-/**
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- uint32_t mem_type,
- bool interruptible, bool no_wait)
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+ uint32_t mem_type,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_global *glob = bdev->glob;
- struct drm_mm_node *node;
- struct ttm_buffer_object *entry;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct list_head *lru;
- unsigned long num_pages = mem->num_pages;
- int put_count = 0;
- int ret;
-
-retry_pre_get:
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret != 0))
- return ret;
+ struct ttm_buffer_object *bo;
+ int ret, put_count = 0;
+retry:
spin_lock(&glob->lru_lock);
- do {
- node = drm_mm_search_free(&man->manager, num_pages,
- mem->page_alignment, 1);
- if (node)
- break;
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
+ return -EBUSY;
+ }
- lru = &man->lru;
- if (list_empty(lru))
- break;
+ bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+ kref_get(&bo->list_kref);
- entry = list_first_entry(lru, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- ret =
- ttm_bo_reserve_locked(entry, interruptible, no_wait,
- false, 0);
+ if (unlikely(ret == -EBUSY)) {
+ spin_unlock(&glob->lru_lock);
+ if (likely(!no_wait))
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(entry);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
- spin_unlock(&glob->lru_lock);
+ /**
+ * We *need* to retry after releasing the lru lock.
+ */
if (unlikely(ret != 0))
return ret;
+ goto retry;
+ }
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&glob->lru_lock);
- ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
+ BUG_ON(ret != 0);
- ttm_bo_unreserve(entry);
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
- kref_put(&entry->list_kref, ttm_bo_release_list);
- if (ret)
+ ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ttm_bo_unreserve(bo);
+
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ return ret;
+}
+
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node **node)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ unsigned long lpfn;
+ int ret;
+
+ lpfn = placement->lpfn;
+ if (!lpfn)
+ lpfn = man->size;
+ *node = NULL;
+ do {
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret))
return ret;
spin_lock(&glob->lru_lock);
- } while (1);
-
- if (!node) {
+ *node = drm_mm_search_free_in_range(&man->manager,
+ mem->num_pages, mem->page_alignment,
+ placement->fpfn, lpfn, 1);
+ if (unlikely(*node == NULL)) {
+ spin_unlock(&glob->lru_lock);
+ return 0;
+ }
+ *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
spin_unlock(&glob->lru_lock);
- return -ENOMEM;
- }
+ } while (*node == NULL);
+ return 0;
+}
- node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- goto retry_pre_get;
- }
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+ uint32_t mem_type,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct drm_mm_node *node;
+ int ret;
- spin_unlock(&glob->lru_lock);
+ do {
+ ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+ if (unlikely(ret != 0))
+ return ret;
+ if (node)
+ break;
+ spin_lock(&glob->lru_lock);
+ if (list_empty(&man->lru)) {
+ spin_unlock(&glob->lru_lock);
+ break;
+ }
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+ no_wait);
+ if (unlikely(ret != 0))
+ return ret;
+ } while (1);
+ if (node == NULL)
+ return -ENOMEM;
mem->mm_node = node;
mem->mem_type = mem_type;
return 0;
@@ -724,7 +828,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
return result;
}
-
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
bool disallow_fixed,
uint32_t mem_type,
@@ -757,66 +860,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
* space.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bo->glob;
struct ttm_mem_type_manager *man;
-
- uint32_t num_prios = bdev->driver->num_mem_type_prio;
- const uint32_t *prios = bdev->driver->mem_type_prio;
- uint32_t i;
uint32_t mem_type = TTM_PL_SYSTEM;
uint32_t cur_flags = 0;
bool type_found = false;
bool type_ok = false;
- bool has_eagain = false;
+ bool has_erestartsys = false;
struct drm_mm_node *node = NULL;
- int ret;
+ int i, ret;
mem->mm_node = NULL;
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type, proposed_placement,
- &cur_flags);
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->placement[i],
+ &cur_flags);
if (!type_ok)
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Use the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, placement->placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
break;
if (man->has_type && man->use_type) {
type_found = true;
- do {
- ret = drm_mm_pre_get(&man->manager);
- if (unlikely(ret))
- return ret;
-
- spin_lock(&glob->lru_lock);
- node = drm_mm_search_free(&man->manager,
- mem->num_pages,
- mem->page_alignment,
- 1);
- if (unlikely(!node)) {
- spin_unlock(&glob->lru_lock);
- break;
- }
- node = drm_mm_get_block_atomic(node,
- mem->num_pages,
- mem->
- page_alignment);
- spin_unlock(&glob->lru_lock);
- } while (!node);
+ ret = ttm_bo_man_get_node(bo, man, placement, mem,
+ &node);
+ if (unlikely(ret))
+ return ret;
}
if (node)
break;
@@ -826,67 +918,66 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node;
mem->mem_type = mem_type;
mem->placement = cur_flags;
+ if (node)
+ node->private = bo;
return 0;
}
if (!type_found)
return -EINVAL;
- num_prios = bdev->driver->num_mem_busy_prio;
- prios = bdev->driver->mem_busy_prio;
-
- for (i = 0; i < num_prios; ++i) {
- mem_type = prios[i];
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ &mem_type);
+ if (ret)
+ return ret;
man = &bdev->man[mem_type];
-
if (!man->has_type)
continue;
-
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
- mem_type,
- proposed_placement, &cur_flags))
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
continue;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
+ /*
+ * Apply the access and other non-mapping-related flag bits from
+ * the memory placement flags to the current flags
+ */
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+ ~TTM_PL_MASK_MEMTYPE);
- ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
- interruptible, no_wait);
-
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+ interruptible, no_wait);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
+ mem->mm_node->private = bo;
return 0;
}
-
- if (ret == -ERESTART)
- has_eagain = true;
+ if (ret == -ERESTARTSYS)
+ has_erestartsys = true;
}
-
- ret = (has_eagain) ? -ERESTART : -ENOMEM;
+ ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
- int ret = 0;
-
if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
return -EBUSY;
- ret = wait_event_interruptible(bo->event_queue,
- atomic_read(&bo->cpu_writers) == 0);
-
- if (ret == -ERESTARTSYS)
- ret = -ERESTART;
-
- return ret;
+ return wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->cpu_writers) == 0);
}
+EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -899,147 +990,132 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
-
spin_lock(&bo->lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait);
spin_unlock(&bo->lock);
-
if (ret)
return ret;
-
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
-
/*
* Determine where to move the buffer.
*/
-
- ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
- interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
if (ret)
goto out_unlock;
-
ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
+ mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock);
}
return ret;
}
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
- return 0;
- if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
- return 0;
-
- return 1;
+ int i;
+
+ for (i = 0; i < placement->num_placement; i++) {
+ if ((placement->placement[i] & mem->placement &
+ TTM_PL_MASK_CACHING) &&
+ (placement->placement[i] & mem->placement &
+ TTM_PL_MASK_MEM))
+ return i;
+ }
+ return -1;
}
-int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait)
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait)
{
int ret;
BUG_ON(!atomic_read(&bo->reserved));
- bo->proposed_placement = proposed_placement;
-
- TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
- (unsigned long)proposed_placement,
- (unsigned long)bo->mem.placement);
-
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
/*
* Check whether we need to move buffer.
*/
-
- if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
- ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
- interruptible, no_wait);
- if (ret) {
- if (ret != -ERESTART)
- printk(KERN_ERR TTM_PFX
- "Failed moving buffer. "
- "Proposed placement 0x%08x\n",
- bo->proposed_placement);
- if (ret == -ENOMEM)
- printk(KERN_ERR TTM_PFX
- "Out of aperture space or "
- "DRM memory quota.\n");
+ ret = ttm_bo_mem_compat(placement, &bo->mem);
+ if (ret < 0) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ if (ret)
return ret;
- }
+ } else {
+ /*
+ * Apply the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags
+ */
+ ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ~TTM_PL_MASK_MEMTYPE);
}
-
/*
* We might need to add a TTM.
*/
-
if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ret = ttm_bo_add_ttm(bo, true);
if (ret)
return ret;
}
- /*
- * Validation has succeeded, move the access and other
- * non-mapping-related flag bits from the proposed flags to
- * the active flags
- */
-
- ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
- ~TTM_PL_MASK_MEMTYPE);
-
return 0;
}
-EXPORT_SYMBOL(ttm_buffer_object_validate);
+EXPORT_SYMBOL(ttm_bo_validate);
-int
-ttm_bo_check_placement(struct ttm_buffer_object *bo,
- uint32_t set_flags, uint32_t clr_flags)
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
{
- uint32_t new_mask = set_flags | clr_flags;
-
- if ((bo->type == ttm_bo_type_user) &&
- (clr_flags & TTM_PL_FLAG_CACHED)) {
- printk(KERN_ERR TTM_PFX
- "User buffers require cache-coherent memory.\n");
- return -EINVAL;
- }
-
- if (!capable(CAP_SYS_ADMIN)) {
- if (new_mask & TTM_PL_FLAG_NO_EVICT) {
- printk(KERN_ERR TTM_PFX "Need to be root to modify"
- " NO_EVICT status.\n");
+ int i;
+
+ if (placement->fpfn || placement->lpfn) {
+ if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
+ printk(KERN_ERR TTM_PFX "Page number range to small "
+ "Need %lu pages, range is [%u, %u]\n",
+ bo->mem.num_pages, placement->fpfn,
+ placement->lpfn);
return -EINVAL;
}
-
- if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
- (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- printk(KERN_ERR TTM_PFX
- "Incompatible memory specification"
- " for NO_EVICT buffer.\n");
- return -EINVAL;
+ }
+ for (i = 0; i < placement->num_placement; i++) {
+ if (!capable(CAP_SYS_ADMIN)) {
+ if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+ printk(KERN_ERR TTM_PFX "Need to be root to "
+ "modify NO_EVICT status.\n");
+ return -EINVAL;
+ }
+ }
+ }
+ for (i = 0; i < placement->num_busy_placement; i++) {
+ if (!capable(CAP_SYS_ADMIN)) {
+ if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+ printk(KERN_ERR TTM_PFX "Need to be root to "
+ "modify NO_EVICT status.\n");
+ return -EINVAL;
+ }
}
}
return 0;
}
-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- uint32_t flags,
- uint32_t page_alignment,
- unsigned long buffer_start,
- bool interruptible,
- struct file *persistant_swap_storage,
- size_t acc_size,
- void (*destroy) (struct ttm_buffer_object *))
+int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ size_t acc_size,
+ void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
unsigned long num_pages;
@@ -1065,6 +1141,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
bo->glob = bdev->glob;
bo->type = type;
bo->num_pages = num_pages;
+ bo->mem.size = num_pages << PAGE_SHIFT;
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
@@ -1077,29 +1154,21 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
bo->acc_size = acc_size;
atomic_inc(&bo->glob->bo_count);
- ret = ttm_bo_check_placement(bo, flags, 0ULL);
+ ret = ttm_bo_check_placement(bo, placement);
if (unlikely(ret != 0))
goto out_err;
/*
- * If no caching attributes are set, accept any form of caching.
- */
-
- if ((flags & TTM_PL_MASK_CACHING) == 0)
- flags |= TTM_PL_MASK_CACHING;
-
- /*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
-
if (bo->type == ttm_bo_type_device) {
ret = ttm_bo_setup_vm(bo);
if (ret)
goto out_err;
}
- ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
if (ret)
goto out_err;
@@ -1112,7 +1181,7 @@ out_err:
return ret;
}
-EXPORT_SYMBOL(ttm_buffer_object_init);
+EXPORT_SYMBOL(ttm_bo_init);
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
unsigned long num_pages)
@@ -1123,19 +1192,19 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
return glob->ttm_bo_size + 2 * page_array_size;
}
-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
- unsigned long size,
- enum ttm_bo_type type,
- uint32_t flags,
- uint32_t page_alignment,
- unsigned long buffer_start,
- bool interruptible,
- struct file *persistant_swap_storage,
- struct ttm_buffer_object **p_bo)
+int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- int ret;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ int ret;
size_t acc_size =
ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1150,76 +1219,41 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
return -ENOMEM;
}
- ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
- page_alignment, buffer_start,
- interruptible,
- persistant_swap_storage, acc_size, NULL);
+ ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+ buffer_start, interruptible,
+ persistant_swap_storage, acc_size, NULL);
if (likely(ret == 0))
*p_bo = bo;
return ret;
}
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
- uint32_t mem_type, bool allow_errors)
-{
- int ret;
-
- spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bo->lock);
-
- if (ret && allow_errors)
- goto out;
-
- if (bo->mem.mem_type == mem_type)
- ret = ttm_bo_evict(bo, mem_type, false, false);
-
- if (ret) {
- if (allow_errors) {
- goto out;
- } else {
- ret = 0;
- printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
- }
- }
-
-out:
- return ret;
-}
-
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- struct list_head *head,
- unsigned mem_type, bool allow_errors)
+ unsigned mem_type, bool allow_errors)
{
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
- struct ttm_buffer_object *entry;
int ret;
- int put_count;
/*
* Can't use standard list traversal since we're unlocking.
*/
spin_lock(&glob->lru_lock);
-
- while (!list_empty(head)) {
- entry = list_first_entry(head, struct ttm_buffer_object, lru);
- kref_get(&entry->list_kref);
- ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
- put_count = ttm_bo_del_from_lru(entry);
+ while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- while (put_count--)
- kref_put(&entry->list_kref, ttm_bo_ref_bug);
- BUG_ON(ret);
- ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
- ttm_bo_unreserve(entry);
- kref_put(&entry->list_kref, ttm_bo_release_list);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ printk(KERN_ERR TTM_PFX
+ "Cleanup eviction failed\n");
+ }
+ }
spin_lock(&glob->lru_lock);
}
-
spin_unlock(&glob->lru_lock);
-
return 0;
}
@@ -1246,7 +1280,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0;
if (mem_type > 0) {
- ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+ ttm_bo_force_list_clean(bdev, mem_type, false);
spin_lock(&glob->lru_lock);
if (drm_mm_clean(&man->manager))
@@ -1279,12 +1313,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0;
}
- return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+ return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_offset, unsigned long p_size)
+ unsigned long p_size)
{
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
@@ -1314,7 +1348,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
type);
return ret;
}
- ret = drm_mm_init(&man->manager, p_offset, p_size);
+ ret = drm_mm_init(&man->manager, 0, p_size);
if (ret)
return ret;
}
@@ -1463,7 +1497,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
- ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+ ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
if (unlikely(ret != 0))
goto out_no_sys;
@@ -1693,7 +1727,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
ret = wait_event_interruptible
(bo->event_queue, atomic_read(&bo->reserved) == 0);
if (unlikely(ret != 0))
- return -ERESTART;
+ return ret;
} else {
wait_event(bo->event_queue,
atomic_read(&bo->reserved) == 0);
@@ -1722,12 +1756,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
ttm_bo_unreserve(bo);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
if (atomic_dec_and_test(&bo->cpu_writers))
wake_up_all(&bo->event_queue);
}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
/**
* A buffer object shrink method that tries to swap out the first
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 61c5572d2b9..2ecf7d0c64f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#endif
return tmp;
}
+EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long bus_base,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d04033..668dbe8b8dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_wait(bo, false, true, false);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
- retval = (ret != -ERESTART) ?
+ retval = (ret != -ERESTARTSYS) ?
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
goto out_unlock;
}
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
return -EFAULT;
driver = bo->bdev->driver;
- if (unlikely(driver->verify_access)) {
+ if (unlikely(!driver->verify_access)) {
ret = -EPERM;
goto out_unref;
}
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
switch (ret) {
case 0:
break;
- case -ERESTART:
- ret = -EINTR;
- goto out_unref;
case -EBUSY:
ret = -EAGAIN;
goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
switch (ret) {
case 0:
break;
- case -ERESTART:
- return -EINTR;
case -EBUSY:
return -EAGAIN;
default:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 00000000000..c285c2902d1
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ entry->reserved = false;
+ ttm_bo_unreserve(bo);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+retry:
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ entry->reserved = false;
+ ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+ if (ret != 0) {
+ ttm_eu_backoff_reservation(list);
+ if (ret == -EAGAIN) {
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ } else
+ return ret;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation(list);
+ ret = ttm_bo_wait_cpu(bo, false);
+ if (ret)
+ return ret;
+ goto retry;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ void *old_sync_obj;
+
+ spin_lock(&bo->lock);
+ old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ bo->sync_obj_arg = entry->new_sync_obj_arg;
+ spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
+ entry->reserved = false;
+ if (old_sync_obj)
+ driver->sync_obj_unref(&old_sync_obj);
+ }
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 00000000000..f619ebcaa4e
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_module.h"
+#include <asm/atomic.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#define TTM_WRITE_LOCK_PENDING (1 << 0)
+#define TTM_VT_LOCK_PENDING (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
+#define TTM_VT_LOCK (1 << 3)
+#define TTM_SUSPEND_LOCK (1 << 4)
+
+void ttm_lock_init(struct ttm_lock *lock)
+{
+ spin_lock_init(&lock->lock);
+ init_waitqueue_head(&lock->queue);
+ lock->rw = 0;
+ lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
+}
+EXPORT_SYMBOL(ttm_lock_init);
+
+void ttm_read_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ if (--lock->rw == 0)
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_read_unlock);
+
+static bool __ttm_read_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ locked = true;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible)
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_read_lock(lock));
+ else
+ wait_event(lock->queue, __ttm_read_lock(lock));
+ return ret;
+}
+EXPORT_SYMBOL(ttm_read_lock);
+
+static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
+{
+ bool block = true;
+
+ *locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw >= 0 && lock->flags == 0) {
+ ++lock->rw;
+ block = false;
+ *locked = true;
+ } else if (lock->flags == 0) {
+ block = false;
+ }
+ spin_unlock(&lock->lock);
+
+ return !block;
+}
+
+int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+ bool locked;
+
+ if (interruptible)
+ ret = wait_event_interruptible
+ (lock->queue, __ttm_read_trylock(lock, &locked));
+ else
+ wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
+
+ if (unlikely(ret != 0)) {
+ BUG_ON(locked);
+ return ret;
+ }
+
+ return (locked) ? 0 : -EBUSY;
+}
+
+void ttm_write_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 0;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(ttm_write_unlock);
+
+static bool __ttm_write_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
+ if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+ lock->rw = -1;
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ locked = true;
+ } else {
+ lock->flags |= TTM_WRITE_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_write_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ }
+ } else
+ wait_event(lock->queue, __ttm_write_lock(lock));
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_write_lock);
+
+void ttm_write_lock_downgrade(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->rw = 1;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ spin_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ printk(KERN_INFO TTM_PFX "vt unlock.\n");
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_vt_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ return ret;
+ }
+ } else
+ wait_event(lock->queue, __ttm_vt_lock(lock));
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else {
+ lock->vt_holder = tfile;
+ printk(KERN_INFO TTM_PFX "vt lock.\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_vt_lock);
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.hash.key, TTM_REF_USAGE);
+}
+EXPORT_SYMBOL(ttm_vt_unlock);
+
+void ttm_suspend_unlock(struct ttm_lock *lock)
+{
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_SUSPEND_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+}
+
+static bool __ttm_suspend_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+ lock->flags |= TTM_SUSPEND_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+void ttm_suspend_lock(struct ttm_lock *lock)
+{
+ wait_event(lock->queue, __ttm_suspend_lock(lock));
+}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a6bb..f5245c02b8f 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
- struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
- if (unlikely(!zone))
- return -ENOMEM;
-
if (si->totalhigh == 0)
return 0;
+ zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+ if (unlikely(!zone))
+ return -ENOMEM;
+
mem = si->totalram;
mem *= si->mem_unit;
@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
* No special dma32 zone needed.
*/
- if (mem <= ((uint64_t) 1ULL << 32))
+ if (mem <= ((uint64_t) 1ULL << 32)) {
+ kfree(zone);
return 0;
+ }
/*
* Limit max dma32 memory to 4GB for now
@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
+EXPORT_SYMBOL(ttm_mem_global_free);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
+EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size)
}
return 0;
}
+EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 00000000000..1099abac824
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_ref_object.c
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+/**
+ * struct ttm_object_file
+ *
+ * @tdev: Pointer to the ttm_object_device.
+ *
+ * @lock: Lock that protects the ref_list list and the
+ * ref_hash hash tables.
+ *
+ * @ref_list: List of ttm_ref_objects to be destroyed at
+ * file release.
+ *
+ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
+ * for fast lookup of ref objects given a base object.
+ */
+
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+struct ttm_object_file {
+ struct ttm_object_device *tdev;
+ rwlock_t lock;
+ struct list_head ref_list;
+ struct drm_open_hash ref_hash[TTM_REF_NUM];
+ struct kref refcount;
+};
+
+/**
+ * struct ttm_object_device
+ *
+ * @object_lock: lock that protects the object_hash hash table.
+ *
+ * @object_hash: hash table for fast lookup of object global names.
+ *
+ * @object_count: Per device object count.
+ *
+ * This is the per-device data structure needed for ttm object management.
+ */
+
+struct ttm_object_device {
+ rwlock_t object_lock;
+ struct drm_open_hash object_hash;
+ atomic_t object_count;
+ struct ttm_mem_global *mem_glob;
+};
+
+/**
+ * struct ttm_ref_object
+ *
+ * @hash: Hash entry for the per-file object reference hash.
+ *
+ * @head: List entry for the per-file list of ref-objects.
+ *
+ * @kref: Ref count.
+ *
+ * @obj: Base object this ref object is referencing.
+ *
+ * @ref_type: Type of ref object.
+ *
+ * This is similar to an idr object, but it also has a hash table entry
+ * that allows lookup with a pointer to the referenced object as a key. In
+ * that way, one can easily detect whether a base object is referenced by
+ * a particular ttm_object_file. It also carries a ref count to avoid creating
+ * multiple ref objects if a ttm_object_file references the same base
+ * object more than once.
+ */
+
+struct ttm_ref_object {
+ struct drm_hash_item hash;
+ struct list_head head;
+ struct kref kref;
+ struct ttm_base_object *obj;
+ enum ttm_ref_type ref_type;
+ struct ttm_object_file *tfile;
+};
+
+static inline struct ttm_object_file *
+ttm_object_file_ref(struct ttm_object_file *tfile)
+{
+ kref_get(&tfile->refcount);
+ return tfile;
+}
+
+static void ttm_object_file_destroy(struct kref *kref)
+{
+ struct ttm_object_file *tfile =
+ container_of(kref, struct ttm_object_file, refcount);
+
+ kfree(tfile);
+}
+
+
+static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
+{
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ kref_put(&tfile->refcount, ttm_object_file_destroy);
+}
+
+
+int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type object_type,
+ void (*refcount_release) (struct ttm_base_object **),
+ void (*ref_obj_release) (struct ttm_base_object *,
+ enum ttm_ref_type ref_type))
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ int ret;
+
+ base->shareable = shareable;
+ base->tfile = ttm_object_file_ref(tfile);
+ base->refcount_release = refcount_release;
+ base->ref_obj_release = ref_obj_release;
+ base->object_type = object_type;
+ write_lock(&tdev->object_lock);
+ kref_init(&base->refcount);
+ ret = drm_ht_just_insert_please(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
+ write_unlock(&tdev->object_lock);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0))
+ goto out_err1;
+
+ ttm_base_object_unref(&base);
+
+ return 0;
+out_err1:
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+out_err0:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_base_object_init);
+
+static void ttm_release_base(struct kref *kref)
+{
+ struct ttm_base_object *base =
+ container_of(kref, struct ttm_base_object, refcount);
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ write_unlock(&tdev->object_lock);
+ if (base->refcount_release) {
+ ttm_object_file_unref(&base->tfile);
+ base->refcount_release(&base);
+ }
+ write_lock(&tdev->object_lock);
+}
+
+void ttm_base_object_unref(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_object_device *tdev = base->tfile->tdev;
+
+ *p_base = NULL;
+
+ /*
+ * Need to take the lock here to avoid racing with
+ * users trying to look up the object.
+ */
+
+ write_lock(&tdev->object_lock);
+ (void)kref_put(&base->refcount, &ttm_release_base);
+ write_unlock(&tdev->object_lock);
+}
+EXPORT_SYMBOL(ttm_base_object_unref);
+
+struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ uint32_t key)
+{
+ struct ttm_object_device *tdev = tfile->tdev;
+ struct ttm_base_object *base;
+ struct drm_hash_item *hash;
+ int ret;
+
+ read_lock(&tdev->object_lock);
+ ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+
+ if (likely(ret == 0)) {
+ base = drm_hash_entry(hash, struct ttm_base_object, hash);
+ kref_get(&base->refcount);
+ }
+ read_unlock(&tdev->object_lock);
+
+ if (unlikely(ret != 0))
+ return NULL;
+
+ if (tfile != base->tfile && !base->shareable) {
+ printk(KERN_ERR TTM_PFX
+ "Attempted access of non-shareable object.\n");
+ ttm_base_object_unref(&base);
+ return NULL;
+ }
+
+ return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
+
+int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ int ret = -EINVAL;
+
+ if (existed != NULL)
+ *existed = true;
+
+ while (ret == -EINVAL) {
+ read_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, base->hash.key, &hash);
+
+ if (ret == 0) {
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_get(&ref->kref);
+ read_unlock(&tfile->lock);
+ break;
+ }
+
+ read_unlock(&tfile->lock);
+ ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+ ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+ if (unlikely(ref == NULL)) {
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ return -ENOMEM;
+ }
+
+ ref->hash.key = base->hash.key;
+ ref->obj = base;
+ ref->tfile = tfile;
+ ref->ref_type = ref_type;
+ kref_init(&ref->kref);
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_insert_item(ht, &ref->hash);
+
+ if (likely(ret == 0)) {
+ list_add_tail(&ref->head, &tfile->ref_list);
+ kref_get(&base->refcount);
+ write_unlock(&tfile->lock);
+ if (existed != NULL)
+ *existed = false;
+ break;
+ }
+
+ write_unlock(&tfile->lock);
+ BUG_ON(ret != -EINVAL);
+
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_ref_object_add);
+
+static void ttm_ref_object_release(struct kref *kref)
+{
+ struct ttm_ref_object *ref =
+ container_of(kref, struct ttm_ref_object, kref);
+ struct ttm_base_object *base = ref->obj;
+ struct ttm_object_file *tfile = ref->tfile;
+ struct drm_open_hash *ht;
+ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+
+ ht = &tfile->ref_hash[ref->ref_type];
+ (void)drm_ht_remove_item(ht, &ref->hash);
+ list_del(&ref->head);
+ write_unlock(&tfile->lock);
+
+ if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
+ base->ref_obj_release(base, ref->ref_type);
+
+ ttm_base_object_unref(&ref->obj);
+ ttm_mem_global_free(mem_glob, sizeof(*ref));
+ kfree(ref);
+ write_lock(&tfile->lock);
+}
+
+int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key, enum ttm_ref_type ref_type)
+{
+ struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
+ struct ttm_ref_object *ref;
+ struct drm_hash_item *hash;
+ int ret;
+
+ write_lock(&tfile->lock);
+ ret = drm_ht_find_item(ht, key, &hash);
+ if (unlikely(ret != 0)) {
+ write_unlock(&tfile->lock);
+ return -EINVAL;
+ }
+ ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
+ kref_put(&ref->kref, ttm_ref_object_release);
+ write_unlock(&tfile->lock);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_ref_object_base_unref);
+
+void ttm_object_file_release(struct ttm_object_file **p_tfile)
+{
+ struct ttm_ref_object *ref;
+ struct list_head *list;
+ unsigned int i;
+ struct ttm_object_file *tfile = *p_tfile;
+
+ *p_tfile = NULL;
+ write_lock(&tfile->lock);
+
+ /*
+ * Since we release the lock within the loop, we have to
+ * restart it from the beginning each time.
+ */
+
+ while (!list_empty(&tfile->ref_list)) {
+ list = tfile->ref_list.next;
+ ref = list_entry(list, struct ttm_ref_object, head);
+ ttm_ref_object_release(&ref->kref);
+ }
+
+ for (i = 0; i < TTM_REF_NUM; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ write_unlock(&tfile->lock);
+ ttm_object_file_unref(&tfile);
+}
+EXPORT_SYMBOL(ttm_object_file_release);
+
+struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
+ unsigned int hash_order)
+{
+ struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+ unsigned int i;
+ unsigned int j = 0;
+ int ret;
+
+ if (unlikely(tfile == NULL))
+ return NULL;
+
+ rwlock_init(&tfile->lock);
+ tfile->tdev = tdev;
+ kref_init(&tfile->refcount);
+ INIT_LIST_HEAD(&tfile->ref_list);
+
+ for (i = 0; i < TTM_REF_NUM; ++i) {
+ ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
+ if (ret) {
+ j = i;
+ goto out_err;
+ }
+ }
+
+ return tfile;
+out_err:
+ for (i = 0; i < j; ++i)
+ drm_ht_remove(&tfile->ref_hash[i]);
+
+ kfree(tfile);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_file_init);
+
+struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
+ *mem_glob,
+ unsigned int hash_order)
+{
+ struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(tdev == NULL))
+ return NULL;
+
+ tdev->mem_glob = mem_glob;
+ rwlock_init(&tdev->object_lock);
+ atomic_set(&tdev->object_count, 0);
+ ret = drm_ht_create(&tdev->object_hash, hash_order);
+
+ if (likely(ret == 0))
+ return tdev;
+
+ kfree(tdev);
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_object_device_init);
+
+void ttm_object_device_release(struct ttm_object_device **p_tdev)
+{
+ struct ttm_object_device *tdev = *p_tdev;
+
+ *p_tdev = NULL;
+
+ write_lock(&tdev->object_lock);
+ drm_ht_remove(&tdev->object_hash);
+ write_unlock(&tdev->object_lock);
+
+ kfree(tdev);
+}
+EXPORT_SYMBOL(ttm_object_device_release);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f39ce..9c2b1cc5dba 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
ttm->state = tt_unbound;
return 0;
}
+EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index bc2f5184300..7a1b210401e 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -58,7 +58,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .ioctl = drm_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 00000000000..f20b8bcbef3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
+config DRM_VMWGFX
+ tristate "DRM driver for VMware Virtual GPU"
+ depends on DRM && PCI
+ select FB_DEFERRED_IO
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select DRM_TTM
+ help
+ KMS enabled DRM driver for SVGA2 virtual hardware.
+
+ If unsure, say N. The compiled module will be
+ called vmwgfx.ko
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 00000000000..1a3cb6816d1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@
+
+ccflags-y := -Iinclude/drm
+
+vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
+ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_overlay.o
+
+obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
new file mode 100644
index 00000000000..77cb4533100
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -0,0 +1,1793 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga3d_reg.h --
+ *
+ * SVGA 3D hardware definitions
+ */
+
+#ifndef _SVGA3D_REG_H_
+#define _SVGA3D_REG_H_
+
+#include "svga_reg.h"
+
+
+/*
+ * 3D Hardware Version
+ *
+ * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
+ * register. It is set by the host and read by the guest. This lets
+ * us make new guest drivers which are backwards-compatible with old
+ * SVGA hardware revisions. It does not let us support old guest
+ * drivers. Good enough for now.
+ *
+ */
+
+#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
+#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
+
+typedef enum {
+ SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
+ SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
+ SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
+ SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
+ SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
+ SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
+ SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
+} SVGA3dHardwareVersion;
+
+/*
+ * Generic Types
+ */
+
+typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
+#define SVGA3D_NUM_CLIPPLANES 6
+#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
+
+
+/*
+ * Surface formats.
+ *
+ * If you modify this list, be sure to keep GLUtil.c in sync. It
+ * includes the internal format definition of each surface in
+ * GLUtil_ConvertSurfaceFormat, and it contains a table of
+ * human-readable names in GLUtil_GetFormatName.
+ */
+
+typedef enum SVGA3dSurfaceFormat {
+ SVGA3D_FORMAT_INVALID = 0,
+
+ SVGA3D_X8R8G8B8 = 1,
+ SVGA3D_A8R8G8B8 = 2,
+
+ SVGA3D_R5G6B5 = 3,
+ SVGA3D_X1R5G5B5 = 4,
+ SVGA3D_A1R5G5B5 = 5,
+ SVGA3D_A4R4G4B4 = 6,
+
+ SVGA3D_Z_D32 = 7,
+ SVGA3D_Z_D16 = 8,
+ SVGA3D_Z_D24S8 = 9,
+ SVGA3D_Z_D15S1 = 10,
+
+ SVGA3D_LUMINANCE8 = 11,
+ SVGA3D_LUMINANCE4_ALPHA4 = 12,
+ SVGA3D_LUMINANCE16 = 13,
+ SVGA3D_LUMINANCE8_ALPHA8 = 14,
+
+ SVGA3D_DXT1 = 15,
+ SVGA3D_DXT2 = 16,
+ SVGA3D_DXT3 = 17,
+ SVGA3D_DXT4 = 18,
+ SVGA3D_DXT5 = 19,
+
+ SVGA3D_BUMPU8V8 = 20,
+ SVGA3D_BUMPL6V5U5 = 21,
+ SVGA3D_BUMPX8L8V8U8 = 22,
+ SVGA3D_BUMPL8V8U8 = 23,
+
+ SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
+ SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
+
+ SVGA3D_A2R10G10B10 = 26,
+
+ /* signed formats */
+ SVGA3D_V8U8 = 27,
+ SVGA3D_Q8W8V8U8 = 28,
+ SVGA3D_CxV8U8 = 29,
+
+ /* mixed formats */
+ SVGA3D_X8L8V8U8 = 30,
+ SVGA3D_A2W10V10U10 = 31,
+
+ SVGA3D_ALPHA8 = 32,
+
+ /* Single- and dual-component floating point formats */
+ SVGA3D_R_S10E5 = 33,
+ SVGA3D_R_S23E8 = 34,
+ SVGA3D_RG_S10E5 = 35,
+ SVGA3D_RG_S23E8 = 36,
+
+ /*
+ * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
+ * the most efficient format to use when creating new surfaces
+ * expressly for index or vertex data.
+ */
+ SVGA3D_BUFFER = 37,
+
+ SVGA3D_Z_D24X8 = 38,
+
+ SVGA3D_V16U16 = 39,
+
+ SVGA3D_G16R16 = 40,
+ SVGA3D_A16B16G16R16 = 41,
+
+ /* Packed Video formats */
+ SVGA3D_UYVY = 42,
+ SVGA3D_YUY2 = 43,
+
+ SVGA3D_FORMAT_MAX
+} SVGA3dSurfaceFormat;
+
+typedef uint32 SVGA3dColor; /* a, r, g, b */
+
+/*
+ * These match the D3DFORMAT_OP definitions used by Direct3D. We need
+ * them so that we can query the host for what the supported surface
+ * operations are (when we're using the D3D backend, in particular),
+ * and so we can send those operations to the guest.
+ */
+typedef enum {
+ SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
+ SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
+ SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
+ SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
+ SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
+ SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
+ SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
+
+/*
+ * This format can be used as a render target if the current display mode
+ * is the same depth if the alpha channel is ignored. e.g. if the device
+ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
+ * format op list entry for A8R8G8B8 should have this cap.
+ */
+ SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
+
+/*
+ * This format contains DirectDraw support (including Flip). This flag
+ * should not be set on alpha formats.
+ */
+ SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
+
+/*
+ * The rasterizer can support some level of Direct3D support in this format
+ * and implies that the driver can create a Context in this mode (for some
+ * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
+ * flag must also be set.
+ */
+ SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
+
+/*
+ * This is set for a private format when the driver has put the bpp in
+ * the structure.
+ */
+ SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
+
+/*
+ * Indicates that this format can be converted to any RGB format for which
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ */
+ SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
+
+/*
+ * Indicates that this format can be used to create offscreen plain surfaces.
+ */
+ SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
+
+/*
+ * Indicates that this format can be read as an SRGB texture (meaning that the
+ * sampler will linearize the looked up data)
+ */
+ SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
+
+/*
+ * Indicates that this format can be used in the bumpmap instructions
+ */
+ SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
+
+/*
+ * Indicates that this format can be sampled by the displacement map sampler
+ */
+ SVGA3DFORMAT_OP_DMAP = 0x00020000,
+
+/*
+ * Indicates that this format cannot be used with texture filtering
+ */
+ SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
+
+/*
+ * Indicates that format conversions are supported to this RGB format if
+ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
+ */
+ SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
+
+/*
+ * Indicates that this format can be written as an SRGB target (meaning that the
+ * pixel pipe will DE-linearize data on output to format)
+ */
+ SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
+
+/*
+ * Indicates that this format cannot be used with alpha blending
+ */
+ SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
+
+/*
+ * Indicates that the device can auto-generate sublevels for resources
+ * of this format
+ */
+ SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
+
+/*
+ * Indicates that this format can be used by vertex texture sampler
+ */
+ SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
+
+/*
+ * Indicates that this format supports neither texture coordinate wrap
+ * modes, nor mipmapping
+ */
+ SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
+} SVGA3dFormatOp;
+
+/*
+ * This structure is a conversion of SVGA3DFORMAT_OP_*.
+ * Entries must be located at the same position.
+ */
+typedef union {
+ uint32 value;
+ struct {
+ uint32 texture : 1;
+ uint32 volumeTexture : 1;
+ uint32 cubeTexture : 1;
+ uint32 offscreenRenderTarget : 1;
+ uint32 sameFormatRenderTarget : 1;
+ uint32 unknown1 : 1;
+ uint32 zStencil : 1;
+ uint32 zStencilArbitraryDepth : 1;
+ uint32 sameFormatUpToAlpha : 1;
+ uint32 unknown2 : 1;
+ uint32 displayMode : 1;
+ uint32 acceleration3d : 1;
+ uint32 pixelSize : 1;
+ uint32 convertToARGB : 1;
+ uint32 offscreenPlain : 1;
+ uint32 sRGBRead : 1;
+ uint32 bumpMap : 1;
+ uint32 dmap : 1;
+ uint32 noFilter : 1;
+ uint32 memberOfGroupARGB : 1;
+ uint32 sRGBWrite : 1;
+ uint32 noAlphaBlend : 1;
+ uint32 autoGenMipMap : 1;
+ uint32 vertexTexture : 1;
+ uint32 noTexCoordWrapNorMip : 1;
+ };
+} SVGA3dSurfaceFormatCaps;
+
+/*
+ * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_RS_INVALID = 0,
+ SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
+ SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
+ SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
+ SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
+ SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
+ SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
+ SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
+ SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
+ SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
+ SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
+ SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
+ SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
+ SVGA3D_RS_STENCILREF = 13, /* uint32 */
+ SVGA3D_RS_STENCILMASK = 14, /* uint32 */
+ SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
+ SVGA3D_RS_FOGSTART = 16, /* float */
+ SVGA3D_RS_FOGEND = 17, /* float */
+ SVGA3D_RS_FOGDENSITY = 18, /* float */
+ SVGA3D_RS_POINTSIZE = 19, /* float */
+ SVGA3D_RS_POINTSIZEMIN = 20, /* float */
+ SVGA3D_RS_POINTSIZEMAX = 21, /* float */
+ SVGA3D_RS_POINTSCALE_A = 22, /* float */
+ SVGA3D_RS_POINTSCALE_B = 23, /* float */
+ SVGA3D_RS_POINTSCALE_C = 24, /* float */
+ SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
+ SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
+ SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
+ SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
+ SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
+ SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
+ SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
+ SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
+ SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
+ SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
+ SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
+ SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
+ SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
+ SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
+ SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
+ SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
+ SVGA3D_RS_ZBIAS = 45, /* float */
+ SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
+ SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
+ SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
+ SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
+ SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
+ SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
+ SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
+ SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
+ SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
+ SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
+ SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
+ SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
+ SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
+ SVGA3D_RS_DEPTHBIAS = 64, /* float */
+
+
+ /*
+ * Output Gamma Level
+ *
+ * Output gamma affects the gamma curve of colors that are output from the
+ * rendering pipeline. A value of 1.0 specifies a linear color space. If the
+ * value is <= 0.0, gamma correction is ignored and linear color space is
+ * used.
+ */
+
+ SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
+ SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
+ SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
+ SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
+ SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
+ SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
+ SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
+ SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
+ SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
+ SVGA3D_RS_TWEENFACTOR = 88, /* float */
+ SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
+ SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
+ SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
+ SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
+ SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
+ SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
+ SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
+ SVGA3D_RS_MAX
+} SVGA3dRenderStateName;
+
+typedef enum {
+ SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
+ SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
+ SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
+} SVGA3dVertexMaterial;
+
+typedef enum {
+ SVGA3D_FILLMODE_INVALID = 0,
+ SVGA3D_FILLMODE_POINT = 1,
+ SVGA3D_FILLMODE_LINE = 2,
+ SVGA3D_FILLMODE_FILL = 3,
+ SVGA3D_FILLMODE_MAX
+} SVGA3dFillModeType;
+
+
+typedef
+union {
+ struct {
+ uint16 mode; /* SVGA3dFillModeType */
+ uint16 face; /* SVGA3dFace */
+ };
+ uint32 uintValue;
+} SVGA3dFillMode;
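
As an illustration (an editor's sketch, not part of the original header), a render state such as SVGA3D_RS_FILLMODE takes this packed union folded into the single uint32 value carried by SVGA3dRenderState; the helper name below is hypothetical:

static inline uint32
example_pack_fillmode(uint16 mode, /* SVGA3dFillModeType */
                      uint16 face) /* SVGA3dFace */
{
   SVGA3dFillMode fm;

   fm.mode = mode;       /* e.g. SVGA3D_FILLMODE_FILL */
   fm.face = face;       /* which faces the mode applies to */
   return fm.uintValue;  /* value to set for SVGA3D_RS_FILLMODE */
}
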
+
+typedef enum {
+ SVGA3D_SHADEMODE_INVALID = 0,
+ SVGA3D_SHADEMODE_FLAT = 1,
+ SVGA3D_SHADEMODE_SMOOTH = 2,
+ SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
+ SVGA3D_SHADEMODE_MAX
+} SVGA3dShadeMode;
+
+typedef
+union {
+ struct {
+ uint16 repeat;
+ uint16 pattern;
+ };
+ uint32 uintValue;
+} SVGA3dLinePattern;
+
+typedef enum {
+ SVGA3D_BLENDOP_INVALID = 0,
+ SVGA3D_BLENDOP_ZERO = 1,
+ SVGA3D_BLENDOP_ONE = 2,
+ SVGA3D_BLENDOP_SRCCOLOR = 3,
+ SVGA3D_BLENDOP_INVSRCCOLOR = 4,
+ SVGA3D_BLENDOP_SRCALPHA = 5,
+ SVGA3D_BLENDOP_INVSRCALPHA = 6,
+ SVGA3D_BLENDOP_DESTALPHA = 7,
+ SVGA3D_BLENDOP_INVDESTALPHA = 8,
+ SVGA3D_BLENDOP_DESTCOLOR = 9,
+ SVGA3D_BLENDOP_INVDESTCOLOR = 10,
+ SVGA3D_BLENDOP_SRCALPHASAT = 11,
+ SVGA3D_BLENDOP_BLENDFACTOR = 12,
+ SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
+ SVGA3D_BLENDOP_MAX
+} SVGA3dBlendOp;
+
+typedef enum {
+ SVGA3D_BLENDEQ_INVALID = 0,
+ SVGA3D_BLENDEQ_ADD = 1,
+ SVGA3D_BLENDEQ_SUBTRACT = 2,
+ SVGA3D_BLENDEQ_REVSUBTRACT = 3,
+ SVGA3D_BLENDEQ_MINIMUM = 4,
+ SVGA3D_BLENDEQ_MAXIMUM = 5,
+ SVGA3D_BLENDEQ_MAX
+} SVGA3dBlendEquation;
+
+typedef enum {
+ SVGA3D_FRONTWINDING_INVALID = 0,
+ SVGA3D_FRONTWINDING_CW = 1,
+ SVGA3D_FRONTWINDING_CCW = 2,
+ SVGA3D_FRONTWINDING_MAX
+} SVGA3dFrontWinding;
+
+typedef enum {
+ SVGA3D_FACE_INVALID = 0,
+ SVGA3D_FACE_NONE = 1,
+ SVGA3D_FACE_FRONT = 2,
+ SVGA3D_FACE_BACK = 3,
+ SVGA3D_FACE_FRONT_BACK = 4,
+ SVGA3D_FACE_MAX
+} SVGA3dFace;
+
+/*
+ * The order and the values should not be changed
+ */
+
+typedef enum {
+ SVGA3D_CMP_INVALID = 0,
+ SVGA3D_CMP_NEVER = 1,
+ SVGA3D_CMP_LESS = 2,
+ SVGA3D_CMP_EQUAL = 3,
+ SVGA3D_CMP_LESSEQUAL = 4,
+ SVGA3D_CMP_GREATER = 5,
+ SVGA3D_CMP_NOTEQUAL = 6,
+ SVGA3D_CMP_GREATEREQUAL = 7,
+ SVGA3D_CMP_ALWAYS = 8,
+ SVGA3D_CMP_MAX
+} SVGA3dCmpFunc;
+
+/*
+ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
+ * the fog factor to be specified in the alpha component of the specular
+ * (a.k.a. secondary) vertex color.
+ */
+typedef enum {
+ SVGA3D_FOGFUNC_INVALID = 0,
+ SVGA3D_FOGFUNC_EXP = 1,
+ SVGA3D_FOGFUNC_EXP2 = 2,
+ SVGA3D_FOGFUNC_LINEAR = 3,
+ SVGA3D_FOGFUNC_PER_VERTEX = 4
+} SVGA3dFogFunction;
+
+/*
+ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
+ * or per-pixel basis.
+ */
+typedef enum {
+ SVGA3D_FOGTYPE_INVALID = 0,
+ SVGA3D_FOGTYPE_VERTEX = 1,
+ SVGA3D_FOGTYPE_PIXEL = 2,
+ SVGA3D_FOGTYPE_MAX = 3
+} SVGA3dFogType;
+
+/*
+ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
+ * computed using the eye Z value of each pixel (or vertex), whereas range-
+ * based fog is computed using the actual distance (range) to the eye.
+ */
+typedef enum {
+ SVGA3D_FOGBASE_INVALID = 0,
+ SVGA3D_FOGBASE_DEPTHBASED = 1,
+ SVGA3D_FOGBASE_RANGEBASED = 2,
+ SVGA3D_FOGBASE_MAX = 3
+} SVGA3dFogBase;
+
+typedef enum {
+ SVGA3D_STENCILOP_INVALID = 0,
+ SVGA3D_STENCILOP_KEEP = 1,
+ SVGA3D_STENCILOP_ZERO = 2,
+ SVGA3D_STENCILOP_REPLACE = 3,
+ SVGA3D_STENCILOP_INCRSAT = 4,
+ SVGA3D_STENCILOP_DECRSAT = 5,
+ SVGA3D_STENCILOP_INVERT = 6,
+ SVGA3D_STENCILOP_INCR = 7,
+ SVGA3D_STENCILOP_DECR = 8,
+ SVGA3D_STENCILOP_MAX
+} SVGA3dStencilOp;
+
+typedef enum {
+ SVGA3D_CLIPPLANE_0 = (1 << 0),
+ SVGA3D_CLIPPLANE_1 = (1 << 1),
+ SVGA3D_CLIPPLANE_2 = (1 << 2),
+ SVGA3D_CLIPPLANE_3 = (1 << 3),
+ SVGA3D_CLIPPLANE_4 = (1 << 4),
+ SVGA3D_CLIPPLANE_5 = (1 << 5),
+} SVGA3dClipPlanes;
+
+typedef enum {
+ SVGA3D_CLEAR_COLOR = 0x1,
+ SVGA3D_CLEAR_DEPTH = 0x2,
+ SVGA3D_CLEAR_STENCIL = 0x4
+} SVGA3dClearFlag;
+
+typedef enum {
+ SVGA3D_RT_DEPTH = 0,
+ SVGA3D_RT_STENCIL = 1,
+ SVGA3D_RT_COLOR0 = 2,
+ SVGA3D_RT_COLOR1 = 3,
+ SVGA3D_RT_COLOR2 = 4,
+ SVGA3D_RT_COLOR3 = 5,
+ SVGA3D_RT_COLOR4 = 6,
+ SVGA3D_RT_COLOR5 = 7,
+ SVGA3D_RT_COLOR6 = 8,
+ SVGA3D_RT_COLOR7 = 9,
+ SVGA3D_RT_MAX,
+ SVGA3D_RT_INVALID = ((uint32)-1),
+} SVGA3dRenderTargetType;
+
+#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
+
+typedef
+union {
+ struct {
+ uint32 red : 1;
+ uint32 green : 1;
+ uint32 blue : 1;
+ uint32 alpha : 1;
+ };
+ uint32 uintValue;
+} SVGA3dColorMask;
+
+typedef enum {
+ SVGA3D_VBLEND_DISABLE = 0,
+ SVGA3D_VBLEND_1WEIGHT = 1,
+ SVGA3D_VBLEND_2WEIGHT = 2,
+ SVGA3D_VBLEND_3WEIGHT = 3,
+} SVGA3dVertexBlendFlags;
+
+typedef enum {
+ SVGA3D_WRAPCOORD_0 = 1 << 0,
+ SVGA3D_WRAPCOORD_1 = 1 << 1,
+ SVGA3D_WRAPCOORD_2 = 1 << 2,
+ SVGA3D_WRAPCOORD_3 = 1 << 3,
+ SVGA3D_WRAPCOORD_ALL = 0xF,
+} SVGA3dWrapFlags;
+
+/*
+ * SVGA_3D_CMD_TEXTURESTATE Types. All value types
+ * must fit in a uint32.
+ */
+
+typedef enum {
+ SVGA3D_TS_INVALID = 0,
+ SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
+ SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
+ SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
+ SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
+ SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
+ SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
+ SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
+ SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
+ SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
+ SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
+ SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
+ SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
+ SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
+ SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
+ SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
+ SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
+ SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
+ SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
+ SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
+
+
+ /*
+ * Sampler Gamma Level
+ *
+ * Sampler gamma affects the color of samples taken from the sampler. A
+ * value of 1.0 will produce linear samples. If the value is <= 0.0 the
+ * gamma value is ignored and a linear space is used.
+ */
+
+ SVGA3D_TS_GAMMA = 25, /* float */
+ SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
+ SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
+ SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
+ SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
+ SVGA3D_TS_MAX
+} SVGA3dTextureStateName;
+
+typedef enum {
+ SVGA3D_TC_INVALID = 0,
+ SVGA3D_TC_DISABLE = 1,
+ SVGA3D_TC_SELECTARG1 = 2,
+ SVGA3D_TC_SELECTARG2 = 3,
+ SVGA3D_TC_MODULATE = 4,
+ SVGA3D_TC_ADD = 5,
+ SVGA3D_TC_ADDSIGNED = 6,
+ SVGA3D_TC_SUBTRACT = 7,
+ SVGA3D_TC_BLENDTEXTUREALPHA = 8,
+ SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
+ SVGA3D_TC_BLENDCURRENTALPHA = 10,
+ SVGA3D_TC_BLENDFACTORALPHA = 11,
+ SVGA3D_TC_MODULATE2X = 12,
+ SVGA3D_TC_MODULATE4X = 13,
+ SVGA3D_TC_DSDT = 14,
+ SVGA3D_TC_DOTPRODUCT3 = 15,
+ SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
+ SVGA3D_TC_ADDSIGNED2X = 17,
+ SVGA3D_TC_ADDSMOOTH = 18,
+ SVGA3D_TC_PREMODULATE = 19,
+ SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
+ SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
+ SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
+ SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
+ SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
+ SVGA3D_TC_MULTIPLYADD = 25,
+ SVGA3D_TC_LERP = 26,
+ SVGA3D_TC_MAX
+} SVGA3dTextureCombiner;
+
+#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
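
A brief sketch of how this macro is presumably meant to be used (the source of the capability mask is an assumption; SVGA3D_DEVCAP_TEXTURE_OPS appears later in this header): each combiner op other than INVALID maps to one bit of the mask.

static inline uint32
example_tc_supported(uint32 texture_ops_mask, SVGA3dTextureCombiner op)
{
   /* Nonzero if 'op' is advertised in the given capability bitmask. */
   return texture_ops_mask & SVGA3D_TC_CAP_BIT(op);
}
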
+
+typedef enum {
+ SVGA3D_TEX_ADDRESS_INVALID = 0,
+ SVGA3D_TEX_ADDRESS_WRAP = 1,
+ SVGA3D_TEX_ADDRESS_MIRROR = 2,
+ SVGA3D_TEX_ADDRESS_CLAMP = 3,
+ SVGA3D_TEX_ADDRESS_BORDER = 4,
+ SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
+ SVGA3D_TEX_ADDRESS_EDGE = 6,
+ SVGA3D_TEX_ADDRESS_MAX
+} SVGA3dTextureAddress;
+
+/*
+ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
+ * disabled, and the rasterizer should use the magnification filter instead.
+ */
+typedef enum {
+ SVGA3D_TEX_FILTER_NONE = 0,
+ SVGA3D_TEX_FILTER_NEAREST = 1,
+ SVGA3D_TEX_FILTER_LINEAR = 2,
+ SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
+ SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
+ SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
+ SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
+ SVGA3D_TEX_FILTER_MAX
+} SVGA3dTextureFilter;
+
+typedef enum {
+ SVGA3D_TEX_TRANSFORM_OFF = 0,
+ SVGA3D_TEX_TRANSFORM_S = (1 << 0),
+ SVGA3D_TEX_TRANSFORM_T = (1 << 1),
+ SVGA3D_TEX_TRANSFORM_R = (1 << 2),
+ SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
+ SVGA3D_TEX_PROJECTED = (1 << 15),
+} SVGA3dTexTransformFlags;
+
+typedef enum {
+ SVGA3D_TEXCOORD_GEN_OFF = 0,
+ SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
+ SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
+ SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
+ SVGA3D_TEXCOORD_GEN_SPHERE = 4,
+ SVGA3D_TEXCOORD_GEN_MAX
+} SVGA3dTextureCoordGen;
+
+/*
+ * Texture argument constants for texture combiner
+ */
+typedef enum {
+ SVGA3D_TA_INVALID = 0,
+ SVGA3D_TA_CONSTANT = 1,
+ SVGA3D_TA_PREVIOUS = 2,
+ SVGA3D_TA_DIFFUSE = 3,
+ SVGA3D_TA_TEXTURE = 4,
+ SVGA3D_TA_SPECULAR = 5,
+ SVGA3D_TA_MAX
+} SVGA3dTextureArgData;
+
+#define SVGA3D_TM_MASK_LEN 4
+
+/* Modifiers for texture argument constants defined above. */
+typedef enum {
+ SVGA3D_TM_NONE = 0,
+ SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
+ SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
+} SVGA3dTextureArgModifier;
+
+#define SVGA3D_INVALID_ID ((uint32)-1)
+#define SVGA3D_MAX_CLIP_PLANES 6
+
+/*
+ * This is the limit to the number of fixed-function texture
+ * transforms and texture coordinates we can support. It does *not*
+ * correspond to the number of texture image units (samplers) we
+ * support!
+ */
+#define SVGA3D_MAX_TEXTURE_COORDS 8
+
+/*
+ * Vertex declarations
+ *
+ * Notes:
+ *
+ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
+ * draw with any POSITIONT vertex arrays, the programmable vertex
+ * pipeline will be implicitly disabled. Drawing will take place as if
+ * no vertex shader was bound.
+ */
+
+typedef enum {
+ SVGA3D_DECLUSAGE_POSITION = 0,
+ SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
+ SVGA3D_DECLUSAGE_BLENDINDICES, // 2
+ SVGA3D_DECLUSAGE_NORMAL, // 3
+ SVGA3D_DECLUSAGE_PSIZE, // 4
+ SVGA3D_DECLUSAGE_TEXCOORD, // 5
+ SVGA3D_DECLUSAGE_TANGENT, // 6
+ SVGA3D_DECLUSAGE_BINORMAL, // 7
+ SVGA3D_DECLUSAGE_TESSFACTOR, // 8
+ SVGA3D_DECLUSAGE_POSITIONT, // 9
+ SVGA3D_DECLUSAGE_COLOR, // 10
+ SVGA3D_DECLUSAGE_FOG, // 11
+ SVGA3D_DECLUSAGE_DEPTH, // 12
+ SVGA3D_DECLUSAGE_SAMPLE, // 13
+ SVGA3D_DECLUSAGE_MAX
+} SVGA3dDeclUsage;
+
+typedef enum {
+ SVGA3D_DECLMETHOD_DEFAULT = 0,
+ SVGA3D_DECLMETHOD_PARTIALU,
+ SVGA3D_DECLMETHOD_PARTIALV,
+ SVGA3D_DECLMETHOD_CROSSUV, // Normal
+ SVGA3D_DECLMETHOD_UV,
+ SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
+ SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
+} SVGA3dDeclMethod;
+
+typedef enum {
+ SVGA3D_DECLTYPE_FLOAT1 = 0,
+ SVGA3D_DECLTYPE_FLOAT2 = 1,
+ SVGA3D_DECLTYPE_FLOAT3 = 2,
+ SVGA3D_DECLTYPE_FLOAT4 = 3,
+ SVGA3D_DECLTYPE_D3DCOLOR = 4,
+ SVGA3D_DECLTYPE_UBYTE4 = 5,
+ SVGA3D_DECLTYPE_SHORT2 = 6,
+ SVGA3D_DECLTYPE_SHORT4 = 7,
+ SVGA3D_DECLTYPE_UBYTE4N = 8,
+ SVGA3D_DECLTYPE_SHORT2N = 9,
+ SVGA3D_DECLTYPE_SHORT4N = 10,
+ SVGA3D_DECLTYPE_USHORT2N = 11,
+ SVGA3D_DECLTYPE_USHORT4N = 12,
+ SVGA3D_DECLTYPE_UDEC3 = 13,
+ SVGA3D_DECLTYPE_DEC3N = 14,
+ SVGA3D_DECLTYPE_FLOAT16_2 = 15,
+ SVGA3D_DECLTYPE_FLOAT16_4 = 16,
+ SVGA3D_DECLTYPE_MAX,
+} SVGA3dDeclType;
+
+/*
+ * This structure is used for the divisor for geometry instancing;
+ * it's a direct translation of the Direct3D equivalent.
+ */
+typedef union {
+ struct {
+ /*
+ * For index data, this number represents the number of instances to draw.
+ * For instance data, this number represents the number of
+ * instances/vertex in this stream
+ */
+ uint32 count : 30;
+
+ /*
+ * This is 1 if this is supposed to be the data that is repeated for
+ * every instance.
+ */
+ uint32 indexedData : 1;
+
+ /*
+ * This is 1 if this is supposed to be the per-instance data.
+ */
+ uint32 instanceData : 1;
+ };
+
+ uint32 value;
+} SVGA3dVertexDivisor;
+
+typedef enum {
+ SVGA3D_PRIMITIVE_INVALID = 0,
+ SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
+ SVGA3D_PRIMITIVE_POINTLIST = 2,
+ SVGA3D_PRIMITIVE_LINELIST = 3,
+ SVGA3D_PRIMITIVE_LINESTRIP = 4,
+ SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
+ SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
+ SVGA3D_PRIMITIVE_MAX
+} SVGA3dPrimitiveType;
+
+typedef enum {
+ SVGA3D_COORDINATE_INVALID = 0,
+ SVGA3D_COORDINATE_LEFTHANDED = 1,
+ SVGA3D_COORDINATE_RIGHTHANDED = 2,
+ SVGA3D_COORDINATE_MAX
+} SVGA3dCoordinateType;
+
+typedef enum {
+ SVGA3D_TRANSFORM_INVALID = 0,
+ SVGA3D_TRANSFORM_WORLD = 1,
+ SVGA3D_TRANSFORM_VIEW = 2,
+ SVGA3D_TRANSFORM_PROJECTION = 3,
+ SVGA3D_TRANSFORM_TEXTURE0 = 4,
+ SVGA3D_TRANSFORM_TEXTURE1 = 5,
+ SVGA3D_TRANSFORM_TEXTURE2 = 6,
+ SVGA3D_TRANSFORM_TEXTURE3 = 7,
+ SVGA3D_TRANSFORM_TEXTURE4 = 8,
+ SVGA3D_TRANSFORM_TEXTURE5 = 9,
+ SVGA3D_TRANSFORM_TEXTURE6 = 10,
+ SVGA3D_TRANSFORM_TEXTURE7 = 11,
+ SVGA3D_TRANSFORM_WORLD1 = 12,
+ SVGA3D_TRANSFORM_WORLD2 = 13,
+ SVGA3D_TRANSFORM_WORLD3 = 14,
+ SVGA3D_TRANSFORM_MAX
+} SVGA3dTransformType;
+
+typedef enum {
+ SVGA3D_LIGHTTYPE_INVALID = 0,
+ SVGA3D_LIGHTTYPE_POINT = 1,
+ SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
+ SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
+ SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
+ SVGA3D_LIGHTTYPE_MAX
+} SVGA3dLightType;
+
+typedef enum {
+ SVGA3D_CUBEFACE_POSX = 0,
+ SVGA3D_CUBEFACE_NEGX = 1,
+ SVGA3D_CUBEFACE_POSY = 2,
+ SVGA3D_CUBEFACE_NEGY = 3,
+ SVGA3D_CUBEFACE_POSZ = 4,
+ SVGA3D_CUBEFACE_NEGZ = 5,
+} SVGA3dCubeFace;
+
+typedef enum {
+ SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
+ SVGA3D_SHADERTYPE_VS = 1,
+ SVGA3D_SHADERTYPE_PS = 2,
+ SVGA3D_SHADERTYPE_MAX
+} SVGA3dShaderType;
+
+typedef enum {
+ SVGA3D_CONST_TYPE_FLOAT = 0,
+ SVGA3D_CONST_TYPE_INT = 1,
+ SVGA3D_CONST_TYPE_BOOL = 2,
+} SVGA3dShaderConstType;
+
+#define SVGA3D_MAX_SURFACE_FACES 6
+
+typedef enum {
+ SVGA3D_STRETCH_BLT_POINT = 0,
+ SVGA3D_STRETCH_BLT_LINEAR = 1,
+ SVGA3D_STRETCH_BLT_MAX
+} SVGA3dStretchBltMode;
+
+typedef enum {
+ SVGA3D_QUERYTYPE_OCCLUSION = 0,
+ SVGA3D_QUERYTYPE_MAX
+} SVGA3dQueryType;
+
+typedef enum {
+ SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
+ SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
+ SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
+ SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
+} SVGA3dQueryState;
+
+typedef enum {
+ SVGA3D_WRITE_HOST_VRAM = 1,
+ SVGA3D_READ_HOST_VRAM = 2,
+} SVGA3dTransferType;
+
+/*
+ * The maximum number of vertex arrays we're guaranteed to support in
+ * SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_VERTEX_ARRAYS 32
+
+/*
+ * Identifiers for commands in the command FIFO.
+ *
+ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
+ * the SVGA3D protocol and remain reserved; they should not be used in the
+ * future.
+ *
+ * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * current SVGA3D protocol.
+ *
+ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * and up.
+ */
+
+#define SVGA_3D_CMD_LEGACY_BASE 1000
+#define SVGA_3D_CMD_BASE 1040
+
+#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
+#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
+#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
+#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
+#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
+#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
+#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
+#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
+#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
+#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
+#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
+#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
+#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
+#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
+#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
+#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
+#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
+#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
+#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
+#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
+#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
+#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
+#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
+#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
+#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
+#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
+#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
+#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
+#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
+#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
+#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
+
+#define SVGA_3D_CMD_FUTURE_MAX 2000
+
+/*
+ * Common substructures used in multiple FIFO commands:
+ */
+
+typedef struct {
+ union {
+ struct {
+ uint16 function; // SVGA3dFogFunction
+ uint8 type; // SVGA3dFogType
+ uint8 base; // SVGA3dFogBase
+ };
+ uint32 uintValue;
+ };
+} SVGA3dFogMode;
+
+/*
+ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
+ * is a surface ID as well as face/mipmap indices.
+ */
+
+typedef
+struct SVGA3dSurfaceImageId {
+ uint32 sid;
+ uint32 face;
+ uint32 mipmap;
+} SVGA3dSurfaceImageId;
+
+typedef
+struct SVGA3dGuestImage {
+ SVGAGuestPtr ptr;
+
+ /*
+ * A note on interpretation of pitch: This value of pitch is the
+ * number of bytes between vertically adjacent image
+ * blocks. Normally this is the number of bytes between the first
+ * pixel of two adjacent scanlines. With compressed textures,
+ * however, this may represent the number of bytes between
+ * compression blocks rather than between rows of pixels.
+ *
+ * XXX: Compressed textures currently must be tightly packed in guest memory.
+ *
+ * If the image is 1-dimensional, pitch is ignored.
+ *
+ * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+ * assuming each row of blocks is tightly packed.
+ */
+ uint32 pitch;
+} SVGA3dGuestImage;
+
+
+/*
+ * FIFO command format definitions:
+ */
+
+/*
+ * The data size header following cmdNum for every 3d command
+ */
+typedef
+struct {
+ uint32 id;
+ uint32 size;
+} SVGA3dCmdHeader;
+
+/*
+ * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
+ * optional mipmaps and cube faces.
+ */
+
+typedef
+struct {
+ uint32 width;
+ uint32 height;
+ uint32 depth;
+} SVGA3dSize;
+
+typedef enum {
+ SVGA3D_SURFACE_CUBEMAP = (1 << 0),
+ SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
+ SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
+ SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
+ SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
+ SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
+ SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
+ SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
+ SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
+} SVGA3dSurfaceFlags;
+
+typedef
+struct {
+ uint32 numMipLevels;
+} SVGA3dSurfaceFace;
+
+typedef
+struct {
+ uint32 sid;
+ SVGA3dSurfaceFlags surfaceFlags;
+ SVGA3dSurfaceFormat format;
+ SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
+ /*
+ * Followed by an SVGA3dSize structure for each mip level in each face.
+ *
+ * A note on surface sizes: Sizes are always specified in pixels,
+ * even if the true surface size is not a multiple of the minimum
+ * block size of the surface's format. For example, a 3x3x1 DXT1
+ * compressed texture would actually be stored as a 4x4x1 image in
+ * memory.
+ */
+} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
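
Since the define-surface body is followed by one SVGA3dSize per mip level of each face, the total payload size is easy to get wrong; the sketch below (a hypothetical helper, assuming every face has the same mip count) shows the arithmetic:

static inline uint32
example_define_surface_size(uint32 numFaces, uint32 mipsPerFace)
{
   /* numFaces is 1 for ordinary surfaces and SVGA3D_MAX_SURFACE_FACES (6)
    * for cubemaps; mipsPerFace matches face[i].numMipLevels. */
   return sizeof(SVGA3dCmdDefineSurface) +
          numFaces * mipsPerFace * sizeof(SVGA3dSize);
}
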
+
+typedef
+struct {
+ uint32 sid;
+} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
+
+typedef
+struct {
+ uint32 cid;
+} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dClearFlag clearFlag;
+ uint32 color;
+ float depth;
+ uint32 stencil;
+ /* Followed by variable number of SVGA3dRect structures */
+} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
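
To make the header/body pairing concrete, here is an editor's sketch (the wrapper struct name is hypothetical) of how a command travels through the FIFO: the id is one of the SVGA_3D_CMD_* values and size counts the bytes that follow the header, including any trailing variable-length data such as the optional SVGA3dRect list of a clear.

struct example_clear_packet {
   SVGA3dCmdHeader header;  /* id = SVGA_3D_CMD_CLEAR,
                               size = sizeof(SVGA3dCmdClear) + rect bytes */
   SVGA3dCmdClear body;
   /* zero or more SVGA3dRect structures would follow here */
};
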
+
+typedef
+struct SVGA3dCopyRect {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+ uint32 srcx;
+ uint32 srcy;
+} SVGA3dCopyRect;
+
+typedef
+struct SVGA3dCopyBox {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+ uint32 srcx;
+ uint32 srcy;
+ uint32 srcz;
+} SVGA3dCopyBox;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 w;
+ uint32 h;
+} SVGA3dRect;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+ uint32 w;
+ uint32 h;
+ uint32 d;
+} SVGA3dBox;
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 z;
+} SVGA3dPoint;
+
+typedef
+struct {
+ SVGA3dLightType type;
+ SVGA3dBool inWorldSpace;
+ float diffuse[4];
+ float specular[4];
+ float ambient[4];
+ float position[4];
+ float direction[4];
+ float range;
+ float falloff;
+ float attenuation0;
+ float attenuation1;
+ float attenuation2;
+ float theta;
+ float phi;
+} SVGA3dLightData;
+
+typedef
+struct {
+ uint32 sid;
+ /* Followed by variable number of SVGA3dCopyRect structures */
+} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
+
+typedef
+struct {
+ SVGA3dRenderStateName state;
+ union {
+ uint32 uintValue;
+ float floatValue;
+ };
+} SVGA3dRenderState;
+
+typedef
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dRenderState structures */
+} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRenderTargetType type;
+ SVGA3dSurfaceImageId target;
+} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ /* Followed by variable number of SVGA3dCopyBox structures */
+} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId src;
+ SVGA3dSurfaceImageId dest;
+ SVGA3dBox boxSrc;
+ SVGA3dBox boxDest;
+ SVGA3dStretchBltMode mode;
+} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
+
+typedef
+struct {
+ /*
+ * If the discard flag is present in a surface DMA operation, the host may
+ * discard the contents of the current mipmap level and face of the target
+ * surface before applying the surface DMA contents.
+ */
+ uint32 discard : 1;
+
+ /*
+ * If the unsynchronized flag is present, the host may perform this upload
+ * without syncing to pending reads on this surface.
+ */
+ uint32 unsynchronized : 1;
+
+ /*
+ * Guests *MUST* set the reserved bits to 0 before submitting the command
+ * suffix as future flags may occupy these bits.
+ */
+ uint32 reserved : 30;
+} SVGA3dSurfaceDMAFlags;
+
+typedef
+struct {
+ SVGA3dGuestImage guest;
+ SVGA3dSurfaceImageId host;
+ SVGA3dTransferType transfer;
+ /*
+ * Followed by variable number of SVGA3dCopyBox structures. For consistency
+ * in all clipping logic and coordinate translation, we define the
+ * "source" in each copyBox as the guest image and the
+ * "destination" as the host image, regardless of transfer
+ * direction.
+ *
+ * For efficiency, the SVGA3D device is free to copy more data than
+ * specified. For example, it may round copy boxes outwards such
+ * that they lie on particular alignment boundaries.
+ */
+} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
+
+/*
+ * SVGA3dCmdSurfaceDMASuffix --
+ *
+ * This is a command suffix that will appear after a SurfaceDMA command in
+ * the FIFO. It contains some extra information that hosts may use to
+ * optimize performance or protect the guest. This suffix exists to preserve
+ * backwards compatibility while also allowing for new functionality to be
+ * implemented.
+ */
+
+typedef
+struct {
+ uint32 suffixSize;
+
+ /*
+ * The maximum offset is used to determine the maximum offset from the
+ * guestPtr base address that will be accessed or written to during this
+ * surfaceDMA. If the suffix is supported, the host will respect this
+ * boundary while performing surface DMAs.
+ *
+ * Defaults to MAX_UINT32
+ */
+ uint32 maximumOffset;
+
+ /*
+ * A set of flags describing optimizations that the host may apply while
+ * performing this surface DMA operation. For correctness, the guest must
+ * never rely on behaviour that differs when these flags are set.
+ *
+ * Defaults to 0
+ */
+ SVGA3dSurfaceDMAFlags flags;
+} SVGA3dCmdSurfaceDMASuffix;
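
A minimal sketch (hypothetical helper) of filling the suffix with the defaults documented above; the suffix is appended after the SVGA3dCmdSurfaceDMA body and its SVGA3dCopyBox list, and the command header's size must cover it:

static inline void
example_init_dma_suffix(SVGA3dCmdSurfaceDMASuffix *suffix)
{
   suffix->suffixSize = sizeof(*suffix);
   suffix->maximumOffset = (uint32) -1;  /* MAX_UINT32: no extra bound */
   suffix->flags.discard = 0;
   suffix->flags.unsynchronized = 0;
   suffix->flags.reserved = 0;           /* must be zero */
}
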
+
+/*
+ * SVGA_3D_CMD_DRAW_PRIMITIVES --
+ *
+ * This command is the SVGA3D device's generic drawing entry point.
+ * It can draw multiple ranges of primitives, optionally using an
+ * index buffer, using an arbitrary collection of vertex buffers.
+ *
+ * Each SVGA3dVertexDecl defines a distinct vertex array to bind
+ * during this draw call. The declarations specify which surface
+ * the vertex data lives in, what that vertex data is used for,
+ * and how to interpret it.
+ *
+ * Each SVGA3dPrimitiveRange defines a collection of primitives
+ * to render using the same vertex arrays. An index buffer is
+ * optional.
+ */
+
+typedef
+struct {
+ /*
+ * A range hint is an optional specification for the range of indices
+ * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
+ * that the entire array will be used.
+ *
+ * These are only hints. The SVGA3D device may use them for
+ * performance optimization if possible, but it's also allowed to
+ * ignore these values.
+ */
+ uint32 first;
+ uint32 last;
+} SVGA3dArrayRangeHint;
+
+typedef
+struct {
+ /*
+ * Define the origin and shape of a vertex or index array. Both
+ * 'offset' and 'stride' are in bytes. The provided surface will be
+ * reinterpreted as a flat array of bytes in the same format used
+ * by surface DMA operations. To avoid unnecessary conversions, the
+ * surface should be created with the SVGA3D_BUFFER format.
+ *
+ * Index 0 in the array starts 'offset' bytes into the surface.
+ * Index 1 begins at byte 'offset + stride', etc. Array indices may
+ * not be negative.
+ */
+ uint32 surfaceId;
+ uint32 offset;
+ uint32 stride;
+} SVGA3dArray;
+
+typedef
+struct {
+ /*
+ * Describe a vertex array's data type, and define how it is to be
+ * used by the fixed function pipeline or the vertex shader. It
+ * isn't useful to have two VertexDecls with the same
+ * VertexArrayIdentity in one draw call.
+ */
+ SVGA3dDeclType type;
+ SVGA3dDeclMethod method;
+ SVGA3dDeclUsage usage;
+ uint32 usageIndex;
+} SVGA3dVertexArrayIdentity;
+
+typedef
+struct {
+ SVGA3dVertexArrayIdentity identity;
+ SVGA3dArray array;
+ SVGA3dArrayRangeHint rangeHint;
+} SVGA3dVertexDecl;
+
+typedef
+struct {
+ /*
+ * Define a group of primitives to render, from sequential indices.
+ *
+ * The value of 'primitiveType' and 'primitiveCount' imply the
+ * total number of vertices that will be rendered.
+ */
+ SVGA3dPrimitiveType primType;
+ uint32 primitiveCount;
+
+ /*
+ * Optional index buffer. If indexArray.surfaceId is
+ * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
+ * without an index buffer is identical to rendering with an index
+ * buffer containing the sequence [0, 1, 2, 3, ...].
+ *
+ * If an index buffer is in use, indexWidth specifies the width in
+ * bytes of each index value. It must be less than or equal to
+ * indexArray.stride.
+ *
+ * (Currently, the SVGA3D device requires index buffers to be tightly
+ * packed. In other words, indexWidth == indexArray.stride)
+ */
+ SVGA3dArray indexArray;
+ uint32 indexWidth;
+
+ /*
+ * Optional index bias. This number is added to all indices from
+ * indexArray before they are used as vertex array indices. This
+ * can be used in multiple ways:
+ *
+ * - When not using an indexArray, this bias can be used to
+ * specify where in the vertex arrays to begin rendering.
+ *
+ * - A positive number here is equivalent to increasing the
+ * offset in each vertex array.
+ *
+ * - A negative number can be used to render using a small
+ * vertex array and an index buffer that contains large
+ * values. This may be used by some applications that
+ * crop a vertex buffer without modifying their index
+ * buffer.
+ *
+ * Note that rendering with a negative bias value may be slower and
+ * use more memory than rendering with a positive or zero bias.
+ */
+ int32 indexBias;
+} SVGA3dPrimitiveRange;
+
+typedef
+struct {
+ uint32 cid;
+ uint32 numVertexDecls;
+ uint32 numRanges;
+
+ /*
+ * There are two variable size arrays after the
+ * SVGA3dCmdDrawPrimitives structure. In order,
+ * they are:
+ *
+ * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
+ * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
+ * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
+ * the frequency divisor for the corresponding vertex decl)
+ */
+} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
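
Because the body is followed by two (optionally three) variable-length arrays, computing the total command size is the error-prone part; a sketch with a hypothetical helper name:

static inline uint32
example_draw_primitives_size(uint32 numVertexDecls, uint32 numRanges,
                             uint32 numDivisors) /* 0 or numVertexDecls */
{
   return sizeof(SVGA3dCmdDrawPrimitives) +
          numVertexDecls * sizeof(SVGA3dVertexDecl) +
          numRanges * sizeof(SVGA3dPrimitiveRange) +
          numDivisors * sizeof(SVGA3dVertexDivisor);
}
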
+
+typedef
+struct {
+ uint32 stage;
+ SVGA3dTextureStateName name;
+ union {
+ uint32 value;
+ float floatValue;
+ };
+} SVGA3dTextureState;
+
+typedef
+struct {
+ uint32 cid;
+ /* Followed by variable number of SVGA3dTextureState structures */
+} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dTransformType type;
+ float matrix[16];
+} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
+
+typedef
+struct {
+ float min;
+ float max;
+} SVGA3dZRange;
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dZRange zRange;
+} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
+
+typedef
+struct {
+ float diffuse[4];
+ float ambient[4];
+ float specular[4];
+ float emissive[4];
+ float shininess;
+} SVGA3dMaterial;
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dFace face;
+ SVGA3dMaterial material;
+} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ SVGA3dLightData data;
+} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ uint32 enabled;
+} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dRect rect;
+} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 index;
+ float plane[4];
+} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+ /* Followed by variable number of DWORDs for shader bytecode */
+} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 shid;
+ SVGA3dShaderType type;
+} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
+
+typedef
+struct {
+ uint32 cid;
+ uint32 reg; /* register number */
+ SVGA3dShaderType type;
+ SVGA3dShaderConstType ctype;
+ uint32 values[4];
+} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dShaderType type;
+ uint32 shid;
+} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
+
+typedef
+struct {
+ uint32 cid;
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
+} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
+
+typedef
+struct {
+ uint32 cid; /* Same parameters passed to END_QUERY */
+ SVGA3dQueryType type;
+ SVGAGuestPtr guestResult;
+} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
+
+typedef
+struct {
+ uint32 totalSize; /* Set by guest before query is ended. */
+ SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
+ union { /* Set by host on exit from PENDING state */
+ uint32 result32;
+ };
+} SVGA3dQueryResult;
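
Given the state transitions described in SVGA3dQueryState, the usual guest-side pattern is to set state to SVGA3D_QUERYSTATE_PENDING before issuing SVGA_3D_CMD_END_QUERY and then watch for the host to move it out of that state; an editor's sketch (hypothetical helper):

static inline uint32
example_query_done(const volatile SVGA3dQueryResult *result)
{
   /* Nonzero once the host has written SUCCEEDED or FAILED. */
   return result->state != SVGA3D_QUERYSTATE_PENDING;
}
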
+
+/*
+ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
+ *
+ * This is a blit from an SVGA3D surface to a Screen Object. Just
+ * like GMR-to-screen blits, this blit may be directed at a
+ * specific screen or to the virtual coordinate space.
+ *
+ * The blit copies from a rectangular region of an SVGA3D surface
+ * image to a rectangular region of a screen or screens.
+ *
+ * This command takes an optional variable-length list of clipping
+ * rectangles after the body of the command. If no rectangles are
+ * specified, there is no clipping region. The entire destRect is
+ * drawn to. If one or more rectangles are included, they describe
+ * a clipping region. The clip rectangle coordinates are measured
+ * relative to the top-left corner of destRect.
+ *
+ * This clipping region serves multiple purposes:
+ *
+ * - It can be used to perform an irregularly shaped blit more
+ * efficiently than by issuing many separate blit commands.
+ *
+ * - It is equivalent to allowing blits with non-integer
+ * source coordinates. You could blit just one half-pixel
+ * of a source, for example, by specifying a larger
+ * destination rectangle than you need, then removing
+ * part of it using a clip rectangle.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ *
+ * Limitations:
+ *
+ * - Currently, no backend supports blits from a mipmap or face
+ * other than the first one.
+ */
+
+typedef
+struct {
+ SVGA3dSurfaceImageId srcImage;
+ SVGASignedRect srcRect;
+ uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
+ SVGASignedRect destRect; /* Supports scaling if src/dest are different sizes */
+ /* Clipping: zero or more SVGASignedRects follow */
+} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
+
+
+/*
+ * Capability query index.
+ *
+ * Notes:
+ *
+ * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
+ * fixed-function texture units available. Each of these units
+ * work in both FFP and Shader modes, and they support texture
+ * transforms and texture coordinates. The host may have additional
+ * texture image units that are only usable with shaders.
+ *
+ * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
+ * return TRUE. Even on physical hardware that does not support
+ * these formats natively, the SVGA3D device will provide an emulation
+ * which should be invisible to the guest OS.
+ *
+ * In general, the SVGA3D device should support any operation on
+ * any surface format; it may simply perform some of these
+ * operations in software, depending on the capabilities of the
+ * available physical hardware.
+ *
+ * XXX: In the future, we will add capabilities that describe in
+ * detail what formats are supported in hardware for what kinds
+ * of operations.
+ */
+
+typedef enum {
+ SVGA3D_DEVCAP_3D = 0,
+ SVGA3D_DEVCAP_MAX_LIGHTS = 1,
+ SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
+ SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
+ SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
+ SVGA3D_DEVCAP_VERTEX_SHADER = 5,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
+ SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
+ SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
+ SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
+ SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
+ SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
+ SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
+ SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
+ SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
+ SVGA3D_DEVCAP_QUERY_TYPES = 15,
+ SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
+ SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
+ SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
+ SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
+ SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
+ SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
+ SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
+ SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
+ SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
+ SVGA3D_DEVCAP_TEXTURE_OPS = 31,
+ SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
+ SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
+ SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
+ SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
+ SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
+ SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
+ SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
+ SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
+ SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
+ SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
+ SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
+ SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
+ SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
+ SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
+ SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
+ SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
+ SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
+ SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
+ SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
+
+ /*
+ * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
+ * render targets. This does not include the depth or stencil targets.
+ */
+ SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
+
+ SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
+ SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
+ SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
+ SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
+ SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
+
+ /*
+ * Don't add new caps into the previous section; the values in this
+ * enumeration must not change. You can put new values right before
+ * SVGA3D_DEVCAP_MAX.
+ */
+ SVGA3D_DEVCAP_MAX /* This must be the last index. */
+} SVGA3dDevCapIndex;
+
+typedef union {
+ Bool b;
+ uint32 u;
+ int32 i;
+ float f;
+} SVGA3dDevCapResult;
+
+#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
new file mode 100644
index 00000000000..7b85e9b8c85
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -0,0 +1,89 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_escape.h --
+ *
+ * Definitions for our own (vendor-specific) SVGA Escape commands.
+ */
+
+#ifndef _SVGA_ESCAPE_H_
+#define _SVGA_ESCAPE_H_
+
+
+/*
+ * Namespace IDs for the escape command
+ */
+
+#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
+#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
+
+
+/*
+ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
+ * the first DWORD of escape data (after the nsID and size). As a
+ * guideline we're using the high word and low word as a major and
+ * minor command number, respectively.
+ *
+ * Major command number allocation:
+ *
+ * 0000: Reserved
+ * 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
+ * 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
+ * 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
+ */
+
+#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
+
+
+/*
+ * SVGA Hint commands.
+ *
+ * These escapes let the SVGA driver provide optional information to
+ * the host about the state of the guest or guest applications. The
+ * host can use these hints to make user interface or performance
+ * decisions.
+ *
+ * Notes:
+ *
+ * - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
+ * that use the SVGA Screen Object extension. Instead of sending
+ * this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
+ * Screen Object.
+ */
+
+#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
+
+typedef
+struct {
+ uint32 command;
+ uint32 fullscreen;
+ struct {
+ int32 x, y;
+ } monitorPosition;
+} SVGAEscapeHintFullscreen;
+
+#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
new file mode 100644
index 00000000000..f753d73c14b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -0,0 +1,201 @@
+/**********************************************************
+ * Copyright 2007-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_overlay.h --
+ *
+ * Definitions for video-overlay support.
+ */
+
+#ifndef _SVGA_OVERLAY_H_
+#define _SVGA_OVERLAY_H_
+
+#include "svga_reg.h"
+
+/*
+ * Video formats we support
+ */
+
+#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
+#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
+#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
+
+typedef enum {
+ SVGA_OVERLAY_FORMAT_INVALID = 0,
+ SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
+ SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
+ SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
+} SVGAOverlayFormat;
+
+#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
+
+#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
+ /* FIFO escape layout:
+ * Type, Stream Id, (Register Id, Value) pairs */
+
+#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
+ /* FIFO escape layout:
+ * Type, Stream Id */
+
+typedef
+struct SVGAEscapeVideoSetRegs {
+ struct {
+ uint32 cmdType;
+ uint32 streamId;
+ } header;
+
+ // May include zero or more items.
+ struct {
+ uint32 registerId;
+ uint32 value;
+ } items[1];
+} SVGAEscapeVideoSetRegs;
+
+typedef
+struct SVGAEscapeVideoFlush {
+ uint32 cmdType;
+ uint32 streamId;
+} SVGAEscapeVideoFlush;
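
A small sketch (hypothetical helper) of filling the flush body; wrapping it in the generic FIFO escape command with the SVGA_ESCAPE_NSID_VMWARE namespace is handled by definitions outside this header:

static inline void
example_video_flush_init(SVGAEscapeVideoFlush *flush, uint32 streamId)
{
   flush->cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
   flush->streamId = streamId;
}
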
+
+
+/*
+ * Struct definitions for the video overlay commands built on
+ * SVGAFifoCmdEscape.
+ */
+typedef
+struct {
+ uint32 command;
+ uint32 overlay;
+} SVGAFifoEscapeCmdVideoBase;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+} SVGAFifoEscapeCmdVideoFlush;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+ struct {
+ uint32 regId;
+ uint32 value;
+ } items[1];
+} SVGAFifoEscapeCmdVideoSetRegs;
+
+typedef
+struct {
+ SVGAFifoEscapeCmdVideoBase videoCmd;
+ struct {
+ uint32 regId;
+ uint32 value;
+ } items[SVGA_VIDEO_NUM_REGS];
+} SVGAFifoEscapeCmdVideoSetAllRegs;
+
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * VMwareVideoGetAttributes --
+ *
+ * Computes the size, pitches and offsets for YUV frames.
+ *
+ * Results:
+ * TRUE on success; FALSE on failure.
+ *
+ * Side effects:
+ * Pitches and offsets for the given YUV frame are put in 'pitches'
+ * and 'offsets' respectively. They are both optional though.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline bool
+VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
+ uint32 *width, // IN / OUT
+ uint32 *height, // IN / OUT
+ uint32 *size, // OUT
+ uint32 *pitches, // OUT (optional)
+ uint32 *offsets) // OUT (optional)
+{
+ int tmp;
+
+ *width = (*width + 1) & ~1;
+
+ if (offsets) {
+ offsets[0] = 0;
+ }
+
+ switch (format) {
+ case VMWARE_FOURCC_YV12:
+ *height = (*height + 1) & ~1;
+ *size = (*width + 3) & ~3;
+
+ if (pitches) {
+ pitches[0] = *size;
+ }
+
+ *size *= *height;
+
+ if (offsets) {
+ offsets[1] = *size;
+ }
+
+ tmp = ((*width >> 1) + 3) & ~3;
+
+ if (pitches) {
+ pitches[1] = pitches[2] = tmp;
+ }
+
+ tmp *= (*height >> 1);
+ *size += tmp;
+
+ if (offsets) {
+ offsets[2] = *size;
+ }
+
+ *size += tmp;
+ break;
+
+ case VMWARE_FOURCC_YUY2:
+ case VMWARE_FOURCC_UYVY:
+ *size = *width * 2;
+
+ if (pitches) {
+ pitches[0] = *size;
+ }
+
+ *size *= *height;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
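
Illustrative use of the helper above for a 640x480 YV12 frame (the values quoted follow directly from the arithmetic in the function; the wrapper name is hypothetical):

static inline uint32
example_yv12_frame_size(void)
{
   uint32 w = 640, h = 480, size = 0;
   uint32 pitches[3], offsets[3];

   if (!VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12,
                                 &w, &h, &size, pitches, offsets))
      return 0;

   /* Here size == 460800 (640 * 480 * 3 / 2), offsets[0] == 0 for the
    * luma plane and offsets[1]/offsets[2] locate the two chroma planes. */
   return size;
}
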
+
+#endif // _SVGA_OVERLAY_H_
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
new file mode 100644
index 00000000000..1b96c2ec07d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -0,0 +1,1346 @@
+/**********************************************************
+ * Copyright 1998-2009 VMware, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ **********************************************************/
+
+/*
+ * svga_reg.h --
+ *
+ * Virtual hardware definitions for the VMware SVGA II device.
+ */
+
+#ifndef _SVGA_REG_H_
+#define _SVGA_REG_H_
+
+/*
+ * PCI device IDs.
+ */
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
+
+/*
+ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
+ * cursor bypass mode. This is still supported, but no new guest
+ * drivers should use it.
+ */
+#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
+
+/*
+ * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
+ * The changeMap in the monitor is proportional to this number. Therefore, we'd
+ * like to keep it as small as possible to reduce monitor overhead (using
+ * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
+ * 4k!).
+ *
+ * NB: For compatibility reasons, this value must be greater than 0xff0000.
+ * See bug 335072.
+ */
+#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
+
+#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
+#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
+#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
+
+#define SVGA_MAGIC 0x900000UL
+#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
+
+/* Version 2 let the address of the frame buffer be unsigned on Win32 */
+#define SVGA_VERSION_2 2
+#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
+
+/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
+ PALETTE_BASE has moved */
+#define SVGA_VERSION_1 1
+#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
+
+/* Version 0 is the initial version */
+#define SVGA_VERSION_0 0
+#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
+
+/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+#define SVGA_ID_INVALID 0xFFFFFFFF
+
+/* Port offsets, relative to BAR0 */
+#define SVGA_INDEX_PORT 0x0
+#define SVGA_VALUE_PORT 0x1
+#define SVGA_BIOS_PORT 0x2
+#define SVGA_IRQSTATUS_PORT 0x8
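
The sketch below illustrates the index/value access pattern these port offsets imply: select a register by writing its number to the index port, then read or write it through the value port. The io_base parameter (the BAR0 I/O base) and the outl/inl helpers are assumptions, not part of this header:

static inline uint32
example_svga_read_reg(unsigned long io_base, uint32 reg)
{
   outl(reg, io_base + SVGA_INDEX_PORT);
   return inl(io_base + SVGA_VALUE_PORT);
}
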
+
+/*
+ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
+ *
+ * Interrupts are only supported when the
+ * SVGA_CAP_IRQMASK capability is present.
+ */
+#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
+
+/*
+ * Registers
+ */
+
+enum {
+ SVGA_REG_ID = 0,
+ SVGA_REG_ENABLE = 1,
+ SVGA_REG_WIDTH = 2,
+ SVGA_REG_HEIGHT = 3,
+ SVGA_REG_MAX_WIDTH = 4,
+ SVGA_REG_MAX_HEIGHT = 5,
+ SVGA_REG_DEPTH = 6,
+ SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
+ SVGA_REG_PSEUDOCOLOR = 8,
+ SVGA_REG_RED_MASK = 9,
+ SVGA_REG_GREEN_MASK = 10,
+ SVGA_REG_BLUE_MASK = 11,
+ SVGA_REG_BYTES_PER_LINE = 12,
+ SVGA_REG_FB_START = 13, /* (Deprecated) */
+ SVGA_REG_FB_OFFSET = 14,
+ SVGA_REG_VRAM_SIZE = 15,
+ SVGA_REG_FB_SIZE = 16,
+
+ /* ID 0 implementation only had the above registers, then the palette */
+
+ SVGA_REG_CAPABILITIES = 17,
+ SVGA_REG_MEM_START = 18, /* (Deprecated) */
+ SVGA_REG_MEM_SIZE = 19,
+ SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
+ SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
+ SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
+ SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
+ SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
+ SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
+ SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
+ SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
+ SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
+ SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
+ SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
+ SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
+ SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
+ SVGA_REG_IRQMASK = 33, /* Interrupt mask */
+
+ /* Legacy multi-monitor support */
+ SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
+ SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
+ SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
+ SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
+ SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
+ SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
+ SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
+
+ /* See "Guest memory regions" below. */
+ SVGA_REG_GMR_ID = 41,
+ SVGA_REG_GMR_DESCRIPTOR = 42,
+ SVGA_REG_GMR_MAX_IDS = 43,
+ SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
+
+ SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
+ SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
+
+ SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
+ /* Next 768 (== 256*3) registers exist for colormap */
+
+ SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
+ /* Base of scratch registers */
+ /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
+ First 4 are reserved for VESA BIOS Extension; any remaining are for
+ the use of the current SVGA driver. */
+};
+
+
+/*
+ * Guest memory regions (GMRs):
+ *
+ * This is a new memory mapping feature available in SVGA devices
+ * which have the SVGA_CAP_GMR bit set. Previously, there were two
+ * fixed memory regions available with which to share data between the
+ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
+ * are our name for an extensible way of providing arbitrary DMA
+ * buffers for use between the driver and the SVGA device. They are a
+ * new alternative to framebuffer memory, usable for both 2D and 3D
+ * graphics operations.
+ *
+ * Since GMR mapping must be done synchronously with guest CPU
+ * execution, we use a new pair of SVGA registers:
+ *
+ * SVGA_REG_GMR_ID --
+ *
+ * Read/write.
+ * This register holds the 32-bit ID (a small positive integer)
+ * of a GMR to create, delete, or redefine. Writing this register
+ * has no side-effects.
+ *
+ * SVGA_REG_GMR_DESCRIPTOR --
+ *
+ * Write-only.
+ * Writing this register will create, delete, or redefine the GMR
+ * specified by the above ID register. If this register is zero,
+ * the GMR is deleted. Any pointers into this GMR (including those
+ * currently being processed by FIFO commands) will be
+ * synchronously invalidated.
+ *
+ * If this register is nonzero, it must be the physical page
+ * number (PPN) of a data structure which describes the physical
+ * layout of the memory region this GMR should describe. The
+ * descriptor structure will be read synchronously by the SVGA
+ * device when this register is written. The descriptor need not
+ * remain allocated for the lifetime of the GMR.
+ *
+ * The guest driver should write SVGA_REG_GMR_ID first, then
+ * SVGA_REG_GMR_DESCRIPTOR.
+ *
+ * SVGA_REG_GMR_MAX_IDS --
+ *
+ * Read-only.
+ * The SVGA device may choose to support a maximum number of
+ * user-defined GMR IDs. This register holds the number of supported
+ * IDs. (The maximum supported ID plus 1)
+ *
+ * SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
+ *
+ * Read-only.
+ * The SVGA device may choose to put a limit on the total number
+ * of SVGAGuestMemDescriptor structures it will read when defining
+ * a single GMR.
+ *
+ * The descriptor structure is an array of SVGAGuestMemDescriptor
+ * structures. Each structure may do one of three things:
+ *
+ * - Terminate the GMR descriptor list.
+ * (ppn==0, numPages==0)
+ *
+ * - Add a PPN or range of PPNs to the GMR's virtual address space.
+ * (ppn != 0, numPages != 0)
+ *
+ * - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
+ * support multi-page GMR descriptor tables without forcing the
+ * driver to allocate physically contiguous memory.
+ * (ppn != 0, numPages == 0)
+ *
+ * Note that each physical page of SVGAGuestMemDescriptor structures
+ * can describe at least 2MB of guest memory. If the driver needs to
+ * use more than one page of descriptor structures, it must use one of
+ * its SVGAGuestMemDescriptors to point to an additional page. The
+ * device will never automatically cross a page boundary.
+ *
+ * Once the driver has described a GMR, it is immediately available
+ * for use via any FIFO command that uses an SVGAGuestPtr structure.
+ * These pointers include a GMR identifier plus an offset into that
+ * GMR.
+ *
+ * The driver must check the SVGA_CAP_GMR bit before using the GMR
+ * registers.
+ */
+
+/*
+ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
+ * memory as well. In the future, these IDs could even be used to
+ * allow legacy memory regions to be redefined by the guest as GMRs.
+ *
+ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
+ * is being phased out. Please try to use user-defined GMRs whenever
+ * possible.
+ */
+#define SVGA_GMR_NULL ((uint32) -1)
+#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
+
+typedef
+struct SVGAGuestMemDescriptor {
+ uint32 ppn;
+ uint32 numPages;
+} SVGAGuestMemDescriptor;
+
+typedef
+struct SVGAGuestPtr {
+ uint32 gmrId;
+ uint32 offset;
+} SVGAGuestPtr;
+
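/*
 * A minimal usage sketch (illustration only, not part of this patch):
 * defining a GMR that covers one physically contiguous range of pages,
 * using the SVGA_REG_GMR_* protocol described above.  'desc' is a
 * page-aligned array the driver owns, 'desc_ppn' is its physical page
 * number, and vmw_write() is the register accessor used elsewhere in this
 * driver; all of these names are assumptions made for the example.
 */
static void example_define_gmr(struct vmw_private *dev_priv, uint32 gmr_id,
			       SVGAGuestMemDescriptor *desc, uint32 desc_ppn,
			       uint32 first_ppn, uint32 num_pages)
{
	/* One range entry plus the mandatory (0, 0) terminator. */
	desc[0].ppn = first_ppn;
	desc[0].numPages = num_pages;
	desc[1].ppn = 0;
	desc[1].numPages = 0;

	/* Select the GMR ID first, then hand the descriptor to the device. */
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_ppn);

	/* Writing 0 to SVGA_REG_GMR_DESCRIPTOR instead would delete the GMR. */
}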
+
+/*
+ * SVGAGMRImageFormat --
+ *
+ * This is a packed representation of the source 2D image format
+ * for a GMR-to-screen blit. Currently it is defined as an encoding
+ * of the screen's color depth and bits-per-pixel, however, 16 bits
+ * are reserved for future use to identify other encodings (such as
+ * RGBA or higher-precision images).
+ *
+ * Currently supported formats:
+ *
+ * bpp depth Format Name
+ * --- ----- -----------
+ * 32 24 32-bit BGRX
+ * 24 24 24-bit BGR
+ * 16 16 RGB 5-6-5
+ * 16 15 RGB 5-5-5
+ *
+ */
+
+typedef
+struct SVGAGMRImageFormat {
+ union {
+ struct {
+ uint32 bitsPerPixel : 8;
+ uint32 colorDepth : 8;
+ uint32 reserved : 16; // Must be zero
+ };
+
+ uint32 value;
+ };
+} SVGAGMRImageFormat;
+
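/*
 * Illustration only: packing the 32 bpp / 24-bit-depth BGRX entry from the
 * table above into the union.  Clearing 'value' first zeroes the reserved
 * bits, as required.
 */
static inline SVGAGMRImageFormat example_format_bgrx32(void)
{
	SVGAGMRImageFormat fmt;

	fmt.value = 0;
	fmt.bitsPerPixel = 32;
	fmt.colorDepth = 24;
	return fmt;
}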
+/*
+ * SVGAColorBGRX --
+ *
+ * A 24-bit color format (BGRX), which does not depend on the
+ * format of the legacy guest framebuffer (GFB) or the current
+ * GMRFB state.
+ */
+
+typedef
+struct SVGAColorBGRX {
+ union {
+ struct {
+ uint32 b : 8;
+ uint32 g : 8;
+ uint32 r : 8;
+ uint32 x : 8; // Unused
+ };
+
+ uint32 value;
+ };
+} SVGAColorBGRX;
+
+
+/*
+ * SVGASignedRect --
+ * SVGASignedPoint --
+ *
+ * Signed rectangle and point primitives. These are used by the new
+ * 2D primitives for drawing to Screen Objects, which can occupy a
+ * signed virtual coordinate space.
+ *
+ * SVGASignedRect specifies a half-open interval: the (left, top)
+ * pixel is part of the rectangle, but the (right, bottom) pixel is
+ * not.
+ */
+
+typedef
+struct SVGASignedRect {
+ int32 left;
+ int32 top;
+ int32 right;
+ int32 bottom;
+} SVGASignedRect;
+
+typedef
+struct SVGASignedPoint {
+ int32 x;
+ int32 y;
+} SVGASignedPoint;
+
+
+/*
+ * Capabilities
+ *
+ * Note the holes in the bitfield. Missing bits have been deprecated,
+ * and must not be reused. Those capabilities will never be reported
+ * by new versions of the SVGA device.
+ */
+
+#define SVGA_CAP_NONE 0x00000000
+#define SVGA_CAP_RECT_COPY 0x00000002
+#define SVGA_CAP_CURSOR 0x00000020
+#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_8BIT_EMULATION 0x00000100
+#define SVGA_CAP_ALPHA_CURSOR 0x00000200
+#define SVGA_CAP_3D 0x00004000
+#define SVGA_CAP_EXTENDED_FIFO 0x00008000
+#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
+#define SVGA_CAP_PITCHLOCK 0x00020000
+#define SVGA_CAP_IRQMASK 0x00040000
+#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
+#define SVGA_CAP_GMR 0x00100000
+#define SVGA_CAP_TRACES 0x00200000
+
+
+/*
+ * FIFO register indices.
+ *
+ * The FIFO is a chunk of device memory mapped into guest physmem. It
+ * is always treated as 32-bit words.
+ *
+ * The guest driver gets to decide how to partition it between
+ * - FIFO registers (there are always at least 4, specifying where the
+ * following data area is and how much data it contains; there may be
+ * more registers following these, depending on the FIFO protocol
+ * version in use)
+ * - FIFO data, written by the guest and slurped out by the VMX.
+ * These indices are 32-bit word offsets into the FIFO.
+ */
+
+enum {
+ /*
+ * Block 1 (basic registers): The originally defined FIFO registers.
+ * These exist and are valid for all versions of the FIFO protocol.
+ */
+
+ SVGA_FIFO_MIN = 0,
+ SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
+ SVGA_FIFO_NEXT_CMD,
+ SVGA_FIFO_STOP,
+
+ /*
+ * Block 2 (extended registers): Mandatory registers for the extended
+ * FIFO. These exist if the SVGA caps register includes
+ * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
+ * associated capability bit is enabled.
+ *
+ * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
+ * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
+ * This means that the guest has to test individually (in most cases
+ * using FIFO caps) for the presence of registers after this; the VMX
+ * can define "extended FIFO" to mean whatever it wants, and currently
+ * won't enable it unless there's room for that set and much more.
+ */
+
+ SVGA_FIFO_CAPABILITIES = 4,
+ SVGA_FIFO_FLAGS,
+ // Valid with SVGA_FIFO_CAP_FENCE:
+ SVGA_FIFO_FENCE,
+
+ /*
+ * Block 3a (optional extended registers): Additional registers for the
+ * extended FIFO, whose presence isn't actually implied by
+ * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
+ * leave room for them.
+ *
+ * The VMX currently considers the registers in this block (3a)
+ * mandatory for the extended FIFO.
+ */
+
+ // Valid if exists (i.e. if extended FIFO enabled):
+ SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
+ // Valid with SVGA_FIFO_CAP_PITCHLOCK:
+ SVGA_FIFO_PITCHLOCK,
+
+ // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
+ SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
+ SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
+ SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
+ SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
+ SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
+
+ // Valid with SVGA_FIFO_CAP_RESERVE:
+ SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
+
+ /*
+ * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
+ *
+ * By default this is SVGA_ID_INVALID, to indicate that the cursor
+ * coordinates are specified relative to the virtual root. If this
+ * is set to a specific screen ID, cursor position is reinterpreted
+ * as a signed offset relative to that screen's origin. This is the
+ * only way to place the cursor on a non-rooted screen.
+ */
+ SVGA_FIFO_CURSOR_SCREEN_ID,
+
+ /*
+ * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
+ * registers, but this must be done carefully and with judicious use of
+ * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
+ * enough to tell you whether the register exists: we've shipped drivers
+ * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
+ * the earlier ones. The actual order of introduction was:
+ * - PITCHLOCK
+ * - 3D_CAPS
+ * - CURSOR_* (cursor bypass 3)
+ * - RESERVED
+ * So, code that wants to know whether it can use any of the
+ * aforementioned registers, or anything else added after PITCHLOCK and
+ * before 3D_CAPS, needs to reason about something other than
+ * SVGA_FIFO_MIN.
+ */
+
+ /*
+ * 3D caps block space; valid with 3D hardware version >=
+ * SVGA3D_HWVERSION_WS6_B1.
+ */
+ SVGA_FIFO_3D_CAPS = 32,
+ SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
+
+ /*
+ * End of VMX's current definition of "extended-FIFO registers".
+ * Registers before here are always enabled/disabled as a block; either
+ * the extended FIFO is enabled and includes all preceding registers, or
+ * it's disabled entirely.
+ *
+ * Block 3b (truly optional extended registers): Additional registers for
+ * the extended FIFO, which the VMX already knows how to enable and
+ * disable with correct granularity.
+ *
+ * Registers after here exist if and only if the guest SVGA driver
+ * sets SVGA_FIFO_MIN high enough to leave room for them.
+ */
+
+ // Valid if register exists:
+ SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
+ SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
+ SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
+
+ /*
+ * Always keep this last. This defines the maximum number of
+ * registers we know about. At power-on, this value is placed in
+ * the SVGA_REG_MEM_REGS register, and we expect the guest driver
+ * to allocate this much space in FIFO memory for registers.
+ */
+ SVGA_FIFO_NUM_REGS
+};
+
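/*
 * Illustration only: the existence test implied by the comments above.  A
 * FIFO register slot is physically present when SVGA_FIFO_MIN leaves room
 * for it, but registers in the PITCHLOCK..3D_CAPS gap must additionally be
 * gated on their FIFO capability bits.  'fifo' is assumed to point at the
 * mapped FIFO memory (32-bit words); a real driver would access it through
 * ioread32() on the ioremapped region.
 */
static inline bool example_fifo_reg_exists(const volatile uint32 *fifo,
					   uint32 reg)
{
	return fifo[SVGA_FIFO_MIN] > reg * sizeof(uint32);
}

static inline bool example_fifo_has_cap(const volatile uint32 *fifo,
					uint32 cap)
{
	return example_fifo_reg_exists(fifo, SVGA_FIFO_CAPABILITIES) &&
	       (fifo[SVGA_FIFO_CAPABILITIES] & cap) != 0;
}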
+
+/*
+ * Definition of registers included in extended FIFO support.
+ *
+ * The guest SVGA driver gets to allocate the FIFO between registers
+ * and data. It must always allocate at least 4 registers, but old
+ * drivers stopped there.
+ *
+ * The VMX will enable extended FIFO support if and only if the guest
+ * left enough room for all registers defined as part of the mandatory
+ * set for the extended FIFO.
+ *
+ * Note that the guest drivers typically allocate the FIFO only at
+ * initialization time, not at mode switches, so it's likely that the
+ * number of FIFO registers won't change without a reboot.
+ *
+ * All registers less than this value are guaranteed to be present if
+ * svgaUser->fifo.extended is set. Any later registers must be tested
+ * individually for compatibility at each use (in the VMX).
+ *
+ * This value is used only by the VMX, so it can change without
+ * affecting driver compatibility; it should be kept that way.
+ */
+#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
+
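/*
 * Illustration only: the FIFO bootstrap that the comment above implies.
 * The guest reserves space for all SVGA_FIFO_NUM_REGS registers (enough for
 * the extended FIFO) and gives the rest of the FIFO to the data area.
 * 'fifo' and 'fifo_size_bytes' are assumed inputs; MIN/MAX/NEXT_CMD/STOP
 * are treated here as byte offsets into the FIFO.
 */
static void example_fifo_layout(volatile uint32 *fifo, uint32 fifo_size_bytes)
{
	uint32 min = SVGA_FIFO_NUM_REGS * sizeof(uint32);

	fifo[SVGA_FIFO_MIN] = min;
	fifo[SVGA_FIFO_MAX] = fifo_size_bytes;
	fifo[SVGA_FIFO_NEXT_CMD] = min;
	fifo[SVGA_FIFO_STOP] = min;
}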
+
+/*
+ * FIFO Synchronization Registers
+ *
+ * This explains the relationship between the various FIFO
+ * sync-related registers in IOSpace and in FIFO space.
+ *
+ * SVGA_REG_SYNC --
+ *
+ * The SYNC register can be used in two different ways by the guest:
+ *
+ * 1. If the guest wishes to fully sync (drain) the FIFO,
+ * it will write once to SYNC then poll on the BUSY
+ * register. The FIFO is sync'ed once BUSY is zero.
+ *
+ * 2. If the guest wants to asynchronously wake up the host,
+ * it will write once to SYNC without polling on BUSY.
+ * Ideally it will do this after some new commands have
+ * been placed in the FIFO, and after reading a zero
+ * from SVGA_FIFO_BUSY.
+ *
+ * (1) is the original behaviour that SYNC was designed to
+ * support. Originally, a write to SYNC would implicitly
+ * trigger a read from BUSY. This causes us to synchronously
+ * process the FIFO.
+ *
+ * This behaviour has since been changed so that writing SYNC
+ * will *not* implicitly cause a read from BUSY. Instead, it
+ * makes a channel call which asynchronously wakes up the MKS
+ * thread.
+ *
+ * New guests can use this new behaviour to implement (2)
+ * efficiently. This lets guests get the host's attention
+ * without waiting for the MKS to poll, which gives us much
+ * better CPU utilization on SMP hosts and on UP hosts while
+ * we're blocked on the host GPU.
+ *
+ * Old guests shouldn't notice the behaviour change. SYNC was
+ * never guaranteed to process the entire FIFO, since it was
+ * bounded to a particular number of CPU cycles. Old guests will
+ * still loop on the BUSY register until the FIFO is empty.
+ *
+ * Writing to SYNC currently has the following side-effects:
+ *
+ * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
+ * - Asynchronously wakes up the MKS thread for FIFO processing
+ * - The value written to SYNC is recorded as a "reason", for
+ * stats purposes.
+ *
+ * If SVGA_FIFO_BUSY is available, drivers are advised to only
+ * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
+ * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
+ * eventually set SVGA_FIFO_BUSY on its own, but this approach
+ * lets the driver avoid sending multiple asynchronous wakeup
+ * messages to the MKS thread.
+ *
+ * SVGA_REG_BUSY --
+ *
+ * This register is set to TRUE when SVGA_REG_SYNC is written,
+ * and it reads as FALSE when the FIFO has been completely
+ * drained.
+ *
+ * Every read from this register causes us to synchronously
+ * process FIFO commands. There is no guarantee as to how many
+ * commands each read will process.
+ *
+ * CPU time spent processing FIFO commands will be billed to
+ * the guest.
+ *
+ * New drivers should avoid using this register unless they
+ * need to guarantee that the FIFO is completely drained. It
+ * is overkill for performing a sync-to-fence. Older drivers
+ * will use this register for any type of synchronization.
+ *
+ * SVGA_FIFO_BUSY --
+ *
+ * This register is a fast way for the guest driver to check
+ * whether the FIFO is already being processed. It reads and
+ * writes at normal RAM speeds, with no monitor intervention.
+ *
+ * If this register reads as TRUE, the host is guaranteeing that
+ * any new commands written into the FIFO will be noticed before
+ * the MKS goes back to sleep.
+ *
+ * If this register reads as FALSE, no such guarantee can be
+ * made.
+ *
+ * The guest should use this register to quickly determine
+ * whether or not it needs to wake up the host. If the guest
+ * just wrote a command or group of commands that it would like
+ * the host to begin processing, it should:
+ *
+ * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
+ * action is necessary.
+ *
+ * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
+ * code that we've already sent a SYNC to the host and we
+ * don't need to send a duplicate.
+ *
+ * 3. Write a reason to SVGA_REG_SYNC. This will send an
+ * asynchronous wakeup to the MKS thread.
+ */
+
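/*
 * Illustration only: the wakeup sequence recommended above, plus a full
 * drain.  vmw_write()/vmw_read() and SVGA_SYNC_GENERIC are used the same
 * way as in vmw_sync_obj_flush() later in this patch; 'fifo' is the mapped
 * FIFO memory.
 */
static void example_fifo_kick(struct vmw_private *dev_priv,
			      volatile uint32 *fifo)
{
	if (fifo[SVGA_FIFO_BUSY])
		return;				/* host is already awake */

	fifo[SVGA_FIFO_BUSY] = 1;		/* remember we sent a wakeup */
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
}

static void example_fifo_drain(struct vmw_private *dev_priv)
{
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;	/* each read also processes FIFO commands */
}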
+
+/*
+ * FIFO Capabilities
+ *
+ * Fence -- Fence register and command are supported
+ * Accel Front -- Front buffer only commands are supported
+ * Pitch Lock -- Pitch lock register is supported
+ * Video -- SVGA Video overlay units are supported
+ * Escape -- Escape command is supported
+ *
+ * XXX: Add longer descriptions for each capability, including a list
+ * of the new features that each capability provides.
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT --
+ *
+ * Provides dynamic multi-screen rendering, for improved Unity and
+ * multi-monitor modes. With Screen Object, the guest can
+ * dynamically create and destroy 'screens', which can represent
+ * Unity windows or virtual monitors. Screen Object also provides
+ * strong guarantees that DMA operations happen only when
+ * guest-initiated. Screen Object deprecates the BAR1 guest
+ * framebuffer (GFB) and all commands that work only with the GFB.
+ *
+ * New registers:
+ * FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
+ *
+ * New 2D commands:
+ * DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
+ * BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
+ *
+ * New 3D commands:
+ * BLIT_SURFACE_TO_SCREEN
+ *
+ * New guarantees:
+ *
+ * - The host will not read or write guest memory, including the GFB,
+ * except when explicitly initiated by a DMA command.
+ *
+ * - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
+ * is guaranteed to complete before any subsequent FENCEs.
+ *
+ * - All legacy commands which affect a Screen (UPDATE, PRESENT,
+ * PRESENT_READBACK) as well as new Screen blit commands will
+ * all behave consistently as blits, and memory will be read
+ * or written in FIFO order.
+ *
+ * For example, if you PRESENT from one SVGA3D surface to multiple
+ * places on the screen, the data copied will always be from the
+ * SVGA3D surface at the time the PRESENT was issued in the FIFO.
+ * This was not necessarily true on devices without Screen Object.
+ *
+ * This means that on devices that support Screen Object, the
+ * PRESENT_READBACK command should not be necessary unless you
+ * actually want to read back the results of 3D rendering into
+ * system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
+ * command provides a strict superset of functionality.)
+ *
+ * - When a screen is resized, either using Screen Object commands or
+ * legacy multimon registers, its contents are preserved.
+ */
+
+#define SVGA_FIFO_CAP_NONE 0
+#define SVGA_FIFO_CAP_FENCE (1<<0)
+#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
+#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
+#define SVGA_FIFO_CAP_VIDEO (1<<3)
+#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
+#define SVGA_FIFO_CAP_ESCAPE (1<<5)
+#define SVGA_FIFO_CAP_RESERVE (1<<6)
+#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
+
+
+/*
+ * FIFO Flags
+ *
+ * Accel Front -- Driver should use front buffer only commands
+ */
+
+#define SVGA_FIFO_FLAG_NONE 0
+#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
+#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
+
+/*
+ * FIFO reservation sentinel value
+ */
+
+#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
+
+
+/*
+ * Video overlay support
+ */
+
+#define SVGA_NUM_OVERLAY_UNITS 32
+
+
+/*
+ * Video capabilities that the guest is currently using
+ */
+
+#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
+
+
+/*
+ * Offsets for the video overlay registers
+ */
+
+enum {
+ SVGA_VIDEO_ENABLED = 0,
+ SVGA_VIDEO_FLAGS,
+ SVGA_VIDEO_DATA_OFFSET,
+ SVGA_VIDEO_FORMAT,
+ SVGA_VIDEO_COLORKEY,
+ SVGA_VIDEO_SIZE, // Deprecated
+ SVGA_VIDEO_WIDTH,
+ SVGA_VIDEO_HEIGHT,
+ SVGA_VIDEO_SRC_X,
+ SVGA_VIDEO_SRC_Y,
+ SVGA_VIDEO_SRC_WIDTH,
+ SVGA_VIDEO_SRC_HEIGHT,
+ SVGA_VIDEO_DST_X, // Signed int32
+ SVGA_VIDEO_DST_Y, // Signed int32
+ SVGA_VIDEO_DST_WIDTH,
+ SVGA_VIDEO_DST_HEIGHT,
+ SVGA_VIDEO_PITCH_1,
+ SVGA_VIDEO_PITCH_2,
+ SVGA_VIDEO_PITCH_3,
+ SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
+ SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
+ SVGA_VIDEO_NUM_REGS
+};
+
+
+/*
+ * SVGA Overlay Units
+ *
+ * width and height relate to the entire source video frame.
+ * srcX, srcY, srcWidth and srcHeight represent subset of the source
+ * video frame to be displayed.
+ */
+
+typedef struct SVGAOverlayUnit {
+ uint32 enabled;
+ uint32 flags;
+ uint32 dataOffset;
+ uint32 format;
+ uint32 colorKey;
+ uint32 size;
+ uint32 width;
+ uint32 height;
+ uint32 srcX;
+ uint32 srcY;
+ uint32 srcWidth;
+ uint32 srcHeight;
+ int32 dstX;
+ int32 dstY;
+ uint32 dstWidth;
+ uint32 dstHeight;
+ uint32 pitches[3];
+ uint32 dataGMRId;
+ uint32 dstScreenId;
+} SVGAOverlayUnit;
+
+
+/*
+ * SVGAScreenObject --
+ *
+ * This is a new way to represent a guest's multi-monitor screen or
+ * Unity window. Screen objects are only supported if the
+ * SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
+ *
+ * If Screen Objects are supported, they can be used to fully
+ * replace the functionality provided by the framebuffer registers
+ * (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
+ *
+ * The screen object is a struct with guaranteed binary
+ * compatibility. New flags can be added, and the struct may grow,
+ * but existing fields must retain their meaning.
+ *
+ */
+
+#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
+#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
+
+typedef
+struct SVGAScreenObject {
+ uint32 structSize; // sizeof(SVGAScreenObject)
+ uint32 id;
+ uint32 flags;
+ struct {
+ uint32 width;
+ uint32 height;
+ } size;
+ struct {
+ int32 x;
+ int32 y;
+ } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
+} SVGAScreenObject;
+
+
+/*
+ * Commands in the command FIFO:
+ *
+ * Command IDs defined below are used for the traditional 2D FIFO
+ * communication (not all commands are available for all versions of the
+ * SVGA FIFO protocol).
+ *
+ * Note the holes in the command ID numbers: These commands have been
+ * deprecated, and the old IDs must not be reused.
+ *
+ * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
+ * protocol.
+ *
+ * Each command's parameters are described by the comments and
+ * structs below.
+ */
+
+typedef enum {
+ SVGA_CMD_INVALID_CMD = 0,
+ SVGA_CMD_UPDATE = 1,
+ SVGA_CMD_RECT_COPY = 3,
+ SVGA_CMD_DEFINE_CURSOR = 19,
+ SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
+ SVGA_CMD_UPDATE_VERBOSE = 25,
+ SVGA_CMD_FRONT_ROP_FILL = 29,
+ SVGA_CMD_FENCE = 30,
+ SVGA_CMD_ESCAPE = 33,
+ SVGA_CMD_DEFINE_SCREEN = 34,
+ SVGA_CMD_DESTROY_SCREEN = 35,
+ SVGA_CMD_DEFINE_GMRFB = 36,
+ SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
+ SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
+ SVGA_CMD_ANNOTATION_FILL = 39,
+ SVGA_CMD_ANNOTATION_COPY = 40,
+ SVGA_CMD_MAX
+} SVGAFifoCmdId;
+
+#define SVGA_CMD_MAX_ARGS 64
+
+
+/*
+ * SVGA_CMD_UPDATE --
+ *
+ * This is a DMA transfer which copies from the Guest Framebuffer
+ * (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
+ * intersect with the provided virtual rectangle.
+ *
+ * This command does not support using arbitrary guest memory as a
+ * data source; it only works with the pre-defined GFB memory.
+ * This command also does not support signed virtual coordinates.
+ * If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
+ * negative root x/y coordinates, the negative portion of those
+ * screens will not be reachable by this command.
+ *
+ * This command is not necessary when using framebuffer
+ * traces. Traces are automatically enabled if the SVGA FIFO is
+ * disabled, and you may explicitly enable/disable traces using
+ * SVGA_REG_TRACES. With traces enabled, any write to the GFB will
+ * automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
+ *
+ * Traces and SVGA_CMD_UPDATE are the only supported ways to render
+ * pseudocolor screen updates. The newer Screen Object commands
+ * only support true color formats.
+ *
+ * Availability:
+ * Always available.
+ */
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+} SVGAFifoCmdUpdate;
+
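/*
 * Illustration only: a simplified enqueue of SVGA_CMD_UPDATE.  This sketch
 * assumes enough contiguous space past NEXT_CMD; a real driver must handle
 * the FIFO-full case and wrapping at SVGA_FIFO_MAX (or use the
 * SVGA_FIFO_CAP_RESERVE mechanism), and should then kick the host as shown
 * in the synchronization sketch above.
 */
static void example_cmd_update(volatile uint32 *fifo,
			       uint32 x, uint32 y, uint32 w, uint32 h)
{
	uint32 next = fifo[SVGA_FIFO_NEXT_CMD] / sizeof(uint32);

	fifo[next++] = SVGA_CMD_UPDATE;
	fifo[next++] = x;
	fifo[next++] = y;
	fifo[next++] = w;
	fifo[next++] = h;
	fifo[SVGA_FIFO_NEXT_CMD] = next * sizeof(uint32);
}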
+
+/*
+ * SVGA_CMD_RECT_COPY --
+ *
+ * Perform a rectangular DMA transfer from one area of the GFB to
+ * another, and copy the result to any screens which intersect it.
+ *
+ * Availability:
+ * SVGA_CAP_RECT_COPY
+ */
+
+typedef
+struct {
+ uint32 srcX;
+ uint32 srcY;
+ uint32 destX;
+ uint32 destY;
+ uint32 width;
+ uint32 height;
+} SVGAFifoCmdRectCopy;
+
+
+/*
+ * SVGA_CMD_DEFINE_CURSOR --
+ *
+ * Provide a new cursor image, as an AND/XOR mask.
+ *
+ * The recommended way to position the cursor overlay is by using
+ * the SVGA_FIFO_CURSOR_* registers, supported by the
+ * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ * SVGA_CAP_CURSOR
+ */
+
+typedef
+struct {
+ uint32 id; // Reserved, must be zero.
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
+ /*
+ * Followed by scanline data for AND mask, then XOR mask.
+ * Each scanline is padded to a 32-bit boundary.
+ */
+} SVGAFifoCmdDefineCursor;
+
+
+/*
+ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
+ *
+ * Provide a new cursor image, in 32-bit BGRA format.
+ *
+ * The recommended way to position the cursor overlay is by using
+ * the SVGA_FIFO_CURSOR_* registers, supported by the
+ * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
+ *
+ * Availability:
+ * SVGA_CAP_ALPHA_CURSOR
+ */
+
+typedef
+struct {
+ uint32 id; // Reserved, must be zero.
+ uint32 hotspotX;
+ uint32 hotspotY;
+ uint32 width;
+ uint32 height;
+ /* Followed by scanline data */
+} SVGAFifoCmdDefineAlphaCursor;
+
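/*
 * Illustration only: repositioning the cursor with the Cursor Bypass 3 FIFO
 * registers recommended above.  SVGA_FIFO_CAP_CURSOR_BYPASS_3 must be
 * checked first; bumping SVGA_FIFO_CURSOR_COUNT is what tells the host that
 * the other cursor registers changed.
 */
static void example_move_cursor(volatile uint32 *fifo,
				uint32 x, uint32 y, bool visible)
{
	fifo[SVGA_FIFO_CURSOR_X] = x;
	fifo[SVGA_FIFO_CURSOR_Y] = y;
	fifo[SVGA_FIFO_CURSOR_ON] = visible ? 1 : 0;
	fifo[SVGA_FIFO_CURSOR_COUNT]++;
}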
+
+/*
+ * SVGA_CMD_UPDATE_VERBOSE --
+ *
+ * Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
+ * 'reason' value, an opaque cookie which is used by internal
+ * debugging tools. Third party drivers should not use this
+ * command.
+ *
+ * Availability:
+ * SVGA_CAP_EXTENDED_FIFO
+ */
+
+typedef
+struct {
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+ uint32 reason;
+} SVGAFifoCmdUpdateVerbose;
+
+
+/*
+ * SVGA_CMD_FRONT_ROP_FILL --
+ *
+ * This is a hint which tells the SVGA device that the driver has
+ * just filled a rectangular region of the GFB with a solid
+ * color. Instead of reading these pixels from the GFB, the device
+ * can assume that they all equal 'color'. This is primarily used
+ * for remote desktop protocols.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_ACCELFRONT
+ */
+
+#define SVGA_ROP_COPY 0x03
+
+typedef
+struct {
+ uint32 color; // In the same format as the GFB
+ uint32 x;
+ uint32 y;
+ uint32 width;
+ uint32 height;
+ uint32 rop; // Must be SVGA_ROP_COPY
+} SVGAFifoCmdFrontRopFill;
+
+
+/*
+ * SVGA_CMD_FENCE --
+ *
+ * Insert a synchronization fence. When the SVGA device reaches
+ * this command, it will copy the 'fence' value into the
+ * SVGA_FIFO_FENCE register. It will also compare the fence against
+ * SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
+ * SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
+ * raise this interrupt.
+ *
+ * Availability:
+ * SVGA_FIFO_FENCE for this command,
+ * SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
+ */
+
+typedef
+struct {
+ uint32 fence;
+} SVGAFifoCmdFence;
+
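/*
 * Illustration only: inserting a fence and then waiting for it by polling
 * the SVGA_FIFO_FENCE register (valid with SVGA_FIFO_CAP_FENCE).  The
 * enqueue has the same caveats as the SVGA_CMD_UPDATE sketch above, and a
 * real driver would normally sleep on the IRQ/fence-goal mechanism (as
 * vmw_wait_fence() later in this patch does) rather than busy-wait.
 */
static void example_insert_fence(volatile uint32 *fifo, uint32 fence)
{
	uint32 next = fifo[SVGA_FIFO_NEXT_CMD] / sizeof(uint32);

	fifo[next++] = SVGA_CMD_FENCE;
	fifo[next++] = fence;
	fifo[SVGA_FIFO_NEXT_CMD] = next * sizeof(uint32);
}

static void example_wait_fence(const volatile uint32 *fifo, uint32 fence)
{
	/* Signed difference keeps the comparison safe across wrap-around. */
	while ((int32)(fifo[SVGA_FIFO_FENCE] - fence) < 0)
		;
}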
+
+/*
+ * SVGA_CMD_ESCAPE --
+ *
+ * Send an extended or vendor-specific variable length command.
+ * This is used for video overlay, third party plugins, and
+ * internal debugging tools. See svga_escape.h
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_ESCAPE
+ */
+
+typedef
+struct {
+ uint32 nsid;
+ uint32 size;
+ /* followed by 'size' bytes of data */
+} SVGAFifoCmdEscape;
+
+
+/*
+ * SVGA_CMD_DEFINE_SCREEN --
+ *
+ * Define or redefine an SVGAScreenObject. See the description of
+ * SVGAScreenObject above. The video driver is responsible for
+ * generating new screen IDs. They should be small positive
+ * integers. The virtual device will have an implementation
+ * specific upper limit on the number of screen IDs
+ * supported. Drivers are responsible for recycling IDs. The first
+ * valid ID is zero.
+ *
+ * - Interaction with other registers:
+ *
+ * For backwards compatibility, when the GFB mode registers (WIDTH,
+ * HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
+ * deletes all screens other than screen #0, and redefines screen
+ * #0 according to the specified mode. Drivers that use
+ * SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
+ *
+ * If you use screen objects, do not use the legacy multi-mon
+ * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAScreenObject screen; // Variable-length according to version
+} SVGAFifoCmdDefineScreen;
+
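/*
 * Illustration only: filling in a rooted, primary screen for
 * SVGA_CMD_DEFINE_SCREEN.  The command body is the SVGAScreenObject itself;
 * structSize is what allows the struct to grow in later versions.  FIFO
 * enqueueing is elided here (see the earlier command sketches).
 */
static void example_fill_screen(SVGAScreenObject *screen, uint32 id,
				uint32 width, uint32 height)
{
	screen->structSize = sizeof(*screen);
	screen->id = id;
	screen->flags = SVGA_SCREEN_HAS_ROOT | SVGA_SCREEN_IS_PRIMARY;
	screen->size.width = width;
	screen->size.height = height;
	screen->root.x = 0;
	screen->root.y = 0;
}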
+
+/*
+ * SVGA_CMD_DESTROY_SCREEN --
+ *
+ * Destroy an SVGAScreenObject. Its ID is immediately available for
+ * re-use.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ uint32 screenId;
+} SVGAFifoCmdDestroyScreen;
+
+
+/*
+ * SVGA_CMD_DEFINE_GMRFB --
+ *
+ * This command sets a piece of SVGA device state called the
+ * Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
+ * piece of light-weight state which identifies the location and
+ * format of an image in guest memory or in BAR1. The GMRFB has
+ * an arbitrary size, and it doesn't need to match the geometry
+ * of the GFB or any screen object.
+ *
+ * The GMRFB can be redefined as often as you like. You could
+ * always use the same GMRFB, you could redefine it before
+ * rendering from a different guest screen, or you could even
+ * redefine it before every blit.
+ *
+ * There are multiple ways to use this command. The simplest way is
+ * to use it to move the framebuffer either to elsewhere in the GFB
+ * (BAR1) memory region, or to a user-defined GMR. This lets a
+ * driver use a framebuffer allocated entirely out of normal system
+ * memory, which we encourage.
+ *
+ * Another way to use this command is to set up a ring buffer of
+ * updates in GFB memory. If a driver wants to ensure that no
+ * frames are skipped by the SVGA device, it is important that the
+ * driver not modify the source data for a blit until the device is
+ * done processing the command. One efficient way to accomplish
+ * this is to use a ring of small DMA buffers. Each buffer is used
+ * for one blit, then we move on to the next buffer in the
+ * ring. The FENCE mechanism is used to protect each buffer from
+ * re-use until the device is finished with that buffer's
+ * corresponding blit.
+ *
+ * This command does not affect the meaning of SVGA_CMD_UPDATE.
+ * UPDATEs always occur from the legacy GFB memory area. This
+ * command has no support for pseudocolor GMRFBs. Currently only
+ * true-color 15, 16, and 24-bit depths are supported. Future
+ * devices may expose capabilities for additional framebuffer
+ * formats.
+ *
+ * The default GMRFB value is undefined. Drivers must always send
+ * this command at least once before performing any blit from the
+ * GMRFB.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAGuestPtr ptr;
+ uint32 bytesPerLine;
+ SVGAGMRImageFormat format;
+} SVGAFifoCmdDefineGMRFB;
+
+
+/*
+ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
+ *
+ * This is a guest-to-host blit. It performs a DMA operation to
+ * copy a rectangular region of pixels from the current GMRFB to
+ * one or more Screen Objects.
+ *
+ * The destination coordinate may be specified relative to a
+ * screen's origin (if a screen ID is specified) or relative to the
+ * virtual coordinate system's origin (if the screen ID is
+ * SVGA_ID_INVALID). The actual destination may span zero or more
+ * screens, in the case of a virtual destination rect or a rect
+ * which extends off the edge of the specified screen.
+ *
+ * This command writes to the screen's "base layer": the underlying
+ * framebuffer which exists below any cursor or video overlays. No
+ * action is necessary to explicitly hide or update any overlays
+ * which exist on top of the updated region.
+ *
+ * The SVGA device is guaranteed to finish reading from the GMRFB
+ * by the time any subsequent FENCE commands are reached.
+ *
+ * This command consumes an annotation. See the
+ * SVGA_CMD_ANNOTATION_* commands for details.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint srcOrigin;
+ SVGASignedRect destRect;
+ uint32 destScreenId;
+} SVGAFifoCmdBlitGMRFBToScreen;
+
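/*
 * Illustration only: the command pair typically used to get pixels from a
 * user-defined GMR onto a screen: point the GMRFB at the source buffer,
 * then blit a rectangle of it to the target screen.  FIFO enqueueing is
 * elided; 'fmt' could be built as in the SVGAGMRImageFormat sketch above.
 */
static void example_fill_gmrfb_blit(SVGAFifoCmdDefineGMRFB *def,
				    SVGAFifoCmdBlitGMRFBToScreen *blit,
				    uint32 gmr_id, uint32 bytes_per_line,
				    SVGAGMRImageFormat fmt,
				    const SVGASignedRect *dest_rect,
				    uint32 screen_id)
{
	def->ptr.gmrId = gmr_id;
	def->ptr.offset = 0;
	def->bytesPerLine = bytes_per_line;
	def->format = fmt;

	blit->srcOrigin.x = 0;
	blit->srcOrigin.y = 0;
	blit->destRect = *dest_rect;
	blit->destScreenId = screen_id;
}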
+
+/*
+ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
+ *
+ * This is a host-to-guest blit. It performs a DMA operation to
+ * copy a rectangular region of pixels from a single Screen Object
+ * back to the current GMRFB.
+ *
+ * Usage note: This command should be used rarely. It will
+ * typically be inefficient, but it is necessary for some types of
+ * synchronization between 3D (GPU) and 2D (CPU) rendering into
+ * overlapping areas of a screen.
+ *
+ * The source coordinate is specified relative to a screen's
+ * origin. The provided screen ID must be valid. If any parameters
+ * are invalid, the resulting pixel values are undefined.
+ *
+ * This command reads the screen's "base layer". Overlays like
+ * video and cursor are not included, but any data which was sent
+ * using a blit-to-screen primitive will be available, no matter
+ * whether the data's original source was the GMRFB or the 3D
+ * acceleration hardware.
+ *
+ * Note that our guest-to-host blits and host-to-guest blits aren't
+ * symmetric in their current implementation. While the parameters
+ * are identical, host-to-guest blits are a lot less featureful.
+ * They do not support clipping: If the source parameters don't
+ * fully fit within a screen, the blit fails. They must originate
+ * from exactly one screen. Virtual coordinates are not directly
+ * supported.
+ *
+ * Host-to-guest blits do support the same set of GMRFB formats
+ * offered by guest-to-host blits.
+ *
+ * The SVGA device is guaranteed to finish writing to the GMRFB by
+ * the time any subsequent FENCE commands are reached.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint destOrigin;
+ SVGASignedRect srcRect;
+ uint32 srcScreenId;
+} SVGAFifoCmdBlitScreenToGMRFB;
+
+
+/*
+ * SVGA_CMD_ANNOTATION_FILL --
+ *
+ * This is a blit annotation. This command stores a small piece of
+ * device state which is consumed by the next blit-to-screen
+ * command. The state is only cleared by commands which are
+ * specifically documented as consuming an annotation. Other
+ * commands (such as ESCAPEs for debugging) may intervene between
+ * the annotation and its associated blit.
+ *
+ * This annotation is a promise about the contents of the next
+ * blit: The video driver is guaranteeing that all pixels in that
+ * blit will have the same value, specified here as a color in
+ * SVGAColorBGRX format.
+ *
+ * The SVGA device can still render the blit correctly even if it
+ * ignores this annotation, but the annotation may allow it to
+ * perform the blit more efficiently, for example by ignoring the
+ * source data and performing a fill in hardware.
+ *
+ * This annotation is most important for performance when the
+ * user's display is being remoted over a network connection.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGAColorBGRX color;
+} SVGAFifoCmdAnnotationFill;
+
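/*
 * Illustration only: promising that the next blit is a solid fill.  The
 * annotation just carries an SVGAColorBGRX and is consumed by the following
 * blit-to-screen command; enqueueing is elided as in the sketches above.
 */
static void example_fill_annotation(SVGAFifoCmdAnnotationFill *ann,
				    uint8 r, uint8 g, uint8 b)
{
	ann->color.value = 0;
	ann->color.r = r;
	ann->color.g = g;
	ann->color.b = b;
}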
+
+/*
+ * SVGA_CMD_ANNOTATION_COPY --
+ *
+ * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
+ * information about annotations.
+ *
+ * This annotation is a promise about the contents of the next
+ * blit: The video driver is guaranteeing that all pixels in that
+ * blit will have the same value as those which already exist at an
+ * identically-sized region on the same or a different screen.
+ *
+ * Note that the source pixels for the COPY in this annotation are
+ * sampled before applying the annotation's associated blit. They
+ * are allowed to overlap with the blit's destination pixels.
+ *
+ * The copy source rectangle is specified the same way as the blit
+ * destination: it can be a rectangle which spans zero or more
+ * screens, specified relative to either a screen or to the virtual
+ * coordinate system's origin. If the source rectangle includes
+ * pixels which are not from exactly one screen, the results are
+ * undefined.
+ *
+ * Availability:
+ * SVGA_FIFO_CAP_SCREEN_OBJECT
+ */
+
+typedef
+struct {
+ SVGASignedPoint srcOrigin;
+ uint32 srcScreenId;
+} SVGAFifoCmdAnnotationCopy;
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
new file mode 100644
index 00000000000..55836dedcfc
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_types.h
@@ -0,0 +1,45 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Silly typedefs for the svga headers. Currently the headers are shared
+ * between all components that talk to svga. As such, the headers are
+ * in a completely different style and use weird defines.
+ *
+ * This file lets all the ugly be prefixed with svga*.
+ */
+
+#ifndef _SVGA_TYPES_H_
+#define _SVGA_TYPES_H_
+
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+typedef uint8_t uint8;
+typedef int32_t int32;
+typedef bool Bool;
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 00000000000..d6f2d2b882e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,229 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+
+static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_CACHED;
+
+static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
+ TTM_PL_FLAG_CACHED |
+ TTM_PL_FLAG_NO_EVICT;
+
+static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
+ TTM_PL_FLAG_CACHED;
+
+struct ttm_placement vmw_vram_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vram_placement_flags
+};
+
+struct ttm_placement vmw_vram_ne_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &vram_ne_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &vram_ne_placement_flags
+};
+
+struct ttm_placement vmw_sys_placement = {
+ .fpfn = 0,
+ .lpfn = 0,
+ .num_placement = 1,
+ .placement = &sys_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
+struct vmw_ttm_backend {
+ struct ttm_backend backend;
+};
+
+static int vmw_ttm_populate(struct ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
+{
+ return 0;
+}
+
+static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+{
+ return 0;
+}
+
+static int vmw_ttm_unbind(struct ttm_backend *backend)
+{
+ return 0;
+}
+
+static void vmw_ttm_clear(struct ttm_backend *backend)
+{
+}
+
+static void vmw_ttm_destroy(struct ttm_backend *backend)
+{
+ struct vmw_ttm_backend *vmw_be =
+ container_of(backend, struct vmw_ttm_backend, backend);
+
+ kfree(vmw_be);
+}
+
+static struct ttm_backend_func vmw_ttm_func = {
+ .populate = vmw_ttm_populate,
+ .clear = vmw_ttm_clear,
+ .bind = vmw_ttm_bind,
+ .unbind = vmw_ttm_unbind,
+ .destroy = vmw_ttm_destroy,
+};
+
+struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+{
+ struct vmw_ttm_backend *vmw_be;
+
+ vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+ if (!vmw_be)
+ return NULL;
+
+ vmw_be->backend.func = &vmw_ttm_func;
+
+ return &vmw_be->backend;
+}
+
+int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+ return 0;
+}
+
+int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ /* System memory */
+
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ /* "On-card" video ram */
+ man->gpu_offset = 0;
+ man->io_offset = dev_priv->vram_start;
+ man->io_size = dev_priv->vram_size;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->io_addr = NULL;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void vmw_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ *placement = vmw_sys_placement;
+}
+
+/**
+ * FIXME: Proper access checks on buffers.
+ */
+
+static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * FIXME: We're using the old vmware polling method to sync.
+ * Do this with fences instead.
+ */
+
+static void *vmw_sync_obj_ref(void *sync_obj)
+{
+ return sync_obj;
+}
+
+static void vmw_sync_obj_unref(void **sync_obj)
+{
+ *sync_obj = NULL;
+}
+
+static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ mutex_unlock(&dev_priv->hw_mutex);
+ return 0;
+}
+
+static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
+
+ return vmw_fence_signaled(dev_priv, sequence);
+}
+
+static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
+ bool lazy, bool interruptible)
+{
+ struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
+ uint32_t sequence = (unsigned long) sync_obj;
+
+ return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+}
+
+struct ttm_bo_driver vmw_bo_driver = {
+ .create_ttm_backend_entry = vmw_ttm_backend_init,
+ .invalidate_caches = vmw_invalidate_caches,
+ .init_mem_type = vmw_init_mem_type,
+ .evict_flags = vmw_evict_flags,
+ .move = NULL,
+ .verify_access = vmw_verify_access,
+ .sync_obj_signaled = vmw_sync_obj_signaled,
+ .sync_obj_wait = vmw_sync_obj_wait,
+ .sync_obj_flush = vmw_sync_obj_flush,
+ .sync_obj_unref = vmw_sync_obj_unref,
+ .sync_obj_ref = vmw_sync_obj_ref
+};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 00000000000..1db1ef30be2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,726 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_module.h"
+
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
+#define VMWGFX_CHIP_SVGAII 0
+#define VMW_FB_RESERVATION 0
+
+/**
+ * Fully encoded drm commands. Might move to vmw_drm.h
+ */
+
+#define DRM_IOCTL_VMW_GET_PARAM \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
+ struct drm_vmw_getparam_arg)
+#define DRM_IOCTL_VMW_ALLOC_DMABUF \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
+ union drm_vmw_alloc_dmabuf_arg)
+#define DRM_IOCTL_VMW_UNREF_DMABUF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
+ struct drm_vmw_unref_dmabuf_arg)
+#define DRM_IOCTL_VMW_CURSOR_BYPASS \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
+ struct drm_vmw_cursor_bypass_arg)
+
+#define DRM_IOCTL_VMW_CONTROL_STREAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
+ struct drm_vmw_control_stream_arg)
+#define DRM_IOCTL_VMW_CLAIM_STREAM \
+ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
+ struct drm_vmw_stream_arg)
+#define DRM_IOCTL_VMW_UNREF_STREAM \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
+ struct drm_vmw_stream_arg)
+
+#define DRM_IOCTL_VMW_CREATE_CONTEXT \
+ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
+ struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_UNREF_CONTEXT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
+ struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_CREATE_SURFACE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
+ union drm_vmw_surface_create_arg)
+#define DRM_IOCTL_VMW_UNREF_SURFACE \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
+ struct drm_vmw_surface_arg)
+#define DRM_IOCTL_VMW_REF_SURFACE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
+ union drm_vmw_surface_reference_arg)
+#define DRM_IOCTL_VMW_EXECBUF \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
+ struct drm_vmw_execbuf_arg)
+#define DRM_IOCTL_VMW_FIFO_DEBUG \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
+ struct drm_vmw_fifo_debug_arg)
+#define DRM_IOCTL_VMW_FENCE_WAIT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
+ struct drm_vmw_fence_wait_arg)
+
+
+/**
+ * The core DRM version of this macro doesn't account for
+ * DRM_COMMAND_BASE.
+ */
+
+#define VMW_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
+/**
+ * Ioctl definitions.
+ */
+
+static struct drm_ioctl_desc vmw_ioctls[] = {
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+ vmw_kms_cursor_bypass_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+ DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+ DRM_AUTH | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+ DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
+ VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+ DRM_AUTH | DRM_UNLOCKED)
+};
+
+static struct pci_device_id vmw_pci_id_list[] = {
+ {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
+ {0, 0, 0}
+};
+
+static char *vmw_devname = "vmwgfx";
+
+static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+static void vmw_master_init(struct vmw_master *);
+
+static void vmw_print_capabilities(uint32_t capabilities)
+{
+ DRM_INFO("Capabilities:\n");
+ if (capabilities & SVGA_CAP_RECT_COPY)
+ DRM_INFO(" Rect copy.\n");
+ if (capabilities & SVGA_CAP_CURSOR)
+ DRM_INFO(" Cursor.\n");
+ if (capabilities & SVGA_CAP_CURSOR_BYPASS)
+ DRM_INFO(" Cursor bypass.\n");
+ if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
+ DRM_INFO(" Cursor bypass 2.\n");
+ if (capabilities & SVGA_CAP_8BIT_EMULATION)
+ DRM_INFO(" 8bit emulation.\n");
+ if (capabilities & SVGA_CAP_ALPHA_CURSOR)
+ DRM_INFO(" Alpha cursor.\n");
+ if (capabilities & SVGA_CAP_3D)
+ DRM_INFO(" 3D.\n");
+ if (capabilities & SVGA_CAP_EXTENDED_FIFO)
+ DRM_INFO(" Extended Fifo.\n");
+ if (capabilities & SVGA_CAP_MULTIMON)
+ DRM_INFO(" Multimon.\n");
+ if (capabilities & SVGA_CAP_PITCHLOCK)
+ DRM_INFO(" Pitchlock.\n");
+ if (capabilities & SVGA_CAP_IRQMASK)
+ DRM_INFO(" Irq mask.\n");
+ if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
+ DRM_INFO(" Display Topology.\n");
+ if (capabilities & SVGA_CAP_GMR)
+ DRM_INFO(" GMR.\n");
+ if (capabilities & SVGA_CAP_TRACES)
+ DRM_INFO(" Traces.\n");
+}
+
+static int vmw_request_device(struct vmw_private *dev_priv)
+{
+ int ret;
+
+ vmw_kms_save_vga(dev_priv);
+
+ ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize FIFO.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vmw_release_device(struct vmw_private *dev_priv)
+{
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ vmw_kms_restore_vga(dev_priv);
+}
+
+
+static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct vmw_private *dev_priv;
+ int ret;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (unlikely(dev_priv == NULL)) {
+ DRM_ERROR("Failed allocating a device private struct.\n");
+ return -ENOMEM;
+ }
+ memset(dev_priv, 0, sizeof(*dev_priv));
+
+ dev_priv->dev = dev;
+ dev_priv->vmw_chipset = chipset;
+ mutex_init(&dev_priv->hw_mutex);
+ mutex_init(&dev_priv->cmdbuf_mutex);
+ rwlock_init(&dev_priv->resource_lock);
+ idr_init(&dev_priv->context_idr);
+ idr_init(&dev_priv->surface_idr);
+ idr_init(&dev_priv->stream_idr);
+ ida_init(&dev_priv->gmr_ida);
+ mutex_init(&dev_priv->init_mutex);
+ init_waitqueue_head(&dev_priv->fence_queue);
+ init_waitqueue_head(&dev_priv->fifo_queue);
+ atomic_set(&dev_priv->fence_queue_waiters, 0);
+ atomic_set(&dev_priv->fifo_queue_waiters, 0);
+ INIT_LIST_HEAD(&dev_priv->gmr_lru);
+
+ dev_priv->io_start = pci_resource_start(dev->pdev, 0);
+ dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
+ dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+
+ mutex_lock(&dev_priv->hw_mutex);
+ dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+ if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ dev_priv->max_gmr_descriptors =
+ vmw_read(dev_priv,
+ SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
+ dev_priv->max_gmr_ids =
+ vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
+ }
+
+ dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+ dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+ dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ vmw_print_capabilities(dev_priv->capabilities);
+
+ if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ DRM_INFO("Max GMR ids is %u\n",
+ (unsigned)dev_priv->max_gmr_ids);
+ DRM_INFO("Max GMR descriptors is %u\n",
+ (unsigned)dev_priv->max_gmr_descriptors);
+ }
+ DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
+ dev_priv->vram_start, dev_priv->vram_size / 1024);
+ DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
+ dev_priv->mmio_start, dev_priv->mmio_size / 1024);
+
+ ret = vmw_ttm_global_init(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_err0;
+
+
+ vmw_master_init(&dev_priv->fbdev_master);
+ ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+ dev_priv->active_master = &dev_priv->fbdev_master;
+
+
+ ret = ttm_bo_device_init(&dev_priv->bdev,
+ dev_priv->bo_global_ref.ref.object,
+ &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
+ false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+ goto out_err1;
+ }
+
+ ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+ (dev_priv->vram_size >> PAGE_SHIFT));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+ goto out_err2;
+ }
+
+ dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+
+ dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
+ dev_priv->mmio_size);
+
+ if (unlikely(dev_priv->mmio_virt == NULL)) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed mapping MMIO.\n");
+ goto out_err3;
+ }
+
+ dev_priv->tdev = ttm_object_device_init
+ (dev_priv->mem_global_ref.object, 12);
+
+ if (unlikely(dev_priv->tdev == NULL)) {
+ DRM_ERROR("Unable to initialize TTM object management.\n");
+ ret = -ENOMEM;
+ goto out_err4;
+ }
+
+ dev->dev_private = dev_priv;
+
+ if (!dev->devname)
+ dev->devname = vmw_devname;
+
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+ ret = drm_irq_install(dev);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed installing irq: %d\n", ret);
+ goto out_no_irq;
+ }
+ }
+
+ ret = pci_request_regions(dev->pdev, "vmwgfx probe");
+ dev_priv->stealth = (ret != 0);
+ if (dev_priv->stealth) {
+ /**
+ * Request at least the mmio PCI resource.
+ */
+
+ DRM_INFO("It appears like vesafb is loaded. "
+ "Ignore above error if any. Entering stealth mode.\n");
+ ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
+ goto out_no_device;
+ }
+ vmw_kms_init(dev_priv);
+ vmw_overlay_init(dev_priv);
+ } else {
+ ret = vmw_request_device(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_device;
+ vmw_kms_init(dev_priv);
+ vmw_overlay_init(dev_priv);
+ vmw_fb_init(dev_priv);
+ }
+
+ return 0;
+
+out_no_device:
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev->devname == vmw_devname)
+ dev->devname = NULL;
+out_no_irq:
+ ttm_object_device_release(&dev_priv->tdev);
+out_err4:
+ iounmap(dev_priv->mmio_virt);
+out_err3:
+ drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_err2:
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+out_err1:
+ vmw_ttm_global_release(dev_priv);
+out_err0:
+ ida_destroy(&dev_priv->gmr_ida);
+ idr_destroy(&dev_priv->surface_idr);
+ idr_destroy(&dev_priv->context_idr);
+ idr_destroy(&dev_priv->stream_idr);
+ kfree(dev_priv);
+ return ret;
+}
+
+static int vmw_driver_unload(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
+
+ if (!dev_priv->stealth) {
+ vmw_fb_close(dev_priv);
+ vmw_kms_close(dev_priv);
+ vmw_overlay_close(dev_priv);
+ vmw_release_device(dev_priv);
+ pci_release_regions(dev->pdev);
+ } else {
+ vmw_kms_close(dev_priv);
+ vmw_overlay_close(dev_priv);
+ pci_release_region(dev->pdev, 2);
+ }
+ if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+ drm_irq_uninstall(dev_priv->dev);
+ if (dev->devname == vmw_devname)
+ dev->devname = NULL;
+ ttm_object_device_release(&dev_priv->tdev);
+ iounmap(dev_priv->mmio_virt);
+ drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
+ dev_priv->mmio_size, DRM_MTRR_WC);
+ (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+ vmw_ttm_global_release(dev_priv);
+ ida_destroy(&dev_priv->gmr_ida);
+ idr_destroy(&dev_priv->surface_idr);
+ idr_destroy(&dev_priv->context_idr);
+ idr_destroy(&dev_priv->stream_idr);
+
+ kfree(dev_priv);
+
+ return 0;
+}
+
+static void vmw_postclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct vmw_fpriv *vmw_fp;
+
+ vmw_fp = vmw_fpriv(file_priv);
+ ttm_object_file_release(&vmw_fp->tfile);
+ if (vmw_fp->locked_master)
+ drm_master_put(&vmw_fp->locked_master);
+ kfree(vmw_fp);
+}
+
+static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp;
+ int ret = -ENOMEM;
+
+ vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
+ if (unlikely(vmw_fp == NULL))
+ return ret;
+
+ vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+ if (unlikely(vmw_fp->tfile == NULL))
+ goto out_no_tfile;
+
+ file_priv->driver_priv = vmw_fp;
+
+ if (unlikely(dev_priv->bdev.dev_mapping == NULL))
+ dev_priv->bdev.dev_mapping =
+ file_priv->filp->f_path.dentry->d_inode->i_mapping;
+
+ return 0;
+
+out_no_tfile:
+ kfree(vmw_fp);
+ return ret;
+}
+
+static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+
+ /*
+ * Do extra checking on driver private ioctls.
+ */
+
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ struct drm_ioctl_desc *ioctl =
+ &vmw_ioctls[nr - DRM_COMMAND_BASE];
+
+ if (unlikely(ioctl->cmd != cmd)) {
+ DRM_ERROR("Invalid command format, ioctl %d\n",
+ nr - DRM_COMMAND_BASE);
+ return -EINVAL;
+ }
+ }
+
+ return drm_ioctl(filp, cmd, arg);
+}
+
+static int vmw_firstopen(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ dev_priv->is_opened = true;
+
+ return 0;
+}
+
+static void vmw_lastclose(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_crtc *crtc;
+ struct drm_mode_set set;
+ int ret;
+
+ /**
+ * Do nothing on the lastclose call from drm_unload.
+ */
+
+ if (!dev_priv->is_opened)
+ return;
+
+ dev_priv->is_opened = false;
+ set.x = 0;
+ set.y = 0;
+ set.fb = NULL;
+ set.mode = NULL;
+ set.connectors = NULL;
+ set.num_connectors = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ set.crtc = crtc;
+ ret = crtc->funcs->set_config(&set);
+ WARN_ON(ret != 0);
+ }
+
+}
+
+static void vmw_master_init(struct vmw_master *vmaster)
+{
+ ttm_lock_init(&vmaster->lock);
+}
+
+static int vmw_master_create(struct drm_device *dev,
+ struct drm_master *master)
+{
+ struct vmw_master *vmaster;
+
+ DRM_INFO("Master create.\n");
+ vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
+ if (unlikely(vmaster == NULL))
+ return -ENOMEM;
+
+ ttm_lock_init(&vmaster->lock);
+ ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+ master->driver_priv = vmaster;
+
+ return 0;
+}
+
+static void vmw_master_destroy(struct drm_device *dev,
+ struct drm_master *master)
+{
+ struct vmw_master *vmaster = vmw_master(master);
+
+ DRM_INFO("Master destroy.\n");
+ master->driver_priv = NULL;
+ kfree(vmaster);
+}
+
+
+static int vmw_master_set(struct drm_device *dev,
+ struct drm_file *file_priv,
+ bool from_open)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_master *active = dev_priv->active_master;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret = 0;
+
+ DRM_INFO("Master set.\n");
+ if (dev_priv->stealth) {
+ ret = vmw_request_device(dev_priv);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
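+ /*
+ * Take over from the fbdev master: lock it out and evict its
+ * buffers from VRAM before handing the device to the new master.
+ */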
+ if (active) {
+ BUG_ON(active != &dev_priv->fbdev_master);
+ ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
+ if (unlikely(ret != 0))
+ goto out_no_active_lock;
+
+ ttm_lock_set_kill(&active->lock, true, SIGTERM);
+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to clean VRAM on "
+ "master drop.\n");
+ }
+
+ dev_priv->active_master = NULL;
+ }
+
+ ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+ if (!from_open) {
+ ttm_vt_unlock(&vmaster->lock);
+ BUG_ON(vmw_fp->locked_master != file_priv->master);
+ drm_master_put(&vmw_fp->locked_master);
+ }
+
+ dev_priv->active_master = vmaster;
+
+ return 0;
+
+out_no_active_lock:
+ vmw_release_device(dev_priv);
+ return ret;
+}
+
+static void vmw_master_drop(struct drm_device *dev,
+ struct drm_file *file_priv,
+ bool from_release)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ DRM_INFO("Master drop.\n");
+
+ /**
+ * Make sure the master doesn't disappear while we have
+ * it locked.
+ */
+
+ vmw_fp->locked_master = drm_master_get(file_priv->master);
+ ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to lock TTM at VT switch.\n");
+ drm_master_put(&vmw_fp->locked_master);
+ }
+
+ ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+
+ if (dev_priv->stealth) {
+ ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ if (unlikely(ret != 0))
+ DRM_ERROR("Unable to clean VRAM on master drop.\n");
+ vmw_release_device(dev_priv);
+ }
+ dev_priv->active_master = &dev_priv->fbdev_master;
+ ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
+ ttm_vt_unlock(&dev_priv->fbdev_master.lock);
+
+ if (!dev_priv->stealth)
+ vmw_fb_on(dev_priv);
+}
+
+
+static void vmw_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ DRIVER_MODESET,
+ .load = vmw_driver_load,
+ .unload = vmw_driver_unload,
+ .firstopen = vmw_firstopen,
+ .lastclose = vmw_lastclose,
+ .irq_preinstall = vmw_irq_preinstall,
+ .irq_postinstall = vmw_irq_postinstall,
+ .irq_uninstall = vmw_irq_uninstall,
+ .irq_handler = vmw_irq_handler,
+ .reclaim_buffers_locked = NULL,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = vmw_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
+ .dma_quiescent = NULL, /*vmw_dma_quiescent, */
+ .master_create = vmw_master_create,
+ .master_destroy = vmw_master_destroy,
+ .master_set = vmw_master_set,
+ .master_drop = vmw_master_drop,
+ .open = vmw_driver_open,
+ .postclose = vmw_postclose,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = vmw_unlocked_ioctl,
+ .mmap = vmw_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove
+ },
+ .name = VMWGFX_DRIVER_NAME,
+ .desc = VMWGFX_DRIVER_DESC,
+ .date = VMWGFX_DRIVER_DATE,
+ .major = VMWGFX_DRIVER_MAJOR,
+ .minor = VMWGFX_DRIVER_MINOR,
+ .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
+};
+
+static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &driver);
+}
+
+static int __init vmwgfx_init(void)
+{
+ int ret;
+ ret = drm_init(&driver);
+ if (ret)
+ DRM_ERROR("Failed initializing DRM.\n");
+ return ret;
+}
+
+static void __exit vmwgfx_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(vmwgfx_init);
+module_exit(vmwgfx_exit);
+
+MODULE_AUTHOR("VMware Inc. and others");
+MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 00000000000..e61bd85b697
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,513 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_DRV_H_
+#define _VMWGFX_DRV_H_
+
+#include "vmwgfx_reg.h"
+#include "drmP.h"
+#include "vmwgfx_drm.h"
+#include "drm_hashtab.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_lock.h"
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_module.h"
+
+#define VMWGFX_DRIVER_DATE "20090724"
+#define VMWGFX_DRIVER_MAJOR 0
+#define VMWGFX_DRIVER_MINOR 1
+#define VMWGFX_DRIVER_PATCHLEVEL 2
+#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
+#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+#define VMWGFX_MAX_RELOCATIONS 2048
+#define VMWGFX_MAX_GMRS 2048
+
+struct vmw_fpriv {
+ struct drm_master *locked_master;
+ struct ttm_object_file *tfile;
+};
+
+struct vmw_dma_buffer {
+ struct ttm_buffer_object base;
+ struct list_head validate_list;
+ struct list_head gmr_lru;
+ uint32_t gmr_id;
+ bool gmr_bound;
+ uint32_t cur_validate_node;
+ bool on_validate_list;
+};
+
+struct vmw_resource {
+ struct kref kref;
+ struct vmw_private *dev_priv;
+ struct idr *idr;
+ int id;
+ enum ttm_object_type res_type;
+ bool avail;
+ void (*hw_destroy) (struct vmw_resource *res);
+ void (*res_free) (struct vmw_resource *res);
+
+ /* TODO is a generic snooper needed? */
+#if 0
+ void (*snoop)(struct vmw_resource *res,
+ struct ttm_object_file *tfile,
+ SVGA3dCmdHeader *header);
+ void *snoop_priv;
+#endif
+};
+
+struct vmw_cursor_snooper {
+ struct drm_crtc *crtc;
+ size_t age;
+ uint32_t *image;
+};
+
+struct vmw_surface {
+ struct vmw_resource res;
+ uint32_t flags;
+ uint32_t format;
+ uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ struct drm_vmw_size *sizes;
+ uint32_t num_sizes;
+
+ /* TODO so far just an extra pointer */
+ struct vmw_cursor_snooper snooper;
+};
+
+struct vmw_fifo_state {
+ unsigned long reserved_size;
+ __le32 *dynamic_buffer;
+ __le32 *static_buffer;
+ __le32 *last_buffer;
+ uint32_t last_data_size;
+ uint32_t last_buffer_size;
+ bool last_buffer_add;
+ unsigned long static_buffer_size;
+ bool using_bounce_buffer;
+ uint32_t capabilities;
+ struct rw_semaphore rwsem;
+};
+
+struct vmw_relocation {
+ SVGAGuestPtr *location;
+ uint32_t index;
+};
+
+struct vmw_sw_context{
+ struct ida bo_list;
+ uint32_t last_cid;
+ bool cid_valid;
+ uint32_t last_sid;
+ uint32_t sid_translation;
+ bool sid_valid;
+ struct ttm_object_file *tfile;
+ struct list_head validate_nodes;
+ struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
+ uint32_t cur_reloc;
+ struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
+ uint32_t cur_val_buf;
+};
+
+struct vmw_legacy_display;
+struct vmw_overlay;
+
+struct vmw_master {
+ struct ttm_lock lock;
+};
+
+struct vmw_private {
+ struct ttm_bo_device bdev;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_global_reference mem_global_ref;
+
+ struct vmw_fifo_state fifo;
+
+ struct drm_device *dev;
+ unsigned long vmw_chipset;
+ unsigned int io_start;
+ uint32_t vram_start;
+ uint32_t vram_size;
+ uint32_t mmio_start;
+ uint32_t mmio_size;
+ uint32_t fb_max_width;
+ uint32_t fb_max_height;
+ __le32 __iomem *mmio_virt;
+ int mmio_mtrr;
+ uint32_t capabilities;
+ uint32_t max_gmr_descriptors;
+ uint32_t max_gmr_ids;
+ struct mutex hw_mutex;
+
+ /*
+ * VGA registers.
+ */
+
+ uint32_t vga_width;
+ uint32_t vga_height;
+ uint32_t vga_depth;
+ uint32_t vga_bpp;
+ uint32_t vga_pseudo;
+ uint32_t vga_red_mask;
+ uint32_t vga_blue_mask;
+ uint32_t vga_green_mask;
+
+ /*
+ * Framebuffer info.
+ */
+
+ void *fb_info;
+ struct vmw_legacy_display *ldu_priv;
+ struct vmw_overlay *overlay_priv;
+
+ /*
+ * Context and surface management.
+ */
+
+ rwlock_t resource_lock;
+ struct idr context_idr;
+ struct idr surface_idr;
+ struct idr stream_idr;
+
+ /*
+ * Block lastclose from racing with firstopen.
+ */
+
+ struct mutex init_mutex;
+
+ /*
+ * A resource manager for kernel-only surfaces and
+ * contexts.
+ */
+
+ struct ttm_object_device *tdev;
+
+ /*
+ * Fencing and IRQs.
+ */
+
+ uint32_t fence_seq;
+ wait_queue_head_t fence_queue;
+ wait_queue_head_t fifo_queue;
+ atomic_t fence_queue_waiters;
+ atomic_t fifo_queue_waiters;
+ uint32_t last_read_sequence;
+ spinlock_t irq_lock;
+
+ /*
+ * Device state
+ */
+
+ uint32_t traces_state;
+ uint32_t enable_state;
+ uint32_t config_done_state;
+
+ /**
+ * Execbuf
+ */
+ /**
+ * Protected by the cmdbuf mutex.
+ */
+
+ struct vmw_sw_context ctx;
+ uint32_t val_seq;
+ struct mutex cmdbuf_mutex;
+
+ /**
+ * GMR management. Protected by the lru spinlock.
+ */
+
+ struct ida gmr_ida;
+ struct list_head gmr_lru;
+
+
+ /**
+ * Operating mode.
+ */
+
+ bool stealth;
+ bool is_opened;
+
+ /**
+ * Master management.
+ */
+
+ struct vmw_master *active_master;
+ struct vmw_master fbdev_master;
+};
+
+static inline struct vmw_private *vmw_priv(struct drm_device *dev)
+{
+ return (struct vmw_private *)dev->dev_private;
+}
+
+static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
+{
+ return (struct vmw_fpriv *)file_priv->driver_priv;
+}
+
+static inline struct vmw_master *vmw_master(struct drm_master *master)
+{
+ return (struct vmw_master *) master->driver_priv;
+}
+
+static inline void vmw_write(struct vmw_private *dev_priv,
+ unsigned int offset, uint32_t value)
+{
+ outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+ outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+}
+
+static inline uint32_t vmw_read(struct vmw_private *dev_priv,
+ unsigned int offset)
+{
+ uint32_t val;
+
+ outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
+ val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ return val;
+}
+
+/**
+ * GMR utilities - vmwgfx_gmr.c
+ */
+
+extern int vmw_gmr_bind(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo);
+extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
+
+/**
+ * Resource utilities - vmwgfx_resource.c
+ */
+
+extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
+extern void vmw_resource_unreference(struct vmw_resource **p_res);
+extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id);
+extern void vmw_surface_res_free(struct vmw_resource *res);
+extern int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res));
+extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out);
+extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id);
+extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+ uint32_t cur_validate_node);
+extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
+extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+ uint32_t id, struct vmw_dma_buffer **out);
+extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
+extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
+extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
+extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *bo);
+extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t *inout_id,
+ struct vmw_resource **out);
+
+
+/**
+ * Misc Ioctl functionality - vmwgfx_ioctl.c
+ */
+
+extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * Fifo utilities - vmwgfx_fifo.c
+ */
+
+extern int vmw_fifo_init(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo);
+extern void vmw_fifo_release(struct vmw_private *dev_priv,
+ struct vmw_fifo_state *fifo);
+extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
+extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
+extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
+ uint32_t *sequence);
+extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
+extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/**
+ * TTM glue - vmwgfx_ttm_glue.c
+ */
+
+extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
+extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
+extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/**
+ * TTM buffer object driver - vmwgfx_buffer.c
+ */
+
+extern struct ttm_placement vmw_vram_placement;
+extern struct ttm_placement vmw_vram_ne_placement;
+extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_bo_driver vmw_bo_driver;
+extern int vmw_dma_quiescent(struct drm_device *dev);
+
+/**
+ * Command submission - vmwgfx_execbuf.c
+ */
+
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * IRQs and waiting - vmwgfx_irq.c
+ */
+
+extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
+ uint32_t sequence, bool interruptible,
+ unsigned long timeout);
+extern void vmw_irq_preinstall(struct drm_device *dev);
+extern int vmw_irq_postinstall(struct drm_device *dev);
+extern void vmw_irq_uninstall(struct drm_device *dev);
+extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence);
+extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int vmw_fallback_wait(struct vmw_private *dev_priv,
+ bool lazy,
+ bool fifo_idle,
+ uint32_t sequence,
+ bool interruptible,
+ unsigned long timeout);
+
+/**
+ * Kernel framebuffer - vmwgfx_fb.c
+ */
+
+int vmw_fb_init(struct vmw_private *vmw_priv);
+int vmw_fb_close(struct vmw_private *dev_priv);
+int vmw_fb_off(struct vmw_private *vmw_priv);
+int vmw_fb_on(struct vmw_private *vmw_priv);
+
+/**
+ * Kernel modesetting - vmwgfx_kms.c
+ */
+
+int vmw_kms_init(struct vmw_private *dev_priv);
+int vmw_kms_close(struct vmw_private *dev_priv);
+int vmw_kms_save_vga(struct vmw_private *vmw_priv);
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header);
+
+/**
+ * Overlay control - vmwgfx_overlay.c
+ */
+
+int vmw_overlay_init(struct vmw_private *dev_priv);
+int vmw_overlay_close(struct vmw_private *dev_priv);
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int vmw_overlay_stop_all(struct vmw_private *dev_priv);
+int vmw_overlay_resume_all(struct vmw_private *dev_priv);
+int vmw_overlay_pause_all(struct vmw_private *dev_priv);
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
+
+/**
+ * Inline helper functions
+ */
+
+static inline void vmw_surface_unreference(struct vmw_surface **srf)
+{
+ struct vmw_surface *tmp_srf = *srf;
+ struct vmw_resource *res = &tmp_srf->res;
+ *srf = NULL;
+
+ vmw_resource_unreference(&res);
+}
+
+static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
+{
+ (void) vmw_resource_reference(&srf->res);
+ return srf;
+}
+
+static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+{
+ struct vmw_dma_buffer *tmp_buf = *buf;
+ struct ttm_buffer_object *bo = &tmp_buf->base;
+ *buf = NULL;
+
+ ttm_bo_unref(&bo);
+}
+
+static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+{
+ if (ttm_bo_reference(&buf->base))
+ return buf;
+ return NULL;
+}
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 00000000000..2e92da56740
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,621 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_reg.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_placement.h"
+
+static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+}
+
+static int vmw_cmd_ok(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ return 0;
+}
+
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_cid_cmd {
+ SVGA3dCmdHeader header;
+ __le32 cid;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_cid_cmd, header);
+ if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
+ return 0;
+
+ ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use context %u\n",
+ (unsigned) cmd->cid);
+ return ret;
+ }
+
+ sw_context->last_cid = cmd->cid;
+ sw_context->cid_valid = true;
+
+ return 0;
+}
+
+static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ uint32_t *sid)
+{
+ if (*sid == SVGA3D_INVALID_ID)
+ return 0;
+
+ if (unlikely((!sw_context->sid_valid ||
+ *sid != sw_context->last_sid))) {
+ int real_id;
+ int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+ *sid, &real_id);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could ot find or use surface 0x%08x "
+ "address 0x%08lx\n",
+ (unsigned int) *sid,
+ (unsigned long) sid);
+ return ret;
+ }
+
+ sw_context->last_sid = *sid;
+ sw_context->sid_valid = true;
+ *sid = real_id;
+ sw_context->sid_translation = real_id;
+ } else
+ *sid = sw_context->sid_translation;
+
+ return 0;
+}
+
+
+static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetRenderTarget body;
+ } *cmd;
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ return ret;
+}
+
+static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceCopy body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ if (unlikely(ret != 0))
+ return ret;
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+}
+
+static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceStretchBlt body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+ if (unlikely(ret != 0))
+ return ret;
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+}
+
+static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdBlitSurfaceToScreen body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+}
+
+static int vmw_cmd_present_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_sid_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ } *cmd;
+
+ cmd = container_of(header, struct vmw_sid_cmd, header);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+}
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ uint32_t handle;
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ struct vmw_surface *srf = NULL;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ struct vmw_relocation *reloc;
+ int ret;
+ uint32_t cur_validate_node;
+ struct ttm_validate_buffer *val_buf;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+ handle = cmd->dma.guest.ptr.gmrId;
+ ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not find or use GMR region.\n");
+ return -EINVAL;
+ }
+ bo = &vmw_bo->base;
+
+ if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
+ DRM_ERROR("Max number of DMA commands per submission"
+ " exceeded\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
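+ /*
+ * Record a relocation so that the validated buffer's GMR id and
+ * offset can be patched into the command stream later.
+ */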
+ reloc = &sw_context->relocs[sw_context->cur_reloc++];
+ reloc->location = &cmd->dma.guest.ptr;
+
+ cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+ if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
+ DRM_ERROR("Max number of DMA buffers per submission"
+ " exceeded.\n");
+ ret = -EINVAL;
+ goto out_no_reloc;
+ }
+
+ reloc->index = cur_validate_node;
+ if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
+ val_buf = &sw_context->val_bufs[cur_validate_node];
+ val_buf->bo = ttm_bo_reference(bo);
+ val_buf->new_sync_obj_arg = (void *) dev_priv;
+ list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+ ++sw_context->cur_val_buf;
+ }
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+ cmd->dma.host.sid, &srf);
+ if (ret) {
+ DRM_ERROR("could not find surface\n");
+ goto out_no_reloc;
+ }
+
+ /**
+ * Patch command stream with device SID.
+ */
+
+ cmd->dma.host.sid = srf->res.id;
+ vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
+ vmw_surface_unreference(&srf);
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+ return ret;
+}
+
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDrawPrimitives body;
+ } *cmd;
+ SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+ (unsigned long)header + sizeof(*cmd));
+ SVGA3dPrimitiveRange *range;
+ uint32_t i;
+ uint32_t maxnum;
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ cmd = container_of(header, struct vmw_draw_cmd, header);
+ maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+ if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+ DRM_ERROR("Illegal number of vertex declarations.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &decl->array.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ maxnum = (header->size - sizeof(cmd->body) -
+ cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+ if (unlikely(cmd->body.numRanges > maxnum)) {
+ DRM_ERROR("Illegal number of index ranges.\n");
+ return -EINVAL;
+ }
+
+ range = (SVGA3dPrimitiveRange *) decl;
+ for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &range->indexArray.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_tex_state_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetTextureState state;
+ };
+
+ SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+ ((unsigned long) header + header->size + sizeof(header));
+ SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+ ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ for (; cur_state < last_state; ++cur_state) {
+ if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+ continue;
+
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &cur_state->value);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
+
+
+typedef int (*vmw_cmd_func) (struct vmw_private *,
+ struct vmw_sw_context *,
+ SVGA3dCmdHeader *);
+
+#define VMW_CMD_DEF(cmd, func) \
+ [cmd - SVGA_3D_CMD_BASE] = func
+
+static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
+ &vmw_cmd_set_render_target_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
+ VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
+ &vmw_cmd_blt_surf_screen_check)
+};
+
+static int vmw_cmd_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t *size)
+{
+ uint32_t cmd_id;
+ uint32_t size_remaining = *size;
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+ int ret;
+
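+ /*
+ * SVGA_CMD_UPDATE is the only legacy 2D command handled here; it
+ * has a fixed size of five 32-bit words. Everything else is an
+ * SVGA3D command dispatched through the vmw_cmd_funcs table.
+ */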
+ cmd_id = ((uint32_t *)buf)[0];
+ if (cmd_id == SVGA_CMD_UPDATE) {
+ *size = 5 << 2;
+ return 0;
+ }
+
+ cmd_id = le32_to_cpu(header->id);
+ *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+
+ cmd_id -= SVGA_3D_CMD_BASE;
+ if (unlikely(*size > size_remaining))
+ goto out_err;
+
+ if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
+ goto out_err;
+
+ ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ return 0;
+out_err:
+ DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -EINVAL;
+}
+
+static int vmw_cmd_check_all(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ void *buf, uint32_t size)
+{
+ int32_t cur_size = size;
+ int ret;
+
+ while (cur_size > 0) {
+ size = cur_size;
+ ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
+ if (unlikely(ret != 0))
+ return ret;
+ buf = (void *)((unsigned long) buf + size);
+ cur_size -= size;
+ }
+
+ if (unlikely(cur_size != 0)) {
+ DRM_ERROR("Command verifier out of sync.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vmw_free_relocations(struct vmw_sw_context *sw_context)
+{
+ sw_context->cur_reloc = 0;
+}
+
+static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
+{
+ uint32_t i;
+ struct vmw_relocation *reloc;
+ struct ttm_validate_buffer *validate;
+ struct ttm_buffer_object *bo;
+
+ for (i = 0; i < sw_context->cur_reloc; ++i) {
+ reloc = &sw_context->relocs[i];
+ validate = &sw_context->val_bufs[reloc->index];
+ bo = validate->bo;
+ reloc->location->offset += bo->offset;
+ reloc->location->gmrId = vmw_dmabuf_gmr(bo);
+ }
+ vmw_free_relocations(sw_context);
+}
+
+static void vmw_clear_validations(struct vmw_sw_context *sw_context)
+{
+ struct ttm_validate_buffer *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
+ head) {
+ list_del(&entry->head);
+ vmw_dmabuf_validate_clear(entry->bo);
+ ttm_bo_unref(&entry->bo);
+ sw_context->cur_val_buf--;
+ }
+ BUG_ON(sw_context->cur_val_buf != 0);
+}
+
+static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo)
+{
+ int ret;
+
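+ /*
+ * Try to bind the buffer through a GMR first; fall back to
+ * placing it in VRAM if that fails.
+ */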
+ if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
+ return 0;
+
+ ret = vmw_gmr_bind(dev_priv, bo);
+ if (likely(ret == 0 || ret == -ERESTARTSYS))
+ return ret;
+
+
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ return ret;
+}
+
+
+static int vmw_validate_buffers(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context)
+{
+ struct ttm_validate_buffer *entry;
+ int ret;
+
+ list_for_each_entry(entry, &sw_context->validate_nodes, head) {
+ ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+ struct drm_vmw_fence_rep fence_rep;
+ struct drm_vmw_fence_rep __user *user_fence_rep;
+ int ret;
+ void *user_cmd;
+ void *cmd;
+ uint32_t sequence;
+ struct vmw_sw_context *sw_context = &dev_priv->ctx;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ if (unlikely(ret != 0)) {
+ ret = -ERESTARTSYS;
+ goto out_no_cmd_mutex;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving fifo space for commands.\n");
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ user_cmd = (void __user *)(unsigned long)arg->commands;
+ ret = copy_from_user(cmd, user_cmd, arg->command_size);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed copying commands.\n");
+ ret = -EFAULT;
+ goto out_commit;
+ }
+
+ sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->cid_valid = false;
+ sw_context->sid_valid = false;
+ sw_context->cur_reloc = 0;
+ sw_context->cur_val_buf = 0;
+
+ INIT_LIST_HEAD(&sw_context->validate_nodes);
+
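+ /*
+ * Verify the command stream, then reserve and validate all
+ * referenced buffers before patching relocations and committing
+ * the commands to the FIFO.
+ */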
+ ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+ if (unlikely(ret != 0))
+ goto out_err;
+ ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
+ dev_priv->val_seq++);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ ret = vmw_validate_buffers(dev_priv, sw_context);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ vmw_apply_relocations(sw_context);
+ vmw_fifo_commit(dev_priv, arg->command_size);
+
+ ret = vmw_fifo_send_fence(dev_priv, &sequence);
+
+ ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+ (void *)(unsigned long) sequence);
+ vmw_clear_validations(sw_context);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+ /*
+ * This error is harmless, because if fence submission fails,
+ * vmw_fifo_send_fence will sync.
+ */
+
+ if (ret != 0)
+ DRM_ERROR("Fence submission error. Syncing.\n");
+
+ fence_rep.error = ret;
+ fence_rep.fence_seq = (uint64_t) sequence;
+
+ user_fence_rep = (struct drm_vmw_fence_rep __user *)
+ (unsigned long)arg->fence_rep;
+
+ /*
+ * copy_to_user errors will be detected by user space not
+ * seeing fence_rep::error filled in.
+ */
+
+ ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+
+ vmw_kms_cursor_post_execbuf(dev_priv);
+ ttm_read_unlock(&vmaster->lock);
+ return 0;
+out_err:
+ vmw_free_relocations(sw_context);
+ ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+ vmw_clear_validations(sw_context);
+out_commit:
+ vmw_fifo_commit(dev_priv, 0);
+out_unlock:
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+out_no_cmd_mutex:
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 00000000000..641dde76ada
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,742 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 David Airlie
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#include "ttm/ttm_placement.h"
+
+#define VMW_DIRTY_DELAY (HZ / 30)
+
+struct vmw_fb_par {
+ struct vmw_private *vmw_priv;
+
+ void *vmalloc;
+
+ struct vmw_dma_buffer *vmw_bo;
+ struct ttm_bo_kmap_obj map;
+
+ u32 pseudo_palette[17];
+
+ unsigned depth;
+ unsigned bpp;
+
+ unsigned max_width;
+ unsigned max_height;
+
+ void *bo_ptr;
+ unsigned bo_size;
+ bool bo_iowrite;
+
+ struct {
+ spinlock_t lock;
+ bool active;
+ unsigned x1;
+ unsigned y1;
+ unsigned x2;
+ unsigned y2;
+ } dirty;
+};
+
+static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
+ struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ u32 *pal = par->pseudo_palette;
+
+ if (regno > 15) {
+ DRM_ERROR("Bad regno %u.\n", regno);
+ return 1;
+ }
+
+ switch (par->depth) {
+ case 24:
+ case 32:
+ pal[regno] = ((red & 0xff00) << 8) |
+ (green & 0xff00) |
+ ((blue & 0xff00) >> 8);
+ break;
+ default:
+ DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ int depth = var->bits_per_pixel;
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+
+ switch (var->bits_per_pixel) {
+ case 32:
+ depth = (var->transp.length > 0) ? 32 : 24;
+ break;
+ default:
+ DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ switch (depth) {
+ case 24:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 0;
+ var->transp.offset = 0;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ var->transp.length = 8;
+ var->transp.offset = 24;
+ break;
+ default:
+ DRM_ERROR("Bad depth %u.\n", depth);
+ return -EINVAL;
+ }
+
+ /* without multimon it's hard to resize */
+ if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
+ (var->xres != par->max_width ||
+ var->yres != par->max_height)) {
+ DRM_ERROR("Tried to resize, but we don't have multimon\n");
+ return -EINVAL;
+ }
+
+ if (var->xres > par->max_width ||
+ var->yres > par->max_height) {
+ DRM_ERROR("Requested geom can not fit in framebuffer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vmw_fb_set_par(struct fb_info *info)
+{
+ struct vmw_fb_par *par = info->par;
+ struct vmw_private *vmw_priv = par->vmw_priv;
+
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+
+ /* TODO check if pitch and offset change */
+
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ } else {
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
+
+ /* TODO check if pitch and offset change */
+ }
+
+ return 0;
+}
+
+static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static int vmw_fb_blank(int blank, struct fb_info *info)
+{
+ return 0;
+}
+
+/*
+ * Dirty code
+ */
+
+static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
+{
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ struct fb_info *info = vmw_priv->fb_info;
+ int stride = (info->fix.line_length / 4);
+ int *src = (int *)info->screen_base;
+ __le32 __iomem *vram_mem = par->bo_ptr;
+ unsigned long flags;
+ unsigned x, y, w, h;
+ int i, k;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ if (!par->dirty.active) {
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+ return;
+ }
+ x = par->dirty.x1;
+ y = par->dirty.y1;
+ w = min(par->dirty.x2, info->var.xres) - x;
+ h = min(par->dirty.y2, info->var.yres) - y;
+ par->dirty.x1 = par->dirty.x2 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
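+ /* Copy from the vmalloc'd shadow buffer into the VRAM-backed bo. */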
+ for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
+ for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
+ iowrite32(src[k], vram_mem + k);
+ }
+
+#if 0
+ DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
+#endif
+
+ cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return;
+ }
+
+ cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
+ cmd->body.x = cpu_to_le32(x);
+ cmd->body.y = cpu_to_le32(y);
+ cmd->body.width = cpu_to_le32(w);
+ cmd->body.height = cpu_to_le32(h);
+ vmw_fifo_commit(vmw_priv, sizeof(*cmd));
+}
+
+static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
+ unsigned x1, unsigned y1,
+ unsigned width, unsigned height)
+{
+ struct fb_info *info = par->vmw_priv->fb_info;
+ unsigned long flags;
+ unsigned x2 = x1 + width;
+ unsigned y2 = y1 + height;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ if (par->dirty.x1 == par->dirty.x2) {
+ par->dirty.x1 = x1;
+ par->dirty.y1 = y1;
+ par->dirty.x2 = x2;
+ par->dirty.y2 = y2;
+ /* If we are active, start the dirty work;
+ * we share the work with the defio system. */
+ if (par->dirty.active)
+ schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
+ } else {
+ if (x1 < par->dirty.x1)
+ par->dirty.x1 = x1;
+ if (y1 < par->dirty.y1)
+ par->dirty.y1 = y1;
+ if (x2 > par->dirty.x2)
+ par->dirty.x2 = x2;
+ if (y2 > par->dirty.y2)
+ par->dirty.y2 = y2;
+ }
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+}
+
+static void vmw_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct vmw_fb_par *par = info->par;
+ unsigned long start, end, min, max;
+ unsigned long flags;
+ struct page *page;
+ int y1, y2;
+
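+ /*
+ * Compute the vertical span covered by the written-to pages and
+ * mark it dirty before flushing.
+ */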
+ min = ULONG_MAX;
+ max = 0;
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = (max / info->fix.line_length) + 1;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.x1 = 0;
+ par->dirty.y1 = y1;
+ par->dirty.x2 = info->var.xres;
+ par->dirty.y2 = y2;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+ }
+
+ vmw_fb_dirty_flush(par);
+}
+
+struct fb_deferred_io vmw_defio = {
+ .delay = VMW_DIRTY_DELAY,
+ .deferred_io = vmw_deferred_io,
+};
+
+/*
+ * Draw code
+ */
+
+static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ cfb_fillrect(info, rect);
+ vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
+ rect->width, rect->height);
+}
+
+static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+ cfb_copyarea(info, region);
+ vmw_fb_dirty_mark(info->par, region->dx, region->dy,
+ region->width, region->height);
+}
+
+static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ cfb_imageblit(info, image);
+ vmw_fb_dirty_mark(info->par, image->dx, image->dy,
+ image->width, image->height);
+}
+
+/*
+ * Bring up code
+ */
+
+static struct fb_ops vmw_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = vmw_fb_check_var,
+ .fb_set_par = vmw_fb_set_par,
+ .fb_setcolreg = vmw_fb_setcolreg,
+ .fb_fillrect = vmw_fb_fillrect,
+ .fb_copyarea = vmw_fb_copyarea,
+ .fb_imageblit = vmw_fb_imageblit,
+ .fb_pan_display = vmw_fb_pan_display,
+ .fb_blank = vmw_fb_blank,
+};
+
+static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
+ size_t size, struct vmw_dma_buffer **out)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ int ret;
+
+ ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ /* interruptible? */
+ ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
+ if (!vmw_bo)
+ goto err_unlock;
+
+ ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+ &ne_placement,
+ false,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto err_unlock; /* init frees the buffer on failure */
+
+ *out = vmw_bo;
+
+ ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+
+ return 0;
+
+err_unlock:
+ ttm_write_unlock(&vmw_priv->fbdev_master.lock);
+ return ret;
+}
+
+int vmw_fb_init(struct vmw_private *vmw_priv)
+{
+ struct device *device = &vmw_priv->dev->pdev->dev;
+ struct vmw_fb_par *par;
+ struct fb_info *info;
+ unsigned initial_width, initial_height;
+ unsigned fb_width, fb_height;
+ unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
+ int ret;
+
+ initial_width = 800;
+ initial_height = 600;
+
+ fb_bbp = 32;
+ fb_depth = 24;
+
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+ fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
+ } else {
+ fb_width = min(vmw_priv->fb_max_width, initial_width);
+ fb_height = min(vmw_priv->fb_max_height, initial_height);
+ }
+
+ initial_width = min(fb_width, initial_width);
+ initial_height = min(fb_height, initial_height);
+
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+
+ fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+ fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
+ fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
+
+ DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
+ DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
+ DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
+ DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
+ DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
+ DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
+ DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
+ DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
+ DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
+ DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
+ DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
+ DRM_DEBUG("fb_pitch %u\n", fb_pitch);
+ DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
+
+ info = framebuffer_alloc(sizeof(*par), device);
+ if (!info)
+ return -ENOMEM;
+
+ /*
+ * Par
+ */
+ vmw_priv->fb_info = info;
+ par = info->par;
+ par->vmw_priv = vmw_priv;
+ par->depth = fb_depth;
+ par->bpp = fb_bbp;
+ par->vmalloc = NULL;
+ par->max_width = fb_width;
+ par->max_height = fb_height;
+
+ /*
+ * Create buffers and alloc memory
+ */
+ par->vmalloc = vmalloc(fb_size);
+ if (unlikely(par->vmalloc == NULL)) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
+ if (unlikely(ret != 0))
+ goto err_free;
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base,
+ 0,
+ par->vmw_bo->base.num_pages,
+ &par->map);
+ if (unlikely(ret != 0))
+ goto err_unref;
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+ par->bo_size = fb_size;
+
+ /*
+ * Fixed and var
+ */
+ strcpy(info->fix.id, "svgadrmfb");
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ info->fix.type_aux = 0;
+ info->fix.xpanstep = 1; /* doing it in hw */
+ info->fix.ypanstep = 1; /* doing it in hw */
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+ info->fix.line_length = fb_pitch;
+
+ info->fix.smem_start = 0;
+ info->fix.smem_len = fb_size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ info->pseudo_palette = par->pseudo_palette;
+ info->screen_base = par->vmalloc;
+ info->screen_size = fb_size;
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &vmw_fb_ops;
+
+ /* 24 depth per default */
+ info->var.red.offset = 16;
+ info->var.green.offset = 8;
+ info->var.blue.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+ info->var.transp.offset = 0;
+ info->var.transp.length = 0;
+
+ info->var.xres_virtual = fb_width;
+ info->var.yres_virtual = fb_height;
+ info->var.bits_per_pixel = par->bpp;
+ info->var.xoffset = 0;
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+
+ info->var.xres = initial_width;
+ info->var.yres = initial_height;
+
+#if 0
+ info->pixmap.size = 64*1024;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+#else
+ info->pixmap.size = 0;
+ info->pixmap.buf_align = 8;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 1;
+#endif
+
+ /*
+ * Dirty & Deferred IO
+ */
+ par->dirty.x1 = par->dirty.x2 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
+ par->dirty.active = true;
+ spin_lock_init(&par->dirty.lock);
+ info->fbdefio = &vmw_defio;
+ fb_deferred_io_init(info);
+
+ ret = register_framebuffer(info);
+ if (unlikely(ret != 0))
+ goto err_defio;
+
+ return 0;
+
+err_defio:
+ fb_deferred_io_cleanup(info);
+ ttm_bo_kunmap(&par->map);
+err_unref:
+ ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
+err_free:
+ vfree(par->vmalloc);
+ framebuffer_release(info);
+ vmw_priv->fb_info = NULL;
+
+ return ret;
+}
+
+int vmw_fb_close(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ struct ttm_buffer_object *bo;
+
+ if (!vmw_priv->fb_info)
+ return 0;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+ bo = &par->vmw_bo->base;
+ par->vmw_bo = NULL;
+
+	/* TODO: double-check the teardown order of deferred I/O vs. framebuffer unregister */
+ fb_deferred_io_cleanup(info);
+ unregister_framebuffer(info);
+
+ ttm_bo_kunmap(&par->map);
+ ttm_bo_unref(&bo);
+
+ vfree(par->vmalloc);
+ framebuffer_release(info);
+
+ return 0;
+}
+
+int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ int ret = 0;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_dma_buffer *vmw_bo)
+{
+ struct ttm_buffer_object *bo = &vmw_bo->base;
+ struct ttm_placement ne_placement = vmw_vram_ne_placement;
+ int ret = 0;
+
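+	/* Limiting lpfn to the BO size forces placement at the start of VRAM. */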
+ ne_placement.lpfn = bo->num_pages;
+
+	/* TODO: should this lock be taken interruptibly? */
+ ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err_unlock;
+
+ if (vmw_bo->gmr_bound) {
+ vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
+ spin_lock(&bo->glob->lru_lock);
+ ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
+ spin_unlock(&bo->glob->lru_lock);
+		vmw_bo->gmr_bound = false;
+ }
+
+ ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ttm_bo_unreserve(bo);
+err_unlock:
+ ttm_write_unlock(&vmw_priv->active_master->lock);
+
+ return ret;
+}
+
+int vmw_fb_off(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ unsigned long flags;
+
+ if (!vmw_priv->fb_info)
+ return -EINVAL;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.active = false;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
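+	/* Wait for any in-flight deferred-I/O flush to finish before unmapping. */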
+ flush_scheduled_work();
+
+ par->bo_ptr = NULL;
+ ttm_bo_kunmap(&par->map);
+
+ vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
+
+ return 0;
+}
+
+int vmw_fb_on(struct vmw_private *vmw_priv)
+{
+ struct fb_info *info;
+ struct vmw_fb_par *par;
+ unsigned long flags;
+ bool dummy;
+ int ret;
+
+ if (!vmw_priv->fb_info)
+ return -EINVAL;
+
+ info = vmw_priv->fb_info;
+ par = info->par;
+
+ /* we are already active */
+ if (par->bo_ptr != NULL)
+ return 0;
+
+	/* Make sure that all overlays are stopped when we take over */
+ vmw_overlay_stop_all(vmw_priv);
+
+ ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("could not move buffer to start of VRAM\n");
+ goto err_no_buffer;
+ }
+
+ ret = ttm_bo_kmap(&par->vmw_bo->base,
+ 0,
+ par->vmw_bo->base.num_pages,
+ &par->map);
+ BUG_ON(ret != 0);
+ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
+
+ spin_lock_irqsave(&par->dirty.lock, flags);
+ par->dirty.active = true;
+ spin_unlock_irqrestore(&par->dirty.lock, flags);
+
+err_no_buffer:
+ vmw_fb_set_par(info);
+
+ vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
+
+	/* If something was already dirty, no new work gets
+	 * scheduled, so kick the deferred work now. */
+ schedule_delayed_work(&info->deferred_work, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 00000000000..01feb48af33
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,519 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "drmP.h"
+#include "ttm/ttm_placement.h"
+
+int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max;
+ uint32_t min;
+ uint32_t dummy;
+ int ret;
+
+ fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+ fifo->static_buffer = vmalloc(fifo->static_buffer_size);
+ if (unlikely(fifo->static_buffer == NULL))
+ return -ENOMEM;
+
+ fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
+ fifo->last_data_size = 0;
+ fifo->last_buffer_add = false;
+ fifo->last_buffer = vmalloc(fifo->last_buffer_size);
+ if (unlikely(fifo->last_buffer == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ fifo->dynamic_buffer = NULL;
+ fifo->reserved_size = 0;
+ fifo->using_bounce_buffer = false;
+
+ init_rwsem(&fifo->rwsem);
+
+ /*
+ * Allow mapping the first page read-only to user-space.
+ */
+
+ DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+ DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+ DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
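+	/* Save the current enable / config-done state so vmw_fifo_release() can restore it. */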
+ mutex_lock(&dev_priv->hw_mutex);
+ dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+ dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+ vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+
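+	/*
+	 * The FIFO starts with at least four 32-bit registers; extended
+	 * FIFOs report their register count in SVGA_REG_MEM_REGS. Convert
+	 * that count to a byte offset and never let it be smaller than a page.
+	 */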
+ min = 4;
+ if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
+ min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
+ min <<= 2;
+
+ if (min < PAGE_SIZE)
+ min = PAGE_SIZE;
+
+ iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
+ iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
+ wmb();
+ iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
+ iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
+ mb();
+
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+
+ DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
+ (unsigned int) max,
+ (unsigned int) min,
+ (unsigned int) fifo->capabilities);
+
+ dev_priv->fence_seq = (uint32_t) -100;
+ dev_priv->last_read_sequence = (uint32_t) -100;
+ iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
+
+ return vmw_fifo_send_fence(dev_priv, &dummy);
+out_err:
+ vfree(fifo->static_buffer);
+ fifo->static_buffer = NULL;
+ return ret;
+}
+
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
+ iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
+ vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+ }
+
+ mutex_unlock(&dev_priv->hw_mutex);
+}
+
+void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
+ vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+
+ dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
+ dev_priv->config_done_state);
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ dev_priv->enable_state);
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (likely(fifo->last_buffer != NULL)) {
+ vfree(fifo->last_buffer);
+ fifo->last_buffer = NULL;
+ }
+
+ if (likely(fifo->static_buffer != NULL)) {
+ vfree(fifo->static_buffer);
+ fifo->static_buffer = NULL;
+ }
+
+ if (likely(fifo->dynamic_buffer != NULL)) {
+ vfree(fifo->dynamic_buffer);
+ fifo->dynamic_buffer = NULL;
+ }
+}
+
+static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+
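+	/*
+	 * Free space is the span from next_cmd to max plus the wrapped
+	 * span from min to stop; the FIFO counts as full when that space
+	 * cannot hold the requested number of bytes.
+	 */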
+ return ((max - next_cmd) + (stop - min) <= bytes);
+}
+
+static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
+ uint32_t bytes, bool interruptible,
+ unsigned long timeout)
+{
+ int ret = 0;
+ unsigned long end_jiffies = jiffies + timeout;
+ DEFINE_WAIT(__wait);
+
+ DRM_INFO("Fifo wait noirq.\n");
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->fifo_queue, &__wait,
+ (interruptible) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (!vmw_fifo_is_full(dev_priv, bytes))
+ break;
+ if (time_after_eq(jiffies, end_jiffies)) {
+ ret = -EBUSY;
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+ schedule_timeout(1);
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&dev_priv->fifo_queue, &__wait);
+ wake_up_all(&dev_priv->fifo_queue);
+ DRM_INFO("Fifo noirq exit.\n");
+ return ret;
+}
+
+static int vmw_fifo_wait(struct vmw_private *dev_priv,
+ uint32_t bytes, bool interruptible,
+ unsigned long timeout)
+{
+ long ret = 1L;
+ unsigned long irq_flags;
+
+ if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return vmw_fifo_wait_noirq(dev_priv, bytes,
+ interruptible, timeout);
+
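+	/*
+	 * Enable the FIFO-progress interrupt while we wait; the matching
+	 * atomic_dec_and_test() below turns it off again when the last
+	 * waiter leaves.
+	 */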
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_FIFO_PROGRESS,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_FIFO_PROGRESS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (dev_priv->fifo_queue,
+ !vmw_fifo_is_full(dev_priv, bytes), timeout);
+ else
+ ret = wait_event_timeout
+ (dev_priv->fifo_queue,
+ !vmw_fifo_is_full(dev_priv, bytes), timeout);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_FIFO_PROGRESS);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ret;
+}
+
+void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t max;
+ uint32_t min;
+ uint32_t next_cmd;
+ uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+ int ret;
+
+ down_write(&fifo_state->rwsem);
+ max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+
+ if (unlikely(bytes >= (max - min)))
+ goto out_err;
+
+ BUG_ON(fifo_state->reserved_size != 0);
+ BUG_ON(fifo_state->dynamic_buffer != NULL);
+
+ fifo_state->reserved_size = bytes;
+
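+	/*
+	 * Each pass decides whether the request fits contiguously in the
+	 * FIFO (reserve in place), must wait for the device to make
+	 * progress, or needs a bounce buffer because it would wrap the ring.
+	 */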
+ while (1) {
+ uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
+ bool need_bounce = false;
+ bool reserve_in_place = false;
+
+ if (next_cmd >= stop) {
+ if (likely((next_cmd + bytes < max ||
+ (next_cmd + bytes == max && stop > min))))
+ reserve_in_place = true;
+
+ else if (vmw_fifo_is_full(dev_priv, bytes)) {
+ ret = vmw_fifo_wait(dev_priv, bytes,
+ false, 3 * HZ);
+ if (unlikely(ret != 0))
+ goto out_err;
+ } else
+ need_bounce = true;
+
+ } else {
+
+ if (likely((next_cmd + bytes < stop)))
+ reserve_in_place = true;
+ else {
+ ret = vmw_fifo_wait(dev_priv, bytes,
+ false, 3 * HZ);
+ if (unlikely(ret != 0))
+ goto out_err;
+ }
+ }
+
+ if (reserve_in_place) {
+ if (reserveable || bytes <= sizeof(uint32_t)) {
+ fifo_state->using_bounce_buffer = false;
+
+ if (reserveable)
+ iowrite32(bytes, fifo_mem +
+ SVGA_FIFO_RESERVED);
+ return fifo_mem + (next_cmd >> 2);
+ } else {
+ need_bounce = true;
+ }
+ }
+
+ if (need_bounce) {
+ fifo_state->using_bounce_buffer = true;
+ if (bytes < fifo_state->static_buffer_size)
+ return fifo_state->static_buffer;
+ else {
+ fifo_state->dynamic_buffer = vmalloc(bytes);
+ return fifo_state->dynamic_buffer;
+ }
+ }
+ }
+out_err:
+ fifo_state->reserved_size = 0;
+ up_write(&fifo_state->rwsem);
+ return NULL;
+}
+
+static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
+ __le32 __iomem *fifo_mem,
+ uint32_t next_cmd,
+ uint32_t max, uint32_t min, uint32_t bytes)
+{
+ uint32_t chunk_size = max - next_cmd;
+ uint32_t rest;
+ uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+ fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
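+	/*
+	 * Copy the bounced command into the FIFO in at most two chunks:
+	 * from next_cmd up to max, then wrapping around to min for the
+	 * remainder. SVGA_FIFO_RESERVED tells the device what to expect.
+	 */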
+ if (bytes < chunk_size)
+ chunk_size = bytes;
+
+ iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
+ mb();
+ memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
+ rest = bytes - chunk_size;
+ if (rest)
+ memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
+ rest);
+}
+
+static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
+ __le32 __iomem *fifo_mem,
+ uint32_t next_cmd,
+ uint32_t max, uint32_t min, uint32_t bytes)
+{
+ uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
+ fifo_state->dynamic_buffer : fifo_state->static_buffer;
+
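+	/*
+	 * Without the RESERVE capability the command is copied one 32-bit
+	 * word at a time, advancing (and wrapping) NEXT_CMD after every
+	 * word so the device can consume the data as it arrives.
+	 */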
+ while (bytes > 0) {
+ iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
+ next_cmd += sizeof(uint32_t);
+ if (unlikely(next_cmd == max))
+ next_cmd = min;
+ mb();
+ iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ mb();
+ bytes -= sizeof(uint32_t);
+ }
+}
+
+void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
+ uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
+ uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
+ bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
+
+ BUG_ON((bytes & 3) != 0);
+ BUG_ON(bytes > fifo_state->reserved_size);
+
+ fifo_state->reserved_size = 0;
+
+ if (fifo_state->using_bounce_buffer) {
+ if (reserveable)
+ vmw_fifo_res_copy(fifo_state, fifo_mem,
+ next_cmd, max, min, bytes);
+ else
+ vmw_fifo_slow_copy(fifo_state, fifo_mem,
+ next_cmd, max, min, bytes);
+
+ if (fifo_state->dynamic_buffer) {
+ vfree(fifo_state->dynamic_buffer);
+ fifo_state->dynamic_buffer = NULL;
+ }
+
+ }
+
+ if (fifo_state->using_bounce_buffer || reserveable) {
+ next_cmd += bytes;
+ if (next_cmd >= max)
+ next_cmd -= max - min;
+ mb();
+ iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
+ }
+
+ if (reserveable)
+ iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
+ mb();
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ up_write(&fifo_state->rwsem);
+}
+
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct svga_fifo_cmd_fence *cmd_fence;
+ void *fm;
+ int ret = 0;
+ uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+
+ fm = vmw_fifo_reserve(dev_priv, bytes);
+ if (unlikely(fm == NULL)) {
+ down_write(&fifo_state->rwsem);
+ *sequence = dev_priv->fence_seq;
+ up_write(&fifo_state->rwsem);
+ ret = -ENOMEM;
+ (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+ false, 3*HZ);
+ goto out_err;
+ }
+
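+	/* Never hand out a zero sequence number; skip over it on wrap. */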
+ do {
+ *sequence = dev_priv->fence_seq++;
+ } while (*sequence == 0);
+
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+
+ /*
+ * Don't request hardware to send a fence. The
+ * waiting code in vmwgfx_irq.c will emulate this.
+ */
+
+ vmw_fifo_commit(dev_priv, 0);
+ return 0;
+ }
+
+ *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
+ cmd_fence = (struct svga_fifo_cmd_fence *)
+ ((unsigned long)fm + sizeof(__le32));
+
+ iowrite32(*sequence, &cmd_fence->fence);
+ fifo_state->last_buffer_add = true;
+ vmw_fifo_commit(dev_priv, bytes);
+ fifo_state->last_buffer_add = false;
+
+out_err:
+ return ret;
+}
+
+/**
+ * Map the first page of the FIFO read-only to user-space.
+ */
+
+static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int ret;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+
+ if (address != vma->vm_start)
+ return VM_FAULT_SIGBUS;
+
+ ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
+ if (likely(ret == -EBUSY || ret == 0))
+ return VM_FAULT_NOPAGE;
+ else if (ret == -ENOMEM)
+ return VM_FAULT_OOM;
+
+ return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct vmw_fifo_vm_ops = {
+ .fault = vmw_fifo_vm_fault,
+ .open = NULL,
+ .close = NULL
+};
+
+int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vmw_private *dev_priv;
+
+ file_priv = (struct drm_file *)filp->private_data;
+ dev_priv = vmw_priv(file_priv->minor->dev);
+
+ if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
+ (vma->vm_end - vma->vm_start) != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
+ vma->vm_page_prot);
+ vma->vm_ops = &vmw_fifo_vm_ops;
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 00000000000..5f8908a5d7f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,213 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "drmP.h"
+#include "ttm/ttm_bo_driver.h"
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
+static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
+ struct page *pages[],
+ unsigned long num_pages)
+{
+ struct page *page, *next;
+ struct svga_guest_mem_descriptor *page_virtual = NULL;
+ struct svga_guest_mem_descriptor *desc_virtual = NULL;
+ unsigned int desc_per_page;
+ unsigned long prev_pfn;
+ unsigned long pfn;
+ int ret;
+
+ desc_per_page = PAGE_SIZE /
+ sizeof(struct svga_guest_mem_descriptor) - 1;
+
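+	/*
+	 * Each descriptor page holds desc_per_page run descriptors plus a
+	 * terminating descriptor that either ends the list or chains to
+	 * the next descriptor page.
+	 */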
+ while (likely(num_pages != 0)) {
+ page = alloc_page(__GFP_HIGHMEM);
+ if (unlikely(page == NULL)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ list_add_tail(&page->lru, desc_pages);
+
+ /*
+ * Point previous page terminating descriptor to this
+ * page before unmapping it.
+ */
+
+ if (likely(page_virtual != NULL)) {
+ desc_virtual->ppn = page_to_pfn(page);
+ kunmap_atomic(page_virtual, KM_USER0);
+ }
+
+ page_virtual = kmap_atomic(page, KM_USER0);
+ desc_virtual = page_virtual - 1;
+ prev_pfn = ~(0UL);
+
+ while (likely(num_pages != 0)) {
+ pfn = page_to_pfn(*pages);
+
+ if (pfn != prev_pfn + 1) {
+
+ if (desc_virtual - page_virtual ==
+ desc_per_page - 1)
+ break;
+
+ (++desc_virtual)->ppn = cpu_to_le32(pfn);
+ desc_virtual->num_pages = cpu_to_le32(1);
+ } else {
+ uint32_t tmp =
+ le32_to_cpu(desc_virtual->num_pages);
+ desc_virtual->num_pages = cpu_to_le32(tmp + 1);
+ }
+ prev_pfn = pfn;
+ --num_pages;
+ ++pages;
+ }
+
+ (++desc_virtual)->ppn = cpu_to_le32(0);
+ desc_virtual->num_pages = cpu_to_le32(0);
+ }
+
+ if (likely(page_virtual != NULL))
+ kunmap_atomic(page_virtual, KM_USER0);
+
+ return 0;
+out_err:
+ list_for_each_entry_safe(page, next, desc_pages, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
+ return ret;
+}
+
+static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, desc_pages, lru) {
+ list_del_init(&page->lru);
+ __free_page(page);
+ }
+}
+
+static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
+ int gmr_id, struct list_head *desc_pages)
+{
+ struct page *page;
+
+ if (unlikely(list_empty(desc_pages)))
+ return;
+
+ page = list_entry(desc_pages->next, struct page, lru);
+
+ mutex_lock(&dev_priv->hw_mutex);
+
+ vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+ wmb();
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+ mb();
+
+ mutex_unlock(&dev_priv->hw_mutex);
+
+}
+
+/**
+ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
+ * the number of used descriptors.
+ */
+
+static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
+ unsigned long num_pages)
+{
+ unsigned long prev_pfn = ~(0UL);
+ unsigned long pfn;
+ unsigned long descriptors = 0;
+
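+	/* One descriptor is needed per run of physically contiguous pages. */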
+ while (num_pages--) {
+ pfn = page_to_pfn(*pages++);
+ if (prev_pfn + 1 != pfn)
+ ++descriptors;
+ prev_pfn = pfn;
+ }
+
+ return descriptors;
+}
+
+int vmw_gmr_bind(struct vmw_private *dev_priv,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_tt *ttm = bo->ttm;
+ unsigned long descriptors;
+ int ret;
+ uint32_t id;
+ struct list_head desc_pages;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_GMR))
+ return -EINVAL;
+
+ ret = ttm_tt_populate(ttm);
+ if (unlikely(ret != 0))
+ return ret;
+
+ descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
+ if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&desc_pages);
+ ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
+ ttm->num_pages);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_gmr_id_alloc(dev_priv, &id);
+ if (unlikely(ret != 0))
+ goto out_no_id;
+
+ vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
+ vmw_gmr_free_descriptors(&desc_pages);
+ vmw_dmabuf_set_gmr(bo, id);
+ return 0;
+
+out_no_id:
+ vmw_gmr_free_descriptors(&desc_pages);
+ return ret;
+}
+
+void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
+{
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
+ wmb();
+ vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
+ mb();
+ mutex_unlock(&dev_priv->hw_mutex);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 00000000000..5fa6a4ed238
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,81 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_drm.h"
+
+int vmw_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_vmw_getparam_arg *param =
+ (struct drm_vmw_getparam_arg *)data;
+
+ switch (param->param) {
+ case DRM_VMW_PARAM_NUM_STREAMS:
+ param->value = vmw_overlay_num_overlays(dev_priv);
+ break;
+ case DRM_VMW_PARAM_NUM_FREE_STREAMS:
+ param->value = vmw_overlay_num_free_overlays(dev_priv);
+ break;
+ case DRM_VMW_PARAM_3D:
+ param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
+ break;
+ case DRM_VMW_PARAM_FIFO_OFFSET:
+ param->value = dev_priv->mmio_start;
+ break;
+ default:
+ DRM_ERROR("Illegal vmwgfx get param request: %d\n",
+ param->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct drm_vmw_fifo_debug_arg *arg =
+ (struct drm_vmw_fifo_debug_arg *)data;
+ __le32 __user *buffer = (__le32 __user *)
+ (unsigned long)arg->debug_buffer;
+
+ if (unlikely(fifo_state->last_buffer == NULL))
+ return -EINVAL;
+
+ if (arg->debug_buffer_size < fifo_state->last_data_size) {
+ arg->used_size = arg->debug_buffer_size;
+ arg->did_not_fit = 1;
+ } else {
+ arg->used_size = fifo_state->last_data_size;
+ arg->did_not_fit = 0;
+ }
+	if (copy_to_user(buffer, fifo_state->last_buffer, arg->used_size))
+		return -EFAULT;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 00000000000..d40086fc864
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,293 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 24)
+
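+/*
+ * Fence sequence numbers are compared with wrapping 32-bit arithmetic:
+ * a fence counts as signaled once the last sequence read back from the
+ * device has passed it, but by less than VMW_FENCE_WRAP.
+ */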
+irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ spin_lock(&dev_priv->irq_lock);
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ spin_unlock(&dev_priv->irq_lock);
+
+ if (status & SVGA_IRQFLAG_ANY_FENCE)
+ wake_up_all(&dev_priv->fence_queue);
+ if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
+ wake_up_all(&dev_priv->fifo_queue);
+
+ if (likely(status)) {
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+{
+ uint32_t busy;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ busy = vmw_read(dev_priv, SVGA_REG_BUSY);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return (busy == 0);
+}
+
+
+bool vmw_fence_signaled(struct vmw_private *dev_priv,
+ uint32_t sequence)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ struct vmw_fifo_state *fifo_state;
+ bool ret;
+
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return true;
+
+ dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return true;
+
+ fifo_state = &dev_priv->fifo;
+ if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+ vmw_fifo_idle(dev_priv, sequence))
+ return true;
+
+ /**
+	 * The code below signals stale fences that have wrapped around.
+ * First, block fence submission.
+ */
+
+ down_read(&fifo_state->rwsem);
+
+ /**
+	 * Then check whether the sequence is higher than what we have
+	 * actually emitted; if so, the fence is stale and counts as signaled.
+ */
+
+ ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
+ up_read(&fifo_state->rwsem);
+
+ return ret;
+}
+
+int vmw_fallback_wait(struct vmw_private *dev_priv,
+ bool lazy,
+ bool fifo_idle,
+ uint32_t sequence,
+ bool interruptible,
+ unsigned long timeout)
+{
+ struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+
+ uint32_t count = 0;
+ uint32_t signal_seq;
+ int ret;
+ unsigned long end_jiffies = jiffies + timeout;
+ bool (*wait_condition)(struct vmw_private *, uint32_t);
+ DEFINE_WAIT(__wait);
+
+ wait_condition = (fifo_idle) ? &vmw_fifo_idle :
+ &vmw_fence_signaled;
+
+ /**
+ * Block command submission while waiting for idle.
+ */
+
+ if (fifo_idle)
+ down_read(&fifo_state->rwsem);
+ signal_seq = dev_priv->fence_seq;
+ ret = 0;
+
+ for (;;) {
+ prepare_to_wait(&dev_priv->fence_queue, &__wait,
+ (interruptible) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (wait_condition(dev_priv, sequence))
+ break;
+ if (time_after_eq(jiffies, end_jiffies)) {
+ DRM_ERROR("SVGA device lockup.\n");
+ break;
+ }
+ if (lazy)
+ schedule_timeout(1);
+ else if ((++count & 0x0F) == 0) {
+ /**
+ * FIXME: Use schedule_hr_timeout here for
+ * newer kernels and lower CPU utilization.
+ */
+
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ __set_current_state((interruptible) ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ }
+ if (interruptible && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&dev_priv->fence_queue, &__wait);
+ if (ret == 0 && fifo_idle) {
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
+ }
+ wake_up_all(&dev_priv->fence_queue);
+ if (fifo_idle)
+ up_read(&fifo_state->rwsem);
+
+ return ret;
+}
+
+int vmw_wait_fence(struct vmw_private *dev_priv,
+ bool lazy, uint32_t sequence,
+ bool interruptible, unsigned long timeout)
+{
+ long ret;
+ unsigned long irq_flags;
+ struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+ if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+ return 0;
+
+ if (likely(vmw_fence_signaled(dev_priv, sequence)))
+ return 0;
+
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+
+ if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
+ return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+ interruptible, timeout);
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+ interruptible, timeout);
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ outl(SVGA_IRQFLAG_ANY_FENCE,
+ dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) |
+ SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout
+ (dev_priv->fence_queue,
+ vmw_fence_signaled(dev_priv, sequence),
+ timeout);
+ else
+ ret = wait_event_timeout
+ (dev_priv->fence_queue,
+ vmw_fence_signaled(dev_priv, sequence),
+ timeout);
+
+ if (unlikely(ret == 0))
+ ret = -EBUSY;
+ else if (likely(ret > 0))
+ ret = 0;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK,
+ vmw_read(dev_priv, SVGA_REG_IRQMASK) &
+ ~SVGA_IRQFLAG_ANY_FENCE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return ret;
+}
+
+void vmw_irq_preinstall(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return;
+
+ spin_lock_init(&dev_priv->irq_lock);
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+int vmw_irq_postinstall(struct drm_device *dev)
+{
+ return 0;
+}
+
+void vmw_irq_uninstall(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ uint32_t status;
+
+ if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
+ return;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+}
+
+#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
+
+int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_fence_wait_arg *arg =
+ (struct drm_vmw_fence_wait_arg *)data;
+ unsigned long timeout;
+
+ if (!arg->cookie_valid) {
+ arg->cookie_valid = 1;
+ arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
+ }
+
+ timeout = jiffies;
+ if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
+ return -EBUSY;
+
+ timeout = (unsigned long)arg->kernel_cookie - timeout;
+ return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 00000000000..b1af76e371c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,872 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+/* Might need an hrtimer here? */
+#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
+
+
+void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+{
+ if (du->cursor_surface)
+ vmw_surface_unreference(&du->cursor_surface);
+ if (du->cursor_dmabuf)
+ vmw_dmabuf_unreference(&du->cursor_dmabuf);
+ drm_crtc_cleanup(&du->crtc);
+ drm_encoder_cleanup(&du->encoder);
+ drm_connector_cleanup(&du->connector);
+}
+
+/*
+ * Display Unit Cursor functions
+ */
+
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY)
+{
+ struct {
+ u32 cmd;
+ SVGAFifoCmdDefineAlphaCursor cursor;
+ } *cmd;
+ u32 image_size = width * height * 4;
+ u32 cmd_size = sizeof(*cmd) + image_size;
+
+ if (!image)
+ return -EINVAL;
+
+ cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(&cmd[1], image, image_size);
+
+ cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
+ cmd->cursor.id = cpu_to_le32(0);
+ cmd->cursor.width = cpu_to_le32(width);
+ cmd->cursor.height = cpu_to_le32(height);
+ cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
+ cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+
+ vmw_fifo_commit(dev_priv, cmd_size);
+
+ return 0;
+}
+
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y)
+{
+ __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ uint32_t count;
+
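+	/* The CURSOR_COUNT bump signals the device that the cursor registers changed. */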
+ iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
+ iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
+ iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
+ count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+ iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
+}
+
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_surface *surface = NULL;
+ struct vmw_dma_buffer *dmabuf = NULL;
+ int ret;
+
+ if (handle) {
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ handle, &surface);
+ if (!ret) {
+ if (!surface->snooper.image) {
+ DRM_ERROR("surface not suitable for cursor\n");
+ return -EINVAL;
+ }
+ } else {
+ ret = vmw_user_dmabuf_lookup(tfile,
+ handle, &dmabuf);
+ if (ret) {
+ DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* takedown old cursor */
+ if (du->cursor_surface) {
+ du->cursor_surface->snooper.crtc = NULL;
+ vmw_surface_unreference(&du->cursor_surface);
+ }
+ if (du->cursor_dmabuf)
+ vmw_dmabuf_unreference(&du->cursor_dmabuf);
+
+ /* setup new image */
+ if (surface) {
+ /* vmw_user_surface_lookup takes one reference */
+ du->cursor_surface = surface;
+
+ du->cursor_surface->snooper.crtc = crtc;
+ du->cursor_age = du->cursor_surface->snooper.age;
+ vmw_cursor_update_image(dev_priv, surface->snooper.image,
+ 64, 64, du->hotspot_x, du->hotspot_y);
+ } else if (dmabuf) {
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ void *virtual;
+ bool dummy;
+
+		/* vmw_user_dmabuf_lookup takes one reference */
+ du->cursor_dmabuf = dmabuf;
+
+ kmap_offset = 0;
+ kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return -EINVAL;
+ }
+
+ ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ vmw_cursor_update_image(dev_priv, virtual, 64, 64,
+ du->hotspot_x, du->hotspot_y);
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(&dmabuf->base);
+
+ } else {
+ vmw_cursor_update_position(dev_priv, false, 0, 0);
+ return 0;
+ }
+
+ vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
+
+ return 0;
+}
+
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+	bool shown = du->cursor_surface || du->cursor_dmabuf;
+
+ du->cursor_x = x + crtc->x;
+ du->cursor_y = y + crtc->y;
+
+ vmw_cursor_update_position(dev_priv, shown,
+ du->cursor_x, du->cursor_y);
+
+ return 0;
+}
+
+void vmw_kms_cursor_snoop(struct vmw_surface *srf,
+ struct ttm_object_file *tfile,
+ struct ttm_buffer_object *bo,
+ SVGA3dCmdHeader *header)
+{
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_num;
+ SVGA3dCopyBox *box;
+ unsigned box_count;
+ void *virtual;
+ bool dummy;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+
+ /* No snooper installed */
+ if (!srf->snooper.image)
+ return;
+
+ if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
+ DRM_ERROR("face and mipmap for cursors should never != 0\n");
+ return;
+ }
+
+ if (cmd->header.size < 64) {
+ DRM_ERROR("at least one full copy box must be given\n");
+ return;
+ }
+
+ box = (SVGA3dCopyBox *)&cmd[1];
+ box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
+ sizeof(SVGA3dCopyBox);
+
+ if (cmd->dma.guest.pitch != (64 * 4) ||
+ cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+ box->x != 0 || box->y != 0 || box->z != 0 ||
+ box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
+ box->w != 64 || box->h != 64 || box->d != 1 ||
+ box_count != 1) {
+		/* TODO handle non page-aligned offsets */
+		/* TODO handle partial uploads and pitch != 256 */
+		/* TODO handle more than one copy (size != 64) */
+		DRM_ERROR("unsupported cursor DMA layout\n");
+ return;
+ }
+
+ kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
+ kmap_num = (64*64*4) >> PAGE_SHIFT;
+
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("reserve failed\n");
+ return;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0))
+ goto err_unreserve;
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+
+ memcpy(srf->snooper.image, virtual, 64*64*4);
+ srf->snooper.age++;
+
+	/* We can't update the cursor image from here, since execbuf has
+	 * already reserved FIFO space.
+ *
+ * if (srf->snooper.crtc)
+ * vmw_ldu_crtc_cursor_update_image(dev_priv,
+ * srf->snooper.image, 64, 64,
+ * du->hotspot_x, du->hotspot_y);
+ */
+
+ ttm_bo_kunmap(&map);
+err_unreserve:
+ ttm_bo_unreserve(bo);
+}
+
+void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_display_unit *du;
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ if (!du->cursor_surface ||
+ du->cursor_age == du->cursor_surface->snooper.age)
+ continue;
+
+ du->cursor_age = du->cursor_surface->snooper.age;
+ vmw_cursor_update_image(dev_priv,
+ du->cursor_surface->snooper.image,
+ 64, 64, du->hotspot_x, du->hotspot_y);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+/*
+ * Generic framebuffer code
+ */
+
+int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+	if (handle)
+		*handle = 0;
+
+ return 0;
+}
+
+/*
+ * Surface framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbs(x) \
+ container_of(x, struct vmw_framebuffer_surface, base.base)
+
+struct vmw_framebuffer_surface {
+ struct vmw_framebuffer base;
+ struct vmw_surface *surface;
+ struct delayed_work d_work;
+ struct mutex work_lock;
+ bool present_fs;
+};
+
+void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+{
+ struct vmw_framebuffer_surface *vfb =
+ vmw_framebuffer_to_vfbs(framebuffer);
+
+ cancel_delayed_work_sync(&vfb->d_work);
+ drm_framebuffer_cleanup(framebuffer);
+ vmw_surface_unreference(&vfb->surface);
+
+ kfree(framebuffer);
+}
+
+static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
+{
+ struct delayed_work *d_work =
+ container_of(work, struct delayed_work, work);
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(d_work, struct vmw_framebuffer_surface, d_work);
+ struct vmw_surface *surf = vfbs->surface;
+ struct drm_framebuffer *framebuffer = &vfbs->base.base;
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
+ } *cmd;
+
+ mutex_lock(&vfbs->work_lock);
+ if (!vfbs->present_fs)
+ goto out_unlock;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
+ goto out_resched;
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+ cmd->cr.x = cpu_to_le32(0);
+ cmd->cr.y = cpu_to_le32(0);
+ cmd->cr.srcx = cmd->cr.x;
+ cmd->cr.srcy = cmd->cr.y;
+ cmd->cr.w = cpu_to_le32(framebuffer->width);
+ cmd->cr.h = cpu_to_le32(framebuffer->height);
+ vfbs->present_fs = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+out_resched:
+ /**
+ * Will not re-add if already pending.
+ */
+ schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+out_unlock:
+ mutex_unlock(&vfbs->work_lock);
+}
+
+
+int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct vmw_framebuffer_surface *vfbs =
+ vmw_framebuffer_to_vfbs(framebuffer);
+ struct vmw_surface *surf = vfbs->surface;
+ struct drm_clip_rect norect;
+ SVGA3dCopyRect *cr;
+ int i, inc = 1;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdPresent body;
+ SVGA3dCopyRect cr;
+ } *cmd;
+
+ if (!num_clips ||
+ !(dev_priv->fifo.capabilities &
+ SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+ int ret;
+
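+		/*
+		 * When there are no clips, or no screen-object support,
+		 * present the whole surface via the rate-limited delayed
+		 * work instead of per-clip presents.
+		 */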
+ mutex_lock(&vfbs->work_lock);
+ vfbs->present_fs = true;
+ ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
+ mutex_unlock(&vfbs->work_lock);
+ if (ret) {
+ /**
+			 * No work was pending; force an immediate present.
+ */
+ vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
+ }
+ return 0;
+ }
+
+ if (!num_clips) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ inc = 2; /* skip source rects */
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
+ cmd->body.sid = cpu_to_le32(surf->res.id);
+
+ for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
+ cr->x = cpu_to_le16(clips->x1);
+ cr->y = cpu_to_le16(clips->y1);
+ cr->srcx = cr->x;
+ cr->srcy = cr->y;
+ cr->w = cpu_to_le16(clips->x2 - clips->x1);
+ cr->h = cpu_to_le16(clips->y2 - clips->y1);
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+ .destroy = vmw_framebuffer_surface_destroy,
+ .dirty = vmw_framebuffer_surface_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+ struct vmw_surface *surface,
+ struct vmw_framebuffer **out,
+ unsigned width, unsigned height)
+
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_framebuffer_surface *vfbs;
+ int ret;
+
+ vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
+ if (!vfbs) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = drm_framebuffer_init(dev, &vfbs->base.base,
+ &vmw_framebuffer_surface_funcs);
+ if (ret)
+ goto out_err2;
+
+ if (!vmw_surface_reference(surface)) {
+ DRM_ERROR("failed to reference surface %p\n", surface);
+ goto out_err3;
+ }
+
+ /* XXX get the first 3 from the surface info */
+ vfbs->base.base.bits_per_pixel = 32;
+ vfbs->base.base.pitch = width * 32 / 4;
+ vfbs->base.base.depth = 24;
+ vfbs->base.base.width = width;
+ vfbs->base.base.height = height;
+ vfbs->base.pin = NULL;
+ vfbs->base.unpin = NULL;
+ vfbs->surface = surface;
+ mutex_init(&vfbs->work_lock);
+ INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+ *out = &vfbs->base;
+
+ return 0;
+
+out_err3:
+ drm_framebuffer_cleanup(&vfbs->base.base);
+out_err2:
+ kfree(vfbs);
+out_err1:
+ return ret;
+}
+
+/*
+ * Dmabuf framebuffer code
+ */
+
+#define vmw_framebuffer_to_vfbd(x) \
+ container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+
+struct vmw_framebuffer_dmabuf {
+ struct vmw_framebuffer base;
+ struct vmw_dma_buffer *buffer;
+};
+
+void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+{
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(framebuffer);
+
+ drm_framebuffer_cleanup(framebuffer);
+ vmw_dmabuf_unreference(&vfbd->buffer);
+
+ kfree(vfbd);
+}
+
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+ struct drm_clip_rect norect;
+ struct {
+ uint32_t header;
+ SVGAFifoCmdUpdate body;
+ } *cmd;
+ int i, increment = 1;
+
+ if (!num_clips ||
+ !(dev_priv->fifo.capabilities &
+ SVGA_FIFO_CAP_SCREEN_OBJECT)) {
+ num_clips = 1;
+ clips = &norect;
+ norect.x1 = norect.y1 = 0;
+ norect.x2 = framebuffer->width;
+ norect.y2 = framebuffer->height;
+ } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ num_clips /= 2;
+ increment = 2;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_clips; i++, clips += increment) {
+ cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
+ cmd[i].body.x = cpu_to_le32(clips[i].x1);
+ cmd[i].body.y = cpu_to_le32(clips[i].y1);
+ cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1);
+ cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1);
+ }
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+
+ return 0;
+}
+
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+ .destroy = vmw_framebuffer_dmabuf_destroy,
+ .dirty = vmw_framebuffer_dmabuf_dirty,
+ .create_handle = vmw_framebuffer_create_handle,
+};
+
+static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+ int ret;
+
+ vmw_overlay_pause_all(dev_priv);
+
+ ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
+
+ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
+ vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
+ vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
+ vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
+ vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
+ vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+ vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+ vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+ } else
+ WARN_ON(true);
+
+ vmw_overlay_resume_all(dev_priv);
+
+ return 0;
+}
+
+static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_framebuffer_dmabuf *vfbd =
+ vmw_framebuffer_to_vfbd(&vfb->base);
+
+ if (!vfbd->buffer) {
+ WARN_ON(!vfbd->buffer);
+ return 0;
+ }
+
+ return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
+}
+
+int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_framebuffer **out,
+ unsigned width, unsigned height)
+
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct vmw_framebuffer_dmabuf *vfbd;
+ int ret;
+
+ vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
+ if (!vfbd) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ ret = drm_framebuffer_init(dev, &vfbd->base.base,
+ &vmw_framebuffer_dmabuf_funcs);
+ if (ret)
+ goto out_err2;
+
+ if (!vmw_dmabuf_reference(dmabuf)) {
+ DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
+ goto out_err3;
+ }
+
+ /* XXX get the first 3 from the surface info */
+ vfbd->base.base.bits_per_pixel = 32;
+ vfbd->base.base.pitch = width * 32 / 4;
+ vfbd->base.base.depth = 24;
+ vfbd->base.base.width = width;
+ vfbd->base.base.height = height;
+ vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+ vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+ vfbd->buffer = dmabuf;
+ *out = &vfbd->base;
+
+ return 0;
+
+out_err3:
+ drm_framebuffer_cleanup(&vfbd->base.base);
+out_err2:
+ kfree(vfbd);
+out_err1:
+ return ret;
+}
+
+/*
+ * Generic Kernel modesetting functions
+ */
+
+static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd *mode_cmd)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_framebuffer *vfb = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_dma_buffer *bo = NULL;
+ int ret;
+
+ ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+ mode_cmd->handle, &surface);
+ if (ret)
+ goto try_dmabuf;
+
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+ mode_cmd->width, mode_cmd->height);
+
+	/* vmw_user_surface_lookup takes one ref, and so does new_fb */
+ vmw_surface_unreference(&surface);
+
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return NULL;
+ }
+ return &vfb->base;
+
+try_dmabuf:
+ DRM_INFO("%s: trying buffer\n", __func__);
+
+ ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
+ if (ret) {
+ DRM_ERROR("failed to find buffer: %i\n", ret);
+ return NULL;
+ }
+
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+ mode_cmd->width, mode_cmd->height);
+
+	/* vmw_user_dmabuf_lookup takes one ref, and so does new_fb */
+ vmw_dmabuf_unreference(&bo);
+
+ if (ret) {
+ DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+ return NULL;
+ }
+
+ return &vfb->base;
+}
+
+static int vmw_kms_fb_changed(struct drm_device *dev)
+{
+ return 0;
+}
+
+static struct drm_mode_config_funcs vmw_kms_funcs = {
+ .fb_create = vmw_kms_fb_create,
+ .fb_changed = vmw_kms_fb_changed,
+};
+
+int vmw_kms_init(struct vmw_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ int ret;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = &vmw_kms_funcs;
+ dev->mode_config.min_width = 640;
+ dev->mode_config.min_height = 480;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ ret = vmw_kms_init_legacy_display_system(dev_priv);
+
+ return 0;
+}
+
+int vmw_kms_close(struct vmw_private *dev_priv)
+{
+ /*
+	 * Docs say we should take the mode_config lock before calling this
+	 * function, but since it destroys encoders and our destructor calls
+	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
+ */
+ drm_mode_config_cleanup(dev_priv->dev);
+ vmw_kms_close_legacy_display_system(dev_priv);
+ return 0;
+}
+
+int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_cursor_bypass_arg *arg = data;
+ struct vmw_display_unit *du;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ mutex_lock(&dev->mode_config.mutex);
+ if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ du = vmw_crtc_to_du(crtc);
+ du->hotspot_x = arg->xhot;
+ du->hotspot_y = arg->yhot;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+ }
+
+ obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ crtc = obj_to_crtc(obj);
+ du = vmw_crtc_to_du(crtc);
+
+ du->hotspot_x = arg->xhot;
+ du->hotspot_y = arg->yhot;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
+ /*
+ * Set up a single multimon monitor with a size of 0x0.
+ * This stops the UI from resizing when we change the
+ * framebuffer size.
+ */
+ if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+ vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
+ vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
+ vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
+ vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
+ vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
+ vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
+ vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
+ vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+
+ return 0;
+}
+
+int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
+{
+ vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
+ vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
+ vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
+ vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
+ vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
+ vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
+ vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
+ vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
+
+ /* TODO check for multimon */
+ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 00000000000..8b95249f053
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,102 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_KMS_H_
+#define VMWGFX_KMS_H_
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+
+#define vmw_framebuffer_to_vfb(x) \
+ container_of(x, struct vmw_framebuffer, base)
+
+/**
+ * Base class for framebuffers
+ *
+ * @pin is called whenever a crtc uses this framebuffer
+ * @unpin is called when no crtc uses this framebuffer any longer
+ */
+struct vmw_framebuffer {
+ struct drm_framebuffer base;
+ int (*pin)(struct vmw_framebuffer *fb);
+ int (*unpin)(struct vmw_framebuffer *fb);
+};
+
+
+#define vmw_crtc_to_du(x) \
+ container_of(x, struct vmw_display_unit, crtc)
+
+/*
+ * Basic cursor manipulation
+ */
+int vmw_cursor_update_image(struct vmw_private *dev_priv,
+ u32 *image, u32 width, u32 height,
+ u32 hotspotX, u32 hotspotY);
+void vmw_cursor_update_position(struct vmw_private *dev_priv,
+ bool show, int x, int y);
+
+/**
+ * Base class display unit.
+ *
+ * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
+ * the display unit is all of them at the same time. This is true for both
+ * legacy multimon and screen objects.
+ */
+struct vmw_display_unit {
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct vmw_surface *cursor_surface;
+ struct vmw_dma_buffer *cursor_dmabuf;
+ size_t cursor_age;
+
+ int cursor_x;
+ int cursor_y;
+
+ int hotspot_x;
+ int hotspot_y;
+
+ unsigned unit;
+};
+
+/*
+ * Shared display unit functions - vmwgfx_kms.c
+ */
+void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+
+/*
+ * Legacy display unit functions - vmwgfx_ldu.h
+ */
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 00000000000..90891593bf6
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,516 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+#define vmw_crtc_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.crtc)
+#define vmw_encoder_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.encoder)
+#define vmw_connector_to_ldu(x) \
+ container_of(x, struct vmw_legacy_display_unit, base.connector)
+
+struct vmw_legacy_display {
+ struct list_head active;
+
+ unsigned num_active;
+
+ struct vmw_framebuffer *fb;
+};
+
+/**
+ * Display unit using the legacy register interface.
+ */
+struct vmw_legacy_display_unit {
+ struct vmw_display_unit base;
+
+ struct list_head active;
+
+ unsigned unit;
+};
+
+static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
+{
+ list_del_init(&ldu->active);
+ vmw_display_unit_cleanup(&ldu->base);
+ kfree(ldu);
+}
+
+
+/*
+ * Legacy Display Unit CRTC functions
+ */
+
+static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t size)
+{
+}
+
+static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
+{
+ vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
+}
+
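+/*
+ * vmw_ldu_commit_list - Reprogram the SVGA display registers.
+ *
+ * First clears all guest displays and then writes the position and size of
+ * every display unit on the active list.
+ */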
+static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+{
+ struct vmw_legacy_display *lds = dev_priv->ldu_priv;
+ struct vmw_legacy_display_unit *entry;
+ struct drm_crtc *crtc;
+ int i = 0;
+
+ /* to stop the screen from changing size on resize */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+ for (i = 0; i < lds->num_active; i++) {
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+ /* Now set the mode */
+ vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+ i = 0;
+ list_for_each_entry(entry, &lds->active, active) {
+ crtc = &entry->base.crtc;
+
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
+ vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
+ struct vmw_legacy_display_unit *ldu)
+{
+ struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+ if (list_empty(&ldu->active))
+ return 0;
+
+ list_del_init(&ldu->active);
+ if (--(ld->num_active) == 0) {
+ BUG_ON(!ld->fb);
+ if (ld->fb->unpin)
+ ld->fb->unpin(ld->fb);
+ ld->fb = NULL;
+ }
+
+ return 0;
+}
+
+static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
+ struct vmw_legacy_display_unit *ldu,
+ struct vmw_framebuffer *vfb)
+{
+ struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
+ struct vmw_legacy_display_unit *entry;
+ struct list_head *at;
+
+ if (!list_empty(&ldu->active))
+ return 0;
+
+ at = &ld->active;
+ list_for_each_entry(entry, &ld->active, active) {
+ if (entry->unit > ldu->unit)
+ break;
+
+ at = &entry->active;
+ }
+
+ list_add(&ldu->active, at);
+ if (ld->num_active++ == 0) {
+ BUG_ON(ld->fb);
+ if (vfb->pin)
+ vfb->pin(vfb);
+ ld->fb = vfb;
+ }
+
+ return 0;
+}
+
+static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_connector *connector;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct vmw_framebuffer *vfb;
+ struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
+
+ if (!set)
+ return -EINVAL;
+
+ if (!set->crtc)
+ return -EINVAL;
+
+ /* get the ldu */
+ crtc = set->crtc;
+ ldu = vmw_crtc_to_ldu(crtc);
+ vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+ dev_priv = vmw_priv(crtc->dev);
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("to many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &ldu->base.connector) {
+ DRM_ERROR("connector doesn't match %p %p\n",
+ set->connectors[0], &ldu->base.connector);
+ return -EINVAL;
+ }
+
+ /* ldu only supports one fb active at a time */
+ if (dev_priv->ldu_priv->fb && vfb &&
+ dev_priv->ldu_priv->fb != vfb) {
+ DRM_ERROR("Multiple framebuffers not supported\n");
+ return -EINVAL;
+ }
+
+ /* since they always map one to one, these are safe */
+ connector = &ldu->base.connector;
+ encoder = &ldu->base.encoder;
+
+ /* should we turn the crtc off? */
+ if (set->num_connectors == 0 || !set->mode || !set->fb) {
+
+ connector->encoder = NULL;
+ encoder->crtc = NULL;
+ crtc->fb = NULL;
+
+ vmw_ldu_del_active(dev_priv, ldu);
+
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
+ }
+
+ /* we now know we want to set a mode */
+ mode = set->mode;
+ fb = set->fb;
+
+ if (set->x + mode->hdisplay > fb->width ||
+ set->y + mode->vdisplay > fb->height) {
+ DRM_ERROR("set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ vmw_fb_off(dev_priv);
+
+ crtc->fb = fb;
+ encoder->crtc = crtc;
+ connector->encoder = encoder;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ crtc->mode = *mode;
+
+ vmw_ldu_add_active(dev_priv, ldu, vfb);
+
+ vmw_ldu_commit_list(dev_priv);
+
+ return 0;
+}
+
+static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+ .save = vmw_ldu_crtc_save,
+ .restore = vmw_ldu_crtc_restore,
+ .cursor_set = vmw_du_crtc_cursor_set,
+ .cursor_move = vmw_du_crtc_cursor_move,
+ .gamma_set = vmw_ldu_crtc_gamma_set,
+ .destroy = vmw_ldu_crtc_destroy,
+ .set_config = vmw_ldu_crtc_set_config,
+};
+
+/*
+ * Legacy Display Unit encoder functions
+ */
+
+static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
+{
+ vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
+}
+
+static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
+ .destroy = vmw_ldu_encoder_destroy,
+};
+
+/*
+ * Legacy Display Unit connector functions
+ */
+
+static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+static void vmw_ldu_connector_save(struct drm_connector *connector)
+{
+}
+
+static void vmw_ldu_connector_restore(struct drm_connector *connector)
+{
+}
+
+static enum drm_connector_status
+ vmw_ldu_connector_detect(struct drm_connector *connector)
+{
+ /* XXX vmwctrl should control connection status */
+ if (vmw_connector_to_ldu(connector)->base.unit == 0)
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static struct drm_display_mode vmw_ldu_connector_builtin[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600",
+ DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
+ 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* Terminate */
+ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
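+/*
+ * vmw_ldu_connector_fill_modes - Fill the connector mode list.
+ *
+ * Duplicates entries from the builtin mode table above, skipping modes
+ * larger than max_width x max_height.
+ */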
+static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
+ uint32_t max_width, uint32_t max_height)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ int i;
+
+ for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
+ vmw_ldu_connector_builtin[i].vdisplay > max_height)
+ continue;
+
+ mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+ if (!mode)
+ return 0;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ drm_mode_connector_list_update(connector);
+
+ return 1;
+}
+
+static int vmw_ldu_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void vmw_ldu_connector_destroy(struct drm_connector *connector)
+{
+ vmw_ldu_destroy(vmw_connector_to_ldu(connector));
+}
+
+static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+ .dpms = vmw_ldu_connector_dpms,
+ .save = vmw_ldu_connector_save,
+ .restore = vmw_ldu_connector_restore,
+ .detect = vmw_ldu_connector_detect,
+ .fill_modes = vmw_ldu_connector_fill_modes,
+ .set_property = vmw_ldu_connector_set_property,
+ .destroy = vmw_ldu_connector_destroy,
+};
+
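+/*
+ * vmw_ldu_init - Set up one legacy display unit.
+ *
+ * Initializes the combined crtc, encoder and connector for display unit
+ * @unit and attaches the dirty_info property to the connector.
+ */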
+static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+{
+ struct vmw_legacy_display_unit *ldu;
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
+ if (!ldu)
+ return -ENOMEM;
+
+ ldu->unit = unit;
+ crtc = &ldu->base.crtc;
+ encoder = &ldu->base.encoder;
+ connector = &ldu->base.connector;
+
+ drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ /* Initial status */
+ if (unit == 0)
+ connector->status = connector_status_connected;
+ else
+ connector->status = connector_status_disconnected;
+
+ drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_mode_connector_attach_encoder(connector, encoder);
+ encoder->possible_crtcs = (1 << unit);
+ encoder->possible_clones = 0;
+
+ INIT_LIST_HEAD(&ldu->active);
+
+ drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.dirty_info_property,
+ 1);
+
+ return 0;
+}
+
+int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
+{
+ if (dev_priv->ldu_priv) {
+ DRM_INFO("ldu system already on\n");
+ return -EINVAL;
+ }
+
+ dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
+
+ if (!dev_priv->ldu_priv)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
+ dev_priv->ldu_priv->num_active = 0;
+ dev_priv->ldu_priv->fb = NULL;
+
+ drm_mode_create_dirty_info_property(dev_priv->dev);
+
+ vmw_ldu_init(dev_priv, 0);
+ vmw_ldu_init(dev_priv, 1);
+ vmw_ldu_init(dev_priv, 2);
+ vmw_ldu_init(dev_priv, 3);
+ vmw_ldu_init(dev_priv, 4);
+ vmw_ldu_init(dev_priv, 5);
+ vmw_ldu_init(dev_priv, 6);
+ vmw_ldu_init(dev_priv, 7);
+
+ return 0;
+}
+
+int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
+{
+ if (!dev_priv->ldu_priv)
+ return -ENOSYS;
+
+ BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
+
+ kfree(dev_priv->ldu_priv);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 00000000000..bb6e6a096d2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,634 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#include "ttm/ttm_placement.h"
+
+#include "svga_overlay.h"
+#include "svga_escape.h"
+
+#define VMW_MAX_NUM_STREAMS 1
+
+struct vmw_stream {
+ struct vmw_dma_buffer *buf;
+ bool claimed;
+ bool paused;
+ struct drm_vmw_control_stream_arg saved;
+};
+
+/**
+ * Overlay control
+ */
+struct vmw_overlay {
+ /*
+ * Each stream is a single overlay. In Xv these are called ports.
+ */
+ struct mutex mutex;
+ struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
+};
+
+static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ return dev_priv ? dev_priv->overlay_priv : NULL;
+}
+
+struct vmw_escape_header {
+ uint32_t cmd;
+ SVGAFifoCmdEscape body;
+};
+
+struct vmw_escape_video_flush {
+ struct vmw_escape_header escape;
+ SVGAEscapeVideoFlush flush;
+};
+
+static inline void fill_escape(struct vmw_escape_header *header,
+ uint32_t size)
+{
+ header->cmd = SVGA_CMD_ESCAPE;
+ header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
+ header->body.size = size;
+}
+
+static inline void fill_flush(struct vmw_escape_video_flush *cmd,
+ uint32_t stream_id)
+{
+ fill_escape(&cmd->escape, sizeof(cmd->flush));
+ cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
+ cmd->flush.streamId = stream_id;
+}
+
+/**
+ * Pin or unpin a buffer in vram.
+ *
+ * @dev_priv: Driver private.
+ * @buf: DMA buffer to pin or unpin.
+ * @pin: Pin buffer in vram if true.
+ * @interruptible: Use interruptible wait.
+ *
+ * Takes the current master's ttm lock in read mode.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool pin, bool interruptible)
+{
+ struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_bo_global *glob = bo->glob;
+ struct ttm_placement *overlay_placement = &vmw_vram_placement;
+ int ret;
+
+ ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ if (unlikely(ret != 0))
+ goto err;
+
+ if (buf->gmr_bound) {
+ vmw_gmr_unbind(dev_priv, buf->gmr_id);
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
+ spin_unlock(&glob->lru_lock);
+ buf->gmr_bound = false;
+ }
+
+ if (pin)
+ overlay_placement = &vmw_vram_ne_placement;
+
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+
+ ttm_bo_unreserve(bo);
+
+err:
+ ttm_read_unlock(&dev_priv->active_master->lock);
+
+ return ret;
+}
+
+/**
+ * Send put command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_put(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct drm_vmw_control_stream_arg *arg,
+ bool interruptible)
+{
+ struct {
+ struct vmw_escape_header escape;
+ struct {
+ struct {
+ uint32_t cmdType;
+ uint32_t streamId;
+ } header;
+ struct {
+ uint32_t registerId;
+ uint32_t value;
+ } items[SVGA_VIDEO_PITCH_3 + 1];
+ } body;
+ struct vmw_escape_video_flush flush;
+ } *cmds;
+ uint32_t offset;
+ int i, ret;
+
+ for (;;) {
+ cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ if (cmds)
+ break;
+
+ ret = vmw_fallback_wait(dev_priv, false, true, 0,
+ interruptible, 3*HZ);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ fill_escape(&cmds->escape, sizeof(cmds->body));
+ cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->body.header.streamId = arg->stream_id;
+
+ for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
+ cmds->body.items[i].registerId = i;
+
+ offset = buf->base.offset + arg->offset;
+
+ cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
+ cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
+ cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
+ cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
+ cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
+ cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
+ cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
+ cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
+ cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
+ cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
+ cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
+ cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
+ cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
+ cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
+ cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
+ cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
+ cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
+ cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
+ cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
+
+ fill_flush(&cmds->flush, arg->stream_id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+ return 0;
+}
+
+/**
+ * Send stop command to hw.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
+ uint32_t stream_id,
+ bool interruptible)
+{
+ struct {
+ struct vmw_escape_header escape;
+ SVGAEscapeVideoSetRegs body;
+ struct vmw_escape_video_flush flush;
+ } *cmds;
+ int ret;
+
+ for (;;) {
+ cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ if (cmds)
+ break;
+
+ ret = vmw_fallback_wait(dev_priv, false, true, 0,
+ interruptible, 3*HZ);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ fill_escape(&cmds->escape, sizeof(cmds->body));
+ cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+ cmds->body.header.streamId = stream_id;
+ cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
+ cmds->body.items[0].value = false;
+ fill_flush(&cmds->flush, stream_id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmds));
+
+ return 0;
+}
+
+/**
+ * Stop or pause a stream.
+ *
+ * If the stream is paused, the no_evict flag is removed from the buffer,
+ * but the buffer is left in vram. This allows, for instance, mode_set
+ * to evict it should it need to.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * @stream_id which stream to stop/pause.
+ * @pause true to pause, false to stop completely.
+ */
+static int vmw_overlay_stop(struct vmw_private *dev_priv,
+ uint32_t stream_id, bool pause,
+ bool interruptible)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct vmw_stream *stream = &overlay->stream[stream_id];
+ int ret;
+
+ /* no buffer attached, so the stream is already completely stopped */
+ if (!stream->buf)
+ return 0;
+
+ /* If the stream is paused this is already done */
+ if (!stream->paused) {
+ ret = vmw_overlay_send_stop(dev_priv, stream_id,
+ interruptible);
+ if (ret)
+ return ret;
+
+ /* We just remove the NO_EVICT flag so no -ENOMEM */
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
+ interruptible);
+ if (interruptible && ret == -ERESTARTSYS)
+ return ret;
+ else
+ BUG_ON(ret != 0);
+ }
+
+ if (!pause) {
+ vmw_dmabuf_unreference(&stream->buf);
+ stream->paused = false;
+ } else {
+ stream->paused = true;
+ }
+
+ return 0;
+}
+
+/**
+ * Update a stream and send any put or stop fifo commands needed.
+ *
+ * The caller must hold the overlay lock.
+ *
+ * Returns
+ * -ENOMEM if buffer doesn't fit in vram.
+ * -ERESTARTSYS if interrupted.
+ */
+static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ struct drm_vmw_control_stream_arg *arg,
+ bool interruptible)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct vmw_stream *stream = &overlay->stream[arg->stream_id];
+ int ret = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
+ stream->buf, buf, stream->paused ? "" : "not ");
+
+ if (stream->buf != buf) {
+ ret = vmw_overlay_stop(dev_priv, arg->stream_id,
+ false, interruptible);
+ if (ret)
+ return ret;
+ } else if (!stream->paused) {
+ /* If the buffers match and the stream is not paused, just send
+ * the put command; no need to do anything else.
+ */
+ ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+ if (ret == 0)
+ stream->saved = *arg;
+ else
+ BUG_ON(!interruptible);
+
+ return ret;
+ }
+
+ /* We don't start the old stream if we are interrupted.
+ * Might return -ENOMEM if it can't fit the buffer in vram.
+ */
+ ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
+ if (ret)
+ return ret;
+
+ ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
+ if (ret) {
+ /* This one needs to happen no matter what. We only remove
+ * the NO_EVICT flag so this is safe from -ENOMEM.
+ */
+ BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
+ return ret;
+ }
+
+ if (stream->buf != buf)
+ stream->buf = vmw_dmabuf_reference(buf);
+ stream->saved = *arg;
+
+ return 0;
+}
+
+/**
+ * Stop all streams.
+ *
+ * Used by the fb code when starting.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_stop_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ struct vmw_stream *stream = &overlay->stream[i];
+ if (!stream->buf)
+ continue;
+
+ ret = vmw_overlay_stop(dev_priv, i, false, false);
+ WARN_ON(ret != 0);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+/**
+ * Try to resume all paused streams.
+ *
+ * Used by the kms code after moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_resume_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ struct vmw_stream *stream = &overlay->stream[i];
+ if (!stream->paused)
+ continue;
+
+ ret = vmw_overlay_update_stream(dev_priv, stream->buf,
+ &stream->saved, false);
+ if (ret != 0)
+ DRM_INFO("%s: *warning* failed to resume stream %i\n",
+ __func__, i);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
+/**
+ * Pauses all active streams.
+ *
+ * Used by the kms code when moving a new scanout buffer to vram.
+ *
+ * Takes the overlay lock.
+ */
+int vmw_overlay_pause_all(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, ret;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ if (overlay->stream[i].paused)
+ DRM_INFO("%s: *warning* stream %i already paused\n",
+ __func__, i);
+ ret = vmw_overlay_stop(dev_priv, i, true, false);
+ WARN_ON(ret != 0);
+ }
+
+ mutex_unlock(&overlay->mutex);
+
+ return 0;
+}
+
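+/*
+ * vmw_overlay_ioctl - Userspace entry point for controlling a stream.
+ *
+ * Looks up the stream and, depending on arg->enabled, either stops it or
+ * updates it with the supplied buffer. Takes the overlay lock.
+ */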
+int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ struct drm_vmw_control_stream_arg *arg =
+ (struct drm_vmw_control_stream_arg *)data;
+ struct vmw_dma_buffer *buf;
+ struct vmw_resource *res;
+ int ret;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
+ if (ret)
+ return ret;
+
+ mutex_lock(&overlay->mutex);
+
+ if (!arg->enabled) {
+ ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
+ goto out_unlock;
+ }
+
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+ if (ret)
+ goto out_unlock;
+
+ ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
+
+ vmw_dmabuf_unreference(&buf);
+
+out_unlock:
+ mutex_unlock(&overlay->mutex);
+ vmw_resource_unreference(&res);
+
+ return ret;
+}
+
+int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
+{
+ if (!dev_priv->overlay_priv)
+ return 0;
+
+ return VMW_MAX_NUM_STREAMS;
+}
+
+int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i, k;
+
+ if (!overlay)
+ return 0;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
+ if (!overlay->stream[i].claimed)
+ k++;
+
+ mutex_unlock(&overlay->mutex);
+
+ return k;
+}
+
+int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ int i;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ mutex_lock(&overlay->mutex);
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+
+ if (overlay->stream[i].claimed)
+ continue;
+
+ overlay->stream[i].claimed = true;
+ *out = i;
+ mutex_unlock(&overlay->mutex);
+ return 0;
+ }
+
+ mutex_unlock(&overlay->mutex);
+ return -ESRCH;
+}
+
+int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+
+ BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
+
+ if (!overlay)
+ return -ENOSYS;
+
+ mutex_lock(&overlay->mutex);
+
+ WARN_ON(!overlay->stream[stream_id].claimed);
+ vmw_overlay_stop(dev_priv, stream_id, false, false);
+ overlay->stream[stream_id].claimed = false;
+
+ mutex_unlock(&overlay->mutex);
+ return 0;
+}
+
+int vmw_overlay_init(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay;
+ int i;
+
+ if (dev_priv->overlay_priv)
+ return -EINVAL;
+
+ if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) ||
+ !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
+ DRM_INFO("hardware doesn't support overlays\n");
+ return -ENOSYS;
+ }
+
+ overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
+ return -ENOMEM;
+
+ memset(overlay, 0, sizeof(*overlay));
+ mutex_init(&overlay->mutex);
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ overlay->stream[i].buf = NULL;
+ overlay->stream[i].paused = false;
+ overlay->stream[i].claimed = false;
+ }
+
+ dev_priv->overlay_priv = overlay;
+
+ return 0;
+}
+
+int vmw_overlay_close(struct vmw_private *dev_priv)
+{
+ struct vmw_overlay *overlay = dev_priv->overlay_priv;
+ bool forgotten_buffer = false;
+ int i;
+
+ if (!overlay)
+ return -ENOSYS;
+
+ for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
+ if (overlay->stream[i].buf) {
+ forgotten_buffer = true;
+ vmw_overlay_stop(dev_priv, i, false, false);
+ }
+ }
+
+ WARN_ON(forgotten_buffer);
+
+ dev_priv->overlay_priv = NULL;
+ kfree(overlay);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 00000000000..9d0dd3a342e
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,57 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * This file contains virtual hardware defines for kernel space.
+ */
+
+#ifndef _VMWGFX_REG_H_
+#define _VMWGFX_REG_H_
+
+#include <linux/types.h>
+
+#define VMWGFX_INDEX_PORT 0x0
+#define VMWGFX_VALUE_PORT 0x1
+#define VMWGFX_IRQSTATUS_PORT 0x8
+
+struct svga_guest_mem_descriptor {
+ __le32 ppn;
+ __le32 num_pages;
+};
+
+struct svga_fifo_cmd_fence {
+ __le32 fence;
+};
+
+#define SVGA_SYNC_GENERIC 1
+#define SVGA_SYNC_FIFOFULL 2
+
+#include "svga_types.h"
+
+#include "svga3d_reg.h"
+
+#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 00000000000..c012d5927f6
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1183 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_drm.h"
+#include "ttm/ttm_object.h"
+#include "ttm/ttm_placement.h"
+#include "drmP.h"
+
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+
+struct vmw_user_context {
+ struct ttm_base_object base;
+ struct vmw_resource res;
+};
+
+struct vmw_user_surface {
+ struct ttm_base_object base;
+ struct vmw_surface srf;
+};
+
+struct vmw_user_dma_buffer {
+ struct ttm_base_object base;
+ struct vmw_dma_buffer dma;
+};
+
+struct vmw_bo_user_rep {
+ uint32_t handle;
+ uint64_t map_handle;
+};
+
+struct vmw_stream {
+ struct vmw_resource res;
+ uint32_t stream_id;
+};
+
+struct vmw_user_stream {
+ struct ttm_base_object base;
+ struct vmw_stream stream;
+};
+
+static inline struct vmw_dma_buffer *
+vmw_dma_buffer(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vmw_dma_buffer, base);
+}
+
+static inline struct vmw_user_dma_buffer *
+vmw_user_dma_buffer(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
+}
+
+struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
+{
+ kref_get(&res->kref);
+ return res;
+}
+
+static void vmw_resource_release(struct kref *kref)
+{
+ struct vmw_resource *res =
+ container_of(kref, struct vmw_resource, kref);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ idr_remove(res->idr, res->id);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (likely(res->hw_destroy != NULL))
+ res->hw_destroy(res);
+
+ if (res->res_free != NULL)
+ res->res_free(res);
+ else
+ kfree(res);
+
+ write_lock(&dev_priv->resource_lock);
+}
+
+void vmw_resource_unreference(struct vmw_resource **p_res)
+{
+ struct vmw_resource *res = *p_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ *p_res = NULL;
+ write_lock(&dev_priv->resource_lock);
+ kref_put(&res->kref, vmw_resource_release);
+ write_unlock(&dev_priv->resource_lock);
+}
+
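+/*
+ * vmw_resource_init - Initialize a resource and give it an id.
+ *
+ * Sets up the reference count and allocates an idr entry for the resource.
+ * The resource is not visible to lookups until vmw_resource_activate is
+ * called.
+ */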
+static int vmw_resource_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ struct idr *idr,
+ enum ttm_object_type obj_type,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ kref_init(&res->kref);
+ res->hw_destroy = NULL;
+ res->res_free = res_free;
+ res->res_type = obj_type;
+ res->idr = idr;
+ res->avail = false;
+ res->dev_priv = dev_priv;
+
+ do {
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ write_lock(&dev_priv->resource_lock);
+ ret = idr_get_new_above(idr, res, 1, &res->id);
+ write_unlock(&dev_priv->resource_lock);
+
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_activate
+ *
+ * @res: Pointer to the newly created resource
+ * @hw_destroy: Destroy function. NULL if none.
+ *
+ * Activate a resource after the hardware has been made aware of it.
+ * Set the destroy function to @hw_destroy. Typically this frees the
+ * resource and destroys the hardware resources associated with it.
+ * Activate basically means that the function vmw_resource_lookup will
+ * find it.
+ */
+
+static void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ write_lock(&dev_priv->resource_lock);
+ res->avail = true;
+ res->hw_destroy = hw_destroy;
+ write_unlock(&dev_priv->resource_lock);
+}
+
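+/*
+ * vmw_resource_lookup - Look up an activated resource by id.
+ *
+ * Returns a referenced resource, or NULL if the id is unknown or the
+ * resource has not been activated yet.
+ */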
+struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+ struct idr *idr, int id)
+{
+ struct vmw_resource *res;
+
+ read_lock(&dev_priv->resource_lock);
+ res = idr_find(idr, id);
+ if (res && res->avail)
+ kref_get(&res->kref);
+ else
+ res = NULL;
+ read_unlock(&dev_priv->resource_lock);
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ return res;
+}
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyContext body;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+ struct vmw_resource *res,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineContext body;
+ } *cmd;
+
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
+ VMW_RES_CONTEXT, res_free);
+
+ if (unlikely(ret != 0)) {
+ if (res_free == NULL)
+ kfree(res);
+ else
+ res_free(res);
+ return ret;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.cid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_resource_activate(res, vmw_hw_context_destroy);
+ return 0;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+ struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+ int ret;
+
+ if (unlikely(res == NULL))
+ return NULL;
+
+ ret = vmw_context_init(dev_priv, res, NULL);
+ return (ret == 0) ? res : NULL;
+}
+
+/**
+ * User-space context management:
+ */
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+
+ kfree(ctx);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_context *ctx =
+ container_of(base, struct vmw_user_context, base);
+ struct vmw_resource *res = &ctx->res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_resource *res;
+ struct vmw_user_context *ctx;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = 0;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_context_free) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx = container_of(res, struct vmw_user_context, res);
+ if (ctx->base.tfile != tfile && !ctx->base.shareable) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
+out:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if (unlikely(ctx == NULL))
+ return -ENOMEM;
+
+ res = &ctx->res;
+ ctx->base.shareable = false;
+ ctx->base.tfile = NULL;
+
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(&ctx->res);
+ ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+ &vmw_user_context_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->cid = res->id;
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+
+}
+
+int vmw_context_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ int id)
+{
+ struct vmw_resource *res;
+ int ret = 0;
+
+ read_lock(&dev_priv->resource_lock);
+ res = idr_find(&dev_priv->context_idr, id);
+ if (res && res->avail) {
+ struct vmw_user_context *ctx =
+ container_of(res, struct vmw_user_context, res);
+ if (ctx->base.tfile != tfile && !ctx->base.shareable)
+ ret = -EPERM;
+ } else
+ ret = -EINVAL;
+ read_unlock(&dev_priv->resource_lock);
+
+ return ret;
+}
+
+
+/**
+ * Surface management.
+ */
+
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroySurface body;
+ } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for surface "
+ "destruction.\n");
+ return;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
+ cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+ cmd->body.sid = cpu_to_le32(res->id);
+
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+}
+
+void vmw_surface_res_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ kfree(srf);
+}
+
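+/*
+ * vmw_surface_init - Register a surface with the resource manager and the
+ * device.
+ *
+ * Allocates a resource id and emits an SVGA_3D_CMD_SURFACE_DEFINE command
+ * with the mip level sizes appended.
+ */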
+int vmw_surface_init(struct vmw_private *dev_priv,
+ struct vmw_surface *srf,
+ void (*res_free) (struct vmw_resource *res))
+{
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineSurface body;
+ } *cmd;
+ SVGA3dSize *cmd_size;
+ struct vmw_resource *res = &srf->res;
+ struct drm_vmw_size *src_size;
+ size_t submit_size;
+ uint32_t cmd_len;
+ int i;
+
+ BUG_ON(res_free == NULL);
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
+ VMW_RES_SURFACE, res_free);
+
+ if (unlikely(ret != 0)) {
+ res_free(res);
+ return ret;
+ }
+
+ submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
+ cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Fifo reserve failed for create surface.\n");
+ vmw_resource_unreference(&res);
+ return -ENOMEM;
+ }
+
+ cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
+ cmd->header.size = cpu_to_le32(cmd_len);
+ cmd->body.sid = cpu_to_le32(res->id);
+ cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
+ cmd->body.format = cpu_to_le32(srf->format);
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ cmd->body.face[i].numMipLevels =
+ cpu_to_le32(srf->mip_levels[i]);
+ }
+
+ cmd += 1;
+ cmd_size = (SVGA3dSize *) cmd;
+ src_size = srf->sizes;
+
+ for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+ cmd_size->width = cpu_to_le32(src_size->width);
+ cmd_size->height = cpu_to_le32(src_size->height);
+ cmd_size->depth = cpu_to_le32(src_size->depth);
+ }
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ vmw_resource_activate(res, vmw_hw_surface_destroy);
+ return 0;
+}
+
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
+ struct vmw_user_surface *user_srf =
+ container_of(srf, struct vmw_user_surface, srf);
+
+ kfree(srf->sizes);
+ kfree(srf->snooper.image);
+ kfree(user_srf);
+}
+
+int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_surface **out)
+{
+ struct vmw_resource *res;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ read_lock(&dev_priv->resource_lock);
+
+ if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ read_unlock(&dev_priv->resource_lock);
+ goto out_bad_resource;
+ }
+
+ kref_get(&res->kref);
+ read_unlock(&dev_priv->resource_lock);
+
+ *out = srf;
+ ret = 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_surface *user_srf =
+ container_of(base, struct vmw_user_surface, base);
+ struct vmw_resource *res = &user_srf->srf.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_surface *user_srf =
+ kmalloc(sizeof(*user_srf), GFP_KERNEL);
+ struct vmw_surface *srf;
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ union drm_vmw_surface_create_arg *arg =
+ (union drm_vmw_surface_create_arg *)data;
+ struct drm_vmw_surface_create_req *req = &arg->req;
+ struct drm_vmw_surface_arg *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct drm_vmw_size __user *user_sizes;
+ int ret;
+ int i;
+
+ if (unlikely(user_srf == NULL))
+ return -ENOMEM;
+
+ srf = &user_srf->srf;
+ res = &srf->res;
+
+ srf->flags = req->flags;
+ srf->format = req->format;
+ memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+ srf->num_sizes = 0;
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ srf->num_sizes += srf->mip_levels[i];
+
+ if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+ DRM_VMW_MAX_MIP_LEVELS) {
+ ret = -EINVAL;
+ goto out_err0;
+ }
+
+ srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+ if (unlikely(srf->sizes == NULL)) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr;
+
+ ret = copy_from_user(srf->sizes, user_sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ ret = -EFAULT;
+ goto out_err1;
+ }
+
+ user_srf->base.shareable = false;
+ user_srf->base.tfile = NULL;
+
+ /**
+ * From this point, the generic resource management functions
+ * destroy the object on failure.
+ */
+
+ ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(&srf->res);
+ ret = ttm_base_object_init(tfile, &user_srf->base,
+ req->shareable, VMW_RES_SURFACE,
+ &vmw_user_surface_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ vmw_resource_unreference(&res);
+ return ret;
+ }
+
+ if (srf->flags & (1 << 9) &&
+ srf->num_sizes == 1 &&
+ srf->sizes[0].width == 64 &&
+ srf->sizes[0].height == 64 &&
+ srf->format == SVGA3D_A8R8G8B8) {
+
+ srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
+ /* clear the image */
+ if (srf->snooper.image)
+ memset(srf->snooper.image, 0x00, 64 * 64 * 4);
+ else
+ DRM_ERROR("Failed to allocate cursor_image\n");
+
+ } else {
+ srf->snooper.image = NULL;
+ }
+ srf->snooper.crtc = NULL;
+
+ rep->sid = user_srf->base.hash.key;
+ if (rep->sid == SVGA3D_INVALID_ID)
+ DRM_ERROR("Created bad Surface ID.\n");
+
+ vmw_resource_unreference(&res);
+ return 0;
+out_err1:
+ kfree(srf->sizes);
+out_err0:
+ kfree(user_srf);
+ return ret;
+}
+
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ union drm_vmw_surface_reference_arg *arg =
+ (union drm_vmw_surface_reference_arg *)data;
+ struct drm_vmw_surface_arg *req = &arg->req;
+ struct drm_vmw_surface_create_req *rep = &arg->rep;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_surface *srf;
+ struct vmw_user_surface *user_srf;
+ struct drm_vmw_size __user *user_sizes;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
+
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+
+ ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Could not add a reference to a surface.\n");
+ goto out_no_reference;
+ }
+
+ rep->flags = srf->flags;
+ rep->format = srf->format;
+ memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+ user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+ rep->size_addr;
+
+ if (user_sizes)
+ ret = copy_to_user(user_sizes, srf->sizes,
+ srf->num_sizes * sizeof(*srf->sizes));
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("copy_to_user failed %p %u\n",
+ user_sizes, srf->num_sizes);
+ ret = -EFAULT;
+ }
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
+ return ret;
+}
+
+int vmw_surface_check(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, int *id)
+{
+ struct ttm_base_object *base;
+ struct vmw_user_surface *user_srf;
+
+ int ret = -EPERM;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_surface;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ *id = user_srf->srf.res.id;
+ ret = 0;
+
+out_bad_surface:
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
+
+ ttm_base_object_unref(&base);
+ return ret;
+}
+
+/**
+ * Buffer management.
+ */
+
+static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
+ unsigned long num_pages)
+{
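+ /*
+ * Accounted size for a DMA buffer: the cached fixed per-bo TTM
+ * overhead plus a page-aligned array of page pointers.
+ */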
+ static size_t bo_user_size = ~0;
+
+ size_t page_array_size =
+ (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
+
+ if (unlikely(bo_user_size == ~0)) {
+ bo_user_size = glob->ttm_bo_extra_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ }
+
+ return bo_user_size + page_array_size;
+}
+
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ struct ttm_bo_global *glob = bo->glob;
+ struct vmw_private *dev_priv =
+ container_of(bo->bdev, struct vmw_private, bdev);
+
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ if (vmw_bo->gmr_bound) {
+ vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
+ spin_unlock(&glob->lru_lock);
+ }
+ kfree(vmw_bo);
+}
+
+int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free) (struct ttm_buffer_object *bo))
+{
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ size_t acc_size;
+ int ret;
+
+ BUG_ON(!bo_free);
+
+ acc_size =
+ vmw_dmabuf_acc_size(bdev->glob,
+ (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (unlikely(ret != 0)) {
+ /* We must free the bo ourselves here, since on failure
+ * ttm_buffer_object_init() would have freed it as well. */
+ bo_free(&vmw_bo->base);
+ return ret;
+ }
+
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+ INIT_LIST_HEAD(&vmw_bo->gmr_lru);
+ INIT_LIST_HEAD(&vmw_bo->validate_list);
+ vmw_bo->gmr_id = 0;
+ vmw_bo->gmr_bound = false;
+
+ ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, 0, interruptible,
+ NULL, acc_size, bo_free);
+ return ret;
+}
+
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
+ struct ttm_bo_global *glob = bo->glob;
+ struct vmw_private *dev_priv =
+ container_of(bo->bdev, struct vmw_private, bdev);
+
+ ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ if (vmw_bo->gmr_bound) {
+ vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
+ spin_unlock(&glob->lru_lock);
+ }
+ kfree(vmw_user_bo);
+}
+
+static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_base_object *base = *p_base;
+ struct ttm_buffer_object *bo;
+
+ *p_base = NULL;
+
+ if (unlikely(base == NULL))
+ return;
+
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ bo = &vmw_user_bo->dma.base;
+ ttm_bo_unref(&bo);
+}
+
+int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ union drm_vmw_alloc_dmabuf_arg *arg =
+ (union drm_vmw_alloc_dmabuf_arg *)data;
+ struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+ struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_buffer_object *tmp;
+ struct vmw_master *vmaster = vmw_master(file_priv->master);
+ int ret;
+
+ vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
+ if (unlikely(vmw_user_bo == NULL))
+ return -ENOMEM;
+
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0)) {
+ kfree(vmw_user_bo);
+ return ret;
+ }
+
+ ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
+ &vmw_vram_placement, true,
+ &vmw_user_dmabuf_destroy);
+ if (unlikely(ret != 0)) {
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+ }
+
+ tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
+ ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
+ &vmw_user_bo->base,
+ false,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release, NULL);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unref(&tmp);
+ ttm_read_unlock(&vmaster->lock);
+ return ret;
+ }
+
+ rep->handle = vmw_user_bo->base.hash.key;
+ rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
+ rep->cur_gmr_id = vmw_user_bo->base.hash.key;
+ rep->cur_gmr_offset = 0;
+ ttm_bo_unref(&tmp);
+
+ ttm_read_unlock(&vmaster->lock);
+
+ return 0;
+}
+
+int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_vmw_unref_dmabuf_arg *arg =
+ (struct drm_vmw_unref_dmabuf_arg *)data;
+
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ arg->handle,
+ TTM_REF_USAGE);
+}
+
+uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+ uint32_t cur_validate_node)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+
+ if (likely(vmw_bo->on_validate_list))
+ return vmw_bo->cur_validate_node;
+
+ vmw_bo->cur_validate_node = cur_validate_node;
+ vmw_bo->on_validate_list = true;
+
+ return cur_validate_node;
+}
+
+void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+
+ vmw_bo->on_validate_list = false;
+}
+
+uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
+{
+ struct vmw_dma_buffer *vmw_bo;
+
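+ /*
+ * Buffers currently placed in VRAM are addressed through the special
+ * framebuffer GMR; otherwise report the bound GMR id, if any.
+ */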
+ if (bo->mem.mem_type == TTM_PL_VRAM)
+ return SVGA_GMR_FRAMEBUFFER;
+
+ vmw_bo = vmw_dma_buffer(bo);
+
+ return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
+}
+
+void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
+{
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ vmw_bo->gmr_bound = true;
+ vmw_bo->gmr_id = id;
+}
+
+int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_dma_buffer **out)
+{
+ struct vmw_user_dma_buffer *vmw_user_bo;
+ struct ttm_base_object *base;
+
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL)) {
+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -ESRCH;
+ }
+
+ if (unlikely(base->object_type != ttm_buffer_type)) {
+ ttm_base_object_unref(&base);
+ printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return -EINVAL;
+ }
+
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ (void)ttm_bo_reference(&vmw_user_bo->dma.base);
+ ttm_base_object_unref(&base);
+ *out = &vmw_user_bo->dma;
+
+ return 0;
+}
+
+/**
+ * TODO: Implement a gmr id eviction mechanism. Currently we just fail
+ * when we're out of ids, causing GMR space to be allocated
+ * out of VRAM.
+ */
+
+int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
+{
+ struct ttm_bo_global *glob = dev_priv->bdev.glob;
+ int id;
+ int ret;
+
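+ /*
+ * Usual ida retry idiom: pre-allocate memory outside the lock, then
+ * retry ida_get_new() under the global LRU lock while it reports
+ * -EAGAIN.
+ */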
+ do {
+ if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ spin_lock(&glob->lru_lock);
+ ret = ida_get_new(&dev_priv->gmr_ida, &id);
+ spin_unlock(&glob->lru_lock);
+ } while (ret == -EAGAIN);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(id >= dev_priv->max_gmr_ids)) {
+ spin_lock(&glob->lru_lock);
+ ida_remove(&dev_priv->gmr_ida, id);
+ spin_unlock(&glob->lru_lock);
+ return -EBUSY;
+ }
+
+ *p_id = (uint32_t) id;
+ return 0;
+}
+
+/*
+ * Stream management
+ */
+
+static void vmw_stream_destroy(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct vmw_stream *stream;
+ int ret;
+
+ DRM_INFO("%s: unref\n", __func__);
+ stream = container_of(res, struct vmw_stream, res);
+
+ ret = vmw_overlay_unref(dev_priv, stream->stream_id);
+ WARN_ON(ret != 0);
+}
+
+static int vmw_stream_init(struct vmw_private *dev_priv,
+ struct vmw_stream *stream,
+ void (*res_free) (struct vmw_resource *res))
+{
+ struct vmw_resource *res = &stream->res;
+ int ret;
+
+ ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
+ VMW_RES_STREAM, res_free);
+
+ if (unlikely(ret != 0)) {
+ if (res_free == NULL)
+ kfree(stream);
+ else
+ res_free(&stream->res);
+ return ret;
+ }
+
+ ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
+ if (ret) {
+ vmw_resource_unreference(&res);
+ return ret;
+ }
+
+ DRM_INFO("%s: claimed\n", __func__);
+
+ vmw_resource_activate(&stream->res, vmw_stream_destroy);
+ return 0;
+}
+
+/**
+ * User-space stream management:
+ */
+
+static void vmw_user_stream_free(struct vmw_resource *res)
+{
+ struct vmw_user_stream *stream =
+ container_of(res, struct vmw_user_stream, stream.res);
+
+ kfree(stream);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct vmw_user_stream *stream =
+ container_of(base, struct vmw_user_stream, base);
+ struct vmw_resource *res = &stream->stream.res;
+
+ *p_base = NULL;
+ vmw_resource_unreference(&res);
+}
+
+int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_resource *res;
+ struct vmw_user_stream *stream;
+ struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret = 0;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_stream_free) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ stream = container_of(res, struct vmw_user_stream, stream.res);
+ if (stream->base.tfile != tfile) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
+out:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ struct vmw_resource *res;
+ struct vmw_resource *tmp;
+ struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
+
+ if (unlikely(stream == NULL))
+ return -ENOMEM;
+
+ res = &stream->stream.res;
+ stream->base.shareable = false;
+ stream->base.tfile = NULL;
+
+ ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
+ if (unlikely(ret != 0))
+ return ret;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
+ &vmw_user_stream_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ arg->stream_id = res->id;
+out_err:
+ vmw_resource_unreference(&res);
+ return ret;
+}
+
+int vmw_user_stream_lookup(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t *inout_id, struct vmw_resource **out)
+{
+ struct vmw_user_stream *stream;
+ struct vmw_resource *res;
+ int ret;
+
+ res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ if (unlikely(res == NULL))
+ return -EINVAL;
+
+ if (res->res_free != &vmw_user_stream_free) {
+ ret = -EINVAL;
+ goto err_ref;
+ }
+
+ stream = container_of(res, struct vmw_user_stream, stream.res);
+ if (stream->base.tfile != tfile) {
+ ret = -EPERM;
+ goto err_ref;
+ }
+
+ *inout_id = stream->stream.stream_id;
+ *out = res;
+ return 0;
+err_ref:
+ vmw_resource_unreference(&res);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 00000000000..e3df4adfb4d
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct vmw_private *dev_priv;
+
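+ /*
+ * Offsets below VMWGFX_FILE_PAGE_OFFSET are legacy mappings: try the
+ * FIFO mmap first and fall back to drm_mmap(). Everything above that
+ * offset is handled by TTM.
+ */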
+ if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
+ if (vmw_fifo_mmap(filp, vma) == 0)
+ return 0;
+ return drm_mmap(filp, vma);
+ }
+
+ file_priv = (struct drm_file *)filp->private_data;
+ dev_priv = vmw_priv(file_priv->minor->dev);
+ return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+}
+
+static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
+{
+ DRM_INFO("global init.\n");
+ return ttm_mem_global_init(ref->object);
+}
+
+static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+int vmw_ttm_global_init(struct vmw_private *dev_priv)
+{
+ struct ttm_global_reference *global_ref;
+ int ret;
+
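+ /*
+ * Register the TTM memory accounting global first; the buffer object
+ * global initialized below depends on it.
+ */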
+ global_ref = &dev_priv->mem_global_ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &vmw_ttm_mem_global_init;
+ global_ref->release = &vmw_ttm_mem_global_release;
+
+ ret = ttm_global_item_ref(global_ref);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM memory accounting.\n");
+ return ret;
+ }
+
+ dev_priv->bo_global_ref.mem_glob =
+ dev_priv->mem_global_ref.object;
+ global_ref = &dev_priv->bo_global_ref.ref;
+ global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ ret = ttm_global_item_ref(global_ref);
+
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed setting up TTM buffer objects.\n");
+ goto out_no_bo;
+ }
+
+ return 0;
+out_no_bo:
+ ttm_global_item_unref(&dev_priv->mem_global_ref);
+ return ret;
+}
+
+void vmw_ttm_global_release(struct vmw_private *dev_priv)
+{
+ ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
+ ttm_global_item_unref(&dev_priv->mem_global_ref);
+}
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index 27ae750ca87..bf31592eaf7 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -1,8 +1,6 @@
#ifndef __HID_LG_H
#define __HID_LG_H
-#include <linux/autoconf.h>
-
#ifdef CONFIG_LOGITECH_FF
int lgff_init(struct hid_device *hdev);
#else
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 0258289f3b3..e2997a8d5e1 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1253,10 +1253,9 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
- struct usb_device *udev = interface_to_usbdev(intf);
int status;
- if (udev->auto_pm) {
+ if (message.event & PM_EVENT_AUTO) {
spin_lock_irq(&usbhid->lock); /* Sync with error handler */
if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
&& !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
@@ -1281,7 +1280,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
return -EIO;
}
- if (!ignoreled && udev->auto_pm) {
+ if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
spin_lock_irq(&usbhid->lock);
if (test_bit(HID_LED_ON, &usbhid->iofl)) {
spin_unlock_irq(&usbhid->lock);
@@ -1294,7 +1293,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
hid_cancel_delayed_stuff(usbhid);
hid_cease_io(usbhid);
- if (udev->auto_pm && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
+ if ((message.event & PM_EVENT_AUTO) &&
+ test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
/* lost race against keypresses */
status = hid_start_in(hid);
if (status < 0)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9e640c62ebd..46c3c566307 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -228,6 +228,18 @@ config SENSORS_K8TEMP
This driver can also be built as a module. If so, the module
will be called k8temp.
+config SENSORS_K10TEMP
+ tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+ depends on X86 && PCI
+ help
+ If you say yes here you get support for the temperature
+ sensor(s) inside your CPU. Supported are later revisions of
+ the AMD Family 10h and all revisions of the AMD Family 11h
+ microarchitectures.
+
+ This driver can also be built as a module. If so, the module
+ will be called k10temp.
+
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C) && EXPERIMENTAL
@@ -810,6 +822,14 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_VIA_CPUTEMP
+ tristate "VIA CPU temperature sensor"
+ depends on X86
+ help
+ If you say yes here you get support for the temperature
+ sensor inside your CPU. Supported are all known variants of
+ the VIA C7 and Nano.
+
config SENSORS_VIA686A
tristate "VIA686A"
depends on PCI
@@ -998,6 +1018,23 @@ config SENSORS_LIS3_SPI
will be called lis3lv02d and a specific module for the SPI transport
is called lis3lv02d_spi.
+config SENSORS_LIS3_I2C
+ tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+ depends on I2C && INPUT
+ select INPUT_POLLDEV
+ default n
+ help
+ This driver provides support for the LIS3LV02Dx accelerometer connected
+ via I2C. The accelerometer data is readable via
+ /sys/devices/platform/lis3lv02d.
+
+ This driver also provides an absolute input class device, allowing
+ the device to act as a pinball machine-esque joystick.
+
+ This driver can also be built as modules. If so, the core module
+ will be called lis3lv02d and a specific module for the I2C transport
+ is called lis3lv02d_i2c.
+
config SENSORS_APPLESMC
tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
depends on INPUT && X86
@@ -1046,25 +1083,27 @@ config SENSORS_ATK0110
will be called asus_atk0110.
config SENSORS_LIS3LV02D
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
+ tristate "STMicroeletronics LIS3* three-axis digital accelerometer"
depends on INPUT
select INPUT_POLLDEV
select NEW_LEDS
select LEDS_CLASS
default n
help
- This driver provides support for the LIS3LV02Dx accelerometer. In
- particular, it can be found in a number of HP laptops, which have the
- "Mobile Data Protection System 3D" or "3D DriveGuard" feature. On such
- systems the driver should load automatically (via ACPI). The
- accelerometer might also be found in other systems, connected via SPI
- or I2C. The accelerometer data is readable via
- /sys/devices/platform/lis3lv02d.
+ This driver provides support for the LIS3* accelerometers, such as the
+ LIS3LV02DL or the LIS331DL. In particular, it can be found in a number
+ of HP laptops, which have the "Mobile Data Protection System 3D" or
+ "3D DriveGuard" feature. On such systems the driver should load
+ automatically (via ACPI alias). The accelerometer might also be found
+ in other systems, connected via SPI or I2C. The accelerometer data is
+ readable via /sys/devices/platform/lis3lv02d.
This driver also provides an absolute input class device, allowing
- the laptop to act as a pinball machine-esque joystick. On HP laptops,
+ a laptop to act as a pinball machine-esque joystick. It also provides
+ a misc device which can be used to detect free-fall. On HP laptops,
if the led infrastructure is activated, support for a led indicating
- disk protection will be provided as hp:red:hddprotection.
+ disk protection will be provided as hp::hddprotect. For more
+ information on the feature, refer to Documentation/hwmon/lis3lv02d.
This driver can also be built as modules. If so, the core module
will be called lis3lv02d and a specific module for HP laptops will be
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 33c2ee10528..450c8e89427 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -53,8 +53,10 @@ obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
+obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o
+obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
obj-$(CONFIG_SENSORS_LM70) += lm70.o
obj-$(CONFIG_SENSORS_LM73) += lm73.o
@@ -87,6 +89,7 @@ obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
obj-$(CONFIG_SENSORS_VT8231) += vt8231.o
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 33acf29531a..1ad0a885c5a 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -34,9 +34,8 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm,
- mc1066);
+enum chips {
+ adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 };
/* adm1021 constants specified below */
@@ -97,7 +96,7 @@ struct adm1021_data {
static int adm1021_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1021_detect(struct i2c_client *client, int kind,
+static int adm1021_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1021_init_client(struct i2c_client *client);
static int adm1021_remove(struct i2c_client *client);
@@ -130,7 +129,7 @@ static struct i2c_driver adm1021_driver = {
.remove = adm1021_remove,
.id_table = adm1021_id,
.detect = adm1021_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static ssize_t show_temp(struct device *dev,
@@ -284,7 +283,7 @@ static const struct attribute_group adm1021_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1021_detect(struct i2c_client *client, int kind,
+static int adm1021_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index db6ac2b04f6..251b63165e2 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -64,11 +64,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_2(adm1025, ne1619);
+enum chips { adm1025, ne1619 };
/*
* The ADM1025 registers
@@ -111,7 +107,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
static int adm1025_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1025_detect(struct i2c_client *client, int kind,
+static int adm1025_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1025_init_client(struct i2c_client *client);
static int adm1025_remove(struct i2c_client *client);
@@ -137,7 +133,7 @@ static struct i2c_driver adm1025_driver = {
.remove = adm1025_remove,
.id_table = adm1025_id,
.detect = adm1025_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -409,7 +405,7 @@ static const struct attribute_group adm1025_group_in4 = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1025_detect(struct i2c_client *client, int kind,
+static int adm1025_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index fb5363985e2..65335b268fa 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -37,9 +37,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adm1026);
-
static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -293,7 +290,7 @@ struct adm1026_data {
static int adm1026_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1026_detect(struct i2c_client *client, int kind,
+static int adm1026_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adm1026_remove(struct i2c_client *client);
static int adm1026_read_value(struct i2c_client *client, u8 reg);
@@ -305,7 +302,7 @@ static void adm1026_init_client(struct i2c_client *client);
static const struct i2c_device_id adm1026_id[] = {
- { "adm1026", adm1026 },
+ { "adm1026", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1026_id);
@@ -319,7 +316,7 @@ static struct i2c_driver adm1026_driver = {
.remove = adm1026_remove,
.id_table = adm1026_id,
.detect = adm1026_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int adm1026_read_value(struct i2c_client *client, u8 reg)
@@ -1650,7 +1647,7 @@ static const struct attribute_group adm1026_group_in8_9 = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1026_detect(struct i2c_client *client, int kind,
+static int adm1026_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index ef91e2a4a56..0b8a3b145bd 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -44,12 +44,6 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
};
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(adm1029);
-
-/*
* The ADM1029 registers
* Manufacturer ID is 0x41 for Analog Devices
*/
@@ -117,7 +111,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = {
static int adm1029_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1029_detect(struct i2c_client *client, int kind,
+static int adm1029_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adm1029_remove(struct i2c_client *client);
static struct adm1029_data *adm1029_update_device(struct device *dev);
@@ -128,7 +122,7 @@ static int adm1029_init_client(struct i2c_client *client);
*/
static const struct i2c_device_id adm1029_id[] = {
- { "adm1029", adm1029 },
+ { "adm1029", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1029_id);
@@ -142,7 +136,7 @@ static struct i2c_driver adm1029_driver = {
.remove = adm1029_remove,
.id_table = adm1029_id,
.detect = adm1029_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -297,7 +291,7 @@ static const struct attribute_group adm1029_group = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1029_detect(struct i2c_client *client, int kind,
+static int adm1029_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 0e722175aae..1644b92e7cc 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -64,8 +64,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(adm1030, adm1031);
+enum chips { adm1030, adm1031 };
typedef u8 auto_chan_table_t[8][2];
@@ -102,7 +101,7 @@ struct adm1031_data {
static int adm1031_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm1031_detect(struct i2c_client *client, int kind,
+static int adm1031_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1031_init_client(struct i2c_client *client);
static int adm1031_remove(struct i2c_client *client);
@@ -125,7 +124,7 @@ static struct i2c_driver adm1031_driver = {
.remove = adm1031_remove,
.id_table = adm1031_id,
.detect = adm1031_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg)
@@ -813,7 +812,7 @@ static const struct attribute_group adm1031_group_opt = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm1031_detect(struct i2c_client *client, int kind,
+static int adm1031_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 20e0481cc20..0727ad25079 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -55,8 +55,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_3(adm9240, ds1780, lm81);
+enum chips { adm9240, ds1780, lm81 };
/* ADM9240 registers */
#define ADM9240_REG_MAN_ID 0x3e
@@ -132,7 +131,7 @@ static inline unsigned int AOUT_FROM_REG(u8 reg)
static int adm9240_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adm9240_detect(struct i2c_client *client, int kind,
+static int adm9240_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm9240_init_client(struct i2c_client *client);
static int adm9240_remove(struct i2c_client *client);
@@ -156,7 +155,7 @@ static struct i2c_driver adm9240_driver = {
.remove = adm9240_remove,
.id_table = adm9240_id,
.detect = adm9240_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* per client data */
@@ -545,7 +544,7 @@ static const struct attribute_group adm9240_group = {
/*** sensor chip detect and driver install ***/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adm9240_detect(struct i2c_client *new_client, int kind,
+static int adm9240_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 451977bca7d..aac85f3aed5 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -47,10 +47,7 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ads7828);
-
-/* Other module parameters */
+/* Module parameters */
static int se_input = 1; /* Default is SE, 0 == diff */
static int int_vref = 1; /* Default is internal ref ON */
static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */
@@ -72,7 +69,7 @@ struct ads7828_data {
};
/* Function declaration - necessary due to function dependencies */
-static int ads7828_detect(struct i2c_client *client, int kind,
+static int ads7828_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int ads7828_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -168,7 +165,7 @@ static int ads7828_remove(struct i2c_client *client)
}
static const struct i2c_device_id ads7828_id[] = {
- { "ads7828", ads7828 },
+ { "ads7828", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ads7828_id);
@@ -183,11 +180,11 @@ static struct i2c_driver ads7828_driver = {
.remove = ads7828_remove,
.id_table = ads7828_id,
.detect = ads7828_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int ads7828_detect(struct i2c_client *client, int kind,
+static int ads7828_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index f9c9562b6a9..a1a7ef14b51 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -32,9 +32,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adt7462);
-
/* ADT7462 registers */
#define ADT7462_REG_DEVICE 0x3D
#define ADT7462_REG_VENDOR 0x3E
@@ -237,12 +234,12 @@ struct adt7462_data {
static int adt7462_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adt7462_detect(struct i2c_client *client, int kind,
+static int adt7462_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adt7462_remove(struct i2c_client *client);
static const struct i2c_device_id adt7462_id[] = {
- { "adt7462", adt7462 },
+ { "adt7462", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7462_id);
@@ -256,7 +253,7 @@ static struct i2c_driver adt7462_driver = {
.remove = adt7462_remove,
.id_table = adt7462_id,
.detect = adt7462_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1902,7 +1899,7 @@ static struct attribute *adt7462_attr[] =
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adt7462_detect(struct i2c_client *client, int kind,
+static int adt7462_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 32b1750a689..3445ce1cba8 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -33,9 +33,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adt7470);
-
/* ADT7470 registers */
#define ADT7470_REG_BASE_ADDR 0x20
#define ADT7470_REG_TEMP_BASE_ADDR 0x20
@@ -177,12 +174,12 @@ struct adt7470_data {
static int adt7470_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adt7470_detect(struct i2c_client *client, int kind,
+static int adt7470_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adt7470_remove(struct i2c_client *client);
static const struct i2c_device_id adt7470_id[] = {
- { "adt7470", adt7470 },
+ { "adt7470", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7470_id);
@@ -196,7 +193,7 @@ static struct i2c_driver adt7470_driver = {
.remove = adt7470_remove,
.id_table = adt7470_id,
.detect = adt7470_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1225,7 +1222,7 @@ static struct attribute *adt7470_attr[] =
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adt7470_detect(struct i2c_client *client, int kind,
+static int adt7470_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
index aea244db974..434576f61c8 100644
--- a/drivers/hwmon/adt7473.c
+++ b/drivers/hwmon/adt7473.c
@@ -32,9 +32,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(adt7473);
-
/* ADT7473 registers */
#define ADT7473_REG_BASE_ADDR 0x20
@@ -166,12 +163,12 @@ struct adt7473_data {
static int adt7473_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int adt7473_detect(struct i2c_client *client, int kind,
+static int adt7473_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int adt7473_remove(struct i2c_client *client);
static const struct i2c_device_id adt7473_id[] = {
- { "adt7473", adt7473 },
+ { "adt7473", 0 },
{ }
};
@@ -184,7 +181,7 @@ static struct i2c_driver adt7473_driver = {
.remove = adt7473_remove,
.id_table = adt7473_id,
.detect = adt7473_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1085,7 +1082,7 @@ static struct attribute *adt7473_attr[] =
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adt7473_detect(struct i2c_client *client, int kind,
+static int adt7473_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 99abfddedbc..a0c38514568 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -148,7 +148,7 @@
static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-I2C_CLIENT_INSMOD_4(adt7473, adt7475, adt7476, adt7490);
+enum chips { adt7473, adt7475, adt7476, adt7490 };
static const struct i2c_device_id adt7475_id[] = {
{ "adt7473", adt7473 },
@@ -1172,7 +1172,7 @@ static struct attribute_group in4_attr_group = { .attrs = in4_attrs };
static struct attribute_group in5_attr_group = { .attrs = in5_attrs };
static struct attribute_group vid_attr_group = { .attrs = vid_attrs };
-static int adt7475_detect(struct i2c_client *client, int kind,
+static int adt7475_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -1412,7 +1412,7 @@ static struct i2c_driver adt7475_driver = {
.remove = adt7475_remove,
.id_table = adt7475_id,
.detect = adt7475_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static void adt7475_read_hystersis(struct i2c_client *client)
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 7ea6a8f6605..c1605b528e8 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -518,7 +518,7 @@ static int applesmc_pm_restore(struct device *dev)
return applesmc_pm_resume(dev);
}
-static struct dev_pm_ops applesmc_pm_ops = {
+static const struct dev_pm_ops applesmc_pm_ops = {
.resume = applesmc_pm_resume,
.restore = applesmc_pm_restore,
};
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 480f80ea1fa..7dada559b3a 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -51,9 +51,6 @@
/* I2C addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(asb100);
-
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
@@ -209,14 +206,14 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
static int asb100_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int asb100_detect(struct i2c_client *client, int kind,
+static int asb100_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int asb100_remove(struct i2c_client *client);
static struct asb100_data *asb100_update_device(struct device *dev);
static void asb100_init_client(struct i2c_client *client);
static const struct i2c_device_id asb100_id[] = {
- { "asb100", asb100 },
+ { "asb100", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, asb100_id);
@@ -230,7 +227,7 @@ static struct i2c_driver asb100_driver = {
.remove = asb100_remove,
.id_table = asb100_id,
.detect = asb100_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* 7 Voltages */
@@ -697,7 +694,7 @@ ERROR_SC_2:
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int asb100_detect(struct i2c_client *client, int kind,
+static int asb100_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index d6b490d3e36..94cadc19f0c 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -44,17 +44,14 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
-I2C_CLIENT_INSMOD_1(atxp1);
-
static int atxp1_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int atxp1_remove(struct i2c_client *client);
static struct atxp1_data * atxp1_update_device(struct device *dev);
-static int atxp1_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info);
static const struct i2c_device_id atxp1_id[] = {
- { "atxp1", atxp1 },
+ { "atxp1", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, atxp1_id);
@@ -68,7 +65,7 @@ static struct i2c_driver atxp1_driver = {
.remove = atxp1_remove,
.id_table = atxp1_id,
.detect = atxp1_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
struct atxp1_data {
@@ -275,7 +272,7 @@ static const struct attribute_group atxp1_group = {
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int atxp1_detect(struct i2c_client *new_client, int kind,
+static int atxp1_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 4377bb0cc52..823dd28a902 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -57,11 +57,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
/* Addresses to scan */
static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(dme1737, sch5027);
-
-/* ISA chip types */
-enum isa_chips { sch311x = sch5027 + 1 };
+enum chips { dme1737, sch5027, sch311x };
/* ---------------------------------------------------------------------
* Registers
@@ -2208,7 +2204,7 @@ exit:
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int dme1737_i2c_detect(struct i2c_client *client, int kind,
+static int dme1737_i2c_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -2318,7 +2314,7 @@ static struct i2c_driver dme1737_i2c_driver = {
.remove = dme1737_i2c_remove,
.id_table = dme1737_id,
.detect = dme1737_i2c_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* ---------------------------------------------------------------------
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 2a4c6a05b14..e11363467a8 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -38,7 +38,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ds1621);
static int polarity = -1;
module_param(polarity, int, 0);
MODULE_PARM_DESC(polarity, "Output's polarity: 0 = active high, 1 = active low");
@@ -224,7 +223,7 @@ static const struct attribute_group ds1621_group = {
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int ds1621_detect(struct i2c_client *client, int kind,
+static int ds1621_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -305,8 +304,8 @@ static int ds1621_remove(struct i2c_client *client)
}
static const struct i2c_device_id ds1621_id[] = {
- { "ds1621", ds1621 },
- { "ds1625", ds1621 },
+ { "ds1621", 0 },
+ { "ds1625", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1621_id);
@@ -321,7 +320,7 @@ static struct i2c_driver ds1621_driver = {
.remove = ds1621_remove,
.id_table = ds1621_id,
.detect = ds1621_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init ds1621_init(void)
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 40dfbcd3f3f..277398f9c93 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -39,8 +39,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(f75373, f75375);
+enum chips { f75373, f75375 };
/* Fintek F75375 registers */
#define F75375_REG_CONFIG0 0x0
@@ -113,7 +112,7 @@ struct f75375_data {
s8 temp_max_hyst[2];
};
-static int f75375_detect(struct i2c_client *client, int kind,
+static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int f75375_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -135,7 +134,7 @@ static struct i2c_driver f75375_driver = {
.remove = f75375_remove,
.id_table = f75375_id,
.detect = f75375_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static inline int f75375_read8(struct i2c_client *client, u8 reg)
@@ -677,7 +676,7 @@ static int f75375_remove(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int f75375_detect(struct i2c_client *client, int kind,
+static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 281829cd153..bd0fc67e804 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -56,7 +56,8 @@ static int nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-I2C_CLIENT_INSMOD_7(fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl);
+
+enum chips { fscpos, fscher, fscscy, fschrc, fschmd, fschds, fscsyl };
/*
* The FSCHMD registers and other defines
@@ -221,7 +222,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 };
static int fschmd_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int fschmd_detect(struct i2c_client *client, int kind,
+static int fschmd_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int fschmd_remove(struct i2c_client *client);
static struct fschmd_data *fschmd_update_device(struct device *dev);
@@ -251,7 +252,7 @@ static struct i2c_driver fschmd_driver = {
.remove = fschmd_remove,
.id_table = fschmd_id,
.detect = fschmd_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -1000,7 +1001,7 @@ static void fschmd_dmi_decode(const struct dmi_header *header, void *dummy)
}
}
-static int fschmd_detect(struct i2c_client *client, int _kind,
+static int fschmd_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
enum chips kind;
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 1d69458aa0b..e7ae5743e18 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -46,8 +46,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80);
+enum chips { gl518sm_r00, gl518sm_r80 };
/* Many GL518 constants specified below */
@@ -139,8 +138,7 @@ struct gl518_data {
static int gl518_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int gl518_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info);
static void gl518_init_client(struct i2c_client *client);
static int gl518_remove(struct i2c_client *client);
static int gl518_read_value(struct i2c_client *client, u8 reg);
@@ -163,7 +161,7 @@ static struct i2c_driver gl518_driver = {
.remove = gl518_remove,
.id_table = gl518_id,
.detect = gl518_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -484,8 +482,7 @@ static const struct attribute_group gl518_group_r80 = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int gl518_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int rev;
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 92b5720ceaf..ec588026f0a 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -41,9 +41,6 @@ MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=tempe
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(gl520sm);
-
/* Many GL520 constants specified below
One of the inputs can be configured as either temp or voltage.
That's why _TEMP2 and _IN4 access the same register
@@ -81,8 +78,7 @@ static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 };
static int gl520_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int gl520_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info);
static void gl520_init_client(struct i2c_client *client);
static int gl520_remove(struct i2c_client *client);
static int gl520_read_value(struct i2c_client *client, u8 reg);
@@ -91,7 +87,7 @@ static struct gl520_data *gl520_update_device(struct device *dev);
/* Driver data */
static const struct i2c_device_id gl520_id[] = {
- { "gl520sm", gl520sm },
+ { "gl520sm", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, gl520_id);
@@ -105,7 +101,7 @@ static struct i2c_driver gl520_driver = {
.remove = gl520_remove,
.id_table = gl520_id,
.detect = gl520_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* Client data */
@@ -681,8 +677,7 @@ static const struct attribute_group gl520_group_opt = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int gl520_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
new file mode 100644
index 00000000000..d8a26d16d94
--- /dev/null
+++ b/drivers/hwmon/k10temp.c
@@ -0,0 +1,197 @@
+/*
+ * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ *
+ * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
+ *
+ *
+ * This driver is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this driver; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <asm/processor.h>
+
+MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL");
+
+static bool force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+
+#define REG_HARDWARE_THERMAL_CONTROL 0x64
+#define HTC_ENABLE 0x00000001
+
+#define REG_REPORTED_TEMPERATURE 0xa4
+
+#define REG_NORTHBRIDGE_CAPABILITIES 0xe8
+#define NB_CAP_HTC 0x00000400
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 regval;
+
+ pci_read_config_dword(to_pci_dev(dev),
+ REG_REPORTED_TEMPERATURE, &regval);
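+ /*
+ * The reported temperature lives in bits 31:21, in steps of
+ * 0.125 degC; multiplying by 125 yields the millidegree Celsius
+ * value expected by the hwmon sysfs interface.
+ */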
+ return sprintf(buf, "%u\n", (regval >> 21) * 125);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", 70 * 1000);
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ int show_hyst = attr->index;
+ u32 regval;
+ int value;
+
+ pci_read_config_dword(to_pci_dev(dev),
+ REG_HARDWARE_THERMAL_CONTROL, &regval);
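+ /*
+ * HTC temperature limit: 7-bit field at bits 22:16 in 0.5 degC steps,
+ * offset by 52 degC; for the _hyst attribute the hysteresis field in
+ * bits 27:24 (also 0.5 degC steps) is subtracted.
+ */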
+ value = ((regval >> 16) & 0x7f) * 500 + 52000;
+ if (show_hyst)
+ value -= ((regval >> 24) & 0xf) * 500;
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "k10temp\n");
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static bool __devinit has_erratum_319(void)
+{
+ /*
+ * Erratum 319: The thermal sensor of older Family 10h processors
+ * (B steppings) may be unreliable.
+ */
+ return boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model <= 2;
+}
+
+static int __devinit k10temp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *hwmon_dev;
+ u32 reg_caps, reg_htc;
+ int err;
+
+ if (has_erratum_319() && !force) {
+ dev_err(&pdev->dev,
+ "unreliable CPU thermal sensor; monitoring disabled\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_temp1_input);
+ if (err)
+ goto exit;
+ err = device_create_file(&pdev->dev, &dev_attr_temp1_max);
+ if (err)
+ goto exit_remove;
+
+ pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, &reg_caps);
+ pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, &reg_htc);
+ if ((reg_caps & NB_CAP_HTC) && (reg_htc & HTC_ENABLE)) {
+ err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit.dev_attr);
+ if (err)
+ goto exit_remove;
+ err = device_create_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_name);
+ if (err)
+ goto exit_remove;
+
+ hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto exit_remove;
+ }
+ dev_set_drvdata(&pdev->dev, hwmon_dev);
+
+ if (has_erratum_319() && force)
+ dev_warn(&pdev->dev,
+ "unreliable CPU thermal sensor; check erratum 319\n");
+ return 0;
+
+exit_remove:
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_input);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_max);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit.dev_attr);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr);
+exit:
+ return err;
+}
+
+static void __devexit k10temp_remove(struct pci_dev *pdev)
+{
+ hwmon_device_unregister(dev_get_drvdata(&pdev->dev));
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_input);
+ device_remove_file(&pdev->dev, &dev_attr_temp1_max);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit.dev_attr);
+ device_remove_file(&pdev->dev,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr);
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static struct pci_device_id k10temp_id_table[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, k10temp_id_table);
+
+static struct pci_driver k10temp_driver = {
+ .name = "k10temp",
+ .id_table = k10temp_id_table,
+ .probe = k10temp_probe,
+ .remove = __devexit_p(k10temp_remove),
+};
+
+static int __init k10temp_init(void)
+{
+ return pci_register_driver(&k10temp_driver);
+}
+
+static void __exit k10temp_exit(void)
+{
+ pci_unregister_driver(&k10temp_driver);
+}
+
+module_init(k10temp_init)
+module_exit(k10temp_exit)
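
For reference, the new k10temp show_temp() above decodes the CurTmp field of the Reported Temperature Control register (D18F3xA4): bits 31:21 count in 0.125 degree C steps, which the driver scales to the millidegree values the hwmon sysfs ABI expects. A minimal stand-alone sketch of that conversion, with a made-up register value:

#include <stdio.h>
#include <stdint.h>

/* Same scaling as show_temp(): CurTmp (bits 31:21 of D18F3xA4) counts in
 * 0.125 degC steps, so multiply by 125 to get millidegrees Celsius. */
static unsigned int k10_reg_to_millicelsius(uint32_t regval)
{
        return (regval >> 21) * 125;
}

int main(void)
{
        uint32_t sample = 0x4a200000;   /* made-up register value */

        printf("temp1_input = %u\n", k10_reg_to_millicelsius(sample));
        return 0;
}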
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index cf5afb9a10a..b2f2277cad3 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -43,13 +43,30 @@
#define MDPS_POLL_INTERVAL 50
/*
* The sensor can also generate interrupts (DRDY) but it's pretty pointless
- * because their are generated even if the data do not change. So it's better
+ * because they are generated even if the data do not change. So it's better
* to keep the interrupt for the free-fall event. The values are updated at
* 40Hz (at the lowest frequency), but as it can be pretty time consuming on
* some low processor, we poll the sensor only at 20Hz... enough for the
* joystick.
*/
+#define LIS3_PWRON_DELAY_WAI_12B (5000)
+#define LIS3_PWRON_DELAY_WAI_8B (3000)
+
+/*
+ * LIS3LV02D spec says 1024 LSBs correspond to 1 G -> 1 LSB is 1000/1024 mG
+ * LIS302D spec says: 18 mG / digit
+ * LIS3_ACCURACY is used to increase accuracy of the intermediate
+ * calculation results.
+ */
+#define LIS3_ACCURACY 1024
+/* Sensitivity values for -2G +2G scale */
+#define LIS3_SENSITIVITY_12B ((LIS3_ACCURACY * 1000) / 1024)
+#define LIS3_SENSITIVITY_8B (18 * LIS3_ACCURACY)
+
+#define LIS3_DEFAULT_FUZZ 3
+#define LIS3_DEFAULT_FLAT 3
+
struct lis3lv02d lis3_dev = {
.misc_wait = __WAIT_QUEUE_HEAD_INITIALIZER(lis3_dev.misc_wait),
};
@@ -65,7 +82,7 @@ static s16 lis3lv02d_read_8(struct lis3lv02d *lis3, int reg)
return lo;
}
-static s16 lis3lv02d_read_16(struct lis3lv02d *lis3, int reg)
+static s16 lis3lv02d_read_12(struct lis3lv02d *lis3, int reg)
{
u8 lo, hi;
@@ -102,16 +119,106 @@ static inline int lis3lv02d_get_axis(s8 axis, int hw_values[3])
static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
{
int position[3];
+ int i;
+ mutex_lock(&lis3->mutex);
position[0] = lis3->read_data(lis3, OUTX);
position[1] = lis3->read_data(lis3, OUTY);
position[2] = lis3->read_data(lis3, OUTZ);
+ mutex_unlock(&lis3->mutex);
+
+ for (i = 0; i < 3; i++)
+ position[i] = (position[i] * lis3->scale) / LIS3_ACCURACY;
*x = lis3lv02d_get_axis(lis3->ac.x, position);
*y = lis3lv02d_get_axis(lis3->ac.y, position);
*z = lis3lv02d_get_axis(lis3->ac.z, position);
}
+/* conversion between sampling rate and the register values */
+static int lis3_12_rates[4] = {40, 160, 640, 2560};
+static int lis3_8_rates[2] = {100, 400};
+
+/* ODR is Output Data Rate */
+static int lis3lv02d_get_odr(void)
+{
+ u8 ctrl;
+ int shift;
+
+ lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
+ ctrl &= lis3_dev.odr_mask;
+ shift = ffs(lis3_dev.odr_mask) - 1;
+ return lis3_dev.odrs[(ctrl >> shift)];
+}
+
+static int lis3lv02d_set_odr(int rate)
+{
+ u8 ctrl;
+ int i, len, shift;
+
+ lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
+ ctrl &= ~lis3_dev.odr_mask;
+ len = 1 << hweight_long(lis3_dev.odr_mask); /* # of possible values */
+ shift = ffs(lis3_dev.odr_mask) - 1;
+
+ for (i = 0; i < len; i++)
+ if (lis3_dev.odrs[i] == rate) {
+ lis3_dev.write(&lis3_dev, CTRL_REG1,
+ ctrl | (i << shift));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int lis3lv02d_selftest(struct lis3lv02d *lis3, s16 results[3])
+{
+ u8 reg;
+ s16 x, y, z;
+ u8 selftest;
+ int ret;
+
+ mutex_lock(&lis3->mutex);
+ if (lis3_dev.whoami == WAI_12B)
+ selftest = CTRL1_ST;
+ else
+ selftest = CTRL1_STP;
+
+ lis3->read(lis3, CTRL_REG1, &reg);
+ lis3->write(lis3, CTRL_REG1, (reg | selftest));
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+
+ /* Read directly to avoid axis remap */
+ x = lis3->read_data(lis3, OUTX);
+ y = lis3->read_data(lis3, OUTY);
+ z = lis3->read_data(lis3, OUTZ);
+
+ /* back to normal settings */
+ lis3->write(lis3, CTRL_REG1, reg);
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+
+ results[0] = x - lis3->read_data(lis3, OUTX);
+ results[1] = y - lis3->read_data(lis3, OUTY);
+ results[2] = z - lis3->read_data(lis3, OUTZ);
+
+ ret = 0;
+ if (lis3->pdata) {
+ int i;
+ for (i = 0; i < 3; i++) {
+ /* Check against selftest acceptance limits */
+ if ((results[i] < lis3->pdata->st_min_limits[i]) ||
+ (results[i] > lis3->pdata->st_max_limits[i])) {
+ ret = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ /* test passed */
+fail:
+ mutex_unlock(&lis3->mutex);
+ return ret;
+}
+
void lis3lv02d_poweroff(struct lis3lv02d *lis3)
{
/* disable X,Y,Z axis and power down */
@@ -125,14 +232,19 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3)
lis3->init(lis3);
+ /* LIS3 power on delay is quite long */
+ msleep(lis3->pwron_delay / lis3lv02d_get_odr());
+
/*
* Common configuration
- * BDU: LSB and MSB values are not updated until both have been read.
- * So the value read will always be correct.
+ * BDU: (12-bit sensors only) LSB and MSB values are not updated until
+ * both have been read. So the value read will always be correct.
*/
- lis3->read(lis3, CTRL_REG2, &reg);
- reg |= CTRL2_BDU;
- lis3->write(lis3, CTRL_REG2, reg);
+ if (lis3->whoami == WAI_12B) {
+ lis3->read(lis3, CTRL_REG2, &reg);
+ reg |= CTRL2_BDU;
+ lis3->write(lis3, CTRL_REG2, reg);
+ }
}
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
@@ -273,22 +385,17 @@ static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
int x, y, z;
lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib);
- input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib);
- input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib);
-}
-
-
-static inline void lis3lv02d_calibrate_joystick(void)
-{
- lis3lv02d_get_xyz(&lis3_dev,
- &lis3_dev.xcalib, &lis3_dev.ycalib, &lis3_dev.zcalib);
+ input_report_abs(pidev->input, ABS_X, x);
+ input_report_abs(pidev->input, ABS_Y, y);
+ input_report_abs(pidev->input, ABS_Z, z);
+ input_sync(pidev->input);
}
int lis3lv02d_joystick_enable(void)
{
struct input_dev *input_dev;
int err;
+ int max_val, fuzz, flat;
if (lis3_dev.idev)
return -EINVAL;
@@ -301,8 +408,6 @@ int lis3lv02d_joystick_enable(void)
lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
input_dev = lis3_dev.idev->input;
- lis3lv02d_calibrate_joystick();
-
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
@@ -310,9 +415,12 @@ int lis3lv02d_joystick_enable(void)
input_dev->dev.parent = &lis3_dev.pdev->dev;
set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
- input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
- input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
+ max_val = (lis3_dev.mdps_max_val * lis3_dev.scale) / LIS3_ACCURACY;
+ fuzz = (LIS3_DEFAULT_FUZZ * lis3_dev.scale) / LIS3_ACCURACY;
+ flat = (LIS3_DEFAULT_FLAT * lis3_dev.scale) / LIS3_ACCURACY;
+ input_set_abs_params(input_dev, ABS_X, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
+ input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
err = input_register_polled_device(lis3_dev.idev);
if (err) {
@@ -332,11 +440,23 @@ void lis3lv02d_joystick_disable(void)
if (lis3_dev.irq)
misc_deregister(&lis3lv02d_misc_device);
input_unregister_polled_device(lis3_dev.idev);
+ input_free_polled_device(lis3_dev.idev);
lis3_dev.idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
/* Sysfs stuff */
+static ssize_t lis3lv02d_selftest_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int result;
+ s16 values[3];
+
+ result = lis3lv02d_selftest(&lis3_dev, values);
+ return sprintf(buf, "%s %d %d %d\n", result == 0 ? "OK" : "FAIL",
+ values[0], values[1], values[2]);
+}
+
static ssize_t lis3lv02d_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -346,41 +466,35 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
}
-static ssize_t lis3lv02d_calibrate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t lis3lv02d_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "(%d,%d,%d)\n", lis3_dev.xcalib, lis3_dev.ycalib, lis3_dev.zcalib);
+ return sprintf(buf, "%d\n", lis3lv02d_get_odr());
}
-static ssize_t lis3lv02d_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t lis3lv02d_rate_set(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
- lis3lv02d_calibrate_joystick();
- return count;
-}
+ unsigned long rate;
-/* conversion btw sampling rate and the register values */
-static int lis3lv02dl_df_val[4] = {40, 160, 640, 2560};
-static ssize_t lis3lv02d_rate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 ctrl;
- int val;
+ if (strict_strtoul(buf, 0, &rate))
+ return -EINVAL;
- lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
- val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
- return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
+ if (lis3lv02d_set_odr(rate))
+ return -EINVAL;
+
+ return count;
}
+static DEVICE_ATTR(selftest, S_IRUSR, lis3lv02d_selftest_show, NULL);
static DEVICE_ATTR(position, S_IRUGO, lis3lv02d_position_show, NULL);
-static DEVICE_ATTR(calibrate, S_IRUGO|S_IWUSR, lis3lv02d_calibrate_show,
- lis3lv02d_calibrate_store);
-static DEVICE_ATTR(rate, S_IRUGO, lis3lv02d_rate_show, NULL);
+static DEVICE_ATTR(rate, S_IRUGO | S_IWUSR, lis3lv02d_rate_show,
+ lis3lv02d_rate_set);
static struct attribute *lis3lv02d_attributes[] = {
+ &dev_attr_selftest.attr,
&dev_attr_position.attr,
- &dev_attr_calibrate.attr,
&dev_attr_rate.attr,
NULL
};
@@ -409,22 +523,30 @@ EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
/*
* Initialise the accelerometer and the various subsystems.
- * Should be rather independant of the bus system.
+ * Should be rather independent of the bus system.
*/
int lis3lv02d_init_device(struct lis3lv02d *dev)
{
dev->whoami = lis3lv02d_read_8(dev, WHO_AM_I);
switch (dev->whoami) {
- case LIS_DOUBLE_ID:
- printk(KERN_INFO DRIVER_NAME ": 2-byte sensor found\n");
- dev->read_data = lis3lv02d_read_16;
+ case WAI_12B:
+ printk(KERN_INFO DRIVER_NAME ": 12-bit sensor found\n");
+ dev->read_data = lis3lv02d_read_12;
dev->mdps_max_val = 2048;
+ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_12B;
+ dev->odrs = lis3_12_rates;
+ dev->odr_mask = CTRL1_DF0 | CTRL1_DF1;
+ dev->scale = LIS3_SENSITIVITY_12B;
break;
- case LIS_SINGLE_ID:
- printk(KERN_INFO DRIVER_NAME ": 1-byte sensor found\n");
+ case WAI_8B:
+ printk(KERN_INFO DRIVER_NAME ": 8-bit sensor found\n");
dev->read_data = lis3lv02d_read_8;
dev->mdps_max_val = 128;
+ dev->pwron_delay = LIS3_PWRON_DELAY_WAI_8B;
+ dev->odrs = lis3_8_rates;
+ dev->odr_mask = CTRL1_DR;
+ dev->scale = LIS3_SENSITIVITY_8B;
break;
default:
printk(KERN_ERR DRIVER_NAME
@@ -432,6 +554,8 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
return -EINVAL;
}
+ mutex_init(&dev->mutex);
+
lis3lv02d_add_fs(dev);
lis3lv02d_poweron(dev);
@@ -443,7 +567,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
if (dev->pdata) {
struct lis3lv02d_platform_data *p = dev->pdata;
- if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) {
+ if (p->click_flags && (dev->whoami == WAI_8B)) {
dev->write(dev, CLICK_CFG, p->click_flags);
dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
dev->write(dev, CLICK_LATENCY, p->click_latency);
@@ -454,7 +578,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
(p->click_thresh_y << 4));
}
- if (p->wakeup_flags && (dev->whoami == LIS_SINGLE_ID)) {
+ if (p->wakeup_flags && (dev->whoami == WAI_8B)) {
dev->write(dev, FF_WU_CFG_1, p->wakeup_flags);
dev->write(dev, FF_WU_THS_1, p->wakeup_thresh & 0x7f);
/* default to 2.5ms for now */
@@ -484,4 +608,3 @@ EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
MODULE_DESCRIPTION("ST LIS3LV02Dx three-axis digital accelerometer driver");
MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
MODULE_LICENSE("GPL");
-
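
The new ODR helpers above map between a sampling rate in Hz and the rate field inside CTRL_REG1: the field's position is derived from odr_mask with ffs(), its width with hweight_long(), and the rate's index in the chip-specific odrs[] table is written into those bits. A small stand-alone sketch of the same arithmetic for the 12-bit parts (the helper name and printed example are illustrative, not part of the patch):

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* CTRL_REG1 rate field of the 12-bit parts: mask 0x30 (CTRL1_DF0|CTRL1_DF1),
 * indexing the four supported output data rates from the patch. */
static const int lis3_12_rates[4] = { 40, 160, 640, 2560 };

static int rate_to_ctrl_bits(int rate, unsigned int odr_mask)
{
        int shift = ffs(odr_mask) - 1;  /* lowest set bit of the field */
        int i;

        for (i = 0; i < 4; i++)
                if (lis3_12_rates[i] == rate)
                        return i << shift;
        return -1;      /* unsupported rate */
}

int main(void)
{
        printf("640 Hz -> CTRL_REG1 rate bits 0x%02x\n",
               rate_to_ctrl_bits(640, 0x30));
        return 0;
}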
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 3e1ff46f72d..e6a01f44709 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -2,7 +2,7 @@
* lis3lv02d.h - ST LIS3LV02DL accelerometer driver
*
* Copyright (C) 2007-2008 Yan Burman
- * Copyright (C) 2008 Eric Piel
+ * Copyright (C) 2008-2009 Eric Piel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,20 +22,18 @@
#include <linux/input-polldev.h>
/*
- * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
- * be connected via SPI. There exists also several similar chips (such as LIS302DL or
- * LIS3L02DQ) and they have slightly different registers, but we can provide a
- * common interface for all of them.
- * They can also be connected via I²C.
+ * This driver tries to support the "digital" accelerometer chips from
+ * STMicroelectronics such as LIS3LV02DL, LIS302DL, LIS3L02DQ, LIS331DL,
+ * LIS35DE, or LIS202DL. They are very similar in terms of programming, with
+ * almost the same registers. In addition to differing in physical properties,
+ * they differ in the number of axes (2/3), precision (8/12 bits), and special
+ * features (freefall detection, click...). Unfortunately, not all the
+ * differences can be probed via a register.
+ * They can be connected either via I²C or SPI.
*/
#include <linux/lis3lv02d.h>
-/* 2-byte registers */
-#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */
-/* 1-byte registers */
-#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */
-
enum lis3_reg {
WHO_AM_I = 0x0F,
OFFSET_X = 0x16,
@@ -94,7 +92,13 @@ enum lis3lv02d_reg {
DD_THSE_H = 0x3F,
};
-enum lis3lv02d_ctrl1 {
+enum lis3_who_am_i {
+ WAI_12B = 0x3A, /* 12 bits: LIS3LV02D[LQ]... */
+ WAI_8B = 0x3B, /* 8 bits: LIS[23]02D[LQ]... */
+ WAI_6B = 0x52, /* 6 bits: LIS331DLF - not supported */
+};
+
+enum lis3lv02d_ctrl1_12b {
CTRL1_Xen = 0x01,
CTRL1_Yen = 0x02,
CTRL1_Zen = 0x04,
@@ -104,6 +108,16 @@ enum lis3lv02d_ctrl1 {
CTRL1_PD0 = 0x40,
CTRL1_PD1 = 0x80,
};
+
+/* Delta to ctrl1_12b version */
+enum lis3lv02d_ctrl1_8b {
+ CTRL1_STM = 0x08,
+ CTRL1_STP = 0x10,
+ CTRL1_FS = 0x20,
+ CTRL1_PD = 0x40,
+ CTRL1_DR = 0x80,
+};
+
enum lis3lv02d_ctrl2 {
CTRL2_DAS = 0x01,
CTRL2_SIM = 0x02,
@@ -194,16 +208,20 @@ struct lis3lv02d {
int (*write) (struct lis3lv02d *lis3, int reg, u8 val);
int (*read) (struct lis3lv02d *lis3, int reg, u8 *ret);
- u8 whoami; /* 3Ah: 2-byte registries, 3Bh: 1-byte registries */
+ int *odrs; /* Supported output data rates */
+ u8 odr_mask; /* ODR bit mask */
+ u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
int mdps_max_val;
+ int pwron_delay;
+ int scale; /*
+ * relationship between 1 LSB and mG
+ * (1/1000th of earth gravity)
+ */
struct input_polled_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
atomic_t count; /* interrupt count after last read */
- int xcalib; /* calibrated null value for x */
- int ycalib; /* calibrated null value for y */
- int zcalib; /* calibrated null value for z */
struct axis_conversion ac; /* hw -> logical axis */
u32 irq; /* IRQ number */
@@ -212,6 +230,7 @@ struct lis3lv02d {
unsigned long misc_opened; /* bit0: whether the device is open */
struct lis3lv02d_platform_data *pdata; /* for passing board config */
+ struct mutex mutex; /* Serialize poll and selftest */
};
int lis3lv02d_init_device(struct lis3lv02d *lis3);
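
The new scale member added above feeds the count * scale / LIS3_ACCURACY conversion used by lis3lv02d_get_xyz(), so both the ~1 mG/LSB 12-bit parts and the 18 mG/LSB 8-bit parts report positions in the same 1/1000 g unit. A stand-alone sketch of that scaling using the patch's constants (the sample counts are made up):

#include <stdio.h>

/* Constants copied from the patch; counts_to_mg() mirrors the scaling in
 * lis3lv02d_get_xyz(): count * scale / LIS3_ACCURACY, in 1/1000 g. */
#define LIS3_ACCURACY           1024
#define LIS3_SENSITIVITY_12B    ((LIS3_ACCURACY * 1000) / 1024) /* ~1 mG/LSB */
#define LIS3_SENSITIVITY_8B     (18 * LIS3_ACCURACY)            /* 18 mG/LSB */

static int counts_to_mg(int counts, int scale)
{
        return (counts * scale) / LIS3_ACCURACY;
}

int main(void)
{
        /* made-up samples: roughly 1 g on a 12-bit and on an 8-bit part */
        printf("12-bit, 1024 counts: %d mG\n",
               counts_to_mg(1024, LIS3_SENSITIVITY_12B));
        printf(" 8-bit,   56 counts: %d mG\n",
               counts_to_mg(56, LIS3_SENSITIVITY_8B));
        return 0;
}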
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
new file mode 100644
index 00000000000..dc1f5402c1d
--- /dev/null
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -0,0 +1,183 @@
+/*
+ * drivers/hwmon/lis3lv02d_i2c.c
+ *
+ * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
+ * Driver is based on corresponding SPI driver written by Daniel Mack
+ * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ).
+ *
+ * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "lis3lv02d.h"
+
+#define DRV_NAME "lis3lv02d_i2c"
+
+static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ return i2c_smbus_write_byte_data(c, reg, value);
+}
+
+static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
+{
+ struct i2c_client *c = lis3->bus_priv;
+ *v = i2c_smbus_read_byte_data(c, reg);
+ return 0;
+}
+
+static int lis3_i2c_init(struct lis3lv02d *lis3)
+{
+ u8 reg;
+ int ret;
+
+ /* power up the device */
+ ret = lis3->read(lis3, CTRL_REG1, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg |= CTRL1_PD0;
+ return lis3->write(lis3, CTRL_REG1, reg);
+}
+
+/* Default axis mapping but it can be overwritten by platform data */
+static struct axis_conversion lis3lv02d_axis_map = { LIS3_DEV_X,
+ LIS3_DEV_Y,
+ LIS3_DEV_Z };
+
+static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+ if (pdata) {
+ if (pdata->axis_x)
+ lis3lv02d_axis_map.x = pdata->axis_x;
+
+ if (pdata->axis_y)
+ lis3lv02d_axis_map.y = pdata->axis_y;
+
+ if (pdata->axis_z)
+ lis3lv02d_axis_map.z = pdata->axis_z;
+
+ if (pdata->setup_resources)
+ ret = pdata->setup_resources();
+
+ if (ret)
+ goto fail;
+ }
+
+ lis3_dev.pdata = pdata;
+ lis3_dev.bus_priv = client;
+ lis3_dev.init = lis3_i2c_init;
+ lis3_dev.read = lis3_i2c_read;
+ lis3_dev.write = lis3_i2c_write;
+ lis3_dev.irq = client->irq;
+ lis3_dev.ac = lis3lv02d_axis_map;
+
+ i2c_set_clientdata(client, &lis3_dev);
+ ret = lis3lv02d_init_device(&lis3_dev);
+fail:
+ return ret;
+}
+
+static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
+{
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+ struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
+
+ if (pdata && pdata->release_resources)
+ pdata->release_resources();
+
+ lis3lv02d_joystick_disable();
+ lis3lv02d_poweroff(lis3);
+
+ return lis3lv02d_remove_fs(&lis3_dev);
+}
+
+#ifdef CONFIG_PM
+static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ if (!lis3->pdata->wakeup_flags)
+ lis3lv02d_poweroff(lis3);
+ return 0;
+}
+
+static int lis3lv02d_i2c_resume(struct i2c_client *client)
+{
+ struct lis3lv02d *lis3 = i2c_get_clientdata(client);
+
+ if (!lis3->pdata->wakeup_flags)
+ lis3lv02d_poweron(lis3);
+ return 0;
+}
+
+static void lis3lv02d_i2c_shutdown(struct i2c_client *client)
+{
+ lis3lv02d_i2c_suspend(client, PMSG_SUSPEND);
+}
+#else
+#define lis3lv02d_i2c_suspend NULL
+#define lis3lv02d_i2c_resume NULL
+#define lis3lv02d_i2c_shutdown NULL
+#endif
+
+static const struct i2c_device_id lis3lv02d_id[] = {
+ {"lis3lv02d", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);
+
+static struct i2c_driver lis3lv02d_i2c_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .suspend = lis3lv02d_i2c_suspend,
+ .shutdown = lis3lv02d_i2c_shutdown,
+ .resume = lis3lv02d_i2c_resume,
+ .probe = lis3lv02d_i2c_probe,
+ .remove = __devexit_p(lis3lv02d_i2c_remove),
+ .id_table = lis3lv02d_id,
+};
+
+static int __init lis3lv02d_init(void)
+{
+ return i2c_add_driver(&lis3lv02d_i2c_driver);
+}
+
+static void __exit lis3lv02d_exit(void)
+{
+ i2c_del_driver(&lis3lv02d_i2c_driver);
+}
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_DESCRIPTION("lis3lv02d I2C interface");
+MODULE_LICENSE("GPL");
+
+module_init(lis3lv02d_init);
+module_exit(lis3lv02d_exit);
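
The I2C front-end above relies on board code to describe the device. A hypothetical board-file registration might look as follows; only the "lis3lv02d" name and the axis_* platform-data fields come from the patch, while the bus number, slave address 0x1d and the axis choice are assumptions:

#include <linux/i2c.h>
#include <linux/lis3lv02d.h>

/* Hypothetical board support for the lis3lv02d I2C front-end. */
static struct lis3lv02d_platform_data board_lis3_pdata = {
        .axis_x = LIS3_DEV_X,
        .axis_y = LIS3_DEV_Y,
        .axis_z = LIS3_DEV_Z,
};

static struct i2c_board_info board_lis3_i2c[] __initdata = {
        {
                I2C_BOARD_INFO("lis3lv02d", 0x1d),
                .platform_data = &board_lis3_pdata,
        },
};

/* from the board's init code:
 *      i2c_register_board_info(3, board_lis3_i2c, ARRAY_SIZE(board_lis3_i2c));
 */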
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 5da66ab04f7..bf81aff7051 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -56,12 +56,6 @@
static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(lm63);
-
-/*
* The LM63 registers
*/
@@ -134,8 +128,7 @@ static int lm63_remove(struct i2c_client *client);
static struct lm63_data *lm63_update_device(struct device *dev);
-static int lm63_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
/*
@@ -143,7 +136,7 @@ static void lm63_init_client(struct i2c_client *client);
*/
static const struct i2c_device_id lm63_id[] = {
- { "lm63", lm63 },
+ { "lm63", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -157,7 +150,7 @@ static struct i2c_driver lm63_driver = {
.remove = lm63_remove,
.id_table = lm63_id,
.detect = lm63_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -423,7 +416,7 @@ static const struct attribute_group lm63_group_fan1 = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm63_detect(struct i2c_client *new_client, int kind,
+static int lm63_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
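
The lm63 change above is the first instance of a conversion repeated across the remaining hwmon chip drivers in this series: the legacy I2C_CLIENT_INSMOD_*() macro and its generated addr_data go away, the probe addresses move to the driver's .address_list, and the .detect() callback loses its kind argument. A hedged sketch of the resulting driver shape (names are illustrative; probe, remove and id_table are omitted for brevity):

#include <linux/i2c.h>

/* illustrative only -- not a real driver */
static const unsigned short example_i2c[] = { 0x4c, I2C_CLIENT_END };

static int example_detect(struct i2c_client *client,
                          struct i2c_board_info *info)
{
        /* probe identification registers here, then report the chip name */
        strlcpy(info->type, "example", I2C_NAME_SIZE);
        return 0;
}

static struct i2c_driver example_driver = {
        .class          = I2C_CLASS_HWMON,
        .driver         = { .name = "example" },
        .detect         = example_detect,
        .address_list   = example_i2c,
        /* .probe, .remove and .id_table omitted */
};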
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 0bf8b2a8e9f..c5f39ba103c 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -27,9 +27,6 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
0x4d, 0x4e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm73);
-
/* LM73 registers */
#define LM73_REG_INPUT 0x00
#define LM73_REG_CONF 0x01
@@ -145,13 +142,13 @@ static int lm73_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm73_ids[] = {
- { "lm73", lm73 },
+ { "lm73", 0 },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm73_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm73_detect(struct i2c_client *new_client, int kind,
+static int lm73_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -182,7 +179,7 @@ static struct i2c_driver lm73_driver = {
.remove = lm73_remove,
.id_table = lm73_ids,
.detect = lm73_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* module glue */
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e392548cccb..8ae2cfe2d82 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -32,15 +32,12 @@
/*
* This driver handles the LM75 and compatible digital temperature sensors.
- * Only types which are _not_ listed in I2C_CLIENT_INSMOD_*() need to be
- * listed here. We start at 9 since I2C_CLIENT_INSMOD_*() currently allow
- * definition of up to 8 chip types (plus zero).
*/
enum lm75_type { /* keep sorted in alphabetical order */
- ds1775 = 9,
+ ds1775,
ds75,
- /* lm75 -- in I2C_CLIENT_INSMOD_1() */
+ lm75,
lm75a,
max6625,
max6626,
@@ -58,9 +55,6 @@ enum lm75_type { /* keep sorted in alphabetical order */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm75);
-
/* The LM75 registers */
#define LM75_REG_CONF 0x01
@@ -234,7 +228,7 @@ static const struct i2c_device_id lm75_ids[] = {
MODULE_DEVICE_TABLE(i2c, lm75_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm75_detect(struct i2c_client *new_client, int kind,
+static int lm75_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -295,7 +289,7 @@ static struct i2c_driver lm75_driver = {
.remove = lm75_remove,
.id_table = lm75_ids,
.detect = lm75_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*-----------------------------------------------------------------------*/
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index ac067fd1948..b28a297be50 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -39,9 +39,6 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm77);
-
/* The LM77 registers */
#define LM77_REG_TEMP 0x00
#define LM77_REG_CONF 0x01
@@ -66,8 +63,7 @@ struct lm77_data {
static int lm77_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int lm77_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm77_init_client(struct i2c_client *client);
static int lm77_remove(struct i2c_client *client);
static u16 lm77_read_value(struct i2c_client *client, u8 reg);
@@ -77,7 +73,7 @@ static struct lm77_data *lm77_update_device(struct device *dev);
static const struct i2c_device_id lm77_id[] = {
- { "lm77", lm77 },
+ { "lm77", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm77_id);
@@ -92,7 +88,7 @@ static struct i2c_driver lm77_driver = {
.remove = lm77_remove,
.id_table = lm77_id,
.detect = lm77_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* straight from the datasheet */
@@ -245,7 +241,7 @@ static const struct attribute_group lm77_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm77_detect(struct i2c_client *new_client, int kind,
+static int lm77_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 5978291cebb..cadcbd90ff3 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -41,8 +41,7 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
static unsigned short isa_address = 0x290;
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(lm78, lm79);
+enum chips { lm78, lm79 };
/* Many LM78 constants specified below */
@@ -142,7 +141,7 @@ struct lm78_data {
};
-static int lm78_i2c_detect(struct i2c_client *client, int kind,
+static int lm78_i2c_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int lm78_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -173,7 +172,7 @@ static struct i2c_driver lm78_driver = {
.remove = lm78_i2c_remove,
.id_table = lm78_i2c_id,
.detect = lm78_i2c_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static struct platform_driver lm78_isa_driver = {
@@ -558,7 +557,7 @@ static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
return 1;
}
-static int lm78_i2c_detect(struct i2c_client *client, int kind,
+static int lm78_i2c_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
int i;
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index bcffc189940..18a0e6c5fe8 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -35,9 +35,6 @@
static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm80);
-
/* Many LM80 constants specified below */
/* The LM80 registers */
@@ -133,8 +130,7 @@ struct lm80_data {
static int lm80_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int lm80_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm80_init_client(struct i2c_client *client);
static int lm80_remove(struct i2c_client *client);
static struct lm80_data *lm80_update_device(struct device *dev);
@@ -146,7 +142,7 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
*/
static const struct i2c_device_id lm80_id[] = {
- { "lm80", lm80 },
+ { "lm80", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm80_id);
@@ -160,7 +156,7 @@ static struct i2c_driver lm80_driver = {
.remove = lm80_remove,
.id_table = lm80_id,
.detect = lm80_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -447,8 +443,7 @@ static const struct attribute_group lm80_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm80_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int i, cur;
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 08b03e6ed0b..8290476aee4 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -51,11 +51,7 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_2(lm83, lm82);
+enum chips { lm83, lm82 };
/*
* The LM83 registers
@@ -118,7 +114,7 @@ static const u8 LM83_REG_W_HIGH[] = {
* Functions declaration
*/
-static int lm83_detect(struct i2c_client *new_client, int kind,
+static int lm83_detect(struct i2c_client *new_client,
struct i2c_board_info *info);
static int lm83_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -145,7 +141,7 @@ static struct i2c_driver lm83_driver = {
.remove = lm83_remove,
.id_table = lm83_id,
.detect = lm83_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -291,7 +287,7 @@ static const struct attribute_group lm83_group_opt = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm83_detect(struct i2c_client *new_client, int kind,
+static int lm83_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d56da2e7470..b3841a61559 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -38,9 +38,11 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_7(lm85b, lm85c, adm1027, adt7463, adt7468, emc6d100,
- emc6d102);
+enum chips {
+ any_chip, lm85b, lm85c,
+ adm1027, adt7463, adt7468,
+ emc6d100, emc6d102
+};
/* The LM85 registers */
@@ -323,8 +325,7 @@ struct lm85_data {
struct lm85_zone zone[3];
};
-static int lm85_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info);
static int lm85_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int lm85_remove(struct i2c_client *client);
@@ -357,7 +358,7 @@ static struct i2c_driver lm85_driver = {
.remove = lm85_remove,
.id_table = lm85_id,
.detect = lm85_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
@@ -1156,8 +1157,7 @@ static int lm85_is_fake(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm85_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int address = client->addr;
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 4929b1815ee..f1e6e7512ff 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -74,11 +74,7 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_2(lm87, adm1024);
+enum chips { lm87, adm1024 };
/*
* The LM87 registers
@@ -158,7 +154,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
static int lm87_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int lm87_detect(struct i2c_client *new_client, int kind,
+static int lm87_detect(struct i2c_client *new_client,
struct i2c_board_info *info);
static void lm87_init_client(struct i2c_client *client);
static int lm87_remove(struct i2c_client *client);
@@ -184,7 +180,7 @@ static struct i2c_driver lm87_driver = {
.remove = lm87_remove,
.id_table = lm87_id,
.detect = lm87_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -662,7 +658,7 @@ static const struct attribute_group lm87_group_opt = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm87_detect(struct i2c_client *new_client, int kind,
+static int lm87_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index b7c905f50ed..7c9bdc16742 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -93,12 +93,7 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680,
- max6646);
+enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646 };
/*
* The LM90 registers
@@ -152,8 +147,7 @@ I2C_CLIENT_INSMOD_8(lm90, adm1032, lm99, lm86, max6657, adt7461, max6680,
* Functions declaration
*/
-static int lm90_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info);
+static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info);
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static void lm90_init_client(struct i2c_client *client);
@@ -192,7 +186,7 @@ static struct i2c_driver lm90_driver = {
.remove = lm90_remove,
.id_table = lm90_id,
.detect = lm90_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -656,7 +650,7 @@ static int lm90_read_reg(struct i2c_client* client, u8 reg, u8 *value)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm90_detect(struct i2c_client *new_client, int kind,
+static int lm90_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 47ac698709d..7c31e6205f8 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -54,9 +54,6 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm92);
-
/* The LM92 registers */
#define LM92_REG_CONFIG 0x01 /* 8-bit, RW */
#define LM92_REG_TEMP 0x00 /* 16-bit, RO */
@@ -319,7 +316,7 @@ static const struct attribute_group lm92_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm92_detect(struct i2c_client *new_client, int kind,
+static int lm92_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -401,7 +398,7 @@ static int lm92_remove(struct i2c_client *client)
*/
static const struct i2c_device_id lm92_id[] = {
- { "lm92", lm92 },
+ { "lm92", 0 },
/* max6635 could be added here */
{ }
};
@@ -416,7 +413,7 @@ static struct i2c_driver lm92_driver = {
.remove = lm92_remove,
.id_table = lm92_id,
.detect = lm92_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init sensors_lm92_init(void)
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 124dd7cea54..6669255aadc 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -145,7 +145,6 @@
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm93);
static int disable_block;
module_param(disable_block, bool, 0);
@@ -2501,8 +2500,7 @@ static void lm93_init_client(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm93_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int mfr, ver;
@@ -2603,7 +2601,7 @@ static int lm93_remove(struct i2c_client *client)
}
static const struct i2c_device_id lm93_id[] = {
- { "lm93", lm93 },
+ { "lm93", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm93_id);
@@ -2617,7 +2615,7 @@ static struct i2c_driver lm93_driver = {
.remove = lm93_remove,
.id_table = lm93_id,
.detect = lm93_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init lm93_init(void)
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 906b896cf1d..8fc8eb8cba4 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -39,9 +39,6 @@
static const unsigned short normal_i2c[] = {
0x19, 0x2a, 0x2b, I2C_CLIENT_END};
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(lm95241);
-
/* LM95241 registers */
#define LM95241_REG_R_MAN_ID 0xFE
#define LM95241_REG_R_CHIP_ID 0xFF
@@ -310,7 +307,7 @@ static const struct attribute_group lm95241_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm95241_detect(struct i2c_client *new_client, int kind,
+static int lm95241_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
@@ -446,7 +443,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95241_id[] = {
- { "lm95241", lm95241 },
+ { "lm95241", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm95241_id);
@@ -460,7 +457,7 @@ static struct i2c_driver lm95241_driver = {
.remove = lm95241_remove,
.id_table = lm95241_id,
.detect = lm95241_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init sensors_lm95241_init(void)
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 7fcf5ff89e7..022ded09810 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -41,12 +41,6 @@ static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(max1619);
-
-/*
* The MAX1619 registers
*/
@@ -88,7 +82,7 @@ static int temp_to_reg(int val)
static int max1619_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int max1619_detect(struct i2c_client *client, int kind,
+static int max1619_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void max1619_init_client(struct i2c_client *client);
static int max1619_remove(struct i2c_client *client);
@@ -99,7 +93,7 @@ static struct max1619_data *max1619_update_device(struct device *dev);
*/
static const struct i2c_device_id max1619_id[] = {
- { "max1619", max1619 },
+ { "max1619", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
@@ -113,7 +107,7 @@ static struct i2c_driver max1619_driver = {
.remove = max1619_remove,
.id_table = max1619_id,
.detect = max1619_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -226,7 +220,7 @@ static const struct attribute_group max1619_group = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int max1619_detect(struct i2c_client *client, int kind,
+static int max1619_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 1da561e0cb3..a0160ee5cae 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -62,8 +62,6 @@ module_param(fan_voltage, int, S_IRUGO);
module_param(prescaler, int, S_IRUGO);
module_param(clock, int, S_IRUGO);
-I2C_CLIENT_INSMOD_1(max6650);
-
/*
* MAX 6650/6651 registers
*/
@@ -116,7 +114,7 @@ I2C_CLIENT_INSMOD_1(max6650);
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int max6650_detect(struct i2c_client *client, int kind,
+static int max6650_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int max6650_init_client(struct i2c_client *client);
static int max6650_remove(struct i2c_client *client);
@@ -127,7 +125,7 @@ static struct max6650_data *max6650_update_device(struct device *dev);
*/
static const struct i2c_device_id max6650_id[] = {
- { "max6650", max6650 },
+ { "max6650", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6650_id);
@@ -141,7 +139,7 @@ static struct i2c_driver max6650_driver = {
.remove = max6650_remove,
.id_table = max6650_id,
.detect = max6650_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -528,7 +526,7 @@ static struct attribute_group max6650_attr_grp = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int max6650_detect(struct i2c_client *client, int kind,
+static int max6650_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 1d7ffebd679..d4478794985 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -29,7 +29,6 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(pcf8591);
static int input_mode;
module_param(input_mode, int, 0);
@@ -169,7 +168,7 @@ static const struct attribute_group pcf8591_attr_group_opt = {
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int pcf8591_detect(struct i2c_client *client, int kind,
+static int pcf8591_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -299,7 +298,7 @@ static struct i2c_driver pcf8591_driver = {
.class = I2C_CLASS_HWMON, /* Nearest choice */
.detect = pcf8591_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init pcf8591_init(void)
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index ebe38b680ee..864a371f6eb 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data)
int d1 = 0;
int i;
- for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++)
+ for (i = 1; i < ARRAY_SIZE(temppoints); i++)
/* Find pointer to interpolate */
if (data->supply_uV > temppoints[i - 1].vdd) {
d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd)
@@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data)
const int c1 = -4;
const int c2 = 40500; /* x 10 ^ -6 */
- const int c3 = 2800; /* x10 ^ -9 */
+ const int c3 = -2800; /* x10 ^ -9 */
RHlinear = c1*1000
+ c2 * data->val_humid/1000
+ (data->val_humid * data->val_humid * c3)/1000000;
- return (temp - 25000) * (10000 + 800 * data->val_humid)
+ return (temp - 25000) * (10000 + 80 * data->val_humid)
/ 1000000 + RHlinear;
}
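
The sht15 fix above corrects two constants in the relative-humidity conversion: the quadratic coefficient c3 is negative per the datasheet, and the temperature-compensation coefficient is 0.00008 %RH per count and degree, i.e. 80 rather than 800 in the driver's fixed-point units. A stand-alone check of the corrected formula, with a made-up reading:

#include <stdio.h>

/* Fixed-point coefficients as in the corrected driver: c2/c3 are the
 * datasheet's 0.0405 and -2.8e-6 scaled by 10^6 and 10^9; the result is
 * in 1/1000 %RH.  val_humid is the raw sensor word, temp is millidegrees. */
static long sht15_humid_milli(long val_humid, long temp)
{
        const long c1 = -4;
        const long c2 = 40500;          /* x 10^-6 */
        const long long c3 = -2800;     /* x 10^-9, sign fixed by the patch */
        long long rh_linear = c1 * 1000
                        + c2 * val_humid / 1000
                        + val_humid * val_humid * c3 / 1000000;

        /* temperature compensation: 0.00008 %RH per count and degree,
         * i.e. 80 (not 800) in these units */
        return (temp - 25000) * (10000LL + 80 * val_humid) / 1000000
                        + rh_linear;
}

int main(void)
{
        /* made-up reading: raw humidity word 1500 at 30.000 degC */
        printf("%ld m%%RH\n", sht15_humid_milli(1500, 30000));
        return 0;
}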
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 8ad50fdba00..9ca97818bd4 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -136,11 +136,11 @@ struct smsc47m1_data {
struct smsc47m1_sio_data {
enum chips type;
+ u8 activate; /* Remember initial device state */
};
-static int smsc47m1_probe(struct platform_device *pdev);
-static int __devexit smsc47m1_remove(struct platform_device *pdev);
+static int __exit smsc47m1_remove(struct platform_device *pdev);
static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
int init);
@@ -160,8 +160,7 @@ static struct platform_driver smsc47m1_driver = {
.owner = THIS_MODULE,
.name = DRVNAME,
},
- .probe = smsc47m1_probe,
- .remove = __devexit_p(smsc47m1_remove),
+ .remove = __exit_p(smsc47m1_remove),
};
static ssize_t get_fan(struct device *dev, struct device_attribute
@@ -470,24 +469,126 @@ static int __init smsc47m1_find(unsigned short *addr,
superio_select();
*addr = (superio_inb(SUPERIO_REG_BASE) << 8)
| superio_inb(SUPERIO_REG_BASE + 1);
- val = superio_inb(SUPERIO_REG_ACT);
- if (*addr == 0 || (val & 0x01) == 0) {
- pr_info(DRVNAME ": Device is disabled, will not use\n");
+ if (*addr == 0) {
+ pr_info(DRVNAME ": Device address not set, will not use\n");
superio_exit();
return -ENODEV;
}
+ /* Enable only if address is set (needed at least on the
+ * Compaq Presario S4000NX) */
+ sio_data->activate = superio_inb(SUPERIO_REG_ACT);
+ if ((sio_data->activate & 0x01) == 0) {
+ pr_info(DRVNAME ": Enabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate | 0x01);
+ }
+
superio_exit();
return 0;
}
-static int __devinit smsc47m1_probe(struct platform_device *pdev)
+/* Restore device to its initial state */
+static void __init smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
+{
+ if ((sio_data->activate & 0x01) == 0) {
+ superio_enter();
+ superio_select();
+
+ pr_info(DRVNAME ": Disabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+
+ superio_exit();
+ }
+}
+
+#define CHECK 1
+#define REQUEST 2
+#define RELEASE 3
+
+/*
+ * This function can be used to:
+ * - test for resource conflicts with ACPI
+ * - request the resources
+ * - release the resources
+ * We only allocate the I/O ports we really need, to minimize the risk of
+ * conflicts with ACPI or with other drivers.
+ */
+static int smsc47m1_handle_resources(unsigned short address, enum chips type,
+ int action, struct device *dev)
+{
+ static const u8 ports_m1[] = {
+ /* register, region length */
+ 0x04, 1,
+ 0x33, 4,
+ 0x56, 7,
+ };
+
+ static const u8 ports_m2[] = {
+ /* register, region length */
+ 0x04, 1,
+ 0x09, 1,
+ 0x2c, 2,
+ 0x35, 4,
+ 0x56, 7,
+ 0x69, 4,
+ };
+
+ int i, ports_size, err;
+ const u8 *ports;
+
+ switch (type) {
+ case smsc47m1:
+ default:
+ ports = ports_m1;
+ ports_size = ARRAY_SIZE(ports_m1);
+ break;
+ case smsc47m2:
+ ports = ports_m2;
+ ports_size = ARRAY_SIZE(ports_m2);
+ break;
+ }
+
+ for (i = 0; i + 1 < ports_size; i += 2) {
+ unsigned short start = address + ports[i];
+ unsigned short len = ports[i + 1];
+
+ switch (action) {
+ case CHECK:
+ /* Only check for conflicts */
+ err = acpi_check_region(start, len, DRVNAME);
+ if (err)
+ return err;
+ break;
+ case REQUEST:
+ /* Request the resources */
+ if (!request_region(start, len, DRVNAME)) {
+ dev_err(dev, "Region 0x%hx-0x%hx already in "
+ "use!\n", start, start + len);
+
+ /* Undo all requests */
+ for (i -= 2; i >= 0; i -= 2)
+ release_region(address + ports[i],
+ ports[i + 1]);
+ return -EBUSY;
+ }
+ break;
+ case RELEASE:
+ /* Release the resources */
+ release_region(start, len);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int __init smsc47m1_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct smsc47m1_sio_data *sio_data = dev->platform_data;
struct smsc47m1_data *data;
struct resource *res;
- int err = 0;
+ int err;
int fan1, fan2, fan3, pwm1, pwm2, pwm3;
static const char *names[] = {
@@ -496,12 +597,10 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, SMSC_EXTENT, DRVNAME)) {
- dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
- (unsigned long)res->start,
- (unsigned long)res->end);
- return -EBUSY;
- }
+ err = smsc47m1_handle_resources(res->start, sio_data->type,
+ REQUEST, dev);
+ if (err < 0)
+ return err;
if (!(data = kzalloc(sizeof(struct smsc47m1_data), GFP_KERNEL))) {
err = -ENOMEM;
@@ -637,11 +736,11 @@ error_free:
platform_set_drvdata(pdev, NULL);
kfree(data);
error_release:
- release_region(res->start, SMSC_EXTENT);
+ smsc47m1_handle_resources(res->start, sio_data->type, RELEASE, dev);
return err;
}
-static int __devexit smsc47m1_remove(struct platform_device *pdev)
+static int __exit smsc47m1_remove(struct platform_device *pdev)
{
struct smsc47m1_data *data = platform_get_drvdata(pdev);
struct resource *res;
@@ -650,7 +749,7 @@ static int __devexit smsc47m1_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &smsc47m1_group);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, SMSC_EXTENT);
+ smsc47m1_handle_resources(res->start, data->type, RELEASE, &pdev->dev);
platform_set_drvdata(pdev, NULL);
kfree(data);
@@ -717,7 +816,7 @@ static int __init smsc47m1_device_add(unsigned short address,
};
int err;
- err = acpi_check_resource_conflict(&res);
+ err = smsc47m1_handle_resources(address, sio_data->type, CHECK, NULL);
if (err)
goto exit;
@@ -766,27 +865,29 @@ static int __init sm_smsc47m1_init(void)
if (smsc47m1_find(&address, &sio_data))
return -ENODEV;
- err = platform_driver_register(&smsc47m1_driver);
+ /* Sets global pdev as a side effect */
+ err = smsc47m1_device_add(address, &sio_data);
if (err)
goto exit;
- /* Sets global pdev as a side effect */
- err = smsc47m1_device_add(address, &sio_data);
+ err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
if (err)
- goto exit_driver;
+ goto exit_device;
return 0;
-exit_driver:
- platform_driver_unregister(&smsc47m1_driver);
+exit_device:
+ platform_device_unregister(pdev);
+ smsc47m1_restore(&sio_data);
exit:
return err;
}
static void __exit sm_smsc47m1_exit(void)
{
- platform_device_unregister(pdev);
platform_driver_unregister(&smsc47m1_driver);
+ smsc47m1_restore(pdev->dev.platform_data);
+ platform_device_unregister(pdev);
}
MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 4d88c045781..40b26673d87 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -36,9 +36,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(smsc47m192);
-
/* SMSC47M192 registers */
#define SMSC47M192_REG_IN(nr) ((nr)<6 ? (0x20 + (nr)) : \
(0x50 + (nr) - 6))
@@ -115,13 +112,13 @@ struct smsc47m192_data {
static int smsc47m192_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int smsc47m192_detect(struct i2c_client *client, int kind,
+static int smsc47m192_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int smsc47m192_remove(struct i2c_client *client);
static struct smsc47m192_data *smsc47m192_update_device(struct device *dev);
static const struct i2c_device_id smsc47m192_id[] = {
- { "smsc47m192", smsc47m192 },
+ { "smsc47m192", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, smsc47m192_id);
@@ -135,7 +132,7 @@ static struct i2c_driver smsc47m192_driver = {
.remove = smsc47m192_remove,
.id_table = smsc47m192_id,
.detect = smsc47m192_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* Voltages */
@@ -481,7 +478,7 @@ static void smsc47m192_init_client(struct i2c_client *client)
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int smsc47m192_detect(struct i2c_client *client, int kind,
+static int smsc47m192_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 4b793849c73..7dfb4dec4c5 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(thmc50, adm1022);
+enum chips { thmc50, adm1022 };
static unsigned short adm1022_temp3[16];
static unsigned int adm1022_temp3_num;
@@ -84,7 +84,7 @@ struct thmc50_data {
u8 alarms;
};
-static int thmc50_detect(struct i2c_client *client, int kind,
+static int thmc50_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int thmc50_probe(struct i2c_client *client,
const struct i2c_device_id *id);
@@ -108,7 +108,7 @@ static struct i2c_driver thmc50_driver = {
.remove = thmc50_remove,
.id_table = thmc50_id,
.detect = thmc50_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static ssize_t show_analog_out(struct device *dev,
@@ -286,7 +286,7 @@ static const struct attribute_group temp3_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int thmc50_detect(struct i2c_client *client, int kind,
+static int thmc50_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
unsigned company;
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ee9673467c4..a13b30e8d8d 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -42,8 +42,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_2(tmp401, tmp411);
+enum chips { tmp401, tmp411 };
/*
* The TMP401 registers, note some registers have different addresses for
@@ -98,7 +97,7 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int tmp401_detect(struct i2c_client *client, int kind,
+static int tmp401_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int tmp401_remove(struct i2c_client *client);
static struct tmp401_data *tmp401_update_device(struct device *dev);
@@ -123,7 +122,7 @@ static struct i2c_driver tmp401_driver = {
.remove = tmp401_remove,
.id_table = tmp401_id,
.detect = tmp401_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -488,7 +487,7 @@ static void tmp401_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, TMP401_CONFIG_WRITE, config);
}
-static int tmp401_detect(struct i2c_client *client, int _kind,
+static int tmp401_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
enum chips kind;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index bb5464a289c..4f7c051e2d7 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -39,8 +39,7 @@
static unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_3(tmp421, tmp422, tmp423);
+enum chips { tmp421, tmp422, tmp423 };
/* The TMP421 registers */
#define TMP421_CONFIG_REG_1 0x09
@@ -223,7 +222,7 @@ static int tmp421_init_client(struct i2c_client *client)
return 0;
}
-static int tmp421_detect(struct i2c_client *client, int _kind,
+static int tmp421_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
enum chips kind;
@@ -322,7 +321,7 @@ static struct i2c_driver tmp421_driver = {
.remove = tmp421_remove,
.id_table = tmp421_id,
.detect = tmp421_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init tmp421_init(void)
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
new file mode 100644
index 00000000000..7442cf75485
--- /dev/null
+++ b/drivers/hwmon/via-cputemp.c
@@ -0,0 +1,356 @@
+/*
+ * via-cputemp.c - Driver for VIA CPU core temperature monitoring
+ * Copyright (C) 2009 VIA Technologies, Inc.
+ *
+ * based on existing coretemp.c, which is
+ *
+ * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/hwmon.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+
+#define DRVNAME "via_cputemp"
+
+enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME } SHOW;
+
+/*
+ * Functions declaration
+ */
+
+struct via_cputemp_data {
+ struct device *hwmon_dev;
+ const char *name;
+ u32 id;
+ u32 msr;
+};
+
+/*
+ * Sysfs stuff
+ */
+
+static ssize_t show_name(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ int ret;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct via_cputemp_data *data = dev_get_drvdata(dev);
+
+ if (attr->index == SHOW_NAME)
+ ret = sprintf(buf, "%s\n", data->name);
+ else /* show label */
+ ret = sprintf(buf, "Core %d\n", data->id);
+ return ret;
+}
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct via_cputemp_data *data = dev_get_drvdata(dev);
+ u32 eax, edx;
+ int err;
+
+ err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
+ if (err)
+ return -EAGAIN;
+
+ return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
+ SHOW_TEMP);
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
+
+static struct attribute *via_cputemp_attributes[] = {
+ &sensor_dev_attr_name.dev_attr.attr,
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group via_cputemp_group = {
+ .attrs = via_cputemp_attributes,
+};
+
+static int __devinit via_cputemp_probe(struct platform_device *pdev)
+{
+ struct via_cputemp_data *data;
+ struct cpuinfo_x86 *c = &cpu_data(pdev->id);
+ int err;
+ u32 eax, edx;
+
+ data = kzalloc(sizeof(struct via_cputemp_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Out of memory\n");
+ goto exit;
+ }
+
+ data->id = pdev->id;
+ data->name = "via_cputemp";
+
+ switch (c->x86_model) {
+ case 0xA:
+ /* C7 A */
+ case 0xD:
+ /* C7 D */
+ data->msr = 0x1169;
+ break;
+ case 0xF:
+ /* Nano */
+ data->msr = 0x1423;
+ break;
+ default:
+ err = -ENODEV;
+ goto exit_free;
+ }
+
+ /* test if we can access the TEMPERATURE MSR */
+ err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Unable to access TEMPERATURE MSR, giving up\n");
+ goto exit_free;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &via_cputemp_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&pdev->dev, "Class registration failed (%d)\n",
+ err);
+ goto exit_remove;
+ }
+
+ return 0;
+
+exit_remove:
+ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
+exit_free:
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+exit:
+ return err;
+}
+
+static int __devexit via_cputemp_remove(struct platform_device *pdev)
+{
+ struct via_cputemp_data *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+ return 0;
+}
+
+static struct platform_driver via_cputemp_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRVNAME,
+ },
+ .probe = via_cputemp_probe,
+ .remove = __devexit_p(via_cputemp_remove),
+};
+
+struct pdev_entry {
+ struct list_head list;
+ struct platform_device *pdev;
+ unsigned int cpu;
+};
+
+static LIST_HEAD(pdev_list);
+static DEFINE_MUTEX(pdev_list_mutex);
+
+static int __cpuinit via_cputemp_device_add(unsigned int cpu)
+{
+ int err;
+ struct platform_device *pdev;
+ struct pdev_entry *pdev_entry;
+
+ pdev = platform_device_alloc(DRVNAME, cpu);
+ if (!pdev) {
+ err = -ENOMEM;
+ printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+ goto exit;
+ }
+
+ pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
+ if (!pdev_entry) {
+ err = -ENOMEM;
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(pdev);
+ if (err) {
+ printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
+ err);
+ goto exit_device_free;
+ }
+
+ pdev_entry->pdev = pdev;
+ pdev_entry->cpu = cpu;
+ mutex_lock(&pdev_list_mutex);
+ list_add_tail(&pdev_entry->list, &pdev_list);
+ mutex_unlock(&pdev_list_mutex);
+
+ return 0;
+
+exit_device_free:
+ kfree(pdev_entry);
+exit_device_put:
+ platform_device_put(pdev);
+exit:
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void via_cputemp_device_remove(unsigned int cpu)
+{
+ struct pdev_entry *p, *n;
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ if (p->cpu == cpu) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ }
+ mutex_unlock(&pdev_list_mutex);
+}
+
+static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long) hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ via_cputemp_device_add(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ via_cputemp_device_remove(cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block via_cputemp_cpu_notifier __refdata = {
+ .notifier_call = via_cputemp_cpu_callback,
+};
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+static int __init via_cputemp_init(void)
+{
+ int i, err;
+ struct pdev_entry *p, *n;
+
+ if (cpu_data(0).x86_vendor != X86_VENDOR_CENTAUR) {
+ printk(KERN_DEBUG DRVNAME ": Not a VIA CPU\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ err = platform_driver_register(&via_cputemp_driver);
+ if (err)
+ goto exit;
+
+ for_each_online_cpu(i) {
+ struct cpuinfo_x86 *c = &cpu_data(i);
+
+ if (c->x86 != 6)
+ continue;
+
+ if (c->x86_model < 0x0a)
+ continue;
+
+ if (c->x86_model > 0x0f) {
+ printk(KERN_WARNING DRVNAME ": Unknown CPU "
+ "model 0x%x\n", c->x86_model);
+ continue;
+ }
+
+ err = via_cputemp_device_add(i);
+ if (err)
+ goto exit_devices_unreg;
+ }
+ if (list_empty(&pdev_list)) {
+ err = -ENODEV;
+ goto exit_driver_unreg;
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ register_hotcpu_notifier(&via_cputemp_cpu_notifier);
+#endif
+ return 0;
+
+exit_devices_unreg:
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ mutex_unlock(&pdev_list_mutex);
+exit_driver_unreg:
+ platform_driver_unregister(&via_cputemp_driver);
+exit:
+ return err;
+}
+
+static void __exit via_cputemp_exit(void)
+{
+ struct pdev_entry *p, *n;
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
+#endif
+ mutex_lock(&pdev_list_mutex);
+ list_for_each_entry_safe(p, n, &pdev_list, list) {
+ platform_device_unregister(p->pdev);
+ list_del(&p->list);
+ kfree(p);
+ }
+ mutex_unlock(&pdev_list_mutex);
+ platform_driver_unregister(&via_cputemp_driver);
+}
+
+MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
+MODULE_DESCRIPTION("VIA CPU temperature monitor");
+MODULE_LICENSE("GPL");
+
+module_init(via_cputemp_init)
+module_exit(via_cputemp_exit)
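
A note on the readout in show_temp() above: hwmon tempN_input attributes are expressed in millidegrees Celsius, so the driver masks the 24-bit Celsius value out of EAX and scales it by 1000. A standalone sketch of that conversion, assuming the MSR layout used above:

/* bits 23:0 of EAX hold degrees Celsius; sysfs expects millidegrees */
static unsigned long via_temp_millicelsius(u32 eax)
{
	return ((unsigned long)eax & 0xffffff) * 1000;
}
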
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index bb5e7874878..0dcaba9b718 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -5,6 +5,7 @@
Copyright (C) 2006 Yuan Mu (Winbond),
Rudolf Marek <r.marek@assembler.cz>
David Hubbard <david.c.hubbard@gmail.com>
+ Daniel J Blueman <daniel.blueman@gmail.com>
Shamelessly ripped from the w83627hf driver
Copyright (C) 2003 Mark Studebaker
@@ -177,12 +178,15 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
#define W83627EHF_REG_ALARM3 0x45B
/* SmartFan registers */
+#define W83627EHF_REG_FAN_STEPUP_TIME 0x0f
+#define W83627EHF_REG_FAN_STEPDOWN_TIME 0x0e
+
/* DC or PWM output fan configuration */
static const u8 W83627EHF_REG_PWM_ENABLE[] = {
0x04, /* SYS FAN0 output mode and PWM mode */
0x04, /* CPU FAN0 output mode and PWM mode */
0x12, /* AUX FAN mode */
- 0x62, /* CPU fan1 mode */
+ 0x62, /* CPU FAN1 mode */
};
static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 };
@@ -193,10 +197,12 @@ static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 };
-
/* Advanced Fan control, some values are common for all fans */
-static const u8 W83627EHF_REG_FAN_MIN_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
-static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0C, 0x0D, 0x17, 0x66 };
+static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
+static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
+static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
+static const u8 W83627EHF_REG_FAN_MAX_OUTPUT[] = { 0xff, 0x67, 0xff, 0x69 };
+static const u8 W83627EHF_REG_FAN_STEP_OUTPUT[] = { 0xff, 0x68, 0xff, 0x6a };
/*
* Conversions
@@ -295,14 +301,19 @@ struct w83627ehf_data {
u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */
u8 pwm_enable[4]; /* 1->manual
- 2->thermal cruise (also called SmartFan I) */
+ 2->thermal cruise mode (also called SmartFan I)
+ 3->fan speed cruise mode
+ 4->variable thermal cruise (also called SmartFan III) */
u8 pwm_num; /* number of pwm */
u8 pwm[4];
u8 target_temp[4];
u8 tolerance[4];
- u8 fan_min_output[4]; /* minimum fan speed */
- u8 fan_stop_time[4];
+ u8 fan_start_output[4]; /* minimum fan speed when spinning up */
+ u8 fan_stop_output[4]; /* minimum fan speed when spinning down */
+ u8 fan_stop_time[4]; /* time at minimum before disabling fan */
+ u8 fan_max_output[4]; /* maximum fan speed */
+ u8 fan_step_output[4]; /* rate of change output value */
u8 vid;
u8 vrm;
@@ -529,8 +540,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
& 3) + 1;
data->pwm[i] = w83627ehf_read_value(data,
W83627EHF_REG_PWM[i]);
- data->fan_min_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_MIN_OUTPUT[i]);
+ data->fan_start_output[i] = w83627ehf_read_value(data,
+ W83627EHF_REG_FAN_START_OUTPUT[i]);
+ data->fan_stop_output[i] = w83627ehf_read_value(data,
+ W83627EHF_REG_FAN_STOP_OUTPUT[i]);
data->fan_stop_time[i] = w83627ehf_read_value(data,
W83627EHF_REG_FAN_STOP_TIME[i]);
data->target_temp[i] =
@@ -976,7 +989,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
u32 val = simple_strtoul(buf, NULL, 10);
u16 reg;
- if (!val || (val > 2)) /* only modes 1 and 2 are supported */
+ if (!val || (val > 4))
return -EINVAL;
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
@@ -1118,7 +1131,10 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
return count; \
}
-fan_functions(fan_min_output, FAN_MIN_OUTPUT)
+fan_functions(fan_start_output, FAN_START_OUTPUT)
+fan_functions(fan_stop_output, FAN_STOP_OUTPUT)
+fan_functions(fan_max_output, FAN_MAX_OUTPUT)
+fan_functions(fan_step_output, FAN_STEP_OUTPUT)
#define fan_time_functions(reg, REG) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
@@ -1161,8 +1177,14 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static struct sensor_device_attribute sda_sf3_arrays_fan4[] = {
SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
store_fan_stop_time, 3),
- SENSOR_ATTR(pwm4_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 3),
+ SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 3),
+ SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 3),
+ SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
+ store_fan_max_output, 3),
+ SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
+ store_fan_step_output, 3),
};
static struct sensor_device_attribute sda_sf3_arrays[] = {
@@ -1172,12 +1194,24 @@ static struct sensor_device_attribute sda_sf3_arrays[] = {
store_fan_stop_time, 1),
SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time,
store_fan_stop_time, 2),
- SENSOR_ATTR(pwm1_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 0),
- SENSOR_ATTR(pwm2_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 1),
- SENSOR_ATTR(pwm3_min_output, S_IWUSR | S_IRUGO, show_fan_min_output,
- store_fan_min_output, 2),
+ SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 0),
+ SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 1),
+ SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output,
+ store_fan_start_output, 2),
+ SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 0),
+ SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 1),
+ SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output,
+ store_fan_stop_output, 2),
+
+ /* pwm1 and pwm3 don't support max and step settings */
+ SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output,
+ store_fan_max_output, 1),
+ SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output,
+ store_fan_step_output, 1),
};
static ssize_t
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index b257c722373..38e28052307 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1135,6 +1135,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
"W83687THF",
};
+ sio_data->sioaddr = sioaddr;
superio_enter(sio_data);
val = force_id ? force_id : superio_inb(sio_data, DEVID);
switch (val) {
@@ -1177,7 +1178,6 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
}
err = 0;
- sio_data->sioaddr = sioaddr;
pr_info(DRVNAME ": Found %s chip at %#x\n",
names[sio_data->type], *addr);
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 7ab7967da0a..05f9225b6f9 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -56,9 +56,10 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_4(w83781d, w83782d, w83783s, as99127f);
+enum chips { w83781d, w83782d, w83783s, as99127f };
+
+/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
@@ -1051,8 +1052,7 @@ w83781d_create_files(struct device *dev, int kind, int is_isa)
/* Return 0 if detection is successful, -ENODEV otherwise */
static int
-w83781d_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+w83781d_detect(struct i2c_client *client, struct i2c_board_info *info)
{
int val1, val2;
struct w83781d_data *isa = w83781d_data_if_isa();
@@ -1537,7 +1537,7 @@ static struct i2c_driver w83781d_driver = {
.remove = w83781d_remove,
.id_table = w83781d_ids,
.detect = w83781d_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 0410bf12c52..400a88bde27 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -52,7 +52,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83791d);
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
@@ -326,7 +325,7 @@ struct w83791d_data {
static int w83791d_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83791d_detect(struct i2c_client *client, int kind,
+static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);
@@ -341,7 +340,7 @@ static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
static void w83791d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83791d_id[] = {
- { "w83791d", w83791d },
+ { "w83791d", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83791d_id);
@@ -355,7 +354,7 @@ static struct i2c_driver w83791d_driver = {
.remove = w83791d_remove,
.id_table = w83791d_id,
.detect = w83791d_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/* following are the sysfs callback functions */
@@ -1259,7 +1258,7 @@ error_sc_0:
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int w83791d_detect(struct i2c_client *client, int kind,
+static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 38978851333..679718e6b01 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -50,7 +50,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83792d);
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
@@ -302,7 +301,7 @@ struct w83792d_data {
static int w83792d_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83792d_detect(struct i2c_client *client, int kind,
+static int w83792d_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83792d_remove(struct i2c_client *client);
static struct w83792d_data *w83792d_update_device(struct device *dev);
@@ -314,7 +313,7 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
static void w83792d_init_client(struct i2c_client *client);
static const struct i2c_device_id w83792d_id[] = {
- { "w83792d", w83792d },
+ { "w83792d", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83792d_id);
@@ -328,7 +327,7 @@ static struct i2c_driver w83792d_driver = {
.remove = w83792d_remove,
.id_table = w83792d_id,
.detect = w83792d_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static inline long in_count_from_reg(int nr, struct w83792d_data *data)
@@ -1263,7 +1262,7 @@ static const struct attribute_group w83792d_group = {
/* Return 0 if detection is successful, -ENODEV otherwise */
static int
-w83792d_detect(struct i2c_client *client, int kind, struct i2c_board_info *info)
+w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int val1, val2;
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 80a2191bf12..9a2022b6749 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -41,7 +41,6 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83793);
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
@@ -230,7 +229,7 @@ static u8 w83793_read_value(struct i2c_client *client, u16 reg);
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83793_detect(struct i2c_client *client, int kind,
+static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83793_remove(struct i2c_client *client);
static void w83793_init_client(struct i2c_client *client);
@@ -238,7 +237,7 @@ static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
static const struct i2c_device_id w83793_id[] = {
- { "w83793", w83793 },
+ { "w83793", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83793_id);
@@ -252,7 +251,7 @@ static struct i2c_driver w83793_driver = {
.remove = w83793_remove,
.id_table = w83793_id,
.detect = w83793_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static ssize_t
@@ -1161,7 +1160,7 @@ ERROR_SC_0:
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int w83793_detect(struct i2c_client *client, int kind,
+static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
u8 tmp, bank, chip_id;
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 9b6c4c10fba..20781def65e 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -52,12 +52,6 @@
static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
/*
- * Insmod parameters
- */
-
-I2C_CLIENT_INSMOD_1(w83l785ts);
-
-/*
* The W83L785TS-S registers
* Manufacturer ID is 0x5CA3 for Winbond.
*/
@@ -83,7 +77,7 @@ I2C_CLIENT_INSMOD_1(w83l785ts);
static int w83l785ts_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83l785ts_detect(struct i2c_client *client, int kind,
+static int w83l785ts_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83l785ts_remove(struct i2c_client *client);
static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval);
@@ -94,7 +88,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
*/
static const struct i2c_device_id w83l785ts_id[] = {
- { "w83l785ts", w83l785ts },
+ { "w83l785ts", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83l785ts_id);
@@ -108,7 +102,7 @@ static struct i2c_driver w83l785ts_driver = {
.remove = w83l785ts_remove,
.id_table = w83l785ts_id,
.detect = w83l785ts_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
/*
@@ -146,7 +140,7 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, 1);
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int w83l785ts_detect(struct i2c_client *client, int kind,
+static int w83l785ts_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 27da7d2b15f..0254e181893 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -38,7 +38,6 @@
static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };
/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(w83l786ng);
static int reset;
module_param(reset, bool, 0);
@@ -147,14 +146,14 @@ struct w83l786ng_data {
static int w83l786ng_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int w83l786ng_detect(struct i2c_client *client, int kind,
+static int w83l786ng_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83l786ng_remove(struct i2c_client *client);
static void w83l786ng_init_client(struct i2c_client *client);
static struct w83l786ng_data *w83l786ng_update_device(struct device *dev);
static const struct i2c_device_id w83l786ng_id[] = {
- { "w83l786ng", w83l786ng },
+ { "w83l786ng", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83l786ng_id);
@@ -168,7 +167,7 @@ static struct i2c_driver w83l786ng_driver = {
.remove = w83l786ng_remove,
.id_table = w83l786ng_id,
.detect = w83l786ng_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static u8
@@ -586,8 +585,7 @@ static const struct attribute_group w83l786ng_group = {
};
static int
-w83l786ng_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
u16 man_id;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 049555777f6..7647a20523a 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1155,7 +1155,7 @@ static int i2c_pxa_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops i2c_pxa_dev_pm_ops = {
+static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
.suspend_noirq = i2c_pxa_suspend_noirq,
.resume_noirq = i2c_pxa_resume_noirq,
};
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 96aafb91b69..1d8c98613fa 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -967,7 +967,7 @@ static int s3c24xx_i2c_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
+static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
.suspend_noirq = s3c24xx_i2c_suspend_noirq,
.resume = s3c24xx_i2c_resume,
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 86a9d4e8147..ccc46418ef7 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -647,7 +647,7 @@ static int sh_mobile_i2c_runtime_nop(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
+static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_suspend = sh_mobile_i2c_runtime_nop,
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 4f34823e86b..0ac2f90ab84 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -155,6 +155,35 @@ static void i2c_device_shutdown(struct device *dev)
driver->shutdown(client);
}
+#ifdef CONFIG_SUSPEND
+static int i2c_device_pm_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->suspend)
+ return 0;
+ return pm->suspend(dev);
+}
+
+static int i2c_device_pm_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->resume)
+ return 0;
+ return pm->resume(dev);
+}
+#else
+#define i2c_device_pm_suspend NULL
+#define i2c_device_pm_resume NULL
+#endif
+
static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -219,6 +248,11 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
NULL
};
+static const struct dev_pm_ops i2c_device_pm_ops = {
+ .suspend = i2c_device_pm_suspend,
+ .resume = i2c_device_pm_resume,
+};
+
struct bus_type i2c_bus_type = {
.name = "i2c",
.match = i2c_device_match,
@@ -227,6 +261,7 @@ struct bus_type i2c_bus_type = {
.shutdown = i2c_device_shutdown,
.suspend = i2c_device_suspend,
.resume = i2c_device_resume,
+ .pm = &i2c_device_pm_ops,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
@@ -1184,7 +1219,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
- err = driver->detect(temp_client, -1, &info);
+ err = driver->detect(temp_client, &info);
if (err) {
/* -ENODEV is returned if the detection fails. We catch it
here as this isn't an error. */
@@ -1214,13 +1249,13 @@ static int i2c_detect_address(struct i2c_client *temp_client,
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
{
- const struct i2c_client_address_data *address_data;
+ const unsigned short *address_list;
struct i2c_client *temp_client;
int i, err = 0;
int adap_id = i2c_adapter_id(adapter);
- address_data = driver->address_data;
- if (!driver->detect || !address_data)
+ address_list = driver->address_list;
+ if (!driver->detect || !address_list)
return 0;
/* Set up a temporary client to help detect callback */
@@ -1235,7 +1270,7 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
/* Stop here if we can't use SMBUS_QUICK */
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_QUICK)) {
- if (address_data->normal_i2c[0] == I2C_CLIENT_END)
+ if (address_list[0] == I2C_CLIENT_END)
goto exit_free;
dev_warn(&adapter->dev, "SMBus Quick command not supported, "
@@ -1244,11 +1279,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
goto exit_free;
}
- for (i = 0; address_data->normal_i2c[i] != I2C_CLIENT_END; i += 1) {
+ for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
dev_dbg(&adapter->dev, "found normal entry for adapter %d, "
- "addr 0x%02x\n", adap_id,
- address_data->normal_i2c[i]);
- temp_client->addr = address_data->normal_i2c[i];
+ "addr 0x%02x\n", adap_id, address_list[i]);
+ temp_client->addr = address_list[i];
err = i2c_detect_address(temp_client, driver);
if (err)
goto exit_free;
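
The bus-level change above complements the const-ified dev_pm_ops in the i2c bus drivers earlier in this series: i2c_bus_type now carries a dev_pm_ops whose suspend/resume dispatch to the client driver's dev_pm_ops when one is present. A hedged sketch of the client-driver side (all names made up), showing how such a driver would opt in:

static int bar_suspend(struct device *dev)
{
	/* quiesce the chip; dev is the i2c client's embedded struct device */
	return 0;
}

static int bar_resume(struct device *dev)
{
	/* restore any register state lost across suspend */
	return 0;
}

static const struct dev_pm_ops bar_pm_ops = {
	.suspend = bar_suspend,
	.resume	 = bar_resume,
};

static struct i2c_driver bar_driver = {
	.driver = {
		.name	= "bar",
		.pm	= &bar_pm_ops,	/* found via dev->driver->pm in the new callbacks */
	},
	/* .probe, .remove, .id_table as usual */
};
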
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 97642a7a79c..7a4e788cab2 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -43,10 +43,7 @@
#include <asm/pmac_feature.h>
#include <asm/sections.h>
#include <asm/irq.h>
-
-#ifndef CONFIG_PPC64
#include <asm/mediabay.h>
-#endif
#define DRV_NAME "ide-pmac"
@@ -59,13 +56,14 @@ typedef struct pmac_ide_hwif {
int irq;
int kind;
int aapl_bus_id;
- unsigned mediabay : 1;
unsigned broken_dma : 1;
unsigned broken_dma_warn : 1;
struct device_node* node;
struct macio_dev *mdev;
u32 timings[4];
volatile u32 __iomem * *kauai_fcr;
+ ide_hwif_t *hwif;
+
/* Those fields are duplicating what is in hwif. We currently
* can't use the hwif ones because of some assumptions that are
* being done by the generic code about the kind of dma controller
@@ -854,6 +852,11 @@ sanitize_timings(pmac_ide_hwif_t *pmif)
pmif->timings[2] = pmif->timings[3] = value2;
}
+static int on_media_bay(pmac_ide_hwif_t *pmif)
+{
+ return pmif->mdev && pmif->mdev->media_bay != NULL;
+}
+
/* Suspend call back, should be called after the child devices
* have actually been suspended
*/
@@ -866,7 +869,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
disable_irq(pmif->irq);
/* The media bay will handle itself just fine */
- if (pmif->mediabay)
+ if (on_media_bay(pmif))
return 0;
/* Kauai has bus control FCRs directly here */
@@ -889,7 +892,7 @@ static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
{
/* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
- if (!pmif->mediabay) {
+ if (!on_media_bay(pmif)) {
ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
msleep(10);
@@ -950,13 +953,11 @@ static void pmac_ide_init_dev(ide_drive_t *drive)
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
- if (pmif->mediabay) {
-#ifdef CONFIG_PMAC_MEDIABAY
- if (check_media_bay_by_base(pmif->regbase, MB_CD) == 0) {
+ if (on_media_bay(pmif)) {
+ if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
return;
}
-#endif
drive->dev_flags |= IDE_DFLAG_NOPROBE;
}
}
@@ -1072,26 +1073,23 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
writel(KAUAI_FCR_UATA_MAGIC |
KAUAI_FCR_UATA_RESET_N |
KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
-
- pmif->mediabay = 0;
/* Make sure we have sane timings */
sanitize_timings(pmif);
+ /* If we are on a media bay, wait for it to settle and lock it */
+ if (pmif->mdev)
+ lock_media_bay(pmif->mdev->media_bay);
+
host = ide_host_alloc(&d, hws, 1);
- if (host == NULL)
- return -ENOMEM;
- hwif = host->ports[0];
+ if (host == NULL) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+ hwif = pmif->hwif = host->ports[0];
-#ifndef CONFIG_PPC64
- /* XXX FIXME: Media bay stuff need re-organizing */
- if (np->parent && np->parent->name
- && strcasecmp(np->parent->name, "media-bay") == 0) {
-#ifdef CONFIG_PMAC_MEDIABAY
- media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq,
- hwif);
-#endif /* CONFIG_PMAC_MEDIABAY */
- pmif->mediabay = 1;
+ if (on_media_bay(pmif)) {
+ /* Fixup bus ID for media bay */
if (!bidp)
pmif->aapl_bus_id = 1;
} else if (pmif->kind == controller_ohare) {
@@ -1100,9 +1098,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
* units, I keep the old way
*/
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
- } else
-#endif
- {
+ } else {
/* This is necessary to enable IDE when net-booting */
ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
@@ -1112,17 +1108,21 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
}
printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
- "bus ID %d%s, irq %d\n", model_name[pmif->kind],
- pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
- pmif->mediabay ? " (mediabay)" : "", hw->irq);
+ "bus ID %d%s, irq %d\n", model_name[pmif->kind],
+ pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
+ on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);
rc = ide_host_register(host, &d, hws);
- if (rc) {
- ide_host_free(host);
- return rc;
- }
+ if (rc)
+ pmif->hwif = NULL;
- return 0;
+ if (pmif->mdev)
+ unlock_media_bay(pmif->mdev->media_bay);
+
+ bail:
+ if (rc && host)
+ ide_host_free(host);
+ return rc;
}
static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
@@ -1362,6 +1362,25 @@ pmac_ide_pci_resume(struct pci_dev *pdev)
return rc;
}
+#ifdef CONFIG_PMAC_MEDIABAY
+static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ pmac_ide_hwif_t *pmif =
+ (pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
+
+ switch(mb_state) {
+ case MB_CD:
+ if (!pmif->hwif->present)
+ ide_port_scan(pmif->hwif);
+ break;
+ default:
+ if (pmif->hwif->present)
+ ide_port_unregister_devices(pmif->hwif);
+ }
+}
+#endif /* CONFIG_PMAC_MEDIABAY */
+
+
static struct of_device_id pmac_ide_macio_match[] =
{
{
@@ -1386,6 +1405,9 @@ static struct macio_driver pmac_ide_macio_driver =
.probe = pmac_ide_macio_attach,
.suspend = pmac_ide_macio_suspend,
.resume = pmac_ide_macio_resume,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = pmac_ide_macio_mb_event,
+#endif
};
static const struct pci_device_id pmac_ide_pci_match[] = {
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 1f20a042a4f..dd253002cd5 100644
--- a/drivers/idle/i7300_idle.c
+++ b/drivers/idle/i7300_idle.c
@@ -81,7 +81,7 @@ static u8 i7300_idle_thrtctl_saved;
static u8 i7300_idle_thrtlow_saved;
static u32 i7300_idle_mc_saved;
-static cpumask_t idle_cpumask;
+static cpumask_var_t idle_cpumask;
static ktime_t start_ktime;
static unsigned long avg_idle_us;
@@ -459,9 +459,9 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
spin_lock_irqsave(&i7300_idle_lock, flags);
if (val == IDLE_START) {
- cpu_set(smp_processor_id(), idle_cpumask);
+ cpumask_set_cpu(smp_processor_id(), idle_cpumask);
- if (cpus_weight(idle_cpumask) != num_online_cpus())
+ if (cpumask_weight(idle_cpumask) != num_online_cpus())
goto end;
now_ktime = ktime_get();
@@ -478,8 +478,8 @@ static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
i7300_idle_ioat_start();
} else if (val == IDLE_END) {
- cpu_clear(smp_processor_id(), idle_cpumask);
- if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
+ cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
+ if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
/* First CPU coming out of idle */
u64 idle_duration_us;
@@ -553,7 +553,6 @@ struct debugfs_file_info {
static int __init i7300_idle_init(void)
{
spin_lock_init(&i7300_idle_lock);
- cpus_clear(idle_cpumask);
total_us = 0;
if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
@@ -565,6 +564,9 @@ static int __init i7300_idle_init(void)
if (i7300_idle_ioat_init())
return -ENODEV;
+ if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
if (debugfs_dir) {
int i = 0;
@@ -589,6 +591,7 @@ static int __init i7300_idle_init(void)
static void __exit i7300_idle_exit(void)
{
idle_notifier_unregister(&i7300_idle_nb);
+ free_cpumask_var(idle_cpumask);
if (debugfs_dir) {
int i = 0;
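
The cpumask conversion above follows the standard cpumask_var_t pattern: the mask is no longer a fixed-size static object but must be allocated before use and freed on exit (with CONFIG_CPUMASK_OFFSTACK it lives on the heap; otherwise it degenerates to an embedded array). A minimal sketch of the pattern, with made-up names:

static cpumask_var_t my_idle_mask;

static int __init my_init(void)
{
	/* zalloc_cpumask_var() returns the mask already cleared,
	 * replacing the old static cpumask_t + cpus_clear(). */
	if (!zalloc_cpumask_var(&my_idle_mask, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static void my_cpu_enters_idle(void)
{
	cpumask_set_cpu(smp_processor_id(), my_idle_mask);
	if (cpumask_weight(my_idle_mask) == num_online_cpus())
		pr_debug("all online CPUs are idle\n");
}

static void __exit my_exit(void)
{
	free_cpumask_var(my_idle_mask);
}
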
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index bd07803e918..abbb06996f9 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -36,7 +36,6 @@
#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/workqueue.h>
-#include <linux/if_arp.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
@@ -92,22 +91,12 @@ EXPORT_SYMBOL(rdma_addr_unregister_client);
int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
const unsigned char *dst_dev_addr)
{
- switch (dev->type) {
- case ARPHRD_INFINIBAND:
- dev_addr->dev_type = RDMA_NODE_IB_CA;
- break;
- case ARPHRD_ETHER:
- dev_addr->dev_type = RDMA_NODE_RNIC;
- break;
- default:
- return -EADDRNOTAVAIL;
- }
-
+ dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
if (dst_dev_addr)
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
- dev_addr->src_dev = dev;
+ dev_addr->bound_dev_if = dev->ifindex;
return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);
@@ -117,6 +106,15 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
struct net_device *dev;
int ret = -EADDRNOTAVAIL;
+ if (dev_addr->bound_dev_if) {
+ dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_put(dev);
+ return ret;
+ }
+
switch (addr->sa_family) {
case AF_INET:
dev = ip_dev_find(&init_net,
@@ -131,6 +129,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
+ read_lock(&dev_base_lock);
for_each_netdev(&init_net, dev) {
if (ipv6_chk_addr(&init_net,
&((struct sockaddr_in6 *) addr)->sin6_addr,
@@ -139,6 +138,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
break;
}
}
+ read_unlock(&dev_base_lock);
break;
#endif
}
@@ -176,48 +176,9 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
-static void addr_send_arp(struct sockaddr *dst_in)
-{
- struct rtable *rt;
- struct flowi fl;
-
- memset(&fl, 0, sizeof fl);
-
- switch (dst_in->sa_family) {
- case AF_INET:
- fl.nl_u.ip4_u.daddr =
- ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
-
- if (ip_route_output_key(&init_net, &rt, &fl))
- return;
-
- neigh_event_send(rt->u.dst.neighbour, NULL);
- ip_rt_put(rt);
- break;
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case AF_INET6:
- {
- struct dst_entry *dst;
-
- fl.nl_u.ip6_u.daddr =
- ((struct sockaddr_in6 *) dst_in)->sin6_addr;
-
- dst = ip6_route_output(&init_net, NULL, &fl);
- if (!dst)
- return;
-
- neigh_event_send(dst->neighbour, NULL);
- dst_release(dst);
- break;
- }
-#endif
- }
-}
-
-static int addr4_resolve_remote(struct sockaddr_in *src_in,
- struct sockaddr_in *dst_in,
- struct rdma_dev_addr *addr)
+static int addr4_resolve(struct sockaddr_in *src_in,
+ struct sockaddr_in *dst_in,
+ struct rdma_dev_addr *addr)
{
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
@@ -229,10 +190,22 @@ static int addr4_resolve_remote(struct sockaddr_in *src_in,
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = dst_ip;
fl.nl_u.ip4_u.saddr = src_ip;
+ fl.oif = addr->bound_dev_if;
+
ret = ip_route_output_key(&init_net, &rt, &fl);
if (ret)
goto out;
+ src_in->sin_family = AF_INET;
+ src_in->sin_addr.s_addr = rt->rt_src;
+
+ if (rt->idev->dev->flags & IFF_LOOPBACK) {
+ ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ goto put;
+ }
+
/* If the device does ARP internally, return 'done' */
if (rt->idev->dev->flags & IFF_NOARP) {
rdma_copy_addr(addr, rt->idev->dev, NULL);
@@ -240,21 +213,14 @@ static int addr4_resolve_remote(struct sockaddr_in *src_in,
}
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
- if (!neigh) {
+ if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+ neigh_event_send(rt->u.dst.neighbour, NULL);
ret = -ENODATA;
+ if (neigh)
+ goto release;
goto put;
}
- if (!(neigh->nud_state & NUD_VALID)) {
- ret = -ENODATA;
- goto release;
- }
-
- if (!src_ip) {
- src_in->sin_family = dst_in->sin_family;
- src_in->sin_addr.s_addr = rt->rt_src;
- }
-
ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
neigh_release(neigh);
@@ -265,52 +231,77 @@ out:
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve(struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in,
+ struct rdma_dev_addr *addr)
{
struct flowi fl;
struct neighbour *neigh;
struct dst_entry *dst;
- int ret = -ENODATA;
+ int ret;
memset(&fl, 0, sizeof fl);
- fl.nl_u.ip6_u.daddr = dst_in->sin6_addr;
- fl.nl_u.ip6_u.saddr = src_in->sin6_addr;
+ ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr);
+ ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr);
+ fl.oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl);
- if (!dst)
- return ret;
+ if ((ret = dst->error))
+ goto put;
+
+ if (ipv6_addr_any(&fl.fl6_src)) {
+ ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
+ &fl.fl6_dst, 0, &fl.fl6_src);
+ if (ret)
+ goto put;
+
+ src_in->sin6_family = AF_INET6;
+ ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src);
+ }
+
+ if (dst->dev->flags & IFF_LOOPBACK) {
+ ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+ if (!ret)
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ goto put;
+ }
+ /* If the device does ARP internally, return 'done' */
if (dst->dev->flags & IFF_NOARP) {
ret = rdma_copy_addr(addr, dst->dev, NULL);
- } else {
- neigh = dst->neighbour;
- if (neigh && (neigh->nud_state & NUD_VALID))
- ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
+ goto put;
+ }
+
+ neigh = dst->neighbour;
+ if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+ neigh_event_send(dst->neighbour, NULL);
+ ret = -ENODATA;
+ goto put;
}
+ ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+put:
dst_release(dst);
return ret;
}
#else
-static int addr6_resolve_remote(struct sockaddr_in6 *src_in,
- struct sockaddr_in6 *dst_in,
- struct rdma_dev_addr *addr)
+static int addr6_resolve(struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in,
+ struct rdma_dev_addr *addr)
{
return -EADDRNOTAVAIL;
}
#endif
-static int addr_resolve_remote(struct sockaddr *src_in,
- struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
+static int addr_resolve(struct sockaddr *src_in,
+ struct sockaddr *dst_in,
+ struct rdma_dev_addr *addr)
{
if (src_in->sa_family == AF_INET) {
- return addr4_resolve_remote((struct sockaddr_in *) src_in,
+ return addr4_resolve((struct sockaddr_in *) src_in,
(struct sockaddr_in *) dst_in, addr);
} else
- return addr6_resolve_remote((struct sockaddr_in6 *) src_in,
+ return addr6_resolve((struct sockaddr_in6 *) src_in,
(struct sockaddr_in6 *) dst_in, addr);
}
@@ -327,8 +318,7 @@ static void process_req(struct work_struct *work)
if (req->status == -ENODATA) {
src_in = (struct sockaddr *) &req->src_addr;
dst_in = (struct sockaddr *) &req->dst_addr;
- req->status = addr_resolve_remote(src_in, dst_in,
- req->addr);
+ req->status = addr_resolve(src_in, dst_in, req->addr);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
else if (req->status == -ENODATA)
@@ -352,82 +342,6 @@ static void process_req(struct work_struct *work)
}
}
-static int addr_resolve_local(struct sockaddr *src_in,
- struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
-{
- struct net_device *dev;
- int ret;
-
- switch (dst_in->sa_family) {
- case AF_INET:
- {
- __be32 src_ip = ((struct sockaddr_in *) src_in)->sin_addr.s_addr;
- __be32 dst_ip = ((struct sockaddr_in *) dst_in)->sin_addr.s_addr;
-
- dev = ip_dev_find(&init_net, dst_ip);
- if (!dev)
- return -EADDRNOTAVAIL;
-
- if (ipv4_is_zeronet(src_ip)) {
- src_in->sa_family = dst_in->sa_family;
- ((struct sockaddr_in *) src_in)->sin_addr.s_addr = dst_ip;
- ret = rdma_copy_addr(addr, dev, dev->dev_addr);
- } else if (ipv4_is_loopback(src_ip)) {
- ret = rdma_translate_ip(dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- } else {
- ret = rdma_translate_ip(src_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- }
- dev_put(dev);
- break;
- }
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- case AF_INET6:
- {
- struct in6_addr *a;
-
- for_each_netdev(&init_net, dev)
- if (ipv6_chk_addr(&init_net,
- &((struct sockaddr_in6 *) dst_in)->sin6_addr,
- dev, 1))
- break;
-
- if (!dev)
- return -EADDRNOTAVAIL;
-
- a = &((struct sockaddr_in6 *) src_in)->sin6_addr;
-
- if (ipv6_addr_any(a)) {
- src_in->sa_family = dst_in->sa_family;
- ((struct sockaddr_in6 *) src_in)->sin6_addr =
- ((struct sockaddr_in6 *) dst_in)->sin6_addr;
- ret = rdma_copy_addr(addr, dev, dev->dev_addr);
- } else if (ipv6_addr_loopback(a)) {
- ret = rdma_translate_ip(dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- } else {
- ret = rdma_translate_ip(src_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
- }
- break;
- }
-#endif
-
- default:
- ret = -EADDRNOTAVAIL;
- break;
- }
-
- return ret;
-}
-
int rdma_resolve_ip(struct rdma_addr_client *client,
struct sockaddr *src_addr, struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
@@ -443,22 +357,28 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
if (!req)
return -ENOMEM;
- if (src_addr)
- memcpy(&req->src_addr, src_addr, ip_addr_size(src_addr));
- memcpy(&req->dst_addr, dst_addr, ip_addr_size(dst_addr));
+ src_in = (struct sockaddr *) &req->src_addr;
+ dst_in = (struct sockaddr *) &req->dst_addr;
+
+ if (src_addr) {
+ if (src_addr->sa_family != dst_addr->sa_family) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ memcpy(src_in, src_addr, ip_addr_size(src_addr));
+ } else {
+ src_in->sa_family = dst_addr->sa_family;
+ }
+
+ memcpy(dst_in, dst_addr, ip_addr_size(dst_addr));
req->addr = addr;
req->callback = callback;
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
- src_in = (struct sockaddr *) &req->src_addr;
- dst_in = (struct sockaddr *) &req->dst_addr;
-
- req->status = addr_resolve_local(src_in, dst_in, addr);
- if (req->status == -EADDRNOTAVAIL)
- req->status = addr_resolve_remote(src_in, dst_in, addr);
-
+ req->status = addr_resolve(src_in, dst_in, addr);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -467,15 +387,16 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
case -ENODATA:
req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
queue_req(req);
- addr_send_arp(dst_in);
break;
default:
ret = req->status;
atomic_dec(&client->refcount);
- kfree(req);
- break;
+ goto err;
}
return ret;
+err:
+ kfree(req);
+ return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 075317884b5..fbdd7310600 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -330,17 +330,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
union ib_gid gid;
int ret = -ENODEV;
- switch (rdma_node_get_transport(dev_addr->dev_type)) {
- case RDMA_TRANSPORT_IB:
- ib_addr_get_sgid(dev_addr, &gid);
- break;
- case RDMA_TRANSPORT_IWARP:
- iw_addr_get_sgid(dev_addr, &gid);
- break;
- default:
- return -ENODEV;
- }
-
+ rdma_addr_get_sgid(dev_addr, &gid);
list_for_each_entry(cma_dev, &dev_list, list) {
ret = ib_find_cached_gid(cma_dev->device, &gid,
&id_priv->id.port_num, NULL);
@@ -1032,11 +1022,17 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (rt->num_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
- ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
- ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
- &id->route.addr.dev_addr);
- if (ret)
- goto destroy_id;
+ if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
+ rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
+ rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
+ ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
+ } else {
+ ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
+ &rt->addr.dev_addr);
+ if (ret)
+ goto destroy_id;
+ }
+ rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT;
@@ -1071,10 +1067,12 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
cma_save_net_info(&id->route.addr, &listen_id->route.addr,
ip_ver, port, src, dst);
- ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
- &id->route.addr.dev_addr);
- if (ret)
- goto err;
+ if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
+ ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
+ &id->route.addr.dev_addr);
+ if (ret)
+ goto err;
+ }
id_priv = container_of(id, struct rdma_id_private, id);
id_priv->state = CMA_CONNECT;
@@ -1474,15 +1472,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
mutex_unlock(&lock);
}
-static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
-{
- struct sockaddr_storage addr_in;
-
- memset(&addr_in, 0, sizeof addr_in);
- addr_in.ss_family = af;
- return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
-}
-
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
struct rdma_id_private *id_priv;
@@ -1490,7 +1479,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
id_priv = container_of(id, struct rdma_id_private, id);
if (id_priv->state == CMA_IDLE) {
- ret = cma_bind_any(id, AF_INET);
+ ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
+ ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
if (ret)
return ret;
}
@@ -1565,8 +1555,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
struct sockaddr_in6 *sin6;
memset(&path_rec, 0, sizeof path_rec);
- ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
- ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
+ rdma_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
+ rdma_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
path_rec.numb_path = 1;
path_rec.reversible = 1;
@@ -1781,7 +1771,11 @@ port_found:
if (ret)
goto out;
- ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ id_priv->id.route.addr.dev_addr.dev_type =
+ (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
+ ARPHRD_INFINIBAND : ARPHRD_ETHER;
+
+ rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
id_priv->id.port_num = p;
cma_attach_to_dev(id_priv, cma_dev);
@@ -1839,7 +1833,7 @@ out:
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
struct cma_work *work;
- struct sockaddr_in *src_in, *dst_in;
+ struct sockaddr *src, *dst;
union ib_gid gid;
int ret;
@@ -1853,14 +1847,19 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
goto err;
}
- ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
- ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
+ rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
- src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
- dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
- src_in->sin_family = dst_in->sin_family;
- src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
+ src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+ if (cma_zero_addr(src)) {
+ dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
+ if ((src->sa_family = dst->sa_family) == AF_INET) {
+ ((struct sockaddr_in *) src)->sin_addr.s_addr =
+ ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+ } else {
+ ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
+ &((struct sockaddr_in6 *) dst)->sin6_addr);
+ }
}
work->id = id_priv;
@@ -1878,10 +1877,14 @@ err:
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
struct sockaddr *dst_addr)
{
- if (src_addr && src_addr->sa_family)
- return rdma_bind_addr(id, src_addr);
- else
- return cma_bind_any(id, dst_addr->sa_family);
+ if (!src_addr || !src_addr->sa_family) {
+ src_addr = (struct sockaddr *) &id->route.addr.src_addr;
+ if ((src_addr->sa_family = dst_addr->sa_family) == AF_INET6) {
+ ((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
+ ((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
+ }
+ }
+ return rdma_bind_addr(id, src_addr);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
@@ -2077,6 +2080,25 @@ static int cma_get_port(struct rdma_id_private *id_priv)
return ret;
}
+static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
+ struct sockaddr *addr)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct sockaddr_in6 *sin6;
+
+ if (addr->sa_family != AF_INET6)
+ return 0;
+
+ sin6 = (struct sockaddr_in6 *) addr;
+ if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ !sin6->sin6_scope_id)
+ return -EINVAL;
+
+ dev_addr->bound_dev_if = sin6->sin6_scope_id;
+#endif
+ return 0;
+}
+
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv;
@@ -2089,7 +2111,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
return -EINVAL;
- if (!cma_any_addr(addr)) {
+ ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
+ if (ret)
+ goto err1;
+
+ if (cma_loopback_addr(addr)) {
+ ret = cma_bind_loopback(id_priv);
+ } else if (!cma_zero_addr(addr)) {
ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
if (ret)
goto err1;
@@ -2108,7 +2136,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- if (!cma_any_addr(addr)) {
+ if (id_priv->cma_dev) {
mutex_lock(&lock);
cma_detach_from_dev(id_priv);
mutex_unlock(&lock);
@@ -2687,10 +2715,15 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
if (cma_any_addr(addr)) {
memset(mgid, 0, sizeof *mgid);
} else if ((addr->sa_family == AF_INET6) &&
- ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
+ ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
0xFF10A01B)) {
/* IPv6 address is an SA assigned MGID. */
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+ } else if ((addr->sa_family == AF_INET6)) {
+ ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
+ if (id_priv->id.ps == RDMA_PS_UDP)
+ mc_map[7] = 0x01; /* Use RDMA CM signature */
+ *mgid = *(union ib_gid *) (mc_map + 4);
} else {
ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
if (id_priv->id.ps == RDMA_PS_UDP)
@@ -2716,7 +2749,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
if (id_priv->id.ps == RDMA_PS_UDP)
rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
- ib_addr_get_sgid(dev_addr, &rec.port_gid);
+ rdma_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
rec.join_state = 1;
@@ -2815,7 +2848,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
dev_addr = &id_priv->id.route.addr.dev_addr;
- if ((dev_addr->src_dev == ndev) &&
+ if ((dev_addr->bound_dev_if == ndev->ifindex) &&
memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
ndev->name, &id_priv->id);
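
One consequence of cma_check_linklocal() above: binding or resolving an IPv6 link-local address through the RDMA CM now requires the caller to supply sin6_scope_id, which is carried over into bound_dev_if. A hedged caller-side sketch (kernel ULP context, illustrative names):

static int bind_ipv6_linklocal(struct rdma_cm_id *cm_id,
			       struct net_device *netdev,
			       const struct in6_addr *ll_addr)
{
	struct sockaddr_in6 sin6 = {
		.sin6_family	= AF_INET6,
		.sin6_scope_id	= netdev->ifindex,	/* required for fe80::/10 */
	};

	sin6.sin6_addr = *ll_addr;
	return rdma_bind_addr(cm_id, (struct sockaddr *)&sin6);
}
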
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 82543716d59..7e1ffd8ccd5 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -604,6 +604,12 @@ retry:
return ret ? ret : id;
}
+void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
+{
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
+}
+EXPORT_SYMBOL(ib_sa_unpack_path);
+
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
int status,
struct ib_sa_mad *mad)
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index bb96d3c4b0f..b2e16c332d5 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -43,6 +43,7 @@
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
+#include <rdma/rdma_cm_ib.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
@@ -562,10 +563,10 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
switch (route->num_paths) {
case 0:
dev_addr = &route->addr.dev_addr;
- ib_addr_get_dgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].dgid);
- ib_addr_get_sgid(dev_addr,
- (union ib_gid *) &resp->ib_route[0].sgid);
+ rdma_addr_get_dgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].dgid);
+ rdma_addr_get_sgid(dev_addr,
+ (union ib_gid *) &resp->ib_route[0].sgid);
resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
break;
case 2:
@@ -812,6 +813,51 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
return ret;
}
+static int ucma_set_ib_path(struct ucma_context *ctx,
+ struct ib_path_rec_data *path_data, size_t optlen)
+{
+ struct ib_sa_path_rec sa_path;
+ struct rdma_cm_event event;
+ int ret;
+
+ if (optlen % sizeof(*path_data))
+ return -EINVAL;
+
+ for (; optlen; optlen -= sizeof(*path_data), path_data++) {
+ if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
+ IB_PATH_BIDIRECTIONAL))
+ break;
+ }
+
+ if (!optlen)
+ return -EINVAL;
+
+ ib_sa_unpack_path(path_data->path_rec, &sa_path);
+ ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ if (ret)
+ return ret;
+
+ memset(&event, 0, sizeof event);
+ event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ return ucma_event_handler(ctx->cm_id, &event);
+}
+
+static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
+ void *optval, size_t optlen)
+{
+ int ret;
+
+ switch (optname) {
+ case RDMA_OPTION_IB_PATH:
+ ret = ucma_set_ib_path(ctx, optval, optlen);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
static int ucma_set_option_level(struct ucma_context *ctx, int level,
int optname, void *optval, size_t optlen)
{
@@ -821,6 +867,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
case RDMA_OPTION_ID:
ret = ucma_set_option_id(ctx, optname, optval, optlen);
break;
+ case RDMA_OPTION_IB:
+ ret = ucma_set_option_ib(ctx, optname, optval, optlen);
+ break;
default:
ret = -ENOSYS;
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 56feab6c251..112d3970222 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -285,7 +285,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
ucontext = ibdev->alloc_ucontext(ibdev, &udata);
if (IS_ERR(ucontext)) {
- ret = PTR_ERR(file->ucontext);
+ ret = PTR_ERR(ucontext);
goto err;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index aec0fbdfe7f..5f284ffd430 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -492,6 +492,7 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
int is_async, int *fd)
{
struct ib_uverbs_event_file *ev_file;
+ struct path path;
struct file *filp;
int ret;
@@ -519,8 +520,10 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
* system call on a uverbs file, which will already have a
* module reference.
*/
- filp = alloc_file(uverbs_event_mnt, dget(uverbs_event_mnt->mnt_root),
- FMODE_READ, fops_get(&uverbs_event_fops));
+ path.mnt = uverbs_event_mnt;
+ path.dentry = uverbs_event_mnt->mnt_root;
+ path_get(&path);
+ filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
if (!filp) {
ret = -ENFILE;
goto err_fd;
@@ -531,6 +534,8 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
return filp;
err_fd:
+ fops_put(&uverbs_event_fops);
+ path_put(&path);
put_unused_fd(*fd);
err:
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index a6d89440ad2..ad518868df7 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -798,8 +798,10 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
u8 actual_sge_count;
u32 msg_size;
- if (qp->state > IB_QPS_RTS)
- return -EINVAL;
+ if (qp->state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
while (ib_wr) {
@@ -930,6 +932,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
ib_wr = ib_wr->next;
}
+out:
if (err)
*bad_wr = ib_wr;
return err;
@@ -944,8 +947,10 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
unsigned long lock_flags;
int err = 0;
- if (qp->state > IB_QPS_RTS)
- return -EINVAL;
+ if (qp->state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
/*
* Try and post each work request
@@ -998,6 +1003,7 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
ib_wr = ib_wr->next;
}
+out:
if (err)
*bad_wr = ib_wr;
return err;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index bfd03bf8be5..f3d440cc68f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/kfifo.h>
#include "t3_cpl.h"
#include "t3cdev.h"
@@ -75,13 +76,13 @@ struct cxio_hal_ctrl_qp {
};
struct cxio_hal_resource {
- struct kfifo *tpt_fifo;
+ struct kfifo tpt_fifo;
spinlock_t tpt_fifo_lock;
- struct kfifo *qpid_fifo;
+ struct kfifo qpid_fifo;
spinlock_t qpid_fifo_lock;
- struct kfifo *cqid_fifo;
+ struct kfifo cqid_fifo;
spinlock_t cqid_fifo_lock;
- struct kfifo *pdid_fifo;
+ struct kfifo pdid_fifo;
spinlock_t pdid_fifo_lock;
};
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index bd233c08765..31f9201b298 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -39,12 +39,12 @@
#include "cxio_resource.h"
#include "cxio_hal.h"
-static struct kfifo *rhdl_fifo;
+static struct kfifo rhdl_fifo;
static spinlock_t rhdl_fifo_lock;
#define RANDOM_SIZE 16
-static int __cxio_init_resource_fifo(struct kfifo **fifo,
+static int __cxio_init_resource_fifo(struct kfifo *fifo,
spinlock_t *fifo_lock,
u32 nr, u32 skip_low,
u32 skip_high,
@@ -55,12 +55,11 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo,
u32 rarray[16];
spin_lock_init(fifo_lock);
- *fifo = kfifo_alloc(nr * sizeof(u32), GFP_KERNEL, fifo_lock);
- if (IS_ERR(*fifo))
+ if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
return -ENOMEM;
for (i = 0; i < skip_low + skip_high; i++)
- __kfifo_put(*fifo, (unsigned char *) &entry, sizeof(u32));
+ kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
if (random) {
j = 0;
random_bytes = random32();
@@ -72,33 +71,35 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo,
random_bytes = random32();
}
idx = (random_bytes >> (j * 2)) & 0xF;
- __kfifo_put(*fifo,
+ kfifo_in(fifo,
(unsigned char *) &rarray[idx],
sizeof(u32));
rarray[idx] = i;
j++;
}
for (i = 0; i < RANDOM_SIZE; i++)
- __kfifo_put(*fifo,
+ kfifo_in(fifo,
(unsigned char *) &rarray[i],
sizeof(u32));
} else
for (i = skip_low; i < nr - skip_high; i++)
- __kfifo_put(*fifo, (unsigned char *) &i, sizeof(u32));
+ kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
for (i = 0; i < skip_low + skip_high; i++)
- kfifo_get(*fifo, (unsigned char *) &entry, sizeof(u32));
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry,
+ sizeof(u32), fifo_lock) != sizeof(u32))
+ break;
return 0;
}
-static int cxio_init_resource_fifo(struct kfifo **fifo, spinlock_t * fifo_lock,
+static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
skip_high, 0));
}
-static int cxio_init_resource_fifo_random(struct kfifo **fifo,
+static int cxio_init_resource_fifo_random(struct kfifo *fifo,
spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
@@ -113,15 +114,13 @@ static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
- rdev_p->rscp->qpid_fifo = kfifo_alloc(T3_MAX_NUM_QP * sizeof(u32),
- GFP_KERNEL,
- &rdev_p->rscp->qpid_fifo_lock);
- if (IS_ERR(rdev_p->rscp->qpid_fifo))
+ if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
+ GFP_KERNEL))
return -ENOMEM;
for (i = 16; i < T3_MAX_NUM_QP; i++)
if (!(i & rdev_p->qpmask))
- __kfifo_put(rdev_p->rscp->qpid_fifo,
+ kfifo_in(&rdev_p->rscp->qpid_fifo,
(unsigned char *) &i, sizeof(u32));
return 0;
}
@@ -134,7 +133,7 @@ int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
void cxio_hal_destroy_rhdl_resource(void)
{
- kfifo_free(rhdl_fifo);
+ kfifo_free(&rhdl_fifo);
}
/* nr_* must be power of 2 */
@@ -167,11 +166,11 @@ int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
goto pdid_err;
return 0;
pdid_err:
- kfifo_free(rscp->cqid_fifo);
+ kfifo_free(&rscp->cqid_fifo);
cqid_err:
- kfifo_free(rscp->qpid_fifo);
+ kfifo_free(&rscp->qpid_fifo);
qpid_err:
- kfifo_free(rscp->tpt_fifo);
+ kfifo_free(&rscp->tpt_fifo);
tpt_err:
return -ENOMEM;
}
@@ -179,33 +178,37 @@ tpt_err:
/*
* returns 0 if no resource available
*/
-static u32 cxio_hal_get_resource(struct kfifo *fifo)
+static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
{
u32 entry;
- if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32)))
+ if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
return entry;
else
return 0; /* fifo empty */
}
-static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
+static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
+ u32 entry)
{
- BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0);
+ BUG_ON(
+ kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)
+ == 0);
}
u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->tpt_fifo);
+ return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
}
void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
{
- cxio_hal_put_resource(rscp->tpt_fifo, stag);
+ cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
}
u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
{
- u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
+ u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
+ &rscp->qpid_fifo_lock);
PDBG("%s qpid 0x%x\n", __func__, qpid);
return qpid;
}
@@ -213,35 +216,35 @@ u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
{
PDBG("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(rscp->qpid_fifo, qpid);
+ cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
}
u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->cqid_fifo);
+ return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
}
void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
{
- cxio_hal_put_resource(rscp->cqid_fifo, cqid);
+ cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
}
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->pdid_fifo);
+ return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
}
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
{
- cxio_hal_put_resource(rscp->pdid_fifo, pdid);
+ cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
}
void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
{
- kfifo_free(rscp->tpt_fifo);
- kfifo_free(rscp->cqid_fifo);
- kfifo_free(rscp->qpid_fifo);
- kfifo_free(rscp->pdid_fifo);
+ kfifo_free(&rscp->tpt_fifo);
+ kfifo_free(&rscp->cqid_fifo);
+ kfifo_free(&rscp->qpid_fifo);
+ kfifo_free(&rscp->pdid_fifo);
kfree(rscp);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 1cecf98829a..3eb8cecf81d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -365,18 +365,19 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (qhp->attr.state > IWCH_QP_STATE_RTS) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
qhp->wq.sq_size_log2);
if (num_wrs <= 0) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
while (wr) {
if (num_wrs == 0) {
err = -ENOMEM;
- *bad_wr = wr;
break;
}
idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -428,10 +429,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->opcode);
err = -EINVAL;
}
- if (err) {
- *bad_wr = wr;
+ if (err)
break;
- }
wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
sqp->wr_id = wr->wr_id;
sqp->opcode = wr2opcode(t3_wr_opcode);
@@ -454,6 +453,10 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+
+out:
+ if (err)
+ *bad_wr = wr;
return err;
}
@@ -471,18 +474,19 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (qhp->attr.state > IWCH_QP_STATE_RTS) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
qhp->wq.rq_size_log2) - 1;
if (!wr) {
spin_unlock_irqrestore(&qhp->lock, flag);
- return -EINVAL;
+ err = -ENOMEM;
+ goto out;
}
while (wr) {
if (wr->num_sge > T3_MAX_SGE) {
err = -EINVAL;
- *bad_wr = wr;
break;
}
idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
@@ -494,10 +498,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
err = build_zero_stag_recv(qhp, wqe, wr);
else
err = -ENOMEM;
- if (err) {
- *bad_wr = wr;
+
+ if (err)
break;
- }
+
build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
@@ -511,6 +515,10 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
spin_unlock_irqrestore(&qhp->lock, flag);
ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+
+out:
+ if (err)
+ *bad_wr = wr;
return err;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index c825142a2fb..0136abd50dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -375,6 +375,7 @@ extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
+extern spinlock_t shca_list_lock;
extern int ehca_static_rate;
extern int ehca_port_act_time;
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
index 523e733c630..3b87589b8ea 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -169,12 +169,15 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
unsigned long flags;
u64 h_ret;
- spin_lock_irqsave(&eq->spinlock, flags);
ibmebus_free_irq(eq->ist, (void *)shca);
- h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
+ spin_lock_irqsave(&shca_list_lock, flags);
+ eq->is_initialized = 0;
+ spin_unlock_irqrestore(&shca_list_lock, flags);
- spin_unlock_irqrestore(&eq->spinlock, flags);
+ tasklet_kill(&eq->interrupt_task);
+
+ h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't free EQ resources.");
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 4b89b791be6..42be0b15084 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
list_del(&cq->entry);
- __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
- smp_processor_id()));
+ __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
}
spin_unlock_irqrestore(&cct->task_lock, flags_cct);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fb2d83c5bf0..129a6bebd6e 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -123,7 +123,7 @@ DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
static LIST_HEAD(shca_list); /* list of all registered ehcas */
-static DEFINE_SPINLOCK(shca_list_lock);
+DEFINE_SPINLOCK(shca_list_lock);
static struct timer_list poll_eqs_timer;
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 8fd88cd828f..e3ec7fdd67b 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -400,7 +400,6 @@ static inline void map_ib_wc_status(u32 cqe_status,
static inline int post_one_send(struct ehca_qp *my_qp,
struct ib_send_wr *cur_send_wr,
- struct ib_send_wr **bad_send_wr,
int hidden)
{
struct ehca_wqe *wqe_p;
@@ -412,8 +411,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
- if (bad_send_wr)
- *bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -ENOMEM;
@@ -433,8 +430,6 @@ static inline int post_one_send(struct ehca_qp *my_qp,
*/
if (unlikely(ret)) {
my_qp->ipz_squeue.current_q_offset = start_offset;
- if (bad_send_wr)
- *bad_send_wr = cur_send_wr;
ehca_err(my_qp->ib_qp.device, "Could not write WQE "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -EINVAL;
@@ -448,7 +443,6 @@ int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr **bad_send_wr)
{
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
- struct ib_send_wr *cur_send_wr;
int wqe_cnt = 0;
int ret = 0;
unsigned long flags;
@@ -457,7 +451,8 @@ int ehca_post_send(struct ib_qp *qp,
if (unlikely(my_qp->state < IB_QPS_RTS)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* LOCK the QUEUE */
@@ -476,24 +471,21 @@ int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr circ_wr;
memset(&circ_wr, 0, sizeof(circ_wr));
circ_wr.opcode = IB_WR_RDMA_READ;
- post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
+ post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
wqe_cnt++;
ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
my_qp->message_count = my_qp->packet_count = 0;
}
/* loop processes list of send reqs */
- for (cur_send_wr = send_wr; cur_send_wr != NULL;
- cur_send_wr = cur_send_wr->next) {
- ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
+ while (send_wr) {
+ ret = post_one_send(my_qp, send_wr, 0);
if (unlikely(ret)) {
- /* if one or more WQEs were successful, don't fail */
- if (wqe_cnt)
- ret = 0;
goto post_send_exit0;
}
wqe_cnt++;
- } /* eof for cur_send_wr */
+ send_wr = send_wr->next;
+ }
post_send_exit0:
iosync(); /* serialize GAL register access */
@@ -503,6 +495,10 @@ post_send_exit0:
my_qp, qp->qp_num, wqe_cnt, ret);
my_qp->message_count += wqe_cnt;
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
+
+out:
+ if (ret)
+ *bad_send_wr = send_wr;
return ret;
}
@@ -511,7 +507,6 @@ static int internal_post_recv(struct ehca_qp *my_qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr)
{
- struct ib_recv_wr *cur_recv_wr;
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
@@ -522,27 +517,23 @@ static int internal_post_recv(struct ehca_qp *my_qp,
if (unlikely(!HAS_RQ(my_qp))) {
ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
my_qp, my_qp->real_qp_num, my_qp->ext_type);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_r, flags);
- /* loop processes list of send reqs */
- for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
- cur_recv_wr = cur_recv_wr->next) {
+ /* loop processes list of recv reqs */
+ while (recv_wr) {
u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
/* get pointer next to free WQE */
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
- if (bad_recv_wr)
- *bad_recv_wr = cur_recv_wr;
- if (wqe_cnt == 0) {
- ret = -ENOMEM;
- ehca_err(dev, "Too many posted WQEs "
- "qp_num=%x", my_qp->real_qp_num);
- }
+ ret = -ENOMEM;
+ ehca_err(dev, "Too many posted WQEs "
+ "qp_num=%x", my_qp->real_qp_num);
goto post_recv_exit0;
}
/*
@@ -552,7 +543,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
/* write a RECV WQE into the QUEUE */
- ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
+ ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
rq_map_idx);
/*
* if something failed,
@@ -560,22 +551,20 @@ static int internal_post_recv(struct ehca_qp *my_qp,
*/
if (unlikely(ret)) {
my_qp->ipz_rqueue.current_q_offset = start_offset;
- *bad_recv_wr = cur_recv_wr;
- if (wqe_cnt == 0) {
- ret = -EINVAL;
- ehca_err(dev, "Could not write WQE "
- "qp_num=%x", my_qp->real_qp_num);
- }
+ ret = -EINVAL;
+ ehca_err(dev, "Could not write WQE "
+ "qp_num=%x", my_qp->real_qp_num);
goto post_recv_exit0;
}
qmap_entry = &my_qp->rq_map.map[rq_map_idx];
- qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
+ qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
qmap_entry->reported = 0;
qmap_entry->cqe_req = 1;
wqe_cnt++;
- } /* eof for cur_recv_wr */
+ recv_wr = recv_wr->next;
+ } /* eof for recv_wr */
post_recv_exit0:
iosync(); /* serialize GAL register access */
@@ -584,6 +573,11 @@ post_recv_exit0:
ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
my_qp, my_qp->real_qp_num, wqe_cnt, ret);
spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
+
+out:
+ if (ret)
+ *bad_recv_wr = recv_wr;
+
return ret;
}
@@ -597,6 +591,7 @@ int ehca_post_recv(struct ib_qp *qp,
if (unlikely(my_qp->state == IB_QPS_RESET)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
+ *bad_recv_wr = recv_wr;
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 013d1380e77..d2787fe8030 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,6 +39,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
@@ -1697,7 +1698,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
unsigned len, int avail)
{
unsigned long flags;
- unsigned end, cnt = 0, next;
+ unsigned end, cnt = 0;
/* There are two bits per send buffer (busy and generation) */
start *= 2;
@@ -1748,12 +1749,7 @@ void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
if (dd->ipath_pioupd_thresh) {
end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
- next = find_first_bit(dd->ipath_pioavailkernel, end);
- while (next < end) {
- cnt++;
- next = find_next_bit(dd->ipath_pioavailkernel, end,
- next + 1);
- }
+ cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
}
spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3cb3f47a10b..e596537ff35 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -103,7 +103,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
- if (dev->dev->caps.max_gso_sz)
+ if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
props->device_cap_flags |= IB_DEVICE_UD_TSO;
if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 256a00c6aee..989555cee88 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -54,7 +54,8 @@ enum {
/*
* Largest possible UD header: send with GRH and immediate data.
*/
- MLX4_IB_UD_HEADER_SIZE = 72
+ MLX4_IB_UD_HEADER_SIZE = 72,
+ MLX4_IB_LSO_HEADER_SPARE = 128,
};
struct mlx4_ib_sqp {
@@ -67,7 +68,8 @@ struct mlx4_ib_sqp {
};
enum {
- MLX4_IB_MIN_SQ_STRIDE = 6
+ MLX4_IB_MIN_SQ_STRIDE = 6,
+ MLX4_IB_CACHE_LINE_SIZE = 64,
};
static const __be32 mlx4_ib_opcode[] = {
@@ -261,7 +263,7 @@ static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
case IB_QPT_UD:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_datagram_seg) +
- ((flags & MLX4_IB_QP_LSO) ? 64 : 0);
+ ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
case IB_QPT_UC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
@@ -897,7 +899,6 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
(to_mlx4_st(ibqp->qp_type) << 16));
- context->flags |= cpu_to_be32(1 << 8); /* DE? */
if (!(attr_mask & IB_QP_PATH_MIG_STATE))
context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
@@ -1467,16 +1468,12 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
- __be32 *lso_hdr_sz)
+ __be32 *lso_hdr_sz, __be32 *blh)
{
unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
- /*
- * This is a temporary limitation and will be removed in
- * a forthcoming FW release:
- */
- if (unlikely(halign > 64))
- return -EINVAL;
+ if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
+ *blh = cpu_to_be32(1 << 6);
if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
wr->num_sge > qp->sq.max_gs - (halign >> 4)))
@@ -1522,6 +1519,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 dummy;
__be32 *lso_wqe;
__be32 uninitialized_var(lso_hdr_sz);
+ __be32 blh;
int i;
spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1530,6 +1528,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
for (nreq = 0; wr; ++nreq, wr = wr->next) {
lso_wqe = &dummy;
+ blh = 0;
if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
err = -ENOMEM;
@@ -1616,7 +1615,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
if (wr->opcode == IB_WR_LSO) {
- err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz);
+ err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
@@ -1687,7 +1686,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
- (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+ (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
stamp = ind + qp->sq_spare_wqes;
ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
index d449eb6ec78..846dc97cf26 100644
--- a/drivers/infiniband/hw/nes/Kconfig
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -4,14 +4,13 @@ config INFINIBAND_NES
select LIBCRC32C
select INET_LRO
---help---
- This is a low-level driver for NetEffect RDMA enabled
- Network Interface Cards (RNIC).
+ This is the RDMA Network Interface Card (RNIC) driver for
+ NetEffect Ethernet Cluster Server Adapters.
config INFINIBAND_NES_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_NES
default n
---help---
- This option causes the NetEffect RNIC driver to produce debug
- messages. Select this if you are developing the driver
- or trying to diagnose a problem.
+ This option enables debug messages from the NetEffect RNIC
+ driver. Select this if you are diagnosing a problem.
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index cbde0cfe27e..b9d09bafd6c 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -521,7 +521,8 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
spin_lock_init(&nesdev->indexed_regs_lock);
/* Remap the PCI registers in adapter BAR0 to kernel VA space */
- mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), sizeof(mmio_regs));
+ mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0),
+ pci_resource_len(pcidev, BAR_0));
if (mmio_regs == NULL) {
printk(KERN_ERR PFX "Unable to remap BAR0\n");
ret = -EIO;
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bcc6abc4faf..98840564bb2 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 73473db1986..39468c27703 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -52,6 +52,7 @@
#include <linux/random.h>
#include <linux/list.h>
#include <linux/threads.h>
+#include <linux/highmem.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
@@ -251,6 +252,33 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
mpa_frame = (struct ietf_mpa_frame *)buffer;
cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
+ /* make sure mpa private data len does not exceed 512 bytes */
+ if (cm_node->mpa_frame_size > IETF_MAX_PRIV_DATA_LEN) {
+ nes_debug(NES_DBG_CM, "The received Length of Private"
+ " Data field exceeds 512 octets\n");
+ return -EINVAL;
+ }
+ /*
+ * make sure the MPA receiver interoperates with the
+ * received MPA version and MPA key information
+ *
+ */
+ if (mpa_frame->rev != mpa_version) {
+ nes_debug(NES_DBG_CM, "The received mpa version"
+ " can not be interoperated\n");
+ return -EINVAL;
+ }
+ if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
+ nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
+ return -EINVAL;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
+ nes_debug(NES_DBG_CM, "Unexpected MPA Key received \n");
+ return -EINVAL;
+ }
+ }
if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
@@ -486,6 +514,8 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
send_reset(cm_node, NULL);
break;
default:
+ add_ref_cm_node(cm_node);
+ send_reset(cm_node, NULL);
create_event(cm_node, NES_CM_EVENT_ABORTED);
}
}
@@ -949,6 +979,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
reset_entry);
{
struct nes_cm_node *loopback = cm_node->loopbackpartner;
+ enum nes_cm_node_state old_state;
if (NES_CM_STATE_FIN_WAIT1 <= cm_node->state) {
rem_ref_cm_node(cm_node->cm_core, cm_node);
} else {
@@ -960,11 +991,12 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
NES_CM_STATE_CLOSED;
WARN_ON(1);
} else {
- cm_node->state =
- NES_CM_STATE_CLOSED;
- rem_ref_cm_node(
- cm_node->cm_core,
- cm_node);
+ old_state = cm_node->state;
+ cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != NES_CM_STATE_MPAREQ_RCVD)
+ rem_ref_cm_node(
+ cm_node->cm_core,
+ cm_node);
}
} else {
struct nes_cm_event event;
@@ -980,20 +1012,9 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
loopback->loc_port;
event.cm_info.cm_id = loopback->cm_id;
cm_event_connect_error(&event);
+ cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
loopback->state = NES_CM_STATE_CLOSED;
- event.cm_node = cm_node;
- event.cm_info.rem_addr =
- cm_node->rem_addr;
- event.cm_info.loc_addr =
- cm_node->loc_addr;
- event.cm_info.rem_port =
- cm_node->rem_port;
- event.cm_info.loc_port =
- cm_node->loc_port;
- event.cm_info.cm_id = cm_node->cm_id;
- cm_event_reset(&event);
-
rem_ref_cm_node(cm_node->cm_core,
cm_node);
@@ -1077,12 +1098,13 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
/**
* nes_addr_resolve_neigh
*/
-static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
+static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex)
{
struct rtable *rt;
struct flowi fl;
struct neighbour *neigh;
- int rc = -1;
+ int rc = arpindex;
+ struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = htonl(dst_ip);
@@ -1098,6 +1120,21 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
" is %pM, Gateway is 0x%08X \n", dst_ip,
neigh->ha, ntohl(rt->rt_gateway));
+
+ if (arpindex >= 0) {
+ if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
+ neigh->ha, ETH_ALEN)){
+ /* Mac address same as in nes_arp_table */
+ neigh_release(neigh);
+ ip_rt_put(rt);
+ return rc;
+ }
+
+ nes_manage_arp_cache(nesvnic->netdev,
+ nesadapter->arp_table[arpindex].mac_addr,
+ dst_ip, NES_ARP_DELETE);
+ }
+
nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
dst_ip, NES_ARP_ADD);
rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
@@ -1113,7 +1150,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
return rc;
}
-
/**
* make_cm_node - create a new instance of a cm node
*/
@@ -1123,6 +1159,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
{
struct nes_cm_node *cm_node;
struct timespec ts;
+ int oldarpindex = 0;
int arpindex = 0;
struct nes_device *nesdev;
struct nes_adapter *nesadapter;
@@ -1176,17 +1213,18 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
nesadapter = nesdev->nesadapter;
cm_node->loopbackpartner = NULL;
+
/* get the mac addr for the remote node */
if (ipv4_is_loopback(htonl(cm_node->rem_addr)))
arpindex = nes_arp_table(nesdev, ntohl(nesvnic->local_ipaddr), NULL, NES_ARP_RESOLVE);
- else
- arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+ else {
+ oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+ arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr, oldarpindex);
+
+ }
if (arpindex < 0) {
- arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr);
- if (arpindex < 0) {
- kfree(cm_node);
- return NULL;
- }
+ kfree(cm_node);
+ return NULL;
}
/* copy the mac addr to node context */
@@ -1333,13 +1371,20 @@ static void handle_fin_pkt(struct nes_cm_node *cm_node)
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
case NES_CM_STATE_ESTABLISHED:
- case NES_CM_STATE_MPAREQ_SENT:
case NES_CM_STATE_MPAREJ_RCVD:
cm_node->tcp_cntxt.rcv_nxt++;
cleanup_retrans_entry(cm_node);
cm_node->state = NES_CM_STATE_LAST_ACK;
send_fin(cm_node, NULL);
break;
+ case NES_CM_STATE_MPAREQ_SENT:
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ cleanup_retrans_entry(cm_node);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ add_ref_cm_node(cm_node);
+ send_reset(cm_node, NULL);
+ break;
case NES_CM_STATE_FIN_WAIT1:
cm_node->tcp_cntxt.rcv_nxt++;
cleanup_retrans_entry(cm_node);
@@ -1590,6 +1635,7 @@ static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
break;
case NES_CM_STATE_CLOSED:
cleanup_retrans_entry(cm_node);
+ add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_TSA:
@@ -1641,9 +1687,15 @@ static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
passive_open_err(cm_node, skb, 1);
break;
case NES_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ cleanup_retrans_entry(cm_node);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ send_reset(cm_node, skb);
+ break;
case NES_CM_STATE_CLOSED:
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
cleanup_retrans_entry(cm_node);
+ add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_ESTABLISHED:
@@ -1712,8 +1764,13 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
dev_kfree_skb_any(skb);
break;
case NES_CM_STATE_LISTENING:
+ cleanup_retrans_entry(cm_node);
+ cm_node->state = NES_CM_STATE_CLOSED;
+ send_reset(cm_node, skb);
+ break;
case NES_CM_STATE_CLOSED:
cleanup_retrans_entry(cm_node);
+ add_ref_cm_node(cm_node);
send_reset(cm_node, skb);
break;
case NES_CM_STATE_LAST_ACK:
@@ -1974,7 +2031,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (!cm_node)
return NULL;
mpa_frame = &cm_node->mpa_frame;
- strcpy(mpa_frame->key, IEFT_MPA_KEY_REQ);
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
mpa_frame->flags = IETF_MPA_FLAGS_CRC;
mpa_frame->rev = IETF_MPA_VERSION;
mpa_frame->priv_data_len = htons(private_data_len);
@@ -2102,30 +2159,39 @@ static int mini_cm_reject(struct nes_cm_core *cm_core,
cm_node->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_core, cm_node);
} else {
- ret = send_mpa_reject(cm_node);
- if (ret) {
- cm_node->state = NES_CM_STATE_CLOSED;
- err = send_reset(cm_node, NULL);
- if (err)
- WARN_ON(1);
- } else
- cm_id->add_ref(cm_id);
+ if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
+ rem_ref_cm_node(cm_core, cm_node);
+ } else {
+ ret = send_mpa_reject(cm_node);
+ if (ret) {
+ cm_node->state = NES_CM_STATE_CLOSED;
+ err = send_reset(cm_node, NULL);
+ if (err)
+ WARN_ON(1);
+ } else
+ cm_id->add_ref(cm_id);
+ }
}
} else {
cm_node->cm_id = NULL;
- event.cm_node = loopback;
- event.cm_info.rem_addr = loopback->rem_addr;
- event.cm_info.loc_addr = loopback->loc_addr;
- event.cm_info.rem_port = loopback->rem_port;
- event.cm_info.loc_port = loopback->loc_port;
- event.cm_info.cm_id = loopback->cm_id;
- cm_event_mpa_reject(&event);
- rem_ref_cm_node(cm_core, cm_node);
- loopback->state = NES_CM_STATE_CLOSING;
+ if (cm_node->state == NES_CM_STATE_LISTENER_DESTROYED) {
+ rem_ref_cm_node(cm_core, cm_node);
+ rem_ref_cm_node(cm_core, loopback);
+ } else {
+ event.cm_node = loopback;
+ event.cm_info.rem_addr = loopback->rem_addr;
+ event.cm_info.loc_addr = loopback->loc_addr;
+ event.cm_info.rem_port = loopback->rem_port;
+ event.cm_info.loc_port = loopback->loc_port;
+ event.cm_info.cm_id = loopback->cm_id;
+ cm_event_mpa_reject(&event);
+ rem_ref_cm_node(cm_core, cm_node);
+ loopback->state = NES_CM_STATE_CLOSING;
- cm_id = loopback->cm_id;
- rem_ref_cm_node(cm_core, loopback);
- cm_id->rem_ref(cm_id);
+ cm_id = loopback->cm_id;
+ rem_ref_cm_node(cm_core, loopback);
+ cm_id->rem_ref(cm_id);
+ }
}
return ret;
@@ -2164,11 +2230,15 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
case NES_CM_STATE_CLOSING:
ret = -1;
break;
- case NES_CM_STATE_MPAREJ_RCVD:
case NES_CM_STATE_LISTENING:
+ cleanup_retrans_entry(cm_node);
+ send_reset(cm_node, NULL);
+ break;
+ case NES_CM_STATE_MPAREJ_RCVD:
case NES_CM_STATE_UNKNOWN:
case NES_CM_STATE_INITED:
case NES_CM_STATE_CLOSED:
+ case NES_CM_STATE_LISTENER_DESTROYED:
ret = rem_ref_cm_node(cm_core, cm_node);
break;
case NES_CM_STATE_TSA:
@@ -2687,8 +2757,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct nes_pd *nespd;
u64 tagged_offset;
-
-
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
return -EINVAL;
@@ -2704,6 +2772,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
"%s\n", cm_node, nesvnic, nesvnic->netdev,
nesvnic->netdev->name);
+ if (NES_CM_STATE_LISTENER_DESTROYED == cm_node->state) {
+ if (cm_node->loopbackpartner)
+ rem_ref_cm_node(cm_node->cm_core, cm_node->loopbackpartner);
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ return -EINVAL;
+ }
+
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
cm_node->nesqp = nesqp;
@@ -2786,6 +2861,10 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cpu_to_le32(conn_param->private_data_len +
sizeof(struct ietf_mpa_frame));
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
+ if (nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
+ kunmap(nesqp->page);
+ }
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -2929,7 +3008,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
if (cm_node->mpa_frame_size > MAX_CM_BUFFER)
return -EINVAL;
- strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
+ memcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
if (loopback) {
memcpy(&loopback->mpa_frame.priv_data, pdata, pdata_len);
loopback->mpa_frame.priv_data_len = pdata_len;
@@ -2974,6 +3053,9 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!nesdev)
return -EINVAL;
+ if (!(cm_id->local_addr.sin_port) || !(cm_id->remote_addr.sin_port))
+ return -EINVAL;
+
nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
"0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
ntohl(nesvnic->local_ipaddr),
@@ -3251,6 +3333,11 @@ static void cm_event_connected(struct nes_cm_event *event)
wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+ if (nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
+ kunmap(nesqp->page);
+ }
+
/* use the reserved spot on the WQ for the extra first WQE */
nesqp->nesqp_context->ird_ord_sizes &=
cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
@@ -3346,7 +3433,7 @@ static void cm_event_connect_error(struct nes_cm_event *event)
nesqp->cm_id = NULL;
cm_id->provider_data = NULL;
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.status = IW_CM_EVENT_STATUS_REJECTED;
+ cm_event.status = -ECONNRESET;
cm_event.provider_data = cm_id->provider_data;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
@@ -3390,6 +3477,8 @@ static void cm_event_reset(struct nes_cm_event *event)
nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
nesqp = cm_id->provider_data;
+ if (!nesqp)
+ return;
nesqp->cm_id = NULL;
/* cm_id->provider_data = NULL; */
@@ -3401,8 +3490,8 @@ static void cm_event_reset(struct nes_cm_event *event)
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
- ret = cm_id->event_handler(cm_id, &cm_event);
cm_id->add_ref(cm_id);
+ ret = cm_id->event_handler(cm_id, &cm_event);
atomic_inc(&cm_closes);
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = IW_CM_EVENT_STATUS_OK;
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 90e8e4d8a5c..d9825fda70a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -47,6 +47,8 @@
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VERSION 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
@@ -169,7 +171,7 @@ struct nes_timer_entry {
#define NES_CM_DEF_SEQ2 0x18ed5740
#define NES_CM_DEF_LOCAL_ID2 0xb807
-#define MAX_CM_BUFFER 512
+#define MAX_CM_BUFFER (IETF_MPA_FRAME_SIZE + IETF_MAX_PRIV_DATA_LEN)
typedef u32 nes_addr_t;
@@ -198,6 +200,7 @@ enum nes_cm_node_state {
NES_CM_STATE_TIME_WAIT,
NES_CM_STATE_LAST_ACK,
NES_CM_STATE_CLOSING,
+ NES_CM_STATE_LISTENER_DESTROYED,
NES_CM_STATE_CLOSED
};
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
index 0fb8d81d9a6..b4393a16099 100644
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 3512d6de301..b1c2cbb88f0 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -424,8 +424,9 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->base_pd = 1;
- nesadapter->device_cap_flags =
- IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+ nesadapter->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@@ -436,11 +437,12 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
- /* mark the usual suspect QPs and CQs as in use */
+ /* mark the usual suspect QPs, MR and CQs as in use */
for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
set_bit(u32temp, nesadapter->allocated_qps);
set_bit(u32temp, nesadapter->allocated_cqs);
}
+ set_bit(0, nesadapter->allocated_mrs);
for (u32temp = 0; u32temp < 20; u32temp++)
set_bit(u32temp, nesadapter->allocated_pds);
@@ -481,7 +483,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
nesadapter->max_sge = 4;
- nesadapter->max_cqe = 32767;
+ nesadapter->max_cqe = 32766;
if (nes_read_eeprom_values(nesdev, nesadapter)) {
printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
@@ -1355,6 +1357,8 @@ int nes_init_phy(struct nes_device *nesdev)
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D)) {
+ u32 first_time = 1;
+
/* Check firmware heartbeat */
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
@@ -1362,8 +1366,13 @@ int nes_init_phy(struct nes_device *nesdev)
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if (temp_phy_data != temp_phy_data2)
- return 0;
+ if (temp_phy_data != temp_phy_data2) {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ if ((temp_phy_data & 0xff) > 0x20)
+ return 0;
+ printk(PFX "Reinitializing PHY\n");
+ }
/* no heartbeat, configure the PHY */
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
@@ -1399,7 +1408,7 @@ int nes_init_phy(struct nes_device *nesdev)
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
do {
if (counter++ > 150) {
- nes_debug(NES_DBG_PHY, "No PHY heartbeat\n");
+ printk(PFX "No PHY heartbeat\n");
break;
}
mdelay(1);
@@ -1413,11 +1422,20 @@ int nes_init_phy(struct nes_device *nesdev)
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
if (counter++ > 300) {
- nes_debug(NES_DBG_PHY, "PHY did not track\n");
- break;
+ if (((temp_phy_data & 0xff) == 0x0) && first_time) {
+ first_time = 0;
+ counter = 0;
+ /* reset AMCC PHY and try again */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
+ continue;
+ } else {
+ printk(PFX "PHY did not track\n");
+ break;
+ }
}
mdelay(10);
- } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
+ } while ((temp_phy_data & 0xff) < 0x30);
/* setup signal integrity */
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index f28a41ba9fa..084be0ee689 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -546,11 +546,23 @@ enum nes_iwarp_sq_fmr_wqe_word_idx {
NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
};
+enum nes_iwarp_sq_fmr_opcodes {
+ NES_IWARP_SQ_FMR_WQE_ZERO_BASED = (1<<6),
+ NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K = (0<<7),
+ NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M = (1<<7),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ = (1<<16),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE = (1<<17),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ = (1<<18),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE = (1<<19),
+ NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND = (1<<20),
+};
+
+#define NES_IWARP_SQ_FMR_WQE_MR_LENGTH_HIGH_MASK 0xFF;
+
enum nes_iwarp_sq_locinv_wqe_word_idx {
NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
};
-
enum nes_iwarp_rq_wqe_word_idx {
NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
@@ -1153,6 +1165,19 @@ struct nes_pbl {
/* TODO: need to add list for two level tables */
};
+#define NES_4K_PBL_CHUNK_SIZE 4096
+
+struct nes_fast_mr_wqe_pbl {
+ u64 *kva;
+ dma_addr_t paddr;
+};
+
+struct nes_ib_fast_reg_page_list {
+ struct ib_fast_reg_page_list ibfrpl;
+ struct nes_fast_mr_wqe_pbl nes_wqe_pbl;
+ u64 pbl;
+};
+
struct nes_listener {
struct work_struct work;
struct workqueue_struct *wq;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index de18fdfdadf..ab110278018 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index cc90c14b49e..71e133ab209 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
@@ -86,6 +86,7 @@ enum iwnes_memreg_type {
IWNES_MEMREG_TYPE_CQ = 0x0002,
IWNES_MEMREG_TYPE_MW = 0x0003,
IWNES_MEMREG_TYPE_FMR = 0x0004,
+ IWNES_MEMREG_TYPE_FMEM = 0x0005,
};
struct nes_mem_reg_req {
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 9687c397ce1..729d525c5b7 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index a680c42d6e8..64d3136e374 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -275,342 +275,236 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
}
-/**
- * nes_alloc_fmr
+/*
+ * nes_alloc_fast_mr
*/
-static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
- int ibmr_access_flags,
- struct ib_fmr_attr *ibfmr_attr)
+static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ u32 stag, u32 page_count)
{
- unsigned long flags;
- struct nes_pd *nespd = to_nespd(ibpd);
- struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
- struct nes_device *nesdev = nesvnic->nesdev;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
- struct nes_fmr *nesfmr;
- struct nes_cqp_request *cqp_request;
struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
int ret;
- u32 stag;
- u32 stag_index = 0;
- u32 next_stag_index = 0;
- u32 driver_key = 0;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 opcode = 0;
- u8 stag_key = 0;
- int i=0;
- struct nes_vpbl vpbl;
-
- get_random_bytes(&next_stag_index, sizeof(next_stag_index));
- stag_key = (u8)next_stag_index;
-
- driver_key = 0;
-
- next_stag_index >>= 8;
- next_stag_index %= nesadapter->max_mr;
-
- ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
- nesadapter->max_mr, &stag_index, &next_stag_index);
- if (ret) {
- goto failed_resource_alloc;
- }
-
- nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
- if (!nesfmr) {
- ret = -ENOMEM;
- goto failed_fmr_alloc;
- }
-
- nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
- if (ibfmr_attr->max_pages == 1) {
- /* use zero length PBL */
- nesfmr->nesmr.pbl_4k = 0;
- nesfmr->nesmr.pbls_used = 0;
- } else if (ibfmr_attr->max_pages <= 32) {
- /* use PBL 256 */
- nesfmr->nesmr.pbl_4k = 0;
- nesfmr->nesmr.pbls_used = 1;
- } else if (ibfmr_attr->max_pages <= 512) {
- /* use 4K PBLs */
- nesfmr->nesmr.pbl_4k = 1;
- nesfmr->nesmr.pbls_used = 1;
- } else {
- /* use two level 4K PBLs */
- /* add support for two level 256B PBLs */
- nesfmr->nesmr.pbl_4k = 1;
- nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
- ((ibfmr_attr->max_pages & 511) ? 1 : 0);
- }
- /* Register the region with the adapter */
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
-
- /* track PBL resources */
- if (nesfmr->nesmr.pbls_used != 0) {
- if (nesfmr->nesmr.pbl_4k) {
- if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_avail;
- } else {
- nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
- }
- } else {
- if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_avail;
- } else {
- nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
- }
- }
- }
-
- /* one level pbl */
- if (nesfmr->nesmr.pbls_used == 0) {
- nesfmr->root_vpbl.pbl_vbase = NULL;
- nes_debug(NES_DBG_MR, "zero level pbl \n");
- } else if (nesfmr->nesmr.pbls_used == 1) {
- /* can change it to kmalloc & dma_map_single */
- nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &nesfmr->root_vpbl.pbl_pbase);
- if (!nesfmr->root_vpbl.pbl_vbase) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_alloc;
- }
- nesfmr->leaf_pbl_cnt = 0;
- nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
- nesfmr->root_vpbl.pbl_vbase);
- }
- /* two level pbl */
- else {
- nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
- &nesfmr->root_vpbl.pbl_pbase);
- if (!nesfmr->root_vpbl.pbl_vbase) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_vpbl_alloc;
- }
-
- nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
- nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
- if (!nesfmr->root_vpbl.leaf_vpbl) {
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- ret = -ENOMEM;
- goto failed_leaf_vpbl_alloc;
- }
-
- nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
- " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
- nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
-
- for (i=0; i<nesfmr->leaf_pbl_cnt; i++)
- nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL;
-
- for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
- vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
- &vpbl.pbl_pbase);
-
- if (!vpbl.pbl_vbase) {
- ret = -ENOMEM;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
- goto failed_leaf_vpbl_pages_alloc;
- }
-
- nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
- nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
- nesfmr->root_vpbl.leaf_vpbl[i] = vpbl;
-
- nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n",
- nesfmr->root_vpbl.pbl_vbase[i].pa_low,
- nesfmr->root_vpbl.pbl_vbase[i].pa_high,
- &nesfmr->root_vpbl.leaf_vpbl[i]);
- }
- }
- nesfmr->ib_qp = NULL;
- nesfmr->access_rights =0;
+ u16 major_code;
+ u64 region_length = page_count * PAGE_SIZE;
- stag = stag_index << 8;
- stag |= driver_key;
- stag += (u32)stag_key;
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
cqp_request = nes_get_cqp_request(nesdev);
if (cqp_request == NULL) {
nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
- ret = -ENOMEM;
- goto failed_leaf_vpbl_pages_alloc;
+ return -ENOMEM;
}
+ nes_debug(NES_DBG_MR, "alloc_fast_reg_mr: page_count = %d, "
+ "region_length = %llu\n",
+ page_count, region_length);
cqp_request->waiting = 1;
cqp_wqe = &cqp_request->cqp_wqe;
- nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
- stag, stag_index);
-
- opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
-
- if (nesfmr->nesmr.pbl_4k == 1)
- opcode |= NES_CQP_STAG_PBL_BLK_SIZE;
-
- if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) {
- opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE |
- NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN;
- nesfmr->access_rights |=
- NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE |
- NES_CQP_STAG_REM_ACC_EN;
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (nesadapter->free_4kpbl > 0) {
+ nesadapter->free_4kpbl--;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ } else {
+ /* No 4K PBLs available: */
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ nes_debug(NES_DBG_MR, "Out of Pbls\n");
+ nes_free_cqp_request(nesdev, cqp_request);
+ return -ENOMEM;
}
- if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) {
- opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ |
- NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN;
- nesfmr->access_rights |=
- NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ |
- NES_CQP_STAG_REM_ACC_EN;
- }
+ opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_MR |
+ NES_CQP_STAG_PBL_BLK_SIZE | NES_CQP_STAG_VA_TO |
+ NES_CQP_STAG_REM_ACC_EN;
+ /*
+ * The current OFED API does not support the zero based TO option.
+ * If that is added, the NES_CQP_STAG_VA* option will need to change. Also,
+ * the API does not support the ability to create the MR with local access
+ * only and to keep the SQ op from overriding it. Given this, remote access
+ * must be enabled here.
+ */
nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
- set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, 1);
- cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] =
- cpu_to_le32((nesfmr->nesmr.pbls_used>1) ?
- (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used);
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
+ cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
+ cpu_to_le32(nespd->pd_id & 0x00007fff);
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, 0);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, 0);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, 0);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX, (page_count * 8));
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
+ barrier();
atomic_set(&cqp_request->refcount, 2);
nes_post_cqp_request(nesdev, cqp_request);
/* Wait for CQP */
- ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
- NES_EVENT_TIMEOUT);
- nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
- " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
- stag, ret, cqp_request->major_code, cqp_request->minor_code);
-
- if ((!ret) || (cqp_request->major_code)) {
- nes_put_cqp_request(nesdev, cqp_request);
- ret = (!ret) ? -ETIME : -EIO;
- goto failed_leaf_vpbl_pages_alloc;
- }
+ ret = wait_event_timeout(cqp_request->waitq,
+ (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+
+ nes_debug(NES_DBG_MR, "Allocate STag 0x%08X completed, "
+ "wait_event_timeout ret = %u, CQP Major:Minor codes = "
+ "0x%04X:0x%04X.\n", stag, ret, cqp_request->major_code,
+ cqp_request->minor_code);
+ major_code = cqp_request->major_code;
nes_put_cqp_request(nesdev, cqp_request);
- nesfmr->nesmr.ibfmr.lkey = stag;
- nesfmr->nesmr.ibfmr.rkey = stag;
- nesfmr->attr = *ibfmr_attr;
-
- return &nesfmr->nesmr.ibfmr;
-
- failed_leaf_vpbl_pages_alloc:
- /* unroll all allocated pages */
- for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
- if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) {
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
- nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
- }
- }
- if (nesfmr->root_vpbl.leaf_vpbl)
- kfree(nesfmr->root_vpbl.leaf_vpbl);
- failed_leaf_vpbl_alloc:
- if (nesfmr->leaf_pbl_cnt == 0) {
- if (nesfmr->root_vpbl.pbl_vbase)
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
- } else
- pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
-
- failed_vpbl_alloc:
- if (nesfmr->nesmr.pbls_used != 0) {
+ if (!ret || major_code) {
spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (nesfmr->nesmr.pbl_4k)
- nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
- else
- nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+ nesadapter->free_4kpbl++;
spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
}
-failed_vpbl_avail:
- kfree(nesfmr);
-
- failed_fmr_alloc:
- nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
-
- failed_resource_alloc:
- return ERR_PTR(ret);
+ if (!ret)
+ return -ETIME;
+ else if (major_code)
+ return -EIO;
+ return 0;
}
-
-/**
- * nes_dealloc_fmr
+/*
+ * nes_alloc_fast_reg_mr
*/
-static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
+struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len)
{
- unsigned long flags;
- struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
- struct nes_fmr *nesfmr = to_nesfmr(nesmr);
- struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- int i = 0;
- int rc;
- /* free the resources */
- if (nesfmr->leaf_pbl_cnt == 0) {
- /* single PBL case */
- if (nesfmr->root_vpbl.pbl_vbase)
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
- } else {
- for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) {
- pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
- nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
- }
- kfree(nesfmr->root_vpbl.leaf_vpbl);
- pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
- nesfmr->root_vpbl.pbl_pbase);
- }
- nesmr->ibmw.device = ibfmr->device;
- nesmr->ibmw.pd = ibfmr->pd;
- nesmr->ibmw.rkey = ibfmr->rkey;
- nesmr->ibmw.uobject = NULL;
+ u32 next_stag_index;
+ u8 stag_key = 0;
+ u32 driver_key = 0;
+ int err = 0;
+ u32 stag_index = 0;
+ struct nes_mr *nesmr;
+ u32 stag;
+ int ret;
+ struct ib_mr *ibmr;
+/*
+ * Note: this always uses a fixed-length, single-page-entry PBL so that the
+ * fast_reg_mr operation always knows the size of the PBL.
+ */
+ if (max_page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
+ return ERR_PTR(-E2BIG);
- rc = nes_dealloc_mw(&nesmr->ibmw);
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
- if ((rc == 0) && (nesfmr->nesmr.pbls_used != 0)) {
- spin_lock_irqsave(&nesadapter->pbl_lock, flags);
- if (nesfmr->nesmr.pbl_4k) {
- nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
- WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
- } else {
- nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
- WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
- }
- spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index,
+ &next_stag_index);
+ if (err)
+ return ERR_PTR(err);
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
}
- return rc;
-}
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+ nes_debug(NES_DBG_MR, "Allocating STag 0x%08X index = 0x%08X\n",
+ stag, stag_index);
-/**
- * nes_map_phys_fmr
+ ret = alloc_fast_reg_mr(nesdev, nespd, stag, max_page_list_len);
+
+ if (ret == 0) {
+ nesmr->ibmr.rkey = stag;
+ nesmr->ibmr.lkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_FMEM;
+ ibmr = &nesmr->ibmr;
+ } else {
+ kfree(nesmr);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ }
+ return ibmr;
+}
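/*
 * Illustrative sketch, not part of this patch: how a consumer might allocate
 * the fast-registration resources that nes_alloc_fast_reg_mr() and
 * nes_alloc_fast_reg_page_list() back, via the standard ib_verbs wrappers.
 * 'pd', the function name and the minimal error handling are assumptions.
 */
#include <rdma/ib_verbs.h>

static int example_alloc_frmr(struct ib_pd *pd, struct ib_mr **mr_out,
			      struct ib_fast_reg_page_list **frpl_out)
{
	struct ib_mr *frmr;
	struct ib_fast_reg_page_list *frpl;
	int max_pages = 32;	/* must fit in one 4K PBL (512 u64 entries) */

	frmr = ib_alloc_fast_reg_mr(pd, max_pages);
	if (IS_ERR(frmr))
		return PTR_ERR(frmr);

	frpl = ib_alloc_fast_reg_page_list(pd->device, max_pages);
	if (IS_ERR(frpl)) {
		ib_dereg_mr(frmr);
		return PTR_ERR(frpl);
	}

	*mr_out = frmr;
	*frpl_out = frpl;
	return 0;
}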
+
+/*
+ * nes_alloc_fast_reg_page_list
*/
-static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
+static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list(
+ struct ib_device *ibdev,
+ int page_list_len)
{
- return 0;
-}
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct ib_fast_reg_page_list *pifrpl;
+ struct nes_ib_fast_reg_page_list *pnesfrpl;
+ if (page_list_len > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64)))
+ return ERR_PTR(-E2BIG);
+ /*
+ * Allocate the nes_ib_fast_reg_page_list structure (which embeds the
+ * ib_fast_reg_page_list) together with the PBL table.
+ */
+ pnesfrpl = kmalloc(sizeof(struct nes_ib_fast_reg_page_list) +
+ page_list_len * sizeof(u64), GFP_KERNEL);
+
+ if (!pnesfrpl)
+ return ERR_PTR(-ENOMEM);
-/**
- * nes_unmap_frm
+ pifrpl = &pnesfrpl->ibfrpl;
+ pifrpl->page_list = &pnesfrpl->pbl;
+ pifrpl->max_page_list_len = page_list_len;
+ /*
+ * Allocate the WQE PBL
+ */
+ pnesfrpl->nes_wqe_pbl.kva = pci_alloc_consistent(nesdev->pcidev,
+ page_list_len * sizeof(u64),
+ &pnesfrpl->nes_wqe_pbl.paddr);
+
+ if (!pnesfrpl->nes_wqe_pbl.kva) {
+ kfree(pnesfrpl);
+ return ERR_PTR(-ENOMEM);
+ }
+ nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, "
+ "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, "
+ "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl,
+ pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva,
+ (void *)pnesfrpl->nes_wqe_pbl.paddr);
+
+ return pifrpl;
+}
+
+/*
+ * nes_free_fast_reg_page_list
*/
-static int nes_unmap_fmr(struct list_head *ibfmr_list)
+static void nes_free_fast_reg_page_list(struct ib_fast_reg_page_list *pifrpl)
{
- return 0;
+ struct nes_vnic *nesvnic = to_nesvnic(pifrpl->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_ib_fast_reg_page_list *pnesfrpl;
+
+ pnesfrpl = container_of(pifrpl, struct nes_ib_fast_reg_page_list, ibfrpl);
+ /*
+ * Free the WQE PBL.
+ */
+ pci_free_consistent(nesdev->pcidev,
+ pifrpl->max_page_list_len * sizeof(u64),
+ pnesfrpl->nes_wqe_pbl.kva,
+ pnesfrpl->nes_wqe_pbl.paddr);
+ /*
+ * Free the PBL structure
+ */
+ kfree(pnesfrpl);
}
-
-
/**
* nes_query_device
*/
@@ -633,23 +527,23 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
props->max_sge = nesdev->nesadapter->max_sge;
props->max_cq = nesibdev->max_cq;
- props->max_cqe = nesdev->nesadapter->max_cqe - 1;
+ props->max_cqe = nesdev->nesadapter->max_cqe;
props->max_mr = nesibdev->max_mr;
props->max_mw = nesibdev->max_mr;
props->max_pd = nesibdev->max_pd;
props->max_sge_rd = 1;
switch (nesdev->nesadapter->max_irrq_wr) {
case 0:
- props->max_qp_rd_atom = 1;
+ props->max_qp_rd_atom = 2;
break;
case 1:
- props->max_qp_rd_atom = 4;
+ props->max_qp_rd_atom = 8;
break;
case 2:
- props->max_qp_rd_atom = 16;
+ props->max_qp_rd_atom = 32;
break;
case 3:
- props->max_qp_rd_atom = 32;
+ props->max_qp_rd_atom = 64;
break;
default:
props->max_qp_rd_atom = 0;
@@ -1121,6 +1015,7 @@ static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
kunmap(nesqp->page);
return -ENOMEM;
}
+ nesqp->sq_kmapped = 1;
nesqp->hwqp.q2_vbase = mem;
mem += 256;
memset(nesqp->hwqp.q2_vbase, 0, 256);
@@ -1198,7 +1093,10 @@ static inline void nes_free_qp_mem(struct nes_device *nesdev,
pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase );
nesqp->pbl_vbase = NULL;
- kunmap(nesqp->page);
+ if (nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
+ kunmap(nesqp->page);
+ }
}
}
@@ -1504,8 +1402,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n",
nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));
spin_lock_init(&nesqp->lock);
- init_waitqueue_head(&nesqp->state_waitq);
- init_waitqueue_head(&nesqp->kick_waitq);
nes_add_ref(&nesqp->ibqp);
break;
default:
@@ -1513,6 +1409,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
return ERR_PTR(-EINVAL);
}
+ nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
+
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
@@ -1607,8 +1505,10 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
}
}
- if (nesqp->pbl_pbase)
+ if (nesqp->pbl_pbase && nesqp->sq_kmapped) {
+ nesqp->sq_kmapped = 0;
kunmap(nesqp->page);
+ }
} else {
/* Clean any pending completions from the cq(s) */
if (nesqp->nesscq)
@@ -1649,6 +1549,9 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
unsigned long flags;
int ret;
+ if (entries > nesadapter->max_cqe)
+ return ERR_PTR(-EINVAL);
+
err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
nesadapter->max_cq, &cq_num, &nesadapter->next_cq);
if (err) {
@@ -2606,9 +2509,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
stag = stag_index << 8;
stag |= driver_key;
stag += (u32)stag_key;
- if (stag == 0) {
- stag = 1;
- }
iova_start = virt;
/* Make the leaf PBL the root if only one PBL */
@@ -3109,7 +3009,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
" already done based on hw state.\n",
nesqp->hwqp.qp_id);
issue_modify_qp = 0;
- nesqp->in_disconnect = 0;
}
switch (nesqp->hw_iwarp_state) {
case NES_AEQE_IWARP_STATE_CLOSING:
@@ -3122,7 +3021,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
break;
default:
next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->in_disconnect = 1;
nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
break;
}
@@ -3139,7 +3037,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
issue_modify_qp = 1;
- nesqp->in_disconnect = 1;
break;
case IB_QPS_ERR:
case IB_QPS_RESET:
@@ -3162,7 +3059,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
(nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
next_iwarp_state |= NES_CQP_QP_RESET;
- nesqp->in_disconnect = 1;
} else {
nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n",
nesqp->hwqp.qp_id, nesqp->hw_tcp_state);
@@ -3373,21 +3269,17 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_qp *nesqp = to_nesqp(ibqp);
struct nes_hw_qp_wqe *wqe;
- int err;
+ int err = 0;
u32 qsize = nesqp->hwqp.sq_size;
u32 head;
- u32 wqe_misc;
- u32 wqe_count;
+ u32 wqe_misc = 0;
+ u32 wqe_count = 0;
u32 counter;
- u32 total_payload_length;
-
- err = 0;
- wqe_misc = 0;
- wqe_count = 0;
- total_payload_length = 0;
- if (nesqp->ibqp_state > IB_QPS_RTS)
- return -EINVAL;
+ if (nesqp->ibqp_state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
spin_lock_irqsave(&nesqp->lock, flags);
@@ -3413,94 +3305,208 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
u64temp = (u64)(ib_wr->wr_id);
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
u64temp);
- switch (ib_wr->opcode) {
- case IB_WR_SEND:
- if (ib_wr->send_flags & IB_SEND_SOLICITED) {
- wqe_misc = NES_IWARP_SQ_OP_SENDSE;
- } else {
- wqe_misc = NES_IWARP_SQ_OP_SEND;
- }
- if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
- }
- if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
- (ib_wr->sg_list[0].length <= 64)) {
- memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
- (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
- ib_wr->sg_list[0].length);
- wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
- } else {
- fill_wqe_sg_send(wqe, ib_wr, 1);
- }
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ if (IB_WR_SEND == ib_wr->opcode) {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ wqe_misc = NES_IWARP_SQ_OP_SENDSE;
+ else
+ wqe_misc = NES_IWARP_SQ_OP_SEND;
+ } else {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ wqe_misc = NES_IWARP_SQ_OP_SENDSEINV;
+ else
+ wqe_misc = NES_IWARP_SQ_OP_SENDINV;
- break;
- case IB_WR_RDMA_WRITE:
- wqe_misc = NES_IWARP_SQ_OP_RDMAW;
- if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
- nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
- ib_wr->num_sge,
- nesdev->nesadapter->max_sge);
- err = -EINVAL;
- break;
- }
- if (ib_wr->send_flags & IB_SEND_FENCE) {
- wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
- }
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
+ ib_wr->ex.invalidate_rkey);
+ }
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
- ib_wr->wr.rdma.rkey);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
- ib_wr->wr.rdma.remote_addr);
-
- if ((ib_wr->send_flags & IB_SEND_INLINE) &&
- ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
- (ib_wr->sg_list[0].length <= 64)) {
- memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
- (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
- ib_wr->sg_list[0].length);
- wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
- } else {
- fill_wqe_sg_send(wqe, ib_wr, 1);
- }
- wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
- wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
- break;
- case IB_WR_RDMA_READ:
- /* iWARP only supports 1 sge for RDMA reads */
- if (ib_wr->num_sge > 1) {
- nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
- ib_wr->num_sge);
- err = -EINVAL;
- break;
- }
- wqe_misc = NES_IWARP_SQ_OP_RDMAR;
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
- ib_wr->wr.rdma.remote_addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
- ib_wr->wr.rdma.rkey);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
- ib_wr->sg_list->length);
- set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
- ib_wr->sg_list->addr);
- set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
- ib_wr->sg_list->lkey);
- break;
- default:
- /* error */
- err = -EINVAL;
- break;
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ err = -EINVAL;
+ break;
}
- if (ib_wr->send_flags & IB_SEND_SIGNALED) {
- wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe_misc = NES_IWARP_SQ_OP_RDMAW;
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
+ ib_wr->num_sge, nesdev->nesadapter->max_sge);
+ err = -EINVAL;
+ break;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
+ /* iWARP only supports 1 sge for RDMA reads */
+ if (ib_wr->num_sge > 1) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
+ ib_wr->num_sge);
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->opcode == IB_WR_RDMA_READ) {
+ wqe_misc = NES_IWARP_SQ_OP_RDMAR;
+ } else {
+ wqe_misc = NES_IWARP_SQ_OP_RDMAR_LOCINV;
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX,
+ ib_wr->ex.invalidate_rkey);
+ }
+
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
+ ib_wr->sg_list->length);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+ ib_wr->sg_list->addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
+ ib_wr->sg_list->lkey);
+ break;
+ case IB_WR_LOCAL_INV:
+ wqe_misc = NES_IWARP_SQ_OP_LOCINV;
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX,
+ ib_wr->ex.invalidate_rkey);
+ break;
+ case IB_WR_FAST_REG_MR:
+ {
+ int i;
+ int flags = ib_wr->wr.fast_reg.access_flags;
+ struct nes_ib_fast_reg_page_list *pnesfrpl =
+ container_of(ib_wr->wr.fast_reg.page_list,
+ struct nes_ib_fast_reg_page_list,
+ ibfrpl);
+ u64 *src_page_list = pnesfrpl->ibfrpl.page_list;
+ u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva;
+
+ if (ib_wr->wr.fast_reg.page_list_len >
+ (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) {
+ nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n");
+ err = -EINVAL;
+ break;
+ }
+ wqe_misc = NES_IWARP_SQ_OP_FAST_REG;
+ set_wqe_64bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX,
+ ib_wr->wr.fast_reg.iova_start);
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
+ ib_wr->wr.fast_reg.length);
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
+ ib_wr->wr.fast_reg.rkey);
+ /* Set page size: */
+ if (ib_wr->wr.fast_reg.page_shift == 12) {
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K;
+ } else if (ib_wr->wr.fast_reg.page_shift == 21) {
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M;
+ } else {
+ nes_debug(NES_DBG_IW_TX, "Invalid page shift,"
+ " ib_wr=%u, max=1\n", ib_wr->num_sge);
+ err = -EINVAL;
+ break;
+ }
+ /* Set access_flags */
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_READ;
+ if (flags & IB_ACCESS_LOCAL_WRITE)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_LOCAL_WRITE;
+
+ if (flags & IB_ACCESS_REMOTE_WRITE)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_WRITE;
+
+ if (flags & IB_ACCESS_REMOTE_READ)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_REMOTE_READ;
+
+ if (flags & IB_ACCESS_MW_BIND)
+ wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND;
+
+ /* Fill in PBL info: */
+ if (ib_wr->wr.fast_reg.page_list_len >
+ pnesfrpl->ibfrpl.max_page_list_len) {
+ nes_debug(NES_DBG_IW_TX, "Invalid page list length,"
+ " ib_wr=%p, value=%u, max=%u\n",
+ ib_wr, ib_wr->wr.fast_reg.page_list_len,
+ pnesfrpl->ibfrpl.max_page_list_len);
+ err = -EINVAL;
+ break;
+ }
+
+ set_wqe_64bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX,
+ pnesfrpl->nes_wqe_pbl.paddr);
+
+ set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX,
+ ib_wr->wr.fast_reg.page_list_len * 8);
+
+ for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
+ dst_page_list[i] = cpu_to_le64(src_page_list[i]);
+
+ nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, "
+ "length: %d, rkey: %0x, pgl_paddr: %p, "
+ "page_list_len: %u, wqe_misc: %x\n",
+ (void *)ib_wr->wr.fast_reg.iova_start,
+ ib_wr->wr.fast_reg.length,
+ ib_wr->wr.fast_reg.rkey,
+ (void *)pnesfrpl->nes_wqe_pbl.paddr,
+ ib_wr->wr.fast_reg.page_list_len,
+ wqe_misc);
+ break;
+ }
+ default:
+ /* error */
+ err = -EINVAL;
+ break;
}
+
+ if (err)
+ break;
+
+ if ((ib_wr->send_flags & IB_SEND_SIGNALED) || nesqp->sig_all)
+ wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
ib_wr = ib_wr->next;
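/*
 * Illustrative sketch, not part of this patch: building the IB_WR_FAST_REG_MR
 * work request that the case above consumes.  'qp', 'frmr', 'frpl', 'npages'
 * and the DMA address array are assumptions carried over from the earlier
 * allocation sketch.
 */
static int example_post_fast_reg(struct ib_qp *qp, struct ib_mr *frmr,
				 struct ib_fast_reg_page_list *frpl,
				 u64 *dma_pages, int npages)
{
	struct ib_send_wr fr_wr, *bad_wr;
	int i;

	for (i = 0; i < npages; i++)
		frpl->page_list[i] = dma_pages[i];	/* 4K-aligned DMA addresses */

	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.send_flags = IB_SEND_SIGNALED;
	fr_wr.wr.fast_reg.iova_start = dma_pages[0];
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = npages;
	fr_wr.wr.fast_reg.page_shift = 12;	/* 4K pages, see the check above */
	fr_wr.wr.fast_reg.length = npages * PAGE_SIZE;
	fr_wr.wr.fast_reg.rkey = frmr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
					 IB_ACCESS_REMOTE_READ |
					 IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, &fr_wr, &bad_wr);
}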
@@ -3522,6 +3528,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
spin_unlock_irqrestore(&nesqp->lock, flags);
+out:
if (err)
*bad_wr = ib_wr;
return err;
@@ -3548,8 +3555,10 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
u32 counter;
u32 total_payload_length;
- if (nesqp->ibqp_state > IB_QPS_RTS)
- return -EINVAL;
+ if (nesqp->ibqp_state > IB_QPS_RTS) {
+ err = -EINVAL;
+ goto out;
+ }
spin_lock_irqsave(&nesqp->lock, flags);
@@ -3612,6 +3621,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
spin_unlock_irqrestore(&nesqp->lock, flags);
+out:
if (err)
*bad_wr = ib_wr;
return err;
@@ -3720,6 +3730,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
nes_debug(NES_DBG_CQ, "Operation = Send.\n");
entry->opcode = IB_WC_SEND;
break;
+ case NES_IWARP_SQ_OP_LOCINV:
+ entry->opcode = IB_WR_LOCAL_INV;
+ break;
+ case NES_IWARP_SQ_OP_FAST_REG:
+ entry->opcode = IB_WC_FAST_REG_MR;
+ break;
}
nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
@@ -3890,10 +3906,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
nesibdev->ibdev.bind_mw = nes_bind_mw;
- nesibdev->ibdev.alloc_fmr = nes_alloc_fmr;
- nesibdev->ibdev.unmap_fmr = nes_unmap_fmr;
- nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr;
- nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr;
+ nesibdev->ibdev.alloc_fast_reg_mr = nes_alloc_fast_reg_mr;
+ nesibdev->ibdev.alloc_fast_reg_page_list = nes_alloc_fast_reg_page_list;
+ nesibdev->ibdev.free_fast_reg_page_list = nes_free_fast_reg_page_list;
nesibdev->ibdev.attach_mcast = nes_multicast_attach;
nesibdev->ibdev.detach_mcast = nes_multicast_detach;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 89822d75f82..2df9993e0ca 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -135,19 +135,15 @@ struct nes_qp {
struct ib_qp ibqp;
void *allocated_buffer;
struct iw_cm_id *cm_id;
- struct workqueue_struct *wq;
struct nes_cq *nesscq;
struct nes_cq *nesrcq;
struct nes_pd *nespd;
void *cm_node; /* handle of the node this QP is associated with */
struct ietf_mpa_frame *ietf_frame;
dma_addr_t ietf_frame_pbase;
- wait_queue_head_t state_waitq;
struct ib_mr *lsmm_mr;
- unsigned long socket;
struct nes_hw_qp hwqp;
struct work_struct work;
- struct work_struct ae_work;
enum ib_qp_state ibqp_state;
u32 iwarp_state;
u32 hte_index;
@@ -165,19 +161,20 @@ struct nes_qp {
struct page *page;
struct timer_list terminate_timer;
enum ib_event_type terminate_eventtype;
- wait_queue_head_t kick_waitq;
- u16 in_disconnect;
+ u16 active_conn:1;
+ u16 skip_lsmm:1;
+ u16 user_mode:1;
+ u16 hte_added:1;
+ u16 flush_issued:1;
+ u16 destroyed:1;
+ u16 sig_all:1;
+ u16 rsvd:9;
u16 private_data_len;
u16 term_sq_flush_code;
u16 term_rq_flush_code;
- u8 active_conn;
- u8 skip_lsmm;
- u8 user_mode;
- u8 hte_added;
u8 hw_iwarp_state;
- u8 flush_issued;
u8 hw_tcp_state;
u8 term_flags;
- u8 destroyed;
+ u8 sq_kmapped;
};
#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2bf5116deec..df3eb8c9fd9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -884,6 +884,7 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
neigh->neighbour = neighbour;
neigh->dev = dev;
+ memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
*to_ipoib_neigh(neighbour) = neigh;
skb_queue_head_init(&neigh->queue);
ipoib_cm_set(neigh, NULL);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index b9453d068e9..274c883ef3e 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -209,6 +209,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
mem_copy->copy_buf = NULL;
}
+#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
+
/**
* iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
* and returns the length of resulting physical address array (may be less than
@@ -221,62 +223,52 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
* where --few fragments of the same page-- are present in the SG as
* consecutive elements. Also, it handles one entry SG.
*/
+
static int iser_sg_to_page_vec(struct iser_data_buf *data,
struct iser_page_vec *page_vec,
struct ib_device *ibdev)
{
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
- struct scatterlist *sg;
- u64 first_addr, last_addr, page;
- int end_aligned;
- unsigned int cur_page = 0;
+ struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+ u64 start_addr, end_addr, page, chunk_start = 0;
unsigned long total_sz = 0;
- int i;
+ unsigned int dma_len;
+ int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
/* compute the offset of first element */
page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
+ new_chunk = 1;
+ cur_page = 0;
for_each_sg(sgl, sg, data->dma_nents, i) {
- unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
-
+ start_addr = ib_sg_dma_address(ibdev, sg);
+ if (new_chunk)
+ chunk_start = start_addr;
+ dma_len = ib_sg_dma_len(ibdev, sg);
+ end_addr = start_addr + dma_len;
total_sz += dma_len;
- first_addr = ib_sg_dma_address(ibdev, sg);
- last_addr = first_addr + dma_len;
-
- end_aligned = !(last_addr & ~MASK_4K);
-
- /* continue to collect page fragments till aligned or SG ends */
- while (!end_aligned && (i + 1 < data->dma_nents)) {
- sg = sg_next(sg);
- i++;
- dma_len = ib_sg_dma_len(ibdev, sg);
- total_sz += dma_len;
- last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
- end_aligned = !(last_addr & ~MASK_4K);
+ /* collect page fragments until aligned or end of SG list */
+ if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
+ new_chunk = 0;
+ continue;
}
-
- /* handle the 1st page in the 1st DMA element */
- if (cur_page == 0) {
- page = first_addr & MASK_4K;
- page_vec->pages[cur_page] = page;
- cur_page++;
+ new_chunk = 1;
+
+ /* address of the first page in the contiguous chunk;
+ masking relevant for the very first SG entry,
+ which might be unaligned */
+ page = chunk_start & MASK_4K;
+ do {
+ page_vec->pages[cur_page++] = page;
page += SIZE_4K;
- } else
- page = first_addr;
-
- for (; page < last_addr; page += SIZE_4K) {
- page_vec->pages[cur_page] = page;
- cur_page++;
- }
-
+ } while (page < end_addr);
}
+
page_vec->data_size = total_sz;
iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
return cur_page;
}
-#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
/**
* iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -284,42 +276,40 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
* the number of entries which are aligned correctly. Supports the case where
* consecutive SG elements are actually fragments of the same physical page.
*/
-static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
- struct ib_device *ibdev)
+static int iser_data_buf_aligned_len(struct iser_data_buf *data,
+ struct ib_device *ibdev)
{
- struct scatterlist *sgl, *sg;
- u64 end_addr, next_addr;
- int i, cnt;
- unsigned int ret_len = 0;
+ struct scatterlist *sgl, *sg, *next_sg = NULL;
+ u64 start_addr, end_addr;
+ int i, ret_len, start_check = 0;
+
+ if (data->dma_nents == 1)
+ return 1;
sgl = (struct scatterlist *)data->buf;
+ start_addr = ib_sg_dma_address(ibdev, sgl);
- cnt = 0;
for_each_sg(sgl, sg, data->dma_nents, i) {
- /* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
- "offset: %ld sz: %ld\n", i,
- (unsigned long)sg_phys(sg),
- (unsigned long)sg->offset,
- (unsigned long)sg->length); */
- end_addr = ib_sg_dma_address(ibdev, sg) +
- ib_sg_dma_len(ibdev, sg);
- /* iser_dbg("Checking sg iobuf end address "
- "0x%08lX\n", end_addr); */
- if (i + 1 < data->dma_nents) {
- next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
- /* are i, i+1 fragments of the same page? */
- if (end_addr == next_addr) {
- cnt++;
- continue;
- } else if (!IS_4K_ALIGNED(end_addr)) {
- ret_len = cnt + 1;
- break;
- }
- }
- cnt++;
+ if (start_check && !IS_4K_ALIGNED(start_addr))
+ break;
+
+ next_sg = sg_next(sg);
+ if (!next_sg)
+ break;
+
+ end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
+ start_addr = ib_sg_dma_address(ibdev, next_sg);
+
+ if (end_addr == start_addr) {
+ start_check = 0;
+ continue;
+ } else
+ start_check = 1;
+
+ if (!IS_4K_ALIGNED(end_addr))
+ break;
}
- if (i == data->dma_nents)
- ret_len = cnt; /* loop ended */
+ ret_len = (next_sg) ? i : i+1;
iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
ret_len, data->dma_nents, data);
return ret_len;
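/*
 * Illustrative sketch, not part of this patch: the chunking rule the rewritten
 * iser_sg_to_page_vec() applies.  SG entries are coalesced until one ends on a
 * 4K boundary (or the list ends); the chunk is then emitted as consecutive 4K
 * pages starting from the chunk start rounded down with MASK_4K.  The helper
 * name is made up; MASK_4K/SIZE_4K mirror the driver's definitions.
 */
static int example_emit_chunk_pages(u64 chunk_start, u64 chunk_end,
				    u64 *pages, int cur_page)
{
	u64 page = chunk_start & MASK_4K;	/* align the first page down */

	do {
		pages[cur_page++] = page;
		page += SIZE_4K;
	} while (page < chunk_end);

	return cur_page;
}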
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 5c16001959c..ab060710688 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -296,9 +296,15 @@ static void input_handle_event(struct input_dev *dev,
* @value: value of the event
*
* This function should be used by drivers implementing various input
- * devices. See also input_inject_event().
+ * devices to report input events. See also input_inject_event().
+ *
+ * NOTE: input_event() may safely be used right after an input device has
+ * been allocated with input_allocate_device(), even before it is registered
+ * with input_register_device(), but the event will not reach any of the
+ * input handlers. Such early invocation of input_event() may be used to
+ * 'seed' the initial state of a switch or the initial position of an
+ * absolute axis, etc.
*/
-
void input_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
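/*
 * Illustrative sketch, not part of this patch: "seeding" state as the new
 * kernel-doc above describes - input_event() (via input_report_switch()) is
 * called after allocation but before registration.  The lid-switch GPIO and
 * the function name are made-up examples.
 */
#include <linux/input.h>
#include <linux/gpio.h>

static int example_register_lid_switch(int lid_gpio)
{
	struct input_dev *input;
	int error;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	input->name = "example-lid-switch";
	__set_bit(EV_SW, input->evbit);
	__set_bit(SW_LID, input->swbit);

	/* seed the current state; no handler sees this early event */
	input_report_switch(input, SW_LID, gpio_get_value(lid_gpio));

	error = input_register_device(input);
	if (error)
		input_free_device(input);
	return error;
}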
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 203b88a82b5..02c836e1181 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -24,6 +24,16 @@ config KEYBOARD_AAED2000
To compile this driver as a module, choose M here: the
module will be called aaed2000_kbd.
+config KEYBOARD_ADP5520
+ tristate "Keypad Support for ADP5520 PMIC"
+ depends on PMIC_ADP5520
+ help
+ This option enables support for the keypad scan matrix
+ on Analog Devices ADP5520 PMICs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called adp5520-keys.
+
config KEYBOARD_ADP5588
tristate "ADP5588 I2C QWERTY Keypad and IO Expander"
depends on I2C
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 68c017235ce..78654ef6520 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
+obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o
obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
new file mode 100644
index 00000000000..a7ba27fb410
--- /dev/null
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -0,0 +1,220 @@
+/*
+ * Keypad driver for Analog Devices ADP5520 MFD PMICs
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/mfd/adp5520.h>
+
+struct adp5520_keys {
+ struct input_dev *input;
+ struct notifier_block notifier;
+ struct device *master;
+ unsigned short keycode[ADP5520_KEYMAPSIZE];
+};
+
+static void adp5520_keys_report_event(struct adp5520_keys *dev,
+ unsigned short keymask, int value)
+{
+ int i;
+
+ for (i = 0; i < ADP5520_MAXKEYS; i++)
+ if (keymask & (1 << i))
+ input_report_key(dev->input, dev->keycode[i], value);
+
+ input_sync(dev->input);
+}
+
+static int adp5520_keys_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct adp5520_keys *dev;
+ uint8_t reg_val_lo, reg_val_hi;
+ unsigned short keymask;
+
+ dev = container_of(nb, struct adp5520_keys, notifier);
+
+ if (event & ADP5520_KP_INT) {
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_2, &reg_val_hi);
+
+ keymask = (reg_val_hi << 8) | reg_val_lo;
+ /* Read twice to clear */
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KP_INT_STAT_2, &reg_val_hi);
+ keymask |= (reg_val_hi << 8) | reg_val_lo;
+ adp5520_keys_report_event(dev, keymask, 1);
+ }
+
+ if (event & ADP5520_KR_INT) {
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_2, &reg_val_hi);
+
+ keymask = (reg_val_hi << 8) | reg_val_lo;
+ /* Read twice to clear */
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_1, &reg_val_lo);
+ adp5520_read(dev->master, ADP5520_KR_INT_STAT_2, &reg_val_hi);
+ keymask |= (reg_val_hi << 8) | reg_val_lo;
+ adp5520_keys_report_event(dev, keymask, 0);
+ }
+
+ return 0;
+}
+
+static int __devinit adp5520_keys_probe(struct platform_device *pdev)
+{
+ struct adp5520_keys_platform_data *pdata = pdev->dev.platform_data;
+ struct input_dev *input;
+ struct adp5520_keys *dev;
+ int ret, i;
+ unsigned char en_mask, ctl_mask = 0;
+
+ if (pdev->id != ID_ADP5520) {
+ dev_err(&pdev->dev, "only ADP5520 supports Keypad\n");
+ return -EINVAL;
+ }
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -EINVAL;
+ }
+
+ if (!(pdata->rows_en_mask && pdata->cols_en_mask))
+ return -EINVAL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ input = input_allocate_device();
+ if (!input) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->master = pdev->dev.parent;
+ dev->input = input;
+
+ input->name = pdev->name;
+ input->phys = "adp5520-keys/input0";
+ input->dev.parent = &pdev->dev;
+
+ input_set_drvdata(input, dev);
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x5520;
+ input->id.version = 0x0001;
+
+ input->keycodesize = sizeof(dev->keycode[0]);
+ input->keycodemax = pdata->keymapsize;
+ input->keycode = dev->keycode;
+
+ memcpy(dev->keycode, pdata->keymap,
+ pdata->keymapsize * input->keycodesize);
+
+ /* setup input device */
+ __set_bit(EV_KEY, input->evbit);
+
+ if (pdata->repeat)
+ __set_bit(EV_REP, input->evbit);
+
+ for (i = 0; i < input->keycodemax; i++)
+ __set_bit(dev->keycode[i], input->keybit);
+ __clear_bit(KEY_RESERVED, input->keybit);
+
+ ret = input_register_device(input);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register input device\n");
+ goto err;
+ }
+
+ en_mask = pdata->rows_en_mask | pdata->cols_en_mask;
+
+ ret = adp5520_set_bits(dev->master, ADP5520_GPIO_CFG_1, en_mask);
+
+ if (en_mask & ADP5520_COL_C3)
+ ctl_mask |= ADP5520_C3_MODE;
+
+ if (en_mask & ADP5520_ROW_R3)
+ ctl_mask |= ADP5520_R3_MODE;
+
+ if (ctl_mask)
+ ret |= adp5520_set_bits(dev->master, ADP5520_LED_CONTROL,
+ ctl_mask);
+
+ ret |= adp5520_set_bits(dev->master, ADP5520_GPIO_PULLUP,
+ pdata->rows_en_mask);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write\n");
+ ret = -EIO;
+ goto err1;
+ }
+
+ dev->notifier.notifier_call = adp5520_keys_notifier;
+ ret = adp5520_register_notifier(dev->master, &dev->notifier,
+ ADP5520_KP_IEN | ADP5520_KR_IEN);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier\n");
+ goto err1;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+
+err1:
+ input_unregister_device(input);
+ input = NULL;
+err:
+ input_free_device(input);
+ kfree(dev);
+ return ret;
+}
+
+static int __devexit adp5520_keys_remove(struct platform_device *pdev)
+{
+ struct adp5520_keys *dev = platform_get_drvdata(pdev);
+
+ adp5520_unregister_notifier(dev->master, &dev->notifier,
+ ADP5520_KP_IEN | ADP5520_KR_IEN);
+
+ input_unregister_device(dev->input);
+ kfree(dev);
+ return 0;
+}
+
+static struct platform_driver adp5520_keys_driver = {
+ .driver = {
+ .name = "adp5520-keys",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp5520_keys_probe,
+ .remove = __devexit_p(adp5520_keys_remove),
+};
+
+static int __init adp5520_keys_init(void)
+{
+ return platform_driver_register(&adp5520_keys_driver);
+}
+module_init(adp5520_keys_init);
+
+static void __exit adp5520_keys_exit(void)
+{
+ platform_driver_unregister(&adp5520_keys_driver);
+}
+module_exit(adp5520_keys_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Keys ADP5520 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:adp5520-keys");
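/*
 * Illustrative sketch, not part of this patch: board platform data for the
 * driver above.  Only the field names come from adp5520_keys_probe(); the
 * 2x2 layout and the ADP5520_ROW_/ADP5520_COL_ mask macros used here are
 * assumptions.
 */
#include <linux/input.h>
#include <linux/mfd/adp5520.h>

static unsigned short example_adp5520_keymap[ADP5520_KEYMAPSIZE] = {
	[0] = KEY_UP,
	[1] = KEY_DOWN,
	[2] = KEY_LEFT,
	[3] = KEY_RIGHT,
	/* remaining entries stay KEY_RESERVED */
};

static struct adp5520_keys_platform_data example_adp5520_keys_pdata = {
	.rows_en_mask	= ADP5520_ROW_R0 | ADP5520_ROW_R1,	/* assumed macros */
	.cols_en_mask	= ADP5520_COL_C0 | ADP5520_COL_C1,	/* assumed macros */
	.keymap		= example_adp5520_keymap,
	.keymapsize	= ARRAY_SIZE(example_adp5520_keymap),
	.repeat		= 0,
};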
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index d48c808d592..1edb596d927 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -319,7 +319,7 @@ static int adp5588_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops adp5588_dev_pm_ops = {
+static const struct dev_pm_ops adp5588_dev_pm_ops = {
.suspend = adp5588_suspend,
.resume = adp5588_resume,
};
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 181d30e3018..e45740429f7 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -22,11 +22,11 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input.h>
#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/input/matrix_keypad.h>
#include <mach/hardware.h>
-#include <mach/gpio.h>
#include <mach/ep93xx_keypad.h>
/*
@@ -60,38 +60,37 @@
#define KEY_REG_KEY1_MASK (0x0000003f)
#define KEY_REG_KEY1_SHIFT (0)
-#define keypad_readl(off) __raw_readl(keypad->mmio_base + (off))
-#define keypad_writel(v, off) __raw_writel((v), keypad->mmio_base + (off))
-
-#define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS)
+#define EP93XX_MATRIX_SIZE (EP93XX_MATRIX_ROWS * EP93XX_MATRIX_COLS)
struct ep93xx_keypad {
struct ep93xx_keypad_platform_data *pdata;
-
- struct clk *clk;
struct input_dev *input_dev;
+ struct clk *clk;
+
void __iomem *mmio_base;
- int irq;
- int enabled;
+ unsigned int matrix_keycodes[EP93XX_MATRIX_SIZE];
int key1;
int key2;
- unsigned int matrix_keycodes[MAX_MATRIX_KEY_NUM];
+ int irq;
+
+ bool enabled;
};
static void ep93xx_keypad_build_keycode(struct ep93xx_keypad *keypad)
{
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
struct input_dev *input_dev = keypad->input_dev;
+ unsigned int *key;
int i;
- for (i = 0; i < pdata->matrix_key_map_size; i++) {
- unsigned int key = pdata->matrix_key_map[i];
- int row = (key >> 28) & 0xf;
- int col = (key >> 24) & 0xf;
- int code = key & 0xffffff;
+ key = &pdata->matrix_key_map[0];
+ for (i = 0; i < pdata->matrix_key_map_size; i++, key++) {
+ int row = KEY_ROW(*key);
+ int col = KEY_COL(*key);
+ int code = KEY_VAL(*key);
keypad->matrix_keycodes[(row << 3) + col] = code;
__set_bit(code, input_dev->keybit);
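/*
 * Illustrative sketch, not part of this patch: a matrix_key_map expressed with
 * the KEY() helper from <linux/input/matrix_keypad.h>, which is what the
 * rewritten ep93xx_keypad_build_keycode() above unpacks with KEY_ROW(),
 * KEY_COL() and KEY_VAL().  The 2x2 layout and variable names are assumptions.
 */
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <mach/ep93xx_keypad.h>

static unsigned int example_ep93xx_key_map[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_LEFT),
	KEY(1, 1, KEY_RIGHT),
};

static struct ep93xx_keypad_platform_data example_ep93xx_keypad_pdata = {
	.matrix_key_map		= example_ep93xx_key_map,
	.matrix_key_map_size	= ARRAY_SIZE(example_ep93xx_key_map),
};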
@@ -102,9 +101,11 @@ static irqreturn_t ep93xx_keypad_irq_handler(int irq, void *dev_id)
{
struct ep93xx_keypad *keypad = dev_id;
struct input_dev *input_dev = keypad->input_dev;
- unsigned int status = keypad_readl(KEY_REG);
+ unsigned int status;
int keycode, key1, key2;
+ status = __raw_readl(keypad->mmio_base + KEY_REG);
+
keycode = (status & KEY_REG_KEY1_MASK) >> KEY_REG_KEY1_SHIFT;
key1 = keypad->matrix_keycodes[keycode];
@@ -152,7 +153,10 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- clk_set_rate(keypad->clk, pdata->flags & EP93XX_KEYPAD_KDIV);
+ if (pdata->flags & EP93XX_KEYPAD_KDIV)
+ clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV4);
+ else
+ clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV16);
if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
val |= KEY_INIT_DIS3KY;
@@ -167,7 +171,7 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
val |= ((pdata->prescale << KEY_INIT_PRSCL_SHIFT) & KEY_INIT_PRSCL_MASK);
- keypad_writel(val, KEY_INIT);
+ __raw_writel(val, keypad->mmio_base + KEY_INIT);
}
static int ep93xx_keypad_open(struct input_dev *pdev)
@@ -177,7 +181,7 @@ static int ep93xx_keypad_open(struct input_dev *pdev)
if (!keypad->enabled) {
ep93xx_keypad_config(keypad);
clk_enable(keypad->clk);
- keypad->enabled = 1;
+ keypad->enabled = true;
}
return 0;
@@ -189,7 +193,7 @@ static void ep93xx_keypad_close(struct input_dev *pdev)
if (keypad->enabled) {
clk_disable(keypad->clk);
- keypad->enabled = 0;
+ keypad->enabled = false;
}
}
@@ -211,7 +215,7 @@ static int ep93xx_keypad_suspend(struct platform_device *pdev,
if (keypad->enabled) {
clk_disable(keypad->clk);
- keypad->enabled = 0;
+ keypad->enabled = false;
}
mutex_unlock(&input_dev->mutex);
@@ -236,7 +240,7 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
if (!keypad->enabled) {
ep93xx_keypad_config(keypad);
clk_enable(keypad->clk);
- keypad->enabled = 1;
+ keypad->enabled = true;
}
}
@@ -252,88 +256,56 @@ static int ep93xx_keypad_resume(struct platform_device *pdev)
static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad;
- struct ep93xx_keypad_platform_data *pdata = pdev->dev.platform_data;
struct input_dev *input_dev;
struct resource *res;
- int irq, err, i, gpio;
-
- if (!pdata ||
- !pdata->matrix_key_rows ||
- pdata->matrix_key_rows > MAX_MATRIX_KEY_ROWS ||
- !pdata->matrix_key_cols ||
- pdata->matrix_key_cols > MAX_MATRIX_KEY_COLS) {
- dev_err(&pdev->dev, "invalid or missing platform data\n");
- return -EINVAL;
- }
+ int err;
keypad = kzalloc(sizeof(struct ep93xx_keypad), GFP_KERNEL);
- if (!keypad) {
- dev_err(&pdev->dev, "failed to allocate driver data\n");
+ if (!keypad)
return -ENOMEM;
- }
- keypad->pdata = pdata;
+ keypad->pdata = pdev->dev.platform_data;
+ if (!keypad->pdata) {
+ err = -EINVAL;
+ goto failed_free;
+ }
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "failed to get keypad irq\n");
+ keypad->irq = platform_get_irq(pdev, 0);
+ if (!keypad->irq) {
err = -ENXIO;
goto failed_free;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
err = -ENXIO;
goto failed_free;
}
res = request_mem_region(res->start, resource_size(res), pdev->name);
if (!res) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
err = -EBUSY;
goto failed_free;
}
keypad->mmio_base = ioremap(res->start, resource_size(res));
if (keypad->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
err = -ENXIO;
goto failed_free_mem;
}
- /* Request the needed GPIO's */
- gpio = EP93XX_GPIO_LINE_ROW0;
- for (i = 0; i < keypad->pdata->matrix_key_rows; i++, gpio++) {
- err = gpio_request(gpio, pdev->name);
- if (err) {
- dev_err(&pdev->dev, "failed to request gpio-%d\n",
- gpio);
- goto failed_free_rows;
- }
- }
-
- gpio = EP93XX_GPIO_LINE_COL0;
- for (i = 0; i < keypad->pdata->matrix_key_cols; i++, gpio++) {
- err = gpio_request(gpio, pdev->name);
- if (err) {
- dev_err(&pdev->dev, "failed to request gpio-%d\n",
- gpio);
- goto failed_free_cols;
- }
- }
+ err = ep93xx_keypad_acquire_gpio(pdev);
+ if (err)
+ goto failed_free_io;
- keypad->clk = clk_get(&pdev->dev, "key_clk");
+ keypad->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(keypad->clk)) {
- dev_err(&pdev->dev, "failed to get keypad clock\n");
err = PTR_ERR(keypad->clk);
- goto failed_free_io;
+ goto failed_free_gpio;
}
- /* Create and register the input driver */
input_dev = input_allocate_device();
if (!input_dev) {
- dev_err(&pdev->dev, "failed to allocate input device\n");
err = -ENOMEM;
goto failed_put_clk;
}
@@ -358,44 +330,29 @@ static int __devinit ep93xx_keypad_probe(struct platform_device *pdev)
ep93xx_keypad_build_keycode(keypad);
platform_set_drvdata(pdev, keypad);
- err = request_irq(irq, ep93xx_keypad_irq_handler, IRQF_DISABLED,
- pdev->name, keypad);
- if (err) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
+ err = request_irq(keypad->irq, ep93xx_keypad_irq_handler,
+ IRQF_DISABLED, pdev->name, keypad);
+ if (err)
goto failed_free_dev;
- }
-
- keypad->irq = irq;
- /* Register the input device */
err = input_register_device(input_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to register input device\n");
+ if (err)
goto failed_free_irq;
- }
device_init_wakeup(&pdev->dev, 1);
return 0;
failed_free_irq:
- free_irq(irq, pdev);
+ free_irq(keypad->irq, pdev);
platform_set_drvdata(pdev, NULL);
failed_free_dev:
input_free_device(input_dev);
failed_put_clk:
clk_put(keypad->clk);
+failed_free_gpio:
+ ep93xx_keypad_release_gpio(pdev);
failed_free_io:
- i = keypad->pdata->matrix_key_cols - 1;
- gpio = EP93XX_GPIO_LINE_COL0 + i;
-failed_free_cols:
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
- i = keypad->pdata->matrix_key_rows - 1;
- gpio = EP93XX_GPIO_LINE_ROW0 + i;
-failed_free_rows:
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
iounmap(keypad->mmio_base);
failed_free_mem:
release_mem_region(res->start, resource_size(res));
@@ -408,7 +365,6 @@ static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
{
struct ep93xx_keypad *keypad = platform_get_drvdata(pdev);
struct resource *res;
- int i, gpio;
free_irq(keypad->irq, pdev);
@@ -420,15 +376,7 @@ static int __devexit ep93xx_keypad_remove(struct platform_device *pdev)
input_unregister_device(keypad->input_dev);
- i = keypad->pdata->matrix_key_cols - 1;
- gpio = EP93XX_GPIO_LINE_COL0 + i;
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
-
- i = keypad->pdata->matrix_key_rows - 1;
- gpio = EP93XX_GPIO_LINE_ROW0 + i;
- for ( ; i >= 0; i--, gpio--)
- gpio_free(gpio);
+ ep93xx_keypad_release_gpio(pdev);
iounmap(keypad->mmio_base);
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 076111fc72d..8e9380bfed4 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -295,7 +295,7 @@ static int sh_keysc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_keysc_dev_pm_ops = {
+static const struct dev_pm_ops sh_keysc_dev_pm_ops = {
.suspend = sh_keysc_suspend,
.resume = sh_keysc_resume,
};
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 9a2977c2169..eeaa7acb9cf 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -31,7 +31,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
@@ -133,7 +133,7 @@ struct twl4030_keypad {
static int twl4030_kpread(struct twl4030_keypad *kp,
u8 *data, u32 reg, u8 num_bytes)
{
- int ret = twl4030_i2c_read(TWL4030_MODULE_KEYPAD, data, reg, num_bytes);
+ int ret = twl_i2c_read(TWL4030_MODULE_KEYPAD, data, reg, num_bytes);
if (ret < 0)
dev_warn(kp->dbg_dev,
@@ -145,7 +145,7 @@ static int twl4030_kpread(struct twl4030_keypad *kp,
static int twl4030_kpwrite_u8(struct twl4030_keypad *kp, u8 data, u32 reg)
{
- int ret = twl4030_i2c_write_u8(TWL4030_MODULE_KEYPAD, data, reg);
+ int ret = twl_i2c_write_u8(TWL4030_MODULE_KEYPAD, data, reg);
if (ret < 0)
dev_warn(kp->dbg_dev,
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index 690f3fafa03..61d10177fa8 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -247,7 +247,7 @@ static int bfin_rotary_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops bfin_rotary_pm_ops = {
+static const struct dev_pm_ops bfin_rotary_pm_ops = {
.suspend = bfin_rotary_suspend,
.resume = bfin_rotary_resume,
};
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 039dcb00ebd..008de0c5834 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -55,7 +55,6 @@ pcf50633_input_irq(int irq, void *data)
static int __devinit pcf50633_input_probe(struct platform_device *pdev)
{
struct pcf50633_input *input;
- struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
struct input_dev *input_dev;
int ret;
@@ -71,7 +70,7 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, input);
- input->pcf = pdata->pcf;
+ input->pcf = dev_to_pcf50633(pdev->dev.parent);
input->input_dev = input_dev;
input_dev->name = "PCF50633 PMU events";
@@ -85,9 +84,9 @@ static int __devinit pcf50633_input_probe(struct platform_device *pdev)
kfree(input);
return ret;
}
- pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYR,
+ pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYR,
pcf50633_input_irq, input);
- pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ONKEYF,
+ pcf50633_register_irq(input->pcf, PCF50633_IRQ_ONKEYF,
pcf50633_input_irq, input);
return 0;
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 21cb755a54f..ea4e1fd1265 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -127,7 +127,7 @@ static void pcspkr_shutdown(struct platform_device *dev)
pcspkr_event(NULL, EV_SND, SND_BELL, 0);
}
-static struct dev_pm_ops pcspkr_pm_ops = {
+static const struct dev_pm_ops pcspkr_pm_ops = {
.suspend = pcspkr_suspend,
};
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index f5fc9974a11..bdde5c88903 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -27,7 +27,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#define PWR_PWRON_IRQ (1 << 0)
@@ -49,7 +49,7 @@ static irqreturn_t powerbutton_irq(int irq, void *_pwr)
local_irq_enable();
#endif
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &value,
STS_HW_CONDITIONS);
if (!err) {
input_report_key(pwr, KEY_POWER, value & PWR_PWRON_IRQ);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index a3f492a5085..f93c2c0daf1 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -5,6 +5,7 @@
* Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com>
* Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru>
* Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net>
*
* ALPS detection, tap switching and status querying info is taken from
* tpconfig utility (by C. Scott Ananian and Bruce Kall).
@@ -28,7 +29,6 @@
#define dbg(format, arg...) do {} while (0)
#endif
-
#define ALPS_OLDPROTO 0x01 /* old style input */
#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -37,7 +37,8 @@
#define ALPS_FW_BK_1 0x10 /* front & back buttons present */
#define ALPS_FW_BK_2 0x20 /* front & back buttons present */
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
-
+#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
+ 6-byte ALPS packet */
static const struct alps_model_info alps_model_data[] = {
{ { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
@@ -58,7 +59,9 @@ static const struct alps_model_info alps_model_data[] = {
{ { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
{ { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
{ { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude E6500 */
+ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
+ { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
{ { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
};
@@ -69,20 +72,88 @@ static const struct alps_model_info alps_model_data[] = {
*/
/*
- * ALPS abolute Mode - new format
+ * PS/2 packet format
+ *
+ * byte 0: 0 0 YSGN XSGN 1 M R L
+ * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
+ * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ *
+ * Note that the device never signals an overflow condition.
+ *
+ * ALPS absolute Mode - new format
*
* byte 0: 1 ? ? ? 1 ? ? ?
* byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 ? fin ges
+ * byte 2: 0 x10 x9 x8 x7 ? fin ges
* byte 3: 0 y9 y8 y7 1 M R L
* byte 4: 0 y6 y5 y4 y3 y2 y1 y0
* byte 5: 0 z6 z5 z4 z3 z2 z1 z0
*
+ * Dualpoint device -- interleaved packet format
+ *
+ * byte 0: 1 1 0 0 1 1 1 1
+ * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+ * byte 2: 0 x10 x9 x8 x7 0 fin ges
+ * byte 3: 0 0 YSGN XSGN 1 1 1 1
+ * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
+ * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ * byte 6: 0 y9 y8 y7 1 m r l
+ * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
+ * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
+ *
+ * CAPITALS = stick, minuscules = touchpad
+ *
* ?'s can have different meanings on different models,
* such as wheel rotation, extra buttons, stick buttons
* on a dualpoint, etc.
*/
+static bool alps_is_valid_first_byte(const struct alps_model_info *model,
+ unsigned char data)
+{
+ return (data & model->mask0) == model->byte0;
+}
+
+static void alps_report_buttons(struct psmouse *psmouse,
+ struct input_dev *dev1, struct input_dev *dev2,
+ int left, int right, int middle)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+
+ if (model->flags & ALPS_PS2_INTERLEAVED) {
+ struct input_dev *dev;
+
+ /*
+ * If the shared button has already been reported on the
+ * other device (dev2) then this event should also be
+ * sent through that device.
+ */
+ dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
+ input_report_key(dev, BTN_LEFT, left);
+
+ dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
+ input_report_key(dev, BTN_RIGHT, right);
+
+ dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
+ input_report_key(dev, BTN_MIDDLE, middle);
+
+ /*
+ * Sync the _other_ device now; we'll sync the first
+ * device later, once we report the rest of the events.
+ */
+ input_sync(dev2);
+ } else {
+ /*
+ * For devices with non-interleaved packets we know which
+ * device the buttons belong to, so we can simply report them.
+ */
+ input_report_key(dev1, BTN_LEFT, left);
+ input_report_key(dev1, BTN_RIGHT, right);
+ input_report_key(dev1, BTN_MIDDLE, middle);
+ }
+}
+
static void alps_process_packet(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
@@ -93,18 +164,6 @@ static void alps_process_packet(struct psmouse *psmouse)
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
- if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */
- input_report_key(dev2, BTN_LEFT, packet[0] & 1);
- input_report_key(dev2, BTN_RIGHT, packet[0] & 2);
- input_report_key(dev2, BTN_MIDDLE, packet[0] & 4);
- input_report_rel(dev2, REL_X,
- packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
- input_report_rel(dev2, REL_Y,
- packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
- input_sync(dev2);
- return;
- }
-
if (model->flags & ALPS_OLDPROTO) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
@@ -140,18 +199,13 @@ static void alps_process_packet(struct psmouse *psmouse)
input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
- input_report_key(dev2, BTN_LEFT, left);
- input_report_key(dev2, BTN_RIGHT, right);
- input_report_key(dev2, BTN_MIDDLE, middle);
+ alps_report_buttons(psmouse, dev2, dev, left, right, middle);
- input_sync(dev);
input_sync(dev2);
return;
}
- input_report_key(dev, BTN_LEFT, left);
- input_report_key(dev, BTN_RIGHT, right);
- input_report_key(dev, BTN_MIDDLE, middle);
+ alps_report_buttons(psmouse, dev, dev2, left, right, middle);
/* Convert hardware tap to a reasonable Z value */
if (ges && !fin)
@@ -202,25 +256,168 @@ static void alps_process_packet(struct psmouse *psmouse)
input_sync(dev);
}
+static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+ unsigned char packet[],
+ bool report_buttons)
+{
+ struct alps_data *priv = psmouse->private;
+ struct input_dev *dev2 = priv->dev2;
+
+ if (report_buttons)
+ alps_report_buttons(psmouse, dev2, psmouse->dev,
+ packet[0] & 1, packet[0] & 2, packet[0] & 4);
+
+ input_report_rel(dev2, REL_X,
+ packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
+ input_report_rel(dev2, REL_Y,
+ packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
+
+ input_sync(dev2);
+}
+
+static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+
+ if (psmouse->pktcnt < 6)
+ return PSMOUSE_GOOD_DATA;
+
+ if (psmouse->pktcnt == 6) {
+ /*
+ * Start a timer to flush the packet if it ends up being the
+ * last 6-byte packet in the stream. The timer needs to fire
+ * before the psmouse core times out itself. 20 ms should be
+ * enough to decide whether we are getting more data or not.
+ */
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
+ return PSMOUSE_GOOD_DATA;
+ }
+
+ del_timer(&priv->timer);
+
+ if (psmouse->packet[6] & 0x80) {
+
+ /*
+ * The highest bit is set - that means we either had a
+ * complete ALPS packet and this is the start of the
+ * next packet, or we got garbage.
+ */
+
+ if (((psmouse->packet[3] |
+ psmouse->packet[4] |
+ psmouse->packet[5]) & 0x80) ||
+ (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
+ dbg("refusing packet %x %x %x %x "
+ "(suspected interleaved ps/2)\n",
+ psmouse->packet[3], psmouse->packet[4],
+ psmouse->packet[5], psmouse->packet[6]);
+ return PSMOUSE_BAD_DATA;
+ }
+
+ alps_process_packet(psmouse);
+
+ /* Continue with the next packet */
+ psmouse->packet[0] = psmouse->packet[6];
+ psmouse->pktcnt = 1;
+
+ } else {
+
+ /*
+ * The high bit is 0 - that means we indeed got a PS/2
+ * packet in the middle of an ALPS packet.
+ *
+ * There is also a possibility that we got a 6-byte ALPS
+ * packet followed by a 3-byte packet from the trackpoint.
+ * We cannot distinguish between these 2 scenarios, but
+ * because the latter is unlikely to happen in the course
+ * of normal operation (the user would need to press all
+ * buttons on the pad and start moving the trackpoint
+ * without touching the pad surface) we assume the former.
+ * Even if we are wrong, the worst thing that would happen
+ * is that the cursor would jump, but we should not get
+ * protocol desynchronization.
+ */
+
+ alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
+ false);
+
+ /*
+ * Continue with the standard ALPS protocol handling,
+ * but make sure we won't process it as an interleaved
+ * packet again, which may happen if all buttons are
+ * pressed. To avoid this let's reset the 4th bit which
+ * is normally 1.
+ */
+ psmouse->packet[3] = psmouse->packet[6] & 0xf7;
+ psmouse->pktcnt = 4;
+ }
+
+ return PSMOUSE_GOOD_DATA;
+}
+
+static void alps_flush_packet(unsigned long data)
+{
+ struct psmouse *psmouse = (struct psmouse *)data;
+
+ serio_pause_rx(psmouse->ps2dev.serio);
+
+ if (psmouse->pktcnt == 6) {
+
+ /*
+ * We did not get any more data in a reasonable amount of time.
+ * Validate the last 3 bytes and process them as a standard
+ * ALPS packet.
+ */
+ if ((psmouse->packet[3] |
+ psmouse->packet[4] |
+ psmouse->packet[5]) & 0x80) {
+ dbg("refusing packet %x %x %x "
+ "(suspected interleaved ps/2)\n",
+ psmouse->packet[3], psmouse->packet[4],
+ psmouse->packet[5]);
+ } else {
+ alps_process_packet(psmouse);
+ }
+ psmouse->pktcnt = 0;
+ }
+
+ serio_continue_rx(psmouse->ps2dev.serio);
+}
+
static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
if (psmouse->pktcnt == 3) {
- alps_process_packet(psmouse);
+ alps_report_bare_ps2_packet(psmouse, psmouse->packet,
+ true);
return PSMOUSE_FULL_PACKET;
}
return PSMOUSE_GOOD_DATA;
}
- if ((psmouse->packet[0] & priv->i->mask0) != priv->i->byte0)
+ /* Check for a PS/2 packet stuffed in the middle of an ALPS packet. */
+
+ if ((model->flags & ALPS_PS2_INTERLEAVED) &&
+ psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
+ return alps_handle_interleaved_ps2(psmouse);
+ }
+
+ if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
+ dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
+ psmouse->packet[0], model->mask0, model->byte0);
return PSMOUSE_BAD_DATA;
+ }
/* Bytes 2 - 6 should have 0 in the highest bit */
if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
- (psmouse->packet[psmouse->pktcnt - 1] & 0x80))
+ (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
+ dbg("refusing packet[%i] = %x\n",
+ psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]);
return PSMOUSE_BAD_DATA;
+ }
if (psmouse->pktcnt == 6) {
alps_process_packet(psmouse);
@@ -459,6 +656,7 @@ static void alps_disconnect(struct psmouse *psmouse)
struct alps_data *priv = psmouse->private;
psmouse_reset(psmouse);
+ del_timer_sync(&priv->timer);
input_unregister_device(priv->dev2);
kfree(priv);
}
@@ -476,6 +674,8 @@ int alps_init(struct psmouse *psmouse)
goto init_fail;
priv->dev2 = dev2;
+ setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+
psmouse->private = priv;
model = alps_get_model(psmouse, &version);
@@ -487,6 +687,17 @@ int alps_init(struct psmouse *psmouse)
if (alps_hw_init(psmouse))
goto init_fail;
+ /*
+ * Undo part of the setup done for us by the psmouse core since
+ * the touchpad is not a relative device.
+ */
+ __clear_bit(EV_REL, dev1->evbit);
+ __clear_bit(REL_X, dev1->relbit);
+ __clear_bit(REL_Y, dev1->relbit);
+
+ /*
+ * Now set up our capabilities.
+ */
dev1->evbit[BIT_WORD(EV_KEY)] |= BIT_MASK(EV_KEY);
dev1->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH);
dev1->keybit[BIT_WORD(BTN_TOOL_FINGER)] |= BIT_MASK(BTN_TOOL_FINGER);
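The format comments and alps_report_bare_ps2_packet() above both deal with the 3-byte bare PS/2 packet produced by the trackstick. A stand-alone sketch of that decoding, with a hypothetical helper name but the same arithmetic the driver uses when reporting REL_X/REL_Y:

/* X and Y are 9-bit signed deltas; the sign bits sit in byte 0
 * (XSGN = bit 4, YSGN = bit 5), exactly as the format comment shows. */
static void example_decode_bare_ps2(const unsigned char p[3],
				    int *left, int *right, int *middle,
				    int *dx, int *dy)
{
	*left   = p[0] & 0x01;
	*right  = p[0] & 0x02;
	*middle = p[0] & 0x04;
	*dx = p[1] ? p[1] - ((p[0] << 4) & 0x100) : 0;
	*dy = p[2] ? ((p[0] << 3) & 0x100) - p[2] : 0;
}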
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index bc87936fee1..904ed8b3c8b 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -23,6 +23,7 @@ struct alps_data {
char phys[32]; /* Phys */
const struct alps_model_info *i;/* Info */
int prev_fin; /* Finger bit from previous packet */
+ struct timer_list timer;
};
#ifdef CONFIG_MOUSE_PS2_ALPS
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index f479ea50919..320b7ca48bf 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -79,11 +79,11 @@ static void altera_ps2_close(struct serio *io)
/*
* Add one device to this driver.
*/
-static int altera_ps2_probe(struct platform_device *pdev)
+static int __devinit altera_ps2_probe(struct platform_device *pdev)
{
struct ps2if *ps2if;
struct serio *serio;
- int error;
+ int error, irq;
ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
@@ -108,11 +108,13 @@ static int altera_ps2_probe(struct platform_device *pdev)
goto err_free_mem;
}
- ps2if->irq = platform_get_irq(pdev, 0);
- if (ps2if->irq < 0) {
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
error = -ENXIO;
goto err_free_mem;
}
+ ps2if->irq = irq;
if (!request_mem_region(ps2if->iomem_res->start,
resource_size(ps2if->iomem_res), pdev->name)) {
@@ -155,7 +157,7 @@ static int altera_ps2_probe(struct platform_device *pdev)
/*
* Remove one device from this driver.
*/
-static int altera_ps2_remove(struct platform_device *pdev)
+static int __devexit altera_ps2_remove(struct platform_device *pdev)
{
struct ps2if *ps2if = platform_get_drvdata(pdev);
@@ -175,9 +177,10 @@ static int altera_ps2_remove(struct platform_device *pdev)
*/
static struct platform_driver altera_ps2_driver = {
.probe = altera_ps2_probe,
- .remove = altera_ps2_remove,
+ .remove = __devexit_p(altera_ps2_remove),
.driver = {
.name = DRV_NAME,
+ .owner = THIS_MODULE,
},
};
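Several hunks in this series (altera_ps2 above, ambakmi, gscps2 and sa1111ps2 below) apply the same pattern: annotate probe/remove with __devinit/__devexit, wrap the remove callback in __devexit_p(), and set .owner. A bare-bones sketch with invented names, assuming the 2.6-era platform driver API:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit example_probe(struct platform_device *pdev)
{
	return 0;	/* may be discarded after init when hotplug is disabled */
}

static int __devexit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	/* __devexit_p() evaluates to NULL when __devexit code is discarded */
	.remove	= __devexit_p(example_remove),
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};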
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 89b394183a7..92563a681d6 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -107,7 +107,7 @@ static void amba_kmi_close(struct serio *io)
clk_disable(kmi->clk);
}
-static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
+static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;
@@ -134,7 +134,7 @@ static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
io->port_data = kmi;
io->dev.parent = &dev->dev;
- kmi->io = io;
+ kmi->io = io;
kmi->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!kmi->base) {
ret = -ENOMEM;
@@ -162,7 +162,7 @@ static int amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
return ret;
}
-static int amba_kmi_remove(struct amba_device *dev)
+static int __devexit amba_kmi_remove(struct amba_device *dev)
{
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
@@ -197,10 +197,11 @@ static struct amba_id amba_kmi_idtable[] = {
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
+ .owner = THIS_MODULE,
},
.id_table = amba_kmi_idtable,
.probe = amba_kmi_probe,
- .remove = amba_kmi_remove,
+ .remove = __devexit_p(amba_kmi_remove),
.resume = amba_kmi_resume,
};
diff --git a/drivers/input/serio/at32psif.c b/drivers/input/serio/at32psif.c
index a6fb7a3dcc4..b54452a8c77 100644
--- a/drivers/input/serio/at32psif.c
+++ b/drivers/input/serio/at32psif.c
@@ -137,7 +137,7 @@ static int psif_write(struct serio *io, unsigned char val)
spin_lock_irqsave(&psif->lock, flags);
while (!(psif_readl(psif, SR) & PSIF_BIT(TXEMPTY)) && timeout--)
- msleep(10);
+ udelay(50);
if (timeout >= 0) {
psif_writel(psif, THR, val);
@@ -352,6 +352,7 @@ static struct platform_driver psif_driver = {
.remove = __exit_p(psif_remove),
.driver = {
.name = "atmel_psif",
+ .owner = THIS_MODULE,
},
.suspend = psif_suspend,
.resume = psif_resume,
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index bd0f92d9f40..06addfa7cc4 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -6,7 +6,7 @@
* Copyright (c) 2002 Thibaut Varene <varenet@parisc-linux.org>
*
* Pieces of code based on linux-2.4's hp_mouse.c & hp_keyb.c
- * Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca>
+ * Copyright (c) 1999 Alex deVries <alex@onefishtwo.ca>
* Copyright (c) 1999-2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (c) 2000 Xavier Debacker <debackex@esiee.fr>
* Copyright (c) 2000-2001 Thomas Marteau <marteaut@esiee.fr>
@@ -326,7 +326,7 @@ static void gscps2_close(struct serio *port)
* @return: success/error report
*/
-static int __init gscps2_probe(struct parisc_device *dev)
+static int __devinit gscps2_probe(struct parisc_device *dev)
{
struct gscps2port *ps2port;
struct serio *serio;
@@ -443,7 +443,7 @@ static struct parisc_driver parisc_ps2_driver = {
.name = "gsc_ps2",
.id_table = gscps2_device_tbl,
.probe = gscps2_probe,
- .remove = gscps2_remove,
+ .remove = __devexit_p(gscps2_remove),
};
static int __init gscps2_init(void)
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 7ba9f2b2c04..6cd03ebaf5f 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -993,10 +993,8 @@ int hil_mlc_unregister(hil_mlc *mlc)
static int __init hil_mlc_init(void)
{
- init_timer(&hil_mlcs_kicker);
- hil_mlcs_kicker.expires = jiffies + HZ;
- hil_mlcs_kicker.function = &hil_mlcs_timer;
- add_timer(&hil_mlcs_kicker);
+ setup_timer(&hil_mlcs_kicker, &hil_mlcs_timer, 0);
+ mod_timer(&hil_mlcs_kicker, jiffies + HZ);
tasklet_enable(&hil_mlcs_tasklet);
@@ -1005,7 +1003,7 @@ static int __init hil_mlc_init(void)
static void __exit hil_mlc_exit(void)
{
- del_timer(&hil_mlcs_kicker);
+ del_timer_sync(&hil_mlcs_kicker);
tasklet_disable(&hil_mlcs_tasklet);
tasklet_kill(&hil_mlcs_tasklet);
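Both the hil_mlc change above and the new flush timer in alps.c rely on the setup_timer()/mod_timer() idiom, with del_timer_sync() on the teardown path. A minimal sketch of that idiom, assuming the 2.6 timer API where the callback takes an unsigned long argument; all names are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
	/* do the periodic work, then re-arm one second from now */
	mod_timer(&example_timer, jiffies + HZ);
}

static void example_start(void)
{
	setup_timer(&example_timer, example_timer_fn, 0);
	mod_timer(&example_timer, jiffies + HZ);
}

static void example_stop(void)
{
	/* unlike del_timer(), this also waits for a running handler */
	del_timer_sync(&example_timer);
}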
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 7fbffe431bc..64b688daf48 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -158,6 +158,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Gigabyte M1022M netbook */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "M1022E"),
+ DMI_MATCH(DMI_BOARD_VERSION, "1.02"),
+ },
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 1df02d25aca..d84a36e545f 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -126,6 +126,8 @@ static unsigned char i8042_suppress_kbd_ack;
static struct platform_device *i8042_platform_device;
static irqreturn_t i8042_interrupt(int irq, void *dev_id);
+static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
+ struct serio *serio);
void i8042_lock_chip(void)
{
@@ -139,6 +141,48 @@ void i8042_unlock_chip(void)
}
EXPORT_SYMBOL(i8042_unlock_chip);
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&i8042_lock, flags);
+
+ if (i8042_platform_filter) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ i8042_platform_filter = filter;
+
+out:
+ spin_unlock_irqrestore(&i8042_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(i8042_install_filter);
+
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *port))
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&i8042_lock, flags);
+
+ if (i8042_platform_filter != filter) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ i8042_platform_filter = NULL;
+
+out:
+ spin_unlock_irqrestore(&i8042_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(i8042_remove_filter);
+
/*
* The i8042_wait_read() and i8042_wait_write functions wait for the i8042 to
* be ready for reading values from it / writing values to it.
@@ -369,6 +413,31 @@ static void i8042_stop(struct serio *serio)
}
/*
+ * i8042_filter() filters out unwanted bytes from the input data stream.
+ * It is called from i8042_interrupt and thus is running with interrupts
+ * off and i8042_lock held.
+ */
+static bool i8042_filter(unsigned char data, unsigned char str,
+ struct serio *serio)
+{
+ if (unlikely(i8042_suppress_kbd_ack)) {
+ if ((~str & I8042_STR_AUXDATA) &&
+ (data == 0xfa || data == 0xfe)) {
+ i8042_suppress_kbd_ack--;
+ dbg("Extra keyboard ACK - filtered out\n");
+ return true;
+ }
+ }
+
+ if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
+ dbg("Filtered out by platfrom filter\n");
+ return true;
+ }
+
+ return false;
+}
+
+/*
* i8042_interrupt() is the most important function in this driver -
* it handles the interrupts from the i8042, and sends incoming bytes
* to the upper layers.
@@ -377,13 +446,16 @@ static void i8042_stop(struct serio *serio)
static irqreturn_t i8042_interrupt(int irq, void *dev_id)
{
struct i8042_port *port;
+ struct serio *serio;
unsigned long flags;
unsigned char str, data;
unsigned int dfl;
unsigned int port_no;
+ bool filtered;
int ret = 1;
spin_lock_irqsave(&i8042_lock, flags);
+
str = i8042_read_status();
if (unlikely(~str & I8042_STR_OBF)) {
spin_unlock_irqrestore(&i8042_lock, flags);
@@ -391,8 +463,8 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
ret = 0;
goto out;
}
+
data = i8042_read_data();
- spin_unlock_irqrestore(&i8042_lock, flags);
if (i8042_mux_present && (str & I8042_STR_AUXDATA)) {
static unsigned long last_transmit;
@@ -441,21 +513,19 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
}
port = &i8042_ports[port_no];
+ serio = port->exists ? port->serio : NULL;
dbg("%02x <- i8042 (interrupt, %d, %d%s%s)",
data, port_no, irq,
dfl & SERIO_PARITY ? ", bad parity" : "",
dfl & SERIO_TIMEOUT ? ", timeout" : "");
- if (unlikely(i8042_suppress_kbd_ack))
- if (port_no == I8042_KBD_PORT_NO &&
- (data == 0xfa || data == 0xfe)) {
- i8042_suppress_kbd_ack--;
- goto out;
- }
+ filtered = i8042_filter(data, str, serio);
+
+ spin_unlock_irqrestore(&i8042_lock, flags);
- if (likely(port->exists))
- serio_interrupt(port->serio, data, dfl);
+ if (likely(port->exists && !filtered))
+ serio_interrupt(serio, data, dfl);
out:
return IRQ_RETVAL(ret);
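The i8042_install_filter()/i8042_remove_filter() hooks added above let platform code see every byte (called under i8042_lock with interrupts off) and return true to swallow it before serio_interrupt(). A hedged sketch of a caller: the 0x5e value and all function names are invented, and I8042_STR_AUXDATA is assumed to be visible to the filter's compilation unit:

#include <linux/init.h>
#include <linux/i8042.h>
#include <linux/serio.h>

static bool example_filter(unsigned char data, unsigned char str,
			   struct serio *serio)
{
	/* drop a hypothetical hotkey byte arriving on the keyboard port */
	if (!(str & I8042_STR_AUXDATA) && data == 0x5e)
		return true;	/* filtered out, never reaches serio */

	return false;
}

static int __init example_init(void)
{
	return i8042_install_filter(example_filter);	/* -EBUSY if one is already set */
}

static void __exit example_exit(void)
{
	i8042_remove_filter(example_filter);
}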
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index f412c69478a..d55874e5d1c 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -180,8 +180,8 @@ static void __devinit ps2_clear_input(struct ps2if *ps2if)
}
}
-static inline unsigned int
-ps2_test_one(struct ps2if *ps2if, unsigned int mask)
+static unsigned int __devinit ps2_test_one(struct ps2if *ps2if,
+ unsigned int mask)
{
unsigned int val;
@@ -197,7 +197,7 @@ ps2_test_one(struct ps2if *ps2if, unsigned int mask)
* Test the keyboard interface. We basically check to make sure that
* we can drive each line to the keyboard independently of each other.
*/
-static int __init ps2_test(struct ps2if *ps2if)
+static int __devinit ps2_test(struct ps2if *ps2if)
{
unsigned int stat;
int ret = 0;
@@ -312,7 +312,7 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
/*
* Remove one device from this driver.
*/
-static int ps2_remove(struct sa1111_dev *dev)
+static int __devexit ps2_remove(struct sa1111_dev *dev)
{
struct ps2if *ps2if = sa1111_get_drvdata(dev);
@@ -335,7 +335,7 @@ static struct sa1111_driver ps2_driver = {
},
.devid = SA1111_DEVID_PS2,
.probe = ps2_probe,
- .remove = ps2_remove,
+ .remove = __devexit_p(ps2_remove),
};
static int __init ps2_init(void)
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 9114ae1c748..16310f368da 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom.h
*
- * USB Wacom Graphire and Wacom Intuos tablet support
+ * USB Wacom tablet support
*
* Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz>
* Copyright (c) 2000 Andreas Bach Aaen <abach@stofanet.dk>
@@ -69,6 +69,9 @@
* v1.49 (pc) - Added support for USB Tablet PC (0x90, 0x93, and 0x9A)
* v1.50 (pc) - Fixed a TabletPC touch bug in 2.6.28
* v1.51 (pc) - Added support for Intuos4
+ * v1.52 (pc) - Query Wacom data upon system resume
+ * - add defines for features->type
+ * - add new devices (0x9F, 0xE2, and 0XE3)
*/
/*
@@ -89,9 +92,9 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.51"
+#define DRIVER_VERSION "v1.52"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
-#define DRIVER_DESC "USB Wacom Graphire and Wacom Intuos tablet driver"
+#define DRIVER_DESC "USB Wacom tablet driver"
#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
@@ -133,6 +136,8 @@ extern void input_dev_i4s(struct input_dev *input_dev, struct wacom_wac *wacom_w
extern void input_dev_i4(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
+extern void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
+extern void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_mo(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern void input_dev_bee(struct input_dev *input_dev, struct wacom_wac *wacom_wac);
extern __u16 wacom_le16_to_cpu(unsigned char *data);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index ea30c983a33..072f33b3b2b 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom_sys.c
*
- * USB Wacom Graphire and Wacom Intuos tablet support - system specific code
+ * USB Wacom tablet support - system specific code
*/
/*
@@ -209,6 +209,7 @@ void input_dev_g(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
input_dev->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_LEFT) |
BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE);
input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
+ BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_STYLUS2);
input_set_abs_params(input_dev, ABS_DISTANCE, 0, wacom_wac->features->distance_max, 0, 0);
}
@@ -256,6 +257,7 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE) |
BIT_MASK(BTN_SIDE) | BIT_MASK(BTN_EXTRA);
input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER) |
+ BIT_MASK(BTN_TOOL_PEN) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_TOOL_MOUSE) | BIT_MASK(BTN_TOOL_BRUSH) |
BIT_MASK(BTN_TOOL_PENCIL) | BIT_MASK(BTN_TOOL_AIRBRUSH) |
BIT_MASK(BTN_TOOL_LENS) | BIT_MASK(BTN_STYLUS2);
@@ -269,7 +271,8 @@ void input_dev_i(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
void input_dev_pl(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
{
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_STYLUS2);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
+ BIT_MASK(BTN_STYLUS) | BIT_MASK(BTN_STYLUS2);
}
void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
@@ -277,12 +280,32 @@ void input_dev_pt(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_RUBBER);
}
+void input_dev_tpc(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
+{
+ if (wacom_wac->features->device_type == BTN_TOOL_DOUBLETAP ||
+ wacom_wac->features->device_type == BTN_TOOL_TRIPLETAP) {
+ input_set_abs_params(input_dev, ABS_RX, 0, wacom_wac->features->x_phy, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, wacom_wac->features->y_phy, 0, 0);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP);
+ }
+}
+
+void input_dev_tpc2fg(struct input_dev *input_dev, struct wacom_wac *wacom_wac)
+{
+ if (wacom_wac->features->device_type == BTN_TOOL_TRIPLETAP) {
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_TRIPLETAP);
+ input_dev->evbit[0] |= BIT_MASK(EV_MSC);
+ input_dev->mscbit[0] |= BIT_MASK(MSC_SERIAL);
+ }
+}
+
static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
- struct wacom_wac *wacom_wac)
+ struct wacom_features *features)
{
struct usb_device *dev = interface_to_usbdev(intf);
- struct wacom_features *features = wacom_wac->features;
- char limit = 0, result = 0;
+ char limit = 0;
+ /* result has to be defined as int for some devices */
+ int result = 0;
int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
unsigned char *report;
@@ -328,13 +351,24 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
case HID_USAGE_X:
if (usage == WCM_DESKTOP) {
if (finger) {
- features->touch_x_max =
- features->touch_y_max =
- wacom_le16_to_cpu(&report[i + 3]);
+ features->device_type = BTN_TOOL_DOUBLETAP;
+ if (features->type == TABLETPC2FG) {
+ /* need to reset back */
+ features->pktlen = WACOM_PKGLEN_TPC2FG;
+ features->device_type = BTN_TOOL_TRIPLETAP;
+ }
features->x_max =
+ wacom_le16_to_cpu(&report[i + 3]);
+ features->x_phy =
wacom_le16_to_cpu(&report[i + 6]);
- i += 7;
+ features->unit = report[i + 9];
+ features->unitExpo = report[i + 11];
+ i += 12;
} else if (pen) {
+ /* penabled only accepts exact bytes of data */
+ if (features->type == TABLETPC2FG)
+ features->pktlen = WACOM_PKGLEN_PENABLED;
+ features->device_type = BTN_TOOL_PEN;
features->x_max =
wacom_le16_to_cpu(&report[i + 3]);
i += 4;
@@ -350,10 +384,35 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
break;
case HID_USAGE_Y:
- if (usage == WCM_DESKTOP)
- features->y_max =
- wacom_le16_to_cpu(&report[i + 3]);
- i += 4;
+ if (usage == WCM_DESKTOP) {
+ if (finger) {
+ features->device_type = BTN_TOOL_DOUBLETAP;
+ if (features->type == TABLETPC2FG) {
+ /* need to reset back */
+ features->pktlen = WACOM_PKGLEN_TPC2FG;
+ features->device_type = BTN_TOOL_TRIPLETAP;
+ features->y_max =
+ wacom_le16_to_cpu(&report[i + 3]);
+ features->y_phy =
+ wacom_le16_to_cpu(&report[i + 6]);
+ i += 7;
+ } else {
+ features->y_max =
+ features->x_max;
+ features->y_phy =
+ wacom_le16_to_cpu(&report[i + 3]);
+ i += 4;
+ }
+ } else if (pen) {
+ /* penabled only accepts exact bytes of data */
+ if (features->type == TABLETPC2FG)
+ features->pktlen = WACOM_PKGLEN_PENABLED;
+ features->device_type = BTN_TOOL_PEN;
+ features->y_max =
+ wacom_le16_to_cpu(&report[i + 3]);
+ i += 4;
+ }
+ }
break;
case HID_USAGE_FINGER:
@@ -376,7 +435,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
break;
case HID_COLLECTION:
- /* reset UsagePage ans Finger */
+ /* reset UsagePage and Finger */
finger = usage = 0;
break;
}
@@ -388,43 +447,92 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
return result;
}
-static int wacom_query_tablet_data(struct usb_interface *intf)
+static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features)
{
unsigned char *rep_data;
- int limit = 0;
- int error;
+ int limit = 0, report_id = 2;
+ int error = -ENOMEM;
rep_data = kmalloc(2, GFP_KERNEL);
if (!rep_data)
- return -ENOMEM;
-
- do {
- rep_data[0] = 2;
- rep_data[1] = 2;
- error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
- 2, rep_data, 2);
- if (error >= 0)
- error = usb_get_report(intf,
- WAC_HID_FEATURE_REPORT, 2,
- rep_data, 2);
- } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
+ return error;
+
+ /* ask to report tablet data if it is 2FGT or not a Tablet PC */
+ if (features->device_type == BTN_TOOL_TRIPLETAP) {
+ do {
+ rep_data[0] = 3;
+ rep_data[1] = 4;
+ report_id = 3;
+ error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 2);
+ if (error >= 0)
+ error = usb_get_report(intf,
+ WAC_HID_FEATURE_REPORT, report_id,
+ rep_data, 3);
+ } while ((error < 0 || rep_data[1] != 4) && limit++ < 5);
+ } else if (features->type != TABLETPC && features->type != TABLETPC2FG) {
+ do {
+ rep_data[0] = 2;
+ rep_data[1] = 2;
+ error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
+ report_id, rep_data, 2);
+ if (error >= 0)
+ error = usb_get_report(intf,
+ WAC_HID_FEATURE_REPORT, report_id,
+ rep_data, 2);
+ } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
+ }
kfree(rep_data);
return error < 0 ? error : 0;
}
+static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
+ struct wacom_features *features)
+{
+ int error = 0;
+ struct usb_host_interface *interface = intf->cur_altsetting;
+ struct hid_descriptor *hid_desc;
+
+ /* default device to penabled */
+ features->device_type = BTN_TOOL_PEN;
+
+ /* only Tablet PCs need to retrieve the info */
+ if ((features->type != TABLETPC) && (features->type != TABLETPC2FG))
+ goto out;
+
+ if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
+ if (usb_get_extra_descriptor(&interface->endpoint[0],
+ HID_DEVICET_REPORT, &hid_desc)) {
+ printk("wacom: can not retrieve extra class descriptor\n");
+ error = 1;
+ goto out;
+ }
+ }
+ error = wacom_parse_hid(intf, hid_desc, features);
+ if (error)
+ goto out;
+
+ /* touch device found but size is not defined. use default */
+ if (features->device_type == BTN_TOOL_DOUBLETAP && !features->x_max) {
+ features->x_max = 1023;
+ features->y_max = 1023;
+ }
+
+ out:
+ return error;
+}
+
static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
struct wacom *wacom;
struct wacom_wac *wacom_wac;
struct wacom_features *features;
struct input_dev *input_dev;
int error = -ENOMEM;
- struct hid_descriptor *hid_desc;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
wacom_wac = kzalloc(sizeof(struct wacom_wac), GFP_KERNEL);
@@ -432,7 +540,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
if (!wacom || !input_dev || !wacom_wac)
goto fail1;
- wacom_wac->data = usb_buffer_alloc(dev, 10, GFP_KERNEL, &wacom->data_dma);
+ wacom_wac->data = usb_buffer_alloc(dev, WACOM_PKGLEN_MAX, GFP_KERNEL, &wacom->data_dma);
if (!wacom_wac->data)
goto fail1;
@@ -448,7 +556,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
wacom_wac->features = features = get_wacom_feature(id);
- BUG_ON(features->pktlen > 10);
+ BUG_ON(features->pktlen > WACOM_PKGLEN_MAX);
input_dev->name = wacom_wac->features->name;
wacom->wacom_wac = wacom_wac;
@@ -463,47 +571,24 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
endpoint = &intf->cur_altsetting->endpoint[0].desc;
- /* Initialize touch_x_max and touch_y_max in case it is not defined */
- if (wacom_wac->features->type == TABLETPC) {
- features->touch_x_max = 1023;
- features->touch_y_max = 1023;
- } else {
- features->touch_x_max = 0;
- features->touch_y_max = 0;
- }
-
- /* TabletPC need to retrieve the physical and logical maximum from report descriptor */
- if (wacom_wac->features->type == TABLETPC) {
- if (usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc)) {
- if (usb_get_extra_descriptor(&interface->endpoint[0],
- HID_DEVICET_REPORT, &hid_desc)) {
- printk("wacom: can not retrive extra class descriptor\n");
- goto fail2;
- }
- }
- error = wacom_parse_hid(intf, hid_desc, wacom_wac);
- if (error)
- goto fail2;
- }
+ /* Retrieve the physical and logical size for OEM devices */
+ error = wacom_retrieve_hid_descriptor(intf, features);
+ if (error)
+ goto fail2;
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_PEN) |
- BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS);
+ input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOUCH);
+
input_set_abs_params(input_dev, ABS_X, 0, features->x_max, 4, 0);
input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, 0, 0);
- if (features->type == TABLETPC) {
- input_dev->keybit[BIT_WORD(BTN_DIGI)] |= BIT_MASK(BTN_TOOL_DOUBLETAP);
- input_set_abs_params(input_dev, ABS_RX, 0, features->touch_x_max, 4, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, features->touch_y_max, 4, 0);
- }
input_dev->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
wacom_init_input_dev(input_dev, wacom_wac);
usb_fill_int_urb(wacom->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
- wacom_wac->data, wacom_wac->features->pktlen,
+ wacom_wac->data, features->pktlen,
wacom_sys_irq, wacom, endpoint->bInterval);
wacom->irq->transfer_dma = wacom->data_dma;
wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -512,18 +597,14 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
if (error)
goto fail3;
- /*
- * Ask the tablet to report tablet data if it is not a Tablet PC.
- * Note that if query fails it is not a hard failure.
- */
- if (wacom_wac->features->type != TABLETPC)
- wacom_query_tablet_data(intf);
+ /* Note that if query fails it is not a hard failure */
+ wacom_query_tablet_data(intf, features);
usb_set_intfdata(intf, wacom);
return 0;
fail3: usb_free_urb(wacom->irq);
- fail2: usb_buffer_free(dev, 10, wacom_wac->data, wacom->data_dma);
+ fail2: usb_buffer_free(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
fail1: input_free_device(input_dev);
kfree(wacom);
kfree(wacom_wac);
@@ -539,7 +620,7 @@ static void wacom_disconnect(struct usb_interface *intf)
usb_kill_urb(wacom->irq);
input_unregister_device(wacom->dev);
usb_free_urb(wacom->irq);
- usb_buffer_free(interface_to_usbdev(intf), 10,
+ usb_buffer_free(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
wacom->wacom_wac->data, wacom->data_dma);
kfree(wacom->wacom_wac);
kfree(wacom);
@@ -559,12 +640,16 @@ static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
static int wacom_resume(struct usb_interface *intf)
{
struct wacom *wacom = usb_get_intfdata(intf);
+ struct wacom_features *features = wacom->wacom_wac->features;
int rv;
mutex_lock(&wacom->lock);
- if (wacom->open)
+ if (wacom->open) {
rv = usb_submit_urb(wacom->irq, GFP_NOIO);
- else
+ /* switch to wacom mode if needed */
+ if (!wacom_retrieve_hid_descriptor(intf, features))
+ wacom_query_tablet_data(intf, features);
+ } else
rv = 0;
mutex_unlock(&wacom->lock);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index c896d6a21b7..1056f149fe3 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1,7 +1,7 @@
/*
* drivers/input/tablet/wacom_wac.c
*
- * USB Wacom Graphire and Wacom Intuos tablet support - Wacom specific code
+ * USB Wacom tablet support - Wacom specific code
*
*/
@@ -58,16 +58,15 @@ static int wacom_pl_irq(struct wacom_wac *wacom, void *wcombo)
unsigned char *data = wacom->data;
int prox, pressure;
- if (data[0] != 2) {
+ if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_pl_irq: received unknown report #%d", data[0]);
return 0;
}
prox = data[1] & 0x40;
- wacom->id[0] = ERASER_DEVICE_ID;
if (prox) {
-
+ wacom->id[0] = ERASER_DEVICE_ID;
pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
if (wacom->features->pressure_max > 255)
pressure = (pressure << 1) | ((data[4] >> 6) & 1);
@@ -128,7 +127,7 @@ static int wacom_ptu_irq(struct wacom_wac *wacom, void *wcombo)
{
unsigned char *data = wacom->data;
- if (data[0] != 2) {
+ if (data[0] != WACOM_REPORT_PENABLED) {
printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]);
return 0;
}
@@ -155,14 +154,16 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
{
unsigned char *data = wacom->data;
int x, y, rw;
+ static int penData = 0;
- if (data[0] != 2) {
+ if (data[0] != WACOM_REPORT_PENABLED) {
dbg("wacom_graphire_irq: received unknown report #%d", data[0]);
return 0;
}
if (data[1] & 0x80) {
/* in prox and not a pad data */
+ penData = 1;
switch ((data[1] >> 5) & 3) {
@@ -232,7 +233,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
switch (wacom->features->type) {
case WACOM_G4:
if (data[7] & 0xf8) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = PAD_DEVICE_ID;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
@@ -242,10 +247,15 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
} else if (wacom->id[1]) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = 0;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x40));
wacom_report_key(wcombo, BTN_4, (data[7] & 0x80));
+ wacom_report_rel(wcombo, REL_WHEEL, 0);
wacom_report_key(wcombo, BTN_TOOL_FINGER, 0);
wacom_report_abs(wcombo, ABS_MISC, 0);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
@@ -253,7 +263,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
break;
case WACOM_MO:
if ((data[7] & 0xf8) || (data[8] & 0xff)) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = PAD_DEVICE_ID;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -264,7 +278,11 @@ static int wacom_graphire_irq(struct wacom_wac *wacom, void *wcombo)
wacom_report_abs(wcombo, ABS_MISC, wacom->id[1]);
wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
} else if (wacom->id[1]) {
- wacom_input_sync(wcombo); /* sync last event */
+ if (penData) {
+ wacom_input_sync(wcombo); /* sync last event */
+ if (!wacom->id[0])
+ penData = 0;
+ }
wacom->id[1] = 0;
wacom_report_key(wcombo, BTN_0, (data[7] & 0x08));
wacom_report_key(wcombo, BTN_1, (data[7] & 0x20));
@@ -432,7 +450,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
unsigned int t;
int idx = 0, result;
- if (data[0] != 2 && data[0] != 5 && data[0] != 6 && data[0] != 12) {
+ if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_INTUOSREAD
+ && data[0] != WACOM_REPORT_INTUOSWRITE && data[0] != WACOM_REPORT_INTUOSPAD) {
dbg("wacom_intuos_irq: received unknown report #%d", data[0]);
return 0;
}
@@ -442,7 +461,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
idx = data[1] & 0x01;
/* pad packets. Works as a second tool and is always in prox */
- if (data[0] == 12) {
+ if (data[0] == WACOM_REPORT_INTUOSPAD) {
/* initiate the pad as a device */
if (wacom->tool[1] != BTN_TOOL_FINGER)
wacom->tool[1] = BTN_TOOL_FINGER;
@@ -608,95 +627,163 @@ static int wacom_intuos_irq(struct wacom_wac *wacom, void *wcombo)
return 1;
}
+
+static void wacom_tpc_finger_in(struct wacom_wac *wacom, void *wcombo, char *data, int idx)
+{
+ wacom_report_abs(wcombo, ABS_X,
+ (data[2 + idx * 2] & 0xff) | ((data[3 + idx * 2] & 0x7f) << 8));
+ wacom_report_abs(wcombo, ABS_Y,
+ (data[6 + idx * 2] & 0xff) | ((data[7 + idx * 2] & 0x7f) << 8));
+ wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
+ wacom_report_key(wcombo, wacom->tool[idx], 1);
+ if (idx)
+ wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ else
+ wacom_report_key(wcombo, BTN_TOUCH, 1);
+}
+
+static void wacom_tpc_touch_out(struct wacom_wac *wacom, void *wcombo, int idx)
+{
+ wacom_report_abs(wcombo, ABS_X, 0);
+ wacom_report_abs(wcombo, ABS_Y, 0);
+ wacom_report_abs(wcombo, ABS_MISC, 0);
+ wacom_report_key(wcombo, wacom->tool[idx], 0);
+ if (idx)
+ wacom_input_event(wcombo, EV_MSC, MSC_SERIAL, 0xf0);
+ else
+ wacom_report_key(wcombo, BTN_TOUCH, 0);
+ return;
+}
+
+static void wacom_tpc_touch_in(struct wacom_wac *wacom, void *wcombo)
+{
+ char *data = wacom->data;
+ struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
+ static int firstFinger = 0;
+ static int secondFinger = 0;
+
+ wacom->tool[0] = BTN_TOOL_DOUBLETAP;
+ wacom->id[0] = TOUCH_DEVICE_ID;
+ wacom->tool[1] = BTN_TOOL_TRIPLETAP;
+
+ if (urb->actual_length != WACOM_PKGLEN_TPC1FG) {
+ switch (data[0]) {
+ case WACOM_REPORT_TPC1FG:
+ wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
+ wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
+ wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
+ wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
+ wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
+ wacom_report_key(wcombo, wacom->tool[0], 1);
+ break;
+ case WACOM_REPORT_TPC2FG:
+ /* keep this byte to send proper out-prox event */
+ wacom->id[1] = data[1] & 0x03;
+
+ if (data[1] & 0x01) {
+ wacom_tpc_finger_in(wacom, wcombo, data, 0);
+ firstFinger = 1;
+ } else if (firstFinger) {
+ wacom_tpc_touch_out(wacom, wcombo, 0);
+ }
+
+ if (data[1] & 0x02) {
+ /* sync first finger data */
+ if (firstFinger)
+ wacom_input_sync(wcombo);
+
+ wacom_tpc_finger_in(wacom, wcombo, data, 1);
+ secondFinger = 1;
+ } else if (secondFinger) {
+ /* sync first finger data */
+ if (firstFinger)
+ wacom_input_sync(wcombo);
+
+ wacom_tpc_touch_out(wacom, wcombo, 1);
+ secondFinger = 0;
+ }
+ if (!(data[1] & 0x01))
+ firstFinger = 0;
+ break;
+ }
+ } else {
+ wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
+ wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
+ wacom_report_key(wcombo, BTN_TOUCH, 1);
+ wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
+ wacom_report_key(wcombo, wacom->tool[0], 1);
+ }
+ return;
+}
+
static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
{
char *data = wacom->data;
- int prox = 0, pressure;
+ int prox = 0, pressure, idx = -1;
static int stylusInProx, touchInProx = 1, touchOut;
struct urb *urb = ((struct wacom_combo *)wcombo)->urb;
dbg("wacom_tpc_irq: received report #%d", data[0]);
- if (urb->actual_length == 5 || data[0] == 6) { /* Touch data */
- if (urb->actual_length == 5) { /* with touch */
- prox = data[0] & 0x03;
+ if (urb->actual_length == WACOM_PKGLEN_TPC1FG || /* single touch */
+ data[0] == WACOM_REPORT_TPC1FG || /* single touch */
+ data[0] == WACOM_REPORT_TPC2FG) { /* 2FG touch */
+ if (urb->actual_length == WACOM_PKGLEN_TPC1FG) { /* with touch */
+ prox = data[0] & 0x01;
} else { /* with capacity */
- prox = data[1] & 0x03;
+ if (data[0] == WACOM_REPORT_TPC1FG)
+ /* single touch */
+ prox = data[1] & 0x01;
+ else
+ /* 2FG touch data */
+ prox = data[1] & 0x03;
}
if (!stylusInProx) { /* stylus not in prox */
if (prox) {
if (touchInProx) {
- wacom->tool[1] = BTN_TOOL_DOUBLETAP;
- wacom->id[0] = TOUCH_DEVICE_ID;
- if (urb->actual_length != 5) {
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[2]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[4]));
- wacom_report_abs(wcombo, ABS_PRESSURE, wacom_le16_to_cpu(&data[6]));
- wacom_report_key(wcombo, BTN_TOUCH, wacom_le16_to_cpu(&data[6]));
- } else {
- wacom_report_abs(wcombo, ABS_X, wacom_le16_to_cpu(&data[1]));
- wacom_report_abs(wcombo, ABS_Y, wacom_le16_to_cpu(&data[3]));
- wacom_report_key(wcombo, BTN_TOUCH, 1);
- }
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[1], prox & 0x01);
+ wacom_tpc_touch_in(wacom, wcombo);
touchOut = 1;
return 1;
}
} else {
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[1], prox & 0x01);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
+ /* 2FGT out-prox */
+ if (data[0] == WACOM_REPORT_TPC2FG) {
+ idx = (wacom->id[1] & 0x01) - 1;
+ if (idx == 0) {
+ wacom_tpc_touch_out(wacom, wcombo, idx);
+ /* sync first finger event */
+ if (wacom->id[1] & 0x02)
+ wacom_input_sync(wcombo);
+ }
+ idx = (wacom->id[1] & 0x02) - 1;
+ if (idx == 1)
+ wacom_tpc_touch_out(wacom, wcombo, idx);
+ } else /* one finger touch */
+ wacom_tpc_touch_out(wacom, wcombo, 0);
touchOut = 0;
touchInProx = 1;
return 1;
}
} else if (touchOut || !prox) { /* force touch out-prox */
- wacom_report_abs(wcombo, ABS_MISC, TOUCH_DEVICE_ID);
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom_tpc_touch_out(wacom, wcombo, 0);
touchOut = 0;
touchInProx = 1;
return 1;
}
- } else if (data[0] == 2) { /* Penabled */
+ } else if (data[0] == WACOM_REPORT_PENABLED) { /* Penabled */
prox = data[1] & 0x20;
touchInProx = 0;
- wacom->id[0] = ERASER_DEVICE_ID;
-
- /*
- * if going from out of proximity into proximity select between the eraser
- * and the pen based on the state of the stylus2 button, choose eraser if
- * pressed else choose pen. if not a proximity change from out to in, send
- * an out of proximity for previous tool then a in for new tool.
- */
if (prox) { /* in prox */
- if (!wacom->tool[0]) {
+ if (!wacom->id[0]) {
/* Going into proximity select tool */
- wacom->tool[1] = (data[1] & 0x08) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
- if (wacom->tool[1] == BTN_TOOL_PEN)
+ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+ if (wacom->tool[0] == BTN_TOOL_PEN)
wacom->id[0] = STYLUS_DEVICE_ID;
- } else if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[1] & 0x08)) {
- /*
- * was entered with stylus2 pressed
- * report out proximity for previous tool
- */
- wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
- wacom_report_key(wcombo, wacom->tool[1], 0);
- wacom_input_sync(wcombo);
-
- /* set new tool */
- wacom->tool[1] = BTN_TOOL_PEN;
- wacom->id[0] = STYLUS_DEVICE_ID;
- return 0;
- }
- if (wacom->tool[1] != BTN_TOOL_RUBBER) {
- /* Unknown tool selected default to pen tool */
- wacom->tool[1] = BTN_TOOL_PEN;
- wacom->id[0] = STYLUS_DEVICE_ID;
+ else
+ wacom->id[0] = ERASER_DEVICE_ID;
}
wacom_report_key(wcombo, BTN_STYLUS, data[1] & 0x02);
wacom_report_key(wcombo, BTN_STYLUS2, data[1] & 0x10);
@@ -706,17 +793,21 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, void *wcombo)
if (pressure < 0)
pressure = wacom->features->pressure_max + pressure + 1;
wacom_report_abs(wcombo, ABS_PRESSURE, pressure);
- wacom_report_key(wcombo, BTN_TOUCH, pressure);
+ wacom_report_key(wcombo, BTN_TOUCH, data[1] & 0x05);
} else {
+ wacom_report_abs(wcombo, ABS_X, 0);
+ wacom_report_abs(wcombo, ABS_Y, 0);
wacom_report_abs(wcombo, ABS_PRESSURE, 0);
wacom_report_key(wcombo, BTN_STYLUS, 0);
wacom_report_key(wcombo, BTN_STYLUS2, 0);
wacom_report_key(wcombo, BTN_TOUCH, 0);
+ wacom->id[0] = 0;
+ /* pen is out so touch can be enabled now */
+ touchInProx = 1;
}
- wacom_report_key(wcombo, wacom->tool[1], prox);
+ wacom_report_key(wcombo, wacom->tool[0], prox);
wacom_report_abs(wcombo, ABS_MISC, wacom->id[0]);
stylusInProx = prox;
- wacom->tool[0] = prox;
return 1;
}
return 0;
@@ -751,6 +842,7 @@ int wacom_wac_irq(struct wacom_wac *wacom_wac, void *wcombo)
return wacom_intuos_irq(wacom_wac, wcombo);
case TABLETPC:
+ case TABLETPC2FG:
return wacom_tpc_irq(wacom_wac, wcombo);
default:
@@ -791,9 +883,17 @@ void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_w
input_dev_i4s(input_dev, wacom_wac);
input_dev_i(input_dev, wacom_wac);
break;
+ case TABLETPC2FG:
+ input_dev_tpc2fg(input_dev, wacom_wac);
+ /* fall through */
+ case TABLETPC:
+ input_dev_tpc(input_dev, wacom_wac);
+ if (wacom_wac->features->device_type != BTN_TOOL_PEN)
+ break; /* no need to process stylus stuff */
+
+ /* fall through */
case PL:
case PTU:
- case TABLETPC:
input_dev_pl(input_dev, wacom_wac);
/* fall through */
case PENPARTNER:
@@ -804,66 +904,69 @@ void wacom_init_input_dev(struct input_dev *input_dev, struct wacom_wac *wacom_w
}
static struct wacom_features wacom_features[] = {
- { "Wacom Penpartner", 7, 5040, 3780, 255, 0, PENPARTNER },
- { "Wacom Graphire", 8, 10206, 7422, 511, 63, GRAPHIRE },
- { "Wacom Graphire2 4x5", 8, 10206, 7422, 511, 63, GRAPHIRE },
- { "Wacom Graphire2 5x7", 8, 13918, 10206, 511, 63, GRAPHIRE },
- { "Wacom Graphire3", 8, 10208, 7424, 511, 63, GRAPHIRE },
- { "Wacom Graphire3 6x8", 8, 16704, 12064, 511, 63, GRAPHIRE },
- { "Wacom Graphire4 4x5", 8, 10208, 7424, 511, 63, WACOM_G4 },
- { "Wacom Graphire4 6x8", 8, 16704, 12064, 511, 63, WACOM_G4 },
- { "Wacom BambooFun 4x5", 9, 14760, 9225, 511, 63, WACOM_MO },
- { "Wacom BambooFun 6x8", 9, 21648, 13530, 511, 63, WACOM_MO },
- { "Wacom Bamboo1 Medium",8, 16704, 12064, 511, 63, GRAPHIRE },
- { "Wacom Volito", 8, 5104, 3712, 511, 63, GRAPHIRE },
- { "Wacom PenStation2", 8, 3250, 2320, 255, 63, GRAPHIRE },
- { "Wacom Volito2 4x5", 8, 5104, 3712, 511, 63, GRAPHIRE },
- { "Wacom Volito2 2x3", 8, 3248, 2320, 511, 63, GRAPHIRE },
- { "Wacom PenPartner2", 8, 3250, 2320, 511, 63, GRAPHIRE },
- { "Wacom Bamboo", 9, 14760, 9225, 511, 63, WACOM_MO },
- { "Wacom Bamboo1", 8, 5104, 3712, 511, 63, GRAPHIRE },
- { "Wacom Intuos 4x5", 10, 12700, 10600, 1023, 31, INTUOS },
- { "Wacom Intuos 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
- { "Wacom Intuos 9x12", 10, 30480, 24060, 1023, 31, INTUOS },
- { "Wacom Intuos 12x12", 10, 30480, 31680, 1023, 31, INTUOS },
- { "Wacom Intuos 12x18", 10, 45720, 31680, 1023, 31, INTUOS },
- { "Wacom PL400", 8, 5408, 4056, 255, 0, PL },
- { "Wacom PL500", 8, 6144, 4608, 255, 0, PL },
- { "Wacom PL600", 8, 6126, 4604, 255, 0, PL },
- { "Wacom PL600SX", 8, 6260, 5016, 255, 0, PL },
- { "Wacom PL550", 8, 6144, 4608, 511, 0, PL },
- { "Wacom PL800", 8, 7220, 5780, 511, 0, PL },
- { "Wacom PL700", 8, 6758, 5406, 511, 0, PL },
- { "Wacom PL510", 8, 6282, 4762, 511, 0, PL },
- { "Wacom DTU710", 8, 34080, 27660, 511, 0, PL },
- { "Wacom DTF521", 8, 6282, 4762, 511, 0, PL },
- { "Wacom DTF720", 8, 6858, 5506, 511, 0, PL },
- { "Wacom DTF720a", 8, 6858, 5506, 511, 0, PL },
- { "Wacom Cintiq Partner",8, 20480, 15360, 511, 0, PTU },
- { "Wacom Intuos2 4x5", 10, 12700, 10600, 1023, 31, INTUOS },
- { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
- { "Wacom Intuos2 9x12", 10, 30480, 24060, 1023, 31, INTUOS },
- { "Wacom Intuos2 12x12", 10, 30480, 31680, 1023, 31, INTUOS },
- { "Wacom Intuos2 12x18", 10, 45720, 31680, 1023, 31, INTUOS },
- { "Wacom Intuos3 4x5", 10, 25400, 20320, 1023, 63, INTUOS3S },
- { "Wacom Intuos3 6x8", 10, 40640, 30480, 1023, 63, INTUOS3 },
- { "Wacom Intuos3 9x12", 10, 60960, 45720, 1023, 63, INTUOS3 },
- { "Wacom Intuos3 12x12", 10, 60960, 60960, 1023, 63, INTUOS3L },
- { "Wacom Intuos3 12x19", 10, 97536, 60960, 1023, 63, INTUOS3L },
- { "Wacom Intuos3 6x11", 10, 54204, 31750, 1023, 63, INTUOS3 },
- { "Wacom Intuos3 4x6", 10, 31496, 19685, 1023, 63, INTUOS3S },
- { "Wacom Intuos4 4x6", 10, 31496, 19685, 2047, 63, INTUOS4S },
- { "Wacom Intuos4 6x9", 10, 44704, 27940, 2047, 63, INTUOS4 },
- { "Wacom Intuos4 8x13", 10, 65024, 40640, 2047, 63, INTUOS4L },
- { "Wacom Intuos4 12x19", 10, 97536, 60960, 2047, 63, INTUOS4L },
- { "Wacom Cintiq 21UX", 10, 87200, 65600, 1023, 63, CINTIQ },
- { "Wacom Cintiq 20WSX", 10, 86680, 54180, 1023, 63, WACOM_BEE },
- { "Wacom Cintiq 12WX", 10, 53020, 33440, 1023, 63, WACOM_BEE },
- { "Wacom DTU1931", 8, 37832, 30305, 511, 0, PL },
- { "Wacom ISDv4 90", 8, 26202, 16325, 255, 0, TABLETPC },
- { "Wacom ISDv4 93", 8, 26202, 16325, 255, 0, TABLETPC },
- { "Wacom ISDv4 9A", 8, 26202, 16325, 255, 0, TABLETPC },
- { "Wacom Intuos2 6x8", 10, 20320, 16240, 1023, 31, INTUOS },
+ { "Wacom Penpartner", WACOM_PKGLEN_PENPRTN, 5040, 3780, 255, 0, PENPARTNER },
+ { "Wacom Graphire", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE },
+ { "Wacom Graphire2 4x5", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE },
+ { "Wacom Graphire2 5x7", WACOM_PKGLEN_GRAPHIRE, 13918, 10206, 511, 63, GRAPHIRE },
+ { "Wacom Graphire3", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, GRAPHIRE },
+ { "Wacom Graphire3 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE },
+ { "Wacom Graphire4 4x5", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, WACOM_G4 },
+ { "Wacom Graphire4 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, WACOM_G4 },
+ { "Wacom BambooFun 4x5", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO },
+ { "Wacom BambooFun 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 511, 63, WACOM_MO },
+ { "Wacom Bamboo1 Medium", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE },
+ { "Wacom Volito", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
+ { "Wacom PenStation2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 255, 63, GRAPHIRE },
+ { "Wacom Volito2 4x5", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
+ { "Wacom Volito2 2x3", WACOM_PKGLEN_GRAPHIRE, 3248, 2320, 511, 63, GRAPHIRE },
+ { "Wacom PenPartner2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 511, 63, GRAPHIRE },
+ { "Wacom Bamboo", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO },
+ { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE },
+ { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS },
+ { "Wacom Intuos 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
+ { "Wacom Intuos 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS },
+ { "Wacom Intuos 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS },
+ { "Wacom Intuos 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS },
+ { "Wacom PL400", WACOM_PKGLEN_GRAPHIRE, 5408, 4056, 255, 0, PL },
+ { "Wacom PL500", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 255, 0, PL },
+ { "Wacom PL600", WACOM_PKGLEN_GRAPHIRE, 6126, 4604, 255, 0, PL },
+ { "Wacom PL600SX", WACOM_PKGLEN_GRAPHIRE, 6260, 5016, 255, 0, PL },
+ { "Wacom PL550", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 511, 0, PL },
+ { "Wacom PL800", WACOM_PKGLEN_GRAPHIRE, 7220, 5780, 511, 0, PL },
+ { "Wacom PL700", WACOM_PKGLEN_GRAPHIRE, 6758, 5406, 511, 0, PL },
+ { "Wacom PL510", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL },
+ { "Wacom DTU710", WACOM_PKGLEN_GRAPHIRE, 34080, 27660, 511, 0, PL },
+ { "Wacom DTF521", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL },
+ { "Wacom DTF720", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL },
+ { "Wacom DTF720a", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL },
+ { "Wacom Cintiq Partner", WACOM_PKGLEN_GRAPHIRE, 20480, 15360, 511, 0, PTU },
+ { "Wacom Intuos2 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS },
+ { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
+ { "Wacom Intuos2 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS },
+ { "Wacom Intuos2 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS },
+ { "Wacom Intuos2 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS },
+ { "Wacom Intuos3 4x5", WACOM_PKGLEN_INTUOS, 25400, 20320, 1023, 63, INTUOS3S },
+ { "Wacom Intuos3 6x8", WACOM_PKGLEN_INTUOS, 40640, 30480, 1023, 63, INTUOS3 },
+ { "Wacom Intuos3 9x12", WACOM_PKGLEN_INTUOS, 60960, 45720, 1023, 63, INTUOS3 },
+ { "Wacom Intuos3 12x12", WACOM_PKGLEN_INTUOS, 60960, 60960, 1023, 63, INTUOS3L },
+ { "Wacom Intuos3 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 1023, 63, INTUOS3L },
+ { "Wacom Intuos3 6x11", WACOM_PKGLEN_INTUOS, 54204, 31750, 1023, 63, INTUOS3 },
+ { "Wacom Intuos3 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 1023, 63, INTUOS3S },
+ { "Wacom Intuos4 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, 63, INTUOS4S },
+ { "Wacom Intuos4 6x9", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS4 },
+ { "Wacom Intuos4 8x13", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, 63, INTUOS4L },
+ { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 63, INTUOS4L },
+ { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 63, CINTIQ },
+ { "Wacom Cintiq 20WSX", WACOM_PKGLEN_INTUOS, 86680, 54180, 1023, 63, WACOM_BEE },
+ { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE },
+ { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL },
+ { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 9F", WACOM_PKGLEN_PENABLED, 26202, 16325, 255, 0, TABLETPC },
+ { "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG },
+ { "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG },
+ { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS },
{ }
};
@@ -927,6 +1030,9 @@ static struct usb_device_id wacom_ids[] = {
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x90) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x93) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9A) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0x9F) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xE2) },
+ { USB_DEVICE(USB_VENDOR_ID_WACOM, 0xE3) },
{ USB_DEVICE(USB_VENDOR_ID_WACOM, 0x47) },
{ }
};
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index c10235aba7e..ee01e190278 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -9,12 +9,33 @@
#ifndef WACOM_WAC_H
#define WACOM_WAC_H
+/* maximum packet length for USB devices */
+#define WACOM_PKGLEN_MAX 32
+
+/* packet length for individual models */
+#define WACOM_PKGLEN_PENPRTN 7
+#define WACOM_PKGLEN_GRAPHIRE 8
+#define WACOM_PKGLEN_BBFUN 9
+#define WACOM_PKGLEN_INTUOS 10
+#define WACOM_PKGLEN_PENABLED 8
+#define WACOM_PKGLEN_TPC1FG 5
+#define WACOM_PKGLEN_TPC2FG 14
+
+/* device IDs */
#define STYLUS_DEVICE_ID 0x02
#define TOUCH_DEVICE_ID 0x03
#define CURSOR_DEVICE_ID 0x06
#define ERASER_DEVICE_ID 0x0A
#define PAD_DEVICE_ID 0x0F
+/* wacom data packet report IDs */
+#define WACOM_REPORT_PENABLED 2
+#define WACOM_REPORT_INTUOSREAD 5
+#define WACOM_REPORT_INTUOSWRITE 6
+#define WACOM_REPORT_INTUOSPAD 12
+#define WACOM_REPORT_TPC1FG 6
+#define WACOM_REPORT_TPC2FG 13
+
enum {
PENPARTNER = 0,
GRAPHIRE,
@@ -32,6 +53,7 @@ enum {
WACOM_BEE,
WACOM_MO,
TABLETPC,
+ TABLETPC2FG,
MAX_TYPE
};
@@ -43,8 +65,11 @@ struct wacom_features {
int pressure_max;
int distance_max;
int type;
- int touch_x_max;
- int touch_y_max;
+ int device_type;
+ int x_phy;
+ int y_phy;
+ unsigned char unit;
+ unsigned char unitExpo;
};
struct wacom_wac {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 32fc8ba039a..dfafc76da4f 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -450,6 +450,18 @@ config TOUCHSCREEN_USB_COMPOSITE
To compile this driver as a module, choose M here: the
module will be called usbtouchscreen.
+config TOUCHSCREEN_MC13783
+ tristate "Freescale MC13783 touchscreen input driver"
+ depends on MFD_MC13783
+ help
+ Say Y here if you have a Freescale MC13783 PMIC on your
+ board and want to use its touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mc13783_ts.
+
config TOUCHSCREEN_USB_EGALAX
default y
bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f1f59c9e121..d61a3b4def9 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
+obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
new file mode 100644
index 00000000000..be115b3b65e
--- /dev/null
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -0,0 +1,258 @@
+/*
+ * Driver for the Freescale Semiconductor MC13783 touchscreen.
+ *
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2009 Sascha Hauer, Pengutronix
+ *
+ * Initial development of this code was funded by
+ * Phytec Messtechnik GmbH, http://www.phytec.de/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/platform_device.h>
+#include <linux/mfd/mc13783.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#define MC13783_TS_NAME "mc13783-ts"
+
+#define DEFAULT_SAMPLE_TOLERANCE 300
+
+static unsigned int sample_tolerance = DEFAULT_SAMPLE_TOLERANCE;
+module_param(sample_tolerance, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sample_tolerance,
+ "If the minimal and maximal value read out for one axis (out "
+ "of three) differ by this value (default: "
+ __stringify(DEFAULT_SAMPLE_TOLERANCE) ") or more, the reading "
+ "is supposed to be wrong and is discarded. Set to 0 to "
+ "disable this check.");
+
+struct mc13783_ts_priv {
+ struct input_dev *idev;
+ struct mc13783 *mc13783;
+ struct delayed_work work;
+ struct workqueue_struct *workq;
+ unsigned int sample[4];
+};
+
+static irqreturn_t mc13783_ts_handler(int irq, void *data)
+{
+ struct mc13783_ts_priv *priv = data;
+
+ mc13783_ackirq(priv->mc13783, irq);
+
+ /*
+ * Kick off reading coordinates. Note that if the work is already
+ * queued for future execution (it rearms itself), it will not
+ * be rescheduled for immediate execution here. However, the rearm
+ * delay is HZ / 50, which is acceptable.
+ */
+ queue_delayed_work(priv->workq, &priv->work, 0);
+
+ return IRQ_HANDLED;
+}
+
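+/* order three values in place so that a0 <= a1 <= a2; the median is then a1 */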
+#define sort3(a0, a1, a2) ({ \
+ if (a0 > a1) \
+ swap(a0, a1); \
+ if (a1 > a2) \
+ swap(a1, a2); \
+ if (a0 > a1) \
+ swap(a0, a1); \
+ })
+
+static void mc13783_ts_report_sample(struct mc13783_ts_priv *priv)
+{
+ struct input_dev *idev = priv->idev;
+ int x0, x1, x2, y0, y1, y2;
+ int cr0, cr1;
+
+ /*
+ * The values are only 10 bits wide; the two least significant
+ * bits are reserved for future 12-bit use and currently read as 0.
+ */
+ x0 = priv->sample[0] & 0xfff;
+ x1 = priv->sample[1] & 0xfff;
+ x2 = priv->sample[2] & 0xfff;
+ y0 = priv->sample[3] & 0xfff;
+ y1 = (priv->sample[0] >> 12) & 0xfff;
+ y2 = (priv->sample[1] >> 12) & 0xfff;
+ cr0 = (priv->sample[2] >> 12) & 0xfff;
+ cr1 = (priv->sample[3] >> 12) & 0xfff;
+
+ dev_dbg(&idev->dev,
+ "x: (% 4d,% 4d,% 4d) y: (% 4d, % 4d,% 4d) cr: (% 4d, % 4d)\n",
+ x0, x1, x2, y0, y1, y2, cr0, cr1);
+
+ sort3(x0, x1, x2);
+ sort3(y0, y1, y2);
+
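+ /* average the two contact resistance readings; zero means pen-up */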
+ cr0 = (cr0 + cr1) / 2;
+
+ if (!cr0 || !sample_tolerance ||
+ (x2 - x0 < sample_tolerance &&
+ y2 - y0 < sample_tolerance)) {
+ /* report the median coordinate and average pressure */
+ if (cr0) {
+ input_report_abs(idev, ABS_X, x1);
+ input_report_abs(idev, ABS_Y, y1);
+
+ dev_dbg(&idev->dev, "report (%d, %d, %d)\n",
+ x1, y1, 0x1000 - cr0);
+ queue_delayed_work(priv->workq, &priv->work, HZ / 50);
+ } else
+ dev_dbg(&idev->dev, "report release\n");
+
+ input_report_abs(idev, ABS_PRESSURE,
+ cr0 ? 0x1000 - cr0 : cr0);
+ input_report_key(idev, BTN_TOUCH, cr0);
+ input_sync(idev);
+ } else
+ dev_dbg(&idev->dev, "discard event\n");
+}
+
+static void mc13783_ts_work(struct work_struct *work)
+{
+ struct mc13783_ts_priv *priv =
+ container_of(work, struct mc13783_ts_priv, work.work);
+ unsigned int mode = MC13783_ADC_MODE_TS;
+ unsigned int channel = 12;
+
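+ /* run one ADC conversion in touchscreen mode; on success the four
+ * sample words hold the packed x/y/resistance readings */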
+ if (mc13783_adc_do_conversion(priv->mc13783,
+ mode, channel, priv->sample) == 0)
+ mc13783_ts_report_sample(priv);
+}
+
+static int mc13783_ts_open(struct input_dev *dev)
+{
+ struct mc13783_ts_priv *priv = input_get_drvdata(dev);
+ int ret;
+
+ mc13783_lock(priv->mc13783);
+
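+ /* clear any pending touchscreen interrupt before installing the handler */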
+ mc13783_ackirq(priv->mc13783, MC13783_IRQ_TS);
+
+ ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_TS,
+ mc13783_ts_handler, MC13783_TS_NAME, priv);
+ if (ret)
+ goto out;
+
+ ret = mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
+ MC13783_ADC0_TSMOD_MASK, MC13783_ADC0_TSMOD0);
+ if (ret)
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
+out:
+ mc13783_unlock(priv->mc13783);
+ return ret;
+}
+
+static void mc13783_ts_close(struct input_dev *dev)
+{
+ struct mc13783_ts_priv *priv = input_get_drvdata(dev);
+
+ mc13783_lock(priv->mc13783);
+ mc13783_reg_rmw(priv->mc13783, MC13783_ADC0,
+ MC13783_ADC0_TSMOD_MASK, 0);
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_TS, priv);
+ mc13783_unlock(priv->mc13783);
+
+ cancel_delayed_work_sync(&priv->work);
+}
+
+static int __init mc13783_ts_probe(struct platform_device *pdev)
+{
+ struct mc13783_ts_priv *priv;
+ struct input_dev *idev;
+ int ret = -ENOMEM;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ idev = input_allocate_device();
+ if (!priv || !idev)
+ goto err_free_mem;
+
+ INIT_DELAYED_WORK(&priv->work, mc13783_ts_work);
+ priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
+ priv->idev = idev;
+
+ /*
+ * We need a separate workqueue because mc13783_adc_do_conversion()
+ * uses keventd and would otherwise deadlock.
+ */
+ priv->workq = create_singlethread_workqueue("mc13783_ts");
+ if (!priv->workq)
+ goto err_free_mem;
+
+ idev->name = MC13783_TS_NAME;
+ idev->dev.parent = &pdev->dev;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(idev, ABS_X, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0xfff, 0, 0);
+
+ idev->open = mc13783_ts_open;
+ idev->close = mc13783_ts_close;
+
+ input_set_drvdata(idev, priv);
+
+ ret = input_register_device(priv->idev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "register input device failed with %d\n", ret);
+ goto err_destroy_wq;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(priv->workq);
+err_free_mem:
+ input_free_device(idev);
+ kfree(priv);
+ return ret;
+}
+
+static int __devexit mc13783_ts_remove(struct platform_device *pdev)
+{
+ struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ destroy_workqueue(priv->workq);
+ input_unregister_device(priv->idev);
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver mc13783_ts_driver = {
+ .remove = __devexit_p(mc13783_ts_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = MC13783_TS_NAME,
+ },
+};
+
+static int __init mc13783_ts_init(void)
+{
+ return platform_driver_probe(&mc13783_ts_driver, &mc13783_ts_probe);
+}
+module_init(mc13783_ts_init);
+
+static void __exit mc13783_ts_exit(void)
+{
+ platform_driver_unregister(&mc13783_ts_driver);
+}
+module_exit(mc13783_ts_exit);
+
+MODULE_DESCRIPTION("MC13783 input touchscreen driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" MC13783_TS_NAME);
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index 67fcd33595d..b79097e3028 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -233,7 +233,7 @@ static int pcap_ts_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pcap_ts_pm_ops = {
+static const struct dev_pm_ops pcap_ts_pm_ops = {
.suspend = pcap_ts_suspend,
.resume = pcap_ts_resume,
};
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index b115726dc08..c721c0a23eb 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -21,7 +21,10 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/input.h>
+
#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/interface/io/fbif.h>
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 82ed1cd14ff..664b0c519c3 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -29,7 +29,7 @@
#endif
/* Module parameters */
-int gigaset_debuglevel = DEBUG_DEFAULT;
+int gigaset_debuglevel;
EXPORT_SYMBOL_GPL(gigaset_debuglevel);
module_param_named(debug, gigaset_debuglevel, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, "debug level");
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index 5a6ae646a63..94b796d8405 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -108,8 +108,7 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
p_dev->io.NumPorts2 = 0;
/* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index f9bdff39cf4..e5deb15cf40 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -120,8 +120,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
p_dev->io.IOAddrLines = 5;
/* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
/* General socket configuration */
p_dev->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index a2f709f5397..c9a30b1c923 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -137,7 +137,7 @@ static int elsa_cs_probe(struct pcmcia_device *link)
local->cardnr = -1;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.Handler = NULL;
/*
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index af5d393cc2d..7836ec3c7f8 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -144,7 +144,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
link->priv = local;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.Handler = NULL;
/*
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index ea705394ce2..b0c5976cbdb 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -127,7 +127,7 @@ static int teles_probe(struct pcmcia_device *link)
link->priv = local;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.Handler = NULL;
/*
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 7e5f30dbc0a..f1e8af54dff 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -661,7 +661,7 @@ l1oip_socket_thread(void *data)
size_t recvbuf_size = 1500;
int recvlen;
struct socket *socket = NULL;
- DECLARE_COMPLETION(wait);
+ DECLARE_COMPLETION_ONSTACK(wait);
/* allocate buffer memory */
recvbuf = kmalloc(recvbuf_size, GFP_KERNEL);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e4f599f20e3..8a0e1ec95e4 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -229,6 +229,12 @@ config LEDS_PWM
help
This option enables support for pwm driven LEDs
+config LEDS_REGULATOR
+ tristate "REGULATOR driven LED support"
+ depends on LEDS_CLASS && REGULATOR
+ help
+ This option enables support for regulator driven LEDs.
+
config LEDS_BD2802
tristate "LED driver for BD2802 RGB LED"
depends on LEDS_CLASS && I2C
@@ -236,6 +242,33 @@ config LEDS_BD2802
This option enables support for BD2802GU RGB LED driver chips
accessed via the I2C bus.
+config LEDS_INTEL_SS4200
+ tristate "LED driver for Intel NAS SS4200 series"
+ depends on LEDS_CLASS && PCI && DMI
+ help
+ This option enables support for the Intel SS4200 series of
+ Network Attached Storage servers. You may control the hard
+ drive or power LEDs on the front panel. Using this driver
+ can stop the front LED from blinking after startup.
+
+config LEDS_LT3593
+ tristate "LED driver for LT3593 controllers"
+ depends on LEDS_CLASS && GENERIC_GPIO
+ help
+ This option enables support for LEDs driven by a Linear Technology
+ LT3593 controller. This controller uses a special one-wire pulse
+ coding protocol to set the brightness.
+
+config LEDS_ADP5520
+ tristate "LED Support for ADP5520/ADP5501 PMIC"
+ depends on LEDS_CLASS && PMIC_ADP5520
+ help
+ This option enables support for on-chip LED drivers found
+ on Analog Devices ADP5520/ADP5501 PMICs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called leds-adp5520.
+
comment "LED Triggers"
config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 46d72704d60..9e63869d7c0 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -29,6 +29,10 @@ obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
+obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
+obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
+obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
+obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index f2cc13d7681..782f95822ea 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -50,7 +50,7 @@ static ssize_t led_brightness_store(struct device *dev,
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;
- if (*after && isspace(*after))
+ if (isspace(*after))
count++;
if (count == size) {
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
new file mode 100644
index 00000000000..a8f31590213
--- /dev/null
+++ b/drivers/leds/leds-adp5520.c
@@ -0,0 +1,230 @@
+/*
+ * LEDs driver for Analog Devices ADP5520/ADP5501 MFD PMICs
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Loosely derived from leds-da903x:
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/adp5520.h>
+
+struct adp5520_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct device *master;
+ enum led_brightness new_brightness;
+ int id;
+ int flags;
+};
+
+static void adp5520_led_work(struct work_struct *work)
+{
+ struct adp5520_led *led = container_of(work, struct adp5520_led, work);
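+ /* scale the 0-255 LED class brightness down to the 0-63 current register range */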
+ adp5520_write(led->master, ADP5520_LED1_CURRENT + led->id - 1,
+ led->new_brightness >> 2);
+}
+
+static void adp5520_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct adp5520_led *led;
+
+ led = container_of(led_cdev, struct adp5520_led, cdev);
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static int adp5520_led_setup(struct adp5520_led *led)
+{
+ struct device *dev = led->master;
+ int flags = led->flags;
+ int ret = 0;
+
+ switch (led->id) {
+ case FLAG_ID_ADP5520_LED1_ADP5501_LED0:
+ ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
+ (flags >> ADP5520_FLAG_OFFT_SHIFT) &
+ ADP5520_FLAG_OFFT_MASK);
+ ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_LED1_EN);
+ break;
+ case FLAG_ID_ADP5520_LED2_ADP5501_LED1:
+ ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
+ ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
+ ADP5520_FLAG_OFFT_MASK) << 2);
+ ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_R3_MODE);
+ ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_LED2_EN);
+ break;
+ case FLAG_ID_ADP5520_LED3_ADP5501_LED2:
+ ret |= adp5520_set_bits(dev, ADP5520_LED_TIME,
+ ((flags >> ADP5520_FLAG_OFFT_SHIFT) &
+ ADP5520_FLAG_OFFT_MASK) << 4);
+ ret |= adp5520_clr_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_C3_MODE);
+ ret |= adp5520_set_bits(dev, ADP5520_LED_CONTROL,
+ ADP5520_LED3_EN);
+ break;
+ }
+
+ return ret;
+}
+
+static int __devinit adp5520_led_prepare(struct platform_device *pdev)
+{
+ struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = pdev->dev.parent;
+ int ret = 0;
+
+ ret |= adp5520_write(dev, ADP5520_LED1_CURRENT, 0);
+ ret |= adp5520_write(dev, ADP5520_LED2_CURRENT, 0);
+ ret |= adp5520_write(dev, ADP5520_LED3_CURRENT, 0);
+ ret |= adp5520_write(dev, ADP5520_LED_TIME, pdata->led_on_time << 6);
+ ret |= adp5520_write(dev, ADP5520_LED_FADE, FADE_VAL(pdata->fade_in,
+ pdata->fade_out));
+
+ return ret;
+}
+
+static int __devinit adp5520_led_probe(struct platform_device *pdev)
+{
+ struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_led *led, *led_dat;
+ struct led_info *cur_led;
+ int ret, i;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (pdata->num_leds > ADP5520_01_MAXLEDS) {
+ dev_err(&pdev->dev, "can't handle more than %d LEDS\n",
+ ADP5520_01_MAXLEDS);
+ return -EFAULT;
+ }
+
+ led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ ret = adp5520_led_prepare(pdev);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write\n");
+ goto err_free;
+ }
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ cur_led = &pdata->leds[i];
+ led_dat = &led[i];
+
+ led_dat->cdev.name = cur_led->name;
+ led_dat->cdev.default_trigger = cur_led->default_trigger;
+ led_dat->cdev.brightness_set = adp5520_led_set;
+ led_dat->cdev.brightness = LED_OFF;
+
+ if (cur_led->flags & ADP5520_FLAG_LED_MASK)
+ led_dat->flags = cur_led->flags;
+ else
+ led_dat->flags = i + 1;
+
+ led_dat->id = led_dat->flags & ADP5520_FLAG_LED_MASK;
+
+ led_dat->master = pdev->dev.parent;
+ led_dat->new_brightness = LED_OFF;
+
+ INIT_WORK(&led_dat->work, adp5520_led_work);
+
+ ret = led_classdev_register(led_dat->master, &led_dat->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n",
+ led_dat->id);
+ goto err;
+ }
+
+ ret = adp5520_led_setup(led_dat);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write\n");
+ i++;
+ goto err;
+ }
+ }
+
+ platform_set_drvdata(pdev, led);
+ return 0;
+
+err:
+ if (i > 0) {
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+ }
+
+err_free:
+ kfree(led);
+ return ret;
+}
+
+static int __devexit adp5520_led_remove(struct platform_device *pdev)
+{
+ struct adp5520_leds_platform_data *pdata = pdev->dev.platform_data;
+ struct adp5520_led *led;
+ int i;
+
+ led = platform_get_drvdata(pdev);
+
+ adp5520_clr_bits(led->master, ADP5520_LED_CONTROL,
+ ADP5520_LED1_EN | ADP5520_LED2_EN | ADP5520_LED3_EN);
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver adp5520_led_driver = {
+ .driver = {
+ .name = "adp5520-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp5520_led_probe,
+ .remove = __devexit_p(adp5520_led_remove),
+};
+
+static int __init adp5520_led_init(void)
+{
+ return platform_driver_register(&adp5520_led_driver);
+}
+module_init(adp5520_led_init);
+
+static void __exit adp5520_led_exit(void)
+{
+ platform_driver_unregister(&adp5520_led_driver);
+}
+module_exit(adp5520_led_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:adp5520-led");
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index 731d4eef342..f59ffadf512 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -11,11 +11,24 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/pci.h>
static int force = 0;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
+#define MSR_LBAR_GPIO 0x5140000C
+#define CS5535_GPIO_SIZE 256
+
+static u32 gpio_base;
+
+static struct pci_device_id divil_pci[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { } /* NULL entry */
+};
+MODULE_DEVICE_TABLE(pci, divil_pci);
+
struct alix_led {
struct led_classdev cdev;
unsigned short port;
@@ -30,9 +43,9 @@ static void alix_led_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct alix_led, cdev);
if (brightness)
- outl(led_dev->on_value, led_dev->port);
+ outl(led_dev->on_value, gpio_base + led_dev->port);
else
- outl(led_dev->off_value, led_dev->port);
+ outl(led_dev->off_value, gpio_base + led_dev->port);
}
static struct alix_led alix_leds[] = {
@@ -41,7 +54,7 @@ static struct alix_led alix_leds[] = {
.name = "alix:1",
.brightness_set = alix_led_set,
},
- .port = 0x6100,
+ .port = 0x00,
.on_value = 1 << 22,
.off_value = 1 << 6,
},
@@ -50,7 +63,7 @@ static struct alix_led alix_leds[] = {
.name = "alix:2",
.brightness_set = alix_led_set,
},
- .port = 0x6180,
+ .port = 0x80,
.on_value = 1 << 25,
.off_value = 1 << 9,
},
@@ -59,7 +72,7 @@ static struct alix_led alix_leds[] = {
.name = "alix:3",
.brightness_set = alix_led_set,
},
- .port = 0x6180,
+ .port = 0x80,
.on_value = 1 << 27,
.off_value = 1 << 11,
},
@@ -101,64 +114,104 @@ static struct platform_driver alix_led_driver = {
},
};
-static int __init alix_present(void)
+static int __init alix_present(unsigned long bios_phys,
+ const char *alix_sig,
+ size_t alix_sig_len)
{
- const unsigned long bios_phys = 0x000f0000;
const size_t bios_len = 0x00010000;
- const char alix_sig[] = "PC Engines ALIX.";
- const size_t alix_sig_len = sizeof(alix_sig) - 1;
-
const char *bios_virt;
const char *scan_end;
const char *p;
- int ret = 0;
+ char name[64];
if (force) {
printk(KERN_NOTICE "%s: forced to skip BIOS test, "
"assume system has ALIX.2 style LEDs\n",
KBUILD_MODNAME);
- ret = 1;
- goto out;
+ return 1;
}
bios_virt = phys_to_virt(bios_phys);
scan_end = bios_virt + bios_len - (alix_sig_len + 2);
for (p = bios_virt; p < scan_end; p++) {
const char *tail;
+ char *a;
- if (memcmp(p, alix_sig, alix_sig_len) != 0) {
+ if (memcmp(p, alix_sig, alix_sig_len) != 0)
continue;
- }
+
+ memcpy(name, p, sizeof(name));
+
+ /* replace the first \0 character in the string with a space */
+ a = strchr(name, '\0');
+ if (a)
+ *a = ' ';
+
+ /* terminate the string at the carriage return */
+ a = strchr(name, '\r');
+ if (a)
+ *a = '\0';
tail = p + alix_sig_len;
- if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') {
+ if ((tail[0] == '2' || tail[0] == '3')) {
printk(KERN_INFO
"%s: system is recognized as \"%s\"\n",
- KBUILD_MODNAME, p);
- ret = 1;
- break;
+ KBUILD_MODNAME, name);
+ return 1;
}
}
-out:
- return ret;
+ return 0;
}
static struct platform_device *pdev;
-static int __init alix_led_init(void)
+static int __init alix_pci_led_init(void)
{
- int ret;
+ u32 low, hi;
- if (!alix_present()) {
- ret = -ENODEV;
- goto out;
+ if (pci_dev_present(divil_pci) == 0) {
+ printk(KERN_WARNING KBUILD_MODNAME": DIVIL not found\n");
+ return -ENODEV;
}
- /* enable output on GPIO for LED 1,2,3 */
- outl(1 << 6, 0x6104);
- outl(1 << 9, 0x6184);
- outl(1 << 11, 0x6184);
+ /* Grab the GPIO I/O range */
+ rdmsr(MSR_LBAR_GPIO, low, hi);
+
+ /* Check the mask and whether GPIO is enabled (sanity check) */
+ if (hi != 0x0000f001) {
+ printk(KERN_WARNING KBUILD_MODNAME": GPIO not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Mask off the IO base address */
+ gpio_base = low & 0x0000ff00;
+
+ if (!request_region(gpio_base, CS5535_GPIO_SIZE, KBUILD_MODNAME)) {
+ printk(KERN_ERR KBUILD_MODNAME": can't allocate I/O for GPIO\n");
+ return -ENODEV;
+ }
+
+ /* Set GPIO function to output */
+ outl(1 << 6, gpio_base + 0x04);
+ outl(1 << 9, gpio_base + 0x84);
+ outl(1 << 11, gpio_base + 0x84);
+
+ return 0;
+}
+
+static int __init alix_led_init(void)
+{
+ int ret = -ENODEV;
+ const char tinybios_sig[] = "PC Engines ALIX.";
+ const char coreboot_sig[] = "PC Engines\0ALIX.";
+
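+ /* tinyBIOS stores its signature at 0xf0000, coreboot at 0x500; the
+ * embedded NUL in the coreboot string is handled by alix_present() */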
+ if (alix_present(0xf0000, tinybios_sig, sizeof(tinybios_sig) - 1) ||
+ alix_present(0x500, coreboot_sig, sizeof(coreboot_sig) - 1))
+ ret = alix_pci_led_init();
+
+ if (ret < 0)
+ return ret;
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (!IS_ERR(pdev)) {
@@ -168,7 +221,6 @@ static int __init alix_led_init(void)
} else
ret = PTR_ERR(pdev);
-out:
return ret;
}
@@ -176,6 +228,7 @@ static void __exit alix_led_exit(void)
{
platform_device_unregister(pdev);
platform_driver_unregister(&alix_led_driver);
+ release_region(gpio_base, CS5535_GPIO_SIZE);
}
module_init(alix_led_init);
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 8816806accd..da5fb016b1a 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -31,7 +31,7 @@ static struct led_classdev qube_front_led = {
.name = "qube::front",
.brightness = LED_FULL,
.brightness_set = qube_front_led_set,
- .default_trigger = "ide-disk",
+ .default_trigger = "default-on",
};
static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
@@ -43,7 +43,7 @@ static int __devinit cobalt_qube_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, res->end - res->start + 1);
+ led_port = ioremap(res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index defc212105f..438d4838463 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -84,7 +84,7 @@ static int __devinit cobalt_raq_led_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- led_port = ioremap(res->start, res->end - res->start + 1);
+ led_port = ioremap(res->start, resource_size(res));
if (!led_port)
return -ENOMEM;
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 5f7c9c5c09b..d11d05be0de 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -22,6 +22,13 @@
#include <mach/hardware.h>
#include <asm/io.h>
+#define FSG_LED_WLAN_BIT 0
+#define FSG_LED_WAN_BIT 1
+#define FSG_LED_SATA_BIT 2
+#define FSG_LED_USB_BIT 4
+#define FSG_LED_RING_BIT 5
+#define FSG_LED_SYNC_BIT 7
+
static short __iomem *latch_address;
static unsigned short latch_value;
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
new file mode 100644
index 00000000000..fee40a84195
--- /dev/null
+++ b/drivers/leds/leds-lt3593.c
@@ -0,0 +1,217 @@
+/*
+ * LEDs driver for LT3593 controllers
+ *
+ * See the datasheet at http://cds.linear.com/docs/Datasheet/3593f.pdf
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * Based on leds-gpio.c,
+ *
+ * Copyright (C) 2007 8D Technologies inc.
+ * Raphael Assenat <raph@8d.com>
+ * Copyright (C) 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+struct lt3593_led_data {
+ struct led_classdev cdev;
+ unsigned gpio;
+ struct work_struct work;
+ u8 new_level;
+};
+
+static void lt3593_led_work(struct work_struct *work)
+{
+ int pulses;
+ struct lt3593_led_data *led_dat =
+ container_of(work, struct lt3593_led_data, work);
+
+ /*
+ * The LT3593 resets its internal current level register to the maximum
+ * level on the first falling edge on the control pin. Each following
+ * falling edge decreases the current level by 625uA. Up to 32 pulses
+ * can be sent, so the maximum current reduction is 20mA.
+ * After a timeout of 128us, the value is taken from the register and
+ * applied to the output driver.
+ */
+
+ if (led_dat->new_level == 0) {
+ gpio_set_value_cansleep(led_dat->gpio, 0);
+ return;
+ }
+
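+ /* map brightness 1..255 onto 32..0 falling edges; the 0-edge case
+ * (full brightness) is handled below with a single reset pulse */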
+ pulses = 32 - (led_dat->new_level * 32) / 255;
+
+ if (pulses == 0) {
+ gpio_set_value_cansleep(led_dat->gpio, 0);
+ mdelay(1);
+ gpio_set_value_cansleep(led_dat->gpio, 1);
+ return;
+ }
+
+ gpio_set_value_cansleep(led_dat->gpio, 1);
+
+ while (pulses--) {
+ gpio_set_value_cansleep(led_dat->gpio, 0);
+ udelay(1);
+ gpio_set_value_cansleep(led_dat->gpio, 1);
+ udelay(1);
+ }
+}
+
+static void lt3593_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct lt3593_led_data *led_dat =
+ container_of(led_cdev, struct lt3593_led_data, cdev);
+
+ led_dat->new_level = value;
+ schedule_work(&led_dat->work);
+}
+
+static int __devinit create_lt3593_led(const struct gpio_led *template,
+ struct lt3593_led_data *led_dat, struct device *parent)
+{
+ int ret, state;
+
+ /* skip leds on GPIOs that aren't available */
+ if (!gpio_is_valid(template->gpio)) {
+ printk(KERN_INFO "%s: skipping unavailable LT3593 LED at gpio %d (%s)\n",
+ KBUILD_MODNAME, template->gpio, template->name);
+ return 0;
+ }
+
+ ret = gpio_request(template->gpio, template->name);
+ if (ret < 0)
+ return ret;
+
+ led_dat->cdev.name = template->name;
+ led_dat->cdev.default_trigger = template->default_trigger;
+ led_dat->gpio = template->gpio;
+
+ led_dat->cdev.brightness_set = lt3593_led_set;
+
+ state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
+
+ if (!template->retain_state_suspended)
+ led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+
+ ret = gpio_direction_output(led_dat->gpio, state);
+ if (ret < 0)
+ goto err;
+
+ INIT_WORK(&led_dat->work, lt3593_led_work);
+
+ ret = led_classdev_register(parent, &led_dat->cdev);
+ if (ret < 0)
+ goto err;
+
+ printk(KERN_INFO "%s: registered LT3593 LED '%s' at GPIO %d\n",
+ KBUILD_MODNAME, template->name, template->gpio);
+
+ return 0;
+
+err:
+ gpio_free(led_dat->gpio);
+ return ret;
+}
+
+static void delete_lt3593_led(struct lt3593_led_data *led)
+{
+ if (!gpio_is_valid(led->gpio))
+ return;
+
+ led_classdev_unregister(&led->cdev);
+ cancel_work_sync(&led->work);
+ gpio_free(led->gpio);
+}
+
+static int __devinit lt3593_led_probe(struct platform_device *pdev)
+{
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct lt3593_led_data *leds_data;
+ int i, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ leds_data = kzalloc(sizeof(struct lt3593_led_data) * pdata->num_leds,
+ GFP_KERNEL);
+ if (!leds_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_leds; i++) {
+ ret = create_lt3593_led(&pdata->leds[i], &leds_data[i],
+ &pdev->dev);
+ if (ret < 0)
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, leds_data);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--)
+ delete_lt3593_led(&leds_data[i]);
+
+ kfree(leds_data);
+
+ return ret;
+}
+
+static int __devexit lt3593_led_remove(struct platform_device *pdev)
+{
+ int i;
+ struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
+ struct lt3593_led_data *leds_data;
+
+ leds_data = platform_get_drvdata(pdev);
+
+ for (i = 0; i < pdata->num_leds; i++)
+ delete_lt3593_led(&leds_data[i]);
+
+ kfree(leds_data);
+
+ return 0;
+}
+
+static struct platform_driver lt3593_led_driver = {
+ .probe = lt3593_led_probe,
+ .remove = __devexit_p(lt3593_led_remove),
+ .driver = {
+ .name = "leds-lt3593",
+ .owner = THIS_MODULE,
+ },
+};
+
+MODULE_ALIAS("platform:leds-lt3593");
+
+static int __init lt3593_led_init(void)
+{
+ return platform_driver_register(&lt3593_led_driver);
+}
+
+static void __exit lt3593_led_exit(void)
+{
+ platform_driver_unregister(&lt3593_led_driver);
+}
+
+module_init(lt3593_led_init);
+module_exit(lt3593_led_exit);
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("LED driver for LT3593 controllers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index cdfdc8714e1..88b1dd091cf 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -27,7 +27,6 @@ struct led_pwm_data {
struct pwm_device *pwm;
unsigned int active_low;
unsigned int period;
- unsigned int max_brightness;
};
static void led_pwm_set(struct led_classdev *led_cdev,
@@ -35,7 +34,7 @@ static void led_pwm_set(struct led_classdev *led_cdev,
{
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
- unsigned int max = led_dat->max_brightness;
+ unsigned int max = led_dat->cdev.max_brightness;
unsigned int period = led_dat->period;
if (brightness == 0) {
@@ -77,10 +76,10 @@ static int led_pwm_probe(struct platform_device *pdev)
led_dat->cdev.name = cur_led->name;
led_dat->cdev.default_trigger = cur_led->default_trigger;
led_dat->active_low = cur_led->active_low;
- led_dat->max_brightness = cur_led->max_brightness;
led_dat->period = cur_led->pwm_period_ns;
led_dat->cdev.brightness_set = led_pwm_set;
led_dat->cdev.brightness = LED_OFF;
+ led_dat->cdev.max_brightness = cur_led->max_brightness;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
new file mode 100644
index 00000000000..7f00de3ef92
--- /dev/null
+++ b/drivers/leds/leds-regulator.c
@@ -0,0 +1,242 @@
+/*
+ * leds-regulator.c - LED class driver for regulator driven LEDs.
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * Inspired by leds-wm8350 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/leds-regulator.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define to_regulator_led(led_cdev) \
+ container_of(led_cdev, struct regulator_led, cdev)
+
+struct regulator_led {
+ struct led_classdev cdev;
+ enum led_brightness value;
+ int enabled;
+ struct mutex mutex;
+ struct work_struct work;
+
+ struct regulator *vcc;
+};
+
+static inline int led_regulator_get_max_brightness(struct regulator *supply)
+{
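+ /* fall back to a simple on/off LED (max_brightness == 1) when the
+ * regulator cannot list or set voltages */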
+ int ret;
+ int voltage = regulator_list_voltage(supply, 0);
+
+ if (voltage <= 0)
+ return 1;
+
+ /* Even if the regulator cannot change voltages,
+ * we still assume it can be enabled and disabled,
+ * so the LED can at least be turned on and off.
+ */
+ ret = regulator_set_voltage(supply, voltage, voltage);
+ if (ret < 0)
+ return 1;
+
+ return regulator_count_voltages(supply);
+}
+
+static int led_regulator_get_voltage(struct regulator *supply,
+ enum led_brightness brightness)
+{
+ if (brightness == 0)
+ return -EINVAL;
+
+ return regulator_list_voltage(supply, brightness - 1);
+}
+
+
+static void regulator_led_enable(struct regulator_led *led)
+{
+ int ret;
+
+ if (led->enabled)
+ return;
+
+ ret = regulator_enable(led->vcc);
+ if (ret != 0) {
+ dev_err(led->cdev.dev, "Failed to enable vcc: %d\n", ret);
+ return;
+ }
+
+ led->enabled = 1;
+}
+
+static void regulator_led_disable(struct regulator_led *led)
+{
+ int ret;
+
+ if (!led->enabled)
+ return;
+
+ ret = regulator_disable(led->vcc);
+ if (ret != 0) {
+ dev_err(led->cdev.dev, "Failed to disable vcc: %d\n", ret);
+ return;
+ }
+
+ led->enabled = 0;
+}
+
+static void regulator_led_set_value(struct regulator_led *led)
+{
+ int voltage;
+ int ret;
+
+ mutex_lock(&led->mutex);
+
+ if (led->value == LED_OFF) {
+ regulator_led_disable(led);
+ goto out;
+ }
+
+ if (led->cdev.max_brightness > 1) {
+ voltage = led_regulator_get_voltage(led->vcc, led->value);
+ dev_dbg(led->cdev.dev, "brightness: %d voltage: %d\n",
+ led->value, voltage);
+
+ ret = regulator_set_voltage(led->vcc, voltage, voltage);
+ if (ret != 0)
+ dev_err(led->cdev.dev, "Failed to set voltage %d: %d\n",
+ voltage, ret);
+ }
+
+ regulator_led_enable(led);
+
+out:
+ mutex_unlock(&led->mutex);
+}
+
+static void led_work(struct work_struct *work)
+{
+ struct regulator_led *led;
+
+ led = container_of(work, struct regulator_led, work);
+ regulator_led_set_value(led);
+}
+
+static void regulator_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct regulator_led *led = to_regulator_led(led_cdev);
+
+ led->value = value;
+ schedule_work(&led->work);
+}
+
+static int __devinit regulator_led_probe(struct platform_device *pdev)
+{
+ struct led_regulator_platform_data *pdata = pdev->dev.platform_data;
+ struct regulator_led *led;
+ struct regulator *vcc;
+ int ret = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+
+ vcc = regulator_get_exclusive(&pdev->dev, "vled");
+ if (IS_ERR(vcc)) {
+ dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
+ return PTR_ERR(vcc);
+ }
+
+ led = kzalloc(sizeof(*led), GFP_KERNEL);
+ if (led == NULL) {
+ ret = -ENOMEM;
+ goto err_vcc;
+ }
+
+ led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
+ if (pdata->brightness > led->cdev.max_brightness) {
+ dev_err(&pdev->dev, "Invalid default brightness %d\n",
+ pdata->brightness);
+ ret = -EINVAL;
+ goto err_led;
+ }
+ led->value = pdata->brightness;
+
+ led->cdev.brightness_set = regulator_led_brightness_set;
+ led->cdev.name = pdata->name;
+ led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led->vcc = vcc;
+
+ mutex_init(&led->mutex);
+ INIT_WORK(&led->work, led_work);
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(&pdev->dev, &led->cdev);
+ if (ret < 0) {
+ cancel_work_sync(&led->work);
+ goto err_led;
+ }
+
+ /* to expose the default value to userspace */
+ led->cdev.brightness = led->value;
+
+ /* Set the default led status */
+ regulator_led_set_value(led);
+
+ return 0;
+
+err_led:
+ kfree(led);
+err_vcc:
+ regulator_put(vcc);
+ return ret;
+}
+
+static int __devexit regulator_led_remove(struct platform_device *pdev)
+{
+ struct regulator_led *led = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&led->cdev);
+ cancel_work_sync(&led->work);
+ regulator_led_disable(led);
+ regulator_put(led->vcc);
+ kfree(led);
+ return 0;
+}
+
+static struct platform_driver regulator_led_driver = {
+ .driver = {
+ .name = "leds-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = regulator_led_probe,
+ .remove = __devexit_p(regulator_led_remove),
+};
+
+static int __init regulator_led_init(void)
+{
+ return platform_driver_register(&regulator_led_driver);
+}
+module_init(regulator_led_init);
+
+static void __exit regulator_led_exit(void)
+{
+ platform_driver_unregister(&regulator_led_driver);
+}
+module_exit(regulator_led_exit);
+
+MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
+MODULE_DESCRIPTION("Regulator driven LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-regulator");
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
new file mode 100644
index 00000000000..97f04984c1c
--- /dev/null
+++ b/drivers/leds/leds-ss4200.c
@@ -0,0 +1,556 @@
+/*
+ * SS4200-E Hardware API
+ * Copyright (c) 2009, Intel Corporation.
+ * Copyright IBM Corporation, 2009
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Author: Dave Hansen <dave@sr71.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+MODULE_AUTHOR("Rodney Girod <rgirod@confocus.com>, Dave Hansen <dave@sr71.net>");
+MODULE_DESCRIPTION("Intel NAS/Home Server ICH7 GPIO Driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * ICH7 LPC/GPIO PCI Config register offsets
+ */
+#define PMBASE 0x040
+#define GPIO_BASE 0x048
+#define GPIO_CTRL 0x04c
+#define GPIO_EN 0x010
+
+/*
+ * The ICH7 GPIO register block is 64 bytes in size.
+ */
+#define ICH7_GPIO_SIZE 64
+
+/*
+ * Define register offsets within the ICH7 register block.
+ */
+#define GPIO_USE_SEL 0x000
+#define GP_IO_SEL 0x004
+#define GP_LVL 0x00c
+#define GPO_BLINK 0x018
+#define GPI_INV 0x030
+#define GPIO_USE_SEL2 0x034
+#define GP_IO_SEL2 0x038
+#define GP_LVL2 0x03c
+
+/*
+ * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
+ */
+static struct pci_device_id ich7_lpc_pci_id[] =
+{
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
+ { } /* NULL entry */
+};
+
+MODULE_DEVICE_TABLE(pci, ich7_lpc_pci_id);
+
+static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
+{
+ pr_info("detected '%s'\n", id->ident);
+ return 1;
+}
+
+static unsigned int __initdata nodetect;
+module_param_named(nodetect, nodetect, bool, 0);
+MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
+
+/*
+ * struct nas_led_whitelist - List of known good models
+ *
+ * Contains the known good models this driver is compatible with.
+ * When adding a new model, try to be as strict as possible, to keep
+ * false positives (a model detected as supported when it actually is
+ * not) to a minimum.
+ */
+static struct dmi_system_id __initdata nas_led_whitelist[] = {
+ {
+ .callback = ss4200_led_dmi_callback,
+ .ident = "Intel SS4200-E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SS4200-E"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
+ }
+ },
+};
+
+/*
+ * Base I/O address assigned to the Power Management register block
+ */
+static u32 g_pm_io_base;
+
+/*
+ * Base I/O address assigned to the ICH7 GPIO register block
+ */
+static u32 nas_gpio_io_base;
+
+/*
+ * When we successfully register a region, we are returned a resource.
+ * We use these to identify which regions we need to release on our way
+ * back out.
+ */
+static struct resource *gp_gpio_resource;
+
+struct nasgpio_led {
+ char *name;
+ u32 gpio_bit;
+ struct led_classdev led_cdev;
+};
+
+/*
+ * gpio_bit(s) are the ICH7 GPIO bit assignments
+ */
+static struct nasgpio_led nasgpio_leds[] = {
+ { .name = "hdd1:blue:sata", .gpio_bit = 0 },
+ { .name = "hdd1:amber:sata", .gpio_bit = 1 },
+ { .name = "hdd2:blue:sata", .gpio_bit = 2 },
+ { .name = "hdd2:amber:sata", .gpio_bit = 3 },
+ { .name = "hdd3:blue:sata", .gpio_bit = 4 },
+ { .name = "hdd3:amber:sata", .gpio_bit = 5 },
+ { .name = "hdd4:blue:sata", .gpio_bit = 6 },
+ { .name = "hdd4:amber:sata", .gpio_bit = 7 },
+ { .name = "power:blue:power", .gpio_bit = 27},
+ { .name = "power:amber:power", .gpio_bit = 28},
+};
+
+#define NAS_RECOVERY 0x00000400 /* GPIO10 */
+
+static struct nasgpio_led *
+led_classdev_to_nasgpio_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct nasgpio_led, led_cdev);
+}
+
+static struct nasgpio_led *get_led_named(char *name)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
+ if (strcmp(nasgpio_leds[i].name, name))
+ continue;
+ return &nasgpio_leds[i];
+ }
+ return NULL;
+}
+
+/*
+ * This protects access to the gpio ports.
+ */
+static DEFINE_SPINLOCK(nasgpio_gpio_lock);
+
+/*
+ * There are two gpio ports, one for blinking and the other
+ * for power. @port tells us if we're doing blinking or
+ * power control.
+ *
+ * Caller must hold nasgpio_gpio_lock
+ */
+static void __nasgpio_led_set_attr(struct led_classdev *led_cdev,
+ u32 port, u32 value)
+{
+ struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
+ u32 gpio_out;
+
+ gpio_out = inl(nas_gpio_io_base + port);
+ if (value)
+ gpio_out |= (1<<led->gpio_bit);
+ else
+ gpio_out &= ~(1<<led->gpio_bit);
+
+ outl(gpio_out, nas_gpio_io_base + port);
+}
+
+static void nasgpio_led_set_attr(struct led_classdev *led_cdev,
+ u32 port, u32 value)
+{
+ spin_lock(&nasgpio_gpio_lock);
+ __nasgpio_led_set_attr(led_cdev, port, value);
+ spin_unlock(&nasgpio_gpio_lock);
+}
+
+u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port)
+{
+ struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev);
+ u32 gpio_in;
+
+ spin_lock(&nasgpio_gpio_lock);
+ gpio_in = inl(nas_gpio_io_base + port);
+ spin_unlock(&nasgpio_gpio_lock);
+ if (gpio_in & (1<<led->gpio_bit))
+ return 1;
+ return 0;
+}
+
+/*
+ * There is actual brightness control in the hardware,
+ * but it is via smbus commands and not implemented
+ * in this driver.
+ */
+static void nasgpio_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ u32 setting = 0;
+ if (brightness >= LED_HALF)
+ setting = 1;
+ /*
+ * Hold the lock across both operations. This ensures
+ * consistency so that both the "turn off blinking"
+ * and "turn light off" operations complete as a set.
+ */
+ spin_lock(&nasgpio_gpio_lock);
+ /*
+ * LED class documentation asks that past blink state
+ * be disabled when brightness is turned to zero.
+ */
+ if (brightness == 0)
+ __nasgpio_led_set_attr(led_cdev, GPO_BLINK, 0);
+ __nasgpio_led_set_attr(led_cdev, GP_LVL, setting);
+ spin_unlock(&nasgpio_gpio_lock);
+}
+
+static int nasgpio_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u32 setting = 1;
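+ /* the hardware supports only one fixed blink rate, so accept just the
+ * 500ms/500ms pattern (or 0/0 to request the default) */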
+ if (!(*delay_on == 0 && *delay_off == 0) &&
+ !(*delay_on == 500 && *delay_off == 500))
+ return -EINVAL;
+ /*
+ * These are very approximate.
+ */
+ *delay_on = 500;
+ *delay_off = 500;
+
+ nasgpio_led_set_attr(led_cdev, GPO_BLINK, setting);
+
+ return 0;
+}
+
+
+/*
+ * Initialize the ICH7 GPIO registers for NAS usage. The BIOS should have
+ * already taken care of this, but we will do so in a non-destructive manner
+ * so that we have what we need whether the BIOS did it or not.
+ */
+static int __devinit ich7_gpio_init(struct device *dev)
+{
+ int i;
+ u32 config_data = 0;
+ u32 all_nas_led = 0;
+
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
+ all_nas_led |= (1<<nasgpio_leds[i].gpio_bit);
+
+ spin_lock(&nasgpio_gpio_lock);
+ /*
+ * We need to enable all of the GPIO lines used by the NAS box,
+ * so we will read the current Use Selection and add our usage
+ * to it. This should be benign with regard to the original
+ * BIOS configuration.
+ */
+ config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
+ dev_dbg(dev, ": Data read from GPIO_USE_SEL = 0x%08x\n", config_data);
+ config_data |= all_nas_led + NAS_RECOVERY;
+ outl(config_data, nas_gpio_io_base + GPIO_USE_SEL);
+ config_data = inl(nas_gpio_io_base + GPIO_USE_SEL);
+ dev_dbg(dev, ": GPIO_USE_SEL = 0x%08x\n\n", config_data);
+
+ /*
+ * The LED GPIO outputs need to be configured for output, so we
+ * will ensure that all LED lines are cleared for output and the
+ * RECOVERY line ready for input. This too should be benign with
+ * regard to BIOS configuration.
+ */
+ config_data = inl(nas_gpio_io_base + GP_IO_SEL);
+ dev_dbg(dev, ": Data read from GP_IO_SEL = 0x%08x\n",
+ config_data);
+ config_data &= ~all_nas_led;
+ config_data |= NAS_RECOVERY;
+ outl(config_data, nas_gpio_io_base + GP_IO_SEL);
+ config_data = inl(nas_gpio_io_base + GP_IO_SEL);
+ dev_dbg(dev, ": GP_IO_SEL = 0x%08x\n", config_data);
+
+ /*
+ * In our final system, the BIOS will initialize the state of all
+ * of the LEDs. For now, we turn them all off (or Low).
+ */
+ config_data = inl(nas_gpio_io_base + GP_LVL);
+ dev_dbg(dev, ": Data read from GP_LVL = 0x%08x\n", config_data);
+ /*
+ * In our final system, the BIOS will initialize the blink state of all
+ * of the LEDs. For now, we turn blink off for all of them.
+ */
+ config_data = inl(nas_gpio_io_base + GPO_BLINK);
+ dev_dbg(dev, ": Data read from GPO_BLINK = 0x%08x\n", config_data);
+
+ /*
+ * At this moment, I am unsure if anything needs to happen with GPI_INV
+ */
+ config_data = inl(nas_gpio_io_base + GPI_INV);
+ dev_dbg(dev, ": Data read from GPI_INV = 0x%08x\n", config_data);
+
+ spin_unlock(&nasgpio_gpio_lock);
+ return 0;
+}
+
+static void ich7_lpc_cleanup(struct device *dev)
+{
+ /*
+ * If we were given exclusive use of the GPIO
+ * I/O Address range, we must return it.
+ */
+ if (gp_gpio_resource) {
+ dev_dbg(dev, ": Releasing GPIO I/O addresses\n");
+ release_region(nas_gpio_io_base, ICH7_GPIO_SIZE);
+ gp_gpio_resource = NULL;
+ }
+}
+
+/*
+ * The OS has determined that the LPC of the Intel ICH7 Southbridge is present,
+ * so we can retrieve the required operational information and prepare the GPIO.
+ */
+static struct pci_dev *nas_gpio_pci_dev;
+static int __devinit ich7_lpc_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int status;
+ u32 gc = 0;
+
+ status = pci_enable_device(dev);
+ if (status) {
+ dev_err(&dev->dev, "pci_enable_device failed\n");
+ return -EIO;
+ }
+
+ nas_gpio_pci_dev = dev;
+ status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base);
+ if (status)
+ goto out;
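+ /* mask off the low reserved/indicator bits to get the PM I/O base */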
+ g_pm_io_base &= 0x00000ff80;
+
+ status = pci_read_config_dword(dev, GPIO_CTRL, &gc);
+ if (!(GPIO_EN & gc)) {
+ status = -EEXIST;
+ dev_info(&dev->dev,
+ "ERROR: The LPC GPIO Block has not been enabled.\n");
+ goto out;
+ }
+
+ status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base);
+ if (0 > status) {
+ dev_info(&dev->dev, "Unable to read GPIOBASE.\n");
+ goto out;
+ }
+ dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base);
+ nas_gpio_io_base &= 0x00000ffc0;
+
+ /*
+ * Ensure that we have exclusive access to the GPIO I/O address range.
+ */
+ gp_gpio_resource = request_region(nas_gpio_io_base, ICH7_GPIO_SIZE,
+ KBUILD_MODNAME);
+ if (NULL == gp_gpio_resource) {
+ dev_info(&dev->dev,
+ "ERROR Unable to register GPIO I/O addresses.\n");
+ status = -1;
+ goto out;
+ }
+
+ /*
+ * Initialize the GPIO for NAS/Home Server Use
+ */
+ ich7_gpio_init(&dev->dev);
+
+out:
+ if (status) {
+ ich7_lpc_cleanup(&dev->dev);
+ pci_disable_device(dev);
+ }
+ return status;
+}
+
+static void ich7_lpc_remove(struct pci_dev *dev)
+{
+ ich7_lpc_cleanup(&dev->dev);
+ pci_disable_device(dev);
+}
+
+/*
+ * pci_driver structure passed to the PCI modules
+ */
+static struct pci_driver nas_gpio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ich7_lpc_pci_id,
+ .probe = ich7_lpc_probe,
+ .remove = ich7_lpc_remove,
+};
+
+static struct led_classdev *get_classdev_for_led_nr(int nr)
+{
+ struct nasgpio_led *nas_led = &nasgpio_leds[nr];
+ struct led_classdev *led = &nas_led->led_cdev;
+ return led;
+}
+
+
+static void set_power_light_amber_noblink(void)
+{
+ struct nasgpio_led *amber = get_led_named("power:amber:power");
+ struct nasgpio_led *blue = get_led_named("power:blue:power");
+
+ if (!amber || !blue)
+ return;
+ /*
+ * LED_OFF implies disabling future blinking
+ */
+ pr_debug("setting blue off and amber on\n");
+
+ nasgpio_led_set_brightness(&blue->led_cdev, LED_OFF);
+ nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL);
+}
+
+static ssize_t nas_led_blink_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led = dev_get_drvdata(dev);
+ int blinking = 0;
+ if (nasgpio_led_get_attr(led, GPO_BLINK))
+ blinking = 1;
+ return sprintf(buf, "%u\n", blinking);
+}
+
+static ssize_t nas_led_blink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ struct led_classdev *led = dev_get_drvdata(dev);
+ unsigned long blink_state;
+
+ ret = strict_strtoul(buf, 10, &blink_state);
+ if (ret)
+ return ret;
+
+ nasgpio_led_set_attr(led, GPO_BLINK, blink_state);
+
+ return size;
+}
+
+static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store);
+
+static int register_nasgpio_led(int led_nr)
+{
+ int ret;
+ struct nasgpio_led *nas_led = &nasgpio_leds[led_nr];
+ struct led_classdev *led = get_classdev_for_led_nr(led_nr);
+
+ led->name = nas_led->name;
+ led->brightness = LED_OFF;
+ if (nasgpio_led_get_attr(led, GP_LVL))
+ led->brightness = LED_FULL;
+ led->brightness_set = nasgpio_led_set_brightness;
+ led->blink_set = nasgpio_led_set_blink;
+ ret = led_classdev_register(&nas_gpio_pci_dev->dev, led);
+ if (ret)
+ return ret;
+ ret = device_create_file(led->dev, &dev_attr_blink);
+ if (ret)
+ led_classdev_unregister(led);
+ return ret;
+}
+
+static void unregister_nasgpio_led(int led_nr)
+{
+ struct led_classdev *led = get_classdev_for_led_nr(led_nr);
+ led_classdev_unregister(led);
+ device_remove_file(led->dev, &dev_attr_blink);
+}
+/*
+ * module load/initialization
+ */
+static int __init nas_gpio_init(void)
+{
+ int i;
+ int ret = 0;
+ int nr_devices = 0;
+
+ nr_devices = dmi_check_system(nas_led_whitelist);
+ if (nodetect) {
+ pr_info("skipping hardware autodetection\n");
+ pr_info("Please send 'dmidecode' output to dave@sr71.net\n");
+ nr_devices++;
+ }
+
+ if (nr_devices <= 0) {
+ pr_info("no LED devices found\n");
+ return -ENODEV;
+ }
+
+ pr_info("registering PCI driver\n");
+ ret = pci_register_driver(&nas_gpio_pci_driver);
+ if (ret)
+ return ret;
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) {
+ ret = register_nasgpio_led(i);
+ if (ret)
+ goto out_err;
+ }
+ /*
+ * When the system powers on, the BIOS leaves the power
+ * light blue and blinking. This will turn it solid
+ * amber once the driver is loaded.
+ */
+ set_power_light_amber_noblink();
+ return 0;
+out_err:
+ for (; i >= 0; i--)
+ unregister_nasgpio_led(i);
+ pci_unregister_driver(&nas_gpio_pci_driver);
+ return ret;
+}
+
+/*
+ * module unload
+ */
+static void __exit nas_gpio_exit(void)
+{
+ int i;
+ pr_info("Unregistering driver\n");
+ for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++)
+ unregister_nasgpio_led(i);
+ pci_unregister_driver(&nas_gpio_pci_driver);
+}
+
+module_init(nas_gpio_init);
+module_exit(nas_gpio_exit);
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 3b83406de75..38b3378be44 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -83,7 +83,7 @@ static ssize_t led_delay_on_store(struct device *dev,
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;
- if (*after && isspace(*after))
+ if (isspace(*after))
count++;
if (count == size) {
@@ -127,7 +127,7 @@ static ssize_t led_delay_off_store(struct device *dev,
unsigned long state = simple_strtoul(buf, &after, 10);
size_t count = after - buf;
- if (*after && isspace(*after))
+ if (isspace(*after))
count++;
if (count == size) {
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6ae388849a3..fb2b7ef7868 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
(SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}
-static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
/*S:010
* We approach the Switcher.
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
* meanwhile). If that's not the case, we pretend everything in the
* Guest has changed.
*/
- if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
- __get_cpu_var(last_cpu) = cpu;
+ if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+ __get_cpu_var(lg_last_cpu) = cpu;
cpu->last_pages = pages;
cpu->changed = CHANGED_ALL;
}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 588a5b0bc4b..26a303a1d1a 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -379,6 +379,11 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->ofdev.dev.parent = parent;
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
+ dev->ofdev.dev.dma_parms = &dev->dma_parms;
+
+ /* Standard DMA parameters */
+ dma_set_max_seg_size(&dev->ofdev.dev, 65536);
+ dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);
#ifdef CONFIG_PCI
/* Set the DMA ops to the ones from the PCI device, this could be
@@ -538,6 +543,42 @@ void macio_unregister_driver(struct macio_driver *drv)
driver_unregister(&drv->driver);
}
+/* Managed MacIO resources */
+struct macio_devres {
+ u32 res_mask;
+};
+
+static void maciom_release(struct device *gendev, void *res)
+{
+ struct macio_dev *dev = to_macio_device(gendev);
+ struct macio_devres *dr = res;
+ int i, max;
+
+ max = min(dev->n_resources, 32);
+ for (i = 0; i < max; i++) {
+ if (dr->res_mask & (1 << i))
+ macio_release_resource(dev, i);
+ }
+}
+
+int macio_enable_devres(struct macio_dev *dev)
+{
+ struct macio_devres *dr;
+
+ dr = devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL);
+ if (!dr) {
+ dr = devres_alloc(maciom_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+ }
+ return devres_get(&dev->ofdev.dev, dr, NULL, NULL) != NULL;
+}
+
+static struct macio_devres * find_macio_dr(struct macio_dev *dev)
+{
+ return devres_find(&dev->ofdev.dev, maciom_release, NULL, NULL);
+}
+
/**
* macio_request_resource - Request an MMIO resource
* @dev: pointer to the device holding the resource
@@ -555,6 +596,8 @@ void macio_unregister_driver(struct macio_driver *drv)
int macio_request_resource(struct macio_dev *dev, int resource_no,
const char *name)
{
+ struct macio_devres *dr = find_macio_dr(dev);
+
if (macio_resource_len(dev, resource_no) == 0)
return 0;
@@ -562,6 +605,9 @@ int macio_request_resource(struct macio_dev *dev, int resource_no,
macio_resource_len(dev, resource_no),
name))
goto err_out;
+
+ if (dr && resource_no < 32)
+ dr->res_mask |= 1 << resource_no;
return 0;
@@ -582,10 +628,14 @@ err_out:
*/
void macio_release_resource(struct macio_dev *dev, int resource_no)
{
+ struct macio_devres *dr = find_macio_dr(dev);
+
if (macio_resource_len(dev, resource_no) == 0)
return;
release_mem_region(macio_resource_start(dev, resource_no),
macio_resource_len(dev, resource_no));
+ if (dr && resource_no < 32)
+ dr->res_mask &= ~(1 << resource_no);
}
/**
@@ -744,3 +794,5 @@ EXPORT_SYMBOL(macio_request_resource);
EXPORT_SYMBOL(macio_release_resource);
EXPORT_SYMBOL(macio_request_resources);
EXPORT_SYMBOL(macio_release_resources);
+EXPORT_SYMBOL(macio_enable_devres);
+
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 029ad8ce8a7..08002b88f34 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -33,15 +33,6 @@
#include <linux/adb.h>
#include <linux/pmu.h>
-
-#define MB_DEBUG
-
-#ifdef MB_DEBUG
-#define MBDBG(fmt, arg...) printk(KERN_INFO fmt , ## arg)
-#else
-#define MBDBG(fmt, arg...) do { } while (0)
-#endif
-
#define MB_FCR32(bay, r) ((bay)->base + ((r) >> 2))
#define MB_FCR8(bay, r) (((volatile u8 __iomem *)((bay)->base)) + (r))
@@ -76,28 +67,14 @@ struct media_bay_info {
int index;
int cached_gpio;
int sleeping;
+ int user_lock;
struct mutex lock;
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
- ide_hwif_t *cd_port;
- void __iomem *cd_base;
- int cd_irq;
- int cd_retry;
-#endif
-#if defined(CONFIG_BLK_DEV_IDE_PMAC)
- int cd_index;
-#endif
};
#define MAX_BAYS 2
static struct media_bay_info media_bays[MAX_BAYS];
-int media_bay_count = 0;
-
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
-/* check the busy bit in the media-bay ide interface
- (assumes the media-bay contains an ide device) */
-#define MB_IDE_READY(i) ((readb(media_bays[i].cd_base + 0x70) & 0x80) == 0)
-#endif
+static int media_bay_count = 0;
/*
* Wait that number of ms between each step in normal polling mode
@@ -130,21 +107,11 @@ int media_bay_count = 0;
/*
* Wait this many ticks after an IDE device (e.g. CD-ROM) is inserted
- * (or until the device is ready) before waiting for busy bit to disappear
+ * (or until the device is ready) before calling into the driver
*/
#define MB_IDE_WAIT 1000
/*
- * Timeout waiting for busy bit of an IDE device to go down
- */
-#define MB_IDE_TIMEOUT 5000
-
-/*
- * Max retries of the full power up/down sequence for an IDE device
- */
-#define MAX_CD_RETRIES 3
-
-/*
* States of a media bay
*/
enum {
@@ -153,7 +120,6 @@ enum {
mb_enabling_bay, /* enable bits set, waiting MB_RESET_DELAY */
mb_resetting, /* reset bit unset, waiting MB_SETUP_DELAY */
mb_ide_resetting, /* IDE reset bit unset, waiting MB_IDE_WAIT */
- mb_ide_waiting, /* Waiting for BUSY bit to go away until MB_IDE_TIMEOUT */
mb_up, /* Media bay full */
mb_powering_down /* Powering down (avoid too fast down/up) */
};
@@ -373,12 +339,12 @@ static inline void set_mb_power(struct media_bay_info* bay, int onoff)
if (onoff) {
bay->ops->power(bay, 1);
bay->state = mb_powering_up;
- MBDBG("mediabay%d: powering up\n", bay->index);
+ pr_debug("mediabay%d: powering up\n", bay->index);
} else {
/* Make sure everything is powered down & disabled */
bay->ops->power(bay, 0);
bay->state = mb_powering_down;
- MBDBG("mediabay%d: powering down\n", bay->index);
+ pr_debug("mediabay%d: powering down\n", bay->index);
}
bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
}
@@ -387,107 +353,118 @@ static void poll_media_bay(struct media_bay_info* bay)
{
int id = bay->ops->content(bay);
- if (id == bay->last_value) {
- if (id != bay->content_id) {
- bay->value_count += msecs_to_jiffies(MB_POLL_DELAY);
- if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) {
- /* If the device type changes without going thru
- * "MB_NO", we force a pass by "MB_NO" to make sure
- * things are properly reset
- */
- if ((id != MB_NO) && (bay->content_id != MB_NO)) {
- id = MB_NO;
- MBDBG("mediabay%d: forcing MB_NO\n", bay->index);
- }
- MBDBG("mediabay%d: switching to %d\n", bay->index, id);
- set_mb_power(bay, id != MB_NO);
- bay->content_id = id;
- if (id == MB_NO) {
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
- bay->cd_retry = 0;
-#endif
- printk(KERN_INFO "media bay %d is empty\n", bay->index);
- }
- }
- }
- } else {
+ static char *mb_content_types[] = {
+ "a floppy drive",
+ "a floppy drive",
+ "an unsuported audio device",
+ "an ATA device",
+ "an unsupported PCI device",
+ "an unknown device",
+ };
+
+ if (id != bay->last_value) {
bay->last_value = id;
bay->value_count = 0;
+ return;
+ }
+ if (id == bay->content_id)
+ return;
+
+ bay->value_count += msecs_to_jiffies(MB_POLL_DELAY);
+ if (bay->value_count >= msecs_to_jiffies(MB_STABLE_DELAY)) {
+ /* If the device type changes without going thru
+ * "MB_NO", we force a pass by "MB_NO" to make sure
+ * things are properly reset
+ */
+ if ((id != MB_NO) && (bay->content_id != MB_NO)) {
+ id = MB_NO;
+ pr_debug("mediabay%d: forcing MB_NO\n", bay->index);
+ }
+ pr_debug("mediabay%d: switching to %d\n", bay->index, id);
+ set_mb_power(bay, id != MB_NO);
+ bay->content_id = id;
+ if (id >= MB_NO || id < 0)
+ printk(KERN_INFO "mediabay%d: Bay is now empty\n", bay->index);
+ else
+ printk(KERN_INFO "mediabay%d: Bay contains %s\n",
+ bay->index, mb_content_types[id]);
}
}
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
-int check_media_bay(struct device_node *which_bay, int what)
+int check_media_bay(struct macio_dev *baydev)
{
- int i;
+ struct media_bay_info* bay;
+ int id;
- for (i=0; i<media_bay_count; i++)
- if (media_bays[i].mdev && which_bay == media_bays[i].mdev->ofdev.node) {
- if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up)
- return 0;
- media_bays[i].cd_index = -1;
- return -EINVAL;
- }
- return -ENODEV;
+ if (baydev == NULL)
+ return MB_NO;
+
+ /* This returns an instant snapshot without locking, since
+ * we may be called with the bay lock held. The resulting
+ * fuzziness of the result if called at the wrong time is
+ * not actually a huge deal
+ */
+ bay = macio_get_drvdata(baydev);
+ if (bay == NULL)
+ return MB_NO;
+ id = bay->content_id;
+ if (bay->state != mb_up)
+ return MB_NO;
+ if (id == MB_FD1)
+ return MB_FD;
+ return id;
}
-EXPORT_SYMBOL(check_media_bay);
+EXPORT_SYMBOL_GPL(check_media_bay);
-int check_media_bay_by_base(unsigned long base, int what)
+void lock_media_bay(struct macio_dev *baydev)
{
- int i;
-
- for (i=0; i<media_bay_count; i++)
- if (media_bays[i].mdev && base == (unsigned long) media_bays[i].cd_base) {
- if ((what == media_bays[i].content_id) && media_bays[i].state == mb_up)
- return 0;
- media_bays[i].cd_index = -1;
- return -EINVAL;
- }
+ struct media_bay_info* bay;
- return -ENODEV;
+ if (baydev == NULL)
+ return;
+ bay = macio_get_drvdata(baydev);
+ if (bay == NULL)
+ return;
+ mutex_lock(&bay->lock);
+ bay->user_lock = 1;
}
-EXPORT_SYMBOL_GPL(check_media_bay_by_base);
+EXPORT_SYMBOL_GPL(lock_media_bay);
-int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
- int irq, ide_hwif_t *hwif)
+void unlock_media_bay(struct macio_dev *baydev)
{
- int i;
+ struct media_bay_info* bay;
- for (i=0; i<media_bay_count; i++) {
- struct media_bay_info* bay = &media_bays[i];
-
- if (bay->mdev && which_bay == bay->mdev->ofdev.node) {
- int timeout = 5000, index = hwif->index;
-
- mutex_lock(&bay->lock);
-
- bay->cd_port = hwif;
- bay->cd_base = (void __iomem *) base;
- bay->cd_irq = irq;
-
- if ((MB_CD != bay->content_id) || bay->state != mb_up) {
- mutex_unlock(&bay->lock);
- return 0;
- }
- printk(KERN_DEBUG "Registered ide%d for media bay %d\n", index, i);
- do {
- if (MB_IDE_READY(i)) {
- bay->cd_index = index;
- mutex_unlock(&bay->lock);
- return 0;
- }
- mdelay(1);
- } while(--timeout);
- printk(KERN_DEBUG "Timeount waiting IDE in bay %d\n", i);
- mutex_unlock(&bay->lock);
- return -ENODEV;
- }
+ if (baydev == NULL)
+ return;
+ bay = macio_get_drvdata(baydev);
+ if (bay == NULL)
+ return;
+ if (bay->user_lock) {
+ bay->user_lock = 0;
+ mutex_unlock(&bay->lock);
}
+}
+EXPORT_SYMBOL_GPL(unlock_media_bay);
- return -ENODEV;
+static int mb_broadcast_hotplug(struct device *dev, void *data)
+{
+ struct media_bay_info* bay = data;
+ struct macio_dev *mdev;
+ struct macio_driver *drv;
+ int state;
+
+ if (dev->bus != &macio_bus_type)
+ return 0;
+
+ state = bay->state == mb_up ? bay->content_id : MB_NO;
+ if (state == MB_FD1)
+ state = MB_FD;
+ mdev = to_macio_device(dev);
+ drv = to_macio_driver(dev->driver);
+ if (dev->driver && drv->mediabay_event)
+ drv->mediabay_event(mdev, state);
+ return 0;
}
-EXPORT_SYMBOL_GPL(media_bay_set_ide_infos);
-#endif /* CONFIG_BLK_DEV_IDE_PMAC */
static void media_bay_step(int i)
{
@@ -497,8 +474,8 @@ static void media_bay_step(int i)
if (bay->state != mb_powering_down)
poll_media_bay(bay);
- /* If timer expired or polling IDE busy, run state machine */
- if ((bay->state != mb_ide_waiting) && (bay->timer != 0)) {
+ /* If timer expired run state machine */
+ if (bay->timer != 0) {
bay->timer -= msecs_to_jiffies(MB_POLL_DELAY);
if (bay->timer > 0)
return;
@@ -508,100 +485,50 @@ static void media_bay_step(int i)
switch(bay->state) {
case mb_powering_up:
if (bay->ops->setup_bus(bay, bay->last_value) < 0) {
- MBDBG("mediabay%d: device not supported (kind:%d)\n", i, bay->content_id);
+ pr_debug("mediabay%d: device not supported (kind:%d)\n",
+ i, bay->content_id);
set_mb_power(bay, 0);
break;
}
bay->timer = msecs_to_jiffies(MB_RESET_DELAY);
bay->state = mb_enabling_bay;
- MBDBG("mediabay%d: enabling (kind:%d)\n", i, bay->content_id);
+ pr_debug("mediabay%d: enabling (kind:%d)\n", i, bay->content_id);
break;
case mb_enabling_bay:
bay->ops->un_reset(bay);
bay->timer = msecs_to_jiffies(MB_SETUP_DELAY);
bay->state = mb_resetting;
- MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id);
+ pr_debug("mediabay%d: releasing bay reset (kind:%d)\n",
+ i, bay->content_id);
break;
case mb_resetting:
if (bay->content_id != MB_CD) {
- MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id);
+ pr_debug("mediabay%d: bay is up (kind:%d)\n", i,
+ bay->content_id);
bay->state = mb_up;
+ device_for_each_child(&bay->mdev->ofdev.dev,
+ bay, mb_broadcast_hotplug);
break;
}
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
- MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id);
+ pr_debug("mediabay%d: releasing ATA reset (kind:%d)\n",
+ i, bay->content_id);
bay->ops->un_reset_ide(bay);
bay->timer = msecs_to_jiffies(MB_IDE_WAIT);
bay->state = mb_ide_resetting;
-#else
- printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i);
- set_mb_power(bay, 0);
-#endif /* CONFIG_BLK_DEV_IDE_PMAC */
break;
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
+
case mb_ide_resetting:
- bay->timer = msecs_to_jiffies(MB_IDE_TIMEOUT);
- bay->state = mb_ide_waiting;
- MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id);
+ pr_debug("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id);
+ bay->state = mb_up;
+ device_for_each_child(&bay->mdev->ofdev.dev,
+ bay, mb_broadcast_hotplug);
break;
- case mb_ide_waiting:
- if (bay->cd_base == NULL) {
- bay->timer = 0;
- bay->state = mb_up;
- MBDBG("mediabay%d: up before IDE init\n", i);
- break;
- } else if (MB_IDE_READY(i)) {
- bay->timer = 0;
- bay->state = mb_up;
- if (bay->cd_index < 0) {
- printk("mediabay %d, registering IDE...\n", i);
- pmu_suspend();
- ide_port_scan(bay->cd_port);
- if (bay->cd_port->present)
- bay->cd_index = bay->cd_port->index;
- pmu_resume();
- }
- if (bay->cd_index == -1) {
- /* We eventually do a retry */
- bay->cd_retry++;
- printk("IDE register error\n");
- set_mb_power(bay, 0);
- } else {
- printk(KERN_DEBUG "media-bay %d is ide%d\n", i, bay->cd_index);
- MBDBG("mediabay %d IDE ready\n", i);
- }
- break;
- } else if (bay->timer > 0)
- bay->timer -= msecs_to_jiffies(MB_POLL_DELAY);
- if (bay->timer <= 0) {
- printk("\nIDE Timeout in bay %d !, IDE state is: 0x%02x\n",
- i, readb(bay->cd_base + 0x70));
- MBDBG("mediabay%d: nIDE Timeout !\n", i);
- set_mb_power(bay, 0);
- bay->timer = 0;
- }
- break;
-#endif /* CONFIG_BLK_DEV_IDE_PMAC */
+
case mb_powering_down:
bay->state = mb_empty;
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
- if (bay->cd_index >= 0) {
- printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i,
- bay->cd_index);
- ide_port_unregister_devices(bay->cd_port);
- bay->cd_index = -1;
- }
- if (bay->cd_retry) {
- if (bay->cd_retry > MAX_CD_RETRIES) {
- /* Should add an error sound (sort of beep in dmasound) */
- printk("\nmedia-bay %d, IDE device badly inserted or unrecognised\n", i);
- } else {
- /* Force a new power down/up sequence */
- bay->content_id = MB_NO;
- }
- }
-#endif /* CONFIG_BLK_DEV_IDE_PMAC */
- MBDBG("mediabay%d: end of power down\n", i);
+ device_for_each_child(&bay->mdev->ofdev.dev,
+ bay, mb_broadcast_hotplug);
+ pr_debug("mediabay%d: end of power down\n", i);
break;
}
}
@@ -676,11 +603,6 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
bay->last_value = bay->ops->content(bay);
bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
bay->state = mb_empty;
- do {
- msleep(MB_POLL_DELAY);
- media_bay_step(i);
- } while((bay->state != mb_empty) &&
- (bay->state != mb_up));
/* Mark us ready by filling our mdev data */
macio_set_drvdata(mdev, bay);
@@ -725,7 +647,7 @@ static int media_bay_resume(struct macio_dev *mdev)
set_mb_power(bay, 0);
msleep(MB_POWER_DELAY);
if (bay->ops->content(bay) != bay->content_id) {
- printk("mediabay%d: content changed during sleep...\n", bay->index);
+ printk("mediabay%d: Content changed during sleep...\n", bay->index);
mutex_unlock(&bay->lock);
return 0;
}
@@ -733,9 +655,6 @@ static int media_bay_resume(struct macio_dev *mdev)
bay->last_value = bay->content_id;
bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
- bay->cd_retry = 0;
-#endif
do {
msleep(MB_POLL_DELAY);
media_bay_step(bay->index);
@@ -823,9 +742,6 @@ static int __init media_bay_init(void)
for (i=0; i<MAX_BAYS; i++) {
memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info));
media_bays[i].content_id = -1;
-#ifdef CONFIG_BLK_DEV_IDE_PMAC
- media_bays[i].cd_index = -1;
-#endif
}
if (!machine_is(powermac))
return 0;
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index b195d753d2e..c876349c32d 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -13,7 +13,6 @@
#include <linux/fcntl.h>
#include <linux/nvram.h>
#include <linux/init.h>
-#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
@@ -21,7 +20,6 @@
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
- lock_kernel();
switch (origin) {
case 1:
offset += file->f_pos;
@@ -30,12 +28,10 @@ static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
offset += NVRAM_SIZE;
break;
}
- if (offset < 0) {
- unlock_kernel();
+ if (offset < 0)
return -EINVAL;
- }
+
file->f_pos = offset;
- unlock_kernel();
return file->f_pos;
}
@@ -76,8 +72,7 @@ static ssize_t write_nvram(struct file *file, const char __user *buf,
return p - buf;
}
-static int nvram_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
case PMAC_NVRAM_GET_OFFSET:
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 556f0feaa4d..5ff47ba7f2d 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -79,6 +79,7 @@ struct thermostat {
u8 limits[3];
int last_speed[2];
int last_var[2];
+ int pwm_inv[2];
};
static enum {ADT7460, ADT7467} therm_type;
@@ -229,19 +230,23 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
if (speed >= 0) {
manual = read_reg(th, MANUAL_MODE[fan]);
+ manual &= ~INVERT_MASK;
write_reg(th, MANUAL_MODE[fan],
- (manual|MANUAL_MASK) & (~INVERT_MASK));
+ manual | MANUAL_MASK | th->pwm_inv[fan]);
write_reg(th, FAN_SPD_SET[fan], speed);
} else {
/* back to automatic */
if(therm_type == ADT7460) {
manual = read_reg(th,
MANUAL_MODE[fan]) & (~MANUAL_MASK);
-
+ manual &= ~INVERT_MASK;
+ manual |= th->pwm_inv[fan];
write_reg(th,
MANUAL_MODE[fan], manual|REM_CONTROL[fan]);
} else {
manual = read_reg(th, MANUAL_MODE[fan]);
+ manual &= ~INVERT_MASK;
+ manual |= th->pwm_inv[fan];
write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK));
}
}
@@ -387,7 +392,7 @@ static int probe_thermostat(struct i2c_client *client,
i2c_set_clientdata(client, th);
th->clt = client;
- rc = read_reg(th, 0);
+ rc = read_reg(th, CONFIG_REG);
if (rc < 0) {
dev_err(&client->dev, "Thermostat failed to read config!\n");
kfree(th);
@@ -418,6 +423,10 @@ static int probe_thermostat(struct i2c_client *client,
thermostat = th;
+ /* record invert bit status because fw can corrupt it after suspend */
+ th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK;
+ th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK;
+
/* be sure to really write fan speed the first time */
th->last_speed[0] = -2;
th->last_speed[1] = -2;
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 6f308a4757e..db379c38143 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
@@ -186,17 +187,11 @@ static int init_pmu(void);
static void pmu_start(void);
static irqreturn_t via_pmu_interrupt(int irq, void *arg);
static irqreturn_t gpio1_interrupt(int irq, void *arg);
-static int proc_get_info(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int proc_get_irqstats(char *page, char **start, off_t off,
- int count, int *eof, void *data);
+static const struct file_operations pmu_info_proc_fops;
+static const struct file_operations pmu_irqstats_proc_fops;
static void pmu_pass_intr(unsigned char *data, int len);
-static int proc_get_batt(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int proc_read_options(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int proc_write_options(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
+static const struct file_operations pmu_battery_proc_fops;
+static const struct file_operations pmu_options_proc_fops;
#ifdef CONFIG_ADB
struct adb_driver via_pmu_driver = {
@@ -507,19 +502,15 @@ static int __init via_pmu_dev_init(void)
for (i=0; i<pmu_battery_count; i++) {
char title[16];
sprintf(title, "battery_%ld", i);
- proc_pmu_batt[i] = create_proc_read_entry(title, 0, proc_pmu_root,
- proc_get_batt, (void *)i);
+ proc_pmu_batt[i] = proc_create_data(title, 0, proc_pmu_root,
+ &pmu_battery_proc_fops, (void *)i);
}
- proc_pmu_info = create_proc_read_entry("info", 0, proc_pmu_root,
- proc_get_info, NULL);
- proc_pmu_irqstats = create_proc_read_entry("interrupts", 0, proc_pmu_root,
- proc_get_irqstats, NULL);
- proc_pmu_options = create_proc_entry("options", 0600, proc_pmu_root);
- if (proc_pmu_options) {
- proc_pmu_options->read_proc = proc_read_options;
- proc_pmu_options->write_proc = proc_write_options;
- }
+ proc_pmu_info = proc_create("info", 0, proc_pmu_root, &pmu_info_proc_fops);
+ proc_pmu_irqstats = proc_create("interrupts", 0, proc_pmu_root,
+ &pmu_irqstats_proc_fops);
+ proc_pmu_options = proc_create("options", 0600, proc_pmu_root,
+ &pmu_options_proc_fops);
}
return 0;
}
@@ -799,27 +790,33 @@ query_battery_state(void)
2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
}
-static int
-proc_get_info(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int pmu_info_proc_show(struct seq_file *m, void *v)
{
- char* p = page;
-
- p += sprintf(p, "PMU driver version : %d\n", PMU_DRIVER_VERSION);
- p += sprintf(p, "PMU firmware version : %02x\n", pmu_version);
- p += sprintf(p, "AC Power : %d\n",
+ seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION);
+ seq_printf(m, "PMU firmware version : %02x\n", pmu_version);
+ seq_printf(m, "AC Power : %d\n",
((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0);
- p += sprintf(p, "Battery count : %d\n", pmu_battery_count);
+ seq_printf(m, "Battery count : %d\n", pmu_battery_count);
+
+ return 0;
+}
- return p - page;
+static int pmu_info_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmu_info_proc_show, NULL);
}
-static int
-proc_get_irqstats(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static const struct file_operations pmu_info_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pmu_info_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
{
int i;
- char* p = page;
static const char *irq_names[] = {
"Total CB1 triggered events",
"Total GPIO1 triggered events",
@@ -835,60 +832,76 @@ proc_get_irqstats(char *page, char **start, off_t off,
};
for (i=0; i<11; i++) {
- p += sprintf(p, " %2u: %10u (%s)\n",
+ seq_printf(m, " %2u: %10u (%s)\n",
i, pmu_irq_stats[i], irq_names[i]);
}
- return p - page;
+ return 0;
}
-static int
-proc_get_batt(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int pmu_irqstats_proc_open(struct inode *inode, struct file *file)
{
- long batnum = (long)data;
- char *p = page;
+ return single_open(file, pmu_irqstats_proc_show, NULL);
+}
+
+static const struct file_operations pmu_irqstats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pmu_irqstats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmu_battery_proc_show(struct seq_file *m, void *v)
+{
+ long batnum = (long)m->private;
- p += sprintf(p, "\n");
- p += sprintf(p, "flags : %08x\n",
- pmu_batteries[batnum].flags);
- p += sprintf(p, "charge : %d\n",
- pmu_batteries[batnum].charge);
- p += sprintf(p, "max_charge : %d\n",
- pmu_batteries[batnum].max_charge);
- p += sprintf(p, "current : %d\n",
- pmu_batteries[batnum].amperage);
- p += sprintf(p, "voltage : %d\n",
- pmu_batteries[batnum].voltage);
- p += sprintf(p, "time rem. : %d\n",
- pmu_batteries[batnum].time_remaining);
-
- return p - page;
+ seq_putc(m, '\n');
+ seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags);
+ seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge);
+ seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge);
+ seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage);
+ seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage);
+ seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining);
+ return 0;
}
-static int
-proc_read_options(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int pmu_battery_proc_open(struct inode *inode, struct file *file)
{
- char *p = page;
+ return single_open(file, pmu_battery_proc_show, PDE(inode)->data);
+}
+static const struct file_operations pmu_battery_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pmu_battery_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pmu_options_proc_show(struct seq_file *m, void *v)
+{
#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
if (pmu_kind == PMU_KEYLARGO_BASED &&
pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
- p += sprintf(p, "lid_wakeup=%d\n", option_lid_wakeup);
+ seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup);
#endif
if (pmu_kind == PMU_KEYLARGO_BASED)
- p += sprintf(p, "server_mode=%d\n", option_server_mode);
+ seq_printf(m, "server_mode=%d\n", option_server_mode);
- return p - page;
+ return 0;
}
-
-static int
-proc_write_options(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+
+static int pmu_options_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmu_options_proc_show, NULL);
+}
+
+static ssize_t pmu_options_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
{
char tmp[33];
char *label, *val;
- unsigned long fcount = count;
+ size_t fcount = count;
if (!count)
return -EINVAL;
@@ -927,6 +940,15 @@ proc_write_options(struct file *file, const char __user *buffer,
return fcount;
}
+static const struct file_operations pmu_options_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pmu_options_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pmu_options_proc_write,
+};
+
#ifdef CONFIG_ADB
/* Send an ADB command */
static int pmu_send_request(struct adb_request *req, int sync)
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 961fa0e7c2c..6c68b9e5f5c 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -202,6 +202,8 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
fct->ctrl.name = "cpu-front-fan-1";
else if (!strcmp(l, "CPU A PUMP"))
fct->ctrl.name = "cpu-pump-0";
+ else if (!strcmp(l, "CPU B PUMP"))
+ fct->ctrl.name = "cpu-pump-1";
else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") ||
!strcmp(l, "EXPANSION SLOTS INTAKE"))
fct->ctrl.name = "slots-fan";
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 2158377a135..acb3a4e404f 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -185,11 +185,10 @@ config MD_MULTIPATH
tristate "Multipath I/O support"
depends on BLK_DEV_MD
help
- Multipath-IO is the ability of certain devices to address the same
- physical disk over multiple 'IO paths'. The code ensures that such
- paths can be defined and handled at runtime, and ensures that a
- transparent failover to the backup path(s) happens if a IO errors
- arrives on the primary path.
+ MD_MULTIPATH provides a simple multi-path personality for use
+ with the MD framework. It is not under active development. New
+ projects should consider using DM_MULTIPATH, which has more
+ features and more testing.
If unsure, say N.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 60e2b322db1..26ac8aad0b1 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -212,7 +212,7 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
*/
/* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, long offset,
+static struct page *read_sb_page(mddev_t *mddev, loff_t offset,
struct page *page,
unsigned long index, int size)
{
@@ -287,27 +287,36 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
+ loff_t offset = mddev->bitmap_info.offset;
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
bdev_logical_block_size(rdev->bdev));
/* Just make sure we aren't corrupting data or
* metadata
*/
- if (bitmap->offset < 0) {
+ if (mddev->external) {
+ /* Bitmap could be anywhere. */
+ if (rdev->sb_start + offset + (page->index *(PAGE_SIZE/512)) >
+ rdev->data_offset &&
+ rdev->sb_start + offset <
+ rdev->data_offset + mddev->dev_sectors +
+ (PAGE_SIZE/512))
+ goto bad_alignment;
+ } else if (offset < 0) {
/* DATA BITMAP METADATA */
- if (bitmap->offset
+ if (offset
+ (long)(page->index * (PAGE_SIZE/512))
+ size/512 > 0)
/* bitmap runs in to metadata */
goto bad_alignment;
if (rdev->data_offset + mddev->dev_sectors
- > rdev->sb_start + bitmap->offset)
+ > rdev->sb_start + offset)
/* data runs in to bitmap */
goto bad_alignment;
} else if (rdev->sb_start < rdev->data_offset) {
/* METADATA BITMAP DATA */
if (rdev->sb_start
- + bitmap->offset
+ + offset
+ page->index*(PAGE_SIZE/512) + size/512
> rdev->data_offset)
/* bitmap runs in to data */
@@ -316,7 +325,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
/* DATA METADATA BITMAP - no problems */
}
md_super_write(mddev, rdev,
- rdev->sb_start + bitmap->offset
+ rdev->sb_start + offset
+ page->index * (PAGE_SIZE/512),
size,
page);
@@ -488,6 +497,8 @@ void bitmap_update_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
return;
+ if (bitmap->mddev->bitmap_info.external)
+ return;
spin_lock_irqsave(&bitmap->lock, flags);
if (!bitmap->sb_page) { /* no superblock */
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -501,6 +512,9 @@ void bitmap_update_sb(struct bitmap *bitmap)
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
}
+ /* Just in case these have been changed via sysfs: */
+ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
+ sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
kunmap_atomic(sb, KM_USER0);
write_page(bitmap, bitmap->sb_page, 1);
}
@@ -550,7 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
} else {
- bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset,
+ bitmap->sb_page = read_sb_page(bitmap->mddev,
+ bitmap->mddev->bitmap_info.offset,
NULL,
0, sizeof(bitmap_super_t));
}
@@ -563,7 +578,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
chunksize = le32_to_cpu(sb->chunksize);
- daemon_sleep = le32_to_cpu(sb->daemon_sleep);
+ daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
/* verify that the bitmap-specific fields are valid */
@@ -576,7 +591,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
reason = "bitmap chunksize too small";
else if ((1 << ffz(~chunksize)) != chunksize)
reason = "bitmap chunksize not a power of 2";
- else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ)
+ else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
reason = "daemon sleep period out of range";
else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)";
@@ -610,10 +625,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
}
success:
/* assign fields using values from superblock */
- bitmap->chunksize = chunksize;
- bitmap->daemon_sleep = daemon_sleep;
- bitmap->daemon_lastrun = jiffies;
- bitmap->max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+ bitmap->mddev->bitmap_info.max_write_behind = write_behind;
bitmap->flags |= le32_to_cpu(sb->state);
if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
bitmap->flags |= BITMAP_HOSTENDIAN;
@@ -664,16 +678,26 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
* general bitmap file operations
*/
+/*
+ * on-disk bitmap:
+ *
+ * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
+ * file a page at a time. There's a superblock at the start of the file.
+ */
/* calculate the index of the page that contains this bit */
-static inline unsigned long file_page_index(unsigned long chunk)
+static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
{
- return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT;
+ if (!bitmap->mddev->bitmap_info.external)
+ chunk += sizeof(bitmap_super_t) << 3;
+ return chunk >> PAGE_BIT_SHIFT;
}
/* calculate the (bit) offset of this bit within a page */
-static inline unsigned long file_page_offset(unsigned long chunk)
+static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
{
- return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1);
+ if (!bitmap->mddev->bitmap_info.external)
+ chunk += sizeof(bitmap_super_t) << 3;
+ return chunk & (PAGE_BITS - 1);
}
/*
@@ -686,8 +710,9 @@ static inline unsigned long file_page_offset(unsigned long chunk)
static inline struct page *filemap_get_page(struct bitmap *bitmap,
unsigned long chunk)
{
- if (file_page_index(chunk) >= bitmap->file_pages) return NULL;
- return bitmap->filemap[file_page_index(chunk) - file_page_index(0)];
+ if (file_page_index(bitmap, chunk) >= bitmap->file_pages) return NULL;
+ return bitmap->filemap[file_page_index(bitmap, chunk)
+ - file_page_index(bitmap, 0)];
}
@@ -710,7 +735,7 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
spin_unlock_irqrestore(&bitmap->lock, flags);
while (pages--)
- if (map[pages]->index != 0) /* 0 is sb_page, release it below */
+ if (map[pages] != sb_page) /* 0 is sb_page, release it below */
free_buffers(map[pages]);
kfree(map);
kfree(attr);
@@ -821,7 +846,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
page = filemap_get_page(bitmap, chunk);
if (!page) return;
- bit = file_page_offset(chunk);
+ bit = file_page_offset(bitmap, chunk);
/* set the bit */
kaddr = kmap_atomic(page, KM_USER0);
@@ -907,7 +932,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
chunks = bitmap->chunks;
file = bitmap->file;
- BUG_ON(!file && !bitmap->offset);
+ BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);
#ifdef INJECT_FAULTS_3
outofdate = 1;
@@ -919,14 +944,17 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
"recovery\n", bmname(bitmap));
bytes = (chunks + 7) / 8;
+ if (!bitmap->mddev->bitmap_info.external)
+ bytes += sizeof(bitmap_super_t);
- num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ num_pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
- if (file && i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) {
+ if (file && i_size_read(file->f_mapping->host) < bytes) {
printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
bmname(bitmap),
(unsigned long) i_size_read(file->f_mapping->host),
- bytes + sizeof(bitmap_super_t));
+ bytes);
goto err;
}
@@ -947,17 +975,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
for (i = 0; i < chunks; i++) {
int b;
- index = file_page_index(i);
- bit = file_page_offset(i);
+ index = file_page_index(bitmap, i);
+ bit = file_page_offset(bitmap, i);
if (index != oldindex) { /* this is a new page, read it in */
int count;
/* unmap the old page, we're done with it */
if (index == num_pages-1)
- count = bytes + sizeof(bitmap_super_t)
- - index * PAGE_SIZE;
+ count = bytes - index * PAGE_SIZE;
else
count = PAGE_SIZE;
- if (index == 0) {
+ if (index == 0 && bitmap->sb_page) {
/*
* if we're here then the superblock page
* contains some bits (PAGE_SIZE != sizeof sb)
@@ -967,14 +994,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
offset = sizeof(bitmap_super_t);
if (!file)
read_sb_page(bitmap->mddev,
- bitmap->offset,
+ bitmap->mddev->bitmap_info.offset,
page,
index, count);
} else if (file) {
page = read_page(file, index, bitmap, count);
offset = 0;
} else {
- page = read_sb_page(bitmap->mddev, bitmap->offset,
+ page = read_sb_page(bitmap->mddev,
+ bitmap->mddev->bitmap_info.offset,
NULL,
index, count);
offset = 0;
@@ -1078,23 +1106,32 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
* out to disk
*/
-void bitmap_daemon_work(struct bitmap *bitmap)
+void bitmap_daemon_work(mddev_t *mddev)
{
+ struct bitmap *bitmap;
unsigned long j;
unsigned long flags;
struct page *page = NULL, *lastpage = NULL;
int blocks;
void *paddr;
- if (bitmap == NULL)
+ /* Use a mutex to guard daemon_work against
+ * bitmap_destroy.
+ */
+ mutex_lock(&mddev->bitmap_info.mutex);
+ bitmap = mddev->bitmap;
+ if (bitmap == NULL) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
return;
- if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
+ }
+ if (time_before(jiffies, bitmap->daemon_lastrun
+ + bitmap->mddev->bitmap_info.daemon_sleep))
goto done;
bitmap->daemon_lastrun = jiffies;
if (bitmap->allclean) {
bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- return;
+ goto done;
}
bitmap->allclean = 1;
@@ -1142,7 +1179,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
/* We are possibly going to clear some bits, so make
* sure that events_cleared is up-to-date.
*/
- if (bitmap->need_sync) {
+ if (bitmap->need_sync &&
+ bitmap->mddev->bitmap_info.external == 0) {
bitmap_super_t *sb;
bitmap->need_sync = 0;
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -1152,7 +1190,8 @@ void bitmap_daemon_work(struct bitmap *bitmap)
write_page(bitmap, bitmap->sb_page, 1);
}
spin_lock_irqsave(&bitmap->lock, flags);
- clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
+ if (!bitmap->need_sync)
+ clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
}
bmc = bitmap_get_counter(bitmap,
(sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
@@ -1167,7 +1206,7 @@ void bitmap_daemon_work(struct bitmap *bitmap)
if (*bmc == 2) {
*bmc=1; /* maybe clear the bit next time */
set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
- } else if (*bmc == 1) {
+ } else if (*bmc == 1 && !bitmap->need_sync) {
/* we can clear the bit */
*bmc = 0;
bitmap_count_page(bitmap,
@@ -1177,9 +1216,11 @@ void bitmap_daemon_work(struct bitmap *bitmap)
/* clear the bit */
paddr = kmap_atomic(page, KM_USER0);
if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(j), paddr);
+ clear_bit(file_page_offset(bitmap, j),
+ paddr);
else
- ext2_clear_bit(file_page_offset(j), paddr);
+ ext2_clear_bit(file_page_offset(bitmap, j),
+ paddr);
kunmap_atomic(paddr, KM_USER0);
}
} else
@@ -1202,7 +1243,9 @@ void bitmap_daemon_work(struct bitmap *bitmap)
done:
if (bitmap->allclean == 0)
- bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ;
+ bitmap->mddev->thread->timeout =
+ bitmap->mddev->bitmap_info.daemon_sleep;
+ mutex_unlock(&mddev->bitmap_info.mutex);
}
static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1332,6 +1375,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;
+ sysfs_notify_dirent(bitmap->sysfs_can_clear);
}
if (!success && ! (*bmc & NEEDED_MASK))
@@ -1470,7 +1514,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
return;
}
if (time_before(jiffies, (bitmap->last_end_sync
- + bitmap->daemon_sleep * HZ)))
+ + bitmap->mddev->bitmap_info.daemon_sleep)))
return;
wait_event(bitmap->mddev->recovery_wait,
atomic_read(&bitmap->mddev->recovery_active) == 0);
@@ -1522,6 +1566,12 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
bitmap_file_set_bit(bitmap, sec);
+ if (sec < bitmap->mddev->recovery_cp)
+ /* We are asserting that the array is dirty,
+ * so move the recovery_cp address back so
+ * that it is obvious that it is dirty
+ */
+ bitmap->mddev->recovery_cp = sec;
}
}
@@ -1531,7 +1581,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
void bitmap_flush(mddev_t *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
- int sleep;
+ long sleep;
if (!bitmap) /* there was no bitmap */
return;
@@ -1539,12 +1589,13 @@ void bitmap_flush(mddev_t *mddev)
/* run the daemon_work three time to ensure everything is flushed
* that can be
*/
- sleep = bitmap->daemon_sleep;
- bitmap->daemon_sleep = 0;
- bitmap_daemon_work(bitmap);
- bitmap_daemon_work(bitmap);
- bitmap_daemon_work(bitmap);
- bitmap->daemon_sleep = sleep;
+ sleep = mddev->bitmap_info.daemon_sleep * 2;
+ bitmap->daemon_lastrun -= sleep;
+ bitmap_daemon_work(mddev);
+ bitmap->daemon_lastrun -= sleep;
+ bitmap_daemon_work(mddev);
+ bitmap->daemon_lastrun -= sleep;
+ bitmap_daemon_work(mddev);
bitmap_update_sb(bitmap);
}
@@ -1574,6 +1625,7 @@ static void bitmap_free(struct bitmap *bitmap)
kfree(bp);
kfree(bitmap);
}
+
void bitmap_destroy(mddev_t *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
@@ -1581,10 +1633,15 @@ void bitmap_destroy(mddev_t *mddev)
if (!bitmap) /* there was no bitmap */
return;
+ mutex_lock(&mddev->bitmap_info.mutex);
mddev->bitmap = NULL; /* disconnect from the md device */
+ mutex_unlock(&mddev->bitmap_info.mutex);
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ if (bitmap->sysfs_can_clear)
+ sysfs_put(bitmap->sysfs_can_clear);
+
bitmap_free(bitmap);
}
@@ -1598,16 +1655,17 @@ int bitmap_create(mddev_t *mddev)
sector_t blocks = mddev->resync_max_sectors;
unsigned long chunks;
unsigned long pages;
- struct file *file = mddev->bitmap_file;
+ struct file *file = mddev->bitmap_info.file;
int err;
sector_t start;
+ struct sysfs_dirent *bm;
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
- if (!file && !mddev->bitmap_offset) /* bitmap disabled, nothing to do */
+ if (!file && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
return 0;
- BUG_ON(file && mddev->bitmap_offset);
+ BUG_ON(file && mddev->bitmap_info.offset);
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
@@ -1620,8 +1678,14 @@ int bitmap_create(mddev_t *mddev)
bitmap->mddev = mddev;
+ bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
+ if (bm) {
+ bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
+ sysfs_put(bm);
+ } else
+ bitmap->sysfs_can_clear = NULL;
+
bitmap->file = file;
- bitmap->offset = mddev->bitmap_offset;
if (file) {
get_file(file);
/* As future accesses to this file will use bmap,
@@ -1630,12 +1694,22 @@ int bitmap_create(mddev_t *mddev)
*/
vfs_fsync(file, file->f_dentry, 1);
}
- /* read superblock from bitmap file (this sets bitmap->chunksize) */
- err = bitmap_read_sb(bitmap);
+ /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
+ if (!mddev->bitmap_info.external)
+ err = bitmap_read_sb(bitmap);
+ else {
+ err = 0;
+ if (mddev->bitmap_info.chunksize == 0 ||
+ mddev->bitmap_info.daemon_sleep == 0)
+ /* chunksize and time_base need to be
+ * set first. */
+ err = -EINVAL;
+ }
if (err)
goto error;
- bitmap->chunkshift = ffz(~bitmap->chunksize);
+ bitmap->daemon_lastrun = jiffies;
+ bitmap->chunkshift = ffz(~mddev->bitmap_info.chunksize);
/* now that chunksize and chunkshift are set, we can use these macros */
chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
@@ -1677,7 +1751,8 @@ int bitmap_create(mddev_t *mddev)
mddev->bitmap = bitmap;
- mddev->thread->timeout = bitmap->daemon_sleep * HZ;
+ mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
+ md_wakeup_thread(mddev->thread);
bitmap_update_sb(bitmap);
@@ -1688,6 +1763,264 @@ int bitmap_create(mddev_t *mddev)
return err;
}
+static ssize_t
+location_show(mddev_t *mddev, char *page)
+{
+ ssize_t len;
+ if (mddev->bitmap_info.file) {
+ len = sprintf(page, "file");
+ } else if (mddev->bitmap_info.offset) {
+ len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
+ } else
+ len = sprintf(page, "none");
+ len += sprintf(page+len, "\n");
+ return len;
+}
+
+static ssize_t
+location_store(mddev_t *mddev, const char *buf, size_t len)
+{
+
+ if (mddev->pers) {
+ if (!mddev->pers->quiesce)
+ return -EBUSY;
+ if (mddev->recovery || mddev->sync_thread)
+ return -EBUSY;
+ }
+
+ if (mddev->bitmap || mddev->bitmap_info.file ||
+ mddev->bitmap_info.offset) {
+ /* bitmap already configured. Only option is to clear it */
+ if (strncmp(buf, "none", 4) != 0)
+ return -EBUSY;
+ if (mddev->pers) {
+ mddev->pers->quiesce(mddev, 1);
+ bitmap_destroy(mddev);
+ mddev->pers->quiesce(mddev, 0);
+ }
+ mddev->bitmap_info.offset = 0;
+ if (mddev->bitmap_info.file) {
+ struct file *f = mddev->bitmap_info.file;
+ mddev->bitmap_info.file = NULL;
+ restore_bitmap_write_access(f);
+ fput(f);
+ }
+ } else {
+ /* No bitmap, OK to set a location */
+ long long offset;
+ if (strncmp(buf, "none", 4) == 0)
+ /* nothing to be done */;
+ else if (strncmp(buf, "file:", 5) == 0) {
+ /* Not supported yet */
+ return -EINVAL;
+ } else {
+ int rv;
+ if (buf[0] == '+')
+ rv = strict_strtoll(buf+1, 10, &offset);
+ else
+ rv = strict_strtoll(buf, 10, &offset);
+ if (rv)
+ return rv;
+ if (offset == 0)
+ return -EINVAL;
+ if (mddev->bitmap_info.external == 0 &&
+ mddev->major_version == 0 &&
+ offset != mddev->bitmap_info.default_offset)
+ return -EINVAL;
+ mddev->bitmap_info.offset = offset;
+ if (mddev->pers) {
+ mddev->pers->quiesce(mddev, 1);
+ rv = bitmap_create(mddev);
+ if (rv) {
+ bitmap_destroy(mddev);
+ mddev->bitmap_info.offset = 0;
+ }
+ mddev->pers->quiesce(mddev, 0);
+ if (rv)
+ return rv;
+ }
+ }
+ }
+ if (!mddev->external) {
+ /* Ensure new bitmap info is stored in
+ * metadata promptly.
+ */
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ }
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_location =
+__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
+
+static ssize_t
+timeout_show(mddev_t *mddev, char *page)
+{
+ ssize_t len;
+ unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
+ unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
+
+ len = sprintf(page, "%lu", secs);
+ if (jifs)
+ len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
+ len += sprintf(page+len, "\n");
+ return len;
+}
+
+static ssize_t
+timeout_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* timeout can be set at any time */
+ unsigned long timeout;
+ int rv = strict_strtoul_scaled(buf, &timeout, 4);
+ if (rv)
+ return rv;
+
+ /* just to make sure we don't overflow... */
+ if (timeout >= LONG_MAX / HZ)
+ return -EINVAL;
+
+ timeout = timeout * HZ / 10000;
+
+ if (timeout >= MAX_SCHEDULE_TIMEOUT)
+ timeout = MAX_SCHEDULE_TIMEOUT-1;
+ if (timeout < 1)
+ timeout = 1;
+ mddev->bitmap_info.daemon_sleep = timeout;
+ if (mddev->thread) {
+ /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
+ * the bitmap is all clean and we don't need to
+ * adjust the timeout right now
+ */
+ if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
+ mddev->thread->timeout = timeout;
+ md_wakeup_thread(mddev->thread);
+ }
+ }
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_timeout =
+__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
+
+static ssize_t
+backlog_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
+}
+
+static ssize_t
+backlog_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ unsigned long backlog;
+ int rv = strict_strtoul(buf, 10, &backlog);
+ if (rv)
+ return rv;
+ if (backlog > COUNTER_MAX)
+ return -EINVAL;
+ mddev->bitmap_info.max_write_behind = backlog;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_backlog =
+__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
+
+static ssize_t
+chunksize_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
+}
+
+static ssize_t
+chunksize_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* Can only be changed when no bitmap is active */
+ int rv;
+ unsigned long csize;
+ if (mddev->bitmap)
+ return -EBUSY;
+ rv = strict_strtoul(buf, 10, &csize);
+ if (rv)
+ return rv;
+ if (csize < 512 ||
+ !is_power_of_2(csize))
+ return -EINVAL;
+ mddev->bitmap_info.chunksize = csize;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_chunksize =
+__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
+
+static ssize_t metadata_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%s\n", (mddev->bitmap_info.external
+ ? "external" : "internal"));
+}
+
+static ssize_t metadata_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap ||
+ mddev->bitmap_info.file ||
+ mddev->bitmap_info.offset)
+ return -EBUSY;
+ if (strncmp(buf, "external", 8) == 0)
+ mddev->bitmap_info.external = 1;
+ else if (strncmp(buf, "internal", 8) == 0)
+ mddev->bitmap_info.external = 0;
+ else
+ return -EINVAL;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_metadata =
+__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+
+static ssize_t can_clear_show(mddev_t *mddev, char *page)
+{
+ int len;
+ if (mddev->bitmap)
+ len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
+ "false" : "true"));
+ else
+ len = sprintf(page, "\n");
+ return len;
+}
+
+static ssize_t can_clear_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (mddev->bitmap == NULL)
+ return -ENOENT;
+ if (strncmp(buf, "false", 5) == 0)
+ mddev->bitmap->need_sync = 1;
+ else if (strncmp(buf, "true", 4) == 0) {
+ if (mddev->degraded)
+ return -EBUSY;
+ mddev->bitmap->need_sync = 0;
+ } else
+ return -EINVAL;
+ return len;
+}
+
+static struct md_sysfs_entry bitmap_can_clear =
+__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
+
+static struct attribute *md_bitmap_attrs[] = {
+ &bitmap_location.attr,
+ &bitmap_timeout.attr,
+ &bitmap_backlog.attr,
+ &bitmap_chunksize.attr,
+ &bitmap_metadata.attr,
+ &bitmap_can_clear.attr,
+ NULL
+};
+struct attribute_group md_bitmap_group = {
+ .name = "bitmap",
+ .attrs = md_bitmap_attrs,
+};
+
+
/* the bitmap API -- for raid personalities */
EXPORT_SYMBOL(bitmap_startwrite);
EXPORT_SYMBOL(bitmap_endwrite);
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index e98900671ca..cb821d76d1b 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -106,7 +106,7 @@ typedef __u16 bitmap_counter_t;
#define BITMAP_BLOCK_SHIFT 9
/* how many blocks per chunk? (this is variable) */
-#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT)
+#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT)
#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1)
@@ -118,16 +118,6 @@ typedef __u16 bitmap_counter_t;
(CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1)
#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1)
-/*
- * on-disk bitmap:
- *
- * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
- * file a page at a time. There's a superblock at the start of the file.
- */
-
-/* map chunks (bits) to file pages - offset by the size of the superblock */
-#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3))
-
#endif
/*
@@ -209,7 +199,6 @@ struct bitmap {
int counter_bits; /* how many bits per block counter */
/* bitmap chunksize -- how much data does each bit represent? */
- unsigned long chunksize;
unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
unsigned long chunks; /* total number of data chunks for the array */
@@ -226,7 +215,6 @@ struct bitmap {
/* bitmap spinlock */
spinlock_t lock;
- long offset; /* offset from superblock if file is NULL */
struct file *file; /* backing disk file */
struct page *sb_page; /* cached copy of the bitmap file superblock */
struct page **filemap; /* list of cache pages for the file */
@@ -238,7 +226,6 @@ struct bitmap {
int allclean;
- unsigned long max_write_behind; /* write-behind mode */
atomic_t behind_writes;
/*
@@ -246,7 +233,6 @@ struct bitmap {
* file, cleaning up bits and flushing out pages to disk as necessary
*/
unsigned long daemon_lastrun; /* jiffies of last run */
- unsigned long daemon_sleep; /* how many seconds between updates? */
unsigned long last_end_sync; /* when we lasted called end_sync to
* update bitmap with resync progress */
@@ -254,6 +240,7 @@ struct bitmap {
wait_queue_head_t write_wait;
wait_queue_head_t overflow_wait;
+ struct sysfs_dirent *sysfs_can_clear;
};
/* the bitmap API */
@@ -282,7 +269,7 @@ void bitmap_close_sync(struct bitmap *bitmap);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(struct bitmap *bitmap);
+void bitmap_daemon_work(mddev_t *mddev);
#endif
#endif
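With the chunk size now read from mddev->bitmap_info, the macros above still reduce to simple shifts. A small standalone sketch of the same arithmetic, with illustrative values only:

#include <stdio.h>

#define BITMAP_BLOCK_SHIFT 9	/* 512-byte blocks, as in bitmap.h */

int main(void)
{
	unsigned long chunksize = 64 * 1024;	/* example: 64KiB of data per bitmap bit */
	unsigned long chunkshift = 16;		/* chunksize == 1 << chunkshift */
	unsigned long long block = 123456;	/* some 512-byte block number */

	unsigned long ratio = chunksize >> BITMAP_BLOCK_SHIFT;	/* CHUNK_BLOCK_RATIO */
	unsigned long shift = chunkshift - BITMAP_BLOCK_SHIFT;	/* CHUNK_BLOCK_SHIFT */
	unsigned long mask  = ratio - 1;			/* CHUNK_BLOCK_MASK */

	printf("blocks per chunk: %lu\n", ratio);
	printf("block %llu -> chunk %llu, offset %llu\n",
	       block, block >> shift, block & (unsigned long long)mask);
	return 0;
}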
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e412980763b..a93637223c8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -71,10 +71,21 @@ struct crypt_iv_operations {
int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
const char *opts);
void (*dtr)(struct crypt_config *cc);
- const char *(*status)(struct crypt_config *cc);
+ int (*init)(struct crypt_config *cc);
+ int (*wipe)(struct crypt_config *cc);
int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};
+struct iv_essiv_private {
+ struct crypto_cipher *tfm;
+ struct crypto_hash *hash_tfm;
+ u8 *salt;
+};
+
+struct iv_benbi_private {
+ int shift;
+};
+
/*
* Crypt: maps a linear range of a block device
* and encrypts / decrypts at the same time.
@@ -102,8 +113,8 @@ struct crypt_config {
struct crypt_iv_operations *iv_gen_ops;
char *iv_mode;
union {
- struct crypto_cipher *essiv_tfm;
- int benbi_shift;
+ struct iv_essiv_private essiv;
+ struct iv_benbi_private benbi;
} iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
@@ -147,6 +158,9 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
* plain: the initial vector is the 32-bit little-endian version of the sector
* number, padded with zeros if necessary.
*
+ * plain64: the initial vector is the 64-bit little-endian version of the sector
+ * number, padded with zeros if necessary.
+ *
* essiv: "encrypted sector|salt initial vector", the sector number is
* encrypted with the bulk cipher using a salt as key. The salt
* should be derived from the bulk cipher's key via hashing.
@@ -169,88 +183,123 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
return 0;
}
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
- const char *opts)
+static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
+ sector_t sector)
{
- struct crypto_cipher *essiv_tfm;
- struct crypto_hash *hash_tfm;
+ memset(iv, 0, cc->iv_size);
+ *(u64 *)iv = cpu_to_le64(sector);
+
+ return 0;
+}
+
+/* Initialise ESSIV - compute salt but no local memory allocations */
+static int crypt_iv_essiv_init(struct crypt_config *cc)
+{
+ struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
struct hash_desc desc;
struct scatterlist sg;
- unsigned int saltsize;
- u8 *salt;
int err;
- if (opts == NULL) {
+ sg_init_one(&sg, cc->key, cc->key_size);
+ desc.tfm = essiv->hash_tfm;
+ desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+ if (err)
+ return err;
+
+ return crypto_cipher_setkey(essiv->tfm, essiv->salt,
+ crypto_hash_digestsize(essiv->hash_tfm));
+}
+
+/* Wipe salt and reset key derived from volume key */
+static int crypt_iv_essiv_wipe(struct crypt_config *cc)
+{
+ struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+ unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+
+ memset(essiv->salt, 0, salt_size);
+
+ return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
+}
+
+static void crypt_iv_essiv_dtr(struct crypt_config *cc)
+{
+ struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+
+ crypto_free_cipher(essiv->tfm);
+ essiv->tfm = NULL;
+
+ crypto_free_hash(essiv->hash_tfm);
+ essiv->hash_tfm = NULL;
+
+ kzfree(essiv->salt);
+ essiv->salt = NULL;
+}
+
+static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ struct crypto_cipher *essiv_tfm = NULL;
+ struct crypto_hash *hash_tfm = NULL;
+ u8 *salt = NULL;
+ int err;
+
+ if (!opts) {
ti->error = "Digest algorithm missing for ESSIV mode";
return -EINVAL;
}
- /* Hash the cipher key with the given hash algorithm */
+ /* Allocate hash algorithm */
hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
- return PTR_ERR(hash_tfm);
+ err = PTR_ERR(hash_tfm);
+ goto bad;
}
- saltsize = crypto_hash_digestsize(hash_tfm);
- salt = kmalloc(saltsize, GFP_KERNEL);
- if (salt == NULL) {
+ salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+ if (!salt) {
ti->error = "Error kmallocing salt storage in ESSIV";
- crypto_free_hash(hash_tfm);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto bad;
}
- sg_init_one(&sg, cc->key, cc->key_size);
- desc.tfm = hash_tfm;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
- crypto_free_hash(hash_tfm);
-
- if (err) {
- ti->error = "Error calculating hash in ESSIV";
- kfree(salt);
- return err;
- }
-
- /* Setup the essiv_tfm with the given salt */
+ /* Allocate essiv_tfm */
essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(essiv_tfm)) {
ti->error = "Error allocating crypto tfm for ESSIV";
- kfree(salt);
- return PTR_ERR(essiv_tfm);
+ err = PTR_ERR(essiv_tfm);
+ goto bad;
}
if (crypto_cipher_blocksize(essiv_tfm) !=
crypto_ablkcipher_ivsize(cc->tfm)) {
ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
- crypto_free_cipher(essiv_tfm);
- kfree(salt);
- return -EINVAL;
+ err = -EINVAL;
+ goto bad;
}
- err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
- if (err) {
- ti->error = "Failed to set key for ESSIV cipher";
- crypto_free_cipher(essiv_tfm);
- kfree(salt);
- return err;
- }
- kfree(salt);
- cc->iv_gen_private.essiv_tfm = essiv_tfm;
+ cc->iv_gen_private.essiv.salt = salt;
+ cc->iv_gen_private.essiv.tfm = essiv_tfm;
+ cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
+
return 0;
-}
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
-{
- crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
- cc->iv_gen_private.essiv_tfm = NULL;
+bad:
+ if (essiv_tfm && !IS_ERR(essiv_tfm))
+ crypto_free_cipher(essiv_tfm);
+ if (hash_tfm && !IS_ERR(hash_tfm))
+ crypto_free_hash(hash_tfm);
+ kfree(salt);
+ return err;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
memset(iv, 0, cc->iv_size);
*(u64 *)iv = cpu_to_le64(sector);
- crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+ crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
return 0;
}
@@ -273,7 +322,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- cc->iv_gen_private.benbi_shift = 9 - log;
+ cc->iv_gen_private.benbi.shift = 9 - log;
return 0;
}
@@ -288,7 +337,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
- val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+ val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
return 0;
@@ -305,9 +354,15 @@ static struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
+static struct crypt_iv_operations crypt_iv_plain64_ops = {
+ .generator = crypt_iv_plain64_gen
+};
+
static struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
+ .init = crypt_iv_essiv_init,
+ .wipe = crypt_iv_essiv_wipe,
.generator = crypt_iv_essiv_gen
};
@@ -934,14 +989,14 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
- return 0;
+ return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
static int crypt_wipe_key(struct crypt_config *cc)
{
clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
memset(&cc->key, 0, cc->key_size * sizeof(u8));
- return 0;
+ return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}
/*
@@ -983,11 +1038,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -ENOMEM;
}
- if (crypt_set_key(cc, argv[1])) {
- ti->error = "Error decoding key";
- goto bad_cipher;
- }
-
/* Compatibility mode for old dm-crypt cipher strings */
if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
chainmode = "cbc";
@@ -1015,6 +1065,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
strcpy(cc->chainmode, chainmode);
cc->tfm = tfm;
+ if (crypt_set_key(cc, argv[1]) < 0) {
+ ti->error = "Error decoding and setting key";
+ goto bad_ivmode;
+ }
+
/*
* Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
* See comments at iv code
@@ -1024,6 +1079,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops = NULL;
else if (strcmp(ivmode, "plain") == 0)
cc->iv_gen_ops = &crypt_iv_plain_ops;
+ else if (strcmp(ivmode, "plain64") == 0)
+ cc->iv_gen_ops = &crypt_iv_plain64_ops;
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
@@ -1039,6 +1096,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
goto bad_ivmode;
+ if (cc->iv_gen_ops && cc->iv_gen_ops->init &&
+ cc->iv_gen_ops->init(cc) < 0) {
+ ti->error = "Error initialising IV";
+ goto bad_slab_pool;
+ }
+
cc->iv_size = crypto_ablkcipher_ivsize(tfm);
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
@@ -1085,11 +1148,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_bs;
}
- if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
- ti->error = "Error setting key";
- goto bad_device;
- }
-
if (sscanf(argv[2], "%llu", &tmpll) != 1) {
ti->error = "Invalid iv_offset sector";
goto bad_device;
@@ -1278,6 +1336,7 @@ static void crypt_resume(struct dm_target *ti)
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
struct crypt_config *cc = ti->private;
+ int ret = -EINVAL;
if (argc < 2)
goto error;
@@ -1287,10 +1346,22 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
DMWARN("not suspended during key manipulation.");
return -EINVAL;
}
- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
- return crypt_set_key(cc, argv[2]);
- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
+ if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
+ ret = crypt_set_key(cc, argv[2]);
+ if (ret)
+ return ret;
+ if (cc->iv_gen_ops && cc->iv_gen_ops->init)
+ ret = cc->iv_gen_ops->init(cc);
+ return ret;
+ }
+ if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
+ if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
+ ret = cc->iv_gen_ops->wipe(cc);
+ if (ret)
+ return ret;
+ }
return crypt_wipe_key(cc);
+ }
}
error:
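The new init/wipe hooks separate the ESSIV salt handling from constructor-time allocation: the salt is the hash of the volume key, it becomes the key of a second cipher, and that cipher encrypts the little-endian sector number to produce the IV. A rough userspace sketch of that derivation, assuming OpenSSL's SHA-256 and AES purely for illustration in place of the kernel crypto API:

#include <stdint.h>
#include <stdio.h>
#include <openssl/aes.h>
#include <openssl/sha.h>

/* essiv_iv: iv = E_salt(sector), salt = H(volume_key) -- mirrors crypt_iv_essiv_gen */
static void essiv_iv(const uint8_t *key, size_t key_len,
		     uint64_t sector, uint8_t iv[16])
{
	uint8_t salt[SHA256_DIGEST_LENGTH];
	uint8_t block[16] = { 0 };
	AES_KEY essiv_key;

	/* "init" step: derive the salt from the volume key (crypt_iv_essiv_init). */
	SHA256(key, key_len, salt);
	AES_set_encrypt_key(salt, 256, &essiv_key);

	/* little-endian sector number, zero padded, then encrypted. */
	for (int i = 0; i < 8; i++)
		block[i] = (uint8_t)(sector >> (8 * i));
	AES_encrypt(block, iv, &essiv_key);
}

int main(void)
{
	uint8_t key[32] = { 1, 2, 3 };	/* placeholder volume key */
	uint8_t iv[16];

	essiv_iv(key, sizeof(key), 42, iv);
	for (int i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}

The wipe hook in the patch does the inverse housekeeping: it zeroes the stored salt and re-keys the ESSIV cipher with it, so a wiped mapping can no longer generate meaningful IVs.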
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 7dbe652efb5..2b7907b6dd0 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -172,7 +172,8 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
}
/* Validate the chunk size against the device block size */
- if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+ if (chunk_size %
+ (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}
@@ -190,6 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
}
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ struct dm_snapshot *snap,
unsigned *args_used,
struct dm_exception_store **store)
{
@@ -198,7 +200,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_exception_store *tmp_store;
char persistent;
- if (argc < 3) {
+ if (argc < 2) {
ti->error = "Insufficient exception store arguments";
return -EINVAL;
}
@@ -209,14 +211,15 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
return -ENOMEM;
}
- persistent = toupper(*argv[1]);
+ persistent = toupper(*argv[0]);
if (persistent == 'P')
type = get_type("P");
else if (persistent == 'N')
type = get_type("N");
else {
ti->error = "Persistent flag is not P or N";
- return -EINVAL;
+ r = -EINVAL;
+ goto bad_type;
}
if (!type) {
@@ -226,32 +229,23 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
}
tmp_store->type = type;
- tmp_store->ti = ti;
-
- r = dm_get_device(ti, argv[0], 0, 0,
- FMODE_READ | FMODE_WRITE, &tmp_store->cow);
- if (r) {
- ti->error = "Cannot get COW device";
- goto bad_cow;
- }
+ tmp_store->snap = snap;
- r = set_chunk_size(tmp_store, argv[2], &ti->error);
+ r = set_chunk_size(tmp_store, argv[1], &ti->error);
if (r)
- goto bad_ctr;
+ goto bad;
r = type->ctr(tmp_store, 0, NULL);
if (r) {
ti->error = "Exception store type constructor failed";
- goto bad_ctr;
+ goto bad;
}
- *args_used = 3;
+ *args_used = 2;
*store = tmp_store;
return 0;
-bad_ctr:
- dm_put_device(ti, tmp_store->cow);
-bad_cow:
+bad:
put_type(type);
bad_type:
kfree(tmp_store);
@@ -262,7 +256,6 @@ EXPORT_SYMBOL(dm_exception_store_create);
void dm_exception_store_destroy(struct dm_exception_store *store)
{
store->type->dtr(store);
- dm_put_device(store->ti, store->cow);
put_type(store->type);
kfree(store);
}
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 8a223a48802..e8dfa06af3b 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -26,7 +26,7 @@ typedef sector_t chunk_t;
* of chunks that follow contiguously. Remaining bits hold the number of the
* chunk within the device.
*/
-struct dm_snap_exception {
+struct dm_exception {
struct list_head hash_list;
chunk_t old_chunk;
@@ -64,17 +64,34 @@ struct dm_exception_store_type {
* Find somewhere to store the next exception.
*/
int (*prepare_exception) (struct dm_exception_store *store,
- struct dm_snap_exception *e);
+ struct dm_exception *e);
/*
* Update the metadata with this exception.
*/
void (*commit_exception) (struct dm_exception_store *store,
- struct dm_snap_exception *e,
+ struct dm_exception *e,
void (*callback) (void *, int success),
void *callback_context);
/*
+ * Returns 0 if the exception store is empty.
+ *
+ * If there are exceptions still to be merged, sets
+ * *last_old_chunk and *last_new_chunk to the most recent
+ * still-to-be-merged chunk and returns the number of
+ * consecutive previous ones.
+ */
+ int (*prepare_merge) (struct dm_exception_store *store,
+ chunk_t *last_old_chunk, chunk_t *last_new_chunk);
+
+ /*
+ * Clear the last n exceptions.
+ * nr_merged must be <= the value returned by prepare_merge.
+ */
+ int (*commit_merge) (struct dm_exception_store *store, int nr_merged);
+
+ /*
* The snapshot is invalid, note this in the metadata.
*/
void (*drop_snapshot) (struct dm_exception_store *store);
@@ -86,19 +103,19 @@ struct dm_exception_store_type {
/*
* Return how full the snapshot is.
*/
- void (*fraction_full) (struct dm_exception_store *store,
- sector_t *numerator,
- sector_t *denominator);
+ void (*usage) (struct dm_exception_store *store,
+ sector_t *total_sectors, sector_t *sectors_allocated,
+ sector_t *metadata_sectors);
/* For internal device-mapper use only. */
struct list_head list;
};
+struct dm_snapshot;
+
struct dm_exception_store {
struct dm_exception_store_type *type;
- struct dm_target *ti;
-
- struct dm_dev *cow;
+ struct dm_snapshot *snap;
/* Size of data blocks saved - must be a power of 2 */
unsigned chunk_size;
@@ -109,6 +126,11 @@ struct dm_exception_store {
};
/*
+ * Obtain the cow device used by a given snapshot.
+ */
+struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
+
+/*
 * Functions to manipulate consecutive chunks
*/
# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
@@ -120,18 +142,25 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}
-static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
{
return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}
-static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
{
e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);
BUG_ON(!dm_consecutive_chunk_count(e));
}
+static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
+{
+ BUG_ON(!dm_consecutive_chunk_count(e));
+
+ e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
+}
+
# else
# define DM_CHUNK_CONSECUTIVE_BITS 0
@@ -140,12 +169,16 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk;
}
-static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
+static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
{
return 0;
}
-static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
+static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
+{
+}
+
+static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
{
}
@@ -162,7 +195,7 @@ static inline sector_t get_dev_size(struct block_device *bdev)
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
sector_t sector)
{
- return (sector & ~store->chunk_mask) >> store->chunk_shift;
+ return sector >> store->chunk_shift;
}
int dm_exception_store_type_register(struct dm_exception_store_type *type);
@@ -173,6 +206,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
char **error);
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ struct dm_snapshot *snap,
unsigned *args_used,
struct dm_exception_store **store);
void dm_exception_store_destroy(struct dm_exception_store *store);
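The consecutive-chunk helpers above pack a run length into the top bits of new_chunk, so a run of adjacent exceptions fits in a single entry and the new merge path can trim it from the end. A standalone sketch of the same encoding, using 56 bits for the chunk number as the header does on 64-bit builds:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define DM_CHUNK_NUMBER_BITS 56ULL

static uint64_t chunk_number(uint64_t chunk)
{
	return chunk & ((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}

static unsigned consecutive_count(uint64_t chunk)
{
	return (unsigned)(chunk >> DM_CHUNK_NUMBER_BITS);
}

static void consecutive_inc(uint64_t *chunk)
{
	*chunk += 1ULL << DM_CHUNK_NUMBER_BITS;
}

static void consecutive_dec(uint64_t *chunk)
{
	assert(consecutive_count(*chunk));	/* mirrors the BUG_ON in the header */
	*chunk -= 1ULL << DM_CHUNK_NUMBER_BITS;
}

int main(void)
{
	uint64_t new_chunk = 1000;	/* exception starts at chunk 1000 */

	consecutive_inc(&new_chunk);	/* chunks 1000..1001 are now one run */
	consecutive_inc(&new_chunk);	/* 1000..1002 */
	printf("chunk=%llu run=%u\n",
	       (unsigned long long)chunk_number(new_chunk), consecutive_count(new_chunk));

	consecutive_dec(&new_chunk);	/* the merge path trims the run from the end */
	printf("chunk=%llu run=%u\n",
	       (unsigned long long)chunk_number(new_chunk), consecutive_count(new_chunk));
	return 0;
}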
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3a2e6a2f8bd..10f457ca6af 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,6 +5,8 @@
* This file is released under the GPL.
*/
+#include "dm.h"
+
#include <linux/device-mapper.h>
#include <linux/bio.h>
@@ -14,12 +16,19 @@
#include <linux/slab.h>
#include <linux/dm-io.h>
+#define DM_MSG_PREFIX "io"
+
+#define DM_IO_MAX_REGIONS BITS_PER_LONG
+
struct dm_io_client {
mempool_t *pool;
struct bio_set *bios;
};
-/* FIXME: can we shrink this ? */
+/*
+ * Aligning 'struct io' reduces the number of bits required to store
+ * its address. Refer to store_io_and_region_in_bio() below.
+ */
struct io {
unsigned long error_bits;
unsigned long eopnotsupp_bits;
@@ -28,7 +37,9 @@ struct io {
struct dm_io_client *client;
io_notify_fn callback;
void *context;
-};
+} __attribute__((aligned(DM_IO_MAX_REGIONS)));
+
+static struct kmem_cache *_dm_io_cache;
/*
* io contexts are only dynamically allocated for asynchronous
@@ -53,7 +64,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
if (!client)
return ERR_PTR(-ENOMEM);
- client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
+ client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
if (!client->pool)
goto bad;
@@ -88,18 +99,29 @@ EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
* We need to keep track of which region a bio is doing io for.
- * In order to save a memory allocation we store this the last
- * bvec which we know is unused (blech).
- * XXX This is ugly and can OOPS with some configs... find another way.
+ * To avoid a memory allocation to store just 5 or 6 bits, we
+ * ensure the 'struct io' pointer is aligned so enough low bits are
+ * always zero and then combine it with the region number directly in
+ * bi_private.
*---------------------------------------------------------------*/
-static inline void bio_set_region(struct bio *bio, unsigned region)
+static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
+ unsigned region)
{
- bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
+ if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
+ DMCRIT("Unaligned struct io pointer %p", io);
+ BUG();
+ }
+
+ bio->bi_private = (void *)((unsigned long)io | region);
}
-static inline unsigned bio_get_region(struct bio *bio)
+static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
+ unsigned *region)
{
- return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
+ unsigned long val = (unsigned long)bio->bi_private;
+
+ *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
+ *region = val & (DM_IO_MAX_REGIONS - 1);
}
/*-----------------------------------------------------------------
@@ -140,10 +162,8 @@ static void endio(struct bio *bio, int error)
/*
* The bio destructor in bio_put() may use the io object.
*/
- io = bio->bi_private;
- region = bio_get_region(bio);
+ retrieve_io_and_region_from_bio(bio, &io, &region);
- bio->bi_max_vecs++;
bio_put(bio);
dec_count(io, region, error);
@@ -243,7 +263,10 @@ static void vm_dp_init(struct dpages *dp, void *data)
static void dm_bio_destructor(struct bio *bio)
{
- struct io *io = bio->bi_private;
+ unsigned region;
+ struct io *io;
+
+ retrieve_io_and_region_from_bio(bio, &io, &region);
bio_free(bio, io->client->bios);
}
@@ -286,26 +309,23 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned num_bvecs;
sector_t remaining = where->count;
- while (remaining) {
+ /*
+ * where->count may be zero if rw holds a write barrier and we
+ * need to send a zero-sized barrier.
+ */
+ do {
/*
- * Allocate a suitably sized-bio: we add an extra
- * bvec for bio_get/set_region() and decrement bi_max_vecs
- * to hide it from bio_add_page().
+ * Allocate a suitably sized-bio.
*/
num_bvecs = dm_sector_div_up(remaining,
(PAGE_SIZE >> SECTOR_SHIFT));
- num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
- num_bvecs);
- if (unlikely(num_bvecs > BIO_MAX_PAGES))
- num_bvecs = BIO_MAX_PAGES;
+ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
- bio->bi_private = io;
bio->bi_destructor = dm_bio_destructor;
- bio->bi_max_vecs--;
- bio_set_region(bio, region);
+ store_io_and_region_in_bio(bio, io, region);
/*
* Try and add as many pages as possible.
@@ -323,7 +343,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
atomic_inc(&io->count);
submit_bio(rw, bio);
- }
+ } while (remaining);
}
static void dispatch_io(int rw, unsigned int num_regions,
@@ -333,6 +353,8 @@ static void dispatch_io(int rw, unsigned int num_regions,
int i;
struct dpages old_pages = *dp;
+ BUG_ON(num_regions > DM_IO_MAX_REGIONS);
+
if (sync)
rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
@@ -342,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count)
+ if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
do_region(rw, i, where + i, dp, io);
}
@@ -357,7 +379,14 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, int rw, struct dpages *dp,
unsigned long *error_bits)
{
- struct io io;
+ /*
+ * gcc <= 4.3 can't do the alignment for stack variables, so we must
+ * align it on our own.
+ * volatile prevents the optimizer from removing or reusing
+ * "io_" field from the stack frame (allowed in ANSI C).
+ */
+ volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
+ struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
WARN_ON(1);
@@ -365,33 +394,33 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
retry:
- io.error_bits = 0;
- io.eopnotsupp_bits = 0;
- atomic_set(&io.count, 1); /* see dispatch_io() */
- io.sleeper = current;
- io.client = client;
+ io->error_bits = 0;
+ io->eopnotsupp_bits = 0;
+ atomic_set(&io->count, 1); /* see dispatch_io() */
+ io->sleeper = current;
+ io->client = client;
- dispatch_io(rw, num_regions, where, dp, &io, 1);
+ dispatch_io(rw, num_regions, where, dp, io, 1);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&io.count))
+ if (!atomic_read(&io->count))
break;
io_schedule();
}
set_current_state(TASK_RUNNING);
- if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
+ if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
rw &= ~(1 << BIO_RW_BARRIER);
goto retry;
}
if (error_bits)
- *error_bits = io.error_bits;
+ *error_bits = io->error_bits;
- return io.error_bits ? -EIO : 0;
+ return io->error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
@@ -472,3 +501,18 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
+
+int __init dm_io_init(void)
+{
+ _dm_io_cache = KMEM_CACHE(io, 0);
+ if (!_dm_io_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dm_io_exit(void)
+{
+ kmem_cache_destroy(_dm_io_cache);
+ _dm_io_cache = NULL;
+}
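Packing the region number into the low bits of an aligned struct io pointer replaces the old trick of hiding it in an unused bvec. A standalone sketch of just the pack/unpack step, assuming 64-bit longs (so 64 regions) and hypothetical names in place of the bio plumbing:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define MAX_REGIONS 64	/* DM_IO_MAX_REGIONS == BITS_PER_LONG on 64-bit */

/* The real struct io is aligned to MAX_REGIONS so its low bits are always zero. */
struct io {
	unsigned long error_bits;
} __attribute__((aligned(MAX_REGIONS)));

static void *pack(struct io *io, unsigned region)
{
	assert(((uintptr_t)io % MAX_REGIONS) == 0);	/* alignment check, like the DMCRIT/BUG() path */
	assert(region < MAX_REGIONS);
	return (void *)((uintptr_t)io | region);
}

static void unpack(void *private, struct io **io, unsigned *region)
{
	uintptr_t val = (uintptr_t)private;

	*io = (struct io *)(val & ~(uintptr_t)(MAX_REGIONS - 1));
	*region = (unsigned)(val & (MAX_REGIONS - 1));
}

int main(void)
{
	static struct io my_io;
	struct io *io;
	unsigned region;

	void *bi_private = pack(&my_io, 5);	/* stands in for store_io_and_region_in_bio() */
	unpack(bi_private, &io, &region);	/* stands in for retrieve_io_and_region_from_bio() */

	printf("io recovered: %d, region: %u\n", io == &my_io, region);
	return 0;
}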
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index a6794293158..1d669322b27 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -56,6 +56,11 @@ static void dm_hash_remove_all(int keep_open_devices);
*/
static DECLARE_RWSEM(_hash_lock);
+/*
+ * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
+ */
+static DEFINE_MUTEX(dm_hash_cells_mutex);
+
static void init_buckets(struct list_head *buckets)
{
unsigned int i;
@@ -206,7 +211,9 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
}
dm_get(md);
+ mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(md, cell);
+ mutex_unlock(&dm_hash_cells_mutex);
up_write(&_hash_lock);
return 0;
@@ -224,9 +231,11 @@ static void __hash_remove(struct hash_cell *hc)
/* remove from the dev hash */
list_del(&hc->uuid_list);
list_del(&hc->name_list);
+ mutex_lock(&dm_hash_cells_mutex);
dm_set_mdptr(hc->md, NULL);
+ mutex_unlock(&dm_hash_cells_mutex);
- table = dm_get_table(hc->md);
+ table = dm_get_live_table(hc->md);
if (table) {
dm_table_event(table);
dm_table_put(table);
@@ -321,13 +330,15 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
*/
list_del(&hc->name_list);
old_name = hc->name;
+ mutex_lock(&dm_hash_cells_mutex);
hc->name = new_name;
+ mutex_unlock(&dm_hash_cells_mutex);
list_add(&hc->name_list, _name_buckets + hash_str(new_name));
/*
* Wake up any dm event waiters.
*/
- table = dm_get_table(hc->md);
+ table = dm_get_live_table(hc->md);
if (table) {
dm_table_event(table);
dm_table_put(table);
@@ -512,8 +523,6 @@ static int list_versions(struct dm_ioctl *param, size_t param_size)
return 0;
}
-
-
static int check_name(const char *name)
{
if (strchr(name, '/')) {
@@ -525,6 +534,40 @@ static int check_name(const char *name)
}
/*
+ * On successful return, the caller must not attempt to acquire
+ * _hash_lock without first calling dm_table_put, because dm_table_destroy
+ * waits for this dm_table_put and could be called under this lock.
+ */
+static struct dm_table *dm_get_inactive_table(struct mapped_device *md)
+{
+ struct hash_cell *hc;
+ struct dm_table *table = NULL;
+
+ down_read(&_hash_lock);
+ hc = dm_get_mdptr(md);
+ if (!hc || hc->md != md) {
+ DMWARN("device has been removed from the dev hash table.");
+ goto out;
+ }
+
+ table = hc->new_map;
+ if (table)
+ dm_table_get(table);
+
+out:
+ up_read(&_hash_lock);
+
+ return table;
+}
+
+static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
+ struct dm_ioctl *param)
+{
+ return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
+ dm_get_inactive_table(md) : dm_get_live_table(md);
+}
+
+/*
* Fills in a dm_ioctl structure, ready for sending back to
* userland.
*/
@@ -536,7 +579,7 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
DM_ACTIVE_PRESENT_FLAG);
- if (dm_suspended(md))
+ if (dm_suspended_md(md))
param->flags |= DM_SUSPEND_FLAG;
param->dev = huge_encode_dev(disk_devt(disk));
@@ -548,18 +591,30 @@ static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
*/
param->open_count = dm_open_count(md);
- if (get_disk_ro(disk))
- param->flags |= DM_READONLY_FLAG;
-
param->event_nr = dm_get_event_nr(md);
+ param->target_count = 0;
- table = dm_get_table(md);
+ table = dm_get_live_table(md);
if (table) {
- param->flags |= DM_ACTIVE_PRESENT_FLAG;
- param->target_count = dm_table_get_num_targets(table);
+ if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
+ if (get_disk_ro(disk))
+ param->flags |= DM_READONLY_FLAG;
+ param->target_count = dm_table_get_num_targets(table);
+ }
dm_table_put(table);
- } else
- param->target_count = 0;
+
+ param->flags |= DM_ACTIVE_PRESENT_FLAG;
+ }
+
+ if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
+ table = dm_get_inactive_table(md);
+ if (table) {
+ if (!(dm_table_get_mode(table) & FMODE_WRITE))
+ param->flags |= DM_READONLY_FLAG;
+ param->target_count = dm_table_get_num_targets(table);
+ dm_table_put(table);
+ }
+ }
return 0;
}
@@ -634,9 +689,9 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
* Sneakily write in both the name and the uuid
* while we have the cell.
*/
- strncpy(param->name, hc->name, sizeof(param->name));
+ strlcpy(param->name, hc->name, sizeof(param->name));
if (hc->uuid)
- strncpy(param->uuid, hc->uuid, sizeof(param->uuid)-1);
+ strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
else
param->uuid[0] = '\0';
@@ -784,7 +839,7 @@ static int do_suspend(struct dm_ioctl *param)
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
- if (!dm_suspended(md))
+ if (!dm_suspended_md(md))
r = dm_suspend(md, suspend_flags);
if (!r)
@@ -800,7 +855,7 @@ static int do_resume(struct dm_ioctl *param)
unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct hash_cell *hc;
struct mapped_device *md;
- struct dm_table *new_map;
+ struct dm_table *new_map, *old_map = NULL;
down_write(&_hash_lock);
@@ -826,14 +881,14 @@ static int do_resume(struct dm_ioctl *param)
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
if (param->flags & DM_NOFLUSH_FLAG)
suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
- if (!dm_suspended(md))
+ if (!dm_suspended_md(md))
dm_suspend(md, suspend_flags);
- r = dm_swap_table(md, new_map);
- if (r) {
+ old_map = dm_swap_table(md, new_map);
+ if (IS_ERR(old_map)) {
dm_table_destroy(new_map);
dm_put(md);
- return r;
+ return PTR_ERR(old_map);
}
if (dm_table_get_mode(new_map) & FMODE_WRITE)
@@ -842,9 +897,11 @@ static int do_resume(struct dm_ioctl *param)
set_disk_ro(dm_disk(md), 1);
}
- if (dm_suspended(md))
+ if (dm_suspended_md(md))
r = dm_resume(md);
+ if (old_map)
+ dm_table_destroy(old_map);
if (!r) {
dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
@@ -982,7 +1039,7 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size)
if (r)
goto out;
- table = dm_get_table(md);
+ table = dm_get_live_or_inactive_table(md, param);
if (table) {
retrieve_status(table, param, param_size);
dm_table_put(table);
@@ -1215,7 +1272,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
if (r)
goto out;
- table = dm_get_table(md);
+ table = dm_get_live_or_inactive_table(md, param);
if (table) {
retrieve_deps(table, param, param_size);
dm_table_put(table);
@@ -1244,13 +1301,13 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
if (r)
goto out;
- table = dm_get_table(md);
+ table = dm_get_live_or_inactive_table(md, param);
if (table) {
retrieve_status(table, param, param_size);
dm_table_put(table);
}
- out:
+out:
dm_put(md);
return r;
}
@@ -1288,10 +1345,15 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out;
}
- table = dm_get_table(md);
+ table = dm_get_live_table(md);
if (!table)
goto out_argv;
+ if (dm_deleting_md(md)) {
+ r = -ENXIO;
+ goto out_table;
+ }
+
ti = dm_table_find_target(table, tmsg->sector);
if (!dm_target_is_valid(ti)) {
DMWARN("Target message sector outside device.");
@@ -1303,6 +1365,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
r = -EINVAL;
}
+ out_table:
dm_table_put(table);
out_argv:
kfree(argv);
@@ -1582,8 +1645,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
if (!md)
return -ENXIO;
- dm_get(md);
- down_read(&_hash_lock);
+ mutex_lock(&dm_hash_cells_mutex);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
r = -ENXIO;
@@ -1596,8 +1658,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
strcpy(uuid, hc->uuid ? : "");
out:
- up_read(&_hash_lock);
- dm_put(md);
+ mutex_unlock(&dm_hash_cells_mutex);
return r;
}
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 3e3fc06cb86..addf8347504 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -450,7 +450,10 @@ static void dispatch_job(struct kcopyd_job *job)
{
struct dm_kcopyd_client *kc = job->kc;
atomic_inc(&kc->nr_jobs);
- push(&kc->pages_jobs, job);
+ if (unlikely(!job->source.count))
+ push(&kc->complete_jobs, job);
+ else
+ push(&kc->pages_jobs, job);
wake(kc);
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 9443896ede0..7035582786f 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -145,8 +145,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
EXPORT_SYMBOL(dm_dirty_log_type_unregister);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
- struct dm_target *ti,
- unsigned int argc, char **argv)
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned int argc, char **argv)
{
struct dm_dirty_log_type *type;
struct dm_dirty_log *log;
@@ -161,6 +162,7 @@ struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
return NULL;
}
+ log->flush_callback_fn = flush_callback_fn;
log->type = type;
if (type->ctr(log, ti, argc, argv)) {
kfree(log);
@@ -208,7 +210,9 @@ struct log_header {
struct log_c {
struct dm_target *ti;
- int touched;
+ int touched_dirtied;
+ int touched_cleaned;
+ int flush_failed;
uint32_t region_size;
unsigned int region_count;
region_t sync_count;
@@ -233,6 +237,7 @@ struct log_c {
* Disk log fields
*/
int log_dev_failed;
+ int log_dev_flush_failed;
struct dm_dev *log_dev;
struct log_header header;
@@ -253,14 +258,14 @@ static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
ext2_set_bit(bit, (unsigned long *) bs);
- l->touched = 1;
+ l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
ext2_clear_bit(bit, (unsigned long *) bs);
- l->touched = 1;
+ l->touched_dirtied = 1;
}
/*----------------------------------------------------------------
@@ -287,6 +292,19 @@ static int rw_header(struct log_c *lc, int rw)
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
+static int flush_header(struct log_c *lc)
+{
+ struct dm_io_region null_location = {
+ .bdev = lc->header_location.bdev,
+ .sector = 0,
+ .count = 0,
+ };
+
+ lc->io_req.bi_rw = WRITE_BARRIER;
+
+ return dm_io(&lc->io_req, 1, &null_location, NULL);
+}
+
static int read_header(struct log_c *log)
{
int r;
@@ -378,7 +396,9 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
}
lc->ti = ti;
- lc->touched = 0;
+ lc->touched_dirtied = 0;
+ lc->touched_cleaned = 0;
+ lc->flush_failed = 0;
lc->region_size = region_size;
lc->region_count = region_count;
lc->sync = sync;
@@ -406,6 +426,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
} else {
lc->log_dev = dev;
lc->log_dev_failed = 0;
+ lc->log_dev_flush_failed = 0;
lc->header_location.bdev = lc->log_dev->bdev;
lc->header_location.sector = 0;
@@ -614,6 +635,11 @@ static int disk_resume(struct dm_dirty_log *log)
/* write the new header */
r = rw_header(lc, WRITE);
+ if (!r) {
+ r = flush_header(lc);
+ if (r)
+ lc->log_dev_flush_failed = 1;
+ }
if (r) {
DMWARN("%s: Failed to write header on dirty region log device",
lc->log_dev->name);
@@ -656,18 +682,40 @@ static int core_flush(struct dm_dirty_log *log)
static int disk_flush(struct dm_dirty_log *log)
{
- int r;
- struct log_c *lc = (struct log_c *) log->context;
+ int r, i;
+ struct log_c *lc = log->context;
/* only write if the log has changed */
- if (!lc->touched)
+ if (!lc->touched_cleaned && !lc->touched_dirtied)
return 0;
+ if (lc->touched_cleaned && log->flush_callback_fn &&
+ log->flush_callback_fn(lc->ti)) {
+ /*
+ * At this point it is impossible to determine which
+ * regions are clean and which are dirty (without
+ * re-reading the log off disk). So mark all of them
+ * dirty.
+ */
+ lc->flush_failed = 1;
+ for (i = 0; i < lc->region_count; i++)
+ log_clear_bit(lc, lc->clean_bits, i);
+ }
+
r = rw_header(lc, WRITE);
if (r)
fail_log_device(lc);
- else
- lc->touched = 0;
+ else {
+ if (lc->touched_dirtied) {
+ r = flush_header(lc);
+ if (r) {
+ lc->log_dev_flush_failed = 1;
+ fail_log_device(lc);
+ } else
+ lc->touched_dirtied = 0;
+ }
+ lc->touched_cleaned = 0;
+ }
return r;
}
@@ -681,7 +729,8 @@ static void core_mark_region(struct dm_dirty_log *log, region_t region)
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
- log_set_bit(lc, lc->clean_bits, region);
+ if (likely(!lc->flush_failed))
+ log_set_bit(lc, lc->clean_bits, region);
}
static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
@@ -762,7 +811,9 @@ static int disk_status(struct dm_dirty_log *log, status_type_t status,
switch(status) {
case STATUSTYPE_INFO:
DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
- lc->log_dev_failed ? 'D' : 'A');
+ lc->log_dev_flush_failed ? 'F' :
+ lc->log_dev_failed ? 'D' :
+ 'A');
break;
case STATUSTYPE_TABLE:
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index dce971dbdfa..e81345a1d08 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -93,6 +93,10 @@ struct multipath {
* can resubmit bios on error.
*/
mempool_t *mpio_pool;
+
+ struct mutex work_mutex;
+
+ unsigned suspended; /* Don't create new I/O internally when set. */
};
/*
@@ -198,6 +202,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
+ mutex_init(&m->work_mutex);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -885,13 +890,18 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
return r;
}
-static void multipath_dtr(struct dm_target *ti)
+static void flush_multipath_work(void)
{
- struct multipath *m = (struct multipath *) ti->private;
-
flush_workqueue(kmpath_handlerd);
flush_workqueue(kmultipathd);
flush_scheduled_work();
+}
+
+static void multipath_dtr(struct dm_target *ti)
+{
+ struct multipath *m = ti->private;
+
+ flush_multipath_work();
free_multipath(m);
}
@@ -1261,6 +1271,16 @@ static void multipath_presuspend(struct dm_target *ti)
queue_if_no_path(m, 0, 1);
}
+static void multipath_postsuspend(struct dm_target *ti)
+{
+ struct multipath *m = ti->private;
+
+ mutex_lock(&m->work_mutex);
+ m->suspended = 1;
+ flush_multipath_work();
+ mutex_unlock(&m->work_mutex);
+}
+
/*
* Restore the queue_if_no_path setting.
*/
@@ -1269,6 +1289,10 @@ static void multipath_resume(struct dm_target *ti)
struct multipath *m = (struct multipath *) ti->private;
unsigned long flags;
+ mutex_lock(&m->work_mutex);
+ m->suspended = 0;
+ mutex_unlock(&m->work_mutex);
+
spin_lock_irqsave(&m->lock, flags);
m->queue_if_no_path = m->saved_queue_if_no_path;
spin_unlock_irqrestore(&m->lock, flags);
@@ -1397,51 +1421,71 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
- int r;
+ int r = -EINVAL;
struct dm_dev *dev;
struct multipath *m = (struct multipath *) ti->private;
action_fn action;
+ mutex_lock(&m->work_mutex);
+
+ if (m->suspended) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ if (dm_suspended(ti)) {
+ r = -EBUSY;
+ goto out;
+ }
+
if (argc == 1) {
- if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
- return queue_if_no_path(m, 1, 0);
- else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
- return queue_if_no_path(m, 0, 0);
+ if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
+ r = queue_if_no_path(m, 1, 0);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
+ r = queue_if_no_path(m, 0, 0);
+ goto out;
+ }
}
- if (argc != 2)
- goto error;
+ if (argc != 2) {
+ DMWARN("Unrecognised multipath message received.");
+ goto out;
+ }
- if (!strnicmp(argv[0], MESG_STR("disable_group")))
- return bypass_pg_num(m, argv[1], 1);
- else if (!strnicmp(argv[0], MESG_STR("enable_group")))
- return bypass_pg_num(m, argv[1], 0);
- else if (!strnicmp(argv[0], MESG_STR("switch_group")))
- return switch_pg_num(m, argv[1]);
- else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
+ if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
+ r = bypass_pg_num(m, argv[1], 1);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
+ r = bypass_pg_num(m, argv[1], 0);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
+ r = switch_pg_num(m, argv[1]);
+ goto out;
+ } else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
action = reinstate_path;
else if (!strnicmp(argv[0], MESG_STR("fail_path")))
action = fail_path;
- else
- goto error;
+ else {
+ DMWARN("Unrecognised multipath message received.");
+ goto out;
+ }
r = dm_get_device(ti, argv[1], ti->begin, ti->len,
dm_table_get_mode(ti->table), &dev);
if (r) {
DMWARN("message: error getting device %s",
argv[1]);
- return -EINVAL;
+ goto out;
}
r = action_dev(m, dev, action);
dm_put_device(ti, dev);
+out:
+ mutex_unlock(&m->work_mutex);
return r;
-
-error:
- DMWARN("Unrecognised multipath message received.");
- return -EINVAL;
}
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
@@ -1567,13 +1611,14 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 1, 0},
+ .version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
.map_rq = multipath_map,
.rq_end_io = multipath_end_io,
.presuspend = multipath_presuspend,
+ .postsuspend = multipath_postsuspend,
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index cc9dc79b078..ad779bd13ae 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -35,6 +35,7 @@ static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
*---------------------------------------------------------------*/
enum dm_raid1_error {
DM_RAID1_WRITE_ERROR,
+ DM_RAID1_FLUSH_ERROR,
DM_RAID1_SYNC_ERROR,
DM_RAID1_READ_ERROR
};
@@ -57,6 +58,7 @@ struct mirror_set {
struct bio_list reads;
struct bio_list writes;
struct bio_list failures;
+ struct bio_list holds; /* bios are waiting until suspend */
struct dm_region_hash *rh;
struct dm_kcopyd_client *kcopyd_client;
@@ -67,6 +69,7 @@ struct mirror_set {
region_t nr_regions;
int in_sync;
int log_failure;
+ int leg_failure;
atomic_t suspend;
atomic_t default_mirror; /* Default mirror */
@@ -179,6 +182,17 @@ static void set_default_mirror(struct mirror *m)
atomic_set(&ms->default_mirror, m - m0);
}
+static struct mirror *get_valid_mirror(struct mirror_set *ms)
+{
+ struct mirror *m;
+
+ for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
+ if (!atomic_read(&m->error_count))
+ return m;
+
+ return NULL;
+}
+
/* fail_mirror
* @m: mirror device to fail
* @error_type: one of the enum's, DM_RAID1_*_ERROR
@@ -198,6 +212,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
struct mirror_set *ms = m->ms;
struct mirror *new;
+ ms->leg_failure = 1;
+
/*
* error_count is used for nothing more than a
* simple way to tell if a device has encountered
@@ -224,19 +240,50 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
goto out;
}
- for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
- if (!atomic_read(&new->error_count)) {
- set_default_mirror(new);
- break;
- }
-
- if (unlikely(new == ms->mirror + ms->nr_mirrors))
+ new = get_valid_mirror(ms);
+ if (new)
+ set_default_mirror(new);
+ else
DMWARN("All sides of mirror have failed.");
out:
schedule_work(&ms->trigger_event);
}
+static int mirror_flush(struct dm_target *ti)
+{
+ struct mirror_set *ms = ti->private;
+ unsigned long error_bits;
+
+ unsigned int i;
+ struct dm_io_region io[ms->nr_mirrors];
+ struct mirror *m;
+ struct dm_io_request io_req = {
+ .bi_rw = WRITE_BARRIER,
+ .mem.type = DM_IO_KMEM,
+ .mem.ptr.bvec = NULL,
+ .client = ms->io_client,
+ };
+
+ for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
+ io[i].bdev = m->dev->bdev;
+ io[i].sector = 0;
+ io[i].count = 0;
+ }
+
+ error_bits = -1;
+ dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
+ if (unlikely(error_bits != 0)) {
+ for (i = 0; i < ms->nr_mirrors; i++)
+ if (test_bit(i, &error_bits))
+ fail_mirror(ms->mirror + i,
+ DM_RAID1_FLUSH_ERROR);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/*-----------------------------------------------------------------
* Recovery.
*
@@ -396,6 +443,8 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
*/
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
+ if (unlikely(!bio->bi_size))
+ return 0;
return m->offset + (bio->bi_sector - m->ms->ti->begin);
}
@@ -413,6 +462,27 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
io->count = bio->bi_size >> 9;
}
+static void hold_bio(struct mirror_set *ms, struct bio *bio)
+{
+ /*
+ * If device is suspended, complete the bio.
+ */
+ if (atomic_read(&ms->suspend)) {
+ if (dm_noflush_suspending(ms->ti))
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ else
+ bio_endio(bio, -EIO);
+ return;
+ }
+
+ /*
+ * Hold bio until the suspend is complete.
+ */
+ spin_lock_irq(&ms->lock);
+ bio_list_add(&ms->holds, bio);
+ spin_unlock_irq(&ms->lock);
+}
+
/*-----------------------------------------------------------------
* Reads
*---------------------------------------------------------------*/
@@ -511,7 +581,6 @@ static void write_callback(unsigned long error, void *context)
unsigned i, ret = 0;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
- int uptodate = 0;
int should_wake = 0;
unsigned long flags;
@@ -524,36 +593,27 @@ static void write_callback(unsigned long error, void *context)
* This way we handle both writes to SYNC and NOSYNC
* regions with the same code.
*/
- if (likely(!error))
- goto out;
+ if (likely(!error)) {
+ bio_endio(bio, ret);
+ return;
+ }
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error))
fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
- else
- uptodate = 1;
- if (unlikely(!uptodate)) {
- DMERR("All replicated volumes dead, failing I/O");
- /* None of the writes succeeded, fail the I/O. */
- ret = -EIO;
- } else if (errors_handled(ms)) {
- /*
- * Need to raise event. Since raising
- * events can block, we need to do it in
- * the main thread.
- */
- spin_lock_irqsave(&ms->lock, flags);
- if (!ms->failures.head)
- should_wake = 1;
- bio_list_add(&ms->failures, bio);
- spin_unlock_irqrestore(&ms->lock, flags);
- if (should_wake)
- wakeup_mirrord(ms);
- return;
- }
-out:
- bio_endio(bio, ret);
+ /*
+ * Need to raise event. Since raising
+ * events can block, we need to do it in
+ * the main thread.
+ */
+ spin_lock_irqsave(&ms->lock, flags);
+ if (!ms->failures.head)
+ should_wake = 1;
+ bio_list_add(&ms->failures, bio);
+ spin_unlock_irqrestore(&ms->lock, flags);
+ if (should_wake)
+ wakeup_mirrord(ms);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
@@ -562,7 +622,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct dm_io_region io[ms->nr_mirrors], *dest = io;
struct mirror *m;
struct dm_io_request io_req = {
- .bi_rw = WRITE,
+ .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
.mem.type = DM_IO_BVEC,
.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
.notify.fn = write_callback,
@@ -603,6 +663,11 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
+ if (unlikely(bio_empty_barrier(bio))) {
+ bio_list_add(&sync, bio);
+ continue;
+ }
+
region = dm_rh_bio_to_region(ms->rh, bio);
if (log->type->is_remote_recovering &&
@@ -672,8 +737,12 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
- map_bio(get_default_mirror(ms), bio);
- generic_make_request(bio);
+ if (unlikely(ms->leg_failure) && errors_handled(ms))
+ hold_bio(ms, bio);
+ else {
+ map_bio(get_default_mirror(ms), bio);
+ generic_make_request(bio);
+ }
}
}
@@ -681,20 +750,12 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
struct bio *bio;
- if (!failures->head)
- return;
-
- if (!ms->log_failure) {
- while ((bio = bio_list_pop(failures))) {
- ms->in_sync = 0;
- dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
- }
+ if (likely(!failures->head))
return;
- }
/*
* If the log has failed, unattempted writes are being
- * put on the failures list. We can't issue those writes
+ * put on the holds list. We can't issue those writes
* until a log has been marked, so we must store them.
*
* If a 'noflush' suspend is in progress, we can requeue
@@ -709,23 +770,27 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
* for us to treat them the same and requeue them
* as well.
*/
- if (dm_noflush_suspending(ms->ti)) {
- while ((bio = bio_list_pop(failures)))
- bio_endio(bio, DM_ENDIO_REQUEUE);
- return;
- }
+ while ((bio = bio_list_pop(failures))) {
+ if (!ms->log_failure) {
+ ms->in_sync = 0;
+ dm_rh_mark_nosync(ms->rh, bio);
+ }
- if (atomic_read(&ms->suspend)) {
- while ((bio = bio_list_pop(failures)))
+ /*
+ * If all the legs are dead, fail the I/O.
+ * If we have been told to handle errors, hold the bio
+ * and wait for userspace to deal with the problem.
+ * Otherwise pretend that the I/O succeeded. (This would
+ * be wrong if the failed leg returned after reboot and
+ * got replicated back to the good legs.)
+ */
+ if (!get_valid_mirror(ms))
bio_endio(bio, -EIO);
- return;
+ else if (errors_handled(ms))
+ hold_bio(ms, bio);
+ else
+ bio_endio(bio, 0);
}
-
- spin_lock_irq(&ms->lock);
- bio_list_merge(&ms->failures, failures);
- spin_unlock_irq(&ms->lock);
-
- delayed_wake(ms);
}
static void trigger_event(struct work_struct *work)
@@ -784,12 +849,17 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
}
spin_lock_init(&ms->lock);
+ bio_list_init(&ms->reads);
+ bio_list_init(&ms->writes);
+ bio_list_init(&ms->failures);
+ bio_list_init(&ms->holds);
ms->ti = ti;
ms->nr_mirrors = nr_mirrors;
ms->nr_regions = dm_sector_div_up(ti->len, region_size);
ms->in_sync = 0;
ms->log_failure = 0;
+ ms->leg_failure = 0;
atomic_set(&ms->suspend, 0);
atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
@@ -889,7 +959,8 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
return NULL;
}
- dl = dm_dirty_log_create(argv[0], ti, param_count, argv + 2);
+ dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
+ argv + 2);
if (!dl) {
ti->error = "Error creating mirror dirty log";
return NULL;
@@ -995,6 +1066,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = ms;
ti->split_io = dm_rh_get_region_size(ms->rh);
+ ti->num_flush_requests = 1;
ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
if (!ms->kmirrord_wq) {
@@ -1122,7 +1194,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- dm_rh_dec(ms->rh, map_context->ll);
+ if (likely(!bio_empty_barrier(bio)))
+ dm_rh_dec(ms->rh, map_context->ll);
return error;
}
@@ -1180,6 +1253,9 @@ static void mirror_presuspend(struct dm_target *ti)
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
+ struct bio_list holds;
+ struct bio *bio;
+
atomic_set(&ms->suspend, 1);
/*
@@ -1202,6 +1278,22 @@ static void mirror_presuspend(struct dm_target *ti)
* we know that all of our I/O has been pushed.
*/
flush_workqueue(ms->kmirrord_wq);
+
+ /*
+ * Now that ms->suspend is set and the workqueue is flushed, no more
+ * entries can be added to the ms->holds list, so process it.
+ *
+ * Bios can still arrive concurrently with or after this
+ * presuspend function, but they cannot join the hold list
+ * because ms->suspend is set.
+ */
+ spin_lock_irq(&ms->lock);
+ holds = ms->holds;
+ bio_list_init(&ms->holds);
+ spin_unlock_irq(&ms->lock);
+
+ while ((bio = bio_list_pop(&holds)))
+ hold_bio(ms, bio);
}
static void mirror_postsuspend(struct dm_target *ti)
@@ -1244,7 +1336,8 @@ static char device_status_char(struct mirror *m)
if (!atomic_read(&(m->error_count)))
return 'A';
- return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
+ return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
+ (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
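mirror_flush() above issues a zero-length barrier to every leg in a single dm_io() call and then walks the returned error bitmask to fail only the legs that missed the flush. A small standalone sketch of that bookkeeping step, with a made-up error mask standing in for the real I/O result:

#include <stdio.h>

#define NR_MIRRORS 3

struct mirror { int error_count; };

static void fail_mirror(struct mirror *m)
{
	m->error_count++;	/* the real code also records DM_RAID1_FLUSH_ERROR */
}

int main(void)
{
	struct mirror mirror[NR_MIRRORS] = { { 0 }, { 0 }, { 0 } };
	unsigned long error_bits = 0x2;	/* pretend dm_io() reported that leg 1 failed */
	int i, r = 0;

	if (error_bits != 0) {
		for (i = 0; i < NR_MIRRORS; i++)
			if (error_bits & (1UL << i))	/* test_bit(i, &error_bits) in the patch */
				fail_mirror(&mirror[i]);
		r = -5;	/* -EIO */
	}

	for (i = 0; i < NR_MIRRORS; i++)
		printf("leg %d error_count=%d\n", i, mirror[i].error_count);
	return r ? 1 : 0;
}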
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 36dbe29f2fd..5f19ceb6fe9 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -79,6 +79,11 @@ struct dm_region_hash {
struct list_head recovered_regions;
struct list_head failed_recovered_regions;
+ /*
+ * If there was a barrier failure no regions can be marked clean.
+ */
+ int barrier_failure;
+
void *context;
sector_t target_begin;
@@ -211,6 +216,7 @@ struct dm_region_hash *dm_region_hash_create(
INIT_LIST_HEAD(&rh->quiesced_regions);
INIT_LIST_HEAD(&rh->recovered_regions);
INIT_LIST_HEAD(&rh->failed_recovered_regions);
+ rh->barrier_failure = 0;
rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
sizeof(struct dm_region));
@@ -377,8 +383,6 @@ static void complete_resync_work(struct dm_region *reg, int success)
/* dm_rh_mark_nosync
* @ms
* @bio
- * @done
- * @error
*
* The bio was written on some mirror(s) but failed on other mirror(s).
* We can successfully endio the bio but should avoid the region being
@@ -386,8 +390,7 @@ static void complete_resync_work(struct dm_region *reg, int success)
*
* This function is _not_ safe in interrupt context!
*/
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
- struct bio *bio, unsigned done, int error)
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
unsigned long flags;
struct dm_dirty_log *log = rh->log;
@@ -395,6 +398,11 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
region_t region = dm_rh_bio_to_region(rh, bio);
int recovering = 0;
+ if (bio_empty_barrier(bio)) {
+ rh->barrier_failure = 1;
+ return;
+ }
+
/* We must inform the log that the sync count has changed. */
log->type->set_region_sync(log, region, 0);
@@ -419,7 +427,6 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh,
BUG_ON(!list_empty(&reg->list));
spin_unlock_irqrestore(&rh->region_lock, flags);
- bio_endio(bio, error);
if (recovering)
complete_resync_work(reg, 0);
}
@@ -515,8 +522,11 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
struct bio *bio;
- for (bio = bios->head; bio; bio = bio->bi_next)
+ for (bio = bios->head; bio; bio = bio->bi_next) {
+ if (bio_empty_barrier(bio))
+ continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+ }
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
@@ -544,7 +554,14 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region)
*/
/* do nothing for DM_RH_NOSYNC */
- if (reg->state == DM_RH_RECOVERING) {
+ if (unlikely(rh->barrier_failure)) {
+ /*
+ * If a write barrier failed some time ago, we
+ * don't know whether or not this write made it
+ * to the disk, so we must resync the device.
+ */
+ reg->state = DM_RH_NOSYNC;
+ } else if (reg->state == DM_RH_RECOVERING) {
list_add_tail(&reg->list, &rh->quiesced_regions);
} else if (reg->state == DM_RH_DIRTY) {
reg->state = DM_RH_CLEAN;
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 0c746420c00..7d08879689a 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -55,6 +55,8 @@
*/
#define SNAPSHOT_DISK_VERSION 1
+#define NUM_SNAPSHOT_HDR_CHUNKS 1
+
struct disk_header {
uint32_t magic;
@@ -120,7 +122,22 @@ struct pstore {
/*
* The next free chunk for an exception.
+ *
+ * When creating exceptions, all the chunks here and above are
+ * free. It holds the next chunk to be allocated. On rare
+ * occasions (e.g. after a system crash) holes can be left in
+ * the exception store because chunks can be committed out of
+ * order.
+ *
+ * When merging exceptions, it does not necessarily mean all the
+ * chunks here and above are free. It holds the value it would
+ * have held if all chunks had been committed in order of
+ * allocation. Consequently the value may occasionally be
+ * slightly too low, but since it's only used for 'status' and
+ * it can never reach its minimum value too early this doesn't
+ * matter.
*/
+
chunk_t next_free;
/*
@@ -214,7 +231,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
int metadata)
{
struct dm_io_region where = {
- .bdev = ps->store->cow->bdev,
+ .bdev = dm_snap_cow(ps->store->snap)->bdev,
.sector = ps->store->chunk_size * chunk,
.count = ps->store->chunk_size,
};
@@ -294,7 +311,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
*/
if (!ps->store->chunk_size) {
ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
- bdev_logical_block_size(ps->store->cow->bdev) >> 9);
+ bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
+ bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
chunk_size_supplied = 0;
@@ -408,6 +426,15 @@ static void write_exception(struct pstore *ps,
e->new_chunk = cpu_to_le64(de->new_chunk);
}
+static void clear_exception(struct pstore *ps, uint32_t index)
+{
+ struct disk_exception *e = get_exception(ps, index);
+
+ /* clear it */
+ e->old_chunk = 0;
+ e->new_chunk = 0;
+}
+
/*
* Registers the exceptions that are present in the current area.
* 'full' is filled in to indicate if the area has been
@@ -489,11 +516,23 @@ static struct pstore *get_info(struct dm_exception_store *store)
return (struct pstore *) store->context;
}
-static void persistent_fraction_full(struct dm_exception_store *store,
- sector_t *numerator, sector_t *denominator)
+static void persistent_usage(struct dm_exception_store *store,
+ sector_t *total_sectors,
+ sector_t *sectors_allocated,
+ sector_t *metadata_sectors)
{
- *numerator = get_info(store)->next_free * store->chunk_size;
- *denominator = get_dev_size(store->cow->bdev);
+ struct pstore *ps = get_info(store);
+
+ *sectors_allocated = ps->next_free * store->chunk_size;
+ *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
+
+ /*
+ * First chunk is the fixed header.
+ * Then there are (ps->current_area + 1) metadata chunks, each one
+ * separated from the next by ps->exceptions_per_area data chunks.
+ */
+ *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
+ store->chunk_size;
}
static void persistent_dtr(struct dm_exception_store *store)
@@ -552,44 +591,40 @@ static int persistent_read_metadata(struct dm_exception_store *store,
ps->current_area = 0;
zero_memory_area(ps);
r = zero_disk_area(ps, 0);
- if (r) {
+ if (r)
DMWARN("zero_disk_area(0) failed");
- return r;
- }
- } else {
- /*
- * Sanity checks.
- */
- if (ps->version != SNAPSHOT_DISK_VERSION) {
- DMWARN("unable to handle snapshot disk version %d",
- ps->version);
- return -EINVAL;
- }
+ return r;
+ }
+ /*
+ * Sanity checks.
+ */
+ if (ps->version != SNAPSHOT_DISK_VERSION) {
+ DMWARN("unable to handle snapshot disk version %d",
+ ps->version);
+ return -EINVAL;
+ }
- /*
- * Metadata are valid, but snapshot is invalidated
- */
- if (!ps->valid)
- return 1;
+ /*
+ * Metadata are valid, but snapshot is invalidated
+ */
+ if (!ps->valid)
+ return 1;
- /*
- * Read the metadata.
- */
- r = read_exceptions(ps, callback, callback_context);
- if (r)
- return r;
- }
+ /*
+ * Read the metadata.
+ */
+ r = read_exceptions(ps, callback, callback_context);
- return 0;
+ return r;
}
static int persistent_prepare_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e)
+ struct dm_exception *e)
{
struct pstore *ps = get_info(store);
uint32_t stride;
chunk_t next_free;
- sector_t size = get_dev_size(store->cow->bdev);
+ sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
/* Is there enough room ? */
if (size < ((ps->next_free + 1) * store->chunk_size))
@@ -611,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
}
static void persistent_commit_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e,
+ struct dm_exception *e,
void (*callback) (void *, int success),
void *callback_context)
{
@@ -672,6 +707,85 @@ static void persistent_commit_exception(struct dm_exception_store *store,
ps->callback_count = 0;
}
+static int persistent_prepare_merge(struct dm_exception_store *store,
+ chunk_t *last_old_chunk,
+ chunk_t *last_new_chunk)
+{
+ struct pstore *ps = get_info(store);
+ struct disk_exception de;
+ int nr_consecutive;
+ int r;
+
+ /*
+ * When current area is empty, move back to preceding area.
+ */
+ if (!ps->current_committed) {
+ /*
+ * Have we finished?
+ */
+ if (!ps->current_area)
+ return 0;
+
+ ps->current_area--;
+ r = area_io(ps, READ);
+ if (r < 0)
+ return r;
+ ps->current_committed = ps->exceptions_per_area;
+ }
+
+ read_exception(ps, ps->current_committed - 1, &de);
+ *last_old_chunk = de.old_chunk;
+ *last_new_chunk = de.new_chunk;
+
+ /*
+ * Find number of consecutive chunks within the current area,
+ * working backwards.
+ */
+ for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
+ nr_consecutive++) {
+ read_exception(ps, ps->current_committed - 1 - nr_consecutive,
+ &de);
+ if (de.old_chunk != *last_old_chunk - nr_consecutive ||
+ de.new_chunk != *last_new_chunk - nr_consecutive)
+ break;
+ }
+
+ return nr_consecutive;
+}
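
persistent_prepare_merge() above reports how many committed exceptions at the tail of the current area form one consecutive run, i.e. old_chunk and new_chunk both step down by one per entry. A self-contained sketch of that backward scan over a plain array; the struct and function names are invented for illustration.

#include <stdio.h>

/* Simplified stand-in for struct disk_exception. */
struct ex {
        unsigned long long old_chunk;
        unsigned long long new_chunk;
};

/*
 * Count how many entries ending at index 'last' form a consecutive run,
 * each earlier entry having old_chunk and new_chunk exactly one lower,
 * as in the loop of persistent_prepare_merge().
 */
static int consecutive_run(const struct ex *e, int last)
{
        int n;

        for (n = 1; n <= last; n++) {
                if (e[last - n].old_chunk != e[last].old_chunk - n ||
                    e[last - n].new_chunk != e[last].new_chunk - n)
                        break;
        }
        return n;
}

int main(void)
{
        struct ex area[] = {
                { 7, 40 }, { 10, 51 }, { 11, 52 }, { 12, 53 },
        };

        /* The last three entries are consecutive, so this prints 3. */
        printf("%d\n", consecutive_run(area, 3));
        return 0;
}
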
+
+static int persistent_commit_merge(struct dm_exception_store *store,
+ int nr_merged)
+{
+ int r, i;
+ struct pstore *ps = get_info(store);
+
+ BUG_ON(nr_merged > ps->current_committed);
+
+ for (i = 0; i < nr_merged; i++)
+ clear_exception(ps, ps->current_committed - 1 - i);
+
+ r = area_io(ps, WRITE);
+ if (r < 0)
+ return r;
+
+ ps->current_committed -= nr_merged;
+
+ /*
+ * At this stage, only persistent_usage() uses ps->next_free, so
+ * we make no attempt to keep ps->next_free strictly accurate
+ * as exceptions may have been committed out-of-order originally.
+ * Once a snapshot has become merging, we set it to the value it
+ * would have held had all the exceptions been committed in order.
+ *
+ * ps->current_area does not get reduced by prepare_merge() until
+ * after commit_merge() has removed the nr_merged previous exceptions.
+ */
+ ps->next_free = (area_location(ps, ps->current_area) - 1) +
+ (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
+
+ return 0;
+}
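
The next_free recalculation at the end of persistent_commit_merge() is easier to see with numbers. The sketch below assumes the layout described in the persistent_usage() comment earlier in this file (chunk 0 is the header, then each area is one metadata chunk followed by exceptions_per_area data chunks), so area_location() here is an assumed reimplementation, not copied from the driver.

#include <stdio.h>

typedef unsigned long long chunk_t;

#define NUM_SNAPSHOT_HDR_CHUNKS 1

/* Assumed layout: header chunk, then (metadata + data chunks) per area. */
static chunk_t area_location(chunk_t area, unsigned exceptions_per_area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + area * (exceptions_per_area + 1);
}

/* The value persistent_commit_merge() assigns to ps->next_free. */
static chunk_t next_free_after_merge(chunk_t current_area,
                                     unsigned current_committed,
                                     unsigned exceptions_per_area)
{
        return (area_location(current_area, exceptions_per_area) - 1) +
               (current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
}

int main(void)
{
        /*
         * 4 exceptions per area, area 0, 2 exceptions still committed:
         * header (0), metadata (1), data (2,3) in use, so next free is 4.
         */
        printf("%llu\n", next_free_after_merge(0, 2, 4));
        return 0;
}
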
+
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
struct pstore *ps = get_info(store);
@@ -697,7 +811,7 @@ static int persistent_ctr(struct dm_exception_store *store,
ps->area = NULL;
ps->zero_area = NULL;
ps->header_area = NULL;
- ps->next_free = 2; /* skipping the header and first area */
+ ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
ps->current_committed = 0;
ps->callback_count = 0;
@@ -726,8 +840,7 @@ static unsigned persistent_status(struct dm_exception_store *store,
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- DMEMIT(" %s P %llu", store->cow->name,
- (unsigned long long)store->chunk_size);
+ DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
}
return sz;
@@ -741,8 +854,10 @@ static struct dm_exception_store_type _persistent_type = {
.read_metadata = persistent_read_metadata,
.prepare_exception = persistent_prepare_exception,
.commit_exception = persistent_commit_exception,
+ .prepare_merge = persistent_prepare_merge,
+ .commit_merge = persistent_commit_merge,
.drop_snapshot = persistent_drop_snapshot,
- .fraction_full = persistent_fraction_full,
+ .usage = persistent_usage,
.status = persistent_status,
};
@@ -754,8 +869,10 @@ static struct dm_exception_store_type _persistent_compat_type = {
.read_metadata = persistent_read_metadata,
.prepare_exception = persistent_prepare_exception,
.commit_exception = persistent_commit_exception,
+ .prepare_merge = persistent_prepare_merge,
+ .commit_merge = persistent_commit_merge,
.drop_snapshot = persistent_drop_snapshot,
- .fraction_full = persistent_fraction_full,
+ .usage = persistent_usage,
.status = persistent_status,
};
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index cde5aa558e6..a0898a66a2f 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -36,10 +36,10 @@ static int transient_read_metadata(struct dm_exception_store *store,
}
static int transient_prepare_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e)
+ struct dm_exception *e)
{
struct transient_c *tc = store->context;
- sector_t size = get_dev_size(store->cow->bdev);
+ sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
if (size < (tc->next_free + store->chunk_size))
return -1;
@@ -51,7 +51,7 @@ static int transient_prepare_exception(struct dm_exception_store *store,
}
static void transient_commit_exception(struct dm_exception_store *store,
- struct dm_snap_exception *e,
+ struct dm_exception *e,
void (*callback) (void *, int success),
void *callback_context)
{
@@ -59,11 +59,14 @@ static void transient_commit_exception(struct dm_exception_store *store,
callback(callback_context, 1);
}
-static void transient_fraction_full(struct dm_exception_store *store,
- sector_t *numerator, sector_t *denominator)
+static void transient_usage(struct dm_exception_store *store,
+ sector_t *total_sectors,
+ sector_t *sectors_allocated,
+ sector_t *metadata_sectors)
{
- *numerator = ((struct transient_c *) store->context)->next_free;
- *denominator = get_dev_size(store->cow->bdev);
+ *sectors_allocated = ((struct transient_c *) store->context)->next_free;
+ *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
+ *metadata_sectors = 0;
}
static int transient_ctr(struct dm_exception_store *store,
@@ -91,8 +94,7 @@ static unsigned transient_status(struct dm_exception_store *store,
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- DMEMIT(" %s N %llu", store->cow->name,
- (unsigned long long)store->chunk_size);
+ DMEMIT(" N %llu", (unsigned long long)store->chunk_size);
}
return sz;
@@ -106,7 +108,7 @@ static struct dm_exception_store_type _transient_type = {
.read_metadata = transient_read_metadata,
.prepare_exception = transient_prepare_exception,
.commit_exception = transient_commit_exception,
- .fraction_full = transient_fraction_full,
+ .usage = transient_usage,
.status = transient_status,
};
@@ -118,7 +120,7 @@ static struct dm_exception_store_type _transient_compat_type = {
.read_metadata = transient_read_metadata,
.prepare_exception = transient_prepare_exception,
.commit_exception = transient_commit_exception,
- .fraction_full = transient_fraction_full,
+ .usage = transient_usage,
.status = transient_status,
};
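
Both exception stores now report usage as three absolute values (total COW sectors, sectors allocated, metadata sectors) instead of the old numerator/denominator pair. A hedged sketch of how those values end up formatted, matching the "%llu/%llu %llu" layout snapshot_status() emits further down in this patch; the snprintf wrapper stands in for DMEMIT and is not the driver's code.

#include <stdio.h>

typedef unsigned long long sector_t;

/* "<allocated>/<total> <metadata>", as used for STATUSTYPE_INFO below. */
static int format_usage(char *buf, size_t len, sector_t total,
                        sector_t allocated, sector_t metadata)
{
        return snprintf(buf, len, "%llu/%llu %llu",
                        allocated, total, metadata);
}

int main(void)
{
        char line[64];

        /* e.g. 16 of 256 sectors allocated, 32 sectors of metadata */
        format_usage(line, sizeof(line), 256, 16, 32);
        printf("%s\n", line);   /* prints "16/256 32" */
        return 0;
}
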
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 3a3ba46e6d4..ee8eb283650 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -25,6 +25,11 @@
#define DM_MSG_PREFIX "snapshots"
+static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
+
+#define dm_target_is_snapshot_merge(ti) \
+ ((ti)->type->name == dm_snapshot_merge_target_name)
+
/*
* The percentage increment we will wake up users at
*/
@@ -49,7 +54,7 @@
#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
(DM_TRACKED_CHUNK_HASH_SIZE - 1))
-struct exception_table {
+struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
struct list_head *table;
@@ -59,22 +64,31 @@ struct dm_snapshot {
struct rw_semaphore lock;
struct dm_dev *origin;
+ struct dm_dev *cow;
+
+ struct dm_target *ti;
/* List of snapshots per Origin */
struct list_head list;
- /* You can't use a snapshot if this is 0 (e.g. if full) */
+ /*
+ * You can't use a snapshot if this is 0 (e.g. if full).
+ * A snapshot-merge target never clears this.
+ */
int valid;
/* Origin writes don't trigger exceptions until this is set */
int active;
+ /* Whether or not owning mapped_device is suspended */
+ int suspended;
+
mempool_t *pending_pool;
atomic_t pending_exceptions_count;
- struct exception_table pending;
- struct exception_table complete;
+ struct dm_exception_table pending;
+ struct dm_exception_table complete;
/*
* pe_lock protects all pending_exception operations and access
@@ -95,8 +109,51 @@ struct dm_snapshot {
mempool_t *tracked_chunk_pool;
spinlock_t tracked_chunk_lock;
struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
+ /*
+ * The merge operation failed if this flag is set.
+ * Failure modes are handled as follows:
+ * - I/O error reading the header
+ * => don't load the target; abort.
+ * - Header does not have "valid" flag set
+ * => use the origin; forget about the snapshot.
+ * - I/O error when reading exceptions
+ * => don't load the target; abort.
+ * (We can't use the intermediate origin state.)
+ * - I/O error while merging
+ * => stop merging; set merge_failed; process I/O normally.
+ */
+ int merge_failed;
+
+ /* Wait for events based on state_bits */
+ unsigned long state_bits;
+
+ /* Range of chunks currently being merged. */
+ chunk_t first_merging_chunk;
+ int num_merging_chunks;
+
+ /*
+ * Incoming bios that overlap with chunks being merged must wait
+ * for them to be committed.
+ */
+ struct bio_list bios_queued_during_merge;
};
+/*
+ * state_bits:
+ * RUNNING_MERGE - Merge operation is in progress.
+ * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
+ * cleared afterwards.
+ */
+#define RUNNING_MERGE 0
+#define SHUTDOWN_MERGE 1
+
+struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
+{
+ return s->cow;
+}
+EXPORT_SYMBOL(dm_snap_cow);
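
dm_snap_cow() exists because the COW device now belongs to struct dm_snapshot rather than to the exception store; every store->cow->bdev use in this patch becomes dm_snap_cow(store->snap)->bdev. A stripped-down sketch of that ownership change with placeholder struct definitions (they are not the real dm types).

#include <stdio.h>

struct block_device { const char *name; };
struct dm_dev { struct block_device bdev; };

struct snapshot { struct dm_dev *cow; };                /* owns the cow dev */
struct exception_store { struct snapshot *snap; };      /* no ->cow any more */

/* Counterpart of dm_snap_cow(): reach the cow via the snapshot. */
static struct dm_dev *snap_cow(struct snapshot *s)
{
        return s->cow;
}

int main(void)
{
        struct dm_dev cow = { { "cow0" } };
        struct snapshot s = { &cow };
        struct exception_store store = { &s };

        /* old: store->cow->bdev; new: via the snapshot back-pointer */
        printf("%s\n", snap_cow(store.snap)->bdev.name);
        return 0;
}
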
+
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
@@ -116,7 +173,7 @@ static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
}
struct dm_snap_pending_exception {
- struct dm_snap_exception e;
+ struct dm_exception e;
/*
* Origin buffers waiting for this to complete are held
@@ -125,28 +182,6 @@ struct dm_snap_pending_exception {
struct bio_list origin_bios;
struct bio_list snapshot_bios;
- /*
- * Short-term queue of pending exceptions prior to submission.
- */
- struct list_head list;
-
- /*
- * The primary pending_exception is the one that holds
- * the ref_count and the list of origin_bios for a
- * group of pending_exceptions. It is always last to get freed.
- * These fields get set up when writing to the origin.
- */
- struct dm_snap_pending_exception *primary_pe;
-
- /*
- * Number of pending_exceptions processing this chunk.
- * When this drops to zero we must complete the origin bios.
- * If incrementing or decrementing this, hold pe->snap->lock for
- * the sibling concerned and not pe->primary_pe->snap->lock unless
- * they are the same.
- */
- atomic_t ref_count;
-
/* Pointer back to snapshot context */
struct dm_snapshot *snap;
@@ -222,6 +257,16 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
}
/*
+ * This conflicting I/O is extremely improbable in the caller,
+ * so msleep(1) is sufficient and there is no need for a wait queue.
+ */
+static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
+{
+ while (__chunk_is_tracked(s, chunk))
+ msleep(1);
+}
+
+/*
* One of these per registered origin, held in the snapshot_origins hash
*/
struct origin {
@@ -243,6 +288,10 @@ struct origin {
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;
+static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
+static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
+static uint64_t _pending_exceptions_done_count;
+
static int init_origin_hash(void)
{
int i;
@@ -291,22 +340,144 @@ static void __insert_origin(struct origin *o)
}
/*
+ * _origins_lock must be held when calling this function.
+ * Returns number of snapshots registered using the supplied cow device, plus:
+ * snap_src - a snapshot suitable for use as a source of exception handover
+ * snap_dest - a snapshot capable of receiving exception handover.
+ * snap_merge - an existing snapshot-merge target linked to the same origin.
+ * There can be at most one snapshot-merge target. The parameter is optional.
+ *
+ * Possible return values and states of snap_src and snap_dest.
+ * 0: NULL, NULL - first new snapshot
+ * 1: snap_src, NULL - normal snapshot
+ * 2: snap_src, snap_dest - waiting for handover
+ * 2: snap_src, NULL - handed over, waiting for old to be deleted
+ * 1: NULL, snap_dest - source got destroyed without handover
+ */
+static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
+ struct dm_snapshot **snap_src,
+ struct dm_snapshot **snap_dest,
+ struct dm_snapshot **snap_merge)
+{
+ struct dm_snapshot *s;
+ struct origin *o;
+ int count = 0;
+ int active;
+
+ o = __lookup_origin(snap->origin->bdev);
+ if (!o)
+ goto out;
+
+ list_for_each_entry(s, &o->snapshots, list) {
+ if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
+ *snap_merge = s;
+ if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
+ continue;
+
+ down_read(&s->lock);
+ active = s->active;
+ up_read(&s->lock);
+
+ if (active) {
+ if (snap_src)
+ *snap_src = s;
+ } else if (snap_dest)
+ *snap_dest = s;
+
+ count++;
+ }
+
+out:
+ return count;
+}
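
The state table in the comment above pairs the return count with which roles were found. A tiny decoder for its "waiting for handover" row (count 2 with both a source and a destination present), using opaque pointers in place of struct dm_snapshot; it only restates the table and is not the validation logic of __validate_exception_handover() below.

#include <stdbool.h>
#include <stdio.h>

/* "2: snap_src, snap_dest - waiting for handover" from the table above. */
static bool handover_pending(int count, const void *snap_src,
                             const void *snap_dest)
{
        return count == 2 && snap_src && snap_dest;
}

int main(void)
{
        int src = 1, dest = 1;

        printf("%d\n", handover_pending(2, &src, &dest));       /* 1 */
        printf("%d\n", handover_pending(1, &src, NULL));        /* 0: normal snapshot */
        return 0;
}
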
+
+/*
+ * On success, returns 1 if this snapshot is a handover destination,
+ * otherwise returns 0.
+ */
+static int __validate_exception_handover(struct dm_snapshot *snap)
+{
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+ struct dm_snapshot *snap_merge = NULL;
+
+ /* Does snapshot need exceptions handed over to it? */
+ if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
+ &snap_merge) == 2) ||
+ snap_dest) {
+ snap->ti->error = "Snapshot cow pairing for exception "
+ "table handover failed";
+ return -EINVAL;
+ }
+
+ /*
+ * If no snap_src was found, snap cannot become a handover
+ * destination.
+ */
+ if (!snap_src)
+ return 0;
+
+ /*
+ * Non-snapshot-merge handover?
+ */
+ if (!dm_target_is_snapshot_merge(snap->ti))
+ return 1;
+
+ /*
+ * Do not allow more than one merging snapshot.
+ */
+ if (snap_merge) {
+ snap->ti->error = "A snapshot is already merging.";
+ return -EINVAL;
+ }
+
+ if (!snap_src->store->type->prepare_merge ||
+ !snap_src->store->type->commit_merge) {
+ snap->ti->error = "Snapshot exception store does not "
+ "support snapshot-merge.";
+ return -EINVAL;
+ }
+
+ return 1;
+}
+
+static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
+{
+ struct dm_snapshot *l;
+
+ /* Sort the list according to chunk size, largest-first smallest-last */
+ list_for_each_entry(l, &o->snapshots, list)
+ if (l->store->chunk_size < s->store->chunk_size)
+ break;
+ list_add_tail(&s->list, &l->list);
+}
+
+/*
* Make a note of the snapshot and its origin so we can look it
* up when the origin has a write on it.
+ *
+ * Also validate snapshot exception store handovers.
+ * On success, returns 1 if this registration is a handover destination,
+ * otherwise returns 0.
*/
static int register_snapshot(struct dm_snapshot *snap)
{
- struct dm_snapshot *l;
- struct origin *o, *new_o;
+ struct origin *o, *new_o = NULL;
struct block_device *bdev = snap->origin->bdev;
+ int r = 0;
new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
if (!new_o)
return -ENOMEM;
down_write(&_origins_lock);
- o = __lookup_origin(bdev);
+ r = __validate_exception_handover(snap);
+ if (r < 0) {
+ kfree(new_o);
+ goto out;
+ }
+
+ o = __lookup_origin(bdev);
if (o)
kfree(new_o);
else {
@@ -320,14 +491,27 @@ static int register_snapshot(struct dm_snapshot *snap)
__insert_origin(o);
}
- /* Sort the list according to chunk size, largest-first smallest-last */
- list_for_each_entry(l, &o->snapshots, list)
- if (l->store->chunk_size < snap->store->chunk_size)
- break;
- list_add_tail(&snap->list, &l->list);
+ __insert_snapshot(o, snap);
+
+out:
+ up_write(&_origins_lock);
+
+ return r;
+}
+
+/*
+ * Move snapshot to correct place in list according to chunk size.
+ */
+static void reregister_snapshot(struct dm_snapshot *s)
+{
+ struct block_device *bdev = s->origin->bdev;
+
+ down_write(&_origins_lock);
+
+ list_del(&s->list);
+ __insert_snapshot(__lookup_origin(bdev), s);
up_write(&_origins_lock);
- return 0;
}
static void unregister_snapshot(struct dm_snapshot *s)
@@ -338,7 +522,7 @@ static void unregister_snapshot(struct dm_snapshot *s)
o = __lookup_origin(s->origin->bdev);
list_del(&s->list);
- if (list_empty(&o->snapshots)) {
+ if (o && list_empty(&o->snapshots)) {
list_del(&o->hash_list);
kfree(o);
}
@@ -351,8 +535,8 @@ static void unregister_snapshot(struct dm_snapshot *s)
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
-static int init_exception_table(struct exception_table *et, uint32_t size,
- unsigned hash_shift)
+static int dm_exception_table_init(struct dm_exception_table *et,
+ uint32_t size, unsigned hash_shift)
{
unsigned int i;
@@ -368,10 +552,11 @@ static int init_exception_table(struct exception_table *et, uint32_t size,
return 0;
}
-static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
+static void dm_exception_table_exit(struct dm_exception_table *et,
+ struct kmem_cache *mem)
{
struct list_head *slot;
- struct dm_snap_exception *ex, *next;
+ struct dm_exception *ex, *next;
int i, size;
size = et->hash_mask + 1;
@@ -385,19 +570,12 @@ static void exit_exception_table(struct exception_table *et, struct kmem_cache *
vfree(et->table);
}
-static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
return (chunk >> et->hash_shift) & et->hash_mask;
}
-static void insert_exception(struct exception_table *eh,
- struct dm_snap_exception *e)
-{
- struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
- list_add(&e->hash_list, l);
-}
-
-static void remove_exception(struct dm_snap_exception *e)
+static void dm_remove_exception(struct dm_exception *e)
{
list_del(&e->hash_list);
}
@@ -406,11 +584,11 @@ static void remove_exception(struct dm_snap_exception *e)
* Return the exception data for a sector, or NULL if not
* remapped.
*/
-static struct dm_snap_exception *lookup_exception(struct exception_table *et,
- chunk_t chunk)
+static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
+ chunk_t chunk)
{
struct list_head *slot;
- struct dm_snap_exception *e;
+ struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
list_for_each_entry (e, slot, hash_list)
@@ -421,9 +599,9 @@ static struct dm_snap_exception *lookup_exception(struct exception_table *et,
return NULL;
}
-static struct dm_snap_exception *alloc_exception(void)
+static struct dm_exception *alloc_completed_exception(void)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
e = kmem_cache_alloc(exception_cache, GFP_NOIO);
if (!e)
@@ -432,7 +610,7 @@ static struct dm_snap_exception *alloc_exception(void)
return e;
}
-static void free_exception(struct dm_snap_exception *e)
+static void free_completed_exception(struct dm_exception *e)
{
kmem_cache_free(exception_cache, e);
}
@@ -457,12 +635,11 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
atomic_dec(&s->pending_exceptions_count);
}
-static void insert_completed_exception(struct dm_snapshot *s,
- struct dm_snap_exception *new_e)
+static void dm_insert_exception(struct dm_exception_table *eh,
+ struct dm_exception *new_e)
{
- struct exception_table *eh = &s->complete;
struct list_head *l;
- struct dm_snap_exception *e = NULL;
+ struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -478,7 +655,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
dm_consecutive_chunk_count(e) + 1)) {
dm_consecutive_chunk_count_inc(e);
- free_exception(new_e);
+ free_completed_exception(new_e);
return;
}
@@ -488,7 +665,7 @@ static void insert_completed_exception(struct dm_snapshot *s,
dm_consecutive_chunk_count_inc(e);
e->old_chunk--;
e->new_chunk--;
- free_exception(new_e);
+ free_completed_exception(new_e);
return;
}
@@ -507,9 +684,9 @@ out:
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
struct dm_snapshot *s = context;
- struct dm_snap_exception *e;
+ struct dm_exception *e;
- e = alloc_exception();
+ e = alloc_completed_exception();
if (!e)
return -ENOMEM;
@@ -518,11 +695,30 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
- insert_completed_exception(s, e);
+ dm_insert_exception(&s->complete, e);
return 0;
}
+#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
+
+/*
+ * Return a minimum chunk size of all snapshots that have the specified origin.
+ * Return zero if the origin has no snapshots.
+ */
+static sector_t __minimum_chunk_size(struct origin *o)
+{
+ struct dm_snapshot *snap;
+ unsigned chunk_size = 0;
+
+ if (o)
+ list_for_each_entry(snap, &o->snapshots, list)
+ chunk_size = min_not_zero(chunk_size,
+ snap->store->chunk_size);
+
+ return chunk_size;
+}
+
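
Note that this copy of min_not_zero() parenthesizes the whole conditional, unlike the older definition removed from further down in dm-snap.c. A standalone sketch of why that matters when the macro is used inside a larger expression; min_u() below replaces the kernel's min() so the example is self-contained.

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

/* Old style: no outer parentheses around the conditional expression. */
#define MIN_NOT_ZERO_OLD(l, r) (l == 0) ? r : ((r == 0) ? l : min_u(l, r))
/* New style, as added above: the whole expression is parenthesized. */
#define MIN_NOT_ZERO_NEW(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min_u(l, r)))

int main(void)
{
        unsigned a = 0, b = 8;

        /*
         * With the old macro the "* 2" binds only to the last branch of
         * the conditional, so the l == 0 case silently skips the doubling.
         */
        printf("old: %u\n", MIN_NOT_ZERO_OLD(a, b) * 2);        /* prints 8 */
        printf("new: %u\n", MIN_NOT_ZERO_NEW(a, b) * 2);        /* prints 16 */
        return 0;
}
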
/*
* Hard coded magic.
*/
@@ -546,16 +742,18 @@ static int init_hash_tables(struct dm_snapshot *s)
* Calculate based on the size of the original volume or
* the COW volume...
*/
- cow_dev_size = get_dev_size(s->store->cow->bdev);
+ cow_dev_size = get_dev_size(s->cow->bdev);
origin_dev_size = get_dev_size(s->origin->bdev);
max_buckets = calc_max_buckets();
hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
hash_size = min(hash_size, max_buckets);
+ if (hash_size < 64)
+ hash_size = 64;
hash_size = rounddown_pow_of_two(hash_size);
- if (init_exception_table(&s->complete, hash_size,
- DM_CHUNK_CONSECUTIVE_BITS))
+ if (dm_exception_table_init(&s->complete, hash_size,
+ DM_CHUNK_CONSECUTIVE_BITS))
return -ENOMEM;
/*
@@ -566,14 +764,284 @@ static int init_hash_tables(struct dm_snapshot *s)
if (hash_size < 64)
hash_size = 64;
- if (init_exception_table(&s->pending, hash_size, 0)) {
- exit_exception_table(&s->complete, exception_cache);
+ if (dm_exception_table_init(&s->pending, hash_size, 0)) {
+ dm_exception_table_exit(&s->complete, exception_cache);
return -ENOMEM;
}
return 0;
}
+static void merge_shutdown(struct dm_snapshot *s)
+{
+ clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&s->state_bits, RUNNING_MERGE);
+}
+
+static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
+{
+ s->first_merging_chunk = 0;
+ s->num_merging_chunks = 0;
+
+ return bio_list_get(&s->bios_queued_during_merge);
+}
+
+/*
+ * Remove one chunk from the index of completed exceptions.
+ */
+static int __remove_single_exception_chunk(struct dm_snapshot *s,
+ chunk_t old_chunk)
+{
+ struct dm_exception *e;
+
+ e = dm_lookup_exception(&s->complete, old_chunk);
+ if (!e) {
+ DMERR("Corruption detected: exception for block %llu is "
+ "on disk but not in memory",
+ (unsigned long long)old_chunk);
+ return -EINVAL;
+ }
+
+ /*
+ * If this is the only chunk using this exception, remove exception.
+ */
+ if (!dm_consecutive_chunk_count(e)) {
+ dm_remove_exception(e);
+ free_completed_exception(e);
+ return 0;
+ }
+
+ /*
+ * The chunk may be either at the beginning or the end of a
+ * group of consecutive chunks - never in the middle. We are
+ * removing chunks in the opposite order to that in which they
+ * were added, so this should always be true.
+ * Decrement the consecutive chunk counter and adjust the
+ * starting point if necessary.
+ */
+ if (old_chunk == e->old_chunk) {
+ e->old_chunk++;
+ e->new_chunk++;
+ } else if (old_chunk != e->old_chunk +
+ dm_consecutive_chunk_count(e)) {
+ DMERR("Attempt to merge block %llu from the "
+ "middle of a chunk range [%llu - %llu]",
+ (unsigned long long)old_chunk,
+ (unsigned long long)e->old_chunk,
+ (unsigned long long)
+ e->old_chunk + dm_consecutive_chunk_count(e));
+ return -EINVAL;
+ }
+
+ dm_consecutive_chunk_count_dec(e);
+
+ return 0;
+}
+
+static void flush_bios(struct bio *bio);
+
+static int remove_single_exception_chunk(struct dm_snapshot *s)
+{
+ struct bio *b = NULL;
+ int r;
+ chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
+
+ down_write(&s->lock);
+
+ /*
+ * Process chunks (and associated exceptions) in reverse order
+ * so that dm_consecutive_chunk_count_dec() accounting works.
+ */
+ do {
+ r = __remove_single_exception_chunk(s, old_chunk);
+ if (r)
+ goto out;
+ } while (old_chunk-- > s->first_merging_chunk);
+
+ b = __release_queued_bios_after_merge(s);
+
+out:
+ up_write(&s->lock);
+ if (b)
+ flush_bios(b);
+
+ return r;
+}
+
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+ sector_t sector, unsigned chunk_size);
+
+static void merge_callback(int read_err, unsigned long write_err,
+ void *context);
+
+static uint64_t read_pending_exceptions_done_count(void)
+{
+ uint64_t pending_exceptions_done;
+
+ spin_lock(&_pending_exceptions_done_spinlock);
+ pending_exceptions_done = _pending_exceptions_done_count;
+ spin_unlock(&_pending_exceptions_done_spinlock);
+
+ return pending_exceptions_done;
+}
+
+static void increment_pending_exceptions_done_count(void)
+{
+ spin_lock(&_pending_exceptions_done_spinlock);
+ _pending_exceptions_done_count++;
+ spin_unlock(&_pending_exceptions_done_spinlock);
+
+ wake_up_all(&_pending_exceptions_done);
+}
+
+static void snapshot_merge_next_chunks(struct dm_snapshot *s)
+{
+ int i, linear_chunks;
+ chunk_t old_chunk, new_chunk;
+ struct dm_io_region src, dest;
+ sector_t io_size;
+ uint64_t previous_count;
+
+ BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
+ if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
+ goto shut;
+
+ /*
+ * valid flag never changes during merge, so no lock required.
+ */
+ if (!s->valid) {
+ DMERR("Snapshot is invalid: can't merge");
+ goto shut;
+ }
+
+ linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
+ &new_chunk);
+ if (linear_chunks <= 0) {
+ if (linear_chunks < 0) {
+ DMERR("Read error in exception store: "
+ "shutting down merge");
+ down_write(&s->lock);
+ s->merge_failed = 1;
+ up_write(&s->lock);
+ }
+ goto shut;
+ }
+
+ /* Adjust old_chunk and new_chunk to reflect start of linear region */
+ old_chunk = old_chunk + 1 - linear_chunks;
+ new_chunk = new_chunk + 1 - linear_chunks;
+
+ /*
+ * Use one (potentially large) I/O to copy all 'linear_chunks'
+ * from the exception store to the origin
+ */
+ io_size = linear_chunks * s->store->chunk_size;
+
+ dest.bdev = s->origin->bdev;
+ dest.sector = chunk_to_sector(s->store, old_chunk);
+ dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
+
+ src.bdev = s->cow->bdev;
+ src.sector = chunk_to_sector(s->store, new_chunk);
+ src.count = dest.count;
+
+ /*
+ * Reallocate any exceptions needed in other snapshots then
+ * wait for the pending exceptions to complete.
+ * Each time any pending exception (globally on the system)
+ * completes we are woken and repeat the process to find out
+ * if we can proceed. While this may not seem a particularly
+ * efficient algorithm, it is not expected to have any
+ * significant impact on performance.
+ */
+ previous_count = read_pending_exceptions_done_count();
+ while (origin_write_extent(s, dest.sector, io_size)) {
+ wait_event(_pending_exceptions_done,
+ (read_pending_exceptions_done_count() !=
+ previous_count));
+ /* Retry after the wait, until all exceptions are done. */
+ previous_count = read_pending_exceptions_done_count();
+ }
+
+ down_write(&s->lock);
+ s->first_merging_chunk = old_chunk;
+ s->num_merging_chunks = linear_chunks;
+ up_write(&s->lock);
+
+ /* Wait until writes to all 'linear_chunks' drain */
+ for (i = 0; i < linear_chunks; i++)
+ __check_for_conflicting_io(s, old_chunk + i);
+
+ dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+ return;
+
+shut:
+ merge_shutdown(s);
+}
+
+static void error_bios(struct bio *bio);
+
+static void merge_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct dm_snapshot *s = context;
+ struct bio *b = NULL;
+
+ if (read_err || write_err) {
+ if (read_err)
+ DMERR("Read error: shutting down merge.");
+ else
+ DMERR("Write error: shutting down merge.");
+ goto shut;
+ }
+
+ if (s->store->type->commit_merge(s->store,
+ s->num_merging_chunks) < 0) {
+ DMERR("Write error in exception store: shutting down merge");
+ goto shut;
+ }
+
+ if (remove_single_exception_chunk(s) < 0)
+ goto shut;
+
+ snapshot_merge_next_chunks(s);
+
+ return;
+
+shut:
+ down_write(&s->lock);
+ s->merge_failed = 1;
+ b = __release_queued_bios_after_merge(s);
+ up_write(&s->lock);
+ error_bios(b);
+
+ merge_shutdown(s);
+}
+
+static void start_merge(struct dm_snapshot *s)
+{
+ if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
+ snapshot_merge_next_chunks(s);
+}
+
+static int wait_schedule(void *ptr)
+{
+ schedule();
+
+ return 0;
+}
+
+/*
+ * Stop the merging process and wait until it finishes.
+ */
+static void stop_merge(struct dm_snapshot *s)
+{
+ set_bit(SHUTDOWN_MERGE, &s->state_bits);
+ wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
+ TASK_UNINTERRUPTIBLE);
+ clear_bit(SHUTDOWN_MERGE, &s->state_bits);
+}
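
start_merge() and stop_merge() above coordinate through two bits in state_bits: RUNNING_MERGE prevents a second merge from starting, SHUTDOWN_MERGE asks a running merge to stop, and the stopper waits for RUNNING_MERGE to clear. Below is a compact userspace model of that handshake, assuming C11 atomics and a polling wait in place of wait_on_bit()/wake_up_bit(); it sketches the protocol only, not the kernel primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RUNNING_MERGE  (1u << 0)
#define SHUTDOWN_MERGE (1u << 1)

static atomic_uint state_bits;
static int chunks_left = 3;

/* One step of merging; returns false when done or asked to shut down. */
static bool merge_one_chunk(void)
{
        if (atomic_load(&state_bits) & SHUTDOWN_MERGE)
                return false;
        if (chunks_left == 0)
                return false;
        chunks_left--;          /* stand-in for copying a chunk to the origin */
        return true;
}

static void start_merge(void)
{
        /* Only start if RUNNING_MERGE was not already set. */
        if (atomic_fetch_or(&state_bits, RUNNING_MERGE) & RUNNING_MERGE)
                return;
        while (merge_one_chunk())
                ;
        /* Equivalent of merge_shutdown(): clear the bit, wake any waiter. */
        atomic_fetch_and(&state_bits, ~RUNNING_MERGE);
}

static void stop_merge(void)
{
        atomic_fetch_or(&state_bits, SHUTDOWN_MERGE);
        while (atomic_load(&state_bits) & RUNNING_MERGE)
                ;               /* the kernel sleeps in wait_on_bit() instead */
        atomic_fetch_and(&state_bits, ~SHUTDOWN_MERGE);
}

int main(void)
{
        stop_merge();           /* nothing running, returns immediately */
        start_merge();          /* merges until merge_one_chunk() says stop */
        printf("state_bits=%u chunks_left=%d\n",
               atomic_load(&state_bits), chunks_left);
        return 0;
}
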
+
/*
* Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
*/
@@ -582,50 +1050,73 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct dm_snapshot *s;
int i;
int r = -EINVAL;
- char *origin_path;
- struct dm_exception_store *store;
- unsigned args_used;
+ char *origin_path, *cow_path;
+ unsigned args_used, num_flush_requests = 1;
+ fmode_t origin_mode = FMODE_READ;
if (argc != 4) {
ti->error = "requires exactly 4 arguments";
r = -EINVAL;
- goto bad_args;
+ goto bad;
+ }
+
+ if (dm_target_is_snapshot_merge(ti)) {
+ num_flush_requests = 2;
+ origin_mode = FMODE_WRITE;
}
origin_path = argv[0];
argv++;
argc--;
- r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s) {
+ ti->error = "Cannot allocate snapshot context private "
+ "structure";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cow_path = argv[0];
+ argv++;
+ argc--;
+
+ r = dm_get_device(ti, cow_path, 0, 0,
+ FMODE_READ | FMODE_WRITE, &s->cow);
+ if (r) {
+ ti->error = "Cannot get COW device";
+ goto bad_cow;
+ }
+
+ r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
if (r) {
ti->error = "Couldn't create exception store";
r = -EINVAL;
- goto bad_args;
+ goto bad_store;
}
argv += args_used;
argc -= args_used;
- s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (!s) {
- ti->error = "Cannot allocate snapshot context private "
- "structure";
- r = -ENOMEM;
- goto bad_snap;
- }
-
- r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
+ r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
if (r) {
ti->error = "Cannot get origin device";
goto bad_origin;
}
- s->store = store;
+ s->ti = ti;
s->valid = 1;
s->active = 0;
+ s->suspended = 0;
atomic_set(&s->pending_exceptions_count, 0);
init_rwsem(&s->lock);
+ INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
+ s->state_bits = 0;
+ s->merge_failed = 0;
+ s->first_merging_chunk = 0;
+ s->num_merging_chunks = 0;
+ bio_list_init(&s->bios_queued_during_merge);
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
@@ -659,39 +1150,55 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&s->tracked_chunk_lock);
- /* Metadata must only be loaded into one table at once */
+ bio_list_init(&s->queued_bios);
+ INIT_WORK(&s->queued_bios_work, flush_queued_bios);
+
+ ti->private = s;
+ ti->num_flush_requests = num_flush_requests;
+
+ /* Add snapshot to the list of snapshots for this origin */
+ /* Exceptions aren't triggered till snapshot_resume() is called */
+ r = register_snapshot(s);
+ if (r == -ENOMEM) {
+ ti->error = "Snapshot origin struct allocation failed";
+ goto bad_load_and_register;
+ } else if (r < 0) {
+ /* invalid handover, register_snapshot has set ti->error */
+ goto bad_load_and_register;
+ }
+
+ /*
+ * Metadata must only be loaded into one table at once, so skip this
+ * if metadata will be handed over during resume.
+ * Chunk size will be set during the handover - set it to zero to
+ * ensure it's ignored.
+ */
+ if (r > 0) {
+ s->store->chunk_size = 0;
+ return 0;
+ }
+
r = s->store->type->read_metadata(s->store, dm_add_exception,
(void *)s);
if (r < 0) {
ti->error = "Failed to read snapshot metadata";
- goto bad_load_and_register;
+ goto bad_read_metadata;
} else if (r > 0) {
s->valid = 0;
DMWARN("Snapshot is marked invalid.");
}
- bio_list_init(&s->queued_bios);
- INIT_WORK(&s->queued_bios_work, flush_queued_bios);
-
if (!s->store->chunk_size) {
ti->error = "Chunk size not set";
- goto bad_load_and_register;
- }
-
- /* Add snapshot to the list of snapshots for this origin */
- /* Exceptions aren't triggered till snapshot_resume() is called */
- if (register_snapshot(s)) {
- r = -EINVAL;
- ti->error = "Cannot register snapshot origin";
- goto bad_load_and_register;
+ goto bad_read_metadata;
}
-
- ti->private = s;
ti->split_io = s->store->chunk_size;
- ti->num_flush_requests = 1;
return 0;
+bad_read_metadata:
+ unregister_snapshot(s);
+
bad_load_and_register:
mempool_destroy(s->tracked_chunk_pool);
@@ -702,19 +1209,22 @@ bad_pending_pool:
dm_kcopyd_client_destroy(s->kcopyd_client);
bad_kcopyd:
- exit_exception_table(&s->pending, pending_cache);
- exit_exception_table(&s->complete, exception_cache);
+ dm_exception_table_exit(&s->pending, pending_cache);
+ dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
dm_put_device(ti, s->origin);
bad_origin:
- kfree(s);
+ dm_exception_store_destroy(s->store);
-bad_snap:
- dm_exception_store_destroy(store);
+bad_store:
+ dm_put_device(ti, s->cow);
+
+bad_cow:
+ kfree(s);
-bad_args:
+bad:
return r;
}
@@ -723,8 +1233,39 @@ static void __free_exceptions(struct dm_snapshot *s)
dm_kcopyd_client_destroy(s->kcopyd_client);
s->kcopyd_client = NULL;
- exit_exception_table(&s->pending, pending_cache);
- exit_exception_table(&s->complete, exception_cache);
+ dm_exception_table_exit(&s->pending, pending_cache);
+ dm_exception_table_exit(&s->complete, exception_cache);
+}
+
+static void __handover_exceptions(struct dm_snapshot *snap_src,
+ struct dm_snapshot *snap_dest)
+{
+ union {
+ struct dm_exception_table table_swap;
+ struct dm_exception_store *store_swap;
+ } u;
+
+ /*
+ * Swap all snapshot context information between the two instances.
+ */
+ u.table_swap = snap_dest->complete;
+ snap_dest->complete = snap_src->complete;
+ snap_src->complete = u.table_swap;
+
+ u.store_swap = snap_dest->store;
+ snap_dest->store = snap_src->store;
+ snap_src->store = u.store_swap;
+
+ snap_dest->store->snap = snap_dest;
+ snap_src->store->snap = snap_src;
+
+ snap_dest->ti->split_io = snap_dest->store->chunk_size;
+ snap_dest->valid = snap_src->valid;
+
+ /*
+ * Set source invalid to ensure it receives no further I/O.
+ */
+ snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
@@ -733,9 +1274,24 @@ static void snapshot_dtr(struct dm_target *ti)
int i;
#endif
struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
flush_workqueue(ksnapd);
+ down_read(&_origins_lock);
+ /* Check whether exception handover must be cancelled */
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest && (s == snap_src)) {
+ down_write(&snap_dest->lock);
+ snap_dest->valid = 0;
+ up_write(&snap_dest->lock);
+ DMERR("Cancelling snapshot handover.");
+ }
+ up_read(&_origins_lock);
+
+ if (dm_target_is_snapshot_merge(ti))
+ stop_merge(s);
+
/* Prevent further origin writes from using this snapshot. */
/* After this returns there can be no new kcopyd jobs. */
unregister_snapshot(s);
@@ -763,6 +1319,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ dm_put_device(ti, s->cow);
+
kfree(s);
}
@@ -795,6 +1353,26 @@ static void flush_queued_bios(struct work_struct *work)
flush_bios(queued_bios);
}
+static int do_origin(struct dm_dev *origin, struct bio *bio);
+
+/*
+ * Flush a list of buffers.
+ */
+static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
+{
+ struct bio *n;
+ int r;
+
+ while (bio) {
+ n = bio->bi_next;
+ bio->bi_next = NULL;
+ r = do_origin(s->origin, bio);
+ if (r == DM_MAPIO_REMAPPED)
+ generic_make_request(bio);
+ bio = n;
+ }
+}
+
/*
* Error a list of buffers.
*/
@@ -825,45 +1403,12 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
s->valid = 0;
- dm_table_event(s->store->ti->table);
-}
-
-static void get_pending_exception(struct dm_snap_pending_exception *pe)
-{
- atomic_inc(&pe->ref_count);
-}
-
-static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
-{
- struct dm_snap_pending_exception *primary_pe;
- struct bio *origin_bios = NULL;
-
- primary_pe = pe->primary_pe;
-
- /*
- * If this pe is involved in a write to the origin and
- * it is the last sibling to complete then release
- * the bios for the original write to the origin.
- */
- if (primary_pe &&
- atomic_dec_and_test(&primary_pe->ref_count)) {
- origin_bios = bio_list_get(&primary_pe->origin_bios);
- free_pending_exception(primary_pe);
- }
-
- /*
- * Free the pe if it's not linked to an origin write or if
- * it's not itself a primary pe.
- */
- if (!primary_pe || primary_pe != pe)
- free_pending_exception(pe);
-
- return origin_bios;
+ dm_table_event(s->ti->table);
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
@@ -877,7 +1422,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
goto out;
}
- e = alloc_exception();
+ e = alloc_completed_exception();
if (!e) {
down_write(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
@@ -888,28 +1433,27 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
down_write(&s->lock);
if (!s->valid) {
- free_exception(e);
+ free_completed_exception(e);
error = 1;
goto out;
}
- /*
- * Check for conflicting reads. This is extremely improbable,
- * so msleep(1) is sufficient and there is no need for a wait queue.
- */
- while (__chunk_is_tracked(s, pe->e.old_chunk))
- msleep(1);
+ /* Check for conflicting reads */
+ __check_for_conflicting_io(s, pe->e.old_chunk);
/*
* Add a proper exception, and remove the
* in-flight exception from the list.
*/
- insert_completed_exception(s, e);
+ dm_insert_exception(&s->complete, e);
out:
- remove_exception(&pe->e);
+ dm_remove_exception(&pe->e);
snapshot_bios = bio_list_get(&pe->snapshot_bios);
- origin_bios = put_pending_exception(pe);
+ origin_bios = bio_list_get(&pe->origin_bios);
+ free_pending_exception(pe);
+
+ increment_pending_exceptions_done_count();
up_write(&s->lock);
@@ -919,7 +1463,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
else
flush_bios(snapshot_bios);
- flush_bios(origin_bios);
+ retry_origin_bios(s, origin_bios);
}
static void commit_callback(void *context, int success)
@@ -963,7 +1507,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
- dest.bdev = s->store->cow->bdev;
+ dest.bdev = s->cow->bdev;
dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
dest.count = src.count;
@@ -975,7 +1519,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
- struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);
+ struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
if (!e)
return NULL;
@@ -1006,8 +1550,6 @@ __find_pending_exception(struct dm_snapshot *s,
pe->e.old_chunk = chunk;
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
- pe->primary_pe = NULL;
- atomic_set(&pe->ref_count, 0);
pe->started = 0;
if (s->store->type->prepare_exception(s->store, &pe->e)) {
@@ -1015,16 +1557,15 @@ __find_pending_exception(struct dm_snapshot *s,
return NULL;
}
- get_pending_exception(pe);
- insert_exception(&s->pending, &pe->e);
+ dm_insert_exception(&s->pending, &pe->e);
return pe;
}
-static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
+static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
- bio->bi_bdev = s->store->cow->bdev;
+ bio->bi_bdev = s->cow->bdev;
bio->bi_sector = chunk_to_sector(s->store,
dm_chunk_number(e->new_chunk) +
(chunk - e->old_chunk)) +
@@ -1035,14 +1576,14 @@ static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
static int snapshot_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
struct dm_snapshot *s = ti->private;
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
if (unlikely(bio_empty_barrier(bio))) {
- bio->bi_bdev = s->store->cow->bdev;
+ bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
}
@@ -1063,7 +1604,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
/* If the block is already remapped - use that, else remap it */
- e = lookup_exception(&s->complete, chunk);
+ e = dm_lookup_exception(&s->complete, chunk);
if (e) {
remap_exception(s, e, bio, chunk);
goto out_unlock;
@@ -1087,7 +1628,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
goto out_unlock;
}
- e = lookup_exception(&s->complete, chunk);
+ e = dm_lookup_exception(&s->complete, chunk);
if (e) {
free_pending_exception(pe);
remap_exception(s, e, bio, chunk);
@@ -1125,6 +1666,78 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
return r;
}
+/*
+ * A snapshot-merge target behaves like a combination of a snapshot
+ * target and a snapshot-origin target. It only generates new
+ * exceptions in other snapshots and not in the one that is being
+ * merged.
+ *
+ * For each chunk, if there is an existing exception, it is used to
+ * redirect I/O to the cow device. Otherwise I/O is sent to the origin,
+ * which in turn might generate exceptions in other snapshots.
+ * If merging is currently taking place on the chunk in question, the
+ * I/O is deferred by adding it to s->bios_queued_during_merge.
+ */
+static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ struct dm_exception *e;
+ struct dm_snapshot *s = ti->private;
+ int r = DM_MAPIO_REMAPPED;
+ chunk_t chunk;
+
+ if (unlikely(bio_empty_barrier(bio))) {
+ if (!map_context->flush_request)
+ bio->bi_bdev = s->origin->bdev;
+ else
+ bio->bi_bdev = s->cow->bdev;
+ map_context->ptr = NULL;
+ return DM_MAPIO_REMAPPED;
+ }
+
+ chunk = sector_to_chunk(s->store, bio->bi_sector);
+
+ down_write(&s->lock);
+
+ /* Full merging snapshots are redirected to the origin */
+ if (!s->valid)
+ goto redirect_to_origin;
+
+ /* If the block is already remapped - use that */
+ e = dm_lookup_exception(&s->complete, chunk);
+ if (e) {
+ /* Queue writes overlapping with chunks being merged */
+ if (bio_rw(bio) == WRITE &&
+ chunk >= s->first_merging_chunk &&
+ chunk < (s->first_merging_chunk +
+ s->num_merging_chunks)) {
+ bio->bi_bdev = s->origin->bdev;
+ bio_list_add(&s->bios_queued_during_merge, bio);
+ r = DM_MAPIO_SUBMITTED;
+ goto out_unlock;
+ }
+
+ remap_exception(s, e, bio, chunk);
+
+ if (bio_rw(bio) == WRITE)
+ map_context->ptr = track_chunk(s, chunk);
+ goto out_unlock;
+ }
+
+redirect_to_origin:
+ bio->bi_bdev = s->origin->bdev;
+
+ if (bio_rw(bio) == WRITE) {
+ up_write(&s->lock);
+ return do_origin(s->origin, bio);
+ }
+
+out_unlock:
+ up_write(&s->lock);
+
+ return r;
+}
+
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
int error, union map_info *map_context)
{
@@ -1137,40 +1750,135 @@ static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
return 0;
}
+static void snapshot_merge_presuspend(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
+
+ stop_merge(s);
+}
+
+static void snapshot_postsuspend(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
+
+ down_write(&s->lock);
+ s->suspended = 1;
+ up_write(&s->lock);
+}
+
+static int snapshot_preresume(struct dm_target *ti)
+{
+ int r = 0;
+ struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+ down_read(&snap_src->lock);
+ if (s == snap_src) {
+ DMERR("Unable to resume snapshot source until "
+ "handover completes.");
+ r = -EINVAL;
+ } else if (!snap_src->suspended) {
+ DMERR("Unable to perform snapshot handover until "
+ "source is suspended.");
+ r = -EINVAL;
+ }
+ up_read(&snap_src->lock);
+ }
+ up_read(&_origins_lock);
+
+ return r;
+}
+
static void snapshot_resume(struct dm_target *ti)
{
struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest) {
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ __handover_exceptions(snap_src, snap_dest);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
+ }
+ up_read(&_origins_lock);
+
+ /* Now we have correct chunk size, reregister */
+ reregister_snapshot(s);
down_write(&s->lock);
s->active = 1;
+ s->suspended = 0;
up_write(&s->lock);
}
+static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+{
+ sector_t min_chunksize;
+
+ down_read(&_origins_lock);
+ min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
+ up_read(&_origins_lock);
+
+ return min_chunksize;
+}
+
+static void snapshot_merge_resume(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
+
+ /*
+ * Handover exceptions from existing snapshot.
+ */
+ snapshot_resume(ti);
+
+ /*
+ * snapshot-merge acts as an origin, so set ti->split_io
+ */
+ ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+
+ start_merge(s);
+}
+
static int snapshot_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
- down_write(&snap->lock);
-
switch (type) {
case STATUSTYPE_INFO:
+
+ down_write(&snap->lock);
+
if (!snap->valid)
DMEMIT("Invalid");
+ else if (snap->merge_failed)
+ DMEMIT("Merge failed");
else {
- if (snap->store->type->fraction_full) {
- sector_t numerator, denominator;
- snap->store->type->fraction_full(snap->store,
- &numerator,
- &denominator);
- DMEMIT("%llu/%llu",
- (unsigned long long)numerator,
- (unsigned long long)denominator);
+ if (snap->store->type->usage) {
+ sector_t total_sectors, sectors_allocated,
+ metadata_sectors;
+ snap->store->type->usage(snap->store,
+ &total_sectors,
+ &sectors_allocated,
+ &metadata_sectors);
+ DMEMIT("%llu/%llu %llu",
+ (unsigned long long)sectors_allocated,
+ (unsigned long long)total_sectors,
+ (unsigned long long)metadata_sectors);
}
else
DMEMIT("Unknown");
}
+
+ up_write(&snap->lock);
+
break;
case STATUSTYPE_TABLE:
@@ -1179,14 +1887,12 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
* to make private copies if the output is to
* make sense.
*/
- DMEMIT("%s", snap->origin->name);
+ DMEMIT("%s %s", snap->origin->name, snap->cow->name);
snap->store->type->status(snap->store, type, result + sz,
maxlen - sz);
break;
}
- up_write(&snap->lock);
-
return 0;
}
@@ -1202,17 +1908,36 @@ static int snapshot_iterate_devices(struct dm_target *ti,
/*-----------------------------------------------------------------
* Origin methods
*---------------------------------------------------------------*/
-static int __origin_write(struct list_head *snapshots, struct bio *bio)
+
+/*
+ * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
+ * supplied bio is ignored. The caller may submit it immediately.
+ * (No remapping actually occurs as the origin is always a direct linear
+ * map.)
+ *
+ * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
+ * and any supplied bio is added to a list to be submitted once all
+ * the necessary exceptions exist.
+ */
+static int __origin_write(struct list_head *snapshots, sector_t sector,
+ struct bio *bio)
{
- int r = DM_MAPIO_REMAPPED, first = 0;
+ int r = DM_MAPIO_REMAPPED;
struct dm_snapshot *snap;
- struct dm_snap_exception *e;
- struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
+ struct dm_exception *e;
+ struct dm_snap_pending_exception *pe;
+ struct dm_snap_pending_exception *pe_to_start_now = NULL;
+ struct dm_snap_pending_exception *pe_to_start_last = NULL;
chunk_t chunk;
- LIST_HEAD(pe_queue);
/* Do all the snapshots on this origin */
list_for_each_entry (snap, snapshots, list) {
+ /*
+ * Don't make new exceptions in a merging snapshot
+ * because it has effectively been deleted
+ */
+ if (dm_target_is_snapshot_merge(snap->ti))
+ continue;
down_write(&snap->lock);
@@ -1221,24 +1946,21 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
goto next_snapshot;
/* Nothing to do if writing beyond end of snapshot */
- if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
+ if (sector >= dm_table_get_size(snap->ti->table))
goto next_snapshot;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
- chunk = sector_to_chunk(snap->store, bio->bi_sector);
+ chunk = sector_to_chunk(snap->store, sector);
/*
* Check exception table to see if block
* is already remapped in this snapshot
* and trigger an exception if not.
- *
- * ref_count is initialised to 1 so pending_complete()
- * won't destroy the primary_pe while we're inside this loop.
*/
- e = lookup_exception(&snap->complete, chunk);
+ e = dm_lookup_exception(&snap->complete, chunk);
if (e)
goto next_snapshot;
@@ -1253,7 +1975,7 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
goto next_snapshot;
}
- e = lookup_exception(&snap->complete, chunk);
+ e = dm_lookup_exception(&snap->complete, chunk);
if (e) {
free_pending_exception(pe);
goto next_snapshot;
@@ -1266,59 +1988,43 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio)
}
}
- if (!primary_pe) {
- /*
- * Either every pe here has same
- * primary_pe or none has one yet.
- */
- if (pe->primary_pe)
- primary_pe = pe->primary_pe;
- else {
- primary_pe = pe;
- first = 1;
- }
-
- bio_list_add(&primary_pe->origin_bios, bio);
+ r = DM_MAPIO_SUBMITTED;
- r = DM_MAPIO_SUBMITTED;
- }
+ /*
+ * If an origin bio was supplied, queue it to wait for the
+ * completion of this exception, and start this one last,
+ * at the end of the function.
+ */
+ if (bio) {
+ bio_list_add(&pe->origin_bios, bio);
+ bio = NULL;
- if (!pe->primary_pe) {
- pe->primary_pe = primary_pe;
- get_pending_exception(primary_pe);
+ if (!pe->started) {
+ pe->started = 1;
+ pe_to_start_last = pe;
+ }
}
if (!pe->started) {
pe->started = 1;
- list_add_tail(&pe->list, &pe_queue);
+ pe_to_start_now = pe;
}
next_snapshot:
up_write(&snap->lock);
- }
- if (!primary_pe)
- return r;
-
- /*
- * If this is the first time we're processing this chunk and
- * ref_count is now 1 it means all the pending exceptions
- * got completed while we were in the loop above, so it falls to
- * us here to remove the primary_pe and submit any origin_bios.
- */
-
- if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
- flush_bios(bio_list_get(&primary_pe->origin_bios));
- free_pending_exception(primary_pe);
- /* If we got here, pe_queue is necessarily empty. */
- return r;
+ if (pe_to_start_now) {
+ start_copy(pe_to_start_now);
+ pe_to_start_now = NULL;
+ }
}
/*
- * Now that we have a complete pe list we can start the copying.
+ * Submit the exception against which the bio is queued last,
+ * to give the other exceptions a head start.
*/
- list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
- start_copy(pe);
+ if (pe_to_start_last)
+ start_copy(pe_to_start_last);
return r;
}
@@ -1334,13 +2040,48 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
- r = __origin_write(&o->snapshots, bio);
+ r = __origin_write(&o->snapshots, bio->bi_sector, bio);
up_read(&_origins_lock);
return r;
}
/*
+ * Trigger exceptions in all non-merging snapshots.
+ *
+ * The chunk size of the merging snapshot may be larger than the chunk
+ * size of some other snapshot so we may need to reallocate multiple
+ * chunks in other snapshots.
+ *
+ * We scan all the overlapping exceptions in the other snapshots.
+ * Returns 1 if anything was reallocated and must be waited for,
+ * otherwise returns 0.
+ *
+ * size must be a multiple of merging_snap's chunk_size.
+ */
+static int origin_write_extent(struct dm_snapshot *merging_snap,
+ sector_t sector, unsigned size)
+{
+ int must_wait = 0;
+ sector_t n;
+ struct origin *o;
+
+ /*
+ * The origin's __minimum_chunk_size() got stored in split_io
+ * by snapshot_merge_resume().
+ */
+ down_read(&_origins_lock);
+ o = __lookup_origin(merging_snap->origin->bdev);
+ for (n = 0; n < size; n += merging_snap->ti->split_io)
+ if (__origin_write(&o->snapshots, sector + n, NULL) ==
+ DM_MAPIO_SUBMITTED)
+ must_wait = 1;
+ up_read(&_origins_lock);
+
+ return must_wait;
+}
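An illustrative caller pattern for the helper above (hypothetical names, not shown in this hunk): before the merge overwrites a region of the origin it would do something like

	/* io_size is a multiple of merging_snap's chunk_size */
	while (origin_write_extent(merging_snap, dest_sector, io_size))
		wait_for_pending_exceptions(merging_snap);	/* hypothetical helper */

so that every other snapshot still referencing the old data gets its own copy before the origin is rewritten.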
+
+/*
* Origin: maps a linear range of a device, with hooks for snapshotting.
*/
@@ -1391,8 +2132,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
/*
* Set the target "split_io" field to the minimum of all the snapshots'
* chunk sizes.
@@ -1400,19 +2139,8 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
static void origin_resume(struct dm_target *ti)
{
struct dm_dev *dev = ti->private;
- struct dm_snapshot *snap;
- struct origin *o;
- unsigned chunk_size = 0;
-
- down_read(&_origins_lock);
- o = __lookup_origin(dev->bdev);
- if (o)
- list_for_each_entry (snap, &o->snapshots, list)
- chunk_size = min_not_zero(chunk_size,
- snap->store->chunk_size);
- up_read(&_origins_lock);
- ti->split_io = chunk_size;
+ ti->split_io = get_origin_minimum_chunksize(dev->bdev);
}
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
@@ -1455,17 +2183,35 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 7, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
.map = snapshot_map,
.end_io = snapshot_end_io,
+ .postsuspend = snapshot_postsuspend,
+ .preresume = snapshot_preresume,
.resume = snapshot_resume,
.status = snapshot_status,
.iterate_devices = snapshot_iterate_devices,
};
+static struct target_type merge_target = {
+ .name = dm_snapshot_merge_target_name,
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
+ .map = snapshot_merge_map,
+ .end_io = snapshot_end_io,
+ .presuspend = snapshot_merge_presuspend,
+ .postsuspend = snapshot_postsuspend,
+ .preresume = snapshot_preresume,
+ .resume = snapshot_merge_resume,
+ .status = snapshot_status,
+ .iterate_devices = snapshot_iterate_devices,
+};
+
static int __init dm_snapshot_init(void)
{
int r;
@@ -1477,7 +2223,7 @@ static int __init dm_snapshot_init(void)
}
r = dm_register_target(&snapshot_target);
- if (r) {
+ if (r < 0) {
DMERR("snapshot target register failed %d", r);
goto bad_register_snapshot_target;
}
@@ -1485,34 +2231,40 @@ static int __init dm_snapshot_init(void)
r = dm_register_target(&origin_target);
if (r < 0) {
DMERR("Origin target register failed %d", r);
- goto bad1;
+ goto bad_register_origin_target;
+ }
+
+ r = dm_register_target(&merge_target);
+ if (r < 0) {
+ DMERR("Merge target register failed %d", r);
+ goto bad_register_merge_target;
}
r = init_origin_hash();
if (r) {
DMERR("init_origin_hash failed.");
- goto bad2;
+ goto bad_origin_hash;
}
- exception_cache = KMEM_CACHE(dm_snap_exception, 0);
+ exception_cache = KMEM_CACHE(dm_exception, 0);
if (!exception_cache) {
DMERR("Couldn't create exception cache.");
r = -ENOMEM;
- goto bad3;
+ goto bad_exception_cache;
}
pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
if (!pending_cache) {
DMERR("Couldn't create pending cache.");
r = -ENOMEM;
- goto bad4;
+ goto bad_pending_cache;
}
tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
if (!tracked_chunk_cache) {
DMERR("Couldn't create cache to track chunks in use.");
r = -ENOMEM;
- goto bad5;
+ goto bad_tracked_chunk_cache;
}
ksnapd = create_singlethread_workqueue("ksnapd");
@@ -1526,19 +2278,21 @@ static int __init dm_snapshot_init(void)
bad_pending_pool:
kmem_cache_destroy(tracked_chunk_cache);
-bad5:
+bad_tracked_chunk_cache:
kmem_cache_destroy(pending_cache);
-bad4:
+bad_pending_cache:
kmem_cache_destroy(exception_cache);
-bad3:
+bad_exception_cache:
exit_origin_hash();
-bad2:
+bad_origin_hash:
+ dm_unregister_target(&merge_target);
+bad_register_merge_target:
dm_unregister_target(&origin_target);
-bad1:
+bad_register_origin_target:
dm_unregister_target(&snapshot_target);
-
bad_register_snapshot_target:
dm_exception_store_exit();
+
return r;
}
@@ -1548,6 +2302,7 @@ static void __exit dm_snapshot_exit(void)
dm_unregister_target(&snapshot_target);
dm_unregister_target(&origin_target);
+ dm_unregister_target(&merge_target);
exit_origin_hash();
kmem_cache_destroy(pending_cache);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 4b045903a4e..f53392df7b9 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -59,7 +59,7 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
{
- sprintf(buf, "%d\n", dm_suspended(md));
+ sprintf(buf, "%d\n", dm_suspended_md(md));
return strlen(buf);
}
@@ -80,12 +80,20 @@ static struct sysfs_ops dm_sysfs_ops = {
};
/*
+ * The sysfs structure is embedded in md struct, nothing to do here
+ */
+static void dm_sysfs_release(struct kobject *kobj)
+{
+}
+
+/*
* dm kobject is embedded in mapped_device structure
* no need to define release function here
*/
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
+ .release = dm_sysfs_release
};
/*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 1a6cb3c7822..be625475cf6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -12,6 +12,7 @@
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
@@ -237,6 +238,9 @@ void dm_table_destroy(struct dm_table *t)
{
unsigned int i;
+ if (!t)
+ return;
+
while (atomic_read(&t->holders))
msleep(1);
smp_mb();
@@ -600,11 +604,8 @@ int dm_split_args(int *argc, char ***argvp, char *input)
return -ENOMEM;
while (1) {
- start = end;
-
/* Skip whitespace */
- while (*start && isspace(*start))
- start++;
+ start = skip_spaces(end);
if (!*start)
break; /* success, we hit the end */
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 6f65883aef1..c7c555a8c7b 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -139,14 +139,13 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
list_del_init(&event->elist);
/*
- * Need to call dm_copy_name_and_uuid from here for now.
- * Context of previous var adds and locking used for
- * hash_cell not compatable.
+ * When a device is being removed, this copy fails and we
+ * discard these unsent events.
*/
if (dm_copy_name_and_uuid(event->md, event->name,
event->uuid)) {
- DMERR("%s: dm_copy_name_and_uuid() failed",
- __func__);
+ DMINFO("%s: skipping sending uevent for lost device",
+ __func__);
goto uevent_free;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 724efc63904..3167480b532 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -143,9 +143,19 @@ struct mapped_device {
int barrier_error;
/*
+ * Protect barrier_error from concurrent endio processing
+ * in request-based dm.
+ */
+ spinlock_t barrier_error_lock;
+
+ /*
* Processing queue (flush/barriers)
*/
struct workqueue_struct *wq;
+ struct work_struct barrier_work;
+
+ /* A pointer to the currently processing pre/post flush request */
+ struct request *flush_request;
/*
* The current mapping.
@@ -178,9 +188,6 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;
- /* marker of flush suspend for request-based dm */
- struct request suspend_rq;
-
/* For saving the address of __make_request for request based dm */
make_request_fn *saved_make_request_fn;
@@ -275,6 +282,7 @@ static int (*_inits[])(void) __initdata = {
dm_target_init,
dm_linear_init,
dm_stripe_init,
+ dm_io_init,
dm_kcopyd_init,
dm_interface_init,
};
@@ -284,6 +292,7 @@ static void (*_exits[])(void) = {
dm_target_exit,
dm_linear_exit,
dm_stripe_exit,
+ dm_io_exit,
dm_kcopyd_exit,
dm_interface_exit,
};
@@ -320,6 +329,11 @@ static void __exit dm_exit(void)
/*
* Block device functions
*/
+int dm_deleting_md(struct mapped_device *md)
+{
+ return test_bit(DMF_DELETING, &md->flags);
+}
+
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mapped_device *md;
@@ -331,7 +345,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
goto out;
if (test_bit(DMF_FREEING, &md->flags) ||
- test_bit(DMF_DELETING, &md->flags)) {
+ dm_deleting_md(md)) {
md = NULL;
goto out;
}
@@ -388,7 +402,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
struct dm_target *tgt;
int r = -ENOTTY;
@@ -401,7 +415,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
tgt = dm_table_get_target(map, 0);
- if (dm_suspended(md)) {
+ if (dm_suspended_md(md)) {
r = -EAGAIN;
goto out;
}
@@ -430,9 +444,10 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
mempool_free(tio, md->tio_pool);
}
-static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
+static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
+ gfp_t gfp_mask)
{
- return mempool_alloc(md->tio_pool, GFP_ATOMIC);
+ return mempool_alloc(md->tio_pool, gfp_mask);
}
static void free_rq_tio(struct dm_rq_target_io *tio)
@@ -450,6 +465,12 @@ static void free_bio_info(struct dm_rq_clone_bio_info *info)
mempool_free(info, info->tio->md->io_pool);
}
+static int md_in_flight(struct mapped_device *md)
+{
+ return atomic_read(&md->pending[READ]) +
+ atomic_read(&md->pending[WRITE]);
+}
+
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
@@ -512,7 +533,7 @@ static void queue_io(struct mapped_device *md, struct bio *bio)
* function to access the md->map field, and make sure they call
* dm_table_put() when finished.
*/
-struct dm_table *dm_get_table(struct mapped_device *md)
+struct dm_table *dm_get_live_table(struct mapped_device *md)
{
struct dm_table *t;
unsigned long flags;
@@ -716,28 +737,38 @@ static void end_clone_bio(struct bio *clone, int error)
blk_update_request(tio->orig, 0, nr_bytes);
}
+static void store_barrier_error(struct mapped_device *md, int error)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&md->barrier_error_lock, flags);
+ /*
+ * Basically, the first error is taken, but:
+ * -EOPNOTSUPP supersedes any I/O error.
+ * Requeue request supersedes any I/O error but -EOPNOTSUPP.
+ */
+ if (!md->barrier_error || error == -EOPNOTSUPP ||
+ (md->barrier_error != -EOPNOTSUPP &&
+ error == DM_ENDIO_REQUEUE))
+ md->barrier_error = error;
+ spin_unlock_irqrestore(&md->barrier_error_lock, flags);
+}
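A worked example of the precedence rule described above (illustrative only, not part of the patch): the sequence

	store_barrier_error(md, -EIO);			/* first error is recorded */
	store_barrier_error(md, DM_ENDIO_REQUEUE);	/* supersedes plain I/O errors */
	store_barrier_error(md, -EOPNOTSUPP);		/* supersedes everything */

leaves md->barrier_error == -EOPNOTSUPP, whereas -EIO followed by -ENOMEM keeps the first error, -EIO.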
+
/*
* Don't touch any member of the md after calling this function because
* the md may be freed in dm_put() at the end of this function.
* Or do dm_get() before calling this function and dm_put() later.
*/
-static void rq_completed(struct mapped_device *md, int run_queue)
+static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
- int wakeup_waiters = 0;
- struct request_queue *q = md->queue;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!queue_in_flight(q))
- wakeup_waiters = 1;
- spin_unlock_irqrestore(q->queue_lock, flags);
+ atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
- if (wakeup_waiters)
+ if (!md_in_flight(md))
wake_up(&md->wait);
if (run_queue)
- blk_run_queue(q);
+ blk_run_queue(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
@@ -753,6 +784,44 @@ static void free_rq_clone(struct request *clone)
free_rq_tio(tio);
}
+/*
+ * Complete the clone and the original request.
+ * Must be called without queue lock.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+ int rw = rq_data_dir(clone);
+ int run_queue = 1;
+ bool is_barrier = blk_barrier_rq(clone);
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+
+ if (blk_pc_request(rq) && !is_barrier) {
+ rq->errors = clone->errors;
+ rq->resid_len = clone->resid_len;
+
+ if (rq->sense)
+ /*
+ * We are using the sense buffer of the original
+ * request.
+ * So setting the length of the sense data is enough.
+ */
+ rq->sense_len = clone->sense_len;
+ }
+
+ free_rq_clone(clone);
+
+ if (unlikely(is_barrier)) {
+ if (unlikely(error))
+ store_barrier_error(md, error);
+ run_queue = 0;
+ } else
+ blk_end_request_all(rq, error);
+
+ rq_completed(md, rw, run_queue);
+}
+
static void dm_unprep_request(struct request *rq)
{
struct request *clone = rq->special;
@@ -768,12 +837,23 @@ static void dm_unprep_request(struct request *rq)
*/
void dm_requeue_unmapped_request(struct request *clone)
{
+ int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request_queue *q = rq->q;
unsigned long flags;
+ if (unlikely(blk_barrier_rq(clone))) {
+ /*
+ * Barrier clones share an original request.
+ * Leave it to dm_end_request(), which handles this special
+ * case.
+ */
+ dm_end_request(clone, DM_ENDIO_REQUEUE);
+ return;
+ }
+
dm_unprep_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
@@ -782,7 +862,7 @@ void dm_requeue_unmapped_request(struct request *clone)
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
- rq_completed(md, 0);
+ rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
@@ -815,34 +895,28 @@ static void start_queue(struct request_queue *q)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-/*
- * Complete the clone and the original request.
- * Must be called without queue lock.
- */
-static void dm_end_request(struct request *clone, int error)
+static void dm_done(struct request *clone, int error, bool mapped)
{
+ int r = error;
struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
- struct request *rq = tio->orig;
+ dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
- if (blk_pc_request(rq)) {
- rq->errors = clone->errors;
- rq->resid_len = clone->resid_len;
+ if (mapped && rq_end_io)
+ r = rq_end_io(tio->ti, clone, error, &tio->info);
- if (rq->sense)
- /*
- * We are using the sense buffer of the original
- * request.
- * So setting the length of the sense data is enough.
- */
- rq->sense_len = clone->sense_len;
+ if (r <= 0)
+ /* The target wants to complete the I/O */
+ dm_end_request(clone, r);
+ else if (r == DM_ENDIO_INCOMPLETE)
+ /* The target will handle the I/O */
+ return;
+ else if (r == DM_ENDIO_REQUEUE)
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ else {
+ DMWARN("unimplemented target endio return value: %d", r);
+ BUG();
}
-
- free_rq_clone(clone);
-
- blk_end_request_all(rq, error);
-
- rq_completed(md, 1);
}
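For context, the rq_end_io hook consumed above is supplied by a request-based target; a minimal sketch, assuming the 2.6.32-era signature and a hypothetical path check:

	static int my_rq_end_io(struct dm_target *ti, struct request *clone,
				int error, union map_info *map_context)
	{
		if (error == -EIO && other_paths_available(ti))	/* hypothetical */
			return DM_ENDIO_REQUEUE;	/* dm_done() requeues the clone */
		return error;	/* <= 0: dm_done() completes the original request */
	}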
/*
@@ -850,27 +924,14 @@ static void dm_end_request(struct request *clone, int error)
*/
static void dm_softirq_done(struct request *rq)
{
+ bool mapped = true;
struct request *clone = rq->completion_data;
struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
- int error = tio->error;
- if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
- error = rq_end_io(tio->ti, clone, error, &tio->info);
+ if (rq->cmd_flags & REQ_FAILED)
+ mapped = false;
- if (error <= 0)
- /* The target wants to complete the I/O */
- dm_end_request(clone, error);
- else if (error == DM_ENDIO_INCOMPLETE)
- /* The target will handle the I/O */
- return;
- else if (error == DM_ENDIO_REQUEUE)
- /* The target wants to requeue the I/O */
- dm_requeue_unmapped_request(clone);
- else {
- DMWARN("unimplemented target endio return value: %d", error);
- BUG();
- }
+ dm_done(clone, tio->error, mapped);
}
/*
@@ -882,6 +943,19 @@ static void dm_complete_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
+ if (unlikely(blk_barrier_rq(clone))) {
+ /*
+ * Barrier clones share an original request, so we can't use
+ * softirq_done with the original.
+ * Pass the clone to dm_done() directly in this special case.
+ * It is safe (even if clone->q->queue_lock is held here)
+ * because there is no I/O dispatching during the completion
+ * of barrier clone.
+ */
+ dm_done(clone, error, true);
+ return;
+ }
+
tio->error = error;
rq->completion_data = clone;
blk_complete_request(rq);
@@ -898,6 +972,17 @@ void dm_kill_unmapped_request(struct request *clone, int error)
struct dm_rq_target_io *tio = clone->end_io_data;
struct request *rq = tio->orig;
+ if (unlikely(blk_barrier_rq(clone))) {
+ /*
+ * Barrier clones share an original request.
+ * Leave it to dm_end_request(), which handles this special
+ * case.
+ */
+ BUG_ON(error > 0);
+ dm_end_request(clone, error);
+ return;
+ }
+
rq->cmd_flags |= REQ_FAILED;
dm_complete_request(clone, error);
}
@@ -1214,7 +1299,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
struct clone_info ci;
int error = 0;
- ci.map = dm_get_table(md);
+ ci.map = dm_get_live_table(md);
if (unlikely(!ci.map)) {
if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
bio_io_error(bio);
@@ -1255,7 +1340,7 @@ static int dm_merge_bvec(struct request_queue *q,
struct bio_vec *biovec)
{
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
sector_t max_sectors;
int max_size = 0;
@@ -1352,11 +1437,6 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
{
struct mapped_device *md = q->queuedata;
- if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
- return 0;
- }
-
return md->saved_make_request_fn(q, bio); /* call __make_request() */
}
@@ -1375,6 +1455,25 @@ static int dm_request(struct request_queue *q, struct bio *bio)
return _dm_request(q, bio);
}
+/*
+ * Mark this request as a flush request, so that dm_request_fn() can
+ * recognize it.
+ */
+static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
+{
+ rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
+ rq->cmd[0] = REQ_LB_OP_FLUSH;
+}
+
+static bool dm_rq_is_flush_request(struct request *rq)
+{
+ if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+ rq->cmd[0] == REQ_LB_OP_FLUSH)
+ return true;
+ else
+ return false;
+}
+
void dm_dispatch_request(struct request *rq)
{
int r;
@@ -1420,25 +1519,54 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
static int setup_clone(struct request *clone, struct request *rq,
struct dm_rq_target_io *tio)
{
- int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
- dm_rq_bio_constructor, tio);
+ int r;
- if (r)
- return r;
+ if (dm_rq_is_flush_request(rq)) {
+ blk_rq_init(NULL, clone);
+ clone->cmd_type = REQ_TYPE_FS;
+ clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
+ } else {
+ r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+ dm_rq_bio_constructor, tio);
+ if (r)
+ return r;
+
+ clone->cmd = rq->cmd;
+ clone->cmd_len = rq->cmd_len;
+ clone->sense = rq->sense;
+ clone->buffer = rq->buffer;
+ }
- clone->cmd = rq->cmd;
- clone->cmd_len = rq->cmd_len;
- clone->sense = rq->sense;
- clone->buffer = rq->buffer;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
return 0;
}
-static int dm_rq_flush_suspending(struct mapped_device *md)
+static struct request *clone_rq(struct request *rq, struct mapped_device *md,
+ gfp_t gfp_mask)
{
- return !md->suspend_rq.special;
+ struct request *clone;
+ struct dm_rq_target_io *tio;
+
+ tio = alloc_rq_tio(md, gfp_mask);
+ if (!tio)
+ return NULL;
+
+ tio->md = md;
+ tio->ti = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ clone = &tio->clone;
+ if (setup_clone(clone, rq, tio)) {
+ /* -ENOMEM */
+ free_rq_tio(tio);
+ return NULL;
+ }
+
+ return clone;
}
/*
@@ -1447,39 +1575,19 @@ static int dm_rq_flush_suspending(struct mapped_device *md)
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
- struct dm_rq_target_io *tio;
struct request *clone;
- if (unlikely(rq == &md->suspend_rq)) {
- if (dm_rq_flush_suspending(md))
- return BLKPREP_OK;
- else
- /* The flush suspend was interrupted */
- return BLKPREP_KILL;
- }
+ if (unlikely(dm_rq_is_flush_request(rq)))
+ return BLKPREP_OK;
if (unlikely(rq->special)) {
DMWARN("Already has something in rq->special.");
return BLKPREP_KILL;
}
- tio = alloc_rq_tio(md); /* Only one for each original request */
- if (!tio)
- /* -ENOMEM */
- return BLKPREP_DEFER;
-
- tio->md = md;
- tio->ti = NULL;
- tio->orig = rq;
- tio->error = 0;
- memset(&tio->info, 0, sizeof(tio->info));
-
- clone = &tio->clone;
- if (setup_clone(clone, rq, tio)) {
- /* -ENOMEM */
- free_rq_tio(tio);
+ clone = clone_rq(rq, md, GFP_ATOMIC);
+ if (!clone)
return BLKPREP_DEFER;
- }
rq->special = clone;
rq->cmd_flags |= REQ_DONTPREP;
@@ -1487,11 +1595,10 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_OK;
}
-static void map_request(struct dm_target *ti, struct request *rq,
+static void map_request(struct dm_target *ti, struct request *clone,
struct mapped_device *md)
{
int r;
- struct request *clone = rq->special;
struct dm_rq_target_io *tio = clone->end_io_data;
/*
@@ -1511,6 +1618,8 @@ static void map_request(struct dm_target *ti, struct request *rq,
break;
case DM_MAPIO_REMAPPED:
/* The target has remapped the I/O so dispatch it */
+ trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+ blk_rq_pos(tio->orig));
dm_dispatch_request(clone);
break;
case DM_MAPIO_REQUEUE:
@@ -1536,29 +1645,26 @@ static void map_request(struct dm_target *ti, struct request *rq,
static void dm_request_fn(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
struct dm_target *ti;
- struct request *rq;
+ struct request *rq, *clone;
/*
- * For noflush suspend, check blk_queue_stopped() to immediately
- * quit I/O dispatching.
+ * For suspend, check blk_queue_stopped() and increment
+ * ->pending within a single queue_lock so as not to increment the
+ * number of in-flight I/Os after the queue is stopped in
+ * dm_suspend().
*/
while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
goto plug_and_out;
- if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend maker */
- if (queue_in_flight(q))
- /* Not quiet yet. Wait more */
- goto plug_and_out;
-
- /* This device should be quiet now */
- __stop_queue(q);
+ if (unlikely(dm_rq_is_flush_request(rq))) {
+ BUG_ON(md->flush_request);
+ md->flush_request = rq;
blk_start_request(rq);
- __blk_end_request_all(rq, 0);
- wake_up(&md->wait);
+ queue_work(md->wq, &md->barrier_work);
goto out;
}
@@ -1567,8 +1673,11 @@ static void dm_request_fn(struct request_queue *q)
goto plug_and_out;
blk_start_request(rq);
+ clone = rq->special;
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+
spin_unlock(q->queue_lock);
- map_request(ti, rq, md);
+ map_request(ti, clone, md);
spin_lock_irq(q->queue_lock);
}
@@ -1595,7 +1704,7 @@ static int dm_lld_busy(struct request_queue *q)
{
int r;
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
r = 1;
@@ -1610,7 +1719,7 @@ static int dm_lld_busy(struct request_queue *q)
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map = dm_get_live_table(md);
if (map) {
if (dm_request_based(md))
@@ -1628,7 +1737,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
struct dm_table *map;
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
if (map) {
/*
* Request-based dm cares about only own queue for
@@ -1725,6 +1834,7 @@ out:
static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
+static void dm_rq_barrier_work(struct work_struct *work);
/*
* Allocate and initialise a blank device with a given minor.
@@ -1754,6 +1864,7 @@ static struct mapped_device *alloc_dev(int minor)
init_rwsem(&md->io_lock);
mutex_init(&md->suspend_lock);
spin_lock_init(&md->deferred_lock);
+ spin_lock_init(&md->barrier_error_lock);
rwlock_init(&md->map_lock);
atomic_set(&md->holders, 1);
atomic_set(&md->open_count, 0);
@@ -1788,6 +1899,8 @@ static struct mapped_device *alloc_dev(int minor)
blk_queue_softirq_done(md->queue, dm_softirq_done);
blk_queue_prep_rq(md->queue, dm_prep_fn);
blk_queue_lld_busy(md->queue, dm_lld_busy);
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+ dm_rq_prepare_flush);
md->disk = alloc_disk(1);
if (!md->disk)
@@ -1797,6 +1910,7 @@ static struct mapped_device *alloc_dev(int minor)
atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
+ INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
init_waitqueue_head(&md->eventq);
md->disk->major = _major;
@@ -1921,9 +2035,13 @@ static void __set_size(struct mapped_device *md, sector_t size)
mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
-static int __bind(struct mapped_device *md, struct dm_table *t,
- struct queue_limits *limits)
+/*
+ * Returns old map, which caller must destroy.
+ */
+static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ struct queue_limits *limits)
{
+ struct dm_table *old_map;
struct request_queue *q = md->queue;
sector_t size;
unsigned long flags;
@@ -1938,11 +2056,6 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
__set_size(md, size);
- if (!size) {
- dm_table_destroy(t);
- return 0;
- }
-
dm_table_event_callback(t, event_callback, md);
/*
@@ -1958,26 +2071,31 @@ static int __bind(struct mapped_device *md, struct dm_table *t,
__bind_mempools(md, t);
write_lock_irqsave(&md->map_lock, flags);
+ old_map = md->map;
md->map = t;
dm_table_set_restrictions(t, q, limits);
write_unlock_irqrestore(&md->map_lock, flags);
- return 0;
+ return old_map;
}
-static void __unbind(struct mapped_device *md)
+/*
+ * Returns unbound table for the caller to free.
+ */
+static struct dm_table *__unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
unsigned long flags;
if (!map)
- return;
+ return NULL;
dm_table_event_callback(map, NULL, NULL);
write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
write_unlock_irqrestore(&md->map_lock, flags);
- dm_table_destroy(map);
+
+ return map;
}
/*
@@ -2059,18 +2177,18 @@ void dm_put(struct mapped_device *md)
BUG_ON(test_bit(DMF_FREEING, &md->flags));
if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
idr_replace(&_minor_idr, MINOR_ALLOCED,
MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- if (!dm_suspended(md)) {
+ if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
dm_sysfs_exit(md);
dm_table_put(map);
- __unbind(md);
+ dm_table_destroy(__unbind(md));
free_dev(md);
}
}
@@ -2080,8 +2198,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
int r = 0;
DECLARE_WAITQUEUE(wait, current);
- struct request_queue *q = md->queue;
- unsigned long flags;
dm_unplug_all(md->queue);
@@ -2091,15 +2207,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
set_current_state(interruptible);
smp_mb();
- if (dm_request_based(md)) {
- spin_lock_irqsave(q->queue_lock, flags);
- if (!queue_in_flight(q) && blk_queue_stopped(q)) {
- spin_unlock_irqrestore(q->queue_lock, flags);
- break;
- }
- spin_unlock_irqrestore(q->queue_lock, flags);
- } else if (!atomic_read(&md->pending[0]) &&
- !atomic_read(&md->pending[1]))
+ if (!md_in_flight(md))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
@@ -2194,98 +2302,106 @@ static void dm_queue_flush(struct mapped_device *md)
queue_work(md->wq, &md->work);
}
-/*
- * Swap in a new table (destroying old one).
- */
-int dm_swap_table(struct mapped_device *md, struct dm_table *table)
+static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
{
- struct queue_limits limits;
- int r = -EINVAL;
+ struct dm_rq_target_io *tio = clone->end_io_data;
- mutex_lock(&md->suspend_lock);
+ tio->info.flush_request = flush_nr;
+}
- /* device must be suspended */
- if (!dm_suspended(md))
- goto out;
+/* Issue barrier requests to targets and wait for their completion. */
+static int dm_rq_barrier(struct mapped_device *md)
+{
+ int i, j;
+ struct dm_table *map = dm_get_live_table(md);
+ unsigned num_targets = dm_table_get_num_targets(map);
+ struct dm_target *ti;
+ struct request *clone;
- r = dm_calculate_queue_limits(table, &limits);
- if (r)
- goto out;
+ md->barrier_error = 0;
- /* cannot change the device type, once a table is bound */
- if (md->map &&
- (dm_table_get_type(md->map) != dm_table_get_type(table))) {
- DMWARN("can't change the device type after a table is bound");
- goto out;
+ for (i = 0; i < num_targets; i++) {
+ ti = dm_table_get_target(map, i);
+ for (j = 0; j < ti->num_flush_requests; j++) {
+ clone = clone_rq(md->flush_request, md, GFP_NOIO);
+ dm_rq_set_flush_nr(clone, j);
+ atomic_inc(&md->pending[rq_data_dir(clone)]);
+ map_request(ti, clone, md);
+ }
}
- __unbind(md);
- r = __bind(md, table, &limits);
-
-out:
- mutex_unlock(&md->suspend_lock);
- return r;
-}
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
+ dm_table_put(map);
-static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
-{
- md->suspend_rq.special = (void *)0x1;
+ return md->barrier_error;
}
-static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
+static void dm_rq_barrier_work(struct work_struct *work)
{
+ int error;
+ struct mapped_device *md = container_of(work, struct mapped_device,
+ barrier_work);
struct request_queue *q = md->queue;
+ struct request *rq;
unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
- if (!noflush)
- dm_rq_invalidate_suspend_marker(md);
- __start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
+ /*
+ * Hold the md reference here and release it only at the end, so that
+ * the md can't be deleted by a device opener while the barrier request
+ * completes.
+ */
+ dm_get(md);
-static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
-{
- struct request *rq = &md->suspend_rq;
- struct request_queue *q = md->queue;
+ error = dm_rq_barrier(md);
- if (noflush)
- stop_queue(q);
- else {
- blk_rq_init(q, rq);
- blk_insert_request(q, rq, 0, NULL);
- }
+ rq = md->flush_request;
+ md->flush_request = NULL;
+
+ if (error == DM_ENDIO_REQUEUE) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, rq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ } else
+ blk_end_request_all(rq, error);
+
+ blk_run_queue(q);
+
+ dm_put(md);
}
-static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
+/*
+ * Swap in a new table, returning the old one for the caller to destroy.
+ */
+struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
- int r = 1;
- struct request *rq = &md->suspend_rq;
- struct request_queue *q = md->queue;
- unsigned long flags;
+ struct dm_table *map = ERR_PTR(-EINVAL);
+ struct queue_limits limits;
+ int r;
- if (noflush)
- return r;
+ mutex_lock(&md->suspend_lock);
- /* The marker must be protected by queue lock if it is in use */
- spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(rq->ref_count)) {
- /*
- * This can happen, when the previous flush suspend was
- * interrupted, the marker is still in the queue and
- * this flush suspend has been invoked, because we don't
- * remove the marker at the time of suspend interruption.
- * We have only one marker per mapped_device, so we can't
- * start another flush suspend while it is in use.
- */
- BUG_ON(!rq->special); /* The marker should be invalidated */
- DMWARN("Invalidating the previous flush suspend is still in"
- " progress. Please retry later.");
- r = 0;
+ /* device must be suspended */
+ if (!dm_suspended_md(md))
+ goto out;
+
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r) {
+ map = ERR_PTR(r);
+ goto out;
}
- spin_unlock_irqrestore(q->queue_lock, flags);
- return r;
+ /* cannot change the device type, once a table is bound */
+ if (md->map &&
+ (dm_table_get_type(md->map) != dm_table_get_type(table))) {
+ DMWARN("can't change the device type after a table is bound");
+ goto out;
+ }
+
+ map = __bind(md, table, &limits);
+
+out:
+ mutex_unlock(&md->suspend_lock);
+ return map;
}
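Since dm_swap_table() now hands back the displaced table, the caller is expected to dispose of it; an illustrative pattern (assumed caller, e.g. an ioctl resume path, not part of this hunk):

	old_map = dm_swap_table(md, new_map);
	if (IS_ERR(old_map))
		return PTR_ERR(old_map);
	dm_table_destroy(old_map);	/* the old table is no longer live */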
/*
@@ -2330,49 +2446,11 @@ static void unlock_fs(struct mapped_device *md)
/*
* Suspend mechanism in request-based dm.
*
- * After the suspend starts, further incoming requests are kept in
- * the request_queue and deferred.
- * Remaining requests in the request_queue at the start of suspend are flushed
- * if it is flush suspend.
- * The suspend completes when the following conditions have been satisfied,
- * so wait for it:
- * 1. q->in_flight is 0 (which means no in_flight request)
- * 2. queue has been stopped (which means no request dispatching)
- *
+ * 1. Flush all I/Os by lock_fs() if needed.
+ * 2. Stop dispatching any I/O by stopping the request_queue.
+ * 3. Wait for all in-flight I/Os to be completed or requeued.
*
- * Noflush suspend
- * ---------------
- * Noflush suspend doesn't need to dispatch remaining requests.
- * So stop the queue immediately. Then, wait for all in_flight requests
- * to be completed or requeued.
- *
- * To abort noflush suspend, start the queue.
- *
- *
- * Flush suspend
- * -------------
- * Flush suspend needs to dispatch remaining requests. So stop the queue
- * after the remaining requests are completed. (Requeued request must be also
- * re-dispatched and completed. Until then, we can't stop the queue.)
- *
- * During flushing the remaining requests, further incoming requests are also
- * inserted to the same queue. To distinguish which requests are to be
- * flushed, we insert a marker request to the queue at the time of starting
- * flush suspend, like a barrier.
- * The dispatching is blocked when the marker is found on the top of the queue.
- * And the queue is stopped when all in_flight requests are completed, since
- * that means the remaining requests are completely flushed.
- * Then, the marker is removed from the queue.
- *
- * To abort flush suspend, we also need to take care of the marker, not only
- * starting the queue.
- * We don't remove the marker forcibly from the queue since it's against
- * the block-layer manner. Instead, we put a invalidated mark on the marker.
- * When the invalidated marker is found on the top of the queue, it is
- * immediately removed from the queue, so it doesn't block dispatching.
- * Because we have only one marker per mapped_device, we can't start another
- * flush suspend until the invalidated marker is removed from the queue.
- * So fail and return with -EBUSY in such a case.
+ * To abort suspend, start the request_queue.
*/
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
@@ -2383,17 +2461,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
mutex_lock(&md->suspend_lock);
- if (dm_suspended(md)) {
+ if (dm_suspended_md(md)) {
r = -EINVAL;
goto out_unlock;
}
- if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
- r = -EBUSY;
- goto out_unlock;
- }
-
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
@@ -2406,8 +2479,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
dm_table_presuspend_targets(map);
/*
- * Flush I/O to the device. noflush supersedes do_lockfs,
- * because lock_fs() needs to flush I/Os.
+ * Flush I/O to the device.
+ * Any I/O submitted after lock_fs() may not be flushed.
+ * noflush takes precedence over do_lockfs.
+ * (lock_fs() flushes I/Os and waits for them to complete.)
*/
if (!noflush && do_lockfs) {
r = lock_fs(md);
@@ -2436,10 +2511,15 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
up_write(&md->io_lock);
- flush_workqueue(md->wq);
-
+ /*
+ * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
+ * can be kicked until md->queue is stopped. So stop md->queue before
+ * flushing md->wq.
+ */
if (dm_request_based(md))
- dm_rq_start_suspend(md, noflush);
+ stop_queue(md->queue);
+
+ flush_workqueue(md->wq);
/*
* At this point no more requests are entering target request routines.
@@ -2458,7 +2538,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
dm_queue_flush(md);
if (dm_request_based(md))
- dm_rq_abort_suspend(md, noflush);
+ start_queue(md->queue);
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
@@ -2470,10 +2550,10 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
* requests are being added to md->deferred list.
*/
- dm_table_postsuspend_targets(map);
-
set_bit(DMF_SUSPENDED, &md->flags);
+ dm_table_postsuspend_targets(map);
+
out:
dm_table_put(map);
@@ -2488,10 +2568,10 @@ int dm_resume(struct mapped_device *md)
struct dm_table *map = NULL;
mutex_lock(&md->suspend_lock);
- if (!dm_suspended(md))
+ if (!dm_suspended_md(md))
goto out;
- map = dm_get_table(md);
+ map = dm_get_live_table(md);
if (!map || !dm_table_get_size(map))
goto out;
@@ -2592,18 +2672,29 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
return NULL;
if (test_bit(DMF_FREEING, &md->flags) ||
- test_bit(DMF_DELETING, &md->flags))
+ dm_deleting_md(md))
return NULL;
dm_get(md);
return md;
}
-int dm_suspended(struct mapped_device *md)
+int dm_suspended_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED, &md->flags);
}
+int dm_suspended(struct dm_target *ti)
+{
+ struct mapped_device *md = dm_table_get_md(ti->table);
+ int r = dm_suspended_md(md);
+
+ dm_put(md);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_suspended);
+
int dm_noflush_suspending(struct dm_target *ti)
{
struct mapped_device *md = dm_table_get_md(ti->table);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a7663eba17e..8dadaa5bc39 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -89,6 +89,16 @@ int dm_target_iterate(void (*iter_func)(struct target_type *tt,
int dm_split_args(int *argc, char ***argvp, char *input);
/*
+ * Is this mapped_device being deleted?
+ */
+int dm_deleting_md(struct mapped_device *md);
+
+/*
+ * Is this mapped_device suspended?
+ */
+int dm_suspended_md(struct mapped_device *md);
+
+/*
* The device-mapper can be driven through one of two interfaces;
* ioctl or filesystem, depending which patch you have applied.
*/
@@ -118,6 +128,9 @@ int dm_lock_for_deletion(struct mapped_device *md);
void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie);
+int dm_io_init(void);
+void dm_io_exit(void);
+
int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 87d88dbb667..713acd02ab3 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -360,6 +360,7 @@ static void raid_exit(void)
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fault injection personality for MD");
MODULE_ALIAS("md-personality-10"); /* faulty */
MODULE_ALIAS("md-faulty");
MODULE_ALIAS("md-level--5");
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 1ceceb334d5..00435bd2069 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -292,7 +292,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -383,6 +383,7 @@ static void linear_exit (void)
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Linear device concatenation personality for MD");
MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
MODULE_ALIAS("md-linear");
MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5f154ef1e4b..f4f5f82f9f5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,11 +39,13 @@
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
@@ -68,6 +70,12 @@ static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
+ * Default number of read corrections we'll attempt on an rdev
+ * before ejecting it from the array. We divide the read error
+ * count by 2 for every hour elapsed between read errors.
+ */
+#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
+/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
@@ -213,12 +221,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
return 0;
}
rcu_read_lock();
- if (mddev->suspended) {
+ if (mddev->suspended || mddev->barrier) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
+ if (!mddev->suspended && !mddev->barrier)
break;
rcu_read_unlock();
schedule();
@@ -260,10 +268,110 @@ static void mddev_resume(mddev_t *mddev)
int mddev_congested(mddev_t *mddev, int bits)
{
+ if (mddev->barrier)
+ return 1;
return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
+/*
+ * Generic barrier handling for md
+ */
+
+#define POST_REQUEST_BARRIER ((void*)1)
+
+static void md_end_barrier(struct bio *bio, int err)
+{
+ mdk_rdev_t *rdev = bio->bi_private;
+ mddev_t *mddev = rdev->mddev;
+ if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER)
+ set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
+
+ rdev_dec_pending(rdev, mddev);
+
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ if (mddev->barrier == POST_REQUEST_BARRIER) {
+ /* This was a post-request barrier */
+ mddev->barrier = NULL;
+ wake_up(&mddev->sb_wait);
+ } else
+ /* The pre-request barrier has finished */
+ schedule_work(&mddev->barrier_work);
+ }
+ bio_put(bio);
+}
+
+static void submit_barriers(mddev_t *mddev)
+{
+ mdk_rdev_t *rdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+ /* Take two references, one is dropped
+ * when request finishes, one after
+ * we reclaim rcu_read_lock
+ */
+ struct bio *bi;
+ atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ bi = bio_alloc(GFP_KERNEL, 0);
+ bi->bi_end_io = md_end_barrier;
+ bi->bi_private = rdev;
+ bi->bi_bdev = rdev->bdev;
+ atomic_inc(&mddev->flush_pending);
+ submit_bio(WRITE_BARRIER, bi);
+ rcu_read_lock();
+ rdev_dec_pending(rdev, mddev);
+ }
+ rcu_read_unlock();
+}
+
+static void md_submit_barrier(struct work_struct *ws)
+{
+ mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
+ struct bio *bio = mddev->barrier;
+
+ atomic_set(&mddev->flush_pending, 1);
+
+ if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+ bio_endio(bio, -EOPNOTSUPP);
+ else if (bio->bi_size == 0)
+ /* an empty barrier - all done */
+ bio_endio(bio, 0);
+ else {
+ bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ if (mddev->pers->make_request(mddev->queue, bio))
+ generic_make_request(bio);
+ mddev->barrier = POST_REQUEST_BARRIER;
+ submit_barriers(mddev);
+ }
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ mddev->barrier = NULL;
+ wake_up(&mddev->sb_wait);
+ }
+}
+
+void md_barrier_request(mddev_t *mddev, struct bio *bio)
+{
+ spin_lock_irq(&mddev->write_lock);
+ wait_event_lock_irq(mddev->sb_wait,
+ !mddev->barrier,
+ mddev->write_lock, /*nothing*/);
+ mddev->barrier = bio;
+ spin_unlock_irq(&mddev->write_lock);
+
+ atomic_set(&mddev->flush_pending, 1);
+ INIT_WORK(&mddev->barrier_work, md_submit_barrier);
+
+ submit_barriers(mddev);
+
+ if (atomic_dec_and_test(&mddev->flush_pending))
+ schedule_work(&mddev->barrier_work);
+}
+EXPORT_SYMBOL(md_barrier_request);
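Traced end to end, the barrier path added above behaves roughly as follows (a summary of the code, not additional patch content; an empty barrier bio is completed directly in md_submit_barrier() instead):

	md_barrier_request(mddev, bio)
		-> submit_barriers()		empty WRITE_BARRIER bio to each rdev
		-> md_end_barrier() x N		last completion schedules barrier_work
	md_submit_barrier()
		-> make_request(bio)		data portion, barrier flag cleared
		-> mddev->barrier = POST_REQUEST_BARRIER; submit_barriers()
		-> md_end_barrier() x N		last completion clears ->barrier and
						wakes writers waiting in md_make_request()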
static inline mddev_t *mddev_get(mddev_t *mddev)
{
@@ -363,6 +471,7 @@ static mddev_t * mddev_find(dev_t unit)
mutex_init(&new->open_mutex);
mutex_init(&new->reconfig_mutex);
+ mutex_init(&new->bitmap_info.mutex);
INIT_LIST_HEAD(&new->disks);
INIT_LIST_HEAD(&new->all_mddevs);
init_timer(&new->safemode_timer);
@@ -370,6 +479,7 @@ static mddev_t * mddev_find(dev_t unit)
atomic_set(&new->openers, 0);
atomic_set(&new->active_io, 0);
spin_lock_init(&new->write_lock);
+ atomic_set(&new->flush_pending, 0);
init_waitqueue_head(&new->sb_wait);
init_waitqueue_head(&new->recovery_wait);
new->reshape_position = MaxSector;
@@ -748,7 +858,7 @@ struct super_type {
*/
int md_check_no_bitmap(mddev_t *mddev)
{
- if (!mddev->bitmap_file && !mddev->bitmap_offset)
+ if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
@@ -876,8 +986,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->raid_disks = sb->raid_disks;
mddev->dev_sectors = sb->size * 2;
mddev->events = ev1;
- mddev->bitmap_offset = 0;
- mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
if (mddev->minor_version >= 91) {
mddev->reshape_position = sb->reshape_position;
@@ -911,8 +1021,9 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->max_disks = MD_SB_DISKS;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
- mddev->bitmap_file == NULL)
- mddev->bitmap_offset = mddev->default_bitmap_offset;
+ mddev->bitmap_info.file == NULL)
+ mddev->bitmap_info.offset =
+ mddev->bitmap_info.default_offset;
} else if (mddev->pers == NULL) {
/* Insist on good event counter while assembling */
@@ -1029,7 +1140,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_sectors << 9;
- if (mddev->bitmap && mddev->bitmap_file == NULL)
+ if (mddev->bitmap && mddev->bitmap_info.file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
@@ -1107,7 +1218,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
return 0; /* component must fit device */
- if (rdev->mddev->bitmap_offset)
+ if (rdev->mddev->bitmap_info.offset)
return 0; /* can't move bitmap */
rdev->sb_start = calc_dev_sboffset(rdev->bdev);
if (!num_sectors || num_sectors > rdev->sb_start)
@@ -1286,8 +1397,8 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
mddev->events = ev1;
- mddev->bitmap_offset = 0;
- mddev->default_bitmap_offset = 1024 >> 9;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 1024 >> 9;
mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
@@ -1295,8 +1406,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->max_disks = (4096-256)/2;
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
- mddev->bitmap_file == NULL )
- mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
+ mddev->bitmap_info.file == NULL )
+ mddev->bitmap_info.offset =
+ (__s32)le32_to_cpu(sb->bitmap_offset);
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
@@ -1390,19 +1502,17 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
- if (mddev->bitmap && mddev->bitmap_file == NULL) {
- sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
+ if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
+ sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
}
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags)) {
- if (rdev->recovery_offset > 0) {
- sb->feature_map |=
- cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
- sb->recovery_offset =
- cpu_to_le64(rdev->recovery_offset);
- }
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
+ sb->recovery_offset =
+ cpu_to_le64(rdev->recovery_offset);
}
if (mddev->reshape_position != MaxSector) {
@@ -1436,7 +1546,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
else if (test_bit(In_sync, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
- else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
+ else if (rdev2->raid_disk >= 0)
sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
else
sb->dev_roles[i] = cpu_to_le16(0xffff);
@@ -1458,7 +1568,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
max_sectors -= rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
- } else if (rdev->mddev->bitmap_offset) {
+ } else if (rdev->mddev->bitmap_info.offset) {
/* minor version 0 with bitmap we can't move */
return 0;
} else {
@@ -1826,15 +1936,11 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
uuid = sb->set_uuid;
printk(KERN_INFO
- "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
- ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
+ "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
"md: Name: \"%s\" CT:%llu\n",
le32_to_cpu(sb->major_version),
le32_to_cpu(sb->feature_map),
- uuid[0], uuid[1], uuid[2], uuid[3],
- uuid[4], uuid[5], uuid[6], uuid[7],
- uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15],
+ uuid,
sb->set_name,
(unsigned long long)le64_to_cpu(sb->ctime)
& MD_SUPERBLOCK_1_TIME_SEC_MASK);
@@ -1843,8 +1949,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
printk(KERN_INFO
"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
" RO:%llu\n"
- "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
- ":%02x%02x%02x%02x%02x%02x\n"
+ "md: Dev:%08x UUID: %pU\n"
"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
"md: (MaxDev:%u) \n",
le32_to_cpu(sb->level),
@@ -1857,10 +1962,7 @@ static void print_sb_1(struct mdp_superblock_1 *sb)
(unsigned long long)le64_to_cpu(sb->super_offset),
(unsigned long long)le64_to_cpu(sb->recovery_offset),
le32_to_cpu(sb->dev_number),
- uuid[0], uuid[1], uuid[2], uuid[3],
- uuid[4], uuid[5], uuid[6], uuid[7],
- uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15],
+ uuid,
sb->devflags,
(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
(unsigned long long)le64_to_cpu(sb->events),
@@ -2442,12 +2544,49 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
+
+static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
+{
+ unsigned long long recovery_start = rdev->recovery_offset;
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ recovery_start == MaxSector)
+ return sprintf(page, "none\n");
+
+ return sprintf(page, "%llu\n", recovery_start);
+}
+
+static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ unsigned long long recovery_start;
+
+ if (cmd_match(buf, "none"))
+ recovery_start = MaxSector;
+ else if (strict_strtoull(buf, 10, &recovery_start))
+ return -EINVAL;
+
+ if (rdev->mddev->pers &&
+ rdev->raid_disk >= 0)
+ return -EBUSY;
+
+ rdev->recovery_offset = recovery_start;
+ if (recovery_start == MaxSector)
+ set_bit(In_sync, &rdev->flags);
+ else
+ clear_bit(In_sync, &rdev->flags);
+ return len;
+}
+
+static struct rdev_sysfs_entry rdev_recovery_start =
+__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
+
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
&rdev_slot.attr,
&rdev_offset.attr,
&rdev_size.attr,
+ &rdev_recovery_start.attr,
NULL,
};
static ssize_t
@@ -2549,6 +2688,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
rdev->flags = 0;
rdev->data_offset = 0;
rdev->sb_events = 0;
+ rdev->last_read_error.tv_sec = 0;
+ rdev->last_read_error.tv_nsec = 0;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
@@ -2659,6 +2800,47 @@ static void analyze_sbs(mddev_t * mddev)
}
}
+/* Read a fixed-point number.
+ * Numbers in sysfs attributes should be in "standard" units where
+ * possible, so time should be in seconds.
+ * However, we internally use a much smaller unit such as
+ * milliseconds or jiffies.
+ * This function takes a decimal number with a possible fractional
+ * component, and produces an integer which is the result of
+ * multiplying that number by 10^'scale', all without any
+ * floating-point arithmetic.
+ */
+int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
+{
+ unsigned long result = 0;
+ long decimals = -1;
+ while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
+ if (*cp == '.')
+ decimals = 0;
+ else if (decimals < scale) {
+ unsigned int value;
+ value = *cp - '0';
+ result = result * 10 + value;
+ if (decimals >= 0)
+ decimals++;
+ }
+ cp++;
+ }
+ if (*cp == '\n')
+ cp++;
+ if (*cp)
+ return -EINVAL;
+ if (decimals < 0)
+ decimals = 0;
+ while (decimals < scale) {
+ result *= 10;
+ decimals ++;
+ }
+ *res = result;
+ return 0;
+}
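A usage sketch for the helper above (illustrative only; the results follow from the loop, they are not additional patch content):

	unsigned long msec;

	strict_strtoul_scaled("1.5", &msec, 3);		/* msec == 1500 */
	strict_strtoul_scaled("0.125", &msec, 3);	/* msec == 125  */
	strict_strtoul_scaled("2", &msec, 3);		/* msec == 2000 */

safe_delay_store() below relies on exactly this, turning a value written in seconds into milliseconds.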
+
+
static void md_safemode_timeout(unsigned long data);
static ssize_t
@@ -2670,31 +2852,10 @@ safe_delay_show(mddev_t *mddev, char *page)
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
- int scale=1;
- int dot=0;
- int i;
unsigned long msec;
- char buf[30];
- /* remove a period, and count digits after it */
- if (len >= sizeof(buf))
- return -EINVAL;
- strlcpy(buf, cbuf, sizeof(buf));
- for (i=0; i<len; i++) {
- if (dot) {
- if (isdigit(buf[i])) {
- buf[i-1] = buf[i];
- scale *= 10;
- }
- buf[i] = 0;
- } else if (buf[i] == '.') {
- dot=1;
- buf[i] = 0;
- }
- }
- if (strict_strtoul(buf, 10, &msec) < 0)
+ if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
return -EINVAL;
- msec = (msec * 1000) / scale;
if (msec == 0)
mddev->safemode_delay = 0;
else {
@@ -2970,7 +3131,9 @@ resync_start_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers)
return -EBUSY;
- if (!*buf || (*e && *e != '\n'))
+ if (cmd_match(buf, "none"))
+ n = MaxSector;
+ else if (!*buf || (*e && *e != '\n'))
return -EINVAL;
mddev->recovery_cp = n;
@@ -3166,6 +3329,29 @@ static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
static ssize_t
+max_corrected_read_errors_show(mddev_t *mddev, char *page) {
+ return sprintf(page, "%d\n",
+ atomic_read(&mddev->max_corr_read_errors));
+}
+
+static ssize_t
+max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+
+ if (*buf && (*e == 0 || *e == '\n')) {
+ atomic_set(&mddev->max_corr_read_errors, n);
+ return len;
+ }
+ return -EINVAL;
+}
+
+static struct md_sysfs_entry max_corr_read_errors =
+__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
+ max_corrected_read_errors_store);
+
+static ssize_t
null_show(mddev_t *mddev, char *page)
{
return -EINVAL;
@@ -3246,8 +3432,7 @@ bitmap_store(mddev_t *mddev, const char *buf, size_t len)
}
if (*end && !isspace(*end)) break;
bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
- buf = end;
- while (isspace(*buf)) buf++;
+ buf = skip_spaces(end);
}
bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
@@ -3790,6 +3975,7 @@ static struct attribute *md_default_attrs[] = {
&md_array_state.attr,
&md_reshape_position.attr,
&md_array_size.attr,
+ &max_corr_read_errors.attr,
NULL,
};
@@ -3894,6 +4080,7 @@ static void mddev_delayed_delete(struct work_struct *ws)
mddev->sysfs_action = NULL;
mddev->private = NULL;
}
+ sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -3985,6 +4172,8 @@ static int md_alloc(dev_t dev, char *name)
disk->disk_name);
error = 0;
}
+ if (sysfs_create_group(&mddev->kobj, &md_bitmap_group))
+ printk(KERN_DEBUG "pointless warning\n");
abort:
mutex_unlock(&disks_mutex);
if (!error) {
@@ -4206,6 +4395,8 @@ static int do_md_run(mddev_t * mddev)
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
+ atomic_set(&mddev->max_corr_read_errors,
+ MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
mddev->safemode_timer.function = md_safemode_timeout;
mddev->safemode_timer.data = (unsigned long) mddev;
@@ -4310,7 +4501,7 @@ static int deny_bitmap_write_access(struct file * file)
return 0;
}
-static void restore_bitmap_write_access(struct file *file)
+void restore_bitmap_write_access(struct file *file)
{
struct inode *inode = file->f_mapping->host;
@@ -4405,12 +4596,12 @@ out:
printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
bitmap_destroy(mddev);
- if (mddev->bitmap_file) {
- restore_bitmap_write_access(mddev->bitmap_file);
- fput(mddev->bitmap_file);
- mddev->bitmap_file = NULL;
+ if (mddev->bitmap_info.file) {
+ restore_bitmap_write_access(mddev->bitmap_info.file);
+ fput(mddev->bitmap_info.file);
+ mddev->bitmap_info.file = NULL;
}
- mddev->bitmap_offset = 0;
+ mddev->bitmap_info.offset = 0;
/* make sure all md_delayed_delete calls have finished */
flush_scheduled_work();
@@ -4451,6 +4642,11 @@ out:
mddev->degraded = 0;
mddev->barriers_work = 0;
mddev->safemode = 0;
+ mddev->bitmap_info.offset = 0;
+ mddev->bitmap_info.default_offset = 0;
+ mddev->bitmap_info.chunksize = 0;
+ mddev->bitmap_info.daemon_sleep = 0;
+ mddev->bitmap_info.max_write_behind = 0;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
@@ -4636,7 +4832,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
info.state = 0;
if (mddev->in_sync)
info.state = (1<<MD_SB_CLEAN);
- if (mddev->bitmap && mddev->bitmap_offset)
+ if (mddev->bitmap && mddev->bitmap_info.offset)
info.state = (1<<MD_SB_BITMAP_PRESENT);
info.active_disks = insync;
info.working_disks = working;
@@ -4994,23 +5190,23 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
if (fd >= 0) {
if (mddev->bitmap)
return -EEXIST; /* cannot add when bitmap is present */
- mddev->bitmap_file = fget(fd);
+ mddev->bitmap_info.file = fget(fd);
- if (mddev->bitmap_file == NULL) {
+ if (mddev->bitmap_info.file == NULL) {
printk(KERN_ERR "%s: error: failed to get bitmap file\n",
mdname(mddev));
return -EBADF;
}
- err = deny_bitmap_write_access(mddev->bitmap_file);
+ err = deny_bitmap_write_access(mddev->bitmap_info.file);
if (err) {
printk(KERN_ERR "%s: error: bitmap file is already in use\n",
mdname(mddev));
- fput(mddev->bitmap_file);
- mddev->bitmap_file = NULL;
+ fput(mddev->bitmap_info.file);
+ mddev->bitmap_info.file = NULL;
return err;
}
- mddev->bitmap_offset = 0; /* file overrides offset */
+ mddev->bitmap_info.offset = 0; /* file overrides offset */
} else if (mddev->bitmap == NULL)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
@@ -5025,11 +5221,11 @@ static int set_bitmap_file(mddev_t *mddev, int fd)
mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
- if (mddev->bitmap_file) {
- restore_bitmap_write_access(mddev->bitmap_file);
- fput(mddev->bitmap_file);
+ if (mddev->bitmap_info.file) {
+ restore_bitmap_write_access(mddev->bitmap_info.file);
+ fput(mddev->bitmap_info.file);
}
- mddev->bitmap_file = NULL;
+ mddev->bitmap_info.file = NULL;
}
return err;
@@ -5096,8 +5292,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
- mddev->bitmap_offset = 0;
+ mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
+ mddev->bitmap_info.offset = 0;
mddev->reshape_position = MaxSector;
@@ -5197,7 +5393,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
int state = 0;
/* calculate expected state,ignoring low bits */
- if (mddev->bitmap && mddev->bitmap_offset)
+ if (mddev->bitmap && mddev->bitmap_info.offset)
state |= (1 << MD_SB_BITMAP_PRESENT);
if (mddev->major_version != info->major_version ||
@@ -5256,9 +5452,10 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
/* add the bitmap */
if (mddev->bitmap)
return -EEXIST;
- if (mddev->default_bitmap_offset == 0)
+ if (mddev->bitmap_info.default_offset == 0)
return -EINVAL;
- mddev->bitmap_offset = mddev->default_bitmap_offset;
+ mddev->bitmap_info.offset =
+ mddev->bitmap_info.default_offset;
mddev->pers->quiesce(mddev, 1);
rv = bitmap_create(mddev);
if (rv)
@@ -5273,7 +5470,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
- mddev->bitmap_offset = 0;
+ mddev->bitmap_info.offset = 0;
}
}
md_update_sb(mddev, 1);
@@ -5524,6 +5721,25 @@ done:
abort:
return err;
}
+#ifdef CONFIG_COMPAT
+static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case HOT_REMOVE_DISK:
+ case HOT_ADD_DISK:
+ case SET_DISK_FAULTY:
+ case SET_BITMAP_FILE:
+ /* These take an integer arg, do not convert */
+ break;
+ default:
+ arg = (unsigned long)compat_ptr(arg);
+ break;
+ }
+
+ return md_ioctl(bdev, mode, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
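A rough userspace illustration of the dispatch rule above, using made-up command numbers: integer-valued commands keep their argument unchanged, while pointer-valued commands from a 32-bit caller keep only the low 32 bits, which is roughly what compat_ptr() does on most architectures.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_INT_ARG 1	/* hypothetical: argument is a plain integer */
#define CMD_PTR_ARG 2	/* hypothetical: argument is a user pointer   */

static uint64_t fixup_compat_arg(unsigned int cmd, uint64_t arg)
{
	switch (cmd) {
	case CMD_INT_ARG:
		return arg;		/* integer: no conversion */
	default:
		return (uint32_t)arg;	/* pointer: narrow to 32 bits */
	}
}

int main(void)
{
	printf("%#" PRIx64 "\n", fixup_compat_arg(CMD_INT_ARG, 5));
	printf("%#" PRIx64 "\n",
	       fixup_compat_arg(CMD_PTR_ARG, 0xffffffff81234567ULL));
	return 0;
}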
static int md_open(struct block_device *bdev, fmode_t mode)
{
@@ -5589,6 +5805,9 @@ static const struct block_device_operations md_fops =
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = md_compat_ioctl,
+#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
@@ -5982,14 +6201,14 @@ static int md_seq_show(struct seq_file *seq, void *v)
unsigned long chunk_kb;
unsigned long flags;
spin_lock_irqsave(&bitmap->lock, flags);
- chunk_kb = bitmap->chunksize >> 10;
+ chunk_kb = mddev->bitmap_info.chunksize >> 10;
seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
"%lu%s chunk",
bitmap->pages - bitmap->missing_pages,
bitmap->pages,
(bitmap->pages - bitmap->missing_pages)
<< (PAGE_SHIFT - 10),
- chunk_kb ? chunk_kb : bitmap->chunksize,
+ chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize,
chunk_kb ? "KB" : "B");
if (bitmap->file) {
seq_printf(seq, ", file: ");
@@ -6338,12 +6557,14 @@ void md_do_sync(mddev_t *mddev)
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
- list_for_each_entry(rdev, &mddev->disks, same_set)
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < j)
j = rdev->recovery_offset;
+ rcu_read_unlock();
}
printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
@@ -6380,6 +6601,7 @@ void md_do_sync(mddev_t *mddev)
desc, mdname(mddev));
mddev->curr_resync = j;
}
+ mddev->curr_resync_completed = mddev->curr_resync;
while (j < max_sectors) {
sector_t sectors;
@@ -6512,22 +6734,29 @@ void md_do_sync(mddev_t *mddev)
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
- list_for_each_entry(rdev, &mddev->disks, same_set)
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
rdev->recovery_offset < mddev->curr_resync)
rdev->recovery_offset = mddev->curr_resync;
+ rcu_read_unlock();
}
}
set_bit(MD_CHANGE_DEVS, &mddev->flags);
skip:
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ /* We completed so min/max setting can be forgotten if used. */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ mddev->resync_min = mddev->curr_resync_completed;
mddev->curr_resync = 0;
- mddev->curr_resync_completed = 0;
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
- /* We completed so max setting can be forgotten. */
- mddev->resync_max = MaxSector;
+ mddev->curr_resync_completed = 0;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
@@ -6590,6 +6819,7 @@ static int remove_and_add_spares(mddev_t *mddev)
nm, mdname(mddev));
spares++;
md_new_event(mddev);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
} else
break;
}
@@ -6625,7 +6855,7 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->bitmap)
- bitmap_daemon_work(mddev->bitmap);
+ bitmap_daemon_work(mddev);
if (mddev->ro)
return;
@@ -6995,5 +7225,6 @@ EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index f184b69ef33..8e4c75c00d4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -97,6 +97,9 @@ struct mdk_rdev_s
atomic_t read_errors; /* number of consecutive read errors that
* we have tried to ignore.
*/
+ struct timespec last_read_error; /* monotonic time since our
+ * last read error
+ */
atomic_t corrected_errors; /* number of corrected read errors,
* for reporting to userspace and storing
* in superblock.
@@ -280,17 +283,38 @@ struct mddev_s
unsigned int max_write_behind; /* 0 = sync */
struct bitmap *bitmap; /* the bitmap for the device */
- struct file *bitmap_file; /* the bitmap file */
- long bitmap_offset; /* offset from superblock of
- * start of bitmap. May be
- * negative, but not '0'
- */
- long default_bitmap_offset; /* this is the offset to use when
- * hot-adding a bitmap. It should
- * eventually be settable by sysfs.
- */
-
+ struct {
+ struct file *file; /* the bitmap file */
+ loff_t offset; /* offset from superblock of
+ * start of bitmap. May be
+ * negative, but not '0'
+ * For external metadata, offset
+ * from start of device.
+ */
+ loff_t default_offset; /* this is the offset to use when
+ * hot-adding a bitmap. It should
+ * eventually be settable by sysfs.
+ */
+ struct mutex mutex;
+ unsigned long chunksize;
+ unsigned long daemon_sleep; /* how many seconds between updates? */
+ unsigned long max_write_behind; /* write-behind mode */
+ int external;
+ } bitmap_info;
+
+ atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
+
+ /* Generic barrier handling.
+ * If there is a pending barrier request, all other
+ * writes are blocked while the devices are flushed.
+ * The last to finish a flush schedules a worker to
+ * submit the barrier request (without the barrier flag),
+ * then submit more flush requests.
+ */
+ struct bio *barrier;
+ atomic_t flush_pending;
+ struct work_struct barrier_work;
};
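A conceptual userspace sketch (not the kernel implementation) of the counting scheme the barrier comment above describes: flush_pending tracks the flushes still in flight, and whichever device completes the last one gets to submit the held-back barrier request.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int flush_pending;

static void flush_complete(int dev)
{
	printf("flush on device %d done\n", dev);
	/* fetch_sub returns the old value: 1 means we were the last */
	if (atomic_fetch_sub(&flush_pending, 1) == 1)
		printf("last flush finished: submit barrier request\n");
}

int main(void)
{
	int ndevs = 3, i;

	atomic_store(&flush_pending, ndevs);
	for (i = 0; i < ndevs; i++)
		flush_complete(i);
	return 0;
}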
@@ -353,7 +377,7 @@ struct md_sysfs_entry {
ssize_t (*show)(mddev_t *, char *);
ssize_t (*store)(mddev_t *, const char *, size_t);
};
-
+extern struct attribute_group md_bitmap_group;
static inline char * mdname (mddev_t * mddev)
{
@@ -431,6 +455,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
extern int mddev_congested(mddev_t *mddev, int bits);
+extern void md_barrier_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
@@ -443,6 +468,8 @@ extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
-void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
+extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
+extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
+extern void restore_bitmap_write_access(struct file *file);
#endif /* _MD_MD_H */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index ee7646f974a..32a662fc55c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -145,7 +145,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -581,6 +581,7 @@ static void __exit multipath_exit (void)
module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d3a4ce06015..77605cdceaf 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -453,7 +453,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -567,6 +567,7 @@ static void raid0_exit (void)
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e07ce2e033a..859bd3ffe43 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -677,6 +677,7 @@ static void raise_barrier(conf_t *conf)
static void lower_barrier(conf_t *conf)
{
unsigned long flags;
+ BUG_ON(conf->barrier <= 0);
spin_lock_irqsave(&conf->resync_lock, flags);
conf->barrier--;
spin_unlock_irqrestore(&conf->resync_lock, flags);
@@ -801,6 +802,25 @@ static int make_request(struct request_queue *q, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
+ if (bio_data_dir(bio) == WRITE &&
+ bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
+ bio->bi_sector < mddev->suspend_hi) {
+ /* As the suspend_* range is controlled by
+ * userspace, we want an interruptible
+ * wait.
+ */
+ DEFINE_WAIT(w);
+ for (;;) {
+ flush_signals(current);
+ prepare_to_wait(&conf->wait_barrier,
+ &w, TASK_INTERRUPTIBLE);
+ if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
+ bio->bi_sector >= mddev->suspend_hi)
+ break;
+ schedule();
+ }
+ finish_wait(&conf->wait_barrier, &w);
+ }
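Illustration only: the wait condition above is a half-open range-overlap test. A write covering sectors [start, end) must block while it intersects the user-controlled suspended range [suspend_lo, suspend_hi).

#include <stdio.h>

typedef unsigned long long sector_t;

/* Same test as in the hunk above: overlap exists when the write ends
 * after the suspended range starts and begins before it ends.
 */
static int must_wait(sector_t start, sector_t end,
		     sector_t suspend_lo, sector_t suspend_hi)
{
	return end > suspend_lo && start < suspend_hi;
}

int main(void)
{
	/* suspended range: sectors 1000..2000 */
	printf("%d\n", must_wait(900, 1100, 1000, 2000));	/* 1: overlaps */
	printf("%d\n", must_wait(100, 200, 1000, 2000));	/* 0: below    */
	printf("%d\n", must_wait(2000, 2100, 1000, 2000));	/* 0: above    */
	return 0;
}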
if (unlikely(!mddev->barriers_work &&
bio_rw_flagged(bio, BIO_RW_BARRIER))) {
if (rw == WRITE)
@@ -923,7 +943,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
/* do behind I/O ? */
if (bitmap &&
- atomic_read(&bitmap->behind_writes) < bitmap->max_write_behind &&
+ (atomic_read(&bitmap->behind_writes)
+ < mddev->bitmap_info.max_write_behind) &&
(behind_pages = alloc_behind_pages(bio)) != NULL)
set_bit(R1BIO_BehindIO, &r1_bio->state);
@@ -1941,74 +1962,48 @@ static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return mddev->dev_sectors;
}
-static int run(mddev_t *mddev)
+static conf_t *setup_conf(mddev_t *mddev)
{
conf_t *conf;
- int i, j, disk_idx;
+ int i;
mirror_info_t *disk;
mdk_rdev_t *rdev;
+ int err = -ENOMEM;
- if (mddev->level != 1) {
- printk("raid1: %s: raid level not set to mirroring (%d)\n",
- mdname(mddev), mddev->level);
- goto out;
- }
- if (mddev->reshape_position != MaxSector) {
- printk("raid1: %s: reshape_position set but not supported\n",
- mdname(mddev));
- goto out;
- }
- /*
- * copy the already verified devices into our private RAID1
- * bookkeeping area. [whatever we allocate in run(),
- * should be freed in stop()]
- */
conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
- mddev->private = conf;
if (!conf)
- goto out_no_mem;
+ goto abort;
conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->mirrors)
- goto out_no_mem;
+ goto abort;
conf->tmppage = alloc_page(GFP_KERNEL);
if (!conf->tmppage)
- goto out_no_mem;
+ goto abort;
- conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
+ conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
if (!conf->poolinfo)
- goto out_no_mem;
- conf->poolinfo->mddev = NULL;
+ goto abort;
conf->poolinfo->raid_disks = mddev->raid_disks;
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free,
conf->poolinfo);
if (!conf->r1bio_pool)
- goto out_no_mem;
+ goto abort;
+
conf->poolinfo->mddev = mddev;
spin_lock_init(&conf->device_lock);
- mddev->queue->queue_lock = &conf->device_lock;
-
list_for_each_entry(rdev, &mddev->disks, same_set) {
- disk_idx = rdev->raid_disk;
+ int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
continue;
disk = conf->mirrors + disk_idx;
disk->rdev = rdev;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must never risk
- * violating it, so limit ->max_sector to one PAGE, as
- * a one page request is never in violation.
- */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
- blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}
@@ -2022,8 +2017,7 @@ static int run(mddev_t *mddev)
bio_list_init(&conf->pending_bio_list);
bio_list_init(&conf->flushing_bio_list);
-
- mddev->degraded = 0;
+ conf->last_used = -1;
for (i = 0; i < conf->raid_disks; i++) {
disk = conf->mirrors + i;
@@ -2031,38 +2025,97 @@ static int run(mddev_t *mddev)
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
- mddev->degraded++;
if (disk->rdev)
conf->fullsync = 1;
- }
+ } else if (conf->last_used < 0)
+ /*
+ * The first working device is used as a
+ * starting point to read balancing.
+ */
+ conf->last_used = i;
}
- if (mddev->degraded == conf->raid_disks) {
+
+ err = -EIO;
+ if (conf->last_used < 0) {
printk(KERN_ERR "raid1: no operational mirrors for %s\n",
- mdname(mddev));
- goto out_free_conf;
+ mdname(mddev));
+ goto abort;
}
- if (conf->raid_disks - mddev->degraded == 1)
- mddev->recovery_cp = MaxSector;
+ err = -ENOMEM;
+ conf->thread = md_register_thread(raid1d, mddev, NULL);
+ if (!conf->thread) {
+ printk(KERN_ERR
+ "raid1: couldn't allocate thread for %s\n",
+ mdname(mddev));
+ goto abort;
+ }
+
+ return conf;
+
+ abort:
+ if (conf) {
+ if (conf->r1bio_pool)
+ mempool_destroy(conf->r1bio_pool);
+ kfree(conf->mirrors);
+ safe_put_page(conf->tmppage);
+ kfree(conf->poolinfo);
+ kfree(conf);
+ }
+ return ERR_PTR(err);
+}
+static int run(mddev_t *mddev)
+{
+ conf_t *conf;
+ int i;
+ mdk_rdev_t *rdev;
+
+ if (mddev->level != 1) {
+ printk("raid1: %s: raid level not set to mirroring (%d)\n",
+ mdname(mddev), mddev->level);
+ return -EIO;
+ }
+ if (mddev->reshape_position != MaxSector) {
+ printk("raid1: %s: reshape_position set but not supported\n",
+ mdname(mddev));
+ return -EIO;
+ }
/*
- * find the first working one and use it as a starting point
- * to read balancing.
+ * copy the already verified devices into our private RAID1
+ * bookkeeping area. [whatever we allocate in run(),
+ * should be freed in stop()]
*/
- for (j = 0; j < conf->raid_disks &&
- (!conf->mirrors[j].rdev ||
- !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
- /* nothing */;
- conf->last_used = j;
+ if (mddev->private == NULL)
+ conf = setup_conf(mddev);
+ else
+ conf = mddev->private;
+ if (IS_ERR(conf))
+ return PTR_ERR(conf);
- mddev->thread = md_register_thread(raid1d, mddev, NULL);
- if (!mddev->thread) {
- printk(KERN_ERR
- "raid1: couldn't allocate thread for %s\n",
- mdname(mddev));
- goto out_free_conf;
+ mddev->queue->queue_lock = &conf->device_lock;
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must never risk
+ * violating it, so limit ->max_sector to one PAGE, as
+ * a one page request is never in violation.
+ */
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
}
+ mddev->degraded = 0;
+ for (i=0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].rdev == NULL ||
+ !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
+ test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+ mddev->degraded++;
+
+ if (conf->raid_disks - mddev->degraded == 1)
+ mddev->recovery_cp = MaxSector;
+
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "raid1: %s is not clean"
" -- starting background reconstruction\n",
@@ -2071,9 +2124,14 @@ static int run(mddev_t *mddev)
"raid1: raid set %s active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
+
/*
* Ok, everything is just fine now
*/
+ mddev->thread = conf->thread;
+ conf->thread = NULL;
+ mddev->private = conf;
+
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
mddev->queue->unplug_fn = raid1_unplug;
@@ -2081,23 +2139,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
return 0;
-
-out_no_mem:
- printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
- mdname(mddev));
-
-out_free_conf:
- if (conf) {
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
- kfree(conf->mirrors);
- safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
- kfree(conf);
- mddev->private = NULL;
- }
-out:
- return -EIO;
}
static int stop(mddev_t *mddev)
@@ -2271,6 +2312,9 @@ static void raid1_quiesce(mddev_t *mddev, int state)
conf_t *conf = mddev->private;
switch(state) {
+ case 2: /* wake for suspend */
+ wake_up(&conf->wait_barrier);
+ break;
case 1:
raise_barrier(conf);
break;
@@ -2280,6 +2324,23 @@ static void raid1_quiesce(mddev_t *mddev, int state)
}
}
+static void *raid1_takeover(mddev_t *mddev)
+{
+ /* raid1 can take over:
+ * raid5 with 2 devices, any layout or chunk size
+ */
+ if (mddev->level == 5 && mddev->raid_disks == 2) {
+ conf_t *conf;
+ mddev->new_level = 1;
+ mddev->new_layout = 0;
+ mddev->new_chunk_sectors = 0;
+ conf = setup_conf(mddev);
+ if (!IS_ERR(conf))
+ conf->barrier = 1;
+ return conf;
+ }
+ return ERR_PTR(-EINVAL);
+}
static struct mdk_personality raid1_personality =
{
@@ -2299,6 +2360,7 @@ static struct mdk_personality raid1_personality =
.size = raid1_size,
.check_reshape = raid1_reshape,
.quiesce = raid1_quiesce,
+ .takeover = raid1_takeover,
};
static int __init raid_init(void)
@@ -2314,6 +2376,7 @@ static void raid_exit(void)
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index e87b84deff6..5f2d443ae28 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -59,6 +59,11 @@ struct r1_private_data_s {
mempool_t *r1bio_pool;
mempool_t *r1buf_pool;
+
+ /* When taking over an array from a different personality, we store
+ * the new thread here until we fully activate the array.
+ */
+ struct mdk_thread_s *thread;
};
typedef struct r1_private_data_s conf_t;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c2cb7b87b44..d119b7b75e7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -804,7 +804,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
mdk_rdev_t *blocked_rdev;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
@@ -1432,6 +1432,43 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
/*
+ * Used by fix_read_error() to decay the per rdev read_errors.
+ * We halve the read error count for every hour that has elapsed
+ * since the last recorded read error.
+ *
+ */
+static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ struct timespec cur_time_mon;
+ unsigned long hours_since_last;
+ unsigned int read_errors = atomic_read(&rdev->read_errors);
+
+ ktime_get_ts(&cur_time_mon);
+
+ if (rdev->last_read_error.tv_sec == 0 &&
+ rdev->last_read_error.tv_nsec == 0) {
+ /* first time we've seen a read error */
+ rdev->last_read_error = cur_time_mon;
+ return;
+ }
+
+ hours_since_last = (cur_time_mon.tv_sec -
+ rdev->last_read_error.tv_sec) / 3600;
+
+ rdev->last_read_error = cur_time_mon;
+
+ /*
+ * if hours_since_last is > the number of bits in read_errors
+ * just set read errors to 0. We do this to avoid
+ * overflowing the shift of read_errors by hours_since_last.
+ */
+ if (hours_since_last >= 8 * sizeof(read_errors))
+ atomic_set(&rdev->read_errors, 0);
+ else
+ atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
+}
+
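A standalone userspace sketch (illustration only) of the decay rule described above: halve the consecutive read-error count for every full hour since the last error, and clear it outright once the shift would reach the width of the counter.

#include <stdio.h>

static unsigned int decay_read_errors(unsigned int errors,
				      unsigned long hours_since_last)
{
	/* avoid shifting by more than the counter width */
	if (hours_since_last >= 8 * sizeof(errors))
		return 0;
	return errors >> hours_since_last;
}

int main(void)
{
	printf("%u\n", decay_read_errors(40, 0));	/* 40 */
	printf("%u\n", decay_read_errors(40, 2));	/* 10 */
	printf("%u\n", decay_read_errors(40, 64));	/* 0  */
	return 0;
}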
+/*
* This is a kernel thread which:
*
* 1. Retries failed read operations on working mirrors.
@@ -1444,6 +1481,43 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
int sect = 0; /* Offset from r10_bio->sector */
int sectors = r10_bio->sectors;
mdk_rdev_t*rdev;
+ int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
+
+ rcu_read_lock();
+ {
+ int d = r10_bio->devs[r10_bio->read_slot].devnum;
+ char b[BDEVNAME_SIZE];
+ int cur_read_error_count = 0;
+
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
+ bdevname(rdev->bdev, b);
+
+ if (test_bit(Faulty, &rdev->flags)) {
+ rcu_read_unlock();
+ /* drive has already been failed, just ignore any
+ more fix_read_error() attempts */
+ return;
+ }
+
+ check_decay_read_errors(mddev, rdev);
+ atomic_inc(&rdev->read_errors);
+ cur_read_error_count = atomic_read(&rdev->read_errors);
+ if (cur_read_error_count > max_read_errors) {
+ rcu_read_unlock();
+ printk(KERN_NOTICE
+ "raid10: %s: Raid device exceeded "
+ "read_error threshold "
+ "[cur %d:max %d]\n",
+ b, cur_read_error_count, max_read_errors);
+ printk(KERN_NOTICE
+ "raid10: %s: Failing raid "
+ "device\n", b);
+ md_error(mddev, conf->mirrors[d].rdev);
+ return;
+ }
+ }
+ rcu_read_unlock();
+
while(sectors) {
int s = sectors;
int sl = r10_bio->read_slot;
@@ -1488,6 +1562,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
/* write it back and re-read */
rcu_read_lock();
while (sl != r10_bio->read_slot) {
+ char b[BDEVNAME_SIZE];
int d;
if (sl==0)
sl = conf->copies;
@@ -1503,9 +1578,21 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE)
- == 0)
+ == 0) {
/* Well, this device is dead */
+ printk(KERN_NOTICE
+ "raid10:%s: read correction "
+ "write failed"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect+
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "raid10:%s: failing "
+ "drive\n",
+ bdevname(rdev->bdev, b));
md_error(mddev, rdev);
+ }
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
}
@@ -1526,10 +1613,22 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
if (sync_page_io(rdev->bdev,
r10_bio->devs[sl].addr +
sect + rdev->data_offset,
- s<<9, conf->tmppage, READ) == 0)
+ s<<9, conf->tmppage,
+ READ) == 0) {
/* Well, this device is dead */
+ printk(KERN_NOTICE
+ "raid10:%s: unable to read back "
+ "corrected sectors"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(sect+
+ rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "raid10:%s: failing drive\n",
+ bdevname(rdev->bdev, b));
+
md_error(mddev, rdev);
- else
+ } else {
printk(KERN_INFO
"raid10:%s: read error corrected"
" (%d sectors at %llu on %s)\n",
@@ -1537,6 +1636,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
(unsigned long long)(sect+
rdev->data_offset),
bdevname(rdev->bdev, b));
+ }
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
@@ -2275,13 +2375,6 @@ static void raid10_quiesce(mddev_t *mddev, int state)
lower_barrier(conf);
break;
}
- if (mddev->thread) {
- if (mddev->bitmap)
- mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
- else
- mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- md_wakeup_thread(mddev->thread);
- }
}
static struct mdk_personality raid10_personality =
@@ -2315,6 +2408,7 @@ static void raid_exit(void)
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d29215d966d..e84204eb12d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2947,6 +2947,7 @@ static void handle_stripe5(struct stripe_head *sh)
struct r5dev *dev;
mdk_rdev_t *blocked_rdev = NULL;
int prexor;
+ int dec_preread_active = 0;
memset(&s, 0, sizeof(s));
pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
@@ -3096,12 +3097,8 @@ static void handle_stripe5(struct stripe_head *sh)
set_bit(STRIPE_INSYNC, &sh->state);
}
}
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
@@ -3208,6 +3205,16 @@ static void handle_stripe5(struct stripe_head *sh)
ops_run_io(sh, &s);
+ if (dec_preread_active) {
+ /* We delay this until after ops_run_io so that if make_request
+ * is waiting on a barrier, it won't continue until the writes
+ * have actually been submitted.
+ */
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
return_io(return_bi);
}
@@ -3221,6 +3228,7 @@ static void handle_stripe6(struct stripe_head *sh)
struct r6_state r6s;
struct r5dev *dev, *pdev, *qdev;
mdk_rdev_t *blocked_rdev = NULL;
+ int dec_preread_active = 0;
pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
@@ -3358,7 +3366,6 @@ static void handle_stripe6(struct stripe_head *sh)
* completed
*/
if (sh->reconstruct_state == reconstruct_state_drain_result) {
- int qd_idx = sh->qd_idx;
sh->reconstruct_state = reconstruct_state_idle;
/* All the 'written' buffers and the parity blocks are ready to
@@ -3380,12 +3387,8 @@ static void handle_stripe6(struct stripe_head *sh)
set_bit(STRIPE_INSYNC, &sh->state);
}
}
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
@@ -3494,6 +3497,18 @@ static void handle_stripe6(struct stripe_head *sh)
ops_run_io(sh, &s);
+
+ if (dec_preread_active) {
+ /* We delay this until after ops_run_io so that if make_request
+ * is waiting on a barrier, it won't continue until the writes
+ * have actually been submitted.
+ */
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+
return_io(return_bi);
}
@@ -3741,7 +3756,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
- unsigned int dd_idx;
+ int dd_idx;
struct bio* align_bi;
mdk_rdev_t *rdev;
@@ -3866,7 +3881,13 @@ static int make_request(struct request_queue *q, struct bio * bi)
int cpu, remaining;
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
- bio_endio(bi, -EOPNOTSUPP);
+ /* Drain all pending writes. We only really need
+ * to ensure they have been submitted, but this is
+ * easier.
+ */
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ md_barrier_request(mddev, bi);
return 0;
}
@@ -3990,6 +4011,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
+ if (mddev->barrier &&
+ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -4009,6 +4033,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
bio_endio(bi, 0);
}
+
+ if (mddev->barrier) {
+ /* We need to wait for the stripes to all be handled.
+ * So: wait for preread_active_stripes to drop to 0.
+ */
+ wait_event(mddev->thread->wqueue,
+ atomic_read(&conf->preread_active_stripes) == 0);
+ }
return 0;
}
@@ -5860,6 +5892,7 @@ static void raid5_exit(void)
module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 866215ac7f2..bffc61bff5a 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -31,25 +31,6 @@ EXPORT_SYMBOL(raid6_empty_zero_page);
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
-/* Various routine sets */
-extern const struct raid6_calls raid6_intx1;
-extern const struct raid6_calls raid6_intx2;
-extern const struct raid6_calls raid6_intx4;
-extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
-extern const struct raid6_calls raid6_mmxx1;
-extern const struct raid6_calls raid6_mmxx2;
-extern const struct raid6_calls raid6_sse1x1;
-extern const struct raid6_calls raid6_sse1x2;
-extern const struct raid6_calls raid6_sse2x1;
-extern const struct raid6_calls raid6_sse2x2;
-extern const struct raid6_calls raid6_sse2x4;
-extern const struct raid6_calls raid6_altivec1;
-extern const struct raid6_calls raid6_altivec2;
-extern const struct raid6_calls raid6_altivec4;
-extern const struct raid6_calls raid6_altivec8;
-
const struct raid6_calls * const raid6_algos[] = {
&raid6_intx1,
&raid6_intx2,
@@ -169,3 +150,4 @@ static void raid6_exit(void)
subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
diff --git a/drivers/media/IR/Kconfig b/drivers/media/IR/Kconfig
new file mode 100644
index 00000000000..4dde7d180a3
--- /dev/null
+++ b/drivers/media/IR/Kconfig
@@ -0,0 +1,9 @@
+config IR_CORE
+ tristate
+ depends on INPUT
+ default INPUT
+
+config VIDEO_IR
+ tristate
+ depends on IR_CORE
+ default IR_CORE
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
new file mode 100644
index 00000000000..df5ddb4bbbf
--- /dev/null
+++ b/drivers/media/IR/Makefile
@@ -0,0 +1,5 @@
+ir-common-objs := ir-functions.o ir-keymaps.o
+ir-core-objs := ir-keytable.o
+
+obj-$(CONFIG_IR_CORE) += ir-core.o
+obj-$(CONFIG_VIDEO_IR) += ir-common.o
diff --git a/drivers/media/common/ir-functions.c b/drivers/media/IR/ir-functions.c
index e616f624cea..776a136616d 100644
--- a/drivers/media/common/ir-functions.c
+++ b/drivers/media/IR/ir-functions.c
@@ -34,9 +34,6 @@ static int repeat = 1;
module_param(repeat, int, 0444);
MODULE_PARM_DESC(repeat,"auto-repeat for IR keys (default: on)");
-int media_ir_debug; /* media_ir_debug level (0,1,2) */
-module_param_named(debug, media_ir_debug, int, 0644);
-
/* -------------------------------------------------------------------------- */
static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir)
@@ -55,25 +52,10 @@ static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir)
/* -------------------------------------------------------------------------- */
int ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
- int ir_type, struct ir_scancode_table *ir_codes)
+ int ir_type)
{
ir->ir_type = ir_type;
- ir->keytable.size = ir_roundup_tablesize(ir_codes->size);
- ir->keytable.scan = kzalloc(ir->keytable.size *
- sizeof(struct ir_scancode), GFP_KERNEL);
- if (!ir->keytable.scan)
- return -ENOMEM;
-
- IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n",
- ir->keytable.size,
- ir->keytable.size * sizeof(ir->keytable.scan));
-
- ir_copy_table(&ir->keytable, ir_codes);
- ir_set_keycode_table(dev, &ir->keytable);
-
- clear_bit(0, dev->keybit);
- set_bit(EV_KEY, dev->evbit);
if (repeat)
set_bit(EV_REP, dev->evbit);
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/IR/ir-keymaps.c
index 328c973a083..9bbe6b1e987 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/IR/ir-keymaps.c
@@ -1847,76 +1847,6 @@ struct ir_scancode_table ir_codes_hauppauge_new_table = {
};
EXPORT_SYMBOL_GPL(ir_codes_hauppauge_new_table);
-/*
- * Hauppauge:the newer, gray remotes (seems there are multiple
- * slightly different versions), shipped with cx88+ivtv cards.
- *
- * This table contains the complete RC5 code, instead of just the data part
- */
-static struct ir_scancode ir_codes_rc5_hauppauge_new[] = {
- /* Keys 0 to 9 */
- { 0x1e00, KEY_0 },
- { 0x1e01, KEY_1 },
- { 0x1e02, KEY_2 },
- { 0x1e03, KEY_3 },
- { 0x1e04, KEY_4 },
- { 0x1e05, KEY_5 },
- { 0x1e06, KEY_6 },
- { 0x1e07, KEY_7 },
- { 0x1e08, KEY_8 },
- { 0x1e09, KEY_9 },
-
- { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
- { 0x1e0b, KEY_RED }, /* red button */
- { 0x1e0c, KEY_RADIO },
- { 0x1e0d, KEY_MENU },
- { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
- { 0x1e0f, KEY_MUTE },
- { 0x1e10, KEY_VOLUMEUP },
- { 0x1e11, KEY_VOLUMEDOWN },
- { 0x1e12, KEY_PREVIOUS }, /* previous channel */
- { 0x1e14, KEY_UP },
- { 0x1e15, KEY_DOWN },
- { 0x1e16, KEY_LEFT },
- { 0x1e17, KEY_RIGHT },
- { 0x1e18, KEY_VIDEO }, /* Videos */
- { 0x1e19, KEY_AUDIO }, /* Music */
- /* 0x1e1a: Pictures - presume this means
- "Multimedia Home Platform" -
- no "PICTURES" key in input.h
- */
- { 0x1e1a, KEY_MHP },
-
- { 0x1e1b, KEY_EPG }, /* Guide */
- { 0x1e1c, KEY_TV },
- { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
- { 0x1e1f, KEY_EXIT }, /* back/exit */
- { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
- { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
- { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
- { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
- { 0x1e25, KEY_ENTER }, /* OK */
- { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
- { 0x1e29, KEY_BLUE }, /* blue key */
- { 0x1e2e, KEY_GREEN }, /* green button */
- { 0x1e30, KEY_PAUSE }, /* pause */
- { 0x1e32, KEY_REWIND }, /* backward << */
- { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
- { 0x1e35, KEY_PLAY },
- { 0x1e36, KEY_STOP },
- { 0x1e37, KEY_RECORD }, /* recording */
- { 0x1e38, KEY_YELLOW }, /* yellow key */
- { 0x1e3b, KEY_SELECT }, /* top right button */
- { 0x1e3c, KEY_ZOOM }, /* full */
- { 0x1e3d, KEY_POWER }, /* system power (green button) */
-};
-
-struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = {
- .scan = ir_codes_rc5_hauppauge_new,
- .size = ARRAY_SIZE(ir_codes_rc5_hauppauge_new),
-};
-EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table);
-
static struct ir_scancode ir_codes_npgtech[] = {
{ 0x1d, KEY_SWITCHVIDEOMODE }, /* switch inputs */
{ 0x2a, KEY_FRONT },
@@ -3314,3 +3244,152 @@ struct ir_scancode_table ir_codes_gadmei_rm008z_table = {
};
EXPORT_SYMBOL_GPL(ir_codes_gadmei_rm008z_table);
+/*************************************************************
+ * COMPLETE SCANCODE TABLES
+ * Instead of just a partial scancode, the tables below
+ * contain the complete scancode and the receiver protocol
+ *************************************************************/
+
+/*
+ * Hauppauge: the newer, gray remotes (seems there are multiple
+ * slightly different versions), shipped with cx88+ivtv cards.
+ *
+ * This table contains the complete RC5 code, instead of just the data part
+ */
+static struct ir_scancode ir_codes_rc5_hauppauge_new[] = {
+ /* Keys 0 to 9 */
+ { 0x1e00, KEY_0 },
+ { 0x1e01, KEY_1 },
+ { 0x1e02, KEY_2 },
+ { 0x1e03, KEY_3 },
+ { 0x1e04, KEY_4 },
+ { 0x1e05, KEY_5 },
+ { 0x1e06, KEY_6 },
+ { 0x1e07, KEY_7 },
+ { 0x1e08, KEY_8 },
+ { 0x1e09, KEY_9 },
+
+ { 0x1e0a, KEY_TEXT }, /* keypad asterisk as well */
+ { 0x1e0b, KEY_RED }, /* red button */
+ { 0x1e0c, KEY_RADIO },
+ { 0x1e0d, KEY_MENU },
+ { 0x1e0e, KEY_SUBTITLE }, /* also the # key */
+ { 0x1e0f, KEY_MUTE },
+ { 0x1e10, KEY_VOLUMEUP },
+ { 0x1e11, KEY_VOLUMEDOWN },
+ { 0x1e12, KEY_PREVIOUS }, /* previous channel */
+ { 0x1e14, KEY_UP },
+ { 0x1e15, KEY_DOWN },
+ { 0x1e16, KEY_LEFT },
+ { 0x1e17, KEY_RIGHT },
+ { 0x1e18, KEY_VIDEO }, /* Videos */
+ { 0x1e19, KEY_AUDIO }, /* Music */
+ /* 0x1e1a: Pictures - presume this means
+ "Multimedia Home Platform" -
+ no "PICTURES" key in input.h
+ */
+ { 0x1e1a, KEY_MHP },
+
+ { 0x1e1b, KEY_EPG }, /* Guide */
+ { 0x1e1c, KEY_TV },
+ { 0x1e1e, KEY_NEXTSONG }, /* skip >| */
+ { 0x1e1f, KEY_EXIT }, /* back/exit */
+ { 0x1e20, KEY_CHANNELUP }, /* channel / program + */
+ { 0x1e21, KEY_CHANNELDOWN }, /* channel / program - */
+ { 0x1e22, KEY_CHANNEL }, /* source (old black remote) */
+ { 0x1e24, KEY_PREVIOUSSONG }, /* replay |< */
+ { 0x1e25, KEY_ENTER }, /* OK */
+ { 0x1e26, KEY_SLEEP }, /* minimize (old black remote) */
+ { 0x1e29, KEY_BLUE }, /* blue key */
+ { 0x1e2e, KEY_GREEN }, /* green button */
+ { 0x1e30, KEY_PAUSE }, /* pause */
+ { 0x1e32, KEY_REWIND }, /* backward << */
+ { 0x1e34, KEY_FASTFORWARD }, /* forward >> */
+ { 0x1e35, KEY_PLAY },
+ { 0x1e36, KEY_STOP },
+ { 0x1e37, KEY_RECORD }, /* recording */
+ { 0x1e38, KEY_YELLOW }, /* yellow key */
+ { 0x1e3b, KEY_SELECT }, /* top right button */
+ { 0x1e3c, KEY_ZOOM }, /* full */
+ { 0x1e3d, KEY_POWER }, /* system power (green button) */
+};
+
+struct ir_scancode_table ir_codes_rc5_hauppauge_new_table = {
+ .scan = ir_codes_rc5_hauppauge_new,
+ .size = ARRAY_SIZE(ir_codes_rc5_hauppauge_new),
+ .ir_type = IR_TYPE_RC5,
+};
+EXPORT_SYMBOL_GPL(ir_codes_rc5_hauppauge_new_table);
+
+/* Terratec Cinergy Hybrid T USB XS FM
+ Mauro Carvalho Chehab <mchehab@redhat.com>
+ */
+static struct ir_scancode ir_codes_nec_terratec_cinergy_xs[] = {
+ { 0x1441, KEY_HOME},
+ { 0x1401, KEY_POWER2},
+
+ { 0x1442, KEY_MENU}, /* DVD menu */
+ { 0x1443, KEY_SUBTITLE},
+ { 0x1444, KEY_TEXT}, /* Teletext */
+ { 0x1445, KEY_DELETE},
+
+ { 0x1402, KEY_1},
+ { 0x1403, KEY_2},
+ { 0x1404, KEY_3},
+ { 0x1405, KEY_4},
+ { 0x1406, KEY_5},
+ { 0x1407, KEY_6},
+ { 0x1408, KEY_7},
+ { 0x1409, KEY_8},
+ { 0x140a, KEY_9},
+ { 0x140c, KEY_0},
+
+ { 0x140b, KEY_TUNER}, /* AV */
+ { 0x140d, KEY_MODE}, /* A.B */
+
+ { 0x1446, KEY_TV},
+ { 0x1447, KEY_DVD},
+ { 0x1449, KEY_VIDEO},
+ { 0x144a, KEY_RADIO}, /* Music */
+ { 0x144b, KEY_CAMERA}, /* PIC */
+
+ { 0x1410, KEY_UP},
+ { 0x1411, KEY_LEFT},
+ { 0x1412, KEY_OK},
+ { 0x1413, KEY_RIGHT},
+ { 0x1414, KEY_DOWN},
+
+ { 0x140f, KEY_EPG},
+ { 0x1416, KEY_INFO},
+ { 0x144d, KEY_BACKSPACE},
+
+ { 0x141c, KEY_VOLUMEUP},
+ { 0x141e, KEY_VOLUMEDOWN},
+
+ { 0x144c, KEY_PLAY},
+ { 0x141d, KEY_MUTE},
+
+ { 0x141b, KEY_CHANNELUP},
+ { 0x141f, KEY_CHANNELDOWN},
+
+ { 0x1417, KEY_RED},
+ { 0x1418, KEY_GREEN},
+ { 0x1419, KEY_YELLOW},
+ { 0x141a, KEY_BLUE},
+
+ { 0x1458, KEY_RECORD},
+ { 0x1448, KEY_STOP},
+ { 0x1440, KEY_PAUSE},
+
+ { 0x1454, KEY_LAST},
+ { 0x144e, KEY_REWIND},
+ { 0x144f, KEY_FASTFORWARD},
+ { 0x145c, KEY_NEXT},
+};
+struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table = {
+ .scan = ir_codes_nec_terratec_cinergy_xs,
+ .size = ARRAY_SIZE(ir_codes_nec_terratec_cinergy_xs),
+ .ir_type = IR_TYPE_NEC,
+};
+EXPORT_SYMBOL_GPL(ir_codes_nec_terratec_cinergy_xs_table);
+
diff --git a/drivers/media/common/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 26ce5bc2fdd..bff7a535603 100644
--- a/drivers/media/common/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -1,10 +1,19 @@
/* ir-register.c - handle IR scancode->keycode tables
*
* Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
-#include <linux/usb/input.h>
+#include <linux/usb/input.h>
#include <media/ir-common.h>
#define IR_TAB_MIN_SIZE 32
@@ -72,6 +81,7 @@ int ir_roundup_tablesize(int n_elems)
return n_elems;
}
+EXPORT_SYMBOL_GPL(ir_roundup_tablesize);
/**
* ir_copy_table() - copies a keytable, discarding the unused entries
@@ -100,6 +110,7 @@ int ir_copy_table(struct ir_scancode_table *destin,
return 0;
}
+EXPORT_SYMBOL_GPL(ir_copy_table);
/**
* ir_getkeycode() - get a keycode at the evdev scancode ->keycode table
@@ -114,7 +125,8 @@ static int ir_getkeycode(struct input_dev *dev,
int scancode, int *keycode)
{
int elem;
- struct ir_scancode_table *rc_tab = input_get_drvdata(dev);
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
elem = ir_seek_table(rc_tab, scancode);
if (elem >= 0) {
@@ -136,7 +148,6 @@ static int ir_getkeycode(struct input_dev *dev,
return 0;
}
-
/**
 * ir_is_resize_needed() - Check if the table needs resizing
* @table: keycode table that may need to resize
@@ -286,7 +297,8 @@ static int ir_setkeycode(struct input_dev *dev,
int scancode, int keycode)
{
int rc = 0;
- struct ir_scancode_table *rc_tab = input_get_drvdata(dev);
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
struct ir_scancode *keymap = rc_tab->scan;
unsigned long flags;
@@ -360,7 +372,8 @@ static int ir_setkeycode(struct input_dev *dev,
*/
u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
{
- struct ir_scancode_table *rc_tab = input_get_drvdata(dev);
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab = &ir_dev->rc_tab;
struct ir_scancode *keymap = rc_tab->scan;
int elem;
@@ -378,9 +391,10 @@ u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
 /* Report to userspace that an unknown keycode was received */
return KEY_RESERVED;
}
+EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
/**
- * ir_set_keycode_table() - sets the IR keycode table and add the handlers
+ * ir_input_register() - sets the IR keycode table and add the handlers
* for keymap table get/set
* @input_dev: the struct input_dev descriptor of the device
* @rc_tab: the struct ir_scancode_table table of scancode/keymap
@@ -389,17 +403,34 @@ u32 ir_g_keycode_from_table(struct input_dev *dev, u32 scancode)
* an IR.
* It should be called before registering the IR device.
*/
-int ir_set_keycode_table(struct input_dev *input_dev,
- struct ir_scancode_table *rc_tab)
+int ir_input_register(struct input_dev *input_dev,
+ struct ir_scancode_table *rc_tab)
{
- struct ir_scancode *keymap = rc_tab->scan;
- int i;
-
- spin_lock_init(&rc_tab->lock);
+ struct ir_input_dev *ir_dev;
+ struct ir_scancode *keymap = rc_tab->scan;
+ int i, rc;
if (rc_tab->scan == NULL || !rc_tab->size)
return -EINVAL;
+ ir_dev = kzalloc(sizeof(*ir_dev), GFP_KERNEL);
+ if (!ir_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&rc_tab->lock);
+
+ ir_dev->rc_tab.size = ir_roundup_tablesize(rc_tab->size);
+ ir_dev->rc_tab.scan = kzalloc(ir_dev->rc_tab.size *
+ sizeof(struct ir_scancode), GFP_KERNEL);
+ if (!ir_dev->rc_tab.scan)
+ return -ENOMEM;
+
+ IR_dprintk(1, "Allocated space for %d keycode entries (%zd bytes)\n",
+ ir_dev->rc_tab.size,
+ ir_dev->rc_tab.size * sizeof(ir_dev->rc_tab.scan));
+
+ ir_copy_table(&ir_dev->rc_tab, rc_tab);
+
/* set the bits for the keys */
IR_dprintk(1, "key map size: %d\n", rc_tab->size);
for (i = 0; i < rc_tab->size; i++) {
@@ -407,23 +438,48 @@ int ir_set_keycode_table(struct input_dev *input_dev,
i, keymap[i].keycode);
set_bit(keymap[i].keycode, input_dev->keybit);
}
+ clear_bit(0, input_dev->keybit);
+
+ set_bit(EV_KEY, input_dev->evbit);
input_dev->getkeycode = ir_getkeycode;
input_dev->setkeycode = ir_setkeycode;
- input_set_drvdata(input_dev, rc_tab);
+ input_set_drvdata(input_dev, ir_dev);
- return 0;
+ rc = input_register_device(input_dev);
+ if (rc < 0) {
+ kfree(rc_tab->scan);
+ kfree(ir_dev);
+ input_set_drvdata(input_dev, NULL);
+ }
+
+ return rc;
}
+EXPORT_SYMBOL_GPL(ir_input_register);
-void ir_input_free(struct input_dev *dev)
+void ir_input_unregister(struct input_dev *dev)
{
- struct ir_scancode_table *rc_tab = input_get_drvdata(dev);
+ struct ir_input_dev *ir_dev = input_get_drvdata(dev);
+ struct ir_scancode_table *rc_tab;
+
+ if (!ir_dev)
+ return;
IR_dprintk(1, "Freed keycode table\n");
+ rc_tab = &ir_dev->rc_tab;
rc_tab->size = 0;
kfree(rc_tab->scan);
rc_tab->scan = NULL;
+
+ kfree(ir_dev);
+ input_unregister_device(dev);
}
-EXPORT_SYMBOL_GPL(ir_input_free);
+EXPORT_SYMBOL_GPL(ir_input_unregister);
+
+int ir_core_debug; /* ir_debug level (0,1,2) */
+EXPORT_SYMBOL_GPL(ir_core_debug);
+module_param_named(debug, ir_core_debug, int, 0644);
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index ba69beeb0e2..a28541b2b1a 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -99,6 +99,7 @@ config VIDEO_MEDIA
comment "Multimedia drivers"
source "drivers/media/common/Kconfig"
+source "drivers/media/IR/Kconfig"
#
# Tuner drivers for DVB and V4L
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 09a829d8a7e..499b0810d01 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,7 +2,7 @@
# Makefile for the kernel multimedia device drivers.
#
-obj-y += common/ video/
+obj-y += common/ IR/ video/
obj-$(CONFIG_VIDEO_DEV) += radio/
obj-$(CONFIG_DVB_CORE) += dvb/
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index 169b337b7c9..e3ec9639321 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,8 +1,6 @@
saa7146-objs := saa7146_i2c.o saa7146_core.o
saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o
-ir-common-objs := ir-functions.o ir-keymaps.o ir-keytable.o
obj-y += tuners/
obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o
obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o
-obj-$(CONFIG_VIDEO_IR) += ir-common.o
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 620f655fa9c..7364b9642d0 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -1,7 +1,5 @@
#include <media/saa7146_vv.h>
-#define BOARD_CAN_DO_VBI(dev) (dev->revision != 0 && dev->vv_data->vbi_minor != -1)
-
/****************************************************************************/
/* resource management functions, shamelessly stolen from saa7134 driver */
@@ -194,43 +192,24 @@ void saa7146_buffer_timeout(unsigned long data)
static int fops_open(struct file *file)
{
- unsigned int minor = video_devdata(file)->minor;
- struct saa7146_dev *h = NULL, *dev = NULL;
- struct list_head *list;
+ struct video_device *vdev = video_devdata(file);
+ struct saa7146_dev *dev = video_drvdata(file);
struct saa7146_fh *fh = NULL;
int result = 0;
- enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ enum v4l2_buf_type type;
- DEB_EE(("file:%p, minor:%d\n", file, minor));
+ DEB_EE(("file:%p, dev:%s\n", file, video_device_node_name(vdev)));
if (mutex_lock_interruptible(&saa7146_devices_lock))
return -ERESTARTSYS;
- list_for_each(list,&saa7146_devices) {
- h = list_entry(list, struct saa7146_dev, item);
- if( NULL == h->vv_data ) {
- DEB_D(("device %p has not registered video devices.\n",h));
- continue;
- }
- DEB_D(("trying: %p @ major %d,%d\n",h,h->vv_data->video_minor,h->vv_data->vbi_minor));
-
- if (h->vv_data->video_minor == minor) {
- dev = h;
- }
- if (h->vv_data->vbi_minor == minor) {
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- dev = h;
- }
- }
- if (NULL == dev) {
- DEB_S(("no such video device.\n"));
- result = -ENODEV;
- goto out;
- }
-
DEB_D(("using: %p\n",dev));
+ type = vdev->vfl_type == VFL_TYPE_GRABBER
+ ? V4L2_BUF_TYPE_VIDEO_CAPTURE
+ : V4L2_BUF_TYPE_VBI_CAPTURE;
+
/* check if an extension is registered */
if( NULL == dev->ext ) {
DEB_S(("no extension registered for this device.\n"));
@@ -474,9 +453,6 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
configuration data) */
dev->ext_vv_data = ext_vv;
- vv->video_minor = -1;
- vv->vbi_minor = -1;
-
vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle);
if( NULL == vv->d_clipping.cpu_addr ) {
ERR(("out of memory. aborting.\n"));
@@ -515,7 +491,6 @@ EXPORT_SYMBOL_GPL(saa7146_vv_release);
int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
char *name, int type)
{
- struct saa7146_vv *vv = dev->vv_data;
struct video_device *vfd;
int err;
int i;
@@ -543,15 +518,8 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
return err;
}
- if( VFL_TYPE_GRABBER == type ) {
- vv->video_minor = vfd->minor;
- INFO(("%s: registered device video%d [v4l2]\n",
- dev->name, vfd->num));
- } else {
- vv->vbi_minor = vfd->minor;
- INFO(("%s: registered device vbi%d [v4l2]\n",
- dev->name, vfd->num));
- }
+ INFO(("%s: registered device %s [v4l2]\n",
+ dev->name, video_device_node_name(vfd)));
*vid = vfd;
return 0;
@@ -560,16 +528,8 @@ EXPORT_SYMBOL_GPL(saa7146_register_device);
int saa7146_unregister_device(struct video_device **vid, struct saa7146_dev* dev)
{
- struct saa7146_vv *vv = dev->vv_data;
-
DEB_EE(("dev:%p\n",dev));
- if ((*vid)->vfl_type == VFL_TYPE_GRABBER) {
- vv->video_minor = -1;
- } else {
- vv->vbi_minor = -1;
- }
-
video_unregister_device(*vid);
*vid = NULL;
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 53e3f2a7d31..f0f483ac8b8 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -589,7 +589,7 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105)
snprintf(dm1105->ir.input_phys, sizeof(dm1105->ir.input_phys),
"pci-%s/ir0", pci_name(dm1105->pdev));
- err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type, ir_codes);
+ err = ir_input_init(input_dev, &dm1105->ir.ir, ir_type);
if (err < 0) {
input_free_device(input_dev);
return err;
@@ -611,20 +611,14 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105)
INIT_WORK(&dm1105->ir.work, dm1105_emit_key);
- err = input_register_device(input_dev);
- if (err) {
- ir_input_free(input_dev);
- input_free_device(input_dev);
- return err;
- }
+ err = ir_input_register(input_dev, ir_codes);
- return 0;
+ return err;
}
void __devexit dm1105_ir_exit(struct dm1105dvb *dm1105)
{
- ir_input_free(dm1105->ir.input_dev);
- input_unregister_device(dm1105->ir.input_dev);
+ ir_input_unregister(dm1105->ir.input_dev);
}
static int __devinit dm1105dvb_hw_init(struct dm1105dvb *dm1105dvb)
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 2dee1bf7357..1b249897c9f 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -265,9 +265,13 @@ config DVB_USB_DW2102
select DVB_TDA10021 if !DVB_FE_CUSTOMISE
select DVB_MT312 if !DVB_FE_CUSTOMISE
select DVB_ZL10039 if !DVB_FE_CUSTOMISE
+ select DVB_DS3000 if !DVB_FE_CUSTOMISE
+ select DVB_STB6100 if !DVB_FE_CUSTOMISE
+ select DVB_STV6110 if !DVB_FE_CUSTOMISE
+ select DVB_STV0900 if !DVB_FE_CUSTOMISE
help
- Say Y here to support the DvbWorld DVB-S/S2 USB2.0 receivers
- and the TeVii S650, S630.
+ Say Y here to support the DvbWorld, TeVii, Prof DVB-S/S2 USB2.0
+ receivers.
config DVB_USB_CINERGY_T2
tristate "Terratec CinergyT2/qanu USB 2.0 DVB-T receiver"
diff --git a/drivers/media/dvb/dvb-usb/dib0700.h b/drivers/media/dvb/dvb-usb/dib0700.h
index 8b544fe79b0..495a90577c5 100644
--- a/drivers/media/dvb/dvb-usb/dib0700.h
+++ b/drivers/media/dvb/dvb-usb/dib0700.h
@@ -20,20 +20,22 @@ extern int dvb_usb_dib0700_debug;
#define deb_fwdata(args...) dprintk(dvb_usb_dib0700_debug,0x04,args)
#define deb_data(args...) dprintk(dvb_usb_dib0700_debug,0x08,args)
-#define REQUEST_I2C_READ 0x2
-#define REQUEST_I2C_WRITE 0x3
-#define REQUEST_POLL_RC 0x4 /* deprecated in firmware v1.20 */
-#define REQUEST_JUMPRAM 0x8
-#define REQUEST_SET_CLOCK 0xB
-#define REQUEST_SET_GPIO 0xC
-#define REQUEST_ENABLE_VIDEO 0xF
+#define REQUEST_SET_USB_XFER_LEN 0x0 /* valid only for firmware version */
+ /* higher than 1.21 */
+#define REQUEST_I2C_READ 0x2
+#define REQUEST_I2C_WRITE 0x3
+#define REQUEST_POLL_RC 0x4 /* deprecated in firmware v1.20 */
+#define REQUEST_JUMPRAM 0x8
+#define REQUEST_SET_CLOCK 0xB
+#define REQUEST_SET_GPIO 0xC
+#define REQUEST_ENABLE_VIDEO 0xF
// 1 Byte: 4MSB(1 = enable streaming, 0 = disable streaming) 4LSB(Video Mode: 0 = MPEG2 188Bytes, 1 = Analog)
// 2 Byte: MPEG2 mode: 4MSB(1 = Master Mode, 0 = Slave Mode) 4LSB(Channel 1 = bit0, Channel 2 = bit1)
// 2 Byte: Analog mode: 4MSB(0 = 625 lines, 1 = 525 lines) 4LSB( " " )
-#define REQUEST_SET_RC 0x11
-#define REQUEST_NEW_I2C_READ 0x12
-#define REQUEST_NEW_I2C_WRITE 0x13
-#define REQUEST_GET_VERSION 0x15
+#define REQUEST_SET_RC 0x11
+#define REQUEST_NEW_I2C_READ 0x12
+#define REQUEST_NEW_I2C_WRITE 0x13
+#define REQUEST_GET_VERSION 0x15
struct dib0700_state {
u8 channel_state;
@@ -44,6 +46,8 @@ struct dib0700_state {
u8 is_dib7000pc;
u8 fw_use_new_i2c_api;
u8 disable_streaming_master_mode;
+ u32 fw_version;
+ u32 nb_packet_buffer_size;
};
extern int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index db7f7f79a66..0d3c9a9a33b 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -17,6 +17,14 @@ int dvb_usb_dib0700_ir_proto = 1;
module_param(dvb_usb_dib0700_ir_proto, int, 0644);
MODULE_PARM_DESC(dvb_usb_dib0700_ir_proto, "set ir protocol (0=NEC, 1=RC5 (default), 2=RC6).");
+static int nb_packet_buffer_size = 21;
+module_param(nb_packet_buffer_size, int, 0644);
+MODULE_PARM_DESC(nb_packet_buffer_size,
+ "Set the dib0700 driver data buffer size. This parameter "
+ "corresponds to the number of TS packets. The actual size of "
+ "the data buffer corresponds to this parameter "
+ "multiplied by 188 (default: 21)");
+
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -28,10 +36,14 @@ int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion,
REQUEST_GET_VERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
b, sizeof(b), USB_CTRL_GET_TIMEOUT);
- *hwversion = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
- *romversion = (b[4] << 24) | (b[5] << 16) | (b[6] << 8) | b[7];
- *ramversion = (b[8] << 24) | (b[9] << 16) | (b[10] << 8) | b[11];
- *fwtype = (b[12] << 24) | (b[13] << 16) | (b[14] << 8) | b[15];
+ if (hwversion != NULL)
+ *hwversion = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
+ if (romversion != NULL)
+ *romversion = (b[4] << 24) | (b[5] << 16) | (b[6] << 8) | b[7];
+ if (ramversion != NULL)
+ *ramversion = (b[8] << 24) | (b[9] << 16) | (b[10] << 8) | b[11];
+ if (fwtype != NULL)
+ *fwtype = (b[12] << 24) | (b[13] << 16) | (b[14] << 8) | b[15];
return ret;
}
@@ -97,6 +109,27 @@ int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_
return dib0700_ctrl_wr(d,buf,3);
}
+static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets)
+{
+ struct dib0700_state *st = d->priv;
+ u8 b[3];
+ int ret;
+
+ if (st->fw_version >= 0x10201) {
+ b[0] = REQUEST_SET_USB_XFER_LEN;
+ b[1] = (nb_ts_packets >> 8)&0xff;
+ b[2] = nb_ts_packets & 0xff;
+
+ deb_info("set the USB xfer len to %i TS packets\n", nb_ts_packets);
+
+ ret = dib0700_ctrl_wr(d, b, 3);
+ } else {
+ deb_info("this firmware does not allow changing the USB xfer len\n");
+ ret = -EIO;
+ }
+ return ret;
+}
+
/*
* I2C master xfer function (supported in 1.20 firmware)
*/
@@ -328,7 +361,9 @@ static int dib0700_jumpram(struct usb_device *udev, u32 address)
int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw)
{
struct hexline hx;
- int pos = 0, ret, act_len;
+ int pos = 0, ret, act_len, i, adap_num;
+ u8 b[16];
+ u32 fw_version;
u8 buf[260];
@@ -364,6 +399,34 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
} else
ret = -EIO;
+ /* the number of TS packets has to be at least 1 */
+ if (nb_packet_buffer_size < 1)
+ nb_packet_buffer_size = 1;
+
+ /* get the firmware version */
+ usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ REQUEST_GET_VERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ b, sizeof(b), USB_CTRL_GET_TIMEOUT);
+ fw_version = (b[8] << 24) | (b[9] << 16) | (b[10] << 8) | b[11];
+
+ /* set the buffer size - DVB-USB allocates URB buffers
+ * only after the firmware download was successful */
+ for (i = 0; i < dib0700_device_count; i++) {
+ for (adap_num = 0; adap_num < dib0700_devices[i].num_adapters;
+ adap_num++) {
+ if (fw_version >= 0x10201)
+ dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = 188*nb_packet_buffer_size;
+ else {
+ /* for fw version older than 1.20.1,
+ * the buffersize has to be a multiple of 512 */
+ dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = ((188*nb_packet_buffer_size+188/2)/512)*512;
+ if (dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize < 512)
+ dib0700_devices[i].adapter[adap_num].stream.u.bulk.buffersize = 512;
+ }
+ }
+ }
+
return ret;
}
@@ -371,6 +434,18 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
struct dib0700_state *st = adap->dev->priv;
u8 b[4];
+ int ret;
+
+ if ((onoff != 0) && (st->fw_version >= 0x10201)) {
+ /* for firmware later than 1.20.1,
+ * the USB xfer length can be set */
+ ret = dib0700_set_usb_xfer_len(adap->dev,
+ st->nb_packet_buffer_size);
+ if (ret < 0) {
+ deb_info("cannot set the USB xfer len\n");
+ return ret;
+ }
+ }
b[0] = REQUEST_ENABLE_VIDEO;
b[1] = (onoff << 4) | 0x00; /* this bit gives a kind of command, rather than enabling something or not */
@@ -415,9 +490,21 @@ static int dib0700_probe(struct usb_interface *intf,
for (i = 0; i < dib0700_device_count; i++)
if (dvb_usb_device_init(intf, &dib0700_devices[i], THIS_MODULE,
- &dev, adapter_nr) == 0)
- {
+ &dev, adapter_nr) == 0) {
+ struct dib0700_state *st = dev->priv;
+ u32 hwversion, romversion, fw_version, fwtype;
+
+ dib0700_get_version(dev, &hwversion, &romversion,
+ &fw_version, &fwtype);
+
+ deb_info("Firmware version: %x, %d, 0x%x, %d\n",
+ hwversion, romversion, fw_version, fwtype);
+
+ st->fw_version = fw_version;
+ st->nb_packet_buffer_size = (u32)nb_packet_buffer_size;
+
dib0700_rc_setup(dev);
+
return 0;
}
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 684146f98eb..44972d01bbd 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -18,6 +18,7 @@
#include "xc5000.h"
#include "s5h1411.h"
#include "dib0070.h"
+#include "dib0090.h"
#include "lgdt3305.h"
#include "mxl5007t.h"
@@ -130,93 +131,95 @@ static int bristol_tuner_attach(struct dvb_usb_adapter *adap)
/* MT226x */
static struct dibx000_agc_config stk7700d_7000p_mt2266_agc_config[2] = {
{
- BAND_UHF, // band_caps
+ BAND_UHF,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1,
* P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), // setup
-
- 1130, // inv_gain
- 21, // time_stabiliz
-
- 0, // alpha_level
- 118, // thlock
-
- 0, // wbd_inv
- 3530, // wbd_ref
- 1, // wbd_sel
- 0, // wbd_alpha
-
- 65535, // agc1_max
- 33770, // agc1_min
- 65535, // agc2_max
- 23592, // agc2_min
-
- 0, // agc1_pt1
- 62, // agc1_pt2
- 255, // agc1_pt3
- 64, // agc1_slope1
- 64, // agc1_slope2
- 132, // agc2_pt1
- 192, // agc2_pt2
- 80, // agc2_slope1
- 80, // agc2_slope2
-
- 17, // alpha_mant
- 27, // alpha_exp
- 23, // beta_mant
- 51, // beta_exp
-
- 1, // perform_agc_softsplit
+ (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8)
+ | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
+
+ 1130,
+ 21,
+
+ 0,
+ 118,
+
+ 0,
+ 3530,
+ 1,
+ 0,
+
+ 65535,
+ 33770,
+ 65535,
+ 23592,
+
+ 0,
+ 62,
+ 255,
+ 64,
+ 64,
+ 132,
+ 192,
+ 80,
+ 80,
+
+ 17,
+ 27,
+ 23,
+ 51,
+
+ 1,
}, {
- BAND_VHF | BAND_LBAND, // band_caps
+ BAND_VHF | BAND_LBAND,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1,
* P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup
-
- 2372, // inv_gain
- 21, // time_stabiliz
-
- 0, // alpha_level
- 118, // thlock
-
- 0, // wbd_inv
- 3530, // wbd_ref
- 1, // wbd_sel
- 0, // wbd_alpha
-
- 65535, // agc1_max
- 0, // agc1_min
- 65535, // agc2_max
- 23592, // agc2_min
-
- 0, // agc1_pt1
- 128, // agc1_pt2
- 128, // agc1_pt3
- 128, // agc1_slope1
- 0, // agc1_slope2
- 128, // agc2_pt1
- 253, // agc2_pt2
- 81, // agc2_slope1
- 0, // agc2_slope2
-
- 17, // alpha_mant
- 27, // alpha_exp
- 23, // beta_mant
- 51, // beta_exp
-
- 1, // perform_agc_softsplit
+ (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8)
+ | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
+
+ 2372,
+ 21,
+
+ 0,
+ 118,
+
+ 0,
+ 3530,
+ 1,
+ 0,
+
+ 65535,
+ 0,
+ 65535,
+ 23592,
+
+ 0,
+ 128,
+ 128,
+ 128,
+ 0,
+ 128,
+ 253,
+ 81,
+ 0,
+
+ 17,
+ 27,
+ 23,
+ 51,
+
+ 1,
}
};
static struct dibx000_bandwidth_config stk7700d_mt2266_pll_config = {
- 60000, 30000, // internal, sampling
- 1, 8, 3, 1, 0, // pll_cfg: prediv, ratio, range, reset, bypass
- 0, 0, 1, 1, 2, // misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, modulo
- (3 << 14) | (1 << 12) | (524 << 0), // sad_cfg: refsel, sel, freq_15k
- 0, // ifreq
- 20452225, // timf
+ 60000, 30000,
+ 1, 8, 3, 1, 0,
+ 0, 0, 1, 1, 2,
+ (3 << 14) | (1 << 12) | (524 << 0),
+ 0,
+ 20452225,
};
static struct dib7000p_config stk7700d_dib7000p_mt2266_config[] = {
@@ -605,17 +608,17 @@ static int dib0700_rc_query_v1_20(struct dvb_usb_device *d, u32 *event,
}
break;
default:
- if (actlen != sizeof(buf)) {
- /* We didn't get back the 6 byte message we expected */
- err("Unexpected RC response size [%d]", actlen);
- return -1;
- }
+ if (actlen != sizeof(buf)) {
+ /* We didn't get back the 6 byte message we expected */
+ err("Unexpected RC response size [%d]", actlen);
+ return -1;
+ }
- poll_reply.report_id = buf[0];
- poll_reply.data_state = buf[1];
+ poll_reply.report_id = buf[0];
+ poll_reply.data_state = buf[1];
poll_reply.system = (buf[2] << 8) | buf[3];
- poll_reply.data = buf[4];
- poll_reply.not_data = buf[5];
+ poll_reply.data = buf[4];
+ poll_reply.not_data = buf[5];
break;
}
@@ -632,7 +635,7 @@ static int dib0700_rc_query_v1_20(struct dvb_usb_device *d, u32 *event,
/* Find the key in the map */
for (i = 0; i < d->props.rc_key_map_size; i++) {
if (rc5_custom(&keymap[i]) == (poll_reply.system & 0xff) &&
- rc5_data(&keymap[i]) == poll_reply.data) {
+ rc5_data(&keymap[i]) == poll_reply.data) {
*event = keymap[i].event;
found = 1;
break;
@@ -641,8 +644,8 @@ static int dib0700_rc_query_v1_20(struct dvb_usb_device *d, u32 *event,
if (found == 0) {
err("Unknown remote controller key: %04x %02x %02x",
- poll_reply.system,
- poll_reply.data, poll_reply.not_data);
+ poll_reply.system,
+ poll_reply.data, poll_reply.not_data);
d->last_event = 0;
return 0;
}
@@ -933,47 +936,48 @@ static struct dvb_usb_rc_key dib0700_rc_keys[] = {
/* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */
static struct dibx000_agc_config stk7700p_7000m_mt2060_agc_config = {
- BAND_UHF | BAND_VHF, // band_caps
+ BAND_UHF | BAND_VHF,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
* P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup
-
- 712, // inv_gain
- 41, // time_stabiliz
-
- 0, // alpha_level
- 118, // thlock
-
- 0, // wbd_inv
- 4095, // wbd_ref
- 0, // wbd_sel
- 0, // wbd_alpha
-
- 42598, // agc1_max
- 17694, // agc1_min
- 45875, // agc2_max
- 2621, // agc2_min
- 0, // agc1_pt1
- 76, // agc1_pt2
- 139, // agc1_pt3
- 52, // agc1_slope1
- 59, // agc1_slope2
- 107, // agc2_pt1
- 172, // agc2_pt2
- 57, // agc2_slope1
- 70, // agc2_slope2
-
- 21, // alpha_mant
- 25, // alpha_exp
- 28, // beta_mant
- 48, // beta_exp
-
- 1, // perform_agc_softsplit
- { 0, // split_min
- 107, // split_max
- 51800, // global_split_min
- 24700 // global_split_max
+ (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
+ | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
+
+ 712,
+ 41,
+
+ 0,
+ 118,
+
+ 0,
+ 4095,
+ 0,
+ 0,
+
+ 42598,
+ 17694,
+ 45875,
+ 2621,
+ 0,
+ 76,
+ 139,
+ 52,
+ 59,
+ 107,
+ 172,
+ 57,
+ 70,
+
+ 21,
+ 25,
+ 28,
+ 48,
+
+ 1,
+ { 0,
+ 107,
+ 51800,
+ 24700
},
};
@@ -982,54 +986,55 @@ static struct dibx000_agc_config stk7700p_7000p_mt2060_agc_config = {
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
* P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), // setup
+ (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
+ | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0),
- 712, // inv_gain
- 41, // time_stabiliz
+ 712,
+ 41,
- 0, // alpha_level
- 118, // thlock
+ 0,
+ 118,
- 0, // wbd_inv
- 4095, // wbd_ref
- 0, // wbd_sel
- 0, // wbd_alpha
+ 0,
+ 4095,
+ 0,
+ 0,
- 42598, // agc1_max
- 16384, // agc1_min
- 42598, // agc2_max
- 0, // agc2_min
+ 42598,
+ 16384,
+ 42598,
+ 0,
- 0, // agc1_pt1
- 137, // agc1_pt2
- 255, // agc1_pt3
+ 0,
+ 137,
+ 255,
- 0, // agc1_slope1
- 255, // agc1_slope2
+ 0,
+ 255,
- 0, // agc2_pt1
- 0, // agc2_pt2
+ 0,
+ 0,
- 0, // agc2_slope1
- 41, // agc2_slope2
+ 0,
+ 41,
- 15, // alpha_mant
- 25, // alpha_exp
+ 15,
+ 25,
- 28, // beta_mant
- 48, // beta_exp
+ 28,
+ 48,
- 0, // perform_agc_softsplit
+ 0,
};
static struct dibx000_bandwidth_config stk7700p_pll_config = {
- 60000, 30000, // internal, sampling
- 1, 8, 3, 1, 0, // pll_cfg: prediv, ratio, range, reset, bypass
- 0, 0, 1, 1, 0, // misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, modulo
- (3 << 14) | (1 << 12) | (524 << 0), // sad_cfg: refsel, sel, freq_15k
- 60258167, // ifreq
- 20452225, // timf
- 30000000, // xtal
+ 60000, 30000,
+ 1, 8, 3, 1, 0,
+ 0, 0, 1, 1, 0,
+ (3 << 14) | (1 << 12) | (524 << 0),
+ 60258167,
+ 20452225,
+ 30000000,
};
static struct dib7000m_config stk7700p_dib7000m_config = {
@@ -1115,41 +1120,42 @@ static struct dibx000_agc_config dib7070_agc_config = {
BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
/* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0,
* P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
- (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), // setup
-
- 600, // inv_gain
- 10, // time_stabiliz
-
- 0, // alpha_level
- 118, // thlock
-
- 0, // wbd_inv
- 3530, // wbd_ref
- 1, // wbd_sel
- 5, // wbd_alpha
-
- 65535, // agc1_max
- 0, // agc1_min
-
- 65535, // agc2_max
- 0, // agc2_min
-
- 0, // agc1_pt1
- 40, // agc1_pt2
- 183, // agc1_pt3
- 206, // agc1_slope1
- 255, // agc1_slope2
- 72, // agc2_pt1
- 152, // agc2_pt2
- 88, // agc2_slope1
- 90, // agc2_slope2
-
- 17, // alpha_mant
- 27, // alpha_exp
- 23, // beta_mant
- 51, // beta_exp
-
- 0, // perform_agc_softsplit
+ (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
+ | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
+
+ 600,
+ 10,
+
+ 0,
+ 118,
+
+ 0,
+ 3530,
+ 1,
+ 5,
+
+ 65535,
+ 0,
+
+ 65535,
+ 0,
+
+ 0,
+ 40,
+ 183,
+ 206,
+ 255,
+ 72,
+ 152,
+ 88,
+ 90,
+
+ 17,
+ 27,
+ 23,
+ 51,
+
+ 0,
};
static int dib7070_tuner_reset(struct dvb_frontend *fe, int onoff)
@@ -1276,13 +1282,13 @@ static int stk70x0p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
}
static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = {
- 60000, 15000, // internal, sampling
- 1, 20, 3, 1, 0, // pll_cfg: prediv, ratio, range, reset, bypass
- 0, 0, 1, 1, 2, // misc: refdiv, bypclk_div, IO_CLK_en_core, ADClkSrc, modulo
- (3 << 14) | (1 << 12) | (524 << 0), // sad_cfg: refsel, sel, freq_15k
- (0 << 25) | 0, // ifreq = 0.000000 MHz
- 20452225, // timf
- 12000000, // xtal_hz
+ 60000, 15000,
+ 1, 20, 3, 1, 0,
+ 0, 0, 1, 1, 2,
+ (3 << 14) | (1 << 12) | (524 << 0),
+ (0 << 25) | 0,
+ 20452225,
+ 12000000,
};
static struct dib7000p_config dib7070p_dib7000p_config = {
@@ -1476,12 +1482,12 @@ static struct dib8000_config dib807x_dib8000_config[2] = {
}
};
-static int dib807x_tuner_reset(struct dvb_frontend *fe, int onoff)
+static int dib80xx_tuner_reset(struct dvb_frontend *fe, int onoff)
{
return dib8000_set_gpio(fe, 5, 0, !onoff);
}
-static int dib807x_tuner_sleep(struct dvb_frontend *fe, int onoff)
+static int dib80xx_tuner_sleep(struct dvb_frontend *fe, int onoff)
{
return dib8000_set_gpio(fe, 0, 0, onoff);
}
@@ -1494,8 +1500,8 @@ static const struct dib0070_wbd_gain_cfg dib8070_wbd_gain_cfg[] = {
static struct dib0070_config dib807x_dib0070_config[2] = {
{
.i2c_address = DEFAULT_DIB0070_I2C_ADDRESS,
- .reset = dib807x_tuner_reset,
- .sleep = dib807x_tuner_sleep,
+ .reset = dib80xx_tuner_reset,
+ .sleep = dib80xx_tuner_sleep,
.clock_khz = 12000,
.clock_pad_drive = 4,
.vga_filter = 1,
@@ -1508,8 +1514,8 @@ static struct dib0070_config dib807x_dib0070_config[2] = {
.freq_offset_khz_vhf = -100,
}, {
.i2c_address = DEFAULT_DIB0070_I2C_ADDRESS,
- .reset = dib807x_tuner_reset,
- .sleep = dib807x_tuner_sleep,
+ .reset = dib80xx_tuner_reset,
+ .sleep = dib80xx_tuner_sleep,
.clock_khz = 12000,
.clock_pad_drive = 2,
.vga_filter = 1,
@@ -1566,12 +1572,14 @@ static int dib807x_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int stk807x_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
+static int stk80xx_pid_filter(struct dvb_usb_adapter *adapter, int index,
+ u16 pid, int onoff)
{
return dib8000_pid_filter(adapter->fe, index, pid, onoff);
}
-static int stk807x_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
+static int stk80xx_pid_filter_ctrl(struct dvb_usb_adapter *adapter,
+ int onoff)
{
return dib8000_pid_filter_ctrl(adapter->fe, onoff);
}
@@ -1624,7 +1632,7 @@ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap)
dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
/* initialize IC 0 */
- dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x80);
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x80);
adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80,
&dib807x_dib8000_config[0]);
@@ -1635,7 +1643,7 @@ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap)
static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
{
/* initialize IC 1 */
- dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x82);
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x82);
adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x82,
&dib807x_dib8000_config[1]);
@@ -1643,6 +1651,245 @@ static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap)
return adap->fe == NULL ? -ENODEV : 0;
}
+/* STK8096GP */
+struct dibx000_agc_config dib8090_agc_config[2] = {
+ {
+ BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND,
+ /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1,
+ * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
+ * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
+ (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
+ | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
+
+ 787,
+ 10,
+
+ 0,
+ 118,
+
+ 0,
+ 3530,
+ 1,
+ 5,
+
+ 65535,
+ 0,
+
+ 65535,
+ 0,
+
+ 0,
+ 32,
+ 114,
+ 143,
+ 144,
+ 114,
+ 227,
+ 116,
+ 117,
+
+ 28,
+ 26,
+ 31,
+ 51,
+
+ 0,
+ },
+ {
+ BAND_CBAND,
+ /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1,
+ * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0,
+ * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */
+ (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8)
+ | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0),
+
+ 787,
+ 10,
+
+ 0,
+ 118,
+
+ 0,
+ 3530,
+ 1,
+ 5,
+
+ 0,
+ 0,
+
+ 65535,
+ 0,
+
+ 0,
+ 32,
+ 114,
+ 143,
+ 144,
+ 114,
+ 227,
+ 116,
+ 117,
+
+ 28,
+ 26,
+ 31,
+ 51,
+
+ 0,
+ }
+};
+
+static struct dibx000_bandwidth_config dib8090_pll_config_12mhz = {
+ 54000, 13500,
+ 1, 18, 3, 1, 0,
+ 0, 0, 1, 1, 2,
+ (3 << 14) | (1 << 12) | (599 << 0),
+ (0 << 25) | 0,
+ 20199727,
+ 12000000,
+};
+
+static int dib8090_get_adc_power(struct dvb_frontend *fe)
+{
+ return dib8000_get_adc_power(fe, 1);
+}
+
+static struct dib8000_config dib809x_dib8000_config = {
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_config_count = 2,
+ .agc = dib8090_agc_config,
+ .agc_control = dib0090_dcc_freq,
+ .pll = &dib8090_pll_config_12mhz,
+ .tuner_is_baseband = 1,
+
+ .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB8000_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS,
+
+ .hostbus_diversity = 1,
+ .div_cfg = 0x31,
+ .output_mode = OUTMODE_MPEG2_FIFO,
+ .drives = 0x2d98,
+ .diversity_delay = 144,
+ .refclksel = 3,
+};
+
+static struct dib0090_config dib809x_dib0090_config = {
+ .io.pll_bypass = 1,
+ .io.pll_range = 1,
+ .io.pll_prediv = 1,
+ .io.pll_loopdiv = 20,
+ .io.adc_clock_ratio = 8,
+ .io.pll_int_loop_filt = 0,
+ .io.clock_khz = 12000,
+ .reset = dib80xx_tuner_reset,
+ .sleep = dib80xx_tuner_sleep,
+ .clkouttobamse = 1,
+ .analog_output = 1,
+ .i2c_address = DEFAULT_DIB0090_I2C_ADDRESS,
+ .wbd_vhf_offset = 100,
+ .wbd_cband_offset = 450,
+ .use_pwm_agc = 1,
+ .clkoutdrive = 1,
+ .get_adc_power = dib8090_get_adc_power,
+ .freq_offset_khz_uhf = 0,
+ .freq_offset_khz_vhf = -143,
+};
+
+static int dib8096_set_param_override(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *fep)
+{
+ struct dvb_usb_adapter *adap = fe->dvb->priv;
+ struct dib0700_adapter_state *state = adap->priv;
+ u8 band = BAND_OF_FREQUENCY(fep->frequency/1000);
+ u16 offset;
+ int ret = 0;
+ enum frontend_tune_state tune_state = CT_SHUTDOWN;
+ u16 ltgain, rf_gain_limit;
+
+ ret = state->set_param_save(fe, fep);
+ if (ret < 0)
+ return ret;
+
+ switch (band) {
+ case BAND_VHF:
+ offset = 100;
+ break;
+ case BAND_UHF:
+ offset = 550;
+ break;
+ default:
+ offset = 0;
+ break;
+ }
+ offset += (dib0090_get_wbd_offset(fe) * 8 * 18 / 33 + 1) / 2;
+ dib8000_set_wbd_ref(fe, offset);
+
+
+ if (band == BAND_CBAND) {
+ deb_info("tuning in CBAND - soft-AGC startup\n");
+ /* TODO specific wbd target for dib0090 - needed for startup ? */
+ dib0090_set_tune_state(fe, CT_AGC_START);
+ do {
+ ret = dib0090_gain_control(fe);
+ msleep(ret);
+ tune_state = dib0090_get_tune_state(fe);
+ if (tune_state == CT_AGC_STEP_0)
+ dib8000_set_gpio(fe, 6, 0, 1);
+ else if (tune_state == CT_AGC_STEP_1) {
+ dib0090_get_current_gain(fe, NULL, NULL, &rf_gain_limit, &ltgain);
+ if (rf_gain_limit == 0)
+ dib8000_set_gpio(fe, 6, 0, 0);
+ }
+ } while (tune_state < CT_AGC_STOP);
+ dib0090_pwm_gain_reset(fe);
+ dib8000_pwm_agc_reset(fe);
+ dib8000_set_tune_state(fe, CT_DEMOD_START);
+ } else {
+ deb_info("not tuning in CBAND - standard AGC startup\n");
+ dib0090_pwm_gain_reset(fe);
+ }
+
+ return 0;
+}
+
+static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_adapter_state *st = adap->priv;
+ struct i2c_adapter *tun_i2c = dib8000_get_i2c_master(adap->fe, DIBX000_I2C_INTERFACE_TUNER, 1);
+
+ if (dvb_attach(dib0090_register, adap->fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+ return -ENODEV;
+
+ st->set_param_save = adap->fe->ops.tuner_ops.set_params;
+ adap->fe->ops.tuner_ops.set_params = dib8096_set_param_override;
+ return 0;
+}
+
+static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(10);
+ dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
+ dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
+
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(10);
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+ msleep(10);
+ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
+
+ dib8000_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80);
+
+ adap->fe = dvb_attach(dib8000_attach, &adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config);
+
+ return adap->fe == NULL ? -ENODEV : 0;
+}
/* STK7070PD */
static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
@@ -1929,14 +2176,17 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_STK7700D) },
/* 55 */{ USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_STK7700D_2) },
{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73A) },
- { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
- { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
+ { USB_DEVICE(USB_VID_PCTV, USB_PID_PINNACLE_PCTV73ESE) },
+ { USB_DEVICE(USB_VID_PCTV, USB_PID_PINNACLE_PCTV282E) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK7770P) },
/* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) },
{ USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) },
{ USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) },
+/* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) },
+ { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096GP) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -2238,11 +2488,11 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{ NULL },
},
{ "Pinnacle PCTV 73e SE",
- { &dib0700_usb_id_table[57], NULL },
+ { &dib0700_usb_id_table[57], &dib0700_usb_id_table[65], NULL },
{ NULL },
},
{ "Pinnacle PCTV 282e",
- { &dib0700_usb_id_table[58], NULL },
+ { &dib0700_usb_id_table[58], &dib0700_usb_id_table[66], NULL },
{ NULL },
},
},
@@ -2471,8 +2721,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
- .pid_filter = stk807x_pid_filter,
- .pid_filter_ctrl = stk807x_pid_filter_ctrl,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
.frontend_attach = stk807x_frontend_attach,
.tuner_attach = dib807x_tuner_attach,
@@ -2510,8 +2760,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
- .pid_filter = stk807x_pid_filter,
- .pid_filter_ctrl = stk807x_pid_filter_ctrl,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
.frontend_attach = stk807xpvr_frontend_attach0,
.tuner_attach = dib807x_tuner_attach,
@@ -2523,8 +2773,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
- .pid_filter = stk807x_pid_filter,
- .pid_filter_ctrl = stk807x_pid_filter_ctrl,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
.frontend_attach = stk807xpvr_frontend_attach1,
.tuner_attach = dib807x_tuner_attach,
@@ -2547,6 +2797,37 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.rc_key_map = dib0700_rc_keys,
.rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
.rc_query = dib0700_rc_query
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .caps = DVB_USB_ADAP_HAS_PID_FILTER |
+ DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+ .pid_filter_count = 32,
+ .pid_filter = stk80xx_pid_filter,
+ .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+ .frontend_attach = stk809x_frontend_attach,
+ .tuner_attach = dib809x_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv =
+ sizeof(struct dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DiBcom STK8096GP reference design",
+ { &dib0700_usb_id_table[67], NULL },
+ { NULL },
+ },
+ },
+
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_key_map = dib0700_rc_keys,
+ .rc_key_map_size = ARRAY_SIZE(dib0700_rc_keys),
+ .rc_query = dib0700_rc_query
},
};
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index da34979b533..9143b5631e8 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -142,8 +142,13 @@ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
} else if ((msg[i].flags & I2C_M_RD) == 0) {
if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0)
break;
- } else
- break;
+ } else if (msg[i].addr != 0x50) {
+ /* 0x50 is the address of the eeprom - we need to protect it
+ * from dibusb's bad i2c implementation: reads without
+ * first writing the offset are forbidden */
+ if (dibusb_i2c_msg(d, msg[i].addr, NULL, 0, msg[i].buf, msg[i].len) < 0)
+ break;
+ }
}
mutex_unlock(&d->i2c_mutex);
@@ -243,6 +248,12 @@ static struct dib3000mc_config mod3000p_dib3000p_config = {
int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
{
+ if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
+ adap->dev->udev->descriptor.idProduct ==
+ USB_PID_LITEON_DVB_T_WARM) {
+ msleep(1000);
+ }
+
if ((adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000P_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL ||
(adap->fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, DEFAULT_DIB3000MC_I2C_ADDRESS, &mod3000p_dib3000p_config)) != NULL) {
if (adap->priv != NULL) {
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index f1602d4ace6..bc3581d58ce 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -47,6 +47,7 @@
#define USB_VID_MSI_2 0x1462
#define USB_VID_OPERA1 0x695c
#define USB_VID_PINNACLE 0x2304
+#define USB_VID_PCTV 0x2013
#define USB_VID_PIXELVIEW 0x1554
#define USB_VID_TECHNOTREND 0x0b48
#define USB_VID_TERRATEC 0x0ccd
@@ -101,6 +102,7 @@
#define USB_PID_DIBCOM_STK7070PD 0x1ebe
#define USB_PID_DIBCOM_STK807XP 0x1f90
#define USB_PID_DIBCOM_STK807XPVR 0x1f98
+#define USB_PID_DIBCOM_STK8096GP 0x1fa0
#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131
#define USB_PID_DIBCOM_STK7770P 0x1e80
#define USB_PID_DPOSH_M9206_COLD 0x9206
@@ -211,6 +213,7 @@
#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
#define USB_PID_PINNACLE_PCTV73A 0x0243
#define USB_PID_PINNACLE_PCTV73ESE 0x0245
+#define USB_PID_PINNACLE_PCTV74E 0x0246
#define USB_PID_PINNACLE_PCTV282E 0x0248
#define USB_PID_PIXELVIEW_SBTVD 0x5010
#define USB_PID_PCTV_200E 0x020e
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 5bb9479d154..64132c0cf80 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -20,6 +20,11 @@
#include "tda1002x.h"
#include "mt312.h"
#include "zl10039.h"
+#include "ds3000.h"
+#include "stv0900.h"
+#include "stv6110.h"
+#include "stb6100.h"
+#include "stb6100_proc.h"
#ifndef USB_PID_DW2102
#define USB_PID_DW2102 0x2102
@@ -37,12 +42,20 @@
#define USB_PID_CINERGY_S 0x0064
#endif
+#ifndef USB_PID_TEVII_S630
+#define USB_PID_TEVII_S630 0xd630
+#endif
+
#ifndef USB_PID_TEVII_S650
#define USB_PID_TEVII_S650 0xd650
#endif
-#ifndef USB_PID_TEVII_S630
-#define USB_PID_TEVII_S630 0xd630
+#ifndef USB_PID_TEVII_S660
+#define USB_PID_TEVII_S660 0xd660
+#endif
+
+#ifndef USB_PID_PROF_1100
+#define USB_PID_PROF_1100 0xb012
#endif
#define DW210X_READ_MSG 0
@@ -55,6 +68,10 @@
#define DW2102_VOLTAGE_CTRL (0x1800)
#define DW2102_RC_QUERY (0x1a00)
+#define err_str "did not find the firmware file. (%s) " \
+ "Please see linux/Documentation/dvb/ for more details " \
+ "on firmware-problems."
+
struct dvb_usb_rc_keys_table {
struct dvb_usb_rc_key *rc_keys;
int rc_keys_size;
@@ -71,6 +88,12 @@ static int ir_keymap;
module_param_named(keymap, ir_keymap, int, 0644);
MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ...");
+/* demod probe */
+static int demod_probe = 1;
+module_param_named(demod, demod_probe, int, 0644);
+MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 "
+ "4=stv0903+stb6100(or-able)).");
+
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
@@ -183,7 +206,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
switch (num) {
case 2:
/* read si2109 register by number */
- buf6[0] = 0xd0;
+ buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
buf6[2] = msg[0].buf[0];
ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -198,7 +221,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
switch (msg[0].addr) {
case 0x68:
/* write to si2109 register */
- buf6[0] = 0xd0;
+ buf6[0] = msg[0].addr << 1;
buf6[1] = msg[0].len;
memcpy(buf6 + 2, msg[0].buf, msg[0].len);
ret = dw210x_op_rw(d->udev, 0xc2, 0, 0, buf6,
@@ -239,7 +262,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
/* read */
/* first write first register number */
u8 ibuf[msg[1].len + 2], obuf[3];
- obuf[0] = 0xd0;
+ obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
obuf[2] = msg[0].buf[0];
ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -256,7 +279,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
case 0x68: {
/* write to register */
u8 obuf[msg[0].len + 2];
- obuf[0] = 0xd0;
+ obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -266,7 +289,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
case 0x61: {
/* write to tuner */
u8 obuf[msg[0].len + 2];
- obuf[0] = 0xc2;
+ obuf[0] = msg[0].addr << 1;
obuf[1] = msg[0].len;
memcpy(obuf + 2, msg[0].buf, msg[0].len);
ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
@@ -301,78 +324,78 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret = 0;
- int len, i;
+ int len, i, j;
if (!d)
return -ENODEV;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
- switch (num) {
- case 2: {
- /* read */
- /* first write first register number */
- u8 ibuf[msg[1].len + 2], obuf[3];
- obuf[0] = 0xaa;
- obuf[1] = msg[0].len;
- obuf[2] = msg[0].buf[0];
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
- /* second read registers */
- ret = dw210x_op_rw(d->udev, 0xc3, 0xab , 0,
- ibuf, msg[1].len + 2, DW210X_READ_MSG);
- memcpy(msg[1].buf, ibuf + 2, msg[1].len);
-
- break;
- }
- case 1:
- switch (msg[0].addr) {
- case 0x55: {
- if (msg[0].buf[0] == 0xf7) {
- /* firmware */
- /* Write in small blocks */
- u8 obuf[19];
- obuf[0] = 0xaa;
- obuf[1] = 0x11;
- obuf[2] = 0xf7;
- len = msg[0].len - 1;
- i = 1;
- do {
- memcpy(obuf + 3, msg[0].buf + i, (len > 16 ? 16 : len));
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, (len > 16 ? 16 : len) + 3, DW210X_WRITE_MSG);
- i += 16;
- len -= 16;
- } while (len > 0);
- } else {
- /* write to register */
- u8 obuf[msg[0].len + 2];
- obuf[0] = 0xaa;
- obuf[1] = msg[0].len;
- memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
- }
- break;
- }
+ for (j = 0; j < num; j++) {
+ switch (msg[j].addr) {
case(DW2102_RC_QUERY): {
u8 ibuf[2];
ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 2, DW210X_READ_MSG);
- memcpy(msg[0].buf, ibuf , 2);
+ memcpy(msg[j].buf, ibuf , 2);
break;
}
case(DW2102_VOLTAGE_CTRL): {
u8 obuf[2];
obuf[0] = 0x30;
- obuf[1] = msg[0].buf[0];
+ obuf[1] = msg[j].buf[0];
ret = dw210x_op_rw(d->udev, 0xb2, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
+ /*case 0x55: cx24116
+ case 0x6a: stv0903
+ case 0x68: ds3000, stv0903
+ case 0x60: ts2020, stv6110, stb6100 */
+ default: {
+ if (msg[j].flags == I2C_M_RD) {
+ /* read registers */
+ u8 ibuf[msg[j].len + 2];
+ ret = dw210x_op_rw(d->udev, 0xc3,
+ (msg[j].addr << 1) + 1, 0,
+ ibuf, msg[j].len + 2,
+ DW210X_READ_MSG);
+ memcpy(msg[j].buf, ibuf + 2, msg[j].len);
+ mdelay(10);
+ } else if (((msg[j].buf[0] == 0xb0) &&
+ (msg[j].addr == 0x68)) ||
+ ((msg[j].buf[0] == 0xf7) &&
+ (msg[j].addr == 0x55))) {
+ /* write firmware */
+ u8 obuf[19];
+ obuf[0] = msg[j].addr << 1;
+ obuf[1] = (msg[j].len > 15 ? 17 : msg[j].len);
+ obuf[2] = msg[j].buf[0];
+ len = msg[j].len - 1;
+ i = 1;
+ do {
+ memcpy(obuf + 3, msg[j].buf + i,
+ (len > 16 ? 16 : len));
+ ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ obuf, (len > 16 ? 16 : len) + 3,
+ DW210X_WRITE_MSG);
+ i += 16;
+ len -= 16;
+ } while (len > 0);
+ } else {
+ /* write registers */
+ u8 obuf[msg[j].len + 2];
+ obuf[0] = msg[j].addr << 1;
+ obuf[1] = msg[j].len;
+ memcpy(obuf + 2, msg[j].buf, msg[j].len);
+ ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
+ obuf, msg[j].len + 2,
+ DW210X_WRITE_MSG);
+ }
+ break;
+ }
}
- break;
}
mutex_unlock(&d->i2c_mutex);
@@ -442,63 +465,85 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
return num;
}
-static int s630_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret = 0;
+ int len, i, j;
if (!d)
return -ENODEV;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
- switch (num) {
- case 2: { /* read */
- u8 ibuf[msg[1].len], obuf[3];
- obuf[0] = msg[1].len;
- obuf[1] = (msg[0].addr << 1);
- obuf[2] = msg[0].buf[0];
-
- ret = dw210x_op_rw(d->udev, 0x90, 0, 0,
- obuf, 3, DW210X_WRITE_MSG);
- msleep(5);
- ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
- ibuf, msg[1].len, DW210X_READ_MSG);
- memcpy(msg[1].buf, ibuf, msg[1].len);
- break;
- }
- case 1:
- switch (msg[0].addr) {
- case 0x60:
- case 0x0e: {
- /* write to zl10313, zl10039 register, */
- u8 obuf[msg[0].len + 2];
- obuf[0] = msg[0].len + 1;
- obuf[1] = (msg[0].addr << 1);
- memcpy(obuf + 2, msg[0].buf, msg[0].len);
- ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
- obuf, msg[0].len + 2, DW210X_WRITE_MSG);
- break;
- }
+ for (j = 0; j < num; j++) {
+ switch (msg[j].addr) {
case (DW2102_RC_QUERY): {
u8 ibuf[4];
ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
ibuf, 4, DW210X_READ_MSG);
- msg[0].buf[0] = ibuf[3];
+ memcpy(msg[j].buf, ibuf + 1, 2);
break;
}
case (DW2102_VOLTAGE_CTRL): {
u8 obuf[2];
- obuf[0] = 0x03;
- obuf[1] = msg[0].buf[0];
+ obuf[0] = 3;
+ obuf[1] = msg[j].buf[0];
ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
}
+ /*case 0x55: cx24116
+ case 0x6a: stv0903
+ case 0x68: ds3000, stv0903
+ case 0x60: ts2020, stv6110, stb6100
+ case 0xa0: eeprom */
+ default: {
+ if (msg[j].flags == I2C_M_RD) {
+ /* read registers */
+ u8 ibuf[msg[j].len];
+ ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
+ ibuf, msg[j].len,
+ DW210X_READ_MSG);
+ memcpy(msg[j].buf, ibuf, msg[j].len);
+ break;
+ } else if ((msg[j].buf[0] == 0xb0) &&
+ (msg[j].addr == 0x68)) {
+ /* write firmware */
+ u8 obuf[19];
+ obuf[0] = (msg[j].len > 16 ?
+ 18 : msg[j].len + 1);
+ obuf[1] = msg[j].addr << 1;
+ obuf[2] = msg[j].buf[0];
+ len = msg[j].len - 1;
+ i = 1;
+ do {
+ memcpy(obuf + 3, msg[j].buf + i,
+ (len > 16 ? 16 : len));
+ ret = dw210x_op_rw(d->udev, 0x80, 0, 0,
+ obuf, (len > 16 ? 16 : len) + 3,
+ DW210X_WRITE_MSG);
+ i += 16;
+ len -= 16;
+ } while (len > 0);
+ } else {
+ /* write registers */
+ u8 obuf[msg[j].len + 2];
+ obuf[0] = msg[j].len + 1;
+ obuf[1] = (msg[j].addr << 1);
+ memcpy(obuf + 2, msg[j].buf, msg[j].len);
+ ret = dw210x_op_rw(d->udev,
+ (num > 1 ? 0x90 : 0x80), 0, 0,
+ obuf, msg[j].len + 2,
+ DW210X_WRITE_MSG);
+ break;
+ }
+ break;
+ }
}
- break;
+ msleep(3);
}
mutex_unlock(&d->i2c_mutex);
@@ -535,8 +580,8 @@ static struct i2c_algorithm dw3101_i2c_algo = {
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm s630_i2c_algo = {
- .master_xfer = s630_i2c_transfer,
+static struct i2c_algorithm s6x0_i2c_algo = {
+ .master_xfer = s6x0_i2c_transfer,
.functionality = dw210x_i2c_func,
};
@@ -564,25 +609,34 @@ static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
return 0;
};
-static int s630_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
+static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
int i, ret;
- u8 buf[3], eeprom[256], eepromline[16];
+ u8 ibuf[] = { 0 }, obuf[] = { 0 };
+ u8 eeprom[256], eepromline[16];
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0xa0 >> 1,
+ .flags = 0,
+ .buf = obuf,
+ .len = 1,
+ }, {
+ .addr = 0xa0 >> 1,
+ .flags = I2C_M_RD,
+ .buf = ibuf,
+ .len = 1,
+ }
+ };
for (i = 0; i < 256; i++) {
- buf[0] = 1;
- buf[1] = 0xa0;
- buf[2] = i;
- ret = dw210x_op_rw(d->udev, 0x90, 0, 0,
- buf, 3, DW210X_WRITE_MSG);
- ret = dw210x_op_rw(d->udev, 0x91, 0, 0,
- buf, 1, DW210X_READ_MSG);
- if (ret < 0) {
+ obuf[0] = i;
+ ret = s6x0_i2c_transfer(&d->i2c_adap, msg, 2);
+ if (ret != 2) {
err("read eeprom failed.");
return -1;
} else {
- eepromline[i % 16] = buf[0];
- eeprom[i] = buf[0];
+ eepromline[i % 16] = ibuf[0];
+ eeprom[i] = ibuf[0];
}
if ((i % 16) == 15) {
@@ -644,19 +698,104 @@ static struct mt312_config zl313_config = {
.demod_address = 0x0e,
};
+static struct ds3000_config dw2104_ds3000_config = {
+ .demod_address = 0x68,
+};
+
+static struct stv0900_config dw2104a_stv0900_config = {
+ .demod_address = 0x6a,
+ .demod_mode = 0,
+ .xtal = 27000000,
+ .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */
+ .diseqc_mode = 2,/* 2/3 PWM */
+ .tun1_maddress = 0,/* 0x60 */
+ .tun1_adc = 0,/* 2 Vpp */
+ .path1_mode = 3,
+};
+
+static struct stb6100_config dw2104a_stb6100_config = {
+ .tuner_address = 0x60,
+ .refclock = 27000000,
+};
+
+static struct stv0900_config dw2104_stv0900_config = {
+ .demod_address = 0x68,
+ .demod_mode = 0,
+ .xtal = 8000000,
+ .clkmode = 3,
+ .diseqc_mode = 2,
+ .tun1_maddress = 0,
+ .tun1_adc = 1,/* 1 Vpp */
+ .path1_mode = 3,
+};
+
+static struct stv6110_config dw2104_stv6110_config = {
+ .i2c_address = 0x60,
+ .mclk = 16000000,
+ .clk_div = 1,
+};
+
static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
{
- if ((d->fe = dvb_attach(cx24116_attach, &dw2104_config,
- &d->dev->i2c_adap)) != NULL) {
+ struct dvb_tuner_ops *tuner_ops = NULL;
+
+ if (demod_probe & 4) {
+ d->fe = dvb_attach(stv0900_attach, &dw2104a_stv0900_config,
+ &d->dev->i2c_adap, 0);
+ if (d->fe != NULL) {
+ if (dvb_attach(stb6100_attach, d->fe,
+ &dw2104a_stb6100_config,
+ &d->dev->i2c_adap)) {
+ tuner_ops = &d->fe->ops.tuner_ops;
+ tuner_ops->set_frequency = stb6100_set_freq;
+ tuner_ops->get_frequency = stb6100_get_freq;
+ tuner_ops->set_bandwidth = stb6100_set_bandw;
+ tuner_ops->get_bandwidth = stb6100_get_bandw;
+ d->fe->ops.set_voltage = dw210x_set_voltage;
+ info("Attached STV0900+STB6100!\n");
+ return 0;
+ }
+ }
+ }
+
+ if (demod_probe & 2) {
+ d->fe = dvb_attach(stv0900_attach, &dw2104_stv0900_config,
+ &d->dev->i2c_adap, 0);
+ if (d->fe != NULL) {
+ if (dvb_attach(stv6110_attach, d->fe,
+ &dw2104_stv6110_config,
+ &d->dev->i2c_adap)) {
+ d->fe->ops.set_voltage = dw210x_set_voltage;
+ info("Attached STV0900+STV6110A!\n");
+ return 0;
+ }
+ }
+ }
+
+ if (demod_probe & 1) {
+ d->fe = dvb_attach(cx24116_attach, &dw2104_config,
+ &d->dev->i2c_adap);
+ if (d->fe != NULL) {
+ d->fe->ops.set_voltage = dw210x_set_voltage;
+ info("Attached cx24116!\n");
+ return 0;
+ }
+ }
+
+ d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
+ &d->dev->i2c_adap);
+ if (d->fe != NULL) {
d->fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached cx24116!\n");
+ info("Attached DS3000!\n");
return 0;
}
+
return -EIO;
}
static struct dvb_usb_device_properties dw2102_properties;
static struct dvb_usb_device_properties dw2104_properties;
+static struct dvb_usb_device_properties s6x0_properties;
static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
{
@@ -670,14 +809,17 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
return 0;
}
}
+
if (dw2102_properties.i2c_algo == &dw2102_earda_i2c_algo) {
- /*dw2102_properties.adapter->tuner_attach = dw2102_tuner_attach;*/
d->fe = dvb_attach(stv0288_attach, &earda_config,
&d->dev->i2c_adap);
if (d->fe != NULL) {
- d->fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached stv0288!\n");
- return 0;
+ if (dvb_attach(stb6000_attach, d->fe, 0x61,
+ &d->dev->i2c_adap)) {
+ d->fe->ops.set_voltage = dw210x_set_voltage;
+ info("Attached stv0288!\n");
+ return 0;
+ }
}
}
@@ -705,15 +847,38 @@ static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
-static int s630_frontend_attach(struct dvb_usb_adapter *d)
+static int s6x0_frontend_attach(struct dvb_usb_adapter *d)
{
d->fe = dvb_attach(mt312_attach, &zl313_config,
- &d->dev->i2c_adap);
+ &d->dev->i2c_adap);
+ if (d->fe != NULL) {
+ if (dvb_attach(zl10039_attach, d->fe, 0x60,
+ &d->dev->i2c_adap)) {
+ d->fe->ops.set_voltage = dw210x_set_voltage;
+ info("Attached zl10313+zl10039!\n");
+ return 0;
+ }
+ }
+
+ d->fe = dvb_attach(stv0288_attach, &earda_config,
+ &d->dev->i2c_adap);
+ if (d->fe != NULL) {
+ if (dvb_attach(stb6000_attach, d->fe, 0x61,
+ &d->dev->i2c_adap)) {
+ d->fe->ops.set_voltage = dw210x_set_voltage;
+ info("Attached stv0288+stb6000!\n");
+ return 0;
+ }
+ }
+
+ d->fe = dvb_attach(ds3000_attach, &dw2104_ds3000_config,
+ &d->dev->i2c_adap);
if (d->fe != NULL) {
d->fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached zl10313!\n");
+ info("Attached ds3000+ts2020!\n");
return 0;
}
+
return -EIO;
}
@@ -724,14 +889,6 @@ static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int dw2102_earda_tuner_attach(struct dvb_usb_adapter *adap)
-{
- dvb_attach(stb6000_attach, adap->fe, 0x61,
- &adap->dev->i2c_adap);
-
- return 0;
-}
-
static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe, 0x60,
@@ -740,14 +897,6 @@ static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
-static int s630_zl10039_tuner_attach(struct dvb_usb_adapter *adap)
-{
- dvb_attach(zl10039_attach, adap->fe, 0x60,
- &adap->dev->i2c_adap);
-
- return 0;
-}
-
static struct dvb_usb_rc_key dw210x_rc_keys[] = {
{ 0xf80a, KEY_Q }, /*power*/
{ 0xf80c, KEY_M }, /*mute*/
@@ -922,6 +1071,8 @@ static struct usb_device_id dw2102_table[] = {
{USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
{USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
{USB_DEVICE(0x9022, USB_PID_TEVII_S630)},
+ {USB_DEVICE(0x3011, USB_PID_PROF_1100)},
+ {USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
{ }
};
@@ -935,15 +1086,13 @@ static int dw2102_load_firmware(struct usb_device *dev,
u8 reset;
u8 reset16[] = {0, 0, 0, 0, 0, 0, 0};
const struct firmware *fw;
- const char *filename = "dvb-usb-dw2101.fw";
+ const char *fw_2101 = "dvb-usb-dw2101.fw";
switch (dev->descriptor.idProduct) {
case 0x2101:
- ret = request_firmware(&fw, filename, &dev->dev);
+ ret = request_firmware(&fw, fw_2101, &dev->dev);
if (ret != 0) {
- err("did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details "
- "on firmware-problems.", filename);
+ err(err_str, fw_2101);
return ret;
}
break;
@@ -983,6 +1132,11 @@ static int dw2102_load_firmware(struct usb_device *dev,
}
/* init registers */
switch (dev->descriptor.idProduct) {
+ case USB_PID_PROF_1100:
+ s6x0_properties.rc_key_map = tbs_rc_keys;
+ s6x0_properties.rc_key_map_size =
+ ARRAY_SIZE(tbs_rc_keys);
+ break;
case USB_PID_TEVII_S650:
dw2104_properties.rc_key_map = tevii_rc_keys;
dw2104_properties.rc_key_map_size =
@@ -1021,7 +1175,6 @@ static int dw2102_load_firmware(struct usb_device *dev,
DW210X_READ_MSG);
if (reset16[2] == 0x11) {
dw2102_properties.i2c_algo = &dw2102_earda_i2c_algo;
- dw2102_properties.adapter->tuner_attach = &dw2102_earda_tuner_attach;
break;
}
}
@@ -1184,13 +1337,13 @@ static struct dvb_usb_device_properties dw3101_properties = {
}
};
-static struct dvb_usb_device_properties s630_properties = {
+static struct dvb_usb_device_properties s6x0_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.firmware = "dvb-usb-s630.fw",
.no_reconnect = 1,
- .i2c_algo = &s630_i2c_algo,
+ .i2c_algo = &s6x0_i2c_algo,
.rc_key_map = tevii_rc_keys,
.rc_key_map_size = ARRAY_SIZE(tevii_rc_keys),
.rc_interval = 150,
@@ -1199,12 +1352,12 @@ static struct dvb_usb_device_properties s630_properties = {
.generic_bulk_ctrl_endpoint = 0x81,
.num_adapters = 1,
.download_firmware = dw2102_load_firmware,
- .read_mac_address = s630_read_mac_address,
+ .read_mac_address = s6x0_read_mac_address,
.adapter = {
{
- .frontend_attach = s630_frontend_attach,
+ .frontend_attach = s6x0_frontend_attach,
.streaming_ctrl = NULL,
- .tuner_attach = s630_zl10039_tuner_attach,
+ .tuner_attach = NULL,
.stream = {
.type = USB_BULK,
.count = 8,
@@ -1217,12 +1370,20 @@ static struct dvb_usb_device_properties s630_properties = {
},
}
},
- .num_device_descs = 1,
+ .num_device_descs = 3,
.devices = {
{"TeVii S630 USB",
{&dw2102_table[6], NULL},
{NULL},
},
+ {"Prof 1100 USB ",
+ {&dw2102_table[7], NULL},
+ {NULL},
+ },
+ {"TeVii S660 USB",
+ {&dw2102_table[8], NULL},
+ {NULL},
+ },
}
};
@@ -1235,10 +1396,10 @@ static int dw2102_probe(struct usb_interface *intf,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &dw3101_properties,
THIS_MODULE, NULL, adapter_nr) ||
- 0 == dvb_usb_device_init(intf, &s630_properties,
- THIS_MODULE, NULL, adapter_nr)) {
+ 0 == dvb_usb_device_init(intf, &s6x0_properties,
+ THIS_MODULE, NULL, adapter_nr))
return 0;
- }
+
return -ENODEV;
}
@@ -1269,6 +1430,7 @@ module_exit(dw2102_module_exit);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
" DVB-C 3101 USB2.0,"
- " TeVii S600, S630, S650 USB2.0 devices");
+ " TeVii S600, S630, S650, S660 USB2.0,"
+ " Prof 1100 USB2.0 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/friio-fe.c b/drivers/media/dvb/dvb-usb/friio-fe.c
index 9cbbe42ca44..ebb7b9fd115 100644
--- a/drivers/media/dvb/dvb-usb/friio-fe.c
+++ b/drivers/media/dvb/dvb-usb/friio-fe.c
@@ -134,11 +134,13 @@ static int jdvbt90502_pll_set_freq(struct jdvbt90502_state *state, u32 freq)
deb_fe("%s: freq=%d, step=%d\n", __func__, freq,
state->frontend.ops.info.frequency_stepsize);
/* freq -> oscilator frequency conversion. */
- /* freq: 473,000,000 + n*6,000,000 (no 1/7MHz shift to center freq) */
- /* add 400[1/7 MHZ] = 57.142857MHz. 57MHz for the IF, */
- /* 1/7MHz for center freq shift */
+ /* freq: 473,000,000 + n*6,000,000 [+ 142857 (center freq. shift)] */
f = freq / state->frontend.ops.info.frequency_stepsize;
- f += 400;
+ /* add 399 [1/7 MHz] = 57 MHz for the IF */
+ f += 399;
+ /* add center frequency shift if necessary */
+ if (f % 7 == 0)
+ f++;
pll_freq_cmd[DEMOD_REDIRECT_REG] = JDVBT90502_2ND_I2C_REG; /* 0xFE */
pll_freq_cmd[ADDRESS_BYTE] = state->config.pll_address << 1;
pll_freq_cmd[DIVIDER_BYTE1] = (f >> 8) & 0x7F;
diff --git a/drivers/media/dvb/dvb-usb/gp8psk-fe.c b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
index 20eadf9318e..7a7f1b2b681 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk-fe.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk-fe.c
@@ -146,8 +146,8 @@ static int gp8psk_fe_set_frontend(struct dvb_frontend* fe,
switch (c->delivery_system) {
case SYS_DVBS:
- /* Only QPSK is supported for DVB-S */
- if (c->modulation != QPSK) {
+ /* Allow QPSK and 8PSK (even for DVB-S) */
+ if (c->modulation != QPSK && c->modulation != PSK_8) {
deb_fe("%s: unsupported modulation selected (%d)\n",
__func__, c->modulation);
return -EOPNOTSUPP;
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 58aac018f10..a3b8b697349 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -526,6 +526,15 @@ config DVB_TUNER_DIB0070
This device is only used inside a SiP called together with a
demodulator for now.
+config DVB_TUNER_DIB0090
+ tristate "DiBcom DiB0090 silicon base-band tuner"
+ depends on I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A driver for the silicon baseband tuner DiB0090 from DiBcom.
+ This device is only used inside a SiP called together with a
+ demodulator for now.
+
comment "SEC control devices for DVB-S"
depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 823482535d1..47575cc7b69 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_DVB_TDA10086) += tda10086.o
obj-$(CONFIG_DVB_TDA826X) += tda826x.o
obj-$(CONFIG_DVB_TDA8261) += tda8261.o
obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
+obj-$(CONFIG_DVB_TUNER_DIB0090) += dib0090.o
obj-$(CONFIG_DVB_TUA6100) += tua6100.o
obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index 2dc2723b724..24268ef2753 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -62,7 +62,7 @@ struct au8522_register_config {
The values are as follows from left to right
0="ATV RF" 1="ATV RF13" 2="CVBS" 3="S-Video" 4="PAL" 5=CVBS13" 6="SVideo13"
*/
-struct au8522_register_config filter_coef[] = {
+static const struct au8522_register_config filter_coef[] = {
{AU8522_FILTER_COEF_R410, {0x25, 0x00, 0x25, 0x25, 0x00, 0x00, 0x00} },
{AU8522_FILTER_COEF_R411, {0x20, 0x00, 0x20, 0x20, 0x00, 0x00, 0x00} },
{AU8522_FILTER_COEF_R412, {0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00} },
@@ -104,7 +104,7 @@ struct au8522_register_config filter_coef[] = {
0="SIF" 1="ATVRF/ATVRF13"
Note: the "ATVRF/ATVRF13" mode has never been tested
*/
-struct au8522_register_config lpfilter_coef[] = {
+static const struct au8522_register_config lpfilter_coef[] = {
{0x060b, {0x21, 0x0b} },
{0x060c, {0xad, 0xad} },
{0x060d, {0x70, 0xf0} },
diff --git a/drivers/media/dvb/frontends/dib0070.c b/drivers/media/dvb/frontends/dib0070.c
index 2be17b93e0b..0d12763603b 100644
--- a/drivers/media/dvb/frontends/dib0070.c
+++ b/drivers/media/dvb/frontends/dib0070.c
@@ -49,21 +49,6 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
#define DIB0070_P1G 0x03
#define DIB0070S_P1A 0x02
-enum frontend_tune_state {
- CT_TUNER_START = 10,
- CT_TUNER_STEP_0,
- CT_TUNER_STEP_1,
- CT_TUNER_STEP_2,
- CT_TUNER_STEP_3,
- CT_TUNER_STEP_4,
- CT_TUNER_STEP_5,
- CT_TUNER_STEP_6,
- CT_TUNER_STEP_7,
- CT_TUNER_STOP,
-};
-
-#define FE_CALLBACK_TIME_NEVER 0xffffffff
-
struct dib0070_state {
struct i2c_adapter *i2c;
struct dvb_frontend *fe;
@@ -71,10 +56,10 @@ struct dib0070_state {
u16 wbd_ff_offset;
u8 revision;
- enum frontend_tune_state tune_state;
- u32 current_rf;
+ enum frontend_tune_state tune_state;
+ u32 current_rf;
- /* for the captrim binary search */
+ /* for the captrim binary search */
s8 step;
u16 adc_diff;
@@ -85,7 +70,7 @@ struct dib0070_state {
const struct dib0070_tuning *current_tune_table_index;
const struct dib0070_lna_match *lna_match;
- u8 wbd_gain_current;
+ u8 wbd_gain_current;
u16 wbd_offset_3_3[2];
};
@@ -93,8 +78,8 @@ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
{
u8 b[2];
struct i2c_msg msg[2] = {
- {.addr = state->cfg->i2c_address,.flags = 0,.buf = &reg,.len = 1},
- {.addr = state->cfg->i2c_address,.flags = I2C_M_RD,.buf = b,.len = 2},
+ { .addr = state->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 },
+ { .addr = state->cfg->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2 },
};
if (i2c_transfer(state->i2c, msg, 2) != 2) {
printk(KERN_WARNING "DiB0070 I2C read failed\n");
@@ -106,7 +91,7 @@ static uint16_t dib0070_read_reg(struct dib0070_state *state, u8 reg)
static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
{
u8 b[3] = { reg, val >> 8, val & 0xff };
- struct i2c_msg msg = {.addr = state->cfg->i2c_address,.flags = 0,.buf = b,.len = 3 };
+ struct i2c_msg msg = { .addr = state->cfg->i2c_address, .flags = 0, .buf = b, .len = 3 };
if (i2c_transfer(state->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "DiB0070 I2C write failed\n");
return -EREMOTEIO;
@@ -124,30 +109,30 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
static int dib0070_set_bandwidth(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
{
- struct dib0070_state *state = fe->tuner_priv;
- u16 tmp = dib0070_read_reg(state, 0x02) & 0x3fff;
-
- if (state->fe->dtv_property_cache.bandwidth_hz / 1000 > 7000)
- tmp |= (0 << 14);
- else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 > 6000)
- tmp |= (1 << 14);
- else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 > 5000)
- tmp |= (2 << 14);
- else
- tmp |= (3 << 14);
-
- dib0070_write_reg(state, 0x02, tmp);
-
- /* sharpen the BB filter in ISDB-T to have higher immunity to adjacent channels */
- if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) {
- u16 value = dib0070_read_reg(state, 0x17);
-
- dib0070_write_reg(state, 0x17, value & 0xfffc);
- tmp = dib0070_read_reg(state, 0x01) & 0x01ff;
- dib0070_write_reg(state, 0x01, tmp | (60 << 9));
-
- dib0070_write_reg(state, 0x17, value);
- }
+ struct dib0070_state *state = fe->tuner_priv;
+ u16 tmp = dib0070_read_reg(state, 0x02) & 0x3fff;
+
+ if (state->fe->dtv_property_cache.bandwidth_hz/1000 > 7000)
+ tmp |= (0 << 14);
+ else if (state->fe->dtv_property_cache.bandwidth_hz/1000 > 6000)
+ tmp |= (1 << 14);
+ else if (state->fe->dtv_property_cache.bandwidth_hz/1000 > 5000)
+ tmp |= (2 << 14);
+ else
+ tmp |= (3 << 14);
+
+ dib0070_write_reg(state, 0x02, tmp);
+
+ /* sharpen the BB filter in ISDB-T to have higher immunity to adjacent channels */
+ if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) {
+ u16 value = dib0070_read_reg(state, 0x17);
+
+ dib0070_write_reg(state, 0x17, value & 0xfffc);
+ tmp = dib0070_read_reg(state, 0x01) & 0x01ff;
+ dib0070_write_reg(state, 0x01, tmp | (60 << 9));
+
+ dib0070_write_reg(state, 0x17, value);
+ }
return 0;
}
@@ -160,14 +145,14 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
if (*tune_state == CT_TUNER_STEP_0) {
dib0070_write_reg(state, 0x0f, 0xed10);
- dib0070_write_reg(state, 0x17, 0x0034);
+ dib0070_write_reg(state, 0x17, 0x0034);
dib0070_write_reg(state, 0x18, 0x0032);
state->step = state->captrim = state->fcaptrim = 64;
state->adc_diff = 3000;
ret = 20;
- *tune_state = CT_TUNER_STEP_1;
+ *tune_state = CT_TUNER_STEP_1;
} else if (*tune_state == CT_TUNER_STEP_1) {
state->step /= 2;
dib0070_write_reg(state, 0x14, state->lo4 | state->captrim);
@@ -178,7 +163,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
adc = dib0070_read_reg(state, 0x19);
- dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV", state->captrim, adc, (u32) adc * (u32) 1800 / (u32) 1024);
+ dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
if (adc >= 400) {
adc -= 400;
@@ -193,6 +178,8 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
state->adc_diff = adc;
state->fcaptrim = state->captrim;
+
+
}
state->captrim += (step_sign * state->step);
@@ -213,7 +200,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
static int dib0070_set_ctrl_lo5(struct dvb_frontend *fe, u8 vco_bias_trim, u8 hf_div_trim, u8 cp_current, u8 third_order_filt)
{
struct dib0070_state *state = fe->tuner_priv;
- u16 lo5 = (third_order_filt << 14) | (0 << 13) | (1 << 12) | (3 << 9) | (cp_current << 6) | (hf_div_trim << 3) | (vco_bias_trim << 0);
+ u16 lo5 = (third_order_filt << 14) | (0 << 13) | (1 << 12) | (3 << 9) | (cp_current << 6) | (hf_div_trim << 3) | (vco_bias_trim << 0);
dprintk("CTRL_LO5: 0x%x", lo5);
return dib0070_write_reg(state, 0x15, lo5);
}
@@ -227,99 +214,99 @@ void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
dib0070_write_reg(state, 0x1a, 0x0000);
} else {
dib0070_write_reg(state, 0x1b, 0x4112);
- if (state->cfg->vga_filter != 0) {
- dib0070_write_reg(state, 0x1a, state->cfg->vga_filter);
- dprintk("vga filter register is set to %x", state->cfg->vga_filter);
- } else
- dib0070_write_reg(state, 0x1a, 0x0009);
+ if (state->cfg->vga_filter != 0) {
+ dib0070_write_reg(state, 0x1a, state->cfg->vga_filter);
+ dprintk("vga filter register is set to %x", state->cfg->vga_filter);
+ } else
+ dib0070_write_reg(state, 0x1a, 0x0009);
}
}
EXPORT_SYMBOL(dib0070_ctrl_agc_filter);
struct dib0070_tuning {
- u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
- u8 switch_trim;
- u8 vco_band;
- u8 hfdiv;
- u8 vco_multi;
- u8 presc;
- u8 wbdmux;
- u16 tuner_enable;
+ u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
+ u8 switch_trim;
+ u8 vco_band;
+ u8 hfdiv;
+ u8 vco_multi;
+ u8 presc;
+ u8 wbdmux;
+ u16 tuner_enable;
};
struct dib0070_lna_match {
- u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
- u8 lna_band;
+ u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
+ u8 lna_band;
};
static const struct dib0070_tuning dib0070s_tuning_table[] = {
- {570000, 2, 1, 3, 6, 6, 2, 0x4000 | 0x0800}, /* UHF */
- {700000, 2, 0, 2, 4, 2, 2, 0x4000 | 0x0800},
- {863999, 2, 1, 2, 4, 2, 2, 0x4000 | 0x0800},
- {1500000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400}, /* LBAND */
- {1600000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400},
- {2000000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400},
- {0xffffffff, 0, 0, 8, 1, 2, 1, 0x8000 | 0x1000}, /* SBAND */
+ { 570000, 2, 1, 3, 6, 6, 2, 0x4000 | 0x0800 }, /* UHF */
+ { 700000, 2, 0, 2, 4, 2, 2, 0x4000 | 0x0800 },
+ { 863999, 2, 1, 2, 4, 2, 2, 0x4000 | 0x0800 },
+ { 1500000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400 }, /* LBAND */
+ { 1600000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400 },
+ { 2000000, 0, 1, 1, 2, 2, 4, 0x2000 | 0x0400 },
+ { 0xffffffff, 0, 0, 8, 1, 2, 1, 0x8000 | 0x1000 }, /* SBAND */
};
static const struct dib0070_tuning dib0070_tuning_table[] = {
- {115000, 1, 0, 7, 24, 2, 1, 0x8000 | 0x1000}, /* FM below 92MHz cannot be tuned */
- {179500, 1, 0, 3, 16, 2, 1, 0x8000 | 0x1000}, /* VHF */
- {189999, 1, 1, 3, 16, 2, 1, 0x8000 | 0x1000},
- {250000, 1, 0, 6, 12, 2, 1, 0x8000 | 0x1000},
- {569999, 2, 1, 5, 6, 2, 2, 0x4000 | 0x0800}, /* UHF */
- {699999, 2, 0, 1, 4, 2, 2, 0x4000 | 0x0800},
- {863999, 2, 1, 1, 4, 2, 2, 0x4000 | 0x0800},
- {0xffffffff, 0, 1, 0, 2, 2, 4, 0x2000 | 0x0400}, /* LBAND or everything higher than UHF */
+ { 115000, 1, 0, 7, 24, 2, 1, 0x8000 | 0x1000 }, /* FM below 92MHz cannot be tuned */
+ { 179500, 1, 0, 3, 16, 2, 1, 0x8000 | 0x1000 }, /* VHF */
+ { 189999, 1, 1, 3, 16, 2, 1, 0x8000 | 0x1000 },
+ { 250000, 1, 0, 6, 12, 2, 1, 0x8000 | 0x1000 },
+ { 569999, 2, 1, 5, 6, 2, 2, 0x4000 | 0x0800 }, /* UHF */
+ { 699999, 2, 0, 1, 4, 2, 2, 0x4000 | 0x0800 },
+ { 863999, 2, 1, 1, 4, 2, 2, 0x4000 | 0x0800 },
+ { 0xffffffff, 0, 1, 0, 2, 2, 4, 0x2000 | 0x0400 }, /* LBAND or everything higher than UHF */
};
static const struct dib0070_lna_match dib0070_lna_flip_chip[] = {
- {180000, 0}, /* VHF */
- {188000, 1},
- {196400, 2},
- {250000, 3},
- {550000, 0}, /* UHF */
- {590000, 1},
- {666000, 3},
- {864000, 5},
- {1500000, 0}, /* LBAND or everything higher than UHF */
- {1600000, 1},
- {2000000, 3},
- {0xffffffff, 7},
+ { 180000, 0 }, /* VHF */
+ { 188000, 1 },
+ { 196400, 2 },
+ { 250000, 3 },
+ { 550000, 0 }, /* UHF */
+ { 590000, 1 },
+ { 666000, 3 },
+ { 864000, 5 },
+ { 1500000, 0 }, /* LBAND or everything higher than UHF */
+ { 1600000, 1 },
+ { 2000000, 3 },
+ { 0xffffffff, 7 },
};
static const struct dib0070_lna_match dib0070_lna[] = {
- {180000, 0}, /* VHF */
- {188000, 1},
- {196400, 2},
- {250000, 3},
- {550000, 2}, /* UHF */
- {650000, 3},
- {750000, 5},
- {850000, 6},
- {864000, 7},
- {1500000, 0}, /* LBAND or everything higher than UHF */
- {1600000, 1},
- {2000000, 3},
- {0xffffffff, 7},
+ { 180000, 0 }, /* VHF */
+ { 188000, 1 },
+ { 196400, 2 },
+ { 250000, 3 },
+ { 550000, 2 }, /* UHF */
+ { 650000, 3 },
+ { 750000, 5 },
+ { 850000, 6 },
+ { 864000, 7 },
+ { 1500000, 0 }, /* LBAND or everything higher than UHF */
+ { 1600000, 1 },
+ { 2000000, 3 },
+ { 0xffffffff, 7 },
};
-#define LPF 100 // define for the loop filter 100kHz by default 16-07-06
+#define LPF 100
static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
{
- struct dib0070_state *state = fe->tuner_priv;
+ struct dib0070_state *state = fe->tuner_priv;
- const struct dib0070_tuning *tune;
- const struct dib0070_lna_match *lna_match;
+ const struct dib0070_tuning *tune;
+ const struct dib0070_lna_match *lna_match;
- enum frontend_tune_state *tune_state = &state->tune_state;
- int ret = 10; /* 1ms is the default delay most of the time */
+ enum frontend_tune_state *tune_state = &state->tune_state;
+ int ret = 10; /* 1ms is the default delay most of the time */
- u8 band = (u8) BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000);
- u32 freq = fe->dtv_property_cache.frequency / 1000 + (band == BAND_VHF ? state->cfg->freq_offset_khz_vhf : state->cfg->freq_offset_khz_uhf);
+ u8 band = (u8)BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency/1000);
+ u32 freq = fe->dtv_property_cache.frequency/1000 + (band == BAND_VHF ? state->cfg->freq_offset_khz_vhf : state->cfg->freq_offset_khz_uhf);
#ifdef CONFIG_SYS_ISDBT
- if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1)
+ if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1)
if (((state->fe->dtv_property_cache.isdbt_sb_segment_count % 2)
&& (state->fe->dtv_property_cache.isdbt_sb_segment_idx == ((state->fe->dtv_property_cache.isdbt_sb_segment_count / 2) + 1)))
|| (((state->fe->dtv_property_cache.isdbt_sb_segment_count % 2) == 0)
@@ -328,172 +315,180 @@ static int dib0070_tune_digital(struct dvb_frontend *fe, struct dvb_frontend_par
&& (state->fe->dtv_property_cache.isdbt_sb_segment_idx == ((state->fe->dtv_property_cache.isdbt_sb_segment_count / 2) + 1))))
freq += 850;
#endif
+ if (state->current_rf != freq) {
+
+ switch (state->revision) {
+ case DIB0070S_P1A:
+ tune = dib0070s_tuning_table;
+ lna_match = dib0070_lna;
+ break;
+ default:
+ tune = dib0070_tuning_table;
+ if (state->cfg->flip_chip)
+ lna_match = dib0070_lna_flip_chip;
+ else
+ lna_match = dib0070_lna;
+ break;
+ }
+ while (freq > tune->max_freq) /* find the right one */
+ tune++;
+ while (freq > lna_match->max_freq) /* find the right one */
+ lna_match++;
+
+ state->current_tune_table_index = tune;
+ state->lna_match = lna_match;
+ }
+
+ if (*tune_state == CT_TUNER_START) {
+ dprintk("Tuning for Band: %hd (%d kHz)", band, freq);
if (state->current_rf != freq) {
+ u8 REFDIV;
+ u32 FBDiv, Rest, FREF, VCOF_kHz;
+ u8 Den;
+
+ state->current_rf = freq;
+ state->lo4 = (state->current_tune_table_index->vco_band << 11) | (state->current_tune_table_index->hfdiv << 7);
+
+
+ dib0070_write_reg(state, 0x17, 0x30);
+
+
+ VCOF_kHz = state->current_tune_table_index->vco_multi * freq * 2;
+
+ switch (band) {
+ case BAND_VHF:
+ REFDIV = (u8) ((state->cfg->clock_khz + 9999) / 10000);
+ break;
+ case BAND_FM:
+ REFDIV = (u8) ((state->cfg->clock_khz) / 1000);
+ break;
+ default:
+ REFDIV = (u8) (state->cfg->clock_khz / 10000);
+ break;
+ }
+ FREF = state->cfg->clock_khz / REFDIV;
+
+
switch (state->revision) {
case DIB0070S_P1A:
- tune = dib0070s_tuning_table;
- lna_match = dib0070_lna;
+ FBDiv = (VCOF_kHz / state->current_tune_table_index->presc / FREF);
+ Rest = (VCOF_kHz / state->current_tune_table_index->presc) - FBDiv * FREF;
break;
+
+ case DIB0070_P1G:
+ case DIB0070_P1F:
default:
- tune = dib0070_tuning_table;
- if (state->cfg->flip_chip)
- lna_match = dib0070_lna_flip_chip;
- else
- lna_match = dib0070_lna;
+ FBDiv = (freq / (FREF / 2));
+ Rest = 2 * freq - FBDiv * FREF;
break;
}
- while (freq > tune->max_freq) /* find the right one */
- tune++;
- while (freq > lna_match->max_freq) /* find the right one */
- lna_match++;
- state->current_tune_table_index = tune;
- state->lna_match = lna_match;
- }
+ if (Rest < LPF)
+ Rest = 0;
+ else if (Rest < 2 * LPF)
+ Rest = 2 * LPF;
+ else if (Rest > (FREF - LPF)) {
+ Rest = 0;
+ FBDiv += 1;
+ } else if (Rest > (FREF - 2 * LPF))
+ Rest = FREF - 2 * LPF;
+ Rest = (Rest * 6528) / (FREF / 10);
+
+ Den = 1;
+ if (Rest > 0) {
+ state->lo4 |= (1 << 14) | (1 << 12);
+ Den = 255;
+ }
+
- if (*tune_state == CT_TUNER_START) {
- dprintk("Tuning for Band: %hd (%d kHz)", band, freq);
- if (state->current_rf != freq) {
- u8 REFDIV;
- u32 FBDiv, Rest, FREF, VCOF_kHz;
- u8 Den;
-
- state->current_rf = freq;
- state->lo4 = (state->current_tune_table_index->vco_band << 11) | (state->current_tune_table_index->hfdiv << 7);
-
- dib0070_write_reg(state, 0x17, 0x30);
-
- VCOF_kHz = state->current_tune_table_index->vco_multi * freq * 2;
-
- switch (band) {
- case BAND_VHF:
- REFDIV = (u8) ((state->cfg->clock_khz + 9999) / 10000);
- break;
- case BAND_FM:
- REFDIV = (u8) ((state->cfg->clock_khz) / 1000);
- break;
- default:
- REFDIV = (u8) (state->cfg->clock_khz / 10000);
- break;
- }
- FREF = state->cfg->clock_khz / REFDIV;
-
- switch (state->revision) {
- case DIB0070S_P1A:
- FBDiv = (VCOF_kHz / state->current_tune_table_index->presc / FREF);
- Rest = (VCOF_kHz / state->current_tune_table_index->presc) - FBDiv * FREF;
- break;
-
- case DIB0070_P1G:
- case DIB0070_P1F:
- default:
- FBDiv = (freq / (FREF / 2));
- Rest = 2 * freq - FBDiv * FREF;
- break;
- }
-
- if (Rest < LPF)
- Rest = 0;
- else if (Rest < 2 * LPF)
- Rest = 2 * LPF;
- else if (Rest > (FREF - LPF)) {
- Rest = 0;
- FBDiv += 1;
- } else if (Rest > (FREF - 2 * LPF))
- Rest = FREF - 2 * LPF;
- Rest = (Rest * 6528) / (FREF / 10);
-
- Den = 1;
- if (Rest > 0) {
- state->lo4 |= (1 << 14) | (1 << 12);
- Den = 255;
- }
-
- dib0070_write_reg(state, 0x11, (u16) FBDiv);
- dib0070_write_reg(state, 0x12, (Den << 8) | REFDIV);
- dib0070_write_reg(state, 0x13, (u16) Rest);
-
- if (state->revision == DIB0070S_P1A) {
-
- if (band == BAND_SBAND) {
- dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0);
- dib0070_write_reg(state, 0x1d, 0xFFFF);
- } else
- dib0070_set_ctrl_lo5(fe, 5, 4, 3, 1);
- }
-
- dib0070_write_reg(state, 0x20,
- 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
-
- dprintk("REFDIV: %hd, FREF: %d", REFDIV, FREF);
- dprintk("FBDIV: %d, Rest: %d", FBDiv, Rest);
- dprintk("Num: %hd, Den: %hd, SD: %hd", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
- dprintk("HFDIV code: %hd", state->current_tune_table_index->hfdiv);
- dprintk("VCO = %hd", state->current_tune_table_index->vco_band);
- dprintk("VCOF: ((%hd*%d) << 1))", state->current_tune_table_index->vco_multi, freq);
-
- *tune_state = CT_TUNER_STEP_0;
- } else { /* we are already tuned to this frequency - the configuration is correct */
- ret = 50; /* wakeup time */
- *tune_state = CT_TUNER_STEP_5;
+ dib0070_write_reg(state, 0x11, (u16)FBDiv);
+ dib0070_write_reg(state, 0x12, (Den << 8) | REFDIV);
+ dib0070_write_reg(state, 0x13, (u16) Rest);
+
+ if (state->revision == DIB0070S_P1A) {
+
+ if (band == BAND_SBAND) {
+ dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0);
+ dib0070_write_reg(state, 0x1d, 0xFFFF);
+ } else
+ dib0070_set_ctrl_lo5(fe, 5, 4, 3, 1);
}
- } else if ((*tune_state > CT_TUNER_START) && (*tune_state < CT_TUNER_STEP_4)) {
- ret = dib0070_captrim(state, tune_state);
+ dib0070_write_reg(state, 0x20,
+ 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
- } else if (*tune_state == CT_TUNER_STEP_4) {
- const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain;
- if (tmp != NULL) {
- while (freq / 1000 > tmp->freq) /* find the right one */
- tmp++;
- dib0070_write_reg(state, 0x0f,
- (0 << 15) | (1 << 14) | (3 << 12) | (tmp->wbd_gain_val << 9) | (0 << 8) | (1 << 7) | (state->
- current_tune_table_index->
- wbdmux << 0));
- state->wbd_gain_current = tmp->wbd_gain_val;
- } else {
+ dprintk("REFDIV: %hd, FREF: %d", REFDIV, FREF);
+ dprintk("FBDIV: %d, Rest: %d", FBDiv, Rest);
+ dprintk("Num: %hd, Den: %hd, SD: %hd", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
+ dprintk("HFDIV code: %hd", state->current_tune_table_index->hfdiv);
+ dprintk("VCO = %hd", state->current_tune_table_index->vco_band);
+ dprintk("VCOF: ((%hd*%d) << 1))", state->current_tune_table_index->vco_multi, freq);
+
+ *tune_state = CT_TUNER_STEP_0;
+ } else { /* we are already tuned to this frequency - the configuration is correct */
+ ret = 50; /* wakeup time */
+ *tune_state = CT_TUNER_STEP_5;
+ }
+ } else if ((*tune_state > CT_TUNER_START) && (*tune_state < CT_TUNER_STEP_4)) {
+
+ ret = dib0070_captrim(state, tune_state);
+
+ } else if (*tune_state == CT_TUNER_STEP_4) {
+ const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain;
+ if (tmp != NULL) {
+ while (freq/1000 > tmp->freq) /* find the right one */
+ tmp++;
+ dib0070_write_reg(state, 0x0f,
+ (0 << 15) | (1 << 14) | (3 << 12)
+ | (tmp->wbd_gain_val << 9) | (0 << 8) | (1 << 7)
+ | (state->current_tune_table_index->wbdmux << 0));
+ state->wbd_gain_current = tmp->wbd_gain_val;
+ } else {
dib0070_write_reg(state, 0x0f,
(0 << 15) | (1 << 14) | (3 << 12) | (6 << 9) | (0 << 8) | (1 << 7) | (state->current_tune_table_index->
wbdmux << 0));
- state->wbd_gain_current = 6;
- }
+ state->wbd_gain_current = 6;
+ }
- dib0070_write_reg(state, 0x06, 0x3fff);
+ dib0070_write_reg(state, 0x06, 0x3fff);
dib0070_write_reg(state, 0x07,
(state->current_tune_table_index->switch_trim << 11) | (7 << 8) | (state->lna_match->lna_band << 3) | (3 << 0));
- dib0070_write_reg(state, 0x08, (state->lna_match->lna_band << 10) | (3 << 7) | (127));
- dib0070_write_reg(state, 0x0d, 0x0d80);
+ dib0070_write_reg(state, 0x08, (state->lna_match->lna_band << 10) | (3 << 7) | (127));
+ dib0070_write_reg(state, 0x0d, 0x0d80);
- dib0070_write_reg(state, 0x18, 0x07ff);
- dib0070_write_reg(state, 0x17, 0x0033);
- *tune_state = CT_TUNER_STEP_5;
- } else if (*tune_state == CT_TUNER_STEP_5) {
- dib0070_set_bandwidth(fe, ch);
- *tune_state = CT_TUNER_STOP;
- } else {
- ret = FE_CALLBACK_TIME_NEVER; /* tuner finished, time to call again infinite */
- }
- return ret;
+ dib0070_write_reg(state, 0x18, 0x07ff);
+ dib0070_write_reg(state, 0x17, 0x0033);
+
+
+ *tune_state = CT_TUNER_STEP_5;
+ } else if (*tune_state == CT_TUNER_STEP_5) {
+ dib0070_set_bandwidth(fe, ch);
+ *tune_state = CT_TUNER_STOP;
+ } else {
+ ret = FE_CALLBACK_TIME_NEVER; /* tuner finished, time to call again infinite */
+ }
+ return ret;
}
+
static int dib0070_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
{
- struct dib0070_state *state = fe->tuner_priv;
- uint32_t ret;
+ struct dib0070_state *state = fe->tuner_priv;
+ uint32_t ret;
- state->tune_state = CT_TUNER_START;
+ state->tune_state = CT_TUNER_START;
- do {
- ret = dib0070_tune_digital(fe, p);
- if (ret != FE_CALLBACK_TIME_NEVER)
- msleep(ret / 10);
- else
- break;
- } while (state->tune_state != CT_TUNER_STOP);
+ do {
+ ret = dib0070_tune_digital(fe, p);
+ if (ret != FE_CALLBACK_TIME_NEVER)
+ msleep(ret/10);
+ else
+ break;
+ } while (state->tune_state != CT_TUNER_STOP);
- return 0;
+ return 0;
}
static int dib0070_wakeup(struct dvb_frontend *fe)
@@ -512,92 +507,113 @@ static int dib0070_sleep(struct dvb_frontend *fe)
return 0;
}
-static const u16 dib0070_p1f_defaults[] = {
+u8 dib0070_get_rf_output(struct dvb_frontend *fe)
+{
+ struct dib0070_state *state = fe->tuner_priv;
+ return (dib0070_read_reg(state, 0x07) >> 11) & 0x3;
+}
+EXPORT_SYMBOL(dib0070_get_rf_output);
+
+int dib0070_set_rf_output(struct dvb_frontend *fe, u8 no)
+{
+ struct dib0070_state *state = fe->tuner_priv;
+ u16 rxrf2 = dib0070_read_reg(state, 0x07) & 0xfe7ff;
+ if (no > 3)
+ no = 3;
+ if (no < 1)
+ no = 1;
+ return dib0070_write_reg(state, 0x07, rxrf2 | (no << 11));
+}
+EXPORT_SYMBOL(dib0070_set_rf_output);
+
+static const u16 dib0070_p1f_defaults[] =
+
+{
7, 0x02,
- 0x0008,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0002,
- 0x0100,
+ 0x0008,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0002,
+ 0x0100,
3, 0x0d,
- 0x0d80,
- 0x0001,
- 0x0000,
+ 0x0d80,
+ 0x0001,
+ 0x0000,
4, 0x11,
- 0x0000,
- 0x0103,
- 0x0000,
- 0x0000,
+ 0x0000,
+ 0x0103,
+ 0x0000,
+ 0x0000,
3, 0x16,
- 0x0004 | 0x0040,
- 0x0030,
- 0x07ff,
+ 0x0004 | 0x0040,
+ 0x0030,
+ 0x07ff,
6, 0x1b,
- 0x4112,
- 0xff00,
- 0xc07f,
- 0x0000,
- 0x0180,
- 0x4000 | 0x0800 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001,
+ 0x4112,
+ 0xff00,
+ 0xc07f,
+ 0x0000,
+ 0x0180,
+ 0x4000 | 0x0800 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001,
0,
};
static u16 dib0070_read_wbd_offset(struct dib0070_state *state, u8 gain)
{
- u16 tuner_en = dib0070_read_reg(state, 0x20);
- u16 offset;
-
- dib0070_write_reg(state, 0x18, 0x07ff);
- dib0070_write_reg(state, 0x20, 0x0800 | 0x4000 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001);
- dib0070_write_reg(state, 0x0f, (1 << 14) | (2 << 12) | (gain << 9) | (1 << 8) | (1 << 7) | (0 << 0));
- msleep(9);
- offset = dib0070_read_reg(state, 0x19);
- dib0070_write_reg(state, 0x20, tuner_en);
- return offset;
+ u16 tuner_en = dib0070_read_reg(state, 0x20);
+ u16 offset;
+
+ dib0070_write_reg(state, 0x18, 0x07ff);
+ dib0070_write_reg(state, 0x20, 0x0800 | 0x4000 | 0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001);
+ dib0070_write_reg(state, 0x0f, (1 << 14) | (2 << 12) | (gain << 9) | (1 << 8) | (1 << 7) | (0 << 0));
+ msleep(9);
+ offset = dib0070_read_reg(state, 0x19);
+ dib0070_write_reg(state, 0x20, tuner_en);
+ return offset;
}
static void dib0070_wbd_offset_calibration(struct dib0070_state *state)
{
- u8 gain;
- for (gain = 6; gain < 8; gain++) {
- state->wbd_offset_3_3[gain - 6] = ((dib0070_read_wbd_offset(state, gain) * 8 * 18 / 33 + 1) / 2);
- dprintk("Gain: %d, WBDOffset (3.3V) = %hd", gain, state->wbd_offset_3_3[gain - 6]);
- }
+ u8 gain;
+ for (gain = 6; gain < 8; gain++) {
+ state->wbd_offset_3_3[gain - 6] = ((dib0070_read_wbd_offset(state, gain) * 8 * 18 / 33 + 1) / 2);
+ dprintk("Gain: %d, WBDOffset (3.3V) = %hd", gain, state->wbd_offset_3_3[gain-6]);
+ }
}
u16 dib0070_wbd_offset(struct dvb_frontend *fe)
{
- struct dib0070_state *state = fe->tuner_priv;
- const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain;
- u32 freq = fe->dtv_property_cache.frequency / 1000;
-
- if (tmp != NULL) {
- while (freq / 1000 > tmp->freq) /* find the right one */
- tmp++;
- state->wbd_gain_current = tmp->wbd_gain_val;
+ struct dib0070_state *state = fe->tuner_priv;
+ const struct dib0070_wbd_gain_cfg *tmp = state->cfg->wbd_gain;
+ u32 freq = fe->dtv_property_cache.frequency/1000;
+
+ if (tmp != NULL) {
+ while (freq/1000 > tmp->freq) /* find the right one */
+ tmp++;
+ state->wbd_gain_current = tmp->wbd_gain_val;
} else
- state->wbd_gain_current = 6;
+ state->wbd_gain_current = 6;
- return state->wbd_offset_3_3[state->wbd_gain_current - 6];
+ return state->wbd_offset_3_3[state->wbd_gain_current - 6];
}
-
EXPORT_SYMBOL(dib0070_wbd_offset);
#define pgm_read_word(w) (*w)
static int dib0070_reset(struct dvb_frontend *fe)
{
- struct dib0070_state *state = fe->tuner_priv;
+ struct dib0070_state *state = fe->tuner_priv;
u16 l, r, *n;
HARD_RESET(state);
+
#ifndef FORCE_SBAND_TUNER
if ((dib0070_read_reg(state, 0x22) >> 9) & 0x1)
state->revision = (dib0070_read_reg(state, 0x1f) >> 8) & 0xff;
@@ -605,7 +621,7 @@ static int dib0070_reset(struct dvb_frontend *fe)
#else
#warning forcing SBAND
#endif
- state->revision = DIB0070S_P1A;
+ state->revision = DIB0070S_P1A;
/* P1F or not */
dprintk("Revision: %x", state->revision);
@@ -620,7 +636,7 @@ static int dib0070_reset(struct dvb_frontend *fe)
while (l) {
r = pgm_read_word(n++);
do {
- dib0070_write_reg(state, (u8) r, pgm_read_word(n++));
+ dib0070_write_reg(state, (u8)r, pgm_read_word(n++));
r++;
} while (--l);
l = pgm_read_word(n++);
@@ -633,6 +649,7 @@ static int dib0070_reset(struct dvb_frontend *fe)
else
r = 2;
+
r |= state->cfg->osc_buffer_state << 3;
dib0070_write_reg(state, 0x10, r);
@@ -643,16 +660,24 @@ static int dib0070_reset(struct dvb_frontend *fe)
dib0070_write_reg(state, 0x02, r | (1 << 5));
}
- if (state->revision == DIB0070S_P1A)
- dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0);
- else
+ if (state->revision == DIB0070S_P1A)
+ dib0070_set_ctrl_lo5(fe, 2, 4, 3, 0);
+ else
dib0070_set_ctrl_lo5(fe, 5, 4, state->cfg->charge_pump, state->cfg->enable_third_order_filter);
dib0070_write_reg(state, 0x01, (54 << 9) | 0xc8);
- dib0070_wbd_offset_calibration(state);
+ dib0070_wbd_offset_calibration(state);
- return 0;
+ return 0;
+}
+
+static int dib0070_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct dib0070_state *state = fe->tuner_priv;
+
+ *frequency = 1000 * state->current_rf;
+ return 0;
}
static int dib0070_release(struct dvb_frontend *fe)
@@ -664,18 +689,18 @@ static int dib0070_release(struct dvb_frontend *fe)
static const struct dvb_tuner_ops dib0070_ops = {
.info = {
- .name = "DiBcom DiB0070",
- .frequency_min = 45000000,
- .frequency_max = 860000000,
- .frequency_step = 1000,
- },
- .release = dib0070_release,
-
- .init = dib0070_wakeup,
- .sleep = dib0070_sleep,
- .set_params = dib0070_tune,
-
-// .get_frequency = dib0070_get_frequency,
+ .name = "DiBcom DiB0070",
+ .frequency_min = 45000000,
+ .frequency_max = 860000000,
+ .frequency_step = 1000,
+ },
+ .release = dib0070_release,
+
+ .init = dib0070_wakeup,
+ .sleep = dib0070_sleep,
+ .set_params = dib0070_tune,
+
+ .get_frequency = dib0070_get_frequency,
// .get_bandwidth = dib0070_get_bandwidth
};
@@ -687,7 +712,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
state->cfg = cfg;
state->i2c = i2c;
- state->fe = fe;
+ state->fe = fe;
fe->tuner_priv = state;
if (dib0070_reset(fe) != 0)
@@ -699,12 +724,11 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
fe->tuner_priv = state;
return fe;
- free_mem:
+free_mem:
kfree(state);
fe->tuner_priv = NULL;
return NULL;
}
-
EXPORT_SYMBOL(dib0070_attach);
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
diff --git a/drivers/media/dvb/frontends/dib0070.h b/drivers/media/dvb/frontends/dib0070.h
index eec9e52ffa7..45c31fae396 100644
--- a/drivers/media/dvb/frontends/dib0070.h
+++ b/drivers/media/dvb/frontends/dib0070.h
@@ -52,6 +52,8 @@ struct dib0070_config {
extern struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg);
extern u16 dib0070_wbd_offset(struct dvb_frontend *);
extern void dib0070_ctrl_agc_filter(struct dvb_frontend *, u8 open);
+extern u8 dib0070_get_rf_output(struct dvb_frontend *fe);
+extern int dib0070_set_rf_output(struct dvb_frontend *fe, u8 no);
#else
static inline struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0070_config *cfg)
{
@@ -62,7 +64,7 @@ static inline struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struc
static inline u16 dib0070_wbd_offset(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
+ return 0;
}
static inline void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
diff --git a/drivers/media/dvb/frontends/dib0090.c b/drivers/media/dvb/frontends/dib0090.c
new file mode 100644
index 00000000000..614552709a6
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib0090.c
@@ -0,0 +1,1522 @@
+/*
+ * Linux-DVB Driver for DiBcom's DiB0090 base-band RF Tuner.
+ *
+ * Copyright (C) 2005-9 DiBcom (http://www.dibcom.fr/)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * This code is more or less generated from another driver, please
+ * excuse some codingstyle oddities.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+
+#include "dvb_frontend.h"
+
+#include "dib0090.h"
+#include "dibx000_common.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
+
+#define dprintk(args...) do { \
+ if (debug) { \
+ printk(KERN_DEBUG "DiB0090: "); \
+ printk(args); \
+ printk("\n"); \
+ } \
+} while (0)
+
+#define CONFIG_SYS_ISDBT
+#define CONFIG_BAND_CBAND
+#define CONFIG_BAND_VHF
+#define CONFIG_BAND_UHF
+#define CONFIG_DIB0090_USE_PWM_AGC
+
+#define EN_LNA0 0x8000
+#define EN_LNA1 0x4000
+#define EN_LNA2 0x2000
+#define EN_LNA3 0x1000
+#define EN_MIX0 0x0800
+#define EN_MIX1 0x0400
+#define EN_MIX2 0x0200
+#define EN_MIX3 0x0100
+#define EN_IQADC 0x0040
+#define EN_PLL 0x0020
+#define EN_TX 0x0010
+#define EN_BB 0x0008
+#define EN_LO 0x0004
+#define EN_BIAS 0x0001
+
+#define EN_IQANA 0x0002
+#define EN_DIGCLK 0x0080 /* not in the 0x24 reg, only in 0x1b */
+#define EN_CRYSTAL 0x0002
+
+#define EN_UHF 0x22E9
+#define EN_VHF 0x44E9
+#define EN_LBD 0x11E9
+#define EN_SBD 0x44E9
+#define EN_CAB 0x88E9
+
+#define pgm_read_word(w) (*w)
+
+struct dc_calibration;
+
+struct dib0090_tuning {
+ u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
+ u8 switch_trim;
+ u8 lna_tune;
+ u8 lna_bias;
+ u16 v2i;
+ u16 mix;
+ u16 load;
+ u16 tuner_enable;
+};
+
+struct dib0090_pll {
+ u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */
+ u8 vco_band;
+ u8 hfdiv_code;
+ u8 hfdiv;
+ u8 topresc;
+};
+
+struct dib0090_state {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend *fe;
+ const struct dib0090_config *config;
+
+ u8 current_band;
+ u16 revision;
+ enum frontend_tune_state tune_state;
+ u32 current_rf;
+
+ u16 wbd_offset;
+ s16 wbd_target; /* in dB */
+
+ s16 rf_gain_limit; /* take-over-point: where to split between bb and rf gain */
+ s16 current_gain; /* keeps the currently programmed gain */
+ u8 agc_step; /* new binary search */
+
+ u16 gain[2]; /* for channel monitoring */
+
+ const u16 *rf_ramp;
+ const u16 *bb_ramp;
+
+ /* for the software AGC ramps */
+ u16 bb_1_def;
+ u16 rf_lt_def;
+ u16 gain_reg[4];
+
+ /* for the captrim/dc-offset search */
+ s8 step;
+ s16 adc_diff;
+ s16 min_adc_diff;
+
+ s8 captrim;
+ s8 fcaptrim;
+
+ const struct dc_calibration *dc;
+ u16 bb6, bb7;
+
+ const struct dib0090_tuning *current_tune_table_index;
+ const struct dib0090_pll *current_pll_table_index;
+
+ u8 tuner_is_tuned;
+ u8 agc_freeze;
+
+ u8 reset;
+};
+
+static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
+{
+ u8 b[2];
+ struct i2c_msg msg[2] = {
+ {.addr = state->config->i2c_address, .flags = 0, .buf = &reg, .len = 1},
+ {.addr = state->config->i2c_address, .flags = I2C_M_RD, .buf = b, .len = 2},
+ };
+ if (i2c_transfer(state->i2c, msg, 2) != 2) {
+ printk(KERN_WARNING "DiB0090 I2C read failed\n");
+ return 0;
+ }
+ return (b[0] << 8) | b[1];
+}
+
+static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
+{
+ u8 b[3] = { reg & 0xff, val >> 8, val & 0xff };
+ struct i2c_msg msg = {.addr = state->config->i2c_address, .flags = 0, .buf = b, .len = 3 };
+ if (i2c_transfer(state->i2c, &msg, 1) != 1) {
+ printk(KERN_WARNING "DiB0090 I2C write failed\n");
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+#define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0)
+#define ADC_TARGET -220
+#define GAIN_ALPHA 5
+#define WBD_ALPHA 6
+#define LPF 100
+static void dib0090_write_regs(struct dib0090_state *state, u8 r, const u16 * b, u8 c)
+{
+ do {
+ dib0090_write_reg(state, r++, *b++);
+ } while (--c);
+}
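/*
 * Illustrative note (editorial, not part of the patch): dib0090_write_regs()
 * programs 'c' consecutive registers starting at 'r' from the array 'b'.
 * The PWM ramp helpers below rely on this, e.g.
 *
 *   dib0090_write_regs(state, 0x2c, cfg + 3, 6);  // cfg[3]..cfg[8] -> regs 0x2c..0x31
 *   dib0090_write_regs(state, 0x3e, cfg + 9, 2);  // cfg[9], cfg[10] -> regs 0x3e, 0x3f
 *
 * which matches the register numbers quoted in the rf_ramp_pwm_* comments.
 */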
+
+static u16 dib0090_identify(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ u16 v;
+
+ v = dib0090_read_reg(state, 0x1a);
+
+#ifdef FIRMWARE_FIREFLY
+ /* pll is not yet locked */
+ if (!(v & 0x800))
+ dprintk("FE%d : Identification : pll is not yet locked", fe->id);
+#endif
+
+ /* without PLL lock info */
+ v &= 0x3ff;
+ dprintk("P/V: %04x:", v);
+
+ if ((v >> 8) & 0xf)
+ dprintk("FE%d : Product ID = 0x%x : KROSUS", fe->id, (v >> 8) & 0xf);
+ else
+ return 0xff;
+
+ v &= 0xff;
+ if (((v >> 5) & 0x7) == 0x1)
+ dprintk("FE%d : MP001 : 9090/8096", fe->id);
+ else if (((v >> 5) & 0x7) == 0x4)
+ dprintk("FE%d : MP005 : Single Sband", fe->id);
+ else if (((v >> 5) & 0x7) == 0x6)
+ dprintk("FE%d : MP008 : diversity VHF-UHF-LBAND", fe->id);
+ else if (((v >> 5) & 0x7) == 0x7)
+ dprintk("FE%d : MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND", fe->id);
+ else
+ return 0xff;
+
+ /* revision only */
+ if ((v & 0x1f) == 0x3)
+ dprintk("FE%d : P1-D/E/F detected", fe->id);
+ else if ((v & 0x1f) == 0x1)
+ dprintk("FE%d : P1C detected", fe->id);
+ else if ((v & 0x1f) == 0x0) {
+#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
+ dprintk("FE%d : P1-A/B detected: using previous driver - support will be removed soon", fe->id);
+ dib0090_p1b_register(fe);
+#else
+ dprintk("FE%d : P1-A/B detected: driver is deactivated - not available", fe->id);
+ return 0xff;
+#endif
+ }
+
+ return v;
+}
+
+static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ HARD_RESET(state);
+
+ dib0090_write_reg(state, 0x24, EN_PLL);
+ dib0090_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL); /* PLL, DIG_CLK and CRYSTAL remain */
+
+ /* adcClkOutRatio=8->7, release reset */
+ dib0090_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (0 << 4) | 0);
+ if (cfg->clkoutdrive != 0)
+ dib0090_write_reg(state, 0x23,
+ (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 10) | (1 << 9) | (0 << 8)
+ | (cfg->clkoutdrive << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0));
+ else
+ dib0090_write_reg(state, 0x23,
+ (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 10) | (1 << 9) | (0 << 8)
+ | (7 << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0));
+
+ /* enable pll, de-activate reset, ratio: 2/1 = 60MHz */
+ dib0090_write_reg(state, 0x21,
+ (cfg->io.pll_bypass << 15) | (1 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv));
+
+}
+
+static int dib0090_wakeup(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ if (state->config->sleep)
+ state->config->sleep(fe, 0);
+ return 0;
+}
+
+static int dib0090_sleep(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ if (state->config->sleep)
+ state->config->sleep(fe, 1);
+ return 0;
+}
+
+extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ if (fast)
+ dib0090_write_reg(state, 0x04, 0);
+ else
+ dib0090_write_reg(state, 0x04, 1);
+}
+EXPORT_SYMBOL(dib0090_dcc_freq);
+
+static const u16 rf_ramp_pwm_cband[] = {
+ 0, /* max RF gain in 10th of dB */
+ 0, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */
+ 0, /* ramp_max = maximum X used on the ramp */
+ (0 << 10) | 0, /* 0x2c, LNA 1 = 0dB */
+ (0 << 10) | 0, /* 0x2d, LNA 1 */
+ (0 << 10) | 0, /* 0x2e, LNA 2 = 0dB */
+ (0 << 10) | 0, /* 0x2f, LNA 2 */
+ (0 << 10) | 0, /* 0x30, LNA 3 = 0dB */
+ (0 << 10) | 0, /* 0x31, LNA 3 */
+ (0 << 10) | 0, /* GAIN_4_1, LNA 4 = 0dB */
+ (0 << 10) | 0, /* GAIN_4_2, LNA 4 */
+};
+
+static const u16 rf_ramp_vhf[] = {
+ 412, /* max RF gain in 10th of dB */
+ 132, 307, 127, /* LNA1, 13.2dB */
+ 105, 412, 255, /* LNA2, 10.5dB */
+ 50, 50, 127, /* LNA3, 5dB */
+ 125, 175, 127, /* LNA4, 12.5dB */
+ 0, 0, 127, /* CBAND, 0dB */
+};
+
+static const u16 rf_ramp_uhf[] = {
+ 412, /* max RF gain in 10th of dB */
+ 132, 307, 127, /* LNA1 : total gain = 13.2dB, point on the ramp where this amp is full gain, value to write to get full gain */
+ 105, 412, 255, /* LNA2 : 10.5 dB */
+ 50, 50, 127, /* LNA3 : 5.0 dB */
+ 125, 175, 127, /* LNA4 : 12.5 dB */
+ 0, 0, 127, /* CBAND : 0.0 dB */
+};
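/*
 * Worked example (editorial illustration, not part of the driver source):
 * each triplet above is { amp gain in 1/10 dB, ramp position at which the
 * amp reaches full gain, register value for full gain }, consumed by the
 * software ramp loop in dib0090_gain_apply() below.  For a requested RF
 * gain of 20.0 dB (ref = 200) against rf_ramp_uhf:
 *
 *   LNA3 {50, 50, 127}   : ref >= 50        -> v = 127 (full, 5.0 dB)
 *   LNA4 {125, 175, 127} : ref >= 175       -> v = 127 (full, 12.5 dB)
 *   LNA1 {132, 307, 127} : 175 <= ref < 307 -> v = ((200 - 175) * 127) / 132 = 24 (~2.5 dB)
 *   LNA2 {105, 412, 255} : ref < 307        -> v = 0
 */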
+
+static const u16 rf_ramp_cband[] = {
+ 332, /* max RF gain in 10th of dB */
+ 132, 252, 127, /* LNA1, dB */
+ 80, 332, 255, /* LNA2, dB */
+ 0, 0, 127, /* LNA3, dB */
+ 0, 0, 127, /* LNA4, dB */
+ 120, 120, 127, /* LT1 CBAND */
+};
+
+static const u16 rf_ramp_pwm_vhf[] = {
+ 404, /* max RF gain in 10th of dB */
+ 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */
+ 1011, /* ramp_max = maximum X used on the ramp */
+ (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */
+ (0 << 10) | 756, /* 0x2d, LNA 1 */
+ (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */
+ (0 << 10) | 1011, /* 0x2f, LNA 2 */
+ (16 << 10) | 290, /* 0x30, LNA 3 = 5dB */
+ (0 << 10) | 417, /* 0x31, LNA 3 */
+ (7 << 10) | 0, /* GAIN_4_1, LNA 4 = 12.5dB */
+ (0 << 10) | 290, /* GAIN_4_2, LNA 4 */
+};
+
+static const u16 rf_ramp_pwm_uhf[] = {
+ 404, /* max RF gain in 10th of dB */
+ 25, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x2b */
+ 1011, /* ramp_max = maximum X used on the ramp */
+ (6 << 10) | 417, /* 0x2c, LNA 1 = 13.2dB */
+ (0 << 10) | 756, /* 0x2d, LNA 1 */
+ (16 << 10) | 756, /* 0x2e, LNA 2 = 10.5dB */
+ (0 << 10) | 1011, /* 0x2f, LNA 2 */
+ (16 << 10) | 0, /* 0x30, LNA 3 = 5dB */
+ (0 << 10) | 127, /* 0x31, LNA 3 */
+ (7 << 10) | 127, /* GAIN_4_1, LNA 4 = 12.5dB */
+ (0 << 10) | 417, /* GAIN_4_2, LNA 4 */
+};
+
+static const u16 bb_ramp_boost[] = {
+ 550, /* max BB gain in 10th of dB */
+ 260, 260, 26, /* BB1, 26dB */
+ 290, 550, 29, /* BB2, 29dB */
+};
+
+static const u16 bb_ramp_pwm_normal[] = {
+ 500, /* max RF gain in 10th of dB */
+ 8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> 0x34 */
+ 400,
+ (2 << 9) | 0, /* 0x35 = 21dB */
+ (0 << 9) | 168, /* 0x36 */
+ (2 << 9) | 168, /* 0x37 = 29dB */
+ (0 << 9) | 400, /* 0x38 */
+};
+
+struct slope {
+ int16_t range;
+ int16_t slope;
+};
+static u16 slopes_to_scale(const struct slope *slopes, u8 num, s16 val)
+{
+ u8 i;
+ u16 rest;
+ u16 ret = 0;
+ for (i = 0; i < num; i++) {
+ if (val > slopes[i].range)
+ rest = slopes[i].range;
+ else
+ rest = val;
+ ret += (rest * slopes[i].slope) / slopes[i].range;
+ val -= rest;
+ }
+ return ret;
+}
+
+static const struct slope dib0090_wbd_slopes[3] = {
+ {66, 120}, /* -64,-52: offset - 65 */
+ {600, 170}, /* -52,-35: 65 - 665 */
+ {170, 250}, /* -45,-10: 665 - 835 */
+};
+
+static s16 dib0090_wbd_to_db(struct dib0090_state *state, u16 wbd)
+{
+ wbd &= 0x3ff;
+ if (wbd < state->wbd_offset)
+ wbd = 0;
+ else
+ wbd -= state->wbd_offset;
+ /* -64dB is the floor */
+ return -640 + (s16) slopes_to_scale(dib0090_wbd_slopes, ARRAY_SIZE(dib0090_wbd_slopes), wbd);
+}
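/*
 * Numerical example (editorial illustration): with the three slopes above,
 * a reading 100 counts above state->wbd_offset scales as
 *   66 * 120 / 66          = 120   (first segment, saturated)
 *   (100 - 66) * 170 / 600 =   9   (second segment, integer division)
 * so dib0090_wbd_to_db() returns -640 + 129 = -511, i.e. about -51.1 dB.
 */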
+
+static void dib0090_wbd_target(struct dib0090_state *state, u32 rf)
+{
+ u16 offset = 250;
+
+ /* TODO : DAB digital N+/-1 interferer perfs : offset = 10 */
+
+ if (state->current_band == BAND_VHF)
+ offset = 650;
+#ifndef FIRMWARE_FIREFLY
+ if (state->current_band == BAND_VHF)
+ offset = state->config->wbd_vhf_offset;
+ if (state->current_band == BAND_CBAND)
+ offset = state->config->wbd_cband_offset;
+#endif
+
+ state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset);
+ dprintk("wbd-target: %d dB", (u32) state->wbd_target);
+}
+
+static const int gain_reg_addr[4] = {
+ 0x08, 0x0a, 0x0f, 0x01
+};
+
+static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16 top_delta, u8 force)
+{
+ u16 rf, bb, ref;
+ u16 i, v, gain_reg[4] = { 0 }, gain;
+ const u16 *g;
+
+ if (top_delta < -511)
+ top_delta = -511;
+ if (top_delta > 511)
+ top_delta = 511;
+
+ if (force) {
+ top_delta *= (1 << WBD_ALPHA);
+ gain_delta *= (1 << GAIN_ALPHA);
+ }
+
+ if (top_delta >= ((s16) (state->rf_ramp[0] << WBD_ALPHA) - state->rf_gain_limit)) /* overflow */
+ state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA;
+ else
+ state->rf_gain_limit += top_delta;
+
+ if (state->rf_gain_limit < 0) /*underflow */
+ state->rf_gain_limit = 0;
+
+ /* use gain as a temporary variable and correct current_gain */
+ gain = ((state->rf_gain_limit >> WBD_ALPHA) + state->bb_ramp[0]) << GAIN_ALPHA;
+ if (gain_delta >= ((s16) gain - state->current_gain)) /* overflow */
+ state->current_gain = gain;
+ else
+ state->current_gain += gain_delta;
+ /* cannot be less than 0 (only if gain_delta is less than 0 we can have current_gain < 0) */
+ if (state->current_gain < 0)
+ state->current_gain = 0;
+
+ /* now split total gain to rf and bb gain */
+ gain = state->current_gain >> GAIN_ALPHA;
+
+ /* requested gain is bigger than rf gain limit - ACI/WBD adjustment */
+ if (gain > (state->rf_gain_limit >> WBD_ALPHA)) {
+ rf = state->rf_gain_limit >> WBD_ALPHA;
+ bb = gain - rf;
+ if (bb > state->bb_ramp[0])
+ bb = state->bb_ramp[0];
+ } else { /* high signal level -> all gains put on RF */
+ rf = gain;
+ bb = 0;
+ }
+
+ state->gain[0] = rf;
+ state->gain[1] = bb;
+
+ /* software ramp */
+ /* Start with RF gains */
+ g = state->rf_ramp + 1; /* point on RF LNA1 max gain */
+ ref = rf;
+ for (i = 0; i < 7; i++) { /* Go over all amplifiers => 5RF amps + 2 BB amps = 7 amps */
+ if (g[0] == 0 || ref < (g[1] - g[0])) /* if total gain of the current amp is null or this amp is not concerned because it starts to work from a higher gain value */
+ v = 0; /* force the gain to write for the current amp to be null */
+ else if (ref >= g[1]) /* Gain to set is higher than the high working point of this amp */
+ v = g[2]; /* force this amp to be full gain */
+ else /* compute the value to set to this amp because we are somewhere in his range */
+ v = ((ref - (g[1] - g[0])) * g[2]) / g[0];
+
+ if (i == 0) /* LNA 1 reg mapping */
+ gain_reg[0] = v;
+ else if (i == 1) /* LNA 2 reg mapping */
+ gain_reg[0] |= v << 7;
+ else if (i == 2) /* LNA 3 reg mapping */
+ gain_reg[1] = v;
+ else if (i == 3) /* LNA 4 reg mapping */
+ gain_reg[1] |= v << 7;
+ else if (i == 4) /* CBAND LNA reg mapping */
+ gain_reg[2] = v | state->rf_lt_def;
+ else if (i == 5) /* BB gain 1 reg mapping */
+ gain_reg[3] = v << 3;
+ else if (i == 6) /* BB gain 2 reg mapping */
+ gain_reg[3] |= v << 8;
+
+ g += 3; /* go to next gain bloc */
+
+ /* When RF is finished, start with BB */
+ if (i == 4) {
+ g = state->bb_ramp + 1; /* point on BB gain 1 max gain */
+ ref = bb;
+ }
+ }
+ gain_reg[3] |= state->bb_1_def;
+ gain_reg[3] |= ((bb % 10) * 100) / 125;
+
+#ifdef DEBUG_AGC
+ dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x", rf, bb, rf + bb,
+ gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]);
+#endif
+
+ /* Write the amplifier regs */
+ for (i = 0; i < 4; i++) {
+ v = gain_reg[i];
+ if (force || state->gain_reg[i] != v) {
+ state->gain_reg[i] = v;
+ dib0090_write_reg(state, gain_reg_addr[i], v);
+ }
+ }
+}
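/*
 * Example of the RF/BB split performed above (editorial illustration): with
 * rf_ramp_uhf (max 41.2 dB of RF gain) and bb_ramp_boost (max 55.0 dB of BB
 * gain), a requested total gain of 60.0 dB (gain = 600) yields rf = 412 and
 * bb = 188 while rf_gain_limit still allows full RF gain; if the WBD loop
 * has pulled the limit down to 25.0 dB, the same request yields rf = 250 and
 * bb = 350 (bb is always clamped to bb_ramp[0]).
 */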
+
+static void dib0090_set_boost(struct dib0090_state *state, int onoff)
+{
+ state->bb_1_def &= 0xdfff;
+ state->bb_1_def |= onoff << 13;
+}
+
+static void dib0090_set_rframp(struct dib0090_state *state, const u16 * cfg)
+{
+ state->rf_ramp = cfg;
+}
+
+static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg)
+{
+ state->rf_ramp = cfg;
+
+ dib0090_write_reg(state, 0x2a, 0xffff);
+
+ dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
+
+ dib0090_write_regs(state, 0x2c, cfg + 3, 6);
+ dib0090_write_regs(state, 0x3e, cfg + 9, 2);
+}
+
+static void dib0090_set_bbramp(struct dib0090_state *state, const u16 * cfg)
+{
+ state->bb_ramp = cfg;
+ dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher than 50dB */
+}
+
+static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg)
+{
+ state->bb_ramp = cfg;
+
+ dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher than 50dB */
+
+ dib0090_write_reg(state, 0x33, 0xffff);
+ dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33));
+ dib0090_write_regs(state, 0x35, cfg + 3, 4);
+}
+
+void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ /* reset the AGC */
+
+ if (state->config->use_pwm_agc) {
+#ifdef CONFIG_BAND_SBAND
+ if (state->current_band == BAND_SBAND) {
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_sband);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_boost);
+ } else
+#endif
+#ifdef CONFIG_BAND_CBAND
+ if (state->current_band == BAND_CBAND) {
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_cband);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ } else
+#endif
+#ifdef CONFIG_BAND_VHF
+ if (state->current_band == BAND_VHF) {
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_vhf);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ } else
+#endif
+ {
+ dib0090_set_rframp_pwm(state, rf_ramp_pwm_uhf);
+ dib0090_set_bbramp_pwm(state, bb_ramp_pwm_normal);
+ }
+
+ if (state->rf_ramp[0] != 0)
+ dib0090_write_reg(state, 0x32, (3 << 11));
+ else
+ dib0090_write_reg(state, 0x32, (0 << 11));
+
+ dib0090_write_reg(state, 0x39, (1 << 10));
+ }
+}
+EXPORT_SYMBOL(dib0090_pwm_gain_reset);
+
+int dib0090_gain_control(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ enum frontend_tune_state *tune_state = &state->tune_state;
+ int ret = 10;
+
+ u16 wbd_val = 0;
+ u8 apply_gain_immediatly = 1;
+ s16 wbd_error = 0, adc_error = 0;
+
+ if (*tune_state == CT_AGC_START) {
+ state->agc_freeze = 0;
+ dib0090_write_reg(state, 0x04, 0x0);
+
+#ifdef CONFIG_BAND_SBAND
+ if (state->current_band == BAND_SBAND) {
+ dib0090_set_rframp(state, rf_ramp_sband);
+ dib0090_set_bbramp(state, bb_ramp_boost);
+ } else
+#endif
+#ifdef CONFIG_BAND_VHF
+ if (state->current_band == BAND_VHF) {
+ dib0090_set_rframp(state, rf_ramp_vhf);
+ dib0090_set_bbramp(state, bb_ramp_boost);
+ } else
+#endif
+#ifdef CONFIG_BAND_CBAND
+ if (state->current_band == BAND_CBAND) {
+ dib0090_set_rframp(state, rf_ramp_cband);
+ dib0090_set_bbramp(state, bb_ramp_boost);
+ } else
+#endif
+ {
+ dib0090_set_rframp(state, rf_ramp_uhf);
+ dib0090_set_bbramp(state, bb_ramp_boost);
+ }
+
+ dib0090_write_reg(state, 0x32, 0);
+ dib0090_write_reg(state, 0x39, 0);
+
+ dib0090_wbd_target(state, state->current_rf);
+
+ state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA;
+ state->current_gain = ((state->rf_ramp[0] + state->bb_ramp[0]) / 2) << GAIN_ALPHA;
+
+ *tune_state = CT_AGC_STEP_0;
+ } else if (!state->agc_freeze) {
+ s16 wbd;
+
+ int adc;
+ wbd_val = dib0090_read_reg(state, 0x1d);
+
+ /* read and calc the wbd power */
+ wbd = dib0090_wbd_to_db(state, wbd_val);
+ wbd_error = state->wbd_target - wbd;
+
+ if (*tune_state == CT_AGC_STEP_0) {
+ if (wbd_error < 0 && state->rf_gain_limit > 0) {
+#ifdef CONFIG_BAND_CBAND
+ /* in case of CBAND tune reduce first the lt_gain2 before adjusting the RF gain */
+ u8 ltg2 = (state->rf_lt_def >> 10) & 0x7;
+ if (state->current_band == BAND_CBAND && ltg2) {
+ ltg2 >>= 1;
+ state->rf_lt_def &= ltg2 << 10; /* reduce in 3 steps from 7 to 0 */
+ }
+#endif
+ } else {
+ state->agc_step = 0;
+ *tune_state = CT_AGC_STEP_1;
+ }
+ } else {
+ /* calc the adc power */
+ adc = state->config->get_adc_power(fe);
+ adc = (adc * ((s32) 355774) + (((s32) 1) << 20)) >> 21; /* included in [0:-700] */
+
+ adc_error = (s16) (((s32) ADC_TARGET) - adc);
+#ifdef CONFIG_STANDARD_DAB
+ if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB)
+ adc_error += 130;
+#endif
+#ifdef CONFIG_STANDARD_DVBT
+ if (state->fe->dtv_property_cache.delivery_system == STANDARD_DVBT &&
+ (state->fe->dtv_property_cache.modulation == QAM_64 || state->fe->dtv_property_cache.modulation == QAM_16))
+ adc_error += 60;
+#endif
+#ifdef CONFIG_SYS_ISDBT
+ if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) &&
+ (((state->fe->dtv_property_cache.layer[0].segment_count > 0) &&
+ ((state->fe->dtv_property_cache.layer[0].modulation == QAM_64) ||
+ (state->fe->dtv_property_cache.layer[0].modulation == QAM_16))) ||
+ ((state->fe->dtv_property_cache.layer[1].segment_count > 0) &&
+ ((state->fe->dtv_property_cache.layer[1].modulation == QAM_64) ||
+ (state->fe->dtv_property_cache.layer[1].modulation == QAM_16))) ||
+ ((state->fe->dtv_property_cache.layer[2].segment_count > 0) &&
+ ((state->fe->dtv_property_cache.layer[2].modulation == QAM_64) ||
+ (state->fe->dtv_property_cache.layer[2].modulation == QAM_16)))))
+ adc_error += 60;
+#endif
+
+ if (*tune_state == CT_AGC_STEP_1) { /* quickly go to the correct range of the ADC power */
+ if (ABS(adc_error) < 50 || state->agc_step++ > 5) {
+
+#ifdef CONFIG_STANDARD_DAB
+ if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB) {
+ dib0090_write_reg(state, 0x02, (1 << 15) | (15 << 11) | (31 << 6) | (63)); /* cap value = 63 : narrow BB filter : Fc = 1.8MHz */
+ dib0090_write_reg(state, 0x04, 0x0);
+ } else
+#endif
+ {
+ dib0090_write_reg(state, 0x02, (1 << 15) | (3 << 11) | (6 << 6) | (32));
+ dib0090_write_reg(state, 0x04, 0x01); /*0 = 1KHz ; 1 = 150Hz ; 2 = 50Hz ; 3 = 50KHz ; 4 = servo fast */
+ }
+
+ *tune_state = CT_AGC_STOP;
+ }
+ } else {
+ /* everything higher than or equal to CT_AGC_STOP means tracking */
+ ret = 100; /* 10ms interval */
+ apply_gain_immediatly = 0;
+ }
+ }
+#ifdef DEBUG_AGC
+ dprintk
+ ("FE: %d, tune state %d, ADC = %3ddB (ADC err %3d) WBD %3ddB (WBD err %3d, WBD val SADC: %4d), RFGainLimit (TOP): %3d, signal: %3ddBm",
+ (u32) fe->id, (u32) *tune_state, (u32) adc, (u32) adc_error, (u32) wbd, (u32) wbd_error, (u32) wbd_val,
+ (u32) state->rf_gain_limit >> WBD_ALPHA, (s32) 200 + adc - (state->current_gain >> GAIN_ALPHA));
+#endif
+ }
+
+ /* apply gain */
+ if (!state->agc_freeze)
+ dib0090_gain_apply(state, adc_error, wbd_error, apply_gain_immediatly);
+ return ret;
+}
+EXPORT_SYMBOL(dib0090_gain_control);
+
+void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ if (rf)
+ *rf = state->gain[0];
+ if (bb)
+ *bb = state->gain[1];
+ if (rf_gain_limit)
+ *rf_gain_limit = state->rf_gain_limit;
+ if (rflt)
+ *rflt = (state->rf_lt_def >> 10) & 0x7;
+}
+EXPORT_SYMBOL(dib0090_get_current_gain);
+
+u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner)
+{
+ struct dib0090_state *st = tuner->tuner_priv;
+ return st->wbd_offset;
+}
+EXPORT_SYMBOL(dib0090_get_wbd_offset);
+
+static const u16 dib0090_defaults[] = {
+
+ 25, 0x01,
+ 0x0000,
+ 0x99a0,
+ 0x6008,
+ 0x0000,
+ 0x8acb,
+ 0x0000,
+ 0x0405,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0xb802,
+ 0x0300,
+ 0x2d12,
+ 0xbac0,
+ 0x7c00,
+ 0xdbb9,
+ 0x0954,
+ 0x0743,
+ 0x8000,
+ 0x0001,
+ 0x0040,
+ 0x0100,
+ 0x0000,
+ 0xe910,
+ 0x149e,
+
+ 1, 0x1c,
+ 0xff2d,
+
+ 1, 0x39,
+ 0x0000,
+
+ 1, 0x1b,
+ EN_IQADC | EN_BB | EN_BIAS | EN_DIGCLK | EN_PLL | EN_CRYSTAL,
+ 2, 0x1e,
+ 0x07FF,
+ 0x0007,
+
+ 1, 0x24,
+ EN_UHF | EN_CRYSTAL,
+
+ 2, 0x3c,
+ 0x3ff,
+ 0x111,
+ 0
+};
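/*
 * Layout note (added for clarity): the table is a sequence of blocks of the
 * form { count, first register, <count> values }, terminated by a count of
 * 0, exactly as parsed by the upload loop in dib0090_reset().  "25, 0x01"
 * therefore programs registers 0x01..0x19, "1, 0x1c" programs 0x1c, and so on.
 */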
+
+static int dib0090_reset(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ u16 l, r, *n;
+
+ dib0090_reset_digital(fe, state->config);
+ state->revision = dib0090_identify(fe);
+
+ /* Revision definition */
+ if (state->revision == 0xff)
+ return -EINVAL;
+#ifdef EFUSE
+ else if ((state->revision & 0x1f) >= 3) /* Update the efuse : Only available for KROSUS > P1C */
+ dib0090_set_EFUSE(state);
+#endif
+
+#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
+ if (!(state->revision & 0x1)) /* it is P1B - reset is already done */
+ return 0;
+#endif
+
+ /* Upload the default values */
+ n = (u16 *) dib0090_defaults;
+ l = pgm_read_word(n++);
+ while (l) {
+ r = pgm_read_word(n++);
+ do {
+ /* DEBUG_TUNER */
+ /* dprintk("%d, %d, %d", l, r, pgm_read_word(n)); */
+ dib0090_write_reg(state, r, pgm_read_word(n++));
+ r++;
+ } while (--l);
+ l = pgm_read_word(n++);
+ }
+
+ /* Configure according to the crystal */
+ if (state->config->io.clock_khz >= 24000)
+ l = 1;
+ else
+ l = 2;
+ dib0090_write_reg(state, 0x14, l);
+ dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
+
+ state->reset = 3; /* enable iq-offset-calibration and wbd-calibration when tuning next time */
+
+ return 0;
+}
+
+#define steps(u) (((u) > 15) ? ((u)-16) : (u))
+#define INTERN_WAIT 10
+static int dib0090_get_offset(struct dib0090_state *state, enum frontend_tune_state *tune_state)
+{
+ int ret = INTERN_WAIT * 10;
+
+ switch (*tune_state) {
+ case CT_TUNER_STEP_2:
+ /* Turns to positive */
+ dib0090_write_reg(state, 0x1f, 0x7);
+ *tune_state = CT_TUNER_STEP_3;
+ break;
+
+ case CT_TUNER_STEP_3:
+ state->adc_diff = dib0090_read_reg(state, 0x1d);
+
+ /* Turns to negative */
+ dib0090_write_reg(state, 0x1f, 0x4);
+ *tune_state = CT_TUNER_STEP_4;
+ break;
+
+ case CT_TUNER_STEP_4:
+ state->adc_diff -= dib0090_read_reg(state, 0x1d);
+ *tune_state = CT_TUNER_STEP_5;
+ ret = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+struct dc_calibration {
+ uint8_t addr;
+ uint8_t offset;
+ uint8_t pga:1;
+ uint16_t bb1;
+ uint8_t i:1;
+};
+
+static const struct dc_calibration dc_table[] = {
+ /* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */
+ {0x06, 5, 1, (1 << 13) | (0 << 8) | (26 << 3), 1},
+ {0x07, 11, 1, (1 << 13) | (0 << 8) | (26 << 3), 0},
+ /* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */
+ {0x06, 0, 0, (1 << 13) | (29 << 8) | (26 << 3), 1},
+ {0x06, 10, 0, (1 << 13) | (29 << 8) | (26 << 3), 0},
+ {0},
+};
+
+static void dib0090_set_trim(struct dib0090_state *state)
+{
+ u16 *val;
+
+ if (state->dc->addr == 0x07)
+ val = &state->bb7;
+ else
+ val = &state->bb6;
+
+ *val &= ~(0x1f << state->dc->offset);
+ *val |= state->step << state->dc->offset;
+
+ dib0090_write_reg(state, state->dc->addr, *val);
+}
+
+static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
+{
+ int ret = 0;
+
+ switch (*tune_state) {
+
+ case CT_TUNER_START:
+ /* init */
+ dprintk("Internal DC calibration");
+
+ /* the LNA is off */
+ dib0090_write_reg(state, 0x24, 0x02ed);
+
+ /* force vcm2 = 0.8V */
+ state->bb6 = 0;
+ state->bb7 = 0x040d;
+
+ state->dc = dc_table;
+
+ *tune_state = CT_TUNER_STEP_0;
+
+ /* fall through */
+
+ case CT_TUNER_STEP_0:
+ dib0090_write_reg(state, 0x01, state->dc->bb1);
+ dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));
+
+ state->step = 0;
+
+ state->min_adc_diff = 1023;
+
+ *tune_state = CT_TUNER_STEP_1;
+ ret = 50;
+ break;
+
+ case CT_TUNER_STEP_1:
+ dib0090_set_trim(state);
+
+ *tune_state = CT_TUNER_STEP_2;
+ break;
+
+ case CT_TUNER_STEP_2:
+ case CT_TUNER_STEP_3:
+ case CT_TUNER_STEP_4:
+ ret = dib0090_get_offset(state, tune_state);
+ break;
+
+ case CT_TUNER_STEP_5: /* found an offset */
+ dprintk("FE%d: IQC read=%d, current=%x", state->fe->id, (u32) state->adc_diff, state->step);
+
+ /* first turn for this frequency */
+ if (state->step == 0) {
+ if (state->dc->pga && state->adc_diff < 0)
+ state->step = 0x10;
+ if (state->dc->pga == 0 && state->adc_diff > 0)
+ state->step = 0x10;
+ }
+
+ state->adc_diff = ABS(state->adc_diff);
+
+ if (state->adc_diff < state->min_adc_diff && steps(state->step) < 15) { /* stop search when the delta to 0 is increasing */
+ state->step++;
+ state->min_adc_diff = state->adc_diff;
+ *tune_state = CT_TUNER_STEP_1;
+ } else {
+
+ /* the minimum was what we have seen in the step before */
+ state->step--;
+ dib0090_set_trim(state);
+
+ dprintk("FE%d: BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->fe->id, state->dc->addr, state->adc_diff,
+ state->step);
+
+ state->dc++;
+ if (state->dc->addr == 0) /* done */
+ *tune_state = CT_TUNER_STEP_6;
+ else
+ *tune_state = CT_TUNER_STEP_0;
+
+ }
+ break;
+
+ case CT_TUNER_STEP_6:
+ dib0090_write_reg(state, 0x07, state->bb7 & ~0x0008);
+ dib0090_write_reg(state, 0x1f, 0x7);
+ *tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
+ state->reset &= ~0x1;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
+{
+ switch (*tune_state) {
+ case CT_TUNER_START:
+ /* WBD-mode=log, Bias=2, Gain=6, Testmode=1, en=1, WBDMUX=1 */
+ dib0090_write_reg(state, 0x10, 0xdb09 | (1 << 10));
+ dib0090_write_reg(state, 0x24, EN_UHF & 0x0fff);
+
+ *tune_state = CT_TUNER_STEP_0;
+ return 90; /* wait for the WBDMUX to switch and for the ADC to sample */
+ case CT_TUNER_STEP_0:
+ state->wbd_offset = dib0090_read_reg(state, 0x1d);
+ dprintk("WBD calibration offset = %d", state->wbd_offset);
+
+ *tune_state = CT_TUNER_START; /* reset done -> real tuning can now begin */
+ state->reset &= ~0x2;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void dib0090_set_bandwidth(struct dib0090_state *state)
+{
+ u16 tmp;
+
+ if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 5000)
+ tmp = (3 << 14);
+ else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 6000)
+ tmp = (2 << 14);
+ else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 7000)
+ tmp = (1 << 14);
+ else
+ tmp = (0 << 14);
+
+ state->bb_1_def &= 0x3fff;
+ state->bb_1_def |= tmp;
+
+ dib0090_write_reg(state, 0x01, state->bb_1_def); /* be sure that we have the right bb-filter */
+}
+
+static const struct dib0090_pll dib0090_pll_table[] = {
+#ifdef CONFIG_BAND_CBAND
+ {56000, 0, 9, 48, 6},
+ {70000, 1, 9, 48, 6},
+ {87000, 0, 8, 32, 4},
+ {105000, 1, 8, 32, 4},
+ {115000, 0, 7, 24, 6},
+ {140000, 1, 7, 24, 6},
+ {170000, 0, 6, 16, 4},
+#endif
+#ifdef CONFIG_BAND_VHF
+ {200000, 1, 6, 16, 4},
+ {230000, 0, 5, 12, 6},
+ {280000, 1, 5, 12, 6},
+ {340000, 0, 4, 8, 4},
+ {380000, 1, 4, 8, 4},
+ {450000, 0, 3, 6, 6},
+#endif
+#ifdef CONFIG_BAND_UHF
+ {580000, 1, 3, 6, 6},
+ {700000, 0, 2, 4, 4},
+ {860000, 1, 2, 4, 4},
+#endif
+#ifdef CONFIG_BAND_LBAND
+ {1800000, 1, 0, 2, 4},
+#endif
+#ifdef CONFIG_BAND_SBAND
+ {2900000, 0, 14, 1, 4},
+#endif
+};
+
+static const struct dib0090_tuning dib0090_tuning_table_fm_vhf_on_cband[] = {
+
+#ifdef CONFIG_BAND_CBAND
+ {184000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
+ {227000, 4, 3, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
+ {380000, 4, 7, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
+#endif
+#ifdef CONFIG_BAND_UHF
+ {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+#endif
+#ifdef CONFIG_BAND_LBAND
+ {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+#endif
+#ifdef CONFIG_BAND_SBAND
+ {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
+ {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
+#endif
+};
+
+static const struct dib0090_tuning dib0090_tuning_table[] = {
+
+#ifdef CONFIG_BAND_CBAND
+ {170000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
+#endif
+#ifdef CONFIG_BAND_VHF
+ {184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
+ {227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
+ {380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
+#endif
+#ifdef CONFIG_BAND_UHF
+ {520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+ {900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
+#endif
+#ifdef CONFIG_BAND_LBAND
+ {1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+ {1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
+#endif
+#ifdef CONFIG_BAND_SBAND
+ {2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
+ {2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
+#endif
+};
+
+#define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */
+static int dib0090_tune(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ const struct dib0090_tuning *tune = state->current_tune_table_index;
+ const struct dib0090_pll *pll = state->current_pll_table_index;
+ enum frontend_tune_state *tune_state = &state->tune_state;
+
+ u32 rf;
+ u16 lo4 = 0xe900, lo5, lo6, Den;
+ u32 FBDiv, Rest, FREF, VCOF_kHz = 0;
+ u16 tmp, adc;
+ int8_t step_sign;
+ int ret = 10; /* 1ms is the default delay most of the time */
+ u8 c, i;
+
+ state->current_band = (u8) BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000);
+ rf = fe->dtv_property_cache.frequency / 1000 + (state->current_band ==
+ BAND_UHF ? state->config->freq_offset_khz_uhf : state->config->freq_offset_khz_vhf);
+ /* before tuning, first run any pending reset/calibration */
+ if (state->reset & 0x1)
+ return dib0090_dc_offset_calibration(state, tune_state);
+ else if (state->reset & 0x2)
+ return dib0090_wbd_calibration(state, tune_state);
+
+ /************************* VCO ***************************/
+ /* Default values for FG */
+ /* from these are needed : */
+ /* Cp,HFdiv,VCOband,SD,Num,Den,FB and REFDiv */
+
+#ifdef CONFIG_SYS_ISDBT
+ if (state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1)
+ rf += 850;
+#endif
+
+ if (state->current_rf != rf) {
+ state->tuner_is_tuned = 0;
+
+ tune = dib0090_tuning_table;
+
+ tmp = (state->revision >> 5) & 0x7;
+ if (tmp == 0x4 || tmp == 0x7) {
+ /* CBAND tuner version for VHF */
+ if (state->current_band == BAND_FM || state->current_band == BAND_VHF) {
+ /* Force CBAND */
+ state->current_band = BAND_CBAND;
+ tune = dib0090_tuning_table_fm_vhf_on_cband;
+ }
+ }
+
+ pll = dib0090_pll_table;
+ /* Look for the interval */
+ while (rf > tune->max_freq)
+ tune++;
+ while (rf > pll->max_freq)
+ pll++;
+ state->current_tune_table_index = tune;
+ state->current_pll_table_index = pll;
+ }
+
+ if (*tune_state == CT_TUNER_START) {
+
+ if (state->tuner_is_tuned == 0)
+ state->current_rf = 0;
+
+ if (state->current_rf != rf) {
+
+ dib0090_write_reg(state, 0x0b, 0xb800 | (tune->switch_trim));
+
+ /* external loop filter, otherwise:
+ * lo5 = (0 << 15) | (0 << 12) | (0 << 11) | (3 << 9) | (4 << 6) | (3 << 4) | 4;
+ * lo6 = 0x0e34 */
+ if (pll->vco_band)
+ lo5 = 0x049e;
+ else if (state->config->analog_output)
+ lo5 = 0x041d;
+ else
+ lo5 = 0x041c;
+
+ lo5 |= (pll->hfdiv_code << 11) | (pll->vco_band << 7); /* bit 15 is the split to the slave, we do not do it here */
+
+ if (!state->config->io.pll_int_loop_filt)
+ lo6 = 0xff28;
+ else
+ lo6 = (state->config->io.pll_int_loop_filt << 3);
+
+ VCOF_kHz = (pll->hfdiv * rf) * 2;
+
+ FREF = state->config->io.clock_khz;
+
+ FBDiv = (VCOF_kHz / pll->topresc / FREF);
+ Rest = (VCOF_kHz / pll->topresc) - FBDiv * FREF;
+
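+ /* Rest is the fractional remainder of the feedback divider; keep it
+  * clear of the loop-filter corner (LPF) before it is rescaled and
+  * written to the fractional divider registers (0x16/0x17). */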
+ if (Rest < LPF)
+ Rest = 0;
+ else if (Rest < 2 * LPF)
+ Rest = 2 * LPF;
+ else if (Rest > (FREF - LPF)) {
+ Rest = 0;
+ FBDiv += 1;
+ } else if (Rest > (FREF - 2 * LPF))
+ Rest = FREF - 2 * LPF;
+ Rest = (Rest * 6528) / (FREF / 10);
+
+ Den = 1;
+
+ dprintk(" ***** ******* Rest value = %d", Rest);
+
+ if (Rest > 0) {
+ if (state->config->analog_output)
+ lo6 |= (1 << 2) | 2;
+ else
+ lo6 |= (1 << 2) | 1;
+ Den = 255;
+ }
+#ifdef CONFIG_BAND_SBAND
+ if (state->current_band == BAND_SBAND)
+ lo6 &= 0xfffb;
+#endif
+
+ dib0090_write_reg(state, 0x15, (u16) FBDiv);
+
+ dib0090_write_reg(state, 0x16, (Den << 8) | 1);
+
+ dib0090_write_reg(state, 0x17, (u16) Rest);
+
+ dib0090_write_reg(state, 0x19, lo5);
+
+ dib0090_write_reg(state, 0x1c, lo6);
+
+ lo6 = tune->tuner_enable;
+ if (state->config->analog_output)
+ lo6 = (lo6 & 0xff9f) | 0x2;
+
+ dib0090_write_reg(state, 0x24, lo6 | EN_LO
+#ifdef CONFIG_DIB0090_USE_PWM_AGC
+ | state->config->use_pwm_agc * EN_CRYSTAL
+#endif
+ );
+
+ state->current_rf = rf;
+
+ /* prepare a complete captrim */
+ state->step = state->captrim = state->fcaptrim = 64;
+
+ } else { /* we are already tuned to this frequency - the configuration is correct */
+
+ /* do a minimal captrim even if the frequency has not changed */
+ state->step = 4;
+ state->captrim = state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7f;
+ }
+ state->adc_diff = 3000;
+
+ dib0090_write_reg(state, 0x10, 0x2B1);
+
+ dib0090_write_reg(state, 0x1e, 0x0032);
+
+ ret = 20;
+ *tune_state = CT_TUNER_STEP_1;
+ } else if (*tune_state == CT_TUNER_STEP_0) {
+ /* nothing */
+ } else if (*tune_state == CT_TUNER_STEP_1) {
+ state->step /= 2;
+ dib0090_write_reg(state, 0x18, lo4 | state->captrim);
+ *tune_state = CT_TUNER_STEP_2;
+ } else if (*tune_state == CT_TUNER_STEP_2) {
+
+ adc = dib0090_read_reg(state, 0x1d);
+ dprintk("FE %d CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) fe->id, (u32) state->captrim, (u32) adc,
+ (u32) (adc) * (u32) 1800 / (u32) 1024);
+
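+ /* Binary search over the 7-bit captrim value: 'step' is halved on each
+  * pass and captrim moves toward an ADC reading of 400 (~703 mV on the
+  * 1.8 V / 10-bit scale used in the debug print); the trim giving the
+  * smallest deviation is kept in fcaptrim. */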
+ if (adc >= 400) {
+ adc -= 400;
+ step_sign = -1;
+ } else {
+ adc = 400 - adc;
+ step_sign = 1;
+ }
+
+ if (adc < state->adc_diff) {
+ dprintk("FE %d CAPTRIM=%d is closer to target (%d/%d)", (u32) fe->id, (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
+ state->adc_diff = adc;
+ state->fcaptrim = state->captrim;
+
+ }
+
+ state->captrim += step_sign * state->step;
+ if (state->step >= 1)
+ *tune_state = CT_TUNER_STEP_1;
+ else
+ *tune_state = CT_TUNER_STEP_3;
+
+ ret = 15;
+ } else if (*tune_state == CT_TUNER_STEP_3) {
+ /* write the final captrim config */
+ dib0090_write_reg(state, 0x18, lo4 | state->fcaptrim);
+
+#ifdef CONFIG_TUNER_DIB0090_CAPTRIM_MEMORY
+ state->memory[state->memory_index].cap = state->fcaptrim;
+#endif
+
+ *tune_state = CT_TUNER_STEP_4;
+ } else if (*tune_state == CT_TUNER_STEP_4) {
+ dib0090_write_reg(state, 0x1e, 0x07ff);
+
+ dprintk("FE %d Final Captrim: %d", (u32) fe->id, (u32) state->fcaptrim);
+ dprintk("FE %d HFDIV code: %d", (u32) fe->id, (u32) pll->hfdiv_code);
+ dprintk("FE %d VCO = %d", (u32) fe->id, (u32) pll->vco_band);
+ dprintk("FE %d VCOF in kHz: %d ((%d*%d) << 1))", (u32) fe->id, (u32) ((pll->hfdiv * rf) * 2), (u32) pll->hfdiv, (u32) rf);
+ dprintk("FE %d REFDIV: %d, FREF: %d", (u32) fe->id, (u32) 1, (u32) state->config->io.clock_khz);
+ dprintk("FE %d FBDIV: %d, Rest: %d", (u32) fe->id, (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
+ dprintk("FE %d Num: %d, Den: %d, SD: %d", (u32) fe->id, (u32) dib0090_read_reg(state, 0x17),
+ (u32) (dib0090_read_reg(state, 0x16) >> 8), (u32) dib0090_read_reg(state, 0x1c) & 0x3);
+
+ c = 4;
+ i = 3;
+#if defined(CONFIG_BAND_LBAND) || defined(CONFIG_BAND_SBAND)
+ if ((state->current_band == BAND_LBAND) || (state->current_band == BAND_SBAND)) {
+ c = 2;
+ i = 2;
+ }
+#endif
+ dib0090_write_reg(state, 0x10, (c << 13) | (i << 11) | (WBD
+#ifdef CONFIG_DIB0090_USE_PWM_AGC
+ | (state->config->use_pwm_agc << 1)
+#endif
+ ));
+ dib0090_write_reg(state, 0x09, (tune->lna_tune << 5) | (tune->lna_bias << 0));
+ dib0090_write_reg(state, 0x0c, tune->v2i);
+ dib0090_write_reg(state, 0x0d, tune->mix);
+ dib0090_write_reg(state, 0x0e, tune->load);
+
+ *tune_state = CT_TUNER_STEP_5;
+ } else if (*tune_state == CT_TUNER_STEP_5) {
+
+ /* initialize the lt gain register */
+ state->rf_lt_def = 0x7c00;
+ dib0090_write_reg(state, 0x0f, state->rf_lt_def);
+
+ dib0090_set_bandwidth(state);
+ state->tuner_is_tuned = 1;
+ *tune_state = CT_TUNER_STOP;
+ } else
+ ret = FE_CALLBACK_TIME_NEVER;
+ return ret;
+}
+
+static int dib0090_release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ return state->tune_state;
+}
+EXPORT_SYMBOL(dib0090_get_tune_state);
+
+int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ state->tune_state = tune_state;
+ return 0;
+}
+EXPORT_SYMBOL(dib0090_set_tune_state);
+
+static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+
+ *frequency = 1000 * state->current_rf;
+ return 0;
+}
+
+static int dib0090_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+{
+ struct dib0090_state *state = fe->tuner_priv;
+ uint32_t ret;
+
+ state->tune_state = CT_TUNER_START;
+
+ do {
+ ret = dib0090_tune(fe);
+ if (ret != FE_CALLBACK_TIME_NEVER)
+ msleep(ret / 10);
+ else
+ break;
+ } while (state->tune_state != CT_TUNER_STOP);
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops dib0090_ops = {
+ .info = {
+ .name = "DiBcom DiB0090",
+ .frequency_min = 45000000,
+ .frequency_max = 860000000,
+ .frequency_step = 1000,
+ },
+ .release = dib0090_release,
+
+ .init = dib0090_wakeup,
+ .sleep = dib0090_sleep,
+ .set_params = dib0090_set_params,
+ .get_frequency = dib0090_get_frequency,
+};
+
+struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config)
+{
+ struct dib0090_state *st = kzalloc(sizeof(struct dib0090_state), GFP_KERNEL);
+ if (st == NULL)
+ return NULL;
+
+ st->config = config;
+ st->i2c = i2c;
+ st->fe = fe;
+ fe->tuner_priv = st;
+
+ if (dib0090_reset(fe) != 0)
+ goto free_mem;
+
+ printk(KERN_INFO "DiB0090: successfully identified\n");
+ memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops));
+
+ return fe;
+ free_mem:
+ kfree(st);
+ fe->tuner_priv = NULL;
+ return NULL;
+}
+EXPORT_SYMBOL(dib0090_register);
+
+MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>");
+MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib0090.h b/drivers/media/dvb/frontends/dib0090.h
new file mode 100644
index 00000000000..aa7711e8877
--- /dev/null
+++ b/drivers/media/dvb/frontends/dib0090.h
@@ -0,0 +1,108 @@
+/*
+ * Linux-DVB Driver for DiBcom's DiB0090 base-band RF Tuner.
+ *
+ * Copyright (C) 2005-7 DiBcom (http://www.dibcom.fr/)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+#ifndef DIB0090_H
+#define DIB0090_H
+
+struct dvb_frontend;
+struct i2c_adapter;
+
+#define DEFAULT_DIB0090_I2C_ADDRESS 0x60
+
+struct dib0090_io_config {
+ u32 clock_khz;
+
+ u8 pll_bypass:1;
+ u8 pll_range:1;
+ u8 pll_prediv:6;
+ u8 pll_loopdiv:6;
+
+ u8 adc_clock_ratio; /* valid values are 8, 7, 6 */
+ u16 pll_int_loop_filt;
+};
+
+struct dib0090_config {
+ struct dib0090_io_config io;
+ int (*reset) (struct dvb_frontend *, int);
+ int (*sleep) (struct dvb_frontend *, int);
+
+ /* offset in kHz */
+ int freq_offset_khz_uhf;
+ int freq_offset_khz_vhf;
+
+ int (*get_adc_power) (struct dvb_frontend *);
+
+ u8 clkouttobamse:1; /* activate or deactivate clock output */
+ u8 analog_output;
+
+ u8 i2c_address;
+ /* add drives and other things if necessary */
+ u16 wbd_vhf_offset;
+ u16 wbd_cband_offset;
+ u8 use_pwm_agc;
+ u8 clkoutdrive;
+};
+
+#if defined(CONFIG_DVB_TUNER_DIB0090) || (defined(CONFIG_DVB_TUNER_DIB0090_MODULE) && defined(MODULE))
+extern struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config);
+extern void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast);
+extern void dib0090_pwm_gain_reset(struct dvb_frontend *fe);
+extern u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner);
+extern int dib0090_gain_control(struct dvb_frontend *fe);
+extern enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe);
+extern int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state);
+extern void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt);
+#else
+static inline struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct dib0090_config *config)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+}
+
+static inline void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+}
+
+static inline u16 dib0090_get_wbd_offset(struct dvb_frontend *tuner)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
+
+static inline int dib0090_gain_control(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return CT_DONE;
+}
+
+static inline int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline void dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
index 898400d331a..6f6fa29d9ea 100644
--- a/drivers/media/dvb/frontends/dib8000.c
+++ b/drivers/media/dvb/frontends/dib8000.c
@@ -28,18 +28,6 @@ MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB8000: "); printk(args); printk("\n"); } } while (0)
-enum frontend_tune_state {
- CT_AGC_START = 20,
- CT_AGC_STEP_0,
- CT_AGC_STEP_1,
- CT_AGC_STEP_2,
- CT_AGC_STEP_3,
- CT_AGC_STEP_4,
- CT_AGC_STOP,
-
- CT_DEMOD_START = 30,
-};
-
#define FE_STATUS_TUNE_FAILED 0
struct i2c_device {
@@ -133,104 +121,104 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
return dib8000_i2c_write16(&state->i2c, reg, val);
}
-const int16_t coeff_2k_sb_1seg_dqpsk[8] = {
+static const int16_t coeff_2k_sb_1seg_dqpsk[8] = {
(769 << 5) | 0x0a, (745 << 5) | 0x03, (595 << 5) | 0x0d, (769 << 5) | 0x0a, (920 << 5) | 0x09, (784 << 5) | 0x02, (519 << 5) | 0x0c,
(920 << 5) | 0x09
};
-const int16_t coeff_2k_sb_1seg[8] = {
+static const int16_t coeff_2k_sb_1seg[8] = {
(692 << 5) | 0x0b, (683 << 5) | 0x01, (519 << 5) | 0x09, (692 << 5) | 0x0b, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f
};
-const int16_t coeff_2k_sb_3seg_0dqpsk_1dqpsk[8] = {
+static const int16_t coeff_2k_sb_3seg_0dqpsk_1dqpsk[8] = {
(832 << 5) | 0x10, (912 << 5) | 0x05, (900 << 5) | 0x12, (832 << 5) | 0x10, (-931 << 5) | 0x0f, (912 << 5) | 0x04, (807 << 5) | 0x11,
(-931 << 5) | 0x0f
};
-const int16_t coeff_2k_sb_3seg_0dqpsk[8] = {
+static const int16_t coeff_2k_sb_3seg_0dqpsk[8] = {
(622 << 5) | 0x0c, (941 << 5) | 0x04, (796 << 5) | 0x10, (622 << 5) | 0x0c, (982 << 5) | 0x0c, (519 << 5) | 0x02, (572 << 5) | 0x0e,
(982 << 5) | 0x0c
};
-const int16_t coeff_2k_sb_3seg_1dqpsk[8] = {
+static const int16_t coeff_2k_sb_3seg_1dqpsk[8] = {
(699 << 5) | 0x14, (607 << 5) | 0x04, (944 << 5) | 0x13, (699 << 5) | 0x14, (-720 << 5) | 0x0d, (640 << 5) | 0x03, (866 << 5) | 0x12,
(-720 << 5) | 0x0d
};
-const int16_t coeff_2k_sb_3seg[8] = {
+static const int16_t coeff_2k_sb_3seg[8] = {
(664 << 5) | 0x0c, (925 << 5) | 0x03, (937 << 5) | 0x10, (664 << 5) | 0x0c, (-610 << 5) | 0x0a, (697 << 5) | 0x01, (836 << 5) | 0x0e,
(-610 << 5) | 0x0a
};
-const int16_t coeff_4k_sb_1seg_dqpsk[8] = {
+static const int16_t coeff_4k_sb_1seg_dqpsk[8] = {
(-955 << 5) | 0x0e, (687 << 5) | 0x04, (818 << 5) | 0x10, (-955 << 5) | 0x0e, (-922 << 5) | 0x0d, (750 << 5) | 0x03, (665 << 5) | 0x0f,
(-922 << 5) | 0x0d
};
-const int16_t coeff_4k_sb_1seg[8] = {
+static const int16_t coeff_4k_sb_1seg[8] = {
(638 << 5) | 0x0d, (683 << 5) | 0x02, (638 << 5) | 0x0d, (638 << 5) | 0x0d, (-655 << 5) | 0x0a, (517 << 5) | 0x00, (698 << 5) | 0x0d,
(-655 << 5) | 0x0a
};
-const int16_t coeff_4k_sb_3seg_0dqpsk_1dqpsk[8] = {
+static const int16_t coeff_4k_sb_3seg_0dqpsk_1dqpsk[8] = {
(-707 << 5) | 0x14, (910 << 5) | 0x06, (889 << 5) | 0x16, (-707 << 5) | 0x14, (-958 << 5) | 0x13, (993 << 5) | 0x05, (523 << 5) | 0x14,
(-958 << 5) | 0x13
};
-const int16_t coeff_4k_sb_3seg_0dqpsk[8] = {
+static const int16_t coeff_4k_sb_3seg_0dqpsk[8] = {
(-723 << 5) | 0x13, (910 << 5) | 0x05, (777 << 5) | 0x14, (-723 << 5) | 0x13, (-568 << 5) | 0x0f, (547 << 5) | 0x03, (696 << 5) | 0x12,
(-568 << 5) | 0x0f
};
-const int16_t coeff_4k_sb_3seg_1dqpsk[8] = {
+static const int16_t coeff_4k_sb_3seg_1dqpsk[8] = {
(-940 << 5) | 0x15, (607 << 5) | 0x05, (915 << 5) | 0x16, (-940 << 5) | 0x15, (-848 << 5) | 0x13, (683 << 5) | 0x04, (543 << 5) | 0x14,
(-848 << 5) | 0x13
};
-const int16_t coeff_4k_sb_3seg[8] = {
+static const int16_t coeff_4k_sb_3seg[8] = {
(612 << 5) | 0x12, (910 << 5) | 0x04, (864 << 5) | 0x14, (612 << 5) | 0x12, (-869 << 5) | 0x13, (683 << 5) | 0x02, (869 << 5) | 0x12,
(-869 << 5) | 0x13
};
-const int16_t coeff_8k_sb_1seg_dqpsk[8] = {
+static const int16_t coeff_8k_sb_1seg_dqpsk[8] = {
(-835 << 5) | 0x12, (684 << 5) | 0x05, (735 << 5) | 0x14, (-835 << 5) | 0x12, (-598 << 5) | 0x10, (781 << 5) | 0x04, (739 << 5) | 0x13,
(-598 << 5) | 0x10
};
-const int16_t coeff_8k_sb_1seg[8] = {
+static const int16_t coeff_8k_sb_1seg[8] = {
(673 << 5) | 0x0f, (683 << 5) | 0x03, (808 << 5) | 0x12, (673 << 5) | 0x0f, (585 << 5) | 0x0f, (512 << 5) | 0x01, (780 << 5) | 0x0f,
(585 << 5) | 0x0f
};
-const int16_t coeff_8k_sb_3seg_0dqpsk_1dqpsk[8] = {
+static const int16_t coeff_8k_sb_3seg_0dqpsk_1dqpsk[8] = {
(863 << 5) | 0x17, (930 << 5) | 0x07, (878 << 5) | 0x19, (863 << 5) | 0x17, (0 << 5) | 0x14, (521 << 5) | 0x05, (980 << 5) | 0x18,
(0 << 5) | 0x14
};
-const int16_t coeff_8k_sb_3seg_0dqpsk[8] = {
+static const int16_t coeff_8k_sb_3seg_0dqpsk[8] = {
(-924 << 5) | 0x17, (910 << 5) | 0x06, (774 << 5) | 0x17, (-924 << 5) | 0x17, (-877 << 5) | 0x15, (565 << 5) | 0x04, (553 << 5) | 0x15,
(-877 << 5) | 0x15
};
-const int16_t coeff_8k_sb_3seg_1dqpsk[8] = {
+static const int16_t coeff_8k_sb_3seg_1dqpsk[8] = {
(-921 << 5) | 0x19, (607 << 5) | 0x06, (881 << 5) | 0x19, (-921 << 5) | 0x19, (-921 << 5) | 0x14, (713 << 5) | 0x05, (1018 << 5) | 0x18,
(-921 << 5) | 0x14
};
-const int16_t coeff_8k_sb_3seg[8] = {
+static const int16_t coeff_8k_sb_3seg[8] = {
(514 << 5) | 0x14, (910 << 5) | 0x05, (861 << 5) | 0x17, (514 << 5) | 0x14, (690 << 5) | 0x14, (683 << 5) | 0x03, (662 << 5) | 0x15,
(690 << 5) | 0x14
};
-const int16_t ana_fe_coeff_3seg[24] = {
+static const int16_t ana_fe_coeff_3seg[24] = {
81, 80, 78, 74, 68, 61, 54, 45, 37, 28, 19, 11, 4, 1022, 1017, 1013, 1010, 1008, 1008, 1008, 1008, 1010, 1014, 1017
};
-const int16_t ana_fe_coeff_1seg[24] = {
+static const int16_t ana_fe_coeff_1seg[24] = {
249, 226, 164, 82, 5, 981, 970, 988, 1018, 20, 31, 26, 8, 1012, 1000, 1018, 1012, 8, 15, 14, 9, 3, 1017, 1003
};
-const int16_t ana_fe_coeff_13seg[24] = {
+static const int16_t ana_fe_coeff_13seg[24] = {
396, 305, 105, -51, -77, -12, 41, 31, -11, -30, -11, 14, 15, -2, -13, -7, 5, 8, 1, -6, -7, -3, 0, 1
};
@@ -852,6 +840,14 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
return 0;
}
+void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ dib8000_set_adc_state(state, DIBX000_ADC_ON);
+ dib8000_set_agc_config(state, (unsigned char)(BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000)));
+}
+EXPORT_SYMBOL(dib8000_pwm_agc_reset);
+
static int dib8000_agc_soft_split(struct dib8000_state *state)
{
u16 agc, split_offset;
@@ -939,6 +935,32 @@ static int dib8000_agc_startup(struct dvb_frontend *fe)
}
+static const int32_t lut_1000ln_mant[] =
+{
+ 6908, 7003, 7090, 7170, 7244, 7313, 7377, 7438, 7495, 7549, 7600
+};
+
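+/* For mode = 1 the raw ADC power is expressed as mant * 2^exp with
+ * 1000 <= mant < 2000, so 1000*ln(power / 2^20) is approximated by
+ * lut_1000ln_mant[(mant - 1000) / 100] + 693*(exp - 20) - 6908 and then
+ * rescaled by 256/1000 before being returned. */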
+int32_t dib8000_get_adc_power(struct dvb_frontend *fe, uint8_t mode)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ uint32_t ix = 0, tmp_val = 0, exp = 0, mant = 0;
+ int32_t val;
+
+ val = dib8000_read32(state, 384);
+ /* mode = 1: compute ln(agc power) using a mantissa/exponent conversion and a mantissa look-up table */
+ if (mode) {
+ tmp_val = val;
+ while (tmp_val >>= 1)
+ exp++;
+ mant = (val * 1000 / (1<<exp));
+ ix = (uint8_t)((mant-1000)/100); /* index of the LUT */
+ val = (lut_1000ln_mant[ix] + 693*(exp-20) - 6908); /* 1000 * ln(adcpower_real) ; 693 = 1000ln(2) ; 6908 = 1000*ln(1000) ; 20 comes from adc_real = adc_pow_int / 2**20 */
+ val = (val*256)/1000;
+ }
+ return val;
+}
+EXPORT_SYMBOL(dib8000_get_adc_power);
+
static void dib8000_update_timf(struct dib8000_state *state)
{
u32 timf = state->timf = dib8000_read32(state, 435);
@@ -1401,10 +1423,9 @@ static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosear
}
break;
}
- }
- if (state->fe.dtv_property_cache.isdbt_sb_mode == 1)
for (i = 0; i < 8; i++)
dib8000_write_word(state, 343 + i, ncoeff[i]);
+ }
// P_small_coef_ext_enable=ISDB-Tsb, P_small_narrow_band=ISDB-Tsb, P_small_last_seg=13, P_small_offset_num_car=5
dib8000_write_word(state, 351,
@@ -1854,6 +1875,24 @@ static int dib8000_sleep(struct dvb_frontend *fe)
}
}
+enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ return state->tune_state;
+}
+EXPORT_SYMBOL(dib8000_get_tune_state);
+
+int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
+{
+ struct dib8000_state *state = fe->demodulator_priv;
+ state->tune_state = tune_state;
+ return 0;
+}
+EXPORT_SYMBOL(dib8000_set_tune_state);
+
static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -2043,29 +2082,31 @@ static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
*stat = 0;
- if ((lock >> 14) & 1) // AGC
+ if ((lock >> 13) & 1)
*stat |= FE_HAS_SIGNAL;
- if ((lock >> 8) & 1) // Equal
+ if ((lock >> 8) & 1) /* Equal */
*stat |= FE_HAS_CARRIER;
- if ((lock >> 3) & 1) // TMCC_SYNC
+ if (((lock >> 1) & 0xf) == 0xf) /* TMCC_SYNC */
*stat |= FE_HAS_SYNC;
- if ((lock >> 5) & 7) // FEC MPEG
+ if (((lock >> 12) & 1) && ((lock >> 5) & 7)) /* FEC MPEG */
*stat |= FE_HAS_LOCK;
- lock = dib8000_read_word(state, 554); // Viterbi Layer A
- if (lock & 0x01)
- *stat |= FE_HAS_VITERBI;
+ if ((lock >> 12) & 1) {
+ lock = dib8000_read_word(state, 554); /* Viterbi Layer A */
+ if (lock & 0x01)
+ *stat |= FE_HAS_VITERBI;
- lock = dib8000_read_word(state, 555); // Viterbi Layer B
- if (lock & 0x01)
- *stat |= FE_HAS_VITERBI;
+ lock = dib8000_read_word(state, 555); /* Viterbi Layer B */
+ if (lock & 0x01)
+ *stat |= FE_HAS_VITERBI;
- lock = dib8000_read_word(state, 556); // Viterbi Layer C
- if (lock & 0x01)
- *stat |= FE_HAS_VITERBI;
+ lock = dib8000_read_word(state, 556); /* Viterbi Layer C */
+ if (lock & 0x01)
+ *stat |= FE_HAS_VITERBI;
+ }
return 0;
}
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index 8c89482b738..d99619ae983 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -46,6 +46,10 @@ extern int dib8000_set_gpio(struct dvb_frontend *, u8 num, u8 dir, u8 val);
extern int dib8000_set_wbd_ref(struct dvb_frontend *, u16 value);
extern int dib8000_pid_filter_ctrl(struct dvb_frontend *, u8 onoff);
extern int dib8000_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
+extern int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state);
+extern enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe);
+extern void dib8000_pwm_agc_reset(struct dvb_frontend *fe);
+extern s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode);
#else
static inline struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg)
{
@@ -59,35 +63,53 @@ static inline struct i2c_adapter *dib8000_get_i2c_master(struct dvb_frontend *fe
return NULL;
}
-int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
+static inline int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
-int dib8000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
+static inline int dib8000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
-int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value)
+static inline int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
-int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
+static inline int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
-int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
+static inline int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return -ENODEV;
}
+static inline int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+static inline enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return CT_SHUTDOWN;
+}
+static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+}
+static inline s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return 0;
+}
#endif
#endif
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
index 4efca30d212..e6f3d73db9d 100644
--- a/drivers/media/dvb/frontends/dibx000_common.c
+++ b/drivers/media/dvb/frontends/dibx000_common.c
@@ -6,7 +6,7 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); } } while (0)
+#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)
static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
{
@@ -25,7 +25,7 @@ static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
enum dibx000_i2c_interface intf)
{
if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
- dprintk("selecting interface: %d\n", intf);
+ dprintk("selecting interface: %d", intf);
mst->selected_interface = intf;
return dibx000_write_word(mst, mst->base_reg + 4, intf);
}
@@ -171,9 +171,18 @@ void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
{
i2c_del_adapter(&mst->gated_tuner_i2c_adap);
}
-
EXPORT_SYMBOL(dibx000_exit_i2c_master);
+
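+/* systime() returns the current kernel time scaled to tenths of a
+ * millisecond: tv_sec * 10000 + tv_nsec / 100000. */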
+u32 systime(void)
+{
+ struct timespec t;
+
+ t = current_kernel_time();
+ return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
+}
+EXPORT_SYMBOL(systime);
+
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dibx000_common.h b/drivers/media/dvb/frontends/dibx000_common.h
index 5be10eca07c..4f5d141a308 100644
--- a/drivers/media/dvb/frontends/dibx000_common.h
+++ b/drivers/media/dvb/frontends/dibx000_common.h
@@ -36,13 +36,17 @@ extern struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master
extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst);
extern void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst);
+extern u32 systime(void);
+
#define BAND_LBAND 0x01
#define BAND_UHF 0x02
#define BAND_VHF 0x04
#define BAND_SBAND 0x08
-#define BAND_FM 0x10
+#define BAND_FM 0x10
+#define BAND_CBAND 0x20
-#define BAND_OF_FREQUENCY(freq_kHz) ( (freq_kHz) <= 115000 ? BAND_FM : \
+#define BAND_OF_FREQUENCY(freq_kHz) ((freq_kHz) <= 170000 ? BAND_CBAND : \
+ (freq_kHz) <= 115000 ? BAND_FM : \
(freq_kHz) <= 250000 ? BAND_VHF : \
(freq_kHz) <= 863000 ? BAND_UHF : \
(freq_kHz) <= 2000000 ? BAND_LBAND : BAND_SBAND )
@@ -149,4 +153,67 @@ enum dibx000_adc_states {
#define OUTMODE_MPEG2_FIFO 5
#define OUTMODE_ANALOG_ADC 6
+enum frontend_tune_state {
+ CT_TUNER_START = 10,
+ CT_TUNER_STEP_0,
+ CT_TUNER_STEP_1,
+ CT_TUNER_STEP_2,
+ CT_TUNER_STEP_3,
+ CT_TUNER_STEP_4,
+ CT_TUNER_STEP_5,
+ CT_TUNER_STEP_6,
+ CT_TUNER_STEP_7,
+ CT_TUNER_STOP,
+
+ CT_AGC_START = 20,
+ CT_AGC_STEP_0,
+ CT_AGC_STEP_1,
+ CT_AGC_STEP_2,
+ CT_AGC_STEP_3,
+ CT_AGC_STEP_4,
+ CT_AGC_STOP,
+
+ CT_DEMOD_START = 30,
+ CT_DEMOD_STEP_1,
+ CT_DEMOD_STEP_2,
+ CT_DEMOD_STEP_3,
+ CT_DEMOD_STEP_4,
+ CT_DEMOD_STEP_5,
+ CT_DEMOD_STEP_6,
+ CT_DEMOD_STEP_7,
+ CT_DEMOD_STEP_8,
+ CT_DEMOD_STEP_9,
+ CT_DEMOD_STEP_10,
+ CT_DEMOD_SEARCH_NEXT = 41,
+ CT_DEMOD_STEP_LOCKED,
+ CT_DEMOD_STOP,
+
+ CT_DONE = 100,
+ CT_SHUTDOWN,
+
+};
+
+struct dvb_frontend_parametersContext {
+#define CHANNEL_STATUS_PARAMETERS_UNKNOWN 0x01
+#define CHANNEL_STATUS_PARAMETERS_SET 0x02
+ u8 status;
+ u32 tune_time_estimation[2];
+ s32 tps_available;
+ u16 tps[9];
+};
+
+#define FE_STATUS_TUNE_FAILED 0
+#define FE_STATUS_TUNE_TIMED_OUT -1
+#define FE_STATUS_TUNE_TIME_TOO_SHORT -2
+#define FE_STATUS_TUNE_PENDING -3
+#define FE_STATUS_STD_SUCCESS -4
+#define FE_STATUS_FFT_SUCCESS -5
+#define FE_STATUS_DEMOD_SUCCESS -6
+#define FE_STATUS_LOCKED -7
+#define FE_STATUS_DATA_LOCKED -8
+
+#define FE_CALLBACK_TIME_NEVER 0xffffffff
+
+#define ABS(x) (((x) < 0) ? -(x) : (x))
+
#endif
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index eabcadc425d..dee53960e7e 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -199,7 +199,7 @@ static int lgs8gxx_set_if_freq(struct lgs8gxx_state *priv, u32 freq /*in kHz*/)
val = freq;
if (freq != 0) {
- val *= (u64)1 << 32;
+ val <<= 32;
if (if_clk != 0)
do_div(val, if_clk);
v32 = val & 0xFFFFFFFF;
@@ -246,7 +246,7 @@ static int lgs8gxx_get_afc_phase(struct lgs8gxx_state *priv)
val = v32;
val *= priv->config->if_clk_freq;
- val /= (u64)1 << 32;
+ val >>= 32;
dprintk("AFC = %u kHz\n", (u32)val);
return 0;
}
diff --git a/drivers/media/dvb/frontends/lnbp21.c b/drivers/media/dvb/frontends/lnbp21.c
index 71f607fe8fc..b181bf023ad 100644
--- a/drivers/media/dvb/frontends/lnbp21.c
+++ b/drivers/media/dvb/frontends/lnbp21.c
@@ -1,7 +1,7 @@
/*
* lnbp21.c - driver for lnb supply and control ic lnbp21
*
- * Copyright (C) 2006 Oliver Endriss
+ * Copyright (C) 2006, 2009 Oliver Endriss <o.endriss@gmx.de>
* Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
*
* This program is free software; you can redistribute it and/or
@@ -91,6 +91,31 @@ static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO;
}
+static int lnbp21_set_tone(struct dvb_frontend *fe,
+ fe_sec_tone_mode_t tone)
+{
+ struct lnbp21 *lnbp21 = (struct lnbp21 *) fe->sec_priv;
+ struct i2c_msg msg = { .addr = lnbp21->i2c_addr, .flags = 0,
+ .buf = &lnbp21->config,
+ .len = sizeof(lnbp21->config) };
+
+ switch (tone) {
+ case SEC_TONE_OFF:
+ lnbp21->config &= ~LNBP21_TEN;
+ break;
+ case SEC_TONE_ON:
+ lnbp21->config |= LNBP21_TEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lnbp21->config |= lnbp21->override_or;
+ lnbp21->config &= lnbp21->override_and;
+
+ return (i2c_transfer(lnbp21->i2c, &msg, 1) == 1) ? 0 : -EIO;
+}
+
static void lnbp21_release(struct dvb_frontend *fe)
{
/* LNBP power off */
@@ -133,6 +158,7 @@ static struct dvb_frontend *lnbx2x_attach(struct dvb_frontend *fe,
/* override frontend ops */
fe->ops.set_voltage = lnbp21_set_voltage;
fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
+ fe->ops.set_tone = lnbp21_set_tone;
printk(KERN_INFO "LNBx2x attached on addr=%x\n", lnbp21->i2c_addr);
return fe;
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index df49ea0983b..8762c86044a 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -1451,6 +1451,8 @@ static int stv0900_status(struct stv0900_internal *intp,
{
enum fe_stv0900_search_state demod_state;
int locked = FALSE;
+ u8 tsbitrate0_val, tsbitrate1_val;
+ s32 bitrate;
demod_state = stv0900_get_bits(intp, HEADER_MODE);
switch (demod_state) {
@@ -1473,6 +1475,17 @@ static int stv0900_status(struct stv0900_internal *intp,
dprintk("%s: locked = %d\n", __func__, locked);
+ if (stvdebug) {
+ /* Print TS bitrate */
+ tsbitrate0_val = stv0900_read_reg(intp, TSBITRATE0);
+ tsbitrate1_val = stv0900_read_reg(intp, TSBITRATE1);
+ /* Formula Bit rate = Mclk * px_tsfifo_bitrate / 16384 */
+ bitrate = (stv0900_get_mclk_freq(intp, intp->quartz)/1000000)
+ * (tsbitrate1_val << 8 | tsbitrate0_val);
+ bitrate /= 16384;
+ dprintk("TS bitrate = %d Mbit/sec \n", bitrate);
+ };
+
return locked;
}
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 48edd542242..1573466a5c7 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -3597,7 +3597,8 @@ static int stv090x_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_ma
reg = STV090x_READ_DEMOD(state, DISTXCTL);
- STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 2);
+ STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD,
+ (state->config->diseqc_envelope_mode) ? 4 : 2);
STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
goto err;
@@ -3649,10 +3650,10 @@ static int stv090x_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t
reg = STV090x_READ_DEMOD(state, DISTXCTL);
if (burst == SEC_MINI_A) {
- mode = 3;
+ mode = (state->config->diseqc_envelope_mode) ? 5 : 3;
value = 0x00;
} else {
- mode = 2;
+ mode = (state->config->diseqc_envelope_mode) ? 4 : 2;
value = 0xFF;
}
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
index e968c98bb70..b133807663e 100644
--- a/drivers/media/dvb/frontends/stv090x.h
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -75,6 +75,8 @@ struct stv090x_config {
enum stv090x_i2crpt repeater_level;
+ bool diseqc_envelope_mode;
+
int (*tuner_init) (struct dvb_frontend *fe);
int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 266033ae278..68bf9fbd8fe 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -662,7 +662,7 @@ adapter_error:
return rc;
}
-int smsdvb_module_init(void)
+static int __init smsdvb_module_init(void)
{
int rc;
@@ -676,7 +676,7 @@ int smsdvb_module_init(void)
return rc;
}
-void smsdvb_module_exit(void)
+static void __exit smsdvb_module_exit(void)
{
smscore_unregister_hotplug(smsdvb_hotplug);
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
index 24206cbda26..195244a3e69 100644
--- a/drivers/media/dvb/siano/smssdio.c
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -48,7 +48,7 @@
#define SMSSDIO_INT 0x04
#define SMSSDIO_BLOCK_SIZE 128
-static const struct sdio_device_id smssdio_ids[] = {
+static const struct sdio_device_id smssdio_ids[] __devinitconst = {
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
.driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
@@ -222,7 +222,7 @@ static void smssdio_interrupt(struct sdio_func *func)
smscore_onresponse(smsdev->coredev, cb);
}
-static int smssdio_probe(struct sdio_func *func,
+static int __devinit smssdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int ret;
@@ -338,7 +338,7 @@ static struct sdio_driver smssdio_driver = {
/* Module functions */
/*******************************************************************/
-int smssdio_module_init(void)
+static int __init smssdio_module_init(void)
{
int ret = 0;
@@ -350,7 +350,7 @@ int smssdio_module_init(void)
return ret;
}
-void smssdio_module_exit(void)
+static void __exit smssdio_module_exit(void)
{
sdio_unregister_driver(&smssdio_driver);
}
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 8f88a586b0d..5eac27287d9 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -390,7 +390,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
return rc;
}
-static int smsusb_probe(struct usb_interface *intf,
+static int __devinit smsusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
@@ -484,7 +484,7 @@ static int smsusb_resume(struct usb_interface *intf)
return 0;
}
-struct usb_device_id smsusb_id_table[] = {
+static const struct usb_device_id smsusb_id_table[] __devinitconst = {
{ USB_DEVICE(0x187f, 0x0010),
.driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
{ USB_DEVICE(0x187f, 0x0100),
@@ -533,8 +533,18 @@ struct usb_device_id smsusb_id_table[] = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xb910),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xb980),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xb990),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xc000),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc010),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc080),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc090),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ } /* Terminating entry */
};
@@ -550,7 +560,7 @@ static struct usb_driver smsusb_driver = {
.resume = smsusb_resume,
};
-int smsusb_module_init(void)
+static int __init smsusb_module_init(void)
{
int rc = usb_register(&smsusb_driver);
if (rc)
@@ -561,7 +571,7 @@ int smsusb_module_init(void)
return rc;
}
-void smsusb_module_exit(void)
+static void __exit smsusb_module_exit(void)
{
/* Regular USB Cleanup */
usb_deregister(&smsusb_driver);
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 7d193ebc0ae..9782e059373 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -190,12 +190,13 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
struct saa7146_dev *saa = budget_ci->budget.dev;
struct input_dev *input_dev = budget_ci->ir.dev;
int error;
+ struct ir_scancode_table *ir_codes;
+
budget_ci->ir.dev = input_dev = input_allocate_device();
if (!input_dev) {
printk(KERN_ERR "budget_ci: IR interface initialisation failed\n");
- error = -ENOMEM;
- goto out1;
+ return -ENOMEM;
}
snprintf(budget_ci->ir.name, sizeof(budget_ci->ir.name),
@@ -217,6 +218,11 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
}
input_dev->dev.parent = &saa->pci->dev;
+ if (rc5_device < 0)
+ budget_ci->ir.rc5_device = IR_DEVICE_ANY;
+ else
+ budget_ci->ir.rc5_device = rc5_device;
+
/* Select keymap and address */
switch (budget_ci->budget.dev->pci->subsystem_device) {
case 0x100c:
@@ -224,53 +230,34 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
case 0x1011:
case 0x1012:
/* The hauppauge keymap is a superset of these remotes */
- error = ir_input_init(input_dev, &budget_ci->ir.state,
- IR_TYPE_RC5, &ir_codes_hauppauge_new_table);
- if (error < 0)
- goto out2;
+ ir_codes = &ir_codes_hauppauge_new_table;
if (rc5_device < 0)
budget_ci->ir.rc5_device = 0x1f;
- else
- budget_ci->ir.rc5_device = rc5_device;
break;
case 0x1010:
case 0x1017:
case 0x101a:
/* for the Technotrend 1500 bundled remote */
- error = ir_input_init(input_dev, &budget_ci->ir.state,
- IR_TYPE_RC5, &ir_codes_tt_1500_table);
- if (error < 0)
- goto out2;
-
- if (rc5_device < 0)
- budget_ci->ir.rc5_device = IR_DEVICE_ANY;
- else
- budget_ci->ir.rc5_device = rc5_device;
+ ir_codes = &ir_codes_tt_1500_table;
break;
default:
/* unknown remote */
- error = ir_input_init(input_dev, &budget_ci->ir.state,
- IR_TYPE_RC5, &ir_codes_budget_ci_old_table);
- if (error < 0)
- goto out2;
-
- if (rc5_device < 0)
- budget_ci->ir.rc5_device = IR_DEVICE_ANY;
- else
- budget_ci->ir.rc5_device = rc5_device;
+ ir_codes = &ir_codes_budget_ci_old_table;
break;
}
+ ir_input_init(input_dev, &budget_ci->ir.state, IR_TYPE_RC5);
+
/* initialise the key-up timeout handler */
init_timer(&budget_ci->ir.timer_keyup);
budget_ci->ir.timer_keyup.function = msp430_ir_keyup;
budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir;
budget_ci->ir.last_raw = 0xffff; /* An impossible value */
- error = input_register_device(input_dev);
+ error = ir_input_register(input_dev, ir_codes);
if (error) {
printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
- goto out2;
+ return error;
}
/* note: these must be after input_register_device */
@@ -284,12 +271,6 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI);
return 0;
-
-out2:
- ir_input_free(input_dev);
- input_free_device(input_dev);
-out1:
- return error;
}
static void msp430_ir_deinit(struct budget_ci *budget_ci)
@@ -304,8 +285,7 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci)
del_timer_sync(&dev->timer);
ir_input_nokey(dev, &budget_ci->ir.state);
- ir_input_free(dev);
- input_unregister_device(dev);
+ ir_input_unregister(dev);
}
static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 4c2b8a24677..3f40f375981 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -215,13 +215,10 @@ config RADIO_MIROPCM20
module will be called radio-miropcm20.
config RADIO_SF16FMI
- tristate "SF16FMI Radio"
+ tristate "SF16-FMI/SF16-FMP Radio"
depends on ISA && VIDEO_V4L2
---help---
- Choose Y here if you have one of these FM radio cards. If you
- compile the driver into the kernel and your card is not PnP one, you
- have to add "sf16fm=<io>" to the kernel command line (I/O address is
- 0x284 or 0x384).
+ Choose Y here if you have one of these FM radio cards.
In order to control your radio card, you will need to use programs
that are compatible with the Video For Linux API. Information on
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 35edee009ba..5bf4985daed 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -268,6 +268,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct rtrack *rt = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
rt_setfreq(rt, f->frequency);
return 0;
}
@@ -277,6 +279,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct rtrack *rt = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = rt->curfreq;
return 0;
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 8daf809eb01..c2231139362 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -254,6 +254,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct aztech *az = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
az_setfreq(az, f->frequency);
return 0;
}
@@ -263,6 +265,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct aztech *az = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = az->curfreq;
return 0;
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index c6cf1166186..000f4d34087 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -240,6 +240,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct gemtek_pci *card = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
if (f->frequency < GEMTEK_PCI_RANGE_LOW ||
f->frequency > GEMTEK_PCI_RANGE_HIGH)
return -EINVAL;
@@ -253,6 +255,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct gemtek_pci *card = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = card->current_frequency;
return 0;
diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c
index 64d737c35ac..f8213b7c8dd 100644
--- a/drivers/media/radio/radio-maestro.c
+++ b/drivers/media/radio/radio-maestro.c
@@ -200,6 +200,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct maestro *dev = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
if (f->frequency < FREQ_LO || f->frequency > FREQ_HI)
return -EINVAL;
mutex_lock(&dev->lock);
@@ -213,6 +215,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct maestro *dev = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
mutex_lock(&dev->lock);
f->frequency = BITS2FREQ(radio_bits_get(dev));
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 3da51fe8fb9..44b4dbedb32 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -262,6 +262,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct maxiradio *dev = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
if (f->frequency < FREQ_LO || f->frequency > FREQ_HI) {
dprintk(dev, 1, "radio freq (%d.%02d MHz) out of range (%d-%d)\n",
f->frequency / 16000,
@@ -285,6 +287,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct maxiradio *dev = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = dev->freq;
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 949f60513d9..02a9cefc9a0 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -374,6 +374,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct amradio_device *radio = file->private_data;
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
return amradio_setfreq(radio, f->frequency);
}
@@ -383,6 +385,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct amradio_device *radio = file->private_data;
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = radio->curfreq;
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 9cb193fa6e3..a79296aac9a 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -167,6 +167,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct rtrack2 *rt = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
rt_setfreq(rt, f->frequency);
return 0;
}
@@ -176,6 +178,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct rtrack2 *rt = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = rt->curfreq;
return 0;
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 49c4aab95da..985359d18aa 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -1,4 +1,4 @@
-/* SF16FMI radio driver for Linux radio support
+/* SF16-FMI and SF16-FMP radio driver for Linux radio support
* heavily based on rtrack driver...
* (c) 1997 M. Kirkwood
* (c) 1998 Petr Vandrovec, vandrove@vc.cvut.cz
@@ -11,7 +11,7 @@
*
* Frequency control is done digitally -- ie out(port,encodefreq(95.8));
* No volume control - only mute/unmute - you have to use line volume
- * control on SB-part of SF16FMI
+ * control on SB-part of SF16-FMI/SF16-FMP
*
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
*/
@@ -30,14 +30,14 @@
#include <media/v4l2-ioctl.h>
MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
-MODULE_DESCRIPTION("A driver for the SF16MI radio.");
+MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio.");
MODULE_LICENSE("GPL");
static int io = -1;
static int radio_nr = -1;
module_param(io, int, 0);
-MODULE_PARM_DESC(io, "I/O address of the SF16MI card (0x284 or 0x384)");
+MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)");
module_param(radio_nr, int, 0);
#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
@@ -47,13 +47,14 @@ struct fmi
struct v4l2_device v4l2_dev;
struct video_device vdev;
int io;
- int curvol; /* 1 or 0 */
+ bool mute;
unsigned long curfreq; /* freq in kHz */
struct mutex lock;
};
static struct fmi fmi_card;
static struct pnp_dev *dev;
+static bool pnp_attached;
/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */
/* It is only useful to give freq in interval of 800 (=0.05Mhz),
@@ -105,7 +106,7 @@ static inline int fmi_setfreq(struct fmi *fmi, unsigned long freq)
outbits(8, 0xC0, fmi->io);
msleep(143); /* was schedule_timeout(HZ/7) */
mutex_unlock(&fmi->lock);
- if (fmi->curvol)
+ if (!fmi->mute)
fmi_unmute(fmi);
return 0;
}
@@ -116,7 +117,7 @@ static inline int fmi_getsigstr(struct fmi *fmi)
int res;
mutex_lock(&fmi->lock);
- val = fmi->curvol ? 0x08 : 0x00; /* unmute/mute */
+ val = fmi->mute ? 0x00 : 0x08; /* mute/unmute */
outb(val, fmi->io);
outb(val | 0x10, fmi->io);
msleep(143); /* was schedule_timeout(HZ/7) */
@@ -168,6 +169,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct fmi *fmi = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
if (f->frequency < RSF16_MINFREQ ||
f->frequency > RSF16_MAXFREQ)
return -EINVAL;
@@ -182,6 +185,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct fmi *fmi = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = fmi->curfreq;
return 0;
@@ -204,7 +209,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- ctrl->value = fmi->curvol;
+ ctrl->value = fmi->mute;
return 0;
}
return -EINVAL;
@@ -221,7 +226,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
fmi_mute(fmi);
else
fmi_unmute(fmi);
- fmi->curvol = ctrl->value;
+ fmi->mute = ctrl->value;
return 0;
}
return -EINVAL;
@@ -316,26 +321,54 @@ static int __init fmi_init(void)
{
struct fmi *fmi = &fmi_card;
struct v4l2_device *v4l2_dev = &fmi->v4l2_dev;
- int res;
+ int res, i;
+ int probe_ports[] = { 0, 0x284, 0x384 };
+
+ if (io < 0) {
+ for (i = 0; i < ARRAY_SIZE(probe_ports); i++) {
+ io = probe_ports[i];
+ if (io == 0) {
+ io = isapnp_fmi_probe();
+ if (io < 0)
+ continue;
+ pnp_attached = 1;
+ }
+ if (!request_region(io, 2, "radio-sf16fmi")) {
+ if (pnp_attached)
+ pnp_device_detach(dev);
+ io = -1;
+ continue;
+ }
+ if (pnp_attached ||
+ ((inb(io) & 0xf9) == 0xf9 && (inb(io) & 0x4) == 0))
+ break;
+ release_region(io, 2);
+ io = -1;
+ }
+ } else {
+ if (!request_region(io, 2, "radio-sf16fmi")) {
+ printk(KERN_ERR "radio-sf16fmi: port %#x already in use\n", io);
+ return -EBUSY;
+ }
+ if (inb(io) == 0xff) {
+ printk(KERN_ERR "radio-sf16fmi: card not present at %#x\n", io);
+ release_region(io, 2);
+ return -ENODEV;
+ }
+ }
+ if (io < 0) {
+ printk(KERN_ERR "radio-sf16fmi: no cards found\n");
+ return -ENODEV;
+ }
- if (io < 0)
- io = isapnp_fmi_probe();
strlcpy(v4l2_dev->name, "sf16fmi", sizeof(v4l2_dev->name));
fmi->io = io;
- if (fmi->io < 0) {
- v4l2_err(v4l2_dev, "No PnP card found.\n");
- return fmi->io;
- }
- if (!request_region(io, 2, "radio-sf16fmi")) {
- v4l2_err(v4l2_dev, "port 0x%x already in use\n", fmi->io);
- pnp_device_detach(dev);
- return -EBUSY;
- }
res = v4l2_device_register(NULL, v4l2_dev);
if (res < 0) {
release_region(fmi->io, 2);
- pnp_device_detach(dev);
+ if (pnp_attached)
+ pnp_device_detach(dev);
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
return res;
}
@@ -352,7 +385,8 @@ static int __init fmi_init(void)
if (video_register_device(&fmi->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
v4l2_device_unregister(v4l2_dev);
release_region(fmi->io, 2);
- pnp_device_detach(dev);
+ if (pnp_attached)
+ pnp_device_detach(dev);
return -EINVAL;
}
@@ -369,7 +403,7 @@ static void __exit fmi_exit(void)
video_unregister_device(&fmi->vdev);
v4l2_device_unregister(&fmi->v4l2_dev);
release_region(fmi->io, 2);
- if (dev)
+ if (dev && pnp_attached)
pnp_device_detach(dev);
}
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index a11414f648d..52c7bbb32b8 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -251,6 +251,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct fmr2 *fmr2 = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
if (f->frequency < RSF16_MINFREQ ||
f->frequency > RSF16_MAXFREQ)
return -EINVAL;
@@ -272,6 +274,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct fmr2 *fmr2 = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = fmr2->curfreq;
return 0;
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 3cd76dddb6a..8e718bfcdad 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -314,7 +314,7 @@ static int vidioc_g_tuner(struct file *file, void *priv,
if (v->index > 0)
return -EINVAL;
- memset(v, 0, sizeof(v));
+ memset(v, 0, sizeof(*v));
strcpy(v->name, "FM");
v->type = V4L2_TUNER_RADIO;
tea5764_i2c_read(radio);
@@ -349,7 +349,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct tea5764_device *radio = video_drvdata(file);
- if (f->tuner != 0)
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
return -EINVAL;
if (f->frequency == 0) {
/* We special case this as a power down control. */
@@ -370,8 +370,10 @@ static int vidioc_g_frequency(struct file *file, void *priv,
struct tea5764_device *radio = video_drvdata(file);
struct tea5764_regs *r = &radio->regs;
+ if (f->tuner != 0)
+ return -EINVAL;
tea5764_i2c_read(radio);
- memset(f, 0, sizeof(f));
+ memset(f, 0, sizeof(*f));
f->type = V4L2_TUNER_RADIO;
if (r->tnctrl & TEA5764_TNCTRL_PUPD0)
f->frequency = (tea5764_get_freq(radio) * 2) / 125;
@@ -458,12 +460,8 @@ static int vidioc_s_audio(struct file *file, void *priv,
static int tea5764_open(struct file *file)
{
/* Currently we support only one device */
- int minor = video_devdata(file)->minor;
struct tea5764_device *radio = video_drvdata(file);
- if (radio->videodev->minor != minor)
- return -ENODEV;
-
mutex_lock(&radio->mutex);
/* Only exclusive access */
if (radio->users) {
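
Besides dropping the redundant minor check in open(), the two memset() changes in radio-tea5764 fix a real bug: sizeof(v) and sizeof(f) are the size of a pointer, so only the first 4 or 8 bytes of the structures were being cleared, while sizeof(*v) and sizeof(*f) clear the whole structure. Illustrative only:

#include <linux/string.h>
#include <linux/videodev2.h>

static void clear_tuner(struct v4l2_tuner *v)
{
	/* memset(v, 0, sizeof(v)) cleared only sizeof(void *) bytes */
	memset(v, 0, sizeof(*v));	/* clears the whole structure */
}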
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index 699db9acaaf..fc1c860fd43 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -240,6 +240,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct terratec *tt = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
tt_setfreq(tt, f->frequency);
return 0;
}
@@ -249,6 +251,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct terratec *tt = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = tt->curfreq;
return 0;
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 6f9ecc35935..9d6dcf8af5b 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -239,6 +239,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct trust *tr = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
tr_setfreq(tr, f->frequency);
return 0;
}
@@ -248,6 +250,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct trust *tr = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = tr->curfreq;
return 0;
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 3a98f139949..03439282dfc 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -207,6 +207,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct typhoon *dev = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = dev->curfreq;
return 0;
@@ -217,6 +219,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct typhoon *dev = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
dev->curfreq = f->frequency;
typhoon_setfreq(dev, dev->curfreq);
return 0;
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 80e98b6422f..f31eab99c94 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -266,6 +266,8 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct zoltrix *zol = video_drvdata(file);
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
if (zol_setfreq(zol, f->frequency) != 0)
return -EINVAL;
return 0;
@@ -276,6 +278,8 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct zoltrix *zol = video_drvdata(file);
+ if (f->tuner != 0)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
f->frequency = zol->curfreq;
return 0;
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index f33315f2c54..4da0f150c6e 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -426,6 +426,104 @@ int si470x_rds_on(struct si470x_device *radio)
/**************************************************************************
+ * File Operations Interface
+ **************************************************************************/
+
+/*
+ * si470x_fops_read - read RDS data
+ */
+static ssize_t si470x_fops_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct si470x_device *radio = video_drvdata(file);
+ int retval = 0;
+ unsigned int block_count = 0;
+
+ /* switch on rds reception */
+ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
+ si470x_rds_on(radio);
+
+ /* block if no new data available */
+ while (radio->wr_index == radio->rd_index) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EWOULDBLOCK;
+ goto done;
+ }
+ if (wait_event_interruptible(radio->read_queue,
+ radio->wr_index != radio->rd_index) < 0) {
+ retval = -EINTR;
+ goto done;
+ }
+ }
+
+ /* calculate block count from byte count */
+ count /= 3;
+
+ /* copy RDS block out of internal buffer and to user buffer */
+ mutex_lock(&radio->lock);
+ while (block_count < count) {
+ if (radio->rd_index == radio->wr_index)
+ break;
+
+ /* always transfer rds complete blocks */
+ if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
+ /* retval = -EFAULT; */
+ break;
+
+ /* increment and wrap read pointer */
+ radio->rd_index += 3;
+ if (radio->rd_index >= radio->buf_size)
+ radio->rd_index = 0;
+
+ /* increment counters */
+ block_count++;
+ buf += 3;
+ retval += 3;
+ }
+ mutex_unlock(&radio->lock);
+
+done:
+ return retval;
+}
+
+
+/*
+ * si470x_fops_poll - poll RDS data
+ */
+static unsigned int si470x_fops_poll(struct file *file,
+ struct poll_table_struct *pts)
+{
+ struct si470x_device *radio = video_drvdata(file);
+ int retval = 0;
+
+ /* switch on rds reception */
+ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
+ si470x_rds_on(radio);
+
+ poll_wait(file, &radio->read_queue, pts);
+
+ if (radio->rd_index != radio->wr_index)
+ retval = POLLIN | POLLRDNORM;
+
+ return retval;
+}
+
+
+/*
+ * si470x_fops - file operations interface
+ */
+static const struct v4l2_file_operations si470x_fops = {
+ .owner = THIS_MODULE,
+ .read = si470x_fops_read,
+ .poll = si470x_fops_poll,
+ .ioctl = video_ioctl2,
+ .open = si470x_fops_open,
+ .release = si470x_fops_release,
+};
+
+
+
+/**************************************************************************
* Video4Linux Interface
**************************************************************************/
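
The read() and poll() RDS file operations move from the USB front end into radio-si470x-common.c (the matching removal is visible further down in radio-si470x-usb.c), so the new I2C interrupt path can feed the same buffer. From userspace the interface is unchanged: read() returns whole 3-byte blocks, two data bytes in little-endian order plus a status byte carrying the block id and error flags. A hedged userspace sketch, assuming the node is /dev/radio0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char blk[3];
	int fd = open("/dev/radio0", O_RDONLY);	/* node name assumed */

	if (fd < 0)
		return 1;
	/* each read() returns whole 3-byte blocks: a little-endian 16-bit
	 * word plus a status byte (block id in the low bits, 0x40 =
	 * corrected errors, 0x80 = uncorrectable) */
	while (read(fd, blk, sizeof(blk)) == (ssize_t)sizeof(blk))
		printf("block %d data %04x %s\n", blk[2] & 0x7,
		       (blk[1] << 8) | blk[0],
		       (blk[2] & 0x80) ? "uncorrectable" :
		       (blk[2] & 0x40) ? "corrected" : "ok");
	close(fd);
	return 0;
}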
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 2d53b6a9409..5466015346a 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -22,22 +22,17 @@
*/
-/*
- * ToDo:
- * - RDS support
- */
-
-
/* driver definitions */
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
-#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 0)
+#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 1)
#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
-#define DRIVER_VERSION "1.0.0"
+#define DRIVER_VERSION "1.0.1"
/* kernel includes */
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include "radio-si470x.h"
@@ -62,6 +57,20 @@ static int radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");
+/* RDS buffer blocks */
+static unsigned int rds_buf = 100;
+module_param(rds_buf, uint, 0444);
+MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*");
+
+/* RDS maximum block errors */
+static unsigned short max_rds_errors = 1;
+/* 0 means 0 errors requiring correction */
+/* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */
+/* 2 means 3-5 errors requiring correction */
+/* 3 means 6+ errors or errors in checkword, correction not possible */
+module_param(max_rds_errors, ushort, 0644);
+MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
+
/**************************************************************************
@@ -173,7 +182,7 @@ int si470x_disconnect_check(struct si470x_device *radio)
/*
* si470x_fops_open - file open
*/
-static int si470x_fops_open(struct file *file)
+int si470x_fops_open(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
@@ -181,12 +190,21 @@ static int si470x_fops_open(struct file *file)
mutex_lock(&radio->lock);
radio->users++;
- if (radio->users == 1)
+ if (radio->users == 1) {
/* start radio */
retval = si470x_start(radio);
+ if (retval < 0)
+ goto done;
+
+ /* enable RDS interrupt */
+ radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDSIEN;
+ radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_GPIO2;
+ radio->registers[SYSCONFIG1] |= 0x1 << 2;
+ retval = si470x_set_register(radio, SYSCONFIG1);
+ }
+done:
mutex_unlock(&radio->lock);
-
return retval;
}
@@ -194,7 +212,7 @@ static int si470x_fops_open(struct file *file)
/*
* si470x_fops_release - file release
*/
-static int si470x_fops_release(struct file *file)
+int si470x_fops_release(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
@@ -215,17 +233,6 @@ static int si470x_fops_release(struct file *file)
}
-/*
- * si470x_fops - file operations interface
- */
-const struct v4l2_file_operations si470x_fops = {
- .owner = THIS_MODULE,
- .ioctl = video_ioctl2,
- .open = si470x_fops_open,
- .release = si470x_fops_release,
-};
-
-
/**************************************************************************
* Video4Linux Interface
@@ -253,6 +260,105 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
**************************************************************************/
/*
+ * si470x_i2c_interrupt_work - rds processing function
+ */
+static void si470x_i2c_interrupt_work(struct work_struct *work)
+{
+ struct si470x_device *radio = container_of(work,
+ struct si470x_device, radio_work);
+ unsigned char regnr;
+ unsigned char blocknum;
+ unsigned short bler; /* rds block errors */
+ unsigned short rds;
+ unsigned char tmpbuf[3];
+ int retval = 0;
+
+ /* safety checks */
+ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
+ return;
+
+ /* Update RDS registers */
+ for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++) {
+ retval = si470x_get_register(radio, STATUSRSSI + regnr);
+ if (retval < 0)
+ return;
+ }
+
+ /* get rds blocks */
+ if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0)
+ /* No RDS group ready, better luck next time */
+ return;
+
+ for (blocknum = 0; blocknum < 4; blocknum++) {
+ switch (blocknum) {
+ default:
+ bler = (radio->registers[STATUSRSSI] &
+ STATUSRSSI_BLERA) >> 9;
+ rds = radio->registers[RDSA];
+ break;
+ case 1:
+ bler = (radio->registers[READCHAN] &
+ READCHAN_BLERB) >> 14;
+ rds = radio->registers[RDSB];
+ break;
+ case 2:
+ bler = (radio->registers[READCHAN] &
+ READCHAN_BLERC) >> 12;
+ rds = radio->registers[RDSC];
+ break;
+ case 3:
+ bler = (radio->registers[READCHAN] &
+ READCHAN_BLERD) >> 10;
+ rds = radio->registers[RDSD];
+ break;
+ };
+
+ /* Fill the V4L2 RDS buffer */
+ put_unaligned_le16(rds, &tmpbuf);
+ tmpbuf[2] = blocknum; /* offset name */
+ tmpbuf[2] |= blocknum << 3; /* received offset */
+ if (bler > max_rds_errors)
+ tmpbuf[2] |= 0x80; /* uncorrectable errors */
+ else if (bler > 0)
+ tmpbuf[2] |= 0x40; /* corrected error(s) */
+
+ /* copy RDS block to internal buffer */
+ memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3);
+ radio->wr_index += 3;
+
+ /* wrap write pointer */
+ if (radio->wr_index >= radio->buf_size)
+ radio->wr_index = 0;
+
+ /* check for overflow */
+ if (radio->wr_index == radio->rd_index) {
+ /* increment and wrap read pointer */
+ radio->rd_index += 3;
+ if (radio->rd_index >= radio->buf_size)
+ radio->rd_index = 0;
+ }
+ }
+
+ if (radio->wr_index != radio->rd_index)
+ wake_up_interruptible(&radio->read_queue);
+}
+
+
+/*
+ * si470x_i2c_interrupt - interrupt handler
+ */
+static irqreturn_t si470x_i2c_interrupt(int irq, void *dev_id)
+{
+ struct si470x_device *radio = dev_id;
+
+ if (!work_pending(&radio->radio_work))
+ schedule_work(&radio->radio_work);
+
+ return IRQ_HANDLED;
+}
+
+
+/*
* si470x_i2c_probe - probe for the device
*/
static int __devinit si470x_i2c_probe(struct i2c_client *client,
@@ -268,6 +374,8 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
retval = -ENOMEM;
goto err_initial;
}
+
+ INIT_WORK(&radio->radio_work, si470x_i2c_interrupt_work);
radio->users = 0;
radio->client = client;
mutex_init(&radio->lock);
@@ -319,6 +427,26 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
/* set initial frequency */
si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
+ /* rds buffer allocation */
+ radio->buf_size = rds_buf * 3;
+ radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
+ if (!radio->buffer) {
+ retval = -EIO;
+ goto err_video;
+ }
+
+ /* rds buffer configuration */
+ radio->wr_index = 0;
+ radio->rd_index = 0;
+ init_waitqueue_head(&radio->read_queue);
+
+ retval = request_irq(client->irq, si470x_i2c_interrupt,
+ IRQF_TRIGGER_FALLING, DRIVER_NAME, radio);
+ if (retval) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ goto err_rds;
+ }
+
/* register video device */
retval = video_register_device(radio->videodev, VFL_TYPE_RADIO,
radio_nr);
@@ -330,6 +458,9 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
return 0;
err_all:
+ free_irq(client->irq, radio);
+err_rds:
+ kfree(radio->buffer);
err_video:
video_device_release(radio->videodev);
err_radio:
@@ -346,6 +477,8 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
{
struct si470x_device *radio = i2c_get_clientdata(client);
+ free_irq(client->irq, radio);
+ cancel_work_sync(&radio->radio_work);
video_unregister_device(radio->videodev);
kfree(radio);
i2c_set_clientdata(client, NULL);
@@ -354,6 +487,44 @@ static __devexit int si470x_i2c_remove(struct i2c_client *client)
}
+#ifdef CONFIG_PM
+/*
+ * si470x_i2c_suspend - suspend the device
+ */
+static int si470x_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ struct si470x_device *radio = i2c_get_clientdata(client);
+
+ /* power down */
+ radio->registers[POWERCFG] |= POWERCFG_DISABLE;
+ if (si470x_set_register(radio, POWERCFG) < 0)
+ return -EIO;
+
+ return 0;
+}
+
+
+/*
+ * si470x_i2c_resume - resume the device
+ */
+static int si470x_i2c_resume(struct i2c_client *client)
+{
+ struct si470x_device *radio = i2c_get_clientdata(client);
+
+ /* power up : need 110ms */
+ radio->registers[POWERCFG] |= POWERCFG_ENABLE;
+ if (si470x_set_register(radio, POWERCFG) < 0)
+ return -EIO;
+ msleep(110);
+
+ return 0;
+}
+#else
+#define si470x_i2c_suspend NULL
+#define si470x_i2c_resume NULL
+#endif
+
+
/*
* si470x_i2c_driver - i2c driver interface
*/
@@ -364,6 +535,8 @@ static struct i2c_driver si470x_i2c_driver = {
},
.probe = si470x_i2c_probe,
.remove = __devexit_p(si470x_i2c_remove),
+ .suspend = si470x_i2c_suspend,
+ .resume = si470x_i2c_resume,
.id_table = si470x_i2c_id,
};
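
In the I2C variant RDS data arrives via an interrupt: si470x_i2c_probe() requests the client IRQ, the handler only schedules radio_work, and si470x_i2c_interrupt_work() reads the status/RDS registers and pushes each block into the 3-byte-per-entry ring buffer consumed by the common read()/poll() above; when the write pointer catches the read pointer the oldest block is discarded. The producer side of that ring as a sketch, assuming the same wr_index/rd_index/buf_size fields; rds_push_block() itself is illustrative:

#include <linux/string.h>
#include "radio-si470x.h"

static void rds_push_block(struct si470x_device *radio,
			   const unsigned char block[3])
{
	memcpy(&radio->buffer[radio->wr_index], block, 3);

	radio->wr_index += 3;
	if (radio->wr_index >= radio->buf_size)
		radio->wr_index = 0;

	/* on overflow, advance rd_index so the oldest block is dropped */
	if (radio->wr_index == radio->rd_index) {
		radio->rd_index += 3;
		if (radio->rd_index >= radio->buf_size)
			radio->rd_index = 0;
	}
}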
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index f2d0e1ddb30..a96e1b9dd64 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -509,89 +509,9 @@ resubmit:
**************************************************************************/
/*
- * si470x_fops_read - read RDS data
- */
-static ssize_t si470x_fops_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
- unsigned int block_count = 0;
-
- /* switch on rds reception */
- if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
- si470x_rds_on(radio);
-
- /* block if no new data available */
- while (radio->wr_index == radio->rd_index) {
- if (file->f_flags & O_NONBLOCK) {
- retval = -EWOULDBLOCK;
- goto done;
- }
- if (wait_event_interruptible(radio->read_queue,
- radio->wr_index != radio->rd_index) < 0) {
- retval = -EINTR;
- goto done;
- }
- }
-
- /* calculate block count from byte count */
- count /= 3;
-
- /* copy RDS block out of internal buffer and to user buffer */
- mutex_lock(&radio->lock);
- while (block_count < count) {
- if (radio->rd_index == radio->wr_index)
- break;
-
- /* always transfer rds complete blocks */
- if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
- /* retval = -EFAULT; */
- break;
-
- /* increment and wrap read pointer */
- radio->rd_index += 3;
- if (radio->rd_index >= radio->buf_size)
- radio->rd_index = 0;
-
- /* increment counters */
- block_count++;
- buf += 3;
- retval += 3;
- }
- mutex_unlock(&radio->lock);
-
-done:
- return retval;
-}
-
-
-/*
- * si470x_fops_poll - poll RDS data
- */
-static unsigned int si470x_fops_poll(struct file *file,
- struct poll_table_struct *pts)
-{
- struct si470x_device *radio = video_drvdata(file);
- int retval = 0;
-
- /* switch on rds reception */
- if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
- si470x_rds_on(radio);
-
- poll_wait(file, &radio->read_queue, pts);
-
- if (radio->rd_index != radio->wr_index)
- retval = POLLIN | POLLRDNORM;
-
- return retval;
-}
-
-
-/*
* si470x_fops_open - file open
*/
-static int si470x_fops_open(struct file *file)
+int si470x_fops_open(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
int retval;
@@ -645,7 +565,7 @@ done:
/*
* si470x_fops_release - file release
*/
-static int si470x_fops_release(struct file *file)
+int si470x_fops_release(struct file *file)
{
struct si470x_device *radio = video_drvdata(file);
int retval = 0;
@@ -688,19 +608,6 @@ done:
}
-/*
- * si470x_fops - file operations interface
- */
-const struct v4l2_file_operations si470x_fops = {
- .owner = THIS_MODULE,
- .read = si470x_fops_read,
- .poll = si470x_fops_poll,
- .ioctl = video_ioctl2,
- .open = si470x_fops_open,
- .release = si470x_fops_release,
-};
-
-
/**************************************************************************
* Video4Linux Interface
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index d0af194d194..3cd0a29cd6e 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/input.h>
@@ -181,6 +182,7 @@ struct si470x_device {
#if defined(CONFIG_I2C_SI470X) || defined(CONFIG_I2C_SI470X_MODULE)
struct i2c_client *client;
+ struct work_struct radio_work;
#endif
};
@@ -212,7 +214,6 @@ struct si470x_device {
/**************************************************************************
* Common Functions
**************************************************************************/
-extern const struct v4l2_file_operations si470x_fops;
extern struct video_device si470x_viddev_template;
int si470x_get_register(struct si470x_device *radio, int regnr);
int si470x_set_register(struct si470x_device *radio, int regnr);
@@ -221,5 +222,7 @@ int si470x_set_freq(struct si470x_device *radio, unsigned int freq);
int si470x_start(struct si470x_device *radio);
int si470x_stop(struct si470x_device *radio);
int si470x_rds_on(struct si470x_device *radio);
+int si470x_fops_open(struct file *file);
+int si470x_fops_release(struct file *file);
int si470x_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *capability);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 9dc74c93bf2..2f83be766d9 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -37,10 +37,6 @@ config VIDEO_BTCX
depends on PCI
tristate
-config VIDEO_IR
- tristate
- depends on INPUT
-
config VIDEO_TVEEPROM
tristate
depends on I2C
@@ -840,6 +836,12 @@ config SOC_CAMERA_MT9T031
help
This driver supports MT9T031 cameras from Micron.
+config SOC_CAMERA_MT9T112
+ tristate "mt9t112 support"
+ depends on SOC_CAMERA && I2C
+ help
+ This driver supports MT9T112 cameras from Aptina.
+
config SOC_CAMERA_MT9V022
tristate "mt9v022 support"
depends on SOC_CAMERA && I2C
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 7a2dcc34111..2af68ee8412 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
+obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
@@ -149,7 +150,7 @@ obj-$(CONFIG_VIDEO_VIVI) += vivi.o
obj-$(CONFIG_VIDEO_CX23885) += cx23885/
obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
-obj-$(CONFIG_SOC_CAMERA) += soc_camera.o
+obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o
obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
# soc-camera host drivers have to be linked after camera drivers
obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index d137bac8451..a356d6bd313 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -767,7 +767,6 @@ static struct video_device ar_template = {
.name = "Colour AR VGA",
.fops = &ar_fops,
.release = ar_release,
- .minor = -1,
};
#define ALIGN4(x) ((((int)(x)) & 0x3) == 0)
@@ -860,8 +859,8 @@ static int __init ar_init(void)
goto out_dev;
}
- printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
- ar->vdev->num, M32R_IRQ_INT3, freq);
+ printk("%s: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
+ video_device_node_name(ar->vdev), M32R_IRQ_INT3, freq);
return 0;
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 1485aee18d5..dc67bc40f36 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -40,7 +40,6 @@
#include "au0828.h"
#include "au0828-reg.h"
-static LIST_HEAD(au0828_devlist);
static DEFINE_MUTEX(au0828_sysfs_lock);
#define AU0828_VERSION_CODE KERNEL_VERSION(0, 0, 1)
@@ -693,10 +692,8 @@ void au0828_analog_unregister(struct au0828_dev *dev)
dprintk(1, "au0828_release_resources called\n");
mutex_lock(&au0828_sysfs_lock);
- if (dev->vdev) {
- list_del(&dev->au0828list);
+ if (dev->vdev)
video_unregister_device(dev->vdev);
- }
if (dev->vbi_dev)
video_unregister_device(dev->vbi_dev);
@@ -737,29 +734,15 @@ static void res_free(struct au0828_fh *fh)
static int au0828_v4l2_open(struct file *filp)
{
- int minor = video_devdata(filp)->minor;
int ret = 0;
- struct au0828_dev *h, *dev = NULL;
+ struct au0828_dev *dev = video_drvdata(filp);
struct au0828_fh *fh;
- int type = 0;
- struct list_head *list;
-
- list_for_each(list, &au0828_devlist) {
- h = list_entry(list, struct au0828_dev, au0828list);
- if (h->vdev->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
+ int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
#ifdef VBI_IS_WORKING
- if (h->vbi_dev->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- }
+ if (video_devdata(filp)->vfl_type == VFL_TYPE_GRABBER)
+ type = V4L2_BUF_TYPE_VBI_CAPTURE;
#endif
- }
-
- if (NULL == dev)
- return -ENODEV;
fh = kzalloc(sizeof(struct au0828_fh), GFP_KERNEL);
if (NULL == fh) {
@@ -1587,7 +1570,6 @@ static const struct video_device au0828_video_template = {
.fops = &au0828_v4l_fops,
.release = video_device_release,
.ioctl_ops = &video_ioctl_ops,
- .minor = -1,
.tvnorms = V4L2_STD_NTSC_M,
.current_norm = V4L2_STD_NTSC_M,
};
@@ -1676,25 +1658,23 @@ int au0828_analog_register(struct au0828_dev *dev,
strcpy(dev->vbi_dev->name, "au0828a vbi");
#endif
- list_add_tail(&dev->au0828list, &au0828_devlist);
-
/* Register the v4l2 device */
+ video_set_drvdata(dev->vdev, dev);
retval = video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
if (retval != 0) {
dprintk(1, "unable to register video device (error = %d).\n",
retval);
- list_del(&dev->au0828list);
video_device_release(dev->vdev);
return -ENODEV;
}
#ifdef VBI_IS_WORKING
/* Register the vbi device */
+ video_set_drvdata(dev->vbi_dev, dev);
retval = video_register_device(dev->vbi_dev, VFL_TYPE_VBI, -1);
if (retval != 0) {
dprintk(1, "unable to register vbi device (error = %d).\n",
retval);
- list_del(&dev->au0828list);
video_device_release(dev->vbi_dev);
video_device_release(dev->vdev);
return -ENODEV;
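
The au0828 open path no longer walks a private au0828_devlist comparing minors: the device pointer is attached with video_set_drvdata() before video_register_device() and recovered with video_drvdata() in open(), while the buffer type is derived from vdev->vfl_type. The same conversion is applied to bttv and cx231xx below. A minimal sketch of the pattern (my_dev, my_register and my_open are illustrative names):

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

struct my_dev;					/* placeholder device type */

static int my_register(struct my_dev *dev, struct video_device *vdev)
{
	video_set_drvdata(vdev, dev);		/* must precede registration */
	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}

static int my_open(struct file *filp)
{
	struct video_device *vdev = video_devdata(filp);
	struct my_dev *dev = video_drvdata(filp);
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	if (vdev->vfl_type == VFL_TYPE_VBI)	/* node type replaces the minor lookup */
		type = V4L2_BUF_TYPE_VBI_CAPTURE;
	/* ... allocate a file handle for (dev, type) ... */
	return 0;
}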
diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
index b977915efbd..207f32dec6a 100644
--- a/drivers/media/video/au0828/au0828.h
+++ b/drivers/media/video/au0828/au0828.h
@@ -192,7 +192,6 @@ struct au0828_dev {
struct au0828_dvb dvb;
/* Analog */
- struct list_head au0828list;
struct v4l2_device v4l2_dev;
int users;
unsigned int stream_on:1; /* Locks streams */
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a6724019c66..3182a406bdd 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3206,24 +3206,24 @@ err:
static int bttv_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
+ struct video_device *vdev = video_devdata(file);
struct bttv *btv = video_drvdata(file);
struct bttv_fh *fh;
enum v4l2_buf_type type = 0;
- dprintk(KERN_DEBUG "bttv: open minor=%d\n",minor);
+ dprintk(KERN_DEBUG "bttv: open dev=%s\n", video_device_node_name(vdev));
- lock_kernel();
- if (btv->video_dev->minor == minor) {
+ if (vdev->vfl_type == VFL_TYPE_GRABBER) {
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- } else if (btv->vbi_dev->minor == minor) {
+ } else if (vdev->vfl_type == VFL_TYPE_VBI) {
type = V4L2_BUF_TYPE_VBI_CAPTURE;
} else {
WARN_ON(1);
- unlock_kernel();
return -ENODEV;
}
+ lock_kernel();
+
dprintk(KERN_DEBUG "bttv%d: open called (type=%s)\n",
btv->c.nr,v4l2_type_names[type]);
@@ -3397,7 +3397,6 @@ static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
static struct video_device bttv_video_template = {
.fops = &bttv_fops,
- .minor = -1,
.ioctl_ops = &bttv_ioctl_ops,
.tvnorms = BTTV_NORMS,
.current_norm = V4L2_STD_PAL,
@@ -3408,18 +3407,13 @@ static struct video_device bttv_video_template = {
static int radio_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
+ struct video_device *vdev = video_devdata(file);
struct bttv *btv = video_drvdata(file);
struct bttv_fh *fh;
- dprintk("bttv: open minor=%d\n",minor);
+ dprintk("bttv: open dev=%s\n", video_device_node_name(vdev));
lock_kernel();
- WARN_ON(btv->radio_dev && btv->radio_dev->minor != minor);
- if (!btv->radio_dev || btv->radio_dev->minor != minor) {
- unlock_kernel();
- return -ENODEV;
- }
dprintk("bttv%d: open called (radio)\n",btv->c.nr);
@@ -3640,7 +3634,6 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
static struct video_device radio_template = {
.fops = &radio_fops,
- .minor = -1,
.ioctl_ops = &radio_ioctl_ops,
};
@@ -4208,21 +4201,21 @@ static struct video_device *vdev_init(struct bttv *btv,
static void bttv_unregister_video(struct bttv *btv)
{
if (btv->video_dev) {
- if (-1 != btv->video_dev->minor)
+ if (video_is_registered(btv->video_dev))
video_unregister_device(btv->video_dev);
else
video_device_release(btv->video_dev);
btv->video_dev = NULL;
}
if (btv->vbi_dev) {
- if (-1 != btv->vbi_dev->minor)
+ if (video_is_registered(btv->vbi_dev))
video_unregister_device(btv->vbi_dev);
else
video_device_release(btv->vbi_dev);
btv->vbi_dev = NULL;
}
if (btv->radio_dev) {
- if (-1 != btv->radio_dev->minor)
+ if (video_is_registered(btv->radio_dev))
video_unregister_device(btv->radio_dev);
else
video_device_release(btv->radio_dev);
@@ -4244,8 +4237,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
if (video_register_device(btv->video_dev, VFL_TYPE_GRABBER,
video_nr[btv->c.nr]) < 0)
goto err;
- printk(KERN_INFO "bttv%d: registered device video%d\n",
- btv->c.nr, btv->video_dev->num);
+ printk(KERN_INFO "bttv%d: registered device %s\n",
+ btv->c.nr, video_device_node_name(btv->video_dev));
if (device_create_file(&btv->video_dev->dev,
&dev_attr_card)<0) {
printk(KERN_ERR "bttv%d: device_create_file 'card' "
@@ -4261,8 +4254,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
if (video_register_device(btv->vbi_dev, VFL_TYPE_VBI,
vbi_nr[btv->c.nr]) < 0)
goto err;
- printk(KERN_INFO "bttv%d: registered device vbi%d\n",
- btv->c.nr, btv->vbi_dev->num);
+ printk(KERN_INFO "bttv%d: registered device %s\n",
+ btv->c.nr, video_device_node_name(btv->vbi_dev));
if (!btv->has_radio)
return 0;
@@ -4273,8 +4266,8 @@ static int __devinit bttv_register_video(struct bttv *btv)
if (video_register_device(btv->radio_dev, VFL_TYPE_RADIO,
radio_nr[btv->c.nr]) < 0)
goto err;
- printk(KERN_INFO "bttv%d: registered device radio%d\n",
- btv->c.nr, btv->radio_dev->num);
+ printk(KERN_INFO "bttv%d: registered device %s\n",
+ btv->c.nr, video_device_node_name(btv->radio_dev));
/* all done */
return 0;
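
bttv now decides between unregister and release with video_is_registered() rather than comparing the minor against -1, and the "registered device" messages go through video_device_node_name(). The cleanup idiom, sketched for a single node:

#include <media/v4l2-dev.h>

static void my_release_vdev(struct video_device *vdev)
{
	if (!vdev)
		return;
	if (video_is_registered(vdev))
		video_unregister_device(vdev);	/* release comes via the core */
	else
		video_device_release(vdev);	/* never registered: free directly */
}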
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index beda363418b..63aa31a041e 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -40,7 +40,7 @@ static int i2c_debug;
static int i2c_hw;
static int i2c_scan;
module_param(i2c_debug, int, 0644);
-MODULE_PARM_DESC(i2c_hw,"configure i2c debug level");
+MODULE_PARM_DESC(i2c_debug, "configure i2c debug level");
module_param(i2c_hw, int, 0444);
MODULE_PARM_DESC(i2c_hw,"force use of hardware i2c support, "
"instead of software bitbang");
@@ -400,7 +400,7 @@ int __devinit init_bttv_i2c(struct bttv *btv)
That's why we probe 0x1a (~0x34) first. CB
*/
const unsigned short addr_list[] = {
- 0x1a, 0x18, 0x4b, 0x64, 0x30,
+ 0x1a, 0x18, 0x4b, 0x64, 0x30, 0x71,
I2C_CLIENT_END
};
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 84a957e52c4..277a092e121 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -368,7 +368,7 @@ int bttv_input_init(struct bttv *btv)
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
pci_name(btv->c.pci));
- err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes);
+ err = ir_input_init(input_dev, &ir->ir, ir_type);
if (err < 0)
goto err_out_free;
@@ -389,7 +389,7 @@ int bttv_input_init(struct bttv *btv)
bttv_ir_start(btv, ir);
/* all done */
- err = input_register_device(btv->remote->dev);
+ err = ir_input_register(btv->remote->dev, ir_codes);
if (err)
goto err_out_stop;
@@ -403,8 +403,6 @@ int bttv_input_init(struct bttv *btv)
bttv_ir_stop(btv);
btv->remote = NULL;
err_out_free:
- ir_input_free(input_dev);
- input_free_device(input_dev);
kfree(ir);
return err;
}
@@ -415,8 +413,7 @@ void bttv_input_fini(struct bttv *btv)
return;
bttv_ir_stop(btv);
- ir_input_free(btv->remote->dev);
- input_unregister_device(btv->remote->dev);
+ ir_input_unregister(btv->remote->dev);
kfree(btv->remote);
btv->remote = NULL;
}
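
ir_input_init() no longer takes the scancode table; the table moves to ir_input_register(), which also replaces the bare input_register_device()/input_free_device() calls, and teardown collapses into a single ir_input_unregister(). Reconstructed flow of bttv_input_init() after this hunk (names and error paths as in the diff, surrounding code omitted):

	err = ir_input_init(input_dev, &ir->ir, ir_type);	/* keymap no longer passed here */
	if (err < 0)
		goto err_out_free;
	/* ... driver-specific setup ... */
	bttv_ir_start(btv, ir);

	/* ir_input_register() takes the scancode table and registers the
	 * input device; ir_input_unregister() later undoes both steps */
	err = ir_input_register(btv->remote->dev, ir_codes);
	if (err)
		goto err_out_stop;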
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 85cf1778827..e2cbebab959 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -809,8 +809,8 @@ static int init_cqcam(struct parport *port)
return -ENODEV;
}
- printk(KERN_INFO "video%d: Colour QuickCam found on %s\n",
- qcam->vdev.num, qcam->pport->name);
+ printk(KERN_INFO "%s: Colour QuickCam found on %s\n",
+ video_device_node_name(&qcam->vdev), qcam->pport->name);
qcams[num_cams++] = qcam;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 10230cb3d21..7bb9c1ec781 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -1723,7 +1723,6 @@ static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
static struct video_device cafe_v4l_template = {
.name = "cafe",
- .minor = -1, /* Get one dynamically */
.tvnorms = V4L2_STD_NTSC_M,
.current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 2377313c041..551ddf216a4 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -32,6 +32,7 @@
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
@@ -244,72 +245,67 @@ static void rvfree(void *mem, unsigned long size)
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *cpia_proc_root=NULL;
-static int cpia_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int cpia_proc_show(struct seq_file *m, void *v)
{
- char *out = page;
- int len, tmp;
- struct cam_data *cam = data;
+ struct cam_data *cam = m->private;
+ int tmp;
char tmpstr[29];
- /* IMPORTANT: This output MUST be kept under PAGE_SIZE
- * or we need to get more sophisticated. */
-
- out += sprintf(out, "read-only\n-----------------------\n");
- out += sprintf(out, "V4L Driver version: %d.%d.%d\n",
+ seq_printf(m, "read-only\n-----------------------\n");
+ seq_printf(m, "V4L Driver version: %d.%d.%d\n",
CPIA_MAJ_VER, CPIA_MIN_VER, CPIA_PATCH_VER);
- out += sprintf(out, "CPIA Version: %d.%02d (%d.%d)\n",
+ seq_printf(m, "CPIA Version: %d.%02d (%d.%d)\n",
cam->params.version.firmwareVersion,
cam->params.version.firmwareRevision,
cam->params.version.vcVersion,
cam->params.version.vcRevision);
- out += sprintf(out, "CPIA PnP-ID: %04x:%04x:%04x\n",
+ seq_printf(m, "CPIA PnP-ID: %04x:%04x:%04x\n",
cam->params.pnpID.vendor, cam->params.pnpID.product,
cam->params.pnpID.deviceRevision);
- out += sprintf(out, "VP-Version: %d.%d %04x\n",
+ seq_printf(m, "VP-Version: %d.%d %04x\n",
cam->params.vpVersion.vpVersion,
cam->params.vpVersion.vpRevision,
cam->params.vpVersion.cameraHeadID);
- out += sprintf(out, "system_state: %#04x\n",
+ seq_printf(m, "system_state: %#04x\n",
cam->params.status.systemState);
- out += sprintf(out, "grab_state: %#04x\n",
+ seq_printf(m, "grab_state: %#04x\n",
cam->params.status.grabState);
- out += sprintf(out, "stream_state: %#04x\n",
+ seq_printf(m, "stream_state: %#04x\n",
cam->params.status.streamState);
- out += sprintf(out, "fatal_error: %#04x\n",
+ seq_printf(m, "fatal_error: %#04x\n",
cam->params.status.fatalError);
- out += sprintf(out, "cmd_error: %#04x\n",
+ seq_printf(m, "cmd_error: %#04x\n",
cam->params.status.cmdError);
- out += sprintf(out, "debug_flags: %#04x\n",
+ seq_printf(m, "debug_flags: %#04x\n",
cam->params.status.debugFlags);
- out += sprintf(out, "vp_status: %#04x\n",
+ seq_printf(m, "vp_status: %#04x\n",
cam->params.status.vpStatus);
- out += sprintf(out, "error_code: %#04x\n",
+ seq_printf(m, "error_code: %#04x\n",
cam->params.status.errorCode);
/* QX3 specific entries */
if (cam->params.qx3.qx3_detected) {
- out += sprintf(out, "button: %4d\n",
+ seq_printf(m, "button: %4d\n",
cam->params.qx3.button);
- out += sprintf(out, "cradled: %4d\n",
+ seq_printf(m, "cradled: %4d\n",
cam->params.qx3.cradled);
}
- out += sprintf(out, "video_size: %s\n",
+ seq_printf(m, "video_size: %s\n",
cam->params.format.videoSize == VIDEOSIZE_CIF ?
"CIF " : "QCIF");
- out += sprintf(out, "roi: (%3d, %3d) to (%3d, %3d)\n",
+ seq_printf(m, "roi: (%3d, %3d) to (%3d, %3d)\n",
cam->params.roi.colStart*8,
cam->params.roi.rowStart*4,
cam->params.roi.colEnd*8,
cam->params.roi.rowEnd*4);
- out += sprintf(out, "actual_fps: %3d\n", cam->fps);
- out += sprintf(out, "transfer_rate: %4dkB/s\n",
+ seq_printf(m, "actual_fps: %3d\n", cam->fps);
+ seq_printf(m, "transfer_rate: %4dkB/s\n",
cam->transfer_rate);
- out += sprintf(out, "\nread-write\n");
- out += sprintf(out, "----------------------- current min"
+ seq_printf(m, "\nread-write\n");
+ seq_printf(m, "----------------------- current min"
" max default comment\n");
- out += sprintf(out, "brightness: %8d %8d %8d %8d\n",
+ seq_printf(m, "brightness: %8d %8d %8d %8d\n",
cam->params.colourParams.brightness, 0, 100, 50);
if (cam->params.version.firmwareVersion == 1 &&
cam->params.version.firmwareRevision == 2)
@@ -318,26 +314,26 @@ static int cpia_read_proc(char *page, char **start, off_t off,
else
tmp = 96;
- out += sprintf(out, "contrast: %8d %8d %8d %8d"
+ seq_printf(m, "contrast: %8d %8d %8d %8d"
" steps of 8\n",
cam->params.colourParams.contrast, 0, tmp, 48);
- out += sprintf(out, "saturation: %8d %8d %8d %8d\n",
+ seq_printf(m, "saturation: %8d %8d %8d %8d\n",
cam->params.colourParams.saturation, 0, 100, 50);
tmp = (25000+5000*cam->params.sensorFps.baserate)/
(1<<cam->params.sensorFps.divisor);
- out += sprintf(out, "sensor_fps: %4d.%03d %8d %8d %8d\n",
+ seq_printf(m, "sensor_fps: %4d.%03d %8d %8d %8d\n",
tmp/1000, tmp%1000, 3, 30, 15);
- out += sprintf(out, "stream_start_line: %8d %8d %8d %8d\n",
+ seq_printf(m, "stream_start_line: %8d %8d %8d %8d\n",
2*cam->params.streamStartLine, 0,
cam->params.format.videoSize == VIDEOSIZE_CIF ? 288:144,
cam->params.format.videoSize == VIDEOSIZE_CIF ? 240:120);
- out += sprintf(out, "sub_sample: %8s %8s %8s %8s\n",
+ seq_printf(m, "sub_sample: %8s %8s %8s %8s\n",
cam->params.format.subSample == SUBSAMPLE_420 ?
"420" : "422", "420", "422", "422");
- out += sprintf(out, "yuv_order: %8s %8s %8s %8s\n",
+ seq_printf(m, "yuv_order: %8s %8s %8s %8s\n",
cam->params.format.yuvOrder == YUVORDER_YUYV ?
"YUYV" : "UYVY", "YUYV" , "UYVY", "YUYV");
- out += sprintf(out, "ecp_timing: %8s %8s %8s %8s\n",
+ seq_printf(m, "ecp_timing: %8s %8s %8s %8s\n",
cam->params.ecpTiming ? "slow" : "normal", "slow",
"normal", "normal");
@@ -346,13 +342,13 @@ static int cpia_read_proc(char *page, char **start, off_t off,
} else {
sprintf(tmpstr, "manual");
}
- out += sprintf(out, "color_balance_mode: %8s %8s %8s"
+ seq_printf(m, "color_balance_mode: %8s %8s %8s"
" %8s\n", tmpstr, "manual", "auto", "auto");
- out += sprintf(out, "red_gain: %8d %8d %8d %8d\n",
+ seq_printf(m, "red_gain: %8d %8d %8d %8d\n",
cam->params.colourBalance.redGain, 0, 212, 32);
- out += sprintf(out, "green_gain: %8d %8d %8d %8d\n",
+ seq_printf(m, "green_gain: %8d %8d %8d %8d\n",
cam->params.colourBalance.greenGain, 0, 212, 6);
- out += sprintf(out, "blue_gain: %8d %8d %8d %8d\n",
+ seq_printf(m, "blue_gain: %8d %8d %8d %8d\n",
cam->params.colourBalance.blueGain, 0, 212, 92);
if (cam->params.version.firmwareVersion == 1 &&
@@ -363,10 +359,10 @@ static int cpia_read_proc(char *page, char **start, off_t off,
sprintf(tmpstr, "%8d %8d %8d", 1, 8, 2);
if (cam->params.exposure.gainMode == 0)
- out += sprintf(out, "max_gain: unknown %28s"
+ seq_printf(m, "max_gain: unknown %28s"
" powers of 2\n", tmpstr);
else
- out += sprintf(out, "max_gain: %8d %28s"
+ seq_printf(m, "max_gain: %8d %28s"
" 1,2,4 or 8 \n",
1<<(cam->params.exposure.gainMode-1), tmpstr);
@@ -382,12 +378,12 @@ static int cpia_read_proc(char *page, char **start, off_t off,
sprintf(tmpstr, "unknown");
break;
}
- out += sprintf(out, "exposure_mode: %8s %8s %8s"
+ seq_printf(m, "exposure_mode: %8s %8s %8s"
" %8s\n", tmpstr, "manual", "auto", "auto");
- out += sprintf(out, "centre_weight: %8s %8s %8s %8s\n",
+ seq_printf(m, "centre_weight: %8s %8s %8s %8s\n",
(2-cam->params.exposure.centreWeight) ? "on" : "off",
"off", "on", "on");
- out += sprintf(out, "gain: %8d %8d max_gain %8d 1,2,4,8 possible\n",
+ seq_printf(m, "gain: %8d %8d max_gain %8d 1,2,4,8 possible\n",
1<<cam->params.exposure.gain, 1, 1);
if (cam->params.version.firmwareVersion == 1 &&
cam->params.version.firmwareRevision == 2)
@@ -396,7 +392,7 @@ static int cpia_read_proc(char *page, char **start, off_t off,
else
tmp = 510;
- out += sprintf(out, "fine_exp: %8d %8d %8d %8d\n",
+ seq_printf(m, "fine_exp: %8d %8d %8d %8d\n",
cam->params.exposure.fineExp*2, 0, tmp, 0);
if (cam->params.version.firmwareVersion == 1 &&
cam->params.version.firmwareRevision == 2)
@@ -405,127 +401,122 @@ static int cpia_read_proc(char *page, char **start, off_t off,
else
tmp = MAX_EXP;
- out += sprintf(out, "coarse_exp: %8d %8d %8d"
+ seq_printf(m, "coarse_exp: %8d %8d %8d"
" %8d\n", cam->params.exposure.coarseExpLo+
256*cam->params.exposure.coarseExpHi, 0, tmp, 185);
- out += sprintf(out, "red_comp: %8d %8d %8d %8d\n",
+ seq_printf(m, "red_comp: %8d %8d %8d %8d\n",
cam->params.exposure.redComp, COMP_RED, 255, COMP_RED);
- out += sprintf(out, "green1_comp: %8d %8d %8d %8d\n",
+ seq_printf(m, "green1_comp: %8d %8d %8d %8d\n",
cam->params.exposure.green1Comp, COMP_GREEN1, 255,
COMP_GREEN1);
- out += sprintf(out, "green2_comp: %8d %8d %8d %8d\n",
+ seq_printf(m, "green2_comp: %8d %8d %8d %8d\n",
cam->params.exposure.green2Comp, COMP_GREEN2, 255,
COMP_GREEN2);
- out += sprintf(out, "blue_comp: %8d %8d %8d %8d\n",
+ seq_printf(m, "blue_comp: %8d %8d %8d %8d\n",
cam->params.exposure.blueComp, COMP_BLUE, 255, COMP_BLUE);
- out += sprintf(out, "apcor_gain1: %#8x %#8x %#8x %#8x\n",
+ seq_printf(m, "apcor_gain1: %#8x %#8x %#8x %#8x\n",
cam->params.apcor.gain1, 0, 0xff, 0x1c);
- out += sprintf(out, "apcor_gain2: %#8x %#8x %#8x %#8x\n",
+ seq_printf(m, "apcor_gain2: %#8x %#8x %#8x %#8x\n",
cam->params.apcor.gain2, 0, 0xff, 0x1a);
- out += sprintf(out, "apcor_gain4: %#8x %#8x %#8x %#8x\n",
+ seq_printf(m, "apcor_gain4: %#8x %#8x %#8x %#8x\n",
cam->params.apcor.gain4, 0, 0xff, 0x2d);
- out += sprintf(out, "apcor_gain8: %#8x %#8x %#8x %#8x\n",
+ seq_printf(m, "apcor_gain8: %#8x %#8x %#8x %#8x\n",
cam->params.apcor.gain8, 0, 0xff, 0x2a);
- out += sprintf(out, "vl_offset_gain1: %8d %8d %8d %8d\n",
+ seq_printf(m, "vl_offset_gain1: %8d %8d %8d %8d\n",
cam->params.vlOffset.gain1, 0, 255, 24);
- out += sprintf(out, "vl_offset_gain2: %8d %8d %8d %8d\n",
+ seq_printf(m, "vl_offset_gain2: %8d %8d %8d %8d\n",
cam->params.vlOffset.gain2, 0, 255, 28);
- out += sprintf(out, "vl_offset_gain4: %8d %8d %8d %8d\n",
+ seq_printf(m, "vl_offset_gain4: %8d %8d %8d %8d\n",
cam->params.vlOffset.gain4, 0, 255, 30);
- out += sprintf(out, "vl_offset_gain8: %8d %8d %8d %8d\n",
+ seq_printf(m, "vl_offset_gain8: %8d %8d %8d %8d\n",
cam->params.vlOffset.gain8, 0, 255, 30);
- out += sprintf(out, "flicker_control: %8s %8s %8s %8s\n",
+ seq_printf(m, "flicker_control: %8s %8s %8s %8s\n",
cam->params.flickerControl.flickerMode ? "on" : "off",
"off", "on", "off");
- out += sprintf(out, "mains_frequency: %8d %8d %8d %8d"
+ seq_printf(m, "mains_frequency: %8d %8d %8d %8d"
" only 50/60\n",
cam->mainsFreq ? 60 : 50, 50, 60, 50);
if(cam->params.flickerControl.allowableOverExposure < 0)
- out += sprintf(out, "allowable_overexposure: %4dauto auto %8d auto\n",
+ seq_printf(m, "allowable_overexposure: %4dauto auto %8d auto\n",
-cam->params.flickerControl.allowableOverExposure,
255);
else
- out += sprintf(out, "allowable_overexposure: %8d auto %8d auto\n",
+ seq_printf(m, "allowable_overexposure: %8d auto %8d auto\n",
cam->params.flickerControl.allowableOverExposure,
255);
- out += sprintf(out, "compression_mode: ");
+ seq_printf(m, "compression_mode: ");
switch(cam->params.compression.mode) {
case CPIA_COMPRESSION_NONE:
- out += sprintf(out, "%8s", "none");
+ seq_printf(m, "%8s", "none");
break;
case CPIA_COMPRESSION_AUTO:
- out += sprintf(out, "%8s", "auto");
+ seq_printf(m, "%8s", "auto");
break;
case CPIA_COMPRESSION_MANUAL:
- out += sprintf(out, "%8s", "manual");
+ seq_printf(m, "%8s", "manual");
break;
default:
- out += sprintf(out, "%8s", "unknown");
+ seq_printf(m, "%8s", "unknown");
break;
}
- out += sprintf(out, " none,auto,manual auto\n");
- out += sprintf(out, "decimation_enable: %8s %8s %8s %8s\n",
+ seq_printf(m, " none,auto,manual auto\n");
+ seq_printf(m, "decimation_enable: %8s %8s %8s %8s\n",
cam->params.compression.decimation ==
DECIMATION_ENAB ? "on":"off", "off", "on",
"off");
- out += sprintf(out, "compression_target: %9s %9s %9s %9s\n",
+ seq_printf(m, "compression_target: %9s %9s %9s %9s\n",
cam->params.compressionTarget.frTargeting ==
CPIA_COMPRESSION_TARGET_FRAMERATE ?
"framerate":"quality",
"framerate", "quality", "quality");
- out += sprintf(out, "target_framerate: %8d %8d %8d %8d\n",
+ seq_printf(m, "target_framerate: %8d %8d %8d %8d\n",
cam->params.compressionTarget.targetFR, 1, 30, 15);
- out += sprintf(out, "target_quality: %8d %8d %8d %8d\n",
+ seq_printf(m, "target_quality: %8d %8d %8d %8d\n",
cam->params.compressionTarget.targetQ, 1, 64, 5);
- out += sprintf(out, "y_threshold: %8d %8d %8d %8d\n",
+ seq_printf(m, "y_threshold: %8d %8d %8d %8d\n",
cam->params.yuvThreshold.yThreshold, 0, 31, 6);
- out += sprintf(out, "uv_threshold: %8d %8d %8d %8d\n",
+ seq_printf(m, "uv_threshold: %8d %8d %8d %8d\n",
cam->params.yuvThreshold.uvThreshold, 0, 31, 6);
- out += sprintf(out, "hysteresis: %8d %8d %8d %8d\n",
+ seq_printf(m, "hysteresis: %8d %8d %8d %8d\n",
cam->params.compressionParams.hysteresis, 0, 255, 3);
- out += sprintf(out, "threshold_max: %8d %8d %8d %8d\n",
+ seq_printf(m, "threshold_max: %8d %8d %8d %8d\n",
cam->params.compressionParams.threshMax, 0, 255, 11);
- out += sprintf(out, "small_step: %8d %8d %8d %8d\n",
+ seq_printf(m, "small_step: %8d %8d %8d %8d\n",
cam->params.compressionParams.smallStep, 0, 255, 1);
- out += sprintf(out, "large_step: %8d %8d %8d %8d\n",
+ seq_printf(m, "large_step: %8d %8d %8d %8d\n",
cam->params.compressionParams.largeStep, 0, 255, 3);
- out += sprintf(out, "decimation_hysteresis: %8d %8d %8d %8d\n",
+ seq_printf(m, "decimation_hysteresis: %8d %8d %8d %8d\n",
cam->params.compressionParams.decimationHysteresis,
0, 255, 2);
- out += sprintf(out, "fr_diff_step_thresh: %8d %8d %8d %8d\n",
+ seq_printf(m, "fr_diff_step_thresh: %8d %8d %8d %8d\n",
cam->params.compressionParams.frDiffStepThresh,
0, 255, 5);
- out += sprintf(out, "q_diff_step_thresh: %8d %8d %8d %8d\n",
+ seq_printf(m, "q_diff_step_thresh: %8d %8d %8d %8d\n",
cam->params.compressionParams.qDiffStepThresh,
0, 255, 3);
- out += sprintf(out, "decimation_thresh_mod: %8d %8d %8d %8d\n",
+ seq_printf(m, "decimation_thresh_mod: %8d %8d %8d %8d\n",
cam->params.compressionParams.decimationThreshMod,
0, 255, 2);
/* QX3 specific entries */
if (cam->params.qx3.qx3_detected) {
- out += sprintf(out, "toplight: %8s %8s %8s %8s\n",
+ seq_printf(m, "toplight: %8s %8s %8s %8s\n",
cam->params.qx3.toplight ? "on" : "off",
"off", "on", "off");
- out += sprintf(out, "bottomlight: %8s %8s %8s %8s\n",
+ seq_printf(m, "bottomlight: %8s %8s %8s %8s\n",
cam->params.qx3.bottomlight ? "on" : "off",
"off", "on", "off");
}
- len = out - page;
- len -= off;
- if (len < count) {
- *eof = 1;
- if (len <= 0) return 0;
- } else
- len = count;
-
- *start = page + off;
- return len;
+ return 0;
}
+static int cpia_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cpia_proc_show, PDE(inode)->data);
+}
-static int match(char *checkstr, char **buffer, unsigned long *count,
+static int match(char *checkstr, char **buffer, size_t *count,
int *find_colon, int *err)
{
int ret, colon_found = 1;
@@ -551,7 +542,7 @@ static int match(char *checkstr, char **buffer, unsigned long *count,
return ret;
}
-static unsigned long int value(char **buffer, unsigned long *count, int *err)
+static unsigned long int value(char **buffer, size_t *count, int *err)
{
char *p;
unsigned long int ret;
@@ -565,10 +556,10 @@ static unsigned long int value(char **buffer, unsigned long *count, int *err)
return ret;
}
-static int cpia_write_proc(struct file *file, const char __user *buf,
- unsigned long count, void *data)
+static ssize_t cpia_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
- struct cam_data *cam = data;
+ struct cam_data *cam = PDE(file->f_path.dentry->d_inode)->data;
struct cam_params new_params;
char *page, *buffer;
int retval, find_colon;
@@ -582,7 +573,7 @@ static int cpia_write_proc(struct file *file, const char __user *buf,
* from the comx driver
*/
if (count > PAGE_SIZE) {
- printk(KERN_ERR "count is %lu > %d!!!\n", count, (int)PAGE_SIZE);
+ printk(KERN_ERR "count is %zu > %d!!!\n", count, (int)PAGE_SIZE);
return -ENOSPC;
}
@@ -1340,23 +1331,28 @@ out:
return retval;
}
+static const struct file_operations cpia_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = cpia_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cpia_proc_write,
+};
+
static void create_proc_cpia_cam(struct cam_data *cam)
{
- char name[5 + 1 + 10 + 1];
struct proc_dir_entry *ent;
if (!cpia_proc_root || !cam)
return;
- snprintf(name, sizeof(name), "video%d", cam->vdev.num);
-
- ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root);
+ ent = proc_create_data(video_device_node_name(&cam->vdev),
+ S_IRUGO|S_IWUSR, cpia_proc_root,
+ &cpia_proc_fops, cam);
if (!ent)
return;
- ent->data = cam;
- ent->read_proc = cpia_read_proc;
- ent->write_proc = cpia_write_proc;
/*
size of the proc entry is 3736 bytes for the standard webcam;
the extra features of the QX3 microscope add 189 bytes.
@@ -1368,13 +1364,10 @@ static void create_proc_cpia_cam(struct cam_data *cam)
static void destroy_proc_cpia_cam(struct cam_data *cam)
{
- char name[5 + 1 + 10 + 1];
-
if (!cam || !cam->proc_entry)
return;
- snprintf(name, sizeof(name), "video%d", cam->vdev.num);
- remove_proc_entry(name, cpia_proc_root);
+ remove_proc_entry(video_device_node_name(&cam->vdev), cpia_proc_root);
cam->proc_entry = NULL;
}
@@ -3999,7 +3992,7 @@ void cpia_unregister_camera(struct cam_data *cam)
}
#ifdef CONFIG_PROC_FS
- DBG("destroying /proc/cpia/video%d\n", cam->vdev.num);
+ DBG("destroying /proc/cpia/%s\n", video_device_node_name(&cam->vdev));
destroy_proc_cpia_cam(cam);
#endif
if (!cam->open_count) {
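
The large cpia.c hunk converts the legacy read_proc/write_proc callbacks to a seq_file: cpia_proc_show() prints through seq_printf(), single_open() wires it up, and proc_create_data() registers the entry under the device node name with the cam_data pointer as private data. A minimal sketch of that pattern; my_show, my_open and the field printed are illustrative, and cam_data is assumed to come from the driver's own header:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *m, void *v)
{
	struct cam_data *cam = m->private;	/* handed over by single_open() */

	seq_printf(m, "transfer_rate: %4dkB/s\n", cam->transfer_rate);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer passed to proc_create_data() */
	return single_open(file, my_show, PDE(inode)->data);
}

static const struct file_operations my_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = my_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* registration, keyed by the node name as in the hunk above:
 * proc_create_data(video_device_node_name(&cam->vdev), S_IRUGO | S_IWUSR,
 *                  cpia_proc_root, &my_proc_fops, cam);
 */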
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 0b4a8f309cf..6f91415eb7b 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -38,17 +38,12 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/videodev.h>
+#include <linux/stringify.h>
#include <media/v4l2-ioctl.h>
#include "cpia2.h"
#include "cpia2dev.h"
-
-//#define _CPIA2_DEBUG_
-
-#define MAKE_STRING_1(x) #x
-#define MAKE_STRING(x) MAKE_STRING_1(x)
-
static int video_nr = -1;
module_param(video_nr, int, 0);
MODULE_PARM_DESC(video_nr,"video device to register (0=/dev/video0, etc)");
@@ -60,26 +55,26 @@ MODULE_PARM_DESC(buffer_size, "Size for each frame buffer in bytes (default 68k)
static int num_buffers = 3;
module_param(num_buffers, int, 0);
MODULE_PARM_DESC(num_buffers, "Number of frame buffers (1-"
- MAKE_STRING(VIDEO_MAX_FRAME) ", default 3)");
+ __stringify(VIDEO_MAX_FRAME) ", default 3)");
static int alternate = DEFAULT_ALT;
module_param(alternate, int, 0);
-MODULE_PARM_DESC(alternate, "USB Alternate (" MAKE_STRING(USBIF_ISO_1) "-"
- MAKE_STRING(USBIF_ISO_6) ", default "
- MAKE_STRING(DEFAULT_ALT) ")");
+MODULE_PARM_DESC(alternate, "USB Alternate (" __stringify(USBIF_ISO_1) "-"
+ __stringify(USBIF_ISO_6) ", default "
+ __stringify(DEFAULT_ALT) ")");
static int flicker_freq = 60;
module_param(flicker_freq, int, 0);
-MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" MAKE_STRING(50) "or"
- MAKE_STRING(60) ", default "
- MAKE_STRING(60) ")");
+MODULE_PARM_DESC(flicker_freq, "Flicker frequency (" __stringify(50) "or"
+ __stringify(60) ", default "
+ __stringify(60) ")");
static int flicker_mode = NEVER_FLICKER;
module_param(flicker_mode, int, 0);
MODULE_PARM_DESC(flicker_mode,
- "Flicker supression (" MAKE_STRING(NEVER_FLICKER) "or"
- MAKE_STRING(ANTI_FLICKER_ON) ", default "
- MAKE_STRING(NEVER_FLICKER) ")");
+ "Flicker supression (" __stringify(NEVER_FLICKER) "or"
+ __stringify(ANTI_FLICKER_ON) ", default "
+ __stringify(NEVER_FLICKER) ")");
MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
@@ -1926,7 +1921,6 @@ static const struct v4l2_file_operations fops_template = {
static struct video_device cpia2_template = {
/* I could not find any place for the old .initialize initializer?? */
.name= "CPiA2 Camera",
- .minor= -1,
.fops= &fops_template,
.release= video_device_release,
};
@@ -1967,9 +1961,9 @@ void cpia2_unregister_camera(struct camera_data *cam)
if (!cam->open_count) {
video_unregister_device(cam->vdev);
} else {
- LOG("/dev/video%d removed while open, "
- "deferring video_unregister_device\n",
- cam->vdev->num);
+ LOG("%s removed while open, deferring "
+ "video_unregister_device\n",
+ video_device_node_name(cam->vdev));
}
}
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index 4e278db31cc..c0885c69fd8 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -758,8 +758,8 @@ int cx18_v4l2_open(struct file *filp)
mutex_lock(&cx->serialize_lock);
if (cx18_init_on_first_open(cx)) {
- CX18_ERR("Failed to initialize on minor %d\n",
- video_dev->minor);
+ CX18_ERR("Failed to initialize on %s\n",
+ video_device_node_name(video_dev));
mutex_unlock(&cx->serialize_lock);
return -ENXIO;
}
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index c398651dd74..987a9308d93 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -219,6 +219,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
{
struct cx18_stream *s = &cx->streams[type];
int vfl_type = cx18_stream_info[type].vfl_type;
+ const char *name;
int num, ret;
/* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
@@ -258,31 +259,30 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
s->video_dev = NULL;
return ret;
}
- num = s->video_dev->num;
+
+ name = video_device_node_name(s->video_dev);
switch (vfl_type) {
case VFL_TYPE_GRABBER:
- CX18_INFO("Registered device video%d for %s "
- "(%d x %d.%02d kB)\n",
- num, s->name, cx->stream_buffers[type],
+ CX18_INFO("Registered device %s for %s (%d x %d.%02d kB)\n",
+ name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type] / 1024,
(cx->stream_buf_size[type] * 100 / 1024) % 100);
break;
case VFL_TYPE_RADIO:
- CX18_INFO("Registered device radio%d for %s\n",
- num, s->name);
+ CX18_INFO("Registered device %s for %s\n", name, s->name);
break;
case VFL_TYPE_VBI:
if (cx->stream_buffers[type])
- CX18_INFO("Registered device vbi%d for %s "
+ CX18_INFO("Registered device %s for %s "
"(%d x %d bytes)\n",
- num, s->name, cx->stream_buffers[type],
+ name, s->name, cx->stream_buffers[type],
cx->stream_buf_size[type]);
else
- CX18_INFO("Registered device vbi%d for %s\n",
- num, s->name);
+ CX18_INFO("Registered device %s for %s\n",
+ name, s->name);
break;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 319c459459e..a5490823500 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -68,19 +68,19 @@ struct cx231xx_board cx231xx_boards[] = {
.type = CX231XX_VMUX_TELEVISION,
.vmux = CX231XX_VIN_3_1,
.amux = CX231XX_AMUX_VIDEO,
- .gpio = 0,
+ .gpio = NULL,
}, {
.type = CX231XX_VMUX_COMPOSITE1,
.vmux = CX231XX_VIN_2_1,
.amux = CX231XX_AMUX_LINE_IN,
- .gpio = 0,
+ .gpio = NULL,
}, {
.type = CX231XX_VMUX_SVIDEO,
.vmux = CX231XX_VIN_1_1 |
(CX231XX_VIN_1_2 << 8) |
CX25840_SVIDEO_ON,
.amux = CX231XX_AMUX_LINE_IN,
- .gpio = 0,
+ .gpio = NULL,
}
},
},
@@ -107,19 +107,19 @@ struct cx231xx_board cx231xx_boards[] = {
.type = CX231XX_VMUX_TELEVISION,
.vmux = CX231XX_VIN_3_1,
.amux = CX231XX_AMUX_VIDEO,
- .gpio = 0,
+ .gpio = NULL,
}, {
.type = CX231XX_VMUX_COMPOSITE1,
.vmux = CX231XX_VIN_2_1,
.amux = CX231XX_AMUX_LINE_IN,
- .gpio = 0,
+ .gpio = NULL,
}, {
.type = CX231XX_VMUX_SVIDEO,
.vmux = CX231XX_VIN_1_1 |
(CX231XX_VIN_1_2 << 8) |
CX25840_SVIDEO_ON,
.amux = CX231XX_AMUX_LINE_IN,
- .gpio = 0,
+ .gpio = NULL,
}
},
},
@@ -147,19 +147,19 @@ struct cx231xx_board cx231xx_boards[] = {
.type = CX231XX_VMUX_TELEVISION,
.vmux = CX231XX_VIN_3_1,
.amux = CX231XX_AMUX_VIDEO,
- .gpio = 0,
+ .gpio = NULL,
}, {
.type = CX231XX_VMUX_COMPOSITE1,
.vmux = CX231XX_VIN_2_1,
.amux = CX231XX_AMUX_LINE_IN,
- .gpio = 0,
+ .gpio = NULL,
}, {
.type = CX231XX_VMUX_SVIDEO,
.vmux = CX231XX_VIN_1_1 |
(CX231XX_VIN_1_2 << 8) |
CX25840_SVIDEO_ON,
.amux = CX231XX_AMUX_LINE_IN,
- .gpio = 0,
+ .gpio = NULL,
}
},
},
@@ -856,8 +856,9 @@ static void cx231xx_usb_disconnect(struct usb_interface *interface)
if (dev->users) {
cx231xx_warn
- ("device /dev/video%d is open! Deregistration and memory "
- "deallocation are deferred on close.\n", dev->vdev->num);
+ ("device %s is open! Deregistration and memory "
+ "deallocation are deferred on close.\n",
+ video_device_node_name(dev->vdev));
dev->state |= DEV_MISCONFIGURED;
cx231xx_uninit_isoc(dev);
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index 0d333e679f7..4a60dfbc347 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -66,32 +66,6 @@ MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
static LIST_HEAD(cx231xx_devlist);
static DEFINE_MUTEX(cx231xx_devlist_mutex);
-struct cx231xx *cx231xx_get_device(int minor,
- enum v4l2_buf_type *fh_type, int *has_radio)
-{
- struct cx231xx *h, *dev = NULL;
-
- *fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- *has_radio = 0;
-
- mutex_lock(&cx231xx_devlist_mutex);
- list_for_each_entry(h, &cx231xx_devlist, devlist) {
- if (h->vdev->minor == minor)
- dev = h;
- if (h->vbi_dev->minor == minor) {
- dev = h;
- *fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
- }
- if (h->radio_dev && h->radio_dev->minor == minor) {
- dev = h;
- *has_radio = 1;
- }
- }
- mutex_unlock(&cx231xx_devlist_mutex);
-
- return dev;
-}
-
/*
* cx231xx_realease_resources()
* unregisters the v4l2,i2c and usb devices
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index cd135f01b9c..15826f98b68 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -197,8 +197,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER,
- dev->board.ir_codes);
+ err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
if (err < 0)
goto err_out_free;
@@ -217,7 +216,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
cx231xx_ir_start(ir);
/* all done */
- err = input_register_device(ir->input);
+ err = ir_input_register(ir->input, dev->board.ir_codes);
if (err)
goto err_out_stop;
@@ -226,8 +225,6 @@ err_out_stop:
cx231xx_ir_stop(ir);
dev->ir = NULL;
err_out_free:
- ir_input_free(input_dev);
- input_free_device(input_dev);
kfree(ir);
return err;
}
@@ -241,8 +238,7 @@ int cx231xx_ir_fini(struct cx231xx *dev)
return 0;
cx231xx_ir_stop(ir);
- ir_input_free(ir->input);
- input_unregister_device(ir->input);
+ ir_input_unregister(ir->input);
kfree(ir);
/* done */
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index d095aa0d6d1..d4f546f11d7 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -1916,20 +1916,29 @@ static int radio_queryctrl(struct file *file, void *priv,
*/
static int cx231xx_v4l2_open(struct file *filp)
{
- int minor = video_devdata(filp)->minor;
int errCode = 0, radio = 0;
- struct cx231xx *dev = NULL;
+ struct video_device *vdev = video_devdata(filp);
+ struct cx231xx *dev = video_drvdata(filp);
struct cx231xx_fh *fh;
enum v4l2_buf_type fh_type = 0;
- dev = cx231xx_get_device(minor, &fh_type, &radio);
- if (NULL == dev)
- return -ENODEV;
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ break;
+ case VFL_TYPE_VBI:
+ fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ break;
+ case VFL_TYPE_RADIO:
+ radio = 1;
+ break;
+ }
mutex_lock(&dev->lock);
- cx231xx_videodbg("open minor=%d type=%s users=%d\n",
- minor, v4l2_type_names[fh_type], dev->users);
+ cx231xx_videodbg("open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[fh_type],
+ dev->users);
#if 0
errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
@@ -2020,25 +2029,25 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
/*FIXME: I2C IR should be disconnected */
if (dev->radio_dev) {
- if (-1 != dev->radio_dev->minor)
+ if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
dev->radio_dev = NULL;
}
if (dev->vbi_dev) {
- cx231xx_info("V4L2 device /dev/vbi%d deregistered\n",
- dev->vbi_dev->num);
- if (-1 != dev->vbi_dev->minor)
+ cx231xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(dev->vbi_dev));
+ if (video_is_registered(dev->vbi_dev))
video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->vdev) {
- cx231xx_info("V4L2 device /dev/video%d deregistered\n",
- dev->vdev->num);
- if (-1 != dev->vdev->minor)
+ cx231xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(dev->vdev));
+ if (video_is_registered(dev->vdev))
video_unregister_device(dev->vdev);
else
video_device_release(dev->vdev);
@@ -2268,7 +2277,6 @@ static const struct video_device cx231xx_video_template = {
.fops = &cx231xx_v4l_fops,
.release = video_device_release,
.ioctl_ops = &video_ioctl_ops,
- .minor = -1,
.tvnorms = V4L2_STD_ALL,
.current_norm = V4L2_STD_PAL,
};
@@ -2303,7 +2311,6 @@ static struct video_device cx231xx_radio_template = {
.name = "cx231xx-radio",
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
- .minor = -1,
};
/******************************** usb interface ******************************/
@@ -2319,13 +2326,13 @@ static struct video_device *cx231xx_vdev_init(struct cx231xx *dev,
return NULL;
*vfd = *template;
- vfd->minor = -1;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
+ video_set_drvdata(vfd, dev);
return vfd;
}
@@ -2374,8 +2381,8 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
return ret;
}
- cx231xx_info("%s/0: registered device video%d [v4l2]\n",
- dev->name, dev->vdev->num);
+ cx231xx_info("%s/0: registered device %s [v4l2]\n",
+ dev->name, video_device_node_name(dev->vdev));
/* Initialize VBI template */
memcpy(&cx231xx_vbi_template, &cx231xx_video_template,
@@ -2393,8 +2400,8 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
return ret;
}
- cx231xx_info("%s/0: registered device vbi%d\n",
- dev->name, dev->vbi_dev->num);
+ cx231xx_info("%s/0: registered device %s\n",
+ dev->name, video_device_node_name(dev->vbi_dev));
if (cx231xx_boards[dev->model].radio.type == CX231XX_RADIO) {
dev->radio_dev = cx231xx_vdev_init(dev, &cx231xx_radio_template,
@@ -2409,12 +2416,13 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
cx231xx_errdev("can't register radio device\n");
return ret;
}
- cx231xx_info("Registered radio device as /dev/radio%d\n",
- dev->radio_dev->num);
+ cx231xx_info("Registered radio device as %s\n",
+ video_device_node_name(dev->radio_dev));
}
- cx231xx_info("V4L2 device registered as /dev/video%d and /dev/vbi%d\n",
- dev->vdev->num, dev->vbi_dev->num);
+ cx231xx_info("V4L2 device registered as %s and %s\n",
+ video_device_node_name(dev->vdev),
+ video_device_node_name(dev->vbi_dev));
return 0;
}
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 64e2ddd3c40..17d4d1a800c 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -689,8 +689,6 @@ void cx231xx_release_analog_resources(struct cx231xx *dev);
int cx231xx_register_analog_devices(struct cx231xx *dev);
void cx231xx_remove_from_devlist(struct cx231xx *dev);
void cx231xx_add_into_devlist(struct cx231xx *dev);
-struct cx231xx *cx231xx_get_device(int minor,
- enum v4l2_buf_type *fh_type, int *has_radio);
void cx231xx_init_extension(struct cx231xx *dev);
void cx231xx_close_extension(struct cx231xx *dev);
diff --git a/drivers/media/video/cx23885/cimax2.c b/drivers/media/video/cx23885/cimax2.c
index c04222ffb28..d4a9d2c5947 100644
--- a/drivers/media/video/cx23885/cimax2.c
+++ b/drivers/media/video/cx23885/cimax2.c
@@ -53,6 +53,8 @@
#define NETUP_CI_CTL 0x04
#define NETUP_CI_RD 1
+#define NETUP_IRQ_DETAM 0x1
+#define NETUP_IRQ_IRQAM 0x4
static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
@@ -73,6 +75,9 @@ struct netup_ci_state {
int status;
struct work_struct work;
void *priv;
+ u8 current_irq_mode;
+ int current_ci_flag;
+ unsigned long next_status_checked_time;
};
@@ -169,24 +174,26 @@ int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
if (0 != slot)
return -EINVAL;
- ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
- 0, &store, 1);
- if (ret != 0)
- return ret;
+ if (state->current_ci_flag != flag) {
+ ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
+ 0, &store, 1);
+ if (ret != 0)
+ return ret;
- store &= ~0x0c;
- store |= flag;
+ store &= ~0x0c;
+ store |= flag;
- ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
- 0, &store, 1);
- if (ret != 0)
- return ret;
+ ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
+ 0, &store, 1);
+ if (ret != 0)
+ return ret;
+ };
+ state->current_ci_flag = flag;
mutex_lock(&dev->gpio_lock);
/* write addr */
cx_write(MC417_OEN, NETUP_EN_ALL);
- msleep(2);
cx_write(MC417_RWD, NETUP_CTRL_OFF |
NETUP_ADLO | (0xff & addr));
cx_clear(MC417_RWD, NETUP_ADLO);
@@ -196,7 +203,6 @@ int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
if (read) { /* data in */
cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA);
- msleep(2);
} else /* data out */
cx_write(MC417_RWD, NETUP_CTRL_OFF | data);
@@ -213,8 +219,8 @@ int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
if (mem < 0)
return -EREMOTEIO;
- ci_dbg_print("%s: %s: addr=[0x%02x], %s=%x\n", __func__,
- (read) ? "read" : "write", addr,
+ ci_dbg_print("%s: %s: chipaddr=[0x%x] addr=[0x%02x], %s=%x\n", __func__,
+ (read) ? "read" : "write", state->ci_i2c_addr, addr,
(flag == NETUP_CI_CTL) ? "ctl" : "mem",
(read) ? mem : data);
@@ -283,14 +289,39 @@ int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
return 0;
}
+int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
+{
+ struct netup_ci_state *state = en50221->data;
+ int ret;
+
+ if (irq_mode == state->current_irq_mode)
+ return 0;
+
+ ci_dbg_print("%s: chipaddr=[0x%x] setting ci IRQ to [0x%x] \n",
+ __func__, state->ci_i2c_addr, irq_mode);
+ ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
+ 0x1b, &irq_mode, 1);
+
+ if (ret != 0)
+ return ret;
+
+ state->current_irq_mode = irq_mode;
+
+ return 0;
+}
+
int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct netup_ci_state *state = en50221->data;
- u8 buf = 0x60;
+ u8 buf;
if (0 != slot)
return -EINVAL;
+ netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
+ 0, &buf, 1);
+ buf |= 0x60;
+
return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
}
@@ -303,21 +334,35 @@ static void netup_read_ci_status(struct work_struct *work)
u8 buf[33];
int ret;
- ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
- 0, &buf[0], 33);
+ /* CAM module IRQ processing. fast operation */
+ dvb_ca_en50221_frda_irq(&state->ca, 0);
- if (ret != 0)
- return;
+ /* CAM module INSERT/REMOVE processing. slow operation because of i2c
+ * transfers */
+ if (time_after(jiffies, state->next_status_checked_time)
+ || !state->status) {
+ ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
+ 0, &buf[0], 33);
+
+ state->next_status_checked_time = jiffies
+ + msecs_to_jiffies(1000);
+
+ if (ret != 0)
+ return;
- ci_dbg_print("%s: Slot Status Addr=[0x%04x], Reg=[0x%02x], data=%02x, "
- "TS config = %02x\n", __func__, state->ci_i2c_addr, 0, buf[0],
- buf[32]);
+ ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
+ "Reg=[0x%02x], data=%02x, "
+ "TS config = %02x\n", __func__,
+ state->ci_i2c_addr, 0, buf[0],
+ buf[0]);
- if (buf[0] & 1)
- state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
- DVB_CA_EN50221_POLL_CAM_READY;
- else
- state->status = 0;
+
+ if (buf[0] & 1)
+ state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
+ DVB_CA_EN50221_POLL_CAM_READY;
+ else
+ state->status = 0;
+ };
}
/* CI irq handler */
@@ -347,6 +392,9 @@ int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open
if (0 != slot)
return -EINVAL;
+ netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | NETUP_IRQ_IRQAM)
+ : NETUP_IRQ_DETAM);
+
return state->status;
}
@@ -381,8 +429,8 @@ int netup_ci_init(struct cx23885_tsport *port)
0x01, /* power on (use it like store place) */
0x00, /* RFU */
0x00, /* int status read only */
- 0x01, /* all int unmasked */
- 0x04, /* int config */
+ NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
+ 0x05, /* EXTINT=active-high, INT=push-pull */
0x00, /* USCG1 */
0x04, /* ack active low */
0x00, /* LOCK = 0 */
@@ -422,6 +470,7 @@ int netup_ci_init(struct cx23885_tsport *port)
state->ca.poll_slot_status = netup_poll_ci_slot_status;
state->ca.data = state;
state->priv = port;
+ state->current_irq_mode = NETUP_IRQ_IRQAM | NETUP_IRQ_DETAM;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &cimax_init[0], 34);
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 0eed852c61e..88c0d248111 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1568,28 +1568,11 @@ static int vidioc_queryctrl(struct file *file, void *priv,
static int mpeg_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx23885_dev *h, *dev = NULL;
- struct list_head *list;
+ struct cx23885_dev *dev = video_drvdata(file);
struct cx23885_fh *fh;
dprintk(2, "%s()\n", __func__);
- lock_kernel();
- list_for_each(list, &cx23885_devlist) {
- h = list_entry(list, struct cx23885_dev, devlist);
- if (h->v4l_device &&
- h->v4l_device->minor == minor) {
- dev = h;
- break;
- }
- }
-
- if (dev == NULL) {
- unlock_kernel();
- return -ENODEV;
- }
-
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
if (NULL == fh) {
@@ -1597,6 +1580,8 @@ static int mpeg_open(struct file *file)
return -ENOMEM;
}
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
@@ -1736,7 +1721,6 @@ static struct video_device cx23885_mpeg_template = {
.name = "cx23885",
.fops = &mpeg_fops,
.ioctl_ops = &mpeg_ioctl_ops,
- .minor = -1,
.tvnorms = CX23885_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
@@ -1746,7 +1730,7 @@ void cx23885_417_unregister(struct cx23885_dev *dev)
dprintk(1, "%s()\n", __func__);
if (dev->v4l_device) {
- if (-1 != dev->v4l_device->minor)
+ if (video_is_registered(dev->v4l_device))
video_unregister_device(dev->v4l_device);
else
video_device_release(dev->v4l_device);
@@ -1803,6 +1787,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
/* Allocate and initialize V4L video device */
dev->v4l_device = cx23885_video_dev_alloc(tsport,
dev->pci, &cx23885_mpeg_template, "mpeg");
+ video_set_drvdata(dev->v4l_device, dev);
err = video_register_device(dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
@@ -1810,8 +1795,8 @@ int cx23885_417_register(struct cx23885_dev *dev)
return err;
}
- printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
- dev->name, dev->v4l_device->num);
+ printk(KERN_INFO "%s: registered device %s [mpeg]\n",
+ dev->name, video_device_node_name(dev->v4l_device));
return 0;
}
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 04b12d27bc1..0dde57e96d3 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -55,9 +55,6 @@ MODULE_PARM_DESC(card, "card type");
static unsigned int cx23885_devcount;
-static DEFINE_MUTEX(devlist);
-LIST_HEAD(cx23885_devlist);
-
#define NO_SYNC_LINE (-1U)
/* FIXME, these allocations will change when
@@ -785,10 +782,6 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
dev->nr = cx23885_devcount++;
sprintf(dev->name, "cx23885[%d]", dev->nr);
- mutex_lock(&devlist);
- list_add_tail(&dev->devlist, &cx23885_devlist);
- mutex_unlock(&devlist);
-
/* Configure the internal memory */
if (dev->pci->device == 0x8880) {
/* Could be 887 or 888, assume a default */
@@ -2008,10 +2001,6 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
/* unregister stuff */
free_irq(pci_dev->irq, dev);
- mutex_lock(&devlist);
- list_del(&dev->devlist);
- mutex_unlock(&devlist);
-
cx23885_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index 469e083dd5f..768eec92ccf 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -377,7 +377,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
cx23885_boards[dev->board].name);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(dev->pci));
- ret = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes);
+ ret = ir_input_init(input_dev, &ir->ir, ir_type);
if (ret < 0)
goto err_out_free;
@@ -397,7 +397,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
dev->ir_input = ir;
cx23885_input_ir_start(dev);
- ret = input_register_device(ir->dev);
+ ret = ir_input_register(ir->dev, ir_codes);
if (ret)
goto err_out_stop;
@@ -407,8 +407,6 @@ err_out_stop:
cx23885_input_ir_stop(dev);
dev->ir_input = NULL;
err_out_free:
- ir_input_free(input_dev);
- input_free_device(input_dev);
kfree(ir);
return ret;
}
@@ -420,8 +418,7 @@ void cx23885_input_fini(struct cx23885_dev *dev)
if (dev->ir_input == NULL)
return;
- ir_input_free(dev->ir_input->dev);
- input_unregister_device(dev->ir_input->dev);
+ ir_input_unregister(dev->ir_input->dev);
kfree(dev->ir_input);
dev->ir_input = NULL;
}
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 8b372b4f0de..8934d61cf66 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -318,11 +318,11 @@ static struct video_device *cx23885_vdev_init(struct cx23885_dev *dev,
if (NULL == vfd)
return NULL;
*vfd = *template;
- vfd->minor = -1;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
dev->name, type, cx23885_boards[dev->board].name);
+ video_set_drvdata(vfd, dev);
return vfd;
}
@@ -716,46 +716,34 @@ static int get_resource(struct cx23885_fh *fh)
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx23885_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx23885_dev *dev = video_drvdata(file);
struct cx23885_fh *fh;
- struct list_head *list;
enum v4l2_buf_type type = 0;
int radio = 0;
- lock_kernel();
- list_for_each(list, &cx23885_devlist) {
- h = list_entry(list, struct cx23885_dev, devlist);
- if (h->video_dev &&
- h->video_dev->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- if (h->vbi_dev &&
- h->vbi_dev->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- }
- if (h->radio_dev &&
- h->radio_dev->minor == minor) {
- radio = 1;
- dev = h;
- }
- }
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ break;
+ case VFL_TYPE_VBI:
+ type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ break;
+ case VFL_TYPE_RADIO:
+ radio = 1;
+ break;
}
- dprintk(1, "open minor=%d radio=%d type=%s\n",
- minor, radio, v4l2_type_names[type]);
+ dprintk(1, "open dev=%s radio=%d type=%s\n",
+ video_device_node_name(vdev), radio, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -1441,7 +1429,6 @@ static struct video_device cx23885_vbi_template;
static struct video_device cx23885_video_template = {
.name = "cx23885-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX23885_NORMS,
.current_norm = V4L2_STD_NTSC_M,
@@ -1461,7 +1448,7 @@ void cx23885_video_unregister(struct cx23885_dev *dev)
cx_clear(PCI_INT_MSK, 1);
if (dev->video_dev) {
- if (-1 != dev->video_dev->minor)
+ if (video_is_registered(dev->video_dev))
video_unregister_device(dev->video_dev);
else
video_device_release(dev->video_dev);
@@ -1532,8 +1519,8 @@ int cx23885_video_register(struct cx23885_dev *dev)
dev->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
- dev->name, dev->video_dev->num);
+ printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
+ dev->name, video_device_node_name(dev->video_dev));
/* initial device configuration */
mutex_lock(&dev->lock);
cx23885_set_tvnorm(dev, dev->tvnorm);
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index fa744764dc8..08b3f6b136a 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -303,7 +303,6 @@ struct cx23885_tsport {
};
struct cx23885_dev {
- struct list_head devlist;
atomic_t refcount;
struct v4l2_device v4l2_dev;
@@ -399,8 +398,6 @@ static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
extern struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw);
-extern struct list_head cx23885_devlist;
-
#define SRAM_CH01 0 /* Video A */
#define SRAM_CH02 1 /* VBI A */
#define SRAM_CH03 2 /* Video B */
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c
index 3ccc8afeccf..2bf57a4527d 100644
--- a/drivers/media/video/cx23885/cx23888-ir.c
+++ b/drivers/media/video/cx23885/cx23888-ir.c
@@ -124,15 +124,12 @@ struct cx23888_ir_state {
atomic_t rxclk_divider;
atomic_t rx_invert;
- struct kfifo *rx_kfifo;
+ struct kfifo rx_kfifo;
spinlock_t rx_kfifo_lock;
struct v4l2_subdev_ir_parameters tx_params;
struct mutex tx_params_lock;
atomic_t txclk_divider;
-
- struct kfifo *tx_kfifo;
- spinlock_t tx_kfifo_lock;
};
static inline struct cx23888_ir_state *to_state(struct v4l2_subdev *sd)
@@ -522,6 +519,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
{
struct cx23888_ir_state *state = to_state(sd);
struct cx23885_dev *dev = state->dev;
+ unsigned long flags;
u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG);
u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG);
@@ -594,8 +592,9 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
if (i == 0)
break;
j = i * sizeof(u32);
- k = kfifo_put(state->rx_kfifo,
- (unsigned char *) rx_data, j);
+ k = kfifo_in_locked(&state->rx_kfifo,
+ (unsigned char *) rx_data, j,
+ &state->rx_kfifo_lock);
if (k != j)
kror++; /* rx_kfifo over run */
}
@@ -631,8 +630,11 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status,
cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl);
*handled = true;
}
- if (kfifo_len(state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2)
+
+ spin_lock_irqsave(&state->rx_kfifo_lock, flags);
+ if (kfifo_len(&state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2)
events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ;
+ spin_unlock_irqrestore(&state->rx_kfifo_lock, flags);
if (events)
v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events);
@@ -657,7 +659,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count,
return 0;
}
- n = kfifo_get(state->rx_kfifo, buf, n);
+ n = kfifo_out_locked(&state->rx_kfifo, buf, n, &state->rx_kfifo_lock);
n /= sizeof(u32);
*num = n * sizeof(u32);
@@ -785,7 +787,12 @@ static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd,
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
- kfifo_reset(state->rx_kfifo);
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->rx_kfifo_lock, flags);
+ kfifo_reset(&state->rx_kfifo);
+ /* reset tx_fifo too if there is one... */
+ spin_unlock_irqrestore(&state->rx_kfifo_lock, flags);
if (p->interrupt_enable)
irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE);
control_rx_enable(dev, p->enable);
@@ -892,7 +899,6 @@ static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd,
o->interrupt_enable = p->interrupt_enable;
o->enable = p->enable;
if (p->enable) {
- kfifo_reset(state->tx_kfifo);
if (p->interrupt_enable)
irqenable_tx(dev, IRQEN_TSE);
control_tx_enable(dev, p->enable);
@@ -1168,18 +1174,8 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
- state->rx_kfifo = kfifo_alloc(CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL,
- &state->rx_kfifo_lock);
- if (state->rx_kfifo == NULL)
- return -ENOMEM;
-
- spin_lock_init(&state->tx_kfifo_lock);
- state->tx_kfifo = kfifo_alloc(CX23888_IR_TX_KFIFO_SIZE, GFP_KERNEL,
- &state->tx_kfifo_lock);
- if (state->tx_kfifo == NULL) {
- kfifo_free(state->rx_kfifo);
+ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
return -ENOMEM;
- }
state->dev = dev;
state->id = V4L2_IDENT_CX23888_IR;
@@ -1211,8 +1207,7 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
sizeof(struct v4l2_subdev_ir_parameters));
v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params);
} else {
- kfifo_free(state->rx_kfifo);
- kfifo_free(state->tx_kfifo);
+ kfifo_free(&state->rx_kfifo);
}
return ret;
}
@@ -1231,8 +1226,7 @@ int cx23888_ir_remove(struct cx23885_dev *dev)
state = to_state(sd);
v4l2_device_unregister_subdev(sd);
- kfifo_free(state->rx_kfifo);
- kfifo_free(state->tx_kfifo);
+ kfifo_free(&state->rx_kfifo);
kfree(state);
/* Nothing more to free() as state held the actual v4l2_subdev object */
return 0;
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index fbdc1cde56a..6fe30e6c426 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1048,21 +1048,15 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id)
static int mpeg_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx8802_dev *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx8802_dev *dev = video_drvdata(file);
struct cx8802_fh *fh;
struct cx8802_driver *drv = NULL;
int err;
- lock_kernel();
- dev = cx8802_get_device(minor);
-
dprintk( 1, "%s\n", __func__);
- if (dev == NULL) {
- unlock_kernel();
- return -ENODEV;
- }
+ lock_kernel();
/* Make sure we can acquire the hardware */
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
@@ -1081,7 +1075,7 @@ static int mpeg_open(struct file *file)
unlock_kernel();
return -EINVAL;
}
- dprintk(1,"open minor=%d\n",minor);
+ dprintk(1, "open dev=%s\n", video_device_node_name(vdev));
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
@@ -1129,10 +1123,6 @@ static int mpeg_release(struct file *file)
kfree(fh);
/* Make sure we release the hardware */
- dev = cx8802_get_device(video_devdata(file)->minor);
- if (dev == NULL)
- return -ENODEV;
-
drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
if (drv)
drv->request_release(drv);
@@ -1220,7 +1210,6 @@ static struct video_device cx8802_mpeg_template = {
.name = "cx8802",
.fops = &mpeg_fops,
.ioctl_ops = &mpeg_ioctl_ops,
- .minor = -1,
.tvnorms = CX88_NORMS,
.current_norm = V4L2_STD_NTSC_M,
};
@@ -1276,7 +1265,7 @@ static int cx8802_blackbird_advise_release(struct cx8802_driver *drv)
static void blackbird_unregister_video(struct cx8802_dev *dev)
{
if (dev->mpeg_dev) {
- if (-1 != dev->mpeg_dev->minor)
+ if (video_is_registered(dev->mpeg_dev))
video_unregister_device(dev->mpeg_dev);
else
video_device_release(dev->mpeg_dev);
@@ -1290,14 +1279,15 @@ static int blackbird_register_video(struct cx8802_dev *dev)
dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci,
&cx8802_mpeg_template,"mpeg");
+ video_set_drvdata(dev->mpeg_dev, dev);
err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1);
if (err < 0) {
printk(KERN_INFO "%s/2: can't register mpeg device\n",
dev->core->name);
return err;
}
- printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n",
- dev->core->name, dev->mpeg_dev->num);
+ printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
+ dev->core->name, video_device_node_name(dev->mpeg_dev));
return 0;
}
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 92b8cdf9fb8..f9fda18b410 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -360,7 +360,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci));
- err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes);
+ err = ir_input_init(input_dev, &ir->ir, ir_type);
if (err < 0)
goto err_out_free;
@@ -383,7 +383,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
cx88_ir_start(core, ir);
/* all done */
- err = input_register_device(ir->input);
+ err = ir_input_register(ir->input, ir_codes);
if (err)
goto err_out_stop;
@@ -393,8 +393,6 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
cx88_ir_stop(core, ir);
core->ir = NULL;
err_out_free:
- ir_input_free(input_dev);
- input_free_device(input_dev);
kfree(ir);
return err;
}
@@ -408,8 +406,7 @@ int cx88_ir_fini(struct cx88_core *core)
return 0;
cx88_ir_stop(core, ir);
- ir_input_free(ir->input);
- input_unregister_device(ir->input);
+ ir_input_unregister(ir->input);
kfree(ir);
/* done */
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index de9ff0fc741..bb510489341 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -580,21 +580,6 @@ static int cx8802_resume_common(struct pci_dev *pci_dev)
return 0;
}
-#if defined(CONFIG_VIDEO_CX88_BLACKBIRD) || \
- defined(CONFIG_VIDEO_CX88_BLACKBIRD_MODULE)
-struct cx8802_dev *cx8802_get_device(int minor)
-{
- struct cx8802_dev *dev;
-
- list_for_each_entry(dev, &cx8802_devlist, devlist)
- if (dev->mpeg_dev && dev->mpeg_dev->minor == minor)
- return dev;
-
- return NULL;
-}
-EXPORT_SYMBOL(cx8802_get_device);
-#endif
-
struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
{
struct cx8802_driver *d;
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index d7e8fcee559..48c450f4a85 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -75,10 +75,6 @@ MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
#define dprintk(level,fmt, arg...) if (video_debug >= level) \
printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
-/* ------------------------------------------------------------------ */
-
-static LIST_HEAD(cx8800_devlist);
-
/* ------------------------------------------------------------------- */
/* static data */
@@ -753,38 +749,31 @@ static int get_ressource(struct cx8800_fh *fh)
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx8800_dev *h,*dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core;
struct cx8800_fh *fh;
enum v4l2_buf_type type = 0;
int radio = 0;
- lock_kernel();
- list_for_each_entry(h, &cx8800_devlist, devlist) {
- if (h->video_dev->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- if (h->vbi_dev->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- }
- if (h->radio_dev &&
- h->radio_dev->minor == minor) {
- radio = 1;
- dev = h;
- }
- }
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ break;
+ case VFL_TYPE_VBI:
+ type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ break;
+ case VFL_TYPE_RADIO:
+ radio = 1;
+ break;
}
+ lock_kernel();
+
core = dev->core;
- dprintk(1,"open minor=%d radio=%d type=%s\n",
- minor,radio,v4l2_type_names[type]);
+ dprintk(1, "open dev=%s radio=%d type=%s\n",
+ video_device_node_name(vdev), radio, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
@@ -1733,7 +1722,6 @@ static struct video_device cx8800_vbi_template;
static struct video_device cx8800_video_template = {
.name = "cx8800-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX88_NORMS,
.current_norm = V4L2_STD_NTSC_M,
@@ -1769,7 +1757,6 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
static struct video_device cx8800_radio_template = {
.name = "cx8800-radio",
.fops = &radio_fops,
- .minor = -1,
.ioctl_ops = &radio_ioctl_ops,
};
@@ -1778,21 +1765,21 @@ static struct video_device cx8800_radio_template = {
static void cx8800_unregister_video(struct cx8800_dev *dev)
{
if (dev->radio_dev) {
- if (-1 != dev->radio_dev->minor)
+ if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
dev->radio_dev = NULL;
}
if (dev->vbi_dev) {
- if (-1 != dev->vbi_dev->minor)
+ if (video_is_registered(dev->vbi_dev))
video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->video_dev) {
- if (-1 != dev->video_dev->minor)
+ if (video_is_registered(dev->video_dev))
video_unregister_device(dev->video_dev);
else
video_device_release(dev->video_dev);
@@ -1909,6 +1896,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
/* register v4l devices */
dev->video_dev = cx88_vdev_init(core,dev->pci,
&cx8800_video_template,"video");
+ video_set_drvdata(dev->video_dev, dev);
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
@@ -1916,10 +1904,11 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
core->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
- core->name, dev->video_dev->num);
+ printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
+ core->name, video_device_node_name(dev->video_dev));
dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
+ video_set_drvdata(dev->vbi_dev, dev);
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[core->nr]);
if (err < 0) {
@@ -1927,12 +1916,13 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
core->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device vbi%d\n",
- core->name, dev->vbi_dev->num);
+ printk(KERN_INFO "%s/0: registered device %s\n",
+ core->name, video_device_node_name(dev->vbi_dev));
if (core->board.radio.type == CX88_RADIO) {
dev->radio_dev = cx88_vdev_init(core,dev->pci,
&cx8800_radio_template,"radio");
+ video_set_drvdata(dev->radio_dev, dev);
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[core->nr]);
if (err < 0) {
@@ -1940,12 +1930,11 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
core->name);
goto fail_unreg;
}
- printk(KERN_INFO "%s/0: registered device radio%d\n",
- core->name, dev->radio_dev->num);
+ printk(KERN_INFO "%s/0: registered device %s\n",
+ core->name, video_device_node_name(dev->radio_dev));
}
/* everything worked */
- list_add_tail(&dev->devlist,&cx8800_devlist);
pci_set_drvdata(pci_dev,dev);
/* initial device configuration */
@@ -2001,7 +1990,6 @@ static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
/* free memory */
btcx_riscmem_free(dev->pci,&dev->vidq.stopper);
- list_del(&dev->devlist);
cx88_core_put(core,dev->pci);
kfree(dev);
}
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index e1c52171010..b1499bf604e 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -423,7 +423,6 @@ struct cx8800_suspend_state {
struct cx8800_dev {
struct cx88_core *core;
- struct list_head devlist;
spinlock_t slock;
/* various device info */
@@ -670,7 +669,6 @@ int cx88_audio_thread(void *data);
int cx8802_register_driver(struct cx8802_driver *drv);
int cx8802_unregister_driver(struct cx8802_driver *drv);
-struct cx8802_dev *cx8802_get_device(int minor);
struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index 12a1b3d7132..de22bc9faf2 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -70,7 +70,6 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <linux/io.h>
#include <media/davinci/vpfe_capture.h>
@@ -1967,7 +1966,6 @@ static __init int vpfe_probe(struct platform_device *pdev)
vfd->release = video_device_release;
vfd->fops = &vpfe_fops;
vfd->ioctl_ops = &vpfe_ioctl_ops;
- vfd->minor = -1;
vfd->tvnorms = 0;
vfd->current_norm = V4L2_STD_PAL;
vfd->v4l2_dev = &vpfe_dev->v4l2_dev;
@@ -2071,7 +2069,7 @@ probe_out_video_unregister:
probe_out_v4l2_unregister:
v4l2_device_unregister(&vpfe_dev->v4l2_dev);
probe_out_video_release:
- if (vpfe_dev->video_dev->minor == -1)
+ if (!video_is_registered(vpfe_dev->video_dev))
video_device_release(vpfe_dev->video_dev);
probe_out_release_irq:
free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
@@ -2091,7 +2089,7 @@ probe_free_dev_mem:
/*
* vpfe_remove : It un-register device from V4L2 driver
*/
-static int vpfe_remove(struct platform_device *pdev)
+static int __devexit vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
struct resource *res;
@@ -2127,7 +2125,7 @@ vpfe_resume(struct device *dev)
return -1;
}
-static struct dev_pm_ops vpfe_dev_pm_ops = {
+static const struct dev_pm_ops vpfe_dev_pm_ops = {
.suspend = vpfe_suspend,
.resume = vpfe_resume,
};
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 3b8eac31eca..1f532e31cd4 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -266,7 +266,7 @@ fail:
return status;
}
-static int vpif_remove(struct platform_device *pdev)
+static int __devexit vpif_remove(struct platform_device *pdev)
{
iounmap(vpif_base);
release_mem_region(res->start, res_len);
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index d947ee5e4eb..78130721f57 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -2107,7 +2107,7 @@ vpif_resume(struct device *dev)
return -1;
}
-static struct dev_pm_ops vpif_dev_pm_ops = {
+static const struct dev_pm_ops vpif_dev_pm_ops = {
.suspend = vpif_suspend,
.resume = vpif_resume,
};
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index d14cfb200ed..dfddef7228d 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -1347,7 +1347,6 @@ static const struct v4l2_file_operations vpif_fops = {
static struct video_device vpif_video_template = {
.name = "vpif",
.fops = &vpif_fops,
- .minor = -1,
.ioctl_ops = &vpif_ioctl_ops,
.tvnorms = DM646X_V4L2_STD,
.current_norm = V4L2_STD_625_50,
diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c
index 453236bd755..7ee72ecd3d8 100644
--- a/drivers/media/video/davinci/vpss.c
+++ b/drivers/media/video/davinci/vpss.c
@@ -268,7 +268,7 @@ fail1:
return status;
}
-static int vpss_remove(struct platform_device *pdev)
+static int __devexit vpss_remove(struct platform_device *pdev)
{
iounmap(oper_cfg.vpss_bl_regs_base);
release_mem_region(oper_cfg.r1->start, oper_cfg.len1);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 82da205047b..25100001fff 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2285,7 +2285,7 @@ void em28xx_register_i2c_ir(struct em28xx *dev)
dev->init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
break;
case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
- dev->init_data.ir_codes = &ir_codes_hauppauge_new_table;
+ dev->init_data.ir_codes = &ir_codes_rc5_hauppauge_new_table;
dev->init_data.get_key = em28xx_get_key_em_haup;
dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
break;
@@ -2653,7 +2653,6 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
INIT_LIST_HEAD(&dev->vbiq.active);
INIT_LIST_HEAD(&dev->vbiq.queued);
-
if (dev->board.has_msp34xx) {
/* Send a reset to other chips via gpio */
errCode = em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf7);
@@ -2923,9 +2922,9 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
if (dev->users) {
em28xx_warn
- ("device /dev/video%d is open! Deregistration and memory "
+ ("device %s is open! Deregistration and memory "
"deallocation are deferred on close.\n",
- dev->vdev->num);
+ video_device_node_name(dev->vdev));
dev->state |= DEV_MISCONFIGURED;
em28xx_uninit_isoc(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 3f86d36dff2..b311d4514bd 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -216,7 +216,7 @@ int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
* sets only some bits (specified by bitmask) of a register, by first reading
* the actual value
*/
-static int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
+int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
u8 bitmask)
{
int oldval;
@@ -1136,34 +1136,6 @@ void em28xx_wake_i2c(struct em28xx *dev)
static LIST_HEAD(em28xx_devlist);
static DEFINE_MUTEX(em28xx_devlist_mutex);
-struct em28xx *em28xx_get_device(int minor,
- enum v4l2_buf_type *fh_type,
- int *has_radio)
-{
- struct em28xx *h, *dev = NULL;
-
- *fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- *has_radio = 0;
-
- mutex_lock(&em28xx_devlist_mutex);
- list_for_each_entry(h, &em28xx_devlist, devlist) {
- if (h->vdev->minor == minor)
- dev = h;
- if (h->vbi_dev && h->vbi_dev->minor == minor) {
- dev = h;
- *fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
- }
- if (h->radio_dev &&
- h->radio_dev->minor == minor) {
- dev = h;
- *has_radio = 1;
- }
- }
- mutex_unlock(&em28xx_devlist_mutex);
-
- return dev;
-}
-
/*
* em28xx_realease_resources()
* unregisters the v4l2,i2c and usb devices
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index d96ec7c09dc..af0d935c29b 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -112,10 +112,13 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
unsigned char buf[2];
- unsigned char code;
+ u16 code;
+ int size;
/* poll IR chip */
- if (2 != i2c_master_recv(ir->c, buf, 2))
+ size = i2c_master_recv(ir->c, buf, sizeof(buf));
+
+ if (size != 2)
return -EIO;
/* Does eliminate repeated parity code */
@@ -124,16 +127,30 @@ int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
ir->old = buf[1];
- /* Rearranges bits to the right order */
- code = ((buf[0]&0x01)<<5) | /* 0010 0000 */
- ((buf[0]&0x02)<<3) | /* 0001 0000 */
- ((buf[0]&0x04)<<1) | /* 0000 1000 */
- ((buf[0]&0x08)>>1) | /* 0000 0100 */
- ((buf[0]&0x10)>>3) | /* 0000 0010 */
- ((buf[0]&0x20)>>5); /* 0000 0001 */
-
- i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x)\n",
- code, buf[0]);
+ /*
+ * Rearranges bits to the right order.
+ * The bit order was determined experimentally by using
+ * the original Hauppauge Grey IR and another RC5 remote that uses addr=0x08.
+ * The RC5 code has 14 bits, but we've experimentally determined
+ * the meaning for only 11 bits.
+ * So, the code translation is not complete. Yet, it is enough to
+ * work with the provided RC5 IR.
+ */
+ code =
+ ((buf[0] & 0x01) ? 0x0020 : 0) | /* 0010 0000 */
+ ((buf[0] & 0x02) ? 0x0010 : 0) | /* 0001 0000 */
+ ((buf[0] & 0x04) ? 0x0008 : 0) | /* 0000 1000 */
+ ((buf[0] & 0x08) ? 0x0004 : 0) | /* 0000 0100 */
+ ((buf[0] & 0x10) ? 0x0002 : 0) | /* 0000 0010 */
+ ((buf[0] & 0x20) ? 0x0001 : 0) | /* 0000 0001 */
+ ((buf[1] & 0x08) ? 0x1000 : 0) | /* 0001 0000 */
+ ((buf[1] & 0x10) ? 0x0800 : 0) | /* 0000 1000 */
+ ((buf[1] & 0x20) ? 0x0400 : 0) | /* 0000 0100 */
+ ((buf[1] & 0x40) ? 0x0200 : 0) | /* 0000 0010 */
+ ((buf[1] & 0x80) ? 0x0100 : 0); /* 0000 0001 */
+
+ i2cdprintk("ir hauppauge (em2840): code=0x%02x (rcv=0x%02x%02x)\n",
+ code, buf[1], buf[0]);
/* return key */
*ir_key = code;
@@ -337,19 +354,28 @@ int em28xx_ir_init(struct em28xx *dev)
goto err_out_free;
ir->input = input_dev;
+ ir_config = EM2874_IR_RC5;
+
+ /* Adjust xclk based on IR table for RC5/NEC tables */
+ if (dev->board.ir_codes->ir_type == IR_TYPE_RC5) {
+ dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
+ ir->full_code = 1;
+ } else if (dev->board.ir_codes->ir_type == IR_TYPE_NEC) {
+ dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
+ ir_config = EM2874_IR_NEC;
+ ir->full_code = 1;
+ }
+ em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
+ EM28XX_XCLK_IR_RC5_MODE);
/* Setup the proper handler based on the chip */
switch (dev->chip_id) {
case CHIP_ID_EM2860:
case CHIP_ID_EM2883:
- if (dev->model == EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950)
- ir->full_code = 1;
ir->get_key = default_polling_getkey;
break;
case CHIP_ID_EM2874:
ir->get_key = em2874_polling_getkey;
- /* For now we only support RC5, so enable it */
- ir_config = EM2874_IR_RC5;
em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
break;
default:
@@ -367,8 +393,7 @@ int em28xx_ir_init(struct em28xx *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
- err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER,
- dev->board.ir_codes);
+ err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
if (err < 0)
goto err_out_free;
@@ -387,7 +412,7 @@ int em28xx_ir_init(struct em28xx *dev)
em28xx_ir_start(ir);
/* all done */
- err = input_register_device(ir->input);
+ err = ir_input_register(ir->input, dev->board.ir_codes);
if (err)
goto err_out_stop;
@@ -396,8 +421,6 @@ int em28xx_ir_init(struct em28xx *dev)
em28xx_ir_stop(ir);
dev->ir = NULL;
err_out_free:
- ir_input_free(input_dev);
- input_free_device(input_dev);
kfree(ir);
return err;
}
@@ -411,8 +434,7 @@ int em28xx_ir_fini(struct em28xx *dev)
return 0;
em28xx_ir_stop(ir);
- ir_input_free(ir->input);
- input_unregister_device(ir->input);
+ ir_input_unregister(ir->input);
kfree(ir);
/* done */
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 7ad65370f27..849b18c9403 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -2081,22 +2081,30 @@ static int radio_queryctrl(struct file *file, void *priv,
*/
static int em28xx_v4l2_open(struct file *filp)
{
- int minor = video_devdata(filp)->minor;
- int errCode = 0, radio;
- struct em28xx *dev;
- enum v4l2_buf_type fh_type;
+ int errCode = 0, radio = 0;
+ struct video_device *vdev = video_devdata(filp);
+ struct em28xx *dev = video_drvdata(filp);
+ enum v4l2_buf_type fh_type = 0;
struct em28xx_fh *fh;
enum v4l2_field field;
- dev = em28xx_get_device(minor, &fh_type, &radio);
-
- if (NULL == dev)
- return -ENODEV;
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ break;
+ case VFL_TYPE_VBI:
+ fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ break;
+ case VFL_TYPE_RADIO:
+ radio = 1;
+ break;
+ }
mutex_lock(&dev->lock);
- em28xx_videodbg("open minor=%d type=%s users=%d\n",
- minor, v4l2_type_names[fh_type], dev->users);
+ em28xx_videodbg("open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[fh_type],
+ dev->users);
fh = kzalloc(sizeof(struct em28xx_fh), GFP_KERNEL);
@@ -2160,25 +2168,25 @@ void em28xx_release_analog_resources(struct em28xx *dev)
/*FIXME: I2C IR should be disconnected */
if (dev->radio_dev) {
- if (-1 != dev->radio_dev->minor)
+ if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
dev->radio_dev = NULL;
}
if (dev->vbi_dev) {
- em28xx_info("V4L2 device /dev/vbi%d deregistered\n",
- dev->vbi_dev->num);
- if (-1 != dev->vbi_dev->minor)
+ em28xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(dev->vbi_dev));
+ if (video_is_registered(dev->vbi_dev))
video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->vdev) {
- em28xx_info("V4L2 device /dev/video%d deregistered\n",
- dev->vdev->num);
- if (-1 != dev->vdev->minor)
+ em28xx_info("V4L2 device %s deregistered\n",
+ video_device_node_name(dev->vdev));
+ if (video_is_registered(dev->vdev))
video_unregister_device(dev->vdev);
else
video_device_release(dev->vdev);
@@ -2397,8 +2405,6 @@ static const struct video_device em28xx_video_template = {
.release = video_device_release,
.ioctl_ops = &video_ioctl_ops,
- .minor = -1,
-
.tvnorms = V4L2_STD_ALL,
.current_norm = V4L2_STD_PAL,
};
@@ -2433,7 +2439,6 @@ static struct video_device em28xx_radio_template = {
.name = "em28xx-radio",
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
- .minor = -1,
};
/******************************** usb interface ******************************/
@@ -2451,7 +2456,6 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
return NULL;
*vfd = *template;
- vfd->minor = -1;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
vfd->debug = video_debug;
@@ -2459,6 +2463,7 @@ static struct video_device *em28xx_vdev_init(struct em28xx *dev,
snprintf(vfd->name, sizeof(vfd->name), "%s %s",
dev->name, type_name);
+ video_set_drvdata(vfd, dev);
return vfd;
}
@@ -2540,16 +2545,16 @@ int em28xx_register_analog_devices(struct em28xx *dev)
em28xx_errdev("can't register radio device\n");
return ret;
}
- em28xx_info("Registered radio device as /dev/radio%d\n",
- dev->radio_dev->num);
+ em28xx_info("Registered radio device as %s\n",
+ video_device_node_name(dev->radio_dev));
}
- em28xx_info("V4L2 video device registered as /dev/video%d\n",
- dev->vdev->num);
+ em28xx_info("V4L2 video device registered as %s\n",
+ video_device_node_name(dev->vdev));
if (dev->vbi_dev)
- em28xx_info("V4L2 VBI device registered as /dev/vbi%d\n",
- dev->vbi_dev->num);
+ em28xx_info("V4L2 VBI device registered as %s\n",
+ video_device_node_name(dev->vbi_dev));
return 0;
}
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 441df644ddb..80d9b4fa1b9 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -643,6 +643,8 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
int len);
int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val);
+int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
+ u8 bitmask);
int em28xx_read_ac97(struct em28xx *dev, u8 reg);
int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val);
@@ -666,9 +668,6 @@ int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
void em28xx_wake_i2c(struct em28xx *dev);
void em28xx_remove_from_devlist(struct em28xx *dev);
void em28xx_add_into_devlist(struct em28xx *dev);
-struct em28xx *em28xx_get_device(int minor,
- enum v4l2_buf_type *fh_type,
- int *has_radio);
int em28xx_register_extension(struct em28xx_ops *dev);
void em28xx_unregister_extension(struct em28xx_ops *dev);
void em28xx_init_extension(struct em28xx *dev);
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 88987a57cf7..e6c23d50986 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -587,8 +587,8 @@ static int et61x251_stream_interrupt(struct et61x251_device* cam)
else if (cam->stream != STREAM_OFF) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "URB timeout reached. The camera is misconfigured. To "
- "use it, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use it, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -1195,7 +1195,8 @@ static void et61x251_release_resources(struct kref *kref)
cam = container_of(kref, struct et61x251_device, kref);
- DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
+ DBG(2, "V4L2 device %s deregistered",
+ video_device_node_name(cam->v4ldev));
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
usb_put_dev(cam->usbdev);
@@ -1236,8 +1237,8 @@ static int et61x251_open(struct file *filp)
}
if (cam->users) {
- DBG(2, "Device /dev/video%d is already in use",
- cam->v4ldev->num);
+ DBG(2, "Device %s is already in use",
+ video_device_node_name(cam->v4ldev));
DBG(3, "Simultaneous opens are not supported");
if ((filp->f_flags & O_NONBLOCK) ||
(filp->f_flags & O_NDELAY)) {
@@ -1280,7 +1281,8 @@ static int et61x251_open(struct file *filp)
cam->frame_count = 0;
et61x251_empty_framequeues(cam);
- DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
+ DBG(3, "Video device %s is open",
+ video_device_node_name(cam->v4ldev));
out:
mutex_unlock(&cam->open_mutex);
@@ -1304,7 +1306,8 @@ static int et61x251_release(struct file *filp)
cam->users--;
wake_up_interruptible_nr(&cam->wait_open, 1);
- DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
+ DBG(3, "Video device %s closed",
+ video_device_node_name(cam->v4ldev));
kref_put(&cam->kref, et61x251_release_resources);
@@ -1846,8 +1849,8 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -1859,8 +1862,8 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -ENOMEM;
}
@@ -2069,8 +2072,8 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -2081,8 +2084,8 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
nbuffers != et61x251_request_buffers(cam, nbuffers, cam->io)) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -ENOMEM;
}
@@ -2130,7 +2133,7 @@ et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg)
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
"problems. To use the camera, close and open "
- "/dev/video%d again.", cam->v4ldev->num);
+ "%s again.", video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -2584,7 +2587,6 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, "ET61X[12]51 PC Camera");
cam->v4ldev->fops = &et61x251_fops;
- cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
cam->v4ldev->parent = &udev->dev;
video_set_drvdata(cam->v4ldev, cam);
@@ -2603,7 +2605,8 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
+ DBG(2, "V4L2 device registered as %s",
+ video_device_node_name(cam->v4ldev));
cam->module_param.force_munmap = force_munmap[dev_nr];
cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2654,9 +2657,9 @@ static void et61x251_usb_disconnect(struct usb_interface* intf)
DBG(2, "Disconnecting %s...", cam->v4ldev->name);
if (cam->users) {
- DBG(2, "Device /dev/video%d is open! Deregistration and "
- "memory deallocation are deferred.",
- cam->v4ldev->num);
+ DBG(2, "Device %s is open! Deregistration and memory "
+ "deallocation are deferred.",
+ video_device_node_name(cam->v4ldev));
cam->state |= DEV_MISCONFIGURED;
et61x251_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 2f0b8d621e0..c98b5d69c43 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1046,14 +1046,14 @@ static struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x0572, 0x0041)},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int sd_probe(struct usb_interface *intf,
+static int __devinit sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 9de86419ae1..fdf4c0ec5e7 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -864,7 +864,7 @@ static struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
#if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
{USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
@@ -875,7 +875,7 @@ static __devinitdata struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int sd_probe(struct usb_interface *intf,
+static int __devinit sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/gl860/gl860-mi1320.c b/drivers/media/video/gspca/gl860/gl860-mi1320.c
index 1355e526ee8..c276a7debde 100644
--- a/drivers/media/video/gspca/gl860/gl860-mi1320.c
+++ b/drivers/media/video/gspca/gl860/gl860-mi1320.c
@@ -345,7 +345,7 @@ static int mi1320_configure_alt(struct gspca_dev *gspca_dev)
return 0;
}
-int mi1320_camera_settings(struct gspca_dev *gspca_dev)
+static int mi1320_camera_settings(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/gl860/gl860-mi2020.c b/drivers/media/video/gspca/gl860/gl860-mi2020.c
index 80cb3f1b36f..7c31b4f2abe 100644
--- a/drivers/media/video/gspca/gl860/gl860-mi2020.c
+++ b/drivers/media/video/gspca/gl860/gl860-mi2020.c
@@ -769,7 +769,7 @@ static int mi2020_configure_alt(struct gspca_dev *gspca_dev)
return 0;
}
-int mi2020_camera_settings(struct gspca_dev *gspca_dev)
+static int mi2020_camera_settings(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index a695e0ae13c..4878c8f6654 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -40,7 +40,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
static void sd_callback(struct gspca_dev *gspca_dev);
static int gl860_guess_sensor(struct gspca_dev *gspca_dev,
- s32 vendor_id, s32 product_id);
+ u16 vendor_id, u16 product_id);
/*============================ driver options ==============================*/
@@ -326,11 +326,11 @@ static int sd_config(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
- s32 vendor_id, product_id;
+ u16 vendor_id, product_id;
/* Get USB VendorID and ProductID */
- vendor_id = le16_to_cpu(id->idVendor);
- product_id = le16_to_cpu(id->idProduct);
+ vendor_id = id->idVendor;
+ product_id = id->idProduct;
sd->nbRightUp = 1;
sd->nbIm = -1;
@@ -534,8 +534,8 @@ static int sd_probe(struct usb_interface *intf,
gspca_dev = usb_get_intfdata(intf);
PDEBUG(D_PROBE,
- "Camera is now controlling video device /dev/video%d",
- gspca_dev->vdev.minor);
+ "Camera is now controlling video device %s",
+ video_device_node_name(&gspca_dev->vdev));
}
return ret;
@@ -673,7 +673,7 @@ void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len)
}
static int gl860_guess_sensor(struct gspca_dev *gspca_dev,
- s32 vendor_id, s32 product_id)
+ u16 vendor_id, u16 product_id)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 probe, nb26, nb96, nOV, ntry;
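The gl860.c change is more than a type cleanup: idVendor/idProduct in struct usb_device_id are plain __u16 values written by the driver itself through USB_DEVICE(), so they are already in host byte order, and passing them through le16_to_cpu() would mangle them on big-endian machines (the __le16 fields live in struct usb_device_descriptor, which is where that conversion belongs). A hypothetical illustration with example ids:

/* Illustrative only: match-table ids need no byte swapping. */
static int example_is_supported(const struct usb_device_id *id)
{
        u16 vendor_id  = id->idVendor;  /* already host order */
        u16 product_id = id->idProduct;

        return vendor_id == 0x05e3 && product_id == 0x0503; /* example ids */
}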
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 4076f8e5a6f..e930a67d526 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -304,7 +304,6 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
j = gspca_dev->fr_queue[i];
gspca_dev->cur_frame = &gspca_dev->frame[j];
}
- return;
}
EXPORT_SYMBOL(gspca_frame_add);
@@ -321,7 +320,7 @@ static int gspca_is_compressed(__u32 format)
return 0;
}
-static void *rvmalloc(unsigned long size)
+static void *rvmalloc(long size)
{
void *mem;
unsigned long adr;
@@ -329,7 +328,7 @@ static void *rvmalloc(unsigned long size)
mem = vmalloc_32(size);
if (mem != NULL) {
adr = (unsigned long) mem;
- while ((long) size > 0) {
+ while (size > 0) {
SetPageReserved(vmalloc_to_page((void *) adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
@@ -768,6 +767,7 @@ static int vidioc_g_register(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = gspca_dev->sd_desc->get_register(gspca_dev, reg);
else
@@ -791,6 +791,7 @@ static int vidioc_s_register(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = gspca_dev->sd_desc->set_register(gspca_dev, reg);
else
@@ -812,6 +813,7 @@ static int vidioc_g_chip_ident(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
else
@@ -983,11 +985,40 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
}
+static int vidioc_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *fival)
+{
+ struct gspca_dev *gspca_dev = priv;
+ int mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
+ __u32 i;
+
+ if (gspca_dev->cam.mode_framerates == NULL ||
+ gspca_dev->cam.mode_framerates[mode].nrates == 0)
+ return -EINVAL;
+
+ if (fival->pixel_format !=
+ gspca_dev->cam.cam_mode[mode].pixelformat)
+ return -EINVAL;
+
+ for (i = 0; i < gspca_dev->cam.mode_framerates[mode].nrates; i++) {
+ if (fival->index == i) {
+ fival->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fival->discrete.numerator = 1;
+ fival->discrete.denominator =
+ gspca_dev->cam.mode_framerates[mode].rates[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
static void gspca_release(struct video_device *vfd)
{
struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev);
- PDEBUG(D_PROBE, "/dev/video%d released", gspca_dev->vdev.num);
+ PDEBUG(D_PROBE, "%s released",
+ video_device_node_name(&gspca_dev->vdev));
kfree(gspca_dev->usb_buf);
kfree(gspca_dev);
@@ -1053,6 +1084,7 @@ static int dev_close(struct file *file)
if (gspca_dev->capt_file == file) {
if (gspca_dev->streaming) {
mutex_lock(&gspca_dev->usb_lock);
+ gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
@@ -1143,12 +1175,14 @@ static int vidioc_queryctrl(struct file *file, void *priv,
continue;
ctrls = &gspca_dev->sd_desc->ctrls[i];
}
+ if (ctrls == NULL)
+ return -EINVAL;
} else {
ctrls = get_ctrl(gspca_dev, id);
+ if (ctrls == NULL)
+ return -EINVAL;
i = ctrls - gspca_dev->sd_desc->ctrls;
}
- if (ctrls == NULL)
- return -EINVAL;
memcpy(q_ctrl, ctrls, sizeof *q_ctrl);
if (gspca_dev->ctrl_inac & (1 << i))
q_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
@@ -1172,6 +1206,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = ctrls->set(gspca_dev, ctrl->value);
else
@@ -1193,6 +1228,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = ctrls->get(gspca_dev, &ctrl->value);
else
@@ -1307,6 +1343,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
/* stop streaming */
if (gspca_dev->streaming) {
mutex_lock(&gspca_dev->usb_lock);
+ gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
}
@@ -1398,6 +1435,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
ret = -ERESTARTSYS;
goto out;
}
+ gspca_dev->usb_err = 0;
gspca_stream_off(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
@@ -1423,6 +1461,7 @@ static int vidioc_g_jpegcomp(struct file *file, void *priv,
return -EINVAL;
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp);
else
@@ -1441,6 +1480,7 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
return -EINVAL;
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp);
else
@@ -1461,6 +1501,7 @@ static int vidioc_g_parm(struct file *filp, void *priv,
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = gspca_dev->sd_desc->get_streamparm(gspca_dev,
parm);
@@ -1490,6 +1531,7 @@ static int vidioc_s_parm(struct file *filp, void *priv,
if (mutex_lock_interruptible(&gspca_dev->usb_lock))
return -ERESTARTSYS;
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
ret = gspca_dev->sd_desc->set_streamparm(gspca_dev,
parm);
@@ -1613,7 +1655,7 @@ static int dev_mmap(struct file *file, struct vm_area_struct *vma)
size -= PAGE_SIZE;
}
- vma->vm_ops = (struct vm_operations_struct *) &gspca_vm_ops;
+ vma->vm_ops = &gspca_vm_ops;
vma->vm_private_data = frame;
gspca_vm_open(vma);
ret = 0;
@@ -1661,6 +1703,7 @@ static int frame_wait(struct gspca_dev *gspca_dev,
if (gspca_dev->sd_desc->dq_callback) {
mutex_lock(&gspca_dev->usb_lock);
+ gspca_dev->usb_err = 0;
if (gspca_dev->present)
gspca_dev->sd_desc->dq_callback(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
@@ -1973,6 +2016,7 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
.vidioc_g_parm = vidioc_g_parm,
.vidioc_s_parm = vidioc_s_parm,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
@@ -1988,7 +2032,6 @@ static struct video_device gspca_template = {
.fops = &dev_fops,
.ioctl_ops = &dev_ioctl_ops,
.release = gspca_release,
- .minor = -1,
};
/*
@@ -2049,9 +2092,6 @@ int gspca_dev_probe(struct usb_interface *intf,
ret = sd_desc->init(gspca_dev);
if (ret < 0)
goto out;
- ret = gspca_set_alt0(gspca_dev);
- if (ret < 0)
- goto out;
gspca_set_default_mode(gspca_dev);
mutex_init(&gspca_dev->usb_lock);
@@ -2073,7 +2113,7 @@ int gspca_dev_probe(struct usb_interface *intf,
}
usb_set_intfdata(intf, gspca_dev);
- PDEBUG(D_PROBE, "/dev/video%d created", gspca_dev->vdev.num);
+ PDEBUG(D_PROBE, "%s created", video_device_node_name(&gspca_dev->vdev));
return 0;
out:
kfree(gspca_dev->usb_buf);
@@ -2092,7 +2132,8 @@ void gspca_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
- PDEBUG(D_PROBE, "/dev/video%d disconnect", gspca_dev->vdev.num);
+ PDEBUG(D_PROBE, "%s disconnect",
+ video_device_node_name(&gspca_dev->vdev));
mutex_lock(&gspca_dev->usb_lock);
gspca_dev->present = 0;
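gspca.c gains VIDIOC_ENUM_FRAMEINTERVALS support (wired into dev_ioctl_ops further down) plus the usb_err field that the per-subdriver hunks later in this series rely on. One small nit in the new handler: it assigns V4L2_FRMSIZE_TYPE_DISCRETE where the interval-specific constant is V4L2_FRMIVAL_TYPE_DISCRETE; both expand to 1, so userspace still sees a discrete interval. A sketch of how an application would exercise the new ioctl, with the device path and pixel format chosen only as examples:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_frmivalenum fi = {
                .pixel_format = V4L2_PIX_FMT_JPEG,
                .width = 640,
                .height = 480,
        };
        int fd = open("/dev/video0", O_RDONLY);

        if (fd < 0)
                return 1;
        while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &fi) == 0) {
                printf("%u/%u s per frame\n",
                       fi.discrete.numerator, fi.discrete.denominator);
                fi.index++;
        }
        close(fd);
        return 0;
}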
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 181617355ec..59c7941da99 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -45,11 +45,20 @@ extern int gspca_debug;
/* image transfers */
#define MAX_NURBS 4 /* max number of URBs */
+
+/* used to list framerates supported by a camera mode (resolution) */
+struct framerates {
+ int *rates;
+ int nrates;
+};
+
/* device information - set at probe time */
struct cam {
int bulk_size; /* buffer size when image transfer by bulk */
const struct v4l2_pix_format *cam_mode; /* size nmodes */
char nmodes;
+ const struct framerates *mode_framerates; /* must have size nmode,
+ * just like cam_mode */
__u8 bulk_nurbs; /* number of URBs in bulk mode
* - cannot be > MAX_NURBS
* - when 0 and bulk_size != 0 means
@@ -171,6 +180,7 @@ struct gspca_dev {
struct mutex usb_lock; /* usb exchange protection */
struct mutex read_lock; /* read protection */
struct mutex queue_lock; /* ISOC queue protection */
+ int usb_err; /* USB error - protected by usb_lock */
#ifdef CONFIG_PM
char frozen; /* suspend - resume */
#endif
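The new struct framerates lets a subdriver publish the frame rates of each resolution; mode_framerates must be indexed exactly like cam_mode, one entry per mode. A hypothetical pair of tables showing the intended layout (the mode list, rates and names are made up):

static const struct v4l2_pix_format example_modes[] = {
        {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
                .bytesperline = 320,
                .sizeimage = 320 * 240 * 3 / 8 + 590,
                .colorspace = V4L2_COLORSPACE_JPEG},
        {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
                .bytesperline = 640,
                .sizeimage = 640 * 480 * 3 / 8 + 590,
                .colorspace = V4L2_COLORSPACE_JPEG},
};

static int example_qvga_rates[] = {30, 15};
static int example_vga_rates[] = {15, 5};

static const struct framerates example_framerates[] = {
        { .rates = example_qvga_rates, .nrates = ARRAY_SIZE(example_qvga_rates) },
        { .rates = example_vga_rates,  .nrates = ARRAY_SIZE(example_vga_rates)  },
};

/* in sd_config():
 *      cam->cam_mode = example_modes;
 *      cam->nmodes = ARRAY_SIZE(example_modes);
 *      cam->mode_framerates = example_framerates;
 */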
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 844fc1d886d..4294c75e3b1 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -81,7 +81,7 @@ int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data)
return (err < 0) ? err : 0;
}
-int m5602_wait_for_i2c(struct sd *sd)
+static int m5602_wait_for_i2c(struct sd *sd)
{
int err;
u8 data;
@@ -388,7 +388,7 @@ static int m5602_probe(struct usb_interface *intf,
THIS_MODULE);
}
-void m5602_disconnect(struct usb_interface *intf)
+static void m5602_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
struct sd *sd = (struct sd *) gspca_dev;
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index c2739d6605a..923cdd5f7a6 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -439,7 +439,7 @@ int ov9650_start(struct sd *sd)
err = m5602_write_bridge(sd, res_init_ov9650[i][1],
res_init_ov9650[i][2]);
else if (res_init_ov9650[i][0] == SENSOR) {
- u8 data = res_init_ov9650[i][2];
+ data = res_init_ov9650[i][2];
err = m5602_write_sensor(sd,
res_init_ov9650[i][1], &data, 1);
}
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index a27afeb6f39..aa2f3c7e2cb 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -525,7 +525,10 @@ static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
if (err < 0)
return err;
- data = (data & 0xfe) | !val;
+ if (val)
+ data &= 0xfe;
+ else
+ data |= 0x01;
err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
return err;
}
@@ -570,7 +573,10 @@ static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
if (err < 0)
return err;
- data = (data & 0xfe) | !val;
+ if (val)
+ data &= 0xfe;
+ else
+ data |= 0x01;
err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
return err;
}
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 126d968dd9e..9154870e07d 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -67,7 +67,7 @@ MODULE_DESCRIPTION("GSPCA/Mars-Semi MR97310A USB Camera Driver");
MODULE_LICENSE("GPL");
/* global parameters */
-int force_sensor_type = -1;
+static int force_sensor_type = -1;
module_param(force_sensor_type, int, 0644);
MODULE_PARM_DESC(force_sensor_type, "Force sensor type (-1 (auto), 0 or 1)");
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index ad9ec339981..b4f96573124 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -1982,7 +1982,7 @@ static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
{
int ret;
- *((u32 *)sd->gspca_dev.usb_buf) = __cpu_to_le32(value);
+ *((__le32 *) sd->gspca_dev.usb_buf) = __cpu_to_le32(value);
ret = usb_control_msg(sd->gspca_dev.dev,
usb_sndctrlpipe(sd->gspca_dev.dev, 0),
@@ -2021,9 +2021,9 @@ static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value)
if (rc < 0)
return rc;
- do
+ do {
rc = reg_r(sd, R511_I2C_CTL);
- while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+ } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
if (rc < 0)
return rc;
@@ -2055,9 +2055,9 @@ static int ov511_i2c_r(struct sd *sd, __u8 reg)
if (rc < 0)
return rc;
- do
+ do {
rc = reg_r(sd, R511_I2C_CTL);
- while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+ } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
if (rc < 0)
return rc;
@@ -2081,9 +2081,9 @@ static int ov511_i2c_r(struct sd *sd, __u8 reg)
if (rc < 0)
return rc;
- do
+ do {
rc = reg_r(sd, R511_I2C_CTL);
- while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+ } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
if (rc < 0)
return rc;
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 74acceea809..de0b66c4b56 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -90,6 +90,9 @@ struct sd {
unsigned char autogain;
__u8 hflip;
__u8 vflip;
+ u8 flags;
+#define FL_HFLIP 0x01 /* mirrored by default */
+#define FL_VFLIP 0x02 /* vertical flipped by default */
u8 sof_read;
u8 autogain_ignore_frames;
@@ -552,6 +555,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->autogain = AUTOGAIN_DEF;
sd->hflip = HFLIP_DEF;
sd->vflip = VFLIP_DEF;
+ sd->flags = id->driver_info;
return 0;
}
@@ -708,10 +712,17 @@ static int sethvflip(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
int ret;
- __u8 data;
+ u8 data, hflip, vflip;
+
+ hflip = sd->hflip;
+ if (sd->flags & FL_HFLIP)
+ hflip = !hflip;
+ vflip = sd->vflip;
+ if (sd->flags & FL_VFLIP)
+ vflip = !vflip;
ret = reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
- data = (sd->hflip ? 0x08 : 0x00) | (sd->vflip ? 0x04 : 0x00);
+ data = (hflip ? 0x08 : 0x00) | (vflip ? 0x04 : 0x00);
if (0 <= ret)
ret = reg_w(gspca_dev, 0x21, data);
/* load registers to sensor (Bit 0, auto clear) */
@@ -1218,15 +1229,15 @@ static struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x06f8, 0x3009)},
{USB_DEVICE(0x093a, 0x2620)},
{USB_DEVICE(0x093a, 0x2621)},
- {USB_DEVICE(0x093a, 0x2622)},
- {USB_DEVICE(0x093a, 0x2624)},
+ {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+ {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2626)},
{USB_DEVICE(0x093a, 0x2628)},
- {USB_DEVICE(0x093a, 0x2629)},
+ {USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x262a)},
{USB_DEVICE(0x093a, 0x262c)},
{}
@@ -1234,7 +1245,7 @@ static __devinitdata struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int sd_probe(struct usb_interface *intf,
+static int __devinit sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
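The pac7302 hunks use .driver_info in the id table to mark models whose sensor is mounted flipped; the matched entry is passed to sd_config(), which stores the flags, and sethvflip() then inverts the user's request for those models. A condensed sketch of that flow (only FL_HFLIP/FL_VFLIP come from the patch; the function names are illustrative):

static int example_sd_config(struct gspca_dev *gspca_dev,
                             const struct usb_device_id *id)
{
        struct sd *sd = (struct sd *) gspca_dev;

        sd->flags = id->driver_info;    /* 0, FL_HFLIP and/or FL_VFLIP */
        return 0;
}

static u8 example_effective_vflip(struct sd *sd)
{
        u8 vflip = sd->vflip;

        if (sd->flags & FL_VFLIP)       /* sensor mounted upside down */
                vflip = !vflip;
        return vflip;
}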
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index e5697a6345e..42cfcdfd8f4 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -863,7 +863,7 @@ static struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x093a, 0x2600)},
{USB_DEVICE(0x093a, 0x2601)},
{USB_DEVICE(0x093a, 0x2603)},
@@ -875,7 +875,7 @@ static __devinitdata struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int sd_probe(struct usb_interface *intf,
+static int __devinit sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index b1944a7cbb0..4cff8035614 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -1158,7 +1158,7 @@ static int i2c_w2(struct gspca_dev *gspca_dev, u8 reg, u16 val)
return i2c_w(gspca_dev, row);
}
-int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
+static int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 row[8];
@@ -1183,7 +1183,7 @@ int i2c_r1(struct gspca_dev *gspca_dev, u8 reg, u8 *val)
return 0;
}
-int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
+static int i2c_r2(struct gspca_dev *gspca_dev, u8 reg, u16 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 row[8];
@@ -1476,8 +1476,9 @@ static int sn9c20x_input_init(struct gspca_dev *gspca_dev)
if (input_register_device(sd->input_dev))
return -EINVAL;
- sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%d",
- gspca_dev->vdev.minor);
+ sd->input_task = kthread_run(input_kthread, gspca_dev, "sn9c20x/%s-%s",
+ gspca_dev->dev->bus->bus_name,
+ gspca_dev->dev->devpath);
if (IS_ERR(sd->input_task))
return -EINVAL;
@@ -2174,8 +2175,7 @@ static void configure_sensor_output(struct gspca_dev *gspca_dev, int mode)
}
#define HW_WIN(mode, hstart, vstart) \
-((const u8 []){hstart & 0xff, hstart >> 8, \
-vstart & 0xff, vstart >> 8, \
+((const u8 []){hstart, 0, vstart, 0, \
(mode & MODE_SXGA ? 1280 >> 4 : 640 >> 4), \
(mode & MODE_SXGA ? 1024 >> 3 : 480 >> 3)})
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 5be95bc6513..ddff2b5ee5c 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1226,7 +1226,7 @@ static const struct sd_desc sd_desc = {
.driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
-static __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x0c45, 0x6001), SB(TAS5110, 102)}, /* TAS5110C1B */
{USB_DEVICE(0x0c45, 0x6005), SB(TAS5110, 101)}, /* TAS5110C1B */
#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
@@ -1257,7 +1257,7 @@ static __devinitdata struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int sd_probe(struct usb_interface *intf,
+static int __devinit sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index ab28cc23e41..39257e4e074 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -685,7 +685,7 @@ static struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] __devinitconst = {
{USB_DEVICE(0x06e1, 0xa190)},
/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
{USB_DEVICE(0x0733, 0x0430)}, */
@@ -696,7 +696,7 @@ static __devinitdata struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
-static int sd_probe(struct usb_interface *intf,
+static int __devinit sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 8e23320d7ab..2e2935532d9 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -126,12 +126,14 @@ static const struct v4l2_pix_format vga_mode[] = {
};
/* -- read a register -- */
-static int reg_r(struct gspca_dev *gspca_dev,
+static u8 reg_r(struct gspca_dev *gspca_dev,
__u16 index)
{
struct usb_device *dev = gspca_dev->dev;
int ret;
+ if (gspca_dev->usb_err < 0)
+ return 0;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -141,18 +143,21 @@ static int reg_r(struct gspca_dev *gspca_dev,
500);
if (ret < 0) {
PDEBUG(D_ERR, "reg_r err %d", ret);
- return ret;
+ gspca_dev->usb_err = ret;
+ return 0;
}
return gspca_dev->usb_buf[0];
}
/* -- write a register -- */
-static int reg_w(struct gspca_dev *gspca_dev,
+static void reg_w(struct gspca_dev *gspca_dev,
__u16 index, __u16 value)
{
struct usb_device *dev = gspca_dev->dev;
int ret;
+ if (gspca_dev->usb_err < 0)
+ return;
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
0x01,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -161,13 +166,14 @@ static int reg_w(struct gspca_dev *gspca_dev,
NULL,
0,
500);
- if (ret < 0)
+ if (ret < 0) {
PDEBUG(D_ERR, "reg_w err %d", ret);
- return ret;
+ gspca_dev->usb_err = ret;
+ }
}
/* -- get a bulk value (4 bytes) -- */
-static int rcv_val(struct gspca_dev *gspca_dev,
+static void rcv_val(struct gspca_dev *gspca_dev,
int ads)
{
struct usb_device *dev = gspca_dev->dev;
@@ -182,17 +188,22 @@ static int rcv_val(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 0x63a, 0);
reg_w(gspca_dev, 0x63b, 0);
reg_w(gspca_dev, 0x630, 5);
+ if (gspca_dev->usb_err < 0)
+ return;
ret = usb_bulk_msg(dev,
usb_rcvbulkpipe(dev, 0x05),
gspca_dev->usb_buf,
4, /* length */
&alen,
500); /* timeout in milliseconds */
- return ret;
+ if (ret < 0) {
+ PDEBUG(D_ERR, "rcv_val err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* -- send a bulk value -- */
-static int snd_val(struct gspca_dev *gspca_dev,
+static void snd_val(struct gspca_dev *gspca_dev,
int ads,
unsigned int val)
{
@@ -201,16 +212,9 @@ static int snd_val(struct gspca_dev *gspca_dev,
__u8 seq = 0;
if (ads == 0x003f08) {
- ret = reg_r(gspca_dev, 0x0704);
- if (ret < 0)
- goto ko;
- ret = reg_r(gspca_dev, 0x0705);
- if (ret < 0)
- goto ko;
- seq = ret; /* keep the sequence number */
- ret = reg_r(gspca_dev, 0x0650);
- if (ret < 0)
- goto ko;
+ reg_r(gspca_dev, 0x0704);
+ seq = reg_r(gspca_dev, 0x0705);
+ reg_r(gspca_dev, 0x0650);
reg_w(gspca_dev, 0x654, seq);
} else {
reg_w(gspca_dev, 0x654, (ads >> 16) & 0xff);
@@ -223,6 +227,8 @@ static int snd_val(struct gspca_dev *gspca_dev,
reg_w(gspca_dev, 0x65a, 0);
reg_w(gspca_dev, 0x65b, 0);
reg_w(gspca_dev, 0x650, 5);
+ if (gspca_dev->usb_err < 0)
+ return;
gspca_dev->usb_buf[0] = val >> 24;
gspca_dev->usb_buf[1] = val >> 16;
gspca_dev->usb_buf[2] = val >> 8;
@@ -233,24 +239,23 @@ static int snd_val(struct gspca_dev *gspca_dev,
4,
&alen,
500); /* timeout in milliseconds */
- if (ret < 0)
- goto ko;
- if (ads == 0x003f08) {
- seq += 4;
- seq &= 0x3f;
- reg_w(gspca_dev, 0x705, seq);
+ if (ret < 0) {
+ PDEBUG(D_ERR, "snd_val err %d", ret);
+ gspca_dev->usb_err = ret;
+ } else {
+ if (ads == 0x003f08) {
+ seq += 4;
+ seq &= 0x3f;
+ reg_w(gspca_dev, 0x705, seq);
+ }
}
- return ret;
-ko:
- PDEBUG(D_ERR, "snd_val err %d", ret);
- return ret;
}
/* set a camera parameter */
-static int set_par(struct gspca_dev *gspca_dev,
+static void set_par(struct gspca_dev *gspca_dev,
int parval)
{
- return snd_val(gspca_dev, 0x003f08, parval);
+ snd_val(gspca_dev, 0x003f08, parval);
}
static void setbrightness(struct gspca_dev *gspca_dev)
@@ -311,18 +316,18 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- int ret;
+ u8 ret;
/* check if the device responds */
usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
ret = reg_r(gspca_dev, 0x0740);
- if (ret < 0)
- return ret;
- if (ret != 0xff) {
- PDEBUG(D_ERR|D_STREAM, "init reg: 0x%02x", ret);
- return -1;
+ if (gspca_dev->usb_err >= 0) {
+ if (ret != 0xff) {
+ PDEBUG(D_ERR|D_STREAM, "init reg: 0x%02x", ret);
+ gspca_dev->usb_err = -EIO;
+ }
}
- return 0;
+ return gspca_dev->usb_err;
}
/* -- start the camera -- */
@@ -357,15 +362,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (ret < 0) {
PDEBUG(D_ERR|D_STREAM, "set intf %d %d failed",
gspca_dev->iface, gspca_dev->alt);
+ gspca_dev->usb_err = ret;
goto out;
}
- ret = reg_r(gspca_dev, 0x0630);
- if (ret < 0)
- goto out;
+ reg_r(gspca_dev, 0x0630);
rcv_val(gspca_dev, 0x000020); /* << (value ff ff ff ff) */
- ret = reg_r(gspca_dev, 0x0650);
- if (ret < 0)
- goto out;
+ reg_r(gspca_dev, 0x0650);
snd_val(gspca_dev, 0x000020, 0xffffffff);
reg_w(gspca_dev, 0x0620, 0);
reg_w(gspca_dev, 0x0630, 0);
@@ -384,11 +386,11 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* start the video flow */
set_par(gspca_dev, 0x01000000);
set_par(gspca_dev, 0x01000000);
- PDEBUG(D_STREAM, "camera started alt: 0x%02x", gspca_dev->alt);
- return 0;
+ if (gspca_dev->usb_err >= 0)
+ PDEBUG(D_STREAM, "camera started alt: 0x%02x",
+ gspca_dev->alt);
out:
- PDEBUG(D_ERR|D_STREAM, "camera start err %d", ret);
- return ret;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -456,7 +458,7 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
sd->brightness = val;
if (gspca_dev->streaming)
setbrightness(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -474,7 +476,7 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
sd->contrast = val;
if (gspca_dev->streaming)
setcontrast(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
@@ -492,7 +494,7 @@ static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
sd->colors = val;
if (gspca_dev->streaming)
setcolors(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
@@ -510,7 +512,7 @@ static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
sd->lightfreq = val;
if (gspca_dev->streaming)
setfreq(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
@@ -552,7 +554,7 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
sd->quality = jcomp->quality;
if (gspca_dev->streaming)
jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
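stk014.c is the first subdriver converted to the new usb_err convention: register helpers stop returning errors, become no-ops once gspca_dev->usb_err is negative, and latch the first failing USB status; the control and start entry points then simply return gspca_dev->usb_err. A minimal sketch of the convention, with the register address and values as placeholders:

static void example_reg_w(struct gspca_dev *gspca_dev, u16 index, u16 value)
{
        int ret;

        if (gspca_dev->usb_err < 0)             /* a previous transfer failed */
                return;
        ret = usb_control_msg(gspca_dev->dev,
                        usb_sndctrlpipe(gspca_dev->dev, 0),
                        0x01,
                        USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                        value, index, NULL, 0, 500);
        if (ret < 0)
                gspca_dev->usb_err = ret;       /* latch the first error */
}

static int example_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
        example_reg_w(gspca_dev, 0x06a5, val);  /* hypothetical register */
        return gspca_dev->usb_err;              /* 0 or the latched error */
}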
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 72bf3b4f0a3..716df6b15fc 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -460,13 +460,17 @@ static void reg_r(struct gspca_dev *gspca_dev,
u16 index,
u16 len)
{
+ int ret;
+
#ifdef GSPCA_DEBUG
if (len > USB_BUF_SZ) {
err("reg_r: buffer overflow");
return;
}
#endif
- usb_control_msg(gspca_dev->dev,
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -474,6 +478,10 @@ static void reg_r(struct gspca_dev *gspca_dev,
index,
len ? gspca_dev->usb_buf : NULL, len,
500);
+ if (ret < 0) {
+ PDEBUG(D_ERR, "reg_r err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* write one byte */
@@ -483,40 +491,55 @@ static void reg_w_1(struct gspca_dev *gspca_dev,
u16 index,
u16 byte)
{
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return;
gspca_dev->usb_buf[0] = byte;
- usb_control_msg(gspca_dev->dev,
+ ret = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index,
gspca_dev->usb_buf, 1,
500);
+ if (ret < 0) {
+ PDEBUG(D_ERR, "reg_w_1 err %d", ret);
+ gspca_dev->usb_err = ret;
+ }
}
/* write req / index / value */
-static int reg_w_riv(struct usb_device *dev,
+static void reg_w_riv(struct gspca_dev *gspca_dev,
u8 req, u16 index, u16 value)
{
+ struct usb_device *dev = gspca_dev->dev;
int ret;
+ if (gspca_dev->usb_err < 0)
+ return;
ret = usb_control_msg(dev,
usb_sndctrlpipe(dev, 0),
req,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
- PDEBUG(D_USBO, "reg write: 0x%02x,0x%02x:0x%02x, %d",
- req, index, value, ret);
- if (ret < 0)
- PDEBUG(D_ERR, "reg write: error %d", ret);
- return ret;
+ if (ret < 0) {
+ PDEBUG(D_ERR, "reg_w_riv err %d", ret);
+ gspca_dev->usb_err = ret;
+ return;
+ }
+ PDEBUG(D_USBO, "reg_w_riv: 0x%02x,0x%04x:0x%04x",
+ req, index, value);
}
/* read 1 byte */
-static int reg_r_1(struct gspca_dev *gspca_dev,
+static u8 reg_r_1(struct gspca_dev *gspca_dev,
u16 value) /* wValue */
{
int ret;
+ if (gspca_dev->usb_err < 0)
+ return 0;
ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
0x20, /* request */
@@ -527,19 +550,22 @@ static int reg_r_1(struct gspca_dev *gspca_dev,
500); /* timeout */
if (ret < 0) {
PDEBUG(D_ERR, "reg_r_1 err %d", ret);
+ gspca_dev->usb_err = ret;
return 0;
}
return gspca_dev->usb_buf[0];
}
-/* read 1 or 2 bytes - returns < 0 if error */
-static int reg_r_12(struct gspca_dev *gspca_dev,
+/* read 1 or 2 bytes */
+static u16 reg_r_12(struct gspca_dev *gspca_dev,
u8 req, /* bRequest */
u16 index, /* wIndex */
u16 length) /* wLength (1 or 2 only) */
{
int ret;
+ if (gspca_dev->usb_err < 0)
+ return 0;
gspca_dev->usb_buf[1] = 0;
ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
@@ -550,62 +576,44 @@ static int reg_r_12(struct gspca_dev *gspca_dev,
gspca_dev->usb_buf, length,
500);
if (ret < 0) {
- PDEBUG(D_ERR, "reg_read err %d", ret);
- return -1;
+ PDEBUG(D_ERR, "reg_r_12 err %d", ret);
+ gspca_dev->usb_err = ret;
+ return 0;
}
return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
}
-static int write_vector(struct gspca_dev *gspca_dev,
+static void write_vector(struct gspca_dev *gspca_dev,
const struct cmd *data, int ncmds)
{
- struct usb_device *dev = gspca_dev->dev;
- int ret;
-
while (--ncmds >= 0) {
- ret = reg_w_riv(dev, data->req, data->idx, data->val);
- if (ret < 0) {
- PDEBUG(D_ERR,
- "Register write failed for 0x%02x, 0x%04x, 0x%04x",
- data->req, data->val, data->idx);
- return ret;
- }
+ reg_w_riv(gspca_dev, data->req, data->idx, data->val);
data++;
}
- return 0;
}
-static int spca50x_setup_qtable(struct gspca_dev *gspca_dev,
- const u8 qtable[2][64])
+static void setup_qtable(struct gspca_dev *gspca_dev,
+ const u8 qtable[2][64])
{
- struct usb_device *dev = gspca_dev->dev;
- int i, err;
+ int i;
/* loop over y components */
- for (i = 0; i < 64; i++) {
- err = reg_w_riv(dev, 0x00, 0x2800 + i, qtable[0][i]);
- if (err < 0)
- return err;
- }
+ for (i = 0; i < 64; i++)
+ reg_w_riv(gspca_dev, 0x00, 0x2800 + i, qtable[0][i]);
/* loop over c components */
- for (i = 0; i < 64; i++) {
- err = reg_w_riv(dev, 0x00, 0x2840 + i, qtable[1][i]);
- if (err < 0)
- return err;
- }
- return 0;
+ for (i = 0; i < 64; i++)
+ reg_w_riv(gspca_dev, 0x00, 0x2840 + i, qtable[1][i]);
}
static void spca504_acknowledged_command(struct gspca_dev *gspca_dev,
u8 req, u16 idx, u16 val)
{
- struct usb_device *dev = gspca_dev->dev;
- int notdone;
+ u16 notdone;
- reg_w_riv(dev, req, idx, val);
+ reg_w_riv(gspca_dev, req, idx, val);
notdone = reg_r_12(gspca_dev, 0x01, 0x0001, 1);
- reg_w_riv(dev, req, idx, val);
+ reg_w_riv(gspca_dev, req, idx, val);
PDEBUG(D_FRAM, "before wait 0x%04x", notdone);
@@ -616,23 +624,22 @@ static void spca504_acknowledged_command(struct gspca_dev *gspca_dev,
static void spca504A_acknowledged_command(struct gspca_dev *gspca_dev,
u8 req,
- u16 idx, u16 val, u8 stat, u8 count)
+ u16 idx, u16 val, u16 endcode, u8 count)
{
- struct usb_device *dev = gspca_dev->dev;
- int status;
- u8 endcode;
+ u16 status;
- reg_w_riv(dev, req, idx, val);
+ reg_w_riv(gspca_dev, req, idx, val);
status = reg_r_12(gspca_dev, 0x01, 0x0001, 1);
- endcode = stat;
- PDEBUG(D_FRAM, "Status 0x%x Need 0x%04x", status, stat);
+ if (gspca_dev->usb_err < 0)
+ return;
+ PDEBUG(D_FRAM, "Status 0x%04x Need 0x%04x", status, endcode);
if (!count)
return;
count = 200;
while (--count > 0) {
msleep(10);
/* gsmart mini2 write a each wait setting 1 ms is enough */
-/* reg_w_riv(dev, req, idx, val); */
+/* reg_w_riv(gspca_dev, req, idx, val); */
status = reg_r_12(gspca_dev, 0x01, 0x0001, 1);
if (status == endcode) {
PDEBUG(D_FRAM, "status 0x%04x after wait %d",
@@ -642,7 +649,7 @@ static void spca504A_acknowledged_command(struct gspca_dev *gspca_dev,
}
}
-static int spca504B_PollingDataReady(struct gspca_dev *gspca_dev)
+static void spca504B_PollingDataReady(struct gspca_dev *gspca_dev)
{
int count = 10;
@@ -652,7 +659,6 @@ static int spca504B_PollingDataReady(struct gspca_dev *gspca_dev)
break;
msleep(10);
}
- return gspca_dev->usb_buf[0];
}
static void spca504B_WaitCmdStatus(struct gspca_dev *gspca_dev)
@@ -686,28 +692,26 @@ static void spca50x_GetFirmware(struct gspca_dev *gspca_dev)
static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
u8 Size;
- int rc;
Size = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
switch (sd->bridge) {
case BRIDGE_SPCA533:
- reg_w_riv(dev, 0x31, 0, 0);
+ reg_w_riv(gspca_dev, 0x31, 0, 0);
spca504B_WaitCmdStatus(gspca_dev);
- rc = spca504B_PollingDataReady(gspca_dev);
+ spca504B_PollingDataReady(gspca_dev);
spca50x_GetFirmware(gspca_dev);
reg_w_1(gspca_dev, 0x24, 0, 8, 2); /* type */
reg_r(gspca_dev, 0x24, 8, 1);
reg_w_1(gspca_dev, 0x25, 0, 4, Size);
reg_r(gspca_dev, 0x25, 4, 1); /* size */
- rc = spca504B_PollingDataReady(gspca_dev);
+ spca504B_PollingDataReady(gspca_dev);
/* Init the cam width height with some values get on init ? */
- reg_w_riv(dev, 0x31, 0, 0x04);
+ reg_w_riv(gspca_dev, 0x31, 0, 0x04);
spca504B_WaitCmdStatus(gspca_dev);
- rc = spca504B_PollingDataReady(gspca_dev);
+ spca504B_PollingDataReady(gspca_dev);
break;
default:
/* case BRIDGE_SPCA504B: */
@@ -716,7 +720,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
reg_r(gspca_dev, 0x25, 4, 1); /* size */
reg_w_1(gspca_dev, 0x27, 0, 0, 6);
reg_r(gspca_dev, 0x27, 0, 1); /* type */
- rc = spca504B_PollingDataReady(gspca_dev);
+ spca504B_PollingDataReady(gspca_dev);
break;
case BRIDGE_SPCA504:
Size += 3;
@@ -733,8 +737,8 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
break;
case BRIDGE_SPCA504C:
/* capture mode */
- reg_w_riv(dev, 0xa0, (0x0500 | (Size & 0x0f)), 0x00);
- reg_w_riv(dev, 0x20, 0x01, 0x0500 | (Size & 0x0f));
+ reg_w_riv(gspca_dev, 0xa0, (0x0500 | (Size & 0x0f)), 0x00);
+ reg_w_riv(gspca_dev, 0x20, 0x01, 0x0500 | (Size & 0x0f));
break;
}
}
@@ -762,37 +766,33 @@ static void spca504B_setQtable(struct gspca_dev *gspca_dev)
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
u16 reg;
reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f0 : 0x21a7;
- reg_w_riv(dev, 0x00, reg, sd->brightness);
+ reg_w_riv(gspca_dev, 0x00, reg, sd->brightness);
}
static void setcontrast(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
u16 reg;
reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f1 : 0x21a8;
- reg_w_riv(dev, 0x00, reg, sd->contrast);
+ reg_w_riv(gspca_dev, 0x00, reg, sd->contrast);
}
static void setcolors(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
u16 reg;
reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f6 : 0x21ae;
- reg_w_riv(dev, 0x00, reg, sd->colors);
+ reg_w_riv(gspca_dev, 0x00, reg, sd->colors);
}
static void init_ctl_reg(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
int pollreg = 1;
setbrightness(gspca_dev);
@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
default:
/* case BRIDGE_SPCA533: */
/* case BRIDGE_SPCA504B: */
- reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */
- reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */
- reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */
+ reg_w_riv(gspca_dev, 0, 0x00, 0x21ad); /* hue */
+ reg_w_riv(gspca_dev, 0, 0x01, 0x21ac); /* sat/hue */
+ reg_w_riv(gspca_dev, 0, 0x00, 0x21a3); /* gamma */
break;
case BRIDGE_SPCA536:
- reg_w_riv(dev, 0, 0x40, 0x20f5);
- reg_w_riv(dev, 0, 0x01, 0x20f4);
- reg_w_riv(dev, 0, 0x00, 0x2089);
+ reg_w_riv(gspca_dev, 0, 0x40, 0x20f5);
+ reg_w_riv(gspca_dev, 0, 0x01, 0x20f4);
+ reg_w_riv(gspca_dev, 0, 0x00, 0x2089);
break;
}
if (pollreg)
@@ -881,18 +881,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
static int sd_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
- int i, err_code;
+ int i;
u8 info[6];
switch (sd->bridge) {
case BRIDGE_SPCA504B:
- reg_w_riv(dev, 0x1d, 0x00, 0);
- reg_w_riv(dev, 0, 0x01, 0x2306);
- reg_w_riv(dev, 0, 0x00, 0x0d04);
- reg_w_riv(dev, 0, 0x00, 0x2000);
- reg_w_riv(dev, 0, 0x13, 0x2301);
- reg_w_riv(dev, 0, 0x00, 0x2306);
+ reg_w_riv(gspca_dev, 0x1d, 0x00, 0);
+ reg_w_riv(gspca_dev, 0, 0x01, 0x2306);
+ reg_w_riv(gspca_dev, 0, 0x00, 0x0d04);
+ reg_w_riv(gspca_dev, 0, 0x00, 0x2000);
+ reg_w_riv(gspca_dev, 0, 0x13, 0x2301);
+ reg_w_riv(gspca_dev, 0, 0x00, 0x2306);
/* fall thru */
case BRIDGE_SPCA533:
spca504B_PollingDataReady(gspca_dev);
@@ -904,13 +903,13 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w_1(gspca_dev, 0x24, 0, 0, 0);
reg_r(gspca_dev, 0x24, 0, 1);
spca504B_PollingDataReady(gspca_dev);
- reg_w_riv(dev, 0x34, 0, 0);
+ reg_w_riv(gspca_dev, 0x34, 0, 0);
spca504B_WaitCmdStatus(gspca_dev);
break;
case BRIDGE_SPCA504C: /* pccam600 */
PDEBUG(D_STREAM, "Opening SPCA504 (PC-CAM 600)");
- reg_w_riv(dev, 0xe0, 0x0000, 0x0000);
- reg_w_riv(dev, 0xe0, 0x0000, 0x0001); /* reset */
+ reg_w_riv(gspca_dev, 0xe0, 0x0000, 0x0000);
+ reg_w_riv(gspca_dev, 0xe0, 0x0000, 0x0001); /* reset */
spca504_wait_status(gspca_dev);
if (sd->subtype == LogitechClickSmart420)
write_vector(gspca_dev,
@@ -919,12 +918,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
else
write_vector(gspca_dev, spca504_pccam600_open_data,
ARRAY_SIZE(spca504_pccam600_open_data));
- err_code = spca50x_setup_qtable(gspca_dev,
- qtable_creative_pccam);
- if (err_code < 0) {
- PDEBUG(D_ERR|D_STREAM, "spca50x_setup_qtable failed");
- return err_code;
- }
+ setup_qtable(gspca_dev, qtable_creative_pccam);
break;
default:
/* case BRIDGE_SPCA504: */
@@ -958,29 +952,24 @@ static int sd_init(struct gspca_dev *gspca_dev)
6, 0, 0x86, 1); */
/* spca504A_acknowledged_command (gspca_dev, 0x24,
0, 0, 0x9D, 1); */
- reg_w_riv(dev, 0x00, 0x270c, 0x05); /* L92 sno1t.txt */
- reg_w_riv(dev, 0x00, 0x2310, 0x05);
+ reg_w_riv(gspca_dev, 0x00, 0x270c, 0x05);
+ /* L92 sno1t.txt */
+ reg_w_riv(gspca_dev, 0x00, 0x2310, 0x05);
spca504A_acknowledged_command(gspca_dev, 0x01,
0x0f, 0, 0xff, 0);
}
/* setup qtable */
- reg_w_riv(dev, 0, 0x2000, 0);
- reg_w_riv(dev, 0, 0x2883, 1);
- err_code = spca50x_setup_qtable(gspca_dev,
- qtable_spca504_default);
- if (err_code < 0) {
- PDEBUG(D_ERR, "spca50x_setup_qtable failed");
- return err_code;
- }
+ reg_w_riv(gspca_dev, 0, 0x2000, 0);
+ reg_w_riv(gspca_dev, 0, 0x2883, 1);
+ setup_qtable(gspca_dev, qtable_spca504_default);
break;
}
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
int enable;
int i;
u8 info[6];
@@ -1005,13 +994,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
case MegapixV4:
case LogitechClickSmart820:
case MegaImageVI:
- reg_w_riv(dev, 0xf0, 0, 0);
+ reg_w_riv(gspca_dev, 0xf0, 0, 0);
spca504B_WaitCmdStatus(gspca_dev);
reg_r(gspca_dev, 0xf0, 4, 0);
spca504B_WaitCmdStatus(gspca_dev);
break;
default:
- reg_w_riv(dev, 0x31, 0, 0x04);
+ reg_w_riv(gspca_dev, 0x31, 0, 0x04);
spca504B_WaitCmdStatus(gspca_dev);
spca504B_PollingDataReady(gspca_dev);
break;
@@ -1048,8 +1037,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
spca504_acknowledged_command(gspca_dev, 0x24, 0, 0);
}
spca504B_SetSizeType(gspca_dev);
- reg_w_riv(dev, 0x00, 0x270c, 0x05); /* L92 sno1t.txt */
- reg_w_riv(dev, 0x00, 0x2310, 0x05);
+ reg_w_riv(gspca_dev, 0x00, 0x270c, 0x05);
+ /* L92 sno1t.txt */
+ reg_w_riv(gspca_dev, 0x00, 0x2310, 0x05);
break;
case BRIDGE_SPCA504C:
if (sd->subtype == LogitechClickSmart420) {
@@ -1061,36 +1051,37 @@ static int sd_start(struct gspca_dev *gspca_dev)
ARRAY_SIZE(spca504_pccam600_init_data));
}
enable = (sd->autogain ? 0x04 : 0x01);
- reg_w_riv(dev, 0x0c, 0x0000, enable); /* auto exposure */
- reg_w_riv(dev, 0xb0, 0x0000, enable); /* auto whiteness */
+ reg_w_riv(gspca_dev, 0x0c, 0x0000, enable);
+ /* auto exposure */
+ reg_w_riv(gspca_dev, 0xb0, 0x0000, enable);
+ /* auto whiteness */
/* set default exposure compensation and whiteness balance */
- reg_w_riv(dev, 0x30, 0x0001, 800); /* ~ 20 fps */
- reg_w_riv(dev, 0x30, 0x0002, 1600);
+ reg_w_riv(gspca_dev, 0x30, 0x0001, 800); /* ~ 20 fps */
+ reg_w_riv(gspca_dev, 0x30, 0x0002, 1600);
spca504B_SetSizeType(gspca_dev);
break;
}
init_ctl_reg(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
switch (sd->bridge) {
default:
/* case BRIDGE_SPCA533: */
/* case BRIDGE_SPCA536: */
/* case BRIDGE_SPCA504B: */
- reg_w_riv(dev, 0x31, 0, 0);
+ reg_w_riv(gspca_dev, 0x31, 0, 0);
spca504B_WaitCmdStatus(gspca_dev);
spca504B_PollingDataReady(gspca_dev);
break;
case BRIDGE_SPCA504:
case BRIDGE_SPCA504C:
- reg_w_riv(dev, 0x00, 0x2000, 0x0000);
+ reg_w_riv(gspca_dev, 0x00, 0x2000, 0x0000);
if (sd->subtype == AiptekMiniPenCam13) {
/* spca504a aiptek */
@@ -1102,7 +1093,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
0x0f, 0x00, 0xff, 1);
} else {
spca504_acknowledged_command(gspca_dev, 0x24, 0, 0);
- reg_w_riv(dev, 0x01, 0x000f, 0x0000);
+ reg_w_riv(gspca_dev, 0x01, 0x000f, 0x0000);
}
break;
}
@@ -1216,7 +1207,7 @@ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
sd->brightness = val;
if (gspca_dev->streaming)
setbrightness(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
@@ -1234,7 +1225,7 @@ static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
sd->contrast = val;
if (gspca_dev->streaming)
setcontrast(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
@@ -1252,7 +1243,7 @@ static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
sd->colors = val;
if (gspca_dev->streaming)
setcolors(gspca_dev);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
@@ -1292,7 +1283,7 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
sd->quality = jcomp->quality;
if (gspca_dev->streaming)
jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 69e5dc4fc9d..1a800fc1c00 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5345,9 +5345,6 @@ static const struct usb_action tas5130cxx_InitialScale[] = { /* 320x240 */
{0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
- {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
-
{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
@@ -5364,27 +5361,27 @@ static const struct usb_action tas5130cxx_InitialScale[] = { /* 320x240 */
{0xa0, 0xf7, ZC3XX_R101_SENSORCORRECTION},
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
- {0xa0, 0x95, ZC3XX_R18D_YTARGET},
+ {0xa0, 0x70, ZC3XX_R18D_YTARGET},
{0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN},
{0xa0, 0x00, 0x01ad},
{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
+ {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
+ {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
{}
};
static const struct usb_action tas5130cxx_Initial[] = { /* 640x480 */
{0xa0, 0x01, ZC3XX_R000_SYSTEMCONTROL},
{0xa0, 0x40, ZC3XX_R002_CLOCKSELECT},
- {0xa0, 0x03, ZC3XX_R008_CLOCKSETTING},
+ {0xa0, 0x00, ZC3XX_R008_CLOCKSETTING},
{0xa0, 0x02, ZC3XX_R010_CMOSSENSORSELECT},
{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
{0xa0, 0x00, ZC3XX_R001_SYSTEMOPERATING},
{0xa0, 0x01, ZC3XX_R012_VIDEOCONTROLFUNC},
{0xa0, 0x01, ZC3XX_R001_SYSTEMOPERATING},
{0xa0, 0x05, ZC3XX_R012_VIDEOCONTROLFUNC},
- {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
- {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
{0xa0, 0x02, ZC3XX_R003_FRAMEWIDTHHIGH},
{0xa0, 0x80, ZC3XX_R004_FRAMEWIDTHLOW},
{0xa0, 0x01, ZC3XX_R005_FRAMEHEIGHTHIGH},
@@ -5400,13 +5397,15 @@ static const struct usb_action tas5130cxx_Initial[] = { /* 640x480 */
{0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION},
{0xa0, 0x0d, ZC3XX_R100_OPERATIONMODE},
{0xa0, 0x06, ZC3XX_R189_AWBSTATUS},
- {0xa0, 0x95, ZC3XX_R18D_YTARGET},
+ {0xa0, 0x70, ZC3XX_R18D_YTARGET},
{0xa0, 0x50, ZC3XX_R1A8_DIGITALGAIN},
{0xa0, 0x00, 0x01ad},
{0xa0, 0x03, ZC3XX_R1C5_SHARPNESSMODE},
{0xa0, 0x13, ZC3XX_R1CB_SHARPNESS05},
{0xa0, 0x08, ZC3XX_R250_DEADPIXELSMODE},
{0xa0, 0x08, ZC3XX_R301_EEPROMACCESS},
+ {0xa0, 0x07, ZC3XX_R0A5_EXPOSUREGAIN},
+ {0xa0, 0x02, ZC3XX_R0A6_EXPOSUREBLACKLVL},
{}
};
static const struct usb_action tas5130cxx_50HZ[] = {
@@ -6424,11 +6423,11 @@ static int vga_2wr_probe(struct gspca_dev *gspca_dev)
if (retword != 0)
return 0x0e; /* PAS202BCB */
- start_2wr_probe(dev, 0x02); /* ?? */
+ start_2wr_probe(dev, 0x02); /* TAS5130C */
i2c_write(gspca_dev, 0x01, 0xaa, 0x00);
retword = i2c_read(gspca_dev, 0x01);
if (retword != 0)
- return 0x02; /* ?? */
+ return 0x02; /* TAS5130C */
ov_check:
reg_r(gspca_dev, 0x0010); /* ?? */
reg_r(gspca_dev, 0x0010);
@@ -6505,6 +6504,8 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
reg_r(gspca_dev, 0x0010);
/* value 0x4001 is meaningless */
if (retword != 0x4001) {
+ if ((retword & 0xff00) == 0x6400)
+ return 0x02; /* TAS5130C */
for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) {
if (chipset_revision_sensor[i].revision == retword) {
sd->chip_revision = retword;
@@ -6515,7 +6516,7 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
}
}
- reg_w(dev, 0x01, 0x0000); /* check ?? */
+ reg_w(dev, 0x01, 0x0000); /* check PB0330 */
reg_w(dev, 0x01, 0x0001);
reg_w(dev, 0xdd, 0x008b);
reg_w(dev, 0x0a, 0x0010);
@@ -6524,7 +6525,7 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
retword = i2c_read(gspca_dev, 0x00);
if (retword != 0) {
PDEBUG(D_PROBE, "probe 3wr vga type 0a ?");
- return 0x0a; /* ?? */
+ return 0x0a; /* PB0330 */
}
reg_w(dev, 0x01, 0x0000);
@@ -6673,6 +6674,10 @@ static int sd_config(struct gspca_dev *gspca_dev,
PDEBUG(D_PROBE, "Find Sensor HV7131B");
sd->sensor = SENSOR_HV7131B;
break;
+ case 0x02:
+ PDEBUG(D_PROBE, "Sensor TAS5130C");
+ sd->sensor = SENSOR_TAS5130CXX;
+ break;
case 0x04:
PDEBUG(D_PROBE, "Find Sensor CS2102");
sd->sensor = SENSOR_CS2102;
@@ -6866,11 +6871,14 @@ static int sd_start(struct gspca_dev *gspca_dev)
case SENSOR_GC0305:
case SENSOR_OV7620:
case SENSOR_PO2030:
+ case SENSOR_TAS5130CXX:
case SENSOR_TAS5130C_VF0250:
/* msleep(100); * ?? */
reg_r(gspca_dev, 0x0002); /* --> 0x40 */
reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
reg_w(dev, 0x15, 0x01ae);
+ if (sd->sensor == SENSOR_TAS5130CXX)
+ break;
reg_w(dev, 0x0d, 0x003a);
reg_w(dev, 0x02, 0x003b);
reg_w(dev, 0x00, 0x0038);
@@ -6887,6 +6895,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
case SENSOR_PAS202B:
case SENSOR_GC0305:
+ case SENSOR_TAS5130CXX:
reg_r(gspca_dev, 0x0008);
/* fall thru */
case SENSOR_PO2030:
@@ -6928,6 +6937,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(dev, 0x40, 0x0117);
break;
case SENSOR_GC0305:
+ case SENSOR_TAS5130CXX:
reg_w(dev, 0x09, 0x01ad); /* (from win traces) */
reg_w(dev, 0x15, 0x01ae);
/* fall thru */
@@ -7220,7 +7230,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x0ac8, 0x0302), .driver_info = SENSOR_PAS106},
{USB_DEVICE(0x0ac8, 0x301b)},
{USB_DEVICE(0x0ac8, 0x303b)},
- {USB_DEVICE(0x0ac8, 0x305b), .driver_info = SENSOR_TAS5130C_VF0250},
+ {USB_DEVICE(0x0ac8, 0x305b)},
{USB_DEVICE(0x0ac8, 0x307b)},
{USB_DEVICE(0x10fd, 0x0128)},
{USB_DEVICE(0x10fd, 0x804d)},
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 1c9bc94c905..51f393d03a4 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -145,7 +145,7 @@ static int device_authorization(struct hdpvr_device *dev)
#ifdef HDPVR_DEBUG
else {
hex_dump_to_buffer(dev->usbc_buf, 46, 16, 1, print_buf,
- sizeof(print_buf), 0);
+ 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
"Status request returned, len %d: %s\n",
ret, print_buf);
@@ -168,13 +168,13 @@ static int device_authorization(struct hdpvr_device *dev)
response = dev->usbc_buf+38;
#ifdef HDPVR_DEBUG
- hex_dump_to_buffer(response, 8, 16, 1, print_buf, sizeof(print_buf), 0);
+ hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %s\n",
print_buf);
#endif
challenge(response);
#ifdef HDPVR_DEBUG
- hex_dump_to_buffer(response, 8, 16, 1, print_buf, sizeof(print_buf), 0);
+ hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
print_buf);
#endif
@@ -376,8 +376,8 @@ static int hdpvr_probe(struct usb_interface *interface,
usb_set_intfdata(interface, dev);
/* let the user know what node this device is now attached to */
- v4l2_info(&dev->v4l2_dev, "device now attached to /dev/video%d\n",
- dev->video_dev->minor);
+ v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
+ video_device_node_name(dev->video_dev));
return 0;
error:
@@ -391,13 +391,10 @@ error:
static void hdpvr_disconnect(struct usb_interface *interface)
{
struct hdpvr_device *dev;
- int minor;
dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
- minor = dev->video_dev->minor;
-
/* prevent more I/O from starting and stop any ongoing */
mutex_lock(&dev->io_mutex);
dev->status = STATUS_DISCONNECTED;
@@ -425,7 +422,8 @@ static void hdpvr_disconnect(struct usb_interface *interface)
atomic_dec(&dev_nr);
- v4l2_info(&dev->v4l2_dev, "device /dev/video%d disconnected\n", minor);
+ v4l2_info(&dev->v4l2_dev, "device %s disconnected\n",
+ video_device_node_name(dev->video_dev));
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev->usbc_buf);
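In hdpvr-core.c, print_buf is a pointer to a heap-allocated scratch buffer, so sizeof(print_buf) evaluates to the pointer size (4 or 8 bytes) and the hex dump was being truncated; the fix passes the real allocation length, 5*buf_size+1. A generic illustration of the pitfall, not taken from the driver:

#include <linux/kernel.h>
#include <linux/slab.h>

static void example_dump(const u8 *data, size_t n)
{
        size_t len = n * 5 + 1;                 /* same worst-case formula */
        char *buf = kzalloc(len, GFP_KERNEL);

        if (!buf)
                return;
        /* sizeof(buf) here would be 4 or 8, not len: pass the real length */
        hex_dump_to_buffer(data, n, 16, 1, buf, len, 0);
        pr_debug("%s\n", buf);
        kfree(buf);
}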
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index b5439cabb38..fdd782039e9 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -523,7 +523,7 @@ static unsigned int hdpvr_poll(struct file *filp, poll_table *wait)
mutex_lock(&dev->io_mutex);
- if (video_is_unregistered(dev->video_dev)) {
+ if (!video_is_registered(dev->video_dev)) {
mutex_unlock(&dev->io_mutex);
return -EIO;
}
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 64360d26b32..b86e35386ce 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -353,6 +353,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir_type = IR_TYPE_RC5;
ir_codes = &ir_codes_fusionhdtv_mce_table;
break;
+ case 0x0b:
case 0x47:
case 0x71:
if (adap->id == I2C_HW_B_CX2388x ||
@@ -422,7 +423,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Make sure we are all setup before going on */
if (!name || !ir->get_key || !ir_type || !ir_codes) {
- dprintk(1, DEVNAME ": Unsupported device at address 0x%02x\n",
+ dprintk(1, ": Unsupported device at address 0x%02x\n",
addr);
err = -ENODEV;
goto err_out_free;
@@ -437,7 +438,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_name(&client->dev));
/* init + register input device */
- err = ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes);
+ err = ir_input_init(input_dev, &ir->ir, ir_type);
if (err < 0)
goto err_out_free;
@@ -445,7 +446,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
input_dev->name = ir->name;
input_dev->phys = ir->phys;
- err = input_register_device(ir->input);
+ err = ir_input_register(ir->input, ir->ir_codes);
if (err)
goto err_out_free;
@@ -459,8 +460,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
err_out_free:
- ir_input_free(input_dev);
- input_free_device(input_dev);
kfree(ir);
return err;
}
@@ -473,8 +472,7 @@ static int ir_remove(struct i2c_client *client)
cancel_delayed_work_sync(&ir->work);
/* unregister device */
- ir_input_free(ir->input);
- input_unregister_device(ir->input);
+ ir_input_unregister(ir->input);
/* free memory */
kfree(ir);
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index e707ef3086b..babcabd73c0 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -985,8 +985,8 @@ int ivtv_v4l2_open(struct file *filp)
mutex_lock(&itv->serialize_lock);
if (ivtv_init_on_first_open(itv)) {
- IVTV_ERR("Failed to initialize on minor %d\n",
- vdev->minor);
+ IVTV_ERR("Failed to initialize on device %s\n",
+ video_device_node_name(vdev));
mutex_unlock(&itv->serialize_lock);
return -ENXIO;
}
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 67699e3f2aa..e12c6022373 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -245,6 +245,7 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
{
struct ivtv_stream *s = &itv->streams[type];
int vfl_type = ivtv_stream_info[type].vfl_type;
+ const char *name;
int num;
if (s->vdev == NULL)
@@ -268,24 +269,24 @@ static int ivtv_reg_dev(struct ivtv *itv, int type)
s->vdev = NULL;
return -ENOMEM;
}
- num = s->vdev->num;
+ name = video_device_node_name(s->vdev);
switch (vfl_type) {
case VFL_TYPE_GRABBER:
- IVTV_INFO("Registered device video%d for %s (%d kB)\n",
- num, s->name, itv->options.kilobytes[type]);
+ IVTV_INFO("Registered device %s for %s (%d kB)\n",
+ name, s->name, itv->options.kilobytes[type]);
break;
case VFL_TYPE_RADIO:
- IVTV_INFO("Registered device radio%d for %s\n",
- num, s->name);
+ IVTV_INFO("Registered device %s for %s\n",
+ name, s->name);
break;
case VFL_TYPE_VBI:
if (itv->options.kilobytes[type])
- IVTV_INFO("Registered device vbi%d for %s (%d kB)\n",
- num, s->name, itv->options.kilobytes[type]);
+ IVTV_INFO("Registered device %s for %s (%d kB)\n",
+ name, s->name, itv->options.kilobytes[type]);
else
- IVTV_INFO("Registered device vbi%d for %s\n",
- num, s->name);
+ IVTV_INFO("Registered device %s for %s\n",
+ name, s->name);
break;
}
return 0;
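
The hdpvr and ivtv hunks above stop hand-building "/dev/video%d"-style names from minor numbers in log messages and instead print the node name computed by the V4L2 core. A minimal sketch, not part of the patch, of how such a helper is typically used (function and message here are illustrative):

#include <linux/kernel.h>
#include <media/v4l2-dev.h>

static void report_registration(struct video_device *vdev)
{
	/* video_device_node_name() returns the node name, e.g. "video0" */
	printk(KERN_INFO "registered device %s\n",
	       video_device_node_name(vdev));
}
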
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 01e1eefcf1e..b421858ccf9 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -800,8 +800,8 @@ again:
return IRQ_HANDLED;
if (meye.mchip_mode == MCHIP_HIC_MODE_CONT_OUT) {
- if (kfifo_get(meye.grabq, (unsigned char *)&reqnr,
- sizeof(int)) != sizeof(int)) {
+ if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.grabq_lock) != sizeof(int)) {
mchip_free_frame();
return IRQ_HANDLED;
}
@@ -811,7 +811,8 @@ again:
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
- kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int));
+ kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.doneq_lock);
wake_up_interruptible(&meye.proc_list);
} else {
int size;
@@ -820,8 +821,8 @@ again:
mchip_free_frame();
goto again;
}
- if (kfifo_get(meye.grabq, (unsigned char *)&reqnr,
- sizeof(int)) != sizeof(int)) {
+ if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.grabq_lock) != sizeof(int)) {
mchip_free_frame();
goto again;
}
@@ -831,7 +832,8 @@ again:
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
- kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int));
+ kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.doneq_lock);
wake_up_interruptible(&meye.proc_list);
}
mchip_free_frame();
@@ -859,8 +861,8 @@ static int meye_open(struct file *file)
for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
- kfifo_reset(meye.grabq);
- kfifo_reset(meye.doneq);
+ kfifo_reset(&meye.grabq);
+ kfifo_reset(&meye.doneq);
return 0;
}
@@ -933,7 +935,8 @@ static int meyeioc_qbuf_capt(int *nb)
mchip_cont_compression_start();
meye.grab_buffer[*nb].state = MEYE_BUF_USING;
- kfifo_put(meye.grabq, (unsigned char *)nb, sizeof(int));
+ kfifo_in_locked(&meye.grabq, (unsigned char *)nb, sizeof(int),
+ &meye.grabq_lock);
mutex_unlock(&meye.lock);
return 0;
@@ -965,7 +968,9 @@ static int meyeioc_sync(struct file *file, void *fh, int *i)
/* fall through */
case MEYE_BUF_DONE:
meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
- kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int));
+ if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused,
+ sizeof(int), &meye.doneq_lock) != sizeof(int))
+ break;
}
*i = meye.grab_buffer[*i].size;
mutex_unlock(&meye.lock);
@@ -1452,7 +1457,8 @@ static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->flags |= V4L2_BUF_FLAG_QUEUED;
buf->flags &= ~V4L2_BUF_FLAG_DONE;
meye.grab_buffer[buf->index].state = MEYE_BUF_USING;
- kfifo_put(meye.grabq, (unsigned char *)&buf->index, sizeof(int));
+ kfifo_in_locked(&meye.grabq, (unsigned char *)&buf->index,
+ sizeof(int), &meye.grabq_lock);
mutex_unlock(&meye.lock);
return 0;
@@ -1467,19 +1473,19 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
mutex_lock(&meye.lock);
- if (kfifo_len(meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
+ if (kfifo_len(&meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
mutex_unlock(&meye.lock);
return -EAGAIN;
}
if (wait_event_interruptible(meye.proc_list,
- kfifo_len(meye.doneq) != 0) < 0) {
+ kfifo_len(&meye.doneq) != 0) < 0) {
mutex_unlock(&meye.lock);
return -EINTR;
}
- if (!kfifo_get(meye.doneq, (unsigned char *)&reqnr,
- sizeof(int))) {
+ if (!kfifo_out_locked(&meye.doneq, (unsigned char *)&reqnr,
+ sizeof(int), &meye.doneq_lock)) {
mutex_unlock(&meye.lock);
return -EBUSY;
}
@@ -1529,8 +1535,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
mutex_lock(&meye.lock);
mchip_hic_stop();
- kfifo_reset(meye.grabq);
- kfifo_reset(meye.doneq);
+ kfifo_reset(&meye.grabq);
+ kfifo_reset(&meye.doneq);
for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
@@ -1572,7 +1578,7 @@ static unsigned int meye_poll(struct file *file, poll_table *wait)
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
- if (kfifo_len(meye.doneq))
+ if (kfifo_len(&meye.doneq))
res = POLLIN | POLLRDNORM;
mutex_unlock(&meye.lock);
return res;
@@ -1681,7 +1687,6 @@ static struct video_device meye_template = {
.fops = &meye_fops,
.ioctl_ops = &meye_ioctl_ops,
.release = video_device_release,
- .minor = -1,
};
#ifdef CONFIG_PM
@@ -1746,16 +1751,14 @@ static int __devinit meye_probe(struct pci_dev *pcidev,
}
spin_lock_init(&meye.grabq_lock);
- meye.grabq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
- &meye.grabq_lock);
- if (IS_ERR(meye.grabq)) {
+ if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
+ GFP_KERNEL)) {
printk(KERN_ERR "meye: fifo allocation failed\n");
goto outkfifoalloc1;
}
spin_lock_init(&meye.doneq_lock);
- meye.doneq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
- &meye.doneq_lock);
- if (IS_ERR(meye.doneq)) {
+ if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
+ GFP_KERNEL)) {
printk(KERN_ERR "meye: fifo allocation failed\n");
goto outkfifoalloc2;
}
@@ -1869,9 +1872,9 @@ outregions:
outenabledev:
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
outsonypienable:
- kfifo_free(meye.doneq);
+ kfifo_free(&meye.doneq);
outkfifoalloc2:
- kfifo_free(meye.grabq);
+ kfifo_free(&meye.grabq);
outkfifoalloc1:
vfree(meye.grab_temp);
outvmalloc:
@@ -1902,8 +1905,8 @@ static void __devexit meye_remove(struct pci_dev *pcidev)
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
- kfifo_free(meye.doneq);
- kfifo_free(meye.grabq);
+ kfifo_free(&meye.doneq);
+ kfifo_free(&meye.grabq);
vfree(meye.grab_temp);
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h
index 5f70a106ba2..1321ad5d659 100644
--- a/drivers/media/video/meye.h
+++ b/drivers/media/video/meye.h
@@ -303,9 +303,9 @@ struct meye {
struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS];
int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */
struct mutex lock; /* mutex for open/mmap... */
- struct kfifo *grabq; /* queue for buffers to be grabbed */
+ struct kfifo grabq; /* queue for buffers to be grabbed */
spinlock_t grabq_lock; /* lock protecting the queue */
- struct kfifo *doneq; /* queue for grabbed buffers */
+ struct kfifo doneq; /* queue for grabbed buffers */
spinlock_t doneq_lock; /* lock protecting the queue */
wait_queue_head_t proc_list; /* wait queue */
struct video_device *video_dev; /* video device parameters */
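
The meye conversion above moves from the old kfifo API, where kfifo_alloc() returned a pointer and kept the spinlock inside the fifo, to the new API where struct kfifo is embedded by value in the driver structure and the lock is passed explicitly to the *_locked helpers. A minimal sketch, not part of the patch, with illustrative names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

struct demo {
	struct kfifo q;		/* fifo embedded by value, no longer a pointer */
	spinlock_t q_lock;	/* lock handed to the *_locked helpers */
};

static int demo_init(struct demo *d)
{
	spin_lock_init(&d->q_lock);
	/* returns 0 on success instead of an ERR_PTR()-encoded pointer */
	return kfifo_alloc(&d->q, sizeof(int) * 16, GFP_KERNEL);
}

static int demo_roundtrip(struct demo *d)
{
	int in = 42, out;

	kfifo_in_locked(&d->q, (unsigned char *)&in, sizeof(in), &d->q_lock);
	if (kfifo_out_locked(&d->q, (unsigned char *)&out, sizeof(out),
			     &d->q_lock) != sizeof(out))
		return -EIO;	/* fifo unexpectedly empty */
	return 0;
}

static void demo_exit(struct demo *d)
{
	kfifo_free(&d->q);
}
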
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 45388d2ce2f..b62c0bd3f8e 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -17,9 +17,11 @@
#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
-/* mt9m001 i2c address 0x5d
+/*
+ * mt9m001 i2c address 0x5d
 * The platform has to define struct i2c_board_info objects and link to them
- * from struct soc_camera_link */
+ * from struct soc_camera_link
+ */
/* mt9m001 selected register addresses */
#define MT9M001_CHIP_VERSION 0x00
@@ -46,42 +48,50 @@
#define MT9M001_COLUMN_SKIP 20
#define MT9M001_ROW_SKIP 12
-static const struct soc_camera_data_format mt9m001_colour_formats[] = {
- /* Order important: first natively supported,
- * second supported with a GPIO extender */
- {
- .name = "Bayer (sRGB) 10 bit",
- .depth = 10,
- .fourcc = V4L2_PIX_FMT_SBGGR16,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }, {
- .name = "Bayer (sRGB) 8 bit",
- .depth = 8,
- .fourcc = V4L2_PIX_FMT_SBGGR8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }
+/* MT9M001 has only one fixed colorspace per pixelcode */
+struct mt9m001_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+/* Find a data format by a pixel code in an array */
+static const struct mt9m001_datafmt *mt9m001_find_datafmt(
+ enum v4l2_mbus_pixelcode code, const struct mt9m001_datafmt *fmt,
+ int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ if (fmt[i].code == code)
+ return fmt + i;
+
+ return NULL;
+}
+
+static const struct mt9m001_datafmt mt9m001_colour_fmts[] = {
+ /*
+ * Order important: first natively supported,
+ * second supported with a GPIO extender
+ */
+ {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
};
-static const struct soc_camera_data_format mt9m001_monochrome_formats[] = {
+static const struct mt9m001_datafmt mt9m001_monochrome_fmts[] = {
/* Order important - see above */
- {
- .name = "Monochrome 10 bit",
- .depth = 10,
- .fourcc = V4L2_PIX_FMT_Y16,
- }, {
- .name = "Monochrome 8 bit",
- .depth = 8,
- .fourcc = V4L2_PIX_FMT_GREY,
- },
+ {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_GREY8_1X8, V4L2_COLORSPACE_JPEG},
};
struct mt9m001 {
struct v4l2_subdev subdev;
struct v4l2_rect rect; /* Sensor window */
- __u32 fourcc;
+ const struct mt9m001_datafmt *fmt;
+ const struct mt9m001_datafmt *fmts;
+ int num_fmts;
int model; /* V4L2_IDENT_MT9M001* codes from v4l2-chip-ident.h */
unsigned int gain;
unsigned int exposure;
+ unsigned short y_skip_top; /* Lines to skip at the top */
unsigned char autoexposure;
};
@@ -204,8 +214,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
const u16 hblank = 9, vblank = 25;
unsigned int total_h;
- if (mt9m001->fourcc == V4L2_PIX_FMT_SBGGR8 ||
- mt9m001->fourcc == V4L2_PIX_FMT_SBGGR16)
+ if (mt9m001->fmts == mt9m001_colour_fmts)
/*
* Bayer format - even number of rows for simplicity,
* but let the user play with the top row.
@@ -222,15 +231,17 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
soc_camera_limit_side(&rect.top, &rect.height,
MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT);
- total_h = rect.height + icd->y_skip_top + vblank;
+ total_h = rect.height + mt9m001->y_skip_top + vblank;
/* Blanking and start values - default... */
ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank);
if (!ret)
ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank);
- /* The caller provides a supported format, as verified per
- * call to icd->try_fmt() */
+ /*
+ * The caller provides a supported format, as verified per
+ * call to icd->try_fmt()
+ */
if (!ret)
ret = reg_write(client, MT9M001_COLUMN_START, rect.left);
if (!ret)
@@ -239,7 +250,7 @@ static int mt9m001_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1);
if (!ret)
ret = reg_write(client, MT9M001_WINDOW_HEIGHT,
- rect.height + icd->y_skip_top - 1);
+ rect.height + mt9m001->y_skip_top - 1);
if (!ret && mt9m001->autoexposure) {
ret = reg_write(client, MT9M001_SHUTTER_WIDTH, total_h);
if (!ret) {
@@ -283,32 +294,32 @@ static int mt9m001_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int mt9m001_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9m001_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- pix->width = mt9m001->rect.width;
- pix->height = mt9m001->rect.height;
- pix->pixelformat = mt9m001->fourcc;
- pix->field = V4L2_FIELD_NONE;
- pix->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->width = mt9m001->rect.width;
+ mf->height = mt9m001->rect.height;
+ mf->code = mt9m001->fmt->code;
+ mf->colorspace = mt9m001->fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
return 0;
}
-static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9m001_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct mt9m001 *mt9m001 = to_mt9m001(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_crop a = {
.c = {
.left = mt9m001->rect.left,
.top = mt9m001->rect.top,
- .width = pix->width,
- .height = pix->height,
+ .width = mf->width,
+ .height = mf->height,
},
};
int ret;
@@ -316,28 +327,39 @@ static int mt9m001_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
/* No support for scaling so far, just crop. TODO: use skipping */
ret = mt9m001_s_crop(sd, &a);
if (!ret) {
- pix->width = mt9m001->rect.width;
- pix->height = mt9m001->rect.height;
- mt9m001->fourcc = pix->pixelformat;
+ mf->width = mt9m001->rect.width;
+ mf->height = mt9m001->rect.height;
+ mt9m001->fmt = mt9m001_find_datafmt(mf->code,
+ mt9m001->fmts, mt9m001->num_fmts);
+ mf->colorspace = mt9m001->fmt->colorspace;
}
return ret;
}
-static int mt9m001_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9m001_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+ const struct mt9m001_datafmt *fmt;
- v4l_bound_align_image(&pix->width, MT9M001_MIN_WIDTH,
+ v4l_bound_align_image(&mf->width, MT9M001_MIN_WIDTH,
MT9M001_MAX_WIDTH, 1,
- &pix->height, MT9M001_MIN_HEIGHT + icd->y_skip_top,
- MT9M001_MAX_HEIGHT + icd->y_skip_top, 0, 0);
+ &mf->height, MT9M001_MIN_HEIGHT + mt9m001->y_skip_top,
+ MT9M001_MAX_HEIGHT + mt9m001->y_skip_top, 0, 0);
+
+ if (mt9m001->fmts == mt9m001_colour_fmts)
+ mf->height = ALIGN(mf->height - 1, 2);
- if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8 ||
- pix->pixelformat == V4L2_PIX_FMT_SBGGR16)
- pix->height = ALIGN(pix->height - 1, 2);
+ fmt = mt9m001_find_datafmt(mf->code, mt9m001->fmts,
+ mt9m001->num_fmts);
+ if (!fmt) {
+ fmt = mt9m001->fmt;
+ mf->code = fmt->code;
+ }
+
+ mf->colorspace = fmt->colorspace;
return 0;
}
@@ -552,7 +574,7 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
if (ctrl->value) {
const u16 vblank = 25;
unsigned int total_h = mt9m001->rect.height +
- icd->y_skip_top + vblank;
+ mt9m001->y_skip_top + vblank;
if (reg_write(client, MT9M001_SHUTTER_WIDTH,
total_h) < 0)
return -EIO;
@@ -568,8 +590,10 @@ static int mt9m001_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return 0;
}
-/* Interface active, can use i2c. If it fails, it can indeed mean, that
- * this wasn't our capture interface, so, we wait for the right one */
+/*
+ * Interface active, can use i2c. If it fails, it can indeed mean, that
+ * this wasn't our capture interface, so, we wait for the right one
+ */
static int mt9m001_video_probe(struct soc_camera_device *icd,
struct i2c_client *client)
{
@@ -579,8 +603,10 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
unsigned long flags;
int ret;
- /* We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant. */
+ /*
+ * We must have a parent by now. And it cannot be a wrong one.
+ * So this entire test is completely redundant.
+ */
if (!icd->dev.parent ||
to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
return -ENODEV;
@@ -597,11 +623,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
case 0x8411:
case 0x8421:
mt9m001->model = V4L2_IDENT_MT9M001C12ST;
- icd->formats = mt9m001_colour_formats;
+ mt9m001->fmts = mt9m001_colour_fmts;
break;
case 0x8431:
mt9m001->model = V4L2_IDENT_MT9M001C12STM;
- icd->formats = mt9m001_monochrome_formats;
+ mt9m001->fmts = mt9m001_monochrome_fmts;
break;
default:
dev_err(&client->dev,
@@ -609,7 +635,7 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
return -ENODEV;
}
- icd->num_formats = 0;
+ mt9m001->num_fmts = 0;
/*
* This is a 10bit sensor, so by default we only allow 10bit.
@@ -622,14 +648,14 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
flags = SOCAM_DATAWIDTH_10;
if (flags & SOCAM_DATAWIDTH_10)
- icd->num_formats++;
+ mt9m001->num_fmts++;
else
- icd->formats++;
+ mt9m001->fmts++;
if (flags & SOCAM_DATAWIDTH_8)
- icd->num_formats++;
+ mt9m001->num_fmts++;
- mt9m001->fourcc = icd->formats->fourcc;
+ mt9m001->fmt = &mt9m001->fmts[0];
dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data,
data == 0x8431 ? "C12STM" : "C12ST");
@@ -655,6 +681,16 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
icl->free_bus(icl);
}
+static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ *lines = mt9m001->y_skip_top;
+
+ return 0;
+}
+
static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
.g_ctrl = mt9m001_g_ctrl,
.s_ctrl = mt9m001_s_ctrl,
@@ -665,19 +701,38 @@ static struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = {
#endif
};
+static int mt9m001_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9m001 *mt9m001 = to_mt9m001(client);
+
+ if ((unsigned int)index >= mt9m001->num_fmts)
+ return -EINVAL;
+
+ *code = mt9m001->fmts[index].code;
+ return 0;
+}
+
static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
.s_stream = mt9m001_s_stream,
- .s_fmt = mt9m001_s_fmt,
- .g_fmt = mt9m001_g_fmt,
- .try_fmt = mt9m001_try_fmt,
+ .s_mbus_fmt = mt9m001_s_fmt,
+ .g_mbus_fmt = mt9m001_g_fmt,
+ .try_mbus_fmt = mt9m001_try_fmt,
.s_crop = mt9m001_s_crop,
.g_crop = mt9m001_g_crop,
.cropcap = mt9m001_cropcap,
+ .enum_mbus_fmt = mt9m001_enum_fmt,
+};
+
+static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
+ .g_skip_top_lines = mt9m001_g_skip_top_lines,
};
static struct v4l2_subdev_ops mt9m001_subdev_ops = {
.core = &mt9m001_subdev_core_ops,
.video = &mt9m001_subdev_video_ops,
+ .sensor = &mt9m001_subdev_sensor_ops,
};
static int mt9m001_probe(struct i2c_client *client,
@@ -714,15 +769,17 @@ static int mt9m001_probe(struct i2c_client *client,
/* Second stage probe - when a capture adapter is there */
icd->ops = &mt9m001_ops;
- icd->y_skip_top = 0;
+ mt9m001->y_skip_top = 0;
mt9m001->rect.left = MT9M001_COLUMN_SKIP;
mt9m001->rect.top = MT9M001_ROW_SKIP;
mt9m001->rect.width = MT9M001_MAX_WIDTH;
mt9m001->rect.height = MT9M001_MAX_HEIGHT;
- /* Simulated autoexposure. If enabled, we calculate shutter width
- * ourselves in the driver based on vertical blanking and frame width */
+ /*
+ * Simulated autoexposure. If enabled, we calculate shutter width
+ * ourselves in the driver based on vertical blanking and frame width
+ */
mt9m001->autoexposure = 1;
ret = mt9m001_video_probe(icd, client);
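
The mt9m001 changes above (and the mt9m111/mt9t031 changes that follow) drop the per-icd soc_camera_data_format tables in favour of media-bus pixel codes: the subdev now implements s_mbus_fmt/g_mbus_fmt/try_mbus_fmt/enum_mbus_fmt on struct v4l2_mbus_framefmt. A minimal sketch, not part of the patch, of how a host driver might negotiate a format through these ops; sd, width and height are assumed to come from the caller:

#include <linux/videodev2.h>
#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>

static int negotiate_sensor_fmt(struct v4l2_subdev *sd, u32 width, u32 height)
{
	struct v4l2_mbus_framefmt mf = {
		.width	= width,
		.height	= height,
		.code	= V4L2_MBUS_FMT_SBGGR10_1X10,
		.field	= V4L2_FIELD_NONE,
	};
	int ret;

	/* let the sensor clamp/adjust the requested format first */
	ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
	if (ret < 0)
		return ret;

	/* then program the adjusted format */
	return v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
}
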
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index 90da699601e..d35f536f9fc 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -123,23 +123,34 @@
#define MT9M111_MAX_HEIGHT 1024
#define MT9M111_MAX_WIDTH 1280
-#define COL_FMT(_name, _depth, _fourcc, _colorspace) \
- { .name = _name, .depth = _depth, .fourcc = _fourcc, \
- .colorspace = _colorspace }
-#define RGB_FMT(_name, _depth, _fourcc) \
- COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_SRGB)
-#define JPG_FMT(_name, _depth, _fourcc) \
- COL_FMT(_name, _depth, _fourcc, V4L2_COLORSPACE_JPEG)
-
-static const struct soc_camera_data_format mt9m111_colour_formats[] = {
- JPG_FMT("CbYCrY 16 bit", 16, V4L2_PIX_FMT_UYVY),
- JPG_FMT("CrYCbY 16 bit", 16, V4L2_PIX_FMT_VYUY),
- JPG_FMT("YCbYCr 16 bit", 16, V4L2_PIX_FMT_YUYV),
- JPG_FMT("YCrYCb 16 bit", 16, V4L2_PIX_FMT_YVYU),
- RGB_FMT("RGB 565", 16, V4L2_PIX_FMT_RGB565),
- RGB_FMT("RGB 555", 16, V4L2_PIX_FMT_RGB555),
- RGB_FMT("Bayer (sRGB) 10 bit", 10, V4L2_PIX_FMT_SBGGR16),
- RGB_FMT("Bayer (sRGB) 8 bit", 8, V4L2_PIX_FMT_SBGGR8),
+/* MT9M111 has only one fixed colorspace per pixelcode */
+struct mt9m111_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+/* Find a data format by a pixel code in an array */
+static const struct mt9m111_datafmt *mt9m111_find_datafmt(
+ enum v4l2_mbus_pixelcode code, const struct mt9m111_datafmt *fmt,
+ int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ if (fmt[i].code == code)
+ return fmt + i;
+
+ return NULL;
+}
+
+static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
+ {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_YUYV8_2X8_BE, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_YVYU8_2X8_BE, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
};
enum mt9m111_context {
@@ -152,7 +163,7 @@ struct mt9m111 {
int model; /* V4L2_IDENT_MT9M11x* codes from v4l2-chip-ident.h */
enum mt9m111_context context;
struct v4l2_rect rect;
- u32 pixfmt;
+ const struct mt9m111_datafmt *fmt;
unsigned int gain;
unsigned char autoexposure;
unsigned char datawidth;
@@ -258,8 +269,8 @@ static int mt9m111_setup_rect(struct i2c_client *client,
int width = rect->width;
int height = rect->height;
- if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 ||
- mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16)
+ if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
+ mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE)
is_raw_format = 1;
else
is_raw_format = 0;
@@ -307,7 +318,8 @@ static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt)
static int mt9m111_setfmt_bayer8(struct i2c_client *client)
{
- return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER);
+ return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER |
+ MT9M111_OUTFMT_RGB);
}
static int mt9m111_setfmt_bayer10(struct i2c_client *client)
@@ -401,8 +413,8 @@ static int mt9m111_make_rect(struct i2c_client *client,
{
struct mt9m111 *mt9m111 = to_mt9m111(client);
- if (mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR8 ||
- mt9m111->pixfmt == V4L2_PIX_FMT_SBGGR16) {
+ if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
+ mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* Bayer format - even size lengths */
rect->width = ALIGN(rect->width, 2);
rect->height = ALIGN(rect->height, 2);
@@ -460,120 +472,139 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int mt9m111_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9m111_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct mt9m111 *mt9m111 = to_mt9m111(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- pix->width = mt9m111->rect.width;
- pix->height = mt9m111->rect.height;
- pix->pixelformat = mt9m111->pixfmt;
- pix->field = V4L2_FIELD_NONE;
- pix->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->width = mt9m111->rect.width;
+ mf->height = mt9m111->rect.height;
+ mf->code = mt9m111->fmt->code;
+ mf->field = V4L2_FIELD_NONE;
return 0;
}
-static int mt9m111_set_pixfmt(struct i2c_client *client, u32 pixfmt)
+static int mt9m111_set_pixfmt(struct i2c_client *client,
+ enum v4l2_mbus_pixelcode code)
{
struct mt9m111 *mt9m111 = to_mt9m111(client);
int ret;
- switch (pixfmt) {
- case V4L2_PIX_FMT_SBGGR8:
+ switch (code) {
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
ret = mt9m111_setfmt_bayer8(client);
break;
- case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
ret = mt9m111_setfmt_bayer10(client);
break;
- case V4L2_PIX_FMT_RGB555:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
ret = mt9m111_setfmt_rgb555(client);
break;
- case V4L2_PIX_FMT_RGB565:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
ret = mt9m111_setfmt_rgb565(client);
break;
- case V4L2_PIX_FMT_UYVY:
+ case V4L2_MBUS_FMT_YUYV8_2X8_BE:
mt9m111->swap_yuv_y_chromas = 0;
mt9m111->swap_yuv_cb_cr = 0;
ret = mt9m111_setfmt_yuv(client);
break;
- case V4L2_PIX_FMT_VYUY:
+ case V4L2_MBUS_FMT_YVYU8_2X8_BE:
mt9m111->swap_yuv_y_chromas = 0;
mt9m111->swap_yuv_cb_cr = 1;
ret = mt9m111_setfmt_yuv(client);
break;
- case V4L2_PIX_FMT_YUYV:
+ case V4L2_MBUS_FMT_YUYV8_2X8_LE:
mt9m111->swap_yuv_y_chromas = 1;
mt9m111->swap_yuv_cb_cr = 0;
ret = mt9m111_setfmt_yuv(client);
break;
- case V4L2_PIX_FMT_YVYU:
+ case V4L2_MBUS_FMT_YVYU8_2X8_LE:
mt9m111->swap_yuv_y_chromas = 1;
mt9m111->swap_yuv_cb_cr = 1;
ret = mt9m111_setfmt_yuv(client);
break;
default:
dev_err(&client->dev, "Pixel format not handled : %x\n",
- pixfmt);
+ code);
ret = -EINVAL;
}
- if (!ret)
- mt9m111->pixfmt = pixfmt;
-
return ret;
}
-static int mt9m111_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9m111_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
+ const struct mt9m111_datafmt *fmt;
struct mt9m111 *mt9m111 = to_mt9m111(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_rect rect = {
.left = mt9m111->rect.left,
.top = mt9m111->rect.top,
- .width = pix->width,
- .height = pix->height,
+ .width = mf->width,
+ .height = mf->height,
};
int ret;
+ fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts,
+ ARRAY_SIZE(mt9m111_colour_fmts));
+ if (!fmt)
+ return -EINVAL;
+
dev_dbg(&client->dev,
- "%s fmt=%x left=%d, top=%d, width=%d, height=%d\n", __func__,
- pix->pixelformat, rect.left, rect.top, rect.width, rect.height);
+ "%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__,
+ mf->code, rect.left, rect.top, rect.width, rect.height);
ret = mt9m111_make_rect(client, &rect);
if (!ret)
- ret = mt9m111_set_pixfmt(client, pix->pixelformat);
- if (!ret)
- mt9m111->rect = rect;
+ ret = mt9m111_set_pixfmt(client, mf->code);
+ if (!ret) {
+ mt9m111->rect = rect;
+ mt9m111->fmt = fmt;
+ mf->colorspace = fmt->colorspace;
+ }
+
return ret;
}
-static int mt9m111_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9m111_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
- struct v4l2_pix_format *pix = &f->fmt.pix;
- bool bayer = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 ||
- pix->pixelformat == V4L2_PIX_FMT_SBGGR16;
+ struct i2c_client *client = sd->priv;
+ struct mt9m111 *mt9m111 = to_mt9m111(client);
+ const struct mt9m111_datafmt *fmt;
+ bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
+ mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
+
+ fmt = mt9m111_find_datafmt(mf->code, mt9m111_colour_fmts,
+ ARRAY_SIZE(mt9m111_colour_fmts));
+ if (!fmt) {
+ fmt = mt9m111->fmt;
+ mf->code = fmt->code;
+ }
/*
* With Bayer format enforce even side lengths, but let the user play
* with the starting pixel
*/
- if (pix->height > MT9M111_MAX_HEIGHT)
- pix->height = MT9M111_MAX_HEIGHT;
- else if (pix->height < 2)
- pix->height = 2;
+ if (mf->height > MT9M111_MAX_HEIGHT)
+ mf->height = MT9M111_MAX_HEIGHT;
+ else if (mf->height < 2)
+ mf->height = 2;
else if (bayer)
- pix->height = ALIGN(pix->height, 2);
+ mf->height = ALIGN(mf->height, 2);
- if (pix->width > MT9M111_MAX_WIDTH)
- pix->width = MT9M111_MAX_WIDTH;
- else if (pix->width < 2)
- pix->width = 2;
+ if (mf->width > MT9M111_MAX_WIDTH)
+ mf->width = MT9M111_MAX_WIDTH;
+ else if (mf->width < 2)
+ mf->width = 2;
else if (bayer)
- pix->width = ALIGN(pix->width, 2);
+ mf->width = ALIGN(mf->width, 2);
+
+ mf->colorspace = fmt->colorspace;
return 0;
}
@@ -863,7 +894,7 @@ static int mt9m111_restore_state(struct i2c_client *client)
struct mt9m111 *mt9m111 = to_mt9m111(client);
mt9m111_set_context(client, mt9m111->context);
- mt9m111_set_pixfmt(client, mt9m111->pixfmt);
+ mt9m111_set_pixfmt(client, mt9m111->fmt->code);
mt9m111_setup_rect(client, &mt9m111->rect);
mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
@@ -952,9 +983,6 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
goto ei2c;
}
- icd->formats = mt9m111_colour_formats;
- icd->num_formats = ARRAY_SIZE(mt9m111_colour_formats);
-
dev_info(&client->dev, "Detected a MT9M11x chip ID %x\n", data);
ei2c:
@@ -971,13 +999,24 @@ static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
#endif
};
+static int mt9m111_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if ((unsigned int)index >= ARRAY_SIZE(mt9m111_colour_fmts))
+ return -EINVAL;
+
+ *code = mt9m111_colour_fmts[index].code;
+ return 0;
+}
+
static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
- .s_fmt = mt9m111_s_fmt,
- .g_fmt = mt9m111_g_fmt,
- .try_fmt = mt9m111_try_fmt,
+ .s_mbus_fmt = mt9m111_s_fmt,
+ .g_mbus_fmt = mt9m111_g_fmt,
+ .try_mbus_fmt = mt9m111_try_fmt,
.s_crop = mt9m111_s_crop,
.g_crop = mt9m111_g_crop,
.cropcap = mt9m111_cropcap,
+ .enum_mbus_fmt = mt9m111_enum_fmt,
};
static struct v4l2_subdev_ops mt9m111_subdev_ops = {
@@ -1019,12 +1058,12 @@ static int mt9m111_probe(struct i2c_client *client,
/* Second stage probe - when a capture adapter is there */
icd->ops = &mt9m111_ops;
- icd->y_skip_top = 0;
mt9m111->rect.left = MT9M111_MIN_DARK_COLS;
mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
mt9m111->rect.width = MT9M111_MAX_WIDTH;
mt9m111->rect.height = MT9M111_MAX_HEIGHT;
+ mt9m111->fmt = &mt9m111_colour_fmts[0];
ret = mt9m111_video_probe(icd, client);
if (ret) {
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 6966f644977..a9061bff79b 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -17,9 +17,11 @@
#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
-/* mt9t031 i2c address 0x5d
+/*
+ * mt9t031 i2c address 0x5d
* The platform has to define i2c_board_info and link to it from
- * struct soc_camera_link */
+ * struct soc_camera_link
+ */
/* mt9t031 selected register addresses */
#define MT9T031_CHIP_VERSION 0x00
@@ -58,15 +60,6 @@
SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH | \
SOCAM_MASTER | SOCAM_DATAWIDTH_10)
-static const struct soc_camera_data_format mt9t031_colour_formats[] = {
- {
- .name = "Bayer (sRGB) 10 bit",
- .depth = 10,
- .fourcc = V4L2_PIX_FMT_SGRBG10,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }
-};
-
struct mt9t031 {
struct v4l2_subdev subdev;
struct v4l2_rect rect; /* Sensor window */
@@ -74,6 +67,7 @@ struct mt9t031 {
u16 xskip;
u16 yskip;
unsigned int gain;
+ unsigned short y_skip_top; /* Lines to skip at the top */
unsigned int exposure;
unsigned char autoexposure;
};
@@ -207,6 +201,71 @@ static unsigned long mt9t031_query_bus_param(struct soc_camera_device *icd)
return soc_camera_apply_sensor_flags(icl, MT9T031_BUS_PARAM);
}
+enum {
+ MT9T031_CTRL_VFLIP,
+ MT9T031_CTRL_HFLIP,
+ MT9T031_CTRL_GAIN,
+ MT9T031_CTRL_EXPOSURE,
+ MT9T031_CTRL_EXPOSURE_AUTO,
+};
+
+static const struct v4l2_queryctrl mt9t031_controls[] = {
+ [MT9T031_CTRL_VFLIP] = {
+ .id = V4L2_CID_VFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip Vertically",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ [MT9T031_CTRL_HFLIP] = {
+ .id = V4L2_CID_HFLIP,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Flip Horizontally",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ [MT9T031_CTRL_GAIN] = {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 64,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ [MT9T031_CTRL_EXPOSURE] = {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 1,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 255,
+ .flags = V4L2_CTRL_FLAG_SLIDER,
+ },
+ [MT9T031_CTRL_EXPOSURE_AUTO] = {
+ .id = V4L2_CID_EXPOSURE_AUTO,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Automatic Exposure",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ }
+};
+
+static struct soc_camera_ops mt9t031_ops = {
+ .set_bus_param = mt9t031_set_bus_param,
+ .query_bus_param = mt9t031_query_bus_param,
+ .controls = mt9t031_controls,
+ .num_controls = ARRAY_SIZE(mt9t031_controls),
+};
+
/* target must be _even_ */
static u16 mt9t031_skip(s32 *source, s32 target, s32 max)
{
@@ -226,10 +285,9 @@ static u16 mt9t031_skip(s32 *source, s32 target, s32 max)
}
/* rect is the sensor rectangle, the caller guarantees parameter validity */
-static int mt9t031_set_params(struct soc_camera_device *icd,
+static int mt9t031_set_params(struct i2c_client *client,
struct v4l2_rect *rect, u16 xskip, u16 yskip)
{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
struct mt9t031 *mt9t031 = to_mt9t031(client);
int ret;
u16 xbin, ybin;
@@ -291,8 +349,10 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
dev_dbg(&client->dev, "new physical left %u, top %u\n",
rect->left, rect->top);
- /* The caller provides a supported format, as guaranteed by
- * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() */
+ /*
+ * The caller provides a supported format, as guaranteed by
+ * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap()
+ */
if (ret >= 0)
ret = reg_write(client, MT9T031_COLUMN_START, rect->left);
if (ret >= 0)
@@ -301,15 +361,14 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
ret = reg_write(client, MT9T031_WINDOW_WIDTH, rect->width - 1);
if (ret >= 0)
ret = reg_write(client, MT9T031_WINDOW_HEIGHT,
- rect->height + icd->y_skip_top - 1);
+ rect->height + mt9t031->y_skip_top - 1);
if (ret >= 0 && mt9t031->autoexposure) {
- unsigned int total_h = rect->height + icd->y_skip_top + vblank;
+ unsigned int total_h = rect->height + mt9t031->y_skip_top + vblank;
ret = set_shutter(client, total_h);
if (ret >= 0) {
const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
const struct v4l2_queryctrl *qctrl =
- soc_camera_find_qctrl(icd->ops,
- V4L2_CID_EXPOSURE);
+ &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
(qctrl->maximum - qctrl->minimum)) /
shutter_max + qctrl->minimum;
@@ -334,7 +393,6 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
struct v4l2_rect rect = a->c;
struct i2c_client *client = sd->priv;
struct mt9t031 *mt9t031 = to_mt9t031(client);
- struct soc_camera_device *icd = client->dev.platform_data;
rect.width = ALIGN(rect.width, 2);
rect.height = ALIGN(rect.height, 2);
@@ -345,7 +403,7 @@ static int mt9t031_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
soc_camera_limit_side(&rect.top, &rect.height,
MT9T031_ROW_SKIP, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT);
- return mt9t031_set_params(icd, &rect, mt9t031->xskip, mt9t031->yskip);
+ return mt9t031_set_params(client, &rect, mt9t031->xskip, mt9t031->yskip);
}
static int mt9t031_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
@@ -373,27 +431,26 @@ static int mt9t031_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int mt9t031_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9t031_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct mt9t031 *mt9t031 = to_mt9t031(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- pix->width = mt9t031->rect.width / mt9t031->xskip;
- pix->height = mt9t031->rect.height / mt9t031->yskip;
- pix->pixelformat = V4L2_PIX_FMT_SGRBG10;
- pix->field = V4L2_FIELD_NONE;
- pix->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->width = mt9t031->rect.width / mt9t031->xskip;
+ mf->height = mt9t031->rect.height / mt9t031->yskip;
+ mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->field = V4L2_FIELD_NONE;
return 0;
}
-static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9t031_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct mt9t031 *mt9t031 = to_mt9t031(client);
- struct soc_camera_device *icd = client->dev.platform_data;
- struct v4l2_pix_format *pix = &f->fmt.pix;
u16 xskip, yskip;
struct v4l2_rect rect = mt9t031->rect;
@@ -401,24 +458,29 @@ static int mt9t031_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
* try_fmt has put width and height within limits.
* S_FMT: use binning and skipping for scaling
*/
- xskip = mt9t031_skip(&rect.width, pix->width, MT9T031_MAX_WIDTH);
- yskip = mt9t031_skip(&rect.height, pix->height, MT9T031_MAX_HEIGHT);
+ xskip = mt9t031_skip(&rect.width, mf->width, MT9T031_MAX_WIDTH);
+ yskip = mt9t031_skip(&rect.height, mf->height, MT9T031_MAX_HEIGHT);
+
+ mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
/* mt9t031_set_params() doesn't change width and height */
- return mt9t031_set_params(icd, &rect, xskip, yskip);
+ return mt9t031_set_params(client, &rect, xskip, yskip);
}
/*
* If a user window larger than sensor window is requested, we'll increase the
* sensor window.
*/
-static int mt9t031_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9t031_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
- struct v4l2_pix_format *pix = &f->fmt.pix;
-
v4l_bound_align_image(
- &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
- &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
+ &mf->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
+ &mf->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
+
+ mf->code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
return 0;
}
@@ -479,59 +541,6 @@ static int mt9t031_s_register(struct v4l2_subdev *sd,
}
#endif
-static const struct v4l2_queryctrl mt9t031_controls[] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Vertically",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip Horizontally",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- }, {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 64,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 1,
- .maximum = 255,
- .step = 1,
- .default_value = 255,
- .flags = V4L2_CTRL_FLAG_SLIDER,
- }, {
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- }
-};
-
-static struct soc_camera_ops mt9t031_ops = {
- .set_bus_param = mt9t031_set_bus_param,
- .query_bus_param = mt9t031_query_bus_param,
- .controls = mt9t031_controls,
- .num_controls = ARRAY_SIZE(mt9t031_controls),
-};
-
static int mt9t031_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct i2c_client *client = sd->priv;
@@ -568,15 +577,9 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct i2c_client *client = sd->priv;
struct mt9t031 *mt9t031 = to_mt9t031(client);
- struct soc_camera_device *icd = client->dev.platform_data;
const struct v4l2_queryctrl *qctrl;
int data;
- qctrl = soc_camera_find_qctrl(&mt9t031_ops, ctrl->id);
-
- if (!qctrl)
- return -EINVAL;
-
switch (ctrl->id) {
case V4L2_CID_VFLIP:
if (ctrl->value)
@@ -595,6 +598,7 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return -EIO;
break;
case V4L2_CID_GAIN:
+ qctrl = &mt9t031_controls[MT9T031_CTRL_GAIN];
if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
return -EINVAL;
/* See Datasheet Table 7, Gain settings. */
@@ -634,6 +638,7 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
mt9t031->gain = ctrl->value;
break;
case V4L2_CID_EXPOSURE:
+ qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
/* mt9t031 has maximum == default */
if (ctrl->value > qctrl->maximum || ctrl->value < qctrl->minimum)
return -EINVAL;
@@ -657,11 +662,11 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
const u16 vblank = MT9T031_VERTICAL_BLANK;
const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
unsigned int total_h = mt9t031->rect.height +
- icd->y_skip_top + vblank;
+ mt9t031->y_skip_top + vblank;
if (set_shutter(client, total_h) < 0)
return -EIO;
- qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
+ qctrl = &mt9t031_controls[MT9T031_CTRL_EXPOSURE];
mt9t031->exposure = (shutter_max / 2 + (total_h - 1) *
(qctrl->maximum - qctrl->minimum)) /
shutter_max + qctrl->minimum;
@@ -669,15 +674,18 @@ static int mt9t031_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
} else
mt9t031->autoexposure = 0;
break;
+ default:
+ return -EINVAL;
}
return 0;
}
-/* Interface active, can use i2c. If it fails, it can indeed mean, that
- * this wasn't our capture interface, so, we wait for the right one */
+/*
+ * Interface active, can use i2c. If it fails, it can indeed mean, that
+ * this wasn't our capture interface, so, we wait for the right one
+ */
static int mt9t031_video_probe(struct i2c_client *client)
{
- struct soc_camera_device *icd = client->dev.platform_data;
struct mt9t031 *mt9t031 = to_mt9t031(client);
s32 data;
int ret;
@@ -692,8 +700,6 @@ static int mt9t031_video_probe(struct i2c_client *client)
switch (data) {
case 0x1621:
mt9t031->model = V4L2_IDENT_MT9T031;
- icd->formats = mt9t031_colour_formats;
- icd->num_formats = ARRAY_SIZE(mt9t031_colour_formats);
break;
default:
dev_err(&client->dev,
@@ -714,6 +720,16 @@ static int mt9t031_video_probe(struct i2c_client *client)
return ret;
}
+static int mt9t031_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9t031 *mt9t031 = to_mt9t031(client);
+
+ *lines = mt9t031->y_skip_top;
+
+ return 0;
+}
+
static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
.g_ctrl = mt9t031_g_ctrl,
.s_ctrl = mt9t031_s_ctrl,
@@ -724,19 +740,35 @@ static struct v4l2_subdev_core_ops mt9t031_subdev_core_ops = {
#endif
};
+static int mt9t031_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index)
+ return -EINVAL;
+
+ *code = V4L2_MBUS_FMT_SBGGR10_1X10;
+ return 0;
+}
+
static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
.s_stream = mt9t031_s_stream,
- .s_fmt = mt9t031_s_fmt,
- .g_fmt = mt9t031_g_fmt,
- .try_fmt = mt9t031_try_fmt,
+ .s_mbus_fmt = mt9t031_s_fmt,
+ .g_mbus_fmt = mt9t031_g_fmt,
+ .try_mbus_fmt = mt9t031_try_fmt,
.s_crop = mt9t031_s_crop,
.g_crop = mt9t031_g_crop,
.cropcap = mt9t031_cropcap,
+ .enum_mbus_fmt = mt9t031_enum_fmt,
+};
+
+static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
+ .g_skip_top_lines = mt9t031_g_skip_top_lines,
};
static struct v4l2_subdev_ops mt9t031_subdev_ops = {
.core = &mt9t031_subdev_core_ops,
.video = &mt9t031_subdev_video_ops,
+ .sensor = &mt9t031_subdev_sensor_ops,
};
static int mt9t031_probe(struct i2c_client *client,
@@ -745,18 +777,16 @@ static int mt9t031_probe(struct i2c_client *client,
struct mt9t031 *mt9t031;
struct soc_camera_device *icd = client->dev.platform_data;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct soc_camera_link *icl;
int ret;
- if (!icd) {
- dev_err(&client->dev, "MT9T031: missing soc-camera data!\n");
- return -EINVAL;
- }
+ if (icd) {
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+ if (!icl) {
+ dev_err(&client->dev, "MT9T031 driver needs platform data\n");
+ return -EINVAL;
+ }
- icl = to_soc_camera_link(icd);
- if (!icl) {
- dev_err(&client->dev, "MT9T031 driver needs platform data\n");
- return -EINVAL;
+ icd->ops = &mt9t031_ops;
}
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -771,17 +801,16 @@ static int mt9t031_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&mt9t031->subdev, client, &mt9t031_subdev_ops);
- /* Second stage probe - when a capture adapter is there */
- icd->ops = &mt9t031_ops;
- icd->y_skip_top = 0;
-
+ mt9t031->y_skip_top = 0;
mt9t031->rect.left = MT9T031_COLUMN_SKIP;
mt9t031->rect.top = MT9T031_ROW_SKIP;
mt9t031->rect.width = MT9T031_MAX_WIDTH;
mt9t031->rect.height = MT9T031_MAX_HEIGHT;
- /* Simulated autoexposure. If enabled, we calculate shutter width
- * ourselves in the driver based on vertical blanking and frame width */
+ /*
+ * Simulated autoexposure. If enabled, we calculate shutter width
+ * ourselves in the driver based on vertical blanking and frame width
+ */
mt9t031->autoexposure = 1;
mt9t031->xskip = 1;
@@ -794,7 +823,8 @@ static int mt9t031_probe(struct i2c_client *client,
mt9t031_disable(client);
if (ret) {
- icd->ops = NULL;
+ if (icd)
+ icd->ops = NULL;
i2c_set_clientdata(client, NULL);
kfree(mt9t031);
}
@@ -807,7 +837,8 @@ static int mt9t031_remove(struct i2c_client *client)
struct mt9t031 *mt9t031 = to_mt9t031(client);
struct soc_camera_device *icd = client->dev.platform_data;
- icd->ops = NULL;
+ if (icd)
+ icd->ops = NULL;
i2c_set_clientdata(client, NULL);
client->driver = NULL;
kfree(mt9t031);
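
mt9m001 and mt9t031 above stop publishing y_skip_top through struct soc_camera_device and keep it in the sensor private data instead, exposing it via the new g_skip_top_lines sensor op. A minimal sketch, not part of the patch, of how a host might query it; the helper name is illustrative:

#include <media/v4l2-subdev.h>

static u32 sensor_skip_top_lines(struct v4l2_subdev *sd)
{
	u32 lines = 0;

	/* subdevs without the op return an error; treat that as "skip nothing" */
	if (v4l2_subdev_call(sd, sensor, g_skip_top_lines, &lines))
		lines = 0;

	return lines;
}
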
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
new file mode 100644
index 00000000000..fc4dd604572
--- /dev/null
+++ b/drivers/media/video/mt9t112.c
@@ -0,0 +1,1177 @@
+/*
+ * mt9t112 Camera Driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on ov772x driver, mt9m111 driver,
+ *
+ * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ * Copyright (C) 2008, Robert Jarzmik <robert.jarzmik@free.fr>
+ * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
+ * Copyright (C) 2008 Magnus Damm
+ * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/mt9t112.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-common.h>
+
+/* define EXT_CLOCK to check PLL/clock info */
+/* #define EXT_CLOCK 24000000 */
+
+/************************************************************************
+
+
+ macro
+
+
+************************************************************************/
+/*
+ * frame size
+ */
+#define MAX_WIDTH 2048
+#define MAX_HEIGHT 1536
+
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+/*
+ * macro of read/write
+ */
+#define ECHECKER(ret, x) \
+ do { \
+ (ret) = (x); \
+ if ((ret) < 0) \
+ return (ret); \
+ } while (0)
+
+#define mt9t112_reg_write(ret, client, a, b) \
+ ECHECKER(ret, __mt9t112_reg_write(client, a, b))
+#define mt9t112_mcu_write(ret, client, a, b) \
+ ECHECKER(ret, __mt9t112_mcu_write(client, a, b))
+
+#define mt9t112_reg_mask_set(ret, client, a, b, c) \
+ ECHECKER(ret, __mt9t112_reg_mask_set(client, a, b, c))
+#define mt9t112_mcu_mask_set(ret, client, a, b, c) \
+ ECHECKER(ret, __mt9t112_mcu_mask_set(client, a, b, c))
+
+#define mt9t112_reg_read(ret, client, a) \
+ ECHECKER(ret, __mt9t112_reg_read(client, a))
+
+/*
+ * Logical address
+ */
+#define _VAR(id, offset, base) (base | (id & 0x1f) << 10 | (offset & 0x3ff))
+#define VAR(id, offset) _VAR(id, offset, 0x0000)
+#define VAR8(id, offset) _VAR(id, offset, 0x8000)
+
+/************************************************************************
+
+
+ struct
+
+
+************************************************************************/
+struct mt9t112_frame_size {
+ u16 width;
+ u16 height;
+};
+
+struct mt9t112_format {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+ u16 fmt;
+ u16 order;
+};
+
+struct mt9t112_priv {
+ struct v4l2_subdev subdev;
+ struct mt9t112_camera_info *info;
+ struct i2c_client *client;
+ struct soc_camera_device icd;
+ struct mt9t112_frame_size frame;
+ const struct mt9t112_format *format;
+ int model;
+ u32 flags;
+/* for flags */
+#define INIT_DONE (1<<0)
+};
+
+/************************************************************************
+
+
+ supported format
+
+
+************************************************************************/
+
+static const struct mt9t112_format mt9t112_cfmts[] = {
+ {
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 0,
+ }, {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 1,
+ }, {
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 2,
+ }, {
+ .code = V4L2_MBUS_FMT_YVYU8_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 3,
+ }, {
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt = 8,
+ .order = 2,
+ }, {
+ .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .fmt = 4,
+ .order = 2,
+ },
+};
+
+/************************************************************************
+
+
+ general function
+
+
+************************************************************************/
+static struct mt9t112_priv *to_mt9t112(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client),
+ struct mt9t112_priv,
+ subdev);
+}
+
+static int __mt9t112_reg_read(const struct i2c_client *client, u16 command)
+{
+ struct i2c_msg msg[2];
+ u8 buf[2];
+ int ret;
+
+ command = swab16(command);
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = (u8 *)&command;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = buf;
+
+ /*
+	 * If the return value of this function is < 0,
+	 * it means an error.
+	 * Otherwise, the lower 16 bits contain valid data.
+ */
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0)
+ return ret;
+
+ memcpy(&ret, buf, 2);
+ return swab16(ret);
+}
+
+static int __mt9t112_reg_write(const struct i2c_client *client,
+ u16 command, u16 data)
+{
+ struct i2c_msg msg;
+ u8 buf[4];
+ int ret;
+
+ command = swab16(command);
+ data = swab16(data);
+
+ memcpy(buf + 0, &command, 2);
+ memcpy(buf + 2, &data, 2);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = 4;
+ msg.buf = buf;
+
+ /*
+	 * i2c_transfer returns the number of messages transferred,
+	 * but this function should return 0 on success
+ */
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret >= 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int __mt9t112_reg_mask_set(const struct i2c_client *client,
+ u16 command,
+ u16 mask,
+ u16 set)
+{
+ int val = __mt9t112_reg_read(client, command);
+ if (val < 0)
+ return val;
+
+ val &= ~mask;
+ val |= set & mask;
+
+ return __mt9t112_reg_write(client, command, val);
+}
+
+/* mcu access */
+static int __mt9t112_mcu_read(const struct i2c_client *client, u16 command)
+{
+ int ret;
+
+ ret = __mt9t112_reg_write(client, 0x098E, command);
+ if (ret < 0)
+ return ret;
+
+ return __mt9t112_reg_read(client, 0x0990);
+}
+
+static int __mt9t112_mcu_write(const struct i2c_client *client,
+ u16 command, u16 data)
+{
+ int ret;
+
+ ret = __mt9t112_reg_write(client, 0x098E, command);
+ if (ret < 0)
+ return ret;
+
+ return __mt9t112_reg_write(client, 0x0990, data);
+}
+
+static int __mt9t112_mcu_mask_set(const struct i2c_client *client,
+ u16 command,
+ u16 mask,
+ u16 set)
+{
+ int val = __mt9t112_mcu_read(client, command);
+ if (val < 0)
+ return val;
+
+ val &= ~mask;
+ val |= set & mask;
+
+ return __mt9t112_mcu_write(client, command, val);
+}
+
+static int mt9t112_reset(const struct i2c_client *client)
+{
+ int ret;
+
+ mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0001);
+ msleep(1);
+ mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0000);
+
+ return ret;
+}
+
+#ifndef EXT_CLOCK
+#define CLOCK_INFO(a, b)
+#else
+#define CLOCK_INFO(a, b) mt9t112_clock_info(a, b)
+static int mt9t112_clock_info(const struct i2c_client *client, u32 ext)
+{
+ int m, n, p1, p2, p3, p4, p5, p6, p7;
+ u32 vco, clk;
+ char *enable;
+
+	ext /= 1000;	/* convert to kHz */
+
+ mt9t112_reg_read(n, client, 0x0012);
+ p1 = n & 0x000f;
+ n = n >> 4;
+ p2 = n & 0x000f;
+ n = n >> 4;
+ p3 = n & 0x000f;
+
+ mt9t112_reg_read(n, client, 0x002a);
+ p4 = n & 0x000f;
+ n = n >> 4;
+ p5 = n & 0x000f;
+ n = n >> 4;
+ p6 = n & 0x000f;
+
+ mt9t112_reg_read(n, client, 0x002c);
+ p7 = n & 0x000f;
+
+ mt9t112_reg_read(n, client, 0x0010);
+ m = n & 0x00ff;
+ n = (n >> 8) & 0x003f;
+
+ enable = ((6000 > ext) || (54000 < ext)) ? "X" : "";
+ dev_info(&client->dev, "EXTCLK : %10u K %s\n", ext, enable);
+
+ vco = 2 * m * ext / (n+1);
+ enable = ((384000 > vco) || (768000 < vco)) ? "X" : "";
+ dev_info(&client->dev, "VCO : %10u K %s\n", vco, enable);
+
+ clk = vco / (p1+1) / (p2+1);
+ enable = (96000 < clk) ? "X" : "";
+ dev_info(&client->dev, "PIXCLK : %10u K %s\n", clk, enable);
+
+ clk = vco / (p3+1);
+ enable = (768000 < clk) ? "X" : "";
+ dev_info(&client->dev, "MIPICLK : %10u K %s\n", clk, enable);
+
+ clk = vco / (p6+1);
+ enable = (96000 < clk) ? "X" : "";
+ dev_info(&client->dev, "MCU CLK : %10u K %s\n", clk, enable);
+
+ clk = vco / (p5+1);
+ enable = (54000 < clk) ? "X" : "";
+ dev_info(&client->dev, "SOC CLK : %10u K %s\n", clk, enable);
+
+ clk = vco / (p4+1);
+ enable = (70000 < clk) ? "X" : "";
+ dev_info(&client->dev, "Sensor CLK : %10u K %s\n", clk, enable);
+
+ clk = vco / (p7+1);
+ dev_info(&client->dev, "External sensor : %10u K\n", clk);
+
+ clk = ext / (n+1);
+ enable = ((2000 > clk) || (24000 < clk)) ? "X" : "";
+ dev_info(&client->dev, "PFD : %10u K %s\n", clk, enable);
+
+ return 0;
+}
+#endif
+
+static void mt9t112_frame_check(u32 *width, u32 *height)
+{
+ if (*width > MAX_WIDTH)
+ *width = MAX_WIDTH;
+
+ if (*height > MAX_HEIGHT)
+ *height = MAX_HEIGHT;
+}
+
+static int mt9t112_set_a_frame_size(const struct i2c_client *client,
+ u16 width,
+ u16 height)
+{
+ int ret;
+ u16 wstart = (MAX_WIDTH - width) / 2;
+ u16 hstart = (MAX_HEIGHT - height) / 2;
+
+ /* (Context A) Image Width/Height */
+ mt9t112_mcu_write(ret, client, VAR(26, 0), width);
+ mt9t112_mcu_write(ret, client, VAR(26, 2), height);
+
+ /* (Context A) Output Width/Height */
+ mt9t112_mcu_write(ret, client, VAR(18, 43), 8 + width);
+ mt9t112_mcu_write(ret, client, VAR(18, 45), 8 + height);
+
+ /* (Context A) Start Row/Column */
+ mt9t112_mcu_write(ret, client, VAR(18, 2), 4 + hstart);
+ mt9t112_mcu_write(ret, client, VAR(18, 4), 4 + wstart);
+
+ /* (Context A) End Row/Column */
+ mt9t112_mcu_write(ret, client, VAR(18, 6), 11 + height + hstart);
+ mt9t112_mcu_write(ret, client, VAR(18, 8), 11 + width + wstart);
+
+ mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
+
+ return ret;
+}
+
+static int mt9t112_set_pll_dividers(const struct i2c_client *client,
+ u8 m, u8 n,
+ u8 p1, u8 p2, u8 p3,
+ u8 p4, u8 p5, u8 p6,
+ u8 p7)
+{
+ int ret;
+ u16 val;
+
+ /* N/M */
+ val = (n << 8) |
+ (m << 0);
+ mt9t112_reg_mask_set(ret, client, 0x0010, 0x3fff, val);
+
+ /* P1/P2/P3 */
+ val = ((p3 & 0x0F) << 8) |
+ ((p2 & 0x0F) << 4) |
+ ((p1 & 0x0F) << 0);
+ mt9t112_reg_mask_set(ret, client, 0x0012, 0x0fff, val);
+
+ /* P4/P5/P6 */
+ val = (0x7 << 12) |
+ ((p6 & 0x0F) << 8) |
+ ((p5 & 0x0F) << 4) |
+ ((p4 & 0x0F) << 0);
+ mt9t112_reg_mask_set(ret, client, 0x002A, 0x7fff, val);
+
+ /* P7 */
+ val = (0x1 << 12) |
+ ((p7 & 0x0F) << 0);
+ mt9t112_reg_mask_set(ret, client, 0x002C, 0x100f, val);
+
+ return ret;
+}
+
+static int mt9t112_init_pll(const struct i2c_client *client)
+{
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ int data, i, ret;
+
+ mt9t112_reg_mask_set(ret, client, 0x0014, 0x003, 0x0001);
+
+ /* PLL control: BYPASS PLL = 8517 */
+ mt9t112_reg_write(ret, client, 0x0014, 0x2145);
+
+ /* Replace these registers when new timing parameters are generated */
+ mt9t112_set_pll_dividers(client,
+ priv->info->divider.m,
+ priv->info->divider.n,
+ priv->info->divider.p1,
+ priv->info->divider.p2,
+ priv->info->divider.p3,
+ priv->info->divider.p4,
+ priv->info->divider.p5,
+ priv->info->divider.p6,
+ priv->info->divider.p7);
+
+ /*
+ * TEST_BYPASS on
+ * PLL_ENABLE on
+ * SEL_LOCK_DET on
+ * TEST_BYPASS off
+ */
+ mt9t112_reg_write(ret, client, 0x0014, 0x2525);
+ mt9t112_reg_write(ret, client, 0x0014, 0x2527);
+ mt9t112_reg_write(ret, client, 0x0014, 0x3427);
+ mt9t112_reg_write(ret, client, 0x0014, 0x3027);
+
+ mdelay(10);
+
+ /*
+ * PLL_BYPASS off
+ * Reference clock count
+ * I2C Master Clock Divider
+ */
+ mt9t112_reg_write(ret, client, 0x0014, 0x3046);
+ mt9t112_reg_write(ret, client, 0x0022, 0x0190);
+ mt9t112_reg_write(ret, client, 0x3B84, 0x0212);
+
+ /* External sensor clock is PLL bypass */
+ mt9t112_reg_write(ret, client, 0x002E, 0x0500);
+
+ mt9t112_reg_mask_set(ret, client, 0x0018, 0x0002, 0x0002);
+ mt9t112_reg_mask_set(ret, client, 0x3B82, 0x0004, 0x0004);
+
+ /* MCU disabled */
+ mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0x0004);
+
+ /* out of standby */
+ mt9t112_reg_mask_set(ret, client, 0x0018, 0x0001, 0);
+
+ mdelay(50);
+
+ /*
+ * Standby Workaround
+ * Disable Secondary I2C Pads
+ */
+ mt9t112_reg_write(ret, client, 0x0614, 0x0001);
+ mdelay(1);
+ mt9t112_reg_write(ret, client, 0x0614, 0x0001);
+ mdelay(1);
+ mt9t112_reg_write(ret, client, 0x0614, 0x0001);
+ mdelay(1);
+ mt9t112_reg_write(ret, client, 0x0614, 0x0001);
+ mdelay(1);
+ mt9t112_reg_write(ret, client, 0x0614, 0x0001);
+ mdelay(1);
+ mt9t112_reg_write(ret, client, 0x0614, 0x0001);
+ mdelay(1);
+
+ /* Poll to verify the chip is out of standby; this bit must be polled */
+ for (i = 0; i < 100; i++) {
+ mt9t112_reg_read(data, client, 0x0018);
+ if (0x4000 & data)
+ break;
+
+ mdelay(10);
+ }
+
+ return ret;
+}
+
+static int mt9t112_init_setting(const struct i2c_client *client)
+{
+ int ret;
+
+ /* Adaptive Output Clock (A) */
+ mt9t112_mcu_mask_set(ret, client, VAR(26, 160), 0x0040, 0x0000);
+
+ /* Read Mode (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 12), 0x0024);
+
+ /* Fine Correction (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 15), 0x00CC);
+
+ /* Fine IT Min (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 17), 0x01f1);
+
+ /* Fine IT Max Margin (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 19), 0x00FF);
+
+ /* Base Frame Lines (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 29), 0x032D);
+
+ /* Min Line Length (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 31), 0x073a);
+
+ /* Line Length (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 37), 0x07d0);
+
+ /* Adaptive Output Clock (B) */
+ mt9t112_mcu_mask_set(ret, client, VAR(27, 160), 0x0040, 0x0000);
+
+ /* Row Start (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 74), 0x004);
+
+ /* Column Start (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 76), 0x004);
+
+ /* Row End (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 78), 0x60B);
+
+ /* Column End (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 80), 0x80B);
+
+ /* Fine Correction (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 87), 0x008C);
+
+ /* Fine IT Min (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 89), 0x01F1);
+
+ /* Fine IT Max Margin (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 91), 0x00FF);
+
+ /* Base Frame Lines (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 101), 0x0668);
+
+ /* Min Line Length (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 103), 0x0AF0);
+
+ /* Line Length (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 109), 0x0AF0);
+
+ /*
+ * Flicker Detection registers
+ * This section should be replaced whenever a new timing file is
+ * generated. All of the following registers need to be replaced.
+ * They are generated by the Register Wizard, but the user can
+ * modify them. For details, see auto flicker detection tuning.
+ */
+
+ /* FD_FDPERIOD_SELECT */
+ mt9t112_mcu_write(ret, client, VAR8(8, 5), 0x01);
+
+ /* PRI_B_CONFIG_FD_ALGO_RUN */
+ mt9t112_mcu_write(ret, client, VAR(27, 17), 0x0003);
+
+ /* PRI_A_CONFIG_FD_ALGO_RUN */
+ mt9t112_mcu_write(ret, client, VAR(26, 17), 0x0003);
+
+ /*
+ * AFD range detection tuning registers
+ */
+
+ /* search_f1_50 */
+ mt9t112_mcu_write(ret, client, VAR8(18, 165), 0x25);
+
+ /* search_f2_50 */
+ mt9t112_mcu_write(ret, client, VAR8(18, 166), 0x28);
+
+ /* search_f1_60 */
+ mt9t112_mcu_write(ret, client, VAR8(18, 167), 0x2C);
+
+ /* search_f2_60 */
+ mt9t112_mcu_write(ret, client, VAR8(18, 168), 0x2F);
+
+ /* period_50Hz (A) */
+ mt9t112_mcu_write(ret, client, VAR8(18, 68), 0xBA);
+
+ /* secret register by Aptina */
+ /* period_50Hz (A MSB) */
+ mt9t112_mcu_write(ret, client, VAR8(18, 303), 0x00);
+
+ /* period_60Hz (A) */
+ mt9t112_mcu_write(ret, client, VAR8(18, 69), 0x9B);
+
+ /* secret register by Aptina */
+ /* period_60Hz (A MSB) */
+ mt9t112_mcu_write(ret, client, VAR8(18, 301), 0x00);
+
+ /* period_50Hz (B) */
+ mt9t112_mcu_write(ret, client, VAR8(18, 140), 0x82);
+
+ /* secret register by Aptina */
+ /* period_50Hz (B) MSB */
+ mt9t112_mcu_write(ret, client, VAR8(18, 304), 0x00);
+
+ /* period_60Hz (B) */
+ mt9t112_mcu_write(ret, client, VAR8(18, 141), 0x6D);
+
+ /* secret register by Aptina */
+ /* period_60Hz (B) MSB */
+ mt9t112_mcu_write(ret, client, VAR8(18, 302), 0x00);
+
+ /* FD Mode */
+ mt9t112_mcu_write(ret, client, VAR8(8, 2), 0x10);
+
+ /* Stat_min */
+ mt9t112_mcu_write(ret, client, VAR8(8, 9), 0x02);
+
+ /* Stat_max */
+ mt9t112_mcu_write(ret, client, VAR8(8, 10), 0x03);
+
+ /* Min_amplitude */
+ mt9t112_mcu_write(ret, client, VAR8(8, 12), 0x0A);
+
+ /* RX FIFO Watermark (A) */
+ mt9t112_mcu_write(ret, client, VAR(18, 70), 0x0014);
+
+ /* RX FIFO Watermark (B) */
+ mt9t112_mcu_write(ret, client, VAR(18, 142), 0x0014);
+
+ /*
+ * MCLK: 16 MHz
+ * PCLK: 73 MHz
+ * CorePixCLK: 36.5 MHz
+ */
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x0044), 133);
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x0045), 110);
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x008c), 130);
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x008d), 108);
+
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x00a5), 27);
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x00a6), 30);
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x00a7), 32);
+ mt9t112_mcu_write(ret, client, VAR8(18, 0x00a8), 35);
+
+ return ret;
+}
+
+static int mt9t112_auto_focus_setting(const struct i2c_client *client)
+{
+ int ret;
+
+ mt9t112_mcu_write(ret, client, VAR(12, 13), 0x000F);
+ mt9t112_mcu_write(ret, client, VAR(12, 23), 0x0F0F);
+ mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
+
+ mt9t112_reg_write(ret, client, 0x0614, 0x0000);
+
+ mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05);
+ mt9t112_mcu_write(ret, client, VAR8(12, 2), 0x02);
+ mt9t112_mcu_write(ret, client, VAR(12, 3), 0x0002);
+ mt9t112_mcu_write(ret, client, VAR(17, 3), 0x8001);
+ mt9t112_mcu_write(ret, client, VAR(17, 11), 0x0025);
+ mt9t112_mcu_write(ret, client, VAR(17, 13), 0x0193);
+ mt9t112_mcu_write(ret, client, VAR8(17, 33), 0x18);
+ mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05);
+
+ return ret;
+}
+
+static int mt9t112_auto_focus_trigger(const struct i2c_client *client)
+{
+ int ret;
+
+ mt9t112_mcu_write(ret, client, VAR8(12, 25), 0x01);
+
+ return ret;
+}
+
+static int mt9t112_init_camera(const struct i2c_client *client)
+{
+ int ret;
+
+ ECHECKER(ret, mt9t112_reset(client));
+
+ ECHECKER(ret, mt9t112_init_pll(client));
+
+ ECHECKER(ret, mt9t112_init_setting(client));
+
+ ECHECKER(ret, mt9t112_auto_focus_setting(client));
+
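+ /* MCU enabled again (clears the disable bit set in mt9t112_init_pll()) */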
+ mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0);
+
+ /* Analog setting B */
+ mt9t112_reg_write(ret, client, 0x3084, 0x2409);
+ mt9t112_reg_write(ret, client, 0x3092, 0x0A49);
+ mt9t112_reg_write(ret, client, 0x3094, 0x4949);
+ mt9t112_reg_write(ret, client, 0x3096, 0x4950);
+
+ /*
+ * Disable adaptive clock
+ * PRI_A_CONFIG_JPEG_OB_TX_CONTROL_VAR
+ * PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR
+ */
+ mt9t112_mcu_write(ret, client, VAR(26, 160), 0x0A2E);
+ mt9t112_mcu_write(ret, client, VAR(27, 160), 0x0A2E);
+
+ /* Configure Status in Status_before_length format and enable the header */
+ /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */
+ mt9t112_mcu_write(ret, client, VAR(27, 144), 0x0CB4);
+
+ /* Enable JPEG in context B */
+ /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */
+ mt9t112_mcu_write(ret, client, VAR8(27, 142), 0x01);
+
+ /* Disable Dac_TXLO */
+ mt9t112_reg_write(ret, client, 0x316C, 0x350F);
+
+ /* Set max slew rates */
+ mt9t112_reg_write(ret, client, 0x1E, 0x777);
+
+ return ret;
+}
+
+/************************************************************************
+
+
+ soc_camera_ops
+
+
+************************************************************************/
+static int mt9t112_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ return 0;
+}
+
+static unsigned long mt9t112_query_bus_param(struct soc_camera_device *icd)
+{
+ struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+ unsigned long flags = SOCAM_MASTER | SOCAM_VSYNC_ACTIVE_HIGH |
+ SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_HIGH;
+
+ flags |= (priv->info->flags & MT9T112_FLAG_PCLK_RISING_EDGE) ?
+ SOCAM_PCLK_SAMPLE_RISING : SOCAM_PCLK_SAMPLE_FALLING;
+
+ if (priv->info->flags & MT9T112_FLAG_DATAWIDTH_8)
+ flags |= SOCAM_DATAWIDTH_8;
+ else
+ flags |= SOCAM_DATAWIDTH_10;
+
+ return soc_camera_apply_sensor_flags(icl, flags);
+}
+
+static struct soc_camera_ops mt9t112_ops = {
+ .set_bus_param = mt9t112_set_bus_param,
+ .query_bus_param = mt9t112_query_bus_param,
+};
+
+/************************************************************************
+
+
+ v4l2_subdev_core_ops
+
+
+************************************************************************/
+static int mt9t112_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ id->ident = priv->model;
+ id->revision = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mt9t112_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = sd->priv;
+ int ret;
+
+ reg->size = 2;
+ mt9t112_reg_read(ret, client, reg->reg);
+
+ reg->val = (__u64)ret;
+
+ return 0;
+}
+
+static int mt9t112_s_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = sd->priv;
+ int ret;
+
+ mt9t112_reg_write(ret, client, reg->reg, reg->val);
+
+ return ret;
+}
+#endif
+
+static struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = {
+ .g_chip_ident = mt9t112_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = mt9t112_g_register,
+ .s_register = mt9t112_s_register,
+#endif
+};
+
+
+/************************************************************************
+
+
+ v4l2_subdev_video_ops
+
+
+************************************************************************/
+static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ int ret = 0;
+
+ if (!enable) {
+ /* FIXME
+ *
+ * If the user selects a large output size and uses it for a long
+ * time, the mt9t112 camera becomes very warm.
+ *
+ * The current driver cannot stop the mt9t112 camera, so set a
+ * small size here to work around the problem.
+ */
+ mt9t112_set_a_frame_size(client, VGA_WIDTH, VGA_HEIGHT);
+ return ret;
+ }
+
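+ /* One-time hardware bring-up on the first stream start */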
+ if (!(priv->flags & INIT_DONE)) {
+ u16 param = (MT9T112_FLAG_PCLK_RISING_EDGE &
+ priv->info->flags) ? 0x0001 : 0x0000;
+
+ ECHECKER(ret, mt9t112_init_camera(client));
+
+ /* Invert PCLK (Data sampled on falling edge of pixclk) */
+ mt9t112_reg_write(ret, client, 0x3C20, param);
+
+ mdelay(5);
+
+ priv->flags |= INIT_DONE;
+ }
+
+ mt9t112_mcu_write(ret, client, VAR(26, 7), priv->format->fmt);
+ mt9t112_mcu_write(ret, client, VAR(26, 9), priv->format->order);
+ mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06);
+
+ mt9t112_set_a_frame_size(client,
+ priv->frame.width,
+ priv->frame.height);
+
+ ECHECKER(ret, mt9t112_auto_focus_trigger(client));
+
+ dev_dbg(&client->dev, "format : %d\n", priv->format->code);
+ dev_dbg(&client->dev, "size : %d x %d\n",
+ priv->frame.width,
+ priv->frame.height);
+
+ CLOCK_INFO(client, EXT_CLOCK);
+
+ return ret;
+}
+
+static int mt9t112_set_params(struct i2c_client *client, u32 width, u32 height,
+ enum v4l2_mbus_pixelcode code)
+{
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ int i;
+
+ priv->format = NULL;
+
+ /*
+ * frame size check
+ */
+ mt9t112_frame_check(&width, &height);
+
+ /*
+ * get color format
+ */
+ for (i = 0; i < ARRAY_SIZE(mt9t112_cfmts); i++)
+ if (mt9t112_cfmts[i].code == code)
+ break;
+
+ if (i == ARRAY_SIZE(mt9t112_cfmts))
+ return -EINVAL;
+
+ priv->frame.width = (u16)width;
+ priv->frame.height = (u16)height;
+
+ priv->format = mt9t112_cfmts + i;
+
+ return 0;
+}
+
+static int mt9t112_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = VGA_WIDTH;
+ a->bounds.height = VGA_HEIGHT;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static int mt9t112_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ a->c.left = 0;
+ a->c.top = 0;
+ a->c.width = VGA_WIDTH;
+ a->c.height = VGA_HEIGHT;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+static int mt9t112_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct i2c_client *client = sd->priv;
+ struct v4l2_rect *rect = &a->c;
+
+ return mt9t112_set_params(client, rect->width, rect->height,
+ V4L2_MBUS_FMT_YUYV8_2X8_BE);
+}
+
+static int mt9t112_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9t112_priv *priv = to_mt9t112(client);
+
+ if (!priv->format) {
+ int ret = mt9t112_set_params(client, VGA_WIDTH, VGA_HEIGHT,
+ V4L2_MBUS_FMT_YUYV8_2X8_BE);
+ if (ret < 0)
+ return ret;
+ }
+
+ mf->width = priv->frame.width;
+ mf->height = priv->frame.height;
+ /* TODO: set colorspace */
+ mf->code = priv->format->code;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int mt9t112_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = sd->priv;
+
+ /* TODO: set colorspace */
+ return mt9t112_set_params(client, mf->width, mf->height, mf->code);
+}
+
+static int mt9t112_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ mt9t112_frame_check(&mf->width, &mf->height);
+
+ /* TODO: set colorspace */
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int mt9t112_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if ((unsigned int)index >= ARRAY_SIZE(mt9t112_cfmts))
+ return -EINVAL;
+
+ *code = mt9t112_cfmts[index].code;
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = {
+ .s_stream = mt9t112_s_stream,
+ .g_mbus_fmt = mt9t112_g_fmt,
+ .s_mbus_fmt = mt9t112_s_fmt,
+ .try_mbus_fmt = mt9t112_try_fmt,
+ .cropcap = mt9t112_cropcap,
+ .g_crop = mt9t112_g_crop,
+ .s_crop = mt9t112_s_crop,
+ .enum_mbus_fmt = mt9t112_enum_fmt,
+};
+
+/************************************************************************
+
+
+ i2c driver
+
+
+************************************************************************/
+static struct v4l2_subdev_ops mt9t112_subdev_ops = {
+ .core = &mt9t112_subdev_core_ops,
+ .video = &mt9t112_subdev_video_ops,
+};
+
+static int mt9t112_camera_probe(struct soc_camera_device *icd,
+ struct i2c_client *client)
+{
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ const char *devname;
+ int chipid;
+
+ /*
+ * We must have a parent by now. And it cannot be a wrong one.
+ * So this entire test is completely redundant.
+ */
+ if (!icd->dev.parent ||
+ to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
+ return -ENODEV;
+
+ /*
+ * check and show chip ID
+ */
+ mt9t112_reg_read(chipid, client, 0x0000);
+
+ switch (chipid) {
+ case 0x2680:
+ devname = "mt9t111";
+ priv->model = V4L2_IDENT_MT9T111;
+ break;
+ case 0x2682:
+ devname = "mt9t112";
+ priv->model = V4L2_IDENT_MT9T112;
+ break;
+ default:
+ dev_err(&client->dev, "Product ID error %04x\n", chipid);
+ return -ENODEV;
+ }
+
+ dev_info(&client->dev, "%s chip ID %04x\n", devname, chipid);
+
+ return 0;
+}
+
+static int mt9t112_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct mt9t112_priv *priv;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl;
+ int ret;
+
+ if (!icd) {
+ dev_err(&client->dev, "mt9t112: missing soc-camera data!\n");
+ return -EINVAL;
+ }
+
+ icl = to_soc_camera_link(icd);
+ if (!icl || !icl->priv)
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->info = icl->priv;
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
+
+ icd->ops = &mt9t112_ops;
+
+ ret = mt9t112_camera_probe(icd, client);
+ if (ret) {
+ icd->ops = NULL;
+ i2c_set_clientdata(client, NULL);
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static int mt9t112_remove(struct i2c_client *client)
+{
+ struct mt9t112_priv *priv = to_mt9t112(client);
+ struct soc_camera_device *icd = client->dev.platform_data;
+
+ icd->ops = NULL;
+ i2c_set_clientdata(client, NULL);
+ kfree(priv);
+ return 0;
+}
+
+static const struct i2c_device_id mt9t112_id[] = {
+ { "mt9t112", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mt9t112_id);
+
+static struct i2c_driver mt9t112_i2c_driver = {
+ .driver = {
+ .name = "mt9t112",
+ },
+ .probe = mt9t112_probe,
+ .remove = mt9t112_remove,
+ .id_table = mt9t112_id,
+};
+
+/************************************************************************
+
+
+ module function
+
+
+************************************************************************/
+static int __init mt9t112_module_init(void)
+{
+ return i2c_add_driver(&mt9t112_i2c_driver);
+}
+
+static void __exit mt9t112_module_exit(void)
+{
+ i2c_del_driver(&mt9t112_i2c_driver);
+}
+
+module_init(mt9t112_module_init);
+module_exit(mt9t112_module_exit);
+
+MODULE_DESCRIPTION("SoC Camera driver for mt9t112");
+MODULE_AUTHOR("Kuninori Morimoto");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 995607f9d3b..91df7ec91fb 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -18,9 +18,11 @@
#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
-/* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
+/*
+ * mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
* The platform has to define ctruct i2c_board_info objects and link to them
- * from struct soc_camera_link */
+ * from struct soc_camera_link
+ */
static char *sensor_type;
module_param(sensor_type, charp, S_IRUGO);
@@ -62,41 +64,49 @@ MODULE_PARM_DESC(sensor_type, "Sensor type: \"colour\" or \"monochrome\"");
#define MT9V022_COLUMN_SKIP 1
#define MT9V022_ROW_SKIP 4
-static const struct soc_camera_data_format mt9v022_colour_formats[] = {
- /* Order important: first natively supported,
- * second supported with a GPIO extender */
- {
- .name = "Bayer (sRGB) 10 bit",
- .depth = 10,
- .fourcc = V4L2_PIX_FMT_SBGGR16,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }, {
- .name = "Bayer (sRGB) 8 bit",
- .depth = 8,
- .fourcc = V4L2_PIX_FMT_SBGGR8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }
+/* MT9V022 has only one fixed colorspace per pixelcode */
+struct mt9v022_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+/* Find a data format by a pixel code in an array */
+static const struct mt9v022_datafmt *mt9v022_find_datafmt(
+ enum v4l2_mbus_pixelcode code, const struct mt9v022_datafmt *fmt,
+ int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ if (fmt[i].code == code)
+ return fmt + i;
+
+ return NULL;
+}
+
+static const struct mt9v022_datafmt mt9v022_colour_fmts[] = {
+ /*
+ * Order important: first natively supported,
+ * second supported with a GPIO extender
+ */
+ {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
};
-static const struct soc_camera_data_format mt9v022_monochrome_formats[] = {
+static const struct mt9v022_datafmt mt9v022_monochrome_fmts[] = {
/* Order important - see above */
- {
- .name = "Monochrome 10 bit",
- .depth = 10,
- .fourcc = V4L2_PIX_FMT_Y16,
- }, {
- .name = "Monochrome 8 bit",
- .depth = 8,
- .fourcc = V4L2_PIX_FMT_GREY,
- },
+ {V4L2_MBUS_FMT_Y10_1X10, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_GREY8_1X8, V4L2_COLORSPACE_JPEG},
};
struct mt9v022 {
struct v4l2_subdev subdev;
struct v4l2_rect rect; /* Sensor window */
- __u32 fourcc;
+ const struct mt9v022_datafmt *fmt;
+ const struct mt9v022_datafmt *fmts;
+ int num_fmts;
int model; /* V4L2_IDENT_MT9V022* codes from v4l2-chip-ident.h */
u16 chip_control;
+ unsigned short y_skip_top; /* Lines to skip at the top */
};
static struct mt9v022 *to_mt9v022(const struct i2c_client *client)
@@ -143,9 +153,11 @@ static int mt9v022_init(struct i2c_client *client)
struct mt9v022 *mt9v022 = to_mt9v022(client);
int ret;
- /* Almost the default mode: master, parallel, simultaneous, and an
+ /*
+ * Almost the default mode: master, parallel, simultaneous, and an
* undocumented bit 0x200, which is present in table 7, but not in 8,
- * plus snapshot mode to disable scan for now */
+ * plus snapshot mode to disable scan for now
+ */
mt9v022->chip_control |= 0x10;
ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
if (!ret)
@@ -265,12 +277,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
struct i2c_client *client = sd->priv;
struct mt9v022 *mt9v022 = to_mt9v022(client);
struct v4l2_rect rect = a->c;
- struct soc_camera_device *icd = client->dev.platform_data;
int ret;
/* Bayer format - even size lengths */
- if (mt9v022->fourcc == V4L2_PIX_FMT_SBGGR8 ||
- mt9v022->fourcc == V4L2_PIX_FMT_SBGGR16) {
+ if (mt9v022->fmts == mt9v022_colour_fmts) {
rect.width = ALIGN(rect.width, 2);
rect.height = ALIGN(rect.height, 2);
/* Let the user play with the starting pixel */
@@ -287,10 +297,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (ret >= 0) {
if (ret & 1) /* Autoexposure */
ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH,
- rect.height + icd->y_skip_top + 43);
+ rect.height + mt9v022->y_skip_top + 43);
else
ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
- rect.height + icd->y_skip_top + 43);
+ rect.height + mt9v022->y_skip_top + 43);
}
/* Setup frame format: defaults apart from width and height */
if (!ret)
@@ -298,8 +308,10 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (!ret)
ret = reg_write(client, MT9V022_ROW_START, rect.top);
if (!ret)
- /* Default 94, Phytec driver says:
- * "width + horizontal blank >= 660" */
+ /*
+ * Default 94, Phytec driver says:
+ * "width + horizontal blank >= 660"
+ */
ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING,
rect.width > 660 - 43 ? 43 :
660 - rect.width);
@@ -309,7 +321,7 @@ static int mt9v022_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect.width);
if (!ret)
ret = reg_write(client, MT9V022_WINDOW_HEIGHT,
- rect.height + icd->y_skip_top);
+ rect.height + mt9v022->y_skip_top);
if (ret < 0)
return ret;
@@ -346,46 +358,48 @@ static int mt9v022_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int mt9v022_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9v022_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- pix->width = mt9v022->rect.width;
- pix->height = mt9v022->rect.height;
- pix->pixelformat = mt9v022->fourcc;
- pix->field = V4L2_FIELD_NONE;
- pix->colorspace = V4L2_COLORSPACE_SRGB;
+ mf->width = mt9v022->rect.width;
+ mf->height = mt9v022->rect.height;
+ mf->code = mt9v022->fmt->code;
+ mf->colorspace = mt9v022->fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
return 0;
}
-static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9v022_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct mt9v022 *mt9v022 = to_mt9v022(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_crop a = {
.c = {
.left = mt9v022->rect.left,
.top = mt9v022->rect.top,
- .width = pix->width,
- .height = pix->height,
+ .width = mf->width,
+ .height = mf->height,
},
};
int ret;
- /* The caller provides a supported format, as verified per call to
- * icd->try_fmt(), datawidth is from our supported format list */
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_GREY:
- case V4L2_PIX_FMT_Y16:
+ /*
+ * The caller provides a supported format, as verified per call to
+ * icd->try_fmt(), datawidth is from our supported format list
+ */
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_GREY8_1X8:
+ case V4L2_MBUS_FMT_Y10_1X10:
if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATM)
return -EINVAL;
break;
- case V4L2_PIX_FMT_SBGGR8:
- case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_MBUS_FMT_SBGGR8_1X8:
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC)
return -EINVAL;
break;
@@ -399,26 +413,38 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
/* No support for scaling on this camera, just crop. */
ret = mt9v022_s_crop(sd, &a);
if (!ret) {
- pix->width = mt9v022->rect.width;
- pix->height = mt9v022->rect.height;
- mt9v022->fourcc = pix->pixelformat;
+ mf->width = mt9v022->rect.width;
+ mf->height = mt9v022->rect.height;
+ mt9v022->fmt = mt9v022_find_datafmt(mf->code,
+ mt9v022->fmts, mt9v022->num_fmts);
+ mf->colorspace = mt9v022->fmt->colorspace;
}
return ret;
}
-static int mt9v022_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int mt9v022_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
- struct soc_camera_device *icd = client->dev.platform_data;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- int align = pix->pixelformat == V4L2_PIX_FMT_SBGGR8 ||
- pix->pixelformat == V4L2_PIX_FMT_SBGGR16;
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+ const struct mt9v022_datafmt *fmt;
+ int align = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
+ mf->code == V4L2_MBUS_FMT_SBGGR10_1X10;
- v4l_bound_align_image(&pix->width, MT9V022_MIN_WIDTH,
+ v4l_bound_align_image(&mf->width, MT9V022_MIN_WIDTH,
MT9V022_MAX_WIDTH, align,
- &pix->height, MT9V022_MIN_HEIGHT + icd->y_skip_top,
- MT9V022_MAX_HEIGHT + icd->y_skip_top, align, 0);
+ &mf->height, MT9V022_MIN_HEIGHT + mt9v022->y_skip_top,
+ MT9V022_MAX_HEIGHT + mt9v022->y_skip_top, align, 0);
+
+ fmt = mt9v022_find_datafmt(mf->code, mt9v022->fmts,
+ mt9v022->num_fmts);
+ if (!fmt) {
+ fmt = mt9v022->fmt;
+ mf->code = fmt->code;
+ }
+
+ mf->colorspace = fmt->colorspace;
return 0;
}
@@ -635,8 +661,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
48 + range / 2) / range + 16;
if (gain >= 32)
gain &= ~1;
- /* The user wants to set gain manually, hope, she
- * knows, what she's doing... Switch AGC off. */
+ /*
+ * The user wants to set gain manually; hope she
+ * knows what she's doing... Switch AGC off.
+ */
if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
return -EIO;
@@ -655,8 +683,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
unsigned long range = qctrl->maximum - qctrl->minimum;
unsigned long shutter = ((ctrl->value - qctrl->minimum) *
479 + range / 2) / range + 1;
- /* The user wants to set shutter width manually, hope,
- * she knows, what she's doing... Switch AEC off. */
+ /*
+ * The user wants to set shutter width manually; hope
+ * she knows what she's doing... Switch AEC off.
+ */
if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0)
return -EIO;
@@ -689,8 +719,10 @@ static int mt9v022_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return 0;
}
-/* Interface active, can use i2c. If it fails, it can indeed mean, that
- * this wasn't our capture interface, so, we wait for the right one */
+/*
+ * Interface active, can use i2c. If it fails, it can indeed mean that
+ * this wasn't our capture interface, so we wait for the right one.
+ */
static int mt9v022_video_probe(struct soc_camera_device *icd,
struct i2c_client *client)
{
@@ -733,17 +765,17 @@ static int mt9v022_video_probe(struct soc_camera_device *icd,
!strcmp("color", sensor_type))) {
ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11);
mt9v022->model = V4L2_IDENT_MT9V022IX7ATC;
- icd->formats = mt9v022_colour_formats;
+ mt9v022->fmts = mt9v022_colour_fmts;
} else {
ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 0x11);
mt9v022->model = V4L2_IDENT_MT9V022IX7ATM;
- icd->formats = mt9v022_monochrome_formats;
+ mt9v022->fmts = mt9v022_monochrome_fmts;
}
if (ret < 0)
goto ei2c;
- icd->num_formats = 0;
+ mt9v022->num_fmts = 0;
/*
* This is a 10bit sensor, so by default we only allow 10bit.
@@ -756,14 +788,14 @@ static int mt9v022_video_probe(struct soc_camera_device *icd,
flags = SOCAM_DATAWIDTH_10;
if (flags & SOCAM_DATAWIDTH_10)
- icd->num_formats++;
+ mt9v022->num_fmts++;
else
- icd->formats++;
+ mt9v022->fmts++;
if (flags & SOCAM_DATAWIDTH_8)
- icd->num_formats++;
+ mt9v022->num_fmts++;
- mt9v022->fourcc = icd->formats->fourcc;
+ mt9v022->fmt = &mt9v022->fmts[0];
dev_info(&client->dev, "Detected a MT9V022 chip ID %x, %s sensor\n",
data, mt9v022->model == V4L2_IDENT_MT9V022IX7ATM ?
@@ -787,6 +819,16 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
icl->free_bus(icl);
}
+static int mt9v022_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ *lines = mt9v022->y_skip_top;
+
+ return 0;
+}
+
static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
.g_ctrl = mt9v022_g_ctrl,
.s_ctrl = mt9v022_s_ctrl,
@@ -797,19 +839,38 @@ static struct v4l2_subdev_core_ops mt9v022_subdev_core_ops = {
#endif
};
+static int mt9v022_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ struct i2c_client *client = sd->priv;
+ struct mt9v022 *mt9v022 = to_mt9v022(client);
+
+ if ((unsigned int)index >= mt9v022->num_fmts)
+ return -EINVAL;
+
+ *code = mt9v022->fmts[index].code;
+ return 0;
+}
+
static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
.s_stream = mt9v022_s_stream,
- .s_fmt = mt9v022_s_fmt,
- .g_fmt = mt9v022_g_fmt,
- .try_fmt = mt9v022_try_fmt,
+ .s_mbus_fmt = mt9v022_s_fmt,
+ .g_mbus_fmt = mt9v022_g_fmt,
+ .try_mbus_fmt = mt9v022_try_fmt,
.s_crop = mt9v022_s_crop,
.g_crop = mt9v022_g_crop,
.cropcap = mt9v022_cropcap,
+ .enum_mbus_fmt = mt9v022_enum_fmt,
+};
+
+static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
+ .g_skip_top_lines = mt9v022_g_skip_top_lines,
};
static struct v4l2_subdev_ops mt9v022_subdev_ops = {
.core = &mt9v022_subdev_core_ops,
.video = &mt9v022_subdev_video_ops,
+ .sensor = &mt9v022_subdev_sensor_ops,
};
static int mt9v022_probe(struct i2c_client *client,
@@ -851,8 +912,7 @@ static int mt9v022_probe(struct i2c_client *client,
* MT9V022 _really_ corrupts the first read out line.
* TODO: verify on i.MX31
*/
- icd->y_skip_top = 1;
-
+ mt9v022->y_skip_top = 1;
mt9v022->rect.left = MT9V022_COLUMN_SKIP;
mt9v022->rect.top = MT9V022_ROW_SKIP;
mt9v022->rect.width = MT9V022_MAX_WIDTH;
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 72802291e81..2ba14fb5b03 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -37,6 +37,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/videobuf-dma-contig.h>
+#include <media/soc_mediabus.h>
#include <asm/dma.h>
#include <asm/fiq.h>
@@ -94,14 +95,16 @@
/* buffer for one video frame */
struct mx1_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
- const struct soc_camera_data_format *fmt;
- int inwork;
+ struct videobuf_buffer vb;
+ enum v4l2_mbus_pixelcode code;
+ int inwork;
};
-/* i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor
+/*
+ * i.MX1/i.MXL is only supposed to handle one camera on its Camera Sensor
* Interface. If anyone ever builds hardware to enable more than
- * one camera, they will have to modify this driver too */
+ * one camera, they will have to modify this driver too
+ */
struct mx1_camera_dev {
struct soc_camera_host soc_host;
struct soc_camera_device *icd;
@@ -126,9 +129,13 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
- *size = icd->user_width * icd->user_height *
- ((icd->current_fmt->depth + 7) >> 3);
+ *size = bytes_per_line * icd->user_height;
if (!*count)
*count = 32;
@@ -151,8 +158,10 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
- /* This waits until this buffer is out of danger, i.e., until it is no
- * longer in STATE_QUEUED or STATE_ACTIVE */
+ /*
+ * This waits until this buffer is out of danger, i.e., until it is no
+ * longer in STATE_QUEUED or STATE_ACTIVE
+ */
videobuf_waiton(vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
@@ -165,6 +174,11 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
struct soc_camera_device *icd = vq->priv_data;
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
int ret;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -174,22 +188,24 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
BUG_ON(NULL == icd->current_fmt);
- /* I think, in buf_prepare you only have to protect global data,
- * the actual buffer is yours */
+ /*
+ * I think in buf_prepare you only have to protect global data;
+ * the actual buffer is yours
+ */
buf->inwork = 1;
- if (buf->fmt != icd->current_fmt ||
+ if (buf->code != icd->current_fmt->code ||
vb->width != icd->user_width ||
vb->height != icd->user_height ||
vb->field != field) {
- buf->fmt = icd->current_fmt;
+ buf->code = icd->current_fmt->code;
vb->width = icd->user_width;
vb->height = icd->user_height;
vb->field = field;
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3);
+ vb->size = bytes_per_line * vb->height;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
@@ -381,8 +397,10 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
lcdclk = clk_get_rate(pcdev->clk);
- /* We verify platform_mclk_10khz != 0, so if anyone breaks it, here
- * they get a nice Oops */
+ /*
+ * We verify platform_mclk_10khz != 0, so if anyone breaks it, here
+ * they get a nice Oops
+ */
div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
dev_dbg(pcdev->icd->dev.parent,
@@ -420,8 +438,10 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
clk_disable(pcdev->clk);
}
-/* The following two functions absolutely depend on the fact, that
- * there can be only one camera on i.MX1/i.MXL camera sensor interface */
+/*
+ * The following two functions absolutely depend on the fact, that
+ * there can be only one camera on i.MX1/i.MXL camera sensor interface
+ */
static int mx1_camera_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
@@ -487,12 +507,10 @@ static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
/* MX1 supports only 8bit buswidth */
common_flags = soc_camera_bus_param_compatible(camera_flags,
- CSI_BUS_FLAGS);
+ CSI_BUS_FLAGS);
if (!common_flags)
return -EINVAL;
- icd->buswidth = 8;
-
/* Make choises, based on platform choice */
if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
(common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
@@ -545,7 +563,8 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
- int ret;
+ struct v4l2_mbus_framefmt mf;
+ int ret, buswidth;
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
@@ -554,12 +573,33 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- ret = v4l2_subdev_call(sd, video, s_fmt, f);
- if (!ret) {
- icd->buswidth = xlate->buswidth;
- icd->current_fmt = xlate->host_fmt;
+ buswidth = xlate->host_fmt->bits_per_sample;
+ if (buswidth > 8) {
+ dev_warn(icd->dev.parent,
+ "bits-per-sample %d for format %x unsupported\n",
+ buswidth, pix->pixelformat);
+ return -EINVAL;
}
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
return ret;
}
@@ -567,10 +607,36 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
/* TODO: limit to mx1 hardware capabilities */
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->dev.parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
/* limit to sensor capabilities */
- return v4l2_subdev_call(sd, video, try_fmt, f);
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ return 0;
}
static int mx1_camera_reqbufs(struct soc_camera_file *icf,
@@ -578,10 +644,12 @@ static int mx1_camera_reqbufs(struct soc_camera_file *icf,
{
int i;
- /* This is for locking debugging only. I removed spinlocks and now I
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
* check whether .prepare is ever called on a linked buffer, or whether
* a dma IRQ can occur for an in-work or unlinked buffer. Until now
- * it hadn't triggered */
+ * it hadn't triggered
+ */
for (i = 0; i < p->count; i++) {
struct mx1_buffer *buf = container_of(icf->vb_vidq.bufs[i],
struct mx1_buffer, vb);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 7db82bdf6f3..bd297f567dc 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -23,6 +23,7 @@
#include <media/v4l2-dev.h>
#include <media/videobuf-dma-contig.h>
#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
#include <mach/ipu.h>
#include <mach/mx3_camera.h>
@@ -63,7 +64,7 @@
struct mx3_camera_buffer {
/* common v4l buffer stuff -- must be first */
struct videobuf_buffer vb;
- const struct soc_camera_data_format *fmt;
+ enum v4l2_mbus_pixelcode code;
/* One descriptot per scatterlist (per frame) */
struct dma_async_tx_descriptor *txd;
@@ -118,8 +119,6 @@ struct dma_chan_request {
enum ipu_channel id;
};
-static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt);
-
static u32 csi_reg_read(struct mx3_camera_dev *mx3, off_t reg)
{
return __raw_readl(mx3->base + reg);
@@ -211,17 +210,16 @@ static int mx3_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
- /*
- * bits-per-pixel (depth) as specified in camera's pixel format does
- * not necessarily match what the camera interface writes to RAM, but
- * it should be good enough for now.
- */
- unsigned int bpp = DIV_ROUND_UP(icd->current_fmt->depth, 8);
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
if (!mx3_cam->idmac_channel[0])
return -EINVAL;
- *size = icd->user_width * icd->user_height * bpp;
+ *size = bytes_per_line * icd->user_height;
if (!*count)
*count = 32;
@@ -241,21 +239,26 @@ static int mx3_videobuf_prepare(struct videobuf_queue *vq,
struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf =
container_of(vb, struct mx3_camera_buffer, vb);
- /* current_fmt _must_ always be set */
- size_t new_size = icd->user_width * icd->user_height *
- ((icd->current_fmt->depth + 7) >> 3);
+ size_t new_size;
int ret;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ new_size = bytes_per_line * icd->user_height;
/*
* I think, in buf_prepare you only have to protect global data,
* the actual buffer is yours
*/
- if (buf->fmt != icd->current_fmt ||
+ if (buf->code != icd->current_fmt->code ||
vb->width != icd->user_width ||
vb->height != icd->user_height ||
vb->field != field) {
- buf->fmt = icd->current_fmt;
+ buf->code = icd->current_fmt->code;
vb->width = icd->user_width;
vb->height = icd->user_height;
vb->field = field;
@@ -348,13 +351,13 @@ static void mx3_videobuf_queue(struct videobuf_queue *vq,
struct dma_async_tx_descriptor *txd = buf->txd;
struct idmac_channel *ichan = to_idmac_chan(txd->chan);
struct idmac_video_param *video = &ichan->params.video;
- const struct soc_camera_data_format *data_fmt = icd->current_fmt;
dma_cookie_t cookie;
+ u32 fourcc = icd->current_fmt->host_fmt->fourcc;
BUG_ON(!irqs_disabled());
/* This is the configuration of one sg-element */
- video->out_pixel_fmt = fourcc_to_ipu_pix(data_fmt->fourcc);
+ video->out_pixel_fmt = fourcc_to_ipu_pix(fourcc);
video->out_width = icd->user_width;
video->out_height = icd->user_height;
video->out_stride = icd->user_width;
@@ -564,30 +567,37 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
SOCAM_DATA_ACTIVE_HIGH |
SOCAM_DATA_ACTIVE_LOW;
- /* If requested data width is supported by the platform, use it or any
- * possible lower value - i.MX31 is smart enough to schift bits */
+ /*
+ * If the requested data width is supported by the platform, use it or
+ * any possible lower value - i.MX31 is smart enough to shift bits
+ */
+ if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
+ *flags |= SOCAM_DATAWIDTH_15 | SOCAM_DATAWIDTH_10 |
+ SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
+ else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10)
+ *flags |= SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8 |
+ SOCAM_DATAWIDTH_4;
+ else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8)
+ *flags |= SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
+ else if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4)
+ *flags |= SOCAM_DATAWIDTH_4;
+
switch (buswidth) {
case 15:
- if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15))
+ if (!(*flags & SOCAM_DATAWIDTH_15))
return -EINVAL;
- *flags |= SOCAM_DATAWIDTH_15 | SOCAM_DATAWIDTH_10 |
- SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
break;
case 10:
- if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10))
+ if (!(*flags & SOCAM_DATAWIDTH_10))
return -EINVAL;
- *flags |= SOCAM_DATAWIDTH_10 | SOCAM_DATAWIDTH_8 |
- SOCAM_DATAWIDTH_4;
break;
case 8:
- if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8))
+ if (!(*flags & SOCAM_DATAWIDTH_8))
return -EINVAL;
- *flags |= SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_4;
break;
case 4:
- if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4))
+ if (!(*flags & SOCAM_DATAWIDTH_4))
return -EINVAL;
- *flags |= SOCAM_DATAWIDTH_4;
break;
default:
dev_warn(mx3_cam->soc_host.v4l2_dev.dev,
@@ -636,91 +646,92 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
pdata->dma_dev == chan->device->dev;
}
-static const struct soc_camera_data_format mx3_camera_formats[] = {
+static const struct soc_mbus_pixelfmt mx3_camera_formats[] = {
{
- .name = "Bayer (sRGB) 8 bit",
- .depth = 8,
- .fourcc = V4L2_PIX_FMT_SBGGR8,
- .colorspace = V4L2_COLORSPACE_SRGB,
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .name = "Bayer BGGR (sRGB) 8 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
}, {
- .name = "Monochrome 8 bit",
- .depth = 8,
- .fourcc = V4L2_PIX_FMT_GREY,
- .colorspace = V4L2_COLORSPACE_JPEG,
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .name = "Monochrome 8 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
},
};
-static bool buswidth_supported(struct soc_camera_host *ici, int depth)
+/* This will be corrected as we get more formats */
+static bool mx3_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
{
- struct mx3_camera_dev *mx3_cam = ici->priv;
-
- switch (depth) {
- case 4:
- return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_4);
- case 8:
- return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_8);
- case 10:
- return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_10);
- case 15:
- return !!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15);
- }
- return false;
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx,
struct soc_camera_format_xlate *xlate)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- int formats = 0, buswidth, ret;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->dev.parent;
+ int formats = 0, ret;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
- buswidth = icd->formats[idx].depth;
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
- if (!buswidth_supported(ici, buswidth))
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(icd->dev.parent,
+ "Invalid format code #%d: %d\n", idx, code);
return 0;
+ }
- ret = mx3_camera_try_bus_param(icd, buswidth);
+ /* This also checks support for the requested bits-per-sample */
+ ret = mx3_camera_try_bus_param(icd, fmt->bits_per_sample);
if (ret < 0)
return 0;
- switch (icd->formats[idx].fourcc) {
- case V4L2_PIX_FMT_SGRBG10:
+ switch (code) {
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
formats++;
if (xlate) {
- xlate->host_fmt = &mx3_camera_formats[0];
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = buswidth;
+ xlate->host_fmt = &mx3_camera_formats[0];
+ xlate->code = code;
xlate++;
- dev_dbg(icd->dev.parent,
- "Providing format %s using %s\n",
- mx3_camera_formats[0].name,
- icd->formats[idx].name);
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ mx3_camera_formats[0].name, code);
}
- goto passthrough;
- case V4L2_PIX_FMT_Y16:
+ break;
+ case V4L2_MBUS_FMT_Y10_1X10:
formats++;
if (xlate) {
- xlate->host_fmt = &mx3_camera_formats[1];
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = buswidth;
+ xlate->host_fmt = &mx3_camera_formats[1];
+ xlate->code = code;
xlate++;
- dev_dbg(icd->dev.parent,
- "Providing format %s using %s\n",
- mx3_camera_formats[0].name,
- icd->formats[idx].name);
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ mx3_camera_formats[1].name, code);
}
+ break;
default:
-passthrough:
- /* Generic pass-through */
- formats++;
- if (xlate) {
- xlate->host_fmt = icd->formats + idx;
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = buswidth;
- xlate++;
- dev_dbg(icd->dev.parent,
- "Providing format %s in pass-through mode\n",
- icd->formats[idx].name);
- }
+ if (!mx3_camera_packing_supported(fmt))
+ return 0;
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "Providing format %x in pass-through mode\n",
+ fmt->fourcc);
}
return formats;
@@ -804,8 +815,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE};
- struct v4l2_pix_format *pix = &f.fmt.pix;
+ struct v4l2_mbus_framefmt mf;
int ret;
soc_camera_limit_side(&rect->left, &rect->width, 0, 2, 4096);
@@ -816,19 +826,19 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
return ret;
/* The capture device might have changed its output */
- ret = v4l2_subdev_call(sd, video, g_fmt, &f);
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
if (ret < 0)
return ret;
- if (pix->width & 7) {
+ if (mf.width & 7) {
/* Ouch! We can only handle 8-byte aligned width... */
- stride_align(&pix->width);
- ret = v4l2_subdev_call(sd, video, s_fmt, &f);
+ stride_align(&mf.width);
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
if (ret < 0)
return ret;
}
- if (pix->width != icd->user_width || pix->height != icd->user_height) {
+ if (mf.width != icd->user_width || mf.height != icd->user_height) {
/*
* We now know pixel formats and can decide upon DMA-channel(s)
* So far only direct camera-to-memory is supported
@@ -839,14 +849,14 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
return ret;
}
- configure_geometry(mx3_cam, pix->width, pix->height);
+ configure_geometry(mx3_cam, mf.width, mf.height);
}
dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n",
- pix->width, pix->height);
+ mf.width, mf.height);
- icd->user_width = pix->width;
- icd->user_height = pix->height;
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
return ret;
}
@@ -859,6 +869,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
int ret;
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
@@ -883,11 +894,24 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
configure_geometry(mx3_cam, pix->width, pix->height);
- ret = v4l2_subdev_call(sd, video, s_fmt, f);
- if (!ret) {
- icd->buswidth = xlate->buswidth;
- icd->current_fmt = xlate->host_fmt;
- }
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height);
@@ -900,8 +924,8 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
__u32 pixfmt = pix->pixelformat;
- enum v4l2_field field;
int ret;
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
@@ -916,23 +940,37 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
if (pix->width > 4096)
pix->width = 4096;
- pix->bytesperline = pix->width *
- DIV_ROUND_UP(xlate->host_fmt->depth, 8);
+ pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+ xlate->host_fmt);
+ if (pix->bytesperline < 0)
+ return pix->bytesperline;
pix->sizeimage = pix->height * pix->bytesperline;
- /* camera has to see its format, but the user the original one */
- pix->pixelformat = xlate->cam_fmt->fourcc;
/* limit to sensor capabilities */
- ret = v4l2_subdev_call(sd, video, try_fmt, f);
- pix->pixelformat = xlate->host_fmt->fourcc;
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
- field = pix->field;
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->colorspace = mf.colorspace;
- if (field == V4L2_FIELD_ANY) {
+ switch (mf.field) {
+ case V4L2_FIELD_ANY:
pix->field = V4L2_FIELD_NONE;
- } else if (field != V4L2_FIELD_NONE) {
- dev_err(icd->dev.parent, "Field type %d unsupported.\n", field);
- return -EINVAL;
+ break;
+ case V4L2_FIELD_NONE:
+ break;
+ default:
+ dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ mf.field);
+ ret = -EINVAL;
}
return ret;
@@ -968,18 +1006,26 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
struct mx3_camera_dev *mx3_cam = ici->priv;
unsigned long bus_flags, camera_flags, common_flags;
u32 dw, sens_conf;
- int ret = test_platform_param(mx3_cam, icd->buswidth, &bus_flags);
+ const struct soc_mbus_pixelfmt *fmt;
+ int buswidth;
+ int ret;
const struct soc_camera_format_xlate *xlate;
struct device *dev = icd->dev.parent;
+ fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
+ if (!fmt)
+ return -EINVAL;
+
+ buswidth = fmt->bits_per_sample;
+ ret = test_platform_param(mx3_cam, buswidth, &bus_flags);
+
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
dev_warn(dev, "Format %x not found\n", pixfmt);
return -EINVAL;
}
- dev_dbg(dev, "requested bus width %d bit: %d\n",
- icd->buswidth, ret);
+ dev_dbg(dev, "requested bus width %d bit: %d\n", buswidth, ret);
if (ret < 0)
return ret;
@@ -1027,8 +1073,10 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
}
- /* Make the camera work in widest common mode, we'll take care of
- * the rest */
+ /*
+ * Make the camera work in widest common mode, we'll take care of
+ * the rest
+ */
if (common_flags & SOCAM_DATAWIDTH_15)
common_flags = (common_flags & ~SOCAM_DATAWIDTH_MASK) |
SOCAM_DATAWIDTH_15;
@@ -1078,7 +1126,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
sens_conf |= 1 << CSI_SENS_CONF_DATA_POL_SHIFT;
/* Just do what we're asked to do */
- switch (xlate->host_fmt->depth) {
+ switch (xlate->host_fmt->bits_per_sample) {
case 4:
dw = 0 << CSI_SENS_CONF_DATA_WIDTH_SHIFT;
break;
@@ -1152,8 +1200,10 @@ static int __devinit mx3_camera_probe(struct platform_device *pdev)
if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 |
MX3_CAMERA_DATAWIDTH_8 | MX3_CAMERA_DATAWIDTH_10 |
MX3_CAMERA_DATAWIDTH_15))) {
- /* Platform hasn't set available data widths. This is bad.
- * Warn and use a default. */
+ /*
+ * Platform hasn't set available data widths. This is bad.
+ * Warn and use a default.
+ */
dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
"data widths, using default 8 bit\n");
mx3_cam->platform_flags |= MX3_CAMERA_DATAWIDTH_8;
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index 5fc4ac0d88f..7400eacb4d6 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1450,12 +1450,11 @@ static int omap24xxcam_mmap(struct file *file, struct vm_area_struct *vma)
static int omap24xxcam_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
struct omap24xxcam_device *cam = omap24xxcam.priv;
struct omap24xxcam_fh *fh;
struct v4l2_format format;
- if (!cam || !cam->vfd || (cam->vfd->minor != minor))
+ if (!cam || !cam->vfd)
return -ENODEV;
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
@@ -1660,7 +1659,6 @@ static int omap24xxcam_device_register(struct v4l2_int_device *s)
strlcpy(vfd->name, CAM_NAME, sizeof(vfd->name));
vfd->fops = &omap24xxcam_fops;
- vfd->minor = -1;
vfd->ioctl_ops = &omap24xxcam_ioctl_fops;
omap24xxcam_hwinit(cam);
@@ -1671,14 +1669,14 @@ static int omap24xxcam_device_register(struct v4l2_int_device *s)
if (video_register_device(vfd, VFL_TYPE_GRABBER, video_nr) < 0) {
dev_err(cam->dev, "could not register V4L device\n");
- vfd->minor = -1;
rval = -EBUSY;
goto err;
}
omap24xxcam_poweron_reset(cam);
- dev_info(cam->dev, "registered device video%d\n", vfd->minor);
+ dev_info(cam->dev, "registered device %s\n",
+ video_device_node_name(vfd));
return 0;
@@ -1695,7 +1693,7 @@ static void omap24xxcam_device_unregister(struct v4l2_int_device *s)
omap24xxcam_sensor_exit(cam);
if (cam->vfd) {
- if (cam->vfd->minor == -1) {
+ if (!video_is_registered(cam->vfd)) {
/*
* The device was never registered, so release the
* video_device struct directly.
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 0bc2cf573c7..e0bce8dc74b 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -4674,7 +4674,6 @@ static struct video_device vdev_template = {
.name = "OV511 USB Camera",
.fops = &ov511_fops,
.release = video_device_release,
- .minor = -1,
};
/****************************************************************************
@@ -5867,8 +5866,8 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
ov511_devused |= 1 << nr;
ov->nr = nr;
- dev_info(&intf->dev, "Device at %s registered to minor %d\n",
- ov->usb_path, ov->vdev->minor);
+ dev_info(&intf->dev, "Device at %s registered to %s\n",
+ ov->usb_path, video_device_node_name(ov->vdev));
usb_set_intfdata(intf, ov);
if (ov_create_sysfs(ov->vdev)) {
@@ -5878,13 +5877,13 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto error;
}
- mutex_lock(&ov->lock);
+ mutex_unlock(&ov->lock);
return 0;
error:
if (ov->vdev) {
- if (-1 == ov->vdev->minor)
+ if (!video_is_registered(ov->vdev))
video_device_release(ov->vdev);
else
video_unregister_device(ov->vdev);
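The omap24xxcam and ov511 hunks above stop tracking minor numbers by hand: registration state is queried with video_is_registered() and device node names are printed with video_device_node_name(). A hedged sketch of the resulting cleanup pattern, using only calls visible in this diff (my_cleanup() is a hypothetical helper, not taken from either driver):

/*
 * Illustrative sketch, not part of the patch: probe-error unwinding with
 * the helpers the hunks above switch to.
 */
static void my_cleanup(struct device *dev, struct video_device *vdev)
{
	if (!vdev)
		return;

	if (video_is_registered(vdev)) {
		dev_info(dev, "removing %s\n", video_device_node_name(vdev));
		video_unregister_device(vdev);	/* node exists, tear it down */
	} else {
		video_device_release(vdev);	/* never registered: just free */
	}
}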
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 20522933346..3a45e945a52 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -24,6 +24,7 @@
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-subdev.h>
#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
#include <media/ov772x.h>
/*
@@ -382,7 +383,8 @@ struct regval_list {
};
struct ov772x_color_format {
- const struct soc_camera_data_format *format;
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
u8 dsp3;
u8 com3;
u8 com7;
@@ -399,7 +401,7 @@ struct ov772x_win_size {
struct ov772x_priv {
struct v4l2_subdev subdev;
struct ov772x_camera_info *info;
- const struct ov772x_color_format *fmt;
+ const struct ov772x_color_format *cfmt;
const struct ov772x_win_size *win;
int model;
unsigned short flag_vflip:1;
@@ -434,93 +436,57 @@ static const struct regval_list ov772x_vga_regs[] = {
};
/*
- * supported format list
- */
-
-#define SETFOURCC(type) .name = (#type), .fourcc = (V4L2_PIX_FMT_ ## type)
-static const struct soc_camera_data_format ov772x_fmt_lists[] = {
- {
- SETFOURCC(YUYV),
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_JPEG,
- },
- {
- SETFOURCC(YVYU),
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_JPEG,
- },
- {
- SETFOURCC(UYVY),
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_JPEG,
- },
- {
- SETFOURCC(RGB555),
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
- {
- SETFOURCC(RGB555X),
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
- {
- SETFOURCC(RGB565),
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
- {
- SETFOURCC(RGB565X),
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_SRGB,
- },
-};
-
-/*
- * color format list
+ * supported color format list
*/
static const struct ov772x_color_format ov772x_cfmts[] = {
{
- .format = &ov772x_fmt_lists[0],
- .dsp3 = 0x0,
- .com3 = SWAP_YUV,
- .com7 = OFMT_YUV,
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .dsp3 = 0x0,
+ .com3 = SWAP_YUV,
+ .com7 = OFMT_YUV,
},
{
- .format = &ov772x_fmt_lists[1],
- .dsp3 = UV_ON,
- .com3 = SWAP_YUV,
- .com7 = OFMT_YUV,
+ .code = V4L2_MBUS_FMT_YVYU8_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .dsp3 = UV_ON,
+ .com3 = SWAP_YUV,
+ .com7 = OFMT_YUV,
},
{
- .format = &ov772x_fmt_lists[2],
- .dsp3 = 0x0,
- .com3 = 0x0,
- .com7 = OFMT_YUV,
+ .code = V4L2_MBUS_FMT_YUYV8_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .dsp3 = 0x0,
+ .com3 = 0x0,
+ .com7 = OFMT_YUV,
},
{
- .format = &ov772x_fmt_lists[3],
- .dsp3 = 0x0,
- .com3 = SWAP_RGB,
- .com7 = FMT_RGB555 | OFMT_RGB,
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .dsp3 = 0x0,
+ .com3 = SWAP_RGB,
+ .com7 = FMT_RGB555 | OFMT_RGB,
},
{
- .format = &ov772x_fmt_lists[4],
- .dsp3 = 0x0,
- .com3 = 0x0,
- .com7 = FMT_RGB555 | OFMT_RGB,
+ .code = V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .dsp3 = 0x0,
+ .com3 = 0x0,
+ .com7 = FMT_RGB555 | OFMT_RGB,
},
{
- .format = &ov772x_fmt_lists[5],
- .dsp3 = 0x0,
- .com3 = SWAP_RGB,
- .com7 = FMT_RGB565 | OFMT_RGB,
+ .code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .dsp3 = 0x0,
+ .com3 = SWAP_RGB,
+ .com7 = FMT_RGB565 | OFMT_RGB,
},
{
- .format = &ov772x_fmt_lists[6],
- .dsp3 = 0x0,
- .com3 = 0x0,
- .com7 = FMT_RGB565 | OFMT_RGB,
+ .code = V4L2_MBUS_FMT_RGB565_2X8_BE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .dsp3 = 0x0,
+ .com3 = 0x0,
+ .com7 = FMT_RGB565 | OFMT_RGB,
},
};
@@ -642,15 +608,15 @@ static int ov772x_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
- if (!priv->win || !priv->fmt) {
+ if (!priv->win || !priv->cfmt) {
dev_err(&client->dev, "norm or win select error\n");
return -EPERM;
}
ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0);
- dev_dbg(&client->dev, "format %s, win %s\n",
- priv->fmt->format->name, priv->win->name);
+ dev_dbg(&client->dev, "format %d, win %s\n",
+ priv->cfmt->code, priv->win->name);
return 0;
}
@@ -806,8 +772,8 @@ static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height)
return win;
}
-static int ov772x_set_params(struct i2c_client *client,
- u32 *width, u32 *height, u32 pixfmt)
+static int ov772x_set_params(struct i2c_client *client, u32 *width, u32 *height,
+ enum v4l2_mbus_pixelcode code)
{
struct ov772x_priv *priv = to_ov772x(client);
int ret = -EINVAL;
@@ -817,14 +783,14 @@ static int ov772x_set_params(struct i2c_client *client,
/*
* select format
*/
- priv->fmt = NULL;
+ priv->cfmt = NULL;
for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) {
- if (pixfmt == ov772x_cfmts[i].format->fourcc) {
- priv->fmt = ov772x_cfmts + i;
+ if (code == ov772x_cfmts[i].code) {
+ priv->cfmt = ov772x_cfmts + i;
break;
}
}
- if (!priv->fmt)
+ if (!priv->cfmt)
goto ov772x_set_fmt_error;
/*
@@ -894,7 +860,7 @@ static int ov772x_set_params(struct i2c_client *client,
/*
* set DSP_CTRL3
*/
- val = priv->fmt->dsp3;
+ val = priv->cfmt->dsp3;
if (val) {
ret = ov772x_mask_set(client,
DSP_CTRL3, UV_MASK, val);
@@ -905,7 +871,7 @@ static int ov772x_set_params(struct i2c_client *client,
/*
* set COM3
*/
- val = priv->fmt->com3;
+ val = priv->cfmt->com3;
if (priv->info->flags & OV772X_FLAG_VFLIP)
val |= VFLIP_IMG;
if (priv->info->flags & OV772X_FLAG_HFLIP)
@@ -923,9 +889,9 @@ static int ov772x_set_params(struct i2c_client *client,
/*
* set COM7
*/
- val = priv->win->com7_bit | priv->fmt->com7;
+ val = priv->win->com7_bit | priv->cfmt->com7;
ret = ov772x_mask_set(client,
- COM7, (SLCT_MASK | FMT_MASK | OFMT_MASK),
+ COM7, SLCT_MASK | FMT_MASK | OFMT_MASK,
val);
if (ret < 0)
goto ov772x_set_fmt_error;
@@ -951,7 +917,7 @@ ov772x_set_fmt_error:
ov772x_reset(client);
priv->win = NULL;
- priv->fmt = NULL;
+ priv->cfmt = NULL;
return ret;
}
@@ -981,54 +947,79 @@ static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int ov772x_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int ov772x_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct ov772x_priv *priv = to_ov772x(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- if (!priv->win || !priv->fmt) {
+ if (!priv->win || !priv->cfmt) {
u32 width = VGA_WIDTH, height = VGA_HEIGHT;
int ret = ov772x_set_params(client, &width, &height,
- V4L2_PIX_FMT_YUYV);
+ V4L2_MBUS_FMT_YUYV8_2X8_LE);
if (ret < 0)
return ret;
}
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- pix->width = priv->win->width;
- pix->height = priv->win->height;
- pix->pixelformat = priv->fmt->format->fourcc;
- pix->colorspace = priv->fmt->format->colorspace;
- pix->field = V4L2_FIELD_NONE;
+ mf->width = priv->win->width;
+ mf->height = priv->win->height;
+ mf->code = priv->cfmt->code;
+ mf->colorspace = priv->cfmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
return 0;
}
-static int ov772x_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int ov772x_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct ov772x_priv *priv = to_ov772x(client);
+ int ret = ov772x_set_params(client, &mf->width, &mf->height,
+ mf->code);
+
+ if (!ret)
+ mf->colorspace = priv->cfmt->colorspace;
- return ov772x_set_params(client, &pix->width, &pix->height,
- pix->pixelformat);
+ return ret;
}
static int ov772x_try_fmt(struct v4l2_subdev *sd,
- struct v4l2_format *f)
+ struct v4l2_mbus_framefmt *mf)
{
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct i2c_client *client = sd->priv;
+ struct ov772x_priv *priv = to_ov772x(client);
const struct ov772x_win_size *win;
+ int i;
/*
* select suitable win
*/
- win = ov772x_select_win(pix->width, pix->height);
+ win = ov772x_select_win(mf->width, mf->height);
+
+ mf->width = win->width;
+ mf->height = win->height;
+ mf->field = V4L2_FIELD_NONE;
- pix->width = win->width;
- pix->height = win->height;
- pix->field = V4L2_FIELD_NONE;
+ for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++)
+ if (mf->code == ov772x_cfmts[i].code)
+ break;
+
+ if (i == ARRAY_SIZE(ov772x_cfmts)) {
+ /* Unsupported format requested. Propose either */
+ if (priv->cfmt) {
+ /* the current one or */
+ mf->colorspace = priv->cfmt->colorspace;
+ mf->code = priv->cfmt->code;
+ } else {
+ /* the default one */
+ mf->colorspace = ov772x_cfmts[0].colorspace;
+ mf->code = ov772x_cfmts[0].code;
+ }
+ } else {
+ /* Also return the colorspace */
+ mf->colorspace = ov772x_cfmts[i].colorspace;
+ }
return 0;
}
@@ -1057,9 +1048,6 @@ static int ov772x_video_probe(struct soc_camera_device *icd,
return -ENODEV;
}
- icd->formats = ov772x_fmt_lists;
- icd->num_formats = ARRAY_SIZE(ov772x_fmt_lists);
-
/*
* check and show product ID and manufacturer ID
*/
@@ -1109,13 +1097,24 @@ static struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
#endif
};
+static int ov772x_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if ((unsigned int)index >= ARRAY_SIZE(ov772x_cfmts))
+ return -EINVAL;
+
+ *code = ov772x_cfmts[index].code;
+ return 0;
+}
+
static struct v4l2_subdev_video_ops ov772x_subdev_video_ops = {
.s_stream = ov772x_s_stream,
- .g_fmt = ov772x_g_fmt,
- .s_fmt = ov772x_s_fmt,
- .try_fmt = ov772x_try_fmt,
+ .g_mbus_fmt = ov772x_g_fmt,
+ .s_mbus_fmt = ov772x_s_fmt,
+ .try_mbus_fmt = ov772x_try_fmt,
.cropcap = ov772x_cropcap,
.g_crop = ov772x_g_crop,
+ .enum_mbus_fmt = ov772x_enum_fmt,
};
static struct v4l2_subdev_ops ov772x_subdev_ops = {
@@ -1143,10 +1142,10 @@ static int ov772x_probe(struct i2c_client *client,
}
icl = to_soc_camera_link(icd);
- if (!icl)
+ if (!icl || !icl->priv)
return -EINVAL;
- info = container_of(icl, struct ov772x_camera_info, link);
+ info = icl->priv;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev,
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index c81ae219288..47bf60ceb7a 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -154,19 +154,10 @@ static const struct ov9640_reg ov9640_regs_rgb[] = {
{ OV9640_MTXS, 0x65 },
};
-/*
- * TODO: this sensor also supports RGB555 and RGB565 formats, but support for
- * them has not yet been sufficiently tested and so it is not included with
- * this version of the driver. To test and debug these formats add two entries
- * to the below array, see ov722x.c for an example.
- */
-static const struct soc_camera_data_format ov9640_fmt_lists[] = {
- {
- .name = "UYVY",
- .fourcc = V4L2_PIX_FMT_UYVY,
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_JPEG,
- },
+static enum v4l2_mbus_pixelcode ov9640_codes[] = {
+ V4L2_MBUS_FMT_YUYV8_2X8_BE,
+ V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ V4L2_MBUS_FMT_RGB565_2X8_LE,
};
static const struct v4l2_queryctrl ov9640_controls[] = {
@@ -434,20 +425,22 @@ static void ov9640_res_roundup(u32 *width, u32 *height)
}
/* Prepare necessary register changes depending on color encoding */
-static void ov9640_alter_regs(u32 pixfmt, struct ov9640_reg_alt *alt)
+static void ov9640_alter_regs(enum v4l2_mbus_pixelcode code,
+ struct ov9640_reg_alt *alt)
{
- switch (pixfmt) {
- case V4L2_PIX_FMT_UYVY:
+ switch (code) {
+ default:
+ case V4L2_MBUS_FMT_YUYV8_2X8_BE:
alt->com12 = OV9640_COM12_YUV_AVG;
alt->com13 = OV9640_COM13_Y_DELAY_EN |
OV9640_COM13_YUV_DLY(0x01);
break;
- case V4L2_PIX_FMT_RGB555:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
alt->com7 = OV9640_COM7_RGB;
alt->com13 = OV9640_COM13_RGB_AVG;
alt->com15 = OV9640_COM15_RGB_555;
break;
- case V4L2_PIX_FMT_RGB565:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
alt->com7 = OV9640_COM7_RGB;
alt->com13 = OV9640_COM13_RGB_AVG;
alt->com15 = OV9640_COM15_RGB_565;
@@ -456,8 +449,8 @@ static void ov9640_alter_regs(u32 pixfmt, struct ov9640_reg_alt *alt)
}
/* Setup registers according to resolution and color encoding */
-static int ov9640_write_regs(struct i2c_client *client,
- u32 width, u32 pixfmt, struct ov9640_reg_alt *alts)
+static int ov9640_write_regs(struct i2c_client *client, u32 width,
+ enum v4l2_mbus_pixelcode code, struct ov9640_reg_alt *alts)
{
const struct ov9640_reg *ov9640_regs, *matrix_regs;
int ov9640_regs_len, matrix_regs_len;
@@ -500,7 +493,7 @@ static int ov9640_write_regs(struct i2c_client *client,
}
/* select color matrix configuration for given color encoding */
- if (pixfmt == V4L2_PIX_FMT_UYVY) {
+ if (code == V4L2_MBUS_FMT_YUYV8_2X8_BE) {
matrix_regs = ov9640_regs_yuv;
matrix_regs_len = ARRAY_SIZE(ov9640_regs_yuv);
} else {
@@ -562,15 +555,17 @@ static int ov9640_prog_dflt(struct i2c_client *client)
}
/* set the format we will capture in */
-static int ov9640_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int ov9640_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
- struct v4l2_pix_format *pix = &f->fmt.pix;
struct ov9640_reg_alt alts = {0};
+ enum v4l2_colorspace cspace;
+ enum v4l2_mbus_pixelcode code = mf->code;
int ret;
- ov9640_res_roundup(&pix->width, &pix->height);
- ov9640_alter_regs(pix->pixelformat, &alts);
+ ov9640_res_roundup(&mf->width, &mf->height);
+ ov9640_alter_regs(mf->code, &alts);
ov9640_reset(client);
@@ -578,19 +573,57 @@ static int ov9640_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
if (ret)
return ret;
- return ov9640_write_regs(client, pix->width, pix->pixelformat, &alts);
+ switch (code) {
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ cspace = V4L2_COLORSPACE_SRGB;
+ break;
+ default:
+ code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
+ case V4L2_MBUS_FMT_YUYV8_2X8_BE:
+ cspace = V4L2_COLORSPACE_JPEG;
+ }
+
+ ret = ov9640_write_regs(client, mf->width, code, &alts);
+ if (!ret) {
+ mf->code = code;
+ mf->colorspace = cspace;
+ }
+
+ return ret;
}
-static int ov9640_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int ov9640_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ ov9640_res_roundup(&mf->width, &mf->height);
- ov9640_res_roundup(&pix->width, &pix->height);
- pix->field = V4L2_FIELD_NONE;
+ mf->field = V4L2_FIELD_NONE;
+
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ default:
+ mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
+ case V4L2_MBUS_FMT_YUYV8_2X8_BE:
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ }
return 0;
}
+static int ov9640_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if ((unsigned int)index >= ARRAY_SIZE(ov9640_codes))
+ return -EINVAL;
+
+ *code = ov9640_codes[index];
+ return 0;
+}
+
static int ov9640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
a->c.left = 0;
@@ -637,9 +670,6 @@ static int ov9640_video_probe(struct soc_camera_device *icd,
goto err;
}
- icd->formats = ov9640_fmt_lists;
- icd->num_formats = ARRAY_SIZE(ov9640_fmt_lists);
-
/*
* check and show product ID and manufacturer ID
*/
@@ -702,11 +732,12 @@ static struct v4l2_subdev_core_ops ov9640_core_ops = {
};
static struct v4l2_subdev_video_ops ov9640_video_ops = {
- .s_stream = ov9640_s_stream,
- .s_fmt = ov9640_s_fmt,
- .try_fmt = ov9640_try_fmt,
- .cropcap = ov9640_cropcap,
- .g_crop = ov9640_g_crop,
+ .s_stream = ov9640_s_stream,
+ .s_mbus_fmt = ov9640_s_fmt,
+ .try_mbus_fmt = ov9640_try_fmt,
+ .enum_mbus_fmt = ov9640_enum_fmt,
+ .cropcap = ov9640_cropcap,
+ .g_crop = ov9640_g_crop,
};
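Both sensor drivers above now expose their format lists through .enum_mbus_fmt instead of filling icd->formats. A minimal sketch of how a host can walk that list, assuming nothing beyond the subdev calls already visible in this diff (list_mbus_codes() is a hypothetical name):

/*
 * Illustrative sketch, not part of the patch: enumerating a subdevice's
 * media-bus codes with the enum_mbus_fmt operation that ov772x and
 * ov9640 now implement.
 */
static void list_mbus_codes(struct v4l2_subdev *sd)
{
	enum v4l2_mbus_pixelcode code;
	int idx;

	for (idx = 0; ; idx++) {
		if (v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code) < 0)
			break;			/* no more formats */
		pr_info("mbus code #%d: %d\n", idx, code);
	}
}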
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 73ec970ca5c..11a2c26399b 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <linux/version.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/videodev2.h>
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 6aa48e0ae73..cc8ddb2d238 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -151,17 +151,6 @@ static struct v4l2_format pvr_format [] = {
};
-static const char *get_v4l_name(int v4l_type)
-{
- switch (v4l_type) {
- case VFL_TYPE_GRABBER: return "video";
- case VFL_TYPE_RADIO: return "radio";
- case VFL_TYPE_VBI: return "vbi";
- default: return "?";
- }
-}
-
-
/*
* pvr_ioctl()
*
@@ -891,10 +880,8 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
{
- int num = dip->devbase.num;
struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw;
enum pvr2_config cfg = dip->config;
- int v4l_type = dip->v4l_type;
pvr2_hdw_v4l_store_minor_number(hdw,dip->minor_type,-1);
@@ -906,8 +893,8 @@ static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
are gone. */
video_unregister_device(&dip->devbase);
- printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n",
- get_v4l_name(v4l_type), num,
+ printk(KERN_INFO "pvrusb2: unregistered device %s [%s]\n",
+ video_device_node_name(&dip->devbase),
pvr2_config_get_name(cfg));
}
@@ -1317,8 +1304,8 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
": Failed to register pvrusb2 v4l device\n");
}
- printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n",
- get_v4l_name(dip->v4l_type), dip->devbase.num,
+ printk(KERN_INFO "pvrusb2: registered device %s [%s]\n",
+ video_device_node_name(&dip->devbase),
pvr2_config_get_name(dip->config));
pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 89b620f6db7..aea7e224cef 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -169,7 +169,6 @@ static struct video_device pwc_template = {
.name = "Philips Webcam", /* Filled in later */
.release = video_device_release,
.fops = &pwc_fops,
- .minor = -1,
};
/***************************************************************************/
@@ -1807,7 +1806,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
goto err_video_release;
}
- PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->num);
+ PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
/* occupy slot */
if (hint < MAX_DEV_HINTS)
@@ -1948,7 +1947,9 @@ MODULE_PARM_DESC(size, "Initial image size. One of sqcif, qsif, qcif, sif, cif,
MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
MODULE_PARM_DESC(fbufs, "Number of internal frame buffers to reserve");
MODULE_PARM_DESC(mbufs, "Number of external (mmap()ed) image buffers");
+#ifdef CONFIG_USB_PWC_DEBUG
MODULE_PARM_DESC(trace, "For debugging purposes");
+#endif
MODULE_PARM_DESC(power_save, "Turn power save feature in camera on or off");
MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)");
MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index 51b683c63b7..294f860ce2b 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -32,6 +32,7 @@
#include <media/v4l2-dev.h>
#include <media/videobuf-dma-sg.h>
#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
#include <linux/videodev2.h>
@@ -183,23 +184,21 @@ struct pxa_cam_dma {
/* buffer for one video frame */
struct pxa_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
- const struct soc_camera_data_format *fmt;
-
+ struct videobuf_buffer vb;
+ enum v4l2_mbus_pixelcode code;
/* our descriptor lists for Y, U and V channels */
- struct pxa_cam_dma dmas[3];
-
- int inwork;
-
- enum pxa_camera_active_dma active_dma;
+ struct pxa_cam_dma dmas[3];
+ int inwork;
+ enum pxa_camera_active_dma active_dma;
};
struct pxa_camera_dev {
struct soc_camera_host soc_host;
- /* PXA27x is only supposed to handle one camera on its Quick Capture
+ /*
+ * PXA27x is only supposed to handle one camera on its Quick Capture
* interface. If anyone ever builds hardware to enable more than
- * one camera, they will have to modify this driver too */
+ * one camera, they will have to modify this driver too
+ */
struct soc_camera_device *icd;
struct clk *clk;
@@ -241,11 +240,15 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
unsigned int *size)
{
struct soc_camera_device *icd = vq->priv_data;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
- *size = roundup(icd->user_width * icd->user_height *
- ((icd->current_fmt->depth + 7) >> 3), 8);
+ *size = bytes_per_line * icd->user_height;
if (0 == *count)
*count = 32;
@@ -267,8 +270,10 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
&buf->vb, buf->vb.baddr, buf->vb.bsize);
- /* This waits until this buffer is out of danger, i.e., until it is no
- * longer in STATE_QUEUED or STATE_ACTIVE */
+ /*
+ * This waits until this buffer is out of danger, i.e., until it is no
+ * longer in STATE_QUEUED or STATE_ACTIVE
+ */
videobuf_waiton(&buf->vb, 0, 0);
videobuf_dma_unmap(vq, dma);
videobuf_dma_free(dma);
@@ -429,6 +434,11 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
int ret;
int size_y, size_u = 0, size_v = 0;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -437,29 +447,33 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
WARN_ON(!list_empty(&vb->queue));
#ifdef DEBUG
- /* This can be useful if you want to see if we actually fill
- * the buffer with something */
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
memset((void *)vb->baddr, 0xaa, vb->bsize);
#endif
BUG_ON(NULL == icd->current_fmt);
- /* I think, in buf_prepare you only have to protect global data,
- * the actual buffer is yours */
+ /*
+ * I think, in buf_prepare you only have to protect global data,
+ * the actual buffer is yours
+ */
buf->inwork = 1;
- if (buf->fmt != icd->current_fmt ||
+ if (buf->code != icd->current_fmt->code ||
vb->width != icd->user_width ||
vb->height != icd->user_height ||
vb->field != field) {
- buf->fmt = icd->current_fmt;
+ buf->code = icd->current_fmt->code;
vb->width = icd->user_width;
vb->height = icd->user_height;
vb->field = field;
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3);
+ vb->size = bytes_per_line * vb->height;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
@@ -834,8 +848,10 @@ static void pxa_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct pxa_camera_dev *pcdev = ici->priv;
- /* We must pass NULL as dev pointer, then all pci_* dma operations
- * transform to normal dma_* ones. */
+ /*
+ * We must pass NULL as dev pointer, then all pci_* dma operations
+ * transform to normal dma_* ones.
+ */
videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct pxa_buffer), icd);
@@ -1051,11 +1067,18 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct pxa_camera_dev *pcdev = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
unsigned long dw, bpp;
- u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0;
+ u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top;
+ int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top);
+
+ if (ret < 0)
+ y_skip_top = 0;
- /* Datawidth is now guaranteed to be equal to one of the three values.
- * We fix bit-per-pixel equal to data-width... */
+ /*
+ * Datawidth is now guaranteed to be equal to one of the three values.
+ * We fix bit-per-pixel equal to data-width...
+ */
switch (flags & SOCAM_DATAWIDTH_MASK) {
case SOCAM_DATAWIDTH_10:
dw = 4;
@@ -1066,8 +1089,10 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
bpp = 0x20;
break;
default:
- /* Actually it can only be 8 now,
- * default is just to silence compiler warnings */
+ /*
+ * Actually it can only be 8 now,
+ * default is just to silence compiler warnings
+ */
case SOCAM_DATAWIDTH_8:
dw = 2;
bpp = 0;
@@ -1118,7 +1143,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
cicr2 = 0;
cicr3 = CICR3_LPF_VAL(icd->user_height - 1) |
- CICR3_BFW_VAL(min((unsigned short)255, icd->y_skip_top));
+ CICR3_BFW_VAL(min((u32)255, y_skip_top));
cicr4 |= pcdev->mclk_divisor;
__raw_writel(cicr1, pcdev->base + CICR1);
@@ -1138,9 +1163,15 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct pxa_camera_dev *pcdev = ici->priv;
unsigned long bus_flags, camera_flags, common_flags;
- int ret = test_platform_param(pcdev, icd->buswidth, &bus_flags);
+ const struct soc_mbus_pixelfmt *fmt;
+ int ret;
struct pxa_cam *cam = icd->host_priv;
+ fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
+ if (!fmt)
+ return -EINVAL;
+
+ ret = test_platform_param(pcdev, fmt->bits_per_sample, &bus_flags);
if (ret < 0)
return ret;
@@ -1204,59 +1235,49 @@ static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
return soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL;
}
-static const struct soc_camera_data_format pxa_camera_formats[] = {
+static const struct soc_mbus_pixelfmt pxa_camera_formats[] = {
{
- .name = "Planar YUV422 16 bit",
- .depth = 16,
- .fourcc = V4L2_PIX_FMT_YUV422P,
- .colorspace = V4L2_COLORSPACE_JPEG,
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .name = "Planar YUV422 16 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
},
};
-static bool buswidth_supported(struct soc_camera_device *icd, int depth)
+/* This will be corrected as we get more formats */
+static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- struct pxa_camera_dev *pcdev = ici->priv;
-
- switch (depth) {
- case 8:
- return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8);
- case 9:
- return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9);
- case 10:
- return !!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10);
- }
- return false;
-}
-
-static int required_buswidth(const struct soc_camera_data_format *fmt)
-{
- switch (fmt->fourcc) {
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_VYUY:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_YVYU:
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_RGB555:
- return 8;
- default:
- return fmt->depth;
- }
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
struct soc_camera_format_xlate *xlate)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
- int formats = 0, buswidth, ret;
+ int formats = 0, ret;
struct pxa_cam *cam;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
- buswidth = required_buswidth(icd->formats + idx);
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
- if (!buswidth_supported(icd, buswidth))
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(dev, "Invalid format code #%d: %d\n", idx, code);
return 0;
+ }
- ret = pxa_camera_try_bus_param(icd, buswidth);
+ /* This also checks support for the requested bits-per-sample */
+ ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample);
if (ret < 0)
return 0;
@@ -1270,45 +1291,40 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
cam = icd->host_priv;
}
- switch (icd->formats[idx].fourcc) {
- case V4L2_PIX_FMT_UYVY:
+ switch (code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8_BE:
formats++;
if (xlate) {
- xlate->host_fmt = &pxa_camera_formats[0];
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = buswidth;
+ xlate->host_fmt = &pxa_camera_formats[0];
+ xlate->code = code;
xlate++;
- dev_dbg(dev, "Providing format %s using %s\n",
- pxa_camera_formats[0].name,
- icd->formats[idx].name);
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ pxa_camera_formats[0].name, code);
}
- case V4L2_PIX_FMT_VYUY:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_YVYU:
- case V4L2_PIX_FMT_RGB565:
- case V4L2_PIX_FMT_RGB555:
- formats++;
- if (xlate) {
- xlate->host_fmt = icd->formats + idx;
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = buswidth;
- xlate++;
+ case V4L2_MBUS_FMT_YVYU8_2X8_BE:
+ case V4L2_MBUS_FMT_YUYV8_2X8_LE:
+ case V4L2_MBUS_FMT_YVYU8_2X8_LE:
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
+ if (xlate)
dev_dbg(dev, "Providing format %s packed\n",
- icd->formats[idx].name);
- }
+ fmt->name);
break;
default:
- /* Generic pass-through */
- formats++;
- if (xlate) {
- xlate->host_fmt = icd->formats + idx;
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = icd->formats[idx].depth;
- xlate++;
+ if (!pxa_camera_packing_supported(fmt))
+ return 0;
+ if (xlate)
dev_dbg(dev,
"Providing format %s in pass-through mode\n",
- icd->formats[idx].name);
- }
+ fmt->name);
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
}
return formats;
@@ -1320,11 +1336,11 @@ static void pxa_camera_put_formats(struct soc_camera_device *icd)
icd->host_priv = NULL;
}
-static int pxa_camera_check_frame(struct v4l2_pix_format *pix)
+static int pxa_camera_check_frame(u32 width, u32 height)
{
/* limit to pxa hardware capabilities */
- return pix->height < 32 || pix->height > 2048 || pix->width < 48 ||
- pix->width > 2048 || (pix->width & 0x01);
+ return height < 32 || height > 2048 || width < 48 || width > 2048 ||
+ (width & 0x01);
}
static int pxa_camera_set_crop(struct soc_camera_device *icd,
@@ -1339,9 +1355,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
.master_clock = pcdev->mclk,
.pixel_clock_max = pcdev->ciclk / 4,
};
- struct v4l2_format f;
- struct v4l2_pix_format *pix = &f.fmt.pix, pix_tmp;
+ struct v4l2_mbus_framefmt mf;
struct pxa_cam *cam = icd->host_priv;
+ u32 fourcc = icd->current_fmt->host_fmt->fourcc;
int ret;
/* If PCLK is used to latch data from the sensor, check sense */
@@ -1358,27 +1374,23 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
return ret;
}
- f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = v4l2_subdev_call(sd, video, g_fmt, &f);
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
if (ret < 0)
return ret;
- pix_tmp = *pix;
- if (pxa_camera_check_frame(pix)) {
+ if (pxa_camera_check_frame(mf.width, mf.height)) {
/*
* Camera cropping produced a frame beyond our capabilities.
* FIXME: just extract a subframe, that we can process.
*/
- v4l_bound_align_image(&pix->width, 48, 2048, 1,
- &pix->height, 32, 2048, 0,
- icd->current_fmt->fourcc == V4L2_PIX_FMT_YUV422P ?
- 4 : 0);
- ret = v4l2_subdev_call(sd, video, s_fmt, &f);
+ v4l_bound_align_image(&mf.width, 48, 2048, 1,
+ &mf.height, 32, 2048, 0,
+ fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
if (ret < 0)
return ret;
- if (pxa_camera_check_frame(pix)) {
+ if (pxa_camera_check_frame(mf.width, mf.height)) {
dev_warn(icd->dev.parent,
"Inconsistent state. Use S_FMT to repair\n");
return -EINVAL;
@@ -1395,10 +1407,10 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
recalculate_fifo_timeout(pcdev, sense.pixel_clock);
}
- icd->user_width = pix->width;
- icd->user_height = pix->height;
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
- pxa_camera_setup_cicr(icd, cam->flags, icd->current_fmt->fourcc);
+ pxa_camera_setup_cicr(icd, cam->flags, fourcc);
return ret;
}
@@ -1410,14 +1422,13 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
struct pxa_camera_dev *pcdev = ici->priv;
struct device *dev = icd->dev.parent;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- const struct soc_camera_data_format *cam_fmt = NULL;
const struct soc_camera_format_xlate *xlate = NULL;
struct soc_camera_sense sense = {
.master_clock = pcdev->mclk,
.pixel_clock_max = pcdev->ciclk / 4,
};
struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_format cam_f = *f;
+ struct v4l2_mbus_framefmt mf;
int ret;
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
@@ -1426,26 +1437,31 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
return -EINVAL;
}
- cam_fmt = xlate->cam_fmt;
-
/* If PCLK is used to latch data from the sensor, check sense */
if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN)
+ /* The caller holds a mutex. */
icd->sense = &sense;
- cam_f.fmt.pix.pixelformat = cam_fmt->fourcc;
- ret = v4l2_subdev_call(sd, video, s_fmt, &cam_f);
- cam_f.fmt.pix.pixelformat = pix->pixelformat;
- *pix = cam_f.fmt.pix;
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
icd->sense = NULL;
if (ret < 0) {
dev_warn(dev, "Failed to configure for format %x\n",
pix->pixelformat);
- } else if (pxa_camera_check_frame(pix)) {
+ } else if (pxa_camera_check_frame(mf.width, mf.height)) {
dev_warn(dev,
"Camera driver produced an unsupported frame %dx%d\n",
- pix->width, pix->height);
+ mf.width, mf.height);
ret = -EINVAL;
} else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
if (sense.pixel_clock > sense.pixel_clock_max) {
@@ -1457,10 +1473,14 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
recalculate_fifo_timeout(pcdev, sense.pixel_clock);
}
- if (!ret) {
- icd->buswidth = xlate->buswidth;
- icd->current_fmt = xlate->host_fmt;
- }
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
return ret;
}
@@ -1468,17 +1488,16 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
static int pxa_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
__u32 pixfmt = pix->pixelformat;
- enum v4l2_field field;
int ret;
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(ici->v4l2_dev.dev, "Format %x not found\n", pixfmt);
+ dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -1492,22 +1511,36 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
&pix->height, 32, 2048, 0,
pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
- pix->bytesperline = pix->width *
- DIV_ROUND_UP(xlate->host_fmt->depth, 8);
+ pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+ xlate->host_fmt);
+ if (pix->bytesperline < 0)
+ return pix->bytesperline;
pix->sizeimage = pix->height * pix->bytesperline;
- /* camera has to see its format, but the user the original one */
- pix->pixelformat = xlate->cam_fmt->fourcc;
/* limit to sensor capabilities */
- ret = v4l2_subdev_call(sd, video, try_fmt, f);
- pix->pixelformat = pixfmt;
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
- field = pix->field;
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
- if (field == V4L2_FIELD_ANY) {
- pix->field = V4L2_FIELD_NONE;
- } else if (field != V4L2_FIELD_NONE) {
- dev_err(icd->dev.parent, "Field type %d unsupported.\n", field);
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->colorspace = mf.colorspace;
+
+ switch (mf.field) {
+ case V4L2_FIELD_ANY:
+ case V4L2_FIELD_NONE:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ default:
+ /* TODO: support interlaced at least in pass-through mode */
+ dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ mf.field);
return -EINVAL;
}
@@ -1519,10 +1552,12 @@ static int pxa_camera_reqbufs(struct soc_camera_file *icf,
{
int i;
- /* This is for locking debugging only. I removed spinlocks and now I
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
* check whether .prepare is ever called on a linked buffer, or whether
* a dma IRQ can occur for an in-work or unlinked buffer. Until now
- * it hadn't triggered */
+ * it hadn't triggered
+ */
for (i = 0; i < p->count; i++) {
struct pxa_buffer *buf = container_of(icf->vb_vidq.bufs[i],
struct pxa_buffer, vb);
@@ -1657,8 +1692,10 @@ static int __devinit pxa_camera_probe(struct platform_device *pdev)
pcdev->platform_flags = pcdev->pdata->flags;
if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
- /* Platform hasn't set available data widths. This is bad.
- * Warn and use a default. */
+ /*
+ * Platform hasn't set available data widths. This is bad.
+ * Warn and use a default.
+ */
dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
"data widths, using default 10 bit\n");
pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
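pxa_camera now derives buffer sizes from soc_mbus_bytes_per_line() instead of width * depth / 8. As a rough guide, the following sketch shows the line-size arithmetic that call is expected to perform for the packings appearing in this series; this is an assumption about the helper's behaviour, not a copy of soc_mediabus.c:

/*
 * Illustrative sketch, not part of the patch: approximate line-size
 * arithmetic for the packings used in this diff.
 */
static int bytes_per_line_sketch(u32 width, const struct soc_mbus_pixelfmt *fmt)
{
	switch (fmt->packing) {
	case SOC_MBUS_PACKING_NONE:
		return width * fmt->bits_per_sample / 8;
	case SOC_MBUS_PACKING_2X8_PADHI:
	case SOC_MBUS_PACKING_EXTEND16:
		return width * 2;	/* two bus samples per pixel */
	default:
		return -EINVAL;
	}
}

For the Planar YUV422 host format above (8 bits per sample, SOC_MBUS_PACKING_2X8_PADHI) this yields width * 2 bytes per line, the same result as the old depth = 16 computation.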
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 373f2a30a67..7e42989ce0e 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -13,9 +13,11 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/rj54n1cb0c.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-chip-ident.h>
-#include <media/soc_camera.h>
#define RJ54N1_DEV_CODE 0x0400
#define RJ54N1_DEV_CODE2 0x0401
@@ -38,6 +40,7 @@
#define RJ54N1_H_OBEN_OFS 0x0413
#define RJ54N1_V_OBEN_OFS 0x0414
#define RJ54N1_RESIZE_CONTROL 0x0415
+#define RJ54N1_STILL_CONTROL 0x0417
#define RJ54N1_INC_USE_SEL_H 0x0425
#define RJ54N1_INC_USE_SEL_L 0x0426
#define RJ54N1_MIRROR_STILL_MODE 0x0427
@@ -49,10 +52,21 @@
#define RJ54N1_RA_SEL_UL 0x0530
#define RJ54N1_BYTE_SWAP 0x0531
#define RJ54N1_OUT_SIGPO 0x053b
+#define RJ54N1_WB_SEL_WEIGHT_I 0x054e
+#define RJ54N1_BIT8_WB 0x0569
+#define RJ54N1_HCAPS_WB 0x056a
+#define RJ54N1_VCAPS_WB 0x056b
+#define RJ54N1_HCAPE_WB 0x056c
+#define RJ54N1_VCAPE_WB 0x056d
+#define RJ54N1_EXPOSURE_CONTROL 0x058c
#define RJ54N1_FRAME_LENGTH_S_H 0x0595
#define RJ54N1_FRAME_LENGTH_S_L 0x0596
#define RJ54N1_FRAME_LENGTH_P_H 0x0597
#define RJ54N1_FRAME_LENGTH_P_L 0x0598
+#define RJ54N1_PEAK_H 0x05b7
+#define RJ54N1_PEAK_50 0x05b8
+#define RJ54N1_PEAK_60 0x05b9
+#define RJ54N1_PEAK_DIFF 0x05ba
#define RJ54N1_IOC 0x05ef
#define RJ54N1_TG_BYPASS 0x0700
#define RJ54N1_PLL_L 0x0701
@@ -68,6 +82,7 @@
#define RJ54N1_OCLK_SEL_EN 0x0713
#define RJ54N1_CLK_RST 0x0717
#define RJ54N1_RESET_STANDBY 0x0718
+#define RJ54N1_FWFLG 0x07fe
#define E_EXCLK (1 << 7)
#define SOFT_STDBY (1 << 4)
@@ -78,29 +93,53 @@
#define RESIZE_HOLD_SEL (1 << 2)
#define RESIZE_GO (1 << 1)
+/*
+ * When cropping, the camera automatically centers the cropped region; there
+ * doesn't seem to be a way to specify an explicit location of the rectangle.
+ */
#define RJ54N1_COLUMN_SKIP 0
#define RJ54N1_ROW_SKIP 0
#define RJ54N1_MAX_WIDTH 1600
#define RJ54N1_MAX_HEIGHT 1200
+#define PLL_L 2
+#define PLL_N 0x31
+
/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */
-static const struct soc_camera_data_format rj54n1_colour_formats[] = {
- {
- .name = "YUYV",
- .depth = 16,
- .fourcc = V4L2_PIX_FMT_YUYV,
- .colorspace = V4L2_COLORSPACE_JPEG,
- }, {
- .name = "RGB565",
- .depth = 16,
- .fourcc = V4L2_PIX_FMT_RGB565,
- .colorspace = V4L2_COLORSPACE_SRGB,
- }
+/* RJ54N1CB0C has only one fixed colorspace per pixelcode */
+struct rj54n1_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+/* Find a data format by a pixel code in an array */
+static const struct rj54n1_datafmt *rj54n1_find_datafmt(
+ enum v4l2_mbus_pixelcode code, const struct rj54n1_datafmt *fmt,
+ int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ if (fmt[i].code == code)
+ return fmt + i;
+
+ return NULL;
+}
+
+static const struct rj54n1_datafmt rj54n1_colour_fmts[] = {
+ {V4L2_MBUS_FMT_YUYV8_2X8_LE, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_YVYU8_2X8_LE, V4L2_COLORSPACE_JPEG},
+ {V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB},
};
struct rj54n1_clock_div {
- u8 ratio_tg;
+ u8 ratio_tg; /* can be 0 or an odd number */
u8 ratio_t;
u8 ratio_r;
u8 ratio_op;
@@ -109,12 +148,14 @@ struct rj54n1_clock_div {
struct rj54n1 {
struct v4l2_subdev subdev;
+ struct rj54n1_clock_div clk_div;
+ const struct rj54n1_datafmt *fmt;
struct v4l2_rect rect; /* Sensor window */
+ unsigned int tgclk_mhz;
+ bool auto_wb;
unsigned short width; /* Output window */
unsigned short height;
unsigned short resize; /* Sensor * 1024 / resize = Output */
- struct rj54n1_clock_div clk_div;
- u32 fourcc;
unsigned short scale;
u8 bank;
};
@@ -171,7 +212,7 @@ const static struct rj54n1_reg_val bank_7[] = {
{0x714, 0xff},
{0x715, 0xff},
{0x716, 0x1f},
- {0x7FE, 0x02},
+ {0x7FE, 2},
};
const static struct rj54n1_reg_val bank_8[] = {
@@ -359,7 +400,7 @@ const static struct rj54n1_reg_val bank_8[] = {
{0x8BB, 0x00},
{0x8BC, 0xFF},
{0x8BD, 0x00},
- {0x8FE, 0x02},
+ {0x8FE, 2},
};
const static struct rj54n1_reg_val bank_10[] = {
@@ -440,12 +481,24 @@ static int reg_write_multiple(struct i2c_client *client,
return 0;
}
-static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
+static int rj54n1_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
{
- /* TODO: start / stop streaming */
+ if ((unsigned int)index >= ARRAY_SIZE(rj54n1_colour_fmts))
+ return -EINVAL;
+
+ *code = rj54n1_colour_fmts[index].code;
return 0;
}
+static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = sd->priv;
+
+ /* Switch between preview and still shot modes */
+ return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80);
+}
+
static int rj54n1_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
@@ -502,6 +555,44 @@ static int rj54n1_commit(struct i2c_client *client)
return ret;
}
+static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
+ u32 *out_w, u32 *out_h);
+
+static int rj54n1_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct i2c_client *client = sd->priv;
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ struct v4l2_rect *rect = &a->c;
+ unsigned int dummy, output_w, output_h,
+ input_w = rect->width, input_h = rect->height;
+ int ret;
+
+ /* arbitrary minimum width and height, edges unimportant */
+ soc_camera_limit_side(&dummy, &input_w,
+ RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH);
+
+ soc_camera_limit_side(&dummy, &input_h,
+ RJ54N1_ROW_SKIP, 8, RJ54N1_MAX_HEIGHT);
+
+ output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize;
+ output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize;
+
+ dev_dbg(&client->dev, "Scaling for %ux%u : %u = %ux%u\n",
+ input_w, input_h, rj54n1->resize, output_w, output_h);
+
+ ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
+ if (ret < 0)
+ return ret;
+
+ rj54n1->width = output_w;
+ rj54n1->height = output_h;
+ rj54n1->resize = ret;
+ rj54n1->rect.width = input_w;
+ rj54n1->rect.height = input_h;
+
+ return 0;
+}
+
static int rj54n1_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct i2c_client *client = sd->priv;
@@ -527,16 +618,17 @@ static int rj54n1_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int rj54n1_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int rj54n1_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- pix->pixelformat = rj54n1->fourcc;
- pix->field = V4L2_FIELD_NONE;
- pix->width = rj54n1->width;
- pix->height = rj54n1->height;
+ mf->code = rj54n1->fmt->code;
+ mf->colorspace = rj54n1->fmt->colorspace;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = rj54n1->width;
+ mf->height = rj54n1->height;
return 0;
}
@@ -550,11 +642,44 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
u32 *out_w, u32 *out_h)
{
struct i2c_client *client = sd->priv;
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
output_w = *out_w, output_h = *out_h;
- u16 inc_sel;
+ u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom;
+ unsigned int peak, peak_50, peak_60;
int ret;
+ /*
+ * We have a problem with crops where the input window is larger than
+ * 512x384 and the output window is larger than half of the input one.
+ * In this case we have to either reduce the input window to 512x384 or
+ * below, or reduce the output window to half of the input or below.
+ */
+ if (output_w > max(512U, input_w / 2)) {
+ if (2 * output_w > RJ54N1_MAX_WIDTH) {
+ input_w = RJ54N1_MAX_WIDTH;
+ output_w = RJ54N1_MAX_WIDTH / 2;
+ } else {
+ input_w = output_w * 2;
+ }
+
+ dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n",
+ input_w, output_w);
+ }
+
+ if (output_h > max(384U, input_h / 2)) {
+ if (2 * output_h > RJ54N1_MAX_HEIGHT) {
+ input_h = RJ54N1_MAX_HEIGHT;
+ output_h = RJ54N1_MAX_HEIGHT / 2;
+ } else {
+ input_h = output_h * 2;
+ }
+
+ dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n",
+ input_h, output_h);
+ }
+
+ /* Idea: use the read mode for snapshots, handle separate geometries */
ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L,
RJ54N1_Y_OUTPUT_SIZE_S_L,
RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h);
@@ -566,17 +691,27 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
if (ret < 0)
return ret;
- if (output_w > input_w || output_h > input_h) {
+ if (output_w > input_w && output_h > input_h) {
input_w = output_w;
input_h = output_h;
resize = 1024;
} else {
unsigned int resize_x, resize_y;
- resize_x = input_w * 1024 / output_w;
- resize_y = input_h * 1024 / output_h;
-
- resize = min(resize_x, resize_y);
+ resize_x = (input_w * 1024 + output_w / 2) / output_w;
+ resize_y = (input_h * 1024 + output_h / 2) / output_h;
+
+ /* We want max(resize_x, resize_y), check if it still fits */
+ if (resize_x > resize_y &&
+ (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT)
+ resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) /
+ output_h;
+ else if (resize_y > resize_x &&
+ (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH)
+ resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) /
+ output_w;
+ else
+ resize = max(resize_x, resize_y);
/* Prohibited value ranges */
switch (resize) {
@@ -589,12 +724,9 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
case 8160 ... 8191:
resize = 8159;
break;
- case 16320 ... 16383:
+ case 16320 ... 16384:
resize = 16319;
}
-
- input_w = output_w * resize / 1024;
- input_h = output_h * resize / 1024;
}
/* Set scaling */
@@ -607,9 +739,18 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
/*
* Configure a skipping bitmask. The sensor will select a skipping value
- * among set bits automatically.
+ * among set bits automatically. This is very unclear in the datasheet
+ * too. I was told that in this register one enables all skipping values
+ * that are required for a specific resize, and the camera selects
+ * automatically which ones to use. But it is unclear how to identify
+ * which cropping values are needed. Secondly, why don't we just set all
+ * bits and let the camera choose? Would it increase processing time and
+ * reduce the framerate? Using 0xfffc for INC_USE_SEL doesn't seem to
+ * improve the image quality or stability for larger frames (see comment
+ * above), but I didn't check the framerate.
*/
skip = min(resize / 1024, (unsigned)15);
+
inc_sel = 1 << skip;
if (inc_sel <= 2)
@@ -621,6 +762,43 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
if (!ret)
ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8);
+ if (!rj54n1->auto_wb) {
+ /* Auto white balance window */
+ wb_left = output_w / 16;
+ wb_right = (3 * output_w / 4 - 3) / 4;
+ wb_top = output_h / 16;
+ wb_bottom = (3 * output_h / 4 - 3) / 4;
+ wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) |
+ ((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1);
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom);
+ }
+
+ /* Antiflicker */
+ peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz /
+ 10000;
+ peak_50 = peak / 6;
+ peak_60 = peak / 5;
+
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_H,
+ ((peak_50 >> 4) & 0xf0) | (peak_60 >> 8));
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_50, peak_50);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_60, peak_60);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150);
+
/* Start resizing */
if (!ret)
ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
@@ -629,8 +807,6 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
if (ret < 0)
return ret;
- dev_dbg(&client->dev, "resize %u, skip %u\n", resize, skip);
-
/* Constant taken from manufacturer's example */
msleep(230);
@@ -638,11 +814,14 @@ static int rj54n1_sensor_scale(struct v4l2_subdev *sd, u32 *in_w, u32 *in_h,
if (ret < 0)
return ret;
- *in_w = input_w;
- *in_h = input_h;
+ *in_w = (output_w * resize + 512) / 1024;
+ *in_h = (output_h * resize + 512) / 1024;
*out_w = output_w;
*out_h = output_h;
+ dev_dbg(&client->dev, "Scaled for %ux%u : %u = %ux%u, skip %u\n",
+ *in_w, *in_h, resize, output_w, output_h, skip);
+
return resize;
}
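The scaling hunk above rounds the resize factor to the nearest 1/1024 step, clamps it so the implied input still fits the sensor, and then recomputes the effective input size from the factor. A small standalone sketch of that arithmetic, stripped of the register writes (resize_factor() and effective_input() are hypothetical names):

/*
 * Illustrative sketch, not part of the patch: the rounding used by
 * rj54n1_sensor_scale() above. For a 1600x1200 crop scaled to 640x480
 * both factors come out to 2560/1024, i.e. a 2.5:1 downscale, and the
 * effective input is recomputed from the rounded factor.
 */
static unsigned int resize_factor(unsigned int input, unsigned int output)
{
	/* scale factor in 1/1024 steps, rounded to nearest */
	return (input * 1024 + output / 2) / output;
}

static unsigned int effective_input(unsigned int output, unsigned int resize)
{
	/* input size implied by the (possibly clamped) factor */
	return (output * resize + 512) / 1024;
}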
@@ -653,14 +832,14 @@ static int rj54n1_set_clock(struct i2c_client *client)
/* Enable external clock */
ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY);
- /* Leave stand-by */
+ /* Leave stand-by. Note: use this when implementing suspend / resume */
if (!ret)
ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK);
if (!ret)
- ret = reg_write(client, RJ54N1_PLL_L, 2);
+ ret = reg_write(client, RJ54N1_PLL_L, PLL_L);
if (!ret)
- ret = reg_write(client, RJ54N1_PLL_N, 0x31);
+ ret = reg_write(client, RJ54N1_PLL_N, PLL_N);
/* TGCLK dividers */
if (!ret)
@@ -719,6 +898,7 @@ static int rj54n1_set_clock(struct i2c_client *client)
"Resetting RJ54N1CB0C clock failed: %d!\n", ret);
return -EIO;
}
+
/* Start the PLL */
ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1);
@@ -731,6 +911,7 @@ static int rj54n1_set_clock(struct i2c_client *client)
static int rj54n1_reg_init(struct i2c_client *client)
{
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
int ret = rj54n1_set_clock(client);
if (!ret)
@@ -753,14 +934,26 @@ static int rj54n1_reg_init(struct i2c_client *client)
if (!ret)
ret = reg_write(client, RJ54N1_Y_GAIN, 0x84);
- /* Mirror the image back: default is upside down and left-to-right... */
+ /*
+ * Mirror the image back: default is upside down and left-to-right...
+ * Set manual preview / still shot switching
+ */
if (!ret)
- ret = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 3, 3);
+ ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27);
if (!ret)
ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4));
+
+ /* Auto exposure area */
if (!ret)
+ ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80);
+ /* Check current auto WB config */
+ if (!ret)
+ ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I);
+ if (ret >= 0) {
+ rj54n1->auto_wb = ret & 0x80;
ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5));
+ }
if (!ret)
ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8));
@@ -777,8 +970,9 @@ static int rj54n1_reg_init(struct i2c_client *client)
ret = reg_write(client, RJ54N1_RESET_STANDBY,
E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX);
+ /* Start register update? Same register as 0x?FE in many bank_* sets */
if (!ret)
- ret = reg_write(client, 0x7fe, 2);
+ ret = reg_write(client, RJ54N1_FWFLG, 2);
/* Constant taken from manufacturer's example */
msleep(700);
@@ -786,27 +980,44 @@ static int rj54n1_reg_init(struct i2c_client *client)
return ret;
}
-/* FIXME: streaming output only up to 800x600 is functional */
-static int rj54n1_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int rj54n1_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct i2c_client *client = sd->priv;
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
+ const struct rj54n1_datafmt *fmt;
+ int align = mf->code == V4L2_MBUS_FMT_SBGGR10_1X10 ||
+ mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE ||
+ mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE ||
+ mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE ||
+ mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE;
+
+ dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
+ __func__, mf->code, mf->width, mf->height);
+
+ fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
+ ARRAY_SIZE(rj54n1_colour_fmts));
+ if (!fmt) {
+ fmt = rj54n1->fmt;
+ mf->code = fmt->code;
+ }
- pix->field = V4L2_FIELD_NONE;
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = fmt->colorspace;
- if (pix->width > 800)
- pix->width = 800;
- if (pix->height > 600)
- pix->height = 600;
+ v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
+ &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);
return 0;
}
-static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int rj54n1_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct rj54n1 *rj54n1 = to_rj54n1(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- unsigned int output_w, output_h,
+ const struct rj54n1_datafmt *fmt;
+ unsigned int output_w, output_h, max_w, max_h,
input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
int ret;
@@ -814,14 +1025,13 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
* The host driver can call us without .try_fmt(), so we have to take
* care ourselves
*/
- ret = rj54n1_try_fmt(sd, f);
+ rj54n1_try_fmt(sd, mf);
/*
* Verify if the sensor has just been powered on. TODO: replace this
* with proper PM, when a suitable API is available.
*/
- if (!ret)
- ret = reg_read(client, RJ54N1_RESET_STANDBY);
+ ret = reg_read(client, RJ54N1_RESET_STANDBY);
if (ret < 0)
return ret;
@@ -831,50 +1041,105 @@ static int rj54n1_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
return ret;
}
+ dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
+ __func__, mf->code, mf->width, mf->height);
+
/* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_YUYV:
+ switch (mf->code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8_LE:
ret = reg_write(client, RJ54N1_OUT_SEL, 0);
if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
break;
- case V4L2_PIX_FMT_RGB565:
+ case V4L2_MBUS_FMT_YVYU8_2X8_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ break;
+ case V4L2_MBUS_FMT_RGB565_2X8_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ break;
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 4);
+ if (!ret)
+ ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
+ if (!ret)
+ ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
+ break;
+ case V4L2_MBUS_FMT_SBGGR10_1X10:
+ ret = reg_write(client, RJ54N1_OUT_SEL, 5);
break;
default:
ret = -EINVAL;
}
+ /* Special case: a raw mode with 10 bits of data per clock tick */
+ if (!ret)
+ ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
+ (mf->code == V4L2_MBUS_FMT_SBGGR10_1X10) << 1, 2);
+
if (ret < 0)
return ret;
- /* Supported scales 1:1 - 1:16 */
- if (pix->width < input_w / 16)
- pix->width = input_w / 16;
- if (pix->height < input_h / 16)
- pix->height = input_h / 16;
+ /* Supported scales 1:1 >= scale > 1:16 */
+ max_w = mf->width * (16 * 1024 - 1) / 1024;
+ if (input_w > max_w)
+ input_w = max_w;
+ max_h = mf->height * (16 * 1024 - 1) / 1024;
+ if (input_h > max_h)
+ input_h = max_h;
- output_w = pix->width;
- output_h = pix->height;
+ output_w = mf->width;
+ output_h = mf->height;
ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h);
if (ret < 0)
return ret;
- rj54n1->fourcc = pix->pixelformat;
+ fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
+ ARRAY_SIZE(rj54n1_colour_fmts));
+
+ rj54n1->fmt = fmt;
rj54n1->resize = ret;
rj54n1->rect.width = input_w;
rj54n1->rect.height = input_h;
rj54n1->width = output_w;
rj54n1->height = output_h;
- pix->width = output_w;
- pix->height = output_h;
- pix->field = V4L2_FIELD_NONE;
+ mf->width = output_w;
+ mf->height = output_h;
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = fmt->colorspace;
- return ret;
+ return 0;
}
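The scale handling above works in the sensor's 1/1024 fixed-point ratio (1024 means 1:1), so instead of bumping the output size up as before, the input window is now capped at just under 16 times the requested output. A small worked example of that cap (function name and values are illustrative):

/* Cap the input width so the downscale ratio stays strictly below 1:16. */
static unsigned int rj54n1_cap_input_sketch(unsigned int in_w, unsigned int out_w)
{
	unsigned int max_w = out_w * (16 * 1024 - 1) / 1024;

	/* e.g. out_w = 640: max_w = 640 * 16383 / 1024 = 10239 pixels */
	return in_w > max_w ? max_w : in_w;
}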
static int rj54n1_g_chip_ident(struct v4l2_subdev *sd,
@@ -963,6 +1228,14 @@ static const struct v4l2_queryctrl rj54n1_controls[] = {
.step = 1,
.default_value = 66,
.flags = V4L2_CTRL_FLAG_SLIDER,
+ }, {
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto white balance",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
},
};
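With the boolean control added above, auto white balance becomes reachable through the standard V4L2 control ioctls. A minimal user-space sketch (device path illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_auto_wb(const char *devnode, int on)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_AUTO_WHITE_BALANCE,
		.value	= on ? 1 : 0,
	};
	int fd = open(devnode, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return ret;
}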
@@ -976,6 +1249,7 @@ static struct soc_camera_ops rj54n1_ops = {
static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct i2c_client *client = sd->priv;
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
int data;
switch (ctrl->id) {
@@ -998,6 +1272,9 @@ static int rj54n1_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
ctrl->value = data / 2;
break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ctrl->value = rj54n1->auto_wb;
+ break;
}
return 0;
@@ -1007,6 +1284,7 @@ static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
int data;
struct i2c_client *client = sd->priv;
+ struct rj54n1 *rj54n1 = to_rj54n1(client);
const struct v4l2_queryctrl *qctrl;
qctrl = soc_camera_find_qctrl(&rj54n1_ops, ctrl->id);
@@ -1037,6 +1315,13 @@ static int rj54n1_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
else if (reg_write(client, RJ54N1_Y_GAIN, ctrl->value * 2) < 0)
return -EIO;
break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ /* Auto WB area - whole image */
+ if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->value << 7,
+ 0x80) < 0)
+ return -EIO;
+ rj54n1->auto_wb = ctrl->value;
+ break;
}
return 0;
@@ -1054,10 +1339,12 @@ static struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
static struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
.s_stream = rj54n1_s_stream,
- .s_fmt = rj54n1_s_fmt,
- .g_fmt = rj54n1_g_fmt,
- .try_fmt = rj54n1_try_fmt,
+ .s_mbus_fmt = rj54n1_s_fmt,
+ .g_mbus_fmt = rj54n1_g_fmt,
+ .try_mbus_fmt = rj54n1_try_fmt,
+ .enum_mbus_fmt = rj54n1_enum_fmt,
.g_crop = rj54n1_g_crop,
+ .s_crop = rj54n1_s_crop,
.cropcap = rj54n1_cropcap,
};
@@ -1066,21 +1353,13 @@ static struct v4l2_subdev_ops rj54n1_subdev_ops = {
.video = &rj54n1_subdev_video_ops,
};
-static int rj54n1_pin_config(struct i2c_client *client)
-{
- /*
- * Experimentally found out IOCTRL wired to 0. TODO: add to platform
- * data: 0 or 1 << 7.
- */
- return reg_write(client, RJ54N1_IOC, 0);
-}
-
/*
* Interface active, can use i2c. If it fails, it can indeed mean, that
* this wasn't our capture interface, so, we wait for the right one
*/
static int rj54n1_video_probe(struct soc_camera_device *icd,
- struct i2c_client *client)
+ struct i2c_client *client,
+ struct rj54n1_pdata *priv)
{
int data1, data2;
int ret;
@@ -1101,7 +1380,8 @@ static int rj54n1_video_probe(struct soc_camera_device *icd,
goto ei2c;
}
- ret = rj54n1_pin_config(client);
+ /* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */
+ ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7);
if (ret < 0)
goto ei2c;
@@ -1119,6 +1399,7 @@ static int rj54n1_probe(struct i2c_client *client,
struct soc_camera_device *icd = client->dev.platform_data;
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct soc_camera_link *icl;
+ struct rj54n1_pdata *rj54n1_priv;
int ret;
if (!icd) {
@@ -1127,11 +1408,13 @@ static int rj54n1_probe(struct i2c_client *client,
}
icl = to_soc_camera_link(icd);
- if (!icl) {
+ if (!icl || !icl->priv) {
dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
return -EINVAL;
}
+ rj54n1_priv = icl->priv;
+
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_warn(&adapter->dev,
"I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n");
@@ -1153,10 +1436,12 @@ static int rj54n1_probe(struct i2c_client *client,
rj54n1->rect.height = RJ54N1_MAX_HEIGHT;
rj54n1->width = RJ54N1_MAX_WIDTH;
rj54n1->height = RJ54N1_MAX_HEIGHT;
- rj54n1->fourcc = V4L2_PIX_FMT_YUYV;
+ rj54n1->fmt = &rj54n1_colour_fmts[0];
rj54n1->resize = 1024;
+ rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
+ (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);
- ret = rj54n1_video_probe(icd, client);
+ ret = rj54n1_video_probe(icd, client, rj54n1_priv);
if (ret < 0) {
icd->ops = NULL;
i2c_set_clientdata(client, NULL);
@@ -1164,9 +1449,6 @@ static int rj54n1_probe(struct i2c_client *client,
return ret;
}
- icd->formats = rj54n1_colour_formats;
- icd->num_formats = ARRAY_SIZE(rj54n1_colour_formats);
-
return ret;
}
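Note that the probe now refuses to bind unless the soc_camera_link carries driver-private platform data: mclk_freq feeds the tgclk_mhz calculation and ioctl_high selects the IOCTL pin polarity that the removed rj54n1_pin_config() used to hard-code. A board-code sketch, assuming the rj54n1_pdata layout and header name implied by the fields used above (all values illustrative):

#include <media/soc_camera.h>
#include <media/rj54n1cb0c.h>

static struct rj54n1_pdata rj54n1_priv = {
	.mclk_freq	= 24000000,	/* 24 MHz sensor master clock */
	.ioctl_high	= 0,		/* IOCTL pin wired low on this board */
};

static struct soc_camera_link rj54n1_link = {
	.bus_id		= 0,
	.i2c_adapter_id	= 0,
	.priv		= &rj54n1_priv,	/* checked by rj54n1_probe() */
};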
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 41765f3c7c2..fb742f1ae71 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -233,7 +233,6 @@ struct s2255_dev {
struct s2255_dmaqueue vidq[MAX_CHANNELS];
struct video_device *vdev[MAX_CHANNELS];
- struct list_head s2255_devlist;
struct timer_list timer;
struct s2255_fw *fw_data;
struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS];
@@ -313,8 +312,6 @@ struct s2255_fh {
/* Channels on box are in reverse order */
static unsigned long G_chnmap[MAX_CHANNELS] = {3, 2, 1, 0};
-static LIST_HEAD(s2255_devlist);
-
static int debug;
static int *s2255_debug = &debug;
@@ -1533,32 +1530,24 @@ static int vidioc_s_parm(struct file *file, void *priv,
}
static int s2255_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct s2255_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct s2255_dev *dev = video_drvdata(file);
struct s2255_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
int i = 0;
int cur_channel = -1;
int state;
- dprintk(1, "s2255: open called (minor=%d)\n", minor);
+
+ dprintk(1, "s2255: open called (dev=%s)\n",
+ video_device_node_name(vdev));
lock_kernel();
- list_for_each(list, &s2255_devlist) {
- h = list_entry(list, struct s2255_dev, s2255_devlist);
- for (i = 0; i < MAX_CHANNELS; i++) {
- if (h->vdev[i]->minor == minor) {
- cur_channel = i;
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
- }
- if ((NULL == dev) || (cur_channel == -1)) {
- unlock_kernel();
- printk(KERN_INFO "s2255: openv4l no dev\n");
- return -ENODEV;
+ for (i = 0; i < MAX_CHANNELS; i++) {
+ if (dev->vdev[i] == vdev) {
+ cur_channel = i;
+ break;
+ }
}
if (atomic_read(&dev->fw_data->fw_state) == S2255_FW_DISCONNECTING) {
@@ -1662,8 +1651,9 @@ static int s2255_open(struct file *file)
for (i = 0; i < ARRAY_SIZE(s2255_qctrl); i++)
qctl_regs[i] = s2255_qctrl[i].default_value;
- dprintk(1, "s2255drv: open minor=%d type=%s users=%d\n",
- minor, v4l2_type_names[type], dev->users[cur_channel]);
+ dprintk(1, "s2255drv: open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[type],
+ dev->users[cur_channel]);
dprintk(2, "s2255drv: open: fh=0x%08lx, dev=0x%08lx, vidq=0x%08lx\n",
(unsigned long)fh, (unsigned long)dev,
(unsigned long)&dev->vidq[cur_channel]);
@@ -1699,7 +1689,6 @@ static unsigned int s2255_poll(struct file *file,
static void s2255_destroy(struct kref *kref)
{
struct s2255_dev *dev = to_s2255_dev(kref);
- struct list_head *list;
int i;
if (!dev) {
printk(KERN_ERR "s2255drv: kref problem\n");
@@ -1733,10 +1722,6 @@ static void s2255_destroy(struct kref *kref)
usb_put_dev(dev->udev);
dprintk(1, "%s", __func__);
- while (!list_empty(&s2255_devlist)) {
- list = s2255_devlist.next;
- list_del(list);
- }
mutex_unlock(&dev->open_lock);
kfree(dev);
}
@@ -1745,7 +1730,8 @@ static int s2255_close(struct file *file)
{
struct s2255_fh *fh = file->private_data;
struct s2255_dev *dev = fh->dev;
- int minor = video_devdata(file)->minor;
+ struct video_device *vdev = video_devdata(file);
+
if (!dev)
return -ENODEV;
@@ -1765,8 +1751,8 @@ static int s2255_close(struct file *file)
mutex_unlock(&dev->open_lock);
kref_put(&dev->kref, s2255_destroy);
- dprintk(1, "s2255: close called (minor=%d, users=%d)\n",
- minor, dev->users[fh->channel]);
+ dprintk(1, "s2255: close called (dev=%s, users=%d)\n",
+ video_device_node_name(vdev), dev->users[fh->channel]);
kfree(fh);
return 0;
}
@@ -1830,7 +1816,6 @@ static struct video_device template = {
.name = "s2255v",
.fops = &s2255_fops_v4l,
.ioctl_ops = &s2255_ioctl_ops,
- .minor = -1,
.release = video_device_release,
.tvnorms = S2255_NORMS,
.current_norm = V4L2_STD_NTSC_M,
@@ -1843,7 +1828,6 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
int cur_nr = video_nr;
/* initialize all video 4 linux */
- list_add_tail(&dev->s2255_devlist, &s2255_devlist);
/* register 4 video devices */
for (i = 0; i < MAX_CHANNELS; i++) {
INIT_LIST_HEAD(&dev->vidq[i].active);
@@ -1853,6 +1837,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
dev->vdev[i] = video_device_alloc();
memcpy(dev->vdev[i], &template, sizeof(struct video_device));
dev->vdev[i]->parent = &dev->interface->dev;
+ video_set_drvdata(dev->vdev[i], dev);
if (video_nr == -1)
ret = video_register_device(dev->vdev[i],
VFL_TYPE_GRABBER,
@@ -1880,7 +1865,7 @@ static void s2255_exit_v4l(struct s2255_dev *dev)
int i;
for (i = 0; i < MAX_CHANNELS; i++) {
- if (-1 != dev->vdev[i]->minor) {
+ if (video_is_registered(dev->vdev[i])) {
video_unregister_device(dev->vdev[i]);
printk(KERN_INFO "s2255 unregistered\n");
} else {
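The s2255 open() rewrite above relies on the drvdata that s2255_probe_v4l() now attaches to every registered node; that is the generic replacement for the removed global device list keyed by minor number. The pattern, reduced to a sketch (driver names are illustrative, not part of s2255drv):

#include <linux/fs.h>
#include <linux/device.h>
#include <media/v4l2-dev.h>

struct mydrv_dev { int channels; };	/* illustrative per-device context */

static int mydrv_register(struct mydrv_dev *dev, struct video_device *vdev)
{
	video_set_drvdata(vdev, dev);	/* stash context before registering */
	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}

static int mydrv_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mydrv_dev *dev = video_drvdata(file);	/* same pointer as above */

	dev_dbg(&vdev->dev, "open %s, %d channels\n",
		video_device_node_name(vdev), dev->channels);
	return 0;
}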
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
index b624a4c01fd..5ab6a0f901c 100644
--- a/drivers/media/video/saa5246a.c
+++ b/drivers/media/video/saa5246a.c
@@ -1036,7 +1036,6 @@ static struct video_device saa_template =
.name = "saa5246a",
.fops = &saa_fops,
.release = video_device_release,
- .minor = -1,
};
static int saa5246a_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 7e40d6d99dd..03f572708b8 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -7211,9 +7211,31 @@ int saa7134_board_init2(struct saa7134_dev *dev)
}
case SAA7134_BOARD_FLYDVB_TRIO:
{
+ u8 temp = 0;
+ int rc;
u8 data[] = { 0x3c, 0x33, 0x62};
struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)};
i2c_transfer(&dev->i2c_adap, &msg, 1);
+
+ /*
+ * Send a wake-up message to the pic16C505 (IR) chip
+ * on the LifeView FlyDVB Trio
+ */
+ msg.buf = &temp;
+ msg.addr = 0x0b;
+ msg.len = 1;
+ if (1 != i2c_transfer(&dev->i2c_adap, &msg, 1)) {
+ printk(KERN_WARNING "%s: send wake up byte to pic16C505 "
+ "(IR chip) failed\n", dev->name);
+ } else {
+ msg.flags = I2C_M_RD;
+ rc = i2c_transfer(&dev->i2c_adap, &msg, 1);
+ printk(KERN_INFO "%s: probe IR chip @ i2c 0x%02x: %s\n",
+ dev->name, msg.addr,
+ (1 == rc) ? "yes" : "no");
+ if (rc == 1)
+ dev->has_remote = SAA7134_REMOTE_I2C;
+ }
break;
}
case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 0ba7f5af0fc..9f85e917f9f 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -797,27 +797,28 @@ static struct video_device *vdev_init(struct saa7134_dev *dev,
vfd->debug = video_debug;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
dev->name, type, saa7134_boards[dev->board].name);
+ video_set_drvdata(vfd, dev);
return vfd;
}
static void saa7134_unregister_video(struct saa7134_dev *dev)
{
if (dev->video_dev) {
- if (-1 != dev->video_dev->minor)
+ if (video_is_registered(dev->video_dev))
video_unregister_device(dev->video_dev);
else
video_device_release(dev->video_dev);
dev->video_dev = NULL;
}
if (dev->vbi_dev) {
- if (-1 != dev->vbi_dev->minor)
+ if (video_is_registered(dev->vbi_dev))
video_unregister_device(dev->vbi_dev);
else
video_device_release(dev->vbi_dev);
dev->vbi_dev = NULL;
}
if (dev->radio_dev) {
- if (-1 != dev->radio_dev->minor)
+ if (video_is_registered(dev->radio_dev))
video_unregister_device(dev->radio_dev);
else
video_device_release(dev->radio_dev);
@@ -1046,8 +1047,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
dev->name);
goto fail4;
}
- printk(KERN_INFO "%s: registered device video%d [v4l2]\n",
- dev->name, dev->video_dev->num);
+ printk(KERN_INFO "%s: registered device %s [v4l2]\n",
+ dev->name, video_device_node_name(dev->video_dev));
dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
@@ -1055,8 +1056,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
vbi_nr[dev->nr]);
if (err < 0)
goto fail4;
- printk(KERN_INFO "%s: registered device vbi%d\n",
- dev->name, dev->vbi_dev->num);
+ printk(KERN_INFO "%s: registered device %s\n",
+ dev->name, video_device_node_name(dev->vbi_dev));
if (card_has_radio(dev)) {
dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
@@ -1064,8 +1065,8 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
radio_nr[dev->nr]);
if (err < 0)
goto fail4;
- printk(KERN_INFO "%s: registered device radio%d\n",
- dev->name, dev->radio_dev->num);
+ printk(KERN_INFO "%s: registered device %s\n",
+ dev->name, video_device_node_name(dev->radio_dev));
}
/* everything worked */
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 296788c3bf0..7dfecfc6017 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -86,19 +86,11 @@ static int ts_init_encoder(struct saa7134_dev* dev)
static int ts_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct saa7134_dev *dev;
+ struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
int err;
- lock_kernel();
- list_for_each_entry(dev, &saa7134_devlist, devlist)
- if (dev->empress_dev && dev->empress_dev->minor == minor)
- goto found;
- unlock_kernel();
- return -ENODEV;
- found:
-
- dprintk("open minor=%d\n",minor);
+ dprintk("open dev=%s\n", video_device_node_name(vdev));
err = -EBUSY;
if (!mutex_trylock(&dev->empress_tsq.vb_lock))
goto done;
@@ -489,7 +481,6 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = {
static struct video_device saa7134_empress_template = {
.name = "saa7134-empress",
.fops = &ts_fops,
- .minor = -1,
.ioctl_ops = &ts_ioctl_ops,
.tvnorms = SAA7134_NORMS,
@@ -531,6 +522,7 @@ static int empress_init(struct saa7134_dev *dev)
INIT_WORK(&dev->empress_workqueue, empress_signal_update);
+ video_set_drvdata(dev->empress_dev, dev);
err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
empress_nr[dev->nr]);
if (err < 0) {
@@ -540,8 +532,8 @@ static int empress_init(struct saa7134_dev *dev)
dev->empress_dev = NULL;
return err;
}
- printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
- dev->name, dev->empress_dev->num);
+ printk(KERN_INFO "%s: registered device %s [mpeg]\n",
+ dev->name, video_device_node_name(dev->empress_dev));
videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops,
&dev->pci->dev, &dev->slock,
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 744918b1cd4..f8e985989ca 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -127,6 +127,61 @@ static int build_key(struct saa7134_dev *dev)
/* --------------------- Chip specific I2C key builders ----------------- */
+static int get_key_flydvb_trio(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
+{
+ int gpio;
+ int attempt = 0;
+ unsigned char b;
+
+ /* We need this to access the GPIO lines; it is used by the saa_readl macro. */
+ struct saa7134_dev *dev = ir->c->adapter->algo_data;
+
+ if (dev == NULL) {
+ dprintk("get_key_flydvb_trio: "
+ "gir->c->adapter->algo_data is NULL!\n");
+ return -EIO;
+ }
+
+ /* rising SAA7134_GPIO_GPRESCAN latches the GPIO status */
+ saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+ saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
+
+ gpio = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2);
+
+ if (0x40000 & ~gpio)
+ return 0; /* No button press */
+
+ /* Poll the IR chip: first send a wake-up byte */
+ b = 0;
+
+ while (1 != i2c_master_send(ir->c, &b, 1)) {
+ if ((attempt++) < 10) {
+ /*
+ * wait a bit before the next attempt -
+ * I don't know how to make it better
+ */
+ msleep(10);
+ continue;
+ }
+ i2cdprintk("send wake up byte to pic16C505 (IR chip)"
+ "failed %dx\n", attempt);
+ return -EIO;
+ }
+ if (1 != i2c_master_recv(ir->c, &b, 1)) {
+ i2cdprintk("read error\n");
+ return -EIO;
+ }
+
+ *ir_key = b;
+ *ir_raw = b;
+ return 1;
+}
+
static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, u32 *ir_key,
u32 *ir_raw)
{
@@ -622,6 +677,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keyup = 0x020000;
polling = 50; /* ms */
break;
+ break;
}
if (NULL == ir_codes) {
printk("%s: Oops: IR config error [card=%d]\n",
@@ -652,7 +708,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
pci_name(dev->pci));
- err = ir_input_init(input_dev, &ir->ir, ir_type, ir_codes);
+ err = ir_input_init(input_dev, &ir->ir, ir_type);
if (err < 0)
goto err_out_free;
@@ -672,7 +728,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
dev->remote = ir;
saa7134_ir_start(dev, ir);
- err = input_register_device(ir->dev);
+ err = ir_input_register(ir->dev, ir_codes);
if (err)
goto err_out_stop;
@@ -686,8 +742,6 @@ int saa7134_input_init1(struct saa7134_dev *dev)
saa7134_ir_stop(dev);
dev->remote = NULL;
err_out_free:
- ir_input_free(input_dev);
- input_free_device(input_dev);
kfree(ir);
return err;
}
@@ -698,8 +752,7 @@ void saa7134_input_fini(struct saa7134_dev *dev)
return;
saa7134_ir_stop(dev);
- ir_input_free(dev->remote->dev);
- input_unregister_device(dev->remote->dev);
+ ir_input_unregister(dev->remote->dev);
kfree(dev->remote);
dev->remote = NULL;
}
@@ -788,6 +841,12 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
info.addr = 0x40;
break;
+ case SAA7134_BOARD_FLYDVB_TRIO:
+ dev->init_data.name = "FlyDVB Trio";
+ dev->init_data.get_key = get_key_flydvb_trio;
+ dev->init_data.ir_codes = &ir_codes_flydvb_table;
+ info.addr = 0x0b;
+ break;
default:
dprintk("No I2C IR support for board %x\n", dev->board);
return;
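The board hook added above is all that is needed to attach a chip-specific key reader to the generic I2C IR polling core: fill dev->init_data and the i2c_board_info address before the IR client is instantiated. A new board would follow the same pattern; the sketch below reuses the FlyDVB Trio routines purely as placeholders, and the function name is hypothetical:

static void board_setup_ir_sketch(struct saa7134_dev *dev,
				  struct i2c_board_info *info)
{
	dev->init_data.name	= "Some new card";	/* shown by the input layer */
	dev->init_data.get_key	= get_key_flydvb_trio;	/* chip-specific poll routine */
	dev->init_data.ir_codes	= &ir_codes_flydvb_table;
	info->addr		= 0x0b;			/* I2C address of the IR chip */
}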
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 35f8daa3a35..cb732640ac4 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1326,33 +1326,26 @@ static int saa7134_resource(struct saa7134_fh *fh)
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct saa7134_dev *dev;
+ struct video_device *vdev = video_devdata(file);
+ struct saa7134_dev *dev = video_drvdata(file);
struct saa7134_fh *fh;
- enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ enum v4l2_buf_type type = 0;
int radio = 0;
- mutex_lock(&saa7134_devlist_lock);
- list_for_each_entry(dev, &saa7134_devlist, devlist) {
- if (dev->video_dev && (dev->video_dev->minor == minor))
- goto found;
- if (dev->radio_dev && (dev->radio_dev->minor == minor)) {
- radio = 1;
- goto found;
- }
- if (dev->vbi_dev && (dev->vbi_dev->minor == minor)) {
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- goto found;
- }
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ break;
+ case VFL_TYPE_VBI:
+ type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ break;
+ case VFL_TYPE_RADIO:
+ radio = 1;
+ break;
}
- mutex_unlock(&saa7134_devlist_lock);
- return -ENODEV;
-
-found:
- mutex_unlock(&saa7134_devlist_lock);
- dprintk("open minor=%d radio=%d type=%s\n",minor,radio,
- v4l2_type_names[type]);
+ dprintk("open dev=%s radio=%d type=%s\n", video_device_node_name(vdev),
+ radio, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
@@ -2502,7 +2495,6 @@ struct video_device saa7134_video_template = {
.name = "saa7134-video",
.fops = &video_fops,
.ioctl_ops = &video_ioctl_ops,
- .minor = -1,
.tvnorms = SAA7134_NORMS,
.current_norm = V4L2_STD_PAL,
};
@@ -2511,7 +2503,6 @@ struct video_device saa7134_radio_template = {
.name = "saa7134-radio",
.fops = &radio_fops,
.ioctl_ops = &radio_ioctl_ops,
- .minor = -1,
};
int saa7134_video_init1(struct saa7134_dev *dev)
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 85ffc2cba03..41d0166c0f9 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1428,8 +1428,8 @@ static int se401_probe(struct usb_interface *intf,
err("video_register_device failed");
return -EIO;
}
- dev_info(&intf->dev, "registered new video device: video%d\n",
- se401->vdev.num);
+ dev_info(&intf->dev, "registered new video device: %s\n",
+ video_device_node_name(&se401->vdev));
usb_set_intfdata(intf, se401);
return 0;
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index a4f3472d4db..d69363f0d8c 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -38,6 +38,8 @@
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-mediabus.h>
+#include <media/soc_mediabus.h>
/* register offsets for sh7722 / sh7723 */
@@ -85,7 +87,7 @@
/* per video frame buffer */
struct sh_mobile_ceu_buffer {
struct videobuf_buffer vb; /* v4l buffer must be first */
- const struct soc_camera_data_format *fmt;
+ enum v4l2_mbus_pixelcode code;
};
struct sh_mobile_ceu_dev {
@@ -105,7 +107,8 @@ struct sh_mobile_ceu_dev {
u32 cflcr;
- unsigned int is_interlaced:1;
+ enum v4l2_field field;
+
unsigned int image_mode:1;
unsigned int is_16bit:1;
};
@@ -114,8 +117,8 @@ struct sh_mobile_ceu_cam {
struct v4l2_rect ceu_rect;
unsigned int cam_width;
unsigned int cam_height;
- const struct soc_camera_data_format *extra_fmt;
- const struct soc_camera_data_format *camera_fmt;
+ const struct soc_mbus_pixelfmt *extra_fmt;
+ enum v4l2_mbus_pixelcode code;
};
static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev)
@@ -197,16 +200,19 @@ static int sh_mobile_ceu_videobuf_setup(struct videobuf_queue *vq,
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int bytes_per_pixel = (icd->current_fmt->depth + 7) >> 3;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
- *size = PAGE_ALIGN(icd->user_width * icd->user_height *
- bytes_per_pixel);
+ *size = bytes_per_line * icd->user_height;
if (0 == *count)
*count = 2;
if (pcdev->video_limit) {
- while (*size * *count > pcdev->video_limit)
+ while (PAGE_ALIGN(*size) * *count > pcdev->video_limit)
(*count)--;
}
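Buffer sizing above now goes through soc_mbus_bytes_per_line() instead of multiplying by a per-format depth, and only the final size is page-aligned when checking video_limit. A simplified sketch of what that helper is assumed to compute for the packings used in this file, plus a worked NV12 example (the real helper lives in soc_mediabus.c and may differ in detail):

#include <linux/errno.h>
#include <media/soc_mediabus.h>

static s32 bytes_per_line_sketch(u32 width, const struct soc_mbus_pixelfmt *mf)
{
	switch (mf->packing) {
	case SOC_MBUS_PACKING_NONE:
		return width * mf->bits_per_sample / 8;
	case SOC_MBUS_PACKING_2X8_PADHI:
	case SOC_MBUS_PACKING_2X8_PADLO:
	case SOC_MBUS_PACKING_EXTEND16:
		return width * 2;
	}
	return -EINVAL;
}

/* NV12 at 640x480: 640 * 12 / 8 = 960 bytes per line,
 * so *size = 960 * 480 = 460800 bytes (the familiar width * height * 3/2). */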
@@ -249,10 +255,13 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
struct soc_camera_device *icd = pcdev->icd;
dma_addr_t phys_addr_top, phys_addr_bottom;
+ unsigned long top1, top2;
+ unsigned long bottom1, bottom2;
u32 status;
int ret = 0;
- /* The hardware is _very_ picky about this sequence. Especially
+ /*
+ * The hardware is _very_ picky about this sequence. Especially
* the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
* several not-so-well documented interrupt sources in CETCR.
*/
@@ -276,25 +285,36 @@ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
if (!pcdev->active)
return ret;
+ if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
+ top1 = CDBYR;
+ top2 = CDBCR;
+ bottom1 = CDAYR;
+ bottom2 = CDACR;
+ } else {
+ top1 = CDAYR;
+ top2 = CDACR;
+ bottom1 = CDBYR;
+ bottom2 = CDBCR;
+ }
+
phys_addr_top = videobuf_to_dma_contig(pcdev->active);
- ceu_write(pcdev, CDAYR, phys_addr_top);
- if (pcdev->is_interlaced) {
+ ceu_write(pcdev, top1, phys_addr_top);
+ if (V4L2_FIELD_NONE != pcdev->field) {
phys_addr_bottom = phys_addr_top + icd->user_width;
- ceu_write(pcdev, CDBYR, phys_addr_bottom);
+ ceu_write(pcdev, bottom1, phys_addr_bottom);
}
- switch (icd->current_fmt->fourcc) {
+ switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
phys_addr_top += icd->user_width *
icd->user_height;
- ceu_write(pcdev, CDACR, phys_addr_top);
- if (pcdev->is_interlaced) {
- phys_addr_bottom = phys_addr_top +
- icd->user_width;
- ceu_write(pcdev, CDBCR, phys_addr_bottom);
+ ceu_write(pcdev, top2, phys_addr_top);
+ if (V4L2_FIELD_NONE != pcdev->field) {
+ phys_addr_bottom = phys_addr_top + icd->user_width;
+ ceu_write(pcdev, bottom2, phys_addr_bottom);
}
}
@@ -310,8 +330,13 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq,
{
struct soc_camera_device *icd = vq->priv_data;
struct sh_mobile_ceu_buffer *buf;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
int ret;
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
buf = container_of(vb, struct sh_mobile_ceu_buffer, vb);
dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %zd\n", __func__,
@@ -321,25 +346,27 @@ static int sh_mobile_ceu_videobuf_prepare(struct videobuf_queue *vq,
WARN_ON(!list_empty(&vb->queue));
#ifdef DEBUG
- /* This can be useful if you want to see if we actually fill
- * the buffer with something */
+ /*
+ * This can be useful if you want to see if we actually fill
+ * the buffer with something
+ */
memset((void *)vb->baddr, 0xaa, vb->bsize);
#endif
BUG_ON(NULL == icd->current_fmt);
- if (buf->fmt != icd->current_fmt ||
+ if (buf->code != icd->current_fmt->code ||
vb->width != icd->user_width ||
vb->height != icd->user_height ||
vb->field != field) {
- buf->fmt = icd->current_fmt;
+ buf->code = icd->current_fmt->code;
vb->width = icd->user_width;
vb->height = icd->user_height;
vb->field = field;
vb->state = VIDEOBUF_NEEDS_INIT;
}
- vb->size = vb->width * vb->height * ((buf->fmt->depth + 7) >> 3);
+ vb->size = vb->height * bytes_per_line;
if (0 != vb->baddr && vb->bsize < vb->size) {
ret = -EINVAL;
goto out;
@@ -456,6 +483,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ int ret;
if (pcdev->icd)
return -EBUSY;
@@ -466,9 +494,11 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
pm_runtime_get_sync(ici->v4l2_dev.dev);
- pcdev->icd = icd;
+ ret = sh_mobile_ceu_soft_reset(pcdev);
+ if (!ret)
+ pcdev->icd = icd;
- return sh_mobile_ceu_soft_reset(pcdev);
+ return ret;
}
/* Called with .video_lock held */
@@ -558,24 +588,35 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd,
in_width *= 2;
left_offset *= 2;
}
- width = cdwdr_width = out_width;
+ width = out_width;
+ cdwdr_width = out_width;
} else {
- unsigned int w_factor = (icd->current_fmt->depth + 7) >> 3;
+ int bytes_per_line = soc_mbus_bytes_per_line(out_width,
+ icd->current_fmt->host_fmt);
+ unsigned int w_factor;
- width = out_width * w_factor / 2;
+ width = out_width;
- if (!pcdev->is_16bit)
- w_factor *= 2;
+ switch (icd->current_fmt->host_fmt->packing) {
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ w_factor = 2;
+ break;
+ default:
+ w_factor = 1;
+ }
- in_width = rect->width * w_factor / 2;
- left_offset = left_offset * w_factor / 2;
+ in_width = rect->width * w_factor;
+ left_offset = left_offset * w_factor;
- cdwdr_width = width * 2;
+ if (bytes_per_line < 0)
+ cdwdr_width = out_width;
+ else
+ cdwdr_width = bytes_per_line;
}
height = out_height;
in_height = rect->height;
- if (pcdev->is_interlaced) {
+ if (V4L2_FIELD_NONE != pcdev->field) {
height /= 2;
in_height /= 2;
top_offset /= 2;
@@ -646,6 +687,23 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
if (!common_flags)
return -EINVAL;
+ /* Make choices based on platform preferences */
+ if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
+ common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
+ common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ }
+
ret = icd->ops->set_bus_param(icd, common_flags);
if (ret < 0)
return ret;
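The new polarity selection above only matters when both the sensor and the CEU advertise both HSYNC/VSYNC polarities; the platform flags then break the tie. A board-side sketch, assuming the sh_mobile_ceu_info platform data structure that carries these flags (name and values illustrative):

#include <media/sh_mobile_ceu.h>

/* Prefer active-low syncs when the sensor can drive either polarity. */
static struct sh_mobile_ceu_info ceu_pdata_sketch = {
	.flags = SH_CEU_FLAG_HSYNC_LOW | SH_CEU_FLAG_VSYNC_LOW,
};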
@@ -667,24 +725,24 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
value = 0x00000010; /* data fetch by default */
yuv_lineskip = 0;
- switch (icd->current_fmt->fourcc) {
+ switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */
/* fall-through */
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
- switch (cam->camera_fmt->fourcc) {
- case V4L2_PIX_FMT_UYVY:
+ switch (cam->code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8_BE:
value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
break;
- case V4L2_PIX_FMT_VYUY:
+ case V4L2_MBUS_FMT_YVYU8_2X8_BE:
value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
break;
- case V4L2_PIX_FMT_YUYV:
+ case V4L2_MBUS_FMT_YUYV8_2X8_LE:
value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
break;
- case V4L2_PIX_FMT_YVYU:
+ case V4L2_MBUS_FMT_YVYU8_2X8_LE:
value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
break;
default:
@@ -692,8 +750,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
}
}
- if (icd->current_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
- icd->current_fmt->fourcc == V4L2_PIX_FMT_NV61)
+ if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
+ icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */
value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
@@ -702,14 +760,27 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
ceu_write(pcdev, CAMCR, value);
ceu_write(pcdev, CAPCR, 0x00300000);
- ceu_write(pcdev, CAIFR, pcdev->is_interlaced ? 0x101 : 0);
+
+ switch (pcdev->field) {
+ case V4L2_FIELD_INTERLACED_TB:
+ value = 0x101;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ value = 0x102;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ ceu_write(pcdev, CAIFR, value);
sh_mobile_ceu_set_rect(icd, icd->user_width, icd->user_height);
mdelay(1);
ceu_write(pcdev, CFLCR, pcdev->cflcr);
- /* A few words about byte order (observed in Big Endian mode)
+ /*
+ * A few words about byte order (observed in Big Endian mode)
*
* In data fetch mode bytes are received in chunks of 8 bytes.
* D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
@@ -739,7 +810,8 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
return 0;
}
-static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd)
+static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -748,48 +820,75 @@ static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd)
camera_flags = icd->ops->query_bus_param(icd);
common_flags = soc_camera_bus_param_compatible(camera_flags,
make_bus_param(pcdev));
- if (!common_flags)
+ if (!common_flags || buswidth > 16 ||
+ (buswidth > 8 && !(common_flags & SOCAM_DATAWIDTH_16)))
return -EINVAL;
return 0;
}
-static const struct soc_camera_data_format sh_mobile_ceu_formats[] = {
- {
- .name = "NV12",
- .depth = 12,
- .fourcc = V4L2_PIX_FMT_NV12,
- .colorspace = V4L2_COLORSPACE_JPEG,
- },
- {
- .name = "NV21",
- .depth = 12,
- .fourcc = V4L2_PIX_FMT_NV21,
- .colorspace = V4L2_COLORSPACE_JPEG,
- },
- {
- .name = "NV16",
- .depth = 16,
- .fourcc = V4L2_PIX_FMT_NV16,
- .colorspace = V4L2_COLORSPACE_JPEG,
- },
+static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
{
- .name = "NV61",
- .depth = 16,
- .fourcc = V4L2_PIX_FMT_NV61,
- .colorspace = V4L2_COLORSPACE_JPEG,
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .name = "NV12",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .name = "NV21",
+ .bits_per_sample = 12,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .name = "NV16",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .name = "NV61",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
},
};
+/* This will be corrected as we get more formats */
+static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
struct soc_camera_format_xlate *xlate)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
int ret, k, n;
int formats = 0;
struct sh_mobile_ceu_cam *cam;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
- ret = sh_mobile_ceu_try_bus_param(icd);
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(icd->dev.parent,
+ "Invalid format code #%d: %d\n", idx, code);
+ return -EINVAL;
+ }
+
+ ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
if (ret < 0)
return 0;
@@ -807,13 +906,13 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
if (!idx)
cam->extra_fmt = NULL;
- switch (icd->formats[idx].fourcc) {
- case V4L2_PIX_FMT_UYVY:
- case V4L2_PIX_FMT_VYUY:
- case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_YVYU:
+ switch (code) {
+ case V4L2_MBUS_FMT_YUYV8_2X8_BE:
+ case V4L2_MBUS_FMT_YVYU8_2X8_BE:
+ case V4L2_MBUS_FMT_YUYV8_2X8_LE:
+ case V4L2_MBUS_FMT_YVYU8_2X8_LE:
if (cam->extra_fmt)
- goto add_single_format;
+ break;
/*
* Our case is simple so far: for any of the above four camera
@@ -824,32 +923,31 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
* the host_priv pointer and check whether the format you're
* going to add now is already there.
*/
- cam->extra_fmt = (void *)sh_mobile_ceu_formats;
+ cam->extra_fmt = sh_mobile_ceu_formats;
n = ARRAY_SIZE(sh_mobile_ceu_formats);
formats += n;
for (k = 0; xlate && k < n; k++) {
- xlate->host_fmt = &sh_mobile_ceu_formats[k];
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = icd->formats[idx].depth;
+ xlate->host_fmt = &sh_mobile_ceu_formats[k];
+ xlate->code = code;
xlate++;
- dev_dbg(dev, "Providing format %s using %s\n",
- sh_mobile_ceu_formats[k].name,
- icd->formats[idx].name);
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ sh_mobile_ceu_formats[k].name, code);
}
+ break;
default:
-add_single_format:
- /* Generic pass-through */
- formats++;
- if (xlate) {
- xlate->host_fmt = icd->formats + idx;
- xlate->cam_fmt = icd->formats + idx;
- xlate->buswidth = icd->formats[idx].depth;
- xlate++;
- dev_dbg(dev,
- "Providing format %s in pass-through mode\n",
- icd->formats[idx].name);
- }
+ if (!sh_mobile_ceu_packing_supported(fmt))
+ return 0;
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ dev_dbg(dev, "Providing format %s in pass-through mode\n",
+ xlate->host_fmt->name);
}
return formats;
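get_formats() above follows a two-pass contract: soc-camera (see the soc_camera.c hunk further down) first calls it per sensor code with xlate == NULL just to count translations, then again with a real array to fill, so it must return the same count in both passes and dereference xlate only when it is non-NULL. A sketch of the calling side (helper name illustrative):

/* Illustrative caller: count first, then fill. */
static int enumerate_one_code_sketch(struct soc_camera_host *ici,
				     struct soc_camera_device *icd, int idx,
				     struct soc_camera_format_xlate *table)
{
	int fmts = ici->ops->get_formats(icd, idx, NULL);	/* counting pass */

	if (fmts <= 0)
		return fmts;
	return ici->ops->get_formats(icd, idx, table);		/* filling pass */
}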
@@ -1029,17 +1127,15 @@ static int client_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *crop,
static int get_camera_scales(struct v4l2_subdev *sd, struct v4l2_rect *rect,
unsigned int *scale_h, unsigned int *scale_v)
{
- struct v4l2_format f;
+ struct v4l2_mbus_framefmt mf;
int ret;
- f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = v4l2_subdev_call(sd, video, g_fmt, &f);
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
if (ret < 0)
return ret;
- *scale_h = calc_generic_scale(rect->width, f.fmt.pix.width);
- *scale_v = calc_generic_scale(rect->height, f.fmt.pix.height);
+ *scale_h = calc_generic_scale(rect->width, mf.width);
+ *scale_v = calc_generic_scale(rect->height, mf.height);
return 0;
}
@@ -1054,32 +1150,29 @@ static int get_camera_subwin(struct soc_camera_device *icd,
if (!ceu_rect->width) {
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
- struct v4l2_format f;
- struct v4l2_pix_format *pix = &f.fmt.pix;
+ struct v4l2_mbus_framefmt mf;
int ret;
/* First time */
- f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = v4l2_subdev_call(sd, video, g_fmt, &f);
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
if (ret < 0)
return ret;
- dev_geo(dev, "camera fmt %ux%u\n", pix->width, pix->height);
+ dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);
- if (pix->width > 2560) {
+ if (mf.width > 2560) {
ceu_rect->width = 2560;
- ceu_rect->left = (pix->width - 2560) / 2;
+ ceu_rect->left = (mf.width - 2560) / 2;
} else {
- ceu_rect->width = pix->width;
+ ceu_rect->width = mf.width;
ceu_rect->left = 0;
}
- if (pix->height > 1920) {
+ if (mf.height > 1920) {
ceu_rect->height = 1920;
- ceu_rect->top = (pix->height - 1920) / 2;
+ ceu_rect->top = (mf.height - 1920) / 2;
} else {
- ceu_rect->height = pix->height;
+ ceu_rect->height = mf.height;
ceu_rect->top = 0;
}
@@ -1096,13 +1189,12 @@ static int get_camera_subwin(struct soc_camera_device *icd,
return 0;
}
-static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f,
- bool ceu_can_scale)
+static int client_s_fmt(struct soc_camera_device *icd,
+ struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
- struct v4l2_pix_format *pix = &f->fmt.pix;
- unsigned int width = pix->width, height = pix->height, tmp_w, tmp_h;
+ unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
unsigned int max_width, max_height;
struct v4l2_cropcap cap;
int ret;
@@ -1116,29 +1208,29 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f,
max_width = min(cap.bounds.width, 2560);
max_height = min(cap.bounds.height, 1920);
- ret = v4l2_subdev_call(sd, video, s_fmt, f);
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
if (ret < 0)
return ret;
- dev_geo(dev, "camera scaled to %ux%u\n", pix->width, pix->height);
+ dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height);
- if ((width == pix->width && height == pix->height) || !ceu_can_scale)
+ if ((width == mf->width && height == mf->height) || !ceu_can_scale)
return 0;
/* Camera set a format, but geometry is not precise, try to improve */
- tmp_w = pix->width;
- tmp_h = pix->height;
+ tmp_w = mf->width;
+ tmp_h = mf->height;
/* width <= max_width && height <= max_height - guaranteed by try_fmt */
while ((width > tmp_w || height > tmp_h) &&
tmp_w < max_width && tmp_h < max_height) {
tmp_w = min(2 * tmp_w, max_width);
tmp_h = min(2 * tmp_h, max_height);
- pix->width = tmp_w;
- pix->height = tmp_h;
- ret = v4l2_subdev_call(sd, video, s_fmt, f);
+ mf->width = tmp_w;
+ mf->height = tmp_h;
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, mf);
dev_geo(dev, "Camera scaled to %ux%u\n",
- pix->width, pix->height);
+ mf->width, mf->height);
if (ret < 0) {
/* This shouldn't happen */
dev_err(dev, "Client failed to set format: %d\n", ret);
@@ -1156,27 +1248,26 @@ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_format *f,
*/
static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
struct v4l2_rect *sub_rect, struct v4l2_rect *ceu_rect,
- struct v4l2_format *f, bool ceu_can_scale)
+ struct v4l2_mbus_framefmt *mf, bool ceu_can_scale)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct device *dev = icd->dev.parent;
- struct v4l2_format f_tmp = *f;
- struct v4l2_pix_format *pix_tmp = &f_tmp.fmt.pix;
+ struct v4l2_mbus_framefmt mf_tmp = *mf;
unsigned int scale_h, scale_v;
int ret;
/* 5. Apply iterative camera S_FMT for camera user window. */
- ret = client_s_fmt(icd, &f_tmp, ceu_can_scale);
+ ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale);
if (ret < 0)
return ret;
dev_geo(dev, "5: camera scaled to %ux%u\n",
- pix_tmp->width, pix_tmp->height);
+ mf_tmp.width, mf_tmp.height);
/* 6. Retrieve camera output window (g_fmt) */
- /* unneeded - it is already in "f_tmp" */
+ /* unneeded - it is already in "mf_tmp" */
/* 7. Calculate new camera scales. */
ret = get_camera_scales(sd, rect, &scale_h, &scale_v);
@@ -1185,10 +1276,11 @@ static int client_scale(struct soc_camera_device *icd, struct v4l2_rect *rect,
dev_geo(dev, "7: camera scales %u:%u\n", scale_h, scale_v);
- cam->cam_width = pix_tmp->width;
- cam->cam_height = pix_tmp->height;
- f->fmt.pix.width = pix_tmp->width;
- f->fmt.pix.height = pix_tmp->height;
+ cam->cam_width = mf_tmp.width;
+ cam->cam_height = mf_tmp.height;
+ mf->width = mf_tmp.width;
+ mf->height = mf_tmp.height;
+ mf->colorspace = mf_tmp.colorspace;
/*
* 8. Calculate new CEU crop - apply camera scales to previously
@@ -1252,8 +1344,7 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
struct v4l2_rect *cam_rect = &cam_crop.c, *ceu_rect = &cam->ceu_rect;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
- struct v4l2_format f;
- struct v4l2_pix_format *pix = &f.fmt.pix;
+ struct v4l2_mbus_framefmt mf;
unsigned int scale_comb_h, scale_comb_v, scale_ceu_h, scale_ceu_v,
out_width, out_height;
u32 capsr, cflcr;
@@ -1302,26 +1393,25 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
* 5. Using actual input window and calculated combined scales calculate
* camera target output window.
*/
- pix->width = scale_down(cam_rect->width, scale_comb_h);
- pix->height = scale_down(cam_rect->height, scale_comb_v);
+ mf.width = scale_down(cam_rect->width, scale_comb_h);
+ mf.height = scale_down(cam_rect->height, scale_comb_v);
- dev_geo(dev, "5: camera target %ux%u\n", pix->width, pix->height);
+ dev_geo(dev, "5: camera target %ux%u\n", mf.width, mf.height);
/* 6. - 9. */
- pix->pixelformat = cam->camera_fmt->fourcc;
- pix->colorspace = cam->camera_fmt->colorspace;
+ mf.code = cam->code;
+ mf.field = pcdev->field;
capsr = capture_save_reset(pcdev);
dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);
/* Make relative to camera rectangle */
- rect->left -= cam_rect->left;
- rect->top -= cam_rect->top;
+ rect->left -= cam_rect->left;
+ rect->top -= cam_rect->top;
- f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- ret = client_scale(icd, cam_rect, rect, ceu_rect, &f,
- pcdev->image_mode && !pcdev->is_interlaced);
+ ret = client_scale(icd, cam_rect, rect, ceu_rect, &mf,
+ pcdev->image_mode &&
+ V4L2_FIELD_NONE == pcdev->field);
dev_geo(dev, "6-9: %d\n", ret);
@@ -1368,8 +1458,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
- struct v4l2_format cam_f = *f;
- struct v4l2_pix_format *cam_pix = &cam_f.fmt.pix;
+ struct v4l2_mbus_framefmt mf;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct device *dev = icd->dev.parent;
__u32 pixfmt = pix->pixelformat;
@@ -1379,18 +1468,20 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
unsigned int scale_cam_h, scale_cam_v;
u16 scale_v, scale_h;
int ret;
- bool is_interlaced, image_mode;
+ bool image_mode;
+ enum v4l2_field field;
switch (pix->field) {
- case V4L2_FIELD_INTERLACED:
- is_interlaced = true;
- break;
- case V4L2_FIELD_ANY:
default:
pix->field = V4L2_FIELD_NONE;
/* fall-through */
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_NONE:
- is_interlaced = false;
+ field = pix->field;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ field = V4L2_FIELD_INTERLACED_TB;
break;
}
@@ -1438,9 +1529,11 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
* 4. Calculate camera output window by applying combined scales to real
* input window.
*/
- cam_pix->width = scale_down(cam_rect->width, scale_h);
- cam_pix->height = scale_down(cam_rect->height, scale_v);
- cam_pix->pixelformat = xlate->cam_fmt->fourcc;
+ mf.width = scale_down(cam_rect->width, scale_h);
+ mf.height = scale_down(cam_rect->height, scale_v);
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
switch (pixfmt) {
case V4L2_PIX_FMT_NV12:
@@ -1453,51 +1546,61 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
image_mode = false;
}
- dev_geo(dev, "4: camera output %ux%u\n",
- cam_pix->width, cam_pix->height);
+ dev_geo(dev, "4: camera output %ux%u\n", mf.width, mf.height);
/* 5. - 9. */
- ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &cam_f,
- image_mode && !is_interlaced);
+ ret = client_scale(icd, cam_rect, &cam_subrect, &ceu_rect, &mf,
+ image_mode && V4L2_FIELD_NONE == field);
dev_geo(dev, "5-9: client scale %d\n", ret);
/* Done with the camera. Now see if we can improve the result */
dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
- ret, cam_pix->width, cam_pix->height, pix->width, pix->height);
+ ret, mf.width, mf.height, pix->width, pix->height);
if (ret < 0)
return ret;
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
/* 10. Use CEU scaling to scale to the requested user window. */
/* We cannot scale up */
- if (pix->width > cam_pix->width)
- pix->width = cam_pix->width;
+ if (pix->width > mf.width)
+ pix->width = mf.width;
if (pix->width > ceu_rect.width)
pix->width = ceu_rect.width;
- if (pix->height > cam_pix->height)
- pix->height = cam_pix->height;
+ if (pix->height > mf.height)
+ pix->height = mf.height;
if (pix->height > ceu_rect.height)
pix->height = ceu_rect.height;
- /* Let's rock: scale pix->{width x height} down to width x height */
- scale_h = calc_scale(ceu_rect.width, &pix->width);
- scale_v = calc_scale(ceu_rect.height, &pix->height);
+ pix->colorspace = mf.colorspace;
+
+ if (image_mode) {
+ /* Scale pix->{width x height} down to width x height */
+ scale_h = calc_scale(ceu_rect.width, &pix->width);
+ scale_v = calc_scale(ceu_rect.height, &pix->height);
+
+ pcdev->cflcr = scale_h | (scale_v << 16);
+ } else {
+ pix->width = ceu_rect.width;
+ pix->height = ceu_rect.height;
+ scale_h = scale_v = 0;
+ pcdev->cflcr = 0;
+ }
dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
ceu_rect.width, scale_h, pix->width,
ceu_rect.height, scale_v, pix->height);
- pcdev->cflcr = scale_h | (scale_v << 16);
+ cam->code = xlate->code;
+ cam->ceu_rect = ceu_rect;
+ icd->current_fmt = xlate;
- icd->buswidth = xlate->buswidth;
- icd->current_fmt = xlate->host_fmt;
- cam->camera_fmt = xlate->cam_fmt;
- cam->ceu_rect = ceu_rect;
-
- pcdev->is_interlaced = is_interlaced;
+ pcdev->field = field;
pcdev->image_mode = image_mode;
return 0;
@@ -1509,6 +1612,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
const struct soc_camera_format_xlate *xlate;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_framefmt mf;
__u32 pixfmt = pix->pixelformat;
int width, height;
int ret;
@@ -1527,18 +1631,27 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
width = pix->width;
height = pix->height;
- pix->bytesperline = pix->width *
- DIV_ROUND_UP(xlate->host_fmt->depth, 8);
- pix->sizeimage = pix->height * pix->bytesperline;
-
- pix->pixelformat = xlate->cam_fmt->fourcc;
+ pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
+ if (pix->bytesperline < 0)
+ return pix->bytesperline;
+ pix->sizeimage = height * pix->bytesperline;
/* limit to sensor capabilities */
- ret = v4l2_subdev_call(sd, video, try_fmt, f);
- pix->pixelformat = pixfmt;
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.code = xlate->code;
+ mf.colorspace = pix->colorspace;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
if (ret < 0)
return ret;
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
switch (pixfmt) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
@@ -1547,21 +1660,25 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
/* FIXME: check against rect_max after converting soc-camera */
/* We can scale precisely, need a bigger image from camera */
if (pix->width < width || pix->height < height) {
- int tmp_w = pix->width, tmp_h = pix->height;
- pix->width = 2560;
- pix->height = 1920;
- ret = v4l2_subdev_call(sd, video, try_fmt, f);
+ /*
+ * We presume the sensor behaves sanely, i.e., if a
+ * bigger rectangle is requested, it will not return a
+ * smaller one.
+ */
+ mf.width = 2560;
+ mf.height = 1920;
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
dev_err(icd->dev.parent,
- "FIXME: try_fmt() returned %d\n", ret);
- pix->width = tmp_w;
- pix->height = tmp_h;
+ "FIXME: client try_fmt() = %d\n", ret);
+ return ret;
}
}
- if (pix->width > width)
+ /* We will scale exactly */
+ if (mf.width > width)
pix->width = width;
- if (pix->height > height)
+ if (mf.height > height)
pix->height = height;
}
@@ -1573,10 +1690,12 @@ static int sh_mobile_ceu_reqbufs(struct soc_camera_file *icf,
{
int i;
- /* This is for locking debugging only. I removed spinlocks and now I
+ /*
+ * This is for locking debugging only. I removed spinlocks and now I
* check whether .prepare is ever called on a linked buffer, or whether
* a dma IRQ can occur for an in-work or unlinked buffer. Until now
- * it hadn't triggered */
+ * it hadn't triggered
+ */
for (i = 0; i < p->count; i++) {
struct sh_mobile_ceu_buffer *buf;
@@ -1624,8 +1743,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
&sh_mobile_ceu_videobuf_ops,
icd->dev.parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
- pcdev->is_interlaced ?
- V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE,
+ pcdev->field,
sizeof(struct sh_mobile_ceu_buffer),
icd);
}
@@ -1654,7 +1772,7 @@ static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
switch (ctrl->id) {
case V4L2_CID_SHARPNESS:
- switch (icd->current_fmt->fourcc) {
+ switch (icd->current_fmt->host_fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -1825,7 +1943,7 @@ static int sh_mobile_ceu_runtime_nop(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
+static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
.runtime_suspend = sh_mobile_ceu_runtime_nop,
.runtime_resume = sh_mobile_ceu_runtime_nop,
};
@@ -1836,7 +1954,7 @@ static struct platform_driver sh_mobile_ceu_driver = {
.pm = &sh_mobile_ceu_dev_pm_ops,
},
.probe = sh_mobile_ceu_probe,
- .remove = __exit_p(sh_mobile_ceu_remove),
+ .remove = __devexit_p(sh_mobile_ceu_remove),
};
static int __init sh_mobile_ceu_init(void)
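The __exit_p() to __devexit_p() change at the end of this file is a lifetime fix rather than a cleanup: a platform device can be unbound at runtime even in a built-in kernel, and __exit_p() would have discarded the remove handler in that configuration. The usual idiom, sketched for a hypothetical driver:

#include <linux/init.h>
#include <linux/platform_device.h>

static int __devinit foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver	= { .name = "foo" },
	.probe	= foo_probe,
	.remove	= __devexit_p(foo_remove),	/* kept even when built in */
};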
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 4a7711c3e74..cbf8087b286 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -1007,8 +1007,8 @@ static int sn9c102_stream_interrupt(struct sn9c102_device* cam)
else if (cam->stream != STREAM_OFF) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "URB timeout reached. The camera is misconfigured. "
- "To use it, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "To use it, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -1734,7 +1734,8 @@ static void sn9c102_release_resources(struct kref *kref)
cam = container_of(kref, struct sn9c102_device, kref);
- DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
+ DBG(2, "V4L2 device %s deregistered",
+ video_device_node_name(cam->v4ldev));
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
usb_put_dev(cam->usbdev);
@@ -1791,8 +1792,8 @@ static int sn9c102_open(struct file *filp)
}
if (cam->users) {
- DBG(2, "Device /dev/video%d is already in use",
- cam->v4ldev->num);
+ DBG(2, "Device %s is already in use",
+ video_device_node_name(cam->v4ldev));
DBG(3, "Simultaneous opens are not supported");
/*
open() must follow the open flags and should block
@@ -1845,7 +1846,7 @@ static int sn9c102_open(struct file *filp)
cam->frame_count = 0;
sn9c102_empty_framequeues(cam);
- DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
+ DBG(3, "Video device %s is open", video_device_node_name(cam->v4ldev));
out:
mutex_unlock(&cam->open_mutex);
@@ -1870,7 +1871,7 @@ static int sn9c102_release(struct file *filp)
cam->users--;
wake_up_interruptible_nr(&cam->wait_open, 1);
- DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
+ DBG(3, "Video device %s closed", video_device_node_name(cam->v4ldev));
kref_put(&cam->kref, sn9c102_release_resources);
@@ -2433,8 +2434,8 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -2446,8 +2447,8 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
nbuffers != sn9c102_request_buffers(cam, nbuffers, cam->io)) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -ENOMEM;
}
@@ -2690,8 +2691,8 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -2702,8 +2703,8 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
nbuffers != sn9c102_request_buffers(cam, nbuffers, cam->io)) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -ENOMEM;
}
@@ -2748,9 +2749,9 @@ sn9c102_vidioc_s_jpegcomp(struct sn9c102_device* cam, void __user * arg)
err += sn9c102_set_compression(cam, &jc);
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
- DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
- "problems. To use the camera, close and open "
- "/dev/video%d again.", cam->v4ldev->num);
+ DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware problems. "
+ "To use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -3328,7 +3329,6 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, "SN9C1xx PC Camera");
cam->v4ldev->fops = &sn9c102_fops;
- cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
cam->v4ldev->parent = &udev->dev;
@@ -3346,7 +3346,8 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
+ DBG(2, "V4L2 device registered as %s",
+ video_device_node_name(cam->v4ldev));
video_set_drvdata(cam->v4ldev, cam);
cam->module_param.force_munmap = force_munmap[dev_nr];
@@ -3398,9 +3399,9 @@ static void sn9c102_usb_disconnect(struct usb_interface* intf)
DBG(2, "Disconnecting %s...", cam->v4ldev->name);
if (cam->users) {
- DBG(2, "Device /dev/video%d is open! Deregistration and "
- "memory deallocation are deferred.",
- cam->v4ldev->num);
+ DBG(2, "Device %s is open! Deregistration and memory "
+ "deallocation are deferred.",
+ video_device_node_name(cam->v4ldev));
cam->state |= DEV_MISCONFIGURED;
sn9c102_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
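
The sn9c102 hunks above all apply the same conversion: every message that used to hand-build a "/dev/video%d" string now asks the V4L2 core for the real node name. A minimal sketch of the pattern, assuming a registered struct video_device *vdev (illustration only, not part of the patch):

    /* video_device_node_name() returns the node name, e.g. "video0",
     * so drivers stop guessing the numbering scheme themselves. */
    printk(KERN_INFO "%s: now controlling %s\n",
           vdev->name, video_device_node_name(vdev));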
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 95fdeb23c2c..6b3fbcca774 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -31,6 +31,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-dev.h>
#include <media/videobuf-core.h>
+#include <media/soc_mediabus.h>
/* Default to VGA resolution */
#define DEFAULT_WIDTH 640
@@ -40,18 +41,6 @@ static LIST_HEAD(hosts);
static LIST_HEAD(devices);
static DEFINE_MUTEX(list_lock); /* Protects the list of hosts */
-const struct soc_camera_data_format *soc_camera_format_by_fourcc(
- struct soc_camera_device *icd, unsigned int fourcc)
-{
- unsigned int i;
-
- for (i = 0; i < icd->num_formats; i++)
- if (icd->formats[i].fourcc == fourcc)
- return icd->formats + i;
- return NULL;
-}
-EXPORT_SYMBOL(soc_camera_format_by_fourcc);
-
const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
struct soc_camera_device *icd, unsigned int fourcc)
{
@@ -207,21 +196,26 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
/* Always entered with .video_lock held */
static int soc_camera_init_user_formats(struct soc_camera_device *icd)
{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- int i, fmts = 0, ret;
+ int i, fmts = 0, raw_fmts = 0, ret;
+ enum v4l2_mbus_pixelcode code;
+
+ while (!v4l2_subdev_call(sd, video, enum_mbus_fmt, raw_fmts, &code))
+ raw_fmts++;
if (!ici->ops->get_formats)
/*
* Fallback mode - the host will have to serve all
* sensor-provided formats one-to-one to the user
*/
- fmts = icd->num_formats;
+ fmts = raw_fmts;
else
/*
* First pass - only count formats this host-sensor
* configuration can provide
*/
- for (i = 0; i < icd->num_formats; i++) {
+ for (i = 0; i < raw_fmts; i++) {
ret = ici->ops->get_formats(icd, i, NULL);
if (ret < 0)
return ret;
@@ -242,11 +236,12 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
/* Second pass - actually fill data formats */
fmts = 0;
- for (i = 0; i < icd->num_formats; i++)
+ for (i = 0; i < raw_fmts; i++)
if (!ici->ops->get_formats) {
- icd->user_formats[i].host_fmt = icd->formats + i;
- icd->user_formats[i].cam_fmt = icd->formats + i;
- icd->user_formats[i].buswidth = icd->formats[i].depth;
+ v4l2_subdev_call(sd, video, enum_mbus_fmt, i, &code);
+ icd->user_formats[i].host_fmt =
+ soc_mbus_get_fmtdesc(code);
+ icd->user_formats[i].code = code;
} else {
ret = ici->ops->get_formats(icd, i,
&icd->user_formats[fmts]);
@@ -255,7 +250,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
fmts += ret;
}
- icd->current_fmt = icd->user_formats[0].host_fmt;
+ icd->current_fmt = &icd->user_formats[0];
return 0;
@@ -281,7 +276,7 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
((x) >> 24) & 0xff
-/* Called with .vb_lock held */
+/* Called with .vb_lock held, or from the first open(2), see comment there */
static int soc_camera_set_fmt(struct soc_camera_file *icf,
struct v4l2_format *f)
{
@@ -302,7 +297,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
if (ret < 0) {
return ret;
} else if (!icd->current_fmt ||
- icd->current_fmt->fourcc != pix->pixelformat) {
+ icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
dev_err(&icd->dev,
"Host driver hasn't set up current format correctly!\n");
return -EINVAL;
@@ -310,6 +305,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
icd->user_width = pix->width;
icd->user_height = pix->height;
+ icd->colorspace = pix->colorspace;
icf->vb_vidq.field =
icd->field = pix->field;
@@ -369,8 +365,9 @@ static int soc_camera_open(struct file *file)
.width = icd->user_width,
.height = icd->user_height,
.field = icd->field,
- .pixelformat = icd->current_fmt->fourcc,
- .colorspace = icd->current_fmt->colorspace,
+ .colorspace = icd->colorspace,
+ .pixelformat =
+ icd->current_fmt->host_fmt->fourcc,
},
};
@@ -390,7 +387,12 @@ static int soc_camera_open(struct file *file)
goto eiciadd;
}
- /* Try to configure with default parameters */
+ /*
+ * Try to configure with default parameters. Notice: this is the
+ * very first open, so, we cannot race against other calls,
+ * apart from someone else calling open() simultaneously, but
+ * .video_lock is protecting us against it.
+ */
ret = soc_camera_set_fmt(icf, &f);
if (ret < 0)
goto esfmt;
@@ -534,7 +536,7 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
{
struct soc_camera_file *icf = file->private_data;
struct soc_camera_device *icd = icf->icd;
- const struct soc_camera_data_format *format;
+ const struct soc_mbus_pixelfmt *format;
WARN_ON(priv != file->private_data);
@@ -543,7 +545,8 @@ static int soc_camera_enum_fmt_vid_cap(struct file *file, void *priv,
format = icd->user_formats[f->index].host_fmt;
- strlcpy(f->description, format->name, sizeof(f->description));
+ if (format->name)
+ strlcpy(f->description, format->name, sizeof(f->description));
f->pixelformat = format->fourcc;
return 0;
}
@@ -560,12 +563,15 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
pix->width = icd->user_width;
pix->height = icd->user_height;
pix->field = icf->vb_vidq.field;
- pix->pixelformat = icd->current_fmt->fourcc;
- pix->bytesperline = pix->width *
- DIV_ROUND_UP(icd->current_fmt->depth, 8);
+ pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
+ pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+ icd->current_fmt->host_fmt);
+ pix->colorspace = icd->colorspace;
+ if (pix->bytesperline < 0)
+ return pix->bytesperline;
pix->sizeimage = pix->height * pix->bytesperline;
dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n",
- icd->current_fmt->fourcc);
+ icd->current_fmt->host_fmt->fourcc);
return 0;
}
@@ -621,8 +627,10 @@ static int soc_camera_streamoff(struct file *file, void *priv,
mutex_lock(&icd->video_lock);
- /* This calls buf_release from host driver's videobuf_queue_ops for all
- * remaining buffers. When the last buffer is freed, stop capture */
+ /*
+ * This calls buf_release from host driver's videobuf_queue_ops for all
+ * remaining buffers. When the last buffer is freed, stop capture
+ */
videobuf_streamoff(&icf->vb_vidq);
v4l2_subdev_call(sd, video, s_stream, 0);
@@ -892,7 +900,7 @@ static int soc_camera_probe(struct device *dev)
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct device *control = NULL;
struct v4l2_subdev *sd;
- struct v4l2_format f = {.type = V4L2_BUF_TYPE_VIDEO_CAPTURE};
+ struct v4l2_mbus_framefmt mf;
int ret;
dev_info(dev, "Probing %s\n", dev_name(dev));
@@ -963,9 +971,11 @@ static int soc_camera_probe(struct device *dev)
/* Try to improve our guess of a reasonable window format */
sd = soc_camera_to_subdev(icd);
- if (!v4l2_subdev_call(sd, video, g_fmt, &f)) {
- icd->user_width = f.fmt.pix.width;
- icd->user_height = f.fmt.pix.height;
+ if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) {
+ icd->user_width = mf.width;
+ icd->user_height = mf.height;
+ icd->colorspace = mf.colorspace;
+ icd->field = mf.field;
}
/* Do we have to sysfs_remove_link() before device_unregister()? */
@@ -1004,8 +1014,10 @@ epower:
return ret;
}
-/* This is called on device_unregister, which only means we have to disconnect
- * from the host, but not remove ourselves from the device list */
+/*
+ * This is called on device_unregister, which only means we have to disconnect
+ * from the host, but not remove ourselves from the device list
+ */
static int soc_camera_remove(struct device *dev)
{
struct soc_camera_device *icd = to_soc_camera_dev(dev);
@@ -1205,8 +1217,10 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
}
if (num < 0)
- /* ok, we have 256 cameras on this host...
- * man, stay reasonable... */
+ /*
+ * ok, we have 256 cameras on this host...
+ * man, stay reasonable...
+ */
return -ENOMEM;
icd->devnum = num;
@@ -1268,7 +1282,6 @@ static int video_dev_create(struct soc_camera_device *icd)
vdev->fops = &soc_camera_fops;
vdev->ioctl_ops = &soc_camera_ioctl_ops;
vdev->release = video_device_release;
- vdev->minor = -1;
vdev->tvnorms = V4L2_STD_UNKNOWN;
icd->vdev = vdev;
@@ -1291,8 +1304,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
!icd->ops->set_bus_param)
return -EINVAL;
- ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER,
- icd->vdev->minor);
+ ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
dev_err(&icd->dev, "video_register_device failed: %d\n", ret);
return ret;
@@ -1335,9 +1347,11 @@ escdevreg:
return ret;
}
-/* Only called on rmmod for each platform device, since they are not
+/*
+ * Only called on rmmod for each platform device, since they are not
* hot-pluggable. Now we know, that all our users - hosts and devices have
- * been unloaded already */
+ * been unloaded already
+ */
static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
{
struct soc_camera_device *icd = platform_get_drvdata(pdev);
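
With the soc_camera.c changes above, icd->current_fmt no longer points at a bare host format but at a format-translation entry pairing a media-bus code with the host pixel format. A short sketch of what callers now dereference, using only fields visible in this patch:

    /* Assumes icd->current_fmt was set from icd->user_formats[] as above. */
    const struct soc_camera_format_xlate *xlate = icd->current_fmt;
    u32 fourcc = xlate->host_fmt->fourcc;          /* what userspace sees   */
    enum v4l2_mbus_pixelcode code = xlate->code;   /* what the sensor emits */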
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index b6a575ce5da..10b003a8be8 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -22,7 +22,6 @@
struct soc_camera_platform_priv {
struct v4l2_subdev subdev;
- struct soc_camera_data_format format;
};
static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev)
@@ -58,36 +57,36 @@ soc_camera_platform_query_bus_param(struct soc_camera_device *icd)
}
static int soc_camera_platform_try_fmt(struct v4l2_subdev *sd,
- struct v4l2_format *f)
+ struct v4l2_mbus_framefmt *mf)
{
struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- struct v4l2_pix_format *pix = &f->fmt.pix;
- pix->width = p->format.width;
- pix->height = p->format.height;
+ mf->width = p->format.width;
+ mf->height = p->format.height;
+ mf->code = p->format.code;
+ mf->colorspace = p->format.colorspace;
+
return 0;
}
-static void soc_camera_platform_video_probe(struct soc_camera_device *icd,
- struct platform_device *pdev)
+static struct v4l2_subdev_core_ops platform_subdev_core_ops;
+
+static int soc_camera_platform_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
{
- struct soc_camera_platform_priv *priv = get_priv(pdev);
- struct soc_camera_platform_info *p = pdev->dev.platform_data;
+ struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd);
- priv->format.name = p->format_name;
- priv->format.depth = p->format_depth;
- priv->format.fourcc = p->format.pixelformat;
- priv->format.colorspace = p->format.colorspace;
+ if (index)
+ return -EINVAL;
- icd->formats = &priv->format;
- icd->num_formats = 1;
+ *code = p->format.code;
+ return 0;
}
-static struct v4l2_subdev_core_ops platform_subdev_core_ops;
-
static struct v4l2_subdev_video_ops platform_subdev_video_ops = {
.s_stream = soc_camera_platform_s_stream,
- .try_fmt = soc_camera_platform_try_fmt,
+ .try_mbus_fmt = soc_camera_platform_try_fmt,
+ .enum_mbus_fmt = soc_camera_platform_enum_fmt,
};
static struct v4l2_subdev_ops platform_subdev_ops = {
@@ -128,13 +127,10 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
/* Set the control device reference */
dev_set_drvdata(&icd->dev, &pdev->dev);
- icd->y_skip_top = 0;
- icd->ops = &soc_camera_platform_ops;
+ icd->ops = &soc_camera_platform_ops;
ici = to_soc_camera_host(icd->dev.parent);
- soc_camera_platform_video_probe(icd, pdev);
-
v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
v4l2_set_subdevdata(&priv->subdev, p);
strncpy(priv->subdev.name, dev_name(&pdev->dev), V4L2_SUBDEV_NAME_SIZE);
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
new file mode 100644
index 00000000000..f8d5c87dc2a
--- /dev/null
+++ b/drivers/media/video/soc_mediabus.c
@@ -0,0 +1,157 @@
+/*
+ * soc-camera media bus helper routines
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/soc_mediabus.h>
+
+#define MBUS_IDX(f) (V4L2_MBUS_FMT_ ## f - V4L2_MBUS_FMT_FIXED - 1)
+
+static const struct soc_mbus_pixelfmt mbus_fmt[] = {
+ [MBUS_IDX(YUYV8_2X8_LE)] = {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "YUYV",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(YVYU8_2X8_LE)] = {
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .name = "YVYU",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(YUYV8_2X8_BE)] = {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(YVYU8_2X8_BE)] = {
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .name = "VYUY",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(RGB555_2X8_PADHI_LE)] = {
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .name = "RGB555",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(RGB555_2X8_PADHI_BE)] = {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "RGB555X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(RGB565_2X8_LE)] = {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(RGB565_2X8_BE)] = {
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .name = "RGB565X",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(SBGGR8_1X8)] = {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .name = "Bayer 8 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(SBGGR10_1X10)] = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(GREY8_1X8)] = {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .name = "Grey",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(Y10_1X10)] = {
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .name = "Grey 10bit",
+ .bits_per_sample = 10,
+ .packing = SOC_MBUS_PACKING_EXTEND16,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(SBGGR10_2X8_PADHI_LE)] = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(SBGGR10_2X8_PADLO_LE)] = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADLO,
+ .order = SOC_MBUS_ORDER_LE,
+ }, [MBUS_IDX(SBGGR10_2X8_PADHI_BE)] = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_BE,
+ }, [MBUS_IDX(SBGGR10_2X8_PADLO_BE)] = {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .name = "Bayer 10 BGGR",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADLO,
+ .order = SOC_MBUS_ORDER_BE,
+ },
+};
+
+s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
+{
+ switch (mf->packing) {
+ case SOC_MBUS_PACKING_NONE:
+ return width * mf->bits_per_sample / 8;
+ case SOC_MBUS_PACKING_2X8_PADHI:
+ case SOC_MBUS_PACKING_2X8_PADLO:
+ case SOC_MBUS_PACKING_EXTEND16:
+ return width * 2;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(soc_mbus_bytes_per_line);
+
+const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
+ enum v4l2_mbus_pixelcode code)
+{
+ if ((unsigned int)(code - V4L2_MBUS_FMT_FIXED) > ARRAY_SIZE(mbus_fmt))
+ return NULL;
+ return mbus_fmt + code - V4L2_MBUS_FMT_FIXED - 1;
+}
+EXPORT_SYMBOL(soc_mbus_get_fmtdesc);
+
+static int __init soc_mbus_init(void)
+{
+ return 0;
+}
+
+static void __exit soc_mbus_exit(void)
+{
+}
+
+module_init(soc_mbus_init);
+module_exit(soc_mbus_exit);
+
+MODULE_DESCRIPTION("soc-camera media bus interface");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_LICENSE("GPL v2");
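
The new soc_mediabus.c is a lookup layer: soc_mbus_get_fmtdesc() maps a media-bus pixel code to a host pixel format descriptor, and soc_mbus_bytes_per_line() derives the line size from that descriptor's packing. A usage sketch built only on definitions in this file (the 640x480 geometry is an arbitrary example):

    /* YUYV8_2X8_LE uses SOC_MBUS_PACKING_2X8_PADHI, i.e. two bytes per pixel. */
    const struct soc_mbus_pixelfmt *fmt =
            soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8_LE);
    s32 bpl = soc_mbus_bytes_per_line(640, fmt);    /* 640 * 2 = 1280 */
    u32 sizeimage = 0;

    if (fmt && bpl > 0)
            sizeimage = bpl * 480;                  /* one full frame */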
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 6b41865f42b..f07a0f6b71c 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1307,7 +1307,6 @@ static void stk_v4l_dev_release(struct video_device *vd)
static struct video_device stk_v4l_data = {
.name = "stkwebcam",
- .minor = -1,
.tvnorms = V4L2_STD_UNKNOWN,
.current_norm = V4L2_STD_UNKNOWN,
.fops = &v4l_stk_fops,
@@ -1327,8 +1326,8 @@ static int stk_register_video_device(struct stk_camera *dev)
if (err)
STK_ERROR("v4l registration failed\n");
else
- STK_INFO("Syntek USB2.0 Camera is now controlling video device"
- " /dev/video%d\n", dev->vdev.num);
+ STK_INFO("Syntek USB2.0 Camera is now controlling device %s\n",
+ video_device_node_name(&dev->vdev));
return err;
}
@@ -1418,8 +1417,8 @@ static void stk_camera_disconnect(struct usb_interface *interface)
wake_up_interruptible(&dev->wait_frame);
stk_remove_sysfs_files(&dev->vdev);
- STK_INFO("Syntek USB2.0 Camera release resources "
- "video device /dev/video%d\n", dev->vdev.num);
+ STK_INFO("Syntek USB2.0 Camera release resources device %s\n",
+ video_device_node_name(&dev->vdev));
video_unregister_device(&dev->vdev);
}
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index eaada39c76f..a057824e7eb 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -1921,7 +1921,6 @@ static const struct v4l2_file_operations saa_fops = {
static struct video_device saa_template = {
.name = "SAA7146A",
.fops = &saa_fops,
- .minor = -1,
.release = video_device_release_empty,
};
@@ -1972,7 +1971,6 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num)
saa->id = pdev->device;
saa->irq = pdev->irq;
- saa->video_dev.minor = -1;
saa->saa7146_adr = pci_resource_start(pdev, 0);
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &saa->revision);
@@ -2134,7 +2132,7 @@ static void stradis_release_saa(struct pci_dev *pdev)
free_irq(saa->irq, saa);
if (saa->saa7146_mem)
iounmap(saa->saa7146_mem);
- if (saa->video_dev.minor != -1)
+ if (video_is_registered(&saa->video_dev))
video_unregister_device(&saa->video_dev);
}
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 6a91714125d..5938ad8702e 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1405,7 +1405,6 @@ static struct video_device stv680_template = {
.name = "STV0680 USB camera",
.fops = &stv680_fops,
.release = video_device_release,
- .minor = -1,
};
static int stv680_probe (struct usb_interface *intf, const struct usb_device_id *id)
@@ -1467,8 +1466,8 @@ static int stv680_probe (struct usb_interface *intf, const struct usb_device_id
retval = -EIO;
goto error_vdev;
}
- PDEBUG(0, "STV(i): registered new video device: video%d",
- stv680->vdev->num);
+ PDEBUG(0, "STV(i): registered new video device: %s",
+ video_device_node_name(stv680->vdev));
usb_set_intfdata (intf, stv680);
retval = stv680_create_sysfs_files(stv680->vdev);
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 269ab044072..5b801a6e1ee 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -29,7 +29,7 @@
#include <media/tw9910.h>
#define GET_ID(val) ((val & 0xF8) >> 3)
-#define GET_ReV(val) (val & 0x07)
+#define GET_REV(val) (val & 0x07)
/*
* register offset
@@ -117,7 +117,7 @@
#define LCTL24 0x68
#define LCTL25 0x69
#define LCTL26 0x6A
-#define HSGEGIN 0x6B
+#define HSBEGIN 0x6B
#define HSEND 0x6C
#define OVSDLY 0x6D
#define OVSEND 0x6E
@@ -152,7 +152,10 @@
/* 1 : non-auto */
#define VSCTL 0x08 /* 1 : Vertical out ctrl by DVALID */
/* 0 : Vertical out ctrl by HACTIVE and DVALID */
-#define OEN 0x04 /* Output Enable together with TRI_SEL. */
+#define OEN_TRI_SEL_MASK 0x07
+#define OEN_TRI_SEL_ALL_ON 0x00 /* Enable output for Rev0/Rev1 */
+#define OEN_TRI_SEL_ALL_OFF_r0 0x06 /* All tri-stated for Rev0 */
+#define OEN_TRI_SEL_ALL_OFF_r1 0x07 /* All tri-stated for Rev1 */
/* OUTCTR1 */
#define VSP_LO 0x00 /* 0 : VS pin output polarity is active low */
@@ -178,11 +181,18 @@
* but all register content remain unchanged.
* This bit is self-resetting.
*/
+#define ACNTL1_PDN_MASK 0x0e
+#define CLK_PDN 0x08 /* system clock power down */
+#define Y_PDN 0x04 /* Luma ADC power down */
+#define C_PDN 0x02 /* Chroma ADC power down */
+
+/* ACNTL2 */
+#define ACNTL2_PDN_MASK 0x40
+#define PLL_PDN 0x40 /* PLL power down */
/* VBICNTL */
-/* RTSEL : control the real time signal
-* output from the MPOUT pin
-*/
+
+/* RTSEL : control the real time signal output from the MPOUT pin */
#define RTSEL_MASK 0x07
#define RTSEL_VLOSS 0x00 /* 0000 = Video loss */
#define RTSEL_HLOCK 0x01 /* 0001 = H-lock */
@@ -226,28 +236,7 @@ struct tw9910_priv {
struct v4l2_subdev subdev;
struct tw9910_video_info *info;
const struct tw9910_scale_ctrl *scale;
-};
-
-/*
- * register settings
- */
-
-#define ENDMARKER { 0xff, 0xff }
-
-static const struct regval_list tw9910_default_regs[] =
-{
- { OPFORM, 0x00 },
- { OUTCTR1, VSP_LO | VSSL_VVALID | HSP_HI | HSSL_HSYNC },
- ENDMARKER,
-};
-
-static const struct soc_camera_data_format tw9910_color_fmt[] = {
- {
- .name = "VYUY",
- .fourcc = V4L2_PIX_FMT_VYUY,
- .depth = 16,
- .colorspace = V4L2_COLORSPACE_SMPTE170M,
- }
+ u32 revision;
};
static const struct tw9910_scale_ctrl tw9910_ntsc_scales[] = {
@@ -340,13 +329,6 @@ static const struct tw9910_scale_ctrl tw9910_pal_scales[] = {
},
};
-static const struct tw9910_cropping_ctrl tw9910_cropping_ctrl = {
- .vdelay = 0x0012,
- .vactive = 0x00F0,
- .hdelay = 0x0010,
- .hactive = 0x02D0,
-};
-
static const struct tw9910_hsync_ctrl tw9910_hsync_ctrl = {
.start = 0x0260,
.end = 0x0300,
@@ -361,6 +343,19 @@ static struct tw9910_priv *to_tw9910(const struct i2c_client *client)
subdev);
}
+static int tw9910_mask_set(struct i2c_client *client, u8 command,
+ u8 mask, u8 set)
+{
+ s32 val = i2c_smbus_read_byte_data(client, command);
+ if (val < 0)
+ return val;
+
+ val &= ~mask;
+ val |= set & mask;
+
+ return i2c_smbus_write_byte_data(client, command, val);
+}
+
static int tw9910_set_scale(struct i2c_client *client,
const struct tw9910_scale_ctrl *scale)
{
@@ -383,47 +378,14 @@ static int tw9910_set_scale(struct i2c_client *client,
return ret;
}
-static int tw9910_set_cropping(struct i2c_client *client,
- const struct tw9910_cropping_ctrl *cropping)
-{
- int ret;
-
- ret = i2c_smbus_write_byte_data(client, CROP_HI,
- (cropping->vdelay & 0x0300) >> 2 |
- (cropping->vactive & 0x0300) >> 4 |
- (cropping->hdelay & 0x0300) >> 6 |
- (cropping->hactive & 0x0300) >> 8);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, VDELAY_LO,
- cropping->vdelay & 0x00FF);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, VACTIVE_LO,
- cropping->vactive & 0x00FF);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, HDELAY_LO,
- cropping->hdelay & 0x00FF);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, HACTIVE_LO,
- cropping->hactive & 0x00FF);
-
- return ret;
-}
-
static int tw9910_set_hsync(struct i2c_client *client,
const struct tw9910_hsync_ctrl *hsync)
{
+ struct tw9910_priv *priv = to_tw9910(client);
int ret;
/* bit 10 - 3 */
- ret = i2c_smbus_write_byte_data(client, HSGEGIN,
+ ret = i2c_smbus_write_byte_data(client, HSBEGIN,
(hsync->start & 0x07F8) >> 3);
if (ret < 0)
return ret;
@@ -434,50 +396,41 @@ static int tw9910_set_hsync(struct i2c_client *client,
if (ret < 0)
return ret;
+ /* So far only revisions 0 and 1 have been seen */
/* bit 2 - 0 */
- ret = i2c_smbus_read_byte_data(client, HSLOWCTL);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, HSLOWCTL,
- (ret & 0x88) |
- (hsync->start & 0x0007) << 4 |
- (hsync->end & 0x0007));
+ if (1 == priv->revision)
+ ret = tw9910_mask_set(client, HSLOWCTL, 0x77,
+ (hsync->start & 0x0007) << 4 |
+ (hsync->end & 0x0007));
return ret;
}
-static int tw9910_write_array(struct i2c_client *client,
- const struct regval_list *vals)
+static void tw9910_reset(struct i2c_client *client)
{
- while (vals->reg_num != 0xff) {
- int ret = i2c_smbus_write_byte_data(client,
- vals->reg_num,
- vals->value);
- if (ret < 0)
- return ret;
- vals++;
- }
- return 0;
+ tw9910_mask_set(client, ACNTL1, SRESET, SRESET);
+ msleep(1);
}
-static int tw9910_mask_set(struct i2c_client *client, u8 command,
- u8 mask, u8 set)
+static int tw9910_power(struct i2c_client *client, int enable)
{
- s32 val = i2c_smbus_read_byte_data(client, command);
- if (val < 0)
- return val;
+ int ret;
+ u8 acntl1;
+ u8 acntl2;
- val &= ~mask;
- val |= set & mask;
+ if (enable) {
+ acntl1 = 0;
+ acntl2 = 0;
+ } else {
+ acntl1 = CLK_PDN | Y_PDN | C_PDN;
+ acntl2 = PLL_PDN;
+ }
- return i2c_smbus_write_byte_data(client, command, val);
-}
+ ret = tw9910_mask_set(client, ACNTL1, ACNTL1_PDN_MASK, acntl1);
+ if (ret < 0)
+ return ret;
-static void tw9910_reset(struct i2c_client *client)
-{
- i2c_smbus_write_byte_data(client, ACNTL1, SRESET);
- msleep(1);
+ return tw9910_mask_set(client, ACNTL2, ACNTL2_PDN_MASK, acntl2);
}
static const struct tw9910_scale_ctrl*
@@ -518,27 +471,62 @@ static int tw9910_s_stream(struct v4l2_subdev *sd, int enable)
{
struct i2c_client *client = sd->priv;
struct tw9910_priv *priv = to_tw9910(client);
+ u8 val;
+ int ret;
- if (!enable)
- return 0;
+ if (!enable) {
+ switch (priv->revision) {
+ case 0:
+ val = OEN_TRI_SEL_ALL_OFF_r0;
+ break;
+ case 1:
+ val = OEN_TRI_SEL_ALL_OFF_r1;
+ break;
+ default:
+ dev_err(&client->dev, "un-supported revision\n");
+ return -EINVAL;
+ }
+ } else {
+ val = OEN_TRI_SEL_ALL_ON;
- if (!priv->scale) {
- dev_err(&client->dev, "norm select error\n");
- return -EPERM;
+ if (!priv->scale) {
+ dev_err(&client->dev, "norm select error\n");
+ return -EPERM;
+ }
+
+ dev_dbg(&client->dev, "%s %dx%d\n",
+ priv->scale->name,
+ priv->scale->width,
+ priv->scale->height);
}
- dev_dbg(&client->dev, "%s %dx%d\n",
- priv->scale->name,
- priv->scale->width,
- priv->scale->height);
+ ret = tw9910_mask_set(client, OPFORM, OEN_TRI_SEL_MASK, val);
+ if (ret < 0)
+ return ret;
- return 0;
+ return tw9910_power(client, enable);
}
static int tw9910_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
- return 0;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct i2c_client *client = sd->priv;
+ u8 val = VSSL_VVALID | HSSL_DVALID;
+
+ /*
+ * set OUTCTR1
+ *
+ * We use VVALID and DVALID signals to control VSYNC and HSYNC
+ * outputs, in this mode their polarity is inverted.
+ */
+ if (flags & SOCAM_HSYNC_ACTIVE_LOW)
+ val |= HSP_HI;
+
+ if (flags & SOCAM_VSYNC_ACTIVE_LOW)
+ val |= VSP_HI;
+
+ return i2c_smbus_write_byte_data(client, OUTCTR1, val);
}
static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd)
@@ -548,6 +536,7 @@ static unsigned long tw9910_query_bus_param(struct soc_camera_device *icd)
struct soc_camera_link *icl = to_soc_camera_link(icd);
unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
+ SOCAM_VSYNC_ACTIVE_LOW | SOCAM_HSYNC_ACTIVE_LOW |
SOCAM_DATA_ACTIVE_HIGH | priv->info->buswidth;
return soc_camera_apply_sensor_flags(icl, flags);
@@ -576,8 +565,11 @@ static int tw9910_enum_input(struct soc_camera_device *icd,
static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
+ struct i2c_client *client = sd->priv;
+ struct tw9910_priv *priv = to_tw9910(client);
+
id->ident = V4L2_IDENT_TW9910;
- id->revision = 0;
+ id->revision = priv->revision;
return 0;
}
@@ -596,7 +588,8 @@ static int tw9910_g_register(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
- /* ret = int
+ /*
+ * ret = int
* reg->val = __u64
*/
reg->val = (__u64)ret;
@@ -637,9 +630,6 @@ static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
* reset hardware
*/
tw9910_reset(client);
- ret = tw9910_write_array(client, tw9910_default_regs);
- if (ret < 0)
- goto tw9910_set_fmt_error;
/*
* set bus width
@@ -688,13 +678,6 @@ static int tw9910_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
goto tw9910_set_fmt_error;
/*
- * set cropping
- */
- ret = tw9910_set_cropping(client, &tw9910_cropping_ctrl);
- if (ret < 0)
- goto tw9910_set_fmt_error;
-
- /*
* set hsync
*/
ret = tw9910_set_hsync(client, &tw9910_hsync_ctrl);
@@ -762,11 +745,11 @@ static int tw9910_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
return 0;
}
-static int tw9910_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tw9910_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct tw9910_priv *priv = to_tw9910(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
if (!priv->scale) {
int ret;
@@ -783,74 +766,76 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
return ret;
}
- f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-
- pix->width = priv->scale->width;
- pix->height = priv->scale->height;
- pix->pixelformat = V4L2_PIX_FMT_VYUY;
- pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- pix->field = V4L2_FIELD_INTERLACED;
+ mf->width = priv->scale->width;
+ mf->height = priv->scale->height;
+ mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+ mf->field = V4L2_FIELD_INTERLACED_BT;
return 0;
}
-static int tw9910_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tw9910_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct tw9910_priv *priv = to_tw9910(client);
- struct v4l2_pix_format *pix = &f->fmt.pix;
/* See tw9910_s_crop() - no proper cropping support */
struct v4l2_crop a = {
.c = {
.left = 0,
.top = 0,
- .width = pix->width,
- .height = pix->height,
+ .width = mf->width,
+ .height = mf->height,
},
};
- int i, ret;
+ int ret;
+
+ WARN_ON(mf->field != V4L2_FIELD_ANY &&
+ mf->field != V4L2_FIELD_INTERLACED_BT);
/*
* check color format
*/
- for (i = 0; i < ARRAY_SIZE(tw9910_color_fmt); i++)
- if (pix->pixelformat == tw9910_color_fmt[i].fourcc)
- break;
-
- if (i == ARRAY_SIZE(tw9910_color_fmt))
+ if (mf->code != V4L2_MBUS_FMT_YUYV8_2X8_BE)
return -EINVAL;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
ret = tw9910_s_crop(sd, &a);
if (!ret) {
- pix->width = priv->scale->width;
- pix->height = priv->scale->height;
+ mf->width = priv->scale->width;
+ mf->height = priv->scale->height;
}
return ret;
}
-static int tw9910_try_fmt(struct v4l2_subdev *sd, struct v4l2_format *f)
+static int tw9910_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = sd->priv;
struct soc_camera_device *icd = client->dev.platform_data;
- struct v4l2_pix_format *pix = &f->fmt.pix;
const struct tw9910_scale_ctrl *scale;
- if (V4L2_FIELD_ANY == pix->field) {
- pix->field = V4L2_FIELD_INTERLACED;
- } else if (V4L2_FIELD_INTERLACED != pix->field) {
- dev_err(&client->dev, "Field type invalid.\n");
+ if (V4L2_FIELD_ANY == mf->field) {
+ mf->field = V4L2_FIELD_INTERLACED_BT;
+ } else if (V4L2_FIELD_INTERLACED_BT != mf->field) {
+ dev_err(&client->dev, "Field type %d invalid.\n", mf->field);
return -EINVAL;
}
+ mf->code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
/*
* select suitable norm
*/
- scale = tw9910_select_norm(icd, pix->width, pix->height);
+ scale = tw9910_select_norm(icd, mf->width, mf->height);
if (!scale)
return -EINVAL;
- pix->width = scale->width;
- pix->height = scale->height;
+ mf->width = scale->width;
+ mf->height = scale->height;
return 0;
}
@@ -859,7 +844,7 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
struct i2c_client *client)
{
struct tw9910_priv *priv = to_tw9910(client);
- s32 val;
+ s32 id;
/*
* We must have a parent by now. And it cannot be a wrong one.
@@ -878,23 +863,24 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
return -ENODEV;
}
- icd->formats = tw9910_color_fmt;
- icd->num_formats = ARRAY_SIZE(tw9910_color_fmt);
-
/*
* check and show Product ID
+ * So far only revisions 0 and 1 have been seen
*/
- val = i2c_smbus_read_byte_data(client, ID);
+ id = i2c_smbus_read_byte_data(client, ID);
+ priv->revision = GET_REV(id);
+ id = GET_ID(id);
- if (0x0B != GET_ID(val) ||
- 0x00 != GET_ReV(val)) {
+ if (0x0B != id ||
+ 0x01 < priv->revision) {
dev_err(&client->dev,
- "Product ID error %x:%x\n", GET_ID(val), GET_ReV(val));
+ "Product ID error %x:%x\n",
+ id, priv->revision);
return -ENODEV;
}
dev_info(&client->dev,
- "tw9910 Product ID %0x:%0x\n", GET_ID(val), GET_ReV(val));
+ "tw9910 Product ID %0x:%0x\n", id, priv->revision);
icd->vdev->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL;
icd->vdev->current_norm = V4L2_STD_NTSC;
@@ -917,14 +903,25 @@ static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
#endif
};
+static int tw9910_enum_fmt(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index)
+ return -EINVAL;
+
+ *code = V4L2_MBUS_FMT_YUYV8_2X8_BE;
+ return 0;
+}
+
static struct v4l2_subdev_video_ops tw9910_subdev_video_ops = {
.s_stream = tw9910_s_stream,
- .g_fmt = tw9910_g_fmt,
- .s_fmt = tw9910_s_fmt,
- .try_fmt = tw9910_try_fmt,
+ .g_mbus_fmt = tw9910_g_fmt,
+ .s_mbus_fmt = tw9910_s_fmt,
+ .try_mbus_fmt = tw9910_try_fmt,
.cropcap = tw9910_cropcap,
.g_crop = tw9910_g_crop,
.s_crop = tw9910_s_crop,
+ .enum_mbus_fmt = tw9910_enum_fmt,
};
static struct v4l2_subdev_ops tw9910_subdev_ops = {
@@ -954,10 +951,10 @@ static int tw9910_probe(struct i2c_client *client,
}
icl = to_soc_camera_link(icd);
- if (!icl)
+ if (!icl || !icl->priv)
return -EINVAL;
- info = container_of(icl, struct tw9910_video_info, link);
+ info = icl->priv;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&client->dev,
@@ -975,7 +972,7 @@ static int tw9910_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops);
icd->ops = &tw9910_ops;
- icd->iface = info->link.bus_id;
+ icd->iface = icl->bus_id;
ret = tw9910_video_probe(icd, client);
if (ret) {
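
Most of the tw9910 rework above funnels register updates through tw9910_mask_set(), an SMBus read-modify-write helper, so each caller changes only the bits it owns. An illustrative call, mirroring what the new tw9910_power() does when powering the chip down (constants taken from the defines added in this patch):

    /* Gate the system clock and both ADCs; other ACNTL1 bits are preserved. */
    ret = tw9910_mask_set(client, ACNTL1, ACNTL1_PDN_MASK,
                          CLK_PDN | Y_PDN | C_PDN);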
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index dea8b321fb4..5ac37c6c431 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -1053,9 +1053,9 @@ int usbvideo_RegisterVideoDevice(struct uvd *uvd)
"%s: video_register_device() successful\n", __func__);
}
- dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n",
+ dev_info(&uvd->dev->dev, "%s on %s: canvas=%s videosize=%s\n",
(uvd->handle != NULL) ? uvd->handle->drvName : "???",
- uvd->vdev.num, tmp2, tmp1);
+ video_device_node_name(&uvd->vdev), tmp2, tmp1);
usb_get_dev(uvd->dev);
return 0;
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 45fce39ec9a..6030410c667 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -796,7 +796,6 @@ static const struct v4l2_file_operations vicam_fops = {
static struct video_device vicam_template = {
.name = "ViCam-based USB Camera",
.fops = &vicam_fops,
- .minor = -1,
.release = video_device_release_empty,
};
@@ -873,8 +872,8 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
return -EIO;
}
- printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",
- cam->vdev.num);
+ printk(KERN_INFO "ViCam webcam driver now controlling device %s\n",
+ video_device_node_name(&cam->vdev));
usb_set_intfdata (intf, cam);
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index c19f51dba2e..0613922997e 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -215,8 +215,8 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
memcpy(&usbvision->i2c_adap, &i2c_adap_template,
sizeof(struct i2c_adapter));
- sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name),
- " #%d", usbvision->vdev->num);
+ sprintf(usbvision->i2c_adap.name, "%s-%d-%s", i2c_adap_template.name,
+ usbvision->dev->bus->busnum, usbvision->dev->devpath);
PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name);
usbvision->i2c_adap.dev.parent = &usbvision->dev->dev;
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index c07b0ac452a..1054546db90 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1328,7 +1328,6 @@ static struct video_device usbvision_video_template = {
.ioctl_ops = &usbvision_ioctl_ops,
.name = "usbvision-video",
.release = video_device_release,
- .minor = -1,
.tvnorms = USBVISION_NORMS,
.current_norm = V4L2_STD_PAL
};
@@ -1362,7 +1361,6 @@ static struct video_device usbvision_radio_template = {
.fops = &usbvision_radio_fops,
.name = "usbvision-radio",
.release = video_device_release,
- .minor = -1,
.ioctl_ops = &usbvision_radio_ioctl_ops,
.tvnorms = USBVISION_NORMS,
@@ -1382,7 +1380,6 @@ static struct video_device usbvision_vbi_template=
.fops = &usbvision_vbi_fops,
.release = video_device_release,
.name = "usbvision-vbi",
- .minor = -1,
};
@@ -1404,7 +1401,6 @@ static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision,
return NULL;
}
*vdev = *vdev_template;
-// vdev->minor = -1;
vdev->v4l2_dev = &usbvision->v4l2_dev;
snprintf(vdev->name, sizeof(vdev->name), "%s", name);
video_set_drvdata(vdev, usbvision);
@@ -1416,9 +1412,9 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
{
// vbi Device:
if (usbvision->vbi) {
- PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]",
- usbvision->vbi->num);
- if (usbvision->vbi->minor != -1) {
+ PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
+ video_device_node_name(usbvision->vbi));
+ if (video_is_registered(usbvision->vbi)) {
video_unregister_device(usbvision->vbi);
} else {
video_device_release(usbvision->vbi);
@@ -1428,9 +1424,9 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
// Radio Device:
if (usbvision->rdev) {
- PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]",
- usbvision->rdev->num);
- if (usbvision->rdev->minor != -1) {
+ PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
+ video_device_node_name(usbvision->rdev));
+ if (video_is_registered(usbvision->rdev)) {
video_unregister_device(usbvision->rdev);
} else {
video_device_release(usbvision->rdev);
@@ -1440,9 +1436,9 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
// Video Device:
if (usbvision->vdev) {
- PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]",
- usbvision->vdev->num);
- if (usbvision->vdev->minor != -1) {
+ PDEBUG(DBG_PROBE, "unregister %s [v4l2]",
+ video_device_node_name(usbvision->vdev));
+ if (video_is_registered(usbvision->vdev)) {
video_unregister_device(usbvision->vdev);
} else {
video_device_release(usbvision->vdev);
@@ -1466,8 +1462,8 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
video_nr)<0) {
goto err_exit;
}
- printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n",
- usbvision->nr, usbvision->vdev->num);
+ printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
+ usbvision->nr, video_device_node_name(usbvision->vdev));
// Radio Device:
if (usbvision_device_data[usbvision->DevModel].Radio) {
@@ -1483,8 +1479,8 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
radio_nr)<0) {
goto err_exit;
}
- printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n",
- usbvision->nr, usbvision->rdev->num);
+ printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
+ usbvision->nr, video_device_node_name(usbvision->rdev));
}
// vbi Device:
if (usbvision_device_data[usbvision->DevModel].vbi) {
@@ -1499,8 +1495,8 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
vbi_nr)<0) {
goto err_exit;
}
- printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n",
- usbvision->nr, usbvision->vbi->num);
+ printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device %s [v4l2] (Not Working Yet!)\n",
+ usbvision->nr, video_device_node_name(usbvision->vbi));
}
// all done
return 0;
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index c31bc50113b..391cccca7ff 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -1651,7 +1651,6 @@ static int uvc_register_video(struct uvc_device *dev,
* get another one.
*/
vdev->parent = &dev->intf->dev;
- vdev->minor = -1;
vdev->fops = &uvc_fops;
vdev->release = uvc_release;
strlcpy(vdev->name, dev->name, sizeof vdev->name);
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 05139a4f14f..9a9802830d4 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -145,7 +145,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
uvc_warn_once(stream->dev, UVC_WARN_MINMAX, "UVC non "
"compliance - GET_MIN/MAX(PROBE) incorrectly "
"supported. Enabling workaround.\n");
- memset(ctrl, 0, sizeof ctrl);
+ memset(ctrl, 0, sizeof *ctrl);
ctrl->wCompQuality = le16_to_cpup((__le16 *)data);
ret = 0;
goto out;
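
The uvc_video.c hunk is a sizeof-on-a-pointer fix; the pointed-to type is not shown in this hunk, so it is left out of the reminder below:

    memset(ctrl, 0, sizeof ctrl);    /* wrong: zeroes only pointer-sized bytes   */
    memset(ctrl, 0, sizeof *ctrl);   /* right: zeroes the structure it points to */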
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index e8e5affbabc..36b5cb86fb5 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -1024,3 +1024,50 @@ void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
+
+/**
+ * v4l_fill_dv_preset_info - fill description of a digital video preset
+ * @preset - preset value
+ * @info - pointer to struct v4l2_dv_enum_preset
+ *
+ * drivers can use this helper function to fill description of dv preset
+ * in info.
+ */
+int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info)
+{
+ static const struct v4l2_dv_preset_info {
+ u16 width;
+ u16 height;
+ const char *name;
+ } dv_presets[] = {
+ { 0, 0, "Invalid" }, /* V4L2_DV_INVALID */
+ { 720, 480, "480p@59.94" }, /* V4L2_DV_480P59_94 */
+ { 720, 576, "576p@50" }, /* V4L2_DV_576P50 */
+ { 1280, 720, "720p@24" }, /* V4L2_DV_720P24 */
+ { 1280, 720, "720p@25" }, /* V4L2_DV_720P25 */
+ { 1280, 720, "720p@30" }, /* V4L2_DV_720P30 */
+ { 1280, 720, "720p@50" }, /* V4L2_DV_720P50 */
+ { 1280, 720, "720p@59.94" }, /* V4L2_DV_720P59_94 */
+ { 1280, 720, "720p@60" }, /* V4L2_DV_720P60 */
+ { 1920, 1080, "1080i@29.97" }, /* V4L2_DV_1080I29_97 */
+ { 1920, 1080, "1080i@30" }, /* V4L2_DV_1080I30 */
+ { 1920, 1080, "1080i@25" }, /* V4L2_DV_1080I25 */
+ { 1920, 1080, "1080i@50" }, /* V4L2_DV_1080I50 */
+ { 1920, 1080, "1080i@60" }, /* V4L2_DV_1080I60 */
+ { 1920, 1080, "1080p@24" }, /* V4L2_DV_1080P24 */
+ { 1920, 1080, "1080p@25" }, /* V4L2_DV_1080P25 */
+ { 1920, 1080, "1080p@30" }, /* V4L2_DV_1080P30 */
+ { 1920, 1080, "1080p@50" }, /* V4L2_DV_1080P50 */
+ { 1920, 1080, "1080p@60" }, /* V4L2_DV_1080P60 */
+ };
+
+ if (info == NULL || preset >= ARRAY_SIZE(dv_presets))
+ return -EINVAL;
+
+ info->preset = preset;
+ info->width = dv_presets[preset].width;
+ info->height = dv_presets[preset].height;
+ strlcpy(info->name, dv_presets[preset].name, sizeof(info->name));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l_fill_dv_preset_info);
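
v4l_fill_dv_preset_info() exists so drivers answering VIDIOC_ENUM_DV_PRESETS need not duplicate the width/height/name table above. A hedged sketch of a handler built on it; the xxx_ prefix and the two-entry preset list are hypothetical:

    static int xxx_enum_dv_presets(struct file *file, void *fh,
                                   struct v4l2_dv_enum_preset *p)
    {
            /* Presets this (hypothetical) hardware can lock to. */
            static const u32 presets[] = { V4L2_DV_720P60, V4L2_DV_1080I60 };

            if (p->index >= ARRAY_SIZE(presets))
                    return -EINVAL;
            /* Fills preset, width, height and name; p->index is left as-is. */
            return v4l_fill_dv_preset_info(presets[p->index], p);
    }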
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 997975d5e02..c4150bd2633 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -1077,6 +1077,12 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_DBG_G_REGISTER:
case VIDIOC_DBG_G_CHIP_IDENT:
case VIDIOC_S_HW_FREQ_SEEK:
+ case VIDIOC_ENUM_DV_PRESETS:
+ case VIDIOC_S_DV_PRESET:
+ case VIDIOC_G_DV_PRESET:
+ case VIDIOC_QUERY_DV_PRESET:
+ case VIDIOC_S_DV_TIMINGS:
+ case VIDIOC_G_DV_TIMINGS:
ret = do_video_ioctl(file, cmd, arg);
break;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 500cbe9891a..70906991606 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -189,7 +189,7 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
if (!vdev->fops->read)
return -EINVAL;
- if (video_is_unregistered(vdev))
+ if (!video_is_registered(vdev))
return -EIO;
return vdev->fops->read(filp, buf, sz, off);
}
@@ -201,7 +201,7 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
if (!vdev->fops->write)
return -EINVAL;
- if (video_is_unregistered(vdev))
+ if (!video_is_registered(vdev))
return -EIO;
return vdev->fops->write(filp, buf, sz, off);
}
@@ -210,7 +210,7 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
- if (!vdev->fops->poll || video_is_unregistered(vdev))
+ if (!vdev->fops->poll || !video_is_registered(vdev))
return DEFAULT_POLLMASK;
return vdev->fops->poll(filp, poll);
}
@@ -250,7 +250,7 @@ static unsigned long v4l2_get_unmapped_area(struct file *filp,
if (!vdev->fops->get_unmapped_area)
return -ENOSYS;
- if (video_is_unregistered(vdev))
+ if (!video_is_registered(vdev))
return -ENODEV;
return vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
}
@@ -260,8 +260,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
{
struct video_device *vdev = video_devdata(filp);
- if (!vdev->fops->mmap ||
- video_is_unregistered(vdev))
+ if (!vdev->fops->mmap || !video_is_registered(vdev))
return -ENODEV;
return vdev->fops->mmap(filp, vm);
}
@@ -277,7 +276,7 @@ static int v4l2_open(struct inode *inode, struct file *filp)
vdev = video_devdata(filp);
/* return ENODEV if the video device has been removed
already or if it is not registered anymore. */
- if (vdev == NULL || video_is_unregistered(vdev)) {
+ if (vdev == NULL || !video_is_registered(vdev)) {
mutex_unlock(&videodev_lock);
return -ENODEV;
}
@@ -551,10 +550,11 @@ static int __video_register_device(struct video_device *vdev, int type, int nr,
vdev->dev.release = v4l2_device_release;
if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
- printk(KERN_WARNING "%s: requested %s%d, got %s%d\n",
- __func__, name_base, nr, name_base, vdev->num);
+ printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__,
+ name_base, nr, video_device_node_name(vdev));
/* Part 5: Activate this minor. The char device can now be used. */
+ set_bit(V4L2_FL_REGISTERED, &vdev->flags);
mutex_lock(&videodev_lock);
video_device[vdev->minor] = vdev;
mutex_unlock(&videodev_lock);
@@ -593,11 +593,11 @@ EXPORT_SYMBOL(video_register_device_no_warn);
void video_unregister_device(struct video_device *vdev)
{
/* Check if vdev was ever registered at all */
- if (!vdev || vdev->minor < 0)
+ if (!vdev || !video_is_registered(vdev))
return;
mutex_lock(&videodev_lock);
- set_bit(V4L2_FL_UNREGISTERED, &vdev->flags);
+ clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
mutex_unlock(&videodev_lock);
device_unregister(&vdev->dev);
}
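
The v4l2-dev.c hunks invert the registration flag: V4L2_FL_REGISTERED is set once registration completes and cleared in video_unregister_device(), and every file operation now checks the positive condition. A sketch of the helper those checks rely on, assuming it simply tests that bit (the actual definition lives in the v4l2-dev header, outside this diff):

    static inline int video_is_registered(struct video_device *vdev)
    {
            return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
    }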
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 30cc3347ae5..4b11257c318 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -284,6 +284,12 @@ static const char *v4l2_ioctls[] = {
[_IOC_NR(VIDIOC_DBG_G_CHIP_IDENT)] = "VIDIOC_DBG_G_CHIP_IDENT",
[_IOC_NR(VIDIOC_S_HW_FREQ_SEEK)] = "VIDIOC_S_HW_FREQ_SEEK",
#endif
+ [_IOC_NR(VIDIOC_ENUM_DV_PRESETS)] = "VIDIOC_ENUM_DV_PRESETS",
+ [_IOC_NR(VIDIOC_S_DV_PRESET)] = "VIDIOC_S_DV_PRESET",
+ [_IOC_NR(VIDIOC_G_DV_PRESET)] = "VIDIOC_G_DV_PRESET",
+ [_IOC_NR(VIDIOC_QUERY_DV_PRESET)] = "VIDIOC_QUERY_DV_PRESET",
+ [_IOC_NR(VIDIOC_S_DV_TIMINGS)] = "VIDIOC_S_DV_TIMINGS",
+ [_IOC_NR(VIDIOC_G_DV_TIMINGS)] = "VIDIOC_G_DV_TIMINGS",
};
#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
@@ -1135,6 +1141,19 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_input *p = arg;
+ /*
+ * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
+ * CAP_STD here based on ioctl handler provided by the
+ * driver. If the driver doesn't support these
+ * for a specific input, it must override these flags.
+ */
+ if (ops->vidioc_s_std)
+ p->capabilities |= V4L2_IN_CAP_STD;
+ if (ops->vidioc_s_dv_preset)
+ p->capabilities |= V4L2_IN_CAP_PRESETS;
+ if (ops->vidioc_s_dv_timings)
+ p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS;
+
if (!ops->vidioc_enum_input)
break;
@@ -1179,6 +1198,19 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_enum_output)
break;
+ /*
+ * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
+ * CAP_STD here based on ioctl handler provided by the
+ * driver. If the driver doesn't support these
+ * for a specific output, it must override these flags.
+ */
+ if (ops->vidioc_s_std)
+ p->capabilities |= V4L2_OUT_CAP_STD;
+ if (ops->vidioc_s_dv_preset)
+ p->capabilities |= V4L2_OUT_CAP_PRESETS;
+ if (ops->vidioc_s_dv_timings)
+ p->capabilities |= V4L2_OUT_CAP_CUSTOM_TIMINGS;
+
ret = ops->vidioc_enum_output(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1794,6 +1826,121 @@ static long __video_do_ioctl(struct file *file,
}
break;
}
+ case VIDIOC_ENUM_DV_PRESETS:
+ {
+ struct v4l2_dv_enum_preset *p = arg;
+
+ if (!ops->vidioc_enum_dv_presets)
+ break;
+
+ ret = ops->vidioc_enum_dv_presets(file, fh, p);
+ if (!ret)
+ dbgarg(cmd,
+ "index=%d, preset=%d, name=%s, width=%d,"
+ " height=%d ",
+ p->index, p->preset, p->name, p->width,
+ p->height);
+ break;
+ }
+ case VIDIOC_S_DV_PRESET:
+ {
+ struct v4l2_dv_preset *p = arg;
+
+ if (!ops->vidioc_s_dv_preset)
+ break;
+
+ dbgarg(cmd, "preset=%d\n", p->preset);
+ ret = ops->vidioc_s_dv_preset(file, fh, p);
+ break;
+ }
+ case VIDIOC_G_DV_PRESET:
+ {
+ struct v4l2_dv_preset *p = arg;
+
+ if (!ops->vidioc_g_dv_preset)
+ break;
+
+ ret = ops->vidioc_g_dv_preset(file, fh, p);
+ if (!ret)
+ dbgarg(cmd, "preset=%d\n", p->preset);
+ break;
+ }
+ case VIDIOC_QUERY_DV_PRESET:
+ {
+ struct v4l2_dv_preset *p = arg;
+
+ if (!ops->vidioc_query_dv_preset)
+ break;
+
+ ret = ops->vidioc_query_dv_preset(file, fh, p);
+ if (!ret)
+ dbgarg(cmd, "preset=%d\n", p->preset);
+ break;
+ }
+ case VIDIOC_S_DV_TIMINGS:
+ {
+ struct v4l2_dv_timings *p = arg;
+
+ if (!ops->vidioc_s_dv_timings)
+ break;
+
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ dbgarg2("bt-656/1120:interlaced=%d, pixelclock=%lld,"
+ " width=%d, height=%d, polarities=%x,"
+ " hfrontporch=%d, hsync=%d, hbackporch=%d,"
+ " vfrontporch=%d, vsync=%d, vbackporch=%d,"
+ " il_vfrontporch=%d, il_vsync=%d,"
+ " il_vbackporch=%d\n",
+ p->bt.interlaced, p->bt.pixelclock,
+ p->bt.width, p->bt.height, p->bt.polarities,
+ p->bt.hfrontporch, p->bt.hsync,
+ p->bt.hbackporch, p->bt.vfrontporch,
+ p->bt.vsync, p->bt.vbackporch,
+ p->bt.il_vfrontporch, p->bt.il_vsync,
+ p->bt.il_vbackporch);
+ ret = ops->vidioc_s_dv_timings(file, fh, p);
+ break;
+ default:
+ dbgarg2("Unknown type %d!\n", p->type);
+ break;
+ }
+ break;
+ }
+ case VIDIOC_G_DV_TIMINGS:
+ {
+ struct v4l2_dv_timings *p = arg;
+
+ if (!ops->vidioc_g_dv_timings)
+ break;
+
+ ret = ops->vidioc_g_dv_timings(file, fh, p);
+ if (!ret) {
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ dbgarg2("bt-656/1120:interlaced=%d,"
+ " pixelclock=%lld,"
+ " width=%d, height=%d, polarities=%x,"
+ " hfrontporch=%d, hsync=%d,"
+ " hbackporch=%d, vfrontporch=%d,"
+ " vsync=%d, vbackporch=%d,"
+ " il_vfrontporch=%d, il_vsync=%d,"
+ " il_vbackporch=%d\n",
+ p->bt.interlaced, p->bt.pixelclock,
+ p->bt.width, p->bt.height,
+ p->bt.polarities, p->bt.hfrontporch,
+ p->bt.hsync, p->bt.hbackporch,
+ p->bt.vfrontporch, p->bt.vsync,
+ p->bt.vbackporch, p->bt.il_vfrontporch,
+ p->bt.il_vsync, p->bt.il_vbackporch);
+ break;
+ default:
+ dbgarg2("Unknown type %d!\n", p->type);
+ break;
+ }
+ }
+ break;
+ }
default:
{
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index d25f28461da..22c01097e8a 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -141,9 +141,11 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
struct vm_area_struct *vma;
unsigned long prev_pfn, this_pfn;
unsigned long pages_done, user_address;
+ unsigned int offset;
int ret;
- mem->size = PAGE_ALIGN(vb->size);
+ offset = vb->baddr & ~PAGE_MASK;
+ mem->size = PAGE_ALIGN(vb->size + offset);
mem->is_userptr = 0;
ret = -EINVAL;
@@ -166,7 +168,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
break;
if (pages_done == 0)
- mem->dma_handle = this_pfn << PAGE_SHIFT;
+ mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
else if (this_pfn != (prev_pfn + 1))
ret = -EFAULT;
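
The videobuf-dma-contig change accounts for USERPTR buffers that do not start on a page boundary: the mapping is sized to cover the leading offset, and the DMA handle points at the exact byte userspace passed in. A numeric illustration with an invented address and 4 KiB pages:

    unsigned long baddr  = 0x10003100;            /* user buffer address   */
    unsigned int  offset = baddr & ~PAGE_MASK;    /* 0x100 bytes into page */
    /* Before: map PAGE_ALIGN(size) and return this_pfn << PAGE_SHIFT,
     * i.e. 0x10003000.  After: map PAGE_ALIGN(size + offset) and return
     * (this_pfn << PAGE_SHIFT) + offset, i.e. 0x10003100 itself. */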
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index b034a81d2b1..a15d1e7cbed 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -4068,7 +4068,6 @@ static struct video_device vdev_template = {
.fops = &vino_fops,
.ioctl_ops = &vino_ioctl_ops,
.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
- .minor = -1,
};
static void vino_module_cleanup(int stage)
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 7705fc6baf0..37632a06496 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1148,7 +1148,8 @@ static int vivi_open(struct file *file)
return -EBUSY;
}
- dprintk(dev, 1, "open /dev/video%d type=%s users=%d\n", dev->vfd->num,
+ dprintk(dev, 1, "open %s type=%s users=%d\n",
+ video_device_node_name(dev->vfd),
v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
/* allocate + initialize per filehandle data */
@@ -1221,8 +1222,7 @@ static int vivi_close(struct file *file)
struct vivi_fh *fh = file->private_data;
struct vivi_dev *dev = fh->dev;
struct vivi_dmaqueue *vidq = &dev->vidq;
-
- int minor = video_devdata(file)->minor;
+ struct video_device *vdev = video_devdata(file);
vivi_stop_thread(vidq);
videobuf_stop(&fh->vb_vidq);
@@ -1234,8 +1234,8 @@ static int vivi_close(struct file *file)
dev->users--;
mutex_unlock(&dev->mutex);
- dprintk(dev, 1, "close called (minor=%d, users=%d)\n",
- minor, dev->users);
+ dprintk(dev, 1, "close called (dev=%s, users=%d)\n",
+ video_device_node_name(vdev), dev->users);
return 0;
}
@@ -1296,7 +1296,6 @@ static struct video_device vivi_template = {
.name = "vivi",
.fops = &vivi_fops,
.ioctl_ops = &vivi_ioctl_ops,
- .minor = -1,
.release = video_device_release,
.tvnorms = V4L2_STD_525_60,
@@ -1317,8 +1316,8 @@ static int vivi_release(void)
list_del(list);
dev = list_entry(list, struct vivi_dev, vivi_devlist);
- v4l2_info(&dev->v4l2_dev, "unregistering /dev/video%d\n",
- dev->vfd->num);
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(dev->vfd));
video_unregister_device(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
@@ -1372,15 +1371,12 @@ static int __init vivi_create_instance(int inst)
/* Now that everything is fine, let's add it to device list */
list_add_tail(&dev->vivi_devlist, &vivi_devlist);
- snprintf(vfd->name, sizeof(vfd->name), "%s (%i)",
- vivi_template.name, vfd->num);
-
if (video_nr >= 0)
video_nr++;
dev->vfd = vfd;
- v4l2_info(&dev->v4l2_dev, "V4L2 device registered as /dev/video%d\n",
- vfd->num);
+ v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
+ video_device_node_name(vfd));
return 0;
rel_vdev:
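The vivi changes above, and the w9968cf/zc0301/zr364xx changes below, all follow the same pattern: drop the hard-coded minor/num field and build log messages from video_device_node_name(). A minimal sketch of the resulting idiom; the function name report_registration() is purely illustrative:

#include <linux/kernel.h>
#include <media/v4l2-dev.h>

/* Log a registered node by its device node name instead of its minor number. */
static void report_registration(struct video_device *vfd)
{
	/* video_device_node_name() returns e.g. "video0", so no "/dev/video%d" */
	pr_info("V4L2 device registered as %s\n", video_device_node_name(vfd));
}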
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index 37fcdc447db..d807eea9175 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -2323,9 +2323,9 @@ static int w9968cf_sensor_init(struct w9968cf_device* cam)
error:
cam->sensor_initialized = 0;
cam->sensor = CC_UNKNOWN;
- DBG(1, "Image sensor initialization failed for %s (/dev/video%d). "
+ DBG(1, "Image sensor initialization failed for %s (%s). "
"Try to detach and attach this device again",
- symbolic(camlist, cam->id), cam->v4ldev->num)
+ symbolic(camlist, cam->id), video_device_node_name(cam->v4ldev))
return err;
}
@@ -2571,7 +2571,8 @@ static void w9968cf_release_resources(struct w9968cf_device* cam)
{
mutex_lock(&w9968cf_devlist_mutex);
- DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->num)
+ DBG(2, "V4L device deregistered: %s",
+ video_device_node_name(cam->v4ldev))
video_unregister_device(cam->v4ldev);
list_del(&cam->v4llist);
@@ -2605,17 +2606,19 @@ static int w9968cf_open(struct file *filp)
if (cam->sensor == CC_UNKNOWN) {
DBG(2, "No supported image sensor has been detected by the "
- "'ovcamchip' module for the %s (/dev/video%d). Make "
- "sure it is loaded *before* (re)connecting the camera.",
- symbolic(camlist, cam->id), cam->v4ldev->num)
+ "'ovcamchip' module for the %s (%s). Make sure "
+ "it is loaded *before* (re)connecting the camera.",
+ symbolic(camlist, cam->id),
+ video_device_node_name(cam->v4ldev))
mutex_unlock(&cam->dev_mutex);
up_read(&w9968cf_disconnect);
return -ENODEV;
}
if (cam->users) {
- DBG(2, "%s (/dev/video%d) has been already occupied by '%s'",
- symbolic(camlist, cam->id), cam->v4ldev->num, cam->command)
+ DBG(2, "%s (%s) has been already occupied by '%s'",
+ symbolic(camlist, cam->id),
+ video_device_node_name(cam->v4ldev), cam->command)
if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) {
mutex_unlock(&cam->dev_mutex);
up_read(&w9968cf_disconnect);
@@ -2636,8 +2639,8 @@ static int w9968cf_open(struct file *filp)
mutex_lock(&cam->dev_mutex);
}
- DBG(5, "Opening '%s', /dev/video%d ...",
- symbolic(camlist, cam->id), cam->v4ldev->num)
+ DBG(5, "Opening '%s', %s ...",
+ symbolic(camlist, cam->id), video_device_node_name(cam->v4ldev))
cam->streaming = 0;
cam->misconfigured = 0;
@@ -2874,8 +2877,7 @@ static long w9968cf_v4l_ioctl(struct file *filp,
.minwidth = cam->minwidth,
.minheight = cam->minheight,
};
- sprintf(cap.name, "W996[87]CF USB Camera #%d",
- cam->v4ldev->num);
+ sprintf(cap.name, "W996[87]CF USB Camera");
cap.maxwidth = (cam->upscaling && w9968cf_vpp)
? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
: cam->maxwidth;
@@ -3485,7 +3487,6 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, symbolic(camlist, mod_id));
cam->v4ldev->fops = &w9968cf_fops;
- cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
video_set_drvdata(cam->v4ldev, cam);
cam->v4ldev->v4l2_dev = &cam->v4l2_dev;
@@ -3501,7 +3502,8 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->num)
+ DBG(2, "V4L device registered as %s",
+ video_device_node_name(cam->v4ldev))
/* Set some basic constants */
w9968cf_configure_camera(cam, udev, mod_id, dev_nr);
@@ -3557,10 +3559,10 @@ static void w9968cf_usb_disconnect(struct usb_interface* intf)
wake_up_interruptible_all(&cam->open);
if (cam->users) {
- DBG(2, "The device is open (/dev/video%d)! "
+ DBG(2, "The device is open (%s)! "
"Process name: %s. Deregistration and memory "
"deallocation are deferred on close.",
- cam->v4ldev->num, cam->command)
+ video_device_node_name(cam->v4ldev), cam->command)
cam->misconfigured = 1;
w9968cf_stop_transfer(cam);
wake_up_interruptible(&cam->wait_queue);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 312a71336fd..e44e4b5f3e5 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -538,8 +538,8 @@ static int zc0301_stream_interrupt(struct zc0301_device* cam)
else if (cam->stream != STREAM_OFF) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "URB timeout reached. The camera is misconfigured. To "
- "use it, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use it, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -640,7 +640,8 @@ static void zc0301_release_resources(struct kref *kref)
{
struct zc0301_device *cam = container_of(kref, struct zc0301_device,
kref);
- DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
+ DBG(2, "V4L2 device %s deregistered",
+ video_device_node_name(cam->v4ldev));
video_set_drvdata(cam->v4ldev, NULL);
video_unregister_device(cam->v4ldev);
usb_put_dev(cam->usbdev);
@@ -679,7 +680,8 @@ static int zc0301_open(struct file *filp)
}
if (cam->users) {
- DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->num);
+ DBG(2, "Device %s is busy...",
+ video_device_node_name(cam->v4ldev));
DBG(3, "Simultaneous opens are not supported");
if ((filp->f_flags & O_NONBLOCK) ||
(filp->f_flags & O_NDELAY)) {
@@ -722,7 +724,8 @@ static int zc0301_open(struct file *filp)
cam->frame_count = 0;
zc0301_empty_framequeues(cam);
- DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
+ DBG(3, "Video device %s is open",
+ video_device_node_name(cam->v4ldev));
out:
mutex_unlock(&cam->open_mutex);
@@ -746,7 +749,8 @@ static int zc0301_release(struct file *filp)
cam->users--;
wake_up_interruptible_nr(&cam->wait_open, 1);
- DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
+ DBG(3, "Video device %s closed",
+ video_device_node_name(cam->v4ldev));
kref_put(&cam->kref, zc0301_release_resources);
@@ -1276,8 +1280,8 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -1289,8 +1293,8 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -ENOMEM;
}
@@ -1471,8 +1475,8 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -1483,8 +1487,8 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) {
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
- "use the camera, close and open /dev/video%d again.",
- cam->v4ldev->num);
+ "use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -ENOMEM;
}
@@ -1530,8 +1534,8 @@ zc0301_vidioc_s_jpegcomp(struct zc0301_device* cam, void __user * arg)
if (err) { /* atomic, no rollback in ioctl() */
cam->state |= DEV_MISCONFIGURED;
DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
- "problems. To use the camera, close and open "
- "/dev/video%d again.", cam->v4ldev->num);
+ "problems. To use the camera, close and open %s again.",
+ video_device_node_name(cam->v4ldev));
return -EIO;
}
@@ -1984,7 +1988,6 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
strcpy(cam->v4ldev->name, "ZC0301[P] PC Camera");
cam->v4ldev->fops = &zc0301_fops;
- cam->v4ldev->minor = video_nr[dev_nr];
cam->v4ldev->release = video_device_release;
cam->v4ldev->parent = &udev->dev;
video_set_drvdata(cam->v4ldev, cam);
@@ -2003,7 +2006,8 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
goto fail;
}
- DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
+ DBG(2, "V4L2 device registered as %s",
+ video_device_node_name(cam->v4ldev));
cam->module_param.force_munmap = force_munmap[dev_nr];
cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2040,9 +2044,9 @@ static void zc0301_usb_disconnect(struct usb_interface* intf)
DBG(2, "Disconnecting %s...", cam->v4ldev->name);
if (cam->users) {
- DBG(2, "Device /dev/video%d is open! Deregistration and "
+ DBG(2, "Device %s is open! Deregistration and "
"memory deallocation are deferred.",
- cam->v4ldev->num);
+ video_device_node_name(cam->v4ldev));
cam->state |= DEV_MISCONFIGURED;
zc0301_stop_transfer(cam);
cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index e9f72ca458f..2ddffed019e 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -3387,6 +3387,5 @@ struct video_device zoran_template __devinitdata = {
.ioctl_ops = &zoran_ioctl_ops,
.release = &zoran_vdev_release,
.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
- .minor = -1
};
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 2ef110b5221..f0eae83e3d8 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -1455,7 +1455,6 @@ static struct video_device zr364xx_template = {
.fops = &zr364xx_fops,
.ioctl_ops = &zr364xx_ioctl_ops,
.release = video_device_release,
- .minor = -1,
};
@@ -1635,8 +1634,8 @@ static int zr364xx_probe(struct usb_interface *intf,
spin_lock_init(&cam->slock);
- dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n",
- cam->vdev->num);
+ dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
+ video_device_node_name(cam->vdev));
return 0;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 610e914abe6..85bc6a685e3 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1587,7 +1587,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
{
u8 __iomem *mem;
int ii;
- unsigned long mem_phys;
+ resource_size_t mem_phys;
unsigned long port;
u32 msize;
u32 psize;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
return -EINVAL;
}
ioc->memmap = mem;
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n",
- ioc->name, mem, mem_phys));
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
+ ioc->name, mem, (unsigned long long)mem_phys));
ioc->mem_phys = mem_phys;
ioc->chip = (SYSIF_REGS __iomem *)mem;
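mem_phys becomes resource_size_t above, which may be 32 or 64 bit depending on the configuration, so the debug printk casts it to unsigned long long and prints it with %llx. A small standalone sketch of that portable-print idiom, with the value chosen arbitrarily:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's resource_size_t, which may be 32 or 64 bit. */
typedef uint64_t resource_size_t;

int main(void)
{
	resource_size_t mem_phys = 0xfebff000ULL;

	/* Cast to unsigned long long so the %llx format is correct on every arch. */
	printf("mem_phys = %llx\n", (unsigned long long)mem_phys);
	return 0;
}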
diff --git a/drivers/mfd/88pm8607.c b/drivers/mfd/88pm8607.c
new file mode 100644
index 00000000000..7e3f6590799
--- /dev/null
+++ b/drivers/mfd/88pm8607.c
@@ -0,0 +1,302 @@
+/*
+ * Base driver for Marvell 88PM8607
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/88pm8607.h>
+
+
+#define PM8607_REG_RESOURCE(_start, _end) \
+{ \
+ .start = PM8607_##_start, \
+ .end = PM8607_##_end, \
+ .flags = IORESOURCE_IO, \
+}
+
+static struct resource pm8607_regulator_resources[] = {
+ PM8607_REG_RESOURCE(BUCK1, BUCK1),
+ PM8607_REG_RESOURCE(BUCK2, BUCK2),
+ PM8607_REG_RESOURCE(BUCK3, BUCK3),
+ PM8607_REG_RESOURCE(LDO1, LDO1),
+ PM8607_REG_RESOURCE(LDO2, LDO2),
+ PM8607_REG_RESOURCE(LDO3, LDO3),
+ PM8607_REG_RESOURCE(LDO4, LDO4),
+ PM8607_REG_RESOURCE(LDO5, LDO5),
+ PM8607_REG_RESOURCE(LDO6, LDO6),
+ PM8607_REG_RESOURCE(LDO7, LDO7),
+ PM8607_REG_RESOURCE(LDO8, LDO8),
+ PM8607_REG_RESOURCE(LDO9, LDO9),
+ PM8607_REG_RESOURCE(LDO10, LDO10),
+ PM8607_REG_RESOURCE(LDO12, LDO12),
+ PM8607_REG_RESOURCE(LDO14, LDO14),
+};
+
+#define PM8607_REG_DEVS(_name, _id) \
+{ \
+ .name = "88pm8607-" #_name, \
+ .num_resources = 1, \
+ .resources = &pm8607_regulator_resources[PM8607_ID_##_id], \
+}
+
+static struct mfd_cell pm8607_devs[] = {
+ PM8607_REG_DEVS(buck1, BUCK1),
+ PM8607_REG_DEVS(buck2, BUCK2),
+ PM8607_REG_DEVS(buck3, BUCK3),
+ PM8607_REG_DEVS(ldo1, LDO1),
+ PM8607_REG_DEVS(ldo2, LDO2),
+ PM8607_REG_DEVS(ldo3, LDO3),
+ PM8607_REG_DEVS(ldo4, LDO4),
+ PM8607_REG_DEVS(ldo5, LDO5),
+ PM8607_REG_DEVS(ldo6, LDO6),
+ PM8607_REG_DEVS(ldo7, LDO7),
+ PM8607_REG_DEVS(ldo8, LDO8),
+ PM8607_REG_DEVS(ldo9, LDO9),
+ PM8607_REG_DEVS(ldo10, LDO10),
+ PM8607_REG_DEVS(ldo12, LDO12),
+ PM8607_REG_DEVS(ldo14, LDO14),
+};
+
+static inline int pm8607_read_device(struct pm8607_chip *chip,
+ int reg, int bytes, void *dest)
+{
+ struct i2c_client *i2c = chip->client;
+ unsigned char data;
+ int ret;
+
+ data = (unsigned char)reg;
+ ret = i2c_master_send(i2c, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, dest, bytes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int pm8607_write_device(struct pm8607_chip *chip,
+ int reg, int bytes, void *src)
+{
+ struct i2c_client *i2c = chip->client;
+ unsigned char buf[bytes + 1];
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+
+ ret = i2c_master_send(i2c, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+int pm8607_reg_read(struct pm8607_chip *chip, int reg)
+{
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->read(chip, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ if (ret < 0)
+ return ret;
+ else
+ return (int)data;
+}
+EXPORT_SYMBOL(pm8607_reg_read);
+
+int pm8607_reg_write(struct pm8607_chip *chip, int reg,
+ unsigned char data)
+{
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->write(chip, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_reg_write);
+
+int pm8607_bulk_read(struct pm8607_chip *chip, int reg,
+ int count, unsigned char *buf)
+{
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->read(chip, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_bulk_read);
+
+int pm8607_bulk_write(struct pm8607_chip *chip, int reg,
+ int count, unsigned char *buf)
+{
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->write(chip, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_bulk_write);
+
+int pm8607_set_bits(struct pm8607_chip *chip, int reg,
+ unsigned char mask, unsigned char data)
+{
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = chip->read(chip, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = chip->write(chip, reg, 1, &value);
+out:
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm8607_set_bits);
+
+
+static const struct i2c_device_id pm8607_id_table[] = {
+ { "88PM8607", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pm8607_id_table);
+
+
+static int __devinit pm8607_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pm8607_platform_data *pdata = client->dev.platform_data;
+ struct pm8607_chip *chip;
+ int i, count;
+ int ret;
+
+ chip = kzalloc(sizeof(struct pm8607_chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->dev = &client->dev;
+ chip->read = pm8607_read_device;
+ chip->write = pm8607_write_device;
+ i2c_set_clientdata(client, chip);
+
+ mutex_init(&chip->io_lock);
+ dev_set_drvdata(chip->dev, chip);
+
+ ret = pm8607_reg_read(chip, PM8607_CHIP_ID);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret);
+ goto out;
+ }
+ if ((ret & CHIP_ID_MASK) == CHIP_ID)
+ dev_info(chip->dev, "Marvell 88PM8607 (ID: %02x) detected\n",
+ ret);
+ else {
+ dev_err(chip->dev, "Failed to detect Marvell 88PM8607. "
+ "Chip ID: %02x\n", ret);
+ goto out;
+ }
+ chip->chip_id = ret;
+
+ ret = pm8607_reg_read(chip, PM8607_BUCK3);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read BUCK3 register: %d\n", ret);
+ goto out;
+ }
+ if (ret & PM8607_BUCK3_DOUBLE)
+ chip->buck3_double = 1;
+
+ ret = pm8607_reg_read(chip, PM8607_MISC1);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read MISC1 register: %d\n", ret);
+ goto out;
+ }
+ if (pdata->i2c_port == PI2C_PORT)
+ ret |= PM8607_MISC1_PI2C;
+ else
+ ret &= ~PM8607_MISC1_PI2C;
+ ret = pm8607_reg_write(chip, PM8607_MISC1, ret);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to write MISC1 register: %d\n", ret);
+ goto out;
+ }
+
+
+ count = ARRAY_SIZE(pm8607_devs);
+ for (i = 0; i < count; i++) {
+ ret = mfd_add_devices(chip->dev, i, &pm8607_devs[i],
+ 1, NULL, 0);
+ if (ret != 0) {
+ dev_err(chip->dev, "Failed to add subdevs\n");
+ goto out;
+ }
+ }
+
+ return 0;
+
+out:
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return ret;
+}
+
+static int __devexit pm8607_remove(struct i2c_client *client)
+{
+ struct pm8607_chip *chip = i2c_get_clientdata(client);
+
+ mfd_remove_devices(chip->dev);
+ kfree(chip);
+ return 0;
+}
+
+static struct i2c_driver pm8607_driver = {
+ .driver = {
+ .name = "88PM8607",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm8607_probe,
+ .remove = __devexit_p(pm8607_remove),
+ .id_table = pm8607_id_table,
+};
+
+static int __init pm8607_init(void)
+{
+ int ret;
+ ret = i2c_add_driver(&pm8607_driver);
+ if (ret != 0)
+ pr_err("Failed to register 88PM8607 I2C driver: %d\n", ret);
+ return ret;
+}
+subsys_initcall(pm8607_init);
+
+static void __exit pm8607_exit(void)
+{
+ i2c_del_driver(&pm8607_driver);
+}
+module_exit(pm8607_exit);
+
+MODULE_DESCRIPTION("PMIC Driver for Marvell 88PM8607");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
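The new 88pm8607 core exports pm8607_reg_read(), pm8607_reg_write(), pm8607_bulk_read()/pm8607_bulk_write() and pm8607_set_bits() for the regulator/RTC/charger function drivers selected in the Kconfig hunk below. A hedged sketch of how a sub-driver might use these accessors with the register macros visible above; pm8607_example_setup() is illustrative only and not part of the patch:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mfd/88pm8607.h>

/* Illustrative helper: report the BUCK3 double-voltage bit and route the
 * chip to the power I2C port, using the exported accessors. */
static int pm8607_example_setup(struct pm8607_chip *chip)
{
	int ret;

	ret = pm8607_reg_read(chip, PM8607_BUCK3);
	if (ret < 0)
		return ret;
	if (ret & PM8607_BUCK3_DOUBLE)
		dev_info(chip->dev, "BUCK3 runs in double-voltage mode\n");

	/* Set only the PI2C routing bit, leaving the rest of MISC1 alone. */
	return pm8607_set_bits(chip, PM8607_MISC1, PM8607_MISC1_PI2C,
			       PM8607_MISC1_PI2C);
}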
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a296e717e86..87829789243 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -103,10 +103,10 @@ config MENELAUS
cell phones and PDAs.
config TWL4030_CORE
- bool "Texas Instruments TWL4030/TPS659x0 Support"
+ bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y && GENERIC_HARDIRQS
help
- Say yes here if you have TWL4030 family chip on your board.
+ Say yes here if you have a TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
facilities, and registers devices for the various functions
so that function-specific drivers can bind to them.
@@ -174,6 +174,16 @@ config PMIC_DA903X
individual components like LCD backlight, voltage regulators,
LEDs and battery-charger under the corresponding menus.
+config PMIC_ADP5520
+ bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
+ depends on I2C=y
+ help
+ Say yes here to add support for Analog Devices ADP5520 and ADP5501,
+ Multifunction Power Management IC. This includes
+ the I2C driver and the core APIs _only_, you have to select
+ individual components like LCD backlight, LEDs, GPIOs and Keypad
+ under the corresponding menus.
+
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -185,12 +195,12 @@ config MFD_WM8400
the functionality of the device.
config MFD_WM831X
- tristate "Support Wolfson Microelectronics WM831x PMICs"
+ bool "Support Wolfson Microelectronics WM831x/2x PMICs"
select MFD_CORE
- depends on I2C
+ depends on I2C=y
help
- Support for the Wolfson Microelecronics WM831x PMICs. This
- driver provides common support for accessing the device,
+ Support for the Wolfson Microelecronics WM831x and WM832x PMICs.
+ This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -319,6 +329,25 @@ config EZX_PCAP
This enables the PCAP ASIC present on EZX Phones. This is
needed for MMC, TouchScreen, Sound, USB, etc..
+config MFD_88PM8607
+ bool "Support Marvell 88PM8607"
+ depends on I2C=y
+ select MFD_CORE
+ help
+ This supports the Marvell 88PM8607 Power Management IC. This includes
+ the I2C driver and the core APIs _only_, you have to select
+ individual components like voltage regulators, RTC and
+ battery-charger under the corresponding menus.
+
+config AB4500_CORE
+ tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip"
+ depends on SPI
+ help
+ Select this option to enable access to AB4500 power management
+ chip. This connects to U8500 on the SSP/SPI bus and exports
+ read/write functions for the devices to get access to this chip.
+ This chip embeds various other multimedia functionalities as well.
+
endmenu
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 11350c1d930..ca2f2c4ff05 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -19,13 +19,14 @@ obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
obj-$(CONFIG_MFD_WM831X) += wm831x.o
wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
+wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_MENELAUS) += menelaus.o
-obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o
+obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
@@ -52,3 +53,6 @@ obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
+obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
+obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o
+obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
\ No newline at end of file
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 61348102827..fd42a80e7bf 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -900,9 +900,6 @@ static int __init ab3100_probe(struct i2c_client *client,
goto exit_no_testreg_client;
}
- strlcpy(ab3100->testreg_client->name, id->name,
- sizeof(ab3100->testreg_client->name));
-
err = ab3100_setup(ab3100);
if (err)
goto exit_no_setup;
diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c
new file mode 100644
index 00000000000..1c44c19e073
--- /dev/null
+++ b/drivers/mfd/ab4500-core.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
+ *
+ * This program is free software; you can redistribute it
+ * and/or modify it under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation.
+ *
+ * AB4500 is a companion power management chip used with U8500.
+ * On this platform, this is interfaced with SSP0 controller
+ * which is a ARM primecell pl022.
+ *
+ * At the moment the module just exports read/write features.
+ * Interrupt management to be added - TODO.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/ab4500.h>
+
+/* just required if probe fails, we need to
+ * unregister the device
+ */
+static struct spi_driver ab4500_driver;
+
+/*
+ * This function writes to any AB4500 registers using
+ * SPI protocol & before it writes it packs the data
+ * in the below 24 bit frame format
+ *
+ * *|------------------------------------|
+ * *| 23|22...18|17.......10|9|8|7......0|
+ * *| r/w bank adr data |
+ * * ------------------------------------
+ *
+ * This function shouldn't be called from interrupt
+ * context
+ */
+int ab4500_write(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr, unsigned char data)
+{
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ int err;
+ unsigned long spi_data =
+ block << 18 | addr << 10 | data;
+
+ mutex_lock(&ab4500->lock);
+ ab4500->tx_buf[0] = spi_data;
+ ab4500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab4500->tx_buf;
+ xfer.rx_buf = NULL;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ err = spi_sync(ab4500->spi, &msg);
+ mutex_unlock(&ab4500->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(ab4500_write);
+
+int ab4500_read(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr)
+{
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ unsigned long spi_data =
+ 1 << 23 | block << 18 | addr << 10;
+
+ mutex_lock(&ab4500->lock);
+ ab4500->tx_buf[0] = spi_data;
+ ab4500->rx_buf[0] = 0;
+
+ xfer.tx_buf = ab4500->tx_buf;
+ xfer.rx_buf = ab4500->rx_buf;
+ xfer.len = sizeof(unsigned long);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ spi_sync(ab4500->spi, &msg);
+ mutex_unlock(&ab4500->lock);
+
+ return ab4500->rx_buf[0];
+}
+EXPORT_SYMBOL(ab4500_read);
+
+/* ref: ab3100 core */
+#define AB4500_DEVICE(devname, devid) \
+static struct platform_device ab4500_##devname##_device = { \
+ .name = devid, \
+ .id = -1, \
+}
+
+/* list of child devices of ab4500 - all are
+ * not populated here - TODO
+ */
+AB4500_DEVICE(charger, "ab4500-charger");
+AB4500_DEVICE(audio, "ab4500-audio");
+AB4500_DEVICE(usb, "ab4500-usb");
+AB4500_DEVICE(tvout, "ab4500-tvout");
+AB4500_DEVICE(sim, "ab4500-sim");
+AB4500_DEVICE(gpadc, "ab4500-gpadc");
+AB4500_DEVICE(clkmgt, "ab4500-clkmgt");
+AB4500_DEVICE(misc, "ab4500-misc");
+
+static struct platform_device *ab4500_platform_devs[] = {
+ &ab4500_charger_device,
+ &ab4500_audio_device,
+ &ab4500_usb_device,
+ &ab4500_tvout_device,
+ &ab4500_sim_device,
+ &ab4500_gpadc_device,
+ &ab4500_clkmgt_device,
+ &ab4500_misc_device,
+};
+
+static int __init ab4500_probe(struct spi_device *spi)
+{
+ struct ab4500 *ab4500;
+ unsigned char revision;
+ int err = 0;
+ int i;
+
+ ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL);
+ if (!ab4500) {
+ dev_err(&spi->dev, "could not allocate AB4500\n");
+ err = -ENOMEM;
+ goto not_detect;
+ }
+
+ ab4500->spi = spi;
+ spi_set_drvdata(spi, ab4500);
+
+ mutex_init(&ab4500->lock);
+
+ /* read the revision register */
+ revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG);
+
+ /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */
+ if (revision == 0x0 || revision == 0x10)
+ dev_info(&spi->dev, "Detected chip: %s, revision = %x\n",
+ ab4500_driver.driver.name, revision);
+ else {
+ dev_err(&spi->dev, "unknown chip: 0x%x\n", revision);
+ goto not_detect;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) {
+ ab4500_platform_devs[i]->dev.parent =
+ &spi->dev;
+ platform_set_drvdata(ab4500_platform_devs[i], ab4500);
+ }
+
+ /* register the ab4500 platform devices */
+ platform_add_devices(ab4500_platform_devs,
+ ARRAY_SIZE(ab4500_platform_devs));
+
+ return err;
+
+ not_detect:
+ spi_unregister_driver(&ab4500_driver);
+ kfree(ab4500);
+ return err;
+}
+
+static int __devexit ab4500_remove(struct spi_device *spi)
+{
+ struct ab4500 *ab4500 =
+ spi_get_drvdata(spi);
+
+ kfree(ab4500);
+
+ return 0;
+}
+
+static struct spi_driver ab4500_driver = {
+ .driver = {
+ .name = "ab4500",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab4500_probe,
+ .remove = __devexit_p(ab4500_remove)
+};
+
+static int __devinit ab4500_init(void)
+{
+ return spi_register_driver(&ab4500_driver);
+}
+
+static void __exit ab4500_exit(void)
+{
+ spi_unregister_driver(&ab4500_driver);
+}
+
+subsys_initcall(ab4500_init);
+module_exit(ab4500_exit);
+
+MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com");
+MODULE_DESCRIPTION("AB4500 core driver");
+MODULE_LICENSE("GPL");
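ab4500_write() and ab4500_read() above pack every access into the 24-bit frame described in the comment: read/write flag in bit 23, register bank in bits 22..18, address in bits 17..10, and the data byte in bits 7..0. A small userspace sketch of that packing, using made-up bank/address/data values:

#include <stdio.h>

/* Pack an AB4500 SPI frame the way ab4500_write()/ab4500_read() do. */
static unsigned long ab4500_frame(int read, unsigned char block,
				  unsigned long addr, unsigned char data)
{
	unsigned long frame = (unsigned long)block << 18 | addr << 10;

	if (read)
		frame |= 1UL << 23;	/* read request, no data byte */
	else
		frame |= data;		/* write request carries the data */

	return frame;
}

int main(void)
{
	printf("write frame: 0x%06lx\n", ab4500_frame(0, 0x3, 0x1f, 0xa5));
	printf("read  frame: 0x%06lx\n", ab4500_frame(1, 0x3, 0x1f, 0));
	return 0;
}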
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
new file mode 100644
index 00000000000..b26644772d0
--- /dev/null
+++ b/drivers/mfd/adp5520.c
@@ -0,0 +1,379 @@
+/*
+ * Base driver for Analog Devices ADP5520/ADP5501 MFD PMICs
+ * LCD Backlight: drivers/video/backlight/adp5520_bl
+ * LEDs : drivers/led/leds-adp5520
+ * GPIO : drivers/gpio/adp5520-gpio (ADP5520 only)
+ * Keys : drivers/input/keyboard/adp5520-keys (ADP5520 only)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Derived from da903x:
+ * Copyright (C) 2008 Compulab, Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * Copyright (C) 2006-2008 Marvell International Ltd.
+ * Eric Miao <eric.miao@marvell.com>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/adp5520.h>
+
+struct adp5520_chip {
+ struct i2c_client *client;
+ struct device *dev;
+ struct mutex lock;
+ struct blocking_notifier_head notifier_list;
+ int irq;
+ unsigned long id;
+};
+
+static int __adp5520_read(struct i2c_client *client,
+ int reg, uint8_t *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = (uint8_t)ret;
+ return 0;
+}
+
+static int __adp5520_write(struct i2c_client *client,
+ int reg, uint8_t val)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n",
+ val, reg);
+ return ret;
+ }
+ return 0;
+}
+
+static int __adp5520_ack_bits(struct i2c_client *client, int reg,
+ uint8_t bit_mask)
+{
+ struct adp5520_chip *chip = i2c_get_clientdata(client);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = __adp5520_read(client, reg, &reg_val);
+
+ if (!ret) {
+ reg_val |= bit_mask;
+ ret = __adp5520_write(client, reg, reg_val);
+ }
+
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+
+int adp5520_write(struct device *dev, int reg, uint8_t val)
+{
+ return __adp5520_write(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(adp5520_write);
+
+int adp5520_read(struct device *dev, int reg, uint8_t *val)
+{
+ return __adp5520_read(to_i2c_client(dev), reg, val);
+}
+EXPORT_SYMBOL_GPL(adp5520_read);
+
+int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = __adp5520_read(chip->client, reg, &reg_val);
+
+ if (!ret && ((reg_val & bit_mask) == 0)) {
+ reg_val |= bit_mask;
+ ret = __adp5520_write(chip->client, reg, reg_val);
+ }
+
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adp5520_set_bits);
+
+int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+ uint8_t reg_val;
+ int ret;
+
+ mutex_lock(&chip->lock);
+
+ ret = __adp5520_read(chip->client, reg, &reg_val);
+
+ if (!ret && (reg_val & bit_mask)) {
+ reg_val &= ~bit_mask;
+ ret = __adp5520_write(chip->client, reg, reg_val);
+ }
+
+ mutex_unlock(&chip->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adp5520_clr_bits);
+
+int adp5520_register_notifier(struct device *dev, struct notifier_block *nb,
+ unsigned int events)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->irq) {
+ adp5520_set_bits(chip->dev, ADP5520_INTERRUPT_ENABLE,
+ events & (ADP5520_KP_IEN | ADP5520_KR_IEN |
+ ADP5520_OVP_IEN | ADP5520_CMPR_IEN));
+
+ return blocking_notifier_chain_register(&chip->notifier_list,
+ nb);
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(adp5520_register_notifier);
+
+int adp5520_unregister_notifier(struct device *dev, struct notifier_block *nb,
+ unsigned int events)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(dev);
+
+ adp5520_clr_bits(chip->dev, ADP5520_INTERRUPT_ENABLE,
+ events & (ADP5520_KP_IEN | ADP5520_KR_IEN |
+ ADP5520_OVP_IEN | ADP5520_CMPR_IEN));
+
+ return blocking_notifier_chain_unregister(&chip->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(adp5520_unregister_notifier);
+
+static irqreturn_t adp5520_irq_thread(int irq, void *data)
+{
+ struct adp5520_chip *chip = data;
+ unsigned int events;
+ uint8_t reg_val;
+ int ret;
+
+ ret = __adp5520_read(chip->client, ADP5520_MODE_STATUS, &reg_val);
+ if (ret)
+ goto out;
+
+ events = reg_val & (ADP5520_OVP_INT | ADP5520_CMPR_INT |
+ ADP5520_GPI_INT | ADP5520_KR_INT | ADP5520_KP_INT);
+
+ blocking_notifier_call_chain(&chip->notifier_list, events, NULL);
+ /* ACK, Sticky bits are W1C */
+ __adp5520_ack_bits(chip->client, ADP5520_MODE_STATUS, events);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int __remove_subdev(struct device *dev, void *unused)
+{
+ platform_device_unregister(to_platform_device(dev));
+ return 0;
+}
+
+static int adp5520_remove_subdevs(struct adp5520_chip *chip)
+{
+ return device_for_each_child(chip->dev, NULL, __remove_subdev);
+}
+
+static int __devinit adp5520_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adp5520_platform_data *pdata = client->dev.platform_data;
+ struct platform_device *pdev;
+ struct adp5520_chip *chip;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Word Data not Supported\n");
+ return -EIO;
+ }
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->client = client;
+
+ chip->dev = &client->dev;
+ chip->irq = client->irq;
+ chip->id = id->driver_data;
+ mutex_init(&chip->lock);
+
+ if (chip->irq) {
+ BLOCKING_INIT_NOTIFIER_HEAD(&chip->notifier_list);
+
+ ret = request_threaded_irq(chip->irq, NULL, adp5520_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "adp5520", chip);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ chip->irq);
+ goto out_free_chip;
+ }
+ }
+
+ ret = adp5520_write(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ if (ret) {
+ dev_err(&client->dev, "failed to write\n");
+ goto out_free_irq;
+ }
+
+ if (pdata->keys) {
+ pdev = platform_device_register_data(chip->dev, "adp5520-keys",
+ chip->id, pdata->keys, sizeof(*pdata->keys));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ if (pdata->gpio) {
+ pdev = platform_device_register_data(chip->dev, "adp5520-gpio",
+ chip->id, pdata->gpio, sizeof(*pdata->gpio));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ if (pdata->leds) {
+ pdev = platform_device_register_data(chip->dev, "adp5520-led",
+ chip->id, pdata->leds, sizeof(*pdata->leds));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ if (pdata->backlight) {
+ pdev = platform_device_register_data(chip->dev,
+ "adp5520-backlight",
+ chip->id,
+ pdata->backlight,
+ sizeof(*pdata->backlight));
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ goto out_remove_subdevs;
+ }
+ }
+
+ return 0;
+
+out_remove_subdevs:
+ adp5520_remove_subdevs(chip);
+
+out_free_irq:
+ if (chip->irq)
+ free_irq(chip->irq, chip);
+
+out_free_chip:
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+
+ return ret;
+}
+
+static int __devexit adp5520_remove(struct i2c_client *client)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+ if (chip->irq)
+ free_irq(chip->irq, chip);
+
+ adp5520_remove_subdevs(chip);
+ adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int adp5520_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+ adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ return 0;
+}
+
+static int adp5520_resume(struct i2c_client *client)
+{
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+ adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
+ return 0;
+}
+#else
+#define adp5520_suspend NULL
+#define adp5520_resume NULL
+#endif
+
+static const struct i2c_device_id adp5520_id[] = {
+ { "pmic-adp5520", ID_ADP5520 },
+ { "pmic-adp5501", ID_ADP5501 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp5520_id);
+
+static struct i2c_driver adp5520_driver = {
+ .driver = {
+ .name = "adp5520",
+ .owner = THIS_MODULE,
+ },
+ .probe = adp5520_probe,
+ .remove = __devexit_p(adp5520_remove),
+ .suspend = adp5520_suspend,
+ .resume = adp5520_resume,
+ .id_table = adp5520_id,
+};
+
+static int __init adp5520_init(void)
+{
+ return i2c_add_driver(&adp5520_driver);
+}
+module_init(adp5520_init);
+
+static void __exit adp5520_exit(void)
+{
+ i2c_del_driver(&adp5520_driver);
+}
+module_exit(adp5520_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver");
+MODULE_LICENSE("GPL");
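adp5520_register_notifier() above enables the requested interrupt sources and adds the caller to the chip's blocking notifier chain; adp5520_irq_thread() then forwards the latched ADP5520_*_INT status bits as the notifier event. A hedged sketch of how a sub-driver might subscribe to key events; the callback and helper names are illustrative only:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/mfd/adp5520.h>

/* Illustrative callback: 'event' carries the ADP5520_*_INT status bits
 * forwarded by adp5520_irq_thread(). */
static int adp5520_keys_notifier(struct notifier_block *nb,
				 unsigned long event, void *unused)
{
	if (event & ADP5520_KP_INT)
		pr_debug("adp5520: key pressed\n");
	if (event & ADP5520_KR_INT)
		pr_debug("adp5520: key released\n");

	return NOTIFY_OK;
}

static struct notifier_block adp5520_keys_nb = {
	.notifier_call = adp5520_keys_notifier,
};

/* Subscribe for keypress/keyrelease interrupts on 'dev', the MFD parent. */
static int adp5520_keys_subscribe(struct device *dev)
{
	return adp5520_register_notifier(dev, &adp5520_keys_nb,
					 ADP5520_KP_IEN | ADP5520_KR_IEN);
}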
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 63a2a663210..e22128c3e9a 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -908,7 +908,7 @@ static int __init asic3_probe(struct platform_device *pdev)
return ret;
}
-static int asic3_remove(struct platform_device *pdev)
+static int __devexit asic3_remove(struct platform_device *pdev)
{
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 87628891797..df405af968f 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -387,7 +387,6 @@ static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
pdev = platform_device_alloc(subdev->name, subdev->id);
pdev->dev.parent = &pcap->spi->dev;
pdev->dev.platform_data = subdev->platform_data;
- platform_set_drvdata(pdev, pcap);
return platform_device_add(pdev);
}
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index e354d2912ef..a1ade2324ea 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -1,286 +1,549 @@
/*
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
- *
- * This code is in parts based on wm8350-core.c and pcf50633-core.c
- *
- * Initial development of this code was funded by
- * Phytec Messtechnik GmbH, http://www.phytec.de
+ * Copyright 2009 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
*/
-
-#include <linux/mfd/mc13783-private.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/mc13783.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/spi/spi.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13783-private.h>
+
+#define MC13783_IRQSTAT0 0
+#define MC13783_IRQSTAT0_ADCDONEI (1 << 0)
+#define MC13783_IRQSTAT0_ADCBISDONEI (1 << 1)
+#define MC13783_IRQSTAT0_TSI (1 << 2)
+#define MC13783_IRQSTAT0_WHIGHI (1 << 3)
+#define MC13783_IRQSTAT0_WLOWI (1 << 4)
+#define MC13783_IRQSTAT0_CHGDETI (1 << 6)
+#define MC13783_IRQSTAT0_CHGOVI (1 << 7)
+#define MC13783_IRQSTAT0_CHGREVI (1 << 8)
+#define MC13783_IRQSTAT0_CHGSHORTI (1 << 9)
+#define MC13783_IRQSTAT0_CCCVI (1 << 10)
+#define MC13783_IRQSTAT0_CHGCURRI (1 << 11)
+#define MC13783_IRQSTAT0_BPONI (1 << 12)
+#define MC13783_IRQSTAT0_LOBATLI (1 << 13)
+#define MC13783_IRQSTAT0_LOBATHI (1 << 14)
+#define MC13783_IRQSTAT0_UDPI (1 << 15)
+#define MC13783_IRQSTAT0_USBI (1 << 16)
+#define MC13783_IRQSTAT0_IDI (1 << 19)
+#define MC13783_IRQSTAT0_SE1I (1 << 21)
+#define MC13783_IRQSTAT0_CKDETI (1 << 22)
+#define MC13783_IRQSTAT0_UDMI (1 << 23)
+
+#define MC13783_IRQMASK0 1
+#define MC13783_IRQMASK0_ADCDONEM MC13783_IRQSTAT0_ADCDONEI
+#define MC13783_IRQMASK0_ADCBISDONEM MC13783_IRQSTAT0_ADCBISDONEI
+#define MC13783_IRQMASK0_TSM MC13783_IRQSTAT0_TSI
+#define MC13783_IRQMASK0_WHIGHM MC13783_IRQSTAT0_WHIGHI
+#define MC13783_IRQMASK0_WLOWM MC13783_IRQSTAT0_WLOWI
+#define MC13783_IRQMASK0_CHGDETM MC13783_IRQSTAT0_CHGDETI
+#define MC13783_IRQMASK0_CHGOVM MC13783_IRQSTAT0_CHGOVI
+#define MC13783_IRQMASK0_CHGREVM MC13783_IRQSTAT0_CHGREVI
+#define MC13783_IRQMASK0_CHGSHORTM MC13783_IRQSTAT0_CHGSHORTI
+#define MC13783_IRQMASK0_CCCVM MC13783_IRQSTAT0_CCCVI
+#define MC13783_IRQMASK0_CHGCURRM MC13783_IRQSTAT0_CHGCURRI
+#define MC13783_IRQMASK0_BPONM MC13783_IRQSTAT0_BPONI
+#define MC13783_IRQMASK0_LOBATLM MC13783_IRQSTAT0_LOBATLI
+#define MC13783_IRQMASK0_LOBATHM MC13783_IRQSTAT0_LOBATHI
+#define MC13783_IRQMASK0_UDPM MC13783_IRQSTAT0_UDPI
+#define MC13783_IRQMASK0_USBM MC13783_IRQSTAT0_USBI
+#define MC13783_IRQMASK0_IDM MC13783_IRQSTAT0_IDI
+#define MC13783_IRQMASK0_SE1M MC13783_IRQSTAT0_SE1I
+#define MC13783_IRQMASK0_CKDETM MC13783_IRQSTAT0_CKDETI
+#define MC13783_IRQMASK0_UDMM MC13783_IRQSTAT0_UDMI
+
+#define MC13783_IRQSTAT1 3
+#define MC13783_IRQSTAT1_1HZI (1 << 0)
+#define MC13783_IRQSTAT1_TODAI (1 << 1)
+#define MC13783_IRQSTAT1_ONOFD1I (1 << 3)
+#define MC13783_IRQSTAT1_ONOFD2I (1 << 4)
+#define MC13783_IRQSTAT1_ONOFD3I (1 << 5)
+#define MC13783_IRQSTAT1_SYSRSTI (1 << 6)
+#define MC13783_IRQSTAT1_RTCRSTI (1 << 7)
+#define MC13783_IRQSTAT1_PCI (1 << 8)
+#define MC13783_IRQSTAT1_WARMI (1 << 9)
+#define MC13783_IRQSTAT1_MEMHLDI (1 << 10)
+#define MC13783_IRQSTAT1_PWRRDYI (1 << 11)
+#define MC13783_IRQSTAT1_THWARNLI (1 << 12)
+#define MC13783_IRQSTAT1_THWARNHI (1 << 13)
+#define MC13783_IRQSTAT1_CLKI (1 << 14)
+#define MC13783_IRQSTAT1_SEMAFI (1 << 15)
+#define MC13783_IRQSTAT1_MC2BI (1 << 17)
+#define MC13783_IRQSTAT1_HSDETI (1 << 18)
+#define MC13783_IRQSTAT1_HSLI (1 << 19)
+#define MC13783_IRQSTAT1_ALSPTHI (1 << 20)
+#define MC13783_IRQSTAT1_AHSSHORTI (1 << 21)
+
+#define MC13783_IRQMASK1 4
+#define MC13783_IRQMASK1_1HZM MC13783_IRQSTAT1_1HZI
+#define MC13783_IRQMASK1_TODAM MC13783_IRQSTAT1_TODAI
+#define MC13783_IRQMASK1_ONOFD1M MC13783_IRQSTAT1_ONOFD1I
+#define MC13783_IRQMASK1_ONOFD2M MC13783_IRQSTAT1_ONOFD2I
+#define MC13783_IRQMASK1_ONOFD3M MC13783_IRQSTAT1_ONOFD3I
+#define MC13783_IRQMASK1_SYSRSTM MC13783_IRQSTAT1_SYSRSTI
+#define MC13783_IRQMASK1_RTCRSTM MC13783_IRQSTAT1_RTCRSTI
+#define MC13783_IRQMASK1_PCM MC13783_IRQSTAT1_PCI
+#define MC13783_IRQMASK1_WARMM MC13783_IRQSTAT1_WARMI
+#define MC13783_IRQMASK1_MEMHLDM MC13783_IRQSTAT1_MEMHLDI
+#define MC13783_IRQMASK1_PWRRDYM MC13783_IRQSTAT1_PWRRDYI
+#define MC13783_IRQMASK1_THWARNLM MC13783_IRQSTAT1_THWARNLI
+#define MC13783_IRQMASK1_THWARNHM MC13783_IRQSTAT1_THWARNHI
+#define MC13783_IRQMASK1_CLKM MC13783_IRQSTAT1_CLKI
+#define MC13783_IRQMASK1_SEMAFM MC13783_IRQSTAT1_SEMAFI
+#define MC13783_IRQMASK1_MC2BM MC13783_IRQSTAT1_MC2BI
+#define MC13783_IRQMASK1_HSDETM MC13783_IRQSTAT1_HSDETI
+#define MC13783_IRQMASK1_HSLM MC13783_IRQSTAT1_HSLI
+#define MC13783_IRQMASK1_ALSPTHM MC13783_IRQSTAT1_ALSPTHI
+#define MC13783_IRQMASK1_AHSSHORTM MC13783_IRQSTAT1_AHSSHORTI
+
+#define MC13783_ADC1 44
+#define MC13783_ADC1_ADEN (1 << 0)
+#define MC13783_ADC1_RAND (1 << 1)
+#define MC13783_ADC1_ADSEL (1 << 3)
+#define MC13783_ADC1_ASC (1 << 20)
+#define MC13783_ADC1_ADTRIGIGN (1 << 21)
+
+#define MC13783_NUMREGS 0x3f
+
+void mc13783_lock(struct mc13783 *mc13783)
+{
+ if (!mutex_trylock(&mc13783->lock)) {
+ dev_dbg(&mc13783->spidev->dev, "wait for %s from %pf\n",
+ __func__, __builtin_return_address(0));
+
+ mutex_lock(&mc13783->lock);
+ }
+ dev_dbg(&mc13783->spidev->dev, "%s from %pf\n",
+ __func__, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(mc13783_lock);
-#define MC13783_MAX_REG_NUM 0x3f
-#define MC13783_FRAME_MASK 0x00ffffff
-#define MC13783_MAX_REG_NUM 0x3f
-#define MC13783_REG_NUM_SHIFT 0x19
-#define MC13783_WRITE_BIT_SHIFT 31
+void mc13783_unlock(struct mc13783 *mc13783)
+{
+ dev_dbg(&mc13783->spidev->dev, "%s from %pf\n",
+ __func__, __builtin_return_address(0));
+ mutex_unlock(&mc13783->lock);
+}
+EXPORT_SYMBOL(mc13783_unlock);
-static inline int spi_rw(struct spi_device *spi, u8 * buf, size_t len)
+#define MC13783_REGOFFSET_SHIFT 25
+int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val)
{
- struct spi_transfer t = {
- .tx_buf = (const void *)buf,
- .rx_buf = buf,
- .len = len,
- .cs_change = 0,
- .delay_usecs = 0,
- };
+ struct spi_transfer t;
struct spi_message m;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
+
+ if (offset > MC13783_NUMREGS)
+ return -EINVAL;
+
+ *val = offset << MC13783_REGOFFSET_SHIFT;
+
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = val;
+ t.rx_buf = val;
+ t.len = sizeof(u32);
spi_message_init(&m);
spi_message_add_tail(&t, &m);
- if (spi_sync(spi, &m) != 0 || m.status != 0)
- return -EINVAL;
- return len - m.actual_length;
-}
-static int mc13783_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val)
-{
- unsigned int frame = 0;
- int ret = 0;
+ ret = spi_sync(mc13783->spidev, &m);
- if (reg_num > MC13783_MAX_REG_NUM)
- return -EINVAL;
+ /* error in message.status implies error return from spi_sync */
+ BUG_ON(!ret && m.status);
- frame |= reg_num << MC13783_REG_NUM_SHIFT;
+ if (ret)
+ return ret;
- ret = spi_rw(mc13783->spi_device, (u8 *)&frame, 4);
+ *val &= 0xffffff;
- *reg_val = frame & MC13783_FRAME_MASK;
+ dev_vdbg(&mc13783->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
- return ret;
+ return 0;
}
+EXPORT_SYMBOL(mc13783_reg_read);
-static int mc13783_write(struct mc13783 *mc13783, int reg_num, u32 reg_val)
+int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val)
{
- unsigned int frame = 0;
+ u32 buf;
+ struct spi_transfer t;
+ struct spi_message m;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
- if (reg_num > MC13783_MAX_REG_NUM)
+ dev_vdbg(&mc13783->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+
+ if (offset > MC13783_NUMREGS || val > 0xffffff)
return -EINVAL;
- frame |= (1 << MC13783_WRITE_BIT_SHIFT);
- frame |= reg_num << MC13783_REG_NUM_SHIFT;
- frame |= reg_val & MC13783_FRAME_MASK;
+ buf = 1 << 31 | offset << MC13783_REGOFFSET_SHIFT | val;
+
+ memset(&t, 0, sizeof(t));
- return spi_rw(mc13783->spi_device, (u8 *)&frame, 4);
+ t.tx_buf = &buf;
+ t.rx_buf = &buf;
+ t.len = sizeof(u32);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ ret = spi_sync(mc13783->spidev, &m);
+
+ BUG_ON(!ret && m.status);
+
+ if (ret)
+ return ret;
+
+ return 0;
}
+EXPORT_SYMBOL(mc13783_reg_write);
-int mc13783_reg_read(struct mc13783 *mc13783, int reg_num, u32 *reg_val)
+int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val)
{
int ret;
+ u32 valread;
- mutex_lock(&mc13783->io_lock);
- ret = mc13783_read(mc13783, reg_num, reg_val);
- mutex_unlock(&mc13783->io_lock);
+ BUG_ON(val & ~mask);
- return ret;
+ ret = mc13783_reg_read(mc13783, offset, &valread);
+ if (ret)
+ return ret;
+
+ valread = (valread & ~mask) | val;
+
+ return mc13783_reg_write(mc13783, offset, valread);
}
-EXPORT_SYMBOL_GPL(mc13783_reg_read);
+EXPORT_SYMBOL(mc13783_reg_rmw);
-int mc13783_reg_write(struct mc13783 *mc13783, int reg_num, u32 reg_val)
+int mc13783_mask(struct mc13783 *mc13783, int irq)
{
int ret;
+ unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+ u32 mask;
- mutex_lock(&mc13783->io_lock);
- ret = mc13783_write(mc13783, reg_num, reg_val);
- mutex_unlock(&mc13783->io_lock);
+ if (irq < 0 || irq >= MC13783_NUM_IRQ)
+ return -EINVAL;
- return ret;
+ ret = mc13783_reg_read(mc13783, offmask, &mask);
+ if (ret)
+ return ret;
+
+ if (mask & irqbit)
+ /* already masked */
+ return 0;
+
+ return mc13783_reg_write(mc13783, offmask, mask | irqbit);
}
-EXPORT_SYMBOL_GPL(mc13783_reg_write);
+EXPORT_SYMBOL(mc13783_mask);
-/**
- * mc13783_set_bits - Bitmask write
- *
- * @mc13783: Pointer to mc13783 control structure
- * @reg: Register to access
- * @mask: Mask of bits to change
- * @val: Value to set for masked bits
- */
-int mc13783_set_bits(struct mc13783 *mc13783, int reg, u32 mask, u32 val)
+int mc13783_unmask(struct mc13783 *mc13783, int irq)
{
- u32 tmp;
int ret;
+ unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+ u32 mask;
- mutex_lock(&mc13783->io_lock);
+ if (irq < 0 || irq >= MC13783_NUM_IRQ)
+ return -EINVAL;
- ret = mc13783_read(mc13783, reg, &tmp);
- tmp = (tmp & ~mask) | val;
- if (ret == 0)
- ret = mc13783_write(mc13783, reg, tmp);
+ ret = mc13783_reg_read(mc13783, offmask, &mask);
+ if (ret)
+ return ret;
- mutex_unlock(&mc13783->io_lock);
+ if (!(mask & irqbit))
+ /* already unmasked */
+ return 0;
- return ret;
+ return mc13783_reg_write(mc13783, offmask, mask & ~irqbit);
}
-EXPORT_SYMBOL_GPL(mc13783_set_bits);
+EXPORT_SYMBOL(mc13783_unmask);
-int mc13783_register_irq(struct mc13783 *mc13783, int irq,
- void (*handler) (int, void *), void *data)
+int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev)
{
- if (irq < 0 || irq > MC13783_NUM_IRQ || !handler)
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
+ BUG_ON(!handler);
+
+ if (irq < 0 || irq >= MC13783_NUM_IRQ)
return -EINVAL;
- if (WARN_ON(mc13783->irq_handler[irq].handler))
+ if (mc13783->irqhandler[irq])
return -EBUSY;
- mutex_lock(&mc13783->io_lock);
- mc13783->irq_handler[irq].handler = handler;
- mc13783->irq_handler[irq].data = data;
- mutex_unlock(&mc13783->io_lock);
+ mc13783->irqhandler[irq] = handler;
+ mc13783->irqdata[irq] = dev;
return 0;
}
-EXPORT_SYMBOL_GPL(mc13783_register_irq);
+EXPORT_SYMBOL(mc13783_irq_request_nounmask);
-int mc13783_free_irq(struct mc13783 *mc13783, int irq)
+int mc13783_irq_request(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev)
{
- if (irq < 0 || irq > MC13783_NUM_IRQ)
+ int ret;
+
+ ret = mc13783_irq_request_nounmask(mc13783, irq, handler, name, dev);
+ if (ret)
+ return ret;
+
+ ret = mc13783_unmask(mc13783, irq);
+ if (ret) {
+ mc13783->irqhandler[irq] = NULL;
+ mc13783->irqdata[irq] = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13783_irq_request);
+
+int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev)
+{
+ int ret;
+ BUG_ON(!mutex_is_locked(&mc13783->lock));
+
+ if (irq < 0 || irq >= MC13783_NUM_IRQ || !mc13783->irqhandler[irq] ||
+ mc13783->irqdata[irq] != dev)
return -EINVAL;
- mutex_lock(&mc13783->io_lock);
- mc13783->irq_handler[irq].handler = NULL;
- mutex_unlock(&mc13783->io_lock);
+ ret = mc13783_mask(mc13783, irq);
+ if (ret)
+ return ret;
+
+ mc13783->irqhandler[irq] = NULL;
+ mc13783->irqdata[irq] = NULL;
return 0;
}
-EXPORT_SYMBOL_GPL(mc13783_free_irq);
+EXPORT_SYMBOL(mc13783_irq_free);
-static void mc13783_irq_work(struct work_struct *work)
+static inline irqreturn_t mc13783_irqhandler(struct mc13783 *mc13783, int irq)
{
- struct mc13783 *mc13783 = container_of(work, struct mc13783, work);
- int i;
- unsigned int adc_sts;
-
- /* check if the adc has finished any completion */
- mc13783_reg_read(mc13783, MC13783_REG_INTERRUPT_STATUS_0, &adc_sts);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0,
- adc_sts & MC13783_INT_STAT_ADCDONEI);
-
- if (adc_sts & MC13783_INT_STAT_ADCDONEI)
- complete_all(&mc13783->adc_done);
-
- for (i = 0; i < MC13783_NUM_IRQ; i++)
- if (mc13783->irq_handler[i].handler)
- mc13783->irq_handler[i].handler(i,
- mc13783->irq_handler[i].data);
- enable_irq(mc13783->irq);
+ return mc13783->irqhandler[irq](irq, mc13783->irqdata[irq]);
}
-static irqreturn_t mc13783_interrupt(int irq, void *dev_id)
+int mc13783_ackirq(struct mc13783 *mc13783, int irq)
{
- struct mc13783 *mc13783 = dev_id;
+ unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1;
+ unsigned int val = 1 << (irq < 24 ? irq : irq - 24);
- disable_irq_nosync(irq);
+ BUG_ON(irq < 0 || irq >= MC13783_NUM_IRQ);
- schedule_work(&mc13783->work);
- return IRQ_HANDLED;
+ return mc13783_reg_write(mc13783, offstat, val);
}
+EXPORT_SYMBOL(mc13783_ackirq);
-/* set adc to ts interrupt mode, which generates touchscreen wakeup interrupt */
-static inline void mc13783_adc_set_ts_irq_mode(struct mc13783 *mc13783)
+/*
+ * returns: number of handled irqs or negative error
+ * locking: holds mc13783->lock
+ */
+static int mc13783_irq_handle(struct mc13783 *mc13783,
+ unsigned int offstat, unsigned int offmask, int baseirq)
{
- unsigned int reg_adc0, reg_adc1;
+ u32 stat, mask;
+ int ret = mc13783_reg_read(mc13783, offstat, &stat);
+ int num_handled = 0;
+
+ if (ret)
+ return ret;
+
+ ret = mc13783_reg_read(mc13783, offmask, &mask);
+ if (ret)
+ return ret;
+
+ while (stat & ~mask) {
+ int irq = __ffs(stat & ~mask);
+
+ stat &= ~(1 << irq);
+
+ if (likely(mc13783->irqhandler[baseirq + irq])) {
+ irqreturn_t handled;
- reg_adc0 = MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE
- | MC13783_ADC0_TSMOD0;
- reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN;
+ handled = mc13783_irqhandler(mc13783, baseirq + irq);
+ if (handled == IRQ_HANDLED)
+ num_handled++;
+ } else {
+ dev_err(&mc13783->spidev->dev,
+ "BUG: irq %u but no handler\n",
+ baseirq + irq);
- mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0);
- mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1);
+ mask |= 1 << irq;
+
+ ret = mc13783_reg_write(mc13783, offmask, mask);
+ }
+ }
+
+ return num_handled;
}
+static irqreturn_t mc13783_irq_thread(int irq, void *data)
+{
+ struct mc13783 *mc13783 = data;
+ irqreturn_t ret;
+ int handled = 0;
+
+ mc13783_lock(mc13783);
+
+ ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT0,
+ MC13783_IRQMASK0, MC13783_IRQ_ADCDONE);
+ if (ret > 0)
+ handled = 1;
+
+ ret = mc13783_irq_handle(mc13783, MC13783_IRQSTAT1,
+ MC13783_IRQMASK1, MC13783_IRQ_1HZ);
+ if (ret > 0)
+ handled = 1;
+
+ mc13783_unlock(mc13783);
+
+ return IRQ_RETVAL(handled);
+}
+
+#define MC13783_ADC1_CHAN0_SHIFT 5
+#define MC13783_ADC1_CHAN1_SHIFT 8
+
+struct mc13783_adcdone_data {
+ struct mc13783 *mc13783;
+ struct completion done;
+};
+
+static irqreturn_t mc13783_handler_adcdone(int irq, void *data)
+{
+ struct mc13783_adcdone_data *adcdone_data = data;
+
+ mc13783_ackirq(adcdone_data->mc13783, irq);
+
+ complete_all(&adcdone_data->done);
+
+ return IRQ_HANDLED;
+}
+
+#define MC13783_ADC_WORKING (1 << 16)
+
int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample)
{
- unsigned int reg_adc0, reg_adc1;
- int i;
+ u32 adc0, adc1, old_adc0;
+ int i, ret;
+ struct mc13783_adcdone_data adcdone_data = {
+ .mc13783 = mc13783,
+ };
+ init_completion(&adcdone_data.done);
+
+ dev_dbg(&mc13783->spidev->dev, "%s\n", __func__);
+
+ mc13783_lock(mc13783);
+
+ if (mc13783->flags & MC13783_ADC_WORKING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mc13783->flags |= MC13783_ADC_WORKING;
- mutex_lock(&mc13783->adc_conv_lock);
+ mc13783_reg_read(mc13783, MC13783_ADC0, &old_adc0);
- /* set up auto incrementing anyway to make quick read */
- reg_adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2;
- /* enable the adc, ignore external triggering and set ASC to trigger
- * conversion */
- reg_adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN
- | MC13783_ADC1_ASC;
+ adc0 = MC13783_ADC0_ADINC1 | MC13783_ADC0_ADINC2;
+ adc1 = MC13783_ADC1_ADEN | MC13783_ADC1_ADTRIGIGN | MC13783_ADC1_ASC;
- /* setup channel number */
if (channel > 7)
- reg_adc1 |= MC13783_ADC1_ADSEL;
+ adc1 |= MC13783_ADC1_ADSEL;
switch (mode) {
case MC13783_ADC_MODE_TS:
- /* enables touch screen reference mode and set touchscreen mode
- * to position mode */
- reg_adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_ADREFMODE
- | MC13783_ADC0_TSMOD0 | MC13783_ADC0_TSMOD1;
- reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ adc0 |= MC13783_ADC0_ADREFEN | MC13783_ADC0_TSMOD0 |
+ MC13783_ADC0_TSMOD1;
+ adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
break;
+
case MC13783_ADC_MODE_SINGLE_CHAN:
- reg_adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT;
- reg_adc1 |= MC13783_ADC1_RAND;
+ adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
+ adc1 |= (channel & 0x7) << MC13783_ADC1_CHAN0_SHIFT;
+ adc1 |= MC13783_ADC1_RAND;
break;
+
case MC13783_ADC_MODE_MULT_CHAN:
- reg_adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
+ adc0 |= old_adc0 & MC13783_ADC0_TSMOD_MASK;
+ adc1 |= 4 << MC13783_ADC1_CHAN1_SHIFT;
break;
+
default:
+ mc13783_unlock(mc13783);
return -EINVAL;
}
- mc13783_reg_write(mc13783, MC13783_REG_ADC_0, reg_adc0);
- mc13783_reg_write(mc13783, MC13783_REG_ADC_1, reg_adc1);
+ dev_dbg(&mc13783->spidev->dev, "%s: request irq\n", __func__);
+ mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
+ mc13783_handler_adcdone, __func__, &adcdone_data);
+ mc13783_ackirq(mc13783, MC13783_IRQ_ADCDONE);
- wait_for_completion_interruptible(&mc13783->adc_done);
+ mc13783_reg_write(mc13783, MC13783_REG_ADC_0, adc0);
+ mc13783_reg_write(mc13783, MC13783_REG_ADC_1, adc1);
- for (i = 0; i < 4; i++)
- mc13783_reg_read(mc13783, MC13783_REG_ADC_2, &sample[i]);
+ mc13783_unlock(mc13783);
- if (mc13783->ts_active)
- mc13783_adc_set_ts_irq_mode(mc13783);
+ ret = wait_for_completion_interruptible_timeout(&adcdone_data.done, HZ);
- mutex_unlock(&mc13783->adc_conv_lock);
+ if (!ret)
+ ret = -ETIMEDOUT;
- return 0;
+ mc13783_lock(mc13783);
+
+ mc13783_irq_free(mc13783, MC13783_IRQ_ADCDONE, &adcdone_data);
+
+ if (ret > 0)
+ for (i = 0; i < 4; ++i) {
+ ret = mc13783_reg_read(mc13783,
+ MC13783_REG_ADC_2, &sample[i]);
+ if (ret)
+ break;
+ }
+
+ if (mode == MC13783_ADC_MODE_TS)
+ /* restore TSMOD */
+ mc13783_reg_write(mc13783, MC13783_REG_ADC_0, old_adc0);
+
+ mc13783->flags &= ~MC13783_ADC_WORKING;
+out:
+ mc13783_unlock(mc13783);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
-void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status)
+static int mc13783_add_subdevice_pdata(struct mc13783 *mc13783,
+ const char *name, void *pdata, size_t pdata_size)
{
- mc13783->ts_active = status;
+ struct mfd_cell cell = {
+ .name = name,
+ .platform_data = pdata,
+ .data_size = pdata_size,
+ };
+
+ return mfd_add_devices(&mc13783->spidev->dev, -1, &cell, 1, NULL, 0);
+}
+
+static int mc13783_add_subdevice(struct mc13783 *mc13783, const char *name)
+{
+ return mc13783_add_subdevice_pdata(mc13783, name, NULL, 0);
}
-EXPORT_SYMBOL_GPL(mc13783_adc_set_ts_status);
static int mc13783_check_revision(struct mc13783 *mc13783)
{
u32 rev_id, rev1, rev2, finid, icid;
- mc13783_read(mc13783, MC13783_REG_REVISION, &rev_id);
+ mc13783_reg_read(mc13783, MC13783_REG_REVISION, &rev_id);
rev1 = (rev_id & 0x018) >> 3;
rev2 = (rev_id & 0x007);
@@ -292,38 +555,24 @@ static int mc13783_check_revision(struct mc13783 *mc13783)
rev1 = 3;
if (rev1 == 0 || icid != 2) {
- dev_err(mc13783->dev, "No MC13783 detected.\n");
+ dev_err(&mc13783->spidev->dev, "No MC13783 detected.\n");
return -ENODEV;
}
- mc13783->revision = ((rev1 * 10) + rev2);
- dev_info(mc13783->dev, "MC13783 Rev %d.%d FinVer %x detected\n", rev1,
- rev2, finid);
+ dev_info(&mc13783->spidev->dev,
+ "MC13783 Rev %d.%d FinVer %x detected\n",
+ rev1, rev2, finid);
return 0;
}
-/*
- * Register a client device. This is non-fatal since there is no need to
- * fail the entire device init due to a single platform device failing.
- */
-static void mc13783_client_dev_register(struct mc13783 *mc13783,
- const char *name)
-{
- struct mfd_cell cell = {};
-
- cell.name = name;
-
- mfd_add_devices(mc13783->dev, -1, &cell, 1, NULL, 0);
-}
-
-static int __devinit mc13783_probe(struct spi_device *spi)
+static int mc13783_probe(struct spi_device *spi)
{
struct mc13783 *mc13783;
- struct mc13783_platform_data *pdata = spi->dev.platform_data;
+ struct mc13783_platform_data *pdata = dev_get_platdata(&spi->dev);
int ret;
- mc13783 = kzalloc(sizeof(struct mc13783), GFP_KERNEL);
+ mc13783 = kzalloc(sizeof(*mc13783), GFP_KERNEL);
if (!mc13783)
return -ENOMEM;
@@ -332,96 +581,104 @@ static int __devinit mc13783_probe(struct spi_device *spi)
spi->bits_per_word = 32;
spi_setup(spi);
- mc13783->spi_device = spi;
- mc13783->dev = &spi->dev;
- mc13783->irq = spi->irq;
+ mc13783->spidev = spi;
+
+ mutex_init(&mc13783->lock);
+ mc13783_lock(mc13783);
+
+ ret = mc13783_check_revision(mc13783);
+ if (ret)
+ goto err_revision;
+
+ /* mask all irqs */
+ ret = mc13783_reg_write(mc13783, MC13783_IRQMASK0, 0x00ffffff);
+ if (ret)
+ goto err_mask;
- INIT_WORK(&mc13783->work, mc13783_irq_work);
- mutex_init(&mc13783->io_lock);
- mutex_init(&mc13783->adc_conv_lock);
- init_completion(&mc13783->adc_done);
+ ret = mc13783_reg_write(mc13783, MC13783_IRQMASK1, 0x00ffffff);
+ if (ret)
+ goto err_mask;
+
+ ret = request_threaded_irq(spi->irq, NULL, mc13783_irq_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13783", mc13783);
+
+ if (ret) {
+err_mask:
+err_revision:
+ mutex_unlock(&mc13783->lock);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(mc13783);
+ return ret;
+ }
+ /* This should go away (BEGIN) */
if (pdata) {
mc13783->flags = pdata->flags;
mc13783->regulators = pdata->regulators;
mc13783->num_regulators = pdata->num_regulators;
}
+ /* This should go away (END) */
- if (mc13783_check_revision(mc13783)) {
- ret = -ENODEV;
- goto err_out;
+ if (pdata->flags & MC13783_USE_ADC)
+ mc13783_add_subdevice(mc13783, "mc13783-adc");
+
+ if (pdata->flags & MC13783_USE_CODEC)
+ mc13783_add_subdevice(mc13783, "mc13783-codec");
+
+ if (pdata->flags & MC13783_USE_REGULATOR) {
+ struct mc13783_regulator_platform_data regulator_pdata = {
+ .num_regulators = pdata->num_regulators,
+ .regulators = pdata->regulators,
+ };
+
+ mc13783_add_subdevice_pdata(mc13783, "mc13783-regulator",
+ &regulator_pdata, sizeof(regulator_pdata));
}
- /* clear and mask all interrupts */
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_0, 0x00ffffff);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_0, 0x00ffffff);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_STATUS_1, 0x00ffffff);
- mc13783_reg_write(mc13783, MC13783_REG_INTERRUPT_MASK_1, 0x00ffffff);
+ if (pdata->flags & MC13783_USE_RTC)
+ mc13783_add_subdevice(mc13783, "mc13783-rtc");
- /* unmask adcdone interrupts */
- mc13783_set_bits(mc13783, MC13783_REG_INTERRUPT_MASK_0,
- MC13783_INT_MASK_ADCDONEM, 0);
+ if (pdata->flags & MC13783_USE_TOUCHSCREEN)
+ mc13783_add_subdevice(mc13783, "mc13783-ts");
- ret = request_irq(mc13783->irq, mc13783_interrupt,
- IRQF_DISABLED | IRQF_TRIGGER_HIGH, "mc13783",
- mc13783);
- if (ret)
- goto err_out;
-
- if (mc13783->flags & MC13783_USE_CODEC)
- mc13783_client_dev_register(mc13783, "mc13783-codec");
- if (mc13783->flags & MC13783_USE_ADC)
- mc13783_client_dev_register(mc13783, "mc13783-adc");
- if (mc13783->flags & MC13783_USE_RTC)
- mc13783_client_dev_register(mc13783, "mc13783-rtc");
- if (mc13783->flags & MC13783_USE_REGULATOR)
- mc13783_client_dev_register(mc13783, "mc13783-regulator");
- if (mc13783->flags & MC13783_USE_TOUCHSCREEN)
- mc13783_client_dev_register(mc13783, "mc13783-ts");
+ mc13783_unlock(mc13783);
return 0;
-
-err_out:
- kfree(mc13783);
- return ret;
}
static int __devexit mc13783_remove(struct spi_device *spi)
{
- struct mc13783 *mc13783;
+ struct mc13783 *mc13783 = dev_get_drvdata(&spi->dev);
- mc13783 = dev_get_drvdata(&spi->dev);
-
- free_irq(mc13783->irq, mc13783);
+ free_irq(mc13783->spidev->irq, mc13783);
mfd_remove_devices(&spi->dev);
return 0;
}
-static struct spi_driver pmic_driver = {
+static struct spi_driver mc13783_driver = {
.driver = {
- .name = "mc13783",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
+ .name = "mc13783",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
},
.probe = mc13783_probe,
.remove = __devexit_p(mc13783_remove),
};
-static int __init pmic_init(void)
+static int __init mc13783_init(void)
{
- return spi_register_driver(&pmic_driver);
+ return spi_register_driver(&mc13783_driver);
}
-subsys_initcall(pmic_init);
+subsys_initcall(mc13783_init);
-static void __exit pmic_exit(void)
+static void __exit mc13783_exit(void)
{
- spi_unregister_driver(&pmic_driver);
+ spi_unregister_driver(&mc13783_driver);
}
-module_exit(pmic_exit);
-
-MODULE_DESCRIPTION("Core/Protocol driver for Freescale MC13783 PMIC");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
-MODULE_LICENSE("GPL");
+module_exit(mc13783_exit);
+MODULE_DESCRIPTION("Core driver for Freescale MC13783 PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index 3d31e97d6a4..6d2e8466df1 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -209,17 +209,16 @@ static void pcf50633_adc_irq(int irq, void *data)
static int __devinit pcf50633_adc_probe(struct platform_device *pdev)
{
- struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
struct pcf50633_adc *adc;
adc = kzalloc(sizeof(*adc), GFP_KERNEL);
if (!adc)
return -ENOMEM;
- adc->pcf = pdata->pcf;
+ adc->pcf = dev_to_pcf50633(pdev->dev.parent);
platform_set_drvdata(pdev, adc);
- pcf50633_register_irq(pdata->pcf, PCF50633_IRQ_ADCRDY,
+ pcf50633_register_irq(adc->pcf, PCF50633_IRQ_ADCRDY,
pcf50633_adc_irq, adc);
mutex_init(&adc->queue_mutex);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index d26d7747175..03dcc920070 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -290,7 +290,7 @@ out:
int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
{
- dev_info(pcf->dev, "Masking IRQ %d\n", irq);
+ dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
return __pcf50633_irq_mask_set(pcf, irq, 1);
}
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
{
- dev_info(pcf->dev, "Unmasking IRQ %d\n", irq);
+ dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
return __pcf50633_irq_mask_set(pcf, irq, 0);
}
@@ -345,6 +345,9 @@ static void pcf50633_irq_worker(struct work_struct *work)
goto out;
}
+ /* defeat 8s death from lowsys on A5 */
+ pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
+
/* We immediately read the usb and adapter status. We thus make sure
* only of USBINS/USBREM IRQ handlers are called */
if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
@@ -453,7 +456,6 @@ static void
pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
struct platform_device **pdev)
{
- struct pcf50633_subdev_pdata *subdev_pdata;
int ret;
*pdev = platform_device_alloc(name, -1);
@@ -462,15 +464,6 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
return;
}
- subdev_pdata = kmalloc(sizeof(*subdev_pdata), GFP_KERNEL);
- if (!subdev_pdata) {
- dev_err(pcf->dev, "Error allocating subdev pdata\n");
- platform_device_put(*pdev);
- }
-
- subdev_pdata->pcf = pcf;
- platform_device_add_data(*pdev, subdev_pdata, sizeof(*subdev_pdata));
-
(*pdev)->dev.parent = pcf->dev;
ret = platform_device_add(*pdev);
@@ -482,13 +475,13 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
}
#ifdef CONFIG_PM
-static int pcf50633_suspend(struct device *dev, pm_message_t state)
+static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
{
struct pcf50633 *pcf;
int ret = 0, i;
u8 res[5];
- pcf = dev_get_drvdata(dev);
+ pcf = i2c_get_clientdata(client);
/* Make sure our interrupt handlers are not called
* henceforth */
@@ -523,12 +516,12 @@ out:
return ret;
}
-static int pcf50633_resume(struct device *dev)
+static int pcf50633_resume(struct i2c_client *client)
{
struct pcf50633 *pcf;
int ret;
- pcf = dev_get_drvdata(dev);
+ pcf = i2c_get_clientdata(client);
/* Write the saved mask registers */
ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
@@ -560,9 +553,14 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
{
struct pcf50633 *pcf;
struct pcf50633_platform_data *pdata = client->dev.platform_data;
- int i, ret = 0;
+ int i, ret;
int version, variant;
+ if (!client->irq) {
+ dev_err(&client->dev, "Missing IRQ\n");
+ return -ENOENT;
+ }
+
pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
if (!pcf)
return -ENOMEM;
@@ -577,6 +575,12 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pcf->irq = client->irq;
pcf->work_queue = create_singlethread_workqueue("pcf50633");
+ if (!pcf->work_queue) {
+ dev_err(&client->dev, "Failed to alloc workqueue\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
INIT_WORK(&pcf->irq_work, pcf50633_irq_worker);
version = pcf50633_reg_read(pcf, 0);
@@ -584,7 +588,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err;
+ goto err_destroy_workqueue;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -598,6 +602,14 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
+ ret = request_irq(client->irq, pcf50633_irq,
+ IRQF_TRIGGER_LOW, "pcf50633", pcf);
+
+ if (ret) {
+ dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
+ goto err_destroy_workqueue;
+ }
+
/* Create sub devices */
pcf50633_client_dev_register(pcf, "pcf50633-input",
&pcf->input_pdev);
@@ -613,31 +625,18 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pdev = platform_device_alloc("pcf50633-regltr", i);
if (!pdev) {
- dev_err(pcf->dev, "Cannot create regulator\n");
+ dev_err(pcf->dev, "Cannot create regulator %d\n", i);
continue;
}
pdev->dev.parent = pcf->dev;
- pdev->dev.platform_data = &pdata->reg_init_data[i];
- dev_set_drvdata(&pdev->dev, pcf);
+ platform_device_add_data(pdev, &pdata->reg_init_data[i],
+ sizeof(pdata->reg_init_data[i]));
pcf->regulator_pdev[i] = pdev;
platform_device_add(pdev);
}
- if (client->irq) {
- ret = request_irq(client->irq, pcf50633_irq,
- IRQF_TRIGGER_LOW, "pcf50633", pcf);
-
- if (ret) {
- dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
- goto err;
- }
- } else {
- dev_err(pcf->dev, "No IRQ configured\n");
- goto err;
- }
-
if (enable_irq_wake(client->irq) < 0)
dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source"
"in this hardware revision", client->irq);
@@ -651,9 +650,12 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return 0;
-err:
+err_destroy_workqueue:
destroy_workqueue(pcf->work_queue);
+err_free:
+ i2c_set_clientdata(client, NULL);
kfree(pcf);
+
return ret;
}
@@ -686,12 +688,12 @@ static struct i2c_device_id pcf50633_id_table[] = {
static struct i2c_driver pcf50633_driver = {
.driver = {
.name = "pcf50633",
- .suspend = pcf50633_suspend,
- .resume = pcf50633_resume,
},
.id_table = pcf50633_id_table,
.probe = pcf50633_probe,
.remove = __devexit_p(pcf50633_remove),
+ .suspend = pcf50633_suspend,
+ .resume = pcf50633_resume,
};
static int __init pcf50633_init(void)
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index acf8b9d5f57..e5955306c2f 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -637,7 +637,7 @@ static int tps65010_probe(struct i2c_client *client,
tps, DEBUG_FOPS);
/* optionally register GPIOs */
- if (board && board->base > 0) {
+ if (board && board->base != 0) {
tps->outmask = board->outmask;
tps->chip.label = client->name;
@@ -964,6 +964,34 @@ int tps65010_config_vregs1(unsigned value)
}
EXPORT_SYMBOL(tps65010_config_vregs1);
+int tps65010_config_vdcdc2(unsigned value)
+{
+ struct i2c_client *c;
+ int status;
+
+ if (!the_tps)
+ return -ENODEV;
+
+ c = the_tps->client;
+ mutex_lock(&the_tps->lock);
+
+ pr_debug("%s: vdcdc2 0x%02x\n", DRIVER_NAME,
+ i2c_smbus_read_byte_data(c, TPS_VDCDC2));
+
+ status = i2c_smbus_write_byte_data(c, TPS_VDCDC2, value);
+
+ if (status != 0)
+ printk(KERN_ERR "%s: Failed to write vdcdc2 register\n",
+ DRIVER_NAME);
+ else
+ pr_debug("%s: vregs1 0x%02x\n", DRIVER_NAME,
+ i2c_smbus_read_byte_data(c, TPS_VDCDC2));
+
+ mutex_unlock(&the_tps->lock);
+ return status;
+}
+EXPORT_SYMBOL(tps65010_config_vdcdc2);
+
/*-------------------------------------------------------------------------*/
/* tps65013_set_low_pwr parameter:
* mode: ON or OFF
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl-core.c
index 40449cdf09d..2a760653419 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1,5 +1,6 @@
/*
- * twl4030_core.c - driver for TWL4030/TPS659x0 PM and audio CODEC devices
+ * twl_core.c - driver for TWL4030/TWL5030/TWL60X0/TPS659x0 PM
+ * and audio CODEC devices
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
@@ -36,7 +37,7 @@
#include <linux/regulator/machine.h>
#include <linux/i2c.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
#include <plat/cpu.h>
@@ -55,7 +56,7 @@
* (and associated registers).
*/
-#define DRIVER_NAME "twl4030"
+#define DRIVER_NAME "twl"
#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
defined(CONFIG_TWL4030_BCI_BATTERY_MODULE)
@@ -125,7 +126,7 @@
/* Last - for index max*/
#define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG
-#define TWL4030_NUM_SLAVES 4
+#define TWL_NUM_SLAVES 4
#if defined(CONFIG_INPUT_TWL4030_PWRBUTTON) \
|| defined(CONFIG_INPUT_TWL4030_PWBUTTON_MODULE)
@@ -134,6 +135,13 @@
#define twl_has_pwrbutton() false
#endif
+#define SUB_CHIP_ID0 0
+#define SUB_CHIP_ID1 1
+#define SUB_CHIP_ID2 2
+#define SUB_CHIP_ID3 3
+
+#define TWL_MODULE_LAST TWL4030_MODULE_LAST
+
/* Base Address defns for twl4030_map[] */
/* subchip/slave 0 - USB ID */
@@ -158,6 +166,10 @@
#define TWL4030_BASEADD_PWMB 0x00F1
#define TWL4030_BASEADD_KEYPAD 0x00D2
+#define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */
+#define TWL5031_BASEADD_INTERRUPTS 0x00B9 /* Different than TWL4030's one */
+
/* subchip/slave 3 - POWER ID */
#define TWL4030_BASEADD_BACKUP 0x0014
#define TWL4030_BASEADD_INT 0x002E
@@ -169,6 +181,30 @@
/* Triton Core internal information (END) */
+/* subchip/slave 0 0x48 - POWER */
+#define TWL6030_BASEADD_RTC 0x0000
+#define TWL6030_BASEADD_MEM 0x0017
+#define TWL6030_BASEADD_PM_MASTER 0x001F
+#define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */
+#define TWL6030_BASEADD_PM_MISC 0x00E2
+#define TWL6030_BASEADD_PM_PUPD 0x00F0
+
+/* subchip/slave 1 0x49 - FEATURE */
+#define TWL6030_BASEADD_USB 0x0000
+#define TWL6030_BASEADD_GPADC_CTRL 0x002E
+#define TWL6030_BASEADD_AUX 0x0090
+#define TWL6030_BASEADD_PWM 0x00BA
+#define TWL6030_BASEADD_GASGAUGE 0x00C0
+#define TWL6030_BASEADD_PIH 0x00D0
+#define TWL6030_BASEADD_CHARGER 0x00E0
+
+/* subchip/slave 2 0x4A - DFT */
+#define TWL6030_BASEADD_DIEID 0x00C0
+
+/* subchip/slave 3 0x4B - AUDIO */
+#define TWL6030_BASEADD_AUDIO 0x0000
+#define TWL6030_BASEADD_RSV 0x0000
+
/* Few power values */
#define R_CFG_BOOT 0x05
#define R_PROTECT_KEY 0x0E
@@ -183,19 +219,29 @@
#define HFCLK_FREQ_26_MHZ (2 << 0)
#define HFCLK_FREQ_38p4_MHZ (3 << 0)
#define HIGH_PERF_SQ (1 << 3)
+#define CK32K_LOWPWR_EN (1 << 7)
/* chip-specific feature flags, for i2c_device_id.driver_data */
#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */
#define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */
+#define TWL5031 BIT(2) /* twl5031 has different registers */
+#define TWL6030_CLASS BIT(3) /* TWL6030 class */
/*----------------------------------------------------------------------*/
/* is driver active, bound to a chip? */
static bool inuse;
-/* Structure for each TWL4030 Slave */
-struct twl4030_client {
+static unsigned int twl_id;
+unsigned int twl_rev(void)
+{
+ return twl_id;
+}
+EXPORT_SYMBOL(twl_rev);
+
+/* Structure for each TWL4030/TWL6030 Slave */
+struct twl_client {
struct i2c_client *client;
u8 address;
@@ -206,19 +252,20 @@ struct twl4030_client {
struct mutex xfer_lock;
};
-static struct twl4030_client twl4030_modules[TWL4030_NUM_SLAVES];
+static struct twl_client twl_modules[TWL_NUM_SLAVES];
/* mapping the module id to slave id and base address */
-struct twl4030mapping {
+struct twl_mapping {
unsigned char sid; /* Slave ID */
unsigned char base; /* base address */
};
+struct twl_mapping *twl_map;
-static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
/*
* NOTE: don't change this table without updating the
- * <linux/i2c/twl4030.h> defines for TWL4030_MODULE_*
+ * <linux/i2c/twl.h> defines for TWL4030_MODULE_*
* so they continue to match the order in this table.
*/
@@ -240,6 +287,8 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
{ 2, TWL4030_BASEADD_PWM1 },
{ 2, TWL4030_BASEADD_PWMA },
{ 2, TWL4030_BASEADD_PWMB },
+ { 2, TWL5031_BASEADD_ACCESSORY },
+ { 2, TWL5031_BASEADD_INTERRUPTS },
{ 3, TWL4030_BASEADD_BACKUP },
{ 3, TWL4030_BASEADD_INT },
@@ -249,12 +298,46 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
{ 3, TWL4030_BASEADD_SECURED_REG },
};
+static struct twl_mapping twl6030_map[] = {
+ /*
+ * NOTE: don't change this table without updating the
+ * <linux/i2c/twl.h> defines for TWL4030_MODULE_*
+ * so they continue to match the order in this table.
+ */
+ { SUB_CHIP_ID1, TWL6030_BASEADD_USB },
+ { SUB_CHIP_ID3, TWL6030_BASEADD_AUDIO },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_DIEID },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_PIH },
+
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_GPADC_CTRL },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
+ { SUB_CHIP_ID1, TWL6030_BASEADD_CHARGER },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_GASGAUGE },
+ { SUB_CHIP_ID1, TWL6030_BASEADD_PWM },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+ { SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
+ { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
+
+ { SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
+ { SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
+};
+
/*----------------------------------------------------------------------*/
/* Exported Functions */
/**
- * twl4030_i2c_write - Writes a n bit register in TWL4030
+ * twl_i2c_write - Writes an n-bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: an array of num_bytes+1 containing data to write
* @reg: register address (just offset will do)
@@ -265,19 +348,19 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
*
* Returns the result of operation - 0 is success
*/
-int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
int ret;
int sid;
- struct twl4030_client *twl;
+ struct twl_client *twl;
struct i2c_msg *msg;
- if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+ if (unlikely(mod_no > TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl4030_map[mod_no].sid;
- twl = &twl4030_modules[sid];
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
if (unlikely(!inuse)) {
pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
@@ -294,19 +377,26 @@ int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
msg->flags = 0;
msg->buf = value;
/* over write the first byte of buffer with the register address */
- *value = twl4030_map[mod_no].base + reg;
+ *value = twl_map[mod_no].base + reg;
ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
mutex_unlock(&twl->xfer_lock);
- /* i2cTransfer returns num messages.translate it pls.. */
- if (ret >= 0)
- ret = 0;
- return ret;
+ /* i2c_transfer returns number of messages transferred */
+ if (ret != 1) {
+ pr_err("%s: i2c_write failed to transfer all messages\n",
+ DRIVER_NAME);
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+ } else {
+ return 0;
+ }
}
-EXPORT_SYMBOL(twl4030_i2c_write);
+EXPORT_SYMBOL(twl_i2c_write);
/**
- * twl4030_i2c_read - Reads a n bit register in TWL4030
+ * twl_i2c_read - Reads an n-bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: an array of num_bytes containing data to be read
* @reg: register address (just offset will do)
@@ -314,20 +404,20 @@ EXPORT_SYMBOL(twl4030_i2c_write);
*
* Returns result of operation - num_bytes is success else failure.
*/
-int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
int ret;
u8 val;
int sid;
- struct twl4030_client *twl;
+ struct twl_client *twl;
struct i2c_msg *msg;
- if (unlikely(mod_no > TWL4030_MODULE_LAST)) {
+ if (unlikely(mod_no > TWL_MODULE_LAST)) {
pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
return -EPERM;
}
- sid = twl4030_map[mod_no].sid;
- twl = &twl4030_modules[sid];
+ sid = twl_map[mod_no].sid;
+ twl = &twl_modules[sid];
if (unlikely(!inuse)) {
pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
@@ -339,7 +429,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
msg->addr = twl->address;
msg->len = 1;
msg->flags = 0; /* Read the register value */
- val = twl4030_map[mod_no].base + reg;
+ val = twl_map[mod_no].base + reg;
msg->buf = &val;
/* [MSG2] fill the data rx buffer */
msg = &twl->xfer_msg[1];
@@ -350,45 +440,52 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
mutex_unlock(&twl->xfer_lock);
- /* i2cTransfer returns num messages.translate it pls.. */
- if (ret >= 0)
- ret = 0;
- return ret;
+ /* i2c_transfer returns number of messages transferred */
+ if (ret != 2) {
+ pr_err("%s: i2c_read failed to transfer all messages\n",
+ DRIVER_NAME);
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+ } else {
+ return 0;
+ }
}
-EXPORT_SYMBOL(twl4030_i2c_read);
+EXPORT_SYMBOL(twl_i2c_read);
/**
- * twl4030_i2c_write_u8 - Writes a 8 bit register in TWL4030
+ * twl_i2c_write_u8 - Writes an 8-bit register in TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: the value to be written 8 bit
* @reg: register address (just offset will do)
*
* Returns result of operation - 0 is success
*/
-int twl4030_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
+int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
{
/* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */
u8 temp_buffer[2] = { 0 };
/* offset 1 contains the data */
temp_buffer[1] = value;
- return twl4030_i2c_write(mod_no, temp_buffer, reg, 1);
+ return twl_i2c_write(mod_no, temp_buffer, reg, 1);
}
-EXPORT_SYMBOL(twl4030_i2c_write_u8);
+EXPORT_SYMBOL(twl_i2c_write_u8);
/**
- * twl4030_i2c_read_u8 - Reads a 8 bit register from TWL4030
+ * twl_i2c_read_u8 - Reads an 8-bit register from TWL4030/TWL5030/TWL60X0
* @mod_no: module number
* @value: the value read 8 bit
* @reg: register address (just offset will do)
*
* Returns result of operation - 0 is success
*/
-int twl4030_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
+int twl_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
{
- return twl4030_i2c_read(mod_no, value, reg, 1);
+ return twl_i2c_read(mod_no, value, reg, 1);
}
-EXPORT_SYMBOL(twl4030_i2c_read_u8);
+EXPORT_SYMBOL(twl_i2c_read_u8);
/*----------------------------------------------------------------------*/
@@ -398,7 +495,7 @@ add_numbered_child(unsigned chip, const char *name, int num,
bool can_wakeup, int irq0, int irq1)
{
struct platform_device *pdev;
- struct twl4030_client *twl = &twl4030_modules[chip];
+ struct twl_client *twl = &twl_modules[chip];
int status;
pdev = platform_device_alloc(name, num);
@@ -456,6 +553,7 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
struct regulator_consumer_supply *consumers,
unsigned num_consumers)
{
+ unsigned sub_chip_id;
/* regulator framework demands init_data ... */
if (!pdata)
return NULL;
@@ -466,7 +564,8 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
}
/* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */
- return add_numbered_child(3, "twl4030_reg", num,
+ sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid;
+ return add_numbered_child(sub_chip_id, "twl_reg", num,
pdata, sizeof(*pdata), false, 0, 0);
}
@@ -486,29 +585,32 @@ static int
add_children(struct twl4030_platform_data *pdata, unsigned long features)
{
struct device *child;
+ unsigned sub_chip_id;
- if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) {
+ if (twl_has_bci() && pdata->bci &&
+ !(features & (TPS_SUBSET | TWL5031))) {
child = add_child(3, "twl4030_bci",
pdata->bci, sizeof(*pdata->bci),
false,
/* irq0 = CHG_PRES, irq1 = BCI */
- pdata->irq_base + 8 + 1, pdata->irq_base + 2);
+ pdata->irq_base + BCI_PRES_INTR_OFFSET,
+ pdata->irq_base + BCI_INTR_OFFSET);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (twl_has_gpio() && pdata->gpio) {
- child = add_child(1, "twl4030_gpio",
+ child = add_child(SUB_CHIP_ID1, "twl4030_gpio",
pdata->gpio, sizeof(*pdata->gpio),
- false, pdata->irq_base + 0, 0);
+ false, pdata->irq_base + GPIO_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
if (twl_has_keypad() && pdata->keypad) {
- child = add_child(2, "twl4030_keypad",
+ child = add_child(SUB_CHIP_ID2, "twl4030_keypad",
pdata->keypad, sizeof(*pdata->keypad),
- true, pdata->irq_base + 1, 0);
+ true, pdata->irq_base + KEYPAD_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
@@ -516,7 +618,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
if (twl_has_madc() && pdata->madc) {
child = add_child(2, "twl4030_madc",
pdata->madc, sizeof(*pdata->madc),
- true, pdata->irq_base + 3, 0);
+ true, pdata->irq_base + MADC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
@@ -529,14 +631,15 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
* Eventually, Linux might become more aware of such
* HW security concerns, and "least privilege".
*/
- child = add_child(3, "twl4030_rtc",
+ sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
+ child = add_child(sub_chip_id, "twl_rtc",
NULL, 0,
- true, pdata->irq_base + 8 + 3, 0);
+ true, pdata->irq_base + RTC_INTR_OFFSET, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- if (twl_has_usb() && pdata->usb) {
+ if (twl_has_usb() && pdata->usb && twl_class_is_4030()) {
static struct regulator_consumer_supply usb1v5 = {
.supply = "usb1v5",
@@ -581,7 +684,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
pdata->usb, sizeof(*pdata->usb),
true,
/* irq0 = USB_PRES, irq1 = USB */
- pdata->irq_base + 8 + 2, pdata->irq_base + 4);
+ pdata->irq_base + USB_PRES_INTR_OFFSET,
+ pdata->irq_base + USB_INTR_OFFSET);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -615,12 +719,23 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
- if (twl_has_regulator()) {
- /*
+ /* twl4030 regulators */
+ if (twl_has_regulator() && twl_class_is_4030()) {
child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1);
if (IS_ERR(child))
return PTR_ERR(child);
- */
+
+ child = add_regulator(TWL4030_REG_VIO, pdata->vio);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1);
if (IS_ERR(child))
@@ -636,10 +751,23 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
pdata->vaux2);
if (IS_ERR(child))
return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
}
/* maybe add LDOs that are omitted on cost-reduced parts */
- if (twl_has_regulator() && !(features & TPS_SUBSET)) {
+ if (twl_has_regulator() && !(features & TPS_SUBSET)
+ && twl_class_is_4030()) {
child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -665,6 +793,49 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
+ /* twl6030 regulators */
+ if (twl_has_regulator() && twl_class_is_6030()) {
+ child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VPP, pdata->vpp);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VANA, pdata->vana);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VDAC, pdata->vdac);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VUSB, pdata->vusb);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
return 0;
}
@@ -679,7 +850,7 @@ static inline int __init protect_pm_master(void)
{
int e = 0;
- e = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_LOCK,
+ e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_LOCK,
R_PROTECT_KEY);
return e;
}
@@ -688,14 +859,15 @@ static inline int __init unprotect_pm_master(void)
{
int e = 0;
- e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK1,
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK1,
R_PROTECT_KEY);
- e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, KEY_UNLOCK2,
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK2,
R_PROTECT_KEY);
return e;
}
-static void clocks_init(struct device *dev)
+static void clocks_init(struct device *dev,
+ struct twl4030_clock_init_data *clock)
{
int e = 0;
struct clk *osc;
@@ -709,7 +881,7 @@ static void clocks_init(struct device *dev)
osc = clk_get(dev, "osc_sys_ck");
if (IS_ERR(osc)) {
- printk(KERN_WARNING "Skipping twl4030 internal clock init and "
+ printk(KERN_WARNING "Skipping twl internal clock init and "
"using bootloader value (unknown osc rate)\n");
return;
}
@@ -723,7 +895,7 @@ static void clocks_init(struct device *dev)
*/
osc = ERR_PTR(-EIO);
- printk(KERN_WARNING "Skipping twl4030 internal clock init and "
+ printk(KERN_WARNING "Skipping twl internal clock init and "
"using bootloader value (unknown osc rate)\n");
return;
@@ -742,9 +914,12 @@ static void clocks_init(struct device *dev)
}
ctrl |= HIGH_PERF_SQ;
+ if (clock && clock->ck32k_lowpwr_enable)
+ ctrl |= CK32K_LOWPWR_EN;
+
e |= unprotect_pm_master();
/* effect->MADC+USB ck en */
- e |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
+ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, ctrl, R_CFG_BOOT);
e |= protect_pm_master();
if (e < 0)
@@ -753,24 +928,31 @@ static void clocks_init(struct device *dev)
/*----------------------------------------------------------------------*/
-int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
-int twl_exit_irq(void);
+int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+int twl4030_exit_irq(void);
+int twl4030_init_chip_irq(const char *chip);
+int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
+int twl6030_exit_irq(void);
-static int twl4030_remove(struct i2c_client *client)
+static int twl_remove(struct i2c_client *client)
{
unsigned i;
int status;
- status = twl_exit_irq();
+ if (twl_class_is_4030())
+ status = twl4030_exit_irq();
+ else
+ status = twl6030_exit_irq();
+
if (status < 0)
return status;
- for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
- struct twl4030_client *twl = &twl4030_modules[i];
+ for (i = 0; i < TWL_NUM_SLAVES; i++) {
+ struct twl_client *twl = &twl_modules[i];
if (twl->client && twl->client != client)
i2c_unregister_device(twl->client);
- twl4030_modules[i].client = NULL;
+ twl_modules[i].client = NULL;
}
inuse = false;
return 0;
@@ -778,7 +960,7 @@ static int twl4030_remove(struct i2c_client *client)
/* NOTE: this driver only handles a single twl4030/tps659x0 chip */
static int __init
-twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
+twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
int status;
unsigned i;
@@ -799,8 +981,8 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -EBUSY;
}
- for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
- struct twl4030_client *twl = &twl4030_modules[i];
+ for (i = 0; i < TWL_NUM_SLAVES; i++) {
+ struct twl_client *twl = &twl_modules[i];
twl->address = client->addr + i;
if (i == 0)
@@ -814,15 +996,20 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
status = -ENOMEM;
goto fail;
}
- strlcpy(twl->client->name, id->name,
- sizeof(twl->client->name));
}
mutex_init(&twl->xfer_lock);
}
inuse = true;
+ if ((id->driver_data) & TWL6030_CLASS) {
+ twl_id = TWL6030_CLASS_ID;
+ twl_map = &twl6030_map[0];
+ } else {
+ twl_id = TWL4030_CLASS_ID;
+ twl_map = &twl4030_map[0];
+ }
/* setup clock framework */
- clocks_init(&client->dev);
+ clocks_init(&client->dev, pdata->clock);
/* load power event scripts */
if (twl_has_power() && pdata->power)
@@ -832,7 +1019,15 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (client->irq
&& pdata->irq_base
&& pdata->irq_end > pdata->irq_base) {
- status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end);
+ if (twl_class_is_4030()) {
+ twl4030_init_chip_irq(id->name);
+ status = twl4030_init_irq(client->irq, pdata->irq_base,
+ pdata->irq_end);
+ } else {
+ status = twl6030_init_irq(client->irq, pdata->irq_base,
+ pdata->irq_end);
+ }
+
if (status < 0)
goto fail;
}
@@ -840,40 +1035,42 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
status = add_children(pdata, id->driver_data);
fail:
if (status < 0)
- twl4030_remove(client);
+ twl_remove(client);
return status;
}
-static const struct i2c_device_id twl4030_ids[] = {
+static const struct i2c_device_id twl_ids[] = {
{ "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
{ "twl5030", 0 }, /* T2 updated */
+ { "twl5031", TWL5031 }, /* TWL5030 updated */
{ "tps65950", 0 }, /* catalog version of twl5030 */
{ "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */
{ "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */
+ { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
{ /* end of list */ },
};
-MODULE_DEVICE_TABLE(i2c, twl4030_ids);
+MODULE_DEVICE_TABLE(i2c, twl_ids);
/* One Client Driver , 4 Clients */
-static struct i2c_driver twl4030_driver = {
+static struct i2c_driver twl_driver = {
.driver.name = DRIVER_NAME,
- .id_table = twl4030_ids,
- .probe = twl4030_probe,
- .remove = twl4030_remove,
+ .id_table = twl_ids,
+ .probe = twl_probe,
+ .remove = twl_remove,
};
-static int __init twl4030_init(void)
+static int __init twl_init(void)
{
- return i2c_add_driver(&twl4030_driver);
+ return i2c_add_driver(&twl_driver);
}
-subsys_initcall(twl4030_init);
+subsys_initcall(twl_init);
-static void __exit twl4030_exit(void)
+static void __exit twl_exit(void)
{
- i2c_del_driver(&twl4030_driver);
+ i2c_del_driver(&twl_driver);
}
-module_exit(twl4030_exit);
+module_exit(twl_exit);
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("I2C Core interface for TWL4030");
+MODULE_DESCRIPTION("I2C Core interface for TWL");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
index 77b914907d7..700b149c1b9 100644
--- a/drivers/mfd/twl4030-codec.c
+++ b/drivers/mfd/twl4030-codec.c
@@ -26,7 +26,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl4030-codec.h>
@@ -56,7 +56,7 @@ static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
u8 val;
- twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
codec->resource[id].reg);
if (enable)
@@ -64,7 +64,7 @@ static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
else
val &= ~codec->resource[id].mask;
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
val, codec->resource[id].reg);
return val;
@@ -75,7 +75,7 @@ static inline int twl4030_codec_get_resource(enum twl4030_codec_res id)
struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
u8 val;
- twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
codec->resource[id].reg);
return val;
@@ -183,7 +183,7 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Invalid audio_mclk\n");
return -EINVAL;
}
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
val, TWL4030_REG_APLL_CTL);
codec = kzalloc(sizeof(struct twl4030_codec), GFP_KERNEL);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index fb194fe244c..20d29bafc9f 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -32,7 +32,7 @@
#include <linux/irq.h>
#include <linux/kthread.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
@@ -74,6 +74,8 @@ struct sih {
u8 edr_offset;
u8 bytes_edr; /* bytelen of EDR */
+ u8 irq_lines; /* number of supported irq lines */
+
/* SIR ignored -- set interrupt, for testing only */
struct irq_data {
u8 isr_offset;
@@ -82,6 +84,9 @@ struct sih {
/* + 2 bytes padding */
};
+static const struct sih *sih_modules;
+static int nr_sih_modules;
+
#define SIH_INITIALIZER(modname, nbits) \
.module = TWL4030_MODULE_ ## modname, \
.control_offset = TWL4030_ ## modname ## _SIH_CTRL, \
@@ -89,6 +94,7 @@ struct sih {
.bytes_ixr = DIV_ROUND_UP(nbits, 8), \
.edr_offset = TWL4030_ ## modname ## _EDR, \
.bytes_edr = DIV_ROUND_UP((2*(nbits)), 8), \
+ .irq_lines = 2, \
.mask = { { \
.isr_offset = TWL4030_ ## modname ## _ISR1, \
.imr_offset = TWL4030_ ## modname ## _IMR1, \
@@ -107,7 +113,8 @@ struct sih {
/* Order in this table matches order in PIH_ISR. That is,
* BIT(n) in PIH_ISR is sih_modules[n].
*/
-static const struct sih sih_modules[6] = {
+/* sih_modules_twl4030 is used both in twl4030 and twl5030 */
+static const struct sih sih_modules_twl4030[6] = {
[0] = {
.name = "gpio",
.module = TWL4030_MODULE_GPIO,
@@ -118,6 +125,7 @@ static const struct sih sih_modules[6] = {
/* Note: *all* of these IRQs default to no-trigger */
.edr_offset = REG_GPIO_EDR1,
.bytes_edr = 5,
+ .irq_lines = 2,
.mask = { {
.isr_offset = REG_GPIO_ISR1A,
.imr_offset = REG_GPIO_IMR1A,
@@ -140,6 +148,7 @@ static const struct sih sih_modules[6] = {
.edr_offset = TWL4030_INTERRUPTS_BCIEDR1,
/* Note: most of these IRQs default to no-trigger */
.bytes_edr = 3,
+ .irq_lines = 2,
.mask = { {
.isr_offset = TWL4030_INTERRUPTS_BCIISR1A,
.imr_offset = TWL4030_INTERRUPTS_BCIIMR1A,
@@ -164,6 +173,99 @@ static const struct sih sih_modules[6] = {
/* there are no SIH modules #6 or #7 ... */
};
+static const struct sih sih_modules_twl5031[8] = {
+ [0] = {
+ .name = "gpio",
+ .module = TWL4030_MODULE_GPIO,
+ .control_offset = REG_GPIO_SIH_CTRL,
+ .set_cor = true,
+ .bits = TWL4030_GPIO_MAX,
+ .bytes_ixr = 3,
+ /* Note: *all* of these IRQs default to no-trigger */
+ .edr_offset = REG_GPIO_EDR1,
+ .bytes_edr = 5,
+ .irq_lines = 2,
+ .mask = { {
+ .isr_offset = REG_GPIO_ISR1A,
+ .imr_offset = REG_GPIO_IMR1A,
+ }, {
+ .isr_offset = REG_GPIO_ISR1B,
+ .imr_offset = REG_GPIO_IMR1B,
+ }, },
+ },
+ [1] = {
+ .name = "keypad",
+ .set_cor = true,
+ SIH_INITIALIZER(KEYPAD_KEYP, 4)
+ },
+ [2] = {
+ .name = "bci",
+ .module = TWL5031_MODULE_INTERRUPTS,
+ .control_offset = TWL5031_INTERRUPTS_BCISIHCTRL,
+ .bits = 7,
+ .bytes_ixr = 1,
+ .edr_offset = TWL5031_INTERRUPTS_BCIEDR1,
+ /* Note: most of these IRQs default to no-trigger */
+ .bytes_edr = 2,
+ .irq_lines = 2,
+ .mask = { {
+ .isr_offset = TWL5031_INTERRUPTS_BCIISR1,
+ .imr_offset = TWL5031_INTERRUPTS_BCIIMR1,
+ }, {
+ .isr_offset = TWL5031_INTERRUPTS_BCIISR2,
+ .imr_offset = TWL5031_INTERRUPTS_BCIIMR2,
+ }, },
+ },
+ [3] = {
+ .name = "madc",
+ SIH_INITIALIZER(MADC, 4)
+ },
+ [4] = {
+ /* USB doesn't use the same SIH organization */
+ .name = "usb",
+ },
+ [5] = {
+ .name = "power",
+ .set_cor = true,
+ SIH_INITIALIZER(INT_PWR, 8)
+ },
+ [6] = {
+ /*
+ * ACI doesn't use the same SIH organization.
+ * For example, it supports only one interrupt line
+ */
+ .name = "aci",
+ .module = TWL5031_MODULE_ACCESSORY,
+ .bits = 9,
+ .bytes_ixr = 2,
+ .irq_lines = 1,
+ .mask = { {
+ .isr_offset = TWL5031_ACIIDR_LSB,
+ .imr_offset = TWL5031_ACIIMR_LSB,
+ }, },
+
+ },
+ [7] = {
+ /* Accessory */
+ .name = "acc",
+ .module = TWL5031_MODULE_ACCESSORY,
+ .control_offset = TWL5031_ACCSIHCTRL,
+ .bits = 2,
+ .bytes_ixr = 1,
+ .edr_offset = TWL5031_ACCEDR1,
+ /* Note: most of these IRQs default to no-trigger */
+ .bytes_edr = 1,
+ .irq_lines = 2,
+ .mask = { {
+ .isr_offset = TWL5031_ACCISR1,
+ .imr_offset = TWL5031_ACCIMR1,
+ }, {
+ .isr_offset = TWL5031_ACCISR2,
+ .imr_offset = TWL5031_ACCIMR2,
+ }, },
+ },
+};
+
#undef TWL4030_MODULE_KEYPAD_KEYP
#undef TWL4030_MODULE_INT_PWR
#undef TWL4030_INT_PWR_EDR
@@ -194,7 +296,7 @@ static int twl4030_irq_thread(void *data)
/* Wait for IRQ, then read PIH irq status (also blocking) */
wait_for_completion_interruptible(&irq_event);
- ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
+ ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
REG_PIH_ISR_P1);
if (ret) {
pr_warning("twl4030: I2C error %d reading PIH ISR\n",
@@ -284,13 +386,17 @@ static int twl4030_init_sih_modules(unsigned line)
/* disable all interrupts on our line */
memset(buf, 0xff, sizeof buf);
sih = sih_modules;
- for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+ for (i = 0; i < nr_sih_modules; i++, sih++) {
/* skip USB -- it's funky */
if (!sih->bytes_ixr)
continue;
- status = twl4030_i2c_write(sih->module, buf,
+ /* Not all the SIH modules support multiple interrupt lines */
+ if (sih->irq_lines <= line)
+ continue;
+
+ status = twl_i2c_write(sih->module, buf,
sih->mask[line].imr_offset, sih->bytes_ixr);
if (status < 0)
pr_err("twl4030: err %d initializing %s %s\n",
@@ -304,7 +410,7 @@ static int twl4030_init_sih_modules(unsigned line)
* And for PWR_INT it's not documented...
*/
if (sih->set_cor) {
- status = twl4030_i2c_write_u8(sih->module,
+ status = twl_i2c_write_u8(sih->module,
TWL4030_SIH_CTRL_COR_MASK,
sih->control_offset);
if (status < 0)
@@ -314,7 +420,7 @@ static int twl4030_init_sih_modules(unsigned line)
}
sih = sih_modules;
- for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
+ for (i = 0; i < nr_sih_modules; i++, sih++) {
u8 rxbuf[4];
int j;
@@ -322,20 +428,24 @@ static int twl4030_init_sih_modules(unsigned line)
if (!sih->bytes_ixr)
continue;
+ /* Not all the SIH modules support multiple interrupt lines */
+ if (sih->irq_lines <= line)
+ continue;
+
/* Clear pending interrupt status. Either the read was
* enough, or we need to write those bits. Repeat, in
* case an IRQ is pending (PENDDIS=0) ... that's not
* uncommon with PWR_INT.PWRON.
*/
for (j = 0; j < 2; j++) {
- status = twl4030_i2c_read(sih->module, rxbuf,
+ status = twl_i2c_read(sih->module, rxbuf,
sih->mask[line].isr_offset, sih->bytes_ixr);
if (status < 0)
pr_err("twl4030: err %d initializing %s %s\n",
status, sih->name, "ISR");
if (!sih->set_cor)
- status = twl4030_i2c_write(sih->module, buf,
+ status = twl_i2c_write(sih->module, buf,
sih->mask[line].isr_offset,
sih->bytes_ixr);
/* else COR=1 means read sufficed.
@@ -404,7 +514,7 @@ static void twl4030_sih_do_mask(struct work_struct *work)
return;
/* write the whole mask ... simpler than subsetting it */
- status = twl4030_i2c_write(sih->module, imr.bytes,
+ status = twl_i2c_write(sih->module, imr.bytes,
sih->mask[irq_line].imr_offset, sih->bytes_ixr);
if (status)
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -435,7 +545,7 @@ static void twl4030_sih_do_edge(struct work_struct *work)
* any processor on the other IRQ line, EDR registers are
* shared.
*/
- status = twl4030_i2c_read(sih->module, bytes + 1,
+ status = twl_i2c_read(sih->module, bytes + 1,
sih->edr_offset, sih->bytes_edr);
if (status) {
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -469,7 +579,7 @@ static void twl4030_sih_do_edge(struct work_struct *work)
}
/* Write */
- status = twl4030_i2c_write(sih->module, bytes,
+ status = twl_i2c_write(sih->module, bytes,
sih->edr_offset, sih->bytes_edr);
if (status)
pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -554,7 +664,7 @@ static inline int sih_read_isr(const struct sih *sih)
/* FIXME need retry-on-error ... */
isr.word = 0;
- status = twl4030_i2c_read(sih->module, isr.bytes,
+ status = twl_i2c_read(sih->module, isr.bytes,
sih->mask[irq_line].isr_offset, sih->bytes_ixr);
return (status < 0) ? status : le32_to_cpu(isr.word);
@@ -611,7 +721,7 @@ int twl4030_sih_setup(int module)
/* only support modules with standard clear-on-read for now */
for (sih_mod = 0, sih = sih_modules;
- sih_mod < ARRAY_SIZE(sih_modules);
+ sih_mod < nr_sih_modules;
sih_mod++, sih++) {
if (sih->module == module && sih->set_cor) {
if (!WARN((irq_base + sih->bits) > NR_IRQS,
@@ -668,7 +778,7 @@ int twl4030_sih_setup(int module)
/* FIXME pass in which interrupt line we'll use ... */
#define twl_irq_line 0
-int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
{
static struct irq_chip twl4030_irq_chip;
@@ -728,7 +838,8 @@ int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
goto fail_rqirq;
}
- task = kthread_run(twl4030_irq_thread, (void *)irq_num, "twl4030-irq");
+ task = kthread_run(twl4030_irq_thread, (void *)(long)irq_num,
+ "twl4030-irq");
if (IS_ERR(task)) {
pr_err("twl4030: could not create irq %d thread!\n", irq_num);
status = PTR_ERR(task);
@@ -747,7 +858,7 @@ fail:
return status;
}
-int twl_exit_irq(void)
+int twl4030_exit_irq(void)
{
/* FIXME undo twl_init_irq() */
if (twl4030_irq_base) {
@@ -756,3 +867,16 @@ int twl_exit_irq(void)
}
return 0;
}
+
+int twl4030_init_chip_irq(const char *chip)
+{
+ if (!strcmp(chip, "twl5031")) {
+ sih_modules = sih_modules_twl5031;
+ nr_sih_modules = ARRAY_SIZE(sih_modules_twl5031);
+ } else {
+ sih_modules = sih_modules_twl4030;
+ nr_sih_modules = ARRAY_SIZE(sih_modules_twl4030);
+ }
+
+ return 0;
+}
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index d423e0c4176..0815292fdaf 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -26,7 +26,7 @@
#include <linux/module.h>
#include <linux/pm.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/platform_device.h>
#include <asm/mach-types.h>
@@ -67,19 +67,35 @@ static u8 twl4030_start_script_address = 0x2b;
#define R_KEY_1 0xC0
#define R_KEY_2 0x0C
-/* resource configuration registers */
-
-#define DEVGROUP_OFFSET 0
+/* resource configuration registers
+ <RESOURCE>_DEV_GRP at address 'n+0'
+ <RESOURCE>_TYPE at address 'n+1'
+ <RESOURCE>_REMAP at address 'n+2'
+ <RESOURCE>_DEDICATED at address 'n+3'
+*/
+#define DEV_GRP_OFFSET 0
#define TYPE_OFFSET 1
+#define REMAP_OFFSET 2
+#define DEDICATED_OFFSET 3
+
+/* Bit positions in the registers */
+
+/* <RESOURCE>_DEV_GRP */
+#define DEV_GRP_SHIFT 5
+#define DEV_GRP_MASK (7 << DEV_GRP_SHIFT)
-/* Bit positions */
-#define DEVGROUP_SHIFT 5
-#define DEVGROUP_MASK (7 << DEVGROUP_SHIFT)
+/* <RESOURCE>_TYPE */
#define TYPE_SHIFT 0
#define TYPE_MASK (7 << TYPE_SHIFT)
#define TYPE2_SHIFT 3
#define TYPE2_MASK (3 << TYPE2_SHIFT)
+/* <RESOURCE>_REMAP */
+#define SLEEP_STATE_SHIFT 0
+#define SLEEP_STATE_MASK (0xf << SLEEP_STATE_SHIFT)
+#define OFF_STATE_SHIFT 4
+#define OFF_STATE_MASK (0xf << OFF_STATE_SHIFT)
+
static u8 res_config_addrs[] = {
[RES_VAUX1] = 0x17,
[RES_VAUX2] = 0x1b,
@@ -115,11 +131,11 @@ static int __init twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_MEMORY_ADDRESS);
if (err)
goto out;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
R_MEMORY_DATA);
out:
return err;
@@ -176,18 +192,18 @@ static int __init twl4030_config_wakeup3_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P3 */
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_S2A3);
if (err)
goto out;
/* P3 LVL_WAKEUP should be on LEVEL */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P3_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P3_SW_EVENTS);
out:
if (err)
@@ -201,42 +217,42 @@ static int __init twl4030_config_wakeup12_sequence(u8 address)
u8 data;
/* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_S2A12);
if (err)
goto out;
/* P1/P2 LVL_WAKEUP should be on LEVEL */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P1_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_P2_SW_EVENTS);
if (err)
goto out;
data |= LVL_WAKEUP;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
R_P2_SW_EVENTS);
if (err)
goto out;
if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) {
/* Disabling AC charger effect on sleep-active transitions */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
R_CFG_P1_TRANSITION);
if (err)
goto out;
data &= ~(1<<1);
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
R_CFG_P1_TRANSITION);
if (err)
goto out;
@@ -254,7 +270,7 @@ static int __init twl4030_config_sleep_sequence(u8 address)
int err;
/* Set ACTIVE to SLEEP SEQ address in T2 memory*/
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_A2S);
if (err)
@@ -269,41 +285,41 @@ static int __init twl4030_config_warmreset_sequence(u8 address)
u8 rd_data;
/* Set WARM RESET SEQ address for P1 */
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
R_SEQ_ADD_WARM);
if (err)
goto out;
/* P1/P2/P3 enable WARMRESET */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P1_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P1_SW_EVENTS);
if (err)
goto out;
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P2_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P2_SW_EVENTS);
if (err)
goto out;
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
R_P3_SW_EVENTS);
if (err)
goto out;
rd_data |= ENABLE_WARMRESET;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
R_P3_SW_EVENTS);
out:
if (err)
@@ -317,6 +333,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
int err;
u8 type;
u8 grp;
+ u8 remap;
if (rconfig->resource > TOTAL_RESOURCES) {
pr_err("TWL4030 Resource %d does not exist\n",
@@ -327,19 +344,19 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
rconfig_addr = res_config_addrs[rconfig->resource];
/* Set resource group */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
- rconfig_addr + DEVGROUP_OFFSET);
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
+ rconfig_addr + DEV_GRP_OFFSET);
if (err) {
pr_err("TWL4030 Resource %d group could not be read\n",
rconfig->resource);
return err;
}
- if (rconfig->devgroup >= 0) {
- grp &= ~DEVGROUP_MASK;
- grp |= rconfig->devgroup << DEVGROUP_SHIFT;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
- grp, rconfig_addr + DEVGROUP_OFFSET);
+ if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) {
+ grp &= ~DEV_GRP_MASK;
+ grp |= rconfig->devgroup << DEV_GRP_SHIFT;
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ grp, rconfig_addr + DEV_GRP_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program devgroup\n");
return err;
@@ -347,7 +364,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
}
/* Set resource types */
- err = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 Resource %d type could not be read\n",
@@ -355,23 +372,50 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
return err;
}
- if (rconfig->type >= 0) {
+ if (rconfig->type != TWL4030_RESCONFIG_UNDEF) {
type &= ~TYPE_MASK;
type |= rconfig->type << TYPE_SHIFT;
}
- if (rconfig->type2 >= 0) {
+ if (rconfig->type2 != TWL4030_RESCONFIG_UNDEF) {
type &= ~TYPE2_MASK;
type |= rconfig->type2 << TYPE2_SHIFT;
}
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
type, rconfig_addr + TYPE_OFFSET);
if (err < 0) {
pr_err("TWL4030 failed to program resource type\n");
return err;
}
+ /* Set remap states */
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap,
+ rconfig_addr + REMAP_OFFSET);
+ if (err < 0) {
+ pr_err("TWL4030 Resource %d remap could not be read\n",
+ rconfig->resource);
+ return err;
+ }
+
+ if (rconfig->remap_off != TWL4030_RESCONFIG_UNDEF) {
+ remap &= ~OFF_STATE_MASK;
+ remap |= rconfig->remap_off << OFF_STATE_SHIFT;
+ }
+
+ if (rconfig->remap_sleep != TWL4030_RESCONFIG_UNDEF) {
+ remap &= ~SLEEP_STATE_MASK;
+ remap |= rconfig->remap_sleep << SLEEP_STATE_SHIFT;
+ }
+
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ remap,
+ rconfig_addr + REMAP_OFFSET);
+ if (err < 0) {
+ pr_err("TWL4030 failed to program remap\n");
+ return err;
+ }
+
return 0;
}
@@ -424,12 +468,12 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
struct twl4030_resconfig *resconfig;
u8 address = twl4030_start_script_address;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_1,
R_PROTECT_KEY);
if (err)
goto unlock;
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2,
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, R_KEY_2,
R_PROTECT_KEY);
if (err)
goto unlock;
@@ -452,7 +496,7 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
}
- err = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY);
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, R_PROTECT_KEY);
if (err)
pr_err("TWL4030 Unable to relock registers\n");
return;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
new file mode 100644
index 00000000000..10bf228ad62
--- /dev/null
+++ b/drivers/mfd/twl6030-irq.c
@@ -0,0 +1,299 @@
+/*
+ * twl6030-irq.c - TWL6030 irq support
+ *
+ * Copyright (C) 2005-2009 Texas Instruments, Inc.
+ *
+ * Modifications to defer interrupt handling to a kernel thread:
+ * Copyright (C) 2006 MontaVista Software, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * Code cleanup and modifications to IRQ handler.
+ * by syed khasim <x0khasim@ti.com>
+ *
+ * TWL6030 specific code and IRQ handling changes by
+ * Jagadeesh Bhaskar Pakaravoor <j-pakaravoor@ti.com>
+ * Balaji T K <balajitk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+#include <linux/i2c/twl.h>
+
+/*
+ * TWL6030 (unlike its predecessors, which had two-level interrupt handling)
+ * has three interrupt status registers: INT_STS_A, INT_STS_B and INT_STS_C.
+ * They expose status bits saying which source has raised an interrupt. Three
+ * corresponding mask registers enable/disable these interrupts.
+ *
+ * We set up IRQs starting at a platform-specified base. An interrupt map table
+ * specifies the mapping between a status bit and the associated module (see
+ * the sketch after the mapping table below).
+ */
+
+static int twl6030_interrupt_mapping[24] = {
+ PWR_INTR_OFFSET, /* Bit 0 PWRON */
+ PWR_INTR_OFFSET, /* Bit 1 RPWRON */
+ PWR_INTR_OFFSET, /* Bit 2 BAT_VLOW */
+ RTC_INTR_OFFSET, /* Bit 3 RTC_ALARM */
+ RTC_INTR_OFFSET, /* Bit 4 RTC_PERIOD */
+ HOTDIE_INTR_OFFSET, /* Bit 5 HOT_DIE */
+ SMPSLDO_INTR_OFFSET, /* Bit 6 VXXX_SHORT */
+ SMPSLDO_INTR_OFFSET, /* Bit 7 VMMC_SHORT */
+
+ SMPSLDO_INTR_OFFSET, /* Bit 8 VUSIM_SHORT */
+ BATDETECT_INTR_OFFSET, /* Bit 9 BAT */
+ SIMDETECT_INTR_OFFSET, /* Bit 10 SIM */
+ MMCDETECT_INTR_OFFSET, /* Bit 11 MMC */
+ RSV_INTR_OFFSET, /* Bit 12 Reserved */
+ MADC_INTR_OFFSET, /* Bit 13 GPADC_RT_EOC */
+ MADC_INTR_OFFSET, /* Bit 14 GPADC_SW_EOC */
+ GASGAUGE_INTR_OFFSET, /* Bit 15 CC_AUTOCAL */
+
+ USBOTG_INTR_OFFSET, /* Bit 16 ID_WKUP */
+ USBOTG_INTR_OFFSET, /* Bit 17 VBUS_WKUP */
+ USBOTG_INTR_OFFSET, /* Bit 18 ID */
+ USBOTG_INTR_OFFSET, /* Bit 19 VBUS */
+ CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */
+ CHARGER_INTR_OFFSET, /* Bit 21 EXT_CHRG */
+ CHARGER_INTR_OFFSET, /* Bit 22 INT_CHRG */
+ RSV_INTR_OFFSET, /* Bit 23 Reserved */
+};
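The mapping table above is what turns a raw PIH status bit into a Linux IRQ
number: the bit position indexes the table and the result is added to the
platform-assigned IRQ base. A minimal sketch of that translation, shown here
purely for illustration (it is not part of the patch; the status word and
irq_base are made-up inputs):

	/* Illustrative only: decode a 24-bit PIH status word into module IRQs. */
	static void decode_pih_status(u32 int_sts, unsigned irq_base)
	{
		int bit;

		for (bit = 0; int_sts; int_sts >>= 1, bit++) {
			if (!(int_sts & 0x1))
				continue;
			/* e.g. bit 11 (MMC detect) maps to MMCDETECT_INTR_OFFSET */
			pr_info("PIH bit %d -> module irq %d\n", bit,
				irq_base + twl6030_interrupt_mapping[bit]);
		}
	}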
+/*----------------------------------------------------------------------*/
+
+static unsigned twl6030_irq_base;
+
+static struct completion irq_event;
+
+/*
+ * This thread processes interrupts reported by the Primary Interrupt Handler.
+ */
+static int twl6030_irq_thread(void *data)
+{
+ long irq = (long)data;
+ static unsigned i2c_errors;
+ static const unsigned max_i2c_errors = 100;
+ int ret;
+
+ current->flags |= PF_NOFREEZE;
+
+ while (!kthread_should_stop()) {
+ int i;
+ union {
+ u8 bytes[4];
+ u32 int_sts;
+ } sts;
+
+ /* Wait for IRQ, then read PIH irq status (also blocking) */
+ wait_for_completion_interruptible(&irq_event);
+
+ /* read INT_STS_A, B and C in one shot using a burst read */
+ ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes,
+ REG_INT_STS_A, 3);
+ if (ret) {
+ pr_warning("twl6030: I2C error %d reading PIH ISR\n",
+ ret);
+ if (++i2c_errors >= max_i2c_errors) {
+ printk(KERN_ERR "Maximum I2C error count"
+ " exceeded. Terminating %s.\n",
+ __func__);
+ break;
+ }
+ complete(&irq_event);
+ continue;
+ }
+
+
+
+ sts.bytes[3] = 0; /* Only 24 bits are valid */
+
+ for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
+ local_irq_disable();
+ if (sts.int_sts & 0x1) {
+ int module_irq = twl6030_irq_base +
+ twl6030_interrupt_mapping[i];
+ struct irq_desc *d = irq_to_desc(module_irq);
+
+ if (!d) {
+ pr_err("twl6030: Invalid SIH IRQ: %d\n",
+ module_irq);
+ return -EINVAL;
+ }
+
+ /* These can't be masked ... always warn
+ * if we get any surprises.
+ */
+ if (d->status & IRQ_DISABLED)
+ note_interrupt(module_irq, d,
+ IRQ_NONE);
+ else
+ d->handle_irq(module_irq, d);
+
+ }
+ local_irq_enable();
+ }
+ ret = twl_i2c_write(TWL_MODULE_PIH, sts.bytes,
+ REG_INT_STS_A, 3); /* clear INT_STS_A */
+ if (ret)
+ pr_warning("twl6030: I2C error in clearing PIH ISR\n");
+
+ enable_irq(irq);
+ }
+
+ return 0;
+}
+
+/*
+ * handle_twl6030_pih() is the desc->handle method for the twl6030 interrupt.
+ * This is a chained interrupt, so there is no desc->action method for it.
+ * Now we need to query the interrupt controller in the twl6030 to determine
+ * which module is generating the interrupt request. However, we can't do i2c
+ * transactions in interrupt context, so we must defer that work to a kernel
+ * thread. All we do here is acknowledge and mask the interrupt and wake up
+ * the kernel thread.
+ */
+static irqreturn_t handle_twl6030_pih(int irq, void *devid)
+{
+ disable_irq_nosync(irq);
+ complete(devid);
+ return IRQ_HANDLED;
+}
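The handler above shows the split in miniature: the hard IRQ handler does
nothing that can sleep, and the completion wakes the kernel thread that does
the I2C traffic. A stripped-down sketch of the same handoff, with hypothetical
demo_* names (not part of the patch):

	static DECLARE_COMPLETION(demo_event);

	static irqreturn_t demo_hard_handler(int irq, void *devid)
	{
		disable_irq_nosync(irq);	/* no sleeping I2C allowed here */
		complete(&demo_event);		/* wake the worker thread */
		return IRQ_HANDLED;
	}

	static int demo_thread(void *data)
	{
		long irq = (long)data;		/* IRQ number passed at kthread_run() */

		while (!kthread_should_stop()) {
			wait_for_completion_interruptible(&demo_event);
			/* sleeping I2C reads and per-bit dispatch happen here */
			enable_irq(irq);	/* re-arm the line for the next event */
		}
		return 0;
	}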
+
+/*----------------------------------------------------------------------*/
+
+static inline void activate_irq(int irq)
+{
+#ifdef CONFIG_ARM
+ /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
+ * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
+ */
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ /* same effect on other architectures */
+ set_irq_noprobe(irq);
+#endif
+}
+
+/*----------------------------------------------------------------------*/
+
+static unsigned twl6030_irq_next;
+
+/*----------------------------------------------------------------------*/
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset)
+{
+ int ret;
+ u8 unmask_value;
+ ret = twl_i2c_read_u8(TWL_MODULE_PIH, &unmask_value,
+ REG_INT_STS_A + offset);
+ unmask_value &= (~(bit_mask));
+ ret |= twl_i2c_write_u8(TWL_MODULE_PIH, unmask_value,
+ REG_INT_STS_A + offset); /* unmask INT_MSK_A/B/C */
+ return ret;
+}
+EXPORT_SYMBOL(twl6030_interrupt_unmask);
+
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset)
+{
+ int ret;
+ u8 mask_value;
+ ret = twl_i2c_read_u8(TWL_MODULE_PIH, &mask_value,
+ REG_INT_STS_A + offset);
+ mask_value |= (bit_mask);
+ ret |= twl_i2c_write_u8(TWL_MODULE_PIH, mask_value,
+ REG_INT_STS_A + offset); /* mask INT_MSK_A/B/C */
+ return ret;
+}
+EXPORT_SYMBOL(twl6030_interrupt_mask);
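Both helpers are plain read-modify-write cycles on a register at
REG_INT_STS_A + offset, so callers pass the bit for their interrupt source and
the offset of the mask register they want to touch. A hedged usage sketch with
hypothetical constants (DEMO_SRC_BIT and DEMO_MSK_OFFSET are placeholders, not
values from twl.h):

	#define DEMO_SRC_BIT	BIT(2)	/* hypothetical source bit      */
	#define DEMO_MSK_OFFSET	3	/* hypothetical register offset */

	static int demo_enable_source(void)
	{
		/* clear the bit in the mask register, i.e. unmask the source */
		return twl6030_interrupt_unmask(DEMO_SRC_BIT, DEMO_MSK_OFFSET);
	}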
+
+int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
+{
+
+ int status = 0;
+ int i;
+ struct task_struct *task;
+ int ret;
+ u8 mask[4];
+
+ static struct irq_chip twl6030_irq_chip;
+ mask[1] = 0xFF;
+ mask[2] = 0xFF;
+ mask[3] = 0xFF;
+ ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
+ REG_INT_MSK_LINE_A, 3); /* MASK ALL INT LINES */
+ ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
+ REG_INT_MSK_STS_A, 3); /* MASK ALL INT STS */
+ ret = twl_i2c_write(TWL_MODULE_PIH, &mask[0],
+ REG_INT_STS_A, 3); /* clear INT_STS_A,B,C */
+
+ twl6030_irq_base = irq_base;
+
+ /* install an irq handler for each of the modules;
+ * clone dummy irq_chip since PIH can't *do* anything
+ */
+ twl6030_irq_chip = dummy_irq_chip;
+ twl6030_irq_chip.name = "twl6030";
+ twl6030_irq_chip.set_type = NULL;
+
+ for (i = irq_base; i < irq_end; i++) {
+ set_irq_chip_and_handler(i, &twl6030_irq_chip,
+ handle_simple_irq);
+ activate_irq(i);
+ }
+
+ twl6030_irq_next = i;
+ pr_info("twl6030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
+ irq_num, irq_base, twl6030_irq_next - 1);
+
+ /* install an irq handler to demultiplex the TWL6030 interrupt */
+ init_completion(&irq_event);
+ task = kthread_run(twl6030_irq_thread, (void *)irq_num, "twl6030-irq");
+ if (IS_ERR(task)) {
+ pr_err("twl6030: could not create irq %d thread!\n", irq_num);
+ status = PTR_ERR(task);
+ goto fail_kthread;
+ }
+
+ status = request_irq(irq_num, handle_twl6030_pih, IRQF_DISABLED,
+ "TWL6030-PIH", &irq_event);
+ if (status < 0) {
+ pr_err("twl6030: could not claim irq%d: %d\n", irq_num, status);
+ goto fail_irq;
+ }
+ return status;
+fail_irq:
+ free_irq(irq_num, &irq_event);
+
+fail_kthread:
+ for (i = irq_base; i < irq_end; i++)
+ set_irq_chip_and_handler(i, NULL, NULL);
+ return status;
+}
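Callers hand this function the physical IRQ line from the PMIC plus the
virtual IRQ range to populate. A minimal sketch of how the caller side might
look (in practice the twl core probe does this; the numbers below are
placeholders, not values from the patch):

	#define BOARD_TWL6030_IRQ	7	/* hypothetical physical line     */
	#define BOARD_TWL_IRQ_BASE	368	/* hypothetical first virtual IRQ */
	#define BOARD_TWL_IRQ_END	(BOARD_TWL_IRQ_BASE + 24)

	static int __init board_twl_irq_setup(void)
	{
		return twl6030_init_irq(BOARD_TWL6030_IRQ,
					BOARD_TWL_IRQ_BASE,
					BOARD_TWL_IRQ_END);
	}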
+
+int twl6030_exit_irq(void)
+{
+
+ if (twl6030_irq_base) {
+ pr_err("twl6030: can't yet clean up IRQs?\n");
+ return -ENOSYS;
+ }
+ return 0;
+}
+
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 7f27576ca04..4b2021af1d9 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -90,9 +90,10 @@ int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
EXPORT_SYMBOL_GPL(wm831x_isinkv_values);
enum wm831x_parent {
- WM8310 = 0,
- WM8311 = 1,
- WM8312 = 2,
+ WM8310 = 0x8310,
+ WM8311 = 0x8311,
+ WM8312 = 0x8312,
+ WM8320 = 0x8320,
};
static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg)
@@ -478,6 +479,20 @@ static struct resource wm831x_dcdc4_resources[] = {
},
};
+static struct resource wm8320_dcdc4_buck_resources[] = {
+ {
+ .start = WM831X_DC4_CONTROL,
+ .end = WM832X_DC4_SLEEP_CONTROL,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "UV",
+ .start = WM831X_IRQ_UV_DC4,
+ .end = WM831X_IRQ_UV_DC4,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct resource wm831x_gpio_resources[] = {
{
.start = WM831X_IRQ_GPIO_1,
@@ -794,6 +809,9 @@ static struct resource wm831x_wdt_resources[] = {
static struct mfd_cell wm8310_devs[] = {
{
+ .name = "wm831x-backup",
+ },
+ {
.name = "wm831x-buckv",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -947,6 +965,9 @@ static struct mfd_cell wm8310_devs[] = {
static struct mfd_cell wm8311_devs[] = {
{
+ .name = "wm831x-backup",
+ },
+ {
.name = "wm831x-buckv",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -1081,6 +1102,9 @@ static struct mfd_cell wm8311_devs[] = {
static struct mfd_cell wm8312_devs[] = {
{
+ .name = "wm831x-backup",
+ },
+ {
.name = "wm831x-buckv",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
@@ -1237,6 +1261,137 @@ static struct mfd_cell wm8312_devs[] = {
},
};
+static struct mfd_cell wm8320_devs[] = {
+ {
+ .name = "wm831x-backup",
+ },
+ {
+ .name = "wm831x-buckv",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(wm831x_dcdc1_resources),
+ .resources = wm831x_dcdc1_resources,
+ },
+ {
+ .name = "wm831x-buckv",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(wm831x_dcdc2_resources),
+ .resources = wm831x_dcdc2_resources,
+ },
+ {
+ .name = "wm831x-buckp",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(wm831x_dcdc3_resources),
+ .resources = wm831x_dcdc3_resources,
+ },
+ {
+ .name = "wm831x-buckp",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(wm8320_dcdc4_buck_resources),
+ .resources = wm8320_dcdc4_buck_resources,
+ },
+ {
+ .name = "wm831x-gpio",
+ .num_resources = ARRAY_SIZE(wm831x_gpio_resources),
+ .resources = wm831x_gpio_resources,
+ },
+ {
+ .name = "wm831x-hwmon",
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(wm831x_ldo1_resources),
+ .resources = wm831x_ldo1_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(wm831x_ldo2_resources),
+ .resources = wm831x_ldo2_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 3,
+ .num_resources = ARRAY_SIZE(wm831x_ldo3_resources),
+ .resources = wm831x_ldo3_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 4,
+ .num_resources = ARRAY_SIZE(wm831x_ldo4_resources),
+ .resources = wm831x_ldo4_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 5,
+ .num_resources = ARRAY_SIZE(wm831x_ldo5_resources),
+ .resources = wm831x_ldo5_resources,
+ },
+ {
+ .name = "wm831x-ldo",
+ .id = 6,
+ .num_resources = ARRAY_SIZE(wm831x_ldo6_resources),
+ .resources = wm831x_ldo6_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 7,
+ .num_resources = ARRAY_SIZE(wm831x_ldo7_resources),
+ .resources = wm831x_ldo7_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 8,
+ .num_resources = ARRAY_SIZE(wm831x_ldo8_resources),
+ .resources = wm831x_ldo8_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 9,
+ .num_resources = ARRAY_SIZE(wm831x_ldo9_resources),
+ .resources = wm831x_ldo9_resources,
+ },
+ {
+ .name = "wm831x-aldo",
+ .id = 10,
+ .num_resources = ARRAY_SIZE(wm831x_ldo10_resources),
+ .resources = wm831x_ldo10_resources,
+ },
+ {
+ .name = "wm831x-alive-ldo",
+ .id = 11,
+ .num_resources = ARRAY_SIZE(wm831x_ldo11_resources),
+ .resources = wm831x_ldo11_resources,
+ },
+ {
+ .name = "wm831x-on",
+ .num_resources = ARRAY_SIZE(wm831x_on_resources),
+ .resources = wm831x_on_resources,
+ },
+ {
+ .name = "wm831x-rtc",
+ .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
+ .resources = wm831x_rtc_resources,
+ },
+ {
+ .name = "wm831x-status",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(wm831x_status1_resources),
+ .resources = wm831x_status1_resources,
+ },
+ {
+ .name = "wm831x-status",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(wm831x_status2_resources),
+ .resources = wm831x_status2_resources,
+ },
+ {
+ .name = "wm831x-watchdog",
+ .num_resources = ARRAY_SIZE(wm831x_wdt_resources),
+ .resources = wm831x_wdt_resources,
+ },
+};
+
static struct mfd_cell backlight_devs[] = {
{
.name = "wm831x-backlight",
@@ -1282,50 +1437,37 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
goto err;
}
+ /* Some engineering samples do not have the ID set, rely on
+ * the device being registered correctly.
+ */
+ if (ret == 0) {
+ dev_info(wm831x->dev, "Device is an engineering sample\n");
+ ret = id;
+ }
+
switch (ret) {
- case 0x8310:
+ case WM8310:
parent = WM8310;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM8310 revision %c\n",
- 'A' + rev);
- break;
- }
+ wm831x->num_gpio = 16;
+ dev_info(wm831x->dev, "WM8310 revision %c\n", 'A' + rev);
break;
- case 0x8311:
+ case WM8311:
parent = WM8311;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM8311 revision %c\n",
- 'A' + rev);
- break;
- }
+ wm831x->num_gpio = 16;
+ dev_info(wm831x->dev, "WM8311 revision %c\n", 'A' + rev);
break;
- case 0x8312:
+ case WM8312:
parent = WM8312;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM8312 revision %c\n",
- 'A' + rev);
- break;
- }
+ wm831x->num_gpio = 16;
+ dev_info(wm831x->dev, "WM8312 revision %c\n", 'A' + rev);
break;
- case 0:
- /* Some engineering samples do not have the ID set,
- * rely on the device being registered correctly.
- * This will need revisiting for future devices with
- * multiple dies.
- */
- parent = id;
- switch (rev) {
- case 0:
- dev_info(wm831x->dev, "WM831%d ES revision %c\n",
- parent, 'A' + rev);
- break;
- }
+ case WM8320:
+ parent = WM8320;
+ wm831x->num_gpio = 12;
+ dev_info(wm831x->dev, "WM8320 revision %c\n", 'A' + rev);
break;
default:
@@ -1338,7 +1480,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
* current parts.
*/
if (parent != id)
- dev_warn(wm831x->dev, "Device was registered as a WM831%lu\n",
+ dev_warn(wm831x->dev, "Device was registered as a WM%lx\n",
id);
/* Bootstrap the user key */
@@ -1371,18 +1513,24 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
ret = mfd_add_devices(wm831x->dev, -1,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
- NULL, 0);
+ NULL, wm831x->irq_base);
break;
case WM8311:
ret = mfd_add_devices(wm831x->dev, -1,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
- NULL, 0);
+ NULL, wm831x->irq_base);
break;
case WM8312:
ret = mfd_add_devices(wm831x->dev, -1,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
+ NULL, wm831x->irq_base);
+ break;
+
+ case WM8320:
+ ret = mfd_add_devices(wm831x->dev, -1,
+ wm8320_devs, ARRAY_SIZE(wm8320_devs),
NULL, 0);
break;
@@ -1399,7 +1547,8 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
if (pdata && pdata->backlight) {
/* Treat errors as non-critical */
ret = mfd_add_devices(wm831x->dev, -1, backlight_devs,
- ARRAY_SIZE(backlight_devs), NULL, 0);
+ ARRAY_SIZE(backlight_devs), NULL,
+ wm831x->irq_base);
if (ret < 0)
dev_err(wm831x->dev, "Failed to add backlight: %d\n",
ret);
@@ -1511,6 +1660,7 @@ static const struct i2c_device_id wm831x_i2c_id[] = {
{ "wm8310", WM8310 },
{ "wm8311", WM8311 },
{ "wm8312", WM8312 },
+ { "wm8320", WM8320 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id);
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index ac056ea6b66..30132769711 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/interrupt.h>
@@ -339,110 +340,71 @@ static inline int irq_data_to_mask_reg(struct wm831x_irq_data *irq_data)
return WM831X_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
}
-static void __wm831x_enable_irq(struct wm831x *wm831x, int irq)
+static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
+ int irq)
{
- struct wm831x_irq_data *irq_data = &wm831x_irqs[irq];
-
- wm831x->irq_masks[irq_data->reg - 1] &= ~irq_data->mask;
- wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data),
- wm831x->irq_masks[irq_data->reg - 1]);
+ return &wm831x_irqs[irq - wm831x->irq_base];
}
-void wm831x_enable_irq(struct wm831x *wm831x, int irq)
+static void wm831x_irq_lock(unsigned int irq)
{
- mutex_lock(&wm831x->irq_lock);
- __wm831x_enable_irq(wm831x, irq);
- mutex_unlock(&wm831x->irq_lock);
-}
-EXPORT_SYMBOL_GPL(wm831x_enable_irq);
+ struct wm831x *wm831x = get_irq_chip_data(irq);
-static void __wm831x_disable_irq(struct wm831x *wm831x, int irq)
-{
- struct wm831x_irq_data *irq_data = &wm831x_irqs[irq];
-
- wm831x->irq_masks[irq_data->reg - 1] |= irq_data->mask;
- wm831x_reg_write(wm831x, irq_data_to_mask_reg(irq_data),
- wm831x->irq_masks[irq_data->reg - 1]);
-}
-
-void wm831x_disable_irq(struct wm831x *wm831x, int irq)
-{
mutex_lock(&wm831x->irq_lock);
- __wm831x_disable_irq(wm831x, irq);
- mutex_unlock(&wm831x->irq_lock);
}
-EXPORT_SYMBOL_GPL(wm831x_disable_irq);
-int wm831x_request_irq(struct wm831x *wm831x,
- unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name,
- void *dev)
+static void wm831x_irq_sync_unlock(unsigned int irq)
{
- int ret = 0;
-
- if (irq < 0 || irq >= WM831X_NUM_IRQS)
- return -EINVAL;
-
- mutex_lock(&wm831x->irq_lock);
-
- if (wm831x_irqs[irq].handler) {
- dev_err(wm831x->dev, "Already have handler for IRQ %d\n", irq);
- ret = -EINVAL;
- goto out;
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
+ /* If there's been a change in the mask write it back
+ * to the hardware. */
+ if (wm831x->irq_masks_cur[i] != wm831x->irq_masks_cache[i]) {
+ wm831x->irq_masks_cache[i] = wm831x->irq_masks_cur[i];
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1_MASK + i,
+ wm831x->irq_masks_cur[i]);
+ }
}
- wm831x_irqs[irq].handler = handler;
- wm831x_irqs[irq].handler_data = dev;
-
- __wm831x_enable_irq(wm831x, irq);
-
-out:
mutex_unlock(&wm831x->irq_lock);
-
- return ret;
}
-EXPORT_SYMBOL_GPL(wm831x_request_irq);
-void wm831x_free_irq(struct wm831x *wm831x, unsigned int irq, void *data)
+static void wm831x_irq_unmask(unsigned int irq)
{
- if (irq < 0 || irq >= WM831X_NUM_IRQS)
- return;
-
- mutex_lock(&wm831x->irq_lock);
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
- wm831x_irqs[irq].handler = NULL;
- wm831x_irqs[irq].handler_data = NULL;
-
- __wm831x_disable_irq(wm831x, irq);
-
- mutex_unlock(&wm831x->irq_lock);
+ wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-EXPORT_SYMBOL_GPL(wm831x_free_irq);
-
-static void wm831x_handle_irq(struct wm831x *wm831x, int irq, int status)
+static void wm831x_irq_mask(unsigned int irq)
{
- struct wm831x_irq_data *irq_data = &wm831x_irqs[irq];
-
- if (irq_data->handler) {
- irq_data->handler(irq, irq_data->handler_data);
- wm831x_reg_write(wm831x, irq_data_to_status_reg(irq_data),
- irq_data->mask);
- } else {
- dev_err(wm831x->dev, "Unhandled IRQ %d, masking\n", irq);
- __wm831x_disable_irq(wm831x, irq);
- }
+ struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
+
+ wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
-/* Main interrupt handling occurs in a workqueue since we need
- * interrupts enabled to interact with the chip. */
-static void wm831x_irq_worker(struct work_struct *work)
+static struct irq_chip wm831x_irq_chip = {
+ .name = "wm831x",
+ .bus_lock = wm831x_irq_lock,
+ .bus_sync_unlock = wm831x_irq_sync_unlock,
+ .mask = wm831x_irq_mask,
+ .unmask = wm831x_irq_unmask,
+};
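The mask() and unmask() callbacks above only update the cached irq_masks_cur
words; bus_sync_unlock() then writes back whichever registers actually changed,
so a burst of enable/disable calls costs at most one register write per mask
register, performed outside atomic context. A self-contained sketch of that
cache-and-flush idea (demo_* names are hypothetical, not from the driver):

	struct demo_chip {
		u16 cur[2];	/* masks as modified by mask()/unmask() */
		u16 cache[2];	/* masks as last written to the chip    */
	};

	static void demo_reg_write(int reg, u16 val)
	{
		/* stands in for a sleeping I2C/SPI register write */
	}

	static void demo_bus_sync_unlock(struct demo_chip *chip)
	{
		int i;

		for (i = 0; i < 2; i++) {
			if (chip->cur[i] != chip->cache[i]) {
				chip->cache[i] = chip->cur[i];
				demo_reg_write(i, chip->cur[i]);
			}
		}
	}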
+
+/* The processing of the primary interrupt occurs in a thread so that
+ * we can interact with the device over I2C or SPI. */
+static irqreturn_t wm831x_irq_thread(int irq, void *data)
{
- struct wm831x *wm831x = container_of(work, struct wm831x, irq_work);
+ struct wm831x *wm831x = data;
unsigned int i;
int primary;
- int status_regs[5];
- int read[5] = { 0 };
+ int status_regs[WM831X_NUM_IRQ_REGS] = { 0 };
+ int read[WM831X_NUM_IRQ_REGS] = { 0 };
int *status;
primary = wm831x_reg_read(wm831x, WM831X_SYSTEM_INTERRUPTS);
@@ -452,8 +414,6 @@ static void wm831x_irq_worker(struct work_struct *work)
goto out;
}
- mutex_lock(&wm831x->irq_lock);
-
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
int offset = wm831x_irqs[i].reg - 1;
@@ -471,41 +431,34 @@ static void wm831x_irq_worker(struct work_struct *work)
dev_err(wm831x->dev,
"Failed to read IRQ status: %d\n",
*status);
- goto out_lock;
+ goto out;
}
- /* Mask out the disabled IRQs */
- *status &= ~wm831x->irq_masks[offset];
read[offset] = 1;
}
- if (*status & wm831x_irqs[i].mask)
- wm831x_handle_irq(wm831x, i, *status);
+ /* Report it if it isn't masked, or forget the status. */
+ if ((*status & ~wm831x->irq_masks_cur[offset])
+ & wm831x_irqs[i].mask)
+ handle_nested_irq(wm831x->irq_base + i);
+ else
+ *status &= ~wm831x_irqs[i].mask;
}
-out_lock:
- mutex_unlock(&wm831x->irq_lock);
out:
- enable_irq(wm831x->irq);
-}
-
-
-static irqreturn_t wm831x_cpu_irq(int irq, void *data)
-{
- struct wm831x *wm831x = data;
-
- /* Shut the interrupt to the CPU up and schedule the actual
- * handler; we can't check that the IRQ is asserted. */
- disable_irq_nosync(irq);
-
- queue_work(wm831x->irq_wq, &wm831x->irq_work);
+ for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
+ if (status_regs[i])
+ wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
+ status_regs[i]);
+ }
return IRQ_HANDLED;
}
int wm831x_irq_init(struct wm831x *wm831x, int irq)
{
- int i, ret;
+ struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ int i, cur_irq, ret;
mutex_init(&wm831x->irq_lock);
@@ -515,41 +468,53 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
return 0;
}
-
- wm831x->irq_wq = create_singlethread_workqueue("wm831x-irq");
- if (!wm831x->irq_wq) {
- dev_err(wm831x->dev, "Failed to allocate IRQ worker\n");
- return -ESRCH;
+ if (!pdata || !pdata->irq_base) {
+ dev_err(wm831x->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
}
wm831x->irq = irq;
- INIT_WORK(&wm831x->irq_work, wm831x_irq_worker);
+ wm831x->irq_base = pdata->irq_base;
/* Mask the individual interrupt sources */
- for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks); i++) {
- wm831x->irq_masks[i] = 0xffff;
+ for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
+ wm831x->irq_masks_cur[i] = 0xffff;
+ wm831x->irq_masks_cache[i] = 0xffff;
wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
0xffff);
}
- /* Enable top level interrupts, we mask at secondary level */
- wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0);
+ /* Register them with genirq */
+ for (cur_irq = wm831x->irq_base;
+ cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
+ cur_irq++) {
+ set_irq_chip_data(cur_irq, wm831x);
+ set_irq_chip_and_handler(cur_irq, &wm831x_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
- /* We're good to go. We set IRQF_SHARED since there's a
- * chance the driver will interoperate with another driver but
- * the need to disable the IRQ while handing via I2C/SPI means
- * that this may break and performance will be impacted. If
- * this does happen it's a hardware design issue and the only
- * other alternative would be polling.
- */
- ret = request_irq(irq, wm831x_cpu_irq, IRQF_TRIGGER_LOW | IRQF_SHARED,
- "wm831x", wm831x);
+ ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "wm831x", wm831x);
if (ret != 0) {
dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
return ret;
}
+ /* Enable top level interrupts, we mask at secondary level */
+ wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0);
+
return 0;
}
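With this conversion, wm831x sub-drivers no longer call wm831x_request_irq();
they request a genirq interrupt at wm831x->irq_base plus their WM831X_IRQ_*
offset and get a nested, sleepable handler. A hedged sketch of the consumer
side (the flags and the "offset" parameter are illustrative, not taken from a
specific sub-driver):

	static irqreturn_t demo_handler(int irq, void *data)
	{
		/* runs as a nested thread, so sleeping I2C/SPI access is fine */
		return IRQ_HANDLED;
	}

	static int demo_request(struct wm831x *wm831x, int offset)
	{
		return request_threaded_irq(wm831x->irq_base + offset, NULL,
					    demo_handler, IRQF_TRIGGER_RISING,
					    "demo", wm831x);
	}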
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index ba27c9dc1ad..8485a701806 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -337,733 +337,6 @@ int wm8350_reg_unlock(struct wm8350 *wm8350)
}
EXPORT_SYMBOL_GPL(wm8350_reg_unlock);
-static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq)
-{
- mutex_lock(&wm8350->irq_mutex);
-
- if (wm8350->irq[irq].handler)
- wm8350->irq[irq].handler(wm8350, irq, wm8350->irq[irq].data);
- else {
- dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n",
- irq);
- wm8350_mask_irq(wm8350, irq);
- }
-
- mutex_unlock(&wm8350->irq_mutex);
-}
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI. Since all
- * interrupts are clear on read the IRQ line will be reasserted and
- * the physical IRQ will be handled again if another interrupt is
- * asserted while we run - in the normal course of events this is a
- * rare occurrence so we save I2C/SPI reads.
- */
-static irqreturn_t wm8350_irq(int irq, void *data)
-{
- struct wm8350 *wm8350 = data;
- u16 level_one, status1, status2, comp;
-
- /* TODO: Use block reads to improve performance? */
- level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS)
- & ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK);
- status1 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_1)
- & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_1_MASK);
- status2 = wm8350_reg_read(wm8350, WM8350_INT_STATUS_2)
- & ~wm8350_reg_read(wm8350, WM8350_INT_STATUS_2_MASK);
- comp = wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS)
- & ~wm8350_reg_read(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK);
-
- /* over current */
- if (level_one & WM8350_OC_INT) {
- u16 oc;
-
- oc = wm8350_reg_read(wm8350, WM8350_OVER_CURRENT_INT_STATUS);
- oc &= ~wm8350_reg_read(wm8350,
- WM8350_OVER_CURRENT_INT_STATUS_MASK);
-
- if (oc & WM8350_OC_LS_EINT) /* limit switch */
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_OC_LS);
- }
-
- /* under voltage */
- if (level_one & WM8350_UV_INT) {
- u16 uv;
-
- uv = wm8350_reg_read(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS);
- uv &= ~wm8350_reg_read(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK);
-
- if (uv & WM8350_UV_DC1_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC1);
- if (uv & WM8350_UV_DC2_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC2);
- if (uv & WM8350_UV_DC3_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC3);
- if (uv & WM8350_UV_DC4_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC4);
- if (uv & WM8350_UV_DC5_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC5);
- if (uv & WM8350_UV_DC6_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_DC6);
- if (uv & WM8350_UV_LDO1_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO1);
- if (uv & WM8350_UV_LDO2_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO2);
- if (uv & WM8350_UV_LDO3_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO3);
- if (uv & WM8350_UV_LDO4_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_UV_LDO4);
- }
-
- /* charger, RTC */
- if (status1) {
- if (status1 & WM8350_CHG_BAT_HOT_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_BAT_HOT);
- if (status1 & WM8350_CHG_BAT_COLD_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_BAT_COLD);
- if (status1 & WM8350_CHG_BAT_FAIL_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_BAT_FAIL);
- if (status1 & WM8350_CHG_TO_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_TO);
- if (status1 & WM8350_CHG_END_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_END);
- if (status1 & WM8350_CHG_START_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CHG_START);
- if (status1 & WM8350_CHG_FAST_RDY_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_FAST_RDY);
- if (status1 & WM8350_CHG_VBATT_LT_3P9_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_VBATT_LT_3P9);
- if (status1 & WM8350_CHG_VBATT_LT_3P1_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_VBATT_LT_3P1);
- if (status1 & WM8350_CHG_VBATT_LT_2P85_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CHG_VBATT_LT_2P85);
- if (status1 & WM8350_RTC_ALM_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_ALM);
- if (status1 & WM8350_RTC_SEC_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_SEC);
- if (status1 & WM8350_RTC_PER_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_RTC_PER);
- }
-
- /* current sink, system, aux adc */
- if (status2) {
- if (status2 & WM8350_CS1_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS1);
- if (status2 & WM8350_CS2_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CS2);
-
- if (status2 & WM8350_SYS_HYST_COMP_FAIL_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_HYST_COMP_FAIL);
- if (status2 & WM8350_SYS_CHIP_GT115_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_CHIP_GT115);
- if (status2 & WM8350_SYS_CHIP_GT140_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_CHIP_GT140);
- if (status2 & WM8350_SYS_WDOG_TO_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_SYS_WDOG_TO);
-
- if (status2 & WM8350_AUXADC_DATARDY_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DATARDY);
- if (status2 & WM8350_AUXADC_DCOMP4_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP4);
- if (status2 & WM8350_AUXADC_DCOMP3_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP3);
- if (status2 & WM8350_AUXADC_DCOMP2_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP2);
- if (status2 & WM8350_AUXADC_DCOMP1_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_AUXADC_DCOMP1);
-
- if (status2 & WM8350_USB_LIMIT_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_USB_LIMIT);
- }
-
- /* wake, codec, ext */
- if (comp) {
- if (comp & WM8350_WKUP_OFF_STATE_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_OFF_STATE);
- if (comp & WM8350_WKUP_HIB_STATE_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_HIB_STATE);
- if (comp & WM8350_WKUP_CONV_FAULT_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_CONV_FAULT);
- if (comp & WM8350_WKUP_WDOG_RST_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_WDOG_RST);
- if (comp & WM8350_WKUP_GP_PWR_ON_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_GP_PWR_ON);
- if (comp & WM8350_WKUP_ONKEY_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_WKUP_ONKEY);
- if (comp & WM8350_WKUP_GP_WAKEUP_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_WKUP_GP_WAKEUP);
-
- if (comp & WM8350_CODEC_JCK_DET_L_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CODEC_JCK_DET_L);
- if (comp & WM8350_CODEC_JCK_DET_R_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CODEC_JCK_DET_R);
- if (comp & WM8350_CODEC_MICSCD_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_CODEC_MICSCD);
- if (comp & WM8350_CODEC_MICD_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_CODEC_MICD);
-
- if (comp & WM8350_EXT_USB_FB_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_USB_FB);
- if (comp & WM8350_EXT_WALL_FB_EINT)
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_EXT_WALL_FB);
- if (comp & WM8350_EXT_BAT_FB_EINT)
- wm8350_irq_call_handler(wm8350, WM8350_IRQ_EXT_BAT_FB);
- }
-
- if (level_one & WM8350_GP_INT) {
- int i;
- u16 gpio;
-
- gpio = wm8350_reg_read(wm8350, WM8350_GPIO_INT_STATUS);
- gpio &= ~wm8350_reg_read(wm8350,
- WM8350_GPIO_INT_STATUS_MASK);
-
- for (i = 0; i < 12; i++) {
- if (gpio & (1 << i))
- wm8350_irq_call_handler(wm8350,
- WM8350_IRQ_GPIO(i));
- }
- }
-
- return IRQ_HANDLED;
-}
-
-int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- void (*handler) (struct wm8350 *, int, void *),
- void *data)
-{
- if (irq < 0 || irq > WM8350_NUM_IRQ || !handler)
- return -EINVAL;
-
- if (wm8350->irq[irq].handler)
- return -EBUSY;
-
- mutex_lock(&wm8350->irq_mutex);
- wm8350->irq[irq].handler = handler;
- wm8350->irq[irq].data = data;
- mutex_unlock(&wm8350->irq_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_register_irq);
-
-int wm8350_free_irq(struct wm8350 *wm8350, int irq)
-{
- if (irq < 0 || irq > WM8350_NUM_IRQ)
- return -EINVAL;
-
- mutex_lock(&wm8350->irq_mutex);
- wm8350->irq[irq].handler = NULL;
- mutex_unlock(&wm8350->irq_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_free_irq);
-
-int wm8350_mask_irq(struct wm8350 *wm8350, int irq)
-{
- switch (irq) {
- case WM8350_IRQ_CHG_BAT_HOT:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_HOT_EINT);
- case WM8350_IRQ_CHG_BAT_COLD:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_COLD_EINT);
- case WM8350_IRQ_CHG_BAT_FAIL:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_FAIL_EINT);
- case WM8350_IRQ_CHG_TO:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_TO_EINT);
- case WM8350_IRQ_CHG_END:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_END_EINT);
- case WM8350_IRQ_CHG_START:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_START_EINT);
- case WM8350_IRQ_CHG_FAST_RDY:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_FAST_RDY_EINT);
- case WM8350_IRQ_RTC_PER:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_PER_EINT);
- case WM8350_IRQ_RTC_SEC:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_SEC_EINT);
- case WM8350_IRQ_RTC_ALM:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_ALM_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P9:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P9_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P1:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P1_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_2P85:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_2P85_EINT);
- case WM8350_IRQ_CS1:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS1_EINT);
- case WM8350_IRQ_CS2:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS2_EINT);
- case WM8350_IRQ_USB_LIMIT:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_USB_LIMIT_EINT);
- case WM8350_IRQ_AUXADC_DATARDY:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DATARDY_EINT);
- case WM8350_IRQ_AUXADC_DCOMP4:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP4_EINT);
- case WM8350_IRQ_AUXADC_DCOMP3:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP3_EINT);
- case WM8350_IRQ_AUXADC_DCOMP2:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP2_EINT);
- case WM8350_IRQ_AUXADC_DCOMP1:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP1_EINT);
- case WM8350_IRQ_SYS_HYST_COMP_FAIL:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_HYST_COMP_FAIL_EINT);
- case WM8350_IRQ_SYS_CHIP_GT115:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT115_EINT);
- case WM8350_IRQ_SYS_CHIP_GT140:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT140_EINT);
- case WM8350_IRQ_SYS_WDOG_TO:
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_WDOG_TO_EINT);
- case WM8350_IRQ_UV_LDO4:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO4_EINT);
- case WM8350_IRQ_UV_LDO3:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO3_EINT);
- case WM8350_IRQ_UV_LDO2:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO2_EINT);
- case WM8350_IRQ_UV_LDO1:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO1_EINT);
- case WM8350_IRQ_UV_DC6:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC6_EINT);
- case WM8350_IRQ_UV_DC5:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC5_EINT);
- case WM8350_IRQ_UV_DC4:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC4_EINT);
- case WM8350_IRQ_UV_DC3:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC3_EINT);
- case WM8350_IRQ_UV_DC2:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC2_EINT);
- case WM8350_IRQ_UV_DC1:
- return wm8350_set_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC1_EINT);
- case WM8350_IRQ_OC_LS:
- return wm8350_set_bits(wm8350,
- WM8350_OVER_CURRENT_INT_STATUS_MASK,
- WM8350_IM_OC_LS_EINT);
- case WM8350_IRQ_EXT_USB_FB:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_USB_FB_EINT);
- case WM8350_IRQ_EXT_WALL_FB:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_WALL_FB_EINT);
- case WM8350_IRQ_EXT_BAT_FB:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_BAT_FB_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_L:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_L_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_R:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_R_EINT);
- case WM8350_IRQ_CODEC_MICSCD:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICSCD_EINT);
- case WM8350_IRQ_CODEC_MICD:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICD_EINT);
- case WM8350_IRQ_WKUP_OFF_STATE:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_HIB_STATE:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_HIB_STATE_EINT);
- case WM8350_IRQ_WKUP_CONV_FAULT:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_CONV_FAULT_EINT);
- case WM8350_IRQ_WKUP_WDOG_RST:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_GP_PWR_ON:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_PWR_ON_EINT);
- case WM8350_IRQ_WKUP_ONKEY:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_ONKEY_EINT);
- case WM8350_IRQ_WKUP_GP_WAKEUP:
- return wm8350_set_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_WAKEUP_EINT);
- case WM8350_IRQ_GPIO(0):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP0_EINT);
- case WM8350_IRQ_GPIO(1):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP1_EINT);
- case WM8350_IRQ_GPIO(2):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP2_EINT);
- case WM8350_IRQ_GPIO(3):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP3_EINT);
- case WM8350_IRQ_GPIO(4):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP4_EINT);
- case WM8350_IRQ_GPIO(5):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP5_EINT);
- case WM8350_IRQ_GPIO(6):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP6_EINT);
- case WM8350_IRQ_GPIO(7):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP7_EINT);
- case WM8350_IRQ_GPIO(8):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP8_EINT);
- case WM8350_IRQ_GPIO(9):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP9_EINT);
- case WM8350_IRQ_GPIO(10):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP10_EINT);
- case WM8350_IRQ_GPIO(11):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP11_EINT);
- case WM8350_IRQ_GPIO(12):
- return wm8350_set_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP12_EINT);
- default:
- dev_warn(wm8350->dev, "Attempting to mask unknown IRQ %d\n",
- irq);
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_mask_irq);
-
-int wm8350_unmask_irq(struct wm8350 *wm8350, int irq)
-{
- switch (irq) {
- case WM8350_IRQ_CHG_BAT_HOT:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_HOT_EINT);
- case WM8350_IRQ_CHG_BAT_COLD:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_COLD_EINT);
- case WM8350_IRQ_CHG_BAT_FAIL:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_BAT_FAIL_EINT);
- case WM8350_IRQ_CHG_TO:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_TO_EINT);
- case WM8350_IRQ_CHG_END:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_END_EINT);
- case WM8350_IRQ_CHG_START:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_START_EINT);
- case WM8350_IRQ_CHG_FAST_RDY:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_FAST_RDY_EINT);
- case WM8350_IRQ_RTC_PER:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_PER_EINT);
- case WM8350_IRQ_RTC_SEC:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_SEC_EINT);
- case WM8350_IRQ_RTC_ALM:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_RTC_ALM_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P9:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P9_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_3P1:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_3P1_EINT);
- case WM8350_IRQ_CHG_VBATT_LT_2P85:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK,
- WM8350_IM_CHG_VBATT_LT_2P85_EINT);
- case WM8350_IRQ_CS1:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS1_EINT);
- case WM8350_IRQ_CS2:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_CS2_EINT);
- case WM8350_IRQ_USB_LIMIT:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_USB_LIMIT_EINT);
- case WM8350_IRQ_AUXADC_DATARDY:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DATARDY_EINT);
- case WM8350_IRQ_AUXADC_DCOMP4:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP4_EINT);
- case WM8350_IRQ_AUXADC_DCOMP3:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP3_EINT);
- case WM8350_IRQ_AUXADC_DCOMP2:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP2_EINT);
- case WM8350_IRQ_AUXADC_DCOMP1:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_AUXADC_DCOMP1_EINT);
- case WM8350_IRQ_SYS_HYST_COMP_FAIL:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_HYST_COMP_FAIL_EINT);
- case WM8350_IRQ_SYS_CHIP_GT115:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT115_EINT);
- case WM8350_IRQ_SYS_CHIP_GT140:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_CHIP_GT140_EINT);
- case WM8350_IRQ_SYS_WDOG_TO:
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_2_MASK,
- WM8350_IM_SYS_WDOG_TO_EINT);
- case WM8350_IRQ_UV_LDO4:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO4_EINT);
- case WM8350_IRQ_UV_LDO3:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO3_EINT);
- case WM8350_IRQ_UV_LDO2:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO2_EINT);
- case WM8350_IRQ_UV_LDO1:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_LDO1_EINT);
- case WM8350_IRQ_UV_DC6:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC6_EINT);
- case WM8350_IRQ_UV_DC5:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC5_EINT);
- case WM8350_IRQ_UV_DC4:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC4_EINT);
- case WM8350_IRQ_UV_DC3:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC3_EINT);
- case WM8350_IRQ_UV_DC2:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC2_EINT);
- case WM8350_IRQ_UV_DC1:
- return wm8350_clear_bits(wm8350,
- WM8350_UNDER_VOLTAGE_INT_STATUS_MASK,
- WM8350_IM_UV_DC1_EINT);
- case WM8350_IRQ_OC_LS:
- return wm8350_clear_bits(wm8350,
- WM8350_OVER_CURRENT_INT_STATUS_MASK,
- WM8350_IM_OC_LS_EINT);
- case WM8350_IRQ_EXT_USB_FB:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_USB_FB_EINT);
- case WM8350_IRQ_EXT_WALL_FB:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_WALL_FB_EINT);
- case WM8350_IRQ_EXT_BAT_FB:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_EXT_BAT_FB_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_L:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_L_EINT);
- case WM8350_IRQ_CODEC_JCK_DET_R:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_JCK_DET_R_EINT);
- case WM8350_IRQ_CODEC_MICSCD:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICSCD_EINT);
- case WM8350_IRQ_CODEC_MICD:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_CODEC_MICD_EINT);
- case WM8350_IRQ_WKUP_OFF_STATE:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_HIB_STATE:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_HIB_STATE_EINT);
- case WM8350_IRQ_WKUP_CONV_FAULT:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_CONV_FAULT_EINT);
- case WM8350_IRQ_WKUP_WDOG_RST:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_OFF_STATE_EINT);
- case WM8350_IRQ_WKUP_GP_PWR_ON:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_PWR_ON_EINT);
- case WM8350_IRQ_WKUP_ONKEY:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_ONKEY_EINT);
- case WM8350_IRQ_WKUP_GP_WAKEUP:
- return wm8350_clear_bits(wm8350,
- WM8350_COMPARATOR_INT_STATUS_MASK,
- WM8350_IM_WKUP_GP_WAKEUP_EINT);
- case WM8350_IRQ_GPIO(0):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP0_EINT);
- case WM8350_IRQ_GPIO(1):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP1_EINT);
- case WM8350_IRQ_GPIO(2):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP2_EINT);
- case WM8350_IRQ_GPIO(3):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP3_EINT);
- case WM8350_IRQ_GPIO(4):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP4_EINT);
- case WM8350_IRQ_GPIO(5):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP5_EINT);
- case WM8350_IRQ_GPIO(6):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP6_EINT);
- case WM8350_IRQ_GPIO(7):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP7_EINT);
- case WM8350_IRQ_GPIO(8):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP8_EINT);
- case WM8350_IRQ_GPIO(9):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP9_EINT);
- case WM8350_IRQ_GPIO(10):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP10_EINT);
- case WM8350_IRQ_GPIO(11):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP11_EINT);
- case WM8350_IRQ_GPIO(12):
- return wm8350_clear_bits(wm8350,
- WM8350_GPIO_INT_STATUS_MASK,
- WM8350_IM_GP12_EINT);
- default:
- dev_warn(wm8350->dev, "Attempting to unmask unknown IRQ %d\n",
- irq);
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8350_unmask_irq);
-
int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref)
{
u16 reg, result = 0;
@@ -1264,7 +537,7 @@ static void wm8350_client_dev_register(struct wm8350 *wm8350,
int ret;
*pdev = platform_device_alloc(name, -1);
- if (pdev == NULL) {
+ if (*pdev == NULL) {
dev_err(wm8350->dev, "Failed to allocate %s\n", name);
return;
}
@@ -1409,49 +682,18 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
return ret;
}
- wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF);
-
mutex_init(&wm8350->auxadc_mutex);
- mutex_init(&wm8350->irq_mutex);
- if (irq) {
- int flags = IRQF_ONESHOT;
-
- if (pdata && pdata->irq_high) {
- flags |= IRQF_TRIGGER_HIGH;
-
- wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
- WM8350_IRQ_POL);
- } else {
- flags |= IRQF_TRIGGER_LOW;
-
- wm8350_clear_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
- WM8350_IRQ_POL);
- }
- ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
- "wm8350", wm8350);
- if (ret != 0) {
- dev_err(wm8350->dev, "Failed to request IRQ: %d\n",
- ret);
- goto err;
- }
- } else {
- dev_err(wm8350->dev, "No IRQ configured\n");
+ ret = wm8350_irq_init(wm8350, irq, pdata);
+ if (ret < 0)
goto err;
- }
- wm8350->chip_irq = irq;
if (pdata && pdata->init) {
ret = pdata->init(wm8350);
if (ret != 0) {
dev_err(wm8350->dev, "Platform init() failed: %d\n",
ret);
- goto err;
+ goto err_irq;
}
}
@@ -1470,6 +712,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
return 0;
+err_irq:
+ wm8350_irq_exit(wm8350);
err:
kfree(wm8350->reg_cache);
return ret;
@@ -1493,7 +737,8 @@ void wm8350_device_exit(struct wm8350 *wm8350)
platform_device_unregister(wm8350->gpio.pdev);
platform_device_unregister(wm8350->codec.pdev);
- free_irq(wm8350->chip_irq, wm8350);
+ wm8350_irq_exit(wm8350);
+
kfree(wm8350->reg_cache);
}
EXPORT_SYMBOL_GPL(wm8350_device_exit);
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
new file mode 100644
index 00000000000..c8df547c474
--- /dev/null
+++ b/drivers/mfd/wm8350-irq.c
@@ -0,0 +1,529 @@
+/*
+ * wm8350-irq.c -- IRQ support for Wolfson WM8350
+ *
+ * Copyright 2007, 2008, 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Liam Girdwood, Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/wm8350/core.h>
+#include <linux/mfd/wm8350/audio.h>
+#include <linux/mfd/wm8350/comparator.h>
+#include <linux/mfd/wm8350/gpio.h>
+#include <linux/mfd/wm8350/pmic.h>
+#include <linux/mfd/wm8350/rtc.h>
+#include <linux/mfd/wm8350/supply.h>
+#include <linux/mfd/wm8350/wdt.h>
+
+#define WM8350_NUM_IRQ_REGS 7
+
+#define WM8350_INT_OFFSET_1 0
+#define WM8350_INT_OFFSET_2 1
+#define WM8350_POWER_UP_INT_OFFSET 2
+#define WM8350_UNDER_VOLTAGE_INT_OFFSET 3
+#define WM8350_OVER_CURRENT_INT_OFFSET 4
+#define WM8350_GPIO_INT_OFFSET 5
+#define WM8350_COMPARATOR_INT_OFFSET 6
+
+struct wm8350_irq_data {
+ int primary;
+ int reg;
+ int mask;
+ int primary_only;
+};
+
+static struct wm8350_irq_data wm8350_irqs[] = {
+ [WM8350_IRQ_OC_LS] = {
+ .primary = WM8350_OC_INT,
+ .reg = WM8350_OVER_CURRENT_INT_OFFSET,
+ .mask = WM8350_OC_LS_EINT,
+ .primary_only = 1,
+ },
+ [WM8350_IRQ_UV_DC1] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC1_EINT,
+ },
+ [WM8350_IRQ_UV_DC2] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC2_EINT,
+ },
+ [WM8350_IRQ_UV_DC3] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC3_EINT,
+ },
+ [WM8350_IRQ_UV_DC4] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC4_EINT,
+ },
+ [WM8350_IRQ_UV_DC5] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC5_EINT,
+ },
+ [WM8350_IRQ_UV_DC6] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_DC6_EINT,
+ },
+ [WM8350_IRQ_UV_LDO1] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO1_EINT,
+ },
+ [WM8350_IRQ_UV_LDO2] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO2_EINT,
+ },
+ [WM8350_IRQ_UV_LDO3] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO3_EINT,
+ },
+ [WM8350_IRQ_UV_LDO4] = {
+ .primary = WM8350_UV_INT,
+ .reg = WM8350_UNDER_VOLTAGE_INT_OFFSET,
+ .mask = WM8350_UV_LDO4_EINT,
+ },
+ [WM8350_IRQ_CHG_BAT_HOT] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_BAT_HOT_EINT,
+ },
+ [WM8350_IRQ_CHG_BAT_COLD] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_BAT_COLD_EINT,
+ },
+ [WM8350_IRQ_CHG_BAT_FAIL] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_BAT_FAIL_EINT,
+ },
+ [WM8350_IRQ_CHG_TO] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_TO_EINT,
+ },
+ [WM8350_IRQ_CHG_END] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_END_EINT,
+ },
+ [WM8350_IRQ_CHG_START] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_START_EINT,
+ },
+ [WM8350_IRQ_CHG_FAST_RDY] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_FAST_RDY_EINT,
+ },
+ [WM8350_IRQ_CHG_VBATT_LT_3P9] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_VBATT_LT_3P9_EINT,
+ },
+ [WM8350_IRQ_CHG_VBATT_LT_3P1] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_VBATT_LT_3P1_EINT,
+ },
+ [WM8350_IRQ_CHG_VBATT_LT_2P85] = {
+ .primary = WM8350_CHG_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_CHG_VBATT_LT_2P85_EINT,
+ },
+ [WM8350_IRQ_RTC_ALM] = {
+ .primary = WM8350_RTC_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_RTC_ALM_EINT,
+ },
+ [WM8350_IRQ_RTC_SEC] = {
+ .primary = WM8350_RTC_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_RTC_SEC_EINT,
+ },
+ [WM8350_IRQ_RTC_PER] = {
+ .primary = WM8350_RTC_INT,
+ .reg = WM8350_INT_OFFSET_1,
+ .mask = WM8350_RTC_PER_EINT,
+ },
+ [WM8350_IRQ_CS1] = {
+ .primary = WM8350_CS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_CS1_EINT,
+ },
+ [WM8350_IRQ_CS2] = {
+ .primary = WM8350_CS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_CS2_EINT,
+ },
+ [WM8350_IRQ_SYS_HYST_COMP_FAIL] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_HYST_COMP_FAIL_EINT,
+ },
+ [WM8350_IRQ_SYS_CHIP_GT115] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_CHIP_GT115_EINT,
+ },
+ [WM8350_IRQ_SYS_CHIP_GT140] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_CHIP_GT140_EINT,
+ },
+ [WM8350_IRQ_SYS_WDOG_TO] = {
+ .primary = WM8350_SYS_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_SYS_WDOG_TO_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DATARDY] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DATARDY_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP4] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP4_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP3] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP3_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP2] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP2_EINT,
+ },
+ [WM8350_IRQ_AUXADC_DCOMP1] = {
+ .primary = WM8350_AUXADC_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_AUXADC_DCOMP1_EINT,
+ },
+ [WM8350_IRQ_USB_LIMIT] = {
+ .primary = WM8350_USB_INT,
+ .reg = WM8350_INT_OFFSET_2,
+ .mask = WM8350_USB_LIMIT_EINT,
+ .primary_only = 1,
+ },
+ [WM8350_IRQ_WKUP_OFF_STATE] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_OFF_STATE_EINT,
+ },
+ [WM8350_IRQ_WKUP_HIB_STATE] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_HIB_STATE_EINT,
+ },
+ [WM8350_IRQ_WKUP_CONV_FAULT] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_CONV_FAULT_EINT,
+ },
+ [WM8350_IRQ_WKUP_WDOG_RST] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_WDOG_RST_EINT,
+ },
+ [WM8350_IRQ_WKUP_GP_PWR_ON] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_GP_PWR_ON_EINT,
+ },
+ [WM8350_IRQ_WKUP_ONKEY] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_ONKEY_EINT,
+ },
+ [WM8350_IRQ_WKUP_GP_WAKEUP] = {
+ .primary = WM8350_WKUP_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_WKUP_GP_WAKEUP_EINT,
+ },
+ [WM8350_IRQ_CODEC_JCK_DET_L] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_JCK_DET_L_EINT,
+ },
+ [WM8350_IRQ_CODEC_JCK_DET_R] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_JCK_DET_R_EINT,
+ },
+ [WM8350_IRQ_CODEC_MICSCD] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_MICSCD_EINT,
+ },
+ [WM8350_IRQ_CODEC_MICD] = {
+ .primary = WM8350_CODEC_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_CODEC_MICD_EINT,
+ },
+ [WM8350_IRQ_EXT_USB_FB] = {
+ .primary = WM8350_EXT_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_EXT_USB_FB_EINT,
+ },
+ [WM8350_IRQ_EXT_WALL_FB] = {
+ .primary = WM8350_EXT_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_EXT_WALL_FB_EINT,
+ },
+ [WM8350_IRQ_EXT_BAT_FB] = {
+ .primary = WM8350_EXT_INT,
+ .reg = WM8350_COMPARATOR_INT_OFFSET,
+ .mask = WM8350_EXT_BAT_FB_EINT,
+ },
+ [WM8350_IRQ_GPIO(0)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP0_EINT,
+ },
+ [WM8350_IRQ_GPIO(1)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP1_EINT,
+ },
+ [WM8350_IRQ_GPIO(2)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP2_EINT,
+ },
+ [WM8350_IRQ_GPIO(3)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP3_EINT,
+ },
+ [WM8350_IRQ_GPIO(4)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP4_EINT,
+ },
+ [WM8350_IRQ_GPIO(5)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP5_EINT,
+ },
+ [WM8350_IRQ_GPIO(6)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP6_EINT,
+ },
+ [WM8350_IRQ_GPIO(7)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP7_EINT,
+ },
+ [WM8350_IRQ_GPIO(8)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP8_EINT,
+ },
+ [WM8350_IRQ_GPIO(9)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP9_EINT,
+ },
+ [WM8350_IRQ_GPIO(10)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP10_EINT,
+ },
+ [WM8350_IRQ_GPIO(11)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP11_EINT,
+ },
+ [WM8350_IRQ_GPIO(12)] = {
+ .primary = WM8350_GP_INT,
+ .reg = WM8350_GPIO_INT_OFFSET,
+ .mask = WM8350_GP12_EINT,
+ },
+};
+
+static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq)
+{
+ mutex_lock(&wm8350->irq_mutex);
+
+ if (wm8350->irq[irq].handler)
+ wm8350->irq[irq].handler(irq, wm8350->irq[irq].data);
+ else {
+ dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n",
+ irq);
+ wm8350_mask_irq(wm8350, irq);
+ }
+
+ mutex_unlock(&wm8350->irq_mutex);
+}
+
+/*
+ * This is a threaded IRQ handler, so it can access I2C/SPI. Since all
+ * interrupts are clear on read, the IRQ line will be reasserted and
+ * the physical IRQ will be handled again if another interrupt is
+ * asserted while we run - in the normal course of events this is a
+ * rare occurrence, so we save I2C/SPI reads.
+ */
+static irqreturn_t wm8350_irq(int irq, void *irq_data)
+{
+ struct wm8350 *wm8350 = irq_data;
+ u16 level_one;
+ u16 sub_reg[WM8350_NUM_IRQ_REGS];
+ int read_done[WM8350_NUM_IRQ_REGS];
+ struct wm8350_irq_data *data;
+ int i;
+
+ /* TODO: Use block reads to improve performance? */
+ level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS)
+ & ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK);
+
+ if (!level_one)
+ return IRQ_NONE;
+
+ memset(&read_done, 0, sizeof(read_done));
+
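+	/*
+	 * Walk the individual interrupt sources: skip any whose primary
+	 * group bit is not set, read each secondary status register at
+	 * most once, and dispatch a handler for each unmasked bit found.
+	 */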
+ for (i = 0; i < ARRAY_SIZE(wm8350_irqs); i++) {
+ data = &wm8350_irqs[i];
+
+ if (!(level_one & data->primary))
+ continue;
+
+ if (!read_done[data->reg]) {
+ sub_reg[data->reg] =
+ wm8350_reg_read(wm8350, WM8350_INT_STATUS_1 +
+ data->reg);
+ sub_reg[data->reg] &=
+ ~wm8350_reg_read(wm8350,
+ WM8350_INT_STATUS_1_MASK +
+ data->reg);
+ read_done[data->reg] = 1;
+ }
+
+ if (sub_reg[data->reg] & data->mask)
+ wm8350_irq_call_handler(wm8350, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int wm8350_register_irq(struct wm8350 *wm8350, int irq,
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data)
+{
+ if (irq < 0 || irq > WM8350_NUM_IRQ || !handler)
+ return -EINVAL;
+
+ if (wm8350->irq[irq].handler)
+ return -EBUSY;
+
+ mutex_lock(&wm8350->irq_mutex);
+ wm8350->irq[irq].handler = handler;
+ wm8350->irq[irq].data = data;
+ mutex_unlock(&wm8350->irq_mutex);
+
+ wm8350_unmask_irq(wm8350, irq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8350_register_irq);
+
+int wm8350_free_irq(struct wm8350 *wm8350, int irq)
+{
+ if (irq < 0 || irq > WM8350_NUM_IRQ)
+ return -EINVAL;
+
+ wm8350_mask_irq(wm8350, irq);
+
+ mutex_lock(&wm8350->irq_mutex);
+ wm8350->irq[irq].handler = NULL;
+ mutex_unlock(&wm8350->irq_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm8350_free_irq);
+
+int wm8350_mask_irq(struct wm8350 *wm8350, int irq)
+{
+ return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK +
+ wm8350_irqs[irq].reg,
+ wm8350_irqs[irq].mask);
+}
+EXPORT_SYMBOL_GPL(wm8350_mask_irq);
+
+int wm8350_unmask_irq(struct wm8350 *wm8350, int irq)
+{
+ return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK +
+ wm8350_irqs[irq].reg,
+ wm8350_irqs[irq].mask);
+}
+EXPORT_SYMBOL_GPL(wm8350_unmask_irq);
+
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata)
+{
+ int ret;
+ int flags = IRQF_ONESHOT;
+
+ if (!irq) {
+ dev_err(wm8350->dev, "No IRQ configured\n");
+ return -EINVAL;
+ }
+
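+	/* Mask all sources; wm8350_register_irq() unmasks them as needed. */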
+ wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF);
+ wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF);
+
+ mutex_init(&wm8350->irq_mutex);
+ wm8350->chip_irq = irq;
+
+ if (pdata && pdata->irq_high) {
+ flags |= IRQF_TRIGGER_HIGH;
+
+ wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
+ WM8350_IRQ_POL);
+ } else {
+ flags |= IRQF_TRIGGER_LOW;
+
+ wm8350_clear_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
+ WM8350_IRQ_POL);
+ }
+
+ ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
+ "wm8350", wm8350);
+ if (ret != 0)
+ dev_err(wm8350->dev, "Failed to request IRQ: %d\n", ret);
+
+ return ret;
+}
+
+int wm8350_irq_exit(struct wm8350 *wm8350)
+{
+ free_irq(wm8350->chip_irq, wm8350);
+ return 0;
+}
diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c
index 7ccc1eab98a..e965139e5cd 100644
--- a/drivers/mfd/wm8350-regmap.c
+++ b/drivers/mfd/wm8350-regmap.c
@@ -3170,14 +3170,6 @@ const u16 wm8352_mode3_defaults[] = {
};
#endif
-/* The register defaults for the config mode used must be compiled in but
- * due to the impact on kernel size it is possible to disable
- */
-#ifndef WM8350_HAVE_CONFIG_MODE
-#warning No WM8350 config modes supported - select at least one of the
-#warning MFD_WM8350_CONFIG_MODE_n options from the board driver.
-#endif
-
/*
* Access masks.
*/
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2c16ca6501d..e3551d20464 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -13,6 +13,20 @@ menuconfig MISC_DEVICES
if MISC_DEVICES
+config AD525X_DPOT
+ tristate "Analog Devices AD525x Digital Potentiometers"
+ depends on I2C && SYSFS
+ help
+ If you say yes here, you get support for the Analog Devices
+ AD5258, AD5259, AD5251, AD5252, AD5253, AD5254 and AD5255
+ digital potentiometer chips.
+
+ See Documentation/misc-devices/ad525x_dpot.txt for the
+ userspace interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called ad525x_dpot.
+
config ATMEL_PWM
tristate "Atmel AT32/AT91 PWM support"
depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9
@@ -173,6 +187,31 @@ config SGI_XP
this feature will allow for direct communication between SSIs
based on a network adapter and DMA messaging.
+config CS5535_MFGPT
+ tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
+ depends on PCI
+ depends on X86
+ default n
+ help
+ This driver provides access to MFGPT functionality for other
+ drivers that need timers. MFGPTs are available in the CS5535 and
+ CS5536 companion chips that are found in AMD Geode and several
+ other platforms. They have a better resolution and max interval
+ than the generic PIT, and are suitable for use as high-res timers.
+ You probably don't want to enable this manually; other drivers that
+ make use of it should enable it.
+
+config CS5535_MFGPT_DEFAULT_IRQ
+ int
+ depends on CS5535_MFGPT
+ default 7
+ help
+ MFGPTs on the CS5535 require an interrupt. The selected IRQ
+	  can be overridden as a module option as well as by drivers that
+	  use the cs5535_mfgpt_ API; however, different architectures might
+ want to use a different IRQ by default. This is here for
+ architectures to set as necessary.
+
config HP_ILO
tristate "Channel interface driver for HP iLO/iLO2 processor"
depends on PCI
@@ -210,19 +249,6 @@ config SGI_GRU_DEBUG
This option enables addition debugging code for the SGI GRU driver. If
you are unsure, say N.
-config DELL_LAPTOP
- tristate "Dell Laptop Extras (EXPERIMENTAL)"
- depends on X86
- depends on DCDBAS
- depends on EXPERIMENTAL
- depends on BACKLIGHT_CLASS_DEVICE
- depends on RFKILL
- depends on POWER_SUPPLY
- default n
- ---help---
- This driver adds support for rfkill and backlight control to Dell
- laptops.
-
config ISL29003
tristate "Intersil ISL29003 ambient light sensor"
depends on I2C && SYSFS
@@ -256,6 +282,16 @@ config DS1682
This driver can also be built as a module. If so, the module
will be called ds1682.
+config TI_DAC7512
+ tristate "Texas Instruments DAC7512"
+ depends on SPI && SYSFS
+ help
+	  If you say yes here, you get support for the Texas Instruments
+	  DAC7512 16-bit digital-to-analog converter.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called ti_dac7512.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 906a0edcea4..049ff2482f3 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
+obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
@@ -17,10 +18,12 @@ obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
+obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
obj-$(CONFIG_HP_ILO) += hpilo.o
obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
+obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-y += eeprom/
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
new file mode 100644
index 00000000000..30a59f2bacd
--- /dev/null
+++ b/drivers/misc/ad525x_dpot.c
@@ -0,0 +1,666 @@
+/*
+ * ad525x_dpot: Driver for the Analog Devices AD525x digital potentiometers
+ * Copyright (c) 2009 Analog Devices, Inc.
+ * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * DEVID #Wipers #Positions Resistor Options (kOhm)
+ * AD5258 1 64 1, 10, 50, 100
+ * AD5259 1 256 5, 10, 50, 100
+ * AD5251 2 64 1, 10, 50, 100
+ * AD5252 2 256 1, 10, 50, 100
+ * AD5255 3 512 25, 250
+ * AD5253 4 64 1, 10, 50, 100
+ * AD5254 4 256 1, 10, 50, 100
+ *
+ * See Documentation/misc-devices/ad525x_dpot.txt for more info.
+ *
+ * derived from ad5258.c
+ * Copyright (c) 2009 Cyber Switching, Inc.
+ * Author: Chris Verges <chrisv@cyberswitching.com>
+ *
+ * derived from ad5252.c
+ * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+#define DRIVER_NAME "ad525x_dpot"
+#define DRIVER_VERSION "0.1"
+
+enum dpot_devid {
+ AD5258_ID,
+ AD5259_ID,
+ AD5251_ID,
+ AD5252_ID,
+ AD5253_ID,
+ AD5254_ID,
+ AD5255_ID,
+};
+
+#define AD5258_MAX_POSITION 64
+#define AD5259_MAX_POSITION 256
+#define AD5251_MAX_POSITION 64
+#define AD5252_MAX_POSITION 256
+#define AD5253_MAX_POSITION 64
+#define AD5254_MAX_POSITION 256
+#define AD5255_MAX_POSITION 512
+
+#define AD525X_RDAC0 0
+#define AD525X_RDAC1 1
+#define AD525X_RDAC2 2
+#define AD525X_RDAC3 3
+
+#define AD525X_REG_TOL 0x18
+#define AD525X_TOL_RDAC0 (AD525X_REG_TOL | AD525X_RDAC0)
+#define AD525X_TOL_RDAC1 (AD525X_REG_TOL | AD525X_RDAC1)
+#define AD525X_TOL_RDAC2 (AD525X_REG_TOL | AD525X_RDAC2)
+#define AD525X_TOL_RDAC3 (AD525X_REG_TOL | AD525X_RDAC3)
+
+/* RDAC-to-EEPROM Interface Commands */
+#define AD525X_I2C_RDAC (0x00 << 5)
+#define AD525X_I2C_EEPROM (0x01 << 5)
+#define AD525X_I2C_CMD (0x80)
+
+#define AD525X_DEC_ALL_6DB (AD525X_I2C_CMD | (0x4 << 3))
+#define AD525X_INC_ALL_6DB (AD525X_I2C_CMD | (0x9 << 3))
+#define AD525X_DEC_ALL (AD525X_I2C_CMD | (0x6 << 3))
+#define AD525X_INC_ALL (AD525X_I2C_CMD | (0xB << 3))
+
+static s32 ad525x_read(struct i2c_client *client, u8 reg);
+static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value);
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct dpot_data {
+ struct mutex update_lock;
+ unsigned rdac_mask;
+ unsigned max_pos;
+ unsigned devid;
+};
+
+/* sysfs functions */
+
+static ssize_t sysfs_show_reg(struct device *dev,
+ struct device_attribute *attr, char *buf, u32 reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dpot_data *data = i2c_get_clientdata(client);
+ s32 value;
+
+ mutex_lock(&data->update_lock);
+ value = ad525x_read(client, reg);
+ mutex_unlock(&data->update_lock);
+
+ if (value < 0)
+ return -EINVAL;
+ /*
+ * Let someone else deal with converting this ...
+ * the tolerance is a two-byte value where the MSB
+ * is a sign + integer value, and the LSB is a
+ * decimal value. See page 18 of the AD5258
+ * datasheet (Rev. A) for more details.
+ */
+
+ if (reg & AD525X_REG_TOL)
+ return sprintf(buf, "0x%04x\n", value & 0xFFFF);
+ else
+ return sprintf(buf, "%u\n", value & data->rdac_mask);
+}
+
+static ssize_t sysfs_set_reg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, u32 reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dpot_data *data = i2c_get_clientdata(client);
+ unsigned long value;
+ int err;
+
+ err = strict_strtoul(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > data->rdac_mask)
+ value = data->rdac_mask;
+
+ mutex_lock(&data->update_lock);
+ ad525x_write(client, reg, value);
+ if (reg & AD525X_I2C_EEPROM)
+ msleep(26); /* Sleep while the EEPROM updates */
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t sysfs_do_cmd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, u32 reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct dpot_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+ ad525x_write(client, reg, 0);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC0);
+}
+
+static ssize_t set_rdac0(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC0);
+}
+
+static DEVICE_ATTR(rdac0, S_IWUSR | S_IRUGO, show_rdac0, set_rdac0);
+
+static ssize_t show_eeprom0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC0);
+}
+
+static ssize_t set_eeprom0(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC0);
+}
+
+static DEVICE_ATTR(eeprom0, S_IWUSR | S_IRUGO, show_eeprom0, set_eeprom0);
+
+static ssize_t show_tolerance0(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC0);
+}
+
+static DEVICE_ATTR(tolerance0, S_IRUGO, show_tolerance0, NULL);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC1);
+}
+
+static ssize_t set_rdac1(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC1);
+}
+
+static DEVICE_ATTR(rdac1, S_IWUSR | S_IRUGO, show_rdac1, set_rdac1);
+
+static ssize_t show_eeprom1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC1);
+}
+
+static ssize_t set_eeprom1(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC1);
+}
+
+static DEVICE_ATTR(eeprom1, S_IWUSR | S_IRUGO, show_eeprom1, set_eeprom1);
+
+static ssize_t show_tolerance1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC1);
+}
+
+static DEVICE_ATTR(tolerance1, S_IRUGO, show_tolerance1, NULL);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC2);
+}
+
+static ssize_t set_rdac2(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC2);
+}
+
+static DEVICE_ATTR(rdac2, S_IWUSR | S_IRUGO, show_rdac2, set_rdac2);
+
+static ssize_t show_eeprom2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC2);
+}
+
+static ssize_t set_eeprom2(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC2);
+}
+
+static DEVICE_ATTR(eeprom2, S_IWUSR | S_IRUGO, show_eeprom2, set_eeprom2);
+
+static ssize_t show_tolerance2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC2);
+}
+
+static DEVICE_ATTR(tolerance2, S_IRUGO, show_tolerance2, NULL);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t show_rdac3(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_RDAC | AD525X_RDAC3);
+}
+
+static ssize_t set_rdac3(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_RDAC | AD525X_RDAC3);
+}
+
+static DEVICE_ATTR(rdac3, S_IWUSR | S_IRUGO, show_rdac3, set_rdac3);
+
+static ssize_t show_eeprom3(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf, AD525X_I2C_EEPROM | AD525X_RDAC3);
+}
+
+static ssize_t set_eeprom3(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_set_reg(dev, attr, buf, count,
+ AD525X_I2C_EEPROM | AD525X_RDAC3);
+}
+
+static DEVICE_ATTR(eeprom3, S_IWUSR | S_IRUGO, show_eeprom3, set_eeprom3);
+
+static ssize_t show_tolerance3(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_show_reg(dev, attr, buf,
+ AD525X_I2C_EEPROM | AD525X_TOL_RDAC3);
+}
+
+static DEVICE_ATTR(tolerance3, S_IRUGO, show_tolerance3, NULL);
+
+static struct attribute *ad525x_attributes_wipers[4][4] = {
+ {
+ &dev_attr_rdac0.attr,
+ &dev_attr_eeprom0.attr,
+ &dev_attr_tolerance0.attr,
+ NULL
+ }, {
+ &dev_attr_rdac1.attr,
+ &dev_attr_eeprom1.attr,
+ &dev_attr_tolerance1.attr,
+ NULL
+ }, {
+ &dev_attr_rdac2.attr,
+ &dev_attr_eeprom2.attr,
+ &dev_attr_tolerance2.attr,
+ NULL
+ }, {
+ &dev_attr_rdac3.attr,
+ &dev_attr_eeprom3.attr,
+ &dev_attr_tolerance3.attr,
+ NULL
+ }
+};
+
+static const struct attribute_group ad525x_group_wipers[] = {
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC0]},
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC1]},
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC2]},
+ {.attrs = ad525x_attributes_wipers[AD525X_RDAC3]},
+};
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t set_inc_all(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL);
+}
+
+static DEVICE_ATTR(inc_all, S_IWUSR, NULL, set_inc_all);
+
+static ssize_t set_dec_all(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL);
+}
+
+static DEVICE_ATTR(dec_all, S_IWUSR, NULL, set_dec_all);
+
+static ssize_t set_inc_all_6db(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_INC_ALL_6DB);
+}
+
+static DEVICE_ATTR(inc_all_6db, S_IWUSR, NULL, set_inc_all_6db);
+
+static ssize_t set_dec_all_6db(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return sysfs_do_cmd(dev, attr, buf, count, AD525X_DEC_ALL_6DB);
+}
+
+static DEVICE_ATTR(dec_all_6db, S_IWUSR, NULL, set_dec_all_6db);
+
+static struct attribute *ad525x_attributes_commands[] = {
+ &dev_attr_inc_all.attr,
+ &dev_attr_dec_all.attr,
+ &dev_attr_inc_all_6db.attr,
+ &dev_attr_dec_all_6db.attr,
+ NULL
+};
+
+static const struct attribute_group ad525x_group_commands = {
+ .attrs = ad525x_attributes_commands,
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* i2c device functions */
+
+/**
+ * ad525x_read - return the value contained in the specified register
+ *		on the AD525x device.
+ * @client: value returned from i2c_new_device()
+ * @reg: the register to read
+ *
+ * If the tolerance register is specified, 2 bytes are returned.
+ * Otherwise, 1 byte is returned. A negative value indicates an error
+ * occurred while reading the register.
+ */
+static s32 ad525x_read(struct i2c_client *client, u8 reg)
+{
+ struct dpot_data *data = i2c_get_clientdata(client);
+
+ if ((reg & AD525X_REG_TOL) || (data->max_pos > 256))
+ return i2c_smbus_read_word_data(client, (reg & 0xF8) |
+ ((reg & 0x7) << 1));
+ else
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+/**
+ * ad525x_write - store the given value in the specified register on
+ *		the AD525x device.
+ * @client: value returned from i2c_new_device()
+ * @reg: the register to write
+ * @value: the byte to store in the register
+ *
+ * For certain instructions that do not require a data byte, "NULL"
+ * should be specified for the "value" parameter. These instructions
+ * include NOP, RESTORE_FROM_EEPROM, and STORE_TO_EEPROM.
+ *
+ * A negative return value indicates an error occurred while reading
+ * the register.
+ */
+static s32 ad525x_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ struct dpot_data *data = i2c_get_clientdata(client);
+
+ /* Only write the instruction byte for certain commands */
+ if (reg & AD525X_I2C_CMD)
+ return i2c_smbus_write_byte(client, reg);
+
+ if (data->max_pos > 256)
+ return i2c_smbus_write_word_data(client, (reg & 0xF8) |
+ ((reg & 0x7) << 1), value);
+ else
+ /* All other registers require instruction + data bytes */
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int ad525x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct dpot_data *data;
+ int err = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(dev, "missing I2C functionality for this driver\n");
+		err = -EIO;
+		goto exit;
+ }
+
+ data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ switch (id->driver_data) {
+ case AD5258_ID:
+ data->max_pos = AD5258_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ break;
+ case AD5259_ID:
+ data->max_pos = AD5259_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ break;
+ case AD5251_ID:
+ data->max_pos = AD5251_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5252_ID:
+ data->max_pos = AD5252_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5253_ID:
+ data->max_pos = AD5253_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5254_ID:
+ data->max_pos = AD5254_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5255_ID:
+ data->max_pos = AD5255_MAX_POSITION;
+ err = sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ err |= sysfs_create_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ err |= sysfs_create_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ default:
+ err = -ENODEV;
+ goto exit_free;
+ }
+
+ if (err) {
+ dev_err(dev, "failed to register sysfs hooks\n");
+ goto exit_free;
+ }
+
+ data->devid = id->driver_data;
+ data->rdac_mask = data->max_pos - 1;
+
+ dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
+ id->name, data->max_pos);
+
+ return 0;
+
+exit_free:
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+exit:
+ dev_err(dev, "failed to create client\n");
+ return err;
+}
+
+static int __devexit ad525x_remove(struct i2c_client *client)
+{
+ struct dpot_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+
+ switch (data->devid) {
+ case AD5258_ID:
+ case AD5259_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ break;
+ case AD5251_ID:
+ case AD5252_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5253_ID:
+ case AD5254_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC3]);
+ sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ case AD5255_ID:
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC0]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC1]);
+ sysfs_remove_group(&dev->kobj,
+ &ad525x_group_wipers[AD525X_RDAC2]);
+ sysfs_remove_group(&dev->kobj, &ad525x_group_commands);
+ break;
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad525x_idtable[] = {
+ {"ad5258", AD5258_ID},
+ {"ad5259", AD5259_ID},
+ {"ad5251", AD5251_ID},
+ {"ad5252", AD5252_ID},
+ {"ad5253", AD5253_ID},
+ {"ad5254", AD5254_ID},
+ {"ad5255", AD5255_ID},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad525x_idtable);
+
+static struct i2c_driver ad525x_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .id_table = ad525x_idtable,
+ .probe = ad525x_probe,
+ .remove = __devexit_p(ad525x_remove),
+};
+
+static int __init ad525x_init(void)
+{
+ return i2c_add_driver(&ad525x_driver);
+}
+
+module_init(ad525x_init);
+
+static void __exit ad525x_exit(void)
+{
+ i2c_del_driver(&ad525x_driver);
+}
+
+module_exit(ad525x_exit);
+
+MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
+	      "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("AD525x digital potentiometer driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
new file mode 100644
index 00000000000..8110460558f
--- /dev/null
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -0,0 +1,370 @@
+/*
+ * Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT)
+ *
+ * Copyright (C) 2006, Advanced Micro Devices, Inc.
+ * Copyright (C) 2007 Andres Salomon <dilinger@debian.org>
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/cs5535.h>
+
+#define DRV_NAME "cs5535-mfgpt"
+#define MFGPT_BAR 2
+
+static int mfgpt_reset_timers;
+module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
+MODULE_PARM_DESC(mfgptfix, "Reset the MFGPT timers during init; "
+		"required by some broken BIOSes (i.e., TinyBIOS < 0.99).");
+
+struct cs5535_mfgpt_timer {
+ struct cs5535_mfgpt_chip *chip;
+ int nr;
+};
+
+static struct cs5535_mfgpt_chip {
+ DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
+ resource_size_t base;
+
+ struct pci_dev *pdev;
+ spinlock_t lock;
+ int initialized;
+} cs5535_mfgpt_chip;
+
+int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable)
+{
+ uint32_t msr, mask, value, dummy;
+ int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
+
+ if (!timer) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ /*
+ * The register maps for these are described in sections 6.17.1.x of
+ * the AMD Geode CS5536 Companion Device Data Book.
+ */
+ switch (event) {
+ case MFGPT_EVENT_RESET:
+ /*
+ * XXX: According to the docs, we cannot reset timers above
+ * 6; that is, resets for 7 and 8 will be ignored. Is this
+ * a problem? -dilinger
+ */
+ msr = MSR_MFGPT_NR;
+ mask = 1 << (timer->nr + 24);
+ break;
+
+ case MFGPT_EVENT_NMI:
+ msr = MSR_MFGPT_NR;
+ mask = 1 << (timer->nr + shift);
+ break;
+
+ case MFGPT_EVENT_IRQ:
+ msr = MSR_MFGPT_IRQ;
+ mask = 1 << (timer->nr + shift);
+ break;
+
+ default:
+ return -EIO;
+ }
+
+ rdmsr(msr, value, dummy);
+
+ if (enable)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ wrmsr(msr, value, dummy);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event);
+
+int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq,
+ int enable)
+{
+ uint32_t zsel, lpc, dummy;
+ int shift;
+
+ if (!timer) {
+ WARN_ON(1);
+ return -EIO;
+ }
+
+ /*
+ * Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
+ * is using the same CMP of the timer's Siamese twin, the IRQ is set to
+	 * 2, and we must not use or change it.
+	 * XXX: Likewise, two Linux drivers might clash if the second
+	 * overwrites the IRQ of the first. This can only happen when
+	 * forcing an IRQ; calling this with *irq == 0 is safe. Currently
+	 * there are no such conflicting drivers.
+ */
+ rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+ shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4;
+ if (((zsel >> shift) & 0xF) == 2)
+ return -EIO;
+
+ /* Choose IRQ: if none supplied, keep IRQ already set or use default */
+ if (!*irq)
+ *irq = (zsel >> shift) & 0xF;
+ if (!*irq)
+ *irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+
+ /* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
+ if (*irq < 1 || *irq == 2 || *irq > 15)
+ return -EIO;
+ rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
+ if (lpc & (1 << *irq))
+ return -EIO;
+
+ /* All chosen and checked - go for it */
+ if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+ return -EIO;
+ if (enable) {
+ zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
+ wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq);
+
+struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
+{
+ struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip;
+ struct cs5535_mfgpt_timer *timer = NULL;
+ unsigned long flags;
+ int max;
+
+ if (!mfgpt->initialized)
+ goto done;
+
+ /* only allocate timers from the working domain if requested */
+ if (domain == MFGPT_DOMAIN_WORKING)
+ max = 6;
+ else
+ max = MFGPT_MAX_TIMERS;
+
+ if (timer_nr >= max) {
+ /* programmer error. silly programmers! */
+ WARN_ON(1);
+ goto done;
+ }
+
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ if (timer_nr < 0) {
+ unsigned long t;
+
+ /* try to find any available timer */
+ t = find_first_bit(mfgpt->avail, max);
+ /* set timer_nr to -1 if no timers available */
+ timer_nr = t < max ? (int) t : -1;
+ } else {
+ /* check if the requested timer's available */
+		if (!test_bit(timer_nr, mfgpt->avail))
+ timer_nr = -1;
+ }
+
+ if (timer_nr >= 0)
+ /* if timer_nr is not -1, it's an available timer */
+ __clear_bit(timer_nr, mfgpt->avail);
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+
+ if (timer_nr < 0)
+ goto done;
+
+ timer = kmalloc(sizeof(*timer), GFP_KERNEL);
+ if (!timer) {
+ /* aw hell */
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ __set_bit(timer_nr, mfgpt->avail);
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+ goto done;
+ }
+ timer->chip = mfgpt;
+ timer->nr = timer_nr;
+ dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr);
+
+done:
+ return timer;
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer);
+
+/*
+ * XXX: This frees the timer memory, but never resets the actual hardware
+ * timer. The old geode_mfgpt code did this; it would be good to figure
+ * out a way to actually release the hardware timer. See comments below.
+ */
+void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer)
+{
+ kfree(timer);
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer);
+
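+/* Each timer owns an 8-byte register window above the MFGPT BAR base. */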
+uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg)
+{
+ return inw(timer->chip->base + reg + (timer->nr * 8));
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_read);
+
+void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value)
+{
+ outw(value, timer->chip->base + reg + (timer->nr * 8));
+}
+EXPORT_SYMBOL_GPL(cs5535_mfgpt_write);
+
+/*
+ * This is a sledgehammer that resets all MFGPT timers. This is required by
+ * some broken BIOSes which leave the system in an unstable state
+ * (TinyBIOS 0.98, for example; fixed in 0.99). It is unclear whether this
+ * undocumented MSR can also be used to release individual timers. Jordan
+ * tells me that he and Mitch once played with it, but it is unclear what
+ * the results of that were (and they experienced some instability).
+ */
+static void __init reset_all_timers(void)
+{
+ uint32_t val, dummy;
+
+ /* The following undocumented bit resets the MFGPT timers */
+ val = 0xFF; dummy = 0;
+ wrmsr(MSR_MFGPT_SETUP, val, dummy);
+}
+
+/*
+ * Check whether any MFGPTs are available for the kernel to use. In most
+ * cases, firmware that uses AMD's VSA code will claim all timers during
+ * bootup; we certainly don't want to take them if they're already in use.
+ * In other cases (such as with VSAless OpenFirmware), the system firmware
+ * leaves timers available for us to use.
+ */
+static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+{
+ struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
+ unsigned long flags;
+ int timers = 0;
+ uint16_t val;
+ int i;
+
+ /* bios workaround */
+ if (mfgpt_reset_timers)
+ reset_all_timers();
+
+ /* just to be safe, protect this section w/ lock */
+ spin_lock_irqsave(&mfgpt->lock, flags);
+ for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
+ timer.nr = i;
+ val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP);
+ if (!(val & MFGPT_SETUP_SETUP)) {
+ __set_bit(i, mfgpt->avail);
+ timers++;
+ }
+ }
+ spin_unlock_irqrestore(&mfgpt->lock, flags);
+
+ return timers;
+}
+
+static int __init cs5535_mfgpt_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int err, t;
+
+ /* There are two ways to get the MFGPT base address; one is by
+ * fetching it from MSR_LBAR_MFGPT, the other is by reading the
+ * PCI BAR info. The latter method is easier (especially across
+ * different architectures), so we'll stick with that for now. If
+ * it turns out to be unreliable in the face of crappy BIOSes, we
+ * can always go back to using MSRs.. */
+
+ err = pci_enable_device_io(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "can't enable device IO\n");
+ goto done;
+ }
+
+ err = pci_request_region(pdev, MFGPT_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", MFGPT_BAR);
+ goto done;
+ }
+
+ /* set up the driver-specific struct */
+ cs5535_mfgpt_chip.base = pci_resource_start(pdev, MFGPT_BAR);
+ cs5535_mfgpt_chip.pdev = pdev;
+ spin_lock_init(&cs5535_mfgpt_chip.lock);
+
+ dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", MFGPT_BAR,
+ (unsigned long long) cs5535_mfgpt_chip.base);
+
+ /* detect the available timers */
+ t = scan_timers(&cs5535_mfgpt_chip);
+ dev_info(&pdev->dev, DRV_NAME ": %d MFGPT timers available\n", t);
+ cs5535_mfgpt_chip.initialized = 1;
+ return 0;
+
+done:
+ return err;
+}
+
+static struct pci_device_id cs5535_mfgpt_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, cs5535_mfgpt_pci_tbl);
+
+/*
+ * Just like with the cs5535-gpio driver, we can't use the standard PCI driver
+ * registration stuff. It allows only one driver to bind to each PCI
+ * device, and we want the GPIO and MFGPT drivers to be able to share a PCI
+ * device. Instead, we manually scan for the PCI device, request a single
+ * region, and keep track of the devices that we're using.
+ */
+
+static int __init cs5535_mfgpt_scan_pci(void)
+{
+ struct pci_dev *pdev;
+ int err = -ENODEV;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs5535_mfgpt_pci_tbl); i++) {
+ pdev = pci_get_device(cs5535_mfgpt_pci_tbl[i].vendor,
+ cs5535_mfgpt_pci_tbl[i].device, NULL);
+ if (pdev) {
+ err = cs5535_mfgpt_probe(pdev,
+ &cs5535_mfgpt_pci_tbl[i]);
+ if (err)
+ pci_dev_put(pdev);
+
+ /* we only support a single CS5535/6 southbridge */
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int __init cs5535_mfgpt_init(void)
+{
+ return cs5535_mfgpt_scan_pci();
+}
+
+module_init(cs5535_mfgpt_init);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@collabora.co.uk>");
+MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2c27193aeaa..f939ebc2507 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -32,9 +32,6 @@
static const unsigned short normal_i2c[] = { 0x50, 0x51, 0x52, 0x53, 0x54,
0x55, 0x56, 0x57, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(eeprom);
-
/* Size of EEPROM in bytes */
#define EEPROM_SIZE 256
@@ -135,8 +132,7 @@ static struct bin_attribute eeprom_attr = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int eeprom_detect(struct i2c_client *client, int kind,
- struct i2c_board_info *info)
+static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
@@ -233,7 +229,7 @@ static struct i2c_driver eeprom_driver = {
.class = I2C_CLASS_DDC | I2C_CLASS_SPD,
.detect = eeprom_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static int __init eeprom_init(void)
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index e9eae4a7840..1eac626e710 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
+ [ENCLOSURE_STATUS_MAX] = NULL,
};
static const char *const enclosure_type [] = {
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 38576050776..247eb386a97 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -44,9 +44,20 @@ struct ilo_hwinfo {
struct pci_dev *ilo_dev;
+ /*
+ * open_lock serializes ccb_cnt during open and close
+ * [ irq disabled ]
+ * -> alloc_lock used when adding/removing/searching ccb_alloc,
+ * which represents all ccbs open on the device
+ * --> fifo_lock controls access to fifo queues shared with hw
+ *
+ * Locks must be taken in this order, but open_lock and alloc_lock
+	 * are optional; they do not need to be held in order to take a
+	 * lower-level lock.
+ */
+ spinlock_t open_lock;
spinlock_t alloc_lock;
spinlock_t fifo_lock;
- spinlock_t open_lock;
struct cdev cdev;
};
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 4bb7a3af9ad..395a4ea64e9 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -30,9 +30,6 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
-/* Insmod parameters */
-I2C_CLIENT_INSMOD_1(ics932s401);
-
/* ICS932S401 registers */
#define ICS932S401_REG_CFG2 0x01
#define ICS932S401_CFG1_SPREAD 0x01
@@ -106,12 +103,12 @@ struct ics932s401_data {
static int ics932s401_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int ics932s401_detect(struct i2c_client *client, int kind,
+static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int ics932s401_remove(struct i2c_client *client);
static const struct i2c_device_id ics932s401_id[] = {
- { "ics932s401", ics932s401 },
+ { "ics932s401", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ics932s401_id);
@@ -125,7 +122,7 @@ static struct i2c_driver ics932s401_driver = {
.remove = ics932s401_remove,
.id_table = ics932s401_id,
.detect = ics932s401_detect,
- .address_data = &addr_data,
+ .address_list = normal_i2c,
};
static struct ics932s401_data *ics932s401_update_device(struct device *dev)
@@ -413,7 +410,7 @@ static ssize_t show_spread(struct device *dev,
}
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int ics932s401_detect(struct i2c_client *client, int kind,
+static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 60b0b1a4fb3..09dcb699e66 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -138,7 +138,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is)
* even though the following code utilizes external interrupt registers
* to perform the speed calculation.
*/
-static void
+static void __devinit
ioc4_clock_calibrate(struct ioc4_driver_data *idd)
{
union ioc4_int_out int_out;
@@ -230,7 +230,7 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
* on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
* If neither is present, it's a PCI-RT.
*/
-static unsigned int
+static unsigned int __devinit
ioc4_variant(struct ioc4_driver_data *idd)
{
struct pci_dev *pdev = NULL;
@@ -269,7 +269,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
return IOC4_VARIANT_PCI_RT;
}
-static void
+static void __devinit
ioc4_load_modules(struct work_struct *work)
{
/* arg just has to be freed */
@@ -280,7 +280,7 @@ ioc4_load_modules(struct work_struct *work)
}
/* Adds a new instance of an IOC4 card */
-static int
+static int __devinit
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc4_driver_data *idd;
@@ -425,7 +425,7 @@ out:
}
/* Removes a particular instance of an IOC4 card. */
-static void
+static void __devexit
ioc4_remove(struct pci_dev *pdev)
{
struct ioc4_submodule *is;
@@ -476,7 +476,7 @@ static struct pci_driver ioc4_driver = {
.name = "IOC4",
.id_table = ioc4_id_table,
.probe = ioc4_probe,
- .remove = ioc4_remove,
+ .remove = __devexit_p(ioc4_remove),
};
MODULE_DEVICE_TABLE(pci, ioc4_id_table);
@@ -486,14 +486,14 @@ MODULE_DEVICE_TABLE(pci, ioc4_id_table);
*********************/
/* Module load */
-static int __devinit
+static int __init
ioc4_init(void)
{
return pci_register_driver(&ioc4_driver);
}
/* Module unload */
-static void __devexit
+static void __exit
ioc4_exit(void)
{
/* Ensure ioc4_load_modules() has completed before exiting */
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index e4ff50b95a5..fcb6ec1af17 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -712,6 +712,12 @@ static int run_simple_test(int is_get_char, int chr)
/* End of packet == #XX so look for the '#' */
if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') {
+ if (put_buf_cnt >= BUFMAX) {
+ eprintk("kgdbts: ERROR: put buffer overflow on"
+ " '%s' line %i\n", ts.name, ts.idx);
+ put_buf_cnt = 0;
+ return 0;
+ }
put_buf[put_buf_cnt] = '\0';
v2printk("put%i: %s\n", ts.idx, put_buf);
/* Trigger check here */
@@ -885,16 +891,16 @@ static void kgdbts_run_tests(void)
int nmi_sleep = 0;
int i;
- ptr = strstr(config, "F");
+ ptr = strchr(config, 'F');
if (ptr)
fork_test = simple_strtol(ptr + 1, NULL, 10);
- ptr = strstr(config, "S");
+ ptr = strchr(config, 'S');
if (ptr)
do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
- ptr = strstr(config, "N");
+ ptr = strchr(config, 'N');
if (ptr)
nmi_sleep = simple_strtol(ptr+1, NULL, 10);
- ptr = strstr(config, "I");
+ ptr = strchr(config, 'I');
if (ptr)
sstep_test = simple_strtol(ptr+1, NULL, 10);
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
index f93f03a9e6e..3ad76cd18b4 100644
--- a/drivers/misc/sgi-gru/gru.h
+++ b/drivers/misc/sgi-gru/gru.h
@@ -53,6 +53,17 @@ struct gru_chiplet_info {
int free_user_cbr;
};
+/*
+ * Statistics kept for each context.
+ */
+struct gru_gseg_statistics {
+ unsigned long fmm_tlbmiss;
+ unsigned long upm_tlbmiss;
+ unsigned long tlbdropin;
+ unsigned long context_stolen;
+ unsigned long reserved[10];
+};
+
/* Flags for GRU options on the gru_create_context() call */
/* Select one of the follow 4 options to specify how TLB misses are handled */
#define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h
index 3c9c06618e6..d95587cc794 100644
--- a/drivers/misc/sgi-gru/gru_instructions.h
+++ b/drivers/misc/sgi-gru/gru_instructions.h
@@ -34,17 +34,17 @@ extern void gru_wait_abort_proc(void *cb);
#include <asm/intrinsics.h>
#define __flush_cache(p) ia64_fc((unsigned long)p)
/* Use volatile on IA64 to ensure ordering via st4.rel */
-#define gru_ordered_store_int(p, v) \
+#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
- *((volatile int *)(p)) = v; /* force st.rel */ \
+ *((volatile unsigned long *)(p)) = v; /* force st.rel */ \
} while (0)
#elif defined(CONFIG_X86_64)
#define __flush_cache(p) clflush(p)
-#define gru_ordered_store_int(p, v) \
+#define gru_ordered_store_ulong(p, v) \
do { \
barrier(); \
- *(int *)p = v; \
+ *(unsigned long *)p = v; \
} while (0)
#else
#error "Unsupported architecture"
@@ -129,8 +129,13 @@ struct gru_instruction_bits {
*/
struct gru_instruction {
/* DW 0 */
- unsigned int op32; /* icmd,xtype,iaa0,ima,opc */
- unsigned int tri0;
+ union {
+ unsigned long op64; /* icmd,xtype,iaa0,ima,opc,tri0 */
+ struct {
+ unsigned int op32;
+ unsigned int tri0;
+ };
+ };
unsigned long tri1_bufsize; /* DW 1 */
unsigned long baddr0; /* DW 2 */
unsigned long nelem; /* DW 3 */
@@ -140,7 +145,7 @@ struct gru_instruction {
unsigned long avalue; /* DW 7 */
};
-/* Some shifts and masks for the low 32 bits of a GRU command */
+/* Some shifts and masks for the low 64 bits of a GRU command */
#define GRU_CB_ICMD_SHFT 0
#define GRU_CB_ICMD_MASK 0x1
#define GRU_CB_XTYPE_SHFT 8
@@ -155,6 +160,10 @@ struct gru_instruction {
#define GRU_CB_OPC_MASK 0xff
#define GRU_CB_EXOPC_SHFT 24
#define GRU_CB_EXOPC_MASK 0xff
+#define GRU_IDEF2_SHFT 32
+#define GRU_IDEF2_MASK 0x3ffff
+#define GRU_ISTATUS_SHFT 56
+#define GRU_ISTATUS_MASK 0x3
/* GRU instruction opcodes (opc field) */
#define OP_NOP 0x00
@@ -256,6 +265,7 @@ struct gru_instruction {
#define CBE_CAUSE_PROTOCOL_STATE_DATA_ERROR (1 << 16)
#define CBE_CAUSE_RA_RESPONSE_DATA_ERROR (1 << 17)
#define CBE_CAUSE_HA_RESPONSE_DATA_ERROR (1 << 18)
+#define CBE_CAUSE_FORCED_ERROR (1 << 19)
/* CBE cbrexecstatus bits */
#define CBR_EXS_ABORT_OCC_BIT 0
@@ -264,13 +274,15 @@ struct gru_instruction {
#define CBR_EXS_QUEUED_BIT 3
#define CBR_EXS_TLB_INVAL_BIT 4
#define CBR_EXS_EXCEPTION_BIT 5
+#define CBR_EXS_CB_INT_PENDING_BIT 6
#define CBR_EXS_ABORT_OCC (1 << CBR_EXS_ABORT_OCC_BIT)
#define CBR_EXS_INT_OCC (1 << CBR_EXS_INT_OCC_BIT)
#define CBR_EXS_PENDING (1 << CBR_EXS_PENDING_BIT)
#define CBR_EXS_QUEUED (1 << CBR_EXS_QUEUED_BIT)
-#define CBR_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
+#define CBR_EXS_TLB_INVAL (1 << CBR_EXS_TLB_INVAL_BIT)
#define CBR_EXS_EXCEPTION (1 << CBR_EXS_EXCEPTION_BIT)
+#define CBR_EXS_CB_INT_PENDING (1 << CBR_EXS_CB_INT_PENDING_BIT)
/*
* Exceptions are retried for the following cases. If any OTHER bits are set
@@ -296,12 +308,14 @@ union gru_mesqhead {
/* Generate the low word of a GRU instruction */
-static inline unsigned int
-__opword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
+static inline unsigned long
+__opdword(unsigned char opcode, unsigned char exopc, unsigned char xtype,
unsigned char iaa0, unsigned char iaa1,
- unsigned char ima)
+ unsigned long idef2, unsigned char ima)
{
return (1 << GRU_CB_ICMD_SHFT) |
+ ((unsigned long)CBS_ACTIVE << GRU_ISTATUS_SHFT) |
+		(idef2 << GRU_IDEF2_SHFT) |
(iaa0 << GRU_CB_IAA0_SHFT) |
(iaa1 << GRU_CB_IAA1_SHFT) |
(ima << GRU_CB_IMA_SHFT) |
@@ -319,12 +333,13 @@ static inline void gru_flush_cache(void *p)
}
/*
- * Store the lower 32 bits of the command including the "start" bit. Then
+ * Store the lower 64 bits of the command including the "start" bit. Then
* start the instruction executing.
*/
-static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
+static inline void gru_start_instruction(struct gru_instruction *ins, unsigned long op64)
{
- gru_ordered_store_int(ins, op32);
+ gru_ordered_store_ulong(ins, op64);
+ mb();
gru_flush_cache(ins);
}
@@ -340,6 +355,30 @@ static inline void gru_start_instruction(struct gru_instruction *ins, int op32)
* - nelem and stride are in elements
* - tri0/tri1 is in bytes for the beginning of the data segment.
*/
+static inline void gru_vload_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
+static inline void gru_vstore_phys(void *cb, unsigned long gpa,
+ unsigned int tri0, int iaa, unsigned long hints)
+{
+ struct gru_instruction *ins = (struct gru_instruction *)cb;
+
+ ins->baddr0 = (long)gpa | ((unsigned long)iaa << 62);
+ ins->nelem = 1;
+ ins->op1_stride = 1;
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, XTYPE_DW, iaa, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
+}
+
static inline void gru_vload(void *cb, unsigned long mem_addr,
unsigned int tri0, unsigned char xtype, unsigned long nelem,
unsigned long stride, unsigned long hints)
@@ -348,10 +387,9 @@ static inline void gru_vload(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VLOAD, 0, xtype, IAA_RAM, 0,
+ (unsigned long)tri0, CB_IMA(hints)));
}
static inline void gru_vstore(void *cb, unsigned long mem_addr,
@@ -362,10 +400,9 @@ static inline void gru_vstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_ivload(void *cb, unsigned long mem_addr,
@@ -376,10 +413,9 @@ static inline void gru_ivload(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVLOAD, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_ivstore(void *cb, unsigned long mem_addr,
@@ -390,10 +426,9 @@ static inline void gru_ivstore(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVSTORE, 0, xtype, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline void gru_vset(void *cb, unsigned long mem_addr,
@@ -406,8 +441,8 @@ static inline void gru_vset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->op1_stride = stride;
- gru_start_instruction(ins, __opword(OP_VSET, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_ivset(void *cb, unsigned long mem_addr,
@@ -420,8 +455,8 @@ static inline void gru_ivset(void *cb, unsigned long mem_addr,
ins->op2_value_baddr1 = value;
ins->nelem = nelem;
ins->tri1_bufsize = tri1;
- gru_start_instruction(ins, __opword(OP_IVSET, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_IVSET, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_vflush(void *cb, unsigned long mem_addr,
@@ -433,15 +468,15 @@ static inline void gru_vflush(void *cb, unsigned long mem_addr,
ins->baddr0 = (long)mem_addr;
ins->op1_stride = stride;
ins->nelem = nelem;
- gru_start_instruction(ins, __opword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_VFLUSH, 0, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_nop(void *cb, int hints)
{
struct gru_instruction *ins = (void *)cb;
- gru_start_instruction(ins, __opword(OP_NOP, 0, 0, 0, 0, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_NOP, 0, 0, 0, 0, 0, CB_IMA(hints)));
}
@@ -455,10 +490,9 @@ static inline void gru_bcopy(void *cb, const unsigned long src,
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
- ins->tri0 = tri0;
ins->tri1_bufsize = bufsize;
- gru_start_instruction(ins, __opword(OP_BCOPY, 0, xtype, IAA_RAM,
- IAA_RAM, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_BCOPY, 0, xtype, IAA_RAM,
+ IAA_RAM, tri0, CB_IMA(hints)));
}
static inline void gru_bstore(void *cb, const unsigned long src,
@@ -470,9 +504,8 @@ static inline void gru_bstore(void *cb, const unsigned long src,
ins->baddr0 = (long)src;
ins->op2_value_baddr1 = (long)dest;
ins->nelem = nelem;
- ins->tri0 = tri0;
- gru_start_instruction(ins, __opword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_BSTORE, 0, xtype, 0, IAA_RAM,
+ tri0, CB_IMA(hints)));
}
static inline void gru_gamir(void *cb, int exopc, unsigned long src,
@@ -481,8 +514,8 @@ static inline void gru_gamir(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
- gru_start_instruction(ins, __opword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMIR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
@@ -491,8 +524,8 @@ static inline void gru_gamirr(void *cb, int exopc, unsigned long src,
struct gru_instruction *ins = (void *)cb;
ins->baddr0 = (long)src;
- gru_start_instruction(ins, __opword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMIRR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamer(void *cb, int exopc, unsigned long src,
@@ -505,8 +538,8 @@ static inline void gru_gamer(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
- gru_start_instruction(ins, __opword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMER, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
@@ -518,8 +551,8 @@ static inline void gru_gamerr(void *cb, int exopc, unsigned long src,
ins->baddr0 = (long)src;
ins->op1_stride = operand1;
ins->op2_value_baddr1 = operand2;
- gru_start_instruction(ins, __opword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMERR, exopc, xtype, IAA_RAM, 0,
+ 0, CB_IMA(hints)));
}
static inline void gru_gamxr(void *cb, unsigned long src,
@@ -529,8 +562,8 @@ static inline void gru_gamxr(void *cb, unsigned long src,
ins->baddr0 = (long)src;
ins->nelem = 4;
- gru_start_instruction(ins, __opword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
- IAA_RAM, 0, CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_GAMXR, EOP_XR_CSWAP, XTYPE_DW,
+ IAA_RAM, 0, 0, CB_IMA(hints)));
}
static inline void gru_mesq(void *cb, unsigned long queue,
@@ -541,9 +574,8 @@ static inline void gru_mesq(void *cb, unsigned long queue,
ins->baddr0 = (long)queue;
ins->nelem = nelem;
- ins->tri0 = tri0;
- gru_start_instruction(ins, __opword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
- CB_IMA(hints)));
+ gru_start_instruction(ins, __opdword(OP_MESQ, 0, XTYPE_CL, IAA_RAM, 0,
+ tri0, CB_IMA(hints)));
}
static inline unsigned long gru_get_amo_value(void *cb)
@@ -662,6 +694,14 @@ static inline void gru_wait_abort(void *cb)
gru_wait_abort_proc(cb);
}
+/*
+ * Get a pointer to the start of a gseg
+ * p - Any valid pointer within the gseg
+ */
+static inline void *gru_get_gseg_pointer (void *p)
+{
+ return (void *)((unsigned long)p & ~(GRU_GSEG_PAGESIZE - 1));
+}
/*
* Get a pointer to a control block
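Illustration, not part of the patch: a minimal sketch of how the widened command word built by __opdword() above is laid out, assuming only the GRU_IDEF2_* and GRU_ISTATUS_* shifts and masks introduced in this file; the helper names are hypothetical.

/* Bits 0-31 carry the old op32 (icmd, xtype, iaa0/1, ima, opc, exopc),
 * bits 32 and up carry idef2 (the tri0 offset), and bits 56-57 carry
 * istatus, preset to CBS_ACTIVE so user code never sees a stale status. */
static inline unsigned long sketch_opdword_idef2(unsigned long op64)
{
	return (op64 >> GRU_IDEF2_SHFT) & GRU_IDEF2_MASK;
}

static inline unsigned long sketch_opdword_istatus(unsigned long op64)
{
	return (op64 >> GRU_ISTATUS_SHFT) & GRU_ISTATUS_MASK;
}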
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 679e0177828..38657cdaf54 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -40,6 +40,12 @@
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
+/* Return codes for vtop functions */
+#define VTOP_SUCCESS 0
+#define VTOP_INVALID -1
+#define VTOP_RETRY -2
+
+
/*
* Test if a physical address is a valid GRU GSEG address
*/
@@ -90,19 +96,22 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct gru_thread_state *gts = NULL;
+ struct gru_thread_state *gts = ERR_PTR(-EINVAL);
down_write(&mm->mmap_sem);
vma = gru_find_vma(vaddr);
- if (vma)
- gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
- if (gts) {
- mutex_lock(&gts->ts_ctxlock);
- downgrade_write(&mm->mmap_sem);
- } else {
- up_write(&mm->mmap_sem);
- }
+ if (!vma)
+ goto err;
+ gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
+ if (IS_ERR(gts))
+ goto err;
+ mutex_lock(&gts->ts_ctxlock);
+ downgrade_write(&mm->mmap_sem);
+ return gts;
+
+err:
+ up_write(&mm->mmap_sem);
return gts;
}
@@ -122,39 +131,15 @@ static void gru_unlock_gts(struct gru_thread_state *gts)
* is necessary to prevent the user from seeing a stale cb.istatus that will
* change as soon as the TFH restart is complete. Races may cause an
* occasional failure to clear the cb.istatus, but that is ok.
- *
- * If the cb address is not valid (should not happen, but...), nothing
- * bad will happen.. The get_user()/put_user() will fail but there
- * are no bad side-effects.
*/
-static void gru_cb_set_istatus_active(unsigned long __user *cb)
+static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
- union {
- struct gru_instruction_bits bits;
- unsigned long dw;
- } u;
-
- if (cb) {
- get_user(u.dw, cb);
- u.bits.istatus = CBS_ACTIVE;
- put_user(u.dw, cb);
+ if (cbk) {
+ cbk->istatus = CBS_ACTIVE;
}
}
/*
- * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the
- * interrupt. Interrupts are always sent to a cpu on the blade that contains the
- * GRU (except for headless blades which are not currently supported). A blade
- * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
- * number uniquely identifies the GRU chiplet on the local blade that caused the
- * interrupt. Always called in interrupt context.
- */
-static inline struct gru_state *irq_to_gru(int irq)
-{
- return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
-}
-
-/*
* Read & clear a TFM
*
* The GRU has an array of fault maps. A map is private to a cpu
@@ -207,10 +192,11 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
{
struct page *page;
- /* ZZZ Need to handle HUGE pages */
- if (is_vm_hugetlb_page(vma))
- return -EFAULT;
+#ifdef CONFIG_HUGETLB_PAGE
+ *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
+#else
*pageshift = PAGE_SHIFT;
+#endif
if (get_user_pages
(current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
return -EFAULT;
@@ -268,7 +254,6 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
return 0;
err:
- local_irq_enable();
return 1;
}
@@ -301,14 +286,69 @@ static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
paddr = paddr & ~((1UL << ps) - 1);
*gpa = uv_soc_phys_ram_to_gpa(paddr);
*pageshift = ps;
- return 0;
+ return VTOP_SUCCESS;
inval:
- return -1;
+ return VTOP_INVALID;
upm:
- return -2;
+ return VTOP_RETRY;
+}
+
+
+/*
+ * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
+ * CBE cacheline so that the line will be written back to home agent.
+ * Otherwise the line may be silently dropped. This has no impact
+ * except on performance.
+ */
+static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
+{
+ if (unlikely(cbe)) {
+ cbe->cbrexecstatus = 0; /* make CL dirty */
+ gru_flush_cache(cbe);
+ }
}
+/*
+ * Preload the TLB with entries that may be required. Currently, preloading
+ * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
+ * the end of the bcopy transfer, whichever is smaller.
+ */
+static void gru_preload_tlb(struct gru_state *gru,
+ struct gru_thread_state *gts, int atomic,
+ unsigned long fault_vaddr, int asid, int write,
+ unsigned char tlb_preload_count,
+ struct gru_tlb_fault_handle *tfh,
+ struct gru_control_block_extended *cbe)
+{
+ unsigned long vaddr = 0, gpa;
+ int ret, pageshift;
+
+ if (cbe->opccpy != OP_BCOPY)
+ return;
+
+ if (fault_vaddr == cbe->cbe_baddr0)
+ vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
+ else if (fault_vaddr == cbe->cbe_baddr1)
+ vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
+
+ fault_vaddr &= PAGE_MASK;
+ vaddr &= PAGE_MASK;
+ vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
+
+ while (vaddr > fault_vaddr) {
+ ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+ if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
+ GRU_PAGESIZE(pageshift)))
+ return;
+ gru_dbg(grudev,
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
+ vaddr, asid, write, pageshift, gpa);
+ vaddr -= PAGE_SIZE;
+ STAT(tlb_preload_page);
+ }
+}
/*
* Drop a TLB entry into the GRU. The fault is described by info in an TFH.
@@ -320,11 +360,14 @@ upm:
* < 0 = error code
*
*/
-static int gru_try_dropin(struct gru_thread_state *gts,
+static int gru_try_dropin(struct gru_state *gru,
+ struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
- unsigned long __user *cb)
+ struct gru_instruction_bits *cbk)
{
- int pageshift = 0, asid, write, ret, atomic = !cb;
+ struct gru_control_block_extended *cbe = NULL;
+ unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
+ int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
unsigned long gpa = 0, vaddr = 0;
/*
@@ -335,24 +378,34 @@ static int gru_try_dropin(struct gru_thread_state *gts,
*/
/*
+ * Prefetch the CBE if doing TLB preloading
+ */
+ if (unlikely(tlb_preload_count)) {
+ cbe = gru_tfh_to_cbe(tfh);
+ prefetchw(cbe);
+ }
+
+ /*
* Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
* Might be a hardware race OR a stupid user. Ignore FMM because FMM
* is a transient state.
*/
if (tfh->status != TFHSTATUS_EXCEPTION) {
gru_flush_cache(tfh);
+ sync_core();
if (tfh->status != TFHSTATUS_EXCEPTION)
goto failnoexception;
STAT(tfh_stale_on_fault);
}
if (tfh->state == TFHSTATE_IDLE)
goto failidle;
- if (tfh->state == TFHSTATE_MISS_FMM && cb)
+ if (tfh->state == TFHSTATE_MISS_FMM && cbk)
goto failfmm;
write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
vaddr = tfh->missvaddr;
asid = tfh->missasid;
+ indexway = tfh->indexway;
if (asid == 0)
goto failnoasid;
@@ -366,41 +419,51 @@ static int gru_try_dropin(struct gru_thread_state *gts,
goto failactive;
ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
- if (ret == -1)
+ if (ret == VTOP_INVALID)
goto failinval;
- if (ret == -2)
+ if (ret == VTOP_RETRY)
goto failupm;
if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
- if (atomic || !gru_update_cch(gts, 0)) {
+ if (atomic || !gru_update_cch(gts)) {
gts->ts_force_cch_reload = 1;
goto failupm;
}
}
- gru_cb_set_istatus_active(cb);
+
+ if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
+ gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
+ gru_flush_cache_cbe(cbe);
+ }
+
+ gru_cb_set_istatus_active(cbk);
+ gts->ustats.tlbdropin++;
tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
GRU_PAGESIZE(pageshift));
- STAT(tlb_dropin);
gru_dbg(grudev,
- "%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
- ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
- pageshift, gpa);
+ "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
+ " rw %d, ps %d, gpa 0x%lx\n",
+ atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
+ indexway, write, pageshift, gpa);
+ STAT(tlb_dropin);
return 0;
failnoasid:
/* No asid (delayed unload). */
STAT(tlb_dropin_fail_no_asid);
gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
- if (!cb)
+ if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
return -EAGAIN;
failupm:
/* Atomic failure switch CBR to UPM */
tfh_user_polling_mode(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_upm);
gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return 1;
@@ -408,6 +471,7 @@ failupm:
failfmm:
/* FMM state on UPM call */
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_fmm);
gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
@@ -415,17 +479,20 @@ failfmm:
failnoexception:
/* TFH status did not show exception pending */
gru_flush_cache(tfh);
- if (cb)
- gru_flush_cache(cb);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
STAT(tlb_dropin_fail_no_exception);
- gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n", tfh, tfh->status, tfh->state);
+ gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
+ tfh, tfh->status, tfh->state);
return 0;
failidle:
/* TFH state was idle - no miss pending */
gru_flush_cache(tfh);
- if (cb)
- gru_flush_cache(cb);
+ gru_flush_cache_cbe(cbe);
+ if (cbk)
+ gru_flush_cache(cbk);
STAT(tlb_dropin_fail_idle);
gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
return 0;
@@ -433,16 +500,18 @@ failidle:
failinval:
/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
tfh_exception(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_invalid);
gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
return -EFAULT;
failactive:
/* Range invalidate active. Switch to UPM iff atomic */
- if (!cb)
+ if (!cbk)
tfh_user_polling_mode(tfh);
else
gru_flush_cache(tfh);
+ gru_flush_cache_cbe(cbe);
STAT(tlb_dropin_fail_range_active);
gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
tfh, vaddr);
@@ -455,31 +524,41 @@ failactive:
* Note that this is the interrupt handler that is registered with linux
* interrupt handlers.
*/
-irqreturn_t gru_intr(int irq, void *dev_id)
+static irqreturn_t gru_intr(int chiplet, int blade)
{
struct gru_state *gru;
struct gru_tlb_fault_map imap, dmap;
struct gru_thread_state *gts;
struct gru_tlb_fault_handle *tfh = NULL;
+ struct completion *cmp;
int cbrnum, ctxnum;
STAT(intr);
- gru = irq_to_gru(irq);
+ gru = &gru_base[blade]->bs_grus[chiplet];
if (!gru) {
- dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
- raw_smp_processor_id(), irq);
+ dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
+ raw_smp_processor_id(), chiplet);
return IRQ_NONE;
}
get_clear_fault_map(gru, &imap, &dmap);
+ gru_dbg(grudev,
+ "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+ smp_processor_id(), chiplet, gru->gs_gid,
+ imap.fault_bits[0], imap.fault_bits[1],
+ dmap.fault_bits[0], dmap.fault_bits[1]);
for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
- complete(gru->gs_blade->bs_async_wq);
+ STAT(intr_cbr);
+ cmp = gru->gs_blade->bs_async_wq;
+ if (cmp)
+ complete(cmp);
gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
- gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
+ gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
}
for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
+ STAT(intr_tfh);
tfh = get_tfh_by_index(gru, cbrnum);
prefetchw(tfh); /* Helps on hdw, required for emulator */
@@ -492,14 +571,20 @@ irqreturn_t gru_intr(int irq, void *dev_id)
ctxnum = tfh->ctxnum;
gts = gru->gs_gts[ctxnum];
+ /* Spurious interrupts can cause this. Ignore. */
+ if (!gts) {
+ STAT(intr_spurious);
+ continue;
+ }
+
/*
* This is running in interrupt context. Trylock the mmap_sem.
* If it fails, retry the fault in user context.
*/
+ gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
down_read_trylock(&gts->ts_mm->mmap_sem)) {
- gts->ustats.fmm_tlbdropin++;
- gru_try_dropin(gts, tfh, NULL);
+ gru_try_dropin(gru, gts, tfh, NULL);
up_read(&gts->ts_mm->mmap_sem);
} else {
tfh_user_polling_mode(tfh);
@@ -509,20 +594,43 @@ irqreturn_t gru_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t gru0_intr(int irq, void *dev_id)
+{
+ return gru_intr(0, uv_numa_blade_id());
+}
+
+irqreturn_t gru1_intr(int irq, void *dev_id)
+{
+ return gru_intr(1, uv_numa_blade_id());
+}
+
+irqreturn_t gru_intr_mblade(int irq, void *dev_id)
+{
+ int blade;
+
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_intr(0, blade);
+ gru_intr(1, blade);
+ }
+ return IRQ_HANDLED;
+}
+
static int gru_user_dropin(struct gru_thread_state *gts,
struct gru_tlb_fault_handle *tfh,
- unsigned long __user *cb)
+ void *cb)
{
struct gru_mm_struct *gms = gts->ts_gms;
int ret;
- gts->ustats.upm_tlbdropin++;
+ gts->ustats.upm_tlbmiss++;
while (1) {
wait_event(gms->ms_wait_queue,
atomic_read(&gms->ms_range_active) == 0);
prefetchw(tfh); /* Helps on hdw, required for emulator */
- ret = gru_try_dropin(gts, tfh, cb);
+ ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
if (ret <= 0)
return ret;
STAT(call_os_wait_queue);
@@ -538,52 +646,41 @@ int gru_handle_user_call_os(unsigned long cb)
{
struct gru_tlb_fault_handle *tfh;
struct gru_thread_state *gts;
- unsigned long __user *cbp;
+ void *cbk;
int ucbnum, cbrnum, ret = -EINVAL;
STAT(call_os);
- gru_dbg(grudev, "address 0x%lx\n", cb);
/* sanity check the cb pointer */
ucbnum = get_cb_number((void *)cb);
if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
return -EINVAL;
- cbp = (unsigned long *)cb;
gts = gru_find_lock_gts(cb);
if (!gts)
return -EINVAL;
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
goto exit;
- /*
- * If force_unload is set, the UPM TLB fault is phony. The task
- * has migrated to another node and the GSEG must be moved. Just
- * unload the context. The task will page fault and assign a new
- * context.
- */
- if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
- gts->ts_blade != uv_numa_blade_id()) {
- STAT(call_os_offnode_reference);
- gts->ts_force_unload = 1;
- }
+ gru_check_context_placement(gts);
/*
* CCH may contain stale data if ts_force_cch_reload is set.
*/
if (gts->ts_gru && gts->ts_force_cch_reload) {
gts->ts_force_cch_reload = 0;
- gru_update_cch(gts, 0);
+ gru_update_cch(gts);
}
ret = -EAGAIN;
cbrnum = thread_cbr_number(gts, ucbnum);
- if (gts->ts_force_unload) {
- gru_unload_context(gts, 1);
- } else if (gts->ts_gru) {
+ if (gts->ts_gru) {
tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
- ret = gru_user_dropin(gts, tfh, cbp);
+ cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
+ gts->ts_ctxnum, ucbnum);
+ ret = gru_user_dropin(gts, tfh, cbk);
}
exit:
gru_unlock_gts(gts);
@@ -605,11 +702,11 @@ int gru_get_exception_detail(unsigned long arg)
if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
return -EFAULT;
- gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
gts = gru_find_lock_gts(excdet.cb);
if (!gts)
return -EINVAL;
+ gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
ucbnum = get_cb_number((void *)excdet.cb);
if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
ret = -EINVAL;
@@ -617,6 +714,7 @@ int gru_get_exception_detail(unsigned long arg)
cbrnum = thread_cbr_number(gts, ucbnum);
cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core(); /* make sure we have current data */
excdet.opc = cbe->opccpy;
excdet.exopc = cbe->exopccpy;
excdet.ecause = cbe->ecause;
@@ -624,7 +722,7 @@ int gru_get_exception_detail(unsigned long arg)
excdet.exceptdet1 = cbe->idef3upd;
excdet.cbrstate = cbe->cbrstate;
excdet.cbrexecstatus = cbe->cbrexecstatus;
- gru_flush_cache(cbe);
+ gru_flush_cache_cbe(cbe);
ret = 0;
} else {
ret = -EAGAIN;
@@ -733,6 +831,11 @@ long gru_get_gseg_statistics(unsigned long arg)
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
+ /*
+ * The library creates arrays of contexts for threaded programs.
+ * If no gts exists in the array, the context has never been used & all
+ * statistics are implicitly 0.
+ */
gts = gru_find_lock_gts(req.gseg);
if (gts) {
memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
@@ -762,11 +865,25 @@ int gru_set_context_option(unsigned long arg)
return -EFAULT;
gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
- gts = gru_alloc_locked_gts(req.gseg);
- if (!gts)
- return -EINVAL;
+ gts = gru_find_lock_gts(req.gseg);
+ if (!gts) {
+ gts = gru_alloc_locked_gts(req.gseg);
+ if (IS_ERR(gts))
+ return PTR_ERR(gts);
+ }
switch (req.op) {
+ case sco_blade_chiplet:
+ /* Select blade/chiplet for GRU context */
+ if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES || !gru_base[req.val1] ||
+ req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
+ ret = -EINVAL;
+ } else {
+ gts->ts_user_blade_id = req.val1;
+ gts->ts_user_chiplet_id = req.val0;
+ gru_check_context_placement(gts);
+ }
+ break;
case sco_gseg_owner:
/* Register the current task as the GSEG owner */
gts->ts_tgid_owner = current->tgid;
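Illustration, not part of the patch: the VTOP_* codes added at the top of this file replace the bare -1/-2 return values; a minimal sketch of the dispatch pattern gru_try_dropin() uses, with a hypothetical caller name and error mapping.

/* Illustrative only: act on a gru_vtop() result. */
static int sketch_handle_vtop(struct gru_thread_state *gts, unsigned long vaddr,
			      int write, int atomic)
{
	unsigned long gpa;
	int pageshift, ret;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		return -EFAULT;	/* bad address, CBR is put into EXCEPTION state */
	if (ret == VTOP_RETRY)
		return -EAGAIN;	/* page not resident, switch the CBR to UPM and retry */
	/* VTOP_SUCCESS: gpa and pageshift are valid for a TLB dropin */
	return 0;
}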
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index ce5eda985ab..cb3b4d22847 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -35,6 +35,9 @@
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_X86_64
+#include <asm/uv/uv_irq.h>
+#endif
#include <asm/uv/uv.h>
#include "gru.h"
#include "grulib.h"
@@ -130,7 +133,6 @@ static int gru_create_new_context(unsigned long arg)
struct gru_vma_data *vdata;
int ret = -EINVAL;
-
if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
return -EFAULT;
@@ -150,6 +152,7 @@ static int gru_create_new_context(unsigned long arg)
vdata->vd_dsr_au_count =
GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
+ vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
up_write(&current->mm->mmap_sem);
@@ -190,7 +193,7 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
{
int err = -EBADRQC;
- gru_dbg(grudev, "file %p\n", file);
+ gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg);
switch (req) {
case GRU_CREATE_CONTEXT:
@@ -232,23 +235,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
* system.
*/
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
- void *vaddr, int nid, int bid, int grunum)
+ void *vaddr, int blade_id, int chiplet_id)
{
spin_lock_init(&gru->gs_lock);
spin_lock_init(&gru->gs_asid_lock);
gru->gs_gru_base_paddr = paddr;
gru->gs_gru_base_vaddr = vaddr;
- gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
- gru->gs_blade = gru_base[bid];
- gru->gs_blade_id = bid;
+ gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
+ gru->gs_blade = gru_base[blade_id];
+ gru->gs_blade_id = blade_id;
+ gru->gs_chiplet_id = chiplet_id;
gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
gru->gs_asid_limit = MAX_ASID;
gru_tgh_flush_init(gru);
if (gru->gs_gid >= gru_max_gids)
gru_max_gids = gru->gs_gid + 1;
- gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
- bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
+ gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
+ blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
gru->gs_gru_base_paddr);
}
@@ -264,12 +268,10 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
max_user_cbrs = GRU_NUM_CB;
max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
- for_each_online_node(nid) {
- bid = uv_node_to_blade_id(nid);
- pnode = uv_node_to_pnode(nid);
- if (bid < 0 || gru_base[bid])
- continue;
- page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
+ for_each_possible_blade(bid) {
+ pnode = uv_blade_to_pnode(bid);
+ nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
+ page = alloc_pages_node(nid, GFP_KERNEL, order);
if (!page)
goto fail;
gru_base[bid] = page_address(page);
@@ -285,7 +287,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
chip++, gru++) {
paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
- gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
+ gru_init_chiplet(gru, paddr, vaddr, bid, chip);
n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
cbrs = max(cbrs, n);
n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
@@ -298,39 +300,215 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
return 0;
fail:
- for (nid--; nid >= 0; nid--)
- free_pages((unsigned long)gru_base[nid], order);
+ for (bid--; bid >= 0; bid--)
+ free_pages((unsigned long)gru_base[bid], order);
return -ENOMEM;
}
-#ifdef CONFIG_IA64
+static void gru_free_tables(void)
+{
+ int bid;
+ int order = get_order(sizeof(struct gru_state) *
+ GRU_CHIPLETS_PER_BLADE);
-static int get_base_irq(void)
+ for (bid = 0; bid < GRU_MAX_BLADES; bid++)
+ free_pages((unsigned long)gru_base[bid], order);
+}
+
+static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
{
- return IRQ_GRU;
+ unsigned long mmr = 0;
+ int core;
+
+ /*
+ * We target the cores of a blade and not the hyperthreads themselves.
+ * There is a max of 8 cores per socket and 2 sockets per blade,
+ * making for a max total of 16 cores (i.e., 16 CPUs without
+ * hyperthreading and 32 CPUs with hyperthreading).
+ */
+ core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
+ return 0;
+
+ if (chiplet == 0) {
+ mmr = UVH_GR0_TLB_INT0_CONFIG +
+ core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
+ } else if (chiplet == 1) {
+ mmr = UVH_GR1_TLB_INT0_CONFIG +
+ core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
+ } else {
+ BUG();
+ }
+
+ *corep = core;
+ return mmr;
}
-#elif defined CONFIG_X86_64
+#ifdef CONFIG_IA64
-static void noop(unsigned int irq)
+static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
+
+static void gru_noop(unsigned int irq)
{
}
-static struct irq_chip gru_chip = {
- .name = "gru",
- .mask = noop,
- .unmask = noop,
- .ack = noop,
+static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
+ [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
+ .mask = gru_noop,
+ .unmask = gru_noop,
+ .ack = gru_noop
+ }
};
-static int get_base_irq(void)
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
+{
+ unsigned long mmr;
+ int irq = IRQ_GRU + chiplet;
+ int ret, core;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ if (gru_irq_count[chiplet] == 0) {
+ gru_chip[chiplet].name = irq_name;
+ ret = set_irq_chip(irq, &gru_chip[chiplet]);
+ if (ret) {
+ printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ }
+ gru_irq_count[chiplet]++;
+
+ return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
+{
+ unsigned long mmr;
+ int core, irq = IRQ_GRU + chiplet;
+
+ if (gru_irq_count[chiplet] == 0)
+ return;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return;
+
+ if (--gru_irq_count[chiplet] == 0)
+ free_irq(irq, NULL);
+}
+
+#elif defined CONFIG_X86_64
+
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+ irq_handler_t irq_handler, int cpu, int blade)
+{
+ unsigned long mmr;
+ int irq, core;
+ int ret;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr == 0)
+ return 0;
+
+ irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
+ if (irq < 0) {
+ printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -irq);
+ return irq;
+ }
+
+ ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+ if (ret) {
+ uv_teardown_irq(irq);
+ printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+ GRU_DRIVER_ID_STR, -ret);
+ return ret;
+ }
+ gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
+ return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
- set_irq_chip(IRQ_GRU, &gru_chip);
- set_irq_chip(IRQ_GRU + 1, &gru_chip);
- return IRQ_GRU;
+ int irq, core;
+ unsigned long mmr;
+
+ mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+ if (mmr) {
+ irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
+ if (irq) {
+ free_irq(irq, NULL);
+ uv_teardown_irq(irq);
+ }
+ }
}
+
#endif
+static void gru_teardown_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ gru_chiplet_teardown_tlb_irq(0, cpu, blade);
+ gru_chiplet_teardown_tlb_irq(1, cpu, blade);
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ gru_chiplet_teardown_tlb_irq(0, 0, blade);
+ gru_chiplet_teardown_tlb_irq(1, 0, blade);
+ }
+}
+
+static int gru_setup_tlb_irqs(void)
+{
+ int blade;
+ int cpu;
+ int ret;
+
+ for_each_online_cpu(cpu) {
+ blade = uv_cpu_to_blade_id(cpu);
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+ for_each_possible_blade(blade) {
+ if (uv_blade_nr_possible_cpus(blade))
+ continue;
+ ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+
+ ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
+ if (ret != 0)
+ goto exit1;
+ }
+
+ return 0;
+
+exit1:
+ gru_teardown_tlb_irqs();
+ return ret;
+}
+
/*
* gru_init
*
@@ -338,8 +516,7 @@ static int get_base_irq(void)
*/
static int __init gru_init(void)
{
- int ret, irq, chip;
- char id[10];
+ int ret;
if (!is_uv_system())
return 0;
@@ -354,41 +531,29 @@ static int __init gru_init(void)
gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
gru_start_paddr, gru_end_paddr);
- irq = get_base_irq();
- for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
- ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
- /* TODO: fix irq handling on x86. For now ignore failure because
- * interrupts are not required & not yet fully supported */
- if (ret) {
- printk(KERN_WARNING
- "!!!WARNING: GRU ignoring request failure!!!\n");
- ret = 0;
- }
- if (ret) {
- printk(KERN_ERR "%s: request_irq failed\n",
- GRU_DRIVER_ID_STR);
- goto exit1;
- }
- }
-
ret = misc_register(&gru_miscdev);
if (ret) {
printk(KERN_ERR "%s: misc_register failed\n",
GRU_DRIVER_ID_STR);
- goto exit1;
+ goto exit0;
}
ret = gru_proc_init();
if (ret) {
printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
- goto exit2;
+ goto exit1;
}
ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
if (ret) {
printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
- goto exit3;
+ goto exit2;
}
+
+ ret = gru_setup_tlb_irqs();
+ if (ret != 0)
+ goto exit3;
+
gru_kservices_init();
printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
@@ -396,31 +561,24 @@ static int __init gru_init(void)
return 0;
exit3:
- gru_proc_exit();
+ gru_free_tables();
exit2:
- misc_deregister(&gru_miscdev);
+ gru_proc_exit();
exit1:
- for (--chip; chip >= 0; chip--)
- free_irq(irq + chip, NULL);
+ misc_deregister(&gru_miscdev);
+exit0:
return ret;
}
static void __exit gru_exit(void)
{
- int i, bid;
- int order = get_order(sizeof(struct gru_state) *
- GRU_CHIPLETS_PER_BLADE);
-
if (!is_uv_system())
return;
- for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
- free_irq(IRQ_GRU + i, NULL);
+ gru_teardown_tlb_irqs();
gru_kservices_exit();
- for (bid = 0; bid < GRU_MAX_BLADES; bid++)
- free_pages((unsigned long)gru_base[bid], order);
-
+ gru_free_tables();
misc_deregister(&gru_miscdev);
gru_proc_exit();
}
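Illustration, not part of the patch: a minimal sketch of the chiplet-0 branch of the core-to-MMR mapping that gru_chiplet_cpu_to_mmr() above performs; the function name is hypothetical and the UVH_* register symbols are taken from that hunk.

/* Illustrative only: pick the GR0 TLB interrupt config MMR for a cpu. */
static unsigned long sketch_gr0_tlb_int_mmr(int cpu)
{
	int core = uv_cpu_core_number(cpu) +
		   UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);

	/* Only the first hyperthread of each core gets its own vector. */
	if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
		return 0;
	return UVH_GR0_TLB_INT0_CONFIG +
	       core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
}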
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 37e7cfc53b9..2f30badc6ff 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -27,9 +27,11 @@
#ifdef CONFIG_IA64
#include <asm/processor.h>
#define GRU_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
+#define CLKS2NSEC(c) ((c) * 1000000000 / local_cpu_data->itc_freq)
#else
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+#define CLKS2NSEC(c) ((c) * 1000000 / tsc_khz)
#endif
/* Extract the status field from a kernel handle */
@@ -39,21 +41,39 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
+ unsigned long nsec;
+
+ nsec = CLKS2NSEC(clks);
atomic_long_inc(&mcs_op_statistics[op].count);
- atomic_long_add(clks, &mcs_op_statistics[op].total);
- if (mcs_op_statistics[op].max < clks)
- mcs_op_statistics[op].max = clks;
+ atomic_long_add(nsec, &mcs_op_statistics[op].total);
+ if (mcs_op_statistics[op].max < nsec)
+ mcs_op_statistics[op].max = nsec;
}
static void start_instruction(void *h)
{
unsigned long *w0 = h;
- wmb(); /* setting CMD bit must be last */
- *w0 = *w0 | 1;
+ wmb(); /* setting CMD/STATUS bits must be last */
+ *w0 = *w0 | 0x20001;
gru_flush_cache(h);
}
+static void report_instruction_timeout(void *h)
+{
+ unsigned long goff = GSEGPOFF((unsigned long)h);
+ char *id = "???";
+
+ if (TYPE_IS(CCH, goff))
+ id = "CCH";
+ else if (TYPE_IS(TGH, goff))
+ id = "TGH";
+ else if (TYPE_IS(TFH, goff))
+ id = "TFH";
+
+ panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
+}
+
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
int status;
@@ -64,9 +84,10 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
status = GET_MSEG_HANDLE_STATUS(h);
if (status != CCHSTATUS_ACTIVE)
break;
- if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
- panic("GRU %p is malfunctioning: start %ld, end %ld\n",
- h, start_time, (unsigned long)get_cycles());
+ if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
+ report_instruction_timeout(h);
+ start_time = get_cycles();
+ }
}
if (gru_options & OPT_STATS)
update_mcs_stats(opc, get_cycles() - start_time);
@@ -75,9 +96,18 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
int cch_allocate(struct gru_context_configuration_handle *cch)
{
+ int ret;
+
cch->opc = CCHOP_ALLOCATE;
start_instruction(cch);
- return wait_instruction_complete(cch, cchop_allocate);
+ ret = wait_instruction_complete(cch, cchop_allocate);
+
+ /*
+ * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
+ * The GSEG memory does not exist until the ALLOCATE completes.
+ */
+ sync_core();
+ return ret;
}
int cch_start(struct gru_context_configuration_handle *cch)
@@ -96,9 +126,18 @@ int cch_interrupt(struct gru_context_configuration_handle *cch)
int cch_deallocate(struct gru_context_configuration_handle *cch)
{
+ int ret;
+
cch->opc = CCHOP_DEALLOCATE;
start_instruction(cch);
- return wait_instruction_complete(cch, cchop_deallocate);
+ ret = wait_instruction_complete(cch, cchop_deallocate);
+
+ /*
+ * Stop speculation into the GSEG being unmapped by the previous
+ * DEALLOCATE.
+ */
+ sync_core();
+ return ret;
}
int cch_interrupt_sync(struct gru_context_configuration_handle
@@ -126,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh,
return wait_instruction_complete(tgh, tghop_invalidate);
}
-void tfh_write_only(struct gru_tlb_fault_handle *tfh,
- unsigned long pfn, unsigned long vaddr,
- int asid, int dirty, int pagesize)
+int tfh_write_only(struct gru_tlb_fault_handle *tfh,
+ unsigned long paddr, int gaa,
+ unsigned long vaddr, int asid, int dirty,
+ int pagesize)
{
tfh->fillasid = asid;
tfh->fillvaddr = vaddr;
- tfh->pfn = pfn;
+ tfh->pfn = paddr >> GRU_PADDR_SHIFT;
+ tfh->gaa = gaa;
tfh->dirty = dirty;
tfh->pagesize = pagesize;
tfh->opc = TFHOP_WRITE_ONLY;
start_instruction(tfh);
+ return wait_instruction_complete(tfh, tfhop_write_only);
}
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
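Illustration, not part of the patch: a worked example of the CLKS2NSEC() conversion added at the top of this file, using a hypothetical 2.0 GHz TSC on x86_64.

/* CLKS2NSEC(c) = (c) * 1000000 / tsc_khz.
 * With tsc_khz = 2000000 (2.0 GHz), 5000 cycles become
 *   5000 * 1000000 / 2000000 = 2500 ns,
 * so mcs_op_statistics now accumulates totals and maxima in nanoseconds
 * instead of raw cycle counts. */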
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index f44112242d0..3f998b924d8 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -91,6 +91,12 @@
/* Convert an arbitrary handle address to the beginning of the GRU segment */
#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))
+/* Test a valid handle address to determine the type */
+#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
+ GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
+ (((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
+
+
/* General addressing macros. */
static inline void *get_gseg_base_address(void *base, int ctxnum)
{
@@ -158,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
return vaddr + GRU_SIZE * (2 * pnode + chiplet);
}
+static inline struct gru_control_block_extended *gru_tfh_to_cbe(
+ struct gru_tlb_fault_handle *tfh)
+{
+ unsigned long cbe;
+
+ cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
+ return (struct gru_control_block_extended*)cbe;
+}
+
+
/*
@@ -236,6 +252,17 @@ enum gru_tgh_state {
TGHSTATE_RESTART_CTX,
};
+enum gru_tgh_cause {
+ TGHCAUSE_RR_ECC,
+ TGHCAUSE_TLB_ECC,
+ TGHCAUSE_LRU_ECC,
+ TGHCAUSE_PS_ECC,
+ TGHCAUSE_MUL_ERR,
+ TGHCAUSE_DATA_ERR,
+ TGHCAUSE_SW_FORCE
+};
+
+
/*
* TFH - TLB Global Handle
* Used for TLB dropins into the GRU TLB.
@@ -440,6 +467,12 @@ struct gru_control_block_extended {
unsigned int cbrexecstatus:8;
};
+/* CBE fields for active BCOPY instructions */
+#define cbe_baddr0 idef1upd
+#define cbe_baddr1 idef3upd
+#define cbe_src_cl idef6cpy
+#define cbe_nelemcur idef5upd
+
enum gru_cbr_state {
CBRSTATE_INACTIVE,
CBRSTATE_IDLE,
@@ -487,8 +520,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
unsigned long vaddrmask, int asid, int pagesize, int global, int n,
unsigned short ctxbitmap);
-void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn,
- unsigned long vaddr, int asid, int dirty, int pagesize);
+int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
+ int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_restart(struct gru_tlb_fault_handle *tfh);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 55eabfa8558..9b2062d1732 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -44,7 +44,8 @@ static int gru_user_copy_handle(void __user **dp, void *s)
static int gru_dump_context_data(void *grubase,
struct gru_context_configuration_handle *cch,
- void __user *ubuf, int ctxnum, int dsrcnt)
+ void __user *ubuf, int ctxnum, int dsrcnt,
+ int flush_cbrs)
{
void *cb, *cbe, *tfh, *gseg;
int i, scr;
@@ -55,6 +56,8 @@ static int gru_dump_context_data(void *grubase,
tfh = grubase + GRU_TFH_BASE;
for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
+ if (flush_cbrs)
+ gru_flush_cache(cb);
if (gru_user_copy_handle(&ubuf, cb))
goto fail;
if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
@@ -115,7 +118,7 @@ fail:
static int gru_dump_context(struct gru_state *gru, int ctxnum,
void __user *ubuf, void __user *ubufend, char data_opt,
- char lock_cch)
+ char lock_cch, char flush_cbrs)
{
struct gru_dump_context_header hdr;
struct gru_dump_context_header __user *uhdr = ubuf;
@@ -159,8 +162,7 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFBIG;
else
ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
- dsrcnt);
-
+ dsrcnt, flush_cbrs);
}
if (cch_locked)
unlock_cch_handle(cch);
@@ -215,7 +217,8 @@ int gru_dump_chiplet_request(unsigned long arg)
for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
if (req.ctxnum == ctxnum || req.ctxnum < 0) {
ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
- req.data_opt, req.lock_cch);
+ req.data_opt, req.lock_cch,
+ req.flush_cbrs);
if (ret < 0)
goto fail;
ubuf += ret;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 766e21e1557..34749ee88df 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <asm/io_apic.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
@@ -97,9 +98,6 @@
#define ASYNC_HAN_TO_BID(h) ((h) - 1)
#define ASYNC_BID_TO_HAN(b) ((b) + 1)
#define ASYNC_HAN_TO_BS(h) gru_base[ASYNC_HAN_TO_BID(h)]
-#define KCB_TO_GID(cb) ((cb - gru_start_vaddr) / \
- (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
-#define KCB_TO_BS(cb) gru_base[KCB_TO_GID(cb)]
#define GRU_NUM_KERNEL_CBR 1
#define GRU_NUM_KERNEL_DSR_BYTES 256
@@ -160,8 +158,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
up_read(&bs->bs_kgts_sema);
down_write(&bs->bs_kgts_sema);
- if (!bs->bs_kgts)
- bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+ if (!bs->bs_kgts) {
+ bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
+ bs->bs_kgts->ts_user_blade_id = blade_id;
+ }
kgts = bs->bs_kgts;
if (!kgts->ts_gru) {
@@ -172,9 +172,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
GRU_NUM_KERNEL_DSR_BYTES * ncpus +
bs->bs_async_dsr_bytes);
- while (!gru_assign_gru_context(kgts, blade_id)) {
+ while (!gru_assign_gru_context(kgts)) {
msleep(1);
- gru_steal_context(kgts, blade_id);
+ gru_steal_context(kgts);
}
gru_load_context(kgts);
gru = bs->bs_kgts->ts_gru;
@@ -200,13 +200,15 @@ static int gru_free_kernel_contexts(void)
bs = gru_base[bid];
if (!bs)
continue;
+
+ /* Ignore busy contexts. Don't want to block here. */
if (down_write_trylock(&bs->bs_kgts_sema)) {
kgts = bs->bs_kgts;
if (kgts && kgts->ts_gru)
gru_unload_context(kgts, 0);
- kfree(kgts);
bs->bs_kgts = NULL;
up_write(&bs->bs_kgts_sema);
+ kfree(kgts);
} else {
ret++;
}
@@ -220,13 +222,21 @@ static int gru_free_kernel_contexts(void)
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
struct gru_blade_state *bs;
+ int bid;
STAT(lock_kernel_context);
- bs = gru_base[blade_id];
+again:
+ bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
+ bs = gru_base[bid];
+ /* Handle the case where migration occurred while waiting for the sema */
down_read(&bs->bs_kgts_sema);
+ if (blade_id < 0 && bid != uv_numa_blade_id()) {
+ up_read(&bs->bs_kgts_sema);
+ goto again;
+ }
if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
- gru_load_kernel_context(bs, blade_id);
+ gru_load_kernel_context(bs, bid);
return bs;
}
@@ -255,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
preempt_disable();
- bs = gru_lock_kernel_context(uv_numa_blade_id());
+ bs = gru_lock_kernel_context(-1);
lcpu = uv_blade_processor_id();
*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
@@ -384,13 +394,31 @@ int gru_get_cb_exception_detail(void *cb,
struct control_block_extended_exc_detail *excdet)
{
struct gru_control_block_extended *cbe;
- struct gru_blade_state *bs;
- int cbrnum;
-
- bs = KCB_TO_BS(cb);
- cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
+ struct gru_thread_state *kgts = NULL;
+ unsigned long off;
+ int cbrnum, bid;
+
+ /*
+ * Locate kgts for cb. This algorithm is SLOW but
+ * this function is rarely called (i.e., almost never).
+ * Performance does not matter.
+ */
+ for_each_possible_blade(bid) {
+ if (!gru_base[bid])
+ break;
+ kgts = gru_base[bid]->bs_kgts;
+ if (!kgts || !kgts->ts_gru)
+ continue;
+ off = cb - kgts->ts_gru->gs_gru_base_vaddr;
+ if (off < GRU_SIZE)
+ break;
+ kgts = NULL;
+ }
+ BUG_ON(!kgts);
+ cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
cbe = get_cbe(GRUBASE(cb), cbrnum);
gru_flush_cache(cbe); /* CBE not coherent */
+ sync_core();
excdet->opc = cbe->opccpy;
excdet->exopc = cbe->exopccpy;
excdet->ecause = cbe->ecause;
@@ -409,8 +437,8 @@ char *gru_get_cb_exception_detail_str(int ret, void *cb,
if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
gru_get_cb_exception_detail(cb, &excdet);
snprintf(buf, size,
- "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
- "excdet0 0x%lx, excdet1 0x%x",
+ "GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
+ "excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
gen, excdet.opc, excdet.exopc, excdet.ecause,
excdet.exceptdet0, excdet.exceptdet1);
} else {
@@ -457,9 +485,10 @@ int gru_check_status_proc(void *cb)
int ret;
ret = gen->istatus;
- if (ret != CBS_EXCEPTION)
- return ret;
- return gru_retry_exception(cb);
+ if (ret == CBS_EXCEPTION)
+ ret = gru_retry_exception(cb);
+ rmb();
+ return ret;
}
@@ -471,7 +500,7 @@ int gru_wait_proc(void *cb)
ret = gru_wait_idle_or_exception(gen);
if (ret == CBS_EXCEPTION)
ret = gru_retry_exception(cb);
-
+ rmb();
return ret;
}
@@ -538,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd,
mqd->mq = mq;
mqd->mq_gpa = uv_gpa(mq);
mqd->qlines = qlines;
- mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
+ mqd->interrupt_pnode = nasid >> 1;
mqd->interrupt_vector = vector;
mqd->interrupt_apicid = apicid;
return 0;
@@ -598,6 +627,8 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
ret = MQE_UNEXPECTED_CB_ERR;
break;
case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_noop_page_overflow);
+ /* fallthru */
default:
BUG();
}
@@ -673,18 +704,6 @@ cberr:
}
/*
- * Send a cross-partition interrupt to the SSI that contains the target
- * message queue. Normally, the interrupt is automatically delivered by hardware
- * but some error conditions require explicit delivery.
- */
-static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
-{
- if (mqd->interrupt_vector)
- uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
- mqd->interrupt_vector);
-}
-
-/*
* Handle a PUT failure. Note: if message was a 2-line message, one of the
* lines might have successfully have been written. Before sending the
* message, "present" must be cleared in BOTH lines to prevent the receiver
@@ -693,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
- unsigned long m;
+ unsigned long m, *val = mesg, gpa, save;
+ int ret;
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
if (lines == 2) {
@@ -704,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
if (gru_wait(cb) != CBS_IDLE)
return MQE_UNEXPECTED_CB_ERR;
- send_message_queue_interrupt(mqd);
+
+ if (!mqd->interrupt_vector)
+ return MQE_OK;
+
+ /*
+ * Send a cross-partition interrupt to the SSI that contains the target
+ * message queue. Normally, the interrupt is automatically delivered by
+ * hardware but some error conditions require explicit delivery.
+ * Use the GRU to deliver the interrupt. Otherwise partition failures
+ * could cause unrecovered errors.
+ */
+ gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
+ save = *val;
+ *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
+ dest_Fixed);
+ gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
+ ret = gru_wait(cb);
+ *val = save;
+ if (ret != CBS_IDLE)
+ return MQE_UNEXPECTED_CB_ERR;
return MQE_OK;
}
@@ -739,6 +778,9 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
STAT(mesq_send_put_nacked);
ret = send_message_put_nacked(cb, mqd, mesg, lines);
break;
+ case CBSS_PAGE_OVERFLOW:
+ STAT(mesq_page_overflow);
+ /* fallthru */
default:
BUG();
}
@@ -831,7 +873,6 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
int present = mhdr->present;
/* skip NOOP messages */
- STAT(mesq_receive);
while (present == MQS_NOOP) {
gru_free_message(mqd, mhdr);
mhdr = mq->next;
@@ -851,6 +892,7 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
if (mhdr->lines == 2)
restore_present2(mhdr, mhdr->present2);
+ STAT(mesq_receive);
return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
@@ -858,6 +900,29 @@ EXPORT_SYMBOL_GPL(gru_get_next_message);
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
/*
+ * Load a DW from a global GPA. The GPA can be a memory or MMR address.
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa)
+{
+ void *cb;
+ void *dsr;
+ int ret, iaa;
+
+ STAT(read_gpa);
+ if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
+ return MQE_BUG_NO_RESOURCES;
+ iaa = gpa >> 62;
+ gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
+ ret = gru_wait(cb);
+ if (ret == CBS_IDLE)
+ *value = *(unsigned long *)dsr;
+ gru_free_cpu_resources(cb, dsr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gru_read_gpa);
+
+
+/*
* Copy a block of data using the GRU resources
*/
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
@@ -898,24 +963,24 @@ static int quicktest0(unsigned long arg)
gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
- printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
goto done;
}
if (*p != MAGIC) {
- printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
+ printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
goto done;
}
gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
if (gru_wait(cb) != CBS_IDLE) {
- printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
+ printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
goto done;
}
if (word0 != word1 || word1 != MAGIC) {
printk(KERN_DEBUG
- "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
- word1, MAGIC);
+ "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
+ smp_processor_id(), word1, MAGIC);
goto done;
}
ret = 0;
@@ -952,8 +1017,11 @@ static int quicktest1(unsigned long arg)
if (ret)
break;
}
- if (ret != MQE_QUEUE_FULL || i != 4)
+ if (ret != MQE_QUEUE_FULL || i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n",
+ smp_processor_id(), ret, i);
goto done;
+ }
for (i = 0; i < 6; i++) {
m = gru_get_next_message(&mqd);
@@ -961,7 +1029,12 @@ static int quicktest1(unsigned long arg)
break;
gru_free_message(&mqd, m);
}
- ret = (i == 4) ? 0 : -EIO;
+ if (i != 4) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n",
+ smp_processor_id(), i, m, m ? m[8] : -1);
+ goto done;
+ }
+ ret = 0;
done:
kfree(p);
@@ -977,6 +1050,7 @@ static int quicktest2(unsigned long arg)
int ret = 0;
unsigned long *buf;
void *cb0, *cb;
+ struct gru_control_block_status *gen;
int i, k, istatus, bytes;
bytes = numcb * 4 * 8;
@@ -996,20 +1070,30 @@ static int quicktest2(unsigned long arg)
XTYPE_DW, 4, 1, IMA_INTERRUPT);
ret = 0;
- for (k = 0; k < numcb; k++) {
+ k = numcb;
+ do {
gru_wait_async_cbr(han);
for (i = 0; i < numcb; i++) {
cb = cb0 + i * GRU_HANDLE_STRIDE;
istatus = gru_check_status(cb);
- if (istatus == CBS_ACTIVE)
- continue;
- if (istatus == CBS_EXCEPTION)
- ret = -EFAULT;
- else if (buf[i] || buf[i + 1] || buf[i + 2] ||
- buf[i + 3])
- ret = -EIO;
+ if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
+ break;
}
- }
+ if (i == numcb)
+ continue;
+ if (istatus != CBS_IDLE) {
+ printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
+ ret = -EFAULT;
+ } else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
+ buf[4 * i + 3]) {
+ printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
+ smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
+ ret = -EIO;
+ }
+ k--;
+ gen = cb;
+ gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
+ } while (k);
BUG_ON(cmp.done);
gru_unlock_async_resource(han);
@@ -1019,6 +1103,22 @@ done:
return ret;
}
+#define BUFSIZE 200
+static int quicktest3(unsigned long arg)
+{
+ char buf1[BUFSIZE], buf2[BUFSIZE];
+ int ret = 0;
+
+ memset(buf2, 0, sizeof(buf2));
+ memset(buf1, get_cycles() & 255, sizeof(buf1));
+ gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
+ if (memcmp(buf1, buf2, BUFSIZE)) {
+ printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
+ ret = -EIO;
+ }
+ return ret;
+}
+
/*
* Debugging only. User hook for various kernel tests
* of driver & gru.
@@ -1037,6 +1137,9 @@ int gru_ktest(unsigned long arg)
case 2:
ret = quicktest2(arg);
break;
+ case 3:
+ ret = quicktest3(arg);
+ break;
case 99:
ret = gru_free_kernel_contexts();
break;
diff --git a/drivers/misc/sgi-gru/grukservices.h b/drivers/misc/sgi-gru/grukservices.h
index d60d34bca44..02aa94d8484 100644
--- a/drivers/misc/sgi-gru/grukservices.h
+++ b/drivers/misc/sgi-gru/grukservices.h
@@ -131,6 +131,20 @@ extern void *gru_get_next_message(struct gru_message_queue_desc *mqd);
/*
+ * Read a GRU global GPA. Source can be located in a remote partition.
+ *
+ * Input:
+ * value memory address where MMR value is returned
+ * gpa source numalink physical address of GPA
+ *
+ * Output:
+ * 0 OK
+ * >0 error
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa);
+
+
+/*
* Copy data using the GRU. Source or destination can be located in a remote
* partition.
*
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index 889bc442a3e..e77d1b1f9d0 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -63,18 +63,9 @@
#define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th))
#define GSEG_START(cb) ((void *)((unsigned long)(cb) & ~(GRU_GSEG_PAGESIZE - 1)))
-/*
- * Statictics kept on a per-GTS basis.
- */
-struct gts_statistics {
- unsigned long fmm_tlbdropin;
- unsigned long upm_tlbdropin;
- unsigned long context_stolen;
-};
-
struct gru_get_gseg_statistics_req {
- unsigned long gseg;
- struct gts_statistics stats;
+ unsigned long gseg;
+ struct gru_gseg_statistics stats;
};
/*
@@ -86,6 +77,7 @@ struct gru_create_context_req {
unsigned int control_blocks;
unsigned int maximum_thread_count;
unsigned int options;
+ unsigned char tlb_preload_count;
};
/*
@@ -98,11 +90,12 @@ struct gru_unload_context_req {
/*
* Structure used to set context options
*/
-enum {sco_gseg_owner, sco_cch_req_slice};
+enum {sco_gseg_owner, sco_cch_req_slice, sco_blade_chiplet};
struct gru_set_context_option_req {
unsigned long gseg;
int op;
- unsigned long val1;
+ int val0;
+ long val1;
};
/*
@@ -124,6 +117,8 @@ struct gru_dump_chiplet_state_req {
int ctxnum;
char data_opt;
char lock_cch;
+ char flush_cbrs;
+ char fill[10];
pid_t pid;
void *buf;
size_t buflen;
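
A hedged sketch of filling the widened option request above for the new sco_blade_chiplet option; this hunk does not show the ioctl handler, so the meaning of val0 (assumed: chiplet, -1 = any) and val1 (assumed: blade id) is an assumption, as are the placeholder values:

	/* Sketch only; field semantics are assumptions, gseg_vaddr is a placeholder. */
	struct gru_set_context_option_req req = {
		.gseg = gseg_vaddr,		/* assumed: base address of the GSEG */
		.op   = sco_blade_chiplet,
		.val0 = -1,			/* assumed: chiplet, -1 = any on the blade */
		.val1 = 1,			/* assumed: target blade id */
	};
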
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 3bc643dad60..f8538bbd0bf 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -27,6 +27,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
@@ -48,12 +49,20 @@ struct device *grudev = &gru_device;
/*
* Select a gru fault map to be used by the current cpu. Note that
* multiple cpus may be using the same map.
- * ZZZ should "shift" be used?? Depends on HT cpu numbering
* ZZZ should be inline but did not work on emulator
*/
int gru_cpu_fault_map_id(void)
{
+#ifdef CONFIG_IA64
return uv_blade_processor_id() % GRU_NUM_TFM;
+#else
+ int cpu = smp_processor_id();
+ int id, core;
+
+ core = uv_cpu_core_number(cpu);
+ id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+ return id;
+#endif
}
/*--------- ASID Management -------------------------------------------
@@ -286,7 +295,8 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
void gts_drop(struct gru_thread_state *gts)
{
if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
- gru_drop_mmu_notifier(gts->ts_gms);
+ if (gts->ts_gms)
+ gru_drop_mmu_notifier(gts->ts_gms);
kfree(gts);
STAT(gts_free);
}
@@ -310,16 +320,18 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
* Allocate a thread state structure.
*/
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
- int cbr_au_count, int dsr_au_count, int options, int tsid)
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid)
{
struct gru_thread_state *gts;
+ struct gru_mm_struct *gms;
int bytes;
bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
bytes += sizeof(struct gru_thread_state);
gts = kmalloc(bytes, GFP_KERNEL);
if (!gts)
- return NULL;
+ return ERR_PTR(-ENOMEM);
STAT(gts_alloc);
memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
@@ -327,7 +339,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
mutex_init(&gts->ts_ctxlock);
gts->ts_cbr_au_count = cbr_au_count;
gts->ts_dsr_au_count = dsr_au_count;
+ gts->ts_tlb_preload_count = tlb_preload_count;
gts->ts_user_options = options;
+ gts->ts_user_blade_id = -1;
+ gts->ts_user_chiplet_id = -1;
gts->ts_tsid = tsid;
gts->ts_ctxnum = NULLCTX;
gts->ts_tlb_int_select = -1;
@@ -336,9 +351,10 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
if (vma) {
gts->ts_mm = current->mm;
gts->ts_vma = vma;
- gts->ts_gms = gru_register_mmu_notifier();
- if (!gts->ts_gms)
+ gms = gru_register_mmu_notifier();
+ if (IS_ERR(gms))
goto err;
+ gts->ts_gms = gms;
}
gru_dbg(grudev, "alloc gts %p\n", gts);
@@ -346,7 +362,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
err:
gts_drop(gts);
- return NULL;
+ return ERR_CAST(gms);
}
/*
@@ -360,6 +376,7 @@ struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
if (!vdata)
return NULL;
+ STAT(vdata_alloc);
INIT_LIST_HEAD(&vdata->vd_head);
spin_lock_init(&vdata->vd_lock);
gru_dbg(grudev, "alloc vdata %p\n", vdata);
@@ -392,10 +409,12 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
struct gru_vma_data *vdata = vma->vm_private_data;
struct gru_thread_state *gts, *ngts;
- gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
+ gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
+ vdata->vd_dsr_au_count,
+ vdata->vd_tlb_preload_count,
vdata->vd_user_options, tsid);
- if (!gts)
- return NULL;
+ if (IS_ERR(gts))
+ return gts;
spin_lock(&vdata->vd_lock);
ngts = gru_find_current_gts_nolock(vdata, tsid);
@@ -493,6 +512,9 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
memset(cbe + i * GRU_HANDLE_STRIDE, 0,
GRU_CACHE_LINE_BYTES);
}
+ /* Flush CBE to hide race in context restart */
+ mb();
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
cb += GRU_HANDLE_STRIDE;
}
@@ -513,6 +535,12 @@ static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
cb = gseg + GRU_CB_BASE;
cbe = grubase + GRU_CBE_BASE;
length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
+
+ /* CBEs may not be coherent. Flush them from cache */
+ for_each_cbr_in_allocation_map(i, &cbrmap, scr)
+ gru_flush_cache(cbe + i * GRU_HANDLE_STRIDE);
+ mb(); /* Let the CL flush complete */
+
gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
@@ -533,7 +561,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
- gru_dbg(grudev, "gts %p\n", gts);
+ gru_dbg(grudev, "gts %p, cbrmap 0x%lx, dsrmap 0x%lx\n",
+ gts, gts->ts_cbr_map, gts->ts_dsr_map);
lock_cch_handle(cch);
if (cch_interrupt_sync(cch))
BUG();
@@ -549,7 +578,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
if (cch_deallocate(cch))
BUG();
- gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */
unlock_cch_handle(cch);
gru_free_gru_context(gts);
@@ -565,9 +593,7 @@ void gru_load_context(struct gru_thread_state *gts)
struct gru_context_configuration_handle *cch;
int i, err, asid, ctxnum = gts->ts_ctxnum;
- gru_dbg(grudev, "gts %p\n", gts);
cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
-
lock_cch_handle(cch);
cch->tfm_fault_bit_enable =
(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
@@ -591,6 +617,7 @@ void gru_load_context(struct gru_thread_state *gts)
cch->unmap_enable = 1;
cch->tfm_done_bit_enable = 1;
cch->cb_int_enable = 1;
+ cch->tlb_int_select = 0; /* For now, ints go to cpu 0 */
} else {
cch->unmap_enable = 0;
cch->tfm_done_bit_enable = 0;
@@ -616,17 +643,18 @@ void gru_load_context(struct gru_thread_state *gts)
if (cch_start(cch))
BUG();
unlock_cch_handle(cch);
+
+ gru_dbg(grudev, "gid %d, gts %p, cbrmap 0x%lx, dsrmap 0x%lx, tie %d, tis %d\n",
+ gts->ts_gru->gs_gid, gts, gts->ts_cbr_map, gts->ts_dsr_map,
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR), gts->ts_tlb_int_select);
}
/*
* Update fields in an active CCH:
* - retarget interrupts on local blade
* - update sizeavail mask
- * - force a delayed context unload by clearing the CCH asids. This
- * forces TLB misses for new GRU instructions. The context is unloaded
- * when the next TLB miss occurs.
*/
-int gru_update_cch(struct gru_thread_state *gts, int force_unload)
+int gru_update_cch(struct gru_thread_state *gts)
{
struct gru_context_configuration_handle *cch;
struct gru_state *gru = gts->ts_gru;
@@ -640,21 +668,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
goto exit;
if (cch_interrupt(cch))
BUG();
- if (!force_unload) {
- for (i = 0; i < 8; i++)
- cch->sizeavail[i] = gts->ts_sizeavail;
- gts->ts_tlb_int_select = gru_cpu_fault_map_id();
- cch->tlb_int_select = gru_cpu_fault_map_id();
- cch->tfm_fault_bit_enable =
- (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
- || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
- } else {
- for (i = 0; i < 8; i++)
- cch->asid[i] = 0;
- cch->tfm_fault_bit_enable = 0;
- cch->tlb_int_enable = 0;
- gts->ts_force_unload = 1;
- }
+ for (i = 0; i < 8; i++)
+ cch->sizeavail[i] = gts->ts_sizeavail;
+ gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+ cch->tlb_int_select = gru_cpu_fault_map_id();
+ cch->tfm_fault_bit_enable =
+ (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+ || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
if (cch_start(cch))
BUG();
ret = 1;
@@ -679,7 +699,54 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
gru_cpu_fault_map_id());
- return gru_update_cch(gts, 0);
+ return gru_update_cch(gts);
+}
+
+/*
+ * Check if a GRU context is allowed to use a specific chiplet. By default
+ * a context is assigned to any blade-local chiplet. However, users can
+ * override this.
+ * Returns 1 if assignment allowed, 0 otherwise
+ */
+static int gru_check_chiplet_assignment(struct gru_state *gru,
+ struct gru_thread_state *gts)
+{
+ int blade_id;
+ int chiplet_id;
+
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
+
+ chiplet_id = gts->ts_user_chiplet_id;
+ return gru->gs_blade_id == blade_id &&
+ (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
+}
+
+/*
+ * Unload the gru context if it is not assigned to the correct blade or
+ * chiplet. Misassignment can occur if the process migrates to a different
+ * blade or if the user changes the selected blade/chiplet.
+ */
+void gru_check_context_placement(struct gru_thread_state *gts)
+{
+ struct gru_state *gru;
+
+ /*
+ * If the current task is the context owner, verify that the
+ * context is correctly placed. This test is skipped for non-owner
+ * references. Pthread apps use non-owner references to the CBRs.
+ */
+ gru = gts->ts_gru;
+ if (!gru || gts->ts_tgid_owner != current->tgid)
+ return;
+
+ if (!gru_check_chiplet_assignment(gru, gts)) {
+ STAT(check_context_unload);
+ gru_unload_context(gts, 1);
+ } else if (gru_retarget_intr(gts)) {
+ STAT(check_context_retarget_intr);
+ }
}
@@ -712,13 +779,17 @@ static void gts_stolen(struct gru_thread_state *gts,
}
}
-void gru_steal_context(struct gru_thread_state *gts, int blade_id)
+void gru_steal_context(struct gru_thread_state *gts)
{
struct gru_blade_state *blade;
struct gru_state *gru, *gru0;
struct gru_thread_state *ngts = NULL;
int ctxnum, ctxnum0, flag = 0, cbr, dsr;
+ int blade_id;
+ blade_id = gts->ts_user_blade_id;
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
cbr = gts->ts_cbr_au_count;
dsr = gts->ts_dsr_au_count;
@@ -729,35 +800,39 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
gru = blade->bs_lru_gru;
if (ctxnum == 0)
gru = next_gru(blade, gru);
+ blade->bs_lru_gru = gru;
+ blade->bs_lru_ctxnum = ctxnum;
ctxnum0 = ctxnum;
gru0 = gru;
while (1) {
- if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
- break;
- spin_lock(&gru->gs_lock);
- for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
- if (flag && gru == gru0 && ctxnum == ctxnum0)
+ if (gru_check_chiplet_assignment(gru, gts)) {
+ if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
break;
- ngts = gru->gs_gts[ctxnum];
- /*
- * We are grabbing locks out of order, so trylock is
- * needed. GTSs are usually not locked, so the odds of
- * success are high. If trylock fails, try to steal a
- * different GSEG.
- */
- if (ngts && is_gts_stealable(ngts, blade))
+ spin_lock(&gru->gs_lock);
+ for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
+ if (flag && gru == gru0 && ctxnum == ctxnum0)
+ break;
+ ngts = gru->gs_gts[ctxnum];
+ /*
+ * We are grabbing locks out of order, so trylock is
+ * needed. GTSs are usually not locked, so the odds of
+ * success are high. If trylock fails, try to steal a
+ * different GSEG.
+ */
+ if (ngts && is_gts_stealable(ngts, blade))
+ break;
+ ngts = NULL;
+ }
+ spin_unlock(&gru->gs_lock);
+ if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
break;
- ngts = NULL;
- flag = 1;
}
- spin_unlock(&gru->gs_lock);
- if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
+ if (flag && gru == gru0)
break;
+ flag = 1;
ctxnum = 0;
gru = next_gru(blade, gru);
}
- blade->bs_lru_gru = gru;
- blade->bs_lru_ctxnum = ctxnum;
spin_unlock(&blade->bs_lock);
if (ngts) {
@@ -776,19 +851,34 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
}
/*
+ * Assign a gru context.
+ */
+static int gru_assign_context_number(struct gru_state *gru)
+{
+ int ctxnum;
+
+ ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
+ __set_bit(ctxnum, &gru->gs_context_map);
+ return ctxnum;
+}
+
+/*
* Scan the GRUs on the local blade & assign a GRU context.
*/
-struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
- int blade)
+struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
struct gru_state *gru, *grux;
int i, max_active_contexts;
+ int blade_id = gts->ts_user_blade_id;
-
+ if (blade_id < 0)
+ blade_id = uv_numa_blade_id();
again:
gru = NULL;
max_active_contexts = GRU_NUM_CCH;
- for_each_gru_on_blade(grux, blade, i) {
+ for_each_gru_on_blade(grux, blade_id, i) {
+ if (!gru_check_chiplet_assignment(grux, gts))
+ continue;
if (check_gru_resources(grux, gts->ts_cbr_au_count,
gts->ts_dsr_au_count,
max_active_contexts)) {
@@ -809,12 +899,9 @@ again:
reserve_gru_resources(gru, gts);
gts->ts_gru = gru;
gts->ts_blade = gru->gs_blade_id;
- gts->ts_ctxnum =
- find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
- BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
+ gts->ts_ctxnum = gru_assign_context_number(gru);
atomic_inc(&gts->ts_refcnt);
gru->gs_gts[gts->ts_ctxnum] = gts;
- __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
spin_unlock(&gru->gs_lock);
STAT(assign_context);
@@ -842,7 +929,6 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct gru_thread_state *gts;
unsigned long paddr, vaddr;
- int blade_id;
vaddr = (unsigned long)vmf->virtual_address;
gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -857,28 +943,18 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
again:
mutex_lock(&gts->ts_ctxlock);
preempt_disable();
- blade_id = uv_numa_blade_id();
- if (gts->ts_gru) {
- if (gts->ts_gru->gs_blade_id != blade_id) {
- STAT(migrated_nopfn_unload);
- gru_unload_context(gts, 1);
- } else {
- if (gru_retarget_intr(gts))
- STAT(migrated_nopfn_retarget);
- }
- }
+ gru_check_context_placement(gts);
if (!gts->ts_gru) {
STAT(load_user_context);
- if (!gru_assign_gru_context(gts, blade_id)) {
+ if (!gru_assign_gru_context(gts)) {
preempt_enable();
mutex_unlock(&gts->ts_ctxlock);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */
- blade_id = uv_numa_blade_id();
if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
- gru_steal_context(gts, blade_id);
+ gru_steal_context(gts);
goto again;
}
gru_load_context(gts);
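
Since gru_alloc_gts() and gru_register_mmu_notifier() now return ERR_PTR-encoded errors instead of NULL, callers follow the usual IS_ERR/PTR_ERR pattern; a minimal sketch with placeholder argument values:

	/* Sketch only; assumes <linux/err.h>. */
	static int example_alloc(struct vm_area_struct *vma)
	{
		struct gru_thread_state *gts;

		/* 1 CBR AU, 1 DSR AU, no TLB preload; options/tsid are placeholders */
		gts = gru_alloc_gts(vma, 1, 1, 0, 0, 0);
		if (IS_ERR(gts))
			return PTR_ERR(gts);	/* -ENOMEM or the mmu notifier's error */
		gts_drop(gts);
		return 0;
	}
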
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 3f2375c5ba5..7768b87d995 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -36,8 +36,7 @@ static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
{
unsigned long val = atomic_long_read(v);
- if (val)
- seq_printf(s, "%16lu %s\n", val, id);
+ seq_printf(s, "%16lu %s\n", val, id);
}
static int statistics_show(struct seq_file *s, void *p)
@@ -46,7 +45,8 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, vdata_free);
printstat(s, gts_alloc);
printstat(s, gts_free);
- printstat(s, vdata_double_alloc);
+ printstat(s, gms_alloc);
+ printstat(s, gms_free);
printstat(s, gts_double_allocate);
printstat(s, assign_context);
printstat(s, assign_context_failed);
@@ -59,28 +59,25 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, steal_kernel_context);
printstat(s, steal_context_failed);
printstat(s, nopfn);
- printstat(s, break_cow);
printstat(s, asid_new);
printstat(s, asid_next);
printstat(s, asid_wrap);
printstat(s, asid_reuse);
printstat(s, intr);
+ printstat(s, intr_cbr);
+ printstat(s, intr_tfh);
+ printstat(s, intr_spurious);
printstat(s, intr_mm_lock_failed);
printstat(s, call_os);
- printstat(s, call_os_offnode_reference);
- printstat(s, call_os_check_for_bug);
printstat(s, call_os_wait_queue);
printstat(s, user_flush_tlb);
printstat(s, user_unload_context);
printstat(s, user_exception);
printstat(s, set_context_option);
- printstat(s, migrate_check);
- printstat(s, migrated_retarget);
- printstat(s, migrated_unload);
- printstat(s, migrated_unload_delay);
- printstat(s, migrated_nopfn_retarget);
- printstat(s, migrated_nopfn_unload);
+ printstat(s, check_context_retarget_intr);
+ printstat(s, check_context_unload);
printstat(s, tlb_dropin);
+ printstat(s, tlb_preload_page);
printstat(s, tlb_dropin_fail_no_asid);
printstat(s, tlb_dropin_fail_upm);
printstat(s, tlb_dropin_fail_invalid);
@@ -88,16 +85,15 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, tlb_dropin_fail_idle);
printstat(s, tlb_dropin_fail_fmm);
printstat(s, tlb_dropin_fail_no_exception);
- printstat(s, tlb_dropin_fail_no_exception_war);
printstat(s, tfh_stale_on_fault);
printstat(s, mmu_invalidate_range);
printstat(s, mmu_invalidate_page);
- printstat(s, mmu_clear_flush_young);
printstat(s, flush_tlb);
printstat(s, flush_tlb_gru);
printstat(s, flush_tlb_gru_tgh);
printstat(s, flush_tlb_gru_zero_asid);
printstat(s, copy_gpa);
+ printstat(s, read_gpa);
printstat(s, mesq_receive);
printstat(s, mesq_receive_none);
printstat(s, mesq_send);
@@ -108,7 +104,6 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, mesq_send_qlimit_reached);
printstat(s, mesq_send_amo_nacked);
printstat(s, mesq_send_put_nacked);
- printstat(s, mesq_qf_not_full);
printstat(s, mesq_qf_locked);
printstat(s, mesq_qf_noop_not_full);
printstat(s, mesq_qf_switch_head_failed);
@@ -118,6 +113,7 @@ static int statistics_show(struct seq_file *s, void *p)
printstat(s, mesq_noop_qlimit_reached);
printstat(s, mesq_noop_amo_nacked);
printstat(s, mesq_noop_put_nacked);
+ printstat(s, mesq_noop_page_overflow);
return 0;
}
@@ -133,8 +129,10 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
int op;
unsigned long total, count, max;
static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
- "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
+ "cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
+ "tfh_write_restart", "tgh_invalidate"};
+ seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -154,6 +152,7 @@ static ssize_t mcs_statistics_write(struct file *file,
static int options_show(struct seq_file *s, void *p)
{
+ seq_printf(s, "#bitmask: 1=trace, 2=statistics\n");
seq_printf(s, "0x%lx\n", gru_options);
return 0;
}
@@ -183,16 +182,17 @@ static int cch_seq_show(struct seq_file *file, void *data)
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
- seq_printf(file, "#%5s%5s%6s%9s%6s%8s%8s\n", "gid", "bid",
- "ctx#", "pid", "cbrs", "dsbytes", "mode");
+ seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
+ "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
if (!ts)
continue;
- seq_printf(file, " %5d%5d%6d%9d%6d%8d%8s\n",
+ seq_printf(file, " %5d%5d%6d%7d%9d%6d%8d%8s\n",
gru->gs_gid, gru->gs_blade_id, i,
- ts->ts_tgid_owner,
+ is_kernel_context(ts) ? 0 : ts->ts_gms->ms_asids[gid].mt_asid,
+ is_kernel_context(ts) ? 0 : ts->ts_tgid_owner,
ts->ts_cbr_au_count * GRU_CBR_AU_SIZE,
ts->ts_cbr_au_count * GRU_DSR_AU_BYTES,
mode[ts->ts_user_options &
@@ -355,7 +355,7 @@ static void delete_proc_files(void)
for (p = proc_files; p->name; p++)
if (p->entry)
remove_proc_entry(p->name, proc_gru);
- remove_proc_entry("gru", NULL);
+ remove_proc_entry("gru", proc_gru->parent);
}
}
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 46990bcfa53..02a77b8b8ee 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -161,7 +161,7 @@ extern unsigned int gru_max_gids;
#define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE)
#define GRU_DRIVER_ID_STR "SGI GRU Device Driver"
-#define GRU_DRIVER_VERSION_STR "0.80"
+#define GRU_DRIVER_VERSION_STR "0.85"
/*
* GRU statistics.
@@ -171,7 +171,8 @@ struct gru_stats_s {
atomic_long_t vdata_free;
atomic_long_t gts_alloc;
atomic_long_t gts_free;
- atomic_long_t vdata_double_alloc;
+ atomic_long_t gms_alloc;
+ atomic_long_t gms_free;
atomic_long_t gts_double_allocate;
atomic_long_t assign_context;
atomic_long_t assign_context_failed;
@@ -184,28 +185,25 @@ struct gru_stats_s {
atomic_long_t steal_kernel_context;
atomic_long_t steal_context_failed;
atomic_long_t nopfn;
- atomic_long_t break_cow;
atomic_long_t asid_new;
atomic_long_t asid_next;
atomic_long_t asid_wrap;
atomic_long_t asid_reuse;
atomic_long_t intr;
+ atomic_long_t intr_cbr;
+ atomic_long_t intr_tfh;
+ atomic_long_t intr_spurious;
atomic_long_t intr_mm_lock_failed;
atomic_long_t call_os;
- atomic_long_t call_os_offnode_reference;
- atomic_long_t call_os_check_for_bug;
atomic_long_t call_os_wait_queue;
atomic_long_t user_flush_tlb;
atomic_long_t user_unload_context;
atomic_long_t user_exception;
atomic_long_t set_context_option;
- atomic_long_t migrate_check;
- atomic_long_t migrated_retarget;
- atomic_long_t migrated_unload;
- atomic_long_t migrated_unload_delay;
- atomic_long_t migrated_nopfn_retarget;
- atomic_long_t migrated_nopfn_unload;
+ atomic_long_t check_context_retarget_intr;
+ atomic_long_t check_context_unload;
atomic_long_t tlb_dropin;
+ atomic_long_t tlb_preload_page;
atomic_long_t tlb_dropin_fail_no_asid;
atomic_long_t tlb_dropin_fail_upm;
atomic_long_t tlb_dropin_fail_invalid;
@@ -213,17 +211,16 @@ struct gru_stats_s {
atomic_long_t tlb_dropin_fail_idle;
atomic_long_t tlb_dropin_fail_fmm;
atomic_long_t tlb_dropin_fail_no_exception;
- atomic_long_t tlb_dropin_fail_no_exception_war;
atomic_long_t tfh_stale_on_fault;
atomic_long_t mmu_invalidate_range;
atomic_long_t mmu_invalidate_page;
- atomic_long_t mmu_clear_flush_young;
atomic_long_t flush_tlb;
atomic_long_t flush_tlb_gru;
atomic_long_t flush_tlb_gru_tgh;
atomic_long_t flush_tlb_gru_zero_asid;
atomic_long_t copy_gpa;
+ atomic_long_t read_gpa;
atomic_long_t mesq_receive;
atomic_long_t mesq_receive_none;
@@ -235,7 +232,7 @@ struct gru_stats_s {
atomic_long_t mesq_send_qlimit_reached;
atomic_long_t mesq_send_amo_nacked;
atomic_long_t mesq_send_put_nacked;
- atomic_long_t mesq_qf_not_full;
+ atomic_long_t mesq_page_overflow;
atomic_long_t mesq_qf_locked;
atomic_long_t mesq_qf_noop_not_full;
atomic_long_t mesq_qf_switch_head_failed;
@@ -245,11 +242,13 @@ struct gru_stats_s {
atomic_long_t mesq_noop_qlimit_reached;
atomic_long_t mesq_noop_amo_nacked;
atomic_long_t mesq_noop_put_nacked;
+ atomic_long_t mesq_noop_page_overflow;
};
enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
- cchop_deallocate, tghop_invalidate, mcsop_last};
+ cchop_deallocate, tfhop_write_only, tfhop_write_restart,
+ tghop_invalidate, mcsop_last};
struct mcs_op_statistic {
atomic_long_t count;
@@ -259,8 +258,8 @@ struct mcs_op_statistic {
extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
-#define OPT_DPRINT 1
-#define OPT_STATS 2
+#define OPT_DPRINT 1
+#define OPT_STATS 2
#define IRQ_GRU 110 /* Starting IRQ number for interrupts */
@@ -283,7 +282,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define gru_dbg(dev, fmt, x...) \
do { \
if (gru_options & OPT_DPRINT) \
- dev_dbg(dev, "%s: " fmt, __func__, x); \
+ printk(KERN_DEBUG "GRU:%d %s: " fmt, smp_processor_id(), __func__, x);\
} while (0)
#else
#define gru_dbg(x...)
@@ -297,13 +296,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
#define ASID_INC 8 /* number of regions */
/* Generate a GRU asid value from a GRU base asid & a virtual address. */
-#if defined CONFIG_IA64
#define VADDR_HI_BIT 64
-#elif defined CONFIG_X86_64
-#define VADDR_HI_BIT 48
-#else
-#error "Unsupported architecture"
-#endif
#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
#define GRUASID(asid, addr) ((asid) + GRUREGION(addr))
@@ -345,6 +338,7 @@ struct gru_vma_data {
long vd_user_options;/* misc user option flags */
int vd_cbr_au_count;
int vd_dsr_au_count;
+ unsigned char vd_tlb_preload_count;
};
/*
@@ -360,6 +354,7 @@ struct gru_thread_state {
struct gru_state *ts_gru; /* GRU where the context is
loaded */
struct gru_mm_struct *ts_gms; /* asid & ioproc struct */
+ unsigned char ts_tlb_preload_count; /* TLB preload pages */
unsigned long ts_cbr_map; /* map of allocated CBRs */
unsigned long ts_dsr_map; /* map of allocated DATA
resources */
@@ -368,6 +363,8 @@ struct gru_thread_state {
long ts_user_options;/* misc user option flags */
pid_t ts_tgid_owner; /* task that is using the
context - for migration */
+ short ts_user_blade_id;/* user selected blade */
+ char ts_user_chiplet_id;/* user selected chiplet */
unsigned short ts_sizeavail; /* Pagesizes in use */
int ts_tsid; /* thread that owns the
structure */
@@ -384,13 +381,11 @@ struct gru_thread_state {
char ts_blade; /* If >= 0, migrate context if
ref from different blade */
char ts_force_cch_reload;
- char ts_force_unload;/* force context to be unloaded
- after migration */
char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
allocated CB */
int ts_data_valid; /* Indicates if ts_gdata has
valid data */
- struct gts_statistics ustats; /* User statistics */
+ struct gru_gseg_statistics ustats; /* User statistics */
unsigned long ts_gdata[0]; /* save area for GRU data (CB,
DS, CBE) */
};
@@ -422,6 +417,7 @@ struct gru_state {
gru segments (64) */
unsigned short gs_gid; /* unique GRU number */
unsigned short gs_blade_id; /* blade of GRU */
+ unsigned char gs_chiplet_id; /* blade chiplet of GRU */
unsigned char gs_tgh_local_shift; /* used to pick TGH for
local flush */
unsigned char gs_tgh_first_remote; /* starting TGH# for
@@ -453,6 +449,7 @@ struct gru_state {
in use */
struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
the context */
+ int gs_irq[GRU_NUM_TFM]; /* Interrupt irqs */
};
/*
@@ -619,6 +616,15 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
return !gts->ts_mm;
}
+/*
+ * The following are for Nehalem-EX. A more general scheme is needed for
+ * future processors.
+ */
+#define UV_MAX_INT_CORES 8
+#define uv_cpu_socket_number(p) ((cpu_physical_id(p) >> 5) & 1)
+#define uv_cpu_ht_number(p) (cpu_physical_id(p) & 1)
+#define uv_cpu_core_number(p) (((cpu_physical_id(p) >> 2) & 4) | \
+ ((cpu_physical_id(p) >> 1) & 3))
/*-----------------------------------------------------------------------------
* Function prototypes & externs
*/
@@ -633,24 +639,26 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
*vma, int tsid);
extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
*vma, int tsid);
-extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
- int blade);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
extern void gru_load_context(struct gru_thread_state *gts);
-extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
+extern void gru_steal_context(struct gru_thread_state *gts);
extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
-extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
+extern int gru_update_cch(struct gru_thread_state *gts);
extern void gts_drop(struct gru_thread_state *gts);
extern void gru_tgh_flush_init(struct gru_state *gru);
extern int gru_kservices_init(void);
extern void gru_kservices_exit(void);
+extern irqreturn_t gru0_intr(int irq, void *dev_id);
+extern irqreturn_t gru1_intr(int irq, void *dev_id);
+extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
extern int gru_dump_chiplet_request(unsigned long arg);
extern long gru_get_gseg_statistics(unsigned long arg);
-extern irqreturn_t gru_intr(int irq, void *dev_id);
extern int gru_handle_user_call_os(unsigned long address);
extern int gru_user_flush_tlb(unsigned long arg);
extern int gru_user_unload_context(unsigned long arg);
extern int gru_get_exception_detail(unsigned long arg);
extern int gru_set_context_option(unsigned long address);
+extern void gru_check_context_placement(struct gru_thread_state *gts);
extern int gru_cpu_fault_map_id(void);
extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
extern void gru_flush_all_tlb(struct gru_state *gru);
@@ -658,7 +666,8 @@ extern int gru_proc_init(void);
extern void gru_proc_exit(void);
extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
- int cbr_au_count, int dsr_au_count, int options, int tsid);
+ int cbr_au_count, int dsr_au_count,
+ unsigned char tlb_preload_count, int options, int tsid);
extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
int cbr_au_count, char *cbmap);
extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
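
To make the Nehalem-EX decode above concrete, a worked example; the APIC id value is illustrative only:

	/*
	 * Assume cpu_physical_id(p) == 0x35 (binary 110101):
	 *   uv_cpu_socket_number(p) = (0x35 >> 5) & 1           = 1
	 *   uv_cpu_ht_number(p)     =  0x35 & 1                  = 1
	 *   uv_cpu_core_number(p)   = ((0x35 >> 2) & 4) |
	 *                             ((0x35 >> 1) & 3)          = 4 | 2 = 6
	 * gru_cpu_fault_map_id() (non-IA64 path) then returns
	 *   core + UV_MAX_INT_CORES * socket = 6 + 8 * 1 = 14.
	 */
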
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c
index 1d125091f5e..240a6d36166 100644
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -184,8 +184,8 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
STAT(flush_tlb_gru_tgh);
asid = GRUASID(asid, start);
gru_dbg(grudev,
- " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n",
- gid, asid, num, asids->mt_ctxbitmap);
+ " FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
+ gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
tgh = get_lock_tgh_handle(gru);
tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
num - 1, asids->mt_ctxbitmap);
@@ -299,6 +299,7 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
{
struct gru_mm_struct *gms;
struct mmu_notifier *mn;
+ int err;
mn = mmu_find_ops(current->mm, &gru_mmuops);
if (mn) {
@@ -307,16 +308,22 @@ struct gru_mm_struct *gru_register_mmu_notifier(void)
} else {
gms = kzalloc(sizeof(*gms), GFP_KERNEL);
if (gms) {
+ STAT(gms_alloc);
spin_lock_init(&gms->ms_asid_lock);
gms->ms_notifier.ops = &gru_mmuops;
atomic_set(&gms->ms_refcnt, 1);
init_waitqueue_head(&gms->ms_wait_queue);
- __mmu_notifier_register(&gms->ms_notifier, current->mm);
+ err = __mmu_notifier_register(&gms->ms_notifier, current->mm);
+ if (err)
+ goto error;
}
}
gru_dbg(grudev, "gms %p, refcnt %d\n", gms,
atomic_read(&gms->ms_refcnt));
return gms;
+error:
+ kfree(gms);
+ return ERR_PTR(err);
}
void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
@@ -327,6 +334,7 @@ void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
if (!gms->ms_released)
mmu_notifier_unregister(&gms->ms_notifier, current->mm);
kfree(gms);
+ STAT(gms_free);
}
}
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 2275126cb33..851b2f25ce0 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -339,6 +339,7 @@ extern short xp_partition_id;
extern u8 xp_region_size;
extern unsigned long (*xp_pa) (void *);
+extern unsigned long (*xp_socket_pa) (unsigned long);
extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
size_t);
extern int (*xp_cpu_to_nasid) (int);
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 7896849b16d..01be66d02ca 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -44,6 +44,9 @@ EXPORT_SYMBOL_GPL(xp_region_size);
unsigned long (*xp_pa) (void *addr);
EXPORT_SYMBOL_GPL(xp_pa);
+unsigned long (*xp_socket_pa) (unsigned long gpa);
+EXPORT_SYMBOL_GPL(xp_socket_pa);
+
enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
const unsigned long src_gpa, size_t len);
EXPORT_SYMBOL_GPL(xp_remote_memcpy);
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index fb3ec9d735a..d8e463f8724 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -84,6 +84,15 @@ xp_pa_sn2(void *addr)
}
/*
+ * Convert a global physical to a socket physical address.
+ */
+static unsigned long
+xp_socket_pa_sn2(unsigned long gpa)
+{
+ return gpa;
+}
+
+/*
* Wrapper for bte_copy().
*
* dst_pa - physical address of the destination of the transfer.
@@ -162,6 +171,7 @@ xp_init_sn2(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_sn2;
+ xp_socket_pa = xp_socket_pa_sn2;
xp_remote_memcpy = xp_remote_memcpy_sn2;
xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
xp_expand_memprotect = xp_expand_memprotect_sn2;
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d238576b26f..a0d093274dc 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -32,12 +32,44 @@ xp_pa_uv(void *addr)
return uv_gpa(addr);
}
+/*
+ * Convert a global physical to socket physical address.
+ */
+static unsigned long
+xp_socket_pa_uv(unsigned long gpa)
+{
+ return uv_gpa_to_soc_phys_ram(gpa);
+}
+
+static enum xp_retval
+xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
+ size_t len)
+{
+ int ret;
+ unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));
+
+ BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
+ BUG_ON(len != 8);
+
+ ret = gru_read_gpa(dst_va, src_gpa);
+ if (ret == 0)
+ return xpSuccess;
+
+ dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+ "len=%ld\n", dst_gpa, src_gpa, len);
+ return xpGruCopyError;
+}
+
+
static enum xp_retval
xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
size_t len)
{
int ret;
+ if (uv_gpa_in_mmr_space(src_gpa))
+ return xp_remote_mmr_read(dst_gpa, src_gpa, len);
+
ret = gru_copy_gpa(dst_gpa, src_gpa, len);
if (ret == 0)
return xpSuccess;
@@ -123,6 +155,7 @@ xp_init_uv(void)
xp_region_size = sn_region_size;
xp_pa = xp_pa_uv;
+ xp_socket_pa = xp_socket_pa_uv;
xp_remote_memcpy = xp_remote_memcpy_uv;
xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
xp_expand_memprotect = xp_expand_memprotect_uv;
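
With the new branch above, a remote MMR can be read through the generic copy hook; a sketch (the gpa value is illustrative, and the length must be exactly 8 or the BUG_ON in xp_remote_mmr_read() fires):

	/* Sketch only: read one 8-byte remote MMR through xp_remote_memcpy(). */
	static enum xp_retval example_read_remote_mmr(unsigned long mmr_gpa,
						      unsigned long *out)
	{
		return xp_remote_memcpy(xp_pa(out), mmr_gpa, 8);
	}
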
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 65877bc5eda..9a6268c89fd 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -18,6 +18,7 @@
#include <linux/device.h>
#include <linux/hardirq.h>
#include "xpc.h"
+#include <asm/uv/uv_hub.h>
/* XPC is exiting flag */
int xpc_exiting;
@@ -92,8 +93,12 @@ xpc_get_rsvd_page_pa(int nasid)
break;
/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
- if (L1_CACHE_ALIGN(len) > buf_len) {
- kfree(buf_base);
+ if (is_shub())
+ len = L1_CACHE_ALIGN(len);
+
+ if (len > buf_len) {
+ if (buf_base != NULL)
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
@@ -105,7 +110,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
}
- ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
+ ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
if (ret != xpSuccess) {
dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
break;
@@ -143,7 +148,7 @@ xpc_setup_rsvd_page(void)
dev_err(xpc_part, "SAL failed to locate the reserved page\n");
return -ESRCH;
}
- rp = (struct xpc_rsvd_page *)__va(rp_pa);
+ rp = (struct xpc_rsvd_page *)__va(xp_socket_pa(rp_pa));
if (rp->SAL_version < 3) {
/* SAL_versions < 3 had a SAL_partid defined as a u8 */
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b5bbe59f9c5..8725d5e8ab0 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -157,22 +157,24 @@ xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
-#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
- mq->order, &mq->mmr_offset);
- if (ret < 0) {
- dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
- "ret=%d\n", ret);
- return ret;
- }
-#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_alloc(mq->mmr_blade, (void *)uv_gpa(mq->address),
+#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+
+ ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
+#elif defined CONFIG_X86_64
+ ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
+ mq->order, &mq->mmr_offset);
+ if (ret < 0) {
+ dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+ "ret=%d\n", ret);
+ return ret;
+ }
#else
#error not a supported configuration
#endif
@@ -185,12 +187,13 @@ static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
+ int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
- ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
- ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+ ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
@@ -204,6 +207,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
enum xp_retval xp_ret;
int ret;
int nid;
+ int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
@@ -259,9 +263,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
goto out_5;
}
+ nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
+
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
- nid, mmr_value->vector, mmr_value->dest);
+ nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
@@ -946,11 +952,13 @@ xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
+
+ head->n_entries--;
+ BUG_ON(head->n_entries < 0);
+
+ first->next = NULL;
}
- head->n_entries--;
- BUG_ON(head->n_entries < 0);
spin_unlock_irqrestore(&head->lock, irq_flags);
- first->next = NULL;
return first;
}
@@ -1019,7 +1027,8 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
- while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+ while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
+ (part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
@@ -1422,7 +1431,6 @@ xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
- BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
@@ -1646,8 +1654,6 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
-
- msg->hdr.msg_slot_number += ch->remote_nentries;
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
new file mode 100644
index 00000000000..d3f229a3a77
--- /dev/null
+++ b/drivers/misc/ti_dac7512.c
@@ -0,0 +1,101 @@
+/*
+ * dac7512.c - Linux kernel module for
+ * Texas Instruments DAC7512
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+
+#define DAC7512_DRV_NAME "dac7512"
+#define DRIVER_VERSION "1.0"
+
+static ssize_t dac7512_store_val(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char tmp[2];
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ tmp[0] = val >> 8;
+ tmp[1] = val & 0xff;
+ spi_write(spi, tmp, sizeof(tmp));
+ return count;
+}
+
+static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val);
+
+static struct attribute *dac7512_attributes[] = {
+ &dev_attr_value.attr,
+ NULL
+};
+
+static const struct attribute_group dac7512_attr_group = {
+ .attrs = dac7512_attributes,
+};
+
+static int __devinit dac7512_probe(struct spi_device *spi)
+{
+ int ret;
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
+}
+
+static int __devexit dac7512_remove(struct spi_device *spi)
+{
+ sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
+ return 0;
+}
+
+static struct spi_driver dac7512_driver = {
+ .driver = {
+ .name = DAC7512_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dac7512_probe,
+ .remove = __devexit_p(dac7512_remove),
+};
+
+static int __init dac7512_init(void)
+{
+ return spi_register_driver(&dac7512_driver);
+}
+
+static void __exit dac7512_exit(void)
+{
+ spi_unregister_driver(&dac7512_driver);
+}
+
+MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
+MODULE_DESCRIPTION("DAC7512 12-bit DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
+
+module_init(dac7512_init);
+module_exit(dac7512_exit);
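
A userspace sketch of driving the new 'value' attribute; the sysfs path depends on the SPI bus/chip-select the DAC sits on, so the spi0.0 name is an assumption:

	/* Userspace sketch: write a raw DAC code to the sysfs 'value' attribute. */
	#include <stdio.h>

	static int set_dac(unsigned int code)
	{
		FILE *f = fopen("/sys/bus/spi/devices/spi0.0/value", "w");	/* path assumed */

		if (!f)
			return -1;
		fprintf(f, "%u\n", code);	/* parsed in the driver with strict_strtoul(), base 10 */
		return fclose(f);
	}
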
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index b8e7c5ae981..f53755533e7 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/serial_reg.h>
@@ -73,11 +74,10 @@ struct uart_icount {
};
struct sdio_uart_port {
+ struct tty_port port;
struct kref kref;
struct tty_struct *tty;
unsigned int index;
- unsigned int opened;
- struct mutex open_lock;
struct sdio_func *func;
struct mutex func_lock;
struct task_struct *in_sdio_uart_irq;
@@ -87,6 +87,7 @@ struct sdio_uart_port {
struct uart_icount icount;
unsigned int uartclk;
unsigned int mctrl;
+ unsigned int rx_mctrl;
unsigned int read_status_mask;
unsigned int ignore_status_mask;
unsigned char x_char;
@@ -102,7 +103,6 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
int index, ret = -EBUSY;
kref_init(&port->kref);
- mutex_init(&port->open_lock);
mutex_init(&port->func_lock);
spin_lock_init(&port->write_lock);
@@ -151,6 +151,7 @@ static void sdio_uart_port_put(struct sdio_uart_port *port)
static void sdio_uart_port_remove(struct sdio_uart_port *port)
{
struct sdio_func *func;
+ struct tty_struct *tty;
BUG_ON(sdio_uart_table[port->index] != port);
@@ -165,15 +166,19 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port)
* give up on that port ASAP.
* Beware: the lock ordering is critical.
*/
- mutex_lock(&port->open_lock);
+ mutex_lock(&port->port.mutex);
mutex_lock(&port->func_lock);
func = port->func;
sdio_claim_host(func);
port->func = NULL;
mutex_unlock(&port->func_lock);
- if (port->opened)
- tty_hangup(port->tty);
- mutex_unlock(&port->open_lock);
+ tty = tty_port_tty_get(&port->port);
+ /* tty_hangup is async so is this safe as is ?? */
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ mutex_unlock(&port->port.mutex);
sdio_release_irq(func);
sdio_disable_func(func);
sdio_release_host(func);
@@ -217,6 +222,8 @@ static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
unsigned char status;
unsigned int ret;
+ /* FIXME: What stops this losing the delta bits and breaking
+ sdio_uart_check_modem_status ? */
status = sdio_in(port, UART_MSR);
ret = 0;
@@ -391,7 +398,7 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
static void sdio_uart_receive_chars(struct sdio_uart_port *port,
unsigned int *status)
{
- struct tty_struct *tty = port->tty;
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
unsigned int ch, flag;
int max_count = 256;
@@ -428,24 +435,30 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
}
if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ if (tty)
+ tty_insert_flip_char(tty, ch, flag);
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (*status & ~port->ignore_status_mask & UART_LSR_OE)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ if (tty)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
*status = sdio_in(port, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- tty_flip_buffer_push(tty);
+ if (tty) {
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
}
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
{
struct circ_buf *xmit = &port->xmit;
int count;
+ struct tty_struct *tty;
if (port->x_char) {
sdio_out(port, UART_TX, port->x_char);
@@ -453,8 +466,13 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
port->x_char = 0;
return;
}
- if (circ_empty(xmit) || port->tty->stopped || port->tty->hw_stopped) {
+
+ tty = tty_port_tty_get(&port->port);
+
+ if (tty == NULL || circ_empty(xmit) ||
+ tty->stopped || tty->hw_stopped) {
sdio_uart_stop_tx(port);
+ tty_kref_put(tty);
return;
}
@@ -468,15 +486,17 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
} while (--count > 0);
if (circ_chars_pending(xmit) < WAKEUP_CHARS)
- tty_wakeup(port->tty);
+ tty_wakeup(tty);
if (circ_empty(xmit))
sdio_uart_stop_tx(port);
+ tty_kref_put(tty);
}
static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
{
int status;
+ struct tty_struct *tty;
status = sdio_in(port, UART_MSR);
@@ -487,25 +507,39 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
port->icount.rng++;
if (status & UART_MSR_DDSR)
port->icount.dsr++;
- if (status & UART_MSR_DDCD)
+ if (status & UART_MSR_DDCD) {
port->icount.dcd++;
+ /* DCD raise - wake for open */
+ if (status & UART_MSR_DCD)
+ wake_up_interruptible(&port->port.open_wait);
+ else {
+ /* DCD drop - hang up if tty attached */
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ }
+ }
if (status & UART_MSR_DCTS) {
port->icount.cts++;
- if (port->tty->termios->c_cflag & CRTSCTS) {
+ tty = tty_port_tty_get(&port->port);
+ if (tty && (tty->termios->c_cflag & CRTSCTS)) {
int cts = (status & UART_MSR_CTS);
- if (port->tty->hw_stopped) {
+ if (tty->hw_stopped) {
if (cts) {
- port->tty->hw_stopped = 0;
+ tty->hw_stopped = 0;
sdio_uart_start_tx(port);
- tty_wakeup(port->tty);
+ tty_wakeup(tty);
}
} else {
if (!cts) {
- port->tty->hw_stopped = 1;
+ tty->hw_stopped = 1;
sdio_uart_stop_tx(port);
}
}
}
+ tty_kref_put(tty);
}
}
@@ -542,8 +576,62 @@ static void sdio_uart_irq(struct sdio_func *func)
port->in_sdio_uart_irq = NULL;
}
-static int sdio_uart_startup(struct sdio_uart_port *port)
+static int uart_carrier_raised(struct tty_port *tport)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ unsigned int ret = sdio_uart_claim_func(port);
+ if (ret) /* Missing hardware shouldn't block for carrier */
+ return 1;
+ ret = sdio_uart_get_mctrl(port);
+ sdio_uart_release_func(port);
+ if (ret & TIOCM_CAR)
+ return 1;
+ return 0;
+}
+
+/**
+ * uart_dtr_rts - port helper to set uart signals
+ * @tport: tty port to be updated
+ * @onoff: set to turn on DTR/RTS
+ *
+ * Called by the tty port helpers when the modem signals need to be
+ * adjusted during an open, close and hangup.
+ */
+
+static void uart_dtr_rts(struct tty_port *tport, int onoff)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ int ret = sdio_uart_claim_func(port);
+ if (ret)
+ return;
+ if (onoff == 0)
+ sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ else
+ sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ sdio_uart_release_func(port);
+}
+
+/**
+ * sdio_uart_activate - start up hardware
+ * @tport: tty port to activate
+ * @tty: tty bound to this port
+ *
+ * Activate a tty port. The port locking guarantees us this will be
+ * run exactly once per set of opens, and if successful will see the
+ * shutdown method run exactly once to match. Start up and shutdown are
+ * protected from each other by the internal locking and will not run
+ * at the same time even during a hangup event.
+ *
+ * If we successfully start up the port we take an extra kref as we
+ * will keep it around until shutdown when the kref is dropped.
+ */
+
+static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
unsigned long page;
int ret;
@@ -551,7 +639,7 @@ static int sdio_uart_startup(struct sdio_uart_port *port)
* Set the TTY IO error marker - we will only clear this
* once we have successfully opened the port.
*/
- set_bit(TTY_IO_ERROR, &port->tty->flags);
+ set_bit(TTY_IO_ERROR, &tty->flags);
/* Initialise and allocate the transmit buffer. */
page = __get_free_page(GFP_KERNEL);
@@ -592,19 +680,19 @@ static int sdio_uart_startup(struct sdio_uart_port *port)
*/
sdio_out(port, UART_LCR, UART_LCR_WLEN8);
- port->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE;
+ port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;
port->mctrl = TIOCM_OUT2;
- sdio_uart_change_speed(port, port->tty->termios, NULL);
+ sdio_uart_change_speed(port, tty->termios, NULL);
- if (port->tty->termios->c_cflag & CBAUD)
+ if (tty->termios->c_cflag & CBAUD)
sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
- if (port->tty->termios->c_cflag & CRTSCTS)
+ if (tty->termios->c_cflag & CRTSCTS)
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
- port->tty->hw_stopped = 1;
+ tty->hw_stopped = 1;
- clear_bit(TTY_IO_ERROR, &port->tty->flags);
+ clear_bit(TTY_IO_ERROR, &tty->flags);
/* Kick the IRQ handler once while we're still holding the host lock */
sdio_uart_irq(port->func);
@@ -621,8 +709,20 @@ err1:
return ret;
}
-static void sdio_uart_shutdown(struct sdio_uart_port *port)
+/**
+ * sdio_uart_shutdown - stop hardware
+ * @tport: tty port to shut down
+ *
+ * Deactivate a tty port. The port locking guarantees us this will be
+ * run only if a successful matching activate already ran. The two are
+ * protected from each other by the internal locking and will not run
+ * at the same time even during a hangup event.
+ */
+
+static void sdio_uart_shutdown(struct tty_port *tport)
{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
int ret;
ret = sdio_uart_claim_func(port);
@@ -631,12 +731,6 @@ static void sdio_uart_shutdown(struct sdio_uart_port *port)
sdio_uart_stop_rx(port);
- /* TODO: wait here for TX FIFO to drain */
-
- /* Turn off DTR and RTS early. */
- if (port->tty->termios->c_cflag & HUPCL)
- sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-
/* Disable interrupts from this port */
sdio_release_irq(port->func);
port->ier = 0;
@@ -661,77 +755,70 @@ skip:
free_page((unsigned long)port->xmit.buf);
}
-static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+/**
+ * sdio_uart_install - install method
+ * @driver: the driver in use (sdio_uart in our case)
+ * @tty: the tty being bound
+ *
+ * Look up and bind the tty and the driver together. Initialize
+ * any needed private data (in our case the termios)
+ */
+
+static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
{
- struct sdio_uart_port *port;
- int ret;
+ int idx = tty->index;
+ struct sdio_uart_port *port = sdio_uart_port_get(idx);
+ int ret = tty_init_termios(tty);
+
+ if (ret == 0) {
+ tty_driver_kref_get(driver);
+ tty->count++;
+ /* This is the ref that sdio_uart_port_get() provided */
+ tty->driver_data = port;
+ driver->ttys[idx] = tty;
+ } else
+ sdio_uart_port_put(port);
+ return ret;
+}
- port = sdio_uart_port_get(tty->index);
- if (!port)
- return -ENODEV;
+/**
+ * sdio_uart_cleanup - called on the last tty kref drop
+ * @tty: the tty being destroyed
+ *
+ * Called asynchronously when the last reference to the tty is dropped.
+ * We cannot destroy the tty->driver_data port kref until this point
+ */
- mutex_lock(&port->open_lock);
+static void sdio_uart_cleanup(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ tty->driver_data = NULL; /* Bug trap */
+ sdio_uart_port_put(port);
+}
- /*
- * Make sure not to mess up with a dead port
- * which has not been closed yet.
- */
- if (tty->driver_data && tty->driver_data != port) {
- mutex_unlock(&port->open_lock);
- sdio_uart_port_put(port);
- return -EBUSY;
- }
+/*
+ * Open/close/hangup is now entirely boilerplate
+ */
- if (!port->opened) {
- tty->driver_data = port;
- port->tty = tty;
- ret = sdio_uart_startup(port);
- if (ret) {
- tty->driver_data = NULL;
- port->tty = NULL;
- mutex_unlock(&port->open_lock);
- sdio_uart_port_put(port);
- return ret;
- }
- }
- port->opened++;
- mutex_unlock(&port->open_lock);
- return 0;
+static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ return tty_port_open(&port->port, tty, filp);
}
static void sdio_uart_close(struct tty_struct *tty, struct file * filp)
{
struct sdio_uart_port *port = tty->driver_data;
+ tty_port_close(&port->port, tty, filp);
+}
- if (!port)
- return;
-
- mutex_lock(&port->open_lock);
- BUG_ON(!port->opened);
-
- /*
- * This is messy. The tty layer calls us even when open()
- * returned an error. Ignore this close request if tty->count
- * is larger than port->count.
- */
- if (tty->count > port->opened) {
- mutex_unlock(&port->open_lock);
- return;
- }
-
- if (--port->opened == 0) {
- tty->closing = 1;
- sdio_uart_shutdown(port);
- tty_ldisc_flush(tty);
- port->tty = NULL;
- tty->driver_data = NULL;
- tty->closing = 0;
- }
- mutex_unlock(&port->open_lock);
- sdio_uart_port_put(port);
+static void sdio_uart_hangup(struct tty_struct *tty)
+{
+ struct sdio_uart_port *port = tty->driver_data;
+ tty_port_hangup(&port->port);
}
-static int sdio_uart_write(struct tty_struct * tty, const unsigned char *buf,
+static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct sdio_uart_port *port = tty->driver_data;
@@ -756,7 +843,7 @@ static int sdio_uart_write(struct tty_struct * tty, const unsigned char *buf,
}
spin_unlock(&port->write_lock);
- if ( !(port->ier & UART_IER_THRI)) {
+ if (!(port->ier & UART_IER_THRI)) {
int err = sdio_uart_claim_func(port);
if (!err) {
sdio_uart_start_tx(port);
@@ -843,17 +930,12 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
sdio_uart_release_func(port);
}
-static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void sdio_uart_set_termios(struct tty_struct *tty,
+ struct ktermios *old_termios)
{
struct sdio_uart_port *port = tty->driver_data;
unsigned int cflag = tty->termios->c_cflag;
-#define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
- if ((cflag ^ old_termios->c_cflag) == 0 &&
- RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0)
- return;
-
if (sdio_uart_claim_func(port) != 0)
return;
@@ -928,7 +1010,7 @@ static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file,
int result;
result = sdio_uart_claim_func(port);
- if(!result) {
+ if (!result) {
sdio_uart_update_mctrl(port, set, clear);
sdio_uart_release_func(port);
}
@@ -946,7 +1028,7 @@ static int sdio_uart_proc_show(struct seq_file *m, void *v)
struct sdio_uart_port *port = sdio_uart_port_get(i);
if (port) {
seq_printf(m, "%d: uart:SDIO", i);
- if(capable(CAP_SYS_ADMIN)) {
+ if (capable(CAP_SYS_ADMIN)) {
seq_printf(m, " tx:%d rx:%d",
port->icount.tx, port->icount.rx);
if (port->icount.frame)
@@ -994,6 +1076,13 @@ static const struct file_operations sdio_uart_proc_fops = {
.release = single_release,
};
+static const struct tty_port_operations sdio_uart_port_ops = {
+ .dtr_rts = uart_dtr_rts,
+ .carrier_raised = uart_carrier_raised,
+ .shutdown = sdio_uart_shutdown,
+ .activate = sdio_uart_activate,
+};
+
static const struct tty_operations sdio_uart_ops = {
.open = sdio_uart_open,
.close = sdio_uart_close,
@@ -1004,9 +1093,12 @@ static const struct tty_operations sdio_uart_ops = {
.throttle = sdio_uart_throttle,
.unthrottle = sdio_uart_unthrottle,
.set_termios = sdio_uart_set_termios,
+ .hangup = sdio_uart_hangup,
.break_ctl = sdio_uart_break_ctl,
.tiocmget = sdio_uart_tiocmget,
.tiocmset = sdio_uart_tiocmset,
+ .install = sdio_uart_install,
+ .cleanup = sdio_uart_cleanup,
.proc_fops = &sdio_uart_proc_fops,
};
@@ -1043,7 +1135,7 @@ static int sdio_uart_probe(struct sdio_func *func,
}
if (!tpl) {
printk(KERN_WARNING
- "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
+ "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
sdio_func_id(func));
kfree(port);
return -EINVAL;
@@ -1068,13 +1160,16 @@ static int sdio_uart_probe(struct sdio_func *func,
port->func = func;
sdio_set_drvdata(func, port);
+ tty_port_init(&port->port);
+ port->port.ops = &sdio_uart_port_ops;
ret = sdio_uart_add_port(port);
if (ret) {
kfree(port);
} else {
struct device *dev;
- dev = tty_register_device(sdio_uart_tty_driver, port->index, &func->dev);
+ dev = tty_register_device(sdio_uart_tty_driver,
+ port->index, &func->dev);
if (IS_ERR(dev)) {
sdio_uart_port_remove(port);
ret = PTR_ERR(dev);
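
The hunks above convert sdio_uart from hand-rolled open counting to the generic tty_port helpers: install/cleanup own the port reference, while tty_port_open()/tty_port_close() call back into the driver through the activate and shutdown hooks listed in sdio_uart_port_ops further down. A minimal stand-alone sketch of that first-open/last-close pattern, with the kernel structures reduced to stubs (not the real tty_port API):

/* Sketch only: tty_port reduced to an open count plus two callbacks. */
#include <stdio.h>

struct port {
        int count;                       /* open count kept by the port core */
        int (*activate)(struct port *);  /* bring hardware up on first open */
        void (*shutdown)(struct port *); /* tear hardware down on last close */
};

static int port_open(struct port *p)
{
        if (p->count++ == 0)             /* first opener activates the hardware */
                return p->activate(p);
        return 0;
}

static void port_close(struct port *p)
{
        if (--p->count == 0)             /* last closer shuts the hardware down */
                p->shutdown(p);
}

static int demo_activate(struct port *p)  { puts("activate"); return 0; }
static void demo_shutdown(struct port *p) { puts("shutdown"); }

int main(void)
{
        struct port p = { 0, demo_activate, demo_shutdown };
        port_open(&p);   /* prints "activate" */
        port_open(&p);   /* nested open: no hardware action */
        port_close(&p);
        port_close(&p);  /* prints "shutdown" */
        return 0;
}
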
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ab37a6d9d32..bb22ffd76ef 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -3,7 +3,7 @@
#
config MMC_UNSAFE_RESUME
- bool "Allow unsafe resume (DANGEROUS)"
+ bool "Assume MMC/SD cards are non-removable (DANGEROUS)"
help
If you say Y here, the MMC layer will assume that all cards
stayed in their respective slots during the suspend. The
@@ -14,3 +14,5 @@ config MMC_UNSAFE_RESUME
This option is usually just for embedded systems which use
a MMC/SD card for rootfs. Most people should say N here.
+ This option sets a default which can be overridden by the
+ module parameter "removable=0" or "removable=1".
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7dab2e5f4bc..30acd526582 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -48,6 +48,22 @@ int use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
+ * We normally treat cards as removed during suspend if they are not
+ * known to be on a non-removable bus, to avoid the risk of writing
+ * back data to a different card after resume. Allow this to be
+ * overridden if necessary.
+ */
+#ifdef CONFIG_MMC_UNSAFE_RESUME
+int mmc_assume_removable;
+#else
+int mmc_assume_removable = 1;
+#endif
+module_param_named(removable, mmc_assume_removable, bool, 0644);
+MODULE_PARM_DESC(
+ removable,
+ "MMC/SD cards are removable and may be removed during suspend");
+
+/*
* Internal function. Schedule delayed work in the MMC work queue.
*/
static int mmc_schedule_delayed_work(struct delayed_work *work,
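
The new "removable" parameter replaces the old compile-time choice: the bus_ops selection in mmc.c and sd.c later in this diff folds it into a runtime test. A stand-alone sketch of that decision, with the kernel flags reduced to plain constants (illustrative only):

#include <stdio.h>

#define MMC_CAP_NONREMOVABLE 0x1        /* host says the slot cannot be emptied */

static int mmc_assume_removable = 1;    /* default when MMC_UNSAFE_RESUME is not set */

static const char *pick_bus_ops(unsigned int host_caps)
{
        /* Suspend/resume handlers are kept (the "unsafe" ops) only when the
         * card is known, or assumed, to stay in its slot. */
        if ((host_caps & MMC_CAP_NONREMOVABLE) || !mmc_assume_removable)
                return "mmc_ops_unsafe";
        return "mmc_ops";
}

int main(void)
{
        printf("%s\n", pick_bus_ops(0));                    /* mmc_ops */
        printf("%s\n", pick_bus_ops(MMC_CAP_NONREMOVABLE)); /* mmc_ops_unsafe */
        mmc_assume_removable = 0;                           /* removable=0 */
        printf("%s\n", pick_bus_ops(0));                    /* mmc_ops_unsafe */
        return 0;
}
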
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 67ae6abc423..a811c52a165 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -54,7 +54,9 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
int mmc_attach_sd(struct mmc_host *host, u32 ocr);
int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+/* Module parameters */
extern int use_spi_crc;
+extern int mmc_assume_removable;
/* Debugfs information for hosts and cards */
void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bfefce365ae..c11189446a1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -602,25 +602,6 @@ static int mmc_awake(struct mmc_host *host)
return err;
}
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
-static const struct mmc_bus_ops mmc_ops = {
- .awake = mmc_awake,
- .sleep = mmc_sleep,
- .remove = mmc_remove,
- .detect = mmc_detect,
- .suspend = mmc_suspend,
- .resume = mmc_resume,
- .power_restore = mmc_power_restore,
-};
-
-static void mmc_attach_bus_ops(struct mmc_host *host)
-{
- mmc_attach_bus(host, &mmc_ops);
-}
-
-#else
-
static const struct mmc_bus_ops mmc_ops = {
.awake = mmc_awake,
.sleep = mmc_sleep,
@@ -645,15 +626,13 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE)
+ if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
bus_ops = &mmc_ops_unsafe;
else
bus_ops = &mmc_ops;
mmc_attach_bus(host, bus_ops);
}
-#endif
-
/*
* Starting point for MMC card init.
*/
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 10b2a4d20f5..fdd414eded0 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -606,23 +606,6 @@ static void mmc_sd_power_restore(struct mmc_host *host)
mmc_release_host(host);
}
-#ifdef CONFIG_MMC_UNSAFE_RESUME
-
-static const struct mmc_bus_ops mmc_sd_ops = {
- .remove = mmc_sd_remove,
- .detect = mmc_sd_detect,
- .suspend = mmc_sd_suspend,
- .resume = mmc_sd_resume,
- .power_restore = mmc_sd_power_restore,
-};
-
-static void mmc_sd_attach_bus_ops(struct mmc_host *host)
-{
- mmc_attach_bus(host, &mmc_sd_ops);
-}
-
-#else
-
static const struct mmc_bus_ops mmc_sd_ops = {
.remove = mmc_sd_remove,
.detect = mmc_sd_detect,
@@ -643,15 +626,13 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
{
const struct mmc_bus_ops *bus_ops;
- if (host->caps & MMC_CAP_NONREMOVABLE)
+ if (host->caps & MMC_CAP_NONREMOVABLE || !mmc_assume_removable)
bus_ops = &mmc_sd_ops_unsafe;
else
bus_ops = &mmc_sd_ops;
mmc_attach_bus(host, bus_ops);
}
-#endif
-
/*
* Starting point for SD card init.
*/
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index cdb845b68ab..06b64085a35 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -516,7 +516,8 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
* The number of functions on the card is encoded inside
* the ocr.
*/
- card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28;
+ funcs = (ocr & 0x70000000) >> 28;
+ card->sdio_funcs = 0;
/*
* If needed, disconnect card detection pull-up resistor.
@@ -528,7 +529,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
/*
* Initialize (but don't add) all present functions.
*/
- for (i = 0;i < funcs;i++) {
+ for (i = 0; i < funcs; i++, card->sdio_funcs++) {
err = sdio_init_func(host->card, i + 1);
if (err)
goto remove;
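
The loop change above keeps card->sdio_funcs in step with the functions that were actually initialized, so a failure part-way through leaves a count that matches what needs tearing down. A small stand-alone sketch of the pattern, with sdio_init_func() replaced by a stub that fails on the third function (hypothetical numbers):

#include <stdio.h>

static int init_func(int n)
{
        return (n == 3) ? -1 : 0;  /* pretend function 3 fails to initialize */
}

int main(void)
{
        int funcs = 4, inited = 0, i, err = 0;

        /* Count a function only once it has been successfully initialized. */
        for (i = 0; i < funcs; i++, inited++) {
                err = init_func(i + 1);
                if (err)
                        break;
        }
        /* Cleanup now touches exactly the 'inited' functions that exist. */
        printf("initialized %d of %d functions (err=%d)\n", inited, funcs, err);
        return 0;
}
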
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d37464e296a..9e060c87e64 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -248,12 +248,15 @@ int sdio_add_func(struct sdio_func *func)
/*
* Unregister a SDIO function with the driver model, and
* (eventually) free it.
+ * This function can be called through error paths where sdio_add_func() was
+ * never executed (because a failure occurred at an earlier point).
*/
void sdio_remove_func(struct sdio_func *func)
{
- if (sdio_func_present(func))
- device_del(&func->dev);
+ if (!sdio_func_present(func))
+ return;
+ device_del(&func->dev);
put_device(&func->dev);
}
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f85dcd53650..9538389783c 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -97,26 +97,56 @@ static const unsigned char speed_val[16] =
static const unsigned int speed_unit[8] =
{ 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
-/* FUNCE tuples with these types get passed to SDIO drivers */
-static const unsigned char funce_type_whitelist[] = {
- 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
+
+typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
+ const unsigned char *, unsigned);
+
+struct cis_tpl {
+ unsigned char code;
+ unsigned char min_size;
+ tpl_parse_t *parse;
};
-static int cistpl_funce_whitelisted(unsigned char type)
+static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,
+ const char *tpl_descr,
+ const struct cis_tpl *tpl, int tpl_count,
+ unsigned char code,
+ const unsigned char *buf, unsigned size)
{
- int i;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
- if (funce_type_whitelist[i] == type)
- return 1;
+ /* look for a matching code in the table */
+ for (i = 0; i < tpl_count; i++, tpl++) {
+ if (tpl->code == code)
+ break;
}
- return 0;
+ if (i < tpl_count) {
+ if (size >= tpl->min_size) {
+ if (tpl->parse)
+ ret = tpl->parse(card, func, buf, size);
+ else
+ ret = -EILSEQ; /* known tuple, not parsed */
+ } else {
+ /* invalid tuple */
+ ret = -EINVAL;
+ }
+ if (ret && ret != -EILSEQ && ret != -ENOENT) {
+ printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n",
+ mmc_hostname(card->host), tpl_descr, code, size);
+ }
+ } else {
+ /* unknown tuple */
+ ret = -ENOENT;
+ }
+
+ return ret;
}
-static int cistpl_funce_common(struct mmc_card *card,
+static int cistpl_funce_common(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
- if (size < 0x04 || buf[0] != 0)
+ /* Only valid for the common CIS (function 0) */
+ if (func)
return -EINVAL;
/* TPLFE_FN0_BLK_SIZE */
@@ -129,20 +159,24 @@ static int cistpl_funce_common(struct mmc_card *card,
return 0;
}
-static int cistpl_funce_func(struct sdio_func *func,
+static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
unsigned vsn;
unsigned min_size;
- /* let SDIO drivers take care of whitelisted FUNCE tuples */
- if (cistpl_funce_whitelisted(buf[0]))
- return -EILSEQ;
+ /* Only valid for the individual function's CIS (1-7) */
+ if (!func)
+ return -EINVAL;
+ /*
+ * This tuple has a different length depending on the SDIO spec
+ * version.
+ */
vsn = func->card->cccr.sdio_vsn;
min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
- if (size < min_size || buf[0] != 1)
+ if (size < min_size)
return -EINVAL;
/* TPLFE_MAX_BLK_SIZE */
@@ -157,39 +191,32 @@ static int cistpl_funce_func(struct sdio_func *func,
return 0;
}
+/*
+ * Known TPLFE_TYPEs table for CISTPL_FUNCE tuples.
+ *
+ * Note that, unlike PCMCIA, CISTPL_FUNCE tuples are not parsed depending
+ * on the TPLFID_FUNCTION value of the previous CISTPL_FUNCID, since on SDIO
+ * TPLFID_FUNCTION is always hardcoded to 0x0C.
+ */
+static const struct cis_tpl cis_tpl_funce_list[] = {
+ { 0x00, 4, cistpl_funce_common },
+ { 0x01, 0, cistpl_funce_func },
+ { 0x04, 1+1+6, /* CISTPL_FUNCE_LAN_NODE_ID */ },
+};
+
static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
- int ret;
-
- /*
- * There should be two versions of the CISTPL_FUNCE tuple,
- * one for the common CIS (function 0) and a version used by
- * the individual function's CIS (1-7). Yet, the later has a
- * different length depending on the SDIO spec version.
- */
- if (func)
- ret = cistpl_funce_func(func, buf, size);
- else
- ret = cistpl_funce_common(card, buf, size);
-
- if (ret && ret != -EILSEQ) {
- printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
- "type %u\n", mmc_hostname(card->host), size, buf[0]);
- }
+ if (size < 1)
+ return -EINVAL;
- return ret;
+ return cis_tpl_parse(card, func, "CISTPL_FUNCE",
+ cis_tpl_funce_list,
+ ARRAY_SIZE(cis_tpl_funce_list),
+ buf[0], buf, size);
}
-typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
- const unsigned char *, unsigned);
-
-struct cis_tpl {
- unsigned char code;
- unsigned char min_size;
- tpl_parse_t *parse;
-};
-
+/* Known TPL_CODEs table for CIS tuples */
static const struct cis_tpl cis_tpl_list[] = {
{ 0x15, 3, cistpl_vers_1 },
{ 0x20, 4, cistpl_manfid },
@@ -268,46 +295,38 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
break;
}
- for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
- if (cis_tpl_list[i].code == tpl_code)
- break;
- if (i < ARRAY_SIZE(cis_tpl_list)) {
- const struct cis_tpl *tpl = cis_tpl_list + i;
- if (tpl_link < tpl->min_size) {
- printk(KERN_ERR
- "%s: bad CIS tuple 0x%02x"
- " (length = %u, expected >= %u)\n",
- mmc_hostname(card->host),
- tpl_code, tpl_link, tpl->min_size);
- ret = -EINVAL;
- } else if (tpl->parse) {
- ret = tpl->parse(card, func,
- this->data, tpl_link);
- }
+ /* Try to parse the CIS tuple */
+ ret = cis_tpl_parse(card, func, "CIS",
+ cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
+ tpl_code, this->data, tpl_link);
+ if (ret == -EILSEQ || ret == -ENOENT) {
/*
- * We don't need the tuple anymore if it was
- * successfully parsed by the SDIO core or if it is
- * not going to be parsed by SDIO drivers.
+ * The tuple is unknown or known but not parsed.
+ * Queue the tuple for the function driver.
*/
- if (!ret || ret != -EILSEQ)
- kfree(this);
- } else {
- /* unknown tuple */
- ret = -EILSEQ;
- }
-
- if (ret == -EILSEQ) {
- /* this tuple is unknown to the core or whitelisted */
this->next = NULL;
this->code = tpl_code;
this->size = tpl_link;
*prev = this;
prev = &this->next;
- printk(KERN_DEBUG
- "%s: queuing CIS tuple 0x%02x length %u\n",
- mmc_hostname(card->host), tpl_code, tpl_link);
+
+ if (ret == -ENOENT) {
+ /* warn about unknown tuples */
+ printk(KERN_WARNING "%s: queuing unknown"
+ " CIS tuple 0x%02x (%u bytes)\n",
+ mmc_hostname(card->host),
+ tpl_code, tpl_link);
+ }
+
/* keep on analyzing tuples */
ret = 0;
+ } else {
+ /*
+ * We don't need the tuple anymore if it was
+ * successfully parsed by the SDIO core or if it is
+ * not going to be queued for a driver.
+ */
+ kfree(this);
}
ptr += tpl_link;
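
cis_tpl_parse() above distinguishes three outcomes: 0 when the core consumed the tuple, -EILSEQ for a known tuple deliberately left to the function driver, and -ENOENT for an unknown tuple; the CIS reader queues the last two for drivers. A stand-alone sketch of that dispatch, with made-up tuple codes and a trivial table:

#include <stdio.h>
#include <errno.h>

/* Sketch: look the tuple code up in a table; unknown codes return -ENOENT,
 * known-but-unparsed codes return -EILSEQ, parsed tuples return 0. */
struct tpl { unsigned char code; int (*parse)(void); };

static int parse_vers(void) { return 0; }

static const struct tpl table[] = {
        { 0x15, parse_vers },
        { 0x22, NULL },         /* known to the core, left for the driver */
};

static int tpl_parse(unsigned char code)
{
        unsigned i;
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].code == code)
                        return table[i].parse ? table[i].parse() : -EILSEQ;
        return -ENOENT;         /* unknown tuple */
}

int main(void)
{
        /* 0 => consumed by the core; -EILSEQ/-ENOENT => queue for the driver */
        printf("%d %d %d\n", tpl_parse(0x15), tpl_parse(0x22), tpl_parse(0x91));
        return 0;
}
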
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e04b751680d..ce1d28884e2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -44,6 +44,19 @@ config MMC_SDHCI_IO_ACCESSORS
This is a silent Kconfig symbol that is selected by the drivers that
need to overwrite SDHCI IO memory accessors.
+config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ bool
+ select MMC_SDHCI_IO_ACCESSORS
+ help
+ This option is selected by drivers running on big endian hosts
+ and performing I/O to an SDHCI controller through a bus that
+ implements a hardware byte swapper using a 32-bit datum.
+ This endian mapping mode is called "data invariance" and
+ has the effect of scrambling the addresses and formats of data
+ accessed in sizes other than the datum size.
+
+ This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
+
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
@@ -75,11 +88,29 @@ config MMC_RICOH_MMC
config MMC_SDHCI_OF
tristate "SDHCI support on OpenFirmware platforms"
depends on MMC_SDHCI && PPC_OF
- select MMC_SDHCI_IO_ACCESSORS
help
This selects the OF support for Secure Digital Host Controller
- Interfaces. So far, only the Freescale eSDHC controller is known
- to exist on OF platforms.
+ Interfaces.
+
+ If unsure, say N.
+
+config MMC_SDHCI_OF_ESDHC
+ bool "SDHCI OF support for the Freescale eSDHC controller"
+ depends on MMC_SDHCI_OF
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ help
+ This selects the Freescale eSDHC controller support.
+
+ If unsure, say N.
+
+config MMC_SDHCI_OF_HLWD
+ bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+ depends on MMC_SDHCI_OF
+ select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ found in the "Hollywood" chipset of the Nintendo Wii video game
+ console.
If unsure, say N.
@@ -251,6 +282,14 @@ config MMC_MVSDIO
To compile this driver as a module, choose M here: the
module will be called mvsdio.
+config MMC_DAVINCI
+ tristate "TI DAVINCI Multimedia Card Interface support"
+ depends on ARCH_DAVINCI
+ help
+ This selects the TI DAVINCI Multimedia Card Interface.
+ If you have a DAVINCI board with a Multimedia Card slot,
+ say Y or M here. If unsure, say N.
+
config MMC_SPI
tristate "MMC/SD/SDIO over SPI"
depends on SPI_MASTER && !HIGHMEM && HAS_DMA
@@ -357,3 +396,22 @@ config MMC_VIA_SDMMC
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config SDH_BFIN
+ tristate "Blackfin Secure Digital Host support"
+ depends on MMC && ((BF54x && !BF544) || (BF51x && !BF512))
+ help
+ If you say yes here you will get support for the Blackfin on-chip
+ Secure Digital Host interface. This includes support for MMC and
+ SD cards.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_sdh.
+
+ If unsure, say N.
+
+config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ bool "Blackfin EZkit Missing SDH_CMD Pull Up Resistor Workaround"
+ depends on SDH_BFIN
+ help
+ If you say yes here, SD cards may work on the EZkit.
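
The MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER help text above describes the "data invariance" mapping in prose. A stand-alone sketch of the resulting 8-bit register accessor, assuming (for illustration only, this is not the kernel accessor) that every aligned 32-bit word arrives byte-reversed:

#include <stdint.h>
#include <stdio.h>

/* Sketch: behind a 32-bit "data invariance" byte swapper each aligned word
 * arrives byte-reversed, so an 8-bit register read must pick the lane at
 * offset XOR 3 within the aligned word. */
static uint8_t swapped_readb(const uint8_t *cpu_view, unsigned reg)
{
        unsigned base = reg & ~3u;       /* aligned 32-bit word */
        unsigned lane = (reg & 3u) ^ 3u; /* undo the per-word byte reversal */
        return cpu_view[base + lane];
}

int main(void)
{
        /* Device bytes 0xd0..0xd3 arrive reversed within the word. */
        uint8_t cpu_view[4] = { 0xd3, 0xd2, 0xd1, 0xd0 };
        printf("reg1 = 0x%02x\n", swapped_readb(cpu_view, 1)); /* prints 0xd1 */
        return 0;
}
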
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index abcb0400e06..3d253dd4240 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
-obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -25,6 +24,7 @@ obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o
obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
obj-$(CONFIG_MMC_MSM7X00A) += msm_sdcc.o
obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
+obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
@@ -34,6 +34,12 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
+obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+
+obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
+sdhci-of-y := sdhci-of-core.o
+sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
+sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc25586b7ee..8072128e933 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -25,6 +25,8 @@
#include <linux/stat.h>
#include <linux/mmc/host.h>
+
+#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
#include <asm/io.h>
@@ -92,6 +94,7 @@ struct atmel_mci_dma {
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
* @mode_reg: Value of the MR register.
+ * @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @mapbase: Physical address of the MMIO registers.
@@ -155,6 +158,7 @@ struct atmel_mci {
bool need_clock_update;
bool need_reset;
u32 mode_reg;
+ u32 cfg_reg;
unsigned long bus_hz;
unsigned long mapbase;
struct clk *mck;
@@ -223,6 +227,19 @@ static bool mci_has_rwproof(void)
}
/*
+ * The new MCI2 module isn't 100% compatible with the old MCI module,
+ * and it has a few nice features which we want to use...
+ */
+static inline bool atmci_is_mci2(void)
+{
+ if (cpu_is_at91sam9g45())
+ return true;
+
+ return false;
+}
+
+
+/*
* The debugfs stuff below is mostly optimized away when
* CONFIG_DEBUG_FS is not set.
*/
@@ -357,12 +374,33 @@ static int atmci_regs_show(struct seq_file *s, void *v)
buf[MCI_BLKR / 4],
buf[MCI_BLKR / 4] & 0xffff,
(buf[MCI_BLKR / 4] >> 16) & 0xffff);
+ if (atmci_is_mci2())
+ seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]);
/* Don't read RSPR and RDR; it will consume the data there */
atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
+ if (atmci_is_mci2()) {
+ u32 val;
+
+ val = buf[MCI_DMA / 4];
+ seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
+ val, val & 3,
+ ((val >> 4) & 3) ?
+ 1 << (((val >> 4) & 3) + 1) : 1,
+ val & MCI_DMAEN ? " DMAEN" : "");
+
+ val = buf[MCI_CFG / 4];
+ seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
+ val,
+ val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
+ val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
+ val & MCI_CFG_HSMODE ? " HSMODE" : "",
+ val & MCI_CFG_LSYNC ? " LSYNC" : "");
+ }
+
kfree(buf);
return 0;
@@ -557,6 +595,10 @@ static void atmci_dma_complete(void *arg)
dev_vdbg(&host->pdev->dev, "DMA complete\n");
+ if (atmci_is_mci2())
+ /* Disable DMA hardware handshaking on MCI */
+ mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN);
+
atmci_dma_cleanup(host);
/*
@@ -592,7 +634,7 @@ static void atmci_dma_complete(void *arg)
}
static int
-atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
@@ -624,6 +666,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (!chan)
return -ENODEV;
+ if (atmci_is_mci2())
+ mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN);
+
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
@@ -641,10 +686,6 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
host->dma.data_desc = desc;
desc->callback = atmci_dma_complete;
desc->callback_param = host;
- desc->tx_submit(desc);
-
- /* Go! */
- chan->device->device_issue_pending(chan);
return 0;
unmap_exit:
@@ -652,13 +693,26 @@ unmap_exit:
return -ENOMEM;
}
+static void atmci_submit_data(struct atmel_mci *host)
+{
+ struct dma_chan *chan = host->data_chan;
+ struct dma_async_tx_descriptor *desc = host->dma.data_desc;
+
+ if (chan) {
+ desc->tx_submit(desc);
+ chan->device->device_issue_pending(chan);
+ }
+}
+
#else /* CONFIG_MMC_ATMELMCI_DMA */
-static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
return -ENOSYS;
}
+static void atmci_submit_data(struct atmel_mci *host) {}
+
static void atmci_stop_dma(struct atmel_mci *host)
{
/* Data transfer was stopped by the interrupt handler */
@@ -672,7 +726,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
* Returns a mask of interrupt flags to be enabled after the whole
* request has been prepared.
*/
-static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags;
@@ -683,7 +737,7 @@ static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
host->data = data;
iflags = ATMCI_DATA_ERROR_FLAGS;
- if (atmci_submit_data_dma(host, data)) {
+ if (atmci_prepare_data_dma(host, data)) {
host->data_chan = NULL;
/*
@@ -729,6 +783,8 @@ static void atmci_start_request(struct atmel_mci *host,
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
mci_writel(host, MR, host->mode_reg);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
host->need_reset = false;
}
mci_writel(host, SDCR, slot->sdc_reg);
@@ -744,6 +800,7 @@ static void atmci_start_request(struct atmel_mci *host,
while (!(mci_readl(host, SR) & MCI_CMDRDY))
cpu_relax();
}
+ iflags = 0;
data = mrq->data;
if (data) {
atmci_set_timeout(host, slot, data);
@@ -753,15 +810,17 @@ static void atmci_start_request(struct atmel_mci *host,
| MCI_BLKLEN(data->blksz));
dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
+
+ iflags |= atmci_prepare_data(host, data);
}
- iflags = MCI_CMDRDY;
+ iflags |= MCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
atmci_start_command(host, cmd, cmdflags);
if (data)
- iflags |= atmci_submit_data(host, data);
+ atmci_submit_data(host);
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
@@ -857,6 +916,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
clk_enable(host->mck);
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
}
/*
@@ -1095,6 +1156,8 @@ static void atmci_detect_change(unsigned long data)
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
mci_writel(host, MR, host->mode_reg);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
host->data = NULL;
host->cmd = NULL;
@@ -1584,14 +1647,47 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
#ifdef CONFIG_MMC_ATMELMCI_DMA
static bool filter(struct dma_chan *chan, void *slave)
{
- struct dw_dma_slave *dws = slave;
+ struct mci_dma_data *sl = slave;
- if (dws->dma_dev == chan->device->dev) {
- chan->private = dws;
+ if (sl && find_slave_dev(sl) == chan->device->dev) {
+ chan->private = slave_data_ptr(sl);
return true;
- } else
+ } else {
return false;
+ }
}
+
+static void atmci_configure_dma(struct atmel_mci *host)
+{
+ struct mci_platform_data *pdata;
+
+ if (host == NULL)
+ return;
+
+ pdata = host->pdev->dev.platform_data;
+
+ if (pdata && find_slave_dev(pdata->dma_slave)) {
+ dma_cap_mask_t mask;
+
+ setup_dma_addr(pdata->dma_slave,
+ host->mapbase + MCI_TDR,
+ host->mapbase + MCI_RDR);
+
+ /* Try to grab a DMA channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ host->dma.chan =
+ dma_request_channel(mask, filter, pdata->dma_slave);
+ }
+ if (!host->dma.chan)
+ dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
+ else
+ dev_info(&host->pdev->dev,
+ "Using %s for DMA transfers\n",
+ dma_chan_name(host->dma.chan));
+}
+#else
+static void atmci_configure_dma(struct atmel_mci *host) {}
#endif
static int __init atmci_probe(struct platform_device *pdev)
@@ -1645,22 +1741,7 @@ static int __init atmci_probe(struct platform_device *pdev)
if (ret)
goto err_request_irq;
-#ifdef CONFIG_MMC_ATMELMCI_DMA
- if (pdata->dma_slave.dma_dev) {
- struct dw_dma_slave *dws = &pdata->dma_slave;
- dma_cap_mask_t mask;
-
- dws->tx_reg = regs->start + MCI_TDR;
- dws->rx_reg = regs->start + MCI_RDR;
-
- /* Try to grab a DMA channel */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- host->dma.chan = dma_request_channel(mask, filter, dws);
- }
- if (!host->dma.chan)
- dev_notice(&pdev->dev, "DMA not available, using PIO\n");
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
+ atmci_configure_dma(host);
platform_set_drvdata(pdev, host);
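
The atmel-mci changes above split DMA handling into atmci_prepare_data_dma(), which only builds the descriptor, and atmci_submit_data(), which issues it once the command has been started. A stand-alone sketch of that ordering, with the kernel structures reduced to stubs:

#include <stdio.h>

/* Sketch of the new ordering in atmci_start_request(): prepare the DMA
 * descriptor before the command goes out, submit it only afterwards. */
struct xfer { int prepared; int submitted; };

static int prepare_data(struct xfer *x) { x->prepared = 1; return 0; }
static void start_command(void)         { puts("command started"); }

static void submit_data(struct xfer *x)
{
        if (x->prepared) {               /* only if DMA was actually set up */
                x->submitted = 1;
                puts("dma issued");
        }
}

int main(void)
{
        struct xfer x = { 0, 0 };
        prepare_data(&x);   /* was: the descriptor used to be issued here */
        start_command();
        submit_data(&x);    /* now: DMA is kicked off after the command */
        return 0;
}
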
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
new file mode 100644
index 00000000000..3343a57355c
--- /dev/null
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -0,0 +1,639 @@
+/*
+ * bfin_sdh.c - Analog Devices Blackfin SDH Controller
+ *
+ * Copyright (C) 2007-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRIVER_NAME "bfin-sdh"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/host.h>
+#include <linux/proc_fs.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#include <asm/bfin_sdh.h>
+
+#if defined(CONFIG_BF51x)
+#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL
+#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL
+#define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL
+#define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL
+#define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT
+#define bfin_write_SDH_COMMAND bfin_write_RSI_COMMAND
+#define bfin_write_SDH_DATA_TIMER bfin_write_RSI_DATA_TIMER
+#define bfin_read_SDH_RESPONSE0 bfin_read_RSI_RESPONSE0
+#define bfin_read_SDH_RESPONSE1 bfin_read_RSI_RESPONSE1
+#define bfin_read_SDH_RESPONSE2 bfin_read_RSI_RESPONSE2
+#define bfin_read_SDH_RESPONSE3 bfin_read_RSI_RESPONSE3
+#define bfin_write_SDH_DATA_LGTH bfin_write_RSI_DATA_LGTH
+#define bfin_read_SDH_DATA_CTL bfin_read_RSI_DATA_CTL
+#define bfin_write_SDH_DATA_CTL bfin_write_RSI_DATA_CTL
+#define bfin_read_SDH_DATA_CNT bfin_read_RSI_DATA_CNT
+#define bfin_write_SDH_STATUS_CLR bfin_write_RSI_STATUS_CLR
+#define bfin_read_SDH_E_STATUS bfin_read_RSI_E_STATUS
+#define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS
+#define bfin_read_SDH_STATUS bfin_read_RSI_STATUS
+#define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0
+#define bfin_read_SDH_CFG bfin_read_RSI_CFG
+#define bfin_write_SDH_CFG bfin_write_RSI_CFG
+#endif
+
+struct dma_desc_array {
+ unsigned long start_addr;
+ unsigned short cfg;
+ unsigned short x_count;
+ short x_modify;
+} __packed;
+
+struct sdh_host {
+ struct mmc_host *mmc;
+ spinlock_t lock;
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+ int stat_irq;
+ int dma_ch;
+ int dma_dir;
+ struct dma_desc_array *sg_cpu;
+ dma_addr_t sg_dma;
+ int dma_len;
+
+ unsigned int imask;
+ unsigned int power_mode;
+ unsigned int clk_div;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+};
+
+static struct bfin_sd_host *get_sdh_data(struct platform_device *pdev)
+{
+ return pdev->dev.platform_data;
+}
+
+static void sdh_stop_clock(struct sdh_host *host)
+{
+ bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() & ~CLK_E);
+ SSYNC();
+}
+
+static void sdh_enable_stat_irq(struct sdh_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask |= mask;
+ bfin_write_SDH_MASK0(mask);
+ SSYNC();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sdh_disable_stat_irq(struct sdh_host *host, unsigned int mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->imask &= ~mask;
+ bfin_write_SDH_MASK0(host->imask);
+ SSYNC();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
+{
+ unsigned int length;
+ unsigned int data_ctl;
+ unsigned int dma_cfg;
+ struct scatterlist *sg;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
+ host->data = data;
+ data_ctl = 0;
+ dma_cfg = 0;
+
+ length = data->blksz * data->blocks;
+ bfin_write_SDH_DATA_LGTH(length);
+
+ if (data->flags & MMC_DATA_STREAM)
+ data_ctl |= DTX_MODE;
+
+ if (data->flags & MMC_DATA_READ)
+ data_ctl |= DTX_DIR;
+ /* Only supports power-of-2 block size */
+ if (data->blksz & (data->blksz - 1))
+ return -EINVAL;
+ data_ctl |= ((ffs(data->blksz) - 1) << 4);
+
+ bfin_write_SDH_DATA_CTL(data_ctl);
+
+ bfin_write_SDH_DATA_TIMER(0xFFFF);
+ SSYNC();
+
+ if (data->flags & MMC_DATA_READ) {
+ host->dma_dir = DMA_FROM_DEVICE;
+ dma_cfg |= WNR;
+ } else
+ host->dma_dir = DMA_TO_DEVICE;
+
+ sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));
+ host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir);
+#if defined(CONFIG_BF54x)
+ dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN;
+ {
+ int i;
+ for_each_sg(data->sg, sg, host->dma_len, i) {
+ host->sg_cpu[i].start_addr = sg_dma_address(sg);
+ host->sg_cpu[i].cfg = dma_cfg;
+ host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;
+ host->sg_cpu[i].x_modify = 4;
+ dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, "
+ "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n",
+ i, host->sg_cpu[i].start_addr,
+ host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,
+ host->sg_cpu[i].x_modify);
+ }
+ }
+ flush_dcache_range((unsigned int)host->sg_cpu,
+ (unsigned int)host->sg_cpu +
+ host->dma_len * sizeof(struct dma_desc_array));
+ /* Set the last descriptor to stop mode */
+ host->sg_cpu[host->dma_len - 1].cfg &= ~(DMAFLOW | NDSIZE);
+ host->sg_cpu[host->dma_len - 1].cfg |= DI_EN;
+
+ set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);
+ set_dma_x_count(host->dma_ch, 0);
+ set_dma_x_modify(host->dma_ch, 0);
+ set_dma_config(host->dma_ch, dma_cfg);
+#elif defined(CONFIG_BF51x)
+ /* RSI DMA doesn't work in array mode */
+ dma_cfg |= WDSIZE_32 | DMAEN;
+ set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));
+ set_dma_x_count(host->dma_ch, length / 4);
+ set_dma_x_modify(host->dma_ch, 4);
+ set_dma_config(host->dma_ch, dma_cfg);
+#endif
+ bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E);
+
+ SSYNC();
+
+ dev_dbg(mmc_dev(host->mmc), "%s exit\n", __func__);
+ return 0;
+}
+
+static void sdh_start_cmd(struct sdh_host *host, struct mmc_command *cmd)
+{
+ unsigned int sdh_cmd;
+ unsigned int stat_mask;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter cmd: 0x%p\n", __func__, cmd);
+ WARN_ON(host->cmd != NULL);
+ host->cmd = cmd;
+
+ sdh_cmd = 0;
+ stat_mask = 0;
+
+ sdh_cmd |= cmd->opcode;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ sdh_cmd |= CMD_RSP;
+ stat_mask |= CMD_RESP_END;
+ } else {
+ stat_mask |= CMD_SENT;
+ }
+
+ if (cmd->flags & MMC_RSP_136)
+ sdh_cmd |= CMD_L_RSP;
+
+ stat_mask |= CMD_CRC_FAIL | CMD_TIME_OUT;
+
+ sdh_enable_stat_irq(host, stat_mask);
+
+ bfin_write_SDH_ARGUMENT(cmd->arg);
+ bfin_write_SDH_COMMAND(sdh_cmd | CMD_E);
+ bfin_write_SDH_CLK_CTL(bfin_read_SDH_CLK_CTL() | CLK_E);
+ SSYNC();
+}
+
+static void sdh_finish_request(struct sdh_host *host, struct mmc_request *mrq)
+{
+ dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
+static int sdh_cmd_done(struct sdh_host *host, unsigned int stat)
+{
+ struct mmc_command *cmd = host->cmd;
+ int ret = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter cmd: %p\n", __func__, cmd);
+ if (!cmd)
+ return 0;
+
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = bfin_read_SDH_RESPONSE0();
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[1] = bfin_read_SDH_RESPONSE1();
+ cmd->resp[2] = bfin_read_SDH_RESPONSE2();
+ cmd->resp[3] = bfin_read_SDH_RESPONSE3();
+ }
+ }
+ if (stat & CMD_TIME_OUT)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & CMD_CRC_FAIL && cmd->flags & MMC_RSP_CRC)
+ cmd->error = -EILSEQ;
+
+ sdh_disable_stat_irq(host, (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL));
+
+ if (host->data && !cmd->error) {
+ if (host->data->flags & MMC_DATA_WRITE) {
+ ret = sdh_setup_data(host, host->data);
+ if (ret)
+ return 0;
+ }
+
+ sdh_enable_stat_irq(host, DAT_END | RX_OVERRUN | TX_UNDERRUN | DAT_TIME_OUT);
+ } else
+ sdh_finish_request(host, host->mrq);
+
+ return 1;
+}
+
+static int sdh_data_done(struct sdh_host *host, unsigned int stat)
+{
+ struct mmc_data *data = host->data;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter stat: 0x%x\n", __func__, stat);
+ if (!data)
+ return 0;
+
+ disable_dma(host->dma_ch);
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ host->dma_dir);
+
+ if (stat & DAT_TIME_OUT)
+ data->error = -ETIMEDOUT;
+ else if (stat & DAT_CRC_FAIL)
+ data->error = -EILSEQ;
+ else if (stat & (RX_OVERRUN | TX_UNDERRUN))
+ data->error = -EIO;
+
+ if (!data->error)
+ data->bytes_xfered = data->blocks * data->blksz;
+ else
+ data->bytes_xfered = 0;
+
+ sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);
+ bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \
+ DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);
+ bfin_write_SDH_DATA_CTL(0);
+ SSYNC();
+
+ host->data = NULL;
+ if (host->mrq->stop) {
+ sdh_stop_clock(host);
+ sdh_start_cmd(host, host->mrq->stop);
+ } else {
+ sdh_finish_request(host, host->mrq);
+ }
+
+ return 1;
+}
+
+static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct sdh_host *host = mmc_priv(mmc);
+ int ret = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter, mrq:%p, cmd:%p\n", __func__, mrq, mrq->cmd);
+ WARN_ON(host->mrq != NULL);
+
+ host->mrq = mrq;
+ host->data = mrq->data;
+
+ if (mrq->data && mrq->data->flags & MMC_DATA_READ) {
+ ret = sdh_setup_data(host, mrq->data);
+ if (ret)
+ return;
+ }
+
+ sdh_start_cmd(host, mrq->cmd);
+}
+
+static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdh_host *host;
+ unsigned long flags;
+ u16 clk_ctl = 0;
+ u16 pwr_ctl = 0;
+ u16 cfg;
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (ios->clock) {
+ unsigned long sys_clk, ios_clk;
+ unsigned char clk_div;
+ ios_clk = 2 * ios->clock;
+ sys_clk = get_sclk();
+ clk_div = sys_clk / ios_clk;
+ if (sys_clk % ios_clk == 0)
+ clk_div -= 1;
+ clk_div = min_t(unsigned char, clk_div, 0xFF);
+ clk_ctl |= clk_div;
+ clk_ctl |= CLK_E;
+ host->clk_div = clk_div;
+ } else
+ sdh_stop_clock(host);
+
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
+ pwr_ctl |= ROD_CTL;
+#else
+ pwr_ctl |= SD_CMD_OD | ROD_CTL;
+#endif
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ cfg = bfin_read_SDH_CFG();
+ cfg &= ~PD_SDDAT3;
+ cfg |= PUP_SDDAT3;
+ /* Enable 4 bit SDIO */
+ cfg |= (SD4E | MWE);
+ bfin_write_SDH_CFG(cfg);
+ clk_ctl |= WIDE_BUS;
+ } else {
+ cfg = bfin_read_SDH_CFG();
+ cfg |= MWE;
+ bfin_write_SDH_CFG(cfg);
+ }
+
+ bfin_write_SDH_CLK_CTL(clk_ctl);
+
+ host->power_mode = ios->power_mode;
+ if (ios->power_mode == MMC_POWER_ON)
+ pwr_ctl |= PWR_ON;
+
+ bfin_write_SDH_PWR_CTL(pwr_ctl);
+ SSYNC();
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",
+ host->clk_div,
+ host->clk_div ? get_sclk() / (2 * (host->clk_div + 1)) : 0,
+ ios->clock);
+}
+
+static const struct mmc_host_ops sdh_ops = {
+ .request = sdh_request,
+ .set_ios = sdh_set_ios,
+};
+
+static irqreturn_t sdh_dma_irq(int irq, void *devid)
+{
+ struct sdh_host *host = devid;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__,
+ get_dma_curr_irqstat(host->dma_ch));
+ clear_dma_irqstat(host->dma_ch);
+ SSYNC();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sdh_stat_irq(int irq, void *devid)
+{
+ struct sdh_host *host = devid;
+ unsigned int status;
+ int handled = 0;
+
+ dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__);
+ status = bfin_read_SDH_E_STATUS();
+ if (status & SD_CARD_DET) {
+ mmc_detect_change(host->mmc, 0);
+ bfin_write_SDH_E_STATUS(SD_CARD_DET);
+ }
+ status = bfin_read_SDH_STATUS();
+ if (status & (CMD_SENT | CMD_RESP_END | CMD_TIME_OUT | CMD_CRC_FAIL)) {
+ handled |= sdh_cmd_done(host, status);
+ bfin_write_SDH_STATUS_CLR(CMD_SENT_STAT | CMD_RESP_END_STAT | \
+ CMD_TIMEOUT_STAT | CMD_CRC_FAIL_STAT);
+ SSYNC();
+ }
+
+ status = bfin_read_SDH_STATUS();
+ if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))
+ handled |= sdh_data_done(host, status);
+
+ dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int __devinit sdh_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct sdh_host *host;
+ struct bfin_sd_host *drv_data = get_sdh_data(pdev);
+ int ret;
+
+ if (!drv_data) {
+ dev_err(&pdev->dev, "missing platform driver data\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mmc->ops = &sdh_ops;
+ mmc->max_phys_segs = 32;
+ mmc->max_seg_size = 1 << 16;
+ mmc->max_blk_size = 1 << 11;
+ mmc->max_blk_count = 1 << 11;
+ mmc->max_req_size = PAGE_SIZE;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ mmc->f_max = get_sclk();
+ mmc->f_min = mmc->f_max >> 9;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+
+ spin_lock_init(&host->lock);
+ host->irq = drv_data->irq_int0;
+ host->dma_ch = drv_data->dma_chan;
+
+ ret = request_dma(host->dma_ch, DRIVER_NAME "DMA");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request DMA channel\n");
+ goto out1;
+ }
+
+ ret = set_dma_callback(host->dma_ch, sdh_dma_irq, host);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request DMA irq\n");
+ goto out2;
+ }
+
+ host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
+ if (host->sg_cpu == NULL) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ platform_set_drvdata(pdev, mmc);
+ mmc_add_host(mmc);
+
+ ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request status irq\n");
+ goto out3;
+ }
+
+ ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request peripheral pins\n");
+ goto out4;
+ }
+#if defined(CONFIG_BF54x)
+ /* Secure Digital Host shares DMA with Nand controller */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
+#endif
+
+ bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
+ SSYNC();
+
+ /* Disable the card-insertion detection pin. Set MMC_CAP_NEEDS_POLL and
+ * the mmc stack will do the detection.
+ */
+ bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
+ SSYNC();
+
+ return 0;
+
+out4:
+ free_irq(host->irq, host);
+out3:
+ mmc_remove_host(mmc);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+out2:
+ free_dma(host->dma_ch);
+out1:
+ mmc_free_host(mmc);
+ out:
+ return ret;
+}
+
+static int __devexit sdh_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (mmc) {
+ struct sdh_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+
+ sdh_stop_clock(host);
+ free_irq(host->irq, host);
+ free_dma(host->dma_ch);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+ mmc_free_host(mmc);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sdh_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct bfin_sd_host *drv_data = get_sdh_data(dev);
+ int ret = 0;
+
+ if (mmc)
+ ret = mmc_suspend_host(mmc, state);
+
+ bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
+ peripheral_free_list(drv_data->pin_req);
+
+ return ret;
+}
+
+static int sdh_resume(struct platform_device *dev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(dev);
+ struct bfin_sd_host *drv_data = get_sdh_data(dev);
+ int ret = 0;
+
+ ret = peripheral_request_list(drv_data->pin_req, DRIVER_NAME);
+ if (ret) {
+ dev_err(&dev->dev, "unable to request peripheral pins\n");
+ return ret;
+ }
+
+ bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON);
+#if defined(CONFIG_BF54x)
+ /* Secure Digital Host shares DMA with Nand controller */
+ bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1);
+#endif
+ bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN);
+ SSYNC();
+
+ bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3));
+ SSYNC();
+
+ if (mmc)
+ ret = mmc_resume_host(mmc);
+
+ return ret;
+}
+#else
+# define sdh_suspend NULL
+# define sdh_resume NULL
+#endif
+
+static struct platform_driver sdh_driver = {
+ .probe = sdh_probe,
+ .remove = __devexit_p(sdh_remove),
+ .suspend = sdh_suspend,
+ .resume = sdh_resume,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sdh_init(void)
+{
+ return platform_driver_register(&sdh_driver);
+}
+module_init(sdh_init);
+
+static void __exit sdh_exit(void)
+{
+ platform_driver_unregister(&sdh_driver);
+}
+module_exit(sdh_exit);
+
+MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
+MODULE_AUTHOR("Cliff Cai, Roy Huang");
+MODULE_LICENSE("GPL");
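
sdh_set_ios() above derives the clock divider from get_sclk() and the requested rate, with the SDH clock running at sclk / (2 * (clk_div + 1)). A stand-alone worked example of that arithmetic, using an assumed 100 MHz system clock (illustrative only):

#include <stdio.h>

/* Worked example of the divider math in sdh_set_ios(): pick the smallest
 * divider value whose resulting rate does not exceed the requested clock. */
static unsigned pick_clk_div(unsigned long sclk, unsigned long target)
{
        unsigned long ios_clk = 2 * target;
        unsigned long div = sclk / ios_clk;

        if (sclk % ios_clk == 0)   /* exact fit: the -1 keeps the rate at target */
                div -= 1;
        if (div > 0xFF)            /* 8-bit divider field */
                div = 0xFF;
        return (unsigned)div;
}

int main(void)
{
        unsigned long sclk = 100000000;              /* assumed 100 MHz sclk */
        unsigned div = pick_clk_div(sclk, 25000000); /* ask for 25 MHz */
        printf("clk_div=%u -> %lu Hz\n", div, sclk / (2UL * (div + 1)));
        return 0;
}
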
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
new file mode 100644
index 00000000000..dd45e7c3517
--- /dev/null
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -0,0 +1,1349 @@
+/*
+ * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Original author: Purushotam Kumar
+ * Copyright (C) 2009 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/mmc/mmc.h>
+
+#include <mach/mmc.h>
+#include <mach/edma.h>
+
+/*
+ * Register Definitions
+ */
+#define DAVINCI_MMCCTL 0x00 /* Control Register */
+#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */
+#define DAVINCI_MMCST0 0x08 /* Status Register 0 */
+#define DAVINCI_MMCST1 0x0C /* Status Register 1 */
+#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */
+#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */
+#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */
+#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */
+#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */
+#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */
+#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */
+#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */
+#define DAVINCI_MMCCMD 0x30 /* Command Register */
+#define DAVINCI_MMCARGHL 0x34 /* Argument Register */
+#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */
+#define DAVINCI_MMCRSP23 0x3C /* Response Register 2 and 3 */
+#define DAVINCI_MMCRSP45 0x40 /* Response Register 4 and 5 */
+#define DAVINCI_MMCRSP67 0x44 /* Response Register 6 and 7 */
+#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */
+#define DAVINCI_MMCETOK 0x4C
+#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */
+#define DAVINCI_MMCCKC 0x54
+#define DAVINCI_MMCTORC 0x58
+#define DAVINCI_MMCTODC 0x5C
+#define DAVINCI_MMCBLNC 0x60
+#define DAVINCI_SDIOCTL 0x64
+#define DAVINCI_SDIOST0 0x68
+#define DAVINCI_SDIOEN 0x6C
+#define DAVINCI_SDIOST 0x70
+#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
+
+/* DAVINCI_MMCCTL definitions */
+#define MMCCTL_DATRST (1 << 0)
+#define MMCCTL_CMDRST (1 << 1)
+#define MMCCTL_WIDTH_4_BIT (1 << 2)
+#define MMCCTL_DATEG_DISABLED (0 << 6)
+#define MMCCTL_DATEG_RISING (1 << 6)
+#define MMCCTL_DATEG_FALLING (2 << 6)
+#define MMCCTL_DATEG_BOTH (3 << 6)
+#define MMCCTL_PERMDR_LE (0 << 9)
+#define MMCCTL_PERMDR_BE (1 << 9)
+#define MMCCTL_PERMDX_LE (0 << 10)
+#define MMCCTL_PERMDX_BE (1 << 10)
+
+/* DAVINCI_MMCCLK definitions */
+#define MMCCLK_CLKEN (1 << 8)
+#define MMCCLK_CLKRT_MASK (0xFF << 0)
+
+/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
+#define MMCST0_DATDNE BIT(0) /* data done */
+#define MMCST0_BSYDNE BIT(1) /* busy done */
+#define MMCST0_RSPDNE BIT(2) /* command done */
+#define MMCST0_TOUTRD BIT(3) /* data read timeout */
+#define MMCST0_TOUTRS BIT(4) /* command response timeout */
+#define MMCST0_CRCWR BIT(5) /* data write CRC error */
+#define MMCST0_CRCRD BIT(6) /* data read CRC error */
+#define MMCST0_CRCRS BIT(7) /* command response CRC error */
+#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */
+#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/
+#define MMCST0_DATED BIT(11) /* DAT3 edge detect */
+#define MMCST0_TRNDNE BIT(12) /* transfer done */
+
+/* DAVINCI_MMCST1 definitions */
+#define MMCST1_BUSY (1 << 0)
+
+/* DAVINCI_MMCCMD definitions */
+#define MMCCMD_CMD_MASK (0x3F << 0)
+#define MMCCMD_PPLEN (1 << 7)
+#define MMCCMD_BSYEXP (1 << 8)
+#define MMCCMD_RSPFMT_MASK (3 << 9)
+#define MMCCMD_RSPFMT_NONE (0 << 9)
+#define MMCCMD_RSPFMT_R1456 (1 << 9)
+#define MMCCMD_RSPFMT_R2 (2 << 9)
+#define MMCCMD_RSPFMT_R3 (3 << 9)
+#define MMCCMD_DTRW (1 << 11)
+#define MMCCMD_STRMTP (1 << 12)
+#define MMCCMD_WDATX (1 << 13)
+#define MMCCMD_INITCK (1 << 14)
+#define MMCCMD_DCLR (1 << 15)
+#define MMCCMD_DMATRIG (1 << 16)
+
+/* DAVINCI_MMCFIFOCTL definitions */
+#define MMCFIFOCTL_FIFORST (1 << 0)
+#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
+#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
+#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
+#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */
+#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */
+#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
+#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
+
+
+/* MMCSD Init clock in Hz in opendrain mode */
+#define MMCSD_INIT_CLOCK 200000
+
+/*
+ * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
+ * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
+ * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
+ * than the page or two that's otherwise typical. NR_SG == 16 gives at
+ * least the same throughput boost, using EDMA transfer linkage instead
+ * of spending CPU time copying pages.
+ */
+#define MAX_CCNT ((1 << 16) - 1)
+
+#define NR_SG 16
+
+static unsigned rw_threshold = 32;
+module_param(rw_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(rw_threshold,
+ "Read/Write threshold. Default = 32");
+
+static unsigned __initdata use_dma = 1;
+module_param(use_dma, uint, 0);
+MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
+
+struct mmc_davinci_host {
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_host *mmc;
+ struct clk *clk;
+ unsigned int mmc_input_clk;
+ void __iomem *base;
+ struct resource *mem_res;
+ int irq;
+ unsigned char bus_mode;
+
+#define DAVINCI_MMC_DATADIR_NONE 0
+#define DAVINCI_MMC_DATADIR_READ 1
+#define DAVINCI_MMC_DATADIR_WRITE 2
+ unsigned char data_dir;
+
+ /* buffer is used during PIO of one scatterlist segment, and
+ * is updated along with buffer_bytes_left. bytes_left applies
+ * to all N blocks of the PIO transfer.
+ */
+ u8 *buffer;
+ u32 buffer_bytes_left;
+ u32 bytes_left;
+
+ u32 rxdma, txdma;
+ bool use_dma;
+ bool do_dma;
+
+ /* Scatterlist DMA uses one or more parameter RAM entries:
+ * the main one (associated with rxdma or txdma) plus zero or
+ * more links. The entries for a given transfer differ only
+ * by memory buffer (address, length) and link field.
+ */
+ struct edmacc_param tx_template;
+ struct edmacc_param rx_template;
+ unsigned n_link;
+ u32 links[NR_SG - 1];
+
+ /* For PIO we walk scatterlists one segment at a time. */
+ unsigned int sg_len;
+ struct scatterlist *sg;
+
+ /* Version of the MMC/SD controller */
+ u8 version;
+ /* for ns in one cycle calculation */
+ unsigned ns_in_one_cycle;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+};
+
+
+/* PIO only */
+static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
+{
+ host->buffer_bytes_left = sg_dma_len(host->sg);
+ host->buffer = sg_virt(host->sg);
+ if (host->buffer_bytes_left > host->bytes_left)
+ host->buffer_bytes_left = host->bytes_left;
+}
+
+static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
+ unsigned int n)
+{
+ u8 *p;
+ unsigned int i;
+
+ if (host->buffer_bytes_left == 0) {
+ host->sg = sg_next(host->data->sg);
+ mmc_davinci_sg_to_buf(host);
+ }
+
+ p = host->buffer;
+ if (n > host->buffer_bytes_left)
+ n = host->buffer_bytes_left;
+ host->buffer_bytes_left -= n;
+ host->bytes_left -= n;
+
+ /* NOTE: we never transfer more than rw_threshold bytes
+ * to/from the fifo here; there's no I/O overlap.
+ * This also assumes that the access width (i.e. ACCWD) is 4 bytes
+ */
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ for (i = 0; i < (n >> 2); i++) {
+ writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
+ p = p + 4;
+ }
+ if (n & 3) {
+ iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
+ p = p + (n & 3);
+ }
+ } else {
+ for (i = 0; i < (n >> 2); i++) {
+ *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
+ p = p + 4;
+ }
+ if (n & 3) {
+ ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
+ p = p + (n & 3);
+ }
+ }
+ host->buffer = p;
+}
+
+static void mmc_davinci_start_command(struct mmc_davinci_host *host,
+ struct mmc_command *cmd)
+{
+ u32 cmd_reg = 0;
+ u32 im_val;
+
+ dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
+ cmd->opcode, cmd->arg,
+ ({ char *s;
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ s = ", R1/R5/R6/R7 response";
+ break;
+ case MMC_RSP_R1B:
+ s = ", R1b response";
+ break;
+ case MMC_RSP_R2:
+ s = ", R2 response";
+ break;
+ case MMC_RSP_R3:
+ s = ", R3/R4 response";
+ break;
+ default:
+ s = ", (R? response)";
+ break;
+ }; s; }));
+ host->cmd = cmd;
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1B:
+ /* There's some spec confusion about when R1B is
+ * allowed, but if the card doesn't issue a BUSY
+ * then it's harmless for us to allow it.
+ */
+ cmd_reg |= MMCCMD_BSYEXP;
+ /* FALLTHROUGH */
+ case MMC_RSP_R1: /* 48 bits, CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R1456;
+ break;
+ case MMC_RSP_R2: /* 136 bits, CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R2;
+ break;
+ case MMC_RSP_R3: /* 48 bits, no CRC */
+ cmd_reg |= MMCCMD_RSPFMT_R3;
+ break;
+ default:
+ cmd_reg |= MMCCMD_RSPFMT_NONE;
+ dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
+ mmc_resp_type(cmd));
+ break;
+ }
+
+ /* Set command index */
+ cmd_reg |= cmd->opcode;
+
+ /* Enable EDMA transfer triggers */
+ if (host->do_dma)
+ cmd_reg |= MMCCMD_DMATRIG;
+
+ if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
+ host->data_dir == DAVINCI_MMC_DATADIR_READ)
+ cmd_reg |= MMCCMD_DMATRIG;
+
+ /* Setting whether command involves data transfer or not */
+ if (cmd->data)
+ cmd_reg |= MMCCMD_WDATX;
+
+ /* Setting whether stream or block transfer */
+ if (cmd->flags & MMC_DATA_STREAM)
+ cmd_reg |= MMCCMD_STRMTP;
+
+ /* Setting whether data read or write */
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
+ cmd_reg |= MMCCMD_DTRW;
+
+ if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
+ cmd_reg |= MMCCMD_PPLEN;
+
+ /* set Command timeout */
+ writel(0x1FFF, host->base + DAVINCI_MMCTOR);
+
+ /* Enable interrupt (calculate here, defer until FIFO is stuffed). */
+ im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ im_val |= MMCST0_DATDNE | MMCST0_CRCWR;
+
+ if (!host->do_dma)
+ im_val |= MMCST0_DXRDY;
+ } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
+ im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;
+
+ if (!host->do_dma)
+ im_val |= MMCST0_DRRDY;
+ }
+
+ /*
+ * Before non-DMA WRITE commands the controller needs priming:
+ * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size
+ */
+ if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
+ davinci_fifo_data_trans(host, rw_threshold);
+
+ writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
+ writel(cmd_reg, host->base + DAVINCI_MMCCMD);
+ writel(im_val, host->base + DAVINCI_MMCIM);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* DMA infrastructure */
+
+static void davinci_abort_dma(struct mmc_davinci_host *host)
+{
+ int sync_dev;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
+ sync_dev = host->rxdma;
+ else
+ sync_dev = host->txdma;
+
+ edma_stop(sync_dev);
+ edma_clean_channel(sync_dev);
+}
+
+static void
+mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);
+
+static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
+{
+ if (DMA_COMPLETE != ch_status) {
+ struct mmc_davinci_host *host = data;
+
+ /* Currently means: DMA Event Missed, or "null" transfer
+ * request was seen. In the future, TC errors (like bad
+ * addresses) might be presented too.
+ */
+ dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
+ (host->data->flags & MMC_DATA_WRITE)
+ ? "write" : "read");
+ host->data->error = -EIO;
+ mmc_davinci_xfer_done(host, host->data);
+ }
+}
+
+/* Set up tx or rx template, to be modified and updated later */
+static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
+ bool tx, struct edmacc_param *template)
+{
+ unsigned sync_dev;
+ const u16 acnt = 4;
+ const u16 bcnt = rw_threshold >> 2;
+ const u16 ccnt = 0;
+ u32 src_port = 0;
+ u32 dst_port = 0;
+ s16 src_bidx, dst_bidx;
+ s16 src_cidx, dst_cidx;
+
+ /*
+ * A-B Sync transfer: each DMA request is for one "frame" of
+ * rw_threshold bytes, broken into "acnt"-size chunks repeated
+ * "bcnt" times. Each segment needs "ccnt" such frames; since
+ * we tell the block layer our mmc->max_seg_size limit, we can
+ * trust (later) that it's within bounds.
+ *
+ * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
+ * EDMA will optimize memory operations to use larger bursts.
+ */
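+ /*
+ * For example, with the usual rw_threshold of 32 bytes this gives
+ * acnt = 4 and bcnt = 8; ccnt is filled in later as the number of
+ * 32-byte frames in each scatterlist segment.
+ */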
+ if (tx) {
+ sync_dev = host->txdma;
+
+ /* src_prt, ccnt, and link to be set up later */
+ src_bidx = acnt;
+ src_cidx = acnt * bcnt;
+
+ dst_port = host->mem_res->start + DAVINCI_MMCDXR;
+ dst_bidx = 0;
+ dst_cidx = 0;
+ } else {
+ sync_dev = host->rxdma;
+
+ src_port = host->mem_res->start + DAVINCI_MMCDRR;
+ src_bidx = 0;
+ src_cidx = 0;
+
+ /* dst_prt, ccnt, and link to be set up later */
+ dst_bidx = acnt;
+ dst_cidx = acnt * bcnt;
+ }
+
+ /*
+ * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
+ * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT
+ * parameter is ignored.
+ */
+ edma_set_src(sync_dev, src_port, INCR, W8BIT);
+ edma_set_dest(sync_dev, dst_port, INCR, W8BIT);
+
+ edma_set_src_index(sync_dev, src_bidx, src_cidx);
+ edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);
+
+ edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);
+
+ edma_read_slot(sync_dev, template);
+
+ /* don't bother with irqs or chaining */
+ template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
+}
+
+static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
+ struct mmc_data *data)
+{
+ struct edmacc_param *template;
+ int channel, slot;
+ unsigned link;
+ struct scatterlist *sg;
+ unsigned sg_len;
+ unsigned bytes_left = host->bytes_left;
+ const unsigned shift = ffs(rw_threshold) - 1;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ template = &host->tx_template;
+ channel = host->txdma;
+ } else {
+ template = &host->rx_template;
+ channel = host->rxdma;
+ }
+
+ /* We know sg_len and ccnt will never be out of range because
+ * we told the mmc layer which in turn tells the block layer
+ * to ensure that it only hands us one scatterlist segment
+ * per EDMA PARAM entry. Update the PARAM
+ * entries needed for each segment of this scatterlist.
+ */
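+ /* The first segment is programmed into the channel's own PARAM slot;
+ * each further segment uses one of the pre-allocated host->links[]
+ * slots, chained through link_bcntrld, with 0xffff terminating the
+ * chain after the last segment.
+ */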
+ for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
+ sg_len-- != 0 && bytes_left;
+ sg = sg_next(sg), slot = host->links[link++]) {
+ u32 buf = sg_dma_address(sg);
+ unsigned count = sg_dma_len(sg);
+
+ template->link_bcntrld = sg_len
+ ? (EDMA_CHAN_SLOT(host->links[link]) << 5)
+ : 0xffff;
+
+ if (count > bytes_left)
+ count = bytes_left;
+ bytes_left -= count;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
+ template->src = buf;
+ else
+ template->dst = buf;
+ template->ccnt = count >> shift;
+
+ edma_write_slot(slot, template);
+ }
+
+ if (host->version == MMC_CTLR_VERSION_2)
+ edma_clear_event(channel);
+
+ edma_start(channel);
+}
+
+static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
+ struct mmc_data *data)
+{
+ int i;
+ int mask = rw_threshold - 1;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE));
+
+ /* no individual DMA segment should need a partial FIFO */
+ for (i = 0; i < host->sg_len; i++) {
+ if (sg_dma_len(data->sg + i) & mask) {
+ dma_unmap_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ return -1;
+ }
+ }
+
+ host->do_dma = 1;
+ mmc_davinci_send_dma_request(host, data);
+
+ return 0;
+}
+
+static void __init_or_module
+davinci_release_dma_channels(struct mmc_davinci_host *host)
+{
+ unsigned i;
+
+ if (!host->use_dma)
+ return;
+
+ for (i = 0; i < host->n_link; i++)
+ edma_free_slot(host->links[i]);
+
+ edma_free_channel(host->txdma);
+ edma_free_channel(host->rxdma);
+}
+
+static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+{
+ int r, i;
+
+ /* Acquire master DMA write channel */
+ r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
+ EVENTQ_DEFAULT);
+ if (r < 0) {
+ dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
+ "tx", r);
+ return r;
+ }
+ mmc_davinci_dma_setup(host, true, &host->tx_template);
+
+ /* Acquire master DMA read channel */
+ r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
+ EVENTQ_DEFAULT);
+ if (r < 0) {
+ dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
+ "rx", r);
+ goto free_master_write;
+ }
+ mmc_davinci_dma_setup(host, false, &host->rx_template);
+
+ /* Allocate parameter RAM slots, which will later be bound to a
+ * channel as needed to handle a scatterlist.
+ */
+ for (i = 0; i < ARRAY_SIZE(host->links); i++) {
+ r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
+ if (r < 0) {
+ dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
+ r);
+ break;
+ }
+ host->links[i] = r;
+ }
+ host->n_link = i;
+
+ return 0;
+
+free_master_write:
+ edma_free_channel(host->txdma);
+
+ return r;
+}
+
+/*----------------------------------------------------------------------*/
+
+static void
+mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
+{
+ int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
+ int timeout;
+ struct mmc_data *data = req->data;
+
+ if (host->version == MMC_CTLR_VERSION_2)
+ fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
+
+ host->data = data;
+ if (data == NULL) {
+ host->data_dir = DAVINCI_MMC_DATADIR_NONE;
+ writel(0, host->base + DAVINCI_MMCBLEN);
+ writel(0, host->base + DAVINCI_MMCNBLK);
+ return;
+ }
+
+ dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
+ (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
+ (data->flags & MMC_DATA_WRITE) ? "write" : "read",
+ data->blocks, data->blksz);
+ dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
+ data->timeout_clks, data->timeout_ns);
+ timeout = data->timeout_clks +
+ (data->timeout_ns / host->ns_in_one_cycle);
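+ /* MMCTOD is a 16-bit register, so cap the cycle count at 0xffff */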
+ if (timeout > 0xffff)
+ timeout = 0xffff;
+
+ writel(timeout, host->base + DAVINCI_MMCTOD);
+ writel(data->blocks, host->base + DAVINCI_MMCNBLK);
+ writel(data->blksz, host->base + DAVINCI_MMCBLEN);
+
+ /* Configure the FIFO: pulse FIFORST with the direction bit, then clear it */
+ switch (data->flags & MMC_DATA_WRITE) {
+ case MMC_DATA_WRITE:
+ host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
+ host->base + DAVINCI_MMCFIFOCTL);
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
+ host->base + DAVINCI_MMCFIFOCTL);
+ break;
+
+ default:
+ host->data_dir = DAVINCI_MMC_DATADIR_READ;
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
+ host->base + DAVINCI_MMCFIFOCTL);
+ writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
+ host->base + DAVINCI_MMCFIFOCTL);
+ break;
+ }
+
+ host->buffer = NULL;
+ host->bytes_left = data->blocks * data->blksz;
+
+ /* For now we try to use DMA whenever we won't need partial FIFO
+ * reads or writes, either for the whole transfer (as tested here)
+ * or for any individual scatterlist segment (tested when we call
+ * start_dma_transfer).
+ *
+ * While we *could* change that, unusual block sizes are rarely
+ * used. The occasional fallback to PIO shouldn't hurt.
+ */
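+ /* For example, standard 512-byte blocks are a multiple of the 32-byte
+ * threshold and take the DMA path; totals that aren't threshold-aligned
+ * drop back to PIO.
+ */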
+ if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
+ && mmc_davinci_start_dma_transfer(host, data) == 0) {
+ /* zero this to ensure we take no PIO paths */
+ host->bytes_left = 0;
+ } else {
+ /* Revert to CPU Copy */
+ host->sg_len = data->sg_len;
+ host->sg = host->data->sg;
+ mmc_davinci_sg_to_buf(host);
+ }
+}
+
+static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
+{
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+ unsigned long timeout = jiffies + msecs_to_jiffies(900);
+ u32 mmcst1 = 0;
+
+ /* Card may still be sending BUSY after a previous operation,
+ * typically some kind of write. If so, we can't proceed yet.
+ */
+ while (time_before(jiffies, timeout)) {
+ mmcst1 = readl(host->base + DAVINCI_MMCST1);
+ if (!(mmcst1 & MMCST1_BUSY))
+ break;
+ cpu_relax();
+ }
+ if (mmcst1 & MMCST1_BUSY) {
+ dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
+ req->cmd->error = -ETIMEDOUT;
+ mmc_request_done(mmc, req);
+ return;
+ }
+
+ host->do_dma = 0;
+ mmc_davinci_prepare_data(host, req);
+ mmc_davinci_start_command(host, req->cmd);
+}
+
+static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
+ unsigned int mmc_req_freq)
+{
+ unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;
+
+ mmc_pclk = host->mmc_input_clk;
+ if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
+ mmc_push_pull_divisor = ((unsigned int)mmc_pclk
+ / (2 * mmc_req_freq)) - 1;
+ else
+ mmc_push_pull_divisor = 0;
+
+ mmc_freq = (unsigned int)mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1));
+
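+ /* If that rate still exceeds the request, bump the divisor by one so
+ * the card is never overclocked.
+ */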
+ if (mmc_freq > mmc_req_freq)
+ mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
+ /* Compute the cycle time in ns (used to convert ns timeouts to clock cycles) */
+ if (mmc_req_freq <= 400000)
+ host->ns_in_one_cycle = (1000000) / (((mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1)))/1000));
+ else
+ host->ns_in_one_cycle = (1000000) / (((mmc_pclk
+ / (2 * (mmc_push_pull_divisor + 1)))/1000000));
+
+ return mmc_push_pull_divisor;
+}
+
+static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ unsigned int open_drain_freq = 0, mmc_pclk = 0;
+ unsigned int mmc_push_pull_freq = 0;
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+ mmc_pclk = host->mmc_input_clk;
+
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
+ u32 temp;
+
+ /* Ignore the init clock value passed in, to fix interoperability
+ * with different cards.
+ */
+ open_drain_freq = ((unsigned int)mmc_pclk
+ / (2 * MMCSD_INIT_CLOCK)) - 1;
+
+ if (open_drain_freq > 0xFF)
+ open_drain_freq = 0xFF;
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
+ temp |= open_drain_freq;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ /* Cycle time in ns at the init clock */
+ host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
+ } else {
+ u32 temp;
+ mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);
+
+ if (mmc_push_pull_freq > 0xFF)
+ mmc_push_pull_freq = 0xFF;
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ udelay(10);
+
+ temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
+ temp |= mmc_push_pull_freq;
+ writel(temp, host->base + DAVINCI_MMCCLK);
+
+ writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
+
+ udelay(10);
+ }
+}
+
+static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ unsigned int mmc_pclk = 0;
+ struct mmc_davinci_host *host = mmc_priv(mmc);
+
+ mmc_pclk = host->mmc_input_clk;
+ dev_dbg(mmc_dev(host->mmc),
+ "clock %dHz busmode %d powermode %d Vdd %04x\n",
+ ios->clock, ios->bus_mode, ios->power_mode,
+ ios->vdd);
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
+ writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ } else {
+ dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n");
+ writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ }
+
+ calculate_clk_divider(mmc, ios);
+
+ host->bus_mode = ios->bus_mode;
+ if (ios->power_mode == MMC_POWER_UP) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+ bool lose = true;
+
+ /* Send clock cycles, poll completion */
+ writel(0, host->base + DAVINCI_MMCARGHL);
+ writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
+ while (time_before(jiffies, timeout)) {
+ u32 tmp = readl(host->base + DAVINCI_MMCST0);
+
+ if (tmp & MMCST0_RSPDNE) {
+ lose = false;
+ break;
+ }
+ cpu_relax();
+ }
+ if (lose)
+ dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
+ }
+
+ /* FIXME on power OFF, reset things ... */
+}
+
+static void
+mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ host->data = NULL;
+
+ if (host->do_dma) {
+ davinci_abort_dma(host);
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ host->do_dma = false;
+ }
+ host->data_dir = DAVINCI_MMC_DATADIR_NONE;
+
+ if (!data->stop || (host->cmd && host->cmd->error)) {
+ mmc_request_done(host->mmc, data->mrq);
+ writel(0, host->base + DAVINCI_MMCIM);
+ } else
+ mmc_davinci_start_command(host, data->stop);
+}
+
+static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
+ struct mmc_command *cmd)
+{
+ host->cmd = NULL;
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
+ cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
+ cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
+ cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
+ }
+ }
+
+ if (host->data == NULL || cmd->error) {
+ if (cmd->error == -ETIMEDOUT)
+ cmd->mrq->cmd->retries = 0;
+ mmc_request_done(host->mmc, cmd->mrq);
+ writel(0, host->base + DAVINCI_MMCIM);
+ }
+}
+
+static void
+davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
+{
+ u32 temp;
+
+ /* reset command and data state machines */
+ temp = readl(host->base + DAVINCI_MMCCTL);
+ writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST,
+ host->base + DAVINCI_MMCCTL);
+
+ temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
+ udelay(10);
+ writel(temp, host->base + DAVINCI_MMCCTL);
+}
+
+static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
+{
+ struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
+ unsigned int status, qstatus;
+ int end_command = 0;
+ int end_transfer = 0;
+ struct mmc_data *data = host->data;
+
+ if (host->cmd == NULL && host->data == NULL) {
+ status = readl(host->base + DAVINCI_MMCST0);
+ dev_dbg(mmc_dev(host->mmc),
+ "Spurious interrupt 0x%04x\n", status);
+ /* Disable the interrupt from mmcsd */
+ writel(0, host->base + DAVINCI_MMCIM);
+ return IRQ_NONE;
+ }
+
+ status = readl(host->base + DAVINCI_MMCST0);
+ qstatus = status;
+
+ /* Handle the FIFO first when using PIO for data.
+ * bytes_left will decrease to zero as I/O progresses, and status will
+ * read zero across iterations because this controller's status
+ * register (MMCST0) reports each event only once and is cleared by
+ * the read. So this is not an unbounded loop, even in the non-DMA
+ * case.
+ */
+ while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
+ davinci_fifo_data_trans(host, rw_threshold);
+ status = readl(host->base + DAVINCI_MMCST0);
+ if (!status)
+ break;
+ qstatus |= status;
+ }
+
+ if (qstatus & MMCST0_DATDNE) {
+ /* All blocks sent/received, and CRC checks passed */
+ if (data != NULL) {
+ if ((host->do_dma == 0) && (host->bytes_left > 0)) {
+ /* if datasize < rw_threshold
+ * no RX ints are generated
+ */
+ davinci_fifo_data_trans(host, host->bytes_left);
+ }
+ end_transfer = 1;
+ data->bytes_xfered = data->blocks * data->blksz;
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "DATDNE with no host->data\n");
+ }
+ }
+
+ if (qstatus & MMCST0_TOUTRD) {
+ /* Read data timeout */
+ data->error = -ETIMEDOUT;
+ end_transfer = 1;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "read data timeout, status %x\n",
+ qstatus);
+
+ davinci_abort_data(host, data);
+ }
+
+ if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
+ /* Data CRC error */
+ data->error = -EILSEQ;
+ end_transfer = 1;
+
+ /* NOTE: this controller uses CRCWR to report both CRC
+ * errors and timeouts (on writes). MMCDRSP values are
+ * only weakly documented, but 0x9f was clearly a timeout
+ * case and the two three-bit patterns in various SD specs
+ * (101, 010) aren't part of it ...
+ */
+ if (qstatus & MMCST0_CRCWR) {
+ u32 temp = readb(host->base + DAVINCI_MMCDRSP);
+
+ if (temp == 0x9f)
+ data->error = -ETIMEDOUT;
+ }
+ dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
+ (qstatus & MMCST0_CRCWR) ? "write" : "read",
+ (data->error == -ETIMEDOUT) ? "timeout" : "CRC");
+
+ davinci_abort_data(host, data);
+ }
+
+ if (qstatus & MMCST0_TOUTRS) {
+ /* Command timeout */
+ if (host->cmd) {
+ dev_dbg(mmc_dev(host->mmc),
+ "CMD%d timeout, status %x\n",
+ host->cmd->opcode, qstatus);
+ host->cmd->error = -ETIMEDOUT;
+ if (data) {
+ end_transfer = 1;
+ davinci_abort_data(host, data);
+ } else
+ end_command = 1;
+ }
+ }
+
+ if (qstatus & MMCST0_CRCRS) {
+ /* Command CRC error */
+ dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
+ if (host->cmd) {
+ host->cmd->error = -EILSEQ;
+ end_command = 1;
+ }
+ }
+
+ if (qstatus & MMCST0_RSPDNE) {
+ /* End of command phase */
+ end_command = (int) host->cmd;
+ }
+
+ if (end_command)
+ mmc_davinci_cmd_done(host, host->cmd);
+ if (end_transfer)
+ mmc_davinci_xfer_done(host, data);
+ return IRQ_HANDLED;
+}
+
+static int mmc_davinci_get_cd(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
+
+ if (!config || !config->get_cd)
+ return -ENOSYS;
+ return config->get_cd(pdev->id);
+}
+
+static int mmc_davinci_get_ro(struct mmc_host *mmc)
+{
+ struct platform_device *pdev = to_platform_device(mmc->parent);
+ struct davinci_mmc_config *config = pdev->dev.platform_data;
+
+ if (!config || !config->get_ro)
+ return -ENOSYS;
+ return config->get_ro(pdev->id);
+}
+
+static struct mmc_host_ops mmc_davinci_ops = {
+ .request = mmc_davinci_request,
+ .set_ios = mmc_davinci_set_ios,
+ .get_cd = mmc_davinci_get_cd,
+ .get_ro = mmc_davinci_get_ro,
+};
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_CPU_FREQ
+static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct mmc_davinci_host *host;
+ unsigned int mmc_pclk;
+ struct mmc_host *mmc;
+ unsigned long flags;
+
+ host = container_of(nb, struct mmc_davinci_host, freq_transition);
+ mmc = host->mmc;
+ mmc_pclk = clk_get_rate(host->clk);
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ spin_lock_irqsave(&mmc->lock, flags);
+ host->mmc_input_clk = mmc_pclk;
+ calculate_clk_divider(mmc, &mmc->ios);
+ spin_unlock_irqrestore(&mmc->lock, flags);
+ }
+
+ return 0;
+}
+
+static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
+{
+ host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;
+
+ return cpufreq_register_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
+{
+ cpufreq_unregister_notifier(&host->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+#else
+static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
+{
+ return 0;
+}
+
+static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
+{
+}
+#endif
+static void __init init_mmcsd_host(struct mmc_davinci_host *host)
+{
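+ /* Reset sequence: hold the CMD and DAT state machines in reset,
+ * re-enable the module clock and program maximum timeouts, then
+ * release both resets.
+ */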
+ /* DAT line portion is disabled and in reset state */
+ writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
+ host->base + DAVINCI_MMCCTL);
+
+ /* CMD line portion is disabled and in reset state */
+ writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
+ host->base + DAVINCI_MMCCTL);
+
+ udelay(10);
+
+ writel(0, host->base + DAVINCI_MMCCLK);
+ writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
+
+ writel(0x1FFF, host->base + DAVINCI_MMCTOR);
+ writel(0xFFFF, host->base + DAVINCI_MMCTOD);
+
+ writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST,
+ host->base + DAVINCI_MMCCTL);
+ writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
+ host->base + DAVINCI_MMCCTL);
+
+ udelay(10);
+}
+
+static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+{
+ struct davinci_mmc_config *pdata = pdev->dev.platform_data;
+ struct mmc_davinci_host *host = NULL;
+ struct mmc_host *mmc = NULL;
+ struct resource *r, *mem = NULL;
+ int ret = 0, irq = 0;
+ size_t mem_size;
+
+ /* REVISIT: when we're fully converted, fail if pdata is NULL */
+
+ ret = -ENODEV;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!r || irq == NO_IRQ)
+ goto out;
+
+ ret = -EBUSY;
+ mem_size = resource_size(r);
+ mem = request_mem_region(r->start, mem_size, pdev->name);
+ if (!mem)
+ goto out;
+
+ ret = -ENOMEM;
+ mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
+ if (!mmc)
+ goto out;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc; /* Important */
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r)
+ goto out;
+ host->rxdma = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!r)
+ goto out;
+ host->txdma = r->start;
+
+ host->mem_res = mem;
+ host->base = ioremap(mem->start, mem_size);
+ if (!host->base)
+ goto out;
+
+ ret = -ENXIO;
+ host->clk = clk_get(&pdev->dev, "MMCSDCLK");
+ if (IS_ERR(host->clk)) {
+ ret = PTR_ERR(host->clk);
+ goto out;
+ }
+ clk_enable(host->clk);
+ host->mmc_input_clk = clk_get_rate(host->clk);
+
+ init_mmcsd_host(host);
+
+ host->use_dma = use_dma;
+ host->irq = irq;
+
+ if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
+ host->use_dma = 0;
+
+ /* REVISIT: someday, support IRQ-driven card detection. */
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+ if (!pdata || pdata->wires == 4 || pdata->wires == 0)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (pdata)
+ host->version = pdata->version;
+
+ mmc->ops = &mmc_davinci_ops;
+ mmc->f_min = 312500;
+ mmc->f_max = 25000000;
+ if (pdata && pdata->max_freq)
+ mmc->f_max = pdata->max_freq;
+ if (pdata && pdata->caps)
+ mmc->caps |= pdata->caps;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+ /* With no iommu coalescing pages, each phys_seg is a hw_seg.
+ * Each hw_seg uses one EDMA parameter RAM slot, always one
+ * channel and then usually some linked slots.
+ */
+ mmc->max_hw_segs = 1 + host->n_link;
+ mmc->max_phys_segs = mmc->max_hw_segs;
+
+ /* EDMA limit per hw segment (one or two MBytes) */
+ mmc->max_seg_size = MAX_CCNT * rw_threshold;
+
+ /* MMC/SD controller limits for multiblock requests */
+ mmc->max_blk_size = 4095; /* BLEN is 12 bits */
+ mmc->max_blk_count = 65535; /* NBLK is 16 bits */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
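+ /* i.e. just under 256 MiB per request (4095 * 65535 bytes) */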
+
+ dev_dbg(mmc_dev(host->mmc), "max_phys_segs=%d\n", mmc->max_phys_segs);
+ dev_dbg(mmc_dev(host->mmc), "max_hw_segs=%d\n", mmc->max_hw_segs);
+ dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
+ dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
+ dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);
+
+ platform_set_drvdata(pdev, host);
+
+ ret = mmc_davinci_cpufreq_register(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register cpufreq\n");
+ goto cpu_freq_fail;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto out;
+
+ ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
+ if (ret)
+ goto out;
+
+ rename_region(mem, mmc_hostname(mmc));
+
+ dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
+ host->use_dma ? "DMA" : "PIO",
+ (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+
+ return 0;
+
+out:
+ mmc_davinci_cpufreq_deregister(host);
+cpu_freq_fail:
+ if (host) {
+ davinci_release_dma_channels(host);
+
+ if (host->clk) {
+ clk_disable(host->clk);
+ clk_put(host->clk);
+ }
+
+ if (host->base)
+ iounmap(host->base);
+ }
+
+ if (mmc)
+ mmc_free_host(mmc);
+
+ if (mem)
+ release_resource(mem);
+
+ dev_dbg(&pdev->dev, "probe err %d\n", ret);
+
+ return ret;
+}
+
+static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
+{
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ if (host) {
+ mmc_davinci_cpufreq_deregister(host);
+
+ mmc_remove_host(host->mmc);
+ free_irq(host->irq, host);
+
+ davinci_release_dma_channels(host);
+
+ clk_disable(host->clk);
+ clk_put(host->clk);
+
+ iounmap(host->base);
+
+ release_resource(host->mem_res);
+
+ mmc_free_host(host->mmc);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg)
+{
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ return mmc_suspend_host(host->mmc, msg);
+}
+
+static int davinci_mmcsd_resume(struct platform_device *pdev)
+{
+ struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+
+ return mmc_resume_host(host->mmc);
+}
+#else
+#define davinci_mmcsd_suspend NULL
+#define davinci_mmcsd_resume NULL
+#endif
+
+static struct platform_driver davinci_mmcsd_driver = {
+ .driver = {
+ .name = "davinci_mmc",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(davinci_mmcsd_remove),
+ .suspend = davinci_mmcsd_suspend,
+ .resume = davinci_mmcsd_resume,
+};
+
+static int __init davinci_mmcsd_init(void)
+{
+ return platform_driver_probe(&davinci_mmcsd_driver,
+ davinci_mmcsd_probe);
+}
+module_init(davinci_mmcsd_init);
+
+static void __exit davinci_mmcsd_exit(void)
+{
+ platform_driver_unregister(&davinci_mmcsd_driver);
+}
+module_exit(davinci_mmcsd_exit);
+
+MODULE_AUTHOR("Texas Instruments India");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index dba4600bcdb..b31946e0b4c 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -38,10 +38,9 @@
#include <asm/div64.h>
#include <asm/sizes.h>
-#include <asm/mach/mmc.h>
+#include <mach/mmc.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
-#include <mach/htc_pwrsink.h>
#include "msm_sdcc.h"
@@ -775,13 +774,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_OFF:
- htc_pwrsink_set(PWRSINK_SDCARD, 0);
break;
case MMC_POWER_UP:
pwr |= MCI_PWR_UP;
break;
case MMC_POWER_ON:
- htc_pwrsink_set(PWRSINK_SDCARD, 100);
pwr |= MCI_PWR_ON;
break;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 88671529c45..60a2b69e54f 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -679,17 +679,17 @@ static int mxcmci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct mxcmci_host *host = NULL;
- struct resource *r;
+ struct resource *iores, *r;
int ret = 0, irq;
printk(KERN_INFO "i.MX SDHC driver\n");
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
+ if (!iores || irq < 0)
return -EINVAL;
- r = request_mem_region(r->start, resource_size(r), pdev->name);
+ r = request_mem_region(iores->start, resource_size(iores), pdev->name);
if (!r)
return -EBUSY;
@@ -809,7 +809,7 @@ out_iounmap:
out_free:
mmc_free_host(mmc);
out_release_mem:
- release_mem_region(host->res->start, resource_size(host->res));
+ release_mem_region(iores->start, resource_size(iores));
return ret;
}
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 0c44d560bf1..0c7a63c1f12 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -22,6 +22,8 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+MODULE_LICENSE("GPL");
+
enum {
CD_GPIO = 0,
WP_GPIO,
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 5f970e253e5..c6d7e8ecadb 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1459,8 +1459,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
goto err_ioremap;
host->iclk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(host->iclk))
+ if (IS_ERR(host->iclk)) {
+ ret = PTR_ERR(host->iclk);
goto err_free_mmc_host;
+ }
clk_enable(host->iclk);
host->fclk = clk_get(&pdev->dev, "fck");
@@ -1500,10 +1502,8 @@ err_free_irq:
err_free_fclk:
clk_put(host->fclk);
err_free_iclk:
- if (host->iclk != NULL) {
- clk_disable(host->iclk);
- clk_put(host->iclk);
- }
+ clk_disable(host->iclk);
+ clk_put(host->iclk);
err_free_mmc_host:
iounmap(host->virt_base);
err_ioremap:
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index bb47ff465c0..0d783f3e79e 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -828,7 +828,7 @@ static int pxamci_resume(struct device *dev)
return ret;
}
-static struct dev_pm_ops pxamci_pm_ops = {
+static const struct dev_pm_ops pxamci_pm_ops = {
.suspend = pxamci_suspend,
.resume = pxamci_resume,
};
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 941a4d35ef8..d96e1abf2d6 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -820,7 +820,7 @@ fail_request:
static void finalize_request(struct s3cmci_host *host)
{
struct mmc_request *mrq = host->mrq;
- struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
+ struct mmc_command *cmd;
int debug_as_failure = 0;
if (host->complete_what != COMPLETION_FINALIZE)
@@ -828,6 +828,7 @@ static void finalize_request(struct s3cmci_host *host)
if (!mrq)
return;
+ cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
if (cmd->data && (cmd->error == 0) &&
(cmd->data->error == 0)) {
@@ -1302,10 +1303,8 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
if (pdata->no_wprotect)
return 0;
- ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
-
- if (pdata->wprotect_invert)
- ret = !ret;
+ ret = gpio_get_value(pdata->gpio_wprotect) ? 1 : 0;
+ ret ^= pdata->wprotect_invert;
return ret;
}
@@ -1654,7 +1653,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
goto probe_free_irq;
}
- host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
+ host->irq_cd = gpio_to_irq(host->pdata->gpio_detect);
if (host->irq_cd >= 0) {
if (request_irq(host->irq_cd, s3cmci_irq_cd,
@@ -1892,7 +1891,7 @@ static int s3cmci_resume(struct device *dev)
return mmc_resume_host(mmc);
}
-static struct dev_pm_ops s3cmci_pm = {
+static const struct dev_pm_ops s3cmci_pm = {
.suspend = s3cmci_suspend,
.resume = s3cmci_resume,
};
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of-core.c
index 01ab916c280..55e33135edb 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -22,62 +22,37 @@
#include <linux/of_platform.h>
#include <linux/mmc/host.h>
#include <asm/machdep.h>
+#include "sdhci-of.h"
#include "sdhci.h"
-struct sdhci_of_data {
- unsigned int quirks;
- struct sdhci_ops ops;
-};
-
-struct sdhci_of_host {
- unsigned int clock;
- u16 xfer_mode_shadow;
-};
+#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
/*
- * Ops and quirks for the Freescale eSDHC controller.
+ * These accessors are designed for big endian hosts doing I/O to
+ * little endian controllers incorporating a 32-bit hardware byte swapper.
*/
-#define ESDHC_DMA_SYSCTL 0x40c
-#define ESDHC_DMA_SNOOP 0x00000040
-
-#define ESDHC_SYSTEM_CONTROL 0x2c
-#define ESDHC_CLOCK_MASK 0x0000fff0
-#define ESDHC_PREDIV_SHIFT 8
-#define ESDHC_DIVIDER_SHIFT 4
-#define ESDHC_CLOCK_PEREN 0x00000004
-#define ESDHC_CLOCK_HCKEN 0x00000002
-#define ESDHC_CLOCK_IPGEN 0x00000001
-
-#define ESDHC_HOST_CONTROL_RES 0x05
-
-static u32 esdhc_readl(struct sdhci_host *host, int reg)
+u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
{
return in_be32(host->ioaddr + reg);
}
-static u16 esdhc_readw(struct sdhci_host *host, int reg)
+u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
{
- u16 ret;
-
- if (unlikely(reg == SDHCI_HOST_VERSION))
- ret = in_be16(host->ioaddr + reg);
- else
- ret = in_be16(host->ioaddr + (reg ^ 0x2));
- return ret;
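+ /* reg ^ 0x2 picks the other halfword within the byte-swapped 32-bit word */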
+ return in_be16(host->ioaddr + (reg ^ 0x2));
}
-static u8 esdhc_readb(struct sdhci_host *host, int reg)
+u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
{
return in_8(host->ioaddr + (reg ^ 0x3));
}
-static void esdhc_writel(struct sdhci_host *host, u32 val, int reg)
+void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
{
out_be32(host->ioaddr + reg, val);
}
-static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
+void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_of_host *of_host = sdhci_priv(host);
int base = reg & ~0x3;
@@ -92,106 +67,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
of_host->xfer_mode_shadow = val;
return;
case SDHCI_COMMAND:
- esdhc_writel(host, val << 16 | of_host->xfer_mode_shadow,
- SDHCI_TRANSFER_MODE);
+ sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
+ SDHCI_TRANSFER_MODE);
return;
- case SDHCI_BLOCK_SIZE:
- /*
- * Two last DMA bits are reserved, and first one is used for
- * non-standard blksz of 4096 bytes that we don't support
- * yet. So clear the DMA boundary bits.
- */
- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
- /* fall through */
}
clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
}
-static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
{
int base = reg & ~0x3;
int shift = (reg & 0x3) * 8;
- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
- if (reg == SDHCI_HOST_CONTROL)
- val &= ~ESDHC_HOST_CONTROL_RES;
-
clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
}
-
-static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
-{
- int pre_div = 2;
- int div = 1;
-
- clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
-
- if (clock == 0)
- goto out;
-
- while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
- pre_div *= 2;
-
- while (host->max_clk / pre_div / div > clock && div < 16)
- div++;
-
- dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / pre_div / div);
-
- pre_div >>= 1;
- div--;
-
- setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
- ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
- div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
- mdelay(100);
-out:
- host->clock = clock;
-}
-
-static int esdhc_enable_dma(struct sdhci_host *host)
-{
- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
- return 0;
-}
-
-static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
-{
- struct sdhci_of_host *of_host = sdhci_priv(host);
-
- return of_host->clock;
-}
-
-static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
-{
- struct sdhci_of_host *of_host = sdhci_priv(host);
-
- return of_host->clock / 256 / 16;
-}
-
-static struct sdhci_of_data sdhci_esdhc = {
- .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
- SDHCI_QUIRK_BROKEN_CARD_DETECTION |
- SDHCI_QUIRK_NO_BUSY_IRQ |
- SDHCI_QUIRK_NONSTANDARD_CLOCK |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_PIO_NEEDS_DELAY |
- SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
- SDHCI_QUIRK_NO_CARD_NO_RESET,
- .ops = {
- .readl = esdhc_readl,
- .readw = esdhc_readw,
- .readb = esdhc_readb,
- .writel = esdhc_writel,
- .writew = esdhc_writew,
- .writeb = esdhc_writeb,
- .set_clock = esdhc_set_clock,
- .enable_dma = esdhc_enable_dma,
- .get_max_clock = esdhc_get_max_clock,
- .get_min_clock = esdhc_get_min_clock,
- },
-};
+#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
#ifdef CONFIG_PM
@@ -301,9 +191,14 @@ static int __devexit sdhci_of_remove(struct of_device *ofdev)
}
static const struct of_device_id sdhci_of_match[] = {
+#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
{ .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
{ .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
{ .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
+#endif
+#ifdef CONFIG_MMC_SDHCI_OF_HLWD
+ { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
+#endif
{ .compatible = "generic-sdhci", },
{},
};
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
new file mode 100644
index 00000000000..d5b11a17e64
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -0,0 +1,143 @@
+/*
+ * Freescale eSDHC controller driver.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include "sdhci-of.h"
+#include "sdhci.h"
+
+/*
+ * Ops and quirks for the Freescale eSDHC controller.
+ */
+
+#define ESDHC_DMA_SYSCTL 0x40c
+#define ESDHC_DMA_SNOOP 0x00000040
+
+#define ESDHC_SYSTEM_CONTROL 0x2c
+#define ESDHC_CLOCK_MASK 0x0000fff0
+#define ESDHC_PREDIV_SHIFT 8
+#define ESDHC_DIVIDER_SHIFT 4
+#define ESDHC_CLOCK_PEREN 0x00000004
+#define ESDHC_CLOCK_HCKEN 0x00000002
+#define ESDHC_CLOCK_IPGEN 0x00000001
+
+#define ESDHC_HOST_CONTROL_RES 0x05
+
+static u16 esdhc_readw(struct sdhci_host *host, int reg)
+{
+ u16 ret;
+
+ if (unlikely(reg == SDHCI_HOST_VERSION))
+ ret = in_be16(host->ioaddr + reg);
+ else
+ ret = sdhci_be32bs_readw(host, reg);
+ return ret;
+}
+
+static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ if (reg == SDHCI_BLOCK_SIZE) {
+ /*
+ * The last two DMA bits are reserved, and the first one is used for
+ * a non-standard blksz of 4096 bytes that we don't support yet. So
+ * clear the DMA boundary bits.
+ */
+ val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
+ }
+ sdhci_be32bs_writew(host, val, reg);
+}
+
+static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
+ if (reg == SDHCI_HOST_CONTROL)
+ val &= ~ESDHC_HOST_CONTROL_RES;
+ sdhci_be32bs_writeb(host, val, reg);
+}
+
+static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int pre_div = 2;
+ int div = 1;
+
+ clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
+ ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);
+
+ if (clock == 0)
+ goto out;
+
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+ while (host->max_clk / pre_div / div > clock && div < 16)
+ div++;
+
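+ /*
+ * For example, with max_clk = 133 MHz and a 25 MHz target, pre_div
+ * stays at 2 and div stops at 3, for an actual clock of about 22 MHz.
+ */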
+ dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
+ clock, host->max_clk / pre_div / div);
+
+ pre_div >>= 1;
+ div--;
+
+ setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
+ ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
+ div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);
+ mdelay(100);
+out:
+ host->clock = clock;
+}
+
+static int esdhc_enable_dma(struct sdhci_host *host)
+{
+ setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
+ return 0;
+}
+
+static unsigned int esdhc_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_of_host *of_host = sdhci_priv(host);
+
+ return of_host->clock;
+}
+
+static unsigned int esdhc_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_of_host *of_host = sdhci_priv(host);
+
+ return of_host->clock / 256 / 16;
+}
+
+struct sdhci_of_data sdhci_esdhc = {
+ .quirks = SDHCI_QUIRK_FORCE_BLK_SZ_2048 |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_NONSTANDARD_CLOCK |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_PIO_NEEDS_DELAY |
+ SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
+ SDHCI_QUIRK_NO_CARD_NO_RESET,
+ .ops = {
+ .readl = sdhci_be32bs_readl,
+ .readw = esdhc_readw,
+ .readb = sdhci_be32bs_readb,
+ .writel = sdhci_be32bs_writel,
+ .writew = esdhc_writew,
+ .writeb = esdhc_writeb,
+ .set_clock = esdhc_set_clock,
+ .enable_dma = esdhc_enable_dma,
+ .get_max_clock = esdhc_get_max_clock,
+ .get_min_clock = esdhc_get_min_clock,
+ },
+};
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
new file mode 100644
index 00000000000..35117f3ed75
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -0,0 +1,65 @@
+/*
+ * drivers/mmc/host/sdhci-of-hlwd.c
+ *
+ * Nintendo Wii Secure Digital Host Controller Interface.
+ * Copyright (C) 2009 The GameCube Linux Team
+ * Copyright (C) 2009 Albert Herranz
+ *
+ * Based on sdhci-of-esdhc.c
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include "sdhci-of.h"
+#include "sdhci.h"
+
+/*
+ * Ops and quirks for the Nintendo Wii SDHCI controllers.
+ */
+
+/*
+ * We need a small delay after each write, or things go horribly wrong.
+ */
+#define SDHCI_HLWD_WRITE_DELAY 5 /* usecs */
+
+static void sdhci_hlwd_writel(struct sdhci_host *host, u32 val, int reg)
+{
+ sdhci_be32bs_writel(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ sdhci_be32bs_writew(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ sdhci_be32bs_writeb(host, val, reg);
+ udelay(SDHCI_HLWD_WRITE_DELAY);
+}
+
+struct sdhci_of_data sdhci_hlwd = {
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE,
+ .ops = {
+ .readl = sdhci_be32bs_readl,
+ .readw = sdhci_be32bs_readw,
+ .readb = sdhci_be32bs_readb,
+ .writel = sdhci_hlwd_writel,
+ .writew = sdhci_hlwd_writew,
+ .writeb = sdhci_hlwd_writeb,
+ },
+};
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
new file mode 100644
index 00000000000..ad09ad9915d
--- /dev/null
+++ b/drivers/mmc/host/sdhci-of.h
@@ -0,0 +1,42 @@
+/*
+ * OpenFirmware bindings for Secure Digital Host Controller Interface.
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef __SDHCI_OF_H
+#define __SDHCI_OF_H
+
+#include <linux/types.h>
+#include "sdhci.h"
+
+struct sdhci_of_data {
+ unsigned int quirks;
+ struct sdhci_ops ops;
+};
+
+struct sdhci_of_host {
+ unsigned int clock;
+ u16 xfer_mode_shadow;
+};
+
+extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
+extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
+extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
+extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
+extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
+extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
+
+extern struct sdhci_of_data sdhci_esdhc;
+extern struct sdhci_of_data sdhci_hlwd;
+
+#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index e0356644d1a..5c3a1767770 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -285,6 +285,73 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
.resume = jmicron_resume,
};
+/* SysKonnect CardBus2SDIO extra registers */
+#define SYSKT_CTRL 0x200
+#define SYSKT_RDFIFO_STAT 0x204
+#define SYSKT_WRFIFO_STAT 0x208
+#define SYSKT_POWER_DATA 0x20c
+#define SYSKT_POWER_330 0xef
+#define SYSKT_POWER_300 0xf8
+#define SYSKT_POWER_184 0xcc
+#define SYSKT_POWER_CMD 0x20d
+#define SYSKT_POWER_START (1 << 7)
+#define SYSKT_POWER_STATUS 0x20e
+#define SYSKT_POWER_STATUS_OK (1 << 0)
+#define SYSKT_BOARD_REV 0x210
+#define SYSKT_CHIP_REV 0x211
+#define SYSKT_CONF_DATA 0x212
+#define SYSKT_CONF_DATA_1V8 (1 << 2)
+#define SYSKT_CONF_DATA_2V5 (1 << 1)
+#define SYSKT_CONF_DATA_3V3 (1 << 0)
+
+static int syskt_probe(struct sdhci_pci_chip *chip)
+{
+ if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
+ chip->pdev->class &= ~0x0000FF;
+ chip->pdev->class |= PCI_SDHCI_IFDMA;
+ }
+ return 0;
+}
+
+static int syskt_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int tm, ps;
+
+ u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
+ u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
+ dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
+ "board rev %d.%d, chip rev %d.%d\n",
+ board_rev >> 4, board_rev & 0xf,
+ chip_rev >> 4, chip_rev & 0xf);
+ if (chip_rev >= 0x20)
+ slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
+
+ writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
+ writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
+ udelay(50);
+ tm = 10; /* Wait max 1 ms */
+ do {
+ ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
+ if (ps & SYSKT_POWER_STATUS_OK)
+ break;
+ udelay(100);
+ } while (--tm);
+ if (!tm) {
+ dev_err(&slot->chip->pdev->dev,
+ "power regulator never stabilized");
+ writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_syskt = {
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
+ .probe = syskt_probe,
+ .probe_slot = syskt_probe_slot,
+};
+
static int via_probe(struct sdhci_pci_chip *chip)
{
if (chip->pdev->revision == 0x10)
@@ -363,6 +430,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
},
{
+ .vendor = PCI_VENDOR_ID_SYSKONNECT,
+ .device = 0x8000,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_syskt,
+ },
+
+ {
.vendor = PCI_VENDOR_ID_VIA,
.device = 0x95d0,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index ce5f1d73dc0..842f46f9428 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -8,6 +8,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
+#ifndef __SDHCI_H
+#define __SDHCI_H
#include <linux/scatterlist.h>
#include <linux/compiler.h>
@@ -408,3 +410,5 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
extern int sdhci_resume_host(struct sdhci_host *host);
#endif
+
+#endif /* __SDHCI_H */
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 91991b460c4..7cccc852374 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -591,7 +591,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
disable_mmc_irqs(host, TMIO_MASK_ALL);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
- IRQF_TRIGGER_FALLING, "tmio-mmc", host);
+ IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret)
goto unmap_cnf;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e7563a9872d..5fbf29e1e64 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -43,15 +43,17 @@
// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
-#define MANUFACTURER_INTEL 0x0089
+/* Intel chips */
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
-#define MANUFACTURER_ST 0x0020
+/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
+/* Atmel chips */
#define AT49BV640D 0x02de
+#define AT49BV640DT 0x02db
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -199,6 +201,16 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
cfi->cfiq->BufWriteTimeoutMax = 0;
}
+static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ cfip->FeatureSupport |= (1 << 5);
+ mtd->flags |= MTD_POWERUP_LOCK;
+}
+
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -283,6 +295,8 @@ static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
+ { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
+ { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
@@ -294,16 +308,16 @@ static struct cfi_fixup cfi_fixup_table[] = {
#endif
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
- { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
+ { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
- { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
- { MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
+ { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
@@ -319,7 +333,7 @@ static struct cfi_fixup fixup_table[] = {
static void cfi_fixup_major_minor(struct cfi_private *cfi,
struct cfi_pri_intelext *extp)
{
- if (cfi->mfr == MANUFACTURER_INTEL &&
+ if (cfi->mfr == CFI_MFR_INTEL &&
cfi->id == PF38F4476 && extp->MinorVersion == '3')
extp->MinorVersion = '1';
}
@@ -2235,7 +2249,7 @@ static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
/* Some chips have OTP located in the _top_ partition only.
For example: Intel 28F256L18T (T means top-parameter device) */
- if (cfi->mfr == MANUFACTURER_INTEL) {
+ if (cfi->mfr == CFI_MFR_INTEL) {
switch (cfi->id) {
case 0x880b:
case 0x880c:
@@ -2564,6 +2578,7 @@ static int cfi_intelext_reset(struct mtd_info *mtd)
if (!ret) {
map_write(map, CMD(0xff), chip->start);
chip->state = FL_SHUTDOWN;
+ put_chip(map, chip, chip->start);
}
spin_unlock(chip->mutex);
}
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 94bb61e1904..f3600e8d538 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -490,10 +490,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
}
#endif
- /* FIXME: erase-suspend-program is broken. See
- http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
- printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
-
__module_get(THIS_MODULE);
return mtd;
@@ -573,7 +569,6 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
if (time_after(jiffies, timeo)) {
printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
- spin_unlock(chip->mutex);
return -EIO;
}
spin_unlock(chip->mutex);
@@ -589,15 +584,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
return 0;
case FL_ERASING:
- if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
- goto sleep;
-
- if (!( mode == FL_READY
- || mode == FL_POINT
- || !cfip
- || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
- || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
- )))
+ if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
/* We could check to see if we're trying to access the sector
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index c5a84fda541..ca584d0380b 100755
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -71,6 +71,13 @@ int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
if (cfi_qry_present(map, base, cfi))
return 1;
+ /* some old SST chips, e.g. 39VF160x/39VF320x */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
+ if (cfi_qry_present(map, base, cfi))
+ return 1;
/* QRY not found */
return 0;
}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 736a3be265f..1bec5e1ce6a 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -142,8 +142,8 @@
/* ST - www.st.com */
#define M29F800AB 0x0058
-#define M29W800DT 0x00D7
-#define M29W800DB 0x005B
+#define M29W800DT 0x22D7
+#define M29W800DB 0x225B
#define M29W400DT 0x00EE
#define M29W400DB 0x00EF
#define M29W160DT 0x22C4
@@ -1575,7 +1575,7 @@ static const struct amd_flash_info jedec_table[] = {
.dev_id = M29W800DT,
.name = "ST M29W800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
- .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
@@ -1590,7 +1590,7 @@ static const struct amd_flash_info jedec_table[] = {
.dev_id = M29W800DB,
.name = "ST M29W800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
- .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4c19269de91..f3f4768d6e1 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -22,6 +22,7 @@
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/sched.h>
+#include <linux/mod_devicetable.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -29,9 +30,6 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
-
-#define FLASH_PAGESIZE 256
-
/* Flash opcodes. */
#define OPCODE_WREN 0x06 /* Write enable */
#define OPCODE_RDSR 0x05 /* Read status register */
@@ -61,7 +59,7 @@
/* Define max times to check status register before we give up. */
#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
-#define CMD_SIZE 4
+#define MAX_CMD_SIZE 4
#ifdef CONFIG_M25PXX_USE_FAST_READ
#define OPCODE_READ OPCODE_FAST_READ
@@ -78,8 +76,10 @@ struct m25p {
struct mutex lock;
struct mtd_info mtd;
unsigned partitioned:1;
+ u16 page_size;
+ u16 addr_width;
u8 erase_opcode;
- u8 command[CMD_SIZE + FAST_READ_DUMMY_BYTE];
+ u8 *command;
};
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
@@ -198,6 +198,19 @@ static int erase_chip(struct m25p *flash)
return 0;
}
+static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
+{
+ /* opcode is in cmd[0] */
+ cmd[1] = addr >> (flash->addr_width * 8 - 8);
+ cmd[2] = addr >> (flash->addr_width * 8 - 16);
+ cmd[3] = addr >> (flash->addr_width * 8 - 24);
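+ /* e.g. for a 3-byte address width this is addr >> 16, >> 8 and >> 0 */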
+}
+
+static int m25p_cmdsz(struct m25p *flash)
+{
+ return 1 + flash->addr_width;
+}
+
/*
* Erase one sector of flash memory at offset ``offset'' which is any
* address within the sector which should be erased.
@@ -219,11 +232,9 @@ static int erase_sector(struct m25p *flash, u32 offset)
/* Set up command buffer. */
flash->command[0] = flash->erase_opcode;
- flash->command[1] = offset >> 16;
- flash->command[2] = offset >> 8;
- flash->command[3] = offset;
+ m25p_addr2cmd(flash, offset, flash->command);
- spi_write(flash->spi, flash->command, CMD_SIZE);
+ spi_write(flash->spi, flash->command, m25p_cmdsz(flash));
return 0;
}
@@ -325,7 +336,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
* Should add 1 byte DUMMY_BYTE.
*/
t[0].tx_buf = flash->command;
- t[0].len = CMD_SIZE + FAST_READ_DUMMY_BYTE;
+ t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
spi_message_add_tail(&t[0], &m);
t[1].rx_buf = buf;
@@ -352,13 +363,11 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Set up the write data buffer. */
flash->command[0] = OPCODE_READ;
- flash->command[1] = from >> 16;
- flash->command[2] = from >> 8;
- flash->command[3] = from;
+ m25p_addr2cmd(flash, from, flash->command);
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - CMD_SIZE - FAST_READ_DUMMY_BYTE;
+ *retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;
mutex_unlock(&flash->lock);
@@ -396,7 +405,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
memset(t, 0, (sizeof t));
t[0].tx_buf = flash->command;
- t[0].len = CMD_SIZE;
+ t[0].len = m25p_cmdsz(flash);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
@@ -414,41 +423,36 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Set up the opcode in the write buffer. */
flash->command[0] = OPCODE_PP;
- flash->command[1] = to >> 16;
- flash->command[2] = to >> 8;
- flash->command[3] = to;
+ m25p_addr2cmd(flash, to, flash->command);
- /* what page do we start with? */
- page_offset = to % FLASH_PAGESIZE;
+ page_offset = to & (flash->page_size - 1);
/* do all the bytes fit onto one page? */
- if (page_offset + len <= FLASH_PAGESIZE) {
+ if (page_offset + len <= flash->page_size) {
t[1].len = len;
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - CMD_SIZE;
+ *retlen = m.actual_length - m25p_cmdsz(flash);
} else {
u32 i;
/* the size of data remaining on the first page */
- page_size = FLASH_PAGESIZE - page_offset;
+ page_size = flash->page_size - page_offset;
t[1].len = page_size;
spi_sync(flash->spi, &m);
- *retlen = m.actual_length - CMD_SIZE;
+ *retlen = m.actual_length - m25p_cmdsz(flash);
- /* write everything in PAGESIZE chunks */
+ /* write everything in flash->page_size chunks */
for (i = page_size; i < len; i += page_size) {
page_size = len - i;
- if (page_size > FLASH_PAGESIZE)
- page_size = FLASH_PAGESIZE;
+ if (page_size > flash->page_size)
+ page_size = flash->page_size;
/* write the next page to flash */
- flash->command[1] = (to + i) >> 16;
- flash->command[2] = (to + i) >> 8;
- flash->command[3] = (to + i);
+ m25p_addr2cmd(flash, to + i, flash->command);
t[1].tx_buf = buf + i;
t[1].len = page_size;
@@ -460,7 +464,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
spi_sync(flash->spi, &m);
if (retlen)
- *retlen += m.actual_length - CMD_SIZE;
+ *retlen += m.actual_length - m25p_cmdsz(flash);
}
}
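
The write path above splits a request at page boundaries: the first chunk fills the remainder of the current page, and later chunks are at most one page each. A host-side sketch of just that chunking arithmetic, assuming a 256-byte page (the driver's default), might look like:

#include <stdio.h>

#define PAGE_SIZE_DEMO 256      /* assumed page size; must be a power of two */

/* Sketch of the chunking in m25p80_write(): first the partial page, then whole pages */
static void write_chunks(unsigned to, unsigned len)
{
        unsigned page_offset = to & (PAGE_SIZE_DEMO - 1);
        unsigned page_size, i;

        if (page_offset + len <= PAGE_SIZE_DEMO) {
                printf("program %u bytes at 0x%x\n", len, to);
                return;
        }
        page_size = PAGE_SIZE_DEMO - page_offset;
        printf("program %u bytes at 0x%x\n", page_size, to);
        for (i = page_size; i < len; i += page_size) {
                page_size = len - i;
                if (page_size > PAGE_SIZE_DEMO)
                        page_size = PAGE_SIZE_DEMO;
                printf("program %u bytes at 0x%x\n", page_size, to + i);
        }
}

int main(void)
{
        write_chunks(0x1f0, 600);       /* starts 16 bytes before a page boundary:
                                           16 + 256 + 256 + 72 bytes are programmed */
        return 0;
}
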
@@ -492,7 +496,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
memset(t, 0, (sizeof t));
t[0].tx_buf = flash->command;
- t[0].len = CMD_SIZE;
+ t[0].len = m25p_cmdsz(flash);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
@@ -511,9 +515,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Start write from odd address. */
if (actual) {
flash->command[0] = OPCODE_BP;
- flash->command[1] = to >> 16;
- flash->command[2] = to >> 8;
- flash->command[3] = to;
+ m25p_addr2cmd(flash, to, flash->command);
/* write one byte. */
t[1].len = 1;
@@ -521,17 +523,15 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
ret = wait_till_ready(flash);
if (ret)
goto time_out;
- *retlen += m.actual_length - CMD_SIZE;
+ *retlen += m.actual_length - m25p_cmdsz(flash);
}
to += actual;
flash->command[0] = OPCODE_AAI_WP;
- flash->command[1] = to >> 16;
- flash->command[2] = to >> 8;
- flash->command[3] = to;
+ m25p_addr2cmd(flash, to, flash->command);
/* Write out most of the data here. */
- cmd_sz = CMD_SIZE;
+ cmd_sz = m25p_cmdsz(flash);
for (; actual < len - 1; actual += 2) {
t[0].len = cmd_sz;
/* write two bytes. */
@@ -555,10 +555,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
if (actual != len) {
write_enable(flash);
flash->command[0] = OPCODE_BP;
- flash->command[1] = to >> 16;
- flash->command[2] = to >> 8;
- flash->command[3] = to;
- t[0].len = CMD_SIZE;
+ m25p_addr2cmd(flash, to, flash->command);
+ t[0].len = m25p_cmdsz(flash);
t[1].len = 1;
t[1].tx_buf = buf + actual;
@@ -566,7 +564,7 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
ret = wait_till_ready(flash);
if (ret)
goto time_out;
- *retlen += m.actual_length - CMD_SIZE;
+ *retlen += m.actual_length - m25p_cmdsz(flash);
write_disable(flash);
}
@@ -582,8 +580,6 @@ time_out:
*/
struct flash_info {
- char *name;
-
/* JEDEC id zero means "no ID" (most older chips); otherwise it has
* a high byte of zero plus three data bytes: the manufacturer id,
* then a two byte device id.
@@ -597,87 +593,119 @@ struct flash_info {
unsigned sector_size;
u16 n_sectors;
+ u16 page_size;
+ u16 addr_width;
+
u16 flags;
#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
+#define M25P_NO_ERASE 0x02 /* No erase command needed */
};
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .jedec_id = (_jedec_id), \
+ .ext_id = (_ext_id), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .addr_width = 3, \
+ .flags = (_flags), \
+ })
+
+#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width) \
+ ((kernel_ulong_t)&(struct flash_info) { \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = (_page_size), \
+ .addr_width = (_addr_width), \
+ .flags = M25P_NO_ERASE, \
+ })
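
The INFO()/CAT25_INFO() macros stash a pointer to an anonymous, statically allocated flash_info in the driver_data field of each spi_device_id, so one flat table serves both modalias matching and chip description. A stripped-down sketch of the same pattern with hypothetical names (chip_info, id_entry, CHIP) follows; like the kernel, it leans on the GCC extension that permits an address cast to an integer in a static initializer:

#include <stdio.h>

typedef unsigned long kernel_ulong_t;   /* as in the kernel's mod_devicetable.h */

struct chip_info { unsigned sector_size; unsigned n_sectors; };

struct id_entry { const char *name; kernel_ulong_t driver_data; };

/* Point driver_data at an anonymous compound literal (GCC accepts the
 * pointer-to-integer cast in a static initializer, which the kernel relies on). */
#define CHIP(_name, _ss, _ns) \
        { .name = (_name), \
          .driver_data = (kernel_ulong_t)&(struct chip_info){ (_ss), (_ns) } }

static const struct id_entry ids[] = {
        CHIP("demo25p80", 64 * 1024, 16),
        { },
};

int main(void)
{
        const struct chip_info *info = (const void *)ids[0].driver_data;

        printf("%s: %u sectors of %u bytes\n",
               ids[0].name, info->n_sectors, info->sector_size);
        return 0;
}
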
/* NOTE: double check command sets and memory organization when you add
* more flash chips. This current list focusses on newer chips, which
 * have been converging on command sets which include JEDEC ID.
*/
-static struct flash_info __devinitdata m25p_data [] = {
-
+static const struct spi_device_id m25p_ids[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
- { "at25fs010", 0x1f6601, 0, 32 * 1024, 4, SECT_4K, },
- { "at25fs040", 0x1f6604, 0, 64 * 1024, 8, SECT_4K, },
+ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
+ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
- { "at25df041a", 0x1f4401, 0, 64 * 1024, 8, SECT_4K, },
- { "at25df641", 0x1f4800, 0, 64 * 1024, 128, SECT_4K, },
+ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
+ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
- { "at26f004", 0x1f0400, 0, 64 * 1024, 8, SECT_4K, },
- { "at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K, },
- { "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
- { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
+ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
+ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
+ { "at26df321", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
/* Macronix */
- { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, },
- { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, },
- { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
- { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, },
+ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
+ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
+ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
- { "s25sl004a", 0x010212, 0, 64 * 1024, 8, },
- { "s25sl008a", 0x010213, 0, 64 * 1024, 16, },
- { "s25sl016a", 0x010214, 0, 64 * 1024, 32, },
- { "s25sl032a", 0x010215, 0, 64 * 1024, 64, },
- { "s25sl064a", 0x010216, 0, 64 * 1024, 128, },
- { "s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, },
- { "s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, },
- { "s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, },
- { "s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, },
+ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
+ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
+ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
+ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
+ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
+ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
+ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
- { "sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K, },
- { "sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K, },
- { "sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K, },
- { "sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K, },
- { "sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K, },
- { "sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K, },
- { "sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K, },
- { "sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K, },
+ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
+ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
+ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
+ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
+ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
+ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
+ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
+ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },
/* ST Microelectronics -- newer production may have feature updates */
- { "m25p05", 0x202010, 0, 32 * 1024, 2, },
- { "m25p10", 0x202011, 0, 32 * 1024, 4, },
- { "m25p20", 0x202012, 0, 64 * 1024, 4, },
- { "m25p40", 0x202013, 0, 64 * 1024, 8, },
- { "m25p80", 0, 0, 64 * 1024, 16, },
- { "m25p16", 0x202015, 0, 64 * 1024, 32, },
- { "m25p32", 0x202016, 0, 64 * 1024, 64, },
- { "m25p64", 0x202017, 0, 64 * 1024, 128, },
- { "m25p128", 0x202018, 0, 256 * 1024, 64, },
-
- { "m45pe10", 0x204011, 0, 64 * 1024, 2, },
- { "m45pe80", 0x204014, 0, 64 * 1024, 16, },
- { "m45pe16", 0x204015, 0, 64 * 1024, 32, },
-
- { "m25pe80", 0x208014, 0, 64 * 1024, 16, },
- { "m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K, },
+ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
+ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
+ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
+ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
+ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
+ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
+ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
+ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
+ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+
+ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
+ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
+ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
+
+ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
+ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
- { "w25x10", 0xef3011, 0, 64 * 1024, 2, SECT_4K, },
- { "w25x20", 0xef3012, 0, 64 * 1024, 4, SECT_4K, },
- { "w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K, },
- { "w25x80", 0xef3014, 0, 64 * 1024, 16, SECT_4K, },
- { "w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K, },
- { "w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K, },
- { "w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K, },
+ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
+ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
+ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+
+ /* Catalyst / On Semiconductor -- non-JEDEC */
+ { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
+ { "cat25c03", CAT25_INFO( 32, 8, 16, 2) },
+ { "cat25c09", CAT25_INFO( 128, 8, 32, 2) },
+ { "cat25c17", CAT25_INFO( 256, 8, 32, 2) },
+ { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
+ { },
};
+MODULE_DEVICE_TABLE(spi, m25p_ids);
-static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
+static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi)
{
int tmp;
u8 code = OPCODE_RDID;
@@ -702,18 +730,24 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
jedec = jedec << 8;
jedec |= id[2];
+ /*
+ * Some chips (like Numonyx M25P80) have JEDEC and non-JEDEC variants,
+ * which depend on the technology process. Officially the RDID command
+ * doesn't exist for non-JEDEC chips, but for compatibility they return ID 0.
+ */
+ if (jedec == 0)
+ return NULL;
+
ext_jedec = id[3] << 8 | id[4];
- for (tmp = 0, info = m25p_data;
- tmp < ARRAY_SIZE(m25p_data);
- tmp++, info++) {
+ for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) {
+ info = (void *)m25p_ids[tmp].driver_data;
if (info->jedec_id == jedec) {
if (info->ext_id != 0 && info->ext_id != ext_jedec)
continue;
- return info;
+ return &m25p_ids[tmp];
}
}
- dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
return NULL;
}
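
jedec_probe() now bails out when the RDID answer is all zeroes (the non-JEDEC case) and otherwise matches the 24-bit id, plus an optional 16-bit extended id, against the table. A stand-alone sketch of that matching with a small made-up table could look like:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct jedec_entry { const char *name; uint32_t jedec_id; uint16_t ext_id; };

static const struct jedec_entry table[] = {
        { "s25sl12800", 0x012018, 0x0300 },
        { "s25sl12801", 0x012018, 0x0301 },
        { "m25p80",     0x202014, 0 },
};

/* id[] is the 5-byte RDID answer: manufacturer, 2-byte device id, 2-byte ext id */
static const struct jedec_entry *match(const uint8_t id[5])
{
        uint32_t jedec = id[0] << 16 | id[1] << 8 | id[2];
        uint16_t ext = id[3] << 8 | id[4];
        size_t i;

        if (jedec == 0)                 /* non-JEDEC parts answer RDID with zeroes */
                return NULL;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (table[i].jedec_id != jedec)
                        continue;
                if (table[i].ext_id && table[i].ext_id != ext)
                        continue;
                return &table[i];
        }
        return NULL;
}

int main(void)
{
        uint8_t id[5] = { 0x01, 0x20, 0x18, 0x03, 0x01 };
        const struct jedec_entry *e = match(id);

        printf("%s\n", e ? e->name : "unknown");        /* prints s25sl12801 */
        return 0;
}
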
@@ -725,6 +759,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
*/
static int __devinit m25p_probe(struct spi_device *spi)
{
+ const struct spi_device_id *id = spi_get_device_id(spi);
struct flash_platform_data *data;
struct m25p *flash;
struct flash_info *info;
@@ -737,50 +772,65 @@ static int __devinit m25p_probe(struct spi_device *spi)
*/
data = spi->dev.platform_data;
if (data && data->type) {
- for (i = 0, info = m25p_data;
- i < ARRAY_SIZE(m25p_data);
- i++, info++) {
- if (strcmp(data->type, info->name) == 0)
- break;
- }
+ const struct spi_device_id *plat_id;
- /* unrecognized chip? */
- if (i == ARRAY_SIZE(m25p_data)) {
- DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n",
- dev_name(&spi->dev), data->type);
- info = NULL;
-
- /* recognized; is that chip really what's there? */
- } else if (info->jedec_id) {
- struct flash_info *chip = jedec_probe(spi);
-
- if (!chip || chip != info) {
- dev_warn(&spi->dev, "found %s, expected %s\n",
- chip ? chip->name : "UNKNOWN",
- info->name);
- info = NULL;
- }
+ for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
+ plat_id = &m25p_ids[i];
+ if (strcmp(data->type, plat_id->name))
+ continue;
+ break;
}
- } else
- info = jedec_probe(spi);
- if (!info)
- return -ENODEV;
+ if (plat_id)
+ id = plat_id;
+ else
+ dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
+ }
+
+ info = (void *)id->driver_data;
+
+ if (info->jedec_id) {
+ const struct spi_device_id *jid;
+
+ jid = jedec_probe(spi);
+ if (!jid) {
+ dev_info(&spi->dev, "non-JEDEC variant of %s\n",
+ id->name);
+ } else if (jid != id) {
+ /*
+ * JEDEC knows better, so overwrite platform ID. We
+ * can't trust partitions any longer, but we'll let
+ * mtd apply them anyway, since some partitions may be
+ * marked read-only, and we don't want to lose that
+ * information, even if it's not 100% accurate.
+ */
+ dev_warn(&spi->dev, "found %s, expected %s\n",
+ jid->name, id->name);
+ id = jid;
+ info = (void *)jid->driver_data;
+ }
+ }
flash = kzalloc(sizeof *flash, GFP_KERNEL);
if (!flash)
return -ENOMEM;
+ flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
+ if (!flash->command) {
+ kfree(flash);
+ return -ENOMEM;
+ }
flash->spi = spi;
mutex_init(&flash->lock);
dev_set_drvdata(&spi->dev, flash);
/*
- * Atmel serial flash tend to power up
- * with the software protection bits set
+ * Atmel and SST serial flash tend to power
+ * up with the software protection bits set
*/
- if (info->jedec_id >> 16 == 0x1f) {
+ if (info->jedec_id >> 16 == 0x1f ||
+ info->jedec_id >> 16 == 0xbf) {
write_enable(flash);
write_sr(flash, 0);
}
@@ -812,9 +862,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
flash->mtd.erasesize = info->sector_size;
}
+ if (info->flags & M25P_NO_ERASE)
+ flash->mtd.flags |= MTD_NO_ERASE;
+
flash->mtd.dev.parent = &spi->dev;
+ flash->page_size = info->page_size;
+ flash->addr_width = info->addr_width;
- dev_info(&spi->dev, "%s (%lld Kbytes)\n", info->name,
+ dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
(long long)flash->mtd.size >> 10);
DEBUG(MTD_DEBUG_LEVEL2,
@@ -888,8 +943,10 @@ static int __devexit m25p_remove(struct spi_device *spi)
status = del_mtd_partitions(&flash->mtd);
else
status = del_mtd_device(&flash->mtd);
- if (status == 0)
+ if (status == 0) {
+ kfree(flash->command);
kfree(flash);
+ }
return 0;
}
@@ -900,6 +957,7 @@ static struct spi_driver m25p80_driver = {
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
+ .id_table = m25p_ids,
.probe = m25p_probe,
.remove = __devexit_p(m25p_remove),
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 93e3627be74..19817404ce7 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -636,6 +636,7 @@ add_dataflash_otp(struct spi_device *spi, char *name,
struct mtd_info *device;
struct flash_platform_data *pdata = spi->dev.platform_data;
char *otp_tag = "";
+ int err = 0;
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
@@ -693,13 +694,23 @@ add_dataflash_otp(struct spi_device *spi, char *name,
if (nr_parts > 0) {
priv->partitioned = 1;
- return add_mtd_partitions(device, parts, nr_parts);
+ err = add_mtd_partitions(device, parts, nr_parts);
+ goto out;
}
} else if (pdata && pdata->nr_parts)
dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, device->name);
- return add_mtd_device(device) == 1 ? -ENODEV : 0;
+ if (add_mtd_device(device) == 1)
+ err = -ENODEV;
+
+out:
+ if (!err)
+ return 0;
+
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(priv);
+ return err;
}
static inline int __devinit
@@ -932,8 +943,10 @@ static int __devexit dataflash_remove(struct spi_device *spi)
status = del_mtd_partitions(&flash->mtd);
else
status = del_mtd_device(&flash->mtd);
- if (status == 0)
+ if (status == 0) {
+ dev_set_drvdata(&spi->dev, NULL);
kfree(flash);
+ }
return status;
}
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 847e214ade5..4c364d44ad5 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -359,12 +359,6 @@ config MTD_SA1100
the SA1100 and SA1110, including the Assabet and the Compaq iPAQ.
If you have such a board, say 'Y'.
-config MTD_IPAQ
- tristate "CFI Flash device mapped on Compaq/HP iPAQ"
- depends on IPAQ_HANDHELD && MTD_CFI
- help
- This provides a driver for the on-board flash of the iPAQ.
-
config MTD_DC21285
tristate "CFI Flash device mapped on DC21285 Footbridge"
depends on MTD_CFI && ARCH_FOOTBRIDGE && MTD_COMPLEX_MAPPINGS
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ae2f6dbe43c..ce315214ff2 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -24,12 +24,12 @@ obj-$(CONFIG_MTD_CEIVA) += ceiva.o
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
+obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
-obj-$(CONFIG_MTD_IPAQ) += ipaq-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
obj-$(CONFIG_MTD_NETSC520) += netsc520.o
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
deleted file mode 100644
index 76708e796b7..00000000000
--- a/drivers/mtd/maps/ipaq-flash.c
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * Flash memory access on iPAQ Handhelds (either SA1100 or PXA250 based)
- *
- * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
- * (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
- * (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <asm/mach-types.h>
-#include <asm/system.h>
-#include <asm/errno.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#ifdef CONFIG_MTD_CONCAT
-#include <linux/mtd/concat.h>
-#endif
-
-#include <mach/hardware.h>
-#include <mach/h3600.h>
-#include <asm/io.h>
-
-
-#ifndef CONFIG_IPAQ_HANDHELD
-#error This is for iPAQ Handhelds only
-#endif
-#ifdef CONFIG_SA1100_JORNADA56X
-
-static void jornada56x_set_vpp(struct map_info *map, int vpp)
-{
- if (vpp)
- GPSR = GPIO_GPIO26;
- else
- GPCR = GPIO_GPIO26;
- GPDR |= GPIO_GPIO26;
-}
-
-#endif
-
-#ifdef CONFIG_SA1100_JORNADA720
-
-static void jornada720_set_vpp(struct map_info *map, int vpp)
-{
- if (vpp)
- PPSR |= 0x80;
- else
- PPSR &= ~0x80;
- PPDR |= 0x80;
-}
-
-#endif
-
-#define MAX_IPAQ_CS 2 /* Number of CS we are going to test */
-
-#define IPAQ_MAP_INIT(X) \
- { \
- name: "IPAQ flash " X, \
- }
-
-
-static struct map_info ipaq_map[MAX_IPAQ_CS] = {
- IPAQ_MAP_INIT("bank 1"),
- IPAQ_MAP_INIT("bank 2")
-};
-
-static struct mtd_info *my_sub_mtd[MAX_IPAQ_CS] = {
- NULL,
- NULL
-};
-
-/*
- * Here are partition information for all known IPAQ-based devices.
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size. It must be no more than
- * the value specified in the "struct map_desc *_io_desc" mapping
- * definition for the corresponding machine.
- *
- * Please keep these in alphabetical order, and formatted as per existing
- * entries. Thanks.
- */
-
-#ifdef CONFIG_IPAQ_HANDHELD
-static unsigned long h3xxx_max_flash_size = 0x04000000;
-static struct mtd_partition h3xxx_partitions[] = {
- {
- name: "H3XXX boot firmware",
-#ifndef CONFIG_LAB
- size: 0x00040000,
-#else
- size: 0x00080000,
-#endif
- offset: 0,
-#ifndef CONFIG_LAB
- mask_flags: MTD_WRITEABLE, /* force read-only */
-#endif
- },
- {
- name: "H3XXX root jffs2",
-#ifndef CONFIG_LAB
- size: 0x2000000 - 2*0x40000, /* Warning, this is fixed later */
- offset: 0x00040000,
-#else
- size: 0x2000000 - 0x40000 - 0x80000, /* Warning, this is fixed later */
- offset: 0x00080000,
-#endif
- },
- {
- name: "asset",
- size: 0x40000,
- offset: 0x2000000 - 0x40000, /* Warning, this is fixed later */
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }
-};
-
-#ifndef CONFIG_MTD_CONCAT
-static struct mtd_partition h3xxx_partitions_bank2[] = {
- /* this is used only on 2 CS machines when concat is not present */
- {
- name: "second H3XXX root jffs2",
- size: 0x1000000 - 0x40000, /* Warning, this is fixed later */
- offset: 0x00000000,
- },
- {
- name: "second asset",
- size: 0x40000,
- offset: 0x1000000 - 0x40000, /* Warning, this is fixed later */
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }
-};
-#endif
-
-static DEFINE_SPINLOCK(ipaq_vpp_lock);
-
-static void h3xxx_set_vpp(struct map_info *map, int vpp)
-{
- static int nest = 0;
-
- spin_lock(&ipaq_vpp_lock);
- if (vpp)
- nest++;
- else
- nest--;
- if (nest)
- assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 1);
- else
- assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 0);
- spin_unlock(&ipaq_vpp_lock);
-}
-
-#endif
-
-#if defined(CONFIG_SA1100_JORNADA56X) || defined(CONFIG_SA1100_JORNADA720)
-static unsigned long jornada_max_flash_size = 0x02000000;
-static struct mtd_partition jornada_partitions[] = {
- {
- name: "Jornada boot firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "Jornada root jffs2",
- size: MTDPART_SIZ_FULL,
- offset: 0x00040000,
- }
-};
-#endif
-
-
-static struct mtd_partition *parsed_parts;
-static struct mtd_info *mymtd;
-
-static unsigned long cs_phys[] = {
-#ifdef CONFIG_ARCH_SA1100
- SA1100_CS0_PHYS,
- SA1100_CS1_PHYS,
- SA1100_CS2_PHYS,
- SA1100_CS3_PHYS,
- SA1100_CS4_PHYS,
- SA1100_CS5_PHYS,
-#else
- PXA_CS0_PHYS,
- PXA_CS1_PHYS,
- PXA_CS2_PHYS,
- PXA_CS3_PHYS,
- PXA_CS4_PHYS,
- PXA_CS5_PHYS,
-#endif
-};
-
-static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-
-static int __init h1900_special_case(void);
-
-static int __init ipaq_mtd_init(void)
-{
- struct mtd_partition *parts = NULL;
- int nb_parts = 0;
- int parsed_nr_parts = 0;
- const char *part_type;
- int i; /* used when we have >1 flash chips */
- unsigned long tot_flashsize = 0; /* used when we have >1 flash chips */
-
- /* Default flash bankwidth */
- // ipaq_map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
-
- if (machine_is_h1900())
- {
- /* For our intents, the h1900 is not a real iPAQ, so we special-case it. */
- return h1900_special_case();
- }
-
- if (machine_is_h3100() || machine_is_h1900())
- for(i=0; i<MAX_IPAQ_CS; i++)
- ipaq_map[i].bankwidth = 2;
- else
- for(i=0; i<MAX_IPAQ_CS; i++)
- ipaq_map[i].bankwidth = 4;
-
- /*
- * Static partition definition selection
- */
- part_type = "static";
-
- simple_map_init(&ipaq_map[0]);
- simple_map_init(&ipaq_map[1]);
-
-#ifdef CONFIG_IPAQ_HANDHELD
- if (machine_is_ipaq()) {
- parts = h3xxx_partitions;
- nb_parts = ARRAY_SIZE(h3xxx_partitions);
- for(i=0; i<MAX_IPAQ_CS; i++) {
- ipaq_map[i].size = h3xxx_max_flash_size;
- ipaq_map[i].set_vpp = h3xxx_set_vpp;
- ipaq_map[i].phys = cs_phys[i];
- ipaq_map[i].virt = ioremap(cs_phys[i], 0x04000000);
- if (machine_is_h3100 () || machine_is_h1900())
- ipaq_map[i].bankwidth = 2;
- }
- if (machine_is_h3600()) {
- /* No asset partition here */
- h3xxx_partitions[1].size += 0x40000;
- nb_parts--;
- }
- }
-#endif
-#ifdef CONFIG_ARCH_H5400
- if (machine_is_h5400()) {
- ipaq_map[0].size = 0x02000000;
- ipaq_map[1].size = 0x02000000;
- ipaq_map[1].phys = 0x02000000;
- ipaq_map[1].virt = ipaq_map[0].virt + 0x02000000;
- }
-#endif
-#ifdef CONFIG_ARCH_H1900
- if (machine_is_h1900()) {
- ipaq_map[0].size = 0x00400000;
- ipaq_map[1].size = 0x02000000;
- ipaq_map[1].phys = 0x00080000;
- ipaq_map[1].virt = ipaq_map[0].virt + 0x00080000;
- }
-#endif
-
-#ifdef CONFIG_SA1100_JORNADA56X
- if (machine_is_jornada56x()) {
- parts = jornada_partitions;
- nb_parts = ARRAY_SIZE(jornada_partitions);
- ipaq_map[0].size = jornada_max_flash_size;
- ipaq_map[0].set_vpp = jornada56x_set_vpp;
- ipaq_map[0].virt = (__u32)ioremap(0x0, 0x04000000);
- }
-#endif
-#ifdef CONFIG_SA1100_JORNADA720
- if (machine_is_jornada720()) {
- parts = jornada_partitions;
- nb_parts = ARRAY_SIZE(jornada_partitions);
- ipaq_map[0].size = jornada_max_flash_size;
- ipaq_map[0].set_vpp = jornada720_set_vpp;
- }
-#endif
-
-
- if (machine_is_ipaq()) { /* for iPAQs only */
- for(i=0; i<MAX_IPAQ_CS; i++) {
- printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with CFI.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
- my_sub_mtd[i] = do_map_probe("cfi_probe", &ipaq_map[i]);
- if (!my_sub_mtd[i]) {
- printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
- my_sub_mtd[i] = do_map_probe("jedec_probe", &ipaq_map[i]);
- }
- if (!my_sub_mtd[i]) {
- printk(KERN_NOTICE "iPAQ flash: failed to find flash.\n");
- if (i)
- break;
- else
- return -ENXIO;
- } else
- printk(KERN_NOTICE "iPAQ flash: found %d bytes\n", my_sub_mtd[i]->size);
-
- /* do we really need this debugging? --joshua 20030703 */
- // printk("my_sub_mtd[%d]=%p\n", i, my_sub_mtd[i]);
- my_sub_mtd[i]->owner = THIS_MODULE;
- tot_flashsize += my_sub_mtd[i]->size;
- }
-#ifdef CONFIG_MTD_CONCAT
- /* fix the asset location */
-# ifdef CONFIG_LAB
- h3xxx_partitions[1].size = tot_flashsize - 0x40000 - 0x80000 /* extra big boot block */;
-# else
- h3xxx_partitions[1].size = tot_flashsize - 2 * 0x40000;
-# endif
- h3xxx_partitions[2].offset = tot_flashsize - 0x40000;
- /* and concat the devices */
- mymtd = mtd_concat_create(&my_sub_mtd[0], i,
- "ipaq");
- if (!mymtd) {
- printk("Cannot create iPAQ concat device\n");
- return -ENXIO;
- }
-#else
- mymtd = my_sub_mtd[0];
-
- /*
- *In the very near future, command line partition parsing
- * will use the device name as 'mtd-id' instead of a value
- * passed to the parse_cmdline_partitions() routine. Since
- * the bootldr says 'ipaq', make sure it continues to work.
- */
- mymtd->name = "ipaq";
-
- if ((machine_is_h3600())) {
-# ifdef CONFIG_LAB
- h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x80000;
-# else
- h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000;
-# endif
- nb_parts = 2;
- } else {
-# ifdef CONFIG_LAB
- h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000 - 0x80000; /* extra big boot block */
-# else
- h3xxx_partitions[1].size = my_sub_mtd[0]->size - 2*0x40000;
-# endif
- h3xxx_partitions[2].offset = my_sub_mtd[0]->size - 0x40000;
- }
-
- if (my_sub_mtd[1]) {
-# ifdef CONFIG_LAB
- h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x80000;
-# else
- h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x40000;
-# endif
- h3xxx_partitions_bank2[1].offset = my_sub_mtd[1]->size - 0x40000;
- }
-#endif
- }
- else {
- /*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
- */
- printk(KERN_NOTICE "IPAQ flash: probing %d-bit flash bus, window=%lx\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
- mymtd = do_map_probe("cfi_probe", &ipaq_map[0]);
- if (!mymtd)
- return -ENXIO;
- mymtd->owner = THIS_MODULE;
- }
-
-
- /*
- * Dynamic partition selection stuff (might override the static ones)
- */
-
- i = parse_mtd_partitions(mymtd, part_probes, &parsed_parts, 0);
-
- if (i > 0) {
- nb_parts = parsed_nr_parts = i;
- parts = parsed_parts;
- part_type = "dynamic";
- }
-
- if (!parts) {
- printk(KERN_NOTICE "IPAQ flash: no partition info available, registering whole flash at once\n");
- add_mtd_device(mymtd);
-#ifndef CONFIG_MTD_CONCAT
- if (my_sub_mtd[1])
- add_mtd_device(my_sub_mtd[1]);
-#endif
- } else {
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(mymtd, parts, nb_parts);
-#ifndef CONFIG_MTD_CONCAT
- if (my_sub_mtd[1])
- add_mtd_partitions(my_sub_mtd[1], h3xxx_partitions_bank2, ARRAY_SIZE(h3xxx_partitions_bank2));
-#endif
- }
-
- return 0;
-}
-
-static void __exit ipaq_mtd_cleanup(void)
-{
- int i;
-
- if (mymtd) {
- del_mtd_partitions(mymtd);
-#ifndef CONFIG_MTD_CONCAT
- if (my_sub_mtd[1])
- del_mtd_partitions(my_sub_mtd[1]);
-#endif
- map_destroy(mymtd);
-#ifdef CONFIG_MTD_CONCAT
- for(i=0; i<MAX_IPAQ_CS; i++)
-#else
- for(i=1; i<MAX_IPAQ_CS; i++)
-#endif
- {
- if (my_sub_mtd[i])
- map_destroy(my_sub_mtd[i]);
- }
- kfree(parsed_parts);
- }
-}
-
-static int __init h1900_special_case(void)
-{
- /* The iPAQ h1900 is a special case - it has weird ROM. */
- simple_map_init(&ipaq_map[0]);
- ipaq_map[0].size = 0x80000;
- ipaq_map[0].set_vpp = h3xxx_set_vpp;
- ipaq_map[0].phys = 0x0;
- ipaq_map[0].virt = ioremap(0x0, 0x04000000);
- ipaq_map[0].bankwidth = 2;
-
- printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
- mymtd = do_map_probe("jedec_probe", &ipaq_map[0]);
- if (!mymtd)
- return -ENODEV;
- add_mtd_device(mymtd);
- printk(KERN_NOTICE "iPAQ flash: registered h1910 flash\n");
-
- return 0;
-}
-
-module_init(ipaq_mtd_init);
-module_exit(ipaq_mtd_cleanup);
-
-MODULE_AUTHOR("Jamey Hicks");
-MODULE_DESCRIPTION("IPAQ CFI map driver");
-MODULE_LICENSE("MIT");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 7214b876feb..7b051529741 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -210,7 +210,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
* not attempt to do a direct access on us.
*/
info->map.phys = NO_XIP;
- info->map.size = dev->resource->end - dev->resource->start + 1;
+ info->map.size = resource_size(dev->resource);
/*
* We only support 16-bit accesses for now. If and when
@@ -224,7 +224,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
info->map.copy_from = ixp4xx_copy_from,
info->res = request_mem_region(dev->resource->start,
- dev->resource->end - dev->resource->start + 1,
+ resource_size(dev->resource),
"IXP4XXFlash");
if (!info->res) {
printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
@@ -233,7 +233,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
}
info->map.virt = ioremap(dev->resource->start,
- dev->resource->end - dev->resource->start + 1);
+ resource_size(dev->resource));
if (!info->map.virt) {
printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
err = -EIO;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 380648e9051..d9603f7f965 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -48,23 +48,22 @@ static int physmap_flash_remove(struct platform_device *dev)
if (info->cmtd) {
#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts || physmap_data->nr_parts)
+ if (info->nr_parts || physmap_data->nr_parts) {
del_mtd_partitions(info->cmtd);
- else
+
+ if (info->nr_parts)
+ kfree(info->parts);
+ } else {
del_mtd_device(info->cmtd);
+ }
#else
del_mtd_device(info->cmtd);
#endif
- }
-#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts)
- kfree(info->parts);
-#endif
-
#ifdef CONFIG_MTD_CONCAT
- if (info->cmtd != info->mtd[0])
- mtd_concat_destroy(info->cmtd);
+ if (info->cmtd != info->mtd[0])
+ mtd_concat_destroy(info->cmtd);
#endif
+ }
for (i = 0; i < MAX_RESOURCES; i++) {
if (info->mtd[i] != NULL)
@@ -130,7 +129,7 @@ static int physmap_flash_probe(struct platform_device *dev)
info->map[i].size);
if (info->map[i].virt == NULL) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
- err = EIO;
+ err = -EIO;
goto err_out;
}
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 74fa075c838..b13f6417b5b 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -20,14 +20,23 @@
#include <asm/io.h>
#include <mach/hardware.h>
-#include <asm/cacheflush.h>
#include <asm/mach/flash.h>
+#define CACHELINESIZE 32
+
static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
ssize_t len)
{
- flush_ioremap_region(map->phys, map->cached, from, len);
+ unsigned long start = (unsigned long)map->cached + from;
+ unsigned long end = start + len;
+
+ start &= ~(CACHELINESIZE - 1);
+ while (start < end) {
+ /* invalidate D cache line */
+ asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
+ start += CACHELINESIZE;
+ }
}
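
The replacement for flush_ioremap_region() aligns the start address down to a 32-byte cache line and walks the range one line at a time, invalidating each. A host-side sketch of the same alignment and stride arithmetic, with the privileged mcr instruction replaced by a printout, is:

#include <stdio.h>

#define CACHELINE 32

/* Same walk as pxa2xx_map_inval_cache(): align down, then step one line at a time */
static void inval_range(unsigned long base, unsigned long from, long len)
{
        unsigned long start = base + from;
        unsigned long end = start + len;

        start &= ~(CACHELINE - 1);
        while (start < end) {
                printf("invalidate line at 0x%lx\n", start);
                start += CACHELINE;
        }
}

int main(void)
{
        inval_range(0x1000, 0x2c, 40);  /* touches the lines at 0x1020 and 0x1040 */
        return 0;
}
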
struct pxa2xx_flash_info {
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index d7a47574d21..f3af87e08ec 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -248,7 +248,7 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla
plat->exit();
}
-static struct sa_info *__init
+static struct sa_info *__devinit
sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
{
struct sa_info *info;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 1f73297e777..82afad0ddd7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -612,16 +612,15 @@ static int __devinit vmu_connect(struct maple_device *mdev)
test_flash_data = be32_to_cpu(mdev->devinfo.function);
/* Need to count how many bits are set - to find out which
- * function_data element has details of the memory card:
- * using Brian Kernighan's/Peter Wegner's method */
- for (c = 0; test_flash_data; c++)
- test_flash_data &= test_flash_data - 1;
+ * function_data element has details of the memory card
+ */
+ c = hweight_long(test_flash_data);
basic_flash_data = be32_to_cpu(mdev->devinfo.function_data[c - 1]);
card = kmalloc(sizeof(struct memcard), GFP_KERNEL);
if (!card) {
- error = ENOMEM;
+ error = -ENOMEM;
goto fail_nomem;
}
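
The removed loop was the classic clear-lowest-set-bit popcount; hweight_long() gives the same answer. A quick check of the equivalence, using the compiler's __builtin_popcountl as a stand-in for hweight_long (which is not available outside the kernel):

#include <stdio.h>

/* The loop that was removed: clear the lowest set bit until nothing is left */
static int popcount_kernighan(unsigned long v)
{
        int c;

        for (c = 0; v; c++)
                v &= v - 1;
        return c;
}

int main(void)
{
        unsigned long v = 0x2e;         /* 0b101110 -> 4 bits set */

        printf("%d %d\n", popcount_kernighan(v), __builtin_popcountl(v));
        return 0;
}
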
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 64e2b379a35..c82e09bbc5f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -84,9 +84,6 @@ static int mtd_blktrans_thread(void *arg)
struct request_queue *rq = tr->blkcore_priv->rq;
struct request *req = NULL;
- /* we might get involved when memory gets low, so use PF_MEMALLOC */
- current->flags |= PF_MEMALLOC;
-
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
@@ -381,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
"%sd", tr->name);
if (IS_ERR(tr->blkcore_priv->thread)) {
- int ret = PTR_ERR(tr->blkcore_priv->thread);
+ ret = PTR_ERR(tr->blkcore_priv->thread);
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 467a4f177bf..c356c0a30c3 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -447,7 +447,7 @@ struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i] == mtd)
ret = mtd_table[i];
- } else if (num < MAX_MTD_DEVICES) {
+ } else if (num >= 0 && num < MAX_MTD_DEVICES) {
ret = mtd_table[num];
if (mtd && mtd != ret)
ret = NULL;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1060337c06d..a714ec48276 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -29,14 +29,34 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
-#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
+#include <linux/kmsg_dump.h>
+
+/* Maximum MTD partition size */
+#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
-#define OOPS_PAGE_SIZE 4096
+#define MTDOOPS_HEADER_SIZE 8
+
+static unsigned long record_size = 4096;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+ "record size for MTD OOPS pages in bytes (default 4096)");
+
+static char mtddev[80];
+module_param_string(mtddev, mtddev, 80, 0400);
+MODULE_PARM_DESC(mtddev,
+ "name or index number of the MTD device to use");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
static struct mtdoops_context {
+ struct kmsg_dumper dump;
+
int mtd_index;
struct work_struct work_erase;
struct work_struct work_write;
@@ -44,28 +64,43 @@ static struct mtdoops_context {
int oops_pages;
int nextpage;
int nextcount;
- char *name;
+ unsigned long *oops_page_used;
void *oops_buf;
-
- /* writecount and disabling ready are spin lock protected */
- spinlock_t writecount_lock;
- int ready;
- int writecount;
} oops_cxt;
+static void mark_page_used(struct mtdoops_context *cxt, int page)
+{
+ set_bit(page, cxt->oops_page_used);
+}
+
+static void mark_page_unused(struct mtdoops_context *cxt, int page)
+{
+ clear_bit(page, cxt->oops_page_used);
+}
+
+static int page_is_used(struct mtdoops_context *cxt, int page)
+{
+ return test_bit(page, cxt->oops_page_used);
+}
+
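
Instead of re-reading each record header from flash, the driver now keeps a one-bit-per-page "used" bitmap in memory. A minimal sketch of the same set/clear/test bookkeeping on a plain unsigned long array (the kernel's set_bit/clear_bit/test_bit are atomic; this sketch is not):

#include <stdio.h>
#include <limits.h>

#define NPAGES        64
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* One bit per record page, like cxt->oops_page_used */
static unsigned long used[(NPAGES + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void mark_used(int page)   { used[page / BITS_PER_LONG] |=  1UL << (page % BITS_PER_LONG); }
static void mark_unused(int page) { used[page / BITS_PER_LONG] &= ~(1UL << (page % BITS_PER_LONG)); }
static int  is_used(int page)     { return (used[page / BITS_PER_LONG] >> (page % BITS_PER_LONG)) & 1; }

int main(void)
{
        mark_used(5);
        mark_used(40);
        mark_unused(5);
        printf("%d %d %d\n", is_used(5), is_used(40), is_used(7));      /* 0 1 0 */
        return 0;
}
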
static void mtdoops_erase_callback(struct erase_info *done)
{
wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
wake_up(wait_q);
}
-static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
+static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
+ struct mtd_info *mtd = cxt->mtd;
+ u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
+ u32 start_page = start_page_offset / record_size;
+ u32 erase_pages = mtd->erasesize / record_size;
struct erase_info erase;
DECLARE_WAITQUEUE(wait, current);
wait_queue_head_t wait_q;
int ret;
+ int page;
init_waitqueue_head(&wait_q);
erase.mtd = mtd;
@@ -81,25 +116,24 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
- printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
- "on \"%s\" failed\n",
- (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
+ printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+ (unsigned long long)erase.addr,
+ (unsigned long long)erase.len, mtddev);
return ret;
}
schedule(); /* Wait for erase to finish. */
remove_wait_queue(&wait_q, &wait);
+ /* Mark pages as unused */
+ for (page = start_page; page < start_page + erase_pages; page++)
+ mark_page_unused(cxt, page);
+
return 0;
}
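
mtdoops_erase_block() now also clears the "used" bits for every record page inside the erased block, so it needs the first record page of the block and the number of record pages per eraseblock. The arithmetic, sketched with assumed sizes (64 KiB eraseblock, 4 KiB record):

#include <stdio.h>

#define ERASESIZE   (64 * 1024)
#define RECORD_SIZE 4096

int main(void)
{
        unsigned int offset = 200000;   /* arbitrary offset inside the partition */
        unsigned int block_start = (offset / ERASESIZE) * ERASESIZE;
        unsigned int first_page = block_start / RECORD_SIZE;
        unsigned int pages_per_block = ERASESIZE / RECORD_SIZE;

        /* After erasing the block, every record page in it becomes reusable */
        printf("erase block at 0x%x: pages %u..%u\n",
               block_start, first_page, first_page + pages_per_block - 1);
        return 0;       /* prints: erase block at 0x30000: pages 48..63 */
}
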
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
- struct mtd_info *mtd = cxt->mtd;
- size_t retlen;
- u32 count;
- int ret;
-
cxt->nextpage++;
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
@@ -107,25 +141,13 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
if (cxt->nextcount == 0xffffffff)
cxt->nextcount = 0;
- ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
- &retlen, (u_char *) &count);
- if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
- printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
- ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
- retlen, ret);
+ if (page_is_used(cxt, cxt->nextpage)) {
schedule_work(&cxt->work_erase);
return;
}
- /* See if we need to erase the next block */
- if (count != 0xffffffff) {
- schedule_work(&cxt->work_erase);
- return;
- }
-
- printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
- cxt->nextpage, cxt->nextcount);
- cxt->ready = 1;
+ printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
+ cxt->nextpage, cxt->nextcount);
}
/* Scheduled work - when we can't proceed without erasing a block */
@@ -140,47 +162,47 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
if (!mtd)
return;
- mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
+ mod = (cxt->nextpage * record_size) % mtd->erasesize;
if (mod != 0) {
- cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
+ cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
}
while (mtd->block_isbad) {
- ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+ ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
if (!ret)
break;
if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+ printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
return;
}
badblock:
- printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
- cxt->nextpage * OOPS_PAGE_SIZE);
+ printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
+ cxt->nextpage * record_size);
i++;
- cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
+ cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
- if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
- printk(KERN_ERR "mtdoops: All blocks bad!\n");
+ if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
+ printk(KERN_ERR "mtdoops: all blocks bad!\n");
return;
}
}
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
- ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+ ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
if (ret >= 0) {
- printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
- cxt->ready = 1;
+ printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
+ cxt->nextpage, cxt->nextcount);
return;
}
- if (mtd->block_markbad && (ret == -EIO)) {
- ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+ if (mtd->block_markbad && ret == -EIO) {
+ ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+ printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
}
}
@@ -191,36 +213,37 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
+ u32 *hdr;
int ret;
- if (cxt->writecount < OOPS_PAGE_SIZE)
- memset(cxt->oops_buf + cxt->writecount, 0xff,
- OOPS_PAGE_SIZE - cxt->writecount);
+ /* Add mtdoops header to the buffer */
+ hdr = cxt->oops_buf;
+ hdr[0] = cxt->nextcount;
+ hdr[1] = MTDOOPS_KERNMSG_MAGIC;
if (panic)
- ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
- OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+ ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
else
- ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
- OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
-
- cxt->writecount = 0;
+ ret = mtd->write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
- if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
- printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
- cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+ if (retlen != record_size || ret < 0)
+ printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
+ cxt->nextpage * record_size, retlen, record_size, ret);
+ mark_page_used(cxt, cxt->nextpage);
+ memset(cxt->oops_buf, 0xff, record_size);
mtdoops_inc_counter(cxt);
}
-
static void mtdoops_workfunc_write(struct work_struct *work)
{
struct mtdoops_context *cxt =
container_of(work, struct mtdoops_context, work_write);
mtdoops_write(cxt, 0);
-}
+}
static void find_next_position(struct mtdoops_context *cxt)
{
@@ -230,28 +253,33 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
- ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
- if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
- printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)"
- ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+ /* Assume the page is used */
+ mark_page_used(cxt, page);
+ ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+ &retlen, (u_char *) &count[0]);
+ if (retlen != MTDOOPS_HEADER_SIZE ||
+ (ret < 0 && ret != -EUCLEAN)) {
+ printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
+ page * record_size, retlen,
+ MTDOOPS_HEADER_SIZE, ret);
continue;
}
- if (count[1] != MTDOOPS_KERNMSG_MAGIC)
- continue;
+ if (count[0] == 0xffffffff && count[1] == 0xffffffff)
+ mark_page_unused(cxt, page);
if (count[0] == 0xffffffff)
continue;
if (maxcount == 0xffffffff) {
maxcount = count[0];
maxpos = page;
- } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
+ } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
maxcount = count[0];
maxpos = page;
- } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
+ } else if (count[0] > maxcount && count[0] < 0xc0000000) {
maxcount = count[0];
maxpos = page;
- } else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
- && (maxcount > 0x80000000)) {
+ } else if (count[0] > maxcount && count[0] > 0xc0000000
+ && maxcount > 0x80000000) {
maxcount = count[0];
maxpos = page;
}
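
find_next_position() picks the newest record by comparing 32-bit sequence counters that may wrap around, so a very small count is treated as newer than a very large one. A stand-alone sketch of that comparison with the same thresholds:

#include <stdio.h>
#include <stdint.h>

/* Pick the newest record counter allowing for 32-bit wrap-around, the same way
 * find_next_position() does: a count below 0x40000000 beats one above 0xc0000000,
 * because the counter has wrapped in between. */
static uint32_t newest(const uint32_t *count, int n)
{
        uint32_t maxcount = 0xffffffff;
        int i;

        for (i = 0; i < n; i++) {
                if (count[i] == 0xffffffff)     /* erased page, no record */
                        continue;
                if (maxcount == 0xffffffff)
                        maxcount = count[i];
                else if (count[i] < 0x40000000 && maxcount > 0xc0000000)
                        maxcount = count[i];
                else if (count[i] > maxcount && count[i] < 0xc0000000)
                        maxcount = count[i];
                else if (count[i] > maxcount && count[i] > 0xc0000000
                         && maxcount > 0x80000000)
                        maxcount = count[i];
        }
        return maxcount;
}

int main(void)
{
        uint32_t counts[] = { 0xfffffff0, 0xffffffff, 0x00000002, 0xfffffff3 };

        printf("0x%08x\n", newest(counts, 4));  /* 0x00000002: newest, after the wrap */
        return 0;
}
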
@@ -269,187 +297,170 @@ static void find_next_position(struct mtdoops_context *cxt)
mtdoops_inc_counter(cxt);
}
-
-static void mtdoops_notify_add(struct mtd_info *mtd)
+static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2)
{
- struct mtdoops_context *cxt = &oops_cxt;
+ struct mtdoops_context *cxt = container_of(dumper,
+ struct mtdoops_context, dump);
+ unsigned long s1_start, s2_start;
+ unsigned long l1_cpy, l2_cpy;
+ char *dst;
+
+ /* Only dump oopses if dump_oops is set */
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
- if (cxt->name && !strcmp(mtd->name, cxt->name))
- cxt->mtd_index = mtd->index;
+ dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
+ l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
+ l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
- if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
- return;
+ s2_start = l2 - l2_cpy;
+ s1_start = l1 - l1_cpy;
- if (mtd->size < (mtd->erasesize * 2)) {
- printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
- mtd->index);
- return;
- }
+ memcpy(dst, s1 + s1_start, l1_cpy);
+ memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
- if (mtd->erasesize < OOPS_PAGE_SIZE) {
- printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
- mtd->index);
+ /* Panics must be written immediately */
+ if (reason == KMSG_DUMP_PANIC) {
+ if (!cxt->mtd->panic_write)
+ printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+ else
+ mtdoops_write(cxt, 1);
return;
}
- cxt->mtd = mtd;
- if (mtd->size > INT_MAX)
- cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
- else
- cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
-
- find_next_position(cxt);
-
- printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
+ /* For other cases, schedule work to write it "nicely" */
+ schedule_work(&cxt->work_write);
}
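
mtdoops_do_dump() copies the tails of the two kmsg buffers so that the newer buffer (s2) is kept in full where possible, and whatever room remains after the 8-byte header goes to the tail of the older one (s1). A sketch of the clamping with small assumed sizes:

#include <stdio.h>
#include <string.h>

#define RECORD_SIZE 32
#define HEADER_SIZE 8

/* Keep the newest bytes: reserve room for s2 (the newer buffer) first, then use
 * whatever is left for the tail of s1, exactly as in mtdoops_do_dump(). */
static void fill_record(char *dst, const char *s1, size_t l1,
                        const char *s2, size_t l2)
{
        size_t room = RECORD_SIZE - HEADER_SIZE;
        size_t l2_cpy = l2 < room ? l2 : room;
        size_t l1_cpy = l1 < room - l2_cpy ? l1 : room - l2_cpy;

        memcpy(dst, s1 + (l1 - l1_cpy), l1_cpy);
        memcpy(dst + l1_cpy, s2 + (l2 - l2_cpy), l2_cpy);
        dst[l1_cpy + l2_cpy] = '\0';
}

int main(void)
{
        char rec[RECORD_SIZE];
        const char *older = "....older kernel messages....";
        const char *newer = "OOPS: newest lines";

        fill_record(rec, older, strlen(older), newer, strlen(newer));
        printf("%s\n", rec);    /* last 6 bytes of older + all 18 of newer = 24 bytes */
        return 0;
}
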
-static void mtdoops_notify_remove(struct mtd_info *mtd)
+static void mtdoops_notify_add(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
+ u64 mtdoops_pages = div_u64(mtd->size, record_size);
+ int err;
- if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
- return;
-
- cxt->mtd = NULL;
- flush_scheduled_work();
-}
-
-static void mtdoops_console_sync(void)
-{
- struct mtdoops_context *cxt = &oops_cxt;
- struct mtd_info *mtd = cxt->mtd;
- unsigned long flags;
+ if (!strcmp(mtd->name, mtddev))
+ cxt->mtd_index = mtd->index;
- if (!cxt->ready || !mtd || cxt->writecount == 0)
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
return;
- /*
- * Once ready is 0 and we've held the lock no further writes to the
- * buffer will happen
- */
- spin_lock_irqsave(&cxt->writecount_lock, flags);
- if (!cxt->ready) {
- spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+ if (mtd->size < mtd->erasesize * 2) {
+ printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
+ mtd->index);
return;
}
- cxt->ready = 0;
- spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-
- if (mtd->panic_write && in_interrupt())
- /* Interrupt context, we're going to panic so try and log */
- mtdoops_write(cxt, 1);
- else
- schedule_work(&cxt->work_write);
-}
-
-static void
-mtdoops_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct mtdoops_context *cxt = co->data;
- struct mtd_info *mtd = cxt->mtd;
- unsigned long flags;
-
- if (!oops_in_progress) {
- mtdoops_console_sync();
+ if (mtd->erasesize < record_size) {
+ printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
+ mtd->index);
return;
}
-
- if (!cxt->ready || !mtd)
+ if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
+ printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
+ mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
return;
+ }
- /* Locking on writecount ensures sequential writes to the buffer */
- spin_lock_irqsave(&cxt->writecount_lock, flags);
-
- /* Check ready status didn't change whilst waiting for the lock */
- if (!cxt->ready) {
- spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+ /* oops_page_used is a bit field */
+ cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
+ BITS_PER_LONG));
+ if (!cxt->oops_page_used) {
+ printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
}
- if (cxt->writecount == 0) {
- u32 *stamp = cxt->oops_buf;
- *stamp++ = cxt->nextcount;
- *stamp = MTDOOPS_KERNMSG_MAGIC;
- cxt->writecount = 8;
+ cxt->dump.dump = mtdoops_do_dump;
+ err = kmsg_dump_register(&cxt->dump);
+ if (err) {
+ printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
+ vfree(cxt->oops_page_used);
+ cxt->oops_page_used = NULL;
+ return;
}
- if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
- count = OOPS_PAGE_SIZE - cxt->writecount;
-
- memcpy(cxt->oops_buf + cxt->writecount, s, count);
- cxt->writecount += count;
-
- spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-
- if (cxt->writecount == OOPS_PAGE_SIZE)
- mtdoops_console_sync();
+ cxt->mtd = mtd;
+ cxt->oops_pages = (int)mtd->size / record_size;
+ find_next_position(cxt);
+ printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}
-static int __init mtdoops_console_setup(struct console *co, char *options)
+static void mtdoops_notify_remove(struct mtd_info *mtd)
{
- struct mtdoops_context *cxt = co->data;
+ struct mtdoops_context *cxt = &oops_cxt;
- if (cxt->mtd_index != -1 || cxt->name)
- return -EBUSY;
- if (options) {
- cxt->name = kstrdup(options, GFP_KERNEL);
- return 0;
- }
- if (co->index == -1)
- return -EINVAL;
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
+ return;
- cxt->mtd_index = co->index;
- return 0;
+ if (kmsg_dump_unregister(&cxt->dump) < 0)
+ printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
+
+ cxt->mtd = NULL;
+ flush_scheduled_work();
}
+
static struct mtd_notifier mtdoops_notifier = {
.add = mtdoops_notify_add,
.remove = mtdoops_notify_remove,
};
-static struct console mtdoops_console = {
- .name = "ttyMTD",
- .write = mtdoops_console_write,
- .setup = mtdoops_console_setup,
- .unblank = mtdoops_console_sync,
- .index = -1,
- .data = &oops_cxt,
-};
-
-static int __init mtdoops_console_init(void)
+static int __init mtdoops_init(void)
{
struct mtdoops_context *cxt = &oops_cxt;
+ int mtd_index;
+ char *endp;
+ if (strlen(mtddev) == 0) {
+ printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
+ return -EINVAL;
+ }
+ if ((record_size & 4095) != 0) {
+ printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
+ return -EINVAL;
+ }
+ if (record_size < 4096) {
+ printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
+ return -EINVAL;
+ }
+
+ /* Setup the MTD device to use */
cxt->mtd_index = -1;
- cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
- spin_lock_init(&cxt->writecount_lock);
+ mtd_index = simple_strtoul(mtddev, &endp, 0);
+ if (*endp == '\0')
+ cxt->mtd_index = mtd_index;
+ if (cxt->mtd_index > MAX_MTD_DEVICES) {
+ printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
+ mtd_index);
+ return -EINVAL;
+ }
+ cxt->oops_buf = vmalloc(record_size);
if (!cxt->oops_buf) {
- printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
+ printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
return -ENOMEM;
}
+ memset(cxt->oops_buf, 0xff, record_size);
INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
- register_console(&mtdoops_console);
register_mtd_user(&mtdoops_notifier);
return 0;
}
-static void __exit mtdoops_console_exit(void)
+static void __exit mtdoops_exit(void)
{
struct mtdoops_context *cxt = &oops_cxt;
unregister_mtd_user(&mtdoops_notifier);
- unregister_console(&mtdoops_console);
- kfree(cxt->name);
vfree(cxt->oops_buf);
+ vfree(cxt->oops_page_used);
}
-subsys_initcall(mtdoops_console_init);
-module_exit(mtdoops_console_exit);
+module_init(mtdoops_init);
+module_exit(mtdoops_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 0e35e1aefd2..677cd53f18c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -201,6 +201,22 @@ config MTD_NAND_S3C2410_CLKSTOP
when the NAND chip is selected or released, but will save
approximately 5mA of power when there is nothing happening.
+config MTD_NAND_BCM_UMI
+ tristate "NAND Flash support for BCM Reference Boards"
+ depends on ARCH_BCMRING && MTD_NAND
+ help
+ This enables the NAND flash controller on the BCM UMI block.
+
+ No board specific support is done by this driver; each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_BCM_UMI_HWCS
+ bool "BCM UMI NAND Hardware CS"
+ depends on MTD_NAND_BCM_UMI
+ help
+ Enable the use of the BCM UMI block's internal CS using NAND.
+ This should only be used if you know the external NAND CS can toggle.
+
config MTD_NAND_DISKONCHIP
tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -275,14 +291,6 @@ config MTD_NAND_SHARPSL
tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
depends on ARCH_PXA
-config MTD_NAND_BASLER_EXCITE
- tristate "Support for NAND Flash on Basler eXcite"
- depends on BASLER_EXCITE
- help
- This enables the driver for the NAND flash device found on the
- Basler eXcite Smart Camera. If built as a module, the driver
- will be named excite_nandflash.
-
config MTD_NAND_CAFE
tristate "NAND support for OLPC CAFÉ chip"
depends on PCI
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6950d3dabf1..1407bd14401 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
-obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
@@ -42,5 +41,6 @@ obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
+obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 6d9649159a1..2d6773281fd 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -372,15 +372,6 @@ static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
return __alauda_read_page(mtd, from, ignore_buf, oob);
}
-static int popcount8(u8 c)
-{
- int ret = 0;
-
- for ( ; c; c>>=1)
- ret += c & 1;
- return ret;
-}
-
static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
{
u8 oob[16];
@@ -391,7 +382,7 @@ static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
return err;
/* A block is marked bad if two or more bits are zero */
- return popcount8(oob[5]) >= 7 ? 0 : 1;
+ return hweight8(oob[5]) >= 7 ? 0 : 1;
}
static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index f8e9975c86e..524e6c9e067 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -192,7 +192,6 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
{
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
- uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
unsigned int ecc_value;
/* get the first 2 ECC bytes */
@@ -464,7 +463,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (host->board->det_pin) {
if (gpio_get_value(host->board->det_pin)) {
printk(KERN_INFO "No SmartMedia card inserted.\n");
- res = ENXIO;
+ res = -ENXIO;
goto err_no_card;
}
}
@@ -535,7 +534,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if ((!partitions) || (num_partitions == 0)) {
printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
- res = ENXIO;
+ res = -ENXIO;
goto err_no_partitions;
}
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
new file mode 100644
index 00000000000..a930666d068
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -0,0 +1,213 @@
+/*****************************************************************************
+* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/* ---- Include Files ---------------------------------------------------- */
+#include "nand_bcm_umi.h"
+
+/* ---- External Variable Declarations ----------------------------------- */
+/* ---- External Function Prototypes ------------------------------------- */
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+
+/* ---- Private Function Prototypes -------------------------------------- */
+static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int page);
+static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf);
+
+/* ---- Private Variables ------------------------------------------------ */
+
+/*
+** nand_hw_eccoob
+** New oob placement block for use with hardware ecc generation.
+*/
+static struct nand_ecclayout nand_hw_eccoob_512 = {
+ /* Reserve 5 for BI indicator */
+ .oobfree = {
+#if (NAND_ECC_NUM_BYTES > 3)
+ {.offset = 0, .length = 2}
+#else
+ {.offset = 0, .length = 5},
+ {.offset = 6, .length = 7}
+#endif
+ }
+};
+
+/*
+** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
+** except the BI is at byte 0.
+*/
+static struct nand_ecclayout nand_hw_eccoob_2048 = {
+ /* Reserve 0 as BI indicator */
+ .oobfree = {
+#if (NAND_ECC_NUM_BYTES > 10)
+ {.offset = 1, .length = 2},
+#elif (NAND_ECC_NUM_BYTES > 7)
+ {.offset = 1, .length = 5},
+ {.offset = 16, .length = 6},
+ {.offset = 32, .length = 6},
+ {.offset = 48, .length = 6}
+#else
+ {.offset = 1, .length = 8},
+ {.offset = 16, .length = 9},
+ {.offset = 32, .length = 9},
+ {.offset = 48, .length = 9}
+#endif
+ }
+};
+
+/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
+ * except the BI is at byte 0. */
+static struct nand_ecclayout nand_hw_eccoob_4096 = {
+ /* Reserve 0 as BI indicator */
+ .oobfree = {
+#if (NAND_ECC_NUM_BYTES > 10)
+ {.offset = 1, .length = 2},
+ {.offset = 16, .length = 3},
+ {.offset = 32, .length = 3},
+ {.offset = 48, .length = 3},
+ {.offset = 64, .length = 3},
+ {.offset = 80, .length = 3},
+ {.offset = 96, .length = 3},
+ {.offset = 112, .length = 3}
+#else
+ {.offset = 1, .length = 5},
+ {.offset = 16, .length = 6},
+ {.offset = 32, .length = 6},
+ {.offset = 48, .length = 6},
+ {.offset = 64, .length = 6},
+ {.offset = 80, .length = 6},
+ {.offset = 96, .length = 6},
+ {.offset = 112, .length = 6}
+#endif
+ }
+};
+
+/* ---- Private Functions ------------------------------------------------ */
+/* ==== Public Functions ================================================= */
+
+/****************************************************************************
+*
+* bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
+* @mtd: mtd info structure
+* @chip: nand chip info structure
+* @buf: buffer to store read data
+*
+***************************************************************************/
+static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t * buf,
+ int page)
+{
+ int sectorIdx = 0;
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *datap = buf;
+ uint8_t eccCalc[NAND_ECC_NUM_BYTES];
+ int sectorOobSize = mtd->oobsize / eccsteps;
+ int stat;
+
+ for (sectorIdx = 0; sectorIdx < eccsteps;
+ sectorIdx++, datap += eccsize) {
+ if (sectorIdx > 0) {
+ /* Seek to the sector's offset within the page */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
+ -1);
+ }
+
+ /* Enable hardware ECC before reading the buf */
+ nand_bcm_umi_bch_enable_read_hwecc();
+
+ /* Read in data */
+ bcm_umi_nand_read_buf(mtd, datap, eccsize);
+
+ /* Pause hardware ECC after reading the buf */
+ nand_bcm_umi_bch_pause_read_ecc_calc();
+
+ /* Read the OOB ECC */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + sectorIdx * sectorOobSize, -1);
+ nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
+ NAND_ECC_NUM_BYTES,
+ chip->oob_poi +
+ sectorIdx * sectorOobSize);
+
+ /* Correct any ECC detected errors */
+ stat =
+ nand_bcm_umi_bch_correct_page(datap, eccCalc,
+ NAND_ECC_NUM_BYTES);
+
+ /* Update Stats */
+ if (stat < 0) {
+#if defined(NAND_BCM_UMI_DEBUG)
+ printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
+ __func__, sectorIdx);
+ printk(KERN_WARNING
+ "%s data %02x %02x %02x %02x "
+ "%02x %02x %02x %02x\n",
+ __func__, datap[0], datap[1], datap[2], datap[3],
+ datap[4], datap[5], datap[6], datap[7]);
+ printk(KERN_WARNING
+ "%s ecc %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x\n",
+ __func__, eccCalc[0], eccCalc[1], eccCalc[2],
+ eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
+ eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
+ eccCalc[11], eccCalc[12]);
+ BUG();
+#endif
+ mtd->ecc_stats.failed++;
+ } else {
+#if defined(NAND_BCM_UMI_DEBUG)
+ if (stat > 0) {
+ printk(KERN_INFO
+ "%s %d correctable_errors detected\n",
+ __func__, stat);
+ }
+#endif
+ mtd->ecc_stats.corrected += stat;
+ }
+ }
+ return 0;
+}
+
+/****************************************************************************
+*
+* bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
+* @mtd: mtd info structure
+* @chip: nand chip info structure
+* @buf: data buffer
+*
+***************************************************************************/
+static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf)
+{
+ int sectorIdx = 0;
+ int eccsize = chip->ecc.size;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *datap = buf;
+ uint8_t *oobp = chip->oob_poi;
+ int sectorOobSize = mtd->oobsize / eccsteps;
+
+ for (sectorIdx = 0; sectorIdx < eccsteps;
+ sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
+ /* Enable hardware ECC before writing the buf */
+ nand_bcm_umi_bch_enable_write_hwecc();
+ bcm_umi_nand_write_buf(mtd, datap, eccsize);
+ nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
+ NAND_ECC_NUM_BYTES);
+ }
+
+ bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+}
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
new file mode 100644
index 00000000000..087bcd745bb
--- /dev/null
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -0,0 +1,581 @@
+/*****************************************************************************
+* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-types.h>
+#include <asm/system.h>
+
+#include <mach/reg_nand.h>
+#include <mach/reg_umi.h>
+
+#include "nand_bcm_umi.h"
+
+#include <mach/memory_settings.h>
+
+#define USE_DMA 1
+#include <mach/dma.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+
+/* ---- External Variable Declarations ----------------------------------- */
+/* ---- External Function Prototypes ------------------------------------- */
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+static const __devinitconst char gBanner[] = KERN_INFO \
+ "BCM UMI MTD NAND Driver: 1.00\n";
+
+#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+#if NAND_ECC_BCH
+static uint8_t scan_ff_pattern[] = { 0xff };
+
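+/* Large-page parts keep the factory bad-block marker at OOB byte 0; any value
+ * other than 0xff there flags the block as bad. */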
+static struct nand_bbt_descr largepage_bbt = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+#endif
+
+/*
+** Preallocate a coherent DMA buffer once so an allocation is not needed for
+** every DMA operation; DMA_MAX_BUFLEN below is the size of that buffer.
+*/
+#if USE_DMA
+#define DMA_MIN_BUFLEN 512
+#define DMA_MAX_BUFLEN PAGE_SIZE
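+/* Transfers shorter than DMA_MIN_BUFLEN or longer than DMA_MAX_BUFLEN fall
+ * back to programmed I/O. */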
+#define USE_DIRECT_IO(len) (((len) < DMA_MIN_BUFLEN) || \
+ ((len) > DMA_MAX_BUFLEN))
+
+/*
+ * The current NAND data space goes from 0x80001900 to 0x80001FFF,
+ * which is only 0x700 = 1792 bytes long. This is too small for 2K, 4K page
+ * size NAND flash. Need to break the DMA down to multiple 1Ks.
+ *
+ * Need to make sure REG_NAND_DATA_PADDR + DMA_MAX_LEN < 0x80002000
+ */
+#define DMA_MAX_LEN 1024
+
+#else /* !USE_DMA */
+#define DMA_MIN_BUFLEN 0
+#define DMA_MAX_BUFLEN 0
+#define USE_DIRECT_IO(len) 1
+#endif
+/* ---- Private Function Prototypes -------------------------------------- */
+static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len);
+static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
+ int len);
+
+/* ---- Private Variables ------------------------------------------------ */
+static struct mtd_info *board_mtd;
+static void __iomem *bcm_umi_io_base;
+static void *virtPtr;
+static dma_addr_t physPtr;
+static struct completion nand_comp;
+
+/* ---- Private Functions ------------------------------------------------ */
+#if NAND_ECC_BCH
+#include "bcm_umi_bch.c"
+#else
+#include "bcm_umi_hamming.c"
+#endif
+
+#if USE_DMA
+
+/* Handler called when the DMA finishes. */
+static void nand_dma_handler(DMA_Device_t dev, int reason, void *userData)
+{
+ complete(&nand_comp);
+}
+
+static int nand_dma_init(void)
+{
+ int rc;
+
+ rc = dma_set_device_handler(DMA_DEVICE_NAND_MEM_TO_MEM,
+ nand_dma_handler, NULL);
+ if (rc != 0) {
+ printk(KERN_ERR "dma_set_device_handler failed: %d\n", rc);
+ return rc;
+ }
+
+ virtPtr =
+ dma_alloc_coherent(NULL, DMA_MAX_BUFLEN, &physPtr, GFP_KERNEL);
+ if (virtPtr == NULL) {
+ printk(KERN_ERR "NAND - Failed to allocate memory for DMA buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nand_dma_term(void)
+{
+ if (virtPtr != NULL)
+ dma_free_coherent(NULL, DMA_MAX_BUFLEN, virtPtr, physPtr);
+}
+
+static void nand_dma_read(void *buf, int len)
+{
+ int offset = 0;
+ int tmp_len = 0;
+ int len_left = len;
+ DMA_Handle_t hndl;
+
+ if (virtPtr == NULL)
+ panic("nand_dma_read: virtPtr == NULL\n");
+
+ if ((void *)physPtr == NULL)
+ panic("nand_dma_read: physPtr == NULL\n");
+
+ hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
+ if (hndl < 0) {
+ printk(KERN_ERR
+ "nand_dma_read: unable to allocate dma channel: %d\n",
+ (int)hndl);
+ panic("\n");
+ }
+
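+ /* Copy in DMA_MAX_LEN chunks so each transfer stays inside the NAND data window. */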
+ while (len_left > 0) {
+ if (len_left > DMA_MAX_LEN) {
+ tmp_len = DMA_MAX_LEN;
+ len_left -= DMA_MAX_LEN;
+ } else {
+ tmp_len = len_left;
+ len_left = 0;
+ }
+
+ init_completion(&nand_comp);
+ dma_transfer_mem_to_mem(hndl, REG_NAND_DATA_PADDR,
+ physPtr + offset, tmp_len);
+ wait_for_completion(&nand_comp);
+
+ offset += tmp_len;
+ }
+
+ dma_free_channel(hndl);
+
+ if (buf != NULL)
+ memcpy(buf, virtPtr, len);
+}
+
+static void nand_dma_write(const void *buf, int len)
+{
+ int offset = 0;
+ int tmp_len = 0;
+ int len_left = len;
+ DMA_Handle_t hndl;
+
+ if (buf == NULL)
+ panic("nand_dma_write: buf == NULL\n");
+
+ if (virtPtr == NULL)
+ panic("nand_dma_write: virtPtr == NULL\n");
+
+ if ((void *)physPtr == NULL)
+ panic("nand_dma_write: physPtr == NULL\n");
+
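+ /* Stage the caller's data in the coherent DMA buffer before transferring it to the NAND data register. */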
+ memcpy(virtPtr, buf, len);
+
+
+ hndl = dma_request_channel(DMA_DEVICE_NAND_MEM_TO_MEM);
+ if (hndl < 0) {
+ printk(KERN_ERR
+ "nand_dma_write: unable to allocate dma channel: %d\n",
+ (int)hndl);
+ panic("\n");
+ }
+
+ while (len_left > 0) {
+ if (len_left > DMA_MAX_LEN) {
+ tmp_len = DMA_MAX_LEN;
+ len_left -= DMA_MAX_LEN;
+ } else {
+ tmp_len = len_left;
+ len_left = 0;
+ }
+
+ init_completion(&nand_comp);
+ dma_transfer_mem_to_mem(hndl, physPtr + offset,
+ REG_NAND_DATA_PADDR, tmp_len);
+ wait_for_completion(&nand_comp);
+
+ offset += tmp_len;
+ }
+
+ dma_free_channel(hndl);
+}
+
+#endif
+
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ return nand_bcm_umi_dev_ready();
+}
+
+/****************************************************************************
+*
+* bcm_umi_nand_inithw
+*
+* This routine does the necessary hardware (board-specific)
+* initializations. This includes setting up the timings, etc.
+*
+***************************************************************************/
+int bcm_umi_nand_inithw(void)
+{
+ /* Configure nand timing parameters */
+ REG_UMI_NAND_TCR &= ~0x7ffff;
+ REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
+
+#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
+ /* enable software control of CS */
+ REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
+#endif
+
+ /* keep NAND chip select asserted */
+ REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
+
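+ /* clear WORD16 so the NAND data bus runs in 8-bit mode */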
+ REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
+ /* enable writes to flash */
+ REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
+
+ writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
+ nand_bcm_umi_wait_till_ready();
+
+#if NAND_ECC_BCH
+ nand_bcm_umi_bch_config_ecc(NAND_ECC_NUM_BYTES);
+#endif
+
+ return 0;
+}
+
+/* Used to latch the proper register for access. */
+static void bcm_umi_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ /* send command to hardware */
+ struct nand_chip *chip = mtd->priv;
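+ /* On a control change, point IO_ADDR_W at the command, address or data register as indicated by CLE/ALE. */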
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_CLE) {
+ chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_CMD_OFFSET;
+ goto CMD;
+ }
+ if (ctrl & NAND_ALE) {
+ chip->IO_ADDR_W =
+ bcm_umi_io_base + REG_NAND_ADDR_OFFSET;
+ goto CMD;
+ }
+ chip->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
+ }
+
+CMD:
+ /* Send command to chip directly */
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, chip->IO_ADDR_W);
+}
+
+static void bcm_umi_nand_write_buf(struct mtd_info *mtd, const u_char * buf,
+ int len)
+{
+ if (USE_DIRECT_IO(len)) {
+ /* Do it the old way if the buffer is small or too large.
+ * Probably quicker than starting and checking dma. */
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], this->IO_ADDR_W);
+ }
+#if USE_DMA
+ else
+ nand_dma_write(buf, len);
+#endif
+}
+
+static void bcm_umi_nand_read_buf(struct mtd_info *mtd, u_char * buf, int len)
+{
+ if (USE_DIRECT_IO(len)) {
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(this->IO_ADDR_R);
+ }
+#if USE_DMA
+ else
+ nand_dma_read(buf, len);
+#endif
+}
+
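+/* Scratch buffer used to read a just-written page back for verification. */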
+static uint8_t readbackbuf[NAND_MAX_PAGESIZE];
+static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
+ int len)
+{
+ /*
+ * Try to read back the page with ECC correction. This is necessary
+ * for MLC parts which may have permanently stuck bits.
+ */
+ struct nand_chip *chip = mtd->priv;
+ int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
+ if (ret < 0)
+ return -EFAULT;
+ else {
+ if (memcmp(readbackbuf, buf, len) == 0)
+ return 0;
+
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
+{
+ struct nand_chip *this;
+ struct resource *r;
+ int err = 0;
+
+ printk(gBanner);
+
+ /* Allocate memory for MTD device structure and private data */
+ board_mtd =
+ kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!board_mtd) {
+ printk(KERN_WARNING
+ "Unable to allocate NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!r) {
+ kfree(board_mtd);
+ return -ENXIO;
+ }
+
+ /* map physical address */
+ bcm_umi_io_base = ioremap(r->start, r->end - r->start + 1);
+
+ if (!bcm_umi_io_base) {
+ printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
+ kfree(board_mtd);
+ return -EIO;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *)(&board_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *)board_mtd, 0, sizeof(struct mtd_info));
+ memset((char *)this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ board_mtd->priv = this;
+
+ /* Initialize the NAND hardware. */
+ if (bcm_umi_nand_inithw() < 0) {
+ printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
+ iounmap(bcm_umi_io_base);
+ kfree(board_mtd);
+ return -EIO;
+ }
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_W = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
+ this->IO_ADDR_R = bcm_umi_io_base + REG_NAND_DATA8_OFFSET;
+
+ /* Set command delay time, see datasheet for correct value */
+ this->chip_delay = 0;
+ /* Assign the device ready function, if available */
+ this->dev_ready = nand_dev_ready;
+ this->options = 0;
+
+ this->write_buf = bcm_umi_nand_write_buf;
+ this->read_buf = bcm_umi_nand_read_buf;
+ this->verify_buf = bcm_umi_nand_verify_buf;
+
+ this->cmd_ctrl = bcm_umi_nand_hwcontrol;
+ this->ecc.mode = NAND_ECC_HW;
+ this->ecc.size = 512;
+ this->ecc.bytes = NAND_ECC_NUM_BYTES;
+#if NAND_ECC_BCH
+ this->ecc.read_page = bcm_umi_bch_read_page_hwecc;
+ this->ecc.write_page = bcm_umi_bch_write_page_hwecc;
+#else
+ this->ecc.correct = nand_correct_data512;
+ this->ecc.calculate = bcm_umi_hamming_get_hw_ecc;
+ this->ecc.hwctl = bcm_umi_hamming_enable_hwecc;
+#endif
+
+#if USE_DMA
+ err = nand_dma_init();
+ if (err != 0)
+ return err;
+#endif
+
+ /* Figure out the size of the device that we have.
+ * We need to do this to figure out which ECC
+ * layout we'll be using.
+ */
+
+ err = nand_scan_ident(board_mtd, 1);
+ if (err) {
+ printk(KERN_ERR "nand_scan failed: %d\n", err);
+ iounmap(bcm_umi_io_base);
+ kfree(board_mtd);
+ return err;
+ }
+
+ /* Now that we know the nand size, we can setup the ECC layout */
+
+ switch (board_mtd->writesize) { /* writesize is the pagesize */
+ case 4096:
+ this->ecc.layout = &nand_hw_eccoob_4096;
+ break;
+ case 2048:
+ this->ecc.layout = &nand_hw_eccoob_2048;
+ break;
+ case 512:
+ this->ecc.layout = &nand_hw_eccoob_512;
+ break;
+ default:
+ {
+ printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
+ board_mtd->writesize);
+ return -EINVAL;
+ }
+ }
+
+#if NAND_ECC_BCH
+ if (board_mtd->writesize > 512) {
+ if (this->options & NAND_USE_FLASH_BBT)
+ largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
+ this->badblock_pattern = &largepage_bbt;
+ }
+#endif
+
+ /* Now finish off the scan, now that ecc.layout has been initialized. */
+
+ err = nand_scan_tail(board_mtd);
+ if (err) {
+ printk(KERN_ERR "nand_scan failed: %d\n", err);
+ iounmap(bcm_umi_io_base);
+ kfree(board_mtd);
+ return err;
+ }
+
+ /* Register the partitions */
+ {
+ int nr_partitions;
+ struct mtd_partition *partition_info;
+
+ board_mtd->name = "bcm_umi-nand";
+ nr_partitions =
+ parse_mtd_partitions(board_mtd, part_probes,
+ &partition_info, 0);
+
+ if (nr_partitions <= 0) {
+ printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
+ nr_partitions);
+ iounmap(bcm_umi_io_base);
+ kfree(board_mtd);
+ return -EIO;
+ }
+ add_mtd_partitions(board_mtd, partition_info, nr_partitions);
+ }
+
+ /* Return happy */
+ return 0;
+}
+
+static int bcm_umi_nand_remove(struct platform_device *pdev)
+{
+#if USE_DMA
+ nand_dma_term();
+#endif
+
+ /* Release resources, unregister device */
+ nand_release(board_mtd);
+
+ /* unmap physical address */
+ iounmap(bcm_umi_io_base);
+
+ /* Free the MTD device structure */
+ kfree(board_mtd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bcm_umi_nand_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ printk(KERN_ERR "MTD NAND suspend is being called\n");
+ return 0;
+}
+
+static int bcm_umi_nand_resume(struct platform_device *pdev)
+{
+ printk(KERN_ERR "MTD NAND resume is being called\n");
+ return 0;
+}
+#else
+#define bcm_umi_nand_suspend NULL
+#define bcm_umi_nand_resume NULL
+#endif
+
+static struct platform_driver nand_driver = {
+ .driver = {
+ .name = "bcm-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = bcm_umi_nand_probe,
+ .remove = bcm_umi_nand_remove,
+ .suspend = bcm_umi_nand_suspend,
+ .resume = bcm_umi_nand_resume,
+};
+
+static int __init nand_init(void)
+{
+ return platform_driver_register(&nand_driver);
+}
+
+static void __exit nand_exit(void)
+{
+ platform_driver_unregister(&nand_driver);
+}
+
+module_init(nand_init);
+module_exit(nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("BCM UMI MTD NAND driver");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index f13f5b9afaf..fe3eba87de4 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -591,6 +591,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
info->chip.options = pdata->options;
+ info->chip.bbt_td = pdata->bbt_td;
+ info->chip.bbt_md = pdata->bbt_md;
info->ioaddr = (uint32_t __force) vaddr;
@@ -599,7 +601,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
- info->mask_ale = pdata->mask_cle ? : MASK_ALE;
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
deleted file mode 100644
index 72446fb48d4..00000000000
--- a/drivers/mtd/nand/excite_nandflash.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
-* Copyright (C) 2005 - 2007 by Basler Vision Technologies AG
-* Author: Thomas Koeller <thomas.koeller.qbaslerweb.com>
-* Original code by Thies Moeller <thies.moeller@baslerweb.com>
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/rm9k-ocd.h>
-
-#include <excite_nandflash.h>
-
-#define EXCITE_NANDFLASH_VERSION "0.1"
-
-/* I/O register offsets */
-#define EXCITE_NANDFLASH_DATA_BYTE 0x00
-#define EXCITE_NANDFLASH_STATUS_BYTE 0x0c
-#define EXCITE_NANDFLASH_ADDR_BYTE 0x10
-#define EXCITE_NANDFLASH_CMD_BYTE 0x14
-
-/* prefix for debug output */
-static const char module_id[] = "excite_nandflash";
-
-/*
- * partition definition
- */
-static const struct mtd_partition partition_info[] = {
- {
- .name = "eXcite RootFS",
- .offset = 0,
- .size = MTDPART_SIZ_FULL
- }
-};
-
-static inline const struct resource *
-excite_nand_get_resource(struct platform_device *d, unsigned long flags,
- const char *basename)
-{
- char buf[80];
-
- if (snprintf(buf, sizeof buf, "%s_%u", basename, d->id) >= sizeof buf)
- return NULL;
- return platform_get_resource_byname(d, flags, buf);
-}
-
-static inline void __iomem *
-excite_nand_map_regs(struct platform_device *d, const char *basename)
-{
- void *result = NULL;
- const struct resource *const r =
- excite_nand_get_resource(d, IORESOURCE_MEM, basename);
-
- if (r)
- result = ioremap_nocache(r->start, r->end + 1 - r->start);
- return result;
-}
-
-/* controller and mtd information */
-struct excite_nand_drvdata {
- struct mtd_info board_mtd;
- struct nand_chip board_chip;
- void __iomem *regs;
- void __iomem *tgt;
-};
-
-/* Control function */
-static void excite_nand_control(struct mtd_info *mtd, int cmd,
- unsigned int ctrl)
-{
- struct excite_nand_drvdata * const d =
- container_of(mtd, struct excite_nand_drvdata, board_mtd);
-
- switch (ctrl) {
- case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
- d->tgt = d->regs + EXCITE_NANDFLASH_CMD_BYTE;
- break;
- case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
- d->tgt = d->regs + EXCITE_NANDFLASH_ADDR_BYTE;
- break;
- case NAND_CTRL_CHANGE | NAND_NCE:
- d->tgt = d->regs + EXCITE_NANDFLASH_DATA_BYTE;
- break;
- }
-
- if (cmd != NAND_CMD_NONE)
- __raw_writeb(cmd, d->tgt);
-}
-
-/* Return 0 if flash is busy, 1 if ready */
-static int excite_nand_devready(struct mtd_info *mtd)
-{
- struct excite_nand_drvdata * const drvdata =
- container_of(mtd, struct excite_nand_drvdata, board_mtd);
-
- return __raw_readb(drvdata->regs + EXCITE_NANDFLASH_STATUS_BYTE);
-}
-
-/*
- * Called by device layer to remove the driver.
- * The binding to the mtd and all allocated
- * resources are released.
- */
-static int __exit excite_nand_remove(struct platform_device *dev)
-{
- struct excite_nand_drvdata * const this = platform_get_drvdata(dev);
-
- platform_set_drvdata(dev, NULL);
-
- if (unlikely(!this)) {
- printk(KERN_ERR "%s: called %s without private data!!",
- module_id, __func__);
- return -EINVAL;
- }
-
- /* first thing we need to do is release our mtd
- * then go through freeing the resource used
- */
- nand_release(&this->board_mtd);
-
- /* free the common resources */
- iounmap(this->regs);
- kfree(this);
-
- DEBUG(MTD_DEBUG_LEVEL1, "%s: removed\n", module_id);
- return 0;
-}
-
-/*
- * Called by device layer when it finds a device matching
- * one our driver can handle. This code checks to see if
- * it can allocate all necessary resources then calls the
- * nand layer to look for devices.
-*/
-static int __init excite_nand_probe(struct platform_device *pdev)
-{
- struct excite_nand_drvdata *drvdata; /* private driver data */
- struct nand_chip *board_chip; /* private flash chip data */
- struct mtd_info *board_mtd; /* mtd info for this board */
- int scan_res;
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
- if (unlikely(!drvdata)) {
- printk(KERN_ERR "%s: no memory for drvdata\n",
- module_id);
- return -ENOMEM;
- }
-
- /* bind private data into driver */
- platform_set_drvdata(pdev, drvdata);
-
- /* allocate and map the resource */
- drvdata->regs =
- excite_nand_map_regs(pdev, EXCITE_NANDFLASH_RESOURCE_REGS);
-
- if (unlikely(!drvdata->regs)) {
- printk(KERN_ERR "%s: cannot reserve register region\n",
- module_id);
- kfree(drvdata);
- return -ENXIO;
- }
-
- drvdata->tgt = drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
-
- /* initialise our chip */
- board_chip = &drvdata->board_chip;
- board_chip->IO_ADDR_R = board_chip->IO_ADDR_W =
- drvdata->regs + EXCITE_NANDFLASH_DATA_BYTE;
- board_chip->cmd_ctrl = excite_nand_control;
- board_chip->dev_ready = excite_nand_devready;
- board_chip->chip_delay = 25;
- board_chip->ecc.mode = NAND_ECC_SOFT;
-
- /* link chip to mtd */
- board_mtd = &drvdata->board_mtd;
- board_mtd->priv = board_chip;
-
- DEBUG(MTD_DEBUG_LEVEL2, "%s: device scan\n", module_id);
- scan_res = nand_scan(&drvdata->board_mtd, 1);
-
- if (likely(!scan_res)) {
- DEBUG(MTD_DEBUG_LEVEL2, "%s: register partitions\n", module_id);
- add_mtd_partitions(&drvdata->board_mtd, partition_info,
- ARRAY_SIZE(partition_info));
- } else {
- iounmap(drvdata->regs);
- kfree(drvdata);
- printk(KERN_ERR "%s: device scan failed\n", module_id);
- return -EIO;
- }
- return 0;
-}
-
-static struct platform_driver excite_nand_driver = {
- .driver = {
- .name = "excite_nand",
- .owner = THIS_MODULE,
- },
- .probe = excite_nand_probe,
- .remove = __devexit_p(excite_nand_remove)
-};
-
-static int __init excite_nand_init(void)
-{
- pr_info("Basler eXcite nand flash driver Version "
- EXCITE_NANDFLASH_VERSION "\n");
- return platform_driver_register(&excite_nand_driver);
-}
-
-static void __exit excite_nand_exit(void)
-{
- platform_driver_unregister(&excite_nand_driver);
-}
-
-module_init(excite_nand_init);
-module_exit(excite_nand_exit);
-
-MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
-MODULE_DESCRIPTION("Basler eXcite NAND-Flash driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(EXCITE_NANDFLASH_VERSION)
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ddd37d2554e..ae30fb6eed9 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -237,12 +237,15 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
ctrl->use_mdr = 0;
- dev_vdbg(ctrl->dev,
- "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n",
- ctrl->status, ctrl->mdr, in_be32(&lbc->fmr));
+ if (ctrl->status != LTESR_CC) {
+ dev_info(ctrl->dev,
+ "command failed: fir %x fcr %x status %x mdr %x\n",
+ in_be32(&lbc->fir), in_be32(&lbc->fcr),
+ ctrl->status, ctrl->mdr);
+ return -EIO;
+ }
- /* returns 0 on success otherwise non-zero) */
- return ctrl->status == LTESR_CC ? 0 : -EIO;
+ return 0;
}
static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
@@ -253,17 +256,17 @@ static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
if (priv->page_size) {
out_be32(&lbc->fir,
- (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
- (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP3_SHIFT) |
(FIR_OP_RBW << FIR_OP4_SHIFT));
out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
(NAND_CMD_READSTART << FCR_CMD1_SHIFT));
} else {
out_be32(&lbc->fir,
- (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_RBW << FIR_OP3_SHIFT));
@@ -332,7 +335,7 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_READID:
dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
- out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
@@ -359,16 +362,20 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
out_be32(&lbc->fir,
- (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_PA << FIR_OP1_SHIFT) |
- (FIR_OP_CM1 << FIR_OP2_SHIFT));
+ (FIR_OP_CM2 << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RS << FIR_OP4_SHIFT));
out_be32(&lbc->fcr,
(NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
- (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT));
+ (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
out_be32(&lbc->fbcr, 0);
ctrl->read_bytes = 0;
+ ctrl->use_mdr = 1;
fsl_elbc_run_command(mtd);
return;
@@ -383,40 +390,41 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
ctrl->column = column;
ctrl->oob = 0;
+ ctrl->use_mdr = 1;
- if (priv->page_size) {
- fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) |
- (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT);
+ fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
+ (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
+ if (priv->page_size) {
out_be32(&lbc->fir,
- (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP0_SHIFT) |
(FIR_OP_CA << FIR_OP1_SHIFT) |
(FIR_OP_PA << FIR_OP2_SHIFT) |
(FIR_OP_WB << FIR_OP3_SHIFT) |
- (FIR_OP_CW1 << FIR_OP4_SHIFT));
+ (FIR_OP_CM3 << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT) |
+ (FIR_OP_RS << FIR_OP6_SHIFT));
} else {
- fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
- (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
-
out_be32(&lbc->fir,
- (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_CM2 << FIR_OP1_SHIFT) |
(FIR_OP_CA << FIR_OP2_SHIFT) |
(FIR_OP_PA << FIR_OP3_SHIFT) |
(FIR_OP_WB << FIR_OP4_SHIFT) |
- (FIR_OP_CW1 << FIR_OP5_SHIFT));
+ (FIR_OP_CM3 << FIR_OP5_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP6_SHIFT) |
+ (FIR_OP_RS << FIR_OP7_SHIFT));
if (column >= mtd->writesize) {
/* OOB area --> READOOB */
column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
ctrl->oob = 1;
- } else if (column < 256) {
+ } else {
+ WARN_ON(column != 0);
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
- } else {
- /* Second 256 bytes --> READ1 */
- fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
}
}
@@ -628,22 +636,6 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
struct fsl_elbc_mtd *priv = chip->priv;
struct fsl_elbc_ctrl *ctrl = priv->ctrl;
- struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-
- if (ctrl->status != LTESR_CC)
- return NAND_STATUS_FAIL;
-
- /* Use READ_STATUS command, but wait for the device to be ready */
- ctrl->use_mdr = 0;
- out_be32(&lbc->fir,
- (FIR_OP_CW0 << FIR_OP0_SHIFT) |
- (FIR_OP_RBW << FIR_OP1_SHIFT));
- out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
- out_be32(&lbc->fbcr, 1);
- set_addr(mtd, 0, 0, 0);
- ctrl->read_bytes = 1;
-
- fsl_elbc_run_command(mtd);
if (ctrl->status != LTESR_CC)
return NAND_STATUS_FAIL;
@@ -651,8 +643,7 @@ static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
/* The chip always seems to report that it is
* write-protected, even when it is not.
*/
- setbits8(ctrl->addr, NAND_STATUS_WP);
- return fsl_elbc_read_byte(mtd);
+ return (ctrl->mdr & 0xff) | NAND_STATUS_WP;
}
static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
@@ -946,6 +937,13 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
{
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+ /*
+ * NAND transactions can tie up the bus for a long time, so set the
+ * bus timeout to max by clearing LBCR[BMT] (highest base counter
+ * value) and setting LBCR[BMTPS] to the highest prescaler value.
+ */
+ clrsetbits_be32(&lbc->lbcr, LBCR_BMT, 15);
+
/* clear event registers */
setbits32(&lbc->ltesr, LTESR_NAND_MASK);
out_be32(&lbc->lteatr, 0);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index d120cd8d726..071a60cb420 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -112,7 +112,7 @@ static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
if (mchip_nr == -1) {
chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
- } else if (mchip_nr >= 0) {
+ } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
fun->mchip_number = mchip_nr;
chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
chip->IO_ADDR_W = chip->IO_ADDR_R;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 65b26d5a5c0..45dec5770da 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -33,9 +33,13 @@
#include <asm/mach/flash.h>
#include <mach/mxc_nand.h>
+#include <mach/hardware.h>
#define DRIVER_NAME "mxc_nand"
+#define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35())
+#define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27())
+
/* Addresses for NFC registers */
#define NFC_BUF_SIZE 0xE00
#define NFC_BUF_ADDR 0xE04
@@ -46,24 +50,14 @@
#define NFC_RSLTMAIN_AREA 0xE0E
#define NFC_RSLTSPARE_AREA 0xE10
#define NFC_WRPROT 0xE12
-#define NFC_UNLOCKSTART_BLKADDR 0xE14
-#define NFC_UNLOCKEND_BLKADDR 0xE16
+#define NFC_V1_UNLOCKSTART_BLKADDR 0xe14
+#define NFC_V1_UNLOCKEND_BLKADDR 0xe16
+#define NFC_V21_UNLOCKSTART_BLKADDR 0xe20
+#define NFC_V21_UNLOCKEND_BLKADDR 0xe22
#define NFC_NF_WRPRST 0xE18
#define NFC_CONFIG1 0xE1A
#define NFC_CONFIG2 0xE1C
-/* Addresses for NFC RAM BUFFER Main area 0 */
-#define MAIN_AREA0 0x000
-#define MAIN_AREA1 0x200
-#define MAIN_AREA2 0x400
-#define MAIN_AREA3 0x600
-
-/* Addresses for NFC SPARE BUFFER Spare area 0 */
-#define SPARE_AREA0 0x800
-#define SPARE_AREA1 0x810
-#define SPARE_AREA2 0x820
-#define SPARE_AREA3 0x830
-
/* Set INT to 0, FCMD to 1, rest to 0 in NFC_CONFIG2 Register
* for Command operation */
#define NFC_CMD 0x1
@@ -106,48 +100,66 @@ struct mxc_nand_host {
struct mtd_partition *parts;
struct device *dev;
+ void *spare0;
+ void *main_area0;
+ void *main_area1;
+
+ void __iomem *base;
void __iomem *regs;
- int spare_only;
int status_request;
- int pagesize_2k;
- uint16_t col_addr;
struct clk *clk;
int clk_act;
int irq;
wait_queue_head_t irq_waitq;
-};
-
-/* Define delays in microsec for NAND device operations */
-#define TROP_US_DELAY 2000
-/* Macros to get byte and bit positions of ECC */
-#define COLPOS(x) ((x) >> 3)
-#define BITPOS(x) ((x) & 0xf)
-/* Define single bit Error positions in Main & Spare area */
-#define MAIN_SINGLEBIT_ERROR 0x4
-#define SPARE_SINGLEBIT_ERROR 0x1
-
-/* OOB placement block for use with hardware ecc generation */
-static struct nand_ecclayout nand_hw_eccoob_8 = {
- .eccbytes = 5,
- .eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 5}, {11, 5}, }
+ uint8_t *data_buf;
+ unsigned int buf_start;
+ int spare_len;
};
-static struct nand_ecclayout nand_hw_eccoob_16 = {
+/* OOB placement block for use with hardware ecc generation */
+static struct nand_ecclayout nandv1_hw_eccoob_smallpage = {
.eccbytes = 5,
.eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 5}, {11, 5}, }
+ .oobfree = {{0, 5}, {12, 4}, }
};
-static struct nand_ecclayout nand_hw_eccoob_64 = {
+static struct nand_ecclayout nandv1_hw_eccoob_largepage = {
.eccbytes = 20,
.eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
.oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
};
+/* OOB description for 512 byte pages with 16 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_smallpage = {
+ .eccbytes = 1 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15
+ },
+ .oobfree = {
+ {.offset = 0, .length = 5}
+ }
+};
+
+/* OOB description for 2048 byte pages with 64 byte OOB */
+static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
+ .eccbytes = 4 * 9,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4},
+ {.offset = 16, .length = 7},
+ {.offset = 32, .length = 7},
+ {.offset = 48, .length = 7}
+ }
+};
+
#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
#endif
@@ -170,10 +182,10 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
-static void wait_op_done(struct mxc_nand_host *host, int max_retries,
- uint16_t param, int useirq)
+static void wait_op_done(struct mxc_nand_host *host, int useirq)
{
uint32_t tmp;
+ int max_retries = 2000;
if (useirq) {
if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) {
@@ -200,8 +212,8 @@ static void wait_op_done(struct mxc_nand_host *host, int max_retries,
udelay(1);
}
if (max_retries < 0)
- DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
- __func__, param);
+ DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n",
+ __func__);
}
}
@@ -215,7 +227,7 @@ static void send_cmd(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(NFC_CMD, host->regs + NFC_CONFIG2);
/* Wait for operation to complete */
- wait_op_done(host, TROP_US_DELAY, cmd, useirq);
+ wait_op_done(host, useirq);
}
/* This function sends an address (or partial address) to the
@@ -229,82 +241,47 @@ static void send_addr(struct mxc_nand_host *host, uint16_t addr, int islast)
writew(NFC_ADDR, host->regs + NFC_CONFIG2);
/* Wait for operation to complete */
- wait_op_done(host, TROP_US_DELAY, addr, islast);
+ wait_op_done(host, islast);
}
-/* This function requests the NANDFC to initate the transfer
- * of data currently in the NANDFC RAM buffer to the NAND device. */
-static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id,
- int spare_only)
+static void send_page(struct mtd_info *mtd, unsigned int ops)
{
- DEBUG(MTD_DEBUG_LEVEL3, "send_prog_page (%d)\n", spare_only);
-
- /* NANDFC buffer 0 is used for page read/write */
- writew(buf_id, host->regs + NFC_BUF_ADDR);
-
- /* Configure spare or page+spare access */
- if (!host->pagesize_2k) {
- uint16_t config1 = readw(host->regs + NFC_CONFIG1);
- if (spare_only)
- config1 |= NFC_SP_EN;
- else
- config1 &= ~(NFC_SP_EN);
- writew(config1, host->regs + NFC_CONFIG1);
- }
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ int bufs, i;
- writew(NFC_INPUT, host->regs + NFC_CONFIG2);
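+ /* v1 controllers transfer a large page through four 512 byte RAM buffers */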
+ if (nfc_is_v1() && mtd->writesize > 512)
+ bufs = 4;
+ else
+ bufs = 1;
- /* Wait for operation to complete */
- wait_op_done(host, TROP_US_DELAY, spare_only, true);
-}
+ for (i = 0; i < bufs; i++) {
-/* Requests NANDFC to initated the transfer of data from the
- * NAND device into in the NANDFC ram buffer. */
-static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id,
- int spare_only)
-{
- DEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only);
+ /* NANDFC buffer 0 is used for page read/write */
+ writew(i, host->regs + NFC_BUF_ADDR);
- /* NANDFC buffer 0 is used for page read/write */
- writew(buf_id, host->regs + NFC_BUF_ADDR);
+ writew(ops, host->regs + NFC_CONFIG2);
- /* Configure spare or page+spare access */
- if (!host->pagesize_2k) {
- uint32_t config1 = readw(host->regs + NFC_CONFIG1);
- if (spare_only)
- config1 |= NFC_SP_EN;
- else
- config1 &= ~NFC_SP_EN;
- writew(config1, host->regs + NFC_CONFIG1);
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
}
-
- writew(NFC_OUTPUT, host->regs + NFC_CONFIG2);
-
- /* Wait for operation to complete */
- wait_op_done(host, TROP_US_DELAY, spare_only, true);
}
/* Request the NANDFC to perform a read of the NAND device ID. */
static void send_read_id(struct mxc_nand_host *host)
{
struct nand_chip *this = &host->nand;
- uint16_t tmp;
/* NANDFC buffer 0 is used for device ID output */
writew(0x0, host->regs + NFC_BUF_ADDR);
- /* Read ID into main buffer */
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_SP_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
-
writew(NFC_ID, host->regs + NFC_CONFIG2);
/* Wait for operation to complete */
- wait_op_done(host, TROP_US_DELAY, 0, true);
+ wait_op_done(host, true);
if (this->options & NAND_BUSWIDTH_16) {
- void __iomem *main_buf = host->regs + MAIN_AREA0;
+ void __iomem *main_buf = host->main_area0;
/* compress the ID info */
writeb(readb(main_buf + 2), main_buf + 1);
writeb(readb(main_buf + 4), main_buf + 2);
@@ -312,15 +289,16 @@ static void send_read_id(struct mxc_nand_host *host)
writeb(readb(main_buf + 8), main_buf + 4);
writeb(readb(main_buf + 10), main_buf + 5);
}
+ memcpy(host->data_buf, host->main_area0, 16);
}
/* This function requests the NANDFC to perform a read of the
* NAND device status and returns the current status. */
static uint16_t get_dev_status(struct mxc_nand_host *host)
{
- void __iomem *main_buf = host->regs + MAIN_AREA1;
+ void __iomem *main_buf = host->main_area1;
uint32_t store;
- uint16_t ret, tmp;
+ uint16_t ret;
/* Issue status request to NAND device */
/* store the main area1 first word, later do recovery */
@@ -329,15 +307,10 @@ static uint16_t get_dev_status(struct mxc_nand_host *host)
* corruption of read/write buffer on status requests. */
writew(1, host->regs + NFC_BUF_ADDR);
- /* Read status into main buffer */
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_SP_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
-
writew(NFC_STATUS, host->regs + NFC_CONFIG2);
/* Wait for operation to complete */
- wait_op_done(host, TROP_US_DELAY, 0, true);
+ wait_op_done(host, true);
/* Status is placed in first word of main buffer */
/* get status, then recovery area 1 data */
@@ -397,32 +370,14 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- uint8_t ret = 0;
- uint16_t col, rd_word;
- uint16_t __iomem *main_buf = host->regs + MAIN_AREA0;
- uint16_t __iomem *spare_buf = host->regs + SPARE_AREA0;
+ uint8_t ret;
/* Check for status request */
if (host->status_request)
return get_dev_status(host) & 0xFF;
- /* Get column for 16-bit access */
- col = host->col_addr >> 1;
-
- /* If we are accessing the spare region */
- if (host->spare_only)
- rd_word = readw(&spare_buf[col]);
- else
- rd_word = readw(&main_buf[col]);
-
- /* Pick upper/lower byte of word from RAM buffer */
- if (host->col_addr & 0x1)
- ret = (rd_word >> 8) & 0xFF;
- else
- ret = rd_word & 0xFF;
-
- /* Update saved column address */
- host->col_addr++;
+ ret = *(uint8_t *)(host->data_buf + host->buf_start);
+ host->buf_start++;
return ret;
}
@@ -431,33 +386,10 @@ static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- uint16_t col, rd_word, ret;
- uint16_t __iomem *p;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "mxc_nand_read_word(col = %d)\n", host->col_addr);
-
- col = host->col_addr;
- /* Adjust saved column address */
- if (col < mtd->writesize && host->spare_only)
- col += mtd->writesize;
+ uint16_t ret;
- if (col < mtd->writesize)
- p = (host->regs + MAIN_AREA0) + (col >> 1);
- else
- p = (host->regs + SPARE_AREA0) + ((col - mtd->writesize) >> 1);
-
- if (col & 1) {
- rd_word = readw(p);
- ret = (rd_word >> 8) & 0xff;
- rd_word = readw(&p[1]);
- ret |= (rd_word << 8) & 0xff00;
-
- } else
- ret = readw(p);
-
- /* Update saved column address */
- host->col_addr = col + 2;
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+ host->buf_start += 2;
return ret;
}
@@ -470,94 +402,14 @@ static void mxc_nand_write_buf(struct mtd_info *mtd,
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- int n, col, i = 0;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr,
- len);
-
- col = host->col_addr;
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
- /* Adjust saved column address */
- if (col < mtd->writesize && host->spare_only)
- col += mtd->writesize;
+ n = min(n, len);
- n = mtd->writesize + mtd->oobsize - col;
- n = min(len, n);
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n);
-
- while (n) {
- void __iomem *p;
-
- if (col < mtd->writesize)
- p = host->regs + MAIN_AREA0 + (col & ~3);
- else
- p = host->regs + SPARE_AREA0 -
- mtd->writesize + (col & ~3);
-
- DEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__,
- __LINE__, p);
-
- if (((col | (int)&buf[i]) & 3) || n < 16) {
- uint32_t data = 0;
-
- if (col & 3 || n < 4)
- data = readl(p);
-
- switch (col & 3) {
- case 0:
- if (n) {
- data = (data & 0xffffff00) |
- (buf[i++] << 0);
- n--;
- col++;
- }
- case 1:
- if (n) {
- data = (data & 0xffff00ff) |
- (buf[i++] << 8);
- n--;
- col++;
- }
- case 2:
- if (n) {
- data = (data & 0xff00ffff) |
- (buf[i++] << 16);
- n--;
- col++;
- }
- case 3:
- if (n) {
- data = (data & 0x00ffffff) |
- (buf[i++] << 24);
- n--;
- col++;
- }
- }
-
- writel(data, p);
- } else {
- int m = mtd->writesize - col;
+ memcpy(host->data_buf + col, buf, n);
- if (col >= mtd->writesize)
- m += mtd->oobsize;
-
- m = min(n, m) & ~3;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "%s:%d: n = %d, m = %d, i = %d, col = %d\n",
- __func__, __LINE__, n, m, i, col);
-
- memcpy(p, &buf[i], m);
- col += m;
- i += m;
- n -= m;
- }
- }
- /* Update saved column address */
- host->col_addr = col;
+ host->buf_start += n;
}
/* Read the data buffer from the NAND Flash. To read the data from NAND
@@ -568,75 +420,14 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
- int n, col, i = 0;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len);
-
- col = host->col_addr;
+ u16 col = host->buf_start;
+ int n = mtd->oobsize + mtd->writesize - col;
- /* Adjust saved column address */
- if (col < mtd->writesize && host->spare_only)
- col += mtd->writesize;
+ n = min(n, len);
- n = mtd->writesize + mtd->oobsize - col;
- n = min(len, n);
-
- while (n) {
- void __iomem *p;
-
- if (col < mtd->writesize)
- p = host->regs + MAIN_AREA0 + (col & ~3);
- else
- p = host->regs + SPARE_AREA0 -
- mtd->writesize + (col & ~3);
-
- if (((col | (int)&buf[i]) & 3) || n < 16) {
- uint32_t data;
-
- data = readl(p);
- switch (col & 3) {
- case 0:
- if (n) {
- buf[i++] = (uint8_t) (data);
- n--;
- col++;
- }
- case 1:
- if (n) {
- buf[i++] = (uint8_t) (data >> 8);
- n--;
- col++;
- }
- case 2:
- if (n) {
- buf[i++] = (uint8_t) (data >> 16);
- n--;
- col++;
- }
- case 3:
- if (n) {
- buf[i++] = (uint8_t) (data >> 24);
- n--;
- col++;
- }
- }
- } else {
- int m = mtd->writesize - col;
-
- if (col >= mtd->writesize)
- m += mtd->oobsize;
-
- m = min(n, m) & ~3;
- memcpy(&buf[i], p, m);
- col += m;
- i += m;
- n -= m;
- }
- }
- /* Update saved column address */
- host->col_addr = col;
+ memcpy(buf, host->data_buf + col, len);
+ host->buf_start += len;
}
/* Used by the upper layer to verify the data in NAND Flash
@@ -654,23 +445,6 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
-#ifdef CONFIG_MTD_NAND_MXC_FORCE_CE
- if (chip > 0) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "ERROR: Illegal chip select (chip = %d)\n", chip);
- return;
- }
-
- if (chip == -1) {
- writew(readw(host->regs + NFC_CONFIG1) & ~NFC_CE,
- host->regs + NFC_CONFIG1);
- return;
- }
-
- writew(readw(host->regs + NFC_CONFIG1) | NFC_CE,
- host->regs + NFC_CONFIG1);
-#endif
-
switch (chip) {
case -1:
/* Disable the NFC clock */
@@ -692,94 +466,40 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
}
}
-/* Used by the upper layer to write command to NAND Flash for
- * different operations to be carried out on NAND Flash */
-static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
+/*
+ * Function to transfer data to/from spare area.
+ */
+static void copy_spare(struct mtd_info *mtd, bool bfrom)
{
- struct nand_chip *nand_chip = mtd->priv;
- struct mxc_nand_host *host = nand_chip->priv;
- int useirq = true;
-
- DEBUG(MTD_DEBUG_LEVEL3,
- "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
- command, column, page_addr);
-
- /* Reset command state information */
- host->status_request = false;
-
- /* Command pre-processing step */
- switch (command) {
-
- case NAND_CMD_STATUS:
- host->col_addr = 0;
- host->status_request = true;
- break;
-
- case NAND_CMD_READ0:
- host->col_addr = column;
- host->spare_only = false;
- useirq = false;
- break;
-
- case NAND_CMD_READOOB:
- host->col_addr = column;
- host->spare_only = true;
- useirq = false;
- if (host->pagesize_2k)
- command = NAND_CMD_READ0; /* only READ0 is valid */
- break;
-
- case NAND_CMD_SEQIN:
- if (column >= mtd->writesize) {
- /*
- * FIXME: before send SEQIN command for write OOB,
- * We must read one page out.
- * For K9F1GXX has no READ1 command to set current HW
- * pointer to spare area, we must write the whole page
- * including OOB together.
- */
- if (host->pagesize_2k)
- /* call ourself to read a page */
- mxc_nand_command(mtd, NAND_CMD_READ0, 0,
- page_addr);
-
- host->col_addr = column - mtd->writesize;
- host->spare_only = true;
-
- /* Set program pointer to spare region */
- if (!host->pagesize_2k)
- send_cmd(host, NAND_CMD_READOOB, false);
- } else {
- host->spare_only = false;
- host->col_addr = column;
-
- /* Set program pointer to page start */
- if (!host->pagesize_2k)
- send_cmd(host, NAND_CMD_READ0, false);
- }
- useirq = false;
- break;
-
- case NAND_CMD_PAGEPROG:
- send_prog_page(host, 0, host->spare_only);
-
- if (host->pagesize_2k) {
- /* data in 4 areas datas */
- send_prog_page(host, 1, host->spare_only);
- send_prog_page(host, 2, host->spare_only);
- send_prog_page(host, 3, host->spare_only);
- }
-
- break;
+ struct nand_chip *this = mtd->priv;
+ struct mxc_nand_host *host = this->priv;
+ u16 i, j;
+ u16 n = mtd->writesize >> 9;
+ u8 *d = host->data_buf + mtd->writesize;
+ u8 *s = host->spare0;
+ u16 t = host->spare_len;
+
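+ /* OOB bytes per 512 byte sub-page, rounded down to an even count */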
+ j = (mtd->oobsize / n >> 1) << 1;
+
+ if (bfrom) {
+ for (i = 0; i < n - 1; i++)
+ memcpy(d + i * j, s + i * t, j);
+
+ /* the last section */
+ memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+ } else {
+ for (i = 0; i < n - 1; i++)
+ memcpy(&s[i * t], &d[i * j], j);
- case NAND_CMD_ERASE1:
- useirq = false;
- break;
+ /* the last section */
+ memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
}
+}
- /* Write out the command to the device. */
- send_cmd(host, command, useirq);
+static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
/* Write out column address, if necessary */
if (column != -1) {
@@ -791,7 +511,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
* the full page.
*/
send_addr(host, 0, page_addr == -1);
- if (host->pagesize_2k)
+ if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
send_addr(host, 0, false);
}
@@ -801,7 +521,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* paddr_0 - p_addr_7 */
send_addr(host, (page_addr & 0xff), false);
- if (host->pagesize_2k) {
+ if (mtd->writesize > 512) {
if (mtd->size >= 0x10000000) {
/* paddr_8 - paddr_15 */
send_addr(host, (page_addr >> 8) & 0xff, false);
@@ -820,52 +540,136 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
send_addr(host, (page_addr >> 8) & 0xff, true);
}
}
+}
+
+/* Used by the upper layer to write command to NAND Flash for
+ * different operations to be carried out on NAND Flash */
+static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ DEBUG(MTD_DEBUG_LEVEL3,
+ "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ command, column, page_addr);
+
+ /* Reset command state information */
+ host->status_request = false;
- /* Command post-processing step */
+ /* Command pre-processing step */
switch (command) {
- case NAND_CMD_RESET:
+ case NAND_CMD_STATUS:
+ host->buf_start = 0;
+ host->status_request = true;
+
+ send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
break;
- case NAND_CMD_READOOB:
case NAND_CMD_READ0:
- if (host->pagesize_2k) {
- /* send read confirm command */
+ case NAND_CMD_READOOB:
+ if (command == NAND_CMD_READ0)
+ host->buf_start = column;
+ else
+ host->buf_start = column + mtd->writesize;
+
+ if (mtd->writesize > 512)
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+
+ send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+
+ if (mtd->writesize > 512)
send_cmd(host, NAND_CMD_READSTART, true);
- /* read for each AREA */
- send_read_page(host, 0, host->spare_only);
- send_read_page(host, 1, host->spare_only);
- send_read_page(host, 2, host->spare_only);
- send_read_page(host, 3, host->spare_only);
- } else
- send_read_page(host, 0, host->spare_only);
+
+ send_page(mtd, NFC_OUTPUT);
+
+ memcpy(host->data_buf, host->main_area0, mtd->writesize);
+ copy_spare(mtd, true);
break;
- case NAND_CMD_READID:
- host->col_addr = 0;
- send_read_id(host);
+ case NAND_CMD_SEQIN:
+ if (column >= mtd->writesize) {
+ /*
+ * FIXME: before sending the SEQIN command to write the OOB,
+ * we must read one page out.
+ * Since the K9F1GXX has no READ1 command to set the current HW
+ * pointer to the spare area, we must write the whole page
+ * including the OOB together.
+ */
+ if (mtd->writesize > 512)
+ /* call ourself to read a page */
+ mxc_nand_command(mtd, NAND_CMD_READ0, 0,
+ page_addr);
+
+ host->buf_start = column;
+
+ /* Set program pointer to spare region */
+ if (mtd->writesize == 512)
+ send_cmd(host, NAND_CMD_READOOB, false);
+ } else {
+ host->buf_start = column;
+
+ /* Set program pointer to page start */
+ if (mtd->writesize == 512)
+ send_cmd(host, NAND_CMD_READ0, false);
+ }
+
+ send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_PAGEPROG:
+ memcpy(host->main_area0, host->data_buf, mtd->writesize);
+ copy_spare(mtd, false);
+ send_page(mtd, NFC_INPUT);
+ send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
break;
- case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ send_cmd(host, command, true);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ send_read_id(host);
+ host->buf_start = column;
break;
+ case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
+ send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+
break;
}
}
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks. */
-static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+/*
+ * The generic flash bbt descriptors overlap with our ecc
+ * hardware, so define some i.MX specific ones.
+ */
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
-static struct nand_bbt_descr smallpage_memorybased = {
- .options = NAND_BBT_SCAN2NDPAGE,
- .offs = 5,
- .len = 1,
- .pattern = scan_ff_pattern
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
};
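
These i.MX-specific descriptors move the bad block table marker ('Bbt0' and its mirror '1tbB') and the version byte to the very start of the OOB, where they no longer overlap the controller's hardware ECC bytes. The table is only used when the platform asks for it via pdata->flash_bbt, as handled later in mxcnd_probe(). A hypothetical board-file fragment is sketched below; only the width, hw_ecc and flash_bbt field names are taken from this patch, while the structure name and header path are assumptions.

#include <mach/mxc_nand.h>	/* assumed location of the platform data struct */

/* Sketch: opting in to the flash-based bad block table from a board file. */
static struct mxc_nand_platform_data board_nand_pdata = {
	.width		= 1,	/* 8-bit NAND bus */
	.hw_ecc		= 1,	/* use the NFC hardware ECC */
	.flash_bbt	= 1,	/* keep the bad block table in flash */
};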
static int __init mxcnd_probe(struct platform_device *pdev)
@@ -877,12 +681,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
struct resource *res;
uint16_t tmp;
int err = 0, nr_parts = 0;
+ struct nand_ecclayout *oob_smallpage, *oob_largepage;
/* Allocate memory for MTD device structure and private data */
- host = kzalloc(sizeof(struct mxc_nand_host), GFP_KERNEL);
+ host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
+ NAND_MAX_OOBSIZE, GFP_KERNEL);
if (!host)
return -ENOMEM;
+ host->data_buf = (uint8_t *)(host + 1);
+
host->dev = &pdev->dev;
/* structures must be linked */
this = &host->nand;
@@ -890,7 +698,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
mtd->priv = this;
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
- mtd->name = "mxc_nand";
+ mtd->name = DRIVER_NAME;
/* 50 us command delay time */
this->chip_delay = 5;
@@ -920,62 +728,93 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto eres;
}
- host->regs = ioremap(res->start, res->end - res->start + 1);
- if (!host->regs) {
+ host->base = ioremap(res->start, resource_size(res));
+ if (!host->base) {
err = -ENOMEM;
goto eres;
}
+ host->main_area0 = host->base;
+ host->main_area1 = host->base + 0x200;
+
+ if (nfc_is_v21()) {
+ host->regs = host->base + 0x1000;
+ host->spare0 = host->base + 0x1000;
+ host->spare_len = 64;
+ oob_smallpage = &nandv2_hw_eccoob_smallpage;
+ oob_largepage = &nandv2_hw_eccoob_largepage;
+ } else if (nfc_is_v1()) {
+ host->regs = host->base;
+ host->spare0 = host->base + 0x800;
+ host->spare_len = 16;
+ oob_smallpage = &nandv1_hw_eccoob_smallpage;
+ oob_largepage = &nandv1_hw_eccoob_largepage;
+ } else
+ BUG();
+
+ /* mask the interrupt and clear the spare-enable bit */
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_INT_MSK;
+ tmp &= ~NFC_SP_EN;
writew(tmp, host->regs + NFC_CONFIG1);
init_waitqueue_head(&host->irq_waitq);
host->irq = platform_get_irq(pdev, 0);
- err = request_irq(host->irq, mxc_nfc_irq, 0, "mxc_nd", host);
+ err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host);
if (err)
goto eirq;
+ /* Reset NAND */
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* preset operation */
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, host->regs + NFC_CONFIG);
+
+ /* Blocks to be unlocked */
+ if (nfc_is_v21()) {
+ writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR);
+ writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR);
+ this->ecc.bytes = 9;
+ } else if (nfc_is_v1()) {
+ writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR);
+ this->ecc.bytes = 3;
+ } else
+ BUG();
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, host->regs + NFC_WRPROT);
+
+ this->ecc.size = 512;
+ this->ecc.layout = oob_smallpage;
+
if (pdata->hw_ecc) {
this->ecc.calculate = mxc_nand_calculate_ecc;
this->ecc.hwctl = mxc_nand_enable_hwecc;
this->ecc.correct = mxc_nand_correct_data;
this->ecc.mode = NAND_ECC_HW;
- this->ecc.size = 512;
- this->ecc.bytes = 3;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
} else {
- this->ecc.size = 512;
- this->ecc.bytes = 3;
- this->ecc.layout = &nand_hw_eccoob_8;
this->ecc.mode = NAND_ECC_SOFT;
tmp = readw(host->regs + NFC_CONFIG1);
tmp &= ~NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
}
- /* Reset NAND */
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
- /* preset operation */
- /* Unlock the internal RAM Buffer */
- writew(0x2, host->regs + NFC_CONFIG);
-
- /* Blocks to be unlocked */
- writew(0x0, host->regs + NFC_UNLOCKSTART_BLKADDR);
- writew(0x4000, host->regs + NFC_UNLOCKEND_BLKADDR);
-
- /* Unlock Block Command for given address range */
- writew(0x4, host->regs + NFC_WRPROT);
-
/* NAND bus width determines access functions used by upper layer */
- if (pdata->width == 2) {
+ if (pdata->width == 2)
this->options |= NAND_BUSWIDTH_16;
- this->ecc.layout = &nand_hw_eccoob_16;
+
+ if (pdata->flash_bbt) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ /* update flash based bbt */
+ this->options |= NAND_USE_FLASH_BBT;
}
/* first scan to find the device and get the page size */
@@ -984,38 +823,8 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto escan;
}
- if (mtd->writesize == 2048) {
- host->pagesize_2k = 1;
- this->badblock_pattern = &smallpage_memorybased;
- }
-
- if (this->ecc.mode == NAND_ECC_HW) {
- switch (mtd->oobsize) {
- case 8:
- this->ecc.layout = &nand_hw_eccoob_8;
- break;
- case 16:
- this->ecc.layout = &nand_hw_eccoob_16;
- break;
- case 64:
- this->ecc.layout = &nand_hw_eccoob_64;
- break;
- default:
- /* page size not handled by HW ECC */
- /* switching back to soft ECC */
- this->ecc.size = 512;
- this->ecc.bytes = 3;
- this->ecc.layout = &nand_hw_eccoob_8;
- this->ecc.mode = NAND_ECC_SOFT;
- this->ecc.calculate = NULL;
- this->ecc.correct = NULL;
- this->ecc.hwctl = NULL;
- tmp = readw(host->regs + NFC_CONFIG1);
- tmp &= ~NFC_ECC_EN;
- writew(tmp, host->regs + NFC_CONFIG1);
- break;
- }
- }
+ if (mtd->writesize == 2048)
+ this->ecc.layout = oob_largepage;
/* second phase scan */
if (nand_scan_tail(mtd)) {
@@ -1043,7 +852,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
escan:
free_irq(host->irq, host);
eirq:
- iounmap(host->regs);
+ iounmap(host->base);
eres:
clk_put(host->clk);
eclk:
@@ -1062,7 +871,7 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
nand_release(&host->mtd);
free_irq(host->irq, host);
- iounmap(host->regs);
+ iounmap(host->base);
kfree(host);
return 0;
@@ -1113,7 +922,7 @@ static struct platform_driver mxcnd_driver = {
.driver = {
.name = DRIVER_NAME,
},
- .remove = __exit_p(mxcnd_remove),
+ .remove = __devexit_p(mxcnd_remove),
.suspend = mxcnd_suspend,
.resume = mxcnd_resume,
};
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 2957cc70da3..8f2958fe214 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -428,6 +428,28 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
return nand_isbad_bbt(mtd, ofs, allowbbt);
}
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready used when needing to wait in interrupt
+ * context.
+ */
+static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ struct nand_chip *chip = mtd->priv;
+ int i;
+
+ /* Wait for the device to get ready */
+ for (i = 0; i < timeo; i++) {
+ if (chip->dev_ready(mtd))
+ break;
+ touch_softlockup_watchdog();
+ mdelay(1);
+ }
+}
+
/*
* Wait for the ready pin, after a command
* The timeout is caught later.
@@ -437,6 +459,10 @@ void nand_wait_ready(struct mtd_info *mtd)
struct nand_chip *chip = mtd->priv;
unsigned long timeo = jiffies + 2;
+ /* 400ms timeout */
+ if (in_interrupt() || oops_in_progress)
+ return panic_nand_wait_ready(mtd, 400);
+
led_trigger_event(nand_led_trigger, LED_FULL);
/* wait until command is processed or timeout occurs */
do {
@@ -672,6 +698,22 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/**
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip,
+ struct mtd_info *mtd, int new_state)
+{
+ /* Hardware controller shared among independent devices */
+ chip->controller->active = chip;
+ chip->state = new_state;
+}
+
+/**
* nand_get_device - [GENERIC] Get chip for selected access
* @chip: the nand chip descriptor
* @mtd: MTD device structure
@@ -698,8 +740,14 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
return 0;
}
if (new_state == FL_PM_SUSPENDED) {
- spin_unlock(lock);
- return (chip->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
+ if (chip->controller->active->state == FL_PM_SUSPENDED) {
+ chip->state = FL_PM_SUSPENDED;
+ spin_unlock(lock);
+ return 0;
+ } else {
+ spin_unlock(lock);
+ return -EAGAIN;
+ }
}
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(wq, &wait);
@@ -710,6 +758,32 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
}
/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: Timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context. May happen when in panic and trying to write
+ * an oops through mtdoops.
+ */
+static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
+ unsigned long timeo)
+{
+ int i;
+ for (i = 0; i < timeo; i++) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+/**
* nand_wait - [DEFAULT] wait until the command is done
* @mtd: MTD device structure
* @chip: NAND chip structure
@@ -740,15 +814,19 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
else
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- while (time_before(jiffies, timeo)) {
- if (chip->dev_ready) {
- if (chip->dev_ready(mtd))
- break;
- } else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
- break;
+ if (in_interrupt() || oops_in_progress)
+ panic_nand_wait(mtd, chip, timeo);
+ else {
+ while (time_before(jiffies, timeo)) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ cond_resched();
}
- cond_resched();
}
led_trigger_event(nand_led_trigger, LED_OFF);
@@ -1949,6 +2027,45 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
}
/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd->priv;
+ int ret;
+
+ /* Do not allow writes past end of device */
+ if ((to + len) > mtd->size)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ /* Wait for the device to get ready. */
+ panic_nand_wait(mtd, chip, 400);
+
+ /* Grab the device. */
+ panic_nand_get_device(chip, mtd, FL_WRITING);
+
+ chip->ops.len = len;
+ chip->ops.datbuf = (uint8_t *)buf;
+ chip->ops.oobbuf = NULL;
+
+ ret = nand_do_write_ops(mtd, to, &chip->ops);
+
+ *retlen = chip->ops.retlen;
+ return ret;
+}
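
panic_nand_write() is the raw-NAND backend for the mtd panic_write hook: it skips the usual locking and sleeping so that mtdoops can still dump an oops to flash from panic or interrupt context. Callers never use it directly; they go through the method pointer installed in nand_scan_tail() further down. The fragment below is only a sketch of such a caller (kernel context assumed); the offset and buffer are illustrative.

#include <linux/mtd/mtd.h>

/* Sketch of a panic-context client, similar in spirit to what mtdoops does:
 * write a buffer through the panic hook instead of mtd->write. */
static int dump_oops_buffer(struct mtd_info *mtd, loff_t ofs,
			    const u_char *buf, size_t len)
{
	size_t retlen = 0;

	if (!mtd->panic_write)		/* not every MTD provides the hook */
		return -EOPNOTSUPP;

	return mtd->panic_write(mtd, ofs, len, &retlen, buf);
}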
+
+/**
* nand_write - [MTD Interface] NAND write with ECC
* @mtd: MTD device structure
* @to: offset to write to
@@ -2645,7 +2762,8 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips)
type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
if (IS_ERR(type)) {
- printk(KERN_WARNING "No NAND device found!!!\n");
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ printk(KERN_WARNING "No NAND device found.\n");
chip->select_chip(mtd, -1);
return PTR_ERR(type);
}
@@ -2877,6 +2995,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->unpoint = NULL;
mtd->read = nand_read;
mtd->write = nand_write;
+ mtd->panic_write = panic_nand_write;
mtd->read_oob = nand_read_oob;
mtd->write_oob = nand_write_oob;
mtd->sync = nand_sync;
diff --git a/drivers/mtd/nand/nand_bcm_umi.c b/drivers/mtd/nand/nand_bcm_umi.c
new file mode 100644
index 00000000000..46a6bc9c4b7
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.c
@@ -0,0 +1,149 @@
+/*****************************************************************************
+* Copyright 2004 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <mach/reg_umi.h>
+#include "nand_bcm_umi.h"
+#ifdef BOOT0_BUILD
+#include <uart.h>
+#endif
+
+/* ---- External Variable Declarations ----------------------------------- */
+/* ---- External Function Prototypes ------------------------------------- */
+/* ---- Public Variables ------------------------------------------------- */
+/* ---- Private Constants and Types -------------------------------------- */
+/* ---- Private Function Prototypes -------------------------------------- */
+/* ---- Private Variables ------------------------------------------------ */
+/* ---- Private Functions ------------------------------------------------ */
+
+#if NAND_ECC_BCH
+/****************************************************************************
+* nand_bcm_umi_bch_ecc_flip_bit - Routine to flip an erroneous bit
+*
+* PURPOSE:
+* This is a helper routine that flips the bit (0 -> 1 or 1 -> 0) of the
+* errored bit specified
+*
+* PARAMETERS:
+* datap - Container that holds the 512 byte data
+* errorLocation - Location of the bit that needs to be flipped
+*
+* RETURNS:
+* None
+****************************************************************************/
+static void nand_bcm_umi_bch_ecc_flip_bit(uint8_t *datap, int errorLocation)
+{
+ int locWithinAByte = (errorLocation & REG_UMI_BCH_ERR_LOC_BYTE) >> 0;
+ int locWithinAWord = (errorLocation & REG_UMI_BCH_ERR_LOC_WORD) >> 3;
+ int locWithinAPage = (errorLocation & REG_UMI_BCH_ERR_LOC_PAGE) >> 5;
+
+ uint8_t errorByte = 0;
+ uint8_t byteMask = 1 << locWithinAByte;
+
+ /* BCH uses big endian, need to change the location
+ * bits to little endian */
+ locWithinAWord = 3 - locWithinAWord;
+
+ errorByte = datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord];
+
+#ifdef BOOT0_BUILD
+ puthexs("\nECC Correct Offset: ",
+ locWithinAPage * sizeof(uint32_t) + locWithinAWord);
+ puthexs(" errorByte:", errorByte);
+ puthex8(" Bit: ", locWithinAByte);
+#endif
+
+ if (errorByte & byteMask) {
+ /* bit needs to be cleared */
+ errorByte &= ~byteMask;
+ } else {
+ /* bit needs to be set */
+ errorByte |= byteMask;
+ }
+
+ /* write back the value with the fixed bit */
+ datap[locWithinAPage * sizeof(uint32_t) + locWithinAWord] = errorByte;
+}
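
The error location delivered by the BCH engine packs three fields into one word: the bit inside a byte, the byte inside a 32-bit word (reported big-endian, hence the 3 - locWithinAWord swap) and the word inside the 512-byte sector. The standalone sketch below decodes one example location and flips the addressed bit; the field widths (3, 2 and 7 bits at shifts 0, 3 and 5) are assumptions inferred from the shifts used above, since the REG_UMI_BCH_ERR_LOC_* masks live in <mach/reg_umi.h> and are not visible here.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the BCH error-location word (see note above). */
#define LOC_BYTE(loc)	(((loc) >> 0) & 0x07)	/* bit within the byte   */
#define LOC_WORD(loc)	(((loc) >> 3) & 0x03)	/* byte within the word  */
#define LOC_PAGE(loc)	(((loc) >> 5) & 0x7f)	/* word within 512 bytes */

static void flip_bit(uint8_t *data, unsigned int loc)
{
	/* big-endian to little-endian byte position within the word */
	unsigned int byte = LOC_PAGE(loc) * 4 + (3 - LOC_WORD(loc));

	data[byte] ^= 1u << LOC_BYTE(loc);	/* XOR flips the bit either way */
}

int main(void)
{
	uint8_t sector[512] = { 0 };
	unsigned int loc = (10u << 5) | (1u << 3) | 6u;	/* word 10, byte 1, bit 6 */

	flip_bit(sector, loc);
	printf("byte 42 is now 0x%02x\n", sector[42]);	/* prints 0x40 */
	return 0;
}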
+
+/****************************************************************************
+* nand_bcm_umi_bch_correct_page - Routine to correct bit errors when reading NAND
+*
+* PURPOSE:
+* This routine reads the BCH registers to determine if there are any bit
+* errors during the read of the last 512 bytes of data + ECC bytes. If
+* errors exist, the routine fixes them.
+*
+* PARAMETERS:
+* datap - Container that holds the 512 byte data
+*
+* RETURNS:
+* 0 or greater = Number of errors corrected
+* (No errors are found or errors have been fixed)
+* -1 = Error(s) cannot be fixed
+****************************************************************************/
+int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
+ int numEccBytes)
+{
+ int numErrors;
+ int errorLocation;
+ int idx;
+ uint32_t regValue;
+
+ /* wait for read ECC to be valid */
+ regValue = nand_bcm_umi_bch_poll_read_ecc_calc();
+
+ /*
+ * read the control status register to determine if there
+ * are erroneous bits and see whether they are correctable
+ */
+ if ((regValue & REG_UMI_BCH_CTRL_STATUS_UNCORR_ERR) > 0) {
+ int i;
+
+ for (i = 0; i < numEccBytes; i++) {
+ if (readEccData[i] != 0xff) {
+ /* errors cannot be fixed, return -1 */
+ return -1;
+ }
+ }
+ /* If ECC is unprogrammed then we can't correct,
+ * assume everything OK */
+ return 0;
+ }
+
+ if ((regValue & REG_UMI_BCH_CTRL_STATUS_CORR_ERR) == 0) {
+ /* no errors */
+ return 0;
+ }
+
+ /*
+ * Fix erroneous bits by doing the following:
+ * 1. Read the number of errors in the control and status register
+ * 2. Read the error location registers that correspond to the number
+ * of errors reported
+ * 3. Invert the bit in the data
+ */
+ numErrors = (regValue & REG_UMI_BCH_CTRL_STATUS_NB_CORR_ERROR) >> 20;
+
+ for (idx = 0; idx < numErrors; idx++) {
+ errorLocation =
+ REG_UMI_BCH_ERR_LOC_ADDR(idx) & REG_UMI_BCH_ERR_LOC_MASK;
+
+ /* Flip bit */
+ nand_bcm_umi_bch_ecc_flip_bit(datap, errorLocation);
+ }
+ /* Errors corrected */
+ return numErrors;
+}
+#endif
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
new file mode 100644
index 00000000000..7cec2cd9785
--- /dev/null
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -0,0 +1,358 @@
+/*****************************************************************************
+* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved.
+*
+* Unless you and Broadcom execute a separate written software license
+* agreement governing use of this software, this software is licensed to you
+* under the terms of the GNU General Public License version 2, available at
+* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
+*
+* Notwithstanding the above, under no circumstances may you combine this
+* software in any way with any other Broadcom software provided under a
+* license other than the GPL, without Broadcom's express prior written
+* consent.
+*****************************************************************************/
+#ifndef NAND_BCM_UMI_H
+#define NAND_BCM_UMI_H
+
+/* ---- Include Files ---------------------------------------------------- */
+#include <mach/reg_umi.h>
+#include <mach/reg_nand.h>
+#include <cfg_global.h>
+
+/* ---- Constants and Types ---------------------------------------------- */
+#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
+#define NAND_ECC_BCH (CFG_GLOBAL_CHIP_REV > 0xA0)
+#else
+#define NAND_ECC_BCH 0
+#endif
+
+#define CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES 13
+
+#if NAND_ECC_BCH
+#ifdef BOOT0_BUILD
+#define NAND_ECC_NUM_BYTES 13
+#else
+#define NAND_ECC_NUM_BYTES CFG_GLOBAL_NAND_ECC_BCH_NUM_BYTES
+#endif
+#else
+#define NAND_ECC_NUM_BYTES 3
+#endif
+
+#define NAND_DATA_ACCESS_SIZE 512
+
+/* ---- Variable Externs ------------------------------------------ */
+/* ---- Function Prototypes --------------------------------------- */
+int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
+ int numEccBytes);
+
+/* Check if the device is ready */
+static inline int nand_bcm_umi_dev_ready(void)
+{
+ return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
+}
+
+/* Wait until device is ready */
+static inline void nand_bcm_umi_wait_till_ready(void)
+{
+ while (nand_bcm_umi_dev_ready() == 0)
+ ;
+}
+
+/* Enable Hamming ECC */
+static inline void nand_bcm_umi_hamming_enable_hwecc(void)
+{
+ /* disable and reset ECC, 512 byte page */
+ REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
+ REG_UMI_NAND_ECC_CSR_256BYTE);
+ /* enable ECC */
+ REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
+}
+
+#if NAND_ECC_BCH
+/* BCH ECC specifics */
+#define ECC_BITS_PER_CORRECTABLE_BIT 13
+
+/* Enable BCH Read ECC */
+static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
+{
+ /* disable and reset ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+ /* Turn on ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+}
+
+/* Enable BCH Write ECC */
+static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
+{
+ /* disable and reset ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
+ /* Turn on ECC */
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
+}
+
+/* Config number of BCH ECC bytes */
+static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
+{
+ uint32_t nValue;
+ uint32_t tValue;
+ uint32_t kValue;
+ uint32_t numBits = numEccBytes * 8;
+
+ /* disable and reset ECC */
+ REG_UMI_BCH_CTRL_STATUS =
+ REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
+ REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+
+ /* Every correctable bit requires 13 ECC bits */
+ tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
+
+ /* Total data in number of bits for generating and computing BCH ECC */
+ nValue = (NAND_DATA_ACCESS_SIZE + numEccBytes) * 8;
+
+ /* K parameter is used internally. K = N - (T * 13) */
+ kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
+
+ /* Write the settings */
+ REG_UMI_BCH_N = nValue;
+ REG_UMI_BCH_T = tValue;
+ REG_UMI_BCH_K = kValue;
+}
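
The N/T/K values programmed here follow directly from the BCH geometry: every correctable bit costs 13 parity bits, N is the total number of bits fed through the encoder (data plus ECC) and K is what is left once the parity is subtracted. A quick arithmetic check for the 13-byte ECC configuration this driver uses:

#include <stdio.h>

int main(void)
{
	unsigned int ecc_bytes = 13;                /* NAND_ECC_NUM_BYTES for BCH */
	unsigned int num_bits  = ecc_bytes * 8;     /* 104 parity bits            */
	unsigned int t = num_bits / 13;             /* 8 correctable bits          */
	unsigned int n = (512 + ecc_bytes) * 8;     /* 4200 bits of data + ECC     */
	unsigned int k = n - t * 13;                /* 4096 bits of payload        */

	printf("T=%u N=%u K=%u\n", t, n, k);        /* prints T=8 N=4200 K=4096    */
	return 0;
}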
+
+/* Pause during ECC read calculation to skip bytes in OOB */
+static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
+{
+ REG_UMI_BCH_CTRL_STATUS =
+ REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
+ REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
+}
+
+/* Resume during ECC read calculation after skipping bytes in OOB */
+static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
+{
+ REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+}
+
+/* Poll read ECC calc to check when hardware completes */
+static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
+{
+ uint32_t regVal;
+
+ do {
+ /* wait for ECC to be valid */
+ regVal = REG_UMI_BCH_CTRL_STATUS;
+ } while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
+
+ return regVal;
+}
+
+/* Poll write ECC calc to check when hardware completes */
+static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
+{
+ /* wait for ECC to be valid */
+ while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
+ == 0)
+ ;
+}
+
+/* Read the OOB and ECC; in kernel builds also copy the OOB out to a buffer */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
+ uint8_t *eccCalc, int numEccBytes, uint8_t *oobp)
+#else
+static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
+ uint8_t *eccCalc, int numEccBytes)
+#endif
+{
+ int eccPos = 0;
+ int numToRead = 16; /* There are 16 bytes per sector in the OOB */
+
+ /* ECC is already paused when this function is called */
+
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
+ while (numToRead > numEccBytes) {
+ /* skip free oob region */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+
+ /* read ECC bytes before BI */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+
+ while (numToRead > 11) {
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
+#else
+ eccCalc[eccPos++] = REG_NAND_DATA8;
+#endif
+ }
+
+ nand_bcm_umi_bch_pause_read_ecc_calc();
+
+ if (numToRead == 11) {
+ /* read BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+
+ /* read ECC bytes */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+ while (numToRead) {
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
+#else
+ eccCalc[eccPos++] = REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+ } else {
+ /* skip BI */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+
+ while (numToRead > numEccBytes) {
+ /* skip free oob region */
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp++ = REG_NAND_DATA8;
+#else
+ REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+
+ /* read ECC bytes */
+ nand_bcm_umi_bch_resume_read_ecc_calc();
+ while (numToRead) {
+#if defined(__KERNEL__) && !defined(STANDALONE)
+ *oobp = REG_NAND_DATA8;
+ eccCalc[eccPos++] = *oobp;
+ oobp++;
+#else
+ eccCalc[eccPos++] = REG_NAND_DATA8;
+#endif
+ numToRead--;
+ }
+ }
+}
+
+/* Helper function to write ECC */
+static inline void NAND_BCM_UMI_ECC_WRITE(int numEccBytes, int eccBytePos,
+ uint8_t *oobp, uint8_t eccVal)
+{
+ if (eccBytePos <= numEccBytes)
+ *oobp = eccVal;
+}
+
+/* Write OOB with ECC */
+static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
+ uint8_t *oobp, int numEccBytes)
+{
+ uint32_t eccVal = 0xffffffff;
+
+ /* wait for write ECC to be valid */
+ nand_bcm_umi_bch_poll_write_ecc_calc();
+
+ /*
+ ** Get the hardware ecc from the 32-bit result registers.
+ ** Read after 512 byte accesses. Format B3B2B1B0
+ ** where B3 = ecc3, etc.
+ */
+
+ if (pageSize == NAND_DATA_ACCESS_SIZE) {
+ /* Now fill in the ECC bytes */
+ if (numEccBytes >= 13)
+ eccVal = REG_UMI_BCH_WR_ECC_3;
+
+ /* Usually we skip CM in oob[0,1] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
+ (eccVal >> 16) & 0xff);
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[1],
+ (eccVal >> 8) & 0xff);
+
+ /* Write ECC in oob[2,3,4] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[2],
+ eccVal & 0xff); /* ECC 12 */
+
+ if (numEccBytes >= 9)
+ eccVal = REG_UMI_BCH_WR_ECC_2;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
+ (eccVal >> 24) & 0xff); /* ECC11 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[4],
+ (eccVal >> 16) & 0xff); /* ECC10 */
+
+ /* Always Skip BI in oob[5] */
+ } else {
+ /* Always Skip BI in oob[0] */
+
+ /* Now fill in the ECC bytes */
+ if (numEccBytes >= 13)
+ eccVal = REG_UMI_BCH_WR_ECC_3;
+
+ /* Usually skip CM in oob[1,2] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
+ (eccVal >> 16) & 0xff);
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 14, &oobp[2],
+ (eccVal >> 8) & 0xff);
+
+ /* Write ECC in oob[3-15] */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 13, &oobp[3],
+ eccVal & 0xff); /* ECC12 */
+
+ if (numEccBytes >= 9)
+ eccVal = REG_UMI_BCH_WR_ECC_2;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
+ (eccVal >> 24) & 0xff); /* ECC11 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 11, &oobp[5],
+ (eccVal >> 16) & 0xff); /* ECC10 */
+ }
+
+ /* Fill in the remainder of ECC locations */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 10, &oobp[6],
+ (eccVal >> 8) & 0xff); /* ECC9 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 9, &oobp[7],
+ eccVal & 0xff); /* ECC8 */
+
+ if (numEccBytes >= 5)
+ eccVal = REG_UMI_BCH_WR_ECC_1;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
+ (eccVal >> 24) & 0xff); /* ECC7 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 7, &oobp[9],
+ (eccVal >> 16) & 0xff); /* ECC6 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 6, &oobp[10],
+ (eccVal >> 8) & 0xff); /* ECC5 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 5, &oobp[11],
+ eccVal & 0xff); /* ECC4 */
+
+ if (numEccBytes >= 1)
+ eccVal = REG_UMI_BCH_WR_ECC_0;
+
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
+ (eccVal >> 24) & 0xff); /* ECC3 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 3, &oobp[13],
+ (eccVal >> 16) & 0xff); /* ECC2 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 2, &oobp[14],
+ (eccVal >> 8) & 0xff); /* ECC1 */
+ NAND_BCM_UMI_ECC_WRITE(numEccBytes, 1, &oobp[15],
+ eccVal & 0xff); /* ECC0 */
+}
+#endif
+
+#endif /* NAND_BCM_UMI_H */
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 92320a64327..271b8e735e8 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -150,20 +150,19 @@ static const char addressbits[256] = {
};
/**
- * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
* block
- * @mtd: MTD block structure
* @buf: input buffer with raw data
+ * @eccsize: data bytes per ecc step (256 or 512)
* @code: output buffer with ECC
*/
-int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
unsigned char *code)
{
int i;
const uint32_t *bp = (uint32_t *)buf;
/* 256 or 512 bytes/ecc */
- const uint32_t eccsize_mult =
- (((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
+ const uint32_t eccsize_mult = eccsize >> 8;
uint32_t cur; /* current value in buffer */
/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
@@ -412,6 +411,22 @@ int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
(invparity[par & 0x55] << 2) |
(invparity[rp17] << 1) |
(invparity[rp16] << 0);
+}
+EXPORT_SYMBOL(__nand_calculate_ecc);
+
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ __nand_calculate_ecc(buf,
+ ((struct nand_chip *)mtd->priv)->ecc.size, code);
+
return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);
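
Factoring the computation out as __nand_calculate_ecc() drops the dependency on struct mtd_info, so code that only holds a raw buffer (self-tests, simulators, boot loaders) can request the 3-byte Hamming code directly; eccsize >> 8 merely selects the 256- or 512-byte variant. A minimal kernel-context sketch of a direct caller, assuming the prototype is exported via <linux/mtd/nand_ecc.h> and a 256-byte ECC step:

#include <linux/mtd/nand_ecc.h>

/* Sketch: compute the 3-byte ECC over one 256-byte chunk without an mtd. */
static void ecc_for_chunk(const unsigned char chunk[256], unsigned char ecc[3])
{
	__nand_calculate_ecc(chunk, 256, ecc);
}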
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index cd0711b83ac..7281000fef2 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -161,7 +161,7 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I
MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory");
/* The largest possible page size */
-#define NS_LARGEST_PAGE_SIZE 2048
+#define NS_LARGEST_PAGE_SIZE 4096
/* The prefix for simulator output */
#define NS_OUTPUT_PREFIX "[nandsim]"
@@ -259,7 +259,8 @@ MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of mem
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
-#define OPT_LARGEPAGE (OPT_PAGE2048) /* 2048-byte page chips */
+#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
+#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
/* Remove action bits from state */
@@ -588,6 +589,8 @@ static int init_nandsim(struct mtd_info *mtd)
ns->options |= OPT_PAGE512_8BIT;
} else if (ns->geom.pgsz == 2048) {
ns->options |= OPT_PAGE2048;
+ } else if (ns->geom.pgsz == 4096) {
+ ns->options |= OPT_PAGE4096;
} else {
NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
return -EIO;
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 7c302d55910..66123419f65 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -216,7 +216,7 @@ static int nomadik_nand_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops nomadik_nand_pm_ops = {
+static const struct dev_pm_ops nomadik_nand_pm_ops = {
.suspend = nomadik_nand_suspend,
.resume = nomadik_nand_resume,
};
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 4e16c6f5bdd..8d467315f02 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -34,7 +34,12 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = pdev->dev.platform_data;
struct plat_nand_data *data;
- int res = 0;
+ struct resource *res;
+ int err = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
/* Allocate memory for the device structure (and zero it) */
data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
@@ -43,12 +48,18 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
return -ENOMEM;
}
- data->io_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto out_free;
+ }
+
+ data->io_base = ioremap(res->start, resource_size(res));
if (data->io_base == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
- kfree(data);
- return -EIO;
+ err = -EIO;
+ goto out_release_io;
}
data->chip.priv = &data;
@@ -74,24 +85,24 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
/* Handle any platform specific setup */
if (pdata->ctrl.probe) {
- res = pdata->ctrl.probe(pdev);
- if (res)
+ err = pdata->ctrl.probe(pdev);
+ if (err)
goto out;
}
/* Scan to find existence of the device */
if (nand_scan(&data->mtd, 1)) {
- res = -ENXIO;
+ err = -ENXIO;
goto out;
}
#ifdef CONFIG_MTD_PARTITIONS
if (pdata->chip.part_probe_types) {
- res = parse_mtd_partitions(&data->mtd,
+ err = parse_mtd_partitions(&data->mtd,
pdata->chip.part_probe_types,
&data->parts, 0);
- if (res > 0) {
- add_mtd_partitions(&data->mtd, data->parts, res);
+ if (err > 0) {
+ add_mtd_partitions(&data->mtd, data->parts, err);
return 0;
}
}
@@ -99,14 +110,14 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
- res = add_mtd_partitions(&data->mtd, data->parts,
+ err = add_mtd_partitions(&data->mtd, data->parts,
pdata->chip.nr_partitions);
} else
#endif
- res = add_mtd_device(&data->mtd);
+ err = add_mtd_device(&data->mtd);
- if (!res)
- return res;
+ if (!err)
+ return err;
nand_release(&data->mtd);
out:
@@ -114,8 +125,11 @@ out:
pdata->ctrl.remove(pdev);
platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
+out_release_io:
+ release_mem_region(res->start, resource_size(res));
+out_free:
kfree(data);
- return res;
+ return err;
}
/*
@@ -125,6 +139,9 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
@@ -134,6 +151,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
iounmap(data->io_base);
+ release_mem_region(res->start, resource_size(res));
kfree(data);
return 0;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 68b5b3a486a..fa6e9c7fe51 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -774,7 +774,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->select_chip = s3c2410_nand_select_chip;
chip->chip_delay = 50;
chip->priv = nmtd;
- chip->options = 0;
+ chip->options = set->options;
chip->controller = &info->controller;
switch (info->cpu_type) {
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 73af8324d0d..863513c3b69 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -429,11 +429,10 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
chip = mtd->priv;
txx9_priv = chip->priv;
+ nand_release(mtd);
#ifdef CONFIG_MTD_PARTITIONS
- del_mtd_partitions(mtd);
kfree(drvdata->parts[i]);
#endif
- del_mtd_device(mtd);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 86c4f6dcdc6..75f38b95811 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -112,10 +112,24 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
unsigned long timeout;
u32 syscfg;
- if (state == FL_RESETING) {
- int i;
+ if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
+ state == FL_VERIFYING_ERASE) {
+ int i = 21;
+ unsigned int intr_flags = ONENAND_INT_MASTER;
+
+ switch (state) {
+ case FL_RESETING:
+ intr_flags |= ONENAND_INT_RESET;
+ break;
+ case FL_PREPARING_ERASE:
+ intr_flags |= ONENAND_INT_ERASE;
+ break;
+ case FL_VERIFYING_ERASE:
+ i = 101;
+ break;
+ }
- for (i = 0; i < 20; i++) {
+ while (--i) {
udelay(1);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if (intr & ONENAND_INT_MASTER)
@@ -126,7 +140,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
- if (!(intr & ONENAND_INT_RESET)) {
+ if ((intr & intr_flags) != intr_flags) {
wait_err("timeout", state, ctrl, intr);
return -EIO;
}
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ff66e4330aa..f63b1db3ffb 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1,17 +1,19 @@
/*
* linux/drivers/mtd/onenand/onenand_base.c
*
- * Copyright (C) 2005-2007 Samsung Electronics
+ * Copyright © 2005-2009 Samsung Electronics
+ * Copyright © 2007 Nokia Corporation
+ *
* Kyungmin Park <kyungmin.park@samsung.com>
*
* Credits:
* Adrian Hunter <ext-adrian.hunter@nokia.com>:
* auto-placement support, read-while load support, various fixes
- * Copyright (C) Nokia Corporation, 2007
*
* Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
* Flex-OneNAND support
- * Copyright (C) Samsung Electronics, 2008
+ * Amul Kumar Saha <amul.saha at samsung.com>
+ * OTP support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
#include <asm/io.h>
+/*
+ * Multiblock erase if number of blocks to erase is 2 or more.
+ * Maximum number of blocks for simultaneous erase is 64.
+ */
+#define MB_ERASE_MIN_BLK_COUNT 2
+#define MB_ERASE_MAX_BLK_COUNT 64
+
/* Default Flex-OneNAND boundary and lock respectively */
static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
@@ -43,6 +52,18 @@ MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
" : 0->Set boundary in unlocked status"
" : 1->Set boundary in locked status");
+/* Default OneNAND/Flex-OneNAND OTP options */
+static int otp;
+
+module_param(otp, int, 0400);
+MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
+ "Syntax : otp=LOCK_TYPE"
+ "LOCK_TYPE : Keys issued, for specific OTP Lock type"
+ " : 0 -> Default (No Blocks Locked)"
+ " : 1 -> OTP Block lock"
+ " : 2 -> 1st Block lock"
+ " : 3 -> BOTH OTP Block and 1st Block lock");
+
/**
* onenand_oob_128 - oob info for Flex-Onenand with 4KB page
* For now, we expose only 64 out of 80 ecc bytes
@@ -339,6 +360,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
break;
case ONENAND_CMD_ERASE:
+ case ONENAND_CMD_MULTIBLOCK_ERASE:
+ case ONENAND_CMD_ERASE_VERIFY:
case ONENAND_CMD_BUFFERRAM:
case ONENAND_CMD_OTP_ACCESS:
block = onenand_block(this, addr);
@@ -483,7 +506,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
if (interrupt & flags)
break;
- if (state != FL_READING)
+ if (state != FL_READING && state != FL_PREPARING_ERASE)
cond_resched();
}
/* To get correct interrupt status in timeout case */
@@ -500,25 +523,40 @@ static int onenand_wait(struct mtd_info *mtd, int state)
int ecc = onenand_read_ecc(this);
if (ecc) {
if (ecc & ONENAND_ECC_2BIT_ALL) {
- printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
+ printk(KERN_ERR "%s: ECC error = 0x%04x\n",
+ __func__, ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
- printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+ printk(KERN_DEBUG "%s: correctable ECC error = 0x%04x\n",
+ __func__, ecc);
mtd->ecc_stats.corrected++;
}
}
} else if (state == FL_READING) {
- printk(KERN_ERR "onenand_wait: read timeout! ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+ printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+ if (state == FL_PREPARING_ERASE && !(interrupt & ONENAND_INT_ERASE)) {
+ printk(KERN_ERR "%s: mb erase timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
+ return -EIO;
+ }
+
+ if (!(interrupt & ONENAND_INT_MASTER)) {
+ printk(KERN_ERR "%s: timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
return -EIO;
}
/* If there's controller error, it's a real error */
if (ctrl & ONENAND_CTRL_ERROR) {
- printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n",
- ctrl);
+ printk(KERN_ERR "%s: controller error = 0x%04x\n",
+ __func__, ctrl);
if (ctrl & ONENAND_CTRL_LOCK)
- printk(KERN_ERR "onenand_wait: it's locked error.\n");
+ printk(KERN_ERR "%s: it's locked error.\n", __func__);
return -EIO;
}
@@ -1015,7 +1053,8 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
/* We are attempting to reread, so decrement stats.failed
* which was incremented by onenand_wait due to read failure
*/
- printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+ printk(KERN_INFO "%s: Attempting to recover from uncorrectable read\n",
+ __func__);
mtd->ecc_stats.failed--;
/* Issue the LSB page recovery command */
@@ -1046,7 +1085,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
int ret = 0;
int writesize = this->writesize;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+ __func__, (unsigned int) from, (int) len);
if (ops->mode == MTD_OOB_AUTO)
oobsize = this->ecclayout->oobavail;
@@ -1057,7 +1097,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/* Do not allow reads past end of device */
if (from + len > mtd->size) {
- printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
ops->retlen = 0;
ops->oobretlen = 0;
return -EINVAL;
@@ -1146,7 +1187,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
int ret = 0, boundary = 0;
int writesize = this->writesize;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+ __func__, (unsigned int) from, (int) len);
if (ops->mode == MTD_OOB_AUTO)
oobsize = this->ecclayout->oobavail;
@@ -1157,7 +1199,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/* Do not allow reads past end of device */
if ((from + len) > mtd->size) {
- printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n");
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
ops->retlen = 0;
ops->oobretlen = 0;
return -EINVAL;
@@ -1275,7 +1318,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
from += ops->ooboffs;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
+ __func__, (unsigned int) from, (int) len);
/* Initialize return length value */
ops->oobretlen = 0;
@@ -1288,7 +1332,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
column = from & (mtd->oobsize - 1);
if (unlikely(column >= oobsize)) {
- printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n");
+ printk(KERN_ERR "%s: Attempted to start read outside oob\n",
+ __func__);
return -EINVAL;
}
@@ -1296,7 +1341,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
if (unlikely(from >= mtd->size ||
column + len > ((mtd->size >> this->page_shift) -
(from >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n");
+ printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
+ __func__);
return -EINVAL;
}
@@ -1319,7 +1365,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
ret = onenand_recover_lsb(mtd, from, ret);
if (ret && ret != -EBADMSG) {
- printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+ printk(KERN_ERR "%s: read failed = 0x%x\n",
+ __func__, ret);
break;
}
@@ -1450,20 +1497,21 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
if (interrupt & ONENAND_INT_READ) {
int ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
- printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
- ", controller error 0x%04x\n", ecc, ctrl);
+ printk(KERN_WARNING "%s: ecc error = 0x%04x, "
+ "controller error 0x%04x\n",
+ __func__, ecc, ctrl);
return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
- printk(KERN_ERR "onenand_bbt_wait: read timeout!"
- "ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+ printk(KERN_ERR "%s: read timeout! ctrl=0x%04x intr=0x%04x\n",
+ __func__, ctrl, interrupt);
return ONENAND_BBT_READ_FATAL_ERROR;
}
/* Initial bad block case: 0x2400 or 0x0400 */
if (ctrl & ONENAND_CTRL_ERROR) {
- printk(KERN_DEBUG "onenand_bbt_wait: "
- "controller error = 0x%04x\n", ctrl);
+ printk(KERN_DEBUG "%s: controller error = 0x%04x\n",
+ __func__, ctrl);
return ONENAND_BBT_READ_ERROR;
}
@@ -1487,14 +1535,16 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_bbt_read_oob: from = 0x%08x, len = %zi\n", (unsigned int) from, len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
+ __func__, (unsigned int) from, len);
/* Initialize return value */
ops->oobretlen = 0;
/* Do not allow reads past end of device */
if (unlikely((from + len) > mtd->size)) {
- printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n");
+ printk(KERN_ERR "%s: Attempt read beyond end of device\n",
+ __func__);
return ONENAND_BBT_READ_FATAL_ERROR;
}
@@ -1661,21 +1711,23 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Wait for any existing operation to clear */
onenand_panic_wait(mtd);
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n",
- (unsigned int) to, (int) len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int) to, (int) len);
/* Initialize retlen, in case of early exit */
*retlen = 0;
/* Do not allow writes past end of device */
if (unlikely((to + len) > mtd->size)) {
- printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n");
+ printk(KERN_ERR "%s: Attempt write to past end of device\n",
+ __func__);
return -EINVAL;
}
/* Reject writes, which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
- printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
+ printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
+ __func__);
return -EINVAL;
}
@@ -1711,7 +1763,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
}
if (ret) {
- printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret);
+ printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
break;
}
@@ -1792,7 +1844,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
u_char *oobbuf;
int ret = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int) to, (int) len);
/* Initialize retlen, in case of early exit */
ops->retlen = 0;
@@ -1800,13 +1853,15 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/* Do not allow writes past end of device */
if (unlikely((to + len) > mtd->size)) {
- printk(KERN_ERR "onenand_write_ops_nolock: Attempt write to past end of device\n");
+ printk(KERN_ERR "%s: Attempt write to past end of device\n",
+ __func__);
return -EINVAL;
}
/* Reject writes, which are not page aligned */
if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
- printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n");
+ printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
+ __func__);
return -EINVAL;
}
@@ -1879,7 +1934,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
onenand_update_bufferram(mtd, prev, !ret && !prev_subpage);
if (ret) {
written -= prevlen;
- printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
+ printk(KERN_ERR "%s: write failed %d\n",
+ __func__, ret);
break;
}
@@ -1887,7 +1943,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/* Only check verify write turn on */
ret = onenand_verify(mtd, buf - len, to - len, len);
if (ret)
- printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
break;
}
@@ -1905,14 +1962,16 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/* In partial page write we don't update bufferram */
onenand_update_bufferram(mtd, to, !ret && !subpage);
if (ret) {
- printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret);
+ printk(KERN_ERR "%s: write failed %d\n",
+ __func__, ret);
break;
}
/* Only check verify write turn on */
ret = onenand_verify(mtd, buf, to, thislen);
if (ret) {
- printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
break;
}
@@ -1968,7 +2027,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
to += ops->ooboffs;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int) to, (int) len);
/* Initialize retlen, in case of early exit */
ops->oobretlen = 0;
@@ -1981,14 +2041,15 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
column = to & (mtd->oobsize - 1);
if (unlikely(column >= oobsize)) {
- printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n");
+ printk(KERN_ERR "%s: Attempted to start write outside oob\n",
+ __func__);
return -EINVAL;
}
/* For compatibility with NAND: Do not allow write past end of page */
if (unlikely(column + len > oobsize)) {
- printk(KERN_ERR "onenand_write_oob_nolock: "
- "Attempt to write past end of page\n");
+ printk(KERN_ERR "%s: Attempt to write past end of page\n",
+ __func__);
return -EINVAL;
}
@@ -1996,7 +2057,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
if (unlikely(to >= mtd->size ||
column + len > ((mtd->size >> this->page_shift) -
(to >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n");
+ printk(KERN_ERR "%s: Attempted to write past end of device\n",
+ __func__);
return -EINVAL;
}
@@ -2038,13 +2100,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
ret = this->wait(mtd, FL_WRITING);
if (ret) {
- printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret);
+ printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
break;
}
ret = onenand_verify_oob(mtd, oobbuf, to);
if (ret) {
- printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret);
+ printk(KERN_ERR "%s: verify failed %d\n",
+ __func__, ret);
break;
}
@@ -2140,78 +2203,186 @@ static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allo
return bbm->isbad_bbt(mtd, ofs, allowbbt);
}
+
+static int onenand_multiblock_erase_verify(struct mtd_info *mtd,
+ struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ unsigned int block_size = (1 << this->erase_shift);
+ int ret = 0;
+
+ while (len) {
+ this->command(mtd, ONENAND_CMD_ERASE_VERIFY, addr, block_size);
+ ret = this->wait(mtd, FL_VERIFYING_ERASE);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed verify, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = addr;
+ return -1;
+ }
+ len -= block_size;
+ addr += block_size;
+ }
+ return 0;
+}
+
/**
- * onenand_erase - [MTD Interface] erase block(s)
+ * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
* @param mtd MTD device structure
* @param instr erase instruction
+ * @param region erase region
*
- * Erase one ore more blocks
+ * Erase one or more blocks, up to 64 blocks at a time
*/
-static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+static int onenand_multiblock_erase(struct mtd_info *mtd,
+ struct erase_info *instr,
+ unsigned int block_size)
{
struct onenand_chip *this = mtd->priv;
- unsigned int block_size;
loff_t addr = instr->addr;
- loff_t len = instr->len;
- int ret = 0, i;
- struct mtd_erase_region_info *region = NULL;
- loff_t region_end = 0;
+ int len = instr->len;
+ int eb_count = 0;
+ int ret = 0;
+ int bdry_block = 0;
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
+ instr->state = MTD_ERASING;
- /* Do not allow erase past end of device */
- if (unlikely((len + addr) > mtd->size)) {
- printk(KERN_ERR "onenand_erase: Erase past end of device\n");
- return -EINVAL;
+ if (ONENAND_IS_DDP(this)) {
+ loff_t bdry_addr = this->chipsize >> 1;
+ if (addr < bdry_addr && (addr + len) > bdry_addr)
+ bdry_block = bdry_addr >> this->erase_shift;
}
- if (FLEXONENAND(this)) {
- /* Find the eraseregion of this address */
- i = flexonenand_region(mtd, addr);
- region = &mtd->eraseregions[i];
+ /* Pre-check for bad blocks */
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks */
+ if (onenand_block_isbad_nolock(mtd, addr, 0)) {
+ printk(KERN_WARNING "%s: attempt to erase a bad block "
+ "at addr 0x%012llx\n",
+ __func__, (unsigned long long) addr);
+ instr->state = MTD_ERASE_FAILED;
+ return -EIO;
+ }
+ len -= block_size;
+ addr += block_size;
+ }
- block_size = region->erasesize;
- region_end = region->offset + region->erasesize * region->numblocks;
+ len = instr->len;
+ addr = instr->addr;
- /* Start address within region must align on block boundary.
- * Erase region's start offset is always block start address.
- */
- if (unlikely((addr - region->offset) & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
- return -EINVAL;
+ /* loop over 64 eb batches */
+ while (len) {
+ struct erase_info verify_instr = *instr;
+ int max_eb_count = MB_ERASE_MAX_BLK_COUNT;
+
+ verify_instr.addr = addr;
+ verify_instr.len = 0;
+
+ /* do not cross chip boundary */
+ if (bdry_block) {
+ int this_block = (addr >> this->erase_shift);
+
+ if (this_block < bdry_block) {
+ max_eb_count = min(max_eb_count,
+ (bdry_block - this_block));
+ }
}
- } else {
- block_size = 1 << this->erase_shift;
- /* Start address must align on block boundary */
- if (unlikely(addr & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
- return -EINVAL;
+ eb_count = 0;
+
+ while (len > block_size && eb_count < (max_eb_count - 1)) {
+ this->command(mtd, ONENAND_CMD_MULTIBLOCK_ERASE,
+ addr, block_size);
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_PREPARING_ERASE);
+ if (ret) {
+ printk(KERN_ERR "%s: Failed multiblock erase, "
+ "block %d\n", __func__,
+ onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+ eb_count++;
+ }
+
+ /* last block of 64-eb series */
+ cond_resched();
+ this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_ERASING);
+ /* Check if it is write protected */
+ if (ret) {
+ printk(KERN_ERR "%s: Failed erase, block %d\n",
+ __func__, onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EIO;
+ }
+
+ len -= block_size;
+ addr += block_size;
+ eb_count++;
+
+ /* verify */
+ verify_instr.len = eb_count * block_size;
+ if (onenand_multiblock_erase_verify(mtd, &verify_instr)) {
+ instr->state = verify_instr.state;
+ instr->fail_addr = verify_instr.fail_addr;
+ return -EIO;
}
- }
- /* Length must align on block boundary */
- if (unlikely(len & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Length not block aligned\n");
- return -EINVAL;
}
+ return 0;
+}
- instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
- /* Grab the lock and see if the device is available */
- onenand_get_device(mtd, FL_ERASING);
+/**
+ * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
+ * @param mtd MTD device structure
+ * @param instr erase instruction
+ * @param region erase region
+ * @param block_size erase block size
+ *
+ * Erase one or more blocks one block at a time
+ */
+static int onenand_block_by_block_erase(struct mtd_info *mtd,
+ struct erase_info *instr,
+ struct mtd_erase_region_info *region,
+ unsigned int block_size)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t addr = instr->addr;
+ int len = instr->len;
+ loff_t region_end = 0;
+ int ret = 0;
+
+ if (region) {
+ /* region is set for Flex-OneNAND */
+ region_end = region->offset + region->erasesize * region->numblocks;
+ }
- /* Loop through the blocks */
instr->state = MTD_ERASING;
+ /* Loop through the blocks */
while (len) {
cond_resched();
/* Check if we have a bad block, we do not erase bad blocks */
if (onenand_block_isbad_nolock(mtd, addr, 0)) {
- printk (KERN_WARNING "onenand_erase: attempt to erase a bad block at addr 0x%012llx\n", (unsigned long long) addr);
+ printk(KERN_WARNING "%s: attempt to erase a bad block "
+ "at addr 0x%012llx\n",
+ __func__, (unsigned long long) addr);
instr->state = MTD_ERASE_FAILED;
- goto erase_exit;
+ return -EIO;
}
this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
@@ -2221,11 +2392,11 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = this->wait(mtd, FL_ERASING);
/* Check, if it is write protected */
if (ret) {
- printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
- onenand_block(this, addr));
+ printk(KERN_ERR "%s: Failed erase, block %d\n",
+ __func__, onenand_block(this, addr));
instr->state = MTD_ERASE_FAILED;
instr->fail_addr = addr;
- goto erase_exit;
+ return -EIO;
}
len -= block_size;
@@ -2241,25 +2412,88 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
if (len & (block_size - 1)) {
/* FIXME: This should be handled at MTD partitioning level. */
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
- goto erase_exit;
+ printk(KERN_ERR "%s: Unaligned address\n",
+ __func__);
+ return -EIO;
}
}
+ }
+ return 0;
+}
+
+/**
+ * onenand_erase - [MTD Interface] erase block(s)
+ * @param mtd MTD device structure
+ * @param instr erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int block_size;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_offset = 0;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
+ (unsigned long long) instr->addr, (unsigned long long) instr->len);
+
+ /* Do not allow erase past end of device */
+ if (unlikely((len + addr) > mtd->size)) {
+ printk(KERN_ERR "%s: Erase past end of device\n", __func__);
+ return -EINVAL;
+ }
+
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ int i = flexonenand_region(mtd, addr);
+
+ region = &mtd->eraseregions[i];
+ block_size = region->erasesize;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ region_offset = region->offset;
+ } else
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely((addr - region_offset) & (block_size - 1))) {
+ printk(KERN_ERR "%s: Unaligned address\n", __func__);
+ return -EINVAL;
+ }
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ printk(KERN_ERR "%s: Length not block aligned\n", __func__);
+ return -EINVAL;
}
- instr->state = MTD_ERASE_DONE;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-erase_exit:
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_ERASING);
- ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+ if (region || instr->len < MB_ERASE_MIN_BLK_COUNT * block_size) {
+ /* region is set for Flex-OneNAND (no mb erase) */
+ ret = onenand_block_by_block_erase(mtd, instr,
+ region, block_size);
+ } else {
+ ret = onenand_multiblock_erase(mtd, instr, block_size);
+ }
/* Deselect and wake up anyone waiting on the device */
onenand_release_device(mtd);
/* Do call back function */
- if (!ret)
+ if (!ret) {
+ instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
+ }
return ret;
}
@@ -2272,7 +2506,7 @@ erase_exit:
*/
static void onenand_sync(struct mtd_info *mtd)
{
- DEBUG(MTD_DEBUG_LEVEL3, "onenand_sync: called\n");
+ DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_SYNCING);
@@ -2406,7 +2640,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & wp_status_mask))
- printk(KERN_ERR "wp status = 0x%x\n", status);
+ printk(KERN_ERR "%s: wp status = 0x%x\n",
+ __func__, status);
return 0;
}
@@ -2435,7 +2670,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & wp_status_mask))
- printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+ printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+ __func__, block, status);
}
return 0;
@@ -2502,7 +2738,8 @@ static int onenand_check_lock_status(struct onenand_chip *this)
/* Check lock status */
status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
if (!(status & ONENAND_WP_US)) {
- printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+ printk(KERN_ERR "%s: block = %d, wp status = 0x%x\n",
+ __func__, block, status);
return 0;
}
}
@@ -2557,6 +2794,208 @@ static void onenand_unlock_all(struct mtd_info *mtd)
#ifdef CONFIG_MTD_ONENAND_OTP
+/**
+ * onenand_otp_command - Send OTP specific command to OneNAND device
+ * @param mtd MTD device structure
+ * @param cmd the command to be sent
+ * @param addr offset to read from or write to
+ * @param len number of bytes to read or write
+ */
+static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int value, block, page;
+
+ /* Address translation */
+ switch (cmd) {
+ case ONENAND_CMD_OTP_ACCESS:
+ block = (int) (addr >> this->erase_shift);
+ page = -1;
+ break;
+
+ default:
+ block = (int) (addr >> this->erase_shift);
+ page = (int) (addr >> this->page_shift);
+
+ if (ONENAND_IS_2PLANE(this)) {
+ /* Make the even block number */
+ block &= ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page >>= 1;
+ }
+ page &= this->page_mask;
+ break;
+ }
+
+ if (block != -1) {
+ /* Write 'DFS, FBA' of Flash */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS1);
+ }
+
+ if (page != -1) {
+ /* Now we use page size operation */
+ int sectors = 4, count = 4;
+ int dataram;
+
+ switch (cmd) {
+ default:
+ if (ONENAND_IS_2PLANE(this) && cmd == ONENAND_CMD_PROG)
+ cmd = ONENAND_CMD_2X_PROG;
+ dataram = ONENAND_CURRENT_BUFFERRAM(this);
+ break;
+ }
+
+ /* Write 'FPA, FSA' of Flash */
+ value = onenand_page_address(page, sectors);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS8);
+
+ /* Write 'BSA, BSC' of DataRAM */
+ value = onenand_buffer_address(dataram, sectors, count);
+ this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+ }
+
+ /* Interrupt clear */
+ this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+
+ /* Write command */
+ this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+ return 0;
+}
+
+/**
+ * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * OneNAND write out-of-band only for OTP
+ */
+static int onenand_otp_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, ret = 0, oobsize;
+ int written = 0;
+ u_char *oobbuf;
+ size_t len = ops->ooblen;
+ const u_char *buf = ops->oobbuf;
+ int block, value, status;
+
+ to += ops->ooboffs;
+
+ /* Initialize retlen, in case of early exit */
+ ops->oobretlen = 0;
+
+ oobsize = mtd->oobsize;
+
+ column = to & (mtd->oobsize - 1);
+
+ oobbuf = this->oob_buf;
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, oobsize, len - written);
+
+ cond_resched();
+
+ block = (int) (to >> this->erase_shift);
+ /*
+ * Write 'DFS, FBA' of Flash
+ * Add: F100h DQ=DFS, FBA
+ */
+
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS1);
+
+ /*
+ * Select DataRAM for DDP
+ * Add: F101h DQ=DBS
+ */
+
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base +
+ ONENAND_REG_START_ADDRESS2);
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ /*
+ * Enter OTP access mode
+ */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
+
+ /* We send data to spare ram with oobsize
+ * to prevent byte access */
+ memcpy(oobbuf + column, buf, thislen);
+
+ /*
+ * Write Data into DataRAM
+ * Add: 8th Word
+ * in sector0/spare/page0
+ * DQ=XXFCh
+ */
+ this->write_bufferram(mtd, ONENAND_SPARERAM,
+ oobbuf, 0, mtd->oobsize);
+
+ onenand_otp_command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ onenand_update_bufferram(mtd, to, 0);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, 0);
+ }
+
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
+ break;
+ }
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
+
+ status = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ status &= 0x60;
+
+ if (status == 0x60) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tLOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+ } else if (status == 0x20) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tLOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tUN-LOCKED\n");
+ } else if (status == 0x40) {
+ printk(KERN_DEBUG "\nBLOCK\tSTATUS\n");
+ printk(KERN_DEBUG "1st Block\tUN-LOCKED\n");
+ printk(KERN_DEBUG "OTP Block\tLOCKED\n");
+ } else {
+ printk(KERN_DEBUG "Reboot to check\n");
+ }
+
+ written += thislen;
+ if (written == len)
+ break;
+
+ to += mtd->writesize;
+ buf += thislen;
+ column = 0;
+ }
+
+ ops->oobretlen = written;
+
+ return ret;
+}
+
/* Internal OTP operation */
typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len,
size_t *retlen, u_char *buf);
@@ -2659,11 +3098,11 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
struct mtd_oob_ops ops;
int ret;
- /* Enter OTP access mode */
- this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
- this->wait(mtd, FL_OTPING);
-
if (FLEXONENAND(this)) {
+
+ /* Enter OTP access mode */
+ this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
+ this->wait(mtd, FL_OTPING);
/*
* For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
* main area of page 49.
@@ -2674,19 +3113,19 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
ops.oobbuf = NULL;
ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
*retlen = ops.retlen;
+
+ /* Exit OTP access mode */
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
} else {
ops.mode = MTD_OOB_PLACE;
ops.ooblen = len;
ops.oobbuf = buf;
ops.ooboffs = 0;
- ret = onenand_write_oob_nolock(mtd, from, &ops);
+ ret = onenand_otp_write_oob_nolock(mtd, from, &ops);
*retlen = ops.oobretlen;
}
- /* Exit OTP access mode */
- this->command(mtd, ONENAND_CMD_RESET, 0, 0);
- this->wait(mtd, FL_RESETING);
-
return ret;
}
@@ -2717,16 +3156,21 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
if (density < ONENAND_DEVICE_DENSITY_512Mb)
otp_pages = 20;
else
- otp_pages = 10;
+ otp_pages = 50;
if (mode == MTD_OTP_FACTORY) {
from += mtd->writesize * otp_pages;
- otp_pages = 64 - otp_pages;
+ otp_pages = ONENAND_PAGES_PER_BLOCK - otp_pages;
}
/* Check User/Factory boundary */
- if (((mtd->writesize * otp_pages) - (from + len)) < 0)
- return 0;
+ if (mode == MTD_OTP_USER) {
+ if (mtd->writesize * otp_pages < from + len)
+ return 0;
+ } else {
+ if (mtd->writesize * otp_pages < len)
+ return 0;
+ }
onenand_get_device(mtd, FL_OTPING);
while (len > 0 && otp_pages > 0) {
@@ -2749,13 +3193,12 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
*retlen += sizeof(struct otp_info);
} else {
size_t tmp_retlen;
- int size = len;
ret = action(mtd, from, len, &tmp_retlen, buf);
- buf += size;
- len -= size;
- *retlen += size;
+ buf += tmp_retlen;
+ len -= tmp_retlen;
+ *retlen += tmp_retlen;
if (ret)
break;
@@ -2868,21 +3311,11 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
size_t retlen;
int ret;
+ unsigned int otp_lock_offset = ONENAND_OTP_LOCK_OFFSET;
memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
: mtd->oobsize);
/*
- * Note: OTP lock operation
- * OTP block : 0xXXFC
- * 1st block : 0xXXF3 (If chip support)
- * Both : 0xXXF0 (If chip support)
- */
- if (FLEXONENAND(this))
- buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
- else
- buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
-
- /*
* Write lock mark to 8th word of sector0 of page0 of the spare0.
* We write 16 bytes spare area instead of 2 bytes.
* For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
@@ -2892,10 +3325,30 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
from = 0;
len = FLEXONENAND(this) ? mtd->writesize : 16;
+ /*
+ * Note: OTP lock operation
+ * OTP block : 0xXXFC XX 1111 1100
+ * 1st block : 0xXXF3 (If chip support) XX 1111 0011
+ * Both : 0xXXF0 (If chip support) XX 1111 0000
+ */
+ if (FLEXONENAND(this))
+ otp_lock_offset = FLEXONENAND_OTP_LOCK_OFFSET;
+
+ /* ONENAND_OTP_AREA | ONENAND_OTP_BLOCK0 | ONENAND_OTP_AREA_BLOCK0 */
+ if (otp == 1)
+ buf[otp_lock_offset] = 0xFC;
+ else if (otp == 2)
+ buf[otp_lock_offset] = 0xF3;
+ else if (otp == 3)
+ buf[otp_lock_offset] = 0xF0;
+ else if (otp != 0)
+ printk(KERN_DEBUG "[OneNAND] Invalid option selected for OTP\n");
+
ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
return ret ? : retlen;
}
+
#endif /* CONFIG_MTD_ONENAND_OTP */
/**
@@ -3172,7 +3625,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
break;
if (i != mtd->oobsize) {
- printk(KERN_WARNING "Block %d not erased.\n", block);
+ printk(KERN_WARNING "%s: Block %d not erased.\n",
+ __func__, block);
return 1;
}
}
@@ -3204,8 +3658,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
if (boundary >= blksperdie) {
- printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
- "Boundary not changed.\n");
+ printk(KERN_ERR "%s: Invalid boundary value. "
+ "Boundary not changed.\n", __func__);
return -EINVAL;
}
@@ -3214,7 +3668,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
new = boundary + (die * this->density_mask);
ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
if (ret) {
- printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ printk(KERN_ERR "%s: Please erase blocks "
+ "before boundary change\n", __func__);
return ret;
}
@@ -3227,12 +3682,12 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
thisboundary = this->read_word(this->base + ONENAND_DATARAM);
if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
- printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ printk(KERN_ERR "%s: boundary locked\n", __func__);
ret = 1;
goto out;
}
- printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ printk(KERN_INFO "Changing die %d boundary: %d%s\n",
die, boundary, lock ? "(Locked)" : "(Unlocked)");
addr = die ? this->diesize[0] : 0;
@@ -3243,7 +3698,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
ret = this->wait(mtd, FL_ERASING);
if (ret) {
- printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+ printk(KERN_ERR "%s: Failed PI erase for Die %d\n",
+ __func__, die);
goto out;
}
@@ -3251,7 +3707,8 @@ int flexonenand_set_boundary(struct mtd_info *mtd, int die,
this->command(mtd, ONENAND_CMD_PROG, addr, 0);
ret = this->wait(mtd, FL_WRITING);
if (ret) {
- printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+ printk(KERN_ERR "%s: Failed PI write for Die %d\n",
+ __func__, die);
goto out;
}
@@ -3408,8 +3865,8 @@ static void onenand_resume(struct mtd_info *mtd)
if (this->state == FL_PM_SUSPENDED)
onenand_release_device(mtd);
else
- printk(KERN_ERR "resume() called for the chip which is not"
- "in suspended state\n");
+ printk(KERN_ERR "%s: resume() called for the chip which is not "
+ "in suspended state\n", __func__);
}
/**
@@ -3464,7 +3921,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!this->page_buf) {
this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
if (!this->page_buf) {
- printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n");
+ printk(KERN_ERR "%s: Can't allocate page_buf\n",
+ __func__);
return -ENOMEM;
}
this->options |= ONENAND_PAGEBUF_ALLOC;
@@ -3472,7 +3930,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
if (!this->oob_buf) {
this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
if (!this->oob_buf) {
- printk(KERN_ERR "onenand_scan(): Can't allocate oob_buf\n");
+ printk(KERN_ERR "%s: Can't allocate oob_buf\n",
+ __func__);
if (this->options & ONENAND_PAGEBUF_ALLOC) {
this->options &= ~ONENAND_PAGEBUF_ALLOC;
kfree(this->page_buf);
@@ -3505,8 +3964,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
break;
default:
- printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n",
- mtd->oobsize);
+ printk(KERN_WARNING "%s: No OOB scheme defined for oobsize %d\n",
+ __func__, mtd->oobsize);
mtd->subpage_sft = 0;
/* To prevent kernel oops */
this->ecclayout = &onenand_oob_32;
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index c1d50133500..b44dcab940d 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_MTD_TESTS) += mtd_speedtest.o
obj-$(CONFIG_MTD_TESTS) += mtd_stresstest.o
obj-$(CONFIG_MTD_TESTS) += mtd_subpagetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_torturetest.o
+obj-$(CONFIG_MTD_TESTS) += mtd_nandecctest.o
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
new file mode 100644
index 00000000000..c1f31051784
--- /dev/null
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -0,0 +1,87 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+#include <linux/mtd/nand_ecc.h>
+
+#if defined(CONFIG_MTD_NAND) || defined(CONFIG_MTD_NAND_MODULE)
+
+static void inject_single_bit_error(void *data, size_t size)
+{
+ unsigned long offset = random32() % (size * BITS_PER_BYTE);
+
+ __change_bit(offset, data);
+}
+
+static unsigned char data[512];
+static unsigned char error_data[512];
+
+static int nand_ecc_test(const size_t size)
+{
+ unsigned char code[3];
+ unsigned char error_code[3];
+ char testname[30];
+
+ BUG_ON(sizeof(data) < size);
+
+ sprintf(testname, "nand-ecc-%zu", size);
+
+ get_random_bytes(data, size);
+
+ memcpy(error_data, data, size);
+ inject_single_bit_error(error_data, size);
+
+ __nand_calculate_ecc(data, size, code);
+ __nand_calculate_ecc(error_data, size, error_code);
+ __nand_correct_data(error_data, code, error_code, size);
+
+ if (!memcmp(data, error_data, size)) {
+ printk(KERN_INFO "mtd_nandecctest: ok - %s\n", testname);
+ return 0;
+ }
+
+ printk(KERN_ERR "mtd_nandecctest: not ok - %s\n", testname);
+
+ printk(KERN_DEBUG "hexdump of data:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ data, size, false);
+ printk(KERN_DEBUG "hexdump of error data:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ error_data, size, false);
+
+ return -1;
+}
+
+#else
+
+static int nand_ecc_test(const size_t size)
+{
+ return 0;
+}
+
+#endif
+
+static int __init ecc_test_init(void)
+{
+ srandom32(jiffies);
+
+ nand_ecc_test(256);
+ nand_ecc_test(512);
+
+ return 0;
+}
+
+static void __exit ecc_test_exit(void)
+{
+}
+
+module_init(ecc_test_init);
+module_exit(ecc_test_exit);
+
+MODULE_DESCRIPTION("NAND ECC function test module");
+MODULE_AUTHOR("Akinobu Mita");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index 5553cd4eab2..5813920e79a 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -343,7 +343,6 @@ static int scan_for_bad_eraseblocks(void)
printk(PRINT_PREF "error: cannot allocate memory\n");
return -ENOMEM;
}
- memset(bbt, 0 , ebcnt);
printk(PRINT_PREF "scanning for bad eraseblocks\n");
for (i = 0; i < ebcnt; ++i) {
@@ -392,7 +391,6 @@ static int __init mtd_oobtest_init(void)
mtd->writesize, ebcnt, pgcnt, mtd->oobsize);
err = -ENOMEM;
- mtd->erasesize = mtd->erasesize;
readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
if (!readbuf) {
printk(PRINT_PREF "error: cannot allocate memory\n");
@@ -476,18 +474,10 @@ static int __init mtd_oobtest_init(void)
use_len_max = mtd->ecclayout->oobavail;
vary_offset = 1;
simple_srand(5);
- printk(PRINT_PREF "writing OOBs of whole device\n");
- for (i = 0; i < ebcnt; ++i) {
- if (bbt[i])
- continue;
- err = write_eraseblock(i);
- if (err)
- goto out;
- if (i % 256 == 0)
- printk(PRINT_PREF "written up to eraseblock %u\n", i);
- cond_resched();
- }
- printk(PRINT_PREF "written %u eraseblocks\n", i);
+
+ err = write_whole_device();
+ if (err)
+ goto out;
/* Check all eraseblocks */
use_offset = 0;
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 103cac480fe..ce17cbe918c 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -523,6 +523,7 @@ static int __init mtd_pagetest_init(void)
do_div(tmp, mtd->erasesize);
ebcnt = tmp;
pgcnt = mtd->erasesize / mtd->writesize;
+ pgsize = mtd->writesize;
printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, "
"page size %u, count of eraseblocks %u, pages per "
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 78b7167a8ce..39db0e96815 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -837,7 +837,7 @@ static int vortex_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops vortex_pm_ops = {
+static const struct dev_pm_ops vortex_pm_ops = {
.suspend = vortex_suspend,
.resume = vortex_resume,
.freeze = vortex_suspend,
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0bbd5ae4986..e58a65391ad 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1953,6 +1953,8 @@ config BCM63XX_ENET
source "drivers/net/fs_enet/Kconfig"
+source "drivers/net/octeon/Kconfig"
+
endif # NET_ETHERNET
#
@@ -2696,6 +2698,7 @@ config NETXEN_NIC
config NIU
tristate "Sun Neptune 10Gbit Ethernet support"
depends on PCI
+ select CRC32
help
This enables support for cards based upon Sun's
Neptune chipset.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 246323d7f16..ad1346dd9da 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -285,3 +285,5 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 691b81eb0f4..c3dfbdd2cdc 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -322,7 +322,7 @@ static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
- printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n",
+ printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
bus->name, phy_id, location, val, ret);
#endif
return ret;
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index a348a22551d..efe5435bc3d 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -479,6 +479,9 @@ struct atl1c_buffer {
#define ATL1C_PCIMAP_PAGE 0x0008
#define ATL1C_PCIMAP_TYPE_MASK 0x000C
+#define ATL1C_PCIMAP_TODEVICE 0x0010
+#define ATL1C_PCIMAP_FROMDEVICE 0x0020
+#define ATL1C_PCIMAP_DIRECTION_MASK 0x0030
dma_addr_t dma;
};
@@ -487,9 +490,11 @@ struct atl1c_buffer {
((buff)->flags) |= (state); \
} while (0)
-#define ATL1C_SET_PCIMAP_TYPE(buff, type) do { \
- ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \
- ((buff)->flags) |= (type); \
+#define ATL1C_SET_PCIMAP_TYPE(buff, type, direction) do { \
+ ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \
+ ((buff)->flags) |= (type); \
+ ((buff)->flags) &= ~ATL1C_PCIMAP_DIRECTION_MASK; \
+ ((buff)->flags) |= (direction); \
} while (0)
/* transimit packet descriptor (tpd) ring */
@@ -550,6 +555,9 @@ struct atl1c_adapter {
#define __AT_TESTING 0x0001
#define __AT_RESETTING 0x0002
#define __AT_DOWN 0x0003
+ u8 work_event;
+#define ATL1C_WORK_EVENT_RESET 0x01
+#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02
u32 msg_enable;
bool have_msi;
@@ -561,8 +569,7 @@ struct atl1c_adapter {
spinlock_t tx_lock;
atomic_t irq_sem;
- struct work_struct reset_task;
- struct work_struct link_chg_task;
+ struct work_struct common_task;
struct timer_list watchdog_timer;
struct timer_list phy_config_timer;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 6eb9241cee0..2f4be59b9c0 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -198,27 +198,12 @@ static void atl1c_phy_config(unsigned long data)
void atl1c_reinit_locked(struct atl1c_adapter *adapter)
{
-
WARN_ON(in_interrupt());
atl1c_down(adapter);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
}
-static void atl1c_reset_task(struct work_struct *work)
-{
- struct atl1c_adapter *adapter;
- struct net_device *netdev;
-
- adapter = container_of(work, struct atl1c_adapter, reset_task);
- netdev = adapter->netdev;
-
- netif_device_detach(netdev);
- atl1c_down(adapter);
- atl1c_up(adapter);
- netif_device_attach(netdev);
-}
-
static void atl1c_check_link_status(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
@@ -275,18 +260,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
}
}
-/*
- * atl1c_link_chg_task - deal with link change event Out of interrupt context
- * @netdev: network interface device structure
- */
-static void atl1c_link_chg_task(struct work_struct *work)
-{
- struct atl1c_adapter *adapter;
-
- adapter = container_of(work, struct atl1c_adapter, link_chg_task);
- atl1c_check_link_status(adapter);
-}
-
static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -311,20 +284,40 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
adapter->link_speed = SPEED_0;
}
}
- schedule_work(&adapter->link_chg_task);
+
+ adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE;
+ schedule_work(&adapter->common_task);
}
-static void atl1c_del_timer(struct atl1c_adapter *adapter)
+static void atl1c_common_task(struct work_struct *work)
{
- del_timer_sync(&adapter->phy_config_timer);
+ struct atl1c_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = container_of(work, struct atl1c_adapter, common_task);
+ netdev = adapter->netdev;
+
+ if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
+ netif_device_detach(netdev);
+ atl1c_down(adapter);
+ atl1c_up(adapter);
+ netif_device_attach(netdev);
+ return;
+ }
+
+ if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
+ atl1c_check_link_status(adapter);
+
+ return;
}
-static void atl1c_cancel_work(struct atl1c_adapter *adapter)
+
+static void atl1c_del_timer(struct atl1c_adapter *adapter)
{
- cancel_work_sync(&adapter->reset_task);
- cancel_work_sync(&adapter->link_chg_task);
+ del_timer_sync(&adapter->phy_config_timer);
}
+
/*
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
@@ -334,7 +327,8 @@ static void atl1c_tx_timeout(struct net_device *netdev)
struct atl1c_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
- schedule_work(&adapter->reset_task);
+ adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+ schedule_work(&adapter->common_task);
}
/*
@@ -713,15 +707,21 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
struct atl1c_buffer *buffer_info, int in_irq)
{
+ u16 pci_direction;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
return;
if (buffer_info->dma) {
+ if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
+ pci_direction = PCI_DMA_FROMDEVICE;
+ else
+ pci_direction = PCI_DMA_TODEVICE;
+
if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
pci_unmap_single(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->length, pci_direction);
else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->length, pci_direction);
}
if (buffer_info->skb) {
if (in_irq)
@@ -1533,7 +1533,8 @@ static irqreturn_t atl1c_intr(int irq, void *data)
/* reset MAC */
hw->intr_mask &= ~ISR_ERROR;
AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
- schedule_work(&adapter->reset_task);
+ adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+ schedule_work(&adapter->common_task);
break;
}
@@ -1606,7 +1607,8 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
buffer_info->dma = pci_map_single(pdev, vir_addr,
buffer_info->length,
PCI_DMA_FROMDEVICE);
- ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
+ ATL1C_PCIMAP_FROMDEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_next_to_use = next_next;
if (++next_next == rfd_ring->count)
@@ -1967,7 +1969,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
- ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
+ ATL1C_PCIMAP_TODEVICE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
@@ -1988,7 +1991,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
pci_map_single(adapter->pdev, skb->data + mapped_len,
buffer_info->length, PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
- ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
+ ATL1C_PCIMAP_TODEVICE);
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
@@ -2009,7 +2013,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info->length,
PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
- ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE);
+ ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
+ ATL1C_PCIMAP_TODEVICE);
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
@@ -2198,8 +2203,7 @@ void atl1c_down(struct atl1c_adapter *adapter)
struct net_device *netdev = adapter->netdev;
atl1c_del_timer(adapter);
- atl1c_cancel_work(adapter);
-
+ adapter->work_event = 0; /* clear all event */
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
set_bit(__AT_DOWN, &adapter->flags);
@@ -2599,8 +2603,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
atl1c_hw_set_mac_addr(&adapter->hw);
- INIT_WORK(&adapter->reset_task, atl1c_reset_task);
- INIT_WORK(&adapter->link_chg_task, atl1c_link_chg_task);
+ INIT_WORK(&adapter->common_task, atl1c_common_task);
+ adapter->work_event = 0;
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "register netdevice failed\n");
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 2a9132343b6..4869adb6958 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1505,8 +1505,7 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
for (k = 0; k< ethaddr_bytes; k++) {
ppattern[offset + magicsync +
(j * ETH_ALEN) + k] = macaddr[k];
- len++;
- set_bit(len, (unsigned long *) pmask);
+ set_bit(len++, (unsigned long *) pmask);
}
}
return len - 1;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 1f6c5486d71..0bd47d32ec4 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1245,9 +1245,15 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev,
drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
-static int bcm_enet_get_stats_count(struct net_device *netdev)
+static int bcm_enet_get_sset_count(struct net_device *netdev,
+ int string_set)
{
- return BCM_ENET_STATS_LEN;
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_ENET_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
}
static void bcm_enet_get_strings(struct net_device *netdev,
@@ -1473,7 +1479,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev,
static struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
- .get_stats_count = bcm_enet_get_stats_count,
+ .get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.get_settings = bcm_enet_get_settings,
.set_settings = bcm_enet_set_settings,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 24c7d9900ba..3a1f7902c16 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -759,7 +759,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
- if ((adapter->cap == 0x400) && !vtm)
+ if ((adapter->cap & 0x400) && !vtm)
vlanf = 0;
skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
@@ -816,7 +816,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
/* vlanf could be wrongly set in some cards.
* ignore if vtm is not set */
- if ((adapter->cap == 0x400) && !vtm)
+ if ((adapter->cap & 0x400) && !vtm)
vlanf = 0;
skb = napi_get_frags(&eq_obj->napi);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4bfc8081292..65df1de447e 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -653,12 +653,20 @@ static void
bnx2_netif_stop(struct bnx2 *bp)
{
bnx2_cnic_stop(bp);
- bnx2_disable_int_sync(bp);
if (netif_running(bp->dev)) {
+ int i;
+
bnx2_napi_disable(bp);
netif_tx_disable(bp->dev);
- bp->dev->trans_start = jiffies; /* prevent tx timeout */
+ /* prevent tx timeout */
+ for (i = 0; i < bp->dev->num_tx_queues; i++) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(bp->dev, i);
+ txq->trans_start = jiffies;
+ }
}
+ bnx2_disable_int_sync(bp);
}
static void
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d69e6838f21..0fb7a4964e7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
@@ -352,7 +354,8 @@ static u16 __get_link_speed(struct port *port)
}
}
- pr_debug("Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed);
+ pr_debug("Port %d Received link speed %d update from adapter\n",
+ port->actor_port_number, speed);
return speed;
}
@@ -378,12 +381,14 @@ static u8 __get_duplex(struct port *port)
switch (slave->duplex) {
case DUPLEX_FULL:
retval=0x1;
- pr_debug("Port %d Received status full duplex update from adapter\n", port->actor_port_number);
+ pr_debug("Port %d Received status full duplex update from adapter\n",
+ port->actor_port_number);
break;
case DUPLEX_HALF:
default:
retval=0x0;
- pr_debug("Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number);
+ pr_debug("Port %d Received status NOT full duplex update from adapter\n",
+ port->actor_port_number);
break;
}
}
@@ -980,7 +985,9 @@ static void ad_mux_machine(struct port *port)
// check if the state machine was changed
if (port->sm_mux_state != last_state) {
- pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_mux_state);
+ pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number, last_state,
+ port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
__detach_bond_from_agg(port);
@@ -1079,7 +1086,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
// check if the State machine was changed or new lacpdu arrived
if ((port->sm_rx_state != last_state) || (lacpdu)) {
- pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_rx_state);
+ pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number, last_state,
+ port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) {
@@ -1126,9 +1135,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
// detect loopback situation
if (!MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->actor_system))) {
// INFO_RECEIVED_LOOPBACK_FRAMES
- pr_err(DRV_NAME ": %s: An illegal loopback occurred on "
- "adapter (%s). Check the configuration to verify that all "
- "Adapters are connected to 802.3ad compliant switch ports\n",
+ pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
+ "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->dev->master->name, port->slave->dev->name);
__release_rx_machine_lock(port);
return;
@@ -1166,7 +1174,8 @@ static void ad_tx_machine(struct port *port)
__update_lacpdu_from_port(port);
if (ad_lacpdu_send(port) >= 0) {
- pr_debug("Sent LACPDU on port %d\n", port->actor_port_number);
+ pr_debug("Sent LACPDU on port %d\n",
+ port->actor_port_number);
/* mark ntt as false, so it will not be sent again until
demanded */
@@ -1241,7 +1250,9 @@ static void ad_periodic_machine(struct port *port)
// check if the state machine was changed
if (port->sm_periodic_state != last_state) {
- pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_periodic_state);
+ pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n",
+ port->actor_port_number, last_state,
+ port->sm_periodic_state);
switch (port->sm_periodic_state) {
case AD_NO_PERIODIC:
port->sm_periodic_timer_counter = 0; // zero timer
@@ -1298,7 +1309,9 @@ static void ad_port_selection_logic(struct port *port)
port->next_port_in_aggregator=NULL;
port->actor_port_aggregator_identifier=0;
- pr_debug("Port %d left LAG %d\n", port->actor_port_number, temp_aggregator->aggregator_identifier);
+ pr_debug("Port %d left LAG %d\n",
+ port->actor_port_number,
+ temp_aggregator->aggregator_identifier);
// if the aggregator is empty, clear its parameters, and set it ready to be attached
if (!temp_aggregator->lag_ports) {
ad_clear_agg(temp_aggregator);
@@ -1307,9 +1320,7 @@ static void ad_port_selection_logic(struct port *port)
}
}
if (!curr_port) { // meaning: the port was related to an aggregator but was not on the aggregator port list
- pr_warning(DRV_NAME ": %s: Warning: Port %d (on %s) "
- "was related to aggregator %d but was not "
- "on its port list\n",
+ pr_warning("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
port->slave->dev->master->name,
port->actor_port_number,
port->slave->dev->name,
@@ -1343,7 +1354,9 @@ static void ad_port_selection_logic(struct port *port)
port->next_port_in_aggregator=aggregator->lag_ports;
port->aggregator->num_of_ports++;
aggregator->lag_ports=port;
- pr_debug("Port %d joined LAG %d(existing LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Port %d joined LAG %d(existing LAG)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
// mark this port as selected
port->sm_vars |= AD_PORT_SELECTED;
@@ -1380,10 +1393,11 @@ static void ad_port_selection_logic(struct port *port)
// mark this port as selected
port->sm_vars |= AD_PORT_SELECTED;
- pr_debug("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Port %d joined LAG %d(new LAG)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
} else {
- pr_err(DRV_NAME ": %s: Port %d (on %s) did not find "
- "a suitable aggregator\n",
+ pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
port->slave->dev->master->name,
port->actor_port_number, port->slave->dev->name);
}
@@ -1460,8 +1474,7 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
break;
default:
- pr_warning(DRV_NAME
- ": %s: Impossible agg select mode %d\n",
+ pr_warning("%s: Impossible agg select mode %d\n",
curr->slave->dev->master->name,
__get_agg_selection_mode(curr->lag_ports));
break;
@@ -1546,40 +1559,38 @@ static void ad_agg_selection_logic(struct aggregator *agg)
// if there is new best aggregator, activate it
if (best) {
pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
- best->aggregator_identifier, best->num_of_ports,
- best->actor_oper_aggregator_key,
- best->partner_oper_aggregator_key,
- best->is_individual, best->is_active);
+ best->aggregator_identifier, best->num_of_ports,
+ best->actor_oper_aggregator_key,
+ best->partner_oper_aggregator_key,
+ best->is_individual, best->is_active);
pr_debug("best ports %p slave %p %s\n",
- best->lag_ports, best->slave,
- best->slave ? best->slave->dev->name : "NULL");
+ best->lag_ports, best->slave,
+ best->slave ? best->slave->dev->name : "NULL");
for (agg = __get_first_agg(best->lag_ports); agg;
agg = __get_next_agg(agg)) {
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
- agg->aggregator_identifier, agg->num_of_ports,
- agg->actor_oper_aggregator_key,
- agg->partner_oper_aggregator_key,
- agg->is_individual, agg->is_active);
+ agg->aggregator_identifier, agg->num_of_ports,
+ agg->actor_oper_aggregator_key,
+ agg->partner_oper_aggregator_key,
+ agg->is_individual, agg->is_active);
}
// check if any partner replys
if (best->is_individual) {
- pr_warning(DRV_NAME ": %s: Warning: No 802.3ad"
- " response from the link partner for any"
- " adapters in the bond\n",
- best->slave->dev->master->name);
+ pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+ best->slave->dev->master->name);
}
best->is_active = 1;
pr_debug("LAG %d chosen as the active LAG\n",
- best->aggregator_identifier);
+ best->aggregator_identifier);
pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
- best->aggregator_identifier, best->num_of_ports,
- best->actor_oper_aggregator_key,
- best->partner_oper_aggregator_key,
- best->is_individual, best->is_active);
+ best->aggregator_identifier, best->num_of_ports,
+ best->actor_oper_aggregator_key,
+ best->partner_oper_aggregator_key,
+ best->is_individual, best->is_active);
// disable the ports that were related to the former active_aggregator
if (active) {
@@ -1633,7 +1644,8 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->lag_ports = NULL;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
- pr_debug("LAG %d was cleared\n", aggregator->aggregator_identifier);
+ pr_debug("LAG %d was cleared\n",
+ aggregator->aggregator_identifier);
}
}
@@ -1728,7 +1740,9 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
static void ad_enable_collecting_distributing(struct port *port)
{
if (port->aggregator->is_active) {
- pr_debug("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Enabling port %d(LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
__enable_port(port);
}
}
@@ -1741,7 +1755,9 @@ static void ad_enable_collecting_distributing(struct port *port)
static void ad_disable_collecting_distributing(struct port *port)
{
if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
- pr_debug("Disabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
+ pr_debug("Disabling port %d(LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
__disable_port(port);
}
}
@@ -1779,7 +1795,8 @@ static void ad_marker_info_send(struct port *port)
// send the marker information
if (ad_marker_send(port, &marker) >= 0) {
- pr_debug("Sent Marker Information on port %d\n", port->actor_port_number);
+ pr_debug("Sent Marker Information on port %d\n",
+ port->actor_port_number);
}
}
#endif
@@ -1803,7 +1820,8 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
// send the marker response
if (ad_marker_send(port, &marker) >= 0) {
- pr_debug("Sent Marker Response on port %d\n", port->actor_port_number);
+ pr_debug("Sent Marker Response on port %d\n",
+ port->actor_port_number);
}
}
@@ -1889,8 +1907,7 @@ int bond_3ad_bind_slave(struct slave *slave)
struct aggregator *aggregator;
if (bond == NULL) {
- pr_err(DRV_NAME ": %s: The slave %s is not attached to "
- "its bond\n",
+ pr_err("%s: The slave %s is not attached to its bond\n",
slave->dev->master->name, slave->dev->name);
return -1;
}
@@ -1966,13 +1983,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": Warning: %s: Trying to "
- "unbind an uninitialized port on %s\n",
+ pr_warning("Warning: %s: Trying to unbind an uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
- pr_debug("Unbinding Link Aggregation Group %d\n", aggregator->aggregator_identifier);
+ pr_debug("Unbinding Link Aggregation Group %d\n",
+ aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
@@ -1996,10 +2013,12 @@ void bond_3ad_unbind_slave(struct slave *slave)
// if new aggregator found, copy the aggregator's parameters
// and connect the related lag_ports to the new aggregator
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
- pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier);
+ pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
+ aggregator->aggregator_identifier,
+ new_aggregator->aggregator_identifier);
if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
- pr_info(DRV_NAME ": %s: Removing an active aggregator\n",
+ pr_info("%s: Removing an active aggregator\n",
aggregator->slave->dev->master->name);
// select new active aggregator
select_new_active_agg = 1;
@@ -2030,8 +2049,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
ad_agg_selection_logic(__get_first_agg(port));
}
} else {
- pr_warning(DRV_NAME ": %s: Warning: unbinding aggregator, "
- "and could not find a new aggregator for its ports\n",
+ pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
slave->dev->master->name);
}
} else { // in case that the only port related to this aggregator is the one we want to remove
@@ -2039,7 +2057,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
// clear the aggregator
ad_clear_agg(aggregator);
if (select_new_active_agg) {
- pr_info(DRV_NAME ": %s: Removing an active aggregator\n",
+ pr_info("%s: Removing an active aggregator\n",
slave->dev->master->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
@@ -2066,7 +2084,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
// clear the aggregator
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
- pr_info(DRV_NAME ": %s: Removing an active aggregator\n",
+ pr_info("%s: Removing an active aggregator\n",
slave->dev->master->name);
// select new active aggregator
ad_agg_selection_logic(__get_first_agg(port));
@@ -2115,8 +2133,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
// select the active aggregator for the bond
if ((port = __get_first_port(bond))) {
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: bond's first port is "
- "uninitialized\n", bond->dev->name);
+ pr_warning("%s: Warning: bond's first port is uninitialized\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2129,8 +2147,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
// for each port run the state machines
for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: Found an uninitialized "
- "port\n", bond->dev->name);
+ pr_warning("%s: Warning: Found an uninitialized port\n",
+ bond->dev->name);
goto re_arm;
}
@@ -2171,15 +2189,15 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
port = &(SLAVE_AD_INFO(slave).port);
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: port of slave %s "
- "is uninitialized\n",
+ pr_warning("%s: Warning: port of slave %s is uninitialized\n",
slave->dev->name, slave->dev->master->name);
return;
}
switch (lacpdu->subtype) {
case AD_TYPE_LACPDU:
- pr_debug("Received LACPDU on port %d\n", port->actor_port_number);
+ pr_debug("Received LACPDU on port %d\n",
+ port->actor_port_number);
ad_rx_machine(lacpdu, port);
break;
@@ -2188,17 +2206,20 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
switch (((struct bond_marker *)lacpdu)->tlv_type) {
case AD_MARKER_INFORMATION_SUBTYPE:
- pr_debug("Received Marker Information on port %d\n", port->actor_port_number);
+ pr_debug("Received Marker Information on port %d\n",
+ port->actor_port_number);
ad_marker_info_received((struct bond_marker *)lacpdu, port);
break;
case AD_MARKER_RESPONSE_SUBTYPE:
- pr_debug("Received Marker Response on port %d\n", port->actor_port_number);
+ pr_debug("Received Marker Response on port %d\n",
+ port->actor_port_number);
ad_marker_response_received((struct bond_marker *)lacpdu, port);
break;
default:
- pr_debug("Received an unknown Marker subtype on slot %d\n", port->actor_port_number);
+ pr_debug("Received an unknown Marker subtype on slot %d\n",
+ port->actor_port_number);
}
}
}
@@ -2218,8 +2239,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": Warning: %s: speed "
- "changed for uninitialized port on %s\n",
+ pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
@@ -2246,8 +2266,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": %s: Warning: duplex changed "
- "for uninitialized port on %s\n",
+ pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
@@ -2275,8 +2294,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
// if slave is null, the whole port is not initialized
if (!port->slave) {
- pr_warning(DRV_NAME ": Warning: %s: link status changed for "
- "uninitialized port on %s\n",
+ pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
slave->dev->master->name, slave->dev->name);
return;
}
@@ -2381,8 +2399,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
- pr_debug(DRV_NAME ": %s: Error: "
- "bond_3ad_get_active_agg_info failed\n", dev->name);
+ pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
+ dev->name);
goto out;
}
@@ -2391,8 +2409,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
if (slaves_in_agg == 0) {
/*the aggregator is empty*/
- pr_debug(DRV_NAME ": %s: Error: active aggregator is empty\n",
- dev->name);
+ pr_debug("%s: Error: active aggregator is empty\n", dev->name);
goto out;
}
@@ -2410,8 +2427,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
if (slave_agg_no >= 0) {
- pr_err(DRV_NAME ": %s: Error: Couldn't find a slave to tx on "
- "for aggregator ID %d\n", dev->name, agg_id);
+ pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
+ dev->name, agg_id);
goto out;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 00ab51ef312..40fdc41446c 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -201,8 +203,7 @@ static int tlb_initialize(struct bonding *bond)
new_hashtbl = kzalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to allocate TLB hash table\n",
+ pr_err("%s: Error: Failed to allocate TLB hash table\n",
bond->dev->name);
return -1;
}
@@ -514,8 +515,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
client_info->slave->dev->dev_addr,
client_info->mac_dst);
if (!skb) {
- pr_err(DRV_NAME
- ": %s: Error: failed to create an ARP packet\n",
+ pr_err("%s: Error: failed to create an ARP packet\n",
client_info->slave->dev->master->name);
continue;
}
@@ -525,8 +525,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
if (client_info->tag) {
skb = vlan_put_tag(skb, client_info->vlan_id);
if (!skb) {
- pr_err(DRV_NAME
- ": %s: Error: failed to insert VLAN tag\n",
+ pr_err("%s: Error: failed to insert VLAN tag\n",
client_info->slave->dev->master->name);
continue;
}
@@ -609,9 +608,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (!client_info->slave) {
- pr_err(DRV_NAME
- ": %s: Error: found a client with no channel in "
- "the client's hash table\n",
+ pr_err("%s: Error: found a client with no channel in the client's hash table\n",
bond->dev->name);
continue;
}
@@ -806,8 +803,7 @@ static int rlb_initialize(struct bonding *bond)
new_hashtbl = kmalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to allocate RLB hash table\n",
+ pr_err("%s: Error: Failed to allocate RLB hash table\n",
bond->dev->name);
return -1;
}
@@ -928,8 +924,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
skb = vlan_put_tag(skb, vlan->vlan_id);
if (!skb) {
- pr_err(DRV_NAME
- ": %s: Error: failed to insert VLAN tag\n",
+ pr_err("%s: Error: failed to insert VLAN tag\n",
bond->dev->name);
continue;
}
@@ -958,11 +953,8 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
memcpy(s_addr.sa_data, addr, dev->addr_len);
s_addr.sa_family = dev->type;
if (dev_set_mac_address(dev, &s_addr)) {
- pr_err(DRV_NAME
- ": %s: Error: dev_set_mac_address of dev %s failed! ALB "
- "mode requires that the base driver support setting "
- "the hw address also when the network device's "
- "interface is open\n",
+ pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
+ "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
dev->master->name, dev->name);
return -EOPNOTSUPP;
}
@@ -1169,18 +1161,12 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
bond->alb_info.rlb_enabled);
- pr_warning(DRV_NAME
- ": %s: Warning: the hw address of slave %s is "
- "in use by the bond; giving it the hw address "
- "of %s\n",
+ pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
bond->dev->name, slave->dev->name,
free_mac_slave->dev->name);
} else if (has_bond_addr) {
- pr_err(DRV_NAME
- ": %s: Error: the hw address of slave %s is in use by the "
- "bond; couldn't find a slave with a free hw address to "
- "give it (this should not have happened)\n",
+ pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
bond->dev->name, slave->dev->name);
return -EFAULT;
}
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index b72e1dc8cf8..6dd64cf3cb7 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <net/ipv6.h>
@@ -74,20 +76,20 @@ static void bond_na_send(struct net_device *slave_dev,
addrconf_addr_solict_mult(daddr, &mcaddr);
pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
- slave_dev->name, &mcaddr, daddr);
+ slave_dev->name, &mcaddr, daddr);
skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
ND_OPT_TARGET_LL_ADDR);
if (!skb) {
- pr_err(DRV_NAME ": NA packet allocation failed\n");
+ pr_err("NA packet allocation failed\n");
return;
}
if (vlan_id) {
skb = vlan_put_tag(skb, vlan_id);
if (!skb) {
- pr_err(DRV_NAME ": failed to insert VLAN tag\n");
+ pr_err("failed to insert VLAN tag\n");
return;
}
}
@@ -109,8 +111,8 @@ void bond_send_unsolicited_na(struct bonding *bond)
struct inet6_dev *idev;
int is_router;
- pr_debug("bond_send_unsol_na: bond %s slave %s\n", bond->dev->name,
- slave ? slave->dev->name : "NULL");
+ pr_debug("%s: bond %s slave %s\n", bond->dev->name,
+ __func__, slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_unsol_na ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index af9b9c4eb49..3f0071cfe56 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -31,6 +31,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -260,7 +262,7 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
struct vlan_entry *vlan;
pr_debug("bond: %s, vlan id %d\n",
- (bond ? bond->dev->name : "None"), vlan_id);
+ (bond ? bond->dev->name : "None"), vlan_id);
vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
if (!vlan)
@@ -303,8 +305,8 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
if (bond_is_lb(bond))
bond_alb_clear_vlan(bond, vlan_id);
- pr_debug("removed VLAN ID %d from bond %s\n", vlan_id,
- bond->dev->name);
+ pr_debug("removed VLAN ID %d from bond %s\n",
+ vlan_id, bond->dev->name);
kfree(vlan);
@@ -323,8 +325,8 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
}
}
- pr_debug("couldn't find VLAN ID %d in bond %s\n", vlan_id,
- bond->dev->name);
+ pr_debug("couldn't find VLAN ID %d in bond %s\n",
+ vlan_id, bond->dev->name);
out:
write_unlock_bh(&bond->lock);
@@ -348,7 +350,7 @@ static int bond_has_challenged_slaves(struct bonding *bond)
bond_for_each_slave(bond, slave, i) {
if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_debug("found VLAN challenged slave - %s\n",
- slave->dev->name);
+ slave->dev->name);
return 1;
}
}
@@ -499,8 +501,7 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
res = bond_add_vlan(bond, vid);
if (res) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to add vlan id %d\n",
+ pr_err("%s: Error: Failed to add vlan id %d\n",
bond_dev->name, vid);
}
}
@@ -534,8 +535,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
res = bond_del_vlan(bond, vid);
if (res) {
- pr_err(DRV_NAME
- ": %s: Error: Failed to remove vlan id %d\n",
+ pr_err("%s: Error: Failed to remove vlan id %d\n",
bond_dev->name, vid);
}
}
@@ -1053,8 +1053,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(new_active->dev, &saddr);
if (rv) {
- pr_err(DRV_NAME
- ": %s: Error %d setting MAC of slave %s\n",
+ pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
goto out;
}
@@ -1067,16 +1066,14 @@ static void bond_do_fail_over_mac(struct bonding *bond,
rv = dev_set_mac_address(old_active->dev, &saddr);
if (rv)
- pr_err(DRV_NAME
- ": %s: Error %d setting MAC of slave %s\n",
+ pr_err("%s: Error %d setting MAC of slave %s\n",
bond->dev->name, -rv, new_active->dev->name);
out:
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
break;
default:
- pr_err(DRV_NAME
- ": %s: bond_do_fail_over_mac impossible: bad policy %d\n",
+ pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n",
bond->dev->name, bond->params.fail_over_mac);
break;
}
@@ -1178,11 +1175,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (new_active->link == BOND_LINK_BACK) {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info(DRV_NAME
- ": %s: making interface %s the new "
- "active one %d ms earlier.\n",
- bond->dev->name, new_active->dev->name,
- (bond->params.updelay - new_active->delay) * bond->params.miimon);
+ pr_info("%s: making interface %s the new active one %d ms earlier.\n",
+ bond->dev->name, new_active->dev->name,
+ (bond->params.updelay - new_active->delay) * bond->params.miimon);
}
new_active->delay = 0;
@@ -1195,10 +1190,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info(DRV_NAME
- ": %s: making interface %s the new "
- "active one.\n",
- bond->dev->name, new_active->dev->name);
+ pr_info("%s: making interface %s the new active one.\n",
+ bond->dev->name, new_active->dev->name);
}
}
}
@@ -1268,13 +1261,11 @@ void bond_select_active_slave(struct bonding *bond)
return;
if (netif_carrier_ok(bond->dev)) {
- pr_info(DRV_NAME
- ": %s: first active interface up!\n",
- bond->dev->name);
+ pr_info("%s: first active interface up!\n",
+ bond->dev->name);
} else {
- pr_info(DRV_NAME ": %s: "
- "now running without any active interface !\n",
- bond->dev->name);
+ pr_info("%s: now running without any active interface !\n",
+ bond->dev->name);
}
}
}
@@ -1423,16 +1414,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- pr_warning(DRV_NAME
- ": %s: Warning: no link monitoring support for %s\n",
- bond_dev->name, slave_dev->name);
+ pr_warning("%s: Warning: no link monitoring support for %s\n",
+ bond_dev->name, slave_dev->name);
}
/* bond must be initialized by bond_open() before enslaving */
if (!(bond_dev->flags & IFF_UP)) {
- pr_warning(DRV_NAME
- " %s: master_dev is not up in bond_enslave\n",
- bond_dev->name);
+ pr_warning("%s: master_dev is not up in bond_enslave\n",
+ bond_dev->name);
}
/* already enslaved */
@@ -1446,19 +1435,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
if (!list_empty(&bond->vlan_list)) {
- pr_err(DRV_NAME
- ": %s: Error: cannot enslave VLAN "
- "challenged slave %s on VLAN enabled "
- "bond %s\n", bond_dev->name, slave_dev->name,
- bond_dev->name);
+ pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
+ bond_dev->name, slave_dev->name, bond_dev->name);
return -EPERM;
} else {
- pr_warning(DRV_NAME
- ": %s: Warning: enslaved VLAN challenged "
- "slave %s. Adding VLANs will be blocked as "
- "long as %s is part of bond %s\n",
- bond_dev->name, slave_dev->name, slave_dev->name,
- bond_dev->name);
+ pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+ bond_dev->name, slave_dev->name,
+ slave_dev->name, bond_dev->name);
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
}
} else {
@@ -1478,8 +1461,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* enslaving it; the old ifenslave will not.
*/
if ((slave_dev->flags & IFF_UP)) {
- pr_err(DRV_NAME ": %s is up. "
- "This may be due to an out of date ifenslave.\n",
+ pr_err("%s is up. This may be due to an out of date ifenslave.\n",
slave_dev->name);
res = -EPERM;
goto err_undo_flags;
@@ -1495,7 +1477,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond->slave_cnt == 0) {
if (bond_dev->type != slave_dev->type) {
pr_debug("%s: change device type from %d to %d\n",
- bond_dev->name, bond_dev->type, slave_dev->type);
+ bond_dev->name,
+ bond_dev->type, slave_dev->type);
netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
@@ -1507,28 +1490,21 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
}
} else if (bond_dev->type != slave_dev->type) {
- pr_err(DRV_NAME ": %s ether type (%d) is different "
- "from other slaves (%d), can not enslave it.\n",
- slave_dev->name,
- slave_dev->type, bond_dev->type);
- res = -EINVAL;
- goto err_undo_flags;
+ pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
+ slave_dev->name,
+ slave_dev->type, bond_dev->type);
+ res = -EINVAL;
+ goto err_undo_flags;
}
if (slave_ops->ndo_set_mac_address == NULL) {
if (bond->slave_cnt == 0) {
- pr_warning(DRV_NAME
- ": %s: Warning: The first slave device "
- "specified does not support setting the MAC "
- "address. Setting fail_over_mac to active.",
- bond_dev->name);
+ pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
+ bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
- pr_err(DRV_NAME
- ": %s: Error: The slave device specified "
- "does not support setting the MAC address, "
- "but fail_over_mac is not set to active.\n"
- , bond_dev->name);
+ pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
+ bond_dev->name);
res = -EOPNOTSUPP;
goto err_undo_flags;
}
@@ -1655,22 +1631,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- pr_warning(DRV_NAME
- ": %s: Warning: MII and ETHTOOL support not "
- "available for interface %s, and "
- "arp_interval/arp_ip_target module parameters "
- "not specified, thus bonding will not detect "
- "link failures! see bonding.txt for details.\n",
+ pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
bond_dev->name, slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- pr_warning(DRV_NAME
- ": %s: Warning: can't get link status from "
- "interface %s; the network driver associated "
- "with this interface does not support MII or "
- "ETHTOOL link status reporting, thus miimon "
- "has no effect on this interface.\n",
- bond_dev->name, slave_dev->name);
+ pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
+ bond_dev->name, slave_dev->name);
}
}
@@ -1678,34 +1644,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.miimon ||
(bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
if (bond->params.updelay) {
- pr_debug("Initial state of slave_dev is "
- "BOND_LINK_BACK\n");
+ pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n");
new_slave->link = BOND_LINK_BACK;
new_slave->delay = bond->params.updelay;
} else {
- pr_debug("Initial state of slave_dev is "
- "BOND_LINK_UP\n");
+ pr_debug("Initial state of slave_dev is BOND_LINK_UP\n");
new_slave->link = BOND_LINK_UP;
}
new_slave->jiffies = jiffies;
} else {
- pr_debug("Initial state of slave_dev is "
- "BOND_LINK_DOWN\n");
+ pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n");
new_slave->link = BOND_LINK_DOWN;
}
if (bond_update_speed_duplex(new_slave) &&
(new_slave->link != BOND_LINK_DOWN)) {
- pr_warning(DRV_NAME
- ": %s: Warning: failed to get speed and duplex from %s, "
- "assumed to be 100Mb/sec and Full.\n",
- bond_dev->name, new_slave->dev->name);
+ pr_warning("%s: Warning: failed to get speed and duplex from %s, assumed to be 100Mb/sec and Full.\n",
+ bond_dev->name, new_slave->dev->name);
if (bond->params.mode == BOND_MODE_8023AD) {
- pr_warning(DRV_NAME
- ": %s: Warning: Operation of 802.3ad mode requires ETHTOOL "
- "support in base driver for proper aggregator "
- "selection.\n", bond_dev->name);
+ pr_warning("%s: Warning: Operation of 802.3ad mode requires ETHTOOL support in base driver for proper aggregator selection.\n",
+ bond_dev->name);
}
}
@@ -1777,11 +1736,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (res)
goto err_close;
- pr_info(DRV_NAME
- ": %s: enslaving %s as a%s interface with a%s link.\n",
- bond_dev->name, slave_dev->name,
- new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
- new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+ pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
+ bond_dev->name, slave_dev->name,
+ new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup",
+ new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
/* enslave is successful */
return 0;
@@ -1833,8 +1791,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
(slave_dev->master != bond_dev)) {
- pr_err(DRV_NAME
- ": %s: Error: cannot release %s.\n",
+ pr_err("%s: Error: cannot release %s.\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
}
@@ -1844,9 +1801,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
- pr_info(DRV_NAME
- ": %s: %s not enslaved\n",
- bond_dev->name, slave_dev->name);
+ pr_info("%s: %s not enslaved\n",
+ bond_dev->name, slave_dev->name);
write_unlock_bh(&bond->lock);
return -EINVAL;
}
@@ -1854,14 +1810,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.fail_over_mac) {
if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond->slave_cnt > 1)
- pr_warning(DRV_NAME
- ": %s: Warning: the permanent HWaddr of %s - "
- "%pM - is still in use by %s. "
- "Set the HWaddr of %s to a different address "
- "to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
}
/* Inform AD package of unbinding of slave. */
@@ -1872,12 +1824,10 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
bond_3ad_unbind_slave(slave);
}
- pr_info(DRV_NAME
- ": %s: releasing %s interface %s\n",
- bond_dev->name,
- (slave->state == BOND_STATE_ACTIVE)
- ? "active" : "backup",
- slave_dev->name);
+ pr_info("%s: releasing %s interface %s\n",
+ bond_dev->name,
+ (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup",
+ slave_dev->name);
oldcurrent = bond->curr_active_slave;
@@ -1934,21 +1884,15 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
if (list_empty(&bond->vlan_list)) {
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
} else {
- pr_warning(DRV_NAME
- ": %s: Warning: clearing HW address of %s while it "
- "still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning(DRV_NAME
- ": %s: When re-adding slaves, make sure the bond's "
- "HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
+ bond_dev->name, bond_dev->name);
+ pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
+ bond_dev->name);
}
} else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
!bond_has_challenged_slaves(bond)) {
- pr_info(DRV_NAME
- ": %s: last VLAN challenged slave %s "
- "left bond %s. VLAN blocking is removed\n",
- bond_dev->name, slave_dev->name, bond_dev->name);
+ pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ bond_dev->name, slave_dev->name, bond_dev->name);
bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
}
@@ -2011,8 +1955,8 @@ int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if ((ret == 0) && (bond->slave_cnt == 0)) {
- pr_info(DRV_NAME ": %s: destroying bond %s.\n",
- bond_dev->name, bond_dev->name);
+ pr_info("%s: destroying bond %s.\n",
+ bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
}
return ret;
@@ -2116,19 +2060,13 @@ static int bond_release_all(struct net_device *bond_dev)
if (list_empty(&bond->vlan_list))
bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
else {
- pr_warning(DRV_NAME
- ": %s: Warning: clearing HW address of %s while it "
- "still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning(DRV_NAME
- ": %s: When re-adding slaves, make sure the bond's "
- "HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
+ bond_dev->name, bond_dev->name);
+ pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
+ bond_dev->name);
}
- pr_info(DRV_NAME
- ": %s: released all slaves\n",
- bond_dev->name);
+ pr_info("%s: released all slaves\n", bond_dev->name);
out:
write_unlock_bh(&bond->lock);
@@ -2254,16 +2192,14 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->link = BOND_LINK_FAIL;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- pr_info(DRV_NAME
- ": %s: link status down for %s"
- "interface %s, disabling it in %d ms.\n",
- bond->dev->name,
- (bond->params.mode ==
- BOND_MODE_ACTIVEBACKUP) ?
- ((slave->state == BOND_STATE_ACTIVE) ?
- "active " : "backup ") : "",
- slave->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
+ bond->dev->name,
+ (bond->params.mode ==
+ BOND_MODE_ACTIVEBACKUP) ?
+ ((slave->state == BOND_STATE_ACTIVE) ?
+ "active " : "backup ") : "",
+ slave->dev->name,
+ bond->params.downdelay * bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_FAIL:
@@ -2273,13 +2209,11 @@ static int bond_miimon_inspect(struct bonding *bond)
*/
slave->link = BOND_LINK_UP;
slave->jiffies = jiffies;
- pr_info(DRV_NAME
- ": %s: link status up again after %d "
- "ms for interface %s.\n",
- bond->dev->name,
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ pr_info("%s: link status up again after %d ms for interface %s.\n",
+ bond->dev->name,
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon,
+ slave->dev->name);
continue;
}
@@ -2300,25 +2234,21 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- pr_info(DRV_NAME
- ": %s: link status up for "
- "interface %s, enabling it in %d ms.\n",
- bond->dev->name, slave->dev->name,
- ignore_updelay ? 0 :
- bond->params.updelay *
- bond->params.miimon);
+ pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
+ bond->dev->name, slave->dev->name,
+ ignore_updelay ? 0 :
+ bond->params.updelay *
+ bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
- pr_info(DRV_NAME
- ": %s: link status down again after %d "
- "ms for interface %s.\n",
- bond->dev->name,
- (bond->params.updelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ pr_info("%s: link status down again after %d ms for interface %s.\n",
+ bond->dev->name,
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon,
+ slave->dev->name);
continue;
}
@@ -2366,10 +2296,8 @@ static void bond_miimon_commit(struct bonding *bond)
slave->state = BOND_STATE_BACKUP;
}
- pr_info(DRV_NAME
- ": %s: link status definitely "
- "up for interface %s.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: link status definitely up for interface %s.\n",
+ bond->dev->name, slave->dev->name);
/* notify ad that the link status has changed */
if (bond->params.mode == BOND_MODE_8023AD)
@@ -2395,10 +2323,8 @@ static void bond_miimon_commit(struct bonding *bond)
bond->params.mode == BOND_MODE_8023AD)
bond_set_slave_inactive_flags(slave);
- pr_info(DRV_NAME
- ": %s: link status definitely down for "
- "interface %s, disabling it\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: link status definitely down for interface %s, disabling it\n",
+ bond->dev->name, slave->dev->name);
if (bond->params.mode == BOND_MODE_8023AD)
bond_3ad_handle_link_change(slave,
@@ -2414,8 +2340,7 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
default:
- pr_err(DRV_NAME
- ": %s: invalid new link %d on slave %s\n",
+ pr_err("%s: invalid new link %d on slave %s\n",
bond->dev->name, slave->new_link,
slave->dev->name);
slave->new_link = BOND_LINK_NOCHANGE;
@@ -2534,19 +2459,19 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
struct sk_buff *skb;
pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
- slave_dev->name, dest_ip, src_ip, vlan_id);
+ slave_dev->name, dest_ip, src_ip, vlan_id);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
if (!skb) {
- pr_err(DRV_NAME ": ARP packet allocation failed\n");
+ pr_err("ARP packet allocation failed\n");
return;
}
if (vlan_id) {
skb = vlan_put_tag(skb, vlan_id);
if (!skb) {
- pr_err(DRV_NAME ": failed to insert VLAN tag\n");
+ pr_err("failed to insert VLAN tag\n");
return;
}
}
@@ -2586,9 +2511,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
if (rv) {
if (net_ratelimit()) {
- pr_warning(DRV_NAME
- ": %s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &fl.fl4_dst);
+ pr_warning("%s: no route to arp_ip_target %pI4\n",
+ bond->dev->name, &fl.fl4_dst);
}
continue;
}
@@ -2623,10 +2547,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
}
if (net_ratelimit()) {
- pr_warning(DRV_NAME
- ": %s: no path to arp_ip_target %pI4 via rt.dev %s\n",
- bond->dev->name, &fl.fl4_dst,
- rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
+ pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
+ bond->dev->name, &fl.fl4_dst,
+ rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
}
ip_rt_put(rt);
}
@@ -2644,8 +2567,8 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
struct vlan_entry *vlan;
struct net_device *vlan_dev;
- pr_debug("bond_send_grat_arp: bond %s slave %s\n", bond->dev->name,
- slave ? slave->dev->name : "NULL");
+ pr_debug("bond_send_grat_arp: bond %s slave %s\n",
+ bond->dev->name, slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_grat_arp ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
@@ -2674,7 +2597,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
- &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip));
+ &sip, &tip, i, &targets[i],
+ bond_has_this_ip(bond, tip));
if (sip == targets[i]) {
if (bond_has_this_ip(bond, tip))
slave->last_arp_rx = jiffies;
@@ -2698,8 +2622,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
read_lock(&bond->lock);
pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n",
- bond->dev->name, skb->dev ? skb->dev->name : "NULL",
- orig_dev ? orig_dev->name : "NULL");
+ bond->dev->name, skb->dev ? skb->dev->name : "NULL",
+ orig_dev ? orig_dev->name : "NULL");
slave = bond_get_slave_by_dev(bond, orig_dev);
if (!slave || !slave_do_arp_validate(bond, slave))
@@ -2724,9 +2648,9 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
memcpy(&tip, arp_ptr, 4);
pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
- bond->dev->name, slave->dev->name, slave->state,
- bond->params.arp_validate, slave_do_arp_validate(bond, slave),
- &sip, &tip);
+ bond->dev->name, slave->dev->name, slave->state,
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ &sip, &tip);
/*
* Backup slaves won't see the ARP reply, but do come through
@@ -2800,17 +2724,14 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* is closed.
*/
if (!oldcurrent) {
- pr_info(DRV_NAME
- ": %s: link status definitely "
- "up for interface %s, ",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: link status definitely up for interface %s, ",
+ bond->dev->name,
+ slave->dev->name);
do_failover = 1;
} else {
- pr_info(DRV_NAME
- ": %s: interface %s is now up\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now up\n",
+ bond->dev->name,
+ slave->dev->name);
}
}
} else {
@@ -2829,10 +2750,9 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- pr_info(DRV_NAME
- ": %s: interface %s is now down.\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now down.\n",
+ bond->dev->name,
+ slave->dev->name);
if (slave == oldcurrent)
do_failover = 1;
@@ -2965,9 +2885,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
slave->link = BOND_LINK_UP;
bond->current_arp_slave = NULL;
- pr_info(DRV_NAME
- ": %s: link status definitely "
- "up for interface %s.\n",
+ pr_info("%s: link status definitely up for interface %s.\n",
bond->dev->name, slave->dev->name);
if (!bond->curr_active_slave ||
@@ -2985,9 +2903,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
slave->link = BOND_LINK_DOWN;
bond_set_slave_inactive_flags(slave);
- pr_info(DRV_NAME
- ": %s: link status definitely down for "
- "interface %s, disabling it\n",
+ pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
if (slave == bond->curr_active_slave) {
@@ -2998,8 +2914,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
continue;
default:
- pr_err(DRV_NAME
- ": %s: impossible: new_link %d on slave %s\n",
+ pr_err("%s: impossible: new_link %d on slave %s\n",
bond->dev->name, slave->new_link,
slave->dev->name);
continue;
@@ -3028,9 +2943,9 @@ static void bond_ab_arp_probe(struct bonding *bond)
read_lock(&bond->curr_slave_lock);
if (bond->current_arp_slave && bond->curr_active_slave)
- pr_info(DRV_NAME "PROBE: c_arp %s && cas %s BAD\n",
- bond->current_arp_slave->dev->name,
- bond->curr_active_slave->dev->name);
+ pr_info("PROBE: c_arp %s && cas %s BAD\n",
+ bond->current_arp_slave->dev->name,
+ bond->curr_active_slave->dev->name);
if (bond->curr_active_slave) {
bond_arp_send_all(bond, bond->curr_active_slave);
@@ -3078,9 +2993,8 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave);
- pr_info(DRV_NAME
- ": %s: backup interface %s is now down.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: backup interface %s is now down.\n",
+ bond->dev->name, slave->dev->name);
}
}
}
@@ -3360,9 +3274,8 @@ static void bond_create_proc_entry(struct bonding *bond)
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
- pr_warning(DRV_NAME
- ": Warning: Cannot create /proc/net/%s/%s\n",
- DRV_NAME, bond_dev->name);
+ pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
@@ -3388,9 +3301,8 @@ static void bond_create_proc_dir(struct bond_net *bn)
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
- pr_warning(DRV_NAME
- ": Warning: cannot create /proc/net/%s\n",
- DRV_NAME);
+ pr_warning("Warning: cannot create /proc/net/%s\n",
+ DRV_NAME);
}
}
@@ -3539,8 +3451,8 @@ static int bond_netdev_event(struct notifier_block *this,
struct net_device *event_dev = (struct net_device *)ptr;
pr_debug("event_dev: %s, event: %lx\n",
- (event_dev ? event_dev->name : "None"),
- event);
+ event_dev ? event_dev->name : "None",
+ event);
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@ -3875,8 +3787,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
struct mii_ioctl_data *mii = NULL;
int res = 0;
- pr_debug("bond_ioctl: master=%s, cmd=%d\n",
- bond_dev->name, cmd);
+ pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
switch (cmd) {
case SIOCGMIIPHY:
@@ -3945,12 +3856,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
- pr_debug("slave_dev=%p: \n", slave_dev);
+ pr_debug("slave_dev=%p:\n", slave_dev);
if (!slave_dev)
res = -ENODEV;
else {
- pr_debug("slave_dev->name=%s: \n", slave_dev->name);
+ pr_debug("slave_dev->name=%s:\n", slave_dev->name);
switch (cmd) {
case BOND_ENSLAVE_OLD:
case SIOCBONDENSLAVE:
@@ -4059,7 +3970,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
int i;
pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
- (bond_dev ? bond_dev->name : "None"), new_mtu);
+ (bond_dev ? bond_dev->name : "None"), new_mtu);
/* Can't hold bond->lock with bh disabled here since
* some base drivers panic. On the other hand we can't
@@ -4077,8 +3988,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
*/
bond_for_each_slave(bond, slave, i) {
- pr_debug("s %p s->p %p c_m %p\n", slave,
- slave->prev, slave->dev->netdev_ops->ndo_change_mtu);
+ pr_debug("s %p s->p %p c_m %p\n",
+ slave,
+ slave->prev,
+ slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -4108,8 +4021,8 @@ unwind:
tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
if (tmp_res) {
- pr_debug("unwind err %d dev %s\n", tmp_res,
- slave->dev->name);
+ pr_debug("unwind err %d dev %s\n",
+ tmp_res, slave->dev->name);
}
}
@@ -4135,7 +4048,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
return bond_alb_set_mac_address(bond_dev, addr);
- pr_debug("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None"));
+ pr_debug("bond=%p, name=%s\n",
+ bond, bond_dev ? bond_dev->name : "None");
/*
* If fail_over_mac is set to active, do nothing and return
@@ -4200,8 +4114,8 @@ unwind:
tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
if (tmp_res) {
- pr_debug("unwind err %d dev %s\n", tmp_res,
- slave->dev->name);
+ pr_debug("unwind err %d dev %s\n",
+ tmp_res, slave->dev->name);
}
}
@@ -4357,9 +4271,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
if (tx_dev) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
- pr_err(DRV_NAME
- ": %s: Error: bond_xmit_broadcast(): "
- "skb_clone() failed\n",
+ pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
bond_dev->name);
continue;
}
@@ -4425,8 +4337,8 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
return bond_alb_xmit(skb, dev);
default:
/* Should never happen, mode already checked */
- pr_err(DRV_NAME ": %s: Error: Unknown bonding mode %d\n",
- dev->name, bond->params.mode);
+ pr_err("%s: Error: Unknown bonding mode %d\n",
+ dev->name, bond->params.mode);
WARN_ON_ONCE(1);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -4462,10 +4374,8 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
break;
default:
/* Should never happen, mode already checked */
- pr_err(DRV_NAME
- ": %s: Error: Unknown bonding mode %d\n",
- bond_dev->name,
- mode);
+ pr_err("%s: Error: Unknown bonding mode %d\n",
+ bond_dev->name, mode);
break;
}
}
@@ -4650,8 +4560,7 @@ static int bond_check_params(struct bond_params *params)
if (mode) {
bond_mode = bond_parse_parm(mode, bond_mode_tbl);
if (bond_mode == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid bonding mode \"%s\"\n",
+ pr_err("Error: Invalid bonding mode \"%s\"\n",
mode == NULL ? "NULL" : mode);
return -EINVAL;
}
@@ -4660,16 +4569,13 @@ static int bond_check_params(struct bond_params *params)
if (xmit_hash_policy) {
if ((bond_mode != BOND_MODE_XOR) &&
(bond_mode != BOND_MODE_8023AD)) {
- pr_info(DRV_NAME
- ": xmit_hash_policy param is irrelevant in"
- " mode %s\n",
+ pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
bond_mode_name(bond_mode));
} else {
xmit_hashtype = bond_parse_parm(xmit_hash_policy,
xmit_hashtype_tbl);
if (xmit_hashtype == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid xmit_hash_policy \"%s\"\n",
+ pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
xmit_hash_policy == NULL ? "NULL" :
xmit_hash_policy);
return -EINVAL;
@@ -4679,14 +4585,12 @@ static int bond_check_params(struct bond_params *params)
if (lacp_rate) {
if (bond_mode != BOND_MODE_8023AD) {
- pr_info(DRV_NAME
- ": lacp_rate param is irrelevant in mode %s\n",
- bond_mode_name(bond_mode));
+ pr_info("lacp_rate param is irrelevant in mode %s\n",
+ bond_mode_name(bond_mode));
} else {
lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
if (lacp_fast == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid lacp rate \"%s\"\n",
+ pr_err("Error: Invalid lacp rate \"%s\"\n",
lacp_rate == NULL ? "NULL" : lacp_rate);
return -EINVAL;
}
@@ -4696,82 +4600,64 @@ static int bond_check_params(struct bond_params *params)
if (ad_select) {
params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
if (params->ad_select == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid ad_select \"%s\"\n",
+ pr_err("Error: Invalid ad_select \"%s\"\n",
ad_select == NULL ? "NULL" : ad_select);
return -EINVAL;
}
if (bond_mode != BOND_MODE_8023AD) {
- pr_warning(DRV_NAME
- ": ad_select param only affects 802.3ad mode\n");
+ pr_warning("ad_select param only affects 802.3ad mode\n");
}
} else {
params->ad_select = BOND_AD_STABLE;
}
if (max_bonds < 0) {
- pr_warning(DRV_NAME
- ": Warning: max_bonds (%d) not in range %d-%d, so it "
- "was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
- max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+ pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
+ max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
max_bonds = BOND_DEFAULT_MAX_BONDS;
}
if (miimon < 0) {
- pr_warning(DRV_NAME
- ": Warning: miimon module parameter (%d), "
- "not in range 0-%d, so it was reset to %d\n",
- miimon, INT_MAX, BOND_LINK_MON_INTERV);
+ pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n",
+ miimon, INT_MAX, BOND_LINK_MON_INTERV);
miimon = BOND_LINK_MON_INTERV;
}
if (updelay < 0) {
- pr_warning(DRV_NAME
- ": Warning: updelay module parameter (%d), "
- "not in range 0-%d, so it was reset to 0\n",
- updelay, INT_MAX);
+ pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
- pr_warning(DRV_NAME
- ": Warning: downdelay module parameter (%d), "
- "not in range 0-%d, so it was reset to 0\n",
- downdelay, INT_MAX);
+ pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ downdelay, INT_MAX);
downdelay = 0;
}
if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warning(DRV_NAME
- ": Warning: use_carrier module parameter (%d), "
- "not of valid value (0/1), so it was set to 1\n",
- use_carrier);
+ pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
+ use_carrier);
use_carrier = 1;
}
if (num_grat_arp < 0 || num_grat_arp > 255) {
- pr_warning(DRV_NAME
- ": Warning: num_grat_arp (%d) not in range 0-255 so it "
- "was reset to 1 \n", num_grat_arp);
+ pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
+ num_grat_arp);
num_grat_arp = 1;
}
if (num_unsol_na < 0 || num_unsol_na > 255) {
- pr_warning(DRV_NAME
- ": Warning: num_unsol_na (%d) not in range 0-255 so it "
- "was reset to 1 \n", num_unsol_na);
+ pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
+ num_unsol_na);
num_unsol_na = 1;
}
/* reset values for 802.3ad */
if (bond_mode == BOND_MODE_8023AD) {
if (!miimon) {
- pr_warning(DRV_NAME
- ": Warning: miimon must be specified, "
- "otherwise bonding will not detect link "
- "failure, speed and duplex which are "
- "essential for 802.3ad operation\n");
+ pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
pr_warning("Forcing miimon to 100msec\n");
miimon = 100;
}
@@ -4781,24 +4667,15 @@ static int bond_check_params(struct bond_params *params)
if ((bond_mode == BOND_MODE_TLB) ||
(bond_mode == BOND_MODE_ALB)) {
if (!miimon) {
- pr_warning(DRV_NAME
- ": Warning: miimon must be specified, "
- "otherwise bonding will not detect link "
- "failure and link speed which are essential "
- "for TLB/ALB load balancing\n");
+ pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
pr_warning("Forcing miimon to 100msec\n");
miimon = 100;
}
}
if (bond_mode == BOND_MODE_ALB) {
- pr_notice(DRV_NAME
- ": In ALB mode you might experience client "
- "disconnections upon reconnection of a link if the "
- "bonding module updelay parameter (%d msec) is "
- "incompatible with the forwarding delay time of the "
- "switch\n",
- updelay);
+ pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
+ updelay);
}
if (!miimon) {
@@ -4806,49 +4683,37 @@ static int bond_check_params(struct bond_params *params)
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
- pr_warning(DRV_NAME
- ": Warning: miimon module parameter not set "
- "and updelay (%d) or downdelay (%d) module "
- "parameter is set; updelay and downdelay have "
- "no effect unless miimon is set\n",
- updelay, downdelay);
+ pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
+ updelay, downdelay);
}
} else {
/* don't allow arp monitoring */
if (arp_interval) {
- pr_warning(DRV_NAME
- ": Warning: miimon (%d) and arp_interval (%d) "
- "can't be used simultaneously, disabling ARP "
- "monitoring\n",
- miimon, arp_interval);
+ pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
+ miimon, arp_interval);
arp_interval = 0;
}
if ((updelay % miimon) != 0) {
- pr_warning(DRV_NAME
- ": Warning: updelay (%d) is not a multiple "
- "of miimon (%d), updelay rounded to %d ms\n",
- updelay, miimon, (updelay / miimon) * miimon);
+ pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ updelay, miimon,
+ (updelay / miimon) * miimon);
}
updelay /= miimon;
if ((downdelay % miimon) != 0) {
- pr_warning(DRV_NAME
- ": Warning: downdelay (%d) is not a multiple "
- "of miimon (%d), downdelay rounded to %d ms\n",
- downdelay, miimon,
- (downdelay / miimon) * miimon);
+ pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
+ downdelay, miimon,
+ (downdelay / miimon) * miimon);
}
downdelay /= miimon;
}
if (arp_interval < 0) {
- pr_warning(DRV_NAME
- ": Warning: arp_interval module parameter (%d) "
- ", not in range 0-%d, so it was reset to %d\n",
- arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
+ pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n",
+ arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
arp_interval = BOND_LINK_ARP_INTERV;
}
@@ -4858,10 +4723,8 @@ static int bond_check_params(struct bond_params *params)
/* not complete check, but should be good enough to
catch mistakes */
if (!isdigit(arp_ip_target[arp_ip_count][0])) {
- pr_warning(DRV_NAME
- ": Warning: bad arp_ip_target module parameter "
- "(%s), ARP monitoring will not be performed\n",
- arp_ip_target[arp_ip_count]);
+ pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
+ arp_ip_target[arp_ip_count]);
arp_interval = 0;
} else {
__be32 ip = in_aton(arp_ip_target[arp_ip_count]);
@@ -4871,31 +4734,25 @@ static int bond_check_params(struct bond_params *params)
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
- pr_warning(DRV_NAME
- ": Warning: arp_interval module parameter (%d) "
- "specified without providing an arp_ip_target "
- "parameter, arp_interval was reset to 0\n",
- arp_interval);
+ pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
+ arp_interval);
arp_interval = 0;
}
if (arp_validate) {
if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err(DRV_NAME
- ": arp_validate only supported in active-backup mode\n");
+ pr_err("arp_validate only supported in active-backup mode\n");
return -EINVAL;
}
if (!arp_interval) {
- pr_err(DRV_NAME
- ": arp_validate requires arp_interval\n");
+ pr_err("arp_validate requires arp_interval\n");
return -EINVAL;
}
arp_validate_value = bond_parse_parm(arp_validate,
arp_validate_tbl);
if (arp_validate_value == -1) {
- pr_err(DRV_NAME
- ": Error: invalid arp_validate \"%s\"\n",
+ pr_err("Error: invalid arp_validate \"%s\"\n",
arp_validate == NULL ? "NULL" : arp_validate);
return -EINVAL;
}
@@ -4903,17 +4760,14 @@ static int bond_check_params(struct bond_params *params)
arp_validate_value = 0;
if (miimon) {
- pr_info(DRV_NAME
- ": MII link monitoring set to %d ms\n",
- miimon);
+ pr_info("MII link monitoring set to %d ms\n", miimon);
} else if (arp_interval) {
int i;
- pr_info(DRV_NAME ": ARP monitoring set to %d ms,"
- " validate %s, with %d target(s):",
- arp_interval,
- arp_validate_tbl[arp_validate_value].modename,
- arp_ip_count);
+ pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
+ arp_interval,
+ arp_validate_tbl[arp_validate_value].modename,
+ arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
pr_info(" %s", arp_ip_target[i]);
@@ -4924,21 +4778,15 @@ static int bond_check_params(struct bond_params *params)
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_warning(DRV_NAME
- ": Warning: either miimon or arp_interval and "
- "arp_ip_target module parameters must be specified, "
- "otherwise bonding will not detect link failures! see "
- "bonding.txt for details.\n");
+ pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
- pr_warning(DRV_NAME
- ": Warning: %s primary device specified but has no "
- "effect in %s mode\n",
- primary, bond_mode_name(bond_mode));
+ pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
+ primary, bond_mode_name(bond_mode));
primary = NULL;
}
@@ -4946,8 +4794,7 @@ static int bond_check_params(struct bond_params *params)
primary_reselect_value = bond_parse_parm(primary_reselect,
pri_reselect_tbl);
if (primary_reselect_value == -1) {
- pr_err(DRV_NAME
- ": Error: Invalid primary_reselect \"%s\"\n",
+ pr_err("Error: Invalid primary_reselect \"%s\"\n",
primary_reselect ==
NULL ? "NULL" : primary_reselect);
return -EINVAL;
@@ -4960,16 +4807,13 @@ static int bond_check_params(struct bond_params *params)
fail_over_mac_value = bond_parse_parm(fail_over_mac,
fail_over_mac_tbl);
if (fail_over_mac_value == -1) {
- pr_err(DRV_NAME
- ": Error: invalid fail_over_mac \"%s\"\n",
+ pr_err("Error: invalid fail_over_mac \"%s\"\n",
arp_validate == NULL ? "NULL" : arp_validate);
return -EINVAL;
}
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
- pr_warning(DRV_NAME
- ": Warning: fail_over_mac only affects "
- "active-backup mode.\n");
+ pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
@@ -5076,8 +4920,7 @@ int bond_create(struct net *net, const char *name)
bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
bond_setup);
if (!bond_dev) {
- pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
- name);
+ pr_err("%s: eek! can't alloc netdev!\n", name);
res = -ENOMEM;
goto out;
}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4e00b4f8364..5acd557cea9 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -19,6 +19,9 @@
* file called LICENSE.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -109,11 +112,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
goto err_no_cmd;
if (command[0] == '+') {
- pr_info(DRV_NAME
- ": %s is being created...\n", ifname);
+ pr_info("%s is being created...\n", ifname);
rv = bond_create(net, ifname);
if (rv) {
- pr_info(DRV_NAME ": Bond creation failed.\n");
+ pr_info("Bond creation failed.\n");
res = rv;
}
} else if (command[0] == '-') {
@@ -122,12 +124,10 @@ static ssize_t bonding_store_bonds(struct class *cls,
rtnl_lock();
bond_dev = bond_get_by_name(net, ifname);
if (bond_dev) {
- pr_info(DRV_NAME ": %s is being deleted...\n",
- ifname);
+ pr_info("%s is being deleted...\n", ifname);
unregister_netdevice(bond_dev);
} else {
- pr_err(DRV_NAME ": unable to delete non-existent %s\n",
- ifname);
+ pr_err("unable to delete non-existent %s\n", ifname);
res = -ENODEV;
}
rtnl_unlock();
@@ -140,8 +140,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
return res;
err_no_cmd:
- pr_err(DRV_NAME ": no command found in bonding_masters."
- " Use +ifname or -ifname.\n");
+ pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
return -EPERM;
}
@@ -225,8 +224,8 @@ static ssize_t bonding_store_slaves(struct device *d,
/* Quick sanity check -- is the bond interface up? */
if (!(bond->dev->flags & IFF_UP)) {
- pr_warning(DRV_NAME ": %s: doing slave updates when "
- "interface is down.\n", bond->dev->name);
+ pr_warning("%s: doing slave updates when interface is down.\n",
+ bond->dev->name);
}
/* Note: We can't hold bond->lock here, as bond_create grabs it. */
@@ -247,17 +246,14 @@ static ssize_t bonding_store_slaves(struct device *d,
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
- pr_info(DRV_NAME
- ": %s: Interface %s does not exist!\n",
- bond->dev->name, ifname);
+ pr_info("%s: Interface %s does not exist!\n",
+ bond->dev->name, ifname);
ret = -ENODEV;
goto out;
}
if (dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- ": %s: Error: Unable to enslave %s "
- "because it is already up.\n",
+ pr_err("%s: Error: Unable to enslave %s because it is already up.\n",
bond->dev->name, dev->name);
ret = -EPERM;
goto out;
@@ -266,8 +262,7 @@ static ssize_t bonding_store_slaves(struct device *d,
read_lock(&bond->lock);
bond_for_each_slave(bond, slave, i)
if (slave->dev == dev) {
- pr_err(DRV_NAME
- ": %s: Interface %s is already enslaved!\n",
+ pr_err("%s: Interface %s is already enslaved!\n",
bond->dev->name, ifname);
ret = -EPERM;
read_unlock(&bond->lock);
@@ -275,8 +270,7 @@ static ssize_t bonding_store_slaves(struct device *d,
}
read_unlock(&bond->lock);
- pr_info(DRV_NAME ": %s: Adding slave %s.\n",
- bond->dev->name, ifname);
+ pr_info("%s: Adding slave %s.\n", bond->dev->name, ifname);
/* If this is the first slave, then we need to set
the master's hardware address to be the same as the
@@ -313,7 +307,7 @@ static ssize_t bonding_store_slaves(struct device *d,
break;
}
if (dev) {
- pr_info(DRV_NAME ": %s: Removing slave %s\n",
+ pr_info("%s: Removing slave %s\n",
bond->dev->name, dev->name);
res = bond_release(bond->dev, dev);
if (res) {
@@ -323,16 +317,16 @@ static ssize_t bonding_store_slaves(struct device *d,
/* set the slave MTU to the default */
dev_set_mtu(dev, original_mtu);
} else {
- pr_err(DRV_NAME ": unable to remove non-existent"
- " slave %s for bond %s.\n",
- ifname, bond->dev->name);
+ pr_err("unable to remove non-existent slave %s for bond %s.\n",
+ ifname, bond->dev->name);
ret = -ENODEV;
}
goto out;
}
err_no_cmd:
- pr_err(DRV_NAME ": no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name);
+ pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ bond->dev->name);
ret = -EPERM;
out:
@@ -365,18 +359,16 @@ static ssize_t bonding_store_mode(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME ": unable to update mode of %s"
- " because interface is up.\n", bond->dev->name);
+ pr_err("unable to update mode of %s because interface is up.\n",
+ bond->dev->name);
ret = -EPERM;
goto out;
}
new_value = bond_parse_parm(buf, bond_mode_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid mode value %.*s.\n",
- bond->dev->name,
- (int)strlen(buf) - 1, buf);
+ pr_err("%s: Ignoring invalid mode value %.*s.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
goto out;
} else {
@@ -388,8 +380,8 @@ static ssize_t bonding_store_mode(struct device *d,
bond->params.mode = new_value;
bond_set_mode_ops(bond, bond->params.mode);
- pr_info(DRV_NAME ": %s: setting mode to %s (%d).\n",
- bond->dev->name, bond_mode_tbl[new_value].modename,
+ pr_info("%s: setting mode to %s (%d).\n",
+ bond->dev->name, bond_mode_tbl[new_value].modename,
new_value);
}
out:
@@ -421,8 +413,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- "%s: Interface is up. Unable to update xmit policy.\n",
+ pr_err("%s: Interface is up. Unable to update xmit policy.\n",
bond->dev->name);
ret = -EPERM;
goto out;
@@ -430,8 +421,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
new_value = bond_parse_parm(buf, xmit_hashtype_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid xmit hash policy value %.*s.\n",
+ pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n",
bond->dev->name,
(int)strlen(buf) - 1, buf);
ret = -EINVAL;
@@ -439,7 +429,7 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
} else {
bond->params.xmit_policy = new_value;
bond_set_mode_ops(bond, bond->params.mode);
- pr_info(DRV_NAME ": %s: setting xmit hash policy to %s (%d).\n",
+ pr_info("%s: setting xmit hash policy to %s (%d).\n",
bond->dev->name,
xmit_hashtype_tbl[new_value].modename, new_value);
}
@@ -472,20 +462,18 @@ static ssize_t bonding_store_arp_validate(struct device *d,
new_value = bond_parse_parm(buf, arp_validate_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid arp_validate value %s\n",
+ pr_err("%s: Ignoring invalid arp_validate value %s\n",
bond->dev->name, buf);
return -EINVAL;
}
if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
- pr_err(DRV_NAME
- ": %s: arp_validate only supported in active-backup mode.\n",
+ pr_err("%s: arp_validate only supported in active-backup mode.\n",
bond->dev->name);
return -EINVAL;
}
- pr_info(DRV_NAME ": %s: setting arp_validate to %s (%d).\n",
- bond->dev->name, arp_validate_tbl[new_value].modename,
- new_value);
+ pr_info("%s: setting arp_validate to %s (%d).\n",
+ bond->dev->name, arp_validate_tbl[new_value].modename,
+ new_value);
if (!bond->params.arp_validate && new_value)
bond_register_arp(bond);
@@ -523,24 +511,22 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->slave_cnt != 0) {
- pr_err(DRV_NAME
- ": %s: Can't alter fail_over_mac with slaves in bond.\n",
+ pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
return -EPERM;
}
new_value = bond_parse_parm(buf, fail_over_mac_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid fail_over_mac value %s.\n",
+ pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
bond->dev->name, buf);
return -EINVAL;
}
bond->params.fail_over_mac = new_value;
- pr_info(DRV_NAME ": %s: Setting fail_over_mac to %s (%d).\n",
- bond->dev->name, fail_over_mac_tbl[new_value].modename,
- new_value);
+ pr_info("%s: Setting fail_over_mac to %s (%d).\n",
+ bond->dev->name, fail_over_mac_tbl[new_value].modename,
+ new_value);
return count;
}
@@ -571,31 +557,26 @@ static ssize_t bonding_store_arp_interval(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no arp_interval value specified.\n",
+ pr_err("%s: no arp_interval value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
+ pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
bond->dev->name, new_value, INT_MAX);
ret = -EINVAL;
goto out;
}
- pr_info(DRV_NAME
- ": %s: Setting ARP monitoring interval to %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Setting ARP monitoring interval to %d.\n",
+ bond->dev->name, new_value);
bond->params.arp_interval = new_value;
if (bond->params.arp_interval)
bond->dev->priv_flags |= IFF_MASTER_ARPMON;
if (bond->params.miimon) {
- pr_info(DRV_NAME
- ": %s: ARP monitoring cannot be used with MII monitoring. "
- "%s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
if (delayed_work_pending(&bond->mii_work)) {
cancel_delayed_work(&bond->mii_work);
@@ -603,10 +584,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
}
}
if (!bond->params.arp_targets[0]) {
- pr_info(DRV_NAME
- ": %s: ARP monitoring has been set up, "
- "but no ARP targets have been specified.\n",
- bond->dev->name);
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
@@ -666,8 +645,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
/* look for adds */
if (buf[0] == '+') {
if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- pr_err(DRV_NAME
- ": %s: invalid ARP target %pI4 specified for addition\n",
+ pr_err("%s: invalid ARP target %pI4 specified for addition\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
@@ -675,23 +653,20 @@ static ssize_t bonding_store_arp_targets(struct device *d,
/* look for an empty slot to put the target in, and check for dupes */
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
if (targets[i] == newtarget) { /* duplicate */
- pr_err(DRV_NAME
- ": %s: ARP target %pI4 is already present\n",
+ pr_err("%s: ARP target %pI4 is already present\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
}
if (targets[i] == 0) {
- pr_info(DRV_NAME
- ": %s: adding ARP target %pI4.\n",
- bond->dev->name, &newtarget);
+ pr_info("%s: adding ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
done = 1;
targets[i] = newtarget;
}
}
if (!done) {
- pr_err(DRV_NAME
- ": %s: ARP target table is full!\n",
+ pr_err("%s: ARP target table is full!\n",
bond->dev->name);
ret = -EINVAL;
goto out;
@@ -699,8 +674,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
} else if (buf[0] == '-') {
if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
- pr_err(DRV_NAME
- ": %s: invalid ARP target %pI4 specified for removal\n",
+ pr_err("%s: invalid ARP target %pI4 specified for removal\n",
bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
@@ -709,9 +683,8 @@ static ssize_t bonding_store_arp_targets(struct device *d,
for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
if (targets[i] == newtarget) {
int j;
- pr_info(DRV_NAME
- ": %s: removing ARP target %pI4.\n",
- bond->dev->name, &newtarget);
+ pr_info("%s: removing ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
targets[j] = targets[j+1];
@@ -720,16 +693,14 @@ static ssize_t bonding_store_arp_targets(struct device *d,
}
}
if (!done) {
- pr_info(DRV_NAME
- ": %s: unable to remove nonexistent ARP target %pI4.\n",
- bond->dev->name, &newtarget);
+ pr_info("%s: unable to remove nonexistent ARP target %pI4.\n",
+ bond->dev->name, &newtarget);
ret = -EINVAL;
goto out;
}
} else {
- pr_err(DRV_NAME ": no command found in arp_ip_targets file"
- " for bond %s. Use +<addr> or -<addr>.\n",
- bond->dev->name);
+ pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ bond->dev->name);
ret = -EPERM;
goto out;
}
@@ -761,41 +732,34 @@ static ssize_t bonding_store_downdelay(struct device *d,
struct bonding *bond = to_bond(d);
if (!(bond->params.miimon)) {
- pr_err(DRV_NAME
- ": %s: Unable to set down delay as MII monitoring is disabled\n",
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
bond->dev->name);
ret = -EPERM;
goto out;
}
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no down delay value specified.\n",
- bond->dev->name);
+ pr_err("%s: no down delay value specified.\n", bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
if ((new_value % bond->params.miimon) != 0) {
- pr_warning(DRV_NAME
- ": %s: Warning: down delay (%d) is not a "
- "multiple of miimon (%d), delay rounded "
- "to %d ms\n",
+ pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
bond->dev->name, new_value,
bond->params.miimon,
(new_value / bond->params.miimon) *
bond->params.miimon);
}
bond->params.downdelay = new_value / bond->params.miimon;
- pr_info(DRV_NAME ": %s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Setting down delay to %d.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
}
@@ -823,41 +787,35 @@ static ssize_t bonding_store_updelay(struct device *d,
struct bonding *bond = to_bond(d);
if (!(bond->params.miimon)) {
- pr_err(DRV_NAME
- ": %s: Unable to set up delay as MII monitoring is disabled\n",
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
bond->dev->name);
ret = -EPERM;
goto out;
}
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no up delay value specified.\n",
+ pr_err("%s: no up delay value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
if ((new_value % bond->params.miimon) != 0) {
- pr_warning(DRV_NAME
- ": %s: Warning: up delay (%d) is not a "
- "multiple of miimon (%d), updelay rounded "
- "to %d ms\n",
+ pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
bond->dev->name, new_value,
bond->params.miimon,
(new_value / bond->params.miimon) *
bond->params.miimon);
}
bond->params.updelay = new_value / bond->params.miimon;
- pr_info(DRV_NAME ": %s: Setting up delay to %d.\n",
- bond->dev->name, bond->params.updelay * bond->params.miimon);
-
+ pr_info("%s: Setting up delay to %d.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
}
out:
@@ -889,16 +847,14 @@ static ssize_t bonding_store_lacp(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- ": %s: Unable to update LACP rate because interface is up.\n",
+ pr_err("%s: Unable to update LACP rate because interface is up.\n",
bond->dev->name);
ret = -EPERM;
goto out;
}
if (bond->params.mode != BOND_MODE_8023AD) {
- pr_err(DRV_NAME
- ": %s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
+ pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n",
bond->dev->name);
ret = -EPERM;
goto out;
@@ -908,12 +864,11 @@ static ssize_t bonding_store_lacp(struct device *d,
if ((new_value == 1) || (new_value == 0)) {
bond->params.lacp_fast = new_value;
- pr_info(DRV_NAME ": %s: Setting LACP rate to %s (%d).\n",
+ pr_info("%s: Setting LACP rate to %s (%d).\n",
bond->dev->name, bond_lacp_tbl[new_value].modename,
new_value);
} else {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid LACP rate value %.*s.\n",
+ pr_err("%s: Ignoring invalid LACP rate value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
}
@@ -943,9 +898,8 @@ static ssize_t bonding_store_ad_select(struct device *d,
struct bonding *bond = to_bond(d);
if (bond->dev->flags & IFF_UP) {
- pr_err(DRV_NAME
- ": %s: Unable to update ad_select because interface "
- "is up.\n", bond->dev->name);
+ pr_err("%s: Unable to update ad_select because interface is up.\n",
+ bond->dev->name);
ret = -EPERM;
goto out;
}
@@ -954,13 +908,11 @@ static ssize_t bonding_store_ad_select(struct device *d,
if (new_value != -1) {
bond->params.ad_select = new_value;
- pr_info(DRV_NAME
- ": %s: Setting ad_select to %s (%d).\n",
- bond->dev->name, ad_select_tbl[new_value].modename,
- new_value);
+ pr_info("%s: Setting ad_select to %s (%d).\n",
+ bond->dev->name, ad_select_tbl[new_value].modename,
+ new_value);
} else {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid ad_select value %.*s.\n",
+ pr_err("%s: Ignoring invalid ad_select value %.*s.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
ret = -EINVAL;
}
@@ -990,15 +942,13 @@ static ssize_t bonding_store_n_grat_arp(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no num_grat_arp value specified.\n",
+ pr_err("%s: no num_grat_arp value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0 || new_value > 255) {
- pr_err(DRV_NAME
- ": %s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
+ pr_err("%s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
bond->dev->name, new_value);
ret = -EINVAL;
goto out;
@@ -1031,16 +981,14 @@ static ssize_t bonding_store_n_unsol_na(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no num_unsol_na value specified.\n",
+ pr_err("%s: no num_unsol_na value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0 || new_value > 255) {
- pr_err(DRV_NAME
- ": %s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
+ pr_err("%s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
bond->dev->name, new_value);
ret = -EINVAL;
goto out;
@@ -1075,40 +1023,31 @@ static ssize_t bonding_store_miimon(struct device *d,
struct bonding *bond = to_bond(d);
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no miimon value specified.\n",
+ pr_err("%s: no miimon value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Invalid miimon value %d not in range %d-%d; rejected.\n",
+ pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
- pr_info(DRV_NAME
- ": %s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Setting MII monitoring interval to %d.\n",
+ bond->dev->name, new_value);
bond->params.miimon = new_value;
if (bond->params.updelay)
- pr_info(DRV_NAME
- ": %s: Note: Updating updelay (to %d) "
- "since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
- pr_info(DRV_NAME
- ": %s: Note: Updating downdelay (to %d) "
- "since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
if (bond->params.arp_interval) {
- pr_info(DRV_NAME
- ": %s: MII monitoring cannot be used with "
- "ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
bond->params.arp_interval = 0;
bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
if (bond->params.arp_validate) {
@@ -1176,17 +1115,15 @@ static ssize_t bonding_store_primary(struct device *d,
write_lock_bh(&bond->curr_slave_lock);
if (!USES_PRIMARY(bond->params.mode)) {
- pr_info(DRV_NAME
- ": %s: Unable to set primary slave; %s is in mode %d\n",
- bond->dev->name, bond->dev->name, bond->params.mode);
+ pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
+ bond->dev->name, bond->dev->name, bond->params.mode);
} else {
bond_for_each_slave(bond, slave, i) {
if (strnicmp
(slave->dev->name, buf,
strlen(slave->dev->name)) == 0) {
- pr_info(DRV_NAME
- ": %s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
strcpy(bond->params.primary, slave->dev->name);
bond_select_active_slave(bond);
@@ -1197,15 +1134,13 @@ static ssize_t bonding_store_primary(struct device *d,
/* if we got here, then we didn't match the name of any slave */
if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info(DRV_NAME
- ": %s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
} else {
- pr_info(DRV_NAME
- ": %s: Unable to set %.*s as primary slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ pr_info("%s: Unable to set %.*s as primary slave as it is not a slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
}
}
out:
@@ -1244,8 +1179,7 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
new_value = bond_parse_parm(buf, pri_reselect_tbl);
if (new_value < 0) {
- pr_err(DRV_NAME
- ": %s: Ignoring invalid primary_reselect value %.*s.\n",
+ pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n",
bond->dev->name,
(int) strlen(buf) - 1, buf);
ret = -EINVAL;
@@ -1253,7 +1187,7 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
}
bond->params.primary_reselect = new_value;
- pr_info(DRV_NAME ": %s: setting primary_reselect to %s (%d).\n",
+ pr_info("%s: setting primary_reselect to %s (%d).\n",
bond->dev->name, pri_reselect_tbl[new_value].modename,
new_value);
@@ -1291,20 +1225,18 @@ static ssize_t bonding_store_carrier(struct device *d,
if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err(DRV_NAME
- ": %s: no use_carrier value specified.\n",
+ pr_err("%s: no use_carrier value specified.\n",
bond->dev->name);
ret = -EINVAL;
goto out;
}
if ((new_value == 0) || (new_value == 1)) {
bond->params.use_carrier = new_value;
- pr_info(DRV_NAME ": %s: Setting use_carrier to %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Setting use_carrier to %d.\n",
+ bond->dev->name, new_value);
} else {
- pr_info(DRV_NAME
- ": %s: Ignoring invalid use_carrier value %d.\n",
- bond->dev->name, new_value);
+ pr_info("%s: Ignoring invalid use_carrier value %d.\n",
+ bond->dev->name, new_value);
}
out:
return count;
@@ -1349,8 +1281,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
write_lock_bh(&bond->curr_slave_lock);
if (!USES_PRIMARY(bond->params.mode))
- pr_info(DRV_NAME ": %s: Unable to change active slave;"
- " %s is in mode %d\n",
+ pr_info("%s: Unable to change active slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
else {
bond_for_each_slave(bond, slave, i) {
@@ -1361,9 +1292,9 @@ static ssize_t bonding_store_active_slave(struct device *d,
new_active = slave;
if (new_active == old_active) {
/* do nothing */
- pr_info(DRV_NAME
- ": %s: %s is already the current active slave.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: %s is already the current active slave.\n",
+ bond->dev->name,
+ slave->dev->name);
goto out;
}
else {
@@ -1371,16 +1302,15 @@ static ssize_t bonding_store_active_slave(struct device *d,
(old_active) &&
(new_active->link == BOND_LINK_UP) &&
IS_UP(new_active->dev)) {
- pr_info(DRV_NAME
- ": %s: Setting %s as active slave.\n",
- bond->dev->name, slave->dev->name);
+ pr_info("%s: Setting %s as active slave.\n",
+ bond->dev->name,
+ slave->dev->name);
bond_change_active_slave(bond, new_active);
}
else {
- pr_info(DRV_NAME
- ": %s: Could not set %s as active slave; "
- "either %s is down or the link is down.\n",
- bond->dev->name, slave->dev->name,
+ pr_info("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ bond->dev->name,
+ slave->dev->name,
slave->dev->name);
}
goto out;
@@ -1391,14 +1321,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
/* if we got here, then we didn't match the name of any slave */
if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info(DRV_NAME
- ": %s: Setting active slave to None.\n",
+ pr_info("%s: Setting active slave to None.\n",
bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
} else {
- pr_info(DRV_NAME ": %s: Unable to set %.*s"
- " as active slave as it is not a slave.\n",
+ pr_info("%s: Unable to set %.*s as active slave as it is not a slave.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
}
}
@@ -1600,8 +1528,7 @@ int bond_create_sysfs(void)
/* Is someone being kinky and naming a device bonding_master? */
if (__dev_get_by_name(&init_net,
class_attr_bonding_masters.attr.name))
- pr_err("network device named %s already "
- "exists in sysfs",
+ pr_err("network device named %s already exists in sysfs",
class_attr_bonding_masters.attr.name);
ret = 0;
}
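
All of the bond_sysfs.c hunks above drop the explicit DRV_NAME ": " prefix from the pr_err()/pr_info()/pr_warning() call sites. The log output only stays the same if the file re-adds that prefix centrally through pr_fmt(); the define itself is not visible in these hunks, so the following is only a sketch of the usual idiom under that assumption.

        /* Hedged sketch of the conventional pr_fmt() idiom (not shown in this excerpt).
         * pr_fmt() must be defined before the first #include so that printk.h does
         * not install its empty default. */
        #define DRV_NAME "bonding"
        #define pr_fmt(fmt) DRV_NAME ": " fmt

        #include <linux/kernel.h>

        static void example(const char *ifname, int val)
        {
                /* expands to printk(KERN_INFO "bonding: %s: Setting use_carrier to %d.\n", ...) */
                pr_info("%s: Setting use_carrier to %d.\n", ifname, val);
        }

With such a define in place, every converted call site above keeps printing the "bonding: " prefix without repeating DRV_NAME by hand.
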
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index bb803fa1e6a..05b751719bd 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -50,10 +50,19 @@ config CAN_TI_HECC
config CAN_MCP251X
tristate "Microchip MCP251x SPI CAN controllers"
- depends on CAN_DEV && SPI
+ depends on CAN_DEV && SPI && HAS_DMA
---help---
Driver for the Microchip MCP251x SPI CAN controllers.
+config CAN_BFIN
+ depends on CAN_DEV && (BF534 || BF536 || BF537 || BF538 || BF539 || BF54x)
+ tristate "Analog Devices Blackfin on-chip CAN"
+ ---help---
+ Driver for the Analog Devices Blackfin on-chip CAN controllers
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin_can.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 56899fef1c6..7a702f28d01 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+obj-$(CONFIG_CAN_BFIN) += bfin_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index cbe3fce53e3..166cc7e579c 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -474,7 +474,7 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
reg_msr = at91_read(priv, AT91_MSR(mb));
if (reg_msr & AT91_MSR_MRTR)
cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
+ cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
@@ -1037,7 +1037,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || !irq) {
+ if (!res || irq <= 0) {
err = -ENODEV;
goto exit_put;
}
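
The at91_can_probe() change just above tightens the IRQ check from !irq to irq <= 0: platform_get_irq() signals failure with a negative errno (and 0 is not a usable IRQ here), so a plain zero test let error values such as -ENXIO slip through to request_irq(). A minimal, hypothetical sketch of the corrected pattern, not the driver's exact code:

        int irq = platform_get_irq(pdev, 0);

        if (irq <= 0)                   /* irq < 0: errno from the core; irq == 0: invalid */
                return irq ? irq : -ENODEV;
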
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
new file mode 100644
index 00000000000..0ec1524523c
--- /dev/null
+++ b/drivers/net/can/bfin_can.c
@@ -0,0 +1,783 @@
+/*
+ * Blackfin On-Chip CAN Driver
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <asm/portmux.h>
+
+#define DRV_NAME "bfin_can"
+#define BFIN_CAN_TIMEOUT 100
+
+/*
+ * transmit and receive channels
+ */
+#define TRANSMIT_CHL 24
+#define RECEIVE_STD_CHL 0
+#define RECEIVE_EXT_CHL 4
+#define RECEIVE_RTR_CHL 8
+#define RECEIVE_EXT_RTR_CHL 12
+#define MAX_CHL_NUMBER 32
+
+/*
+ * bfin can registers layout
+ */
+struct bfin_can_mask_regs {
+ u16 aml;
+ u16 dummy1;
+ u16 amh;
+ u16 dummy2;
+};
+
+struct bfin_can_channel_regs {
+ u16 data[8];
+ u16 dlc;
+ u16 dummy1;
+ u16 tsv;
+ u16 dummy2;
+ u16 id0;
+ u16 dummy3;
+ u16 id1;
+ u16 dummy4;
+};
+
+struct bfin_can_regs {
+ /*
+ * global control and status registers
+ */
+ u16 mc1; /* offset 0 */
+ u16 dummy1;
+ u16 md1; /* offset 4 */
+ u16 rsv1[13];
+ u16 mbtif1; /* offset 0x20 */
+ u16 dummy2;
+ u16 mbrif1; /* offset 0x24 */
+ u16 dummy3;
+ u16 mbim1; /* offset 0x28 */
+ u16 rsv2[11];
+ u16 mc2; /* offset 0x40 */
+ u16 dummy4;
+ u16 md2; /* offset 0x44 */
+ u16 dummy5;
+ u16 trs2; /* offset 0x48 */
+ u16 rsv3[11];
+ u16 mbtif2; /* offset 0x60 */
+ u16 dummy6;
+ u16 mbrif2; /* offset 0x64 */
+ u16 dummy7;
+ u16 mbim2; /* offset 0x68 */
+ u16 rsv4[11];
+ u16 clk; /* offset 0x80 */
+ u16 dummy8;
+ u16 timing; /* offset 0x84 */
+ u16 rsv5[3];
+ u16 status; /* offset 0x8c */
+ u16 dummy9;
+ u16 cec; /* offset 0x90 */
+ u16 dummy10;
+ u16 gis; /* offset 0x94 */
+ u16 dummy11;
+ u16 gim; /* offset 0x98 */
+ u16 rsv6[3];
+ u16 ctrl; /* offset 0xa0 */
+ u16 dummy12;
+ u16 intr; /* offset 0xa4 */
+ u16 rsv7[7];
+ u16 esr; /* offset 0xb4 */
+ u16 rsv8[37];
+
+ /*
+ * channel(mailbox) mask and message registers
+ */
+ struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */
+ struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
+};
+
+/*
+ * bfin can private data
+ */
+struct bfin_can_priv {
+ struct can_priv can; /* must be the first member */
+ struct net_device *dev;
+ void __iomem *membase;
+ int rx_irq;
+ int tx_irq;
+ int err_irq;
+ unsigned short *pin_list;
+};
+
+/*
+ * bfin can timing parameters
+ */
+static struct can_bittiming_const bfin_can_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ /*
+ * Although the BRP field can be set to any value, it is recommended
+ * that the value be greater than or equal to 4, as restrictions
+ * apply to the bit timing configuration when BRP is less than 4.
+ */
+ .brp_min = 4,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static int bfin_can_set_bittiming(struct net_device *dev)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u16 clk, timing;
+
+ clk = bt->brp - 1;
+ timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
+ ((bt->phase_seg2 - 1) << 4);
+
+ /*
+ * If the SAM bit is set, the input signal is oversampled three times
+ * at the SCLK rate.
+ */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ timing |= SAM;
+
+ bfin_write16(&reg->clk, clk);
+ bfin_write16(&reg->timing, timing);
+
+ dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
+ clk, timing);
+
+ return 0;
+}
+
+static void bfin_can_set_reset_mode(struct net_device *dev)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ int timeout = BFIN_CAN_TIMEOUT;
+ int i;
+
+ /* disable interrupts */
+ bfin_write16(&reg->mbim1, 0);
+ bfin_write16(&reg->mbim2, 0);
+ bfin_write16(&reg->gim, 0);
+
+ /* reset can and enter configuration mode */
+ bfin_write16(&reg->ctrl, SRS | CCR);
+ SSYNC();
+ bfin_write16(&reg->ctrl, CCR);
+ SSYNC();
+ while (!(bfin_read16(&reg->ctrl) & CCA)) {
+ udelay(10);
+ if (--timeout == 0) {
+ dev_err(dev->dev.parent,
+ "fail to enter configuration mode\n");
+ BUG();
+ }
+ }
+
+ /*
+ * All mailbox configurations are marked as inactive
+ * by writing to CAN Mailbox Configuration Registers 1 and 2
+ * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
+ */
+ bfin_write16(&reg->mc1, 0);
+ bfin_write16(&reg->mc2, 0);
+
+ /* Set Mailbox Direction */
+ bfin_write16(&reg->md1, 0xFFFF); /* mailbox 1-16 are RX */
+ bfin_write16(&reg->md2, 0); /* mailbox 17-32 are TX */
+
+ /* RECEIVE_STD_CHL */
+ for (i = 0; i < 2; i++) {
+ bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
+ bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
+ bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
+ bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
+ bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
+ }
+
+ /* RECEIVE_EXT_CHL */
+ for (i = 0; i < 2; i++) {
+ bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
+ bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
+ bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
+ bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
+ bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
+ }
+
+ bfin_write16(&reg->mc2, BIT(TRANSMIT_CHL - 16));
+ bfin_write16(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
+ SSYNC();
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static void bfin_can_set_normal_mode(struct net_device *dev)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ int timeout = BFIN_CAN_TIMEOUT;
+
+ /*
+ * leave configuration mode
+ */
+ bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) & ~CCR);
+
+ while (bfin_read16(&reg->status) & CCA) {
+ udelay(10);
+ if (--timeout == 0) {
+ dev_err(dev->dev.parent,
+ "fail to leave configuration mode\n");
+ BUG();
+ }
+ }
+
+ /*
+ * clear _All_ tx and rx interrupts
+ */
+ bfin_write16(&reg->mbtif1, 0xFFFF);
+ bfin_write16(&reg->mbtif2, 0xFFFF);
+ bfin_write16(&reg->mbrif1, 0xFFFF);
+ bfin_write16(&reg->mbrif2, 0xFFFF);
+
+ /*
+ * clear global interrupt status register
+ */
+ bfin_write16(&reg->gis, 0x7FF); /* overwrites with '1' */
+
+ /*
+ * Initialize Interrupts
+ * - set bits in the mailbox interrupt mask register
+ * - global interrupt mask
+ */
+ bfin_write16(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
+ bfin_write16(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
+
+ bfin_write16(&reg->gim, EPIM | BOIM | RMLIM);
+ SSYNC();
+}
+
+static void bfin_can_start(struct net_device *dev)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+
+ /* enter reset mode */
+ if (priv->can.state != CAN_STATE_STOPPED)
+ bfin_can_set_reset_mode(dev);
+
+ /* leave reset mode */
+ bfin_can_set_normal_mode(dev);
+}
+
+static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ bfin_can_start(dev);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 dlc = cf->can_dlc;
+ canid_t id = cf->can_id;
+ u8 *data = cf->data;
+ u16 val;
+ int i;
+
+ netif_stop_queue(dev);
+
+ /* fill id */
+ if (id & CAN_EFF_FLAG) {
+ bfin_write16(&reg->chl[TRANSMIT_CHL].id0, id);
+ if (id & CAN_RTR_FLAG)
+ writew(((id & 0x1FFF0000) >> 16) | IDE | AME | RTR,
+ &reg->chl[TRANSMIT_CHL].id1);
+ else
+ writew(((id & 0x1FFF0000) >> 16) | IDE | AME,
+ &reg->chl[TRANSMIT_CHL].id1);
+
+ } else {
+ if (id & CAN_RTR_FLAG)
+ writew((id << 2) | AME | RTR,
+ &reg->chl[TRANSMIT_CHL].id1);
+ else
+ bfin_write16(&reg->chl[TRANSMIT_CHL].id1,
+ (id << 2) | AME);
+ }
+
+ /* fill payload */
+ for (i = 0; i < 8; i += 2) {
+ val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
+ ((6 - i) < dlc ? (data[6 - i] << 8) : 0);
+ bfin_write16(&reg->chl[TRANSMIT_CHL].data[i], val);
+ }
+
+ /* fill data length code */
+ bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
+
+ dev->trans_start = jiffies;
+
+ can_put_echo_skb(skb, dev, 0);
+
+ /* set transmit request */
+ bfin_write16(&reg->trs2, BIT(TRANSMIT_CHL - 16));
+
+ return 0;
+}
+
+static void bfin_can_rx(struct net_device *dev, u16 isrc)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ int obj;
+ int i;
+ u16 val;
+
+ skb = alloc_can_skb(dev, &cf);
+ if (skb == NULL)
+ return;
+
+ /* get id */
+ if (isrc & BIT(RECEIVE_EXT_CHL)) {
+ /* extended frame format (EFF) */
+ cf->can_id = ((bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id1)
+ & 0x1FFF) << 16)
+ + bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id0);
+ cf->can_id |= CAN_EFF_FLAG;
+ obj = RECEIVE_EXT_CHL;
+ } else {
+ /* standard frame format (SFF) */
+ cf->can_id = (bfin_read16(&reg->chl[RECEIVE_STD_CHL].id1)
+ & 0x1ffc) >> 2;
+ obj = RECEIVE_STD_CHL;
+ }
+ if (bfin_read16(&reg->chl[obj].id1) & RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+
+ /* get data length code */
+ cf->can_dlc = get_can_dlc(bfin_read16(&reg->chl[obj].dlc) & 0xF);
+
+ /* get payload */
+ for (i = 0; i < 8; i += 2) {
+ val = bfin_read16(&reg->chl[obj].data[i]);
+ cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
+ cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state = priv->can.state;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ if (isrc & RMLIS) {
+ /* data overrun interrupt */
+ dev_dbg(dev->dev.parent, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ if (isrc & BOIS) {
+ dev_dbg(dev->dev.parent, "bus-off mode interrupt\n");
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(dev);
+ }
+
+ if (isrc & EPIS) {
+ /* error passive interrupt */
+ dev_dbg(dev->dev.parent, "error passive interrupt\n");
+ state = CAN_STATE_ERROR_PASSIVE;
+ }
+
+ if ((isrc & EWTIS) || (isrc & EWRIS)) {
+ dev_dbg(dev->dev.parent,
+ "Error Warning Transmit/Receive Interrupt\n");
+ state = CAN_STATE_ERROR_WARNING;
+ }
+
+ if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
+ state == CAN_STATE_ERROR_PASSIVE)) {
+ u16 cec = bfin_read16(&reg->cec);
+ u8 rxerr = cec;
+ u8 txerr = cec >> 8;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ if (state == CAN_STATE_ERROR_WARNING) {
+ priv->can.can_stats.error_warning++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ } else {
+ priv->can.can_stats.error_passive++;
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ }
+
+ if (status) {
+ priv->can.can_stats.bus_error++;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ if (status & BEF)
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ else if (status & FER)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ else if (status & SER)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ else
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ }
+
+ priv->can.state = state;
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ struct net_device_stats *stats = &dev->stats;
+ u16 status, isrc;
+
+ if ((irq == priv->tx_irq) && bfin_read16(&reg->mbtif2)) {
+ /* transmission complete interrupt */
+ bfin_write16(&reg->mbtif2, 0xFFFF);
+ stats->tx_packets++;
+ stats->tx_bytes += bfin_read16(&reg->chl[TRANSMIT_CHL].dlc);
+ can_get_echo_skb(dev, 0);
+ netif_wake_queue(dev);
+ } else if ((irq == priv->rx_irq) && bfin_read16(&reg->mbrif1)) {
+ /* receive interrupt */
+ isrc = bfin_read16(&reg->mbrif1);
+ bfin_write16(&reg->mbrif1, 0xFFFF);
+ bfin_can_rx(dev, isrc);
+ } else if ((irq == priv->err_irq) && bfin_read16(&reg->gis)) {
+ /* error interrupt */
+ isrc = bfin_read16(&reg->gis);
+ status = bfin_read16(&reg->esr);
+ bfin_write16(&reg->gis, 0x7FF);
+ bfin_can_err(dev, isrc, status);
+ } else {
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bfin_can_open(struct net_device *dev)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* set chip into reset mode */
+ bfin_can_set_reset_mode(dev);
+
+ /* common open */
+ err = open_candev(dev);
+ if (err)
+ goto exit_open;
+
+ /* register interrupt handler */
+ err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
+ "bfin-can-rx", dev);
+ if (err)
+ goto exit_rx_irq;
+ err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
+ "bfin-can-tx", dev);
+ if (err)
+ goto exit_tx_irq;
+ err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
+ "bfin-can-err", dev);
+ if (err)
+ goto exit_err_irq;
+
+ bfin_can_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+exit_err_irq:
+ free_irq(priv->tx_irq, dev);
+exit_tx_irq:
+ free_irq(priv->rx_irq, dev);
+exit_rx_irq:
+ close_candev(dev);
+exit_open:
+ return err;
+}
+
+static int bfin_can_close(struct net_device *dev)
+{
+ struct bfin_can_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ bfin_can_set_reset_mode(dev);
+
+ close_candev(dev);
+
+ free_irq(priv->rx_irq, dev);
+ free_irq(priv->tx_irq, dev);
+ free_irq(priv->err_irq, dev);
+
+ return 0;
+}
+
+struct net_device *alloc_bfin_candev(void)
+{
+ struct net_device *dev;
+ struct bfin_can_priv *priv;
+
+ dev = alloc_candev(sizeof(*priv));
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &bfin_can_bittiming_const;
+ priv->can.do_set_bittiming = bfin_can_set_bittiming;
+ priv->can.do_set_mode = bfin_can_set_mode;
+
+ return dev;
+}
+
+static const struct net_device_ops bfin_can_netdev_ops = {
+ .ndo_open = bfin_can_open,
+ .ndo_stop = bfin_can_close,
+ .ndo_start_xmit = bfin_can_start_xmit,
+};
+
+static int __devinit bfin_can_probe(struct platform_device *pdev)
+{
+ int err;
+ struct net_device *dev;
+ struct bfin_can_priv *priv;
+ struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
+ unsigned short *pdata;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ err = -EINVAL;
+ goto exit;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+ if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (!request_mem_region(res_mem->start, resource_size(res_mem),
+ dev_name(&pdev->dev))) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ /* request peripheral pins */
+ err = peripheral_request_list(pdata, dev_name(&pdev->dev));
+ if (err)
+ goto exit_mem_release;
+
+ dev = alloc_bfin_candev();
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_peri_pin_free;
+ }
+
+ priv = netdev_priv(dev);
+ priv->membase = (void __iomem *)res_mem->start;
+ priv->rx_irq = rx_irq->start;
+ priv->tx_irq = tx_irq->start;
+ priv->err_irq = err_irq->start;
+ priv->pin_list = pdata;
+ priv->can.clock.freq = get_sclk();
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ dev->flags |= IFF_ECHO; /* we support local echo */
+ dev->netdev_ops = &bfin_can_netdev_ops;
+
+ bfin_can_set_reset_mode(dev);
+
+ err = register_candev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
+ goto exit_candev_free;
+ }
+
+ dev_info(&pdev->dev,
+ "%s device registered"
+		"%s device registered "
+ "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
+ DRV_NAME, (void *)priv->membase, priv->rx_irq,
+ priv->tx_irq, priv->err_irq, priv->can.clock.freq);
+ return 0;
+
+exit_candev_free:
+ free_candev(dev);
+exit_peri_pin_free:
+ peripheral_free_list(pdata);
+exit_mem_release:
+ release_mem_region(res_mem->start, resource_size(res_mem));
+exit:
+ return err;
+}
+
+static int __devexit bfin_can_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct resource *res;
+
+ bfin_can_set_reset_mode(dev);
+
+ unregister_candev(dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ peripheral_free_list(priv->pin_list);
+
+ free_candev(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+ int timeout = BFIN_CAN_TIMEOUT;
+
+ if (netif_running(dev)) {
+ /* enter sleep mode */
+ bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) | SMR);
+ SSYNC();
+ while (!(bfin_read16(&reg->intr) & SMACK)) {
+ udelay(10);
+ if (--timeout == 0) {
+ dev_err(dev->dev.parent,
+ "fail to enter sleep mode\n");
+ BUG();
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int bfin_can_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct bfin_can_priv *priv = netdev_priv(dev);
+ struct bfin_can_regs __iomem *reg = priv->membase;
+
+ if (netif_running(dev)) {
+ /* leave sleep mode */
+ bfin_write16(&reg->intr, 0);
+ SSYNC();
+ }
+
+ return 0;
+}
+#else
+#define bfin_can_suspend NULL
+#define bfin_can_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver bfin_can_driver = {
+ .probe = bfin_can_probe,
+ .remove = __devexit_p(bfin_can_remove),
+ .suspend = bfin_can_suspend,
+ .resume = bfin_can_resume,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bfin_can_init(void)
+{
+ return platform_driver_register(&bfin_can_driver);
+}
+module_init(bfin_can_init);
+
+static void __exit bfin_can_exit(void)
+{
+ platform_driver_unregister(&bfin_can_driver);
+}
+module_exit(bfin_can_exit);
+
+MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
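
bfin_can_set_bittiming() in the new driver above packs brp - 1 into the CLOCK register and the remaining bit-timing fields into TIMING: TSEG1 - 1 (prop_seg + phase_seg1 - 1) in bits 0-3, TSEG2 - 1 (phase_seg2 - 1) in bits 4-7 and SJW - 1 from bit 8 up. A small standalone sketch with made-up bit-timing numbers, purely to show the packing arithmetic; the real values are produced by the CAN core's bit-timing calculation, not chosen by hand:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* hypothetical values within the ranges allowed by bfin_can_bittiming_const */
                unsigned brp = 8, sjw = 1, prop_seg = 3, phase_seg1 = 4, phase_seg2 = 2;

                uint16_t clk    = brp - 1;
                uint16_t timing = ((sjw - 1) << 8) | (prop_seg + phase_seg1 - 1) |
                                  ((phase_seg2 - 1) << 4);

                /* prints CLOCK=0x0007 TIMING=0x0016 */
                printf("CLOCK=0x%04x TIMING=0x%04x\n", clk, timing);
                return 0;
        }
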
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 78b1b69b292..9c5a1537939 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -403,9 +403,8 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
for (i = 1; i < RXBDAT_OFF; i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
- len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
- if (len > 8)
- len = 8;
+
+ len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
for (; i < (RXBDAT_OFF + len); i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
@@ -455,13 +454,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
}
/* Data length */
- frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
- if (frame->can_dlc > 8) {
- dev_warn(&spi->dev, "invalid frame recevied\n");
- priv->net->stats.rx_errors++;
- dev_kfree_skb(skb);
- return;
- }
+ frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
priv->net->stats.rx_packets++;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index bb06dfb58f2..07346f880ca 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -297,7 +297,8 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
frame->can_id |= can_id >> 1;
if (can_id & 1)
frame->can_id |= CAN_RTR_FLAG;
- frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;
+
+ frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf);
if (!(frame->can_id & CAN_RTR_FLAG)) {
void __iomem *data = &regs->rx.dsr1_0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index b4ba88a3107..542a4f7255b 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -293,15 +293,14 @@ static void sja1000_rx(struct net_device *dev)
uint8_t fi;
uint8_t dreg;
canid_t id;
- uint8_t dlc;
int i;
+ /* create zero'ed CAN frame buffer */
+	/* create zeroed CAN frame buffer */
skb = alloc_can_skb(dev, &cf);
if (skb == NULL)
return;
fi = priv->read_reg(priv, REG_FI);
- dlc = fi & 0x0F;
if (fi & FI_FF) {
/* extended frame format (EFF) */
@@ -318,16 +317,15 @@ static void sja1000_rx(struct net_device *dev)
| (priv->read_reg(priv, REG_ID2) >> 5);
}
- if (fi & FI_RTR)
+ if (fi & FI_RTR) {
id |= CAN_RTR_FLAG;
+ } else {
+ cf->can_dlc = get_can_dlc(fi & 0x0F);
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = priv->read_reg(priv, dreg++);
+ }
cf->can_id = id;
- cf->can_dlc = dlc;
- for (i = 0; i < dlc; i++)
- cf->data[i] = priv->read_reg(priv, dreg++);
-
- while (i < 8)
- cf->data[i++] = 0;
/* release receive buffer */
priv->write_reg(priv, REG_CMR, CMD_RRB);
@@ -335,7 +333,7 @@ static void sja1000_rx(struct net_device *dev)
netif_rx(skb);
stats->rx_packets++;
- stats->rx_bytes += dlc;
+ stats->rx_bytes += cf->can_dlc;
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 07e8016b17e..5c993c2da52 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -552,7 +552,7 @@ static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
if (data & HECC_CANMCF_RTR)
cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = data & 0xF;
+ cf->can_dlc = get_can_dlc(data & 0xF);
data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
*(u32 *)(cf->data) = cpu_to_be32(data);
if (cf->can_dlc > 4) {
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 591eb0eb1c2..efbb05c71bf 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -316,7 +316,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
return;
cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
- cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
+ cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF);
if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME ||
msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
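
The at91, mcp251x, mscan, sja1000, ti_hecc and ems_usb hunks all route the raw data-length code through get_can_dlc() instead of open-coded masking and clamping. The helper itself is not part of this excerpt; presumably it lives in include/linux/can/dev.h and simply caps the value at the CAN maximum of 8, along the lines of:

        /* Sketch of the assumed behaviour of get_can_dlc(); roughly
         * min_t(__u8, dlc, 8) in kernel terms. */
        static inline unsigned char get_can_dlc_sketch(unsigned char dlc)
        {
                return dlc > 8 ? 8 : dlc;
        }

Centralising the clamp is what lets sja1000_rx() above drop its local dlc variable; the manual zero-padding loop goes away separately because the skb returned by alloc_can_skb() already comes zeroed, per the comment added in that hunk.
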
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8c658cf6f62..109d2783e4d 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
}
__skb_pull(skb, sizeof(*p));
- st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+ st = this_cpu_ptr(sge->port_stats[p->iff]);
skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct adapter *adapter = dev->ml_priv;
struct sge *sge = adapter->sge;
- struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
- smp_processor_id());
+ struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
struct cpl_tx_pkt *cpl;
struct sk_buff *orig_skb = skb;
int ret;
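
Both chelsio sge.c hunks replace per_cpu_ptr(p, smp_processor_id()) with this_cpu_ptr(p). The two expressions resolve to the same per-CPU instance for the executing CPU; the new spelling just drops the explicit smp_processor_id() lookup. A hedged sketch of the pattern (the struct name is taken from the hunk, the alloc_percpu() setup is assumed to happen elsewhere):

        #include <linux/percpu.h>

        struct sge_port_stats;

        /* Caller must already be in a context where it cannot migrate to another
         * CPU while the returned pointer is in use, as in sge_rx()/t1_start_xmit(). */
        static struct sge_port_stats *port_stats_this_cpu(struct sge_port_stats __percpu *stats)
        {
                return this_cpu_ptr(stats);     /* was: per_cpu_ptr(stats, smp_processor_id()) */
        }
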
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index d4c6e7fcff5..4332b3a2faf 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1104,6 +1104,8 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->bnx2x_status_blk = cp->status_blk;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+ memset(cp->bnx2x_status_blk, 0, sizeof(struct host_status_block));
+
cp->l2_rx_ring_size = 15;
ret = cnic_alloc_l2_rings(dev, 4);
@@ -4183,6 +4185,12 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
msleep(10);
+
+ memset(&l5_data, 0, sizeof(l5_data));
+ cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
+ BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE |
+ (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data);
+ msleep(10);
}
}
@@ -4289,6 +4297,9 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
offsetof(struct cstorm_status_block_c,
index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
0);
+ CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+ CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0);
+ CNIC_WR16(dev, cp->kcq_io_addr, 0);
cnic_free_resc(dev);
}
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 67822238940..8d0be26f94e 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1163,7 +1163,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->dev = dev;
priv->ring_size = 64;
priv->msg_enable = netif_msg_init(debug_level, 0xff);
- memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
+ memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
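
The cpmac_probe() one-liner switches the memcpy() length from sizeof(dev->dev_addr) to sizeof(pdata->dev_addr). In kernels of this vintage net_device::dev_addr is a pointer, so sizeof() yields the pointer size rather than the MAC address length, while the platform-data field is (presumably) a fixed-size array whose sizeof() is the intended six bytes. A small userspace illustration of the difference, using hypothetical struct layouts:

        #include <stdio.h>

        /* hypothetical layouts, only to show why the two sizeof() results differ */
        struct plat_cpmac_data_sketch { unsigned char dev_addr[6]; };
        struct net_device_sketch      { unsigned char *dev_addr; };

        int main(void)
        {
                printf("sizeof(pdata->dev_addr) = %zu (array: the MAC length)\n",
                       sizeof(((struct plat_cpmac_data_sketch *)0)->dev_addr));
                printf("sizeof(dev->dev_addr)   = %zu (pointer, not the MAC length)\n",
                       sizeof(((struct net_device_sketch *)0)->dev_addr));
                return 0;
        }
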
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index cef3f882e2b..89bec9c3c14 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2860,6 +2860,7 @@ static int t3_reenable_adapter(struct adapter *adapter)
}
pci_set_master(adapter->pdev);
pci_restore_state(adapter->pdev);
+ pci_save_state(adapter->pdev);
/* Free sge resources */
t3_free_sge_resources(adapter);
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 8edac8915ea..34e03104c3c 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2272,7 +2272,7 @@ static int emac_mii_reset(struct mii_bus *bus)
unsigned int clk_div;
int mdio_bus_freq = emac_bus_frequency;
- if (mdio_max_freq & mdio_bus_freq)
+ if (mdio_max_freq && mdio_bus_freq)
clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
else
clk_div = 0xFF;
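
The davinci_emac fix turns a bitwise AND into the intended logical AND. With '&', two perfectly valid non-zero frequencies whose bit patterns happen not to overlap evaluate to 0, and the MDIO clock divider silently falls back to 0xFF. A tiny standalone demonstration with hypothetical frequencies:

        #include <stdio.h>

        int main(void)
        {
                /* hypothetical frequencies whose set bits do not overlap */
                int mdio_max_freq = 0x200000;   /* 2,097,152 Hz */
                int mdio_bus_freq = 0x100000;   /* 1,048,576 Hz */

                /* old test: bitwise AND is zero, so the divider would never be computed */
                printf("bitwise : %d\n", (mdio_max_freq & mdio_bus_freq) != 0);  /* prints 0 */
                /* new test: both operands are non-zero, so the divider is computed */
                printf("logical : %d\n", mdio_max_freq && mdio_bus_freq);        /* prints 1 */
                return 0;
        }
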
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 0cbe3c0e7c0..b3773006568 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1646,7 +1646,7 @@ dm9000_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops dm9000_drv_pm_ops = {
+static const struct dev_pm_ops dm9000_drv_pm_ops = {
.suspend = dm9000_drv_suspend,
.resume = dm9000_drv_resume,
};
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 929701ca07d..839fb2b136d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1829,6 +1829,7 @@ static int e100_alloc_cbs(struct nic *nic)
&nic->cbs_dma_addr);
if (!nic->cbs)
return -ENOMEM;
+ memset(nic->cbs, 0, count * sizeof(struct cb));
for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
@@ -1837,7 +1838,6 @@ static int e100_alloc_cbs(struct nic *nic)
cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
cb->link = cpu_to_le32(nic->cbs_dma_addr +
((i+1) % count) * sizeof(struct cb));
- cb->skb = NULL;
}
nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index c1a42cfc80b..b979464091b 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1290,7 +1290,6 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
u32 ctrl;
- u32 led_ctrl;
s32 ret_val;
ctrl = er32(CTRL);
@@ -1305,11 +1304,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
break;
case e1000_phy_igp_2:
ret_val = e1000e_copper_link_setup_igp(hw);
- /* Setup activity LED */
- led_ctrl = er32(LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- ew32(LEDCTL, led_ctrl);
break;
default:
return -E1000_ERR_PHY;
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 86d2809763c..e02e38221ed 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -74,7 +74,7 @@
#define E1000_WUS_BC E1000_WUFC_BC
/* Extended Device Control */
-#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index d2a10479460..3028f23da89 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -46,6 +46,9 @@
#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
@@ -462,28 +465,36 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return ret_val;
}
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
- * before the device has completed the "Page Select" MDI
- * transaction. So we wait 200us after each MDI command...
- */
- udelay(200);
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ udelay(200);
- /* ...and verify the command was successful. */
- ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
- if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
- ret_val = -E1000_ERR_PHY;
- e1000_release_phy_80003es2lan(hw);
- return ret_val;
- }
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ ret_val = -E1000_ERR_PHY;
+ e1000_release_phy_80003es2lan(hw);
+ return ret_val;
+ }
- udelay(200);
+ udelay(200);
- ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ udelay(200);
+ } else {
+ ret_val = e1000e_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
- udelay(200);
e1000_release_phy_80003es2lan(hw);
return ret_val;
@@ -526,28 +537,35 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return ret_val;
}
+ if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) {
+ /*
+ * The "ready" bit in the MDIC register may be incorrectly set
+ * before the device has completed the "Page Select" MDI
+ * transaction. So we wait 200us after each MDI command...
+ */
+ udelay(200);
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
- * before the device has completed the "Page Select" MDI
- * transaction. So we wait 200us after each MDI command...
- */
- udelay(200);
+ /* ...and verify the command was successful. */
+ ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
- /* ...and verify the command was successful. */
- ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+ if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+ e1000_release_phy_80003es2lan(hw);
+ return -E1000_ERR_PHY;
+ }
- if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
- e1000_release_phy_80003es2lan(hw);
- return -E1000_ERR_PHY;
- }
+ udelay(200);
- udelay(200);
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
- ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ udelay(200);
+ } else {
+ ret_val = e1000e_write_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ }
- udelay(200);
e1000_release_phy_80003es2lan(hw);
return ret_val;
@@ -866,6 +884,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
reg_data &= ~0x00100000;
E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+ /* default to true to enable the MDIC W/A */
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
+
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT,
+ &i);
+ if (!ret_val) {
+ if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
+ }
+
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index a7d08dae79c..2784cf44a6f 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -302,6 +302,8 @@ enum e1e_registers {
#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
#define E1000_KMRNCTRLSTA_REN 0x00200000
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
@@ -898,6 +900,10 @@ struct e1000_dev_spec_82571 {
u32 smb_counter;
};
+struct e1000_dev_spec_80003es2lan {
+ bool mdic_wa_enable;
+};
+
struct e1000_shadow_ram {
u16 value;
bool modified;
@@ -926,6 +932,7 @@ struct e1000_hw {
union {
struct e1000_dev_spec_82571 e82571;
+ struct e1000_dev_spec_80003es2lan e80003es2lan;
struct e1000_dev_spec_ich8lan ich8lan;
} dev_spec;
};
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 7b33be98a2c..9b09246af06 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -2755,14 +2755,16 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
* and increase the max iterations when polling the phy;
* this fixes erroneous timeouts at 10Mbps.
*/
- ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
if (ret_val)
return ret_val;
- ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+ ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
- ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+ ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ reg_data);
if (ret_val)
return ret_val;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c3105c5087e..762b697ce73 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4541,7 +4541,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
e1000_media_type_internal_serdes) {
/* keep the laser running in D3 */
ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
+ ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
ew32(CTRL_EXT, ctrl_ext);
}
diff --git a/drivers/net/ehea/ehea_hcall.h b/drivers/net/ehea/ehea_hcall.h
deleted file mode 100644
index 8e7d1c3edc6..00000000000
--- a/drivers/net/ehea/ehea_hcall.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * linux/drivers/net/ehea/ehea_hcall.h
- *
- * eHEA ethernet device driver for IBM eServer System p
- *
- * (C) Copyright IBM Corp. 2006
- *
- * Authors:
- * Christoph Raisch <raisch@de.ibm.com>
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Thomas Klein <tklein@de.ibm.com>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __EHEA_HCALL_H__
-#define __EHEA_HCALL_H__
-
-/**
- * This file contains HCALL defines that are to be included in the appropriate
- * kernel files later
- */
-
-#define H_ALLOC_HEA_RESOURCE 0x278
-#define H_MODIFY_HEA_QP 0x250
-#define H_QUERY_HEA_QP 0x254
-#define H_QUERY_HEA 0x258
-#define H_QUERY_HEA_PORT 0x25C
-#define H_MODIFY_HEA_PORT 0x260
-#define H_REG_BCMC 0x264
-#define H_DEREG_BCMC 0x268
-#define H_REGISTER_HEA_RPAGES 0x26C
-#define H_DISABLE_AND_GET_HEA 0x270
-#define H_GET_HEA_INFO 0x274
-#define H_ADD_CONN 0x284
-#define H_DEL_CONN 0x288
-
-#endif /* __EHEA_HCALL_H__ */
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index f3628c80356..2f8174c248b 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -33,7 +33,6 @@
#include <asm/hvcall.h>
#include "ehea.h"
#include "ehea_hw.h"
-#include "ehea_hcall.h"
/* Some abbreviations used here:
*
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 6407672b28e..848e8407ea8 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -85,11 +85,15 @@ MODULE_PARM_DESC(debug, "debugging messages level");
static void mpc52xx_fec_tx_timeout(struct net_device *dev)
{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
dev_warn(&dev->dev, "transmit timed out\n");
+ spin_lock_irqsave(&priv->lock, flags);
mpc52xx_fec_reset(dev);
-
dev->stats.tx_errors++;
+ spin_unlock_irqrestore(&priv->lock, flags);
netif_wake_queue(dev);
}
@@ -135,28 +139,32 @@ static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task
}
}
+static void
+mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+
+ bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
+ bd->status = FEC_RX_BUFFER_SIZE;
+ bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
+ FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
+}
+
static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
{
- while (!bcom_queue_full(rxtsk)) {
- struct sk_buff *skb;
- struct bcom_fec_bd *bd;
+ struct sk_buff *skb;
+ while (!bcom_queue_full(rxtsk)) {
skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
- if (skb == NULL)
+ if (!skb)
return -EAGAIN;
/* zero out the initial receive buffers to aid debugging */
memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
-
- bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk);
-
- bd->status = FEC_RX_BUFFER_SIZE;
- bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
- FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
-
- bcom_submit_next_buffer(rxtsk, skb);
+ mpc52xx_fec_rx_submit(dev, skb);
}
-
return 0;
}
@@ -328,13 +336,12 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
DMA_TO_DEVICE);
bcom_submit_next_buffer(priv->tx_dmatsk, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
if (bcom_queue_full(priv->tx_dmatsk)) {
netif_stop_queue(dev);
}
- spin_unlock_irqrestore(&priv->lock, flags);
-
return NETDEV_TX_OK;
}
@@ -359,9 +366,9 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ unsigned long flags;
- spin_lock(&priv->lock);
-
+ spin_lock_irqsave(&priv->lock, flags);
while (bcom_buffer_done(priv->tx_dmatsk)) {
struct sk_buff *skb;
struct bcom_fec_bd *bd;
@@ -372,11 +379,10 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
dev_kfree_skb_irq(skb);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
netif_wake_queue(dev);
- spin_unlock(&priv->lock);
-
return IRQ_HANDLED;
}
@@ -384,67 +390,60 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct sk_buff *rskb; /* received sk_buff */
+ struct sk_buff *skb; /* new sk_buff to enqueue in its place */
+ struct bcom_fec_bd *bd;
+ u32 status, physaddr;
+ int length;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
while (bcom_buffer_done(priv->rx_dmatsk)) {
- struct sk_buff *skb;
- struct sk_buff *rskb;
- struct bcom_fec_bd *bd;
- u32 status;
rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
- (struct bcom_bd **)&bd);
- dma_unmap_single(dev->dev.parent, bd->skb_pa, rskb->len,
- DMA_FROM_DEVICE);
+ (struct bcom_bd **)&bd);
+ physaddr = bd->skb_pa;
/* Test for errors in received frame */
if (status & BCOM_FEC_RX_BD_ERRORS) {
/* Drop packet and reuse the buffer */
- bd = (struct bcom_fec_bd *)
- bcom_prepare_next_buffer(priv->rx_dmatsk);
-
- bd->status = FEC_RX_BUFFER_SIZE;
- bd->skb_pa = dma_map_single(dev->dev.parent,
- rskb->data,
- FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
-
- bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
-
+ mpc52xx_fec_rx_submit(dev, rskb);
dev->stats.rx_dropped++;
-
continue;
}
/* skbs are allocated on open, so now we allocate a new one,
* and remove the old (with the packet) */
skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
- if (skb) {
- /* Process the received skb */
- int length = status & BCOM_FEC_RX_BD_LEN_MASK;
-
- skb_put(rskb, length - 4); /* length without CRC32 */
-
- rskb->dev = dev;
- rskb->protocol = eth_type_trans(rskb, dev);
-
- netif_rx(rskb);
- } else {
+ if (!skb) {
/* Can't get a new one : reuse the same & drop pkt */
- dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n");
+ dev_notice(&dev->dev, "Low memory - dropped packet.\n");
+ mpc52xx_fec_rx_submit(dev, rskb);
dev->stats.rx_dropped++;
-
- skb = rskb;
+ continue;
}
- bd = (struct bcom_fec_bd *)
- bcom_prepare_next_buffer(priv->rx_dmatsk);
+ /* Enqueue the new sk_buff back on the hardware */
+ mpc52xx_fec_rx_submit(dev, skb);
- bd->status = FEC_RX_BUFFER_SIZE;
- bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
- FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ /* Process the received skb - Drop the spin lock while
+ * calling into the network stack */
+ spin_unlock_irqrestore(&priv->lock, flags);
- bcom_submit_next_buffer(priv->rx_dmatsk, skb);
+ dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
+ DMA_FROM_DEVICE);
+ length = status & BCOM_FEC_RX_BD_LEN_MASK;
+ skb_put(rskb, length - 4); /* length without CRC32 */
+ rskb->dev = dev;
+ rskb->protocol = eth_type_trans(rskb, dev);
+ netif_rx(rskb);
+
+ spin_lock_irqsave(&priv->lock, flags);
}
+ spin_unlock_irqrestore(&priv->lock, flags);
+
return IRQ_HANDLED;
}
@@ -454,6 +453,7 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
u32 ievent;
+ unsigned long flags;
ievent = in_be32(&fec->ievent);
@@ -471,9 +471,10 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
+ spin_lock_irqsave(&priv->lock, flags);
mpc52xx_fec_reset(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
- netif_wake_queue(dev);
return IRQ_HANDLED;
}
@@ -768,6 +769,8 @@ static void mpc52xx_fec_reset(struct net_device *dev)
bcom_enable(priv->tx_dmatsk);
mpc52xx_fec_start(dev);
+
+ netif_wake_queue(dev);
}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 16def131c39..e0620d08464 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -357,8 +357,11 @@ static void gfar_init_mac(struct net_device *ndev)
/* Configure the coalescing support */
gfar_configure_coalescing(priv, 0xFF, 0xFF);
- if (priv->rx_filer_enable)
+ if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
+ /* Program the RIR0 reg with the required distribution */
+ gfar_write(&regs->rir0, DEFAULT_RIR0);
+ }
if (priv->rx_csum_enable)
rctrl |= RCTRL_CHECKSUMMING;
@@ -414,6 +417,36 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
+static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct netdev_queue *txq;
+ unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ }
+
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_dropped = rx_dropped;
+
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ txq = netdev_get_tx_queue(dev, i);
+ tx_bytes += txq->tx_bytes;
+ tx_packets += txq->tx_packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+
+ return &dev->stats;
+}
+
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
@@ -423,6 +456,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_select_queue = gfar_select_queue,
+ .ndo_get_stats = gfar_get_stats,
.ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1022,6 +1056,9 @@ static int gfar_probe(struct of_device *ofdev,
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
+ /* enable the filer if using multiple RX queues */
+ if (priv->num_rx_queues > 1)
+ priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1937,7 +1974,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- dev->stats.tx_bytes += skb->len;
+ txq->tx_bytes += skb->len;
+ txq->tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
@@ -2295,8 +2333,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->skb_dirtytx = skb_dirtytx;
tx_queue->dirty_tx = bdp;
- dev->stats.tx_packets += howmany;
-
return howmany;
}
@@ -2510,14 +2546,14 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
}
} else {
/* Increment the number of packets */
- dev->stats.rx_packets++;
+ rx_queue->stats.rx_packets++;
howmany++;
if (likely(skb)) {
pkt_len = bdp->length - ETH_FCS_LEN;
/* Remove the FCS from the packet length */
skb_put(skb, pkt_len);
- dev->stats.rx_bytes += pkt_len;
+ rx_queue->stats.rx_bytes += pkt_len;
gfar_process_frame(dev, skb, amount_pull);
@@ -2525,7 +2561,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (netif_msg_rx_err(priv))
printk(KERN_WARNING
"%s: Missing skb!\n", dev->name);
- dev->stats.rx_dropped++;
+ rx_queue->stats.rx_dropped++;
priv->extra_stats.rx_skbmissing++;
}
@@ -2644,6 +2680,7 @@ static void gfar_netpoll(struct net_device *dev)
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
&priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptTransmit);
+ }
}
}
#endif
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index cbb451011cb..3d72dc43dca 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -333,7 +333,7 @@ extern const char gfar_driver_version[];
#define IMASK_BSY 0x20000000
#define IMASK_EBERR 0x10000000
#define IMASK_MSRO 0x04000000
-#define IMASK_GRSC 0x02000000
+#define IMASK_GTSC 0x02000000
#define IMASK_BABT 0x01000000
#define IMASK_TXC 0x00800000
#define IMASK_TXEEN 0x00400000
@@ -344,7 +344,7 @@ extern const char gfar_driver_version[];
#define IMASK_XFUN 0x00010000
#define IMASK_RXB0 0x00008000
#define IMASK_MAG 0x00000800
-#define IMASK_GTSC 0x00000100
+#define IMASK_GRSC 0x00000100
#define IMASK_RXFEN0 0x00000080
#define IMASK_FIR 0x00000008
#define IMASK_FIQ 0x00000004
@@ -401,6 +401,10 @@ extern const char gfar_driver_version[];
#define FPR_FILER_MASK 0xFFFFFFFF
#define MAX_FILER_IDX 0xFF
+/* This default RIR value directly corresponds
+ * to the 3-bit hash value generated */
+#define DEFAULT_RIR0 0x05397700
+
/* RQFCR register bits */
#define RQFCR_GPI 0x80000000
#define RQFCR_HASHTBL_Q 0x00000000
@@ -936,6 +940,15 @@ struct gfar_priv_tx_q {
unsigned short txtime;
};
+/*
+ * Per RX queue stats
+ */
+struct rx_q_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_dropped;
+};
+
/**
* struct gfar_priv_rx_q - per rx queue structure
* @rxlock: per queue rx spin lock
@@ -958,6 +971,7 @@ struct gfar_priv_rx_q {
struct rxbd8 *cur_rx;
struct net_device *dev;
struct gfar_priv_grp *grp;
+ struct rx_q_stats stats;
u16 skb_currx;
u16 qindex;
unsigned int rx_ring_size;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 16349ba6873..78963a0e128 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4608,8 +4608,14 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
- if (retval)
+ if (retval) {
+ /* if receive failed revoke VF CTS stats and restart init */
dev_err(&pdev->dev, "Error receiving message from VF\n");
+ vf_data->flags &= ~IGB_VF_FLAG_CTS;
+ if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+ return;
+ goto out;
+ }
/* this is a message we already processed, do nothing */
if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
@@ -4626,12 +4632,10 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
}
if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
- msgbuf[0] = E1000_VT_MSGTYPE_NACK;
- if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
- igb_write_mbx(hw, msgbuf, 1, vf);
- vf_data->last_nack = jiffies;
- }
- return;
+ if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
+ return;
+ retval = -1;
+ goto out;
}
switch ((msgbuf[0] & 0xFFFF)) {
@@ -4656,14 +4660,14 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
break;
}
+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+out:
/* notify the VF of the results of what it sent us */
if (retval)
msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
else
msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
- msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
-
igb_write_mbx(hw, msgbuf, 1, vf);
}
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 3d1ee7a8478..a1774b29d22 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -276,6 +276,7 @@ struct igbvf_adapter {
unsigned long led_status;
unsigned int flags;
+ unsigned long last_reset;
};
struct igbvf_info {
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index a127620dc65..e9dd95f136a 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1469,6 +1469,8 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
memcpy(netdev->perm_addr, adapter->hw.mac.addr,
netdev->addr_len);
}
+
+ adapter->last_reset = jiffies;
}
int igbvf_up(struct igbvf_adapter *adapter)
@@ -1812,11 +1814,15 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
s32 ret_val = E1000_SUCCESS;
bool link_active;
+ /* If interface is down, stay link down */
+ if (test_bit(__IGBVF_DOWN, &adapter->state))
+ return false;
+
ret_val = hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
/* if check for link returns error we will need to reset */
- if (ret_val)
+ if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
schedule_work(&adapter->reset_task);
return link_active;
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index e2d5343f127..204177d78ce 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -510,6 +510,40 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_validate_link_ready - Function looks for phy link
+ * @hw: pointer to hardware structure
+ *
+ * Function indicates success when phy link is available. If phy is not ready
+ * within 5 seconds of MAC indicating link, the function returns error.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+ u32 timeout;
+ u16 an_reg;
+
+ if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+ return 0;
+
+ for (timeout = 0;
+ timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+ hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
+
+ if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
+ (an_reg & MDIO_STAT1_LSTATUS))
+ break;
+
+ msleep(100);
+ }
+
+ if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+ hw_dbg(hw, "Link was indicated but link is down\n");
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_check_mac_link_82598 - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -589,6 +623,10 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
else
*speed = IXGBE_LINK_SPEED_1GB_FULL;
+ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
+ (ixgbe_validate_link_ready(hw) != 0))
+ *link_up = false;
+
/* if link is down, zero out the current_mode */
if (*link_up == false) {
hw->fc.current_mode = ixgbe_fc_none;
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 72106898a5c..538340527aa 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -342,6 +342,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_KX4:
case IXGBE_DEV_ID_82599_KX4_MEZZ:
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+ case IXGBE_DEV_ID_82599_KR:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 06a9d18bbdb..0bd49d3b9f6 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -990,6 +990,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
char *p = NULL;
ixgbe_update_stats(adapter);
+ dev_get_stats(netdev);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
switch (ixgbe_gstrings_stats[i].type) {
case NETDEV_STATS:
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 247ed2a2476..bd64387563f 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -96,6 +96,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
@@ -435,8 +437,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
tx_ring->total_packets += total_packets;
tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
- netdev->stats.tx_bytes += total_bytes;
- netdev->stats.tx_packets += total_packets;
return (count < tx_ring->work_limit);
}
@@ -4511,6 +4511,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u64 total_mpc = 0;
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
+ u64 non_eop_descs = 0, restart_queue = 0;
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
u64 rsc_count = 0;
@@ -4528,10 +4529,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i].restart_queue;
+ adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->non_eop_descs += adapter->tx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
for (i = 0; i < 8; i++) {
@@ -5003,7 +5006,18 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
+ __be16 protocol;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+ const struct vlan_ethhdr *vhdr =
+ (const struct vlan_ethhdr *)skb->data;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ } else {
+ protocol = skb->protocol;
+ }
+
+ switch (protocol) {
case cpu_to_be16(ETH_P_IP):
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5327,6 +5341,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
+ struct netdev_queue *txq;
unsigned int first;
unsigned int tx_flags = 0;
u8 hdr_len = 0;
@@ -5424,6 +5439,9 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_ring->atr_count = 0;
}
}
+ txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
+ txq->tx_bytes += skb->len;
+ txq->tx_packets++;
ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
hdr_len);
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
@@ -5438,19 +5456,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
}
/**
- * ixgbe_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
-{
- /* only return the current stats */
- return &netdev->stats;
-}
-
-/**
* ixgbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -5580,7 +5585,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
.ndo_select_queue = ixgbe_select_queue,
- .ndo_get_stats = ixgbe_get_stats,
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_set_multicast_list = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 21b6633da57..84650c6ebe0 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -50,6 +50,7 @@
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
+#define IXGBE_DEV_ID_82599_KR 0x1517
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
@@ -840,6 +841,8 @@
#define IXGBE_MPVC 0x04318
#define IXGBE_SGMIIC 0x04314
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
/* Omer CORECTL */
#define IXGBE_CORECTL 0x014F00
/* BARCTRL */
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index eae4ad749e9..b9fcc981983 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
/* it's OK to use per_cpu_ptr() because BHs are off */
pcpu_lstats = dev->ml_priv;
- lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
+ lb_stats = this_cpu_ptr(pcpu_lstats);
len = skb->len;
if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index ad95d5f7b63..8c8515619b8 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -72,35 +72,6 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
mlx4_bitmap_free_range(bitmap, obj, 1);
}
-static unsigned long find_aligned_range(unsigned long *bitmap,
- u32 start, u32 nbits,
- int len, int align)
-{
- unsigned long end, i;
-
-again:
- start = ALIGN(start, align);
-
- while ((start < nbits) && test_bit(start, bitmap))
- start += align;
-
- if (start >= nbits)
- return -1;
-
- end = start+len;
- if (end > nbits)
- return -1;
-
- for (i = start + 1; i < end; i++) {
- if (test_bit(i, bitmap)) {
- start = i + 1;
- goto again;
- }
- }
-
- return start;
-}
-
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
u32 obj, i;
@@ -110,13 +81,13 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
spin_lock(&bitmap->lock);
- obj = find_aligned_range(bitmap->table, bitmap->last,
- bitmap->max, cnt, align);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ bitmap->last, cnt, align - 1);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
- obj = find_aligned_range(bitmap->table, 0, bitmap->max,
- cnt, align);
+ obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+ 0, cnt, align - 1);
}
if (obj < bitmap->max) {
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 3c16602172f..04f42ae1eda 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -90,6 +90,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[ 9] = "Q_Key violation counter",
[10] = "VMM",
[12] = "DPDP",
+ [15] = "Big LSO headers",
[16] = "MW support",
[17] = "APM support",
[18] = "Atomic ops support",
@@ -235,7 +236,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
dev_cap->max_mpts = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
- dev_cap->reserved_eqs = 1 << (field & 0xf);
+ dev_cap->reserved_eqs = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index f36ae691cab..015fbe785c1 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -53,7 +53,7 @@ static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
if (out_param > 2) {
mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
- return EINVAL;
+ return -EINVAL;
}
*type = out_param;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 796a493f95a..1405a170bb4 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1827,6 +1827,9 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EINVAL;
+
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
netif_addr_lock_bh(dev);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index d38921906bb..3fcb1c356e0 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.5.1-1.451"
+#define MYRI10GE_VERSION_STR "1.5.1-1.453"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -347,7 +347,7 @@ static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
-static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
+static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 80a66746051..02f8d4b4db6 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -619,17 +619,20 @@ nx_set_product_offs(struct netxen_adapter *adapter)
uint32_t i;
__le32 entries;
+ int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
+ 1 : netxen_p3_has_mn(adapter);
+
ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
if (ptab_descr == NULL)
return -1;
entries = cpu_to_le32(ptab_descr->num_entries);
+nomn:
for (i = 0; i < entries; i++) {
__le32 flags, file_chiprev, offs;
u8 chiprev = adapter->ahw.revision_id;
- int mn_present = netxen_p3_has_mn(adapter);
uint32_t flagbit;
offs = cpu_to_le32(ptab_descr->findex) +
@@ -647,6 +650,11 @@ nx_set_product_offs(struct netxen_adapter *adapter)
}
}
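+ /*
+ * No matching product table entry was found. On P3 parts that report
+ * MN, retry the table scan assuming MN is absent.
+ */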
+ if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ mn_present = 0;
+ goto nomn;
+ }
+
return -1;
}
@@ -1021,6 +1029,10 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
u32 capability, flashed_ver;
capability = 0;
+ /* NX2031 always had MN */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
netxen_rom_fast_read(adapter,
NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index e5d187fce51..6cae26a5bd6 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -57,7 +57,9 @@ static int use_msi = 1;
static int use_msi_x = 1;
-static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED;
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
static int __devinit netxen_nic_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
@@ -946,8 +948,9 @@ netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
}
+/* with rtnl_lock */
static int
-netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
{
int err;
@@ -988,14 +991,32 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
return 0;
}
+/* Usage: during resume and firmware recovery. */
+
+static inline int
+netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __netxen_nic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+/* with rtnl_lock */
static void
-netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
return;
- clear_bit(__NX_DEV_UP, &adapter->state);
+ if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
+ return;
+ smp_mb();
spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
@@ -1014,6 +1035,17 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
spin_unlock(&adapter->tx_clean_lock);
}
+/* Usage: during suspend and firmware recovery. */
+
+static inline void
+netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __netxen_nic_down(adapter, netdev);
+ rtnl_unlock();
+
+}
static int
netxen_nic_attach(struct netxen_adapter *adapter)
@@ -1122,14 +1154,14 @@ netxen_nic_reset_context(struct netxen_adapter *adapter)
netif_device_detach(netdev);
if (netif_running(netdev))
- netxen_nic_down(adapter, netdev);
+ __netxen_nic_down(adapter, netdev);
netxen_nic_detach(adapter);
if (netif_running(netdev)) {
err = netxen_nic_attach(adapter);
if (!err)
- err = netxen_nic_up(adapter, netdev);
+ err = __netxen_nic_up(adapter, netdev);
if (err)
goto done;
@@ -1499,7 +1531,7 @@ static int netxen_nic_open(struct net_device *netdev)
if (err)
return err;
- err = netxen_nic_up(adapter, netdev);
+ err = __netxen_nic_up(adapter, netdev);
if (err)
goto err_out;
@@ -1519,7 +1551,7 @@ static int netxen_nic_close(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- netxen_nic_down(adapter, netdev);
+ __netxen_nic_down(adapter, netdev);
return 0;
}
@@ -2025,7 +2057,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- if (netif_running(adapter->netdev))
+ if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
}
@@ -2210,8 +2242,7 @@ netxen_detach_work(struct work_struct *work)
netif_device_detach(netdev);
- if (netif_running(netdev))
- netxen_nic_down(adapter, netdev);
+ netxen_nic_down(adapter, netdev);
netxen_nic_detach(adapter);
@@ -2505,42 +2536,6 @@ static struct bin_attribute bin_attr_mem = {
.write = netxen_sysfs_write_mem,
};
-#ifdef CONFIG_MODULES
-static ssize_t
-netxen_store_auto_fw_reset(struct module_attribute *mattr,
- struct module *mod, const char *buf, size_t count)
-
-{
- unsigned long new;
-
- if (strict_strtoul(buf, 16, &new))
- return -EINVAL;
-
- if ((new == AUTO_FW_RESET_ENABLED) || (new == AUTO_FW_RESET_DISABLED)) {
- auto_fw_reset = new;
- return count;
- }
-
- return -EINVAL;
-}
-
-static ssize_t
-netxen_show_auto_fw_reset(struct module_attribute *mattr,
- struct module *mod, char *buf)
-
-{
- if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
- return sprintf(buf, "enabled\n");
- else
- return sprintf(buf, "disabled\n");
-}
-
-static struct module_attribute mod_attr_fw_reset = {
- .attr = {.name = "auto_fw_reset", .mode = (S_IRUGO | S_IWUSR)},
- .show = netxen_show_auto_fw_reset,
- .store = netxen_store_auto_fw_reset,
-};
-#endif
static void
netxen_create_sysfs_entries(struct netxen_adapter *adapter)
@@ -2746,23 +2741,12 @@ static struct pci_driver netxen_driver = {
static int __init netxen_init_module(void)
{
-#ifdef CONFIG_MODULES
- struct module *mod = THIS_MODULE;
-#endif
-
printk(KERN_INFO "%s\n", netxen_nic_driver_string);
#ifdef CONFIG_INET
register_netdevice_notifier(&netxen_netdev_cb);
register_inetaddr_notifier(&netxen_inetaddr_cb);
#endif
-
-#ifdef CONFIG_MODULES
- if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
- printk(KERN_ERR "%s: Failed to create auto_fw_reset "
- "sysfs entry.", netxen_nic_driver_name);
-#endif
-
return pci_register_driver(&netxen_driver);
}
@@ -2770,12 +2754,6 @@ module_init(netxen_init_module);
static void __exit netxen_exit_module(void)
{
-#ifdef CONFIG_MODULES
- struct module *mod = THIS_MODULE;
-
- sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
-#endif
-
pci_unregister_driver(&netxen_driver);
#ifdef CONFIG_INET
diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig
new file mode 100644
index 00000000000..1e56bbf3f5c
--- /dev/null
+++ b/drivers/net/octeon/Kconfig
@@ -0,0 +1,10 @@
+config OCTEON_MGMT_ETHERNET
+ tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
+ depends on CPU_CAVIUM_OCTEON
+ select PHYLIB
+ select MDIO_OCTEON
+ default y
+ help
+ This option enables the ethernet driver for the management
+ port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
+ CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 00000000000..906edecacfd
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
new file mode 100644
index 00000000000..050538bf155
--- /dev/null
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -0,0 +1,1176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/capability.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-mixx-defs.h>
+#include <asm/octeon/cvmx-agl-defs.h>
+
+#define DRV_NAME "octeon_mgmt"
+#define DRV_VERSION "2.0"
+#define DRV_DESCRIPTION \
+ "Cavium Networks Octeon MII (management) port Network Driver"
+
+#define OCTEON_MGMT_NAPI_WEIGHT 16
+
+/*
+ * Ring sizes that are powers of two allow for more efficient modulo
+ * operations.
+ */
+#define OCTEON_MGMT_RX_RING_SIZE 512
+#define OCTEON_MGMT_TX_RING_SIZE 128
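+
+/*
+ * With power-of-two ring sizes the index wrap-around, e.g.
+ * (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE, reduces to a simple mask:
+ * (p->rx_next + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1).
+ */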
+
+/* Allow room for the Ethernet header, a VLAN tag, and the FCS. */
+#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+union mgmt_port_ring_entry {
+ u64 d64;
+ struct {
+ u64 reserved_62_63:2;
+ /* Length of the buffer/packet in bytes */
+ u64 len:14;
+ /* For TX, signals that the packet should be timestamped */
+ u64 tstamp:1;
+ /* The RX error code */
+ u64 code:7;
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
+ /* Physical address of the buffer */
+ u64 addr:40;
+ } s;
+};
+
+struct octeon_mgmt {
+ struct net_device *netdev;
+ int port;
+ int irq;
+ u64 *tx_ring;
+ dma_addr_t tx_ring_handle;
+ unsigned int tx_next;
+ unsigned int tx_next_clean;
+ unsigned int tx_current_fill;
+ /* The tx_list lock also protects the ring related variables */
+ struct sk_buff_head tx_list;
+
+ /* RX variables only touched in napi_poll. No locking necessary. */
+ u64 *rx_ring;
+ dma_addr_t rx_ring_handle;
+ unsigned int rx_next;
+ unsigned int rx_next_fill;
+ unsigned int rx_current_fill;
+ struct sk_buff_head rx_list;
+
+ spinlock_t lock;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ struct device *dev;
+ struct napi_struct napi;
+ struct tasklet_struct tx_clean_tasklet;
+ struct phy_device *phydev;
+};
+
+static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
+{
+ int port = p->port;
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.s.ithena = enable ? 1 : 0;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
+{
+ int port = p->port;
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.s.othena = enable ? 1 : 0;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 0);
+}
+
+static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 0);
+}
+
+static unsigned int ring_max_fill(unsigned int ring_size)
+{
+ return ring_size - 8;
+}
+
+static unsigned int ring_size_to_bytes(unsigned int ring_size)
+{
+ return ring_size * sizeof(union mgmt_port_ring_entry);
+}
+
+static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+
+ while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
+ unsigned int size;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+
+ /* CN56XX pass 1 needs 8 bytes of padding. */
+ size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
+
+ skb = netdev_alloc_skb(netdev, size);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ __skb_queue_tail(&p->rx_list, skb);
+
+ re.d64 = 0;
+ re.s.len = size;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* Put it in the ring. */
+ p->rx_ring[p->rx_next_fill] = re.d64;
+ dma_sync_single_for_device(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->rx_next_fill =
+ (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill++;
+ /* Ring the bell. */
+ cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
+ }
+}
+
+static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
+{
+ int port = p->port;
+ union cvmx_mixx_orcnt mix_orcnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ int cleaned = 0;
+ unsigned long flags;
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ while (mix_orcnt.s.orcnt) {
+ dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ re.d64 = p->tx_ring[p->tx_next_clean];
+ p->tx_next_clean =
+ (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ skb = __skb_dequeue(&p->tx_list);
+
+ mix_orcnt.u64 = 0;
+ mix_orcnt.s.orcnt = 1;
+
+ /* Acknowledge to hardware that we have the buffer. */
+ cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
+ p->tx_current_fill--;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ cleaned++;
+
+ mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ }
+
+ if (cleaned && netif_queue_stopped(p->netdev))
+ netif_wake_queue(p->netdev);
+}
+
+static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
+{
+ struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
+ octeon_mgmt_clean_tx_buffers(p);
+ octeon_mgmt_enable_tx_irq(p);
+}
+
+static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ unsigned long flags;
+ u64 drop, bad;
+
+ /* These reads also clear the count registers. */
+ drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
+ bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
+
+ if (drop || bad) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.rx_errors += bad;
+ netdev->stats.rx_dropped += drop;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ unsigned long flags;
+
+ union cvmx_agl_gmx_txx_stat0 s0;
+ union cvmx_agl_gmx_txx_stat1 s1;
+
+ /* These reads also clear the count registers. */
+ s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
+ s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
+
+ if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
+ netdev->stats.collisions += s1.s.scol + s1.s.mcol;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+/*
+ * Dequeue a receive skb and its corresponding ring entry. The ring
+ * entry is returned, *pskb is updated to point to the skb.
+ */
+static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
+ struct sk_buff **pskb)
+{
+ union mgmt_port_ring_entry re;
+
+ dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ re.d64 = p->rx_ring[p->rx_next];
+ p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill--;
+ *pskb = __skb_dequeue(&p->rx_list);
+
+ dma_unmap_single(p->dev, re.s.addr,
+ ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
+ DMA_FROM_DEVICE);
+
+ return re.d64;
+}
+
+
+static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
+{
+ int port = p->port;
+ struct net_device *netdev = p->netdev;
+ union cvmx_mixx_ircnt mix_ircnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ struct sk_buff *skb2;
+ struct sk_buff *skb_new;
+ union mgmt_port_ring_entry re2;
+ int rc = 1;
+
+
+ re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
+ if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
+ /* A good packet, send it up. */
+ skb_put(skb, re.s.len);
+good:
+ skb->protocol = eth_type_trans(skb, netdev);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+ netdev->last_rx = jiffies;
+ netif_receive_skb(skb);
+ rc = 0;
+ } else if (re.s.code == RING_ENTRY_CODE_MORE) {
+ /*
+ * Packet split across skbs. This can happen if we
+ * increase the MTU. Buffers that are already in the
+ * rx ring can then end up being too small. As the rx
+ * ring is refilled, buffers sized for the new MTU
+ * will be used and we should go back to the normal
+ * non-split case.
+ */
+ skb_put(skb, re.s.len);
+ do {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ if (re2.s.code != RING_ENTRY_CODE_MORE
+ && re2.s.code != RING_ENTRY_CODE_DONE)
+ goto split_error;
+ skb_put(skb2, re2.s.len);
+ skb_new = skb_copy_expand(skb, 0, skb2->len,
+ GFP_ATOMIC);
+ if (!skb_new)
+ goto split_error;
+ if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
+ skb2->len))
+ goto split_error;
+ skb_put(skb_new, skb2->len);
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ skb = skb_new;
+ } while (re2.s.code == RING_ENTRY_CODE_MORE);
+ goto good;
+ } else {
+ /* Some other error, discard it. */
+ dev_kfree_skb_any(skb);
+ /*
+ * Error statistics are accumulated in
+ * octeon_mgmt_update_rx_stats.
+ */
+ }
+ goto done;
+split_error:
+ /* Discard the whole mess. */
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ while (re2.s.code == RING_ENTRY_CODE_MORE) {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ dev_kfree_skb_any(skb2);
+ }
+ netdev->stats.rx_errors++;
+
+done:
+ /* Tell the hardware we processed a packet. */
+ mix_ircnt.u64 = 0;
+ mix_ircnt.s.ircnt = 1;
+ cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
+ return rc;
+
+}
+
+static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
+{
+ int port = p->port;
+ unsigned int work_done = 0;
+ union cvmx_mixx_ircnt mix_ircnt;
+ int rc;
+
+
+ mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ while (work_done < budget && mix_ircnt.s.ircnt) {
+
+ rc = octeon_mgmt_receive_one(p);
+ if (!rc)
+ work_done++;
+
+ /* Check for more packets. */
+ mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ }
+
+ octeon_mgmt_rx_fill_ring(p->netdev);
+
+ return work_done;
+}
+
+static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
+ struct net_device *netdev = p->netdev;
+ unsigned int work_done = 0;
+
+ work_done = octeon_mgmt_receive_packets(p, budget);
+
+ if (work_done < budget) {
+ /* We stopped because no more packets were available. */
+ napi_complete(napi);
+ octeon_mgmt_enable_rx_irq(p);
+ }
+ octeon_mgmt_update_rx_stats(netdev);
+
+ return work_done;
+}
+
+/* Reset the hardware to clean state. */
+static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
+{
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_mixx_bist mix_bist;
+ union cvmx_agl_gmx_bist agl_gmx_bist;
+
+ mix_ctl.u64 = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ } while (mix_ctl.s.busy);
+ mix_ctl.s.reset = 1;
+ cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+ cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ cvmx_wait(64);
+
+ mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
+ if (mix_bist.u64)
+ dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
+ (unsigned long long)mix_bist.u64);
+
+ agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
+ if (agl_gmx_bist.u64)
+ dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
+ (unsigned long long)agl_gmx_bist.u64);
+}
+
+struct octeon_mgmt_cam_state {
+ u64 cam[6];
+ u64 cam_mask;
+ int cam_index;
+};
+
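+/*
+ * Each of the six CAM registers holds one byte of up to eight MAC
+ * addresses: byte i of the entry at cam_index lands in bits
+ * [8 * cam_index + 7 : 8 * cam_index] of cam[i], and bit cam_index of
+ * cam_mask marks the entry as valid.
+ */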
+static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
+ unsigned char *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
+ cs->cam_mask |= (1ULL << cs->cam_index);
+ cs->cam_index++;
+}
+
+static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ int i;
+ union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
+ union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
+ unsigned long flags;
+ unsigned int prev_packet_enable;
+ unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
+ unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
+ struct octeon_mgmt_cam_state cam_state;
+ struct dev_addr_list *list;
+ struct list_head *pos;
+ int available_cam_entries;
+
+ memset(&cam_state, 0, sizeof(cam_state));
+
+ if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
+ cam_mode = 0;
+ available_cam_entries = 8;
+ } else {
+ /*
+ * One CAM entry for the primary address, leaves seven
+ * for the secondary addresses.
+ */
+ available_cam_entries = 7 - netdev->dev_addrs.count;
+ }
+
+ if (netdev->flags & IFF_MULTICAST) {
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
+ || netdev->mc_count > available_cam_entries)
+ multicast_mode = 2; /* 2 - Accept all multicast. */
+ else
+ multicast_mode = 0; /* 0 - Use CAM. */
+ }
+
+ if (cam_mode == 1) {
+ /* Add primary address. */
+ octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
+ list_for_each(pos, &netdev->dev_addrs.list) {
+ struct netdev_hw_addr *hw_addr;
+ hw_addr = list_entry(pos, struct netdev_hw_addr, list);
+ octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
+ }
+ }
+ if (multicast_mode == 0) {
+ i = netdev->mc_count;
+ list = netdev->mc_list;
+ while (i--) {
+ octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
+ list = list->next;
+ }
+ }
+
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ /* Disable packet I/O. */
+ agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prev_packet_enable = agl_gmx_prtx.s.en;
+ agl_gmx_prtx.s.en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+
+ adr_ctl.u64 = 0;
+ adr_ctl.s.cam_mode = cam_mode;
+ adr_ctl.s.mcst = multicast_mode;
+ adr_ctl.s.bcst = 1; /* Allow broadcast */
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
+
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
+
+ /* Restore packet I/O. */
+ agl_gmx_prtx.s.en = prev_packet_enable;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+
+ octeon_mgmt_set_rx_filtering(netdev);
+
+ return 0;
+}
+
+static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+
+ /*
+ * Limit the MTU to make sure the ethernet packets are between
+ * 64 bytes and 16383 bytes.
+ */
+ if (size_without_fcs < 64 || size_without_fcs > 16383) {
+ dev_warn(p->dev, "MTU must be between %d and %d.\n",
+ 64 - OCTEON_MGMT_RX_HEADROOM,
+ 16383 - OCTEON_MGMT_RX_HEADROOM);
+ return -EINVAL;
+ }
+
+ netdev->mtu = new_mtu;
+
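+ /*
+ * The jabber limit programmed below is the frame size rounded up to
+ * the next multiple of 8 bytes, e.g. 1522 becomes 1528.
+ */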
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
+ (size_without_fcs + 7) & 0xfff8);
+
+ return 0;
+}
+
+static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_mixx_isr mixx_isr;
+
+ mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(CVMX_MIXX_ISR(port),
+ cvmx_read_csr(CVMX_MIXX_ISR(port)));
+ cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+ if (mixx_isr.s.irthresh) {
+ octeon_mgmt_disable_rx_irq(p);
+ napi_schedule(&p->napi);
+ }
+ if (mixx_isr.s.orthresh) {
+ octeon_mgmt_disable_tx_irq(p);
+ tasklet_schedule(&p->tx_clean_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int octeon_mgmt_ioctl(struct net_device *netdev,
+ struct ifreq *rq, int cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!p->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
+}
+
+static void octeon_mgmt_adjust_link(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+ unsigned long flags;
+ int link_changed = 0;
+
+ spin_lock_irqsave(&p->lock, flags);
+ if (p->phydev->link) {
+ if (!p->last_link)
+ link_changed = 1;
+ if (p->last_duplex != p->phydev->duplex) {
+ p->last_duplex = p->phydev->duplex;
+ prtx_cfg.u64 =
+ cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.duplex = p->phydev->duplex;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
+ prtx_cfg.u64);
+ }
+ } else {
+ if (p->last_link)
+ link_changed = -1;
+ }
+ p->last_link = p->phydev->link;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (link_changed != 0) {
+ if (link_changed > 0) {
+ netif_carrier_on(netdev);
+ pr_info("%s: Link is up - %d/%s\n", netdev->name,
+ p->phydev->speed,
+ DUPLEX_FULL == p->phydev->duplex ?
+ "Full" : "Half");
+ } else {
+ netif_carrier_off(netdev);
+ pr_info("%s: Link is down\n", netdev->name);
+ }
+ }
+}
+
+static int octeon_mgmt_init_phy(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ char phy_id[20];
+
+ if (octeon_is_simulation()) {
+ /* No PHYs in the simulator. */
+ netif_carrier_on(netdev);
+ return 0;
+ }
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);
+
+ p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(p->phydev)) {
+ p->phydev = NULL;
+ return -1;
+ }
+
+ phy_start_aneg(p->phydev);
+
+ return 0;
+}
+
+static int octeon_mgmt_open(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
+ union cvmx_mixx_oring1 oring1;
+ union cvmx_mixx_iring1 iring1;
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+ union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ union cvmx_mixx_irhwm mix_irhwm;
+ union cvmx_mixx_orhwm mix_orhwm;
+ union cvmx_mixx_intena mix_intena;
+ struct sockaddr sa;
+
+ /* Allocate ring buffers. */
+ p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->tx_ring)
+ return -ENOMEM;
+ p->tx_ring_handle =
+ dma_map_single(p->dev, p->tx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->tx_next = 0;
+ p->tx_next_clean = 0;
+ p->tx_current_fill = 0;
+
+
+ p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->rx_ring)
+ goto err_nomem;
+ p->rx_ring_handle =
+ dma_map_single(p->dev, p->rx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ p->rx_next = 0;
+ p->rx_next_fill = 0;
+ p->rx_current_fill = 0;
+
+ octeon_mgmt_reset_hw(p);
+
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+
+ /* Bring it out of reset if needed. */
+ if (mix_ctl.s.reset) {
+ mix_ctl.s.reset = 0;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ } while (mix_ctl.s.reset);
+ }
+
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
+ oring1.u64 = 0;
+ oring1.s.obase = p->tx_ring_handle >> 3;
+ oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
+ cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
+
+ iring1.u64 = 0;
+ iring1.s.ibase = p->rx_ring_handle >> 3;
+ iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
+ cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
+
+ /* Disable packet I/O. */
+ prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.en = 0;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
+ memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
+ octeon_mgmt_set_mac_address(netdev, &sa);
+
+ octeon_mgmt_change_mtu(netdev, netdev->mtu);
+
+ /*
+ * Enable the port HW. Packets are not allowed until
+ * cvmx_mgmt_port_enable() is called.
+ */
+ mix_ctl.u64 = 0;
+ mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
+ mix_ctl.s.en = 1; /* Enable the port */
+ mix_ctl.s.nbtarb = 0; /* Arbitration mode */
+ /* MII CB-request FIFO programmable high watermark */
+ mix_ctl.s.mrq_hwm = 1;
+ cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /*
+ * Force compensation values, as they are not
+ * determined properly by HW
+ */
+ union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+ drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+ if (port) {
+ drv_ctl.s.byp_en1 = 1;
+ drv_ctl.s.nctl1 = 6;
+ drv_ctl.s.pctl1 = 6;
+ } else {
+ drv_ctl.s.byp_en = 1;
+ drv_ctl.s.nctl = 6;
+ drv_ctl.s.pctl = 6;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ }
+
+ octeon_mgmt_rx_fill_ring(netdev);
+
+ /* Clear statistics. */
+ /* Clear on read. */
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
+
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
+ cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
+
+ if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
+ netdev)) {
+ dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
+ goto err_noirq;
+ }
+
+ /* Interrupt every single RX packet */
+ mix_irhwm.u64 = 0;
+ mix_irhwm.s.irhwm = 0;
+ cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
+
+ /* Interrupt when we have 5 or more packets to clean. */
+ mix_orhwm.u64 = 0;
+ mix_orhwm.s.orhwm = 5;
+ cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
+
+ /* Enable receive and transmit interrupts */
+ mix_intena.u64 = 0;
+ mix_intena.s.ithena = 1;
+ mix_intena.s.othena = 1;
+ cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+
+
+ /* Enable packet I/O. */
+
+ rxx_frm_ctl.u64 = 0;
+ rxx_frm_ctl.s.pre_align = 1;
+ /*
+ * When set, disables the length check for non-min sized pkts
+ * with padding in the client data.
+ */
+ rxx_frm_ctl.s.pad_len = 1;
+ /* When set, disables the length check for VLAN pkts */
+ rxx_frm_ctl.s.vlan_len = 1;
+ /* When set, PREAMBLE checking is less strict */
+ rxx_frm_ctl.s.pre_free = 1;
+ /* Control Pause Frames can match station SMAC */
+ rxx_frm_ctl.s.ctl_smac = 0;
+ /* Control Pause Frames can match the globally assigned multicast address */
+ rxx_frm_ctl.s.ctl_mcst = 1;
+ /* Forward pause information to TX block */
+ rxx_frm_ctl.s.ctl_bck = 1;
+ /* Drop Control Pause Frames */
+ rxx_frm_ctl.s.ctl_drp = 1;
+ /* Strip off the preamble */
+ rxx_frm_ctl.s.pre_strp = 1;
+ /*
+ * This port is configured to send PREAMBLE+SFD to begin every
+ * frame. GMX checks that the PREAMBLE is sent correctly.
+ */
+ rxx_frm_ctl.s.pre_chk = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
+
+ /* Enable the AGL block */
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
+ /* Configure the port duplex and enable bits */
+ prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.s.tx_en = 1;
+ prtx_cfg.s.rx_en = 1;
+ prtx_cfg.s.en = 1;
+ p->last_duplex = 1;
+ prtx_cfg.s.duplex = p->last_duplex;
+ cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
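+ /* Assume no link until the PHY layer reports one. */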
+ p->last_link = 0;
+ netif_carrier_off(netdev);
+
+ if (octeon_mgmt_init_phy(netdev)) {
+ dev_err(p->dev, "Cannot initialize PHY.\n");
+ goto err_noirq;
+ }
+
+ netif_wake_queue(netdev);
+ napi_enable(&p->napi);
+
+ return 0;
+err_noirq:
+ octeon_mgmt_reset_hw(p);
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+err_nomem:
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+ return -ENOMEM;
+}
+
+static int octeon_mgmt_stop(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
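+ /* Stop NAPI and the TX queue before tearing the hardware down. */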
+ napi_disable(&p->napi);
+ netif_stop_queue(netdev);
+
+ if (p->phydev)
+ phy_disconnect(p->phydev);
+
+ netif_carrier_off(netdev);
+
+ octeon_mgmt_reset_hw(p);
+
+
+ free_irq(p->irq, netdev);
+
+ /* dma_unmap is a nop on Octeon, so just free everything. */
+ skb_queue_purge(&p->tx_list);
+ skb_queue_purge(&p->rx_list);
+
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+
+
+ return 0;
+}
+
+static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int port = p->port;
+ union mgmt_port_ring_entry re;
+ unsigned long flags;
+
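+ /* Build a single ring entry for this skb and map its data for the device. */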
+ re.d64 = 0;
+ re.s.len = skb->len;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ if (unlikely(p->tx_current_fill >=
+ ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ __skb_queue_tail(&p->tx_list, skb);
+
+ /* Put it in the ring. */
+ p->tx_ring[p->tx_next] = re.d64;
+ p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ p->tx_current_fill++;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
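+ /* Flush the updated descriptor to memory before notifying the hardware. */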
+ dma_sync_single_for_device(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ /* Ring the bell. */
+ cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
+
+ netdev->trans_start = jiffies;
+ octeon_mgmt_clean_tx_buffers(p);
+ octeon_mgmt_update_tx_stats(netdev);
+ return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void octeon_mgmt_poll_controller(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ octeon_mgmt_receive_packets(p, 16);
+ octeon_mgmt_update_rx_stats(netdev);
+ return;
+}
+#endif
+
+static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strncpy(info->version, DRV_VERSION, sizeof(info->version));
+ strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ info->n_stats = 0;
+ info->testinfo_len = 0;
+ info->regdump_len = 0;
+ info->eedump_len = 0;
+}
+
+static int octeon_mgmt_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (p->phydev)
+ return phy_ethtool_gset(p->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static int octeon_mgmt_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (p->phydev)
+ return phy_ethtool_sset(p->phydev, cmd);
+
+ return -EINVAL;
+}
+
+static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
+ .get_drvinfo = octeon_mgmt_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_settings = octeon_mgmt_get_settings,
+ .set_settings = octeon_mgmt_set_settings
+};
+
+static const struct net_device_ops octeon_mgmt_ops = {
+ .ndo_open = octeon_mgmt_open,
+ .ndo_stop = octeon_mgmt_stop,
+ .ndo_start_xmit = octeon_mgmt_xmit,
+ .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
+ .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
+ .ndo_set_mac_address = octeon_mgmt_set_mac_address,
+ .ndo_do_ioctl = octeon_mgmt_ioctl,
+ .ndo_change_mtu = octeon_mgmt_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = octeon_mgmt_poll_controller,
+#endif
+};
+
+static int __init octeon_mgmt_probe(struct platform_device *pdev)
+{
+ struct resource *res_irq;
+ struct net_device *netdev;
+ struct octeon_mgmt *p;
+ int i;
+
+ netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
+ if (netdev == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, netdev);
+ p = netdev_priv(netdev);
+ netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
+
+ p->netdev = netdev;
+ p->dev = &pdev->dev;
+
+ p->port = pdev->id;
+ snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
+
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ goto err;
+
+ p->irq = res_irq->start;
+ spin_lock_init(&p->lock);
+
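+ /* Per-device state: skb lists for in-flight buffers and a tasklet for TX cleanup. */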
+ skb_queue_head_init(&p->tx_list);
+ skb_queue_head_init(&p->rx_list);
+ tasklet_init(&p->tx_clean_tasklet,
+ octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+
+ netdev->netdev_ops = &octeon_mgmt_ops;
+ netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
+
+
+ /* The mgmt ports get the first N MACs. */
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
+ netdev->dev_addr[5] += p->port;
+
+ if (p->port >= octeon_bootinfo->mac_addr_count)
+ dev_err(&pdev->dev,
+ "Error %s: Using MAC outside of the assigned range: "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
+ netdev->dev_addr[0], netdev->dev_addr[1],
+ netdev->dev_addr[2], netdev->dev_addr[3],
+ netdev->dev_addr[4], netdev->dev_addr[5]);
+
+ if (register_netdev(netdev))
+ goto err;
+
+ dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
+ return 0;
+err:
+ free_netdev(netdev);
+ return -ENOENT;
+}
+
+static int __exit octeon_mgmt_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+ return 0;
+}
+
+static struct platform_driver octeon_mgmt_driver = {
+ .driver = {
+ .name = "octeon_mgmt",
+ .owner = THIS_MODULE,
+ },
+ .probe = octeon_mgmt_probe,
+ .remove = __exit_p(octeon_mgmt_remove),
+};
+
+extern void octeon_mdiobus_force_mod_depencency(void);
+
+static int __init octeon_mgmt_mod_init(void)
+{
+ /* Force our mdiobus driver module to be loaded first. */
+ octeon_mdiobus_force_mod_depencency();
+ return platform_driver_register(&octeon_mgmt_driver);
+}
+
+static void __exit octeon_mgmt_mod_exit(void)
+{
+ platform_driver_unregister(&octeon_mgmt_driver);
+}
+
+module_init(octeon_mgmt_mod_init);
+module_exit(octeon_mgmt_mod_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 17a27225cc9..98938ea9e0b 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -912,7 +912,11 @@ static void media_check(unsigned long arg)
if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
if (!lp->fast_poll)
printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+
+ local_irq_save(flags);
el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
lp->fast_poll = HZ;
}
if (lp->fast_poll) {
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 6f8d7e2e592..322e11df009 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -711,7 +711,11 @@ static void media_check(unsigned long arg)
(inb(ioaddr + EL3_TIMER) == 0xff)) {
if (!lp->fast_poll)
printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name);
+
+ local_irq_save(flags);
el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
lp->fast_poll = HZ;
}
if (lp->fast_poll) {
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 81bafd57847..d431b59e7d1 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -270,7 +270,7 @@ static int try_io_port(struct pcmcia_device *link)
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
}
} else {
/* This should be two 16-port windows */
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 8ad8384fc1c..813aca3fc43 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -426,7 +426,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
if (link->io.NumPorts2 != 0) {
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
ret = mfc_try_io_port(link);
if (ret != 0) goto failed;
} else if (cardtype == UNGERMANN) {
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2d26b6ca28b..92ed3fbf89a 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -490,7 +490,7 @@ static int try_io_port(struct pcmcia_device *link)
/* for master/slave multifunction cards */
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
}
} else {
/* This should be two 16-port windows */
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index cc4853bc025..6dd486d2977 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -454,7 +454,7 @@ static int mhz_mfc_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ IRQ_TYPE_DYNAMIC_SHARING;
link->io.IOAddrLines = 16;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index a2eda28f903..466fc72698c 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -841,7 +841,7 @@ xirc2ps_config(struct pcmcia_device * link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status |= CCSR_AUDIO_ENA;
}
- link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED ;
+ link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts2 = 8;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
if (local->dingo) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d5d8e1c5bc9..fc5938ba3d7 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -115,4 +115,15 @@ config MDIO_GPIO
To compile this driver as a module, choose M here: the module
will be called mdio-gpio.
+config MDIO_OCTEON
+ tristate "Support for MDIO buses on Octeon SOCs"
+ depends on CPU_CAVIUM_OCTEON
+ default y
+ help
+
+ This module provides a driver for the Octeon MDIO buses.
+ It is required by the Octeon Ethernet device drivers.
+
+ If in doubt, say Y.
+
endif # PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index edfaac48cbd..1342585af38 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
+obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f63c96a4ecb..c13cf64095b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -326,7 +326,8 @@ error:
static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
{
- u32 val, orig;
+ u32 orig;
+ int val;
bool clk125en = true;
/* Abort if we are using an untested phy. */
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
new file mode 100644
index 00000000000..61a4461cbda
--- /dev/null
+++ b/drivers/net/phy/mdio-octeon.c
@@ -0,0 +1,180 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-smix-defs.h>
+
+#define DRV_VERSION "1.0"
+#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+
+struct octeon_mdiobus {
+ struct mii_bus *mii_bus;
+ int unit;
+ int phy_irq[PHY_MAX_ADDR];
+};
+
+static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct octeon_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ int timeout = 1000;
+
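+ /* Kick off a Clause 22 SMI read of the requested register. */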
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
+
+ do {
+ /*
+ * Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ cvmx_wait(1000);
+ smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit));
+ } while (smi_rd.s.pending && --timeout);
+
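+ /* The val bit is set by hardware when valid read data is present. */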
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -EIO;
+}
+
+static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
+ int regnum, u16 val)
+{
+ struct octeon_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
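+ /* Load the write data first, then issue the Clause 22 write command. */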
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
+
+ do {
+ /*
+ * Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ cvmx_wait(1000);
+ smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit));
+ } while (smi_wr.s.pending && --timeout);
+
+ if (timeout <= 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int __init octeon_mdiobus_probe(struct platform_device *pdev)
+{
+ struct octeon_mdiobus *bus;
+ int i;
+ int err = -ENOENT;
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ /* The platform_device id is our unit number. */
+ bus->unit = pdev->id;
+
+ bus->mii_bus = mdiobus_alloc();
+
+ if (!bus->mii_bus)
+ goto err;
+
+ /*
+ * Standard Octeon evaluation boards don't support phy
+ * interrupts, so we need to poll.
+ */
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bus->phy_irq[i] = PHY_POLL;
+
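+ /* Fill in the mii_bus fields before registering the bus below. */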
+ bus->mii_bus->priv = bus;
+ bus->mii_bus->irq = bus->phy_irq;
+ bus->mii_bus->name = "mdio-octeon";
+ snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit);
+ bus->mii_bus->parent = &pdev->dev;
+
+ bus->mii_bus->read = octeon_mdiobus_read;
+ bus->mii_bus->write = octeon_mdiobus_write;
+
+ dev_set_drvdata(&pdev->dev, bus);
+
+ err = mdiobus_register(bus->mii_bus);
+ if (err)
+ goto err_register;
+
+ dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
+
+ return 0;
+err_register:
+ mdiobus_free(bus->mii_bus);
+
+err:
+ devm_kfree(&pdev->dev, bus);
+ return err;
+}
+
+static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
+{
+ struct octeon_mdiobus *bus;
+
+ bus = dev_get_drvdata(&pdev->dev);
+
+ mdiobus_unregister(bus->mii_bus);
+ mdiobus_free(bus->mii_bus);
+ return 0;
+}
+
+static struct platform_driver octeon_mdiobus_driver = {
+ .driver = {
+ .name = "mdio-octeon",
+ .owner = THIS_MODULE,
+ },
+ .probe = octeon_mdiobus_probe,
+ .remove = __exit_p(octeon_mdiobus_remove),
+};
+
+void octeon_mdiobus_force_mod_depencency(void)
+{
+ /* Let ethernet drivers force us to be loaded. */
+}
+EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
+
+static int __init octeon_mdiobus_mod_init(void)
+{
+ return platform_driver_register(&octeon_mdiobus_driver);
+}
+
+static void __exit octeon_mdiobus_mod_exit(void)
+{
+ platform_driver_unregister(&octeon_mdiobus_driver);
+}
+
+module_init(octeon_mdiobus_mod_init);
+module_exit(octeon_mdiobus_mod_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index acfc5a3aa49..60f96c468a2 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -4859,7 +4859,7 @@ out:
return 0;
}
-static struct dev_pm_ops rtl8169_pm_ops = {
+static const struct dev_pm_ops rtl8169_pm_ops = {
.suspend = rtl8169_suspend,
.resume = rtl8169_resume,
.freeze = rtl8169_suspend,
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 14949bb303a..af393357979 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -47,7 +47,7 @@ static const unsigned char payload_source[ETH_ALEN] = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
-static const char *payload_msg =
+static const char payload_msg[] =
"Hello world! This is an Efx loopback test in progress!";
/**
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index c88bc101304..ca6285016df 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -84,6 +84,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.mpr = 1,
.tpauser = 1,
.hw_swap = 1,
+ .rpadir = 1,
+ .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
@@ -175,7 +177,6 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tpauser = 1,
.bculr = 1,
.hw_swap = 1,
- .rpadir = 1,
.no_trimd = 1,
.no_ade = 1,
};
@@ -501,6 +502,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
*/
mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
(((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
+ if (mdp->cd->rpadir)
+ mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
@@ -715,6 +718,8 @@ static int sh_eth_rx(struct net_device *ndev)
pkt_len + 2);
skb = mdp->rx_skbuff[entry];
mdp->rx_skbuff[entry] = NULL;
+ if (mdp->cd->rpadir)
+ skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 044e6817986..1c01b96c961 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -644,7 +644,6 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
{
u32 reg1;
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~phy_power[port];
@@ -652,7 +651,6 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
sky2_pci_read32(hw, PCI_DEV_REG1);
if (hw->chip_id == CHIP_ID_YUKON_FE)
@@ -709,11 +707,9 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
}
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 |= phy_power[port]; /* set PHY to PowerDown/COMA Mode */
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
/* Force a renegotiation */
@@ -2643,7 +2639,6 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
@@ -2651,14 +2646,12 @@ static void sky2_hw_intr(struct sky2_hw *hw)
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */
u32 err;
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
@@ -2666,7 +2659,6 @@ static void sky2_hw_intr(struct sky2_hw *hw)
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_HWE_L1_MASK)
@@ -2968,8 +2960,13 @@ static int __devinit sky2_init(struct sky2_hw *hw)
break;
case CHIP_ID_YUKON_UL_2:
+ hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_ADV_POWER_CTL;
+ break;
+
case CHIP_ID_YUKON_OPT:
hw->flags = SKY2_HW_GIGABIT
+ | SKY2_HW_NEW_LE
| SKY2_HW_ADV_POWER_CTL;
break;
@@ -3040,7 +3037,6 @@ static void sky2_reset(struct sky2_hw *hw)
}
sky2_power_on(hw);
- sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -4521,7 +4517,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
"Optima", /* 0xbc */
};
- if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
+ if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OPT)
strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
else
snprintf(buf, sz, "(chip %#x)", chipid);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index ae4983a5127..ea4fae79d6e 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -534,9 +534,9 @@ static inline void smc_rcv(struct net_device *dev)
#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
#else
-#define smc_special_trylock(lock, flags) (1)
-#define smc_special_lock(lock, flags) do { } while (0)
-#define smc_special_unlock(lock, flags) do { } while (0)
+#define smc_special_trylock(lock, flags) (flags == flags)
+#define smc_special_lock(lock, flags) do { flags = 0; } while (0)
+#define smc_special_unlock(lock, flags) do { flags = 0; } while (0)
#endif
/*
@@ -2387,7 +2387,7 @@ static int smc_drv_resume(struct device *dev)
if (ndev) {
struct smc_local *lp = netdev_priv(ndev);
- smc_enable_device(dev);
+ smc_enable_device(pdev);
if (netif_running(ndev)) {
smc_reset(ndev);
smc_enable(ndev);
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 7815bfc300f..54799544bda 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -206,21 +206,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
}
}
-#elif defined(CONFIG_ARCH_OMAP)
-
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
#elif defined(CONFIG_SH_SH4202_MICRODEV)
#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 20d6095cf41..494cd91ea39 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2154,7 +2154,7 @@ static int smsc911x_resume(struct device *dev)
return (to == 0) ? -EIO : 0;
}
-static struct dev_pm_ops smsc911x_pm_ops = {
+static const struct dev_pm_ops smsc911x_pm_ops = {
.suspend = smsc911x_suspend,
.resume = smsc911x_resume,
};
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 3b80e8d2d62..f1d64ef67ef 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -716,7 +716,7 @@ static int kaweth_open(struct net_device *net)
return 0;
err_out:
- usb_autopm_enable(kaweth->intf);
+ usb_autopm_put_interface(kaweth->intf);
return -EIO;
}
@@ -753,7 +753,7 @@ static int kaweth_close(struct net_device *net)
kaweth->status &= ~KAWETH_STATUS_CLOSING;
- usb_autopm_enable(kaweth->intf);
+ usb_autopm_put_interface(kaweth->intf);
return 0;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index b091e20ca16..f14d225404d 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
dbg("%02X:", netdev->dev_addr[i]);
dbg("%02X\n", netdev->dev_addr[i]);
/* Set the IDR registers. */
- set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr);
+ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
#ifdef EEPROM_WRITE
{
u8 cr;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 63099c58a6d..3a15de56df9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device *rcv = NULL;
struct veth_priv *priv, *rcv_priv;
struct veth_net_stats *stats, *rcv_stats;
- int length, cpu;
+ int length;
priv = netdev_priv(dev);
rcv = priv->peer;
rcv_priv = netdev_priv(rcv);
- cpu = smp_processor_id();
- stats = per_cpu_ptr(priv->stats, cpu);
- rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
+ stats = this_cpu_ptr(priv->stats);
+ rcv_stats = this_cpu_ptr(rcv_priv->stats);
if (!(rcv->flags & IFF_UP))
goto tx_drop;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 1ceb9d0f8b9..9cc438282d7 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2689,7 +2689,7 @@ vmxnet3_resume(struct device *device)
return 0;
}
-static struct dev_pm_ops vmxnet3_pm_ops = {
+static const struct dev_pm_ops vmxnet3_pm_ops = {
.suspend = vmxnet3_suspend,
.resume = vmxnet3_resume,
};
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 47e84ef355c..3b48681f8a0 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -579,7 +579,7 @@ void i2400mu_disconnect(struct usb_interface *iface)
*
* As well, the device might refuse going to sleep for whichever
* reason. In this case we just fail. For system suspend/hibernate,
- * we *can't* fail. We look at usb_dev->auto_pm to see if the
+ * we *can't* fail. We check PM_EVENT_AUTO to see if the
 * suspend call comes from the USB stack or from the system and act
 * accordingly.
*
@@ -591,14 +591,11 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
int result = 0;
struct device *dev = &iface->dev;
struct i2400mu *i2400mu = usb_get_intfdata(iface);
-#ifdef CONFIG_PM
- struct usb_device *usb_dev = i2400mu->usb_dev;
-#endif
unsigned is_autosuspend = 0;
struct i2400m *i2400m = &i2400mu->i2400m;
#ifdef CONFIG_PM
- if (usb_dev->auto_pm > 0)
+ if (pm_msg.event & PM_EVENT_AUTO)
is_autosuspend = 1;
#endif
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 81ea52c4faf..5d1c8677f18 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -97,6 +97,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int ret;
u16 val;
+ u32 cksum, offset;
/*
* Read values from EEPROM and store them in the capability structure
@@ -111,7 +112,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
return 0;
-#ifdef notyet
/*
 * Validate the checksum of the EEPROM data. There are some
* devices with invalid EEPROMs.
@@ -124,7 +124,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
return -EIO;
}
-#endif
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
ee_ant_gain);
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index d495890355d..60f547503d7 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -79,6 +79,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
/* IBM-specific AR5212 (all others) */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
+ /* Dell Vostro A860 (shahar@shahar-or.co.il) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0112), ATH_LED(3, 0) },
{ }
};
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 564c6cb1c2b..2a11cc57cee 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2078,7 +2078,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
&txq->axq_q, lastbf->list.prev);
txq->axq_depth--;
- txok = (ds->ds_txstat.ts_status == 0);
+ txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
txq->axq_tx_inprogress = false;
spin_unlock_bh(&txq->axq_lock);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 71e5c996bd0..4c41cfe44f2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1784,7 +1784,10 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
- b43_controller_restart(dev, "DMA error");
+ b43err(dev->wl, "This device does not support DMA "
+ "on your system. Please use PIO instead.\n");
+ b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
+ "your kernel configuration.\n");
return;
}
if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 675b7df632f..27ca859e745 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,7 +63,7 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
/************************
* forward declarations *
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 58b132f9cf2..00da5e152d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1353,7 +1353,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
if (priv->stations[sta_id].tid[tid].agg.state ==
IWL_EMPTYING_HW_QUEUE_ADDBA) {
IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
return 0;
}
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index b9b371bfa30..42611bea76a 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1365,7 +1365,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv)
priv->dnld_sent = DNLD_RES_RECEIVED;
/* If nothing to do, go back to sleep (?) */
- if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx])
+ if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx])
priv->psstate = PS_STATE_SLEEP;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -1439,7 +1439,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
}
/* Pending events or command responses? */
- if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
+ if (kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
allowed = 0;
lbs_deb_host("pending events or command responses\n");
}
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6a8d2b291d8..05bb298dfae 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -10,7 +10,7 @@
#include "scan.h"
#include "assoc.h"
-
+#include <linux/kfifo.h>
/** sleep_params */
struct sleep_params {
@@ -120,7 +120,7 @@ struct lbs_private {
u32 resp_len[2];
/* Events sent from hardware to driver */
- struct kfifo *event_fifo;
+ struct kfifo event_fifo;
/** thread to service interrupts */
struct task_struct *main_thread;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index db38a5a719f..c2975c8e2f2 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -459,7 +459,7 @@ static int lbs_thread(void *data)
else if (!list_empty(&priv->cmdpendingq) &&
!(priv->wakeup_dev_required))
shouldsleep = 0; /* We have a command to send */
- else if (__kfifo_len(priv->event_fifo))
+ else if (kfifo_len(&priv->event_fifo))
shouldsleep = 0; /* We have an event to process */
else
shouldsleep = 1; /* No command */
@@ -511,10 +511,13 @@ static int lbs_thread(void *data)
/* Process hardware events, e.g. card removed, link lost */
spin_lock_irq(&priv->driver_lock);
- while (__kfifo_len(priv->event_fifo)) {
+ while (kfifo_len(&priv->event_fifo)) {
u32 event;
- __kfifo_get(priv->event_fifo, (unsigned char *) &event,
- sizeof(event));
+
+ if (kfifo_out(&priv->event_fifo,
+ (unsigned char *) &event, sizeof(event)) !=
+ sizeof(event))
+ break;
spin_unlock_irq(&priv->driver_lock);
lbs_process_event(priv, event);
spin_lock_irq(&priv->driver_lock);
@@ -883,10 +886,9 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->resp_len[0] = priv->resp_len[1] = 0;
/* Create the event FIFO */
- priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL);
- if (IS_ERR(priv->event_fifo)) {
+ ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
+ if (ret) {
lbs_pr_err("Out of memory allocating event FIFO buffer\n");
- ret = -ENOMEM;
goto out;
}
@@ -901,8 +903,7 @@ static void lbs_free_adapter(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MAIN);
lbs_free_cmd_buffer(priv);
- if (priv->event_fifo)
- kfifo_free(priv->event_fifo);
+ kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
del_timer(&priv->auto_deepsleep_timer);
kfree(priv->networks);
@@ -1177,7 +1178,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event)
if (priv->psstate == PS_STATE_SLEEP)
priv->psstate = PS_STATE_AWAKE;
- __kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32));
+ kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32));
wake_up_interruptible(&priv->waitq);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 0cb5ecc822a..59d49159cf2 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -84,7 +84,8 @@ struct rxd_ops {
int rxd_size;
void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
void (*rxd_refill)(void *rxd, dma_addr_t addr, int len);
- int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status);
+ int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status,
+ __le16 *qos);
};
struct mwl8k_device_info {
@@ -184,7 +185,7 @@ struct mwl8k_priv {
/* PHY parameters */
struct ieee80211_supported_band band;
struct ieee80211_channel channels[14];
- struct ieee80211_rate rates[13];
+ struct ieee80211_rate rates[14];
bool radio_on;
bool radio_short_preamble;
@@ -220,15 +221,6 @@ struct mwl8k_vif {
u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
- /*
- * Subset of supported legacy rates.
- * Intersection of AP and STA supported rates.
- */
- struct ieee80211_rate legacy_rates[13];
-
- /* number of supported legacy rates */
- u8 legacy_nrates;
-
/* Index into station database.Returned by update_sta_db call */
u8 peer_id;
@@ -266,6 +258,11 @@ static const struct ieee80211_rate mwl8k_rates[] = {
{ .bitrate = 360, .hw_value = 72, },
{ .bitrate = 480, .hw_value = 96, },
{ .bitrate = 540, .hw_value = 108, },
+ { .bitrate = 720, .hw_value = 144, },
+};
+
+static const u8 mwl8k_rateids[12] = {
+ 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108,
};
/* Set or get info from Firmware */
@@ -574,7 +571,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
"helper image\n", pci_name(priv->pdev));
return rc;
}
- msleep(1);
+ msleep(5);
rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
} else {
@@ -591,9 +588,8 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
else
iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
- msleep(1);
- loops = 200000;
+ loops = 500000;
do {
u32 ready_code;
@@ -633,9 +629,6 @@ struct ewc_ht_info {
/* Peer Entry flags - used to define the type of the peer node */
#define MWL8K_PEER_TYPE_ACCESSPOINT 2
-#define MWL8K_IEEE_LEGACY_DATA_RATES 13
-#define MWL8K_MCS_BITMAP_SIZE 16
-
struct peer_capability_info {
/* Peer type - AP vs. STA. */
__u8 peer_type;
@@ -652,10 +645,10 @@ struct peer_capability_info {
struct ewc_ht_info ewc_info;
/* Legacy rate table. Intersection of our rates and peer rates. */
- __u8 legacy_rates[MWL8K_IEEE_LEGACY_DATA_RATES];
+ __u8 legacy_rates[12];
/* HT rate table. Intersection of our rates and peer rates. */
- __u8 ht_rates[MWL8K_MCS_BITMAP_SIZE];
+ __u8 ht_rates[16];
__u8 pad[16];
/* If set, interoperability mode, no proprietary extensions. */
@@ -706,55 +699,64 @@ static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
struct mwl8k_dma_data {
__le16 fwlen;
struct ieee80211_hdr wh;
+ char data[0];
} __attribute__((packed));
/* Routines to add/remove DMA header from skb. */
-static inline void mwl8k_remove_dma_header(struct sk_buff *skb)
+static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
{
- struct mwl8k_dma_data *tr = (struct mwl8k_dma_data *)skb->data;
- void *dst, *src = &tr->wh;
- int hdrlen = ieee80211_hdrlen(tr->wh.frame_control);
- u16 space = sizeof(struct mwl8k_dma_data) - hdrlen;
+ struct mwl8k_dma_data *tr;
+ int hdrlen;
- dst = (void *)tr + space;
- if (dst != src) {
- memmove(dst, src, hdrlen);
- skb_pull(skb, space);
+ tr = (struct mwl8k_dma_data *)skb->data;
+ hdrlen = ieee80211_hdrlen(tr->wh.frame_control);
+
+ if (hdrlen != sizeof(tr->wh)) {
+ if (ieee80211_is_data_qos(tr->wh.frame_control)) {
+ memmove(tr->data - hdrlen, &tr->wh, hdrlen - 2);
+ *((__le16 *)(tr->data - 2)) = qos;
+ } else {
+ memmove(tr->data - hdrlen, &tr->wh, hdrlen);
+ }
}
+
+ if (hdrlen != sizeof(*tr))
+ skb_pull(skb, sizeof(*tr) - hdrlen);
}
static inline void mwl8k_add_dma_header(struct sk_buff *skb)
{
struct ieee80211_hdr *wh;
- u32 hdrlen, pktlen;
+ int hdrlen;
struct mwl8k_dma_data *tr;
+ /*
+ * Add a firmware DMA header; the firmware requires that we
+ * present a 2-byte payload length followed by a 4-address
+ * header (without QoS field), followed (optionally) by any
+ * WEP/ExtIV header (but only filled in for CCMP).
+ */
wh = (struct ieee80211_hdr *)skb->data;
+
hdrlen = ieee80211_hdrlen(wh->frame_control);
- pktlen = skb->len;
+ if (hdrlen != sizeof(*tr))
+ skb_push(skb, sizeof(*tr) - hdrlen);
- /*
- * Copy up/down the 802.11 header; the firmware requires
- * we present a 2-byte payload length followed by a
- * 4-address header (w/o QoS), followed (optionally) by
- * any WEP/ExtIV header (but only filled in for CCMP).
- */
- if (hdrlen != sizeof(struct mwl8k_dma_data))
- skb_push(skb, sizeof(struct mwl8k_dma_data) - hdrlen);
+ if (ieee80211_is_data_qos(wh->frame_control))
+ hdrlen -= 2;
tr = (struct mwl8k_dma_data *)skb->data;
if (wh != &tr->wh)
memmove(&tr->wh, wh, hdrlen);
-
- /* Clear addr4 */
- memset(tr->wh.addr4, 0, ETH_ALEN);
+ if (hdrlen != sizeof(tr->wh))
+ memset(((void *)&tr->wh) + hdrlen, 0, sizeof(tr->wh) - hdrlen);
/*
* Firmware length is the length of the fully formed "802.11
* payload". That is, everything except for the 802.11 header.
* This includes all crypto material including the MIC.
*/
- tr->fwlen = cpu_to_le16(pktlen - hdrlen);
+ tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr));
}
@@ -779,6 +781,10 @@ struct mwl8k_rxd_8366 {
__u8 rx_ctrl;
} __attribute__((packed));
+#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80
+#define MWL8K_8366_RATE_INFO_40MHZ 0x40
+#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f)
+
#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80
static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
@@ -800,7 +806,8 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status)
+mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
+ __le16 *qos)
{
struct mwl8k_rxd_8366 *rxd = _rxd;
@@ -813,9 +820,11 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status)
status->signal = -rxd->rssi;
status->noise = -rxd->noise_floor;
- if (rxd->rate & 0x80) {
+ if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
- status->rate_idx = rxd->rate & 0x7f;
+ if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ)
+ status->flag |= RX_FLAG_40MHZ;
+ status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
@@ -830,6 +839,8 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status)
status->band = IEEE80211_BAND_2GHZ;
status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ *qos = rxd->qos_control;
+
return le16_to_cpu(rxd->pkt_len);
}
@@ -888,7 +899,8 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status)
+mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
+ __le16 *qos)
{
struct mwl8k_rxd_8687 *rxd = _rxd;
u16 rate_info;
@@ -903,7 +915,6 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status)
status->signal = -rxd->rssi;
status->noise = -rxd->noise_level;
- status->qual = rxd->link_quality;
status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
@@ -919,6 +930,8 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status)
status->band = IEEE80211_BAND_2GHZ;
status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ *qos = rxd->qos_control;
+
return le16_to_cpu(rxd->pkt_len);
}
@@ -1090,6 +1103,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
void *rxd;
int pkt_len;
struct ieee80211_rx_status status;
+ __le16 qos;
skb = rxq->buf[rxq->head].skb;
if (skb == NULL)
@@ -1097,7 +1111,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
- pkt_len = priv->rxd_ops->rxd_process(rxd, &status);
+ pkt_len = priv->rxd_ops->rxd_process(rxd, &status, &qos);
if (pkt_len < 0)
break;
@@ -1115,7 +1129,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rxq->rxd_count--;
skb_put(skb, pkt_len);
- mwl8k_remove_dma_header(skb);
+ mwl8k_remove_dma_header(skb, qos);
/*
* Check for a pending join operation. Save a
@@ -1221,99 +1235,106 @@ static inline void mwl8k_tx_start(struct mwl8k_priv *priv)
ioread32(priv->regs + MWL8K_HIU_INT_CODE);
}
-struct mwl8k_txq_info {
- u32 fw_owned;
- u32 drv_owned;
- u32 unused;
- u32 len;
- u32 head;
- u32 tail;
-};
-
-static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
- struct mwl8k_txq_info *txinfo)
+static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
{
- int count, desc, status;
- struct mwl8k_tx_queue *txq;
- struct mwl8k_tx_desc *tx_desc;
- int ndescs = 0;
+ struct mwl8k_priv *priv = hw->priv;
+ int i;
- memset(txinfo, 0, MWL8K_TX_QUEUES * sizeof(struct mwl8k_txq_info));
+ for (i = 0; i < MWL8K_TX_QUEUES; i++) {
+ struct mwl8k_tx_queue *txq = priv->txq + i;
+ int fw_owned = 0;
+ int drv_owned = 0;
+ int unused = 0;
+ int desc;
- for (count = 0; count < MWL8K_TX_QUEUES; count++) {
- txq = priv->txq + count;
- txinfo[count].len = txq->stats.len;
- txinfo[count].head = txq->head;
- txinfo[count].tail = txq->tail;
for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
- tx_desc = txq->txd + desc;
- status = le32_to_cpu(tx_desc->status);
+ struct mwl8k_tx_desc *tx_desc = txq->txd + desc;
+ u32 status;
+ status = le32_to_cpu(tx_desc->status);
if (status & MWL8K_TXD_STATUS_FW_OWNED)
- txinfo[count].fw_owned++;
+ fw_owned++;
else
- txinfo[count].drv_owned++;
+ drv_owned++;
if (tx_desc->pkt_len == 0)
- txinfo[count].unused++;
+ unused++;
}
- }
- return ndescs;
+ printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d "
+ "fw_owned=%d drv_owned=%d unused=%d\n",
+ wiphy_name(hw->wiphy), i,
+ txq->stats.len, txq->head, txq->tail,
+ fw_owned, drv_owned, unused);
+ }
}
/*
* Must be called with priv->fw_mutex held and tx queues stopped.
*/
+#define MWL8K_TX_WAIT_TIMEOUT_MS 1000
+
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
DECLARE_COMPLETION_ONSTACK(tx_wait);
- u32 count;
- unsigned long timeout;
+ int retry;
+ int rc;
might_sleep();
+ /*
+ * The TX queues are stopped at this point, so this test
+ * doesn't need to take ->tx_lock.
+ */
+ if (!priv->pending_tx_pkts)
+ return 0;
+
+ retry = 0;
+ rc = 0;
+
spin_lock_bh(&priv->tx_lock);
- count = priv->pending_tx_pkts;
- if (count)
- priv->tx_wait = &tx_wait;
- spin_unlock_bh(&priv->tx_lock);
+ priv->tx_wait = &tx_wait;
+ while (!rc) {
+ int oldcount;
+ unsigned long timeout;
- if (count) {
- struct mwl8k_txq_info txinfo[MWL8K_TX_QUEUES];
- int index;
- int newcount;
+ oldcount = priv->pending_tx_pkts;
+ spin_unlock_bh(&priv->tx_lock);
timeout = wait_for_completion_timeout(&tx_wait,
- msecs_to_jiffies(5000));
- if (timeout)
- return 0;
-
+ msecs_to_jiffies(MWL8K_TX_WAIT_TIMEOUT_MS));
spin_lock_bh(&priv->tx_lock);
- priv->tx_wait = NULL;
- newcount = priv->pending_tx_pkts;
- mwl8k_scan_tx_ring(priv, txinfo);
- spin_unlock_bh(&priv->tx_lock);
- printk(KERN_ERR "%s(%u) TIMEDOUT:5000ms Pend:%u-->%u\n",
- __func__, __LINE__, count, newcount);
+ if (timeout) {
+ WARN_ON(priv->pending_tx_pkts);
+ if (retry) {
+ printk(KERN_NOTICE "%s: tx rings drained\n",
+ wiphy_name(hw->wiphy));
+ }
+ break;
+ }
- for (index = 0; index < MWL8K_TX_QUEUES; index++)
- printk(KERN_ERR "TXQ:%u L:%u H:%u T:%u FW:%u "
- "DRV:%u U:%u\n",
- index,
- txinfo[index].len,
- txinfo[index].head,
- txinfo[index].tail,
- txinfo[index].fw_owned,
- txinfo[index].drv_owned,
- txinfo[index].unused);
+ if (priv->pending_tx_pkts < oldcount) {
+ printk(KERN_NOTICE "%s: timeout waiting for tx "
+ "rings to drain (%d -> %d pkts), retrying\n",
+ wiphy_name(hw->wiphy), oldcount,
+ priv->pending_tx_pkts);
+ retry = 1;
+ continue;
+ }
- return -ETIMEDOUT;
+ priv->tx_wait = NULL;
+
+ printk(KERN_ERR "%s: tx rings stuck for %d ms\n",
+ wiphy_name(hw->wiphy), MWL8K_TX_WAIT_TIMEOUT_MS);
+ mwl8k_dump_tx_rings(hw);
+
+ rc = -ETIMEDOUT;
}
+ spin_unlock_bh(&priv->tx_lock);
- return 0;
+ return rc;
}
#define MWL8K_TXD_SUCCESS(status) \
@@ -1361,7 +1382,7 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
BUG_ON(skb == NULL);
pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);
- mwl8k_remove_dma_header(skb);
+ mwl8k_remove_dma_header(skb, tx_desc->qos_control);
/* Mark descriptor as unused */
tx_desc->pkt_phys_addr = 0;
@@ -1563,8 +1584,8 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
* Command processing.
*/
-/* Timeout firmware commands after 2000ms */
-#define MWL8K_CMD_TIMEOUT_MS 2000
+/* Timeout firmware commands after 10s */
+#define MWL8K_CMD_TIMEOUT_MS 10000
static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
{
@@ -1615,12 +1636,21 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
MWL8K_CMD_TIMEOUT_MS);
rc = -ETIMEDOUT;
} else {
+ int ms;
+
+ ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout);
+
rc = cmd->result ? -EINVAL : 0;
if (rc)
printk(KERN_ERR "%s: Command %s error 0x%x\n",
wiphy_name(hw->wiphy),
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
le16_to_cpu(cmd->result));
+ else if (ms > 2000)
+ printk(KERN_NOTICE "%s: Command %s took %d ms\n",
+ wiphy_name(hw->wiphy),
+ mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
+ ms);
}
return rc;
@@ -2439,8 +2469,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
/*
* CMD_FINALIZE_JOIN.
*/
-
-/* FJ beacon buffer size is compiled into the firmware. */
#define MWL8K_FJ_BEACON_MAXLEN 128
struct mwl8k_cmd_finalize_join {
@@ -2450,17 +2478,13 @@ struct mwl8k_cmd_finalize_join {
} __attribute__((packed));
static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
- __u16 framelen, __u16 dtim)
+ int framelen, int dtim)
{
struct mwl8k_cmd_finalize_join *cmd;
struct ieee80211_mgmt *payload = frame;
- u16 hdrlen;
- u32 payload_len;
+ int payload_len;
int rc;
- if (frame == NULL)
- return -EINVAL;
-
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
@@ -2469,24 +2493,17 @@ static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
- hdrlen = ieee80211_hdrlen(payload->frame_control);
-
- payload_len = framelen > hdrlen ? framelen - hdrlen : 0;
-
- /* XXX TBD Might just have to abort and return an error */
- if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
- printk(KERN_ERR "%s(): WARNING: Incomplete beacon "
- "sent to firmware. Sz=%u MAX=%u\n", __func__,
- payload_len, MWL8K_FJ_BEACON_MAXLEN);
-
- if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
+ payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
+ if (payload_len < 0)
+ payload_len = 0;
+ else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
payload_len = MWL8K_FJ_BEACON_MAXLEN;
- if (payload && payload_len)
- memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
+ memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
+
return rc;
}
@@ -2515,9 +2532,7 @@ static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *info = &mv_vif->bss_info;
struct mwl8k_cmd_update_sta_db *cmd;
struct peer_capability_info *peer_info;
- struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
int rc;
- __u8 count, *rates;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
@@ -2536,13 +2551,11 @@ static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
/* Build peer_info block */
peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
+ memcpy(peer_info->legacy_rates, mwl8k_rateids,
+ sizeof(mwl8k_rateids));
peer_info->interop = 1;
peer_info->amsdu_enabled = 0;
- rates = peer_info->legacy_rates;
- for (count = 0; count < mv_vif->legacy_nrates; count++)
- rates[count] = bitrates[count].hw_value;
-
rc = mwl8k_post_cmd(hw, &cmd->header);
if (rc == 0)
mv_vif->peer_id = peer_info->station_id;
@@ -2565,8 +2578,6 @@ static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
/*
* CMD_SET_AID.
*/
-#define MWL8K_RATE_INDEX_MAX_ARRAY 14
-
#define MWL8K_FRAME_PROT_DISABLED 0x00
#define MWL8K_FRAME_PROT_11G 0x07
#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
@@ -2579,7 +2590,7 @@ struct mwl8k_cmd_update_set_aid {
/* AP's MAC address (BSSID) */
__u8 bssid[ETH_ALEN];
__le16 protection_mode;
- __u8 supp_rates[MWL8K_RATE_INDEX_MAX_ARRAY];
+ __u8 supp_rates[14];
} __attribute__((packed));
static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
@@ -2588,8 +2599,6 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
struct ieee80211_bss_conf *info = &mv_vif->bss_info;
struct mwl8k_cmd_update_set_aid *cmd;
- struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
- int count;
u16 prot_mode;
int rc;
@@ -2621,8 +2630,7 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
}
cmd->protection_mode = cpu_to_le16(prot_mode);
- for (count = 0; count < mv_vif->legacy_nrates; count++)
- cmd->supp_rates[count] = bitrates[count].hw_value;
+ memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2635,20 +2643,17 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
*/
struct mwl8k_cmd_update_rateset {
struct mwl8k_cmd_pkt header;
- __u8 legacy_rates[MWL8K_RATE_INDEX_MAX_ARRAY];
+ __u8 legacy_rates[14];
/* Bitmap for supported MCS codes. */
- __u8 mcs_set[MWL8K_IEEE_LEGACY_DATA_RATES];
- __u8 reserved[MWL8K_IEEE_LEGACY_DATA_RATES];
+ __u8 mcs_set[16];
+ __u8 reserved[16];
} __attribute__((packed));
static int mwl8k_update_rateset(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
struct mwl8k_cmd_update_rateset *cmd;
- struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
- int count;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2657,9 +2662,7 @@ static int mwl8k_update_rateset(struct ieee80211_hw *hw,
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
-
- for (count = 0; count < mv_vif->legacy_nrates; count++)
- cmd->legacy_rates[count] = bitrates[count].hw_value;
+ memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2932,11 +2935,6 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
/* Back pointer to parent config block */
mwl8k_vif->priv = priv;
- /* Setup initial PHY parameters */
- memcpy(mwl8k_vif->legacy_rates,
- priv->rates, sizeof(mwl8k_vif->legacy_rates));
- mwl8k_vif->legacy_nrates = ARRAY_SIZE(priv->rates);
-
/* Set Initial sequence number to zero */
mwl8k_vif->seqno = 0;
@@ -3014,9 +3012,6 @@ static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
int rc;
- if (changed & BSS_CHANGED_BSSID)
- memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN);
-
if ((changed & BSS_CHANGED_ASSOC) == 0)
return;
@@ -3030,6 +3025,8 @@ static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
memcpy(&mwl8k_vif->bss_info, info,
sizeof(struct ieee80211_bss_conf));
+ memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN);
+
/* Install rates */
rc = mwl8k_update_rateset(hw, vif);
if (rc)
@@ -3366,7 +3363,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
MWL8K_NAME);
- return rc;
+ goto err_disable_device;
}
pci_set_master(pdev);
@@ -3597,6 +3594,8 @@ err_iounmap:
err_free_reg:
pci_release_regions(pdev);
+
+err_disable_device:
pci_disable_device(pdev);
return rc;
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index 84200da900b..fb157eb889c 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -427,7 +427,7 @@ int hermesi_program_init(hermes_t *hw, u32 offset)
if (err)
return err;
- pr_debug(KERN_DEBUG PFX "Enabling volatile, EP 0x%08x\n", offset);
+ pr_debug(PFX "Enabling volatile, EP 0x%08x\n", offset);
err = hermes_doicmd_wait(hw,
HERMES_PROGRAM_ENABLE_VOLATILE,
offset & 0xFFFFu,
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index abb4907cf29..6af0f3f71f3 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -23,6 +23,7 @@
#define RTL8187_EEPROM_TXPWR_CHAN_1 0x16 /* 3 channels */
#define RTL8187_EEPROM_TXPWR_CHAN_6 0x1B /* 2 channels */
#define RTL8187_EEPROM_TXPWR_CHAN_4 0x3D /* 2 channels */
+#define RTL8187_EEPROM_SELECT_GPIO 0x3B
#define RTL8187_REQT_READ 0xC0
#define RTL8187_REQT_WRITE 0x40
@@ -31,6 +32,9 @@
#define RTL8187_MAX_RX 0x9C4
+#define RFKILL_MASK_8187_89_97 0x2
+#define RFKILL_MASK_8198 0x4
+
struct rtl8187_rx_info {
struct urb *urb;
struct ieee80211_hw *dev;
@@ -104,6 +108,7 @@ struct rtl8187_priv {
struct delayed_work work;
struct ieee80211_hw *dev;
#ifdef CONFIG_RTL8187_LEDS
+ struct rtl8187_led led_radio;
struct rtl8187_led led_tx;
struct rtl8187_led led_rx;
struct delayed_work led_on;
@@ -122,6 +127,7 @@ struct rtl8187_priv {
u8 noise;
u8 slot_time;
u8 aifsn[4];
+ u8 rfkill_mask;
struct {
__le64 buf;
struct sk_buff_head queue;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 76973b8c709..bc5726dd5fe 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1322,6 +1322,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
struct ieee80211_channel *channel;
const char *chip_name;
u16 txpwr, reg;
+ u16 product_id = le16_to_cpu(udev->descriptor.idProduct);
int err, i;
dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
@@ -1481,6 +1482,13 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
(*channel++).hw_value = txpwr & 0xFF;
(*channel++).hw_value = txpwr >> 8;
}
+ /* Handle the differing rfkill GPIO bit in different models */
+ priv->rfkill_mask = RFKILL_MASK_8187_89_97;
+ if (product_id == 0x8197 || product_id == 0x8198) {
+ eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_SELECT_GPIO, &reg);
+ if (reg & 0xFF00)
+ priv->rfkill_mask = RFKILL_MASK_8198;
+ }
/*
* XXX: Once this driver supports anything that requires
@@ -1509,9 +1517,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
mutex_init(&priv->conf_mutex);
skb_queue_head_init(&priv->b_tx_status.queue);
- printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s\n",
+ printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
- chip_name, priv->asic_rev, priv->rf->name);
+ chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
#ifdef CONFIG_RTL8187_LEDS
eeprom_93cx6_read(&eeprom, 0x3F, &reg);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index cf8a4a40fdf..ded44c045eb 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -105,19 +105,36 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
struct rtl8187_led *led = container_of(led_dev, struct rtl8187_led,
led_dev);
struct ieee80211_hw *hw = led->dev;
- struct rtl8187_priv *priv = hw->priv;
+ struct rtl8187_priv *priv;
+ static bool radio_on;
- if (brightness == LED_OFF) {
- ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
- /* The LED is off for 1/20 sec so that it just blinks. */
- ieee80211_queue_delayed_work(hw, &priv->led_on, HZ / 20);
- } else
- ieee80211_queue_delayed_work(hw, &priv->led_on, 0);
+ if (!hw)
+ return;
+ priv = hw->priv;
+ if (led->is_radio) {
+ if (brightness == LED_FULL) {
+ ieee80211_queue_delayed_work(hw, &priv->led_on, 0);
+ radio_on = true;
+ } else if (radio_on) {
+ radio_on = false;
+ cancel_delayed_work_sync(&priv->led_on);
+ ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
+ }
+ } else if (radio_on) {
+ if (brightness == LED_OFF) {
+ ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
+ /* The LED is off for 1/20 sec - it just blinks. */
+ ieee80211_queue_delayed_work(hw, &priv->led_on,
+ HZ / 20);
+ } else
+ ieee80211_queue_delayed_work(hw, &priv->led_on, 0);
+ }
}
static int rtl8187_register_led(struct ieee80211_hw *dev,
struct rtl8187_led *led, const char *name,
- const char *default_trigger, u8 ledpin)
+ const char *default_trigger, u8 ledpin,
+ bool is_radio)
{
int err;
struct rtl8187_priv *priv = dev->priv;
@@ -128,6 +145,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
return -EINVAL;
led->dev = dev;
led->ledpin = ledpin;
+ led->is_radio = is_radio;
strncpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
@@ -145,7 +163,11 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
static void rtl8187_unregister_led(struct rtl8187_led *led)
{
+ struct ieee80211_hw *hw = led->dev;
+ struct rtl8187_priv *priv = hw->priv;
+
led_classdev_unregister(&led->led_dev);
+ flush_delayed_work(&priv->led_off);
led->dev = NULL;
}
@@ -183,33 +205,37 @@ void rtl8187_leds_init(struct ieee80211_hw *dev, u16 custid)
INIT_DELAYED_WORK(&priv->led_off, led_turn_off);
snprintf(name, sizeof(name),
+ "rtl8187-%s::radio", wiphy_name(dev->wiphy));
+ err = rtl8187_register_led(dev, &priv->led_radio, name,
+ ieee80211_get_radio_led_name(dev), ledpin, true);
+ if (err)
+ return;
+
+ snprintf(name, sizeof(name),
"rtl8187-%s::tx", wiphy_name(dev->wiphy));
err = rtl8187_register_led(dev, &priv->led_tx, name,
- ieee80211_get_tx_led_name(dev), ledpin);
+ ieee80211_get_tx_led_name(dev), ledpin, false);
if (err)
- goto error;
+ goto err_tx;
+
snprintf(name, sizeof(name),
"rtl8187-%s::rx", wiphy_name(dev->wiphy));
err = rtl8187_register_led(dev, &priv->led_rx, name,
- ieee80211_get_rx_led_name(dev), ledpin);
- if (!err) {
- ieee80211_queue_delayed_work(dev, &priv->led_on, 0);
+ ieee80211_get_rx_led_name(dev), ledpin, false);
+ if (!err)
return;
- }
- /* registration of RX LED failed - unregister TX */
+
+ /* registration of RX LED failed - unregister */
rtl8187_unregister_led(&priv->led_tx);
-error:
- /* If registration of either failed, cancel delayed work */
- cancel_delayed_work_sync(&priv->led_off);
- cancel_delayed_work_sync(&priv->led_on);
+err_tx:
+ rtl8187_unregister_led(&priv->led_radio);
}
void rtl8187_leds_exit(struct ieee80211_hw *dev)
{
struct rtl8187_priv *priv = dev->priv;
- /* turn the LED off before exiting */
- ieee80211_queue_delayed_work(dev, &priv->led_off, 0);
+ rtl8187_unregister_led(&priv->led_radio);
rtl8187_unregister_led(&priv->led_rx);
rtl8187_unregister_led(&priv->led_tx);
cancel_delayed_work_sync(&priv->led_off);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187_leds.h
index a0332027aea..efe8041bdda 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.h
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.h
@@ -47,6 +47,8 @@ struct rtl8187_led {
u8 ledpin;
/* The unique name string for this LED device. */
char name[RTL8187_LED_MAX_NAME_LEN + 1];
+ /* If the LED is radio or tx/rx */
+ bool is_radio;
};
void rtl8187_leds_init(struct ieee80211_hw *dev, u16 code);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
index cad8037ab2a..03555e1e0ca 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
@@ -25,10 +25,10 @@ static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
u8 gpio;
gpio = rtl818x_ioread8(priv, &priv->map->GPIO0);
- rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~0x02);
+ rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~priv->rfkill_mask);
gpio = rtl818x_ioread8(priv, &priv->map->GPIO1);
- return gpio & 0x02;
+ return gpio & priv->rfkill_mask;
}
void rtl8187_rfkill_init(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index ff4be7bf5d3..2f50a256efa 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -629,10 +629,6 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
goto out_sleep;
}
- ret = wl1251_build_null_data(wl);
- if (ret < 0)
- goto out_sleep;
-
if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm enabled");
@@ -1110,6 +1106,21 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (changed & BSS_CHANGED_BSSID) {
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+
+ ret = wl1251_build_null_data(wl);
+ if (ret < 0)
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_IBSS) {
+ ret = wl1251_join(wl, wl->bss_type, wl->channel,
+ wl->beacon_int, wl->dtim_period);
+ if (ret < 0)
+ goto out_sleep;
+ }
+ }
+
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
@@ -1169,23 +1180,6 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BSSID) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
- ret = wl1251_build_null_data(wl);
- if (ret < 0)
- goto out;
-
- if (wl->bss_type != BSS_TYPE_IBSS) {
- ret = wl1251_join(wl, wl->bss_type, wl->channel,
- wl->beacon_int, wl->dtim_period);
- if (ret < 0)
- goto out_sleep;
- wl1251_warning("Set ctsprotect failed %d", ret);
- goto out_sleep;
- }
- }
-
if (changed & BSS_CHANGED_BEACON) {
beacon = ieee80211_beacon_get(hw, vif);
ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index baa051d5bfb..a869b45d3d3 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -42,6 +42,7 @@
#include <linux/mm.h>
#include <net/ip.h>
+#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 298de0f95d7..d58ade170c4 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -65,47 +65,322 @@ static int of_platform_device_remove(struct device *dev)
return 0;
}
-static int of_platform_device_suspend(struct device *dev, pm_message_t state)
+static void of_platform_device_shutdown(struct device *dev)
{
struct of_device *of_dev = to_of_device(dev);
struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
- int error = 0;
- if (dev->driver && drv->suspend)
- error = drv->suspend(of_dev, state);
- return error;
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(of_dev);
}
-static int of_platform_device_resume(struct device * dev)
+#ifdef CONFIG_PM_SLEEP
+
+static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{
struct of_device *of_dev = to_of_device(dev);
struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
- int error = 0;
+ int ret = 0;
- if (dev->driver && drv->resume)
- error = drv->resume(of_dev);
- return error;
+ if (dev->driver && drv->suspend)
+ ret = drv->suspend(of_dev, mesg);
+ return ret;
}
-static void of_platform_device_shutdown(struct device *dev)
+static int of_platform_legacy_resume(struct device *dev)
{
struct of_device *of_dev = to_of_device(dev);
struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+ int ret = 0;
- if (dev->driver && drv->shutdown)
- drv->shutdown(of_dev);
+ if (dev->driver && drv->resume)
+ ret = drv->resume(of_dev);
+ return ret;
+}
+
+static int of_platform_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+static void of_platform_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#ifdef CONFIG_SUSPEND
+
+static int of_platform_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = of_platform_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
}
+static int of_platform_pm_suspend_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend_noirq)
+ ret = drv->pm->suspend_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = of_platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_resume_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume_noirq)
+ ret = drv->pm->resume_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define of_platform_pm_suspend NULL
+#define of_platform_pm_resume NULL
+#define of_platform_pm_suspend_noirq NULL
+#define of_platform_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int of_platform_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = of_platform_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_freeze_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze_noirq)
+ ret = drv->pm->freeze_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = of_platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_thaw_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw_noirq)
+ ret = drv->pm->thaw_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = of_platform_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff_noirq)
+ ret = drv->pm->poweroff_noirq(dev);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = of_platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+static int of_platform_pm_restore_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore_noirq)
+ ret = drv->pm->restore_noirq(dev);
+ }
+
+ return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define of_platform_pm_freeze NULL
+#define of_platform_pm_thaw NULL
+#define of_platform_pm_poweroff NULL
+#define of_platform_pm_restore NULL
+#define of_platform_pm_freeze_noirq NULL
+#define of_platform_pm_thaw_noirq NULL
+#define of_platform_pm_poweroff_noirq NULL
+#define of_platform_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+static struct dev_pm_ops of_platform_dev_pm_ops = {
+ .prepare = of_platform_pm_prepare,
+ .complete = of_platform_pm_complete,
+ .suspend = of_platform_pm_suspend,
+ .resume = of_platform_pm_resume,
+ .freeze = of_platform_pm_freeze,
+ .thaw = of_platform_pm_thaw,
+ .poweroff = of_platform_pm_poweroff,
+ .restore = of_platform_pm_restore,
+ .suspend_noirq = of_platform_pm_suspend_noirq,
+ .resume_noirq = of_platform_pm_resume_noirq,
+ .freeze_noirq = of_platform_pm_freeze_noirq,
+ .thaw_noirq = of_platform_pm_thaw_noirq,
+ .poweroff_noirq = of_platform_pm_poweroff_noirq,
+ .restore_noirq = of_platform_pm_restore_noirq,
+};
+
+#define OF_PLATFORM_PM_OPS_PTR (&of_platform_dev_pm_ops)
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define OF_PLATFORM_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
int of_bus_type_init(struct bus_type *bus, const char *name)
{
bus->name = name;
bus->match = of_platform_bus_match;
bus->probe = of_platform_device_probe;
bus->remove = of_platform_device_remove;
- bus->suspend = of_platform_device_suspend;
- bus->resume = of_platform_device_resume;
bus->shutdown = of_platform_device_shutdown;
bus->dev_attrs = of_platform_device_attrs;
+ bus->pm = OF_PLATFORM_PM_OPS_PTR;
return bus_register(bus);
}
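
The of/platform change above drops the bus-level suspend/resume callbacks in favour of a dev_pm_ops table that falls back to a driver's legacy suspend()/resume() hooks when no pm ops are supplied. For a driver that wants to move off the legacy path, the usual shape is to provide its own dev_pm_ops and point the embedded device_driver at it; a minimal sketch, with hypothetical foo_* names that are not part of this patch set:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical driver: illustrates the dev_pm_ops shape only. */
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware, save any volatile state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore state, re-enable the hardware */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,
	},
};

The sketch only shows the shape of the table; an of_platform_driver would expose it through its embedded .driver.pm field in the same way, and the bus code added above then prefers it over the legacy suspend/resume wrappers.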
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a7aae24f288..166b67ea622 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -47,7 +47,7 @@
*/
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
static void wq_sync_buffer(struct work_struct *work);
@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
- struct oprofile_cpu_buffer *cpu_buf
- = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
cpu_buf->sample_lost_overflow++;
}
@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void)
goto fail;
for_each_possible_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
b->last_task = NULL;
b->last_is_kernel = -1;
@@ -122,7 +121,7 @@ void start_cpu_work(void)
work_enabled = 1;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
/*
* Spread the work by 1 jiffy per cpu so they dont all
@@ -139,7 +138,7 @@ void end_cpu_work(void)
work_enabled = 0;
for_each_online_cpu(i) {
- struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+ struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
cancel_delayed_work(&b->work);
}
@@ -330,7 +329,7 @@ static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
unsigned long backtrace = oprofile_backtrace_depth;
/*
@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
{
struct op_sample *sample;
int is_kernel = !user_mode(regs);
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
cpu_buf->sample_received++;
@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry)
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
log_sample(cpu_buf, pc, 0, is_kernel, event);
}
void oprofile_add_trace(unsigned long pc)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
if (!cpu_buf->tracing)
return;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 272995d2029..68ea16ab645 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
struct delayed_work work;
};
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
/*
* Resets the cpu buffer to a sane state.
@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
*/
static inline void op_cpu_buffer_reset(int cpu)
{
- struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+ struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
cpu_buf->last_is_kernel = -1;
cpu_buf->last_task = NULL;
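
The oprofile hunks above rename the per-CPU buffer from cpu_buffer to op_cpu_buffer; DEFINE_PER_CPU creates an ordinary global symbol, so a subsystem-prefixed name avoids collisions with identically named per-CPU variables elsewhere. For reference, a minimal sketch of the accessors involved, using a hypothetical counter rather than this driver's struct:

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU counter; the prefix keeps the global symbol unique. */
DEFINE_PER_CPU(unsigned long, foo_event_count);

static void foo_record_event(void)
{
	get_cpu_var(foo_event_count)++;	/* disables preemption around the access */
	put_cpu_var(foo_event_count);
}

static unsigned long foo_total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)	/* per_cpu() addresses a specific CPU's copy */
		sum += per_cpu(foo_event_count, cpu);
	return sum;
}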
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 61689e814d4..917d28ebeac 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
int i;
for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(cpu_buffer, i);
+ cpu_buf = &per_cpu(op_cpu_buffer, i);
cpu_buf->sample_received = 0;
cpu_buf->sample_lost_overflow = 0;
cpu_buf->backtrace_aborted = 0;
@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
return;
for_each_possible_cpu(i) {
- cpu_buf = &per_cpu(cpu_buffer, i);
+ cpu_buf = &per_cpu(op_cpu_buffer, i);
snprintf(buf, 10, "cpu%d", i);
cpudir = oprofilefs_mkdir(sb, dir, buf);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index d69bde6a234..c542c7bb745 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -354,7 +354,7 @@ static unsigned int dino_startup_irq(unsigned int irq)
}
static struct irq_chip dino_interrupt_type = {
- .typename = "GSC-PCI",
+ .name = "GSC-PCI",
.startup = dino_startup_irq,
.shutdown = dino_disable_irq,
.enable = dino_enable_irq,
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 51220749cb6..46f503fb7fc 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -189,7 +189,7 @@ static unsigned int eisa_startup_irq(unsigned int irq)
}
static struct irq_chip eisa_interrupt_type = {
- .typename = "EISA",
+ .name = "EISA",
.startup = eisa_startup_irq,
.shutdown = eisa_disable_irq,
.enable = eisa_enable_irq,
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index 647adc9f85a..c4e1f3c3c2f 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -149,7 +149,7 @@ static unsigned int gsc_asic_startup_irq(unsigned int irq)
}
static struct irq_chip gsc_asic_interrupt_type = {
- .typename = "GSC-ASIC",
+ .name = "GSC-ASIC",
.startup = gsc_asic_startup_irq,
.shutdown = gsc_asic_disable_irq,
.enable = gsc_asic_enable_irq,
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 88e33355321..c76836727ca 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -730,7 +730,7 @@ static int iosapic_set_affinity_irq(unsigned int irq,
#endif
static struct irq_chip iosapic_interrupt_type = {
- .typename = "IO-SAPIC-level",
+ .name = "IO-SAPIC-level",
.startup = iosapic_startup_irq,
.shutdown = iosapic_disable_irq,
.enable = iosapic_enable_irq,
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 79caf1ca4a2..188bc8496a2 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -38,6 +38,7 @@
#include <linux/kernel_stat.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/blkdev.h>
#include <linux/workqueue.h>
@@ -147,41 +148,34 @@ device_initcall(start_task);
static void (*led_func_ptr) (unsigned char) __read_mostly;
#ifdef CONFIG_PROC_FS
-static int led_proc_read(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int led_proc_show(struct seq_file *m, void *v)
{
- char *out = page;
- int len;
-
- switch ((long)data)
+ switch ((long)m->private)
{
case LED_NOLCD:
- out += sprintf(out, "Heartbeat: %d\n", led_heartbeat);
- out += sprintf(out, "Disk IO: %d\n", led_diskio);
- out += sprintf(out, "LAN Rx/Tx: %d\n", led_lanrxtx);
+ seq_printf(m, "Heartbeat: %d\n", led_heartbeat);
+ seq_printf(m, "Disk IO: %d\n", led_diskio);
+ seq_printf(m, "LAN Rx/Tx: %d\n", led_lanrxtx);
break;
case LED_HASLCD:
- out += sprintf(out, "%s\n", lcd_text);
+ seq_printf(m, "%s\n", lcd_text);
break;
default:
- *eof = 1;
return 0;
}
+ return 0;
+}
- len = out - page - off;
- if (len < count) {
- *eof = 1;
- if (len <= 0) return 0;
- } else {
- len = count;
- }
- *start = page + off;
- return len;
+static int led_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, led_proc_show, PDE(inode)->data);
}
-static int led_proc_write(struct file *file, const char *buf,
- unsigned long count, void *data)
+
+static ssize_t led_proc_write(struct file *file, const char *buf,
+ size_t count, loff_t *pos)
{
+ void *data = PDE(file->f_path.dentry->d_inode)->data;
char *cur, lbuf[count + 1];
int d;
@@ -234,6 +228,15 @@ parse_error:
return -EINVAL;
}
+static const struct file_operations led_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = led_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = led_proc_write,
+};
+
static int __init led_create_procfs(void)
{
struct proc_dir_entry *proc_pdc_root = NULL;
@@ -243,19 +246,15 @@ static int __init led_create_procfs(void)
proc_pdc_root = proc_mkdir("pdc", 0);
if (!proc_pdc_root) return -1;
- ent = create_proc_entry("led", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root);
+ ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
+ &led_proc_fops, (void *)LED_NOLCD); /* LED */
if (!ent) return -1;
- ent->data = (void *)LED_NOLCD; /* LED */
- ent->read_proc = led_proc_read;
- ent->write_proc = led_proc_write;
if (led_type == LED_HASLCD)
{
- ent = create_proc_entry("lcd", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root);
+ ent = proc_create_data("lcd", S_IRUGO|S_IWUSR, proc_pdc_root,
+ &led_proc_fops, (void *)LED_HASLCD); /* LCD */
if (!ent) return -1;
- ent->data = (void *)LED_HASLCD; /* LCD */
- ent->read_proc = led_proc_read;
- ent->write_proc = led_proc_write;
}
return 0;
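
The led.c hunks above convert the old create_proc_entry()/read_proc/write_proc interface to a seq_file-backed file_operations registered with proc_create_data(), with the per-entry pointer recovered through m->private and PDE(). Stripped of the LED specifics, the pattern looks roughly like this (hypothetical foo names, read side only):

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical /proc/foo entry built on single_open(). */
static int foo_proc_show(struct seq_file *m, void *v)
{
	/* the data pointer handed to proc_create_data() comes back as m->private */
	seq_printf(m, "mode: %ld\n", (long)m->private);
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_proc_show, PDE(inode)->data);
}

static const struct file_operations foo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init foo_proc_init(void)
{
	return proc_create_data("foo", S_IRUGO, NULL, &foo_proc_fops,
				(void *)1L) ? 0 : -ENOMEM;
}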
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 13a64bc081b..0bc5d474b16 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -779,12 +779,9 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
read_unlock(&pathentry->rw_lock);
DPRINTK("%s: flags before: 0x%X\n", __func__, flags);
-
- temp = in;
-
- while (*temp && isspace(*temp))
- temp++;
-
+
+ temp = skip_spaces(in);
+
c = *temp++ - '0';
if ((c != 0) && (c != 1))
goto parse_error;
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 675f04e6597..a35c9c5b89e 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -326,7 +326,7 @@ static unsigned int superio_startup_irq(unsigned int irq)
}
static struct irq_chip superio_interrupt_type = {
- .typename = SUPERIO,
+ .name = SUPERIO,
.startup = superio_startup_irq,
.shutdown = superio_disable_irq,
.enable = superio_enable_irq,
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 2597145a066..ad113b0f62d 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3403,7 +3403,7 @@ static int __init parport_parse_param(const char *s, int *val,
*val = automatic;
else if (!strncmp(s, "none", 4))
*val = none;
- else if (nofifo && !strncmp(s, "nofifo", 4))
+ else if (nofifo && !strncmp(s, "nofifo", 6))
*val = nofifo;
else {
char *ep;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index fdc864f9cf2..b1ecefa2a23 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -27,10 +27,10 @@ config PCI_LEGACY
default y
help
Say Y here if you want to include support for the deprecated
- pci_find_slot() and pci_find_device() APIs. Most drivers have
- been converted over to using the proper hotplug APIs, so this
- option serves to include/exclude only a few drivers that are
- still using this API.
+ pci_find_device() API. Most drivers have been converted over
+ to using the proper hotplug APIs, so this option serves to
+ include/exclude only a few drivers that are still using this
+ API.
config PCI_DEBUG
bool "PCI Debugging"
@@ -69,3 +69,10 @@ config PCI_IOV
physical resources.
If unsure, say N.
+
+config PCI_IOAPIC
+ bool
+ depends on PCI
+ depends on ACPI
+ depends on HOTPLUG
+ default y
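
The PCI_LEGACY help text above concerns pci_find_device(), which hands back a pointer without taking a reference and is therefore unsafe once devices can be hot-unplugged. The hotplug-aware replacement it alludes to is the pci_get_device() family, which returns a referenced device and drops the reference on the 'from' argument of the next call; a minimal sketch with illustrative IDs:

#include <linux/pci.h>

/* Hypothetical scan for vendor 0x8086, device 0x1234 (IDs for illustration). */
static void foo_scan(void)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(0x8086, 0x1234, pdev)) != NULL) {
		/* pdev holds a reference; it is released when passed back as
		 * 'from', or must be dropped with pci_dev_put() on early exit. */
		dev_info(&pdev->dev, "found matching device\n");
	}
}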
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4a7f11d8f43..4df48d58eaa 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -14,6 +14,8 @@ CFLAGS_legacy.o += -Wno-deprecated-declarations
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
+obj-$(CONFIG_PCI_IOAPIC) += ioapic.o
+
obj-$(CONFIG_HOTPLUG) += hotplug.o
# Build the PCI Hotplug drivers if we were asked to
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 416f6ac65b7..83aae474759 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -320,7 +320,7 @@ found:
for (bus = dev->bus; bus; bus = bus->parent) {
struct pci_dev *bridge = bus->self;
- if (!bridge || !bridge->is_pcie ||
+ if (!bridge || !pci_is_pcie(bridge) ||
bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
return 0;
@@ -339,6 +339,35 @@ found:
}
#endif
+#ifdef CONFIG_ACPI_NUMA
+static int __init
+dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+{
+ struct acpi_dmar_rhsa *rhsa;
+ struct dmar_drhd_unit *drhd;
+
+ rhsa = (struct acpi_dmar_rhsa *)header;
+ for_each_drhd_unit(drhd) {
+ if (drhd->reg_base_addr == rhsa->base_address) {
+ int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
+
+ if (!node_online(node))
+ node = -1;
+ drhd->iommu->node = node;
+ return 0;
+ }
+ }
+ WARN(1, "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->reg_base_addr,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+
+ return 0;
+}
+#endif
+
static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
@@ -458,7 +487,9 @@ parse_dmar_table(void)
#endif
break;
case ACPI_DMAR_HARDWARE_AFFINITY:
- /* We don't do anything with RHSA (yet?) */
+#ifdef CONFIG_ACPI_NUMA
+ ret = dmar_parse_one_rhsa(entry_header);
+#endif
break;
default:
printk(KERN_WARNING PREFIX
@@ -582,6 +613,8 @@ int __init dmar_table_init(void)
return 0;
}
+static int bios_warned;
+
int __init check_zero_address(void)
{
struct acpi_table_dmar *dmar;
@@ -601,6 +634,9 @@ int __init check_zero_address(void)
}
if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
+ void __iomem *addr;
+ u64 cap, ecap;
+
drhd = (void *)entry_header;
if (!drhd->address) {
/* Promote an attitude of violence to a BIOS engineer today */
@@ -609,17 +645,40 @@ int __init check_zero_address(void)
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
-#ifdef CONFIG_DMAR
- dmar_disabled = 1;
-#endif
- return 0;
+ bios_warned = 1;
+ goto failed;
+ }
+
+ addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+ if (!addr ) {
+ printk("IOMMU: can't validate: %llx\n", drhd->address);
+ goto failed;
+ }
+ cap = dmar_readq(addr + DMAR_CAP_REG);
+ ecap = dmar_readq(addr + DMAR_ECAP_REG);
+ early_iounmap(addr, VTD_PAGE_SIZE);
+ if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+ /* Promote an attitude of violence to a BIOS engineer today */
+ WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->address,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ bios_warned = 1;
+ goto failed;
}
- break;
}
entry_header = ((void *)entry_header + entry_header->length);
}
return 1;
+
+failed:
+#ifdef CONFIG_DMAR
+ dmar_disabled = 1;
+#endif
+ return 0;
}
void __init detect_intel_iommu(void)
@@ -645,8 +704,11 @@ void __init detect_intel_iommu(void)
"x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
+ if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+ }
#endif
#ifdef CONFIG_X86
if (ret)
@@ -667,6 +729,18 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
int agaw = 0;
int msagaw = 0;
+ if (!drhd->reg_base_addr) {
+ if (!bios_warned) {
+ WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ bios_warned = 1;
+ }
+ return -EINVAL;
+ }
+
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return -ENOMEM;
@@ -683,13 +757,16 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- /* Promote an attitude of violence to a BIOS engineer today */
- WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- drhd->reg_base_addr,
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
+ if (!bios_warned) {
+ /* Promote an attitude of violence to a BIOS engineer today */
+ WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->reg_base_addr,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ bios_warned = 1;
+ }
goto err_unmap;
}
@@ -712,6 +789,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->agaw = agaw;
iommu->msagaw = msagaw;
+ iommu->node = -1;
+
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
cap_max_fault_reg_offset(iommu->cap));
@@ -1053,6 +1132,7 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
int dmar_enable_qi(struct intel_iommu *iommu)
{
struct q_inval *qi;
+ struct page *desc_page;
if (!ecap_qis(iommu->ecap))
return -ENOENT;
@@ -1069,13 +1149,16 @@ int dmar_enable_qi(struct intel_iommu *iommu)
qi = iommu->qi;
- qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
- if (!qi->desc) {
+
+ desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
+ if (!desc_page) {
kfree(qi);
iommu->qi = 0;
return -ENOMEM;
}
+ qi->desc = page_address(desc_page);
+
qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3625b094bf7..6cd9f3c9887 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -6,18 +6,22 @@ obj-$(CONFIG_HOTPLUG_PCI) += pci_hotplug.o
obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o
obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o
-# pciehp should be linked before acpiphp in order to allow the native driver
-# to attempt to bind first. We can then fall back to generic support.
+# native drivers should be linked before acpiphp in order to allow the
+# native driver to attempt to bind first. We can then fall back to
+# generic support.
obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o
-obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
-obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o
obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o
obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
+obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
+
+# acpiphp_ibm extends acpiphp, so should be linked afterwards.
+
+obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
# Link this last so it doesn't claim devices that have a real hotplug driver
obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 0f32571b94d..3c76fc67cf0 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -362,6 +362,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
status = acpi_pci_osc_control_set(handle, flags);
if (ACPI_SUCCESS(status))
goto got_one;
+ if (status == AE_SUPPORT)
+ goto no_control;
kfree(string.pointer);
string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
}
@@ -394,10 +396,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
if (ACPI_FAILURE(status))
break;
}
-
+no_control:
dbg("Cannot get control of hotplug hardware for pci %s\n",
pci_name(pdev));
-
kfree(string.pointer);
return -ENODEV;
got_one:
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 7d938df7920..bab52047baa 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -146,12 +146,6 @@ struct acpiphp_attention_info
struct module *owner;
};
-struct acpiphp_ioapic {
- struct pci_dev *dev;
- u32 gsi_base;
- struct list_head list;
-};
-
/* PCI bus bridge HID */
#define ACPI_PCI_HOST_HID "PNP0A03"
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index df1b0ea089d..8e952fdab76 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -52,8 +52,6 @@
#include "acpiphp.h"
static LIST_HEAD(bridge_list);
-static LIST_HEAD(ioapic_list);
-static DEFINE_SPINLOCK(ioapic_list_lock);
#define MY_NAME "acpiphp_glue"
@@ -311,17 +309,13 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge)
/* find acpiphp_func from acpiphp_bridge */
static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle)
{
- struct list_head *node, *l;
struct acpiphp_bridge *bridge;
struct acpiphp_slot *slot;
struct acpiphp_func *func;
- list_for_each(node, &bridge_list) {
- bridge = list_entry(node, struct acpiphp_bridge, list);
+ list_for_each_entry(bridge, &bridge_list, list) {
for (slot = bridge->slots; slot; slot = slot->next) {
- list_for_each(l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func,
- sibling);
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (func->handle == handle)
return func;
}
@@ -495,21 +489,19 @@ static int add_bridge(acpi_handle handle)
static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
{
- struct list_head *head;
- list_for_each(head, &bridge_list) {
- struct acpiphp_bridge *bridge = list_entry(head,
- struct acpiphp_bridge, list);
+ struct acpiphp_bridge *bridge;
+
+ list_for_each_entry(bridge, &bridge_list, list)
if (bridge->handle == handle)
return bridge;
- }
return NULL;
}
static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
- struct list_head *list, *tmp;
- struct acpiphp_slot *slot;
+ struct acpiphp_slot *slot, *next;
+ struct acpiphp_func *func, *tmp;
acpi_status status;
acpi_handle handle = bridge->handle;
@@ -530,10 +522,8 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
slot = bridge->slots;
while (slot) {
- struct acpiphp_slot *next = slot->next;
- list_for_each_safe (list, tmp, &slot->funcs) {
- struct acpiphp_func *func;
- func = list_entry(list, struct acpiphp_func, sibling);
+ next = slot->next;
+ list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
if (is_dock_device(func->handle)) {
unregister_hotplug_dock_device(func->handle);
unregister_dock_notifier(&func->nb);
@@ -545,7 +535,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
if (ACPI_FAILURE(status))
err("failed to remove notify handler\n");
}
- list_del(list);
+ list_del(&func->sibling);
kfree(func);
}
acpiphp_unregister_hotplug_slot(slot);
@@ -606,204 +596,17 @@ static void remove_bridge(acpi_handle handle)
handle_hotplug_event_bridge);
}
-static struct pci_dev * get_apic_pci_info(acpi_handle handle)
-{
- struct pci_dev *dev;
-
- dev = acpi_get_pci_dev(handle);
- if (!dev)
- return NULL;
-
- if ((dev->class != PCI_CLASS_SYSTEM_PIC_IOAPIC) &&
- (dev->class != PCI_CLASS_SYSTEM_PIC_IOXAPIC))
- {
- pci_dev_put(dev);
- return NULL;
- }
-
- return dev;
-}
-
-static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
-{
- acpi_status status;
- int result = -1;
- unsigned long long gsb;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- union acpi_object *obj;
- void *table;
-
- status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
- if (ACPI_SUCCESS(status)) {
- *gsi_base = (u32)gsb;
- return 0;
- }
-
- status = acpi_evaluate_object(handle, "_MAT", NULL, &buffer);
- if (ACPI_FAILURE(status) || !buffer.length || !buffer.pointer)
- return -1;
-
- obj = buffer.pointer;
- if (obj->type != ACPI_TYPE_BUFFER)
- goto out;
-
- table = obj->buffer.pointer;
- switch (((struct acpi_subtable_header *)table)->type) {
- case ACPI_MADT_TYPE_IO_SAPIC:
- *gsi_base = ((struct acpi_madt_io_sapic *)table)->global_irq_base;
- result = 0;
- break;
- case ACPI_MADT_TYPE_IO_APIC:
- *gsi_base = ((struct acpi_madt_io_apic *)table)->global_irq_base;
- result = 0;
- break;
- default:
- break;
- }
- out:
- kfree(buffer.pointer);
- return result;
-}
-
-static acpi_status
-ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- unsigned long long sta;
- acpi_handle tmp;
- struct pci_dev *pdev;
- u32 gsi_base;
- u64 phys_addr;
- struct acpiphp_ioapic *ioapic;
-
- /* Evaluate _STA if present */
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL)
- return AE_CTRL_DEPTH;
-
- /* Scan only PCI bus scope */
- status = acpi_get_handle(handle, "_HID", &tmp);
- if (ACPI_SUCCESS(status))
- return AE_CTRL_DEPTH;
-
- if (get_gsi_base(handle, &gsi_base))
- return AE_OK;
-
- ioapic = kmalloc(sizeof(*ioapic), GFP_KERNEL);
- if (!ioapic)
- return AE_NO_MEMORY;
-
- pdev = get_apic_pci_info(handle);
- if (!pdev)
- goto exit_kfree;
-
- if (pci_enable_device(pdev))
- goto exit_pci_dev_put;
-
- pci_set_master(pdev);
-
- if (pci_request_region(pdev, 0, "I/O APIC(acpiphp)"))
- goto exit_pci_disable_device;
-
- phys_addr = pci_resource_start(pdev, 0);
- if (acpi_register_ioapic(handle, phys_addr, gsi_base))
- goto exit_pci_release_region;
-
- ioapic->gsi_base = gsi_base;
- ioapic->dev = pdev;
- spin_lock(&ioapic_list_lock);
- list_add_tail(&ioapic->list, &ioapic_list);
- spin_unlock(&ioapic_list_lock);
-
- return AE_OK;
-
- exit_pci_release_region:
- pci_release_region(pdev, 0);
- exit_pci_disable_device:
- pci_disable_device(pdev);
- exit_pci_dev_put:
- pci_dev_put(pdev);
- exit_kfree:
- kfree(ioapic);
-
- return AE_OK;
-}
-
-static acpi_status
-ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- acpi_status status;
- unsigned long long sta;
- acpi_handle tmp;
- u32 gsi_base;
- struct acpiphp_ioapic *pos, *n, *ioapic = NULL;
-
- /* Evaluate _STA if present */
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL)
- return AE_CTRL_DEPTH;
-
- /* Scan only PCI bus scope */
- status = acpi_get_handle(handle, "_HID", &tmp);
- if (ACPI_SUCCESS(status))
- return AE_CTRL_DEPTH;
-
- if (get_gsi_base(handle, &gsi_base))
- return AE_OK;
-
- acpi_unregister_ioapic(handle, gsi_base);
-
- spin_lock(&ioapic_list_lock);
- list_for_each_entry_safe(pos, n, &ioapic_list, list) {
- if (pos->gsi_base != gsi_base)
- continue;
- ioapic = pos;
- list_del(&ioapic->list);
- break;
- }
- spin_unlock(&ioapic_list_lock);
-
- if (!ioapic)
- return AE_OK;
-
- pci_release_region(ioapic->dev, 0);
- pci_disable_device(ioapic->dev);
- pci_dev_put(ioapic->dev);
- kfree(ioapic);
-
- return AE_OK;
-}
-
-static int acpiphp_configure_ioapics(acpi_handle handle)
-{
- ioapic_add(handle, 0, NULL, NULL);
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- ACPI_UINT32_MAX, ioapic_add, NULL, NULL, NULL);
- return 0;
-}
-
-static int acpiphp_unconfigure_ioapics(acpi_handle handle)
-{
- ioapic_remove(handle, 0, NULL, NULL);
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- ACPI_UINT32_MAX, ioapic_remove, NULL, NULL, NULL);
- return 0;
-}
-
static int power_on_slot(struct acpiphp_slot *slot)
{
acpi_status status;
struct acpiphp_func *func;
- struct list_head *l;
int retval = 0;
/* if already enabled, just skip */
if (slot->flags & SLOT_POWEREDON)
goto err_exit;
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_PS0) {
dbg("%s: executing _PS0\n", __func__);
status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL);
@@ -829,7 +632,6 @@ static int power_off_slot(struct acpiphp_slot *slot)
{
acpi_status status;
struct acpiphp_func *func;
- struct list_head *l;
int retval = 0;
@@ -837,9 +639,7 @@ static int power_off_slot(struct acpiphp_slot *slot)
if ((slot->flags & SLOT_POWEREDON) == 0)
goto err_exit;
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_PS3) {
status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
if (ACPI_FAILURE(status)) {
@@ -966,7 +766,6 @@ static int __ref enable_device(struct acpiphp_slot *slot)
{
struct pci_dev *dev;
struct pci_bus *bus = slot->bridge->pci_bus;
- struct list_head *l;
struct acpiphp_func *func;
int retval = 0;
int num, max, pass;
@@ -1006,21 +805,16 @@ static int __ref enable_device(struct acpiphp_slot *slot)
}
}
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
+ list_for_each_entry(func, &slot->funcs, sibling)
acpiphp_bus_add(func);
- }
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
- list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_configure_ioapics(func->handle);
pci_enable_bridges(bus);
pci_bus_add_devices(bus);
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
+ list_for_each_entry(func, &slot->funcs, sibling) {
dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
func->function));
if (!dev)
@@ -1091,7 +885,6 @@ static int disable_device(struct acpiphp_slot *slot)
}
list_for_each_entry(func, &slot->funcs, sibling) {
- acpiphp_unconfigure_ioapics(func->handle);
acpiphp_bus_trim(func->handle);
}
@@ -1119,12 +912,9 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
acpi_status status;
unsigned long long sta = 0;
u32 dvid;
- struct list_head *l;
struct acpiphp_func *func;
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && sta)
@@ -1152,13 +942,10 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot)
{
acpi_status status;
struct acpiphp_func *func;
- struct list_head *l;
struct acpi_object_list arg_list;
union acpi_object arg;
- list_for_each (l, &slot->funcs) {
- func = list_entry(l, struct acpiphp_func, sibling);
-
+ list_for_each_entry(func, &slot->funcs, sibling) {
/* We don't want to call _EJ0 on non-existing functions. */
if ((func->flags & FUNC_HAS_EJ0)) {
/* _EJ0 method take one argument */
@@ -1275,7 +1062,6 @@ static int acpiphp_configure_bridge (acpi_handle handle)
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
pci_enable_bridges(bus);
- acpiphp_configure_ioapics(handle);
return 0;
}
@@ -1542,7 +1328,7 @@ int __init acpiphp_get_num_slots(void)
struct acpiphp_bridge *bridge;
int num_slots = 0;
- list_for_each_entry (bridge, &bridge_list, list) {
+ list_for_each_entry(bridge, &bridge_list, list) {
dbg("Bus %04x:%02x has %d slot%s\n",
pci_domain_nr(bridge->pci_bus),
bridge->pci_bus->number, bridge->nr_slots,
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 83f337c891a..c7084f0eca5 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -890,7 +890,7 @@ static int poll_hpc(void *data)
msleep(POLL_INTERVAL_SEC * 1000);
if (kthread_should_stop())
- break;
+ goto out_sleep;
down (&semOperations);
@@ -904,6 +904,7 @@ static int poll_hpc(void *data)
/* give up the hardware semaphore */
up (&semOperations);
/* sleep for a short time just for good measure */
+out_sleep:
msleep(100);
}
up (&sem_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 0325d989bb4..38183a534b6 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -68,26 +68,26 @@ static DEFINE_MUTEX(pci_hp_mutex);
static char *pci_bus_speed_strings[] = {
"33 MHz PCI", /* 0x00 */
"66 MHz PCI", /* 0x01 */
- "66 MHz PCIX", /* 0x02 */
- "100 MHz PCIX", /* 0x03 */
- "133 MHz PCIX", /* 0x04 */
+ "66 MHz PCI-X", /* 0x02 */
+ "100 MHz PCI-X", /* 0x03 */
+ "133 MHz PCI-X", /* 0x04 */
NULL, /* 0x05 */
NULL, /* 0x06 */
NULL, /* 0x07 */
NULL, /* 0x08 */
- "66 MHz PCIX 266", /* 0x09 */
- "100 MHz PCIX 266", /* 0x0a */
- "133 MHz PCIX 266", /* 0x0b */
+ "66 MHz PCI-X 266", /* 0x09 */
+ "100 MHz PCI-X 266", /* 0x0a */
+ "133 MHz PCI-X 266", /* 0x0b */
NULL, /* 0x0c */
NULL, /* 0x0d */
NULL, /* 0x0e */
NULL, /* 0x0f */
NULL, /* 0x10 */
- "66 MHz PCIX 533", /* 0x11 */
- "100 MHz PCIX 533", /* 0x12 */
- "133 MHz PCIX 533", /* 0x13 */
- "2.5 GT/s PCI-E", /* 0x14 */
- "5.0 GT/s PCI-E", /* 0x15 */
+ "66 MHz PCI-X 533", /* 0x11 */
+ "100 MHz PCI-X 533", /* 0x12 */
+ "133 MHz PCI-X 533", /* 0x13 */
+ "2.5 GT/s PCIe", /* 0x14 */
+ "5.0 GT/s PCIe", /* 0x15 */
};
#ifdef CONFIG_HOTPLUG_PCI_CPCI
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 3070f77eb56..4ed76b47b6d 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -91,7 +91,6 @@ struct controller {
struct slot *slot;
wait_queue_head_t queue; /* sleep & wake process */
u32 slot_cap;
- u8 cap_base;
struct timer_list poll_timer;
unsigned int cmd_busy:1;
unsigned int no_cmd_complete:1;
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 37c8d3d0323..b09b083011d 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -87,7 +87,8 @@ static int __init dummy_probe(struct pcie_device *dev)
/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
if (pciehp_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
- if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP)))
+ pos = pci_pcie_cap(pdev);
+ if (!pos)
return -ENODEV;
pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index bc234719b1d..5674b2075bd 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -72,18 +72,6 @@ static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
- .set_attention_status = set_attention_status,
- .enable_slot = enable_slot,
- .disable_slot = disable_slot,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
-};
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -95,6 +83,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, hotplug_slot_name(hotplug_slot));
+ kfree(hotplug_slot->ops);
kfree(hotplug_slot->info);
kfree(hotplug_slot);
}
@@ -104,6 +93,7 @@ static int init_slot(struct controller *ctrl)
struct slot *slot = ctrl->slot;
struct hotplug_slot *hotplug = NULL;
struct hotplug_slot_info *info = NULL;
+ struct hotplug_slot_ops *ops = NULL;
char name[SLOT_NAME_SIZE];
int retval = -ENOMEM;
@@ -115,11 +105,28 @@ static int init_slot(struct controller *ctrl)
if (!info)
goto out;
+ /* Setup hotplug slot ops */
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ goto out;
+ ops->enable_slot = enable_slot;
+ ops->disable_slot = disable_slot;
+ ops->get_power_status = get_power_status;
+ ops->get_adapter_status = get_adapter_status;
+ ops->get_max_bus_speed = get_max_bus_speed;
+ ops->get_cur_bus_speed = get_cur_bus_speed;
+ if (MRL_SENS(ctrl))
+ ops->get_latch_status = get_latch_status;
+ if (ATTN_LED(ctrl)) {
+ ops->get_attention_status = get_attention_status;
+ ops->set_attention_status = set_attention_status;
+ }
+
/* register this slot with the hotplug pci core */
hotplug->info = info;
hotplug->private = slot;
hotplug->release = &release_slot;
- hotplug->ops = &pciehp_hotplug_slot_ops;
+ hotplug->ops = ops;
slot->hotplug_slot = hotplug;
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
@@ -128,17 +135,12 @@ static int init_slot(struct controller *ctrl)
ctrl->pcie->port->subordinate->number, PSN(ctrl));
retval = pci_hp_register(hotplug,
ctrl->pcie->port->subordinate, 0, name);
- if (retval) {
+ if (retval)
ctrl_err(ctrl,
"pci_hp_register failed with error %d\n", retval);
- goto out;
- }
- get_power_status(hotplug, &info->power_status);
- get_attention_status(hotplug, &info->attention_status);
- get_latch_status(hotplug, &info->latch_status);
- get_adapter_status(hotplug, &info->adapter_status);
out:
if (retval) {
+ kfree(ops);
kfree(info);
kfree(hotplug);
}
@@ -160,12 +162,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- hotplug_slot->info->attention_status = status;
-
- if (ATTN_LED(slot->ctrl))
- pciehp_set_attention_status(slot, status);
-
- return 0;
+ return pciehp_set_attention_status(slot, status);
}
@@ -193,92 +190,62 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = pciehp_get_power_status(slot, value);
- if (retval < 0)
- *value = hotplug_slot->info->power_status;
-
- return 0;
+ return pciehp_get_power_status(slot, value);
}
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = pciehp_get_attention_status(slot, value);
- if (retval < 0)
- *value = hotplug_slot->info->attention_status;
-
- return 0;
+ return pciehp_get_attention_status(slot, value);
}
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = pciehp_get_latch_status(slot, value);
- if (retval < 0)
- *value = hotplug_slot->info->latch_status;
-
- return 0;
+ return pciehp_get_latch_status(slot, value);
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
- int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = pciehp_get_adapter_status(slot, value);
- if (retval < 0)
- *value = hotplug_slot->info->adapter_status;
-
- return 0;
+ return pciehp_get_adapter_status(slot, value);
}
static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
enum pci_bus_speed *value)
{
struct slot *slot = hotplug_slot->private;
- int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = pciehp_get_max_link_speed(slot, value);
- if (retval < 0)
- *value = PCI_SPEED_UNKNOWN;
-
- return 0;
+ return pciehp_get_max_link_speed(slot, value);
}
static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{
struct slot *slot = hotplug_slot->private;
- int retval;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = pciehp_get_cur_link_speed(slot, value);
- if (retval < 0)
- *value = PCI_SPEED_UNKNOWN;
-
- return 0;
+ return pciehp_get_cur_link_speed(slot, value);
}
static int pciehp_probe(struct pcie_device *dev)
@@ -286,14 +253,13 @@ static int pciehp_probe(struct pcie_device *dev)
int rc;
struct controller *ctrl;
struct slot *slot;
- u8 value;
- struct pci_dev *pdev = dev->port;
+ u8 occupied, poweron;
if (pciehp_force)
dev_info(&dev->device,
"Bypassing BIOS check for pciehp use on %s\n",
- pci_name(pdev));
- else if (pciehp_get_hp_hw_control_from_firmware(pdev))
+ pci_name(dev->port));
+ else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
goto err_out_none;
ctrl = pcie_init(dev);
@@ -318,23 +284,18 @@ static int pciehp_probe(struct pcie_device *dev)
rc = pcie_init_notification(ctrl);
if (rc) {
ctrl_err(ctrl, "Notification initialization failed\n");
- goto err_out_release_ctlr;
+ goto err_out_free_ctrl_slot;
}
/* Check if slot is occupied */
slot = ctrl->slot;
- pciehp_get_adapter_status(slot, &value);
- if (value) {
- if (pciehp_force)
- pciehp_enable_slot(slot);
- } else {
- /* Power off slot if not occupied */
- if (POWER_CTRL(ctrl)) {
- rc = pciehp_power_off_slot(slot);
- if (rc)
- goto err_out_free_ctrl_slot;
- }
- }
+ pciehp_get_adapter_status(slot, &occupied);
+ pciehp_get_power_status(slot, &poweron);
+ if (occupied && pciehp_force)
+ pciehp_enable_slot(slot);
+ /* If empty slot's power status is on, turn power off */
+ if (!occupied && poweron && POWER_CTRL(ctrl))
+ pciehp_power_off_slot(slot);
return 0;
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 84487d126e4..d6ac1b261dd 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -142,23 +142,9 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
/* power fault */
ctrl_dbg(ctrl, "Power fault interrupt received\n");
-
- if (!pciehp_query_power_fault(p_slot)) {
- /*
- * power fault Cleared
- */
- ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
- slot_name(p_slot));
- event_type = INT_POWER_FAULT_CLEAR;
- } else {
- /*
- * power fault
- */
- ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
- event_type = INT_POWER_FAULT;
- ctrl_info(ctrl, "Power fault bit %x set\n", 0);
- }
-
+ ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
+ event_type = INT_POWER_FAULT;
+ ctrl_info(ctrl, "Power fault bit %x set\n", 0);
queue_interrupt_event(p_slot, event_type);
return 1;
@@ -224,13 +210,12 @@ static int board_added(struct slot *p_slot)
retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
- set_slot_off(ctrl, p_slot);
- return retval;
+ goto err_exit;
}
/* Check for a power fault */
- if (pciehp_query_power_fault(p_slot)) {
- ctrl_dbg(ctrl, "Power fault detected\n");
+ if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
+ ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
retval = -EIO;
goto err_exit;
}
@@ -363,25 +348,6 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
mutex_unlock(&p_slot->lock);
}
-static int update_slot_info(struct slot *slot)
-{
- struct hotplug_slot_info *info;
- int result;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- pciehp_get_power_status(slot, &info->power_status);
- pciehp_get_attention_status(slot, &info->attention_status);
- pciehp_get_latch_status(slot, &info->latch_status);
- pciehp_get_adapter_status(slot, &info->adapter_status);
-
- result = pci_hp_change_slot_info(slot->hotplug_slot, info);
- kfree (info);
- return result;
-}
-
/*
* Note: This function must be called with slot->lock held
*/
@@ -442,7 +408,6 @@ static void handle_button_press_event(struct slot *p_slot)
* to hot-add or hot-remove is undergoing
*/
ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
- update_slot_info(p_slot);
break;
default:
ctrl_warn(ctrl, "Not a valid state\n");
@@ -500,11 +465,9 @@ static void interrupt_event_handler(struct work_struct *work)
if (!HP_SUPR_RM(ctrl))
break;
ctrl_dbg(ctrl, "Surprise Removal\n");
- update_slot_info(p_slot);
handle_surprise_event(p_slot);
break;
default:
- update_slot_info(p_slot);
break;
}
mutex_unlock(&p_slot->lock);
@@ -547,9 +510,6 @@ int pciehp_enable_slot(struct slot *p_slot)
if (rc) {
pciehp_get_latch_status(p_slot, &getstatus);
}
-
- update_slot_info(p_slot);
-
return rc;
}
@@ -590,10 +550,7 @@ int pciehp_disable_slot(struct slot *p_slot)
}
}
- ret = remove_board(p_slot);
- update_slot_info(p_slot);
-
- return ret;
+ return remove_board(p_slot);
}
int pciehp_sysfs_enable_slot(struct slot *p_slot)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9ef4605c1ef..10040d58c8e 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -45,25 +45,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
{
struct pci_dev *dev = ctrl->pcie->port;
- return pci_read_config_word(dev, ctrl->cap_base + reg, value);
+ return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value);
}
static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
{
struct pci_dev *dev = ctrl->pcie->port;
- return pci_read_config_dword(dev, ctrl->cap_base + reg, value);
+ return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value);
}
static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
{
struct pci_dev *dev = ctrl->pcie->port;
- return pci_write_config_word(dev, ctrl->cap_base + reg, value);
+ return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value);
}
static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
{
struct pci_dev *dev = ctrl->pcie->port;
- return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
+ return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value);
}
/* Power Control Command */
@@ -318,8 +318,8 @@ int pciehp_get_attention_status(struct slot *slot, u8 *status)
return retval;
}
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
@@ -356,8 +356,8 @@ int pciehp_get_power_status(struct slot *slot, u8 *status)
ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
return retval;
}
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
@@ -427,27 +427,24 @@ int pciehp_set_attention_status(struct slot *slot, u8 value)
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
u16 cmd_mask;
- int rc;
cmd_mask = PCI_EXP_SLTCTL_AIC;
switch (value) {
- case 0 : /* turn off */
- slot_cmd = 0x00C0;
- break;
- case 1: /* turn on */
- slot_cmd = 0x0040;
- break;
- case 2: /* turn blink */
- slot_cmd = 0x0080;
- break;
- default:
- return -1;
+ case 0 : /* turn off */
+ slot_cmd = 0x00C0;
+ break;
+ case 1: /* turn on */
+ slot_cmd = 0x0040;
+ break;
+ case 2: /* turn blink */
+ slot_cmd = 0x0080;
+ break;
+ default:
+ return -EINVAL;
}
- rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
-
- return rc;
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ return pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
}
void pciehp_green_led_on(struct slot *slot)
@@ -459,8 +456,8 @@ void pciehp_green_led_on(struct slot *slot)
slot_cmd = 0x0100;
cmd_mask = PCI_EXP_SLTCTL_PIC;
pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
void pciehp_green_led_off(struct slot *slot)
@@ -472,8 +469,8 @@ void pciehp_green_led_off(struct slot *slot)
slot_cmd = 0x0300;
cmd_mask = PCI_EXP_SLTCTL_PIC;
pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
void pciehp_green_led_blink(struct slot *slot)
@@ -485,8 +482,8 @@ void pciehp_green_led_blink(struct slot *slot)
slot_cmd = 0x0200;
cmd_mask = PCI_EXP_SLTCTL_PIC;
pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
int pciehp_power_on_slot(struct slot * slot)
@@ -514,97 +511,38 @@ int pciehp_power_on_slot(struct slot * slot)
return retval;
}
}
+ ctrl->power_fault_detected = 0;
slot_cmd = POWER_ON;
cmd_mask = PCI_EXP_SLTCTL_PCC;
- if (!pciehp_poll_mode) {
- /* Enable power fault detection turned off at power off time */
- slot_cmd |= PCI_EXP_SLTCTL_PFDE;
- cmd_mask |= PCI_EXP_SLTCTL_PFDE;
- }
-
retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
if (retval) {
ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
return retval;
}
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
- ctrl->power_fault_detected = 0;
return retval;
}
-static inline int pcie_mask_bad_dllp(struct controller *ctrl)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- int pos;
- u32 reg;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return 0;
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg);
- if (reg & PCI_ERR_COR_BAD_DLLP)
- return 0;
- reg |= PCI_ERR_COR_BAD_DLLP;
- pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
- return 1;
-}
-
-static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
-{
- struct pci_dev *dev = ctrl->pcie->port;
- u32 reg;
- int pos;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return;
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg);
- if (!(reg & PCI_ERR_COR_BAD_DLLP))
- return;
- reg &= ~PCI_ERR_COR_BAD_DLLP;
- pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
-}
-
int pciehp_power_off_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
u16 cmd_mask;
- int retval = 0;
- int changed;
-
- /*
- * Set Bad DLLP Mask bit in Correctable Error Mask
- * Register. This is the workaround against Bad DLLP error
- * that sometimes happens during turning power off the slot
- * which conforms to PCI Express 1.0a spec.
- */
- changed = pcie_mask_bad_dllp(ctrl);
+ int retval;
slot_cmd = POWER_OFF;
cmd_mask = PCI_EXP_SLTCTL_PCC;
- if (!pciehp_poll_mode) {
- /* Disable power fault detection */
- slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
- cmd_mask |= PCI_EXP_SLTCTL_PFDE;
- }
-
retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
if (retval) {
ctrl_err(ctrl, "Write command failed!\n");
- retval = -1;
- goto out;
+ return retval;
}
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n",
- __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
- out:
- if (changed)
- pcie_unmask_bad_dllp(ctrl);
-
- return retval;
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ return 0;
}
static irqreturn_t pcie_isr(int irq, void *dev_id)
@@ -840,11 +778,19 @@ int pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
+ /*
+ * TBD: Power fault detected software notification support.
+ *
+ * Power fault detected software notification is not enabled
+ * for now, because it caused a power fault detected interrupt storm
+ * on some machines. On those machines, the power fault detected
+ * bit in the slot status register was set again immediately
+ * after being cleared in the interrupt service routine, and the
+ * next power fault detected interrupt was raised right away.
+ */
cmd = PCI_EXP_SLTCTL_PDCE;
if (ATTN_BUTTN(ctrl))
cmd |= PCI_EXP_SLTCTL_ABPE;
- if (POWER_CTRL(ctrl))
- cmd |= PCI_EXP_SLTCTL_PFDE;
if (MRL_SENS(ctrl))
cmd |= PCI_EXP_SLTCTL_MRLSCE;
if (!pciehp_poll_mode)
@@ -866,7 +812,8 @@ static void pcie_disable_notification(struct controller *ctrl)
u16 mask;
mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
- PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE);
+ PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+ PCI_EXP_SLTCTL_DLLSCE);
if (pcie_write_cmd(ctrl, 0, mask))
ctrl_warn(ctrl, "Cannot disable software notification\n");
}
@@ -934,7 +881,8 @@ static inline void dbg_ctrl(struct controller *ctrl)
pdev->subsystem_device);
ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
pdev->subsystem_vendor);
- ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base);
+ ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n",
+ pci_pcie_cap(pdev));
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (!pci_resource_len(pdev, i))
continue;
@@ -978,8 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev)
goto abort;
}
ctrl->pcie = dev;
- ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!ctrl->cap_base) {
+ if (!pci_pcie_cap(pdev)) {
ctrl_err(ctrl, "Cannot find PCI Express capability\n");
goto abort_ctrl;
}
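The pciehp_hpc.c changes above retire the driver-private ctrl->cap_base cache in favour of pci_pcie_cap(), which returns the PCI Express capability offset recorded on struct pci_dev at enumeration time, so the register helpers no longer re-derive it per driver. A minimal sketch of the resulting access pattern; read_slot_status() is an illustrative helper, not a function from the patch:

/* Sketch: config accesses built on the cached PCI Express capability offset. */
static int read_slot_status(struct pci_dev *dev, u16 *status)
{
	int pos = pci_pcie_cap(dev);	/* 0 if the device has no PCIe capability */

	if (!pos)
		return -ENODEV;
	return pci_read_config_word(dev, pos + PCI_EXP_SLTSTA, status);
}

The same cached offset backs pci_is_pcie(), which is why the open-coded dev->is_pcie tests elsewhere in this series become pci_is_pcie(dev) calls.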
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index cc8ec3aa41a..80b461c9855 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -43,7 +43,7 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
* Perhaps we *should* use default settings for PCIe, but
* pciehp didn't, so we won't either.
*/
- if (dev->is_pcie)
+ if (pci_is_pcie(dev))
return;
dev_info(&dev->dev, "using default PCI settings\n");
hpp = &pci_default_type0;
@@ -102,7 +102,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
return;
/* Find PCI Express capability */
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos)
return;
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 9261327b49f..e56f9bed6f2 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -277,6 +277,7 @@ static int hw_pass_through = 1;
struct dmar_domain {
int id; /* domain id */
+ int nid; /* node id */
unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
struct list_head devices; /* all devices' list */
@@ -386,30 +387,14 @@ static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;
-static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
+static inline void *alloc_pgtable_page(int node)
{
- unsigned int flags;
- void *vaddr;
-
- /* trying to avoid low memory issues */
- flags = current->flags & PF_MEMALLOC;
- current->flags |= PF_MEMALLOC;
- vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
- current->flags &= (~PF_MEMALLOC | flags);
- return vaddr;
-}
-
+ struct page *page;
+ void *vaddr = NULL;
-static inline void *alloc_pgtable_page(void)
-{
- unsigned int flags;
- void *vaddr;
-
- /* trying to avoid low memory issues */
- flags = current->flags & PF_MEMALLOC;
- current->flags |= PF_MEMALLOC;
- vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
- current->flags &= (~PF_MEMALLOC | flags);
+ page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+ if (page)
+ vaddr = page_address(page);
return vaddr;
}
@@ -420,7 +405,7 @@ static inline void free_pgtable_page(void *vaddr)
static inline void *alloc_domain_mem(void)
{
- return iommu_kmem_cache_alloc(iommu_domain_cache);
+ return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}
static void free_domain_mem(void *vaddr)
@@ -430,7 +415,7 @@ static void free_domain_mem(void *vaddr)
static inline void * alloc_devinfo_mem(void)
{
- return iommu_kmem_cache_alloc(iommu_devinfo_cache);
+ return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
}
static inline void free_devinfo_mem(void *vaddr)
@@ -440,7 +425,7 @@ static inline void free_devinfo_mem(void *vaddr)
struct iova *alloc_iova_mem(void)
{
- return iommu_kmem_cache_alloc(iommu_iova_cache);
+ return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}
void free_iova_mem(struct iova *iova)
@@ -589,7 +574,8 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
root = &iommu->root_entry[bus];
context = get_context_addr_from_root(root);
if (!context) {
- context = (struct context_entry *)alloc_pgtable_page();
+ context = (struct context_entry *)
+ alloc_pgtable_page(iommu->node);
if (!context) {
spin_unlock_irqrestore(&iommu->lock, flags);
return NULL;
@@ -732,7 +718,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (!dma_pte_present(pte)) {
uint64_t pteval;
- tmp_page = alloc_pgtable_page();
+ tmp_page = alloc_pgtable_page(domain->nid);
if (!tmp_page)
return NULL;
@@ -868,7 +854,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
struct root_entry *root;
unsigned long flags;
- root = (struct root_entry *)alloc_pgtable_page();
+ root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root)
return -ENOMEM;
@@ -1263,6 +1249,7 @@ static struct dmar_domain *alloc_domain(void)
if (!domain)
return NULL;
+ domain->nid = -1;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
domain->flags = 0;
@@ -1420,9 +1407,10 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_snooping = 0;
domain->iommu_count = 1;
+ domain->nid = iommu->node;
/* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
if (!domain->pgd)
return -ENOMEM;
__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
@@ -1523,12 +1511,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
/* Skip top levels of page tables for
* iommu which has less agaw than default.
+ * Unnecessary for PT mode.
*/
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return -ENOMEM;
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd)) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return -ENOMEM;
+ }
}
}
}
@@ -1577,6 +1568,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
spin_lock_irqsave(&domain->iommu_lock, flags);
if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
domain->iommu_count++;
+ if (domain->iommu_count == 1)
+ domain->nid = iommu->node;
domain_update_iommu_cap(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
@@ -1611,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
return ret;
parent = parent->bus->self;
}
- if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
tmp->subordinate->number, 0,
@@ -1651,7 +1644,7 @@ static int domain_context_mapped(struct pci_dev *pdev)
return ret;
parent = parent->bus->self;
}
- if (tmp->is_pcie)
+ if (pci_is_pcie(tmp))
return device_context_mapped(iommu, tmp->subordinate->number,
0);
else
@@ -1821,7 +1814,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
dev_tmp = pci_find_upstream_pcie_bridge(pdev);
if (dev_tmp) {
- if (dev_tmp->is_pcie) {
+ if (pci_is_pcie(dev_tmp)) {
bus = dev_tmp->subordinate->number;
devfn = 0;
} else {
@@ -1991,6 +1984,16 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
pci_name(pdev), start, end);
+ if (end < start) {
+ WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ ret = -EIO;
+ goto error;
+ }
+
if (end >> agaw_to_width(domain->agaw)) {
WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -2182,7 +2185,7 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
* the 1:1 domain, just in _case_ one of their siblings turns out
* not to be able to map all of memory.
*/
- if (!pdev->is_pcie) {
+ if (!pci_is_pcie(pdev)) {
if (!pci_is_root_bus(pdev->bus))
return 0;
if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
@@ -3228,6 +3231,9 @@ static int device_notifier(struct notifier_block *nb,
struct pci_dev *pdev = to_pci_dev(dev);
struct dmar_domain *domain;
+ if (iommu_no_mapping(dev))
+ return 0;
+
domain = find_domain(pdev);
if (!domain)
return 0;
@@ -3319,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
parent->devfn);
parent = parent->bus->self;
}
- if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+ if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
iommu_detach_dev(iommu,
tmp->subordinate->number, 0);
else /* this is a legacy PCI bridge */
@@ -3455,6 +3461,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
return NULL;
domain->id = vm_domid++;
+ domain->nid = -1;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -3481,9 +3488,10 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
domain->max_addr = 0;
+ domain->nid = -1;
/* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
if (!domain->pgd)
return -ENOMEM;
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
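The intel-iommu changes thread a NUMA node id through the allocators: a domain records its node (domain->nid, seeded from iommu->node, -1 when unknown) and page-table pages are allocated on that node. A minimal sketch of the allocation pair, assuming alloc_pages_node() treats a negative node id as "no preference"; the names are illustrative:

/* Sketch: zeroed page-table page taken from (or near) a given NUMA node. */
static void *node_pgtable_alloc(int node)
{
	struct page *page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);

	return page ? page_address(page) : NULL;
}

static void node_pgtable_free(void *vaddr)
{
	free_page((unsigned long)vaddr);
}

GFP_ATOMIC stays because these helpers can be called with the IOMMU spinlock held.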
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3b3658669be..8b65a489581 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -520,7 +520,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
return -1;
/* PCIe device or Root Complex integrated PCI device */
- if (dev->is_pcie || !dev->bus->parent) {
+ if (pci_is_pcie(dev) || !dev->bus->parent) {
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
(dev->bus->number << 8) | dev->devfn);
return 0;
@@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
bridge = pci_find_upstream_pcie_bridge(dev);
if (bridge) {
- if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
+ if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */
set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
(bridge->bus->number << 8) | dev->bus->number);
else /* this is a legacy PCI bridge */
@@ -590,7 +590,8 @@ static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
if (!iommu->ir_table)
return -ENOMEM;
- pages = alloc_pages(GFP_ATOMIC | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);
+ pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+ INTR_REMAP_PAGE_ORDER);
if (!pages) {
printk(KERN_ERR "failed to allocate pages of order %d\n",
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
new file mode 100644
index 00000000000..3e0d7b5dd1b
--- /dev/null
+++ b/drivers/pci/ioapic.c
@@ -0,0 +1,127 @@
+/*
+ * IOAPIC/IOxAPIC/IOSAPIC driver
+ *
+ * Copyright (C) 2009 Fujitsu Limited.
+ * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This driver manages PCI I/O APICs added by hotplug after boot. We try to
+ * claim all I/O APIC PCI devices, but those present at boot were registered
+ * when we parsed the ACPI MADT, so we'll fail when we try to re-register
+ * them.
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+struct ioapic {
+ acpi_handle handle;
+ u32 gsi_base;
+};
+
+static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long gsb;
+ struct ioapic *ioapic;
+ u64 addr;
+ int ret;
+ char *type;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->dev);
+ if (!handle)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ /*
+ * The previous code in acpiphp evaluated _MAT if _GSB failed, but
+ * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs.
+ */
+
+ ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
+ if (!ioapic)
+ return -ENOMEM;
+
+ ioapic->handle = handle;
+ ioapic->gsi_base = (u32) gsb;
+
+ if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC)
+ type = "IOAPIC";
+ else
+ type = "IOxAPIC";
+
+ ret = pci_enable_device(dev);
+ if (ret < 0)
+ goto exit_free;
+
+ pci_set_master(dev);
+
+ if (pci_request_region(dev, 0, type))
+ goto exit_disable;
+
+ addr = pci_resource_start(dev, 0);
+ if (acpi_register_ioapic(ioapic->handle, addr, ioapic->gsi_base))
+ goto exit_release;
+
+ pci_set_drvdata(dev, ioapic);
+ dev_info(&dev->dev, "%s at %#llx, GSI %u\n", type, addr,
+ ioapic->gsi_base);
+ return 0;
+
+exit_release:
+ pci_release_region(dev, 0);
+exit_disable:
+ pci_disable_device(dev);
+exit_free:
+ kfree(ioapic);
+ return -ENODEV;
+}
+
+static void ioapic_remove(struct pci_dev *dev)
+{
+ struct ioapic *ioapic = pci_get_drvdata(dev);
+
+ acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base);
+ pci_release_region(dev, 0);
+ pci_disable_device(dev);
+ kfree(ioapic);
+}
+
+
+static struct pci_device_id ioapic_devices[] = {
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, },
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, },
+ { }
+};
+
+static struct pci_driver ioapic_driver = {
+ .name = "ioapic",
+ .id_table = ioapic_devices,
+ .probe = ioapic_probe,
+ .remove = __devexit_p(ioapic_remove),
+};
+
+static int __init ioapic_init(void)
+{
+ return pci_register_driver(&ioapic_driver);
+}
+
+static void __exit ioapic_exit(void)
+{
+ pci_unregister_driver(&ioapic_driver);
+}
+
+module_init(ioapic_init);
+module_exit(ioapic_exit);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e03fe98f061..b2a448e19fe 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -555,7 +555,7 @@ int pci_iov_init(struct pci_dev *dev)
{
int pos;
- if (!dev->is_pcie)
+ if (!pci_is_pcie(dev))
return -ENODEV;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 33317df4769..cc617ddd33d 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -116,7 +116,7 @@ static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
int ret;
ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
- if (!ret || bridge->is_pcie)
+ if (!ret || pci_is_pcie(bridge))
return;
bus = bus->parent;
}
@@ -131,7 +131,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
if (acpi_pci_can_wakeup(dev))
return acpi_pm_device_sleep_wake(&dev->dev, enable);
- if (!dev->is_pcie)
+ if (!pci_is_pcie(dev))
acpi_pci_propagate_wakeup_enable(dev->bus, enable);
return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 0f6382f090e..c5df94e8667 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -74,7 +74,11 @@ static ssize_t local_cpus_show(struct device *dev,
const struct cpumask *mask;
int len;
+#ifdef CONFIG_NUMA
+ mask = cpumask_of_node(dev_to_node(dev));
+#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
+#endif
len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
buf[len++] = '\n';
buf[len] = '\0';
@@ -88,7 +92,11 @@ static ssize_t local_cpulist_show(struct device *dev,
const struct cpumask *mask;
int len;
+#ifdef CONFIG_NUMA
+ mask = cpumask_of_node(dev_to_node(dev));
+#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
+#endif
len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
buf[len++] = '\n';
buf[len] = '\0';
@@ -176,6 +184,21 @@ numa_node_show(struct device *dev, struct device_attribute *attr, char *buf)
#endif
static ssize_t
+dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf (buf, "%d\n", fls64(pdev->dma_mask));
+}
+
+static ssize_t
+consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask));
+}
+
+static ssize_t
msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -306,6 +329,8 @@ struct device_attribute pci_dev_attrs[] = {
#ifdef CONFIG_NUMA
__ATTR_RO(numa_node),
#endif
+ __ATTR_RO(dma_mask_bits),
+ __ATTR_RO(consistent_dma_mask_bits),
__ATTR(enable, 0600, is_enabled_show, is_enabled_store),
__ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
broken_parity_status_show,broken_parity_status_store),
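The new dma_mask_bits and consistent_dma_mask_bits attributes report the mask width via fls64(), i.e. the index of the highest set bit. A one-line sketch of the relationship, assuming the usual DMA_BIT_MASK() helper:

/* Sketch: fls64(DMA_BIT_MASK(n)) == n for the masks drivers normally set. */
static int dma_mask_width(u64 mask)
{
	return fls64(mask);	/* DMA_BIT_MASK(32) -> 32, DMA_BIT_MASK(64) -> 64 */
}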
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e4c295a049..0bc27e05901 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -47,6 +47,15 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * The default CLS is used if arch didn't set CLS explicitly and not
+ * all pci devices agree on the same value. Arch can override either
+ * the dfl or actual value as it sees fit. Don't forget this is
+ * measured in 32-bit words, not bytes.
+ */
+u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -373,8 +382,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
continue; /* Wrong type */
if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
return r; /* Exact match */
- if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
- best = r; /* Approximating prefetchable by non-prefetchable */
+ /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
+ if (r->flags & IORESOURCE_PREFETCH)
+ continue;
+ /* .. but we can put a prefetchable resource inside a non-prefetchable one */
+ if (!best)
+ best = r;
}
return best;
}
@@ -728,8 +741,8 @@ static int pci_save_pcie_state(struct pci_dev *dev)
u16 *cap;
u16 flags;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (pos <= 0)
+ pos = pci_pcie_cap(dev);
+ if (!pos)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -837,7 +850,7 @@ pci_save_state(struct pci_dev *dev)
int i;
/* XXX: 100% dword access ok here? */
for (i = 0; i < 16; i++)
- pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+ pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
dev->state_saved = true;
if ((i = pci_save_pcie_state(dev)) != 0)
return i;
@@ -1202,7 +1215,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
+ dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
@@ -1413,7 +1426,8 @@ void pci_pm_init(struct pci_dev *dev)
pmc &= PCI_PM_CAP_PME_MASK;
if (pmc) {
- dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -1510,7 +1524,7 @@ void pci_enable_ari(struct pci_dev *dev)
u16 ctrl;
struct pci_dev *bridge;
- if (!dev->is_pcie || dev->devfn)
+ if (!pci_is_pcie(dev) || dev->devfn)
return;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
@@ -1518,10 +1532,10 @@ void pci_enable_ari(struct pci_dev *dev)
return;
bridge = dev->bus->self;
- if (!bridge || !bridge->is_pcie)
+ if (!bridge || !pci_is_pcie(bridge))
return;
- pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(bridge);
if (!pos)
return;
@@ -1536,6 +1550,54 @@ void pci_enable_ari(struct pci_dev *dev)
bridge->ari_enabled = 1;
}
+static int pci_acs_enable;
+
+/**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+void pci_request_acs(void)
+{
+ pci_acs_enable = 1;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware support it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+ u16 ctrl;
+
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return;
+
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* Source Validation */
+ ctrl |= (cap & PCI_ACS_SV);
+
+ /* P2P Request Redirect */
+ ctrl |= (cap & PCI_ACS_RR);
+
+ /* P2P Completion Redirect */
+ ctrl |= (cap & PCI_ACS_CR);
+
+ /* Upstream Forwarding */
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+}
+
/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
@@ -1669,9 +1731,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
return 0;
err_out:
- dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
- bar,
- pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
+ dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
&pdev->resource[bar]);
return -EBUSY;
}
@@ -1866,31 +1926,6 @@ void pci_clear_master(struct pci_dev *dev)
__pci_set_master(dev, false);
}
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
-#ifndef PCI_CACHE_LINE_BYTES
-#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
-#endif
-
-/* This can be overridden by arch code. */
-/* Don't forget this is measured in 32-bit words, not bytes */
-u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
-
/**
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
* @dev: the PCI device for which MWI is to be enabled
@@ -1901,13 +1936,12 @@ u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-static int
-pci_set_cacheline_size(struct pci_dev *dev)
+int pci_set_cacheline_size(struct pci_dev *dev)
{
u8 cacheline_size;
if (!pci_cache_line_size)
- return -EINVAL; /* The system doesn't support MWI. */
+ return -EINVAL;
/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
equal to or multiple of the right value. */
@@ -1928,6 +1962,24 @@ pci_set_cacheline_size(struct pci_dev *dev)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
+
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2062,6 +2114,7 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
dev->dma_mask = mask;
+ dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask));
return 0;
}
@@ -2073,6 +2126,7 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
dev->dev.coherent_dma_mask = mask;
+ dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask));
return 0;
}
@@ -2099,9 +2153,9 @@ static int pcie_flr(struct pci_dev *dev, int probe)
int i;
int pos;
u32 cap;
- u16 status;
+ u16 status, control;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTTY;
@@ -2126,8 +2180,10 @@ static int pcie_flr(struct pci_dev *dev, int probe)
"proceeding with reset anyway\n");
clear:
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
+ control |= PCI_EXP_DEVCTL_BCR_FLR;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
+
msleep(100);
return 0;
@@ -2450,7 +2506,7 @@ int pcie_get_readrq(struct pci_dev *dev)
int ret, cap;
u16 ctl;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
return -EINVAL;
@@ -2480,7 +2536,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
v = (ffs(rq) - 8) << 12;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
goto out;
@@ -2540,7 +2596,7 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return reg;
}
- dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+ dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
return 0;
}
@@ -2590,7 +2646,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
-spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(resource_alignment_lock);
/**
* pci_specified_resource_alignment - get resource alignment specified by user.
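The last pci.c hunk above replaces the long-deprecated SPIN_LOCK_UNLOCKED initializer with DEFINE_SPINLOCK(), which declares and statically initializes the lock in one step and gives it a proper per-lock lockdep class; making it static also keeps a symbol file-local that never needed external linkage. A minimal sketch of the pattern with an illustrative lock name:

/* Was: spinlock_t example_lock = SPIN_LOCK_UNLOCKED; */
static DEFINE_SPINLOCK(example_lock);

static void example_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data guarded by example_lock ... */
	spin_unlock_irqrestore(&example_lock, flags);
}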
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d92d1954a2f..33ed8e0aba1 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -311,4 +311,6 @@ static inline int pci_resource_alignment(struct pci_dev *dev,
return resource_alignment(res);
}
+extern void pci_enable_acs(struct pci_dev *dev);
+
#endif /* DRIVERS_PCI_H */
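pci.h now exposes pci_enable_acs() to the PCI core, and pci.c adds pci_request_acs() for consumers. pci_enable_acs() itself does nothing unless someone called pci_request_acs() first; the enumeration-path call site is not part of the hunks shown here. A sketch of the consumer side, with a hypothetical init function, under the assumption that the core applies pci_enable_acs() to each PCIe device it scans:

/* Sketch: ask the PCI core to enable ACS wherever the hardware supports it. */
static int __init example_isolation_init(void)
{
	pci_request_acs();	/* core later enables ACS bits per PCIe device */
	return 0;
}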
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 62d15f652bb..7fcd5331b14 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/stddef.h>
#include "aerdrv.h"
struct aer_error_inj {
@@ -35,10 +36,12 @@ struct aer_error_inj {
u32 header_log1;
u32 header_log2;
u32 header_log3;
+ u16 domain;
};
struct aer_error {
struct list_head list;
+ u16 domain;
unsigned int bus;
unsigned int devfn;
int pos_cap_err;
@@ -66,22 +69,27 @@ static LIST_HEAD(pci_bus_ops_list);
/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);
-static void aer_error_init(struct aer_error *err, unsigned int bus,
- unsigned int devfn, int pos_cap_err)
+static void aer_error_init(struct aer_error *err, u16 domain,
+ unsigned int bus, unsigned int devfn,
+ int pos_cap_err)
{
INIT_LIST_HEAD(&err->list);
+ err->domain = domain;
err->bus = bus;
err->devfn = devfn;
err->pos_cap_err = pos_cap_err;
}
/* inject_lock must be held before calling */
-static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
+static struct aer_error *__find_aer_error(u16 domain, unsigned int bus,
+ unsigned int devfn)
{
struct aer_error *err;
list_for_each_entry(err, &einjected, list) {
- if (bus == err->bus && devfn == err->devfn)
+ if (domain == err->domain &&
+ bus == err->bus &&
+ devfn == err->devfn)
return err;
}
return NULL;
@@ -90,7 +98,10 @@ static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
{
- return __find_aer_error(dev->bus->number, dev->devfn);
+ int domain = pci_domain_nr(dev->bus);
+ if (domain < 0)
+ return NULL;
+ return __find_aer_error((u16)domain, dev->bus->number, dev->devfn);
}
/* inject_lock must be held before calling */
@@ -172,11 +183,15 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
struct aer_error *err;
unsigned long flags;
struct pci_ops *ops;
+ int domain;
spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
goto out;
- err = __find_aer_error(bus->number, devfn);
+ domain = pci_domain_nr(bus);
+ if (domain < 0)
+ goto out;
+ err = __find_aer_error((u16)domain, bus->number, devfn);
if (!err)
goto out;
@@ -200,11 +215,15 @@ int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
unsigned long flags;
int rw1cs;
struct pci_ops *ops;
+ int domain;
spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
goto out;
- err = __find_aer_error(bus->number, devfn);
+ domain = pci_domain_nr(bus);
+ if (domain < 0)
+ goto out;
+ err = __find_aer_error((u16)domain, bus->number, devfn);
if (!err)
goto out;
@@ -262,7 +281,7 @@ out:
static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
while (1) {
- if (!dev->is_pcie)
+ if (!pci_is_pcie(dev))
break;
if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
return dev;
@@ -305,25 +324,25 @@ static int aer_inject(struct aer_error_inj *einj)
u32 sever;
int ret = 0;
- dev = pci_get_bus_and_slot(einj->bus, devfn);
+ dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
if (!dev)
- return -EINVAL;
+ return -ENODEV;
rpdev = pcie_find_root_port(dev);
if (!rpdev) {
- ret = -EINVAL;
+ ret = -ENOTTY;
goto out_put;
}
pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos_cap_err) {
- ret = -EIO;
+ ret = -ENOTTY;
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
if (!rp_pos_cap_err) {
- ret = -EIO;
+ ret = -ENOTTY;
goto out_put;
}
@@ -344,7 +363,8 @@ static int aer_inject(struct aer_error_inj *einj)
if (!err) {
err = err_alloc;
err_alloc = NULL;
- aer_error_init(err, einj->bus, devfn, pos_cap_err);
+ aer_error_init(err, einj->domain, einj->bus, devfn,
+ pos_cap_err);
list_add(&err->list, &einjected);
}
err->uncor_status |= einj->uncor_status;
@@ -358,7 +378,8 @@ static int aer_inject(struct aer_error_inj *einj)
if (!rperr) {
rperr = rperr_alloc;
rperr_alloc = NULL;
- aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
+ aer_error_init(rperr, pci_domain_nr(rpdev->bus),
+ rpdev->bus->number, rpdev->devfn,
rp_pos_cap_err);
list_add(&rperr->list, &einjected);
}
@@ -411,10 +432,11 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
-
- if (usize != sizeof(struct aer_error_inj))
+ if (usize < offsetof(struct aer_error_inj, domain) ||
+ usize > sizeof(einj))
return -EINVAL;
+ memset(&einj, 0, sizeof(einj));
if (copy_from_user(&einj, ubuf, usize))
return -EFAULT;
@@ -452,7 +474,7 @@ static void __exit aer_inject_exit(void)
}
spin_lock_irqsave(&inject_lock, flags);
- list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) {
+ list_for_each_entry_safe(err, err_next, &einjected, list) {
list_del(&err->list);
kfree(err);
}
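aer_inject's write handler now accepts two struct sizes: the original layout and the one extended with a trailing domain field, and it zero-fills the buffer before copy_from_user() so the missing field defaults to domain 0 for old tools. A sketch of the size-based compatibility idiom, with illustrative struct and member names:

/* Sketch: accept both the old and the extended userspace struct layout. */
struct inj_request {
	u32	legacy_fields[10];	/* stands in for the original members */
	u16	domain;			/* appended later; optional on input */
};

static int copy_inj_request(struct inj_request *req,
			    const void __user *ubuf, size_t usize)
{
	/* anything from the old size up to the current size is acceptable */
	if (usize < offsetof(struct inj_request, domain) ||
	    usize > sizeof(*req))
		return -EINVAL;

	memset(req, 0, sizeof(*req));	/* absent trailing fields read as zero */
	if (copy_from_user(req, ubuf, usize))
		return -EFAULT;
	return 0;
}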
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 40c3cc5d1ca..97a345927b5 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -53,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = {
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
- .port_type = PCIE_RC_PORT,
+ .port_type = PCI_EXP_TYPE_ROOT_PORT,
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
@@ -295,7 +295,7 @@ static void aer_error_resume(struct pci_dev *dev)
u16 reg16;
/* Clean up Root device status */
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16);
pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 9f5ccbeb4fa..ae672ca8033 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -35,11 +35,14 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
+ if (dev->aer_firmware_first)
+ return -EIO;
+
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return -EIO;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos)
return -EIO;
@@ -60,7 +63,10 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
u16 reg16 = 0;
int pos;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (dev->aer_firmware_first)
+ return -EIO;
+
+ pos = pci_pcie_cap(dev);
if (!pos)
return -EIO;
@@ -78,48 +84,27 @@ EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
int pos;
- u32 status, mask;
+ u32 status;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return -EIO;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
- if (dev->error_state == pci_channel_io_normal)
- status &= ~mask; /* Clear corresponding nonfatal bits */
- else
- status &= mask; /* Clear corresponding fatal bits */
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ if (status)
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
-#if 0
-int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
-{
- int pos;
- u32 status;
-
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return -EIO;
-
- pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
-
- return 0;
-}
-#endif /* 0 */
-
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
- if (dev->pcie_type == PCIE_RC_PORT ||
- dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
- dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
+ if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
+ (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
if (enable)
pci_enable_pcie_error_reporting(dev);
else
@@ -218,7 +203,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
*/
if (atomic_read(&dev->enable_cnt) == 0)
return 0;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos)
return 0;
/* Check if AER is enabled */
@@ -431,10 +416,9 @@ static int find_aer_service_iter(struct device *device, void *data)
result = (struct find_aer_service_data *) data;
if (device->bus == &pcie_port_bus_type) {
- struct pcie_port_data *port_data;
+ struct pcie_device *pcie = to_pcie_device(device);
- port_data = pci_get_drvdata(to_pcie_device(device)->port);
- if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
+ if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
result->is_downstream = 1;
driver = device->driver;
@@ -612,7 +596,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
u16 reg16;
u32 reg32;
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(pdev);
/* Clear PCIE Capability's Device Status */
pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
@@ -874,8 +858,22 @@ void aer_delete_rootport(struct aer_rpc *rpc)
*/
int aer_init(struct pcie_device *dev)
{
- if (aer_osc_setup(dev) && !forceload)
- return -ENXIO;
+ if (dev->port->aer_firmware_first) {
+ dev_printk(KERN_DEBUG, &dev->device,
+ "PCIe errors handled by platform firmware.\n");
+ goto out;
+ }
+
+ if (aer_osc_setup(dev))
+ goto out;
return 0;
+out:
+ if (forceload) {
+ dev_printk(KERN_DEBUG, &dev->device,
+ "aerdrv forceload requested.\n");
+ dev->port->aer_firmware_first = 0;
+ return 0;
+ }
+ return -ENXIO;
}
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index a928d8ab6bd..a2747a663bc 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -51,7 +51,7 @@ static int enable_ecrc_checking(struct pci_dev *dev)
int pos;
u32 reg32;
- if (!dev->is_pcie)
+ if (!pci_is_pcie(dev))
return -ENODEV;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -79,7 +79,7 @@ static int disable_ecrc_checking(struct pci_dev *dev)
int pos;
u32 reg32;
- if (!dev->is_pcie)
+ if (!pci_is_pcie(dev))
return -ENODEV;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5b7056cec00..5a01fc7fbf0 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -122,7 +122,7 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
struct pci_bus *linkbus = link->pdev->subordinate;
list_for_each_entry(child, &linkbus->devices, bus_list) {
- pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(child);
if (!pos)
return;
pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
@@ -156,7 +156,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
/* All functions should have the same cap and state, take the worst */
list_for_each_entry(child, &linkbus->devices, bus_list) {
- pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(child);
if (!pos)
return;
pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
@@ -191,23 +191,23 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
* Configuration, so just check one function
*/
child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
- BUG_ON(!child->is_pcie);
+ BUG_ON(!pci_is_pcie(child));
/* Check downstream component if bit Slot Clock Configuration is 1 */
- cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ cpos = pci_pcie_cap(child);
pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ ppos = pci_pcie_cap(parent);
pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Configure downstream component, all functions */
list_for_each_entry(child, &linkbus->devices, bus_list) {
- cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ cpos = pci_pcie_cap(child);
pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
child_reg[PCI_FUNC(child->devfn)] = reg16;
if (same_clock)
@@ -247,7 +247,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
dev_printk(KERN_ERR, &parent->dev,
"ASPM: Could not configure common clock\n");
list_for_each_entry(child, &linkbus->devices, bus_list) {
- cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ cpos = pci_pcie_cap(child);
pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
child_reg[PCI_FUNC(child->devfn)]);
}
@@ -300,7 +300,7 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
u16 reg16;
u32 reg32;
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(pdev);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
@@ -420,7 +420,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
child->pcie_type != PCI_EXP_TYPE_LEG_END)
continue;
- pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(child);
pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
/* Calculate endpoint L0s acceptable latency */
encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
@@ -436,7 +436,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
u16 reg16;
- int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ int pos = pci_pcie_cap(pdev);
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~0x3;
@@ -503,7 +503,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
* very strange. Disable ASPM for the whole slot
*/
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(child);
if (!pos)
return -EINVAL;
/*
@@ -563,7 +563,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
- if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
+ if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state)
return;
if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
@@ -629,7 +629,8 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state)
+ if (aspm_disabled || !pci_is_pcie(pdev) ||
+ !parent || !parent->link_state)
return;
if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
(parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -670,7 +671,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
- if (aspm_disabled || !pdev->is_pcie || !link)
+ if (aspm_disabled || !pci_is_pcie(pdev) || !link)
return;
if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
(pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -696,7 +697,7 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
- if (aspm_disabled || !pdev->is_pcie)
+ if (aspm_disabled || !pci_is_pcie(pdev))
return;
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
@@ -841,8 +842,9 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
{
struct pcie_link_state *link_state = pdev->link_state;
- if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ if (!pci_is_pcie(pdev) ||
+ (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
if (link_state->aspm_support)
@@ -857,8 +859,9 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
{
struct pcie_link_state *link_state = pdev->link_state;
- if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ if (!pci_is_pcie(pdev) ||
+ (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
if (link_state->aspm_support)
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 17ad53868f9..aaeb9d21cba 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,31 +11,16 @@
#include <linux/compiler.h>
-#if !defined(PCI_CAP_ID_PME)
-#define PCI_CAP_ID_PME 1
-#endif
-
-#if !defined(PCI_CAP_ID_EXP)
-#define PCI_CAP_ID_EXP 0x10
-#endif
-
-#define PORT_TYPE_MASK 0xf
-#define PORT_TO_SLOT_MASK 0x100
-#define SLOT_HP_CAPABLE_MASK 0x40
-#define PCIE_CAPABILITIES_REG 0x2
-#define PCIE_SLOT_CAPABILITIES_REG 0x14
-#define PCIE_PORT_DEVICE_MAXSERVICES 4
-#define PCIE_PORT_MSI_VECTOR_MASK 0x1f
+#define PCIE_PORT_DEVICE_MAXSERVICES 4
/*
- * According to the PCI Express Base Specification 2.0, the indices of the MSI-X
- * table entires used by port services must not exceed 31
+ * According to the PCI Express Base Specification 2.0, the indices of
+ * the MSI-X table entries used by port services must not exceed 31
*/
#define PCIE_PORT_MAX_MSIX_ENTRIES 32
#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
extern struct bus_type pcie_port_bus_type;
-extern int pcie_port_device_probe(struct pci_dev *dev);
extern int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
extern int pcie_port_device_suspend(struct device *dev);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index ef3a4eeaebb..18bf90f748f 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,7 +26,6 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type);
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
{
struct pcie_device *pciedev;
- struct pcie_port_data *port_data;
struct pcie_port_service_driver *driver;
if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
@@ -38,10 +37,8 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
if (driver->service != pciedev->service)
return 0;
- port_data = pci_get_drvdata(pciedev->port);
-
- if (driver->port_type != PCIE_ANY_PORT
- && driver->port_type != port_data->port_type)
+ if ((driver->port_type != PCIE_ANY_PORT) &&
+ (driver->port_type != pciedev->port->pcie_type))
return 0;
return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 52f84fca9f7..413262eb95b 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -108,9 +108,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
* the value in this field indicates which MSI-X Table entry is
* used to generate the interrupt message."
*/
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
- entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK;
+ pos = pci_pcie_cap(dev);
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+ entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
if (entry >= nr_entries)
goto Error;
@@ -177,37 +177,40 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
}
/**
- * assign_interrupt_mode - choose interrupt mode for PCI Express port services
- * (INTx, MSI-X, MSI) and set up vectors
+ * init_service_irqs - initialize irqs for PCI Express port services
* @dev: PCI Express port to handle
- * @vectors: Array of interrupt vectors to populate
+ * @irqs: Array of irqs to populate
* @mask: Bitmask of port capabilities returned by get_port_device_capability()
*
* Return value: Interrupt mode associated with the port
*/
-static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
+static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
- int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
- int i;
+ int i, irq;
/* Try to use MSI-X if supported */
- if (!pcie_port_enable_msix(dev, vectors, mask))
- return PCIE_PORT_MSIX_MODE;
-
+ if (!pcie_port_enable_msix(dev, irqs, mask))
+ return 0;
/* We're not going to use MSI-X, so try MSI and fall back to INTx */
- if (!pci_enable_msi(dev))
- interrupt_mode = PCIE_PORT_MSI_MODE;
-
- if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
- interrupt_mode = PCIE_PORT_INTx_MODE;
+ irq = -1;
+ if (!pci_enable_msi(dev) || dev->pin)
+ irq = dev->irq;
- irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
- vectors[i] = irq;
+ irqs[i] = irq;
+ irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
- vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
+ if (irq < 0)
+ return -ENODEV;
+ return 0;
+}
- return interrupt_mode;
+static void cleanup_service_irqs(struct pci_dev *dev)
+{
+ if (dev->msix_enabled)
+ pci_disable_msix(dev);
+ else if (dev->msi_enabled)
+ pci_disable_msi(dev);
}
/**
@@ -226,13 +229,12 @@ static int get_port_device_capability(struct pci_dev *dev)
u16 reg16;
u32 reg32;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16);
+ pos = pci_pcie_cap(dev);
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
/* Hot-Plug Capable */
- if (reg16 & PORT_TO_SLOT_MASK) {
- pci_read_config_dword(dev,
- pos + PCIE_SLOT_CAPABILITIES_REG, &reg32);
- if (reg32 & SLOT_HP_CAPABLE_MASK)
+ if (reg16 & PCI_EXP_FLAGS_SLOT) {
+ pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
+ if (reg32 & PCI_EXP_SLTCAP_HPC)
services |= PCIE_PORT_SERVICE_HP;
}
/* AER capable */
@@ -241,80 +243,47 @@ static int get_port_device_capability(struct pci_dev *dev)
/* VC support */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
services |= PCIE_PORT_SERVICE_VC;
+ /* Root ports are capable of generating PME too */
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ services |= PCIE_PORT_SERVICE_PME;
return services;
}
/**
- * pcie_device_init - initialize PCI Express port service device
- * @dev: Port service device to initialize
- * @parent: PCI Express port to associate the service device with
- * @port_type: Type of the port
- * @service_type: Type of service to associate with the service device
+ * pcie_device_init - allocate and initialize PCI Express port service device
+ * @pdev: PCI Express port to associate the service device with
+ * @service: Type of service to associate with the service device
* @irq: Interrupt vector to associate with the service device
*/
-static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
- int service_type, int irq)
+static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
{
- struct pcie_port_data *port_data = pci_get_drvdata(parent);
+ int retval;
+ struct pcie_device *pcie;
struct device *device;
- int port_type = port_data->port_type;
- dev->port = parent;
- dev->irq = irq;
- dev->service = service_type;
+ pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+ pcie->port = pdev;
+ pcie->irq = irq;
+ pcie->service = service;
/* Initialize generic device interface */
- device = &dev->device;
- memset(device, 0, sizeof(struct device));
+ device = &pcie->device;
device->bus = &pcie_port_bus_type;
- device->driver = NULL;
- dev_set_drvdata(device, NULL);
device->release = release_pcie_device; /* callback to free pcie dev */
dev_set_name(device, "%s:pcie%02x",
- pci_name(parent), get_descriptor_id(port_type, service_type));
- device->parent = &parent->dev;
-}
-
-/**
- * alloc_pcie_device - allocate PCI Express port service device structure
- * @parent: PCI Express port to associate the service device with
- * @port_type: Type of the port
- * @service_type: Type of service to associate with the service device
- * @irq: Interrupt vector to associate with the service device
- */
-static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
- int service_type, int irq)
-{
- struct pcie_device *device;
-
- device = kzalloc(sizeof(struct pcie_device), GFP_KERNEL);
- if (!device)
- return NULL;
-
- pcie_device_init(parent, device, service_type, irq);
- return device;
-}
-
-/**
- * pcie_port_device_probe - check if device is a PCI Express port
- * @dev: Device to check
- */
-int pcie_port_device_probe(struct pci_dev *dev)
-{
- int pos, type;
- u16 reg;
-
- if (!(pos = pci_find_capability(dev, PCI_CAP_ID_EXP)))
- return -ENODEV;
-
- pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg);
- type = (reg >> 4) & PORT_TYPE_MASK;
- if ( type == PCIE_RC_PORT || type == PCIE_SW_UPSTREAM_PORT ||
- type == PCIE_SW_DOWNSTREAM_PORT )
- return 0;
-
- return -ENODEV;
+ pci_name(pdev),
+ get_descriptor_id(pdev->pcie_type, service));
+ device->parent = &pdev->dev;
+
+ retval = device_register(device);
+ if (retval)
+ kfree(pcie);
+ else
+ get_device(device);
+ return retval;
}
/**
@@ -326,77 +295,49 @@ int pcie_port_device_probe(struct pci_dev *dev)
*/
int pcie_port_device_register(struct pci_dev *dev)
{
- struct pcie_port_data *port_data;
- int status, capabilities, irq_mode, i, nr_serv;
- int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
- u16 reg16;
-
- port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
- if (!port_data)
- return -ENOMEM;
- pci_set_drvdata(dev, port_data);
-
- /* Get port type */
- pci_read_config_word(dev,
- pci_find_capability(dev, PCI_CAP_ID_EXP) +
- PCIE_CAPABILITIES_REG, &reg16);
- port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
+ int status, capabilities, i, nr_service;
+ int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
+ /* Get and check PCI Express port services */
capabilities = get_port_device_capability(dev);
- /* Root ports are capable of generating PME too */
- if (port_data->port_type == PCIE_RC_PORT)
- capabilities |= PCIE_PORT_SERVICE_PME;
-
- irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
- if (irq_mode == PCIE_PORT_NO_IRQ) {
- /*
- * Don't use service devices that require interrupts if there is
- * no way to generate them.
- */
- if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
- status = -ENODEV;
- goto Error;
- }
- capabilities = PCIE_PORT_SERVICE_VC;
- }
- port_data->port_irq_mode = irq_mode;
+ if (!capabilities)
+ return -ENODEV;
+ /* Enable PCI Express port device */
status = pci_enable_device(dev);
if (status)
- goto Error;
+ return status;
pci_set_master(dev);
+ /*
+ * Initialize service irqs. Don't use service devices that
+ * require interrupts if there is no way to generate them.
+ */
+ status = init_service_irqs(dev, irqs, capabilities);
+ if (status) {
+ capabilities &= PCIE_PORT_SERVICE_VC;
+ if (!capabilities)
+ goto error_disable;
+ }
/* Allocate child services if any */
- for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
- struct pcie_device *child;
+ status = -ENODEV;
+ nr_service = 0;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
int service = 1 << i;
-
if (!(capabilities & service))
continue;
-
- child = alloc_pcie_device(dev, service, vectors[i]);
- if (!child)
- continue;
-
- status = device_register(&child->device);
- if (status) {
- kfree(child);
- continue;
- }
-
- get_device(&child->device);
- nr_serv++;
- }
- if (!nr_serv) {
- pci_disable_device(dev);
- status = -ENODEV;
- goto Error;
+ if (!pcie_device_init(dev, service, irqs[i]))
+ nr_service++;
}
+ if (!nr_service)
+ goto error_cleanup_irqs;
return 0;
- Error:
- kfree(port_data);
+error_cleanup_irqs:
+ cleanup_service_irqs(dev);
+error_disable:
+ pci_disable_device(dev);
return status;
}
@@ -464,21 +405,9 @@ static int remove_iter(struct device *dev, void *data)
*/
void pcie_port_device_remove(struct pci_dev *dev)
{
- struct pcie_port_data *port_data = pci_get_drvdata(dev);
-
device_for_each_child(&dev->dev, NULL, remove_iter);
+ cleanup_service_irqs(dev);
pci_disable_device(dev);
-
- switch (port_data->port_irq_mode) {
- case PCIE_PORT_MSIX_MODE:
- pci_disable_msix(dev);
- break;
- case PCIE_PORT_MSI_MODE:
- pci_disable_msi(dev);
- break;
- }
-
- kfree(port_data);
}
/**
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f635e476d63..a49452e2aed 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -43,7 +43,7 @@ static int pcie_portdrv_restore_config(struct pci_dev *dev)
}
#ifdef CONFIG_PM
-static struct dev_pm_ops pcie_portdrv_pm_ops = {
+static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
.freeze = pcie_port_device_suspend,
@@ -67,14 +67,16 @@ static struct dev_pm_ops pcie_portdrv_pm_ops = {
* this port device.
*
*/
-static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
- const struct pci_device_id *id )
+static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
- int status;
+ int status;
- status = pcie_port_device_probe(dev);
- if (status)
- return status;
+ if (!pci_is_pcie(dev) ||
+ ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) &&
+ (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENODEV;
if (!dev->irq && dev->pin) {
dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8105e32117f..98ffb2de22e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
+#include <acpi/acpi_hest.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -163,12 +164,12 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
{
u32 l, sz, mask;
- mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0;
+ mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
res->name = pci_name(dev);
pci_read_config_dword(dev, pos, &l);
- pci_write_config_dword(dev, pos, mask);
+ pci_write_config_dword(dev, pos, l | mask);
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
@@ -223,9 +224,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
goto fail;
if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
- dev_err(&dev->dev, "can't handle 64-bit BAR\n");
+ dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
+ pos);
goto fail;
- } else if ((sizeof(resource_size_t) < 8) && l) {
+ }
+
+ res->flags |= IORESOURCE_MEM_64;
+ if ((sizeof(resource_size_t) < 8) && l) {
/* Address above 32-bit boundary; disable the BAR */
pci_write_config_dword(dev, pos, 0);
pci_write_config_dword(dev, pos + 4, 0);
@@ -234,14 +239,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
} else {
res->start = l64;
res->end = l64 + sz64;
- dev_printk(KERN_DEBUG, &dev->dev,
- "reg %x %s: %pR\n", pos,
- (res->flags & IORESOURCE_PREFETCH) ?
- "64bit mmio pref" : "64bit mmio",
- res);
+ dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
+ pos, res);
}
-
- res->flags |= IORESOURCE_MEM_64;
} else {
sz = pci_size(l, sz, mask);
@@ -251,11 +251,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->start = l;
res->end = l + sz;
- dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
- (res->flags & IORESOURCE_IO) ? "io port" :
- ((res->flags & IORESOURCE_PREFETCH) ?
- "32bit mmio pref" : "32bit mmio"),
- res);
+ dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
}
out:
@@ -297,8 +293,11 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
+ dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
+ child->secondary, child->subordinate,
+ dev->transparent ? " (subtractive decode)": "");
+
if (dev->transparent) {
- dev_info(&dev->dev, "transparent bridge\n");
for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
child->resource[i] = child->parent->resource[i - 3];
}
@@ -323,7 +322,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
res->start = base;
if (!res->end)
res->end = limit + 0xfff;
- dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res);
+ dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
}
res = child->resource[1];
@@ -335,8 +334,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
res->start = base;
res->end = limit + 0xfffff;
- dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n",
- res);
+ dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
}
res = child->resource[2];
@@ -375,9 +373,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
res->flags |= IORESOURCE_MEM_64;
res->start = base;
res->end = limit + 0xfffff;
- dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
- (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
- res);
+ dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
}
}
@@ -651,13 +647,14 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
(child->number > bus->subordinate) ||
(child->number < bus->number) ||
(child->subordinate < bus->number)) {
- pr_debug("PCI: Bus #%02x (-#%02x) is %s "
- "hidden behind%s bridge #%02x (-#%02x)\n",
+ dev_info(&child->dev, "[bus %02x-%02x] %s "
+ "hidden behind%s bridge %s [bus %02x-%02x]\n",
child->number, child->subordinate,
(bus->number > child->subordinate &&
bus->subordinate < child->number) ?
"wholly" : "partially",
bus->self->transparent ? " transparent" : "",
+ dev_name(&bus->dev),
bus->number, bus->subordinate);
}
bus = bus->parent;
@@ -693,6 +690,7 @@ static void set_pcie_port_type(struct pci_dev *pdev)
if (!pos)
return;
pdev->is_pcie = 1;
+ pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}
@@ -703,7 +701,7 @@ static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
u16 reg16;
u32 reg32;
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(pdev);
if (!pos)
return;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
@@ -714,6 +712,12 @@ static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pdev->is_hotplug_bridge = 1;
}
+static void set_pci_aer_firmware_first(struct pci_dev *pdev)
+{
+ if (acpi_hest_firmware_first_pci(pdev))
+ pdev->aer_firmware_first = 1;
+}
+
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
@@ -731,6 +735,7 @@ int pci_setup_device(struct pci_dev *dev)
u32 class;
u8 hdr_type;
struct pci_slot *slot;
+ int pos = 0;
if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
return -EIO;
@@ -742,6 +747,7 @@ int pci_setup_device(struct pci_dev *dev)
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
+ set_pci_aer_firmware_first(dev);
list_for_each_entry(slot, &dev->bus->slots, list)
if (PCI_SLOT(dev->devfn) == slot->number)
@@ -822,6 +828,11 @@ int pci_setup_device(struct pci_dev *dev)
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
set_pcie_hotplug_bridge(dev);
+ pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
+ pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
+ }
break;
case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
@@ -907,7 +918,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
if (class == PCI_CLASS_BRIDGE_HOST)
return pci_cfg_space_size_ext(dev);
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos) {
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!pos)
@@ -1014,6 +1025,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Single Root I/O Virtualization */
pci_iov_init(dev);
+
+ /* Enable ACS P2P upstream forwarding */
+ pci_enable_acs(dev);
}
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1110,7 +1124,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
unsigned int devfn, pass, max = bus->secondary;
struct pci_dev *dev;
- pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
+ dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
for (devfn = 0; devfn < 0x100; devfn += 8)
@@ -1124,8 +1138,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
* all PCI-to-PCI bridges on this bus.
*/
if (!bus->is_added) {
- pr_debug("PCI: Fixups for bus %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
+ dev_dbg(&bus->dev, "fixups for bus\n");
pcibios_fixup_bus(bus);
if (pci_is_root_bus(bus))
bus->is_added = 1;
@@ -1145,8 +1158,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
*
* Return how far we've got finding sub-buses.
*/
- pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
- pci_domain_nr(bus), bus->number, max);
+ dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
return max;
}
@@ -1154,7 +1166,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
int error;
- struct pci_bus *b;
+ struct pci_bus *b, *b2;
struct device *dev;
b = pci_alloc_bus();
@@ -1170,9 +1182,10 @@ struct pci_bus * pci_create_bus(struct device *parent,
b->sysdata = sysdata;
b->ops = ops;
- if (pci_find_bus(pci_domain_nr(b), bus)) {
+ b2 = pci_find_bus(pci_domain_nr(b), bus);
+ if (b2) {
/* If we already got to this bus through a different bridge, ignore it */
- pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
+ dev_dbg(&b2->dev, "bus already known\n");
goto err_out;
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 245d2cdb476..7cfa7c38d31 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -357,7 +357,7 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
pcibios_bus_to_resource(dev, res, &bus_region);
pci_claim_resource(dev, nr);
- dev_info(&dev->dev, "quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name);
+ dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
}
}
@@ -1680,6 +1680,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_
*/
#define AMD_813X_MISC 0x40
#define AMD_813X_NOIOAMODE (1<<0)
+#define AMD_813X_REV_B1 0x12
#define AMD_813X_REV_B2 0x13
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
@@ -1688,7 +1689,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
if (noioapicquirk)
return;
- if (dev->revision == AMD_813X_REV_B2)
+ if ((dev->revision == AMD_813X_REV_B1) ||
+ (dev->revision == AMD_813X_REV_B2))
return;
pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
@@ -1698,8 +1700,10 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
#define AMD_8111_PCI_IRQ_ROUTING 0x56
@@ -2595,9 +2599,37 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
static int __init pci_apply_final_quirks(void)
{
struct pci_dev *dev = NULL;
+ u8 cls = 0;
+ u8 tmp;
+
+ if (pci_cache_line_size)
+ printk(KERN_DEBUG "PCI: CLS %u bytes\n",
+ pci_cache_line_size << 2);
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
pci_fixup_device(pci_fixup_final, dev);
+ /*
+ * If arch hasn't set it explicitly yet, use the CLS
+ * value shared by all PCI devices. If there's a
+ * mismatch, fall back to the default value.
+ */
+ if (!pci_cache_line_size) {
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
+ if (!cls)
+ cls = tmp;
+ if (!tmp || cls == tmp)
+ continue;
+
+ printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), "
+ "using %u bytes\n", cls << 2, tmp << 2,
+ pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = pci_dfl_cache_line_size;
+ }
+ }
+ if (!pci_cache_line_size) {
+ printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
+ cls << 2, pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = cls;
}
return 0;
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index ec415352d9b..6dae8714325 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -26,14 +26,14 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
{
struct pci_dev *tmp = NULL;
- if (pdev->is_pcie)
+ if (pci_is_pcie(pdev))
return NULL;
while (1) {
if (pci_is_root_bus(pdev->bus))
break;
pdev = pdev->bus->self;
/* a p2p bridge */
- if (!pdev->is_pcie) {
+ if (!pci_is_pcie(pdev)) {
tmp = pdev;
continue;
}
@@ -149,32 +149,33 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
}
/**
- * pci_get_bus_and_slot - locate PCI device from a given PCI bus & slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
- *
- * Note: the bus/slot search is limited to PCI domain (segment) 0.
+ * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
+ * @domain: PCI domain/segment on which the PCI device resides.
+ * @bus: PCI bus on which desired PCI device resides
+ * @devfn: encodes number of PCI slot in which the desired PCI device
+ * resides and the logical device number within that slot in case of
+ * multi-function devices.
*
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices. If the device
- * is found, a pointer to its data structure is returned. If no
- * device is found, %NULL is returned. The returned device has its
- * reference count bumped by one.
+ * Given a PCI domain, bus, and slot/function number, the desired PCI
+ * device is located in the list of PCI devices. If the device is
+ * found, its reference count is increased and this function returns a
+ * pointer to its data structure. The caller must decrement the
+ * reference count by calling pci_dev_put(). If no device is found,
+ * %NULL is returned.
*/
-
-struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
+struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+ unsigned int devfn)
{
struct pci_dev *dev = NULL;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (pci_domain_nr(dev->bus) == 0 &&
- (dev->bus->number == bus && dev->devfn == devfn))
+ if (pci_domain_nr(dev->bus) == domain &&
+ (dev->bus->number == bus && dev->devfn == devfn))
return dev;
}
return NULL;
}
+EXPORT_SYMBOL(pci_get_domain_bus_and_slot);
static int match_pci_dev_by_id(struct device *dev, void *data)
{
@@ -354,5 +355,4 @@ EXPORT_SYMBOL(pci_find_next_bus);
EXPORT_SYMBOL(pci_get_device);
EXPORT_SYMBOL(pci_get_subsys);
EXPORT_SYMBOL(pci_get_slot);
-EXPORT_SYMBOL(pci_get_bus_and_slot);
EXPORT_SYMBOL(pci_get_class);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index cb1a027eb55..c48cd377b3f 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -71,53 +71,50 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
void pci_setup_cardbus(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
+ struct resource *res;
struct pci_bus_region region;
- dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
+ dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
+ bus->secondary, bus->subordinate);
- pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
- if (bus->resource[0]->flags & IORESOURCE_IO) {
+ res = bus->resource[0];
+ pcibios_resource_to_bus(bridge, &region, res);
+ if (res->flags & IORESOURCE_IO) {
/*
* The IO resource is allocated a range twice as large as it
* would normally need. This allows us to set both IO regs.
*/
- dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
- (unsigned long)region.start,
- (unsigned long)region.end);
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
region.start);
pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
region.end);
}
- pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
- if (bus->resource[1]->flags & IORESOURCE_IO) {
- dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
- (unsigned long)region.start,
- (unsigned long)region.end);
+ res = bus->resource[1];
+ pcibios_resource_to_bus(bridge, &region, res);
+ if (res->flags & IORESOURCE_IO) {
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
region.start);
pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
region.end);
}
- pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
- if (bus->resource[2]->flags & IORESOURCE_MEM) {
- dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n",
- (unsigned long)region.start,
- (unsigned long)region.end);
+ res = bus->resource[2];
+ pcibios_resource_to_bus(bridge, &region, res);
+ if (res->flags & IORESOURCE_MEM) {
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
region.start);
pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
region.end);
}
- pcibios_resource_to_bus(bridge, &region, bus->resource[3]);
- if (bus->resource[3]->flags & IORESOURCE_MEM) {
- dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
- (unsigned long)region.start,
- (unsigned long)region.end);
+ res = bus->resource[3];
+ pcibios_resource_to_bus(bridge, &region, res);
+ if (res->flags & IORESOURCE_MEM) {
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
region.start);
pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
@@ -140,34 +137,33 @@ EXPORT_SYMBOL(pci_setup_cardbus);
static void pci_setup_bridge(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
+ struct resource *res;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
- int pref_mem64;
if (pci_is_enabled(bridge))
return;
- dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
- pci_domain_nr(bus), bus->number);
+ dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
+ bus->secondary, bus->subordinate);
/* Set up the top and bottom of the PCI I/O segment for this bus. */
- pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
- if (bus->resource[0]->flags & IORESOURCE_IO) {
+ res = bus->resource[0];
+ pcibios_resource_to_bus(bridge, &region, res);
+ if (res->flags & IORESOURCE_IO) {
pci_read_config_dword(bridge, PCI_IO_BASE, &l);
l &= 0xffff0000;
l |= (region.start >> 8) & 0x00f0;
l |= region.end & 0xf000;
/* Set up upper 16 bits of I/O base/limit. */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
- dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n",
- (unsigned long)region.start,
- (unsigned long)region.end);
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
}
else {
/* Clear upper 16 bits of I/O base/limit. */
io_upper16 = 0;
l = 0x00f0;
- dev_info(&bridge->dev, " IO window: disabled\n");
+ dev_info(&bridge->dev, " bridge window [io disabled]\n");
}
/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -178,17 +174,16 @@ static void pci_setup_bridge(struct pci_bus *bus)
/* Set up the top and bottom of the PCI Memory segment
for this bus. */
- pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
- if (bus->resource[1]->flags & IORESOURCE_MEM) {
+ res = bus->resource[1];
+ pcibios_resource_to_bus(bridge, &region, res);
+ if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
- (unsigned long)region.start,
- (unsigned long)region.end);
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
}
else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " MEM window: disabled\n");
+ dev_info(&bridge->dev, " bridge window [mem disabled]\n");
}
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
@@ -198,34 +193,27 @@ static void pci_setup_bridge(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
/* Set up PREF base/limit. */
- pref_mem64 = 0;
bu = lu = 0;
- pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
- if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
- int width = 8;
+ res = bus->resource[2];
+ pcibios_resource_to_bus(bridge, &region, res);
+ if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
- pref_mem64 = 1;
+ if (res->flags & IORESOURCE_MEM_64) {
bu = upper_32_bits(region.start);
lu = upper_32_bits(region.end);
- width = 16;
}
- dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
- width, (unsigned long long)region.start,
- width, (unsigned long long)region.end);
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
}
else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " PREFETCH window: disabled\n");
+ dev_info(&bridge->dev, " bridge window [mem pref disabled]\n");
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- if (pref_mem64) {
- /* Set the upper 32 bits of PREF base & limit. */
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
- }
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -345,6 +333,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
#endif
size = ALIGN(size + size1, 4096);
if (!size) {
+ if (b_res->start || b_res->end)
+ dev_info(&bus->self->dev, "disabling bridge window "
+ "%pR to [bus %02x-%02x] (unused)\n", b_res,
+ bus->secondary, bus->subordinate);
b_res->flags = 0;
return;
}
@@ -390,8 +382,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
align = pci_resource_alignment(dev, r);
order = __ffs(align) - 20;
if (order > 11) {
- dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
- "%pR\n", i, (unsigned long long)align, r);
+ dev_warn(&dev->dev, "disabling BAR %d: %pR "
+ "(bad alignment %#llx)\n", i, r,
+ (unsigned long long) align);
r->flags = 0;
continue;
}
@@ -425,6 +418,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
size = ALIGN(size, min_align);
if (!size) {
+ if (b_res->start || b_res->end)
+ dev_info(&bus->self->dev, "disabling bridge window "
+ "%pR to [bus %02x-%02x] (unused)\n", b_res,
+ bus->secondary, bus->subordinate);
b_res->flags = 0;
return 1;
}
@@ -582,10 +579,7 @@ static void pci_bus_dump_res(struct pci_bus *bus)
if (!res || !res->end)
continue;
- dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i,
- (res->flags & IORESOURCE_IO) ? "io: " :
- ((res->flags & IORESOURCE_PREFETCH)? "pref mem":"mem:"),
- res);
+ dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
}
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c54526b206b..7d678bb15ff 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -51,12 +51,6 @@ void pci_update_resource(struct pci_dev *dev, int resno)
pcibios_resource_to_bus(dev, &region, res);
- dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] "
- "flags %#lx\n", resno, res,
- (unsigned long long)region.start,
- (unsigned long long)region.end,
- (unsigned long)res->flags);
-
new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
if (res->flags & IORESOURCE_IO)
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
@@ -91,9 +85,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
}
}
res->flags &= ~IORESOURCE_UNSET;
- dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n",
- resno, (unsigned long long)region.start,
- (unsigned long long)region.end, res->flags);
+ dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n",
+ resno, res, (unsigned long long)region.start,
+ (unsigned long long)region.end);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -103,20 +97,17 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
int err;
root = pci_find_parent_resource(dev, res);
-
- err = -EINVAL;
- if (root != NULL)
- err = request_resource(root, res);
-
- if (err) {
- const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
- dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
- resource,
- root ? "address space collision on" :
- "no parent found for",
- dtype, res);
+ if (!root) {
+ dev_err(&dev->dev, "no compatible bridge window for %pR\n",
+ res);
+ return -EINVAL;
}
+ err = request_resource(root, res);
+ if (err)
+ dev_err(&dev->dev,
+ "address space collision: %pR already in use\n", res);
+
return err;
}
EXPORT_SYMBOL(pci_claim_resource);
@@ -124,7 +115,7 @@ EXPORT_SYMBOL(pci_claim_resource);
#ifdef CONFIG_PCI_QUIRKS
void pci_disable_bridge_window(struct pci_dev *dev)
{
- dev_dbg(&dev->dev, "Disabling bridge window.\n");
+ dev_info(&dev->dev, "disabling bridge mem windows\n");
/* MMIO Base/Limit */
pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
@@ -165,6 +156,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
}
@@ -178,12 +170,12 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
resource_size_t align;
struct pci_bus *bus;
int ret;
+ char *type;
align = pci_resource_alignment(dev, res);
if (!align) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
- "alignment) %pR flags %#lx\n",
- resno, res, res->flags);
+ dev_info(&dev->dev, "BAR %d: can't assign %pR "
+ "(bogus alignment)\n", resno, res);
return -EINVAL;
}
@@ -198,9 +190,20 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
break;
}
- if (ret)
- dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
- resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+ if (ret) {
+ if (res->flags & IORESOURCE_MEM)
+ if (res->flags & IORESOURCE_PREFETCH)
+ type = "mem pref";
+ else
+ type = "mem";
+ else if (res->flags & IORESOURCE_IO)
+ type = "io";
+ else
+ type = "unknown";
+ dev_info(&dev->dev,
+ "BAR %d: can't assign %s (size %#llx)\n",
+ resno, type, (unsigned long long) resource_size(res));
+ }
return ret;
}
@@ -225,9 +228,8 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
r_align = pci_resource_alignment(dev, r);
if (!r_align) {
- dev_warn(&dev->dev, "BAR %d: bogus alignment "
- "%pR flags %#lx\n",
- i, r, r->flags);
+ dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
+ i, r);
continue;
}
for (list = head; ; list = list->next) {
@@ -274,8 +276,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
continue;
if (!r->parent) {
- dev_err(&dev->dev, "device not available because of "
- "BAR %d %pR collisions\n", i, r);
+ dev_err(&dev->dev, "device not available "
+ "(can't reserve %pR)\n", r);
return -EINVAL;
}
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index cd5082d3ca1..9f3adbd9f70 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -64,7 +64,7 @@ config PCMCIA_IOCTL
If unsure, say Y.
config CARDBUS
- bool "32-bit CardBus support"
+ bool "32-bit CardBus support"
depends on PCI
default y
---help---
@@ -87,8 +87,8 @@ config YENTA
select PCCARD_NONSTATIC
---help---
This option enables support for CardBus host bridges. Virtually
- all modern PCMCIA bridges are CardBus compatible. A "bridge" is
- the hardware inside your computer that PCMCIA cards are plugged
+ all modern PCMCIA bridges are CardBus compatible. A "bridge" is
+ the hardware inside your computer that PCMCIA cards are plugged
into.
To compile this driver as modules, choose M here: the
@@ -208,7 +208,7 @@ config PCMCIA_PXA2XX
depends on ARM && ARCH_PXA && PCMCIA
depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
|| MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
- || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2)
+ || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2)
select PCMCIA_SOC_COMMON
help
Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 38293831399..83ff802de54 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -67,7 +67,7 @@ pxa2xx-obj-$(CONFIG_ARCH_LUBBOCK) += pxa2xx_lubbock_cs.o
pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
-pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
+pxa2xx-obj-$(CONFIG_ARCOM_PCMCIA) += pxa2xx_viper.o
pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 4cd70d05681..cdf50f3bc2d 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -27,8 +27,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
@@ -58,7 +58,7 @@
image number and an offset within that image. xlate_rom_addr()
converts an image/offset address to an absolute offset from the
ROM's base address.
-
+
=====================================================================*/
static u_int xlate_rom_addr(void __iomem *b, u_int addr)
@@ -85,10 +85,10 @@ static u_int xlate_rom_addr(void __iomem *b, u_int addr)
These are similar to setup_cis_mem and release_cis_mem for 16-bit
cards. The "result" that is used externally is the cb_cis_virt
pointer in the struct pcmcia_socket structure.
-
+
=====================================================================*/
-static void cb_release_cis_mem(struct pcmcia_socket * s)
+static void cb_release_cis_mem(struct pcmcia_socket *s)
{
if (s->cb_cis_virt) {
dev_dbg(&s->dev, "cb_release_cis_mem()\n");
@@ -98,7 +98,7 @@ static void cb_release_cis_mem(struct pcmcia_socket * s)
}
}
-static int cb_setup_cis_mem(struct pcmcia_socket * s, struct resource *res)
+static int cb_setup_cis_mem(struct pcmcia_socket *s, struct resource *res)
{
unsigned int start, size;
@@ -124,10 +124,11 @@ static int cb_setup_cis_mem(struct pcmcia_socket * s, struct resource *res)
This is used by the CIS processing code to read CIS information
from a CardBus device.
-
+
=====================================================================*/
-int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void *ptr)
+int read_cb_mem(struct pcmcia_socket *s, int space, u_int addr, u_int len,
+ void *ptr)
{
struct pci_dev *dev;
struct resource *res;
@@ -181,40 +182,47 @@ fail:
cb_alloc() and cb_free() allocate and free the kernel data
structures for a Cardbus device, and handle the lowest level PCI
device setup issues.
-
+
=====================================================================*/
-/*
- * Since there is only one interrupt available to CardBus
- * devices, all devices downstream of this device must
- * be using this IRQ.
- */
-static void cardbus_assign_irqs(struct pci_bus *bus, int irq)
+static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
u8 irq_pin;
+ /*
+ * Since there is only one interrupt available to
+ * CardBus devices, all devices downstream of this
+ * device must be using this IRQ.
+ */
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin);
if (irq_pin) {
dev->irq = irq;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
}
+ /*
+ * Some controllers transfer very slowly with 0 CLS.
+ * Configure it. This may fail as CLS configuration
+ * is mandatory only for MWI.
+ */
+ pci_set_cacheline_size(dev);
+
if (dev->subordinate)
- cardbus_assign_irqs(dev->subordinate, irq);
+ cardbus_config_irq_and_cls(dev->subordinate, irq);
}
}
-int __ref cb_alloc(struct pcmcia_socket * s)
+int __ref cb_alloc(struct pcmcia_socket *s)
{
struct pci_bus *bus = s->cb_dev->subordinate;
struct pci_dev *dev;
unsigned int max, pass;
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
-// pcibios_fixup_bus(bus);
+/* pcibios_fixup_bus(bus); */
max = bus->secondary;
for (pass = 0; pass < 2; pass++)
@@ -228,7 +236,7 @@ int __ref cb_alloc(struct pcmcia_socket * s)
*/
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
- cardbus_assign_irqs(bus, s->pci_irq);
+ cardbus_config_irq_and_cls(bus, s->pci_irq);
/* socket specific tune function */
if (s->tune_bridge)
@@ -241,7 +249,7 @@ int __ref cb_alloc(struct pcmcia_socket * s)
return 0;
}
-void cb_free(struct pcmcia_socket * s)
+void cb_free(struct pcmcia_socket *s)
{
struct pci_dev *bridge = s->cb_dev;
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 8c1b73cf021..25b1cd219e3 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -23,7 +23,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/ioport.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -125,7 +125,7 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag
Low-level functions to read and write CIS memory. I think the
write routine is only useful for writing one-byte registers.
-
+
======================================================================*/
/* Bits in attr field */
@@ -137,7 +137,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
{
void __iomem *sys, *end;
unsigned char *buf = ptr;
-
+
dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
if (attr & IS_INDIRECT) {
@@ -203,7 +203,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
{
void __iomem *sys, *end;
unsigned char *buf = ptr;
-
+
dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
if (attr & IS_INDIRECT) {
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(pcmcia_write_cis_mem);
This is a wrapper around read_cis_mem, with the same interface,
but which caches information, for cards whose CIS may not be
readable all the time.
-
+
======================================================================*/
static void read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
@@ -342,7 +342,7 @@ EXPORT_SYMBOL(destroy_cis_cache);
This verifies if the CIS of a card matches what is in the CIS
cache.
-
+
======================================================================*/
int verify_cis_cache(struct pcmcia_socket *s)
@@ -381,7 +381,7 @@ int verify_cis_cache(struct pcmcia_socket *s)
For really bad cards, we provide a facility for uploading a
replacement CIS.
-
+
======================================================================*/
int pcmcia_replace_cis(struct pcmcia_socket *s,
@@ -406,7 +406,7 @@ EXPORT_SYMBOL(pcmcia_replace_cis);
/*======================================================================
The high-level CIS tuple services
-
+
======================================================================*/
typedef struct tuple_flags {
@@ -421,8 +421,6 @@ typedef struct tuple_flags {
#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
#define SPACE(f) (((tuple_flags *)(&(f)))->space)
-int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int func, tuple_t *tuple);
-
int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple)
{
if (!s)
@@ -523,10 +521,11 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
ofs++; continue;
}
}
-
+
/* End of chain? Follow long link if possible */
if (link[0] == CISTPL_END) {
- if ((ofs = follow_link(s, tuple)) < 0)
+ ofs = follow_link(s, tuple);
+ if (ofs < 0)
return -ENOSPC;
attr = SPACE(tuple->Flags);
read_cis_cache(s, attr, ofs, 2, link);
@@ -578,7 +577,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
} else
if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
break;
-
+
if (link[0] == tuple->DesiredTuple)
break;
ofs += link[1] + 2;
@@ -587,7 +586,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
return -ENOSPC;
}
-
+
tuple->TupleCode = link[0];
tuple->TupleLink = link[1];
tuple->CISOffset = ofs + 2;
@@ -623,7 +622,7 @@ EXPORT_SYMBOL(pccard_get_tuple_data);
/*======================================================================
Parsing routines for individual tuples
-
+
======================================================================*/
static int parse_device(tuple_t *tuple, cistpl_device_t *device)
@@ -637,26 +636,37 @@ static int parse_device(tuple_t *tuple, cistpl_device_t *device)
device->ndev = 0;
for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
-
- if (*p == 0xff) break;
+
+ if (*p == 0xff)
+ break;
device->dev[i].type = (*p >> 4);
device->dev[i].wp = (*p & 0x08) ? 1 : 0;
switch (*p & 0x07) {
- case 0: device->dev[i].speed = 0; break;
- case 1: device->dev[i].speed = 250; break;
- case 2: device->dev[i].speed = 200; break;
- case 3: device->dev[i].speed = 150; break;
- case 4: device->dev[i].speed = 100; break;
+ case 0:
+ device->dev[i].speed = 0;
+ break;
+ case 1:
+ device->dev[i].speed = 250;
+ break;
+ case 2:
+ device->dev[i].speed = 200;
+ break;
+ case 3:
+ device->dev[i].speed = 150;
+ break;
+ case 4:
+ device->dev[i].speed = 100;
+ break;
case 7:
- if (++p == q)
- return -EINVAL;
- device->dev[i].speed = SPEED_CVT(*p);
- while (*p & 0x80)
if (++p == q)
return -EINVAL;
- break;
+ device->dev[i].speed = SPEED_CVT(*p);
+ while (*p & 0x80)
+ if (++p == q)
+ return -EINVAL;
+ break;
default:
- return -EINVAL;
+ return -EINVAL;
}
if (++p == q)
@@ -671,7 +681,7 @@ static int parse_device(tuple_t *tuple, cistpl_device_t *device)
if (++p == q)
break;
}
-
+
return 0;
}
@@ -706,9 +716,9 @@ static int parse_longlink_mfc(tuple_t *tuple,
{
u_char *p;
int i;
-
+
p = (u_char *)tuple->TupleData;
-
+
link->nfn = *p; p++;
if (tuple->TupleDataLen <= link->nfn*5)
return -EINVAL;
@@ -737,11 +747,13 @@ static int parse_strings(u_char *p, u_char *q, int max,
ns++;
for (;;) {
s[j++] = (*p == 0xff) ? '\0' : *p;
- if ((*p == '\0') || (*p == 0xff)) break;
+ if ((*p == '\0') || (*p == 0xff))
+ break;
if (++p == q)
return -EINVAL;
}
- if ((*p == 0xff) || (++p == q)) break;
+ if ((*p == 0xff) || (++p == q))
+ break;
}
if (found) {
*found = ns;
@@ -756,10 +768,10 @@ static int parse_strings(u_char *p, u_char *q, int max,
static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
{
u_char *p, *q;
-
+
p = (u_char *)tuple->TupleData;
q = p + tuple->TupleDataLen;
-
+
vers_1->major = *p; p++;
vers_1->minor = *p; p++;
if (p >= q)
@@ -774,10 +786,10 @@ static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
{
u_char *p, *q;
-
+
p = (u_char *)tuple->TupleData;
q = p + tuple->TupleDataLen;
-
+
return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
altstr->str, altstr->ofs, &altstr->ns);
}
@@ -793,7 +805,8 @@ static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
q = p + tuple->TupleDataLen;
for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
- if (p > q-2) break;
+ if (p > q-2)
+ break;
jedec->id[nid].mfr = p[0];
jedec->id[nid].info = p[1];
p += 2;
@@ -871,7 +884,7 @@ static int parse_config(tuple_t *tuple, cistpl_config_t *config)
The following routines are all used to parse the nightmarish
config table entries.
-
+
======================================================================*/
static u_char *parse_power(u_char *p, u_char *q,
@@ -880,17 +893,20 @@ static u_char *parse_power(u_char *p, u_char *q,
int i;
u_int scale;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
pwr->present = *p;
pwr->flags = 0;
p++;
for (i = 0; i < 7; i++)
if (pwr->present & (1<<i)) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
pwr->param[i] = POWER_CVT(*p);
scale = POWER_SCALE(*p);
while (*p & 0x80) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
if ((*p & 0x7f) < 100)
pwr->param[i] += (*p & 0x7f) * scale / 100;
else if (*p == 0x7d)
@@ -914,24 +930,28 @@ static u_char *parse_timing(u_char *p, u_char *q,
{
u_char scale;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
scale = *p;
if ((scale & 3) != 3) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
timing->wait = SPEED_CVT(*p);
timing->waitscale = exponent[scale & 3];
} else
timing->wait = 0;
scale >>= 2;
if ((scale & 7) != 7) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
timing->ready = SPEED_CVT(*p);
timing->rdyscale = exponent[scale & 7];
} else
timing->ready = 0;
scale >>= 3;
if (scale != 7) {
- if (++p == q) return NULL;
+ if (++p == q)
+ return NULL;
timing->reserved = SPEED_CVT(*p);
timing->rsvscale = exponent[scale];
} else
@@ -946,7 +966,8 @@ static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
{
int i, j, bsz, lsz;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
io->flags = *p;
if (!(*p & 0x80)) {
@@ -955,24 +976,29 @@ static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
return p+1;
}
-
- if (++p == q) return NULL;
+
+ if (++p == q)
+ return NULL;
io->nwin = (*p & 0x0f) + 1;
bsz = (*p & 0x30) >> 4;
- if (bsz == 3) bsz++;
+ if (bsz == 3)
+ bsz++;
lsz = (*p & 0xc0) >> 6;
- if (lsz == 3) lsz++;
+ if (lsz == 3)
+ lsz++;
p++;
-
+
for (i = 0; i < io->nwin; i++) {
io->win[i].base = 0;
io->win[i].len = 1;
for (j = 0; j < bsz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
io->win[i].base += *p << (j*8);
}
for (j = 0; j < lsz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
io->win[i].len += *p << (j*8);
}
}
@@ -986,27 +1012,32 @@ static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
int i, j, asz, lsz, has_ha;
u_int len, ca, ha;
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
mem->nwin = (*p & 0x07) + 1;
lsz = (*p & 0x18) >> 3;
asz = (*p & 0x60) >> 5;
has_ha = (*p & 0x80);
- if (++p == q) return NULL;
-
+ if (++p == q)
+ return NULL;
+
for (i = 0; i < mem->nwin; i++) {
len = ca = ha = 0;
for (j = 0; j < lsz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
len += *p << (j*8);
}
for (j = 0; j < asz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
ca += *p << (j*8);
}
if (has_ha)
for (j = 0; j < asz; j++, p++) {
- if (p == q) return NULL;
+ if (p == q)
+ return NULL;
ha += *p << (j*8);
}
mem->win[i].len = len << 8;
@@ -1095,7 +1126,7 @@ static int parse_cftable_entry(tuple_t *tuple,
entry->timing.ready = 0;
entry->timing.reserved = 0;
}
-
+
/* I/O window options */
if (features & 0x08) {
p = parse_io(p, q, &entry->io);
@@ -1103,7 +1134,7 @@ static int parse_cftable_entry(tuple_t *tuple,
return -EINVAL;
} else
entry->io.nwin = 0;
-
+
/* Interrupt options */
if (features & 0x10) {
p = parse_irq(p, q, &entry->irq);
@@ -1153,7 +1184,7 @@ static int parse_cftable_entry(tuple_t *tuple,
}
entry->subtuples = q-p;
-
+
return 0;
}
@@ -1176,7 +1207,7 @@ static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
{
u_char *p;
-
+
p = (u_char *)tuple->TupleData;
if ((*p != 3) || (tuple->TupleDataLen < 6))
return -EINVAL;
@@ -1231,7 +1262,7 @@ static int parse_cftable_entry_cb(tuple_t *tuple,
entry->io = *p; p++;
} else
entry->io = 0;
-
+
/* Interrupt options */
if (features & 0x10) {
p = parse_irq(p, q, &entry->irq);
@@ -1264,7 +1295,7 @@ static int parse_cftable_entry_cb(tuple_t *tuple,
}
entry->subtuples = q-p;
-
+
return 0;
}
@@ -1281,7 +1312,8 @@ static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
q = p + tuple->TupleDataLen;
for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
- if (p > q-6) break;
+ if (p > q-6)
+ break;
geo->geo[n].buswidth = p[0];
geo->geo[n].erase_block = 1 << (p[1]-1);
geo->geo[n].read_block = 1 << (p[2]-1);
@@ -1302,13 +1334,13 @@ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
if (tuple->TupleDataLen < 10)
return -EINVAL;
-
+
p = tuple->TupleData;
q = p + tuple->TupleDataLen;
v2->vers = p[0];
v2->comply = p[1];
- v2->dindex = get_unaligned_le16(p +2 );
+ v2->dindex = get_unaligned_le16(p + 2);
v2->vspec8 = p[6];
v2->vspec9 = p[7];
v2->nhdr = p[8];
@@ -1322,7 +1354,7 @@ static int parse_org(tuple_t *tuple, cistpl_org_t *org)
{
u_char *p, *q;
int i;
-
+
p = tuple->TupleData;
q = p + tuple->TupleDataLen;
if (p == q)
@@ -1332,7 +1364,8 @@ static int parse_org(tuple_t *tuple, cistpl_org_t *org)
return -EINVAL;
for (i = 0; i < 30; i++) {
org->desc[i] = *p;
- if (*p == '\0') break;
+ if (*p == '\0')
+ break;
if (++p == q)
return -EINVAL;
}
@@ -1363,7 +1396,7 @@ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
{
int ret = 0;
-
+
if (tuple->TupleDataLen > tuple->TupleDataMax)
return -EINVAL;
switch (tuple->TupleCode) {
@@ -1448,7 +1481,7 @@ EXPORT_SYMBOL(pcmcia_parse_tuple);
/*======================================================================
This is used internally by Card Services to look up CIS stuff.
-
+
======================================================================*/
int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t code, void *parse)
@@ -1550,7 +1583,7 @@ EXPORT_SYMBOL(pccard_loop_tuple);
checks include making sure several critical tuples are present and
valid; seeing if the total number of tuples is reasonable; and
looking for tuples that use reserved codes.
-
+
======================================================================*/
int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 790af87a922..6d6f82b38a6 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -135,7 +135,7 @@ int pcmcia_socket_dev_resume(struct device *dev)
EXPORT_SYMBOL(pcmcia_socket_dev_resume);
-struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt)
+struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt)
{
struct device *dev = get_device(&skt->dev);
if (!dev)
@@ -145,7 +145,7 @@ struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt)
put_device(&skt->dev);
return NULL;
}
- return (skt);
+ return skt;
}
EXPORT_SYMBOL(pcmcia_get_socket);
@@ -297,7 +297,7 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
EXPORT_SYMBOL(pcmcia_unregister_socket);
-struct pcmcia_socket * pcmcia_get_socket_by_nr(unsigned int nr)
+struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr)
{
struct pcmcia_socket *s;
@@ -736,7 +736,7 @@ EXPORT_SYMBOL(pcmcia_parse_events);
/* register pcmcia_callback */
int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c)
{
- int ret = 0;
+ int ret = 0;
/* s->skt_mutex also protects s->callback */
mutex_lock(&s->skt_mutex);
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(pcmcia_suspend_card);
int pcmcia_resume_card(struct pcmcia_socket *skt)
{
int ret;
-
+
dev_dbg(&skt->dev, "waking up socket\n");
mutex_lock(&skt->skt_mutex);
@@ -876,7 +876,7 @@ EXPORT_SYMBOL(pcmcia_resume_card);
int pcmcia_eject_card(struct pcmcia_socket *skt)
{
int ret;
-
+
dev_dbg(&skt->dev, "user eject request\n");
mutex_lock(&skt->skt_mutex);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 05893d41dd4..1a4a3c49cc1 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -57,7 +57,7 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
"function\n", p_drv->drv.name);
while (did && did->match_flags) {
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (!did->prod_id[i])
continue;
@@ -105,7 +105,7 @@ pcmcia_store_new_id(struct device_driver *driver, const char *buf, size_t count)
__u16 match_flags, manf_id, card_id;
__u8 func_id, function, device_no;
__u32 prod_id_hash[4] = {0, 0, 0, 0};
- int fields=0;
+ int fields = 0;
int retval = 0;
fields = sscanf(buf, "%hx %hx %hx %hhx %hhx %hhx %x %x %x %x",
@@ -214,7 +214,7 @@ EXPORT_SYMBOL(pcmcia_unregister_driver);
/* pcmcia_device handling */
-struct pcmcia_device * pcmcia_get_dev(struct pcmcia_device *p_dev)
+struct pcmcia_device *pcmcia_get_dev(struct pcmcia_device *p_dev)
{
struct device *tmp_dev;
tmp_dev = get_device(&p_dev->dev);
@@ -258,7 +258,7 @@ static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
return;
}
-static int pcmcia_device_probe(struct device * dev)
+static int pcmcia_device_probe(struct device *dev)
{
struct pcmcia_device *p_dev;
struct pcmcia_driver *p_drv;
@@ -325,7 +325,7 @@ put_module:
put_dev:
if (ret)
put_device(dev);
- return (ret);
+ return ret;
}
@@ -354,7 +354,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
list_del(&p_dev->socket_device_list);
- p_dev->_removed=1;
+ p_dev->_removed = 1;
spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
dev_dbg(&p_dev->dev, "unregistering device\n");
@@ -364,7 +364,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
return;
}
-static int pcmcia_device_remove(struct device * dev)
+static int pcmcia_device_remove(struct device *dev)
{
struct pcmcia_device *p_dev;
struct pcmcia_driver *p_drv;
@@ -391,7 +391,7 @@ static int pcmcia_device_remove(struct device * dev)
return 0;
if (p_drv->remove)
- p_drv->remove(p_dev);
+ p_drv->remove(p_dev);
p_dev->dev_node = NULL;
@@ -499,7 +499,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
*/
static DEFINE_MUTEX(device_add_lock);
-struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
+struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int function)
{
struct pcmcia_device *p_dev, *tmp_dev;
unsigned long flags;
@@ -545,8 +545,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
* Note that this is serialized by the device_add_lock, so that
* only one such struct will be created.
*/
- list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
- if (p_dev->func == tmp_dev->func) {
+ list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
+ if (p_dev->func == tmp_dev->func) {
p_dev->function_config = tmp_dev->function_config;
p_dev->io = tmp_dev->io;
p_dev->irq = tmp_dev->irq;
@@ -627,10 +627,10 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
no_funcs = 1;
s->functions = no_funcs;
- for (i=0; i < no_funcs; i++)
+ for (i = 0; i < no_funcs; i++)
pcmcia_device_add(s, i);
- return (ret);
+ return ret;
}
@@ -756,7 +756,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
release:
release_firmware(fw);
- return (ret);
+ return ret;
}
#else /* !CONFIG_PCMCIA_LOAD_CIS */
@@ -852,7 +852,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
if (did->match_flags & PCMCIA_DEV_ID_MATCH_ANONYMOUS) {
int i;
- for (i=0; i<4; i++)
+ for (i = 0; i < 4; i++)
if (dev->prod_id[i])
return 0;
if (dev->has_manf_id || dev->has_card_id || dev->has_func_id)
@@ -865,9 +865,10 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
}
-static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
- struct pcmcia_device * p_dev = to_pcmcia_dev(dev);
- struct pcmcia_driver * p_drv = to_pcmcia_drv(drv);
+static int pcmcia_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+ struct pcmcia_driver *p_drv = to_pcmcia_drv(drv);
struct pcmcia_device_id *did = p_drv->id_table;
struct pcmcia_dynid *dynid;
@@ -917,7 +918,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
p_dev = to_pcmcia_dev(dev);
/* calculate hashes */
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (!p_dev->prod_id[i])
continue;
hash[i] = crc32(0, p_dev->prod_id[i], strlen(p_dev->prod_id[i]));
@@ -984,14 +985,14 @@ static void runtime_resume(struct device *dev)
static ssize_t field##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
- return p_dev->test ? sprintf (buf, format, p_dev->field) : -ENODEV; \
+ return p_dev->test ? sprintf(buf, format, p_dev->field) : -ENODEV; \
}
#define pcmcia_device_stringattr(name, field) \
static ssize_t name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct pcmcia_device *p_dev = to_pcmcia_dev(dev); \
- return p_dev->field ? sprintf (buf, "%s\n", p_dev->field) : -ENODEV; \
+ return p_dev->field ? sprintf(buf, "%s\n", p_dev->field) : -ENODEV; \
}
pcmcia_device_attr(func, socket, "0x%02x\n");
@@ -1020,8 +1021,8 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
int ret = 0;
- if (!count)
- return -EINVAL;
+ if (!count)
+ return -EINVAL;
if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
ret = runtime_suspend(dev);
@@ -1039,10 +1040,11 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
u32 hash[4] = { 0, 0, 0, 0};
/* calculate hashes */
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (!p_dev->prod_id[i])
continue;
- hash[i] = crc32(0,p_dev->prod_id[i],strlen(p_dev->prod_id[i]));
+ hash[i] = crc32(0, p_dev->prod_id[i],
+ strlen(p_dev->prod_id[i]));
}
return sprintf(buf, "pcmcia:m%04Xc%04Xf%02Xfn%02Xpfn%02X"
"pa%08Xpb%08Xpc%08Xpd%08X\n",
@@ -1091,7 +1093,7 @@ static struct device_attribute pcmcia_dev_attrs[] = {
/* PM support, also needed for reset */
-static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = NULL;
@@ -1131,10 +1133,10 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
}
-static int pcmcia_dev_resume(struct device * dev)
+static int pcmcia_dev_resume(struct device *dev)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- struct pcmcia_driver *p_drv = NULL;
+ struct pcmcia_driver *p_drv = NULL;
int ret = 0;
if (!p_dev->suspended)
@@ -1211,7 +1213,7 @@ static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
/*======================================================================
The card status event handler.
-
+
======================================================================*/
/* Normally, the event is passed to individual drivers after
@@ -1264,7 +1266,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
} /* ds_event */
-struct pcmcia_device * pcmcia_dev_present(struct pcmcia_device *_p_dev)
+struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
{
struct pcmcia_device *p_dev;
struct pcmcia_device *ret = NULL;
@@ -1329,7 +1331,7 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
if (ret) {
dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n");
pcmcia_put_socket(socket);
- return (ret);
+ return ret;
}
return 0;
@@ -1400,7 +1402,7 @@ static int __init init_pcmcia_bus(void)
return 0;
}
-fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that
+fs_initcall(init_pcmcia_bus); /* one level after subsys_initcall so that
* pcmcia_socket_class is already registered */
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index c4d7908fa37..f73fd5beaa3 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -88,12 +88,12 @@ static struct pcmcia_driver *get_pcmcia_driver(dev_info_t *dev_info)
p_drv = container_of(drv, struct pcmcia_driver, drv);
- return (p_drv);
+ return p_drv;
}
#ifdef CONFIG_PROC_FS
-static struct proc_dir_entry *proc_pccard = NULL;
+static struct proc_dir_entry *proc_pccard;
static int proc_read_drivers_callback(struct device_driver *driver, void *_m)
{
@@ -158,7 +158,8 @@ static int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
#else
-static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj) {
+static inline int adjust_irq(struct pcmcia_socket *s, adjust_t *adj)
+{
return 0;
}
@@ -195,7 +196,7 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
begin = adj->resource.memory.Base;
end = adj->resource.memory.Base + adj->resource.memory.Size - 1;
if (s->resource_ops->add_mem)
- ret =s->resource_ops->add_mem(s, adj->Action, begin, end);
+ ret = s->resource_ops->add_mem(s, adj->Action, begin, end);
case RES_IO_RANGE:
begin = adj->resource.io.BasePort;
end = adj->resource.io.BasePort + adj->resource.io.NumPorts - 1;
@@ -215,7 +216,7 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
}
up_read(&pcmcia_socket_list_rwsem);
- return (ret);
+ return ret;
}
@@ -490,7 +491,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
}
spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
- list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
+ list_for_each_entry(p_dev, &s->devices_list, socket_device_list) {
if (p_dev->func == bind_info->function) {
if ((p_dev->dev.driver == &p_drv->drv)) {
if (p_dev->cardmgr) {
@@ -558,7 +559,7 @@ rescan:
err_put:
pcmcia_put_socket(s);
- return (ret);
+ return ret;
} /* bind_request */
#ifdef CONFIG_CARDBUS
@@ -655,7 +656,7 @@ static int get_device_info(struct pcmcia_socket *s, bind_info_t *bind_info, int
err_put:
pcmcia_put_dev(p_dev);
- return (ret);
+ return ret;
} /* get_device_info */
@@ -664,7 +665,7 @@ static int ds_open(struct inode *inode, struct file *file)
socket_t i = iminor(inode);
struct pcmcia_socket *s;
user_info_t *user;
- static int warning_printed = 0;
+ static int warning_printed;
int ret = 0;
pr_debug("ds_open(socket %d)\n", i);
@@ -738,12 +739,13 @@ static int ds_release(struct inode *inode, struct file *file)
s = user->socket;
/* Unlink user data structure */
- if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ if ((file->f_flags & O_ACCMODE) != O_RDONLY)
s->pcmcia_state.busy = 0;
- }
+
file->private_data = NULL;
for (link = &s->user; *link; link = &(*link)->next)
- if (*link == user) break;
+ if (*link == user)
+ break;
if (link == NULL)
goto out;
*link = user->next;
@@ -774,7 +776,7 @@ static ssize_t ds_read(struct file *file, char __user *buf,
s = user->socket;
if (s->pcmcia_state.dead)
- return -EIO;
+ return -EIO;
ret = wait_event_interruptible(s->queue, !queue_empty(user));
if (ret == 0)
@@ -824,7 +826,7 @@ static u_int ds_poll(struct file *file, poll_table *wait)
/*====================================================================*/
-static int ds_ioctl(struct inode * inode, struct file * file,
+static int ds_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
struct pcmcia_socket *s;
@@ -842,10 +844,11 @@ static int ds_ioctl(struct inode * inode, struct file * file,
s = user->socket;
if (s->pcmcia_state.dead)
- return -EIO;
+ return -EIO;
size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
- if (size > sizeof(ds_ioctl_arg_t)) return -EINVAL;
+ if (size > sizeof(ds_ioctl_arg_t))
+ return -EINVAL;
/* Permission check */
if (!(cmd & IOC_OUT) && !capable(CAP_SYS_ADMIN))
@@ -1024,8 +1027,8 @@ static int ds_ioctl(struct inode * inode, struct file * file,
}
if (cmd & IOC_OUT) {
- if (__copy_to_user(uarg, (char *)buf, size))
- err = -EFAULT;
+ if (__copy_to_user(uarg, (char *)buf, size))
+ err = -EFAULT;
}
free_out:
@@ -1045,7 +1048,8 @@ static const struct file_operations ds_fops = {
.poll = ds_poll,
};
-void __init pcmcia_setup_ioctl(void) {
+void __init pcmcia_setup_ioctl(void)
+{
int i;
/* Set up character device for user mode clients */
@@ -1064,7 +1068,8 @@ void __init pcmcia_setup_ioctl(void) {
}
-void __exit pcmcia_cleanup_ioctl(void) {
+void __exit pcmcia_cleanup_ioctl(void)
+{
#ifdef CONFIG_PROC_FS
if (proc_pccard) {
remove_proc_entry("drivers", proc_pccard);
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index a8bf8c1b45e..d5db95644b6 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -33,7 +33,7 @@
/* Access speed for IO windows */
-static int io_speed = 0;
+static int io_speed;
module_param(io_speed, int, 0444);
@@ -62,7 +62,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
num, align);
align = 0;
} else
- while (align && (align < num)) align <<= 1;
+ while (align && (align < num))
+ align <<= 1;
}
if (*base & ~(align-1)) {
dev_dbg(&s->dev, "odd IO request: base %#x align %#x\n",
@@ -338,7 +339,7 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
struct pcmcia_socket *s = p_dev->socket;
config_t *c = p_dev->function_config;
- if (!p_dev->_io )
+ if (!p_dev->_io)
return -EINVAL;
p_dev->_io = 0;
@@ -362,7 +363,7 @@ static int pcmcia_release_io(struct pcmcia_device *p_dev, io_req_t *req)
static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
{
struct pcmcia_socket *s = p_dev->socket;
- config_t *c= p_dev->function_config;
+ config_t *c = p_dev->function_config;
if (!p_dev->_irq)
return -EINVAL;
@@ -383,9 +384,8 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
s->irq.AssignedIRQ = 0;
}
- if (req->Handler) {
+ if (req->Handler)
free_irq(req->AssignedIRQ, p_dev->priv);
- }
#ifdef CONFIG_PCMCIA_PROBE
pcmcia_used_irq[req->AssignedIRQ]--;
@@ -656,7 +656,8 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
type = IRQF_SHARED;
else if (req->Attributes & IRQ_TYPE_DYNAMIC_SHARING)
type = IRQF_SHARED;
- else printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
+ else
+ printk(KERN_WARNING "pcmcia: Driver needs updating to support IRQ sharing.\n");
#ifdef CONFIG_PCMCIA_PROBE
@@ -788,7 +789,8 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
/* Allocate system memory window */
for (w = 0; w < MAX_WIN; w++)
- if (!(s->state & SOCKET_WIN_REQ(w))) break;
+ if (!(s->state & SOCKET_WIN_REQ(w)))
+ break;
if (w == MAX_WIN) {
dev_dbg(&s->dev, "all windows are used already\n");
return -EINVAL;
@@ -826,18 +828,19 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
s->state |= SOCKET_WIN_REQ(w);
/* Return window handle */
- if (s->features & SS_CAP_STATIC_MAP) {
+ if (s->features & SS_CAP_STATIC_MAP)
req->Base = win->static_start;
- } else {
+ else
req->Base = win->res->start;
- }
+
*wh = w + 1;
return 0;
} /* pcmcia_request_window */
EXPORT_SYMBOL(pcmcia_request_window);
-void pcmcia_disable_device(struct pcmcia_device *p_dev) {
+void pcmcia_disable_device(struct pcmcia_device *p_dev)
+{
pcmcia_release_configuration(p_dev);
pcmcia_release_io(p_dev, &p_dev->io);
pcmcia_release_irq(p_dev, &p_dev->irq);
@@ -970,7 +973,7 @@ int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
&loop, pcmcia_do_loop_tuple);
-};
+}
EXPORT_SYMBOL(pcmcia_loop_tuple);
@@ -1000,7 +1003,7 @@ static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
} else
dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
return 0;
-};
+}
/**
* pcmcia_get_tuple() - get first tuple from CIS
@@ -1024,7 +1027,7 @@ size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
return get.len;
-};
+}
EXPORT_SYMBOL(pcmcia_get_tuple);
@@ -1057,7 +1060,7 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
for (i = 0; i < 6; i++)
dev->dev_addr[i] = tuple->TupleData[i+2];
return 0;
-};
+}
/**
* pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
@@ -1071,6 +1074,6 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
{
return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
-};
+}
EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 84dde7768ad..76e640bccde 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -214,7 +214,8 @@ static void pxa2xx_configure_sockets(struct device *dev)
MECR |= MECR_CIT;
/* Set MECR:NOS (Number Of Sockets) */
- if ((ops->first + ops->nr) > 1 || machine_is_viper())
+ if ((ops->first + ops->nr) > 1 ||
+ machine_is_viper() || machine_is_arcom_zeus())
MECR |= MECR_NOS;
else
MECR &= ~MECR_NOS;
@@ -252,6 +253,7 @@ int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
return soc_pcmcia_add_one(skt);
}
+EXPORT_SYMBOL(pxa2xx_drv_pcmcia_add_one);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
{
@@ -261,19 +263,19 @@ void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
ops->frequency_change = pxa2xx_pcmcia_frequency_change;
#endif
}
+EXPORT_SYMBOL(pxa2xx_drv_pcmcia_ops);
-int __pxa2xx_drv_pcmcia_probe(struct device *dev)
+static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
{
int i, ret = 0;
struct pcmcia_low_level *ops;
struct skt_dev_info *sinfo;
struct soc_pcmcia_socket *skt;
- if (!dev || !dev->platform_data)
+ ops = (struct pcmcia_low_level *)dev->dev.platform_data;
+ if (!ops)
return -ENODEV;
- ops = (struct pcmcia_low_level *)dev->platform_data;
-
pxa2xx_drv_pcmcia_ops(ops);
sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
@@ -289,7 +291,7 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
skt->nr = ops->first + i;
skt->ops = ops;
skt->socket.owner = ops->owner;
- skt->socket.dev.parent = dev;
+ skt->socket.dev.parent = &dev->dev;
skt->socket.pci_irq = NO_IRQ;
ret = pxa2xx_drv_pcmcia_add_one(skt);
@@ -302,19 +304,12 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
soc_pcmcia_remove_one(&sinfo->skt[i]);
kfree(sinfo);
} else {
- pxa2xx_configure_sockets(dev);
- dev_set_drvdata(dev, sinfo);
+ pxa2xx_configure_sockets(&dev->dev);
+ dev_set_drvdata(&dev->dev, sinfo);
}
return ret;
}
-EXPORT_SYMBOL(__pxa2xx_drv_pcmcia_probe);
-
-
-static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
-{
- return __pxa2xx_drv_pcmcia_probe(&dev->dev);
-}
static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
{
@@ -341,7 +336,7 @@ static int pxa2xx_drv_pcmcia_resume(struct device *dev)
return pcmcia_socket_dev_resume(dev);
}
-static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
+static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
.suspend = pxa2xx_drv_pcmcia_suspend,
.resume = pxa2xx_drv_pcmcia_resume,
};
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index cb5efaec886..bb62ea87b8f 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,6 +1,3 @@
-/* temporary measure */
-extern int __pxa2xx_drv_pcmcia_probe(struct device *);
-
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index 3a8993ed562..459a232d66b 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -67,7 +67,7 @@ static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
if (ret)
goto err7;
- skt->irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
+ skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
return 0;
err7:
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index 490749ea677..d08802fe35f 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -40,7 +40,7 @@ static struct pcmcia_irqs irqs[] = {
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->irq = IRQ_GPIO(SG2_S0_GPIO_READY);
+ skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
}
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index 27be2e154df..a51f2077644 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -1,9 +1,8 @@
/*
- * VIPER PCMCIA support
+ * Viper/Zeus PCMCIA support
* Copyright 2004 Arcom Control Systems
*
* Maintained by Marc Zyngier <maz@misterjones.org>
- * <marc.zyngier@altran.com>
*
* Based on:
* iPAQ h2200 PCMCIA support
@@ -26,37 +25,47 @@
#include <asm/irq.h>
-#include <mach/viper.h>
-#include <asm/mach-types.h>
+#include <mach/arcom-pcmcia.h>
#include "soc_common.h"
#include "pxa2xx_base.h"
+static struct platform_device *arcom_pcmcia_dev;
+
static struct pcmcia_irqs irqs[] = {
- { 0, gpio_to_irq(VIPER_CF_CD_GPIO), "PCMCIA_CD" }
+ {
+ .sock = 0,
+ .str = "PCMCIA_CD",
+ },
};
+static inline struct arcom_pcmcia_pdata *viper_get_pdata(void)
+{
+ return arcom_pcmcia_dev->dev.platform_data;
+}
+
static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
unsigned long flags;
- skt->socket.pci_irq = gpio_to_irq(VIPER_CF_RDY_GPIO);
+ skt->socket.pci_irq = gpio_to_irq(pdata->rdy_gpio);
+ irqs[0].irq = gpio_to_irq(pdata->cd_gpio);
- if (gpio_request(VIPER_CF_CD_GPIO, "CF detect"))
+ if (gpio_request(pdata->cd_gpio, "CF detect"))
goto err_request_cd;
- if (gpio_request(VIPER_CF_RDY_GPIO, "CF ready"))
+ if (gpio_request(pdata->rdy_gpio, "CF ready"))
goto err_request_rdy;
- if (gpio_request(VIPER_CF_POWER_GPIO, "CF power"))
+ if (gpio_request(pdata->pwr_gpio, "CF power"))
goto err_request_pwr;
local_irq_save(flags);
- /* GPIO 82 is the CF power enable line. initially off */
- if (gpio_direction_output(VIPER_CF_POWER_GPIO, 0) ||
- gpio_direction_input(VIPER_CF_CD_GPIO) ||
- gpio_direction_input(VIPER_CF_RDY_GPIO)) {
+ if (gpio_direction_output(pdata->pwr_gpio, 0) ||
+ gpio_direction_input(pdata->cd_gpio) ||
+ gpio_direction_input(pdata->rdy_gpio)) {
local_irq_restore(flags);
goto err_dir;
}
@@ -66,13 +75,13 @@ static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
err_dir:
- gpio_free(VIPER_CF_POWER_GPIO);
+ gpio_free(pdata->pwr_gpio);
err_request_pwr:
- gpio_free(VIPER_CF_RDY_GPIO);
+ gpio_free(pdata->rdy_gpio);
err_request_rdy:
- gpio_free(VIPER_CF_CD_GPIO);
+ gpio_free(pdata->cd_gpio);
err_request_cd:
- printk(KERN_ERR "viper: Failed to setup PCMCIA GPIOs\n");
+ dev_err(&arcom_pcmcia_dev->dev, "Failed to setup PCMCIA GPIOs\n");
return -1;
}
@@ -81,17 +90,21 @@ err_request_cd:
*/
static void viper_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
+
soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
- gpio_free(VIPER_CF_POWER_GPIO);
- gpio_free(VIPER_CF_RDY_GPIO);
- gpio_free(VIPER_CF_CD_GPIO);
+ gpio_free(pdata->pwr_gpio);
+ gpio_free(pdata->rdy_gpio);
+ gpio_free(pdata->cd_gpio);
}
static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
struct pcmcia_state *state)
{
- state->detect = gpio_get_value(VIPER_CF_CD_GPIO) ? 0 : 1;
- state->ready = gpio_get_value(VIPER_CF_RDY_GPIO) ? 1 : 0;
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
+
+ state->detect = !gpio_get_value(pdata->cd_gpio);
+ state->ready = !!gpio_get_value(pdata->rdy_gpio);
state->bvd1 = 1;
state->bvd2 = 1;
state->wrprot = 0;
@@ -102,20 +115,21 @@ static void viper_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
const socket_state_t *state)
{
+ struct arcom_pcmcia_pdata *pdata = viper_get_pdata();
+
/* Silently ignore Vpp, output enable, speaker enable. */
- viper_cf_rst(state->flags & SS_RESET);
+ pdata->reset(state->flags & SS_RESET);
/* Apply socket voltage */
switch (state->Vcc) {
case 0:
- gpio_set_value(VIPER_CF_POWER_GPIO, 0);
+ gpio_set_value(pdata->pwr_gpio, 0);
break;
case 33:
- gpio_set_value(VIPER_CF_POWER_GPIO, 1);
+ gpio_set_value(pdata->pwr_gpio, 1);
break;
default:
- printk(KERN_ERR "%s: Unsupported Vcc:%d\n",
- __func__, state->Vcc);
+ dev_err(&arcom_pcmcia_dev->dev, "Unsupported Vcc:%d\n", state->Vcc);
return -1;
}
@@ -130,7 +144,7 @@ static void viper_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
{
}
-static struct pcmcia_low_level viper_pcmcia_ops __initdata = {
+static struct pcmcia_low_level viper_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = viper_pcmcia_hw_init,
.hw_shutdown = viper_pcmcia_hw_shutdown,
@@ -143,17 +157,25 @@ static struct pcmcia_low_level viper_pcmcia_ops __initdata = {
static struct platform_device *viper_pcmcia_device;
-static int __init viper_pcmcia_init(void)
+static int viper_pcmcia_probe(struct platform_device *pdev)
{
int ret;
- if (!machine_is_viper())
- return -ENODEV;
+ /* I can't imagine more than one device, but you never know... */
+ if (arcom_pcmcia_dev)
+ return -EEXIST;
+
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
viper_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!viper_pcmcia_device)
return -ENOMEM;
+ arcom_pcmcia_dev = pdev;
+
+ viper_pcmcia_device->dev.parent = &pdev->dev;
+
ret = platform_device_add_data(viper_pcmcia_device,
&viper_pcmcia_ops,
sizeof(viper_pcmcia_ops));
@@ -161,18 +183,49 @@ static int __init viper_pcmcia_init(void)
if (!ret)
ret = platform_device_add(viper_pcmcia_device);
- if (ret)
+ if (ret) {
platform_device_put(viper_pcmcia_device);
+ arcom_pcmcia_dev = NULL;
+ }
return ret;
}
-static void __exit viper_pcmcia_exit(void)
+static int viper_pcmcia_remove(struct platform_device *pdev)
{
platform_device_unregister(viper_pcmcia_device);
+ arcom_pcmcia_dev = NULL;
+ return 0;
+}
+
+static struct platform_device_id viper_pcmcia_id_table[] = {
+ { .name = "viper-pcmcia", },
+ { .name = "zeus-pcmcia", },
+ { },
+};
+
+static struct platform_driver viper_pcmcia_driver = {
+ .probe = viper_pcmcia_probe,
+ .remove = viper_pcmcia_remove,
+ .driver = {
+ .name = "arcom-pcmcia",
+ .owner = THIS_MODULE,
+ },
+ .id_table = viper_pcmcia_id_table,
+};
+
+static int __init viper_pcmcia_init(void)
+{
+ return platform_driver_register(&viper_pcmcia_driver);
+}
+
+static void __exit viper_pcmcia_exit(void)
+{
+ return platform_driver_unregister(&viper_pcmcia_driver);
}
module_init(viper_pcmcia_init);
module_exit(viper_pcmcia_exit);
+MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table);
MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index de0e770ce6a..52db17263d8 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -126,16 +126,16 @@ static void pcmcia_align(void *align_data, struct resource *res,
res->start = start;
#ifdef CONFIG_X86
- if (res->flags & IORESOURCE_IO) {
- if (start & 0x300) {
- start = (start + 0x3ff) & ~0x3ff;
- res->start = start;
- }
- }
+ if (res->flags & IORESOURCE_IO) {
+ if (start & 0x300) {
+ start = (start + 0x3ff) & ~0x3ff;
+ res->start = start;
+ }
+ }
#endif
#ifdef CONFIG_M68K
- if (res->flags & IORESOURCE_IO) {
+ if (res->flags & IORESOURCE_IO) {
if ((res->start + size - 1) >= 1024)
res->start = res->end;
}
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 7039f3cf5b7..9b0dc433a8c 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -24,9 +24,9 @@
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
@@ -144,43 +144,44 @@ static int add_interval(struct resource_map *map, u_long base, u_long num)
static int sub_interval(struct resource_map *map, u_long base, u_long num)
{
- struct resource_map *p, *q;
-
- for (p = map; ; p = q) {
- q = p->next;
- if (q == map)
- break;
- if ((q->base+q->num > base) && (base+num > q->base)) {
- if (q->base >= base) {
- if (q->base+q->num <= base+num) {
- /* Delete whole block */
- p->next = q->next;
- kfree(q);
- /* don't advance the pointer yet */
- q = p;
- } else {
- /* Cut off bit from the front */
- q->num = q->base + q->num - base - num;
- q->base = base + num;
- }
- } else if (q->base+q->num <= base+num) {
- /* Cut off bit from the end */
- q->num = base - q->base;
- } else {
- /* Split the block into two pieces */
- p = kmalloc(sizeof(struct resource_map), GFP_KERNEL);
- if (!p) {
- printk(KERN_WARNING "out of memory to update resources\n");
- return -ENOMEM;
+ struct resource_map *p, *q;
+
+ for (p = map; ; p = q) {
+ q = p->next;
+ if (q == map)
+ break;
+ if ((q->base+q->num > base) && (base+num > q->base)) {
+ if (q->base >= base) {
+ if (q->base+q->num <= base+num) {
+ /* Delete whole block */
+ p->next = q->next;
+ kfree(q);
+ /* don't advance the pointer yet */
+ q = p;
+ } else {
+ /* Cut off bit from the front */
+ q->num = q->base + q->num - base - num;
+ q->base = base + num;
+ }
+ } else if (q->base+q->num <= base+num) {
+ /* Cut off bit from the end */
+ q->num = base - q->base;
+ } else {
+ /* Split the block into two pieces */
+ p = kmalloc(sizeof(struct resource_map),
+ GFP_KERNEL);
+ if (!p) {
+ printk(KERN_WARNING "out of memory to update resources\n");
+ return -ENOMEM;
+ }
+ p->base = base+num;
+ p->num = q->base+q->num - p->base;
+ q->num = base - q->base;
+ p->next = q->next ; q->next = p;
+ }
}
- p->base = base+num;
- p->num = q->base+q->num - p->base;
- q->num = base - q->base;
- p->next = q->next ; q->next = p;
- }
}
- }
- return 0;
+ return 0;
}
/*======================================================================
@@ -194,69 +195,72 @@ static int sub_interval(struct resource_map *map, u_long base, u_long num)
static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
unsigned int num)
{
- struct resource *res;
- struct socket_data *s_data = s->resource_data;
- unsigned int i, j, bad;
- int any;
- u_char *b, hole, most;
-
- dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:",
- base, base+num-1);
-
- /* First, what does a floating port look like? */
- b = kzalloc(256, GFP_KERNEL);
- if (!b) {
- printk("\n");
- dev_printk(KERN_ERR, &s->dev,
- "do_io_probe: unable to kmalloc 256 bytes");
- return;
- }
- for (i = base, most = 0; i < base+num; i += 8) {
- res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe");
- if (!res)
- continue;
- hole = inb(i);
- for (j = 1; j < 8; j++)
- if (inb(i+j) != hole) break;
- free_region(res);
- if ((j == 8) && (++b[hole] > b[most]))
- most = hole;
- if (b[most] == 127) break;
- }
- kfree(b);
-
- bad = any = 0;
- for (i = base; i < base+num; i += 8) {
- res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA IO probe");
- if (!res)
- continue;
- for (j = 0; j < 8; j++)
- if (inb(i+j) != most) break;
- free_region(res);
- if (j < 8) {
- if (!any)
- printk(" excluding");
- if (!bad)
- bad = any = i;
- } else {
- if (bad) {
- sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#x-%#x", bad, i-1);
- bad = 0;
- }
+ struct resource *res;
+ struct socket_data *s_data = s->resource_data;
+ unsigned int i, j, bad;
+ int any;
+ u_char *b, hole, most;
+
+ dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:",
+ base, base+num-1);
+
+ /* First, what does a floating port look like? */
+ b = kzalloc(256, GFP_KERNEL);
+ if (!b) {
+ printk("\n");
+ dev_printk(KERN_ERR, &s->dev,
+ "do_io_probe: unable to kmalloc 256 bytes");
+ return;
}
- }
- if (bad) {
- if ((num > 16) && (bad == base) && (i == base+num)) {
- printk(" nothing: probe failed.\n");
- return;
- } else {
- sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#x-%#x", bad, i-1);
+ for (i = base, most = 0; i < base+num; i += 8) {
+ res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
+ if (!res)
+ continue;
+ hole = inb(i);
+ for (j = 1; j < 8; j++)
+ if (inb(i+j) != hole)
+ break;
+ free_region(res);
+ if ((j == 8) && (++b[hole] > b[most]))
+ most = hole;
+ if (b[most] == 127)
+ break;
}
- }
+ kfree(b);
- printk(any ? "\n" : " clean.\n");
+ bad = any = 0;
+ for (i = base; i < base+num; i += 8) {
+ res = claim_region(NULL, i, 8, IORESOURCE_IO, "PCMCIA ioprobe");
+ if (!res)
+ continue;
+ for (j = 0; j < 8; j++)
+ if (inb(i+j) != most)
+ break;
+ free_region(res);
+ if (j < 8) {
+ if (!any)
+ printk(" excluding");
+ if (!bad)
+ bad = any = i;
+ } else {
+ if (bad) {
+ sub_interval(&s_data->io_db, bad, i-bad);
+ printk(" %#x-%#x", bad, i-1);
+ bad = 0;
+ }
+ }
+ }
+ if (bad) {
+ if ((num > 16) && (bad == base) && (i == base+num)) {
+ printk(" nothing: probe failed.\n");
+ return;
+ } else {
+ sub_interval(&s_data->io_db, bad, i-bad);
+ printk(" %#x-%#x", bad, i-1);
+ }
+ }
+
+ printk(any ? "\n" : " clean.\n");
}
#endif
@@ -327,8 +331,9 @@ cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size)
unsigned int info1, info2;
int ret = 0;
- res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe");
- res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "cs memory probe");
+ res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
+ res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
+ "PCMCIA memprobe");
if (res1 && res2) {
ret = readable(s, res1, &info1);
@@ -347,8 +352,9 @@ checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size)
struct resource *res1, *res2;
int a = -1, b = -1;
- res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "cs memory probe");
- res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "cs memory probe");
+ res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
+ res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
+ "PCMCIA memprobe");
if (res1 && res2) {
a = checksum(s, res1);
@@ -371,42 +377,43 @@ checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size)
static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s)
{
- struct socket_data *s_data = s->resource_data;
- u_long i, j, bad, fail, step;
-
- dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:",
- base, base+num-1);
- bad = fail = 0;
- step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff);
- /* don't allow too large steps */
- if (step > 0x800000)
- step = 0x800000;
- /* cis_readable wants to map 2x map_size */
- if (step < 2 * s->map_size)
- step = 2 * s->map_size;
- for (i = j = base; i < base+num; i = j + step) {
- if (!fail) {
- for (j = i; j < base+num; j += step) {
- if (cis_readable(s, j, step))
- break;
- }
- fail = ((i == base) && (j == base+num));
- }
- if (fail) {
- for (j = i; j < base+num; j += 2*step)
- if (checksum_match(s, j, step) &&
- checksum_match(s, j + step, step))
- break;
- }
- if (i != j) {
- if (!bad) printk(" excluding");
- printk(" %#05lx-%#05lx", i, j-1);
- sub_interval(&s_data->mem_db, i, j-i);
- bad += j-i;
+ struct socket_data *s_data = s->resource_data;
+ u_long i, j, bad, fail, step;
+
+ dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:",
+ base, base+num-1);
+ bad = fail = 0;
+ step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff);
+ /* don't allow too large steps */
+ if (step > 0x800000)
+ step = 0x800000;
+ /* cis_readable wants to map 2x map_size */
+ if (step < 2 * s->map_size)
+ step = 2 * s->map_size;
+ for (i = j = base; i < base+num; i = j + step) {
+ if (!fail) {
+ for (j = i; j < base+num; j += step) {
+ if (cis_readable(s, j, step))
+ break;
+ }
+ fail = ((i == base) && (j == base+num));
+ }
+ if (fail) {
+ for (j = i; j < base+num; j += 2*step)
+ if (checksum_match(s, j, step) &&
+ checksum_match(s, j + step, step))
+ break;
+ }
+ if (i != j) {
+ if (!bad)
+ printk(" excluding");
+ printk(" %#05lx-%#05lx", i, j-1);
+ sub_interval(&s_data->mem_db, i, j-i);
+ bad += j-i;
+ }
}
- }
- printk(bad ? "\n" : " clean.\n");
- return (num - bad);
+ printk(bad ? "\n" : " clean.\n");
+ return num - bad;
}
#ifdef CONFIG_PCMCIA_PROBE
@@ -656,7 +663,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
return res;
}
-static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
+static struct resource *nonstatic_find_mem_region(u_long base, u_long num,
u_long align, int low, struct pcmcia_socket *s)
{
struct resource *res = make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev));
@@ -794,7 +801,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
return -EINVAL;
#endif
- for (i=0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
res = s->cb_dev->bus->resource[i];
if (!res)
continue;
@@ -908,14 +915,14 @@ static ssize_t show_io_db(struct device *dev,
for (p = data->io_db.next; p != &data->io_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf (&buf[ret], (PAGE_SIZE - ret - 1),
- "0x%08lx - 0x%08lx\n",
- ((unsigned long) p->base),
- ((unsigned long) p->base + p->num - 1));
+ ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ "0x%08lx - 0x%08lx\n",
+ ((unsigned long) p->base),
+ ((unsigned long) p->base + p->num - 1));
}
mutex_unlock(&rsrc_mutex);
- return (ret);
+ return ret;
}
static ssize_t store_io_db(struct device *dev,
@@ -927,12 +934,13 @@ static ssize_t store_io_db(struct device *dev,
unsigned int add = ADD_MANAGED_RESOURCE;
ssize_t ret = 0;
- ret = sscanf (buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
if (ret != 2) {
- ret = sscanf (buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
add = REMOVE_MANAGED_RESOURCE;
if (ret != 2) {
- ret = sscanf (buf, "0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr,
+ &end_addr);
add = ADD_MANAGED_RESOURCE;
if (ret != 2)
return -EINVAL;
@@ -963,14 +971,14 @@ static ssize_t show_mem_db(struct device *dev,
for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf (&buf[ret], (PAGE_SIZE - ret - 1),
- "0x%08lx - 0x%08lx\n",
- ((unsigned long) p->base),
- ((unsigned long) p->base + p->num - 1));
+ ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ "0x%08lx - 0x%08lx\n",
+ ((unsigned long) p->base),
+ ((unsigned long) p->base + p->num - 1));
}
mutex_unlock(&rsrc_mutex);
- return (ret);
+ return ret;
}
static ssize_t store_mem_db(struct device *dev,
@@ -982,12 +990,13 @@ static ssize_t store_mem_db(struct device *dev,
unsigned int add = ADD_MANAGED_RESOURCE;
ssize_t ret = 0;
- ret = sscanf (buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
if (ret != 2) {
- ret = sscanf (buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
add = REMOVE_MANAGED_RESOURCE;
if (ret != 2) {
- ret = sscanf (buf, "0x%lx - 0x%lx", &start_addr, &end_addr);
+ ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr,
+ &end_addr);
add = ADD_MANAGED_RESOURCE;
if (ret != 2)
return -EINVAL;
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 78d5aab542f..7a456000332 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -164,7 +164,7 @@ static ssize_t pccard_store_irq_mask(struct device *dev,
if (!count)
return -EINVAL;
- ret = sscanf (buf, "0x%x\n", &mask);
+ ret = sscanf(buf, "0x%x\n", &mask);
if (ret == 1) {
s->irq_mask &= mask;
@@ -278,7 +278,7 @@ static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off
free_tuple:
kfree(tuplebuffer);
- return (ret);
+ return ret;
}
static ssize_t pccard_show_cis(struct kobject *kobj,
@@ -308,7 +308,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj,
count = pccard_extract_cis(s, buf, off, count);
}
- return (count);
+ return count;
}
static ssize_t pccard_store_cis(struct kobject *kobj,
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 8be4cc447a1..e4d12acdd52 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -6,7 +6,7 @@
* Changelog:
* Aug 2002: Manfred Spraul <manfred@colorfullife.com>
* Dynamically adjust the size of the bridge resource
- *
+ *
* May 2003: Dominik Brodowski <linux@brodo.de>
* Merge pci_socket.c and yenta.c into one file
*/
@@ -16,13 +16,12 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
-#include <asm/io.h>
-
#include "yenta_socket.h"
#include "i82365.h"
@@ -55,7 +54,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket);
static unsigned int override_bios;
module_param(override_bios, uint, 0000);
-MODULE_PARM_DESC (override_bios, "yenta ignore bios resource allocation");
+MODULE_PARM_DESC(override_bios, "yenta ignore bios resource allocation");
/*
* Generate easy-to-use ways of reading a cardbus sockets
@@ -237,24 +236,42 @@ static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state)
/* i82365SL-DF style */
if (socket->flags & YENTA_16BIT_POWER_DF) {
switch (state->Vcc) {
- case 33: reg |= I365_VCC_3V; break;
- case 50: reg |= I365_VCC_5V; break;
- default: reg = 0; break;
+ case 33:
+ reg |= I365_VCC_3V;
+ break;
+ case 50:
+ reg |= I365_VCC_5V;
+ break;
+ default:
+ reg = 0;
+ break;
}
switch (state->Vpp) {
case 33:
- case 50: reg |= I365_VPP1_5V; break;
- case 120: reg |= I365_VPP1_12V; break;
+ case 50:
+ reg |= I365_VPP1_5V;
+ break;
+ case 120:
+ reg |= I365_VPP1_12V;
+ break;
}
} else {
/* i82365SL-B style */
switch (state->Vcc) {
- case 50: reg |= I365_VCC_5V; break;
- default: reg = 0; break;
+ case 50:
+ reg |= I365_VCC_5V;
+ break;
+ default:
+ reg = 0;
+ break;
}
switch (state->Vpp) {
- case 50: reg |= I365_VPP1_5V | I365_VPP2_5V; break;
- case 120: reg |= I365_VPP1_12V | I365_VPP2_12V; break;
+ case 50:
+ reg |= I365_VPP1_5V | I365_VPP2_5V;
+ break;
+ case 120:
+ reg |= I365_VPP1_12V | I365_VPP2_12V;
+ break;
}
}
@@ -263,14 +280,26 @@ static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state)
} else {
u32 reg = 0; /* CB_SC_STPCLK? */
switch (state->Vcc) {
- case 33: reg = CB_SC_VCC_3V; break;
- case 50: reg = CB_SC_VCC_5V; break;
- default: reg = 0; break;
+ case 33:
+ reg = CB_SC_VCC_3V;
+ break;
+ case 50:
+ reg = CB_SC_VCC_5V;
+ break;
+ default:
+ reg = 0;
+ break;
}
switch (state->Vpp) {
- case 33: reg |= CB_SC_VPP_3V; break;
- case 50: reg |= CB_SC_VPP_5V; break;
- case 120: reg |= CB_SC_VPP_12V; break;
+ case 33:
+ reg |= CB_SC_VPP_3V;
+ break;
+ case 50:
+ reg |= CB_SC_VPP_5V;
+ break;
+ case 120:
+ reg |= CB_SC_VPP_12V;
+ break;
}
if (reg != cb_readl(socket, CB_SOCKET_CONTROL))
cb_writel(socket, CB_SOCKET_CONTROL, reg);
@@ -314,23 +343,29 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
reg = exca_readb(socket, I365_POWER) & (I365_VCC_MASK|I365_VPP1_MASK);
reg |= I365_PWR_NORESET;
- if (state->flags & SS_PWR_AUTO) reg |= I365_PWR_AUTO;
- if (state->flags & SS_OUTPUT_ENA) reg |= I365_PWR_OUT;
+ if (state->flags & SS_PWR_AUTO)
+ reg |= I365_PWR_AUTO;
+ if (state->flags & SS_OUTPUT_ENA)
+ reg |= I365_PWR_OUT;
if (exca_readb(socket, I365_POWER) != reg)
exca_writeb(socket, I365_POWER, reg);
/* CSC interrupt: no ISA irq for CSC */
reg = I365_CSC_DETECT;
if (state->flags & SS_IOCARD) {
- if (state->csc_mask & SS_STSCHG) reg |= I365_CSC_STSCHG;
+ if (state->csc_mask & SS_STSCHG)
+ reg |= I365_CSC_STSCHG;
} else {
- if (state->csc_mask & SS_BATDEAD) reg |= I365_CSC_BVD1;
- if (state->csc_mask & SS_BATWARN) reg |= I365_CSC_BVD2;
- if (state->csc_mask & SS_READY) reg |= I365_CSC_READY;
+ if (state->csc_mask & SS_BATDEAD)
+ reg |= I365_CSC_BVD1;
+ if (state->csc_mask & SS_BATWARN)
+ reg |= I365_CSC_BVD2;
+ if (state->csc_mask & SS_READY)
+ reg |= I365_CSC_READY;
}
exca_writeb(socket, I365_CSCINT, reg);
exca_readb(socket, I365_CSC);
- if(sock->zoom_video)
+ if (sock->zoom_video)
sock->zoom_video(sock, state->flags & SS_ZVCARD);
}
config_writew(socket, CB_BRIDGE_CONTROL, bridge);
@@ -368,9 +403,12 @@ static int yenta_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io
exca_writew(socket, I365_IO(map)+I365_W_STOP, io->stop);
ioctl = exca_readb(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map);
- if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map);
- if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map);
- if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map);
+ if (io->flags & MAP_0WS)
+ ioctl |= I365_IOCTL_0WS(map);
+ if (io->flags & MAP_16BIT)
+ ioctl |= I365_IOCTL_16BIT(map);
+ if (io->flags & MAP_AUTOSZ)
+ ioctl |= I365_IOCTL_IOCS16(map);
exca_writeb(socket, I365_IOCTL, ioctl);
if (io->flags & MAP_ACTIVE)
@@ -416,10 +454,17 @@ static int yenta_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *
word = (stop >> 12) & 0x0fff;
switch (to_cycles(mem->speed)) {
- case 0: break;
- case 1: word |= I365_MEM_WS0; break;
- case 2: word |= I365_MEM_WS1; break;
- default: word |= I365_MEM_WS1 | I365_MEM_WS0; break;
+ case 0:
+ break;
+ case 1:
+ word |= I365_MEM_WS0;
+ break;
+ case 2:
+ word |= I365_MEM_WS1;
+ break;
+ default:
+ word |= I365_MEM_WS1 | I365_MEM_WS0;
+ break;
}
exca_writew(socket, I365_MEM(map) + I365_W_STOP, word);
@@ -547,9 +592,9 @@ static int yenta_sock_suspend(struct pcmcia_socket *sock)
* max 4 MB, min 16 kB. We try very hard to not get below
* the "ACC" values, though.
*/
-#define BRIDGE_MEM_MAX 4*1024*1024
-#define BRIDGE_MEM_ACC 128*1024
-#define BRIDGE_MEM_MIN 16*1024
+#define BRIDGE_MEM_MAX (4*1024*1024)
+#define BRIDGE_MEM_ACC (128*1024)
+#define BRIDGE_MEM_MIN (16*1024)
#define BRIDGE_IO_MAX 512
#define BRIDGE_IO_ACC 256
@@ -574,7 +619,7 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
int i;
size = BRIDGE_MEM_MAX;
if (size > avail/8) {
- size=(avail+1)/8;
+ size = (avail+1)/8;
/* round size down to next power of 2 */
i = 0;
while ((size /= 2) != 0)
@@ -590,7 +635,7 @@ static int yenta_search_one_res(struct resource *root, struct resource *res,
do {
if (allocate_resource(root, res, size, start, end, align,
- NULL, NULL)==0) {
+ NULL, NULL) == 0) {
return 1;
}
size = size/2;
@@ -605,8 +650,8 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
u32 min)
{
int i;
- for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) {
- struct resource * root = socket->dev->bus->resource[i];
+ for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ struct resource *root = socket->dev->bus->resource[i];
if (!root)
continue;
@@ -704,7 +749,7 @@ static void yenta_allocate_resources(struct yenta_socket *socket)
static void yenta_free_resources(struct yenta_socket *socket)
{
int i;
- for (i=0;i<4;i++) {
+ for (i = 0; i < 4; i++) {
struct resource *res;
res = socket->dev->resource + PCI_BRIDGE_RESOURCES + i;
if (res->start != 0 && res->end != 0)
@@ -726,7 +771,7 @@ static void __devexit yenta_close(struct pci_dev *dev)
/* we don't want a dying socket registered */
pcmcia_unregister_socket(&sock->socket);
-
+
/* Disable all events so we don't die in an IRQ storm */
cb_writel(sock, CB_SOCKET_MASK, 0x0);
exca_writeb(sock, I365_CSCINT, 0);
@@ -898,7 +943,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id)
{
struct yenta_socket *socket = (struct yenta_socket *) dev_id;
u8 csc;
- u32 cb_event;
+ u32 cb_event;
/* Clear interrupt status for the event */
cb_event = cb_readl(socket, CB_SOCKET_EVENT);
@@ -1019,7 +1064,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
{
struct list_head *tmp;
unsigned char upper_limit;
- /*
+ /*
* We only check and fix the parent bridge: All systems which need
* this fixup that have been reviewed are laptops and the only bridge
* which needed fixing was the parent bridge of the CardBus bridge:
@@ -1038,7 +1083,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
/* check the bus ranges of all silbling bridges to prevent overlap */
list_for_each(tmp, &bridge_to_fix->parent->children) {
- struct pci_bus * silbling = pci_bus_b(tmp);
+ struct pci_bus *silbling = pci_bus_b(tmp);
/*
* If the silbling has a higher secondary bus number
* and it's secondary is equal or smaller than our
@@ -1083,7 +1128,7 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
* interrupt, and that we can map the cardbus area. Fill in the
* socket information structure..
*/
-static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_id *id)
+static int __devinit yenta_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct yenta_socket *socket;
int ret;
@@ -1285,7 +1330,7 @@ static int yenta_dev_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops yenta_pm_ops = {
+static const struct dev_pm_ops yenta_pm_ops = {
.suspend_noirq = yenta_dev_suspend_noirq,
.resume_noirq = yenta_dev_resume_noirq,
.resume = yenta_dev_resume,
@@ -1302,7 +1347,7 @@ static struct dev_pm_ops yenta_pm_ops = {
#define YENTA_PM_OPS NULL
#endif
-#define CB_ID(vend,dev,type) \
+#define CB_ID(vend, dev, type) \
{ \
.vendor = vend, \
.device = dev, \
@@ -1313,7 +1358,7 @@ static struct dev_pm_ops yenta_pm_ops = {
.driver_data = CARDBUS_TYPE_##type, \
}
-static struct pci_device_id yenta_table [] = {
+static struct pci_device_id yenta_table[] = {
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI),
/*
@@ -1403,13 +1448,13 @@ static struct pci_driver yenta_cardbus_driver = {
static int __init yenta_socket_init(void)
{
- return pci_register_driver (&yenta_cardbus_driver);
+ return pci_register_driver(&yenta_cardbus_driver);
}
-static void __exit yenta_socket_exit (void)
+static void __exit yenta_socket_exit(void)
{
- pci_unregister_driver (&yenta_cardbus_driver);
+ pci_unregister_driver(&yenta_cardbus_driver);
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 55ca39dea42..fc5bf9d2a3f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -334,6 +334,8 @@ config EEEPC_LAPTOP
depends on HOTPLUG_PCI
select BACKLIGHT_CLASS_DEVICE
select HWMON
+ select LEDS_CLASS
+ select NEW_LEDS
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
@@ -365,6 +367,18 @@ config ACPI_WMI
It is safe to enable this driver even if your DSDT doesn't define
any ACPI-WMI devices.
+config MSI_WMI
+ tristate "MSI WMI extras"
+ depends on ACPI_WMI
+ depends on INPUT
+ depends on BACKLIGHT_CLASS_DEVICE
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want to support WMI-based hotkeys on MSI laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called msi-wmi.
+
config ACPI_ASUS
tristate "ASUS/Medion Laptop Extras (DEPRECATED)"
depends on ACPI
@@ -435,4 +449,19 @@ config ACPI_TOSHIBA
If you have a legacy free Toshiba laptop (such as the Libretto L1
series), say Y.
+
+config TOSHIBA_BT_RFKILL
+ tristate "Toshiba Bluetooth RFKill switch support"
+ depends on ACPI
+ ---help---
+ This driver adds support for Bluetooth events for the RFKill
+ switch on modern Toshiba laptops with full ACPI support and
+ an RFKill switch.
+
+ This driver handles RFKill events for the TOS6205 Bluetooth,
+ and re-enables it when the switch is set back to the 'on'
+ position.
+
+ If you have a modern Toshiba laptop with a Bluetooth and an
+ RFKill switch (such as the Portege R500), say Y.
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index d1c16210a51..b7474b6a8bf 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ACPI_WMI) += wmi.o
+obj-$(CONFIG_MSI_WMI) += msi-wmi.o
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index ab64522aaa6..79b15b9d9cf 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -52,7 +52,7 @@
*/
#undef START_IN_KERNEL_MODE
-#define DRV_VER "0.5.18"
+#define DRV_VER "0.5.20"
/*
* According to the Atom N270 datasheet,
@@ -112,12 +112,14 @@ module_param_string(force_product, force_product, 16, 0);
MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check");
/*
- * cmd_off: to switch the fan completely off / to check if the fan is off
+ * cmd_off: to switch the fan completely off
+ * chk_off: to check if the fan is off
* cmd_auto: to set the BIOS in control of the fan. The BIOS regulates then
* the fan speed depending on the temperature
*/
struct fancmd {
u8 cmd_off;
+ u8 chk_off;
u8 cmd_auto;
};
@@ -134,32 +136,41 @@ struct bios_settings_t {
/* Register addresses and values for different BIOS versions */
static const struct bios_settings_t bios_tbl[] = {
/* AOA110 */
- {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
- {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} },
- {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
- {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x1f, 0x00} },
+ {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0xaf, 0x00} },
+ {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x21, 0x00} },
/* AOA150 */
- {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
- {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ /* Acer 1410 */
+ {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} },
/* special BIOS / other */
- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Gateway ", "LT31 ", "v1.3103 ", 0x55, 0x58,
+ {0x10, 0x0f, 0x00} },
+ {"Gateway ", "LT31 ", "v1.3201 ", 0x55, 0x58,
+ {0x10, 0x0f, 0x00} },
+ {"Gateway ", "LT31 ", "v1.3302 ", 0x55, 0x58,
+ {0x10, 0x0f, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x21, 0x00} },
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} },
/* pewpew-terminator */
- {"", "", "", 0, 0, {0, 0} }
+ {"", "", "", 0, 0, {0, 0, 0} }
};
static const struct bios_settings_t *bios_cfg __read_mostly;
@@ -183,7 +194,7 @@ static int acerhdf_get_fanstate(int *state)
if (ec_read(bios_cfg->fanreg, &fan))
return -EINVAL;
- if (fan != bios_cfg->cmd.cmd_off)
+ if (fan != bios_cfg->cmd.chk_off)
*state = ACERHDF_FAN_AUTO;
else
*state = ACERHDF_FAN_OFF;
@@ -460,7 +471,7 @@ static int acerhdf_remove(struct platform_device *device)
return 0;
}
-static struct dev_pm_ops acerhdf_pm_ops = {
+static const struct dev_pm_ops acerhdf_pm_ops = {
.suspend = acerhdf_suspend,
.freeze = acerhdf_suspend,
};
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index b39d2bb3e75..61a1c750365 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -221,6 +221,7 @@ static struct asus_hotk *hotk;
*/
static const struct acpi_device_id asus_device_ids[] = {
{"ATK0100", 0},
+ {"ATK0101", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, asus_device_ids);
@@ -232,6 +233,7 @@ static void asus_hotk_notify(struct acpi_device *device, u32 event);
static struct acpi_driver asus_hotk_driver = {
.name = ASUS_HOTK_NAME,
.class = ASUS_HOTK_CLASS,
+ .owner = THIS_MODULE,
.ids = asus_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
@@ -293,6 +295,11 @@ struct key_entry {
enum { KE_KEY, KE_END };
static struct key_entry asus_keymap[] = {
+ {KE_KEY, 0x02, KEY_SCREENLOCK},
+ {KE_KEY, 0x05, KEY_WLAN},
+ {KE_KEY, 0x08, KEY_F13},
+ {KE_KEY, 0x17, KEY_ZOOM},
+ {KE_KEY, 0x1f, KEY_BATTERY},
{KE_KEY, 0x30, KEY_VOLUMEUP},
{KE_KEY, 0x31, KEY_VOLUMEDOWN},
{KE_KEY, 0x32, KEY_MUTE},
@@ -312,8 +319,11 @@ static struct key_entry asus_keymap[] = {
{KE_KEY, 0x5F, KEY_WLAN},
{KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
{KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */
+ {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE},
+ {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE},
+ {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */
{KE_KEY, 0x82, KEY_CAMERA},
+ {KE_KEY, 0x88, KEY_WLAN },
{KE_KEY, 0x8A, KEY_PROG1},
{KE_KEY, 0x95, KEY_MEDIA},
{KE_KEY, 0x99, KEY_PHONE},
@@ -1240,9 +1250,6 @@ static int asus_hotk_add(struct acpi_device *device)
{
int result;
- if (!device)
- return -EINVAL;
-
pr_notice("Asus Laptop Support version %s\n",
ASUS_LAPTOP_VERSION);
@@ -1283,8 +1290,8 @@ static int asus_hotk_add(struct acpi_device *device)
hotk->ledd_status = 0xFFF;
/* Set initial values of light sensor and level */
- hotk->light_switch = 1; /* Default to light sensor disabled */
- hotk->light_level = 0; /* level 5 for sensor sensitivity */
+ hotk->light_switch = 0; /* Default to light sensor disabled */
+ hotk->light_level = 5; /* level 5 for sensor sensitivity */
if (ls_switch_handle)
set_light_sens_switch(hotk->light_switch);
@@ -1306,9 +1313,6 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
kfree(hotk->name);
kfree(hotk);
@@ -1444,9 +1448,6 @@ static int __init asus_laptop_init(void)
{
int result;
- if (acpi_disabled)
- return -ENODEV;
-
result = acpi_bus_register_driver(&asus_hotk_driver);
if (result < 0)
return result;
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ddf5240ade8..0c9c53111a2 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -466,6 +466,7 @@ MODULE_DEVICE_TABLE(acpi, asus_device_ids);
static struct acpi_driver asus_hotk_driver = {
.name = "asus_acpi",
.class = ACPI_HOTK_CLASS,
+ .owner = THIS_MODULE,
.ids = asus_device_ids,
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
@@ -1334,9 +1335,6 @@ static int asus_hotk_add(struct acpi_device *device)
acpi_status status = AE_OK;
int result;
- if (!device)
- return -EINVAL;
-
printk(KERN_NOTICE "Asus Laptop ACPI Extras version %s\n",
ASUS_ACPI_VERSION);
@@ -1392,9 +1390,6 @@ end:
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
asus_hotk_remove_fs(device);
kfree(hotk);
@@ -1422,21 +1417,17 @@ static int __init asus_acpi_init(void)
{
int result;
- if (acpi_disabled)
- return -ENODEV;
+ result = acpi_bus_register_driver(&asus_hotk_driver);
+ if (result < 0)
+ return result;
asus_proc_dir = proc_mkdir(PROC_ASUS, acpi_root_dir);
if (!asus_proc_dir) {
printk(KERN_ERR "Asus ACPI: Unable to create /proc entry\n");
+ acpi_bus_unregister_driver(&asus_hotk_driver);
return -ENODEV;
}
- result = acpi_bus_register_driver(&asus_hotk_driver);
- if (result < 0) {
- remove_proc_entry(PROC_ASUS, acpi_root_dir);
- return result;
- }
-
/*
* This is a bit of a kludge. We only want this module loaded
* for ASUS systems, but there's currently no way to probe the
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 11003bba10d..1a387e79f71 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -51,7 +51,6 @@
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
-#include <linux/autoconf.h>
#define COMPAL_DRIVER_VERSION "0.2.6"
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 74909c4aaee..3780994dc8f 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -58,6 +58,14 @@ static int da_command_code;
static int da_num_tokens;
static struct calling_interface_token *da_tokens;
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "dell-laptop",
+ .owner = THIS_MODULE,
+ }
+};
+
+static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
@@ -74,7 +82,7 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
{ }
};
-static void parse_da_table(const struct dmi_header *dm)
+static void __init parse_da_table(const struct dmi_header *dm)
{
/* Final token is a terminator, so we don't want to copy it */
int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
@@ -103,7 +111,7 @@ static void parse_da_table(const struct dmi_header *dm)
da_num_tokens += tokens;
}
-static void find_tokens(const struct dmi_header *dm, void *dummy)
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
case 0xd4: /* Indexed IO */
@@ -197,8 +205,8 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
dell_send_request(&buffer, 17, 11);
status = buffer.output[1];
- if (status & BIT(bit))
- rfkill_set_hw_state(rfkill, !!(status & BIT(16)));
+ rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
+ rfkill_set_hw_state(rfkill, !(status & BIT(16)));
}
static const struct rfkill_ops dell_rfkill_ops = {
@@ -206,7 +214,7 @@ static const struct rfkill_ops dell_rfkill_ops = {
.query = dell_rfkill_query,
};
-static int dell_setup_rfkill(void)
+static int __init dell_setup_rfkill(void)
{
struct calling_interface_buffer buffer;
int status;
@@ -217,7 +225,8 @@ static int dell_setup_rfkill(void)
status = buffer.output[1];
if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
- wifi_rfkill = rfkill_alloc("dell-wifi", NULL, RFKILL_TYPE_WLAN,
+ wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
+ RFKILL_TYPE_WLAN,
&dell_rfkill_ops, (void *) 1);
if (!wifi_rfkill) {
ret = -ENOMEM;
@@ -229,7 +238,8 @@ static int dell_setup_rfkill(void)
}
if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
- bluetooth_rfkill = rfkill_alloc("dell-bluetooth", NULL,
+ bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
+ &platform_device->dev,
RFKILL_TYPE_BLUETOOTH,
&dell_rfkill_ops, (void *) 2);
if (!bluetooth_rfkill) {
@@ -242,7 +252,9 @@ static int dell_setup_rfkill(void)
}
if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
- wwan_rfkill = rfkill_alloc("dell-wwan", NULL, RFKILL_TYPE_WWAN,
+ wwan_rfkill = rfkill_alloc("dell-wwan",
+ &platform_device->dev,
+ RFKILL_TYPE_WWAN,
&dell_rfkill_ops, (void *) 3);
if (!wwan_rfkill) {
ret = -ENOMEM;
@@ -268,6 +280,22 @@ err_wifi:
return ret;
}
+static void dell_cleanup_rfkill(void)
+{
+ if (wifi_rfkill) {
+ rfkill_unregister(wifi_rfkill);
+ rfkill_destroy(wifi_rfkill);
+ }
+ if (bluetooth_rfkill) {
+ rfkill_unregister(bluetooth_rfkill);
+ rfkill_destroy(bluetooth_rfkill);
+ }
+ if (wwan_rfkill) {
+ rfkill_unregister(wwan_rfkill);
+ rfkill_destroy(wwan_rfkill);
+ }
+}
+
static int dell_send_intensity(struct backlight_device *bd)
{
struct calling_interface_buffer buffer;
@@ -326,11 +354,23 @@ static int __init dell_init(void)
return -ENODEV;
}
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+ platform_device = platform_device_alloc("dell-laptop", -1);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device1;
+ }
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device2;
+
ret = dell_setup_rfkill();
if (ret) {
printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n");
- goto out;
+ goto fail_rfkill;
}
#ifdef CONFIG_ACPI
@@ -352,13 +392,13 @@ static int __init dell_init(void)
if (max_intensity) {
dell_backlight_device = backlight_device_register(
"dell_backlight",
- NULL, NULL,
+ &platform_device->dev, NULL,
&dell_ops);
if (IS_ERR(dell_backlight_device)) {
ret = PTR_ERR(dell_backlight_device);
dell_backlight_device = NULL;
- goto out;
+ goto fail_backlight;
}
dell_backlight_device->props.max_brightness = max_intensity;
@@ -368,13 +408,16 @@ static int __init dell_init(void)
}
return 0;
-out:
- if (wifi_rfkill)
- rfkill_unregister(wifi_rfkill);
- if (bluetooth_rfkill)
- rfkill_unregister(bluetooth_rfkill);
- if (wwan_rfkill)
- rfkill_unregister(wwan_rfkill);
+
+fail_backlight:
+ dell_cleanup_rfkill();
+fail_rfkill:
+ platform_device_del(platform_device);
+fail_platform_device2:
+ platform_device_put(platform_device);
+fail_platform_device1:
+ platform_driver_unregister(&platform_driver);
+fail_platform_driver:
kfree(da_tokens);
return ret;
}
@@ -382,12 +425,7 @@ out:
static void __exit dell_exit(void)
{
backlight_device_unregister(dell_backlight_device);
- if (wifi_rfkill)
- rfkill_unregister(wifi_rfkill);
- if (bluetooth_rfkill)
- rfkill_unregister(bluetooth_rfkill);
- if (wwan_rfkill)
- rfkill_unregister(wwan_rfkill);
+ dell_cleanup_rfkill();
}
module_init(dell_init);
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 0f900cc9fa7..67f3fe71c50 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -31,6 +31,7 @@
#include <acpi/acpi_drivers.h>
#include <linux/acpi.h>
#include <linux/string.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
@@ -38,6 +39,8 @@ MODULE_LICENSE("GPL");
#define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492"
+static int acpi_video;
+
MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
struct key_entry {
@@ -54,7 +57,7 @@ enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };
* via the keyboard controller so should not be sent again.
*/
-static struct key_entry dell_wmi_keymap[] = {
+static struct key_entry dell_legacy_wmi_keymap[] = {
{KE_KEY, 0xe045, KEY_PROG1},
{KE_KEY, 0xe009, KEY_EJECTCD},
@@ -72,7 +75,7 @@ static struct key_entry dell_wmi_keymap[] = {
/* The next device is at offset 6, the active devices are at
offset 8 and the attached devices at offset 10 */
- {KE_KEY, 0xe00b, KEY_DISPLAYTOGGLE},
+ {KE_KEY, 0xe00b, KEY_SWITCHVIDEOMODE},
{KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE},
@@ -96,6 +99,47 @@ static struct key_entry dell_wmi_keymap[] = {
{KE_END, 0}
};
+static bool dell_new_hk_type;
+
+struct dell_new_keymap_entry {
+ u16 scancode;
+ u16 keycode;
+};
+
+struct dell_hotkey_table {
+ struct dmi_header header;
+ struct dell_new_keymap_entry keymap[];
+
+};
+
+static struct key_entry *dell_new_wmi_keymap;
+
+static u16 bios_to_linux_keycode[256] = {
+
+ KEY_MEDIA, KEY_NEXTSONG, KEY_PLAYPAUSE, KEY_PREVIOUSSONG,
+ KEY_STOPCD, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_WWW, KEY_UNKNOWN, KEY_VOLUMEDOWN, KEY_MUTE,
+ KEY_VOLUMEUP, KEY_UNKNOWN, KEY_BATTERY, KEY_EJECTCD,
+ KEY_UNKNOWN, KEY_SLEEP, KEY_PROG1, KEY_BRIGHTNESSDOWN,
+ KEY_BRIGHTNESSUP, KEY_UNKNOWN, KEY_KBDILLUMTOGGLE,
+ KEY_UNKNOWN, KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_SWITCHVIDEOMODE, KEY_UNKNOWN, KEY_UNKNOWN, KEY_PROG2,
+ KEY_UNKNOWN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ KEY_PROG3
+};
+
+
+static struct key_entry *dell_wmi_keymap = dell_legacy_wmi_keymap;
+
static struct input_dev *dell_wmi_input_dev;
static struct key_entry *dell_wmi_get_entry_by_scancode(int code)
@@ -164,24 +208,78 @@ static void dell_wmi_notify(u32 value, void *context)
obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_BUFFER) {
- int *buffer = (int *)obj->buffer.pointer;
- /*
- * The upper bytes of the event may contain
- * additional information, so mask them off for the
- * scancode lookup
- */
- key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF);
- if (key) {
+ int reported_key;
+ u16 *buffer_entry = (u16 *)obj->buffer.pointer;
+ if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
+ printk(KERN_INFO "dell-wmi: Received unknown WMI event"
+ " (0x%x)\n", buffer_entry[1]);
+ return;
+ }
+
+ if (dell_new_hk_type)
+ reported_key = (int)buffer_entry[2];
+ else
+ reported_key = (int)buffer_entry[1] & 0xffff;
+
+ key = dell_wmi_get_entry_by_scancode(reported_key);
+
+ if (!key) {
+ printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
+ reported_key);
+ } else if ((key->keycode == KEY_BRIGHTNESSUP ||
+ key->keycode == KEY_BRIGHTNESSDOWN) && acpi_video) {
+ /* Don't report brightness notifications that will also
+ * come via ACPI */
+ return;
+ } else {
input_report_key(dell_wmi_input_dev, key->keycode, 1);
input_sync(dell_wmi_input_dev);
input_report_key(dell_wmi_input_dev, key->keycode, 0);
input_sync(dell_wmi_input_dev);
- } else if (buffer[1] & 0xFFFF)
- printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- buffer[1] & 0xFFFF);
+ }
}
}
+
+static void setup_new_hk_map(const struct dmi_header *dm)
+{
+
+ int i;
+ int hotkey_num = (dm->length-4)/sizeof(struct dell_new_keymap_entry);
+ struct dell_hotkey_table *table =
+ container_of(dm, struct dell_hotkey_table, header);
+
+ dell_new_wmi_keymap = kzalloc((hotkey_num+1) *
+ sizeof(struct key_entry), GFP_KERNEL);
+
+ for (i = 0; i < hotkey_num; i++) {
+ dell_new_wmi_keymap[i].type = KE_KEY;
+ dell_new_wmi_keymap[i].code = table->keymap[i].scancode;
+ dell_new_wmi_keymap[i].keycode =
+ (table->keymap[i].keycode > 255) ? 0 :
+ bios_to_linux_keycode[table->keymap[i].keycode];
+ }
+
+ dell_new_wmi_keymap[i].type = KE_END;
+ dell_new_wmi_keymap[i].code = 0;
+ dell_new_wmi_keymap[i].keycode = 0;
+
+ dell_wmi_keymap = dell_new_wmi_keymap;
+
+}
+
+
+static void find_hk_type(const struct dmi_header *dm, void *dummy)
+{
+
+ if ((dm->type == 0xb2) && (dm->length > 6)) {
+ dell_new_hk_type = true;
+ setup_new_hk_map(dm);
+ }
+
+}
+
+
static int __init dell_wmi_input_setup(void)
{
struct key_entry *key;
@@ -226,6 +324,9 @@ static int __init dell_wmi_init(void)
int err;
if (wmi_has_guid(DELL_EVENT_GUID)) {
+
+ dmi_walk(find_hk_type, NULL);
+
err = dell_wmi_input_setup();
if (err)
@@ -240,6 +341,8 @@ static int __init dell_wmi_init(void)
return err;
}
+ acpi_video = acpi_video_backlight_support();
+
} else
printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 4226e535273..5838c69b2fb 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1,5 +1,5 @@
/*
- * eepc-laptop.c - Asus Eee PC extras
+ * eeepc-laptop.c - Asus Eee PC extras
*
* Based on asus_acpi.c as patched for the Eee PC by Asus:
* ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
@@ -34,20 +34,23 @@
#include <linux/rfkill.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
+#include <linux/leds.h>
#define EEEPC_LAPTOP_VERSION "0.1"
+#define EEEPC_LAPTOP_NAME "Eee PC Hotkey Driver"
+#define EEEPC_LAPTOP_FILE "eeepc"
-#define EEEPC_HOTK_NAME "Eee PC Hotkey Driver"
-#define EEEPC_HOTK_FILE "eeepc"
-#define EEEPC_HOTK_CLASS "hotkey"
-#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
-#define EEEPC_HOTK_HID "ASUS010"
+#define EEEPC_ACPI_CLASS "hotkey"
+#define EEEPC_ACPI_DEVICE_NAME "Hotkey"
+#define EEEPC_ACPI_HID "ASUS010"
+MODULE_AUTHOR("Corentin Chary, Eric Cooper");
+MODULE_DESCRIPTION(EEEPC_LAPTOP_NAME);
+MODULE_LICENSE("GPL");
/*
* Definitions for Asus EeePC
*/
-#define NOTIFY_WLAN_ON 0x10
#define NOTIFY_BRN_MIN 0x20
#define NOTIFY_BRN_MAX 0x2f
@@ -117,58 +120,6 @@ static const char *cm_setv[] = {
NULL, NULL, "PBPS", "TPDS"
};
-#define EEEPC_EC "\\_SB.PCI0.SBRG.EC0."
-
-#define EEEPC_EC_FAN_PWM EEEPC_EC "SC02" /* Fan PWM duty cycle (%) */
-#define EEEPC_EC_SC02 0x63
-#define EEEPC_EC_FAN_HRPM EEEPC_EC "SC05" /* High byte, fan speed (RPM) */
-#define EEEPC_EC_FAN_LRPM EEEPC_EC "SC06" /* Low byte, fan speed (RPM) */
-#define EEEPC_EC_FAN_CTRL EEEPC_EC "SFB3" /* Byte containing SF25 */
-#define EEEPC_EC_SFB3 0xD3
-
-/*
- * This is the main structure, we can use it to store useful information
- * about the hotk device
- */
-struct eeepc_hotk {
- struct acpi_device *device; /* the device we are in */
- acpi_handle handle; /* the handle of the hotk device */
- u32 cm_supported; /* the control methods supported
- by this BIOS */
- uint init_flag; /* Init flags */
- u16 event_count[128]; /* count for each event */
- struct input_dev *inputdev;
- u16 *keycode_map;
- struct rfkill *wlan_rfkill;
- struct rfkill *bluetooth_rfkill;
- struct rfkill *wwan3g_rfkill;
- struct rfkill *wimax_rfkill;
- struct hotplug_slot *hotplug_slot;
- struct mutex hotplug_lock;
-};
-
-/* The actual device the driver binds to */
-static struct eeepc_hotk *ehotk;
-
-/* Platform device/driver */
-static int eeepc_hotk_thaw(struct device *device);
-static int eeepc_hotk_restore(struct device *device);
-
-static struct dev_pm_ops eeepc_pm_ops = {
- .thaw = eeepc_hotk_thaw,
- .restore = eeepc_hotk_restore,
-};
-
-static struct platform_driver platform_driver = {
- .driver = {
- .name = EEEPC_HOTK_FILE,
- .owner = THIS_MODULE,
- .pm = &eeepc_pm_ops,
- }
-};
-
-static struct platform_device *platform_device;
-
struct key_entry {
char type;
u8 code;
@@ -177,7 +128,7 @@ struct key_entry {
enum { KE_KEY, KE_END };
-static struct key_entry eeepc_keymap[] = {
+static const struct key_entry eeepc_keymap[] = {
/* Sleep already handled via generic ACPI code */
{KE_KEY, 0x10, KEY_WLAN },
{KE_KEY, 0x11, KEY_WLAN },
@@ -185,77 +136,56 @@ static struct key_entry eeepc_keymap[] = {
{KE_KEY, 0x13, KEY_MUTE },
{KE_KEY, 0x14, KEY_VOLUMEDOWN },
{KE_KEY, 0x15, KEY_VOLUMEUP },
+ {KE_KEY, 0x16, KEY_DISPLAY_OFF },
{KE_KEY, 0x1a, KEY_COFFEE },
{KE_KEY, 0x1b, KEY_ZOOM },
{KE_KEY, 0x1c, KEY_PROG2 },
{KE_KEY, 0x1d, KEY_PROG3 },
- {KE_KEY, NOTIFY_BRN_MIN, KEY_BRIGHTNESSDOWN },
- {KE_KEY, NOTIFY_BRN_MIN + 2, KEY_BRIGHTNESSUP },
+ {KE_KEY, NOTIFY_BRN_MIN, KEY_BRIGHTNESSDOWN },
+ {KE_KEY, NOTIFY_BRN_MAX, KEY_BRIGHTNESSUP },
{KE_KEY, 0x30, KEY_SWITCHVIDEOMODE },
{KE_KEY, 0x31, KEY_SWITCHVIDEOMODE },
{KE_KEY, 0x32, KEY_SWITCHVIDEOMODE },
+ {KE_KEY, 0x37, KEY_F13 }, /* Disable Touchpad */
+ {KE_KEY, 0x38, KEY_F14 },
{KE_END, 0},
};
+
/*
- * The hotkey driver declaration
+ * This is the main structure, we can use it to store useful information
*/
-static int eeepc_hotk_add(struct acpi_device *device);
-static int eeepc_hotk_remove(struct acpi_device *device, int type);
-static void eeepc_hotk_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id eeepc_device_ids[] = {
- {EEEPC_HOTK_HID, 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
-
-static struct acpi_driver eeepc_hotk_driver = {
- .name = EEEPC_HOTK_NAME,
- .class = EEEPC_HOTK_CLASS,
- .ids = eeepc_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = eeepc_hotk_add,
- .remove = eeepc_hotk_remove,
- .notify = eeepc_hotk_notify,
- },
-};
+struct eeepc_laptop {
+ acpi_handle handle; /* the handle of the acpi device */
+ u32 cm_supported; /* the control methods supported
+ by this BIOS */
+ u16 event_count[128]; /* count for each event */
-/* PCI hotplug ops */
-static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value);
+ struct platform_device *platform_device;
+ struct device *hwmon_device;
+ struct backlight_device *backlight_device;
-static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .get_adapter_status = eeepc_get_adapter_status,
- .get_power_status = eeepc_get_adapter_status,
-};
+ struct input_dev *inputdev;
+ struct key_entry *keymap;
-/* The backlight device /sys/class/backlight */
-static struct backlight_device *eeepc_backlight_device;
+ struct rfkill *wlan_rfkill;
+ struct rfkill *bluetooth_rfkill;
+ struct rfkill *wwan3g_rfkill;
+ struct rfkill *wimax_rfkill;
-/* The hwmon device */
-static struct device *eeepc_hwmon_device;
+ struct hotplug_slot *hotplug_slot;
+ struct mutex hotplug_lock;
-/*
- * The backlight class declaration
- */
-static int read_brightness(struct backlight_device *bd);
-static int update_bl_status(struct backlight_device *bd);
-static struct backlight_ops eeepcbl_ops = {
- .get_brightness = read_brightness,
- .update_status = update_bl_status,
+ struct led_classdev tpd_led;
+ int tpd_led_wk;
+ struct workqueue_struct *led_workqueue;
+ struct work_struct tpd_led_work;
};
-MODULE_AUTHOR("Corentin Chary, Eric Cooper");
-MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
-MODULE_LICENSE("GPL");
-
/*
* ACPI Helpers
*/
-static int write_acpi_int(acpi_handle handle, const char *method, int val,
- struct acpi_buffer *output)
+static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
struct acpi_object_list params;
union acpi_object in_obj;
@@ -266,7 +196,7 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = val;
- status = acpi_evaluate_object(handle, (char *)method, &params, output);
+ status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
return (status == AE_OK ? 0 : -1);
}
@@ -285,81 +215,56 @@ static int read_acpi_int(acpi_handle handle, const char *method, int *val)
}
}
-static int set_acpi(int cm, int value)
+static int set_acpi(struct eeepc_laptop *eeepc, int cm, int value)
{
- if (ehotk->cm_supported & (0x1 << cm)) {
- const char *method = cm_setv[cm];
- if (method == NULL)
- return -ENODEV;
- if (write_acpi_int(ehotk->handle, method, value, NULL))
- pr_warning("Error writing %s\n", method);
- }
- return 0;
-}
+ const char *method = cm_setv[cm];
-static int get_acpi(int cm)
-{
- int value = -ENODEV;
- if ((ehotk->cm_supported & (0x1 << cm))) {
- const char *method = cm_getv[cm];
- if (method == NULL)
- return -ENODEV;
- if (read_acpi_int(ehotk->handle, method, &value))
- pr_warning("Error reading %s\n", method);
- }
- return value;
-}
-
-/*
- * Backlight
- */
-static int read_brightness(struct backlight_device *bd)
-{
- return get_acpi(CM_ASL_PANELBRIGHT);
-}
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
-static int set_brightness(struct backlight_device *bd, int value)
-{
- value = max(0, min(15, value));
- return set_acpi(CM_ASL_PANELBRIGHT, value);
+ if (write_acpi_int(eeepc->handle, method, value))
+ pr_warning("Error writing %s\n", method);
+ return 0;
}
-static int update_bl_status(struct backlight_device *bd)
+static int get_acpi(struct eeepc_laptop *eeepc, int cm)
{
- return set_brightness(bd, bd->props.brightness);
-}
+ const char *method = cm_getv[cm];
+ int value;
-/*
- * Rfkill helpers
- */
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
-static bool eeepc_wlan_rfkill_blocked(void)
-{
- if (get_acpi(CM_ASL_WLAN) == 1)
- return false;
- return true;
+ if (read_acpi_int(eeepc->handle, method, &value))
+ pr_warning("Error reading %s\n", method);
+ return value;
}
-static int eeepc_rfkill_set(void *data, bool blocked)
+static int acpi_setter_handle(struct eeepc_laptop *eeepc, int cm,
+ acpi_handle *handle)
{
- unsigned long asl = (unsigned long)data;
- return set_acpi(asl, !blocked);
-}
+ const char *method = cm_setv[cm];
+ acpi_status status;
-static const struct rfkill_ops eeepc_rfkill_ops = {
- .set_block = eeepc_rfkill_set,
-};
+ if (method == NULL)
+ return -ENODEV;
+ if ((eeepc->cm_supported & (0x1 << cm)) == 0)
+ return -ENODEV;
-static void __devinit eeepc_enable_camera(void)
-{
- /*
- * If the following call to set_acpi() fails, it's because there's no
- * camera so we can ignore the error.
- */
- if (get_acpi(CM_ASL_CAMERA) == 0)
- set_acpi(CM_ASL_CAMERA, 1);
+ status = acpi_get_handle(eeepc->handle, (char *)method,
+ handle);
+ if (status != AE_OK) {
+ pr_warning("Error finding %s\n", method);
+ return -ENODEV;
+ }
+ return 0;
}
+
/*
* Sys helpers
*/
@@ -372,60 +277,63 @@ static int parse_arg(const char *buf, unsigned long count, int *val)
return count;
}
-static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
+static ssize_t store_sys_acpi(struct device *dev, int cm,
+ const char *buf, size_t count)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- value = set_acpi(cm, value);
+ value = set_acpi(eeepc, cm, value);
if (value < 0)
- return value;
+ return -EIO;
return rv;
}
-static ssize_t show_sys_acpi(int cm, char *buf)
+static ssize_t show_sys_acpi(struct device *dev, int cm, char *buf)
{
- int value = get_acpi(cm);
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
+ int value = get_acpi(eeepc, cm);
if (value < 0)
- return value;
+ return -EIO;
return sprintf(buf, "%d\n", value);
}
-#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
+#define EEEPC_CREATE_DEVICE_ATTR(_name, _mode, _cm) \
static ssize_t show_##_name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- return show_sys_acpi(_cm, buf); \
+ return show_sys_acpi(dev, _cm, buf); \
} \
static ssize_t store_##_name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- return store_sys_acpi(_cm, buf, count); \
+ return store_sys_acpi(dev, _cm, buf, count); \
} \
static struct device_attribute dev_attr_##_name = { \
.attr = { \
.name = __stringify(_name), \
- .mode = 0644 }, \
+ .mode = _mode }, \
.show = show_##_name, \
.store = store_##_name, \
}
-EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
-EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
-EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
+EEEPC_CREATE_DEVICE_ATTR(camera, 0644, CM_ASL_CAMERA);
+EEEPC_CREATE_DEVICE_ATTR(cardr, 0644, CM_ASL_CARDREADER);
+EEEPC_CREATE_DEVICE_ATTR(disp, 0200, CM_ASL_DISPLAYSWITCH);
struct eeepc_cpufv {
int num;
int cur;
};
-static int get_cpufv(struct eeepc_cpufv *c)
+static int get_cpufv(struct eeepc_laptop *eeepc, struct eeepc_cpufv *c)
{
- c->cur = get_acpi(CM_ASL_CPUFV);
+ c->cur = get_acpi(eeepc, CM_ASL_CPUFV);
c->num = (c->cur >> 8) & 0xff;
c->cur &= 0xff;
if (c->cur < 0 || c->num <= 0 || c->num > 12)
@@ -437,11 +345,12 @@ static ssize_t show_available_cpufv(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int i;
ssize_t len = 0;
- if (get_cpufv(&c))
+ if (get_cpufv(eeepc, &c))
return -ENODEV;
for (i = 0; i < c.num; i++)
len += sprintf(buf + len, "%d ", i);
@@ -453,9 +362,10 @@ static ssize_t show_cpufv(struct device *dev,
struct device_attribute *attr,
char *buf)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
- if (get_cpufv(&c))
+ if (get_cpufv(eeepc, &c))
return -ENODEV;
return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
}
@@ -464,17 +374,18 @@ static ssize_t store_cpufv(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(dev);
struct eeepc_cpufv c;
int rv, value;
- if (get_cpufv(&c))
+ if (get_cpufv(eeepc, &c))
return -ENODEV;
rv = parse_arg(buf, count, &value);
if (rv < 0)
return rv;
if (!rv || value < 0 || value >= c.num)
return -EINVAL;
- set_acpi(CM_ASL_CPUFV, value);
+ set_acpi(eeepc, CM_ASL_CPUFV, value);
return rv;
}
@@ -506,156 +417,125 @@ static struct attribute_group platform_attribute_group = {
.attrs = platform_attributes
};
-/*
- * Hotkey functions
- */
-static struct key_entry *eepc_get_entry_by_scancode(int code)
+static int eeepc_platform_init(struct eeepc_laptop *eeepc)
{
- struct key_entry *key;
-
- for (key = eeepc_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
+ int result;
- return NULL;
-}
+ eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, -1);
+ if (!eeepc->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(eeepc->platform_device, eeepc);
-static struct key_entry *eepc_get_entry_by_keycode(int code)
-{
- struct key_entry *key;
+ result = platform_device_add(eeepc->platform_device);
+ if (result)
+ goto fail_platform_device;
- for (key = eeepc_keymap; key->type != KE_END; key++)
- if (code == key->keycode && key->type == KE_KEY)
- return key;
+ result = sysfs_create_group(&eeepc->platform_device->dev.kobj,
+ &platform_attribute_group);
+ if (result)
+ goto fail_sysfs;
+ return 0;
- return NULL;
+fail_sysfs:
+ platform_device_del(eeepc->platform_device);
+fail_platform_device:
+ platform_device_put(eeepc->platform_device);
+ return result;
}
-static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+static void eeepc_platform_exit(struct eeepc_laptop *eeepc)
{
- struct key_entry *key = eepc_get_entry_by_scancode(scancode);
+ sysfs_remove_group(&eeepc->platform_device->dev.kobj,
+ &platform_attribute_group);
+ platform_device_unregister(eeepc->platform_device);
+}
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
- return 0;
- }
+/*
+ * LEDs
+ */
+/*
+ * These functions actually update the LED's, and are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+static void tpd_led_update(struct work_struct *work)
+ {
+ struct eeepc_laptop *eeepc;
- return -EINVAL;
+ eeepc = container_of(work, struct eeepc_laptop, tpd_led_work);
+
+ set_acpi(eeepc, CM_ASL_TPD, eeepc->tpd_led_wk);
}
-static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
+static void tpd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
- struct key_entry *key;
- int old_keycode;
+ struct eeepc_laptop *eeepc;
- if (keycode < 0 || keycode > KEY_MAX)
- return -EINVAL;
+ eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
- key = eepc_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!eepc_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
- return 0;
- }
-
- return -EINVAL;
+ eeepc->tpd_led_wk = (value > 0) ? 1 : 0;
+ queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
}
-static void cmsg_quirk(int cm, const char *name)
+static int eeepc_led_init(struct eeepc_laptop *eeepc)
{
- int dummy;
+ int rv;
- /* Some BIOSes do not report cm although it is avaliable.
- Check if cm_getv[cm] works and, if yes, assume cm should be set. */
- if (!(ehotk->cm_supported & (1 << cm))
- && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) {
- pr_info("%s (%x) not reported by BIOS,"
- " enabling anyway\n", name, 1 << cm);
- ehotk->cm_supported |= 1 << cm;
- }
-}
+ if (get_acpi(eeepc, CM_ASL_TPD) == -ENODEV)
+ return 0;
-static void cmsg_quirks(void)
-{
- cmsg_quirk(CM_ASL_LID, "LID");
- cmsg_quirk(CM_ASL_TYPE, "TYPE");
- cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER");
- cmsg_quirk(CM_ASL_TPD, "TPD");
-}
+ eeepc->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!eeepc->led_workqueue)
+ return -ENOMEM;
+ INIT_WORK(&eeepc->tpd_led_work, tpd_led_update);
-static int eeepc_hotk_check(void)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- int result;
+ eeepc->tpd_led.name = "eeepc::touchpad";
+ eeepc->tpd_led.brightness_set = tpd_led_set;
+ eeepc->tpd_led.max_brightness = 1;
- result = acpi_bus_get_status(ehotk->device);
- if (result)
- return result;
- if (ehotk->device->status.present) {
- if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
- &buffer)) {
- pr_err("Hotkey initialization failed\n");
- return -ENODEV;
- } else {
- pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag);
- }
- /* get control methods supported */
- if (read_acpi_int(ehotk->handle, "CMSG"
- , &ehotk->cm_supported)) {
- pr_err("Get control methods supported failed\n");
- return -ENODEV;
- } else {
- cmsg_quirks();
- pr_info("Get control methods supported: 0x%x\n",
- ehotk->cm_supported);
- }
- } else {
- pr_err("Hotkey device not present, aborting\n");
- return -EINVAL;
+ rv = led_classdev_register(&eeepc->platform_device->dev,
+ &eeepc->tpd_led);
+ if (rv) {
+ destroy_workqueue(eeepc->led_workqueue);
+ return rv;
}
+
return 0;
}
-static int notify_brn(void)
+static void eeepc_led_exit(struct eeepc_laptop *eeepc)
{
- /* returns the *previous* brightness, or -1 */
- struct backlight_device *bd = eeepc_backlight_device;
- if (bd) {
- int old = bd->props.brightness;
- backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
- return old;
- }
- return -1;
+ if (eeepc->tpd_led.dev)
+ led_classdev_unregister(&eeepc->tpd_led);
+ if (eeepc->led_workqueue)
+ destroy_workqueue(eeepc->led_workqueue);
}
-static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
- u8 *value)
-{
- int val = get_acpi(CM_ASL_WLAN);
- if (val == 1 || val == 0)
- *value = val;
- else
- return -EINVAL;
-
- return 0;
+/*
+ * PCI hotplug (for wlan rfkill)
+ */
+static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
+{
+ if (get_acpi(eeepc, CM_ASL_WLAN) == 1)
+ return false;
+ return true;
}
-static void eeepc_rfkill_hotplug(void)
+static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
{
struct pci_dev *dev;
struct pci_bus *bus;
- bool blocked = eeepc_wlan_rfkill_blocked();
+ bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
- if (ehotk->wlan_rfkill)
- rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
+ if (eeepc->wlan_rfkill)
+ rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
- mutex_lock(&ehotk->hotplug_lock);
+ mutex_lock(&eeepc->hotplug_lock);
- if (ehotk->hotplug_slot) {
+ if (eeepc->hotplug_slot) {
bus = pci_find_bus(0, 1);
if (!bus) {
pr_warning("Unable to find PCI bus 1?\n");
@@ -685,69 +565,23 @@ static void eeepc_rfkill_hotplug(void)
}
out_unlock:
- mutex_unlock(&ehotk->hotplug_lock);
+ mutex_unlock(&eeepc->hotplug_lock);
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
+ struct eeepc_laptop *eeepc = data;
+
if (event != ACPI_NOTIFY_BUS_CHECK)
return;
- eeepc_rfkill_hotplug();
+ eeepc_rfkill_hotplug(eeepc);
}
-static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
+static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
+ char *node)
{
- static struct key_entry *key;
- u16 count;
- int brn = -ENODEV;
-
- if (!ehotk)
- return;
- if (event > ACPI_MAX_SYS_NOTIFY)
- return;
- if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
- brn = notify_brn();
- count = ehotk->event_count[event % 128]++;
- acpi_bus_generate_proc_event(ehotk->device, event, count);
- acpi_bus_generate_netlink_event(ehotk->device->pnp.device_class,
- dev_name(&ehotk->device->dev), event,
- count);
- if (ehotk->inputdev) {
- if (brn != -ENODEV) {
- /* brightness-change events need special
- * handling for conversion to key events
- */
- if (brn < 0)
- brn = event;
- else
- brn += NOTIFY_BRN_MIN;
- if (event < brn)
- event = NOTIFY_BRN_MIN; /* brightness down */
- else if (event > brn)
- event = NOTIFY_BRN_MIN + 2; /* ... up */
- else
- event = NOTIFY_BRN_MIN + 1; /* ... unchanged */
- }
- key = eepc_get_entry_by_scancode(event);
- if (key) {
- switch (key->type) {
- case KE_KEY:
- input_report_key(ehotk->inputdev, key->keycode,
- 1);
- input_sync(ehotk->inputdev);
- input_report_key(ehotk->inputdev, key->keycode,
- 0);
- input_sync(ehotk->inputdev);
- break;
- }
- }
- }
-}
-
-static int eeepc_register_rfkill_notifier(char *node)
-{
- acpi_status status = AE_OK;
+ acpi_status status;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
@@ -756,7 +590,7 @@ static int eeepc_register_rfkill_notifier(char *node)
status = acpi_install_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify,
- NULL);
+ eeepc);
if (ACPI_FAILURE(status))
pr_warning("Failed to register notify on %s\n", node);
} else
@@ -765,7 +599,8 @@ static int eeepc_register_rfkill_notifier(char *node)
return 0;
}
-static void eeepc_unregister_rfkill_notifier(char *node)
+static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
+ char *node)
{
acpi_status status = AE_OK;
acpi_handle handle;
@@ -782,13 +617,33 @@ static void eeepc_unregister_rfkill_notifier(char *node)
}
}
+static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ struct eeepc_laptop *eeepc = hotplug_slot->private;
+ int val = get_acpi(eeepc, CM_ASL_WLAN);
+
+ if (val == 1 || val == 0)
+ *value = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
{
kfree(hotplug_slot->info);
kfree(hotplug_slot);
}
-static int eeepc_setup_pci_hotplug(void)
+static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
+ .owner = THIS_MODULE,
+ .get_adapter_status = eeepc_get_adapter_status,
+ .get_power_status = eeepc_get_adapter_status,
+};
+
+static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
{
int ret = -ENOMEM;
struct pci_bus *bus = pci_find_bus(0, 1);
@@ -798,22 +653,22 @@ static int eeepc_setup_pci_hotplug(void)
return -ENODEV;
}
- ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!ehotk->hotplug_slot)
+ eeepc->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!eeepc->hotplug_slot)
goto error_slot;
- ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
+ eeepc->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
GFP_KERNEL);
- if (!ehotk->hotplug_slot->info)
+ if (!eeepc->hotplug_slot->info)
goto error_info;
- ehotk->hotplug_slot->private = ehotk;
- ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
- ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
- eeepc_get_adapter_status(ehotk->hotplug_slot,
- &ehotk->hotplug_slot->info->adapter_status);
+ eeepc->hotplug_slot->private = eeepc;
+ eeepc->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
+ eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
+ eeepc_get_adapter_status(eeepc->hotplug_slot,
+ &eeepc->hotplug_slot->info->adapter_status);
- ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi");
+ ret = pci_hp_register(eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -822,17 +677,156 @@ static int eeepc_setup_pci_hotplug(void)
return 0;
error_register:
- kfree(ehotk->hotplug_slot->info);
+ kfree(eeepc->hotplug_slot->info);
error_info:
- kfree(ehotk->hotplug_slot);
- ehotk->hotplug_slot = NULL;
+ kfree(eeepc->hotplug_slot);
+ eeepc->hotplug_slot = NULL;
error_slot:
return ret;
}
+/*
+ * Rfkill devices
+ */
+static int eeepc_rfkill_set(void *data, bool blocked)
+{
+ acpi_handle handle = data;
+
+ return write_acpi_int(handle, NULL, !blocked);
+}
+
+static const struct rfkill_ops eeepc_rfkill_ops = {
+ .set_block = eeepc_rfkill_set,
+};
+
+static int eeepc_new_rfkill(struct eeepc_laptop *eeepc,
+ struct rfkill **rfkill,
+ const char *name,
+ enum rfkill_type type, int cm)
+{
+ acpi_handle handle;
+ int result;
+
+ result = acpi_setter_handle(eeepc, cm, &handle);
+ if (result < 0)
+ return result;
+
+ *rfkill = rfkill_alloc(name, &eeepc->platform_device->dev, type,
+ &eeepc_rfkill_ops, handle);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, get_acpi(eeepc, cm) != 1);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
+{
+ eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
+ eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
+ eeepc_unregister_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ if (eeepc->wlan_rfkill) {
+ rfkill_unregister(eeepc->wlan_rfkill);
+ rfkill_destroy(eeepc->wlan_rfkill);
+ eeepc->wlan_rfkill = NULL;
+ }
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed after
+ * eeepc_unregister_rfkill_notifier()
+ */
+ eeepc_rfkill_hotplug(eeepc);
+ if (eeepc->hotplug_slot)
+ pci_hp_deregister(eeepc->hotplug_slot);
+
+ if (eeepc->bluetooth_rfkill) {
+ rfkill_unregister(eeepc->bluetooth_rfkill);
+ rfkill_destroy(eeepc->bluetooth_rfkill);
+ eeepc->bluetooth_rfkill = NULL;
+ }
+ if (eeepc->wwan3g_rfkill) {
+ rfkill_unregister(eeepc->wwan3g_rfkill);
+ rfkill_destroy(eeepc->wwan3g_rfkill);
+ eeepc->wwan3g_rfkill = NULL;
+ }
+ if (eeepc->wimax_rfkill) {
+ rfkill_unregister(eeepc->wimax_rfkill);
+ rfkill_destroy(eeepc->wimax_rfkill);
+ eeepc->wimax_rfkill = NULL;
+ }
+}
+
+static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
+{
+ int result = 0;
+
+ mutex_init(&eeepc->hotplug_lock);
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wlan_rfkill,
+ "eeepc-wlan", RFKILL_TYPE_WLAN,
+ CM_ASL_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->bluetooth_rfkill,
+ "eeepc-bluetooth", RFKILL_TYPE_BLUETOOTH,
+ CM_ASL_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wwan3g_rfkill,
+ "eeepc-wwan3g", RFKILL_TYPE_WWAN,
+ CM_ASL_3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(eeepc, &eeepc->wimax_rfkill,
+ "eeepc-wimax", RFKILL_TYPE_WIMAX,
+ CM_ASL_WIMAX);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_setup_pci_hotplug(eeepc);
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+ eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
+ eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
+ /*
+ * Refresh pci hotplug in case the rfkill state was changed during
+ * setup.
+ */
+ eeepc_rfkill_hotplug(eeepc);
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_rfkill_exit(eeepc);
+ return result;
+}
+
+/*
+ * Platform driver - hibernate/resume callbacks
+ */
static int eeepc_hotk_thaw(struct device *device)
{
- if (ehotk->wlan_rfkill) {
+ struct eeepc_laptop *eeepc = dev_get_drvdata(device);
+
+ if (eeepc->wlan_rfkill) {
bool wlan;
/*
@@ -840,8 +834,8 @@ static int eeepc_hotk_thaw(struct device *device)
* during suspend. Normally it restores it on resume, but
* we should kick it ourselves in case hibernation is aborted.
*/
- wlan = get_acpi(CM_ASL_WLAN);
- set_acpi(CM_ASL_WLAN, wlan);
+ wlan = get_acpi(eeepc, CM_ASL_WLAN);
+ set_acpi(eeepc, CM_ASL_WLAN, wlan);
}
return 0;
@@ -849,70 +843,96 @@ static int eeepc_hotk_thaw(struct device *device)
static int eeepc_hotk_restore(struct device *device)
{
+ struct eeepc_laptop *eeepc = dev_get_drvdata(device);
+
/* Refresh both wlan rfkill state and pci hotplug */
- if (ehotk->wlan_rfkill)
- eeepc_rfkill_hotplug();
-
- if (ehotk->bluetooth_rfkill)
- rfkill_set_sw_state(ehotk->bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
- if (ehotk->wwan3g_rfkill)
- rfkill_set_sw_state(ehotk->wwan3g_rfkill,
- get_acpi(CM_ASL_3G) != 1);
- if (ehotk->wimax_rfkill)
- rfkill_set_sw_state(ehotk->wimax_rfkill,
- get_acpi(CM_ASL_WIMAX) != 1);
+ if (eeepc->wlan_rfkill)
+ eeepc_rfkill_hotplug(eeepc);
+
+ if (eeepc->bluetooth_rfkill)
+ rfkill_set_sw_state(eeepc->bluetooth_rfkill,
+ get_acpi(eeepc, CM_ASL_BLUETOOTH) != 1);
+ if (eeepc->wwan3g_rfkill)
+ rfkill_set_sw_state(eeepc->wwan3g_rfkill,
+ get_acpi(eeepc, CM_ASL_3G) != 1);
+ if (eeepc->wimax_rfkill)
+ rfkill_set_sw_state(eeepc->wimax_rfkill,
+ get_acpi(eeepc, CM_ASL_WIMAX) != 1);
return 0;
}
+static const struct dev_pm_ops eeepc_pm_ops = {
+ .thaw = eeepc_hotk_thaw,
+ .restore = eeepc_hotk_restore,
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = EEEPC_LAPTOP_FILE,
+ .owner = THIS_MODULE,
+ .pm = &eeepc_pm_ops,
+ }
+};
+
/*
- * Hwmon
+ * Hwmon device
*/
+
+#define EEEPC_EC_SC00 0x61
+#define EEEPC_EC_FAN_PWM (EEEPC_EC_SC00 + 2) /* Fan PWM duty cycle (%) */
+#define EEEPC_EC_FAN_HRPM (EEEPC_EC_SC00 + 5) /* High byte, fan speed (RPM) */
+#define EEEPC_EC_FAN_LRPM (EEEPC_EC_SC00 + 6) /* Low byte, fan speed (RPM) */
+
+#define EEEPC_EC_SFB0 0xD0
+#define EEEPC_EC_FAN_CTRL (EEEPC_EC_SFB0 + 3) /* Byte containing SF25 */
+
static int eeepc_get_fan_pwm(void)
{
- int value = 0;
+ u8 value = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
- value = value * 255 / 100;
- return (value);
+ ec_read(EEEPC_EC_FAN_PWM, &value);
+ return value * 255 / 100;
}
static void eeepc_set_fan_pwm(int value)
{
value = SENSORS_LIMIT(value, 0, 255);
value = value * 100 / 255;
- ec_write(EEEPC_EC_SC02, value);
+ ec_write(EEEPC_EC_FAN_PWM, value);
}
static int eeepc_get_fan_rpm(void)
{
- int high = 0;
- int low = 0;
+ u8 high = 0;
+ u8 low = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_HRPM, &high);
- read_acpi_int(NULL, EEEPC_EC_FAN_LRPM, &low);
- return (high << 8 | low);
+ ec_read(EEEPC_EC_FAN_HRPM, &high);
+ ec_read(EEEPC_EC_FAN_LRPM, &low);
+ return high << 8 | low;
}
static int eeepc_get_fan_ctrl(void)
{
- int value = 0;
+ u8 value = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
- return ((value & 0x02 ? 1 : 0));
+ ec_read(EEEPC_EC_FAN_CTRL, &value);
+ if (value & 0x02)
+ return 1; /* manual */
+ else
+ return 2; /* automatic */
}
static void eeepc_set_fan_ctrl(int manual)
{
- int value = 0;
+ u8 value = 0;
- read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
- if (manual)
+ ec_read(EEEPC_EC_FAN_CTRL, &value);
+ if (manual == 1)
value |= 0x02;
else
value &= ~0x02;
- ec_write(EEEPC_EC_SFB3, value);
+ ec_write(EEEPC_EC_FAN_CTRL, value);
}
static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
@@ -970,348 +990,485 @@ static struct attribute_group hwmon_attribute_group = {
.attrs = hwmon_attributes
};
-/*
- * exit/init
- */
-static void eeepc_backlight_exit(void)
+static void eeepc_hwmon_exit(struct eeepc_laptop *eeepc)
{
- if (eeepc_backlight_device)
- backlight_device_unregister(eeepc_backlight_device);
- eeepc_backlight_device = NULL;
+ struct device *hwmon;
+
+ hwmon = eeepc->hwmon_device;
+ if (!hwmon)
+ return;
+ sysfs_remove_group(&hwmon->kobj,
+ &hwmon_attribute_group);
+ hwmon_device_unregister(hwmon);
+ eeepc->hwmon_device = NULL;
}
-static void eeepc_rfkill_exit(void)
+static int eeepc_hwmon_init(struct eeepc_laptop *eeepc)
{
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P5");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
- if (ehotk->wlan_rfkill) {
- rfkill_unregister(ehotk->wlan_rfkill);
- rfkill_destroy(ehotk->wlan_rfkill);
- ehotk->wlan_rfkill = NULL;
- }
- /*
- * Refresh pci hotplug in case the rfkill state was changed after
- * eeepc_unregister_rfkill_notifier()
- */
- eeepc_rfkill_hotplug();
- if (ehotk->hotplug_slot)
- pci_hp_deregister(ehotk->hotplug_slot);
-
- if (ehotk->bluetooth_rfkill) {
- rfkill_unregister(ehotk->bluetooth_rfkill);
- rfkill_destroy(ehotk->bluetooth_rfkill);
- ehotk->bluetooth_rfkill = NULL;
- }
- if (ehotk->wwan3g_rfkill) {
- rfkill_unregister(ehotk->wwan3g_rfkill);
- rfkill_destroy(ehotk->wwan3g_rfkill);
- ehotk->wwan3g_rfkill = NULL;
- }
- if (ehotk->wimax_rfkill) {
- rfkill_unregister(ehotk->wimax_rfkill);
- rfkill_destroy(ehotk->wimax_rfkill);
- ehotk->wimax_rfkill = NULL;
+ struct device *hwmon;
+ int result;
+
+ hwmon = hwmon_device_register(&eeepc->platform_device->dev);
+ if (IS_ERR(hwmon)) {
+ pr_err("Could not register eeepc hwmon device\n");
+ eeepc->hwmon_device = NULL;
+ return PTR_ERR(hwmon);
}
+ eeepc->hwmon_device = hwmon;
+ result = sysfs_create_group(&hwmon->kobj,
+ &hwmon_attribute_group);
+ if (result)
+ eeepc_hwmon_exit(eeepc);
+ return result;
}
-static void eeepc_input_exit(void)
+/*
+ * Backlight device
+ */
+static int read_brightness(struct backlight_device *bd)
{
- if (ehotk->inputdev)
- input_unregister_device(ehotk->inputdev);
+ struct eeepc_laptop *eeepc = bl_get_data(bd);
+
+ return get_acpi(eeepc, CM_ASL_PANELBRIGHT);
}
-static void eeepc_hwmon_exit(void)
+static int set_brightness(struct backlight_device *bd, int value)
{
- struct device *hwmon;
+ struct eeepc_laptop *eeepc = bl_get_data(bd);
- hwmon = eeepc_hwmon_device;
- if (!hwmon)
- return ;
- sysfs_remove_group(&hwmon->kobj,
- &hwmon_attribute_group);
- hwmon_device_unregister(hwmon);
- eeepc_hwmon_device = NULL;
+ return set_acpi(eeepc, CM_ASL_PANELBRIGHT, value);
}
-static int eeepc_new_rfkill(struct rfkill **rfkill,
- const char *name, struct device *dev,
- enum rfkill_type type, int cm)
+static int update_bl_status(struct backlight_device *bd)
{
- int result;
+ return set_brightness(bd, bd->props.brightness);
+}
- result = get_acpi(cm);
- if (result < 0)
- return result;
+static struct backlight_ops eeepcbl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
- *rfkill = rfkill_alloc(name, dev, type,
- &eeepc_rfkill_ops, (void *)(unsigned long)cm);
+static int eeepc_backlight_notify(struct eeepc_laptop *eeepc)
+{
+ struct backlight_device *bd = eeepc->backlight_device;
+ int old = bd->props.brightness;
- if (!*rfkill)
- return -EINVAL;
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
- rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1);
- result = rfkill_register(*rfkill);
- if (result) {
- rfkill_destroy(*rfkill);
- *rfkill = NULL;
- return result;
- }
- return 0;
+ return old;
}
-
-static int eeepc_rfkill_init(struct device *dev)
+static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
{
- int result = 0;
-
- mutex_init(&ehotk->hotplug_lock);
+ struct backlight_device *bd;
- result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
- "eeepc-wlan", dev,
- RFKILL_TYPE_WLAN, CM_ASL_WLAN);
+ bd = backlight_device_register(EEEPC_LAPTOP_FILE,
+ &eeepc->platform_device->dev,
+ eeepc, &eeepcbl_ops);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register eeepc backlight device\n");
+ eeepc->backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+ eeepc->backlight_device = bd;
+ bd->props.max_brightness = 15;
+ bd->props.brightness = read_brightness(bd);
+ bd->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(bd);
+ return 0;
+}
- if (result && result != -ENODEV)
- goto exit;
+static void eeepc_backlight_exit(struct eeepc_laptop *eeepc)
+{
+ if (eeepc->backlight_device)
+ backlight_device_unregister(eeepc->backlight_device);
+ eeepc->backlight_device = NULL;
+}
- result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill,
- "eeepc-bluetooth", dev,
- RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH);
- if (result && result != -ENODEV)
- goto exit;
+/*
+ * Input device (i.e. hotkeys)
+ */
+static struct key_entry *eeepc_get_entry_by_scancode(
+ struct eeepc_laptop *eeepc,
+ int code)
+{
+ struct key_entry *key;
- result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill,
- "eeepc-wwan3g", dev,
- RFKILL_TYPE_WWAN, CM_ASL_3G);
+ for (key = eeepc->keymap; key->type != KE_END; key++)
+ if (code == key->code)
+ return key;
- if (result && result != -ENODEV)
- goto exit;
+ return NULL;
+}
- result = eeepc_new_rfkill(&ehotk->wimax_rfkill,
- "eeepc-wimax", dev,
- RFKILL_TYPE_WIMAX, CM_ASL_WIMAX);
+static void eeepc_input_notify(struct eeepc_laptop *eeepc, int event)
+{
+ static struct key_entry *key;
- if (result && result != -ENODEV)
- goto exit;
+ key = eeepc_get_entry_by_scancode(eeepc, event);
+ if (key) {
+ switch (key->type) {
+ case KE_KEY:
+ input_report_key(eeepc->inputdev, key->keycode,
+ 1);
+ input_sync(eeepc->inputdev);
+ input_report_key(eeepc->inputdev, key->keycode,
+ 0);
+ input_sync(eeepc->inputdev);
+ break;
+ }
+ }
+}
- result = eeepc_setup_pci_hotplug();
- /*
- * If we get -EBUSY then something else is handling the PCI hotplug -
- * don't fail in this case
- */
- if (result == -EBUSY)
- result = 0;
+static struct key_entry *eeepc_get_entry_by_keycode(
+ struct eeepc_laptop *eeepc, int code)
+{
+ struct key_entry *key;
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P5");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
- /*
- * Refresh pci hotplug in case the rfkill state was changed during
- * setup.
- */
- eeepc_rfkill_hotplug();
+ for (key = eeepc->keymap; key->type != KE_END; key++)
+ if (code == key->keycode && key->type == KE_KEY)
+ return key;
-exit:
- if (result && result != -ENODEV)
- eeepc_rfkill_exit();
- return result;
+ return NULL;
}
-static int eeepc_backlight_init(struct device *dev)
+static int eeepc_getkeycode(struct input_dev *dev, int scancode, int *keycode)
{
- struct backlight_device *bd;
+ struct eeepc_laptop *eeepc = input_get_drvdata(dev);
+ struct key_entry *key = eeepc_get_entry_by_scancode(eeepc, scancode);
- bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
- NULL, &eeepcbl_ops);
- if (IS_ERR(bd)) {
- pr_err("Could not register eeepc backlight device\n");
- eeepc_backlight_device = NULL;
- return PTR_ERR(bd);
+ if (key && key->type == KE_KEY) {
+ *keycode = key->keycode;
+ return 0;
}
- eeepc_backlight_device = bd;
- bd->props.max_brightness = 15;
- bd->props.brightness = read_brightness(NULL);
- bd->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(bd);
- return 0;
+
+ return -EINVAL;
}
-static int eeepc_hwmon_init(struct device *dev)
+static int eeepc_setkeycode(struct input_dev *dev, int scancode, int keycode)
{
- struct device *hwmon;
- int result;
+ struct eeepc_laptop *eeepc = input_get_drvdata(dev);
+ struct key_entry *key;
+ int old_keycode;
- hwmon = hwmon_device_register(dev);
- if (IS_ERR(hwmon)) {
- pr_err("Could not register eeepc hwmon device\n");
- eeepc_hwmon_device = NULL;
- return PTR_ERR(hwmon);
+ if (keycode < 0 || keycode > KEY_MAX)
+ return -EINVAL;
+
+ key = eeepc_get_entry_by_scancode(eeepc, scancode);
+ if (key && key->type == KE_KEY) {
+ old_keycode = key->keycode;
+ key->keycode = keycode;
+ set_bit(keycode, dev->keybit);
+ if (!eeepc_get_entry_by_keycode(eeepc, old_keycode))
+ clear_bit(old_keycode, dev->keybit);
+ return 0;
}
- eeepc_hwmon_device = hwmon;
- result = sysfs_create_group(&hwmon->kobj,
- &hwmon_attribute_group);
- if (result)
- eeepc_hwmon_exit();
- return result;
+
+ return -EINVAL;
}
-static int eeepc_input_init(struct device *dev)
+static int eeepc_input_init(struct eeepc_laptop *eeepc)
{
const struct key_entry *key;
int result;
- ehotk->inputdev = input_allocate_device();
- if (!ehotk->inputdev) {
+ eeepc->inputdev = input_allocate_device();
+ if (!eeepc->inputdev) {
pr_info("Unable to allocate input device\n");
return -ENOMEM;
}
- ehotk->inputdev->name = "Asus EeePC extra buttons";
- ehotk->inputdev->dev.parent = dev;
- ehotk->inputdev->phys = EEEPC_HOTK_FILE "/input0";
- ehotk->inputdev->id.bustype = BUS_HOST;
- ehotk->inputdev->getkeycode = eeepc_getkeycode;
- ehotk->inputdev->setkeycode = eeepc_setkeycode;
-
+ eeepc->inputdev->name = "Asus EeePC extra buttons";
+ eeepc->inputdev->dev.parent = &eeepc->platform_device->dev;
+ eeepc->inputdev->phys = EEEPC_LAPTOP_FILE "/input0";
+ eeepc->inputdev->id.bustype = BUS_HOST;
+ eeepc->inputdev->getkeycode = eeepc_getkeycode;
+ eeepc->inputdev->setkeycode = eeepc_setkeycode;
+ input_set_drvdata(eeepc->inputdev, eeepc);
+
+ eeepc->keymap = kmemdup(eeepc_keymap, sizeof(eeepc_keymap),
+ GFP_KERNEL);
for (key = eeepc_keymap; key->type != KE_END; key++) {
switch (key->type) {
case KE_KEY:
- set_bit(EV_KEY, ehotk->inputdev->evbit);
- set_bit(key->keycode, ehotk->inputdev->keybit);
+ set_bit(EV_KEY, eeepc->inputdev->evbit);
+ set_bit(key->keycode, eeepc->inputdev->keybit);
break;
}
}
- result = input_register_device(ehotk->inputdev);
+ result = input_register_device(eeepc->inputdev);
if (result) {
pr_info("Unable to register input device\n");
- input_free_device(ehotk->inputdev);
+ input_free_device(eeepc->inputdev);
return result;
}
return 0;
}
-static int __devinit eeepc_hotk_add(struct acpi_device *device)
+static void eeepc_input_exit(struct eeepc_laptop *eeepc)
+{
+ if (eeepc->inputdev) {
+ input_unregister_device(eeepc->inputdev);
+ kfree(eeepc->keymap);
+ }
+}
+
+/*
+ * ACPI driver
+ */
+static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct eeepc_laptop *eeepc = acpi_driver_data(device);
+ u16 count;
+
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
+ count = eeepc->event_count[event % 128]++;
+ acpi_bus_generate_proc_event(device, event, count);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event,
+ count);
+
+ /* Brightness events are special */
+ if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX) {
+
+ /* Ignore them completely if the acpi video driver is used */
+ if (eeepc->backlight_device != NULL) {
+ int old_brightness, new_brightness;
+
+ /* Update the backlight device. */
+ old_brightness = eeepc_backlight_notify(eeepc);
+
+ /* Convert event to keypress (obsolescent hack) */
+ new_brightness = event - NOTIFY_BRN_MIN;
+
+ if (new_brightness < old_brightness) {
+ event = NOTIFY_BRN_MIN; /* brightness down */
+ } else if (new_brightness > old_brightness) {
+ event = NOTIFY_BRN_MAX; /* brightness up */
+ } else {
+ /*
+ * no change in brightness - already at min/max,
+ * event will be desired value (or else ignored)
+ */
+ }
+ eeepc_input_notify(eeepc, event);
+ }
+ } else {
+ /* Everything else is a bona-fide keypress event */
+ eeepc_input_notify(eeepc, event);
+ }
+}
+
+static void cmsg_quirk(struct eeepc_laptop *eeepc, int cm, const char *name)
+{
+ int dummy;
+
+ /* Some BIOSes do not report cm although it is avaliable.
+ Check if cm_getv[cm] works and, if yes, assume cm should be set. */
+ if (!(eeepc->cm_supported & (1 << cm))
+ && !read_acpi_int(eeepc->handle, cm_getv[cm], &dummy)) {
+ pr_info("%s (%x) not reported by BIOS,"
+ " enabling anyway\n", name, 1 << cm);
+ eeepc->cm_supported |= 1 << cm;
+ }
+}
+
+static void cmsg_quirks(struct eeepc_laptop *eeepc)
+{
+ cmsg_quirk(eeepc, CM_ASL_LID, "LID");
+ cmsg_quirk(eeepc, CM_ASL_TYPE, "TYPE");
+ cmsg_quirk(eeepc, CM_ASL_PANELPOWER, "PANELPOWER");
+ cmsg_quirk(eeepc, CM_ASL_TPD, "TPD");
+}
+
+static int eeepc_acpi_init(struct eeepc_laptop *eeepc,
+ struct acpi_device *device)
{
- struct device *dev;
+ unsigned int init_flags;
int result;
- if (!device)
- return -EINVAL;
- pr_notice(EEEPC_HOTK_NAME "\n");
- ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
- if (!ehotk)
- return -ENOMEM;
- ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
- ehotk->handle = device->handle;
- strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
- strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
- device->driver_data = ehotk;
- ehotk->device = device;
-
- result = eeepc_hotk_check();
+ result = acpi_bus_get_status(device);
if (result)
- goto fail_platform_driver;
- eeepc_enable_camera();
+ return result;
+ if (!device->status.present) {
+ pr_err("Hotkey device not present, aborting\n");
+ return -ENODEV;
+ }
- /* Register platform stuff */
- result = platform_driver_register(&platform_driver);
- if (result)
- goto fail_platform_driver;
- platform_device = platform_device_alloc(EEEPC_HOTK_FILE, -1);
- if (!platform_device) {
- result = -ENOMEM;
- goto fail_platform_device1;
+ init_flags = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
+ pr_notice("Hotkey init flags 0x%x\n", init_flags);
+
+ if (write_acpi_int(eeepc->handle, "INIT", init_flags)) {
+ pr_err("Hotkey initialization failed\n");
+ return -ENODEV;
}
- result = platform_device_add(platform_device);
- if (result)
- goto fail_platform_device2;
- result = sysfs_create_group(&platform_device->dev.kobj,
- &platform_attribute_group);
+
+ /* get control methods supported */
+ if (read_acpi_int(eeepc->handle, "CMSG", &eeepc->cm_supported)) {
+ pr_err("Get control methods supported failed\n");
+ return -ENODEV;
+ }
+ cmsg_quirks(eeepc);
+ pr_info("Get control methods supported: 0x%x\n", eeepc->cm_supported);
+
+ return 0;
+}
+
+static void __devinit eeepc_enable_camera(struct eeepc_laptop *eeepc)
+{
+ /*
+ * If the following call to set_acpi() fails, it's because there's no
+ * camera so we can ignore the error.
+ */
+ if (get_acpi(eeepc, CM_ASL_CAMERA) == 0)
+ set_acpi(eeepc, CM_ASL_CAMERA, 1);
+}
+
+static bool eeepc_device_present;
+
+static int __devinit eeepc_acpi_add(struct acpi_device *device)
+{
+ struct eeepc_laptop *eeepc;
+ int result;
+
+ pr_notice(EEEPC_LAPTOP_NAME "\n");
+ eeepc = kzalloc(sizeof(struct eeepc_laptop), GFP_KERNEL);
+ if (!eeepc)
+ return -ENOMEM;
+ eeepc->handle = device->handle;
+ strcpy(acpi_device_name(device), EEEPC_ACPI_DEVICE_NAME);
+ strcpy(acpi_device_class(device), EEEPC_ACPI_CLASS);
+ device->driver_data = eeepc;
+
+ result = eeepc_acpi_init(eeepc, device);
if (result)
- goto fail_sysfs;
+ goto fail_platform;
+ eeepc_enable_camera(eeepc);
- dev = &platform_device->dev;
+ /*
+ * Register the platform device first. It is used as a parent for the
+ * sub-devices below.
+ *
+ * Note that if there are multiple instances of this ACPI device it
+ * will bail out, because the platform device is registered with a
+ * fixed name. Of course it doesn't make sense to have more than one,
+ * and machine-specific scripts find the fixed name convenient. But
+	 * it's also good for us to exclude multiple instances because both
+ * our hwmon and our wlan rfkill subdevice use global ACPI objects
+ * (the EC and the wlan PCI slot respectively).
+ */
+ result = eeepc_platform_init(eeepc);
+ if (result)
+ goto fail_platform;
if (!acpi_video_backlight_support()) {
- result = eeepc_backlight_init(dev);
+ result = eeepc_backlight_init(eeepc);
if (result)
goto fail_backlight;
} else
- pr_info("Backlight controlled by ACPI video "
- "driver\n");
+ pr_info("Backlight controlled by ACPI video driver\n");
- result = eeepc_input_init(dev);
+ result = eeepc_input_init(eeepc);
if (result)
goto fail_input;
- result = eeepc_hwmon_init(dev);
+ result = eeepc_hwmon_init(eeepc);
if (result)
goto fail_hwmon;
- result = eeepc_rfkill_init(dev);
+ result = eeepc_led_init(eeepc);
+ if (result)
+ goto fail_led;
+
+ result = eeepc_rfkill_init(eeepc);
if (result)
goto fail_rfkill;
+ eeepc_device_present = true;
return 0;
fail_rfkill:
- eeepc_hwmon_exit();
+ eeepc_led_exit(eeepc);
+fail_led:
+ eeepc_hwmon_exit(eeepc);
fail_hwmon:
- eeepc_input_exit();
+ eeepc_input_exit(eeepc);
fail_input:
- eeepc_backlight_exit();
+ eeepc_backlight_exit(eeepc);
fail_backlight:
- sysfs_remove_group(&platform_device->dev.kobj,
- &platform_attribute_group);
-fail_sysfs:
- platform_device_del(platform_device);
-fail_platform_device2:
- platform_device_put(platform_device);
-fail_platform_device1:
- platform_driver_unregister(&platform_driver);
-fail_platform_driver:
- kfree(ehotk);
+ eeepc_platform_exit(eeepc);
+fail_platform:
+ kfree(eeepc);
return result;
}
-static int eeepc_hotk_remove(struct acpi_device *device, int type)
+static int eeepc_acpi_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
+ struct eeepc_laptop *eeepc = acpi_driver_data(device);
- eeepc_backlight_exit();
- eeepc_rfkill_exit();
- eeepc_input_exit();
- eeepc_hwmon_exit();
- sysfs_remove_group(&platform_device->dev.kobj,
- &platform_attribute_group);
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
+ eeepc_backlight_exit(eeepc);
+ eeepc_rfkill_exit(eeepc);
+ eeepc_input_exit(eeepc);
+ eeepc_hwmon_exit(eeepc);
+ eeepc_led_exit(eeepc);
+ eeepc_platform_exit(eeepc);
- kfree(ehotk);
+ kfree(eeepc);
return 0;
}
+
+static const struct acpi_device_id eeepc_device_ids[] = {
+ {EEEPC_ACPI_HID, 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
+
+static struct acpi_driver eeepc_acpi_driver = {
+ .name = EEEPC_LAPTOP_NAME,
+ .class = EEEPC_ACPI_CLASS,
+ .owner = THIS_MODULE,
+ .ids = eeepc_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = eeepc_acpi_add,
+ .remove = eeepc_acpi_remove,
+ .notify = eeepc_acpi_notify,
+ },
+};
+
+
static int __init eeepc_laptop_init(void)
{
int result;
- if (acpi_disabled)
- return -ENODEV;
- result = acpi_bus_register_driver(&eeepc_hotk_driver);
+ result = platform_driver_register(&platform_driver);
if (result < 0)
return result;
- if (!ehotk) {
- acpi_bus_unregister_driver(&eeepc_hotk_driver);
- return -ENODEV;
+
+ result = acpi_bus_register_driver(&eeepc_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+ if (!eeepc_device_present) {
+ result = -ENODEV;
+ goto fail_no_device;
}
return 0;
+
+fail_no_device:
+ acpi_bus_unregister_driver(&eeepc_acpi_driver);
+fail_acpi_driver:
+ platform_driver_unregister(&platform_driver);
+ return result;
}
static void __exit eeepc_laptop_exit(void)
{
- acpi_bus_unregister_driver(&eeepc_hotk_driver);
+ acpi_bus_unregister_driver(&eeepc_acpi_driver);
+ platform_driver_unregister(&platform_driver);
}
module_init(eeepc_laptop_init);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index bcd4ba8be7d..b66029bd75d 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -164,7 +164,7 @@ struct fujitsu_hotkey_t {
struct input_dev *input;
char phys[32];
struct platform_device *pf_device;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
int rfkill_supported;
int rfkill_state;
@@ -824,12 +824,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
/* kfifo */
spin_lock_init(&fujitsu_hotkey->fifo_lock);
- fujitsu_hotkey->fifo =
- kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL,
- &fujitsu_hotkey->fifo_lock);
- if (IS_ERR(fujitsu_hotkey->fifo)) {
+ error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
+ GFP_KERNEL);
+ if (error) {
printk(KERN_ERR "kfifo_alloc failed\n");
- error = PTR_ERR(fujitsu_hotkey->fifo);
goto err_stop;
}
@@ -934,7 +932,7 @@ err_unregister_input_dev:
err_free_input_dev:
input_free_device(input);
err_free_fifo:
- kfifo_free(fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_hotkey->fifo);
err_stop:
return result;
}
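The fujitsu-laptop hunks above (and the sony-laptop ones later in this patch) are a mechanical conversion from the old pointer-returning kfifo_alloc() to the embedded-struct kfifo API, with the spinlock now passed explicitly to the locked put/get helpers. A minimal, self-contained sketch of the new-style usage, assuming the 2.6.33-era interface these hunks target:

#include <linux/kfifo.h>
#include <linux/spinlock.h>

struct my_dev {
	struct kfifo fifo;		/* embedded, no longer a pointer */
	spinlock_t fifo_lock;
};

static int my_dev_init(struct my_dev *d)
{
	spin_lock_init(&d->fifo_lock);
	/* kfifo_alloc() now returns 0 or a -errno instead of a pointer */
	return kfifo_alloc(&d->fifo, 128 * sizeof(int), GFP_KERNEL);
}

static void my_dev_push(struct my_dev *d, int keycode)
{
	/* the lock is passed explicitly instead of being stored in the fifo */
	kfifo_in_locked(&d->fifo, (unsigned char *)&keycode,
			sizeof(keycode), &d->fifo_lock);
}

static int my_dev_pop(struct my_dev *d, int *keycode)
{
	return kfifo_out_locked(&d->fifo, (unsigned char *)keycode,
				sizeof(*keycode), &d->fifo_lock);
}

static void my_dev_exit(struct my_dev *d)
{
	kfifo_free(&d->fifo);
}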
@@ -956,7 +954,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type)
input_free_device(input);
- kfifo_free(fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_hotkey->fifo);
fujitsu_hotkey->acpi_handle = NULL;
@@ -1008,9 +1006,10 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"Push keycode into ringbuffer [%d]\n",
keycode);
- status = kfifo_put(fujitsu_hotkey->fifo,
+ status = kfifo_in_locked(&fujitsu_hotkey->fifo,
(unsigned char *)&keycode,
- sizeof(keycode));
+ sizeof(keycode),
+ &fujitsu_hotkey->fifo_lock);
if (status != sizeof(keycode)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"Could not push keycode [0x%x]\n",
@@ -1021,11 +1020,12 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
}
} else if (keycode == 0) {
while ((status =
- kfifo_get
- (fujitsu_hotkey->fifo, (unsigned char *)
- &keycode_r,
- sizeof
- (keycode_r))) == sizeof(keycode_r)) {
+ kfifo_out_locked(
+ &fujitsu_hotkey->fifo,
+ (unsigned char *) &keycode_r,
+ sizeof(keycode_r),
+ &fujitsu_hotkey->fifo_lock))
+ == sizeof(keycode_r)) {
input_report_key(input, keycode_r, 0);
input_sync(input);
vdbg_printk(FUJLAPTOP_DBG_TRACE,
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index c2842171cec..63c3e658a88 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -51,6 +51,12 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
+enum hp_wmi_radio {
+ HPWMI_WIFI = 0,
+ HPWMI_BLUETOOTH = 1,
+ HPWMI_WWAN = 2,
+};
+
static int __init hp_wmi_bios_setup(struct platform_device *device);
static int __exit hp_wmi_bios_remove(struct platform_device *device);
static int hp_wmi_resume_handler(struct device *device);
@@ -94,7 +100,7 @@ static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
-static struct dev_pm_ops hp_wmi_pm_ops = {
+static const struct dev_pm_ops hp_wmi_pm_ops = {
.resume = hp_wmi_resume_handler,
.restore = hp_wmi_resume_handler,
};
@@ -175,8 +181,8 @@ static int hp_wmi_tablet_state(void)
static int hp_wmi_set_block(void *data, bool blocked)
{
- unsigned long b = (unsigned long) data;
- int query = BIT(b + 8) | ((!blocked) << b);
+ enum hp_wmi_radio r = (enum hp_wmi_radio) data;
+ int query = BIT(r + 8) | ((!blocked) << r);
return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query);
}
@@ -185,31 +191,23 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
.set_block = hp_wmi_set_block,
};
-static bool hp_wmi_wifi_state(void)
-{
- int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
-
- if (wireless & 0x100)
- return false;
- else
- return true;
-}
-
-static bool hp_wmi_bluetooth_state(void)
+static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
{
int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
+ int mask = 0x200 << (r * 8);
- if (wireless & 0x10000)
+ if (wireless & mask)
return false;
else
return true;
}
-static bool hp_wmi_wwan_state(void)
+static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
{
int wireless = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 0);
+ int mask = 0x800 << (r * 8);
- if (wireless & 0x1000000)
+ if (wireless & mask)
return false;
else
return true;
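The two helpers above replace three per-radio copies with one parameterised query; the wireless status word packs one byte per radio, with the soft-block and hard-block bits at fixed offsets inside that byte. A worked expansion of the mask arithmetic (the "set bit means enabled" reading is inferred from how these return values feed the rfkill block state later in this patch):

/* Worked example of the mask computation above (r is the radio index):
 *
 *   r = HPWMI_WIFI      (0): sw mask 0x200 << 0  = 0x00000200, hw mask 0x00000800
 *   r = HPWMI_BLUETOOTH (1): sw mask 0x200 << 8  = 0x00020000, hw mask 0x00080000
 *   r = HPWMI_WWAN      (2): sw mask 0x200 << 16 = 0x02000000, hw mask 0x08000000
 *
 * A set bit indicates the radio is not blocked, so the helpers return
 * false (unblocked) when the bit is set.
 */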
@@ -334,49 +332,55 @@ static void hp_wmi_notify(u32 value, void *context)
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
+ int eventcode;
wmi_get_event_data(value, &response);
obj = (union acpi_object *)response.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == 8) {
- int eventcode = *((u8 *) obj->buffer.pointer);
- if (eventcode == 0x4)
- eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
- 0);
- key = hp_wmi_get_entry_by_scancode(eventcode);
- if (key) {
- switch (key->type) {
- case KE_KEY:
- input_report_key(hp_wmi_input_dev,
- key->keycode, 1);
- input_sync(hp_wmi_input_dev);
- input_report_key(hp_wmi_input_dev,
- key->keycode, 0);
- input_sync(hp_wmi_input_dev);
- break;
- }
- } else if (eventcode == 0x1) {
- input_report_switch(hp_wmi_input_dev, SW_DOCK,
- hp_wmi_dock_state());
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
- hp_wmi_tablet_state());
+ if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length != 8) {
+ printk(KERN_INFO "HP WMI: Unknown response received\n");
+ return;
+ }
+
+ eventcode = *((u8 *) obj->buffer.pointer);
+ if (eventcode == 0x4)
+ eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
+ 0);
+ key = hp_wmi_get_entry_by_scancode(eventcode);
+ if (key) {
+ switch (key->type) {
+ case KE_KEY:
+ input_report_key(hp_wmi_input_dev,
+ key->keycode, 1);
+ input_sync(hp_wmi_input_dev);
+ input_report_key(hp_wmi_input_dev,
+ key->keycode, 0);
input_sync(hp_wmi_input_dev);
- } else if (eventcode == 0x5) {
- if (wifi_rfkill)
- rfkill_set_sw_state(wifi_rfkill,
- hp_wmi_wifi_state());
- if (bluetooth_rfkill)
- rfkill_set_sw_state(bluetooth_rfkill,
- hp_wmi_bluetooth_state());
- if (wwan_rfkill)
- rfkill_set_sw_state(wwan_rfkill,
- hp_wmi_wwan_state());
- } else
- printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
- eventcode);
+ break;
+ }
+ } else if (eventcode == 0x1) {
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
+ } else if (eventcode == 0x5) {
+ if (wifi_rfkill)
+ rfkill_set_states(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI),
+ hp_wmi_get_hw_state(HPWMI_WIFI));
+ if (bluetooth_rfkill)
+ rfkill_set_states(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+ if (wwan_rfkill)
+ rfkill_set_states(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN),
+ hp_wmi_get_hw_state(HPWMI_WWAN));
} else
- printk(KERN_INFO "HP WMI: Unknown response received\n");
+ printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
+ eventcode);
}
static int __init hp_wmi_input_setup(void)
@@ -455,7 +459,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
RFKILL_TYPE_WLAN,
&hp_wmi_rfkill_ops,
- (void *) 0);
+ (void *) HPWMI_WIFI);
+ rfkill_init_sw_state(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI));
+ rfkill_set_hw_state(wifi_rfkill,
+ hp_wmi_get_hw_state(HPWMI_WIFI));
err = rfkill_register(wifi_rfkill);
if (err)
goto register_wifi_error;
@@ -465,7 +473,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev,
RFKILL_TYPE_BLUETOOTH,
&hp_wmi_rfkill_ops,
- (void *) 1);
+ (void *) HPWMI_BLUETOOTH);
+ rfkill_init_sw_state(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
+ rfkill_set_hw_state(bluetooth_rfkill,
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
err = rfkill_register(bluetooth_rfkill);
if (err)
goto register_bluetooth_error;
@@ -475,7 +487,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev,
RFKILL_TYPE_WWAN,
&hp_wmi_rfkill_ops,
- (void *) 2);
+ (void *) HPWMI_WWAN);
+ rfkill_init_sw_state(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN));
+ rfkill_set_hw_state(wwan_rfkill,
+ hp_wmi_get_hw_state(HPWMI_WWAN));
err = rfkill_register(wwan_rfkill);
if (err)
goto register_wwan_err;
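Each radio now follows the same alloc, seed-state, register sequence, keyed by the hp_wmi_radio enum instead of bare integer cookies. A condensed sketch of that pattern; register_radio() is a hypothetical helper for illustration, not part of the patch, and error handling is reduced to the minimum:

/* Sketch of the per-radio registration pattern used above. */
static int register_radio(struct device *parent, enum hp_wmi_radio r,
			  const char *name, enum rfkill_type type,
			  struct rfkill **out)
{
	struct rfkill *rfk;
	int err;

	rfk = rfkill_alloc(name, parent, type, &hp_wmi_rfkill_ops, (void *)r);
	if (!rfk)
		return -ENOMEM;

	/* seed the initial soft/hard block state before registering */
	rfkill_init_sw_state(rfk, hp_wmi_get_sw_state(r));
	rfkill_set_hw_state(rfk, hp_wmi_get_hw_state(r));

	err = rfkill_register(rfk);
	if (err) {
		rfkill_destroy(rfk);
		return err;
	}
	*out = rfk;
	return 0;
}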
@@ -533,6 +549,19 @@ static int hp_wmi_resume_handler(struct device *device)
input_sync(hp_wmi_input_dev);
}
+ if (wifi_rfkill)
+ rfkill_set_states(wifi_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WIFI),
+ hp_wmi_get_hw_state(HPWMI_WIFI));
+ if (bluetooth_rfkill)
+ rfkill_set_states(bluetooth_rfkill,
+ hp_wmi_get_sw_state(HPWMI_BLUETOOTH),
+ hp_wmi_get_hw_state(HPWMI_BLUETOOTH));
+ if (wwan_rfkill)
+ rfkill_set_states(wwan_rfkill,
+ hp_wmi_get_sw_state(HPWMI_WWAN),
+ hp_wmi_get_hw_state(HPWMI_WWAN));
+
return 0;
}
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
new file mode 100644
index 00000000000..0c8fe145c4a
--- /dev/null
+++ b/drivers/platform/x86/msi-wmi.c
@@ -0,0 +1,293 @@
+/*
+ * MSI WMI hotkeys
+ *
+ * Copyright (C) 2009 Novell <trenn@suse.de>
+ *
+ * Most stuff taken over from hp-wmi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+
+MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
+MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
+MODULE_LICENSE("GPL");
+
+MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
+MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
+
+/* Temporary workaround until the WMI sysfs interface goes in
+ { "svn", DMI_SYS_VENDOR },
+ { "pn", DMI_PRODUCT_NAME },
+ { "pvr", DMI_PRODUCT_VERSION },
+ { "rvn", DMI_BOARD_VENDOR },
+ { "rn", DMI_BOARD_NAME },
+*/
+
+MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-6638:*");
+
+#define DRV_NAME "msi-wmi"
+#define DRV_PFX DRV_NAME ": "
+
+#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
+#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
+
+#define dprintk(msg...) pr_debug(DRV_PFX msg)
+
+#define KEYCODE_BASE 0xD0
+#define MSI_WMI_BRIGHTNESSUP KEYCODE_BASE
+#define MSI_WMI_BRIGHTNESSDOWN (KEYCODE_BASE + 1)
+#define MSI_WMI_VOLUMEUP (KEYCODE_BASE + 2)
+#define MSI_WMI_VOLUMEDOWN (KEYCODE_BASE + 3)
+static struct key_entry msi_wmi_keymap[] = {
+ { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
+ { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
+ { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} },
+ { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
+ { KE_END, 0}
+};
+static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1];
+
+struct backlight_device *backlight;
+
+static int backlight_map[] = { 0x00, 0x33, 0x66, 0x99, 0xCC, 0xFF };
+
+static struct input_dev *msi_wmi_input_dev;
+
+static int msi_wmi_query_block(int instance, int *ret)
+{
+ acpi_status status;
+ union acpi_object *obj;
+
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ status = wmi_query_block(MSIWMI_BIOS_GUID, instance, &output);
+
+ obj = output.pointer;
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER) {
+ if (obj) {
+ printk(KERN_ERR DRV_PFX "query block returned object "
+ "type: %d - buffer length:%d\n", obj->type,
+ obj->type == ACPI_TYPE_BUFFER ?
+ obj->buffer.length : 0);
+ }
+ kfree(obj);
+ return -EINVAL;
+ }
+ *ret = obj->integer.value;
+ kfree(obj);
+ return 0;
+}
+
+static int msi_wmi_set_block(int instance, int value)
+{
+ acpi_status status;
+
+ struct acpi_buffer input = { sizeof(int), &value };
+
+ dprintk("Going to set block of instance: %d - value: %d\n",
+ instance, value);
+
+ status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
+
+ return ACPI_SUCCESS(status) ? 0 : 1;
+}
+
+static int bl_get(struct backlight_device *bd)
+{
+ int level, err, ret;
+
+ /* Instance 1 is "get backlight", cmp with DSDT */
+ err = msi_wmi_query_block(1, &ret);
+ if (err) {
+ printk(KERN_ERR DRV_PFX "Could not query backlight: %d\n", err);
+ return -EINVAL;
+ }
+ dprintk("Get: Query block returned: %d\n", ret);
+ for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
+ if (backlight_map[level] == ret) {
+ dprintk("Current backlight level: 0x%X - index: %d\n",
+ backlight_map[level], level);
+ break;
+ }
+ }
+ if (level == ARRAY_SIZE(backlight_map)) {
+ printk(KERN_ERR DRV_PFX "get: Invalid brightness value: 0x%X\n",
+ ret);
+ return -EINVAL;
+ }
+ return level;
+}
+
+static int bl_set_status(struct backlight_device *bd)
+{
+ int bright = bd->props.brightness;
+ if (bright >= ARRAY_SIZE(backlight_map) || bright < 0)
+ return -EINVAL;
+
+ /* Instance 0 is "set backlight" */
+ return msi_wmi_set_block(0, backlight_map[bright]);
+}
+
+static struct backlight_ops msi_backlight_ops = {
+ .get_brightness = bl_get,
+ .update_status = bl_set_status,
+};
+
+static void msi_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ static struct key_entry *key;
+ union acpi_object *obj;
+ ktime_t cur;
+
+ wmi_get_event_data(value, &response);
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ int eventcode = obj->integer.value;
+ dprintk("Eventcode: 0x%x\n", eventcode);
+ key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
+ eventcode);
+ if (key) {
+ ktime_t diff;
+ cur = ktime_get_real();
+ diff = ktime_sub(cur, last_pressed[key->code -
+ KEYCODE_BASE]);
+			/* Ignore the event if the same event arrived within
+			   the last 50 ms - a single key press may generate
+			   10-20 GPEs */
+ if (ktime_to_us(diff) < 1000 * 50) {
+ dprintk("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
+ key->code, ktime_to_us(diff));
+ return;
+ }
+ last_pressed[key->code - KEYCODE_BASE] = cur;
+
+ if (key->type == KE_KEY &&
+ /* Brightness is served via acpi video driver */
+ (!acpi_video_backlight_support() ||
+ (key->code != MSI_WMI_BRIGHTNESSUP &&
+ key->code != MSI_WMI_BRIGHTNESSDOWN))) {
+ dprintk("Send key: 0x%X - "
+ "Input layer keycode: %d\n", key->code,
+ key->keycode);
+ sparse_keymap_report_entry(msi_wmi_input_dev,
+ key, 1, true);
+ }
+ } else
+ printk(KERN_INFO "Unknown key pressed - %x\n",
+ eventcode);
+ } else
+ printk(KERN_INFO DRV_PFX "Unknown event received\n");
+ kfree(response.pointer);
+}
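The 50 ms window above is the heart of the handler: one physical key press can produce a burst of 10-20 GPEs, so the driver keeps a per-key timestamp and drops repeats. A stripped-down sketch of the same check; key_is_repeat() is a hypothetical helper for illustration:

#include <linux/ktime.h>

/* Sketch of the per-key debounce used in msi_wmi_notify(), 50 ms window. */
static bool key_is_repeat(ktime_t *last, ktime_t now)
{
	s64 delta_us = ktime_to_us(ktime_sub(now, *last));

	if (delta_us < 50 * 1000)	/* 50 ms expressed in microseconds */
		return true;		/* same burst, drop it */

	*last = now;			/* accept and remember the press */
	return false;
}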
+
+static int __init msi_wmi_input_setup(void)
+{
+ int err;
+
+ msi_wmi_input_dev = input_allocate_device();
+ if (!msi_wmi_input_dev)
+ return -ENOMEM;
+
+ msi_wmi_input_dev->name = "MSI WMI hotkeys";
+ msi_wmi_input_dev->phys = "wmi/input0";
+ msi_wmi_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(msi_wmi_input_dev, msi_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(msi_wmi_input_dev);
+
+ if (err)
+ goto err_free_keymap;
+
+ memset(last_pressed, 0, sizeof(last_pressed));
+
+ return 0;
+
+err_free_keymap:
+ sparse_keymap_free(msi_wmi_input_dev);
+err_free_dev:
+ input_free_device(msi_wmi_input_dev);
+ return err;
+}
+
+static int __init msi_wmi_init(void)
+{
+ int err;
+
+ if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
+ printk(KERN_ERR
+ "This machine doesn't have MSI-hotkeys through WMI\n");
+ return -ENODEV;
+ }
+ err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
+ msi_wmi_notify, NULL);
+ if (err)
+ return -EINVAL;
+
+ err = msi_wmi_input_setup();
+ if (err)
+ goto err_uninstall_notifier;
+
+ if (!acpi_video_backlight_support()) {
+ backlight = backlight_device_register(DRV_NAME,
+ NULL, NULL, &msi_backlight_ops);
+		if (IS_ERR(backlight)) {
+			err = PTR_ERR(backlight);
+			goto err_free_input;
+		}
+
+ backlight->props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
+ err = bl_get(NULL);
+ if (err < 0)
+ goto err_free_backlight;
+
+ backlight->props.brightness = err;
+ }
+ dprintk("Event handler installed\n");
+
+ return 0;
+
+err_free_backlight:
+ backlight_device_unregister(backlight);
+err_free_input:
+ input_unregister_device(msi_wmi_input_dev);
+err_uninstall_notifier:
+ wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ return err;
+}
+
+static void __exit msi_wmi_exit(void)
+{
+ if (wmi_has_guid(MSIWMI_EVENT_GUID)) {
+ wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ sparse_keymap_free(msi_wmi_input_dev);
+ input_unregister_device(msi_wmi_input_dev);
+ backlight_device_unregister(backlight);
+ }
+}
+
+module_init(msi_wmi_init);
+module_exit(msi_wmi_exit);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 7a2cc8a5c97..2896ca4cd9a 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -142,7 +142,7 @@ struct sony_laptop_input_s {
atomic_t users;
struct input_dev *jog_dev;
struct input_dev *key_dev;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
struct workqueue_struct *wq;
};
@@ -300,8 +300,9 @@ static void do_sony_laptop_release_key(struct work_struct *work)
{
struct sony_laptop_keypress kp;
- while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp,
- sizeof(kp)) == sizeof(kp)) {
+ while (kfifo_out_locked(&sony_laptop_input.fifo, (unsigned char *)&kp,
+ sizeof(kp), &sony_laptop_input.fifo_lock)
+ == sizeof(kp)) {
msleep(10);
input_report_key(kp.dev, kp.key, 0);
input_sync(kp.dev);
@@ -362,8 +363,9 @@ static void sony_laptop_report_input_event(u8 event)
/* we emit the scancode so we can always remap the key */
input_event(kp.dev, EV_MSC, MSC_SCAN, event);
input_sync(kp.dev);
- kfifo_put(sony_laptop_input.fifo,
- (unsigned char *)&kp, sizeof(kp));
+ kfifo_in_locked(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sony_laptop_input.fifo_lock);
if (!work_pending(&sony_laptop_release_key_work))
queue_work(sony_laptop_input.wq,
@@ -385,12 +387,10 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
/* kfifo */
spin_lock_init(&sony_laptop_input.fifo_lock);
- sony_laptop_input.fifo =
- kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
- &sony_laptop_input.fifo_lock);
- if (IS_ERR(sony_laptop_input.fifo)) {
+ error =
+ kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
+ if (error) {
printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
- error = PTR_ERR(sony_laptop_input.fifo);
goto err_dec_users;
}
@@ -474,7 +474,7 @@ err_destroy_wq:
destroy_workqueue(sony_laptop_input.wq);
err_free_kfifo:
- kfifo_free(sony_laptop_input.fifo);
+ kfifo_free(&sony_laptop_input.fifo);
err_dec_users:
atomic_dec(&sony_laptop_input.users);
@@ -500,7 +500,7 @@ static void sony_laptop_remove_input(void)
}
destroy_workqueue(sony_laptop_input.wq);
- kfifo_free(sony_laptop_input.fifo);
+ kfifo_free(&sony_laptop_input.fifo);
}
/*********** Platform Device ***********/
@@ -2079,7 +2079,7 @@ static struct attribute_group spic_attribute_group = {
struct sonypi_compat_s {
struct fasync_struct *fifo_async;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
wait_queue_head_t fifo_proc_list;
atomic_t open_count;
@@ -2104,12 +2104,12 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
/* Flush input queue on first open */
unsigned long flags;
- spin_lock_irqsave(sonypi_compat.fifo->lock, flags);
+ spin_lock_irqsave(&sonypi_compat.fifo_lock, flags);
if (atomic_inc_return(&sonypi_compat.open_count) == 1)
- __kfifo_reset(sonypi_compat.fifo);
+ kfifo_reset(&sonypi_compat.fifo);
- spin_unlock_irqrestore(sonypi_compat.fifo->lock, flags);
+ spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags);
return 0;
}
@@ -2120,17 +2120,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
ssize_t ret;
unsigned char c;
- if ((kfifo_len(sonypi_compat.fifo) == 0) &&
+ if ((kfifo_len(&sonypi_compat.fifo) == 0) &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
- kfifo_len(sonypi_compat.fifo) != 0);
+ kfifo_len(&sonypi_compat.fifo) != 0);
if (ret)
return ret;
while (ret < count &&
- (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) {
+ (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c),
+ &sonypi_compat.fifo_lock) == sizeof(c))) {
if (put_user(c, buf++))
return -EFAULT;
ret++;
@@ -2147,7 +2148,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
- if (kfifo_len(sonypi_compat.fifo))
+ if (kfifo_len(&sonypi_compat.fifo))
return POLLIN | POLLRDNORM;
return 0;
}
@@ -2309,7 +2310,8 @@ static struct miscdevice sonypi_misc_device = {
static void sonypi_compat_report_event(u8 event)
{
- kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event));
+ kfifo_in_locked(&sonypi_compat.fifo, (unsigned char *)&event,
+ sizeof(event), &sonypi_compat.fifo_lock);
kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_compat.fifo_proc_list);
}
@@ -2319,11 +2321,11 @@ static int sonypi_compat_init(void)
int error;
spin_lock_init(&sonypi_compat.fifo_lock);
- sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
- &sonypi_compat.fifo_lock);
- if (IS_ERR(sonypi_compat.fifo)) {
+ error =
+ kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
+ if (error) {
printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
- return PTR_ERR(sonypi_compat.fifo);
+ return error;
}
init_waitqueue_head(&sonypi_compat.fifo_proc_list);
@@ -2342,14 +2344,14 @@ static int sonypi_compat_init(void)
return 0;
err_free_kfifo:
- kfifo_free(sonypi_compat.fifo);
+ kfifo_free(&sonypi_compat.fifo);
return error;
}
static void sonypi_compat_exit(void)
{
misc_deregister(&sonypi_misc_device);
- kfifo_free(sonypi_compat.fifo);
+ kfifo_free(&sonypi_compat.fifo);
}
#else
static int sonypi_compat_init(void) { return 0; }
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0ed84806f8a..448c8aeb166 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -21,8 +21,8 @@
* 02110-1301, USA.
*/
-#define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020500
+#define TPACPI_VERSION "0.24"
+#define TPACPI_SYSFS_VERSION 0x020700
/*
* Changelog:
@@ -61,6 +61,7 @@
#include <linux/nvram.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/sysfs.h>
#include <linux/backlight.h>
#include <linux/fb.h>
@@ -76,6 +77,10 @@
#include <linux/jiffies.h>
#include <linux/workqueue.h>
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
#include <acpi/acpi_drivers.h>
#include <linux/pci_ids.h>
@@ -231,6 +236,7 @@ enum tpacpi_hkey_event_t {
#define TPACPI_DBG_HKEY 0x0008
#define TPACPI_DBG_FAN 0x0010
#define TPACPI_DBG_BRGHT 0x0020
+#define TPACPI_DBG_MIXER 0x0040
#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
@@ -256,7 +262,7 @@ struct tp_acpi_drv_struct {
struct ibm_struct {
char *name;
- int (*read) (char *);
+ int (*read) (struct seq_file *);
int (*write) (char *);
void (*exit) (void);
void (*resume) (void);
@@ -298,6 +304,7 @@ static struct {
u32 fan_ctrl_status_undef:1;
u32 second_fan:1;
u32 beep_needs_two_args:1;
+ u32 mixer_no_level_control:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
u32 platform_drv_attrs_registered:1;
@@ -309,6 +316,7 @@ static struct {
static struct {
u16 hotkey_mask_ff:1;
+ u16 volume_ctrl_forbidden:1;
} tp_warned;
struct thinkpad_id_data {
@@ -425,6 +433,12 @@ static void tpacpi_log_usertask(const char * const what)
.ec = TPACPI_MATCH_ANY, \
.quirks = (__quirk) }
+#define TPACPI_QEC_LNV(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = (__quirk) }
+
struct tpacpi_quirk {
unsigned int vendor;
u16 bios;
@@ -776,36 +790,25 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
****************************************************************************
****************************************************************************/
-static int dispatch_procfs_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int dispatch_proc_show(struct seq_file *m, void *v)
{
- struct ibm_struct *ibm = data;
- int len;
+ struct ibm_struct *ibm = m->private;
if (!ibm || !ibm->read)
return -EINVAL;
+ return ibm->read(m);
+}
- len = ibm->read(page);
- if (len < 0)
- return len;
-
- if (len <= off + count)
- *eof = 1;
- *start = page + off;
- len -= off;
- if (len > count)
- len = count;
- if (len < 0)
- len = 0;
-
- return len;
+static int dispatch_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dispatch_proc_show, PDE(inode)->data);
}
-static int dispatch_procfs_write(struct file *file,
+static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
- unsigned long count, void *data)
+ size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = data;
+ struct ibm_struct *ibm = PDE(file->f_path.dentry->d_inode)->data;
char *kernbuf;
int ret;
@@ -834,6 +837,15 @@ static int dispatch_procfs_write(struct file *file,
return ret;
}
+static const struct file_operations dispatch_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = dispatch_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dispatch_proc_write,
+};
+
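With dispatch_proc_fops in place, each subdriver's ->read() just prints into a seq_file and the length/offset bookkeeping of the old read_proc interface disappears. How these fops get attached to the procfs entry is not visible in this hunk; the sketch below shows the conventional proc_create_data() hookup and is an assumption about this patch, with example_register_proc() being a hypothetical name:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Sketch: attach the seq_file fops above to one subdriver's proc entry.
 * The entry's data pointer is the ibm_struct, which is what
 * dispatch_proc_open()/dispatch_proc_write() expect to find via PDE(). */
static int example_register_proc(struct ibm_struct *ibm,
				 struct proc_dir_entry *proc_dir)
{
	struct proc_dir_entry *entry;

	entry = proc_create_data(ibm->name,
				 S_IRUGO | (ibm->write ? S_IWUSR : 0),
				 proc_dir, &dispatch_proc_fops, ibm);
	return entry ? 0 : -ENODEV;
}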
static char *next_cmd(char **cmds)
{
char *start = *cmds;
@@ -1006,11 +1018,8 @@ static int parse_strtoul(const char *buf,
{
char *endp;
- while (*buf && isspace(*buf))
- buf++;
- *value = simple_strtoul(buf, &endp, 0);
- while (*endp && isspace(*endp))
- endp++;
+ *value = simple_strtoul(skip_spaces(buf), &endp, 0);
+ endp = skip_spaces(endp);
if (*endp || *value > max)
return -EINVAL;
@@ -1264,6 +1273,7 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
struct tpacpi_rfk *atp_rfk;
int res;
bool sw_state = false;
+ bool hw_state;
int sw_status;
BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1298,7 +1308,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
}
}
- rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
+ hw_state = tpacpi_rfk_check_hwblock_state();
+ rfkill_set_hw_state(atp_rfk->rfkill, hw_state);
res = rfkill_register(atp_rfk->rfkill);
if (res < 0) {
@@ -1311,6 +1322,9 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
}
tpacpi_rfkill_switches[id] = atp_rfk;
+
+ printk(TPACPI_INFO "rfkill switch %s: radio is %sblocked\n",
+ name, (sw_state || hw_state) ? "" : "un");
return 0;
}
@@ -1383,12 +1397,10 @@ static ssize_t tpacpi_rfk_sysfs_enable_store(const enum tpacpi_rfk_id id,
}
/* procfs -------------------------------------------------------------- */
-static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
+static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, struct seq_file *m)
{
- int len = 0;
-
if (id >= TPACPI_RFK_SW_MAX)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
int status;
@@ -1402,13 +1414,13 @@ static int tpacpi_rfk_procfs_read(const enum tpacpi_rfk_id id, char *p)
return status;
}
- len += sprintf(p + len, "status:\t\t%s\n",
+ seq_printf(m, "status:\t\t%s\n",
(status == TPACPI_RFK_RADIO_ON) ?
"enabled" : "disabled");
- len += sprintf(p + len, "commands:\tenable, disable\n");
+ seq_printf(m, "commands:\tenable, disable\n");
}
- return len;
+ return 0;
}
static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
@@ -1779,7 +1791,7 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
TPV_QL1('7', '9', 'E', '3', '5', '0'), /* T60/p */
TPV_QL1('7', 'C', 'D', '2', '2', '2'), /* R60, R60i */
- TPV_QL0('7', 'E', 'D', '0'), /* R60e, R60i */
+ TPV_QL1('7', 'E', 'D', '0', '1', '5'), /* R60e, R60i */
/* BIOS FW BIOS VERS EC FW EC VERS */
TPV_QI2('1', 'W', '9', '0', '1', 'V', '2', '8'), /* R50e (1) */
@@ -1795,8 +1807,8 @@ static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
TPV_QI1('7', '4', '6', '4', '2', '7'), /* X41 (0) */
TPV_QI1('7', '5', '6', '0', '2', '0'), /* X41t (0) */
- TPV_QL0('7', 'B', 'D', '7'), /* X60/s */
- TPV_QL0('7', 'J', '3', '0'), /* X60t */
+ TPV_QL1('7', 'B', 'D', '7', '4', '0'), /* X60/s */
+ TPV_QL1('7', 'J', '3', '0', '1', '3'), /* X60t */
/* (0) - older versions lack DMI EC fw string and functionality */
/* (1) - older versions known to lack functionality */
@@ -1886,14 +1898,11 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
return 0;
}
-static int thinkpad_acpi_driver_read(char *p)
+static int thinkpad_acpi_driver_read(struct seq_file *m)
{
- int len = 0;
-
- len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
- len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
-
- return len;
+ seq_printf(m, "driver:\t\t%s\n", TPACPI_DESC);
+ seq_printf(m, "version:\t%s\n", TPACPI_VERSION);
+ return 0;
}
static struct ibm_struct thinkpad_acpi_driver_data = {
@@ -2189,7 +2198,8 @@ static int hotkey_mask_set(u32 mask)
fwmask, hotkey_acpi_mask);
}
- hotkey_mask_warn_incomplete_mask();
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+ hotkey_mask_warn_incomplete_mask();
return rc;
}
@@ -3185,6 +3195,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
int res, i;
int status;
int hkeyv;
+ bool radiosw_state = false;
+ bool tabletsw_state = false;
unsigned long quirks;
@@ -3290,6 +3302,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
if (dbg_wlswemul) {
tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!tpacpi_wlsw_emulstate;
printk(TPACPI_INFO
"radio switch emulation enabled\n");
} else
@@ -3297,6 +3310,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Not all thinkpads have a hardware radio switch */
if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
+ radiosw_state = !!status;
printk(TPACPI_INFO
"radio switch found; radios are %s\n",
enabled(status, 0));
@@ -3308,11 +3322,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* For X41t, X60t, X61t Tablets... */
if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
tp_features.hotkey_tablet = 1;
+ tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
printk(TPACPI_INFO
"possible tablet mode switch found; "
"ThinkPad in %s mode\n",
- (status & TP_HOTKEY_TABLET_MASK)?
- "tablet" : "laptop");
+ (tabletsw_state) ? "tablet" : "laptop");
res = add_to_attr_set(hotkey_dev_attributes,
&dev_attr_hotkey_tablet_mode.attr);
}
@@ -3347,16 +3361,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
TPACPI_HOTKEY_MAP_SIZE);
}
- set_bit(EV_KEY, tpacpi_inputdev->evbit);
- set_bit(EV_MSC, tpacpi_inputdev->evbit);
- set_bit(MSC_SCAN, tpacpi_inputdev->mscbit);
+ input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
tpacpi_inputdev->keycodemax = TPACPI_HOTKEY_MAP_LEN;
tpacpi_inputdev->keycode = hotkey_keycode_map;
for (i = 0; i < TPACPI_HOTKEY_MAP_LEN; i++) {
if (hotkey_keycode_map[i] != KEY_RESERVED) {
- set_bit(hotkey_keycode_map[i],
- tpacpi_inputdev->keybit);
+ input_set_capability(tpacpi_inputdev, EV_KEY,
+ hotkey_keycode_map[i]);
} else {
if (i < sizeof(hotkey_reserved_mask)*8)
hotkey_reserved_mask |= 1 << i;
@@ -3364,12 +3376,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
}
if (tp_features.hotkey_wlsw) {
- set_bit(EV_SW, tpacpi_inputdev->evbit);
- set_bit(SW_RFKILL_ALL, tpacpi_inputdev->swbit);
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_RFKILL_ALL);
+ input_report_switch(tpacpi_inputdev,
+ SW_RFKILL_ALL, radiosw_state);
}
if (tp_features.hotkey_tablet) {
- set_bit(EV_SW, tpacpi_inputdev->evbit);
- set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
+ input_set_capability(tpacpi_inputdev, EV_SW, SW_TABLET_MODE);
+ input_report_switch(tpacpi_inputdev,
+ SW_TABLET_MODE, tabletsw_state);
}
/* Do not issue duplicate brightness change events to
@@ -3436,8 +3450,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
tpacpi_inputdev->close = &hotkey_inputdev_close;
hotkey_poll_setup_safe(true);
- tpacpi_send_radiosw_update();
- tpacpi_input_send_tabletsw();
return 0;
@@ -3545,49 +3557,57 @@ static bool hotkey_notify_usrevent(const u32 hkey,
}
}
+static void thermal_dump_all_sensors(void);
+
static bool hotkey_notify_thermal(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
{
+ bool known = true;
+
/* 0x6000-0x6FFF: thermal alarms */
*send_acpi_ev = true;
*ignore_acpi_ev = false;
switch (hkey) {
+ case TP_HKEY_EV_THM_TABLE_CHANGED:
+ printk(TPACPI_INFO
+ "EC reports that Thermal Table has changed\n");
+ /* recommended action: do nothing, we don't have
+ * Lenovo ATM information */
+ return true;
case TP_HKEY_EV_ALARM_BAT_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: battery is too hot!\n");
/* recommended action: warn user through gui */
- return true;
+ break;
case TP_HKEY_EV_ALARM_BAT_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: battery is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
- return true;
+ break;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
printk(TPACPI_CRIT
"THERMAL ALARM: "
"a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
- return true;
+ break;
case TP_HKEY_EV_ALARM_SENSOR_XHOT:
printk(TPACPI_ALERT
"THERMAL EMERGENCY: "
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
- return true;
- case TP_HKEY_EV_THM_TABLE_CHANGED:
- printk(TPACPI_INFO
- "EC reports that Thermal Table has changed\n");
- /* recommended action: do nothing, we don't have
- * Lenovo ATM information */
- return true;
+ break;
default:
printk(TPACPI_ALERT
"THERMAL ALERT: unknown thermal alarm received\n");
- return false;
+ known = false;
}
+
+ thermal_dump_all_sensors();
+
+ return known;
}
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
@@ -3730,14 +3750,13 @@ static void hotkey_resume(void)
}
/* procfs -------------------------------------------------------------- */
-static int hotkey_read(char *p)
+static int hotkey_read(struct seq_file *m)
{
int res, status;
- int len = 0;
if (!tp_features.hotkey) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
if (mutex_lock_killable(&hotkey_mutex))
@@ -3749,17 +3768,16 @@ static int hotkey_read(char *p)
if (res)
return res;
- len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "status:\t\t%s\n", enabled(status, 0));
if (hotkey_all_mask) {
- len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
- len += sprintf(p + len,
- "commands:\tenable, disable, reset, <mask>\n");
+ seq_printf(m, "mask:\t\t0x%08x\n", hotkey_user_mask);
+ seq_printf(m, "commands:\tenable, disable, reset, <mask>\n");
} else {
- len += sprintf(p + len, "mask:\t\tnot supported\n");
- len += sprintf(p + len, "commands:\tenable, disable, reset\n");
+ seq_printf(m, "mask:\t\tnot supported\n");
+ seq_printf(m, "commands:\tenable, disable, reset\n");
}
- return len;
+ return 0;
}
static void hotkey_enabledisable_warn(bool enable)
@@ -3866,15 +3884,6 @@ enum {
#define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw"
-static void bluetooth_suspend(pm_message_t state)
-{
- /* Try to make sure radio will resume powered off */
- if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
- TP_ACPI_BLTH_PWR_OFF_ON_RESUME))
- vdbg_printk(TPACPI_DBG_RFKILL,
- "bluetooth power down on resume request failed\n");
-}
-
static int bluetooth_get_status(void)
{
int status;
@@ -3908,10 +3917,9 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
#endif
/* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
+ status = TP_ACPI_BLUETOOTH_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status = TP_ACPI_BLUETOOTH_RADIOSSW;
- else
- status = 0;
+ status |= TP_ACPI_BLUETOOTH_RADIOSSW;
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
@@ -4035,9 +4043,9 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
}
/* procfs -------------------------------------------------------------- */
-static int bluetooth_read(char *p)
+static int bluetooth_read(struct seq_file *m)
{
- return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, p);
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_BLUETOOTH_SW_ID, m);
}
static int bluetooth_write(char *buf)
@@ -4050,7 +4058,6 @@ static struct ibm_struct bluetooth_driver_data = {
.read = bluetooth_read,
.write = bluetooth_write,
.exit = bluetooth_exit,
- .suspend = bluetooth_suspend,
.shutdown = bluetooth_shutdown,
};
@@ -4068,15 +4075,6 @@ enum {
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
-static void wan_suspend(pm_message_t state)
-{
- /* Try to make sure radio will resume powered off */
- if (!acpi_evalf(NULL, NULL, "\\WGSV", "qvd",
- TP_ACPI_WGSV_PWR_OFF_ON_RESUME))
- vdbg_printk(TPACPI_DBG_RFKILL,
- "WWAN power down on resume request failed\n");
-}
-
static int wan_get_status(void)
{
int status;
@@ -4109,11 +4107,10 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
}
#endif
- /* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */
+ /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
+ status = TP_ACPI_WANCARD_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status = TP_ACPI_WANCARD_RADIOSSW;
- else
- status = 0;
+ status |= TP_ACPI_WANCARD_RADIOSSW;
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
@@ -4236,9 +4233,9 @@ static int __init wan_init(struct ibm_init_struct *iibm)
}
/* procfs -------------------------------------------------------------- */
-static int wan_read(char *p)
+static int wan_read(struct seq_file *m)
{
- return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, p);
+ return tpacpi_rfk_procfs_read(TPACPI_RFK_WWAN_SW_ID, m);
}
static int wan_write(char *buf)
@@ -4251,7 +4248,6 @@ static struct ibm_struct wan_driver_data = {
.read = wan_read,
.write = wan_write,
.exit = wan_exit,
- .suspend = wan_suspend,
.shutdown = wan_shutdown,
};
@@ -4614,14 +4610,13 @@ static int video_expand_toggle(void)
/* not reached */
}
-static int video_read(char *p)
+static int video_read(struct seq_file *m)
{
int status, autosw;
- int len = 0;
if (video_supported == TPACPI_VIDEO_NONE) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
status = video_outputsw_get();
@@ -4632,20 +4627,20 @@ static int video_read(char *p)
if (autosw < 0)
return autosw;
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "lcd:\t\t%s\n", enabled(status, 0));
- len += sprintf(p + len, "crt:\t\t%s\n", enabled(status, 1));
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "lcd:\t\t%s\n", enabled(status, 0));
+ seq_printf(m, "crt:\t\t%s\n", enabled(status, 1));
if (video_supported == TPACPI_VIDEO_NEW)
- len += sprintf(p + len, "dvi:\t\t%s\n", enabled(status, 3));
- len += sprintf(p + len, "auto:\t\t%s\n", enabled(autosw, 0));
- len += sprintf(p + len, "commands:\tlcd_enable, lcd_disable\n");
- len += sprintf(p + len, "commands:\tcrt_enable, crt_disable\n");
+ seq_printf(m, "dvi:\t\t%s\n", enabled(status, 3));
+ seq_printf(m, "auto:\t\t%s\n", enabled(autosw, 0));
+ seq_printf(m, "commands:\tlcd_enable, lcd_disable\n");
+ seq_printf(m, "commands:\tcrt_enable, crt_disable\n");
if (video_supported == TPACPI_VIDEO_NEW)
- len += sprintf(p + len, "commands:\tdvi_enable, dvi_disable\n");
- len += sprintf(p + len, "commands:\tauto_enable, auto_disable\n");
- len += sprintf(p + len, "commands:\tvideo_switch, expand_toggle\n");
+ seq_printf(m, "commands:\tdvi_enable, dvi_disable\n");
+ seq_printf(m, "commands:\tauto_enable, auto_disable\n");
+ seq_printf(m, "commands:\tvideo_switch, expand_toggle\n");
- return len;
+ return 0;
}
static int video_write(char *buf)
@@ -4837,25 +4832,24 @@ static void light_exit(void)
flush_workqueue(tpacpi_wq);
}
-static int light_read(char *p)
+static int light_read(struct seq_file *m)
{
- int len = 0;
int status;
if (!tp_features.light) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
} else if (!tp_features.light_status) {
- len += sprintf(p + len, "status:\t\tunknown\n");
- len += sprintf(p + len, "commands:\ton, off\n");
+ seq_printf(m, "status:\t\tunknown\n");
+ seq_printf(m, "commands:\ton, off\n");
} else {
status = light_get_status();
if (status < 0)
return status;
- len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
- len += sprintf(p + len, "commands:\ton, off\n");
+ seq_printf(m, "status:\t\t%s\n", onoff(status, 0));
+ seq_printf(m, "commands:\ton, off\n");
}
- return len;
+ return 0;
}
static int light_write(char *buf)
@@ -4933,20 +4927,18 @@ static void cmos_exit(void)
device_remove_file(&tpacpi_pdev->dev, &dev_attr_cmos_command);
}
-static int cmos_read(char *p)
+static int cmos_read(struct seq_file *m)
{
- int len = 0;
-
/* cmos not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
R30, R31, T20-22, X20-21 */
if (!cmos_handle)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-21)\n");
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-21)\n");
}
- return len;
+ return 0;
}
static int cmos_write(char *buf)
@@ -5321,15 +5313,13 @@ static int __init led_init(struct ibm_init_struct *iibm)
((s) == TPACPI_LED_OFF ? "off" : \
((s) == TPACPI_LED_ON ? "on" : "blinking"))
-static int led_read(char *p)
+static int led_read(struct seq_file *m)
{
- int len = 0;
-
if (!led_supported) {
- len += sprintf(p + len, "status:\t\tnot supported\n");
- return len;
+ seq_printf(m, "status:\t\tnot supported\n");
+ return 0;
}
- len += sprintf(p + len, "status:\t\tsupported\n");
+ seq_printf(m, "status:\t\tsupported\n");
if (led_supported == TPACPI_LED_570) {
/* 570 */
@@ -5338,15 +5328,15 @@ static int led_read(char *p)
status = led_get_status(i);
if (status < 0)
return -EIO;
- len += sprintf(p + len, "%d:\t\t%s\n",
+ seq_printf(m, "%d:\t\t%s\n",
i, str_led_status(status));
}
}
- len += sprintf(p + len, "commands:\t"
+ seq_printf(m, "commands:\t"
"<led> on, <led> off, <led> blink (<led> is 0-15)\n");
- return len;
+ return 0;
}
static int led_write(char *buf)
@@ -5419,18 +5409,16 @@ static int __init beep_init(struct ibm_init_struct *iibm)
return (beep_handle)? 0 : 1;
}
-static int beep_read(char *p)
+static int beep_read(struct seq_file *m)
{
- int len = 0;
-
if (!beep_handle)
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
else {
- len += sprintf(p + len, "status:\t\tsupported\n");
- len += sprintf(p + len, "commands:\t<cmd> (<cmd> is 0-17)\n");
+ seq_printf(m, "status:\t\tsupported\n");
+ seq_printf(m, "commands:\t<cmd> (<cmd> is 0-17)\n");
}
- return len;
+ return 0;
}
static int beep_write(char *buf)
@@ -5483,8 +5471,11 @@ enum { /* TPACPI_THERMAL_TPEC_* */
TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
+
+ TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
};
+
#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
struct ibm_thermal_sensors_struct {
s32 temp[TPACPI_MAX_THERMAL_SENSORS];
@@ -5574,6 +5565,28 @@ static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
return n;
}
+static void thermal_dump_all_sensors(void)
+{
+ int n, i;
+ struct ibm_thermal_sensors_struct t;
+
+ n = thermal_get_sensors(&t);
+ if (n <= 0)
+ return;
+
+ printk(TPACPI_NOTICE
+ "temperatures (Celsius):");
+
+ for (i = 0; i < n; i++) {
+ if (t.temp[i] != TPACPI_THERMAL_SENSOR_NA)
+ printk(KERN_CONT " %d", (int)(t.temp[i] / 1000));
+ else
+ printk(KERN_CONT " N/A");
+ }
+
+ printk(KERN_CONT "\n");
+}
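thermal_dump_all_sensors() is called from the thermal alarm path added earlier in this patch, so every alarm now logs a one-line temperature snapshot. The two "not available" constants differ only by the millidegree scaling, which is why the sysfs check further down switches from TP_EC_THERMAL_TMP_NA * 1000 to TPACPI_THERMAL_SENSOR_NA:

/* Relationship between the two NA markers:
 *
 *   TP_EC_THERMAL_TMP_NA     = -128      raw EC byte, degrees Celsius
 *   TPACPI_THERMAL_SENSOR_NA = -128000   = -128 * 1000, millidegrees
 *
 * thermal_get_sensor() reports millidegrees, so a missing sensor
 * surfaces as -128000 and is printed as "N/A" above.
 */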
+
/* sysfs temp##_input -------------------------------------------------- */
static ssize_t thermal_temp_input_show(struct device *dev,
@@ -5589,7 +5602,7 @@ static ssize_t thermal_temp_input_show(struct device *dev,
res = thermal_get_sensor(idx, &value);
if (res)
return res;
- if (value == TP_EC_THERMAL_TMP_NA * 1000)
+ if (value == TPACPI_THERMAL_SENSOR_NA)
return -ENXIO;
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -5766,9 +5779,8 @@ static void thermal_exit(void)
}
}
-static int thermal_read(char *p)
+static int thermal_read(struct seq_file *m)
{
- int len = 0;
int n, i;
struct ibm_thermal_sensors_struct t;
@@ -5776,16 +5788,16 @@ static int thermal_read(char *p)
if (unlikely(n < 0))
return n;
- len += sprintf(p + len, "temperatures:\t");
+ seq_printf(m, "temperatures:\t");
if (n > 0) {
for (i = 0; i < (n - 1); i++)
- len += sprintf(p + len, "%d ", t.temp[i] / 1000);
- len += sprintf(p + len, "%d\n", t.temp[i] / 1000);
+ seq_printf(m, "%d ", t.temp[i] / 1000);
+ seq_printf(m, "%d\n", t.temp[i] / 1000);
} else
- len += sprintf(p + len, "not supported\n");
+ seq_printf(m, "not supported\n");
- return len;
+ return 0;
}
static struct ibm_struct thermal_driver_data = {
@@ -5800,39 +5812,38 @@ static struct ibm_struct thermal_driver_data = {
static u8 ecdump_regs[256];
-static int ecdump_read(char *p)
+static int ecdump_read(struct seq_file *m)
{
- int len = 0;
int i, j;
u8 v;
- len += sprintf(p + len, "EC "
+ seq_printf(m, "EC "
" +00 +01 +02 +03 +04 +05 +06 +07"
" +08 +09 +0a +0b +0c +0d +0e +0f\n");
for (i = 0; i < 256; i += 16) {
- len += sprintf(p + len, "EC 0x%02x:", i);
+ seq_printf(m, "EC 0x%02x:", i);
for (j = 0; j < 16; j++) {
if (!acpi_ec_read(i + j, &v))
break;
if (v != ecdump_regs[i + j])
- len += sprintf(p + len, " *%02x", v);
+ seq_printf(m, " *%02x", v);
else
- len += sprintf(p + len, " %02x", v);
+ seq_printf(m, " %02x", v);
ecdump_regs[i + j] = v;
}
- len += sprintf(p + len, "\n");
+ seq_putc(m, '\n');
if (j != 16)
break;
}
/* These are way too dangerous to advertise openly... */
#if 0
- len += sprintf(p + len, "commands:\t0x<offset> 0x<value>"
+ seq_printf(m, "commands:\t0x<offset> 0x<value>"
" (<offset> is 00-ff, <value> is 00-ff)\n");
- len += sprintf(p + len, "commands:\t0x<offset> <value> "
+ seq_printf(m, "commands:\t0x<offset> <value> "
" (<offset> is 00-ff, <value> is 0-255)\n");
#endif
- return len;
+ return 0;
}
static int ecdump_write(char *buf)
@@ -6095,6 +6106,12 @@ static int brightness_get(struct backlight_device *bd)
return status & TP_EC_BACKLIGHT_LVLMSK;
}
+static void tpacpi_brightness_notify_change(void)
+{
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_HOTKEY);
+}
+
static struct backlight_ops ibm_backlight_data = {
.get_brightness = brightness_get,
.update_status = brightness_update_status,
@@ -6123,8 +6140,8 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
/* Models with Intel Extreme Graphics 2 */
TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
- TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
- TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC),
+ TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
/* Models with Intel GMA900 */
TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */
@@ -6249,6 +6266,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
backlight_update_status(ibm_backlight_device);
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
+ "brightness: registering brightness hotkeys "
+ "as change notification\n");
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
+ | TP_ACPI_HKEY_BRGHTUP_MASK
+			  | TP_ACPI_HKEY_BRGHTDWN_MASK);
return 0;
}
@@ -6273,23 +6296,22 @@ static void brightness_exit(void)
tpacpi_brightness_checkpoint_nvram();
}
-static int brightness_read(char *p)
+static int brightness_read(struct seq_file *m)
{
- int len = 0;
int level;
level = brightness_get(NULL);
if (level < 0) {
- len += sprintf(p + len, "level:\t\tunreadable\n");
+ seq_printf(m, "level:\t\tunreadable\n");
} else {
- len += sprintf(p + len, "level:\t\t%d\n", level);
- len += sprintf(p + len, "commands:\tup, down\n");
- len += sprintf(p + len, "commands:\tlevel <level>"
+ seq_printf(m, "level:\t\t%d\n", level);
+ seq_printf(m, "commands:\tup, down\n");
+ seq_printf(m, "commands:\tlevel <level>"
" (<level> is 0-%d)\n",
(tp_features.bright_16levels) ? 15 : 7);
}
- return len;
+ return 0;
}
static int brightness_write(char *buf)
@@ -6325,6 +6347,9 @@ static int brightness_write(char *buf)
* Doing it this way makes the syscall restartable in case of EINTR
*/
rc = brightness_set(level);
+ if (!rc && ibm_backlight_device)
+ backlight_force_update(ibm_backlight_device,
+ BACKLIGHT_UPDATE_SYSFS);
return (rc == -EINTR)? -ERESTARTSYS : rc;
}
@@ -6341,99 +6366,654 @@ static struct ibm_struct brightness_driver_data = {
* Volume subdriver
*/
-static int volume_offset = 0x30;
+/*
+ * IBM ThinkPads have a simple volume controller with MUTE gating.
+ * Very early Lenovo ThinkPads follow the IBM ThinkPad spec.
+ *
+ * Since the *61 series (and probably also the later *60 series), Lenovo
+ * ThinkPads only implement the MUTE gate.
+ *
+ * EC register 0x30
+ * Bit 6: MUTE (1 mutes sound)
+ * Bit 3-0: Volume
+ * Other bits should be zero as far as we know.
+ *
+ * This is also stored in CMOS NVRAM, byte 0x60, bit 6 (MUTE), and
+ * bits 3-0 (volume). Other bits in NVRAM may have other functions,
+ * such as bit 7 which is used to detect repeated presses of MUTE,
+ * and we leave them unchanged.
+ */
+
+#define TPACPI_ALSA_DRVNAME "ThinkPad EC"
+#define TPACPI_ALSA_SHRTNAME "ThinkPad Console Audio Control"
+#define TPACPI_ALSA_MIXERNAME TPACPI_ALSA_SHRTNAME
+
+static int alsa_index = SNDRV_DEFAULT_IDX1;
+static char *alsa_id = "ThinkPadEC";
+static int alsa_enable = SNDRV_DEFAULT_ENABLE1;
+
+struct tpacpi_alsa_data {
+ struct snd_card *card;
+ struct snd_ctl_elem_id *ctl_mute_id;
+ struct snd_ctl_elem_id *ctl_vol_id;
+};
+
+static struct snd_card *alsa_card;
+
+enum {
+ TP_EC_AUDIO = 0x30,
+
+ /* TP_EC_AUDIO bits */
+ TP_EC_AUDIO_MUTESW = 6,
+
+ /* TP_EC_AUDIO bitmasks */
+ TP_EC_AUDIO_LVL_MSK = 0x0F,
+ TP_EC_AUDIO_MUTESW_MSK = (1 << TP_EC_AUDIO_MUTESW),
+
+ /* Maximum volume */
+ TP_EC_VOLUME_MAX = 14,
+};
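The comment and constants above describe the whole EC mixer register: bit 6 gates mute and bits 3-0 hold the volume level (0 to TP_EC_VOLUME_MAX). A short sketch of decoding a raw register value with these masks, using the same acpi_ec_read() accessor the driver relies on; example_read_mixer() is an illustrative name only:

/* Sketch: decode EC register 0x30 using the masks defined above. */
static int example_read_mixer(u8 *level, bool *muted)
{
	u8 raw;

	if (!acpi_ec_read(TP_EC_AUDIO, &raw))
		return -EIO;

	*muted = !!(raw & TP_EC_AUDIO_MUTESW_MSK);	/* bit 6 */
	*level = raw & TP_EC_AUDIO_LVL_MSK;		/* bits 3-0, 0..14 */
	return 0;
}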
+
+enum tpacpi_volume_access_mode {
+ TPACPI_VOL_MODE_AUTO = 0, /* Not implemented yet */
+ TPACPI_VOL_MODE_EC, /* Pure EC control */
+ TPACPI_VOL_MODE_UCMS_STEP, /* UCMS step-based control: N/A */
+ TPACPI_VOL_MODE_ECNVRAM, /* EC control w/ NVRAM store */
+ TPACPI_VOL_MODE_MAX
+};
+
+enum tpacpi_volume_capabilities {
+ TPACPI_VOL_CAP_AUTO = 0, /* Use white/blacklist */
+ TPACPI_VOL_CAP_VOLMUTE, /* Output vol and mute */
+ TPACPI_VOL_CAP_MUTEONLY, /* Output mute only */
+ TPACPI_VOL_CAP_MAX
+};
+
+static enum tpacpi_volume_access_mode volume_mode =
+ TPACPI_VOL_MODE_MAX;
+
+static enum tpacpi_volume_capabilities volume_capabilities;
+static int volume_control_allowed;
-static int volume_read(char *p)
+/*
+ * Used to synchronize writers to TP_EC_AUDIO and
+ * TP_NVRAM_ADDR_MIXER, as we need to do read-modify-write
+ */
+static struct mutex volume_mutex;
+
+static void tpacpi_volume_checkpoint_nvram(void)
{
- int len = 0;
- u8 level;
+ u8 lec = 0;
+ u8 b_nvram;
+ u8 ec_mask;
+
+ if (volume_mode != TPACPI_VOL_MODE_ECNVRAM)
+ return;
+ if (!volume_control_allowed)
+ return;
+
+ vdbg_printk(TPACPI_DBG_MIXER,
+ "trying to checkpoint mixer state to NVRAM...\n");
- if (!acpi_ec_read(volume_offset, &level)) {
- len += sprintf(p + len, "level:\t\tunreadable\n");
+ if (tp_features.mixer_no_level_control)
+ ec_mask = TP_EC_AUDIO_MUTESW_MSK;
+ else
+ ec_mask = TP_EC_AUDIO_MUTESW_MSK | TP_EC_AUDIO_LVL_MSK;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return;
+
+ if (unlikely(!acpi_ec_read(TP_EC_AUDIO, &lec)))
+ goto unlock;
+ lec &= ec_mask;
+ b_nvram = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
+
+ if (lec != (b_nvram & ec_mask)) {
+ /* NVRAM needs update */
+ b_nvram &= ~ec_mask;
+ b_nvram |= lec;
+ nvram_write_byte(b_nvram, TP_NVRAM_ADDR_MIXER);
+ dbg_printk(TPACPI_DBG_MIXER,
+ "updated NVRAM mixer status to 0x%02x (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
} else {
- len += sprintf(p + len, "level:\t\t%d\n", level & 0xf);
- len += sprintf(p + len, "mute:\t\t%s\n", onoff(level, 6));
- len += sprintf(p + len, "commands:\tup, down, mute\n");
- len += sprintf(p + len, "commands:\tlevel <level>"
- " (<level> is 0-15)\n");
+ vdbg_printk(TPACPI_DBG_MIXER,
+ "NVRAM mixer status already is 0x%02x (0x%02x)\n",
+ (unsigned int) lec, (unsigned int) b_nvram);
}
- return len;
+unlock:
+ mutex_unlock(&volume_mutex);
}
-static int volume_write(char *buf)
+static int volume_get_status_ec(u8 *status)
{
- int cmos_cmd, inc, i;
- u8 level, mute;
- int new_level, new_mute;
- char *cmd;
+ u8 s;
- while ((cmd = next_cmd(&buf))) {
- if (!acpi_ec_read(volume_offset, &level))
- return -EIO;
- new_mute = mute = level & 0x40;
- new_level = level = level & 0xf;
+ if (!acpi_ec_read(TP_EC_AUDIO, &s))
+ return -EIO;
- if (strlencmp(cmd, "up") == 0) {
- if (mute)
- new_mute = 0;
- else
- new_level = level == 15 ? 15 : level + 1;
- } else if (strlencmp(cmd, "down") == 0) {
- if (mute)
- new_mute = 0;
- else
- new_level = level == 0 ? 0 : level - 1;
- } else if (sscanf(cmd, "level %d", &new_level) == 1 &&
- new_level >= 0 && new_level <= 15) {
- /* new_level set */
- } else if (strlencmp(cmd, "mute") == 0) {
- new_mute = 0x40;
- } else
- return -EINVAL;
+ *status = s;
- if (new_level != level) {
- /* mute doesn't change */
+ dbg_printk(TPACPI_DBG_MIXER, "status 0x%02x\n", s);
- cmos_cmd = (new_level > level) ?
- TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
- inc = new_level > level ? 1 : -1;
+ return 0;
+}
- if (mute && (issue_thinkpad_cmos_command(cmos_cmd) ||
- !acpi_ec_write(volume_offset, level)))
- return -EIO;
+static int volume_get_status(u8 *status)
+{
+ return volume_get_status_ec(status);
+}
- for (i = level; i != new_level; i += inc)
- if (issue_thinkpad_cmos_command(cmos_cmd) ||
- !acpi_ec_write(volume_offset, i + inc))
- return -EIO;
+static int volume_set_status_ec(const u8 status)
+{
+ if (!acpi_ec_write(TP_EC_AUDIO, status))
+ return -EIO;
- if (mute &&
- (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
- !acpi_ec_write(volume_offset, new_level + mute))) {
- return -EIO;
- }
+ dbg_printk(TPACPI_DBG_MIXER, "set EC mixer to 0x%02x\n", status);
+
+ return 0;
+}
+
+static int volume_set_status(const u8 status)
+{
+ return volume_set_status_ec(status);
+}
+
+static int volume_set_mute_ec(const bool mute)
+{
+ int rc;
+ u8 s, n;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return -EINTR;
+
+ rc = volume_get_status_ec(&s);
+ if (rc)
+ goto unlock;
+
+ n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
+ s & ~TP_EC_AUDIO_MUTESW_MSK;
+
+ if (n != s)
+ rc = volume_set_status_ec(n);
+
+unlock:
+ mutex_unlock(&volume_mutex);
+ return rc;
+}
+
+static int volume_set_mute(const bool mute)
+{
+ dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
+ (mute) ? "" : "un");
+ return volume_set_mute_ec(mute);
+}
+
+static int volume_set_volume_ec(const u8 vol)
+{
+ int rc;
+ u8 s, n;
+
+ if (vol > TP_EC_VOLUME_MAX)
+ return -EINVAL;
+
+ if (mutex_lock_killable(&volume_mutex) < 0)
+ return -EINTR;
+
+ rc = volume_get_status_ec(&s);
+ if (rc)
+ goto unlock;
+
+ n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
+
+ if (n != s)
+ rc = volume_set_status_ec(n);
+
+unlock:
+ mutex_unlock(&volume_mutex);
+ return rc;
+}
+
+static int volume_set_volume(const u8 vol)
+{
+ dbg_printk(TPACPI_DBG_MIXER,
+ "trying to set volume level to %hu\n", vol);
+ return volume_set_volume_ec(vol);
+}
+
+static void volume_alsa_notify_change(void)
+{
+ struct tpacpi_alsa_data *d;
+
+ if (alsa_card && alsa_card->private_data) {
+ d = alsa_card->private_data;
+ if (d->ctl_mute_id)
+ snd_ctl_notify(alsa_card,
+ SNDRV_CTL_EVENT_MASK_VALUE,
+ d->ctl_mute_id);
+ if (d->ctl_vol_id)
+ snd_ctl_notify(alsa_card,
+ SNDRV_CTL_EVENT_MASK_VALUE,
+ d->ctl_vol_id);
+ }
+}
+
+static int volume_alsa_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = TP_EC_VOLUME_MAX;
+ return 0;
+}
+
+static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 s;
+ int rc;
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ ucontrol->value.integer.value[0] = s & TP_EC_AUDIO_LVL_MSK;
+ return 0;
+}
+
+static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return volume_set_volume(ucontrol->value.integer.value[0]);
+}
+
+#define volume_alsa_mute_info snd_ctl_boolean_mono_info
+
+static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 s;
+ int rc;
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ ucontrol->value.integer.value[0] =
+ (s & TP_EC_AUDIO_MUTESW_MSK) ? 0 : 1;
+ return 0;
+}
+
+static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return volume_set_mute(!ucontrol->value.integer.value[0]);
+}
+
+static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Console Playback Volume",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = volume_alsa_vol_info,
+ .get = volume_alsa_vol_get,
+};
+
+static struct snd_kcontrol_new volume_alsa_control_mute __devinitdata = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Console Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = volume_alsa_mute_info,
+ .get = volume_alsa_mute_get,
+};
+
+static void volume_suspend(pm_message_t state)
+{
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static void volume_resume(void)
+{
+ volume_alsa_notify_change();
+}
+
+static void volume_shutdown(void)
+{
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static void volume_exit(void)
+{
+ if (alsa_card) {
+ snd_card_free(alsa_card);
+ alsa_card = NULL;
+ }
+
+ tpacpi_volume_checkpoint_nvram();
+}
+
+static int __init volume_create_alsa_mixer(void)
+{
+ struct snd_card *card;
+ struct tpacpi_alsa_data *data;
+ struct snd_kcontrol *ctl_vol;
+ struct snd_kcontrol *ctl_mute;
+ int rc;
+
+ rc = snd_card_create(alsa_index, alsa_id, THIS_MODULE,
+ sizeof(struct tpacpi_alsa_data), &card);
+ if (rc < 0)
+ return rc;
+ if (!card)
+ return -ENOMEM;
+
+ BUG_ON(!card->private_data);
+ data = card->private_data;
+ data->card = card;
+
+ strlcpy(card->driver, TPACPI_ALSA_DRVNAME,
+ sizeof(card->driver));
+ strlcpy(card->shortname, TPACPI_ALSA_SHRTNAME,
+ sizeof(card->shortname));
+ snprintf(card->mixername, sizeof(card->mixername), "ThinkPad EC %s",
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "(unknown)");
+ snprintf(card->longname, sizeof(card->longname),
+ "%s at EC reg 0x%02x, fw %s", card->shortname, TP_EC_AUDIO,
+ (thinkpad_id.ec_version_str) ?
+ thinkpad_id.ec_version_str : "unknown");
+
+ if (volume_control_allowed) {
+ volume_alsa_control_vol.put = volume_alsa_vol_put;
+ volume_alsa_control_vol.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+
+ volume_alsa_control_mute.put = volume_alsa_mute_put;
+ volume_alsa_control_mute.access =
+ SNDRV_CTL_ELEM_ACCESS_READWRITE;
+ }
+
+ if (!tp_features.mixer_no_level_control) {
+ ctl_vol = snd_ctl_new1(&volume_alsa_control_vol, NULL);
+ rc = snd_ctl_add(card, ctl_vol);
+ if (rc < 0) {
+ printk(TPACPI_ERR
+ "Failed to create ALSA volume control\n");
+ goto err_out;
}
+ data->ctl_vol_id = &ctl_vol->id;
+ }
- if (new_mute != mute) {
- /* level doesn't change */
+ ctl_mute = snd_ctl_new1(&volume_alsa_control_mute, NULL);
+ rc = snd_ctl_add(card, ctl_mute);
+ if (rc < 0) {
+ printk(TPACPI_ERR "Failed to create ALSA mute control\n");
+ goto err_out;
+ }
+ data->ctl_mute_id = &ctl_mute->id;
- cmos_cmd = (new_mute) ?
- TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
+ snd_card_set_dev(card, &tpacpi_pdev->dev);
+ rc = snd_card_register(card);
- if (issue_thinkpad_cmos_command(cmos_cmd) ||
- !acpi_ec_write(volume_offset, level + new_mute))
- return -EIO;
+err_out:
+ if (rc < 0) {
+ snd_card_free(card);
+ card = NULL;
+ }
+
+ alsa_card = card;
+ return rc;
+}
+
+#define TPACPI_VOL_Q_MUTEONLY 0x0001 /* Mute-only control available */
+#define TPACPI_VOL_Q_LEVEL 0x0002 /* Volume control available */
+
+static const struct tpacpi_quirk volume_quirk_table[] __initconst = {
+ /* Whitelist volume level on all IBM by default */
+ { .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY,
+ .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_VOL_Q_LEVEL },
+
+ /* Lenovo models with volume control (needs confirmation) */
+ TPACPI_QEC_LNV('7', 'C', TPACPI_VOL_Q_LEVEL), /* R60/i */
+ TPACPI_QEC_LNV('7', 'E', TPACPI_VOL_Q_LEVEL), /* R60e/i */
+ TPACPI_QEC_LNV('7', '9', TPACPI_VOL_Q_LEVEL), /* T60/p */
+ TPACPI_QEC_LNV('7', 'B', TPACPI_VOL_Q_LEVEL), /* X60/s */
+ TPACPI_QEC_LNV('7', 'J', TPACPI_VOL_Q_LEVEL), /* X60t */
+ TPACPI_QEC_LNV('7', '7', TPACPI_VOL_Q_LEVEL), /* Z60 */
+ TPACPI_QEC_LNV('7', 'F', TPACPI_VOL_Q_LEVEL), /* Z61 */
+
+ /* Whitelist mute-only on all Lenovo by default */
+ { .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY,
+ .ec = TPACPI_MATCH_ANY,
+ .quirks = TPACPI_VOL_Q_MUTEONLY }
+};
+
+static int __init volume_init(struct ibm_init_struct *iibm)
+{
+ unsigned long quirks;
+ int rc;
+
+ vdbg_printk(TPACPI_DBG_INIT, "initializing volume subdriver\n");
+
+ mutex_init(&volume_mutex);
+
+ /*
+ * Check for module parameter bogosity, note that we
+ * init volume_mode to TPACPI_VOL_MODE_MAX in order to be
+ * able to detect "unspecified"
+ */
+ if (volume_mode > TPACPI_VOL_MODE_MAX)
+ return -EINVAL;
+
+ if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
+ printk(TPACPI_ERR
+ "UCMS step volume mode not implemented, "
+ "please contact %s\n", TPACPI_MAIL);
+ return 1;
+ }
+
+ if (volume_capabilities >= TPACPI_VOL_CAP_MAX)
+ return -EINVAL;
+
+ /*
+ * The ALSA mixer is our primary interface.
+ * When disabled, don't install the subdriver at all
+ */
+ if (!alsa_enable) {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "ALSA mixer disabled by parameter, "
+ "not loading volume subdriver...\n");
+ return 1;
+ }
+
+ quirks = tpacpi_check_quirks(volume_quirk_table,
+ ARRAY_SIZE(volume_quirk_table));
+
+ switch (volume_capabilities) {
+ case TPACPI_VOL_CAP_AUTO:
+ if (quirks & TPACPI_VOL_Q_MUTEONLY)
+ tp_features.mixer_no_level_control = 1;
+ else if (quirks & TPACPI_VOL_Q_LEVEL)
+ tp_features.mixer_no_level_control = 0;
+ else
+ return 1; /* no mixer */
+ break;
+ case TPACPI_VOL_CAP_VOLMUTE:
+ tp_features.mixer_no_level_control = 0;
+ break;
+ case TPACPI_VOL_CAP_MUTEONLY:
+ tp_features.mixer_no_level_control = 1;
+ break;
+ default:
+ return 1;
+ }
+
+ if (volume_capabilities != TPACPI_VOL_CAP_AUTO)
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "using user-supplied volume_capabilities=%d\n",
+ volume_capabilities);
+
+ if (volume_mode == TPACPI_VOL_MODE_AUTO ||
+ volume_mode == TPACPI_VOL_MODE_MAX) {
+ volume_mode = TPACPI_VOL_MODE_ECNVRAM;
+
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "driver auto-selected volume_mode=%d\n",
+ volume_mode);
+ } else {
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "using user-supplied volume_mode=%d\n",
+ volume_mode);
+ }
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "mute is supported, volume control is %s\n",
+ str_supported(!tp_features.mixer_no_level_control));
+
+ rc = volume_create_alsa_mixer();
+ if (rc) {
+ printk(TPACPI_ERR
+ "Could not create the ALSA mixer interface\n");
+ return rc;
+ }
+
+ printk(TPACPI_INFO
+ "Console audio control enabled, mode: %s\n",
+ (volume_control_allowed) ?
+ "override (read/write)" :
+ "monitor (read only)");
+
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
+ "registering volume hotkeys as change notification\n");
+ tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
+ | TP_ACPI_HKEY_VOLUP_MASK
+ | TP_ACPI_HKEY_VOLDWN_MASK
+ | TP_ACPI_HKEY_MUTE_MASK);
+
+ return 0;
+}
+
+static int volume_read(struct seq_file *m)
+{
+ u8 status;
+
+ if (volume_get_status(&status) < 0) {
+ seq_printf(m, "level:\t\tunreadable\n");
+ } else {
+ if (tp_features.mixer_no_level_control)
+ seq_printf(m, "level:\t\tunsupported\n");
+ else
+ seq_printf(m, "level:\t\t%d\n",
+ status & TP_EC_AUDIO_LVL_MSK);
+
+ seq_printf(m, "mute:\t\t%s\n",
+ onoff(status, TP_EC_AUDIO_MUTESW));
+
+ if (volume_control_allowed) {
+ seq_printf(m, "commands:\tunmute, mute\n");
+ if (!tp_features.mixer_no_level_control) {
+ seq_printf(m,
+ "commands:\tup, down\n");
+ seq_printf(m,
+ "commands:\tlevel <level>"
+ " (<level> is 0-%d)\n",
+ TP_EC_VOLUME_MAX);
+ }
}
}
return 0;
}
+static int volume_write(char *buf)
+{
+ u8 s;
+ u8 new_level, new_mute;
+ int l;
+ char *cmd;
+ int rc;
+
+ /*
+ * We do allow volume control at driver startup, so that the
+ * user can set initial state through the volume=... parameter hack.
+ */
+ if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
+ if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
+ tp_warned.volume_ctrl_forbidden = 1;
+ printk(TPACPI_NOTICE
+ "Console audio control in monitor mode, "
+ "changes are not allowed.\n");
+ printk(TPACPI_NOTICE
+ "Use the volume_control=1 module parameter "
+ "to enable volume control\n");
+ }
+ return -EPERM;
+ }
+
+ rc = volume_get_status(&s);
+ if (rc < 0)
+ return rc;
+
+ new_level = s & TP_EC_AUDIO_LVL_MSK;
+ new_mute = s & TP_EC_AUDIO_MUTESW_MSK;
+
+ while ((cmd = next_cmd(&buf))) {
+ if (!tp_features.mixer_no_level_control) {
+ if (strlencmp(cmd, "up") == 0) {
+ if (new_mute)
+ new_mute = 0;
+ else if (new_level < TP_EC_VOLUME_MAX)
+ new_level++;
+ continue;
+ } else if (strlencmp(cmd, "down") == 0) {
+ if (new_mute)
+ new_mute = 0;
+ else if (new_level > 0)
+ new_level--;
+ continue;
+ } else if (sscanf(cmd, "level %u", &l) == 1 &&
+ l >= 0 && l <= TP_EC_VOLUME_MAX) {
+ new_level = l;
+ continue;
+ }
+ }
+ if (strlencmp(cmd, "mute") == 0)
+ new_mute = TP_EC_AUDIO_MUTESW_MSK;
+ else if (strlencmp(cmd, "unmute") == 0)
+ new_mute = 0;
+ else
+ return -EINVAL;
+ }
+
+ if (tp_features.mixer_no_level_control) {
+ tpacpi_disclose_usertask("procfs volume", "%smute\n",
+ new_mute ? "" : "un");
+ rc = volume_set_mute(!!new_mute);
+ } else {
+ tpacpi_disclose_usertask("procfs volume",
+ "%smute and set level to %d\n",
+ new_mute ? "" : "un", new_level);
+ rc = volume_set_status(new_mute | new_level);
+ }
+ volume_alsa_notify_change();
+
+ return (rc == -EINTR) ? -ERESTARTSYS : rc;
+}
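Taken together, volume_read() and volume_write() back the driver's procfs node, so day-to-day use (assuming the usual thinkpad-acpi location under /proc/acpi/ibm) looks like "cat /proc/acpi/ibm/volume" to inspect the level and mute state, and echo "unmute" or echo "level 7" into the same file to change them; the latter requires loading the module with volume_control=1, outside of the init-time volume=... parameter path mentioned above.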
+
static struct ibm_struct volume_driver_data = {
.name = "volume",
.read = volume_read,
.write = volume_write,
+ .exit = volume_exit,
+ .suspend = volume_suspend,
+ .resume = volume_resume,
+ .shutdown = volume_shutdown,
};
/*************************************************************************
@@ -7510,9 +8090,8 @@ static void fan_resume(void)
}
}
-static int fan_read(char *p)
+static int fan_read(struct seq_file *m)
{
- int len = 0;
int rc;
u8 status;
unsigned int speed = 0;
@@ -7524,7 +8103,7 @@ static int fan_read(char *p)
if (rc < 0)
return rc;
- len += sprintf(p + len, "status:\t\t%s\n"
+ seq_printf(m, "status:\t\t%s\n"
"level:\t\t%d\n",
(status != 0) ? "enabled" : "disabled", status);
break;
@@ -7535,54 +8114,54 @@ static int fan_read(char *p)
if (rc < 0)
return rc;
- len += sprintf(p + len, "status:\t\t%s\n",
+ seq_printf(m, "status:\t\t%s\n",
(status != 0) ? "enabled" : "disabled");
rc = fan_get_speed(&speed);
if (rc < 0)
return rc;
- len += sprintf(p + len, "speed:\t\t%d\n", speed);
+ seq_printf(m, "speed:\t\t%d\n", speed);
if (status & TP_EC_FAN_FULLSPEED)
/* Disengaged mode takes precedence */
- len += sprintf(p + len, "level:\t\tdisengaged\n");
+ seq_printf(m, "level:\t\tdisengaged\n");
else if (status & TP_EC_FAN_AUTO)
- len += sprintf(p + len, "level:\t\tauto\n");
+ seq_printf(m, "level:\t\tauto\n");
else
- len += sprintf(p + len, "level:\t\t%d\n", status);
+ seq_printf(m, "level:\t\t%d\n", status);
break;
case TPACPI_FAN_NONE:
default:
- len += sprintf(p + len, "status:\t\tnot supported\n");
+ seq_printf(m, "status:\t\tnot supported\n");
}
if (fan_control_commands & TPACPI_FAN_CMD_LEVEL) {
- len += sprintf(p + len, "commands:\tlevel <level>");
+ seq_printf(m, "commands:\tlevel <level>");
switch (fan_control_access_mode) {
case TPACPI_FAN_WR_ACPI_SFAN:
- len += sprintf(p + len, " (<level> is 0-7)\n");
+ seq_printf(m, " (<level> is 0-7)\n");
break;
default:
- len += sprintf(p + len, " (<level> is 0-7, "
+ seq_printf(m, " (<level> is 0-7, "
"auto, disengaged, full-speed)\n");
break;
}
}
if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
- len += sprintf(p + len, "commands:\tenable, disable\n"
+ seq_printf(m, "commands:\tenable, disable\n"
"commands:\twatchdog <timeout> (<timeout> "
"is 0 (off), 1-120 (seconds))\n");
if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
- len += sprintf(p + len, "commands:\tspeed <speed>"
+ seq_printf(m, "commands:\tspeed <speed>"
" (<speed> is 0-65535)\n");
- return len;
+ return 0;
}
static int fan_write_cmd_level(const char *cmd, int *rc)
@@ -7724,10 +8303,23 @@ static struct ibm_struct fan_driver_data = {
*/
static void tpacpi_driver_event(const unsigned int hkey_event)
{
+ if (ibm_backlight_device) {
+ switch (hkey_event) {
+ case TP_HKEY_EV_BRGHT_UP:
+ case TP_HKEY_EV_BRGHT_DOWN:
+ tpacpi_brightness_notify_change();
+ }
+ }
+ if (alsa_card) {
+ switch (hkey_event) {
+ case TP_HKEY_EV_VOL_UP:
+ case TP_HKEY_EV_VOL_DOWN:
+ case TP_HKEY_EV_VOL_MUTE:
+ volume_alsa_notify_change();
+ }
+ }
}
-
-
static void hotkey_driver_event(const unsigned int scancode)
{
tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
@@ -7856,19 +8448,19 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
"%s installed\n", ibm->name);
if (ibm->read) {
- entry = create_proc_entry(ibm->name,
- S_IFREG | S_IRUGO | S_IWUSR,
- proc_dir);
+ mode_t mode;
+
+ mode = S_IRUGO;
+ if (ibm->write)
+ mode |= S_IWUSR;
+ entry = proc_create_data(ibm->name, mode, proc_dir,
+ &dispatch_proc_fops, ibm);
if (!entry) {
printk(TPACPI_ERR "unable to create proc entry %s\n",
ibm->name);
ret = -ENODEV;
goto err_out;
}
- entry->data = ibm;
- entry->read_proc = &dispatch_procfs_read;
- if (ibm->write)
- entry->write_proc = &dispatch_procfs_write;
ibm->flags.proc_created = 1;
}
@@ -8080,6 +8672,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
.data = &brightness_driver_data,
},
{
+ .init = volume_init,
.data = &volume_driver_data,
},
{
@@ -8115,36 +8708,59 @@ static int __init set_ibm_param(const char *val, struct kernel_param *kp)
return -EINVAL;
}
-module_param(experimental, int, 0);
+module_param(experimental, int, 0444);
MODULE_PARM_DESC(experimental,
"Enables experimental features when non-zero");
module_param_named(debug, dbg_level, uint, 0);
MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-module_param(force_load, bool, 0);
+module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load,
"Attempts to load the driver even on a "
"mis-identified ThinkPad when true");
-module_param_named(fan_control, fan_control_allowed, bool, 0);
+module_param_named(fan_control, fan_control_allowed, bool, 0444);
MODULE_PARM_DESC(fan_control,
"Enables setting fan parameters features when true");
-module_param_named(brightness_mode, brightness_mode, uint, 0);
+module_param_named(brightness_mode, brightness_mode, uint, 0444);
MODULE_PARM_DESC(brightness_mode,
"Selects brightness control strategy: "
"0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
-module_param(brightness_enable, uint, 0);
+module_param(brightness_enable, uint, 0444);
MODULE_PARM_DESC(brightness_enable,
"Enables backlight control when 1, disables when 0");
-module_param(hotkey_report_mode, uint, 0);
+module_param(hotkey_report_mode, uint, 0444);
MODULE_PARM_DESC(hotkey_report_mode,
"used for backwards compatibility with userspace, "
"see documentation");
+module_param_named(volume_mode, volume_mode, uint, 0444);
+MODULE_PARM_DESC(volume_mode,
+ "Selects volume control strategy: "
+ "0=auto, 1=EC, 2=N/A, 3=EC+NVRAM");
+
+module_param_named(volume_capabilities, volume_capabilities, uint, 0444);
+MODULE_PARM_DESC(volume_capabilities,
+ "Selects the mixer capabilites: "
+ "0=auto, 1=volume and mute, 2=mute only");
+
+module_param_named(volume_control, volume_control_allowed, bool, 0444);
+MODULE_PARM_DESC(volume_control,
+ "Enables software override for the console audio "
+ "control when true");
+
+/* ALSA module API parameters */
+module_param_named(index, alsa_index, int, 0444);
+MODULE_PARM_DESC(index, "ALSA index for the ACPI EC Mixer");
+module_param_named(id, alsa_id, charp, 0444);
+MODULE_PARM_DESC(id, "ALSA id for the ACPI EC Mixer");
+module_param_named(enable, alsa_enable, bool, 0444);
+MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer");
+
#define TPACPI_PARAM(feature) \
module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \
@@ -8163,25 +8779,25 @@ TPACPI_PARAM(volume);
TPACPI_PARAM(fan);
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
-module_param(dbg_wlswemul, uint, 0);
+module_param(dbg_wlswemul, uint, 0444);
MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
MODULE_PARM_DESC(wlsw_state,
"Initial state of the emulated WLSW switch");
-module_param(dbg_bluetoothemul, uint, 0);
+module_param(dbg_bluetoothemul, uint, 0444);
MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
MODULE_PARM_DESC(bluetooth_state,
"Initial state of the emulated bluetooth switch");
-module_param(dbg_wwanemul, uint, 0);
+module_param(dbg_wwanemul, uint, 0444);
MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
MODULE_PARM_DESC(wwan_state,
"Initial state of the emulated WWAN switch");
-module_param(dbg_uwbemul, uint, 0);
+module_param(dbg_uwbemul, uint, 0444);
MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
MODULE_PARM_DESC(uwb_state,
@@ -8374,6 +8990,7 @@ static int __init thinkpad_acpi_module_init(void)
PCI_VENDOR_ID_IBM;
tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+ tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
}
for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
ret = ibm_init(&ibms_init[i]);
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
new file mode 100644
index 00000000000..a350418e87e
--- /dev/null
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -0,0 +1,144 @@
+/*
+ * Toshiba Bluetooth Enable Driver
+ *
+ * Copyright (C) 2009 Jes Sorensen <Jes.Sorensen@gmail.com>
+ *
+ * Thanks to Matthew Garrett for background info on ACPI innards which
+ * normal people aren't meant to understand :-)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note the Toshiba Bluetooth RFKill switch seems to be a strange
+ * fish. It only provides a BT event when the switch is flipped to
+ * the 'on' position. When flipping it to 'off', the USB device is
+ * simply pulled away underneath us, without any BT event being
+ * delivered.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
+MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
+MODULE_LICENSE("GPL");
+
+
+static int toshiba_bt_rfkill_add(struct acpi_device *device);
+static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type);
+static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
+static int toshiba_bt_resume(struct acpi_device *device);
+
+static const struct acpi_device_id bt_device_ids[] = {
+ { "TOS6205", 0},
+ { "", 0},
+};
+MODULE_DEVICE_TABLE(acpi, bt_device_ids);
+
+static struct acpi_driver toshiba_bt_rfkill_driver = {
+ .name = "Toshiba BT",
+ .class = "Toshiba",
+ .ids = bt_device_ids,
+ .ops = {
+ .add = toshiba_bt_rfkill_add,
+ .remove = toshiba_bt_rfkill_remove,
+ .notify = toshiba_bt_rfkill_notify,
+ .resume = toshiba_bt_resume,
+ },
+ .owner = THIS_MODULE,
+};
+
+
+static int toshiba_bluetooth_enable(acpi_handle handle)
+{
+ acpi_status res1, res2;
+ acpi_integer result;
+
+ /*
+ * Query ACPI to verify RFKill switch is set to 'on'.
+ * If not, we return silently, no need to report it as
+ * an error.
+ */
+ res1 = acpi_evaluate_integer(handle, "BTST", NULL, &result);
+ if (ACPI_FAILURE(res1))
+ return res1;
+ if (!(result & 0x01))
+ return 0;
+
+ printk(KERN_INFO "toshiba_bluetooth: Re-enabling Toshiba Bluetooth\n");
+ res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
+ res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
+ if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
+ return 0;
+
+ printk(KERN_WARNING "toshiba_bluetooth: Failed to re-enable "
+ "Toshiba Bluetooth\n");
+
+ return -ENODEV;
+}
+
+static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
+{
+ toshiba_bluetooth_enable(device->handle);
+}
+
+static int toshiba_bt_resume(struct acpi_device *device)
+{
+ return toshiba_bluetooth_enable(device->handle);
+}
+
+static int toshiba_bt_rfkill_add(struct acpi_device *device)
+{
+ acpi_status status;
+ acpi_integer bt_present;
+ int result = -ENODEV;
+
+ /*
+ * Some Toshiba laptops may have a fake TOS6205 device in
+ * their ACPI BIOS, so query the _STA method to see if there
+ * is really anything there, before trying to enable it.
+ */
+ status = acpi_evaluate_integer(device->handle, "_STA", NULL,
+ &bt_present);
+
+ if (!ACPI_FAILURE(status) && bt_present) {
+ printk(KERN_INFO "Detected Toshiba ACPI Bluetooth device - "
+ "installing RFKill handler\n");
+ result = toshiba_bluetooth_enable(device->handle);
+ }
+
+ return result;
+}
+
+static int __init toshiba_bt_rfkill_init(void)
+{
+ int result;
+
+ result = acpi_bus_register_driver(&toshiba_bt_rfkill_driver);
+ if (result < 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error registering driver\n"));
+ return result;
+ }
+
+ return 0;
+}
+
+static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type)
+{
+ /* clean up */
+ return 0;
+}
+
+static void __exit toshiba_bt_rfkill_exit(void)
+{
+ acpi_bus_unregister_driver(&toshiba_bt_rfkill_driver);
+}
+
+module_init(toshiba_bt_rfkill_init);
+module_exit(toshiba_bt_rfkill_exit);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 177f8d767df..e425a868cd3 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
@@ -65,6 +66,7 @@ struct wmi_block {
acpi_handle handle;
wmi_notify_handler handler;
void *handler_data;
+ struct device *dev;
};
static struct wmi_block wmi_blocks;
@@ -195,6 +197,34 @@ static bool wmi_parse_guid(const u8 *src, u8 *dest)
return true;
}
+/*
+ * Convert a raw GUID to the ASCII string representation
+ */
+static int wmi_gtoa(const char *in, char *out)
+{
+ int i;
+
+ for (i = 3; i >= 0; i--)
+ out += sprintf(out, "%02X", in[i] & 0xFF);
+
+ out += sprintf(out, "-");
+ out += sprintf(out, "%02X", in[5] & 0xFF);
+ out += sprintf(out, "%02X", in[4] & 0xFF);
+ out += sprintf(out, "-");
+ out += sprintf(out, "%02X", in[7] & 0xFF);
+ out += sprintf(out, "%02X", in[6] & 0xFF);
+ out += sprintf(out, "-");
+ out += sprintf(out, "%02X", in[8] & 0xFF);
+ out += sprintf(out, "%02X", in[9] & 0xFF);
+ out += sprintf(out, "-");
+
+ for (i = 10; i <= 15; i++)
+ out += sprintf(out, "%02X", in[i] & 0xFF);
+
+ *out = '\0';
+ return 0;
+}
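The loop above byte-swaps the first three GUID fields and copies the remaining bytes in storage order, so, as a worked example (the input bytes are made up), a raw block beginning 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F renders as 03020100-0504-0706-0809-0A0B0C0D0E0F, which is also the form used for the wmi: modalias strings below.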
+
static bool find_guid(const char *guid_string, struct wmi_block **out)
{
char tmp[16], guid_input[16];
@@ -555,6 +585,138 @@ bool wmi_has_guid(const char *guid_string)
EXPORT_SYMBOL_GPL(wmi_has_guid);
/*
+ * sysfs interface
+ */
+static ssize_t show_modalias(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ char guid_string[37];
+ struct wmi_block *wblock;
+
+ wblock = dev_get_drvdata(dev);
+ if (!wblock)
+ return -ENOMEM;
+
+ wmi_gtoa(wblock->gblock.guid, guid_string);
+
+ return sprintf(buf, "wmi:%s\n", guid_string);
+}
+static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+
+static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char guid_string[37];
+
+ struct wmi_block *wblock;
+
+ if (add_uevent_var(env, "MODALIAS="))
+ return -ENOMEM;
+
+ wblock = dev_get_drvdata(dev);
+ if (!wblock)
+ return -ENOMEM;
+
+ wmi_gtoa(wblock->gblock.guid, guid_string);
+
+ strcpy(&env->buf[env->buflen - 1], "wmi:");
+ memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36);
+ env->buflen += 40;
+
+ return 0;
+}
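The MODALIAS value emitted here, "wmi:<GUID>", is what lets udev trigger module autoloading for drivers that advertise a matching MODULE_ALIAS("wmi:...") entry; this is standard modalias behaviour assumed here rather than shown in this hunk.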
+
+static void wmi_dev_free(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct class wmi_class = {
+ .name = "wmi",
+ .dev_release = wmi_dev_free,
+ .dev_uevent = wmi_dev_uevent,
+};
+
+static int wmi_create_devs(void)
+{
+ int result;
+ char guid_string[37];
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+ struct device *guid_dev;
+
+ /* Create devices for all the GUIDs */
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+
+ guid_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!guid_dev)
+ return -ENOMEM;
+
+ wblock->dev = guid_dev;
+
+ guid_dev->class = &wmi_class;
+ dev_set_drvdata(guid_dev, wblock);
+
+ gblock = &wblock->gblock;
+
+ wmi_gtoa(gblock->guid, guid_string);
+ dev_set_name(guid_dev, guid_string);
+
+ result = device_register(guid_dev);
+ if (result)
+ return result;
+
+ result = device_create_file(guid_dev, &dev_attr_modalias);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+
+static void wmi_remove_devs(void)
+{
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+ struct device *guid_dev;
+
+ /* Delete devices for all the GUIDs */
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+
+ guid_dev = wblock->dev;
+ gblock = &wblock->gblock;
+
+ device_remove_file(guid_dev, &dev_attr_modalias);
+
+ device_unregister(guid_dev);
+ }
+}
+
+static void wmi_class_exit(void)
+{
+ wmi_remove_devs();
+ class_unregister(&wmi_class);
+}
+
+static int wmi_class_init(void)
+{
+ int ret;
+
+ ret = class_register(&wmi_class);
+ if (ret)
+ return ret;
+
+ ret = wmi_create_devs();
+ if (ret)
+ wmi_class_exit();
+
+ return ret;
+}
+
+/*
* Parse the _WDG method for the GUID data blocks
*/
static __init acpi_status parse_wdg(acpi_handle handle)
@@ -709,10 +871,17 @@ static int __init acpi_wmi_init(void)
if (result < 0) {
printk(KERN_INFO PREFIX "Error loading mapper\n");
- } else {
- printk(KERN_INFO PREFIX "Mapper loaded\n");
+ return -ENODEV;
+ }
+
+ result = wmi_class_init();
+ if (result) {
+ acpi_bus_unregister_driver(&acpi_wmi_driver);
+ return result;
}
+ printk(KERN_INFO PREFIX "Mapper loaded\n");
+
return result;
}
@@ -721,6 +890,8 @@ static void __exit acpi_wmi_exit(void)
struct list_head *p, *tmp;
struct wmi_block *wblock;
+ wmi_class_exit();
+
acpi_bus_unregister_driver(&acpi_wmi_driver);
list_for_each_safe(p, tmp, &wmi_blocks.list) {
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index c3f1c8e9d25..68b0c04987e 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -310,8 +310,7 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
goto done;
}
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (!strnicmp(buf, "disable", 7)) {
retval = pnp_disable_dev(dev);
goto done;
@@ -353,19 +352,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
pnp_init_resources(dev);
mutex_lock(&pnp_res_mutex);
while (1) {
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (!strnicmp(buf, "io", 2)) {
- buf += 2;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 2);
start = simple_strtoul(buf, &buf, 0);
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (*buf == '-') {
- buf += 1;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 1);
end = simple_strtoul(buf, &buf, 0);
} else
end = start;
@@ -373,16 +366,11 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
continue;
}
if (!strnicmp(buf, "mem", 3)) {
- buf += 3;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf);
if (*buf == '-') {
- buf += 1;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 1);
end = simple_strtoul(buf, &buf, 0);
} else
end = start;
@@ -390,17 +378,13 @@ static ssize_t pnp_set_current_resources(struct device *dmdev,
continue;
}
if (!strnicmp(buf, "irq", 3)) {
- buf += 3;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
pnp_add_irq_resource(dev, start, 0);
continue;
}
if (!strnicmp(buf, "dma", 3)) {
- buf += 3;
- while (isspace(*buf))
- ++buf;
+ buf = skip_spaces(buf + 3);
start = simple_strtoul(buf, &buf, 0);
pnp_add_dma_resource(dev, start, 0);
continue;
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 83b8b5ac49c..5314bf630bc 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -80,7 +80,8 @@ static int pnpacpi_get_resources(struct pnp_dev *dev)
static int pnpacpi_set_resources(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
struct acpi_buffer buffer;
int ret;
@@ -103,7 +104,8 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
int ret;
dev_dbg(&dev->dev, "disable resources\n");
@@ -121,6 +123,8 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
#ifdef CONFIG_ACPI_SLEEP
static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
{
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
int power_state;
power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
@@ -128,16 +132,19 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
power_state = (state.event == PM_EVENT_ON) ?
ACPI_STATE_D0 : ACPI_STATE_D3;
- return acpi_bus_set_power((acpi_handle) dev->data, power_state);
+ return acpi_bus_set_power(handle, power_state);
}
static int pnpacpi_resume(struct pnp_dev *dev)
{
- return acpi_bus_set_power((acpi_handle) dev->data, ACPI_STATE_D0);
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
+
+ return acpi_bus_set_power(handle, ACPI_STATE_D0);
}
#endif
-static struct pnp_protocol pnpacpi_protocol = {
+struct pnp_protocol pnpacpi_protocol = {
.name = "Plug and Play ACPI",
.get = pnpacpi_get_resources,
.set = pnpacpi_set_resources,
@@ -147,6 +154,7 @@ static struct pnp_protocol pnpacpi_protocol = {
.resume = pnpacpi_resume,
#endif
};
+EXPORT_SYMBOL(pnpacpi_protocol);
static int __init pnpacpi_add_device(struct acpi_device *device)
{
@@ -168,7 +176,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
if (!dev)
return -ENOMEM;
- dev->data = device->handle;
+ dev->data = device;
/* .enabled means the device can decode the resources */
dev->active = device->status.enabled;
status = acpi_get_handle(device->handle, "_SRS", &temp);
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index ef3a2cd3a7a..5702b2c8691 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -465,7 +465,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
acpi_status status;
pnp_dbg(&dev->dev, "parse allocated resources\n");
@@ -773,7 +774,8 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
acpi_status status;
struct acpipnp_parse_option_s parse_data;
@@ -845,7 +847,8 @@ static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
int pnpacpi_build_resource_template(struct pnp_dev *dev,
struct acpi_buffer *buffer)
{
- acpi_handle handle = dev->data;
+ struct acpi_device *acpi_dev = dev->data;
+ acpi_handle handle = acpi_dev->handle;
struct acpi_resource *resource;
int res_cnt = 0;
acpi_status status;
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index b35d921bac6..2d8ac43f78e 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/pnp.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <asm/uaccess.h>
@@ -33,42 +34,65 @@
static struct proc_dir_entry *proc_pnp = NULL;
static struct proc_dir_entry *proc_pnp_boot = NULL;
-static int proc_read_pnpconfig(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pnpconfig_proc_show(struct seq_file *m, void *v)
{
struct pnp_isa_config_struc pnps;
if (pnp_bios_isapnp_config(&pnps))
return -EIO;
- return snprintf(buf, count,
- "structure_revision %d\n"
- "number_of_CSNs %d\n"
- "ISA_read_data_port 0x%x\n",
- pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
+ seq_printf(m, "structure_revision %d\n"
+ "number_of_CSNs %d\n"
+ "ISA_read_data_port 0x%x\n",
+ pnps.revision, pnps.no_csns, pnps.isa_rd_data_port);
+ return 0;
}
-static int proc_read_escdinfo(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int pnpconfig_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnpconfig_proc_show, NULL);
+}
+
+static const struct file_operations pnpconfig_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnpconfig_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
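This is the standard seq_file conversion pattern, and the rest of this file follows it verbatim: each read-only entry gets a *_proc_show() that emits into the seq_file, a single_open() wrapper, and a const file_operations using seq_read/seq_lseek/single_release, replacing the old read_proc callbacks that had to manage buffer offsets and *eof by hand.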
+
+static int escd_info_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
if (pnp_bios_escd_info(&escd))
return -EIO;
- return snprintf(buf, count,
- "min_ESCD_write_size %d\n"
+ seq_printf(m, "min_ESCD_write_size %d\n"
"ESCD_size %d\n"
"NVRAM_base 0x%x\n",
escd.min_escd_write_size,
escd.escd_size, escd.nv_storage_base);
+ return 0;
}
+static int escd_info_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, escd_info_proc_show, NULL);
+}
+
+static const struct file_operations escd_info_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = escd_info_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#define MAX_SANE_ESCD_SIZE (32*1024)
-static int proc_read_escd(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int escd_proc_show(struct seq_file *m, void *v)
{
struct escd_info_struc escd;
char *tmpbuf;
- int escd_size, escd_left_to_read, n;
+ int escd_size;
if (pnp_bios_escd_info(&escd))
return -EIO;
@@ -76,7 +100,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
/* sanity check */
if (escd.escd_size > MAX_SANE_ESCD_SIZE) {
printk(KERN_ERR
- "PnPBIOS: proc_read_escd: ESCD size reported by BIOS escd_info call is too great\n");
+ "PnPBIOS: %s: ESCD size reported by BIOS escd_info call is too great\n", __func__);
return -EFBIG;
}
@@ -94,56 +118,75 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
/* sanity check */
if (escd_size > MAX_SANE_ESCD_SIZE) {
- printk(KERN_ERR "PnPBIOS: proc_read_escd: ESCD size reported by"
- " BIOS read_escd call is too great\n");
+ printk(KERN_ERR "PnPBIOS: %s: ESCD size reported by"
+ " BIOS read_escd call is too great\n", __func__);
kfree(tmpbuf);
return -EFBIG;
}
- escd_left_to_read = escd_size - pos;
- if (escd_left_to_read < 0)
- escd_left_to_read = 0;
- if (escd_left_to_read == 0)
- *eof = 1;
- n = min(count, escd_left_to_read);
- memcpy(buf, tmpbuf + pos, n);
+ seq_write(m, tmpbuf, escd_size);
kfree(tmpbuf);
- *start = buf;
- return n;
+ return 0;
}
-static int proc_read_legacyres(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static int escd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, escd_proc_show, NULL);
+}
+
+static const struct file_operations escd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = escd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnp_legacyres_proc_show(struct seq_file *m, void *v)
{
- /* Assume that the following won't overflow the buffer */
- if (pnp_bios_get_stat_res(buf))
+ void *buf;
+
+ buf = kmalloc(65536, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (pnp_bios_get_stat_res(buf)) {
+ kfree(buf);
return -EIO;
+ }
+
+ seq_write(m, buf, 65536);
+ kfree(buf);
+ return 0;
+}
- return count; // FIXME: Return actual length
+static int pnp_legacyres_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnp_legacyres_proc_show, NULL);
}
-static int proc_read_devices(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static const struct file_operations pnp_legacyres_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnp_legacyres_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnp_devices_proc_show(struct seq_file *m, void *v)
{
struct pnp_bios_node *node;
u8 nodenum;
- char *p = buf;
-
- if (pos >= 0xff)
- return 0;
node = kzalloc(node_info.max_node_size, GFP_KERNEL);
if (!node)
return -ENOMEM;
- for (nodenum = pos; nodenum < 0xff;) {
+ for (nodenum = 0; nodenum < 0xff;) {
u8 thisnodenum = nodenum;
- /* 26 = the number of characters per line sprintf'ed */
- if ((p - buf + 26) > count)
- break;
+
if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
break;
- p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
+ seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
node->handle, node->eisa_id,
node->type_code[0], node->type_code[1],
node->type_code[2], node->flags);
@@ -153,20 +196,29 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
"PnPBIOS: proc_read_devices:",
(unsigned int)nodenum,
(unsigned int)thisnodenum);
- *eof = 1;
break;
}
}
kfree(node);
- if (nodenum == 0xff)
- *eof = 1;
- *start = (char *)((off_t) nodenum - pos);
- return p - buf;
+ return 0;
+}
+
+static int pnp_devices_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnp_devices_proc_show, NULL);
}
-static int proc_read_node(char *buf, char **start, off_t pos,
- int count, int *eof, void *data)
+static const struct file_operations pnp_devices_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnp_devices_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pnpbios_proc_show(struct seq_file *m, void *v)
{
+ void *data = m->private;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
@@ -180,14 +232,20 @@ static int proc_read_node(char *buf, char **start, off_t pos,
return -EIO;
}
len = node->size - sizeof(struct pnp_bios_node);
- memcpy(buf, node->data, len);
+ seq_write(m, node->data, len);
kfree(node);
- return len;
+ return 0;
+}
+
+static int pnpbios_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pnpbios_proc_show, PDE(inode)->data);
}
-static int proc_write_node(struct file *file, const char __user * buf,
- unsigned long count, void *data)
+static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
{
+ void *data = PDE(file->f_path.dentry->d_inode)->data;
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
@@ -218,34 +276,33 @@ out:
return ret;
}
+static const struct file_operations pnpbios_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pnpbios_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pnpbios_proc_write,
+};
+
int pnpbios_interface_attach_device(struct pnp_bios_node *node)
{
char name[3];
- struct proc_dir_entry *ent;
sprintf(name, "%02x", node->handle);
if (!proc_pnp)
return -EIO;
if (!pnpbios_dont_use_current_config) {
- ent = create_proc_entry(name, 0, proc_pnp);
- if (ent) {
- ent->read_proc = proc_read_node;
- ent->write_proc = proc_write_node;
- ent->data = (void *)(long)(node->handle);
- }
+ proc_create_data(name, 0644, proc_pnp, &pnpbios_proc_fops,
+ (void *)(long)(node->handle));
}
if (!proc_pnp_boot)
return -EIO;
- ent = create_proc_entry(name, 0, proc_pnp_boot);
- if (ent) {
- ent->read_proc = proc_read_node;
- ent->write_proc = proc_write_node;
- ent->data = (void *)(long)(node->handle + 0x100);
+ if (proc_create_data(name, 0644, proc_pnp_boot, &pnpbios_proc_fops,
+ (void *)(long)(node->handle + 0x100)))
return 0;
- }
-
return -EIO;
}
@@ -262,14 +319,11 @@ int __init pnpbios_proc_init(void)
proc_pnp_boot = proc_mkdir("boot", proc_pnp);
if (!proc_pnp_boot)
return -EIO;
- create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL);
- create_proc_read_entry("configuration_info", 0, proc_pnp,
- proc_read_pnpconfig, NULL);
- create_proc_read_entry("escd_info", 0, proc_pnp, proc_read_escdinfo,
- NULL);
- create_proc_read_entry("escd", S_IRUSR, proc_pnp, proc_read_escd, NULL);
- create_proc_read_entry("legacy_device_resources", 0, proc_pnp,
- proc_read_legacyres, NULL);
+ proc_create("devices", 0, proc_pnp, &pnp_devices_proc_fops);
+ proc_create("configuration_info", 0, proc_pnp, &pnpconfig_proc_fops);
+ proc_create("escd_info", 0, proc_pnp, &escd_info_proc_fops);
+ proc_create("escd", S_IRUSR, proc_pnp, &escd_proc_fops);
+ proc_create("legacy_device_resources", 0, proc_pnp, &pnp_legacyres_proc_fops);
return 0;
}
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 8473fe5ed7f..dfbd5a6cc58 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -285,15 +285,10 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
* the PCI region, and that might prevent a PCI
* driver from requesting its resources.
*/
- dev_warn(&dev->dev, "%s resource "
- "(0x%llx-0x%llx) overlaps %s BAR %d "
- "(0x%llx-0x%llx), disabling\n",
- pnp_resource_type_name(res),
- (unsigned long long) pnp_start,
- (unsigned long long) pnp_end,
- pci_name(pdev), i,
- (unsigned long long) pci_start,
- (unsigned long long) pci_end);
+ dev_warn(&dev->dev,
+ "disabling %pR because it overlaps "
+ "%s BAR %d %pR\n", res,
+ pci_name(pdev), i, &pdev->resource[i]);
res->flags |= IORESOURCE_DISABLED;
}
}
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index ba976542788..64d0596bafb 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -517,7 +517,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
res->start = irq;
res->end = irq;
- pnp_dbg(&dev->dev, " add irq %d flags %#x\n", irq, flags);
+ pnp_dbg(&dev->dev, " add %pr\n", res);
return pnp_res;
}
@@ -538,7 +538,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
res->start = dma;
res->end = dma;
- pnp_dbg(&dev->dev, " add dma %d flags %#x\n", dma, flags);
+ pnp_dbg(&dev->dev, " add %pr\n", res);
return pnp_res;
}
@@ -562,8 +562,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add io %#llx-%#llx flags %#x\n",
- (unsigned long long) start, (unsigned long long) end, flags);
+ pnp_dbg(&dev->dev, " add %pr\n", res);
return pnp_res;
}
@@ -587,8 +586,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
res->start = start;
res->end = end;
- pnp_dbg(&dev->dev, " add mem %#llx-%#llx flags %#x\n",
- (unsigned long long) start, (unsigned long long) end, flags);
+ pnp_dbg(&dev->dev, " add %pr\n", res);
return pnp_res;
}
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index 63087d5ce60..9585c1c1cc3 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -75,47 +75,14 @@ char *pnp_resource_type_name(struct resource *res)
void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
{
- char buf[128];
- int len;
struct pnp_resource *pnp_res;
- struct resource *res;
- if (list_empty(&dev->resources)) {
+ if (list_empty(&dev->resources))
pnp_dbg(&dev->dev, "%s: no current resources\n", desc);
- return;
- }
-
- pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
- list_for_each_entry(pnp_res, &dev->resources, list) {
- res = &pnp_res->res;
- len = 0;
-
- len += scnprintf(buf + len, sizeof(buf) - len, " %-3s ",
- pnp_resource_type_name(res));
-
- if (res->flags & IORESOURCE_DISABLED) {
- pnp_dbg(&dev->dev, "%sdisabled\n", buf);
- continue;
- }
-
- switch (pnp_resource_type(res)) {
- case IORESOURCE_IO:
- case IORESOURCE_MEM:
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%#llx-%#llx flags %#lx",
- (unsigned long long) res->start,
- (unsigned long long) res->end,
- res->flags);
- break;
- case IORESOURCE_IRQ:
- case IORESOURCE_DMA:
- len += scnprintf(buf + len, sizeof(buf) - len,
- "%lld flags %#lx",
- (unsigned long long) res->start,
- res->flags);
- break;
- }
- pnp_dbg(&dev->dev, "%s\n", buf);
+ else {
+ pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
+ list_for_each_entry(pnp_res, &dev->resources, list)
+ pnp_dbg(&dev->dev, "%pr\n", &pnp_res->res);
}
}
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 59b90922da8..49c1720df59 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -22,11 +22,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
{"", 0}
};
-static void reserve_range(struct pnp_dev *dev, resource_size_t start,
- resource_size_t end, int port)
+static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
{
char *regionid;
const char *pnpid = dev_name(&dev->dev);
+ resource_size_t start = r->start, end = r->end;
struct resource *res;
regionid = kmalloc(16, GFP_KERNEL);
@@ -48,10 +48,8 @@ static void reserve_range(struct pnp_dev *dev, resource_size_t start,
* example do reserve stuff they know about too, so we may well
* have double reservations.
*/
- dev_info(&dev->dev, "%s range 0x%llx-0x%llx %s reserved\n",
- port ? "ioport" : "iomem",
- (unsigned long long) start, (unsigned long long) end,
- res ? "has been" : "could not be");
+ dev_info(&dev->dev, "%pR %s reserved\n", r,
+ res ? "has been" : "could not be");
}
static void reserve_resources_of_dev(struct pnp_dev *dev)
@@ -77,14 +75,14 @@ static void reserve_resources_of_dev(struct pnp_dev *dev)
if (res->end < res->start)
continue; /* invalid */
- reserve_range(dev, res->start, res->end, 1);
+ reserve_range(dev, res, 1);
}
for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
if (res->flags & IORESOURCE_DISABLED)
continue;
- reserve_range(dev, res->start, res->end, 0);
+ reserve_range(dev, res, 0);
}
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 11867492551..d4b3d67f054 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -29,6 +29,13 @@ config APM_POWER
Say Y here to enable support APM status emulation using
battery class devices.
+config WM831X_BACKUP
+ tristate "WM831X backup battery charger support"
+ depends on MFD_WM831X
+ help
+ Say Y here to enable support for the backup battery charger
+ in the Wolfson Microelectronics WM831x PMICs.
+
config WM831X_POWER
tristate "WM831X PMU support"
depends on MFD_WM831X
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 356cdfd3c8b..573597c683b 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
obj-$(CONFIG_PDA_POWER) += pda_power.o
obj-$(CONFIG_APM_POWER) += apm_power.o
+obj-$(CONFIG_WM831X_BACKUP) += wm831x_backup.o
obj-$(CONFIG_WM831X_POWER) += wm831x_power.o
obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index e8b278f7178..ea3fdfaca90 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -29,15 +29,12 @@
struct pcf50633_mbc {
struct pcf50633 *pcf;
- int adapter_active;
int adapter_online;
- int usb_active;
int usb_online;
struct power_supply usb;
struct power_supply adapter;
-
- struct delayed_work charging_restart_work;
+ struct power_supply ac;
};
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
@@ -47,16 +44,21 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
u8 bits;
int charging_start = 1;
u8 mbcs2, chgmod;
+ unsigned int mbcc5;
- if (ma >= 1000)
+ if (ma >= 1000) {
bits = PCF50633_MBCC7_USB_1000mA;
- else if (ma >= 500)
+ ma = 1000;
+ } else if (ma >= 500) {
bits = PCF50633_MBCC7_USB_500mA;
- else if (ma >= 100)
+ ma = 500;
+ } else if (ma >= 100) {
bits = PCF50633_MBCC7_USB_100mA;
- else {
+ ma = 100;
+ } else {
bits = PCF50633_MBCC7_USB_SUSPEND;
charging_start = 0;
+ ma = 0;
}
ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7,
@@ -66,21 +68,40 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
else
dev_info(pcf->dev, "usb curlim to %d mA\n", ma);
- /* Manual charging start */
- mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
+ /*
+ * We limit the charging current to be the USB current limit.
+ * The reason is that on pcf50633, when it enters PMU Standby mode,
+ * which it does when the device goes "off", the USB current limit
+ * reverts to the variant default. In at least one common case, that
+ * default is 500mA. By setting the charging current to be the same
+ * as the USB limit we set here before PMU standby, we make sure that
+ * only the correct amount of current is used even when the USB current
+ * limit gets reset to the wrong value.
+ */
+
+ if (mbc->pcf->pdata->charger_reference_current_ma) {
+ mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
+ if (mbcc5 > 255)
+ mbcc5 = 255;
+ pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
+ }
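As a worked example of the MBCC5 scaling just applied (assuming an illustrative charger_reference_current_ma of 1000): a 500 mA USB limit yields (500 << 8) / 1000 = 128, i.e. half scale, while a 1000 mA limit computes to 256 and is clamped to the register maximum of 255.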
+
+ mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
/* If chgmod == BATFULL, setting chgena has no effect.
- * We need to set resume instead.
+ * Datasheet says we need to set resume instead but when autoresume is
+ * used resume doesn't work. Clear and set chgena instead.
*/
if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
- else
+ else {
+ pcf50633_reg_clear_bits(pcf, PCF50633_REG_MBCC1,
+ PCF50633_MBCC1_CHGENA);
pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1,
- PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME);
-
- mbc->usb_active = charging_start;
+ PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA);
+ }
power_supply_changed(&mbc->usb);
@@ -92,20 +113,44 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf)
{
struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
int status = 0;
+ u8 chgmod;
+
+ if (!mbc)
+ return 0;
+
+ chgmod = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2)
+ & PCF50633_MBCS2_MBC_MASK;
if (mbc->usb_online)
status |= PCF50633_MBC_USB_ONLINE;
- if (mbc->usb_active)
+ if (chgmod == PCF50633_MBCS2_MBC_USB_PRE ||
+ chgmod == PCF50633_MBCS2_MBC_USB_PRE_WAIT ||
+ chgmod == PCF50633_MBCS2_MBC_USB_FAST ||
+ chgmod == PCF50633_MBCS2_MBC_USB_FAST_WAIT)
status |= PCF50633_MBC_USB_ACTIVE;
if (mbc->adapter_online)
status |= PCF50633_MBC_ADAPTER_ONLINE;
- if (mbc->adapter_active)
+ if (chgmod == PCF50633_MBCS2_MBC_ADP_PRE ||
+ chgmod == PCF50633_MBCS2_MBC_ADP_PRE_WAIT ||
+ chgmod == PCF50633_MBCS2_MBC_ADP_FAST ||
+ chgmod == PCF50633_MBCS2_MBC_ADP_FAST_WAIT)
status |= PCF50633_MBC_ADAPTER_ACTIVE;
return status;
}
EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *pcf)
+{
+ struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
+
+ if (!mbc)
+ return 0;
+
+ return mbc->usb_online;
+}
+EXPORT_SYMBOL_GPL(pcf50633_mbc_get_usb_online_status);
+
static ssize_t
show_chgmode(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -156,9 +201,55 @@ static ssize_t set_usblim(struct device *dev,
static DEVICE_ATTR(usb_curlim, S_IRUGO | S_IWUSR, show_usblim, set_usblim);
+static ssize_t
+show_chglim(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
+ u8 mbcc5 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC5);
+ unsigned int ma;
+
+ if (!mbc->pcf->pdata->charger_reference_current_ma)
+ return -ENODEV;
+
+ ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8;
+
+ return sprintf(buf, "%u\n", ma);
+}
+
+static ssize_t set_chglim(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pcf50633_mbc *mbc = dev_get_drvdata(dev);
+ unsigned long ma;
+ unsigned int mbcc5;
+ int ret;
+
+ if (!mbc->pcf->pdata->charger_reference_current_ma)
+ return -ENODEV;
+
+ ret = strict_strtoul(buf, 10, &ma);
+ if (ret)
+ return -EINVAL;
+
+ mbcc5 = (ma << 8) / mbc->pcf->pdata->charger_reference_current_ma;
+ if (mbcc5 > 255)
+ mbcc5 = 255;
+ pcf50633_reg_write(mbc->pcf, PCF50633_REG_MBCC5, mbcc5);
+
+ return count;
+}
+
+/*
+ * This attribute allows changing the MBC charging limit on the fly,
+ * independently of the USB current limit. It is also updated automatically
+ * every time the USB current limit is changed.
+ */
+static DEVICE_ATTR(chg_curlim, S_IRUGO | S_IWUSR, show_chglim, set_chglim);
+
static struct attribute *pcf50633_mbc_sysfs_entries[] = {
&dev_attr_chgmode.attr,
&dev_attr_usb_curlim.attr,
+ &dev_attr_chg_curlim.attr,
NULL,
};
@@ -167,76 +258,26 @@ static struct attribute_group mbc_attr_group = {
.attrs = pcf50633_mbc_sysfs_entries,
};
-/* MBC state machine switches into charging mode when the battery voltage
- * falls below 96% of a battery float voltage. But the voltage drop in Li-ion
- * batteries is marginal(1~2 %) till about 80% of its capacity - which means,
- * after a BATFULL, charging won't be restarted until 80%.
- *
- * This work_struct function restarts charging at regular intervals to make
- * sure we don't discharge too much
- */
-
-static void pcf50633_mbc_charging_restart(struct work_struct *work)
-{
- struct pcf50633_mbc *mbc;
- u8 mbcs2, chgmod;
-
- mbc = container_of(work, struct pcf50633_mbc,
- charging_restart_work.work);
-
- mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
- chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
-
- if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL)
- return;
-
- /* Restart charging */
- pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1,
- PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME);
- mbc->usb_active = 1;
- power_supply_changed(&mbc->usb);
-
- dev_info(mbc->pcf->dev, "Charging restarted\n");
-}
-
static void
pcf50633_mbc_irq_handler(int irq, void *data)
{
struct pcf50633_mbc *mbc = data;
- int chg_restart_interval =
- mbc->pcf->pdata->charging_restart_interval;
/* USB */
if (irq == PCF50633_IRQ_USBINS) {
mbc->usb_online = 1;
} else if (irq == PCF50633_IRQ_USBREM) {
mbc->usb_online = 0;
- mbc->usb_active = 0;
pcf50633_mbc_usb_curlim_set(mbc->pcf, 0);
- cancel_delayed_work_sync(&mbc->charging_restart_work);
}
/* Adapter */
- if (irq == PCF50633_IRQ_ADPINS) {
+ if (irq == PCF50633_IRQ_ADPINS)
mbc->adapter_online = 1;
- mbc->adapter_active = 1;
- } else if (irq == PCF50633_IRQ_ADPREM) {
+ else if (irq == PCF50633_IRQ_ADPREM)
mbc->adapter_online = 0;
- mbc->adapter_active = 0;
- }
-
- if (irq == PCF50633_IRQ_BATFULL) {
- mbc->usb_active = 0;
- mbc->adapter_active = 0;
-
- if (chg_restart_interval > 0)
- schedule_delayed_work(&mbc->charging_restart_work,
- chg_restart_interval);
- } else if (irq == PCF50633_IRQ_USBLIMON)
- mbc->usb_active = 0;
- else if (irq == PCF50633_IRQ_USBLIMOFF)
- mbc->usb_active = 1;
+ power_supply_changed(&mbc->ac);
power_supply_changed(&mbc->usb);
power_supply_changed(&mbc->adapter);
@@ -269,10 +310,34 @@ static int usb_get_property(struct power_supply *psy,
{
struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, usb);
int ret = 0;
+ u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
+ PCF50633_MBCC7_USB_MASK;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = mbc->usb_online;
+ val->intval = mbc->usb_online &&
+ (usblim <= PCF50633_MBCC7_USB_500mA);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct pcf50633_mbc *mbc = container_of(psy, struct pcf50633_mbc, ac);
+ int ret = 0;
+ u8 usblim = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCC7) &
+ PCF50633_MBCC7_USB_MASK;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = mbc->usb_online &&
+ (usblim == PCF50633_MBCC7_USB_1000mA);
break;
default:
ret = -EINVAL;
@@ -303,7 +368,6 @@ static const u8 mbc_irq_handlers[] = {
static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
{
struct pcf50633_mbc *mbc;
- struct pcf50633_subdev_pdata *pdata = pdev->dev.platform_data;
int ret;
int i;
u8 mbcs1;
@@ -313,7 +377,7 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, mbc);
- mbc->pcf = pdata->pcf;
+ mbc->pcf = dev_to_pcf50633(pdev->dev.parent);
/* Set up IRQ handlers */
for (i = 0; i < ARRAY_SIZE(mbc_irq_handlers); i++)
@@ -337,6 +401,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
mbc->usb.supplied_to = mbc->pcf->pdata->batteries;
mbc->usb.num_supplicants = mbc->pcf->pdata->num_batteries;
+ mbc->ac.name = "ac";
+ mbc->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ mbc->ac.properties = power_props;
+ mbc->ac.num_properties = ARRAY_SIZE(power_props);
+ mbc->ac.get_property = ac_get_property;
+ mbc->ac.supplied_to = mbc->pcf->pdata->batteries;
+ mbc->ac.num_supplicants = mbc->pcf->pdata->num_batteries;
+
ret = power_supply_register(&pdev->dev, &mbc->adapter);
if (ret) {
dev_err(mbc->pcf->dev, "failed to register adapter\n");
@@ -352,8 +424,14 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev)
return ret;
}
- INIT_DELAYED_WORK(&mbc->charging_restart_work,
- pcf50633_mbc_charging_restart);
+ ret = power_supply_register(&pdev->dev, &mbc->ac);
+ if (ret) {
+ dev_err(mbc->pcf->dev, "failed to register ac\n");
+ power_supply_unregister(&mbc->adapter);
+ power_supply_unregister(&mbc->usb);
+ kfree(mbc);
+ return ret;
+ }
ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group);
if (ret)
@@ -379,8 +457,7 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev)
power_supply_unregister(&mbc->usb);
power_supply_unregister(&mbc->adapter);
-
- cancel_delayed_work_sync(&mbc->charging_restart_work);
+ power_supply_unregister(&mbc->ac);
kfree(mbc);
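
A worked example of the MBCC5 scaling used in the pcf50633 hunks above may help: the register expresses the charging current as a fraction of the board's reference current, with 256 (1 << 8) meaning 100%, clamped to the 8-bit maximum. The helper below is only an editorial sketch of that arithmetic; the function name is illustrative and not part of the driver.

#include <linux/types.h>

/* Sketch of the MBCC5 scaling: 256 == 100% of charger_reference_current_ma. */
static u8 pcf50633_ma_to_mbcc5(unsigned int ma, unsigned int reference_ma)
{
	unsigned int mbcc5;

	if (!reference_ma)
		return 0;                         /* no reference current configured */

	mbcc5 = (ma << 8) / reference_ma;         /* e.g. 500mA at a 1000mA reference -> 128 */
	if (mbcc5 > 255)
		mbcc5 = 255;                      /* MBCC5 is an 8-bit register */

	return mbcc5;
}
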
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 08144393d64..c790e0c77d4 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -65,7 +65,10 @@ static ssize_t power_supply_show_property(struct device *dev,
ret = psy->get_property(psy, off, &value);
if (ret < 0) {
- if (ret != -ENODEV)
+ if (ret == -ENODATA)
+ dev_dbg(dev, "driver has no data for `%s' property\n",
+ attr->attr.name);
+ else if (ret != -ENODEV)
dev_err(dev, "driver failed to report `%s' property\n",
attr->attr.name);
return ret;
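
The -ENODATA handling added above gives drivers a quiet way to report a property that is temporarily unavailable, logged at debug rather than error level. The fragment below is a hedged illustration of how a get_property() callback might use it; the driver and the property chosen are assumptions, not taken from this patch.

#include <linux/errno.h>
#include <linux/power_supply.h>

static int example_bat_get_property(struct power_supply *psy,
				    enum power_supply_property psp,
				    union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = 1;        /* always report online for this sketch */
		return 0;
	case POWER_SUPPLY_PROP_TEMP:
		return -ENODATA;        /* sensor not ready: logged at debug level only */
	default:
		return -EINVAL;         /* unknown property: still reported as an error */
	}
}
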
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
new file mode 100644
index 00000000000..bf4f387a800
--- /dev/null
+++ b/drivers/power/wm831x_backup.c
@@ -0,0 +1,233 @@
+/*
+ * Backup battery driver for Wolfson Microelectronics wm831x PMICs
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/pmu.h>
+#include <linux/mfd/wm831x/pdata.h>
+
+struct wm831x_backup {
+ struct wm831x *wm831x;
+ struct power_supply backup;
+};
+
+static int wm831x_backup_read_voltage(struct wm831x *wm831x,
+ enum wm831x_auxadc src,
+ union power_supply_propval *val)
+{
+ int ret;
+
+ ret = wm831x_auxadc_read_uv(wm831x, src);
+ if (ret >= 0)
+ val->intval = ret;
+
+ return ret;
+}
+
+/*********************************************************************
+ * Backup supply properties
+ *********************************************************************/
+
+static void wm831x_config_backup(struct wm831x *wm831x)
+{
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
+ struct wm831x_backup_pdata *pdata;
+ int ret, reg;
+
+ if (!wm831x_pdata || !wm831x_pdata->backup) {
+ dev_warn(wm831x->dev,
+ "No backup battery charger configuration\n");
+ return;
+ }
+
+ pdata = wm831x_pdata->backup;
+
+ reg = 0;
+
+ if (pdata->charger_enable)
+ reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
+ if (pdata->no_constant_voltage)
+ reg |= WM831X_BKUP_CHG_MODE;
+
+ switch (pdata->vlim) {
+ case 2500:
+ break;
+ case 3100:
+ reg |= WM831X_BKUP_CHG_VLIM;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
+ pdata->vlim);
+ }
+
+ switch (pdata->ilim) {
+ case 100:
+ break;
+ case 200:
+ reg |= 1;
+ break;
+ case 300:
+ reg |= 2;
+ break;
+ case 400:
+ reg |= 3;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
+ pdata->ilim);
+ }
+
+ ret = wm831x_reg_unlock(wm831x);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
+ return;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
+ WM831X_BKUP_CHG_ENA_MASK |
+ WM831X_BKUP_CHG_MODE_MASK |
+ WM831X_BKUP_BATT_DET_ENA_MASK |
+ WM831X_BKUP_CHG_VLIM_MASK |
+ WM831X_BKUP_CHG_ILIM_MASK,
+ reg);
+ if (ret != 0)
+ dev_err(wm831x->dev,
+ "Failed to set backup charger config: %d\n", ret);
+
+ wm831x_reg_lock(wm831x);
+}
+
+static int wm831x_backup_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wm831x_backup *devdata = dev_get_drvdata(psy->dev->parent);
+ struct wm831x *wm831x = devdata->wm831x;
+ int ret = 0;
+
+ ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (ret & WM831X_BKUP_CHG_STS)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
+ val);
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ if (ret & WM831X_BKUP_CHG_STS)
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property wm831x_backup_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_PRESENT,
+};
+
+/*********************************************************************
+ * Initialisation
+ *********************************************************************/
+
+static __devinit int wm831x_backup_probe(struct platform_device *pdev)
+{
+ struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_backup *devdata;
+ struct power_supply *backup;
+ int ret;
+
+ devdata = kzalloc(sizeof(struct wm831x_backup), GFP_KERNEL);
+ if (devdata == NULL)
+ return -ENOMEM;
+
+ devdata->wm831x = wm831x;
+ platform_set_drvdata(pdev, devdata);
+
+ backup = &devdata->backup;
+
+ /* We ignore configuration failures since we can still read
+ * back the status without enabling the charger (which may
+ * already be enabled anyway).
+ */
+ wm831x_config_backup(wm831x);
+
+ backup->name = "wm831x-backup";
+ backup->type = POWER_SUPPLY_TYPE_BATTERY;
+ backup->properties = wm831x_backup_props;
+ backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
+ backup->get_property = wm831x_backup_get_prop;
+ ret = power_supply_register(&pdev->dev, backup);
+ if (ret)
+ goto err_kmalloc;
+
+ return ret;
+
+err_kmalloc:
+ kfree(devdata);
+ return ret;
+}
+
+static __devexit int wm831x_backup_remove(struct platform_device *pdev)
+{
+ struct wm831x_backup *devdata = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&devdata->backup);
+ kfree(devdata);
+
+ return 0;
+}
+
+static struct platform_driver wm831x_backup_driver = {
+ .probe = wm831x_backup_probe,
+ .remove = __devexit_p(wm831x_backup_remove),
+ .driver = {
+ .name = "wm831x-backup",
+ },
+};
+
+static int __init wm831x_backup_init(void)
+{
+ return platform_driver_register(&wm831x_backup_driver);
+}
+module_init(wm831x_backup_init);
+
+static void __exit wm831x_backup_exit(void)
+{
+ platform_driver_unregister(&wm831x_backup_driver);
+}
+module_exit(wm831x_backup_exit);
+
+MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-backup");
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 2a4c8b0b829..f85e80b1b40 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -21,7 +21,6 @@
struct wm831x_power {
struct wm831x *wm831x;
struct power_supply wall;
- struct power_supply backup;
struct power_supply usb;
struct power_supply battery;
};
@@ -454,125 +453,6 @@ static irqreturn_t wm831x_bat_irq(int irq, void *data)
/*********************************************************************
- * Backup supply properties
- *********************************************************************/
-
-static void wm831x_config_backup(struct wm831x *wm831x)
-{
- struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
- struct wm831x_backup_pdata *pdata;
- int ret, reg;
-
- if (!wm831x_pdata || !wm831x_pdata->backup) {
- dev_warn(wm831x->dev,
- "No backup battery charger configuration\n");
- return;
- }
-
- pdata = wm831x_pdata->backup;
-
- reg = 0;
-
- if (pdata->charger_enable)
- reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
- if (pdata->no_constant_voltage)
- reg |= WM831X_BKUP_CHG_MODE;
-
- switch (pdata->vlim) {
- case 2500:
- break;
- case 3100:
- reg |= WM831X_BKUP_CHG_VLIM;
- break;
- default:
- dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
- pdata->vlim);
- }
-
- switch (pdata->ilim) {
- case 100:
- break;
- case 200:
- reg |= 1;
- break;
- case 300:
- reg |= 2;
- break;
- case 400:
- reg |= 3;
- break;
- default:
- dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
- pdata->ilim);
- }
-
- ret = wm831x_reg_unlock(wm831x);
- if (ret != 0) {
- dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
- return;
- }
-
- ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
- WM831X_BKUP_CHG_ENA_MASK |
- WM831X_BKUP_CHG_MODE_MASK |
- WM831X_BKUP_BATT_DET_ENA_MASK |
- WM831X_BKUP_CHG_VLIM_MASK |
- WM831X_BKUP_CHG_ILIM_MASK,
- reg);
- if (ret != 0)
- dev_err(wm831x->dev,
- "Failed to set backup charger config: %d\n", ret);
-
- wm831x_reg_lock(wm831x);
-}
-
-static int wm831x_backup_get_prop(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent);
- struct wm831x *wm831x = wm831x_power->wm831x;
- int ret = 0;
-
- ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
- if (ret < 0)
- return ret;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- if (ret & WM831X_BKUP_CHG_STS)
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
- else
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
- break;
-
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
- val);
- break;
-
- case POWER_SUPPLY_PROP_PRESENT:
- if (ret & WM831X_BKUP_CHG_STS)
- val->intval = 1;
- else
- val->intval = 0;
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static enum power_supply_property wm831x_backup_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_PRESENT,
-};
-
-/*********************************************************************
* Initialisation
*********************************************************************/
@@ -595,10 +475,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
dev_dbg(wm831x->dev, "Power source changed\n");
- /* Just notify for everything - little harm in overnotifying.
- * The backup battery is not a power source while the system
- * is running so skip that.
- */
+ /* Just notify for everything - little harm in overnotifying. */
power_supply_changed(&wm831x_power->battery);
power_supply_changed(&wm831x_power->usb);
power_supply_changed(&wm831x_power->wall);
@@ -613,7 +490,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
struct power_supply *usb;
struct power_supply *battery;
struct power_supply *wall;
- struct power_supply *backup;
int ret, irq, i;
power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL);
@@ -626,13 +502,11 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
usb = &power->usb;
battery = &power->battery;
wall = &power->wall;
- backup = &power->backup;
/* We ignore configuration failures since we can still read back
- * the status without enabling either of the chargers.
+ * the status without enabling the charger.
*/
wm831x_config_battery(wm831x);
- wm831x_config_backup(wm831x);
wall->name = "wm831x-wall";
wall->type = POWER_SUPPLY_TYPE_MAINS;
@@ -661,15 +535,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_battery;
- backup->name = "wm831x-backup";
- backup->type = POWER_SUPPLY_TYPE_BATTERY;
- backup->properties = wm831x_backup_props;
- backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
- backup->get_property = wm831x_backup_get_prop;
- ret = power_supply_register(&pdev->dev, backup);
- if (ret)
- goto err_usb;
-
irq = platform_get_irq_byname(pdev, "SYSLO");
ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq,
IRQF_TRIGGER_RISING, "SYSLO",
@@ -677,7 +542,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
irq, ret);
- goto err_backup;
+ goto err_usb;
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
@@ -716,8 +581,6 @@ err_bat_irq:
err_syslo:
irq = platform_get_irq_byname(pdev, "SYSLO");
wm831x_free_irq(wm831x, irq, power);
-err_backup:
- power_supply_unregister(backup);
err_usb:
power_supply_unregister(usb);
err_battery:
@@ -746,7 +609,6 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, "SYSLO");
wm831x_free_irq(wm831x, irq, wm831x_power);
- power_supply_unregister(&wm831x_power->backup);
power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
power_supply_unregister(&wm831x_power->usb);
diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c
index 28b0299c004..ad4f071e128 100644
--- a/drivers/power/wm8350_power.c
+++ b/drivers/power/wm8350_power.c
@@ -184,8 +184,9 @@ static ssize_t charger_state_show(struct device *dev,
static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL);
-static void wm8350_charger_handler(struct wm8350 *wm8350, int irq, void *data)
+static irqreturn_t wm8350_charger_handler(int irq, void *data)
{
+ struct wm8350 *wm8350 = data;
struct wm8350_power *power = &wm8350->power;
struct wm8350_charger_policy *policy = power->policy;
@@ -238,6 +239,8 @@ static void wm8350_charger_handler(struct wm8350 *wm8350, int irq, void *data)
default:
dev_err(wm8350->dev, "Unknown interrupt %d\n", irq);
}
+
+ return IRQ_HANDLED;
}
/*********************************************************************
@@ -387,73 +390,55 @@ static void wm8350_init_charger(struct wm8350 *wm8350)
{
/* register our interest in charger events */
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT);
+ wm8350_charger_handler, 0, "Battery hot", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD);
+ wm8350_charger_handler, 0, "Battery cold", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL);
+ wm8350_charger_handler, 0, "Battery fail", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_TO);
+ wm8350_charger_handler, 0,
+ "Charger timeout", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_END);
+ wm8350_charger_handler, 0,
+ "Charge end", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_START);
+ wm8350_charger_handler, 0,
+ "Charge start", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY);
+ wm8350_charger_handler, 0,
+ "Fast charge ready", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9);
+ wm8350_charger_handler, 0,
+ "Battery <3.9V", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1);
+ wm8350_charger_handler, 0,
+ "Battery <3.1V", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85);
+ wm8350_charger_handler, 0,
+ "Battery <2.85V", wm8350);
/* and supply change events */
wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_USB_FB);
+ wm8350_charger_handler, 0, "USB", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
+ wm8350_charger_handler, 0, "Wall", wm8350);
wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB,
- wm8350_charger_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_EXT_BAT_FB);
+ wm8350_charger_handler, 0, "Battery", wm8350);
}
static void free_charger_irq(struct wm8350 *wm8350)
{
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_TO);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_END);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_START);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85);
- wm8350_mask_irq(wm8350, WM8350_IRQ_EXT_USB_FB);
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB);
- wm8350_mask_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
- wm8350_mask_irq(wm8350, WM8350_IRQ_EXT_BAT_FB);
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_BAT_FB);
}
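
The wm8350 hunks above track a change in the MFD IRQ API: handlers now use the genirq-style irqreturn_t signature, registration takes flags, a name and a data pointer, and the explicit unmask/mask calls are gone. A hedged sketch of the new-style usage follows; the handler name is illustrative.

#include <linux/interrupt.h>
#include <linux/mfd/wm8350/core.h>

static irqreturn_t example_chg_handler(int irq, void *data)
{
	struct wm8350 *wm8350 = data;   /* registration passed the device as the data pointer */

	dev_dbg(wm8350->dev, "charger event, irq %d\n", irq);
	return IRQ_HANDLED;
}

/*
 * Registration in the new style; no separate wm8350_unmask_irq() call is needed:
 *
 *   wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END,
 *                       example_chg_handler, 0, "Charge end", wm8350);
 */
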
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index f2bfd296dba..fa39e759a27 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -157,7 +157,7 @@ static int wm97xx_bat_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops wm97xx_bat_pm_ops = {
+static const struct dev_pm_ops wm97xx_bat_pm_ops = {
.suspend = wm97xx_bat_suspend,
.resume = wm97xx_bat_resume,
};
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
new file mode 100644
index 00000000000..04719551381
--- /dev/null
+++ b/drivers/regulator/88pm8607.c
@@ -0,0 +1,685 @@
+/*
+ * Regulators driver for Marvell 88PM8607
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/88pm8607.h>
+
+struct pm8607_regulator_info {
+ struct regulator_desc desc;
+ struct pm8607_chip *chip;
+ struct regulator_dev *regulator;
+
+ int min_uV;
+ int max_uV;
+ int step_uV;
+ int vol_reg;
+ int vol_shift;
+ int vol_nbits;
+ int update_reg;
+ int update_bit;
+ int enable_reg;
+ int enable_bit;
+ int slope_double;
+};
+
+static inline int check_range(struct pm8607_regulator_info *info,
+ int min_uV, int max_uV)
+{
+ if (max_uV < info->min_uV || min_uV > info->max_uV || min_uV > max_uV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ uint8_t chip_id = info->chip->chip_id;
+ int ret = -EINVAL;
+
+ switch (info->desc.id) {
+ case PM8607_ID_BUCK1:
+ ret = (index < 0x1d) ? (index * 25000 + 800000) :
+ ((index < 0x20) ? 1500000 :
+ ((index < 0x40) ? ((index - 0x20) * 25000) :
+ -EINVAL));
+ break;
+ case PM8607_ID_BUCK3:
+ ret = (index < 0x3d) ? (index * 25000) :
+ ((index < 0x40) ? 1500000 : -EINVAL);
+ if (ret < 0)
+ break;
+ if (info->slope_double)
+ ret <<= 1;
+ break;
+ case PM8607_ID_LDO1:
+ ret = (index == 0) ? 1800000 :
+ ((index == 1) ? 1200000 :
+ ((index == 2) ? 2800000 : -EINVAL));
+ break;
+ case PM8607_ID_LDO5:
+ ret = (index == 0) ? 2900000 :
+ ((index == 1) ? 3000000 :
+ ((index == 2) ? 3100000 : 3300000));
+ break;
+ case PM8607_ID_LDO7:
+ case PM8607_ID_LDO8:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_ID_LDO12:
+ ret = (index < 2) ? (index * 100000 + 1800000) :
+ ((index < 7) ? (index * 100000 + 2500000) :
+ ((index == 7) ? 3300000 : 1200000));
+ break;
+ case PM8607_ID_LDO2:
+ case PM8607_ID_LDO3:
+ case PM8607_ID_LDO9:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2550000) :
+ 3300000);
+ break;
+ }
+ break;
+ case PM8607_ID_LDO4:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 6) ? (index * 50000 + 2550000) :
+ ((index == 6) ? 2900000 : 3300000));
+ break;
+ }
+ break;
+ case PM8607_ID_LDO6:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2450000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 2) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2500000) :
+ 3300000);
+ break;
+ }
+ break;
+ case PM8607_ID_LDO10:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ 1200000);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2550000) :
+ ((index == 7) ? 3300000 : 1200000));
+ break;
+ }
+ break;
+ case PM8607_ID_LDO14:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 8) ? (index * 50000 + 2550000) :
+ -EINVAL);
+ break;
+ case PM8607_CHIP_B0:
+ ret = (index < 2) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2600000) :
+ 3300000);
+ break;
+ }
+ break;
+ }
+ return ret;
+}
+
+static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ uint8_t chip_id = info->chip->chip_id;
+ int val = -ENOENT;
+ int ret;
+
+ switch (info->desc.id) {
+ case PM8607_ID_BUCK1:
+ if (min_uV >= 800000) /* 800mV ~ 1500mV / 25mV */
+ val = (min_uV - 775001) / 25000;
+ else { /* 25mV ~ 775mV / 25mV */
+ val = (min_uV + 249999) / 25000;
+ val += 32;
+ }
+ break;
+ case PM8607_ID_BUCK3:
+ if (info->slope_double)
+ min_uV = min_uV >> 1;
+ val = (min_uV + 249999) / 25000; /* 0mV ~ 1500mV / 25mV */
+
+ break;
+ case PM8607_ID_LDO1:
+ if (min_uV > 1800000)
+ val = 2;
+ else if (min_uV > 1200000)
+ val = 0;
+ else
+ val = 1;
+ break;
+ case PM8607_ID_LDO5:
+ if (min_uV > 3100000)
+ val = 3;
+ else /* 2900mV ~ 3100mV / 100mV */
+ val = (min_uV - 2800001) / 100000;
+ break;
+ case PM8607_ID_LDO7:
+ case PM8607_ID_LDO8:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0; /* 1800mv */
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_ID_LDO10:
+ if (min_uV > 2850000)
+ val = 7;
+ else if (min_uV <= 1200000)
+ val = 8;
+ else if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
+ val = (min_uV - 1750001) / 50000;
+ else { /* 2700mV ~ 2850mV / 50mV */
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ }
+ break;
+ case PM8607_ID_LDO12:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 100mV */
+ if (min_uV <= 1200000)
+ val = 8; /* 1200mV */
+ else if (min_uV <= 1800000)
+ val = 0; /* 1800mV */
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1700001) / 100000;
+ else
+ val = 2; /* 2700mV */
+ } else { /* 2700mV ~ 3100mV / 100mV */
+ if (min_uV <= 3100000) {
+ val = (min_uV - 2600001) / 100000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_ID_LDO2:
+ case PM8607_ID_LDO3:
+ case PM8607_ID_LDO9:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2850mV / 50mV */
+ if (min_uV <= 2850000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ case PM8607_ID_LDO4:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2800mV / 50mV */
+ if (min_uV <= 2850000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else if (min_uV <= 2900000)
+ val = 6;
+ else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ case PM8607_ID_LDO6:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2600000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2600mV */
+ } else { /* 2600mV ~ 2800mV / 50mV */
+ if (min_uV <= 2800000) {
+ val = (min_uV - 2550001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1850000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 2; /* 2600mV */
+ } else { /* 2600mV ~ 2800mV / 50mV */
+ if (min_uV <= 2800000) {
+ val = (min_uV - 2550001) / 50000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ case PM8607_ID_LDO14:
+ switch (chip_id) {
+ case PM8607_CHIP_A0:
+ case PM8607_CHIP_A1:
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else
+ val = -EINVAL;
+ }
+ break;
+ case PM8607_CHIP_B0:
+ if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1850000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 2; /* 2700mV */
+ } else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
+ }
+ break;
+ }
+ break;
+ }
+ if (val >= 0) {
+ ret = pm8607_list_voltage(rdev, val);
+ if (ret > max_uV) {
+ pr_err("exceed voltage range (%d %d) uV",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ } else
+ pr_err("invalid voltage range (%d %d) uV", min_uV, max_uV);
+ return val;
+}
+
+static int pm8607_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+ uint8_t val, mask;
+ int ret;
+
+ if (check_range(info, min_uV, max_uV)) {
+ pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ ret = choose_voltage(rdev, min_uV, max_uV);
+ if (ret < 0)
+ return -EINVAL;
+ val = (uint8_t)(ret << info->vol_shift);
+ mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
+
+ ret = pm8607_set_bits(chip, info->vol_reg, mask, val);
+ if (ret)
+ return ret;
+ switch (info->desc.id) {
+ case PM8607_ID_BUCK1:
+ case PM8607_ID_BUCK3:
+ ret = pm8607_set_bits(chip, info->update_reg,
+ 1 << info->update_bit,
+ 1 << info->update_bit);
+ break;
+ }
+ return ret;
+}
+
+static int pm8607_get_voltage(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+ uint8_t val, mask;
+ int ret;
+
+ ret = pm8607_reg_read(chip, info->vol_reg);
+ if (ret < 0)
+ return ret;
+
+ mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
+ val = ((unsigned char)ret & mask) >> info->vol_shift;
+
+ return pm8607_list_voltage(rdev, val);
+}
+
+static int pm8607_enable(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+
+ return pm8607_set_bits(chip, info->enable_reg,
+ 1 << info->enable_bit,
+ 1 << info->enable_bit);
+}
+
+static int pm8607_disable(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+
+ return pm8607_set_bits(chip, info->enable_reg,
+ 1 << info->enable_bit, 0);
+}
+
+static int pm8607_is_enabled(struct regulator_dev *rdev)
+{
+ struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
+ struct pm8607_chip *chip = info->chip;
+ int ret;
+
+ ret = pm8607_reg_read(chip, info->enable_reg);
+ if (ret < 0)
+ return ret;
+
+ return !!((unsigned char)ret & (1 << info->enable_bit));
+}
+
+static struct regulator_ops pm8607_regulator_ops = {
+ .set_voltage = pm8607_set_voltage,
+ .get_voltage = pm8607_get_voltage,
+ .enable = pm8607_enable,
+ .disable = pm8607_disable,
+ .is_enabled = pm8607_is_enabled,
+};
+
+#define PM8607_DVC(_id, min, max, step, vreg, nbits, ureg, ubit, ereg, ebit) \
+{ \
+ .desc = { \
+ .name = "BUCK" #_id, \
+ .ops = &pm8607_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM8607_ID_BUCK##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .min_uV = (min) * 1000, \
+ .max_uV = (max) * 1000, \
+ .step_uV = (step) * 1000, \
+ .vol_reg = PM8607_##vreg, \
+ .vol_shift = (0), \
+ .vol_nbits = (nbits), \
+ .update_reg = PM8607_##ureg, \
+ .update_bit = (ubit), \
+ .enable_reg = PM8607_##ereg, \
+ .enable_bit = (ebit), \
+ .slope_double = (0), \
+}
+
+#define PM8607_LDO(_id, min, max, step, vreg, shift, nbits, ereg, ebit) \
+{ \
+ .desc = { \
+ .name = "LDO" #_id, \
+ .ops = &pm8607_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM8607_ID_LDO##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .min_uV = (min) * 1000, \
+ .max_uV = (max) * 1000, \
+ .step_uV = (step) * 1000, \
+ .vol_reg = PM8607_##vreg, \
+ .vol_shift = (shift), \
+ .vol_nbits = (nbits), \
+ .enable_reg = PM8607_##ereg, \
+ .enable_bit = (ebit), \
+ .slope_double = (0), \
+}
+
+static struct pm8607_regulator_info pm8607_regulator_info[] = {
+ PM8607_DVC(1, 0, 1500, 25, BUCK1, 6, GO, 0, SUPPLIES_EN11, 0),
+ PM8607_DVC(3, 0, 1500, 25, BUCK3, 6, GO, 2, SUPPLIES_EN11, 2),
+
+ PM8607_LDO(1 , 1200, 2800, 0, LDO1 , 0, 2, SUPPLIES_EN11, 3),
+ PM8607_LDO(2 , 1800, 3300, 0, LDO2 , 0, 3, SUPPLIES_EN11, 4),
+ PM8607_LDO(3 , 1800, 3300, 0, LDO3 , 0, 3, SUPPLIES_EN11, 5),
+ PM8607_LDO(4 , 1800, 3300, 0, LDO4 , 0, 3, SUPPLIES_EN11, 6),
+ PM8607_LDO(5 , 2900, 3300, 0, LDO5 , 0, 2, SUPPLIES_EN11, 7),
+ PM8607_LDO(6 , 1800, 3300, 0, LDO6 , 0, 3, SUPPLIES_EN12, 0),
+ PM8607_LDO(7 , 1800, 2900, 0, LDO7 , 0, 3, SUPPLIES_EN12, 1),
+ PM8607_LDO(8 , 1800, 2900, 0, LDO8 , 0, 3, SUPPLIES_EN12, 2),
+ PM8607_LDO(9 , 1800, 3300, 0, LDO9 , 0, 3, SUPPLIES_EN12, 3),
+ PM8607_LDO(10, 1200, 3300, 0, LDO10, 0, 4, SUPPLIES_EN11, 4),
+ PM8607_LDO(12, 1200, 3300, 0, LDO12, 0, 4, SUPPLIES_EN11, 5),
+ PM8607_LDO(14, 1800, 3300, 0, LDO14, 0, 3, SUPPLIES_EN11, 6),
+};
+
+static inline struct pm8607_regulator_info *find_regulator_info(int id)
+{
+ struct pm8607_regulator_info *info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
+ info = &pm8607_regulator_info[i];
+ if (info->desc.id == id)
+ return info;
+ }
+ return NULL;
+}
+
+static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
+{
+ struct pm8607_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm8607_platform_data *pdata = chip->dev->platform_data;
+ struct pm8607_regulator_info *info = NULL;
+
+ info = find_regulator_info(pdev->id);
+ if (info == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+
+ info->chip = chip;
+
+ info->regulator = regulator_register(&info->desc, &pdev->dev,
+ pdata->regulator[pdev->id], info);
+ if (IS_ERR(info->regulator)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ info->desc.name);
+ return PTR_ERR(info->regulator);
+ }
+
+ /* check DVC ramp slope double */
+ if (info->desc.id == PM8607_ID_BUCK3)
+ if (info->chip->buck3_double)
+ info->slope_double = 1;
+
+ platform_set_drvdata(pdev, info);
+ return 0;
+}
+
+static int __devexit pm8607_regulator_remove(struct platform_device *pdev)
+{
+ struct pm8607_regulator_info *info = platform_get_drvdata(pdev);
+
+ regulator_unregister(info->regulator);
+ return 0;
+}
+
+#define PM8607_REGULATOR_DRIVER(_name) \
+{ \
+ .driver = { \
+ .name = "88pm8607-" #_name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .probe = pm8607_regulator_probe, \
+ .remove = __devexit_p(pm8607_regulator_remove), \
+}
+
+static struct platform_driver pm8607_regulator_driver[] = {
+ PM8607_REGULATOR_DRIVER(buck1),
+ PM8607_REGULATOR_DRIVER(buck2),
+ PM8607_REGULATOR_DRIVER(buck3),
+ PM8607_REGULATOR_DRIVER(ldo1),
+ PM8607_REGULATOR_DRIVER(ldo2),
+ PM8607_REGULATOR_DRIVER(ldo3),
+ PM8607_REGULATOR_DRIVER(ldo4),
+ PM8607_REGULATOR_DRIVER(ldo5),
+ PM8607_REGULATOR_DRIVER(ldo6),
+ PM8607_REGULATOR_DRIVER(ldo7),
+ PM8607_REGULATOR_DRIVER(ldo8),
+ PM8607_REGULATOR_DRIVER(ldo9),
+ PM8607_REGULATOR_DRIVER(ldo10),
+ PM8607_REGULATOR_DRIVER(ldo12),
+ PM8607_REGULATOR_DRIVER(ldo14),
+};
+
+static int __init pm8607_regulator_init(void)
+{
+ int i, count, ret;
+
+ count = ARRAY_SIZE(pm8607_regulator_driver);
+ for (i = 0; i < count; i++) {
+ ret = platform_driver_register(&pm8607_regulator_driver[i]);
+ if (ret != 0)
+ pr_err("Failed to register regulator driver: %d\n",
+ ret);
+ }
+ return 0;
+}
+subsys_initcall(pm8607_regulator_init);
+
+static void __exit pm8607_regulator_exit(void)
+{
+ int i, count;
+
+ count = ARRAY_SIZE(pm8607_regulator_driver);
+ for (i = 0; i < count; i++)
+ platform_driver_unregister(&pm8607_regulator_driver[i]);
+}
+module_exit(pm8607_regulator_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM8607 PMIC");
+MODULE_ALIAS("platform:88pm8607-regulator");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index bcbb161bde0..262f62eec83 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -69,8 +69,15 @@ config REGULATOR_MAX1586
regulator via I2C bus. The provided regulator is suitable
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX8660
+ tristate "Maxim 8660/8661 voltage regulator"
+ depends on I2C
+ help
+ This driver controls a Maxim 8660/8661 voltage output
+ regulator via I2C bus.
+
config REGULATOR_TWL4030
- bool "TI TWL4030/TWL5030/TPS695x0 PMIC"
+ bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
depends on TWL4030_CORE
help
This driver supports the voltage regulators provided by
@@ -157,5 +164,11 @@ config REGULATOR_TPS6507X
three step-down converters and two general-purpose LDO voltage regulators.
It supports TI's software based Class-2 SmartReflex implementation.
+config REGULATOR_88PM8607
+ bool "Marvell 88PM8607 Power regulators"
+ depends on MFD_88PM8607=y
+ help
+ This driver supports 88PM8607 voltage regulator chips.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 4257a868377..b3c806c7941 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -11,7 +11,8 @@ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
-obj-$(CONFIG_REGULATOR_TWL4030) += twl4030-regulator.o
+obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
+obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
@@ -20,10 +21,11 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
-obj-$(CONFIG_REGULATOR_MC13783) += mc13783.o
+obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 49aeee823a2..b349db4504b 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -81,7 +81,7 @@ static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = {
#define LDO_C_VOLTAGE 2650000
#define LDO_D_VOLTAGE 2650000
-static const int const ldo_e_buck_typ_voltages[] = {
+static const int ldo_e_buck_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -91,7 +91,7 @@ static const int const ldo_e_buck_typ_voltages[] = {
900000,
};
-static const int const ldo_f_typ_voltages[] = {
+static const int ldo_f_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -102,21 +102,21 @@ static const int const ldo_f_typ_voltages[] = {
2650000,
};
-static const int const ldo_g_typ_voltages[] = {
+static const int ldo_g_typ_voltages[] = {
2850000,
2750000,
1800000,
1500000,
};
-static const int const ldo_h_typ_voltages[] = {
+static const int ldo_h_typ_voltages[] = {
2750000,
1800000,
1500000,
1200000,
};
-static const int const ldo_k_typ_voltages[] = {
+static const int ldo_k_typ_voltages[] = {
2750000,
1800000,
};
@@ -241,24 +241,12 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
* LDO D is a special regulator. When it is disabled, the entire
* system is shut down. So this is handled specially.
*/
+ pr_info("Called ab3100_disable_regulator\n");
if (abreg->regreg == AB3100_LDO_D) {
- int i;
-
dev_info(&reg->dev, "disabling LDO D - shut down system\n");
- /*
- * Set regulators to default values, ignore any errors,
- * we're going DOWN
- */
- for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
- (void) ab3100_set_register_interruptible(abreg->ab3100,
- ab3100_reg_init_order[i],
- abreg->plfdata->reg_initvals[i]);
- }
-
/* Setting LDO D to 0x00 cuts the power to the SoC */
return ab3100_set_register_interruptible(abreg->ab3100,
AB3100_LDO_D, 0x00U);
-
}
/*
@@ -607,13 +595,6 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev)
}
}
- if (err) {
- dev_err(&pdev->dev,
- "LDO D regulator initialization failed with error %d\n",
- err);
- return err;
- }
-
/* Register the regulators */
for (i = 0; i < AB3100_NUM_REGULATORS; i++) {
struct ab3100_regulator *reg = &ab3100_regulators[i];
@@ -688,7 +669,7 @@ static __init int ab3100_regulators_init(void)
static __exit void ab3100_regulators_exit(void)
{
- platform_driver_register(&ab3100_regulators_driver);
+ platform_driver_unregister(&ab3100_regulators_driver);
}
subsys_initcall(ab3100_regulators_init);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index efe568deda1..686ef270ecf 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -66,6 +66,16 @@ static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+static const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ if (rdev->constraints && rdev->constraints->name)
+ return rdev->constraints->name;
+ else if (rdev->desc->name)
+ return rdev->desc->name;
+ else
+ return "";
+}
+
/* gets the regulator for a given consumer device */
static struct regulator *get_device_regulator(struct device *dev)
{
@@ -96,12 +106,12 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
@@ -124,12 +134,12 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
@@ -159,17 +169,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
if (!(rdev->constraints->valid_modes_mask & mode)) {
printk(KERN_ERR "%s: invalid mode %x for %s\n",
- __func__, mode, rdev->desc->name);
+ __func__, mode, rdev_get_name(rdev));
return -EINVAL;
}
return 0;
@@ -180,12 +190,12 @@ static int regulator_check_drms(struct regulator_dev *rdev)
{
if (!rdev->constraints) {
printk(KERN_ERR "%s: no constraints for %s\n", __func__,
- rdev->desc->name);
+ rdev_get_name(rdev));
return -ENODEV;
}
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
printk(KERN_ERR "%s: operation not allowed for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return -EPERM;
}
return 0;
@@ -230,16 +240,8 @@ static ssize_t regulator_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
- const char *name;
- if (rdev->constraints && rdev->constraints->name)
- name = rdev->constraints->name;
- else if (rdev->desc->name)
- name = rdev->desc->name;
- else
- name = "";
-
- return sprintf(buf, "%s\n", name);
+ return sprintf(buf, "%s\n", rdev_get_name(rdev));
}
static ssize_t regulator_print_opmode(char *buf, int mode)
@@ -388,7 +390,7 @@ static ssize_t regulator_total_uA_show(struct device *dev,
mutex_lock(&rdev->mutex);
list_for_each_entry(regulator, &rdev->consumer_list, list)
- uA += regulator->uA_load;
+ uA += regulator->uA_load;
mutex_unlock(&rdev->mutex);
return sprintf(buf, "%d\n", uA);
}
@@ -563,7 +565,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
/* calc total requested load */
list_for_each_entry(sibling, &rdev->consumer_list, list)
- current_uA += sibling->uA_load;
+ current_uA += sibling->uA_load;
/* now get the optimum mode for our new total regulator load */
mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
@@ -579,10 +581,29 @@ static int suspend_set_state(struct regulator_dev *rdev,
struct regulator_state *rstate)
{
int ret = 0;
+ bool can_set_state;
- /* enable & disable are mandatory for suspend control */
- if (!rdev->desc->ops->set_suspend_enable ||
- !rdev->desc->ops->set_suspend_disable) {
+ can_set_state = rdev->desc->ops->set_suspend_enable &&
+ rdev->desc->ops->set_suspend_disable;
+
+ /* If we have no suspend mode configuration, don't set anything;
+ * only warn if the driver actually makes the suspend mode
+ * configurable.
+ */
+ if (!rstate->enabled && !rstate->disabled) {
+ if (can_set_state)
+ printk(KERN_WARNING "%s: No configuration for %s\n",
+ __func__, rdev_get_name(rdev));
+ return 0;
+ }
+
+ if (rstate->enabled && rstate->disabled) {
+ printk(KERN_ERR "%s: invalid configuration for %s\n",
+ __func__, rdev_get_name(rdev));
+ return -EINVAL;
+ }
+
+ if (!can_set_state) {
printk(KERN_ERR "%s: no way to set suspend state\n",
__func__);
return -EINVAL;
@@ -641,25 +662,43 @@ static void print_constraints(struct regulator_dev *rdev)
{
struct regulation_constraints *constraints = rdev->constraints;
char buf[80];
- int count;
+ int count = 0;
+ int ret;
- if (rdev->desc->type == REGULATOR_VOLTAGE) {
+ if (constraints->min_uV && constraints->max_uV) {
if (constraints->min_uV == constraints->max_uV)
- count = sprintf(buf, "%d mV ",
- constraints->min_uV / 1000);
+ count += sprintf(buf + count, "%d mV ",
+ constraints->min_uV / 1000);
else
- count = sprintf(buf, "%d <--> %d mV ",
- constraints->min_uV / 1000,
- constraints->max_uV / 1000);
- } else {
+ count += sprintf(buf + count, "%d <--> %d mV ",
+ constraints->min_uV / 1000,
+ constraints->max_uV / 1000);
+ }
+
+ if (!constraints->min_uV ||
+ constraints->min_uV != constraints->max_uV) {
+ ret = _regulator_get_voltage(rdev);
+ if (ret > 0)
+ count += sprintf(buf + count, "at %d mV ", ret / 1000);
+ }
+
+ if (constraints->min_uA && constraints->max_uA) {
if (constraints->min_uA == constraints->max_uA)
- count = sprintf(buf, "%d mA ",
- constraints->min_uA / 1000);
+ count += sprintf(buf + count, "%d mA ",
+ constraints->min_uA / 1000);
else
- count = sprintf(buf, "%d <--> %d mA ",
- constraints->min_uA / 1000,
- constraints->max_uA / 1000);
+ count += sprintf(buf + count, "%d <--> %d mA ",
+ constraints->min_uA / 1000,
+ constraints->max_uA / 1000);
}
+
+ if (!constraints->min_uA ||
+ constraints->min_uA != constraints->max_uA) {
+ ret = _regulator_get_current_limit(rdev);
+ if (ret > 0)
+ count += sprintf(buf + count, "at %d uA ", ret / 1000);
+ }
+
if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
count += sprintf(buf + count, "fast ");
if (constraints->valid_modes_mask & REGULATOR_MODE_NORMAL)
@@ -669,33 +708,30 @@ static void print_constraints(struct regulator_dev *rdev)
if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
count += sprintf(buf + count, "standby");
- printk(KERN_INFO "regulator: %s: %s\n", rdev->desc->name, buf);
+ printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
}
-/**
- * set_machine_constraints - sets regulator constraints
- * @rdev: regulator source
- * @constraints: constraints to apply
- *
- * Allows platform initialisation code to define and constrain
- * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
- * Constraints *must* be set by platform code in order for some
- * regulator operations to proceed i.e. set_voltage, set_current_limit,
- * set_mode.
- */
-static int set_machine_constraints(struct regulator_dev *rdev,
+static int machine_constraints_voltage(struct regulator_dev *rdev,
struct regulation_constraints *constraints)
{
- int ret = 0;
- const char *name;
struct regulator_ops *ops = rdev->desc->ops;
+ const char *name = rdev_get_name(rdev);
+ int ret;
- if (constraints->name)
- name = constraints->name;
- else if (rdev->desc->name)
- name = rdev->desc->name;
- else
- name = "regulator";
+ /* do we need to apply the constraint voltage */
+ if (rdev->constraints->apply_uV &&
+ rdev->constraints->min_uV == rdev->constraints->max_uV &&
+ ops->set_voltage) {
+ ret = ops->set_voltage(rdev,
+ rdev->constraints->min_uV, rdev->constraints->max_uV);
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
+ __func__,
+ rdev->constraints->min_uV, name);
+ rdev->constraints = NULL;
+ return ret;
+ }
+ }
/* constrain machine-level voltage specs to fit
* the actual range supported by this regulator.
@@ -719,14 +755,13 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* voltage constraints are optional */
if ((cmin == 0) && (cmax == 0))
- goto out;
+ return 0;
/* else require explicit machine-level constraints */
if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
pr_err("%s: %s '%s' voltage constraints\n",
__func__, "invalid", name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* initial: [cmin..cmax] valid, [min_uV..max_uV] not */
@@ -748,8 +783,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
if (max_uV < min_uV) {
pr_err("%s: %s '%s' voltage constraints\n",
__func__, "unsupportable", name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* use regulator's subset of machine constraints */
@@ -767,22 +801,34 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
+ return 0;
+}
+
+/**
+ * set_machine_constraints - sets regulator constraints
+ * @rdev: regulator source
+ * @constraints: constraints to apply
+ *
+ * Allows platform initialisation code to define and constrain
+ * regulator circuits e.g. valid voltage/current ranges, etc. NOTE:
+ * Constraints *must* be set by platform code in order for some
+ * regulator operations to proceed i.e. set_voltage, set_current_limit,
+ * set_mode.
+ */
+static int set_machine_constraints(struct regulator_dev *rdev,
+ struct regulation_constraints *constraints)
+{
+ int ret = 0;
+ const char *name;
+ struct regulator_ops *ops = rdev->desc->ops;
+
rdev->constraints = constraints;
- /* do we need to apply the constraint voltage */
- if (rdev->constraints->apply_uV &&
- rdev->constraints->min_uV == rdev->constraints->max_uV &&
- ops->set_voltage) {
- ret = ops->set_voltage(rdev,
- rdev->constraints->min_uV, rdev->constraints->max_uV);
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
- __func__,
- rdev->constraints->min_uV, name);
- rdev->constraints = NULL;
- goto out;
- }
- }
+ name = rdev_get_name(rdev);
+
+ ret = machine_constraints_voltage(rdev, constraints);
+ if (ret != 0)
+ goto out;
/* do we need to setup our suspend state */
if (constraints->initial_state) {
@@ -903,7 +949,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
dev_name(&node->regulator->dev),
node->regulator->desc->name,
supply,
- dev_name(&rdev->dev), rdev->desc->name);
+ dev_name(&rdev->dev), rdev_get_name(rdev));
return -EBUSY;
}
@@ -1212,7 +1258,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
ret = _regulator_enable(rdev->supply);
if (ret < 0) {
printk(KERN_ERR "%s: failed to enable %s: %d\n",
- __func__, rdev->desc->name, ret);
+ __func__, rdev_get_name(rdev), ret);
return ret;
}
}
@@ -1238,7 +1284,7 @@ static int _regulator_enable(struct regulator_dev *rdev)
}
} else if (ret < 0) {
printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
- __func__, rdev->desc->name, ret);
+ __func__, rdev_get_name(rdev), ret);
return ret;
}
/* Fallthrough on positive return values - already enabled */
@@ -1279,7 +1325,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n",
- rdev->desc->name))
+ rdev_get_name(rdev)))
return -EIO;
/* are we the last user and permitted to disable ? */
@@ -1292,7 +1338,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
ret = rdev->desc->ops->disable(rdev);
if (ret < 0) {
printk(KERN_ERR "%s: failed to disable %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return ret;
}
}
@@ -1349,7 +1395,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
ret = rdev->desc->ops->disable(rdev);
if (ret < 0) {
printk(KERN_ERR "%s: failed to force disable %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
return ret;
}
/* notify other consumers that power has been forced off */
@@ -1766,7 +1812,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
output_uV = rdev->desc->ops->get_voltage(rdev);
if (output_uV <= 0) {
printk(KERN_ERR "%s: invalid output voltage found for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
goto out;
}
@@ -1777,13 +1823,13 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
printk(KERN_ERR "%s: invalid input voltage found for %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
goto out;
}
/* calc total requested load for this regulator */
list_for_each_entry(consumer, &rdev->consumer_list, list)
- total_uA_load += consumer->uA_load;
+ total_uA_load += consumer->uA_load;
mode = rdev->desc->ops->get_optimum_mode(rdev,
input_uV, output_uV,
@@ -1791,7 +1837,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
ret = regulator_check_mode(rdev, mode);
if (ret < 0) {
printk(KERN_ERR "%s: failed to get optimum mode for %s @"
- " %d uA %d -> %d uV\n", __func__, rdev->desc->name,
+ " %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
total_uA_load, input_uV, output_uV);
goto out;
}
@@ -1799,7 +1845,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
ret = rdev->desc->ops->set_mode(rdev, mode);
if (ret < 0) {
printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
- __func__, mode, rdev->desc->name);
+ __func__, mode, rdev_get_name(rdev));
goto out;
}
ret = mode;
@@ -1852,9 +1898,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
/* now notify regulator we supply */
list_for_each_entry(_rdev, &rdev->supply_list, slist) {
- mutex_lock(&_rdev->mutex);
- _notifier_call_chain(_rdev, event, data);
- mutex_unlock(&_rdev->mutex);
+ mutex_lock(&_rdev->mutex);
+ _notifier_call_chain(_rdev, event, data);
+ mutex_unlock(&_rdev->mutex);
}
}
@@ -1885,9 +1931,9 @@ int regulator_bulk_get(struct device *dev, int num_consumers,
consumers[i].consumer = regulator_get(dev,
consumers[i].supply);
if (IS_ERR(consumers[i].consumer)) {
- dev_err(dev, "Failed to get supply '%s'\n",
- consumers[i].supply);
ret = PTR_ERR(consumers[i].consumer);
+ dev_err(dev, "Failed to get supply '%s': %d\n",
+ consumers[i].supply, ret);
consumers[i].consumer = NULL;
goto err;
}
@@ -1930,8 +1976,8 @@ int regulator_bulk_enable(int num_consumers,
return 0;
err:
- printk(KERN_ERR "Failed to enable %s\n", consumers[i].supply);
- for (i = 0; i < num_consumers; i++)
+ printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
+ for (--i; i >= 0; --i)
regulator_disable(consumers[i].consumer);
return ret;
@@ -1965,8 +2011,9 @@ int regulator_bulk_disable(int num_consumers,
return 0;
err:
- printk(KERN_ERR "Failed to disable %s\n", consumers[i].supply);
- for (i = 0; i < num_consumers; i++)
+ printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
+ ret);
+ for (--i; i >= 0; --i)
regulator_enable(consumers[i].consumer);
return ret;
@@ -2316,7 +2363,7 @@ int regulator_suspend_prepare(suspend_state_t state)
if (ret < 0) {
printk(KERN_ERR "%s: failed to prepare %s\n",
- __func__, rdev->desc->name);
+ __func__, rdev_get_name(rdev));
goto out;
}
}
@@ -2429,12 +2476,7 @@ static int __init regulator_init_complete(void)
ops = rdev->desc->ops;
c = rdev->constraints;
- if (c && c->name)
- name = c->name;
- else if (rdev->desc->name)
- name = rdev->desc->name;
- else
- name = "regulator";
+ name = rdev_get_name(rdev);
if (!ops->disable || (c && c->always_on))
continue;
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index aa224d936e0..f8c4661a7a8 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -331,7 +331,7 @@ static int da9034_get_ldo12_voltage(struct regulator_dev *rdev)
static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
unsigned selector)
{
- if (selector > ARRAY_SIZE(da9034_ldo12_data))
+ if (selector >= ARRAY_SIZE(da9034_ldo12_data))
return -EINVAL;
return da9034_ldo12_data[selector] * 1000;
}
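
The one-character fix above matters because the selector indexes the voltage table directly: an N-entry table has valid selectors 0..N-1, so the old "selector > ARRAY_SIZE(...)" test let selector == N slip through and read one element past the end. A stand-alone sketch of the corrected bound check (plain userspace C; the table values are placeholders, not the driver's real millivolt table):

/* Bound-check sketch: valid selectors for an N-entry table are 0..N-1. */
#include <stdio.h>

static const int ldo12_table_mV[] = { 1700, 1750, 1800, 1850 };	/* placeholders */
#define TABLE_LEN (sizeof(ldo12_table_mV) / sizeof(ldo12_table_mV[0]))

static int list_voltage(unsigned int selector)
{
	if (selector >= TABLE_LEN)	/* ">" alone would accept selector == TABLE_LEN */
		return -1;
	return ldo12_table_mV[selector] * 1000;
}

int main(void)
{
	printf("%d\n", list_voltage(3));	/* 1850000 */
	printf("%d\n", list_voltage(4));	/* -1: one past the end is now rejected */
	return 0;
}
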
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 7803a320543..76d08c282f9 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -446,8 +446,8 @@ static int setup_regulators(struct lp3971 *lp3971,
lp3971->rdev[i] = regulator_register(&regulators[id],
lp3971->dev, pdata->regulators[i].initdata, lp3971);
- err = IS_ERR(lp3971->rdev[i]);
- if (err) {
+ if (IS_ERR(lp3971->rdev[i])) {
+ err = PTR_ERR(lp3971->rdev[i]);
dev_err(lp3971->dev, "regulator init failed: %d\n",
err);
goto error;
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
new file mode 100644
index 00000000000..acc2fb7b608
--- /dev/null
+++ b/drivers/regulator/max8660.c
@@ -0,0 +1,510 @@
+/*
+ * max8660.c -- Voltage regulation for the Maxim 8660/8661
+ *
+ * based on max1586.c and wm8400-regulator.c
+ *
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Some info:
+ *
+ * Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8660-MAX8661.pdf
+ *
+ * This chip is a bit nasty because it is a write-only device. Thus, the driver
+ * uses shadow registers to keep track of its values. The main problem appears
+ * to be the initialization: When Linux boots up, we cannot know if the chip is
+ * in the default state or not, so we would have to pass such information in
+ * platform_data. As this adds a bit of complexity to the driver, it is left
+ * out until it is really needed.
+ *
+ * The [A|S|M]DTV1 registers are currently not used; only the [A|S|M]DTV2 ones are.
+ *
+ * Once the driver is feature complete, it might be worth checking whether one
+ * set of functions for V3-V7 is sufficient. For maximum flexibility during
+ * development, they are kept separate for now.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/max8660.h>
+
+#define MAX8660_DCDC_MIN_UV 725000
+#define MAX8660_DCDC_MAX_UV 1800000
+#define MAX8660_DCDC_STEP 25000
+#define MAX8660_DCDC_MAX_SEL 0x2b
+
+#define MAX8660_LDO5_MIN_UV 1700000
+#define MAX8660_LDO5_MAX_UV 2000000
+#define MAX8660_LDO5_STEP 25000
+#define MAX8660_LDO5_MAX_SEL 0x0c
+
+#define MAX8660_LDO67_MIN_UV 1800000
+#define MAX8660_LDO67_MAX_UV 3300000
+#define MAX8660_LDO67_STEP 100000
+#define MAX8660_LDO67_MAX_SEL 0x0f
+
+enum {
+ MAX8660_OVER1,
+ MAX8660_OVER2,
+ MAX8660_VCC1,
+ MAX8660_ADTV1,
+ MAX8660_ADTV2,
+ MAX8660_SDTV1,
+ MAX8660_SDTV2,
+ MAX8660_MDTV1,
+ MAX8660_MDTV2,
+ MAX8660_L12VCR,
+ MAX8660_FPWM,
+ MAX8660_N_REGS, /* not a real register */
+};
+
+struct max8660 {
+ struct i2c_client *client;
+ u8 shadow_regs[MAX8660_N_REGS]; /* as chip is write only */
+ struct regulator_dev *rdev[];
+};
+
+static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val)
+{
+ static const u8 max8660_addresses[MAX8660_N_REGS] =
+ { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 };
+
+ int ret;
+ u8 reg_val = (max8660->shadow_regs[reg] & mask) | val;
+ dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n",
+ max8660_addresses[reg], reg_val);
+
+ ret = i2c_smbus_write_byte_data(max8660->client,
+ max8660_addresses[reg], reg_val);
+ if (ret == 0)
+ max8660->shadow_regs[reg] = reg_val;
+
+ return ret;
+}
+
+
+/*
+ * DCDC functions
+ */
+
+static int max8660_dcdc_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 val = max8660->shadow_regs[MAX8660_OVER1];
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+ return !!(val & mask);
+}
+
+static int max8660_dcdc_enable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+ return max8660_write(max8660, MAX8660_OVER1, 0xff, bit);
+}
+
+static int max8660_dcdc_disable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4;
+ return max8660_write(max8660, MAX8660_OVER1, mask, 0);
+}
+
+static int max8660_dcdc_list(struct regulator_dev *rdev, unsigned selector)
+{
+ if (selector > MAX8660_DCDC_MAX_SEL)
+ return -EINVAL;
+ return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
+}
+
+static int max8660_dcdc_get(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
+ u8 selector = max8660->shadow_regs[reg];
+ return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
+}
+
+static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 reg, selector, bits;
+ int ret;
+
+ if (min_uV < MAX8660_DCDC_MIN_UV || min_uV > MAX8660_DCDC_MAX_UV)
+ return -EINVAL;
+ if (max_uV < MAX8660_DCDC_MIN_UV || max_uV > MAX8660_DCDC_MAX_UV)
+ return -EINVAL;
+
+ selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
+ / MAX8660_DCDC_STEP;
+
+ ret = max8660_dcdc_list(rdev, selector);
+ if (ret < 0 || ret > max_uV)
+ return -EINVAL;
+
+ reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
+ ret = max8660_write(max8660, reg, 0, selector);
+ if (ret)
+ return ret;
+
+ /* Select target voltage register and activate regulation */
+ bits = (rdev_get_id(rdev) == MAX8660_V3) ? 0x03 : 0x30;
+ return max8660_write(max8660, MAX8660_VCC1, 0xff, bits);
+}
+
+static struct regulator_ops max8660_dcdc_ops = {
+ .is_enabled = max8660_dcdc_is_enabled,
+ .list_voltage = max8660_dcdc_list,
+ .set_voltage = max8660_dcdc_set,
+ .get_voltage = max8660_dcdc_get,
+};
+
+
+/*
+ * LDO5 functions
+ */
+
+static int max8660_ldo5_list(struct regulator_dev *rdev, unsigned selector)
+{
+ if (selector > MAX8660_LDO5_MAX_SEL)
+ return -EINVAL;
+ return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
+}
+
+static int max8660_ldo5_get(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 selector = max8660->shadow_regs[MAX8660_MDTV2];
+
+ return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
+}
+
+static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 selector;
+ int ret;
+
+ if (min_uV < MAX8660_LDO5_MIN_UV || min_uV > MAX8660_LDO5_MAX_UV)
+ return -EINVAL;
+ if (max_uV < MAX8660_LDO5_MIN_UV || max_uV > MAX8660_LDO5_MAX_UV)
+ return -EINVAL;
+
+ selector = (min_uV - (MAX8660_LDO5_MIN_UV - MAX8660_LDO5_STEP + 1))
+ / MAX8660_LDO5_STEP;
+ ret = max8660_ldo5_list(rdev, selector);
+ if (ret < 0 || ret > max_uV)
+ return -EINVAL;
+
+ ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
+ if (ret)
+ return ret;
+
+ /* Select target voltage register and activate regulation */
+ return max8660_write(max8660, MAX8660_VCC1, 0xff, 0xc0);
+}
+
+static struct regulator_ops max8660_ldo5_ops = {
+ .list_voltage = max8660_ldo5_list,
+ .set_voltage = max8660_ldo5_set,
+ .get_voltage = max8660_ldo5_get,
+};
+
+
+/*
+ * LDO67 functions
+ */
+
+static int max8660_ldo67_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 val = max8660->shadow_regs[MAX8660_OVER2];
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+ return !!(val & mask);
+}
+
+static int max8660_ldo67_enable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+ return max8660_write(max8660, MAX8660_OVER2, 0xff, bit);
+}
+
+static int max8660_ldo67_disable(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4;
+ return max8660_write(max8660, MAX8660_OVER2, mask, 0);
+}
+
+static int max8660_ldo67_list(struct regulator_dev *rdev, unsigned selector)
+{
+ if (selector > MAX8660_LDO67_MAX_SEL)
+ return -EINVAL;
+ return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
+}
+
+static int max8660_ldo67_get(struct regulator_dev *rdev)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
+ u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
+
+ return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
+}
+
+static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+{
+ struct max8660 *max8660 = rdev_get_drvdata(rdev);
+ u8 selector;
+ int ret;
+
+ if (min_uV < MAX8660_LDO67_MIN_UV || min_uV > MAX8660_LDO67_MAX_UV)
+ return -EINVAL;
+ if (max_uV < MAX8660_LDO67_MIN_UV || max_uV > MAX8660_LDO67_MAX_UV)
+ return -EINVAL;
+
+ selector = (min_uV - (MAX8660_LDO67_MIN_UV - MAX8660_LDO67_STEP + 1))
+ / MAX8660_LDO67_STEP;
+
+ ret = max8660_ldo67_list(rdev, selector);
+ if (ret < 0 || ret > max_uV)
+ return -EINVAL;
+
+ if (rdev_get_id(rdev) == MAX8660_V6)
+ return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
+ else
+ return max8660_write(max8660, MAX8660_L12VCR, 0x0f, selector << 4);
+}
+
+static struct regulator_ops max8660_ldo67_ops = {
+ .is_enabled = max8660_ldo67_is_enabled,
+ .enable = max8660_ldo67_enable,
+ .disable = max8660_ldo67_disable,
+ .list_voltage = max8660_ldo67_list,
+ .get_voltage = max8660_ldo67_get,
+ .set_voltage = max8660_ldo67_set,
+};
+
+static struct regulator_desc max8660_reg[] = {
+ {
+ .name = "V3(DCDC)",
+ .id = MAX8660_V3,
+ .ops = &max8660_dcdc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V4(DCDC)",
+ .id = MAX8660_V4,
+ .ops = &max8660_dcdc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_DCDC_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V5(LDO)",
+ .id = MAX8660_V5,
+ .ops = &max8660_ldo5_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_LDO5_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V6(LDO)",
+ .id = MAX8660_V6,
+ .ops = &max8660_ldo67_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "V7(LDO)",
+ .id = MAX8660_V7,
+ .ops = &max8660_ldo67_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = MAX8660_LDO67_MAX_SEL + 1,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int max8660_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ struct regulator_dev **rdev;
+ struct max8660_platform_data *pdata = client->dev.platform_data;
+ struct max8660 *max8660;
+ int boot_on, i, id, ret = -EINVAL;
+
+ if (pdata->num_subdevs > MAX8660_V_END) {
+ dev_err(&client->dev, "Too many regulators found!\n");
+ goto out;
+ }
+
+ max8660 = kzalloc(sizeof(struct max8660) +
+ sizeof(struct regulator_dev *) * MAX8660_V_END,
+ GFP_KERNEL);
+ if (!max8660) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ max8660->client = client;
+ rdev = max8660->rdev;
+
+ if (pdata->en34_is_high) {
+ /* Simulate always on */
+ max8660->shadow_regs[MAX8660_OVER1] = 5;
+ } else {
+ /* Otherwise devices can be toggled via software */
+ max8660_dcdc_ops.enable = max8660_dcdc_enable;
+ max8660_dcdc_ops.disable = max8660_dcdc_disable;
+ }
+
+ /*
+ * First, set up shadow registers to prevent glitches. As some
+ * registers are shared between regulators, everything must be properly
+ * set up for all regulators in advance.
+ */
+ max8660->shadow_regs[MAX8660_ADTV1] =
+ max8660->shadow_regs[MAX8660_ADTV2] =
+ max8660->shadow_regs[MAX8660_SDTV1] =
+ max8660->shadow_regs[MAX8660_SDTV2] = 0x1b;
+ max8660->shadow_regs[MAX8660_MDTV1] =
+ max8660->shadow_regs[MAX8660_MDTV2] = 0x04;
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+
+ if (!pdata->subdevs[i].platform_data)
+ goto err_free;
+
+ boot_on = pdata->subdevs[i].platform_data->constraints.boot_on;
+
+ switch (pdata->subdevs[i].id) {
+ case MAX8660_V3:
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER1] |= 1;
+ break;
+
+ case MAX8660_V4:
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER1] |= 4;
+ break;
+
+ case MAX8660_V5:
+ break;
+
+ case MAX8660_V6:
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER2] |= 2;
+ break;
+
+ case MAX8660_V7:
+ if (!strcmp(i2c_id->name, "max8661")) {
+ dev_err(&client->dev, "Regulator not on this chip!\n");
+ goto err_free;
+ }
+
+ if (boot_on)
+ max8660->shadow_regs[MAX8660_OVER2] |= 4;
+ break;
+
+ default:
+ dev_err(&client->dev, "invalid regulator %s\n",
+ pdata->subdevs[i].name);
+ goto err_free;
+ }
+ }
+
+ /* Finally register devices */
+ for (i = 0; i < pdata->num_subdevs; i++) {
+
+ id = pdata->subdevs[i].id;
+
+ rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
+ pdata->subdevs[i].platform_data,
+ max8660);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(&client->dev, "failed to register %s\n",
+ max8660_reg[id].name);
+ goto err_unregister;
+ }
+ }
+
+ i2c_set_clientdata(client, rdev);
+ dev_info(&client->dev, "Maxim 8660/8661 regulator driver loaded\n");
+ return 0;
+
+err_unregister:
+ while (--i >= 0)
+ regulator_unregister(rdev[i]);
+err_free:
+ kfree(max8660);
+out:
+ return ret;
+}
+
+static int max8660_remove(struct i2c_client *client)
+{
+ struct regulator_dev **rdev = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < MAX8660_V_END; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+ kfree(rdev);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8660_id[] = {
+ { "max8660", 0 },
+ { "max8661", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8660_id);
+
+static struct i2c_driver max8660_driver = {
+ .probe = max8660_probe,
+ .remove = max8660_remove,
+ .driver = {
+ .name = "max8660",
+ },
+ .id_table = max8660_id,
+};
+
+static int __init max8660_init(void)
+{
+ return i2c_add_driver(&max8660_driver);
+}
+subsys_initcall(max8660_init);
+
+static void __exit max8660_exit(void)
+{
+ i2c_del_driver(&max8660_driver);
+}
+module_exit(max8660_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MAXIM 8660/8661 voltage regulator driver");
+MODULE_AUTHOR("Wolfram Sang");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
new file mode 100644
index 00000000000..39c49530004
--- /dev/null
+++ b/drivers/regulator/mc13783-regulator.c
@@ -0,0 +1,245 @@
+/*
+ * Regulator Driver for Freescale MC13783 PMIC
+ *
+ * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13783.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#define MC13783_REG_SWITCHERS4 28
+#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
+
+#define MC13783_REG_SWITCHERS5 29
+#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
+
+#define MC13783_REG_REGULATORMODE0 32
+#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
+#define MC13783_REG_REGULATORMODE0_VIOHIEN (1 << 3)
+#define MC13783_REG_REGULATORMODE0_VIOLOEN (1 << 6)
+#define MC13783_REG_REGULATORMODE0_VDIGEN (1 << 9)
+#define MC13783_REG_REGULATORMODE0_VGENEN (1 << 12)
+#define MC13783_REG_REGULATORMODE0_VRFDIGEN (1 << 15)
+#define MC13783_REG_REGULATORMODE0_VRFREFEN (1 << 18)
+#define MC13783_REG_REGULATORMODE0_VRFCPEN (1 << 21)
+
+#define MC13783_REG_REGULATORMODE1 33
+#define MC13783_REG_REGULATORMODE1_VSIMEN (1 << 0)
+#define MC13783_REG_REGULATORMODE1_VESIMEN (1 << 3)
+#define MC13783_REG_REGULATORMODE1_VCAMEN (1 << 6)
+#define MC13783_REG_REGULATORMODE1_VRFBGEN (1 << 9)
+#define MC13783_REG_REGULATORMODE1_VVIBEN (1 << 11)
+#define MC13783_REG_REGULATORMODE1_VRF1EN (1 << 12)
+#define MC13783_REG_REGULATORMODE1_VRF2EN (1 << 15)
+#define MC13783_REG_REGULATORMODE1_VMMC1EN (1 << 18)
+#define MC13783_REG_REGULATORMODE1_VMMC2EN (1 << 21)
+
+#define MC13783_REG_POWERMISC 34
+#define MC13783_REG_POWERMISC_GPO1EN (1 << 6)
+#define MC13783_REG_POWERMISC_GPO2EN (1 << 8)
+#define MC13783_REG_POWERMISC_GPO3EN (1 << 10)
+#define MC13783_REG_POWERMISC_GPO4EN (1 << 12)
+
+struct mc13783_regulator {
+ struct regulator_desc desc;
+ int reg;
+ int enable_bit;
+};
+
+static struct regulator_ops mc13783_regulator_ops;
+
+#define MC13783_DEFINE(prefix, _name, _reg) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .ops = &mc13783_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ }
+
+#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg)
+#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg)
+
+static struct mc13783_regulator mc13783_regulators[] = {
+ MC13783_DEFINE_SW(SW3, SWITCHERS5),
+ MC13783_DEFINE_SW(PLL, SWITCHERS4),
+
+ MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VDIG, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VGEN, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0),
+ MC13783_DEFINE_REGU(VSIM, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VESIM, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VCAM, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VVIB, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VRF1, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VRF2, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1),
+ MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1),
+ MC13783_DEFINE_REGU(GPO1, POWERMISC),
+ MC13783_DEFINE_REGU(GPO2, POWERMISC),
+ MC13783_DEFINE_REGU(GPO3, POWERMISC),
+ MC13783_DEFINE_REGU(GPO4, POWERMISC),
+};
+
+struct mc13783_regulator_priv {
+ struct mc13783 *mc13783;
+ struct regulator_dev *regulators[];
+};
+
+static int mc13783_regulator_enable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
+ mc13783_regulators[id].enable_bit,
+ mc13783_regulators[id].enable_bit);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_regulator_disable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
+ mc13783_regulators[id].enable_bit, 0);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ return (val & mc13783_regulators[id].enable_bit) != 0;
+}
+
+static struct regulator_ops mc13783_regulator_ops = {
+ .enable = mc13783_regulator_enable,
+ .disable = mc13783_regulator_disable,
+ .is_enabled = mc13783_regulator_is_enabled,
+};
+
+static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
+{
+ struct mc13783_regulator_priv *priv;
+ struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
+ struct mc13783_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct mc13783_regulator_init_data *init_data;
+ int i, ret;
+
+ dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
+
+ priv = kzalloc(sizeof(*priv) +
+ pdata->num_regulators * sizeof(priv->regulators[0]),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mc13783 = mc13783;
+
+ for (i = 0; i < pdata->num_regulators; i++) {
+ init_data = &pdata->regulators[i];
+ priv->regulators[i] = regulator_register(
+ &mc13783_regulators[init_data->id].desc,
+ &pdev->dev, init_data->init_data, priv);
+
+ if (IS_ERR(priv->regulators[i])) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ mc13783_regulators[i].desc.name);
+ ret = PTR_ERR(priv->regulators[i]);
+ goto err;
+ }
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+err:
+ while (--i >= 0)
+ regulator_unregister(priv->regulators[i]);
+
+ kfree(priv);
+
+ return ret;
+}
+
+static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
+{
+ struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
+ struct mc13783_regulator_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < pdata->num_regulators; i++)
+ regulator_unregister(priv->regulators[i]);
+
+ return 0;
+}
+
+static struct platform_driver mc13783_regulator_driver = {
+ .driver = {
+ .name = "mc13783-regulator",
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(mc13783_regulator_remove),
+ .probe = mc13783_regulator_probe,
+};
+
+static int __init mc13783_regulator_init(void)
+{
+ return platform_driver_register(&mc13783_regulator_driver);
+}
+subsys_initcall(mc13783_regulator_init);
+
+static void __exit mc13783_regulator_exit(void)
+{
+ platform_driver_unregister(&mc13783_regulator_driver);
+}
+module_exit(mc13783_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
+MODULE_ALIAS("platform:mc13783-regulator");
diff --git a/drivers/regulator/mc13783.c b/drivers/regulator/mc13783.c
deleted file mode 100644
index 710211f6744..00000000000
--- a/drivers/regulator/mc13783.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Regulator Driver for Freescale MC13783 PMIC
- *
- * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/mfd/mc13783-private.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/driver.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/mc13783.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-
-struct mc13783_regulator {
- struct regulator_desc desc;
- int reg;
- int enable_bit;
-};
-
-static struct regulator_ops mc13783_regulator_ops;
-
-static struct mc13783_regulator mc13783_regulators[] = {
- [MC13783_SW_SW3] = {
- .desc = {
- .name = "SW_SW3",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_SW_SW3,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_SWITCHERS_5,
- .enable_bit = MC13783_SWCTRL_SW3_EN,
- },
- [MC13783_SW_PLL] = {
- .desc = {
- .name = "SW_PLL",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_SW_PLL,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_SWITCHERS_4,
- .enable_bit = MC13783_SWCTRL_PLL_EN,
- },
- [MC13783_REGU_VAUDIO] = {
- .desc = {
- .name = "REGU_VAUDIO",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VAUDIO,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VAUDIO_EN,
- },
- [MC13783_REGU_VIOHI] = {
- .desc = {
- .name = "REGU_VIOHI",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VIOHI,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VIOHI_EN,
- },
- [MC13783_REGU_VIOLO] = {
- .desc = {
- .name = "REGU_VIOLO",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VIOLO,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VIOLO_EN,
- },
- [MC13783_REGU_VDIG] = {
- .desc = {
- .name = "REGU_VDIG",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VDIG,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VDIG_EN,
- },
- [MC13783_REGU_VGEN] = {
- .desc = {
- .name = "REGU_VGEN",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VGEN,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VGEN_EN,
- },
- [MC13783_REGU_VRFDIG] = {
- .desc = {
- .name = "REGU_VRFDIG",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFDIG,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VRFDIG_EN,
- },
- [MC13783_REGU_VRFREF] = {
- .desc = {
- .name = "REGU_VRFREF",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFREF,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VRFREF_EN,
- },
- [MC13783_REGU_VRFCP] = {
- .desc = {
- .name = "REGU_VRFCP",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFCP,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_0,
- .enable_bit = MC13783_REGCTRL_VRFCP_EN,
- },
- [MC13783_REGU_VSIM] = {
- .desc = {
- .name = "REGU_VSIM",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VSIM,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VSIM_EN,
- },
- [MC13783_REGU_VESIM] = {
- .desc = {
- .name = "REGU_VESIM",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VESIM,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VESIM_EN,
- },
- [MC13783_REGU_VCAM] = {
- .desc = {
- .name = "REGU_VCAM",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VCAM,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VCAM_EN,
- },
- [MC13783_REGU_VRFBG] = {
- .desc = {
- .name = "REGU_VRFBG",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRFBG,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VRFBG_EN,
- },
- [MC13783_REGU_VVIB] = {
- .desc = {
- .name = "REGU_VVIB",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VVIB,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VVIB_EN,
- },
- [MC13783_REGU_VRF1] = {
- .desc = {
- .name = "REGU_VRF1",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRF1,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VRF1_EN,
- },
- [MC13783_REGU_VRF2] = {
- .desc = {
- .name = "REGU_VRF2",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VRF2,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VRF2_EN,
- },
- [MC13783_REGU_VMMC1] = {
- .desc = {
- .name = "REGU_VMMC1",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VMMC1,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VMMC1_EN,
- },
- [MC13783_REGU_VMMC2] = {
- .desc = {
- .name = "REGU_VMMC2",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_VMMC2,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_REGULATOR_MODE_1,
- .enable_bit = MC13783_REGCTRL_VMMC2_EN,
- },
- [MC13783_REGU_GPO1] = {
- .desc = {
- .name = "REGU_GPO1",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO1,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO1_EN,
- },
- [MC13783_REGU_GPO2] = {
- .desc = {
- .name = "REGU_GPO2",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO2,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO2_EN,
- },
- [MC13783_REGU_GPO3] = {
- .desc = {
- .name = "REGU_GPO3",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO3,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO3_EN,
- },
- [MC13783_REGU_GPO4] = {
- .desc = {
- .name = "REGU_GPO4",
- .ops = &mc13783_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .id = MC13783_REGU_GPO4,
- .owner = THIS_MODULE,
- },
- .reg = MC13783_REG_POWER_MISCELLANEOUS,
- .enable_bit = MC13783_REGCTRL_GPO4_EN,
- },
-};
-
-struct mc13783_priv {
- struct regulator_desc desc[ARRAY_SIZE(mc13783_regulators)];
- struct mc13783 *mc13783;
- struct regulator_dev *regulators[0];
-};
-
-static int mc13783_enable(struct regulator_dev *rdev)
-{
- struct mc13783_priv *priv = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
- mc13783_regulators[id].enable_bit,
- mc13783_regulators[id].enable_bit);
-}
-
-static int mc13783_disable(struct regulator_dev *rdev)
-{
- struct mc13783_priv *priv = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
-
- dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
- return mc13783_set_bits(priv->mc13783, mc13783_regulators[id].reg,
- mc13783_regulators[id].enable_bit, 0);
-}
-
-static int mc13783_is_enabled(struct regulator_dev *rdev)
-{
- struct mc13783_priv *priv = rdev_get_drvdata(rdev);
- int ret, id = rdev_get_id(rdev);
- unsigned int val;
-
- ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
- if (ret)
- return ret;
-
- return (val & mc13783_regulators[id].enable_bit) != 0;
-}
-
-static struct regulator_ops mc13783_regulator_ops = {
- .enable = mc13783_enable,
- .disable = mc13783_disable,
- .is_enabled = mc13783_is_enabled,
-};
-
-static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
-{
- struct mc13783_priv *priv;
- struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
- struct mc13783_regulator_init_data *init_data;
- int i, ret;
-
- dev_dbg(&pdev->dev, "mc13783_regulator_probe id %d\n", pdev->id);
-
- priv = kzalloc(sizeof(*priv) + mc13783->num_regulators * sizeof(void *),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->mc13783 = mc13783;
-
- for (i = 0; i < mc13783->num_regulators; i++) {
- init_data = &mc13783->regulators[i];
- priv->regulators[i] = regulator_register(
- &mc13783_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
-
- if (IS_ERR(priv->regulators[i])) {
- dev_err(&pdev->dev, "failed to register regulator %s\n",
- mc13783_regulators[i].desc.name);
- ret = PTR_ERR(priv->regulators[i]);
- goto err;
- }
- }
-
- platform_set_drvdata(pdev, priv);
-
- return 0;
-err:
- while (--i >= 0)
- regulator_unregister(priv->regulators[i]);
-
- kfree(priv);
-
- return ret;
-}
-
-static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
-{
- struct mc13783_priv *priv = platform_get_drvdata(pdev);
- struct mc13783 *mc13783 = priv->mc13783;
- int i;
-
- for (i = 0; i < mc13783->num_regulators; i++)
- regulator_unregister(priv->regulators[i]);
-
- return 0;
-}
-
-static struct platform_driver mc13783_regulator_driver = {
- .driver = {
- .name = "mc13783-regulator",
- .owner = THIS_MODULE,
- },
- .remove = __devexit_p(mc13783_regulator_remove),
-};
-
-static int __init mc13783_regulator_init(void)
-{
- return platform_driver_probe(&mc13783_regulator_driver,
- mc13783_regulator_probe);
-}
-subsys_initcall(mc13783_regulator_init);
-
-static void __exit mc13783_regulator_exit(void)
-{
- platform_driver_unregister(&mc13783_regulator_driver);
-}
-module_exit(mc13783_regulator_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
-MODULE_DESCRIPTION("Regulator Driver for Freescale MC13783 PMIC");
-MODULE_ALIAS("platform:mc13783-regulator");
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 0803ffe6236..c8f41dc05b7 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -314,13 +314,15 @@ static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
struct pcf50633 *pcf;
/* Already set by core driver */
- pcf = platform_get_drvdata(pdev);
+ pcf = dev_to_pcf50633(pdev->dev.parent);
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
pdev->dev.platform_data, pcf);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
+ platform_set_drvdata(pdev, rdev);
+
if (pcf->pdata->regulator_registered)
pcf->pdata->regulator_registered(pcf, pdev->id);
@@ -331,6 +333,7 @@ static int __devexit pcf50633_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
regulator_unregister(rdev);
return 0;
diff --git a/drivers/regulator/twl4030-regulator.c b/drivers/regulator/twl-regulator.c
index e2032fb60b5..7e674859bd5 100644
--- a/drivers/regulator/twl4030-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1,5 +1,5 @@
/*
- * twl4030-regulator.c -- support regulators in twl4030 family chips
+ * twl-regulator.c -- support regulators in twl4030/twl6030 family chips
*
* Copyright (C) 2008 David Brownell
*
@@ -12,14 +12,15 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
- * The TWL4030/TW5030/TPS659x0 family chips include power management, a
+ * The TWL4030/TW5030/TPS659x0/TWL6030 family chips include power management, a
* USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions
* include an audio codec, battery charger, and more voltage regulators.
* These chips are often used in OMAP-based systems.
@@ -33,13 +34,19 @@ struct twlreg_info {
/* start of regulator's PM_RECEIVER control register bank */
u8 base;
- /* twl4030 resource ID, for resource control state machine */
+ /* twl resource ID, for resource control state machine */
u8 id;
/* voltage in mV = table[VSEL]; table_len must be a power-of-two */
u8 table_len;
const u16 *table;
+ /* regulator specific turn-on delay */
+ u16 delay;
+
+ /* State REMAP default configuration */
+ u8 remap;
+
/* chip constraints on regulator behavior */
u16 min_mV;
@@ -52,27 +59,38 @@ struct twlreg_info {
* The first three registers of all power resource banks help hardware to
* manage the various resource groups.
*/
+/* Common offset in TWL4030/6030 */
#define VREG_GRP 0
+/* TWL4030 register offsets */
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
-
+/* TWL6030 register offsets */
+#define VREG_TRANS 1
+#define VREG_STATE 2
+#define VREG_VOLTAGE 3
+/* TWL6030 Misc register offsets */
+#define VREG_BC_ALL 1
+#define VREG_BC_REF 2
+#define VREG_BC_PROC 3
+#define VREG_BC_CLK_RST 4
static inline int
-twl4030reg_read(struct twlreg_info *info, unsigned offset)
+twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
{
u8 value;
int status;
- status = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER,
+ status = twl_i2c_read_u8(slave_subgp,
&value, info->base + offset);
return (status < 0) ? status : value;
}
static inline int
-twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value)
+twlreg_write(struct twlreg_info *info, unsigned slave_subgp, unsigned offset,
+ u8 value)
{
- return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+ return twl_i2c_write_u8(slave_subgp,
value, info->base + offset);
}
@@ -80,59 +98,84 @@ twl4030reg_write(struct twlreg_info *info, unsigned offset, u8 value)
/* generic power resource operations, which work on all regulators */
-static int twl4030reg_grp(struct regulator_dev *rdev)
+static int twlreg_grp(struct regulator_dev *rdev)
{
- return twl4030reg_read(rdev_get_drvdata(rdev), VREG_GRP);
+ return twlreg_read(rdev_get_drvdata(rdev), TWL_MODULE_PM_RECEIVER,
+ VREG_GRP);
}
/*
* Enable/disable regulators by joining/leaving the P1 (processor) group.
* We assume nobody else is updating the DEV_GRP registers.
*/
-
-#define P3_GRP BIT(7) /* "peripherals" */
-#define P2_GRP BIT(6) /* secondary processor, modem, etc */
-#define P1_GRP BIT(5) /* CPU/Linux */
-
-static int twl4030reg_is_enabled(struct regulator_dev *rdev)
+/* definition for 4030 family */
+#define P3_GRP_4030 BIT(7) /* "peripherals" */
+#define P2_GRP_4030 BIT(6) /* secondary processor, modem, etc */
+#define P1_GRP_4030 BIT(5) /* CPU/Linux */
+/* definition for 6030 family */
+#define P3_GRP_6030 BIT(2) /* secondary processor, modem, etc */
+#define P2_GRP_6030 BIT(1) /* "peripherals" */
+#define P1_GRP_6030 BIT(0) /* CPU/Linux */
+
+static int twlreg_is_enabled(struct regulator_dev *rdev)
{
- int state = twl4030reg_grp(rdev);
+ int state = twlreg_grp(rdev);
if (state < 0)
return state;
- return (state & P1_GRP) != 0;
+ if (twl_class_is_4030())
+ state &= P1_GRP_4030;
+ else
+ state &= P1_GRP_6030;
+ return state;
}
-static int twl4030reg_enable(struct regulator_dev *rdev)
+static int twlreg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
+ int ret;
- grp = twl4030reg_read(info, VREG_GRP);
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
if (grp < 0)
return grp;
- grp |= P1_GRP;
- return twl4030reg_write(info, VREG_GRP, grp);
+ if (twl_class_is_4030())
+ grp |= P1_GRP_4030;
+ else
+ grp |= P1_GRP_6030;
+
+ ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+
+ udelay(info->delay);
+
+ return ret;
}
-static int twl4030reg_disable(struct regulator_dev *rdev)
+static int twlreg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int grp;
- grp = twl4030reg_read(info, VREG_GRP);
+ grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
if (grp < 0)
return grp;
- grp &= ~P1_GRP;
- return twl4030reg_write(info, VREG_GRP, grp);
+ if (twl_class_is_4030())
+ grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
+ else
+ grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
+
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
}
-static int twl4030reg_get_status(struct regulator_dev *rdev)
+static int twlreg_get_status(struct regulator_dev *rdev)
{
- int state = twl4030reg_grp(rdev);
+ int state = twlreg_grp(rdev);
+
+ if (twl_class_is_6030())
+ return 0; /* FIXME return for 6030 regulator */
if (state < 0)
return state;
@@ -146,12 +189,15 @@ static int twl4030reg_get_status(struct regulator_dev *rdev)
: REGULATOR_STATUS_STANDBY;
}
-static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
+static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
unsigned message;
int status;
+ if (twl_class_is_6030())
+ return 0; /* FIXME return for 6030 regulator */
+
/* We can only set the mode through state machine commands... */
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -165,18 +211,18 @@ static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
}
/* Ensure the resource is associated with some group */
- status = twl4030reg_grp(rdev);
+ status = twlreg_grp(rdev);
if (status < 0)
return status;
- if (!(status & (P3_GRP | P2_GRP | P1_GRP)))
+ if (!(status & (P3_GRP_4030 | P2_GRP_4030 | P1_GRP_4030)))
return -EACCES;
- status = twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
message >> 8, 0x15 /* PB_WORD_MSB */ );
if (status >= 0)
return status;
- return twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
+ return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
message, 0x16 /* PB_WORD_LSB */ );
}
@@ -260,9 +306,43 @@ static const u16 VSIM_VSEL_table[] = {
static const u16 VDAC_VSEL_table[] = {
1200, 1300, 1800, 1800,
};
+static const u16 VDD1_VSEL_table[] = {
+ 800, 1450,
+};
+static const u16 VDD2_VSEL_table[] = {
+ 800, 1450, 1500,
+};
+static const u16 VIO_VSEL_table[] = {
+ 1800, 1850,
+};
+static const u16 VINTANA2_VSEL_table[] = {
+ 2500, 2750,
+};
+static const u16 VAUX1_6030_VSEL_table[] = {
+ 1000, 1300, 1800, 2500,
+ 2800, 2900, 3000, 3000,
+};
+static const u16 VAUX2_6030_VSEL_table[] = {
+ 1200, 1800, 2500, 2750,
+ 2800, 2800, 2800, 2800,
+};
+static const u16 VAUX3_6030_VSEL_table[] = {
+ 1000, 1200, 1300, 1800,
+ 2500, 2800, 3000, 3000,
+};
+static const u16 VMMC_VSEL_table[] = {
+ 1200, 1800, 2800, 2900,
+ 3000, 3000, 3000, 3000,
+};
+static const u16 VPP_VSEL_table[] = {
+ 1800, 1900, 2000, 2100,
+ 2200, 2300, 2400, 2500,
+};
+static const u16 VUSIM_VSEL_table[] = {
+ 1200, 1800, 2500, 2900,
+};
-
-static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twlldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int mV = info->table[index];
@@ -271,7 +351,7 @@ static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
}
static int
-twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twlldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
int vsel;
@@ -288,16 +368,18 @@ twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
/* use the first in-range value */
if (min_uV <= uV && uV <= max_uV)
- return twl4030reg_write(info, VREG_DEDICATED, vsel);
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE, vsel);
}
return -EDOM;
}
-static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
+static int twlldo_get_voltage(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel = twl4030reg_read(info, VREG_DEDICATED);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE);
if (vsel < 0)
return vsel;
@@ -306,19 +388,19 @@ static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
return LDO_MV(info->table[vsel]) * 1000;
}
-static struct regulator_ops twl4030ldo_ops = {
- .list_voltage = twl4030ldo_list_voltage,
+static struct regulator_ops twlldo_ops = {
+ .list_voltage = twlldo_list_voltage,
- .set_voltage = twl4030ldo_set_voltage,
- .get_voltage = twl4030ldo_get_voltage,
+ .set_voltage = twlldo_set_voltage,
+ .get_voltage = twlldo_get_voltage,
- .enable = twl4030reg_enable,
- .disable = twl4030reg_disable,
- .is_enabled = twl4030reg_is_enabled,
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
- .set_mode = twl4030reg_set_mode,
+ .set_mode = twlreg_set_mode,
- .get_status = twl4030reg_get_status,
+ .get_status = twlreg_get_status,
};
/*----------------------------------------------------------------------*/
@@ -326,60 +408,82 @@ static struct regulator_ops twl4030ldo_ops = {
/*
* Fixed voltage LDOs don't have a VSEL field to update.
*/
-static int twl4030fixed_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twlfixed_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
return info->min_mV * 1000;
}
-static int twl4030fixed_get_voltage(struct regulator_dev *rdev)
+static int twlfixed_get_voltage(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
return info->min_mV * 1000;
}
-static struct regulator_ops twl4030fixed_ops = {
- .list_voltage = twl4030fixed_list_voltage,
+static struct regulator_ops twlfixed_ops = {
+ .list_voltage = twlfixed_list_voltage,
- .get_voltage = twl4030fixed_get_voltage,
+ .get_voltage = twlfixed_get_voltage,
- .enable = twl4030reg_enable,
- .disable = twl4030reg_disable,
- .is_enabled = twl4030reg_is_enabled,
+ .enable = twlreg_enable,
+ .disable = twlreg_disable,
+ .is_enabled = twlreg_is_enabled,
- .set_mode = twl4030reg_set_mode,
+ .set_mode = twlreg_set_mode,
- .get_status = twl4030reg_get_status,
+ .get_status = twlreg_get_status,
};
/*----------------------------------------------------------------------*/
-#define TWL_ADJUSTABLE_LDO(label, offset, num) { \
+#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
+ TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
+ remap_conf, TWL4030)
+#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf) \
+ TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf, TWL4030)
+#define TWL6030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
+ remap_conf) \
+ TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, \
+ remap_conf, TWL6030)
+#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf) \
+ TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+ remap_conf, TWL6030)
+
+#define TWL_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf, \
+ family) { \
.base = offset, \
.id = num, \
.table_len = ARRAY_SIZE(label##_VSEL_table), \
.table = label##_VSEL_table, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
.desc = { \
.name = #label, \
- .id = TWL4030_REG_##label, \
+ .id = family##_REG_##label, \
.n_voltages = ARRAY_SIZE(label##_VSEL_table), \
- .ops = &twl4030ldo_ops, \
+ .ops = &twlldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
-#define TWL_FIXED_LDO(label, offset, mVolts, num) { \
+#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
+ family) { \
.base = offset, \
.id = num, \
.min_mV = mVolts, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
.desc = { \
.name = #label, \
- .id = TWL4030_REG_##label, \
+ .id = family##_REG_##label, \
.n_voltages = 1, \
- .ops = &twl4030fixed_ops, \
+ .ops = &twlfixed_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
@@ -389,35 +493,45 @@ static struct regulator_ops twl4030fixed_ops = {
* We list regulators here if systems need some level of
* software control over them after boot.
*/
-static struct twlreg_info twl4030_regs[] = {
- TWL_ADJUSTABLE_LDO(VAUX1, 0x17, 1),
- TWL_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2),
- TWL_ADJUSTABLE_LDO(VAUX2, 0x1b, 2),
- TWL_ADJUSTABLE_LDO(VAUX3, 0x1f, 3),
- TWL_ADJUSTABLE_LDO(VAUX4, 0x23, 4),
- TWL_ADJUSTABLE_LDO(VMMC1, 0x27, 5),
- TWL_ADJUSTABLE_LDO(VMMC2, 0x2b, 6),
- /*
- TWL_ADJUSTABLE_LDO(VPLL1, 0x2f, 7),
- */
- TWL_ADJUSTABLE_LDO(VPLL2, 0x33, 8),
- TWL_ADJUSTABLE_LDO(VSIM, 0x37, 9),
- TWL_ADJUSTABLE_LDO(VDAC, 0x3b, 10),
- /*
- TWL_ADJUSTABLE_LDO(VINTANA1, 0x3f, 11),
- TWL_ADJUSTABLE_LDO(VINTANA2, 0x43, 12),
- TWL_ADJUSTABLE_LDO(VINTDIG, 0x47, 13),
- TWL_SMPS(VIO, 0x4b, 14),
- TWL_SMPS(VDD1, 0x55, 15),
- TWL_SMPS(VDD2, 0x63, 16),
- */
- TWL_FIXED_LDO(VUSB1V5, 0x71, 1500, 17),
- TWL_FIXED_LDO(VUSB1V8, 0x74, 1800, 18),
- TWL_FIXED_LDO(VUSB3V1, 0x77, 3100, 19),
+static struct twlreg_info twl_regs[] = {
+ TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7, 100, 0x00),
+ TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00),
+ TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08),
+ TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
+ TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
+ TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
+ TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
/* VUSBCP is managed *only* by the USB subchip */
+
+ /* 6030 REG with base as PMC Slave Misc : 0x0030 */
+ /* Turn-on delay and remap configuration values for the 6030 are not
+ verified since the specification is not public */
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08)
};
-static int twl4030reg_probe(struct platform_device *pdev)
+static int twlreg_probe(struct platform_device *pdev)
{
int i;
struct twlreg_info *info;
@@ -425,10 +539,10 @@ static int twl4030reg_probe(struct platform_device *pdev)
struct regulation_constraints *c;
struct regulator_dev *rdev;
- for (i = 0, info = NULL; i < ARRAY_SIZE(twl4030_regs); i++) {
- if (twl4030_regs[i].desc.id != pdev->id)
+ for (i = 0, info = NULL; i < ARRAY_SIZE(twl_regs); i++) {
+ if (twl_regs[i].desc.id != pdev->id)
continue;
- info = twl4030_regs + i;
+ info = twl_regs + i;
break;
}
if (!info)
@@ -446,6 +560,19 @@ static int twl4030reg_probe(struct platform_device *pdev)
c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS;
+ switch (pdev->id) {
+ case TWL4030_REG_VIO:
+ case TWL4030_REG_VDD1:
+ case TWL4030_REG_VDD2:
+ case TWL4030_REG_VPLL1:
+ case TWL4030_REG_VINTANA1:
+ case TWL4030_REG_VINTANA2:
+ case TWL4030_REG_VINTDIG:
+ c->always_on = true;
+ break;
+ default:
+ break;
+ }
rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
if (IS_ERR(rdev)) {
@@ -455,6 +582,9 @@ static int twl4030reg_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rdev);
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
+ info->remap);
+
/* NOTE: many regulators support short-circuit IRQs (presentable
* as REGULATOR_OVER_CURRENT notifications?) configured via:
* - SC_CONFIG
@@ -466,35 +596,35 @@ static int twl4030reg_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit twl4030reg_remove(struct platform_device *pdev)
+static int __devexit twlreg_remove(struct platform_device *pdev)
{
regulator_unregister(platform_get_drvdata(pdev));
return 0;
}
-MODULE_ALIAS("platform:twl4030_reg");
+MODULE_ALIAS("platform:twl_reg");
-static struct platform_driver twl4030reg_driver = {
- .probe = twl4030reg_probe,
- .remove = __devexit_p(twl4030reg_remove),
+static struct platform_driver twlreg_driver = {
+ .probe = twlreg_probe,
+ .remove = __devexit_p(twlreg_remove),
/* NOTE: short name, to work around driver model truncation of
- * "twl4030_regulator.12" (and friends) to "twl4030_regulator.1".
+ * "twl_regulator.12" (and friends) to "twl_regulator.1".
*/
- .driver.name = "twl4030_reg",
+ .driver.name = "twl_reg",
.driver.owner = THIS_MODULE,
};
-static int __init twl4030reg_init(void)
+static int __init twlreg_init(void)
{
- return platform_driver_register(&twl4030reg_driver);
+ return platform_driver_register(&twlreg_driver);
}
-subsys_initcall(twl4030reg_init);
+subsys_initcall(twlreg_init);
-static void __exit twl4030reg_exit(void)
+static void __exit twlreg_exit(void)
{
- platform_driver_unregister(&twl4030reg_driver);
+ platform_driver_unregister(&twlreg_driver);
}
-module_exit(twl4030reg_exit)
+module_exit(twlreg_exit)
-MODULE_DESCRIPTION("TWL4030 regulator driver");
+MODULE_DESCRIPTION("TWL regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 2eefc1a0cf0..0a6577577e8 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -19,6 +19,8 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/regulator.h>
@@ -39,6 +41,7 @@
#define WM831X_DCDC_CONTROL_2 1
#define WM831X_DCDC_ON_CONFIG 2
#define WM831X_DCDC_SLEEP_CONTROL 3
+#define WM831X_DCDC_DVS_CONTROL 4
/*
* Shared
@@ -50,6 +53,10 @@ struct wm831x_dcdc {
int base;
struct wm831x *wm831x;
struct regulator_dev *regulator;
+ int dvs_gpio;
+ int dvs_gpio_state;
+ int on_vsel;
+ int dvs_vsel;
};
static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev)
@@ -240,11 +247,9 @@ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV)
+static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
u16 vsel;
if (min_uV < 600000)
@@ -257,39 +262,126 @@ static int wm831x_buckv_set_voltage_int(struct regulator_dev *rdev, int reg,
if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV)
return -EINVAL;
- return wm831x_set_bits(wm831x, reg, WM831X_DC1_ON_VSEL_MASK, vsel);
+ return vsel;
+}
+
+static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ u16 vsel;
+
+ if (max_uV < 600000 || max_uV > 1800000)
+ return -EINVAL;
+
+ vsel = ((max_uV - 600000) / 12500) + 8;
+
+ if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
+ wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
+ return -EINVAL;
+
+ return vsel;
+}
+
+static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
+{
+ struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
+
+ if (state == dcdc->dvs_gpio_state)
+ return 0;
+
+ dcdc->dvs_gpio_state = state;
+ gpio_set_value(dcdc->dvs_gpio, state);
+
+ /* Should wait for DVS state change to be asserted if we have
+ * a GPIO for it, for now assume the device is configured
+ * for the fastest possible transition.
+ */
+
+ return 0;
}
static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+ int min_uV, int max_uV)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
+ struct wm831x *wm831x = dcdc->wm831x;
+ int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
+ int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL;
+ int vsel, ret;
+
+ vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV);
+ if (vsel < 0)
+ return vsel;
+
+ /* If this value is already set then do a GPIO update if we can */
+ if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
+ return wm831x_buckv_set_dvs(rdev, 0);
+
+ if (dcdc->dvs_gpio && dcdc->dvs_vsel == vsel)
+ return wm831x_buckv_set_dvs(rdev, 1);
+
+ /* Always set the ON status to the minimum voltage */
+ ret = wm831x_set_bits(wm831x, on_reg, WM831X_DC1_ON_VSEL_MASK, vsel);
+ if (ret < 0)
+ return ret;
+ dcdc->on_vsel = vsel;
+
+ if (!dcdc->dvs_gpio)
+ return ret;
+
+ /* Kick the voltage transition now */
+ ret = wm831x_buckv_set_dvs(rdev, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set the high voltage as the DVS voltage. This is optimised
+ * for CPUfreq usage, most processors will keep the maximum
+ * voltage constant and lower the minimum with the frequency. */
+ vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV);
+ if (vsel < 0) {
+ /* This should never happen - at worst the same vsel
+ * should be chosen */
+ WARN_ON(vsel < 0);
+ return 0;
+ }
+
+ /* Don't bother if it's the same VSEL we're already using */
+ if (vsel == dcdc->on_vsel)
+ return 0;
- return wm831x_buckv_set_voltage_int(rdev, reg, min_uV, max_uV);
+ ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+ dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
+ ret);
+
+ return 0;
}
static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
- int uV)
+ int uV)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
+ struct wm831x *wm831x = dcdc->wm831x;
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
+ int vsel;
+
+ vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV);
+ if (vsel < 0)
+ return vsel;
- return wm831x_buckv_set_voltage_int(rdev, reg, uV, uV);
+ return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
}
static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = dcdc->wm831x;
- u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
- int val;
- val = wm831x_reg_read(wm831x, reg);
- if (val < 0)
- return val;
-
- return wm831x_buckv_list_voltage(rdev, val & WM831X_DC1_ON_VSEL_MASK);
+ if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
+ return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
+ else
+ return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
}
/* Current limit options */
@@ -346,6 +438,64 @@ static struct regulator_ops wm831x_buckv_ops = {
.set_suspend_mode = wm831x_dcdc_set_suspend_mode,
};
+/*
+ * Set up DVS control. We just log errors since we can still run
+ * (with reduced performance) if we fail.
+ */
+static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
+ struct wm831x_buckv_pdata *pdata)
+{
+ struct wm831x *wm831x = dcdc->wm831x;
+ int ret;
+ u16 ctrl;
+
+ if (!pdata || !pdata->dvs_gpio)
+ return;
+
+ switch (pdata->dvs_control_src) {
+ case 1:
+ ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ case 2:
+ ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
+ pdata->dvs_control_src, dcdc->name);
+ return;
+ }
+
+ ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_SRC_MASK, ctrl);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
+ dcdc->name, ret);
+ return;
+ }
+
+ ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
+ dcdc->name, ret);
+ return;
+ }
+
+	/* gpiolib won't let us read the GPIO status, so take the
+	 * initial DVS state from platform data instead.
+	 */
+ dcdc->dvs_gpio_state = pdata->dvs_init_state;
+
+ ret = gpio_direction_output(pdata->dvs_gpio, dcdc->dvs_gpio_state);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to enable %s DVS GPIO: %d\n",
+ dcdc->name, ret);
+ gpio_free(pdata->dvs_gpio);
+ return;
+ }
+
+ dcdc->dvs_gpio = pdata->dvs_gpio;
+}
+
static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -384,6 +534,23 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
dcdc->desc.ops = &wm831x_buckv_ops;
dcdc->desc.owner = THIS_MODULE;
+ ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to read ON VSEL: %d\n", ret);
+ goto err;
+ }
+ dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
+
+ ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
+ goto err;
+ }
+ dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK;
+
+ if (pdata->dcdc[id])
+ wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
+
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
pdata->dcdc[id], dcdc);
if (IS_ERR(dcdc->regulator)) {
@@ -422,6 +589,8 @@ err_uv:
err_regulator:
regulator_unregister(dcdc->regulator);
err:
+ if (dcdc->dvs_gpio)
+ gpio_free(dcdc->dvs_gpio);
kfree(dcdc);
return ret;
}
@@ -434,6 +603,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
+ if (dcdc->dvs_gpio)
+ gpio_free(dcdc->dvs_gpio);
kfree(dcdc);
return 0;
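The DVS support added above is driven entirely from platform data: wm831x_buckv_dvs_init() reads dvs_gpio, dvs_control_src and dvs_init_state from the wm831x_buckv_pdata passed through regulator_init_data.driver_data. A board-level sketch (not part of the patch) with placeholder values, assuming the pdata struct is declared in <linux/mfd/wm831x/pdata.h>:

#include <linux/regulator/machine.h>
#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_buckv_pdata dc1_dvs_pdata = {
	.dvs_gpio	 = 101,	/* placeholder: CPU GPIO wired to the DVS pin */
	.dvs_control_src = 1,	/* hardware DVS control source 1 */
	.dvs_init_state	 = 0,	/* ON_VSEL selected at boot per board strapping */
};

static struct regulator_init_data dc1_init_data = {
	.constraints = {
		.min_uV		= 600000,
		.max_uV		= 1800000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.driver_data = &dc1_dvs_pdata,
};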
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 902db56ce09..61e02ac2fda 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -470,7 +470,7 @@ static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
struct wm831x *wm831x = ldo->wm831x;
int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
- unsigned int ret;
+ int ret;
ret = wm831x_reg_read(wm831x, on_reg);
if (ret < 0)
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 768bd0e5b48..1bbff099a54 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1330,9 +1330,10 @@ static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
},
};
-static void pmic_uv_handler(struct wm8350 *wm8350, int irq, void *data)
+static irqreturn_t pmic_uv_handler(int irq, void *data)
{
struct regulator_dev *rdev = (struct regulator_dev *)data;
+ struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
mutex_lock(&rdev->mutex);
if (irq == WM8350_IRQ_CS1 || irq == WM8350_IRQ_CS2)
@@ -1344,6 +1345,8 @@ static void pmic_uv_handler(struct wm8350 *wm8350, int irq, void *data)
REGULATOR_EVENT_UNDER_VOLTAGE,
wm8350);
mutex_unlock(&rdev->mutex);
+
+ return IRQ_HANDLED;
}
static int wm8350_regulator_probe(struct platform_device *pdev)
@@ -1388,7 +1391,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
/* register regulator IRQ */
ret = wm8350_register_irq(wm8350, wm8350_reg[pdev->id].irq,
- pmic_uv_handler, rdev);
+ pmic_uv_handler, 0, "UV", rdev);
if (ret < 0) {
regulator_unregister(rdev);
dev_err(&pdev->dev, "failed to register regulator %s IRQ\n",
@@ -1396,8 +1399,6 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
return ret;
}
- wm8350_unmask_irq(wm8350, wm8350_reg[pdev->id].irq);
-
return 0;
}
@@ -1406,7 +1407,6 @@ static int wm8350_regulator_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- wm8350_mask_irq(wm8350, wm8350_reg[pdev->id].irq);
wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq);
regulator_unregister(rdev);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f2e1004d12c..8167e9e6827 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -242,6 +242,15 @@ config RTC_DRV_M41T80_WDT
If you say Y here you will get support for the
watchdog timer in the ST M41T60 and M41T80 RTC chips series.
+config RTC_DRV_BQ32K
+ tristate "TI BQ32000"
+ help
+ If you say Y here you will get support for the TI
+ BQ32000 I2C RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-bq32k.
+
config RTC_DRV_DM355EVM
tristate "TI DaVinci DM355 EVM RTC"
depends on MFD_DM355EVM_MSP
@@ -258,14 +267,14 @@ config RTC_DRV_TWL92330
the Menelaus driver; it's not separate module.
config RTC_DRV_TWL4030
- tristate "TI TWL4030/TWL5030/TPS659x0"
+ tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0"
depends on RTC_CLASS && TWL4030_CORE
help
If you say yes here you get support for the RTC on the
- TWL4030 family chips, used mostly with OMAP3 platforms.
+ TWL4030/TWL5030/TWL6030 family chips, used mostly with OMAP3 platforms.
This driver can also be built as a module. If so, the module
- will be called rtc-twl4030.
+ will be called rtc-twl.
config RTC_DRV_S35390A
tristate "Seiko Instruments S-35390A"
@@ -592,15 +601,22 @@ config RTC_DRV_AB3100
Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
+config RTC_DRV_NUC900
+ tristate "NUC910/NUC920 RTC driver"
+ depends on RTC_CLASS && ARCH_W90X900
+ help
+ If you say yes here you get support for the RTC subsystem of the
+ NUC910/NUC920 used in embedded systems.
comment "on-CPU RTC drivers"
config RTC_DRV_OMAP
tristate "TI OMAP1"
- depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730
+ depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
help
- Say "yes" here to support the real time clock on TI OMAP1 chips.
- This driver can also be built as a module called rtc-omap.
+ Say "yes" here to support the real time clock on TI OMAP1 and
+ DA8xx/OMAP-L13x chips. This driver can also be built as a
+ module called rtc-omap.
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
@@ -846,4 +862,10 @@ config RTC_DRV_PCAP
If you say Y here you will get support for the RTC found on
the PCAP2 ASIC used on some Motorola phones.
+config RTC_DRV_MC13783
+ depends on MFD_MC13783
+ tristate "Freescale MC13783 RTC"
+ help
+ This enables support for the Freescale MC13783 PMIC RTC
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index af1ba7ae285..e5160fddc44 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
+obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o
obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o
@@ -52,8 +53,10 @@ obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MC13783) += rtc-mc13783.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
+obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
@@ -80,7 +83,7 @@ obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
-obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o
+obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index e1ec33e40e3..8825695777d 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -256,6 +256,8 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
goto out_iounmap;
}
+ platform_set_drvdata(pdev, rtc);
+
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
&at32_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
@@ -264,7 +266,6 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
goto out_free_irq;
}
- platform_set_drvdata(pdev, rtc);
device_init_wakeup(&pdev->dev, 1);
dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
@@ -273,6 +274,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
return 0;
out_free_irq:
+ platform_set_drvdata(pdev, NULL);
free_irq(irq, rtc);
out_iounmap:
iounmap(rtc->regs);
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
new file mode 100644
index 00000000000..408cc8f735b
--- /dev/null
+++ b/drivers/rtc/rtc-bq32k.c
@@ -0,0 +1,204 @@
+/*
+ * Driver for TI BQ32000 RTC.
+ *
+ * Copyright (C) 2009 Semihalf.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/bcd.h>
+
+#define BQ32K_SECONDS 0x00 /* Seconds register address */
+#define BQ32K_SECONDS_MASK 0x7F /* Mask over seconds value */
+#define BQ32K_STOP 0x80 /* Oscillator Stop flag */
+
+#define BQ32K_MINUTES 0x01 /* Minutes register address */
+#define BQ32K_MINUTES_MASK 0x7F /* Mask over minutes value */
+#define BQ32K_OF 0x80 /* Oscillator Failure flag */
+
+#define BQ32K_HOURS_MASK 0x3F /* Mask over hours value */
+#define BQ32K_CENT 0x40 /* Century flag */
+#define BQ32K_CENT_EN 0x80 /* Century flag enable bit */
+
+struct bq32k_regs {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t cent_hours;
+ uint8_t day;
+ uint8_t date;
+ uint8_t month;
+ uint8_t years;
+};
+
+static struct i2c_driver bq32k_driver;
+
+static int bq32k_read(struct device *dev, void *data, uint8_t off, uint8_t len)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &off,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ }
+ };
+
+ if (i2c_transfer(client->adapter, msgs, 2) == 2)
+ return 0;
+
+ return -EIO;
+}
+
+static int bq32k_write(struct device *dev, void *data, uint8_t off, uint8_t len)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ uint8_t buffer[len + 1];
+
+ buffer[0] = off;
+ memcpy(&buffer[1], data, len);
+
+ if (i2c_master_send(client, buffer, len + 1) == len + 1)
+ return 0;
+
+ return -EIO;
+}
+
+static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bq32k_regs regs;
+ int error;
+
+ error = bq32k_read(dev, &regs, 0, sizeof(regs));
+ if (error)
+ return error;
+
+ tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
+ tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
+ tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
+ tm->tm_mday = bcd2bin(regs.date);
+ tm->tm_wday = bcd2bin(regs.day) - 1;
+ tm->tm_mon = bcd2bin(regs.month) - 1;
+ tm->tm_year = bcd2bin(regs.years) +
+ ((regs.cent_hours & BQ32K_CENT) ? 100 : 0);
+
+ return rtc_valid_tm(tm);
+}
+
+static int bq32k_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct bq32k_regs regs;
+
+ regs.seconds = bin2bcd(tm->tm_sec);
+ regs.minutes = bin2bcd(tm->tm_min);
+ regs.cent_hours = bin2bcd(tm->tm_hour) | BQ32K_CENT_EN;
+ regs.day = bin2bcd(tm->tm_wday + 1);
+ regs.date = bin2bcd(tm->tm_mday);
+ regs.month = bin2bcd(tm->tm_mon + 1);
+
+ if (tm->tm_year >= 100) {
+ regs.cent_hours |= BQ32K_CENT;
+ regs.years = bin2bcd(tm->tm_year - 100);
+ } else
+ regs.years = bin2bcd(tm->tm_year);
+
+ return bq32k_write(dev, &regs, 0, sizeof(regs));
+}
+
+static const struct rtc_class_ops bq32k_rtc_ops = {
+ .read_time = bq32k_rtc_read_time,
+ .set_time = bq32k_rtc_set_time,
+};
+
+static int bq32k_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct rtc_device *rtc;
+ uint8_t reg;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ /* Check Oscillator Stop flag */
+ error = bq32k_read(dev, &reg, BQ32K_SECONDS, 1);
+ if (!error && (reg & BQ32K_STOP)) {
+ dev_warn(dev, "Oscillator was halted. Restarting...\n");
+ reg &= ~BQ32K_STOP;
+ error = bq32k_write(dev, &reg, BQ32K_SECONDS, 1);
+ }
+ if (error)
+ return error;
+
+ /* Check Oscillator Failure flag */
+ error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
+ if (!error && (reg & BQ32K_OF)) {
+ dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
+ reg &= ~BQ32K_OF;
+ error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
+ }
+ if (error)
+ return error;
+
+ rtc = rtc_device_register(bq32k_driver.driver.name, &client->dev,
+ &bq32k_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ i2c_set_clientdata(client, rtc);
+
+ return 0;
+}
+
+static int __devexit bq32k_remove(struct i2c_client *client)
+{
+ struct rtc_device *rtc = i2c_get_clientdata(client);
+
+ rtc_device_unregister(rtc);
+ return 0;
+}
+
+static const struct i2c_device_id bq32k_id[] = {
+ { "bq32000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bq32k_id);
+
+static struct i2c_driver bq32k_driver = {
+ .driver = {
+ .name = "bq32k",
+ .owner = THIS_MODULE,
+ },
+ .probe = bq32k_probe,
+ .remove = __devexit_p(bq32k_remove),
+ .id_table = bq32k_id,
+};
+
+static __init int bq32k_init(void)
+{
+ return i2c_add_driver(&bq32k_driver);
+}
+module_init(bq32k_init);
+
+static __exit void bq32k_exit(void)
+{
+ i2c_del_driver(&bq32k_driver);
+}
+module_exit(bq32k_exit);
+
+MODULE_AUTHOR("Semihalf, Piotr Ziecik <kosmo@semihalf.com>");
+MODULE_DESCRIPTION("TI BQ32000 I2C RTC driver");
+MODULE_LICENSE("GPL");
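The new driver matches on the "bq32000" I2C id above, so boards only need to declare the device. A registration sketch (not part of the patch); the 0x68 slave address is an assumption taken from typical BQ32000 wiring and should be checked against the board schematic.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>

static struct i2c_board_info example_rtc_i2c_info[] __initdata = {
	{
		I2C_BOARD_INFO("bq32000", 0x68),	/* matches bq32k_id[] above */
	},
};

static void __init example_board_init(void)
{
	i2c_register_board_info(0, example_rtc_i2c_info,
				ARRAY_SIZE(example_rtc_i2c_info));
}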
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index d00a274df8f..280fe48ada0 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -169,6 +169,8 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
goto out_free;
}
+ platform_set_drvdata(pdev, p);
+
p->rtc = rtc_device_register("bq4802", &pdev->dev,
&bq4802_ops, THIS_MODULE);
if (IS_ERR(p->rtc)) {
@@ -176,7 +178,6 @@ static int __devinit bq4802_probe(struct platform_device *pdev)
goto out_iounmap;
}
- platform_set_drvdata(pdev, p);
err = 0;
out:
return err;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index f7a4701bf86..c8c12325e69 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -420,49 +420,43 @@ static int cmos_irq_set_state(struct device *dev, int enabled)
return 0;
}
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
-static int
-cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned long flags;
- switch (cmd) {
- case RTC_AIE_OFF:
- case RTC_AIE_ON:
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- if (!is_valid_irq(cmos->irq))
- return -EINVAL;
- break;
- /* PIE ON/OFF is handled by cmos_irq_set_state() */
- default:
- return -ENOIOCTLCMD;
- }
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
- switch (cmd) {
- case RTC_AIE_OFF: /* alarm off */
- cmos_irq_disable(cmos, RTC_AIE);
- break;
- case RTC_AIE_ON: /* alarm on */
+
+ if (enabled)
cmos_irq_enable(cmos, RTC_AIE);
- break;
- case RTC_UIE_OFF: /* update off */
- cmos_irq_disable(cmos, RTC_UIE);
- break;
- case RTC_UIE_ON: /* update on */
- cmos_irq_enable(cmos, RTC_UIE);
- break;
- }
+ else
+ cmos_irq_disable(cmos, RTC_AIE);
+
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
-#else
-#define cmos_rtc_ioctl NULL
-#endif
+static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ if (enabled)
+ cmos_irq_enable(cmos, RTC_UIE);
+ else
+ cmos_irq_disable(cmos, RTC_UIE);
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return 0;
+}
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
@@ -503,14 +497,15 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
#endif
static const struct rtc_class_ops cmos_rtc_ops = {
- .ioctl = cmos_rtc_ioctl,
- .read_time = cmos_read_time,
- .set_time = cmos_set_time,
- .read_alarm = cmos_read_alarm,
- .set_alarm = cmos_set_alarm,
- .proc = cmos_procfs,
- .irq_set_freq = cmos_irq_set_freq,
- .irq_set_state = cmos_irq_set_state,
+ .read_time = cmos_read_time,
+ .set_time = cmos_set_time,
+ .read_alarm = cmos_read_alarm,
+ .set_alarm = cmos_set_alarm,
+ .proc = cmos_procfs,
+ .irq_set_freq = cmos_irq_set_freq,
+ .irq_set_state = cmos_irq_set_state,
+ .alarm_irq_enable = cmos_alarm_irq_enable,
+ .update_irq_enable = cmos_update_irq_enable,
};
/*----------------------------------------------------------------*/
@@ -691,7 +686,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
*/
#if defined(CONFIG_ATARI)
address_space = 64;
-#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__sparc__)
+#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
+ || defined(__sparc__) || defined(__mips__)
address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -871,8 +867,9 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg)
mask = RTC_IRQMASK;
tmp &= ~mask;
CMOS_WRITE(tmp, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(mask);
+ /* shut down hpet emulation - we don't need it for alarm */
+ hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
cmos_checkintr(cmos, tmp);
}
spin_unlock_irq(&rtc_lock);
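Dropping cmos_rtc_ioctl() does not change the userspace interface: the rtc core still accepts RTC_AIE_ON/OFF and RTC_UIE_ON/OFF and now routes them to the new alarm_irq_enable()/update_irq_enable() callbacks. A small userspace sketch (not part of the patch), assuming the CMOS RTC is exposed as /dev/rtc0:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned long data;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0)
		return 1;

	if (ioctl(fd, RTC_UIE_ON, 0) == 0) {
		/* blocks until the next 1 Hz update interrupt fires */
		read(fd, &data, sizeof(data));
		ioctl(fd, RTC_UIE_OFF, 0);
	}

	close(fd);
	return 0;
}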
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 1e73c8f42e3..532acf9b05d 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -143,7 +143,6 @@ static int ds1302_rtc_ioctl(struct device *dev, unsigned int cmd,
#ifdef RTC_SET_CHARGE
case RTC_SET_CHARGE:
{
- struct ds1302_rtc *rtc = dev_get_drvdata(dev);
int tcs_val;
if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 2736b11a1b1..9630e7d3314 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -617,7 +617,6 @@ static struct bin_attribute nvram = {
static int __devinit ds1305_probe(struct spi_device *spi)
{
struct ds1305 *ds1305;
- struct rtc_device *rtc;
int status;
u8 addr, value;
struct ds1305_platform_data *pdata = spi->dev.platform_data;
@@ -756,14 +755,13 @@ static int __devinit ds1305_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
- rtc = rtc_device_register("ds1305", &spi->dev,
+ ds1305->rtc = rtc_device_register("ds1305", &spi->dev,
&ds1305_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- status = PTR_ERR(rtc);
+ if (IS_ERR(ds1305->rtc)) {
+ status = PTR_ERR(ds1305->rtc);
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
goto fail0;
}
- ds1305->rtc = rtc;
/* Maybe set up alarm IRQ; be ready to handle it triggering right
* away. NOTE that we don't share this. The signal is active low,
@@ -774,12 +772,14 @@ static int __devinit ds1305_probe(struct spi_device *spi)
if (spi->irq) {
INIT_WORK(&ds1305->work, ds1305_work);
status = request_irq(spi->irq, ds1305_irq,
- 0, dev_name(&rtc->dev), ds1305);
+ 0, dev_name(&ds1305->rtc->dev), ds1305);
if (status < 0) {
dev_dbg(&spi->dev, "request_irq %d --> %d\n",
spi->irq, status);
goto fail1;
}
+
+ device_set_wakeup_capable(&spi->dev, 1);
}
/* export NVRAM */
@@ -794,7 +794,7 @@ static int __devinit ds1305_probe(struct spi_device *spi)
fail2:
free_irq(spi->irq, ds1305);
fail1:
- rtc_device_unregister(rtc);
+ rtc_device_unregister(ds1305->rtc);
fail0:
kfree(ds1305);
return status;
@@ -802,7 +802,7 @@ fail0:
static int __devexit ds1305_remove(struct spi_device *spi)
{
- struct ds1305 *ds1305 = spi_get_drvdata(spi);
+ struct ds1305 *ds1305 = spi_get_drvdata(spi);
sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index eb99ee4fa0f..c4ec5c158aa 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -874,13 +874,15 @@ read_rtc:
}
if (want_irq) {
- err = request_irq(client->irq, ds1307_irq, 0,
+ err = request_irq(client->irq, ds1307_irq, IRQF_SHARED,
ds1307->rtc->name, client);
if (err) {
dev_err(&client->dev,
"unable to request IRQ!\n");
goto exit_irq;
}
+
+ device_set_wakeup_capable(&client->dev, 1);
set_bit(HAS_ALARM, &ds1307->flags);
dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
}
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 713f7bf5afb..5317bbcbc7a 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -383,6 +383,8 @@ static int ds1374_probe(struct i2c_client *client,
dev_err(&client->dev, "unable to request IRQ\n");
goto out_free;
}
+
+ device_set_wakeup_capable(&client->dev, 1);
}
ds1374->rtc = rtc_device_register(client->name, &client->dev,
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 539676e25fd..4166b84cb51 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -87,7 +87,6 @@ enum ds1511reg {
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
- unsigned long baseaddr; /* physical base address */
int size; /* amount of memory mapped */
int irq;
unsigned int irqen;
@@ -95,6 +94,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static DEFINE_SPINLOCK(ds1511_lock);
@@ -302,7 +302,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
{
unsigned long flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ spin_lock_irqsave(&pdata->lock, flags);
rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
RTC_ALARM_DATE);
@@ -317,7 +317,7 @@ ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
RTC_ALARM_SEC);
rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
rtc_read(RTC_CMD1); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+ spin_unlock_irqrestore(&pdata->lock, flags);
}
static int
@@ -362,61 +362,63 @@ ds1511_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/*
* read and clear interrupt
*/
- if (!(rtc_read(RTC_CMD1) & DS1511_IRQF)) {
- return IRQ_NONE;
- }
- if (rtc_read(RTC_ALARM_SEC) & 0x80) {
- events |= RTC_UF;
- } else {
- events |= RTC_AF;
- }
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
+ events = RTC_IRQF;
+ if (rtc_read(RTC_ALARM_SEC) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
- static int
-ds1511_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- if (pdata->irq <= 0) {
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- }
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_OFF:
- pdata->irqen &= ~RTC_UF;
- ds1511_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_ON:
+ else
+ pdata->irqen &= ~RTC_AF;
+ ds1511_rtc_update_alarm(pdata);
+ return 0;
+}
+
+static int ds1511_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_UF;
- ds1511_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_UF;
+ ds1511_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1511_rtc_ops = {
- .read_time = ds1511_rtc_read_time,
- .set_time = ds1511_rtc_set_time,
- .read_alarm = ds1511_rtc_read_alarm,
- .set_alarm = ds1511_rtc_set_alarm,
- .ioctl = ds1511_rtc_ioctl,
+ .read_time = ds1511_rtc_read_time,
+ .set_time = ds1511_rtc_set_time,
+ .read_alarm = ds1511_rtc_read_alarm,
+ .set_alarm = ds1511_rtc_set_alarm,
+ .alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
+ .update_irq_enable = ds1511_rtc_update_irq_enable,
};
static ssize_t
@@ -492,29 +494,23 @@ ds1511_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct resource *res;
- struct rtc_plat_data *pdata = NULL;
+ struct rtc_plat_data *pdata;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
return -ENODEV;
}
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
pdata->size = res->end - res->start + 1;
- if (!request_mem_region(res->start, pdata->size, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- pdata->size = pdata->size;
- ds1511_base = ioremap(pdata->baseaddr, pdata->size);
- if (!ds1511_base) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
+ pdev->name))
+ return -EBUSY;
+ ds1511_base = devm_ioremap(&pdev->dev, res->start, pdata->size);
+ if (!ds1511_base)
+ return -ENOMEM;
pdata->ioaddr = ds1511_base;
pdata->irq = platform_get_irq(pdev, 0);
@@ -540,13 +536,15 @@ ds1511_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "voltage-low detected.\n");
}
+ spin_lock_init(&pdata->lock);
+ platform_set_drvdata(pdev, pdata);
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
*/
if (pdata->irq > 0) {
rtc_read(RTC_CMD1);
- if (request_irq(pdata->irq, ds1511_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
@@ -556,33 +554,13 @@ ds1511_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- platform_set_drvdata(pdev, pdata);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (ret) {
- goto out;
- }
- return 0;
- out:
- if (pdata->rtc) {
+ if (ret)
rtc_device_unregister(pdata->rtc);
- }
- if (pdata->irq > 0) {
- free_irq(pdata->irq, pdev);
- }
- if (ds1511_base) {
- iounmap(ds1511_base);
- ds1511_base = NULL;
- }
- if (pdata->baseaddr) {
- release_mem_region(pdata->baseaddr, pdata->size);
- }
-
- kfree(pdata);
return ret;
}
@@ -593,19 +571,13 @@ ds1511_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
rtc_device_unregister(pdata->rtc);
- pdata->rtc = NULL;
if (pdata->irq > 0) {
/*
* disable the alarm interrupt
*/
rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
rtc_read(RTC_CMD1);
- free_irq(pdata->irq, pdev);
}
- iounmap(pdata->ioaddr);
- ds1511_base = NULL;
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 717288527c6..ed1ef7c9cc0 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
#define RTC_REG_SIZE 0x2000
#define RTC_OFFSET 0x1ff0
@@ -61,7 +61,6 @@
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
- resource_size_t baseaddr;
unsigned long last_jiffies;
int irq;
unsigned int irqen;
@@ -69,6 +68,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static int ds1553_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -139,7 +139,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
void __iomem *ioaddr = pdata->ioaddr;
unsigned long flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ spin_lock_irqsave(&pdata->lock, flags);
writeb(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
0x80 : bin2bcd(pdata->alrm_mday),
ioaddr + RTC_DATE_ALARM);
@@ -154,7 +154,7 @@ static void ds1553_rtc_update_alarm(struct rtc_plat_data *pdata)
ioaddr + RTC_SECONDS_ALARM);
writeb(pdata->irqen ? RTC_INTS_AE : 0, ioaddr + RTC_INTERRUPTS);
readb(ioaddr + RTC_FLAGS); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+ spin_unlock_irqrestore(&pdata->lock, flags);
}
static int ds1553_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -194,64 +194,69 @@ static irqreturn_t ds1553_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/* read and clear interrupt */
- if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
- return IRQ_NONE;
- if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
+ events = RTC_IRQF;
+ if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
-static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_OFF:
- pdata->irqen &= ~RTC_UF;
- ds1553_rtc_update_alarm(pdata);
- break;
- case RTC_UIE_ON:
+ else
+ pdata->irqen &= ~RTC_AF;
+ ds1553_rtc_update_alarm(pdata);
+ return 0;
+}
+
+static int ds1553_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq <= 0)
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_UF;
- ds1553_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_UF;
+ ds1553_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops ds1553_rtc_ops = {
- .read_time = ds1553_rtc_read_time,
- .set_time = ds1553_rtc_set_time,
- .read_alarm = ds1553_rtc_read_alarm,
- .set_alarm = ds1553_rtc_set_alarm,
- .ioctl = ds1553_rtc_ioctl,
+ .read_time = ds1553_rtc_read_time,
+ .set_time = ds1553_rtc_set_time,
+ .read_alarm = ds1553_rtc_read_alarm,
+ .set_alarm = ds1553_rtc_set_alarm,
+ .alarm_irq_enable = ds1553_rtc_alarm_irq_enable,
+ .update_irq_enable = ds1553_rtc_update_irq_enable,
};
static ssize_t ds1553_nvram_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -265,8 +270,8 @@ static ssize_t ds1553_nvram_write(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -291,26 +296,23 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
- struct rtc_plat_data *pdata = NULL;
- void __iomem *ioaddr = NULL;
+ struct rtc_plat_data *pdata;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
+ pdev->name))
+ return -EBUSY;
+
+ ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
+ if (!ioaddr)
+ return -ENOMEM;
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
@@ -326,9 +328,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_BLF)
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ spin_lock_init(&pdata->lock);
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
- if (request_irq(pdata->irq, ds1553_rtc_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq,
+ ds1553_rtc_interrupt,
IRQF_DISABLED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
pdata->irq = 0;
@@ -337,27 +343,13 @@ static int __devinit ds1553_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1553_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
if (ret)
- goto out;
- return 0;
- out:
- if (pdata->rtc)
- rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0)
- free_irq(pdata->irq, pdev);
- if (ioaddr)
- iounmap(ioaddr);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
+ rtc_device_unregister(rtc);
return ret;
}
@@ -367,13 +359,8 @@ static int __devexit ds1553_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &ds1553_nvram_attr);
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0) {
+ if (pdata->irq > 0)
writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
- free_irq(pdata->irq, pdev);
- }
- iounmap(pdata->ioaddr);
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 09249459e9a..a1273360a44 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -21,7 +21,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
-#define DRV_VERSION "0.3"
+#define DRV_VERSION "0.4"
#define RTC_SIZE 8
@@ -55,7 +55,6 @@ struct rtc_plat_data {
void __iomem *ioaddr_rtc;
size_t size_nvram;
size_t size;
- resource_size_t baseaddr;
unsigned long last_jiffies;
struct bin_attribute nvram_attr;
};
@@ -132,8 +131,8 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
@@ -147,8 +146,8 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr_nvram;
ssize_t count;
@@ -163,27 +162,24 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
struct resource *res;
unsigned int cen, sec;
- struct rtc_plat_data *pdata = NULL;
- void __iomem *ioaddr = NULL;
+ struct rtc_plat_data *pdata;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->size = res->end - res->start + 1;
- if (!request_mem_region(res->start, pdata->size, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, pdata->size);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
+ pdev->name))
+ return -EBUSY;
+ ioaddr = devm_ioremap(&pdev->dev, res->start, pdata->size);
+ if (!ioaddr)
+ return -ENOMEM;
+
pdata->ioaddr_nvram = ioaddr;
pdata->size_nvram = pdata->size - RTC_SIZE;
pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
@@ -207,31 +203,19 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG))
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
rtc = rtc_device_register(pdev->name, &pdev->dev,
&ds1742_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
if (ret) {
dev_err(&pdev->dev, "creating nvram file in sysfs failed\n");
- goto out;
+ rtc_device_unregister(rtc);
}
-
- return 0;
- out:
- if (pdata->rtc)
- rtc_device_unregister(pdata->rtc);
- if (pdata->ioaddr_nvram)
- iounmap(pdata->ioaddr_nvram);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return ret;
}
@@ -241,9 +225,6 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr);
rtc_device_unregister(pdata->rtc);
- iounmap(pdata->ioaddr_nvram);
- release_mem_region(pdata->baseaddr, pdata->size);
- kfree(pdata);
return 0;
}
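The ds1511, ds1553 and ds1742 probes above were all converted to the same devm_* pattern, which is why their error paths and remove functions shrink: managed resources are released automatically when the device goes away. A generic sketch of that pattern (not part of the patch; names are placeholders):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int __devinit example_rtc_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* no iounmap()/release_mem_region() needed on later error paths */
	return 0;
}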
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 0b219755994..8cb5b8959e5 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -142,7 +142,6 @@ static const struct rtc_class_ops m48t35_ops = {
static int __devinit m48t35_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
struct m48t35_priv *priv;
int ret = 0;
@@ -171,20 +170,21 @@ static int __devinit m48t35_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto out;
}
+
spin_lock_init(&priv->lock);
- rtc = rtc_device_register("m48t35", &pdev->dev,
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->rtc = rtc_device_register("m48t35", &pdev->dev,
&m48t35_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
goto out;
}
- priv->rtc = rtc;
- platform_set_drvdata(pdev, priv);
+
return 0;
out:
- if (priv->rtc)
- rtc_device_unregister(priv->rtc);
if (priv->reg)
iounmap(priv->reg);
if (priv->baseaddr)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 33921a6b170..ede43b84685 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -481,6 +481,9 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
goto out;
}
+ spin_lock_init(&m48t59->lock);
+ platform_set_drvdata(pdev, m48t59);
+
m48t59->rtc = rtc_device_register(name, &pdev->dev, ops, THIS_MODULE);
if (IS_ERR(m48t59->rtc)) {
ret = PTR_ERR(m48t59->rtc);
@@ -490,16 +493,14 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
m48t59_nvram_attr.size = pdata->offset;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
- if (ret)
+ if (ret) {
+ rtc_device_unregister(m48t59->rtc);
goto out;
+ }
- spin_lock_init(&m48t59->lock);
- platform_set_drvdata(pdev, m48t59);
return 0;
out:
- if (!IS_ERR(m48t59->rtc))
- rtc_device_unregister(m48t59->rtc);
if (m48t59->irq != NO_IRQ)
free_irq(m48t59->irq, &pdev->dev);
if (m48t59->ioaddr)
diff --git a/drivers/rtc/rtc-mc13783.c b/drivers/rtc/rtc-mc13783.c
new file mode 100644
index 00000000000..850f983c039
--- /dev/null
+++ b/drivers/rtc/rtc-mc13783.c
@@ -0,0 +1,262 @@
+/*
+ * Real Time Clock driver for Freescale MC13783 PMIC
+ *
+ * (C) 2009 Sascha Hauer, Pengutronix
+ * (C) 2009 Uwe Kleine-Koenig, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13783.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+
+#define DRIVER_NAME "mc13783-rtc"
+
+#define MC13783_RTCTOD 20
+#define MC13783_RTCTODA 21
+#define MC13783_RTCDAY 22
+#define MC13783_RTCDAYA 23
+
+struct mc13783_rtc {
+ struct rtc_device *rtc;
+ struct mc13783 *mc13783;
+ int valid;
+};
+
+static int mc13783_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days1, days2;
+ unsigned long s1970;
+ int ret;
+
+ mc13783_lock(priv->mc13783);
+
+ if (!priv->valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days1);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTOD, &seconds);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days2);
+out:
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ if (days2 == days1 + 1) {
+ if (seconds >= 86400 / 2)
+ days2 = days1;
+ else
+ days1 = days2;
+ }
+
+ if (days1 != days2)
+ return -EIO;
+
+ s1970 = days1 * 86400 + seconds;
+
+ rtc_time_to_tm(s1970, tm);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ unsigned int seconds, days;
+ int ret;
+
+ seconds = secs % 86400;
+ days = secs / 86400;
+
+ mc13783_lock(priv->mc13783);
+
+ /*
+ * first write seconds=0 to prevent a day switch between writing days
+ * and seconds below
+ */
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, 0);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCDAY, days);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, seconds);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_ackirq(priv->mc13783, MC13783_IRQ_RTCRST);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_unmask(priv->mc13783, MC13783_IRQ_RTCRST);
+out:
+ priv->valid = !ret;
+
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static irqreturn_t mc13783_rtc_update_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "1HZ\n");
+
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
+
+ mc13783_ackirq(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int mc13783_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ int ret = -ENODATA;
+
+ mc13783_lock(priv->mc13783);
+ if (!priv->valid)
+ goto out;
+
+ ret = (enabled ? mc13783_unmask : mc13783_mask)(priv->mc13783,
+ MC13783_IRQ_1HZ);
+out:
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static const struct rtc_class_ops mc13783_rtc_ops = {
+ .read_time = mc13783_rtc_read_time,
+ .set_mmss = mc13783_rtc_set_mmss,
+ .update_irq_enable = mc13783_rtc_update_irq_enable,
+};
+
+static irqreturn_t mc13783_rtc_reset_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "RTCRST\n");
+ priv->valid = 0;
+
+ mc13783_mask(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct mc13783_rtc *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
+ platform_set_drvdata(pdev, priv);
+
+ priv->valid = 1;
+
+ mc13783_lock(priv->mc13783);
+
+ ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_RTCRST,
+ mc13783_rtc_reset_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_reset_irq_request;
+
+ ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_1HZ,
+ mc13783_rtc_update_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_update_irq_request;
+
+ mc13783_unlock(priv->mc13783);
+
+ priv->rtc = rtc_device_register(pdev->name,
+ &pdev->dev, &mc13783_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
+
+ mc13783_lock(priv->mc13783);
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
+err_update_irq_request:
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
+err_reset_irq_request:
+
+ mc13783_unlock(priv->mc13783);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(priv);
+ }
+
+ return ret;
+}
+
+static int __devexit mc13783_rtc_remove(struct platform_device *pdev)
+{
+ struct mc13783_rtc *priv = platform_get_drvdata(pdev);
+
+ rtc_device_unregister(priv->rtc);
+
+ mc13783_lock(priv->mc13783);
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
+
+ mc13783_unlock(priv->mc13783);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static struct platform_driver mc13783_rtc_driver = {
+ .remove = __devexit_p(mc13783_rtc_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mc13783_rtc_init(void)
+{
+ return platform_driver_probe(&mc13783_rtc_driver, &mc13783_rtc_probe);
+}
+module_init(mc13783_rtc_init);
+
+static void __exit mc13783_rtc_exit(void)
+{
+ platform_driver_unregister(&mc13783_rtc_driver);
+}
+module_exit(mc13783_rtc_exit);
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("RTC driver for Freescale MC13783 PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index e0263d2005e..dc052ce6e63 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -27,10 +27,17 @@
#define RTC_MONTH_OFFS 8
#define RTC_YEAR_OFFS 16
+#define RTC_ALARM_TIME_REG_OFFS 8
+#define RTC_ALARM_DATE_REG_OFFS 0xc
+#define RTC_ALARM_VALID (1 << 7)
+
+#define RTC_ALARM_INTERRUPT_MASK_REG_OFFS 0x10
+#define RTC_ALARM_INTERRUPT_CASUE_REG_OFFS 0x14
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
+ int irq;
};
static int mv_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -84,12 +91,134 @@ static int mv_rtc_read_time(struct device *dev, struct rtc_time *tm)
return rtc_valid_tm(tm);
}
+static int mv_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 rtc_time, rtc_date;
+ unsigned int year, month, day, hour, minute, second, wday;
+
+ rtc_time = readl(ioaddr + RTC_ALARM_TIME_REG_OFFS);
+ rtc_date = readl(ioaddr + RTC_ALARM_DATE_REG_OFFS);
+
+ second = rtc_time & 0x7f;
+ minute = (rtc_time >> RTC_MINUTES_OFFS) & 0x7f;
+ hour = (rtc_time >> RTC_HOURS_OFFS) & 0x3f; /* assume 24 hours mode */
+ wday = (rtc_time >> RTC_WDAY_OFFS) & 0x7;
+
+ day = rtc_date & 0x3f;
+ month = (rtc_date >> RTC_MONTH_OFFS) & 0x3f;
+ year = (rtc_date >> RTC_YEAR_OFFS) & 0xff;
+
+ alm->time.tm_sec = bcd2bin(second);
+ alm->time.tm_min = bcd2bin(minute);
+ alm->time.tm_hour = bcd2bin(hour);
+ alm->time.tm_mday = bcd2bin(day);
+ alm->time.tm_wday = bcd2bin(wday);
+ alm->time.tm_mon = bcd2bin(month) - 1;
+ /* hw counts from year 2000, but tm_year is relative to 1900 */
+ alm->time.tm_year = bcd2bin(year) + 100;
+
+ if (rtc_valid_tm(&alm->time) < 0) {
+ dev_err(dev, "retrieved alarm date/time is not valid.\n");
+ rtc_time_to_tm(0, &alm->time);
+ }
+
+ alm->enabled = !!readl(ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ return 0;
+}
+
+static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 rtc_reg = 0;
+
+ if (alm->time.tm_sec >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_sec))
+ << RTC_SECONDS_OFFS;
+ if (alm->time.tm_min >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_min))
+ << RTC_MINUTES_OFFS;
+ if (alm->time.tm_hour >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_hour))
+ << RTC_HOURS_OFFS;
+
+ writel(rtc_reg, ioaddr + RTC_ALARM_TIME_REG_OFFS);
+
+ if (alm->time.tm_mday >= 0)
+ rtc_reg = (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mday))
+ << RTC_MDAY_OFFS;
+ else
+ rtc_reg = 0;
+
+ if (alm->time.tm_mon >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_mon + 1))
+ << RTC_MONTH_OFFS;
+
+ if (alm->time.tm_year >= 0)
+ rtc_reg |= (RTC_ALARM_VALID | bin2bcd(alm->time.tm_year % 100))
+ << RTC_YEAR_OFFS;
+
+ writel(rtc_reg, ioaddr + RTC_ALARM_DATE_REG_OFFS);
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
+ writel(alm->enabled ? 1 : 0,
+ ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+
+ return 0;
+}
+
+static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ if (pdata->irq < 0)
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ break;
+ case RTC_AIE_ON:
+ writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static irqreturn_t mv_rtc_interrupt(int irq, void *data)
+{
+ struct rtc_plat_data *pdata = data;
+ void __iomem *ioaddr = pdata->ioaddr;
+
+ /* alarm irq? */
+ if (!readl(ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS))
+ return IRQ_NONE;
+
+ /* clear interrupt */
+ writel(0, ioaddr + RTC_ALARM_INTERRUPT_CASUE_REG_OFFS);
+ rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
static const struct rtc_class_ops mv_rtc_ops = {
.read_time = mv_rtc_read_time,
.set_time = mv_rtc_set_time,
};
-static int __init mv_rtc_probe(struct platform_device *pdev)
+static const struct rtc_class_ops mv_rtc_alarm_ops = {
+ .read_time = mv_rtc_read_time,
+ .set_time = mv_rtc_set_time,
+ .read_alarm = mv_rtc_read_alarm,
+ .set_alarm = mv_rtc_set_alarm,
+ .ioctl = mv_rtc_ioctl,
+};
+
+static int __devinit mv_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct rtc_plat_data *pdata;
@@ -130,12 +259,31 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
}
}
+ pdata->irq = platform_get_irq(pdev, 0);
+
platform_set_drvdata(pdev, pdata);
- pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
- &mv_rtc_ops, THIS_MODULE);
+
+ if (pdata->irq >= 0) {
+ device_init_wakeup(&pdev->dev, 1);
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &mv_rtc_alarm_ops,
+ THIS_MODULE);
+ } else
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &mv_rtc_ops, THIS_MODULE);
if (IS_ERR(pdata->rtc))
return PTR_ERR(pdata->rtc);
+ if (pdata->irq >= 0) {
+ writel(0, pdata->ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
+ if (devm_request_irq(&pdev->dev, pdata->irq, mv_rtc_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ pdev->name, pdata) < 0) {
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ pdata->irq = -1;
+ }
+ }
+
return 0;
}
@@ -143,6 +291,9 @@ static int __exit mv_rtc_remove(struct platform_device *pdev)
{
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ if (pdata->irq >= 0)
+ device_init_wakeup(&pdev->dev, 0);
+
rtc_device_unregister(pdata->rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
new file mode 100644
index 00000000000..bf59c9c586b
--- /dev/null
+++ b/drivers/rtc/rtc-nuc900.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2008-2009 Nuvoton Technology Corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/bcd.h>
+
+/* RTC Control Registers */
+#define REG_RTC_INIR 0x00
+#define REG_RTC_AER 0x04
+#define REG_RTC_FCR 0x08
+#define REG_RTC_TLR 0x0C
+#define REG_RTC_CLR 0x10
+#define REG_RTC_TSSR 0x14
+#define REG_RTC_DWR 0x18
+#define REG_RTC_TAR 0x1C
+#define REG_RTC_CAR 0x20
+#define REG_RTC_LIR 0x24
+#define REG_RTC_RIER 0x28
+#define REG_RTC_RIIR 0x2C
+#define REG_RTC_TTR 0x30
+
+#define RTCSET 0x01
+#define AERRWENB 0x10000
+#define INIRRESET 0xa5eb1357
+#define AERPOWERON 0xA965
+#define AERPOWEROFF 0x0000
+#define LEAPYEAR 0x0001
+#define TICKENB 0x80
+#define TICKINTENB 0x0002
+#define ALARMINTENB 0x0001
+#define MODE24 0x0001
+
+struct nuc900_rtc {
+ int irq_num;
+ void __iomem *rtc_reg;
+ struct rtc_device *rtcdev;
+};
+
+struct nuc900_bcd_time {
+ int bcd_sec;
+ int bcd_min;
+ int bcd_hour;
+ int bcd_mday;
+ int bcd_mon;
+ int bcd_year;
+};
+
+static irqreturn_t nuc900_rtc_interrupt(int irq, void *_rtc)
+{
+ struct nuc900_rtc *rtc = _rtc;
+ unsigned long events = 0, rtc_irq;
+
+ rtc_irq = __raw_readl(rtc->rtc_reg + REG_RTC_RIIR);
+
+ if (rtc_irq & ALARMINTENB) {
+ rtc_irq &= ~ALARMINTENB;
+ __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
+ events |= RTC_AF | RTC_IRQF;
+ }
+
+ if (rtc_irq & TICKINTENB) {
+ rtc_irq &= ~TICKINTENB;
+ __raw_writel(rtc_irq, rtc->rtc_reg + REG_RTC_RIIR);
+ events |= RTC_UF | RTC_IRQF;
+ }
+
+ rtc_update_irq(rtc->rtcdev, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc)
+{
+ unsigned int i;
+ __raw_writel(INIRRESET, nuc900_rtc->rtc_reg + REG_RTC_INIR);
+
+ mdelay(10);
+
+ __raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER);
+
+ for (i = 0; i < 1000; i++) {
+ if (__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
+ return 0;
+ }
+
+ if ((__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB) == 0x0)
+ return ERR_PTR(-ENODEV);
+
+ return ERR_PTR(-EPERM);
+}
+
+static void nuc900_rtc_bcd2bin(unsigned int timereg,
+ unsigned int calreg, struct rtc_time *tm)
+{
+ tm->tm_mday = bcd2bin(calreg >> 0);
+ tm->tm_mon = bcd2bin(calreg >> 8);
+ tm->tm_year = bcd2bin(calreg >> 16) + 100;
+
+ tm->tm_sec = bcd2bin(timereg >> 0);
+ tm->tm_min = bcd2bin(timereg >> 8);
+ tm->tm_hour = bcd2bin(timereg >> 16);
+
+ rtc_valid_tm(tm);
+}
+
+static void nuc900_rtc_bin2bcd(struct rtc_time *settm,
+ struct nuc900_bcd_time *gettm)
+{
+ gettm->bcd_mday = bin2bcd(settm->tm_mday) << 0;
+ gettm->bcd_mon = bin2bcd(settm->tm_mon) << 8;
+ gettm->bcd_year = bin2bcd(settm->tm_year - 100) << 16;
+
+ gettm->bcd_sec = bin2bcd(settm->tm_sec) << 0;
+ gettm->bcd_min = bin2bcd(settm->tm_min) << 8;
+ gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16;
+}
+
+static int nuc900_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
+ (TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
+ else
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
+ (~TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
+
+ return 0;
+}
+
+static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
+ (ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
+ else
+ __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
+ (~ALARMINTENB), rtc->rtc_reg + REG_RTC_RIER);
+
+ return 0;
+}
+
+static int nuc900_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int timeval, clrval;
+
+ timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TLR);
+ clrval = __raw_readl(rtc->rtc_reg + REG_RTC_CLR);
+
+ nuc900_rtc_bcd2bin(timeval, clrval, tm);
+
+ return 0;
+}
+
+static int nuc900_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ struct nuc900_bcd_time gettm;
+ unsigned long val;
+ int *err;
+
+ nuc900_rtc_bin2bcd(tm, &gettm);
+
+ err = check_rtc_access_enable(rtc);
+ if (IS_ERR(err))
+ return PTR_ERR(err);
+
+ val = gettm.bcd_mday | gettm.bcd_mon | gettm.bcd_year;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_CLR);
+
+ val = gettm.bcd_sec | gettm.bcd_min | gettm.bcd_hour;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_TLR);
+
+ return 0;
+}
+
+static int nuc900_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ unsigned int timeval, carval;
+
+ timeval = __raw_readl(rtc->rtc_reg + REG_RTC_TAR);
+ carval = __raw_readl(rtc->rtc_reg + REG_RTC_CAR);
+
+ nuc900_rtc_bcd2bin(timeval, carval, &alrm->time);
+
+ return 0;
+}
+
+static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nuc900_rtc *rtc = dev_get_drvdata(dev);
+ struct nuc900_bcd_time tm;
+ unsigned long val;
+ int *err;
+
+ nuc900_rtc_bin2bcd(&alrm->time, &tm);
+
+ err = check_rtc_access_enable(rtc);
+ if (IS_ERR(err))
+ return PTR_ERR(err);
+
+ val = tm.bcd_mday | tm.bcd_mon | tm.bcd_year;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_CAR);
+
+ val = tm.bcd_sec | tm.bcd_min | tm.bcd_hour;
+ __raw_writel(val, rtc->rtc_reg + REG_RTC_TAR);
+
+ return 0;
+}
+
+static struct rtc_class_ops nuc900_rtc_ops = {
+ .read_time = nuc900_rtc_read_time,
+ .set_time = nuc900_rtc_set_time,
+ .read_alarm = nuc900_rtc_read_alarm,
+ .set_alarm = nuc900_rtc_set_alarm,
+ .alarm_irq_enable = nuc900_alarm_irq_enable,
+ .update_irq_enable = nuc900_update_irq_enable,
+};
+
+static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct nuc900_rtc *nuc900_rtc;
+ int err = 0;
+
+ nuc900_rtc = kzalloc(sizeof(struct nuc900_rtc), GFP_KERNEL);
+ if (!nuc900_rtc) {
+ dev_err(&pdev->dev, "kzalloc nuc900_rtc failed\n");
+ return -ENOMEM;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "platform_get_resource failed\n");
+ err = -ENXIO;
+ goto fail1;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ pdev->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ err = -EBUSY;
+ goto fail1;
+ }
+
+ nuc900_rtc->rtc_reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_rtc->rtc_reg) {
+ dev_err(&pdev->dev, "ioremap rtc_reg failed\n");
+ err = -ENOMEM;
+ goto fail2;
+ }
+
+ nuc900_rtc->irq_num = platform_get_irq(pdev, 0);
+ if (request_irq(nuc900_rtc->irq_num, nuc900_rtc_interrupt,
+ IRQF_DISABLED, "nuc900rtc", nuc900_rtc)) {
+ dev_err(&pdev->dev, "NUC900 RTC request irq failed\n");
+ err = -EBUSY;
+ goto fail3;
+ }
+
+ nuc900_rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
+ &nuc900_rtc_ops, THIS_MODULE);
+ if (IS_ERR(nuc900_rtc->rtcdev)) {
+ dev_err(&pdev->dev, "rtc device register faild\n");
+ err = PTR_ERR(nuc900_rtc->rtcdev);
+ goto fail4;
+ }
+
+ platform_set_drvdata(pdev, nuc900_rtc);
+ __raw_writel(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_TSSR) | MODE24,
+ nuc900_rtc->rtc_reg + REG_RTC_TSSR);
+
+ return 0;
+
+fail4: free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+fail3: iounmap(nuc900_rtc->rtc_reg);
+fail2: release_mem_region(res->start, resource_size(res));
+fail1: kfree(nuc900_rtc);
+ return err;
+}
+
+static int __devexit nuc900_rtc_remove(struct platform_device *pdev)
+{
+ struct nuc900_rtc *nuc900_rtc = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ rtc_device_unregister(nuc900_rtc->rtcdev);
+ free_irq(nuc900_rtc->irq_num, nuc900_rtc);
+ iounmap(nuc900_rtc->rtc_reg);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ kfree(nuc900_rtc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver nuc900_rtc_driver = {
+ .remove = __devexit_p(nuc900_rtc_remove),
+ .driver = {
+ .name = "nuc900-rtc",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nuc900_rtc_init(void)
+{
+ return platform_driver_probe(&nuc900_rtc_driver, nuc900_rtc_probe);
+}
+
+static void __exit nuc900_rtc_exit(void)
+{
+ platform_driver_unregister(&nuc900_rtc_driver);
+}
+
+module_init(nuc900_rtc_init);
+module_exit(nuc900_rtc_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("nuc910/nuc920 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-rtc");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 0587d53987f..64d9727b722 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -87,9 +87,10 @@
#define OMAP_RTC_INTERRUPTS_IT_ALARM (1<<3)
#define OMAP_RTC_INTERRUPTS_IT_TIMER (1<<2)
+static void __iomem *rtc_base;
-#define rtc_read(addr) omap_readb(OMAP_RTC_BASE + (addr))
-#define rtc_write(val, addr) omap_writeb(val, OMAP_RTC_BASE + (addr))
+#define rtc_read(addr) __raw_readb(rtc_base + (addr))
+#define rtc_write(val, addr) __raw_writeb(val, rtc_base + (addr))
/* we rely on the rtc framework to handle locking (rtc->ops_lock),
@@ -330,32 +331,31 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- /* NOTE: using static mapping for RTC registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res && res->start != OMAP_RTC_BASE) {
- pr_debug("%s: RTC registers at %08x, expected %08x\n",
- pdev->name, (unsigned) res->start, OMAP_RTC_BASE);
+ if (!res) {
+ pr_debug("%s: RTC resource data missing\n", pdev->name);
return -ENOENT;
}
- if (res)
- mem = request_mem_region(res->start,
- res->end - res->start + 1,
- pdev->name);
- else
- mem = NULL;
+ mem = request_mem_region(res->start, resource_size(res), pdev->name);
if (!mem) {
pr_debug("%s: RTC registers at %08x are not free\n",
- pdev->name, OMAP_RTC_BASE);
+ pdev->name, res->start);
return -EBUSY;
}
+ rtc_base = ioremap(res->start, resource_size(res));
+ if (!rtc_base) {
+ pr_debug("%s: RTC registers can't be mapped\n", pdev->name);
+ goto fail;
+ }
+
rtc = rtc_device_register(pdev->name, &pdev->dev,
&omap_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
pr_debug("%s: can't register RTC device, err %ld\n",
pdev->name, PTR_ERR(rtc));
- goto fail;
+ goto fail0;
}
platform_set_drvdata(pdev, rtc);
dev_set_drvdata(&rtc->dev, mem);
@@ -380,13 +380,14 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
dev_name(&rtc->dev), rtc)) {
pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n",
pdev->name, omap_rtc_timer);
- goto fail0;
+ goto fail1;
}
- if (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED,
- dev_name(&rtc->dev), rtc)) {
+ if ((omap_rtc_timer != omap_rtc_alarm) &&
+ (request_irq(omap_rtc_alarm, rtc_irq, IRQF_DISABLED,
+ dev_name(&rtc->dev), rtc))) {
pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
pdev->name, omap_rtc_alarm);
- goto fail1;
+ goto fail2;
}
/* On boards with split power, RTC_ON_NOFF won't reset the RTC */
@@ -419,10 +420,12 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
return 0;
-fail1:
+fail2:
free_irq(omap_rtc_timer, NULL);
-fail0:
+fail1:
rtc_device_unregister(rtc);
+fail0:
+ iounmap(rtc_base);
fail:
release_resource(mem);
return -EIO;
@@ -438,7 +441,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
free_irq(omap_rtc_timer, rtc);
- free_irq(omap_rtc_alarm, rtc);
+
+ if (omap_rtc_timer != omap_rtc_alarm)
+ free_irq(omap_rtc_alarm, rtc);
release_resource(dev_get_drvdata(&rtc->dev));
rtc_device_unregister(rtc);
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 4c5d5d0c4cf..854c3cb365a 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -58,6 +58,7 @@ struct pcf50633_time {
struct pcf50633_rtc {
int alarm_enabled;
int second_enabled;
+ int alarm_pending;
struct pcf50633 *pcf;
struct rtc_device *rtc_dev;
@@ -209,6 +210,7 @@ static int pcf50633_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
rtc = dev_get_drvdata(dev);
alrm->enabled = rtc->alarm_enabled;
+ alrm->pending = rtc->alarm_pending;
ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
PCF50633_TI_EXTENT, &pcf_tm.time[0]);
@@ -244,6 +246,8 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Returns 0 on success */
ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
PCF50633_TI_EXTENT, &pcf_tm.time[0]);
+ if (!alrm->enabled)
+ rtc->alarm_pending = 0;
if (!alarm_masked || alrm->enabled)
pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
@@ -268,6 +272,7 @@ static void pcf50633_rtc_irq(int irq, void *data)
switch (irq) {
case PCF50633_IRQ_ALARM:
rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+ rtc->alarm_pending = 1;
break;
case PCF50633_IRQ_SECOND:
rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
@@ -277,16 +282,13 @@ static void pcf50633_rtc_irq(int irq, void *data)
static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
{
- struct pcf50633_subdev_pdata *pdata;
struct pcf50633_rtc *rtc;
-
rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- pdata = pdev->dev.platform_data;
- rtc->pcf = pdata->pcf;
+ rtc->pcf = dev_to_pcf50633(pdev->dev.parent);
platform_set_drvdata(pdev, rtc);
rtc->rtc_dev = rtc_device_register("pcf50633-rtc", &pdev->dev,
&pcf50633_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b725913ccbe..65f346b2fba 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -212,6 +212,8 @@ static int pcf8563_probe(struct i2c_client *client,
dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+ i2c_set_clientdata(client, pcf8563);
+
pcf8563->rtc = rtc_device_register(pcf8563_driver.driver.name,
&client->dev, &pcf8563_rtc_ops, THIS_MODULE);
@@ -220,8 +222,6 @@ static int pcf8563_probe(struct i2c_client *client,
goto exit_kfree;
}
- i2c_set_clientdata(client, pcf8563);
-
return 0;
exit_kfree:
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 7d33cda3f8f..2d201afead3 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -277,6 +277,8 @@ static int pcf8583_probe(struct i2c_client *client,
if (!pcf8583)
return -ENOMEM;
+ i2c_set_clientdata(client, pcf8583);
+
pcf8583->rtc = rtc_device_register(pcf8583_driver.driver.name,
&client->dev, &pcf8583_rtc_ops, THIS_MODULE);
@@ -285,7 +287,6 @@ static int pcf8583_probe(struct i2c_client *client,
goto exit_kfree;
}
- i2c_set_clientdata(client, pcf8583);
return 0;
exit_kfree:
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f41873f98f6..0264b117893 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -51,10 +51,10 @@ static int pl031_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTC_AIE_OFF:
- __raw_writel(1, ldata->base + RTC_MIS);
+ writel(1, ldata->base + RTC_MIS);
return 0;
case RTC_AIE_ON:
- __raw_writel(0, ldata->base + RTC_MIS);
+ writel(0, ldata->base + RTC_MIS);
return 0;
}
@@ -65,7 +65,7 @@ static int pl031_read_time(struct device *dev, struct rtc_time *tm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(__raw_readl(ldata->base + RTC_DR), tm);
+ rtc_time_to_tm(readl(ldata->base + RTC_DR), tm);
return 0;
}
@@ -76,7 +76,7 @@ static int pl031_set_time(struct device *dev, struct rtc_time *tm)
struct pl031_local *ldata = dev_get_drvdata(dev);
rtc_tm_to_time(tm, &time);
- __raw_writel(time, ldata->base + RTC_LR);
+ writel(time, ldata->base + RTC_LR);
return 0;
}
@@ -85,9 +85,9 @@ static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct pl031_local *ldata = dev_get_drvdata(dev);
- rtc_time_to_tm(__raw_readl(ldata->base + RTC_MR), &alarm->time);
- alarm->pending = __raw_readl(ldata->base + RTC_RIS);
- alarm->enabled = __raw_readl(ldata->base + RTC_IMSC);
+ rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time);
+ alarm->pending = readl(ldata->base + RTC_RIS);
+ alarm->enabled = readl(ldata->base + RTC_IMSC);
return 0;
}
@@ -99,8 +99,8 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
rtc_tm_to_time(&alarm->time, &time);
- __raw_writel(time, ldata->base + RTC_MR);
- __raw_writel(!alarm->enabled, ldata->base + RTC_MIS);
+ writel(time, ldata->base + RTC_MR);
+ writel(!alarm->enabled, ldata->base + RTC_MIS);
return 0;
}
@@ -180,8 +180,9 @@ err_req:
static struct amba_id pl031_ids[] __initdata = {
{
- .id = 0x00041031,
- .mask = 0x000fffff, },
+ .id = 0x00041031,
+ .mask = 0x000fffff,
+ },
{0, 0},
};
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 747ca194fad..e6351b743da 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -456,7 +456,7 @@ static int pxa_rtc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pxa_rtc_pm_ops = {
+static const struct dev_pm_ops pxa_rtc_pm_ops = {
.suspend = pxa_rtc_suspend,
.resume = pxa_rtc_resume,
};
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 29f98a70586..e4a44b64170 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -407,7 +407,7 @@ static int sa1100_rtc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sa1100_rtc_pm_ops = {
+static const struct dev_pm_ops sa1100_rtc_pm_ops = {
.suspend = sa1100_rtc_suspend,
.resume = sa1100_rtc_resume,
};
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index e6ed5404bca..e95cc6f8d61 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -826,7 +826,7 @@ static int sh_rtc_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_rtc_dev_pm_ops = {
+static const struct dev_pm_ops sh_rtc_dev_pm_ops = {
.suspend = sh_rtc_suspend,
.resume = sh_rtc_resume,
};
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index d491eb265c3..67700831b5c 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -62,7 +62,6 @@
struct rtc_plat_data {
struct rtc_device *rtc;
void __iomem *ioaddr;
- unsigned long baseaddr;
unsigned long last_jiffies;
int irq;
unsigned int irqen;
@@ -70,6 +69,7 @@ struct rtc_plat_data {
int alrm_min;
int alrm_hour;
int alrm_mday;
+ spinlock_t lock;
};
static int stk17ta8_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -142,7 +142,7 @@ static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
unsigned long irqflags;
u8 flags;
- spin_lock_irqsave(&pdata->rtc->irq_lock, irqflags);
+ spin_lock_irqsave(&pdata->lock, irqflags);
flags = readb(ioaddr + RTC_FLAGS);
writeb(flags | RTC_WRITE, ioaddr + RTC_FLAGS);
@@ -162,7 +162,7 @@ static void stk17ta8_rtc_update_alarm(struct rtc_plat_data *pdata)
writeb(pdata->irqen ? RTC_INTS_AIE : 0, ioaddr + RTC_INTERRUPTS);
readb(ioaddr + RTC_FLAGS); /* clear interrupts */
writeb(flags & ~RTC_WRITE, ioaddr + RTC_FLAGS);
- spin_unlock_irqrestore(&pdata->rtc->irq_lock, irqflags);
+ spin_unlock_irqrestore(&pdata->lock, irqflags);
}
static int stk17ta8_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -202,56 +202,53 @@ static irqreturn_t stk17ta8_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
- unsigned long events = RTC_IRQF;
+ unsigned long events = 0;
+ spin_lock(&pdata->lock);
/* read and clear interrupt */
- if (!(readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF))
- return IRQ_NONE;
- if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
- return IRQ_HANDLED;
+ if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_AF) {
+ events = RTC_IRQF;
+ if (readb(ioaddr + RTC_SECONDS_ALARM) & 0x80)
+ events |= RTC_UF;
+ else
+ events |= RTC_AF;
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
+ }
+ spin_unlock(&pdata->lock);
+ return events ? IRQ_HANDLED : IRQ_NONE;
}
-static int stk17ta8_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int stk17ta8_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq <= 0)
- return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
- switch (cmd) {
- case RTC_AIE_OFF:
- pdata->irqen &= ~RTC_AF;
- stk17ta8_rtc_update_alarm(pdata);
- break;
- case RTC_AIE_ON:
+ return -EINVAL;
+ if (enabled)
pdata->irqen |= RTC_AF;
- stk17ta8_rtc_update_alarm(pdata);
- break;
- default:
- return -ENOIOCTLCMD;
- }
+ else
+ pdata->irqen &= ~RTC_AF;
+ stk17ta8_rtc_update_alarm(pdata);
return 0;
}
static const struct rtc_class_ops stk17ta8_rtc_ops = {
- .read_time = stk17ta8_rtc_read_time,
- .set_time = stk17ta8_rtc_set_time,
- .read_alarm = stk17ta8_rtc_read_alarm,
- .set_alarm = stk17ta8_rtc_set_alarm,
- .ioctl = stk17ta8_rtc_ioctl,
+ .read_time = stk17ta8_rtc_read_time,
+ .set_time = stk17ta8_rtc_set_time,
+ .read_alarm = stk17ta8_rtc_read_alarm,
+ .set_alarm = stk17ta8_rtc_set_alarm,
+ .alarm_irq_enable = stk17ta8_rtc_alarm_irq_enable,
};
static ssize_t stk17ta8_nvram_read(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -265,8 +262,8 @@ static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t size)
{
- struct platform_device *pdev =
- to_platform_device(container_of(kobj, struct device, kobj));
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
ssize_t count;
@@ -288,31 +285,26 @@ static struct bin_attribute stk17ta8_nvram_attr = {
static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
{
- struct rtc_device *rtc;
struct resource *res;
unsigned int cal;
unsigned int flags;
struct rtc_plat_data *pdata;
- void __iomem *ioaddr = NULL;
+ void __iomem *ioaddr;
int ret = 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
- ret = -EBUSY;
- goto out;
- }
- pdata->baseaddr = res->start;
- ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
- if (!ioaddr) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start, RTC_REG_SIZE,
+ pdev->name))
+ return -EBUSY;
+ ioaddr = devm_ioremap(&pdev->dev, res->start, RTC_REG_SIZE);
+ if (!ioaddr)
+ return -ENOMEM;
pdata->ioaddr = ioaddr;
pdata->irq = platform_get_irq(pdev, 0);
@@ -328,9 +320,13 @@ static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
if (readb(ioaddr + RTC_FLAGS) & RTC_FLAGS_PF)
dev_warn(&pdev->dev, "voltage-low detected.\n");
+ spin_lock_init(&pdata->lock);
+ pdata->last_jiffies = jiffies;
+ platform_set_drvdata(pdev, pdata);
if (pdata->irq > 0) {
writeb(0, ioaddr + RTC_INTERRUPTS);
- if (request_irq(pdata->irq, stk17ta8_rtc_interrupt,
+ if (devm_request_irq(&pdev->dev, pdata->irq,
+ stk17ta8_rtc_interrupt,
IRQF_DISABLED | IRQF_SHARED,
pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
@@ -338,29 +334,14 @@ static int __devinit stk17ta8_rtc_probe(struct platform_device *pdev)
}
}
- rtc = rtc_device_register(pdev->name, &pdev->dev,
+ pdata->rtc = rtc_device_register(pdev->name, &pdev->dev,
&stk17ta8_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- ret = PTR_ERR(rtc);
- goto out;
- }
- pdata->rtc = rtc;
- pdata->last_jiffies = jiffies;
- platform_set_drvdata(pdev, pdata);
+ if (IS_ERR(pdata->rtc))
+ return PTR_ERR(pdata->rtc);
+
ret = sysfs_create_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
if (ret)
- goto out;
- return 0;
- out:
- if (pdata->rtc)
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0)
- free_irq(pdata->irq, pdev);
- if (ioaddr)
- iounmap(ioaddr);
- if (pdata->baseaddr)
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return ret;
}
@@ -370,13 +351,8 @@ static int __devexit stk17ta8_rtc_remove(struct platform_device *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, &stk17ta8_nvram_attr);
rtc_device_unregister(pdata->rtc);
- if (pdata->irq > 0) {
+ if (pdata->irq > 0)
writeb(0, pdata->ioaddr + RTC_INTERRUPTS);
- free_irq(pdata->irq, pdev);
- }
- iounmap(pdata->ioaddr);
- release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
- kfree(pdata);
return 0;
}
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl.c
index 9c8c70c497d..c6a83a2a722 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl.c
@@ -1,5 +1,5 @@
/*
- * rtc-twl4030.c -- TWL4030 Real Time Clock interface
+ * rtc-twl.c -- TWL Real Time Clock interface
*
* Copyright (C) 2007 MontaVista Software, Inc
* Author: Alexandre Rusev <source@mvista.com>
@@ -28,33 +28,81 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
/*
* RTC block register offsets (use TWL_MODULE_RTC)
*/
-#define REG_SECONDS_REG 0x00
-#define REG_MINUTES_REG 0x01
-#define REG_HOURS_REG 0x02
-#define REG_DAYS_REG 0x03
-#define REG_MONTHS_REG 0x04
-#define REG_YEARS_REG 0x05
-#define REG_WEEKS_REG 0x06
-
-#define REG_ALARM_SECONDS_REG 0x07
-#define REG_ALARM_MINUTES_REG 0x08
-#define REG_ALARM_HOURS_REG 0x09
-#define REG_ALARM_DAYS_REG 0x0A
-#define REG_ALARM_MONTHS_REG 0x0B
-#define REG_ALARM_YEARS_REG 0x0C
-
-#define REG_RTC_CTRL_REG 0x0D
-#define REG_RTC_STATUS_REG 0x0E
-#define REG_RTC_INTERRUPTS_REG 0x0F
-
-#define REG_RTC_COMP_LSB_REG 0x10
-#define REG_RTC_COMP_MSB_REG 0x11
+enum {
+ REG_SECONDS_REG = 0,
+ REG_MINUTES_REG,
+ REG_HOURS_REG,
+ REG_DAYS_REG,
+ REG_MONTHS_REG,
+ REG_YEARS_REG,
+ REG_WEEKS_REG,
+
+ REG_ALARM_SECONDS_REG,
+ REG_ALARM_MINUTES_REG,
+ REG_ALARM_HOURS_REG,
+ REG_ALARM_DAYS_REG,
+ REG_ALARM_MONTHS_REG,
+ REG_ALARM_YEARS_REG,
+
+ REG_RTC_CTRL_REG,
+ REG_RTC_STATUS_REG,
+ REG_RTC_INTERRUPTS_REG,
+
+ REG_RTC_COMP_LSB_REG,
+ REG_RTC_COMP_MSB_REG,
+};
+const static u8 twl4030_rtc_reg_map[] = {
+ [REG_SECONDS_REG] = 0x00,
+ [REG_MINUTES_REG] = 0x01,
+ [REG_HOURS_REG] = 0x02,
+ [REG_DAYS_REG] = 0x03,
+ [REG_MONTHS_REG] = 0x04,
+ [REG_YEARS_REG] = 0x05,
+ [REG_WEEKS_REG] = 0x06,
+
+ [REG_ALARM_SECONDS_REG] = 0x07,
+ [REG_ALARM_MINUTES_REG] = 0x08,
+ [REG_ALARM_HOURS_REG] = 0x09,
+ [REG_ALARM_DAYS_REG] = 0x0A,
+ [REG_ALARM_MONTHS_REG] = 0x0B,
+ [REG_ALARM_YEARS_REG] = 0x0C,
+
+ [REG_RTC_CTRL_REG] = 0x0D,
+ [REG_RTC_STATUS_REG] = 0x0E,
+ [REG_RTC_INTERRUPTS_REG] = 0x0F,
+
+ [REG_RTC_COMP_LSB_REG] = 0x10,
+ [REG_RTC_COMP_MSB_REG] = 0x11,
+};
+const static u8 twl6030_rtc_reg_map[] = {
+ [REG_SECONDS_REG] = 0x00,
+ [REG_MINUTES_REG] = 0x01,
+ [REG_HOURS_REG] = 0x02,
+ [REG_DAYS_REG] = 0x03,
+ [REG_MONTHS_REG] = 0x04,
+ [REG_YEARS_REG] = 0x05,
+ [REG_WEEKS_REG] = 0x06,
+
+ [REG_ALARM_SECONDS_REG] = 0x08,
+ [REG_ALARM_MINUTES_REG] = 0x09,
+ [REG_ALARM_HOURS_REG] = 0x0A,
+ [REG_ALARM_DAYS_REG] = 0x0B,
+ [REG_ALARM_MONTHS_REG] = 0x0C,
+ [REG_ALARM_YEARS_REG] = 0x0D,
+
+ [REG_RTC_CTRL_REG] = 0x10,
+ [REG_RTC_STATUS_REG] = 0x11,
+ [REG_RTC_INTERRUPTS_REG] = 0x12,
+
+ [REG_RTC_COMP_LSB_REG] = 0x13,
+ [REG_RTC_COMP_MSB_REG] = 0x14,
+};
/* RTC_CTRL_REG bitfields */
#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01
@@ -84,31 +132,32 @@
#define ALL_TIME_REGS 6
/*----------------------------------------------------------------------*/
+static u8 *rtc_reg_map;
/*
- * Supports 1 byte read from TWL4030 RTC register.
+ * Supports 1 byte read from TWL RTC register.
*/
-static int twl4030_rtc_read_u8(u8 *data, u8 reg)
+static int twl_rtc_read_u8(u8 *data, u8 reg)
{
int ret;
- ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg);
+ ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl4030_rtc: Could not read TWL4030"
+ pr_err("twl_rtc: Could not read TWL"
"register %X - error %d\n", reg, ret);
return ret;
}
/*
- * Supports 1 byte write to TWL4030 RTC registers.
+ * Supports 1 byte write to TWL RTC registers.
*/
-static int twl4030_rtc_write_u8(u8 data, u8 reg)
+static int twl_rtc_write_u8(u8 data, u8 reg)
{
int ret;
- ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg);
+ ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg]));
if (ret < 0)
- pr_err("twl4030_rtc: Could not write TWL4030"
+ pr_err("twl_rtc: Could not write TWL"
"register %X - error %d\n", reg, ret);
return ret;
}
@@ -129,7 +178,7 @@ static int set_rtc_irq_bit(unsigned char bit)
val = rtc_irq_bits | bit;
val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M;
- ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
rtc_irq_bits = val;
@@ -145,14 +194,14 @@ static int mask_rtc_irq_bit(unsigned char bit)
int ret;
val = rtc_irq_bits & ~bit;
- ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
if (ret == 0)
rtc_irq_bits = val;
return ret;
}
-static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
+static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
{
int ret;
@@ -164,7 +213,7 @@ static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
return ret;
}
-static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
+static int twl_rtc_update_irq_enable(struct device *dev, unsigned enabled)
{
int ret;
@@ -177,7 +226,7 @@ static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
}
/*
- * Gets current TWL4030 RTC time and date parameters.
+ * Gets current TWL RTC time and date parameters.
*
* The RTC's time/alarm representation is not what gmtime(3) requires
* Linux to use:
@@ -185,24 +234,24 @@ static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
* - Months are 1..12 vs Linux 0-11
* - Years are 0..99 vs Linux 1900..N (we assume 21st century)
*/
-static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned char rtc_data[ALL_TIME_REGS + 1];
int ret;
u8 save_control;
- ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
if (ret < 0)
return ret;
save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
- ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
if (ret < 0)
return ret;
- ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
- REG_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
+ (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_read_time error %d\n", ret);
@@ -219,7 +268,7 @@ static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
return ret;
}
-static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char save_control;
unsigned char rtc_data[ALL_TIME_REGS + 1];
@@ -233,18 +282,18 @@ static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_data[6] = bin2bcd(tm->tm_year - 100);
/* Stop RTC while updating the TC registers */
- ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
- twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
/* update all the time registers in one shot */
- ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data,
- REG_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_write(TWL_MODULE_RTC, rtc_data,
+ (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_set_time error %d\n", ret);
goto out;
@@ -252,22 +301,22 @@ static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Start back RTC */
save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
out:
return ret;
}
/*
- * Gets current TWL4030 RTC alarm time.
+ * Gets current TWL RTC alarm time.
*/
-static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
unsigned char rtc_data[ALL_TIME_REGS + 1];
int ret;
- ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
- REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
+ (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
if (ret < 0) {
dev_err(dev, "rtc_read_alarm error %d\n", ret);
return ret;
@@ -288,12 +337,12 @@ static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
return ret;
}
-static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
unsigned char alarm_data[ALL_TIME_REGS + 1];
int ret;
- ret = twl4030_rtc_alarm_irq_enable(dev, 0);
+ ret = twl_rtc_alarm_irq_enable(dev, 0);
if (ret)
goto out;
@@ -305,20 +354,20 @@ static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
/* update all the alarm registers in one shot */
- ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data,
- REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
+ ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data,
+ (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS);
if (ret) {
dev_err(dev, "rtc_set_alarm error %d\n", ret);
goto out;
}
if (alm->enabled)
- ret = twl4030_rtc_alarm_irq_enable(dev, 1);
+ ret = twl_rtc_alarm_irq_enable(dev, 1);
out:
return ret;
}
-static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
+static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
{
unsigned long events = 0;
int ret = IRQ_NONE;
@@ -333,7 +382,7 @@ static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
local_irq_enable();
#endif
- res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (res)
goto out;
/*
@@ -347,26 +396,28 @@ static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
else
events |= RTC_IRQF | RTC_UF;
- res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
+ res = twl_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
REG_RTC_STATUS_REG);
if (res)
goto out;
- /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
- * needs 2 reads to clear the interrupt. One read is done in
- * do_twl4030_pwrirq(). Doing the second read, to clear
- * the bit.
- *
- * FIXME the reason PWR_ISR1 needs an extra read is that
- * RTC_IF retriggered until we cleared REG_ALARM_M above.
- * But re-reading like this is a bad hack; by doing so we
- * risk wrongly clearing status for some other IRQ (losing
- * the interrupt). Be smarter about handling RTC_UF ...
- */
- res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
+ if (twl_class_is_4030()) {
+ /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
+ * needs 2 reads to clear the interrupt. One read is done in
+ * do_twl_pwrirq(). Doing the second read, to clear
+ * the bit.
+ *
+ * FIXME the reason PWR_ISR1 needs an extra read is that
+ * RTC_IF retriggered until we cleared REG_ALARM_M above.
+ * But re-reading like this is a bad hack; by doing so we
+ * risk wrongly clearing status for some other IRQ (losing
+ * the interrupt). Be smarter about handling RTC_UF ...
+ */
+ res = twl_i2c_read_u8(TWL4030_MODULE_INT,
&rd_reg, TWL4030_INT_PWR_ISR1);
- if (res)
- goto out;
+ if (res)
+ goto out;
+ }
/* Notify RTC core on event */
rtc_update_irq(rtc, 1, events);
@@ -376,18 +427,18 @@ out:
return ret;
}
-static struct rtc_class_ops twl4030_rtc_ops = {
- .read_time = twl4030_rtc_read_time,
- .set_time = twl4030_rtc_set_time,
- .read_alarm = twl4030_rtc_read_alarm,
- .set_alarm = twl4030_rtc_set_alarm,
- .alarm_irq_enable = twl4030_rtc_alarm_irq_enable,
- .update_irq_enable = twl4030_rtc_update_irq_enable,
+static struct rtc_class_ops twl_rtc_ops = {
+ .read_time = twl_rtc_read_time,
+ .set_time = twl_rtc_set_time,
+ .read_alarm = twl_rtc_read_alarm,
+ .set_alarm = twl_rtc_set_alarm,
+ .alarm_irq_enable = twl_rtc_alarm_irq_enable,
+ .update_irq_enable = twl_rtc_update_irq_enable,
};
/*----------------------------------------------------------------------*/
-static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
+static int __devinit twl_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
int ret = 0;
@@ -398,7 +449,7 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
return -EINVAL;
rtc = rtc_device_register(pdev->name,
- &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
+ &pdev->dev, &twl_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
@@ -409,7 +460,7 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
+ ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
goto out1;
@@ -420,11 +471,11 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
/* Clear RTC Power up reset and pending alarm interrupts */
- ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
+ ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
if (ret < 0)
goto out1;
- ret = request_irq(irq, twl4030_rtc_interrupt,
+ ret = request_irq(irq, twl_rtc_interrupt,
IRQF_TRIGGER_RISING,
dev_name(&rtc->dev), rtc);
if (ret < 0) {
@@ -432,21 +483,28 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
goto out1;
}
+ if (twl_class_is_6030()) {
+ twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_LINE_A);
+ twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_STS_A);
+ }
+
/* Check RTC module status, Enable if it is off */
- ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
+ ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
goto out2;
if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
- dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n");
+ dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
- ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
if (ret < 0)
goto out2;
}
/* init cached IRQ enable bits */
- ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
if (ret < 0)
goto out2;
@@ -461,10 +519,10 @@ out0:
}
/*
- * Disable all TWL4030 RTC module interrupts.
+ * Disable all TWL RTC module interrupts.
* Sets status flag to free.
*/
-static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
+static int __devexit twl_rtc_remove(struct platform_device *pdev)
{
/* leave rtc running, but disable irqs */
struct rtc_device *rtc = platform_get_drvdata(pdev);
@@ -472,6 +530,13 @@ static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
+ if (twl_class_is_6030()) {
+ twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_LINE_A);
+ twl6030_interrupt_mask(TWL6030_RTC_INT_MASK,
+ REG_INT_MSK_STS_A);
+ }
+
free_irq(irq, rtc);
@@ -480,7 +545,7 @@ static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
return 0;
}
-static void twl4030_rtc_shutdown(struct platform_device *pdev)
+static void twl_rtc_shutdown(struct platform_device *pdev)
{
/* mask timer interrupts, but leave alarm interrupts on to enable
power-on when alarm is triggered */
@@ -491,7 +556,7 @@ static void twl4030_rtc_shutdown(struct platform_device *pdev)
static unsigned char irqstat;
-static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int twl_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
irqstat = rtc_irq_bits;
@@ -499,42 +564,47 @@ static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int twl4030_rtc_resume(struct platform_device *pdev)
+static int twl_rtc_resume(struct platform_device *pdev)
{
set_rtc_irq_bit(irqstat);
return 0;
}
#else
-#define twl4030_rtc_suspend NULL
-#define twl4030_rtc_resume NULL
+#define twl_rtc_suspend NULL
+#define twl_rtc_resume NULL
#endif
-MODULE_ALIAS("platform:twl4030_rtc");
+MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
- .probe = twl4030_rtc_probe,
- .remove = __devexit_p(twl4030_rtc_remove),
- .shutdown = twl4030_rtc_shutdown,
- .suspend = twl4030_rtc_suspend,
- .resume = twl4030_rtc_resume,
+ .probe = twl_rtc_probe,
+ .remove = __devexit_p(twl_rtc_remove),
+ .shutdown = twl_rtc_shutdown,
+ .suspend = twl_rtc_suspend,
+ .resume = twl_rtc_resume,
.driver = {
.owner = THIS_MODULE,
- .name = "twl4030_rtc",
+ .name = "twl_rtc",
},
};
-static int __init twl4030_rtc_init(void)
+static int __init twl_rtc_init(void)
{
+ if (twl_class_is_4030())
+ rtc_reg_map = (u8 *) twl4030_rtc_reg_map;
+ else
+ rtc_reg_map = (u8 *) twl6030_rtc_reg_map;
+
return platform_driver_register(&twl4030rtc_driver);
}
-module_init(twl4030_rtc_init);
+module_init(twl_rtc_init);
-static void __exit twl4030_rtc_exit(void)
+static void __exit twl_rtc_exit(void)
{
platform_driver_unregister(&twl4030rtc_driver);
}
-module_exit(twl4030_rtc_exit);
+module_exit(twl_rtc_exit);
MODULE_AUTHOR("Texas Instruments, MontaVista Software");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 4a6ed1104fb..9ee81d8aa7c 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -17,6 +17,7 @@
struct tx4939rtc_plat_data {
struct rtc_device *rtc;
struct tx4939_rtc_reg __iomem *rtcreg;
+ spinlock_t lock;
};
static struct tx4939rtc_plat_data *get_tx4939rtc_plat_data(struct device *dev)
@@ -52,14 +53,14 @@ static int tx4939_rtc_set_mmss(struct device *dev, unsigned long secs)
buf[3] = secs >> 8;
buf[4] = secs >> 16;
buf[5] = secs >> 24;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
__raw_writel(0, &rtcreg->adr);
for (i = 0; i < 6; i++)
__raw_writel(buf[i], &rtcreg->dat);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_SETTIME |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
@@ -71,18 +72,18 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned long sec;
unsigned char buf[6];
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_GETTIME |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
if (ret) {
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
__raw_writel(2, &rtcreg->adr);
for (i = 2; i < 6; i++)
buf[i] = __raw_readl(&rtcreg->dat);
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
rtc_time_to_tm(sec, tm);
return rtc_valid_tm(tm);
@@ -110,13 +111,13 @@ static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
buf[3] = sec >> 8;
buf[4] = sec >> 16;
buf[5] = sec >> 24;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
__raw_writel(0, &rtcreg->adr);
for (i = 0; i < 6; i++)
__raw_writel(buf[i], &rtcreg->dat);
ret = tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_SETALARM |
(alrm->enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
@@ -129,12 +130,12 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned char buf[6];
u32 ctl;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
ret = tx4939_rtc_cmd(rtcreg,
TX4939_RTCCTL_COMMAND_GETALARM |
(__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALME));
if (ret) {
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return ret;
}
__raw_writel(2, &rtcreg->adr);
@@ -143,7 +144,7 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ctl = __raw_readl(&rtcreg->ctl);
alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
rtc_time_to_tm(sec, &alrm->time);
return rtc_valid_tm(&alrm->time);
@@ -153,11 +154,11 @@ static int tx4939_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev);
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg,
TX4939_RTCCTL_COMMAND_NOP |
(enabled ? TX4939_RTCCTL_ALME : 0));
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return 0;
}
@@ -167,13 +168,14 @@ static irqreturn_t tx4939_rtc_interrupt(int irq, void *dev_id)
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
unsigned long events = RTC_IRQF;
- spin_lock(&pdata->rtc->irq_lock);
+ spin_lock(&pdata->lock);
if (__raw_readl(&rtcreg->ctl) & TX4939_RTCCTL_ALMD) {
events |= RTC_AF;
tx4939_rtc_cmd(rtcreg, TX4939_RTCCTL_COMMAND_NOP);
}
- spin_unlock(&pdata->rtc->irq_lock);
- rtc_update_irq(pdata->rtc, 1, events);
+ spin_unlock(&pdata->lock);
+ if (likely(pdata->rtc))
+ rtc_update_irq(pdata->rtc, 1, events);
return IRQ_HANDLED;
}
@@ -194,13 +196,13 @@ static ssize_t tx4939_rtc_nvram_read(struct kobject *kobj,
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
ssize_t count;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
count++, size--) {
__raw_writel(pos++, &rtcreg->adr);
*buf++ = __raw_readl(&rtcreg->dat);
}
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return count;
}
@@ -213,13 +215,13 @@ static ssize_t tx4939_rtc_nvram_write(struct kobject *kobj,
struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg;
ssize_t count;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irq(&pdata->lock);
for (count = 0; size > 0 && pos < TX4939_RTC_REG_RAMSIZE;
count++, size--) {
__raw_writel(pos++, &rtcreg->adr);
__raw_writel(*buf++, &rtcreg->dat);
}
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irq(&pdata->lock);
return count;
}
@@ -259,6 +261,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
if (!pdata->rtcreg)
return -EBUSY;
+ spin_lock_init(&pdata->lock);
tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
if (devm_request_irq(&pdev->dev, irq, tx4939_rtc_interrupt,
IRQF_DISABLED, pdev->name, &pdev->dev) < 0)
@@ -277,14 +280,12 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
static int __exit tx4939_rtc_remove(struct platform_device *pdev)
{
struct tx4939rtc_plat_data *pdata = platform_get_drvdata(pdev);
- struct rtc_device *rtc = pdata->rtc;
- spin_lock_irq(&rtc->irq_lock);
- tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
- spin_unlock_irq(&rtc->irq_lock);
sysfs_remove_bin_file(&pdev->dev.kobj, &tx4939_rtc_nvram_attr);
- rtc_device_unregister(rtc);
- platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(pdata->rtc);
+ spin_lock_irq(&pdata->lock);
+ tx4939_rtc_cmd(pdata->rtcreg, TX4939_RTCCTL_COMMAND_NOP);
+ spin_unlock_irq(&pdata->lock);
return 0;
}
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index ad741afd47d..bed4cab0704 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -304,7 +304,6 @@ static int rtc_probe(struct platform_device *pdev)
{
struct v3020_platform_data *pdata = pdev->dev.platform_data;
struct v3020 *chip;
- struct rtc_device *rtc;
int retval = -EBUSY;
int i;
int temp;
@@ -353,13 +352,12 @@ static int rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
- rtc = rtc_device_register("v3020",
+ chip->rtc = rtc_device_register("v3020",
&pdev->dev, &v3020_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc)) {
- retval = PTR_ERR(rtc);
+ if (IS_ERR(chip->rtc)) {
+ retval = PTR_ERR(chip->rtc);
goto err_io;
}
- chip->rtc = rtc;
return 0;
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index fadddac1e5a..c3244244e8c 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -327,7 +327,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- rtc1_base = ioremap(res->start, res->end - res->start + 1);
+ rtc1_base = ioremap(res->start, resource_size(res));
if (!rtc1_base)
return -EBUSY;
@@ -337,7 +337,7 @@ static int __devinit rtc_probe(struct platform_device *pdev)
goto err_rtc1_iounmap;
}
- rtc2_base = ioremap(res->start, res->end - res->start + 1);
+ rtc2_base = ioremap(res->start, resource_size(res));
if (!rtc2_base) {
retval = -EBUSY;
goto err_rtc1_iounmap;
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 79795cdf6ed..000c7e481e5 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -485,7 +485,7 @@ static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
return 0;
}
-static struct dev_pm_ops wm831x_rtc_pm_ops = {
+static const struct dev_pm_ops wm831x_rtc_pm_ops = {
.suspend = wm831x_rtc_suspend,
.resume = wm831x_rtc_resume,
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index c91edc572eb..f1e440521c5 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -315,9 +315,9 @@ static int wm8350_rtc_update_irq_enable(struct device *dev,
return 0;
}
-static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq,
- void *data)
+static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data)
{
+ struct wm8350 *wm8350 = data;
struct rtc_device *rtc = wm8350->rtc.rtc;
int ret;
@@ -330,14 +330,18 @@ static void wm8350_rtc_alarm_handler(struct wm8350 *wm8350, int irq,
dev_err(&(wm8350->rtc.pdev->dev),
"Failed to disable alarm: %d\n", ret);
}
+
+ return IRQ_HANDLED;
}
-static void wm8350_rtc_update_handler(struct wm8350 *wm8350, int irq,
- void *data)
+static irqreturn_t wm8350_rtc_update_handler(int irq, void *data)
{
+ struct wm8350 *wm8350 = data;
struct rtc_device *rtc = wm8350->rtc.rtc;
rtc_update_irq(rtc, 1, RTC_IRQF | RTC_UF);
+
+ return IRQ_HANDLED;
}
static const struct rtc_class_ops wm8350_rtc_ops = {
@@ -350,8 +354,9 @@ static const struct rtc_class_ops wm8350_rtc_ops = {
};
#ifdef CONFIG_PM
-static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+static int wm8350_rtc_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret = 0;
u16 reg;
@@ -369,8 +374,9 @@ static int wm8350_rtc_suspend(struct platform_device *pdev, pm_message_t state)
return ret;
}
-static int wm8350_rtc_resume(struct platform_device *pdev)
+static int wm8350_rtc_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
int ret;
@@ -455,15 +461,14 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
return ret;
}
- wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
- wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_PER);
-
wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
- wm8350_rtc_update_handler, NULL);
+ wm8350_rtc_update_handler, 0,
+ "RTC Seconds", wm8350);
+ wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
- wm8350_rtc_alarm_handler, NULL);
- wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_ALM);
+ wm8350_rtc_alarm_handler, 0,
+ "RTC Alarm", wm8350);
return 0;
}
@@ -473,8 +478,6 @@ static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct wm8350_rtc *wm_rtc = &wm8350->rtc;
- wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
-
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC);
wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM);
@@ -483,13 +486,17 @@ static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops wm8350_rtc_pm_ops = {
+ .suspend = wm8350_rtc_suspend,
+ .resume = wm8350_rtc_resume,
+};
+
static struct platform_driver wm8350_rtc_driver = {
.probe = wm8350_rtc_probe,
.remove = __devexit_p(wm8350_rtc_remove),
- .suspend = wm8350_rtc_suspend,
- .resume = wm8350_rtc_resume,
.driver = {
.name = "wm8350-rtc",
+ .pm = &wm8350_rtc_pm_ops,
},
};
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 6583c1a8b07..9aae49139a0 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -155,11 +155,11 @@ static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
}
static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
- int datetoo, u8 reg_base, unsigned char alm_enable)
+ u8 reg_base, unsigned char alm_enable)
{
- int i, xfer, nbytes;
- unsigned char buf[8];
+ int i, xfer;
unsigned char rdata[10] = { 0, reg_base };
+ unsigned char *buf = rdata + 2;
static const unsigned char wel[3] = { 0, X1205_REG_SR,
X1205_SR_WEL };
@@ -170,9 +170,9 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
static const unsigned char diswe[3] = { 0, X1205_REG_SR, 0 };
dev_dbg(&client->dev,
- "%s: secs=%d, mins=%d, hours=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour);
+ "%s: sec=%d min=%d hour=%d mday=%d mon=%d year=%d wday=%d\n",
+ __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday,
+ tm->tm_mon, tm->tm_year, tm->tm_wday);
buf[CCR_SEC] = bin2bcd(tm->tm_sec);
buf[CCR_MIN] = bin2bcd(tm->tm_min);
@@ -180,23 +180,15 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
/* set hour and 24hr bit */
buf[CCR_HOUR] = bin2bcd(tm->tm_hour) | X1205_HR_MIL;
- /* should we also set the date? */
- if (datetoo) {
- dev_dbg(&client->dev,
- "%s: mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
+ buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
- buf[CCR_MDAY] = bin2bcd(tm->tm_mday);
+ /* month, 1 - 12 */
+ buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
- /* month, 1 - 12 */
- buf[CCR_MONTH] = bin2bcd(tm->tm_mon + 1);
-
- /* year, since the rtc epoch*/
- buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
- buf[CCR_WDAY] = tm->tm_wday & 0x07;
- buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
- }
+ /* year, since the rtc epoch*/
+ buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
+ buf[CCR_WDAY] = tm->tm_wday & 0x07;
+ buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
/* If writing alarm registers, set compare bits on registers 0-4 */
if (reg_base < X1205_CCR_BASE)
@@ -214,17 +206,8 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
return -EIO;
}
-
- /* write register's data */
- if (datetoo)
- nbytes = 8;
- else
- nbytes = 3;
- for (i = 0; i < nbytes; i++)
- rdata[2+i] = buf[i];
-
- xfer = i2c_master_send(client, rdata, nbytes+2);
- if (xfer != nbytes+2) {
+ xfer = i2c_master_send(client, rdata, sizeof(rdata));
+ if (xfer != sizeof(rdata)) {
dev_err(&client->dev,
"%s: result=%d addr=%02x, data=%02x\n",
__func__,
@@ -282,7 +265,7 @@ static int x1205_fix_osc(struct i2c_client *client)
memset(&tm, 0, sizeof(tm));
- err = x1205_set_datetime(client, &tm, 1, X1205_CCR_BASE, 0);
+ err = x1205_set_datetime(client, &tm, X1205_CCR_BASE, 0);
if (err < 0)
dev_err(&client->dev, "unable to restart the oscillator\n");
@@ -481,7 +464,7 @@ static int x1205_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int x1205_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
return x1205_set_datetime(to_i2c_client(dev),
- &alrm->time, 1, X1205_ALM0_BASE, alrm->enabled);
+ &alrm->time, X1205_ALM0_BASE, alrm->enabled);
}
static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -493,7 +476,7 @@ static int x1205_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int x1205_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
return x1205_set_datetime(to_i2c_client(dev),
- tm, 1, X1205_CCR_BASE, 0);
+ tm, X1205_CCR_BASE, 0);
}
static int x1205_rtc_proc(struct device *dev, struct seq_file *seq)
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index fd1231738ef..148b1dd2407 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -218,7 +218,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
spin_unlock_irqrestore(&aliastree.lock, flags);
newlcu = _allocate_lcu(uid);
if (IS_ERR(newlcu))
- return PTR_ERR(lcu);
+ return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
lcu = _find_lcu(server, uid);
if (!lcu) {
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index f64d0db881b..6e14863f5c7 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
*
*/
-#define KMSG_COMPONENT "dasd-diag"
+#define KMSG_COMPONENT "dasd"
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -146,16 +146,16 @@ dasd_diag_erp(struct dasd_device *device)
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
if (rc == 4) {
if (!(device->features & DASD_FEATURE_READONLY)) {
- dev_warn(&device->cdev->dev,
- "The access mode of a DIAG device changed"
- " to read-only");
+ pr_warning("%s: The access mode of a DIAG device "
+ "changed to read-only\n",
+ dev_name(&device->cdev->dev));
device->features |= DASD_FEATURE_READONLY;
}
rc = 0;
}
if (rc)
- dev_warn(&device->cdev->dev, "DIAG ERP failed with "
- "rc=%d\n", rc);
+ pr_warning("%s: DIAG ERP failed with "
+ "rc=%d\n", dev_name(&device->cdev->dev), rc);
}
/* Start a given request at the device. Return zero on success, non-zero
@@ -371,8 +371,9 @@ dasd_diag_check_device(struct dasd_device *device)
private->pt_block = 2;
break;
default:
- dev_warn(&device->cdev->dev, "Device type %d is not supported "
- "in DIAG mode\n", private->rdc_data.vdev_class);
+ pr_warning("%s: Device type %d is not supported "
+ "in DIAG mode\n", dev_name(&device->cdev->dev),
+ private->rdc_data.vdev_class);
rc = -EOPNOTSUPP;
goto out;
}
@@ -413,8 +414,8 @@ dasd_diag_check_device(struct dasd_device *device)
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
- dev_warn(&device->cdev->dev,
- "A 64-bit DIAG call failed\n");
+ pr_warning("%s: A 64-bit DIAG call failed\n",
+ dev_name(&device->cdev->dev));
rc = -EOPNOTSUPP;
goto out_label;
}
@@ -423,8 +424,9 @@ dasd_diag_check_device(struct dasd_device *device)
break;
}
if (bsize > PAGE_SIZE) {
- dev_warn(&device->cdev->dev, "Accessing the DASD failed because"
- " of an incorrect format (rc=%d)\n", rc);
+ pr_warning("%s: Accessing the DASD failed because of an "
+ "incorrect format (rc=%d)\n",
+ dev_name(&device->cdev->dev), rc);
rc = -EIO;
goto out_label;
}
@@ -442,18 +444,18 @@ dasd_diag_check_device(struct dasd_device *device)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
if (rc && (rc != 4)) {
- dev_warn(&device->cdev->dev, "DIAG initialization "
- "failed with rc=%d\n", rc);
+ pr_warning("%s: DIAG initialization failed with rc=%d\n",
+ dev_name(&device->cdev->dev), rc);
rc = -EIO;
} else {
if (rc == 4)
device->features |= DASD_FEATURE_READONLY;
- dev_info(&device->cdev->dev,
- "New DASD with %ld byte/block, total size %ld KB%s\n",
- (unsigned long) block->bp_block,
- (unsigned long) (block->blocks <<
- block->s2b_shift) >> 1,
- (rc == 4) ? ", read-only device" : "");
+ pr_info("%s: New DASD with %ld byte/block, total size %ld "
+ "KB%s\n", dev_name(&device->cdev->dev),
+ (unsigned long) block->bp_block,
+ (unsigned long) (block->blocks <<
+ block->s2b_shift) >> 1,
+ (rc == 4) ? ", read-only device" : "");
rc = 0;
}
out_label:
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 5f23eca8280..6315fbd8e68 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -14,6 +14,7 @@
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
@@ -272,10 +273,10 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer);
/* check for valid verbs */
- for (str = buffer; isspace(*str); str++);
+ str = skip_spaces(buffer);
if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
/* 'set xxx' was given */
- for (str = str + 4; isspace(*str); str++);
+ str = skip_spaces(str + 4);
if (strcmp(str, "on") == 0) {
/* switch on statistics profiling */
dasd_profile_level = DASD_PROFILE_ON;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index f76f4bd82b9..9b43ae94beb 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -1005,7 +1005,7 @@ static int dcssblk_thaw(struct device *dev)
return 0;
}
-static struct dev_pm_ops dcssblk_pm_ops = {
+static const struct dev_pm_ops dcssblk_pm_ops = {
.freeze = dcssblk_freeze,
.thaw = dcssblk_thaw,
.restore = dcssblk_restore,
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 116d1b3eeb1..118de392af6 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -407,7 +407,7 @@ static int xpram_restore(struct device *dev)
return 0;
}
-static struct dev_pm_ops xpram_pm_ops = {
+static const struct dev_pm_ops xpram_pm_ops = {
.restore = xpram_restore,
};
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 28e4649fa9e..247b2b93472 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -467,7 +467,7 @@ fs3270_open(struct inode *inode, struct file *filp)
if (IS_ERR(ib)) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
- rc = PTR_ERR(fp);
+ rc = PTR_ERR(ib);
goto out;
}
fp->rdbuf = ib;
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 60473f86e1f..33e96484d54 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -529,7 +529,7 @@ static int monreader_restore(struct device *dev)
return monreader_thaw(dev);
}
-static struct dev_pm_ops monreader_pm_ops = {
+static const struct dev_pm_ops monreader_pm_ops = {
.freeze = monreader_freeze,
.thaw = monreader_thaw,
.restore = monreader_restore,
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 6532ed8b4af..668a0579b26 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -323,7 +323,7 @@ static int monwriter_thaw(struct device *dev)
return monwriter_restore(dev);
}
-static struct dev_pm_ops monwriter_pm_ops = {
+static const struct dev_pm_ops monwriter_pm_ops = {
.freeze = monwriter_freeze,
.thaw = monwriter_thaw,
.restore = monwriter_restore,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a983f508678..ec88c59842e 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1019,7 +1019,7 @@ static int sclp_restore(struct device *dev)
return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}
-static struct dev_pm_ops sclp_pm_ops = {
+static const struct dev_pm_ops sclp_pm_ops = {
.freeze = sclp_freeze,
.thaw = sclp_thaw,
.restore = sclp_restore,
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 28b5afc129c..b3beab610da 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -547,7 +547,7 @@ struct read_storage_sccb {
u32 entries[0];
} __packed;
-static struct dev_pm_ops sclp_mem_pm_ops = {
+static const struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 3657fe103c2..cb70fa1cf53 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -9,6 +9,7 @@
*/
#define KMSG_COMPONENT "tape_34xx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 0c72aadb839..9821c588661 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -9,6 +9,7 @@
*/
#define KMSG_COMPONENT "tape_3590"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
@@ -136,7 +137,7 @@ static void int_to_ext_kekl(struct tape3592_kekl *in,
out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
memcpy(out->label, in->label, sizeof(in->label));
EBCASC(out->label, sizeof(in->label));
- strstrip(out->label);
+ strim(out->label);
}
static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 4799cc2f73c..96816149368 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -11,6 +11,7 @@
*/
#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/module.h>
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 23d773a0d11..2125ec7d95f 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -10,6 +10,9 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index ddc914ccea8..b2864e3edb6 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -7,6 +7,10 @@
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
+
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f5d6802dc5d..81b094e480e 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -12,6 +12,8 @@
*/
#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h> // for kernel parameters
#include <linux/kmod.h> // for requesting modules
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index ebd820ccfb2..0ceb37984f7 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -11,6 +11,9 @@
* PROCFS Functions
*/
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 750354ad16e..03f07e5dd6e 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -11,6 +11,9 @@
* Stefan Bader <shbader@de.ibm.com>
*/
+#define KMSG_COMPONENT "tape"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/bio.h>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 899aa795bf3..7dfa5412d5a 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -675,7 +675,7 @@ static int vmlogrdr_pm_prepare(struct device *dev)
}
-static struct dev_pm_ops vmlogrdr_pm_ops = {
+static const struct dev_pm_ops vmlogrdr_pm_ops = {
.prepare = vmlogrdr_pm_prepare,
};
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index a5a62f1f774..5f97ea2ee6b 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -560,7 +560,7 @@ static int ccwgroup_pm_restore(struct device *dev)
return gdrv->restore ? gdrv->restore(gdev) : 0;
}
-static struct dev_pm_ops ccwgroup_pm_ops = {
+static const struct dev_pm_ops ccwgroup_pm_ops = {
.prepare = ccwgroup_pm_prepare,
.complete = ccwgroup_pm_complete,
.freeze = ccwgroup_pm_freeze,
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 9509e386093..7a28a3029a3 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -49,7 +49,6 @@ static u16 ccwreq_next_path(struct ccw_device *cdev)
*/
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
if (req->done)
@@ -57,7 +56,6 @@ static void ccwreq_stop(struct ccw_device *cdev, int rc)
req->done = 1;
ccw_device_set_timeout(cdev, 0);
memset(&cdev->private->irb, 0, sizeof(struct irb));
- sch->lpm = sch->schib.pmcw.pam;
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
@@ -80,7 +78,6 @@ static void ccwreq_do(struct ccw_device *cdev)
continue;
}
/* Perform start function. */
- sch->lpm = 0xff;
memset(&cdev->private->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92ff88ac110..7679aee6fa1 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1148,7 +1148,7 @@ static int css_pm_restore(struct device *dev)
return drv->restore ? drv->restore(sch) : 0;
}
-static struct dev_pm_ops css_pm_ops = {
+static const struct dev_pm_ops css_pm_ops = {
.prepare = css_pm_prepare,
.complete = css_pm_complete,
.freeze = css_pm_freeze,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 9fecfb4223a..a6c7d5426fb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1519,6 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
sch->driver = &io_subchannel_driver;
/* Initialize the ccw_device structure. */
cdev->dev.parent= &sch->dev;
+ sch_set_cdev(sch, cdev);
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
@@ -1904,7 +1905,7 @@ out_unlock:
return ret;
}
-static struct dev_pm_ops ccw_pm_ops = {
+static const struct dev_pm_ops ccw_pm_ops = {
.prepare = ccw_device_pm_prepare,
.complete = ccw_device_pm_complete,
.freeze = ccw_device_pm_freeze,
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index aad188e43b4..6facb5499a6 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -142,7 +142,7 @@ static void spid_do(struct ccw_device *cdev)
u8 fn;
/* Use next available path that is not already in correct state. */
- req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
+ req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
if (!req->lpm)
goto out_nopath;
/* Channel program setup. */
@@ -254,15 +254,15 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
*p = first;
}
-static u8 pgid_to_vpm(struct ccw_device *cdev)
+static u8 pgid_to_donepm(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct pgid *pgid;
int i;
int lpm;
- u8 vpm = 0;
+ u8 donepm = 0;
- /* Set VPM bits for paths which are already in the target state. */
+ /* Set bits for paths which are already in the target state. */
for (i = 0; i < 8; i++) {
lpm = 0x80 >> i;
if ((cdev->private->pgid_valid_mask & lpm) == 0)
@@ -282,10 +282,10 @@ static u8 pgid_to_vpm(struct ccw_device *cdev)
if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
continue;
}
- vpm |= lpm;
+ donepm |= lpm;
}
- return vpm;
+ return donepm;
}
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
@@ -307,6 +307,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
int mismatch = 0;
int reserved = 0;
int reset = 0;
+ u8 donepm;
if (rc)
goto out;
@@ -316,18 +317,20 @@ static void snid_done(struct ccw_device *cdev, int rc)
else if (mismatch)
rc = -EOPNOTSUPP;
else {
- sch->vpm = pgid_to_vpm(cdev);
+ donepm = pgid_to_donepm(cdev);
+ sch->vpm = donepm & sch->opm;
+ cdev->private->pgid_todo_mask &= ~donepm;
pgid_fill(cdev, pgid);
}
out:
CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
- "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
- cdev->private->pgid_valid_mask, sch->vpm, mismatch,
- reserved, reset);
+ "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
switch (rc) {
case 0:
/* Anything left to do? */
- if (sch->vpm == sch->schib.pmcw.pam) {
+ if (cdev->private->pgid_todo_mask == 0) {
verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
return;
}
@@ -411,6 +414,7 @@ static void verify_start(struct ccw_device *cdev)
struct ccw_dev_id *devid = &cdev->private->dev_id;
sch->vpm = 0;
+ sch->lpm = sch->schib.pmcw.pam;
/* Initialize request data. */
memset(req, 0, sizeof(*req));
req->timeout = PGID_TIMEOUT;
@@ -442,11 +446,14 @@ static void verify_start(struct ccw_device *cdev)
*/
void ccw_device_verify_start(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
CIO_TRACE_EVENT(4, "vrfy");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
/* Initialize PGID data. */
memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
/*
* Initialize pathgroup and multipath state with target values.
* They may change in the course of path verification.
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 61677dfbdc9..ca5e9bb9d45 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -163,7 +163,7 @@ void tcw_finalize(struct tcw *tcw, int num_tidaws)
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
- memset(tcat, 0, sizeof(tcat));
+ memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(tccb_init);
*/
void tsb_init(struct tsb *tsb)
{
- memset(tsb, 0, sizeof(tsb));
+ memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index d72ae4c93af..b9ce712a7f2 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -150,6 +150,7 @@ struct ccw_device_private {
struct ccw_request req; /* internal I/O request */
int iretry;
u8 pgid_valid_mask; /* mask of valid PGIDs */
+ u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4be6e84b959..b2275c5000e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -486,7 +486,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
q->first_to_check = add_buf(q->first_to_check, count);
- atomic_sub(count, &q->nr_buf_used);
+ if (atomic_sub(count, &q->nr_buf_used) == 0)
+ qdio_perf_stat_inc(&perf_stats.inbound_queue_full);
break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index 968e3c7c263..54f7c325a3e 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -64,6 +64,8 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v)
(long)atomic_long_read(&perf_stats.fast_requeue));
seq_printf(m, "Number of outbound target full condition\t: %li\n",
(long)atomic_long_read(&perf_stats.outbound_target_full));
+ seq_printf(m, "Number of inbound queue full condition\t\t: %li\n",
+ (long)atomic_long_read(&perf_stats.inbound_queue_full));
seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
(long)atomic_long_read(&perf_stats.debug_tl_out_timer));
seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
index ff4504ce1e3..12454231dc8 100644
--- a/drivers/s390/cio/qdio_perf.h
+++ b/drivers/s390/cio/qdio_perf.h
@@ -36,6 +36,7 @@ struct qdio_perf_stats {
atomic_long_t outbound_handler;
atomic_long_t fast_requeue;
atomic_long_t outbound_target_full;
+ atomic_long_t inbound_queue_full;
/* for debugging */
atomic_long_t debug_tl_out_timer;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 18d54fc21ce..8c2dea5fa2b 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -48,7 +48,6 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
if (!irq_ptr)
return;
- WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
irq_ptr->qib.pfmt = qib_param_field_format;
if (qib_param_field)
memcpy(irq_ptr->qib.parm, qib_param_field,
@@ -82,14 +81,12 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
- WARN_ON((unsigned long)q & 0xff);
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
return -ENOMEM;
}
- WARN_ON((unsigned long)q->slib & 0x7ff);
irq_ptr_qs[i] = q;
}
return 0;
@@ -131,7 +128,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
q->sbal[j] = *sbals_array++;
- WARN_ON((unsigned long)q->sbal[j] & 0xff);
+ BUG_ON((unsigned long)q->sbal[j] & 0xff);
}
/* fill in slib */
@@ -147,11 +144,6 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = (unsigned long)q->sbal[j];
-
- DBF_EVENT("sl-slsb-sbal");
- DBF_HEX(q->sl, sizeof(void *));
- DBF_HEX(&q->slsb, sizeof(void *));
- DBF_HEX(q->sbal, sizeof(void *));
}
static void setup_queues(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 395c04c2b00..65ebee0a326 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
#define IUCV_DBF_TEXT_(name, level, text...) \
do { \
if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
- char* iucv_dbf_txt_buf = \
- get_cpu_var(iucv_dbf_txt_buf); \
- sprintf(iucv_dbf_txt_buf, text); \
- debug_text_event(iucv_dbf_##name, level, \
- iucv_dbf_txt_buf); \
+ char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+ sprintf(__buf, text); \
+ debug_text_event(iucv_dbf_##name, level, __buf); \
put_cpu_var(iucv_dbf_txt_buf); \
} \
} while (0)
@@ -161,7 +159,7 @@ static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);
-static struct dev_pm_ops netiucv_pm_ops = {
+static const struct dev_pm_ops netiucv_pm_ops = {
.prepare = netiucv_pm_prepare,
.complete = netiucv_pm_complete,
.freeze = netiucv_pm_freeze,
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 3012355f830..67f2485d237 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -168,7 +168,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
return 0;
}
-static struct dev_pm_ops smsg_pm_ops = {
+static const struct dev_pm_ops smsg_pm_ops = {
.freeze = smsg_pm_freeze,
.thaw = smsg_pm_restore_thaw,
.restore = smsg_pm_restore_thaw,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3bf75924741..84d3bbaa95e 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -76,6 +76,7 @@
Fix bug in twa_get_param() on 4GB+.
Use pci_resource_len() for ioremap().
2.26.02.012 - Add power management support.
+ 2.26.02.013 - Fix bug in twa_load_sgl().
*/
#include <linux/module.h>
@@ -100,7 +101,7 @@
#include "3w-9xxx.h"
/* Globals */
-#define TW_DRIVER_VERSION "2.26.02.012"
+#define TW_DRIVER_VERSION "2.26.02.013"
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
@@ -1382,10 +1383,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
newcommand = &full_command_packet->command.newcommand;
newcommand->request_id__lunl =
cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
- newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
- newcommand->sg_list[0].length = cpu_to_le32(length);
+ if (length) {
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].length = cpu_to_le32(length);
+ }
newcommand->sgl_entries__lunh =
- cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
} else {
oldcommand = &full_command_packet->command.oldcommand;
oldcommand->request_id = request_id;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 36900c71a59..9191d1ea645 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
Please read the comments at the top of
<file:drivers/scsi/3w-xxxx.c>.
+config SCSI_HPSA
+ tristate "HP Smart Array SCSI driver"
+ depends on PCI && SCSI
+ help
+ This driver supports HP Smart Array Controllers (circa 2009).
+ It is a SCSI alternative to the cciss driver, which is a block
+ driver. Anyone wishing to use HP Smart Array controllers who
+	  would prefer the devices be presented to Linux as SCSI devices,
+	  rather than as generic block devices, should say Y here.
+
config SCSI_3W_9XXX
tristate "3ware 9xxx SATA-RAID support"
depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 280d3c657d6..92a8c500b23 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
+obj-$(CONFIG_SCSI_HPSA) += hpsa.o
obj-$(CONFIG_SCSI_DTC3280) += dtc.o
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 698a527d6cc..f008708f1b0 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -135,11 +135,15 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
while ((compl = be_mcc_compl_get(phba))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
- BUG_ON(!is_link_state_evt(compl->flags));
+ if (is_link_state_evt(compl->flags))
+ /* Interpret compl as a async link evt */
+ beiscsi_async_link_state_process(phba,
+ (struct be_async_event_link_state *) compl);
+ else
+ SE_DEBUG(DBG_LVL_1,
+ " Unsupported Async Event, flags"
+ " = 0x%08x \n", compl->flags);
- /* Interpret compl as a async link evt */
- beiscsi_async_link_state_process(phba,
- (struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
status = be_mcc_compl_process(ctrl, compl);
atomic_dec(&phba->ctrl.mcc_obj.q.used);
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 2b973f3c2eb..6cf9dc37d78 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -684,6 +684,7 @@ extern unsigned int error_mask1, error_mask2;
extern u64 iscsi_error_mask;
extern unsigned int en_tcp_dack;
extern unsigned int event_coal_div;
+extern unsigned int event_coal_min;
extern struct scsi_transport_template *bnx2i_scsi_xport_template;
extern struct iscsi_transport bnx2i_iscsi_transport;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c8d7630c13..1af578dec27 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -133,20 +133,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{
struct bnx2i_5771x_cq_db *cq_db;
u16 cq_index;
+ u16 next_index;
+ u32 num_active_cmds;
+
+ /* Coalesce CQ entries only on 10G devices */
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
return;
+ /* Do not update CQ DB multiple times before firmware writes
+ * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
+ * interrupts and other unwanted results
+ */
+ cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+ if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+ return;
+
if (action == CNIC_ARM_CQE) {
- cq_index = ep->qp.cqe_exp_seq_sn +
- ep->num_active_cmds / event_coal_div;
- cq_index %= (ep->qp.cqe_size * 2 + 1);
- if (!cq_index) {
+ num_active_cmds = ep->num_active_cmds;
+ if (num_active_cmds <= event_coal_min)
+ next_index = 1;
+ else
+ next_index = event_coal_min +
+ (num_active_cmds - event_coal_min) / event_coal_div;
+ if (!next_index)
+ next_index = 1;
+ cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
+ if (cq_index > ep->qp.cqe_size * 2)
+ cq_index -= ep->qp.cqe_size * 2;
+ if (!cq_index)
cq_index = 1;
- cq_db = (struct bnx2i_5771x_cq_db *)
- ep->qp.cq_pgtbl_virt;
- cq_db->sqn[0] = cq_index;
- }
+
+ cq_db->sqn[0] = cq_index;
}
}
@@ -366,6 +384,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_tmf_request *tmfabort_wqe;
u32 dword;
+ u32 scsi_lun[2];
bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +395,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
tmfabort_wqe->op_attr = 0;
tmfabort_wqe->op_attr =
ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
- tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
- tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
tmfabort_wqe->reserved2 = 0;
tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
- if (!ctask || ctask->sc)
+ if (!ctask || !ctask->sc)
/*
* the iscsi layer must have completed the cmd while this
* was starting up.
+ *
+ * Note: In the case of a SCSI cmd timeout, the task's sc
+ * is still active; hence ctask->sc != 0
+ * In this case, the task must be aborted
*/
return 0;
+
ref_sc = ctask->sc;
+ /* Retrieve LUN directly from the ref_sc */
+ int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
+ tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
+ tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
+
if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
else
dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
- tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+ tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 0c4210d48ee..6d8172e781c 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.0.1e"
-#define DRV_MODULE_RELDATE "June 22, 2009"
+#define DRV_MODULE_VERSION "2.1.0"
+#define DRV_MODULE_RELDATE "Dec 06, 2009"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
static DEFINE_MUTEX(bnx2i_dev_lock);
+unsigned int event_coal_min = 24;
+module_param(event_coal_min, int, 0664);
+MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
+
unsigned int event_coal_div = 1;
module_param(event_coal_div, int, 0664);
MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+ else
+ printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
+ hba->pci_did);
}
@@ -363,7 +371,7 @@ static int __init bnx2i_mod_init(void)
printk(KERN_INFO "%s", version);
- if (!is_power_of_2(sq_size))
+ if (sq_size && !is_power_of_2(sq_size))
sq_size = roundup_pow_of_two(sq_size);
mutex_init(&bnx2i_dev_lock);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 132898c88d5..33b2294625b 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -485,7 +485,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
struct iscsi_task *task = session->cmds[i];
struct bnx2i_cmd *cmd = task->dd_data;
- /* Anil */
task->hdr = &cmd->hdr;
task->hdr_max = sizeof(struct iscsi_hdr);
@@ -765,7 +764,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->pci_svid = hba->pcidev->subsystem_vendor;
hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
- bnx2i_identify_device(hba);
bnx2i_identify_device(hba);
bnx2i_setup_host_queue_size(hba, shost);
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index c1d5be4adf9..26ffdcd5a43 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -291,7 +291,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
c3cn_hold(c3cn);
spin_lock_bh(&c3cn->lock);
if (c3cn->state == C3CN_STATE_CONNECTING)
- fail_act_open(c3cn, EHOSTUNREACH);
+ fail_act_open(c3cn, -EHOSTUNREACH);
spin_unlock_bh(&c3cn->lock);
c3cn_put(c3cn);
__kfree_skb(skb);
@@ -792,18 +792,18 @@ static int act_open_rpl_status_to_errno(int status)
{
switch (status) {
case CPL_ERR_CONN_RESET:
- return ECONNREFUSED;
+ return -ECONNREFUSED;
case CPL_ERR_ARP_MISS:
- return EHOSTUNREACH;
+ return -EHOSTUNREACH;
case CPL_ERR_CONN_TIMEDOUT:
- return ETIMEDOUT;
+ return -ETIMEDOUT;
case CPL_ERR_TCAM_FULL:
- return ENOMEM;
+ return -ENOMEM;
case CPL_ERR_CONN_EXIST:
cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
- return EADDRINUSE;
+ return -EADDRINUSE;
default:
- return EIO;
+ return -EIO;
}
}
@@ -817,7 +817,7 @@ static void act_open_retry_timer(unsigned long data)
spin_lock_bh(&c3cn->lock);
skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
if (!skb)
- fail_act_open(c3cn, ENOMEM);
+ fail_act_open(c3cn, -ENOMEM);
else {
skb->sk = (struct sock *)c3cn;
set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +966,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
case CPL_ERR_BAD_SYN: /* fall through */
case CPL_ERR_CONN_RESET:
return c3cn->state > C3CN_STATE_ESTABLISHED ?
- EPIPE : ECONNRESET;
+ -EPIPE : -ECONNRESET;
case CPL_ERR_XMIT_TIMEDOUT:
case CPL_ERR_PERSIST_TIMEDOUT:
case CPL_ERR_FINWAIT2_TIMEDOUT:
case CPL_ERR_KEEPALIVE_TIMEDOUT:
- return ETIMEDOUT;
+ return -ETIMEDOUT;
default:
- return EIO;
+ return -EIO;
}
}
@@ -1563,7 +1563,7 @@ free_tid:
s3_free_atid(cdev, c3cn->tid);
c3cn->tid = 0;
out_err:
- return -1;
+ return -EINVAL;
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
index 70910507117..1fe3b0f1f3c 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_pdu.c
@@ -388,8 +388,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
if (err > 0) {
int pdulen = err;
- cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
- task, skb, skb->len, skb->data_len, err);
+ cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+ task, skb, skb->len, skb->data_len, err);
if (task->conn->hdrdgst_en)
pdulen += ISCSI_DIGEST_SIZE;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 47cfe1c49c3..1a660191a90 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -748,6 +748,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"IBM", "1724"},
{"IBM", "1726"},
{"IBM", "1742"},
+ {"IBM", "1745"},
+ {"IBM", "1746"},
{"IBM", "1814"},
{"IBM", "1815"},
{"IBM", "1818"},
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a30ffaa1222..10be9f36a4c 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -101,6 +101,8 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
static int fcoe_create(const char *, struct kernel_param *);
static int fcoe_destroy(const char *, struct kernel_param *);
+static int fcoe_enable(const char *, struct kernel_param *);
+static int fcoe_disable(const char *, struct kernel_param *);
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
@@ -115,10 +117,16 @@ static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
+module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
+module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
@@ -545,6 +553,23 @@ static void fcoe_queue_timer(ulong lport)
}
/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+ const struct net_device_ops *ops = netdev->netdev_ops;
+
+ if (ops->ndo_fcoe_get_wwn)
+ return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+ return -EINVAL;
+}
+
+/**
 * fcoe_netdev_config() - Set up net device for SW FCoE
* @lport: The local port that is associated with the net device
* @netdev: The associated net device
@@ -611,9 +636,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
*/
if (netdev->priv_flags & IFF_802_1Q_VLAN)
vid = vlan_dev_vlan_id(netdev);
- wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+
+ if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+ wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
fc_set_wwnn(lport, wwnn);
- wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid);
+ if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+ wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+ 2, vid);
fc_set_wwpn(lport, wwpn);
}
@@ -1231,7 +1260,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
"CPU.\n");
spin_unlock_bh(&fps->fcoe_rx_list.lock);
- cpu = first_cpu(cpu_online_map);
+ cpu = cpumask_first(cpu_online_mask);
fps = &per_cpu(fcoe_percpu, cpu);
spin_lock_bh(&fps->fcoe_rx_list.lock);
if (!fps->thread) {
@@ -1838,6 +1867,104 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
}
/**
+ * fcoe_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ int rc = 0;
+
+ mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+	 * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ rtnl_lock();
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ rtnl_unlock();
+
+ if (fcoe)
+ fc_fabric_logoff(fcoe->ctlr.lp);
+ else
+ rc = -ENODEV;
+
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+}
+
+/**
+ * fcoe_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ int rc = 0;
+
+ mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+	 * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ rtnl_lock();
+ fcoe = fcoe_hostlist_lookup_port(netdev);
+ rtnl_unlock();
+
+ if (fcoe)
+ rc = fc_fabric_login(fcoe->ctlr.lp);
+ else
+ rc = -ENODEV;
+
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&fcoe_config_mutex);
+ return rc;
+}
+
+/**
* fcoe_destroy() - Destroy a FCoE interface
* @buffer: The name of the Ethernet interface to be destroyed
* @kp: The associated kernel parameter
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
new file mode 100644
index 00000000000..bb96fdd58e2
--- /dev/null
+++ b/drivers/scsi/hpsa.c
@@ -0,0 +1,3531 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/compat.h>
+#include <linux/blktrace_api.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <linux/cciss_ioctl.h>
+#include <linux/string.h>
+#include <linux/bitmap.h>
+#include <asm/atomic.h>
+#include <linux/kthread.h>
+#include "hpsa_cmd.h"
+#include "hpsa.h"
+
+/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
+#define HPSA_DRIVER_VERSION "1.0.0"
+#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+
+/* How long to wait (in milliseconds) for board to go into simple mode */
+#define MAX_CONFIG_WAIT 30000
+#define MAX_IOCTL_CONFIG_WAIT 1000
+
+/*define how many times we will try a command because of bus resets */
+#define MAX_CMD_RETRIES 3
+
+/* Embedded module documentation macros - see modules.h */
+MODULE_AUTHOR("Hewlett-Packard Company");
+MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
+ HPSA_DRIVER_VERSION);
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION(HPSA_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static int hpsa_allow_any;
+module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_allow_any,
+ "Allow hpsa driver to access unknown HP Smart Array hardware");
+
+/* define the PCI info for the cards we can control */
+static const struct pci_device_id hpsa_pci_device_id[] = {
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
+ {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
+
+/* board_id = Subsystem Device ID & Vendor ID
+ * product = Marketing Name for the board
+ * access = Address of the struct of function pointers
+ */
+static struct board_type products[] = {
+ {0x3223103C, "Smart Array P800", &SA5_access},
+ {0x3234103C, "Smart Array P400", &SA5_access},
+ {0x323d103c, "Smart Array P700M", &SA5_access},
+ {0x3241103C, "Smart Array P212", &SA5_access},
+ {0x3243103C, "Smart Array P410", &SA5_access},
+ {0x3245103C, "Smart Array P410i", &SA5_access},
+ {0x3247103C, "Smart Array P411", &SA5_access},
+ {0x3249103C, "Smart Array P812", &SA5_access},
+ {0x324a103C, "Smart Array P712m", &SA5_access},
+ {0x324b103C, "Smart Array P711m", &SA5_access},
+ {0xFFFF103C, "Unknown Smart Array", &SA5_access},
+};
+
+static int number_of_controllers;
+
+static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
+static void start_io(struct ctlr_info *h);
+
+#ifdef CONFIG_COMPAT
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
+#endif
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c);
+static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
+static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+ int cmd_type);
+
+static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *));
+
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_slave_alloc(struct scsi_device *sdev);
+static void hpsa_slave_destroy(struct scsi_device *sdev);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t lunid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t unique_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
+static ssize_t host_store_rescan(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+static int check_for_unit_attention(struct ctlr_info *h,
+ struct CommandList *c);
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+ struct CommandList *c);
+
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_lunid,
+ &dev_attr_unique_id,
+ NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan,
+ NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+ .module = THIS_MODULE,
+ .name = "hpsa",
+ .proc_name = "hpsa",
+ .queuecommand = hpsa_scsi_queue_command,
+ .can_queue = 512,
+ .this_id = -1,
+ .sg_tablesize = MAXSGENTRIES,
+ .cmd_per_lun = 512,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ .ioctl = hpsa_ioctl,
+ .slave_alloc = hpsa_slave_alloc,
+ .slave_destroy = hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpsa_compat_ioctl,
+#endif
+ .sdev_attrs = hpsa_sdev_attrs,
+ .shost_attrs = hpsa_shost_attrs,
+};
+
+static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
+{
+ unsigned long *priv = shost_priv(sdev->host);
+ return (struct ctlr_info *) *priv;
+}
+
+static struct task_struct *hpsa_scan_thread;
+static DEFINE_MUTEX(hpsa_scan_mutex);
+static LIST_HEAD(hpsa_scan_q);
+static int hpsa_scan_func(void *data);
+
+/**
+ * add_to_scan_list() - add controller to rescan queue
+ * @h: Pointer to the controller.
+ *
+ * Adds the controller to the rescan queue if not already on the queue.
+ *
+ * returns 1 if added to the queue, 0 if skipped (could be on the
+ * queue already, or the controller could be initializing or shutting
+ * down).
+ **/
+static int add_to_scan_list(struct ctlr_info *h)
+{
+ struct ctlr_info *test_h;
+ int found = 0;
+ int ret = 0;
+
+ if (h->busy_initializing)
+ return 0;
+
+ /*
+ * If we don't get the lock, it means the driver is unloading
+ * and there's no point in scheduling a new scan.
+ */
+ if (!mutex_trylock(&h->busy_shutting_down))
+ return 0;
+
+ mutex_lock(&hpsa_scan_mutex);
+ list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
+ if (test_h == h) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found && !h->busy_scanning) {
+ INIT_COMPLETION(h->scan_wait);
+ list_add_tail(&h->scan_list, &hpsa_scan_q);
+ ret = 1;
+ }
+ mutex_unlock(&hpsa_scan_mutex);
+ mutex_unlock(&h->busy_shutting_down);
+
+ return ret;
+}
+
+/**
+ * remove_from_scan_list() - remove controller from rescan queue
+ * @h: Pointer to the controller.
+ *
+ * Removes the controller from the rescan queue if present. Blocks if
+ * the controller is currently conducting a rescan. The controller
+ * can be in one of three states:
+ * 1. Doesn't need a scan
+ * 2. On the scan list, but not scanning yet (we remove it)
+ * 3. Busy scanning (and not on the list). In this case we want to wait for
+ * the scan to complete to make sure the scanning thread for this
+ * controller is completely idle.
+ **/
+static void remove_from_scan_list(struct ctlr_info *h)
+{
+ struct ctlr_info *test_h, *tmp_h;
+
+ mutex_lock(&hpsa_scan_mutex);
+ list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
+ if (test_h == h) { /* state 2. */
+ list_del(&h->scan_list);
+ complete_all(&h->scan_wait);
+ mutex_unlock(&hpsa_scan_mutex);
+ return;
+ }
+ }
+ if (h->busy_scanning) { /* state 3. */
+ mutex_unlock(&hpsa_scan_mutex);
+ wait_for_completion(&h->scan_wait);
+ } else { /* state 1, nothing to do. */
+ mutex_unlock(&hpsa_scan_mutex);
+ }
+}
+
+/* hpsa_scan_func() - kernel thread used to rescan controllers
+ * @data: Ignored.
+ *
+ * A kernel thread used to scan for drive topology changes on
+ * controllers. The thread processes only one controller at a time
+ * using a queue. Controllers are added to the queue using
+ * add_to_scan_list() and removed from the queue either after done
+ * processing or using remove_from_scan_list().
+ *
+ * returns 0.
+ **/
+static int hpsa_scan_func(__attribute__((unused)) void *data)
+{
+ struct ctlr_info *h;
+ int host_no;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ if (kthread_should_stop())
+ break;
+
+ while (1) {
+ mutex_lock(&hpsa_scan_mutex);
+ if (list_empty(&hpsa_scan_q)) {
+ mutex_unlock(&hpsa_scan_mutex);
+ break;
+ }
+ h = list_entry(hpsa_scan_q.next, struct ctlr_info,
+ scan_list);
+ list_del(&h->scan_list);
+ h->busy_scanning = 1;
+ mutex_unlock(&hpsa_scan_mutex);
+ host_no = h->scsi_host ? h->scsi_host->host_no : -1;
+ hpsa_update_scsi_devices(h, host_no);
+ complete_all(&h->scan_wait);
+ mutex_lock(&hpsa_scan_mutex);
+ h->busy_scanning = 0;
+ mutex_unlock(&hpsa_scan_mutex);
+ }
+ }
+ return 0;
+}
+
+static int check_for_unit_attention(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+ return 0;
+
+ switch (c->err_info->SenseInfo[12]) {
+ case STATE_CHANGED:
+ dev_warn(&h->pdev->dev, "hpsa%d: a state change "
+ "detected, command retried\n", h->ctlr);
+ break;
+ case LUN_FAILED:
+ dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
+ "detected, action required\n", h->ctlr);
+ break;
+ case REPORT_LUNS_CHANGED:
+ dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
+ "changed\n", h->ctlr);
+ /*
+ * Here, we could call add_to_scan_list and wake up the scan thread,
+ * except that it's quite likely that we will get more than one
+ * REPORT_LUNS_CHANGED condition in quick succession, which means
+ * that those which occur after the first one will likely happen
+ * *during* the hpsa_scan_thread's rescan. And the rescan code is not
+ * robust enough to restart in the middle, undoing what it has already
+ * done, and it's not clear that it's even possible to do this, since
+ * part of what it does is notify the SCSI mid layer, which starts
+ * doing its own I/O to read partition tables and so on, and the
+ * driver doesn't have visibility to know what might need undoing.
+ * In any event, even if it is possible, it is horribly complicated
+ * to get right, so we just don't do it for now.
+ *
+ * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
+ */
+ break;
+ case POWER_OR_RESET:
+ dev_warn(&h->pdev->dev, "hpsa%d: a power on "
+ "or device reset detected\n", h->ctlr);
+ break;
+ case UNIT_ATTENTION_CLEARED:
+ dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
+ "cleared by another initiator\n", h->ctlr);
+ break;
+ default:
+ dev_warn(&h->pdev->dev, "hpsa%d: unknown "
+ "unit attention detected\n", h->ctlr);
+ break;
+ }
+ return 1;
+}
+
+static ssize_t host_store_rescan(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ unsigned long *priv = shost_priv(shost);
+ h = (struct ctlr_info *) *priv;
+ if (add_to_scan_list(h)) {
+ wake_up_process(hpsa_scan_thread);
+ wait_for_completion_interruptible(&h->scan_wait);
+ }
+ return count;
+}
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct hlist_head *list, struct CommandList *c)
+{
+ hlist_add_head(&c->list, list);
+}
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ start_io(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+ if (WARN_ON(hlist_unhashed(&c->list)))
+ return;
+ hlist_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+ return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
+{
+ return (scsi3addr[3] & 0xC0) == 0x40;
+}
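Aside (illustrative only, not part of the patch): the address-mode test above keys off bits 7:6 of byte 3 of the 8-byte CISS LUN address, where 0x40 marks a logical volume. A minimal standalone sketch of the same check, using made-up example addresses:

	#include <stdio.h>

	/* Same test as is_logical_dev_addr_mode(): bits 7:6 of LUN byte 3 == 01b. */
	static int addr_is_logical(const unsigned char a[8])
	{
		return (a[3] & 0xC0) == 0x40;
	}

	int main(void)
	{
		unsigned char logical[8]  = { 0, 0, 0, 0x40, 0, 0, 0, 1 };  /* made-up example */
		unsigned char physical[8] = { 0, 0, 0, 0x00, 0, 0, 0, 0 };  /* made-up example */

		printf("logical: %d  physical: %d\n",
		       addr_is_logical(logical), addr_is_logical(physical));  /* prints 1 and 0 */
		return 0;
	}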
+
+static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
+ "UNKNOWN"
+};
+#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t l = 0;
+ int rlevel;
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+
+ /* Is this even a logical drive? */
+ if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ l = snprintf(buf, PAGE_SIZE, "N/A\n");
+ return l;
+ }
+
+ rlevel = hdev->raid_level;
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (rlevel < 0 || rlevel > RAID_UNKNOWN)
+ rlevel = RAID_UNKNOWN;
+ l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
+ return l;
+}
+
+static ssize_t lunid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ unsigned char lunid[8];
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ lunid[0], lunid[1], lunid[2], lunid[3],
+ lunid[4], lunid[5], lunid[6], lunid[7]);
+}
+
+static ssize_t unique_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ unsigned char sn[16];
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ memcpy(sn, hdev->device_id, sizeof(sn));
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 16 * 2 + 2,
+ "%02X%02X%02X%02X%02X%02X%02X%02X"
+ "%02X%02X%02X%02X%02X%02X%02X%02X\n",
+ sn[0], sn[1], sn[2], sn[3],
+ sn[4], sn[5], sn[6], sn[7],
+ sn[8], sn[9], sn[10], sn[11],
+ sn[12], sn[13], sn[14], sn[15]);
+}
+
+static int hpsa_find_target_lun(struct ctlr_info *h,
+ unsigned char scsi3addr[], int bus, int *target, int *lun)
+{
+ /* Finds an unused bus, target, lun for a new physical device;
+ * assumes h->devlock is held.
+ */
+ int i, found = 0;
+ DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
+
+ memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
+
+ for (i = 0; i < h->ndevices; i++) {
+ if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
+ set_bit(h->dev[i]->target, lun_taken);
+ }
+
+ for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
+ if (!test_bit(i, lun_taken)) {
+ /* *bus = 1; */
+ *target = i;
+ *lun = 0;
+ found = 1;
+ break;
+ }
+ }
+ return !found;
+}
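For context (not part of the patch): hpsa_find_target_lun() above marks each already-used target in a bitmap and then picks the lowest clear bit as the new target. A standalone sketch of that scan, with a plain unsigned long standing in for the kernel bitmap and example targets:

	#include <stdio.h>

	#define MAX_TARGETS 32  /* example size; the driver uses HPSA_MAX_SCSI_DEVS_PER_HBA */

	int main(void)
	{
		unsigned int used[] = { 0, 1, 2, 4 };   /* example targets already taken */
		unsigned long taken = 0;
		int i;

		/* Mark the taken targets, as the set_bit() loop does above. */
		for (i = 0; i < (int)(sizeof(used) / sizeof(used[0])); i++)
			taken |= 1UL << used[i];

		/* Pick the first free slot, as the test_bit() loop does above. */
		for (i = 0; i < MAX_TARGETS; i++)
			if (!(taken & (1UL << i)))
				break;

		printf("first free target: %d\n", i);   /* prints 3 */
		return 0;
	}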
+
+/* Add an entry into h->dev[] array. */
+static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
+ struct hpsa_scsi_dev_t *device,
+ struct hpsa_scsi_dev_t *added[], int *nadded)
+{
+ /* assumes h->devlock is held */
+ int n = h->ndevices;
+ int i;
+ unsigned char addr1[8], addr2[8];
+ struct hpsa_scsi_dev_t *sd;
+
+ if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
+ dev_err(&h->pdev->dev, "too many devices, some will be "
+ "inaccessible.\n");
+ return -1;
+ }
+
+ /* physical devices do not have lun or target assigned until now. */
+ if (device->lun != -1)
+ /* Logical device, lun is already assigned. */
+ goto lun_assigned;
+
+ /* If this device is a non-zero lun of a multi-lun device,
+ * byte 4 of the 8-byte LUN addr will contain the logical
+ * unit number; zero otherwise.
+ */
+ if (device->scsi3addr[4] == 0) {
+ /* This is not a non-zero lun of a multi-lun device */
+ if (hpsa_find_target_lun(h, device->scsi3addr,
+ device->bus, &device->target, &device->lun) != 0)
+ return -1;
+ goto lun_assigned;
+ }
+
+ /* This is a non-zero lun of a multi-lun device.
+ * Search through our list and find the device which
+ * has the same 8 byte LUN address, excepting byte 4.
+ * Assign the same bus and target for this new LUN.
+ * Use the logical unit number from the firmware.
+ */
+ memcpy(addr1, device->scsi3addr, 8);
+ addr1[4] = 0;
+ for (i = 0; i < n; i++) {
+ sd = h->dev[i];
+ memcpy(addr2, sd->scsi3addr, 8);
+ addr2[4] = 0;
+ /* differ only in byte 4? */
+ if (memcmp(addr1, addr2, 8) == 0) {
+ device->bus = sd->bus;
+ device->target = sd->target;
+ device->lun = device->scsi3addr[4];
+ break;
+ }
+ }
+ if (device->lun == -1) {
+ dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
+ " suspect firmware bug or unsupported hardware "
+ "configuration.\n");
+ return -1;
+ }
+
+lun_assigned:
+
+ h->dev[n] = device;
+ h->ndevices++;
+ added[*nadded] = device;
+ (*nadded)++;
+
+ /* Initially (before registering with the scsi layer) we don't
+ * know our hostno, and we don't want to print anything the first
+ * time through anyway (the scsi layer's inquiries will show that info).
+ */
+ /* if (hostno != -1) */
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
+ scsi_device_type(device->devtype), hostno,
+ device->bus, device->target, device->lun);
+ return 0;
+}
+
+/* Remove an entry from h->dev[] array. */
+static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
+ struct hpsa_scsi_dev_t *removed[], int *nremoved)
+{
+ /* assumes h->devlock is held */
+ int i;
+ struct hpsa_scsi_dev_t *sd;
+
+ if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+ BUG();
+
+ sd = h->dev[entry];
+ removed[*nremoved] = h->dev[entry];
+ (*nremoved)++;
+
+ for (i = entry; i < h->ndevices-1; i++)
+ h->dev[i] = h->dev[i+1];
+ h->ndevices--;
+ dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
+ scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
+ sd->lun);
+}
+
+#define SCSI3ADDR_EQ(a, b) ( \
+ (a)[7] == (b)[7] && \
+ (a)[6] == (b)[6] && \
+ (a)[5] == (b)[5] && \
+ (a)[4] == (b)[4] && \
+ (a)[3] == (b)[3] && \
+ (a)[2] == (b)[2] && \
+ (a)[1] == (b)[1] && \
+ (a)[0] == (b)[0])
+
+static void fixup_botched_add(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *added)
+{
+ /* called when scsi_add_device fails in order to re-adjust
+ * h->dev[] to match the mid layer's view.
+ */
+ unsigned long flags;
+ int i, j;
+
+ spin_lock_irqsave(&h->lock, flags);
+ for (i = 0; i < h->ndevices; i++) {
+ if (h->dev[i] == added) {
+ for (j = i; j < h->ndevices-1; j++)
+ h->dev[j] = h->dev[j+1];
+ h->ndevices--;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ kfree(added);
+}
+
+static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
+ struct hpsa_scsi_dev_t *dev2)
+{
+ if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
+ (dev1->lun != -1 && dev2->lun != -1)) &&
+ dev1->devtype != 0x0C)
+ return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
+
+ /* we compare everything except lun and target as these
+ * are not yet assigned. Compare parts likely
+ * to differ first
+ */
+ if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
+ sizeof(dev1->scsi3addr)) != 0)
+ return 0;
+ if (memcmp(dev1->device_id, dev2->device_id,
+ sizeof(dev1->device_id)) != 0)
+ return 0;
+ if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
+ return 0;
+ if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
+ return 0;
+ if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
+ return 0;
+ if (dev1->devtype != dev2->devtype)
+ return 0;
+ if (dev1->raid_level != dev2->raid_level)
+ return 0;
+ if (dev1->bus != dev2->bus)
+ return 0;
+ return 1;
+}
+
+/* Find needle in haystack. If exact match found, return DEVICE_SAME,
+ * and return needle location in *index. If scsi3addr matches, but not
+ * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
+ * location in *index. If needle not found, return DEVICE_NOT_FOUND.
+ */
+static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
+ struct hpsa_scsi_dev_t *haystack[], int haystack_size,
+ int *index)
+{
+ int i;
+#define DEVICE_NOT_FOUND 0
+#define DEVICE_CHANGED 1
+#define DEVICE_SAME 2
+ for (i = 0; i < haystack_size; i++) {
+ if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
+ *index = i;
+ if (device_is_the_same(needle, haystack[i]))
+ return DEVICE_SAME;
+ else
+ return DEVICE_CHANGED;
+ }
+ }
+ *index = -1;
+ return DEVICE_NOT_FOUND;
+}
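Illustrative aside, not part of the patch: the needle/haystack lookup above reduces to a two-level comparison, first on the 8-byte scsi3addr and then on the identity fields (via device_is_the_same()). A minimal userspace sketch of the same classification, using a cut-down hypothetical device struct:

	#include <stdio.h>
	#include <string.h>

	enum { NOT_FOUND, CHANGED, SAME };

	struct dev {                    /* cut-down stand-in for hpsa_scsi_dev_t */
		unsigned char scsi3addr[8];
		char model[16];
	};

	static int find_entry(const struct dev *needle, const struct dev *haystack,
			      int n, int *index)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (memcmp(needle->scsi3addr, haystack[i].scsi3addr, 8))
				continue;
			*index = i;
			/* Same address: compare identity to tell SAME from CHANGED. */
			return strcmp(needle->model, haystack[i].model) ? CHANGED : SAME;
		}
		*index = -1;
		return NOT_FOUND;
	}

	int main(void)
	{
		struct dev known[] = {
			{ { 0, 0, 0, 0x40, 0, 0, 0, 0 }, "LOGICAL VOLUME" },
		};
		struct dev probe = { { 0, 0, 0, 0x40, 0, 0, 0, 0 }, "OTHER MODEL" };
		int idx, res;

		res = find_entry(&probe, known, 1, &idx);
		printf("result=%d index=%d\n", res, idx);   /* prints result=1 index=0 (CHANGED) */
		return 0;
	}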
+
+static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+ struct hpsa_scsi_dev_t *sd[], int nsds)
+{
+ /* sd contains scsi3 addresses and devtypes, and inquiry
+ * data. This function takes what's in sd to be the current
+ * reality and updates h->dev[] to reflect that reality.
+ */
+ int i, entry, device_change, changes = 0;
+ struct hpsa_scsi_dev_t *csd;
+ unsigned long flags;
+ struct hpsa_scsi_dev_t **added, **removed;
+ int nadded, nremoved;
+ struct Scsi_Host *sh = NULL;
+
+ added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+ GFP_KERNEL);
+ removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+ GFP_KERNEL);
+
+ if (!added || !removed) {
+ dev_warn(&h->pdev->dev, "out of memory in "
+ "adjust_hpsa_scsi_table\n");
+ goto free_and_out;
+ }
+
+ spin_lock_irqsave(&h->devlock, flags);
+
+ /* find any devices in h->dev[] that are not in
+ * sd[] and remove them from h->dev[], and for any
+ * devices which have changed, remove the old device
+ * info and add the new device info.
+ */
+ i = 0;
+ nremoved = 0;
+ nadded = 0;
+ while (i < h->ndevices) {
+ csd = h->dev[i];
+ device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
+ if (device_change == DEVICE_NOT_FOUND) {
+ changes++;
+ hpsa_scsi_remove_entry(h, hostno, i,
+ removed, &nremoved);
+ continue; /* remove ^^^, hence i not incremented */
+ } else if (device_change == DEVICE_CHANGED) {
+ changes++;
+ hpsa_scsi_remove_entry(h, hostno, i,
+ removed, &nremoved);
+ (void) hpsa_scsi_add_entry(h, hostno, sd[entry],
+ added, &nadded);
+ /* add can't fail, we just removed one. */
+ sd[entry] = NULL; /* prevent it from being freed */
+ }
+ i++;
+ }
+
+ /* Now, make sure every device listed in sd[] is also
+ * listed in h->dev[], adding them if they aren't found
+ */
+
+ for (i = 0; i < nsds; i++) {
+ if (!sd[i]) /* if already added above. */
+ continue;
+ device_change = hpsa_scsi_find_entry(sd[i], h->dev,
+ h->ndevices, &entry);
+ if (device_change == DEVICE_NOT_FOUND) {
+ changes++;
+ if (hpsa_scsi_add_entry(h, hostno, sd[i],
+ added, &nadded) != 0)
+ break;
+ sd[i] = NULL; /* prevent from being freed later. */
+ } else if (device_change == DEVICE_CHANGED) {
+ /* should never happen... */
+ changes++;
+ dev_warn(&h->pdev->dev,
+ "device unexpectedly changed.\n");
+ /* but if it does happen, we just ignore that device */
+ }
+ }
+ spin_unlock_irqrestore(&h->devlock, flags);
+
+ /* Don't notify scsi mid layer of any changes the first time through
+ * (or if there are no changes) scsi_scan_host will do it later the
+ * first time through.
+ */
+ if (hostno == -1 || !changes)
+ goto free_and_out;
+
+ sh = h->scsi_host;
+ /* Notify scsi mid layer of any removed devices */
+ for (i = 0; i < nremoved; i++) {
+ struct scsi_device *sdev =
+ scsi_device_lookup(sh, removed[i]->bus,
+ removed[i]->target, removed[i]->lun);
+ if (sdev != NULL) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else {
+ /* We don't expect to get here.
+ * future cmds to this device will get selection
+ * timeout as if the device was gone.
+ */
+ dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
+ " for removal.", hostno, removed[i]->bus,
+ removed[i]->target, removed[i]->lun);
+ }
+ kfree(removed[i]);
+ removed[i] = NULL;
+ }
+
+ /* Notify scsi mid layer of any added devices */
+ for (i = 0; i < nadded; i++) {
+ if (scsi_add_device(sh, added[i]->bus,
+ added[i]->target, added[i]->lun) == 0)
+ continue;
+ dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
+ "device not added.\n", hostno, added[i]->bus,
+ added[i]->target, added[i]->lun);
+ /* now we have to remove it from h->dev,
+ * since it didn't get added to scsi mid layer
+ */
+ fixup_botched_add(h, added[i]);
+ }
+
+free_and_out:
+ kfree(added);
+ kfree(removed);
+ return 0;
+}
+
+/*
+ * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
+ * Assumes h->devlock is held.
+ */
+static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
+ int bus, int target, int lun)
+{
+ int i;
+ struct hpsa_scsi_dev_t *sd;
+
+ for (i = 0; i < h->ndevices; i++) {
+ sd = h->dev[i];
+ if (sd->bus == bus && sd->target == target && sd->lun == lun)
+ return sd;
+ }
+ return NULL;
+}
+
+/* link sdev->hostdata to our per-device structure. */
+static int hpsa_slave_alloc(struct scsi_device *sdev)
+{
+ struct hpsa_scsi_dev_t *sd;
+ unsigned long flags;
+ struct ctlr_info *h;
+
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->devlock, flags);
+ sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
+ sdev_id(sdev), sdev->lun);
+ if (sd != NULL)
+ sdev->hostdata = sd;
+ spin_unlock_irqrestore(&h->devlock, flags);
+ return 0;
+}
+
+static void hpsa_slave_destroy(struct scsi_device *sdev)
+{
+ return; /* nothing to do. */
+}
+
+static void hpsa_scsi_setup(struct ctlr_info *h)
+{
+ h->ndevices = 0;
+ h->scsi_host = NULL;
+ spin_lock_init(&h->devlock);
+ return;
+}
+
+static void complete_scsi_command(struct CommandList *cp,
+ int timeout, __u32 tag)
+{
+ struct scsi_cmnd *cmd;
+ struct ctlr_info *h;
+ struct ErrorInfo *ei;
+
+ unsigned char sense_key;
+ unsigned char asc; /* additional sense code */
+ unsigned char ascq; /* additional sense code qualifier */
+
+ ei = cp->err_info;
+ cmd = (struct scsi_cmnd *) cp->scsi_cmd;
+ h = cp->h;
+
+ scsi_dma_unmap(cmd); /* undo the DMA mappings */
+
+ cmd->result = (DID_OK << 16); /* host byte */
+ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ cmd->result |= (ei->ScsiStatus << 1);
+
+ /* copy the sense data whether we need to or not. */
+ memcpy(cmd->sense_buffer, ei->SenseInfo,
+ ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
+ SCSI_SENSE_BUFFERSIZE :
+ ei->SenseLen);
+ scsi_set_resid(cmd, ei->ResidualCnt);
+
+ if (ei->CommandStatus == 0) {
+ cmd->scsi_done(cmd);
+ cmd_free(h, cp);
+ return;
+ }
+
+ /* an error has occurred */
+ switch (ei->CommandStatus) {
+
+ case CMD_TARGET_STATUS:
+ if (ei->ScsiStatus) {
+ /* Get sense key */
+ sense_key = 0xf & ei->SenseInfo[2];
+ /* Get additional sense code */
+ asc = ei->SenseInfo[12];
+ /* Get additional sense code qualifier */
+ ascq = ei->SenseInfo[13];
+ }
+
+ if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
+ if (check_for_unit_attention(h, cp)) {
+ cmd->result = DID_SOFT_ERROR << 16;
+ break;
+ }
+ if (sense_key == ILLEGAL_REQUEST) {
+ /*
+ * SCSI REPORT_LUNS is commonly unsupported on
+ * Smart Array. Suppress noisy complaint.
+ */
+ if (cp->Request.CDB[0] == REPORT_LUNS)
+ break;
+
+ /* If ASC/ASCQ indicate Logical Unit
+ * Not Supported condition,
+ */
+ if ((asc == 0x25) && (ascq == 0x0)) {
+ dev_warn(&h->pdev->dev, "cp %p "
+ "has check condition\n", cp);
+ break;
+ }
+ }
+
+ if (sense_key == NOT_READY) {
+ /* If Sense is Not Ready, Logical Unit
+ * Not ready, Manual Intervention
+ * required
+ */
+ if ((asc == 0x04) && (ascq == 0x03)) {
+ cmd->result = DID_NO_CONNECT << 16;
+ dev_warn(&h->pdev->dev, "cp %p "
+ "has check condition: unit "
+ "not ready, manual "
+ "intervention required\n", cp);
+ break;
+ }
+ }
+
+
+ /* Must be some other type of check condition */
+ dev_warn(&h->pdev->dev, "cp %p has check condition: "
+ "unknown type: "
+ "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+ "Returning result: 0x%x, "
+ "cmd=[%02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x]\n",
+ cp, sense_key, asc, ascq,
+ cmd->result,
+ cmd->cmnd[0], cmd->cmnd[1],
+ cmd->cmnd[2], cmd->cmnd[3],
+ cmd->cmnd[4], cmd->cmnd[5],
+ cmd->cmnd[6], cmd->cmnd[7],
+ cmd->cmnd[8], cmd->cmnd[9]);
+ break;
+ }
+
+
+ /* Problem was not a check condition
+ * Pass it up to the upper layers...
+ */
+ if (ei->ScsiStatus) {
+ dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
+ "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+ "Returning result: 0x%x\n",
+ cp, ei->ScsiStatus,
+ sense_key, asc, ascq,
+ cmd->result);
+ } else { /* scsi status is zero??? How??? */
+ dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
+ "Returning no connection.\n", cp),
+
+ /* Ordinarily, this case should never happen,
+ * but there is a bug in some released firmware
+ * revisions that allows it to happen if, for
+ * example, a 4100 backplane loses power and
+ * the tape drive is in it. We assume that
+ * it's a fatal error of some kind because we
+ * can't show that it wasn't. We will make it
+ * look like selection timeout since that is
+ * the most common reason for this to occur,
+ * and it's severe enough.
+ */
+
+ cmd->result = DID_NO_CONNECT << 16;
+ }
+ break;
+
+ case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ break;
+ case CMD_DATA_OVERRUN:
+ dev_warn(&h->pdev->dev, "cp %p has"
+ " completed with data overrun "
+ "reported\n", cp);
+ break;
+ case CMD_INVALID: {
+ /* print_bytes(cp, sizeof(*cp), 1, 0);
+ print_cmd(cp); */
+ /* We get CMD_INVALID if you address a non-existent device
+ * instead of a selection timeout (no response). You will
+ * see this if you yank out a drive, then try to access it.
+ * This is kind of a shame because it means that any other
+ * CMD_INVALID (e.g. driver bug) will get interpreted as a
+ * missing target. */
+ cmd->result = DID_NO_CONNECT << 16;
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
+ dev_warn(&h->pdev->dev, "cp %p has "
+ "protocol error \n", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
+ break;
+ case CMD_CONNECTION_LOST:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
+ break;
+ case CMD_ABORTED:
+ cmd->result = DID_ABORT << 16;
+ dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
+ cp, ei->ScsiStatus);
+ break;
+ case CMD_ABORT_FAILED:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ cmd->result = DID_ABORT << 16;
+ dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
+ "abort\n", cp);
+ break;
+ case CMD_TIMEOUT:
+ cmd->result = DID_TIME_OUT << 16;
+ dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
+ break;
+ default:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
+ cp, ei->CommandStatus);
+ }
+ cmd->scsi_done(cmd);
+ cmd_free(h, cp);
+}
+
+static int hpsa_scsi_detect(struct ctlr_info *h)
+{
+ struct Scsi_Host *sh;
+ int error;
+
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+ if (sh == NULL)
+ goto fail;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->this_id = -1;
+ sh->max_channel = 3;
+ sh->max_cmd_len = MAX_COMMAND_SIZE;
+ sh->max_lun = HPSA_MAX_LUN;
+ sh->max_id = HPSA_MAX_LUN;
+ h->scsi_host = sh;
+ sh->hostdata[0] = (unsigned long) h;
+ sh->irq = h->intr[SIMPLE_MODE_INT];
+ sh->unique_id = sh->irq;
+ error = scsi_add_host(sh, &h->pdev->dev);
+ if (error)
+ goto fail_host_put;
+ scsi_scan_host(sh);
+ return 0;
+
+ fail_host_put:
+ dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
+ " failed for controller %d\n", h->ctlr);
+ scsi_host_put(sh);
+ return -1;
+ fail:
+ dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
+ " failed for controller %d\n", h->ctlr);
+ return -1;
+}
+
+static void hpsa_pci_unmap(struct pci_dev *pdev,
+ struct CommandList *c, int sg_used, int data_direction)
+{
+ int i;
+ union u64bit addr64;
+
+ for (i = 0; i < sg_used; i++) {
+ addr64.val32.lower = c->SG[i].Addr.lower;
+ addr64.val32.upper = c->SG[i].Addr.upper;
+ pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
+ data_direction);
+ }
+}
+
+static void hpsa_map_one(struct pci_dev *pdev,
+ struct CommandList *cp,
+ unsigned char *buf,
+ size_t buflen,
+ int data_direction)
+{
+ __u64 addr64;
+
+ if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = 0;
+ return;
+ }
+
+ addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+ cp->SG[0].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[0].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[0].Len = buflen;
+ cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+}
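For illustration only (not part of the patch): hpsa_map_one() above splits the 64-bit DMA bus address into two 32-bit halves for the controller's SG descriptor. A standalone sketch of the split, assuming a made-up address value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t addr64 = 0x0000001234abcd00ULL;   /* example bus address */
		uint32_t lower = (uint32_t)(addr64 & 0xFFFFFFFFULL);
		uint32_t upper = (uint32_t)((addr64 >> 32) & 0xFFFFFFFFULL);

		printf("lower=0x%08x upper=0x%08x\n", lower, upper);
		/* prints lower=0x34abcd00 upper=0x00000012 */
		return 0;
	}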
+
+static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ c->waiting = &wait;
+ enqueue_cmd_and_start_io(h, c);
+ wait_for_completion(&wait);
+}
+
+static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
+ struct CommandList *c, int data_direction)
+{
+ int retry_count = 0;
+
+ do {
+ memset(c->err_info, 0, sizeof(c->err_info));
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ retry_count++;
+ } while (check_for_unit_attention(h, c) && retry_count <= 3);
+ hpsa_pci_unmap(h->pdev, c, 1, data_direction);
+}
+
+static void hpsa_scsi_interpret_error(struct CommandList *cp)
+{
+ struct ErrorInfo *ei;
+ struct device *d = &cp->h->pdev->dev;
+
+ ei = cp->err_info;
+ switch (ei->CommandStatus) {
+ case CMD_TARGET_STATUS:
+ dev_warn(d, "cmd %p has completed with errors\n", cp);
+ dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
+ ei->ScsiStatus);
+ if (ei->ScsiStatus == 0)
+ dev_warn(d, "SCSI status is abnormally zero. "
+ "(probably indicates selection timeout "
+ "reported incorrectly due to a known "
+ "firmware bug, circa July, 2001.)\n");
+ break;
+ case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+ dev_info(d, "UNDERRUN\n");
+ break;
+ case CMD_DATA_OVERRUN:
+ dev_warn(d, "cp %p has completed with data overrun\n", cp);
+ break;
+ case CMD_INVALID: {
+ /* controller unfortunately reports SCSI passthrus
+ * to non-existent targets as invalid commands.
+ */
+ dev_warn(d, "cp %p is reported invalid (probably means "
+ "target device no longer present)\n", cp);
+ /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+ print_cmd(cp); */
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
+ dev_warn(d, "cp %p has protocol error \n", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ /* cmd->result = DID_ERROR << 16; */
+ dev_warn(d, "cp %p had hardware error\n", cp);
+ break;
+ case CMD_CONNECTION_LOST:
+ dev_warn(d, "cp %p had connection lost\n", cp);
+ break;
+ case CMD_ABORTED:
+ dev_warn(d, "cp %p was aborted\n", cp);
+ break;
+ case CMD_ABORT_FAILED:
+ dev_warn(d, "cp %p reports abort failed\n", cp);
+ break;
+ case CMD_UNSOLICITED_ABORT:
+ dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+ break;
+ case CMD_TIMEOUT:
+ dev_warn(d, "cp %p timed out\n", cp);
+ break;
+ default:
+ dev_warn(d, "cp %p returned unknown status %x\n", cp,
+ ei->CommandStatus);
+ }
+}
+
+static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
+ unsigned char page, unsigned char *buf,
+ unsigned char bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -1;
+ }
+
+ fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(c);
+ rc = -1;
+ }
+ cmd_special_free(h, c);
+ return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -1;
+ }
+
+ fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ /* no unmap needed here because no data xfer. */
+
+ ei = c->err_info;
+ if (ei->CommandStatus != 0) {
+ hpsa_scsi_interpret_error(c);
+ rc = -1;
+ }
+ cmd_special_free(h, c);
+ return rc;
+}
+
+static void hpsa_get_raid_level(struct ctlr_info *h,
+ unsigned char *scsi3addr, unsigned char *raid_level)
+{
+ int rc;
+ unsigned char *buf;
+
+ *raid_level = RAID_UNKNOWN;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+ if (rc == 0)
+ *raid_level = buf[8];
+ if (*raid_level > RAID_UNKNOWN)
+ *raid_level = RAID_UNKNOWN;
+ kfree(buf);
+ return;
+}
+
+/* Get the device id from inquiry page 0x83 */
+static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
+ unsigned char *device_id, int buflen)
+{
+ int rc;
+ unsigned char *buf;
+
+ if (buflen > 16)
+ buflen = 16;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -1;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+ if (rc == 0)
+ memcpy(device_id, &buf[8], buflen);
+ kfree(buf);
+ return rc != 0;
+}
+
+static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
+ struct ReportLUNdata *buf, int bufsize,
+ int extended_response)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ unsigned char scsi3addr[8];
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+ if (c == NULL) { /* trouble... */
+ dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -1;
+ }
+
+ memset(&scsi3addr[0], 0, 8); /* address the controller */
+
+ fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ if (extended_response)
+ c->Request.CDB[1] = extended_response;
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 &&
+ ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(c);
+ rc = -1;
+ }
+ cmd_special_free(h, c);
+ return rc;
+}
+
+static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
+ struct ReportLUNdata *buf,
+ int bufsize, int extended_response)
+{
+ return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
+}
+
+static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
+ struct ReportLUNdata *buf, int bufsize)
+{
+ return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
+}
+
+static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
+ int bus, int target, int lun)
+{
+ device->bus = bus;
+ device->target = target;
+ device->lun = lun;
+}
+
+static int hpsa_update_device_info(struct ctlr_info *h,
+ unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+{
+#define OBDR_TAPE_INQ_SIZE 49
+ unsigned char *inq_buff = NULL;
+
+ inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ if (!inq_buff)
+ goto bail_out;
+
+ memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
+ /* Do an inquiry to the device to see what it is. */
+ if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
+ (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
+ /* Inquiry failed (msg printed already) */
+ dev_err(&h->pdev->dev,
+ "hpsa_update_device_info: inquiry failed\n");
+ goto bail_out;
+ }
+
+ /* As a side effect, record the firmware version number
+ * if we happen to be talking to the RAID controller.
+ */
+ if (is_hba_lunid(scsi3addr))
+ memcpy(h->firm_ver, &inq_buff[32], 4);
+
+ this_device->devtype = (inq_buff[0] & 0x1f);
+ memcpy(this_device->scsi3addr, scsi3addr, 8);
+ memcpy(this_device->vendor, &inq_buff[8],
+ sizeof(this_device->vendor));
+ memcpy(this_device->model, &inq_buff[16],
+ sizeof(this_device->model));
+ memcpy(this_device->revision, &inq_buff[32],
+ sizeof(this_device->revision));
+ memset(this_device->device_id, 0,
+ sizeof(this_device->device_id));
+ hpsa_get_device_id(h, scsi3addr, this_device->device_id,
+ sizeof(this_device->device_id));
+
+ if (this_device->devtype == TYPE_DISK &&
+ is_logical_dev_addr_mode(scsi3addr))
+ hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
+ else
+ this_device->raid_level = RAID_UNKNOWN;
+
+ kfree(inq_buff);
+ return 0;
+
+bail_out:
+ kfree(inq_buff);
+ return 1;
+}
+
+static unsigned char *msa2xxx_model[] = {
+ "MSA2012",
+ "MSA2024",
+ "MSA2312",
+ "MSA2324",
+ NULL,
+};
+
+static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
+{
+ int i;
+
+ for (i = 0; msa2xxx_model[i]; i++)
+ if (strncmp(device->model, msa2xxx_model[i],
+ strlen(msa2xxx_model[i])) == 0)
+ return 1;
+ return 0;
+}
+
+/* Helper function to assign bus, target, lun mapping of devices.
+ * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
+ * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
+ * Logical drive target and lun are assigned at this time, but
+ * physical device lun and target assignment are deferred (assigned
+ * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
+ */
+static void figure_bus_target_lun(struct ctlr_info *h,
+ __u8 *lunaddrbytes, int *bus, int *target, int *lun,
+ struct hpsa_scsi_dev_t *device)
+{
+
+ __u32 lunid;
+
+ if (is_logical_dev_addr_mode(lunaddrbytes)) {
+ /* logical device */
+ memcpy(&lunid, lunaddrbytes, sizeof(lunid));
+ lunid = le32_to_cpu(lunid);
+
+ if (is_msa2xxx(h, device)) {
+ *bus = 1;
+ *target = (lunid >> 16) & 0x3fff;
+ *lun = lunid & 0x00ff;
+ } else {
+ *bus = 0;
+ *lun = 0;
+ *target = lunid & 0x3fff;
+ }
+ } else {
+ /* physical device */
+ if (is_hba_lunid(lunaddrbytes))
+ *bus = 3;
+ else
+ *bus = 2;
+ *target = -1;
+ *lun = -1; /* we will fill these in later. */
+ }
+}
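Aside (not part of the patch): for logical devices the routine above takes the first four LUN-address bytes as a little-endian 32-bit lunid and slices it into target and lun fields. A standalone sketch of the MSA2xxx decode, using a made-up lunid:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* Example LUN address bytes as reported by the controller (made up). */
		unsigned char lunaddrbytes[8] = { 0x07, 0x00, 0x02, 0x40, 0, 0, 0, 0 };
		uint32_t lunid;

		memcpy(&lunid, lunaddrbytes, sizeof(lunid));
		/* On a little-endian host this is already the right byte order;
		 * the driver uses le32_to_cpu() to be safe on big-endian too. */

		printf("target=%u lun=%u\n",
		       (lunid >> 16) & 0x3fff,    /* MSA2xxx target field */
		       lunid & 0x00ff);           /* MSA2xxx lun field */
		/* prints target=2 lun=7 on a little-endian host */
		return 0;
	}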
+
+/*
+ * If there is no lun 0 on a target, Linux won't find any devices.
+ * For the MSA2xxx boxes, we have to manually detect the enclosure
+ * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
+ * it for some reason. *tmpdevice is the target we're adding,
+ * this_device is a pointer into the current element of currentsd[]
+ * that we're building up in hpsa_update_scsi_devices(), below.
+ * lunzerobits is a bitmap that tracks which targets already have a
+ * lun 0 assigned.
+ * Returns 1 if an enclosure was added, 0 if not.
+ */
+static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *tmpdevice,
+ struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
+ int bus, int target, int lun, unsigned long lunzerobits[],
+ int *nmsa2xxx_enclosures)
+{
+ unsigned char scsi3addr[8];
+
+ if (test_bit(target, lunzerobits))
+ return 0; /* There is already a lun 0 on this target. */
+
+ if (!is_logical_dev_addr_mode(lunaddrbytes))
+ return 0; /* It's the logical targets that may lack lun 0. */
+
+ if (!is_msa2xxx(h, tmpdevice))
+ return 0; /* It's only the MSA2xxx that have this problem. */
+
+ if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
+ return 0;
+
+ if (is_hba_lunid(scsi3addr))
+ return 0; /* Don't add the RAID controller here. */
+
+#define MAX_MSA2XXX_ENCLOSURES 32
+ if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
+ dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
+ "enclosures exceeded. Check your hardware "
+ "configuration.");
+ return 0;
+ }
+
+ memset(scsi3addr, 0, 8);
+ scsi3addr[3] = target;
+ if (hpsa_update_device_info(h, scsi3addr, this_device))
+ return 0;
+ (*nmsa2xxx_enclosures)++;
+ hpsa_set_bus_target_lun(this_device, bus, target, 0);
+ set_bit(target, lunzerobits);
+ return 1;
+}
+
+/*
+ * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
+ * logdev. The number of luns in physdev and logdev are returned in
+ * *nphysicals and *nlogicals, respectively.
+ * Returns 0 on success, -1 otherwise.
+ */
+static int hpsa_gather_lun_info(struct ctlr_info *h,
+ int reportlunsize,
+ struct ReportLUNdata *physdev, __u32 *nphysicals,
+ struct ReportLUNdata *logdev, __u32 *nlogicals)
+{
+ if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+ dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
+ return -1;
+ }
+ memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
+ *nphysicals = be32_to_cpu(*nphysicals) / 8;
+#ifdef DEBUG
+ dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
+#endif
+ if (*nphysicals > HPSA_MAX_PHYS_LUN) {
+ dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
+ " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+ *nphysicals - HPSA_MAX_PHYS_LUN);
+ *nphysicals = HPSA_MAX_PHYS_LUN;
+ }
+ if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
+ dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
+ return -1;
+ }
+ memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
+ *nlogicals = be32_to_cpu(*nlogicals) / 8;
+#ifdef DEBUG
+ dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
+#endif
+ /* Reject Logicals in excess of our max capability. */
+ if (*nlogicals > HPSA_MAX_LUN) {
+ dev_warn(&h->pdev->dev,
+ "maximum logical LUNs (%d) exceeded. "
+ "%d LUNs ignored.\n", HPSA_MAX_LUN,
+ *nlogicals - HPSA_MAX_LUN);
+ *nlogicals = HPSA_MAX_LUN;
+ }
+ if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
+ dev_warn(&h->pdev->dev,
+ "maximum logical + physical LUNs (%d) exceeded. "
+ "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+ *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
+ *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
+ }
+ return 0;
+}
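Side note (not part of the patch): the report-LUNs parsing above reads a big-endian 32-bit byte count from the response header and divides by 8 to get the number of LUN entries. A minimal standalone sketch of that conversion, with example header bytes:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Example REPORT LUNS header: list length 0x30 = 48 bytes, big-endian. */
		unsigned char lun_list_length[4] = { 0x00, 0x00, 0x00, 0x30 };
		uint32_t nbytes = ((uint32_t)lun_list_length[0] << 24) |
				  ((uint32_t)lun_list_length[1] << 16) |
				  ((uint32_t)lun_list_length[2] << 8)  |
				   (uint32_t)lun_list_length[3];

		printf("%u LUN entries\n", nbytes / 8);   /* prints 6 (48 bytes / 8 per entry) */
		return 0;
	}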
+
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+{
+ /* the idea here is we could get notified
+ * that some devices have changed, so we do a report
+ * physical luns and report logical luns cmd, and adjust
+ * our list of devices accordingly.
+ *
+ * The scsi3addr of a device won't change so long as the
+ * adapter is not reset. That means we can rescan and
+ * tell which devices we already know about, vs. new
+ * devices, vs. disappearing devices.
+ */
+ struct ReportLUNdata *physdev_list = NULL;
+ struct ReportLUNdata *logdev_list = NULL;
+ unsigned char *inq_buff = NULL;
+ __u32 nphysicals = 0;
+ __u32 nlogicals = 0;
+ __u32 ndev_allocated = 0;
+ struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
+ int ncurrent = 0;
+ int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+ int i, nmsa2xxx_enclosures, ndevs_to_allocate;
+ int bus, target, lun;
+ DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
+
+ currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+ GFP_KERNEL);
+ physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
+ logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
+ inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+ tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
+
+ if (!currentsd || !physdev_list || !logdev_list ||
+ !inq_buff || !tmpdevice) {
+ dev_err(&h->pdev->dev, "out of memory\n");
+ goto out;
+ }
+ memset(lunzerobits, 0, sizeof(lunzerobits));
+
+ if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
+ logdev_list, &nlogicals))
+ goto out;
+
+ /* We might see up to 32 MSA2xxx enclosures; really only 8 of them,
+ * but each of them 4 times through different paths. The plus 1
+ * is for the RAID controller.
+ */
+ ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
+
+ /* Allocate the per device structures */
+ for (i = 0; i < ndevs_to_allocate; i++) {
+ currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
+ if (!currentsd[i]) {
+ dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
+ __FILE__, __LINE__);
+ goto out;
+ }
+ ndev_allocated++;
+ }
+
+ /* adjust our table of devices */
+ nmsa2xxx_enclosures = 0;
+ for (i = 0; i < nphysicals + nlogicals + 1; i++) {
+ __u8 *lunaddrbytes;
+
+ /* Figure out where the LUN ID info is coming from */
+ if (i < nphysicals)
+ lunaddrbytes = &physdev_list->LUN[i][0];
+ else
+ if (i < nphysicals + nlogicals)
+ lunaddrbytes =
+ &logdev_list->LUN[i-nphysicals][0];
+ else /* jam in the RAID controller at the end */
+ lunaddrbytes = RAID_CTLR_LUNID;
+
+ /* skip masked physical devices. */
+ if (lunaddrbytes[3] & 0xC0 && i < nphysicals)
+ continue;
+
+ /* Get device type, vendor, model, device id */
+ if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
+ continue; /* skip it if we can't talk to it. */
+ figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
+ tmpdevice);
+ this_device = currentsd[ncurrent];
+
+ /*
+ * For the msa2xxx boxes, we have to insert a LUN 0 which
+ * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
+ * is nonetheless an enclosure device there. We have to
+ * present it, otherwise Linux won't find anything if
+ * there is no lun 0.
+ */
+ if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
+ lunaddrbytes, bus, target, lun, lunzerobits,
+ &nmsa2xxx_enclosures)) {
+ ncurrent++;
+ this_device = currentsd[ncurrent];
+ }
+
+ *this_device = *tmpdevice;
+ hpsa_set_bus_target_lun(this_device, bus, target, lun);
+
+ switch (this_device->devtype) {
+ case TYPE_ROM: {
+ /* We don't *really* support actual CD-ROM devices,
+ * just "One Button Disaster Recovery" tape drive
+ * which temporarily pretends to be a CD-ROM drive.
+ * So we check that the device is really an OBDR tape
+ * device by checking for "$DR-10" in bytes 43-48 of
+ * the inquiry data.
+ */
+ char obdr_sig[7];
+#define OBDR_TAPE_SIG "$DR-10"
+ strncpy(obdr_sig, &inq_buff[43], 6);
+ obdr_sig[6] = '\0';
+ if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
+ /* Not OBDR device, ignore it. */
+ break;
+ }
+ ncurrent++;
+ break;
+ case TYPE_DISK:
+ if (i < nphysicals)
+ break;
+ ncurrent++;
+ break;
+ case TYPE_TAPE:
+ case TYPE_MEDIUM_CHANGER:
+ ncurrent++;
+ break;
+ case TYPE_RAID:
+ /* Only present the Smartarray HBA as a RAID controller.
+ * If it's a RAID controller other than the HBA itself
+ * (an external RAID controller, MSA500 or similar)
+ * don't present it.
+ */
+ if (!is_hba_lunid(lunaddrbytes))
+ break;
+ ncurrent++;
+ break;
+ default:
+ break;
+ }
+ if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+ break;
+ }
+ adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
+out:
+ kfree(tmpdevice);
+ for (i = 0; i < ndev_allocated; i++)
+ kfree(currentsd[i]);
+ kfree(currentsd);
+ kfree(inq_buff);
+ kfree(physdev_list);
+ kfree(logdev_list);
+ return;
+}
+
+/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
+ * dma mapping and fills in the scatter gather entries of the
+ * hpsa command, cp.
+ */
+static int hpsa_scatter_gather(struct pci_dev *pdev,
+ struct CommandList *cp,
+ struct scsi_cmnd *cmd)
+{
+ unsigned int len;
+ struct scatterlist *sg;
+ __u64 addr64;
+ int use_sg, i;
+
+ BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+
+ use_sg = scsi_dma_map(cmd);
+ if (use_sg < 0)
+ return use_sg;
+
+ if (!use_sg)
+ goto sglist_finished;
+
+ scsi_for_each_sg(cmd, sg, use_sg, i) {
+ addr64 = (__u64) sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ cp->SG[i].Addr.lower =
+ (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Addr.upper =
+ (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+ cp->SG[i].Len = len;
+ cp->SG[i].Ext = 0; /* we are not chaining */
+ }
+
+sglist_finished:
+
+ cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
+ cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
+ return 0;
+}
+
+
+static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+ void (*done)(struct scsi_cmnd *))
+{
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+ unsigned char scsi3addr[8];
+ struct CommandList *c;
+ unsigned long flags;
+
+ /* Get the ptr to our adapter structure out of cmd->host. */
+ h = sdev_to_hba(cmd->device);
+ dev = cmd->device->hostdata;
+ if (!dev) {
+ cmd->result = DID_NO_CONNECT << 16;
+ done(cmd);
+ return 0;
+ }
+ memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
+
+ /* Need a lock as this is being allocated from the pool */
+ spin_lock_irqsave(&h->lock, flags);
+ c = cmd_alloc(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (c == NULL) { /* trouble... */
+ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* Fill in the command list header */
+
+ cmd->scsi_done = done; /* save this for use by completion code */
+
+ /* save c in case we have to abort it */
+ cmd->host_scribble = (unsigned char *) c;
+
+ c->cmd_type = CMD_SCSI;
+ c->scsi_cmd = cmd;
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+ c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */
+
+ /* Fill in the request block... */
+
+ c->Request.Timeout = 0;
+ memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+ BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
+ c->Request.CDBLen = cmd->cmd_len;
+ memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
+ c->Request.Type.Type = TYPE_CMD;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ switch (cmd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ c->Request.Type.Direction = XFER_WRITE;
+ break;
+ case DMA_FROM_DEVICE:
+ c->Request.Type.Direction = XFER_READ;
+ break;
+ case DMA_NONE:
+ c->Request.Type.Direction = XFER_NONE;
+ break;
+ case DMA_BIDIRECTIONAL:
+ /* This can happen if a buggy application does a scsi passthru
+ * and sets both inlen and outlen to non-zero. ( see
+ * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
+ */
+
+ c->Request.Type.Direction = XFER_RSVD;
+ /* This is technically wrong, and hpsa controllers should
+ * reject it with CMD_INVALID, which is the most correct
+ * response, but non-fibre backends appear to let it
+ * slide by, and give the same results as if this field
+ * were set correctly. Either way is acceptable for
+ * our purposes here.
+ */
+
+ break;
+
+ default:
+ dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+ cmd->sc_data_direction);
+ BUG();
+ break;
+ }
+
+ if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
+ cmd_free(h, c);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ enqueue_cmd_and_start_io(h, c);
+ /* the cmd'll come back via intr handler in complete_scsi_command() */
+ return 0;
+}
+
+static void hpsa_unregister_scsi(struct ctlr_info *h)
+{
+ /* we are being forcibly unloaded, and may not refuse. */
+ scsi_remove_host(h->scsi_host);
+ scsi_host_put(h->scsi_host);
+ h->scsi_host = NULL;
+}
+
+static int hpsa_register_scsi(struct ctlr_info *h)
+{
+ int rc;
+
+ hpsa_update_scsi_devices(h, -1);
+ rc = hpsa_scsi_detect(h);
+ if (rc != 0)
+ dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
+ " hpsa_scsi_detect(), rc is %d\n", rc);
+ return rc;
+}
+
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+ unsigned char lunaddr[])
+{
+ int rc = 0;
+ int count = 0;
+ int waittime = 1; /* seconds */
+ struct CommandList *c;
+
+ c = cmd_special_alloc(h);
+ if (!c) {
+ dev_warn(&h->pdev->dev, "out of memory in "
+ "wait_for_device_to_become_ready.\n");
+ return IO_ERROR;
+ }
+
+ /* Send test unit ready until device ready, or give up. */
+ while (count < HPSA_TUR_RETRY_LIMIT) {
+
+ /* Wait for a bit. Do this first, because if we send
+ * the TUR right away, the reset will just abort it.
+ */
+ msleep(1000 * waittime);
+ count++;
+
+ /* Increase wait time with each try, up to a point. */
+ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
+ waittime = waittime * 2;
+
+ /* Send the Test Unit Ready */
+ fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ /* no unmap needed here because no data xfer. */
+
+ if (c->err_info->CommandStatus == CMD_SUCCESS)
+ break;
+
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
+ (c->err_info->SenseInfo[2] == NO_SENSE ||
+ c->err_info->SenseInfo[2] == UNIT_ATTENTION))
+ break;
+
+ dev_warn(&h->pdev->dev, "waiting %d secs "
+ "for device to become ready.\n", waittime);
+ rc = 1; /* device not ready. */
+ }
+
+ if (rc)
+ dev_warn(&h->pdev->dev, "giving up on device.\n");
+ else
+ dev_warn(&h->pdev->dev, "device is ready.\n");
+
+ cmd_special_free(h, c);
+ return rc;
+}
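Aside, not part of the patch: the TUR retry loop above sleeps before each attempt and doubles the wait until it passes a cap, i.e. a simple capped exponential backoff. A standalone sketch of just the timing schedule, with made-up limits standing in for HPSA_TUR_RETRY_LIMIT and HPSA_MAX_WAIT_INTERVAL_SECS:

	#include <stdio.h>

	#define RETRY_LIMIT        10   /* stand-in for HPSA_TUR_RETRY_LIMIT */
	#define MAX_INTERVAL_SECS  30   /* stand-in for HPSA_MAX_WAIT_INTERVAL_SECS */

	int main(void)
	{
		int waittime = 1;   /* seconds; doubles each try until it passes the cap */
		int count;

		for (count = 0; count < RETRY_LIMIT; count++) {
			printf("try %d: wait %d s\n", count + 1, waittime);
			if (waittime < MAX_INTERVAL_SECS)
				waittime *= 2;
		}
		return 0;
	}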
+
+/* Need at least one of these error handlers to keep ../scsi/hosts.c from
+ * complaining. Doing a host- or bus-reset can't do anything good here.
+ */
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
+{
+ int rc;
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *dev;
+
+ /* find the controller to which the command to be aborted was sent */
+ h = sdev_to_hba(scsicmd->device);
+ if (h == NULL) /* paranoia */
+ return FAILED;
+ dev_warn(&h->pdev->dev, "resetting drive\n");
+
+ dev = scsicmd->device->hostdata;
+ if (!dev) {
+ dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
+ "device lookup failed.\n");
+ return FAILED;
+ }
+ /* send a reset to the SCSI LUN which the command was sent to */
+ rc = hpsa_send_reset(h, dev->scsi3addr);
+ if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
+ return SUCCESS;
+
+ dev_warn(&h->pdev->dev, "resetting device failed.\n");
+ return FAILED;
+}
+
+/*
+ * For operations that cannot sleep, a command block is allocated at init,
+ * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
+ * which ones are free or in use. Lock must be held when calling this.
+ * cmd_free() is the complement.
+ */
+static struct CommandList *cmd_alloc(struct ctlr_info *h)
+{
+ struct CommandList *c;
+ int i;
+ union u64bit temp64;
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+
+ do {
+ i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+ if (i == h->nr_cmds)
+ return NULL;
+ } while (test_and_set_bit
+ (i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+ c = h->cmd_pool + i;
+ memset(c, 0, sizeof(*c));
+ cmd_dma_handle = h->cmd_pool_dhandle
+ + i * sizeof(*c);
+ c->err_info = h->errinfo_pool + i;
+ memset(c->err_info, 0, sizeof(*c->err_info));
+ err_dma_handle = h->errinfo_pool_dhandle
+ + i * sizeof(*c->err_info);
+ h->nr_allocs++;
+
+ c->cmdindex = i;
+
+ INIT_HLIST_NODE(&c->list);
+ c->busaddr = (__u32) cmd_dma_handle;
+ temp64.val = (__u64) err_dma_handle;
+ c->ErrDesc.Addr.lower = temp64.val32.lower;
+ c->ErrDesc.Addr.upper = temp64.val32.upper;
+ c->ErrDesc.Len = sizeof(*c->err_info);
+
+ c->h = h;
+ return c;
+}
+
+/* For operations that can wait (i.e. where the allocation may sleep),
+ * this routine can be called. The lock need not be held to call
+ * cmd_special_alloc(). cmd_special_free() is the complement.
+ */
+static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
+{
+ struct CommandList *c;
+ union u64bit temp64;
+ dma_addr_t cmd_dma_handle, err_dma_handle;
+
+ c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
+ if (c == NULL)
+ return NULL;
+ memset(c, 0, sizeof(*c));
+
+ c->cmdindex = -1;
+
+ c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
+ &err_dma_handle);
+
+ if (c->err_info == NULL) {
+ pci_free_consistent(h->pdev,
+ sizeof(*c), c, cmd_dma_handle);
+ return NULL;
+ }
+ memset(c->err_info, 0, sizeof(*c->err_info));
+
+ INIT_HLIST_NODE(&c->list);
+ c->busaddr = (__u32) cmd_dma_handle;
+ temp64.val = (__u64) err_dma_handle;
+ c->ErrDesc.Addr.lower = temp64.val32.lower;
+ c->ErrDesc.Addr.upper = temp64.val32.upper;
+ c->ErrDesc.Len = sizeof(*c->err_info);
+
+ c->h = h;
+ return c;
+}
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c)
+{
+ int i;
+
+ i = c - h->cmd_pool;
+ clear_bit(i & (BITS_PER_LONG - 1),
+ h->cmd_pool_bits + (i / BITS_PER_LONG));
+ h->nr_frees++;
+}
+
+static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
+{
+ union u64bit temp64;
+
+ temp64.val32.lower = c->ErrDesc.Addr.lower;
+ temp64.val32.upper = c->ErrDesc.Addr.upper;
+ pci_free_consistent(h->pdev, sizeof(*c->err_info),
+ c->err_info, (dma_addr_t) temp64.val);
+ pci_free_consistent(h->pdev, sizeof(*c),
+ c, (dma_addr_t) c->busaddr);
+}
+
+#ifdef CONFIG_COMPAT
+
+static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = hpsa_ioctl(dev, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
+static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ int cmd, void *arg);
+
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+ switch (cmd) {
+ case CCISS_GETPCIINFO:
+ case CCISS_GETINTINFO:
+ case CCISS_SETINTINFO:
+ case CCISS_GETNODENAME:
+ case CCISS_SETNODENAME:
+ case CCISS_GETHEARTBEAT:
+ case CCISS_GETBUSTYPES:
+ case CCISS_GETFIRMVER:
+ case CCISS_GETDRIVVER:
+ case CCISS_REVALIDVOLS:
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ case CCISS_RESCANDISK:
+ case CCISS_GETLUNINFO:
+ return do_ioctl(dev, cmd, arg);
+
+ case CCISS_PASSTHRU32:
+ return hpsa_ioctl32_passthru(dev, cmd, arg);
+ case CCISS_BIG_PASSTHRU32:
+ return hpsa_ioctl32_big_passthru(dev, cmd, arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
+{
+ IOCTL32_Command_struct __user *arg32 =
+ (IOCTL32_Command_struct __user *) arg;
+ IOCTL_Command_struct arg64;
+ IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+ int err;
+ u32 cp;
+
+ err = 0;
+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+ sizeof(arg64.LUN_info));
+ err |= copy_from_user(&arg64.Request, &arg32->Request,
+ sizeof(arg64.Request));
+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+ sizeof(arg64.error_info));
+ err |= get_user(arg64.buf_size, &arg32->buf_size);
+ err |= get_user(cp, &arg32->buf);
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+ if (err)
+ return -EFAULT;
+
+ err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
+ if (err)
+ return err;
+ err |= copy_in_user(&arg32->error_info, &p->error_info,
+ sizeof(arg32->error_info));
+ if (err)
+ return -EFAULT;
+ return err;
+}
+
+static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+ int cmd, void *arg)
+{
+ BIG_IOCTL32_Command_struct __user *arg32 =
+ (BIG_IOCTL32_Command_struct __user *) arg;
+ BIG_IOCTL_Command_struct arg64;
+ BIG_IOCTL_Command_struct __user *p =
+ compat_alloc_user_space(sizeof(arg64));
+ int err;
+ u32 cp;
+
+ err = 0;
+ err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+ sizeof(arg64.LUN_info));
+ err |= copy_from_user(&arg64.Request, &arg32->Request,
+ sizeof(arg64.Request));
+ err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+ sizeof(arg64.error_info));
+ err |= get_user(arg64.buf_size, &arg32->buf_size);
+ err |= get_user(arg64.malloc_size, &arg32->malloc_size);
+ err |= get_user(cp, &arg32->buf);
+ arg64.buf = compat_ptr(cp);
+ err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+ if (err)
+ return -EFAULT;
+
+ err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
+ if (err)
+ return err;
+ err |= copy_in_user(&arg32->error_info, &p->error_info,
+ sizeof(arg32->error_info));
+ if (err)
+ return -EFAULT;
+ return err;
+}
+#endif
+
+static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ struct hpsa_pci_info pciinfo;
+
+ if (!argp)
+ return -EINVAL;
+ pciinfo.domain = pci_domain_nr(h->pdev->bus);
+ pciinfo.bus = h->pdev->bus->number;
+ pciinfo.dev_fn = h->pdev->devfn;
+ pciinfo.board_id = h->board_id;
+ if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
+ return -EFAULT;
+ return 0;
+}
+
+static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ DriverVer_type DriverVer;
+ unsigned char vmaj, vmin, vsubmin;
+ int rc;
+
+ rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
+ &vmaj, &vmin, &vsubmin);
+ if (rc != 3) {
+ dev_info(&h->pdev->dev, "driver version string '%s' "
+ "unrecognized.", HPSA_DRIVER_VERSION);
+ vmaj = 0;
+ vmin = 0;
+ vsubmin = 0;
+ }
+ DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
+ if (!argp)
+ return -EINVAL;
+ if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
+ return -EFAULT;
+ return 0;
+}
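For illustration (not part of the patch): hpsa_getdrivver_ioctl() above packs the major/minor/subminor version into one word as (major << 16) | (minor << 8) | subminor. A tiny standalone sketch of the pack and unpack, using an example version string:

	#include <stdio.h>

	int main(void)
	{
		unsigned char vmaj, vmin, vsubmin;
		unsigned int ver;

		if (sscanf("1.0.0", "%hhu.%hhu.%hhu", &vmaj, &vmin, &vsubmin) != 3)
			return 1;

		ver = (vmaj << 16) | (vmin << 8) | vsubmin;
		printf("packed=0x%06x major=%u minor=%u subminor=%u\n",
		       ver, (ver >> 16) & 0xff, (ver >> 8) & 0xff, ver & 0xff);
		/* prints packed=0x010000 major=1 minor=0 subminor=0 */
		return 0;
	}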
+
+static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ IOCTL_Command_struct iocommand;
+ struct CommandList *c;
+ char *buff = NULL;
+ union u64bit temp64;
+
+ if (!argp)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
+ return -EFAULT;
+ if ((iocommand.buf_size < 1) &&
+ (iocommand.Request.Type.Direction != XFER_NONE)) {
+ return -EINVAL;
+ }
+ if (iocommand.buf_size > 0) {
+ buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+ if (buff == NULL)
+ return -ENOMEM;
+ }
+ if (iocommand.Request.Type.Direction == XFER_WRITE) {
+ /* Copy the data into the buffer we created */
+ if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
+ kfree(buff);
+ return -EFAULT;
+ }
+ } else
+ memset(buff, 0, iocommand.buf_size);
+ c = cmd_special_alloc(h);
+ if (c == NULL) {
+ kfree(buff);
+ return -ENOMEM;
+ }
+ /* Fill in the command type */
+ c->cmd_type = CMD_IOCTL_PEND;
+ /* Fill in Command Header */
+ c->Header.ReplyQueue = 0; /* unused in simple mode */
+ if (iocommand.buf_size > 0) { /* buffer to fill */
+ c->Header.SGList = 1;
+ c->Header.SGTotal = 1;
+ } else { /* no buffers to fill */
+ c->Header.SGList = 0;
+ c->Header.SGTotal = 0;
+ }
+ memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
+ /* use the kernel address of the cmd block for tag */
+ c->Header.Tag.lower = c->busaddr;
+
+ /* Fill in Request block */
+ memcpy(&c->Request, &iocommand.Request,
+ sizeof(c->Request));
+
+ /* Fill in the scatter gather information */
+ if (iocommand.buf_size > 0) {
+ temp64.val = pci_map_single(h->pdev, buff,
+ iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ c->SG[0].Addr.lower = temp64.val32.lower;
+ c->SG[0].Addr.upper = temp64.val32.upper;
+ c->SG[0].Len = iocommand.buf_size;
+ c->SG[0].Ext = 0; /* we are not chaining*/
+ }
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ check_ioctl_unit_attention(h, c);
+
+ /* Copy the error information out */
+ memcpy(&iocommand.error_info, c->err_info,
+ sizeof(iocommand.error_info));
+ if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
+ kfree(buff);
+ cmd_special_free(h, c);
+ return -EFAULT;
+ }
+
+ if (iocommand.Request.Type.Direction == XFER_READ) {
+ /* Copy the data out of the buffer we created */
+ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
+ kfree(buff);
+ cmd_special_free(h, c);
+ return -EFAULT;
+ }
+ }
+ kfree(buff);
+ cmd_special_free(h, c);
+ return 0;
+}
+
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+ BIG_IOCTL_Command_struct *ioc;
+ struct CommandList *c;
+ unsigned char **buff = NULL;
+ int *buff_size = NULL;
+ union u64bit temp64;
+ BYTE sg_used = 0;
+ int status = 0;
+ int i;
+ __u32 left;
+ __u32 sz;
+ BYTE __user *data_ptr;
+
+ if (!argp)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ if ((ioc->buf_size < 1) &&
+ (ioc->Request.Type.Direction != XFER_NONE)) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ /* Check kmalloc limits using all SGs */
+ if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+ status = -EINVAL;
+ goto cleanup1;
+ }
+ buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+ if (!buff) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
+ if (!buff_size) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ left = ioc->buf_size;
+ data_ptr = ioc->buf;
+ while (left) {
+ sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+ buff_size[sg_used] = sz;
+ buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+ if (buff[sg_used] == NULL) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ if (ioc->Request.Type.Direction == XFER_WRITE) {
+ if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ } else
+ memset(buff[sg_used], 0, sz);
+ left -= sz;
+ data_ptr += sz;
+ sg_used++;
+ }
+ c = cmd_special_alloc(h);
+ if (c == NULL) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ c->cmd_type = CMD_IOCTL_PEND;
+ c->Header.ReplyQueue = 0;
+
+ if (ioc->buf_size > 0) {
+ c->Header.SGList = sg_used;
+ c->Header.SGTotal = sg_used;
+ } else {
+ c->Header.SGList = 0;
+ c->Header.SGTotal = 0;
+ }
+ memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
+ c->Header.Tag.lower = c->busaddr;
+ memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
+ if (ioc->buf_size > 0) {
+ int i;
+ for (i = 0; i < sg_used; i++) {
+ temp64.val = pci_map_single(h->pdev, buff[i],
+ buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ c->SG[i].Addr.lower = temp64.val32.lower;
+ c->SG[i].Addr.upper = temp64.val32.upper;
+ c->SG[i].Len = buff_size[i];
+ /* we are not chaining */
+ c->SG[i].Ext = 0;
+ }
+ }
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ check_ioctl_unit_attention(h, c);
+ /* Copy the error information out */
+ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
+ if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+ cmd_special_free(h, c);
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ if (ioc->Request.Type.Direction == XFER_READ) {
+ /* Copy the data out of the buffer we created */
+ BYTE __user *ptr = ioc->buf;
+ for (i = 0; i < sg_used; i++) {
+ if (copy_to_user(ptr, buff[i], buff_size[i])) {
+ cmd_special_free(h, c);
+ status = -EFAULT;
+ goto cleanup1;
+ }
+ ptr += buff_size[i];
+ }
+ }
+ cmd_special_free(h, c);
+ status = 0;
+cleanup1:
+ if (buff) {
+ for (i = 0; i < sg_used; i++)
+ kfree(buff[i]);
+ kfree(buff);
+ }
+ kfree(buff_size);
+ kfree(ioc);
+ return status;
+}
+
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+ c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
+ (void) check_for_unit_attention(h, c);
+}
+/*
+ * ioctl
+ */
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+ struct ctlr_info *h;
+ void __user *argp = (void __user *)arg;
+
+ h = sdev_to_hba(dev);
+
+ switch (cmd) {
+ case CCISS_DEREGDISK:
+ case CCISS_REGNEWDISK:
+ case CCISS_REGNEWD:
+ hpsa_update_scsi_devices(h, dev->host->host_no);
+ return 0;
+ case CCISS_GETPCIINFO:
+ return hpsa_getpciinfo_ioctl(h, argp);
+ case CCISS_GETDRIVVER:
+ return hpsa_getdrivver_ioctl(h, argp);
+ case CCISS_PASSTHRU:
+ return hpsa_passthru_ioctl(h, argp);
+ case CCISS_BIG_PASSTHRU:
+ return hpsa_big_passthru_ioctl(h, argp);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+ void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+ int cmd_type)
+{
+ int pci_dir = XFER_NONE;
+
+ c->cmd_type = CMD_IOCTL_PEND;
+ c->Header.ReplyQueue = 0;
+ if (buff != NULL && size > 0) {
+ c->Header.SGList = 1;
+ c->Header.SGTotal = 1;
+ } else {
+ c->Header.SGList = 0;
+ c->Header.SGTotal = 0;
+ }
+ c->Header.Tag.lower = c->busaddr;
+ memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
+
+ c->Request.Type.Type = cmd_type;
+ if (cmd_type == TYPE_CMD) {
+ switch (cmd) {
+ case HPSA_INQUIRY:
+ /* are we trying to read a vital product page */
+ if (page_code != 0) {
+ c->Request.CDB[1] = 0x01;
+ c->Request.CDB[2] = page_code;
+ }
+ c->Request.CDBLen = 6;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = HPSA_INQUIRY;
+ c->Request.CDB[4] = size & 0xFF;
+ break;
+ case HPSA_REPORT_LOG:
+ case HPSA_REPORT_PHYS:
+ /* Talking to the controller, so it's a physical command:
+ mode = 00, target = 0. Nothing to write.
+ */
+ c->Request.CDBLen = 12;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+ c->Request.CDB[7] = (size >> 16) & 0xFF;
+ c->Request.CDB[8] = (size >> 8) & 0xFF;
+ c->Request.CDB[9] = size & 0xFF;
+ break;
+
+ case HPSA_READ_CAPACITY:
+ c->Request.CDBLen = 10;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_READ;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ break;
+ case HPSA_CACHE_FLUSH:
+ c->Request.CDBLen = 12;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_WRITE;
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = BMIC_WRITE;
+ c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ break;
+ case TEST_UNIT_READY:
+ c->Request.CDBLen = 6;
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_NONE;
+ c->Request.Timeout = 0;
+ break;
+ default:
+ dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
+ BUG();
+ return;
+ }
+ } else if (cmd_type == TYPE_MSG) {
+ switch (cmd) {
+
+ case HPSA_DEVICE_RESET_MSG:
+ c->Request.CDBLen = 16;
+ c->Request.Type.Type = 1; /* It is a MSG not a CMD */
+ c->Request.Type.Attribute = ATTR_SIMPLE;
+ c->Request.Type.Direction = XFER_NONE;
+ c->Request.Timeout = 0; /* Don't time out */
+ c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
+ c->Request.CDB[1] = 0x03; /* Reset target above */
+ /* If bytes 4-7 are zero, it means reset the */
+ /* LunID device */
+ c->Request.CDB[4] = 0x00;
+ c->Request.CDB[5] = 0x00;
+ c->Request.CDB[6] = 0x00;
+ c->Request.CDB[7] = 0x00;
+ break;
+
+ default:
+ dev_warn(&h->pdev->dev, "unknown message type %d\n",
+ cmd);
+ BUG();
+ }
+ } else {
+ dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
+ BUG();
+ }
+
+ switch (c->Request.Type.Direction) {
+ case XFER_READ:
+ pci_dir = PCI_DMA_FROMDEVICE;
+ break;
+ case XFER_WRITE:
+ pci_dir = PCI_DMA_TODEVICE;
+ break;
+ case XFER_NONE:
+ pci_dir = PCI_DMA_NONE;
+ break;
+ default:
+ pci_dir = PCI_DMA_BIDIRECTIONAL;
+ }
+
+ hpsa_map_one(h->pdev, c, buff, size, pci_dir);
+
+ return;
+}
+
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
+static void __iomem *remap_pci_mem(ulong base, ulong size)
+{
+ ulong page_base = ((ulong) base) & PAGE_MASK;
+ ulong page_offs = ((ulong) base) - page_base;
+ void __iomem *page_remapped = ioremap(page_base, page_offs + size);
+
+ return page_remapped ? (page_remapped + page_offs) : NULL;
+}
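+/* A worked example of the mapping above (a sketch only; the address is
+ * illustrative and assumes 4 KiB pages): for base = 0xfe001040,
+ * page_base = 0xfe001000 and page_offs = 0x40, so ioremap() starts at
+ * the page boundary and the caller gets the returned mapping plus 0x40.
+ */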
+
+/* Takes cmds off the submission queue and sends them to the hardware,
+ * then puts them on the queue of cmds waiting for completion.
+ */
+static void start_io(struct ctlr_info *h)
+{
+ struct CommandList *c;
+
+ while (!hlist_empty(&h->reqQ)) {
+ c = hlist_entry(h->reqQ.first, struct CommandList, list);
+ /* can't do anything if fifo is full */
+ if ((h->access.fifo_full(h))) {
+ dev_warn(&h->pdev->dev, "fifo full\n");
+ break;
+ }
+
+ /* Get the first entry from the Request Q */
+ removeQ(c);
+ h->Qdepth--;
+
+ /* Tell the controller to execute the command */
+ h->access.submit_command(h, c);
+
+ /* Put job onto the completed Q */
+ addQ(&h->cmpQ, c);
+ }
+}
+
+static inline unsigned long get_next_completion(struct ctlr_info *h)
+{
+ return h->access.command_completed(h);
+}
+
+static inline int interrupt_pending(struct ctlr_info *h)
+{
+ return h->access.intr_pending(h);
+}
+
+static inline long interrupt_not_for_us(struct ctlr_info *h)
+{
+ return ((h->access.intr_pending(h) == 0) ||
+ (h->interrupts_enabled == 0));
+}
+
+static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
+ __u32 raw_tag)
+{
+ if (unlikely(tag_index >= h->nr_cmds)) {
+ dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+ return 1;
+ }
+ return 0;
+}
+
+static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
+{
+ removeQ(c);
+ if (likely(c->cmd_type == CMD_SCSI))
+ complete_scsi_command(c, 0, raw_tag);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+ complete(c->waiting);
+}
+
+static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+{
+ struct ctlr_info *h = dev_id;
+ struct CommandList *c;
+ unsigned long flags;
+ __u32 raw_tag, tag, tag_index;
+ struct hlist_node *tmp;
+
+ if (interrupt_not_for_us(h))
+ return IRQ_NONE;
+ spin_lock_irqsave(&h->lock, flags);
+ while (interrupt_pending(h)) {
+ while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
+ if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
+ tag_index = HPSA_TAG_TO_INDEX(raw_tag);
+ if (bad_tag(h, tag_index, raw_tag))
+ return IRQ_HANDLED;
+ c = h->cmd_pool + tag_index;
+ finish_cmd(c, raw_tag);
+ continue;
+ }
+ tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
+ c = NULL;
+ hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ if (c->busaddr == tag) {
+ finish_cmd(c, raw_tag);
+ break;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/* Send a message CDB to the firmware. */
+static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ unsigned char type)
+{
+ struct Command {
+ struct CommandListHeader CommandHeader;
+ struct RequestBlock Request;
+ struct ErrDescriptor ErrorDescriptor;
+ };
+ struct Command *cmd;
+ static const size_t cmd_sz = sizeof(*cmd) +
+ sizeof(cmd->ErrorDescriptor);
+ dma_addr_t paddr64;
+ uint32_t paddr32, tag;
+ void __iomem *vaddr;
+ int i, err;
+
+ vaddr = pci_ioremap_bar(pdev, 0);
+ if (vaddr == NULL)
+ return -ENOMEM;
+
+ /* The Inbound Post Queue only accepts 32-bit physical addresses for the
+ * CCISS commands, so they must be allocated from the lower 4GiB of
+ * memory.
+ */
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ iounmap(vaddr);
+ return -ENOMEM;
+ }
+
+ cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+ if (cmd == NULL) {
+ iounmap(vaddr);
+ return -ENOMEM;
+ }
+
+ /* This must fit, because of the 32-bit consistent DMA mask. Also,
+ * although there's no guarantee, we assume that the address is at
+ * least 4-byte aligned (most likely, it's page-aligned).
+ */
+ paddr32 = paddr64;
+
+ cmd->CommandHeader.ReplyQueue = 0;
+ cmd->CommandHeader.SGList = 0;
+ cmd->CommandHeader.SGTotal = 0;
+ cmd->CommandHeader.Tag.lower = paddr32;
+ cmd->CommandHeader.Tag.upper = 0;
+ memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
+
+ cmd->Request.CDBLen = 16;
+ cmd->Request.Type.Type = TYPE_MSG;
+ cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
+ cmd->Request.Type.Direction = XFER_NONE;
+ cmd->Request.Timeout = 0; /* Don't time out */
+ cmd->Request.CDB[0] = opcode;
+ cmd->Request.CDB[1] = type;
+ memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
+ cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
+ cmd->ErrorDescriptor.Addr.upper = 0;
+ cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
+
+ writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
+
+ for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
+ tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+ if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
+ break;
+ msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
+ }
+
+ iounmap(vaddr);
+
+ /* we leak the DMA buffer here ... no choice since the controller could
+ * still complete the command.
+ */
+ if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
+ dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
+ opcode, type);
+ return -ETIMEDOUT;
+ }
+
+ pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+
+ if (tag & HPSA_ERROR_BIT) {
+ dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
+ opcode, type);
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
+ opcode, type);
+ return 0;
+}
+
+#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
+#define hpsa_noop(p) hpsa_message(p, 3, 0)
+
+static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
+{
+/* the #defines are stolen from drivers/pci/msi.h. */
+#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+
+ int pos;
+ u16 control = 0;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos), &control);
+ if (control & PCI_MSI_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI\n");
+ pci_write_config_word(pdev, msi_control_reg(pos),
+ control & ~PCI_MSI_FLAGS_ENABLE);
+ }
+ }
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos), &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev, msi_control_reg(pos),
+ control & ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+
+ return 0;
+}
+
+/* This does a hard reset of the controller using PCI power management
+ * states.
+ */
+static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
+{
+ u16 pmcsr, saved_config_space[32];
+ int i, pos;
+
+ dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+
+ /* This is very nearly the same thing as
+ *
+ * pci_save_state(pci_dev);
+ * pci_set_power_state(pci_dev, PCI_D3hot);
+ * pci_set_power_state(pci_dev, PCI_D0);
+ * pci_restore_state(pci_dev);
+ *
+ * but we can't use these nice canned kernel routines on
+ * kexec, because they also check the MSI/MSI-X state in PCI
+ * configuration space and do the wrong thing when it is
+ * set/cleared. Also, the pci_save/restore_state functions
+ * violate the ordering requirements for restoring the
+ * configuration space from the CCISS document (see the
+ * comment below). So we roll our own ....
+ */
+
+ for (i = 0; i < 32; i++)
+ pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pos == 0) {
+ dev_err(&pdev->dev,
+ "hpsa_reset_controller: PCI PM not supported\n");
+ return -ENODEV;
+ }
+
+ /* Quoting from the Open CISS Specification: "The Power
+ * Management Control/Status Register (CSR) controls the power
+ * state of the device. The normal operating state is D0,
+ * CSR=00h. The software off state is D3, CSR=03h. To reset
+ * the controller, place the interface device in D3 then to
+ * D0, this causes a secondary PCI reset which will reset the
+ * controller."
+ */
+
+ /* enter the D3hot power management state */
+ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D3hot;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+ msleep(500);
+
+ /* enter the D0 power management state */
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= PCI_D0;
+ pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+ msleep(500);
+
+ /* Restore the PCI configuration space. The Open CISS
+ * Specification says, "Restore the PCI Configuration
+ * Registers, offsets 00h through 60h. It is important to
+ * restore the command register, 16-bits at offset 04h,
+ * last. Do not restore the configuration status register,
+ * 16-bits at offset 06h." Note that the offset is 2*i.
+ */
+ for (i = 0; i < 32; i++) {
+ if (i == 2 || i == 3)
+ continue;
+ pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ }
+ wmb();
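+ /* Note: saved_config_space[2] is the command register at offset 04h,
+ * restored last per the spec quoted above; index 3 (the status
+ * register at offset 06h) is deliberately never written back.
+ */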
+ pci_write_config_word(pdev, 4, saved_config_space[2]);
+
+ return 0;
+}
+
+/*
+ * We cannot read the structure directly; for portability we must use
+ * the io accessor functions.
+ * This is for debug only.
+ */
+#ifdef HPSA_DEBUG
+static void print_cfg_table(struct device *dev, struct CfgTable *tb)
+{
+ int i;
+ char temp_name[17];
+
+ dev_info(dev, "Controller Configuration information\n");
+ dev_info(dev, "------------------------------------\n");
+ for (i = 0; i < 4; i++)
+ temp_name[i] = readb(&(tb->Signature[i]));
+ temp_name[4] = '\0';
+ dev_info(dev, " Signature = %s\n", temp_name);
+ dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
+ dev_info(dev, " Transport methods supported = 0x%x\n",
+ readl(&(tb->TransportSupport)));
+ dev_info(dev, " Transport methods active = 0x%x\n",
+ readl(&(tb->TransportActive)));
+ dev_info(dev, " Requested transport Method = 0x%x\n",
+ readl(&(tb->HostWrite.TransportRequest)));
+ dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
+ readl(&(tb->HostWrite.CoalIntDelay)));
+ dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
+ readl(&(tb->HostWrite.CoalIntCount)));
+ dev_info(dev, " Max outstanding commands = 0x%d\n",
+ readl(&(tb->CmdsOutMax)));
+ dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
+ for (i = 0; i < 16; i++)
+ temp_name[i] = readb(&(tb->ServerName[i]));
+ temp_name[16] = '\0';
+ dev_info(dev, " Server Name = %s\n", temp_name);
+ dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
+ readl(&(tb->HeartBeat)));
+}
+#endif /* HPSA_DEBUG */
+
+static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+{
+ int i, offset, mem_type, bar_type;
+
+ if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
+ return 0;
+ offset = 0;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
+ if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
+ offset += 4;
+ else {
+ mem_type = pci_resource_flags(pdev, i) &
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ offset += 4; /* 32 bit */
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ offset += 8;
+ break;
+ default: /* reserved in PCI 2.2 */
+ dev_warn(&pdev->dev,
+ "base address is invalid\n");
+ return -1;
+ break;
+ }
+ }
+ if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
+ return i + 1;
+ }
+ return -1;
+}
+
+/* If MSI/MSI-X is supported by the kernel we will try to enable it on
+ * controllers that are capable. If not, we use IO-APIC mode.
+ */
+
+static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+ struct pci_dev *pdev, __u32 board_id)
+{
+#ifdef CONFIG_PCI_MSI
+ int err;
+ struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
+ {0, 2}, {0, 3}
+ };
+
+ /* Some boards advertise MSI but don't really support it */
+ if ((board_id == 0x40700E11) ||
+ (board_id == 0x40800E11) ||
+ (board_id == 0x40820E11) || (board_id == 0x40830E11))
+ goto default_int_mode;
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+ dev_info(&pdev->dev, "MSIX\n");
+ err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
+ if (!err) {
+ h->intr[0] = hpsa_msix_entries[0].vector;
+ h->intr[1] = hpsa_msix_entries[1].vector;
+ h->intr[2] = hpsa_msix_entries[2].vector;
+ h->intr[3] = hpsa_msix_entries[3].vector;
+ h->msix_vector = 1;
+ return;
+ }
+ if (err > 0) {
+ dev_warn(&pdev->dev, "only %d MSI-X vectors "
+ "available\n", err);
+ goto default_int_mode;
+ } else {
+ dev_warn(&pdev->dev, "MSI-X init failed %d\n",
+ err);
+ goto default_int_mode;
+ }
+ }
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
+ dev_info(&pdev->dev, "MSI\n");
+ if (!pci_enable_msi(pdev))
+ h->msi_vector = 1;
+ else
+ dev_warn(&pdev->dev, "MSI init failed\n");
+ }
+default_int_mode:
+#endif /* CONFIG_PCI_MSI */
+ /* if we get here we're going to use the default interrupt mode */
+ h->intr[SIMPLE_MODE_INT] = pdev->irq;
+ return;
+}
+
+static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+{
+ ushort subsystem_vendor_id, subsystem_device_id, command;
+ __u32 board_id, scratchpad = 0;
+ __u64 cfg_offset;
+ __u32 cfg_base_addr;
+ __u64 cfg_base_addr_index;
+ int i, prod_index, err;
+
+ subsystem_vendor_id = pdev->subsystem_vendor;
+ subsystem_device_id = pdev->subsystem_device;
+ board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+ subsystem_vendor_id);
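+ /* For example (illustrative values): a subsystem device id of 0x3225
+ * combined with HP's 0x103c subsystem vendor id yields board_id
+ * 0x3225103c, the P600 special-cased further down.
+ */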
+
+ for (i = 0; i < ARRAY_SIZE(products); i++)
+ if (board_id == products[i].board_id)
+ break;
+
+ prod_index = i;
+
+ if (prod_index == ARRAY_SIZE(products)) {
+ prod_index--;
+ if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
+ !hpsa_allow_any) {
+ dev_warn(&pdev->dev, "unrecognized board ID:"
+ " 0x%08lx, ignoring.\n",
+ (unsigned long) board_id);
+ return -ENODEV;
+ }
+ }
+ /* check to see if controller has been disabled
+ * BEFORE trying to enable it
+ */
+ (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (!(command & 0x02)) {
+ dev_warn(&pdev->dev, "controller appears to be disabled\n");
+ return -ENODEV;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_warn(&pdev->dev, "unable to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, "hpsa");
+ if (err) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
+ return err;
+ }
+
+ /* If the kernel supports MSI/MSI-X we will try to enable that,
+ * else we use the IO-APIC interrupt assigned to us by system ROM.
+ */
+ hpsa_interrupt_mode(h, pdev, board_id);
+
+ /* find the memory BAR */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
+ break;
+ }
+ if (i == DEVICE_COUNT_RESOURCE) {
+ dev_warn(&pdev->dev, "no memory BAR found\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+
+ h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
+ * already removed
+ */
+
+ h->vaddr = remap_pci_mem(h->paddr, 0x250);
+
+ /* Wait for the board to become ready. */
+ for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
+ scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (scratchpad == HPSA_FIRMWARE_READY)
+ break;
+ msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
+ }
+ if (scratchpad != HPSA_FIRMWARE_READY) {
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+
+ /* get the address index number */
+ cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+ cfg_base_addr &= (__u32) 0x0000ffff;
+ cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
+ if (cfg_base_addr_index == -1) {
+ dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+
+ cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
+ h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
+ cfg_base_addr_index) + cfg_offset,
+ sizeof(h->cfgtable));
+ h->board_id = board_id;
+
+ /* Query controller for max supported commands: */
+ h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
+ h->product_name = products[prod_index].product_name;
+ h->access = *(products[prod_index].access);
+ /* Allow room for some ioctls */
+ h->nr_cmds = h->max_commands - 4;
+
+ if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+ (readb(&h->cfgtable->Signature[1]) != 'I') ||
+ (readb(&h->cfgtable->Signature[2]) != 'S') ||
+ (readb(&h->cfgtable->Signature[3]) != 'S')) {
+ dev_warn(&pdev->dev, "not a valid CISS config table\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+#ifdef CONFIG_X86
+ {
+ /* Need to enable prefetch in the SCSI core for 6400 in x86 */
+ __u32 prefetch;
+ prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+ prefetch |= 0x100;
+ writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
+ }
+#endif
+
+ /* Disabling DMA prefetch for the P600
+ * An ASIC bug may result in a prefetch beyond
+ * physical memory.
+ */
+ if (board_id == 0x3225103C) {
+ __u32 dma_prefetch;
+ dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+ dma_prefetch |= 0x8000;
+ writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+ }
+
+ h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+ /* Update the field, and then ring the doorbell */
+ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+ writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+
+ /* Under certain very rare conditions, this can take a while.
+ * (e.g.: hot-replacing a failed 144GB drive in a RAID 5 set right
+ * as we enter this code.)
+ */
+ for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+ if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ break;
+ /* delay and try again */
+ msleep(10);
+ }
+
+#ifdef HPSA_DEBUG
+ print_cfg_table(&pdev->dev, h->cfgtable);
+#endif /* HPSA_DEBUG */
+
+ if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+ dev_warn(&pdev->dev, "unable to get board into simple mode\n");
+ err = -ENODEV;
+ goto err_out_free_res;
+ }
+ return 0;
+
+err_out_free_res:
+ /*
+ * Deliberately omit pci_disable_device(): it does something nasty to
+ * Smart Array controllers that pci_enable_device does not undo
+ */
+ pci_release_regions(pdev);
+ return err;
+}
+
+static int __devinit hpsa_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int i;
+ int dac;
+ struct ctlr_info *h;
+
+ if (number_of_controllers == 0)
+ printk(KERN_INFO DRIVER_NAME "\n");
+ if (reset_devices) {
+ /* Reset the controller with a PCI power-cycle */
+ if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
+ return -ENODEV;
+
+ /* Some devices (notably the HP Smart Array 5i Controller)
+ need a little pause here */
+ msleep(HPSA_POST_RESET_PAUSE_MSECS);
+
+ /* Now try to get the controller to respond to a no-op */
+ for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
+ if (hpsa_noop(pdev) == 0)
+ break;
+ else
+ dev_warn(&pdev->dev, "no-op failed%s\n",
+ (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
+ }
+ }
+
+ BUILD_BUG_ON(sizeof(struct CommandList) % 8);
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -1;
+
+ h->busy_initializing = 1;
+ INIT_HLIST_HEAD(&h->cmpQ);
+ INIT_HLIST_HEAD(&h->reqQ);
+ mutex_init(&h->busy_shutting_down);
+ init_completion(&h->scan_wait);
+ if (hpsa_pci_init(h, pdev) != 0)
+ goto clean1;
+
+ sprintf(h->devname, "hpsa%d", number_of_controllers);
+ h->ctlr = number_of_controllers;
+ number_of_controllers++;
+ h->pdev = pdev;
+
+ /* configure PCI DMA stuff */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ dac = 1;
+ else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ dac = 0;
+ else {
+ dev_err(&pdev->dev, "no suitable DMA available\n");
+ goto clean1;
+ }
+
+ /* make sure the board interrupts are off */
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
+ IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
+ dev_err(&pdev->dev, "unable to get irq %d for %s\n",
+ h->intr[SIMPLE_MODE_INT], h->devname);
+ goto clean2;
+ }
+
+ dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+ h->devname, pdev->device, pci_name(pdev),
+ h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+
+ h->cmd_pool_bits =
+ kmalloc(((h->nr_cmds + BITS_PER_LONG -
+ 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
+ h->cmd_pool = pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->cmd_pool),
+ &(h->cmd_pool_dhandle));
+ h->errinfo_pool = pci_alloc_consistent(h->pdev,
+ h->nr_cmds * sizeof(*h->errinfo_pool),
+ &(h->errinfo_pool_dhandle));
+ if ((h->cmd_pool_bits == NULL)
+ || (h->cmd_pool == NULL)
+ || (h->errinfo_pool == NULL)) {
+ dev_err(&pdev->dev, "out of memory");
+ goto clean4;
+ }
+ spin_lock_init(&h->lock);
+
+ pci_set_drvdata(pdev, h);
+ memset(h->cmd_pool_bits, 0,
+ ((h->nr_cmds + BITS_PER_LONG -
+ 1) / BITS_PER_LONG) * sizeof(unsigned long));
+
+ hpsa_scsi_setup(h);
+
+ /* Turn the interrupts on so we can service requests */
+ h->access.set_intr_mask(h, HPSA_INTR_ON);
+
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+ h->busy_initializing = 0;
+ return 1;
+
+clean4:
+ kfree(h->cmd_pool_bits);
+ if (h->cmd_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct CommandList),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ if (h->errinfo_pool)
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct ErrorInfo),
+ h->errinfo_pool,
+ h->errinfo_pool_dhandle);
+ free_irq(h->intr[SIMPLE_MODE_INT], h);
+clean2:
+clean1:
+ h->busy_initializing = 0;
+ kfree(h);
+ return -1;
+}
+
+static void hpsa_flush_cache(struct ctlr_info *h)
+{
+ char *flush_buf;
+ struct CommandList *c;
+
+ flush_buf = kzalloc(4, GFP_KERNEL);
+ if (!flush_buf)
+ return;
+
+ c = cmd_special_alloc(h);
+ if (!c) {
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ goto out_of_memory;
+ }
+ fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
+ if (c->err_info->CommandStatus != 0)
+ dev_warn(&h->pdev->dev,
+ "error flushing cache on controller\n");
+ cmd_special_free(h, c);
+out_of_memory:
+ kfree(flush_buf);
+}
+
+static void hpsa_shutdown(struct pci_dev *pdev)
+{
+ struct ctlr_info *h;
+
+ h = pci_get_drvdata(pdev);
+ /* Turn board interrupts off and send the flush cache command:
+ * sendcmd will turn off interrupts and send the flush,
+ * writing all data in the battery-backed cache out to disk.
+ */
+ hpsa_flush_cache(h);
+ h->access.set_intr_mask(h, HPSA_INTR_OFF);
+ free_irq(h->intr[2], h);
+#ifdef CONFIG_PCI_MSI
+ if (h->msix_vector)
+ pci_disable_msix(h->pdev);
+ else if (h->msi_vector)
+ pci_disable_msi(h->pdev);
+#endif /* CONFIG_PCI_MSI */
+}
+
+static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+{
+ struct ctlr_info *h;
+
+ if (pci_get_drvdata(pdev) == NULL) {
+ dev_err(&pdev->dev, "unable to remove device \n");
+ return;
+ }
+ h = pci_get_drvdata(pdev);
+ mutex_lock(&h->busy_shutting_down);
+ remove_from_scan_list(h);
+ hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
+ hpsa_shutdown(pdev);
+ iounmap(h->vaddr);
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct CommandList),
+ h->cmd_pool, h->cmd_pool_dhandle);
+ pci_free_consistent(h->pdev,
+ h->nr_cmds * sizeof(struct ErrorInfo),
+ h->errinfo_pool, h->errinfo_pool_dhandle);
+ kfree(h->cmd_pool_bits);
+ /*
+ * Deliberately omit pci_disable_device(): it does something nasty to
+ * Smart Array controllers that pci_enable_device does not undo
+ */
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ mutex_unlock(&h->busy_shutting_down);
+ kfree(h);
+}
+
+static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
+ __attribute__((unused)) pm_message_t state)
+{
+ return -ENOSYS;
+}
+
+static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
+{
+ return -ENOSYS;
+}
+
+static struct pci_driver hpsa_pci_driver = {
+ .name = "hpsa",
+ .probe = hpsa_init_one,
+ .remove = __devexit_p(hpsa_remove_one),
+ .id_table = hpsa_pci_device_id, /* id_table */
+ .shutdown = hpsa_shutdown,
+ .suspend = hpsa_suspend,
+ .resume = hpsa_resume,
+};
+
+/*
+ * This is it. Register the PCI driver information for the cards we control;
+ * the OS will call our registered routines when it finds one of our cards.
+ */
+static int __init hpsa_init(void)
+{
+ int err;
+ /* Start the scan thread */
+ hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
+ if (IS_ERR(hpsa_scan_thread)) {
+ err = PTR_ERR(hpsa_scan_thread);
+ return -ENODEV;
+ }
+ err = pci_register_driver(&hpsa_pci_driver);
+ if (err)
+ kthread_stop(hpsa_scan_thread);
+ return err;
+}
+
+static void __exit hpsa_cleanup(void)
+{
+ pci_unregister_driver(&hpsa_pci_driver);
+ kthread_stop(hpsa_scan_thread);
+}
+
+module_init(hpsa_init);
+module_exit(hpsa_cleanup);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
new file mode 100644
index 00000000000..6bd1949144b
--- /dev/null
+++ b/drivers/scsi/hpsa.h
@@ -0,0 +1,273 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_H
+#define HPSA_H
+
+#include <scsi/scsicam.h>
+
+#define IO_OK 0
+#define IO_ERROR 1
+
+struct ctlr_info;
+
+struct access_method {
+ void (*submit_command)(struct ctlr_info *h,
+ struct CommandList *c);
+ void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
+ unsigned long (*fifo_full)(struct ctlr_info *h);
+ unsigned long (*intr_pending)(struct ctlr_info *h);
+ unsigned long (*command_completed)(struct ctlr_info *h);
+};
+
+struct hpsa_scsi_dev_t {
+ int devtype;
+ int bus, target, lun; /* as presented to the OS */
+ unsigned char scsi3addr[8]; /* as presented to the HW */
+#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+ unsigned char device_id[16]; /* from inquiry pg. 0x83 */
+ unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
+ unsigned char model[16]; /* bytes 16-31 of inquiry data */
+ unsigned char revision[4]; /* bytes 32-35 of inquiry data */
+ unsigned char raid_level; /* from inquiry page 0xC1 */
+};
+
+struct ctlr_info {
+ int ctlr;
+ char devname[8];
+ char *product_name;
+ char firm_ver[4]; /* Firmware version */
+ struct pci_dev *pdev;
+ __u32 board_id;
+ void __iomem *vaddr;
+ unsigned long paddr;
+ int nr_cmds; /* Number of commands allowed on this controller */
+ struct CfgTable __iomem *cfgtable;
+ int interrupts_enabled;
+ int major;
+ int max_commands;
+ int commands_outstanding;
+ int max_outstanding; /* Debug */
+ int usage_count; /* number of opens on all minor devices */
+# define DOORBELL_INT 0
+# define PERF_MODE_INT 1
+# define SIMPLE_MODE_INT 2
+# define MEMQ_MODE_INT 3
+ unsigned int intr[4];
+ unsigned int msix_vector;
+ unsigned int msi_vector;
+ struct access_method access;
+
+ /* queue and queue Info */
+ struct hlist_head reqQ;
+ struct hlist_head cmpQ;
+ unsigned int Qdepth;
+ unsigned int maxQsinceinit;
+ unsigned int maxSG;
+ spinlock_t lock;
+
+ /* pointers to command and error info pool */
+ struct CommandList *cmd_pool;
+ dma_addr_t cmd_pool_dhandle;
+ struct ErrorInfo *errinfo_pool;
+ dma_addr_t errinfo_pool_dhandle;
+ unsigned long *cmd_pool_bits;
+ int nr_allocs;
+ int nr_frees;
+ int busy_initializing;
+ int busy_scanning;
+ struct mutex busy_shutting_down;
+ struct list_head scan_list;
+ struct completion scan_wait;
+
+ struct Scsi_Host *scsi_host;
+ spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
+ int ndevices; /* number of used elements in .dev[] array. */
+#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
+ struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+};
+#define HPSA_ABORT_MSG 0
+#define HPSA_DEVICE_RESET_MSG 1
+#define HPSA_BUS_RESET_MSG 2
+#define HPSA_HOST_RESET_MSG 3
+#define HPSA_MSG_SEND_RETRY_LIMIT 10
+#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
+
+/* Maximum time in seconds driver will wait for command completions
+ * when polling before giving up.
+ */
+#define HPSA_MAX_POLL_TIME_SECS (20)
+
+/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
+ * how many times to retry TEST UNIT READY on a device
+ * while waiting for it to become ready before giving up.
+ * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
+ * between sending TURs while waiting for a device
+ * to become ready.
+ */
+#define HPSA_TUR_RETRY_LIMIT (20)
+#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
+
+/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
+ * to become ready, in seconds, before giving up on it.
+ * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
+ * between polls of the board to see if it is ready, in
+ * milliseconds. HPSA_BOARD_READY_POLL_INTERVAL and
+ * HPSA_BOARD_READY_ITERATIONS are derived from those.
+ */
+#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
+#define HPSA_BOARD_READY_POLL_INTERVAL \
+ ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
+#define HPSA_BOARD_READY_ITERATIONS \
+ ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
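+/* Worked out with the values above (just the arithmetic, nothing new):
+ * HPSA_BOARD_READY_POLL_INTERVAL = (100 * HZ) / 1000 jiffies (100 ms),
+ * and HPSA_BOARD_READY_ITERATIONS = (120 * 1000) / 100 = 1200 polls
+ * before the board is declared not ready.
+ */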
+#define HPSA_POST_RESET_PAUSE_MSECS (3000)
+#define HPSA_POST_RESET_NOOP_RETRIES (12)
+
+/* Defining the different access_methods */
+/*
+ * Memory mapped FIFO interface (SMART 53xx cards)
+ */
+#define SA5_DOORBELL 0x20
+#define SA5_REQUEST_PORT_OFFSET 0x40
+#define SA5_REPLY_INTR_MASK_OFFSET 0x34
+#define SA5_REPLY_PORT_OFFSET 0x44
+#define SA5_INTR_STATUS 0x30
+#define SA5_SCRATCHPAD_OFFSET 0xB0
+
+#define SA5_CTCFG_OFFSET 0xB4
+#define SA5_CTMEM_OFFSET 0xB8
+
+#define SA5_INTR_OFF 0x08
+#define SA5B_INTR_OFF 0x04
+#define SA5_INTR_PENDING 0x08
+#define SA5B_INTR_PENDING 0x04
+#define FIFO_EMPTY 0xffffffff
+#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
+
+#define HPSA_ERROR_BIT 0x02
+#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
+#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
+#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
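+/* A worked example of the tag macros (the value is illustrative only):
+ * for raw_tag = 0x2c (binary 101100), bit 2 is set so the tag carries a
+ * command index, HPSA_TAG_TO_INDEX gives 0x2c >> 3 = 5, and discarding
+ * the two low error bits leaves 0x2c unchanged.
+ */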
+
+#define HPSA_INTR_ON 1
+#define HPSA_INTR_OFF 0
+/*
+ Send the command to the hardware
+*/
+static void SA5_submit_command(struct ctlr_info *h,
+ struct CommandList *c)
+{
+#ifdef HPSA_DEBUG
+ printk(KERN_WARNING "hpsa: Sending %x - down to controller\n",
+ c->busaddr);
+#endif /* HPSA_DEBUG */
+ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ h->commands_outstanding++;
+ if (h->commands_outstanding > h->max_outstanding)
+ h->max_outstanding = h->commands_outstanding;
+}
+
+/*
+ * This card is the opposite of the other cards.
+ * 0 turns interrupts on...
+ * 0x08 turns them off...
+ */
+static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+ if (val) { /* Turn interrupts on */
+ h->interrupts_enabled = 1;
+ writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ } else { /* Turn them off */
+ h->interrupts_enabled = 0;
+ writel(SA5_INTR_OFF,
+ h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+ }
+}
+/*
+ * Returns true if fifo is full.
+ */
+static unsigned long SA5_fifo_full(struct ctlr_info *h)
+{
+ if (h->commands_outstanding >= h->max_commands)
+ return 1;
+ else
+ return 0;
+
+}
+/*
+ * returns value read from hardware.
+ * returns FIFO_EMPTY if there is nothing to read
+ */
+static unsigned long SA5_completed(struct ctlr_info *h)
+{
+ unsigned long register_value
+ = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+
+ if (register_value != FIFO_EMPTY)
+ h->commands_outstanding--;
+
+#ifdef HPSA_DEBUG
+ if (register_value != FIFO_EMPTY)
+ printk(KERN_INFO "hpsa: Read %lx back from board\n",
+ register_value);
+ else
+ printk(KERN_INFO "hpsa: FIFO Empty read\n");
+#endif
+
+ return register_value;
+}
+/*
+ * Returns true if an interrupt is pending.
+ */
+static unsigned long SA5_intr_pending(struct ctlr_info *h)
+{
+ unsigned long register_value =
+ readl(h->vaddr + SA5_INTR_STATUS);
+#ifdef HPSA_DEBUG
+ printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
+#endif /* HPSA_DEBUG */
+ if (register_value & SA5_INTR_PENDING)
+ return 1;
+ return 0 ;
+}
+
+
+static struct access_method SA5_access = {
+ SA5_submit_command,
+ SA5_intr_mask,
+ SA5_fifo_full,
+ SA5_intr_pending,
+ SA5_completed,
+};
+
+struct board_type {
+ __u32 board_id;
+ char *product_name;
+ struct access_method *access;
+};
+
+
+/* end of old hpsa_scsi.h file */
+
+#endif /* HPSA_H */
+
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
new file mode 100644
index 00000000000..12d71387ed9
--- /dev/null
+++ b/drivers/scsi/hpsa_cmd.h
@@ -0,0 +1,326 @@
+/*
+ * Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_CMD_H
+#define HPSA_CMD_H
+
+/* general boundary definitions */
+#define SENSEINFOBYTES 32 /* may vary between hbas */
+#define MAXSGENTRIES 31
+#define MAXREPLYQS 256
+
+/* Command Status value */
+#define CMD_SUCCESS 0x0000
+#define CMD_TARGET_STATUS 0x0001
+#define CMD_DATA_UNDERRUN 0x0002
+#define CMD_DATA_OVERRUN 0x0003
+#define CMD_INVALID 0x0004
+#define CMD_PROTOCOL_ERR 0x0005
+#define CMD_HARDWARE_ERR 0x0006
+#define CMD_CONNECTION_LOST 0x0007
+#define CMD_ABORTED 0x0008
+#define CMD_ABORT_FAILED 0x0009
+#define CMD_UNSOLICITED_ABORT 0x000A
+#define CMD_TIMEOUT 0x000B
+#define CMD_UNABORTABLE 0x000C
+
+/* Unit Attentions ASC's as defined for the MSA2012sa */
+#define POWER_OR_RESET 0x29
+#define STATE_CHANGED 0x2a
+#define UNIT_ATTENTION_CLEARED 0x2f
+#define LUN_FAILED 0x3e
+#define REPORT_LUNS_CHANGED 0x3f
+
+/* Unit Attentions ASCQ's as defined for the MSA2012sa */
+
+ /* These ASCQ's defined for ASC = POWER_OR_RESET */
+#define POWER_ON_RESET 0x00
+#define POWER_ON_REBOOT 0x01
+#define SCSI_BUS_RESET 0x02
+#define MSA_TARGET_RESET 0x03
+#define CONTROLLER_FAILOVER 0x04
+#define TRANSCEIVER_SE 0x05
+#define TRANSCEIVER_LVD 0x06
+
+ /* These ASCQ's defined for ASC = STATE_CHANGED */
+#define RESERVATION_PREEMPTED 0x03
+#define ASYM_ACCESS_CHANGED 0x06
+#define LUN_CAPACITY_CHANGED 0x09
+
+/* transfer direction */
+#define XFER_NONE 0x00
+#define XFER_WRITE 0x01
+#define XFER_READ 0x02
+#define XFER_RSVD 0x03
+
+/* task attribute */
+#define ATTR_UNTAGGED 0x00
+#define ATTR_SIMPLE 0x04
+#define ATTR_HEADOFQUEUE 0x05
+#define ATTR_ORDERED 0x06
+#define ATTR_ACA 0x07
+
+/* cdb type */
+#define TYPE_CMD 0x00
+#define TYPE_MSG 0x01
+
+/* config space register offsets */
+#define CFG_VENDORID 0x00
+#define CFG_DEVICEID 0x02
+#define CFG_I2OBAR 0x10
+#define CFG_MEM1BAR 0x14
+
+/* i2o space register offsets */
+#define I2O_IBDB_SET 0x20
+#define I2O_IBDB_CLEAR 0x70
+#define I2O_INT_STATUS 0x30
+#define I2O_INT_MASK 0x34
+#define I2O_IBPOST_Q 0x40
+#define I2O_OBPOST_Q 0x44
+#define I2O_DMA1_CFG 0x214
+
+/* Configuration Table */
+#define CFGTBL_ChangeReq 0x00000001l
+#define CFGTBL_AccCmds 0x00000001l
+
+#define CFGTBL_Trans_Simple 0x00000002l
+
+#define CFGTBL_BusType_Ultra2 0x00000001l
+#define CFGTBL_BusType_Ultra3 0x00000002l
+#define CFGTBL_BusType_Fibre1G 0x00000100l
+#define CFGTBL_BusType_Fibre2G 0x00000200l
+struct vals32 {
+ __u32 lower;
+ __u32 upper;
+};
+
+union u64bit {
+ struct vals32 val32;
+ __u64 val;
+};
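+/* Sketch of how the driver uses this union (see the pci_map_single()
+ * callers in hpsa.c); the value below is illustrative and assumes a
+ * little-endian host: storing val = 0x0000000123456780 makes
+ * val32.lower = 0x23456780 and val32.upper = 0x00000001, which are then
+ * copied into an SG descriptor's Addr.lower/Addr.upper.
+ */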
+
+/* FIXME this is a per controller value (barf!) */
+#define HPSA_MAX_TARGETS_PER_CTLR 16
+#define HPSA_MAX_LUN 256
+#define HPSA_MAX_PHYS_LUN 1024
+
+/* SCSI-3 Commands */
+#pragma pack(1)
+
+#define HPSA_INQUIRY 0x12
+struct InquiryData {
+ __u8 data_byte[36];
+};
+
+#define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */
+#define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */
+struct ReportLUNdata {
+ __u8 LUNListLength[4];
+ __u32 reserved;
+ __u8 LUN[HPSA_MAX_LUN][8];
+};
+
+struct ReportExtendedLUNdata {
+ __u8 LUNListLength[4];
+ __u8 extended_response_flag;
+ __u8 reserved[3];
+ __u8 LUN[HPSA_MAX_LUN][24];
+};
+
+struct SenseSubsystem_info {
+ __u8 reserved[36];
+ __u8 portname[8];
+ __u8 reserved1[1108];
+};
+
+#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
+struct ReadCapdata {
+ __u8 total_size[4]; /* Total size in blocks */
+ __u8 block_size[4]; /* Size of blocks in bytes */
+};
+
+#if 0
+/* 12 byte commands not implemented in firmware yet. */
+#define HPSA_READ 0xa8
+#define HPSA_WRITE 0xaa
+#endif
+
+#define HPSA_READ 0x28 /* Read(10) */
+#define HPSA_WRITE 0x2a /* Write(10) */
+
+/* BMIC commands */
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_CACHE_FLUSH 0xc2
+#define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
+
+/* Command List Structure */
+union SCSI3Addr {
+ struct {
+ __u8 Dev;
+ __u8 Bus:6;
+ __u8 Mode:2; /* b00 */
+ } PeripDev;
+ struct {
+ __u8 DevLSB;
+ __u8 DevMSB:6;
+ __u8 Mode:2; /* b01 */
+ } LogDev;
+ struct {
+ __u8 Dev:5;
+ __u8 Bus:3;
+ __u8 Targ:6;
+ __u8 Mode:2; /* b10 */
+ } LogUnit;
+};
+
+struct PhysDevAddr {
+ __u32 TargetId:24;
+ __u32 Bus:6;
+ __u32 Mode:2;
+ /* 2 level target device addr */
+ union SCSI3Addr Target[2];
+};
+
+struct LogDevAddr {
+ __u32 VolId:30;
+ __u32 Mode:2;
+ __u8 reserved[4];
+};
+
+union LUNAddr {
+ __u8 LunAddrBytes[8];
+ union SCSI3Addr SCSI3Lun[4];
+ struct PhysDevAddr PhysDev;
+ struct LogDevAddr LogDev;
+};
+
+struct CommandListHeader {
+ __u8 ReplyQueue;
+ __u8 SGList;
+ __u16 SGTotal;
+ struct vals32 Tag;
+ union LUNAddr LUN;
+};
+
+struct RequestBlock {
+ __u8 CDBLen;
+ struct {
+ __u8 Type:3;
+ __u8 Attribute:3;
+ __u8 Direction:2;
+ } Type;
+ __u16 Timeout;
+ __u8 CDB[16];
+};
+
+struct ErrDescriptor {
+ struct vals32 Addr;
+ __u32 Len;
+};
+
+struct SGDescriptor {
+ struct vals32 Addr;
+ __u32 Len;
+ __u32 Ext;
+};
+
+union MoreErrInfo {
+ struct {
+ __u8 Reserved[3];
+ __u8 Type;
+ __u32 ErrorInfo;
+ } Common_Info;
+ struct {
+ __u8 Reserved[2];
+ __u8 offense_size; /* size of offending entry */
+ __u8 offense_num; /* byte # of offense 0-base */
+ __u32 offense_value;
+ } Invalid_Cmd;
+};
+struct ErrorInfo {
+ __u8 ScsiStatus;
+ __u8 SenseLen;
+ __u16 CommandStatus;
+ __u32 ResidualCnt;
+ union MoreErrInfo MoreErrInfo;
+ __u8 SenseInfo[SENSEINFOBYTES];
+};
+/* Command types */
+#define CMD_IOCTL_PEND 0x01
+#define CMD_SCSI 0x03
+
+struct ctlr_info; /* defined in hpsa.h */
+/* The size of this structure needs to be divisible by 8
+ * on all architectures, because the controller uses the 2
+ * lower bits of the address, and the driver uses 1 lower
+ * bit (3 bits total).
+ */
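+/* (hpsa_init_one checks this at build time with
+ * BUILD_BUG_ON(sizeof(struct CommandList) % 8).)
+ */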
+struct CommandList {
+ struct CommandListHeader Header;
+ struct RequestBlock Request;
+ struct ErrDescriptor ErrDesc;
+ struct SGDescriptor SG[MAXSGENTRIES];
+ /* information associated with the command */
+ __u32 busaddr; /* physical addr of this record */
+ struct ErrorInfo *err_info; /* pointer to the allocated mem */
+ struct ctlr_info *h;
+ int cmd_type;
+ long cmdindex;
+ struct hlist_node list;
+ struct CommandList *prev;
+ struct CommandList *next;
+ struct request *rq;
+ struct completion *waiting;
+ int retry_count;
+ void *scsi_cmd;
+};
+
+/* Configuration Table Structure */
+struct HostWrite {
+ __u32 TransportRequest;
+ __u32 Reserved;
+ __u32 CoalIntDelay;
+ __u32 CoalIntCount;
+};
+
+struct CfgTable {
+ __u8 Signature[4];
+ __u32 SpecValence;
+ __u32 TransportSupport;
+ __u32 TransportActive;
+ struct HostWrite HostWrite;
+ __u32 CmdsOutMax;
+ __u32 BusTypes;
+ __u32 Reserved;
+ __u8 ServerName[16];
+ __u32 HeartBeat;
+ __u32 SCSI_Prefetch;
+};
+
+struct hpsa_pci_info {
+ unsigned char bus;
+ unsigned char dev_fn;
+ unsigned short domain;
+ __u32 board_id;
+};
+
+#pragma pack()
+#endif /* HPSA_CMD_H */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 206c2fa8c1b..9e52d16c7c3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1333,7 +1333,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
error = &hostrcb->hcam.u.error.u.type_17_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
- strstrip(error->failure_reason);
+ strim(error->failure_reason);
ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
be32_to_cpu(hostrcb->hcam.u.error.prc));
@@ -1359,7 +1359,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
error = &hostrcb->hcam.u.error.u.type_07_error;
error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
- strstrip(error->failure_reason);
+ strim(error->failure_reason);
ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
be32_to_cpu(hostrcb->hcam.u.error.prc));
@@ -6521,6 +6521,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
int rc;
ENTER;
+ ioa_cfg->pdev->state_saved = true;
rc = pci_restore_state(ioa_cfg->pdev);
if (rc != PCIBIOS_SUCCESSFUL) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c4b58d042f6..881d5dfe8c7 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
/**
* struct fc_fcp_internal - FCP layer internal data
- * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
* @scsi_pkt_queue: Current FCP packets
* @last_can_queue_ramp_down_time: ramp down time
* @last_can_queue_ramp_up_time: ramp up time
* @max_can_queue: max can_queue size
*/
struct fc_fcp_internal {
- mempool_t *scsi_pkt_pool;
- struct list_head scsi_pkt_queue;
- unsigned long last_can_queue_ramp_down_time;
- unsigned long last_can_queue_ramp_up_time;
- int max_can_queue;
+ mempool_t *scsi_pkt_pool;
+ spinlock_t scsi_queue_lock;
+ struct list_head scsi_pkt_queue;
+ unsigned long last_can_queue_ramp_down_time;
+ unsigned long last_can_queue_ramp_up_time;
+ int max_can_queue;
};
#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -410,12 +412,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
unsigned long flags;
fp = fc_frame_alloc(lport, len);
- if (!fp) {
- spin_lock_irqsave(lport->host->host_lock, flags);
- fc_fcp_can_queue_ramp_down(lport);
- spin_unlock_irqrestore(lport->host->host_lock, flags);
- }
- return fp;
+ if (likely(fp))
+ return fp;
+
+ /* error case */
+ spin_lock_irqsave(lport->host->host_lock, flags);
+ fc_fcp_can_queue_ramp_down(lport);
+ spin_unlock_irqrestore(lport->host->host_lock, flags);
+ return NULL;
}
/**
@@ -990,7 +994,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
struct scsi_cmnd *sc_cmd;
unsigned long flags;
- spin_lock_irqsave(lport->host->host_lock, flags);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
restart:
list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
sc_cmd = fsp->cmd;
@@ -1001,7 +1005,7 @@ restart:
continue;
fc_fcp_pkt_hold(fsp);
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
if (!fc_fcp_lock_pkt(fsp)) {
fc_fcp_cleanup_cmd(fsp, error);
@@ -1010,14 +1014,14 @@ restart:
}
fc_fcp_pkt_release(fsp);
- spin_lock_irqsave(lport->host->host_lock, flags);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
/*
* while we dropped the lock multiple pkts could
* have been released, so we have to start over.
*/
goto restart;
}
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
/**
@@ -1035,11 +1039,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
* @fsp: The FCP packet to send
*
* Return: Zero for success and -1 for failure
- * Locks: Called with the host lock and irqs disabled.
+ * Locks: Called without locks held
*/
static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
int rc;
fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1049,13 +1054,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
int_to_scsilun(fsp->cmd->device->lun,
(struct scsi_lun *)fsp->cdb_cmd.fc_lun);
memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
- list_add_tail(&fsp->list, &si->scsi_pkt_queue);
- spin_unlock_irq(lport->host->host_lock);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
- spin_lock_irq(lport->host->host_lock);
- if (rc)
+ if (unlikely(rc)) {
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ }
return rc;
}
@@ -1752,6 +1760,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
struct fcoe_dev_stats *stats;
lport = shost_priv(sc_cmd->device->host);
+ spin_unlock_irq(lport->host->host_lock);
rval = fc_remote_port_chkready(rport);
if (rval) {
@@ -1834,6 +1843,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
rc = SCSI_MLQUEUE_HOST_BUSY;
}
out:
+ spin_lock_irq(lport->host->host_lock);
return rc;
}
EXPORT_SYMBOL(fc_queuecommand);
@@ -1864,11 +1874,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
lport = fsp->lp;
si = fc_get_scsi_internal(lport);
- spin_lock_irqsave(lport->host->host_lock, flags);
- if (!fsp->cmd) {
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ if (!fsp->cmd)
return;
- }
/*
* if can_queue ramp down is done then try can_queue ramp up
@@ -1880,10 +1887,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
sc_cmd = fsp->cmd;
fsp->cmd = NULL;
- if (!sc_cmd->SCp.ptr) {
- spin_unlock_irqrestore(lport->host->host_lock, flags);
+ if (!sc_cmd->SCp.ptr)
return;
- }
CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
switch (fsp->status_code) {
@@ -1945,10 +1950,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
break;
}
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
sc_cmd->SCp.ptr = NULL;
sc_cmd->scsi_done(sc_cmd);
- spin_unlock_irqrestore(lport->host->host_lock, flags);
/* release ref from initial allocation in queue command */
fc_fcp_pkt_release(fsp);
@@ -2216,6 +2222,7 @@ int fc_fcp_init(struct fc_lport *lport)
lport->scsi_priv = si;
si->max_can_queue = lport->host->can_queue;
INIT_LIST_HEAD(&si->scsi_pkt_queue);
+ spin_lock_init(&si->scsi_queue_lock);
si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
if (!si->scsi_pkt_pool) {
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 74338c83ad0..0b165024a21 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -537,7 +537,9 @@ int fc_fabric_login(struct fc_lport *lport)
int rc = -1;
mutex_lock(&lport->lp_mutex);
- if (lport->state == LPORT_ST_DISABLED) {
+ if (lport->state == LPORT_ST_DISABLED ||
+ lport->state == LPORT_ST_LOGO) {
+ fc_lport_state_enter(lport, LPORT_ST_RESET);
fc_lport_enter_reset(lport);
rc = 0;
}
@@ -967,6 +969,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
fc_lport_state(lport));
+ if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
+ return;
+
if (lport->vport) {
if (lport->link_up)
fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 35ca0e72df4..02300523b23 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -310,6 +310,7 @@ static void fc_rport_work(struct work_struct *work)
restart = 1;
else
list_del(&rdata->peers);
+ rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
mutex_unlock(&lport->disc.disc_mutex);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index b7689f3d05f..c28a712fd4d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -517,7 +517,7 @@ static void iscsi_free_task(struct iscsi_task *task)
if (conn->login_task == task)
return;
- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+ kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
if (sc) {
task->sc = NULL;
@@ -737,7 +737,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
- if (!__kfifo_get(session->cmdpool.queue,
+ if (!kfifo_out(&session->cmdpool.queue,
(void*)&task, sizeof(void*)))
return NULL;
}
@@ -1567,7 +1567,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
{
struct iscsi_task *task;
- if (!__kfifo_get(conn->session->cmdpool.queue,
+ if (!kfifo_out(&conn->session->cmdpool.queue,
(void *) &task, sizeof(void *)))
return NULL;
@@ -2461,12 +2461,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
if (q->pool == NULL)
return -ENOMEM;
- q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (IS_ERR(q->queue)) {
- q->queue = NULL;
- goto enomem;
- }
+ kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*));
for (i = 0; i < max; i++) {
q->pool[i] = kzalloc(item_size, GFP_KERNEL);
@@ -2474,7 +2469,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
q->max = i;
goto enomem;
}
- __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
+ kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*));
}
if (items) {
@@ -2497,7 +2492,6 @@ void iscsi_pool_free(struct iscsi_pool *q)
for (i = 0; i < q->max; i++)
kfree(q->pool[i]);
kfree(q->pool);
- kfree(q->queue);
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
@@ -2825,7 +2819,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
/* allocate login_task used for the login/text sequences */
spin_lock_bh(&session->lock);
- if (!__kfifo_get(session->cmdpool.queue,
+ if (!kfifo_out(&session->cmdpool.queue,
(void*)&conn->login_task,
sizeof(void*))) {
spin_unlock_bh(&session->lock);
@@ -2845,7 +2839,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
return cls_conn;
login_task_data_alloc_fail:
- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
+ kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
login_task_alloc_fail:
iscsi_destroy_conn(cls_conn);
@@ -2908,7 +2902,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
+ kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
if (session->leadconn == conn)
session->leadconn = NULL;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index ca25ee5190b..db6856c138f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -445,15 +445,15 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
return;
/* flush task's r2t queues */
- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
}
r2t = tcp_task->r2t;
if (r2t != NULL) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
tcp_task->r2t = NULL;
}
@@ -541,7 +541,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
return 0;
}
- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
if (!rc) {
iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
"Target has sent more R2Ts than it "
@@ -554,7 +554,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
if (r2t->data_length == 0) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -570,7 +570,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
"invalid R2T with data len %u at offset %u "
"and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_out(task->sc)->length);
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
@@ -580,7 +580,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
r2t->sent = 0;
tcp_task->exp_datasn = r2tsn + 1;
- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+ kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
conn->r2t_pdus_cnt++;
iscsi_requeue_task(task);
@@ -951,7 +951,7 @@ int iscsi_tcp_task_init(struct iscsi_task *task)
return conn->session->tt->init_pdu(task, 0, task->data_count);
}
- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+ BUG_ON(kfifo_len(&tcp_task->r2tqueue));
tcp_task->exp_datasn = 0;
/* Prepare PDU, optionally w/ immediate data */
@@ -982,7 +982,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
if (r2t->data_length <= r2t->sent) {
ISCSI_DBG_TCP(task->conn,
" done with r2t %p\n", r2t);
- __kfifo_put(tcp_task->r2tpool.queue,
+ kfifo_in(&tcp_task->r2tpool.queue,
(void *)&tcp_task->r2t,
sizeof(void *));
tcp_task->r2t = r2t = NULL;
@@ -990,8 +990,13 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
}
if (r2t == NULL) {
- __kfifo_get(tcp_task->r2tqueue,
- (void *)&tcp_task->r2t, sizeof(void *));
+ if (kfifo_out(&tcp_task->r2tqueue,
+ (void *)&tcp_task->r2t, sizeof(void *)) !=
+ sizeof(void *)) {
+ WARN_ONCE(1, "unexpected fifo state");
+ r2t = NULL;
+ }
+
r2t = tcp_task->r2t;
}
spin_unlock_bh(&session->lock);
@@ -1127,9 +1132,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)
}
/* R2T xmit queue */
- tcp_task->r2tqueue = kfifo_alloc(
- session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+ if (kfifo_alloc(&tcp_task->r2tqueue,
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) {
iscsi_pool_free(&tcp_task->r2tpool);
goto r2t_alloc_fail;
}
@@ -1142,7 +1146,7 @@ r2t_alloc_fail:
struct iscsi_task *task = session->cmds[i];
struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_task->r2tqueue);
+ kfifo_free(&tcp_task->r2tqueue);
iscsi_pool_free(&tcp_task->r2tpool);
}
return -ENOMEM;
@@ -1157,7 +1161,7 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
struct iscsi_task *task = session->cmds[i];
struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_task->r2tqueue);
+ kfifo_free(&tcp_task->r2tqueue);
iscsi_pool_free(&tcp_task->r2tpool);
}
}
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 9ad38e81e34..ab19b3b4be5 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -58,19 +58,15 @@ static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
goto free_pool;
spin_lock_init(&q->lock);
- q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
- GFP_KERNEL, &q->lock);
- if (IS_ERR(q->queue))
- goto free_item;
+ kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));
for (i = 0, iue = q->items; i < max; i++) {
- __kfifo_put(q->queue, (void *) &iue, sizeof(void *));
+ kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
iue->sbuf = ring[i];
iue++;
}
return 0;
-free_item:
kfree(q->items);
free_pool:
kfree(q->pool);
@@ -167,7 +163,11 @@ struct iu_entry *srp_iu_get(struct srp_target *target)
{
struct iu_entry *iue = NULL;
- kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
+ if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue,
+ sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) {
+ WARN_ONCE(1, "unexpected fifo state");
+ return NULL;
+ }
if (!iue)
return iue;
iue->target = target;
@@ -179,7 +179,8 @@ EXPORT_SYMBOL_GPL(srp_iu_get);
void srp_iu_put(struct iu_entry *iue)
{
- kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
+ kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue,
+ sizeof(void *), &iue->target->iu_queue.lock);
}
EXPORT_SYMBOL_GPL(srp_iu_put);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 226920d15ea..d4da6bdd0e7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4506,9 +4506,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
return error;
+ }
+ }
/* Get the bus address of Bar0 and Bar2 and the number of bytes
* required by each mapping.
@@ -6021,9 +6025,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
pdev = phba->pcidev;
/* Set the device DMA mask size */
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+ || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
return error;
+ }
+ }
/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
* number of bytes required by each mapping. They are actually
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 134c63ef6d3..99ff99e45be 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -2501,7 +2501,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
instance->base_addr = pci_resource_start(instance->pdev, 0);
}
- if (pci_request_regions(instance->pdev, "megasas: LSI")) {
+ if (pci_request_selected_regions(instance->pdev,
+ pci_select_bars(instance->pdev, IORESOURCE_MEM),
+ "megasas: LSI")) {
printk(KERN_DEBUG "megasas: IO memory region busy!\n");
return -EBUSY;
}
@@ -2642,7 +2644,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
fail_ioremap:
- pci_release_regions(instance->pdev);
+ pci_release_selected_regions(instance->pdev,
+ pci_select_bars(instance->pdev, IORESOURCE_MEM));
return -EINVAL;
}
@@ -2662,7 +2665,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
iounmap(instance->reg_set);
- pci_release_regions(instance->pdev);
+ pci_release_selected_regions(instance->pdev,
+ pci_select_bars(instance->pdev, IORESOURCE_MEM));
}
/**
@@ -2971,7 +2975,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/*
* PCI prepping: enable device set bus mastering and dma mask
*/
- rval = pci_enable_device(pdev);
+ rval = pci_enable_device_mem(pdev);
if (rval) {
return rval;
@@ -3276,7 +3280,7 @@ megasas_resume(struct pci_dev *pdev)
/*
* PCI prepping: enable device set bus mastering and dma mask
*/
- rval = pci_enable_device(pdev);
+ rval = pci_enable_device_mem(pdev);
if (rval) {
printk(KERN_ERR "megasas: Enable device failed\n");
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6422e258fd5..89d02401b9e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3583,6 +3583,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->transport_cmds.mutex);
+ /* scsih internal command bits */
+ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+ mutex_init(&ioc->scsih_cmds.mutex);
+
/* task management internal command bits */
ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c790d45876c..cae6b2cf492 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
+ { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
{ } /* terminate list */
};
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 950202a70bc..24223473f57 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
seg->alloc_size = 0;
}
-static void _put_request(struct request *rq , bool is_async)
+static void _put_request(struct request *rq)
{
- if (is_async) {
- WARN_ON(rq->bio);
- __blk_put_request(rq->q, rq);
- } else {
- /*
- * If osd_finalize_request() was called but the request was not
- * executed through the block layer, then we must release BIOs.
- * TODO: Keep error code in or->async_error. Need to audit all
- * code paths.
- */
- if (unlikely(rq->bio))
- blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
- else
- blk_put_request(rq);
- }
+ /*
+ * If osd_finalize_request() was called but the request was not
+ * executed through the block layer, then we must release BIOs.
+ * TODO: Keep error code in or->async_error. Need to audit all
+ * code paths.
+ */
+ if (unlikely(rq->bio))
+ blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+ else
+ blk_put_request(rq);
}
void osd_end_request(struct osd_request *or)
{
struct request *rq = or->request;
- /* IMPORTANT: make sure this agrees with osd_execute_request_async */
- bool is_async = (or->request->end_io_data == or);
_osd_free_seg(or, &or->set_attr);
_osd_free_seg(or, &or->enc_get_attr);
@@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or)
if (rq) {
if (rq->next_rq) {
- _put_request(rq->next_rq, is_async);
+ _put_request(rq->next_rq);
rq->next_rq = NULL;
}
- _put_request(rq, is_async);
+ _put_request(rq);
}
_osd_request_free(or);
}
EXPORT_SYMBOL(osd_end_request);
+static void _set_error_resid(struct osd_request *or, struct request *req,
+ int error)
+{
+ or->async_error = error;
+ or->req_errors = req->errors ? : error;
+ or->sense_len = req->sense_len;
+ if (or->out.req)
+ or->out.residual = or->out.req->resid_len;
+ if (or->in.req)
+ or->in.residual = or->in.req->resid_len;
+}
+
int osd_execute_request(struct osd_request *or)
{
- return or->async_error =
- blk_execute_rq(or->request->q, NULL, or->request, 0);
+ int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
+
+ _set_error_resid(or, or->request, error);
+ return error;
}
EXPORT_SYMBOL(osd_execute_request);
@@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error)
{
struct osd_request *or = req->end_io_data;
- or->async_error = error;
-
- if (unlikely(error)) {
- OSD_DEBUG("osd_request_async_done error recieved %d "
- "errors 0x%x\n", error, req->errors);
- if (!req->errors) /* don't miss out on this one */
- req->errors = error;
+ _set_error_resid(or, req, error);
+ if (req->next_rq) {
+ __blk_put_request(req->q, req->next_rq);
+ req->next_rq = NULL;
}
+ __blk_put_request(req->q, req);
+ or->request = NULL;
+ or->in.req = NULL;
+ or->out.req = NULL;
+
if (or->async_done)
or->async_done(or, or->async_private);
else
@@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or,
#endif
int ret;
- if (likely(!or->request->errors)) {
- osi->out_resid = 0;
- osi->in_resid = 0;
+ if (likely(!or->req_errors))
return 0;
- }
osi = osi ? : &local_osi;
memset(osi, 0, sizeof(*osi));
- ssdb = or->request->sense;
- sense_len = or->request->sense_len;
+ ssdb = (typeof(ssdb))or->sense;
+ sense_len = or->sense_len;
if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
OSD_ERR("Block-layer returned error(0x%x) but "
"sense_len(%u) || key(%d) is empty\n",
- or->request->errors, sense_len, ssdb->sense_key);
+ or->req_errors, sense_len, ssdb->sense_key);
goto analyze;
}
@@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
"additional_code=0x%x async_error=%d errors=0x%x\n",
osi->key, original_sense_len, sense_len,
osi->additional_code, or->async_error,
- or->request->errors);
+ or->req_errors);
if (original_sense_len < sense_len)
sense_len = original_sense_len;
@@ -1695,10 +1701,10 @@ analyze:
ret = -EIO;
}
- if (or->out.req)
- osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes;
- if (or->in.req)
- osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes;
+ if (!or->out.residual)
+ or->out.residual = or->out.total_bytes;
+ if (!or->in.residual)
+ or->in.residual = or->in.total_bytes;
return ret;
}
diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h
index 22644de2639..63ad4aa0c42 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.h
+++ b/drivers/scsi/pm8001/pm8001_ctl.h
@@ -45,16 +45,6 @@
#define HEADER_LEN 28
#define SIZE_OFFSET 16
-struct pm8001_ioctl_payload {
- u32 signature;
- u16 major_function;
- u16 minor_function;
- u16 length;
- u16 status;
- u16 offset;
- u16 id;
- u8 func_specific[1];
-};
#define FLASH_OK 0x000000
#define FAIL_OPEN_BIOS_FILE 0x000100
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index a3de306b904..9b44c6f1b10 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
static void __devinit
mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
{
- u32 offset;
- u32 value;
- u32 i, j;
- u32 bit_cnt;
+ u32 value, offset, i;
#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
*/
if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
return;
- /* set SSC bit of PHY 0 - 3 */
+
for (i = 0; i < 4; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
- value = pm8001_cr32(pm8001_ha, 2, offset);
- if (SSCbit) {
- value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
- } else {
- value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
- }
- bit_cnt = 0;
- for (j = 0; j < 31; j++)
- if ((value >> j) & 0x00000001)
- bit_cnt++;
- if (bit_cnt % 2)
- value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
- else
- value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
- pm8001_cw32(pm8001_ha, 2, offset, value);
+ pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
}
-
/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
return;
-
- /* set SSC bit of PHY 4 - 7 */
for (i = 4; i < 8; i++) {
offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
- value = pm8001_cr32(pm8001_ha, 2, offset);
- if (SSCbit) {
- value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
- } else {
- value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
- value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
- }
- bit_cnt = 0;
- for (j = 0; j < 31; j++)
- if ((value >> j) & 0x00000001)
- bit_cnt++;
- if (bit_cnt % 2)
- value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
- else
- value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
- pm8001_cw32(pm8001_ha, 2, offset, value);
+ pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
}
+ /*************************************************************
+ Change the SSC upspreading value to 0x0 so that upspreading is disabled.
+ Device MABC SMOD0 Controls
+ Address: (via MEMBASE-III):
+ Using shifted destination address 0x0_0000: with Offset 0xD8
+
+ 31:28 R/W Reserved Do not change
+ 27:24 R/W SAS_SMOD_SPRDUP 0000
+ 23:20 R/W SAS_SMOD_SPRDDN 0000
+ 19:0 R/W Reserved Do not change
+ Upon power-up this register will read as 0x8990c016,
+ and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
+ so that the written value will be 0x8090c016.
+ This will ensure only down-spreading SSC is enabled on the SPC.
+ *************************************************************/
+ value = pm8001_cr32(pm8001_ha, 2, 0xd8);
+ pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
/*set the shifted destination address to 0x0 to avoid error operation */
bar4_shift(pm8001_ha, 0x0);
@@ -1901,7 +1878,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
struct sas_task *t;
struct pm8001_ccb_info *ccb;
- unsigned long flags;
+ unsigned long flags = 0;
u32 param;
u32 status;
u32 tag;
@@ -2040,7 +2017,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*in order to force CPU ordering*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2058,7 +2037,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2084,7 +2065,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2149,7 +2132,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2171,7 +2156,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2200,11 +2187,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
+ } else if (t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/* ditto */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ } else if (!t->uldd_task) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
}
}
@@ -2212,7 +2208,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
struct sas_task *t;
- unsigned long flags;
+ unsigned long flags = 0;
struct task_status_struct *ts;
struct pm8001_ccb_info *ccb;
struct pm8001_device *pm8001_dev;
@@ -2292,7 +2288,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_QUEUE_FULL;
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
return;
}
break;
@@ -2401,11 +2399,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat));
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
+ } else if (t->uldd_task) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
+ mb();/* ditto */
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ } else if (!t->uldd_task) {
+ spin_unlock_irqrestore(&t->task_state_lock, flags);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ mb();/*ditto*/
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ t->task_done(t);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
}
}
@@ -2876,15 +2883,20 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
u8 link_rate =
(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
u8 phy_id =
(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
-
+ port->port_state = portstate;
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("HW_EVENT_SAS_PHY_UP \n"));
+ pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
+ port_id, phy_id));
switch (deviceType) {
case SAS_PHY_UNUSED:
@@ -2895,16 +2907,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
PHY_NOTIFY_ENABLE_SPINUP);
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
break;
case SAS_EDGE_EXPANDER_DEVICE:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("expander device.\n"));
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("fanout expander device.\n"));
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
break;
default:
@@ -2946,11 +2961,20 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
u8 link_rate =
(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
u8 phy_id =
(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+ u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+ u8 portstate = (u8)(npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
+ " phy id = %d\n", port_id, phy_id));
+ port->port_state = portstate;
+ port->port_attached = 1;
get_lrate_mode(phy, link_rate);
phy->phy_type |= PORT_TYPE_SATA;
phy->phy_attached = 1;
@@ -2984,7 +3008,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
u8 portstate = (u8)(npip_portstate & 0x0000000F);
-
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ port->port_state = portstate;
+ phy->phy_type = 0;
+ phy->identify.device_type = 0;
+ phy->phy_attached = 0;
+ memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
switch (portstate) {
case PORT_VALID:
break;
@@ -2993,26 +3023,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk(" PortInvalid portID %d \n", port_id));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
case PORT_IN_RESET:
PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk(" PortInReset portID %d \n", port_id));
+ pm8001_printk(" Port In Reset portID %d \n", port_id));
break;
case PORT_NOT_ESTABLISHED:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+ port->port_attached = 0;
break;
case PORT_LOSTCOMM:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
+ port->port_attached = 0;
pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
break;
default:
+ port->port_attached = 0;
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" phy Down and(default) = %x\n",
portstate));
@@ -3770,7 +3804,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
- ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
+ ssp_cmd.dir_m_tlr =
+ cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
SAS 1.1 compatible TLR*/
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
@@ -3841,7 +3876,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
- ncg_tag = cpu_to_le32(hdr_tag);
+ ncg_tag = hdr_tag;
dir = data_dir_flags[task->data_dir] << 8;
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -3986,7 +4021,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
((stp_sspsmp_sata & 0x03) * 0x10000000));
payload.firstburstsize_ITNexustimeout =
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
- memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
+ memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
return rc;
@@ -4027,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ;
int ret;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
- memset((u8 *)&payload, 0, sizeof(payload));
+ memset(&payload, 0, sizeof(payload));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = 1;
payload.phyop_phyid =
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 96e4daa68b8..833a5201eda 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -242,8 +242,7 @@ struct reg_dev_req {
__le32 phyid_portid;
__le32 dtype_dlr_retry;
__le32 firstburstsize_ITNexustimeout;
- u32 sas_addr_hi;
- u32 sas_addr_low;
+ u8 sas_addr[SAS_ADDR_SIZE];
__le32 upper_device_id;
u32 reserved[8];
} __attribute__((packed, aligned(4)));
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 42ebe725d5a..c2f1032496c 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
int i;
spin_lock_init(&pm8001_ha->lock);
- for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_phy_init(pm8001_ha, i);
+ pm8001_ha->port[i].wide_port_phymap = 0;
+ pm8001_ha->port[i].port_attached = 0;
+ pm8001_ha->port[i].port_state = 0;
+ INIT_LIST_HEAD(&pm8001_ha->port[i].list);
+ }
pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
if (!pm8001_ha->tags)
@@ -511,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
u8 i;
#ifdef PM8001_READ_VPD
DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_ioctl_payload payload;
pm8001_ha->nvmd_completion = &completion;
- PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0);
+ payload.minor_function = 0;
+ payload.length = 128;
+ payload.func_specific = kzalloc(128, GFP_KERNEL);
+ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
wait_for_completion(&completion);
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
SAS_ADDR_SIZE);
PM8001_INIT_DBG(pm8001_ha,
- pm8001_printk("phy %d sas_addr = %x \n", i,
- (u64)pm8001_ha->phy[i].dev_sas_addr));
+ pm8001_printk("phy %d sas_addr = %016llx \n", i,
+ pm8001_ha->phy[i].dev_sas_addr));
}
#else
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
- pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL;
+ pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
pm8001_ha->phy[i].dev_sas_addr =
cpu_to_be64((u64)
(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 1f767a0e727..7f9c83a7639 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
}
return 0;
}
+ /* Find the local port id that's attached to this device */
+static int sas_find_local_port_id(struct domain_device *dev)
+{
+ struct domain_device *pdev = dev->parent;
+
+ /* Directly attached device */
+ if (!pdev)
+ return dev->port->id;
+ while (pdev) {
+ struct domain_device *pdev_p = pdev->parent;
+ if (!pdev_p)
+ return pdev->port->id;
+ pdev = pdev->parent;
+ }
+ return 0;
+}
+
/**
* pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
* @task: the task to be execute.
@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
struct domain_device *dev = task->dev;
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev;
+ struct pm8001_port *port = NULL;
struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
u32 tag = 0xdeadbeef, rc, n_elem = 0;
u32 n = num;
- unsigned long flags = 0;
+ unsigned long flags = 0, flags_libsas = 0;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
rc = SAS_PHY_DOWN;
goto out_done;
}
+ port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+ if (!port->port_attached) {
+ if (sas_protocol_ata(t->task_proto)) {
+ struct task_status_struct *ts = &t->task_status;
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ spin_unlock_irqrestore(dev->sata_dev.ap->lock,
+ flags_libsas);
+ t->task_done(t);
+ spin_lock_irqsave(dev->sata_dev.ap->lock,
+ flags_libsas);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
+ } else {
+ struct task_status_struct *ts = &t->task_status;
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ t->task_done(t);
+ if (n > 1)
+ t = list_entry(t->list.next,
+ struct sas_task, list);
+ continue;
+ }
+ }
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
goto err_out;
@@ -569,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_device = pm8001_alloc_dev(pm8001_ha);
- pm8001_device->sas_device = dev;
if (!pm8001_device) {
res = -1;
goto found_out;
}
+ pm8001_device->sas_device = dev;
dev->lldd_dev = pm8001_device;
pm8001_device->dev_type = dev->dev_type;
pm8001_device->dcompletion = &completion;
@@ -609,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
wait_for_completion(&completion);
if (dev->dev_type == SAS_END_DEV)
msleep(50);
- pm8001_ha->flags = PM8001F_RUN_TIME ;
+ pm8001_ha->flags |= PM8001F_RUN_TIME ;
return 0;
found_out:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -772,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
task->task_done = pm8001_task_done;
task->timer.data = (unsigned long)task;
task->timer.function = pm8001_tmf_timedout;
- task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+ task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
add_timer(&task->timer);
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
@@ -897,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
if (dev_is_sata(dev)) {
DECLARE_COMPLETION_ONSTACK(completion_setstate);
+ if (scsi_is_sas_phy_local(phy))
+ return 0;
rc = sas_phy_reset(phy, 1);
msleep(2000);
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 30f2ede55a7..8e38ca8cd10 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -59,11 +59,11 @@
#define DRV_NAME "pm8001"
#define DRV_VERSION "0.1.36"
-#define PM8001_FAIL_LOGGING 0x01 /* libsas EH function logging */
+#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
#define PM8001_IO_LOGGING 0x08 /* I/O path logging */
-#define PM8001_EH_LOGGING 0x10 /* Error message logging */
+#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\
@@ -100,6 +100,7 @@ do { \
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
+#define PM8001_READ_VPD
#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
struct pm8001_hba_info;
struct pm8001_ccb_info;
struct pm8001_device;
-struct pm8001_tmf_task;
+/* define task management IU */
+struct pm8001_tmf_task {
+ u8 tmf;
+ u32 tag_of_task_to_be_managed;
+};
+struct pm8001_ioctl_payload {
+ u32 signature;
+ u16 major_function;
+ u16 minor_function;
+ u16 length;
+ u16 status;
+ u16 offset;
+ u16 id;
+ u8 *func_specific;
+};
+
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -164,6 +180,10 @@ struct pm8001_chip_info {
struct pm8001_port {
struct asd_sas_port sas_port;
+ u8 port_attached;
+ u8 wide_port_phymap;
+ u8 port_state;
+ struct list_head list;
};
struct pm8001_phy {
@@ -386,11 +406,7 @@ struct pm8001_fw_image_header {
__be32 startup_entry;
} __attribute__((packed, aligned(4)));
-/* define task management IU */
-struct pm8001_tmf_task {
- u8 tmf;
- u32 tag_of_task_to_be_managed;
-};
+
/**
* FW Flash Update status values
*/
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 34c6b896a91..e7d2688fbeb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1,7 +1,8 @@
/*
* pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
*
- * Written By: PMC Sierra Corporation
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ * PMC-Sierra Inc
*
* Copyright (C) 2008, 2009 PMC Sierra Inc
*
@@ -79,7 +80,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
/*
* Module parameters
*/
-MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
+MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +163,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
list_for_each_entry(temp, &pinstance->used_res_q, queue) {
- /* do not expose VSETs with order-ids >= 240 */
+ /* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
if (RES_IS_VSET(temp->cfg_entry)) {
target = temp->cfg_entry.unique_flags1;
- if (target >= PMCRAID_MAX_VSET_TARGETS)
+ if (target > PMCRAID_MAX_VSET_TARGETS)
continue;
bus = PMCRAID_VSET_BUS_ID;
lun = 0;
@@ -1210,7 +1211,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
int retval = 0;
if (cfgte->resource_type == RES_TYPE_VSET)
- retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
+ retval = ((cfgte->unique_flags1 & 0x80) == 0);
else if (cfgte->resource_type == RES_TYPE_GSCSI)
retval = (RES_BUS(cfgte->resource_address) !=
PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1361,6 +1362,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
* Return value:
* none
*/
+
static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
{
struct pmcraid_config_table_entry *cfg_entry;
@@ -1368,9 +1370,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
struct pmcraid_cmd *cmd;
struct pmcraid_cmd *cfgcmd;
struct pmcraid_resource_entry *res = NULL;
- u32 new_entry = 1;
unsigned long lock_flags;
unsigned long host_lock_flags;
+ u32 new_entry = 1;
+ u32 hidden_entry = 0;
int rc;
ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1406,9 +1409,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
}
/* If this resource is not going to be added to mid-layer, just notify
- * applications and return
+ * applications and return. If this notification is about hiding a VSET
+ * resource, check if it was exposed already.
*/
- if (!pmcraid_expose_resource(cfg_entry))
+ if (pinstance->ccn.hcam->notification_type ==
+ NOTIFICATION_TYPE_ENTRY_CHANGED &&
+ cfg_entry->resource_type == RES_TYPE_VSET &&
+ cfg_entry->unique_flags1 & 0x80) {
+ hidden_entry = 1;
+ } else if (!pmcraid_expose_resource(cfg_entry))
goto out_notify_apps;
spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1424,6 +1433,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
if (new_entry) {
+ if (hidden_entry) {
+ spin_unlock_irqrestore(&pinstance->resource_lock,
+ lock_flags);
+ goto out_notify_apps;
+ }
+
/* If there are more number of resources than what driver can
* manage, do not notify the applications about the CCN. Just
* ignore this notifications and re-register the same HCAM
@@ -1454,8 +1469,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
sizeof(struct pmcraid_config_table_entry));
if (pinstance->ccn.hcam->notification_type ==
- NOTIFICATION_TYPE_ENTRY_DELETED) {
+ NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
if (res->scsi_dev) {
+ res->cfg_entry.unique_flags1 &= 0x7F;
res->change_detected = RES_CHANGE_DEL;
res->cfg_entry.resource_handle =
PMCRAID_INVALID_RES_HANDLE;
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 2752b56cad5..92f89d50850 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1,6 +1,9 @@
/*
* pmcraid.h -- PMC Sierra MaxRAID controller driver header file
*
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ * PMC-Sierra Inc
+ *
* Copyright (C) 2008, 2009 PMC Sierra Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
#define PMCRAID_VSET_LUN_ID 0x0
#define PMCRAID_PHYS_BUS_ID 0x0
#define PMCRAID_VIRTUAL_ENCL_BUS_ID 0x8
-#define PMCRAID_MAX_VSET_TARGETS 240
+#define PMCRAID_MAX_VSET_TARGETS 0x7F
#define PMCRAID_MAX_VSET_LUNS_PER_TARGET 8
#define PMCRAID_IOA_MAX_SECTORS 32767
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6b9bf23c773..384afda7dbe 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1570,9 +1570,6 @@ typedef struct fc_port {
struct fc_rport *rport, *drport;
u32 supported_classes;
- unsigned long last_queue_full;
- unsigned long last_ramp_up;
-
uint16_t vp_idx;
} fc_port_t;
@@ -2265,6 +2262,7 @@ struct qla_hw_data {
uint32_t port0 :1;
uint32_t running_gold_fw :1;
uint32_t cpu_affinity_enabled :1;
+ uint32_t disable_msix_handshake :1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2387,6 +2385,7 @@ struct qla_hw_data {
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
(ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e2185135850..0b6801fc638 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,8 +72,6 @@ extern int ql2xloginretrycount;
extern int ql2xfdmienable;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
-extern int ql2xqfullrampup;
-extern int ql2xqfulltracking;
extern int ql2xiidmaenable;
extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b74924b279e..73a793539d4 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1442,7 +1442,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->firmware_options_2 |=
__constant_cpu_to_le32(BIT_18);
- icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
+ /* Use Disable MSIX Handshake mode for capable adapters */
+ if (IS_MSIX_NACK_CAPABLE(ha)) {
+ icb->firmware_options_2 &=
+ __constant_cpu_to_le32(~BIT_22);
+ ha->flags.disable_msix_handshake = 1;
+ qla_printk(KERN_INFO, ha,
+ "MSIX Handshake Disable Mode turned on\n");
+ } else {
+ icb->firmware_options_2 |=
+ __constant_cpu_to_le32(BIT_22);
+ }
icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 804987397b7..1692a883f4d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -811,78 +811,6 @@ skip_rio:
qla2x00_alert_all_vps(rsp, mb);
}
-static void
-qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
-{
- fc_port_t *fcport = data;
- struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = NULL;
-
- if (!ql2xqfulltracking)
- return;
-
- req = vha->req;
- if (!req)
- return;
- if (req->max_q_depth <= sdev->queue_depth)
- return;
-
- if (sdev->ordered_tags)
- scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
- sdev->queue_depth + 1);
- else
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
- sdev->queue_depth + 1);
-
- fcport->last_ramp_up = jiffies;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
-}
-
-static void
-qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
-{
- fc_port_t *fcport = data;
-
- if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
- return;
-
- DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
-}
-
-static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
- srb_t *sp)
-{
- fc_port_t *fcport;
- struct scsi_device *sdev;
-
- if (!ql2xqfulltracking)
- return;
-
- sdev = sp->cmd->device;
- if (sdev->queue_depth >= req->max_q_depth)
- return;
-
- fcport = sp->fcport;
- if (time_before(jiffies,
- fcport->last_ramp_up + ql2xqfullrampup * HZ))
- return;
- if (time_before(jiffies,
- fcport->last_queue_full + ql2xqfullrampup * HZ))
- return;
-
- starget_for_each_device(sdev->sdev_target, fcport,
- qla2x00_adjust_sdev_qdepth_up);
-}
-
/**
* qla2x00_process_completed_request() - Process a Fast Post response.
* @ha: SCSI driver HA context
@@ -913,8 +841,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
/* Save ISP completion status */
sp->cmd->result = DID_OK << 16;
-
- qla2x00_ramp_up_queue_depth(vha, req, sp);
qla2x00_sp_compl(ha, sp);
} else {
DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -1435,13 +1361,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"scsi(%ld): QUEUE FULL status detected "
"0x%x-0x%x.\n", vha->host_no, comp_status,
scsi_status));
-
- /* Adjust queue depth for all luns on the port. */
- if (!ql2xqfulltracking)
- break;
- fcport->last_queue_full = jiffies;
- starget_for_each_device(cp->device->sdev_target,
- fcport, qla2x00_adjust_sdev_qdepth_down);
break;
}
if (lscsi_status != SS_CHECK_CONDITION)
@@ -1516,17 +1435,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"scsi(%ld): QUEUE FULL status detected "
"0x%x-0x%x.\n", vha->host_no, comp_status,
scsi_status));
-
- /*
- * Adjust queue depth for all luns on the
- * port.
- */
- if (!ql2xqfulltracking)
- break;
- fcport->last_queue_full = jiffies;
- starget_for_each_device(
- cp->device->sdev_target, fcport,
- qla2x00_adjust_sdev_qdepth_down);
break;
}
if (lscsi_status != SS_CHECK_CONDITION)
@@ -2020,7 +1928,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
vha = qla25xx_get_host(rsp);
qla24xx_process_response_queue(vha, rsp);
- if (!ha->mqenable) {
+ if (!ha->flags.disable_msix_handshake) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
@@ -2034,6 +1942,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
{
struct qla_hw_data *ha;
struct rsp_que *rsp;
+ struct device_reg_24xx __iomem *reg;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -2043,6 +1952,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
}
ha = rsp->hw;
+ /* Clear the interrupt, if enabled, for this response queue */
+ if (rsp->options & ~BIT_6) {
+ reg = &ha->iobase->isp24;
+ spin_lock_irq(&ha->hardware_lock);
+ WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ RD_REG_DWORD_RELAXED(&reg->hccr);
+ spin_unlock_irq(&ha->hardware_lock);
+ }
queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
return IRQ_HANDLED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a47d34308a3..2a4c7f4e7b6 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -696,6 +696,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
/* Use alternate PCI devfn */
if (LSB(rsp->rid))
options |= BIT_5;
+ /* Enable MSIX handshake mode on for uncapable adapters */
+ if (!IS_MSIX_NACK_CAPABLE(ha))
+ options |= BIT_6;
+
rsp->options = options;
rsp->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 41669357b18..2f873d23732 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -78,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to report for target devices.");
-int ql2xqfulltracking = 1;
-module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfulltracking,
- "Controls whether the driver tracks queue full status "
- "returns and dynamically adjusts a scsi device's queue "
- "depth. Default is 1, perform tracking. Set to 0 to "
- "disable dynamic tracking and adjustment of queue depth.");
-
-int ql2xqfullrampup = 120;
-module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfullrampup,
- "Number of seconds to wait to begin to ramp-up the queue "
- "depth for a device after a queue-full condition has been "
- "detected. Default is 120 seconds.");
-
int ql2xiidmaenable=1;
module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xiidmaenable,
@@ -1217,13 +1202,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
sdev->hostdata = NULL;
}
+static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
+{
+ fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+
+ if (!scsi_track_queue_full(sdev, qdepth))
+ return;
+
+ DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
+ "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
+ fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+ sdev->queue_depth));
+}
+
+static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
+{
+ fc_port_t *fcport = sdev->hostdata;
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+
+ req = vha->req;
+ if (!req)
+ return;
+
+ if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
+ return;
+
+ if (sdev->ordered_tags)
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
+ else
+ scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
+ fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+ sdev->queue_depth));
+}
+
static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
- if (reason != SCSI_QDEPTH_DEFAULT)
- return -EOPNOTSUPP;
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ qla2x00_handle_queue_full(sdev, qdepth);
+ break;
+ case SCSI_QDEPTH_RAMP_UP:
+ qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
+ break;
+ default:
+ return EOPNOTSUPP;
+ }
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
return sdev->queue_depth;
}
@@ -2003,13 +2036,13 @@ skip_dpc:
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
base_vha->host_no, ha));
- base_vha->flags.init_done = 1;
- base_vha->flags.online = 1;
-
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
+ base_vha->flags.init_done = 1;
+ base_vha->flags.online = 1;
+
ha->isp_ops->enable_intrs(ha);
scsi_scan_host(host);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 807e0dbc67f..c482220f7ee 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k7"
+#define QLA2XXX_VERSION "8.03.01-k8"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e495d381394..d8927681ec8 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -859,6 +859,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
case 0x07: /* operation in progress */
case 0x08: /* Long write in progress */
case 0x09: /* self test in progress */
+ case 0x14: /* space allocation in progress */
action = ACTION_DELAYED_RETRY;
break;
default:
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 6531c91501b..ddfcecd5099 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -649,11 +649,22 @@ static __init int fc_transport_init(void)
return error;
error = transport_class_register(&fc_vport_class);
if (error)
- return error;
+ goto unreg_host_class;
error = transport_class_register(&fc_rport_class);
if (error)
- return error;
- return transport_class_register(&fc_transport_class);
+ goto unreg_vport_class;
+ error = transport_class_register(&fc_transport_class);
+ if (error)
+ goto unreg_rport_class;
+ return 0;
+
+unreg_rport_class:
+ transport_class_unregister(&fc_rport_class);
+unreg_vport_class:
+ transport_class_unregister(&fc_vport_class);
+unreg_host_class:
+ transport_class_unregister(&fc_host_class);
+ return error;
}
static void __exit fc_transport_exit(void)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c7261f3..255da53e5a0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n", sdkp->ATO);
}
+static ssize_t
+sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+}
+
static struct device_attribute sd_disk_attrs[] = {
__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
sd_store_manage_start_stop),
__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
+ __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
__ATTR_NULL,
};
@@ -399,6 +409,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
}
/**
+ * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on preference
+ * indicated by target device.
+ **/
+static int sd_prepare_discard(struct request *rq)
+{
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct bio *bio = rq->bio;
+ sector_t sector = bio->bi_sector;
+ unsigned int num = bio_sectors(bio);
+
+ if (sdkp->device->sector_size == 4096) {
+ sector >>= 3;
+ num >>= 3;
+ }
+
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->timeout = SD_TIMEOUT;
+
+ memset(rq->cmd, 0, rq->cmd_len);
+
+ if (sdkp->unmap) {
+ char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+
+ rq->cmd[0] = UNMAP;
+ rq->cmd[8] = 24;
+ rq->cmd_len = 10;
+
+ /* Ensure that data length matches payload */
+ rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
+
+ put_unaligned_be16(6 + 16, &buf[0]);
+ put_unaligned_be16(16, &buf[2]);
+ put_unaligned_be64(sector, &buf[8]);
+ put_unaligned_be32(num, &buf[16]);
+
+ kunmap_atomic(buf, KM_USER0);
+ } else {
+ rq->cmd[0] = WRITE_SAME_16;
+ rq->cmd[1] = 0x8; /* UNMAP */
+ put_unaligned_be64(sector, &rq->cmd[2]);
+ put_unaligned_be32(num, &rq->cmd[10]);
+ rq->cmd_len = 16;
+ }
+
+ return BLKPREP_OK;
+}
+
+/**
* sd_init_command - build a scsi (read or write) command from
* information in the request structure.
* @SCpnt: pointer to mid-level's per scsi command structure that
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
int ret, host_dif;
unsigned char protect;
+ /*
+ * Discard requests come in as REQ_TYPE_FS but we turn them into
+ * block PC requests to make life easier.
+ */
+ if (blk_discard_rq(rq))
+ ret = sd_prepare_discard(rq);
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
sd_printk(KERN_NOTICE, sdkp,
"physical block alignment offset: %u\n", alignment);
+ if (buffer[14] & 0x80) { /* TPE */
+ struct request_queue *q = sdp->request_queue;
+
+ sdkp->thin_provisioning = 1;
+ q->limits.discard_granularity = sdkp->hw_sector_size;
+ q->limits.max_discard_sectors = 0xffffffff;
+
+ if (buffer[14] & 0x40) /* TPRZ */
+ q->limits.discard_zeroes_data = 1;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ }
+
sdkp->capacity = lba + 1;
return sector_size;
}
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
+ struct request_queue *q = sdkp->disk->queue;
unsigned int sector_sz = sdkp->device->sector_size;
char *buffer;
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
blk_queue_io_opt(sdkp->disk->queue,
get_unaligned_be32(&buffer[12]) * sector_sz);
+ /* Thin provisioning enabled and page length indicates TP support */
+ if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
+ unsigned int lba_count, desc_count, granularity;
+
+ lba_count = get_unaligned_be32(&buffer[20]);
+ desc_count = get_unaligned_be32(&buffer[24]);
+
+ if (lba_count) {
+ q->limits.max_discard_sectors =
+ lba_count * sector_sz >> 9;
+
+ if (desc_count)
+ sdkp->unmap = 1;
+ }
+
+ granularity = get_unaligned_be32(&buffer[28]);
+
+ if (granularity)
+ q->limits.discard_granularity = granularity * sector_sz;
+
+ if (buffer[32] & 0x80)
+ q->limits.discard_alignment =
+ get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+ }
+
kfree(buffer);
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index e374804d26f..43d3caf268e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -60,6 +60,8 @@ struct scsi_disk {
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
unsigned first_scan : 1;
+ unsigned thin_provisioning : 1;
+ unsigned unmap : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
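The sd_prepare_discard() hunk above packs a single-descriptor UNMAP parameter list into the bio's page: an 8-byte header followed by one 16-byte block descriptor, 24 bytes in total, which is why both rq->cmd[8] (parameter list length) and the request data length are set to 24. A commented sketch of that layout, assuming only the put_unaligned_be helpers from <asm/unaligned.h> (the function name is illustrative):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Fill a 24-byte UNMAP parameter list with one block descriptor,
 * mirroring the layout used in sd_prepare_discard() above. */
static void fill_unmap_payload(unsigned char *buf, u64 lba, u32 nblocks)
{
	put_unaligned_be16(6 + 16, &buf[0]);	/* UNMAP data length: 22 bytes follow */
	put_unaligned_be16(16, &buf[2]);	/* block descriptor data length */
						/* bytes 4..7: header, reserved */
	put_unaligned_be64(lba, &buf[8]);	/* descriptor: starting LBA */
	put_unaligned_be32(nblocks, &buf[16]);	/* descriptor: number of blocks */
						/* bytes 20..23: descriptor, reserved */
}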
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ad59abb4772..d04ea9a6f67 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
SRpnt->waiting = waiting;
if (STp->buffer->do_dio) {
+ mdata->page_order = 0;
mdata->nr_entries = STp->buffer->sg_segs;
mdata->pages = STp->buffer->mapped_pages;
} else {
+ mdata->page_order = STp->buffer->reserved_page_order;
mdata->nr_entries =
DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
- STp->buffer->map_data.pages = STp->buffer->reserved_pages;
- STp->buffer->map_data.offset = 0;
+ mdata->pages = STp->buffer->reserved_pages;
+ mdata->offset = 0;
}
memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -3719,7 +3721,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
priority |= __GFP_ZERO;
if (STbuffer->frp_segs) {
- order = STbuffer->map_data.page_order;
+ order = STbuffer->reserved_page_order;
b_size = PAGE_SIZE << order;
} else {
for (b_size = PAGE_SIZE, order = 0;
@@ -3752,7 +3754,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
segs++;
}
STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
- STbuffer->map_data.page_order = order;
+ STbuffer->reserved_page_order = order;
return 1;
}
@@ -3765,7 +3767,7 @@ static void clear_buffer(struct st_buffer * st_bp)
for (i=0; i < st_bp->frp_segs; i++)
memset(page_address(st_bp->reserved_pages[i]), 0,
- PAGE_SIZE << st_bp->map_data.page_order);
+ PAGE_SIZE << st_bp->reserved_page_order);
st_bp->cleared = 1;
}
@@ -3773,7 +3775,7 @@ static void clear_buffer(struct st_buffer * st_bp)
/* Release the extra buffer */
static void normalize_buffer(struct st_buffer * STbuffer)
{
- int i, order = STbuffer->map_data.page_order;
+ int i, order = STbuffer->reserved_page_order;
for (i = 0; i < STbuffer->frp_segs; i++) {
__free_pages(STbuffer->reserved_pages[i], order);
@@ -3781,7 +3783,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
}
STbuffer->frp_segs = 0;
STbuffer->sg_segs = 0;
- STbuffer->map_data.page_order = 0;
+ STbuffer->reserved_page_order = 0;
STbuffer->map_data.offset = 0;
}
@@ -3791,7 +3793,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
{
int i, cnt, res, offset;
- int length = PAGE_SIZE << st_bp->map_data.page_order;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
for (i = 0, offset = st_bp->buffer_bytes;
i < st_bp->frp_segs && offset >= length; i++)
@@ -3823,7 +3825,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
{
int i, cnt, res, offset;
- int length = PAGE_SIZE << st_bp->map_data.page_order;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
for (i = 0, offset = st_bp->read_pointer;
i < st_bp->frp_segs && offset >= length; i++)
@@ -3856,7 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
{
int src_seg, dst_seg, src_offset = 0, dst_offset;
int count, total;
- int length = PAGE_SIZE << st_bp->map_data.page_order;
+ int length = PAGE_SIZE << st_bp->reserved_page_order;
if (offset == 0)
return;
@@ -4578,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
}
mdata->offset = uaddr & ~PAGE_MASK;
- mdata->page_order = 0;
STbp->mapped_pages = pages;
return nr_pages;
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 544dc6b1f54..f91a67c6d96 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -46,6 +46,7 @@ struct st_buffer {
struct st_request *last_SRpnt;
struct st_cmdstatus cmdstat;
struct page **reserved_pages;
+ int reserved_page_order;
struct page **mapped_pages;
struct rq_map_data map_data;
unsigned char *b_data;
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 2b38f6ad6e1..8b955b534a3 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -984,7 +984,7 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
}
}
-static int skip_spaces(char *ptr, int len)
+static int sym_skip_spaces(char *ptr, int len)
{
int cnt, c;
@@ -1012,7 +1012,7 @@ static int is_keyword(char *ptr, int len, char *verb)
}
#define SKIP_SPACES(ptr, len) \
- if ((arg_len = skip_spaces(ptr, len)) < 1) \
+ if ((arg_len = sym_skip_spaces(ptr, len)) < 1) \
return -EINVAL; \
ptr += arg_len; len -= arg_len;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 737b4c96097..c3e37c8e7e2 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1339,14 +1339,12 @@ static void serial8250_start_tx(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
if (up->bugs & UART_BUG_TXEN) {
- unsigned char lsr, iir;
+ unsigned char lsr;
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
- iir = serial_in(up, UART_IIR) & 0x0f;
if ((up->port.type == PORT_RM9000) ?
- (lsr & UART_LSR_THRE &&
- (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) :
- (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT))
+ (lsr & UART_LSR_THRE) :
+ (lsr & UART_LSR_TEMT))
transmit_chars(up);
}
}
@@ -2646,7 +2644,7 @@ static void __init serial8250_isa_init_ports(void)
{
struct uart_8250_port *up;
static int first = 1;
- int i;
+ int i, irqflag = 0;
if (!first)
return;
@@ -2670,6 +2668,9 @@ static void __init serial8250_isa_init_ports(void)
up->port.ops = &serial8250_pops;
}
+ if (share_irqs)
+ irqflag = IRQF_SHARED;
+
for (i = 0, up = serial8250_ports;
i < ARRAY_SIZE(old_serial_port) && i < nr_uarts;
i++, up++) {
@@ -2683,8 +2684,7 @@ static void __init serial8250_isa_init_ports(void)
up->port.iotype = old_serial_port[i].io_type;
up->port.regshift = old_serial_port[i].iomem_reg_shift;
set_io_from_upio(&up->port);
- if (share_irqs)
- up->port.irqflags |= IRQF_SHARED;
+ up->port.irqflags |= irqflag;
}
}
@@ -2940,10 +2940,13 @@ static int __devinit serial8250_probe(struct platform_device *dev)
{
struct plat_serial8250_port *p = dev->dev.platform_data;
struct uart_port port;
- int ret, i;
+ int ret, i, irqflag = 0;
memset(&port, 0, sizeof(struct uart_port));
+ if (share_irqs)
+ irqflag = IRQF_SHARED;
+
for (i = 0; p && p->flags != 0; p++, i++) {
port.iobase = p->iobase;
port.membase = p->membase;
@@ -2960,8 +2963,7 @@ static int __devinit serial8250_probe(struct platform_device *dev)
port.serial_in = p->serial_in;
port.serial_out = p->serial_out;
port.dev = &dev->dev;
- if (share_irqs)
- port.irqflags |= IRQF_SHARED;
+ port.irqflags |= irqflag;
ret = serial8250_register_port(&port);
if (ret < 0) {
dev_err(&dev->dev, "unable to register port at index %d "
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index d8983dd5c4b..85dc0410ac1 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2162,7 +2162,7 @@ static struct ioc3_submodule ioc3uart_ops = {
/**
* ioc3_detect - module init called,
*/
-static int __devinit ioc3uart_init(void)
+static int __init ioc3uart_init(void)
{
int ret;
@@ -2179,7 +2179,7 @@ static int __devinit ioc3uart_init(void)
return ret;
}
-static void __devexit ioc3uart_exit(void)
+static void __exit ioc3uart_exit(void)
{
ioc3_unregister_submodule(&ioc3uart_ops);
uart_unregister_driver(&ioc3_uart);
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 2e02c3026d2..836d9ab4f72 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -2904,7 +2904,7 @@ static struct ioc4_submodule ioc4_serial_submodule = {
/**
* ioc4_serial_init - module init
*/
-int ioc4_serial_init(void)
+static int __init ioc4_serial_init(void)
{
int ret;
@@ -2913,20 +2913,30 @@ int ioc4_serial_init(void)
printk(KERN_WARNING
"%s: Couldn't register rs232 IOC4 serial driver\n",
__func__);
- return ret;
+ goto out;
}
if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
printk(KERN_WARNING
"%s: Couldn't register rs422 IOC4 serial driver\n",
__func__);
- return ret;
+ goto out_uart_rs232;
}
/* register with IOC4 main module */
- return ioc4_register_submodule(&ioc4_serial_submodule);
+ ret = ioc4_register_submodule(&ioc4_serial_submodule);
+ if (ret)
+ goto out_uart_rs422;
+ return 0;
+
+out_uart_rs422:
+ uart_unregister_driver(&ioc4_uart_rs422);
+out_uart_rs232:
+ uart_unregister_driver(&ioc4_uart_rs232);
+out:
+ return ret;
}
-static void __devexit ioc4_serial_exit(void)
+static void __exit ioc4_serial_exit(void)
{
ioc4_unregister_submodule(&ioc4_serial_submodule);
uart_unregister_driver(&ioc4_uart_rs232);
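The ioc3_serial/ioc4_serial hunks above correct the section annotations: module entry and exit routines belong in __init/__exit, while __devinit/__devexit are meant for probe/remove paths that can also run later for hotplugged devices. A minimal module skeleton showing the intended pairing (the "example" names are illustrative only):

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	/* Runs once at load time; the .init text can be discarded afterwards. */
	return 0;
}

static void __exit example_exit(void)
{
	/* Only reachable on module unload, so __exit is sufficient. */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");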
diff --git a/drivers/serial/jsm/jsm.h b/drivers/serial/jsm/jsm.h
index 4e5f3bde046..38a509c684c 100644
--- a/drivers/serial/jsm/jsm.h
+++ b/drivers/serial/jsm/jsm.h
@@ -138,7 +138,6 @@ struct jsm_board
u32 nasync; /* Number of ports on card */
u32 irq; /* Interrupt request number */
- u64 intr_count; /* Count of interrupts */
u64 membase; /* Start of base memory of the card */
u64 membase_end; /* End of base memory of the card */
@@ -206,8 +205,6 @@ struct jsm_channel {
u64 ch_close_delay; /* How long we should drop RTS/DTR for */
- u64 ch_cpstime; /* Time for CPS calculations */
-
tcflag_t ch_c_iflag; /* channel iflags */
tcflag_t ch_c_cflag; /* channel cflags */
tcflag_t ch_c_oflag; /* channel oflags */
@@ -215,11 +212,6 @@ struct jsm_channel {
u8 ch_stopc; /* Stop character */
u8 ch_startc; /* Start character */
- u32 ch_old_baud; /* Cache of the current baud */
- u32 ch_custom_speed;/* Custom baud, if set */
-
- u32 ch_wopen; /* Waiting for open process cnt */
-
u8 ch_mostat; /* FEP output modem status */
u8 ch_mistat; /* FEP input modem status */
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index b3604aa322a..108c3e0471f 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -48,6 +48,17 @@ struct uart_driver jsm_uart_driver = {
.nr = NR_PORTS,
};
+static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev);
+static void jsm_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers jsm_err_handler = {
+ .error_detected = jsm_io_error_detected,
+ .slot_reset = jsm_io_slot_reset,
+ .resume = jsm_io_resume,
+};
+
int jsm_debug;
module_param(jsm_debug, int, 0);
MODULE_PARM_DESC(jsm_debug, "Driver debugging level");
@@ -123,7 +134,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
}
rc = request_irq(brd->irq, brd->bd_ops->intr,
- IRQF_DISABLED|IRQF_SHARED, "JSM", brd);
+ IRQF_SHARED, "JSM", brd);
if (rc) {
printk(KERN_WARNING "Failed to hook IRQ %d\n",brd->irq);
goto out_iounmap;
@@ -164,6 +175,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
}
pci_set_drvdata(pdev, brd);
+ pci_save_state(pdev);
return 0;
out_free_irq:
@@ -222,8 +234,42 @@ static struct pci_driver jsm_driver = {
.id_table = jsm_pci_tbl,
.probe = jsm_probe_one,
.remove = __devexit_p(jsm_remove_one),
+ .err_handler = &jsm_err_handler,
};
+static pci_ers_result_t jsm_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct jsm_board *brd = pci_get_drvdata(pdev);
+
+ jsm_remove_uart_port(brd);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t jsm_io_slot_reset(struct pci_dev *pdev)
+{
+ int rc;
+
+ rc = pci_enable_device(pdev);
+
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void jsm_io_resume(struct pci_dev *pdev)
+{
+ struct jsm_board *brd = pci_get_drvdata(pdev);
+
+ pci_restore_state(pdev);
+
+ jsm_uart_port_init(brd);
+}
+
static int __init jsm_init_module(void)
{
int rc;
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/serial/jsm/jsm_neo.c
index b4b124e4828..7960d9633c1 100644
--- a/drivers/serial/jsm/jsm_neo.c
+++ b/drivers/serial/jsm/jsm_neo.c
@@ -954,13 +954,8 @@ static void neo_param(struct jsm_channel *ch)
ch->ch_flags |= (CH_BAUD0);
ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
neo_assert_modem_signals(ch);
- ch->ch_old_baud = 0;
return;
- } else if (ch->ch_custom_speed) {
- baud = ch->ch_custom_speed;
- if (ch->ch_flags & CH_BAUD0)
- ch->ch_flags &= ~(CH_BAUD0);
} else {
int i;
unsigned int cflag;
@@ -1045,7 +1040,6 @@ static void neo_param(struct jsm_channel *ch)
quot = ch->ch_bd->bd_dividend / baud;
if (quot != 0) {
- ch->ch_old_baud = baud;
writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr);
writeb((quot & 0xff), &ch->ch_neo_uart->txrx);
writeb((quot >> 8), &ch->ch_neo_uart->ier);
@@ -1123,8 +1117,6 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
unsigned long lock_flags2;
int outofloop_count = 0;
- brd->intr_count++;
-
/* Lock out the slow poller from running on this board. */
spin_lock_irqsave(&brd->bd_intr_lock, lock_flags);
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 7439c037362..cd95e215550 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -296,8 +296,6 @@ static void jsm_tty_close(struct uart_port *port)
bd->bd_ops->assert_modem_signals(channel);
}
- channel->ch_old_baud = 0;
-
/* Turn off UART interrupts for this port */
channel->ch_bd->bd_ops->uart_off(channel);
@@ -432,7 +430,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
return 0;
}
-int __devinit jsm_uart_port_init(struct jsm_board *brd)
+int jsm_uart_port_init(struct jsm_board *brd)
{
int i;
unsigned int line;
@@ -472,7 +470,7 @@ int __devinit jsm_uart_port_init(struct jsm_board *brd)
if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
printk(KERN_INFO "jsm: add device failed\n");
else
- printk(KERN_INFO "Added device \n");
+ printk(KERN_INFO "jsm: Port %d added\n", i);
}
jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index b8629d74f6a..56ee082157a 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -438,6 +438,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned char cval, fcr = 0;
unsigned long flags;
unsigned int baud, quot;
+ unsigned int dll;
switch (termios->c_cflag & CSIZE) {
case CS5:
@@ -534,10 +535,18 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
else
up->mcr &= ~UART_MCR_AFE;
- serial_out(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+
+ /*
+ * work around Errata #75 according to Intel(R) PXA27x Processor Family
+ * Specification Update (Nov 2005)
+ */
+ dll = serial_in(up, UART_DLL);
+ WARN_ON(dll != (quot & 0xff));
+
serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
- serial_out(up, UART_LCR, cval); /* reset DLAB */
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval; /* Save LCR */
serial_pxa_set_mctrl(&up->port, up->port.mctrl);
serial_out(up, UART_FCR, fcr);
@@ -747,7 +756,7 @@ static int serial_pxa_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops serial_pxa_pm_ops = {
+static const struct dev_pm_ops serial_pxa_pm_ops = {
.suspend = serial_pxa_suspend,
.resume = serial_pxa_resume,
};
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index dcc72444e8e..047530b285b 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -342,11 +342,11 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
if (flags == UPF_SPD_HI)
altbaud = 57600;
- if (flags == UPF_SPD_VHI)
+ else if (flags == UPF_SPD_VHI)
altbaud = 115200;
- if (flags == UPF_SPD_SHI)
+ else if (flags == UPF_SPD_SHI)
altbaud = 230400;
- if (flags == UPF_SPD_WARP)
+ else if (flags == UPF_SPD_WARP)
altbaud = 460800;
for (try = 0; try < 2; try++) {
@@ -1217,9 +1217,8 @@ static void uart_set_termios(struct tty_struct *tty,
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
uart_clear_mctrl(state->uart_port, TIOCM_RTS | TIOCM_DTR);
-
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
+ else if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
unsigned int mask = TIOCM_DTR;
if (!(cflag & CRTSCTS) ||
!test_bit(TTY_THROTTLED, &tty->flags))
@@ -1234,9 +1233,8 @@ static void uart_set_termios(struct tty_struct *tty,
__uart_start(tty);
spin_unlock_irqrestore(&state->uart_port->lock, flags);
}
-
/* Handle turning on CRTSCTS */
- if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
+ else if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
spin_lock_irqsave(&state->uart_port->lock, flags);
if (!(state->uart_port->ops->get_mctrl(state->uart_port) & TIOCM_CTS)) {
tty->hw_stopped = 1;
@@ -2344,7 +2342,7 @@ static const struct tty_operations uart_ops = {
*/
int uart_register_driver(struct uart_driver *drv)
{
- struct tty_driver *normal = NULL;
+ struct tty_driver *normal;
int i, retval;
BUG_ON(drv->state);
@@ -2354,13 +2352,12 @@ int uart_register_driver(struct uart_driver *drv)
* we have a large number of ports to handle.
*/
drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL);
- retval = -ENOMEM;
if (!drv->state)
goto out;
- normal = alloc_tty_driver(drv->nr);
+ normal = alloc_tty_driver(drv->nr);
if (!normal)
- goto out;
+ goto out_kfree;
drv->tty_driver = normal;
@@ -2393,12 +2390,14 @@ int uart_register_driver(struct uart_driver *drv)
}
retval = tty_register_driver(normal);
- out:
- if (retval < 0) {
- put_tty_driver(normal);
- kfree(drv->state);
- }
- return retval;
+ if (retval >= 0)
+ return retval;
+
+ put_tty_driver(normal);
+out_kfree:
+ kfree(drv->state);
+out:
+ return -ENOMEM;
}
/**
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index ff38dbdb5c6..68c7f6cfd72 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1043,10 +1043,14 @@ static void __devinit sci_init_single(struct platform_device *dev,
sci_port->port.iotype = UPIO_MEM;
sci_port->port.line = index;
sci_port->port.fifosize = 1;
- sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
- sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
- sci_port->enable = sci_clk_enable;
- sci_port->disable = sci_clk_disable;
+
+ if (dev) {
+ sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
+ sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+ sci_port->enable = sci_clk_enable;
+ sci_port->disable = sci_clk_disable;
+ sci_port->port.dev = &dev->dev;
+ }
sci_port->break_timer.data = (unsigned long)sci_port;
sci_port->break_timer.function = sci_break_timer;
@@ -1057,7 +1061,6 @@ static void __devinit sci_init_single(struct platform_device *dev,
sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
sci_port->port.flags = p->flags;
- sci_port->port.dev = &dev->dev;
sci_port->type = sci_port->port.type = p->type;
memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
@@ -1101,7 +1104,7 @@ static void serial_console_write(struct console *co, const char *s,
sci_port->disable(port);
}
-static int __init serial_console_setup(struct console *co, char *options)
+static int __devinit serial_console_setup(struct console *co, char *options)
{
struct sci_port *sci_port;
struct uart_port *port;
@@ -1119,9 +1122,14 @@ static int __init serial_console_setup(struct console *co, char *options)
if (co->index >= SCI_NPORTS)
co->index = 0;
- sci_port = &sci_ports[co->index];
- port = &sci_port->port;
- co->data = port;
+ if (co->data) {
+ port = co->data;
+ sci_port = to_sci_port(port);
+ } else {
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+ co->data = port;
+ }
/*
* Also need to check port->type, we don't actually have any
@@ -1165,6 +1173,15 @@ static int __init sci_console_init(void)
return 0;
}
console_initcall(sci_console_init);
+
+static struct sci_port early_serial_port;
+static struct console early_serial_console = {
+ .name = "early_ttySC",
+ .write = serial_console_write,
+ .flags = CON_PRINTBUFFER,
+};
+static char early_serial_buf[32];
+
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
@@ -1250,6 +1267,21 @@ static int __devinit sci_probe(struct platform_device *dev)
struct sh_sci_priv *priv;
int i, ret = -EINVAL;
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+ if (is_early_platform_device(dev)) {
+ if (dev->id == -1)
+ return -ENOTSUPP;
+ early_serial_console.index = dev->id;
+ early_serial_console.data = &early_serial_port.port;
+ sci_init_single(NULL, &early_serial_port, dev->id, p);
+ serial_console_setup(&early_serial_console, early_serial_buf);
+ if (!strstr(early_serial_buf, "keep"))
+ early_serial_console.flags |= CON_BOOT;
+ register_console(&early_serial_console);
+ return 0;
+ }
+#endif
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -1312,7 +1344,7 @@ static int sci_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sci_dev_pm_ops = {
+static const struct dev_pm_ops sci_dev_pm_ops = {
.suspend = sci_suspend,
.resume = sci_resume,
};
@@ -1349,6 +1381,10 @@ static void __exit sci_exit(void)
uart_unregister_driver(&sci_uart_driver);
}
+#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+early_platform_init_buffer("earlyprintk", &sci_driver,
+ early_serial_buf, ARRAY_SIZE(early_serial_buf));
+#endif
module_init(sci_init);
module_exit(sci_exit);
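The sh-sci hunks above add an early platform console: the probe path fills in a struct console, leaves CON_BOOT set unless the command line contains "keep", and hands it to register_console() so the kernel drops it automatically once the regular console registers. A stripped-down sketch of that registration with a hypothetical write hook (the real driver reuses serial_console_write()):

#include <linux/console.h>

/* Hypothetical low-level writer; a real one pushes characters to the UART. */
static void example_early_write(struct console *con, const char *s, unsigned count)
{
	/* ... poll the transmitter and emit "count" bytes of "s" ... */
}

static struct console example_early_console = {
	.name	= "early_example",
	.write	= example_early_write,
	/* CON_BOOT: unregistered when a real console shows up;
	 * CON_PRINTBUFFER: replay the log buffer on registration. */
	.flags	= CON_PRINTBUFFER | CON_BOOT,
	.index	= -1,
};

static void example_register_early_console(void)
{
	register_console(&example_early_console);
}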
diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c
index 46de564aaea..465f2fae102 100644
--- a/drivers/serial/ucc_uart.c
+++ b/drivers/serial/ucc_uart.c
@@ -1179,16 +1179,18 @@ static void uart_firmware_cont(const struct firmware *fw, void *context)
if (firmware->header.length != fw->size) {
dev_err(dev, "invalid firmware\n");
- return;
+ goto out;
}
ret = qe_upload_firmware(firmware);
if (ret) {
dev_err(dev, "could not load firmware\n");
- return;
+ goto out;
}
firmware_loaded = 1;
+ out:
+ release_firmware(fw);
}
static int ucc_uart_probe(struct of_device *ofdev,
@@ -1247,7 +1249,7 @@ static int ucc_uart_probe(struct of_device *ofdev,
*/
ret = request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG, filename, &ofdev->dev,
- &ofdev->dev, uart_firmware_cont);
+ GFP_KERNEL, &ofdev->dev, uart_firmware_cont);
if (ret) {
dev_err(&ofdev->dev,
"could not load firmware %s\n",
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index a7e5c2e9986..d5d7f23c19a 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -806,6 +806,8 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
if (d->state.event != PM_EVENT_FREEZE)
break;
for_each_irq_desc(irq, desc) {
+ if (desc->handle_irq == intc_redirect_irq)
+ continue;
if (desc->chip != &d->chip)
continue;
if (desc->status & IRQ_DISABLED)
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index 841ed5030c8..082604edc4c 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -71,7 +71,7 @@ static void gpio_write_bit(struct pinmux_data_reg *dr,
pos = dr->reg_width - (in_pos + 1);
- pr_debug("write_bit addr = %lx, value = %ld, pos = %ld, "
+ pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
"r_width = %ld\n",
dr->reg, !!value, pos, dr->reg_width);
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 816d4c592a3..66802a4390c 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -574,11 +574,11 @@ void ioc3_unregister_submodule(struct ioc3_submodule *is)
* Device management *
*********************/
-static char *
+static char * __devinitdata
ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3",
"MENET 4", "CADduo", "Altix Serial"};
-static int ioc3_class(struct ioc3_driver_data *idd)
+static int __devinit ioc3_class(struct ioc3_driver_data *idd)
{
int res = IOC3_CLASS_NONE;
/* NIC-based logic */
@@ -601,7 +601,8 @@ static int ioc3_class(struct ioc3_driver_data *idd)
return res;
}
/* Adds a new instance of an IOC3 card */
-static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+static int __devinit
+ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
struct ioc3_driver_data *idd;
uint32_t pcmd;
@@ -753,7 +754,7 @@ out:
}
/* Removes a particular instance of an IOC3 card. */
-static void ioc3_remove(struct pci_dev *pdev)
+static void __devexit ioc3_remove(struct pci_dev *pdev)
{
int id;
struct ioc3_driver_data *idd;
@@ -805,7 +806,7 @@ static struct pci_driver ioc3_driver = {
.name = "IOC3",
.id_table = ioc3_id_table,
.probe = ioc3_probe,
- .remove = ioc3_remove,
+ .remove = __devexit_p(ioc3_remove),
};
MODULE_DEVICE_TABLE(pci, ioc3_id_table);
@@ -815,15 +816,15 @@ MODULE_DEVICE_TABLE(pci, ioc3_id_table);
*********************/
/* Module load */
-static int __devinit ioc3_init(void)
+static int __init ioc3_init(void)
{
if (ia64_platform_is("sn2"))
return pci_register_driver(&ioc3_driver);
- return 0;
+ return -ENODEV;
}
/* Module unload */
-static void __devexit ioc3_exit(void)
+static void __exit ioc3_exit(void)
{
pci_unregister_driver(&ioc3_driver);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4b6f7cba3b3..f55eb010733 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -133,6 +133,14 @@ config SPI_LM70_LLP
which interfaces to an LM70 temperature sensor using
a parallel port.
+config SPI_MPC52xx
+ tristate "Freescale MPC52xx SPI (non-PSC) controller support"
+ depends on PPC_MPC52xx && SPI
+ select SPI_MASTER_OF
+ help
+ This driver supports the MPC52xx SPI controller in master SPI
+ mode.
+
config SPI_MPC52xx_PSC
tristate "Freescale MPC52xx PSC SPI controller"
depends on PPC_MPC52xx && EXPERIMENTAL
@@ -147,9 +155,6 @@ config SPI_MPC8xxx
This enables using the Freescale MPC8xxx SPI controllers in master
mode.
- This driver uses a simple set of shift registers for data (opposed
- to the CPM based descriptor model).
-
config SPI_OMAP_UWIRE
tristate "OMAP1 MicroWire"
depends on ARCH_OMAP1
@@ -164,6 +169,12 @@ config SPI_OMAP24XX
SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
(McSPI) modules.
+config SPI_OMAP_100K
+ tristate "OMAP SPI 100K"
+ depends on SPI_MASTER && (ARCH_OMAP850 || ARCH_OMAP730)
+ help
+ OMAP SPI 100K master controller for omap7xx boards.
+
config SPI_ORION
tristate "Orion SPI master (EXPERIMENTAL)"
depends on PLAT_ORION && EXPERIMENTAL
@@ -205,6 +216,17 @@ config SPI_S3C24XX
help
SPI driver for Samsung S3C24XX series ARM SoCs
+config SPI_S3C24XX_FIQ
+ bool "S3C24XX driver with FIQ pseudo-DMA"
+ depends on SPI_S3C24XX
+ select FIQ
+ help
+ Enable FIQ support for the S3C24XX SPI driver to provide pseudo
+ DMA by using the fast-interrupt request framework. This allows
+ the driver to get DMA-like performance when there are either
+ no free DMA channels, or when doing transfers that require both
+ TX and RX data paths.
+
config SPI_S3C24XX_GPIO
tristate "Samsung S3C24XX series SPI by GPIO"
depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -215,6 +237,20 @@ config SPI_S3C24XX_GPIO
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
+config SPI_S3C64XX
+ tristate "Samsung S3C64XX series type SPI"
+ depends on ARCH_S3C64XX && EXPERIMENTAL
+ select S3C64XX_DMA
+ help
+ SPI driver for Samsung S3C64XX and newer SoCs.
+
+config SPI_SH_MSIOF
+ tristate "SuperH MSIOF SPI controller"
+ depends on SUPERH && HAVE_CLK
+ select SPI_BITBANG
+ help
+ SPI driver for SuperH MSIOF blocks.
+
config SPI_SH_SCI
tristate "SuperH SCI SPI controller"
depends on SUPERH
@@ -235,19 +271,52 @@ config SPI_TXX9
SPI driver for Toshiba TXx9 MIPS SoCs
config SPI_XILINX
- tristate "Xilinx SPI controller"
- depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
+ tristate "Xilinx SPI controller common module"
+ depends on HAS_IOMEM && EXPERIMENTAL
select SPI_BITBANG
+ select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE)
help
This exposes the SPI controller IP from the Xilinx EDK.
See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
Product Specification document (DS464) for hardware details.
+ Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+
+config SPI_XILINX_OF
+ tristate "Xilinx SPI controller OF device"
+ depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE)
+ help
+ This is the OF driver for the SPI controller IP from the Xilinx EDK.
+
+config SPI_XILINX_PLTFM
+ tristate "Xilinx SPI controller platform device"
+ depends on SPI_XILINX
+ help
+ This is the platform driver for the SPI controller IP
+ from the Xilinx EDK.
+
+config SPI_NUC900
+ tristate "Nuvoton NUC900 series SPI"
+ depends on ARCH_W90X900 && EXPERIMENTAL
+ select SPI_BITBANG
+ help
+ SPI driver for Nuvoton NUC900 series ARM SoCs
+
#
# Add new SPI master controllers in alphabetical order above this line
#
+config SPI_DESIGNWARE
+ bool "DesignWare SPI controller core support"
+ depends on SPI_MASTER
+ help
+ General driver for the SPI controller core from DesignWare.
+
+config SPI_DW_PCI
+ tristate "PCI interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE && PCI
+
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 21a118269ca..f3d2810ba11 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,23 +16,37 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
+obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
+obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o
obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
obj-$(CONFIG_SPI_IMX) += spi_imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
+obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
obj-$(CONFIG_SPI_ORION) += orion_spi.o
obj-$(CONFIG_SPI_PL022) += amba-pl022.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
+obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
-obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
+obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
+obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
+obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
+obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
+obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
+obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
+
+# special build for s3c24xx spi driver with fiq support
+spi_s3c24xx_hw-y := spi_s3c24xx.o
+spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
+
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index f5b3fdbb1e2..d21c24eaf0a 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -189,14 +189,14 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
/* use scratch buffer only when rx or tx data is unspecified */
if (xfer->rx_buf)
- *rx_dma = xfer->rx_dma + xfer->len - len;
+ *rx_dma = xfer->rx_dma + xfer->len - *plen;
else {
*rx_dma = as->buffer_dma;
if (len > BUFFER_SIZE)
len = BUFFER_SIZE;
}
if (xfer->tx_buf)
- *tx_dma = xfer->tx_dma + xfer->len - len;
+ *tx_dma = xfer->tx_dma + xfer->len - *plen;
else {
*tx_dma = as->buffer_dma;
if (len > BUFFER_SIZE)
@@ -788,7 +788,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
spin_lock_init(&as->lock);
INIT_LIST_HEAD(&as->queue);
as->pdev = pdev;
- as->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
+ as->regs = ioremap(regs->start, resource_size(regs));
if (!as->regs)
goto out_free_buffer;
as->irq = irq;
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 76cbc1a6659..cfd5ff9508f 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -237,8 +237,14 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
unsigned bpw, hz;
u32 cfg, stat;
- bpw = t ? t->bits_per_word : spi->bits_per_word;
- hz = t ? t->speed_hz : spi->max_speed_hz;
+ bpw = spi->bits_per_word;
+ hz = spi->max_speed_hz;
+ if (t) {
+ if (t->bits_per_word)
+ bpw = t->bits_per_word;
+ if (t->speed_hz)
+ hz = t->speed_hz;
+ }
if (bpw < 4 || bpw > 24) {
dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n",
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
new file mode 100644
index 00000000000..31620fae77b
--- /dev/null
+++ b/drivers/spi/dw_spi.c
@@ -0,0 +1,944 @@
+/*
+ * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c)
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+
+#include <linux/spi/dw_spi.h>
+#include <linux/spi/spi.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+#define QUEUE_RUNNING 0
+#define QUEUE_STOPPED 1
+
+#define MRST_SPI_DEASSERT 0
+#define MRST_SPI_ASSERT 1
+
+/* Slave spi_dev related */
+struct chip_data {
+ u16 cr0;
+ u8 cs; /* chip select pin */
+ u8 n_bytes; /* current is a 1/2/4 byte op */
+ u8 tmode; /* TR/TO/RO/EEPROM */
+ u8 type; /* SPI/SSP/MicroWire */
+
+ u8 poll_mode; /* 1 means use poll mode */
+
+ u32 dma_width;
+ u32 rx_threshold;
+ u32 tx_threshold;
+ u8 enable_dma;
+ u8 bits_per_word;
+ u16 clk_div; /* baud rate divider */
+ u32 speed_hz; /* baud rate */
+ int (*write)(struct dw_spi *dws);
+ int (*read)(struct dw_spi *dws);
+ void (*cs_control)(u32 command);
+};
+
+#ifdef CONFIG_DEBUG_FS
+static int spi_show_regs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define SPI_REGS_BUFSIZE 1024
+static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_spi *dws;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
+
+ dws = file->private_data;
+
+ buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "MRST SPI0 registers:\n");
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "=================================\n");
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SER: \t\t0x%08x\n", dw_readl(dws, ser));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SR: \t\t0x%08x\n", dw_readl(dws, sr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "IMR: \t\t0x%08x\n", dw_readl(dws, imr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "ISR: \t\t0x%08x\n", dw_readl(dws, isr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "=================================\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations mrst_spi_regs_ops = {
+ .owner = THIS_MODULE,
+ .open = spi_show_regs_open,
+ .read = spi_show_regs,
+};
+
+static int mrst_spi_debugfs_init(struct dw_spi *dws)
+{
+ dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
+ if (!dws->debugfs)
+ return -ENOMEM;
+
+ debugfs_create_file("registers", S_IFREG | S_IRUGO,
+ dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
+ return 0;
+}
+
+static void mrst_spi_debugfs_remove(struct dw_spi *dws)
+{
+ if (dws->debugfs)
+ debugfs_remove_recursive(dws->debugfs);
+}
+
+#else
+static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
+{
+ return 0;
+}
+
+static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void wait_till_not_busy(struct dw_spi *dws)
+{
+ unsigned long end = jiffies + usecs_to_jiffies(1000);
+
+ while (time_before(jiffies, end)) {
+ if (!(dw_readw(dws, sr) & SR_BUSY))
+ return;
+ }
+ dev_err(&dws->master->dev,
+ "DW SPI: Stutus keeps busy for 1000us after a read/write!\n");
+}
+
+static void flush(struct dw_spi *dws)
+{
+ while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ dw_readw(dws, dr);
+
+ wait_till_not_busy(dws);
+}
+
+static void null_cs_control(u32 command)
+{
+}
+
+static int null_writer(struct dw_spi *dws)
+{
+ u8 n_bytes = dws->n_bytes;
+
+ if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
+ || (dws->tx == dws->tx_end))
+ return 0;
+ dw_writew(dws, dr, 0);
+ dws->tx += n_bytes;
+
+ wait_till_not_busy(dws);
+ return 1;
+}
+
+static int null_reader(struct dw_spi *dws)
+{
+ u8 n_bytes = dws->n_bytes;
+
+ while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ && (dws->rx < dws->rx_end)) {
+ dw_readw(dws, dr);
+ dws->rx += n_bytes;
+ }
+ wait_till_not_busy(dws);
+ return dws->rx == dws->rx_end;
+}
+
+static int u8_writer(struct dw_spi *dws)
+{
+ if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
+ || (dws->tx == dws->tx_end))
+ return 0;
+
+ dw_writew(dws, dr, *(u8 *)(dws->tx));
+ ++dws->tx;
+
+ wait_till_not_busy(dws);
+ return 1;
+}
+
+static int u8_reader(struct dw_spi *dws)
+{
+ while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ && (dws->rx < dws->rx_end)) {
+ *(u8 *)(dws->rx) = dw_readw(dws, dr);
+ ++dws->rx;
+ }
+
+ wait_till_not_busy(dws);
+ return dws->rx == dws->rx_end;
+}
+
+static int u16_writer(struct dw_spi *dws)
+{
+ if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
+ || (dws->tx == dws->tx_end))
+ return 0;
+
+ dw_writew(dws, dr, *(u16 *)(dws->tx));
+ dws->tx += 2;
+
+ wait_till_not_busy(dws);
+ return 1;
+}
+
+static int u16_reader(struct dw_spi *dws)
+{
+ u16 temp;
+
+ while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
+ && (dws->rx < dws->rx_end)) {
+ temp = dw_readw(dws, dr);
+ *(u16 *)(dws->rx) = temp;
+ dws->rx += 2;
+ }
+
+ wait_till_not_busy(dws);
+ return dws->rx == dws->rx_end;
+}
+
+static void *next_transfer(struct dw_spi *dws)
+{
+ struct spi_message *msg = dws->cur_msg;
+ struct spi_transfer *trans = dws->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ dws->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer,
+ transfer_list);
+ return RUNNING_STATE;
+ } else
+ return DONE_STATE;
+}
+
+/*
+ * Note: the protocol driver must first prepare DMA-capable memory;
+ * this function only needs to translate the virtual addresses
+ * to physical ones
+ */
+static int map_dma_buffers(struct dw_spi *dws)
+{
+ if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
+ || !dws->cur_chip->enable_dma)
+ return 0;
+
+ if (dws->cur_transfer->tx_dma)
+ dws->tx_dma = dws->cur_transfer->tx_dma;
+
+ if (dws->cur_transfer->rx_dma)
+ dws->rx_dma = dws->cur_transfer->rx_dma;
+
+ return 1;
+}
+
+/* Caller already set message->status; dma and pio irqs are blocked */
+static void giveback(struct dw_spi *dws)
+{
+ struct spi_transfer *last_transfer;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&dws->lock, flags);
+ msg = dws->cur_msg;
+ dws->cur_msg = NULL;
+ dws->cur_transfer = NULL;
+ dws->prev_chip = dws->cur_chip;
+ dws->cur_chip = NULL;
+ dws->dma_mapped = 0;
+ queue_work(dws->workqueue, &dws->pump_messages);
+ spin_unlock_irqrestore(&dws->lock, flags);
+
+ last_transfer = list_entry(msg->transfers.prev,
+ struct spi_transfer,
+ transfer_list);
+
+ if (!last_transfer->cs_change)
+ dws->cs_control(MRST_SPI_DEASSERT);
+
+ msg->state = NULL;
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static void int_error_stop(struct dw_spi *dws, const char *msg)
+{
+ /* Stop and reset hw */
+ flush(dws);
+ spi_enable_chip(dws, 0);
+
+ dev_err(&dws->master->dev, "%s\n", msg);
+ dws->cur_msg->state = ERROR_STATE;
+ tasklet_schedule(&dws->pump_transfers);
+}
+
+static void transfer_complete(struct dw_spi *dws)
+{
+ /* Update total bytes transferred; actual_length reports the bytes actually read */
+ dws->cur_msg->actual_length += dws->len;
+
+ /* Move to next transfer */
+ dws->cur_msg->state = next_transfer(dws);
+
+ /* Handle end of message */
+ if (dws->cur_msg->state == DONE_STATE) {
+ dws->cur_msg->status = 0;
+ giveback(dws);
+ } else
+ tasklet_schedule(&dws->pump_transfers);
+}
+
+static irqreturn_t interrupt_transfer(struct dw_spi *dws)
+{
+ u16 irq_status, irq_mask = 0x3f;
+
+ irq_status = dw_readw(dws, isr) & irq_mask;
+ /* Error handling */
+ if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
+ dw_readw(dws, txoicr);
+ dw_readw(dws, rxoicr);
+ dw_readw(dws, rxuicr);
+ int_error_stop(dws, "interrupt_transfer: fifo overrun");
+ return IRQ_HANDLED;
+ }
+
+ /* INT comes from tx */
+ if (dws->tx && (irq_status & SPI_INT_TXEI)) {
+ while (dws->tx < dws->tx_end)
+ dws->write(dws);
+
+ if (dws->tx == dws->tx_end) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+ transfer_complete(dws);
+ }
+ }
+
+ /* INT comes from rx */
+ if (dws->rx && (irq_status & SPI_INT_RXFI)) {
+ if (dws->read(dws))
+ transfer_complete(dws);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dw_spi_irq(int irq, void *dev_id)
+{
+ struct dw_spi *dws = dev_id;
+
+ if (!dws->cur_msg) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ return dws->transfer_handler(dws);
+}
+
+/* Must be called inside pump_transfers() */
+static void poll_transfer(struct dw_spi *dws)
+{
+ if (dws->tx) {
+ while (dws->write(dws))
+ dws->read(dws);
+ }
+
+ dws->read(dws);
+ transfer_complete(dws);
+}
+
+static void dma_transfer(struct dw_spi *dws, int cs_change)
+{
+}
+
+static void pump_transfers(unsigned long data)
+{
+ struct dw_spi *dws = (struct dw_spi *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct spi_device *spi = NULL;
+ struct chip_data *chip = NULL;
+ u8 bits = 0;
+ u8 imask = 0;
+ u8 cs_change = 0;
+ u16 clk_div = 0;
+ u32 speed = 0;
+ u32 cr0 = 0;
+
+ /* Get current state information */
+ message = dws->cur_msg;
+ transfer = dws->cur_transfer;
+ chip = dws->cur_chip;
+ spi = message->spi;
+
+ if (message->state == ERROR_STATE) {
+ message->status = -EIO;
+ goto early_exit;
+ }
+
+ /* Handle end of message */
+ if (message->state == DONE_STATE) {
+ message->status = 0;
+ goto early_exit;
+ }
+
+ /* Delay if requested at end of transfer*/
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ dws->n_bytes = chip->n_bytes;
+ dws->dma_width = chip->dma_width;
+ dws->cs_control = chip->cs_control;
+
+ dws->rx_dma = transfer->rx_dma;
+ dws->tx_dma = transfer->tx_dma;
+ dws->tx = (void *)transfer->tx_buf;
+ dws->tx_end = dws->tx + transfer->len;
+ dws->rx = transfer->rx_buf;
+ dws->rx_end = dws->rx + transfer->len;
+ dws->write = dws->tx ? chip->write : null_writer;
+ dws->read = dws->rx ? chip->read : null_reader;
+ dws->cs_change = transfer->cs_change;
+ dws->len = dws->cur_transfer->len;
+ if (chip != dws->prev_chip)
+ cs_change = 1;
+
+ cr0 = chip->cr0;
+
+ /* Handle per transfer options for bpw and speed */
+ if (transfer->speed_hz) {
+ speed = chip->speed_hz;
+
+ if (transfer->speed_hz != speed) {
+ speed = transfer->speed_hz;
+ if (speed > dws->max_freq) {
+ printk(KERN_ERR "MRST SPI0: unsupported"
+ "freq: %dHz\n", speed);
+ message->status = -EIO;
+ goto early_exit;
+ }
+
+ /* clk_div doesn't support odd number */
+ clk_div = dws->max_freq / speed;
+ clk_div = (clk_div >> 1) << 1;
+
+ chip->speed_hz = speed;
+ chip->clk_div = clk_div;
+ }
+ }
+ if (transfer->bits_per_word) {
+ bits = transfer->bits_per_word;
+
+ switch (bits) {
+ case 8:
+ dws->n_bytes = 1;
+ dws->dma_width = 1;
+ dws->read = (dws->read != null_reader) ?
+ u8_reader : null_reader;
+ dws->write = (dws->write != null_writer) ?
+ u8_writer : null_writer;
+ break;
+ case 16:
+ dws->n_bytes = 2;
+ dws->dma_width = 2;
+ dws->read = (dws->read != null_reader) ?
+ u16_reader : null_reader;
+ dws->write = (dws->write != null_writer) ?
+ u16_writer : null_writer;
+ break;
+ default:
+ printk(KERN_ERR "MRST SPI0: unsupported bits:"
+ "%db\n", bits);
+ message->status = -EIO;
+ goto early_exit;
+ }
+
+ cr0 = (bits - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
+ }
+ message->state = RUNNING_STATE;
+
+ /* Check if current transfer is a DMA transaction */
+ dws->dma_mapped = map_dma_buffers(dws);
+
+ if (!dws->dma_mapped && !chip->poll_mode) {
+ if (dws->rx)
+ imask |= SPI_INT_RXFI;
+ if (dws->tx)
+ imask |= SPI_INT_TXEI;
+ dws->transfer_handler = interrupt_transfer;
+ }
+
+ /*
+ * Reprogram registers only if
+ * 1. chip select changes
+ * 2. clk_div is changed
+ * 3. control value changes
+ */
+ if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) {
+ spi_enable_chip(dws, 0);
+
+ if (dw_readw(dws, ctrl0) != cr0)
+ dw_writew(dws, ctrl0, cr0);
+
+ /* Set the interrupt mask; for poll mode just disable all interrupts */
+ spi_mask_intr(dws, 0xff);
+ if (!chip->poll_mode)
+ spi_umask_intr(dws, imask);
+
+ spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
+ spi_chip_sel(dws, spi->chip_select);
+ spi_enable_chip(dws, 1);
+
+ if (cs_change)
+ dws->prev_chip = chip;
+ }
+
+ if (dws->dma_mapped)
+ dma_transfer(dws, cs_change);
+
+ if (chip->poll_mode)
+ poll_transfer(dws);
+
+ return;
+
+early_exit:
+ giveback(dws);
+ return;
+}
+
+static void pump_messages(struct work_struct *work)
+{
+ struct dw_spi *dws =
+ container_of(work, struct dw_spi, pump_messages);
+ unsigned long flags;
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&dws->lock, flags);
+ if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
+ dws->busy = 0;
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (dws->cur_msg) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
+ list_del_init(&dws->cur_msg->queue);
+
+ /* Initial message state*/
+ dws->cur_msg->state = START_STATE;
+ dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
+ struct spi_transfer,
+ transfer_list);
+ dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&dws->pump_transfers);
+
+ dws->busy = 1;
+ spin_unlock_irqrestore(&dws->lock, flags);
+}
+
+/* spi_device use this to queue in their spi_msg */
+static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dws->lock, flags);
+
+ if (dws->run == QUEUE_STOPPED) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+ list_add_tail(&msg->queue, &dws->queue);
+
+ if (dws->run == QUEUE_RUNNING && !dws->busy) {
+
+ if (dws->cur_transfer || dws->cur_msg)
+ queue_work(dws->workqueue,
+ &dws->pump_messages);
+ else {
+ /* If no other data transaction in air, just go */
+ spin_unlock_irqrestore(&dws->lock, flags);
+ pump_messages(&dws->pump_messages);
+ return 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return 0;
+}
+
+/* This may be called twice for each spi dev */
+static int dw_spi_setup(struct spi_device *spi)
+{
+ struct dw_spi_chip *chip_info = NULL;
+ struct chip_data *chip;
+
+ if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
+ return -EINVAL;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->cs_control = null_cs_control;
+ chip->enable_dma = 0;
+ }
+
+ /*
+ * Protocol drivers may change the chip settings, so...
+ * if chip_info exists, use it
+ */
+ chip_info = spi->controller_data;
+
+ /* chip_info doesn't always exist */
+ if (chip_info) {
+ if (chip_info->cs_control)
+ chip->cs_control = chip_info->cs_control;
+
+ chip->poll_mode = chip_info->poll_mode;
+ chip->type = chip_info->type;
+
+ chip->rx_threshold = 0;
+ chip->tx_threshold = 0;
+
+ chip->enable_dma = chip_info->enable_dma;
+ }
+
+ if (spi->bits_per_word <= 8) {
+ chip->n_bytes = 1;
+ chip->dma_width = 1;
+ chip->read = u8_reader;
+ chip->write = u8_writer;
+ } else if (spi->bits_per_word <= 16) {
+ chip->n_bytes = 2;
+ chip->dma_width = 2;
+ chip->read = u16_reader;
+ chip->write = u16_writer;
+ } else {
+ /* Never take >16b case for MRST SPIC */
+ dev_err(&spi->dev, "invalid wordsize\n");
+ return -EINVAL;
+ }
+ chip->bits_per_word = spi->bits_per_word;
+
+ chip->speed_hz = spi->max_speed_hz;
+ if (chip->speed_hz)
+ chip->clk_div = 25000000 / chip->speed_hz;
+ else
+ chip->clk_div = 8; /* default value */
+
+ chip->tmode = 0; /* Tx & Rx */
+ /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+ chip->cr0 = (chip->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
+
+ spi_set_ctldata(spi, chip);
+ return 0;
+}
+
+static void dw_spi_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+ kfree(chip);
+}
+
+static int __init init_queue(struct dw_spi *dws)
+{
+ INIT_LIST_HEAD(&dws->queue);
+ spin_lock_init(&dws->lock);
+
+ dws->run = QUEUE_STOPPED;
+ dws->busy = 0;
+
+ tasklet_init(&dws->pump_transfers,
+ pump_transfers, (unsigned long)dws);
+
+ INIT_WORK(&dws->pump_messages, pump_messages);
+ dws->workqueue = create_singlethread_workqueue(
+ dev_name(dws->master->dev.parent));
+ if (dws->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int start_queue(struct dw_spi *dws)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dws->lock, flags);
+
+ if (dws->run == QUEUE_RUNNING || dws->busy) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ return -EBUSY;
+ }
+
+ dws->run = QUEUE_RUNNING;
+ dws->cur_msg = NULL;
+ dws->cur_transfer = NULL;
+ dws->cur_chip = NULL;
+ dws->prev_chip = NULL;
+ spin_unlock_irqrestore(&dws->lock, flags);
+
+ queue_work(dws->workqueue, &dws->pump_messages);
+
+ return 0;
+}
+
+static int stop_queue(struct dw_spi *dws)
+{
+ unsigned long flags;
+ unsigned limit = 50;
+ int status = 0;
+
+ spin_lock_irqsave(&dws->lock, flags);
+ dws->run = QUEUE_STOPPED;
+ while (!list_empty(&dws->queue) && dws->busy && limit--) {
+ spin_unlock_irqrestore(&dws->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&dws->lock, flags);
+ }
+
+ if (!list_empty(&dws->queue) || dws->busy)
+ status = -EBUSY;
+ spin_unlock_irqrestore(&dws->lock, flags);
+
+ return status;
+}
+
+static int destroy_queue(struct dw_spi *dws)
+{
+ int status;
+
+ status = stop_queue(dws);
+ if (status != 0)
+ return status;
+ destroy_workqueue(dws->workqueue);
+ return 0;
+}
+
+/* Restart the controller, disable all interrupts, clean rx fifo */
+static void spi_hw_init(struct dw_spi *dws)
+{
+ spi_enable_chip(dws, 0);
+ spi_mask_intr(dws, 0xff);
+ spi_enable_chip(dws, 1);
+ flush(dws);
+}
+
+int __devinit dw_spi_add_host(struct dw_spi *dws)
+{
+ struct spi_master *master;
+ int ret;
+
+ BUG_ON(dws == NULL);
+
+ master = spi_alloc_master(dws->parent_dev, 0);
+ if (!master) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ dws->master = master;
+ dws->type = SSI_MOTO_SPI;
+ dws->prev_chip = NULL;
+ dws->dma_inited = 0;
+ dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+
+ ret = request_irq(dws->irq, dw_spi_irq, 0,
+ "dw_spi", dws);
+ if (ret < 0) {
+ dev_err(&master->dev, "can not get IRQ\n");
+ goto err_free_master;
+ }
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->bus_num = dws->bus_num;
+ master->num_chipselect = dws->num_cs;
+ master->cleanup = dw_spi_cleanup;
+ master->setup = dw_spi_setup;
+ master->transfer = dw_spi_transfer;
+
+ dws->dma_inited = 0;
+
+ /* Basic HW init */
+ spi_hw_init(dws);
+
+ /* Initial and start queue */
+ ret = init_queue(dws);
+ if (ret) {
+ dev_err(&master->dev, "problem initializing queue\n");
+ goto err_disable_hw;
+ }
+ ret = start_queue(dws);
+ if (ret) {
+ dev_err(&master->dev, "problem starting queue\n");
+ goto err_disable_hw;
+ }
+
+ spi_master_set_devdata(master, dws);
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&master->dev, "problem registering spi master\n");
+ goto err_queue_alloc;
+ }
+
+ mrst_spi_debugfs_init(dws);
+ return 0;
+
+err_queue_alloc:
+ destroy_queue(dws);
+err_disable_hw:
+ spi_enable_chip(dws, 0);
+ free_irq(dws->irq, dws);
+err_free_master:
+ spi_master_put(master);
+exit:
+ return ret;
+}
+EXPORT_SYMBOL(dw_spi_add_host);
+
+void __devexit dw_spi_remove_host(struct dw_spi *dws)
+{
+ int status = 0;
+
+ if (!dws)
+ return;
+ mrst_spi_debugfs_remove(dws);
+
+ /* Remove the queue */
+ status = destroy_queue(dws);
+ if (status != 0)
+ dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
+ "complete, message memory not freed\n");
+
+ spi_enable_chip(dws, 0);
+ /* Disable clk */
+ spi_set_clk(dws, 0);
+ free_irq(dws->irq, dws);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(dws->master);
+}
+
+int dw_spi_suspend_host(struct dw_spi *dws)
+{
+ int ret = 0;
+
+ ret = stop_queue(dws);
+ if (ret)
+ return ret;
+ spi_enable_chip(dws, 0);
+ spi_set_clk(dws, 0);
+ return ret;
+}
+EXPORT_SYMBOL(dw_spi_suspend_host);
+
+int dw_spi_resume_host(struct dw_spi *dws)
+{
+ int ret;
+
+ spi_hw_init(dws);
+ ret = start_queue(dws);
+ if (ret)
+ dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL(dw_spi_resume_host);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
new file mode 100644
index 00000000000..34ba6916173
--- /dev/null
+++ b/drivers/spi/dw_spi_pci.c
@@ -0,0 +1,169 @@
+/*
+ * dw_spi_pci.c - PCI interface driver for DW SPI Core
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/spi/dw_spi.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "dw_spi_pci"
+
+struct dw_spi_pci {
+ struct pci_dev *pdev;
+ struct dw_spi dws;
+};
+
+static int __devinit spi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct dw_spi_pci *dwpci;
+ struct dw_spi *dws;
+ int pci_bar = 0;
+ int ret;
+
+ printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
+ pdev->vendor, pdev->device);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
+ if (!dwpci) {
+ ret = -ENOMEM;
+ goto err_disable;
+ }
+
+ dwpci->pdev = pdev;
+ dws = &dwpci->dws;
+
+ /* Get basic io resource and map it */
+ dws->paddr = pci_resource_start(pdev, pci_bar);
+ dws->iolen = pci_resource_len(pdev, pci_bar);
+
+ ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
+ if (ret)
+ goto err_kfree;
+
+ dws->regs = ioremap_nocache((unsigned long)dws->paddr,
+ pci_resource_len(pdev, pci_bar));
+ if (!dws->regs) {
+ ret = -ENOMEM;
+ goto err_release_reg;
+ }
+
+ dws->parent_dev = &pdev->dev;
+ dws->bus_num = 0;
+ dws->num_cs = 4;
+ dws->max_freq = 25000000; /* for Moorestown */
+ dws->irq = pdev->irq;
+
+ ret = dw_spi_add_host(dws);
+ if (ret)
+ goto err_unmap;
+
+ /* PCI hook and SPI hook use the same drv data */
+ pci_set_drvdata(pdev, dwpci);
+ return 0;
+
+err_unmap:
+ iounmap(dws->regs);
+err_release_reg:
+ pci_release_region(pdev, pci_bar);
+err_kfree:
+ kfree(dwpci);
+err_disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit spi_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+ iounmap(dwpci->dws.regs);
+ pci_release_region(pdev, 0);
+ kfree(dwpci);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = dw_spi_suspend_host(&dwpci->dws);
+ if (ret)
+ return ret;
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return ret;
+}
+
+static int spi_resume(struct pci_dev *pdev)
+{
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+ int ret;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ return dw_spi_resume_host(&dwpci->dws);
+}
+#else
+#define spi_suspend NULL
+#define spi_resume NULL
+#endif
+
+static const struct pci_device_id pci_ids[] __devinitdata = {
+ /* Intel Moorestown platform SPI controller 0 */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
+ {},
+};
+
+static struct pci_driver dw_spi_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pci_ids,
+ .probe = spi_pci_probe,
+ .remove = __devexit_p(spi_pci_remove),
+ .suspend = spi_suspend,
+ .resume = spi_resume,
+};
+
+static int __init mrst_spi_init(void)
+{
+ return pci_register_driver(&dw_spi_driver);
+}
+
+static void __exit mrst_spi_exit(void)
+{
+ pci_unregister_driver(&dw_spi_driver);
+}
+
+module_init(mrst_spi_init);
+module_exit(mrst_spi_exit);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 1b74d5ca03f..f50c81df336 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
+#include <linux/of_spi.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/io.h>
@@ -313,11 +314,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
u32 mclken_div;
- int ret = 0;
+ int ret;
/* default sysclk is 512MHz */
mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
- mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
+ ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
+ if (ret)
+ return ret;
/* Reset the PSC into a known state */
out_8(&psc->command, MPC52xx_PSC_RST_RX);
@@ -341,7 +344,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
mps->bits_per_word = 8;
- return ret;
+ return 0;
}
static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
@@ -410,8 +413,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
goto free_master;
ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(dev, "can't configure PSC! Is it capable of SPI?\n");
goto free_irq;
+ }
spin_lock_init(&mps->lock);
init_completion(&mps->done);
@@ -464,10 +469,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
const u32 *regaddr_p;
u64 regaddr64, size64;
s16 id = -1;
+ int rc;
regaddr_p = of_get_address(op->node, 0, &size64, NULL);
if (!regaddr_p) {
- printk(KERN_ERR "Invalid PSC address\n");
+ dev_err(&op->dev, "Invalid PSC address\n");
return -EINVAL;
}
regaddr64 = of_translate_address(op->node, regaddr_p);
@@ -478,15 +484,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
psc_nump = of_get_property(op->node, "cell-index", NULL);
if (!psc_nump || *psc_nump > 5) {
- printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid "
- "cell-index property\n", op->node->full_name);
+ dev_err(&op->dev, "Invalid cell-index property\n");
return -EINVAL;
}
id = *psc_nump + 1;
}
- return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
+ rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
irq_of_parse_and_map(op->node, 0), id);
+ if (rc == 0)
+ of_register_spi_devices(dev_get_drvdata(&op->dev), op->node);
+
+ return rc;
}
static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
new file mode 100644
index 00000000000..45bfe645817
--- /dev/null
+++ b/drivers/spi/mpc52xx_spi.c
@@ -0,0 +1,578 @@
+/*
+ * MPC52xx SPI bus driver.
+ *
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * This is the driver for the MPC5200's dedicated SPI controller.
+ *
+ * Note: this driver does not support the MPC5200 PSC in SPI mode. For
+ * that driver see drivers/spi/mpc52xx_psc_spi.c
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/of_spi.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <asm/time.h>
+#include <asm/mpc52xx.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
+MODULE_LICENSE("GPL");
+
+/* Register offsets */
+#define SPI_CTRL1 0x00
+#define SPI_CTRL1_SPIE (1 << 7)
+#define SPI_CTRL1_SPE (1 << 6)
+#define SPI_CTRL1_MSTR (1 << 4)
+#define SPI_CTRL1_CPOL (1 << 3)
+#define SPI_CTRL1_CPHA (1 << 2)
+#define SPI_CTRL1_SSOE (1 << 1)
+#define SPI_CTRL1_LSBFE (1 << 0)
+
+#define SPI_CTRL2 0x01
+#define SPI_BRR 0x04
+
+#define SPI_STATUS 0x05
+#define SPI_STATUS_SPIF (1 << 7)
+#define SPI_STATUS_WCOL (1 << 6)
+#define SPI_STATUS_MODF (1 << 4)
+
+#define SPI_DATA 0x09
+#define SPI_PORTDATA 0x0d
+#define SPI_DATADIR 0x10
+
+/* FSM state return values */
+#define FSM_STOP 0 /* Nothing more for the state machine to */
+ /* do. If something interesting happens */
+ /* then an IRQ will be received */
+#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
+ /* not expected */
+#define FSM_CONTINUE 2 /* Keep iterating the state machine */
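+
+/* The state machine cycles idle -> transfer -> wait -> idle; each state
+ * handler returns one of the FSM_* codes above so mpc52xx_spi_fsm_process()
+ * knows whether to keep iterating, poll again later, or stop until the
+ * next interrupt arrives. */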
+
+/* Driver internal data */
+struct mpc52xx_spi {
+ struct spi_master *master;
+ void __iomem *regs;
+ int irq0; /* MODF irq */
+ int irq1; /* SPIF irq */
+ unsigned int ipb_freq;
+
+ /* Statistics; not used now, but will be reintroduced for debugfs */
+ int msg_count;
+ int wcol_count;
+ int wcol_ticks;
+ u32 wcol_tx_timestamp;
+ int modf_count;
+ int byte_count;
+
+ struct list_head queue; /* queue of pending messages */
+ spinlock_t lock;
+ struct work_struct work;
+
+ /* Details of current transfer (length, and buffer pointers) */
+ struct spi_message *message; /* current message */
+ struct spi_transfer *transfer; /* current transfer */
+ int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
+ int len;
+ int timestamp;
+ u8 *rx_buf;
+ const u8 *tx_buf;
+ int cs_change;
+ int gpio_cs_count;
+ unsigned int *gpio_cs;
+};
+
+/*
+ * CS control function
+ */
+static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
+{
+ int cs;
+
+ if (ms->gpio_cs_count > 0) {
+ cs = ms->message->spi->chip_select;
+ gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1);
+ } else
+ out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
+}
+
+/*
+ * Start a new transfer. This is called both by the idle state
+ * for the first transfer in a message, and by the wait state when the
+ * previous transfer in a message is complete.
+ */
+static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
+{
+ ms->rx_buf = ms->transfer->rx_buf;
+ ms->tx_buf = ms->transfer->tx_buf;
+ ms->len = ms->transfer->len;
+
+ /* Activate the chip select */
+ if (ms->cs_change)
+ mpc52xx_spi_chipsel(ms, 1);
+ ms->cs_change = ms->transfer->cs_change;
+
+ /* Write out the first byte */
+ ms->wcol_tx_timestamp = get_tbl();
+ if (ms->tx_buf)
+ out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
+ else
+ out_8(ms->regs + SPI_DATA, 0);
+}
+
+/* Forward declaration of state handlers */
+static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data);
+static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data);
+
+/*
+ * IDLE state
+ *
+ * No transfers are in progress; if another transfer is pending then retrieve
+ * it and kick it off. Otherwise, stop processing the state machine
+ */
+static int
+mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
+{
+ struct spi_device *spi;
+ int spr, sppr;
+ u8 ctrl1;
+
+ if (status && (irq != NO_IRQ))
+ dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
+ status);
+
+ /* Check if there is another transfer waiting. */
+ if (list_empty(&ms->queue))
+ return FSM_STOP;
+
+ /* get the head of the queue */
+ ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
+ list_del_init(&ms->message->queue);
+
+ /* Setup the controller parameters */
+ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
+ spi = ms->message->spi;
+ if (spi->mode & SPI_CPHA)
+ ctrl1 |= SPI_CTRL1_CPHA;
+ if (spi->mode & SPI_CPOL)
+ ctrl1 |= SPI_CTRL1_CPOL;
+ if (spi->mode & SPI_LSB_FIRST)
+ ctrl1 |= SPI_CTRL1_LSBFE;
+ out_8(ms->regs + SPI_CTRL1, ctrl1);
+
+ /* Setup the controller speed */
+ /* minimum divider is '2'. Also, add '1' to force rounding the
+ * divider up. */
+ sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
+ spr = 0;
+ if (sppr < 1)
+ sppr = 1;
+ while (((sppr - 1) & ~0x7) != 0) {
+ sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
+ spr++;
+ }
+ sppr--; /* sppr quantity in register is offset by 1 */
+ if (spr > 7) {
+ /* Don't overrun limits of SPI baudrate register */
+ spr = 7;
+ sppr = 7;
+ }
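+ /* Illustrative example: ipb_freq = 132 MHz and max_speed_hz = 1 MHz
+ * yield sppr = 4 and spr = 4, i.e. SPI_BRR is written with 0x44,
+ * which is intended to give a clock at or just below the requested
+ * rate. */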
+ out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
+
+ ms->cs_change = 1;
+ ms->transfer = container_of(ms->message->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ mpc52xx_spi_start_transfer(ms);
+ ms->state = mpc52xx_spi_fsmstate_transfer;
+
+ return FSM_CONTINUE;
+}
+
+/*
+ * TRANSFER state
+ *
+ * In the middle of a transfer. If the SPI core has completed processing
+ * a byte, then read out the received data and write out the next byte
+ * (unless this transfer is finished; in which case go on to the wait
+ * state)
+ */
+static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data)
+{
+ if (!status)
+ return ms->irq0 ? FSM_STOP : FSM_POLL;
+
+ if (status & SPI_STATUS_WCOL) {
+ /* The SPI controller is stoopid. At slower speeds, it may
+ * raise the SPIF flag before the state machine is actually
+ * finished, which causes a collision (internal to the state
+ * machine only). The manual recommends inserting a delay
+ * between receiving the interrupt and sending the next byte,
+ * but it can also be worked around simply by retrying the
+ * transfer which is what we do here. */
+ ms->wcol_count++;
+ ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
+ ms->wcol_tx_timestamp = get_tbl();
+ data = 0;
+ if (ms->tx_buf)
+ data = *(ms->tx_buf - 1);
+ out_8(ms->regs + SPI_DATA, data); /* try again */
+ return FSM_CONTINUE;
+ } else if (status & SPI_STATUS_MODF) {
+ ms->modf_count++;
+ dev_err(&ms->master->dev, "mode fault\n");
+ mpc52xx_spi_chipsel(ms, 0);
+ ms->message->status = -EIO;
+ ms->message->complete(ms->message->context);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ return FSM_CONTINUE;
+ }
+
+ /* Read data out of the spi device */
+ ms->byte_count++;
+ if (ms->rx_buf)
+ *ms->rx_buf++ = data;
+
+ /* Is the transfer complete? */
+ ms->len--;
+ if (ms->len == 0) {
+ ms->timestamp = get_tbl();
+ ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
+ ms->state = mpc52xx_spi_fsmstate_wait;
+ return FSM_CONTINUE;
+ }
+
+ /* Write out the next byte */
+ ms->wcol_tx_timestamp = get_tbl();
+ if (ms->tx_buf)
+ out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
+ else
+ out_8(ms->regs + SPI_DATA, 0);
+
+ return FSM_CONTINUE;
+}
+
+/*
+ * WAIT state
+ *
+ * A transfer has completed; need to wait for the delay period to complete
+ * before starting the next transfer
+ */
+static int
+mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
+{
+ if (status && irq)
+ dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
+ status);
+
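+ /* Signed comparison of timebase ticks so the delay test stays
+ * correct across get_tbl() wrap-around. */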
+ if (((int)get_tbl()) - ms->timestamp < 0)
+ return FSM_POLL;
+
+ ms->message->actual_length += ms->transfer->len;
+
+ /* Check if there is another transfer in this message. If there
+ * isn't, deactivate CS, notify the sender, and drop back to idle
+ * to start the next message. */
+ if (ms->transfer->transfer_list.next == &ms->message->transfers) {
+ ms->msg_count++;
+ mpc52xx_spi_chipsel(ms, 0);
+ ms->message->status = 0;
+ ms->message->complete(ms->message->context);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ return FSM_CONTINUE;
+ }
+
+ /* There is another transfer; kick it off */
+
+ if (ms->cs_change)
+ mpc52xx_spi_chipsel(ms, 0);
+
+ ms->transfer = container_of(ms->transfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ mpc52xx_spi_start_transfer(ms);
+ ms->state = mpc52xx_spi_fsmstate_transfer;
+ return FSM_CONTINUE;
+}
+
+/**
+ * mpc52xx_spi_fsm_process - Finite State Machine iteration function
+ * @irq: irq number that triggered the FSM or 0 for polling
+ * @ms: pointer to mpc52xx_spi driver data
+ */
+static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
+{
+ int rc = FSM_CONTINUE;
+ u8 status, data;
+
+ while (rc == FSM_CONTINUE) {
+ /* Interrupt cleared by read of STATUS followed by
+ * read of DATA registers */
+ status = in_8(ms->regs + SPI_STATUS);
+ data = in_8(ms->regs + SPI_DATA);
+ rc = ms->state(irq, ms, status, data);
+ }
+
+ if (rc == FSM_POLL)
+ schedule_work(&ms->work);
+}
+
+/**
+ * mpc52xx_spi_irq - IRQ handler
+ */
+static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
+{
+ struct mpc52xx_spi *ms = _ms;
+ spin_lock(&ms->lock);
+ mpc52xx_spi_fsm_process(irq, ms);
+ spin_unlock(&ms->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpc52xx_spi_wq - Workqueue function for polling the state machine
+ */
+static void mpc52xx_spi_wq(struct work_struct *work)
+{
+ struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ms->lock, flags);
+ mpc52xx_spi_fsm_process(0, ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
+}
+
+/*
+ * spi_master ops
+ */
+
+static int mpc52xx_spi_setup(struct spi_device *spi)
+{
+ if (spi->bits_per_word % 8)
+ return -EINVAL;
+
+ if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
+ return -EINVAL;
+
+ if (spi->chip_select >= spi->master->num_chipselect)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spin_lock_irqsave(&ms->lock, flags);
+ list_add_tail(&m->queue, &ms->queue);
+ spin_unlock_irqrestore(&ms->lock, flags);
+ schedule_work(&ms->work);
+
+ return 0;
+}
+
+/*
+ * OF Platform Bus Binding
+ */
+static int __devinit mpc52xx_spi_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct spi_master *master;
+ struct mpc52xx_spi *ms;
+ void __iomem *regs;
+ u8 ctrl1;
+ int rc, i = 0;
+ int gpio_cs;
+
+ /* MMIO registers */
+ dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
+ regs = of_iomap(op->node, 0);
+ if (!regs)
+ return -ENODEV;
+
+ /* initialize the device */
+ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
+ out_8(regs + SPI_CTRL1, ctrl1);
+ out_8(regs + SPI_CTRL2, 0x0);
+ out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
+ out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
+
+ /* Clear the status register and re-read it to check for a MODF
+ * failure. This driver cannot currently handle multiple masters
+ * on the SPI bus. This fault will also occur if the SPI signals
+ * are not connected to any pins (port_config setting) */
+ in_8(regs + SPI_STATUS);
+ out_8(regs + SPI_CTRL1, ctrl1);
+
+ in_8(regs + SPI_DATA);
+ if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
+ dev_err(&op->dev, "mode fault; is port_config correct?\n");
+ rc = -EIO;
+ goto err_init;
+ }
+
+ dev_dbg(&op->dev, "allocating spi_master struct\n");
+ master = spi_alloc_master(&op->dev, sizeof *ms);
+ if (!master) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ master->bus_num = -1;
+ master->setup = mpc52xx_spi_setup;
+ master->transfer = mpc52xx_spi_transfer;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+ dev_set_drvdata(&op->dev, master);
+
+ ms = spi_master_get_devdata(master);
+ ms->master = master;
+ ms->regs = regs;
+ ms->irq0 = irq_of_parse_and_map(op->node, 0);
+ ms->irq1 = irq_of_parse_and_map(op->node, 1);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
+ ms->gpio_cs_count = of_gpio_count(op->node);
+ if (ms->gpio_cs_count > 0) {
+ master->num_chipselect = ms->gpio_cs_count;
+ ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!ms->gpio_cs) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (i = 0; i < ms->gpio_cs_count; i++) {
+ gpio_cs = of_get_gpio(op->node, i);
+ if (gpio_cs < 0) {
+ dev_err(&op->dev,
+ "could not parse the gpio field "
+ "in oftree\n");
+ rc = -ENODEV;
+ goto err_gpio;
+ }
+
+ rc = gpio_request(gpio_cs, dev_name(&op->dev));
+ if (rc) {
+ dev_err(&op->dev,
+ "can't request spi cs gpio #%d "
+ "on gpio line %d\n", i, gpio_cs);
+ goto err_gpio;
+ }
+
+ gpio_direction_output(gpio_cs, 1);
+ ms->gpio_cs[i] = gpio_cs;
+ }
+ } else {
+ master->num_chipselect = 1;
+ }
+
+ spin_lock_init(&ms->lock);
+ INIT_LIST_HEAD(&ms->queue);
+ INIT_WORK(&ms->work, mpc52xx_spi_wq);
+
+ /* Decide if interrupts can be used */
+ if (ms->irq0 && ms->irq1) {
+ rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
+ "mpc5200-spi-modf", ms);
+ rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
+ "mpc5200-spi-spif", ms);
+ if (rc) {
+ free_irq(ms->irq0, ms);
+ free_irq(ms->irq1, ms);
+ ms->irq0 = ms->irq1 = 0;
+ }
+ } else {
+ /* operate in polled mode */
+ ms->irq0 = ms->irq1 = 0;
+ }
+
+ if (!ms->irq0)
+ dev_info(&op->dev, "using polled mode\n");
+
+ dev_dbg(&op->dev, "registering spi_master struct\n");
+ rc = spi_register_master(master);
+ if (rc)
+ goto err_register;
+
+ of_register_spi_devices(master, op->node);
+ dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
+
+ return rc;
+
+ err_register:
+ dev_err(&ms->master->dev, "initialization failed\n");
+ spi_master_put(master);
+ err_gpio:
+ while (i-- > 0)
+ gpio_free(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
+ err_alloc:
+ err_init:
+ iounmap(regs);
+ return rc;
+}
+
+static int __devexit mpc52xx_spi_remove(struct of_device *op)
+{
+ struct spi_master *master = dev_get_drvdata(&op->dev);
+ struct mpc52xx_spi *ms = spi_master_get_devdata(master);
+ int i;
+
+ free_irq(ms->irq0, ms);
+ free_irq(ms->irq1, ms);
+
+ for (i = 0; i < ms->gpio_cs_count; i++)
+ gpio_free(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
+ spi_unregister_master(master);
+ spi_master_put(master);
+ iounmap(ms->regs);
+
+ return 0;
+}
+
+static struct of_device_id mpc52xx_spi_match[] __devinitdata = {
+ { .compatible = "fsl,mpc5200-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
+
+static struct of_platform_driver mpc52xx_spi_of_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc52xx-spi",
+ .match_table = mpc52xx_spi_match,
+ .probe = mpc52xx_spi_probe,
+ .remove = __devexit_p(mpc52xx_spi_remove),
+};
+
+static int __init mpc52xx_spi_init(void)
+{
+ return of_register_platform_driver(&mpc52xx_spi_of_driver);
+}
+module_init(mpc52xx_spi_init);
+
+static void __exit mpc52xx_spi_exit(void)
+{
+ of_unregister_platform_driver(&mpc52xx_spi_of_driver);
+}
+module_exit(mpc52xx_spi_exit);
+
diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c
new file mode 100644
index 00000000000..5355d90d1be
--- /dev/null
+++ b/drivers/spi/omap_spi_100k.c
@@ -0,0 +1,635 @@
+/*
+ * OMAP7xx SPI 100k controller driver
+ * Author: Fabrice Crohas <fcrohas@gmail.com>
+ * from original omap1_mcspi driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <linux/spi/spi.h>
+
+#include <plat/clock.h>
+
+#define OMAP1_SPI100K_MAX_FREQ 48000000
+
+#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
+
+#define SPI_SETUP1 0x00
+#define SPI_SETUP2 0x02
+#define SPI_CTRL 0x04
+#define SPI_STATUS 0x06
+#define SPI_TX_LSB 0x08
+#define SPI_TX_MSB 0x0a
+#define SPI_RX_LSB 0x0c
+#define SPI_RX_MSB 0x0e
+
+#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5)
+#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4)
+#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1)
+#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0)
+
+#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0)
+#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0)
+#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5)
+#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5)
+#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10)
+#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10)
+
+#define SPI_CTRL_SEN(x) ((x) << 7)
+#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2)
+#define SPI_CTRL_WR (1UL << 1)
+#define SPI_CTRL_RD (1UL << 0)
+
+#define SPI_STATUS_WE (1UL << 1)
+#define SPI_STATUS_RD (1UL << 0)
+
+#define WRITE 0
+#define READ 1
+
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 8
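+/* Note: DMA_MIN_BYTES is currently unused; this driver only does PIO and
+ * the define appears to be kept for a possible future DMA path. */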
+
+#define SPI_RUNNING 0
+#define SPI_SHUTDOWN 1
+
+struct omap1_spi100k {
+ struct work_struct work;
+
+ /* lock protects queue and registers */
+ spinlock_t lock;
+ struct list_head msg_queue;
+ struct spi_master *master;
+ struct clk *ick;
+ struct clk *fck;
+
+ /* Virtual base address of the controller */
+ void __iomem *base;
+
+ /* State of the SPI */
+ unsigned int state;
+};
+
+struct omap1_spi100k_cs {
+ void __iomem *base;
+ int word_len;
+};
+
+static struct workqueue_struct *omap1_spi100k_wq;
+
+#define MOD_REG_BIT(val, mask, set) do { \
+ if (set) \
+ val |= mask; \
+ else \
+ val &= ~mask; \
+} while (0)
+
+static void spi100k_enable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* enable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val |= SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_disable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* disable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val &= ~SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_write_data(struct spi_master *master, int len, int data)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* write 16-bit word */
+ spi100k_enable_clock(master);
+ writew(data, spi100k->base + SPI_TX_MSB);
+
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_WR,
+ spi100k->base + SPI_CTRL);
+
+ /* Busy-wait until the controller signals the write has completed */
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
+ ;
+ udelay(1000);
+
+ spi100k_disable_clock(master);
+}
+
+static int spi100k_read_data(struct spi_master *master, int len)
+{
+ int dataH, dataL;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ spi100k_enable_clock(master);
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_RD,
+ spi100k->base + SPI_CTRL);
+
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
+ ;
+ udelay(1000);
+
+ dataL = readw(spi100k->base + SPI_RX_LSB);
+ dataH = readw(spi100k->base + SPI_RX_MSB);
+ spi100k_disable_clock(master);
+
+ return dataL;
+}
+
+static void spi100k_open(struct spi_master *master)
+{
+ /* get control of SPI */
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ writew(SPI_SETUP1_INT_READ_ENABLE |
+ SPI_SETUP1_INT_WRITE_ENABLE |
+ SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1);
+
+ /* configure clock and interrupts */
+ writew(SPI_SETUP2_ACTIVE_EDGE_FALLING |
+ SPI_SETUP2_NEGATIVE_LEVEL |
+ SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2);
+}
+
+static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
+{
+ if (enable)
+ writew(0x05fc, spi100k->base + SPI_CTRL);
+ else
+ writew(0x05fd, spi100k->base + SPI_CTRL);
+}
+
+static unsigned
+omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap1_spi100k *spi100k;
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ int word_len;
+
+ spi100k = spi_master_get_devdata(spi->master);
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (xfer->tx_buf == NULL)
+ spi100k_write_data(spi->master, word_len, 0);
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 1;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 2;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 4;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ }
+ return count - c;
+}
+
+/* called only when no transfer is active to this device */
+static int omap1_spi100k_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ u8 word_len = spi->bits_per_word;
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+ if (!word_len)
+ word_len = 8;
+
+ if (spi->bits_per_word > 32)
+ return -EINVAL;
+ cs->word_len = word_len;
+
+ /* SPI init before transfer */
+ writew(0x3e, spi100k->base + SPI_SETUP1);
+ writew(0x00, spi100k->base + SPI_STATUS);
+ writew(0x3e, spi100k->base + SPI_CTRL);
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+static int omap1_spi100k_setup(struct spi_device *spi)
+{
+ int ret;
+ struct omap1_spi100k *spi100k;
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+
+ if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+ dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
+ spi->bits_per_word);
+ return -EINVAL;
+ }
+
+ spi100k = spi_master_get_devdata(spi->master);
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = spi100k->base + spi->chip_select * 0x14;
+ spi->controller_state = cs;
+ }
+
+ spi100k_open(spi->master);
+
+ clk_enable(spi100k->ick);
+ clk_enable(spi100k->fck);
+
+ ret = omap1_spi100k_setup_transfer(spi, NULL);
+
+ clk_disable(spi100k->ick);
+ clk_disable(spi100k->fck);
+
+ return ret;
+}
+
+static void omap1_spi100k_work(struct work_struct *work)
+{
+ struct omap1_spi100k *spi100k;
+ int status = 0;
+
+ spi100k = container_of(work, struct omap1_spi100k, work);
+ spin_lock_irq(&spi100k->lock);
+
+ clk_enable(spi100k->ick);
+ clk_enable(spi100k->fck);
+
+ /* We only enable one channel at a time -- the one whose message is
+ * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+ * channel" master mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+ while (!list_empty(&spi100k->msg_queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ struct omap1_spi100k_cs *cs;
+ int par_override = 0;
+
+ m = container_of(spi100k->msg_queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&m->queue);
+ spin_unlock_irq(&spi100k->lock);
+
+ spi = m->spi;
+ cs = spi->controller_state;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap1_spi100k_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
+
+ if (!cs_active) {
+ omap1_spi100k_force_cs(spi100k, 1);
+ cs_active = 1;
+ }
+
+ if (t->len) {
+ unsigned count;
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ spi100k_write_data(spi->master, 8, 0);
+
+ count = omap1_spi100k_txrx_pio(spi, t);
+ m->actual_length += count;
+
+ if (count != t->len) {
+ status = -EIO;
+ break;
+ }
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ /* ignore the "leave it on after last xfer" hint */
+
+ if (t->cs_change) {
+ omap1_spi100k_force_cs(spi100k, 0);
+ cs_active = 0;
+ }
+ }
+
+ /* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap1_spi100k_setup_transfer(spi, NULL);
+ }
+
+ if (cs_active)
+ omap1_spi100k_force_cs(spi100k, 0);
+
+ m->status = status;
+ m->complete(m->context);
+
+ spin_lock_irq(&spi100k->lock);
+ }
+
+ clk_disable(spi100k->ick);
+ clk_disable(spi100k->fck);
+ spin_unlock_irq(&spi100k->lock);
+
+ if (status < 0)
+ printk(KERN_WARNING "spi transfer failed with %d\n", status);
+}
+
+static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct omap1_spi100k *spi100k;
+ unsigned long flags;
+ struct spi_transfer *t;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spi100k = spi_master_get_devdata(spi->master);
+
+ /* Don't accept new work if we're shutting down */
+ if (spi100k->state == SPI_SHUTDOWN)
+ return -ESHUTDOWN;
+
+ /* reject invalid messages and transfers */
+ if (list_empty(&m->transfers) || !m->complete)
+ return -EINVAL;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned len = t->len;
+
+ if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ
+ || (len && !(rx_buf || tx_buf))
+ || (t->bits_per_word &&
+ ( t->bits_per_word < 4
+ || t->bits_per_word > 32))) {
+ dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ t->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ t->bits_per_word);
+ return -EINVAL;
+ }
+
+ if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) {
+ dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
+ t->speed_hz,
+ OMAP1_SPI100K_MAX_FREQ/(1<<16));
+ return -EINVAL;
+ }
+
+ }
+
+ spin_lock_irqsave(&spi100k->lock, flags);
+ list_add_tail(&m->queue, &spi100k->msg_queue);
+ queue_work(omap1_spi100k_wq, &spi100k->work);
+ spin_unlock_irqrestore(&spi100k->lock, flags);
+
+ return 0;
+}
+
+static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
+{
+ return 0;
+}
+
+static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap1_spi100k *spi100k;
+ int status = 0;
+
+ if (!pdev->id)
+ return -EINVAL;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+
+ master->setup = omap1_spi100k_setup;
+ master->transfer = omap1_spi100k_transfer;
+ master->cleanup = NULL;
+ master->num_chipselect = 2;
+ master->mode_bits = MODEBITS;
+
+ dev_set_drvdata(&pdev->dev, master);
+
+ spi100k = spi_master_get_devdata(master);
+ spi100k->master = master;
+
+ /*
+ * The memory region base address is taken as the platform_data.
+ * You should allocate this with ioremap() before initializing
+ * the SPI.
+ */
+ spi100k->base = (void __iomem *) pdev->dev.platform_data;
+
+ INIT_WORK(&spi100k->work, omap1_spi100k_work);
+
+ spin_lock_init(&spi100k->lock);
+ INIT_LIST_HEAD(&spi100k->msg_queue);
+ spi100k->ick = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(spi100k->ick)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
+ status = PTR_ERR(spi100k->ick);
+ goto err1;
+ }
+
+ spi100k->fck = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(spi100k->fck)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
+ status = PTR_ERR(spi100k->fck);
+ goto err2;
+ }
+
+ if (omap1_spi100k_reset(spi100k) < 0)
+ goto err3;
+
+ status = spi_register_master(master);
+ if (status < 0)
+ goto err3;
+
+ spi100k->state = SPI_RUNNING;
+
+ return status;
+
+err3:
+ clk_put(spi100k->fck);
+err2:
+ clk_put(spi100k->ick);
+err1:
+ spi_master_put(master);
+ return status;
+}
+
+static int __exit omap1_spi100k_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap1_spi100k *spi100k;
+ struct resource *r;
+ unsigned limit = 500;
+ unsigned long flags;
+ int status = 0;
+
+ master = dev_get_drvdata(&pdev->dev);
+ spi100k = spi_master_get_devdata(master);
+
+ spin_lock_irqsave(&spi100k->lock, flags);
+
+ spi100k->state = SPI_SHUTDOWN;
+ while (!list_empty(&spi100k->msg_queue) && limit--) {
+ spin_unlock_irqrestore(&spi100k->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&spi100k->lock, flags);
+ }
+
+ if (!list_empty(&spi100k->msg_queue))
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&spi100k->lock, flags);
+
+ if (status != 0)
+ return status;
+
+ clk_put(spi100k->fck);
+ clk_put(spi100k->ick);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+static struct platform_driver omap1_spi100k_driver = {
+ .driver = {
+ .name = "omap1_spi100k",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(omap1_spi100k_remove),
+};
+
+
+static int __init omap1_spi100k_init(void)
+{
+ omap1_spi100k_wq = create_singlethread_workqueue(
+ omap1_spi100k_driver.driver.name);
+
+ if (omap1_spi100k_wq == NULL)
+ return -ENOMEM;
+
+ return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe);
+}
+
+static void __exit omap1_spi100k_exit(void)
+{
+ platform_driver_unregister(&omap1_spi100k_driver);
+
+ destroy_workqueue(omap1_spi100k_wq);
+}
+
+module_init(omap1_spi100k_init);
+module_exit(omap1_spi100k_exit);
+
+MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
+MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index c8c2b693ffa..c2f707e5ce7 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1709,7 +1709,7 @@ static int pxa2xx_spi_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pxa2xx_spi_pm_ops = {
+static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
.suspend = pxa2xx_spi_suspend,
.resume = pxa2xx_spi_resume,
};
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 73e24ef5a2f..1d41058bbab 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1294,7 +1294,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_get_res;
}
- drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1));
+ drv_data->regs_base = ioremap(res->start, resource_size(res));
if (drv_data->regs_base == NULL) {
dev_err(dev, "Cannot map IO\n");
status = -ENXIO;
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 89c22efedfb..1893f1e96dc 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -44,6 +44,9 @@
#define MXC_CSPIINT 0x0c
#define MXC_RESET 0x1c
+#define MX3_CSPISTAT 0x14
+#define MX3_CSPISTAT_RR (1 << 3)
+
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
@@ -205,7 +208,7 @@ static int mx31_config(struct spi_imx_data *spi_imx,
if (cpu_is_mx31())
reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
- else if (cpu_is_mx35()) {
+ else if (cpu_is_mx25() || cpu_is_mx35()) {
reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
reg |= MX31_CSPICTRL_SSCTL;
}
@@ -219,7 +222,7 @@ static int mx31_config(struct spi_imx_data *spi_imx,
if (config->cs < 0) {
if (cpu_is_mx31())
reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT;
- else if (cpu_is_mx35())
+ else if (cpu_is_mx25() || cpu_is_mx35())
reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
}
@@ -481,7 +484,7 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
-static int __init spi_imx_probe(struct platform_device *pdev)
+static int __devinit spi_imx_probe(struct platform_device *pdev)
{
struct spi_imx_master *mxc_platform_info;
struct spi_master *master;
@@ -489,7 +492,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
- mxc_platform_info = (struct spi_imx_master *)pdev->dev.platform_data;
+ mxc_platform_info = dev_get_platdata(&pdev->dev);
if (!mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
@@ -513,11 +516,12 @@ static int __init spi_imx_probe(struct platform_device *pdev)
continue;
ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
if (ret) {
- i--;
- while (i > 0)
+ while (i > 0) {
+ i--;
if (spi_imx->chipselect[i] >= 0)
- gpio_free(spi_imx->chipselect[i--]);
- dev_err(&pdev->dev, "can't get cs gpios");
+ gpio_free(spi_imx->chipselect[i]);
+ }
+ dev_err(&pdev->dev, "can't get cs gpios\n");
goto out_master_put;
}
}
@@ -551,7 +555,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
}
spi_imx->irq = platform_get_irq(pdev, 0);
- if (!spi_imx->irq) {
+ if (spi_imx->irq <= 0) {
ret = -EINVAL;
goto out_iounmap;
}
@@ -562,7 +566,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
goto out_iounmap;
}
- if (cpu_is_mx31() || cpu_is_mx35()) {
+ if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) {
spi_imx->intctrl = mx31_intctrl;
spi_imx->config = mx31_config;
spi_imx->trigger = mx31_trigger;
@@ -590,9 +594,14 @@ static int __init spi_imx_probe(struct platform_device *pdev)
clk_enable(spi_imx->clk);
spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
- if (!cpu_is_mx31() || !cpu_is_mx35())
+ if (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
writel(1, spi_imx->base + MXC_RESET);
+ /* drain receive buffer */
+ if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35())
+ while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR)
+ readl(spi_imx->base + MXC_CSPIRXDATA);
+
spi_imx->intctrl(spi_imx, 0);
ret = spi_bitbang_start(&spi_imx->bitbang);
@@ -625,7 +634,7 @@ out_master_put:
return ret;
}
-static int __exit spi_imx_remove(struct platform_device *pdev)
+static int __devexit spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -659,7 +668,7 @@ static struct platform_driver spi_imx_driver = {
.owner = THIS_MODULE,
},
.probe = spi_imx_probe,
- .remove = __exit_p(spi_imx_remove),
+ .remove = __devexit_p(spi_imx_remove),
};
static int __init spi_imx_init(void)
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 0fd0ec4d3a7..1fb2a6ea328 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -5,6 +5,10 @@
*
* Copyright (C) 2006 Polycom, Inc.
*
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -27,6 +31,9 @@
#include <linux/spi/spi_bitbang.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/gpio.h>
@@ -34,8 +41,19 @@
#include <linux/of_spi.h>
#include <sysdev/fsl_soc.h>
+#include <asm/cpm.h>
+#include <asm/qe.h>
#include <asm/irq.h>
+/* CPM1 and CPM2 are mutually exclusive. */
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
+#else
+#include <asm/cpm2.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
+#endif
+
/* SPI Controller registers */
struct mpc8xxx_spi_reg {
u8 res1[0x20];
@@ -47,6 +65,28 @@ struct mpc8xxx_spi_reg {
__be32 receive;
};
+/* SPI Parameter RAM */
+struct spi_pram {
+ __be16 rbase; /* Rx Buffer descriptor base address */
+ __be16 tbase; /* Tx Buffer descriptor base address */
+ u8 rfcr; /* Rx function code */
+ u8 tfcr; /* Tx function code */
+ __be16 mrblr; /* Max receive buffer length */
+ __be32 rstate; /* Internal */
+ __be32 rdp; /* Internal */
+ __be16 rbptr; /* Internal */
+ __be16 rbc; /* Internal */
+ __be32 rxtmp; /* Internal */
+ __be32 tstate; /* Internal */
+ __be32 tdp; /* Internal */
+ __be16 tbptr; /* Internal */
+ __be16 tbc; /* Internal */
+ __be32 txtmp; /* Internal */
+ __be32 res; /* Tx temp. */
+ __be16 rpbase; /* Relocation pointer (CPM1 only) */
+ __be16 res1; /* Reserved */
+};
+
/* SPI Controller mode register definitions */
#define SPMODE_LOOP (1 << 30)
#define SPMODE_CI_INACTIVEHIGH (1 << 29)
@@ -75,14 +115,40 @@ struct mpc8xxx_spi_reg {
#define SPIM_NE 0x00000200 /* Not empty */
#define SPIM_NF 0x00000100 /* Not full */
+#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
+#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
+
+/* SPCOM register values */
+#define SPCOM_STR (1 << 23) /* Start transmit */
+
+#define SPI_PRAM_SIZE 0x100
+#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
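+
+/* A single CPM/QE buffer descriptor moves at most SPI_MRBLR bytes; longer
+ * transfers are re-armed from the IRQ handler until the remaining count
+ * reaches zero. */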
+
/* SPI Controller driver's private data. */
struct mpc8xxx_spi {
+ struct device *dev;
struct mpc8xxx_spi_reg __iomem *base;
/* rx & tx bufs from the spi_transfer */
const void *tx;
void *rx;
+ int subblock;
+ struct spi_pram __iomem *pram;
+ struct cpm_buf_desc __iomem *tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd;
+
+ struct spi_transfer *xfer_in_progress;
+
+ /* dma addresses for CPM transfers */
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+ bool map_tx_dma;
+ bool map_rx_dma;
+
+ dma_addr_t dma_dummy_tx;
+ dma_addr_t dma_dummy_rx;
+
/* functions to deal with different sized buffers */
void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
u32(*get_tx) (struct mpc8xxx_spi *);
@@ -96,7 +162,7 @@ struct mpc8xxx_spi {
u32 rx_shift; /* RX data reg shift when in qe mode */
u32 tx_shift; /* TX data reg shift when in qe mode */
- bool qe_mode;
+ unsigned int flags;
struct workqueue_struct *workqueue;
struct work_struct work;
@@ -107,6 +173,10 @@ struct mpc8xxx_spi {
struct completion done;
};
+static void *mpc8xxx_dummy_rx;
+static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);
+static int mpc8xxx_dummy_rx_refcnt;
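+
+/* One SPI_MRBLR-sized dummy RX buffer is shared and refcounted across
+ * controller instances; transfers that have no rx_buf fall back to the
+ * preset dma_dummy_rx mapping instead of mapping a caller buffer. */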
+
struct spi_mpc8xxx_cs {
/* functions to deal with different sized buffers */
void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
@@ -155,6 +225,42 @@ MPC83XX_SPI_TX_BUF(u8)
MPC83XX_SPI_TX_BUF(u16)
MPC83XX_SPI_TX_BUF(u32)
+static void mpc8xxx_spi_change_mode(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+ __be32 __iomem *mode = &mspi->base->mode;
+ unsigned long flags;
+
+ if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
+ return;
+
+ /* Turn off IRQs locally to minimize time that SPI is disabled. */
+ local_irq_save(flags);
+
+ /* Turn off SPI unit prior changing mode */
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode);
+
+ /* When in CPM mode, we need to reinit tx and rx. */
+ if (mspi->flags & SPI_CPM_MODE) {
+ if (mspi->flags & SPI_QE) {
+ qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ } else {
+ cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
+ if (mspi->flags & SPI_CPM1) {
+ out_be16(&mspi->pram->rbptr,
+ in_be16(&mspi->pram->rbase));
+ out_be16(&mspi->pram->tbptr,
+ in_be16(&mspi->pram->tbase));
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -168,27 +274,13 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
}
if (value == BITBANG_CS_ACTIVE) {
- u32 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
-
mpc8xxx_spi->rx_shift = cs->rx_shift;
mpc8xxx_spi->tx_shift = cs->tx_shift;
mpc8xxx_spi->get_rx = cs->get_rx;
mpc8xxx_spi->get_tx = cs->get_tx;
- if (cs->hw_mode != regval) {
- unsigned long flags;
- __be32 __iomem *mode = &mpc8xxx_spi->base->mode;
-
- regval = cs->hw_mode;
- /* Turn off IRQs locally to minimize time that
- * SPI is disabled
- */
- local_irq_save(flags);
- /* Turn off SPI unit prior changing mode */
- mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE);
- mpc8xxx_spi_write_reg(mode, regval);
- local_irq_restore(flags);
- }
+ mpc8xxx_spi_change_mode(spi);
+
if (pdata->cs_control)
pdata->cs_control(spi, pol);
}
@@ -198,7 +290,6 @@ static
int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi;
- u32 regval;
u8 bits_per_word, pm;
u32 hz;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
@@ -230,14 +321,14 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (bits_per_word <= 8) {
cs->get_rx = mpc8xxx_spi_rx_buf_u8;
cs->get_tx = mpc8xxx_spi_tx_buf_u8;
- if (mpc8xxx_spi->qe_mode) {
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
cs->rx_shift = 16;
cs->tx_shift = 24;
}
} else if (bits_per_word <= 16) {
cs->get_rx = mpc8xxx_spi_rx_buf_u16;
cs->get_tx = mpc8xxx_spi_tx_buf_u16;
- if (mpc8xxx_spi->qe_mode) {
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
cs->rx_shift = 16;
cs->tx_shift = 16;
}
@@ -247,7 +338,8 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
} else
return -EINVAL;
- if (mpc8xxx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) {
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
+ spi->mode & SPI_LSB_FIRST) {
cs->tx_shift = 0;
if (bits_per_word <= 8)
cs->rx_shift = 8;
@@ -286,37 +378,138 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
pm--;
cs->hw_mode |= SPMODE_PM(pm);
- regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
- if (cs->hw_mode != regval) {
- unsigned long flags;
- __be32 __iomem *mode = &mpc8xxx_spi->base->mode;
-
- regval = cs->hw_mode;
- /* Turn off IRQs locally to minimize time
- * that SPI is disabled
- */
- local_irq_save(flags);
- /* Turn off SPI unit prior changing mode */
- mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE);
- mpc8xxx_spi_write_reg(mode, regval);
- local_irq_restore(flags);
+
+ mpc8xxx_spi_change_mode(spi);
+ return 0;
+}
+
+static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
+{
+ struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
+ unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
+ unsigned int xfer_ofs;
+
+ xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
+
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+ out_be16(&rx_bd->cbd_datlen, 0);
+ out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
+
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+ out_be16(&tx_bd->cbd_datlen, xfer_len);
+ out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
+ BD_SC_LAST);
+
+ /* start transfer */
+ mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR);
+}
+
+static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped)
+{
+ struct device *dev = mspi->dev;
+
+ if (is_dma_mapped) {
+ mspi->map_tx_dma = 0;
+ mspi->map_rx_dma = 0;
+ } else {
+ mspi->map_tx_dma = 1;
+ mspi->map_rx_dma = 1;
+ }
+
+ if (!t->tx_buf) {
+ mspi->tx_dma = mspi->dma_dummy_tx;
+ mspi->map_tx_dma = 0;
+ }
+
+ if (!t->rx_buf) {
+ mspi->rx_dma = mspi->dma_dummy_rx;
+ mspi->map_rx_dma = 0;
}
+
+ if (mspi->map_tx_dma) {
+ void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
+
+ mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->tx_dma)) {
+ dev_err(dev, "unable to map tx dma\n");
+ return -ENOMEM;
+ }
+ } else {
+ mspi->tx_dma = t->tx_dma;
+ }
+
+ if (mspi->map_rx_dma) {
+ mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->rx_dma)) {
+ dev_err(dev, "unable to map rx dma\n");
+ goto err_rx_dma;
+ }
+ } else {
+ mspi->rx_dma = t->rx_dma;
+ }
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB);
+
+ mspi->xfer_in_progress = t;
+ mspi->count = t->len;
+
+ /* start CPM transfers */
+ mpc8xxx_spi_cpm_bufs_start(mspi);
+
return 0;
+
+err_rx_dma:
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ return -ENOMEM;
}
-static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
+static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
- struct mpc8xxx_spi *mpc8xxx_spi;
- u32 word, len, bits_per_word;
+ struct device *dev = mspi->dev;
+ struct spi_transfer *t = mspi->xfer_in_progress;
+
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ if (mspi->map_rx_dma)
+ dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ mspi->xfer_in_progress = NULL;
+}
- mpc8xxx_spi = spi_master_get_devdata(spi->master);
+static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, unsigned int len)
+{
+ u32 word;
+
+ mspi->count = len;
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE);
+
+ /* transmit word */
+ word = mspi->get_tx(mspi);
+ mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
+
+ return 0;
+}
+
+static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
+ bool is_dma_mapped)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ unsigned int len = t->len;
+ u8 bits_per_word;
+ int ret;
- mpc8xxx_spi->tx = t->tx_buf;
- mpc8xxx_spi->rx = t->rx_buf;
bits_per_word = spi->bits_per_word;
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
- len = t->len;
+
if (bits_per_word > 8) {
/* invalid length? */
if (len & 1)
@@ -329,22 +522,27 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
return -EINVAL;
len /= 2;
}
- mpc8xxx_spi->count = len;
- INIT_COMPLETION(mpc8xxx_spi->done);
+ mpc8xxx_spi->tx = t->tx_buf;
+ mpc8xxx_spi->rx = t->rx_buf;
- /* enable rx ints */
- mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, SPIM_NE);
+ INIT_COMPLETION(mpc8xxx_spi->done);
- /* transmit word */
- word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
- mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word);
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
+ else
+ ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len);
+ if (ret)
+ return ret;
wait_for_completion(&mpc8xxx_spi->done);
/* disable rx ints */
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi);
+
return mpc8xxx_spi->count;
}
@@ -375,7 +573,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
}
cs_change = t->cs_change;
if (t->len)
- status = mpc8xxx_spi_bufs(spi, t);
+ status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped);
if (status) {
status = -EMSGSIZE;
break;
@@ -464,45 +662,80 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
return 0;
}
-static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
+static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
- struct mpc8xxx_spi *mpc8xxx_spi = context_data;
- u32 event;
- irqreturn_t ret = IRQ_NONE;
+ u16 len;
- /* Get interrupt events(tx/rx) */
- event = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event);
+ dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
+ in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
- /* We need handle RX first */
- if (event & SPIE_NE) {
- u32 rx_data = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->receive);
+ len = in_be16(&mspi->rx_bd->cbd_datlen);
+ if (len > mspi->count) {
+ WARN_ON(1);
+ len = mspi->count;
+ }
- if (mpc8xxx_spi->rx)
- mpc8xxx_spi->get_rx(rx_data, mpc8xxx_spi);
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&mspi->base->event, events);
- ret = IRQ_HANDLED;
+ mspi->count -= len;
+ if (mspi->count)
+ mpc8xxx_spi_cpm_bufs_start(mspi);
+ else
+ complete(&mspi->done);
+}
+
+static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ /* We need to handle RX first */
+ if (events & SPIE_NE) {
+ u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive);
+
+ if (mspi->rx)
+ mspi->get_rx(rx_data, mspi);
}
- if ((event & SPIE_NF) == 0)
+ if ((events & SPIE_NF) == 0)
/* spin until TX is done */
- while (((event =
- mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event)) &
+ while (((events =
+ mpc8xxx_spi_read_reg(&mspi->base->event)) &
SPIE_NF) == 0)
cpu_relax();
- mpc8xxx_spi->count -= 1;
- if (mpc8xxx_spi->count) {
- u32 word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
- mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word);
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&mspi->base->event, events);
+
+ mspi->count -= 1;
+ if (mspi->count) {
+ u32 word = mspi->get_tx(mspi);
+
+ mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
} else {
- complete(&mpc8xxx_spi->done);
+ complete(&mspi->done);
}
+}
- /* Clear the events */
- mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, event);
+static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
+{
+ struct mpc8xxx_spi *mspi = context_data;
+ irqreturn_t ret = IRQ_NONE;
+ u32 events;
+
+ /* Get interrupt events(tx/rx) */
+ events = mpc8xxx_spi_read_reg(&mspi->base->event);
+ if (events)
+ ret = IRQ_HANDLED;
+
+ dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
+
+ if (mspi->flags & SPI_CPM_MODE)
+ mpc8xxx_spi_cpm_irq(mspi, events);
+ else
+ mpc8xxx_spi_cpu_irq(mspi, events);
return ret;
}
+
static int mpc8xxx_spi_transfer(struct spi_device *spi,
struct spi_message *m)
{
@@ -526,6 +759,215 @@ static void mpc8xxx_spi_cleanup(struct spi_device *spi)
kfree(spi->controller_state);
}
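+/*
+ * A single dummy rx buffer appears to be shared (refcounted under
+ * mpc8xxx_dummy_rx_lock) between all controller instances; presumably it
+ * serves as the rx DMA target when a transfer supplies no rx buffer.
+ */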
+static void *mpc8xxx_spi_alloc_dummy_rx(void)
+{
+ mutex_lock(&mpc8xxx_dummy_rx_lock);
+
+ if (!mpc8xxx_dummy_rx)
+ mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
+ if (mpc8xxx_dummy_rx)
+ mpc8xxx_dummy_rx_refcnt++;
+
+ mutex_unlock(&mpc8xxx_dummy_rx_lock);
+
+ return mpc8xxx_dummy_rx;
+}
+
+static void mpc8xxx_spi_free_dummy_rx(void)
+{
+ mutex_lock(&mpc8xxx_dummy_rx_lock);
+
+ switch (mpc8xxx_dummy_rx_refcnt) {
+ case 0:
+ WARN_ON(1);
+ break;
+ case 1:
+ kfree(mpc8xxx_dummy_rx);
+ mpc8xxx_dummy_rx = NULL;
+ /* fall through */
+ default:
+ mpc8xxx_dummy_rx_refcnt--;
+ break;
+ }
+
+ mutex_unlock(&mpc8xxx_dummy_rx_lock);
+}
+
+static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ const u32 *iprop;
+ int size;
+ unsigned long spi_base_ofs;
+ unsigned long pram_ofs = -ENOMEM;
+
+ /* Can't use of_address_to_resource(), QE muram isn't at 0. */
+ iprop = of_get_property(np, "reg", &size);
+
+ /* QE with a fixed pram location? */
+ if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
+ return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
+
+ /* QE but with a dynamic pram location? */
+ if (mspi->flags & SPI_QE) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
+ return pram_ofs;
+ }
+
+ /* CPM1 and CPM2 pram must be at a fixed addr. */
+ if (!iprop || size != sizeof(*iprop) * 4)
+ return -ENOMEM;
+
+ spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
+ if (IS_ERR_VALUE(spi_base_ofs))
+ return -ENOMEM;
+
+ if (mspi->flags & SPI_CPM2) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ if (!IS_ERR_VALUE(pram_ofs)) {
+ u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
+
+ out_be16(spi_base, pram_ofs);
+ }
+ } else {
+ struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
+ u16 rpbase = in_be16(&pram->rpbase);
+
+ /* Microcode relocation patch applied? */
+ if (rpbase)
+ pram_ofs = rpbase;
+ else
+ return spi_base_ofs;
+ }
+
+ cpm_muram_free(spi_base_ofs);
+ return pram_ofs;
+}
+
+static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev_archdata_get_node(&dev->archdata);
+ const u32 *iprop;
+ int size;
+ unsigned long pram_ofs;
+ unsigned long bds_ofs;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return 0;
+
+ if (!mpc8xxx_spi_alloc_dummy_rx())
+ return -ENOMEM;
+
+ if (mspi->flags & SPI_QE) {
+ iprop = of_get_property(np, "cell-index", &size);
+ if (iprop && size == sizeof(*iprop))
+ mspi->subblock = *iprop;
+
+ switch (mspi->subblock) {
+ default:
+ dev_warn(dev, "cell-index unspecified, assuming SPI1");
+ /* fall through */
+ case 0:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI1;
+ break;
+ case 1:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI2;
+ break;
+ }
+ }
+
+ pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi);
+ if (IS_ERR_VALUE(pram_ofs)) {
+ dev_err(dev, "can't allocate spi parameter ram\n");
+ goto err_pram;
+ }
+
+ bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
+ sizeof(*mspi->rx_bd), 8);
+ if (IS_ERR_VALUE(bds_ofs)) {
+ dev_err(dev, "can't allocate bds\n");
+ goto err_bds;
+ }
+
+ mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
+ dev_err(dev, "unable to map dummy tx buffer\n");
+ goto err_dummy_tx;
+ }
+
+ mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
+ dev_err(dev, "unable to map dummy rx buffer\n");
+ goto err_dummy_rx;
+ }
+
+ mspi->pram = cpm_muram_addr(pram_ofs);
+
+ mspi->tx_bd = cpm_muram_addr(bds_ofs);
+ mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
+
+ /* Initialize parameter ram. */
+ out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
+ out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
+ out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_be16(&mspi->pram->mrblr, SPI_MRBLR);
+ out_be32(&mspi->pram->rstate, 0);
+ out_be32(&mspi->pram->rdp, 0);
+ out_be16(&mspi->pram->rbptr, 0);
+ out_be16(&mspi->pram->rbc, 0);
+ out_be32(&mspi->pram->rxtmp, 0);
+ out_be32(&mspi->pram->tstate, 0);
+ out_be32(&mspi->pram->tdp, 0);
+ out_be16(&mspi->pram->tbptr, 0);
+ out_be16(&mspi->pram->tbc, 0);
+ out_be32(&mspi->pram->txtmp, 0);
+
+ return 0;
+
+err_dummy_rx:
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+err_dummy_tx:
+ cpm_muram_free(bds_ofs);
+err_bds:
+ cpm_muram_free(pram_ofs);
+err_pram:
+ mpc8xxx_spi_free_dummy_rx();
+ return -ENOMEM;
+}
+
+static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+
+ dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+ cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
+ mpc8xxx_spi_free_dummy_rx();
+}
+
+static const char *mpc8xxx_spi_strmode(unsigned int flags)
+{
+ if (flags & SPI_QE_CPU_MODE) {
+ return "QE CPU";
+ } else if (flags & SPI_CPM_MODE) {
+ if (flags & SPI_QE)
+ return "QE";
+ else if (flags & SPI_CPM2)
+ return "CPM2";
+ else
+ return "CPM1";
+ }
+ return "CPU";
+}
+
static struct spi_master * __devinit
mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
{
@@ -552,24 +994,29 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
master->cleanup = mpc8xxx_spi_cleanup;
mpc8xxx_spi = spi_master_get_devdata(master);
- mpc8xxx_spi->qe_mode = pdata->qe_mode;
+ mpc8xxx_spi->dev = dev;
mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
+ mpc8xxx_spi->flags = pdata->flags;
mpc8xxx_spi->spibrg = pdata->sysclk;
+ ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi);
+ if (ret)
+ goto err_cpm_init;
+
mpc8xxx_spi->rx_shift = 0;
mpc8xxx_spi->tx_shift = 0;
- if (mpc8xxx_spi->qe_mode) {
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
mpc8xxx_spi->rx_shift = 16;
mpc8xxx_spi->tx_shift = 24;
}
init_completion(&mpc8xxx_spi->done);
- mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1);
+ mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
if (mpc8xxx_spi->base == NULL) {
ret = -ENOMEM;
- goto put_master;
+ goto err_ioremap;
}
mpc8xxx_spi->irq = irq;
@@ -592,7 +1039,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
/* Enable SPI interface */
regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
- if (pdata->qe_mode)
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
regval |= SPMODE_OP;
mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
@@ -612,9 +1059,8 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
if (ret < 0)
goto unreg_master;
- printk(KERN_INFO
- "%s: MPC8xxx SPI Controller driver at 0x%p (irq = %d)\n",
- dev_name(dev), mpc8xxx_spi->base, mpc8xxx_spi->irq);
+ dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base,
+ mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
return master;
@@ -624,7 +1070,9 @@ free_irq:
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
unmap_io:
iounmap(mpc8xxx_spi->base);
-put_master:
+err_ioremap:
+ mpc8xxx_spi_cpm_free(mpc8xxx_spi);
+err_cpm_init:
spi_master_put(master);
err:
return ERR_PTR(ret);
@@ -644,6 +1092,7 @@ static int __devexit mpc8xxx_spi_remove(struct device *dev)
free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
iounmap(mpc8xxx_spi->base);
+ mpc8xxx_spi_cpm_free(mpc8xxx_spi);
return 0;
}
@@ -709,6 +1158,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
gpio = of_get_gpio_flags(np, i, &flags);
if (!gpio_is_valid(gpio)) {
dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
+ ret = gpio;
goto err_loop;
}
@@ -804,7 +1254,13 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
prop = of_get_property(np, "mode", NULL);
if (prop && !strcmp(prop, "cpu-qe"))
- pdata->qe_mode = 1;
+ pdata->flags = SPI_QE_CPU_MODE;
+ else if (prop && !strcmp(prop, "qe"))
+ pdata->flags = SPI_CPM_MODE | SPI_QE;
+ else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
+ pdata->flags = SPI_CPM_MODE | SPI_CPM2;
+ else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
+ pdata->flags = SPI_CPM_MODE | SPI_CPM1;
ret = of_mpc8xxx_spi_get_chipselects(dev);
if (ret)
@@ -900,7 +1356,7 @@ static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
MODULE_ALIAS("platform:mpc8xxx_spi");
static struct platform_driver mpc8xxx_spi_driver = {
.probe = plat_mpc8xxx_spi_probe,
- .remove = __exit_p(plat_mpc8xxx_spi_remove),
+ .remove = __devexit_p(plat_mpc8xxx_spi_remove),
.driver = {
.name = "mpc8xxx_spi",
.owner = THIS_MODULE,
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c
new file mode 100644
index 00000000000..b319f9bf9b9
--- /dev/null
+++ b/drivers/spi/spi_nuc900.c
@@ -0,0 +1,504 @@
+/* linux/drivers/spi/spi_nuc900.c
+ *
+ * Copyright (c) 2009 Nuvoton technology.
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <mach/nuc900_spi.h>
+
+/* usi registers offset */
+#define USI_CNT 0x00
+#define USI_DIV 0x04
+#define USI_SSR 0x08
+#define USI_RX0 0x10
+#define USI_TX0 0x10
+
+/* usi register bit */
+#define ENINT (0x01 << 17)
+#define ENFLG (0x01 << 16)
+#define TXNUM (0x03 << 8)
+#define TXNEG (0x01 << 2)
+#define RXNEG (0x01 << 1)
+#define LSB (0x01 << 10)
+#define SELECTLEV (0x01 << 2)
+#define SELECTPOL (0x01 << 31)
+#define SELECTSLAVE 0x01
+#define GOBUSY 0x01
+
+struct nuc900_spi {
+ struct spi_bitbang bitbang;
+ struct completion done;
+ void __iomem *regs;
+ int irq;
+ int len;
+ int count;
+ const unsigned char *tx;
+ unsigned char *rx;
+ struct clk *clk;
+ struct resource *ioarea;
+ struct spi_master *master;
+ struct spi_device *curdev;
+ struct device *dev;
+ struct nuc900_spi_info *pdata;
+ spinlock_t lock;
+ struct resource *res;
+};
+
+static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr)
+{
+ struct nuc900_spi *hw = to_hw(spi);
+ unsigned int val;
+ unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_SSR);
+
+ if (!cs)
+ val &= ~SELECTLEV;
+ else
+ val |= SELECTLEV;
+
+ if (!ssr)
+ val &= ~SELECTSLAVE;
+ else
+ val |= SELECTSLAVE;
+
+ __raw_writel(val, hw->regs + USI_SSR);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (!cpol)
+ val &= ~SELECTPOL;
+ else
+ val |= SELECTPOL;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_spi_chipsel(struct spi_device *spi, int value)
+{
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ nuc900_slave_select(spi, 0);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ nuc900_slave_select(spi, 1);
+ break;
+ }
+}
+
+static void nuc900_spi_setup_txnum(struct nuc900_spi *hw,
+ unsigned int txnum)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (!txnum)
+ val &= ~TXNUM;
+ else
+ val |= txnum << 0x08;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+}
+
+static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
+ unsigned int txbitlen)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= (txbitlen << 0x03);
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_spi_gobusy(struct nuc900_spi *hw)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= GOBUSY;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static int nuc900_spi_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ return 0;
+}
+
+static int nuc900_spi_setup(struct spi_device *spi)
+{
+ return 0;
+}
+
+static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
+{
+ return hw->tx ? hw->tx[count] : 0;
+}
+
+static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct nuc900_spi *hw = to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0);
+
+ nuc900_spi_gobusy(hw);
+
+ wait_for_completion(&hw->done);
+
+ return hw->count;
+}
+
+static irqreturn_t nuc900_spi_irq(int irq, void *dev)
+{
+ struct nuc900_spi *hw = dev;
+ unsigned int status;
+ unsigned int count = hw->count;
+
+ status = __raw_readl(hw->regs + USI_CNT);
+ __raw_writel(status, hw->regs + USI_CNT);
+
+ if (status & ENFLG) {
+ hw->count++;
+
+ if (hw->rx)
+ hw->rx[count] = __raw_readl(hw->regs + USI_RX0);
+ count++;
+
+ if (count < hw->len) {
+ __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0);
+ nuc900_spi_gobusy(hw);
+ } else {
+ complete(&hw->done);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&hw->done);
+ return IRQ_HANDLED;
+}
+
+static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (edge)
+ val |= TXNEG;
+ else
+ val &= ~TXNEG;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (edge)
+ val |= RXNEG;
+ else
+ val &= ~RXNEG;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (lsb)
+ val |= LSB;
+ else
+ val &= ~LSB;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (sleep)
+ val |= (sleep << 12);
+ else
+ val &= ~(0x0f << 12);
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_enable_int(struct nuc900_spi *hw)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= ENINT;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_set_divider(struct nuc900_spi *hw)
+{
+ __raw_writel(hw->pdata->divider, hw->regs + USI_DIV);
+}
+
+static void nuc900_init_spi(struct nuc900_spi *hw)
+{
+ clk_enable(hw->clk);
+ spin_lock_init(&hw->lock);
+
+ nuc900_tx_edge(hw, hw->pdata->txneg);
+ nuc900_rx_edge(hw, hw->pdata->rxneg);
+ nuc900_send_first(hw, hw->pdata->lsb);
+ nuc900_set_sleep(hw, hw->pdata->sleep);
+ nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen);
+ nuc900_spi_setup_txnum(hw, hw->pdata->txnum);
+ nuc900_set_divider(hw);
+ nuc900_enable_int(hw);
+}
+
+static int __devinit nuc900_spi_probe(struct platform_device *pdev)
+{
+ struct nuc900_spi *hw;
+ struct spi_master *master;
+ int err = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ err = -ENOMEM;
+ goto err_nomem;
+ }
+
+ hw = spi_master_get_devdata(master);
+ memset(hw, 0, sizeof(struct nuc900_spi));
+
+ hw->master = spi_master_get(master);
+ hw->pdata = pdev->dev.platform_data;
+ hw->dev = &pdev->dev;
+
+ if (hw->pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ err = -ENOENT;
+ goto err_pdata;
+ }
+
+ platform_set_drvdata(pdev, hw);
+ init_completion(&hw->done);
+
+ master->mode_bits = SPI_MODE_0;
+ master->num_chipselect = hw->pdata->num_cs;
+ master->bus_num = hw->pdata->bus_num;
+ hw->bitbang.master = hw->master;
+ hw->bitbang.setup_transfer = nuc900_spi_setupxfer;
+ hw->bitbang.chipselect = nuc900_spi_chipsel;
+ hw->bitbang.txrx_bufs = nuc900_spi_txrx;
+ hw->bitbang.master->setup = nuc900_spi_setup;
+
+ hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (hw->res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ err = -ENOENT;
+ goto err_pdata;
+ }
+
+ hw->ioarea = request_mem_region(hw->res->start,
+ resource_size(hw->res), pdev->name);
+
+ if (hw->ioarea == NULL) {
+ dev_err(&pdev->dev, "Cannot reserve region\n");
+ err = -ENXIO;
+ goto err_pdata;
+ }
+
+ hw->regs = ioremap(hw->res->start, resource_size(hw->res));
+ if (hw->regs == NULL) {
+ dev_err(&pdev->dev, "Cannot map IO\n");
+ err = -ENXIO;
+ goto err_iomap;
+ }
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ specified\n");
+ err = -ENOENT;
+ goto err_irq;
+ }
+
+ err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ goto err_irq;
+ }
+
+ hw->clk = clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock for device\n");
+ err = PTR_ERR(hw->clk);
+ goto err_clk;
+ }
+
+ mfp_set_groupg(&pdev->dev);
+ nuc900_init_spi(hw);
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ return 0;
+
+err_register:
+ clk_disable(hw->clk);
+ clk_put(hw->clk);
+err_clk:
+ free_irq(hw->irq, hw);
+err_irq:
+ iounmap(hw->regs);
+err_iomap:
+ release_mem_region(hw->res->start, resource_size(hw->res));
+ kfree(hw->ioarea);
+err_pdata:
+ spi_master_put(hw->master);
+
+err_nomem:
+ return err;
+}
+
+static int __devexit nuc900_spi_remove(struct platform_device *dev)
+{
+ struct nuc900_spi *hw = platform_get_drvdata(dev);
+
+ free_irq(hw->irq, hw);
+
+ platform_set_drvdata(dev, NULL);
+
+ spi_unregister_master(hw->master);
+
+ clk_disable(hw->clk);
+ clk_put(hw->clk);
+
+ iounmap(hw->regs);
+
+ release_mem_region(hw->res->start, resource_size(hw->res));
+ kfree(hw->ioarea);
+
+ spi_master_put(hw->master);
+ return 0;
+}
+
+static struct platform_driver nuc900_spi_driver = {
+ .probe = nuc900_spi_probe,
+ .remove = __devexit_p(nuc900_spi_remove),
+ .driver = {
+ .name = "nuc900-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init nuc900_spi_init(void)
+{
+ return platform_driver_register(&nuc900_spi_driver);
+}
+
+static void __exit nuc900_spi_exit(void)
+{
+ platform_driver_unregister(&nuc900_spi_driver);
+}
+
+module_init(nuc900_spi_init);
+module_exit(nuc900_spi_exit);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("nuc900 spi driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-spi");
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 33d94f76b9e..c010733877a 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -1,7 +1,7 @@
/* linux/drivers/spi/spi_s3c24xx.c
*
* Copyright (c) 2006 Ben Dooks
- * Copyright (c) 2006 Simtec Electronics
+ * Copyright 2006-2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
@@ -28,6 +28,11 @@
#include <plat/regs-spi.h>
#include <mach/spi.h>
+#include <plat/fiq.h>
+#include <asm/fiq.h>
+
+#include "spi_s3c24xx_fiq.h"
+
/**
* s3c24xx_spi_devstate - per device data
* @hz: Last frequency calculated for @sppre field.
@@ -42,6 +47,13 @@ struct s3c24xx_spi_devstate {
u8 sppre;
};
+enum spi_fiq_mode {
+ FIQ_MODE_NONE = 0,
+ FIQ_MODE_TX = 1,
+ FIQ_MODE_RX = 2,
+ FIQ_MODE_TXRX = 3,
+};
+
struct s3c24xx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
@@ -52,6 +64,11 @@ struct s3c24xx_spi {
int len;
int count;
+ struct fiq_handler fiq_handler;
+ enum spi_fiq_mode fiq_mode;
+ unsigned char fiq_inuse;
+ unsigned char fiq_claimed;
+
void (*set_cs)(struct s3c2410_spi_info *spi,
int cs, int pol);
@@ -67,6 +84,7 @@ struct s3c24xx_spi {
struct s3c2410_spi_info *pdata;
};
+
#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
@@ -127,7 +145,7 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
}
if (spi->mode != cs->mode) {
- u8 spcon = SPCON_DEFAULT;
+ u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
if (spi->mode & SPI_CPHA)
spcon |= S3C2410_SPCON_CPHA_FMTB;
@@ -214,13 +232,196 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
return hw->tx ? hw->tx[count] : 0;
}
+#ifdef CONFIG_SPI_S3C24XX_FIQ
+/* Support for FIQ based pseudo-DMA to improve the transfer speed.
+ *
+ * This code uses the assembly helper in spi_s3c24xx_fiq.S which is
+ * used by the FIQ core to move data between main memory and the peripheral
+ * block. Since this is code running on the processor, there is no problem
+ * with cache coherency of the buffers, so we can use any buffer we like.
+ */
+
+/**
+ * struct spi_fiq_code - FIQ code and header
+ * @length: The length of the code fragment, excluding this header.
+ * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
+ * @data: The code itself to install as a FIQ handler.
+ */
+struct spi_fiq_code {
+ u32 length;
+ u32 ack_offset;
+ u8 data[0];
+};
+
+extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
+
+/**
+ * ack_bit - turn IRQ into IRQ acknowledgement bit
+ * @irq: The interrupt number
+ *
+ * Returns the bit to write to the interrupt acknowledge register.
+ */
+static inline u32 ack_bit(unsigned int irq)
+{
+ return 1 << (irq - IRQ_EINT0);
+}
+
+/**
+ * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
+ * @hw: The hardware state.
+ *
+ * Claim the FIQ handler (only one can be active at any one time) and
+ * then setup the correct transfer code for this transfer.
+ *
+ * This call updates all the necessary state information if successful,
+ * so the caller does not need to do anything more than start the transfer
+ * as normal, since the IRQ will have been re-routed to the FIQ handler.
+*/
+void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
+{
+ struct pt_regs regs;
+ enum spi_fiq_mode mode;
+ struct spi_fiq_code *code;
+ int ret;
+
+ if (!hw->fiq_claimed) {
+ /* try and claim fiq if we haven't got it, and if not
+ * then return and simply use another transfer method */
+
+ ret = claim_fiq(&hw->fiq_handler);
+ if (ret)
+ return;
+ }
+
+ if (hw->tx && !hw->rx)
+ mode = FIQ_MODE_TX;
+ else if (hw->rx && !hw->tx)
+ mode = FIQ_MODE_RX;
+ else
+ mode = FIQ_MODE_TXRX;
+
+ regs.uregs[fiq_rspi] = (long)hw->regs;
+ regs.uregs[fiq_rrx] = (long)hw->rx;
+ regs.uregs[fiq_rtx] = (long)hw->tx + 1;
+ regs.uregs[fiq_rcount] = hw->len - 1;
+ regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
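+ /* Note: the first byte is written out directly by s3c24xx_spi_txrx(),
+ * which appears to be why the tx pointer is advanced by one and the
+ * count reduced by one before the rest is handed to the FIQ handler.
+ */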
+
+ set_fiq_regs(&regs);
+
+ if (hw->fiq_mode != mode) {
+ u32 *ack_ptr;
+
+ hw->fiq_mode = mode;
+
+ switch (mode) {
+ case FIQ_MODE_TX:
+ code = &s3c24xx_spi_fiq_tx;
+ break;
+ case FIQ_MODE_RX:
+ code = &s3c24xx_spi_fiq_rx;
+ break;
+ case FIQ_MODE_TXRX:
+ code = &s3c24xx_spi_fiq_txrx;
+ break;
+ default:
+ code = NULL;
+ }
+
+ BUG_ON(!code);
+
+ ack_ptr = (u32 *)&code->data[code->ack_offset];
+ *ack_ptr = ack_bit(hw->irq);
+
+ set_fiq_handler(&code->data, code->length);
+ }
+
+ s3c24xx_set_fiq(hw->irq, true);
+
+ hw->fiq_mode = mode;
+ hw->fiq_inuse = 1;
+}
+
+/**
+ * s3c24xx_spi_fiqop - FIQ core code callback
+ * @pw: Data registered with the handler
+ * @release: Whether this is a release or a return.
+ *
+ * Called by the FIQ code when another module wants to use the FIQ, so
+ * return whether we are currently using this or not and then update our
+ * internal state.
+ */
+static int s3c24xx_spi_fiqop(void *pw, int release)
+{
+ struct s3c24xx_spi *hw = pw;
+ int ret = 0;
+
+ if (release) {
+ if (hw->fiq_inuse)
+ ret = -EBUSY;
+
+ /* note, we do not need to unroute the FIQ, as the FIQ
+ * vector code de-routes it to signal the end of transfer */
+
+ hw->fiq_mode = FIQ_MODE_NONE;
+ hw->fiq_claimed = 0;
+ } else {
+ hw->fiq_claimed = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * s3c24xx_spi_initfiq - setup the information for the FIQ core
+ * @hw: The hardware state.
+ *
+ * Setup the fiq_handler block to pass to the FIQ core.
+ */
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
+{
+ hw->fiq_handler.dev_id = hw;
+ hw->fiq_handler.name = dev_name(hw->dev);
+ hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
+}
+
+/**
+ * s3c24xx_spi_usefiq - return if we should be using FIQ.
+ * @hw: The hardware state.
+ *
+ * Return true if the platform data specifies that this channel is
+ * allowed to use the FIQ.
+ */
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
+{
+ return hw->pdata->use_fiq;
+}
+
+/**
+ * s3c24xx_spi_usingfiq - return if channel is using FIQ
+ * @spi: The hardware state.
+ *
+ * Return whether the channel is currently using the FIQ (separate from
+ * whether the FIQ is claimed).
+ */
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
+{
+ return spi->fiq_inuse;
+}
+#else
+
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
+static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
+
+#endif /* CONFIG_SPI_S3C24XX_FIQ */
+
static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
{
struct s3c24xx_spi *hw = to_hw(spi);
- dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
- t->tx_buf, t->rx_buf, t->len);
-
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->len = t->len;
@@ -228,11 +429,14 @@ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
init_completion(&hw->done);
+ hw->fiq_inuse = 0;
+ if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
+ s3c24xx_spi_tryfiq(hw);
+
/* send the first byte */
writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
wait_for_completion(&hw->done);
-
return hw->count;
}
@@ -254,17 +458,27 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
goto irq_done;
}
- hw->count++;
+ if (!s3c24xx_spi_usingfiq(hw)) {
+ hw->count++;
- if (hw->rx)
- hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
+ if (hw->rx)
+ hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
- count++;
+ count++;
+
+ if (count < hw->len)
+ writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
+ else
+ complete(&hw->done);
+ } else {
+ hw->count = hw->len;
+ hw->fiq_inuse = 0;
+
+ if (hw->rx)
+ hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
- if (count < hw->len)
- writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
- else
complete(&hw->done);
+ }
irq_done:
return IRQ_HANDLED;
@@ -322,6 +536,10 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
init_completion(&hw->done);
+ /* initialise fiq handler */
+
+ s3c24xx_spi_initfiq(hw);
+
/* setup the master state. */
/* the spi->mode bits understood by this driver: */
@@ -489,7 +707,7 @@ static int s3c24xx_spi_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops s3c24xx_spi_pmops = {
+static const struct dev_pm_ops s3c24xx_spi_pmops = {
.suspend = s3c24xx_spi_suspend,
.resume = s3c24xx_spi_resume,
};
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi_s3c24xx_fiq.S
new file mode 100644
index 00000000000..3793cae361d
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.S
@@ -0,0 +1,116 @@
+/* linux/drivers/spi/spi_s3c24xx_fiq.S
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX SPI - FIQ pseudo-DMA transfer code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include <mach/map.h>
+#include <mach/regs-irq.h>
+#include <plat/regs-spi.h>
+
+#include "spi_s3c24xx_fiq.h"
+
+ .text
+
+ @ entry to these routines is as follows, with the register names
+ @ defined in fiq.h so that they can be shared with the C files which
+ @ setup the calling registers.
+ @
+ @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
+ @ fiq_rtmp Temporary register to hold tx/rx data
+ @ fiq_rspi The base of the SPI register block
+ @ fiq_rtx The tx buffer pointer
+ @ fiq_rrx The rx buffer pointer
+ @ fiq_rcount The number of bytes to move
+
+ @ each entry starts with a word entry of how long it is
+ @ and an offset to the irq acknowledgment word
+
+ENTRY(s3c24xx_spi_fiq_rx)
+s3c24xx_spi_fix_rx:
+ .word fiq_rx_end - fiq_rx_start
+ .word fiq_rx_irq_ack - fiq_rx_start
+fiq_rx_start:
+ ldr fiq_rtmp, fiq_rx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+ strb fiq_rtmp, [ fiq_rrx ], #1
+
+ mov fiq_rtmp, #0xff
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ @@ set IRQ controller so that next op will trigger IRQ
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_rx_irq_ack:
+ .word 0
+fiq_rx_end:
+
+ENTRY(s3c24xx_spi_fiq_txrx)
+s3c24xx_spi_fiq_txrx:
+ .word fiq_txrx_end - fiq_txrx_start
+ .word fiq_txrx_irq_ack - fiq_txrx_start
+fiq_txrx_start:
+
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+ strb fiq_rtmp, [ fiq_rrx ], #1
+
+ ldr fiq_rtmp, fiq_txrx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rtx ], #1
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_txrx_irq_ack:
+ .word 0
+
+fiq_txrx_end:
+
+ENTRY(s3c24xx_spi_fiq_tx)
+s3c24xx_spi_fix_tx:
+ .word fiq_tx_end - fiq_tx_start
+ .word fiq_tx_irq_ack - fiq_tx_start
+fiq_tx_start:
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+
+ ldr fiq_rtmp, fiq_tx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rtx ], #1
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_tx_irq_ack:
+ .word 0
+
+fiq_tx_end:
+
+ .end
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi_s3c24xx_fiq.h
new file mode 100644
index 00000000000..a5950bb25b5
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.h
@@ -0,0 +1,26 @@
+/* linux/drivers/spi/spi_s3c24xx_fiq.h
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX SPI - FIQ pseudo-DMA transfer support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* We have R8 through R13 to play with */
+
+#ifdef __ASSEMBLY__
+#define __REG_NR(x) r##x
+#else
+#define __REG_NR(x) (x)
+#endif
+
+#define fiq_rspi __REG_NR(8)
+#define fiq_rtmp __REG_NR(9)
+#define fiq_rrx __REG_NR(10)
+#define fiq_rtx __REG_NR(11)
+#define fiq_rcount __REG_NR(12)
+#define fiq_rirq __REG_NR(13)
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
new file mode 100644
index 00000000000..88a456dba96
--- /dev/null
+++ b/drivers/spi/spi_s3c64xx.c
@@ -0,0 +1,1196 @@
+/* linux/drivers/spi/spi_s3c64xx.c
+ *
+ * Copyright (C) 2009 Samsung Electronics Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#include <mach/dma.h>
+#include <plat/spi.h>
+
+/* Registers and bit-fields */
+
+#define S3C64XX_SPI_CH_CFG 0x00
+#define S3C64XX_SPI_CLK_CFG 0x04
+#define S3C64XX_SPI_MODE_CFG 0x08
+#define S3C64XX_SPI_SLAVE_SEL 0x0C
+#define S3C64XX_SPI_INT_EN 0x10
+#define S3C64XX_SPI_STATUS 0x14
+#define S3C64XX_SPI_TX_DATA 0x18
+#define S3C64XX_SPI_RX_DATA 0x1C
+#define S3C64XX_SPI_PACKET_CNT 0x20
+#define S3C64XX_SPI_PENDING_CLR 0x24
+#define S3C64XX_SPI_SWAP_CFG 0x28
+#define S3C64XX_SPI_FB_CLK 0x2C
+
+#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
+#define S3C64XX_SPI_CH_SW_RST (1<<5)
+#define S3C64XX_SPI_CH_SLAVE (1<<4)
+#define S3C64XX_SPI_CPOL_L (1<<3)
+#define S3C64XX_SPI_CPHA_B (1<<2)
+#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
+#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
+
+#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
+#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
+#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
+#define S3C64XX_SPI_PSR_MASK 0xff
+
+#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
+#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
+#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
+#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
+#define S3C64XX_SPI_MODE_4BURST (1<<0)
+
+#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
+#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
+
+#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
+
+#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
+ (c)->regs + S3C64XX_SPI_SLAVE_SEL)
+
+#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
+#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
+#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
+#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
+#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
+#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
+#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
+
+#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
+#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
+#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
+#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
+#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
+#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
+
+#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+
+#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
+#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
+#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
+#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
+#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
+
+#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
+#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
+#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
+#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
+#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
+#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
+#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
+#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
+
+#define S3C64XX_SPI_FBCLK_MSK (3<<0)
+
+#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
+ (((i)->fifo_lvl_mask + 1))) \
+ ? 1 : 0)
+
+#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
+ (((i)->fifo_lvl_mask + 1) << 1)) \
+ ? 1 : 0)
+#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
+#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
+
+#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
+#define S3C64XX_SPI_TRAILCNT_OFF 19
+
+#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
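+/*
+ * Rough derivation: loops_per_jiffy busy-wait loops per jiffy, times HZ
+ * jiffies per second, gives loops per second; dividing by 1000 yields an
+ * approximate loops-per-millisecond figure for the polling timeouts below.
+ */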
+
+#define SUSPND (1<<0)
+#define SPIBUSY (1<<1)
+#define RXBUSY (1<<2)
+#define TXBUSY (1<<3)
+
+/**
+ * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
+ * @clk: Pointer to the spi clock.
+ * @master: Pointer to the SPI Protocol master.
+ * @workqueue: Work queue for the SPI xfer requests.
+ * @cntrlr_info: Platform specific data for the controller this driver manages.
+ * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
+ * @work: Work
+ * @queue: To log SPI xfer requests.
+ * @lock: Controller specific lock.
+ * @state: Set of FLAGS to indicate status.
+ * @rx_dmach: Controller's DMA channel for Rx.
+ * @tx_dmach: Controller's DMA channel for Tx.
+ * @sfr_start: BUS address of SPI controller regs.
+ * @regs: Pointer to ioremap'ed controller registers.
+ * @xfer_completion: To indicate completion of xfer task.
+ * @cur_mode: Stores the active configuration of the controller.
+ * @cur_bpw: Stores the active bits per word settings.
+ * @cur_speed: Stores the active xfer clock speed.
+ */
+struct s3c64xx_spi_driver_data {
+ void __iomem *regs;
+ struct clk *clk;
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct workqueue_struct *workqueue;
+ struct s3c64xx_spi_cntrlr_info *cntrlr_info;
+ struct spi_device *tgl_spi;
+ struct work_struct work;
+ struct list_head queue;
+ spinlock_t lock;
+ enum dma_ch rx_dmach;
+ enum dma_ch tx_dmach;
+ unsigned long sfr_start;
+ struct completion xfer_completion;
+ unsigned state;
+ unsigned cur_mode, cur_bpw;
+ unsigned cur_speed;
+};
+
+static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
+ .name = "samsung-spi-dma",
+};
+
+static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned long loops;
+ u32 val;
+
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val |= S3C64XX_SPI_CH_SW_RST;
+ val &= ~S3C64XX_SPI_CH_HS_EN;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Flush TxFIFO */
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ } while (TX_FIFO_LVL(val, sci) && loops--);
+
+ /* Flush RxFIFO */
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ if (RX_FIFO_LVL(val, sci))
+ readl(regs + S3C64XX_SPI_RX_DATA);
+ else
+ break;
+ } while (loops--);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~S3C64XX_SPI_CH_SW_RST;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+}
+
+static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi,
+ struct spi_transfer *xfer, int dma_mode)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ u32 modecfg, chcfg;
+
+ modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
+ modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+
+ chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
+ chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
+
+ if (dma_mode) {
+ chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
+ } else {
+ /* Always shift in data to the FIFO, even if the xfer is
+ * Tx only; this helps set the PCKT_CNT value so that
+ * exactly the needed number of clocks is generated.
+ */
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ }
+
+ if (xfer->tx_buf != NULL) {
+ sdd->state |= TXBUSY;
+ chcfg |= S3C64XX_SPI_CH_TXCH_ON;
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+ s3c2410_dma_config(sdd->tx_dmach, 1);
+ s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
+ xfer->tx_dma, xfer->len);
+ s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+ } else {
+ unsigned char *buf = (unsigned char *) xfer->tx_buf;
+ int i = 0;
+ while (i < xfer->len)
+ writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
+ }
+ }
+
+ if (xfer->rx_buf != NULL) {
+ sdd->state |= RXBUSY;
+
+ if (sci->high_speed && sdd->cur_speed >= 30000000UL
+ && !(sdd->cur_mode & SPI_CPHA))
+ chcfg |= S3C64XX_SPI_CH_HS_EN;
+
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ s3c2410_dma_config(sdd->rx_dmach, 1);
+ s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
+ xfer->rx_dma, xfer->len);
+ s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+ }
+ }
+
+ writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
+ writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+}
+
+static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs;
+
+ if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
+ if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
+ /* Deselect the last toggled device */
+ cs = sdd->tgl_spi->controller_data;
+ cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+ }
+ sdd->tgl_spi = NULL;
+ }
+
+ cs = spi->controller_data;
+ cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
+}
+
+static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer, int dma_mode)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ int ms;
+
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 5; /* some tolerance */
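+ /* For example (illustrative numbers), a 256 byte transfer at 1 MHz
+ * works out to roughly 256 * 8 * 1000 / 1000000 = 2 ms, plus the
+ * 5 ms of tolerance above.
+ */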
+
+ if (dma_mode) {
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+ } else {
+ val = msecs_to_loops(ms);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
+ }
+
+ if (!val)
+ return -EIO;
+
+ if (dma_mode) {
+ u32 status;
+
+ /*
+ * DmaTx returns after simply writing data into the FIFO,
+ * without waiting for the real transmission on the bus to
+ * finish. DmaRx returns only after the DMA has read data
+ * from the FIFO, which requires the bus transmission to
+ * finish, so we don't need to worry when the xfer involved
+ * Rx (with or without Tx).
+ if (xfer->rx_buf == NULL) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sci)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sci))
+ && --val) {
+ cpu_relax();
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ }
+
+ if (!val)
+ return -EIO;
+ }
+ } else {
+ unsigned char *buf;
+ int i;
+
+ /* If it was only Tx */
+ if (xfer->rx_buf == NULL) {
+ sdd->state &= ~TXBUSY;
+ return 0;
+ }
+
+ i = 0;
+ buf = xfer->rx_buf;
+ while (i < xfer->len)
+ buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
+
+ sdd->state &= ~RXBUSY;
+ }
+
+ return 0;
+}
+
+static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+
+ if (sdd->tgl_spi == spi)
+ sdd->tgl_spi = NULL;
+
+ cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
+}
+
+static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ u32 val;
+
+ /* Disable Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+
+ /* Set Polarity and Phase */
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_SLAVE |
+ S3C64XX_SPI_CPOL_L |
+ S3C64XX_SPI_CPHA_B);
+
+ if (sdd->cur_mode & SPI_CPOL)
+ val |= S3C64XX_SPI_CPOL_L;
+
+ if (sdd->cur_mode & SPI_CPHA)
+ val |= S3C64XX_SPI_CPHA_B;
+
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Set Channel & DMA Mode */
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
+ | S3C64XX_SPI_MODE_CH_TSZ_MASK);
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
+ break;
+ case 16:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
+ break;
+ default:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
+ break;
+ }
+ val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
+
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ /* Configure Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_PSR_MASK;
+ val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
+ & S3C64XX_SPI_PSR_MASK);
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+
+ /* Enable Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val |= S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+}
+
+void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
+{
+ struct s3c64xx_spi_driver_data *sdd = buf_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (res == S3C2410_RES_OK)
+ sdd->state &= ~RXBUSY;
+ else
+ dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
+
+ /* If the other done */
+ if (!(sdd->state & TXBUSY))
+ complete(&sdd->xfer_completion);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
+ int size, enum s3c2410_dma_buffresult res)
+{
+ struct s3c64xx_spi_driver_data *sdd = buf_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (res == S3C2410_RES_OK)
+ sdd->state &= ~TXBUSY;
+ else
+ dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
+
+ /* If the other done */
+ if (!(sdd->state & RXBUSY))
+ complete(&sdd->xfer_completion);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
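+/*
+ * DMA_BIT_MASK(32) serves here as a sentinel "not mapped" address, so the
+ * unmap path below can tell which buffers this driver mapped itself;
+ * presumably no real single mapping ends up at exactly that address.
+ */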
+
+static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_message *msg)
+{
+ struct device *dev = &sdd->pdev->dev;
+ struct spi_transfer *xfer;
+
+ if (msg->is_dma_mapped)
+ return 0;
+
+ /* First mark all xfer unmapped */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ xfer->rx_dma = XFER_DMAADDR_INVALID;
+ xfer->tx_dma = XFER_DMAADDR_INVALID;
+ }
+
+ /* Map until end or first fail */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+ if (xfer->tx_buf != NULL) {
+ xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma)) {
+ dev_err(dev, "dma_map_single Tx failed\n");
+ xfer->tx_dma = XFER_DMAADDR_INVALID;
+ return -ENOMEM;
+ }
+ }
+
+ if (xfer->rx_buf != NULL) {
+ xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
+ xfer->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
+ dev_err(dev, "dma_map_single Rx failed\n");
+ dma_unmap_single(dev, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+ xfer->tx_dma = XFER_DMAADDR_INVALID;
+ xfer->rx_dma = XFER_DMAADDR_INVALID;
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_message *msg)
+{
+ struct device *dev = &sdd->pdev->dev;
+ struct spi_transfer *xfer;
+
+ if (msg->is_dma_mapped)
+ return;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+ if (xfer->rx_buf != NULL
+ && xfer->rx_dma != XFER_DMAADDR_INVALID)
+ dma_unmap_single(dev, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+
+ if (xfer->tx_buf != NULL
+ && xfer->tx_dma != XFER_DMAADDR_INVALID)
+ dma_unmap_single(dev, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+ }
+}
+
+static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct spi_device *spi = msg->spi;
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+ struct spi_transfer *xfer;
+ int status = 0, cs_toggle = 0;
+ u32 speed;
+ u8 bpw;
+
+ /* If the Master's (controller) state differs from that needed by the Slave */
+ if (sdd->cur_speed != spi->max_speed_hz
+ || sdd->cur_mode != spi->mode
+ || sdd->cur_bpw != spi->bits_per_word) {
+ sdd->cur_bpw = spi->bits_per_word;
+ sdd->cur_speed = spi->max_speed_hz;
+ sdd->cur_mode = spi->mode;
+ s3c64xx_spi_config(sdd);
+ }
+
+ /* Map all the transfers if needed */
+ if (s3c64xx_spi_map_mssg(sdd, msg)) {
+ dev_err(&spi->dev,
+ "Xfer: Unable to map message buffers!\n");
+ status = -ENOMEM;
+ goto out;
+ }
+
+ /* Configure feedback delay */
+ writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+
+ unsigned long flags;
+ int use_dma;
+
+ INIT_COMPLETION(sdd->xfer_completion);
+
+ /* Only BPW and Speed may change across transfers */
+ bpw = xfer->bits_per_word ? : spi->bits_per_word;
+ speed = xfer->speed_hz ? : spi->max_speed_hz;
+
+ if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ s3c64xx_spi_config(sdd);
+ }
+
+ /* Polling method for xfers not bigger than FIFO capacity */
+ if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ use_dma = 0;
+ else
+ use_dma = 1;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ /* Only mark as pending what is actually to be done */
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
+
+ enable_datapath(sdd, spi, xfer, use_dma);
+
+ /* Slave Select */
+ enable_cs(sdd, spi);
+
+ /* Start the signals */
+ S3C64XX_SPI_ACT(sdd);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ status = wait_for_xfer(sdd, xfer, use_dma);
+
+ /* Quiesce the signals */
+ S3C64XX_SPI_DEACT(sdd);
+
+ if (status) {
+ dev_err(&spi->dev, "I/O Error: \
+ rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+ (sdd->state & RXBUSY) ? 'f' : 'p',
+ (sdd->state & TXBUSY) ? 'f' : 'p',
+ xfer->len);
+
+ if (use_dma) {
+ if (xfer->tx_buf != NULL
+ && (sdd->state & TXBUSY))
+ s3c2410_dma_ctrl(sdd->tx_dmach,
+ S3C2410_DMAOP_FLUSH);
+ if (xfer->rx_buf != NULL
+ && (sdd->state & RXBUSY))
+ s3c2410_dma_ctrl(sdd->rx_dmach,
+ S3C2410_DMAOP_FLUSH);
+ }
+
+ goto out;
+ }
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ /* Hint that the next mssg is going to be
+ for the same device */
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers))
+ cs_toggle = 1;
+ else
+ disable_cs(sdd, spi);
+ }
+
+ msg->actual_length += xfer->len;
+
+ flush_fifo(sdd);
+ }
+
+out:
+ if (!cs_toggle || status)
+ disable_cs(sdd, spi);
+ else
+ sdd->tgl_spi = spi;
+
+ s3c64xx_spi_unmap_mssg(sdd, msg);
+
+ msg->status = status;
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+{
+ if (s3c2410_dma_request(sdd->rx_dmach,
+ &s3c64xx_spi_dma_client, NULL) < 0) {
+ dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
+ return 0;
+ }
+ s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
+ s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
+ sdd->sfr_start + S3C64XX_SPI_RX_DATA);
+
+ if (s3c2410_dma_request(sdd->tx_dmach,
+ &s3c64xx_spi_dma_client, NULL) < 0) {
+ dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
+ s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+ return 0;
+ }
+ s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
+ s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
+ sdd->sfr_start + S3C64XX_SPI_TX_DATA);
+
+ return 1;
+}
+
+static void s3c64xx_spi_work(struct work_struct *work)
+{
+ struct s3c64xx_spi_driver_data *sdd = container_of(work,
+ struct s3c64xx_spi_driver_data, work);
+ unsigned long flags;
+
+ /* Acquire DMA channels */
+ while (!acquire_dma(sdd))
+ msleep(10);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ while (!list_empty(&sdd->queue)
+ && !(sdd->state & SUSPND)) {
+
+ struct spi_message *msg;
+
+ msg = container_of(sdd->queue.next, struct spi_message, queue);
+
+ list_del_init(&msg->queue);
+
+ /* Set Xfer busy flag */
+ sdd->state |= SPIBUSY;
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ handle_msg(sdd, msg);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ sdd->state &= ~SPIBUSY;
+ }
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ /* Free DMA channels */
+ s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
+ s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+}
+
+static int s3c64xx_spi_transfer(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ unsigned long flags;
+
+ sdd = spi_master_get_devdata(spi->master);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (sdd->state & SUSPND) {
+ spin_unlock_irqrestore(&sdd->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->status = -EINPROGRESS;
+ msg->actual_length = 0;
+
+ list_add_tail(&msg->queue, &sdd->queue);
+
+ queue_work(sdd->workqueue, &sdd->work);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Here we only check the validity of requested configuration
+ * and save the configuration in a local data-structure.
+ * The controller is actually configured only just before we
+ * get a message to transfer.
+ */
+static int s3c64xx_spi_setup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_cntrlr_info *sci;
+ struct spi_message *msg;
+ u32 psr, speed;
+ unsigned long flags;
+ int err = 0;
+
+ if (cs == NULL || cs->set_level == NULL) {
+ dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
+ return -ENODEV;
+ }
+
+ sdd = spi_master_get_devdata(spi->master);
+ sci = sdd->cntrlr_info;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ list_for_each_entry(msg, &sdd->queue, queue) {
+ /* Is some mssg already queued for this device? */
+ if (msg->spi == spi) {
+ dev_err(&spi->dev,
+ "setup: attempt while mssg in queue!\n");
+ spin_unlock_irqrestore(&sdd->lock, flags);
+ return -EBUSY;
+ }
+ }
+
+ if (sdd->state & SUSPND) {
+ spin_unlock_irqrestore(&sdd->lock, flags);
+ dev_err(&spi->dev,
+ "setup: SPI-%d not active!\n", spi->master->bus_num);
+ return -ESHUTDOWN;
+ }
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ if (spi->bits_per_word != 8
+ && spi->bits_per_word != 16
+ && spi->bits_per_word != 32) {
+ dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
+ spi->bits_per_word);
+ err = -EINVAL;
+ goto setup_exit;
+ }
+
+ /* Check if we can provide the requested rate */
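+ /* Effective rate is src_clk / 2 / (psr + 1). For example, assuming
+ * a 66 MHz source clock and a requested 10 MHz: psr computes to
+ * 33000000 / 10000000 - 1 = 2, giving 11 MHz; since that exceeds
+ * the request, psr is bumped to 3 below for an effective 8.25 MHz.
+ */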
+ speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */
+
+ if (spi->max_speed_hz > speed)
+ spi->max_speed_hz = speed;
+
+ psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
+ psr &= S3C64XX_SPI_PSR_MASK;
+ if (psr == S3C64XX_SPI_PSR_MASK)
+ psr--;
+
+ speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+ if (spi->max_speed_hz < speed) {
+ if (psr+1 < S3C64XX_SPI_PSR_MASK) {
+ psr++;
+ } else {
+ err = -EINVAL;
+ goto setup_exit;
+ }
+ }
+
+ speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
+ if (spi->max_speed_hz >= speed)
+ spi->max_speed_hz = speed;
+ else
+ err = -EINVAL;
+
+setup_exit:
+
+ /* setup() returns with device de-selected */
+ disable_cs(sdd, spi);
+
+ return err;
+}
+
+static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
+{
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned int val;
+
+ sdd->cur_speed = 0;
+
+ S3C64XX_SPI_DEACT(sdd);
+
+ /* Disable Interrupts - we use Polling if not DMA mode */
+ writel(0, regs + S3C64XX_SPI_INT_EN);
+
+ writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
+ regs + S3C64XX_SPI_CLK_CFG);
+ writel(0, regs + S3C64XX_SPI_MODE_CFG);
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+ /* Clear any irq pending bits */
+ writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
+ regs + S3C64XX_SPI_PENDING_CLR);
+
+ writel(0, regs + S3C64XX_SPI_SWAP_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~S3C64XX_SPI_MODE_4BURST;
+ val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ flush_fifo(sdd);
+}
+
+static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem_res, *dmatx_res, *dmarx_res;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_cntrlr_info *sci;
+ struct spi_master *master;
+ int ret;
+
+ if (pdev->id < 0) {
+ dev_err(&pdev->dev,
+ "Invalid platform device id-%d\n", pdev->id);
+ return -ENODEV;
+ }
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "platform_data missing!\n");
+ return -ENODEV;
+ }
+
+ /* Check for availability of necessary resources */
+
+ dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (dmatx_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
+ return -ENXIO;
+ }
+
+ dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (dmarx_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
+ return -ENXIO;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct s3c64xx_spi_driver_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ sci = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, master);
+
+ sdd = spi_master_get_devdata(master);
+ sdd->master = master;
+ sdd->cntrlr_info = sci;
+ sdd->pdev = pdev;
+ sdd->sfr_start = mem_res->start;
+ sdd->tx_dmach = dmatx_res->start;
+ sdd->rx_dmach = dmarx_res->start;
+
+ sdd->cur_bpw = 8;
+
+ master->bus_num = pdev->id;
+ master->setup = s3c64xx_spi_setup;
+ master->transfer = s3c64xx_spi_transfer;
+ master->num_chipselect = sci->num_cs;
+ master->dma_alignment = 8;
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ if (request_mem_region(mem_res->start,
+ resource_size(mem_res), pdev->name) == NULL) {
+ dev_err(&pdev->dev, "Req mem region failed\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+
+ sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
+ if (sdd->regs == NULL) {
+ dev_err(&pdev->dev, "Unable to remap IO\n");
+ ret = -ENXIO;
+ goto err1;
+ }
+
+ if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
+ dev_err(&pdev->dev, "Unable to config gpio\n");
+ ret = -EBUSY;
+ goto err2;
+ }
+
+ /* Setup clocks */
+ sdd->clk = clk_get(&pdev->dev, "spi");
+ if (IS_ERR(sdd->clk)) {
+ dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
+ ret = PTR_ERR(sdd->clk);
+ goto err3;
+ }
+
+ if (clk_enable(sdd->clk)) {
+ dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
+ ret = -EBUSY;
+ goto err4;
+ }
+
+ if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
+ sci->src_clk = sdd->clk;
+ else
+ sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
+ if (IS_ERR(sci->src_clk)) {
+ dev_err(&pdev->dev,
+ "Unable to acquire clock '%s'\n", sci->src_clk_name);
+ ret = PTR_ERR(sci->src_clk);
+ goto err5;
+ }
+
+ if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
+ dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
+ sci->src_clk_name);
+ ret = -EBUSY;
+ goto err6;
+ }
+
+ sdd->workqueue = create_singlethread_workqueue(
+ dev_name(master->dev.parent));
+ if (sdd->workqueue == NULL) {
+ dev_err(&pdev->dev, "Unable to create workqueue\n");
+ ret = -ENOMEM;
+ goto err7;
+ }
+
+ /* Setup Default Mode */
+ s3c64xx_spi_hwinit(sdd, pdev->id);
+
+ spin_lock_init(&sdd->lock);
+ init_completion(&sdd->xfer_completion);
+ INIT_WORK(&sdd->work, s3c64xx_spi_work);
+ INIT_LIST_HEAD(&sdd->queue);
+
+ if (spi_register_master(master)) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ ret = -EBUSY;
+ goto err8;
+ }
+
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \
+ with %d Slaves attached\n",
+ pdev->id, master->num_chipselect);
+ dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\
+ \tDMA=[Rx-%d, Tx-%d]\n",
+ mem_res->end, mem_res->start,
+ sdd->rx_dmach, sdd->tx_dmach);
+
+ return 0;
+
+err8:
+ destroy_workqueue(sdd->workqueue);
+err7:
+ if (sci->src_clk != sdd->clk)
+ clk_disable(sci->src_clk);
+err6:
+ if (sci->src_clk != sdd->clk)
+ clk_put(sci->src_clk);
+err5:
+ clk_disable(sdd->clk);
+err4:
+ clk_put(sdd->clk);
+err3:
+err2:
+ iounmap((void *) sdd->regs);
+err1:
+ release_mem_region(mem_res->start, resource_size(mem_res));
+err0:
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int s3c64xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct resource *mem_res;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+ sdd->state |= SUSPND;
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ while (sdd->state & SPIBUSY)
+ msleep(10);
+
+ spi_unregister_master(master);
+
+ destroy_workqueue(sdd->workqueue);
+
+ if (sci->src_clk != sdd->clk)
+ clk_disable(sci->src_clk);
+
+ if (sci->src_clk != sdd->clk)
+ clk_put(sci->src_clk);
+
+ clk_disable(sdd->clk);
+ clk_put(sdd->clk);
+
+ iounmap((void *) sdd->regs);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem_res->start, resource_size(mem_res));
+
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ struct s3c64xx_spi_csinfo *cs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+ sdd->state |= SUSPND;
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ while (sdd->state & SPIBUSY)
+ msleep(10);
+
+ /* Disable the clock */
+ if (sci->src_clk != sdd->clk)
+ clk_disable(sci->src_clk);
+
+ clk_disable(sdd->clk);
+
+ sdd->cur_speed = 0; /* Output Clock is stopped */
+
+ return 0;
+}
+
+static int s3c64xx_spi_resume(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
+ unsigned long flags;
+
+ sci->cfg_gpio(pdev);
+
+ /* Enable the clock */
+ if (sci->src_clk != sdd->clk)
+ clk_enable(sci->src_clk);
+
+ clk_enable(sdd->clk);
+
+ s3c64xx_spi_hwinit(sdd, pdev->id);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+ sdd->state &= ~SUSPND;
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ return 0;
+}
+#else
+#define s3c64xx_spi_suspend NULL
+#define s3c64xx_spi_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver s3c64xx_spi_driver = {
+ .driver = {
+ .name = "s3c64xx-spi",
+ .owner = THIS_MODULE,
+ },
+ .remove = s3c64xx_spi_remove,
+ .suspend = s3c64xx_spi_suspend,
+ .resume = s3c64xx_spi_resume,
+};
+MODULE_ALIAS("platform:s3c64xx-spi");
+
+static int __init s3c64xx_spi_init(void)
+{
+ return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
+}
+module_init(s3c64xx_spi_init);
+
+static void __exit s3c64xx_spi_exit(void)
+{
+ platform_driver_unregister(&s3c64xx_spi_driver);
+}
+module_exit(s3c64xx_spi_exit);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
new file mode 100644
index 00000000000..51e5e1dfa6e
--- /dev/null
+++ b/drivers/spi/spi_sh_msiof.c
@@ -0,0 +1,691 @@
+/*
+ * SuperH MSIOF SPI Master Interface
+ *
+ * Copyright (c) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/sh_msiof.h>
+
+#include <asm/spi.h>
+#include <asm/unaligned.h>
+
+struct sh_msiof_spi_priv {
+ struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct platform_device *pdev;
+ struct sh_msiof_spi_info *info;
+ struct completion done;
+ unsigned long flags;
+ int tx_fifo_size;
+ int rx_fifo_size;
+};
+
+#define TMDR1 0x00
+#define TMDR2 0x04
+#define TMDR3 0x08
+#define RMDR1 0x10
+#define RMDR2 0x14
+#define RMDR3 0x18
+#define TSCR 0x20
+#define RSCR 0x22
+#define CTR 0x28
+#define FCTR 0x30
+#define STR 0x40
+#define IER 0x44
+#define TDR1 0x48
+#define TDR2 0x4c
+#define TFDR 0x50
+#define RDR1 0x58
+#define RDR2 0x5c
+#define RFDR 0x60
+
+#define CTR_TSCKE (1 << 15)
+#define CTR_TFSE (1 << 14)
+#define CTR_TXE (1 << 9)
+#define CTR_RXE (1 << 8)
+
+#define STR_TEOF (1 << 23)
+#define STR_REOF (1 << 7)
+
+static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
+{
+ switch (reg_offs) {
+ case TSCR:
+ case RSCR:
+ return ioread16(p->mapbase + reg_offs);
+ default:
+ return ioread32(p->mapbase + reg_offs);
+ }
+}
+
+static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
+ unsigned long value)
+{
+ switch (reg_offs) {
+ case TSCR:
+ case RSCR:
+ iowrite16(value, p->mapbase + reg_offs);
+ break;
+ default:
+ iowrite32(value, p->mapbase + reg_offs);
+ break;
+ }
+}
+
+static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
+ unsigned long clr, unsigned long set)
+{
+ unsigned long mask = clr | set;
+ unsigned long data;
+ int k;
+
+ data = sh_msiof_read(p, CTR);
+ data &= ~clr;
+ data |= set;
+ sh_msiof_write(p, CTR, data);
+
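+ /* Poll until the modified bits take effect; worst case this waits
+ * about 1 ms (100 iterations of 10 us) before timing out.
+ */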
+ for (k = 100; k > 0; k--) {
+ if ((sh_msiof_read(p, CTR) & mask) == set)
+ break;
+
+ udelay(10);
+ }
+
+ return k > 0 ? 0 : -ETIMEDOUT;
+}
+
+static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
+{
+ struct sh_msiof_spi_priv *p = data;
+
+ /* just disable the interrupt and wake up */
+ sh_msiof_write(p, IER, 0);
+ complete(&p->done);
+
+ return IRQ_HANDLED;
+}
+
+static struct {
+ unsigned short div;
+ unsigned short scr;
+} const sh_msiof_spi_clk_table[] = {
+ { 1, 0x0007 },
+ { 2, 0x0000 },
+ { 4, 0x0001 },
+ { 8, 0x0002 },
+ { 16, 0x0003 },
+ { 32, 0x0004 },
+ { 64, 0x1f00 },
+ { 128, 0x1f01 },
+ { 256, 0x1f02 },
+ { 512, 0x1f03 },
+ { 1024, 0x1f04 },
+};
+
+static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
+ unsigned long parent_rate,
+ unsigned long spi_hz)
+{
+ unsigned long div = 1024;
+ size_t k;
+
+ if (!WARN_ON(!spi_hz || !parent_rate))
+ div = parent_rate / spi_hz;
+
+ /* TODO: make more fine grained */
+
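+ /* Pick the smallest divider in the table that still satisfies the
+ * requested ratio; e.g. a 48 MHz parent and 1 MHz spi_hz give
+ * div = 48, so the 1/64 entry is used (~750 kHz). Ratios beyond
+ * 1/1024 are clamped to the last table entry.
+ */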
+ for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) {
+ if (sh_msiof_spi_clk_table[k].div >= div)
+ break;
+ }
+
+ k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
+
+ sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
+ sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+}
+
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+ int cpol, int cpha,
+ int tx_hi_z, int lsb_first)
+{
+ unsigned long tmp;
+ int edge;
+
+ /*
+ * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG(!)
+ * 0 0 10 10 1 0
+ * 0 1 10 10 0 1
+ * 1 0 11 11 0 1
+ * 1 1 11 11 1 0
+ *
+ * (!) Note: REDG is the inverse of the recommended data sheet setting
+ */
+
+ sh_msiof_write(p, FCTR, 0);
+ sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
+ sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
+
+ tmp = 0xa0000000;
+ tmp |= cpol << 30; /* TSCKIZ */
+ tmp |= cpol << 28; /* RSCKIZ */
+
+ edge = cpol ? cpha : !cpha;
+
+ tmp |= edge << 27; /* TEDG */
+ tmp |= !edge << 26; /* REDG */
+ tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
+ sh_msiof_write(p, CTR, tmp);
+}
+
+static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, void *rx_buf,
+ int bits, int words)
+{
+ unsigned long dr2;
+
+ dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+
+ if (tx_buf)
+ sh_msiof_write(p, TMDR2, dr2);
+ else
+ sh_msiof_write(p, TMDR2, dr2 | 1);
+
+ if (rx_buf)
+ sh_msiof_write(p, RMDR2, dr2);
+
+ sh_msiof_write(p, IER, STR_TEOF | STR_REOF);
+}
+
+static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
+{
+ sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+}
+
+static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned char *buf_8 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_8[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned short *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_16[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned short *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned int *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_32[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const unsigned int *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+}
+
+static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned char *buf_8 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned short *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned short *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+}
+
+static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned int *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ unsigned int *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+}
+
+static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
+{
+ int bits;
+
+ bits = t ? t->bits_per_word : 0;
+ bits = bits ? bits : spi->bits_per_word;
+ return bits;
+}
+
+static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ unsigned long hz;
+
+ hz = t ? t->speed_hz : 0;
+ hz = hz ? hz : spi->max_speed_hz;
+ return hz;
+}
+
+static int sh_msiof_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ int bits;
+
+ /* nothing to check hz values against since parent clock is disabled */
+
+ bits = sh_msiof_spi_bits(spi, t);
+ if (bits < 8)
+ return -EINVAL;
+ if (bits > 32)
+ return -EINVAL;
+
+ return spi_bitbang_setup_transfer(spi, t);
+}
+
+static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ int value;
+
+ /* chip select is active low unless SPI_CS_HIGH is set */
+ if (spi->mode & SPI_CS_HIGH)
+ value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0;
+ else
+ value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1;
+
+ if (is_on == BITBANG_CS_ACTIVE) {
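+ /* Bit 0 of p->flags tracks whether the clock and runtime PM
+ * are currently held, so they are enabled only on the first
+ * activation and released when CS goes inactive again.
+ */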
+ if (!test_and_set_bit(0, &p->flags)) {
+ pm_runtime_get_sync(&p->pdev->dev);
+ clk_enable(p->clk);
+ }
+
+ /* Configure pins before asserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST));
+ }
+
+ /* use spi->controller data for CS (same strategy as spi_gpio) */
+ gpio_set_value((unsigned)spi->controller_data, value);
+
+ if (is_on == BITBANG_CS_INACTIVE) {
+ if (test_and_clear_bit(0, &p->flags)) {
+ clk_disable(p->clk);
+ pm_runtime_put(&p->pdev->dev);
+ }
+ }
+}
+
+static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
+ void (*tx_fifo)(struct sh_msiof_spi_priv *,
+ const void *, int, int),
+ void (*rx_fifo)(struct sh_msiof_spi_priv *,
+ void *, int, int),
+ const void *tx_buf, void *rx_buf,
+ int words, int bits)
+{
+ int fifo_shift;
+ int ret;
+
+ /* limit maximum word transfer to rx/tx fifo size */
+ if (tx_buf)
+ words = min_t(int, words, p->tx_fifo_size);
+ if (rx_buf)
+ words = min_t(int, words, p->rx_fifo_size);
+
+ /* the fifo contents need shifting */
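+ /* e.g. for 8-bit words the data sits in the top byte of the 32-bit
+ * FIFO register, so tx words are shifted up by 24 bits and rx words
+ * shifted back down by the same amount
+ */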
+ fifo_shift = 32 - bits;
+
+ /* setup msiof transfer mode registers */
+ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
+
+ /* write tx fifo */
+ if (tx_buf)
+ tx_fifo(p, tx_buf, words, fifo_shift);
+
+ /* setup clock and rx/tx signals */
+ ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ if (rx_buf)
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+
+ /* start by setting frame bit */
+ INIT_COMPLETION(p->done);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to start hardware\n");
+ goto err;
+ }
+
+ /* wait for tx fifo to be emptied / rx fifo to be filled */
+ wait_for_completion(&p->done);
+
+ /* read rx fifo */
+ if (rx_buf)
+ rx_fifo(p, rx_buf, words, fifo_shift);
+
+ /* clear status bits */
+ sh_msiof_reset_str(p);
+
+ /* shut down frame, rx/tx and clock signals */
+ ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+ if (rx_buf)
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+ ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to shut down hardware\n");
+ goto err;
+ }
+
+ return words;
+
+ err:
+ sh_msiof_write(p, IER, 0);
+ return ret;
+}
+
+static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
+ void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
+ int bits;
+ int bytes_per_word;
+ int bytes_done;
+ int words;
+ int n;
+
+ bits = sh_msiof_spi_bits(spi, t);
+
+ /* setup bytes per word and fifo read/write functions */
+ if (bits <= 8) {
+ bytes_per_word = 1;
+ tx_fifo = sh_msiof_spi_write_fifo_8;
+ rx_fifo = sh_msiof_spi_read_fifo_8;
+ } else if (bits <= 16) {
+ bytes_per_word = 2;
+ if ((unsigned long)t->tx_buf & 0x01)
+ tx_fifo = sh_msiof_spi_write_fifo_16u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_16;
+
+ if ((unsigned long)t->rx_buf & 0x01)
+ rx_fifo = sh_msiof_spi_read_fifo_16u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_16;
+ } else {
+ bytes_per_word = 4;
+ if ((unsigned long)t->tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_32;
+
+ if ((unsigned long)t->rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_32;
+ }
+
+ /* setup clocks (clock already enabled in chipselect()) */
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk),
+ sh_msiof_spi_hz(spi, t));
+
+ /* transfer in fifo sized chunks */
+ words = t->len / bytes_per_word;
+ bytes_done = 0;
+
+ while (bytes_done < t->len) {
+ n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
+ t->tx_buf + bytes_done,
+ t->rx_buf + bytes_done,
+ words, bits);
+ if (n < 0)
+ break;
+
+ bytes_done += n * bytes_per_word;
+ words -= n;
+ }
+
+ return bytes_done;
+}
+
+static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
+ u32 word, u8 bits)
+{
+ BUG(); /* unused but needed by bitbang code */
+ return 0;
+}
+
+static int sh_msiof_spi_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct spi_master *master;
+ struct sh_msiof_spi_priv *p;
+ char clk_name[16];
+ int i;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi master\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ p = spi_master_get_devdata(master);
+
+ platform_set_drvdata(pdev, p);
+ p->info = pdev->dev.platform_data;
+ init_completion(&p->done);
+
+ snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id);
+ p->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i = platform_get_irq(pdev, 0);
+ if (!r || i < 0) {
+ dev_err(&pdev->dev, "cannot get platform resources\n");
+ ret = -ENOENT;
+ goto err2;
+ }
+ p->mapbase = ioremap_nocache(r->start, resource_size(r));
+ if (!p->mapbase) {
+ dev_err(&pdev->dev, "unable to ioremap\n");
+ ret = -ENXIO;
+ goto err2;
+ }
+
+ ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED,
+ dev_name(&pdev->dev), p);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request irq\n");
+ goto err3;
+ }
+
+ p->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
+
+ /* The standard version of MSIOF uses 64-word FIFOs */
+ p->tx_fifo_size = 64;
+ p->rx_fifo_size = 64;
+
+ /* Platform data may override FIFO sizes */
+ if (p->info->tx_fifo_override)
+ p->tx_fifo_size = p->info->tx_fifo_override;
+ if (p->info->rx_fifo_override)
+ p->rx_fifo_size = p->info->rx_fifo_override;
+
+ /* init master and bitbang code */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
+ master->flags = 0;
+ master->bus_num = pdev->id;
+ master->num_chipselect = p->info->num_chipselect;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
+
+ p->bitbang.master = master;
+ p->bitbang.chipselect = sh_msiof_spi_chipselect;
+ p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer;
+ p->bitbang.txrx_bufs = sh_msiof_spi_txrx;
+ p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word;
+ p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word;
+ p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word;
+ p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word;
+
+ ret = spi_bitbang_start(&p->bitbang);
+ if (ret == 0)
+ return 0;
+
+ pm_runtime_disable(&pdev->dev);
+ err3:
+ iounmap(p->mapbase);
+ err2:
+ clk_put(p->clk);
+ err1:
+ spi_master_put(master);
+ err0:
+ return ret;
+}
+
+static int sh_msiof_spi_remove(struct platform_device *pdev)
+{
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = spi_bitbang_stop(&p->bitbang);
+ if (!ret) {
+ pm_runtime_disable(&pdev->dev);
+ free_irq(platform_get_irq(pdev, 0), p);
+ iounmap(p->mapbase);
+ clk_put(p->clk);
+ spi_master_put(p->bitbang.master);
+ }
+ return ret;
+}
+
+static int sh_msiof_spi_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
+ .runtime_suspend = sh_msiof_spi_runtime_nop,
+ .runtime_resume = sh_msiof_spi_runtime_nop,
+};
+
+static struct platform_driver sh_msiof_spi_drv = {
+ .probe = sh_msiof_spi_probe,
+ .remove = sh_msiof_spi_remove,
+ .driver = {
+ .name = "spi_sh_msiof",
+ .owner = THIS_MODULE,
+ .pm = &sh_msiof_spi_dev_pm_ops,
+ },
+};
+
+static int __init sh_msiof_spi_init(void)
+{
+ return platform_driver_register(&sh_msiof_spi_drv);
+}
+module_init(sh_msiof_spi_init);
+
+static void __exit sh_msiof_spi_exit(void)
+{
+ platform_driver_unregister(&sh_msiof_spi_drv);
+}
+module_exit(sh_msiof_spi_exit);
+
+MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_sh_msiof");
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index 7d36720eb98..a65c12ffa73 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -148,7 +148,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
ret = -ENOENT;
goto err1;
}
- sp->membase = ioremap(r->start, r->end - r->start + 1);
+ sp->membase = ioremap(r->start, resource_size(r));
if (!sp->membase) {
ret = -ENXIO;
goto err1;
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 19f75627c3d..dfa024b633e 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -375,12 +375,10 @@ static int __init txx9spi_probe(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
goto exit_busy;
- if (!devm_request_mem_region(&dev->dev,
- res->start, res->end - res->start + 1,
+ if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
"spi_txx9"))
goto exit_busy;
- c->membase = devm_ioremap(&dev->dev,
- res->start, res->end - res->start + 1);
+ c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
if (!c->membase)
goto exit_busy;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 20d7322e2f7..ea1bec3c9a1 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -53,7 +53,7 @@
#define SPIDEV_MAJOR 153 /* assigned */
#define N_SPI_MINORS 32 /* ... up to 256 */
-static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG];
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
/* Bit masks for spi_device.mode management. Note that incorrect
@@ -266,15 +266,15 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
#ifdef VERBOSE
- dev_dbg(&spi->dev,
+ dev_dbg(&spidev->spi->dev,
" xfer len %zd %s%s%s%dbits %u usec %uHz\n",
u_tmp->len,
u_tmp->rx_buf ? "rx " : "",
u_tmp->tx_buf ? "tx " : "",
u_tmp->cs_change ? "cs " : "",
- u_tmp->bits_per_word ? : spi->bits_per_word,
+ u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
u_tmp->delay_usecs,
- u_tmp->speed_hz ? : spi->max_speed_hz);
+ u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
}
@@ -558,7 +558,7 @@ static struct class *spidev_class;
/*-------------------------------------------------------------------------*/
-static int spidev_probe(struct spi_device *spi)
+static int __devinit spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
@@ -607,7 +607,7 @@ static int spidev_probe(struct spi_device *spi)
return status;
}
-static int spidev_remove(struct spi_device *spi)
+static int __devexit spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
@@ -629,7 +629,7 @@ static int spidev_remove(struct spi_device *spi)
return 0;
}
-static struct spi_driver spidev_spi = {
+static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.owner = THIS_MODULE,
@@ -661,14 +661,14 @@ static int __init spidev_init(void)
spidev_class = class_create(THIS_MODULE, "spidev");
if (IS_ERR(spidev_class)) {
- unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
return PTR_ERR(spidev_class);
}
- status = spi_register_driver(&spidev_spi);
+ status = spi_register_driver(&spidev_spi_driver);
if (status < 0) {
class_destroy(spidev_class);
- unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
return status;
}
@@ -676,9 +676,9 @@ module_init(spidev_init);
static void __exit spidev_exit(void)
{
- spi_unregister_driver(&spidev_spi);
+ spi_unregister_driver(&spidev_spi_driver);
class_destroy(spidev_class);
- unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 46b8c5c2f45..9f386379c16 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -14,22 +14,20 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/of_spi.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/io.h>
+#include "xilinx_spi.h"
+#include <linux/spi/xilinx_spi.h>
+
#define XILINX_SPI_NAME "xilinx_spi"
/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
* Product Specification", DS464
*/
-#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
+#define XSPI_CR_OFFSET 0x60 /* Control Register */
#define XSPI_CR_ENABLE 0x02
#define XSPI_CR_MASTER_MODE 0x04
@@ -40,8 +38,9 @@
#define XSPI_CR_RXFIFO_RESET 0x40
#define XSPI_CR_MANUAL_SSELECT 0x80
#define XSPI_CR_TRANS_INHIBIT 0x100
+#define XSPI_CR_LSB_FIRST 0x200
-#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
+#define XSPI_SR_OFFSET 0x64 /* Status Register */
#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
@@ -49,8 +48,8 @@
#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
-#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
-#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
+#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
+#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
@@ -70,6 +69,7 @@
#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
+#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
@@ -78,35 +78,85 @@ struct xilinx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
struct completion done;
-
+ struct resource mem; /* phys mem */
void __iomem *regs; /* virt. address of the control registers */
u32 irq;
- u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
-
u8 *rx_ptr; /* pointer in the Rx buffer */
const u8 *tx_ptr; /* pointer in the Tx buffer */
int remaining_bytes; /* the number of bytes left to transfer */
+ u8 bits_per_word;
+ unsigned int (*read_fn) (void __iomem *);
+ void (*write_fn) (u32, void __iomem *);
+ void (*tx_fn) (struct xilinx_spi *);
+ void (*rx_fn) (struct xilinx_spi *);
};
-static void xspi_init_hw(void __iomem *regs_base)
+static void xspi_tx8(struct xilinx_spi *xspi)
+{
+ xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr++;
+}
+
+static void xspi_tx16(struct xilinx_spi *xspi)
+{
+ xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr += 2;
+}
+
+static void xspi_tx32(struct xilinx_spi *xspi)
+{
+ xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr += 4;
+}
+
+static void xspi_rx8(struct xilinx_spi *xspi)
+{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *xspi->rx_ptr = data & 0xff;
+ xspi->rx_ptr++;
+ }
+}
+
+static void xspi_rx16(struct xilinx_spi *xspi)
{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *(u16 *)(xspi->rx_ptr) = data & 0xffff;
+ xspi->rx_ptr += 2;
+ }
+}
+
+static void xspi_rx32(struct xilinx_spi *xspi)
+{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+ if (xspi->rx_ptr) {
+ *(u32 *)(xspi->rx_ptr) = data;
+ xspi->rx_ptr += 4;
+ }
+}
+
+static void xspi_init_hw(struct xilinx_spi *xspi)
+{
+ void __iomem *regs_base = xspi->regs;
+
/* Reset the SPI device */
- out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
- XIPIF_V123B_RESET_MASK);
+ xspi->write_fn(XIPIF_V123B_RESET_MASK,
+ regs_base + XIPIF_V123B_RESETR_OFFSET);
/* Disable all the interrupts just in case */
- out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
+ xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
/* Enable the global IPIF interrupt */
- out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
- XIPIF_V123B_GINTR_ENABLE);
+ xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ regs_base + XIPIF_V123B_DGIER_OFFSET);
/* Deselect the slave on the SPI bus */
- out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
+ xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
/* Disable the transmitter, enable Manual Slave Select Assertion,
* put SPI controller into master mode, and enable it */
- out_be16(regs_base + XSPI_CR_OFFSET,
- XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
- | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
+ xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
+ XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
+ XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET);
}
static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
@@ -115,16 +165,16 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
if (is_on == BITBANG_CS_INACTIVE) {
/* Deselect the slave on the SPI bus */
- out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
+ xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET);
} else if (is_on == BITBANG_CS_ACTIVE) {
/* Set the SPI clock phase and polarity */
- u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
+ u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
& ~XSPI_CR_MODE_MASK;
if (spi->mode & SPI_CPHA)
cr |= XSPI_CR_CPHA;
if (spi->mode & SPI_CPOL)
cr |= XSPI_CR_CPOL;
- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
/* We do not check spi->max_speed_hz here as the SPI clock
* frequency is not software programmable (the IP block design
@@ -132,24 +182,27 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
*/
/* Activate the chip select */
- out_be32(xspi->regs + XSPI_SSR_OFFSET,
- ~(0x0001 << spi->chip_select));
+ xspi->write_fn(~(0x0001 << spi->chip_select),
+ xspi->regs + XSPI_SSR_OFFSET);
}
}
/* spi_bitbang requires custom setup_transfer() to be defined if there is a
* custom txrx_bufs(). We have nothing to setup here as the SPI IP block
- * supports just 8 bits per word, and SPI clock can't be changed in software.
- * Check for 8 bits per word. Chip select delay calculations could be
+ * supports 8, 16 or 32 bits per word which cannot be changed in software.
+ * SPI clock can't be changed in software either.
+ * Check for correct bits per word. Chip select delay calculations could be
* added here as soon as bitbang_work() can be made aware of the delay value.
*/
static int xilinx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
u8 bits_per_word;
- bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
- if (bits_per_word != 8) {
+ bits_per_word = (t && t->bits_per_word)
+ ? t->bits_per_word : spi->bits_per_word;
+ if (bits_per_word != xspi->bits_per_word) {
dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
__func__, bits_per_word);
return -EINVAL;
@@ -160,17 +213,16 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
static int xilinx_spi_setup(struct spi_device *spi)
{
- struct spi_bitbang *bitbang;
- struct xilinx_spi *xspi;
- int retval;
-
- xspi = spi_master_get_devdata(spi->master);
- bitbang = &xspi->bitbang;
-
- retval = xilinx_spi_setup_transfer(spi, NULL);
- if (retval < 0)
- return retval;
-
+ /* Always return 0: we cannot check the number of bits here.
+ * SPI setup may be called before any protocol driver is bound;
+ * in that case the SPI core defaults to 8 bits, which this
+ * controller may not support. But if we returned an error, the
+ * SPI device would never be registered and no driver could get
+ * hold of it. Once a driver is bound, it calls setup again with
+ * the correct number of bits per transfer.
+ * If a driver sets up with an unsupported bit width, it will
+ * fail when it tries to do a transfer.
+ */
return 0;
}
@@ -179,15 +231,14 @@ static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
u8 sr;
/* Fill the Tx FIFO with as many bytes as possible */
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
- if (xspi->tx_ptr) {
- out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
- } else {
- out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
- }
- xspi->remaining_bytes--;
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ if (xspi->tx_ptr)
+ xspi->tx_fn(xspi);
+ else
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ xspi->remaining_bytes -= xspi->bits_per_word / 8;
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
}
}
@@ -209,18 +260,19 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
/* Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
*/
- ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
- ipif_ier | XSPI_INTR_TX_EMPTY);
+ ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
+ xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
+ xspi->regs + XIPIF_V123B_IIER_OFFSET);
/* Start the transfer by not inhibiting the transmitter any longer */
- cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
+ ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
wait_for_completion(&xspi->done);
/* Disable the transmit empty interrupt */
- out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);
+ xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
return t->len - xspi->remaining_bytes;
}
@@ -237,8 +289,8 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
u32 ipif_isr;
/* Get the IPIF interrupts, and clear them immediately */
- ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
- out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);
+ ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
u16 cr;
@@ -249,20 +301,15 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
* transmitter while the Isr refills the transmit register/FIFO,
* or make sure it is stopped if we're done.
*/
- cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
- out_be16(xspi->regs + XSPI_CR_OFFSET,
- cr | XSPI_CR_TRANS_INHIBIT);
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
/* Read out all the data from the Rx FIFO */
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
- u8 data;
-
- data = in_8(xspi->regs + XSPI_RXD_OFFSET);
- if (xspi->rx_ptr) {
- *xspi->rx_ptr++ = data;
- }
- sr = in_8(xspi->regs + XSPI_SR_OFFSET);
+ xspi->rx_fn(xspi);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
}
/* See if there is more data to send */
@@ -271,7 +318,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
/* Start the transfer by not inhibiting the
* transmitter any longer
*/
- out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
} else {
/* No more data to send.
* Indicate the transfer is completed.
@@ -283,40 +330,22 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init xilinx_spi_of_probe(struct of_device *ofdev,
- const struct of_device_id *match)
+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
+ u32 irq, s16 bus_num)
{
struct spi_master *master;
struct xilinx_spi *xspi;
- struct resource r_irq_struct;
- struct resource r_mem_struct;
-
- struct resource *r_irq = &r_irq_struct;
- struct resource *r_mem = &r_mem_struct;
- int rc = 0;
- const u32 *prop;
- int len;
-
- /* Get resources(memory, IRQ) associated with the device */
- master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
+ struct xspi_platform_data *pdata = dev->platform_data;
+ int ret;
- if (master == NULL) {
- return -ENOMEM;
+ if (!pdata) {
+ dev_err(dev, "No platform data attached\n");
+ return NULL;
}
- dev_set_drvdata(&ofdev->dev, master);
-
- rc = of_address_to_resource(ofdev->node, 0, r_mem);
- if (rc) {
- dev_warn(&ofdev->dev, "invalid address\n");
- goto put_master;
- }
-
- rc = of_irq_to_resource(ofdev->node, 0, r_irq);
- if (rc == NO_IRQ) {
- dev_warn(&ofdev->dev, "no IRQ found\n");
- goto put_master;
- }
+ master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
+ if (!master)
+ return NULL;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
@@ -329,128 +358,87 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev,
xspi->bitbang.master->setup = xilinx_spi_setup;
init_completion(&xspi->done);
- xspi->irq = r_irq->start;
-
- if (!request_mem_region(r_mem->start,
- r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
- rc = -ENXIO;
- dev_warn(&ofdev->dev, "memory request failure\n");
+ if (!request_mem_region(mem->start, resource_size(mem),
+ XILINX_SPI_NAME))
goto put_master;
- }
- xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1);
+ xspi->regs = ioremap(mem->start, resource_size(mem));
if (xspi->regs == NULL) {
- rc = -ENOMEM;
- dev_warn(&ofdev->dev, "ioremap failure\n");
- goto release_mem;
+ dev_warn(dev, "ioremap failure\n");
+ goto map_failed;
}
- xspi->irq = r_irq->start;
-
- /* dynamic bus assignment */
- master->bus_num = -1;
- /* number of slave select bits is required */
- prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
- if (!prop || len < sizeof(*prop)) {
- dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
- goto unmap_io;
+ master->bus_num = bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+
+ xspi->mem = *mem;
+ xspi->irq = irq;
+ if (pdata->little_endian) {
+ xspi->read_fn = ioread32;
+ xspi->write_fn = iowrite32;
+ } else {
+ xspi->read_fn = ioread32be;
+ xspi->write_fn = iowrite32be;
}
- master->num_chipselect = *prop;
+ xspi->bits_per_word = pdata->bits_per_word;
+ if (xspi->bits_per_word == 8) {
+ xspi->tx_fn = xspi_tx8;
+ xspi->rx_fn = xspi_rx8;
+ } else if (xspi->bits_per_word == 16) {
+ xspi->tx_fn = xspi_tx16;
+ xspi->rx_fn = xspi_rx16;
+ } else if (xspi->bits_per_word == 32) {
+ xspi->tx_fn = xspi_tx32;
+ xspi->rx_fn = xspi_rx32;
+ } else
+ goto unmap_io;
+
/* SPI controller initializations */
- xspi_init_hw(xspi->regs);
+ xspi_init_hw(xspi);
/* Register for SPI Interrupt */
- rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
- if (rc != 0) {
- dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
+ ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+ if (ret)
goto unmap_io;
- }
- rc = spi_bitbang_start(&xspi->bitbang);
- if (rc != 0) {
- dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n");
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret) {
+ dev_err(dev, "spi_bitbang_start FAILED\n");
goto free_irq;
}
- dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
- (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq);
-
- /* Add any subnodes on the SPI bus */
- of_register_spi_devices(master, ofdev->node);
-
- return rc;
+ dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
+ (unsigned long long)mem->start, xspi->regs, xspi->irq);
+ return master;
free_irq:
free_irq(xspi->irq, xspi);
unmap_io:
iounmap(xspi->regs);
-release_mem:
- release_mem_region(r_mem->start, resource_size(r_mem));
+map_failed:
+ release_mem_region(mem->start, resource_size(mem));
put_master:
spi_master_put(master);
- return rc;
+ return NULL;
}
+EXPORT_SYMBOL(xilinx_spi_init);
-static int __devexit xilinx_spi_remove(struct of_device *ofdev)
+void xilinx_spi_deinit(struct spi_master *master)
{
struct xilinx_spi *xspi;
- struct spi_master *master;
- struct resource r_mem;
- master = platform_get_drvdata(ofdev);
xspi = spi_master_get_devdata(master);
spi_bitbang_stop(&xspi->bitbang);
free_irq(xspi->irq, xspi);
iounmap(xspi->regs);
- if (!of_address_to_resource(ofdev->node, 0, &r_mem))
- release_mem_region(r_mem.start, resource_size(&r_mem));
- dev_set_drvdata(&ofdev->dev, 0);
- spi_master_put(xspi->bitbang.master);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" XILINX_SPI_NAME);
-
-static int __exit xilinx_spi_of_remove(struct of_device *op)
-{
- return xilinx_spi_remove(op);
-}
-static struct of_device_id xilinx_spi_of_match[] = {
- { .compatible = "xlnx,xps-spi-2.00.a", },
- { .compatible = "xlnx,xps-spi-2.00.b", },
- {}
-};
-
-MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-
-static struct of_platform_driver xilinx_spi_of_driver = {
- .owner = THIS_MODULE,
- .name = "xilinx-xps-spi",
- .match_table = xilinx_spi_of_match,
- .probe = xilinx_spi_of_probe,
- .remove = __exit_p(xilinx_spi_of_remove),
- .driver = {
- .name = "xilinx-xps-spi",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init xilinx_spi_init(void)
-{
- return of_register_platform_driver(&xilinx_spi_of_driver);
+ release_mem_region(xspi->mem.start, resource_size(&xspi->mem));
+ spi_master_put(xspi->bitbang.master);
}
-module_init(xilinx_spi_init);
+EXPORT_SYMBOL(xilinx_spi_deinit);
-static void __exit xilinx_spi_exit(void)
-{
- of_unregister_platform_driver(&xilinx_spi_of_driver);
-}
-module_exit(xilinx_spi_exit);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx SPI driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/xilinx_spi.h b/drivers/spi/xilinx_spi.h
new file mode 100644
index 00000000000..d211accf68d
--- /dev/null
+++ b/drivers/spi/xilinx_spi.h
@@ -0,0 +1,32 @@
+/*
+ * Xilinx SPI device driver API and platform data header file
+ *
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _XILINX_SPI_H_
+#define _XILINX_SPI_H_
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define XILINX_SPI_NAME "xilinx_spi"
+
+struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
+ u32 irq, s16 bus_num);
+
+void xilinx_spi_deinit(struct spi_master *master);
+#endif
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
new file mode 100644
index 00000000000..71dc3adc049
--- /dev/null
+++ b/drivers/spi/xilinx_spi_of.c
@@ -0,0 +1,134 @@
+/*
+ * Xilinx SPI OF device driver
+ *
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Xilinx SPI devices as OF devices
+ *
+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_spi.h>
+
+#include <linux/spi/xilinx_spi.h>
+#include "xilinx_spi.h"
+
+
+static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct spi_master *master;
+ struct xspi_platform_data *pdata;
+ struct resource r_mem;
+ struct resource r_irq;
+ int rc = 0;
+ const u32 *prop;
+ int len;
+
+ rc = of_address_to_resource(ofdev->node, 0, &r_mem);
+ if (rc) {
+ dev_warn(&ofdev->dev, "invalid address\n");
+ return rc;
+ }
+
+ rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
+ if (rc == NO_IRQ) {
+ dev_warn(&ofdev->dev, "no IRQ found\n");
+ return -ENODEV;
+ }
+
+ ofdev->dev.platform_data =
+ kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL);
+ pdata = ofdev->dev.platform_data;
+ if (!pdata)
+ return -ENOMEM;
+
+ /* number of slave select bits is required */
+ prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
+ if (!prop || len < sizeof(*prop)) {
+ dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
+ return -EINVAL;
+ }
+ pdata->num_chipselect = *prop;
+ pdata->bits_per_word = 8;
+ master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1);
+ if (!master)
+ return -ENODEV;
+
+ dev_set_drvdata(&ofdev->dev, master);
+
+ /* Add any subnodes on the SPI bus */
+ of_register_spi_devices(master, ofdev->node);
+
+ return 0;
+}
+
+static int __devexit xilinx_spi_remove(struct of_device *ofdev)
+{
+ xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
+ dev_set_drvdata(&ofdev->dev, 0);
+ kfree(ofdev->dev.platform_data);
+ ofdev->dev.platform_data = NULL;
+ return 0;
+}
+
+static int __exit xilinx_spi_of_remove(struct of_device *op)
+{
+ return xilinx_spi_remove(op);
+}
+
+static struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,xps-spi-2.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.b", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
+static struct of_platform_driver xilinx_spi_of_driver = {
+ .match_table = xilinx_spi_of_match,
+ .probe = xilinx_spi_of_probe,
+ .remove = __exit_p(xilinx_spi_of_remove),
+ .driver = {
+ .name = "xilinx-xps-spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init xilinx_spi_of_init(void)
+{
+ return of_register_platform_driver(&xilinx_spi_of_driver);
+}
+module_init(xilinx_spi_of_init);
+
+static void __exit xilinx_spi_of_exit(void)
+{
+ of_unregister_platform_driver(&xilinx_spi_of_driver);
+}
+module_exit(xilinx_spi_of_exit);
+
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("Xilinx SPI platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi_pltfm.c b/drivers/spi/xilinx_spi_pltfm.c
new file mode 100644
index 00000000000..24debac646a
--- /dev/null
+++ b/drivers/spi/xilinx_spi_pltfm.c
@@ -0,0 +1,102 @@
+/*
+ * Support for Xilinx SPI platform devices
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Xilinx SPI devices as platform devices
+ *
+ * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/xilinx_spi.h>
+
+#include "xilinx_spi.h"
+
+static int __devinit xilinx_spi_probe(struct platform_device *dev)
+{
+ struct xspi_platform_data *pdata;
+ struct resource *r;
+ int irq;
+ struct spi_master *master;
+ u8 i;
+
+ pdata = dev->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ master = xilinx_spi_init(&dev->dev, r, irq, dev->id);
+ if (!master)
+ return -ENODEV;
+
+ for (i = 0; i < pdata->num_devices; i++)
+ spi_new_device(master, pdata->devices + i);
+
+ platform_set_drvdata(dev, master);
+ return 0;
+}
+
+static int __devexit xilinx_spi_remove(struct platform_device *dev)
+{
+ xilinx_spi_deinit(platform_get_drvdata(dev));
+ platform_set_drvdata(dev, 0);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+
+static struct platform_driver xilinx_spi_driver = {
+ .probe = xilinx_spi_probe,
+ .remove = __devexit_p(xilinx_spi_remove),
+ .driver = {
+ .name = XILINX_SPI_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init xilinx_spi_pltfm_init(void)
+{
+ return platform_driver_register(&xilinx_spi_driver);
+}
+module_init(xilinx_spi_pltfm_init);
+
+static void __exit xilinx_spi_pltfm_exit(void)
+{
+ platform_driver_unregister(&xilinx_spi_driver);
+}
+module_exit(xilinx_spi_pltfm_exit);
+
+MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("Xilinx SPI platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index a44ac2e3b8e..093f57af32d 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -99,8 +99,12 @@ source "drivers/staging/p9auth/Kconfig"
source "drivers/staging/line6/Kconfig"
+source "drivers/gpu/drm/vmwgfx/Kconfig"
+
source "drivers/gpu/drm/radeon/Kconfig"
+source "drivers/gpu/drm/nouveau/Kconfig"
+
source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index f2dab1a4d41..06c02046629 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -2328,9 +2328,11 @@ static void usbdux_firmware_request_complete_handler(const struct firmware *fw,
if (ret) {
dev_err(&usbdev->dev,
"Could not upload firmware (err=%d)\n", ret);
- return;
+ goto out;
}
comedi_usb_auto_config(usbdev, BOARDNAME);
+ out:
+ release_firmware(fw);
}
/* allocate memory for the urbs and initialise them */
@@ -2581,6 +2583,7 @@ static int usbduxsub_probe(struct usb_interface *uinterf,
FW_ACTION_HOTPLUG,
"usbdux_firmware.bin",
&udev->dev,
+ GFP_KERNEL,
usbduxsub + index,
usbdux_firmware_request_complete_handler);
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index d143222579c..2e675cce7db 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1451,10 +1451,12 @@ static void usbduxfast_firmware_request_complete_handler(const struct firmware
if (ret) {
dev_err(&usbdev->dev,
"Could not upload firmware (err=%d)\n", ret);
- return;
+ goto out;
}
comedi_usb_auto_config(usbdev, BOARDNAME);
+ out:
+ release_firmware(fw);
}
/*
@@ -1569,6 +1571,7 @@ static int usbduxfastsub_probe(struct usb_interface *uinterf,
FW_ACTION_HOTPLUG,
"usbduxfast_firmware.bin",
&udev->dev,
+ GFP_KERNEL,
usbduxfastsub + index,
usbduxfast_firmware_request_complete_handler);
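Both comedi hunks above follow the same asynchronous firmware pattern; a small sketch of it, with made-up example_ names, is below. The new GFP_KERNEL argument is the allocation flag request_firmware_nowait() now takes, and the completion handler is responsible for release_firmware() on every path that actually received a firmware image.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static void example_fw_complete(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware request failed\n");
		return;				/* nothing to release */
	}

	/* ... upload fw->data / fw->size to the hardware here ... */

	release_firmware(fw);			/* freed whether the upload worked or not */
}

static int example_fw_load(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "example_firmware.bin", dev,
				       GFP_KERNEL, dev, example_fw_complete);
}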
diff --git a/drivers/staging/cx25821/cx25821-audups11.c b/drivers/staging/cx25821/cx25821-audups11.c
index f78b8912d90..89c8fe2997f 100644
--- a/drivers/staging/cx25821/cx25821-audups11.c
+++ b/drivers/staging/cx25821/cx25821-audups11.c
@@ -94,36 +94,20 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH11]
- && h->video_dev[SRAM_CH11]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
file->private_data = fh;
fh->dev = dev;
@@ -427,7 +411,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template11 = {
.name = "cx25821-audioupstream",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c
index 8834bc80a5a..c7c14c7698a 100644
--- a/drivers/staging/cx25821/cx25821-video.c
+++ b/drivers/staging/cx25821/cx25821-video.c
@@ -184,11 +184,11 @@ struct video_device *cx25821_vdev_init(struct cx25821_dev *dev,
if (NULL == vfd)
return NULL;
*vfd = *template;
- vfd->minor = -1;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name, type,
cx25821_boards[dev->board].name);
+ video_set_drvdata(vfd, dev);
return vfd;
}
@@ -424,7 +424,7 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
void cx25821_videoioctl_unregister(struct cx25821_dev *dev)
{
if (dev->ioctl_dev) {
- if (dev->ioctl_dev->minor != -1)
+ if (video_is_registered(dev->ioctl_dev))
video_unregister_device(dev->ioctl_dev);
else
video_device_release(dev->ioctl_dev);
@@ -438,7 +438,7 @@ void cx25821_video_unregister(struct cx25821_dev *dev, int chan_num)
cx_clear(PCI_INT_MSK, 1);
if (dev->video_dev[chan_num]) {
- if (-1 != dev->video_dev[chan_num]->minor)
+ if (video_is_registered(dev->video_dev[chan_num]))
video_unregister_device(dev->video_dev[chan_num]);
else
video_device_release(dev->video_dev[chan_num]);
diff --git a/drivers/staging/cx25821/cx25821-video0.c b/drivers/staging/cx25821/cx25821-video0.c
index 950fac1d700..ad7a6912911 100644
--- a/drivers/staging/cx25821/cx25821-video0.c
+++ b/drivers/staging/cx25821/cx25821-video0.c
@@ -94,37 +94,21 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH00]
- && h->video_dev[SRAM_CH00]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
file->private_data = fh;
fh->dev = dev;
@@ -444,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template0 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video1.c b/drivers/staging/cx25821/cx25821-video1.c
index a4dddc684ad..e3f3c4ac790 100644
--- a/drivers/staging/cx25821/cx25821-video1.c
+++ b/drivers/staging/cx25821/cx25821-video1.c
@@ -94,37 +94,21 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH01]
- && h->video_dev[SRAM_CH01]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
file->private_data = fh;
fh->dev = dev;
@@ -444,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template1 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video2.c b/drivers/staging/cx25821/cx25821-video2.c
index 8e04e253f5d..36fb855a497 100644
--- a/drivers/staging/cx25821/cx25821-video2.c
+++ b/drivers/staging/cx25821/cx25821-video2.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH02]
- && h->video_dev[SRAM_CH02]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
fh->type = type;
@@ -445,7 +430,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template2 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video3.c b/drivers/staging/cx25821/cx25821-video3.c
index 8801a8ead90..1e0f10abdbc 100644
--- a/drivers/staging/cx25821/cx25821-video3.c
+++ b/drivers/staging/cx25821/cx25821-video3.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH03]
- && h->video_dev[SRAM_CH03]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
fh->type = type;
@@ -444,7 +429,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template3 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video4.c b/drivers/staging/cx25821/cx25821-video4.c
index ab0d747138a..0cbe7a79d8c 100644
--- a/drivers/staging/cx25821/cx25821-video4.c
+++ b/drivers/staging/cx25821/cx25821-video4.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH04]
- && h->video_dev[SRAM_CH04]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
fh->type = type;
@@ -443,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template4 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video5.c b/drivers/staging/cx25821/cx25821-video5.c
index 7ef0b971f5c..5dc08adc12e 100644
--- a/drivers/staging/cx25821/cx25821-video5.c
+++ b/drivers/staging/cx25821/cx25821-video5.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH05]
- && h->video_dev[SRAM_CH05]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
fh->type = type;
@@ -443,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template5 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video6.c b/drivers/staging/cx25821/cx25821-video6.c
index 3c41b49e2ea..2938ad3ad3c 100644
--- a/drivers/staging/cx25821/cx25821-video6.c
+++ b/drivers/staging/cx25821/cx25821-video6.c
@@ -94,37 +94,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH06]
- && h->video_dev[SRAM_CH06]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
fh->type = type;
@@ -443,7 +428,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template6 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-video7.c b/drivers/staging/cx25821/cx25821-video7.c
index 625c9b78a9c..458e525d72a 100644
--- a/drivers/staging/cx25821/cx25821-video7.c
+++ b/drivers/staging/cx25821/cx25821-video7.c
@@ -93,37 +93,22 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH07]
- && h->video_dev[SRAM_CH07]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
+
file->private_data = fh;
fh->dev = dev;
fh->type = type;
@@ -442,7 +427,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template7 = {
.name = "cx25821-video",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-videoioctl.c b/drivers/staging/cx25821/cx25821-videoioctl.c
index 2a312ce78c6..1da52b54a45 100644
--- a/drivers/staging/cx25821/cx25821-videoioctl.c
+++ b/drivers/staging/cx25821/cx25821-videoioctl.c
@@ -94,36 +94,21 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
u32 pix_format;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->ioctl_dev && h->ioctl_dev->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
file->private_data = fh;
fh->dev = dev;
@@ -489,7 +474,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_videoioctl_template = {
.name = "cx25821-videoioctl",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-vidups10.c b/drivers/staging/cx25821/cx25821-vidups10.c
index 77b63b06040..b76d9f62c3d 100644
--- a/drivers/staging/cx25821/cx25821-vidups10.c
+++ b/drivers/staging/cx25821/cx25821-vidups10.c
@@ -94,36 +94,20 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH10]
- && h->video_dev[SRAM_CH10]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
file->private_data = fh;
fh->dev = dev;
@@ -428,7 +412,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template10 = {
.name = "cx25821-upstream10",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/cx25821/cx25821-vidups9.c b/drivers/staging/cx25821/cx25821-vidups9.c
index 75c8c1eed2d..1580da3b29a 100644
--- a/drivers/staging/cx25821/cx25821-vidups9.c
+++ b/drivers/staging/cx25821/cx25821-vidups9.c
@@ -94,36 +94,20 @@ static struct videobuf_queue_ops cx25821_video_qops = {
static int video_open(struct file *file)
{
- int minor = video_devdata(file)->minor;
- struct cx25821_dev *h, *dev = NULL;
+ struct video_device *vdev = video_devdata(file);
+ struct cx25821_dev *dev = video_drvdata(file);
struct cx25821_fh *fh;
- struct list_head *list;
- enum v4l2_buf_type type = 0;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- lock_kernel();
- list_for_each(list, &cx25821_devlist) {
- h = list_entry(list, struct cx25821_dev, devlist);
-
- if (h->video_dev[SRAM_CH09]
- && h->video_dev[SRAM_CH09]->minor == minor) {
- dev = h;
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- }
- }
-
- if (NULL == dev) {
- unlock_kernel();
- return -ENODEV;
- }
-
- printk("open minor=%d type=%s\n", minor, v4l2_type_names[type]);
+ printk("open dev=%s type=%s\n", video_device_node_name(vdev),
+ v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- unlock_kernel();
+ if (NULL == fh)
return -ENOMEM;
- }
+
+ lock_kernel();
file->private_data = fh;
fh->dev = dev;
@@ -426,7 +410,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
struct video_device cx25821_video_template9 = {
.name = "cx25821-upstream9",
.fops = &video_fops,
- .minor = -1,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX25821_NORMS,
.current_norm = V4L2_STD_NTSC_M,
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index fd5bd0ea1e0..c83ca7e3d04 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -403,7 +403,7 @@ static void dst_node_cleanup(struct dst_node *n)
if (n->bdev) {
sync_blockdev(n->bdev);
- blkdev_put(n->bdev, FMODE_READ|FMODE_WRITE);
+ close_bdev_exclusive(n->bdev, FMODE_READ|FMODE_WRITE);
}
dst_state_lock(st);
@@ -464,37 +464,6 @@ void dst_node_put(struct dst_node *n)
}
/*
- * This function finds devices major/minor numbers for given pathname.
- */
-static int dst_lookup_device(const char *path, dev_t *dev)
-{
- int err;
- struct nameidata nd;
- struct inode *inode;
-
- err = path_lookup(path, LOOKUP_FOLLOW, &nd);
- if (err)
- return err;
-
- inode = nd.path.dentry->d_inode;
- if (!inode) {
- err = -ENOENT;
- goto out;
- }
-
- if (!S_ISBLK(inode->i_mode)) {
- err = -ENOTBLK;
- goto out;
- }
-
- *dev = inode->i_rdev;
-
-out:
- path_put(&nd.path);
- return err;
-}
-
-/*
* Setting up export device: lookup by the name, get its size
* and setup listening socket, which will accept clients, which
* will submit IO for given storage.
@@ -503,17 +472,12 @@ static int dst_setup_export(struct dst_node *n, struct dst_ctl *ctl,
struct dst_export_ctl *le)
{
int err;
- dev_t dev = 0; /* gcc likes to scream here */
snprintf(n->info->local, sizeof(n->info->local), "%s", le->device);
- err = dst_lookup_device(le->device, &dev);
- if (err)
- return err;
-
- n->bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
- if (!n->bdev)
- return -ENODEV;
+ n->bdev = open_bdev_exclusive(le->device, FMODE_READ|FMODE_WRITE, NULL);
+ if (IS_ERR(n->bdev))
+ return PTR_ERR(n->bdev);
if (n->size != 0)
n->size = min_t(loff_t, n->bdev->bd_inode->i_size, n->size);
@@ -528,7 +492,7 @@ static int dst_setup_export(struct dst_node *n, struct dst_ctl *ctl,
return 0;
err_out_cleanup:
- blkdev_put(n->bdev, FMODE_READ|FMODE_WRITE);
+ close_bdev_exclusive(n->bdev, FMODE_READ|FMODE_WRITE);
n->bdev = NULL;
return err;
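The dcore.c change replaces the hand-rolled path_lookup()/open_by_devnum() sequence with the by-path helpers; a minimal sketch of that usage follows. Note that the error convention changes from a NULL return to an ERR_PTR value.

#include <linux/err.h>
#include <linux/fs.h>

static struct block_device *example_open_storage(const char *path)
{
	struct block_device *bdev;

	/* path lookup, S_ISBLK check and exclusive claim all happen in the helper */
	bdev = open_bdev_exclusive(path, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev))
		return bdev;			/* caller reports PTR_ERR(bdev) */

	/* ... use bdev->bd_inode->i_size, submit IO, ... */
	return bdev;
}

static void example_close_storage(struct block_device *bdev)
{
	close_bdev_exclusive(bdev, FMODE_READ | FMODE_WRITE);
}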
diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
index b18d8e2d4c5..3af79242313 100644
--- a/drivers/staging/go7007/go7007-v4l2.c
+++ b/drivers/staging/go7007/go7007-v4l2.c
@@ -1787,7 +1787,6 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
static struct video_device go7007_template = {
.name = "go7007",
.fops = &go7007_fops,
- .minor = -1,
.release = go7007_vfl_release,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = V4L2_STD_ALL,
@@ -1817,8 +1816,8 @@ int go7007_v4l2_init(struct go7007 *go)
}
video_set_drvdata(go->video_dev, go);
++go->ref_count;
- printk(KERN_INFO "%s: registered device video%d [v4l2]\n",
- go->video_dev->name, go->video_dev->num);
+ printk(KERN_INFO "%s: registered device %s [v4l2]\n",
+ go->video_dev->name, video_device_node_name(go->video_dev));
return 0;
}
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index f0b86f02cd8..fd677f00836 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -29,7 +29,6 @@
* driver requests - some may support multiple options */
-#include <linux/autoconf.h>
#include "iio.h"
#include "ring_generic.h"
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 536e2382de5..638ad6b3589 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,7 +1,8 @@
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
depends on CPU_CAVIUM_OCTEON
- select MII
+ select PHYLIB
+ select MDIO_OCTEON
help
This driver supports the builtin ethernet ports on Cavium
Networks' products in the Octeon family. This driver supports the
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 31a58e50892..05a5cc0f43e 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -26,7 +26,8 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
+
#include <net/dst.h>
#include <asm/octeon/octeon.h>
@@ -34,86 +35,12 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mdio.h"
+#include "ethernet-util.h"
#include "cvmx-helper-board.h"
#include "cvmx-smix-defs.h"
-DECLARE_MUTEX(mdio_sem);
-
-/**
- * Perform an MII read. Called by the generic MII routines
- *
- * @dev: Device to perform read for
- * @phy_id: The MII phy id
- * @location: Register location to read
- * Returns Result from the read or zero on failure
- */
-static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = 1;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
-
- do {
- if (!in_interrupt())
- yield();
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
- } while (smi_rd.s.pending);
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else
- return 0;
-}
-
-static int cvm_oct_mdio_dummy_read(struct net_device *dev, int phy_id,
- int location)
-{
- return 0xffff;
-}
-
-/**
- * Perform an MII write. Called by the generic MII routines
- *
- * @dev: Device to perform write for
- * @phy_id: The MII phy id
- * @location: Register location to write
- * @val: Value to write
- */
-static void cvm_oct_mdio_write(struct net_device *dev, int phy_id, int location,
- int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = 0;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);
-
- do {
- if (!in_interrupt())
- yield();
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
- } while (smi_wr.s.pending);
-}
-
-static void cvm_oct_mdio_dummy_write(struct net_device *dev, int phy_id,
- int location, int val)
-{
-}
-
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -125,49 +52,37 @@ static void cvm_oct_get_drvinfo(struct net_device *dev,
static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
- down(&mdio_sem);
- ret = mii_ethtool_gset(&priv->mii_info, cmd);
- up(&mdio_sem);
+ if (priv->phydev)
+ return phy_ethtool_gset(priv->phydev, cmd);
- return ret;
+ return -EINVAL;
}
static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
- down(&mdio_sem);
- ret = mii_ethtool_sset(&priv->mii_info, cmd);
- up(&mdio_sem);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (priv->phydev)
+ return phy_ethtool_sset(priv->phydev, cmd);
- return ret;
+ return -EINVAL;
}
static int cvm_oct_nway_reset(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int ret;
- down(&mdio_sem);
- ret = mii_nway_restart(&priv->mii_info);
- up(&mdio_sem);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
- return ret;
-}
+ if (priv->phydev)
+ return phy_start_aneg(priv->phydev);
-static u32 cvm_oct_get_link(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- u32 ret;
-
- down(&mdio_sem);
- ret = mii_link_ok(&priv->mii_info);
- up(&mdio_sem);
-
- return ret;
+ return -EINVAL;
}
const struct ethtool_ops cvm_oct_ethtool_ops = {
@@ -175,7 +90,7 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
.get_settings = cvm_oct_get_settings,
.set_settings = cvm_oct_set_settings,
.nway_reset = cvm_oct_nway_reset,
- .get_link = cvm_oct_get_link,
+ .get_link = ethtool_op_get_link,
.get_sg = ethtool_op_get_sg,
.get_tx_csum = ethtool_op_get_tx_csum,
};
@@ -191,41 +106,78 @@ const struct ethtool_ops cvm_oct_ethtool_ops = {
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(rq);
- unsigned int duplex_chg;
- int ret;
- down(&mdio_sem);
- ret = generic_mii_ioctl(&priv->mii_info, data, cmd, &duplex_chg);
- up(&mdio_sem);
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+}
- return ret;
+static void cvm_oct_adjust_link(struct net_device *dev)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvmx_helper_link_info_t link_info;
+
+ if (priv->last_link != priv->phydev->link) {
+ priv->last_link = priv->phydev->link;
+ link_info.u64 = 0;
+ link_info.s.link_up = priv->last_link ? 1 : 0;
+ link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
+ link_info.s.speed = priv->phydev->speed;
+ cvmx_helper_link_set( priv->port, link_info);
+ if (priv->last_link) {
+ netif_carrier_on(dev);
+ if (priv->queue != -1)
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, queue %2d\n",
+ dev->name, priv->phydev->speed,
+ priv->phydev->duplex ?
+ "Full" : "Half",
+ priv->port, priv->queue);
+ else
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, POW\n",
+ dev->name, priv->phydev->speed,
+ priv->phydev->duplex ?
+ "Full" : "Half",
+ priv->port);
+ } else {
+ netif_carrier_off(dev);
+ DEBUGPRINT("%s: Link down\n", dev->name);
+ }
+ }
}
+
/**
- * Setup the MDIO device structures
+ * Setup the PHY
*
* @dev: Device to setup
*
* Returns Zero on success, negative on failure
*/
-int cvm_oct_mdio_setup_device(struct net_device *dev)
+int cvm_oct_phy_setup_device(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- int phy_id = cvmx_helper_board_get_mii_address(priv->port);
- if (phy_id != -1) {
- priv->mii_info.dev = dev;
- priv->mii_info.phy_id = phy_id;
- priv->mii_info.phy_id_mask = 0xff;
- priv->mii_info.supports_gmii = 1;
- priv->mii_info.reg_num_mask = 0x1f;
- priv->mii_info.mdio_read = cvm_oct_mdio_read;
- priv->mii_info.mdio_write = cvm_oct_mdio_write;
- } else {
- /* Supply dummy MDIO routines so the kernel won't crash
- if the user tries to read them */
- priv->mii_info.mdio_read = cvm_oct_mdio_dummy_read;
- priv->mii_info.mdio_write = cvm_oct_mdio_dummy_write;
+
+ int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
+ if (phy_addr != -1) {
+ char phy_id[20];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
+
+ priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+
+ if (IS_ERR(priv->phydev)) {
+ priv->phydev = NULL;
+ return -1;
+ }
+ priv->last_link = 0;
+ phy_start_aneg(priv->phydev);
}
return 0;
}
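A condensed sketch of the phylib pattern introduced across the octeon hunks: connect to a PHY on the MDIO bus (registered separately by MDIO_OCTEON), let phylib run autonegotiation, and update carrier state from the adjust_link callback. The example_ names and the "0" bus id are placeholders.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct example_priv {
	struct phy_device *phydev;
	int last_link;
};

static void example_adjust_link(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->last_link == priv->phydev->link)
		return;
	priv->last_link = priv->phydev->link;
	if (priv->last_link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int example_phy_attach(struct net_device *dev, int phy_addr)
{
	struct example_priv *priv = netdev_priv(dev);
	char phy_id[20];

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);

	priv->phydev = phy_connect(dev, phy_id, example_adjust_link, 0,
				   PHY_INTERFACE_MODE_GMII);
	if (IS_ERR(priv->phydev)) {
		priv->phydev = NULL;
		return -ENODEV;
	}

	phy_start_aneg(priv->phydev);		/* phylib drives the link from here on */
	return 0;
}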
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index b3328aeec2d..55d0614a7cd 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -43,4 +43,4 @@
extern const struct ethtool_ops cvm_oct_ethtool_ops;
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-int cvm_oct_mdio_setup_device(struct net_device *dev);
+int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
index 8fa88fc419b..16308d484d3 100644
--- a/drivers/staging/octeon/ethernet-proc.c
+++ b/drivers/staging/octeon/ethernet-proc.c
@@ -25,7 +25,6 @@
* Contact Cavium Networks for more information
**********************************************************************/
#include <linux/kernel.h>
-#include <linux/mii.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <net/dst.h>
@@ -38,112 +37,6 @@
#include "cvmx-helper.h"
#include "cvmx-pip.h"
-static unsigned long long cvm_oct_stats_read_switch(struct net_device *dev,
- int phy_id, int offset)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- priv->mii_info.mdio_write(dev, phy_id, 0x1d, 0xcc00 | offset);
- return ((uint64_t) priv->mii_info.
- mdio_read(dev, phy_id,
- 0x1e) << 16) | (uint64_t) priv->mii_info.
- mdio_read(dev, phy_id, 0x1f);
-}
-
-static int cvm_oct_stats_switch_show(struct seq_file *m, void *v)
-{
- static const int ports[] = { 0, 1, 2, 3, 9, -1 };
- struct net_device *dev = cvm_oct_device[0];
- int index = 0;
-
- while (ports[index] != -1) {
-
- /* Latch port */
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- priv->mii_info.mdio_write(dev, 0x1b, 0x1d,
- 0xdc00 | ports[index]);
- seq_printf(m, "\nSwitch Port %d\n", ports[index]);
- seq_printf(m, "InGoodOctets: %12llu\t"
- "OutOctets: %12llu\t"
- "64 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b,
- 0x00) |
- (cvm_oct_stats_read_switch(dev, 0x1b, 0x01) << 32),
- cvm_oct_stats_read_switch(dev, 0x1b,
- 0x0E) |
- (cvm_oct_stats_read_switch(dev, 0x1b, 0x0F) << 32),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x08));
-
- seq_printf(m, "InBadOctets: %12llu\t"
- "OutUnicast: %12llu\t"
- "65-127 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x02),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x10),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x09));
-
- seq_printf(m, "InUnicast: %12llu\t"
- "OutBroadcasts: %12llu\t"
- "128-255 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x04),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x13),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0A));
-
- seq_printf(m, "InBroadcasts: %12llu\t"
- "OutMulticasts: %12llu\t"
- "256-511 Octets: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x06),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x12),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0B));
-
- seq_printf(m, "InMulticasts: %12llu\t"
- "OutPause: %12llu\t"
- "512-1023 Octets:%12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x07),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x15),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0C));
-
- seq_printf(m, "InPause: %12llu\t"
- "Excessive: %12llu\t"
- "1024-Max Octets:%12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x16),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x11),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x0D));
-
- seq_printf(m, "InUndersize: %12llu\t"
- "Collisions: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x18),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1E));
-
- seq_printf(m, "InFragments: %12llu\t"
- "Deferred: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x19),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x05));
-
- seq_printf(m, "InOversize: %12llu\t"
- "Single: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1A),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x14));
-
- seq_printf(m, "InJabber: %12llu\t"
- "Multiple: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1B),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x17));
-
- seq_printf(m, "In RxErr: %12llu\t"
- "OutFCSErr: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1C),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x03));
-
- seq_printf(m, "InFCSErr: %12llu\t"
- "Late: %12llu\n",
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1D),
- cvm_oct_stats_read_switch(dev, 0x1b, 0x1F));
- index++;
- }
- return 0;
-}
-
/**
* User is reading /proc/octeon_ethernet_stats
*
@@ -215,11 +108,6 @@ static int cvm_oct_stats_show(struct seq_file *m, void *v)
}
}
- if (cvm_oct_device[0]) {
- priv = netdev_priv(cvm_oct_device[0]);
- if (priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
- cvm_oct_stats_switch_show(m, v);
- }
return 0;
}
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index fbaa465d2fa..3820f1ec11d 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -147,32 +147,36 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
gmxx_rxx_int_reg.u64);
}
-
- link_info = cvmx_helper_link_autoconf(priv->port);
- priv->link_info = link_info.u64;
+ if (priv->phydev == NULL) {
+ link_info = cvmx_helper_link_autoconf(priv->port);
+ priv->link_info = link_info.u64;
+ }
spin_unlock_irqrestore(&global_register_lock, flags);
- /* Tell Linux */
- if (link_info.s.link_up) {
-
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- if (priv->queue != -1)
- DEBUGPRINT
- ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ? "Full" : "Half",
- priv->port, priv->queue);
- else
- DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ? "Full" : "Half",
- priv->port);
- } else {
-
- if (netif_carrier_ok(dev))
- netif_carrier_off(dev);
- DEBUGPRINT("%s: Link down\n", dev->name);
+ if (priv->phydev == NULL) {
+ /* Tell core. */
+ if (link_info.s.link_up) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ if (priv->queue != -1)
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, queue %2d\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ?
+ "Full" : "Half",
+ priv->port, priv->queue);
+ else
+ DEBUGPRINT("%s: %u Mbps %s duplex, "
+ "port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ?
+ "Full" : "Half",
+ priv->port);
+ } else {
+ if (netif_carrier_ok(dev))
+ netif_carrier_off(dev);
+ DEBUGPRINT("%s: Link down\n", dev->name);
+ }
}
}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 2b54996bd85..6061d01eca2 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -113,7 +113,7 @@ int cvm_oct_sgmii_init(struct net_device *dev)
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
dev->netdev_ops->ndo_stop(dev);
- if (!octeon_is_simulation())
+ if (!octeon_is_simulation() && priv->phydev == NULL)
priv->poll = cvm_oct_sgmii_poll;
/* FIXME: Need autoneg logic */
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index 0c2e7cc40f3..ee3dc41b2c5 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -112,7 +112,7 @@ int cvm_oct_xaui_init(struct net_device *dev)
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
dev->netdev_ops->ndo_stop(dev);
- if (!octeon_is_simulation())
+ if (!octeon_is_simulation() && priv->phydev == NULL)
priv->poll = cvm_oct_xaui_poll;
return 0;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 492c5029992..4cfd4b136b3 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -30,7 +30,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
#include <net/dst.h>
@@ -132,8 +132,6 @@ static struct timer_list cvm_oct_poll_timer;
*/
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
-extern struct semaphore mdio_sem;
-
/**
* Periodic timer tick for slow management operations
*
@@ -160,13 +158,8 @@ static void cvm_do_timer(unsigned long arg)
goto out;
priv = netdev_priv(cvm_oct_device[port]);
- if (priv->poll) {
- /* skip polling if we don't get the lock */
- if (!down_trylock(&mdio_sem)) {
- priv->poll(cvm_oct_device[port]);
- up(&mdio_sem);
- }
- }
+ if (priv->poll)
+ priv->poll(cvm_oct_device[port]);
queues_per_port = cvmx_pko_get_num_queues(port);
/* Drain any pending packets in the free list */
@@ -524,7 +517,7 @@ int cvm_oct_common_init(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
- cvm_oct_mdio_setup_device(dev);
+ cvm_oct_phy_setup_device(dev);
dev->netdev_ops->ndo_set_mac_address(dev, &sa);
dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
@@ -540,7 +533,10 @@ int cvm_oct_common_init(struct net_device *dev)
void cvm_oct_common_uninit(struct net_device *dev)
{
- /* Currently nothing to do */
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
}
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
@@ -627,6 +623,8 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
#endif
};
+extern void octeon_mdiobus_force_mod_depencency(void);
+
/**
* Module/ driver initialization. Creates the linux network
* devices.
@@ -640,6 +638,7 @@ static int __init cvm_oct_init_module(void)
int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
int qos;
+ octeon_mdiobus_force_mod_depencency();
pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
if (OCTEON_IS_MODEL(OCTEON_CN52XX))
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 3aef9878fc0..402a15b9bb0 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -50,9 +50,9 @@ struct octeon_ethernet {
/* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16];
/* Device statistics */
- struct net_device_stats stats
-; /* Generic MII info structure */
- struct mii_if_info mii_info;
+ struct net_device_stats stats;
+ struct phy_device *phydev;
+ unsigned int last_link;
/* Last negotiated link state */
uint64_t link_info;
/* Called periodically to check link status */
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 4ce399b6d23..f98a52448ea 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -55,7 +55,7 @@
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/io.h>
#include <asm/uaccess.h>
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 6c5b261e9f0..aacd25bfb0c 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -722,8 +722,6 @@ static int pohmelfs_remove_entry(struct inode *dir, struct dentry *dentry)
if (inode->i_nlink)
inode_dec_link_count(inode);
}
- dprintk("%s: inode: %p, lock: %ld, unhashed: %d.\n",
- __func__, pi, inode->i_state & I_LOCK, hlist_unhashed(&inode->i_hash));
return err;
}
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 6f8d8f97121..5066de5cfc0 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -225,6 +225,12 @@ passive_store(struct device *dev, struct device_attribute *attr,
if (!sscanf(buf, "%d\n", &state))
return -EINVAL;
+ /* sanity check: values below 1000 millicelsius don't make sense
+ /* sanity check: values below 1000 millicelsius don't make sense
+ * and can cause the system to go into a thermal heart attack
+ */
+ if (state && state < 1000)
+ return -EINVAL;
+
if (state && !tz->forced_passive) {
mutex_lock(&thermal_list_lock);
list_for_each_entry(cdev, &thermal_cdev_list, node) {
@@ -235,6 +241,8 @@ passive_store(struct device *dev, struct device_attribute *attr,
cdev);
}
mutex_unlock(&thermal_list_lock);
+ if (!tz->passive_delay)
+ tz->passive_delay = 1000;
} else if (!state && tz->forced_passive) {
mutex_lock(&thermal_list_lock);
list_for_each_entry(cdev, &thermal_cdev_list, node) {
@@ -245,17 +253,12 @@ passive_store(struct device *dev, struct device_attribute *attr,
cdev);
}
mutex_unlock(&thermal_list_lock);
+ tz->passive_delay = 0;
}
tz->tc1 = 1;
tz->tc2 = 1;
- if (!tz->passive_delay)
- tz->passive_delay = 1000;
-
- if (!tz->polling_delay)
- tz->polling_delay = 10000;
-
tz->forced_passive = state;
thermal_zone_device_update(tz);
@@ -374,7 +377,7 @@ thermal_cooling_device_cur_state_store(struct device *dev,
if (!sscanf(buf, "%ld\n", &state))
return -EINVAL;
- if (state < 0)
+ if ((long)state < 0)
return -EINVAL;
result = cdev->ops->set_cur_state(cdev, state);
@@ -1016,6 +1019,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, tz->passive_delay);
else if (tz->polling_delay)
thermal_zone_device_set_polling(tz, tz->polling_delay);
+ else
+ thermal_zone_device_set_polling(tz, 0);
mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL(thermal_zone_device_update);
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index aa53db9f2e8..1ef3b8fc50b 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -210,7 +210,7 @@ static int uio_pdrv_genirq_runtime_nop(struct device *dev)
return 0;
}
-static struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
+static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
.runtime_suspend = uio_pdrv_genirq_runtime_nop,
.runtime_resume = uio_pdrv_genirq_runtime_nop,
};
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 240750881d2..81aac7f4ca5 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -60,6 +60,8 @@ config USB_ARCH_HAS_EHCI
default y if ARCH_IXP4XX
default y if ARCH_W90X900
default y if ARCH_AT91SAM9G45
+ default y if ARCH_MXC
+ default y if ARCH_OMAP34XX
default PCI
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b80bc9..473aa1a20de 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -44,3 +44,5 @@ obj-y += early/
obj-$(CONFIG_USB_ATM) += atm/
obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
+
+obj-$(CONFIG_USB_ULPI) += otg/
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index bba4d3eabe0..c5395246886 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -667,12 +667,12 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte
else
uea_info(usb, "firmware uploaded\n");
- uea_leaves(usb);
- return;
+ goto err;
err_fw_corrupted:
uea_err(usb, "firmware is corrupted\n");
err:
+ release_firmware(fw_entry);
uea_leaves(usb);
}
@@ -705,7 +705,8 @@ static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
break;
}
- ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev, usb, uea_upload_pre_firmware);
+ ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev,
+ GFP_KERNEL, usb, uea_upload_pre_firmware);
if (ret)
uea_err(usb, "firmware %s is not available\n", fw_name);
else
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e4eca7810bc..34d4eb98829 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1461,6 +1461,12 @@ err_out:
}
#endif /* CONFIG_PM */
+
+#define NOKIA_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
+
/*
* USB driver structure.
*/
@@ -1519,6 +1525,57 @@ static struct usb_device_id acm_ids[] = {
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ /* Nokia S60 phones expose two ACM channels. The first is
+ * a modem and is picked up by the standard AT-command
+ * information below. The second is 'vendor-specific' but
+ * is treated as a serial device at the S60 end, so we want
+ * to expose it on Linux too. */
+ { NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */
+ { NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */
+ { NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */
+ { NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */
+ { NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i */
+ { NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */
+ { NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */
+ { NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic & */
+ { NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
+ { NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
+ { NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */
+ { NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */
+ { NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
+ { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
+
+ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1533,7 +1590,6 @@ static struct usb_device_id acm_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
- /* NOTE: COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
{ }
};
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index b4bd2411c66..7c5f4e32c92 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -347,13 +347,8 @@ usbtmc_abort_bulk_out_check_status:
goto exit;
usbtmc_abort_bulk_out_clear_halt:
- rv = usb_control_msg(data->usb_dev,
- usb_sndctrlpipe(data->usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_DIR_OUT | USB_TYPE_STANDARD |
- USB_RECIP_ENDPOINT,
- USB_ENDPOINT_HALT, data->bulk_out, buffer,
- 0, USBTMC_TIMEOUT);
+ rv = usb_clear_halt(data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out));
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -562,10 +557,16 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
n_bytes = roundup(12 + this_part, 4);
memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
- retval = usb_bulk_msg(data->usb_dev,
- usb_sndbulkpipe(data->usb_dev,
- data->bulk_out),
- buffer, n_bytes, &actual, USBTMC_TIMEOUT);
+ do {
+ retval = usb_bulk_msg(data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev,
+ data->bulk_out),
+ buffer, n_bytes,
+ &actual, USBTMC_TIMEOUT);
+ if (retval != 0)
+ break;
+ n_bytes -= actual;
+ } while (n_bytes);
data->bTag_last_write = data->bTag;
data->bTag++;
@@ -702,14 +703,8 @@ usbtmc_clear_check_status:
usbtmc_clear_bulk_out_halt:
- rv = usb_control_msg(data->usb_dev,
- usb_sndctrlpipe(data->usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_DIR_OUT | USB_TYPE_STANDARD |
- USB_RECIP_ENDPOINT,
- USB_ENDPOINT_HALT,
- data->bulk_out, buffer, 0,
- USBTMC_TIMEOUT);
+ rv = usb_clear_halt(data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out));
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
@@ -730,13 +725,8 @@ static int usbtmc_ioctl_clear_out_halt(struct usbtmc_device_data *data)
if (!buffer)
return -ENOMEM;
- rv = usb_control_msg(data->usb_dev,
- usb_sndctrlpipe(data->usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_DIR_OUT | USB_TYPE_STANDARD |
- USB_RECIP_ENDPOINT,
- USB_ENDPOINT_HALT, data->bulk_out,
- buffer, 0, USBTMC_TIMEOUT);
+ rv = usb_clear_halt(data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out));
if (rv < 0) {
dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
@@ -759,12 +749,8 @@ static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
if (!buffer)
return -ENOMEM;
- rv = usb_control_msg(data->usb_dev, usb_sndctrlpipe(data->usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_DIR_OUT | USB_TYPE_STANDARD |
- USB_RECIP_ENDPOINT,
- USB_ENDPOINT_HALT, data->bulk_in, buffer, 0,
- USBTMC_TIMEOUT);
+ rv = usb_clear_halt(data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev, data->bulk_in));
if (rv < 0) {
dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
@@ -1109,13 +1095,13 @@ static void usbtmc_disconnect(struct usb_interface *intf)
kref_put(&data->kref, usbtmc_delete);
}
-static int usbtmc_suspend (struct usb_interface *intf, pm_message_t message)
+static int usbtmc_suspend(struct usb_interface *intf, pm_message_t message)
{
/* this driver does not have pending URBs */
return 0;
}
-static int usbtmc_resume (struct usb_interface *intf)
+static int usbtmc_resume(struct usb_interface *intf)
{
return 0;
}
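Each of the usbtmc hunks above collapses a hand-built CLEAR_FEATURE(ENDPOINT_HALT) control transfer into usb_clear_halt(), which also resets the host-side data toggle that the raw control message left alone. A two-function sketch of the helper usage:

#include <linux/usb.h>

static int example_clear_bulk_out(struct usb_device *udev, u8 bulk_out_ep)
{
	/* replaces the explicit CLEAR_FEATURE(ENDPOINT_HALT) control message */
	return usb_clear_halt(udev, usb_sndbulkpipe(udev, bulk_out_ep));
}

static int example_clear_bulk_in(struct usb_device *udev, u8 bulk_in_ep)
{
	return usb_clear_halt(udev, usb_rcvbulkpipe(udev, bulk_in_ep));
}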
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 181f78c8410..6e8bcdfd23b 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1388,6 +1388,46 @@ static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
}
#ifdef CONFIG_COMPAT
+static int proc_control_compat(struct dev_state *ps,
+ struct usbdevfs_ctrltransfer32 __user *p32)
+{
+ struct usbdevfs_ctrltransfer __user *p;
+ __u32 udata;
+ p = compat_alloc_user_space(sizeof(*p));
+ if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) ||
+ get_user(udata, &p32->data) ||
+ put_user(compat_ptr(udata), &p->data))
+ return -EFAULT;
+ return proc_control(ps, p);
+}
+
+static int proc_bulk_compat(struct dev_state *ps,
+ struct usbdevfs_bulktransfer32 __user *p32)
+{
+ struct usbdevfs_bulktransfer __user *p;
+ compat_uint_t n;
+ compat_caddr_t addr;
+
+ p = compat_alloc_user_space(sizeof(*p));
+
+ if (get_user(n, &p32->ep) || put_user(n, &p->ep) ||
+ get_user(n, &p32->len) || put_user(n, &p->len) ||
+ get_user(n, &p32->timeout) || put_user(n, &p->timeout) ||
+ get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data))
+ return -EFAULT;
+
+ return proc_bulk(ps, p);
+}
+static int proc_disconnectsignal_compat(struct dev_state *ps, void __user *arg)
+{
+ struct usbdevfs_disconnectsignal32 ds;
+
+ if (copy_from_user(&ds, arg, sizeof(ds)))
+ return -EFAULT;
+ ps->discsignr = ds.signr;
+ ps->disccontext = compat_ptr(ds.context);
+ return 0;
+}
static int get_urb32(struct usbdevfs_urb *kurb,
struct usbdevfs_urb32 __user *uurb)
@@ -1482,6 +1522,7 @@ static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
return processcompl_compat(as, (void __user * __user *)arg);
}
+
#endif
static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
@@ -1648,12 +1689,12 @@ static int proc_release_port(struct dev_state *ps, void __user *arg)
* are assuming that somehow the configuration has been prevented from
* changing. But there's no mechanism to ensure that...
*/
-static int usbdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
+ void __user *p)
{
struct dev_state *ps = file->private_data;
+ struct inode *inode = file->f_path.dentry->d_inode;
struct usb_device *dev = ps->dev;
- void __user *p = (void __user *)arg;
int ret = -ENOTTY;
if (!(file->f_mode & FMODE_WRITE))
@@ -1726,6 +1767,24 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
break;
#ifdef CONFIG_COMPAT
+ case USBDEVFS_CONTROL32:
+ snoop(&dev->dev, "%s: CONTROL32\n", __func__);
+ ret = proc_control_compat(ps, p);
+ if (ret >= 0)
+ inode->i_mtime = CURRENT_TIME;
+ break;
+
+ case USBDEVFS_BULK32:
+ snoop(&dev->dev, "%s: BULK32\n", __func__);
+ ret = proc_bulk_compat(ps, p);
+ if (ret >= 0)
+ inode->i_mtime = CURRENT_TIME;
+ break;
+
+ case USBDEVFS_DISCSIGNAL32:
+ snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__);
+ ret = proc_disconnectsignal_compat(ps, p);
+ break;
case USBDEVFS_SUBMITURB32:
snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
@@ -1745,7 +1804,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
break;
case USBDEVFS_IOCTL32:
- snoop(&dev->dev, "%s: IOCTL\n", __func__);
+ snoop(&dev->dev, "%s: IOCTL32\n", __func__);
ret = proc_ioctl_compat(ps, ptr_to_compat(p));
break;
#endif
@@ -1801,6 +1860,32 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
return ret;
}
+static long usbdev_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
+ unlock_kernel();
+
+ return ret;
+}
+#endif
+
/* No kernel lock - fine */
static unsigned int usbdev_poll(struct file *file,
struct poll_table_struct *wait)
@@ -1817,13 +1902,16 @@ static unsigned int usbdev_poll(struct file *file,
}
const struct file_operations usbdev_file_operations = {
- .owner = THIS_MODULE,
- .llseek = usbdev_lseek,
- .read = usbdev_read,
- .poll = usbdev_poll,
- .ioctl = usbdev_ioctl,
- .open = usbdev_open,
- .release = usbdev_release,
+ .owner = THIS_MODULE,
+ .llseek = usbdev_lseek,
+ .read = usbdev_read,
+ .poll = usbdev_poll,
+ .unlocked_ioctl = usbdev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = usbdev_compat_ioctl,
+#endif
+ .open = usbdev_open,
+ .release = usbdev_release,
};
static void usbdev_remove(struct usb_device *udev)
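
The compat handlers added above let unchanged 32-bit usbfs clients keep using USBDEVFS_CONTROL, USBDEVFS_BULK and USBDEVFS_DISCSIGNAL on a 64-bit kernel. A minimal sketch of the control-transfer call they translate (the device node path is a placeholder, not taken from the patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

/* Sketch only: read the device descriptor through usbfs. */
int main(void)
{
	unsigned char desc[18];
	struct usbdevfs_ctrltransfer ct = {
		.bRequestType = 0x80,		/* IN, standard, device */
		.bRequest     = 0x06,		/* GET_DESCRIPTOR */
		.wValue       = 0x0100,		/* device descriptor, index 0 */
		.wIndex       = 0,
		.wLength      = sizeof(desc),
		.timeout      = 1000,		/* milliseconds */
		.data         = desc,
	};
	int fd = open("/dev/bus/usb/001/002", O_RDWR);	/* placeholder path */

	if (fd < 0)
		return 1;
	if (ioctl(fd, USBDEVFS_CONTROL, &ct) < 0)
		perror("USBDEVFS_CONTROL");
	close(fd);
	return 0;
}
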
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 4f864472c5c..60a45f1e3a6 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -83,6 +83,47 @@ static ssize_t store_new_id(struct device_driver *driver,
}
static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
+/**
+ * store_remove_id - remove a USB device ID from this driver
+ * @driver: target device driver
+ * @buf: buffer for scanning device ID data
+ * @count: input size
+ *
+ * Removes a dynamic usb device ID from this driver.
+ */
+static ssize_t
+store_remove_id(struct device_driver *driver, const char *buf, size_t count)
+{
+ struct usb_dynid *dynid, *n;
+ struct usb_driver *usb_driver = to_usb_driver(driver);
+ u32 idVendor = 0;
+ u32 idProduct = 0;
+ int fields = 0;
+ int retval = 0;
+
+ fields = sscanf(buf, "%x %x", &idVendor, &idProduct);
+ if (fields < 2)
+ return -EINVAL;
+
+ spin_lock(&usb_driver->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) {
+ struct usb_device_id *id = &dynid->id;
+ if ((id->idVendor == idVendor) &&
+ (id->idProduct == idProduct)) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ retval = 0;
+ break;
+ }
+ }
+ spin_unlock(&usb_driver->dynids.lock);
+
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
+
static int usb_create_newid_file(struct usb_driver *usb_drv)
{
int error = 0;
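
A dynamic ID added through new_id can now be dropped again by writing the same "idVendor idProduct" pair to remove_id; store_remove_id() above parses it with sscanf("%x %x"). A userspace sketch (the driver name "foo" and the ID values are hypothetical):

#include <stdio.h>

int main(void)
{
	/* Hypothetical example: remove the dynamic ID 1234:5678 from "foo". */
	FILE *f = fopen("/sys/bus/usb/drivers/foo/remove_id", "w");

	if (!f)
		return 1;
	fprintf(f, "1234 5678\n");
	fclose(f);
	return 0;
}
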
@@ -107,6 +148,21 @@ static void usb_remove_newid_file(struct usb_driver *usb_drv)
&driver_attr_new_id);
}
+static int
+usb_create_removeid_file(struct usb_driver *drv)
+{
+ int error = 0;
+ if (drv->probe != NULL)
+ error = driver_create_file(&drv->drvwrap.driver,
+ &driver_attr_remove_id);
+ return error;
+}
+
+static void usb_remove_removeid_file(struct usb_driver *drv)
+{
+ driver_remove_file(&drv->drvwrap.driver, &driver_attr_remove_id);
+}
+
static void usb_free_dynids(struct usb_driver *usb_drv)
{
struct usb_dynid *dynid, *n;
@@ -128,6 +184,16 @@ static void usb_remove_newid_file(struct usb_driver *usb_drv)
{
}
+static int
+usb_create_removeid_file(struct usb_driver *drv)
+{
+ return 0;
+}
+
+static void usb_remove_removeid_file(struct usb_driver *drv)
+{
+}
+
static inline void usb_free_dynids(struct usb_driver *usb_drv)
{
}
@@ -774,19 +840,34 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
INIT_LIST_HEAD(&new_driver->dynids.list);
retval = driver_register(&new_driver->drvwrap.driver);
+ if (retval)
+ goto out;
- if (!retval) {
- pr_info("%s: registered new interface driver %s\n",
+ usbfs_update_special();
+
+ retval = usb_create_newid_file(new_driver);
+ if (retval)
+ goto out_newid;
+
+ retval = usb_create_removeid_file(new_driver);
+ if (retval)
+ goto out_removeid;
+
+ pr_info("%s: registered new interface driver %s\n",
usbcore_name, new_driver->name);
- usbfs_update_special();
- usb_create_newid_file(new_driver);
- } else {
- printk(KERN_ERR "%s: error %d registering interface "
- " driver %s\n",
- usbcore_name, retval, new_driver->name);
- }
+out:
return retval;
+
+out_removeid:
+ usb_remove_newid_file(new_driver);
+out_newid:
+ driver_unregister(&new_driver->drvwrap.driver);
+
+ printk(KERN_ERR "%s: error %d registering interface "
+ " driver %s\n",
+ usbcore_name, retval, new_driver->name);
+ goto out;
}
EXPORT_SYMBOL_GPL(usb_register_driver);
@@ -806,6 +887,7 @@ void usb_deregister(struct usb_driver *driver)
pr_info("%s: deregistering interface driver %s\n",
usbcore_name, driver->name);
+ usb_remove_removeid_file(driver);
usb_remove_newid_file(driver);
usb_free_dynids(driver);
driver_unregister(&driver->drvwrap.driver);
@@ -948,8 +1030,6 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
done:
dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
- if (status == 0)
- udev->autoresume_disabled = 0;
return status;
}
@@ -1280,11 +1360,6 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
/* Propagate the resume up the tree, if necessary */
if (udev->state == USB_STATE_SUSPENDED) {
- if ((msg.event & PM_EVENT_AUTO) &&
- udev->autoresume_disabled) {
- status = -EPERM;
- goto done;
- }
if (parent) {
status = usb_autoresume_device(parent);
if (status == 0) {
@@ -1341,7 +1416,6 @@ static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
int status = 0;
usb_pm_lock(udev);
- udev->auto_pm = 1;
udev->pm_usage_cnt += inc_usage_cnt;
WARN_ON(udev->pm_usage_cnt < 0);
if (inc_usage_cnt)
@@ -1473,7 +1547,6 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
if (intf->condition == USB_INTERFACE_UNBOUND)
status = -ENODEV;
else {
- udev->auto_pm = 1;
atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
udev->last_busy = jiffies;
if (inc_usage_cnt >= 0 &&
@@ -1640,8 +1713,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
if (intf->condition == USB_INTERFACE_UNBOUND)
status = -ENODEV;
- else if (udev->autoresume_disabled)
- status = -EPERM;
else {
atomic_inc(&intf->pm_usage_cnt);
if (atomic_read(&intf->pm_usage_cnt) > 0 &&
@@ -1654,28 +1725,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
-/**
- * usb_autopm_set_interface - set a USB interface's autosuspend state
- * @intf: the usb_interface whose state should be set
- *
- * This routine sets the autosuspend state of @intf's device according
- * to @intf's usage counter, which the caller must have set previously.
- * If the counter is <= 0, the device is autosuspended (if it isn't
- * already suspended and if nothing else prevents the autosuspend). If
- * the counter is > 0, the device is autoresumed (if it isn't already
- * awake).
- */
-int usb_autopm_set_interface(struct usb_interface *intf)
-{
- int status;
-
- status = usb_autopm_do_interface(intf, 0);
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
- return status;
-}
-EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
-
#else
void usb_autosuspend_work(struct work_struct *work)
@@ -1707,7 +1756,6 @@ int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
do_unbind_rebind(udev, DO_UNBIND);
usb_pm_lock(udev);
- udev->auto_pm = 0;
status = usb_suspend_both(udev, msg);
usb_pm_unlock(udev);
return status;
@@ -1730,7 +1778,6 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
int status;
usb_pm_lock(udev);
- udev->auto_pm = 0;
status = usb_resume_both(udev, msg);
udev->last_busy = jiffies;
usb_pm_unlock(udev);
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 222ee07ea68..bfc6c2eea64 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -99,6 +99,7 @@ static int init_usb_class(void)
printk(KERN_ERR "class_create failed for usb devices\n");
kfree(usb_class);
usb_class = NULL;
+ goto exit;
}
usb_class->class->devnode = usb_devnode;
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 05e6d313961..bdf87a8414a 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -139,7 +139,7 @@ int usb_choose_configuration(struct usb_device *udev)
if (best) {
i = best->desc.bConfigurationValue;
- dev_info(&udev->dev,
+ dev_dbg(&udev->dev,
"configuration #%d chosen from %d choice%s\n",
i, num_configs, plural(num_configs));
} else {
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 91f2885b6ee..2dcf906df56 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -363,7 +363,7 @@ static int hcd_pci_restore(struct device *dev)
return resume_common(dev, true);
}
-struct dev_pm_ops usb_hcd_pci_pm_ops = {
+const struct dev_pm_ops usb_hcd_pci_pm_ops = {
.suspend = hcd_pci_suspend,
.suspend_noirq = hcd_pci_suspend_noirq,
.resume_noirq = hcd_pci_resume_noirq,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 34de475f016..6dac3b802d4 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -38,6 +38,7 @@
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <linux/usb.h>
@@ -1275,13 +1276,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (usb_endpoint_xfer_control(&urb->ep->desc)
&& !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
- if (hcd->self.uses_dma)
+ if (hcd->self.uses_dma) {
urb->setup_dma = dma_map_single(
hcd->self.controller,
urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
- else if (hcd->driver->flags & HCD_LOCAL_MEM)
+ if (dma_mapping_error(hcd->self.controller,
+ urb->setup_dma))
+ return -EAGAIN;
+ } else if (hcd->driver->flags & HCD_LOCAL_MEM)
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
&urb->setup_dma,
@@ -1293,13 +1297,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (ret == 0 && urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (hcd->self.uses_dma)
+ if (hcd->self.uses_dma) {
urb->transfer_dma = dma_map_single (
hcd->self.controller,
urb->transfer_buffer,
urb->transfer_buffer_length,
dir);
- else if (hcd->driver->flags & HCD_LOCAL_MEM) {
+ if (dma_mapping_error(hcd->self.controller,
+ urb->transfer_dma))
+ return -EAGAIN;
+ } else if (hcd->driver->flags & HCD_LOCAL_MEM) {
ret = hcd_alloc_coherent(
urb->dev->bus, mem_flags,
&urb->transfer_dma,
@@ -1589,19 +1596,32 @@ rescan:
}
}
-/* Check whether a new configuration or alt setting for an interface
- * will exceed the bandwidth for the bus (or the host controller resources).
- * Only pass in a non-NULL config or interface, not both!
- * Passing NULL for both new_config and new_intf means the device will be
- * de-configured by issuing a set configuration 0 command.
+/**
+ * Check whether a new bandwidth setting exceeds the bus bandwidth.
+ * @new_config: new configuration to install
+ * @cur_alt: the current alternate interface setting
+ * @new_alt: alternate interface setting that is being installed
+ *
+ * To change configurations, pass in the new configuration in new_config,
+ * and pass NULL for cur_alt and new_alt.
+ *
+ * To reset a device's configuration (put the device in the ADDRESSED state),
+ * pass in NULL for new_config, cur_alt, and new_alt.
+ *
+ * To change alternate interface settings, pass in NULL for new_config,
+ * pass in the current alternate interface setting in cur_alt,
+ * and pass in the new alternate interface setting in new_alt.
+ *
+ * Returns an error if the requested bandwidth change exceeds the
+ * bus bandwidth or host controller internal resources.
*/
-int usb_hcd_check_bandwidth(struct usb_device *udev,
+int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_host_config *new_config,
- struct usb_interface *new_intf)
+ struct usb_host_interface *cur_alt,
+ struct usb_host_interface *new_alt)
{
int num_intfs, i, j;
- struct usb_interface_cache *intf_cache;
- struct usb_host_interface *alt = 0;
+ struct usb_host_interface *alt = NULL;
int ret = 0;
struct usb_hcd *hcd;
struct usb_host_endpoint *ep;
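
A sketch of the three calling conventions documented above, together with the new hcd->bandwidth_mutex that callers such as usb_set_configuration() and usb_set_interface() take around the allocation (the helper name is made up and error handling is trimmed):

/* Illustration only -- not part of the patch. */
static int example_bandwidth_usage(struct usb_device *udev,
		struct usb_host_config *new_config,
		struct usb_host_interface *cur_alt,
		struct usb_host_interface *new_alt)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
	int ret;

	mutex_lock(&hcd->bandwidth_mutex);
	if (new_config)
		/* install a whole configuration */
		ret = usb_hcd_alloc_bandwidth(udev, new_config, NULL, NULL);
	else if (cur_alt && new_alt)
		/* swap one alternate setting for another */
		ret = usb_hcd_alloc_bandwidth(udev, NULL, cur_alt, new_alt);
	else
		/* de-configure: drop back to the ADDRESSED state */
		ret = usb_hcd_alloc_bandwidth(udev, NULL, NULL, NULL);
	mutex_unlock(&hcd->bandwidth_mutex);

	return ret;
}

The real callers in message.c keep the mutex held across the following SET_CONFIGURATION or SET_INTERFACE control message and roll the allocation back if that message fails.
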
@@ -1611,7 +1631,7 @@ int usb_hcd_check_bandwidth(struct usb_device *udev,
return 0;
/* Configuration is being removed - set configuration 0 */
- if (!new_config && !new_intf) {
+ if (!new_config && !cur_alt) {
for (i = 1; i < 16; ++i) {
ep = udev->ep_out[i];
if (ep)
@@ -1648,19 +1668,12 @@ int usb_hcd_check_bandwidth(struct usb_device *udev,
}
}
for (i = 0; i < num_intfs; ++i) {
+ /* Set up endpoints for alternate interface setting 0 */
+ alt = usb_find_alt_setting(new_config, i, 0);
+ if (!alt)
+ /* No alt setting 0? Pick the first setting. */
+ alt = &new_config->intf_cache[i]->altsetting[0];
- /* Dig the endpoints for alt setting 0 out of the
- * interface cache for this interface
- */
- intf_cache = new_config->intf_cache[i];
- for (j = 0; j < intf_cache->num_altsetting; j++) {
- if (intf_cache->altsetting[j].desc.bAlternateSetting == 0)
- alt = &intf_cache->altsetting[j];
- }
- if (!alt) {
- printk(KERN_DEBUG "Did not find alt setting 0 for intf %d\n", i);
- continue;
- }
for (j = 0; j < alt->desc.bNumEndpoints; j++) {
ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
if (ret < 0)
@@ -1668,6 +1681,22 @@ int usb_hcd_check_bandwidth(struct usb_device *udev,
}
}
}
+ if (cur_alt && new_alt) {
+ /* Drop all the endpoints in the current alt setting */
+ for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) {
+ ret = hcd->driver->drop_endpoint(hcd, udev,
+ &cur_alt->endpoint[i]);
+ if (ret < 0)
+ goto reset;
+ }
+ /* Add all the endpoints in the new alt setting */
+ for (i = 0; i < new_alt->desc.bNumEndpoints; i++) {
+ ret = hcd->driver->add_endpoint(hcd, udev,
+ &new_alt->endpoint[i]);
+ if (ret < 0)
+ goto reset;
+ }
+ }
ret = hcd->driver->check_bandwidth(hcd, udev);
reset:
if (ret < 0)
@@ -1984,6 +2013,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
+ mutex_init(&hcd->bandwidth_mutex);
hcd->driver = driver;
hcd->product_desc = (driver->product_desc) ? driver->product_desc :
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 79782a1c43f..bbe2b924aae 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -111,6 +111,20 @@ struct usb_hcd {
u64 rsrc_len; /* memory/io resource length */
unsigned power_budget; /* in mA, 0 = no limit */
+ /* bandwidth_mutex should be taken before adding or removing
+ * any new bus bandwidth constraints:
+ * 1. Before adding a configuration for a new device.
+ * 2. Before removing the configuration to put the device into
+ * the addressed state.
+ * 3. Before selecting a different configuration.
+ * 4. Before selecting an alternate interface setting.
+ *
+ * bandwidth_mutex should be dropped after a successful control message
+ * to the device, or resetting the bandwidth after a failed attempt.
+ */
+ struct mutex bandwidth_mutex;
+
+
#define HCD_BUFFER_POOLS 4
struct dma_pool *pool [HCD_BUFFER_POOLS];
@@ -290,9 +304,10 @@ extern void usb_hcd_disable_endpoint(struct usb_device *udev,
extern void usb_hcd_reset_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
-extern int usb_hcd_check_bandwidth(struct usb_device *udev,
+extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_host_config *new_config,
- struct usb_interface *new_intf);
+ struct usb_host_interface *old_alt,
+ struct usb_host_interface *new_alt);
extern int usb_hcd_get_frame_number(struct usb_device *udev);
extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
@@ -315,7 +330,7 @@ extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
#ifdef CONFIG_PM_SLEEP
-extern struct dev_pm_ops usb_hcd_pci_pm_ops;
+extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
#endif /* CONFIG_PCI */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0f857e64505..06af970e106 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -45,7 +45,6 @@ struct usb_hub {
/* buffer for urb ... with extra space in case of babble */
char (*buffer)[8];
- dma_addr_t buffer_dma; /* DMA address for buffer */
union {
struct usb_hub_status hub;
struct usb_port_status port;
@@ -61,6 +60,8 @@ struct usb_hub {
status change */
unsigned long busy_bits[1]; /* ports being reset or
resumed */
+ unsigned long removed_bits[1]; /* ports with a "removed"
+ device present */
#if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
#error event_bits[] is too short!
#endif
@@ -70,6 +71,7 @@ struct usb_hub {
unsigned mA_per_port; /* current for each child */
+ unsigned init_done:1;
unsigned limited_power:1;
unsigned quiescing:1;
unsigned disconnected:1;
@@ -374,12 +376,13 @@ static void kick_khubd(struct usb_hub *hub)
{
unsigned long flags;
- /* Suppress autosuspend until khubd runs */
- atomic_set(&to_usb_interface(hub->intfdev)->pm_usage_cnt, 1);
-
spin_lock_irqsave(&hub_event_lock, flags);
if (!hub->disconnected && list_empty(&hub->event_list)) {
list_add_tail(&hub->event_list, &hub_event_list);
+
+ /* Suppress autosuspend until khubd runs */
+ usb_autopm_get_interface_no_resume(
+ to_usb_interface(hub->intfdev));
wake_up(&khubd_wait);
}
spin_unlock_irqrestore(&hub_event_lock, flags);
@@ -636,8 +639,35 @@ static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
kick_khubd(hub);
}
+/**
+ * usb_remove_device - disable a device's port on its parent hub
+ * @udev: device to be disabled and removed
+ * Context: @udev locked, must be able to sleep.
+ *
+ * After @udev's port has been disabled, khubd is notified and it will
+ * see that the device has been disconnected. When the device is
+ * physically unplugged and something is plugged in, the events will
+ * be received and processed normally.
+ */
+int usb_remove_device(struct usb_device *udev)
+{
+ struct usb_hub *hub;
+ struct usb_interface *intf;
+
+ if (!udev->parent) /* Can't remove a root hub */
+ return -EINVAL;
+ hub = hdev_to_hub(udev->parent);
+ intf = to_usb_interface(hub->intfdev);
+
+ usb_autopm_get_interface(intf);
+ set_bit(udev->portnum, hub->removed_bits);
+ hub_port_logical_disconnect(hub, udev->portnum);
+ usb_autopm_put_interface(intf);
+ return 0;
+}
+
enum hub_activation_type {
- HUB_INIT, HUB_INIT2, HUB_INIT3,
+ HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */
HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
};
@@ -682,8 +712,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
msecs_to_jiffies(delay));
/* Suppress autosuspend until init is done */
- atomic_set(&to_usb_interface(hub->intfdev)->
- pm_usage_cnt, 1);
+ usb_autopm_get_interface_no_resume(
+ to_usb_interface(hub->intfdev));
return; /* Continues at init2: below */
} else {
hub_power_on(hub, true);
@@ -731,6 +761,13 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_C_ENABLE);
}
+ /* We can forget about a "removed" device when there's a
+ * physical disconnect or the connect status changes.
+ */
+ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portchange & USB_PORT_STAT_C_CONNECTION))
+ clear_bit(port1, hub->removed_bits);
+
if (!udev || udev->state == USB_STATE_NOTATTACHED) {
/* Tell khubd to disconnect the device or
* check for a new connection
@@ -783,6 +820,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
}
init3:
hub->quiescing = 0;
+ hub->init_done = 1;
status = usb_submit_urb(hub->urb, GFP_NOIO);
if (status < 0)
@@ -792,6 +830,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Scan all ports that need attention */
kick_khubd(hub);
+
+ /* Allow autosuspend if it was suppressed */
+ if (type <= HUB_INIT3)
+ usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
}
/* Implement the continuations for the delays above */
@@ -819,6 +861,11 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
int i;
cancel_delayed_work_sync(&hub->init_work);
+ if (!hub->init_done) {
+ hub->init_done = 1;
+ usb_autopm_put_interface_no_suspend(
+ to_usb_interface(hub->intfdev));
+ }
/* khubd and related activity won't re-trigger */
hub->quiescing = 1;
@@ -869,8 +916,7 @@ static int hub_configure(struct usb_hub *hub,
int maxp, ret;
char *message = "out of memory";
- hub->buffer = usb_buffer_alloc(hdev, sizeof(*hub->buffer), GFP_KERNEL,
- &hub->buffer_dma);
+ hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
if (!hub->buffer) {
ret = -ENOMEM;
goto fail;
@@ -1111,8 +1157,6 @@ static int hub_configure(struct usb_hub *hub,
usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq,
hub, endpoint->bInterval);
- hub->urb->transfer_dma = hub->buffer_dma;
- hub->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* maybe cycle the hub leds */
if (hub->has_indicators && blinkenlights)
@@ -1144,7 +1188,10 @@ static void hub_disconnect(struct usb_interface *intf)
/* Take the hub off the event list and don't let it be added again */
spin_lock_irq(&hub_event_lock);
- list_del_init(&hub->event_list);
+ if (!list_empty(&hub->event_list)) {
+ list_del_init(&hub->event_list);
+ usb_autopm_put_interface_no_suspend(intf);
+ }
hub->disconnected = 1;
spin_unlock_irq(&hub_event_lock);
@@ -1162,8 +1209,7 @@ static void hub_disconnect(struct usb_interface *intf)
kfree(hub->port_owners);
kfree(hub->descriptor);
kfree(hub->status);
- usb_buffer_free(hub->hdev, sizeof(*hub->buffer), hub->buffer,
- hub->buffer_dma);
+ kfree(hub->buffer);
kref_put(&hub->kref, hub_release);
}
@@ -1630,7 +1676,7 @@ static int usb_configure_device_otg(struct usb_device *udev)
if (!udev->bus->is_b_host
&& udev->config
&& udev->parent == udev->bus->root_hub) {
- struct usb_otg_descriptor *desc = 0;
+ struct usb_otg_descriptor *desc = NULL;
struct usb_bus *bus = udev->bus;
/* descriptor may appear anywhere in config */
@@ -2123,9 +2169,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
USB_DEVICE_REMOTE_WAKEUP, 0,
NULL, 0,
USB_CTRL_SET_TIMEOUT);
- if (status)
+ if (status) {
dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
status);
+ /* bail if autosuspend is requested */
+ if (msg.event & PM_EVENT_AUTO)
+ return status;
+ }
}
/* see 7.1.7.6 */
@@ -2134,7 +2184,8 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
/* paranoia: "should not happen" */
- (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ if (udev->do_remote_wakeup)
+ (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
USB_DEVICE_REMOTE_WAKEUP, 0,
NULL, 0,
@@ -2965,6 +3016,13 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
usb_disconnect(&hdev->children[port1-1]);
clear_bit(port1, hub->change_bits);
+ /* We can forget about a "removed" device when there's a physical
+ * disconnect or the connect status changes.
+ */
+ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+ (portchange & USB_PORT_STAT_C_CONNECTION))
+ clear_bit(port1, hub->removed_bits);
+
if (portchange & (USB_PORT_STAT_C_CONNECTION |
USB_PORT_STAT_C_ENABLE)) {
status = hub_port_debounce(hub, port1);
@@ -2978,8 +3036,11 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
}
}
- /* Return now if debouncing failed or nothing is connected */
- if (!(portstatus & USB_PORT_STAT_CONNECTION)) {
+ /* Return now if debouncing failed or nothing is connected or
+ * the device was "removed".
+ */
+ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+ test_bit(port1, hub->removed_bits)) {
/* maybe switch power back on (e.g. root hub was reset) */
if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
@@ -3189,7 +3250,7 @@ static void hub_events(void)
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
if (unlikely(hub->disconnected))
- goto loop;
+ goto loop2;
/* If the hub has died, clean up after it */
if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3338,11 +3399,15 @@ static void hub_events(void)
}
}
-loop_autopm:
- /* Allow autosuspend if we're not going to run again */
- if (list_empty(&hub->event_list))
- usb_autopm_enable(intf);
-loop:
+ loop_autopm:
+ /* Balance the usb_autopm_get_interface() above */
+ usb_autopm_put_interface_no_suspend(intf);
+ loop:
+ /* Balance the usb_autopm_get_interface_no_resume() in
+ * kick_khubd() and allow autosuspend.
+ */
+ usb_autopm_put_interface(intf);
+ loop2:
usb_unlock_device(hdev);
kref_put(&hub->kref, hub_release);
@@ -3534,6 +3599,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
{
struct usb_device *parent_hdev = udev->parent;
struct usb_hub *parent_hub;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor = udev->descriptor;
int i, ret = 0;
int port1 = udev->portnum;
@@ -3577,6 +3643,16 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
/* Restore the device's previous configuration */
if (!udev->actconfig)
goto done;
+
+ mutex_lock(&hcd->bandwidth_mutex);
+ ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
+ if (ret < 0) {
+ dev_warn(&udev->dev,
+ "Busted HC? Not enough HCD resources for "
+ "old configuration.\n");
+ mutex_unlock(&hcd->bandwidth_mutex);
+ goto re_enumerate;
+ }
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_CONFIGURATION, 0,
udev->actconfig->desc.bConfigurationValue, 0,
@@ -3585,8 +3661,10 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
dev_err(&udev->dev,
"can't restore configuration #%d (error=%d)\n",
udev->actconfig->desc.bConfigurationValue, ret);
+ mutex_unlock(&hcd->bandwidth_mutex);
goto re_enumerate;
}
+ mutex_unlock(&hcd->bandwidth_mutex);
usb_set_device_state(udev, USB_STATE_CONFIGURED);
/* Put interfaces back into the same altsettings as before.
@@ -3596,7 +3674,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
* endpoint state.
*/
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
- struct usb_interface *intf = udev->actconfig->interface[i];
+ struct usb_host_config *config = udev->actconfig;
+ struct usb_interface *intf = config->interface[i];
struct usb_interface_descriptor *desc;
desc = &intf->cur_altsetting->desc;
@@ -3605,6 +3684,17 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
usb_enable_interface(udev, intf, true);
ret = 0;
} else {
+ /* We've just reset the device, so it will think alt
+ * setting 0 is installed. For usb_set_interface() to
+ * work properly, we need to set the current alternate
+ * interface setting to 0 (or the first alt setting, if
+ * the device doesn't have alt setting 0).
+ */
+ intf->cur_altsetting =
+ usb_find_alt_setting(config, i, 0);
+ if (!intf->cur_altsetting)
+ intf->cur_altsetting =
+ &config->intf_cache[i]->altsetting[0];
ret = usb_set_interface(udev, desc->bInterfaceNumber,
desc->bAlternateSetting);
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e80f1af438c..1b994846e8e 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -393,13 +393,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
if (io->entries <= 0)
return io->entries;
- /* If we're running on an xHCI host controller, queue the whole scatter
- * gather list with one call to urb_enqueue(). This is only for bulk,
- * as that endpoint type does not care how the data gets broken up
- * across frames.
- */
- if (usb_pipebulk(pipe) &&
- bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) {
+ if (dev->bus->sg_tablesize > 0) {
io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
use_sg = true;
} else {
@@ -409,7 +403,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
if (!io->urbs)
goto nomem;
- urb_flags = URB_NO_INTERRUPT;
+ urb_flags = 0;
if (dma)
urb_flags |= URB_NO_TRANSFER_DMA_MAP;
if (usb_pipein(pipe))
@@ -441,6 +435,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
io->urbs[0]->num_sgs = io->entries;
io->entries = 1;
} else {
+ urb_flags |= URB_NO_INTERRUPT;
for_each_sg(sg, sg, io->entries, i) {
unsigned len;
@@ -1303,6 +1298,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
{
struct usb_interface *iface;
struct usb_host_interface *alt;
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int ret;
int manual = 0;
unsigned int epaddr;
@@ -1325,6 +1321,18 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
return -EINVAL;
}
+ /* Make sure we have enough bandwidth for this alternate interface.
+ * Remove the current alt setting and add the new alt setting.
+ */
+ mutex_lock(&hcd->bandwidth_mutex);
+ ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
+ if (ret < 0) {
+ dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
+ alternate);
+ mutex_unlock(&hcd->bandwidth_mutex);
+ return ret;
+ }
+
if (dev->quirks & USB_QUIRK_NO_SET_INTF)
ret = -EPIPE;
else
@@ -1340,8 +1348,13 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
"manual set_interface for iface %d, alt %d\n",
interface, alternate);
manual = 1;
- } else if (ret < 0)
+ } else if (ret < 0) {
+ /* Re-instate the old alt setting */
+ usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
+ mutex_unlock(&hcd->bandwidth_mutex);
return ret;
+ }
+ mutex_unlock(&hcd->bandwidth_mutex);
/* FIXME drivers shouldn't need to replicate/bugfix the logic here
* when they implement async or easily-killable versions of this or
@@ -1423,6 +1436,7 @@ int usb_reset_configuration(struct usb_device *dev)
{
int i, retval;
struct usb_host_config *config;
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
@@ -1438,12 +1452,46 @@ int usb_reset_configuration(struct usb_device *dev)
}
config = dev->actconfig;
+ retval = 0;
+ mutex_lock(&hcd->bandwidth_mutex);
+ /* Make sure we have enough bandwidth for each alternate setting 0 */
+ for (i = 0; i < config->desc.bNumInterfaces; i++) {
+ struct usb_interface *intf = config->interface[i];
+ struct usb_host_interface *alt;
+
+ alt = usb_altnum_to_altsetting(intf, 0);
+ if (!alt)
+ alt = &intf->altsetting[0];
+ if (alt != intf->cur_altsetting)
+ retval = usb_hcd_alloc_bandwidth(dev, NULL,
+ intf->cur_altsetting, alt);
+ if (retval < 0)
+ break;
+ }
+ /* If not, reinstate the old alternate settings */
+ if (retval < 0) {
+reset_old_alts:
+ for (; i >= 0; i--) {
+ struct usb_interface *intf = config->interface[i];
+ struct usb_host_interface *alt;
+
+ alt = usb_altnum_to_altsetting(intf, 0);
+ if (!alt)
+ alt = &intf->altsetting[0];
+ if (alt != intf->cur_altsetting)
+ usb_hcd_alloc_bandwidth(dev, NULL,
+ alt, intf->cur_altsetting);
+ }
+ mutex_unlock(&hcd->bandwidth_mutex);
+ return retval;
+ }
retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
USB_REQ_SET_CONFIGURATION, 0,
config->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval < 0)
- return retval;
+ goto reset_old_alts;
+ mutex_unlock(&hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
for (i = 0; i < config->desc.bNumInterfaces; i++) {
@@ -1585,7 +1633,7 @@ static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
*
* See usb_queue_reset_device() for more details
*/
-void __usb_queue_reset_device(struct work_struct *ws)
+static void __usb_queue_reset_device(struct work_struct *ws)
{
int rc;
struct usb_interface *iface =
@@ -1652,6 +1700,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
int i, ret;
struct usb_host_config *cp = NULL;
struct usb_interface **new_interfaces = NULL;
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
int n, nintf;
if (dev->authorized == 0 || configuration == -1)
@@ -1721,12 +1770,11 @@ free_interfaces:
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
- if (cp)
- ret = usb_hcd_check_bandwidth(dev, cp, NULL);
- else
- ret = usb_hcd_check_bandwidth(dev, NULL, NULL);
+ mutex_lock(&hcd->bandwidth_mutex);
+ ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
usb_autosuspend_device(dev);
+ mutex_unlock(&hcd->bandwidth_mutex);
goto free_interfaces;
}
@@ -1752,10 +1800,12 @@ free_interfaces:
dev->actconfig = cp;
if (!cp) {
usb_set_device_state(dev, USB_STATE_ADDRESS);
- usb_hcd_check_bandwidth(dev, NULL, NULL);
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
usb_autosuspend_device(dev);
+ mutex_unlock(&hcd->bandwidth_mutex);
goto free_interfaces;
}
+ mutex_unlock(&hcd->bandwidth_mutex);
usb_set_device_state(dev, USB_STATE_CONFIGURED);
/* Initialize the new interface structures and the
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 7ec3041ae79..15477008b63 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -139,6 +139,16 @@ show_devnum(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL);
static ssize_t
+show_devpath(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev;
+
+ udev = to_usb_device(dev);
+ return sprintf(buf, "%s\n", udev->devpath);
+}
+static DEVICE_ATTR(devpath, S_IRUGO, show_devpath, NULL);
+
+static ssize_t
show_version(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
@@ -317,7 +327,6 @@ static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR,
static const char on_string[] = "on";
static const char auto_string[] = "auto";
-static const char suspend_string[] = "suspend";
static ssize_t
show_level(struct device *dev, struct device_attribute *attr, char *buf)
@@ -325,13 +334,8 @@ show_level(struct device *dev, struct device_attribute *attr, char *buf)
struct usb_device *udev = to_usb_device(dev);
const char *p = auto_string;
- if (udev->state == USB_STATE_SUSPENDED) {
- if (udev->autoresume_disabled)
- p = suspend_string;
- } else {
- if (udev->autosuspend_disabled)
- p = on_string;
- }
+ if (udev->state != USB_STATE_SUSPENDED && udev->autosuspend_disabled)
+ p = on_string;
return sprintf(buf, "%s\n", p);
}
@@ -343,7 +347,7 @@ set_level(struct device *dev, struct device_attribute *attr,
int len = count;
char *cp;
int rc = 0;
- int old_autosuspend_disabled, old_autoresume_disabled;
+ int old_autosuspend_disabled;
cp = memchr(buf, '\n', count);
if (cp)
@@ -351,7 +355,6 @@ set_level(struct device *dev, struct device_attribute *attr,
usb_lock_device(udev);
old_autosuspend_disabled = udev->autosuspend_disabled;
- old_autoresume_disabled = udev->autoresume_disabled;
/* Setting the flags without calling usb_pm_lock is a subject to
* races, but who cares...
@@ -359,28 +362,18 @@ set_level(struct device *dev, struct device_attribute *attr,
if (len == sizeof on_string - 1 &&
strncmp(buf, on_string, len) == 0) {
udev->autosuspend_disabled = 1;
- udev->autoresume_disabled = 0;
rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
} else if (len == sizeof auto_string - 1 &&
strncmp(buf, auto_string, len) == 0) {
udev->autosuspend_disabled = 0;
- udev->autoresume_disabled = 0;
rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
- } else if (len == sizeof suspend_string - 1 &&
- strncmp(buf, suspend_string, len) == 0) {
- udev->autosuspend_disabled = 0;
- udev->autoresume_disabled = 1;
- rc = usb_external_suspend_device(udev, PMSG_USER_SUSPEND);
-
} else
rc = -EINVAL;
- if (rc) {
+ if (rc)
udev->autosuspend_disabled = old_autosuspend_disabled;
- udev->autoresume_disabled = old_autoresume_disabled;
- }
usb_unlock_device(udev);
return (rc < 0 ? rc : count);
}
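
With the "suspend" level gone, power/level accepts only "on" and "auto" (a trailing newline is stripped by the memchr() above). A userspace sketch (the device name "1-1" is a placeholder):

#include <stdio.h>

int main(void)
{
	/* Hypothetical example: re-enable autosuspend for device 1-1. */
	FILE *f = fopen("/sys/bus/usb/devices/1-1/power/level", "w");

	if (!f)
		return 1;
	fputs("auto\n", f);
	fclose(f);
	return 0;
}
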
@@ -508,6 +501,28 @@ static ssize_t usb_dev_authorized_store(struct device *dev,
static DEVICE_ATTR(authorized, 0644,
usb_dev_authorized_show, usb_dev_authorized_store);
+/* "Safely remove a device" */
+static ssize_t usb_remove_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ int rc = 0;
+
+ usb_lock_device(udev);
+ if (udev->state != USB_STATE_NOTATTACHED) {
+
+ /* To avoid races, first unconfigure and then remove */
+ usb_set_configuration(udev, -1);
+ rc = usb_remove_device(udev);
+ }
+ if (rc == 0)
+ rc = count;
+ usb_unlock_device(udev);
+ return rc;
+}
+static DEVICE_ATTR(remove, 0200, NULL, usb_remove_store);
+
static struct attribute *dev_attrs[] = {
/* current configuration's attributes */
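
Any write to the new, write-only remove attribute unconfigures the device and then disables its upstream hub port via usb_remove_device(); the written value itself is ignored. A userspace sketch (the device name "1-1" is a placeholder):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical example: "safely remove" the device at bus 1, port 1. */
	int fd = open("/sys/bus/usb/devices/1-1/remove", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);
	close(fd);
	return 0;
}
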
@@ -516,8 +531,8 @@ static struct attribute *dev_attrs[] = {
&dev_attr_bConfigurationValue.attr,
&dev_attr_bmAttributes.attr,
&dev_attr_bMaxPower.attr,
- &dev_attr_urbnum.attr,
/* device attributes */
+ &dev_attr_urbnum.attr,
&dev_attr_idVendor.attr,
&dev_attr_idProduct.attr,
&dev_attr_bcdDevice.attr,
@@ -529,10 +544,12 @@ static struct attribute *dev_attrs[] = {
&dev_attr_speed.attr,
&dev_attr_busnum.attr,
&dev_attr_devnum.attr,
+ &dev_attr_devpath.attr,
&dev_attr_version.attr,
&dev_attr_maxchild.attr,
&dev_attr_quirks.attr,
&dev_attr_authorized.attr,
+ &dev_attr_remove.attr,
NULL,
};
static struct attribute_group dev_attr_grp = {
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 0885d4abdc6..e7cae133469 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -429,8 +429,16 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
/* too small? */
- if (urb->interval <= 0)
- return -EINVAL;
+ switch (dev->speed) {
+ case USB_SPEED_VARIABLE:
+ if (urb->interval < 6)
+ return -EINVAL;
+ break;
+ default:
+ if (urb->interval <= 0)
+ return -EINVAL;
+ break;
+ }
/* too big? */
switch (dev->speed) {
case USB_SPEED_SUPER: /* units are 125us */
@@ -438,6 +446,10 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (urb->interval > (1 << 15))
return -EINVAL;
max = 1 << 15;
+ case USB_SPEED_VARIABLE:
+ if (urb->interval > 16)
+ return -EINVAL;
+ break;
case USB_SPEED_HIGH: /* units are microframes */
/* NOTE usb handles 2^15 */
if (urb->interval > (1024 * 8))
@@ -461,8 +473,10 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
default:
return -EINVAL;
}
- /* Round down to a power of 2, no more than max */
- urb->interval = min(max, 1 << ilog2(urb->interval));
+ if (dev->speed != USB_SPEED_VARIABLE) {
+ /* Round down to a power of 2, no more than max */
+ urb->interval = min(max, 1 << ilog2(urb->interval));
+ }
}
return usb_hcd_submit_urb(urb, mem_flags);
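
A worked example of the rounding that the non-wireless speeds keep:

/* Illustration only: a high-speed interrupt URB submitted with
 * urb->interval = 10 (microframes) ends up with
 *	min(max, 1 << ilog2(10)) == 1 << 3 == 8,
 * whereas a USB_SPEED_VARIABLE URB now keeps its interval as submitted.
 */
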
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index b1b85abb9a2..2fb42043b30 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -64,6 +64,43 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
/**
+ * usb_find_alt_setting() - Given a configuration, find the alternate setting
+ * for the given interface.
+ * @config - the configuration to search (not necessarily the current config).
+ * @iface_num - interface number to search in
+ * @alt_num - alternate interface setting number to search for.
+ *
+ * Search the configuration's interface cache for the given alt setting.
+ */
+struct usb_host_interface *usb_find_alt_setting(
+ struct usb_host_config *config,
+ unsigned int iface_num,
+ unsigned int alt_num)
+{
+ struct usb_interface_cache *intf_cache = NULL;
+ int i;
+
+ for (i = 0; i < config->desc.bNumInterfaces; i++) {
+ if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
+ == iface_num) {
+ intf_cache = config->intf_cache[i];
+ break;
+ }
+ }
+ if (!intf_cache)
+ return NULL;
+ for (i = 0; i < intf_cache->num_altsetting; i++)
+ if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num)
+ return &intf_cache->altsetting[i];
+
+ printk(KERN_DEBUG "Did not find alt setting %u for intf %u, "
+ "config %u\n", alt_num, iface_num,
+ config->desc.bConfigurationValue);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(usb_find_alt_setting);
+
+/**
* usb_ifnum_to_if - get the interface object with a given interface number
* @dev: the device whose current configuration is considered
* @ifnum: the desired interface
@@ -132,7 +169,7 @@ EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
struct find_interface_arg {
int minor;
- struct usb_interface *interface;
+ struct device_driver *drv;
};
static int __find_interface(struct device *dev, void *data)
@@ -143,12 +180,10 @@ static int __find_interface(struct device *dev, void *data)
if (!is_usb_interface(dev))
return 0;
+ if (dev->driver != arg->drv)
+ return 0;
intf = to_usb_interface(dev);
- if (intf->minor != -1 && intf->minor == arg->minor) {
- arg->interface = intf;
- return 1;
- }
- return 0;
+ return intf->minor == arg->minor;
}
/**
@@ -156,21 +191,24 @@ static int __find_interface(struct device *dev, void *data)
* @drv: the driver whose current configuration is considered
* @minor: the minor number of the desired device
*
- * This walks the driver device list and returns a pointer to the interface
- * with the matching minor. Note, this only works for devices that share the
- * USB major number.
+ * This walks the bus device list and returns a pointer to the interface
+ * with the matching minor and driver. Note, this only works for devices
+ * that share the USB major number.
*/
struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
{
struct find_interface_arg argb;
- int retval;
+ struct device *dev;
argb.minor = minor;
- argb.interface = NULL;
- /* eat the error, it will be in argb.interface */
- retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb,
- __find_interface);
- return argb.interface;
+ argb.drv = &drv->drvwrap.driver;
+
+ dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
+
+ /* Drop reference count from bus_find_device */
+ put_device(dev);
+
+ return dev ? to_usb_interface(dev) : NULL;
}
EXPORT_SYMBOL_GPL(usb_find_interface);
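
usb_find_interface() is typically called from a driver's char-device open() to map a minor number back to the interface that driver bound; a minimal sketch with a hypothetical "foo" driver:

/* Illustration only; foo_driver is a made-up struct usb_driver. */
static int foo_open(struct inode *inode, struct file *file)
{
	struct usb_interface *intf;

	intf = usb_find_interface(&foo_driver, iminor(inode));
	if (!intf)
		return -ENODEV;

	file->private_data = usb_get_intfdata(intf);
	return 0;
}
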
@@ -291,7 +329,7 @@ static int usb_dev_restore(struct device *dev)
return usb_resume(dev, PMSG_RESTORE);
}
-static struct dev_pm_ops usb_device_pm_ops = {
+static const struct dev_pm_ops usb_device_pm_ops = {
.prepare = usb_dev_prepare,
.complete = usb_dev_complete,
.suspend = usb_dev_suspend,
@@ -1038,7 +1076,7 @@ static struct notifier_block usb_bus_nb = {
struct dentry *usb_debug_root;
EXPORT_SYMBOL_GPL(usb_debug_root);
-struct dentry *usb_debug_devices;
+static struct dentry *usb_debug_devices;
static int usb_debugfs_init(void)
{
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 9a8b15e6377..4c36c7f512a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -24,6 +24,7 @@ extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
extern int usb_deauthorize_device(struct usb_device *);
extern int usb_authorize_device(struct usb_device *);
extern void usb_detect_quirks(struct usb_device *udev);
+extern int usb_remove_device(struct usb_device *udev);
extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index a18e3c5dd82..ee411206c69 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -732,6 +732,24 @@ config USB_FILE_STORAGE_TEST
behavior of USB Mass Storage hosts. Not needed for
normal operation.
+config USB_MASS_STORAGE
+ tristate "Mass Storage Gadget"
+ depends on BLOCK
+ help
+ The Mass Storage Gadget acts as a USB Mass Storage disk drive.
+ As its storage repository it can use a regular file or a block
+ device (in much the same way as the "loop" device driver),
+ specified as a module parameter or sysfs option.
+
+ This is heavily based on File-backed Storage Gadget and in most
+ cases you will want to use FSG instead. This gadget is mostly
+ here to test the functionality of the Mass Storage Function
+ which may be used with the composite framework.
+
+ Say "y" to link the driver statically, or "m" to build
+ a dynamically linked module called "g_mass_storage". If unsure,
+ consider File-backed Storage Gadget.
+
config USB_G_SERIAL
tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
help
@@ -794,6 +812,48 @@ config USB_CDC_COMPOSITE
Say "y" to link the driver statically, or "m" to build a
dynamically linked module.
+config USB_G_MULTI
+ tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
+ depends on BLOCK && NET
+ help
+ The Multifunction Composite Gadget provides Ethernet (RNDIS
+ and/or CDC Ethernet), mass storage and ACM serial link
+ interfaces.
+
+ You will be asked to choose which of the two configurations is
+ to be available in the gadget. At least one configuration must
+ be chosen to make the gadget usable. Selecting more than one
+ configuration will prevent Windows from automatically detecting
+ the gadget as a composite gadget, so an INF file will be needed to
+ use the gadget.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "g_multi".
+
+config USB_G_MULTI_RNDIS
+ bool "RNDIS + CDC Serial + Storage configuration"
+ depends on USB_G_MULTI
+ default y
+ help
+ This option enables a configuration with RNDIS, CDC Serial and
+ Mass Storage functions available in the Multifunction Composite
+ Gadget. This is the configuration dedicated for Windows since RNDIS
+ is Microsoft's protocol.
+
+ If unsure, say "y".
+
+config USB_G_MULTI_CDC
+ bool "CDC Ethernet + CDC Serial + Storage configuration"
+ depends on USB_G_MULTI
+ default n
+ help
+ This option enables a configuration with CDC Ethernet (ECM), CDC
+ Serial and Mass Storage functions available in the Multifunction
+ Composite Gadget.
+
+ If unsure, say "y".
+
+
# put drivers that need isochronous transfer support (for audio
# or video class gadget drivers), or specific hardware, here.
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 9d7b87c52e9..2e2c047262b 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -39,16 +39,20 @@ g_serial-objs := serial.o
g_midi-objs := gmidi.o
gadgetfs-objs := inode.o
g_file_storage-objs := file_storage.o
+g_mass_storage-objs := mass_storage.o
g_printer-objs := printer.o
g_cdc-objs := cdc2.o
+g_multi-objs := multi.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
obj-$(CONFIG_USB_ETH) += g_ether.o
obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
+obj-$(CONFIG_USB_MASS_STORAGE) += g_mass_storage.o
obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
+obj-$(CONFIG_USB_G_MULTI) += g_multi.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 66450a1abc2..043e04db2a0 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -892,7 +892,7 @@ static void pullup(struct at91_udc *udc, int is_on)
txvc |= AT91_UDP_TXVC_PUON;
at91_udp_write(udc, AT91_UDP_TXVC, txvc);
- } else if (cpu_is_at91sam9261()) {
+ } else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
u32 usbpucr;
usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
@@ -910,7 +910,7 @@ static void pullup(struct at91_udc *udc, int is_on)
txvc &= ~AT91_UDP_TXVC_PUON;
at91_udp_write(udc, AT91_UDP_TXVC, txvc);
- } else if (cpu_is_at91sam9261()) {
+ } else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
u32 usbpucr;
usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
@@ -1692,7 +1692,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
udc->ep[3].maxpacket = 64;
udc->ep[4].maxpacket = 512;
udc->ep[5].maxpacket = 512;
- } else if (cpu_is_at91sam9261()) {
+ } else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
udc->ep[3].maxpacket = 64;
} else if (cpu_is_at91sam9263()) {
udc->ep[0].maxpacket = 64;
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index a3a0f4a27ef..58f22032384 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -89,120 +89,6 @@ static const struct usb_descriptor_header *otg_desc[] = {
/*-------------------------------------------------------------------------*/
-/**
- * Handle USB audio endpoint set/get command in setup class request
- */
-
-static int audio_set_endpoint_req(struct usb_configuration *c,
- const struct usb_ctrlrequest *ctrl)
-{
- struct usb_composite_dev *cdev = c->cdev;
- int value = -EOPNOTSUPP;
- u16 ep = le16_to_cpu(ctrl->wIndex);
- u16 len = le16_to_cpu(ctrl->wLength);
- u16 w_value = le16_to_cpu(ctrl->wValue);
-
- DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
- ctrl->bRequest, w_value, len, ep);
-
- switch (ctrl->bRequest) {
- case UAC_SET_CUR:
- value = 0;
- break;
-
- case UAC_SET_MIN:
- break;
-
- case UAC_SET_MAX:
- break;
-
- case UAC_SET_RES:
- break;
-
- case UAC_SET_MEM:
- break;
-
- default:
- break;
- }
-
- return value;
-}
-
-static int audio_get_endpoint_req(struct usb_configuration *c,
- const struct usb_ctrlrequest *ctrl)
-{
- struct usb_composite_dev *cdev = c->cdev;
- int value = -EOPNOTSUPP;
- u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
- u16 len = le16_to_cpu(ctrl->wLength);
- u16 w_value = le16_to_cpu(ctrl->wValue);
-
- DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
- ctrl->bRequest, w_value, len, ep);
-
- switch (ctrl->bRequest) {
- case UAC_GET_CUR:
- case UAC_GET_MIN:
- case UAC_GET_MAX:
- case UAC_GET_RES:
- value = 3;
- break;
- case UAC_GET_MEM:
- break;
- default:
- break;
- }
-
- return value;
-}
-
-static int
-audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
-{
- struct usb_composite_dev *cdev = c->cdev;
- struct usb_request *req = cdev->req;
- int value = -EOPNOTSUPP;
- u16 w_index = le16_to_cpu(ctrl->wIndex);
- u16 w_value = le16_to_cpu(ctrl->wValue);
- u16 w_length = le16_to_cpu(ctrl->wLength);
-
- /* composite driver infrastructure handles everything except
- * Audio class messages; interface activation uses set_alt().
- */
- switch (ctrl->bRequestType) {
- case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
- value = audio_set_endpoint_req(c, ctrl);
- break;
-
- case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
- value = audio_get_endpoint_req(c, ctrl);
- break;
-
- default:
- ERROR(cdev, "Invalid control req%02x.%02x v%04x i%04x l%d\n",
- ctrl->bRequestType, ctrl->bRequest,
- w_value, w_index, w_length);
- }
-
- /* respond with data transfer or status phase? */
- if (value >= 0) {
- DBG(cdev, "Audio req%02x.%02x v%04x i%04x l%d\n",
- ctrl->bRequestType, ctrl->bRequest,
- w_value, w_index, w_length);
- req->zero = 0;
- req->length = value;
- value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
- if (value < 0)
- ERROR(cdev, "Audio response on err %d\n", value);
- }
-
- /* device either stalls (value < 0) or reports success */
- return value;
-}
-
-/*-------------------------------------------------------------------------*/
-
static int __init audio_do_config(struct usb_configuration *c)
{
/* FIXME alloc iConfiguration string, set it in c->strings */
@@ -220,7 +106,6 @@ static int __init audio_do_config(struct usb_configuration *c)
static struct usb_configuration audio_config_driver = {
.label = DRIVER_DESC,
.bind = audio_do_config,
- .setup = audio_setup,
.bConfigurationValue = 1,
/* .iConfiguration = DYNAMIC */
.bmAttributes = USB_CONFIG_ATT_SELFPOWER,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d05397ec8a1..09289bb1e20 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -373,6 +373,8 @@ static void reset_config(struct usb_composite_dev *cdev)
list_for_each_entry(f, &cdev->config->functions, list) {
if (f->disable)
f->disable(f);
+
+ bitmap_zero(f->endpoints, 32);
}
cdev->config = NULL;
}
@@ -418,10 +420,35 @@ static int set_config(struct usb_composite_dev *cdev,
/* Initialize all interfaces by setting them to altsetting zero. */
for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
struct usb_function *f = c->interface[tmp];
+ struct usb_descriptor_header **descriptors;
if (!f)
break;
+ /*
+ * Record which endpoints are used by the function. This is used
+ * to dispatch control requests targeted at that endpoint to the
+ * function's setup callback instead of the current
+ * configuration's setup callback.
+ */
+ if (gadget->speed == USB_SPEED_HIGH)
+ descriptors = f->hs_descriptors;
+ else
+ descriptors = f->descriptors;
+
+ for (; *descriptors; ++descriptors) {
+ struct usb_endpoint_descriptor *ep;
+ int addr;
+
+ if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
+ continue;
+
+ ep = (struct usb_endpoint_descriptor *)*descriptors;
+ addr = ((ep->bEndpointAddress & 0x80) >> 3)
+ | (ep->bEndpointAddress & 0x0f);
+ set_bit(addr, f->endpoints);
+ }
+
result = f->set_alt(f, tmp, 0);
if (result < 0) {
DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
@@ -688,6 +715,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
struct usb_function *f = NULL;
+ u8 endp;
/* partial re-init of the response message; the function or the
* gadget might need to intercept e.g. a control-OUT completion
@@ -800,23 +828,33 @@ unknown:
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
- /* functions always handle their interfaces ... punt other
- * recipients (endpoint, other, WUSB, ...) to the current
+ /* functions always handle their interfaces and endpoints...
+ * punt other recipients (other, WUSB, ...) to the current
* configuration code.
*
* REVISIT it could make sense to let the composite device
* take such requests too, if that's ever needed: to work
* in config 0, etc.
*/
- if ((ctrl->bRequestType & USB_RECIP_MASK)
- == USB_RECIP_INTERFACE) {
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_INTERFACE:
f = cdev->config->interface[intf];
- if (f && f->setup)
- value = f->setup(f, ctrl);
- else
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
+ list_for_each_entry(f, &cdev->config->functions, list) {
+ if (test_bit(endp, f->endpoints))
+ break;
+ }
+ if (&f->list == &cdev->config->functions)
f = NULL;
+ break;
}
- if (value < 0 && !f) {
+
+ if (f && f->setup)
+ value = f->setup(f, ctrl);
+ else {
struct usb_configuration *c;
c = cdev->config;
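
The endp value computed above uses the same address-to-bit mapping that set_config() records in the 32-bit f->endpoints bitmap: OUT endpoints occupy bits 0-15 and IN endpoints bits 16-31. A small sketch of the mapping (the helper name is illustrative):

/* Illustration only -- not part of the patch. */
static inline unsigned int ep_to_bit(u8 bEndpointAddress)
{
	return ((bEndpointAddress & USB_DIR_IN) >> 3) |
	       (bEndpointAddress & 0x0f);
}
/* e.g. ep_to_bit(0x02) == 2 (OUT endpoint 2), ep_to_bit(0x81) == 17 (IN endpoint 1) */
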
@@ -1054,7 +1092,8 @@ static struct usb_gadget_driver composite_driver = {
.speed = USB_SPEED_HIGH,
.bind = composite_bind,
- .unbind = __exit_p(composite_unbind),
+ /* .unbind = __exit_p(composite_unbind), */
+ .unbind = composite_unbind,
.setup = composite_setup,
.disconnect = composite_disconnect,
@@ -1103,7 +1142,7 @@ int __init usb_composite_register(struct usb_composite_driver *driver)
* This function is used to unregister drivers using the composite
* driver framework.
*/
-void __exit usb_composite_unregister(struct usb_composite_driver *driver)
+void /* __exit */ usb_composite_unregister(struct usb_composite_driver *driver)
{
if (composite != driver)
return;
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 167cb2a8ece..141372b6e7a 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -25,6 +25,14 @@
#include <linux/kernel.h>
#include <linux/utsname.h>
+
+#if defined USB_ETH_RNDIS
+# undef USB_ETH_RNDIS
+#endif
+#ifdef CONFIG_USB_ETH_RNDIS
+# define USB_ETH_RNDIS y
+#endif
+
#include "u_ether.h"
@@ -66,7 +74,7 @@
#define DRIVER_DESC "Ethernet Gadget"
#define DRIVER_VERSION "Memorial Day 2008"
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
#define PREFIX "RNDIS/"
#else
#define PREFIX ""
@@ -87,7 +95,7 @@
static inline bool has_rndis(void)
{
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
return true;
#else
return false;
@@ -110,7 +118,7 @@ static inline bool has_rndis(void)
#include "f_ecm.c"
#include "f_subset.c"
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
#include "f_rndis.c"
#include "rndis.c"
#endif
@@ -251,7 +259,7 @@ static struct usb_configuration rndis_config_driver = {
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_ETH_EEM
+#ifdef USB_ETH_EEM
static int use_eem = 1;
#else
static int use_eem;
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 4e3657808b0..d10353d46b8 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -4,6 +4,8 @@
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 2009 by Samsung Electronics
+ * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
*
* This software is distributed under the terms of the GNU General
* Public License ("GPL") as published by the Free Software Foundation,
@@ -99,6 +101,20 @@ static inline struct f_acm *port_to_acm(struct gserial *p)
/* interface and class descriptors: */
+static struct usb_interface_assoc_descriptor
+acm_iad_descriptor = {
+ .bLength = sizeof acm_iad_descriptor,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ /* .bFirstInterface = DYNAMIC, */
+ .bInterfaceCount = 2, // control + data
+ .bFunctionClass = USB_CLASS_COMM,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
+ .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ /* .iFunction = DYNAMIC */
+};
+
+
static struct usb_interface_descriptor acm_control_interface_desc __initdata = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
@@ -178,6 +194,7 @@ static struct usb_endpoint_descriptor acm_fs_out_desc __initdata = {
};
static struct usb_descriptor_header *acm_fs_function[] __initdata = {
+ (struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
@@ -216,6 +233,7 @@ static struct usb_endpoint_descriptor acm_hs_out_desc __initdata = {
};
static struct usb_descriptor_header *acm_hs_function[] __initdata = {
+ (struct usb_descriptor_header *) &acm_iad_descriptor,
(struct usb_descriptor_header *) &acm_control_interface_desc,
(struct usb_descriptor_header *) &acm_header_desc,
(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
@@ -232,11 +250,13 @@ static struct usb_descriptor_header *acm_hs_function[] __initdata = {
#define ACM_CTRL_IDX 0
#define ACM_DATA_IDX 1
+#define ACM_IAD_IDX 2
/* static strings, in UTF-8 */
static struct usb_string acm_string_defs[] = {
[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
[ACM_DATA_IDX].s = "CDC ACM Data",
+ [ACM_IAD_IDX ].s = "CDC Serial",
{ /* ZEROES END LIST */ },
};
@@ -563,6 +583,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
if (status < 0)
goto fail;
acm->ctrl_id = status;
+ acm_iad_descriptor.bFirstInterface = status;
acm_control_interface_desc.bInterfaceNumber = status;
acm_union_desc .bMasterInterface0 = status;
@@ -732,6 +753,13 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
acm_string_defs[ACM_DATA_IDX].id = status;
acm_data_interface_desc.iInterface = status;
+
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ acm_string_defs[ACM_IAD_IDX].id = status;
+
+ acm_iad_descriptor.iFunction = status;
}
/* allocate and initialize one new instance */
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 98e9bb97729..c43c89ffa2c 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -445,6 +445,70 @@ static int audio_get_intf_req(struct usb_function *f,
return len;
}
+static int audio_set_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u16 ep = le16_to_cpu(ctrl->wIndex);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_SET_CUR:
+ value = 0;
+ break;
+
+ case UAC_SET_MIN:
+ break;
+
+ case UAC_SET_MAX:
+ break;
+
+ case UAC_SET_RES:
+ break;
+
+ case UAC_SET_MEM:
+ break;
+
+ default:
+ break;
+ }
+
+ return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int value = -EOPNOTSUPP;
+ u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+ u16 len = le16_to_cpu(ctrl->wLength);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+
+ DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+ ctrl->bRequest, w_value, len, ep);
+
+ switch (ctrl->bRequest) {
+ case UAC_GET_CUR:
+ case UAC_GET_MIN:
+ case UAC_GET_MAX:
+ case UAC_GET_RES:
+ value = 3;
+ break;
+ case UAC_GET_MEM:
+ break;
+ default:
+ break;
+ }
+
+ return value;
+}
+
static int
f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
@@ -455,8 +519,8 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
- /* composite driver infrastructure handles everything except
- * Audio class messages; interface activation uses set_alt().
+ /* composite driver infrastructure handles everything; interface
+ * activation uses set_alt().
*/
switch (ctrl->bRequestType) {
case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
@@ -467,6 +531,14 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
value = audio_get_intf_req(f, ctrl);
break;
+ case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_set_endpoint_req(f, ctrl);
+ break;
+
+ case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+ value = audio_get_endpoint_req(f, ctrl);
+ break;
+
default:
ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
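
For readers unfamiliar with the bRequestType values switched on in f_audio_setup() above, a tiny standalone sketch (not part of the patch) of how the direction, type and recipient fields are packed into that byte:

	#include <stdio.h>

	int main(void)
	{
		/* USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT == 0xA2 */
		unsigned char bRequestType = 0x80 | 0x20 | 0x02;

		printf("dir=%s type=%u recipient=%u\n",
		       (bRequestType & 0x80) ? "IN" : "OUT",
		       (bRequestType >> 5) & 0x03,  /* 0=standard 1=class 2=vendor */
		       bRequestType & 0x1f);        /* 0=device 1=interface 2=endpoint */
		return 0;
	}
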
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
new file mode 100644
index 00000000000..a37640eba43
--- /dev/null
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -0,0 +1,3091 @@
+/*
+ * f_mass_storage.c -- Mass Storage USB Composite Function
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * The Mass Storage Function acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive. In
+ * addition to providing an example of a genuinely useful composite
+ * function for a USB device, it also illustrates a technique of
+ * double-buffering for increased throughput.
+ *
+ * The function supports multiple logical units (LUNs). Backing storage
+ * for each LUN is provided by a regular file or a block device.
+ * Access for each LUN can be limited to read-only. Moreover, the
+ * function can indicate that a LUN is removable and/or a CD-ROM. (The
+ * latter implies read-only access.)
+ *
+ * MSF is configured by specifying an fsg_config structure. It has the
+ * following fields:
+ *
+ * nluns Number of LUNs the function has (anywhere from 1
+ * to FSG_MAX_LUNS, which is 8).
+ * luns An array of LUN configuration values. This
+ * should be filled in for each LUN that the
+ * function will include (i.e. for "nluns"
+ * LUNs). Each element of the array has
+ * the following fields:
+ * ->filename The path to the backing file for the LUN.
+ * Required if LUN is not marked as
+ * removable.
+ * ->ro Flag specifying that access to the LUN shall be
+ * read-only. This is implied if CD-ROM
+ * emulation is enabled, as well as when
+ * it was impossible to open "filename"
+ * in R/W mode.
+ * ->removable Flag specifying that the LUN shall be indicated as
+ * being removable.
+ * ->cdrom Flag specifying that the LUN shall be reported as
+ * being a CD-ROM.
+ *
+ * lun_name_format A printf-like format for names of the LUN
+ * devices. This determines how the
+ * directory in sysfs will be named.
+ * Unless you are using several MSFs in
+ * a single gadget (as opposed to a single
+ * MSF in many configurations) you may
+ * leave it as NULL (in which case
+ * "lun%d" will be used). In the format
+ * you can use "%d" to index LUNs for
+ * MSFs with more than one LUN. (Beware
+ * that there is only one integer given
+ * as an argument for the format and
+ * specifying an invalid format may cause
+ * unspecified behaviour.)
+ * thread_name Name of the kernel thread used by the
+ * MSF. You can safely set it to NULL
+ * (in which case the default "file-storage"
+ * will be used).
+ *
+ * vendor_name
+ * product_name
+ * release Information used as a reply to the INQUIRY
+ * request. To use the defaults, set these to NULL,
+ * NULL and 0xffff respectively. The first
+ * field should be 8 and the second 16
+ * characters or less.
+ *
+ * can_stall Set to permit the function to halt bulk endpoints.
+ * Disabled on some USB devices known not
+ * to work correctly. You should set it
+ * to true.
+ *
+ * If "removable" is not set for a LUN then a backing file must be
+ * specified. If it is set, then a NULL filename means the LUN's medium
+ * is not loaded (an empty string as "filename" in the fsg_config
+ * structure causes an error). The CD-ROM emulation includes a single
+ * data track and no audio tracks; hence there need be only one
+ * backing file per LUN. Note also that the CD-ROM block length is
+ * set to 512 rather than the more common value 2048.
+ *
+ *
+ * MSF includes support for module parameters. If the gadget using it
+ * decides to use them, the following module parameters will be
+ * available:
+ *
+ * file=filename[,filename...]
+ * Names of the files or block devices used for
+ * backing storage.
+ * ro=b[,b...] Default false, boolean for read-only access.
+ * removable=b[,b...]
+ * Default true, boolean for removable media.
+ * cdrom=b[,b...] Default false, boolean for whether to emulate
+ * a CD-ROM drive.
+ * luns=N Default N = number of filenames, number of
+ * LUNs to support.
+ * stall Default determined according to the type of
+ * USB device controller (usually true),
+ * boolean to permit the driver to halt
+ * bulk endpoints.
+ *
+ * The module parameters may be prefixed with some string. You need
+ * to consult the gadget's documentation or source to verify whether it
+ * uses those module parameters and, if it does, what the prefixes are
+ * (look for FSG_MODULE_PARAMETERS() macro usage; what's inside it is
+ * the prefix).
+ *
+ *
+ * Requirements are modest; only a bulk-in and a bulk-out endpoint are
+ * needed. The memory requirement amounts to two 16K buffers, size
+ * configurable by a parameter. Support is included for both
+ * full-speed and high-speed operation.
+ *
+ * Note that the driver is slightly non-portable in that it assumes a
+ * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
+ * interrupt-in endpoints. With most device controllers this isn't an
+ * issue, but there may be some with hardware restrictions that prevent
+ * a buffer from being used by more than one endpoint.
+ *
+ *
+ * The pathnames of the backing files and the ro settings are
+ * available in the attribute files "file" and "ro" in the lun<n> (or
+ * to be more precise in a directory whose name comes from the
+ * "lun_name_format" option!) subdirectory of the gadget's sysfs
+ * directory. If the "removable" option is set, writing to these
+ * files will simulate ejecting/loading the medium (writing an empty
+ * line means eject) and adjusting a write-enable tab. Changes to the
+ * ro setting are not allowed when the medium is loaded or if CD-ROM
+ * emulation is being used.
+ *
+ *
+ * This function is heavily based on "File-backed Storage Gadget" by
+ * Alan Stern which in turn is heavily based on "Gadget Zero" by David
+ * Brownell. The driver's SCSI command interface was based on the
+ * "Information technology - Small Computer System Interface - 2"
+ * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
+ * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
+ * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
+ * was based on the "Universal Serial Bus Mass Storage Class UFI
+ * Command Specification" document, Revision 1.0, December 14, 1998,
+ * available at
+ * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
+ */
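
A hypothetical sketch of how a gadget might fill in the fsg_config structure described above (and defined later in this file). The backing-file paths and name strings are invented for illustration; the NULL and 0xffff values follow the defaults documented in the comment.

	static struct fsg_config cfg = {
		.nluns = 2,
		.luns = {
			/* LUN 0: read/write removable disk backed by a block device */
			[0] = { .filename = "/dev/mmcblk0p1", .ro = 0, .removable = 1 },
			/* LUN 1: removable LUN with no medium loaded yet */
			[1] = { .filename = NULL, .removable = 1, .cdrom = 0 },
		},
		.lun_name_format = NULL,	/* defaults to "lun%d" */
		.thread_name = NULL,		/* defaults to "file-storage" */
		.vendor_name = "Linux",		/* 8 characters or less */
		.product_name = "Mass Storage",	/* 16 characters or less */
		.release = 0xffff,		/* use the default */
		.can_stall = 1,
	};
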
+
+
+/*
+ * Driver Design
+ *
+ * The MSF is fairly straightforward. There is a main kernel
+ * thread that handles most of the work. Interrupt routines field
+ * callbacks from the controller driver: bulk- and interrupt-request
+ * completion notifications, endpoint-0 events, and disconnect events.
+ * Completion events are passed to the main thread by wakeup calls. Many
+ * ep0 requests are handled at interrupt time, but SetInterface,
+ * SetConfiguration, and device reset requests are forwarded to the
+ * thread in the form of "exceptions" using SIGUSR1 signals (since they
+ * should interrupt any ongoing file I/O operations).
+ *
+ * The thread's main routine implements the standard command/data/status
+ * parts of a SCSI interaction. It and its subroutines are full of tests
+ * for pending signals/exceptions -- all this polling is necessary since
+ * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
+ * indication that the driver really wants to be running in userspace.)
+ * An important point is that so long as the thread is alive it keeps an
+ * open reference to the backing file. This will prevent unmounting
+ * the backing file's underlying filesystem and could cause problems
+ * during system shutdown, for example. To prevent such problems, the
+ * thread catches INT, TERM, and KILL signals and converts them into
+ * an EXIT exception.
+ *
+ * In normal operation the main thread is started during the gadget's
+ * fsg_bind() callback and stopped during fsg_unbind(). But it can
+ * also exit when it receives a signal, and there's no point leaving
+ * the gadget running when the thread is dead. As of this moment, MSF
+ * provides no way to deregister the gadget when the thread dies -- maybe
+ * a callback function is needed.
+ *
+ * To provide maximum throughput, the driver uses a circular pipeline of
+ * buffer heads (struct fsg_buffhd). In principle the pipeline can be
+ * arbitrarily long; in practice the benefits don't justify having more
+ * than 2 stages (i.e., double buffering). But it helps to think of the
+ * pipeline as being a long one. Each buffer head contains a bulk-in and
+ * a bulk-out request pointer (since the buffer can be used for both
+ * output and input -- directions always are given from the host's
+ * point of view) as well as a pointer to the buffer and various state
+ * variables.
+ *
+ * Use of the pipeline follows a simple protocol. There is a variable
+ * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
+ * At any time that buffer head may still be in use from an earlier
+ * request, so each buffer head has a state variable indicating whether
+ * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
+ * buffer head to be EMPTY, filling the buffer either by file I/O or by
+ * USB I/O (during which the buffer head is BUSY), and marking the buffer
+ * head FULL when the I/O is complete. Then the buffer will be emptied
+ * (again possibly by USB I/O, during which it is marked BUSY) and
+ * finally marked EMPTY again (possibly by a completion routine).
+ *
+ * A module parameter tells the driver to avoid stalling the bulk
+ * endpoints wherever the transport specification allows. This is
+ * necessary for some UDCs like the SuperH, which cannot reliably clear a
+ * halt on a bulk endpoint. However, under certain circumstances the
+ * Bulk-only specification requires a stall. In such cases the driver
+ * will halt the endpoint and set a flag indicating that it should clear
+ * the halt in software during the next device reset. Hopefully this
+ * will permit everything to work correctly. Furthermore, although the
+ * specification allows the bulk-out endpoint to halt when the host sends
+ * too much data, implementing this would cause an unavoidable race.
+ * The driver will always use the "no-stall" approach for OUT transfers.
+ *
+ * One subtle point concerns sending status-stage responses for ep0
+ * requests. Some of these requests, such as device reset, can involve
+ * interrupting an ongoing file I/O operation, which might take an
+ * arbitrarily long time. During that delay the host might give up on
+ * the original ep0 request and issue a new one. When that happens the
+ * driver should not notify the host about completion of the original
+ * request, as the host will no longer be waiting for it. So the driver
+ * assigns to each ep0 request a unique tag, and it keeps track of the
+ * tag value of the request associated with a long-running exception
+ * (device-reset, interface-change, or configuration-change). When the
+ * exception handler is finished, the status-stage response is submitted
+ * only if the current ep0 request tag is equal to the exception request
+ * tag. Thus only the most recently received ep0 request will get a
+ * status-stage response.
+ *
+ * Warning: This driver source file is too long. It ought to be split up
+ * into a header file plus about 3 separate .c files, to handle the details
+ * of the Gadget, USB Mass Storage, and SCSI protocols.
+ */
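
A toy sketch (not part of the patch) of the buffer-head cycle described above, using made-up names. A real driver sleeps in sleep_thread() while waiting for a buffer instead of spinning, and the BUSY state covers both file I/O and USB I/O.

	enum buf_state { BUF_EMPTY, BUF_BUSY, BUF_FULL };

	struct toy_buffhd {
		enum buf_state state;
		struct toy_buffhd *next;	/* circular list */
	};

	static void toy_fill(struct toy_buffhd **next_to_fill)
	{
		struct toy_buffhd *bh = *next_to_fill;

		while (bh->state != BUF_EMPTY)
			;			/* a real driver sleeps here */
		bh->state = BUF_BUSY;		/* file or USB I/O in progress */
		/* ... perform the I/O ... */
		bh->state = BUF_FULL;		/* ready to be drained */
		*next_to_fill = bh->next;	/* advance around the ring */
	}
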
+
+
+/* #define VERBOSE_DEBUG */
+/* #define DUMP_MSGS */
+
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+
+
+/*------------------------------------------------------------------------*/
+
+#define FSG_DRIVER_DESC "Mass Storage Function"
+#define FSG_DRIVER_VERSION "2009/09/11"
+
+static const char fsg_string_interface[] = "Mass Storage";
+
+
+#define FSG_NO_INTR_EP 1
+#define FSG_BUFFHD_STATIC_BUFFER 1
+#define FSG_NO_DEVICE_STRINGS 1
+#define FSG_NO_OTG 1
+
+#include "storage_common.c"
+
+
+/*-------------------------------------------------------------------------*/
+
+struct fsg_dev;
+
+
+/* Data shared by all the FSG instances. */
+struct fsg_common {
+ struct usb_gadget *gadget;
+ struct fsg_dev *fsg;
+ struct fsg_dev *prev_fsg;
+
+ /* filesem protects: backing files in use */
+ struct rw_semaphore filesem;
+
+ /* lock protects: state, all the req_busy's */
+ spinlock_t lock;
+
+ struct usb_ep *ep0; /* Copy of gadget->ep0 */
+ struct usb_request *ep0req; /* Copy of cdev->req */
+ unsigned int ep0_req_tag;
+ const char *ep0req_name;
+
+ struct fsg_buffhd *next_buffhd_to_fill;
+ struct fsg_buffhd *next_buffhd_to_drain;
+ struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
+
+ int cmnd_size;
+ u8 cmnd[MAX_COMMAND_SIZE];
+
+ unsigned int nluns;
+ unsigned int lun;
+ struct fsg_lun *luns;
+ struct fsg_lun *curlun;
+
+ unsigned int bulk_out_maxpacket;
+ enum fsg_state state; /* For exception handling */
+ unsigned int exception_req_tag;
+
+ u8 config, new_config;
+ enum data_direction data_dir;
+ u32 data_size;
+ u32 data_size_from_cmnd;
+ u32 tag;
+ u32 residue;
+ u32 usb_amount_left;
+
+ unsigned int can_stall:1;
+ unsigned int free_storage_on_release:1;
+ unsigned int phase_error:1;
+ unsigned int short_packet_received:1;
+ unsigned int bad_lun_okay:1;
+ unsigned int running:1;
+
+ int thread_wakeup_needed;
+ struct completion thread_notifier;
+ struct task_struct *thread_task;
+
+ /* Callback function to call when thread exits. */
+ void (*thread_exits)(struct fsg_common *common);
+ /* Gadget's private data. */
+ void *private_data;
+
+ /* Vendor (8 chars), product (16 chars), release (4
+ * hexadecimal digits) and NUL byte */
+ char inquiry_string[8 + 16 + 4 + 1];
+
+ struct kref ref;
+};
+
+
+struct fsg_config {
+ unsigned nluns;
+ struct fsg_lun_config {
+ const char *filename;
+ char ro;
+ char removable;
+ char cdrom;
+ } luns[FSG_MAX_LUNS];
+
+ const char *lun_name_format;
+ const char *thread_name;
+
+ /* Callback function to call when thread exits. */
+ void (*thread_exits)(struct fsg_common *common);
+ /* Gadget's private data. */
+ void *private_data;
+
+ const char *vendor_name; /* 8 characters or less */
+ const char *product_name; /* 16 characters or less */
+ u16 release;
+
+ char can_stall;
+};
+
+
+struct fsg_dev {
+ struct usb_function function;
+ struct usb_gadget *gadget; /* Copy of cdev->gadget */
+ struct fsg_common *common;
+
+ u16 interface_number;
+
+ unsigned int bulk_in_enabled:1;
+ unsigned int bulk_out_enabled:1;
+
+ unsigned long atomic_bitflags;
+#define IGNORE_BULK_OUT 0
+
+ struct usb_ep *bulk_in;
+ struct usb_ep *bulk_out;
+};
+
+
+static inline int __fsg_is_set(struct fsg_common *common,
+ const char *func, unsigned line)
+{
+ if (common->fsg)
+ return 1;
+ ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
+ return 0;
+}
+
+#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
+
+
+static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
+{
+ return container_of(f, struct fsg_dev, function);
+}
+
+
+typedef void (*fsg_routine_t)(struct fsg_dev *);
+
+static int exception_in_progress(struct fsg_common *common)
+{
+ return common->state > FSG_STATE_IDLE;
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+static void set_bulk_out_req_length(struct fsg_common *common,
+ struct fsg_buffhd *bh, unsigned int length)
+{
+ unsigned int rem;
+
+ bh->bulk_out_intended_length = length;
+ rem = length % common->bulk_out_maxpacket;
+ if (rem > 0)
+ length += common->bulk_out_maxpacket - rem;
+ bh->outreq->length = length;
+}
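
A small standalone illustration (not part of the patch) of the rounding performed by set_bulk_out_req_length(): with a 512-byte bulk-out maxpacket, a 3000-byte request is padded up to 3072 bytes (the next multiple of 512), while 1024 is left unchanged.

	static unsigned int round_up_to_maxpacket(unsigned int length,
						  unsigned int maxpacket)
	{
		unsigned int rem = length % maxpacket;

		/* Pad to the next maxpacket boundary, as the code above does. */
		return rem ? length + maxpacket - rem : length;
	}
	/* round_up_to_maxpacket(3000, 512) == 3072
	 * round_up_to_maxpacket(1024, 512) == 1024 */
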
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+{
+ const char *name;
+
+ if (ep == fsg->bulk_in)
+ name = "bulk-in";
+ else if (ep == fsg->bulk_out)
+ name = "bulk-out";
+ else
+ name = ep->name;
+ DBG(fsg, "%s set halt\n", name);
+ return usb_ep_set_halt(ep);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+/* Caller must hold fsg->lock */
+static void wakeup_thread(struct fsg_common *common)
+{
+ /* Tell the main thread that something has happened */
+ common->thread_wakeup_needed = 1;
+ if (common->thread_task)
+ wake_up_process(common->thread_task);
+}
+
+
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+ unsigned long flags;
+
+ /* Do nothing if a higher-priority exception is already in progress.
+ * If a lower-or-equal priority exception is in progress, preempt it
+ * and notify the main thread by sending it a signal. */
+ spin_lock_irqsave(&common->lock, flags);
+ if (common->state <= new_state) {
+ common->exception_req_tag = common->ep0_req_tag;
+ common->state = new_state;
+ if (common->thread_task)
+ send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+ common->thread_task);
+ }
+ spin_unlock_irqrestore(&common->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int ep0_queue(struct fsg_common *common)
+{
+ int rc;
+
+ rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
+ common->ep0->driver_data = common;
+ if (rc != 0 && rc != -ESHUTDOWN) {
+ /* We can't do much more than wait for a reset */
+ WARNING(common, "error in submission: %s --> %d\n",
+ common->ep0->name, rc);
+ }
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Bulk and interrupt endpoint completion handlers.
+ * These always run in_irq. */
+
+static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct fsg_common *common = ep->driver_data;
+ struct fsg_buffhd *bh = req->context;
+
+ if (req->status || req->actual != req->length)
+ DBG(common, "%s --> %d, %u/%u\n", __func__,
+ req->status, req->actual, req->length);
+ if (req->status == -ECONNRESET) /* Request was cancelled */
+ usb_ep_fifo_flush(ep);
+
+ /* Hold the lock while we update the request and buffer states */
+ smp_wmb();
+ spin_lock(&common->lock);
+ bh->inreq_busy = 0;
+ bh->state = BUF_STATE_EMPTY;
+ wakeup_thread(common);
+ spin_unlock(&common->lock);
+}
+
+static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct fsg_common *common = ep->driver_data;
+ struct fsg_buffhd *bh = req->context;
+
+ dump_msg(common, "bulk-out", req->buf, req->actual);
+ if (req->status || req->actual != bh->bulk_out_intended_length)
+ DBG(common, "%s --> %d, %u/%u\n", __func__,
+ req->status, req->actual,
+ bh->bulk_out_intended_length);
+ if (req->status == -ECONNRESET) /* Request was cancelled */
+ usb_ep_fifo_flush(ep);
+
+ /* Hold the lock while we update the request and buffer states */
+ smp_wmb();
+ spin_lock(&common->lock);
+ bh->outreq_busy = 0;
+ bh->state = BUF_STATE_FULL;
+ wakeup_thread(common);
+ spin_unlock(&common->lock);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Ep0 class-specific handlers. These always run in_irq. */
+
+static int fsg_setup(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct fsg_dev *fsg = fsg_from_func(f);
+ struct usb_request *req = fsg->common->ep0req;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ if (!fsg->common->config)
+ return -EOPNOTSUPP;
+
+ switch (ctrl->bRequest) {
+
+ case USB_BULK_RESET_REQUEST:
+ if (ctrl->bRequestType !=
+ (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+ break;
+ if (w_index != fsg->interface_number || w_value != 0)
+ return -EDOM;
+
+ /* Raise an exception to stop the current operation
+ * and reinitialize our state. */
+ DBG(fsg, "bulk reset request\n");
+ raise_exception(fsg->common, FSG_STATE_RESET);
+ return DELAYED_STATUS;
+
+ case USB_BULK_GET_MAX_LUN_REQUEST:
+ if (ctrl->bRequestType !=
+ (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+ break;
+ if (w_index != fsg->interface_number || w_value != 0)
+ return -EDOM;
+ VDBG(fsg, "get max LUN\n");
+ *(u8 *) req->buf = fsg->common->nluns - 1;
+ return 1;
+ }
+
+ VDBG(fsg,
+ "unknown class-specific control req "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ le16_to_cpu(ctrl->wValue), w_index, w_length);
+ return -EOPNOTSUPP;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+ struct usb_request *req, int *pbusy,
+ enum fsg_buffer_state *state)
+{
+ int rc;
+
+ if (ep == fsg->bulk_in)
+ dump_msg(fsg, "bulk-in", req->buf, req->length);
+
+ spin_lock_irq(&fsg->common->lock);
+ *pbusy = 1;
+ *state = BUF_STATE_BUSY;
+ spin_unlock_irq(&fsg->common->lock);
+ rc = usb_ep_queue(ep, req, GFP_KERNEL);
+ if (rc != 0) {
+ *pbusy = 0;
+ *state = BUF_STATE_EMPTY;
+
+ /* We can't do much more than wait for a reset */
+
+ /* Note: currently the net2280 driver fails zero-length
+ * submissions if DMA is enabled. */
+ if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
+ req->length == 0))
+ WARNING(fsg, "error in submission: %s --> %d\n",
+ ep->name, rc);
+ }
+}
+
+#define START_TRANSFER_OR(common, ep_name, req, pbusy, state) \
+ if (fsg_is_set(common)) \
+ start_transfer((common)->fsg, (common)->fsg->ep_name, \
+ req, pbusy, state); \
+ else
+
+#define START_TRANSFER(common, ep_name, req, pbusy, state) \
+ START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
+
+
+
+static int sleep_thread(struct fsg_common *common)
+{
+ int rc = 0;
+
+ /* Wait until a signal arrives or we are woken up */
+ for (;;) {
+ try_to_freeze();
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ if (common->thread_wakeup_needed)
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ common->thread_wakeup_needed = 0;
+ return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_read(struct fsg_common *common)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u32 lba;
+ struct fsg_buffhd *bh;
+ int rc;
+ u32 amount_left;
+ loff_t file_offset, file_offset_tmp;
+ unsigned int amount;
+ unsigned int partial_page;
+ ssize_t nread;
+
+ /* Get the starting Logical Block Address and check that it's
+ * not too big */
+ if (common->cmnd[0] == SC_READ_6)
+ lba = get_unaligned_be24(&common->cmnd[1]);
+ else {
+ lba = get_unaligned_be32(&common->cmnd[2]);
+
+ /* We allow DPO (Disable Page Out = don't save data in the
+ * cache) and FUA (Force Unit Access = don't read from the
+ * cache), but we don't implement them. */
+ if ((common->cmnd[1] & ~0x18) != 0) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+ }
+ if (lba >= curlun->num_sectors) {
+ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ file_offset = ((loff_t) lba) << 9;
+
+ /* Carry out the file reads */
+ amount_left = common->data_size_from_cmnd;
+ if (unlikely(amount_left == 0))
+ return -EIO; /* No default reply */
+
+ for (;;) {
+
+ /* Figure out how much we need to read:
+ * Try to read the remaining amount.
+ * But don't read more than the buffer size.
+ * And don't try to read past the end of the file.
+ * Finally, if we're not at a page boundary, don't read past
+ * the next page.
+ * If this means reading 0 then we were asked to read past
+ * the end of file. */
+ amount = min(amount_left, FSG_BUFLEN);
+ amount = min((loff_t) amount,
+ curlun->file_length - file_offset);
+ partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
+ if (partial_page > 0)
+ amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
+ partial_page);
+
+ /* Wait for the next buffer to become available */
+ bh = common->next_buffhd_to_fill;
+ while (bh->state != BUF_STATE_EMPTY) {
+ rc = sleep_thread(common);
+ if (rc)
+ return rc;
+ }
+
+ /* If we were asked to read past the end of file,
+ * end with an empty buffer. */
+ if (amount == 0) {
+ curlun->sense_data =
+ SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ curlun->sense_data_info = file_offset >> 9;
+ curlun->info_valid = 1;
+ bh->inreq->length = 0;
+ bh->state = BUF_STATE_FULL;
+ break;
+ }
+
+ /* Perform the read */
+ file_offset_tmp = file_offset;
+ nread = vfs_read(curlun->filp,
+ (char __user *) bh->buf,
+ amount, &file_offset_tmp);
+ VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+ (unsigned long long) file_offset,
+ (int) nread);
+ if (signal_pending(current))
+ return -EINTR;
+
+ if (nread < 0) {
+ LDBG(curlun, "error in file read: %d\n",
+ (int) nread);
+ nread = 0;
+ } else if (nread < amount) {
+ LDBG(curlun, "partial file read: %d/%u\n",
+ (int) nread, amount);
+ nread -= (nread & 511); /* Round down to a block */
+ }
+ file_offset += nread;
+ amount_left -= nread;
+ common->residue -= nread;
+ bh->inreq->length = nread;
+ bh->state = BUF_STATE_FULL;
+
+ /* If an error occurred, report it and its position */
+ if (nread < amount) {
+ curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+ curlun->sense_data_info = file_offset >> 9;
+ curlun->info_valid = 1;
+ break;
+ }
+
+ if (amount_left == 0)
+ break; /* No more left to read */
+
+ /* Send this buffer and go read some more */
+ bh->inreq->zero = 0;
+ START_TRANSFER_OR(common, bulk_in, bh->inreq,
+ &bh->inreq_busy, &bh->state)
+ /* Don't know what to do if
+ * common->fsg is NULL */
+ return -EIO;
+ common->next_buffhd_to_fill = bh->next;
+ }
+
+ return -EIO; /* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_write(struct fsg_common *common)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u32 lba;
+ struct fsg_buffhd *bh;
+ int get_some_more;
+ u32 amount_left_to_req, amount_left_to_write;
+ loff_t usb_offset, file_offset, file_offset_tmp;
+ unsigned int amount;
+ unsigned int partial_page;
+ ssize_t nwritten;
+ int rc;
+
+ if (curlun->ro) {
+ curlun->sense_data = SS_WRITE_PROTECTED;
+ return -EINVAL;
+ }
+ spin_lock(&curlun->filp->f_lock);
+ curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
+ spin_unlock(&curlun->filp->f_lock);
+
+ /* Get the starting Logical Block Address and check that it's
+ * not too big */
+ if (common->cmnd[0] == SC_WRITE_6)
+ lba = get_unaligned_be24(&common->cmnd[1]);
+ else {
+ lba = get_unaligned_be32(&common->cmnd[2]);
+
+ /* We allow DPO (Disable Page Out = don't save data in the
+ * cache) and FUA (Force Unit Access = write directly to the
+ * medium). We don't implement DPO; we implement FUA by
+ * performing synchronous output. */
+ if (common->cmnd[1] & ~0x18) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+ if (common->cmnd[1] & 0x08) { /* FUA */
+ spin_lock(&curlun->filp->f_lock);
+ curlun->filp->f_flags |= O_SYNC;
+ spin_unlock(&curlun->filp->f_lock);
+ }
+ }
+ if (lba >= curlun->num_sectors) {
+ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ /* Carry out the file writes */
+ get_some_more = 1;
+ file_offset = usb_offset = ((loff_t) lba) << 9;
+ amount_left_to_req = common->data_size_from_cmnd;
+ amount_left_to_write = common->data_size_from_cmnd;
+
+ while (amount_left_to_write > 0) {
+
+ /* Queue a request for more data from the host */
+ bh = common->next_buffhd_to_fill;
+ if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+ /* Figure out how much we want to get:
+ * Try to get the remaining amount.
+ * But don't get more than the buffer size.
+ * And don't try to go past the end of the file.
+ * If we're not at a page boundary,
+ * don't go past the next page.
+ * If this means getting 0, then we were asked
+ * to write past the end of file.
+ * Finally, round down to a block boundary. */
+ amount = min(amount_left_to_req, FSG_BUFLEN);
+ amount = min((loff_t) amount, curlun->file_length -
+ usb_offset);
+ partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
+ if (partial_page > 0)
+ amount = min(amount,
+ (unsigned int) PAGE_CACHE_SIZE - partial_page);
+
+ if (amount == 0) {
+ get_some_more = 0;
+ curlun->sense_data =
+ SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ curlun->sense_data_info = usb_offset >> 9;
+ curlun->info_valid = 1;
+ continue;
+ }
+ amount -= (amount & 511);
+ if (amount == 0) {
+
+ /* Why were we asked to transfer a
+ * partial block? */
+ get_some_more = 0;
+ continue;
+ }
+
+ /* Get the next buffer */
+ usb_offset += amount;
+ common->usb_amount_left -= amount;
+ amount_left_to_req -= amount;
+ if (amount_left_to_req == 0)
+ get_some_more = 0;
+
+ /* amount is always divisible by 512, hence by
+ * the bulk-out maxpacket size */
+ bh->outreq->length = amount;
+ bh->bulk_out_intended_length = amount;
+ bh->outreq->short_not_ok = 1;
+ START_TRANSFER_OR(common, bulk_out, bh->outreq,
+ &bh->outreq_busy, &bh->state)
+ /* Don't know what to do if
+ * common->fsg is NULL */
+ return -EIO;
+ common->next_buffhd_to_fill = bh->next;
+ continue;
+ }
+
+ /* Write the received data to the backing file */
+ bh = common->next_buffhd_to_drain;
+ if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+ break; /* We stopped early */
+ if (bh->state == BUF_STATE_FULL) {
+ smp_rmb();
+ common->next_buffhd_to_drain = bh->next;
+ bh->state = BUF_STATE_EMPTY;
+
+ /* Did something go wrong with the transfer? */
+ if (bh->outreq->status != 0) {
+ curlun->sense_data = SS_COMMUNICATION_FAILURE;
+ curlun->sense_data_info = file_offset >> 9;
+ curlun->info_valid = 1;
+ break;
+ }
+
+ amount = bh->outreq->actual;
+ if (curlun->file_length - file_offset < amount) {
+ LERROR(curlun,
+ "write %u @ %llu beyond end %llu\n",
+ amount, (unsigned long long) file_offset,
+ (unsigned long long) curlun->file_length);
+ amount = curlun->file_length - file_offset;
+ }
+
+ /* Perform the write */
+ file_offset_tmp = file_offset;
+ nwritten = vfs_write(curlun->filp,
+ (char __user *) bh->buf,
+ amount, &file_offset_tmp);
+ VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+ (unsigned long long) file_offset,
+ (int) nwritten);
+ if (signal_pending(current))
+ return -EINTR; /* Interrupted! */
+
+ if (nwritten < 0) {
+ LDBG(curlun, "error in file write: %d\n",
+ (int) nwritten);
+ nwritten = 0;
+ } else if (nwritten < amount) {
+ LDBG(curlun, "partial file write: %d/%u\n",
+ (int) nwritten, amount);
+ nwritten -= (nwritten & 511);
+ /* Round down to a block */
+ }
+ file_offset += nwritten;
+ amount_left_to_write -= nwritten;
+ common->residue -= nwritten;
+
+ /* If an error occurred, report it and its position */
+ if (nwritten < amount) {
+ curlun->sense_data = SS_WRITE_ERROR;
+ curlun->sense_data_info = file_offset >> 9;
+ curlun->info_valid = 1;
+ break;
+ }
+
+ /* Did the host decide to stop early? */
+ if (bh->outreq->actual != bh->outreq->length) {
+ common->short_packet_received = 1;
+ break;
+ }
+ continue;
+ }
+
+ /* Wait for something to happen */
+ rc = sleep_thread(common);
+ if (rc)
+ return rc;
+ }
+
+ return -EIO; /* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_synchronize_cache(struct fsg_common *common)
+{
+ struct fsg_lun *curlun = common->curlun;
+ int rc;
+
+ /* We ignore the requested LBA and write out all of the file's
+ * dirty data buffers. */
+ rc = fsg_lun_fsync_sub(curlun);
+ if (rc)
+ curlun->sense_data = SS_WRITE_ERROR;
+ return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct fsg_lun *curlun)
+{
+ struct file *filp = curlun->filp;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ unsigned long rc;
+
+ rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+ VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+}
+
+static int do_verify(struct fsg_common *common)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u32 lba;
+ u32 verification_length;
+ struct fsg_buffhd *bh = common->next_buffhd_to_fill;
+ loff_t file_offset, file_offset_tmp;
+ u32 amount_left;
+ unsigned int amount;
+ ssize_t nread;
+
+ /* Get the starting Logical Block Address and check that it's
+ * not too big */
+ lba = get_unaligned_be32(&common->cmnd[2]);
+ if (lba >= curlun->num_sectors) {
+ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ /* We allow DPO (Disable Page Out = don't save data in the
+ * cache) but we don't implement it. */
+ if (common->cmnd[1] & ~0x10) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ verification_length = get_unaligned_be16(&common->cmnd[7]);
+ if (unlikely(verification_length == 0))
+ return -EIO; /* No default reply */
+
+ /* Prepare to carry out the file verify */
+ amount_left = verification_length << 9;
+ file_offset = ((loff_t) lba) << 9;
+
+ /* Write out all the dirty buffers before invalidating them */
+ fsg_lun_fsync_sub(curlun);
+ if (signal_pending(current))
+ return -EINTR;
+
+ invalidate_sub(curlun);
+ if (signal_pending(current))
+ return -EINTR;
+
+ /* Just try to read the requested blocks */
+ while (amount_left > 0) {
+
+ /* Figure out how much we need to read:
+ * Try to read the remaining amount, but not more than
+ * the buffer size.
+ * And don't try to read past the end of the file.
+ * If this means reading 0 then we were asked to read
+ * past the end of file. */
+ amount = min(amount_left, FSG_BUFLEN);
+ amount = min((loff_t) amount,
+ curlun->file_length - file_offset);
+ if (amount == 0) {
+ curlun->sense_data =
+ SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ curlun->sense_data_info = file_offset >> 9;
+ curlun->info_valid = 1;
+ break;
+ }
+
+ /* Perform the read */
+ file_offset_tmp = file_offset;
+ nread = vfs_read(curlun->filp,
+ (char __user *) bh->buf,
+ amount, &file_offset_tmp);
+ VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+ (unsigned long long) file_offset,
+ (int) nread);
+ if (signal_pending(current))
+ return -EINTR;
+
+ if (nread < 0) {
+ LDBG(curlun, "error in file verify: %d\n",
+ (int) nread);
+ nread = 0;
+ } else if (nread < amount) {
+ LDBG(curlun, "partial file verify: %d/%u\n",
+ (int) nread, amount);
+ nread -= (nread & 511); /* Round down to a sector */
+ }
+ if (nread == 0) {
+ curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+ curlun->sense_data_info = file_offset >> 9;
+ curlun->info_valid = 1;
+ break;
+ }
+ file_offset += nread;
+ amount_left -= nread;
+ }
+ return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u8 *buf = (u8 *) bh->buf;
+
+ if (!curlun) { /* Unsupported LUNs are okay */
+ common->bad_lun_okay = 1;
+ memset(buf, 0, 36);
+ buf[0] = 0x7f; /* Unsupported, no device-type */
+ buf[4] = 31; /* Additional length */
+ return 36;
+ }
+
+ buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK;
+ buf[1] = curlun->removable ? 0x80 : 0;
+ buf[2] = 2; /* ANSI SCSI level 2 */
+ buf[3] = 2; /* SCSI-2 INQUIRY data format */
+ buf[4] = 31; /* Additional length */
+ buf[5] = 0; /* No special options */
+ buf[6] = 0;
+ buf[7] = 0;
+ memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
+ return 36;
+}
+
+
+static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u8 *buf = (u8 *) bh->buf;
+ u32 sd, sdinfo;
+ int valid;
+
+ /*
+ * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+ *
+ * If a REQUEST SENSE command is received from an initiator
+ * with a pending unit attention condition (before the target
+ * generates the contingent allegiance condition), then the
+ * target shall either:
+ * a) report any pending sense data and preserve the unit
+ * attention condition on the logical unit, or,
+ * b) report the unit attention condition, may discard any
+ * pending sense data, and clear the unit attention
+ * condition on the logical unit for that initiator.
+ *
+ * FSG normally uses option a); enable this code to use option b).
+ */
+#if 0
+ if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+ curlun->sense_data = curlun->unit_attention_data;
+ curlun->unit_attention_data = SS_NO_SENSE;
+ }
+#endif
+
+ if (!curlun) { /* Unsupported LUNs are okay */
+ common->bad_lun_okay = 1;
+ sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+ sdinfo = 0;
+ valid = 0;
+ } else {
+ sd = curlun->sense_data;
+ sdinfo = curlun->sense_data_info;
+ valid = curlun->info_valid << 7;
+ curlun->sense_data = SS_NO_SENSE;
+ curlun->sense_data_info = 0;
+ curlun->info_valid = 0;
+ }
+
+ memset(buf, 0, 18);
+ buf[0] = valid | 0x70; /* Valid, current error */
+ buf[2] = SK(sd);
+ put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
+ buf[7] = 18 - 8; /* Additional sense length */
+ buf[12] = ASC(sd);
+ buf[13] = ASCQ(sd);
+ return 18;
+}
+
+
+static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u32 lba = get_unaligned_be32(&common->cmnd[2]);
+ int pmi = common->cmnd[8];
+ u8 *buf = (u8 *) bh->buf;
+
+ /* Check the PMI and LBA fields */
+ if (pmi > 1 || (pmi == 0 && lba != 0)) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+ /* Max logical block */
+ put_unaligned_be32(512, &buf[4]); /* Block length */
+ return 8;
+}
+
+
+static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ int msf = common->cmnd[1] & 0x02;
+ u32 lba = get_unaligned_be32(&common->cmnd[2]);
+ u8 *buf = (u8 *) bh->buf;
+
+ if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+ if (lba >= curlun->num_sectors) {
+ curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ memset(buf, 0, 8);
+ buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
+ store_cdrom_address(&buf[4], msf, lba);
+ return 8;
+}
+
+
+static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ int msf = common->cmnd[1] & 0x02;
+ int start_track = common->cmnd[6];
+ u8 *buf = (u8 *) bh->buf;
+
+ if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
+ start_track > 1) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ memset(buf, 0, 20);
+ buf[1] = (20-2); /* TOC data length */
+ buf[2] = 1; /* First track number */
+ buf[3] = 1; /* Last track number */
+ buf[5] = 0x16; /* Data track, copying allowed */
+ buf[6] = 0x01; /* Only track is number 1 */
+ store_cdrom_address(&buf[8], msf, 0);
+
+ buf[13] = 0x16; /* Lead-out track is data */
+ buf[14] = 0xAA; /* Lead-out track number */
+ store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+ return 20;
+}
+
+
+static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ int mscmnd = common->cmnd[0];
+ u8 *buf = (u8 *) bh->buf;
+ u8 *buf0 = buf;
+ int pc, page_code;
+ int changeable_values, all_pages;
+ int valid_page = 0;
+ int len, limit;
+
+ if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+ pc = common->cmnd[2] >> 6;
+ page_code = common->cmnd[2] & 0x3f;
+ if (pc == 3) {
+ curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+ return -EINVAL;
+ }
+ changeable_values = (pc == 1);
+ all_pages = (page_code == 0x3f);
+
+ /* Write the mode parameter header. Fixed values are: default
+ * medium type, no cache control (DPOFUA), and no block descriptors.
+ * The only variable value is the WriteProtect bit. We will fill in
+ * the mode data length later. */
+ memset(buf, 0, 8);
+ if (mscmnd == SC_MODE_SENSE_6) {
+ buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
+ buf += 4;
+ limit = 255;
+ } else { /* SC_MODE_SENSE_10 */
+ buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
+ buf += 8;
+ limit = 65535; /* Should really be FSG_BUFLEN */
+ }
+
+ /* No block descriptors */
+
+ /* The mode pages, in numerical order. The only page we support
+ * is the Caching page. */
+ if (page_code == 0x08 || all_pages) {
+ valid_page = 1;
+ buf[0] = 0x08; /* Page code */
+ buf[1] = 10; /* Page length */
+ memset(buf+2, 0, 10); /* None of the fields are changeable */
+
+ if (!changeable_values) {
+ buf[2] = 0x04; /* Write cache enable, */
+ /* Read cache not disabled */
+ /* No cache retention priorities */
+ put_unaligned_be16(0xffff, &buf[4]);
+ /* Don't disable prefetch */
+ /* Minimum prefetch = 0 */
+ put_unaligned_be16(0xffff, &buf[8]);
+ /* Maximum prefetch */
+ put_unaligned_be16(0xffff, &buf[10]);
+ /* Maximum prefetch ceiling */
+ }
+ buf += 12;
+ }
+
+ /* Check that a valid page was requested and the mode data length
+ * isn't too long. */
+ len = buf - buf0;
+ if (!valid_page || len > limit) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ /* Store the mode data length */
+ if (mscmnd == SC_MODE_SENSE_6)
+ buf0[0] = len - 1;
+ else
+ put_unaligned_be16(len - 2, buf0);
+ return len;
+}
+
+
+static int do_start_stop(struct fsg_common *common)
+{
+ if (!common->curlun) {
+ return -EINVAL;
+ } else if (!common->curlun->removable) {
+ common->curlun->sense_data = SS_INVALID_COMMAND;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+static int do_prevent_allow(struct fsg_common *common)
+{
+ struct fsg_lun *curlun = common->curlun;
+ int prevent;
+
+ if (!common->curlun) {
+ return -EINVAL;
+ } else if (!common->curlun->removable) {
+ common->curlun->sense_data = SS_INVALID_COMMAND;
+ return -EINVAL;
+ }
+
+ prevent = common->cmnd[4] & 0x01;
+ if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ if (curlun->prevent_medium_removal && !prevent)
+ fsg_lun_fsync_sub(curlun);
+ curlun->prevent_medium_removal = prevent;
+ return 0;
+}
+
+
+static int do_read_format_capacities(struct fsg_common *common,
+ struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u8 *buf = (u8 *) bh->buf;
+
+ buf[0] = buf[1] = buf[2] = 0;
+ buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
+ buf += 4;
+
+ put_unaligned_be32(curlun->num_sectors, &buf[0]);
+ /* Number of blocks */
+ put_unaligned_be32(512, &buf[4]); /* Block length */
+ buf[4] = 0x02; /* Current capacity */
+ return 12;
+}
+
+
+static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+
+ /* We don't support MODE SELECT */
+ if (curlun)
+ curlun->sense_data = SS_INVALID_COMMAND;
+ return -EINVAL;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+ int rc;
+
+ rc = fsg_set_halt(fsg, fsg->bulk_in);
+ if (rc == -EAGAIN)
+ VDBG(fsg, "delayed bulk-in endpoint halt\n");
+ while (rc != 0) {
+ if (rc != -EAGAIN) {
+ WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
+ rc = 0;
+ break;
+ }
+
+ /* Wait for a short time and then try again */
+ if (msleep_interruptible(100) != 0)
+ return -EINTR;
+ rc = usb_ep_set_halt(fsg->bulk_in);
+ }
+ return rc;
+}
+
+static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+ int rc;
+
+ DBG(fsg, "bulk-in set wedge\n");
+ rc = usb_ep_set_wedge(fsg->bulk_in);
+ if (rc == -EAGAIN)
+ VDBG(fsg, "delayed bulk-in endpoint wedge\n");
+ while (rc != 0) {
+ if (rc != -EAGAIN) {
+ WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
+ rc = 0;
+ break;
+ }
+
+ /* Wait for a short time and then try again */
+ if (msleep_interruptible(100) != 0)
+ return -EINTR;
+ rc = usb_ep_set_wedge(fsg->bulk_in);
+ }
+ return rc;
+}
+
+static int pad_with_zeros(struct fsg_dev *fsg)
+{
+ struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
+ u32 nkeep = bh->inreq->length;
+ u32 nsend;
+ int rc;
+
+ bh->state = BUF_STATE_EMPTY; /* For the first iteration */
+ fsg->common->usb_amount_left = nkeep + fsg->common->residue;
+ while (fsg->common->usb_amount_left > 0) {
+
+ /* Wait for the next buffer to be free */
+ while (bh->state != BUF_STATE_EMPTY) {
+ rc = sleep_thread(fsg->common);
+ if (rc)
+ return rc;
+ }
+
+ nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
+ memset(bh->buf + nkeep, 0, nsend - nkeep);
+ bh->inreq->length = nsend;
+ bh->inreq->zero = 0;
+ start_transfer(fsg, fsg->bulk_in, bh->inreq,
+ &bh->inreq_busy, &bh->state);
+ bh = fsg->common->next_buffhd_to_fill = bh->next;
+ fsg->common->usb_amount_left -= nsend;
+ nkeep = 0;
+ }
+ return 0;
+}
+
+static int throw_away_data(struct fsg_common *common)
+{
+ struct fsg_buffhd *bh;
+ u32 amount;
+ int rc;
+
+ for (bh = common->next_buffhd_to_drain;
+ bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
+ bh = common->next_buffhd_to_drain) {
+
+ /* Throw away the data in a filled buffer */
+ if (bh->state == BUF_STATE_FULL) {
+ smp_rmb();
+ bh->state = BUF_STATE_EMPTY;
+ common->next_buffhd_to_drain = bh->next;
+
+ /* A short packet or an error ends everything */
+ if (bh->outreq->actual != bh->outreq->length ||
+ bh->outreq->status != 0) {
+ raise_exception(common,
+ FSG_STATE_ABORT_BULK_OUT);
+ return -EINTR;
+ }
+ continue;
+ }
+
+ /* Try to submit another request if we need one */
+ bh = common->next_buffhd_to_fill;
+ if (bh->state == BUF_STATE_EMPTY
+ && common->usb_amount_left > 0) {
+ amount = min(common->usb_amount_left, FSG_BUFLEN);
+
+ /* amount is always divisible by 512, hence by
+ * the bulk-out maxpacket size */
+ bh->outreq->length = amount;
+ bh->bulk_out_intended_length = amount;
+ bh->outreq->short_not_ok = 1;
+ START_TRANSFER_OR(common, bulk_out, bh->outreq,
+ &bh->outreq_busy, &bh->state)
+ /* Don't know what to do if
+ * common->fsg is NULL */
+ return -EIO;
+ common->next_buffhd_to_fill = bh->next;
+ common->usb_amount_left -= amount;
+ continue;
+ }
+
+ /* Otherwise wait for something to happen */
+ rc = sleep_thread(common);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+
+static int finish_reply(struct fsg_common *common)
+{
+ struct fsg_buffhd *bh = common->next_buffhd_to_fill;
+ int rc = 0;
+
+ switch (common->data_dir) {
+ case DATA_DIR_NONE:
+ break; /* Nothing to send */
+
+ /* If we don't know whether the host wants to read or write,
+ * this must be CB or CBI with an unknown command. We mustn't
+ * try to send or receive any data. So stall both bulk pipes
+ * if we can and wait for a reset. */
+ case DATA_DIR_UNKNOWN:
+ if (!common->can_stall) {
+ /* Nothing */
+ } else if (fsg_is_set(common)) {
+ fsg_set_halt(common->fsg, common->fsg->bulk_out);
+ rc = halt_bulk_in_endpoint(common->fsg);
+ } else {
+ /* Don't know what to do if common->fsg is NULL */
+ rc = -EIO;
+ }
+ break;
+
+ /* All but the last buffer of data must have already been sent */
+ case DATA_DIR_TO_HOST:
+ if (common->data_size == 0) {
+ /* Nothing to send */
+
+ /* If there's no residue, simply send the last buffer */
+ } else if (common->residue == 0) {
+ bh->inreq->zero = 0;
+ START_TRANSFER_OR(common, bulk_in, bh->inreq,
+ &bh->inreq_busy, &bh->state)
+ return -EIO;
+ common->next_buffhd_to_fill = bh->next;
+
+ /* For Bulk-only, if we're allowed to stall then send the
+ * short packet and halt the bulk-in endpoint. If we can't
+ * stall, pad out the remaining data with 0's. */
+ } else if (common->can_stall) {
+ bh->inreq->zero = 1;
+ START_TRANSFER_OR(common, bulk_in, bh->inreq,
+ &bh->inreq_busy, &bh->state)
+ /* Don't know what to do if
+ * common->fsg is NULL */
+ rc = -EIO;
+ common->next_buffhd_to_fill = bh->next;
+ if (common->fsg)
+ rc = halt_bulk_in_endpoint(common->fsg);
+ } else if (fsg_is_set(common)) {
+ rc = pad_with_zeros(common->fsg);
+ } else {
+ /* Don't know what to do if common->fsg is NULL */
+ rc = -EIO;
+ }
+ break;
+
+ /* We have processed all we want from the data the host has sent.
+ * There may still be outstanding bulk-out requests. */
+ case DATA_DIR_FROM_HOST:
+ if (common->residue == 0) {
+ /* Nothing to receive */
+
+ /* Did the host stop sending unexpectedly early? */
+ } else if (common->short_packet_received) {
+ raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+ rc = -EINTR;
+
+ /* We haven't processed all the incoming data. Even though
+ * we may be allowed to stall, doing so would cause a race.
+ * The controller may already have ACK'ed all the remaining
+ * bulk-out packets, in which case the host wouldn't see a
+ * STALL. Not realizing the endpoint was halted, it wouldn't
+ * clear the halt -- leading to problems later on. */
+#if 0
+ } else if (common->can_stall) {
+ if (fsg_is_set(common))
+ fsg_set_halt(common->fsg,
+ common->fsg->bulk_out);
+ raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+ rc = -EINTR;
+#endif
+
+ /* We can't stall. Read in the excess data and throw it
+ * all away. */
+ } else {
+ rc = throw_away_data(common);
+ }
+ break;
+ }
+ return rc;
+}
+
+
+static int send_status(struct fsg_common *common)
+{
+ struct fsg_lun *curlun = common->curlun;
+ struct fsg_buffhd *bh;
+ struct bulk_cs_wrap *csw;
+ int rc;
+ u8 status = USB_STATUS_PASS;
+ u32 sd, sdinfo = 0;
+
+ /* Wait for the next buffer to become available */
+ bh = common->next_buffhd_to_fill;
+ while (bh->state != BUF_STATE_EMPTY) {
+ rc = sleep_thread(common);
+ if (rc)
+ return rc;
+ }
+
+ if (curlun) {
+ sd = curlun->sense_data;
+ sdinfo = curlun->sense_data_info;
+ } else if (common->bad_lun_okay)
+ sd = SS_NO_SENSE;
+ else
+ sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+ if (common->phase_error) {
+ DBG(common, "sending phase-error status\n");
+ status = USB_STATUS_PHASE_ERROR;
+ sd = SS_INVALID_COMMAND;
+ } else if (sd != SS_NO_SENSE) {
+ DBG(common, "sending command-failure status\n");
+ status = USB_STATUS_FAIL;
+ VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+ " info x%x\n",
+ SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+ }
+
+ /* Store and send the Bulk-only CSW */
+ csw = (void *)bh->buf;
+
+ csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
+ csw->Tag = common->tag;
+ csw->Residue = cpu_to_le32(common->residue);
+ csw->Status = status;
+
+ bh->inreq->length = USB_BULK_CS_WRAP_LEN;
+ bh->inreq->zero = 0;
+ START_TRANSFER_OR(common, bulk_in, bh->inreq,
+ &bh->inreq_busy, &bh->state)
+ /* Don't know what to do if common->fsg is NULL */
+ return -EIO;
+
+ common->next_buffhd_to_fill = bh->next;
+ return 0;
+}
+
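+/* For reference, the Bulk-only CSW built in send_status() above is the
+ * 13-byte wrapper from the Bulk-Only Transport spec (field order as in
+ * struct bulk_cs_wrap): Signature 'USBS' (le32), Tag (an echo of the CBW
+ * Tag), Residue (le32, bytes requested minus bytes actually transferred)
+ * and a one-byte Status, where 0 = pass, 1 = fail and 2 = phase error. */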
+
+/*-------------------------------------------------------------------------*/
+
+/* Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have. */
+static int check_command(struct fsg_common *common, int cmnd_size,
+ enum data_direction data_dir, unsigned int mask,
+ int needs_medium, const char *name)
+{
+ int i;
+ int lun = common->cmnd[1] >> 5;
+ static const char dirletter[4] = {'u', 'o', 'i', 'n'};
+ char hdlen[20];
+ struct fsg_lun *curlun;
+
+ hdlen[0] = 0;
+ if (common->data_dir != DATA_DIR_UNKNOWN)
+ sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
+ common->data_size);
+ VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
+ name, cmnd_size, dirletter[(int) data_dir],
+ common->data_size_from_cmnd, common->cmnd_size, hdlen);
+
+ /* We can't reply at all until we know the correct data direction
+ * and size. */
+ if (common->data_size_from_cmnd == 0)
+ data_dir = DATA_DIR_NONE;
+ if (common->data_size < common->data_size_from_cmnd) {
+ /* Host data size < Device data size is a phase error.
+ * Carry out the command, but only transfer as much as
+ * we are allowed. */
+ common->data_size_from_cmnd = common->data_size;
+ common->phase_error = 1;
+ }
+ common->residue = common->data_size;
+ common->usb_amount_left = common->data_size;
+
+ /* Conflicting data directions is a phase error */
+ if (common->data_dir != data_dir
+ && common->data_size_from_cmnd > 0) {
+ common->phase_error = 1;
+ return -EINVAL;
+ }
+
+ /* Verify the length of the command itself */
+ if (cmnd_size != common->cmnd_size) {
+
+ /* Special case workaround: there are plenty of buggy SCSI
+ * implementations. Many have issues with the cbw->Length
+ * field, passing a wrong command size. For those cases we
+ * always try to work around the problem by using the length
+ * sent by the host, provided it is at least as large
+ * as the correct command length.
+ * Examples are MS Windows, which issues REQUEST SENSE
+ * with cbw->Length == 12 where it should be 6, and the
+ * Xbox 360, which issues INQUIRY, TEST UNIT READY and
+ * REQUEST SENSE with cbw->Length == 10 where it should
+ * be 6 as well.
+ */
+ if (cmnd_size <= common->cmnd_size) {
+ DBG(common, "%s is buggy! Expected length %d "
+ "but we got %d\n", name,
+ cmnd_size, common->cmnd_size);
+ cmnd_size = common->cmnd_size;
+ } else {
+ common->phase_error = 1;
+ return -EINVAL;
+ }
+ }
+
+ /* Check that the LUN values are consistent */
+ if (common->lun != lun)
+ DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
+ common->lun, lun);
+
+ /* Check the LUN */
+ if (common->lun >= 0 && common->lun < common->nluns) {
+ curlun = &common->luns[common->lun];
+ common->curlun = curlun;
+ if (common->cmnd[0] != SC_REQUEST_SENSE) {
+ curlun->sense_data = SS_NO_SENSE;
+ curlun->sense_data_info = 0;
+ curlun->info_valid = 0;
+ }
+ } else {
+ common->curlun = NULL;
+ curlun = NULL;
+ common->bad_lun_okay = 0;
+
+ /* INQUIRY and REQUEST SENSE commands are explicitly allowed
+ * to use unsupported LUNs; all others may not. */
+ if (common->cmnd[0] != SC_INQUIRY &&
+ common->cmnd[0] != SC_REQUEST_SENSE) {
+ DBG(common, "unsupported LUN %d\n", common->lun);
+ return -EINVAL;
+ }
+ }
+
+ /* If a unit attention condition exists, only INQUIRY and
+ * REQUEST SENSE commands are allowed; anything else must fail. */
+ if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+ common->cmnd[0] != SC_INQUIRY &&
+ common->cmnd[0] != SC_REQUEST_SENSE) {
+ curlun->sense_data = curlun->unit_attention_data;
+ curlun->unit_attention_data = SS_NO_SENSE;
+ return -EINVAL;
+ }
+
+ /* Check that only command bytes listed in the mask are non-zero */
+ common->cmnd[1] &= 0x1f; /* Mask away the LUN */
+ for (i = 1; i < cmnd_size; ++i) {
+ if (common->cmnd[i] && !(mask & (1 << i))) {
+ if (curlun)
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+ }
+
+ /* If the medium isn't mounted and the command needs to access
+ * it, return an error. */
+ if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
+ curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
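+/* A short illustration of check_command()'s mask parameter: in the READ(10)
+ * case below, the mask (1<<1) | (0xf<<2) | (3<<7) marks CDB bytes 1, 2-5
+ * and 7-8 as the only ones allowed to be non-zero, i.e. the flags byte,
+ * the logical block address and the transfer length. */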
+
+static int do_scsi_command(struct fsg_common *common)
+{
+ struct fsg_buffhd *bh;
+ int rc;
+ int reply = -EINVAL;
+ int i;
+ static char unknown[16];
+
+ dump_cdb(common);
+
+ /* Wait for the next buffer to become available for data or status */
+ bh = common->next_buffhd_to_fill;
+ common->next_buffhd_to_drain = bh;
+ while (bh->state != BUF_STATE_EMPTY) {
+ rc = sleep_thread(common);
+ if (rc)
+ return rc;
+ }
+ common->phase_error = 0;
+ common->short_packet_received = 0;
+
+ down_read(&common->filesem); /* We're using the backing file */
+ switch (common->cmnd[0]) {
+
+ case SC_INQUIRY:
+ common->data_size_from_cmnd = common->cmnd[4];
+ reply = check_command(common, 6, DATA_DIR_TO_HOST,
+ (1<<4), 0,
+ "INQUIRY");
+ if (reply == 0)
+ reply = do_inquiry(common, bh);
+ break;
+
+ case SC_MODE_SELECT_6:
+ common->data_size_from_cmnd = common->cmnd[4];
+ reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+ (1<<1) | (1<<4), 0,
+ "MODE SELECT(6)");
+ if (reply == 0)
+ reply = do_mode_select(common, bh);
+ break;
+
+ case SC_MODE_SELECT_10:
+ common->data_size_from_cmnd =
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+ (1<<1) | (3<<7), 0,
+ "MODE SELECT(10)");
+ if (reply == 0)
+ reply = do_mode_select(common, bh);
+ break;
+
+ case SC_MODE_SENSE_6:
+ common->data_size_from_cmnd = common->cmnd[4];
+ reply = check_command(common, 6, DATA_DIR_TO_HOST,
+ (1<<1) | (1<<2) | (1<<4), 0,
+ "MODE SENSE(6)");
+ if (reply == 0)
+ reply = do_mode_sense(common, bh);
+ break;
+
+ case SC_MODE_SENSE_10:
+ common->data_size_from_cmnd =
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ (1<<1) | (1<<2) | (3<<7), 0,
+ "MODE SENSE(10)");
+ if (reply == 0)
+ reply = do_mode_sense(common, bh);
+ break;
+
+ case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
+ common->data_size_from_cmnd = 0;
+ reply = check_command(common, 6, DATA_DIR_NONE,
+ (1<<4), 0,
+ "PREVENT-ALLOW MEDIUM REMOVAL");
+ if (reply == 0)
+ reply = do_prevent_allow(common);
+ break;
+
+ case SC_READ_6:
+ i = common->cmnd[4];
+ common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+ reply = check_command(common, 6, DATA_DIR_TO_HOST,
+ (7<<1) | (1<<4), 1,
+ "READ(6)");
+ if (reply == 0)
+ reply = do_read(common);
+ break;
+
+ case SC_READ_10:
+ common->data_size_from_cmnd =
+ get_unaligned_be16(&common->cmnd[7]) << 9;
+ reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ (1<<1) | (0xf<<2) | (3<<7), 1,
+ "READ(10)");
+ if (reply == 0)
+ reply = do_read(common);
+ break;
+
+ case SC_READ_12:
+ common->data_size_from_cmnd =
+ get_unaligned_be32(&common->cmnd[6]) << 9;
+ reply = check_command(common, 12, DATA_DIR_TO_HOST,
+ (1<<1) | (0xf<<2) | (0xf<<6), 1,
+ "READ(12)");
+ if (reply == 0)
+ reply = do_read(common);
+ break;
+
+ case SC_READ_CAPACITY:
+ common->data_size_from_cmnd = 8;
+ reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ (0xf<<2) | (1<<8), 1,
+ "READ CAPACITY");
+ if (reply == 0)
+ reply = do_read_capacity(common, bh);
+ break;
+
+ case SC_READ_HEADER:
+ if (!common->curlun || !common->curlun->cdrom)
+ goto unknown_cmnd;
+ common->data_size_from_cmnd =
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ (3<<7) | (0x1f<<1), 1,
+ "READ HEADER");
+ if (reply == 0)
+ reply = do_read_header(common, bh);
+ break;
+
+ case SC_READ_TOC:
+ if (!common->curlun || !common->curlun->cdrom)
+ goto unknown_cmnd;
+ common->data_size_from_cmnd =
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ (7<<6) | (1<<1), 1,
+ "READ TOC");
+ if (reply == 0)
+ reply = do_read_toc(common, bh);
+ break;
+
+ case SC_READ_FORMAT_CAPACITIES:
+ common->data_size_from_cmnd =
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ (3<<7), 1,
+ "READ FORMAT CAPACITIES");
+ if (reply == 0)
+ reply = do_read_format_capacities(common, bh);
+ break;
+
+ case SC_REQUEST_SENSE:
+ common->data_size_from_cmnd = common->cmnd[4];
+ reply = check_command(common, 6, DATA_DIR_TO_HOST,
+ (1<<4), 0,
+ "REQUEST SENSE");
+ if (reply == 0)
+ reply = do_request_sense(common, bh);
+ break;
+
+ case SC_START_STOP_UNIT:
+ common->data_size_from_cmnd = 0;
+ reply = check_command(common, 6, DATA_DIR_NONE,
+ (1<<1) | (1<<4), 0,
+ "START-STOP UNIT");
+ if (reply == 0)
+ reply = do_start_stop(common);
+ break;
+
+ case SC_SYNCHRONIZE_CACHE:
+ common->data_size_from_cmnd = 0;
+ reply = check_command(common, 10, DATA_DIR_NONE,
+ (0xf<<2) | (3<<7), 1,
+ "SYNCHRONIZE CACHE");
+ if (reply == 0)
+ reply = do_synchronize_cache(common);
+ break;
+
+ case SC_TEST_UNIT_READY:
+ common->data_size_from_cmnd = 0;
+ reply = check_command(common, 6, DATA_DIR_NONE,
+ 0, 1,
+ "TEST UNIT READY");
+ break;
+
+ /* Although optional, this command is used by MS-Windows. We
+ * support a minimal version: BytChk must be 0. */
+ case SC_VERIFY:
+ common->data_size_from_cmnd = 0;
+ reply = check_command(common, 10, DATA_DIR_NONE,
+ (1<<1) | (0xf<<2) | (3<<7), 1,
+ "VERIFY");
+ if (reply == 0)
+ reply = do_verify(common);
+ break;
+
+ case SC_WRITE_6:
+ i = common->cmnd[4];
+ common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
+ reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+ (7<<1) | (1<<4), 1,
+ "WRITE(6)");
+ if (reply == 0)
+ reply = do_write(common);
+ break;
+
+ case SC_WRITE_10:
+ common->data_size_from_cmnd =
+ get_unaligned_be16(&common->cmnd[7]) << 9;
+ reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+ (1<<1) | (0xf<<2) | (3<<7), 1,
+ "WRITE(10)");
+ if (reply == 0)
+ reply = do_write(common);
+ break;
+
+ case SC_WRITE_12:
+ common->data_size_from_cmnd =
+ get_unaligned_be32(&common->cmnd[6]) << 9;
+ reply = check_command(common, 12, DATA_DIR_FROM_HOST,
+ (1<<1) | (0xf<<2) | (0xf<<6), 1,
+ "WRITE(12)");
+ if (reply == 0)
+ reply = do_write(common);
+ break;
+
+ /* Some mandatory commands that we recognize but don't implement.
+ * They don't mean much in this setting. It's left as an exercise
+ * for anyone interested to implement RESERVE and RELEASE in terms
+ * of Posix locks. */
+ case SC_FORMAT_UNIT:
+ case SC_RELEASE:
+ case SC_RESERVE:
+ case SC_SEND_DIAGNOSTIC:
+ /* Fall through */
+
+ default:
+unknown_cmnd:
+ common->data_size_from_cmnd = 0;
+ sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
+ reply = check_command(common, common->cmnd_size,
+ DATA_DIR_UNKNOWN, 0xff, 0, unknown);
+ if (reply == 0) {
+ common->curlun->sense_data = SS_INVALID_COMMAND;
+ reply = -EINVAL;
+ }
+ break;
+ }
+ up_read(&common->filesem);
+
+ if (reply == -EINTR || signal_pending(current))
+ return -EINTR;
+
+ /* Set up the single reply buffer for finish_reply() */
+ if (reply == -EINVAL)
+ reply = 0; /* Error reply length */
+ if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
+ reply = min((u32) reply, common->data_size_from_cmnd);
+ bh->inreq->length = reply;
+ bh->state = BUF_STATE_FULL;
+ common->residue -= reply;
+ } /* Otherwise it's already set */
+
+ return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+ struct usb_request *req = bh->outreq;
+ struct fsg_bulk_cb_wrap *cbw = req->buf;
+ struct fsg_common *common = fsg->common;
+
+ /* Was this a real packet? Should it be ignored? */
+ if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
+ return -EINVAL;
+
+ /* Is the CBW valid? */
+ if (req->actual != USB_BULK_CB_WRAP_LEN ||
+ cbw->Signature != cpu_to_le32(
+ USB_BULK_CB_SIG)) {
+ DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+ req->actual,
+ le32_to_cpu(cbw->Signature));
+
+ /* The Bulk-only spec says we MUST stall the IN endpoint
+ * (6.6.1), so it's unavoidable. It also says we must
+ * retain this state until the next reset, but there's
+ * no way to tell the controller driver it should ignore
+ * Clear-Feature(HALT) requests.
+ *
+ * We aren't required to halt the OUT endpoint; instead
+ * we can simply accept and discard any data received
+ * until the next reset. */
+ wedge_bulk_in_endpoint(fsg);
+ set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+ return -EINVAL;
+ }
+
+ /* Is the CBW meaningful? */
+ if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+ cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+ DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+ "cmdlen %u\n",
+ cbw->Lun, cbw->Flags, cbw->Length);
+
+ /* We can do anything we want here, so let's stall the
+ * bulk pipes if we are allowed to. */
+ if (common->can_stall) {
+ fsg_set_halt(fsg, fsg->bulk_out);
+ halt_bulk_in_endpoint(fsg);
+ }
+ return -EINVAL;
+ }
+
+ /* Save the command for later */
+ common->cmnd_size = cbw->Length;
+ memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
+ if (cbw->Flags & USB_BULK_IN_FLAG)
+ common->data_dir = DATA_DIR_TO_HOST;
+ else
+ common->data_dir = DATA_DIR_FROM_HOST;
+ common->data_size = le32_to_cpu(cbw->DataTransferLength);
+ if (common->data_size == 0)
+ common->data_dir = DATA_DIR_NONE;
+ common->lun = cbw->Lun;
+ common->tag = cbw->Tag;
+ return 0;
+}
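+
+/* For reference, the CBW parsed above is the 31-byte wrapper from the
+ * Bulk-Only Transport spec. Assuming the packed layout of struct
+ * fsg_bulk_cb_wrap (defined in storage_common.c, not in this hunk), the
+ * fields are: Signature 'USBC' (le32), Tag, DataTransferLength (le32),
+ * Flags (bit 7 set means device-to-host, i.e. USB_BULK_IN_FLAG), Lun,
+ * Length (CDB length, 1..16) and CDB[16]. */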
+
+
+static int get_next_command(struct fsg_common *common)
+{
+ struct fsg_buffhd *bh;
+ int rc = 0;
+
+ /* Wait for the next buffer to become available */
+ bh = common->next_buffhd_to_fill;
+ while (bh->state != BUF_STATE_EMPTY) {
+ rc = sleep_thread(common);
+ if (rc)
+ return rc;
+ }
+
+ /* Queue a request to read a Bulk-only CBW */
+ set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
+ bh->outreq->short_not_ok = 1;
+ START_TRANSFER_OR(common, bulk_out, bh->outreq,
+ &bh->outreq_busy, &bh->state)
+ /* Don't know what to do if common->fsg is NULL */
+ return -EIO;
+
+ /* We will drain the buffer in software, which means we
+ * can reuse it for the next filling. No need to advance
+ * next_buffhd_to_fill. */
+
+ /* Wait for the CBW to arrive */
+ while (bh->state != BUF_STATE_FULL) {
+ rc = sleep_thread(common);
+ if (rc)
+ return rc;
+ }
+ smp_rmb();
+ rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
+ bh->state = BUF_STATE_EMPTY;
+
+ return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *d)
+{
+ int rc;
+
+ ep->driver_data = common;
+ rc = usb_ep_enable(ep, d);
+ if (rc)
+ ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
+ return rc;
+}
+
+static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
+ struct usb_request **preq)
+{
+ *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (*preq)
+ return 0;
+ ERROR(common, "can't allocate request for %s\n", ep->name);
+ return -ENOMEM;
+}
+
+/*
+ * Reset interface setting and re-init endpoint state (toggle etc).
+ * Call with altsetting < 0 to disable the interface. The only other
+ * available altsetting is 0, which enables the interface.
+ */
+static int do_set_interface(struct fsg_common *common, int altsetting)
+{
+ int rc = 0;
+ int i;
+ const struct usb_endpoint_descriptor *d;
+
+ if (common->running)
+ DBG(common, "reset interface\n");
+
+reset:
+ /* Deallocate the requests */
+ if (common->prev_fsg) {
+ struct fsg_dev *fsg = common->prev_fsg;
+
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ struct fsg_buffhd *bh = &common->buffhds[i];
+
+ if (bh->inreq) {
+ usb_ep_free_request(fsg->bulk_in, bh->inreq);
+ bh->inreq = NULL;
+ }
+ if (bh->outreq) {
+ usb_ep_free_request(fsg->bulk_out, bh->outreq);
+ bh->outreq = NULL;
+ }
+ }
+
+ /* Disable the endpoints */
+ if (fsg->bulk_in_enabled) {
+ usb_ep_disable(fsg->bulk_in);
+ fsg->bulk_in_enabled = 0;
+ }
+ if (fsg->bulk_out_enabled) {
+ usb_ep_disable(fsg->bulk_out);
+ fsg->bulk_out_enabled = 0;
+ }
+
+ common->prev_fsg = NULL;
+ }
+
+ common->running = 0;
+ if (altsetting < 0 || rc != 0)
+ return rc;
+
+ DBG(common, "set interface %d\n", altsetting);
+
+ if (fsg_is_set(common)) {
+ struct fsg_dev *fsg = common->fsg;
+ common->prev_fsg = common->fsg;
+
+ /* Enable the endpoints */
+ d = fsg_ep_desc(common->gadget,
+ &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
+ rc = enable_endpoint(common, fsg->bulk_in, d);
+ if (rc)
+ goto reset;
+ fsg->bulk_in_enabled = 1;
+
+ d = fsg_ep_desc(common->gadget,
+ &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
+ rc = enable_endpoint(common, fsg->bulk_out, d);
+ if (rc)
+ goto reset;
+ fsg->bulk_out_enabled = 1;
+ common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+ clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+
+ /* Allocate the requests */
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ struct fsg_buffhd *bh = &common->buffhds[i];
+
+ rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
+ if (rc)
+ goto reset;
+ rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
+ if (rc)
+ goto reset;
+ bh->inreq->buf = bh->outreq->buf = bh->buf;
+ bh->inreq->context = bh->outreq->context = bh;
+ bh->inreq->complete = bulk_in_complete;
+ bh->outreq->complete = bulk_out_complete;
+ }
+
+ common->running = 1;
+ for (i = 0; i < common->nluns; ++i)
+ common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+ return rc;
+ } else {
+ return -EIO;
+ }
+}
+
+
+/*
+ * Change our operational configuration. This code must agree with the code
+ * that returns config descriptors, and with interface altsetting code.
+ *
+ * It's also responsible for power management interactions. Some
+ * configurations might not work with our current power sources.
+ * For now we just assume the gadget is always self-powered.
+ */
+static int do_set_config(struct fsg_common *common, u8 new_config)
+{
+ int rc = 0;
+
+ /* Disable the single interface */
+ if (common->config != 0) {
+ DBG(common, "reset config\n");
+ common->config = 0;
+ rc = do_set_interface(common, -1);
+ }
+
+ /* Enable the interface */
+ if (new_config != 0) {
+ common->config = new_config;
+ rc = do_set_interface(common, 0);
+ if (rc != 0)
+ common->config = 0; /* Reset on errors */
+ }
+ return rc;
+}
+
+
+/****************************** ALT CONFIGS ******************************/
+
+
+static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+ struct fsg_dev *fsg = fsg_from_func(f);
+ fsg->common->prev_fsg = fsg->common->fsg;
+ fsg->common->fsg = fsg;
+ fsg->common->new_config = 1;
+ raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+ return 0;
+}
+
+static void fsg_disable(struct usb_function *f)
+{
+ struct fsg_dev *fsg = fsg_from_func(f);
+ fsg->common->prev_fsg = fsg->common->fsg;
+ fsg->common->fsg = fsg;
+ fsg->common->new_config = 0;
+ raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void handle_exception(struct fsg_common *common)
+{
+ siginfo_t info;
+ int sig;
+ int i;
+ struct fsg_buffhd *bh;
+ enum fsg_state old_state;
+ u8 new_config;
+ struct fsg_lun *curlun;
+ unsigned int exception_req_tag;
+ int rc;
+
+ /* Clear the existing signals. Anything but SIGUSR1 is converted
+ * into a high-priority EXIT exception. */
+ for (;;) {
+ sig = dequeue_signal_lock(current, &current->blocked, &info);
+ if (!sig)
+ break;
+ if (sig != SIGUSR1) {
+ if (common->state < FSG_STATE_EXIT)
+ DBG(common, "Main thread exiting on signal\n");
+ raise_exception(common, FSG_STATE_EXIT);
+ }
+ }
+
+ /* Cancel all the pending transfers */
+ if (fsg_is_set(common)) {
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ bh = &common->buffhds[i];
+ if (bh->inreq_busy)
+ usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
+ if (bh->outreq_busy)
+ usb_ep_dequeue(common->fsg->bulk_out,
+ bh->outreq);
+ }
+
+ /* Wait until everything is idle */
+ for (;;) {
+ int num_active = 0;
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ bh = &common->buffhds[i];
+ num_active += bh->inreq_busy + bh->outreq_busy;
+ }
+ if (num_active == 0)
+ break;
+ if (sleep_thread(common))
+ return;
+ }
+
+ /* Clear out the controller's fifos */
+ if (common->fsg->bulk_in_enabled)
+ usb_ep_fifo_flush(common->fsg->bulk_in);
+ if (common->fsg->bulk_out_enabled)
+ usb_ep_fifo_flush(common->fsg->bulk_out);
+ }
+
+ /* Reset the I/O buffer states and pointers, the SCSI
+ * state, and the exception. Then invoke the handler. */
+ spin_lock_irq(&common->lock);
+
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
+ bh = &common->buffhds[i];
+ bh->state = BUF_STATE_EMPTY;
+ }
+ common->next_buffhd_to_fill = &common->buffhds[0];
+ common->next_buffhd_to_drain = &common->buffhds[0];
+ exception_req_tag = common->exception_req_tag;
+ new_config = common->new_config;
+ old_state = common->state;
+
+ if (old_state == FSG_STATE_ABORT_BULK_OUT)
+ common->state = FSG_STATE_STATUS_PHASE;
+ else {
+ for (i = 0; i < common->nluns; ++i) {
+ curlun = &common->luns[i];
+ curlun->prevent_medium_removal = 0;
+ curlun->sense_data = SS_NO_SENSE;
+ curlun->unit_attention_data = SS_NO_SENSE;
+ curlun->sense_data_info = 0;
+ curlun->info_valid = 0;
+ }
+ common->state = FSG_STATE_IDLE;
+ }
+ spin_unlock_irq(&common->lock);
+
+ /* Carry out any extra actions required for the exception */
+ switch (old_state) {
+ case FSG_STATE_ABORT_BULK_OUT:
+ send_status(common);
+ spin_lock_irq(&common->lock);
+ if (common->state == FSG_STATE_STATUS_PHASE)
+ common->state = FSG_STATE_IDLE;
+ spin_unlock_irq(&common->lock);
+ break;
+
+ case FSG_STATE_RESET:
+ /* In case we were forced against our will to halt a
+ * bulk endpoint, clear the halt now. (The SuperH UDC
+ * requires this.) */
+ if (!fsg_is_set(common))
+ break;
+ if (test_and_clear_bit(IGNORE_BULK_OUT,
+ &common->fsg->atomic_bitflags))
+ usb_ep_clear_halt(common->fsg->bulk_in);
+
+ if (common->ep0_req_tag == exception_req_tag)
+ ep0_queue(common); /* Complete the status stage */
+
+ /* Technically this should go here, but it would only be
+ * a waste of time. Ditto for the INTERFACE_CHANGE and
+ * CONFIG_CHANGE cases. */
+ /* for (i = 0; i < common->nluns; ++i) */
+ /* common->luns[i].unit_attention_data = */
+ /* SS_RESET_OCCURRED; */
+ break;
+
+ case FSG_STATE_CONFIG_CHANGE:
+ rc = do_set_config(common, new_config);
+ if (common->ep0_req_tag != exception_req_tag)
+ break;
+ if (rc != 0) { /* STALL on errors */
+ DBG(common, "ep0 set halt\n");
+ usb_ep_set_halt(common->ep0);
+ } else { /* Complete the status stage */
+ ep0_queue(common);
+ }
+ break;
+
+ case FSG_STATE_EXIT:
+ case FSG_STATE_TERMINATED:
+ do_set_config(common, 0); /* Free resources */
+ spin_lock_irq(&common->lock);
+ common->state = FSG_STATE_TERMINATED; /* Stop the thread */
+ spin_unlock_irq(&common->lock);
+ break;
+
+ case FSG_STATE_INTERFACE_CHANGE:
+ case FSG_STATE_DISCONNECT:
+ case FSG_STATE_COMMAND_PHASE:
+ case FSG_STATE_DATA_PHASE:
+ case FSG_STATE_STATUS_PHASE:
+ case FSG_STATE_IDLE:
+ break;
+ }
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_main_thread(void *common_)
+{
+ struct fsg_common *common = common_;
+
+ /* Allow the thread to be killed by a signal, but set the signal mask
+ * to block everything but INT, TERM, KILL, and USR1. */
+ allow_signal(SIGINT);
+ allow_signal(SIGTERM);
+ allow_signal(SIGKILL);
+ allow_signal(SIGUSR1);
+
+ /* Allow the thread to be frozen */
+ set_freezable();
+
+ /* Arrange for userspace references to be interpreted as kernel
+ * pointers. That way we can pass a kernel pointer to a routine
+ * that expects a __user pointer and it will work okay. */
+ set_fs(get_ds());
+
+ /* The main loop */
+ while (common->state != FSG_STATE_TERMINATED) {
+ if (exception_in_progress(common) || signal_pending(current)) {
+ handle_exception(common);
+ continue;
+ }
+
+ if (!common->running) {
+ sleep_thread(common);
+ continue;
+ }
+
+ if (get_next_command(common))
+ continue;
+
+ spin_lock_irq(&common->lock);
+ if (!exception_in_progress(common))
+ common->state = FSG_STATE_DATA_PHASE;
+ spin_unlock_irq(&common->lock);
+
+ if (do_scsi_command(common) || finish_reply(common))
+ continue;
+
+ spin_lock_irq(&common->lock);
+ if (!exception_in_progress(common))
+ common->state = FSG_STATE_STATUS_PHASE;
+ spin_unlock_irq(&common->lock);
+
+ if (send_status(common))
+ continue;
+
+ spin_lock_irq(&common->lock);
+ if (!exception_in_progress(common))
+ common->state = FSG_STATE_IDLE;
+ spin_unlock_irq(&common->lock);
+ }
+
+ spin_lock_irq(&common->lock);
+ common->thread_task = NULL;
+ spin_unlock_irq(&common->lock);
+
+ if (common->thread_exits)
+ common->thread_exits(common);
+
+ /* Let the unbind and cleanup routines know the thread has exited */
+ complete_and_exit(&common->thread_notifier, 0);
+}
+
+
+/*************************** DEVICE ATTRIBUTES ***************************/
+
+/* Write permission is checked per LUN in store_*() functions. */
+static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
+static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
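+
+/* These attributes appear under each LUN's device directory in sysfs once
+ * the LUNs are registered in fsg_common_init() below -- e.g. (illustrative
+ * path) .../lunX/ro and .../lunX/file. Writing a file name into "file"
+ * swaps the backing medium at run time; the behaviour lives in the
+ * fsg_show_*()/fsg_store_*() helpers from storage_common.c, which are not
+ * shown in this hunk. */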
+
+
+/****************************** FSG COMMON ******************************/
+
+static void fsg_common_release(struct kref *ref);
+
+static void fsg_lun_release(struct device *dev)
+{
+ /* Nothing needs to be done */
+}
+
+static inline void fsg_common_get(struct fsg_common *common)
+{
+ kref_get(&common->ref);
+}
+
+static inline void fsg_common_put(struct fsg_common *common)
+{
+ kref_put(&common->ref, fsg_common_release);
+}
+
+
+static struct fsg_common *fsg_common_init(struct fsg_common *common,
+ struct usb_composite_dev *cdev,
+ struct fsg_config *cfg)
+{
+ struct usb_gadget *gadget = cdev->gadget;
+ struct fsg_buffhd *bh;
+ struct fsg_lun *curlun;
+ struct fsg_lun_config *lcfg;
+ int nluns, i, rc;
+ char *pathbuf;
+
+ /* Find out how many LUNs there should be */
+ nluns = cfg->nluns;
+ if (nluns < 1 || nluns > FSG_MAX_LUNS) {
+ dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Allocate? */
+ if (!common) {
+ common = kzalloc(sizeof *common, GFP_KERNEL);
+ if (!common)
+ return ERR_PTR(-ENOMEM);
+ common->free_storage_on_release = 1;
+ } else {
+ memset(common, 0, sizeof *common);
+ common->free_storage_on_release = 0;
+ }
+
+ common->private_data = cfg->private_data;
+
+ common->gadget = gadget;
+ common->ep0 = gadget->ep0;
+ common->ep0req = cdev->req;
+
+ /* Maybe allocate device-global string IDs, and patch descriptors */
+ if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
+ rc = usb_string_id(cdev);
+ if (rc < 0) {
+ kfree(common);
+ return ERR_PTR(rc);
+ }
+ fsg_strings[FSG_STRING_INTERFACE].id = rc;
+ fsg_intf_desc.iInterface = rc;
+ }
+
+ /* Create the LUNs, open their backing files, and register the
+ * LUN devices in sysfs. */
+ curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
+ if (!curlun) {
+ kfree(common);
+ return ERR_PTR(-ENOMEM);
+ }
+ common->luns = curlun;
+
+ init_rwsem(&common->filesem);
+
+ for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
+ curlun->cdrom = !!lcfg->cdrom;
+ curlun->ro = lcfg->cdrom || lcfg->ro;
+ curlun->removable = lcfg->removable;
+ curlun->dev.release = fsg_lun_release;
+ curlun->dev.parent = &gadget->dev;
+ /* curlun->dev.driver = &fsg_driver.driver; XXX */
+ dev_set_drvdata(&curlun->dev, &common->filesem);
+ dev_set_name(&curlun->dev,
+ cfg->lun_name_format
+ ? cfg->lun_name_format
+ : "lun%d",
+ i);
+
+ rc = device_register(&curlun->dev);
+ if (rc) {
+ INFO(common, "failed to register LUN%d: %d\n", i, rc);
+ common->nluns = i;
+ goto error_release;
+ }
+
+ rc = device_create_file(&curlun->dev, &dev_attr_ro);
+ if (rc)
+ goto error_luns;
+ rc = device_create_file(&curlun->dev, &dev_attr_file);
+ if (rc)
+ goto error_luns;
+
+ if (lcfg->filename) {
+ rc = fsg_lun_open(curlun, lcfg->filename);
+ if (rc)
+ goto error_luns;
+ } else if (!curlun->removable) {
+ ERROR(common, "no file given for LUN%d\n", i);
+ rc = -EINVAL;
+ goto error_luns;
+ }
+ }
+ common->nluns = nluns;
+
+
+ /* Data buffers cyclic list */
+ /* Buffers in buffhds are static -- no need for additional
+ * allocation. */
+ bh = common->buffhds;
+ i = FSG_NUM_BUFFERS - 1;
+ do {
+ bh->next = bh + 1;
+ } while (++bh, --i);
+ bh->next = common->buffhds;
+
+
+ /* Prepare the inquiry string */
+ if (cfg->release != 0xffff) {
+ i = cfg->release;
+ } else {
+ /* The sa1100 controller is not supported */
+ i = gadget_is_sa1100(gadget)
+ ? -1
+ : usb_gadget_controller_number(gadget);
+ if (i >= 0) {
+ i = 0x0300 + i;
+ } else {
+ WARNING(common, "controller '%s' not recognized\n",
+ gadget->name);
+ i = 0x0399;
+ }
+ }
+#define OR(x, y) ((x) ? (x) : (y))
+ snprintf(common->inquiry_string, sizeof common->inquiry_string,
+ "%-8s%-16s%04x",
+ OR(cfg->vendor_name, "Linux "),
+ /* Assume the product name depends on the first LUN */
+ OR(cfg->product_name, common->luns->cdrom
+ ? "File-CD Gadget "
+ : "File-Stor Gadget"),
+ i);
+
+
+ /* Some peripheral controllers are known not to be able to
+ * halt bulk endpoints correctly. If one of them is present,
+ * disable stalls.
+ */
+ common->can_stall = cfg->can_stall &&
+ !(gadget_is_sh(common->gadget) ||
+ gadget_is_at91(common->gadget));
+
+
+ spin_lock_init(&common->lock);
+ kref_init(&common->ref);
+
+
+ /* Tell the thread to start working */
+ common->thread_exits = cfg->thread_exits;
+ common->thread_task =
+ kthread_create(fsg_main_thread, common,
+ OR(cfg->thread_name, "file-storage"));
+ if (IS_ERR(common->thread_task)) {
+ rc = PTR_ERR(common->thread_task);
+ goto error_release;
+ }
+ init_completion(&common->thread_notifier);
+#undef OR
+
+
+ /* Information */
+ INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
+ INFO(common, "Number of LUNs=%d\n", common->nluns);
+
+ pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+ for (i = 0, nluns = common->nluns, curlun = common->luns;
+ i < nluns;
+ ++curlun, ++i) {
+ char *p = "(no medium)";
+ if (fsg_lun_is_open(curlun)) {
+ p = "(error)";
+ if (pathbuf) {
+ p = d_path(&curlun->filp->f_path,
+ pathbuf, PATH_MAX);
+ if (IS_ERR(p))
+ p = "(error)";
+ }
+ }
+ LINFO(curlun, "LUN: %s%s%sfile: %s\n",
+ curlun->removable ? "removable " : "",
+ curlun->ro ? "read only " : "",
+ curlun->cdrom ? "CD-ROM " : "",
+ p);
+ }
+ kfree(pathbuf);
+
+ DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
+
+ wake_up_process(common->thread_task);
+
+ return common;
+
+
+error_luns:
+ common->nluns = i + 1;
+error_release:
+ common->state = FSG_STATE_TERMINATED; /* The thread is dead */
+ /* Call fsg_common_release() directly; ref might not be
+ * initialised. */
+ fsg_common_release(&common->ref);
+ complete(&common->thread_notifier);
+ return ERR_PTR(rc);
+}
+
+
+static void fsg_common_release(struct kref *ref)
+{
+ struct fsg_common *common =
+ container_of(ref, struct fsg_common, ref);
+ unsigned i = common->nluns;
+ struct fsg_lun *lun = common->luns;
+
+ /* If the thread isn't already dead, tell it to exit now */
+ if (common->state != FSG_STATE_TERMINATED) {
+ raise_exception(common, FSG_STATE_EXIT);
+ wait_for_completion(&common->thread_notifier);
+
+ /* The cleanup routine waits for this completion also */
+ complete(&common->thread_notifier);
+ }
+
+ /* Beware of the tempting for -> do-while optimization: when in
+ * error recovery nluns may be zero. */
+
+ for (; i; --i, ++lun) {
+ device_remove_file(&lun->dev, &dev_attr_ro);
+ device_remove_file(&lun->dev, &dev_attr_file);
+ fsg_lun_close(lun);
+ device_unregister(&lun->dev);
+ }
+
+ kfree(common->luns);
+ if (common->free_storage_on_release)
+ kfree(common);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct fsg_dev *fsg = fsg_from_func(f);
+
+ DBG(fsg, "unbind\n");
+ fsg_common_put(fsg->common);
+ kfree(fsg);
+}
+
+
+static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct fsg_dev *fsg = fsg_from_func(f);
+ struct usb_gadget *gadget = c->cdev->gadget;
+ int rc;
+ int i;
+ struct usb_ep *ep;
+
+ fsg->gadget = gadget;
+
+ /* New interface */
+ i = usb_interface_id(c, f);
+ if (i < 0)
+ return i;
+ fsg_intf_desc.bInterfaceNumber = i;
+ fsg->interface_number = i;
+
+ /* Find all the endpoints we will use */
+ ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
+ if (!ep)
+ goto autoconf_fail;
+ ep->driver_data = fsg->common; /* claim the endpoint */
+ fsg->bulk_in = ep;
+
+ ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
+ if (!ep)
+ goto autoconf_fail;
+ ep->driver_data = fsg->common; /* claim the endpoint */
+ fsg->bulk_out = ep;
+
+ if (gadget_is_dualspeed(gadget)) {
+ /* Assume endpoint addresses are the same for both speeds */
+ fsg_hs_bulk_in_desc.bEndpointAddress =
+ fsg_fs_bulk_in_desc.bEndpointAddress;
+ fsg_hs_bulk_out_desc.bEndpointAddress =
+ fsg_fs_bulk_out_desc.bEndpointAddress;
+ f->hs_descriptors = fsg_hs_function;
+ }
+
+ return 0;
+
+autoconf_fail:
+ ERROR(fsg, "unable to autoconfigure all endpoints\n");
+ rc = -ENOTSUPP;
+ fsg_unbind(c, f);
+ return rc;
+}
+
+
+/****************************** ADD FUNCTION ******************************/
+
+static struct usb_gadget_strings *fsg_strings_array[] = {
+ &fsg_stringtab,
+ NULL,
+};
+
+static int fsg_add(struct usb_composite_dev *cdev,
+ struct usb_configuration *c,
+ struct fsg_common *common)
+{
+ struct fsg_dev *fsg;
+ int rc;
+
+ fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+ if (unlikely(!fsg))
+ return -ENOMEM;
+
+ fsg->function.name = FSG_DRIVER_DESC;
+ fsg->function.strings = fsg_strings_array;
+ fsg->function.descriptors = fsg_fs_function;
+ fsg->function.bind = fsg_bind;
+ fsg->function.unbind = fsg_unbind;
+ fsg->function.setup = fsg_setup;
+ fsg->function.set_alt = fsg_set_alt;
+ fsg->function.disable = fsg_disable;
+
+ fsg->common = common;
+ /* Our caller holds a reference to the common structure, so we
+ * don't have to worry about it being freed until we return
+ * from this function. So instead of incrementing the counter
+ * now and decrementing it in error recovery, we increment it
+ * only when the call to usb_add_function() succeeds. */
+
+ rc = usb_add_function(c, &fsg->function);
+
+ if (likely(rc == 0))
+ fsg_common_get(fsg->common);
+ else
+ kfree(fsg);
+
+ return rc;
+}
+
+
+
+/************************* Module parameters *************************/
+
+
+struct fsg_module_parameters {
+ char *file[FSG_MAX_LUNS];
+ int ro[FSG_MAX_LUNS];
+ int removable[FSG_MAX_LUNS];
+ int cdrom[FSG_MAX_LUNS];
+
+ unsigned int file_count, ro_count, removable_count, cdrom_count;
+ unsigned int luns; /* nluns */
+ int stall; /* can_stall */
+};
+
+
+#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
+ module_param_array_named(prefix ## name, params.name, type, \
+ &prefix ## params.name ## _count, \
+ S_IRUGO); \
+ MODULE_PARM_DESC(prefix ## name, desc)
+
+#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
+ module_param_named(prefix ## name, params.name, type, \
+ S_IRUGO); \
+ MODULE_PARM_DESC(prefix ## name, desc)
+
+#define FSG_MODULE_PARAMETERS(prefix, params) \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
+ "names of backing files or devices"); \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
+ "true to force read-only"); \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
+ "true to simulate removable media"); \
+ _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
+ "true to simulate CD-ROM instead of disk"); \
+ _FSG_MODULE_PARAM(prefix, params, luns, uint, \
+ "number of LUNs"); \
+ _FSG_MODULE_PARAM(prefix, params, stall, bool, \
+ "false to prevent bulk stalls")
+
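+/* Usage sketch (hypothetical consumer; the name mod_data is an assumption
+ * made for illustration and is not part of this file):
+ *
+ *     static struct fsg_module_parameters mod_data = { .stall = 1 };
+ *     FSG_MODULE_PARAMETERS( , mod_data);        (empty prefix)
+ *
+ * This declares the module parameters "file", "ro", "removable", "cdrom",
+ * "luns" and "stall", all backed by the fields of mod_data. */
+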
+
+static void
+fsg_config_from_params(struct fsg_config *cfg,
+ const struct fsg_module_parameters *params)
+{
+ struct fsg_lun_config *lun;
+ unsigned i;
+
+ /* Configure LUNs */
+ cfg->nluns =
+ min(params->luns ?: (params->file_count ?: 1u),
+ (unsigned)FSG_MAX_LUNS);
+ for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
+ lun->ro = !!params->ro[i];
+ lun->cdrom = !!params->cdrom[i];
+ lun->removable = /* Removable by default */
+ params->removable_count <= i || params->removable[i];
+ lun->filename =
+ params->file_count > i && params->file[i][0]
+ ? params->file[i]
+ : 0;
+ }
+
+ /* Let MSF use defaults */
+ cfg->lun_name_format = 0;
+ cfg->thread_name = 0;
+ cfg->vendor_name = 0;
+ cfg->product_name = 0;
+ cfg->release = 0xffff;
+
+ cfg->thread_exits = 0;
+ cfg->private_data = 0;
+
+ /* Finalise */
+ cfg->can_stall = params->stall;
+}
+
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+ struct usb_composite_dev *cdev,
+ const struct fsg_module_parameters *params)
+ __attribute__((unused));
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+ struct usb_composite_dev *cdev,
+ const struct fsg_module_parameters *params)
+{
+ struct fsg_config cfg;
+ fsg_config_from_params(&cfg, params);
+ return fsg_common_init(common, cdev, &cfg);
+}
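+
+/* Typical call sequence from a gadget's bind code (a sketch under
+ * assumptions: cdev, c and mod_data stand for the caller's composite
+ * device, configuration and module parameters and are not defined here):
+ *
+ *     common = fsg_common_from_params(NULL, cdev, &mod_data);
+ *     if (IS_ERR(common))
+ *             return PTR_ERR(common);
+ *     rc = fsg_add(cdev, c, common);
+ *     fsg_common_put(common);
+ *     return rc;
+ */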
+
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index c9966cc07d3..95dae4c1ea4 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -4,6 +4,8 @@
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -149,8 +151,8 @@ static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor __initdata = {
.bDataInterface = 0x01,
};
-static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
- .bLength = sizeof acm_descriptor,
+static struct usb_cdc_acm_descriptor rndis_acm_descriptor __initdata = {
+ .bLength = sizeof rndis_acm_descriptor,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
@@ -179,6 +181,20 @@ static struct usb_interface_descriptor rndis_data_intf __initdata = {
/* .iInterface = DYNAMIC */
};
+
+static struct usb_interface_assoc_descriptor
+rndis_iad_descriptor = {
+ .bLength = sizeof rndis_iad_descriptor,
+ .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
+
+ .bFirstInterface = 0, /* XXX, hardcoded */
+ .bInterfaceCount = 2, // control + data
+ .bFunctionClass = USB_CLASS_COMM,
+ .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ /* .iFunction = DYNAMIC */
+};
+
/* full speed support: */
static struct usb_endpoint_descriptor fs_notify_desc __initdata = {
@@ -208,11 +224,12 @@ static struct usb_endpoint_descriptor fs_out_desc __initdata = {
};
static struct usb_descriptor_header *eth_fs_function[] __initdata = {
+ (struct usb_descriptor_header *) &rndis_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
(struct usb_descriptor_header *) &header_desc,
(struct usb_descriptor_header *) &call_mgmt_descriptor,
- (struct usb_descriptor_header *) &acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_acm_descriptor,
(struct usb_descriptor_header *) &rndis_union_desc,
(struct usb_descriptor_header *) &fs_notify_desc,
/* data interface has no altsetting */
@@ -252,11 +269,12 @@ static struct usb_endpoint_descriptor hs_out_desc __initdata = {
};
static struct usb_descriptor_header *eth_hs_function[] __initdata = {
+ (struct usb_descriptor_header *) &rndis_iad_descriptor,
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
(struct usb_descriptor_header *) &header_desc,
(struct usb_descriptor_header *) &call_mgmt_descriptor,
- (struct usb_descriptor_header *) &acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_acm_descriptor,
(struct usb_descriptor_header *) &rndis_union_desc,
(struct usb_descriptor_header *) &hs_notify_desc,
/* data interface has no altsetting */
@@ -271,6 +289,7 @@ static struct usb_descriptor_header *eth_hs_function[] __initdata = {
static struct usb_string rndis_string_defs[] = {
[0].s = "RNDIS Communications Control",
[1].s = "RNDIS Ethernet Data",
+ [2].s = "RNDIS",
{ } /* end of list */
};
@@ -587,6 +606,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
if (status < 0)
goto fail;
rndis->ctrl_id = status;
+ rndis_iad_descriptor.bFirstInterface = status;
rndis_control_intf.bInterfaceNumber = status;
rndis_union_desc.bMasterInterface0 = status;
@@ -798,6 +818,13 @@ int __init rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
return status;
rndis_string_defs[1].id = status;
rndis_data_intf.iInterface = status;
+
+ /* IAD iFunction label */
+ status = usb_string_id(c->cdev);
+ if (status < 0)
+ return status;
+ rndis_string_defs[2].id = status;
+ rndis_iad_descriptor.iFunction = status;
}
/* allocate and initialize one new instance */
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 1e6aa504d58..29dfb0277ff 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -248,8 +248,6 @@
#include <linux/freezer.h>
#include <linux/utsname.h>
-#include <asm/unaligned.h>
-
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -274,21 +272,20 @@
#define DRIVER_NAME "g_file_storage"
#define DRIVER_VERSION "20 November 2008"
-static const char longname[] = DRIVER_DESC;
-static const char shortname[] = DRIVER_NAME;
+static char fsg_string_manufacturer[64];
+static const char fsg_string_product[] = DRIVER_DESC;
+static char fsg_string_serial[13];
+static const char fsg_string_config[] = "Self-powered";
+static const char fsg_string_interface[] = "Mass Storage";
+
+
+#include "storage_common.c"
+
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Alan Stern");
MODULE_LICENSE("Dual BSD/GPL");
-/* Thanks to NetChip Technologies for donating this product ID.
- *
- * DO NOT REUSE THESE IDs with any other driver!! Ever!!
- * Instead: allocate your own, using normal USB-IF procedures. */
-#define DRIVER_VENDOR_ID 0x0525 // NetChip
-#define DRIVER_PRODUCT_ID 0xa4a5 // Linux-USB File-backed Storage Gadget
-
-
/*
* This driver assumes self-powered hardware and has no way for users to
* trigger remote wakeup. It uses autoconfiguration to select endpoints
@@ -298,54 +295,12 @@ MODULE_LICENSE("Dual BSD/GPL");
/*-------------------------------------------------------------------------*/
-#define LDBG(lun,fmt,args...) \
- dev_dbg(&(lun)->dev , fmt , ## args)
-#define MDBG(fmt,args...) \
- pr_debug(DRIVER_NAME ": " fmt , ## args)
-
-#ifndef DEBUG
-#undef VERBOSE_DEBUG
-#undef DUMP_MSGS
-#endif /* !DEBUG */
-
-#ifdef VERBOSE_DEBUG
-#define VLDBG LDBG
-#else
-#define VLDBG(lun,fmt,args...) \
- do { } while (0)
-#endif /* VERBOSE_DEBUG */
-
-#define LERROR(lun,fmt,args...) \
- dev_err(&(lun)->dev , fmt , ## args)
-#define LWARN(lun,fmt,args...) \
- dev_warn(&(lun)->dev , fmt , ## args)
-#define LINFO(lun,fmt,args...) \
- dev_info(&(lun)->dev , fmt , ## args)
-
-#define MINFO(fmt,args...) \
- pr_info(DRIVER_NAME ": " fmt , ## args)
-
-#define DBG(d, fmt, args...) \
- dev_dbg(&(d)->gadget->dev , fmt , ## args)
-#define VDBG(d, fmt, args...) \
- dev_vdbg(&(d)->gadget->dev , fmt , ## args)
-#define ERROR(d, fmt, args...) \
- dev_err(&(d)->gadget->dev , fmt , ## args)
-#define WARNING(d, fmt, args...) \
- dev_warn(&(d)->gadget->dev , fmt , ## args)
-#define INFO(d, fmt, args...) \
- dev_info(&(d)->gadget->dev , fmt , ## args)
-
-
-/*-------------------------------------------------------------------------*/
/* Encapsulate the module parameter settings */
-#define MAX_LUNS 8
-
static struct {
- char *file[MAX_LUNS];
- int ro[MAX_LUNS];
+ char *file[FSG_MAX_LUNS];
+ int ro[FSG_MAX_LUNS];
unsigned int num_filenames;
unsigned int num_ros;
unsigned int nluns;
@@ -372,8 +327,8 @@ static struct {
.removable = 0,
.can_stall = 1,
.cdrom = 0,
- .vendor = DRIVER_VENDOR_ID,
- .product = DRIVER_PRODUCT_ID,
+ .vendor = FSG_VENDOR_ID,
+ .product = FSG_PRODUCT_ID,
.release = 0xffff, // Use controller chip type
.buflen = 16384,
};
@@ -425,125 +380,6 @@ MODULE_PARM_DESC(buflen, "I/O buffer size");
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
-/*-------------------------------------------------------------------------*/
-
-/* SCSI device types */
-#define TYPE_DISK 0x00
-#define TYPE_CDROM 0x05
-
-/* USB protocol value = the transport method */
-#define USB_PR_CBI 0x00 // Control/Bulk/Interrupt
-#define USB_PR_CB 0x01 // Control/Bulk w/o interrupt
-#define USB_PR_BULK 0x50 // Bulk-only
-
-/* USB subclass value = the protocol encapsulation */
-#define USB_SC_RBC 0x01 // Reduced Block Commands (flash)
-#define USB_SC_8020 0x02 // SFF-8020i, MMC-2, ATAPI (CD-ROM)
-#define USB_SC_QIC 0x03 // QIC-157 (tape)
-#define USB_SC_UFI 0x04 // UFI (floppy)
-#define USB_SC_8070 0x05 // SFF-8070i (removable)
-#define USB_SC_SCSI 0x06 // Transparent SCSI
-
-/* Bulk-only data structures */
-
-/* Command Block Wrapper */
-struct bulk_cb_wrap {
- __le32 Signature; // Contains 'USBC'
- u32 Tag; // Unique per command id
- __le32 DataTransferLength; // Size of the data
- u8 Flags; // Direction in bit 7
- u8 Lun; // LUN (normally 0)
- u8 Length; // Of the CDB, <= MAX_COMMAND_SIZE
- u8 CDB[16]; // Command Data Block
-};
-
-#define USB_BULK_CB_WRAP_LEN 31
-#define USB_BULK_CB_SIG 0x43425355 // Spells out USBC
-#define USB_BULK_IN_FLAG 0x80
-
-/* Command Status Wrapper */
-struct bulk_cs_wrap {
- __le32 Signature; // Should = 'USBS'
- u32 Tag; // Same as original command
- __le32 Residue; // Amount not transferred
- u8 Status; // See below
-};
-
-#define USB_BULK_CS_WRAP_LEN 13
-#define USB_BULK_CS_SIG 0x53425355 // Spells out 'USBS'
-#define USB_STATUS_PASS 0
-#define USB_STATUS_FAIL 1
-#define USB_STATUS_PHASE_ERROR 2
-
-/* Bulk-only class specific requests */
-#define USB_BULK_RESET_REQUEST 0xff
-#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
-
-
-/* CBI Interrupt data structure */
-struct interrupt_data {
- u8 bType;
- u8 bValue;
-};
-
-#define CBI_INTERRUPT_DATA_LEN 2
-
-/* CBI Accept Device-Specific Command request */
-#define USB_CBI_ADSC_REQUEST 0x00
-
-
-#define MAX_COMMAND_SIZE 16 // Length of a SCSI Command Data Block
-
-/* SCSI commands that we recognize */
-#define SC_FORMAT_UNIT 0x04
-#define SC_INQUIRY 0x12
-#define SC_MODE_SELECT_6 0x15
-#define SC_MODE_SELECT_10 0x55
-#define SC_MODE_SENSE_6 0x1a
-#define SC_MODE_SENSE_10 0x5a
-#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
-#define SC_READ_6 0x08
-#define SC_READ_10 0x28
-#define SC_READ_12 0xa8
-#define SC_READ_CAPACITY 0x25
-#define SC_READ_FORMAT_CAPACITIES 0x23
-#define SC_READ_HEADER 0x44
-#define SC_READ_TOC 0x43
-#define SC_RELEASE 0x17
-#define SC_REQUEST_SENSE 0x03
-#define SC_RESERVE 0x16
-#define SC_SEND_DIAGNOSTIC 0x1d
-#define SC_START_STOP_UNIT 0x1b
-#define SC_SYNCHRONIZE_CACHE 0x35
-#define SC_TEST_UNIT_READY 0x00
-#define SC_VERIFY 0x2f
-#define SC_WRITE_6 0x0a
-#define SC_WRITE_10 0x2a
-#define SC_WRITE_12 0xaa
-
-/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
-#define SS_NO_SENSE 0
-#define SS_COMMUNICATION_FAILURE 0x040800
-#define SS_INVALID_COMMAND 0x052000
-#define SS_INVALID_FIELD_IN_CDB 0x052400
-#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
-#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
-#define SS_MEDIUM_NOT_PRESENT 0x023a00
-#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
-#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
-#define SS_RESET_OCCURRED 0x062900
-#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
-#define SS_UNRECOVERED_READ_ERROR 0x031100
-#define SS_WRITE_ERROR 0x030c02
-#define SS_WRITE_PROTECTED 0x072700
-
-#define SK(x) ((u8) ((x) >> 16)) // Sense Key byte, etc.
-#define ASC(x) ((u8) ((x) >> 8))
-#define ASCQ(x) ((u8) (x))
-
-
-/*-------------------------------------------------------------------------*/
-
/*
* These definitions will permit the compiler to avoid generating code for
* parts of the driver that aren't used in the non-TEST version. Even gcc
@@ -566,81 +402,8 @@ struct interrupt_data {
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
-struct lun {
- struct file *filp;
- loff_t file_length;
- loff_t num_sectors;
-
- unsigned int ro : 1;
- unsigned int prevent_medium_removal : 1;
- unsigned int registered : 1;
- unsigned int info_valid : 1;
-
- u32 sense_data;
- u32 sense_data_info;
- u32 unit_attention_data;
-
- struct device dev;
-};
-
-#define backing_file_is_open(curlun) ((curlun)->filp != NULL)
-
-static struct lun *dev_to_lun(struct device *dev)
-{
- return container_of(dev, struct lun, dev);
-}
-
-
-/* Big enough to hold our biggest descriptor */
-#define EP0_BUFSIZE 256
-#define DELAYED_STATUS (EP0_BUFSIZE + 999) // An impossibly large value
-
-/* Number of buffers we will use. 2 is enough for double-buffering */
-#define NUM_BUFFERS 2
-
-enum fsg_buffer_state {
- BUF_STATE_EMPTY = 0,
- BUF_STATE_FULL,
- BUF_STATE_BUSY
-};
-
-struct fsg_buffhd {
- void *buf;
- enum fsg_buffer_state state;
- struct fsg_buffhd *next;
-
- /* The NetChip 2280 is faster, and handles some protocol faults
- * better, if we don't submit any short bulk-out read requests.
- * So we will record the intended request length here. */
- unsigned int bulk_out_intended_length;
-
- struct usb_request *inreq;
- int inreq_busy;
- struct usb_request *outreq;
- int outreq_busy;
-};
-
-enum fsg_state {
- FSG_STATE_COMMAND_PHASE = -10, // This one isn't used anywhere
- FSG_STATE_DATA_PHASE,
- FSG_STATE_STATUS_PHASE,
-
- FSG_STATE_IDLE = 0,
- FSG_STATE_ABORT_BULK_OUT,
- FSG_STATE_RESET,
- FSG_STATE_INTERFACE_CHANGE,
- FSG_STATE_CONFIG_CHANGE,
- FSG_STATE_DISCONNECT,
- FSG_STATE_EXIT,
- FSG_STATE_TERMINATED
-};
+/*-------------------------------------------------------------------------*/
-enum data_direction {
- DATA_DIR_UNKNOWN = 0,
- DATA_DIR_FROM_HOST,
- DATA_DIR_TO_HOST,
- DATA_DIR_NONE
-};
struct fsg_dev {
/* lock protects: state, all the req_busy's, and cbbuf_cmnd */
@@ -662,7 +425,7 @@ struct fsg_dev {
int intreq_busy;
struct fsg_buffhd *intr_buffhd;
- unsigned int bulk_out_maxpacket;
+ unsigned int bulk_out_maxpacket;
enum fsg_state state; // For exception handling
unsigned int exception_req_tag;
@@ -687,7 +450,7 @@ struct fsg_dev {
struct fsg_buffhd *next_buffhd_to_fill;
struct fsg_buffhd *next_buffhd_to_drain;
- struct fsg_buffhd buffhds[NUM_BUFFERS];
+ struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
int thread_wakeup_needed;
struct completion thread_notifier;
@@ -712,8 +475,8 @@ struct fsg_dev {
u8 cbbuf_cmnd[MAX_COMMAND_SIZE];
unsigned int nluns;
- struct lun *luns;
- struct lun *curlun;
+ struct fsg_lun *luns;
+ struct fsg_lun *curlun;
};
typedef void (*fsg_routine_t)(struct fsg_dev *);
@@ -739,49 +502,9 @@ static void set_bulk_out_req_length(struct fsg_dev *fsg,
static struct fsg_dev *the_fsg;
static struct usb_gadget_driver fsg_driver;
-static void close_backing_file(struct lun *curlun);
-
/*-------------------------------------------------------------------------*/
-#ifdef DUMP_MSGS
-
-static void dump_msg(struct fsg_dev *fsg, const char *label,
- const u8 *buf, unsigned int length)
-{
- if (length < 512) {
- DBG(fsg, "%s, length %u:\n", label, length);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
- 16, 1, buf, length, 0);
- }
-}
-
-static void dump_cdb(struct fsg_dev *fsg)
-{}
-
-#else
-
-static void dump_msg(struct fsg_dev *fsg, const char *label,
- const u8 *buf, unsigned int length)
-{}
-
-#ifdef VERBOSE_DEBUG
-
-static void dump_cdb(struct fsg_dev *fsg)
-{
- print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
- 16, 1, fsg->cmnd, fsg->cmnd_size, 0);
-}
-
-#else
-
-static void dump_cdb(struct fsg_dev *fsg)
-{}
-
-#endif /* VERBOSE_DEBUG */
-#endif /* DUMP_MSGS */
-
-
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
const char *name;
@@ -799,26 +522,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
/*-------------------------------------------------------------------------*/
-/* Routines for unaligned data access */
-
-static u32 get_unaligned_be24(u8 *buf)
-{
- return 0xffffff & (u32) get_unaligned_be32(buf - 1);
-}
-
-
-/*-------------------------------------------------------------------------*/
-
/*
* DESCRIPTORS ... most are static, but strings and (full) configuration
* descriptors are built on demand. Also the (static) config and interface
* descriptors are adjusted during fsg_bind().
*/
-#define STRING_MANUFACTURER 1
-#define STRING_PRODUCT 2
-#define STRING_SERIAL 3
-#define STRING_CONFIG 4
-#define STRING_INTERFACE 5
/* There is only one configuration. */
#define CONFIG_VALUE 1
@@ -832,13 +540,13 @@ device_desc = {
.bDeviceClass = USB_CLASS_PER_INTERFACE,
/* The next three values can be overridden by module parameters */
- .idVendor = cpu_to_le16(DRIVER_VENDOR_ID),
- .idProduct = cpu_to_le16(DRIVER_PRODUCT_ID),
+ .idVendor = cpu_to_le16(FSG_VENDOR_ID),
+ .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
.bcdDevice = cpu_to_le16(0xffff),
- .iManufacturer = STRING_MANUFACTURER,
- .iProduct = STRING_PRODUCT,
- .iSerialNumber = STRING_SERIAL,
+ .iManufacturer = FSG_STRING_MANUFACTURER,
+ .iProduct = FSG_STRING_PRODUCT,
+ .iSerialNumber = FSG_STRING_SERIAL,
.bNumConfigurations = 1,
};
@@ -850,86 +558,12 @@ config_desc = {
/* wTotalLength computed by usb_gadget_config_buf() */
.bNumInterfaces = 1,
.bConfigurationValue = CONFIG_VALUE,
- .iConfiguration = STRING_CONFIG,
+ .iConfiguration = FSG_STRING_CONFIG,
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
.bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
};
-static struct usb_otg_descriptor
-otg_desc = {
- .bLength = sizeof(otg_desc),
- .bDescriptorType = USB_DT_OTG,
-
- .bmAttributes = USB_OTG_SRP,
-};
-
-/* There is only one interface. */
-static struct usb_interface_descriptor
-intf_desc = {
- .bLength = sizeof intf_desc,
- .bDescriptorType = USB_DT_INTERFACE,
-
- .bNumEndpoints = 2, // Adjusted during fsg_bind()
- .bInterfaceClass = USB_CLASS_MASS_STORAGE,
- .bInterfaceSubClass = USB_SC_SCSI, // Adjusted during fsg_bind()
- .bInterfaceProtocol = USB_PR_BULK, // Adjusted during fsg_bind()
- .iInterface = STRING_INTERFACE,
-};
-
-/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
- * and interrupt-in. */
-
-static struct usb_endpoint_descriptor
-fs_bulk_in_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
-
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- /* wMaxPacketSize set by autoconfiguration */
-};
-
-static struct usb_endpoint_descriptor
-fs_bulk_out_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
-
- .bEndpointAddress = USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- /* wMaxPacketSize set by autoconfiguration */
-};
-
-static struct usb_endpoint_descriptor
-fs_intr_in_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
-
- .bEndpointAddress = USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
- .wMaxPacketSize = cpu_to_le16(2),
- .bInterval = 32, // frames -> 32 ms
-};
-
-static const struct usb_descriptor_header *fs_function[] = {
- (struct usb_descriptor_header *) &otg_desc,
- (struct usb_descriptor_header *) &intf_desc,
- (struct usb_descriptor_header *) &fs_bulk_in_desc,
- (struct usb_descriptor_header *) &fs_bulk_out_desc,
- (struct usb_descriptor_header *) &fs_intr_in_desc,
- NULL,
-};
-#define FS_FUNCTION_PRE_EP_ENTRIES 2
-
-
-/*
- * USB 2.0 devices need to expose both high speed and full speed
- * descriptors, unless they only run at full speed.
- *
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the config descriptor.
- */
static struct usb_qualifier_descriptor
dev_qualifier = {
.bLength = sizeof dev_qualifier,
@@ -941,78 +575,6 @@ dev_qualifier = {
.bNumConfigurations = 1,
};
-static struct usb_endpoint_descriptor
-hs_bulk_in_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
-
- /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
-};
-
-static struct usb_endpoint_descriptor
-hs_bulk_out_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
-
- /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = cpu_to_le16(512),
- .bInterval = 1, // NAK every 1 uframe
-};
-
-static struct usb_endpoint_descriptor
-hs_intr_in_desc = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
-
- /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
- .bmAttributes = USB_ENDPOINT_XFER_INT,
- .wMaxPacketSize = cpu_to_le16(2),
- .bInterval = 9, // 2**(9-1) = 256 uframes -> 32 ms
-};
-
-static const struct usb_descriptor_header *hs_function[] = {
- (struct usb_descriptor_header *) &otg_desc,
- (struct usb_descriptor_header *) &intf_desc,
- (struct usb_descriptor_header *) &hs_bulk_in_desc,
- (struct usb_descriptor_header *) &hs_bulk_out_desc,
- (struct usb_descriptor_header *) &hs_intr_in_desc,
- NULL,
-};
-#define HS_FUNCTION_PRE_EP_ENTRIES 2
-
-/* Maxpacket and other transfer characteristics vary by speed. */
-static struct usb_endpoint_descriptor *
-ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
- struct usb_endpoint_descriptor *hs)
-{
- if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
- return hs;
- return fs;
-}
-
-
-/* The CBI specification limits the serial string to 12 uppercase hexadecimal
- * characters. */
-static char manufacturer[64];
-static char serial[13];
-
-/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
-static struct usb_string strings[] = {
- {STRING_MANUFACTURER, manufacturer},
- {STRING_PRODUCT, longname},
- {STRING_SERIAL, serial},
- {STRING_CONFIG, "Self-powered"},
- {STRING_INTERFACE, "Mass Storage"},
- {}
-};
-
-static struct usb_gadget_strings stringtab = {
- .language = 0x0409, // en-us
- .strings = strings,
-};
/*
@@ -1032,10 +594,9 @@ static int populate_config_buf(struct usb_gadget *gadget,
if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
- if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH)
- function = hs_function;
- else
- function = fs_function;
+ function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
+ ? (const struct usb_descriptor_header **)fsg_hs_function
+ : (const struct usb_descriptor_header **)fsg_fs_function;
/* for now, don't advertise srp-only devices */
if (!gadget_is_otg(gadget))
@@ -1386,7 +947,7 @@ get_config:
VDBG(fsg, "get string descriptor\n");
/* wIndex == language code */
- value = usb_gadget_get_string(&stringtab,
+ value = usb_gadget_get_string(&fsg_stringtab,
w_value & 0xff, req->buf);
break;
}
@@ -1551,7 +1112,7 @@ static int sleep_thread(struct fsg_dev *fsg)
static int do_read(struct fsg_dev *fsg)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
u32 lba;
struct fsg_buffhd *bh;
int rc;
@@ -1677,7 +1238,7 @@ static int do_read(struct fsg_dev *fsg)
static int do_write(struct fsg_dev *fsg)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
u32 lba;
struct fsg_buffhd *bh;
int get_some_more;
@@ -1713,7 +1274,7 @@ static int do_write(struct fsg_dev *fsg)
}
if (fsg->cmnd[1] & 0x08) { // FUA
spin_lock(&curlun->filp->f_lock);
- curlun->filp->f_flags |= O_SYNC;
+ curlun->filp->f_flags |= O_DSYNC;
spin_unlock(&curlun->filp->f_lock);
}
}
@@ -1864,33 +1425,14 @@ static int do_write(struct fsg_dev *fsg)
/*-------------------------------------------------------------------------*/
-/* Sync the file data, don't bother with the metadata.
- * This code was copied from fs/buffer.c:sys_fdatasync(). */
-static int fsync_sub(struct lun *curlun)
-{
- struct file *filp = curlun->filp;
-
- if (curlun->ro || !filp)
- return 0;
- return vfs_fsync(filp, filp->f_path.dentry, 1);
-}
-
-static void fsync_all(struct fsg_dev *fsg)
-{
- int i;
-
- for (i = 0; i < fsg->nluns; ++i)
- fsync_sub(&fsg->luns[i]);
-}
-
static int do_synchronize_cache(struct fsg_dev *fsg)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
int rc;
/* We ignore the requested LBA and write out all file's
* dirty data buffers. */
- rc = fsync_sub(curlun);
+ rc = fsg_lun_fsync_sub(curlun);
if (rc)
curlun->sense_data = SS_WRITE_ERROR;
return 0;
@@ -1899,7 +1441,7 @@ static int do_synchronize_cache(struct fsg_dev *fsg)
/*-------------------------------------------------------------------------*/
-static void invalidate_sub(struct lun *curlun)
+static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
struct inode *inode = filp->f_path.dentry->d_inode;
@@ -1911,7 +1453,7 @@ static void invalidate_sub(struct lun *curlun)
static int do_verify(struct fsg_dev *fsg)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
u32 lba;
u32 verification_length;
struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
@@ -1944,7 +1486,7 @@ static int do_verify(struct fsg_dev *fsg)
file_offset = ((loff_t) lba) << 9;
/* Write out all the dirty buffers before invalidating them */
- fsync_sub(curlun);
+ fsg_lun_fsync_sub(curlun);
if (signal_pending(current))
return -EINTR;
@@ -2041,7 +1583,7 @@ static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
u8 *buf = (u8 *) bh->buf;
u32 sd, sdinfo;
int valid;
@@ -2095,7 +1637,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
int pmi = fsg->cmnd[8];
u8 *buf = (u8 *) bh->buf;
@@ -2113,27 +1655,9 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
}
-static void store_cdrom_address(u8 *dest, int msf, u32 addr)
-{
- if (msf) {
- /* Convert to Minutes-Seconds-Frames */
- addr >>= 2; /* Convert to 2048-byte frames */
- addr += 2*75; /* Lead-in occupies 2 seconds */
- dest[3] = addr % 75; /* Frames */
- addr /= 75;
- dest[2] = addr % 60; /* Seconds */
- addr /= 60;
- dest[1] = addr; /* Minutes */
- dest[0] = 0; /* Reserved */
- } else {
- /* Absolute sector */
- put_unaligned_be32(addr, dest);
- }
-}
-
static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
int msf = fsg->cmnd[1] & 0x02;
u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
u8 *buf = (u8 *) bh->buf;
@@ -2156,7 +1680,7 @@ static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
int msf = fsg->cmnd[1] & 0x02;
int start_track = fsg->cmnd[6];
u8 *buf = (u8 *) bh->buf;
@@ -2184,7 +1708,7 @@ static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
int mscmnd = fsg->cmnd[0];
u8 *buf = (u8 *) bh->buf;
u8 *buf0 = buf;
@@ -2265,7 +1789,7 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
static int do_start_stop(struct fsg_dev *fsg)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
int loej, start;
if (!mod_data.removable) {
@@ -2295,7 +1819,7 @@ static int do_start_stop(struct fsg_dev *fsg)
if (loej) { // Simulate an unload/eject
up_read(&fsg->filesem);
down_write(&fsg->filesem);
- close_backing_file(curlun);
+ fsg_lun_close(curlun);
up_write(&fsg->filesem);
down_read(&fsg->filesem);
}
@@ -2303,7 +1827,7 @@ static int do_start_stop(struct fsg_dev *fsg)
/* Our emulation doesn't support mounting; the medium is
* available for use as soon as it is loaded. */
- if (!backing_file_is_open(curlun)) {
+ if (!fsg_lun_is_open(curlun)) {
curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
return -EINVAL;
}
@@ -2315,7 +1839,7 @@ static int do_start_stop(struct fsg_dev *fsg)
static int do_prevent_allow(struct fsg_dev *fsg)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
int prevent;
if (!mod_data.removable) {
@@ -2330,7 +1854,7 @@ static int do_prevent_allow(struct fsg_dev *fsg)
}
if (curlun->prevent_medium_removal && !prevent)
- fsync_sub(curlun);
+ fsg_lun_fsync_sub(curlun);
curlun->prevent_medium_removal = prevent;
return 0;
}
@@ -2339,7 +1863,7 @@ static int do_prevent_allow(struct fsg_dev *fsg)
static int do_read_format_capacities(struct fsg_dev *fsg,
struct fsg_buffhd *bh)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
u8 *buf = (u8 *) bh->buf;
buf[0] = buf[1] = buf[2] = 0;
@@ -2356,7 +1880,7 @@ static int do_read_format_capacities(struct fsg_dev *fsg,
static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
/* We don't support MODE SELECT */
curlun->sense_data = SS_INVALID_COMMAND;
@@ -2599,7 +2123,7 @@ static int finish_reply(struct fsg_dev *fsg)
static int send_status(struct fsg_dev *fsg)
{
- struct lun *curlun = fsg->curlun;
+ struct fsg_lun *curlun = fsg->curlun;
struct fsg_buffhd *bh;
int rc;
u8 status = USB_STATUS_PASS;
@@ -2691,7 +2215,7 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
int lun = fsg->cmnd[1] >> 5;
static const char dirletter[4] = {'u', 'o', 'i', 'n'};
char hdlen[20];
- struct lun *curlun;
+ struct fsg_lun *curlun;
/* Adjust the expected cmnd_size for protocol encapsulation padding.
* Transparent SCSI doesn't pad. */
@@ -2820,7 +2344,7 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
/* If the medium isn't mounted and the command needs to access
* it, return an error. */
- if (curlun && !backing_file_is_open(curlun) && needs_medium) {
+ if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
return -EINVAL;
}
@@ -3075,8 +2599,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
- struct usb_request *req = bh->outreq;
- struct bulk_cb_wrap *cbw = req->buf;
+ struct usb_request *req = bh->outreq;
+ struct fsg_bulk_cb_wrap *cbw = req->buf;
/* Was this a real packet? Should it be ignored? */
if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
@@ -3105,7 +2629,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
}
/* Is the CBW meaningful? */
- if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
+ if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
"cmdlen %u\n",
@@ -3238,7 +2762,7 @@ static int do_set_interface(struct fsg_dev *fsg, int altsetting)
reset:
/* Deallocate the requests */
- for (i = 0; i < NUM_BUFFERS; ++i) {
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
struct fsg_buffhd *bh = &fsg->buffhds[i];
if (bh->inreq) {
@@ -3276,12 +2800,14 @@ reset:
DBG(fsg, "set interface %d\n", altsetting);
/* Enable the endpoints */
- d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
+ d = fsg_ep_desc(fsg->gadget,
+ &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
goto reset;
fsg->bulk_in_enabled = 1;
- d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
+ d = fsg_ep_desc(fsg->gadget,
+ &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
goto reset;
fsg->bulk_out_enabled = 1;
@@ -3289,14 +2815,15 @@ reset:
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
if (transport_is_cbi()) {
- d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc);
+ d = fsg_ep_desc(fsg->gadget,
+ &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc);
if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
goto reset;
fsg->intr_in_enabled = 1;
}
/* Allocate the requests */
- for (i = 0; i < NUM_BUFFERS; ++i) {
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
struct fsg_buffhd *bh = &fsg->buffhds[i];
if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
@@ -3372,7 +2899,7 @@ static void handle_exception(struct fsg_dev *fsg)
struct fsg_buffhd *bh;
enum fsg_state old_state;
u8 new_config;
- struct lun *curlun;
+ struct fsg_lun *curlun;
unsigned int exception_req_tag;
int rc;
@@ -3392,7 +2919,7 @@ static void handle_exception(struct fsg_dev *fsg)
/* Cancel all the pending transfers */
if (fsg->intreq_busy)
usb_ep_dequeue(fsg->intr_in, fsg->intreq);
- for (i = 0; i < NUM_BUFFERS; ++i) {
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
bh = &fsg->buffhds[i];
if (bh->inreq_busy)
usb_ep_dequeue(fsg->bulk_in, bh->inreq);
@@ -3403,7 +2930,7 @@ static void handle_exception(struct fsg_dev *fsg)
/* Wait until everything is idle */
for (;;) {
num_active = fsg->intreq_busy;
- for (i = 0; i < NUM_BUFFERS; ++i) {
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
bh = &fsg->buffhds[i];
num_active += bh->inreq_busy + bh->outreq_busy;
}
@@ -3425,7 +2952,7 @@ static void handle_exception(struct fsg_dev *fsg)
* state, and the exception. Then invoke the handler. */
spin_lock_irq(&fsg->lock);
- for (i = 0; i < NUM_BUFFERS; ++i) {
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
bh = &fsg->buffhds[i];
bh->state = BUF_STATE_EMPTY;
}
@@ -3506,7 +3033,8 @@ static void handle_exception(struct fsg_dev *fsg)
break;
case FSG_STATE_DISCONNECT:
- fsync_all(fsg);
+ for (i = 0; i < fsg->nluns; ++i)
+ fsg_lun_fsync_sub(fsg->luns + i);
do_set_config(fsg, 0); // Unconfigured state
break;
@@ -3595,201 +3123,10 @@ static int fsg_main_thread(void *fsg_)
/*-------------------------------------------------------------------------*/
-/* If the next two routines are called while the gadget is registered,
- * the caller must own fsg->filesem for writing. */
-
-static int open_backing_file(struct lun *curlun, const char *filename)
-{
- int ro;
- struct file *filp = NULL;
- int rc = -EINVAL;
- struct inode *inode = NULL;
- loff_t size;
- loff_t num_sectors;
- loff_t min_sectors;
-
- /* R/W if we can, R/O if we must */
- ro = curlun->ro;
- if (!ro) {
- filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
- if (-EROFS == PTR_ERR(filp))
- ro = 1;
- }
- if (ro)
- filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(filp)) {
- LINFO(curlun, "unable to open backing file: %s\n", filename);
- return PTR_ERR(filp);
- }
-
- if (!(filp->f_mode & FMODE_WRITE))
- ro = 1;
-
- if (filp->f_path.dentry)
- inode = filp->f_path.dentry->d_inode;
- if (inode && S_ISBLK(inode->i_mode)) {
- if (bdev_read_only(inode->i_bdev))
- ro = 1;
- } else if (!inode || !S_ISREG(inode->i_mode)) {
- LINFO(curlun, "invalid file type: %s\n", filename);
- goto out;
- }
-
- /* If we can't read the file, it's no good.
- * If we can't write the file, use it read-only. */
- if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
- LINFO(curlun, "file not readable: %s\n", filename);
- goto out;
- }
- if (!(filp->f_op->write || filp->f_op->aio_write))
- ro = 1;
-
- size = i_size_read(inode->i_mapping->host);
- if (size < 0) {
- LINFO(curlun, "unable to find file size: %s\n", filename);
- rc = (int) size;
- goto out;
- }
- num_sectors = size >> 9; // File size in 512-byte blocks
- min_sectors = 1;
- if (mod_data.cdrom) {
- num_sectors &= ~3; // Reduce to a multiple of 2048
- min_sectors = 300*4; // Smallest track is 300 frames
- if (num_sectors >= 256*60*75*4) {
- num_sectors = (256*60*75 - 1) * 4;
- LINFO(curlun, "file too big: %s\n", filename);
- LINFO(curlun, "using only first %d blocks\n",
- (int) num_sectors);
- }
- }
- if (num_sectors < min_sectors) {
- LINFO(curlun, "file too small: %s\n", filename);
- rc = -ETOOSMALL;
- goto out;
- }
-
- get_file(filp);
- curlun->ro = ro;
- curlun->filp = filp;
- curlun->file_length = size;
- curlun->num_sectors = num_sectors;
- LDBG(curlun, "open backing file: %s\n", filename);
- rc = 0;
-
-out:
- filp_close(filp, current->files);
- return rc;
-}
-
-
-static void close_backing_file(struct lun *curlun)
-{
- if (curlun->filp) {
- LDBG(curlun, "close backing file\n");
- fput(curlun->filp);
- curlun->filp = NULL;
- }
-}
-
-
-static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct lun *curlun = dev_to_lun(dev);
-
- return sprintf(buf, "%d\n", curlun->ro);
-}
-
-static ssize_t show_file(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct lun *curlun = dev_to_lun(dev);
- struct fsg_dev *fsg = dev_get_drvdata(dev);
- char *p;
- ssize_t rc;
-
- down_read(&fsg->filesem);
- if (backing_file_is_open(curlun)) { // Get the complete pathname
- p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
- if (IS_ERR(p))
- rc = PTR_ERR(p);
- else {
- rc = strlen(p);
- memmove(buf, p, rc);
- buf[rc] = '\n'; // Add a newline
- buf[++rc] = 0;
- }
- } else { // No file, return 0 bytes
- *buf = 0;
- rc = 0;
- }
- up_read(&fsg->filesem);
- return rc;
-}
-
-
-static ssize_t store_ro(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- ssize_t rc = count;
- struct lun *curlun = dev_to_lun(dev);
- struct fsg_dev *fsg = dev_get_drvdata(dev);
- int i;
-
- if (sscanf(buf, "%d", &i) != 1)
- return -EINVAL;
-
- /* Allow the write-enable status to change only while the backing file
- * is closed. */
- down_read(&fsg->filesem);
- if (backing_file_is_open(curlun)) {
- LDBG(curlun, "read-only status change prevented\n");
- rc = -EBUSY;
- } else {
- curlun->ro = !!i;
- LDBG(curlun, "read-only status set to %d\n", curlun->ro);
- }
- up_read(&fsg->filesem);
- return rc;
-}
-
-static ssize_t store_file(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct lun *curlun = dev_to_lun(dev);
- struct fsg_dev *fsg = dev_get_drvdata(dev);
- int rc = 0;
-
- if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
- LDBG(curlun, "eject attempt prevented\n");
- return -EBUSY; // "Door is locked"
- }
-
- /* Remove a trailing newline */
- if (count > 0 && buf[count-1] == '\n')
- ((char *) buf)[count-1] = 0; // Ugh!
-
- /* Eject current medium */
- down_write(&fsg->filesem);
- if (backing_file_is_open(curlun)) {
- close_backing_file(curlun);
- curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
- }
-
- /* Load new medium */
- if (count > 0 && buf[0]) {
- rc = open_backing_file(curlun, buf);
- if (rc == 0)
- curlun->unit_attention_data =
- SS_NOT_READY_TO_READY_TRANSITION;
- }
- up_write(&fsg->filesem);
- return (rc < 0 ? rc : count);
-}
-
/* The write permissions and store_xxx pointers are set in fsg_bind() */
-static DEVICE_ATTR(ro, 0444, show_ro, NULL);
-static DEVICE_ATTR(file, 0444, show_file, NULL);
+static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
+static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
/*-------------------------------------------------------------------------*/
@@ -3804,7 +3141,9 @@ static void fsg_release(struct kref *ref)
static void lun_release(struct device *dev)
{
- struct fsg_dev *fsg = dev_get_drvdata(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+ struct fsg_dev *fsg =
+ container_of(filesem, struct fsg_dev, filesem);
kref_put(&fsg->ref, fsg_release);
}
@@ -3813,7 +3152,7 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
{
struct fsg_dev *fsg = get_gadget_data(gadget);
int i;
- struct lun *curlun;
+ struct fsg_lun *curlun;
struct usb_request *req = fsg->ep0req;
DBG(fsg, "unbind\n");
@@ -3825,7 +3164,7 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
if (curlun->registered) {
device_remove_file(&curlun->dev, &dev_attr_ro);
device_remove_file(&curlun->dev, &dev_attr_file);
- close_backing_file(curlun);
+ fsg_lun_close(curlun);
device_unregister(&curlun->dev);
curlun->registered = 0;
}
@@ -3841,7 +3180,7 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
}
/* Free the data buffers */
- for (i = 0; i < NUM_BUFFERS; ++i)
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i)
kfree(fsg->buffhds[i].buf);
/* Free the request and buffer for endpoint 0 */
@@ -3948,7 +3287,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
struct fsg_dev *fsg = the_fsg;
int rc;
int i;
- struct lun *curlun;
+ struct fsg_lun *curlun;
struct usb_ep *ep;
struct usb_request *req;
char *pathbuf, *p;
@@ -3963,10 +3302,10 @@ static int __init fsg_bind(struct usb_gadget *gadget)
if (mod_data.removable) { // Enable the store_xxx attributes
dev_attr_file.attr.mode = 0644;
- dev_attr_file.store = store_file;
+ dev_attr_file.store = fsg_store_file;
if (!mod_data.cdrom) {
dev_attr_ro.attr.mode = 0644;
- dev_attr_ro.store = store_ro;
+ dev_attr_ro.store = fsg_store_ro;
}
}
@@ -3974,7 +3313,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
i = mod_data.nluns;
if (i == 0)
i = max(mod_data.num_filenames, 1u);
- if (i > MAX_LUNS) {
+ if (i > FSG_MAX_LUNS) {
ERROR(fsg, "invalid number of LUNs: %d\n", i);
rc = -EINVAL;
goto out;
@@ -3982,7 +3321,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
/* Create the LUNs, open their backing files, and register the
* LUN devices in sysfs. */
- fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
+ fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
if (!fsg->luns) {
rc = -ENOMEM;
goto out;
@@ -3991,13 +3330,14 @@ static int __init fsg_bind(struct usb_gadget *gadget)
for (i = 0; i < fsg->nluns; ++i) {
curlun = &fsg->luns[i];
- curlun->ro = mod_data.ro[i];
- if (mod_data.cdrom)
- curlun->ro = 1;
+ curlun->cdrom = !!mod_data.cdrom;
+ curlun->ro = mod_data.cdrom || mod_data.ro[i];
+ curlun->initially_ro = curlun->ro;
+ curlun->removable = mod_data.removable;
curlun->dev.release = lun_release;
curlun->dev.parent = &gadget->dev;
curlun->dev.driver = &fsg_driver.driver;
- dev_set_drvdata(&curlun->dev, fsg);
+ dev_set_drvdata(&curlun->dev, &fsg->filesem);
dev_set_name(&curlun->dev,"%s-lun%d",
dev_name(&gadget->dev), i);
@@ -4016,7 +3356,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
kref_get(&fsg->ref);
if (mod_data.file[i] && *mod_data.file[i]) {
- if ((rc = open_backing_file(curlun,
+ if ((rc = fsg_lun_open(curlun,
mod_data.file[i])) != 0)
goto out;
} else if (!mod_data.removable) {
@@ -4028,20 +3368,20 @@ static int __init fsg_bind(struct usb_gadget *gadget)
/* Find all the endpoints we will use */
usb_ep_autoconfig_reset(gadget);
- ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
+ ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
if (!ep)
goto autoconf_fail;
ep->driver_data = fsg; // claim the endpoint
fsg->bulk_in = ep;
- ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc);
+ ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
if (!ep)
goto autoconf_fail;
ep->driver_data = fsg; // claim the endpoint
fsg->bulk_out = ep;
if (transport_is_cbi()) {
- ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc);
+ ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
if (!ep)
goto autoconf_fail;
ep->driver_data = fsg; // claim the endpoint
@@ -4055,28 +3395,28 @@ static int __init fsg_bind(struct usb_gadget *gadget)
device_desc.bcdDevice = cpu_to_le16(mod_data.release);
i = (transport_is_cbi() ? 3 : 2); // Number of endpoints
- intf_desc.bNumEndpoints = i;
- intf_desc.bInterfaceSubClass = mod_data.protocol_type;
- intf_desc.bInterfaceProtocol = mod_data.transport_type;
- fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+ fsg_intf_desc.bNumEndpoints = i;
+ fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
+ fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
+ fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
if (gadget_is_dualspeed(gadget)) {
- hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
+ fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
/* Assume ep0 uses the same maxpacket value for both speeds */
dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
/* Assume endpoint addresses are the same for both speeds */
- hs_bulk_in_desc.bEndpointAddress =
- fs_bulk_in_desc.bEndpointAddress;
- hs_bulk_out_desc.bEndpointAddress =
- fs_bulk_out_desc.bEndpointAddress;
- hs_intr_in_desc.bEndpointAddress =
- fs_intr_in_desc.bEndpointAddress;
+ fsg_hs_bulk_in_desc.bEndpointAddress =
+ fsg_fs_bulk_in_desc.bEndpointAddress;
+ fsg_hs_bulk_out_desc.bEndpointAddress =
+ fsg_fs_bulk_out_desc.bEndpointAddress;
+ fsg_hs_intr_in_desc.bEndpointAddress =
+ fsg_fs_intr_in_desc.bEndpointAddress;
}
if (gadget_is_otg(gadget))
- otg_desc.bmAttributes |= USB_OTG_HNP;
+ fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
rc = -ENOMEM;
@@ -4090,7 +3430,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
req->complete = ep0_complete;
/* Allocate the data buffers */
- for (i = 0; i < NUM_BUFFERS; ++i) {
+ for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
struct fsg_buffhd *bh = &fsg->buffhds[i];
/* Allocate for the bulk-in endpoint. We assume that
@@ -4101,23 +3441,24 @@ static int __init fsg_bind(struct usb_gadget *gadget)
goto out;
bh->next = bh + 1;
}
- fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
+ fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];
/* This should reflect the actual gadget power source */
usb_gadget_set_selfpowered(gadget);
- snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+ snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
+ "%s %s with %s",
init_utsname()->sysname, init_utsname()->release,
gadget->name);
/* On a real device, serial[] would be loaded from permanent
* storage. We just encode it from the driver version string. */
- for (i = 0; i < sizeof(serial) - 2; i += 2) {
+ for (i = 0; i < sizeof fsg_string_serial - 2; i += 2) {
unsigned char c = DRIVER_VERSION[i / 2];
if (!c)
break;
- sprintf(&serial[i], "%02X", c);
+ sprintf(&fsg_string_serial[i], "%02X", c);
}
fsg->thread_task = kthread_create(fsg_main_thread, fsg,
@@ -4133,7 +3474,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
for (i = 0; i < fsg->nluns; ++i) {
curlun = &fsg->luns[i];
- if (backing_file_is_open(curlun)) {
+ if (fsg_lun_is_open(curlun)) {
p = NULL;
if (pathbuf) {
p = d_path(&curlun->filp->f_path,
@@ -4203,7 +3544,7 @@ static struct usb_gadget_driver fsg_driver = {
#else
.speed = USB_SPEED_FULL,
#endif
- .function = (char *) longname,
+ .function = (char *) fsg_string_product,
.bind = fsg_bind,
.unbind = fsg_unbind,
.disconnect = fsg_disconnect,
@@ -4212,7 +3553,7 @@ static struct usb_gadget_driver fsg_driver = {
.resume = fsg_resume,
.driver = {
- .name = (char *) shortname,
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
// .release = ...
// .suspend = ...
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
index 31b2710882e..bea5b827beb 100644
--- a/drivers/usb/gadget/fsl_qe_udc.h
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -419,19 +419,4 @@ struct qe_udc {
#define CPM_USB_RESTART_TX_OPCODE 0x0b
#define CPM_USB_EP_SHIFT 5
-#ifndef CONFIG_CPM
-inline int cpm_command(u32 command, u8 opcode)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
-#ifndef CONFIG_QUICC_ENGINE
-inline int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol,
- u32 cmd_input)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
#endif /* __FSL_QE_UDC_H */
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
new file mode 100644
index 00000000000..19619fbf20a
--- /dev/null
+++ b/drivers/usb/gadget/mass_storage.c
@@ -0,0 +1,240 @@
+/*
+ * mass_storage.c -- Mass Storage USB Gadget
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/*
+ * The Mass Storage Gadget acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive. In
+ * addition to providing an example of a genuinely useful gadget
+ * driver for a USB device, it also illustrates a technique of
+ * double-buffering for increased throughput. Last but not least, it
+ * gives an easy way to probe the behavior of the Mass Storage drivers
+ * in a USB host.
+ *
+ * This file serves only administrative purposes; all the business
+ * logic is implemented in f_mass_storage.c.  Read the comments in
+ * that file for a more detailed description.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/usb/ch9.h>
+
+
+/*-------------------------------------------------------------------------*/
+
+#define DRIVER_DESC "Mass Storage Gadget"
+#define DRIVER_VERSION "2009/09/11"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "f_mass_storage.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor msg_device_desc = {
+ .bLength = sizeof msg_device_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_PER_INTERFACE,
+
+ /* Vendor and product id can be overridden by module parameters. */
+ .idVendor = cpu_to_le16(FSG_VENDOR_ID),
+ .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ .bNumConfigurations = 1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+ .bLength = sizeof otg_descriptor,
+ .bDescriptorType = USB_DT_OTG,
+
+ /* REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ...
+ */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+ (struct usb_descriptor_header *) &otg_descriptor,
+ NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_CONFIGURATION_IDX 2
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer,
+ [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+ [STRING_CONFIGURATION_IDX].s = "Self Powered",
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters mod_data = {
+ .stall = 1
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+
+static unsigned long msg_registered = 0;
+static void msg_cleanup(void);
+
+static int __init msg_do_config(struct usb_configuration *c)
+{
+ struct fsg_common *common;
+ struct fsg_config config;
+ int ret;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ fsg_config_from_params(&config, &mod_data);
+ config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup;
+ common = fsg_common_init(0, c->cdev, &config);
+ if (IS_ERR(common))
+ return PTR_ERR(common);
+
+ ret = fsg_add(c->cdev, c, common);
+ fsg_common_put(common);
+ return ret;
+}
+
+static struct usb_configuration msg_config_driver = {
+ .label = "Linux File-Backed Storage",
+ .bind = msg_do_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+
+
+
+/****************************** Gadget Bind ******************************/
+
+
+static int __init msg_bind(struct usb_composite_dev *cdev)
+{
+ struct usb_gadget *gadget = cdev->gadget;
+ int status;
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+
+ /* device descriptor strings: manufacturer, product */
+ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ gadget->name);
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ msg_device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ msg_device_desc.iProduct = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ return status;
+ strings_dev[STRING_CONFIGURATION_IDX].id = status;
+ msg_config_driver.iConfiguration = status;
+
+	/* register our configuration */
+ status = usb_add_config(cdev, &msg_config_driver);
+ if (status < 0)
+ return status;
+
+ dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+ set_bit(0, &msg_registered);
+ return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver msg_driver = {
+ .name = "g_mass_storage",
+ .dev = &msg_device_desc,
+ .strings = dev_strings,
+ .bind = msg_bind,
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+static int __init msg_init(void)
+{
+ return usb_composite_register(&msg_driver);
+}
+module_init(msg_init);
+
+static void msg_cleanup(void)
+{
+ if (test_and_clear_bit(0, &msg_registered))
+ usb_composite_unregister(&msg_driver);
+}
+module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
new file mode 100644
index 00000000000..429560100b1
--- /dev/null
+++ b/drivers/usb/gadget/multi.c
@@ -0,0 +1,358 @@
+/*
+ * multi.c -- Multifunction Composite driver
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+
+
+#if defined USB_ETH_RNDIS
+# undef USB_ETH_RNDIS
+#endif
+#ifdef CONFIG_USB_ETH_RNDIS
+# define USB_ETH_RNDIS y
+#endif
+
+
+#define DRIVER_DESC "Multifunction Composite Gadget"
+#define DRIVER_VERSION "2009/07/21"
+
+/*-------------------------------------------------------------------------*/
+
+#define MULTI_VENDOR_NUM 0x0525 /* XXX NetChip */
+#define MULTI_PRODUCT_NUM 0xa4ab /* XXX */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_serial.c"
+#include "f_acm.c"
+
+#include "f_ecm.c"
+#include "f_subset.c"
+#ifdef USB_ETH_RNDIS
+# include "f_rndis.c"
+# include "rndis.c"
+#endif
+#include "u_ether.c"
+
+#undef DBG /* u_ether.c has broken idea about macros */
+#undef VDBG /* so clean up after it */
+#undef ERROR
+#undef INFO
+#include "f_mass_storage.c"
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = sizeof device_desc,
+ .bDescriptorType = USB_DT_DEVICE,
+
+ .bcdUSB = cpu_to_le16(0x0200),
+
+ /* .bDeviceClass = USB_CLASS_COMM, */
+ /* .bDeviceSubClass = 0, */
+ /* .bDeviceProtocol = 0, */
+ .bDeviceClass = 0xEF,
+ .bDeviceSubClass = 2,
+ .bDeviceProtocol = 1,
+ /* .bMaxPacketSize0 = f(hardware) */
+
+ /* Vendor and product id can be overridden by module parameters. */
+ .idVendor = cpu_to_le16(MULTI_VENDOR_NUM),
+ .idProduct = cpu_to_le16(MULTI_PRODUCT_NUM),
+ /* .bcdDevice = f(hardware) */
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ /* NO SERIAL NUMBER */
+ .bNumConfigurations = 1,
+};
+
+static struct usb_otg_descriptor otg_descriptor = {
+ .bLength = sizeof otg_descriptor,
+ .bDescriptorType = USB_DT_OTG,
+
+ /* REVISIT SRP-only hardware is possible, although
+ * it would not be called "OTG" ...
+ */
+ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
+};
+
+static const struct usb_descriptor_header *otg_desc[] = {
+ (struct usb_descriptor_header *) &otg_descriptor,
+ NULL,
+};
+
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+
+static char manufacturer[50];
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer,
+ [STRING_PRODUCT_IDX].s = DRIVER_DESC,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static u8 hostaddr[ETH_ALEN];
+
+
+
+/****************************** Configurations ******************************/
+
+static struct fsg_module_parameters mod_data = {
+ .stall = 1
+};
+FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+
+static struct fsg_common *fsg_common;
+
+
+#ifdef USB_ETH_RNDIS
+
+static int __init rndis_do_config(struct usb_configuration *c)
+{
+ int ret;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ ret = rndis_bind_config(c, hostaddr);
+ if (ret < 0)
+ return ret;
+
+ ret = acm_bind_config(c, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = fsg_add(c->cdev, c, fsg_common);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct usb_configuration rndis_config_driver = {
+ .label = "Multifunction Composite (RNDIS + MS + ACM)",
+ .bind = rndis_do_config,
+ .bConfigurationValue = 2,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+
+#endif
+
+#ifdef CONFIG_USB_G_MULTI_CDC
+
+static int __init cdc_do_config(struct usb_configuration *c)
+{
+ int ret;
+
+ if (gadget_is_otg(c->cdev->gadget)) {
+ c->descriptors = otg_desc;
+ c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ }
+
+ ret = ecm_bind_config(c, hostaddr);
+ if (ret < 0)
+ return ret;
+
+ ret = acm_bind_config(c, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = fsg_add(c->cdev, c, fsg_common);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct usb_configuration cdc_config_driver = {
+ .label = "Multifunction Composite (CDC + MS + ACM)",
+ .bind = cdc_do_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
+};
+
+#endif
+
+
+
+/****************************** Gadget Bind ******************************/
+
+
+static int __init multi_bind(struct usb_composite_dev *cdev)
+{
+ struct usb_gadget *gadget = cdev->gadget;
+ int status, gcnum;
+
+ if (!can_support_ecm(cdev->gadget)) {
+ dev_err(&gadget->dev, "controller '%s' not usable\n",
+ gadget->name);
+ return -EINVAL;
+ }
+
+ /* set up network link layer */
+ status = gether_setup(cdev->gadget, hostaddr);
+ if (status < 0)
+ return status;
+
+ /* set up serial link layer */
+ status = gserial_setup(cdev->gadget, 1);
+ if (status < 0)
+ goto fail0;
+
+ /* set up mass storage function */
+ fsg_common = fsg_common_from_params(0, cdev, &mod_data);
+ if (IS_ERR(fsg_common)) {
+ status = PTR_ERR(fsg_common);
+ goto fail1;
+ }
+
+
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+ else {
+ /* We assume that can_support_ecm() tells the truth;
+ * but if the controller isn't recognized at all then
+ * that assumption is a bit more likely to be wrong.
+ */
+ WARNING(cdev, "controller '%s' not recognized\n",
+ gadget->name);
+ device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
+ }
+
+
+ /* Allocate string descriptor numbers ... note that string
+ * contents can be overridden by the composite_dev glue.
+ */
+
+ /* device descriptor strings: manufacturer, product */
+ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
+ init_utsname()->sysname, init_utsname()->release,
+ gadget->name);
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail2;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto fail2;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+ device_desc.iProduct = status;
+
+#ifdef USB_ETH_RNDIS
+ /* register our first configuration */
+ status = usb_add_config(cdev, &rndis_config_driver);
+ if (status < 0)
+ goto fail2;
+#endif
+
+#ifdef CONFIG_USB_G_MULTI_CDC
+ /* register our second configuration */
+ status = usb_add_config(cdev, &cdc_config_driver);
+ if (status < 0)
+ goto fail2;
+#endif
+
+ dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+ fsg_common_put(fsg_common);
+ return 0;
+
+fail2:
+ fsg_common_put(fsg_common);
+fail1:
+ gserial_cleanup();
+fail0:
+ gether_cleanup();
+ return status;
+}
+
+static int __exit multi_unbind(struct usb_composite_dev *cdev)
+{
+ gserial_cleanup();
+ gether_cleanup();
+ return 0;
+}
+
+
+/****************************** Some noise ******************************/
+
+
+static struct usb_composite_driver multi_driver = {
+ .name = "g_multi",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = multi_bind,
+ .unbind = __exit_p(multi_unbind),
+};
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Michal Nazarewicz");
+MODULE_LICENSE("GPL");
+
+static int __init g_multi_init(void)
+{
+ return usb_composite_register(&multi_driver);
+}
+module_init(g_multi_init);
+
+static void __exit g_multi_cleanup(void)
+{
+ usb_composite_unregister(&multi_driver);
+}
+module_exit(g_multi_cleanup);
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
new file mode 100644
index 00000000000..868d8ee8675
--- /dev/null
+++ b/drivers/usb/gadget/storage_common.c
@@ -0,0 +1,778 @@
+/*
+ * storage_common.c -- Common definitions for mass storage functionality
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/*
+ * This file requires the following identifiers used in USB strings to
+ * be defined (each of type pointer to char):
+ * - fsg_string_manufacturer -- name of the manufacturer
+ * - fsg_string_product -- name of the product
+ * - fsg_string_serial -- product's serial number
+ * - fsg_string_config -- name of the configuration
+ * - fsg_string_interface -- name of the interface
+ * The first four are needed only when the FSG_NO_DEVICE_STRINGS
+ * macro is not defined prior to including this file.
+ */
+
+/*
+ * When FSG_NO_INTR_EP is defined fsg_fs_intr_in_desc and
+ * fsg_hs_intr_in_desc objects as well as
+ * FSG_FS_FUNCTION_PRE_EP_ENTRIES and FSG_HS_FUNCTION_PRE_EP_ENTRIES
+ * macros are not defined.
+ *
+ * When FSG_NO_DEVICE_STRINGS is defined, FSG_STRING_MANUFACTURER,
+ * FSG_STRING_PRODUCT, FSG_STRING_SERIAL and FSG_STRING_CONFIG are not
+ * defined (and the corresponding entries in the string tables are
+ * missing), and FSG_STRING_INTERFACE has the value zero.
+ *
+ * When FSG_NO_OTG is defined fsg_otg_desc won't be defined.
+ */
+
+/*
+ * If FSG_BUFFHD_STATIC_BUFFER is defined when this file is included,
+ * the fsg_buffhd structure's buf field will be an array of FSG_BUFLEN
+ * characters rather than a pointer to void.
+ */
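+
+/*
+ * A minimal usage sketch, assuming a hypothetical including driver
+ * that keeps the device strings enabled.  Only the identifier names
+ * are dictated by this file; the definitions below are just one
+ * possible shape for them:
+ *
+ *	static char fsg_string_manufacturer[64];
+ *	static char fsg_string_serial[13];
+ *	static const char fsg_string_product[] = "Mass Storage Gadget";
+ *	static const char fsg_string_config[] = "Self-powered";
+ *	static const char fsg_string_interface[] = "Mass Storage";
+ *
+ *	#include "storage_common.c"
+ *
+ * An including driver may also define FSG_NO_OTG, FSG_NO_INTR_EP or
+ * FSG_NO_DEVICE_STRINGS before the #include to drop the pieces it
+ * does not need.
+ */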
+
+
+#include <asm/unaligned.h>
+
+
+/* Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with any other driver!! Ever!!
+ * Instead: allocate your own, using normal USB-IF procedures. */
+#define FSG_VENDOR_ID 0x0525 /* NetChip */
+#define FSG_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
+
+
+/*-------------------------------------------------------------------------*/
+
+
+#ifndef DEBUG
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* !DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG LDBG
+#else
+#define VLDBG(lun, fmt, args...) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define LDBG(lun, fmt, args...) dev_dbg (&(lun)->dev, fmt, ## args)
+#define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args)
+#define LWARN(lun, fmt, args...) dev_warn(&(lun)->dev, fmt, ## args)
+#define LINFO(lun, fmt, args...) dev_info(&(lun)->dev, fmt, ## args)
+
+/* Keep these macros in sync with those in
+ * include/linux/usb/composite.h or else GCC will complain.  If they
+ * are identical (the same argument names, white space in the same
+ * places) GCC allows the redefinition; otherwise (even if only some
+ * white space is removed or added) a warning is issued.  We do not
+ * check whether these symbols are already defined, because a warning
+ * is desired if the macros were defined elsewhere to mean something
+ * else. */
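+/* Purely as an illustration of that preprocessor rule (using the DBG
+ * definition from this file, not the text of composite.h): GCC
+ * silently accepts a byte-for-byte identical redefinition such as
+ *
+ *	#define DBG(d, fmt, args...) dev_dbg(&(d)->gadget->dev , fmt , ## args)
+ *	#define DBG(d, fmt, args...) dev_dbg(&(d)->gadget->dev , fmt , ## args)
+ *
+ * but emits a "redefined" warning as soon as the replacement lists
+ * differ, even by added or removed white space. */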
+#define DBG(d, fmt, args...) dev_dbg(&(d)->gadget->dev , fmt , ## args)
+#define VDBG(d, fmt, args...) dev_vdbg(&(d)->gadget->dev , fmt , ## args)
+#define ERROR(d, fmt, args...) dev_err(&(d)->gadget->dev , fmt , ## args)
+#define WARNING(d, fmt, args...) dev_warn(&(d)->gadget->dev , fmt , ## args)
+#define INFO(d, fmt, args...) dev_info(&(d)->gadget->dev , fmt , ## args)
+
+
+
+#ifdef DUMP_MSGS
+
+# define dump_msg(fsg, /* const char * */ label, \
+ /* const u8 * */ buf, /* unsigned */ length) do { \
+ if (length < 512) { \
+ DBG(fsg, "%s, length %u:\n", label, length); \
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, buf, length, 0); \
+ } \
+} while (0)
+
+# define dump_cdb(fsg) do { } while (0)
+
+#else
+
+# define dump_msg(fsg, /* const char * */ label, \
+ /* const u8 * */ buf, /* unsigned */ length) do { } while (0)
+
+# ifdef VERBOSE_DEBUG
+
+# define dump_cdb(fsg) \
+ print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, \
+ 16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0) \
+
+# else
+
+# define dump_cdb(fsg) do { } while (0)
+
+# endif /* VERBOSE_DEBUG */
+
+#endif /* DUMP_MSGS */
+
+
+
+
+
+/*-------------------------------------------------------------------------*/
+
+/* SCSI device types */
+#define TYPE_DISK 0x00
+#define TYPE_CDROM 0x05
+
+/* USB protocol value = the transport method */
+#define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */
+#define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */
+#define USB_PR_BULK 0x50 /* Bulk-only */
+
+/* USB subclass value = the protocol encapsulation */
+#define USB_SC_RBC 0x01 /* Reduced Block Commands (flash) */
+#define USB_SC_8020 0x02 /* SFF-8020i, MMC-2, ATAPI (CD-ROM) */
+#define USB_SC_QIC 0x03 /* QIC-157 (tape) */
+#define USB_SC_UFI 0x04 /* UFI (floppy) */
+#define USB_SC_8070 0x05 /* SFF-8070i (removable) */
+#define USB_SC_SCSI 0x06 /* Transparent SCSI */
+
+/* Bulk-only data structures */
+
+/* Command Block Wrapper */
+struct fsg_bulk_cb_wrap {
+ __le32 Signature; /* Contains 'USBC' */
+ u32 Tag; /* Unique per command id */
+ __le32 DataTransferLength; /* Size of the data */
+ u8 Flags; /* Direction in bit 7 */
+ u8 Lun; /* LUN (normally 0) */
+ u8 Length; /* Of the CDB, <= MAX_COMMAND_SIZE */
+ u8 CDB[16]; /* Command Data Block */
+};
+
+#define USB_BULK_CB_WRAP_LEN 31
+#define USB_BULK_CB_SIG 0x43425355 /* Spells out USBC */
+#define USB_BULK_IN_FLAG 0x80
+
+/* Command Status Wrapper */
+struct bulk_cs_wrap {
+ __le32 Signature; /* Should = 'USBS' */
+ u32 Tag; /* Same as original command */
+ __le32 Residue; /* Amount not transferred */
+ u8 Status; /* See below */
+};
+
+#define USB_BULK_CS_WRAP_LEN 13
+#define USB_BULK_CS_SIG 0x53425355 /* Spells out 'USBS' */
+#define USB_STATUS_PASS 0
+#define USB_STATUS_FAIL 1
+#define USB_STATUS_PHASE_ERROR 2
+
+/* Bulk-only class specific requests */
+#define USB_BULK_RESET_REQUEST 0xff
+#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
+
+
+/* CBI Interrupt data structure */
+struct interrupt_data {
+ u8 bType;
+ u8 bValue;
+};
+
+#define CBI_INTERRUPT_DATA_LEN 2
+
+/* CBI Accept Device-Specific Command request */
+#define USB_CBI_ADSC_REQUEST 0x00
+
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE 16
+
+/* SCSI commands that we recognize */
+#define SC_FORMAT_UNIT 0x04
+#define SC_INQUIRY 0x12
+#define SC_MODE_SELECT_6 0x15
+#define SC_MODE_SELECT_10 0x55
+#define SC_MODE_SENSE_6 0x1a
+#define SC_MODE_SENSE_10 0x5a
+#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
+#define SC_READ_6 0x08
+#define SC_READ_10 0x28
+#define SC_READ_12 0xa8
+#define SC_READ_CAPACITY 0x25
+#define SC_READ_FORMAT_CAPACITIES 0x23
+#define SC_READ_HEADER 0x44
+#define SC_READ_TOC 0x43
+#define SC_RELEASE 0x17
+#define SC_REQUEST_SENSE 0x03
+#define SC_RESERVE 0x16
+#define SC_SEND_DIAGNOSTIC 0x1d
+#define SC_START_STOP_UNIT 0x1b
+#define SC_SYNCHRONIZE_CACHE 0x35
+#define SC_TEST_UNIT_READY 0x00
+#define SC_VERIFY 0x2f
+#define SC_WRITE_6 0x0a
+#define SC_WRITE_10 0x2a
+#define SC_WRITE_12 0xaa
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE 0
+#define SS_COMMUNICATION_FAILURE 0x040800
+#define SS_INVALID_COMMAND 0x052000
+#define SS_INVALID_FIELD_IN_CDB 0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
+#define SS_MEDIUM_NOT_PRESENT 0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
+#define SS_RESET_OCCURRED 0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
+#define SS_UNRECOVERED_READ_ERROR 0x031100
+#define SS_WRITE_ERROR 0x030c02
+#define SS_WRITE_PROTECTED 0x072700
+
+#define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */
+#define ASC(x) ((u8) ((x) >> 8))
+#define ASCQ(x) ((u8) (x))
+
+
+/*-------------------------------------------------------------------------*/
+
+
+struct fsg_lun {
+ struct file *filp;
+ loff_t file_length;
+ loff_t num_sectors;
+
+ unsigned int initially_ro:1;
+ unsigned int ro:1;
+ unsigned int removable:1;
+ unsigned int cdrom:1;
+ unsigned int prevent_medium_removal:1;
+ unsigned int registered:1;
+ unsigned int info_valid:1;
+
+ u32 sense_data;
+ u32 sense_data_info;
+ u32 unit_attention_data;
+
+ struct device dev;
+};
+
+#define fsg_lun_is_open(curlun) ((curlun)->filp != NULL)
+
+static struct fsg_lun *fsg_lun_from_dev(struct device *dev)
+{
+ return container_of(dev, struct fsg_lun, dev);
+}
+
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE 256
+#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
+
+/* Number of buffers we will use. 2 is enough for double-buffering */
+#define FSG_NUM_BUFFERS 2
+
+/* Default size of buffer length. */
+#define FSG_BUFLEN ((u32)16384)
+
+/* Maximal number of LUNs supported in mass storage function */
+#define FSG_MAX_LUNS 8
+
+enum fsg_buffer_state {
+ BUF_STATE_EMPTY = 0,
+ BUF_STATE_FULL,
+ BUF_STATE_BUSY
+};
+
+struct fsg_buffhd {
+#ifdef FSG_BUFFHD_STATIC_BUFFER
+ char buf[FSG_BUFLEN];
+#else
+ void *buf;
+#endif
+ enum fsg_buffer_state state;
+ struct fsg_buffhd *next;
+
+ /* The NetChip 2280 is faster, and handles some protocol faults
+ * better, if we don't submit any short bulk-out read requests.
+ * So we will record the intended request length here. */
+ unsigned int bulk_out_intended_length;
+
+ struct usb_request *inreq;
+ int inreq_busy;
+ struct usb_request *outreq;
+ int outreq_busy;
+};
+
+enum fsg_state {
+ /* This one isn't used anywhere */
+ FSG_STATE_COMMAND_PHASE = -10,
+ FSG_STATE_DATA_PHASE,
+ FSG_STATE_STATUS_PHASE,
+
+ FSG_STATE_IDLE = 0,
+ FSG_STATE_ABORT_BULK_OUT,
+ FSG_STATE_RESET,
+ FSG_STATE_INTERFACE_CHANGE,
+ FSG_STATE_CONFIG_CHANGE,
+ FSG_STATE_DISCONNECT,
+ FSG_STATE_EXIT,
+ FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+ DATA_DIR_UNKNOWN = 0,
+ DATA_DIR_FROM_HOST,
+ DATA_DIR_TO_HOST,
+ DATA_DIR_NONE
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static inline u32 get_unaligned_be24(u8 *buf)
+{
+ return 0xffffff & (u32) get_unaligned_be32(buf - 1);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+
+enum {
+#ifndef FSG_NO_DEVICE_STRINGS
+ FSG_STRING_MANUFACTURER = 1,
+ FSG_STRING_PRODUCT,
+ FSG_STRING_SERIAL,
+ FSG_STRING_CONFIG,
+#endif
+ FSG_STRING_INTERFACE
+};
+
+
+#ifndef FSG_NO_OTG
+static struct usb_otg_descriptor
+fsg_otg_desc = {
+ .bLength = sizeof fsg_otg_desc,
+ .bDescriptorType = USB_DT_OTG,
+
+ .bmAttributes = USB_OTG_SRP,
+};
+#endif
+
+/* There is only one interface. */
+
+static struct usb_interface_descriptor
+fsg_intf_desc = {
+ .bLength = sizeof fsg_intf_desc,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+ .bNumEndpoints = 2, /* Adjusted during fsg_bind() */
+ .bInterfaceClass = USB_CLASS_MASS_STORAGE,
+ .bInterfaceSubClass = USB_SC_SCSI, /* Adjusted during fsg_bind() */
+ .bInterfaceProtocol = USB_PR_BULK, /* Adjusted during fsg_bind() */
+ .iInterface = FSG_STRING_INTERFACE,
+};
+
+/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
+ * and interrupt-in. */
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ /* wMaxPacketSize set by autoconfiguration */
+};
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ /* wMaxPacketSize set by autoconfiguration */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_fs_intr_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(2),
+ .bInterval = 32, /* frames -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+# define FSG_FS_FUNCTION_PRE_EP_ENTRIES 2
+#else
+# define FSG_FS_FUNCTION_PRE_EP_ENTRIES 1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_fs_function[] = {
+#ifndef FSG_NO_OTG
+ (struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+ (struct usb_descriptor_header *) &fsg_intf_desc,
+ (struct usb_descriptor_header *) &fsg_fs_bulk_in_desc,
+ (struct usb_descriptor_header *) &fsg_fs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+ (struct usb_descriptor_header *) &fsg_fs_intr_in_desc,
+#endif
+ NULL,
+};
+
+
+/*
+ * USB 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ *
+ * That means alternate endpoint descriptors (bigger packets)
+ * and a "device qualifier" ... plus more construction options
+ * for the config descriptor.
+ */
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+ .bInterval = 1, /* NAK every 1 uframe */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_hs_intr_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(2),
+ .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+# define FSG_HS_FUNCTION_PRE_EP_ENTRIES 2
+#else
+# define FSG_HS_FUNCTION_PRE_EP_ENTRIES 1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_hs_function[] = {
+#ifndef FSG_NO_OTG
+ (struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+ (struct usb_descriptor_header *) &fsg_intf_desc,
+ (struct usb_descriptor_header *) &fsg_hs_bulk_in_desc,
+ (struct usb_descriptor_header *) &fsg_hs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+ (struct usb_descriptor_header *) &fsg_hs_intr_in_desc,
+#endif
+ NULL,
+};
+
+/* Maxpacket and other transfer characteristics vary by speed. */
+static struct usb_endpoint_descriptor *
+fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
+ struct usb_endpoint_descriptor *hs)
+{
+ if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ return hs;
+ return fs;
+}
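fsg_ep_desc() is the speed-selection helper used when endpoints are (re)enabled; a hypothetical call site (the fsg->gadget and fsg->bulk_in names and the rc variable are illustrative, not taken from this patch) could look like:

/* pick the descriptor matching the current connection speed, then enable */
const struct usb_endpoint_descriptor *d;

d = fsg_ep_desc(fsg->gadget, &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
rc = usb_ep_enable(fsg->bulk_in, d);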
+
+
+/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
+static struct usb_string fsg_strings[] = {
+#ifndef FSG_NO_DEVICE_STRINGS
+ {FSG_STRING_MANUFACTURER, fsg_string_manufacturer},
+ {FSG_STRING_PRODUCT, fsg_string_product},
+ {FSG_STRING_SERIAL, fsg_string_serial},
+ {FSG_STRING_CONFIG, fsg_string_config},
+#endif
+ {FSG_STRING_INTERFACE, fsg_string_interface},
+ {}
+};
+
+static struct usb_gadget_strings fsg_stringtab = {
+ .language = 0x0409, /* en-us */
+ .strings = fsg_strings,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/* If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing. */
+
+static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
+{
+ int ro;
+ struct file *filp = NULL;
+ int rc = -EINVAL;
+ struct inode *inode = NULL;
+ loff_t size;
+ loff_t num_sectors;
+ loff_t min_sectors;
+
+ /* R/W if we can, R/O if we must */
+ ro = curlun->initially_ro;
+ if (!ro) {
+ filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+ if (-EROFS == PTR_ERR(filp))
+ ro = 1;
+ }
+ if (ro)
+ filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(filp)) {
+ LINFO(curlun, "unable to open backing file: %s\n", filename);
+ return PTR_ERR(filp);
+ }
+
+ if (!(filp->f_mode & FMODE_WRITE))
+ ro = 1;
+
+ if (filp->f_path.dentry)
+ inode = filp->f_path.dentry->d_inode;
+ if (inode && S_ISBLK(inode->i_mode)) {
+ if (bdev_read_only(inode->i_bdev))
+ ro = 1;
+ } else if (!inode || !S_ISREG(inode->i_mode)) {
+ LINFO(curlun, "invalid file type: %s\n", filename);
+ goto out;
+ }
+
+ /* If we can't read the file, it's no good.
+ * If we can't write the file, use it read-only. */
+ if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+ LINFO(curlun, "file not readable: %s\n", filename);
+ goto out;
+ }
+ if (!(filp->f_op->write || filp->f_op->aio_write))
+ ro = 1;
+
+ size = i_size_read(inode->i_mapping->host);
+ if (size < 0) {
+ LINFO(curlun, "unable to find file size: %s\n", filename);
+ rc = (int) size;
+ goto out;
+ }
+ num_sectors = size >> 9; /* File size in 512-byte blocks */
+ min_sectors = 1;
+ if (curlun->cdrom) {
+ num_sectors &= ~3; /* Reduce to a multiple of 4 sectors (2048 bytes) */
+ min_sectors = 300*4; /* Smallest track is 300 frames */
+ if (num_sectors >= 256*60*75*4) {
+ num_sectors = (256*60*75 - 1) * 4;
+ LINFO(curlun, "file too big: %s\n", filename);
+ LINFO(curlun, "using only first %d blocks\n",
+ (int) num_sectors);
+ }
+ }
+ if (num_sectors < min_sectors) {
+ LINFO(curlun, "file too small: %s\n", filename);
+ rc = -ETOOSMALL;
+ goto out;
+ }
+
+ get_file(filp);
+ curlun->ro = ro;
+ curlun->filp = filp;
+ curlun->file_length = size;
+ curlun->num_sectors = num_sectors;
+ LDBG(curlun, "open backing file: %s\n", filename);
+ rc = 0;
+
+out:
+ filp_close(filp, current->files);
+ return rc;
+}
+
+
+static void fsg_lun_close(struct fsg_lun *curlun)
+{
+ if (curlun->filp) {
+ LDBG(curlun, "close backing file\n");
+ fput(curlun->filp);
+ curlun->filp = NULL;
+ }
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Sync the file data, don't bother with the metadata.
+ * This code was copied from fs/buffer.c:sys_fdatasync(). */
+static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
+{
+ struct file *filp = curlun->filp;
+
+ if (curlun->ro || !filp)
+ return 0;
+ return vfs_fsync(filp, filp->f_path.dentry, 1);
+}
+
+static void store_cdrom_address(u8 *dest, int msf, u32 addr)
+{
+ if (msf) {
+ /* Convert to Minutes-Seconds-Frames */
+ addr >>= 2; /* Convert to 2048-byte frames */
+ addr += 2*75; /* Lead-in occupies 2 seconds */
+ dest[3] = addr % 75; /* Frames */
+ addr /= 75;
+ dest[2] = addr % 60; /* Seconds */
+ addr /= 60;
+ dest[1] = addr; /* Minutes */
+ dest[0] = 0; /* Reserved */
+ } else {
+ /* Absolute sector */
+ put_unaligned_be32(addr, dest);
+ }
+}
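A quick worked example of the MSF branch above (editorial note, not part of the patch):

/*
 * For a 512-byte-sector address of 4000:
 *   addr >>= 2      ->  1000 CD frames (2048-byte frames)
 *   addr += 2*75    ->  1150 frames, including the 2-second lead-in
 *   MSF result      ->  0 minutes, 15 seconds, 25 frames (00:15:25)
 */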
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+
+ return sprintf(buf, "%d\n", fsg_lun_is_open(curlun)
+ ? curlun->ro
+ : curlun->initially_ro);
+}
+
+static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+ char *p;
+ ssize_t rc;
+
+ down_read(filesem);
+ if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */
+ p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+ if (IS_ERR(p))
+ rc = PTR_ERR(p);
+ else {
+ rc = strlen(p);
+ memmove(buf, p, rc);
+ buf[rc] = '\n'; /* Add a newline */
+ buf[++rc] = 0;
+ }
+ } else { /* No file, return 0 bytes */
+ *buf = 0;
+ rc = 0;
+ }
+ up_read(filesem);
+ return rc;
+}
+
+
+static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t rc = count;
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+ int i;
+
+ if (sscanf(buf, "%d", &i) != 1)
+ return -EINVAL;
+
+ /* Allow the write-enable status to change only while the backing file
+ * is closed. */
+ down_read(filesem);
+ if (fsg_lun_is_open(curlun)) {
+ LDBG(curlun, "read-only status change prevented\n");
+ rc = -EBUSY;
+ } else {
+ curlun->ro = !!i;
+ curlun->initially_ro = !!i;
+ LDBG(curlun, "read-only status set to %d\n", curlun->ro);
+ }
+ up_read(filesem);
+ return rc;
+}
+
+static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fsg_lun *curlun = fsg_lun_from_dev(dev);
+ struct rw_semaphore *filesem = dev_get_drvdata(dev);
+ int rc = 0;
+
+ if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
+ LDBG(curlun, "eject attempt prevented\n");
+ return -EBUSY; /* "Door is locked" */
+ }
+
+ /* Remove a trailing newline */
+ if (count > 0 && buf[count-1] == '\n')
+ ((char *) buf)[count-1] = 0; /* Ugh! */
+
+ /* Eject current medium */
+ down_write(filesem);
+ if (fsg_lun_is_open(curlun)) {
+ fsg_lun_close(curlun);
+ curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+ }
+
+ /* Load new medium */
+ if (count > 0 && buf[0]) {
+ rc = fsg_lun_open(curlun, buf);
+ if (rc == 0)
+ curlun->unit_attention_data =
+ SS_NOT_READY_TO_READY_TRANSITION;
+ }
+ up_write(filesem);
+ return (rc < 0 ? rc : count);
+}
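These show/store handlers are meant to be wired up by the gadget that uses this header; a minimal sketch of how such sysfs attributes are typically declared and registered against the LUN's struct device (names and error handling illustrative, assuming the attributes are created after device_register()):

static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);

	/* after device_register(&curlun->dev): */
	rc = device_create_file(&curlun->dev, &dev_attr_ro);
	if (rc == 0)
		rc = device_create_file(&curlun->dev, &dev_attr_file);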
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 91b39ffdf6e..fd55f450bc0 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -112,7 +112,7 @@ int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
int eem_bind_config(struct usb_configuration *c);
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 9b43b226817..2678a1624fc 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -90,14 +90,25 @@ config USB_EHCI_TT_NEWSCHED
config USB_EHCI_BIG_ENDIAN_MMIO
bool
- depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX)
+ depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
default y
config USB_EHCI_BIG_ENDIAN_DESC
bool
- depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX)
+ depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
default y
+config XPS_USB_HCD_XILINX
+ bool "Use Xilinx usb host EHCI controller core"
+ depends on USB_EHCI_HCD && (PPC32 || MICROBLAZE)
+ select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_EHCI_BIG_ENDIAN_MMIO
+ ---help---
+ The Xilinx XPS USB host controller core is EHCI compliant and has
+ a built-in transaction translator. It can be configured to support
+ either both high speed and full speed devices, or high speed
+ devices only.
+
config USB_EHCI_FSL
bool "Support for Freescale on-chip EHCI USB controller"
depends on USB_EHCI_HCD && FSL_SOC
@@ -105,6 +116,13 @@ config USB_EHCI_FSL
---help---
Variation of ARC USB block used in some Freescale chips.
+config USB_EHCI_MXC
+ bool "Support for Freescale on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_MXC
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Variation of ARC USB block used in some Freescale i.MX chips.
+
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
depends on USB_EHCI_HCD && PPC_OF
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index ed77be76d6b..dbfb482a94e 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -297,7 +297,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops au1xxx_ehci_pmops = {
+static const struct dev_pm_ops au1xxx_ehci_pmops = {
.suspend = ehci_hcd_au1xxx_drv_suspend,
.resume = ehci_hcd_au1xxx_drv_resume,
};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d8f4aaa616f..5859522d6ed 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -549,7 +549,7 @@ static int ehci_init(struct usb_hcd *hcd)
/* controllers may cache some of the periodic schedule ... */
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
- ehci->i_thresh = 8;
+ ehci->i_thresh = 2 + 8;
else // N microframes cached
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
@@ -605,6 +605,8 @@ static int ehci_init(struct usb_hcd *hcd)
}
ehci->command = temp;
+ /* Accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
return 0;
}
@@ -1105,11 +1107,21 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_fsl_driver
#endif
+#ifdef CONFIG_USB_EHCI_MXC
+#include "ehci-mxc.c"
+#define PLATFORM_DRIVER ehci_mxc_driver
+#endif
+
#ifdef CONFIG_SOC_AU1200
#include "ehci-au1xxx.c"
#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
#endif
+#ifdef CONFIG_ARCH_OMAP34XX
+#include "ehci-omap.c"
+#define PLATFORM_DRIVER ehci_hcd_omap_driver
+#endif
+
#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
@@ -1120,6 +1132,11 @@ MODULE_LICENSE ("GPL");
#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
#endif
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+#include "ehci-xilinx-of.c"
+#define OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
+#endif
+
#ifdef CONFIG_PLAT_ORION
#include "ehci-orion.c"
#define PLATFORM_DRIVER ehci_orion_driver
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 1b6f1c0e5ce..2c6571c05f3 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -236,7 +236,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
}
if (unlikely(ehci->debug)) {
- if (ehci->debug && !dbgp_reset_prep())
+ if (!dbgp_reset_prep())
ehci->debug = NULL;
else
dbgp_external_startup();
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
new file mode 100644
index 00000000000..35c56f40bdb
--- /dev/null
+++ b/drivers/usb/host/ehci-mxc.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/usb/otg.h>
+
+#include <mach/mxc_ehci.h>
+
+#define ULPI_VIEWPORT_OFFSET 0x170
+#define PORTSC_OFFSET 0x184
+#define USBMODE_OFFSET 0x1a8
+#define USBMODE_CM_HOST 3
+
+struct ehci_mxc_priv {
+ struct clk *usbclk, *ahbclk;
+ struct usb_hcd *hcd;
+};
+
+/* called during probe() after chip reset completes */
+static int ehci_mxc_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ /* EHCI registers start at offset 0x100 */
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ dbg_hcs_params(ehci, "reset");
+ dbg_hcc_params(ehci, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ /* data structure init */
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ hcd->has_tt = 1;
+
+ ehci->sbrn = 0x20;
+
+ ehci_reset(ehci);
+
+ ehci_port_power(ehci, 0);
+ return 0;
+}
+
+static const struct hc_driver ehci_mxc_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Freescale On-Chip EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_mxc_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+};
+
+static int ehci_mxc_drv_probe(struct platform_device *pdev)
+{
+ struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq, ret, temp;
+ struct ehci_mxc_priv *priv;
+ struct device *dev = &pdev->dev;
+
+ dev_info(&pdev->dev, "initializing i.MX USB Controller\n");
+
+ if (!pdata) {
+ dev_err(dev, "No platform data given, bailing out.\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+
+ hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Found HC with no register addr. Check setup!\n");
+ ret = -ENODEV;
+ goto err_get_resource;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ dev_dbg(dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto err_request_mem;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(dev, "error mapping memory\n");
+ ret = -EFAULT;
+ goto err_ioremap;
+ }
+
+ /* enable clocks */
+ priv->usbclk = clk_get(dev, "usb");
+ if (IS_ERR(priv->usbclk)) {
+ ret = PTR_ERR(priv->usbclk);
+ goto err_clk;
+ }
+ clk_enable(priv->usbclk);
+
+ if (!cpu_is_mx35()) {
+ priv->ahbclk = clk_get(dev, "usb_ahb");
+ if (IS_ERR(priv->ahbclk)) {
+ ret = PTR_ERR(priv->ahbclk);
+ goto err_clk_ahb;
+ }
+ clk_enable(priv->ahbclk);
+ }
+
+ /* set USBMODE to host mode */
+ temp = readl(hcd->regs + USBMODE_OFFSET);
+ writel(temp | USBMODE_CM_HOST, hcd->regs + USBMODE_OFFSET);
+
+ /* set up the PORTSCx register */
+ writel(pdata->portsc, hcd->regs + PORTSC_OFFSET);
+ mdelay(10);
+
+ /* setup USBCONTROL. */
+ ret = mxc_set_usbcontrol(pdev->id, pdata->flags);
+ if (ret < 0)
+ goto err_init;
+
+ /* call platform specific init function */
+ if (pdata->init) {
+ ret = pdata->init(pdev);
+ if (ret) {
+ dev_err(dev, "platform init failed\n");
+ goto err_init;
+ }
+ }
+
+ /* most platforms need some time to settle changed IO settings */
+ mdelay(10);
+
+ /* Initialize the transceiver */
+ if (pdata->otg) {
+ pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
+ if (otg_init(pdata->otg) != 0)
+ dev_err(dev, "unable to init transceiver\n");
+ else if (otg_set_vbus(pdata->otg, 1) != 0)
+ dev_err(dev, "unable to enable vbus on transceiver\n");
+ }
+
+ priv->hcd = hcd;
+ platform_set_drvdata(pdev, priv);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (ret)
+ goto err_add;
+
+ return 0;
+
+err_add:
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+err_init:
+ if (priv->ahbclk) {
+ clk_disable(priv->ahbclk);
+ clk_put(priv->ahbclk);
+ }
+err_clk_ahb:
+ clk_disable(priv->usbclk);
+ clk_put(priv->usbclk);
+err_clk:
+ iounmap(hcd->regs);
+err_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_request_mem:
+err_get_resource:
+ kfree(priv);
+err_alloc:
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
+{
+ struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
+ struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = priv->hcd;
+
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+
+ if (pdata->otg)
+ otg_shutdown(pdata->otg);
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ platform_set_drvdata(pdev, NULL);
+
+ clk_disable(priv->usbclk);
+ clk_put(priv->usbclk);
+ if (priv->ahbclk) {
+ clk_disable(priv->ahbclk);
+ clk_put(priv->ahbclk);
+ }
+
+ kfree(priv);
+
+ return 0;
+}
+
+static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
+{
+ struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = priv->hcd;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+MODULE_ALIAS("platform:mxc-ehci");
+
+static struct platform_driver ehci_mxc_driver = {
+ .probe = ehci_mxc_drv_probe,
+ .remove = __exit_p(ehci_mxc_drv_remove),
+ .shutdown = ehci_mxc_drv_shutdown,
+ .driver = {
+ .name = "mxc-ehci",
+ },
+};
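Like the other EHCI bus-glue files, ehci-mxc.c has no module_init() of its own: ehci-hcd.c #includes it and registers the platform driver from its central init path (see the ehci-hcd.c hunk below). Roughly, and simplified from that file:

#ifdef PLATFORM_DRIVER
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		return retval;
#endif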
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
new file mode 100644
index 00000000000..74d07f4e8b7
--- /dev/null
+++ b/drivers/usb/host/ehci-omap.c
@@ -0,0 +1,756 @@
+/*
+ * ehci-omap.c - driver for USBHOST on OMAP 34xx processor
+ *
+ * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller
+ * Tested on OMAP3430 ES2.0 SDP
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Author: Vikram Pandita <vikram.pandita@ti.com>
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * TODO (last updated Feb 23rd, 2009):
+ * - add kernel-doc
+ * - enable AUTOIDLE
+ * - move DPLL5 programming to clock fw
+ * - add suspend/resume
+ * - move workarounds to board-files
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <plat/usb.h>
+
+/*
+ * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
+ * Use ehci_omap_readl()/ehci_omap_writel() functions
+ */
+
+/* TLL Register Set */
+#define OMAP_USBTLL_REVISION (0x00)
+#define OMAP_USBTLL_SYSCONFIG (0x10)
+#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_USBTLL_SYSSTATUS (0x14)
+#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
+
+#define OMAP_USBTLL_IRQSTATUS (0x18)
+#define OMAP_USBTLL_IRQENABLE (0x1C)
+
+#define OMAP_TLL_SHARED_CONF (0x30)
+#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
+#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
+#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
+#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
+#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
+
+#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
+#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
+#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
+#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
+#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
+#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
+
+#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num)
+#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num)
+#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num)
+#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num)
+#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num)
+#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num)
+
+#define OMAP_TLL_CHANNEL_COUNT 3
+#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 1)
+#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 2)
+#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 4)
+
+/* UHH Register Set */
+#define OMAP_UHH_REVISION (0x00)
+#define OMAP_UHH_SYSCONFIG (0x10)
+#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
+#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
+#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
+#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
+#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
+#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define OMAP_UHH_SYSSTATUS (0x14)
+#define OMAP_UHH_HOSTCONFIG (0x40)
+#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
+#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
+#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
+#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
+#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
+#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
+#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
+#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
+#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
+
+#define OMAP_UHH_DEBUG_CSR (0x44)
+
+/* EHCI Register Set */
+#define EHCI_INSNREG05_ULPI (0xA4)
+#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
+#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
+#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
+#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
+#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
+#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
+
+/*-------------------------------------------------------------------------*/
+
+static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
+{
+ __raw_writel(val, base + reg);
+}
+
+static inline u32 ehci_omap_readl(void __iomem *base, u32 reg)
+{
+ return __raw_readl(base + reg);
+}
+
+static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val)
+{
+ __raw_writeb(val, base + reg);
+}
+
+static inline u8 ehci_omap_readb(void __iomem *base, u8 reg)
+{
+ return __raw_readb(base + reg);
+}
+
+/*-------------------------------------------------------------------------*/
+
+struct ehci_hcd_omap {
+ struct ehci_hcd *ehci;
+ struct device *dev;
+
+ struct clk *usbhost_ick;
+ struct clk *usbhost2_120m_fck;
+ struct clk *usbhost1_48m_fck;
+ struct clk *usbtll_fck;
+ struct clk *usbtll_ick;
+
+ /* FIXME: the following two workarounds are board-specific, not
+ * silicon-specific, so they should be moved to the board file
+ * instead.
+ *
+ * Maybe someone from TI knows which boards are affected and
+ * need the workarounds applied.
+ */
+
+ /* gpio for resetting phy */
+ int reset_gpio_port[OMAP3_HS_USB_PORTS];
+
+ /* phy reset workaround */
+ int phy_reset;
+
+ /* desired phy_mode: TLL, PHY */
+ enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS];
+
+ void __iomem *uhh_base;
+ void __iomem *tll_base;
+ void __iomem *ehci_base;
+};
+
+/*-------------------------------------------------------------------------*/
+
+static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
+{
+ unsigned reg;
+ int i;
+
+ /* Program the 3 TLL channels upfront */
+ for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+ reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
+
+ /* Disable AutoIdle, BitStuffing and use SDR Mode */
+ reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
+ | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
+ | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+ ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
+ }
+
+ /* Program Common TLL register */
+ reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
+ reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
+ | OMAP_TLL_SHARED_CONF_USB_DIVRATION
+ | OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN);
+ reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
+
+ ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
+
+ /* Enable channels now */
+ for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
+ reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
+
+ /* Enable only the reg that is needed */
+ if (!(tll_channel_mask & 1<<i))
+ continue;
+
+ reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
+ ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
+
+ ehci_omap_writeb(omap->tll_base,
+ OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe);
+ dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n",
+ i+1, ehci_omap_readb(omap->tll_base,
+ OMAP_TLL_ULPI_SCRATCH_REGISTER(i)));
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* omap_start_ehc
+ * - Start the TI USBHOST controller
+ */
+static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ u8 tll_ch_mask = 0;
+ unsigned reg = 0;
+ int ret = 0;
+
+ dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");
+
+ /* Enable Clocks for USBHOST */
+ omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
+ if (IS_ERR(omap->usbhost_ick)) {
+ ret = PTR_ERR(omap->usbhost_ick);
+ goto err_host_ick;
+ }
+ clk_enable(omap->usbhost_ick);
+
+ omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
+ if (IS_ERR(omap->usbhost2_120m_fck)) {
+ ret = PTR_ERR(omap->usbhost2_120m_fck);
+ goto err_host_120m_fck;
+ }
+ clk_enable(omap->usbhost2_120m_fck);
+
+ omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
+ if (IS_ERR(omap->usbhost1_48m_fck)) {
+ ret = PTR_ERR(omap->usbhost1_48m_fck);
+ goto err_host_48m_fck;
+ }
+ clk_enable(omap->usbhost1_48m_fck);
+
+ if (omap->phy_reset) {
+ /* Refer: ISSUE1 */
+ if (gpio_is_valid(omap->reset_gpio_port[0])) {
+ gpio_request(omap->reset_gpio_port[0],
+ "USB1 PHY reset");
+ gpio_direction_output(omap->reset_gpio_port[0], 0);
+ }
+
+ if (gpio_is_valid(omap->reset_gpio_port[1])) {
+ gpio_request(omap->reset_gpio_port[1],
+ "USB2 PHY reset");
+ gpio_direction_output(omap->reset_gpio_port[1], 0);
+ }
+
+ /* Hold the PHY in RESET for enough time till DIR is high */
+ udelay(10);
+ }
+
+ /* Configure TLL for 60Mhz clk for ULPI */
+ omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
+ if (IS_ERR(omap->usbtll_fck)) {
+ ret = PTR_ERR(omap->usbtll_fck);
+ goto err_tll_fck;
+ }
+ clk_enable(omap->usbtll_fck);
+
+ omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
+ if (IS_ERR(omap->usbtll_ick)) {
+ ret = PTR_ERR(omap->usbtll_ick);
+ goto err_tll_ick;
+ }
+ clk_enable(omap->usbtll_ick);
+
+ /* perform TLL soft reset, and wait until reset is complete */
+ ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+ /* Wait for TLL reset to complete */
+ while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(omap->dev, "operation timed out\n");
+ ret = -EINVAL;
+ goto err_sys_status;
+ }
+ }
+
+ dev_dbg(omap->dev, "TLL RESET DONE\n");
+
+ /* SIDLEMODE (1 << 3) = no-idle mode; only for initial debugging */
+ ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+ OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+ OMAP_USBTLL_SYSCONFIG_CACTIVITY);
+
+
+ /* Put UHH in NoIdle/NoStandby mode */
+ reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+ | OMAP_UHH_SYSCONFIG_SIDLEMODE
+ | OMAP_UHH_SYSCONFIG_CACTIVITY
+ | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+ reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+ ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
+ reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
+
+ /* setup ULPI bypass and burst configurations */
+ reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
+ | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
+ reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
+
+ if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
+ if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
+ if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
+ reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
+
+ /* Bypass the TLL module for PHY mode operation */
+ if (omap_rev() <= OMAP3430_REV_ES2_1) {
+ dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1 \n");
+ if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
+ (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
+ (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ else
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ } else {
+ dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
+ if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+ else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
+
+ if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+ else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
+
+ if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+ else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
+ reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
+
+ }
+ ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
+ dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
+
+
+ if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
+ (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
+ (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
+
+ if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
+ tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK;
+ if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
+ tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK;
+ if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
+ tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
+
+ /* Enable UTMI mode for required TLL channels */
+ omap_usb_utmi_init(omap, tll_ch_mask);
+ }
+
+ if (omap->phy_reset) {
+ /* Refer ISSUE1:
+ * Hold the PHY in RESET for enough time till
+ * PHY is settled and ready
+ */
+ udelay(10);
+
+ if (gpio_is_valid(omap->reset_gpio_port[0]))
+ gpio_set_value(omap->reset_gpio_port[0], 1);
+
+ if (gpio_is_valid(omap->reset_gpio_port[1]))
+ gpio_set_value(omap->reset_gpio_port[1], 1);
+ }
+
+ return 0;
+
+err_sys_status:
+ clk_disable(omap->usbtll_ick);
+ clk_put(omap->usbtll_ick);
+
+err_tll_ick:
+ clk_disable(omap->usbtll_fck);
+ clk_put(omap->usbtll_fck);
+
+err_tll_fck:
+ clk_disable(omap->usbhost1_48m_fck);
+ clk_put(omap->usbhost1_48m_fck);
+
+ if (omap->phy_reset) {
+ if (gpio_is_valid(omap->reset_gpio_port[0]))
+ gpio_free(omap->reset_gpio_port[0]);
+
+ if (gpio_is_valid(omap->reset_gpio_port[1]))
+ gpio_free(omap->reset_gpio_port[1]);
+ }
+
+err_host_48m_fck:
+ clk_disable(omap->usbhost2_120m_fck);
+ clk_put(omap->usbhost2_120m_fck);
+
+err_host_120m_fck:
+ clk_disable(omap->usbhost_ick);
+ clk_put(omap->usbhost_ick);
+
+err_host_ick:
+ return ret;
+}
+
+static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
+
+ /* Reset OMAP modules for insmod/rmmod to work */
+ ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
+ OMAP_UHH_SYSCONFIG_SOFTRESET);
+ while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & (1 << 0))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & (1 << 1))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
+ & (1 << 2))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
+
+ while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & (1 << 0))) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout))
+ dev_dbg(omap->dev, "operation timed out\n");
+ }
+
+ if (omap->usbtll_fck != NULL) {
+ clk_disable(omap->usbtll_fck);
+ clk_put(omap->usbtll_fck);
+ omap->usbtll_fck = NULL;
+ }
+
+ if (omap->usbhost_ick != NULL) {
+ clk_disable(omap->usbhost_ick);
+ clk_put(omap->usbhost_ick);
+ omap->usbhost_ick = NULL;
+ }
+
+ if (omap->usbhost1_48m_fck != NULL) {
+ clk_disable(omap->usbhost1_48m_fck);
+ clk_put(omap->usbhost1_48m_fck);
+ omap->usbhost1_48m_fck = NULL;
+ }
+
+ if (omap->usbhost2_120m_fck != NULL) {
+ clk_disable(omap->usbhost2_120m_fck);
+ clk_put(omap->usbhost2_120m_fck);
+ omap->usbhost2_120m_fck = NULL;
+ }
+
+ if (omap->usbtll_ick != NULL) {
+ clk_disable(omap->usbtll_ick);
+ clk_put(omap->usbtll_ick);
+ omap->usbtll_ick = NULL;
+ }
+
+ if (omap->phy_reset) {
+ if (gpio_is_valid(omap->reset_gpio_port[0]))
+ gpio_free(omap->reset_gpio_port[0]);
+
+ if (gpio_is_valid(omap->reset_gpio_port[1]))
+ gpio_free(omap->reset_gpio_port[1]);
+ }
+
+ dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
+}
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ehci_omap_hc_driver;
+
+/* configure so an HC device and id are always provided */
+/* always called with process context; sleeping is OK */
+
+/**
+ * ehci_hcd_omap_probe - initialize TI-based HCDs
+ * @pdev: USB Host Controller being probed
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int ehci_hcd_omap_probe(struct platform_device *pdev)
+{
+ struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
+ struct ehci_hcd_omap *omap;
+ struct resource *res;
+ struct usb_hcd *hcd;
+
+ int irq = platform_get_irq(pdev, 0);
+ int ret = -ENODEV;
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "missing platform_data\n");
+ goto err_pdata;
+ }
+
+ if (usb_disabled())
+ goto err_disabled;
+
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ ret = -ENOMEM;
+ goto err_disabled;
+ }
+
+ hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+ ret = -ENOMEM;
+ goto err_create_hcd;
+ }
+
+ platform_set_drvdata(pdev, omap);
+ omap->dev = &pdev->dev;
+ omap->phy_reset = pdata->phy_reset;
+ omap->reset_gpio_port[0] = pdata->reset_gpio_port[0];
+ omap->reset_gpio_port[1] = pdata->reset_gpio_port[1];
+ omap->reset_gpio_port[2] = pdata->reset_gpio_port[2];
+ omap->port_mode[0] = pdata->port_mode[0];
+ omap->port_mode[1] = pdata->port_mode[1];
+ omap->port_mode[2] = pdata->port_mode[2];
+ omap->ehci = hcd_to_ehci(hcd);
+ omap->ehci->sbrn = 0x20;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "EHCI ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ /* we know this is the memory we want, no need to ioremap again */
+ omap->ehci->caps = hcd->regs;
+ omap->ehci_base = hcd->regs;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ omap->uhh_base = ioremap(res->start, resource_size(res));
+ if (!omap->uhh_base) {
+ dev_err(&pdev->dev, "UHH ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_uhh_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ omap->tll_base = ioremap(res->start, resource_size(res));
+ if (!omap->tll_base) {
+ dev_err(&pdev->dev, "TLL ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_tll_ioremap;
+ }
+
+ ret = omap_start_ehc(omap, hcd);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to start ehci\n");
+ goto err_start;
+ }
+
+ omap->ehci->regs = hcd->regs
+ + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
+
+ /* cache this readonly data; minimize chip reads */
+ omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
+
+ /* SET 1 micro-frame Interrupt interval */
+ writel(readl(&omap->ehci->regs->command) | (1 << 16),
+ &omap->ehci->regs->command);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
+ if (ret) {
+ dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+ goto err_add_hcd;
+ }
+
+ return 0;
+
+err_add_hcd:
+ omap_stop_ehc(omap, hcd);
+
+err_start:
+ iounmap(omap->tll_base);
+
+err_tll_ioremap:
+ iounmap(omap->uhh_base);
+
+err_uhh_ioremap:
+ iounmap(hcd->regs);
+
+err_ioremap:
+ usb_put_hcd(hcd);
+
+err_create_hcd:
+ kfree(omap);
+err_disabled:
+err_pdata:
+ return ret;
+}
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+/**
+ * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of ehci_hcd_omap_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static int ehci_hcd_omap_remove(struct platform_device *pdev)
+{
+ struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+
+ usb_remove_hcd(hcd);
+ omap_stop_ehc(omap, hcd);
+ iounmap(hcd->regs);
+ iounmap(omap->tll_base);
+ iounmap(omap->uhh_base);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
+{
+ struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_hcd_omap_driver = {
+ .probe = ehci_hcd_omap_probe,
+ .remove = ehci_hcd_omap_remove,
+ .shutdown = ehci_hcd_omap_shutdown,
+ /*.suspend = ehci_hcd_omap_suspend, */
+ /*.resume = ehci_hcd_omap_resume, */
+ .driver = {
+ .name = "ehci-omap",
+ }
+};
+
+/*-------------------------------------------------------------------------*/
+
+static const struct hc_driver ehci_omap_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "OMAP-EHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_init,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+MODULE_ALIAS("platform:omap-ehci");
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 139a2cc3f64..a427d3b0063 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -616,9 +616,11 @@ qh_urb_transaction (
) {
struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf;
- int len, maxpacket;
+ int len, this_sg_len, maxpacket;
int is_input;
u32 token;
+ int i;
+ struct scatterlist *sg;
/*
* URBs map to sequences of QTDs: one logical transaction
@@ -659,7 +661,20 @@ qh_urb_transaction (
/*
* data transfer stage: buffer setup
*/
- buf = urb->transfer_dma;
+ i = urb->num_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg->sg;
+ buf = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ buf = urb->transfer_dma;
+ this_sg_len = len;
+ }
if (is_input)
token |= (1 /* "in" */ << 8);
@@ -675,7 +690,9 @@ qh_urb_transaction (
for (;;) {
int this_qtd_len;
- this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+ this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
+ maxpacket);
+ this_sg_len -= this_qtd_len;
len -= this_qtd_len;
buf += this_qtd_len;
@@ -691,8 +708,13 @@ qh_urb_transaction (
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
- if (likely (len <= 0))
- break;
+ if (likely(this_sg_len <= 0)) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ buf = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
qtd_prev = qtd;
qtd = ehci_qtd_alloc (ehci, flags);
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a5535b5e3fe..1e391e624c8 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1385,7 +1385,7 @@ sitd_slot_ok (
* given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
-#define SCHEDULE_SLOP 10 /* frames */
+#define SCHEDULE_SLOP 80 /* microframes */
static int
iso_stream_schedule (
@@ -1394,12 +1394,13 @@ iso_stream_schedule (
struct ehci_iso_stream *stream
)
{
- u32 now, start, max, period;
+ u32 now, next, start, period;
int status;
unsigned mod = ehci->periodic_size << 3;
struct ehci_iso_sched *sched = urb->hcpriv;
+ struct pci_dev *pdev;
- if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
+ if (sched->span > (mod - SCHEDULE_SLOP)) {
ehci_dbg (ehci, "iso request %p too long\n", urb);
status = -EFBIG;
goto fail;
@@ -1418,26 +1419,35 @@ iso_stream_schedule (
now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
- /* when's the last uframe this urb could start? */
- max = now + mod;
-
/* Typical case: reuse current schedule, stream is still active.
* Hopefully there are no gaps from the host falling behind
* (irq delays etc), but if there are we'll take the next
* slot in the schedule, implicitly assuming URB_ISO_ASAP.
*/
if (likely (!list_empty (&stream->td_list))) {
+ pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
start = stream->next_uframe;
- if (start < now)
- start += mod;
+
+ /* For high speed devices, allow scheduling within the
+ * isochronous scheduling threshold. For full speed devices,
+ * don't. (Workaround for Intel ICH9 bug.)
+ */
+ if (!stream->highspeed &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL)
+ next = now + ehci->i_thresh;
+ else
+ next = now;
/* Fell behind (by up to twice the slop amount)? */
- if (start >= max - 2 * 8 * SCHEDULE_SLOP)
+ if (((start - next) & (mod - 1)) >=
+ mod - 2 * SCHEDULE_SLOP)
start += period * DIV_ROUND_UP(
- max - start, period) - mod;
+ (next - start) & (mod - 1),
+ period);
/* Tried to schedule too far into the future? */
- if (unlikely((start + sched->span) >= max)) {
+ if (unlikely(((start - now) & (mod - 1)) + sched->span
+ >= mod - 2 * SCHEDULE_SLOP)) {
status = -EFBIG;
goto fail;
}
@@ -1451,7 +1461,7 @@ iso_stream_schedule (
* can also help high bandwidth if the dma and irq loads don't
* jump until after the queue is primed.
*/
- start = SCHEDULE_SLOP * 8 + (now & ~0x07);
+ start = SCHEDULE_SLOP + (now & ~0x07);
start %= mod;
stream->next_uframe = start;
@@ -1482,7 +1492,7 @@ iso_stream_schedule (
/* no room in the schedule */
ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
list_empty (&stream->td_list) ? "" : "re",
- urb, now, max);
+ urb, now, now + mod);
status = -ENOSPC;
fail:
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
new file mode 100644
index 00000000000..a5861531ad3
--- /dev/null
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -0,0 +1,300 @@
+/*
+ * EHCI HCD (Host Controller Driver) for USB.
+ *
+ * Bus Glue for Xilinx EHCI core on the of_platform bus
+ *
+ * Copyright (c) 2009 Xilinx, Inc.
+ *
+ * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
+ * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
+ * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/signal.h>
+
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+/**
+ * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
+ * @hcd: Pointer to the usb_hcd device to which the host controller is bound
+ *
+ * Called during probe() after chip reset completes.
+ */
+static int ehci_xilinx_of_setup(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int retval;
+
+ retval = ehci_halt(ehci);
+ if (retval)
+ return retval;
+
+ retval = ehci_init(hcd);
+ if (retval)
+ return retval;
+
+ ehci->sbrn = 0x20;
+
+ return ehci_reset(ehci);
+}
+
+/**
+ * ehci_xilinx_port_handed_over - hand the port out if failed to enable it
+ * @hcd: Pointer to the usb_hcd device to which the host controller is bound
+ * @portnum: Port number to which the device is attached.
+ *
+ * This function is used as a place to tell the user that the Xilinx USB host
+ * controller does not support LS devices. And in an HS-only configuration, it
+ * does not support FS devices either. It is hoped that this can help a
+ * confused user.
+ *
+ * There are cases when the host controller fails to enable the port due to,
+ * for example, insufficient power that can be supplied to the device from
+ * the USB bus. In those cases, the messages printed here are not helpful.
+ */
+static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
+{
+ dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
+ if (hcd->has_tt) {
+ dev_warn(hcd->self.controller,
+ "Maybe you have connected a low speed device?\n");
+
+ dev_warn(hcd->self.controller,
+ "We do not support low speed devices\n");
+ } else {
+ dev_warn(hcd->self.controller,
+ "Maybe your device is not a high speed device?\n");
+ dev_warn(hcd->self.controller,
+ "The USB host controller does not support full speed "
+ "nor low speed devices\n");
+ dev_warn(hcd->self.controller,
+ "You can reconfigure the host controller to have "
+ "full speed support\n");
+ }
+
+ return 0;
+}
+
+
+static const struct hc_driver ehci_xilinx_of_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "OF EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ehci_xilinx_of_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+#endif
+ .relinquish_port = NULL,
+ .port_handed_over = ehci_xilinx_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+/**
+ * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
+ * @op: pointer to the of_device to which the host controller is bound
+ * @match: pointer to of_device_id structure, not used
+ *
+ * This function requests resources and sets up appropriate properties for the
+ * host controller. Because the Xilinx USB host controller can be configured
+ * as HS-only or HS/FS, it checks the configuration in the device tree
+ * entry and sets an appropriate value for hcd->has_tt.
+ */
+static int __devinit
+ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+ struct device_node *dn = op->node;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource res;
+ int irq;
+ int rv;
+ int *value;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");
+
+ rv = of_address_to_resource(dn, 0, &res);
+ if (rv)
+ return rv;
+
+ hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
+ "XILINX-OF USB");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res.start;
+ hcd->rsrc_len = res.end - res.start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ rv = -EBUSY;
+ goto err_rmr;
+ }
+
+ irq = irq_of_parse_and_map(dn, 0);
+ if (irq == NO_IRQ) {
+ printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ rv = -EBUSY;
+ goto err_irq;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ rv = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+
+ /* This core always has big-endian register interface and uses
+ * big-endian memory descriptors.
+ */
+ ehci->big_endian_mmio = 1;
+ ehci->big_endian_desc = 1;
+
+ /* Check whether the FS support option is selected in the hardware.
+ */
+ value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
+ if (value && (*value == 1)) {
+ ehci_dbg(ehci, "USB host controller supports FS devices\n");
+ hcd->has_tt = 1;
+ } else {
+ ehci_dbg(ehci,
+ "USB host controller is HS only\n");
+ hcd->has_tt = 0;
+ }
+
+ /* Debug registers are at the first 0x100 region
+ */
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ rv = usb_add_hcd(hcd, irq, 0);
+ if (rv == 0)
+ return 0;
+
+ iounmap(hcd->regs);
+
+err_ioremap:
+err_irq:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_rmr:
+ usb_put_hcd(hcd);
+
+ return rv;
+}
+
+/**
+ * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
+ * @op: pointer to of_device structure that is to be removed
+ *
+ * Remove the hcd structure, and release resources that have been requested
+ * during probe.
+ */
+static int ehci_hcd_xilinx_of_remove(struct of_device *op)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+ dev_set_drvdata(&op->dev, NULL);
+
+ dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
+
+ usb_remove_hcd(hcd);
+
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+/**
+ * ehci_hcd_xilinx_of_shutdown - shutdown the hcd
+ * @op: pointer to of_device structure that is to be removed
+ *
+ * Properly shut down the hcd by calling the driver's shutdown routine.
+ */
+static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+
+ return 0;
+}
+
+
+static struct of_device_id ehci_hcd_xilinx_of_match[] = {
+ {.compatible = "xlnx,xps-usb-host-1.00.a",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
+
+static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
+ .name = "xilinx-of-ehci",
+ .match_table = ehci_hcd_xilinx_of_match,
+ .probe = ehci_hcd_xilinx_of_probe,
+ .remove = ehci_hcd_xilinx_of_remove,
+ .shutdown = ehci_hcd_xilinx_of_shutdown,
+ .driver = {
+ .name = "xilinx-of-ehci",
+ .owner = THIS_MODULE,
+ },
+};
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 00a29855d0c..ff43747a614 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt)
pkt->info = 0;
pkt->priv_data = NULL;
- cq_put(usb->ep0->empty_frame_Q, pkt);
+ cq_put(&usb->ep0->empty_frame_Q, pkt);
}
/* confirm submitted packet */
@@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt)
if ((td->data + td->actual_len) && trans_len)
memcpy(td->data + td->actual_len, pkt->data,
trans_len);
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
}
recycle_frame(usb, pkt);
@@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
}
/* update frame object fields before transmitting */
- pkt = cq_get(usb->ep0->empty_frame_Q);
+ pkt = cq_get(&usb->ep0->empty_frame_Q);
if (!pkt) {
fhci_dbg(usb->fhci, "there is no empty frame\n");
return -1;
@@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
pkt->info = 0;
if (data == NULL) {
- data = cq_get(usb->ep0->dummy_packets_Q);
+ data = cq_get(&usb->ep0->dummy_packets_Q);
BUG_ON(!data);
pkt->info = PKT_DUMMY_PACKET;
}
@@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td)
list_del_init(&td->frame_lh);
td->status = USB_TD_OK;
if (pkt->info & PKT_DUMMY_PACKET)
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
recycle_frame(usb, pkt);
usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
fhci_err(usb->fhci, "host transaction failed\n");
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index b4033229031..d224ab467a4 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -106,33 +106,33 @@ void fhci_ep0_free(struct fhci_usb *usb)
cpm_muram_free(cpm_muram_offset(ep->td_base));
if (ep->conf_frame_Q) {
- size = cq_howmany(ep->conf_frame_Q);
+ size = cq_howmany(&ep->conf_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->conf_frame_Q);
+ struct packet *pkt = cq_get(&ep->conf_frame_Q);
kfree(pkt);
}
- cq_delete(ep->conf_frame_Q);
+ cq_delete(&ep->conf_frame_Q);
}
if (ep->empty_frame_Q) {
- size = cq_howmany(ep->empty_frame_Q);
+ size = cq_howmany(&ep->empty_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->empty_frame_Q);
+ struct packet *pkt = cq_get(&ep->empty_frame_Q);
kfree(pkt);
}
- cq_delete(ep->empty_frame_Q);
+ cq_delete(&ep->empty_frame_Q);
}
if (ep->dummy_packets_Q) {
- size = cq_howmany(ep->dummy_packets_Q);
+ size = cq_howmany(&ep->dummy_packets_Q);
for (; size; size--) {
- u8 *buff = cq_get(ep->dummy_packets_Q);
+ u8 *buff = cq_get(&ep->dummy_packets_Q);
kfree(buff);
}
- cq_delete(ep->dummy_packets_Q);
+ cq_delete(&ep->dummy_packets_Q);
}
kfree(ep);
@@ -175,10 +175,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
ep->td_base = cpm_muram_addr(ep_offset);
/* zero all queue pointers */
- ep->conf_frame_Q = cq_new(ring_len + 2);
- ep->empty_frame_Q = cq_new(ring_len + 2);
- ep->dummy_packets_Q = cq_new(ring_len + 2);
- if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) {
+ if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
+ cq_new(&ep->empty_frame_Q, ring_len + 2) ||
+ cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
err_for = "frame_queues";
goto err;
}
@@ -199,8 +198,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem,
err_for = "buffer";
goto err;
}
- cq_put(ep->empty_frame_Q, pkt);
- cq_put(ep->dummy_packets_Q, buff);
+ cq_put(&ep->empty_frame_Q, pkt);
+ cq_put(&ep->dummy_packets_Q, buff);
}
/* we put the endpoint parameter RAM right behind the TD ring */
@@ -319,7 +318,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb)
if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
continue;
- pkt = cq_get(ep->conf_frame_Q);
+ pkt = cq_get(&ep->conf_frame_Q);
if (!pkt)
fhci_err(usb->fhci, "no frame to confirm\n");
@@ -460,9 +459,9 @@ u32 fhci_host_transaction(struct fhci_usb *usb,
out_be16(&td->length, pkt->len);
/* put the frame to the confirmation queue */
- cq_put(ep->conf_frame_Q, pkt);
+ cq_put(&ep->conf_frame_Q, pkt);
- if (cq_howmany(ep->conf_frame_Q) == 1)
+ if (cq_howmany(&ep->conf_frame_Q) == 1)
out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
return 0;
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 7116284ed21..72dae1c5ab3 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -423,9 +423,9 @@ struct endpoint {
struct usb_td __iomem *td_base; /* first TD in the ring */
struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
- struct kfifo *empty_frame_Q; /* Empty frames list to use */
- struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */
- struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overun */
+ struct kfifo empty_frame_Q; /* Empty frames list to use */
+ struct kfifo conf_frame_Q; /* frames passed to TDs, waiting for tx */
+ struct kfifo dummy_packets_Q;/* dummy packets for the CRC overrun */
bool already_pushed_dummy_bd;
};
@@ -493,9 +493,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci)
}
/* fifo of pointers */
-static inline struct kfifo *cq_new(int size)
+static inline int cq_new(struct kfifo *fifo, int size)
{
- return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL);
+ return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL);
}
static inline void cq_delete(struct kfifo *kfifo)
@@ -505,19 +505,19 @@ static inline void cq_delete(struct kfifo *kfifo)
static inline unsigned int cq_howmany(struct kfifo *kfifo)
{
- return __kfifo_len(kfifo) / sizeof(void *);
+ return kfifo_len(kfifo) / sizeof(void *);
}
static inline int cq_put(struct kfifo *kfifo, void *p)
{
- return __kfifo_put(kfifo, (void *)&p, sizeof(p));
+ return kfifo_in(kfifo, (void *)&p, sizeof(p));
}
static inline void *cq_get(struct kfifo *kfifo)
{
void *p = NULL;
- __kfifo_get(kfifo, (void *)&p, sizeof(p));
+ kfifo_out(kfifo, (void *)&p, sizeof(p));
return p;
}
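/*
 * Editor's note: a minimal usage sketch, not part of this patch, showing the
 * reworked cq_* wrappers now that the kfifo is embedded in struct endpoint
 * and allocated in place.  It assumes fhci.h (above) plus <linux/kfifo.h>;
 * the function name is hypothetical and error handling is abbreviated.
 */
static int cq_example(void)
{
	struct kfifo fifo;
	static int payload;
	void *out = NULL;
	int ret;

	ret = cq_new(&fifo, 8);		/* kfifo_alloc(&fifo, 8 * sizeof(void *), GFP_KERNEL) */
	if (ret)
		return ret;

	cq_put(&fifo, &payload);	/* kfifo_in() copies the pointer value into the fifo */
	if (cq_howmany(&fifo) == 1)	/* kfifo_len() / sizeof(void *) */
		out = cq_get(&fifo);	/* kfifo_out() copies it back out */

	cq_delete(&fifo);		/* releases the fifo's backing buffer */
	return out == &payload ? 0 : -EIO;
}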
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 5c774ab9825..73352f3739b 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -80,7 +80,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <asm/irq.h>
#include <asm/system.h>
@@ -190,10 +190,8 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
struct isp1362_ep *ep, u16 len)
{
int ptd_offset = -EINVAL;
- int index;
int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
- int found = -1;
- int last = -1;
+ int found;
BUG_ON(len > epq->buf_size);
@@ -205,20 +203,9 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
BUG_ON(ep->num_ptds != 0);
- for (index = 0; index <= epq->buf_count - num_ptds; index++) {
- if (test_bit(index, &epq->buf_map))
- continue;
- found = index;
- for (last = index + 1; last < index + num_ptds; last++) {
- if (test_bit(last, &epq->buf_map)) {
- found = -1;
- break;
- }
- }
- if (found >= 0)
- break;
- }
- if (found < 0)
+ found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
+ num_ptds, 0);
+ if (found >= epq->buf_count)
return -EOVERFLOW;
DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
@@ -230,8 +217,7 @@ static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
epq->buf_avail -= num_ptds;
BUG_ON(epq->buf_avail > epq->buf_count);
ep->ptd_index = found;
- for (index = found; index < last; index++)
- __set_bit(index, &epq->buf_map);
+ bitmap_set(&epq->buf_map, found, num_ptds);
DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
__func__, epq->name, ep->ptd_index, ep->ptd_offset,
epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
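/*
 * Editor's note: illustrative sketch, not part of this patch, of the
 * bitmap_find_next_zero_area()/bitmap_set() pattern that replaces the
 * open-coded search above.  Failure is signalled by a return value >= the
 * bitmap size, which is exactly what the "found >= epq->buf_count" check
 * tests.  The helper name is hypothetical; assumes <linux/bitmap.h>.
 */
static int claim_blocks(unsigned long *map, unsigned int nbits, unsigned int count)
{
	unsigned long start;

	start = bitmap_find_next_zero_area(map, nbits, 0, count, 0);
	if (start >= nbits)
		return -EOVERFLOW;	/* no run of 'count' free bits */

	bitmap_set(map, start, count);	/* mark the run as allocated */
	return start;			/* release later with bitmap_clear(map, start, count) */
}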
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index 1a253ebf7e5..5151516ea1d 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -534,8 +534,8 @@ struct isp1362_hcd {
/* periodic schedule: isochronous */
struct list_head isoc;
- int istl_flip:1;
- int irq_active:1;
+ unsigned int istl_flip:1;
+ unsigned int irq_active:1;
/* Schedules for the current frame */
struct isp1362_ep_queue atl_queue;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 7ccffcbe7b6..68b83ab7071 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -35,7 +35,7 @@ extern int usb_disabled(void);
static void at91_start_clock(void)
{
- if (cpu_is_at91sam9261())
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
clk_enable(hclk);
clk_enable(iclk);
clk_enable(fclk);
@@ -46,7 +46,7 @@ static void at91_stop_clock(void)
{
clk_disable(fclk);
clk_disable(iclk);
- if (cpu_is_at91sam9261())
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
clk_disable(hclk);
clocked = 0;
}
@@ -142,7 +142,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
iclk = clk_get(&pdev->dev, "ohci_clk");
fclk = clk_get(&pdev->dev, "uhpck");
- if (cpu_is_at91sam9261())
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
hclk = clk_get(&pdev->dev, "hck0");
at91_start_hc(pdev);
@@ -155,7 +155,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
/* Error handling */
at91_stop_hc(pdev);
- if (cpu_is_at91sam9261())
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
clk_put(hclk);
clk_put(fclk);
clk_put(iclk);
@@ -192,7 +192,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
- if (cpu_is_at91sam9261())
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
clk_put(hclk);
clk_put(fclk);
clk_put(iclk);
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index e4380082ebb..17a6043c1fa 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -294,7 +294,7 @@ static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops au1xxx_ohci_pmops = {
+static const struct dev_pm_ops au1xxx_ohci_pmops = {
.suspend = ohci_hcd_au1xxx_drv_suspend,
.resume = ohci_hcd_au1xxx_drv_resume,
};
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 100bf3d8437..2769326da42 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -98,8 +98,8 @@
#define ISP1301_I2C_INTERRUPT_RISING 0xE
#define ISP1301_I2C_REG_CLEAR_ADDR 1
-struct i2c_driver isp1301_driver;
-struct i2c_client *isp1301_i2c_client;
+static struct i2c_driver isp1301_driver;
+static struct i2c_client *isp1301_i2c_client;
extern int usb_disabled(void);
extern int ocpi_enable(void);
@@ -120,12 +120,12 @@ static int isp1301_remove(struct i2c_client *client)
return 0;
}
-const struct i2c_device_id isp1301_id[] = {
+static const struct i2c_device_id isp1301_id[] = {
{ "isp1301_pnx", 0 },
{ }
};
-struct i2c_driver isp1301_driver = {
+static struct i2c_driver isp1301_driver = {
.driver = {
.name = "isp1301_pnx",
},
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index f1c06202fdf..a18debdd79b 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -518,7 +518,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
+static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
.suspend = ohci_hcd_pxa27x_drv_suspend,
.resume = ohci_hcd_pxa27x_drv_resume,
};
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index e33d3625635..b7a661c02bc 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -822,8 +822,6 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
return;
list_for_each_entry_safe(td, next, list, queue) {
- if (!td)
- continue;
if (td->address != address)
continue;
@@ -2025,8 +2023,6 @@ static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597,
struct list_head *list = &r8a66597->child_device;
list_for_each_entry(dev, list, device_list) {
- if (!dev)
- continue;
if (dev->usb_address != addr)
continue;
@@ -2357,7 +2353,7 @@ static int r8a66597_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops r8a66597_dev_pm_ops = {
+static const struct dev_pm_ops r8a66597_dev_pm_ops = {
.suspend = r8a66597_suspend,
.resume = r8a66597_resume,
.poweroff = r8a66597_suspend,
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 2273c815941..8c1c610c951 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -31,17 +31,29 @@ struct whc_dbg {
void qset_print(struct seq_file *s, struct whc_qset *qset)
{
+ static const char *qh_type[] = {
+ "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
struct whc_std *std;
struct urb *urb = NULL;
int i;
- seq_printf(s, "qset %08x\n", (u32)qset->qset_dma);
+ seq_printf(s, "qset %08x", (u32)qset->qset_dma);
+ if (&qset->list_node == qset->whc->async_list.prev) {
+ seq_printf(s, " (dummy)\n");
+ } else {
+ seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
+ qset->qh.info1 & 0x0f,
+ (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
+ qh_type[(qset->qh.info1 >> 5) & 0x7],
+ (qset->qh.info1 >> 16) & 0xffff);
+ }
seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
seq_printf(s, " info: %08x %08x %08x\n",
- qset->qh.info1, qset->qh.info2, qset->qh.info3);
- seq_printf(s, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
+ qset->qh.info1, qset->qh.info2, qset->qh.info3);
+ seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
+ qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
seq_printf(s, " TD: sts: %08x opts: %08x\n",
- qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+ qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 687b622a161..e0d3401285c 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -250,6 +250,7 @@ static int whc_probe(struct umc_dev *umc)
}
usb_hcd->wireless = 1;
+ usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
whc = wusbhc_to_whc(wusbhc);
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 1b9dc157157..7d4204db0f6 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -49,16 +49,19 @@ struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
* state
* @urb: an urb for a transfer to this endpoint
*/
-static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
+static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
struct usb_device *usb_dev = urb->dev;
+ struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
struct usb_wireless_ep_comp_descriptor *epcd;
bool is_out;
+ uint8_t phy_rate;
is_out = usb_pipeout(urb->pipe);
- epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
+ qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+ epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
if (epcd) {
qset->max_seq = epcd->bMaxSequence;
qset->max_burst = epcd->bMaxBurst;
@@ -67,12 +70,28 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
qset->max_burst = 1;
}
+ /*
+ * Initial PHY rate is 53.3 Mbit/s for control endpoints or
+ * the maximum supported by the device for other endpoints
+ * (unless limited by the user).
+ */
+ if (usb_pipecontrol(urb->pipe))
+ phy_rate = UWB_PHY_RATE_53;
+ else {
+ uint16_t phy_rates;
+
+ phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
+ phy_rate = fls(phy_rates) - 1;
+ if (phy_rate > whc->wusbhc.phy_rate)
+ phy_rate = whc->wusbhc.phy_rate;
+ }
+
qset->qh.info1 = cpu_to_le32(
QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
| usb_pipe_to_qh_type(urb->pipe)
| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
- | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
+ | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
);
qset->qh.info2 = cpu_to_le32(
QH_INFO2_BURST(qset->max_burst)
@@ -86,7 +105,7 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
* strength and can presumably guess the Tx power required
* from that? */
qset->qh.info3 = cpu_to_le32(
- QH_INFO3_TX_RATE_53_3
+ QH_INFO3_TX_RATE(phy_rate)
| QH_INFO3_TX_PWR(0) /* 0 == max power */
);
@@ -148,7 +167,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
qset->ep = urb->ep;
urb->ep->hcpriv = qset;
- qset_fill_qh(qset, urb);
+ qset_fill_qh(whc, qset, urb);
}
return qset;
}
@@ -241,6 +260,36 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
qset->ntds--;
}
+static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
+{
+ struct scatterlist *sg;
+ void *bounce;
+ size_t remaining, offset;
+
+ bounce = std->bounce_buf;
+ remaining = std->len;
+
+ sg = std->bounce_sg;
+ offset = std->bounce_offset;
+
+ while (remaining) {
+ size_t len;
+
+ len = min(sg->length - offset, remaining);
+ memcpy(sg_virt(sg) + offset, bounce, len);
+
+ bounce += len;
+ remaining -= len;
+
+ offset += len;
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ }
+
+}
+
/**
* qset_free_std - remove an sTD and free it.
* @whc: the WHCI host controller
@@ -249,13 +298,29 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
void qset_free_std(struct whc *whc, struct whc_std *std)
{
list_del(&std->list_node);
- if (std->num_pointers) {
- dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
- std->num_pointers * sizeof(struct whc_page_list_entry),
- DMA_TO_DEVICE);
+ if (std->bounce_buf) {
+ bool is_out = usb_pipeout(std->urb->pipe);
+ dma_addr_t dma_addr;
+
+ if (std->num_pointers)
+ dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
+ else
+ dma_addr = std->dma_addr;
+
+ dma_unmap_single(whc->wusbhc.dev, dma_addr,
+ std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!is_out)
+ qset_copy_bounce_to_sg(whc, std);
+ kfree(std->bounce_buf);
+ }
+ if (std->pl_virt) {
+ if (std->dma_addr)
+ dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
+ std->num_pointers * sizeof(struct whc_page_list_entry),
+ DMA_TO_DEVICE);
kfree(std->pl_virt);
+ std->pl_virt = NULL;
}
-
kfree(std);
}
@@ -293,12 +358,17 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
{
dma_addr_t dma_addr = std->dma_addr;
dma_addr_t sp, ep;
- size_t std_len = std->len;
size_t pl_len;
int p;
- sp = ALIGN(dma_addr, WHCI_PAGE_SIZE);
- ep = dma_addr + std_len;
+ /* Short buffers don't need a page list. */
+ if (std->len <= WHCI_PAGE_SIZE) {
+ std->num_pointers = 0;
+ return 0;
+ }
+
+ sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+ ep = dma_addr + std->len;
std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
@@ -309,7 +379,7 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
for (p = 0; p < std->num_pointers; p++) {
std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
- dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE);
+ dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
}
return 0;
@@ -339,6 +409,218 @@ static void urb_dequeue_work(struct work_struct *work)
spin_unlock_irqrestore(&whc->lock, flags);
}
+static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_std *std;
+
+ std = kzalloc(sizeof(struct whc_std), mem_flags);
+ if (std == NULL)
+ return NULL;
+
+ std->urb = urb;
+ std->qtd = NULL;
+
+ INIT_LIST_HEAD(&std->list_node);
+ list_add_tail(&std->list_node, &qset->stds);
+
+ return std;
+}
+
+static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags)
+{
+ size_t remaining;
+ struct scatterlist *sg;
+ int i;
+ int ntds = 0;
+ struct whc_std *std = NULL;
+ struct whc_page_list_entry *entry;
+ dma_addr_t prev_end = 0;
+ size_t pl_len;
+ int p = 0;
+
+ remaining = urb->transfer_buffer_length;
+
+ for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+ dma_addr_t dma_addr;
+ size_t dma_remaining;
+ dma_addr_t sp, ep;
+ int num_pointers;
+
+ if (remaining == 0) {
+ break;
+ }
+
+ dma_addr = sg_dma_address(sg);
+ dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
+
+ while (dma_remaining) {
+ size_t dma_len;
+
+ /*
+ * We can use the previous std (if it exists) provided that:
+ * - the previous one ended on a page boundary.
+ * - the current one begins on a page boundary.
+ * - the previous one isn't full.
+ *
+ * If a new std is needed but the previous one
+ * was not a whole number of packets then this
+ * sg list cannot be mapped onto multiple
+ * qTDs. Return an error and let the caller
+ * sort it out.
+ */
+ if (!std
+ || (prev_end & (WHCI_PAGE_SIZE-1))
+ || (dma_addr & (WHCI_PAGE_SIZE-1))
+ || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
+ if (std && std->len % qset->max_packet != 0)
+ return -EINVAL;
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL) {
+ return -ENOMEM;
+ }
+ ntds++;
+ p = 0;
+ }
+
+ dma_len = dma_remaining;
+
+ /*
+ * If the remainder of this element doesn't
+ * fit in a single qTD, limit the qTD to a
+ * whole number of packets. This allows the
+ * remainder to go into the next qTD.
+ */
+ if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
+ dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
+ * qset->max_packet - std->len;
+ }
+
+ std->len += dma_len;
+ std->ntds_remaining = -1; /* filled in later */
+
+ sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+ ep = dma_addr + dma_len;
+ num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+ std->num_pointers += num_pointers;
+
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+
+ std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
+ if (std->pl_virt == NULL) {
+ return -ENOMEM;
+ }
+
+ for (;p < std->num_pointers; p++, entry++) {
+ std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+ dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+ }
+
+ prev_end = dma_addr = ep;
+ dma_remaining -= dma_len;
+ remaining -= dma_len;
+ }
+ }
+
+ /* Now the number of stds is known, go back and fill in
+ std->ntds_remaining. */
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (std->ntds_remaining == -1) {
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+ std->ntds_remaining = ntds--;
+ std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
+ pl_len, DMA_TO_DEVICE);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
+ *
+ * If the URB contains an sg list whose elements cannot be directly
+ * mapped to qTDs then the data must be transferred via bounce
+ * buffers.
+ */
+static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, gfp_t mem_flags)
+{
+ bool is_out = usb_pipeout(urb->pipe);
+ size_t max_std_len;
+ size_t remaining;
+ int ntds = 0;
+ struct whc_std *std = NULL;
+ void *bounce = NULL;
+ struct scatterlist *sg;
+ int i;
+
+ /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
+ max_std_len = qset->max_burst * qset->max_packet;
+
+ remaining = urb->transfer_buffer_length;
+
+ for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
+ size_t len;
+ size_t sg_remaining;
+ void *orig;
+
+ if (remaining == 0) {
+ break;
+ }
+
+ sg_remaining = min_t(size_t, remaining, sg->length);
+ orig = sg_virt(sg);
+
+ while (sg_remaining) {
+ if (!std || std->len == max_std_len) {
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL)
+ return -ENOMEM;
+ std->bounce_buf = kmalloc(max_std_len, mem_flags);
+ if (std->bounce_buf == NULL)
+ return -ENOMEM;
+ std->bounce_sg = sg;
+ std->bounce_offset = orig - sg_virt(sg);
+ bounce = std->bounce_buf;
+ ntds++;
+ }
+
+ len = min(sg_remaining, max_std_len - std->len);
+
+ if (is_out)
+ memcpy(bounce, orig, len);
+
+ std->len += len;
+ std->ntds_remaining = -1; /* filled in later */
+
+ bounce += len;
+ orig += len;
+ sg_remaining -= len;
+ remaining -= len;
+ }
+ }
+
+ /*
+ * For each of the new sTDs, map the bounce buffers, create
+ * page lists (if necessary), and fill in std->ntds_remaining.
+ */
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (std->ntds_remaining != -1)
+ continue;
+
+ std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
+ is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (qset_fill_page_list(whc, std, mem_flags) < 0)
+ return -ENOMEM;
+
+ std->ntds_remaining = ntds--;
+ }
+
+ return 0;
+}
+
/**
* qset_add_urb - add an urb to the qset's queue.
*
@@ -353,10 +635,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
int remaining = urb->transfer_buffer_length;
u64 transfer_dma = urb->transfer_dma;
int ntds_remaining;
-
- ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
- if (ntds_remaining == 0)
- ntds_remaining = 1;
+ int ret;
wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
if (wurb == NULL)
@@ -366,32 +645,39 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
wurb->urb = urb;
INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
+ if (urb->sg) {
+ ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
+ if (ret == -EINVAL) {
+ qset_free_stds(qset, urb);
+ ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
+ }
+ if (ret < 0)
+ goto err_no_mem;
+ return 0;
+ }
+
+ ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
+ if (ntds_remaining == 0)
+ ntds_remaining = 1;
+
while (ntds_remaining) {
struct whc_std *std;
size_t std_len;
- std = kmalloc(sizeof(struct whc_std), mem_flags);
- if (std == NULL)
- goto err_no_mem;
-
std_len = remaining;
if (std_len > QTD_MAX_XFER_SIZE)
std_len = QTD_MAX_XFER_SIZE;
- std->urb = urb;
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL)
+ goto err_no_mem;
+
std->dma_addr = transfer_dma;
std->len = std_len;
std->ntds_remaining = ntds_remaining;
- std->qtd = NULL;
- INIT_LIST_HEAD(&std->list_node);
- list_add_tail(&std->list_node, &qset->stds);
-
- if (std_len > WHCI_PAGE_SIZE) {
- if (qset_fill_page_list(whc, std, mem_flags) < 0)
- goto err_no_mem;
- } else
- std->num_pointers = 0;
+ if (qset_fill_page_list(whc, std, mem_flags) < 0)
+ goto err_no_mem;
ntds_remaining--;
remaining -= std_len;
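/*
 * Editor's note: a small worked sketch, not part of this patch, of the
 * page-list sizing used by qset_fill_page_list() and qset_add_urb_sg()
 * above.  WHCI_PAGE_SIZE is taken as 4096 for the example and the helper
 * name is hypothetical; assumes <linux/kernel.h> and <linux/types.h>.
 */
static unsigned int whci_page_pointers(dma_addr_t dma_addr, size_t len)
{
	dma_addr_t sp = dma_addr & ~(dma_addr_t)(4096 - 1);	/* round start down to a page */
	dma_addr_t ep = dma_addr + len;				/* one past the last byte */

	return DIV_ROUND_UP(ep - sp, 4096);
}

/*
 * Example: dma_addr = 0x1ff0 and len = 0x30 touch both the 0x1000 and 0x2000
 * pages, so whci_page_pointers() returns 2 and two page-list entries are used.
 */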
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
index 24e94d983c5..c80c7d93bc4 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/usb/host/whci/whcd.h
@@ -84,6 +84,11 @@ struct whc {
* @len: the length of data in the associated TD.
* @ntds_remaining: number of TDs (starting from this one) in this transfer.
*
+ * @bounce_buf: a bounce buffer if the std was from an urb with a sg
+ * list that could not be mapped to qTDs directly.
+ * @bounce_sg: the first scatterlist element bounce_buf is for.
+ * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
+ *
* Queued URBs may require more TDs than are available in a qset so we
* use a list of these "software TDs" (sTDs) to hold per-TD data.
*/
@@ -97,6 +102,10 @@ struct whc_std {
int num_pointers;
dma_addr_t dma_addr;
struct whc_page_list_entry *pl_virt;
+
+ void *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned bounce_offset;
};
/**
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
index e8d0001605b..4d4cbc0730b 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -172,14 +172,7 @@ struct whc_qhead {
#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
-#define QH_INFO3_TX_RATE_53_3 (0 << 24)
-#define QH_INFO3_TX_RATE_80 (1 << 24)
-#define QH_INFO3_TX_RATE_106_7 (2 << 24)
-#define QH_INFO3_TX_RATE_160 (3 << 24)
-#define QH_INFO3_TX_RATE_200 (4 << 24)
-#define QH_INFO3_TX_RATE_320 (5 << 24)
-#define QH_INFO3_TX_RATE_400 (6 << 24)
-#define QH_INFO3_TX_RATE_480 (7 << 24)
+#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
#define QH_STATUS_FLOW_CTRL (1 << 15)
@@ -267,8 +260,9 @@ struct whc_qset {
unsigned reset:1;
struct urb *pause_after_urb;
struct completion remove_complete;
- int max_burst;
- int max_seq;
+ uint16_t max_packet;
+ uint8_t max_burst;
+ uint8_t max_seq;
};
static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
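/*
 * Editor's note: illustrative sketch, not part of this patch, of how the new
 * QH_INFO3_TX_RATE(r) macro is fed in qset_fill_qh() above.  The device
 * advertises a bitmask of supported PHY rates; fls() picks the highest set
 * bit and the result is clamped to the host-wide limit.  The helper name is
 * hypothetical; assumes <linux/bitops.h> and <linux/types.h>.
 */
static u8 pick_phy_rate(u16 phy_rates, u8 host_limit)
{
	u8 rate = fls(phy_rates) - 1;	/* index of the fastest advertised rate */

	return rate > host_limit ? host_limit : rate;
}

/*
 * Example: phy_rates = 0x00ff advertises rate indexes 0..7, fls() returns 8,
 * so the device-preferred rate is 7 (480 Mbit/s) before clamping.
 */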
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 932f9993848..5e92c72df64 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -67,22 +67,14 @@ static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
}
/*
- * Force HC into halt state.
- *
- * Disable any IRQs and clear the run/stop bit.
- * HC will complete any current and actively pipelined transactions, and
- * should halt within 16 microframes of the run/stop bit being cleared.
- * Read HC Halted bit in the status register to see when the HC is finished.
- * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ * Disable interrupts and begin the xHCI halting process.
*/
-int xhci_halt(struct xhci_hcd *xhci)
+void xhci_quiesce(struct xhci_hcd *xhci)
{
u32 halted;
u32 cmd;
u32 mask;
- xhci_dbg(xhci, "// Halt the HC\n");
- /* Disable all interrupts from the host controller */
mask = ~(XHCI_IRQS);
halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
if (!halted)
@@ -91,6 +83,21 @@ int xhci_halt(struct xhci_hcd *xhci)
cmd = xhci_readl(xhci, &xhci->op_regs->command);
cmd &= mask;
xhci_writel(xhci, cmd, &xhci->op_regs->command);
+}
+
+/*
+ * Force HC into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * HC will complete any current and actively pipelined transactions, and
+ * should halt within 16 microframes of the run/stop bit being cleared.
+ * Read HC Halted bit in the status register to see when the HC is finished.
+ * XXX: shouldn't we set HC_STATE_HALT here somewhere?
+ */
+int xhci_halt(struct xhci_hcd *xhci)
+{
+ xhci_dbg(xhci, "// Halt the HC\n");
+ xhci_quiesce(xhci);
return handshake(xhci, &xhci->op_regs->status,
STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
@@ -124,28 +131,6 @@ int xhci_reset(struct xhci_hcd *xhci)
return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
}
-/*
- * Stop the HC from processing the endpoint queues.
- */
-static void xhci_quiesce(struct xhci_hcd *xhci)
-{
- /*
- * Queues are per endpoint, so we need to disable an endpoint or slot.
- *
- * To disable a slot, we need to insert a disable slot command on the
- * command ring and ring the doorbell. This will also free any internal
- * resources associated with the slot (which might not be what we want).
- *
- * A Release Endpoint command sounds better - doesn't free internal HC
- * memory, but removes the endpoints from the schedule and releases the
- * bandwidth, disables the doorbells, and clears the endpoint enable
- * flag. Usually used prior to a set interface command.
- *
- * TODO: Implement after command ring code is done.
- */
- BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
- xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
-}
#if 0
/* Set up MSI-X table for entry 0 (may claim other entries later) */
@@ -261,8 +246,14 @@ static void xhci_work(struct xhci_hcd *xhci)
/* Flush posted writes */
xhci_readl(xhci, &xhci->ir_set->irq_pending);
- /* FIXME this should be a delayed service routine that clears the EHB */
- xhci_handle_event(xhci);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
+ "Shouldn't IRQs be disabled?\n");
+ else
+ /* FIXME this should be a delayed service routine
+ * that clears the EHB.
+ */
+ xhci_handle_event(xhci);
/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
@@ -335,7 +326,7 @@ void xhci_event_ring_work(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
- if (temp == 0xffffffff) {
+ if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
xhci_dbg(xhci, "HW died, polling stopped.\n");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
@@ -490,8 +481,6 @@ void xhci_stop(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
spin_lock_irq(&xhci->lock);
- if (HC_IS_RUNNING(hcd->state))
- xhci_quiesce(xhci);
xhci_halt(xhci);
xhci_reset(xhci);
spin_unlock_irq(&xhci->lock);
@@ -727,16 +716,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
* atomic context to this function, which may allocate memory.
*/
spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ goto dying;
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ goto dying;
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ goto dying;
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -745,6 +740,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
}
exit:
return ret;
+dying:
+ xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
+ "non-responsive xHCI host.\n",
+ urb->ep->desc.bEndpointAddress, urb);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return -ESHUTDOWN;
}
/*
@@ -806,6 +807,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
kfree(td);
return ret;
}
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
+ "non-responsive xHCI host.\n",
+ urb->ep->desc.bEndpointAddress, urb);
+ /* Let the stop endpoint command watchdog timer (which set this
+ * state) finish cleaning up the endpoint TD lists. We must
+ * have caught it in the middle of dropping a lock and giving
+ * back an URB.
+ */
+ goto done;
+ }
xhci_dbg(xhci, "Cancel URB %p\n", urb);
xhci_dbg(xhci, "Event ring:\n");
@@ -817,12 +829,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_debug_ring(xhci, ep_ring);
td = (struct xhci_td *) urb->hcpriv;
- ep->cancels_pending++;
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
/* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled.
*/
- if (ep->cancels_pending == 1) {
+ if (!(ep->ep_state & EP_HALT_PENDING)) {
+ ep->ep_state |= EP_HALT_PENDING;
+ ep->stop_cmds_pending++;
+ ep->stop_cmd_timer.expires = jiffies +
+ XHCI_STOP_EP_CMD_TIMEOUT * HZ;
+ add_timer(&ep->stop_cmd_timer);
xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
xhci_ring_cmd_db(xhci);
}
@@ -1246,13 +1262,35 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
xhci_zero_in_ctx(xhci, virt_dev);
- /* Free any old rings */
+ /* Install new rings and free or cache any old rings */
for (i = 1; i < 31; ++i) {
- if (virt_dev->eps[i].new_ring) {
- xhci_ring_free(xhci, virt_dev->eps[i].ring);
- virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
- virt_dev->eps[i].new_ring = NULL;
+ int rings_cached;
+
+ if (!virt_dev->eps[i].new_ring)
+ continue;
+ /* Only cache or free the old ring if it exists.
+ * It may not if this is the first add of an endpoint.
+ */
+ if (virt_dev->eps[i].ring) {
+ rings_cached = virt_dev->num_rings_cached;
+ if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+ virt_dev->num_rings_cached++;
+ rings_cached = virt_dev->num_rings_cached;
+ virt_dev->ring_cache[rings_cached] =
+ virt_dev->eps[i].ring;
+ xhci_dbg(xhci, "Cached old ring, "
+ "%d ring%s cached\n",
+ rings_cached,
+ (rings_cached > 1) ? "s" : "");
+ } else {
+ xhci_ring_free(xhci, virt_dev->eps[i].ring);
+ xhci_dbg(xhci, "Ring cache full (%d rings), "
+ "freeing ring\n",
+ virt_dev->num_rings_cached);
+ }
}
+ virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
+ virt_dev->eps[i].new_ring = NULL;
}
return ret;
@@ -1427,16 +1465,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev;
unsigned long flags;
u32 state;
+ int i;
if (udev->slot_id == 0)
return;
+ virt_dev = xhci->devs[udev->slot_id];
+ if (!virt_dev)
+ return;
+
+ /* Stop any wayward timer functions (which may grab the lock) */
+ for (i = 0; i < 31; ++i) {
+ virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
+ del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
+ }
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = xhci_readl(xhci, &xhci->op_regs->status);
- if (state == 0xffffffff) {
+ if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
return;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b8fd270a8b0..bffcef7a554 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -125,6 +125,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
kfree(ring);
}
+static void xhci_initialize_ring_info(struct xhci_ring *ring)
+{
+ /* The ring is empty, so the enqueue pointer == dequeue pointer */
+ ring->enqueue = ring->first_seg->trbs;
+ ring->enq_seg = ring->first_seg;
+ ring->dequeue = ring->enqueue;
+ ring->deq_seg = ring->first_seg;
+ /* The ring is initialized to 0. The producer must write 1 to the cycle
+ * bit to handover ownership of the TRB, so PCS = 1. The consumer must
+ * compare CCS to the cycle bit to check ownership, so CCS = 1.
+ */
+ ring->cycle_state = 1;
+ /* Not necessary for new rings, but needed for re-initialized rings */
+ ring->enq_updates = 0;
+ ring->deq_updates = 0;
+}
+
/**
* Create a new ring with zero or more segments.
*
@@ -173,17 +190,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
" segment %p (virtual), 0x%llx (DMA)\n",
prev, (unsigned long long)prev->dma);
}
- /* The ring is empty, so the enqueue pointer == dequeue pointer */
- ring->enqueue = ring->first_seg->trbs;
- ring->enq_seg = ring->first_seg;
- ring->dequeue = ring->enqueue;
- ring->deq_seg = ring->first_seg;
- /* The ring is initialized to 0. The producer must write 1 to the cycle
- * bit to handover ownership of the TRB, so PCS = 1. The consumer must
- * compare CCS to the cycle bit to check ownership, so CCS = 1.
- */
- ring->cycle_state = 1;
-
+ xhci_initialize_ring_info(ring);
return ring;
fail:
@@ -191,6 +198,27 @@ fail:
return 0;
}
+/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
+ * pointers to the beginning of the ring.
+ */
+static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
+ struct xhci_ring *ring)
+{
+ struct xhci_segment *seg = ring->first_seg;
+ do {
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+ /* All endpoint rings have link TRBs */
+ xhci_link_segments(xhci, seg, seg->next, 1);
+ seg = seg->next;
+ } while (seg != ring->first_seg);
+ xhci_initialize_ring_info(ring);
+ /* td list should be empty since all URBs have been cancelled,
+ * but just in case...
+ */
+ INIT_LIST_HEAD(&ring->td_list);
+}
+
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -248,6 +276,15 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
+static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep)
+{
+ init_timer(&ep->stop_cmd_timer);
+ ep->stop_cmd_timer.data = (unsigned long) ep;
+ ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
+ ep->xhci = xhci;
+}
+
/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
@@ -267,6 +304,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
+ if (dev->ring_cache) {
+ for (i = 0; i < dev->num_rings_cached; i++)
+ xhci_ring_free(xhci, dev->ring_cache[i]);
+ kfree(dev->ring_cache);
+ }
+
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
if (dev->out_ctx)
@@ -309,15 +352,25 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma);
- /* Initialize the cancellation list for each endpoint */
- for (i = 0; i < 31; i++)
+ /* Initialize the cancellation list and watchdog timers for each ep */
+ for (i = 0; i < 31; i++) {
+ xhci_init_endpoint_timer(xhci, &dev->eps[i]);
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
+ }
/* Allocate endpoint 0 ring */
dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
if (!dev->eps[0].ring)
goto fail;
+ /* Allocate pointers to the ring cache */
+ dev->ring_cache = kzalloc(
+ sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
+ flags);
+ if (!dev->ring_cache)
+ goto fail;
+ dev->num_rings_cached = 0;
+
init_completion(&dev->cmd_completion);
INIT_LIST_HEAD(&dev->cmd_list);
@@ -544,8 +597,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 1, true, mem_flags);
- if (!virt_dev->eps[ep_index].new_ring)
- return -ENOMEM;
+ if (!virt_dev->eps[ep_index].new_ring) {
+ /* Attempt to use the ring cache */
+ if (virt_dev->num_rings_cached == 0)
+ return -ENOMEM;
+ virt_dev->eps[ep_index].new_ring =
+ virt_dev->ring_cache[virt_dev->num_rings_cached];
+ virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+ virt_dev->num_rings_cached--;
+ xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
+ }
ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
@@ -768,14 +829,17 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
command->in_ctx =
xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
- if (!command->in_ctx)
+ if (!command->in_ctx) {
+ kfree(command);
return NULL;
+ }
if (allocate_completion) {
command->completion =
kzalloc(sizeof(struct completion), mem_flags);
if (!command->completion) {
xhci_free_container_ctx(xhci, command->in_ctx);
+ kfree(command);
return NULL;
}
init_completion(command->completion);
@@ -848,6 +912,163 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci->page_shift = 0;
}
+static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
+ struct xhci_segment *input_seg,
+ union xhci_trb *start_trb,
+ union xhci_trb *end_trb,
+ dma_addr_t input_dma,
+ struct xhci_segment *result_seg,
+ char *test_name, int test_number)
+{
+ unsigned long long start_dma;
+ unsigned long long end_dma;
+ struct xhci_segment *seg;
+
+ start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
+ end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
+
+ seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
+ if (seg != result_seg) {
+ xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
+ test_name, test_number);
+ xhci_warn(xhci, "Tested TRB math w/ seg %p and "
+ "input DMA 0x%llx\n",
+ input_seg,
+ (unsigned long long) input_dma);
+ xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
+ "ending TRB %p (0x%llx DMA)\n",
+ start_trb, start_dma,
+ end_trb, end_dma);
+ xhci_warn(xhci, "Expected seg %p, got seg %p\n",
+ result_seg, seg);
+ return -1;
+ }
+ return 0;
+}
+
+/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
+static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
+{
+ struct {
+ dma_addr_t input_dma;
+ struct xhci_segment *result_seg;
+ } simple_test_vector [] = {
+ /* A zeroed DMA field should fail */
+ { 0, NULL },
+ /* One TRB before the ring start should fail */
+ { xhci->event_ring->first_seg->dma - 16, NULL },
+ /* One byte before the ring start should fail */
+ { xhci->event_ring->first_seg->dma - 1, NULL },
+ /* Starting TRB should succeed */
+ { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
+ /* Ending TRB should succeed */
+ { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
+ xhci->event_ring->first_seg },
+ /* One byte after the ring end should fail */
+ { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
+ /* One TRB after the ring end should fail */
+ { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
+ /* An address of all ones should fail */
+ { (dma_addr_t) (~0), NULL },
+ };
+ struct {
+ struct xhci_segment *input_seg;
+ union xhci_trb *start_trb;
+ union xhci_trb *end_trb;
+ dma_addr_t input_dma;
+ struct xhci_segment *result_seg;
+ } complex_test_vector [] = {
+ /* Test feeding a valid DMA address from a different ring */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = xhci->event_ring->first_seg->trbs,
+ .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+ .input_dma = xhci->cmd_ring->first_seg->dma,
+ .result_seg = NULL,
+ },
+ /* Test feeding a valid end TRB from a different ring */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = xhci->event_ring->first_seg->trbs,
+ .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+ .input_dma = xhci->cmd_ring->first_seg->dma,
+ .result_seg = NULL,
+ },
+ /* Test feeding a valid start and end TRB from a different ring */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = xhci->cmd_ring->first_seg->trbs,
+ .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+ .input_dma = xhci->cmd_ring->first_seg->dma,
+ .result_seg = NULL,
+ },
+ /* TRB in this ring, but after this TD */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = &xhci->event_ring->first_seg->trbs[0],
+ .end_trb = &xhci->event_ring->first_seg->trbs[3],
+ .input_dma = xhci->event_ring->first_seg->dma + 4*16,
+ .result_seg = NULL,
+ },
+ /* TRB in this ring, but before this TD */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = &xhci->event_ring->first_seg->trbs[3],
+ .end_trb = &xhci->event_ring->first_seg->trbs[6],
+ .input_dma = xhci->event_ring->first_seg->dma + 2*16,
+ .result_seg = NULL,
+ },
+ /* TRB in this ring, but after this wrapped TD */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+ .end_trb = &xhci->event_ring->first_seg->trbs[1],
+ .input_dma = xhci->event_ring->first_seg->dma + 2*16,
+ .result_seg = NULL,
+ },
+ /* TRB in this ring, but before this wrapped TD */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+ .end_trb = &xhci->event_ring->first_seg->trbs[1],
+ .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
+ .result_seg = NULL,
+ },
+ /* TRB not in this ring, and we have a wrapped TD */
+ { .input_seg = xhci->event_ring->first_seg,
+ .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
+ .end_trb = &xhci->event_ring->first_seg->trbs[1],
+ .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
+ .result_seg = NULL,
+ },
+ };
+
+ unsigned int num_tests;
+ int i, ret;
+
+ num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
+ for (i = 0; i < num_tests; i++) {
+ ret = xhci_test_trb_in_td(xhci,
+ xhci->event_ring->first_seg,
+ xhci->event_ring->first_seg->trbs,
+ &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
+ simple_test_vector[i].input_dma,
+ simple_test_vector[i].result_seg,
+ "Simple", i);
+ if (ret < 0)
+ return ret;
+ }
+
+ num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
+ for (i = 0; i < num_tests; i++) {
+ ret = xhci_test_trb_in_td(xhci,
+ complex_test_vector[i].input_seg,
+ complex_test_vector[i].start_trb,
+ complex_test_vector[i].end_trb,
+ complex_test_vector[i].input_dma,
+ complex_test_vector[i].result_seg,
+ "Complex", i);
+ if (ret < 0)
+ return ret;
+ }
+ xhci_dbg(xhci, "TRB math tests passed.\n");
+ return 0;
+}
+
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
@@ -951,6 +1172,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
if (!xhci->event_ring)
goto fail;
+ if (xhci_check_trb_in_td_math(xhci, flags) < 0)
+ goto fail;
xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
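/*
 * Editor's note: the TRB math self-test above is a table-driven check.  The
 * minimal sketch below, not part of this patch, shows the general shape:
 * each vector pairs an input with its expected result and the loop stops at
 * the first mismatch.  All names here are hypothetical.
 */
struct test_vector {
	unsigned long input;
	unsigned long expected;
};

static int run_vectors(const struct test_vector *v, unsigned int n,
		       unsigned long (*fn)(unsigned long))
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (fn(v[i].input) != v[i].expected)
			return -EINVAL;	/* a real test would also log which vector failed */
	return 0;
}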
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 06595ec27bb..e097008d6fb 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,6 +54,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
+
xhci->cap_regs = hcd->regs;
xhci->op_regs = hcd->regs +
HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 821b7b4709d..ee7bc7ecbc5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -306,7 +306,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
/* Don't ring the doorbell for this endpoint if there are pending
 * cancellations because we don't want to interrupt processing.
*/
- if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
+ if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
&& !(ep_state & EP_HALTED)) {
field = xhci_readl(xhci, db_addr) & DB_MASK;
xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
@@ -475,6 +475,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
ep->ep_state |= SET_DEQ_PENDING;
}
+static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep)
+{
+ ep->ep_state &= ~EP_HALT_PENDING;
+ /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
+ * timer is running on another CPU, we don't decrement stop_cmds_pending
+ * (since we didn't successfully stop the watchdog timer).
+ */
+ if (del_timer(&ep->stop_cmd_timer))
+ ep->stop_cmds_pending--;
+}
+
+/* Must be called with xhci->lock held in interrupt context */
+static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
+ struct xhci_td *cur_td, int status, char *adjective)
+{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+ cur_td->urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
+ xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
+
+ spin_unlock(&xhci->lock);
+ usb_hcd_giveback_urb(hcd, cur_td->urb, status);
+ kfree(cur_td);
+ spin_lock(&xhci->lock);
+ xhci_dbg(xhci, "%s URB given back\n", adjective);
+}
+
/*
* When we get a command completion for a Stop Endpoint Command, we need to
* unlink any cancelled TDs from the ring. There are two ways to do that:
@@ -497,9 +526,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
struct xhci_td *last_unlinked_td;
struct xhci_dequeue_state deq_state;
-#ifdef CONFIG_USB_HCD_STAT
- ktime_t stop_time = ktime_get();
-#endif
memset(&deq_state, 0, sizeof(deq_state));
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
@@ -507,8 +533,11 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
ep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = ep->ring;
- if (list_empty(&ep->cancelled_td_list))
+ if (list_empty(&ep->cancelled_td_list)) {
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ring_ep_doorbell(xhci, slot_id, ep_index);
return;
+ }
/* Fix up the ep ring first, so HW stops executing cancelled TDs.
* We have the xHCI lock, so nothing can modify this list until we drop
@@ -535,9 +564,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* the cancelled TD list for URB completion later.
*/
list_del(&cur_td->td_list);
- ep->cancels_pending--;
}
last_unlinked_td = cur_td;
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
@@ -561,27 +590,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
list_del(&cur_td->cancelled_td_list);
/* Clean up the cancelled URB */
-#ifdef CONFIG_USB_HCD_STAT
- hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
- ktime_sub(stop_time, cur_td->start_time));
-#endif
- cur_td->urb->hcpriv = NULL;
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
-
- xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
- spin_unlock(&xhci->lock);
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
- kfree(cur_td);
+ xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
- spin_lock(&xhci->lock);
+ /* Stop processing the cancelled list if the watchdog timer is
+ * running.
+ */
+ if (xhci->xhc_state & XHCI_STATE_DYING)
+ return;
} while (cur_td != last_unlinked_td);
/* Return to the event handler with xhci->lock re-acquired */
}
+/* Watchdog timer function for when a stop endpoint command fails to complete.
+ * In this case, we assume the host controller is broken or dying or dead. The
+ * host may still be completing some other events, so we have to be careful to
+ * let the event ring handler and the URB dequeueing/enqueueing functions know
+ * through xhci->state.
+ *
+ * The timer may also fire if the host takes a very long time to respond to the
+ * command, and the stop endpoint command completion handler cannot delete the
+ * timer before the timer function is called. Another endpoint cancellation may
+ * sneak in before the timer function can grab the lock, and that may queue
+ * another stop endpoint command and add the timer back. So we cannot use a
+ * simple flag to say whether there is a pending stop endpoint command for a
+ * particular endpoint.
+ *
+ * Instead we use a combination of that flag and a counter for the number of
+ * pending stop endpoint commands. If the timer is the tail end of the last
+ * stop endpoint command, and the endpoint's command is still pending, we assume
+ * the host is dying.
+ */
+void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_virt_ep *ep;
+ struct xhci_virt_ep *temp_ep;
+ struct xhci_ring *ring;
+ struct xhci_td *cur_td;
+ int ret, i, j;
+
+ ep = (struct xhci_virt_ep *) arg;
+ xhci = ep->xhci;
+
+ spin_lock(&xhci->lock);
+
+ ep->stop_cmds_pending--;
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
+ "xHCI as DYING, exiting.\n");
+ spin_unlock(&xhci->lock);
+ return;
+ }
+ if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+ xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
+ "exiting.\n");
+ spin_unlock(&xhci->lock);
+ return;
+ }
+
+ xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
+ xhci_warn(xhci, "Assuming host is dying, halting host.\n");
+ /* Oops, HC is dead or dying or at least not responding to the stop
+ * endpoint command.
+ */
+ xhci->xhc_state |= XHCI_STATE_DYING;
+ /* Disable interrupts from the host controller and start halting it */
+ xhci_quiesce(xhci);
+ spin_unlock(&xhci->lock);
+
+ ret = xhci_halt(xhci);
+
+ spin_lock(&xhci->lock);
+ if (ret < 0) {
+ /* This is bad; the host is not responding to commands and it's
+ * not allowing itself to be halted. At least interrupts are
+ * disabled, so we can set HC_STATE_HALT and notify the
+ * USB core. But if we call usb_hc_died(), it will attempt to
+ * disconnect all device drivers under this host. Those
+ * disconnect() methods will wait for all URBs to be unlinked,
+ * so we must complete them.
+ */
+ xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
+ xhci_warn(xhci, "Completing active URBs anyway.\n");
+ /* We could turn all TDs on the rings to no-ops. This won't
+ * help if the host has cached part of the ring, and is slow if
+ * we want to preserve the cycle bit. Skip it and hope the host
+ * doesn't touch the memory.
+ */
+ }
+ for (i = 0; i < MAX_HC_SLOTS; i++) {
+ if (!xhci->devs[i])
+ continue;
+ for (j = 0; j < 31; j++) {
+ temp_ep = &xhci->devs[i]->eps[j];
+ ring = temp_ep->ring;
+ if (!ring)
+ continue;
+ xhci_dbg(xhci, "Killing URBs for slot ID %u, "
+ "ep index %u\n", i, j);
+ while (!list_empty(&ring->td_list)) {
+ cur_td = list_first_entry(&ring->td_list,
+ struct xhci_td,
+ td_list);
+ list_del(&cur_td->td_list);
+ if (!list_empty(&cur_td->cancelled_td_list))
+ list_del(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td,
+ -ESHUTDOWN, "killed");
+ }
+ while (!list_empty(&temp_ep->cancelled_td_list)) {
+ cur_td = list_first_entry(
+ &temp_ep->cancelled_td_list,
+ struct xhci_td,
+ cancelled_td_list);
+ list_del(&cur_td->cancelled_td_list);
+ xhci_giveback_urb_in_irq(xhci, cur_td,
+ -ESHUTDOWN, "killed");
+ }
+ }
+ }
+ spin_unlock(&xhci->lock);
+ xhci_to_hcd(xhci)->state = HC_STATE_HALT;
+ xhci_dbg(xhci, "Calling usb_hc_died()\n");
+ usb_hc_died(xhci_to_hcd(xhci));
+ xhci_dbg(xhci, "xHCI host controller is dead.\n");
+}
+
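Editorial note (not part of the patch): the arming side of this watchdog is not visible in this hunk. A minimal sketch of how the URB-cancellation path might arm it, assuming the timer was initialized elsewhere with xhci_stop_endpoint_command_watchdog() as its handler and the endpoint as its data:

	/* sketch only -- assumes xhci_queue_stop_endpoint() as in this driver */
	ep->ep_state |= EP_HALT_PENDING;
	ep->stop_cmds_pending++;
	ep->stop_cmd_timer.expires = jiffies +
		XHCI_STOP_EP_CMD_TIMEOUT * HZ;
	add_timer(&ep->stop_cmd_timer);
	xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
	xhci_ring_cmd_db(xhci);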
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -765,28 +903,32 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
virt_dev->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
- ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
- if (!ep_ring) {
- /* This must have been an initial configure endpoint */
- xhci->devs[slot_id]->cmd_status =
- GET_COMP_CODE(event->status);
- complete(&xhci->devs[slot_id]->cmd_completion);
- break;
- }
- ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
- xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
- "state = %d\n", ep_index, ep_state);
+ /* A usb_set_interface() call directly after clearing a halted
+ * condition may race on this quirky hardware.
+ * Not worth worrying about, since this is prototype hardware.
+ */
if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
- ep_state & EP_HALTED) {
+ ep_index != (unsigned int) -1 &&
+ ctrl_ctx->add_flags - SLOT_FLAG ==
+ ctrl_ctx->drop_flags) {
+ ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+ if (!(ep_state & EP_HALTED))
+ goto bandwidth_change;
+ xhci_dbg(xhci, "Completed config ep cmd - "
+ "last ep index = %d, state = %d\n",
+ ep_index, ep_state);
/* Clear our internal halted state and restart ring */
xhci->devs[slot_id]->eps[ep_index].ep_state &=
~EP_HALTED;
ring_ep_doorbell(xhci, slot_id, ep_index);
- } else {
- xhci->devs[slot_id]->cmd_status =
- GET_COMP_CODE(event->status);
- complete(&xhci->devs[slot_id]->cmd_completion);
+ break;
}
+bandwidth_change:
+ xhci_dbg(xhci, "Completed config ep cmd\n");
+ xhci->devs[slot_id]->cmd_status =
+ GET_COMP_CODE(event->status);
+ complete(&xhci->devs[slot_id]->cmd_completion);
break;
case TRB_TYPE(TRB_EVAL_CONTEXT):
virt_dev = xhci->devs[slot_id];
@@ -849,8 +991,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
* TRB in this TD, this function returns that TRB's segment. Otherwise it
* returns 0.
*/
-static struct xhci_segment *trb_in_td(
- struct xhci_segment *start_seg,
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
union xhci_trb *start_trb,
union xhci_trb *end_trb,
dma_addr_t suspect_dma)
@@ -900,6 +1041,45 @@ static struct xhci_segment *trb_in_td(
return 0;
}
+static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_td *td, union xhci_trb *event_trb)
+{
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep->ep_state |= EP_HALTED;
+ ep->stopped_td = td;
+ ep->stopped_trb = event_trb;
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+ xhci_ring_cmd_db(xhci);
+}
+
+/* Check if an error has halted the endpoint ring. The class driver will
+ * clean up the halt for a non-default control endpoint if we indicate a stall.
+ * However, a babble and other errors also halt the endpoint ring, and the class
+ * driver won't clear the halt in that case, so we need to issue a Set Transfer
+ * Ring Dequeue Pointer command manually.
+ */
+static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
+ struct xhci_ep_ctx *ep_ctx,
+ unsigned int trb_comp_code)
+{
+ /* TRB completion codes that may require a manual halt cleanup */
+ if (trb_comp_code == COMP_TX_ERR ||
+ trb_comp_code == COMP_BABBLE ||
+ trb_comp_code == COMP_SPLIT_ERR)
+ /* The 0.95 spec says a babbling control endpoint
+ * is not halted. The 0.96 spec says it is. Some HW
+ * claims to be 0.95 compliant, but it halts the control
+ * endpoint anyway. Check if a babble halted the
+ * endpoint.
+ */
+ if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
+ return 1;
+
+ return 0;
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1002,6 +1182,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_warn(xhci, "WARN: TRB error on endpoint\n");
status = -EILSEQ;
break;
+ case COMP_SPLIT_ERR:
case COMP_TX_ERR:
xhci_warn(xhci, "WARN: transfer error on endpoint\n");
status = -EPROTO;
@@ -1015,6 +1196,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
status = -ENOSR;
break;
default:
+ if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+ /* Vendor defined "informational" completion code,
+ * treat as not-an-error.
+ */
+ xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+ trb_comp_code);
+ xhci_dbg(xhci, "Treating code as success.\n");
+ status = 0;
+ break;
+ }
xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
urb = NULL;
goto cleanup;
@@ -1043,15 +1234,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
else
status = 0;
break;
- case COMP_BABBLE:
- /* The 0.96 spec says a babbling control endpoint
- * is not halted. The 0.96 spec says it is. Some HW
- * claims to be 0.95 compliant, but it halts the control
- * endpoint anyway. Check if a babble halted the
- * endpoint.
- */
- if (ep_ctx->ep_info != EP_STATE_HALTED)
+
+ default:
+ if (!xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code))
break;
+ xhci_dbg(xhci, "TRB error code %u, "
+ "halted endpoint index = %u\n",
+ trb_comp_code, ep_index);
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
@@ -1063,15 +1253,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
else
td->urb->actual_length = 0;
- ep->stopped_td = td;
- ep->stopped_trb = event_trb;
- xhci_queue_reset_ep(xhci, slot_id, ep_index);
- xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
- xhci_ring_cmd_db(xhci);
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, td, event_trb);
goto td_cleanup;
- default:
- /* Others already handled above */
- break;
}
/*
* Did we transfer any data, despite the errors that might have
@@ -1209,16 +1393,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
ep->stopped_td = td;
ep->stopped_trb = event_trb;
} else {
- if (trb_comp_code == COMP_STALL ||
- trb_comp_code == COMP_BABBLE) {
+ if (trb_comp_code == COMP_STALL) {
/* The transfer is completed from the driver's
* perspective, but we need to issue a set dequeue
* command for this stalled endpoint to move the dequeue
* pointer past the TD. We can't do that here because
- * the halt condition must be cleared first.
+ * the halt condition must be cleared first. Let the
+ * USB class driver clear the stall later.
*/
ep->stopped_td = td;
ep->stopped_trb = event_trb;
+ } else if (xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code)) {
+ /* Other types of errors halt the endpoint, but the
+ * class driver doesn't call usb_reset_endpoint() unless
+ * the error is -EPIPE. Clear the halted status in the
+ * xHCI hardware manually.
+ */
+ xhci_cleanup_halted_endpoint(xhci,
+ slot_id, ep_index, td, event_trb);
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@@ -1249,10 +1442,9 @@ td_cleanup:
}
list_del(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
- if (!list_empty(&td->cancelled_td_list)) {
+ if (!list_empty(&td->cancelled_td_list))
list_del(&td->cancelled_td_list);
- ep->cancels_pending--;
- }
+
/* Leave the TD around for the reset endpoint function to use
* (but only if it's not a control endpoint, since we already
* queued the Set TR dequeue pointer command for stalled
@@ -1331,6 +1523,14 @@ void xhci_handle_event(struct xhci_hcd *xhci)
default:
xhci->error_bitmask |= 1 << 3;
}
+ /* Any of the above functions may drop and re-acquire the lock, so check
+ * to make sure a watchdog timer didn't mark the host as non-responsive.
+ */
+ if (xhci->xhc_state & XHCI_STATE_DYING) {
+ xhci_dbg(xhci, "xHCI host dying, returning from "
+ "event handler.\n");
+ return;
+ }
if (update_ptrs) {
/* Update SW and HC event ring dequeue pointer */
@@ -1555,6 +1755,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
+/*
+ * The TD size is the number of bytes remaining in the TD (including this TRB),
+ * right shifted by 10.
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ */
+static u32 xhci_td_remainder(unsigned int remainder)
+{
+ u32 max = (1 << (21 - 17 + 1)) - 1;
+
+ if ((remainder >> 10) >= max)
+ return max << 17;
+ else
+ return (remainder >> 10) << 17;
+}
+
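Editorial aside (not part of the patch): two worked values for the helper above; the field is the remaining byte count shifted right by 10 and clamped to the 5-bit range of TRB bits 21:17.

	/* 16384 bytes left: 16384 >> 10 == 16        -> 16 << 17 */
	/* 66000 bytes left: 66000 >> 10 == 64 >= 31  -> clamped to 31 << 17 */
	u32 a = xhci_td_remainder(16384);	/* == 0x00200000 */
	u32 b = xhci_td_remainder(66000);	/* == 0x003e0000 */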
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
@@ -1612,6 +1827,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
do {
u32 field = 0;
u32 length_field = 0;
+ u32 remainder = 0;
/* Don't change the cycle bit of the first TRB until later */
if (first_trb)
@@ -1641,8 +1857,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
}
+ remainder = xhci_td_remainder(urb->transfer_buffer_length -
+ running_total);
length_field = TRB_LEN(trb_buff_len) |
- TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+ remainder |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, false,
lower_32_bits(addr),
@@ -1755,6 +1973,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Queue the first TRB, even if it's zero-length */
do {
+ u32 remainder = 0;
field = 0;
/* Don't change the cycle bit of the first TRB until later */
@@ -1773,8 +1992,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
+ remainder = xhci_td_remainder(urb->transfer_buffer_length -
+ running_total);
length_field = TRB_LEN(trb_buff_len) |
- TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+ remainder |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, false,
lower_32_bits(addr),
@@ -1862,7 +2083,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* If there's data, queue data TRBs */
field = 0;
length_field = TRB_LEN(urb->transfer_buffer_length) |
- TD_REMAINDER(urb->transfer_buffer_length) |
+ xhci_td_remainder(urb->transfer_buffer_length) |
TRB_INTR_TARGET(0);
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4b254b6fa24..877813505ef 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -652,13 +652,17 @@ struct xhci_virt_ep {
struct xhci_ring *new_ring;
unsigned int ep_state;
#define SET_DEQ_PENDING (1 << 0)
-#define EP_HALTED (1 << 1)
+#define EP_HALTED (1 << 1) /* For stall handling */
+#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
- unsigned int cancels_pending;
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
+ /* Watchdog timer for stop endpoint command to cancel URBs */
+ struct timer_list stop_cmd_timer;
+ int stop_cmds_pending;
+ struct xhci_hcd *xhci;
};
struct xhci_virt_device {
@@ -673,6 +677,10 @@ struct xhci_virt_device {
struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
+ /* Rings saved to ensure old alt settings can be re-instated */
+ struct xhci_ring **ring_cache;
+ int num_rings_cached;
+#define XHCI_MAX_RINGS_CACHED 31
struct xhci_virt_ep eps[31];
struct completion cmd_completion;
/* Status of the last command issued for this device */
@@ -824,9 +832,6 @@ struct xhci_event_cmd {
/* Normal TRB fields */
/* transfer_len bitmasks - bits 0:16 */
#define TRB_LEN(p) ((p) & 0x1ffff)
-/* TD size - number of bytes remaining in the TD (including this TRB):
- * bits 17 - 21. Shift the number of bytes by 10. */
-#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17)
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
@@ -1022,6 +1027,8 @@ struct xhci_scratchpad {
#define ERST_ENTRIES 1
/* Poll every 60 seconds */
#define POLL_TIMEOUT 60
+/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
+#define XHCI_STOP_EP_CMD_TIMEOUT 5
/* XXX: Make these module parameters */
@@ -1083,6 +1090,21 @@ struct xhci_hcd {
struct timer_list event_ring_timer;
int zombie;
#endif
+ /* Host controller watchdog timer structures */
+ unsigned int xhc_state;
+/* Host controller is dying - not responding to commands. "I'm not dead yet!"
+ *
+ * xHC interrupts have been disabled and a watchdog timer will halt the xHCI
+ * host (or has already done so) and complete all URBs with an -ESHUTDOWN
+ * code. Any code
+ * that sees this status (other than the timer that set it) should stop touching
+ * hardware immediately. Interrupt handlers should return immediately when
+ * they see this status (any time they drop and re-acquire xhci->lock).
+ * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
+ * putting the TD on the canceled list, etc.
+ *
+ * There are no reports of xHCI host controllers that display this issue.
+ */
+#define XHCI_STATE_DYING (1 << 0)
/* Statistics */
int noops_submitted;
int noops_handled;
@@ -1223,6 +1245,7 @@ void xhci_unregister_pci(void);
#endif
/* xHCI host controller glue */
+void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
int xhci_init(struct usb_hcd *hcd);
@@ -1246,6 +1269,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
+ union xhci_trb *start_trb, union xhci_trb *end_trb,
+ dma_addr_t suspect_dma);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
void *xhci_setup_one_noop(struct xhci_hcd *xhci);
void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1278,6 +1304,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
+void xhci_stop_endpoint_command_watchdog(unsigned long arg);
/* xHCI roothub code */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index a9f06d76960..3dab0c0b196 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -213,8 +213,9 @@ static struct urb *simple_alloc_urb (
}
static unsigned pattern = 0;
-module_param (pattern, uint, S_IRUGO);
-MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
+static unsigned mod_pattern;
+module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
static inline void simple_fill_buf (struct urb *urb)
{
@@ -1567,6 +1568,8 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
// FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is.
+ pattern = mod_pattern;
+
if (code != USBTEST_REQUEST)
return -EOPNOTSUPP;
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 10f3205798e..385ec052016 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -16,6 +16,7 @@
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
+#include <linux/scatterlist.h>
#include <asm/uaccess.h>
@@ -221,7 +222,7 @@ static void mon_free_buff(struct mon_pgmap *map, int npages);
/*
* This is a "chunked memcpy". It does not manipulate any counters.
*/
-static void mon_copy_to_buff(const struct mon_reader_bin *this,
+static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
unsigned int off, const unsigned char *from, unsigned int length)
{
unsigned int step_len;
@@ -246,6 +247,7 @@ static void mon_copy_to_buff(const struct mon_reader_bin *this,
from += step_len;
length -= step_len;
}
+ return off;
}
/*
@@ -394,14 +396,44 @@ static inline char mon_bin_get_setup(unsigned char *setupb,
return 0;
}
-static char mon_bin_get_data(const struct mon_reader_bin *rp,
- unsigned int offset, struct urb *urb, unsigned int length)
+static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
+ unsigned int offset, struct urb *urb, unsigned int length,
+ char *flag)
{
+ int i;
+ struct scatterlist *sg;
+ unsigned int this_len;
+
+ *flag = 0;
+ if (urb->num_sgs == 0) {
+ if (urb->transfer_buffer == NULL) {
+ *flag = 'Z';
+ return length;
+ }
+ mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
+ length = 0;
- if (urb->transfer_buffer == NULL)
- return 'Z';
- mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
- return 0;
+ } else {
+ /* If IOMMU coalescing occurred, we cannot trust sg_page */
+ if (urb->sg->nents != urb->num_sgs) {
+ *flag = 'D';
+ return length;
+ }
+
+ /* Copy up to the first non-addressable segment */
+ for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
+ if (length == 0 || PageHighMem(sg_page(sg)))
+ break;
+ this_len = min_t(unsigned int, sg->length, length);
+ offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
+ this_len);
+ length -= this_len;
+ }
+ if (i == 0)
+ *flag = 'D';
+ }
+
+ return length;
}
static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
@@ -536,8 +568,9 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
}
if (length != 0) {
- ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
- if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */
+ length = mon_bin_get_data(rp, offset, urb, length,
+ &ep->flag_data);
+ if (length > 0) {
delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
ep->len_cap -= length;
delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 9f1a9227ebe..047568ff223 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -10,6 +10,7 @@
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
#include <asm/uaccess.h>
#include "usb_mon.h"
@@ -137,6 +138,8 @@ static inline char mon_text_get_setup(struct mon_event_text *ep,
static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
int len, char ev_type, struct mon_bus *mbus)
{
+ void *src;
+
if (len <= 0)
return 'L';
if (len >= DATA_MAX)
@@ -150,10 +153,24 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
return '>';
}
- if (urb->transfer_buffer == NULL)
- return 'Z'; /* '0' would be not as pretty. */
+ if (urb->num_sgs == 0) {
+ src = urb->transfer_buffer;
+ if (src == NULL)
+ return 'Z'; /* '0' would be not as pretty. */
+ } else {
+ struct scatterlist *sg = urb->sg->sg;
+
+ /* If IOMMU coalescing occurred, we cannot trust sg_page */
+ if (urb->sg->nents != urb->num_sgs ||
+ PageHighMem(sg_page(sg)))
+ return 'D';
+
+ /* For the text interface we copy only the first sg buffer */
+ len = min_t(int, sg->length, len);
+ src = sg_virt(sg);
+ }
- memcpy(ep->data, urb->transfer_buffer, len);
+ memcpy(ep->data, src, len);
return 0;
}
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index b84abd8ee8a..d9db8649802 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -9,10 +9,9 @@ comment "Enable Host or Gadget support to see Inventra options"
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
depends on (USB || USB_GADGET)
- depends on (ARM || BLACKFIN)
- select NOP_USB_XCEIV if ARCH_DAVINCI
+ depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
+ select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
select TWL4030_USB if MACH_OMAP_3430SDP
- select NOP_USB_XCEIV if MACH_OMAP3EVM
select USB_OTG_UTILS
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index fcec87ea709..fe4934d9602 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -53,13 +53,11 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
void __iomem *fifo = hw_ep->fifo;
+
+#ifdef CONFIG_BF52x
u8 epnum = hw_ep->epnum;
u16 dma_reg = 0;
- DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
- 'R', hw_ep->epnum, fifo, len, dst);
-
-#ifdef CONFIG_BF52x
invalidate_dcache_range((unsigned int)dst,
(unsigned int)(dst + len));
@@ -102,6 +100,9 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
len & 0x01 ? (len >> 1) + 1 : len >> 1);
#endif
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->epnum, fifo, len, dst);
+
dump_fifo_data(dst, len);
}
@@ -225,8 +226,9 @@ int musb_platform_get_vbus_status(struct musb *musb)
return 0;
}
-void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
{
+ return -EIO;
}
int __init musb_platform_init(struct musb *musb)
@@ -261,10 +263,6 @@ int __init musb_platform_init(struct musb *musb)
SSYNC();
}
- /* TODO
- * Set SIC-IVG register
- */
-
/* Configure PLL oscillator register */
bfin_write_USB_PLLOSC_CTRL(0x30a8);
SSYNC();
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index a240c1e53d1..10b7d7584f4 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -14,6 +14,43 @@
* Blackfin specific definitions
*/
+/* Anomalies notes:
+ *
+ * 05000450 - USB DMA Mode 1 Short Packet Data Corruption:
+ * The MUSB driver is designed to transfer a buffer of N * maxpacket
+ * size in DMA mode 1 and leave the rest of the data to the next
+ * transfer in DMA mode 0, so we never transmit a short packet in
+ * DMA mode 1.
+ *
+ * 05000463 - This anomaly doesn't affect this driver since it
+ * never uses L1 or L2 memory as data destination.
+ *
+ * 05000464 - This anomaly doesn't affect this driver since it
+ * never uses L1 or L2 memory as data source.
+ *
+ * 05000465 - The anomaly can be seen when SCLK is over 100 MHz, and there is
+ * no way to work around it for bulk endpoints. Since the
+ * wMaxPacketSize of bulk is less than or equal to 512, while the
+ * fifo size of endpoints 5, 6 and 7 is 1024, double buffer mode is
+ * enabled automatically when these endpoints are used for bulk OUT.
+ *
+ * 05000466 - This anomaly doesn't affect this driver since it never mixes
+ * concurrent DMA and core accesses to the TX endpoint FIFOs.
+ *
+ * 05000467 - The workaround for this anomaly will introduce another
+ * anomaly - 05000465.
+ */
+
+/* The Mentor USB DMA engine on BF52x (silicon v0.0 and v0.1) seems to be
+ * unstable in host mode. This may be caused by Anomaly 05000380. Once the
+ * root cause is found, we will update this check accordingly. Until then,
+ * either use silicon v0.2+ or disable DMA mode in MUSB.
+ */
+#if ANOMALY_05000380 && defined(CONFIG_BF52x) && \
+ defined(CONFIG_USB_MUSB_HDRC) && !defined(CONFIG_MUSB_PIO_ONLY)
+# error "Please use PIO mode in MUSB driver on bf52x chip v0.0 and v0.1"
+#endif
+
#undef DUMP_FIFO_DATA
#ifdef DUMP_FIFO_DATA
static void dump_fifo_data(u8 *buf, u16 len)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 547e0e39072..bfe08f4975a 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1319,7 +1319,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
#endif
u8 reg;
char *type;
- u16 hwvers, rev_major, rev_minor;
char aInfo[78], aRevision[32], aDate[12];
void __iomem *mbase = musb->mregs;
int status = 0;
@@ -1391,11 +1390,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
}
/* log release info */
- hwvers = musb_read_hwvers(mbase);
- rev_major = (hwvers >> 10) & 0x1f;
- rev_minor = hwvers & 0x3ff;
- snprintf(aRevision, 32, "%d.%d%s", rev_major,
- rev_minor, (hwvers & 0x8000) ? "RC" : "");
+ musb->hwvers = musb_read_hwvers(mbase);
+ snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
+ MUSB_HWVERS_MINOR(musb->hwvers),
+ (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
musb_driver_name, type, aRevision, aDate);
@@ -2216,7 +2214,7 @@ static int musb_resume_noirq(struct device *dev)
return 0;
}
-static struct dev_pm_ops musb_dev_pm_ops = {
+static const struct dev_pm_ops musb_dev_pm_ops = {
.suspend = musb_suspend,
.resume_noirq = musb_resume_noirq,
};
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 6aa5f22e527..03d50909b07 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -322,6 +322,14 @@ struct musb {
struct clk *clock;
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
+#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
+#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
+#define MUSB_HWVERS_RC 0x8000
+#define MUSB_HWVERS_1300 0x52C
+#define MUSB_HWVERS_1400 0x590
+#define MUSB_HWVERS_1800 0x720
+#define MUSB_HWVERS_2000 0x800
+ u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
#define MUSB_PORT_STAT_RESUME (1 << 31)
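Editorial aside (not part of the patch): a worked decode of the new version macros, matching the snprintf() format used in musb_core_init():

	/* hwvers 0x720 (MUSB_HWVERS_1800): major = (0x720 >> 10) & 0x1f = 1,
	 *                                  minor =  0x720 & 0x3ff = 0x320 = 800
	 *                                  -> logged as RTL version "1.800"
	 * hwvers 0x800 (MUSB_HWVERS_2000): major = 2, minor = 0 -> "2.0"
	 */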
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 0a2c4e3602c..916065ba9e7 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -80,6 +80,17 @@ struct musb_hw_ep;
#define tusb_dma_omap() 0
#endif
+/* Anomaly 05000456 - USB Receive Interrupt Is Not Generated in DMA Mode 1
+ * Only allow DMA mode 1 to be used when the USB will actually generate the
+ * interrupts we expect.
+ */
+#ifdef CONFIG_BLACKFIN
+# undef USE_MODE1
+# if !ANOMALY_05000456
+# define USE_MODE1
+# endif
+#endif
+
/*
* DMA channel status ... updated by the dma controller driver whenever that
* status changes, and protected by the overall controller spinlock.
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 74073f9a43f..c49b9ba025a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -429,112 +429,102 @@ void musb_g_tx(struct musb *musb, u8 epnum)
DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
dma = is_dma_capable() ? musb_ep->dma : NULL;
- do {
- /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
- * probably rates reporting as a host error
+
+ /*
+ * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+ * probably rates reporting as a host error.
+ */
+ if (csr & MUSB_TXCSR_P_SENTSTALL) {
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~MUSB_TXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ return;
+ }
+
+ if (csr & MUSB_TXCSR_P_UNDERRUN) {
+ /* We NAKed, no big deal... little reason to care. */
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /*
+ * SHOULD NOT HAPPEN... has with CPPI though, after
+ * changing SENDSTALL (and other cases); harmless?
*/
- if (csr & MUSB_TXCSR_P_SENTSTALL) {
- csr |= MUSB_TXCSR_P_WZC_BITS;
- csr &= ~MUSB_TXCSR_P_SENTSTALL;
- musb_writew(epio, MUSB_TXCSR, csr);
- break;
- }
+ DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
+ return;
+ }
+
+ if (request) {
+ u8 is_dma = 0;
- if (csr & MUSB_TXCSR_P_UNDERRUN) {
- /* we NAKed, no big deal ... little reason to care */
+ if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+ is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
- csr &= ~(MUSB_TXCSR_P_UNDERRUN
- | MUSB_TXCSR_TXPKTRDY);
+ csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
+ MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR, csr);
- DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+ /* Ensure writebuffer is empty. */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ request->actual += musb_ep->dma->actual_len;
+ DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
+ epnum, csr, musb_ep->dma->actual_len, request);
}
- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- /* SHOULD NOT HAPPEN ... has with cppi though, after
- * changing SENDSTALL (and other cases); harmless?
+ if (is_dma || request->actual == request->length) {
+ /*
+ * First, maybe a terminating short packet. Some DMA
+ * engines might handle this by themselves.
*/
- DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
- break;
- }
-
- if (request) {
- u8 is_dma = 0;
-
- if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
- is_dma = 1;
- csr |= MUSB_TXCSR_P_WZC_BITS;
- csr &= ~(MUSB_TXCSR_DMAENAB
- | MUSB_TXCSR_P_UNDERRUN
- | MUSB_TXCSR_TXPKTRDY);
- musb_writew(epio, MUSB_TXCSR, csr);
- /* ensure writebuffer is empty */
- csr = musb_readw(epio, MUSB_TXCSR);
- request->actual += musb_ep->dma->actual_len;
- DBG(4, "TXCSR%d %04x, dma off, "
- "len %zu, req %p\n",
- epnum, csr,
- musb_ep->dma->actual_len,
- request);
- }
-
- if (is_dma || request->actual == request->length) {
-
- /* First, maybe a terminating short packet.
- * Some DMA engines might handle this by
- * themselves.
- */
- if ((request->zero
- && request->length
- && (request->length
- % musb_ep->packet_sz)
- == 0)
+ if ((request->zero && request->length
+ && request->length % musb_ep->packet_sz == 0)
#ifdef CONFIG_USB_INVENTRA_DMA
- || (is_dma &&
- ((!dma->desired_mode) ||
- (request->actual &
- (musb_ep->packet_sz - 1))))
+ || (is_dma && (!dma->desired_mode ||
+ (request->actual &
+ (musb_ep->packet_sz - 1))))
#endif
- ) {
- /* on dma completion, fifo may not
- * be available yet ...
- */
- if (csr & MUSB_TXCSR_TXPKTRDY)
- break;
-
- DBG(4, "sending zero pkt\n");
- musb_writew(epio, MUSB_TXCSR,
- MUSB_TXCSR_MODE
- | MUSB_TXCSR_TXPKTRDY);
- request->zero = 0;
- }
-
- /* ... or if not, then complete it */
- musb_g_giveback(musb_ep, request, 0);
-
- /* kickstart next transfer if appropriate;
- * the packet that just completed might not
- * be transmitted for hours or days.
- * REVISIT for double buffering...
- * FIXME revisit for stalls too...
+ ) {
+ /*
+ * On DMA completion, FIFO may not be
+ * available yet...
*/
- musb_ep_select(mbase, epnum);
- csr = musb_readw(epio, MUSB_TXCSR);
- if (csr & MUSB_TXCSR_FIFONOTEMPTY)
- break;
- request = musb_ep->desc
- ? next_request(musb_ep)
- : NULL;
- if (!request) {
- DBG(4, "%s idle now\n",
- musb_ep->end_point.name);
- break;
- }
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ return;
+
+ DBG(4, "sending zero pkt\n");
+ musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
+ | MUSB_TXCSR_TXPKTRDY);
+ request->zero = 0;
}
- txstate(musb, to_musb_request(request));
+ /* ... or if not, then complete it. */
+ musb_g_giveback(musb_ep, request, 0);
+
+ /*
+ * Kickstart next transfer if appropriate;
+ * the packet that just completed might not
+ * be transmitted for hours or days.
+ * REVISIT for double buffering...
+ * FIXME revisit for stalls too...
+ */
+ musb_ep_select(mbase, epnum);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ return;
+
+ if (!musb_ep->desc) {
+ DBG(4, "%s idle now\n",
+ musb_ep->end_point.name);
+ return;
+ } else
+ request = next_request(musb_ep);
}
- } while (0);
+ txstate(musb, to_musb_request(request));
+ }
}
/* ------------------------------------------------------------ */
@@ -966,6 +956,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
musb_ep->desc = desc;
musb_ep->busy = 0;
+ musb_ep->wedged = 0;
status = 0;
pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
@@ -1220,7 +1211,7 @@ done:
*
* exported to ep0 code
*/
-int musb_gadget_set_halt(struct usb_ep *ep, int value)
+static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
u8 epnum = musb_ep->current_epnum;
@@ -1262,7 +1253,8 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
goto done;
}
}
- }
+ } else
+ musb_ep->wedged = 0;
/* set/clear the stall and toggle bits */
DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
@@ -1301,6 +1293,21 @@ done:
return status;
}
+/*
+ * Sets the halt feature with the clear requests ignored
+ */
+static int musb_gadget_set_wedge(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+
+ if (!ep)
+ return -EINVAL;
+
+ musb_ep->wedged = 1;
+
+ return usb_ep_set_halt(ep);
+}
+
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
@@ -1371,6 +1378,7 @@ static const struct usb_ep_ops musb_ep_ops = {
.queue = musb_gadget_queue,
.dequeue = musb_gadget_dequeue,
.set_halt = musb_gadget_set_halt,
+ .set_wedge = musb_gadget_set_wedge,
.fifo_status = musb_gadget_fifo_status,
.fifo_flush = musb_gadget_fifo_flush
};
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index 59502da9f73..c8b140325d8 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -75,6 +75,8 @@ struct musb_ep {
/* later things are modified based on usage */
struct list_head req_list;
+ u8 wedged;
+
/* true if lock must be dropped but req_list may not be advanced */
u8 busy;
};
@@ -103,6 +105,4 @@ extern void musb_gadget_cleanup(struct musb *);
extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
-extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
-
#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 522efb31b56..8fba3f11e47 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -199,7 +199,6 @@ service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
- musb->ep0_state = MUSB_EP0_STAGE_SETUP;
}
/*
@@ -258,30 +257,53 @@ __acquires(musb->lock)
case USB_RECIP_INTERFACE:
break;
case USB_RECIP_ENDPOINT:{
- const u8 num = ctrlrequest->wIndex & 0x0f;
- struct musb_ep *musb_ep;
+ const u8 epnum =
+ ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *ep;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
- if (num == 0
- || num >= MUSB_C_NUM_EPS
- || ctrlrequest->wValue
- != USB_ENDPOINT_HALT)
+ if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+ ctrlrequest->wValue != USB_ENDPOINT_HALT)
break;
- if (ctrlrequest->wIndex & USB_DIR_IN)
- musb_ep = &musb->endpoints[num].ep_in;
+ ep = musb->endpoints + epnum;
+ regs = ep->regs;
+ is_in = ctrlrequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ musb_ep = &ep->ep_in;
else
- musb_ep = &musb->endpoints[num].ep_out;
+ musb_ep = &ep->ep_out;
if (!musb_ep->desc)
break;
- /* REVISIT do it directly, no locking games */
- spin_unlock(&musb->lock);
- musb_gadget_set_halt(&musb_ep->end_point, 0);
- spin_lock(&musb->lock);
+ handled = 1;
+ /* Ignore request if endpoint is wedged */
+ if (musb_ep->wedged)
+ break;
+
+ musb_ep_select(mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_CLRDATATOG |
+ MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_SENDSTALL |
+ MUSB_TXCSR_P_SENTSTALL |
+ MUSB_TXCSR_TXPKTRDY);
+ musb_writew(regs, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_CLRDATATOG |
+ MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_RXCSR_P_SENDSTALL |
+ MUSB_RXCSR_P_SENTSTALL);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
/* select ep0 again */
musb_ep_select(mbase, 0);
- handled = 1;
} break;
default:
/* class, vendor, etc ... delegate */
@@ -374,10 +396,8 @@ stall:
int is_in;
u16 csr;
- if (epnum == 0
- || epnum >= MUSB_C_NUM_EPS
- || ctrlrequest->wValue
- != USB_ENDPOINT_HALT)
+ if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
+ ctrlrequest->wValue != USB_ENDPOINT_HALT)
break;
ep = musb->endpoints + epnum;
@@ -392,24 +412,20 @@ stall:
musb_ep_select(mbase, epnum);
if (is_in) {
- csr = musb_readw(regs,
- MUSB_TXCSR);
+ csr = musb_readw(regs, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY)
csr |= MUSB_TXCSR_FLUSHFIFO;
csr |= MUSB_TXCSR_P_SENDSTALL
| MUSB_TXCSR_CLRDATATOG
| MUSB_TXCSR_P_WZC_BITS;
- musb_writew(regs, MUSB_TXCSR,
- csr);
+ musb_writew(regs, MUSB_TXCSR, csr);
} else {
- csr = musb_readw(regs,
- MUSB_RXCSR);
+ csr = musb_readw(regs, MUSB_RXCSR);
csr |= MUSB_RXCSR_P_SENDSTALL
| MUSB_RXCSR_FLUSHFIFO
| MUSB_RXCSR_CLRDATATOG
| MUSB_RXCSR_P_WZC_BITS;
- musb_writew(regs, MUSB_RXCSR,
- csr);
+ musb_writew(regs, MUSB_RXCSR, csr);
}
/* select ep0 again */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e3ab40a966e..74c4c3698f1 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1642,18 +1642,18 @@ void musb_host_rx(struct musb *musb, u8 epnum)
c = musb->dma_controller;
if (usb_pipeisoc(pipe)) {
- int status = 0;
+ int d_status = 0;
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
if (iso_err) {
- status = -EILSEQ;
+ d_status = -EILSEQ;
urb->error_count++;
}
if (rx_count > d->length) {
- if (status == 0) {
- status = -EOVERFLOW;
+ if (d_status == 0) {
+ d_status = -EOVERFLOW;
urb->error_count++;
}
DBG(2, "** OVERFLOW %d into %d\n",\
@@ -1662,7 +1662,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
length = d->length;
} else
length = rx_count;
- d->status = status;
+ d->status = d_status;
buf = urb->transfer_dma + d->offset;
} else {
length = rx_count;
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index cc1d71b57d3..473a94ef905 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -465,9 +465,9 @@ static inline u16 musb_read_hwvers(void __iomem *mbase)
return 0;
}
-static inline u16 musb_read_target_reg_base(u8 i, void __iomem *mbase)
+static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
{
- return 0;
+ return NULL;
}
static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 5e83f96d6b7..a237550f91b 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -259,6 +259,11 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
if (!int_hsdma)
goto done;
+#ifdef CONFIG_BLACKFIN
+ /* Clear DMA interrupt flags */
+ musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
+#endif
+
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
if (int_hsdma & (1 << bchannel)) {
musb_channel = (struct musb_dma_channel *)
@@ -280,7 +285,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
channel->actual_len = addr
- musb_channel->start_addr;
- DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+ DBG(2, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
channel, musb_channel->start_addr,
addr, channel->actual_len,
musb_channel->len,
@@ -324,11 +329,6 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
}
}
-#ifdef CONFIG_BLACKFIN
- /* Clear DMA interrup flags */
- musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
-#endif
-
retval = IRQ_HANDLED;
done:
spin_unlock_irqrestore(&musb->lock, flags);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 6761d2088db..83beeac5e7b 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -315,7 +315,7 @@ int musb_platform_exit(struct musb *musb)
musb_platform_suspend(musb);
clk_put(musb->clock);
- musb->clock = 0;
+ musb->clock = NULL;
return 0;
}
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index aa884d072f0..de56b3d743d 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -41,6 +41,15 @@ config ISP1301_OMAP
This driver can also be built as a module. If so, the module
will be called isp1301_omap.
+config USB_ULPI
+ bool "Generic ULPI Transceiver Driver"
+ depends on ARM
+ help
+ Enable this to support ULPI connected USB OTG transceivers which
+ are likely found on embedded boards.
+
+ The only chip currently supported is NXP's ISP1504.
+
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 20816785652..aeb49a8ec41 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
+obj-$(CONFIG_USB_ULPI) += ulpi.o
ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 9e3e7a5c258..2be9f2fa41f 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -33,7 +33,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/usb/otg.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
@@ -276,16 +276,16 @@ static int twl4030_i2c_write_u8_verify(struct twl4030_usb *twl,
{
u8 check;
- if ((twl4030_i2c_write_u8(module, data, address) >= 0) &&
- (twl4030_i2c_read_u8(module, &check, address) >= 0) &&
+ if ((twl_i2c_write_u8(module, data, address) >= 0) &&
+ (twl_i2c_read_u8(module, &check, address) >= 0) &&
(check == data))
return 0;
dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
1, module, address, check, data);
/* Failed once: Try again */
- if ((twl4030_i2c_write_u8(module, data, address) >= 0) &&
- (twl4030_i2c_read_u8(module, &check, address) >= 0) &&
+ if ((twl_i2c_write_u8(module, data, address) >= 0) &&
+ (twl_i2c_read_u8(module, &check, address) >= 0) &&
(check == data))
return 0;
dev_dbg(twl->dev, "Write%d[%d,0x%x] wrote %02x but read %02x\n",
@@ -303,7 +303,7 @@ static inline int twl4030_usb_write(struct twl4030_usb *twl,
{
int ret = 0;
- ret = twl4030_i2c_write_u8(TWL4030_MODULE_USB, data, address);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_USB, data, address);
if (ret < 0)
dev_dbg(twl->dev,
"TWL4030:USB:Write[0x%x] Error %d\n", address, ret);
@@ -315,7 +315,7 @@ static inline int twl4030_readb(struct twl4030_usb *twl, u8 module, u8 address)
u8 data;
int ret = 0;
- ret = twl4030_i2c_read_u8(module, &data, address);
+ ret = twl_i2c_read_u8(module, &data, address);
if (ret >= 0)
ret = data;
else
@@ -462,7 +462,7 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
* SLEEP. We work around this by clearing the bit after usv3v1
* is re-activated. This ensures that VUSB3V1 is really active.
*/
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
VUSB_DEDICATED2);
regulator_enable(twl->usb1v5);
pwr &= ~PHY_PWR_PHYPWD;
@@ -505,44 +505,44 @@ static void twl4030_phy_resume(struct twl4030_usb *twl)
static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
{
/* Enable writing to power configuration registers */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
/* put VUSB3V1 LDO in active state */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);
/* input to VUSB3V1 LDO is from VBAT, not VBUS */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
/* Initialize 3.1V regulator */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_DEV_GRP);
twl->usb3v1 = regulator_get(twl->dev, "usb3v1");
if (IS_ERR(twl->usb3v1))
return -ENODEV;
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB3V1_TYPE);
/* Initialize 1.5V regulator */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_DEV_GRP);
twl->usb1v5 = regulator_get(twl->dev, "usb1v5");
if (IS_ERR(twl->usb1v5))
goto fail1;
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V5_TYPE);
/* Initialize 1.8V regulator */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_DEV_GRP);
twl->usb1v8 = regulator_get(twl->dev, "usb1v8");
if (IS_ERR(twl->usb1v8))
goto fail2;
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB1V8_TYPE);
/* disable access to power configuration registers */
- twl4030_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY);
+ twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0, PROTECT_KEY);
return 0;
@@ -598,12 +598,12 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
* USB_LINK_VBUS state. musb_hdrc won't care until it
* starts to handle softconnect right.
*/
- twl4030charger_usb_en(status == USB_LINK_VBUS);
-
if (status == USB_LINK_NONE)
twl4030_phy_suspend(twl, 0);
else
twl4030_phy_resume(twl);
+
+ twl4030charger_usb_en(status == USB_LINK_VBUS);
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
new file mode 100644
index 00000000000..896527456b7
--- /dev/null
+++ b/drivers/usb/otg/ulpi.c
@@ -0,0 +1,136 @@
+/*
+ * Generic ULPI USB transceiver support
+ *
+ * Copyright (C) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * Based on sources from
+ *
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ * Freescale Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
+
+/* ULPI register addresses */
+#define ULPI_VID_LOW 0x00 /* Vendor ID low */
+#define ULPI_VID_HIGH 0x01 /* Vendor ID high */
+#define ULPI_PID_LOW 0x02 /* Product ID low */
+#define ULPI_PID_HIGH 0x03 /* Product ID high */
+#define ULPI_ITFCTL 0x07 /* Interface Control */
+#define ULPI_OTGCTL 0x0A /* OTG Control */
+
+/* add to above register address to access Set/Clear functions */
+#define ULPI_REG_SET 0x01
+#define ULPI_REG_CLEAR 0x02
+
+/* ULPI OTG Control Register bits */
+#define ID_PULL_UP (1 << 0) /* enable ID Pull Up */
+#define DP_PULL_DOWN (1 << 1) /* enable DP Pull Down */
+#define DM_PULL_DOWN (1 << 2) /* enable DM Pull Down */
+#define DISCHRG_VBUS (1 << 3) /* Discharge Vbus */
+#define CHRG_VBUS (1 << 4) /* Charge Vbus */
+#define DRV_VBUS (1 << 5) /* Drive Vbus */
+#define DRV_VBUS_EXT (1 << 6) /* Drive Vbus external */
+#define USE_EXT_VBUS_IND (1 << 7) /* Use ext. Vbus indicator */
+
+#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
+
+#define TR_FLAG(flags, a, b) (((flags) & a) ? b : 0)
+
+/* ULPI hardcoded IDs, used for probing */
+static unsigned int ulpi_ids[] = {
+ ULPI_ID(0x04cc, 0x1504), /* NXP ISP1504 */
+};
+
+static int ulpi_set_flags(struct otg_transceiver *otg)
+{
+ unsigned int flags = 0;
+
+ if (otg->flags & USB_OTG_PULLUP_ID)
+ flags |= ID_PULL_UP;
+
+ if (otg->flags & USB_OTG_PULLDOWN_DM)
+ flags |= DM_PULL_DOWN;
+
+ if (otg->flags & USB_OTG_PULLDOWN_DP)
+ flags |= DP_PULL_DOWN;
+
+ if (otg->flags & USB_OTG_EXT_VBUS_INDICATOR)
+ flags |= USE_EXT_VBUS_IND;
+
+ return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+}
+
+static int ulpi_init(struct otg_transceiver *otg)
+{
+ int i, vid, pid;
+
+ vid = (otg_io_read(otg, ULPI_VID_HIGH) << 8) |
+ otg_io_read(otg, ULPI_VID_LOW);
+ pid = (otg_io_read(otg, ULPI_PID_HIGH) << 8) |
+ otg_io_read(otg, ULPI_PID_LOW);
+
+ pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid);
+
+ for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++)
+ if (ulpi_ids[i] == ULPI_ID(vid, pid))
+ return ulpi_set_flags(otg);
+
+ pr_err("ULPI ID does not match any known transceiver.\n");
+ return -ENODEV;
+}
+
+static int ulpi_set_vbus(struct otg_transceiver *otg, bool on)
+{
+ unsigned int flags = otg_io_read(otg, ULPI_OTGCTL);
+
+ flags &= ~(DRV_VBUS | DRV_VBUS_EXT);
+
+ if (on) {
+ if (otg->flags & USB_OTG_DRV_VBUS)
+ flags |= DRV_VBUS;
+
+ if (otg->flags & USB_OTG_DRV_VBUS_EXT)
+ flags |= DRV_VBUS_EXT;
+ }
+
+ return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
+}
+
+struct otg_transceiver *
+otg_ulpi_create(struct otg_io_access_ops *ops,
+ unsigned int flags)
+{
+ struct otg_transceiver *otg;
+
+ otg = kzalloc(sizeof(*otg), GFP_KERNEL);
+ if (!otg)
+ return NULL;
+
+ otg->label = "ULPI";
+ otg->flags = flags;
+ otg->io_ops = ops;
+ otg->init = ulpi_init;
+ otg->set_vbus = ulpi_set_vbus;
+
+ return otg;
+}
+EXPORT_SYMBOL_GPL(otg_ulpi_create);
+
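Editorial sketch (not part of the patch) of how platform glue might use the new transceiver driver; my_ulpi_read(), my_ulpi_write() and my_board_usb_init() are hypothetical names, and otg_set_transceiver() is assumed to be the platform registration call.

	static struct otg_io_access_ops my_ulpi_ops = {
		.read	= my_ulpi_read,		/* hypothetical ULPI viewport read */
		.write	= my_ulpi_write,	/* hypothetical ULPI viewport write */
	};

	static int __init my_board_usb_init(void)
	{
		struct otg_transceiver *otg;

		otg = otg_ulpi_create(&my_ulpi_ops,
				USB_OTG_PULLDOWN_DP | USB_OTG_PULLDOWN_DM);
		if (!otg)
			return -ENOMEM;

		return otg_set_transceiver(otg);
	}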
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 131e61adaaf..a9c2dec8e3f 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com)
+ * Original version:
* Copyright (C) 2006
* Simon Schulz (ark3116_driver <at> auctionant.de)
*
@@ -6,10 +8,13 @@
* - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547,
* productid=0x0232) (used in a datacable called KQ-U8A)
*
- * - based on code by krisfx -> thanks !!
- * (see http://www.linuxquestions.org/questions/showthread.php?p=2184457#post2184457)
+ * Supports full modem status lines, break, hardware flow control. Does not
+ * support software flow control, since I do not know how to enable it in hw.
*
- * - based on logs created by usbsnoopy
+ * This driver is an essentially new implementation. I initially dug
+ * into the old ark3116.c driver and suddenly realized the ark3116 is
+ * a 16450 with a USB interface glued to it. See comments at the
+ * bottom of this file.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -19,15 +24,31 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/ioctl.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
+#include <linux/serial_reg.h>
#include <linux/uaccess.h>
-
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
static int debug;
+/*
+ * Version information
+ */
+
+#define DRIVER_VERSION "v0.5"
+#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
+#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
+#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
+#define DRIVER_NAME "ark3116"
+
+/* usb timeout of 1 second */
+#define ARK_TIMEOUT (1*HZ)
static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x6547, 0x0232) },
@@ -45,118 +66,152 @@ static int is_irda(struct usb_serial *serial)
return 0;
}
-static inline void ARK3116_SND(struct usb_serial *serial, int seq,
- __u8 request, __u8 requesttype,
- __u16 value, __u16 index)
+struct ark3116_private {
+ wait_queue_head_t delta_msr_wait;
+ struct async_icount icount;
+ int irda; /* 1 for irda device */
+
+ /* protects hw register updates */
+ struct mutex hw_lock;
+
+ int quot; /* baudrate divisor */
+ __u32 lcr; /* line control register value */
+ __u32 hcr; /* handshake control register (0x8)
+ * value */
+ __u32 mcr; /* modem control register value */
+
+ /* protects the status values below */
+ spinlock_t status_lock;
+ __u32 msr; /* modem status register value */
+ __u32 lsr; /* line status register value */
+};
+
+static int ark3116_write_reg(struct usb_serial *serial,
+ unsigned reg, __u8 val)
{
int result;
+ /* 0xfe 0x40 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
- request, requesttype, value, index,
- NULL, 0x00, 1000);
- dbg("%03d > ok", seq);
+ 0xfe, 0x40, val, reg,
+ NULL, 0, ARK_TIMEOUT);
+ return result;
}
-static inline void ARK3116_RCV(struct usb_serial *serial, int seq,
- __u8 request, __u8 requesttype,
- __u16 value, __u16 index, __u8 expected,
- char *buf)
+static int ark3116_read_reg(struct usb_serial *serial,
+ unsigned reg, unsigned char *buf)
{
int result;
+ /* 0xfe 0xc0 are magic values taken from original driver */
result = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
- request, requesttype, value, index,
- buf, 0x0000001, 1000);
- if (result)
- dbg("%03d < %d bytes [0x%02X]", seq, result,
- ((unsigned char *)buf)[0]);
+ 0xfe, 0xc0, 0, reg,
+ buf, 1, ARK_TIMEOUT);
+ if (result < 0)
+ return result;
else
- dbg("%03d < 0 bytes", seq);
+ return buf[0];
}
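Editorial note (not part of the patch): the "magic" control-transfer values used above are ordinary ch9 fields; assuming the standard constants from linux/usb/ch9.h they decompose as:

	/* bRequest       0xfe -- vendor-specific register access request
	 * bmRequestType  0x40 == USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE
	 * bmRequestType  0xc0 == USB_DIR_IN  | USB_TYPE_VENDOR | USB_RECIP_DEVICE
	 */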
-static inline void ARK3116_RCV_QUIET(struct usb_serial *serial,
- __u8 request, __u8 requesttype,
- __u16 value, __u16 index, char *buf)
+static inline int calc_divisor(int bps)
{
- usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- request, requesttype, value, index,
- buf, 0x0000001, 1000);
+ /* Original ark3116 made some exceptions in rounding here
+ * because windows did the same. Assume that is not really
+ * necessary.
+ * Crystal is 12MHz, probably because of USB, but we divide by 4?
+ */
+ return (12000000 + 2*bps) / (4*bps);
}
static int ark3116_attach(struct usb_serial *serial)
{
- char *buf;
+ struct usb_serial_port *port = serial->port[0];
+ struct ark3116_private *priv;
+
+ /* make sure we have our end-points */
+ if ((serial->num_bulk_in == 0) ||
+ (serial->num_bulk_out == 0) ||
+ (serial->num_interrupt_in == 0)) {
+ dev_err(&serial->dev->dev,
+ "%s - missing endpoint - "
+ "bulk in: %d, bulk out: %d, int in %d\n",
+ KBUILD_MODNAME,
+ serial->num_bulk_in,
+ serial->num_bulk_out,
+ serial->num_interrupt_in);
+ return -EINVAL;
+ }
- buf = kmalloc(1, GFP_KERNEL);
- if (!buf) {
- dbg("error kmalloc -> out of mem?");
+ priv = kzalloc(sizeof(struct ark3116_private),
+ GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
- if (is_irda(serial))
- dbg("IrDA mode");
+ init_waitqueue_head(&priv->delta_msr_wait);
+ mutex_init(&priv->hw_lock);
+ spin_lock_init(&priv->status_lock);
+
+ priv->irda = is_irda(serial);
- /* 3 */
- ARK3116_SND(serial, 3, 0xFE, 0x40, 0x0008, 0x0002);
- ARK3116_SND(serial, 4, 0xFE, 0x40, 0x0008, 0x0001);
- ARK3116_SND(serial, 5, 0xFE, 0x40, 0x0000, 0x0008);
- ARK3116_SND(serial, 6, 0xFE, 0x40, is_irda(serial) ? 0x0001 : 0x0000,
- 0x000B);
+ usb_set_serial_port_data(port, priv);
- if (is_irda(serial)) {
- ARK3116_SND(serial, 1001, 0xFE, 0x40, 0x0000, 0x000C);
- ARK3116_SND(serial, 1002, 0xFE, 0x40, 0x0041, 0x000D);
- ARK3116_SND(serial, 1003, 0xFE, 0x40, 0x0001, 0x000A);
+ /* setup the hardware */
+ ark3116_write_reg(serial, UART_IER, 0);
+ /* disable DMA */
+ ark3116_write_reg(serial, UART_FCR, 0);
+ /* handshake control */
+ priv->hcr = 0;
+ ark3116_write_reg(serial, 0x8 , 0);
+ /* modem control */
+ priv->mcr = 0;
+ ark3116_write_reg(serial, UART_MCR, 0);
+
+ if (!(priv->irda)) {
+ ark3116_write_reg(serial, 0xb , 0);
+ } else {
+ ark3116_write_reg(serial, 0xb , 1);
+ ark3116_write_reg(serial, 0xc , 0);
+ ark3116_write_reg(serial, 0xd , 0x41);
+ ark3116_write_reg(serial, 0xa , 1);
}
- /* <-- seq7 */
- ARK3116_RCV(serial, 7, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
- ARK3116_SND(serial, 8, 0xFE, 0x40, 0x0080, 0x0003);
- ARK3116_SND(serial, 9, 0xFE, 0x40, 0x001A, 0x0000);
- ARK3116_SND(serial, 10, 0xFE, 0x40, 0x0000, 0x0001);
- ARK3116_SND(serial, 11, 0xFE, 0x40, 0x0000, 0x0003);
-
- /* <-- seq12 */
- ARK3116_RCV(serial, 12, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
- ARK3116_SND(serial, 13, 0xFE, 0x40, 0x0000, 0x0004);
-
- /* 14 */
- ARK3116_RCV(serial, 14, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
- ARK3116_SND(serial, 15, 0xFE, 0x40, 0x0000, 0x0004);
-
- /* 16 */
- ARK3116_RCV(serial, 16, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
- /* --> seq17 */
- ARK3116_SND(serial, 17, 0xFE, 0x40, 0x0001, 0x0004);
-
- /* <-- seq18 */
- ARK3116_RCV(serial, 18, 0xFE, 0xC0, 0x0000, 0x0004, 0x01, buf);
-
- /* --> seq19 */
- ARK3116_SND(serial, 19, 0xFE, 0x40, 0x0003, 0x0004);
-
- /* <-- seq20 */
- /* seems like serial port status info (RTS, CTS, ...) */
- /* returns modem control line status?! */
- ARK3116_RCV(serial, 20, 0xFE, 0xC0, 0x0000, 0x0006, 0xFF, buf);
-
- /* set 9600 baud & do some init?! */
- ARK3116_SND(serial, 147, 0xFE, 0x40, 0x0083, 0x0003);
- ARK3116_SND(serial, 148, 0xFE, 0x40, 0x0038, 0x0000);
- ARK3116_SND(serial, 149, 0xFE, 0x40, 0x0001, 0x0001);
- if (is_irda(serial))
- ARK3116_SND(serial, 1004, 0xFE, 0x40, 0x0000, 0x0009);
- ARK3116_SND(serial, 150, 0xFE, 0x40, 0x0003, 0x0003);
- ARK3116_RCV(serial, 151, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
- ARK3116_SND(serial, 152, 0xFE, 0x40, 0x0000, 0x0003);
- ARK3116_RCV(serial, 153, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
- ARK3116_SND(serial, 154, 0xFE, 0x40, 0x0003, 0x0003);
+ /* setup baudrate */
+ ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB);
- kfree(buf);
+ /* setup for 9600 8N1 */
+ priv->quot = calc_divisor(9600);
+ ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff);
+ ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff);
+
+ priv->lcr = UART_LCR_WLEN8;
+ ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8);
+
+ ark3116_write_reg(serial, 0xe, 0);
+
+ if (priv->irda)
+ ark3116_write_reg(serial, 0x9, 0);
+
+ dev_info(&serial->dev->dev,
+ "%s using %s mode\n",
+ KBUILD_MODNAME,
+ priv->irda ? "IrDA" : "RS232");
return 0;
}
+static void ark3116_release(struct usb_serial *serial)
+{
+ struct usb_serial_port *port = serial->port[0];
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
+
+ /* device is closed, so URBs and DMA should be down */
+
+ usb_set_serial_port_data(port, NULL);
+
+ mutex_destroy(&priv->hw_lock);
+
+ kfree(priv);
+}
+
static void ark3116_init_termios(struct tty_struct *tty)
{
struct ktermios *termios = tty->termios;
@@ -172,200 +227,189 @@ static void ark3116_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = tty->termios;
unsigned int cflag = termios->c_cflag;
- int baud;
- int ark3116_baud;
- char *buf;
- char config;
-
- config = 0;
-
- dbg("%s - port %d", __func__, port->number);
+ int bps = tty_get_baud_rate(tty);
+ int quot;
+ __u8 lcr, hcr, eval;
+
+ /* set data bit count */
+ switch (cflag & CSIZE) {
+ case CS5:
+ lcr = UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr = UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr = UART_LCR_WLEN7;
+ break;
+ default:
+ case CS8:
+ lcr = UART_LCR_WLEN8;
+ break;
+ }
+ if (cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+ if (cflag & PARENB)
+ lcr |= UART_LCR_PARITY;
+ if (!(cflag & PARODD))
+ lcr |= UART_LCR_EPAR;
+#ifdef CMSPAR
+ if (cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+#endif
+ /* handshake control */
+ hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
+
+ /* calc baudrate */
+ dbg("%s - setting bps to %d", __func__, bps);
+ eval = 0;
+ switch (bps) {
+ case 0:
+ quot = calc_divisor(9600);
+ break;
+ default:
+ if ((bps < 75) || (bps > 3000000))
+ bps = 9600;
+ quot = calc_divisor(bps);
+ break;
+ case 460800:
+ eval = 1;
+ quot = calc_divisor(bps);
+ break;
+ case 921600:
+ eval = 2;
+ quot = calc_divisor(bps);
+ break;
+ }
+ /* Update state: synchronize */
+ mutex_lock(&priv->hw_lock);
- cflag = termios->c_cflag;
- termios->c_cflag &= ~(CMSPAR|CRTSCTS);
+ /* keep old LCR_SBC bit */
+ lcr |= (priv->lcr & UART_LCR_SBC);
- buf = kmalloc(1, GFP_KERNEL);
- if (!buf) {
- dbg("error kmalloc");
- *termios = *old_termios;
- return;
- }
+ dbg("%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d",
+ __func__, hcr, lcr, quot);
- /* set data bit count (8/7/6/5) */
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
- case CS5:
- config |= 0x00;
- dbg("setting CS5");
- break;
- case CS6:
- config |= 0x01;
- dbg("setting CS6");
- break;
- case CS7:
- config |= 0x02;
- dbg("setting CS7");
- break;
- default:
- dbg("CSIZE was set but not CS5-CS8, using CS8!");
- /* fall through */
- case CS8:
- config |= 0x03;
- dbg("setting CS8");
- break;
- }
+ /* handshake control */
+ if (priv->hcr != hcr) {
+ priv->hcr = hcr;
+ ark3116_write_reg(serial, 0x8, hcr);
}
- /* set parity (NONE/EVEN/ODD) */
- if (cflag & PARENB) {
- if (cflag & PARODD) {
- config |= 0x08;
- dbg("setting parity to ODD");
- } else {
- config |= 0x18;
- dbg("setting parity to EVEN");
- }
- } else {
- dbg("setting parity to NONE");
+ /* baudrate */
+ if (priv->quot != quot) {
+ priv->quot = quot;
+ priv->lcr = lcr; /* need to write lcr anyway */
+
+ /* disable DMA since transmit/receive is
+ * shadowed by UART_DLL
+ */
+ ark3116_write_reg(serial, UART_FCR, 0);
+
+ ark3116_write_reg(serial, UART_LCR,
+ lcr|UART_LCR_DLAB);
+ ark3116_write_reg(serial, UART_DLL, quot & 0xff);
+ ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff);
+
+ /* restore lcr */
+ ark3116_write_reg(serial, UART_LCR, lcr);
+ /* magic baudrate thingy: not sure what it does,
+ * but windows does this as well.
+ */
+ ark3116_write_reg(serial, 0xe, eval);
+
+ /* enable DMA */
+ ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT);
+ } else if (priv->lcr != lcr) {
+ priv->lcr = lcr;
+ ark3116_write_reg(serial, UART_LCR, lcr);
}
- /* set stop bit (1/2) */
- if (cflag & CSTOPB) {
- config |= 0x04;
- dbg("setting 2 stop bits");
- } else {
- dbg("setting 1 stop bit");
- }
+ mutex_unlock(&priv->hw_lock);
- /* set baudrate */
- baud = tty_get_baud_rate(tty);
-
- switch (baud) {
- case 75:
- case 150:
- case 300:
- case 600:
- case 1200:
- case 1800:
- case 2400:
- case 4800:
- case 9600:
- case 19200:
- case 38400:
- case 57600:
- case 115200:
- case 230400:
- case 460800:
- /* Report the resulting rate back to the caller */
- tty_encode_baud_rate(tty, baud, baud);
- break;
- /* set 9600 as default (if given baudrate is invalid for example) */
- default:
- tty_encode_baud_rate(tty, 9600, 9600);
- case 0:
- baud = 9600;
+ /* check for software flow control */
+ if (I_IXOFF(tty) || I_IXON(tty)) {
+ dev_warn(&serial->dev->dev,
+ "%s: don't know how to do software flow control\n",
+ KBUILD_MODNAME);
}
- /*
- * found by try'n'error, be careful, maybe there are other options
- * for multiplicator etc! (3.5 for example)
- */
- if (baud == 460800)
- /* strange, for 460800 the formula is wrong
- * if using round() then 9600baud is wrong) */
- ark3116_baud = 7;
- else
- ark3116_baud = 3000000 / baud;
-
- /* ? */
- ARK3116_RCV(serial, 0, 0xFE, 0xC0, 0x0000, 0x0003, 0x03, buf);
-
- /* offset = buf[0]; */
- /* offset = 0x03; */
- /* dbg("using 0x%04X as target for 0x0003:", 0x0080 + offset); */
-
- /* set baudrate */
- dbg("setting baudrate to %d (->reg=%d)", baud, ark3116_baud);
- ARK3116_SND(serial, 147, 0xFE, 0x40, 0x0083, 0x0003);
- ARK3116_SND(serial, 148, 0xFE, 0x40,
- (ark3116_baud & 0x00FF), 0x0000);
- ARK3116_SND(serial, 149, 0xFE, 0x40,
- (ark3116_baud & 0xFF00) >> 8, 0x0001);
- ARK3116_SND(serial, 150, 0xFE, 0x40, 0x0003, 0x0003);
-
- /* ? */
- ARK3116_RCV(serial, 151, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
- ARK3116_SND(serial, 152, 0xFE, 0x40, 0x0000, 0x0003);
-
- /* set data bit count, stop bit count & parity: */
- dbg("updating bit count, stop bit or parity (cfg=0x%02X)", config);
- ARK3116_RCV(serial, 153, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
- ARK3116_SND(serial, 154, 0xFE, 0x40, config, 0x0003);
+ /* Don't rewrite B0 */
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, bps, bps);
+}
- if (cflag & CRTSCTS)
- dbg("CRTSCTS not supported by chipset?!");
+static void ark3116_close(struct usb_serial_port *port)
+{
+ struct usb_serial *serial = port->serial;
- /* TEST ARK3116_SND(154, 0xFE, 0x40, 0xFFFF, 0x0006); */
+ if (serial->dev) {
+ /* disable DMA */
+ ark3116_write_reg(serial, UART_FCR, 0);
- kfree(buf);
+ /* deactivate interrupts */
+ ark3116_write_reg(serial, UART_IER, 0);
- return;
+ /* shutdown any bulk reads that might be going on */
+ if (serial->num_bulk_out)
+ usb_kill_urb(port->write_urb);
+ if (serial->num_bulk_in)
+ usb_kill_urb(port->read_urb);
+ if (serial->num_interrupt_in)
+ usb_kill_urb(port->interrupt_in_urb);
+ }
}
static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios tmp_termios;
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
- char *buf;
- int result = 0;
-
- dbg("%s - port %d", __func__, port->number);
+ unsigned char *buf;
+ int result;
buf = kmalloc(1, GFP_KERNEL);
- if (!buf) {
- dbg("error kmalloc -> out of mem?");
+ if (buf == NULL)
return -ENOMEM;
- }
result = usb_serial_generic_open(tty, port);
- if (result)
+ if (result) {
+ dbg("%s - usb_serial_generic_open failed: %d",
+ __func__, result);
goto err_out;
+ }
- /* open */
- ARK3116_RCV(serial, 111, 0xFE, 0xC0, 0x0000, 0x0003, 0x02, buf);
-
- ARK3116_SND(serial, 112, 0xFE, 0x40, 0x0082, 0x0003);
- ARK3116_SND(serial, 113, 0xFE, 0x40, 0x001A, 0x0000);
- ARK3116_SND(serial, 114, 0xFE, 0x40, 0x0000, 0x0001);
- ARK3116_SND(serial, 115, 0xFE, 0x40, 0x0002, 0x0003);
-
- ARK3116_RCV(serial, 116, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
- ARK3116_SND(serial, 117, 0xFE, 0x40, 0x0002, 0x0004);
-
- ARK3116_RCV(serial, 118, 0xFE, 0xC0, 0x0000, 0x0004, 0x02, buf);
- ARK3116_SND(serial, 119, 0xFE, 0x40, 0x0000, 0x0004);
-
- ARK3116_RCV(serial, 120, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
+ /* setup termios */
+ if (tty)
+ ark3116_set_termios(tty, port, NULL);
- ARK3116_SND(serial, 121, 0xFE, 0x40, 0x0001, 0x0004);
+ /* remove any data still left: also clears error state */
+ ark3116_read_reg(serial, UART_RX, buf);
- ARK3116_RCV(serial, 122, 0xFE, 0xC0, 0x0000, 0x0004, 0x01, buf);
+ /* read modem status */
+ priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
+ /* read line status */
+ priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
- ARK3116_SND(serial, 123, 0xFE, 0x40, 0x0003, 0x0004);
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+ if (result) {
+ dev_err(&port->dev, "submit irq_in urb failed %d\n",
+ result);
+ ark3116_close(port);
+ goto err_out;
+ }
- /* returns different values (control lines?!) */
- ARK3116_RCV(serial, 124, 0xFE, 0xC0, 0x0000, 0x0006, 0xFF, buf);
+ /* activate interrupts */
+ ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI);
- /* initialise termios */
- if (tty)
- ark3116_set_termios(tty, port, &tmp_termios);
+ /* enable DMA */
+ ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
err_out:
kfree(buf);
-
return result;
}
@@ -373,6 +417,7 @@ static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
struct serial_struct serstruct;
void __user *user_arg = (void __user *)arg;
@@ -394,9 +439,48 @@ static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
if (copy_from_user(&serstruct, user_arg, sizeof(serstruct)))
return -EFAULT;
return 0;
- default:
- dbg("%s cmd 0x%04x not supported", __func__, cmd);
+ case TIOCMIWAIT:
+ for (;;) {
+ struct async_icount prev = priv->icount;
+ interruptible_sleep_on(&priv->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ if ((prev.rng == priv->icount.rng) &&
+ (prev.dsr == priv->icount.dsr) &&
+ (prev.dcd == priv->icount.dcd) &&
+ (prev.cts == priv->icount.cts))
+ return -EIO;
+ if ((arg & TIOCM_RNG &&
+ (prev.rng != priv->icount.rng)) ||
+ (arg & TIOCM_DSR &&
+ (prev.dsr != priv->icount.dsr)) ||
+ (arg & TIOCM_CD &&
+ (prev.dcd != priv->icount.dcd)) ||
+ (arg & TIOCM_CTS &&
+ (prev.cts != priv->icount.cts)))
+ return 0;
+ }
break;
+ case TIOCGICOUNT: {
+ struct serial_icounter_struct icount;
+ struct async_icount cnow = priv->icount;
+ memset(&icount, 0, sizeof(icount));
+ icount.cts = cnow.cts;
+ icount.dsr = cnow.dsr;
+ icount.rng = cnow.rng;
+ icount.dcd = cnow.dcd;
+ icount.rx = cnow.rx;
+ icount.tx = cnow.tx;
+ icount.frame = cnow.frame;
+ icount.overrun = cnow.overrun;
+ icount.parity = cnow.parity;
+ icount.brk = cnow.brk;
+ icount.buf_overrun = cnow.buf_overrun;
+ if (copy_to_user(user_arg, &icount, sizeof(icount)))
+ return -EFAULT;
+ return 0;
+ }
}
return -ENOIOCTLCMD;
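To show what the two ioctls handled above look like from user space, here is a rough illustrative sketch (my own, not from the patch): block until CTS changes, then dump the event counters that TIOCGICOUNT copies out.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

/* Wait for a CTS transition on an already-open tty fd, then print the counters. */
static int wait_for_cts(int fd)
{
	struct serial_icounter_struct ic;

	/* sleeps in the driver's delta_msr_wait until the MSR delta arrives */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CTS) < 0)
		return -1;
	if (ioctl(fd, TIOCGICOUNT, &ic) < 0)
		return -1;
	printf("cts=%d dsr=%d dcd=%d rng=%d\n", ic.cts, ic.dsr, ic.dcd, ic.rng);
	return 0;
}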
@@ -405,32 +489,273 @@ static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
static int ark3116_tiocmget(struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
- struct usb_serial *serial = port->serial;
- char *buf;
- char temp;
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
+ __u32 status;
+ __u32 ctrl;
+ unsigned long flags;
+
+ mutex_lock(&priv->hw_lock);
+ ctrl = priv->mcr;
+ mutex_unlock(&priv->hw_lock);
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ status = priv->msr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ return (status & UART_MSR_DSR ? TIOCM_DSR : 0) |
+ (status & UART_MSR_CTS ? TIOCM_CTS : 0) |
+ (status & UART_MSR_RI ? TIOCM_RI : 0) |
+ (status & UART_MSR_DCD ? TIOCM_CD : 0) |
+ (ctrl & UART_MCR_DTR ? TIOCM_DTR : 0) |
+ (ctrl & UART_MCR_RTS ? TIOCM_RTS : 0) |
+ (ctrl & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) |
+ (ctrl & UART_MCR_OUT2 ? TIOCM_OUT2 : 0);
+}
- /* seems like serial port status info (RTS, CTS, ...) is stored
- * in reg(?) 0x0006
- * pcb connection point 11 = GND -> sets bit4 of response
- * pcb connection point 7 = GND -> sets bit6 of response
+static int ark3116_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned set, unsigned clr)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
+
+ /* we need to take the mutex here, to make sure that the value
+ * in priv->mcr is actually the one that is in the hardware
*/
- buf = kmalloc(1, GFP_KERNEL);
- if (!buf) {
- dbg("error kmalloc");
- return -ENOMEM;
+ mutex_lock(&priv->hw_lock);
+
+ if (set & TIOCM_RTS)
+ priv->mcr |= UART_MCR_RTS;
+ if (set & TIOCM_DTR)
+ priv->mcr |= UART_MCR_DTR;
+ if (set & TIOCM_OUT1)
+ priv->mcr |= UART_MCR_OUT1;
+ if (set & TIOCM_OUT2)
+ priv->mcr |= UART_MCR_OUT2;
+ if (clr & TIOCM_RTS)
+ priv->mcr &= ~UART_MCR_RTS;
+ if (clr & TIOCM_DTR)
+ priv->mcr &= ~UART_MCR_DTR;
+ if (clr & TIOCM_OUT1)
+ priv->mcr &= ~UART_MCR_OUT1;
+ if (clr & TIOCM_OUT2)
+ priv->mcr &= ~UART_MCR_OUT2;
+
+ ark3116_write_reg(port->serial, UART_MCR, priv->mcr);
+
+ mutex_unlock(&priv->hw_lock);
+
+ return 0;
+}
+
+static void ark3116_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
+
+ /* LCR is also used for other things: protect access */
+ mutex_lock(&priv->hw_lock);
+
+ if (break_state)
+ priv->lcr |= UART_LCR_SBC;
+ else
+ priv->lcr &= ~UART_LCR_SBC;
+
+ ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
+
+ mutex_unlock(&priv->hw_lock);
+}
+
+static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
+{
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ priv->msr = msr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (msr & UART_MSR_ANY_DELTA) {
+ /* update input line counters */
+ if (msr & UART_MSR_DCTS)
+ priv->icount.cts++;
+ if (msr & UART_MSR_DDSR)
+ priv->icount.dsr++;
+ if (msr & UART_MSR_DDCD)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+ wake_up_interruptible(&priv->delta_msr_wait);
}
+}
- /* read register */
- ARK3116_RCV_QUIET(serial, 0xFE, 0xC0, 0x0000, 0x0006, buf);
- temp = buf[0];
- kfree(buf);
+static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr)
+{
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ /* combine bits */
+ priv->lsr |= lsr;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (lsr&UART_LSR_BRK_ERROR_BITS) {
+ if (lsr & UART_LSR_BI)
+ priv->icount.brk++;
+ if (lsr & UART_LSR_FE)
+ priv->icount.frame++;
+ if (lsr & UART_LSR_PE)
+ priv->icount.parity++;
+ if (lsr & UART_LSR_OE)
+ priv->icount.overrun++;
+ }
+}
- /* i do not really know if bit4=CTS and bit6=DSR... just a
- * quick guess!
- */
- return (temp & (1<<4) ? TIOCM_CTS : 0)
- | (temp & (1<<6) ? TIOCM_DSR : 0);
+static void ark3116_read_int_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ int status = urb->status;
+ const __u8 *data = urb->transfer_buffer;
+ int result;
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d",
+ __func__, status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d",
+ __func__, status);
+ break;
+ case 0: /* success */
+ /* discovered this by trial and error... */

+ if ((urb->actual_length == 4) && (data[0] == 0xe8)) {
+ const __u8 id = data[1]&UART_IIR_ID;
+ dbg("%s: iir=%02x", __func__, data[1]);
+ if (id == UART_IIR_MSI) {
+ dbg("%s: msr=%02x", __func__, data[3]);
+ ark3116_update_msr(port, data[3]);
+ break;
+ } else if (id == UART_IIR_RLSI) {
+ dbg("%s: lsr=%02x", __func__, data[2]);
+ ark3116_update_lsr(port, data[2]);
+ break;
+ }
+ }
+ /*
+ * Not sure what this data meant...
+ */
+ usb_serial_debug_data(debug, &port->dev,
+ __func__,
+ urb->actual_length,
+ urb->transfer_buffer);
+ break;
+ }
+
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&urb->dev->dev,
+ "%s - Error %d submitting interrupt urb\n",
+ __func__, result);
+}
+
+
+/* Data comes in via the bulk (data) URB, errors/interrupts via the int URB.
+ * This means that we cannot be sure which data byte has an associated error
+ * condition, so we report an error for all data in the next bulk read.
+ *
+ * Actually, there might even be a window between the bulk data leaving the
+ * ark and reading/resetting the lsr in the read_bulk_callback where an
+ * interrupt for the next data block could come in.
+ * Without some kind of ordering on the ark, we would have to report the
+ * error for the next block of data as well...
+ * For now, let's pretend this can't happen.
+ */
+
+static void send_to_tty(struct tty_struct *tty,
+ const unsigned char *chars,
+ size_t size, char flag)
+{
+ if (size == 0)
+ return;
+ if (flag == TTY_NORMAL) {
+ tty_insert_flip_string(tty, chars, size);
+ } else {
+ int i;
+ for (i = 0; i < size; ++i)
+ tty_insert_flip_char(tty, chars[i], flag);
+ }
+}
+
+static void ark3116_read_bulk_callback(struct urb *urb)
+{
+ struct usb_serial_port *port = urb->context;
+ struct ark3116_private *priv = usb_get_serial_port_data(port);
+ const __u8 *data = urb->transfer_buffer;
+ int status = urb->status;
+ struct tty_struct *tty;
+ unsigned long flags;
+ int result;
+ char flag;
+ __u32 lsr;
+
+ switch (status) {
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d",
+ __func__, status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d",
+ __func__, status);
+ break;
+ case 0: /* success */
+
+ spin_lock_irqsave(&priv->status_lock, flags);
+ lsr = priv->lsr;
+ /* clear error bits */
+ priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ if (unlikely(lsr & UART_LSR_BI))
+ flag = TTY_BREAK;
+ else if (unlikely(lsr & UART_LSR_PE))
+ flag = TTY_PARITY;
+ else if (unlikely(lsr & UART_LSR_FE))
+ flag = TTY_FRAME;
+ else
+ flag = TTY_NORMAL;
+
+ tty = tty_port_tty_get(&port->port);
+ if (tty) {
+ tty_buffer_request_room(tty, urb->actual_length + 1);
+ /* overrun is special, not associated with a char */
+ if (unlikely(lsr & UART_LSR_OE))
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ send_to_tty(tty, data, urb->actual_length, flag);
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+
+ /* Throttle the device if requested by tty */
+ spin_lock_irqsave(&port->lock, flags);
+ port->throttled = port->throttle_req;
+ if (port->throttled) {
+ spin_unlock_irqrestore(&port->lock, flags);
+ return;
+ } else
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+ /* Continue reading from device */
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&urb->dev->dev, "%s - failed resubmitting"
+ " read urb, error %d\n", __func__, result);
}
static struct usb_driver ark3116_driver = {
@@ -450,11 +775,17 @@ static struct usb_serial_driver ark3116_device = {
.usb_driver = &ark3116_driver,
.num_ports = 1,
.attach = ark3116_attach,
+ .release = ark3116_release,
.set_termios = ark3116_set_termios,
.init_termios = ark3116_init_termios,
.ioctl = ark3116_ioctl,
.tiocmget = ark3116_tiocmget,
+ .tiocmset = ark3116_tiocmset,
.open = ark3116_open,
+ .close = ark3116_close,
+ .break_ctl = ark3116_break_ctl,
+ .read_int_callback = ark3116_read_int_callback,
+ .read_bulk_callback = ark3116_read_bulk_callback,
};
static int __init ark3116_init(void)
@@ -465,7 +796,12 @@ static int __init ark3116_init(void)
if (retval)
return retval;
retval = usb_register(&ark3116_driver);
- if (retval)
+ if (retval == 0) {
+ printk(KERN_INFO "%s:"
+ DRIVER_VERSION ":"
+ DRIVER_DESC "\n",
+ KBUILD_MODNAME);
+ } else
usb_serial_deregister(&ark3116_device);
return retval;
}
@@ -480,6 +816,109 @@ module_init(ark3116_init);
module_exit(ark3116_exit);
MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+
module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
+MODULE_PARM_DESC(debug, "Enable debug");
+/*
+ * The following describes what I learned from studying the old
+ * ark3116.c driver, disassembling the windows driver, and some lucky
+ * guesses. Since I do not have any datasheet or other
+ * documentation, inaccuracies are almost guaranteed.
+ *
+ * Some specs for the ARK3116 can be found here:
+ * http://web.archive.org/web/20060318000438/
+ * www.arkmicro.com/en/products/view.php?id=10
+ * On that page, 2 GPIO pins are mentioned: I assume these are the
+ * OUT1 and OUT2 pins of the UART, so I added support for those
+ * through the MCR. Since the pins are not available on my hardware,
+ * I could not verify this.
+ * Also, it states there is "on-chip hardware flow control". I have
+ * discovered how to enable that. Unfortunately, I do not know how to
+ * enable XON/XOFF (software) flow control, which would need support
+ * from the chip as well to work. Because of the wording on the web
+ * page there is a real possibility the chip simply does not support
+ * software flow control.
+ *
+ * I got my ark3116 as part of a mobile phone adapter cable. On the
+ * PCB, the following numbered contacts are present:
+ *
+ * 1:- +5V
+ * 2:o DTR
+ * 3:i RX
+ * 4:i DCD
+ * 5:o RTS
+ * 6:o TX
+ * 7:i RI
+ * 8:i DSR
+ * 10:- 0V
+ * 11:i CTS
+ *
+ * On my chip, all signals seem to be 3.3V, but 5V tolerant. But that
+ * may be different for the one you have ;-).
+ *
+ * The windows driver limits the registers to 0-F, so I assume there
+ * are actually 16 present on the device.
+ *
+ * On a UART interrupt, 4 bytes of data come in on the interrupt
+ * endpoint. The bytes are 0xe8 IIR LSR MSR.
+ *
+ * The baudrate seems to be generated from the 12MHz crystal, using
+ * 4-times subsampling. So quot=12e6/(4*baud). Also see description
+ * of register E.
+ *
+ * Registers 0-7:
+ * These seem to be the same as for a regular 16450. The FCR is set
+ * to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between
+ * the UART and the USB bridge/DMA engine.
+ *
+ * Register 8:
+ * By trial and error, I found out that bit 0 enables hardware CTS,
+ * stopping TX when CTS is +5V. Bit 1 does the same for RTS, making
+ * RTS +5V when the 3116 cannot transfer the data to the USB bus
+ * (verified by disabling the reading URB). Note that as far as I can
+ * tell, the windows driver does NOT use this, so there might be some
+ * hardware bug or something.
+ *
+ * According to a patch provided here
+ * (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used
+ * as an IrDA dongle. Since I do not have such a thing, I could not
+ * investigate that aspect. However, I can speculate ;-).
+ *
+ * - IrDA encodes data differently than RS232. Most likely, one of
+ * the bits in registers 9..E enables the IR ENDEC (encoder/decoder).
+ * - Depending on the IR transceiver, the input and output need to be
+ * inverted, so there are probably bits for that as well.
+ * - IrDA is half-duplex, so there should be a bit for selecting that.
+ *
+ * This still leaves at least two registers unaccounted for. Perhaps
+ * the chip can do XON/XOFF or CRC in HW?
+ *
+ * Register 9:
+ * Set to 0x00 for IrDA, when the baudrate is initialised.
+ *
+ * Register A:
+ * Set to 0x01 for IrDA, at init.
+ *
+ * Register B:
+ * Set to 0x01 for IrDA, 0x00 for RS232, at init.
+ *
+ * Register C:
+ * Set to 0x00 for IrDA, at init.
+ *
+ * Register D:
+ * Set to 0x41 for IrDA, at init.
+ *
+ * Register E:
+ * Some kind of baudrate override. The windows driver seems to set
+ * this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600.
+ * Since 460800 and 921600 cannot be obtained by dividing 3 MHz by an integer,
+ * it could be some kind of subdivisor thingy.
+ * However, it does not seem to do anything: selecting 921600 (divisor 3,
+ * reg E=2), still gets 1 MHz. I also checked if registers 9, C or F would
+ * work, but they don't.
+ *
+ * Register F: unknown
+ */
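For quick reference, the register notes above can be condensed into a header-style sketch. The names below are invented here purely for illustration (they appear in no driver or datasheet), and the semantics are only as certain as the guesses in the comment:

/* Hypothetical register map, distilled from the notes above. Registers 0-7
 * behave like a regular 16450.
 */
#define ARK3116_REG_HCR		0x08	/* bit0: stop TX on CTS, bit1: raise RTS when busy */
#define ARK3116_REG_IRDA_9	0x09	/* 0x00 for IrDA when the baudrate is initialised */
#define ARK3116_REG_IRDA_A	0x0a	/* 0x01 for IrDA at init */
#define ARK3116_REG_MODE	0x0b	/* 0x01 = IrDA, 0x00 = RS232 */
#define ARK3116_REG_IRDA_C	0x0c	/* 0x00 for IrDA at init */
#define ARK3116_REG_IRDA_D	0x0d	/* 0x41 for IrDA at init */
#define ARK3116_REG_BAUD_EXT	0x0e	/* 0x00 normal, 0x01 for 460800, 0x02 for 921600 */
#define ARK3116_REG_UNKNOWN_F	0x0f	/* unknown */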
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ebcc6d0e2e9..f99498fca99 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -598,6 +598,20 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) },
{ USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
@@ -2195,15 +2209,21 @@ static void ftdi_set_termios(struct tty_struct *tty,
/* Set number of data bits, parity, stop bits */
- termios->c_cflag &= ~CMSPAR;
-
urb_value = 0;
urb_value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 :
FTDI_SIO_SET_DATA_STOP_BITS_1);
- urb_value |= (cflag & PARENB ?
- (cflag & PARODD ? FTDI_SIO_SET_DATA_PARITY_ODD :
- FTDI_SIO_SET_DATA_PARITY_EVEN) :
- FTDI_SIO_SET_DATA_PARITY_NONE);
+ if (cflag & PARENB) {
+ if (cflag & CMSPAR)
+ urb_value |= cflag & PARODD ?
+ FTDI_SIO_SET_DATA_PARITY_MARK :
+ FTDI_SIO_SET_DATA_PARITY_SPACE;
+ else
+ urb_value |= cflag & PARODD ?
+ FTDI_SIO_SET_DATA_PARITY_ODD :
+ FTDI_SIO_SET_DATA_PARITY_EVEN;
+ } else {
+ urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
+ }
if (cflag & CSIZE) {
switch (cflag & CSIZE) {
case CS5: urb_value |= 5; dbg("Setting CS5"); break;
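Since the hunk above makes CMSPAR actually select mark/space parity instead of being stripped, a rough user-space sketch of how an application would ask for mark parity looks like this (assuming a glibc that exposes CMSPAR; the function name is illustrative only):

#define _GNU_SOURCE
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

/* Open a serial device with 8 data bits and MARK parity (PARENB|CMSPAR|PARODD). */
static int open_mark_parity(const char *dev)
{
	struct termios tio;
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (tcgetattr(fd, &tio) < 0) {
		close(fd);
		return -1;
	}
	cfmakeraw(&tio);	/* leaves CS8, clears parity bits */
	tio.c_cflag |= PARENB | CMSPAR | PARODD;	/* drop PARODD for SPACE parity */
	if (tcsetattr(fd, TCSANOW, &tio) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}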
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 6f31e0d7189..4586a24fafb 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -662,6 +662,20 @@
#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
+#define BANDB_USOPTL4_PID 0xAC11
+#define BANDB_USPTL4_PID 0xAC12
+#define BANDB_USO9ML2DR_2_PID 0xAC16
+#define BANDB_USO9ML2DR_PID 0xAC17
+#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
+#define BANDB_USOPTL4DR_PID 0xAC19
+#define BANDB_485USB9F_2W_PID 0xAC25
+#define BANDB_485USB9F_4W_PID 0xAC26
+#define BANDB_232USB9M_PID 0xAC27
+#define BANDB_485USBTB_2W_PID 0xAC33
+#define BANDB_485USBTB_4W_PID 0xAC34
+#define BANDB_TTL5USB9M_PID 0xAC49
+#define BANDB_TTL3USB9M_PID 0xAC50
+#define BANDB_ZZ_PROG1_USB_PID 0xBA02
/*
* RM Michaelides CANview USB (http://www.rmcan.com)
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index bbe005cefcf..f1ea3a33b6e 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -276,7 +276,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
if (port->write_urb_busy)
start_io = false;
else {
- start_io = (__kfifo_len(port->write_fifo) != 0);
+ start_io = (kfifo_len(&port->write_fifo) != 0);
port->write_urb_busy = start_io;
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -285,7 +285,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port)
return 0;
data = port->write_urb->transfer_buffer;
- count = kfifo_get(port->write_fifo, data, port->bulk_out_size);
+ count = kfifo_out_locked(&port->write_fifo, data, port->bulk_out_size, &port->lock);
usb_serial_debug_data(debug, &port->dev, __func__, count, data);
/* set up our urb */
@@ -345,7 +345,7 @@ int usb_serial_generic_write(struct tty_struct *tty,
return usb_serial_multi_urb_write(tty, port,
buf, count);
- count = kfifo_put(port->write_fifo, buf, count);
+ count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock);
result = usb_serial_generic_write_start(port);
if (result >= 0)
@@ -370,7 +370,7 @@ int usb_serial_generic_write_room(struct tty_struct *tty)
(serial->type->max_in_flight_urbs -
port->urbs_in_flight);
} else if (serial->num_bulk_out)
- room = port->write_fifo->size - __kfifo_len(port->write_fifo);
+ room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
dbg("%s - returns %d", __func__, room);
@@ -391,7 +391,7 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
chars = port->tx_bytes_flight;
spin_unlock_irqrestore(&port->lock, flags);
} else if (serial->num_bulk_out)
- chars = kfifo_len(port->write_fifo);
+ chars = kfifo_len(&port->write_fifo);
dbg("%s - returns %d", __func__, chars);
return chars;
@@ -507,7 +507,7 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
if (status) {
dbg("%s - nonzero multi-urb write bulk status "
"received: %d", __func__, status);
- kfifo_reset(port->write_fifo);
+ kfifo_reset_out(&port->write_fifo);
} else
usb_serial_generic_write_start(port);
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index f11abf52be7..485fa9c5b10 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -121,8 +121,14 @@
* moschip_id_table_combined
*/
#define USB_VENDOR_ID_BANDB 0x0856
-#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
+#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
@@ -177,8 +183,14 @@
static struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
@@ -187,8 +199,14 @@ static struct usb_device_id moschip_port_id_table[] = {
static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 80f59b6350c..4cdb975caa8 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -501,12 +501,13 @@ static int opticon_resume(struct usb_interface *intf)
struct usb_serial_port *port = serial->port[0];
int result;
- mutex_lock(&port->mutex);
- if (port->port.count)
+ mutex_lock(&port->port.mutex);
+ /* This is protected by the port mutex against close/open */
+ if (test_bit(ASYNCB_INITIALIZED, &port->port.flags))
result = usb_submit_urb(priv->bulk_read_urb, GFP_NOIO);
else
result = 0;
- mutex_unlock(&port->mutex);
+ mutex_unlock(&port->port.mutex);
return result;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0577e4b6111..9a2b903492e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -580,12 +580,48 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -599,6 +635,7 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
@@ -1312,7 +1349,7 @@ static int option_suspend(struct usb_serial *serial, pm_message_t message)
dbg("%s entered", __func__);
- if (serial->dev->auto_pm) {
+ if (message.event & PM_EVENT_AUTO) {
spin_lock_irq(&intfdata->susp_lock);
b = intfdata->in_flight;
spin_unlock_irq(&intfdata->susp_lock);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 5019325ba25..ac1b6449fb6 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -16,8 +16,9 @@
Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
-
-#define DRIVER_VERSION "v.1.3.8"
+/* Uncomment to log function calls */
+/* #define DEBUG */
+#define DRIVER_VERSION "v.1.7.16"
#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
@@ -33,8 +34,10 @@
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
-#define N_IN_URB 8
-#define N_OUT_URB 64
+#define N_IN_URB_HM 8
+#define N_OUT_URB_HM 64
+#define N_IN_URB 4
+#define N_OUT_URB 4
#define IN_BUFLEN 4096
#define MAX_TRANSFER (PAGE_SIZE - 512)
@@ -124,6 +127,23 @@ static int is_blacklisted(const u8 ifnum,
return 0;
}
+static int is_himemory(const u8 ifnum,
+ const struct sierra_iface_info *himemorylist)
+{
+ const u8 *info;
+ int i;
+
+ if (himemorylist) {
+ info = himemorylist->ifaceinfo;
+
+ for (i=0; i < himemorylist->infolen; i++) {
+ if (info[i] == ifnum)
+ return 1;
+ }
+ }
+ return 0;
+}
+
static int sierra_calc_interface(struct usb_serial *serial)
{
int interface;
@@ -186,6 +206,20 @@ static int sierra_probe(struct usb_serial *serial,
return result;
}
+/* interfaces with higher memory requirements */
+static const u8 hi_memory_typeA_ifaces[] = { 0, 2 };
+static const struct sierra_iface_info typeA_interface_list = {
+ .infolen = ARRAY_SIZE(hi_memory_typeA_ifaces),
+ .ifaceinfo = hi_memory_typeA_ifaces,
+};
+
+static const u8 hi_memory_typeB_ifaces[] = { 3, 4, 5, 6 };
+static const struct sierra_iface_info typeB_interface_list = {
+ .infolen = ARRAY_SIZE(hi_memory_typeB_ifaces),
+ .ifaceinfo = hi_memory_typeB_ifaces,
+};
+
+/* 'blacklist' of interfaces not served by this driver */
static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
static const struct sierra_iface_info direct_ip_interface_blacklist = {
.infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
@@ -286,8 +320,10 @@ struct sierra_port_private {
struct usb_anchor active;
struct usb_anchor delayed;
+ int num_out_urbs;
+ int num_in_urbs;
/* Input endpoints and buffers for this port */
- struct urb *in_urbs[N_IN_URB];
+ struct urb *in_urbs[N_IN_URB_HM];
/* Settings for the port */
int rts_state; /* Handshaking pins (outputs) */
@@ -460,7 +496,7 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
spin_lock_irqsave(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
portdata->outstanding_urbs);
- if (portdata->outstanding_urbs > N_OUT_URB) {
+ if (portdata->outstanding_urbs > portdata->num_out_urbs) {
spin_unlock_irqrestore(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
return 0;
@@ -665,7 +701,7 @@ static int sierra_write_room(struct tty_struct *tty)
/* try to give a good number back based on if we have any free urbs at
* this point in time */
spin_lock_irqsave(&portdata->lock, flags);
- if (portdata->outstanding_urbs > N_OUT_URB * 2 / 3) {
+ if (portdata->outstanding_urbs > (portdata->num_out_urbs * 2) / 3) {
spin_unlock_irqrestore(&portdata->lock, flags);
dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
return 0;
@@ -680,7 +716,7 @@ static void sierra_stop_rx_urbs(struct usb_serial_port *port)
int i;
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
- for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++)
+ for (i = 0; i < portdata->num_in_urbs; i++)
usb_kill_urb(portdata->in_urbs[i]);
usb_kill_urb(port->interrupt_in_urb);
@@ -695,7 +731,7 @@ static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags)
struct sierra_port_private *portdata = usb_get_serial_port_data(port);
ok_cnt = 0;
- for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
+ for (i = 0; i < portdata->num_in_urbs; i++) {
urb = portdata->in_urbs[i];
if (!urb)
continue;
@@ -791,7 +827,7 @@ static void sierra_close(struct usb_serial_port *port)
/* Stop reading urbs */
sierra_stop_rx_urbs(port);
/* .. and release them */
- for (i = 0; i < N_IN_URB; i++) {
+ for (i = 0; i < portdata->num_in_urbs; i++) {
sierra_release_urb(portdata->in_urbs[i]);
portdata->in_urbs[i] = NULL;
}
@@ -818,7 +854,7 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
endpoint = port->bulk_in_endpointAddress;
- for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) {
+ for (i = 0; i < portdata->num_in_urbs; i++) {
urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port,
IN_BUFLEN, GFP_KERNEL,
sierra_indat_callback);
@@ -869,7 +905,9 @@ static int sierra_startup(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct sierra_port_private *portdata;
+ struct sierra_iface_info *himemoryp = NULL;
int i;
+ u8 ifnum;
dev_dbg(&serial->dev->dev, "%s\n", __func__);
@@ -886,13 +924,40 @@ static int sierra_startup(struct usb_serial *serial)
portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
if (!portdata) {
dev_dbg(&port->dev, "%s: kmalloc for "
- "sierra_port_private (%d) failed!.\n",
+ "sierra_port_private (%d) failed!\n",
__func__, i);
return -ENOMEM;
}
spin_lock_init(&portdata->lock);
init_usb_anchor(&portdata->active);
init_usb_anchor(&portdata->delayed);
+ ifnum = i;
+ /* Assume low memory requirements */
+ portdata->num_out_urbs = N_OUT_URB;
+ portdata->num_in_urbs = N_IN_URB;
+
+ /* Determine actual memory requirements */
+ if (serial->num_ports == 1) {
+ /* Get interface number for composite device */
+ ifnum = sierra_calc_interface(serial);
+ himemoryp =
+ (struct sierra_iface_info *)&typeB_interface_list;
+ if (is_himemory(ifnum, himemoryp)) {
+ portdata->num_out_urbs = N_OUT_URB_HM;
+ portdata->num_in_urbs = N_IN_URB_HM;
+ }
+ }
+ else {
+ himemoryp =
+ (struct sierra_iface_info *)&typeA_interface_list;
+ if (is_himemory(i, himemoryp)) {
+ portdata->num_out_urbs = N_OUT_URB_HM;
+ portdata->num_in_urbs = N_IN_URB_HM;
+ }
+ }
+ dev_dbg(&serial->dev->dev,
+ "Memory usage (urbs) interface #%d, in=%d, out=%d\n",
+ ifnum, portdata->num_in_urbs, portdata->num_out_urbs);
/* Set the port private data pointer */
usb_set_serial_port_data(port, portdata);
}
@@ -940,7 +1005,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
struct sierra_intf_private *intfdata;
int b;
- if (serial->dev->auto_pm) {
+ if (message.event & PM_EVENT_AUTO) {
intfdata = serial->private;
spin_lock_irq(&intfdata->susp_lock);
b = intfdata->in_flight;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index bd3fa7ff15b..33c85f7084f 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -247,96 +247,66 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
return retval;
}
-static int serial_open(struct tty_struct *tty, struct file *filp)
+static int serial_activate(struct tty_port *tport, struct tty_struct *tty)
{
- struct usb_serial_port *port = tty->driver_data;
+ struct usb_serial_port *port =
+ container_of(tport, struct usb_serial_port, port);
struct usb_serial *serial = port->serial;
int retval;
- dbg("%s - port %d", __func__, port->number);
-
- spin_lock_irq(&port->port.lock);
- if (!tty_hung_up_p(filp))
- ++port->port.count;
- spin_unlock_irq(&port->port.lock);
- tty_port_tty_set(&port->port, tty);
+ mutex_lock(&serial->disc_mutex);
+ if (serial->disconnected)
+ retval = -ENODEV;
+ else
+ retval = port->serial->type->open(tty, port);
+ mutex_unlock(&serial->disc_mutex);
+ return retval;
+}
- /* Do the device-specific open only if the hardware isn't
- * already initialized.
- */
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
- if (mutex_lock_interruptible(&port->mutex))
- return -ERESTARTSYS;
- mutex_lock(&serial->disc_mutex);
- if (serial->disconnected)
- retval = -ENODEV;
- else
- retval = port->serial->type->open(tty, port);
- mutex_unlock(&serial->disc_mutex);
- mutex_unlock(&port->mutex);
- if (retval)
- return retval;
- set_bit(ASYNCB_INITIALIZED, &port->port.flags);
- }
+static int serial_open(struct tty_struct *tty, struct file *filp)
+{
+ struct usb_serial_port *port = tty->driver_data;
- /* Now do the correct tty layer semantics */
- retval = tty_port_block_til_ready(&port->port, tty, filp);
- return retval;
+ dbg("%s - port %d", __func__, port->number);
+ return tty_port_open(&port->port, tty, filp);
}
/**
* serial_down - shut down hardware
- * @port: port to shut down
+ * @tport: tty port to shut down
*
* Shut down a USB serial port unless it is the console. We never
- * shut down the console hardware as it will always be in use.
+ * shut down the console hardware as it will always be in use. Serialized
+ * against activate by the tport mutex and kept to matching open/close pairs
+ * of calls by the ASYNCB_INITIALIZED flag.
*/
-static void serial_down(struct usb_serial_port *port)
+static void serial_down(struct tty_port *tport)
{
+ struct usb_serial_port *port =
+ container_of(tport, struct usb_serial_port, port);
struct usb_serial_driver *drv = port->serial->type;
-
/*
* The console is magical. Do not hang up the console hardware
* or there will be tears.
*/
if (port->console)
return;
-
- /* Don't call the close method if the hardware hasn't been
- * initialized.
- */
- if (!test_and_clear_bit(ASYNCB_INITIALIZED, &port->port.flags))
- return;
-
- mutex_lock(&port->mutex);
if (drv->close)
drv->close(port);
- mutex_unlock(&port->mutex);
}
static void serial_hangup(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
-
dbg("%s - port %d", __func__, port->number);
-
- serial_down(port);
tty_port_hangup(&port->port);
}
static void serial_close(struct tty_struct *tty, struct file *filp)
{
struct usb_serial_port *port = tty->driver_data;
-
dbg("%s - port %d", __func__, port->number);
-
- if (tty_hung_up_p(filp))
- return;
- if (tty_port_close_start(&port->port, tty, filp) == 0)
- return;
- serial_down(port);
- tty_port_close_end(&port->port, tty);
- tty_port_tty_set(&port->port, NULL);
+ tty_port_close(&port->port, tty, filp);
}
/**
@@ -625,8 +595,7 @@ static void port_release(struct device *dev)
usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
- if (!IS_ERR(port->write_fifo) && port->write_fifo)
- kfifo_free(port->write_fifo);
+ kfifo_free(&port->write_fifo);
kfree(port->bulk_in_buffer);
kfree(port->bulk_out_buffer);
kfree(port->interrupt_in_buffer);
@@ -725,6 +694,8 @@ static void serial_dtr_rts(struct tty_port *port, int on)
static const struct tty_port_operations serial_port_ops = {
.carrier_raised = serial_carrier_raised,
.dtr_rts = serial_dtr_rts,
+ .activate = serial_activate,
+ .shutdown = serial_down,
};
int usb_serial_probe(struct usb_interface *interface,
@@ -923,7 +894,8 @@ int usb_serial_probe(struct usb_interface *interface,
port->port.ops = &serial_port_ops;
port->serial = serial;
spin_lock_init(&port->lock);
- mutex_init(&port->mutex);
+ /* Keep this for private driver use for the moment but
+ should probably go away */
INIT_WORK(&port->work, usb_serial_port_work);
serial->port[i] = port;
port->dev.parent = &interface->dev;
@@ -966,9 +938,7 @@ int usb_serial_probe(struct usb_interface *interface,
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
- port->write_fifo = kfifo_alloc(PAGE_SIZE, GFP_KERNEL,
- &port->lock);
- if (IS_ERR(port->write_fifo))
+ if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_out_size = buffer_size;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index cfa26d56ce6..e5e6df39e73 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -73,7 +73,8 @@
static const char* host_info(struct Scsi_Host *host)
{
- return "SCSI emulation for USB Mass Storage devices";
+ struct us_data *us = host_to_us(host);
+ return us->scsi_name;
}
static int slave_alloc (struct scsi_device *sdev)
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 589f6b4404f..cc313d16d72 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -666,10 +666,11 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
* to wait for at least one CHECK_CONDITION to determine
* SANE_SENSE support
*/
- if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
+ if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
result == USB_STOR_TRANSPORT_GOOD &&
!(us->fflags & US_FL_SANE_SENSE) &&
- !(srb->cmnd[2] & 0x20)) {
+ !(us->fflags & US_FL_BAD_SENSE) &&
+ !(srb->cmnd[2] & 0x20))) {
US_DEBUGP("-- SAT supported, increasing auto-sense\n");
us->fflags |= US_FL_SANE_SENSE;
}
@@ -718,6 +719,12 @@ Retry_Sense:
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
US_DEBUGP("-- auto-sense aborted\n");
srb->result = DID_ABORT << 16;
+
+ /* If SANE_SENSE caused this problem, disable it */
+ if (sense_size != US_SENSE_SIZE) {
+ us->fflags &= ~US_FL_SANE_SENSE;
+ us->fflags |= US_FL_BAD_SENSE;
+ }
goto Handle_Errors;
}
@@ -727,10 +734,11 @@ Retry_Sense:
* (small) sense request. This fixes some USB GSM modems
*/
if (temp_result == USB_STOR_TRANSPORT_FAILED &&
- (us->fflags & US_FL_SANE_SENSE) &&
- sense_size != US_SENSE_SIZE) {
+ sense_size != US_SENSE_SIZE) {
US_DEBUGP("-- auto-sense failure, retry small sense\n");
sense_size = US_SENSE_SIZE;
+ us->fflags &= ~US_FL_SANE_SENSE;
+ us->fflags |= US_FL_BAD_SENSE;
goto Retry_Sense;
}
@@ -754,6 +762,7 @@ Retry_Sense:
*/
if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
!(us->fflags & US_FL_SANE_SENSE) &&
+ !(us->fflags & US_FL_BAD_SENSE) &&
(srb->sense_buffer[0] & 0x7C) == 0x70) {
US_DEBUGP("-- SANE_SENSE support enabled\n");
us->fflags |= US_FL_SANE_SENSE;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index d4f034ebaa8..64a0a2c27e1 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -818,6 +818,13 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */
+UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100,
+ "Prolific Technology, Inc.",
+ "Prolific Storage Gadget",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
/* Reported by Rogerio Brito <rbrito@ime.usp.br> */
UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
"Prolific Technology, Inc.",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 8060b85fe1a..5a53d4f0dd1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -45,6 +45,10 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifdef CONFIG_USB_STORAGE_DEBUG
+#define DEBUG
+#endif
+
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
@@ -228,6 +232,7 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data,
if (data_len<36) // You lose.
return;
+ memset(data+8, ' ', 28);
if(data[0]&0x20) { /* USB device currently not connected. Return
peripheral qualifier 001b ("...however, the
physical device is not currently connected
@@ -237,15 +242,15 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data,
device, it may return zeros or ASCII spaces
(20h) in those fields until the data is
available from the device."). */
- memset(data+8,0,28);
} else {
u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
- memcpy(data+8, us->unusual_dev->vendorName,
- strlen(us->unusual_dev->vendorName) > 8 ? 8 :
- strlen(us->unusual_dev->vendorName));
- memcpy(data+16, us->unusual_dev->productName,
- strlen(us->unusual_dev->productName) > 16 ? 16 :
- strlen(us->unusual_dev->productName));
+ int n;
+
+ n = strlen(us->unusual_dev->vendorName);
+ memcpy(data+8, us->unusual_dev->vendorName, min(8, n));
+ n = strlen(us->unusual_dev->productName);
+ memcpy(data+16, us->unusual_dev->productName, min(16, n));
+
data[32] = 0x30 + ((bcdDevice>>12) & 0x0F);
data[33] = 0x30 + ((bcdDevice>>8) & 0x0F);
data[34] = 0x30 + ((bcdDevice>>4) & 0x0F);
@@ -459,6 +464,9 @@ static void adjust_quirks(struct us_data *us)
case 'a':
f |= US_FL_SANE_SENSE;
break;
+ case 'b':
+ f |= US_FL_BAD_SENSE;
+ break;
case 'c':
f |= US_FL_FIX_CAPACITY;
break;
@@ -808,14 +816,13 @@ static int usb_stor_scan_thread(void * __us)
{
struct us_data *us = (struct us_data *)__us;
- printk(KERN_DEBUG
- "usb-storage: device found at %d\n", us->pusb_dev->devnum);
+ dev_dbg(&us->pusb_intf->dev, "device found\n");
set_freezable();
/* Wait for the timeout to expire or for a disconnect */
if (delay_use > 0) {
- printk(KERN_DEBUG "usb-storage: waiting for device "
- "to settle before scanning\n");
+ dev_dbg(&us->pusb_intf->dev, "waiting for device to settle "
+ "before scanning\n");
wait_event_freezable_timeout(us->delay_wait,
test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
delay_use * HZ);
@@ -832,7 +839,7 @@ static int usb_stor_scan_thread(void * __us)
mutex_unlock(&us->dev_mutex);
}
scsi_scan_host(us_to_host(us));
- printk(KERN_DEBUG "usb-storage: device scan complete\n");
+ dev_dbg(&us->pusb_intf->dev, "scan complete\n");
/* Should we unbind if no devices were detected? */
}
@@ -840,6 +847,15 @@ static int usb_stor_scan_thread(void * __us)
complete_and_exit(&us->scanning_done, 0);
}
+static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+
+ if (usb_dev->bus->sg_tablesize) {
+ return usb_dev->bus->sg_tablesize;
+ }
+ return SG_ALL;
+}
/* First part of general USB mass-storage probing */
int usb_stor_probe1(struct us_data **pus,
@@ -868,6 +884,7 @@ int usb_stor_probe1(struct us_data **pus,
* Allow 16-byte CDBs and thus > 2TB
*/
host->max_cmd_len = 16;
+ host->sg_tablesize = usb_stor_sg_tablesize(intf);
*pus = us = host_to_us(host);
memset(us, 0, sizeof(struct us_data));
mutex_init(&(us->dev_mutex));
@@ -929,6 +946,8 @@ int usb_stor_probe2(struct us_data *us)
result = usb_stor_acquire_resources(us);
if (result)
goto BadDevice;
+ snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
+ dev_name(&us->pusb_intf->dev));
result = scsi_add_host(us_to_host(us), &us->pusb_intf->dev);
if (result) {
printk(KERN_WARNING USB_STORAGE
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 2609efb2bd7..69717134231 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -132,6 +132,7 @@ struct us_data {
/* SCSI interfaces */
struct scsi_cmnd *srb; /* current srb */
unsigned int tag; /* current dCBWTag */
+ char scsi_name[32]; /* scsi_host name */
/* control and bulk communications data */
struct urb *current_urb; /* USB requests */
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b62f2bc064f..b1e579c5c97 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -358,7 +358,7 @@ retry:
rv = skel_do_read_io(dev, count);
if (rv < 0)
goto exit;
- else if (!file->f_flags & O_NONBLOCK)
+ else if (!(file->f_flags & O_NONBLOCK))
goto retry;
rv = -EAGAIN;
}
@@ -411,7 +411,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
* limit the number of URBs in flight to stop a user from using up all
* RAM
*/
- if (!file->f_flags & O_NONBLOCK) {
+ if (!(file->f_flags & O_NONBLOCK)) {
if (down_interruptible(&dev->limit_sem)) {
retval = -ERESTARTSYS;
goto exit;
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 4ac4300a3f9..dced419f7ab 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -119,10 +119,12 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
urb = usb_alloc_urb(0, GFP_KERNEL);
if (urb == NULL)
goto err;
+ wusb_dev->set_gtk_urb = urb;
- req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
if (req == NULL)
goto err;
+ wusb_dev->set_gtk_req = req;
req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
req->bRequest = USB_REQ_SET_DESCRIPTOR;
@@ -130,9 +132,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
req->wIndex = 0;
req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
- wusb_dev->set_gtk_urb = urb;
- wusb_dev->set_gtk_req = req;
-
return wusb_dev;
err:
wusb_dev_free(wusb_dev);
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 4516c36436e..edcd2d75603 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -205,15 +205,15 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
const void *itr, *top;
char buf[64];
- secd = kmalloc(sizeof(struct usb_security_descriptor), GFP_KERNEL);
+ secd = kmalloc(sizeof(*secd), GFP_KERNEL);
if (secd == NULL) {
result = -ENOMEM;
goto out;
}
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
- 0, secd, sizeof(struct usb_security_descriptor));
- if (result < sizeof(secd)) {
+ 0, secd, sizeof(*secd));
+ if (result < sizeof(*secd)) {
dev_err(dev, "Can't read security descriptor or "
"not enough data: %d\n", result);
goto out;
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index ee6256f2363..eab86e4bc77 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -147,10 +147,40 @@ static ssize_t wusb_chid_store(struct device *dev,
}
static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
+
+static ssize_t wusb_phy_rate_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return sprintf(buf, "%d\n", wusbhc->phy_rate);
+}
+
+static ssize_t wusb_phy_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ uint8_t phy_rate;
+ ssize_t result;
+
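+ /* Accept only defined UWB PHY rate indices (0 .. UWB_PHY_RATE_INVALID - 1) */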
+ result = sscanf(buf, "%hhu", &phy_rate);
+ if (result != 1)
+ return -EINVAL;
+ if (phy_rate >= UWB_PHY_RATE_INVALID)
+ return -EINVAL;
+
+ wusbhc->phy_rate = phy_rate;
+ return size;
+}
+static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store);
+
/* Group all the WUSBHC attributes */
static struct attribute *wusbhc_attrs[] = {
&dev_attr_wusb_trust_timeout.attr,
&dev_attr_wusb_chid.attr,
+ &dev_attr_wusb_phy_rate.attr,
NULL,
};
@@ -177,6 +207,8 @@ int wusbhc_create(struct wusbhc *wusbhc)
int result = 0;
wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
+ wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
+
mutex_init(&wusbhc->mutex);
result = wusbhc_mmcie_create(wusbhc);
if (result < 0)
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 797c2453a35..fd2fd4e277e 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -253,6 +253,7 @@ struct wusbhc {
unsigned trust_timeout; /* in jiffies */
struct wusb_ckhdid chid;
+ uint8_t phy_rate;
struct wuie_host_info *wuie_host_info;
struct mutex mutex; /* locks everything else */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e9f193e6b27..5a5c303a637 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -614,6 +614,21 @@ config FB_BFIN_T350MCQB
This display is a QVGA 320x240 24-bit RGB display interfaced by an 8-bit wide PPI
It uses PPI[0..7] PPI_FS1, PPI_FS2 and PPI_CLK.
+config FB_BFIN_LQ035Q1
+ tristate "SHARP LQ035Q1DH02 TFT LCD"
+ depends on FB && BLACKFIN && SPI
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select BFIN_GPTIMERS
+ help
+ This is the framebuffer device driver for a SHARP LQ035Q1DH02 TFT display found on
+ the Blackfin Landscape LCD EZ-Extender Card.
+ This display is a QVGA 320x240 18-bit RGB display interfaced by a 16-bit wide PPI
+ It uses PPI[0..15] PPI_FS1, PPI_FS2 and PPI_CLK.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bfin-lq035q1-fb.
config FB_STI
tristate "HP STI frame buffer device support"
@@ -2131,7 +2146,7 @@ config FB_PRE_INIT_FB
the bootloader.
config FB_MSM
- tristate
+ tristate "MSM Framebuffer support"
depends on FB && ARCH_MSM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -2165,6 +2180,7 @@ config FB_BROADSHEET
a bridge adapter.
source "drivers/video/omap/Kconfig"
+source "drivers/video/omap2/Kconfig"
source "drivers/video/backlight/Kconfig"
source "drivers/video/display/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 80232e12488..4ecb30c4f3f 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -124,6 +124,7 @@ obj-$(CONFIG_FB_SM501) += sm501fb.o
obj-$(CONFIG_FB_XILINX) += xilinxfb.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
+obj-y += omap2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
@@ -136,6 +137,7 @@ obj-$(CONFIG_FB_EFI) += efifb.o
obj-$(CONFIG_FB_VGA16) += vga16fb.o
obj-$(CONFIG_FB_OF) += offb.o
obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
+obj-$(CONFIG_FB_BFIN_LQ035Q1) += bfin-lq035q1-fb.o
obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index ad05da5ba3c..86d95c228ad 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -15,7 +15,7 @@
struct adp5520_bl {
struct device *master;
- struct adp5520_backlight_platfrom_data *pdata;
+ struct adp5520_backlight_platform_data *pdata;
struct mutex lock;
unsigned long cached_daylight_max;
int id;
@@ -31,29 +31,30 @@ static int adp5520_bl_set(struct backlight_device *bl, int brightness)
if (data->pdata->en_ambl_sens) {
if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) {
/* Disable Ambient Light auto adjust */
- ret |= adp5520_clr_bits(master, BL_CONTROL,
- BL_AUTO_ADJ);
- ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
+ ret |= adp5520_clr_bits(master, ADP5520_BL_CONTROL,
+ ADP5520_BL_AUTO_ADJ);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX,
+ brightness);
} else {
/*
* MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
* restore daylight l3 sysfs brightness
*/
- ret |= adp5520_write(master, DAYLIGHT_MAX,
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX,
data->cached_daylight_max);
- ret |= adp5520_set_bits(master, BL_CONTROL,
- BL_AUTO_ADJ);
+ ret |= adp5520_set_bits(master, ADP5520_BL_CONTROL,
+ ADP5520_BL_AUTO_ADJ);
}
} else {
- ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX, brightness);
}
if (data->current_brightness && brightness == 0)
ret |= adp5520_set_bits(master,
- MODE_STATUS, DIM_EN);
+ ADP5520_MODE_STATUS, ADP5520_DIM_EN);
else if (data->current_brightness == 0 && brightness)
ret |= adp5520_clr_bits(master,
- MODE_STATUS, DIM_EN);
+ ADP5520_MODE_STATUS, ADP5520_DIM_EN);
if (!ret)
data->current_brightness = brightness;
@@ -79,12 +80,12 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
int error;
uint8_t reg_val;
- error = adp5520_read(data->master, BL_VALUE, &reg_val);
+ error = adp5520_read(data->master, ADP5520_BL_VALUE, &reg_val);
return error ? data->current_brightness : reg_val;
}
-static struct backlight_ops adp5520_bl_ops = {
+static const struct backlight_ops adp5520_bl_ops = {
.update_status = adp5520_bl_update_status,
.get_brightness = adp5520_bl_get_brightness,
};
@@ -93,33 +94,46 @@ static int adp5520_bl_setup(struct backlight_device *bl)
{
struct adp5520_bl *data = bl_get_data(bl);
struct device *master = data->master;
- struct adp5520_backlight_platfrom_data *pdata = data->pdata;
+ struct adp5520_backlight_platform_data *pdata = data->pdata;
int ret = 0;
- ret |= adp5520_write(master, DAYLIGHT_MAX, pdata->l1_daylight_max);
- ret |= adp5520_write(master, DAYLIGHT_DIM, pdata->l1_daylight_dim);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_MAX,
+ pdata->l1_daylight_max);
+ ret |= adp5520_write(master, ADP5520_DAYLIGHT_DIM,
+ pdata->l1_daylight_dim);
if (pdata->en_ambl_sens) {
data->cached_daylight_max = pdata->l1_daylight_max;
- ret |= adp5520_write(master, OFFICE_MAX, pdata->l2_office_max);
- ret |= adp5520_write(master, OFFICE_DIM, pdata->l2_office_dim);
- ret |= adp5520_write(master, DARK_MAX, pdata->l3_dark_max);
- ret |= adp5520_write(master, DARK_DIM, pdata->l3_dark_dim);
- ret |= adp5520_write(master, L2_TRIP, pdata->l2_trip);
- ret |= adp5520_write(master, L2_HYS, pdata->l2_hyst);
- ret |= adp5520_write(master, L3_TRIP, pdata->l3_trip);
- ret |= adp5520_write(master, L3_HYS, pdata->l3_hyst);
- ret |= adp5520_write(master, ALS_CMPR_CFG,
- ALS_CMPR_CFG_VAL(pdata->abml_filt, L3_EN));
+ ret |= adp5520_write(master, ADP5520_OFFICE_MAX,
+ pdata->l2_office_max);
+ ret |= adp5520_write(master, ADP5520_OFFICE_DIM,
+ pdata->l2_office_dim);
+ ret |= adp5520_write(master, ADP5520_DARK_MAX,
+ pdata->l3_dark_max);
+ ret |= adp5520_write(master, ADP5520_DARK_DIM,
+ pdata->l3_dark_dim);
+ ret |= adp5520_write(master, ADP5520_L2_TRIP,
+ pdata->l2_trip);
+ ret |= adp5520_write(master, ADP5520_L2_HYS,
+ pdata->l2_hyst);
+ ret |= adp5520_write(master, ADP5520_L3_TRIP,
+ pdata->l3_trip);
+ ret |= adp5520_write(master, ADP5520_L3_HYS,
+ pdata->l3_hyst);
+ ret |= adp5520_write(master, ADP5520_ALS_CMPR_CFG,
+ ALS_CMPR_CFG_VAL(pdata->abml_filt,
+ ADP5520_L3_EN));
}
- ret |= adp5520_write(master, BL_CONTROL,
- BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens));
+ ret |= adp5520_write(master, ADP5520_BL_CONTROL,
+ BL_CTRL_VAL(pdata->fade_led_law,
+ pdata->en_ambl_sens));
- ret |= adp5520_write(master, BL_FADE, FADE_VAL(pdata->fade_in,
+ ret |= adp5520_write(master, ADP5520_BL_FADE, FADE_VAL(pdata->fade_in,
pdata->fade_out));
- ret |= adp5520_set_bits(master, MODE_STATUS, BL_EN | DIM_EN);
+ ret |= adp5520_set_bits(master, ADP5520_MODE_STATUS,
+ ADP5520_BL_EN | ADP5520_DIM_EN);
return ret;
}
@@ -156,29 +170,31 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
}
static ssize_t adp5520_bl_dark_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DARK_MAX);
+ return adp5520_show(dev, buf, ADP5520_DARK_MAX);
}
static ssize_t adp5520_bl_dark_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, DARK_MAX);
+ return adp5520_store(dev, buf, count, ADP5520_DARK_MAX);
}
static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show,
adp5520_bl_dark_max_store);
static ssize_t adp5520_bl_office_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, OFFICE_MAX);
+ return adp5520_show(dev, buf, ADP5520_OFFICE_MAX);
}
static ssize_t adp5520_bl_office_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, OFFICE_MAX);
+ return adp5520_store(dev, buf, count, ADP5520_OFFICE_MAX);
}
static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
adp5520_bl_office_max_store);
@@ -186,16 +202,17 @@ static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
static ssize_t adp5520_bl_daylight_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DAYLIGHT_MAX);
+ return adp5520_show(dev, buf, ADP5520_DAYLIGHT_MAX);
}
static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adp5520_bl *data = dev_get_drvdata(dev);
strict_strtoul(buf, 10, &data->cached_daylight_max);
- return adp5520_store(dev, buf, count, DAYLIGHT_MAX);
+ return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX);
}
static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
adp5520_bl_daylight_max_store);
@@ -203,14 +220,14 @@ static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
static ssize_t adp5520_bl_dark_dim_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DARK_DIM);
+ return adp5520_show(dev, buf, ADP5520_DARK_DIM);
}
static ssize_t adp5520_bl_dark_dim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, DARK_DIM);
+ return adp5520_store(dev, buf, count, ADP5520_DARK_DIM);
}
static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
adp5520_bl_dark_dim_store);
@@ -218,29 +235,29 @@ static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
static ssize_t adp5520_bl_office_dim_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, OFFICE_DIM);
+ return adp5520_show(dev, buf, ADP5520_OFFICE_DIM);
}
static ssize_t adp5520_bl_office_dim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, OFFICE_DIM);
+ return adp5520_store(dev, buf, count, ADP5520_OFFICE_DIM);
}
static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show,
adp5520_bl_office_dim_store);
static ssize_t adp5520_bl_daylight_dim_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- return adp5520_show(dev, buf, DAYLIGHT_DIM);
+ return adp5520_show(dev, buf, ADP5520_DAYLIGHT_DIM);
}
static ssize_t adp5520_bl_daylight_dim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- return adp5520_store(dev, buf, count, DAYLIGHT_DIM);
+ return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_DIM);
}
static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show,
adp5520_bl_daylight_dim_store);
@@ -316,7 +333,7 @@ static int __devexit adp5520_bl_remove(struct platform_device *pdev)
struct backlight_device *bl = platform_get_drvdata(pdev);
struct adp5520_bl *data = bl_get_data(bl);
- adp5520_clr_bits(data->master, MODE_STATUS, BL_EN);
+ adp5520_clr_bits(data->master, ADP5520_MODE_STATUS, ADP5520_BL_EN);
if (data->pdata->en_ambl_sens)
sysfs_remove_group(&bl->dev.kobj,
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index 2c3bdfc620b..d769b0bab21 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
return 1;
}
-static struct backlight_ops adx_backlight_ops = {
+static const struct backlight_ops adx_backlight_ops = {
.options = 0,
.update_status = adx_backlight_update_status,
.get_brightness = adx_backlight_get_brightness,
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index 2cf7ba52f67..f625ffc69ad 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
return pwm_channel_enable(&pwmbl->pwmc);
}
-static struct backlight_ops atmel_pwm_bl_ops = {
+static const struct backlight_ops atmel_pwm_bl_ops = {
.get_brightness = atmel_pwm_bl_get_intensity,
.update_status = atmel_pwm_bl_set_intensity,
};
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 6615ac7fa60..18829cf68b1 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
* ERR_PTR() or a pointer to the newly allocated device.
*/
struct backlight_device *backlight_device_register(const char *name,
- struct device *parent, void *devdata, struct backlight_ops *ops)
+ struct device *parent, void *devdata, const struct backlight_ops *ops)
{
struct backlight_device *new_bd;
int rc;
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 96774949cd3..b4bcf804379 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
}
EXPORT_SYMBOL(corgi_lcd_limit_intensity);
-static struct backlight_ops corgi_bl_ops = {
+static const struct backlight_ops corgi_bl_ops = {
.get_brightness = corgi_bl_get_intensity,
.update_status = corgi_bl_update_status,
};
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index b9fe62b475c..da86db4374a 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
return intensity;
}
-static struct backlight_ops cr_backlight_ops = {
+static const struct backlight_ops cr_backlight_ops = {
.get_brightness = cr_backlight_get_intensity,
.update_status = cr_backlight_set_intensity,
};
@@ -201,7 +201,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
if (IS_ERR(ldp)) {
backlight_device_unregister(bdp);
pci_dev_put(lpc_dev);
- return PTR_ERR(bdp);
+ return PTR_ERR(ldp);
}
pci_read_config_dword(lpc_dev, CRVML_REG_GPIOBAR,
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 7fcb0eb54c6..74cdc640173 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -95,7 +95,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
return data->current_brightness;
}
-static struct backlight_ops da903x_backlight_ops = {
+static const struct backlight_ops da903x_backlight_ops = {
.update_status = da903x_backlight_update_status,
.get_brightness = da903x_backlight_get_brightness,
};
@@ -177,7 +177,7 @@ static int da903x_backlight_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops da903x_backlight_pm_ops = {
+static const struct dev_pm_ops da903x_backlight_pm_ops = {
.suspend = da903x_backlight_suspend,
.resume = da903x_backlight_resume,
};
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 6d27f62fdcd..e6d348e6359 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
}
EXPORT_SYMBOL(corgibl_limit_intensity);
-static struct backlight_ops genericbl_ops = {
+static const struct backlight_ops genericbl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = genericbl_get_intensity,
.update_status = genericbl_send_intensity,
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 7fb4eefff80..f7cc528d5be 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
return current_intensity;
}
-static struct backlight_ops hp680bl_ops = {
+static const struct backlight_ops hp680bl_ops = {
.get_brightness = hp680bl_get_intensity,
.update_status = hp680bl_set_intensity,
};
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 7aed2565c1b..db9071fc566 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -93,7 +93,7 @@ out:
return ret;
}
-static struct backlight_ops jornada_bl_ops = {
+static const struct backlight_ops jornada_bl_ops = {
.get_brightness = jornada_bl_get_brightness,
.update_status = jornada_bl_update_status,
.options = BL_CORE_SUSPENDRESUME,
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index a38fda1742d..939e7b830cf 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
return kb3886bl_intensity;
}
-static struct backlight_ops kb3886bl_ops = {
+static const struct backlight_ops kb3886bl_ops = {
.get_brightness = kb3886bl_get_intensity,
.update_status = kb3886bl_send_intensity,
};
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index a482dd7b031..9b3be74cee5 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -101,7 +101,7 @@ static ssize_t lcd_store_power(struct device *dev,
int power = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
return -EINVAL;
@@ -140,7 +140,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
int contrast = simple_strtoul(buf, &endp, 0);
size_t size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
return -EINVAL;
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 6b488b8a7ee..00a9591b000 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
return current_intensity;
}
-static struct backlight_ops locomobl_data = {
+static const struct backlight_ops locomobl_data = {
.get_brightness = locomolcd_get_intensity,
.update_status = locomolcd_set_intensity,
};
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9edb8d7c295..2e78b0784bd 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -33,7 +33,7 @@ struct dmi_match_data {
unsigned long iostart;
unsigned long iolen;
/* Backlight operations structure. */
- struct backlight_ops backlight_ops;
+ const struct backlight_ops backlight_ops;
};
/* Module parameters. */
@@ -220,6 +220,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
},
{
.callback = mbp_dmi_match,
+ .ident = "MacBookPro 5,3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3"),
+ },
+ .driver_data = (void *)&nvidia_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
+ .ident = "MacBookPro 5,4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4"),
+ },
+ .driver_data = (void *)&nvidia_chipset_data,
+ },
+ {
+ .callback = mbp_dmi_match,
.ident = "MacBookPro 5,5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 8693e5fcd2e..409ca964352 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
return bl->current_intensity;
}
-static struct backlight_ops omapbl_ops = {
+static const struct backlight_ops omapbl_ops = {
.get_brightness = omapbl_get_intensity,
.update_status = omapbl_update_status,
};
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 9edaf24fd82..075786e0503 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
return intensity - HW_LEVEL_MIN;
}
-static struct backlight_ops progearbl_ops = {
+static const struct backlight_ops progearbl_ops = {
.get_brightness = progearbl_get_intensity,
.update_status = progearbl_set_intensity,
};
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 88716626744..9d2ec2a1cce 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -22,8 +22,10 @@
struct pwm_bl_data {
struct pwm_device *pwm;
+ struct device *dev;
unsigned int period;
- int (*notify)(int brightness);
+ int (*notify)(struct device *,
+ int brightness);
};
static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -39,7 +41,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
brightness = 0;
if (pb->notify)
- brightness = pb->notify(brightness);
+ brightness = pb->notify(pb->dev, brightness);
if (brightness == 0) {
pwm_config(pb->pwm, 0, pb->period);
@@ -56,7 +58,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
return bl->props.brightness;
}
-static struct backlight_ops pwm_backlight_ops = {
+static const struct backlight_ops pwm_backlight_ops = {
.update_status = pwm_backlight_update_status,
.get_brightness = pwm_backlight_get_brightness,
};
@@ -88,6 +90,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->period = data->pwm_period_ns;
pb->notify = data->notify;
+ pb->dev = &pdev->dev;
pb->pwm = pwm_request(data->pwm_id, "backlight");
if (IS_ERR(pb->pwm)) {
@@ -146,7 +149,7 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
if (pb->notify)
- pb->notify(0);
+ pb->notify(pb->dev, 0);
pwm_config(pb->pwm, 0, pb->period);
pwm_disable(pb->pwm);
return 0;
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 43edbada12d..e14ce4d469f 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
return props->brightness;
}
-static struct backlight_ops bl_ops = {
+static const struct backlight_ops bl_ops = {
.get_brightness = tosa_bl_get_brightness,
.update_status = tosa_bl_update_status,
};
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 467bdb7efb2..e32add37a20 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
return data->current_brightness;
}
-static struct backlight_ops wm831x_backlight_ops = {
+static const struct backlight_ops wm831x_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = wm831x_backlight_update_status,
.get_brightness = wm831x_backlight_get_brightness,
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
new file mode 100644
index 00000000000..b690c269784
--- /dev/null
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -0,0 +1,826 @@
+/*
+ * Blackfin LCD Framebuffer driver SHARP LQ035Q1DH02
+ *
+ * Copyright 2008-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#define DRIVER_NAME "bfin-lq035q1"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/backlight.h>
+#include <linux/lcd.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/blackfin.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#include <asm/gptimers.h>
+
+#include <asm/bfin-lq035q1.h>
+
+#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
+#define TIMER_HSYNC_id TIMER1_id
+#define TIMER_HSYNCbit TIMER1bit
+#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
+#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
+#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
+
+#define TIMER_VSYNC_id TIMER2_id
+#define TIMER_VSYNCbit TIMER2bit
+#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN2
+#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL2
+#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF2
+#else
+#define TIMER_HSYNC_id TIMER0_id
+#define TIMER_HSYNCbit TIMER0bit
+#define TIMER_HSYNC_STATUS_TRUN TIMER_STATUS_TRUN0
+#define TIMER_HSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL0
+#define TIMER_HSYNC_STATUS_TOVF TIMER_STATUS_TOVF0
+
+#define TIMER_VSYNC_id TIMER1_id
+#define TIMER_VSYNCbit TIMER1bit
+#define TIMER_VSYNC_STATUS_TRUN TIMER_STATUS_TRUN1
+#define TIMER_VSYNC_STATUS_TIMIL TIMER_STATUS_TIMIL1
+#define TIMER_VSYNC_STATUS_TOVF TIMER_STATUS_TOVF1
+#endif
+
+#define LCD_X_RES 320 /* Horizontal Resolution */
+#define LCD_Y_RES 240 /* Vertical Resolution */
+#define DMA_BUS_SIZE 16
+
+#define USE_RGB565_16_BIT_PPI
+
+#ifdef USE_RGB565_16_BIT_PPI
+#define LCD_BPP 16 /* Bits Per Pixel */
+#define CLOCKS_PER_PIX 1
+#define CPLD_PIPELINE_DELAY_COR 0 /* NO CPLD */
+#endif
+
+/* Interface 16/18-bit TFT over an 8-bit wide PPI using a small Programmable Logic Device (CPLD)
+ * http://blackfin.uclinux.org/gf/project/stamp/frs/?action=FrsReleaseBrowse&frs_package_id=165
+ */
+
+#ifdef USE_RGB565_8_BIT_PPI
+#define LCD_BPP 16 /* Bits Per Pixel */
+#define CLOCKS_PER_PIX 2
+#define CPLD_PIPELINE_DELAY_COR 3 /* RGB565 */
+#endif
+
+#ifdef USE_RGB888_8_BIT_PPI
+#define LCD_BPP 24 /* Bits Per Pixel */
+#define CLOCKS_PER_PIX 3
+#define CPLD_PIPELINE_DELAY_COR 5 /* RGB888 */
+#endif
+
+ /*
+ * HS and VS timing parameters (all in number of PPI clk ticks)
+ */
+
+#define U_LINE 4 /* Blanking Lines */
+
+#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */
+#define H_PERIOD (336 * CLOCKS_PER_PIX) /* HS period */
+#define H_PULSE (2 * CLOCKS_PER_PIX) /* HS pulse width */
+#define H_START (7 * CLOCKS_PER_PIX + CPLD_PIPELINE_DELAY_COR) /* first valid pixel */
+
+#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */
+#define V_PULSE (2 * CLOCKS_PER_PIX) /* VS pulse width (1-5 H_PERIODs) */
+#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */
+
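+/* The visible frame starts U_LINE/2 blanking lines into the DMA buffer */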
+#define ACTIVE_VIDEO_MEM_OFFSET ((U_LINE / 2) * LCD_X_RES * (LCD_BPP / 8))
+
+#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
+
+#define PPI_TX_MODE 0x2
+#define PPI_XFER_TYPE_11 0xC
+#define PPI_PORT_CFG_01 0x10
+#define PPI_POLS_1 0x8000
+
+#if (CLOCKS_PER_PIX > 1)
+#define PPI_PMODE (DLEN_8 | PACK_EN)
+#else
+#define PPI_PMODE (DLEN_16)
+#endif
+
+#define LQ035_INDEX 0x74
+#define LQ035_DATA 0x76
+
+#define LQ035_DRIVER_OUTPUT_CTL 0x1
+#define LQ035_SHUT_CTL 0x11
+
+#define LQ035_DRIVER_OUTPUT_MASK (LQ035_LR | LQ035_TB | LQ035_BGR | LQ035_REV)
+#define LQ035_DRIVER_OUTPUT_DEFAULT (0x2AEF & ~LQ035_DRIVER_OUTPUT_MASK)
+
+#define LQ035_SHUT (1 << 0) /* Shutdown */
+#define LQ035_ON (0 << 0) /* Power on */
+
+struct bfin_lq035q1fb_info {
+ struct fb_info *fb;
+ struct device *dev;
+ struct spi_driver spidrv;
+ struct bfin_lq035q1fb_disp_info *disp_info;
+ unsigned char *fb_buffer; /* RGB Buffer */
+ dma_addr_t dma_handle;
+ int lq035_open_cnt;
+ int irq;
+ spinlock_t lock; /* lock */
+ u32 pseudo_pal[16];
+};
+
+static int nocursor;
+module_param(nocursor, int, 0644);
+MODULE_PARM_DESC(nocursor, "cursor enable/disable");
+
+struct spi_control {
+ unsigned short mode;
+};
+
+static int lq035q1_control(struct spi_device *spi, unsigned char reg, unsigned short value)
+{
+ int ret;
+ u8 regs[3] = { LQ035_INDEX, 0, 0 };
+ u8 dat[3] = { LQ035_DATA, 0, 0 };
+
+ if (!spi)
+ return -ENODEV;
+
+ regs[2] = reg;
+ dat[1] = value >> 8;
+ dat[2] = value & 0xFF;
+
+ ret = spi_write(spi, regs, ARRAY_SIZE(regs));
+ ret |= spi_write(spi, dat, ARRAY_SIZE(dat));
+ return ret;
+}
+
+static int __devinit lq035q1_spidev_probe(struct spi_device *spi)
+{
+ int ret;
+ struct spi_control *ctl;
+ struct bfin_lq035q1fb_info *info = container_of(spi->dev.driver,
+ struct bfin_lq035q1fb_info,
+ spidrv.driver);
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->mode = (info->disp_info->mode &
+ LQ035_DRIVER_OUTPUT_MASK) | LQ035_DRIVER_OUTPUT_DEFAULT;
+
+ ret = lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
+ ret |= lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, ctl);
+
+ return 0;
+}
+
+static int lq035q1_spidev_remove(struct spi_device *spi)
+{
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+#ifdef CONFIG_PM
+static int lq035q1_spidev_suspend(struct spi_device *spi, pm_message_t state)
+{
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+static int lq035q1_spidev_resume(struct spi_device *spi)
+{
+ int ret;
+ struct spi_control *ctl = spi_get_drvdata(spi);
+
+ ret = lq035q1_control(spi, LQ035_DRIVER_OUTPUT_CTL, ctl->mode);
+ if (ret)
+ return ret;
+
+ return lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_ON);
+}
+#else
+# define lq035q1_spidev_suspend NULL
+# define lq035q1_spidev_resume NULL
+#endif
+
+/* Power down all displays on reboot, poweroff or halt */
+static void lq035q1_spidev_shutdown(struct spi_device *spi)
+{
+ lq035q1_control(spi, LQ035_SHUT_CTL, LQ035_SHUT);
+}
+
+static int lq035q1_backlight(struct bfin_lq035q1fb_info *info, unsigned arg)
+{
+ if (info->disp_info->use_bl)
+ gpio_set_value(info->disp_info->gpio_bl, arg);
+
+ return 0;
+}
+
+static void bfin_lq035q1_config_ppi(struct bfin_lq035q1fb_info *fbi)
+{
+ bfin_write_PPI_DELAY(H_START);
+ bfin_write_PPI_COUNT(H_ACTPIX - 1);
+ bfin_write_PPI_FRAME(V_LINES);
+
+ bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */
+ PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
+ PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */
+ PPI_PMODE | /* 8/16 bit data length / PACK_EN? */
+ PPI_POLS_1); /* falling edge syncs POLS */
+}
+
+static inline void bfin_lq035q1_disable_ppi(void)
+{
+ bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN);
+}
+
+static inline void bfin_lq035q1_enable_ppi(void)
+{
+ bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
+}
+
+static void bfin_lq035q1_start_timers(void)
+{
+ enable_gptimers(TIMER_VSYNCbit | TIMER_HSYNCbit);
+}
+
+static void bfin_lq035q1_stop_timers(void)
+{
+ disable_gptimers(TIMER_HSYNCbit | TIMER_VSYNCbit);
+
+ set_gptimer_status(0, TIMER_HSYNC_STATUS_TRUN | TIMER_VSYNC_STATUS_TRUN |
+ TIMER_HSYNC_STATUS_TIMIL | TIMER_VSYNC_STATUS_TIMIL |
+ TIMER_HSYNC_STATUS_TOVF | TIMER_VSYNC_STATUS_TOVF);
+
+}
+
+static void bfin_lq035q1_init_timers(void)
+{
+
+ bfin_lq035q1_stop_timers();
+
+ set_gptimer_period(TIMER_HSYNC_id, H_PERIOD);
+ set_gptimer_pwidth(TIMER_HSYNC_id, H_PULSE);
+ set_gptimer_config(TIMER_HSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
+ TIMER_TIN_SEL | TIMER_CLK_SEL|
+ TIMER_EMU_RUN);
+
+ set_gptimer_period(TIMER_VSYNC_id, V_PERIOD);
+ set_gptimer_pwidth(TIMER_VSYNC_id, V_PULSE);
+ set_gptimer_config(TIMER_VSYNC_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
+ TIMER_TIN_SEL | TIMER_CLK_SEL |
+ TIMER_EMU_RUN);
+
+}
+
+static void bfin_lq035q1_config_dma(struct bfin_lq035q1fb_info *fbi)
+{
+
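+ /* 2D autobuffer DMA: X counts 16-bit transfers per line, Y counts total lines including blanking */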
+ set_dma_config(CH_PPI,
+ set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
+ INTR_DISABLE, DIMENSION_2D,
+ DATA_SIZE_16,
+ DMA_NOSYNC_KEEP_DMA_BUF));
+ set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
+ set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
+ set_dma_y_count(CH_PPI, V_LINES);
+
+ set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
+ set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
+
+}
+
+#if (CLOCKS_PER_PIX == 1)
+static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
+ P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
+ P_PPI0_D6, P_PPI0_D7, P_PPI0_D8,
+ P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14,
+ P_PPI0_D15, 0};
+#else
+static const u16 ppi0_req_16[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
+ P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
+ P_PPI0_D6, P_PPI0_D7, 0};
+#endif
+
+static inline void bfin_lq035q1_free_ports(void)
+{
+ peripheral_free_list(ppi0_req_16);
+ if (ANOMALY_05000400)
+ gpio_free(P_IDENT(P_PPI0_FS3));
+}
+
+static int __devinit bfin_lq035q1_request_ports(struct platform_device *pdev)
+{
+ /* ANOMALY_05000400 - PPI Does Not Start Properly In Specific Mode:
+ * Drive PPI_FS3 Low
+ */
+ if (ANOMALY_05000400) {
+ int ret = gpio_request(P_IDENT(P_PPI0_FS3), "PPI_FS3");
+ if (ret)
+ return ret;
+ gpio_direction_output(P_IDENT(P_PPI0_FS3), 0);
+ }
+
+ if (peripheral_request_list(ppi0_req_16, DRIVER_NAME)) {
+ dev_err(&pdev->dev, "requesting peripherals failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_open(struct fb_info *info, int user)
+{
+ struct bfin_lq035q1fb_info *fbi = info->par;
+
+ spin_lock(&fbi->lock);
+ fbi->lq035_open_cnt++;
+
+ if (fbi->lq035_open_cnt <= 1) {
+
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+
+ bfin_lq035q1_config_dma(fbi);
+ bfin_lq035q1_config_ppi(fbi);
+ bfin_lq035q1_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_lq035q1_start_timers();
+ lq035q1_backlight(fbi, 1);
+ }
+
+ spin_unlock(&fbi->lock);
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_release(struct fb_info *info, int user)
+{
+ struct bfin_lq035q1fb_info *fbi = info->par;
+
+ spin_lock(&fbi->lock);
+
+ fbi->lq035_open_cnt--;
+
+ if (fbi->lq035_open_cnt <= 0) {
+ lq035q1_backlight(fbi, 0);
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+ disable_dma(CH_PPI);
+ bfin_lq035q1_stop_timers();
+ }
+
+ spin_unlock(&fbi->lock);
+
+ return 0;
+}
+
+static int bfin_lq035q1_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ switch (var->bits_per_pixel) {
+#if (LCD_BPP == 24)
+ case 24:/* TRUECOLOUR, 16m */
+#else
+ case 16:/* DIRECTCOLOUR, 64k */
+#endif
+ var->red.offset = info->var.red.offset;
+ var->green.offset = info->var.green.offset;
+ var->blue.offset = info->var.blue.offset;
+ var->red.length = info->var.red.length;
+ var->green.length = info->var.green.length;
+ var->blue.length = info->var.blue.length;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->transp.msb_right = 0;
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ break;
+ default:
+ pr_debug("%s: depth not supported: %u BPP\n", __func__,
+ var->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (info->var.xres != var->xres || info->var.yres != var->yres ||
+ info->var.xres_virtual != var->xres_virtual ||
+ info->var.yres_virtual != var->yres_virtual) {
+ pr_debug("%s: Resolution not supported: X%u x Y%u \n",
+ __func__, var->xres, var->yres);
+ return -EINVAL;
+ }
+
+ /*
+ * Memory limit
+ */
+
+ if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+ pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
+ __func__, var->yres_virtual);
+ return -ENOMEM;
+ }
+
+
+ return 0;
+}
+
+int bfin_lq035q1_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+{
+ if (nocursor)
+ return 0;
+ else
+ return -EINVAL; /* just to force soft_cursor() call */
+}
+
+static int bfin_lq035q1_fb_setcolreg(u_int regno, u_int red, u_int green,
+ u_int blue, u_int transp,
+ struct fb_info *info)
+{
+ if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES)
+ return -EINVAL;
+
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
+ }
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+
+ u32 value;
+ /* Place color in the pseudopalette */
+ if (regno > 16)
+ return -EINVAL;
+
+ red >>= (16 - info->var.red.length);
+ green >>= (16 - info->var.green.length);
+ blue >>= (16 - info->var.blue.length);
+
+ value = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset);
+ value &= 0xFFFFFF;
+
+ ((u32 *) (info->pseudo_palette))[regno] = value;
+
+ }
+
+ return 0;
+}
+
+static struct fb_ops bfin_lq035q1_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = bfin_lq035q1_fb_open,
+ .fb_release = bfin_lq035q1_fb_release,
+ .fb_check_var = bfin_lq035q1_fb_check_var,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_cursor = bfin_lq035q1_fb_cursor,
+ .fb_setcolreg = bfin_lq035q1_fb_setcolreg,
+};
+
+static irqreturn_t bfin_lq035q1_irq_error(int irq, void *dev_id)
+{
+ /*struct bfin_lq035q1fb_info *info = (struct bfin_lq035q1fb_info *)dev_id;*/
+
+ u16 status = bfin_read_PPI_STATUS();
+ bfin_write_PPI_STATUS(-1);
+
+ if (status) {
+ bfin_lq035q1_disable_ppi();
+ disable_dma(CH_PPI);
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_write_PPI_STATUS(-1);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit bfin_lq035q1_probe(struct platform_device *pdev)
+{
+ struct bfin_lq035q1fb_info *info;
+ struct fb_info *fbinfo;
+ int ret;
+
+ ret = request_dma(CH_PPI, DRIVER_NAME"_CH_PPI");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "PPI DMA unavailable\n");
+ goto out1;
+ }
+
+ fbinfo = framebuffer_alloc(sizeof(*info), &pdev->dev);
+ if (!fbinfo) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ info = fbinfo->par;
+ info->fb = fbinfo;
+ info->dev = &pdev->dev;
+
+ info->disp_info = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, fbinfo);
+
+ strcpy(fbinfo->fix.id, DRIVER_NAME);
+
+ fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbinfo->fix.type_aux = 0;
+ fbinfo->fix.xpanstep = 0;
+ fbinfo->fix.ypanstep = 0;
+ fbinfo->fix.ywrapstep = 0;
+ fbinfo->fix.accel = FB_ACCEL_NONE;
+ fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;
+
+ fbinfo->var.nonstd = 0;
+ fbinfo->var.activate = FB_ACTIVATE_NOW;
+ fbinfo->var.height = -1;
+ fbinfo->var.width = -1;
+ fbinfo->var.accel_flags = 0;
+ fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
+
+ fbinfo->var.xres = LCD_X_RES;
+ fbinfo->var.xres_virtual = LCD_X_RES;
+ fbinfo->var.yres = LCD_Y_RES;
+ fbinfo->var.yres_virtual = LCD_Y_RES;
+ fbinfo->var.bits_per_pixel = LCD_BPP;
+
+ if (info->disp_info->mode & LQ035_BGR) {
+#if (LCD_BPP == 24)
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 16;
+#else
+ fbinfo->var.red.offset = 0;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 11;
+#endif
+ } else {
+#if (LCD_BPP == 24)
+ fbinfo->var.red.offset = 16;
+ fbinfo->var.green.offset = 8;
+ fbinfo->var.blue.offset = 0;
+#else
+ fbinfo->var.red.offset = 11;
+ fbinfo->var.green.offset = 5;
+ fbinfo->var.blue.offset = 0;
+#endif
+ }
+
+ fbinfo->var.transp.offset = 0;
+
+#if (LCD_BPP == 24)
+ fbinfo->var.red.length = 8;
+ fbinfo->var.green.length = 8;
+ fbinfo->var.blue.length = 8;
+#else
+ fbinfo->var.red.length = 5;
+ fbinfo->var.green.length = 6;
+ fbinfo->var.blue.length = 5;
+#endif
+
+ fbinfo->var.transp.length = 0;
+
+ fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8
+ + ACTIVE_VIDEO_MEM_OFFSET;
+
+ fbinfo->fix.line_length = fbinfo->var.xres_virtual *
+ fbinfo->var.bits_per_pixel / 8;
+
+
+ fbinfo->fbops = &bfin_lq035q1_fb_ops;
+ fbinfo->flags = FBINFO_FLAG_DEFAULT;
+
+ info->fb_buffer =
+ dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
+ GFP_KERNEL);
+
+ if (NULL == info->fb_buffer) {
+ dev_err(&pdev->dev, "couldn't allocate dma buffer\n");
+ ret = -ENOMEM;
+ goto out3;
+ }
+
+ fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+ fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
+
+ fbinfo->fbops = &bfin_lq035q1_fb_ops;
+
+ fbinfo->pseudo_palette = &info->pseudo_pal;
+
+ ret = fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate colormap (%d entries)\n",
+ BFIN_LCD_NBR_PALETTE_ENTRIES);
+ goto out4;
+ }
+
+ ret = bfin_lq035q1_request_ports(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request gpio port\n");
+ goto out6;
+ }
+
+ info->irq = platform_get_irq(pdev, 0);
+ if (info->irq < 0) {
+ ret = -EINVAL;
+ goto out7;
+ }
+
+ ret = request_irq(info->irq, bfin_lq035q1_irq_error, IRQF_DISABLED,
+ DRIVER_NAME" PPI ERROR", info);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to request PPI ERROR IRQ\n");
+ goto out7;
+ }
+
+ info->spidrv.driver.name = DRIVER_NAME"-spi";
+ info->spidrv.probe = lq035q1_spidev_probe;
+ info->spidrv.remove = __devexit_p(lq035q1_spidev_remove);
+ info->spidrv.shutdown = lq035q1_spidev_shutdown;
+ info->spidrv.suspend = lq035q1_spidev_suspend;
+ info->spidrv.resume = lq035q1_spidev_resume;
+
+ ret = spi_register_driver(&info->spidrv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't register SPI Interface\n");
+ goto out8;
+ }
+
+ if (info->disp_info->use_bl) {
+ ret = gpio_request(info->disp_info->gpio_bl, "LQ035 Backlight");
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request GPIO %d\n",
+ info->disp_info->gpio_bl);
+ goto out9;
+ }
+ gpio_direction_output(info->disp_info->gpio_bl, 0);
+ }
+
+ ret = register_framebuffer(fbinfo);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register framebuffer\n");
+ goto out10;
+ }
+
+ dev_info(&pdev->dev, "%dx%d %d-bit RGB FrameBuffer initialized\n",
+ LCD_X_RES, LCD_Y_RES, LCD_BPP);
+
+ return 0;
+
+ out10:
+ if (info->disp_info->use_bl)
+ gpio_free(info->disp_info->gpio_bl);
+ out9:
+ spi_unregister_driver(&info->spidrv);
+ out8:
+ free_irq(info->irq, info);
+ out7:
+ bfin_lq035q1_free_ports();
+ out6:
+ fb_dealloc_cmap(&fbinfo->cmap);
+ out4:
+ dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+ info->dma_handle);
+ out3:
+ framebuffer_release(fbinfo);
+ out2:
+ free_dma(CH_PPI);
+ out1:
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int __devexit bfin_lq035q1_remove(struct platform_device *pdev)
+{
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->disp_info->use_bl)
+ gpio_free(info->disp_info->gpio_bl);
+
+ spi_unregister_driver(&info->spidrv);
+
+ unregister_framebuffer(fbinfo);
+
+ free_dma(CH_PPI);
+ free_irq(info->irq, info);
+
+ if (info->fb_buffer != NULL)
+ dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
+ info->dma_handle);
+
+ fb_dealloc_cmap(&fbinfo->cmap);
+
+ bfin_lq035q1_free_ports();
+
+ platform_set_drvdata(pdev, NULL);
+ framebuffer_release(fbinfo);
+
+ dev_info(&pdev->dev, "unregistered LCD driver\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_lq035q1_suspend(struct device *dev)
+{
+ struct fb_info *fbinfo = dev_get_drvdata(dev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->lq035_open_cnt) {
+ lq035q1_backlight(info, 0);
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+ disable_dma(CH_PPI);
+ bfin_lq035q1_stop_timers();
+ bfin_write_PPI_STATUS(-1);
+ }
+
+ return 0;
+}
+
+static int bfin_lq035q1_resume(struct device *dev)
+{
+ struct fb_info *fbinfo = dev_get_drvdata(dev);
+ struct bfin_lq035q1fb_info *info = fbinfo->par;
+
+ if (info->lq035_open_cnt) {
+ bfin_lq035q1_disable_ppi();
+ SSYNC();
+
+ bfin_lq035q1_config_dma(info);
+ bfin_lq035q1_config_ppi(info);
+ bfin_lq035q1_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_lq035q1_enable_ppi();
+ bfin_lq035q1_start_timers();
+ lq035q1_backlight(info, 1);
+ }
+
+ return 0;
+}
+
+static struct dev_pm_ops bfin_lq035q1_dev_pm_ops = {
+ .suspend = bfin_lq035q1_suspend,
+ .resume = bfin_lq035q1_resume,
+};
+#endif
+
+static struct platform_driver bfin_lq035q1_driver = {
+ .probe = bfin_lq035q1_probe,
+ .remove = __devexit_p(bfin_lq035q1_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+#ifdef CONFIG_PM
+ .pm = &bfin_lq035q1_dev_pm_ops,
+#endif
+ },
+};
+
+static int __init bfin_lq035q1_driver_init(void)
+{
+ return platform_driver_register(&bfin_lq035q1_driver);
+}
+module_init(bfin_lq035q1_driver_init);
+
+static void __exit bfin_lq035q1_driver_cleanup(void)
+{
+ platform_driver_unregister(&bfin_lq035q1_driver);
+}
+module_exit(bfin_lq035q1_driver_cleanup);
+
+MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 5cc36cfbf07..2549c53b26a 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -487,8 +487,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
fbinfo->var.nonstd = 0;
fbinfo->var.activate = FB_ACTIVATE_NOW;
- fbinfo->var.height = -1;
- fbinfo->var.width = -1;
+ fbinfo->var.height = 53;
+ fbinfo->var.width = 70;
fbinfo->var.accel_flags = 0;
fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
@@ -634,17 +634,35 @@ static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
{
- bfin_t350mcqb_disable_ppi();
- disable_dma(CH_PPI);
- bfin_write_PPI_STATUS(0xFFFF);
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
+
+ if (fbi->lq043_open_cnt) {
+ bfin_t350mcqb_disable_ppi();
+ disable_dma(CH_PPI);
+ bfin_t350mcqb_stop_timers();
+ bfin_write_PPI_STATUS(-1);
+ }
+
return 0;
}
static int bfin_t350mcqb_resume(struct platform_device *pdev)
{
- enable_dma(CH_PPI);
- bfin_t350mcqb_enable_ppi();
+ struct fb_info *fbinfo = platform_get_drvdata(pdev);
+ struct bfin_t350mcqbfb_info *fbi = fbinfo->par;
+
+ if (fbi->lq043_open_cnt) {
+ bfin_t350mcqb_config_dma(fbi);
+ bfin_t350mcqb_config_ppi(fbi);
+ bfin_t350mcqb_init_timers();
+
+ /* start dma */
+ enable_dma(CH_PPI);
+ bfin_t350mcqb_enable_ppi();
+ bfin_t350mcqb_start_timers();
+ }
return 0;
}
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c
index 16f5db471ab..99b354b8e25 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/clps711xfb.c
@@ -19,8 +19,10 @@
*
* Framebuffer driver for the CLPS7111 and EP7212 processors.
*/
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/init.h>
@@ -38,14 +40,6 @@ struct fb_info *cfb;
#define CMAP_MAX_SIZE 16
-/* The /proc entry for the backlight. */
-static struct proc_dir_entry *clps7111fb_backlight_proc_entry = NULL;
-
-static int clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int clps7111fb_proc_backlight_write(struct file *file,
- const char *buffer, unsigned long count, void *data);
-
/*
* LCD AC Prescale. This comes from the LCD panel manufacturers specifications.
* This determines how many clocks + 1 of CL1 before the M signal toggles.
@@ -221,26 +215,23 @@ static struct fb_ops clps7111fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int
-clps7111fb_proc_backlight_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int backlight_proc_show(struct seq_file *m, void *v)
{
- /* We need at least two characters, one for the digit, and one for
- * the terminating NULL. */
- if (count < 2)
- return -EINVAL;
-
if (machine_is_edb7211()) {
- return sprintf(page, "%d\n",
+ seq_printf(m, "%d\n",
(clps_readb(PDDR) & EDB_PD3_LCDBL) ? 1 : 0);
}
return 0;
}
-static int
-clps7111fb_proc_backlight_write(struct file *file, const char *buffer,
- unsigned long count, void *data)
+static int backlight_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, backlight_proc_show, NULL);
+}
+
+static ssize_t backlight_proc_write(struct file *file, const char *buffer,
+ size_t count, loff_t *pos)
{
unsigned char char_value;
int value;
@@ -271,6 +262,15 @@ clps7111fb_proc_backlight_write(struct file *file, const char *buffer,
return count;
}
+static const struct file_operations backlight_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = backlight_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = backlight_proc_write,
+};
+
static void __init clps711x_guess_lcd_params(struct fb_info *info)
{
unsigned int lcdcon, syscon, size;
@@ -379,19 +379,11 @@ int __init clps711xfb_init(void)
fb_alloc_cmap(&cfb->cmap, CMAP_MAX_SIZE, 0);
- /* Register the /proc entries. */
- clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444,
- NULL);
- if (clps7111fb_backlight_proc_entry == NULL) {
+ if (!proc_create("backlight", 0444, NULL, &backlight_proc_fops)) {
printk("Couldn't create the /proc entry for the backlight.\n");
return -EINVAL;
}
- clps7111fb_backlight_proc_entry->read_proc =
- &clps7111fb_proc_backlight_read;
- clps7111fb_backlight_proc_entry->write_proc =
- &clps7111fb_proc_backlight_write;
-
/*
* Power up the LCD
*/
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index ea1fd3f4751..369a5b3ac64 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -28,6 +28,8 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/console.h>
#include <video/da8xx-fb.h>
#define DRIVER_NAME "da8xx_lcdc"
@@ -113,6 +115,12 @@ struct da8xx_fb_par {
unsigned short pseudo_palette[16];
unsigned int databuf_sz;
unsigned int palette_sz;
+ unsigned int pxl_clk;
+ int blank;
+#ifdef CONFIG_CPU_FREQ
+ struct notifier_block freq_transition;
+#endif
+ void (*panel_power_ctrl)(int);
};
/* Variable Screen Information */
@@ -155,7 +163,7 @@ struct da8xx_panel {
int vfp; /* Vertical front porch */
int vbp; /* Vertical back porch */
int vsw; /* Vertical Sync Pulse Width */
- int pxl_clk; /* Pixel clock */
+ unsigned int pxl_clk; /* Pixel clock */
unsigned char invert_pxl_clk; /* Invert Pixel clock */
};
@@ -171,7 +179,7 @@ static struct da8xx_panel known_lcd_panels[] = {
.vfp = 2,
.vbp = 2,
.vsw = 0,
- .pxl_clk = 0x10,
+ .pxl_clk = 4608000,
.invert_pxl_clk = 1,
},
/* Sharp LK043T1DG01 */
@@ -185,13 +193,23 @@ static struct da8xx_panel known_lcd_panels[] = {
.vfp = 2,
.vbp = 2,
.vsw = 10,
- .pxl_clk = 0x12,
+ .pxl_clk = 7833600,
.invert_pxl_clk = 0,
},
};
+/* Enable the Raster Engine of the LCD Controller */
+static inline void lcd_enable_raster(void)
+{
+ u32 reg;
+
+ reg = lcdc_read(LCD_RASTER_CTRL_REG);
+ if (!(reg & LCD_RASTER_ENABLE))
+ lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+}
+
/* Disable the Raster Engine of the LCD Controller */
-static void lcd_disable_raster(struct da8xx_fb_par *par)
+static inline void lcd_disable_raster(void)
{
u32 reg;
@@ -443,14 +461,25 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
static void lcd_reset(struct da8xx_fb_par *par)
{
/* Disable the Raster if previously Enabled */
- if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE)
- lcd_disable_raster(par);
+ lcd_disable_raster();
/* DMA has to be disabled */
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
}
+static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
+{
+ unsigned int lcd_clk, div;
+
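+ /* Derive an integer divider from the LCDC functional clock and the panel pixel clock */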
+ lcd_clk = clk_get_rate(par->lcdc_clk);
+ div = lcd_clk / par->pxl_clk;
+
+ /* Configure the LCD clock divisor. */
+ lcdc_write(LCD_CLK_DIVISOR(div) |
+ (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
+}
+
static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
struct da8xx_panel *panel)
{
@@ -459,9 +488,8 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
lcd_reset(par);
- /* Configure the LCD clock divisor. */
- lcdc_write(LCD_CLK_DIVISOR(panel->pxl_clk) |
- (LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
+ /* Calculate the divider */
+ lcd_calc_clk_divider(par);
if (panel->invert_pxl_clk)
lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
@@ -513,13 +541,11 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
static irqreturn_t lcdc_irq_handler(int irq, void *arg)
{
u32 stat = lcdc_read(LCD_STAT_REG);
- u32 reg;
if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
- reg = lcdc_read(LCD_RASTER_CTRL_REG);
- lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ lcd_disable_raster();
lcdc_write(stat, LCD_STAT_REG);
- lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ lcd_enable_raster();
} else
lcdc_write(stat, LCD_STAT_REG);
@@ -574,6 +600,38 @@ static int fb_check_var(struct fb_var_screeninfo *var,
return err;
}
+#ifdef CONFIG_CPU_FREQ
+static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct da8xx_fb_par *par;
+
+ par = container_of(nb, struct da8xx_fb_par, freq_transition);
+ if (val == CPUFREQ_PRECHANGE) {
+ lcd_disable_raster();
+ } else if (val == CPUFREQ_POSTCHANGE) {
+ lcd_calc_clk_divider(par);
+ lcd_enable_raster();
+ }
+
+ return 0;
+}
+
+static inline int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par)
+{
+ par->freq_transition.notifier_call = lcd_da8xx_cpufreq_transition;
+
+ return cpufreq_register_notifier(&par->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
+{
+ cpufreq_unregister_notifier(&par->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+#endif
+
static int __devexit fb_remove(struct platform_device *dev)
{
struct fb_info *info = dev_get_drvdata(&dev->dev);
@@ -581,8 +639,13 @@ static int __devexit fb_remove(struct platform_device *dev)
if (info) {
struct da8xx_fb_par *par = info->par;
- if (lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE)
- lcd_disable_raster(par);
+#ifdef CONFIG_CPU_FREQ
+ lcd_da8xx_cpufreq_deregister(par);
+#endif
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+ lcd_disable_raster();
lcdc_write(0, LCD_RASTER_CTRL_REG);
/* disable DMA */
@@ -639,6 +702,35 @@ static int fb_ioctl(struct fb_info *info, unsigned int cmd,
return 0;
}
+static int cfb_blank(int blank, struct fb_info *info)
+{
+ struct da8xx_fb_par *par = info->par;
+ int ret = 0;
+
+ if (par->blank == blank)
+ return 0;
+
+ par->blank = blank;
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(1);
+
+ lcd_enable_raster();
+ break;
+ case FB_BLANK_POWERDOWN:
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+ lcd_disable_raster();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static struct fb_ops da8xx_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = fb_check_var,
@@ -647,6 +739,7 @@ static struct fb_ops da8xx_fb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_blank = cfb_blank,
};
static int __init fb_probe(struct platform_device *device)
@@ -721,6 +814,12 @@ static int __init fb_probe(struct platform_device *device)
}
par = da8xx_fb_info->par;
+ par->lcdc_clk = fb_clk;
+ par->pxl_clk = lcdc_info->pxl_clk;
+ if (fb_pdata->panel_power_ctrl) {
+ par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
+ par->panel_power_ctrl(1);
+ }
if (lcd_init(par, lcd_cfg, lcdc_info) < 0) {
dev_err(&device->dev, "lcd_init failed\n");
@@ -754,8 +853,6 @@ static int __init fb_probe(struct platform_device *device)
da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
- par->lcdc_clk = fb_clk;
-
par->irq = platform_get_irq(device, 0);
if (par->irq < 0) {
ret = -ENOENT;
@@ -814,12 +911,24 @@ static int __init fb_probe(struct platform_device *device)
goto err_dealloc_cmap;
}
+#ifdef CONFIG_CPU_FREQ
+ ret = lcd_da8xx_cpufreq_register(par);
+ if (ret) {
+ dev_err(&device->dev, "failed to register cpufreq\n");
+ goto err_cpu_freq;
+ }
+#endif
+
/* enable raster engine */
- lcdc_write(lcdc_read(LCD_RASTER_CTRL_REG) |
- LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
+ lcd_enable_raster();
return 0;
+#ifdef CONFIG_CPU_FREQ
+err_cpu_freq:
+ unregister_framebuffer(da8xx_fb_info);
+#endif
+
err_dealloc_cmap:
fb_dealloc_cmap(&da8xx_fb_info->cmap);
@@ -852,11 +961,35 @@ err_request_mem:
#ifdef CONFIG_PM
static int fb_suspend(struct platform_device *dev, pm_message_t state)
{
- return -EBUSY;
+ struct fb_info *info = platform_get_drvdata(dev);
+ struct da8xx_fb_par *par = info->par;
+
+ acquire_console_sem();
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(0);
+
+ fb_set_suspend(info, 1);
+ lcd_disable_raster();
+ clk_disable(par->lcdc_clk);
+ release_console_sem();
+
+ return 0;
}
static int fb_resume(struct platform_device *dev)
{
- return -EBUSY;
+ struct fb_info *info = platform_get_drvdata(dev);
+ struct da8xx_fb_par *par = info->par;
+
+ acquire_console_sem();
+ if (par->panel_power_ctrl)
+ par->panel_power_ctrl(1);
+
+ clk_enable(par->lcdc_clk);
+ lcd_enable_raster();
+ fb_set_suspend(info, 0);
+ release_console_sem();
+
+ return 0;
}
#else
#define fb_suspend NULL
diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c
index 4830b1bf51e..80abbf323b9 100644
--- a/drivers/video/display/display-sysfs.c
+++ b/drivers/video/display/display-sysfs.c
@@ -67,7 +67,7 @@ static ssize_t display_store_contrast(struct device *dev,
contrast = simple_strtoul(buf, &endp, 0);
size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index bd9d46f9529..27aab4a0619 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -358,6 +358,8 @@ static int ep93xxfb_setcolreg(unsigned int regno, unsigned int red,
switch (info->fix.visual) {
case FB_VISUAL_PSEUDOCOLOR:
+ if (regno > 255)
+ return 1;
rgb = ((red & 0xff00) << 8) | (green & 0xff00) |
((blue & 0xff00) >> 8);
diff --git a/drivers/video/geode/display_gx.c b/drivers/video/geode/display_gx.c
index e759895bf3d..f0af911a096 100644
--- a/drivers/video/geode/display_gx.c
+++ b/drivers/video/geode/display_gx.c
@@ -17,7 +17,7 @@
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/delay.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "gxfb.h"
@@ -25,7 +25,7 @@ unsigned int gx_frame_buffer_size(void)
{
unsigned int val;
- if (!geode_has_vsa2()) {
+ if (!cs5535_has_vsa2()) {
uint32_t hi, lo;
/* The number of pages is (PMAX - PMIN)+1 */
diff --git a/drivers/video/geode/gxfb.h b/drivers/video/geode/gxfb.h
index 16a96f8fd8c..d19e9378b0c 100644
--- a/drivers/video/geode/gxfb.h
+++ b/drivers/video/geode/gxfb.h
@@ -340,7 +340,7 @@ static inline void write_fp(struct gxfb_par *par, int reg, uint32_t val)
}
-/* MSRs are defined in asm/geode.h; their bitfields are here */
+/* MSRs are defined in linux/cs5535.h; their bitfields are here */
#define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 (1 << 3)
#define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2 (1 << 2)
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 2552cac39e1..b3e639d1e12 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -32,7 +32,7 @@
#include <linux/suspend.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "gxfb.h"
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index 6a51448fd3f..cc781c00f75 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -1,3 +1,13 @@
+/* Geode LX framebuffer driver
+ *
+ * Copyright (C) 2006-2007, Advanced Micro Devices, Inc.
+ * Copyright (c) 2008 Andres Salomon <dilinger@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
#ifndef _LXFB_H_
#define _LXFB_H_
@@ -409,7 +419,7 @@ static inline void write_fp(struct lxfb_par *par, int reg, uint32_t val)
}
-/* MSRs are defined in asm/geode.h; their bitfields are here */
+/* MSRs are defined in linux/cs5535.h; their bitfields are here */
#define MSR_GLCP_DOTPLL_LOCK (1 << 25) /* r/o */
#define MSR_GLCP_DOTPLL_HALFPIX (1 << 24)
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index b1cd49c9935..0e5d8c7c3eb 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -13,7 +13,7 @@
#include <linux/fb.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "lxfb.h"
@@ -307,7 +307,7 @@ unsigned int lx_framebuffer_size(void)
{
unsigned int val;
- if (!geode_has_vsa2()) {
+ if (!cs5535_has_vsa2()) {
uint32_t hi, lo;
/* The number of pages is (PMAX - PMIN)+1 */
diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/geode/suspend_gx.c
index 9aff32ef8bb..1bb043d70c6 100644
--- a/drivers/video/geode/suspend_gx.c
+++ b/drivers/video/geode/suspend_gx.c
@@ -10,7 +10,7 @@
#include <linux/fb.h>
#include <asm/io.h>
#include <asm/msr.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include <asm/delay.h>
#include "gxfb.h"
diff --git a/drivers/video/geode/video_gx.c b/drivers/video/geode/video_gx.c
index b8d52a8360d..6082f653c68 100644
--- a/drivers/video/geode/video_gx.c
+++ b/drivers/video/geode/video_gx.c
@@ -16,7 +16,7 @@
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/msr.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#include "gxfb.h"
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index e7116a6d82d..73c83a8de2d 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -456,7 +456,7 @@ static int hitfb_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops hitfb_dev_pm_ops = {
+static const struct dev_pm_ops hitfb_dev_pm_ops = {
.suspend = hitfb_suspend,
.resume = hitfb_resume,
};
diff --git a/drivers/video/i810/i810_dvt.c b/drivers/video/i810/i810_dvt.c
index 27fa703a2e0..b4b3670667a 100644
--- a/drivers/video/i810/i810_dvt.c
+++ b/drivers/video/i810/i810_dvt.c
@@ -212,24 +212,29 @@ inline void round_off_yres(u32 *xres, u32 *yres)
*yres = (*xres * 3) >> 2;
}
-void i810fb_encode_registers(const struct fb_var_screeninfo *var,
- struct i810fb_par *par, u32 xres, u32 yres)
+static int i810fb_find_best_mode(u32 xres, u32 yres, u32 pixclock)
{
u32 diff = 0, diff_best = 0xFFFFFFFF, i = 0, i_best = 0;
- u8 hfl;
+ u8 hfl = (u8) ((xres >> 3) - 1);
- hfl = (u8) ((xres >> 3) - 1);
for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
if (std_modes[i].cr01 == hfl) {
- if (std_modes[i].pixclock <= par->regs.pixclock)
- diff = par->regs.pixclock -
- std_modes[i].pixclock;
+ if (std_modes[i].pixclock <= pixclock)
+ diff = pixclock - std_modes[i].pixclock;
if (diff < diff_best) {
i_best = i;
diff_best = diff;
}
}
}
+ return i_best;
+}
+
+void i810fb_encode_registers(const struct fb_var_screeninfo *var,
+ struct i810fb_par *par, u32 xres, u32 yres)
+{
+ u32 i_best = i810fb_find_best_mode(xres, yres, par->regs.pixclock);
+
par->regs = std_modes[i_best];
/* overlay */
@@ -239,36 +244,36 @@ void i810fb_encode_registers(const struct fb_var_screeninfo *var,
void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
{
- struct i810fb_par par;
u32 total, xres, yres;
+ u32 mode, pixclock;
xres = var->xres;
yres = var->yres;
- par.regs.pixclock = 1000000000/var->pixclock;
- i810fb_encode_registers(var, &par, xres, yres);
+ pixclock = 1000000000 / var->pixclock;
+ mode = i810fb_find_best_mode(xres, yres, pixclock);
- total = ((par.regs.cr00 | (par.regs.cr35 & 1) << 8) + 3) << 3;
+ total = (std_modes[mode].cr00 | (std_modes[mode].cr35 & 1) << 8) + 3;
+ total <<= 3;
- var->pixclock = 1000000000/par.regs.pixclock;
- var->right_margin = (par.regs.cr04 << 3) - xres;
- var->hsync_len = ((par.regs.cr05 & 0x1F) -
- (par.regs.cr04 & 0x1F)) << 3;
+ var->pixclock = 1000000000 / std_modes[mode].pixclock;
+ var->right_margin = (std_modes[mode].cr04 << 3) - xres;
+ var->hsync_len = ((std_modes[mode].cr05 & 0x1F) -
+ (std_modes[mode].cr04 & 0x1F)) << 3;
var->left_margin = (total - (xres + var->right_margin +
var->hsync_len));
var->sync = FB_SYNC_ON_GREEN;
- if (~(par.regs.msr & (1 << 6)))
+ if (~(std_modes[mode].msr & (1 << 6)))
var->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (~(par.regs.msr & (1 << 7)))
+ if (~(std_modes[mode].msr & (1 << 7)))
var->sync |= FB_SYNC_VERT_HIGH_ACT;
-
- total = ((par.regs.cr06 | (par.regs.cr30 & 0x0F) << 8)) + 2;
- var->lower_margin = (par.regs.cr10 |
- (par.regs.cr32 & 0x0F) << 8) - yres;
- var->vsync_len = (par.regs.cr11 & 0x0F) - (var->lower_margin & 0x0F);
- var->upper_margin = total - (yres + var->lower_margin +
- var->vsync_len);
+ total = (std_modes[mode].cr06 | (std_modes[mode].cr30 & 0xF) << 8) + 2;
+ var->lower_margin = (std_modes[mode].cr10 |
+ (std_modes[mode].cr32 & 0x0F) << 8) - yres;
+ var->vsync_len = (std_modes[mode].cr11 & 0x0F) -
+ (var->lower_margin & 0x0F);
+ var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
}
u32 i810_get_watermark(struct fb_var_screeninfo *var,
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 0cafd642fbc..5ba39999105 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -874,6 +874,9 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
if (bailearly == 18)
bailout(dinfo);
+ /* read active pipe */
+ dinfo->pipe = intelfbhw_active_pipe(&dinfo->save_state);
+
/* Cursor initialisation */
if (dinfo->hwcursor) {
intelfbhw_cursor_init(dinfo);
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
index 0689f97c523..81627466804 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/intelfb/intelfbhw.c
@@ -469,6 +469,32 @@ void intelfbhw_do_blank(int blank, struct fb_info *info)
}
+/* Check which pipe is connected to an active display plane. */
+int intelfbhw_active_pipe(const struct intelfb_hwstate *hw)
+{
+ int pipe = -1;
+
+ /* keep old default behaviour - prefer PIPE_A */
+ if (hw->disp_b_ctrl & DISPPLANE_PLANE_ENABLE) {
+ pipe = (hw->disp_b_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
+ pipe &= PIPE_MASK;
+ if (unlikely(pipe == PIPE_A))
+ return PIPE_A;
+ }
+ if (hw->disp_a_ctrl & DISPPLANE_PLANE_ENABLE) {
+ pipe = (hw->disp_a_ctrl >> DISPPLANE_SEL_PIPE_SHIFT);
+ pipe &= PIPE_MASK;
+ if (likely(pipe == PIPE_A))
+ return PIPE_A;
+ }
+ /* Impossible that no pipe is selected - return PIPE_A */
+ WARN_ON(pipe == -1);
+ if (unlikely(pipe == -1))
+ pipe = PIPE_A;
+
+ return pipe;
+}
+
void intelfbhw_setcolreg(struct intelfb_info *dinfo, unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp)
@@ -1019,7 +1045,7 @@ int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
struct intelfb_hwstate *hw,
struct fb_var_screeninfo *var)
{
- int pipe = PIPE_A;
+ int pipe = intelfbhw_active_pipe(hw);
u32 *dpll, *fp0, *fp1;
u32 m1, m2, n, p1, p2, clock_target, clock;
u32 hsync_start, hsync_end, hblank_start, hblank_end, htotal, hactive;
@@ -1033,12 +1059,6 @@ int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
/* Disable VGA */
hw->vgacntrl |= VGA_DISABLE;
- /* Check whether pipe A or pipe B is enabled. */
- if (hw->pipe_a_conf & PIPECONF_ENABLE)
- pipe = PIPE_A;
- else if (hw->pipe_b_conf & PIPECONF_ENABLE)
- pipe = PIPE_B;
-
/* Set which pipe's registers will be set. */
if (pipe == PIPE_B) {
dpll = &hw->dpll_b;
@@ -1262,7 +1282,6 @@ int intelfbhw_mode_to_hw(struct intelfb_info *dinfo,
int intelfbhw_program_mode(struct intelfb_info *dinfo,
const struct intelfb_hwstate *hw, int blank)
{
- int pipe = PIPE_A;
u32 tmp;
const u32 *dpll, *fp0, *fp1, *pipe_conf;
const u32 *hs, *ht, *hb, *vs, *vt, *vb, *ss;
@@ -1272,7 +1291,7 @@ int intelfbhw_program_mode(struct intelfb_info *dinfo,
u32 src_size_reg;
u32 count, tmp_val[3];
- /* Assume single pipe, display plane A, analog CRT. */
+ /* Assume single pipe */
#if VERBOSE > 0
DBG_MSG("intelfbhw_program_mode\n");
@@ -1283,15 +1302,9 @@ int intelfbhw_program_mode(struct intelfb_info *dinfo,
tmp |= VGA_DISABLE;
OUTREG(VGACNTRL, tmp);
- /* Check whether pipe A or pipe B is enabled. */
- if (hw->pipe_a_conf & PIPECONF_ENABLE)
- pipe = PIPE_A;
- else if (hw->pipe_b_conf & PIPECONF_ENABLE)
- pipe = PIPE_B;
-
- dinfo->pipe = pipe;
+ dinfo->pipe = intelfbhw_active_pipe(hw);
- if (pipe == PIPE_B) {
+ if (dinfo->pipe == PIPE_B) {
dpll = &hw->dpll_b;
fp0 = &hw->fpb0;
fp1 = &hw->fpb1;
diff --git a/drivers/video/intelfb/intelfbhw.h b/drivers/video/intelfb/intelfbhw.h
index 0b076bac321..216ca20f259 100644
--- a/drivers/video/intelfb/intelfbhw.h
+++ b/drivers/video/intelfb/intelfbhw.h
@@ -604,5 +604,6 @@ extern void intelfbhw_cursor_reset(struct intelfb_info *dinfo);
extern int intelfbhw_enable_irq(struct intelfb_info *dinfo);
extern void intelfbhw_disable_irq(struct intelfb_info *dinfo);
extern int intelfbhw_wait_for_vsync(struct intelfb_info *dinfo, u32 pipe);
+extern int intelfbhw_active_pipe(const struct intelfb_hwstate *hw);
#endif /* _INTELFBHW_H */
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index 09f6e045d5b..c15f8a57498 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -368,7 +368,8 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
M1064_XDVICLKCTRL_C1DVICLKEN |
M1064_XDVICLKCTRL_DVILOOPCTL |
M1064_XDVICLKCTRL_P1LOOPBWDTCTL;
- matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp);
+ /* Setting this breaks PC systems so don't do it */
+ /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */
matroxfb_DAC_out(minfo, M1064_XPWRCTRL,
xpwrctrl);
diff --git a/drivers/video/maxinefb.c b/drivers/video/maxinefb.c
index 5e91c2b30af..7854c7a37dc 100644
--- a/drivers/video/maxinefb.c
+++ b/drivers/video/maxinefb.c
@@ -92,6 +92,9 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
/* value to be written into the palette reg. */
unsigned long hw_colorvalue = 0;
+ if (regno > 255)
+ return 1;
+
red >>= 8; /* The cmap fields are 16 bits */
	green >>= 8;	/* wide, but the hardware colormap */
blue >>= 8; /* registers are only 8 bits wide */
diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/mb862xx/Makefile
index 07664814bb1..d7777714166 100644
--- a/drivers/video/mb862xx/Makefile
+++ b/drivers/video/mb862xx/Makefile
@@ -2,4 +2,4 @@
# Makefile for the MB862xx framebuffer driver
#
-obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o
+obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o mb862xxfb_accel.o
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index a28e3cfbbf7..fabb0c59a21 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -214,6 +214,8 @@ static int mb862xxfb_set_par(struct fb_info *fbi)
unsigned long reg, sc;
dev_dbg(par->dev, "%s\n", __func__);
+ if (par->type == BT_CORALP)
+ mb862xxfb_init_accel(fbi, fbi->var.xres);
if (par->pre_init)
return 0;
@@ -453,6 +455,18 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
ptr += sprintf(ptr, "%08x = %08x\n",
reg, inreg(disp, reg));
+ for (reg = 0x400; reg <= 0x410; reg += 4)
+ ptr += sprintf(ptr, "geo %08x = %08x\n",
+ reg, inreg(geo, reg));
+
+ for (reg = 0x400; reg <= 0x410; reg += 4)
+ ptr += sprintf(ptr, "draw %08x = %08x\n",
+ reg, inreg(draw, reg));
+
+ for (reg = 0x440; reg <= 0x450; reg += 4)
+ ptr += sprintf(ptr, "draw %08x = %08x\n",
+ reg, inreg(draw, reg));
+
return ptr - buf;
}
diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/mb862xx/mb862xxfb.h
index c4c8f4dd221..d7e7cb76bbf 100644
--- a/drivers/video/mb862xx/mb862xxfb.h
+++ b/drivers/video/mb862xx/mb862xxfb.h
@@ -61,6 +61,8 @@ struct mb862xxfb_par {
u32 pseudo_palette[16];
};
+extern void mb862xxfb_init_accel(struct fb_info *info, int xres);
+
#if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC)
#error "Select Lime GDC or CoralP/Carmine support, but not both together"
#endif
diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
new file mode 100644
index 00000000000..049256052b1
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
@@ -0,0 +1,331 @@
+/*
+ * drivers/video/mb862xx/mb862xxfb_accel.c
+ *
+ * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support
+ *
+ * (C) 2007 Alexander Shishkin <virtuoso@slind.org>
+ * (C) 2009 Valentin Sitdikov <valentin.sitdikov@siemens.com>
+ * (C) 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#if defined(CONFIG_OF)
+#include <linux/of_platform.h>
+#endif
+#include "mb862xxfb.h"
+#include "mb862xx_reg.h"
+#include "mb862xxfb_accel.h"
+
+static void mb862xxfb_write_fifo(u32 count, u32 *data, struct fb_info *info)
+{
+ struct mb862xxfb_par *par = info->par;
+ static u32 free;
+
+ u32 total = 0;
+ while (total < count) {
+ if (free) {
+ outreg(geo, GDC_GEO_REG_INPUT_FIFO, data[total]);
+ total++;
+ free--;
+ } else {
+ free = (u32) inreg(draw, GDC_REG_FIFO_COUNT);
+ }
+ }
+}
+
+static void mb86290fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ __u32 cmd[6];
+
+ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
+ cmd[2] = GDC_TYPE_BLTCOPYP << 24;
+
+ if (area->sx >= area->dx && area->sy >= area->dy)
+ cmd[2] |= GDC_CMD_BLTCOPY_TOP_LEFT << 16;
+ else if (area->sx >= area->dx && area->sy <= area->dy)
+ cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_LEFT << 16;
+ else if (area->sx <= area->dx && area->sy >= area->dy)
+ cmd[2] |= GDC_CMD_BLTCOPY_TOP_RIGHT << 16;
+ else
+ cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_RIGHT << 16;
+
+ cmd[3] = (area->sy << 16) | area->sx;
+ cmd[4] = (area->dy << 16) | area->dx;
+ cmd[5] = (area->height << 16) | area->width;
+ mb862xxfb_write_fifo(6, cmd, info);
+}
+
+/*
+ * Fill in the cmd array /GDC FIFO commands/ to draw a 1bit image.
+ * Make sure cmd has enough room!
+ */
+static void mb86290fb_imageblit1(u32 *cmd, u16 step, u16 dx, u16 dy,
+ u16 width, u16 height, u32 fgcolor,
+ u32 bgcolor, const struct fb_image *image,
+ struct fb_info *info)
+{
+ int i;
+ unsigned const char *line;
+ u16 bytes;
+
+ /* set colors and raster operation regs */
+ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
+ cmd[2] =
+ (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16);
+ cmd[3] = fgcolor;
+ cmd[4] =
+ (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_BACK_COLOR << 16);
+ cmd[5] = bgcolor;
+
+ i = 0;
+ line = image->data;
+ bytes = (image->width + 7) >> 3;
+
+ /* and the image */
+ cmd[6] = (GDC_TYPE_DRAWBITMAPP << 24) |
+ (GDC_CMD_BITMAP << 16) | (2 + (step * height));
+ cmd[7] = (dy << 16) | dx;
+ cmd[8] = (height << 16) | width;
+
+ while (i < height) {
+ memcpy(&cmd[9 + i * step], line, step << 2);
+#ifdef __LITTLE_ENDIAN
+ {
+ int k = 0;
+ for (k = 0; k < step; k++)
+ cmd[9 + i * step + k] =
+ cpu_to_be32(cmd[9 + i * step + k]);
+ }
+#endif
+ line += bytes;
+ i++;
+ }
+}
+
+/*
+ * Fill in the cmd array /GDC FIFO commands/ to draw a 8bit image.
+ * Make sure cmd has enough room!
+ */
+static void mb86290fb_imageblit8(u32 *cmd, u16 step, u16 dx, u16 dy,
+ u16 width, u16 height, u32 fgcolor,
+ u32 bgcolor, const struct fb_image *image,
+ struct fb_info *info)
+{
+ int i, j;
+ unsigned const char *line, *ptr;
+ u16 bytes;
+
+ cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) |
+ (GDC_CMD_BLT_DRAW << 16) | (2 + (height * step));
+ cmd[1] = (dy << 16) | dx;
+ cmd[2] = (height << 16) | width;
+
+ i = 0;
+ line = ptr = image->data;
+ bytes = image->width;
+
+ while (i < height) {
+ ptr = line;
+ for (j = 0; j < step; j++) {
+ cmd[3 + i * step + j] =
+ (((u32 *) (info->pseudo_palette))[*ptr]) & 0xffff;
+ ptr++;
+ cmd[3 + i * step + j] |=
+ ((((u32 *) (info->
+ pseudo_palette))[*ptr]) & 0xffff) << 16;
+ ptr++;
+ }
+
+ line += bytes;
+ i++;
+ }
+}
+
+/*
+ * Fill in the cmd array /GDC FIFO commands/ to draw a 16bit image.
+ * Make sure cmd has enough room!
+ */
+static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy,
+ u16 width, u16 height, u32 fgcolor,
+ u32 bgcolor, const struct fb_image *image,
+ struct fb_info *info)
+{
+ int i;
+ unsigned const char *line;
+ u16 bytes;
+
+ i = 0;
+ line = image->data;
+ bytes = image->width << 1;
+
+ cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) |
+ (GDC_CMD_BLT_DRAW << 16) | (2 + step * height);
+ cmd[1] = (dy << 16) | dx;
+ cmd[2] = (height << 16) | width;
+
+ while (i < height) {
+ memcpy(&cmd[3 + i * step], line, step);
+ line += bytes;
+ i++;
+ }
+}
+
+static void mb86290fb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ int mdr;
+ u32 *cmd = NULL;
+ void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32,
+ const struct fb_image *, struct fb_info *) = NULL;
+ u32 cmdlen;
+ u32 fgcolor = 0, bgcolor = 0;
+ u16 step;
+
+ u16 width = image->width, height = image->height;
+ u16 dx = image->dx, dy = image->dy;
+ int x2, y2, vxres, vyres;
+
+ mdr = (GDC_ROP_COPY << 9);
+ x2 = image->dx + image->width;
+ y2 = image->dy + image->height;
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+ x2 = min(x2, vxres);
+ y2 = min(y2, vyres);
+ width = x2 - dx;
+ height = y2 - dy;
+
+ switch (image->depth) {
+ case 1:
+ step = (width + 31) >> 5;
+ cmdlen = 9 + height * step;
+ cmdfn = mb86290fb_imageblit1;
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ fgcolor =
+ ((u32 *) (info->pseudo_palette))[image->fg_color];
+ bgcolor =
+ ((u32 *) (info->pseudo_palette))[image->bg_color];
+ } else {
+ fgcolor = image->fg_color;
+ bgcolor = image->bg_color;
+ }
+
+ break;
+
+ case 8:
+ step = (width + 1) >> 1;
+ cmdlen = 3 + height * step;
+ cmdfn = mb86290fb_imageblit8;
+ break;
+
+ case 16:
+ step = (width + 1) >> 1;
+ cmdlen = 3 + height * step;
+ cmdfn = mb86290fb_imageblit16;
+ break;
+
+ default:
+ cfb_imageblit(info, image);
+ return;
+ }
+
+ cmd = kmalloc(cmdlen * 4, GFP_DMA);
+ if (!cmd)
+ return cfb_imageblit(info, image);
+ cmdfn(cmd, step, dx, dy, width, height, fgcolor, bgcolor, image, info);
+ mb862xxfb_write_fifo(cmdlen, cmd, info);
+ kfree(cmd);
+}
+
+static void mb86290fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+
+ u32 x2, y2, vxres, vyres, height, width, fg;
+ u32 cmd[7];
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ if (!rect->width || !rect->height || rect->dx > vxres
+ || rect->dy > vyres)
+ return;
+
+ /* We could use hardware clipping but on many cards you get around
+ * hardware clipping by writing to framebuffer directly. */
+ x2 = rect->dx + rect->width;
+ y2 = rect->dy + rect->height;
+ x2 = min(x2, vxres);
+ y2 = min(y2, vyres);
+ width = x2 - rect->dx;
+ height = y2 - rect->dy;
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ fg = ((u32 *) (info->pseudo_palette))[rect->color];
+ else
+ fg = rect->color;
+
+ switch (rect->rop) {
+
+ case ROP_XOR:
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_XOR << 9);
+ break;
+
+ case ROP_COPY:
+ /* Set raster operation */
+ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9);
+ break;
+
+ }
+
+ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP;
+ /* cmd[1] set earlier */
+ cmd[2] =
+ (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16);
+ cmd[3] = fg;
+ cmd[4] = (GDC_TYPE_DRAWRECTP << 24) | (GDC_CMD_BLT_FILL << 16);
+ cmd[5] = (rect->dy << 16) | (rect->dx);
+ cmd[6] = (height << 16) | width;
+
+ mb862xxfb_write_fifo(7, cmd, info);
+}
+
+void mb862xxfb_init_accel(struct fb_info *info, int xres)
+{
+ struct mb862xxfb_par *par = info->par;
+
+ if (info->var.bits_per_pixel == 32) {
+ info->fbops->fb_fillrect = cfb_fillrect;
+ info->fbops->fb_copyarea = cfb_copyarea;
+ info->fbops->fb_imageblit = cfb_imageblit;
+ } else {
+ outreg(disp, GC_L0EM, 3);
+ info->fbops->fb_fillrect = mb86290fb_fillrect;
+ info->fbops->fb_copyarea = mb86290fb_copyarea;
+ info->fbops->fb_imageblit = mb86290fb_imageblit;
+ }
+ outreg(draw, GDC_REG_DRAW_BASE, 0);
+ outreg(draw, GDC_REG_MODE_MISC, 0x8000);
+ outreg(draw, GDC_REG_X_RESOLUTION, xres);
+
+ info->flags |=
+ FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
+ info->fix.accel = 0xff; /*FIXME: add right define */
+}
+EXPORT_SYMBOL(mb862xxfb_init_accel);
diff --git a/drivers/video/mb862xx/mb862xxfb_accel.h b/drivers/video/mb862xx/mb862xxfb_accel.h
new file mode 100644
index 00000000000..96a2dfef0f6
--- /dev/null
+++ b/drivers/video/mb862xx/mb862xxfb_accel.h
@@ -0,0 +1,203 @@
+#ifndef __MB826XXFB_ACCEL_H__
+#define __MB826XXFB_ACCEL_H__
+
+/* registers */
+#define GDC_GEO_REG_INPUT_FIFO 0x00000400L
+
+/* Special Registers */
+#define GDC_REG_CTRL 0x00000400L
+#define GDC_REG_FIFO_STATUS 0x00000404L
+#define GDC_REG_FIFO_COUNT 0x00000408L
+#define GDC_REG_SETUP_STATUS 0x0000040CL
+#define GDC_REG_DDA_STATUS 0x00000410L
+#define GDC_REG_ENGINE_STATUS 0x00000414L
+#define GDC_REG_ERROR_STATUS 0x00000418L
+#define GDC_REG_MODE_MISC 0x00000420L /* MDR0 */
+#define GDC_REG_MODE_LINE 0x00000424L /* MDR1 */
+#define GDC_REG_MODE_POLYGON 0x00000428L /* MDR2 */
+#define GDC_REG_MODE_TEXTURE 0x0000042CL /* MDR3 */
+#define GDC_REG_MODE_BITMAP 0x00000430L /* MDR4 */
+#define GDC_REG_MODE_EXTENSION 0x0000043CL /* MDR7 */
+
+/* Configuration Registers */
+#define GDC_REG_DRAW_BASE 0x00000440L
+#define GDC_REG_X_RESOLUTION 0x00000444L
+#define GDC_REG_Z_BASE 0x00000448L
+#define GDC_REG_TEXTURE_BASE 0x0000044CL
+#define GDC_REG_POLYGON_FLAG_BASE 0x00000450L
+#define GDC_REG_CLIP_XMIN 0x00000454L
+#define GDC_REG_CLIP_XMAX 0x00000458L
+#define GDC_REG_CLIP_YMIN 0x0000045CL
+#define GDC_REG_CLIP_YMAX 0x00000460L
+#define GDC_REG_TEXURE_SIZE 0x00000464L
+#define GDC_REG_TILE_SIZE 0x00000468L
+#define GDC_REG_TEX_BUF_OFFSET 0x0000046CL
+
+/* for MB86293 or later */
+#define GDC_REG_ALPHA_MAP_BASE 0x00000474L /* ABR */
+
+/* Constant Registers */
+#define GDC_REG_FOREGROUND_COLOR 0x00000480L
+#define GDC_REG_BACKGROUND_COLOR 0x00000484L
+#define GDC_REG_ALPHA 0x00000488L
+#define GDC_REG_LINE_PATTERN 0x0000048CL
+#define GDC_REG_TEX_BORDER_COLOR 0x00000494L
+#define GDC_REG_LINE_PATTERN_OFFSET 0x000003E0L
+
+/* Command Code */
+#define GDC_CMD_PIXEL 0x00000000L
+#define GDC_CMD_PIXEL_Z 0x00000001L
+
+#define GDC_CMD_X_VECTOR 0x00000020L
+#define GDC_CMD_Y_VECTOR 0x00000021L
+#define GDC_CMD_X_VECTOR_NOEND 0x00000022L
+#define GDC_CMD_Y_VECTOR_NOEND 0x00000023L
+#define GDC_CMD_X_VECTOR_BLPO 0x00000024L
+#define GDC_CMD_Y_VECTOR_BLPO 0x00000025L
+#define GDC_CMD_X_VECTOR_NOEND_BLPO 0x00000026L
+#define GDC_CMD_Y_VECTOR_NOEND_BLPO 0x00000027L
+#define GDC_CMD_AA_X_VECTOR 0x00000028L
+#define GDC_CMD_AA_Y_VECTOR 0x00000029L
+#define GDC_CMD_AA_X_VECTOR_NOEND 0x0000002AL
+#define GDC_CMD_AA_Y_VECTOR_NOEND 0x0000002BL
+#define GDC_CMD_AA_X_VECTOR_BLPO 0x0000002CL
+#define GDC_CMD_AA_Y_VECTOR_BLPO 0x0000002DL
+#define GDC_CMD_AA_X_VECTOR_NOEND_BLPO 0x0000002EL
+#define GDC_CMD_AA_Y_VECTOR_NOEND_BLPO 0x0000002FL
+
+#define GDC_CMD_0_VECTOR 0x00000030L
+#define GDC_CMD_1_VECTOR 0x00000031L
+#define GDC_CMD_0_VECTOR_NOEND 0x00000032L
+#define GDC_CMD_1_VECTOR_NOEND 0x00000033L
+#define GDC_CMD_0_VECTOR_BLPO 0x00000034L
+#define GDC_CMD_1_VECTOR_BLPO 0x00000035L
+#define GDC_CMD_0_VECTOR_NOEND_BLPO 0x00000036L
+#define GDC_CMD_1_VECTOR_NOEND_BLPO 0x00000037L
+#define GDC_CMD_AA_0_VECTOR 0x00000038L
+#define GDC_CMD_AA_1_VECTOR 0x00000039L
+#define GDC_CMD_AA_0_VECTOR_NOEND 0x0000003AL
+#define GDC_CMD_AA_1_VECTOR_NOEND 0x0000003BL
+#define GDC_CMD_AA_0_VECTOR_BLPO 0x0000003CL
+#define GDC_CMD_AA_1_VECTOR_BLPO 0x0000003DL
+#define GDC_CMD_AA_0_VECTOR_NOEND_BLPO 0x0000003EL
+#define GDC_CMD_AA_1_VECTOR_NOEND_BLPO 0x0000003FL
+
+#define GDC_CMD_BLT_FILL 0x00000041L
+#define GDC_CMD_BLT_DRAW 0x00000042L
+#define GDC_CMD_BITMAP 0x00000043L
+#define GDC_CMD_BLTCOPY_TOP_LEFT 0x00000044L
+#define GDC_CMD_BLTCOPY_TOP_RIGHT 0x00000045L
+#define GDC_CMD_BLTCOPY_BOTTOM_LEFT 0x00000046L
+#define GDC_CMD_BLTCOPY_BOTTOM_RIGHT 0x00000047L
+#define GDC_CMD_LOAD_TEXTURE 0x00000048L
+#define GDC_CMD_LOAD_TILE 0x00000049L
+
+#define GDC_CMD_TRAP_RIGHT 0x00000060L
+#define GDC_CMD_TRAP_LEFT 0x00000061L
+#define GDC_CMD_TRIANGLE_FAN 0x00000062L
+#define GDC_CMD_FLAG_TRIANGLE_FAN 0x00000063L
+
+#define GDC_CMD_FLUSH_FB 0x000000C1L
+#define GDC_CMD_FLUSH_Z 0x000000C2L
+
+#define GDC_CMD_POLYGON_BEGIN 0x000000E0L
+#define GDC_CMD_POLYGON_END 0x000000E1L
+#define GDC_CMD_CLEAR_POLY_FLAG 0x000000E2L
+#define GDC_CMD_NORMAL 0x000000FFL
+
+#define GDC_CMD_VECTOR_BLPO_FLAG 0x00040000L
+#define GDC_CMD_FAST_VECTOR_BLPO_FLAG 0x00000004L
+
+/* for MB86293 or later */
+#define GDC_CMD_MDR1 0x00000000L
+#define GDC_CMD_MDR1S 0x00000002L
+#define GDC_CMD_MDR1B 0x00000004L
+#define GDC_CMD_MDR2 0x00000001L
+#define GDC_CMD_MDR2S 0x00000003L
+#define GDC_CMD_MDR2TL 0x00000007L
+#define GDC_CMD_GMDR1E 0x00000010L
+#define GDC_CMD_GMDR2E 0x00000020L
+#define GDC_CMD_OVERLAP_SHADOW_XY 0x00000000L
+#define GDC_CMD_OVERLAP_SHADOW_XY_COMPOSITION 0x00000001L
+#define GDC_CMD_OVERLAP_Z_PACKED_ONBS 0x00000007L
+#define GDC_CMD_OVERLAP_Z_ORIGIN 0x00000000L
+#define GDC_CMD_OVERLAP_Z_NON_TOPLEFT 0x00000001L
+#define GDC_CMD_OVERLAP_Z_BORDER 0x00000002L
+#define GDC_CMD_OVERLAP_Z_SHADOW 0x00000003L
+#define GDC_CMD_BLTCOPY_ALT_ALPHA 0x00000000L /* Reserved */
+#define GDC_CMD_DC_LOGOUT 0x00000000L /* Reserved */
+#define GDC_CMD_BODY_FORE_COLOR 0x00000000L
+#define GDC_CMD_BODY_BACK_COLOR 0x00000001L
+#define GDC_CMD_SHADOW_FORE_COLOR 0x00000002L
+#define GDC_CMD_SHADOW_BACK_COLOR 0x00000003L
+#define GDC_CMD_BORDER_FORE_COLOR 0x00000004L
+#define GDC_CMD_BORDER_BACK_COLOR 0x00000005L
+
+/* Type Code Table */
+#define GDC_TYPE_G_NOP 0x00000020L
+#define GDC_TYPE_G_BEGIN 0x00000021L
+#define GDC_TYPE_G_BEGINCONT 0x00000022L
+#define GDC_TYPE_G_END 0x00000023L
+#define GDC_TYPE_G_VERTEX 0x00000030L
+#define GDC_TYPE_G_VERTEXLOG 0x00000032L
+#define GDC_TYPE_G_VERTEXNOPLOG 0x00000033L
+#define GDC_TYPE_G_INIT 0x00000040L
+#define GDC_TYPE_G_VIEWPORT 0x00000041L
+#define GDC_TYPE_G_DEPTHRANGE 0x00000042L
+#define GDC_TYPE_G_LOADMATRIX 0x00000043L
+#define GDC_TYPE_G_VIEWVOLUMEXYCLIP 0x00000044L
+#define GDC_TYPE_G_VIEWVOLUMEZCLIP 0x00000045L
+#define GDC_TYPE_G_VIEWVOLUMEWCLIP 0x00000046L
+#define GDC_TYPE_SETLVERTEX2I 0x00000072L
+#define GDC_TYPE_SETLVERTEX2IP 0x00000073L
+#define GDC_TYPE_SETMODEREGISTER 0x000000C0L
+#define GDC_TYPE_SETGMODEREGISTER 0x000000C1L
+#define GDC_TYPE_OVERLAPXYOFFT 0x000000C8L
+#define GDC_TYPE_OVERLAPZOFFT 0x000000C9L
+#define GDC_TYPE_DC_LOGOUTADDR 0x000000CCL
+#define GDC_TYPE_SETCOLORREGISTER 0x000000CEL
+#define GDC_TYPE_G_BEGINE 0x000000E1L
+#define GDC_TYPE_G_BEGINCONTE 0x000000E2L
+#define GDC_TYPE_G_ENDE 0x000000E3L
+#define GDC_TYPE_DRAWPIXEL 0x00000000L
+#define GDC_TYPE_DRAWPIXELZ 0x00000001L
+#define GDC_TYPE_DRAWLINE 0x00000002L
+#define GDC_TYPE_DRAWLINE2I 0x00000003L
+#define GDC_TYPE_DRAWLINE2IP 0x00000004L
+#define GDC_TYPE_DRAWTRAP 0x00000005L
+#define GDC_TYPE_DRAWVERTEX2I 0x00000006L
+#define GDC_TYPE_DRAWVERTEX2IP 0x00000007L
+#define GDC_TYPE_DRAWRECTP 0x00000009L
+#define GDC_TYPE_DRAWBITMAPP 0x0000000BL
+#define GDC_TYPE_BLTCOPYP 0x0000000DL
+#define GDC_TYPE_BLTCOPYALTERNATEP 0x0000000FL
+#define GDC_TYPE_LOADTEXTUREP 0x00000011L
+#define GDC_TYPE_BLTTEXTUREP 0x00000013L
+#define GDC_TYPE_BLTCOPYALTALPHABLENDP 0x0000001FL
+#define GDC_TYPE_SETVERTEX2I 0x00000070L
+#define GDC_TYPE_SETVERTEX2IP 0x00000071L
+#define GDC_TYPE_DRAW 0x000000F0L
+#define GDC_TYPE_SETREGISTER 0x000000F1L
+#define GDC_TYPE_SYNC 0x000000FCL
+#define GDC_TYPE_INTERRUPT 0x000000FDL
+#define GDC_TYPE_NOP 0x0
+
+/* Raster operation */
+#define GDC_ROP_CLEAR 0x0000
+#define GDC_ROP_AND 0x0001
+#define GDC_ROP_AND_REVERSE 0x0002
+#define GDC_ROP_COPY 0x0003
+#define GDC_ROP_AND_INVERTED 0x0004
+#define GDC_ROP_NOP 0x0005
+#define GDC_ROP_XOR 0x0006
+#define GDC_ROP_OR 0x0007
+#define GDC_ROP_NOR 0x0008
+#define GDC_ROP_EQUIV 0x0009
+#define GDC_ROP_INVERT 0x000A
+#define GDC_ROP_OR_REVERSE 0x000B
+#define GDC_ROP_COPY_INVERTED 0x000C
+#define GDC_ROP_OR_INVERTED 0x000D
+#define GDC_ROP_NAND 0x000E
+#define GDC_ROP_SET 0x000F
+
+#endif
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 34e4e799516..0129f1bc352 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/fb.h>
+#include <linux/kernel.h>
#undef DEBUG
@@ -402,21 +403,6 @@ const struct fb_videomode vesa_modes[] = {
EXPORT_SYMBOL(vesa_modes);
#endif /* CONFIG_FB_MODE_HELPERS */
-static int my_atoi(const char *name)
-{
- int val = 0;
-
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
- }
- }
-}
-
/**
* fb_try_mode - test a video mode
* @var: frame buffer user defined part of display
@@ -539,7 +525,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
namelen = i;
if (!refresh_specified && !bpp_specified &&
!yres_specified) {
- refresh = my_atoi(&name[i+1]);
+ refresh = simple_strtol(&name[i+1], NULL, 10);
refresh_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -549,7 +535,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
case '-':
namelen = i;
if (!bpp_specified && !yres_specified) {
- bpp = my_atoi(&name[i+1]);
+ bpp = simple_strtol(&name[i+1], NULL, 10);
bpp_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -558,7 +544,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
break;
case 'x':
if (!yres_specified) {
- yres = my_atoi(&name[i+1]);
+ yres = simple_strtol(&name[i+1], NULL, 10);
yres_specified = 1;
} else
goto done;
@@ -586,7 +572,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
}
}
if (i < 0 && yres_specified) {
- xres = my_atoi(name);
+ xres = simple_strtol(name, NULL, 10);
res_specified = 1;
}
done:
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 4d8c54c23dd..b043ac83c41 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -282,8 +282,17 @@ static int offb_set_par(struct fb_info *info)
return 0;
}
+static void offb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(info->aperture_base, info->aperture_size);
+ framebuffer_release(info);
+}
+
static struct fb_ops offb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = offb_destroy,
.fb_setcolreg = offb_setcolreg,
.fb_set_par = offb_set_par,
.fb_blank = offb_blank,
@@ -482,10 +491,14 @@ static void __init offb_init_fb(const char *name, const char *full_name,
var->sync = 0;
var->vmode = FB_VMODE_NONINTERLACED;
+ /* set offb aperture size for generic probing */
+ info->aperture_base = address;
+ info->aperture_size = fix->smem_len;
+
info->fbops = &offb_ops;
info->screen_base = ioremap(address, fix->smem_len);
info->pseudo_palette = (void *) (info + 1);
- info->flags = FBINFO_DEFAULT | foreign_endian;
+ info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian;
fb_alloc_cmap(&info->cmap, 256, 0);
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 551e3e9c4cb..455c6055325 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -1,6 +1,7 @@
config FB_OMAP
tristate "OMAP frame buffer support (EXPERIMENTAL)"
- depends on FB && ARCH_OMAP
+ depends on FB && ARCH_OMAP && (OMAP2_DSS = "n")
+
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -72,7 +73,7 @@ config FB_OMAP_LCD_MIPID
config FB_OMAP_BOOTLOADER_INIT
bool "Check bootloader initialization"
- depends on FB_OMAP
+ depends on FB_OMAP || FB_OMAP2
help
Say Y here if you want to enable checking if the bootloader has
already initialized the display controller. In this case the
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index f5d75f22cef..2ffb34af4c5 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -27,9 +27,9 @@
#include <linux/clk.h>
#include <plat/dma.h>
-#include <plat/omapfb.h>
#include <plat/blizzard.h>
+#include "omapfb.h"
#include "dispc.h"
#define MODULE_NAME "blizzard"
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 7c833db4f9b..c7c6455f1fa 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -24,11 +24,12 @@
#include <linux/vmalloc.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <plat/sram.h>
-#include <plat/omapfb.h>
#include <plat/board.h>
+#include "omapfb.h"
#include "dispc.h"
#define MODULE_NAME "dispc"
@@ -188,6 +189,11 @@ static struct {
struct omapfb_color_key color_key;
} dispc;
+static struct platform_device omapdss_device = {
+ .name = "omapdss",
+ .id = -1,
+};
+
static void enable_lcd_clocks(int enable);
static void inline dispc_write_reg(int idx, u32 val)
@@ -914,20 +920,20 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
static int get_dss_clocks(void)
{
- dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick");
+ dispc.dss_ick = clk_get(&omapdss_device.dev, "ick");
if (IS_ERR(dispc.dss_ick)) {
dev_err(dispc.fbdev->dev, "can't get ick\n");
return PTR_ERR(dispc.dss_ick);
}
- dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck");
+ dispc.dss1_fck = clk_get(&omapdss_device.dev, "dss1_fck");
if (IS_ERR(dispc.dss1_fck)) {
dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
clk_put(dispc.dss_ick);
return PTR_ERR(dispc.dss1_fck);
}
- dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck");
+ dispc.dss_54m_fck = clk_get(&omapdss_device.dev, "tv_fck");
if (IS_ERR(dispc.dss_54m_fck)) {
dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
clk_put(dispc.dss_ick);
@@ -1379,6 +1385,12 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
int skip_init = 0;
int i;
+ r = platform_device_register(&omapdss_device);
+ if (r) {
+ dev_err(fbdev->dev, "can't register omapdss device\n");
+ return r;
+ }
+
memset(&dispc, 0, sizeof(dispc));
dispc.base = ioremap(DISPC_BASE, SZ_1K);
@@ -1522,6 +1534,7 @@ static void omap_dispc_cleanup(void)
free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
put_dss_clocks();
iounmap(dispc.base);
+ platform_device_unregister(&omapdss_device);
}
const struct lcd_ctrl omap2_int_ctrl = {
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 17a975e4c9c..0016f77cd13 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -25,10 +25,11 @@
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/clk.h>
+#include <linux/interrupt.h>
#include <plat/dma.h>
-#include <plat/omapfb.h>
#include <plat/hwa742.h>
+#include "omapfb.h"
#define HWA742_REV_CODE_REG 0x0
#define HWA742_CONFIG_REG 0x2
diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c
index fea7feee0b7..e3eccc9af78 100644
--- a/drivers/video/omap/lcd_2430sdp.c
+++ b/drivers/video/omap/lcd_2430sdp.c
@@ -25,12 +25,13 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
#include <asm/mach-types.h>
+#include "omapfb.h"
+
#define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91
#define SDP2430_LCD_PANEL_ENABLE_GPIO 154
#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24
@@ -51,7 +52,7 @@ static unsigned enable_gpio;
#define TWL4030_VPLL2_DEV_GRP 0x33
#define TWL4030_VPLL2_DEDICATED 0x36
-#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v)
+#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
static int sdp2430_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
index b3973ebd1b0..567db6ac32c 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -27,7 +27,8 @@
#include <plat/board-ams-delta.h>
#include <mach/hardware.h>
-#include <plat/omapfb.h>
+
+#include "omapfb.h"
#define AMS_DELTA_DEFAULT_CONTRAST 112
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c
index 4c5cefc5153..2be94eb3bbf 100644
--- a/drivers/video/omap/lcd_apollon.c
+++ b/drivers/video/omap/lcd_apollon.c
@@ -26,7 +26,8 @@
#include <mach/gpio.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
+
+#include "omapfb.h"
/* #define USE_35INCH_LCD 1 */
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 240b4fb1074..8df688748b5 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -24,7 +24,7 @@
#include <linux/i2c/tps65010.h>
#include <mach/gpio.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
#define MODULE_NAME "omapfb-lcd_h3"
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
index 720625da1f4..03a06a98275 100644
--- a/drivers/video/omap/lcd_h4.c
+++ b/drivers/video/omap/lcd_h4.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index 2e0c81ea748..a9007c5d1fa 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -29,7 +29,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
static int htcherald_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
index aafe9b497e2..3271f1643b2 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -24,7 +24,7 @@
#include <linux/io.h>
#include <plat/fpga.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
static int innovator1510_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 0de338264a8..9fff86f67bd 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -23,7 +23,7 @@
#include <linux/platform_device.h>
#include <mach/gpio.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
#define MODULE_NAME "omapfb-lcd_h3"
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
index 6a260dfdadc..0f5952cae85 100644
--- a/drivers/video/omap/lcd_ldp.c
+++ b/drivers/video/omap/lcd_ldp.c
@@ -24,13 +24,14 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <mach/gpio.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
#include <asm/mach-types.h>
+#include "omapfb.h"
+
#define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES)
#define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES)
@@ -58,7 +59,7 @@
#define TWL4030_VPLL2_DEV_GRP 0x33
#define TWL4030_VPLL2_DEDICATED 0x36
-#define t2_out(c, r, v) twl4030_i2c_write_u8(c, r, v)
+#define t2_out(c, r, v) twl_i2c_write_u8(c, r, v)
static int ldp_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index 8f3e2b4bb4f..abe1c76a325 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -23,9 +23,10 @@
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
-#include <plat/omapfb.h>
#include <plat/lcd_mipid.h>
+#include "omapfb.h"
+
#define MIPID_MODULE_NAME "lcd_mipid"
#define MIPID_CMD_READ_DISP_ID 0x04
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
index e1a38abca3e..7e7a65c0845 100644
--- a/drivers/video/omap/lcd_omap2evm.c
+++ b/drivers/video/omap/lcd_omap2evm.c
@@ -24,12 +24,13 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
#include <asm/mach-types.h>
+#include "omapfb.h"
+
#define LCD_PANEL_ENABLE_GPIO 154
#define LCD_PANEL_LR 128
#define LCD_PANEL_UD 129
@@ -60,9 +61,9 @@ static int omap2evm_panel_init(struct lcd_panel *panel,
gpio_direction_output(LCD_PANEL_LR, 1);
gpio_direction_output(LCD_PANEL_UD, 1);
- twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
bklight_level = 100;
return 0;
@@ -100,7 +101,7 @@ static int omap2evm_bklight_setlevel(struct lcd_panel *panel,
u8 c;
if ((level >= 0) && (level <= 100)) {
c = (125 * (100 - level)) / 100 + 2;
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
bklight_level = level;
}
return 0;
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
index ccec084ed64..ca75cc2a87a 100644
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -23,12 +23,14 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
+#include <plat/mux.h>
#include <asm/mach-types.h>
+#include "omapfb.h"
+
#define LCD_PANEL_ENABLE_GPIO 170
static int omap3beagle_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
index 556eb31db24..06840da0b09 100644
--- a/drivers/video/omap/lcd_omap3evm.c
+++ b/drivers/video/omap/lcd_omap3evm.c
@@ -23,12 +23,13 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
#include <asm/mach-types.h>
+#include "omapfb.h"
+
#define LCD_PANEL_ENABLE_GPIO 153
#define LCD_PANEL_LR 2
#define LCD_PANEL_UD 3
@@ -62,9 +63,9 @@ static int omap3evm_panel_init(struct lcd_panel *panel,
gpio_direction_output(LCD_PANEL_LR, 1);
gpio_direction_output(LCD_PANEL_UD, 1);
- twl4030_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
bklight_level = 100;
return 0;
@@ -101,7 +102,7 @@ static int omap3evm_bklight_setlevel(struct lcd_panel *panel,
u8 c;
if ((level >= 0) && (level <= 100)) {
c = (125 * (100 - level)) / 100 + 2;
- twl4030_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
+ twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
bklight_level = level;
}
return 0;
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index bb21d7dca39..b87e8b83f29 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -25,7 +25,7 @@
#include <mach/gpio.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
index b0f86e514cd..564933ffac6 100644
--- a/drivers/video/omap/lcd_overo.c
+++ b/drivers/video/omap/lcd_overo.c
@@ -21,13 +21,14 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <mach/gpio.h>
#include <plat/mux.h>
-#include <plat/omapfb.h>
#include <asm/mach-types.h>
+#include "omapfb.h"
+
#define LCD_ENABLE 144
static int overo_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index d30289603ce..4cb301750d0 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -24,7 +24,7 @@
#include <linux/io.h>
#include <plat/fpga.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
static int palmte_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index 557424fb6df..ff0e6d7ab3a 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -30,7 +30,7 @@ GPIO13 - screen blanking
#include <linux/io.h>
#include <mach/gpio.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
static int palmtt_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
index 5f4b5b2c1f4..2334e56536b 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -24,7 +24,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
static int palmz71_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index 5f32cafbf74..a33483910dc 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -29,47 +29,17 @@
#include <linux/vmalloc.h>
#include <linux/clk.h>
+#include <mach/lcdc.h>
#include <plat/dma.h>
-#include <plat/omapfb.h>
#include <asm/mach-types.h>
+#include "omapfb.h"
+
#include "lcdc.h"
#define MODULE_NAME "lcdc"
-#define OMAP_LCDC_BASE 0xfffec000
-#define OMAP_LCDC_SIZE 256
-#define OMAP_LCDC_IRQ INT_LCD_CTRL
-
-#define OMAP_LCDC_CONTROL (OMAP_LCDC_BASE + 0x00)
-#define OMAP_LCDC_TIMING0 (OMAP_LCDC_BASE + 0x04)
-#define OMAP_LCDC_TIMING1 (OMAP_LCDC_BASE + 0x08)
-#define OMAP_LCDC_TIMING2 (OMAP_LCDC_BASE + 0x0c)
-#define OMAP_LCDC_STATUS (OMAP_LCDC_BASE + 0x10)
-#define OMAP_LCDC_SUBPANEL (OMAP_LCDC_BASE + 0x14)
-#define OMAP_LCDC_LINE_INT (OMAP_LCDC_BASE + 0x18)
-#define OMAP_LCDC_DISPLAY_STATUS (OMAP_LCDC_BASE + 0x1c)
-
-#define OMAP_LCDC_STAT_DONE (1 << 0)
-#define OMAP_LCDC_STAT_VSYNC (1 << 1)
-#define OMAP_LCDC_STAT_SYNC_LOST (1 << 2)
-#define OMAP_LCDC_STAT_ABC (1 << 3)
-#define OMAP_LCDC_STAT_LINE_INT (1 << 4)
-#define OMAP_LCDC_STAT_FUF (1 << 5)
-#define OMAP_LCDC_STAT_LOADED_PALETTE (1 << 6)
-
-#define OMAP_LCDC_CTRL_LCD_EN (1 << 0)
-#define OMAP_LCDC_CTRL_LCD_TFT (1 << 7)
-#define OMAP_LCDC_CTRL_LINE_IRQ_CLR_SEL (1 << 10)
-
-#define OMAP_LCDC_IRQ_VSYNC (1 << 2)
-#define OMAP_LCDC_IRQ_DONE (1 << 3)
-#define OMAP_LCDC_IRQ_LOADED_PALETTE (1 << 4)
-#define OMAP_LCDC_IRQ_LINE_NIRQ (1 << 5)
-#define OMAP_LCDC_IRQ_LINE (1 << 6)
-#define OMAP_LCDC_IRQ_MASK (((1 << 5) - 1) << 2)
-
#define MAX_PALETTE_SIZE PAGE_SIZE
enum lcdc_load_mode {
diff --git a/arch/arm/plat-omap/include/plat/omapfb.h b/drivers/video/omap/omapfb.h
index bfef7ab95f1..46e4714014e 100644
--- a/arch/arm/plat-omap/include/plat/omapfb.h
+++ b/drivers/video/omap/omapfb.h
@@ -1,5 +1,5 @@
/*
- * File: arch/arm/plat-omap/include/mach/omapfb.h
+ * File: drivers/video/omap/omapfb.h
*
* Framebuffer driver for TI OMAP boards
*
@@ -24,151 +24,12 @@
#ifndef __OMAPFB_H
#define __OMAPFB_H
-#include <asm/ioctl.h>
-#include <asm/types.h>
-
-/* IOCTL commands. */
-
-#define OMAP_IOW(num, dtype) _IOW('O', num, dtype)
-#define OMAP_IOR(num, dtype) _IOR('O', num, dtype)
-#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype)
-#define OMAP_IO(num) _IO('O', num)
-
-#define OMAPFB_MIRROR OMAP_IOW(31, int)
-#define OMAPFB_SYNC_GFX OMAP_IO(37)
-#define OMAPFB_VSYNC OMAP_IO(38)
-#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int)
-#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps)
-#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int)
-#define OMAPFB_LCD_TEST OMAP_IOW(45, int)
-#define OMAPFB_CTRL_TEST OMAP_IOW(46, int)
-#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old)
-#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key)
-#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key)
-#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info)
-#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info)
-#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window)
-#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info)
-#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info)
-
-#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff
-#define OMAPFB_CAPS_LCDC_MASK 0x00fff000
-#define OMAPFB_CAPS_PANEL_MASK 0xff000000
-
-#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000
-#define OMAPFB_CAPS_TEARSYNC 0x00002000
-#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000
-#define OMAPFB_CAPS_PLANE_SCALE 0x00008000
-#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000
-#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000
-#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000
-#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000
-#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000
-
-/* Values from DSP must map to lower 16-bits */
-#define OMAPFB_FORMAT_MASK 0x00ff
-#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100
-#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200
-#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400
-#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800
-#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000
-
-#define OMAPFB_EVENT_READY 1
-#define OMAPFB_EVENT_DISABLED 2
-
-#define OMAPFB_MEMTYPE_SDRAM 0
-#define OMAPFB_MEMTYPE_SRAM 1
-#define OMAPFB_MEMTYPE_MAX 1
-
-enum omapfb_color_format {
- OMAPFB_COLOR_RGB565 = 0,
- OMAPFB_COLOR_YUV422,
- OMAPFB_COLOR_YUV420,
- OMAPFB_COLOR_CLUT_8BPP,
- OMAPFB_COLOR_CLUT_4BPP,
- OMAPFB_COLOR_CLUT_2BPP,
- OMAPFB_COLOR_CLUT_1BPP,
- OMAPFB_COLOR_RGB444,
- OMAPFB_COLOR_YUY422,
-};
-
-struct omapfb_update_window {
- __u32 x, y;
- __u32 width, height;
- __u32 format;
- __u32 out_x, out_y;
- __u32 out_width, out_height;
- __u32 reserved[8];
-};
-
-struct omapfb_update_window_old {
- __u32 x, y;
- __u32 width, height;
- __u32 format;
-};
-
-enum omapfb_plane {
- OMAPFB_PLANE_GFX = 0,
- OMAPFB_PLANE_VID1,
- OMAPFB_PLANE_VID2,
-};
-
-enum omapfb_channel_out {
- OMAPFB_CHANNEL_OUT_LCD = 0,
- OMAPFB_CHANNEL_OUT_DIGIT,
-};
-
-struct omapfb_plane_info {
- __u32 pos_x;
- __u32 pos_y;
- __u8 enabled;
- __u8 channel_out;
- __u8 mirror;
- __u8 reserved1;
- __u32 out_width;
- __u32 out_height;
- __u32 reserved2[12];
-};
-
-struct omapfb_mem_info {
- __u32 size;
- __u8 type;
- __u8 reserved[3];
-};
-
-struct omapfb_caps {
- __u32 ctrl;
- __u32 plane_color;
- __u32 wnd_color;
-};
-
-enum omapfb_color_key_type {
- OMAPFB_COLOR_KEY_DISABLED = 0,
- OMAPFB_COLOR_KEY_GFX_DST,
- OMAPFB_COLOR_KEY_VID_SRC,
-};
-
-struct omapfb_color_key {
- __u8 channel_out;
- __u32 background;
- __u32 trans_key;
- __u8 key_type;
-};
-
-enum omapfb_update_mode {
- OMAPFB_UPDATE_DISABLED = 0,
- OMAPFB_AUTO_UPDATE,
- OMAPFB_MANUAL_UPDATE
-};
-
-#ifdef __KERNEL__
-
-#include <linux/completion.h>
-#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/mutex.h>
+#include <linux/omapfb.h>
-#include <plat/board.h>
+#define OMAPFB_EVENT_READY 1
+#define OMAPFB_EVENT_DISABLED 2
#define OMAP_LCDC_INV_VSYNC 0x0001
#define OMAP_LCDC_INV_HSYNC 0x0002
@@ -184,12 +45,6 @@ enum omapfb_update_mode {
#define OMAPFB_PLANE_XRES_MIN 8
#define OMAPFB_PLANE_YRES_MIN 8
-#ifdef CONFIG_ARCH_OMAP1
-#define OMAPFB_PLANE_NUM 1
-#else
-#define OMAPFB_PLANE_NUM 3
-#endif
-
struct omapfb_device;
struct lcd_panel {
@@ -256,7 +111,7 @@ struct lcd_ctrl_extif {
void (*read_data) (void *buf, unsigned int len);
void (*write_data) (const void *buf, unsigned int len);
void (*transfer_area) (int width, int height,
- void (callback)(void * data), void *data);
+ void (callback)(void *data), void *data);
int (*setup_tearsync) (unsigned pin_cnt,
unsigned hs_pulse_time, unsigned vs_pulse_time,
int hs_pol_inv, int vs_pol_inv, int div);
@@ -275,20 +130,6 @@ typedef int (*omapfb_notifier_callback_t)(struct notifier_block *,
unsigned long event,
void *fbi);
-struct omapfb_mem_region {
- u32 paddr;
- void __iomem *vaddr;
- unsigned long size;
- u8 type; /* OMAPFB_PLANE_MEM_* */
- unsigned alloc:1; /* allocated by the driver */
- unsigned map:1; /* kernel mapped by the driver */
-};
-
-struct omapfb_mem_desc {
- int region_cnt;
- struct omapfb_mem_region region[OMAPFB_PLANE_NUM];
-};
-
struct lcd_ctrl {
const char *name;
void *data;
@@ -331,9 +172,9 @@ struct lcd_ctrl {
};
enum omapfb_state {
- OMAPFB_DISABLED = 0,
- OMAPFB_SUSPENDED= 99,
- OMAPFB_ACTIVE = 100
+ OMAPFB_DISABLED = 0,
+ OMAPFB_SUSPENDED = 99,
+ OMAPFB_ACTIVE = 100
};
struct omapfb_plane_struct {
@@ -345,8 +186,8 @@ struct omapfb_plane_struct {
struct omapfb_device {
int state;
- int ext_lcdc; /* Using external
- LCD controller */
+ int ext_lcdc; /* Using external
+ LCD controller */
struct mutex rqueue_mutex;
int palette_size;
@@ -364,19 +205,12 @@ struct omapfb_device {
struct fb_info *fb_info[OMAPFB_PLANE_NUM];
};
-struct omapfb_platform_data {
- struct omap_lcd_config lcd;
- struct omapfb_mem_desc mem_desc;
- void *ctrl_platform_data;
-};
-
#ifdef CONFIG_ARCH_OMAP1
extern struct lcd_ctrl omap1_lcd_ctrl;
#else
extern struct lcd_ctrl omap2_disp_ctrl;
#endif
-extern void omapfb_reserve_sdram(void);
extern void omapfb_register_panel(struct lcd_panel *panel);
extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
extern void omapfb_notify_clients(struct omapfb_device *fbdev,
@@ -390,9 +224,4 @@ extern int omapfb_update_window_async(struct fb_info *fbi,
void (*callback)(void *),
void *callback_data);
-/* in arch/arm/plat-omap/fb.c */
-extern void omapfb_set_ctrl_platform_data(void *pdata);
-
-#endif /* __KERNEL__ */
-
#endif /* __OMAPFB_H */
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index f900a43db8d..c7f59a5ccdb 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -29,8 +29,8 @@
#include <linux/uaccess.h>
#include <plat/dma.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
#include "lcdc.h"
#include "dispc.h"
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index c90fa39486b..fed7b1bda19 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -27,8 +27,7 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <plat/omapfb.h>
-
+#include "omapfb.h"
#include "dispc.h"
/* To work around an RFBI transfer rate limitation */
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 79dc84f0971..8fb7c708f56 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -23,10 +23,11 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <plat/dma.h>
-#include <plat/omapfb.h>
+#include "omapfb.h"
#include "lcdc.h"
#define MODULE_NAME "omapfb-sossi"
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
new file mode 100644
index 00000000000..d877c361abd
--- /dev/null
+++ b/drivers/video/omap2/Kconfig
@@ -0,0 +1,9 @@
+config OMAP2_VRAM
+ bool
+
+config OMAP2_VRFB
+ bool
+
+source "drivers/video/omap2/dss/Kconfig"
+source "drivers/video/omap2/omapfb/Kconfig"
+source "drivers/video/omap2/displays/Kconfig"
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
new file mode 100644
index 00000000000..d853d05dad3
--- /dev/null
+++ b/drivers/video/omap2/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_OMAP2_VRAM) += vram.o
+obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
+
+obj-y += dss/
+obj-y += omapfb/
+obj-y += displays/
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
new file mode 100644
index 00000000000..b12a59c9c50
--- /dev/null
+++ b/drivers/video/omap2/displays/Kconfig
@@ -0,0 +1,22 @@
+menu "OMAP2/3 Display Device Drivers"
+ depends on OMAP2_DSS
+
+config PANEL_GENERIC
+ tristate "Generic Panel"
+ help
+ Generic panel driver.
+ Used for DVI output for Beagle and OMAP3 SDP.
+
+config PANEL_SHARP_LS037V7DW01
+ tristate "Sharp LS037V7DW01 LCD Panel"
+ depends on OMAP2_DSS
+ help
+	  LCD panel used in TI's SDP3430 and EVM boards.
+
+config PANEL_TAAL
+ tristate "Taal DSI Panel"
+ depends on OMAP2_DSS_DSI
+ help
+ Taal DSI command mode panel from TPO.
+
+endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
new file mode 100644
index 00000000000..955646440b3
--- /dev/null
+++ b/drivers/video/omap2/displays/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
+obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+
+obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
new file mode 100644
index 00000000000..eb48d1afd80
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-generic.c
@@ -0,0 +1,104 @@
+/*
+ * Generic panel support
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <plat/display.h>
+
+static struct omap_video_timings generic_panel_timings = {
+ /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
+ .x_res = 640,
+ .y_res = 480,
+ .pixel_clock = 23500,
+ .hfp = 48,
+ .hsw = 32,
+ .hbp = 80,
+ .vfp = 3,
+ .vsw = 4,
+ .vbp = 7,
+};
+
+static int generic_panel_probe(struct omap_dss_device *dssdev)
+{
+ dssdev->panel.config = OMAP_DSS_LCD_TFT;
+ dssdev->panel.timings = generic_panel_timings;
+
+ return 0;
+}
+
+static void generic_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int generic_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ if (dssdev->platform_enable)
+ r = dssdev->platform_enable(dssdev);
+
+ return r;
+}
+
+static void generic_panel_disable(struct omap_dss_device *dssdev)
+{
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+}
+
+static int generic_panel_suspend(struct omap_dss_device *dssdev)
+{
+ generic_panel_disable(dssdev);
+ return 0;
+}
+
+static int generic_panel_resume(struct omap_dss_device *dssdev)
+{
+ return generic_panel_enable(dssdev);
+}
+
+static struct omap_dss_driver generic_driver = {
+ .probe = generic_panel_probe,
+ .remove = generic_panel_remove,
+
+ .enable = generic_panel_enable,
+ .disable = generic_panel_disable,
+ .suspend = generic_panel_suspend,
+ .resume = generic_panel_resume,
+
+ .driver = {
+ .name = "generic_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init generic_panel_drv_init(void)
+{
+ return omap_dss_register_driver(&generic_driver);
+}
+
+static void __exit generic_panel_drv_exit(void)
+{
+ omap_dss_unregister_driver(&generic_driver);
+}
+
+module_init(generic_panel_drv_init);
+module_exit(generic_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
new file mode 100644
index 00000000000..bbe880bbe79
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -0,0 +1,153 @@
+/*
+ * LCD panel driver for Sharp LS037V7DW01
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+
+struct sharp_data {
+ /* XXX This regulator should actually be in SDP board file, not here,
+ * as it doesn't actually power the LCD, but something else that
+ * affects the output to LCD (I think. Somebody clarify). It doesn't do
+ * harm here, as SDP is the only board using this currently */
+ struct regulator *vdvi_reg;
+};
+
+static struct omap_video_timings sharp_ls_timings = {
+ .x_res = 480,
+ .y_res = 640,
+
+ .pixel_clock = 19200,
+
+ .hsw = 2,
+ .hfp = 1,
+ .hbp = 28,
+
+ .vsw = 1,
+ .vfp = 1,
+ .vbp = 1,
+};
+
+static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
+{
+ struct sharp_data *sd;
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS;
+ dssdev->panel.acb = 0x28;
+ dssdev->panel.timings = sharp_ls_timings;
+
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dssdev->dev, sd);
+
+ sd->vdvi_reg = regulator_get(&dssdev->dev, "vdvi");
+ if (IS_ERR(sd->vdvi_reg)) {
+ kfree(sd);
+ pr_err("failed to get VDVI regulator\n");
+ return PTR_ERR(sd->vdvi_reg);
+ }
+
+ return 0;
+}
+
+static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
+{
+ struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+
+ regulator_put(sd->vdvi_reg);
+
+ kfree(sd);
+}
+
+static int sharp_ls_panel_enable(struct omap_dss_device *dssdev)
+{
+ struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+ int r = 0;
+
+	/* wait a couple of vsyncs before enabling the LCD */
+ msleep(50);
+
+ regulator_enable(sd->vdvi_reg);
+
+ if (dssdev->platform_enable)
+ r = dssdev->platform_enable(dssdev);
+
+ return r;
+}
+
+static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
+{
+ struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ regulator_disable(sd->vdvi_reg);
+
+ /* wait at least 5 vsyncs after disabling the LCD */
+
+ msleep(100);
+}
+
+static int sharp_ls_panel_suspend(struct omap_dss_device *dssdev)
+{
+ sharp_ls_panel_disable(dssdev);
+ return 0;
+}
+
+static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
+{
+ return sharp_ls_panel_enable(dssdev);
+}
+
+static struct omap_dss_driver sharp_ls_driver = {
+ .probe = sharp_ls_panel_probe,
+ .remove = sharp_ls_panel_remove,
+
+ .enable = sharp_ls_panel_enable,
+ .disable = sharp_ls_panel_disable,
+ .suspend = sharp_ls_panel_suspend,
+ .resume = sharp_ls_panel_resume,
+
+ .driver = {
+ .name = "sharp_ls_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sharp_ls_panel_drv_init(void)
+{
+ return omap_dss_register_driver(&sharp_ls_driver);
+}
+
+static void __exit sharp_ls_panel_drv_exit(void)
+{
+ omap_dss_unregister_driver(&sharp_ls_driver);
+}
+
+module_init(sharp_ls_panel_drv_init);
+module_exit(sharp_ls_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
new file mode 100644
index 00000000000..1f01dfc5e52
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -0,0 +1,1003 @@
+/*
+ * Taal DSI command mode panel
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+
+#include <plat/display.h>
+
+/* DSI Virtual channel. Hardcoded for now. */
+#define TCH 0
+
+#define DCS_READ_NUM_ERRORS 0x05
+#define DCS_READ_POWER_MODE 0x0a
+#define DCS_READ_MADCTL 0x0b
+#define DCS_READ_PIXEL_FORMAT 0x0c
+#define DCS_RDDSDR 0x0f
+#define DCS_SLEEP_IN 0x10
+#define DCS_SLEEP_OUT 0x11
+#define DCS_DISPLAY_OFF 0x28
+#define DCS_DISPLAY_ON 0x29
+#define DCS_COLUMN_ADDR 0x2a
+#define DCS_PAGE_ADDR 0x2b
+#define DCS_MEMORY_WRITE 0x2c
+#define DCS_TEAR_OFF 0x34
+#define DCS_TEAR_ON 0x35
+#define DCS_MEM_ACC_CTRL 0x36
+#define DCS_PIXEL_FORMAT 0x3a
+#define DCS_BRIGHTNESS 0x51
+#define DCS_CTRL_DISPLAY 0x53
+#define DCS_WRITE_CABC 0x55
+#define DCS_READ_CABC 0x56
+#define DCS_GET_ID1 0xda
+#define DCS_GET_ID2 0xdb
+#define DCS_GET_ID3 0xdc
+
+/* #define TAAL_USE_ESD_CHECK */
+#define TAAL_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
+
+struct taal_data {
+ struct backlight_device *bldev;
+
+ unsigned long hw_guard_end; /* next value of jiffies when we can
+ * issue the next sleep in/out command
+ */
+ unsigned long hw_guard_wait; /* max guard time in jiffies */
+
+ struct omap_dss_device *dssdev;
+
+ bool enabled;
+ u8 rotate;
+ bool mirror;
+
+ bool te_enabled;
+ bool use_ext_te;
+ struct completion te_completion;
+
+ bool use_dsi_bl;
+
+ bool cabc_broken;
+ unsigned cabc_mode;
+
+ bool intro_printed;
+
+ struct workqueue_struct *esd_wq;
+ struct delayed_work esd_work;
+};
+
+static void taal_esd_work(struct work_struct *work);
+
+static void hw_guard_start(struct taal_data *td, int guard_msec)
+{
+ td->hw_guard_wait = msecs_to_jiffies(guard_msec);
+ td->hw_guard_end = jiffies + td->hw_guard_wait;
+}
+
+static void hw_guard_wait(struct taal_data *td)
+{
+ unsigned long wait = td->hw_guard_end - jiffies;
+
+ if ((long)wait > 0 && wait <= td->hw_guard_wait) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(wait);
+ }
+}
+
+static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
+{
+ int r;
+ u8 buf[1];
+
+ r = dsi_vc_dcs_read(TCH, dcs_cmd, buf, 1);
+
+ if (r < 0)
+ return r;
+
+ *data = buf[0];
+
+ return 0;
+}
+
+static int taal_dcs_write_0(u8 dcs_cmd)
+{
+ return dsi_vc_dcs_write(TCH, &dcs_cmd, 1);
+}
+
+static int taal_dcs_write_1(u8 dcs_cmd, u8 param)
+{
+ u8 buf[2];
+ buf[0] = dcs_cmd;
+ buf[1] = param;
+ return dsi_vc_dcs_write(TCH, buf, 2);
+}
+
+static int taal_sleep_in(struct taal_data *td)
+
+{
+ u8 cmd;
+ int r;
+
+ hw_guard_wait(td);
+
+ cmd = DCS_SLEEP_IN;
+ r = dsi_vc_dcs_write_nosync(TCH, &cmd, 1);
+ if (r)
+ return r;
+
+ hw_guard_start(td, 120);
+
+ msleep(5);
+
+ return 0;
+}
+
+static int taal_sleep_out(struct taal_data *td)
+{
+ int r;
+
+ hw_guard_wait(td);
+
+ r = taal_dcs_write_0(DCS_SLEEP_OUT);
+ if (r)
+ return r;
+
+ hw_guard_start(td, 120);
+
+ msleep(5);
+
+ return 0;
+}
+
+static int taal_get_id(u8 *id1, u8 *id2, u8 *id3)
+{
+ int r;
+
+ r = taal_dcs_read_1(DCS_GET_ID1, id1);
+ if (r)
+ return r;
+ r = taal_dcs_read_1(DCS_GET_ID2, id2);
+ if (r)
+ return r;
+ r = taal_dcs_read_1(DCS_GET_ID3, id3);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int taal_set_addr_mode(u8 rotate, bool mirror)
+{
+ int r;
+ u8 mode;
+ int b5, b6, b7;
+
+ r = taal_dcs_read_1(DCS_READ_MADCTL, &mode);
+ if (r)
+ return r;
+
+ switch (rotate) {
+ default:
+ case 0:
+ b7 = 0;
+ b6 = 0;
+ b5 = 0;
+ break;
+ case 1:
+ b7 = 0;
+ b6 = 1;
+ b5 = 1;
+ break;
+ case 2:
+ b7 = 1;
+ b6 = 1;
+ b5 = 0;
+ break;
+ case 3:
+ b7 = 1;
+ b6 = 0;
+ b5 = 1;
+ break;
+ }
+
+ if (mirror)
+ b6 = !b6;
+
+ mode &= ~((1<<7) | (1<<6) | (1<<5));
+ mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
+
+ return taal_dcs_write_1(DCS_MEM_ACC_CTRL, mode);
+}
+
+static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
+{
+ int r;
+ u16 x1 = x;
+ u16 x2 = x + w - 1;
+ u16 y1 = y;
+ u16 y2 = y + h - 1;
+
+ u8 buf[5];
+ buf[0] = DCS_COLUMN_ADDR;
+ buf[1] = (x1 >> 8) & 0xff;
+ buf[2] = (x1 >> 0) & 0xff;
+ buf[3] = (x2 >> 8) & 0xff;
+ buf[4] = (x2 >> 0) & 0xff;
+
+ r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ if (r)
+ return r;
+
+ buf[0] = DCS_PAGE_ADDR;
+ buf[1] = (y1 >> 8) & 0xff;
+ buf[2] = (y1 >> 0) & 0xff;
+ buf[3] = (y2 >> 8) & 0xff;
+ buf[4] = (y2 >> 0) & 0xff;
+
+ r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
+ if (r)
+ return r;
+
+ dsi_vc_send_bta_sync(TCH);
+
+ return r;
+}
+
+static int taal_bl_update_status(struct backlight_device *dev)
+{
+ struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int r;
+ int level;
+
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ level = dev->props.brightness;
+ else
+ level = 0;
+
+ dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
+
+ if (td->use_dsi_bl) {
+ if (td->enabled) {
+ dsi_bus_lock();
+ r = taal_dcs_write_1(DCS_BRIGHTNESS, level);
+ dsi_bus_unlock();
+ if (r)
+ return r;
+ }
+ } else {
+ if (!dssdev->set_backlight)
+ return -EINVAL;
+
+ r = dssdev->set_backlight(dssdev, level);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int taal_bl_get_intensity(struct backlight_device *dev)
+{
+ if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
+ dev->props.power == FB_BLANK_UNBLANK)
+ return dev->props.brightness;
+
+ return 0;
+}
+
+static struct backlight_ops taal_bl_ops = {
+ .get_brightness = taal_bl_get_intensity,
+ .update_status = taal_bl_update_status,
+};
+
+static void taal_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static void taal_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
+ if (td->rotate == 0 || td->rotate == 2) {
+ *xres = dssdev->panel.timings.x_res;
+ *yres = dssdev->panel.timings.y_res;
+ } else {
+ *yres = dssdev->panel.timings.x_res;
+ *xres = dssdev->panel.timings.y_res;
+ }
+}
+
+static irqreturn_t taal_te_isr(int irq, void *data)
+{
+ struct omap_dss_device *dssdev = data;
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
+ complete_all(&td->te_completion);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t taal_num_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ u8 errors;
+ int r;
+
+ if (td->enabled) {
+ dsi_bus_lock();
+ r = taal_dcs_read_1(DCS_READ_NUM_ERRORS, &errors);
+ dsi_bus_unlock();
+ } else {
+ r = -ENODEV;
+ }
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", errors);
+}
+
+static ssize_t taal_hw_revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ u8 id1, id2, id3;
+ int r;
+
+ if (td->enabled) {
+ dsi_bus_lock();
+ r = taal_get_id(&id1, &id2, &id3);
+ dsi_bus_unlock();
+ } else {
+ r = -ENODEV;
+ }
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
+}
+
+static const char *cabc_modes[] = {
+ "off", /* used also always when CABC is not supported */
+ "ui",
+ "still-image",
+ "moving-image",
+};
+
+static ssize_t show_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ const char *mode_str;
+ int mode;
+ int len;
+
+ mode = td->cabc_mode;
+
+ mode_str = "unknown";
+ if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
+ mode_str = cabc_modes[mode];
+ len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
+
+ return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
+}
+
+static ssize_t store_cabc_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
+ if (sysfs_streq(cabc_modes[i], buf))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cabc_modes))
+ return -EINVAL;
+
+ if (td->enabled) {
+ dsi_bus_lock();
+ if (!td->cabc_broken)
+ taal_dcs_write_1(DCS_WRITE_CABC, i);
+ dsi_bus_unlock();
+ }
+
+ td->cabc_mode = i;
+
+ return count;
+}
+
+static ssize_t show_cabc_available_modes(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len;
+ int i;
+
+ for (i = 0, len = 0;
+ len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
+ len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
+ i ? " " : "", cabc_modes[i],
+ i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
+
+ return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
+}
+
+static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL);
+static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL);
+static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
+ show_cabc_mode, store_cabc_mode);
+static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
+ show_cabc_available_modes, NULL);
+
+static struct attribute *taal_attrs[] = {
+ &dev_attr_num_dsi_errors.attr,
+ &dev_attr_hw_revision.attr,
+ &dev_attr_cabc_mode.attr,
+ &dev_attr_cabc_available_modes.attr,
+ NULL,
+};
+
+static struct attribute_group taal_attr_group = {
+ .attrs = taal_attrs,
+};
+
+static int taal_probe(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td;
+ struct backlight_device *bldev;
+ int r;
+
+ const struct omap_video_timings taal_panel_timings = {
+ .x_res = 864,
+ .y_res = 480,
+ };
+
+ dev_dbg(&dssdev->dev, "probe\n");
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT;
+ dssdev->panel.timings = taal_panel_timings;
+ dssdev->ctrl.pixel_size = 24;
+
+ td = kzalloc(sizeof(*td), GFP_KERNEL);
+ if (!td) {
+ r = -ENOMEM;
+ goto err0;
+ }
+ td->dssdev = dssdev;
+
+ td->esd_wq = create_singlethread_workqueue("taal_esd");
+ if (td->esd_wq == NULL) {
+ dev_err(&dssdev->dev, "can't create ESD workqueue\n");
+ r = -ENOMEM;
+ goto err2;
+ }
+ INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
+
+ dev_set_drvdata(&dssdev->dev, td);
+
+ dssdev->get_timings = taal_get_timings;
+ dssdev->get_resolution = taal_get_resolution;
+
+ /* if no platform set_backlight() defined, presume DSI backlight
+ * control */
+ if (!dssdev->set_backlight)
+ td->use_dsi_bl = true;
+
+ bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
+ &taal_bl_ops);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err1;
+ }
+
+ td->bldev = bldev;
+
+ bldev->props.fb_blank = FB_BLANK_UNBLANK;
+ bldev->props.power = FB_BLANK_UNBLANK;
+ if (td->use_dsi_bl) {
+ bldev->props.max_brightness = 255;
+ bldev->props.brightness = 255;
+ } else {
+ bldev->props.max_brightness = 127;
+ bldev->props.brightness = 127;
+ }
+
+ taal_bl_update_status(bldev);
+
+ if (dssdev->phy.dsi.ext_te) {
+ int gpio = dssdev->phy.dsi.ext_te_gpio;
+
+ r = gpio_request(gpio, "taal irq");
+ if (r) {
+ dev_err(&dssdev->dev, "GPIO request failed\n");
+ goto err3;
+ }
+
+ gpio_direction_input(gpio);
+
+ r = request_irq(gpio_to_irq(gpio), taal_te_isr,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ "taal vsync", dssdev);
+
+ if (r) {
+ dev_err(&dssdev->dev, "IRQ request failed\n");
+ gpio_free(gpio);
+ goto err3;
+ }
+
+ init_completion(&td->te_completion);
+
+ td->use_ext_te = true;
+ }
+
+ r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to create sysfs files\n");
+ goto err4;
+ }
+
+ return 0;
+err4:
+ if (td->use_ext_te) {
+ int gpio = dssdev->phy.dsi.ext_te_gpio;
+ free_irq(gpio_to_irq(gpio), dssdev);
+ gpio_free(gpio);
+ }
+err3:
+ backlight_device_unregister(bldev);
+err2:
+ cancel_delayed_work_sync(&td->esd_work);
+ destroy_workqueue(td->esd_wq);
+err1:
+ kfree(td);
+err0:
+ return r;
+}
+
+static void taal_remove(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bldev;
+
+ dev_dbg(&dssdev->dev, "remove\n");
+
+ sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
+
+ if (td->use_ext_te) {
+ int gpio = dssdev->phy.dsi.ext_te_gpio;
+ free_irq(gpio_to_irq(gpio), dssdev);
+ gpio_free(gpio);
+ }
+
+ bldev = td->bldev;
+ bldev->props.power = FB_BLANK_POWERDOWN;
+ taal_bl_update_status(bldev);
+ backlight_device_unregister(bldev);
+
+ cancel_delayed_work_sync(&td->esd_work);
+ destroy_workqueue(td->esd_wq);
+
+ kfree(td);
+}
+
+static int taal_enable(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ u8 id1, id2, id3;
+ int r;
+
+ dev_dbg(&dssdev->dev, "enable\n");
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ return r;
+ }
+
+ /* it seems we have to wait a bit until taal is ready */
+ msleep(5);
+
+ r = taal_sleep_out(td);
+ if (r)
+ goto err;
+
+ r = taal_get_id(&id1, &id2, &id3);
+ if (r)
+ goto err;
+
+ /* on early revisions CABC is broken */
+ if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
+ td->cabc_broken = true;
+
+ taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
+ taal_dcs_write_1(DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */
+
+ taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
+
+ taal_set_addr_mode(td->rotate, td->mirror);
+ if (!td->cabc_broken)
+ taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
+
+ taal_dcs_write_0(DCS_DISPLAY_ON);
+
+#ifdef TAAL_USE_ESD_CHECK
+ queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+#endif
+
+ td->enabled = 1;
+
+ if (!td->intro_printed) {
+ dev_info(&dssdev->dev, "revision %02x.%02x.%02x\n",
+ id1, id2, id3);
+ if (td->cabc_broken)
+ dev_info(&dssdev->dev,
+ "old Taal version, CABC disabled\n");
+ td->intro_printed = true;
+ }
+
+ return 0;
+err:
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ return r;
+}
+
+static void taal_disable(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
+ dev_dbg(&dssdev->dev, "disable\n");
+
+ cancel_delayed_work(&td->esd_work);
+
+ taal_dcs_write_0(DCS_DISPLAY_OFF);
+ taal_sleep_in(td);
+
+ /* wait a bit so that the message goes through */
+ msleep(10);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ td->enabled = 0;
+}
+
+static int taal_suspend(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bldev = td->bldev;
+
+ bldev->props.power = FB_BLANK_POWERDOWN;
+ taal_bl_update_status(bldev);
+
+ return 0;
+}
+
+static int taal_resume(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ struct backlight_device *bldev = td->bldev;
+
+ bldev->props.power = FB_BLANK_UNBLANK;
+ taal_bl_update_status(bldev);
+
+ return 0;
+}
+
+static void taal_setup_update(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ taal_set_update_window(x, y, w, h);
+}
+
+static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ td->te_enabled = enable;
+
+ if (enable)
+ r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+ else
+ r = taal_dcs_write_0(DCS_TEAR_OFF);
+
+ return r;
+}
+
+static int taal_wait_te(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ long wait = msecs_to_jiffies(500);
+
+ if (!td->use_ext_te || !td->te_enabled)
+ return 0;
+
+ INIT_COMPLETION(td->te_completion);
+ wait = wait_for_completion_timeout(&td->te_completion, wait);
+ if (wait == 0) {
+ dev_err(&dssdev->dev, "timeout waiting TE\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
+static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
+
+ if (td->enabled) {
+ r = taal_set_addr_mode(rotate, td->mirror);
+
+ if (r)
+ return r;
+ }
+
+ td->rotate = rotate;
+
+ return 0;
+}
+
+static u8 taal_get_rotate(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ return td->rotate;
+}
+
+static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ int r;
+
+ dev_dbg(&dssdev->dev, "mirror %d\n", enable);
+
+ if (td->enabled) {
+ r = taal_set_addr_mode(td->rotate, enable);
+
+ if (r)
+ return r;
+ }
+
+ td->mirror = enable;
+
+ return 0;
+}
+
+static bool taal_get_mirror(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ return td->mirror;
+}
+
+static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
+{
+ u8 id1, id2, id3;
+ int r;
+
+ r = taal_dcs_read_1(DCS_GET_ID1, &id1);
+ if (r)
+ return r;
+ r = taal_dcs_read_1(DCS_GET_ID2, &id2);
+ if (r)
+ return r;
+ r = taal_dcs_read_1(DCS_GET_ID3, &id3);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int taal_memory_read(struct omap_dss_device *dssdev,
+ void *buf, size_t size,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ int r;
+ int first = 1;
+ int plen;
+ unsigned buf_used = 0;
+
+ if (size < w * h * 3)
+ return -ENOMEM;
+
+ size = min(w * h * 3,
+ dssdev->panel.timings.x_res *
+ dssdev->panel.timings.y_res * 3);
+
+	/* A plen of 1 or 2 goes into a short packet. Until the checksum error
+	 * is fixed, use short packets. A plen of 32 works, but bigger packets
+	 * seem to cause an error. */
+ if (size % 2)
+ plen = 1;
+ else
+ plen = 2;
+
+ taal_setup_update(dssdev, x, y, w, h);
+
+ r = dsi_vc_set_max_rx_packet_size(TCH, plen);
+ if (r)
+ return r;
+
+ while (buf_used < size) {
+ u8 dcs_cmd = first ? 0x2e : 0x3e;
+ first = 0;
+
+ r = dsi_vc_dcs_read(TCH, dcs_cmd,
+ buf + buf_used, size - buf_used);
+
+ if (r < 0) {
+ dev_err(&dssdev->dev, "read error\n");
+ goto err;
+ }
+
+ buf_used += r;
+
+ if (r < plen) {
+ dev_err(&dssdev->dev, "short read\n");
+ break;
+ }
+
+ if (signal_pending(current)) {
+ dev_err(&dssdev->dev, "signal pending, "
+ "aborting memory read\n");
+ r = -ERESTARTSYS;
+ goto err;
+ }
+ }
+
+ r = buf_used;
+
+err:
+ dsi_vc_set_max_rx_packet_size(TCH, 1);
+
+ return r;
+}
+
+static void taal_esd_work(struct work_struct *work)
+{
+ struct taal_data *td = container_of(work, struct taal_data,
+ esd_work.work);
+ struct omap_dss_device *dssdev = td->dssdev;
+ u8 state1, state2;
+ int r;
+
+ if (!td->enabled)
+ return;
+
+ dsi_bus_lock();
+
+ r = taal_dcs_read_1(DCS_RDDSDR, &state1);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to read Taal status\n");
+ goto err;
+ }
+
+ /* Run self diagnostics */
+ r = taal_sleep_out(td);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n");
+ goto err;
+ }
+
+ r = taal_dcs_read_1(DCS_RDDSDR, &state2);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to read Taal status\n");
+ goto err;
+ }
+
+ /* Each sleep out command will trigger a self diagnostic and flip
+ * Bit6 if the test passes.
+ */
+ if (!((state1 ^ state2) & (1 << 6))) {
+ dev_err(&dssdev->dev, "LCD self diagnostics failed\n");
+ goto err;
+ }
+ /* Self-diagnostics result is also shown on TE GPIO line. We need
+ * to re-enable TE after self diagnostics */
+ if (td->use_ext_te && td->te_enabled)
+ taal_enable_te(dssdev, true);
+
+ dsi_bus_unlock();
+
+ queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+
+ return;
+err:
+ dev_err(&dssdev->dev, "performing LCD reset\n");
+
+ taal_disable(dssdev);
+ taal_enable(dssdev);
+
+ dsi_bus_unlock();
+
+ queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
+}
+
+static struct omap_dss_driver taal_driver = {
+ .probe = taal_probe,
+ .remove = taal_remove,
+
+ .enable = taal_enable,
+ .disable = taal_disable,
+ .suspend = taal_suspend,
+ .resume = taal_resume,
+
+ .setup_update = taal_setup_update,
+ .enable_te = taal_enable_te,
+ .wait_for_te = taal_wait_te,
+ .set_rotate = taal_rotate,
+ .get_rotate = taal_get_rotate,
+ .set_mirror = taal_mirror,
+ .get_mirror = taal_get_mirror,
+ .run_test = taal_run_test,
+ .memory_read = taal_memory_read,
+
+ .driver = {
+ .name = "taal",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init taal_init(void)
+{
+ omap_dss_register_driver(&taal_driver);
+
+ return 0;
+}
+
+static void __exit taal_exit(void)
+{
+ omap_dss_unregister_driver(&taal_driver);
+}
+
+module_init(taal_init);
+module_exit(taal_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_DESCRIPTION("Taal Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
new file mode 100644
index 00000000000..71d8dec3063
--- /dev/null
+++ b/drivers/video/omap2/dss/Kconfig
@@ -0,0 +1,89 @@
+menuconfig OMAP2_DSS
+ tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)"
+ depends on ARCH_OMAP2 || ARCH_OMAP3
+ help
+ OMAP2/3 Display Subsystem support.
+
+if OMAP2_DSS
+
+config OMAP2_VRAM_SIZE
+ int "VRAM size (MB)"
+ range 0 32
+ default 0
+ help
+ The amount of SDRAM to reserve at boot time for video RAM use.
+	  This VRAM will be used by omapfb and other drivers that need a
+	  large contiguous RAM area for video use.
+
+ You can also set this with "vram=<bytes>" kernel argument, or
+ in the board file.
+
+config OMAP2_DSS_DEBUG_SUPPORT
+ bool "Debug support"
+ default y
+ help
+ This enables debug messages. You need to enable printing
+	  with the 'debug' module parameter.
+
+config OMAP2_DSS_RFBI
+ bool "RFBI support"
+ default n
+ help
+ MIPI DBI, or RFBI (Remote Framebuffer Interface), support.
+
+config OMAP2_DSS_VENC
+ bool "VENC support"
+ default y
+ help
+ OMAP Video Encoder support.
+
+config OMAP2_DSS_SDI
+ bool "SDI support"
+ depends on ARCH_OMAP3
+ default n
+ help
+ SDI (Serial Display Interface) support.
+
+config OMAP2_DSS_DSI
+ bool "DSI support"
+ depends on ARCH_OMAP3
+ default n
+ help
+ MIPI DSI support.
+
+config OMAP2_DSS_USE_DSI_PLL
+ bool "Use DSI PLL for PCLK (EXPERIMENTAL)"
+ default n
+ depends on OMAP2_DSS_DSI
+ help
+	  Use the DSI PLL to generate the pixel clock. Currently only for DPI output.
+	  The DSI PLL can generate higher and more precise pixel clocks.
+
+config OMAP2_DSS_FAKE_VSYNC
+ bool "Fake VSYNC irq from manual update displays"
+ default n
+ help
+ If this is selected, DSI will generate a fake DISPC VSYNC interrupt
+ when DSI has sent a frame. This is only needed with DSI or RFBI
+	  displays using manual update mode, when you want VSYNC to, for
+	  example, time animations.
+
+config OMAP2_DSS_MIN_FCK_PER_PCK
+ int "Minimum FCK/PCK ratio (for scaling)"
+ range 0 32
+ default 0
+ help
+ This can be used to adjust the minimum FCK/PCK ratio.
+
+ With this you can make sure that DISPC FCK is at least
+	  n x PCK. Video plane scaling requires a higher FCK than
+	  normal.
+
+ If this is set to 0, there's no extra constraint on the
+ DISPC FCK. However, the FCK will at minimum be
+ 2xPCK (if active matrix) or 3xPCK (if passive matrix).
+
+ Max FCK is 173MHz, so this doesn't work if your PCK
+ is very high.
+
+endif
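
To make the OMAP2_DSS_MIN_FCK_PER_PCK constraint concrete: with a minimum ratio of 3 and a 60 MHz pixel clock, DISPC FCK would have to reach 180 MHz, already above the 173 MHz maximum, so large ratios are only usable with modest pixel clocks. A trivial check along those lines (hypothetical helper, not part of this patch):

/* Hypothetical: checks whether pck * ratio still fits under the FCK maximum. */
static inline bool min_fck_per_pck_ok(unsigned long fck_max,
				      unsigned long pck,
				      unsigned int min_fck_per_pck)
{
	return (unsigned long long)pck * min_fck_per_pck <= fck_max;
}
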
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
new file mode 100644
index 00000000000..980c72c2db9
--- /dev/null
+++ b/drivers/video/omap2/dss/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_OMAP2_DSS) += omapdss.o
+omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
+omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
+omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
+omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
+omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
new file mode 100644
index 00000000000..29497a0c9a9
--- /dev/null
+++ b/drivers/video/omap2/dss/core.c
@@ -0,0 +1,919 @@
+/*
+ * linux/drivers/video/omap2/dss/core.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "CORE"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include <plat/display.h>
+#include <plat/clock.h>
+
+#include "dss.h"
+
+static struct {
+ struct platform_device *pdev;
+ int ctx_id;
+
+ struct clk *dss_ick;
+ struct clk *dss1_fck;
+ struct clk *dss2_fck;
+ struct clk *dss_54m_fck;
+ struct clk *dss_96m_fck;
+ unsigned num_clks_enabled;
+} core;
+
+static void dss_clk_enable_all_no_ctx(void);
+static void dss_clk_disable_all_no_ctx(void);
+static void dss_clk_enable_no_ctx(enum dss_clock clks);
+static void dss_clk_disable_no_ctx(enum dss_clock clks);
+
+static char *def_disp_name;
+module_param_named(def_disp, def_disp_name, charp, 0);
+MODULE_PARM_DESC(def_disp_name, "default display name");
+
+#ifdef DEBUG
+unsigned int dss_debug;
+module_param_named(debug, dss_debug, bool, 0644);
+#endif
+
+/* CONTEXT */
+static int dss_get_ctx_id(void)
+{
+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+ int r;
+
+ if (!pdata->get_last_off_on_transaction_id)
+ return 0;
+ r = pdata->get_last_off_on_transaction_id(&core.pdev->dev);
+ if (r < 0) {
+ dev_err(&core.pdev->dev, "getting transaction ID failed, "
+ "will force context restore\n");
+ r = -1;
+ }
+ return r;
+}
+
+int dss_need_ctx_restore(void)
+{
+ int id = dss_get_ctx_id();
+
+ if (id < 0 || id != core.ctx_id) {
+ DSSDBG("ctx id %d -> id %d\n",
+ core.ctx_id, id);
+ core.ctx_id = id;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static void save_all_ctx(void)
+{
+ DSSDBG("save context\n");
+
+ dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dss_save_context();
+ dispc_save_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_save_context();
+#endif
+
+ dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+static void restore_all_ctx(void)
+{
+ DSSDBG("restore context\n");
+
+ dss_clk_enable_all_no_ctx();
+
+ dss_restore_context();
+ dispc_restore_context();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_restore_context();
+#endif
+
+ dss_clk_disable_all_no_ctx();
+}
+
+/* CLOCKS */
+static void core_dump_clocks(struct seq_file *s)
+{
+ int i;
+ struct clk *clocks[5] = {
+ core.dss_ick,
+ core.dss1_fck,
+ core.dss2_fck,
+ core.dss_54m_fck,
+ core.dss_96m_fck
+ };
+
+ seq_printf(s, "- CORE -\n");
+
+ seq_printf(s, "internal clk count\t\t%u\n", core.num_clks_enabled);
+
+ for (i = 0; i < 5; i++) {
+ if (!clocks[i])
+ continue;
+ seq_printf(s, "%-15s\t%lu\t%d\n",
+ clocks[i]->name,
+ clk_get_rate(clocks[i]),
+ clocks[i]->usecount);
+ }
+}
+
+static int dss_get_clock(struct clk **clock, const char *clk_name)
+{
+ struct clk *clk;
+
+ clk = clk_get(&core.pdev->dev, clk_name);
+
+ if (IS_ERR(clk)) {
+ DSSERR("can't get clock %s", clk_name);
+ return PTR_ERR(clk);
+ }
+
+ *clock = clk;
+
+ DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
+
+ return 0;
+}
+
+static int dss_get_clocks(void)
+{
+ int r;
+
+ core.dss_ick = NULL;
+ core.dss1_fck = NULL;
+ core.dss2_fck = NULL;
+ core.dss_54m_fck = NULL;
+ core.dss_96m_fck = NULL;
+
+ r = dss_get_clock(&core.dss_ick, "ick");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&core.dss1_fck, "dss1_fck");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&core.dss2_fck, "dss2_fck");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&core.dss_54m_fck, "tv_fck");
+ if (r)
+ goto err;
+
+ r = dss_get_clock(&core.dss_96m_fck, "video_fck");
+ if (r)
+ goto err;
+
+ return 0;
+
+err:
+ if (core.dss_ick)
+ clk_put(core.dss_ick);
+ if (core.dss1_fck)
+ clk_put(core.dss1_fck);
+ if (core.dss2_fck)
+ clk_put(core.dss2_fck);
+ if (core.dss_54m_fck)
+ clk_put(core.dss_54m_fck);
+ if (core.dss_96m_fck)
+ clk_put(core.dss_96m_fck);
+
+ return r;
+}
+
+static void dss_put_clocks(void)
+{
+ if (core.dss_96m_fck)
+ clk_put(core.dss_96m_fck);
+ clk_put(core.dss_54m_fck);
+ clk_put(core.dss1_fck);
+ clk_put(core.dss2_fck);
+ clk_put(core.dss_ick);
+}
+
+unsigned long dss_clk_get_rate(enum dss_clock clk)
+{
+ switch (clk) {
+ case DSS_CLK_ICK:
+ return clk_get_rate(core.dss_ick);
+ case DSS_CLK_FCK1:
+ return clk_get_rate(core.dss1_fck);
+ case DSS_CLK_FCK2:
+ return clk_get_rate(core.dss2_fck);
+ case DSS_CLK_54M:
+ return clk_get_rate(core.dss_54m_fck);
+ case DSS_CLK_96M:
+ return clk_get_rate(core.dss_96m_fck);
+ }
+
+ BUG();
+ return 0;
+}
+
+static unsigned count_clk_bits(enum dss_clock clks)
+{
+ unsigned num_clks = 0;
+
+ if (clks & DSS_CLK_ICK)
+ ++num_clks;
+ if (clks & DSS_CLK_FCK1)
+ ++num_clks;
+ if (clks & DSS_CLK_FCK2)
+ ++num_clks;
+ if (clks & DSS_CLK_54M)
+ ++num_clks;
+ if (clks & DSS_CLK_96M)
+ ++num_clks;
+
+ return num_clks;
+}
+
+static void dss_clk_enable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_enable(core.dss_ick);
+ if (clks & DSS_CLK_FCK1)
+ clk_enable(core.dss1_fck);
+ if (clks & DSS_CLK_FCK2)
+ clk_enable(core.dss2_fck);
+ if (clks & DSS_CLK_54M)
+ clk_enable(core.dss_54m_fck);
+ if (clks & DSS_CLK_96M)
+ clk_enable(core.dss_96m_fck);
+
+ core.num_clks_enabled += num_clks;
+}
+
+void dss_clk_enable(enum dss_clock clks)
+{
+ dss_clk_enable_no_ctx(clks);
+
+ if (cpu_is_omap34xx() && dss_need_ctx_restore())
+ restore_all_ctx();
+}
+
+static void dss_clk_disable_no_ctx(enum dss_clock clks)
+{
+ unsigned num_clks = count_clk_bits(clks);
+
+ if (clks & DSS_CLK_ICK)
+ clk_disable(core.dss_ick);
+ if (clks & DSS_CLK_FCK1)
+ clk_disable(core.dss1_fck);
+ if (clks & DSS_CLK_FCK2)
+ clk_disable(core.dss2_fck);
+ if (clks & DSS_CLK_54M)
+ clk_disable(core.dss_54m_fck);
+ if (clks & DSS_CLK_96M)
+ clk_disable(core.dss_96m_fck);
+
+ core.num_clks_enabled -= num_clks;
+}
+
+void dss_clk_disable(enum dss_clock clks)
+{
+ if (cpu_is_omap34xx()) {
+ unsigned num_clks = count_clk_bits(clks);
+
+ BUG_ON(core.num_clks_enabled < num_clks);
+
+ if (core.num_clks_enabled == num_clks)
+ save_all_ctx();
+ }
+
+ dss_clk_disable_no_ctx(clks);
+}
+
+static void dss_clk_enable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_96M;
+ dss_clk_enable_no_ctx(clks);
+}
+
+static void dss_clk_disable_all_no_ctx(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_96M;
+ dss_clk_disable_no_ctx(clks);
+}
+
+static void dss_clk_disable_all(void)
+{
+ enum dss_clock clks;
+
+ clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
+ if (cpu_is_omap34xx())
+ clks |= DSS_CLK_96M;
+ dss_clk_disable(clks);
+}
+
+/* DEBUGFS */
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+static void dss_debug_dump_clocks(struct seq_file *s)
+{
+ core_dump_clocks(s);
+ dss_dump_clocks(s);
+ dispc_dump_clocks(s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_dump_clocks(s);
+#endif
+}
+
+static int dss_debug_show(struct seq_file *s, void *unused)
+{
+ void (*func)(struct seq_file *) = s->private;
+ func(s);
+ return 0;
+}
+
+static int dss_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dss_debug_show, inode->i_private);
+}
+
+static const struct file_operations dss_debug_fops = {
+ .open = dss_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *dss_debugfs_dir;
+
+static int dss_initialize_debugfs(void)
+{
+ dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
+ if (IS_ERR(dss_debugfs_dir)) {
+ int err = PTR_ERR(dss_debugfs_dir);
+ dss_debugfs_dir = NULL;
+ return err;
+ }
+
+ debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
+ &dss_debug_dump_clocks, &dss_debug_fops);
+
+ debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
+ &dss_dump_regs, &dss_debug_fops);
+ debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
+ &dispc_dump_regs, &dss_debug_fops);
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
+ &rfbi_dump_regs, &dss_debug_fops);
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ debugfs_create_file("dsi", S_IRUGO, dss_debugfs_dir,
+ &dsi_dump_regs, &dss_debug_fops);
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
+ &venc_dump_regs, &dss_debug_fops);
+#endif
+ return 0;
+}
+
+static void dss_uninitialize_debugfs(void)
+{
+ if (dss_debugfs_dir)
+ debugfs_remove_recursive(dss_debugfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
+
+/* PLATFORM DEVICE */
+static int omap_dss_probe(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int skip_init = 0;
+ int r;
+ int i;
+
+ core.pdev = pdev;
+
+ dss_init_overlay_managers(pdev);
+ dss_init_overlays(pdev);
+
+ r = dss_get_clocks();
+ if (r)
+ goto fail0;
+
+ dss_clk_enable_all_no_ctx();
+
+ core.ctx_id = dss_get_ctx_id();
+ DSSDBG("initial ctx id %u\n", core.ctx_id);
+
+#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
+ /* DISPC_CONTROL */
+ if (omap_readl(0x48050440) & 1) /* LCD enabled? */
+ skip_init = 1;
+#endif
+
+ r = dss_init(skip_init);
+ if (r) {
+ DSSERR("Failed to initialize DSS\n");
+ goto fail0;
+ }
+
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ r = rfbi_init();
+ if (r) {
+ DSSERR("Failed to initialize rfbi\n");
+ goto fail0;
+ }
+#endif
+
+ r = dpi_init();
+ if (r) {
+ DSSERR("Failed to initialize dpi\n");
+ goto fail0;
+ }
+
+ r = dispc_init();
+ if (r) {
+ DSSERR("Failed to initialize dispc\n");
+ goto fail0;
+ }
+#ifdef CONFIG_OMAP2_DSS_VENC
+ r = venc_init(pdev);
+ if (r) {
+ DSSERR("Failed to initialize venc\n");
+ goto fail0;
+ }
+#endif
+ if (cpu_is_omap34xx()) {
+#ifdef CONFIG_OMAP2_DSS_SDI
+ r = sdi_init(skip_init);
+ if (r) {
+ DSSERR("Failed to initialize SDI\n");
+ goto fail0;
+ }
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ r = dsi_init(pdev);
+ if (r) {
+ DSSERR("Failed to initialize DSI\n");
+ goto fail0;
+ }
+#endif
+ }
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+ r = dss_initialize_debugfs();
+ if (r)
+ goto fail0;
+#endif
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ r = omap_dss_register_device(dssdev);
+ if (r)
+ DSSERR("device reg failed %d\n", i);
+
+ if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
+ pdata->default_device = dssdev;
+ }
+
+ dss_clk_disable_all();
+
+ return 0;
+
+ /* XXX fail correctly */
+fail0:
+ return r;
+}
+
+static int omap_dss_remove(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int i;
+ int c;
+
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
+ dss_uninitialize_debugfs();
+#endif
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ venc_exit();
+#endif
+ dispc_exit();
+ dpi_exit();
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ rfbi_exit();
+#endif
+ if (cpu_is_omap34xx()) {
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_exit();
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+ sdi_exit();
+#endif
+ }
+
+ dss_exit();
+
+ /* these should be removed at some point */
+ c = core.dss_ick->usecount;
+ if (c > 0) {
+ DSSERR("warning: dss_ick usecount %d, disabling\n", c);
+ while (c-- > 0)
+ clk_disable(core.dss_ick);
+ }
+
+ c = core.dss1_fck->usecount;
+ if (c > 0) {
+ DSSERR("warning: dss1_fck usecount %d, disabling\n", c);
+ while (c-- > 0)
+ clk_disable(core.dss1_fck);
+ }
+
+ c = core.dss2_fck->usecount;
+ if (c > 0) {
+ DSSERR("warning: dss2_fck usecount %d, disabling\n", c);
+ while (c-- > 0)
+ clk_disable(core.dss2_fck);
+ }
+
+ c = core.dss_54m_fck->usecount;
+ if (c > 0) {
+ DSSERR("warning: dss_54m_fck usecount %d, disabling\n", c);
+ while (c-- > 0)
+ clk_disable(core.dss_54m_fck);
+ }
+
+ if (core.dss_96m_fck) {
+ c = core.dss_96m_fck->usecount;
+ if (c > 0) {
+ DSSERR("warning: dss_96m_fck usecount %d, disabling\n",
+ c);
+ while (c-- > 0)
+ clk_disable(core.dss_96m_fck);
+ }
+ }
+
+ dss_put_clocks();
+
+ dss_uninit_overlays(pdev);
+ dss_uninit_overlay_managers(pdev);
+
+ for (i = 0; i < pdata->num_devices; ++i)
+ omap_dss_unregister_device(pdata->devices[i]);
+
+ return 0;
+}
+
+static void omap_dss_shutdown(struct platform_device *pdev)
+{
+ DSSDBG("shutdown\n");
+ dss_disable_all_devices();
+}
+
+static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ DSSDBG("suspend %d\n", state.event);
+
+ return dss_suspend_all_devices();
+}
+
+static int omap_dss_resume(struct platform_device *pdev)
+{
+ DSSDBG("resume\n");
+
+ return dss_resume_all_devices();
+}
+
+static struct platform_driver omap_dss_driver = {
+ .probe = omap_dss_probe,
+ .remove = omap_dss_remove,
+ .shutdown = omap_dss_shutdown,
+ .suspend = omap_dss_suspend,
+ .resume = omap_dss_resume,
+ .driver = {
+ .name = "omapdss",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* BUS */
+static int dss_bus_match(struct device *dev, struct device_driver *driver)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+
+ DSSDBG("bus_match. dev %s/%s, drv %s\n",
+ dev_name(dev), dssdev->driver_name, driver->name);
+
+ return strcmp(dssdev->driver_name, driver->name) == 0;
+}
+
+static ssize_t device_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ dssdev->name ?
+ dssdev->name : "");
+}
+
+static struct device_attribute default_dev_attrs[] = {
+ __ATTR(name, S_IRUGO, device_name_show, NULL),
+ __ATTR_NULL,
+};
+
+static ssize_t driver_name_show(struct device_driver *drv, char *buf)
+{
+ struct omap_dss_driver *dssdrv = to_dss_driver(drv);
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ dssdrv->driver.name ?
+ dssdrv->driver.name : "");
+}
+static struct driver_attribute default_drv_attrs[] = {
+ __ATTR(name, S_IRUGO, driver_name_show, NULL),
+ __ATTR_NULL,
+};
+
+static struct bus_type dss_bus_type = {
+ .name = "omapdss",
+ .match = dss_bus_match,
+ .dev_attrs = default_dev_attrs,
+ .drv_attrs = default_drv_attrs,
+};
+
+static void dss_bus_release(struct device *dev)
+{
+ DSSDBG("bus_release\n");
+}
+
+static struct device dss_bus = {
+ .release = dss_bus_release,
+};
+
+struct bus_type *dss_get_bus(void)
+{
+ return &dss_bus_type;
+}
+
+/* DRIVER */
+static int dss_driver_probe(struct device *dev)
+{
+ int r;
+ struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+ bool force;
+
+ DSSDBG("driver_probe: dev %s/%s, drv %s\n",
+ dev_name(dev), dssdev->driver_name,
+ dssdrv->driver.name);
+
+ dss_init_device(core.pdev, dssdev);
+
+ /* skip this if the device is behind a ctrl */
+ if (!dssdev->panel.ctrl) {
+ force = pdata->default_device == dssdev;
+ dss_recheck_connections(dssdev, force);
+ }
+
+ r = dssdrv->probe(dssdev);
+
+ if (r) {
+ DSSERR("driver probe failed: %d\n", r);
+ return r;
+ }
+
+ DSSDBG("probe done for device %s\n", dev_name(dev));
+
+ dssdev->driver = dssdrv;
+
+ return 0;
+}
+
+static int dss_driver_remove(struct device *dev)
+{
+ struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+
+ DSSDBG("driver_remove: dev %s/%s\n", dev_name(dev),
+ dssdev->driver_name);
+
+ dssdrv->remove(dssdev);
+
+ dss_uninit_device(core.pdev, dssdev);
+
+ dssdev->driver = NULL;
+
+ return 0;
+}
+
+int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
+{
+ dssdriver->driver.bus = &dss_bus_type;
+ dssdriver->driver.probe = dss_driver_probe;
+ dssdriver->driver.remove = dss_driver_remove;
+ return driver_register(&dssdriver->driver);
+}
+EXPORT_SYMBOL(omap_dss_register_driver);
+
+void omap_dss_unregister_driver(struct omap_dss_driver *dssdriver)
+{
+ driver_unregister(&dssdriver->driver);
+}
+EXPORT_SYMBOL(omap_dss_unregister_driver);
+
+/* DEVICE */
+static void reset_device(struct device *dev, int check)
+{
+ u8 *dev_p = (u8 *)dev;
+ u8 *dev_end = dev_p + sizeof(*dev);
+ void *saved_pdata;
+
+ saved_pdata = dev->platform_data;
+ if (check) {
+ /*
+ * Check if there is any other setting than platform_data
+ * in struct device; warn that these will be reset by our
+ * init.
+ */
+ dev->platform_data = NULL;
+ while (dev_p < dev_end) {
+ if (*dev_p) {
+ WARN("%s: struct device fields will be "
+ "discarded\n",
+ __func__);
+ break;
+ }
+ dev_p++;
+ }
+ }
+ memset(dev, 0, sizeof(*dev));
+ dev->platform_data = saved_pdata;
+}
+
+
+static void omap_dss_dev_release(struct device *dev)
+{
+ reset_device(dev, 0);
+}
+
+int omap_dss_register_device(struct omap_dss_device *dssdev)
+{
+ static int dev_num;
+ static int panel_num;
+ int r;
+
+ WARN_ON(!dssdev->driver_name);
+
+ reset_device(&dssdev->dev, 1);
+ dssdev->dev.bus = &dss_bus_type;
+ dssdev->dev.parent = &dss_bus;
+ dssdev->dev.release = omap_dss_dev_release;
+ dev_set_name(&dssdev->dev, "display%d", dev_num++);
+ r = device_register(&dssdev->dev);
+ if (r)
+ return r;
+
+ if (dssdev->ctrl.panel) {
+ struct omap_dss_device *panel = dssdev->ctrl.panel;
+
+ panel->panel.ctrl = dssdev;
+
+ reset_device(&panel->dev, 1);
+ panel->dev.bus = &dss_bus_type;
+ panel->dev.parent = &dssdev->dev;
+ panel->dev.release = omap_dss_dev_release;
+ dev_set_name(&panel->dev, "panel%d", panel_num++);
+ r = device_register(&panel->dev);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+void omap_dss_unregister_device(struct omap_dss_device *dssdev)
+{
+ device_unregister(&dssdev->dev);
+
+ if (dssdev->ctrl.panel) {
+ struct omap_dss_device *panel = dssdev->ctrl.panel;
+ device_unregister(&panel->dev);
+ }
+}
+
+/* BUS */
+static int omap_dss_bus_register(void)
+{
+ int r;
+
+ r = bus_register(&dss_bus_type);
+ if (r) {
+ DSSERR("bus register failed\n");
+ return r;
+ }
+
+ dev_set_name(&dss_bus, "omapdss");
+ r = device_register(&dss_bus);
+ if (r) {
+ DSSERR("bus driver register failed\n");
+ bus_unregister(&dss_bus_type);
+ return r;
+ }
+
+ return 0;
+}
+
+/* INIT */
+
+#ifdef CONFIG_OMAP2_DSS_MODULE
+static void omap_dss_bus_unregister(void)
+{
+ device_unregister(&dss_bus);
+
+ bus_unregister(&dss_bus_type);
+}
+
+static int __init omap_dss_init(void)
+{
+ int r;
+
+ r = omap_dss_bus_register();
+ if (r)
+ return r;
+
+ r = platform_driver_register(&omap_dss_driver);
+ if (r) {
+ omap_dss_bus_unregister();
+ return r;
+ }
+
+ return 0;
+}
+
+static void __exit omap_dss_exit(void)
+{
+ platform_driver_unregister(&omap_dss_driver);
+
+ omap_dss_bus_unregister();
+}
+
+module_init(omap_dss_init);
+module_exit(omap_dss_exit);
+#else
+static int __init omap_dss_init(void)
+{
+ return omap_dss_bus_register();
+}
+
+static int __init omap_dss_init2(void)
+{
+ return platform_driver_register(&omap_dss_driver);
+}
+
+core_initcall(omap_dss_init);
+device_initcall(omap_dss_init2);
+#endif
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
new file mode 100644
index 00000000000..6dabf4b2f00
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc.c
@@ -0,0 +1,3091 @@
+/*
+ * linux/drivers/video/omap2/dss/dispc.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DISPC"
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include <plat/sram.h>
+#include <plat/clock.h>
+
+#include <plat/display.h>
+
+#include "dss.h"
+
+/* DISPC */
+#define DISPC_BASE 0x48050400
+
+#define DISPC_SZ_REGS SZ_1K
+
+struct dispc_reg { u16 idx; };
+
+#define DISPC_REG(idx) ((const struct dispc_reg) { idx })
+
+/* DISPC common */
+#define DISPC_REVISION DISPC_REG(0x0000)
+#define DISPC_SYSCONFIG DISPC_REG(0x0010)
+#define DISPC_SYSSTATUS DISPC_REG(0x0014)
+#define DISPC_IRQSTATUS DISPC_REG(0x0018)
+#define DISPC_IRQENABLE DISPC_REG(0x001C)
+#define DISPC_CONTROL DISPC_REG(0x0040)
+#define DISPC_CONFIG DISPC_REG(0x0044)
+#define DISPC_CAPABLE DISPC_REG(0x0048)
+#define DISPC_DEFAULT_COLOR0 DISPC_REG(0x004C)
+#define DISPC_DEFAULT_COLOR1 DISPC_REG(0x0050)
+#define DISPC_TRANS_COLOR0 DISPC_REG(0x0054)
+#define DISPC_TRANS_COLOR1 DISPC_REG(0x0058)
+#define DISPC_LINE_STATUS DISPC_REG(0x005C)
+#define DISPC_LINE_NUMBER DISPC_REG(0x0060)
+#define DISPC_TIMING_H DISPC_REG(0x0064)
+#define DISPC_TIMING_V DISPC_REG(0x0068)
+#define DISPC_POL_FREQ DISPC_REG(0x006C)
+#define DISPC_DIVISOR DISPC_REG(0x0070)
+#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
+#define DISPC_SIZE_DIG DISPC_REG(0x0078)
+#define DISPC_SIZE_LCD DISPC_REG(0x007C)
+
+/* DISPC GFX plane */
+#define DISPC_GFX_BA0 DISPC_REG(0x0080)
+#define DISPC_GFX_BA1 DISPC_REG(0x0084)
+#define DISPC_GFX_POSITION DISPC_REG(0x0088)
+#define DISPC_GFX_SIZE DISPC_REG(0x008C)
+#define DISPC_GFX_ATTRIBUTES DISPC_REG(0x00A0)
+#define DISPC_GFX_FIFO_THRESHOLD DISPC_REG(0x00A4)
+#define DISPC_GFX_FIFO_SIZE_STATUS DISPC_REG(0x00A8)
+#define DISPC_GFX_ROW_INC DISPC_REG(0x00AC)
+#define DISPC_GFX_PIXEL_INC DISPC_REG(0x00B0)
+#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
+#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8)
+
+#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4)
+#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8)
+#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC)
+
+#define DISPC_CPR_COEF_R DISPC_REG(0x0220)
+#define DISPC_CPR_COEF_G DISPC_REG(0x0224)
+#define DISPC_CPR_COEF_B DISPC_REG(0x0228)
+
+#define DISPC_GFX_PRELOAD DISPC_REG(0x022C)
+
+/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */
+#define DISPC_VID_REG(n, idx) DISPC_REG(0x00BC + (n)*0x90 + idx)
+
+#define DISPC_VID_BA0(n) DISPC_VID_REG(n, 0x0000)
+#define DISPC_VID_BA1(n) DISPC_VID_REG(n, 0x0004)
+#define DISPC_VID_POSITION(n) DISPC_VID_REG(n, 0x0008)
+#define DISPC_VID_SIZE(n) DISPC_VID_REG(n, 0x000C)
+#define DISPC_VID_ATTRIBUTES(n) DISPC_VID_REG(n, 0x0010)
+#define DISPC_VID_FIFO_THRESHOLD(n) DISPC_VID_REG(n, 0x0014)
+#define DISPC_VID_FIFO_SIZE_STATUS(n) DISPC_VID_REG(n, 0x0018)
+#define DISPC_VID_ROW_INC(n) DISPC_VID_REG(n, 0x001C)
+#define DISPC_VID_PIXEL_INC(n) DISPC_VID_REG(n, 0x0020)
+#define DISPC_VID_FIR(n) DISPC_VID_REG(n, 0x0024)
+#define DISPC_VID_PICTURE_SIZE(n) DISPC_VID_REG(n, 0x0028)
+#define DISPC_VID_ACCU0(n) DISPC_VID_REG(n, 0x002C)
+#define DISPC_VID_ACCU1(n) DISPC_VID_REG(n, 0x0030)
+
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_H(n, i) DISPC_REG(0x00F0 + (n)*0x90 + (i)*0x8)
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_HV(n, i) DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8)
+/* coef index i = {0, 1, 2, 3, 4} */
+#define DISPC_VID_CONV_COEF(n, i) DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4)
+/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
+#define DISPC_VID_FIR_COEF_V(n, i) DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4)
+
+#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
+
+
+#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
+ DISPC_IRQ_OCP_ERR | \
+ DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
+ DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
+ DISPC_IRQ_SYNC_LOST | \
+ DISPC_IRQ_SYNC_LOST_DIGIT)
+
+#define DISPC_MAX_NR_ISRS 8
+
+struct omap_dispc_isr_data {
+ omap_dispc_isr_t isr;
+ void *arg;
+ u32 mask;
+};
+
+#define REG_GET(idx, start, end) \
+ FLD_GET(dispc_read_reg(idx), start, end)
+
+#define REG_FLD_MOD(idx, val, start, end) \
+ dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
+
+static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
+ DISPC_VID_ATTRIBUTES(0),
+ DISPC_VID_ATTRIBUTES(1) };
+
+static struct {
+ void __iomem *base;
+
+ u32 fifo_size[3];
+
+ spinlock_t irq_lock;
+ u32 irq_error_mask;
+ struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+ u32 error_irqs;
+ struct work_struct error_work;
+
+ u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
+} dispc;
+
+static void _omap_dispc_set_irqs(void);
+
+static inline void dispc_write_reg(const struct dispc_reg idx, u32 val)
+{
+ __raw_writel(val, dispc.base + idx.idx);
+}
+
+static inline u32 dispc_read_reg(const struct dispc_reg idx)
+{
+ return __raw_readl(dispc.base + idx.idx);
+}
+
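+/* Save (SR) or restore (RR) a single DISPC register to/from dispc.ctx */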
+#define SR(reg) \
+ dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
+#define RR(reg) \
+ dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)])
+
+void dispc_save_context(void)
+{
+ if (cpu_is_omap24xx())
+ return;
+
+ SR(SYSCONFIG);
+ SR(IRQENABLE);
+ SR(CONTROL);
+ SR(CONFIG);
+ SR(DEFAULT_COLOR0);
+ SR(DEFAULT_COLOR1);
+ SR(TRANS_COLOR0);
+ SR(TRANS_COLOR1);
+ SR(LINE_NUMBER);
+ SR(TIMING_H);
+ SR(TIMING_V);
+ SR(POL_FREQ);
+ SR(DIVISOR);
+ SR(GLOBAL_ALPHA);
+ SR(SIZE_DIG);
+ SR(SIZE_LCD);
+
+ SR(GFX_BA0);
+ SR(GFX_BA1);
+ SR(GFX_POSITION);
+ SR(GFX_SIZE);
+ SR(GFX_ATTRIBUTES);
+ SR(GFX_FIFO_THRESHOLD);
+ SR(GFX_ROW_INC);
+ SR(GFX_PIXEL_INC);
+ SR(GFX_WINDOW_SKIP);
+ SR(GFX_TABLE_BA);
+
+ SR(DATA_CYCLE1);
+ SR(DATA_CYCLE2);
+ SR(DATA_CYCLE3);
+
+ SR(CPR_COEF_R);
+ SR(CPR_COEF_G);
+ SR(CPR_COEF_B);
+
+ SR(GFX_PRELOAD);
+
+ /* VID1 */
+ SR(VID_BA0(0));
+ SR(VID_BA1(0));
+ SR(VID_POSITION(0));
+ SR(VID_SIZE(0));
+ SR(VID_ATTRIBUTES(0));
+ SR(VID_FIFO_THRESHOLD(0));
+ SR(VID_ROW_INC(0));
+ SR(VID_PIXEL_INC(0));
+ SR(VID_FIR(0));
+ SR(VID_PICTURE_SIZE(0));
+ SR(VID_ACCU0(0));
+ SR(VID_ACCU1(0));
+
+ SR(VID_FIR_COEF_H(0, 0));
+ SR(VID_FIR_COEF_H(0, 1));
+ SR(VID_FIR_COEF_H(0, 2));
+ SR(VID_FIR_COEF_H(0, 3));
+ SR(VID_FIR_COEF_H(0, 4));
+ SR(VID_FIR_COEF_H(0, 5));
+ SR(VID_FIR_COEF_H(0, 6));
+ SR(VID_FIR_COEF_H(0, 7));
+
+ SR(VID_FIR_COEF_HV(0, 0));
+ SR(VID_FIR_COEF_HV(0, 1));
+ SR(VID_FIR_COEF_HV(0, 2));
+ SR(VID_FIR_COEF_HV(0, 3));
+ SR(VID_FIR_COEF_HV(0, 4));
+ SR(VID_FIR_COEF_HV(0, 5));
+ SR(VID_FIR_COEF_HV(0, 6));
+ SR(VID_FIR_COEF_HV(0, 7));
+
+ SR(VID_CONV_COEF(0, 0));
+ SR(VID_CONV_COEF(0, 1));
+ SR(VID_CONV_COEF(0, 2));
+ SR(VID_CONV_COEF(0, 3));
+ SR(VID_CONV_COEF(0, 4));
+
+ SR(VID_FIR_COEF_V(0, 0));
+ SR(VID_FIR_COEF_V(0, 1));
+ SR(VID_FIR_COEF_V(0, 2));
+ SR(VID_FIR_COEF_V(0, 3));
+ SR(VID_FIR_COEF_V(0, 4));
+ SR(VID_FIR_COEF_V(0, 5));
+ SR(VID_FIR_COEF_V(0, 6));
+ SR(VID_FIR_COEF_V(0, 7));
+
+ SR(VID_PRELOAD(0));
+
+ /* VID2 */
+ SR(VID_BA0(1));
+ SR(VID_BA1(1));
+ SR(VID_POSITION(1));
+ SR(VID_SIZE(1));
+ SR(VID_ATTRIBUTES(1));
+ SR(VID_FIFO_THRESHOLD(1));
+ SR(VID_ROW_INC(1));
+ SR(VID_PIXEL_INC(1));
+ SR(VID_FIR(1));
+ SR(VID_PICTURE_SIZE(1));
+ SR(VID_ACCU0(1));
+ SR(VID_ACCU1(1));
+
+ SR(VID_FIR_COEF_H(1, 0));
+ SR(VID_FIR_COEF_H(1, 1));
+ SR(VID_FIR_COEF_H(1, 2));
+ SR(VID_FIR_COEF_H(1, 3));
+ SR(VID_FIR_COEF_H(1, 4));
+ SR(VID_FIR_COEF_H(1, 5));
+ SR(VID_FIR_COEF_H(1, 6));
+ SR(VID_FIR_COEF_H(1, 7));
+
+ SR(VID_FIR_COEF_HV(1, 0));
+ SR(VID_FIR_COEF_HV(1, 1));
+ SR(VID_FIR_COEF_HV(1, 2));
+ SR(VID_FIR_COEF_HV(1, 3));
+ SR(VID_FIR_COEF_HV(1, 4));
+ SR(VID_FIR_COEF_HV(1, 5));
+ SR(VID_FIR_COEF_HV(1, 6));
+ SR(VID_FIR_COEF_HV(1, 7));
+
+ SR(VID_CONV_COEF(1, 0));
+ SR(VID_CONV_COEF(1, 1));
+ SR(VID_CONV_COEF(1, 2));
+ SR(VID_CONV_COEF(1, 3));
+ SR(VID_CONV_COEF(1, 4));
+
+ SR(VID_FIR_COEF_V(1, 0));
+ SR(VID_FIR_COEF_V(1, 1));
+ SR(VID_FIR_COEF_V(1, 2));
+ SR(VID_FIR_COEF_V(1, 3));
+ SR(VID_FIR_COEF_V(1, 4));
+ SR(VID_FIR_COEF_V(1, 5));
+ SR(VID_FIR_COEF_V(1, 6));
+ SR(VID_FIR_COEF_V(1, 7));
+
+ SR(VID_PRELOAD(1));
+}
+
+void dispc_restore_context(void)
+{
+ RR(SYSCONFIG);
+ RR(IRQENABLE);
+ /*RR(CONTROL);*/
+ RR(CONFIG);
+ RR(DEFAULT_COLOR0);
+ RR(DEFAULT_COLOR1);
+ RR(TRANS_COLOR0);
+ RR(TRANS_COLOR1);
+ RR(LINE_NUMBER);
+ RR(TIMING_H);
+ RR(TIMING_V);
+ RR(POL_FREQ);
+ RR(DIVISOR);
+ RR(GLOBAL_ALPHA);
+ RR(SIZE_DIG);
+ RR(SIZE_LCD);
+
+ RR(GFX_BA0);
+ RR(GFX_BA1);
+ RR(GFX_POSITION);
+ RR(GFX_SIZE);
+ RR(GFX_ATTRIBUTES);
+ RR(GFX_FIFO_THRESHOLD);
+ RR(GFX_ROW_INC);
+ RR(GFX_PIXEL_INC);
+ RR(GFX_WINDOW_SKIP);
+ RR(GFX_TABLE_BA);
+
+ RR(DATA_CYCLE1);
+ RR(DATA_CYCLE2);
+ RR(DATA_CYCLE3);
+
+ RR(CPR_COEF_R);
+ RR(CPR_COEF_G);
+ RR(CPR_COEF_B);
+
+ RR(GFX_PRELOAD);
+
+ /* VID1 */
+ RR(VID_BA0(0));
+ RR(VID_BA1(0));
+ RR(VID_POSITION(0));
+ RR(VID_SIZE(0));
+ RR(VID_ATTRIBUTES(0));
+ RR(VID_FIFO_THRESHOLD(0));
+ RR(VID_ROW_INC(0));
+ RR(VID_PIXEL_INC(0));
+ RR(VID_FIR(0));
+ RR(VID_PICTURE_SIZE(0));
+ RR(VID_ACCU0(0));
+ RR(VID_ACCU1(0));
+
+ RR(VID_FIR_COEF_H(0, 0));
+ RR(VID_FIR_COEF_H(0, 1));
+ RR(VID_FIR_COEF_H(0, 2));
+ RR(VID_FIR_COEF_H(0, 3));
+ RR(VID_FIR_COEF_H(0, 4));
+ RR(VID_FIR_COEF_H(0, 5));
+ RR(VID_FIR_COEF_H(0, 6));
+ RR(VID_FIR_COEF_H(0, 7));
+
+ RR(VID_FIR_COEF_HV(0, 0));
+ RR(VID_FIR_COEF_HV(0, 1));
+ RR(VID_FIR_COEF_HV(0, 2));
+ RR(VID_FIR_COEF_HV(0, 3));
+ RR(VID_FIR_COEF_HV(0, 4));
+ RR(VID_FIR_COEF_HV(0, 5));
+ RR(VID_FIR_COEF_HV(0, 6));
+ RR(VID_FIR_COEF_HV(0, 7));
+
+ RR(VID_CONV_COEF(0, 0));
+ RR(VID_CONV_COEF(0, 1));
+ RR(VID_CONV_COEF(0, 2));
+ RR(VID_CONV_COEF(0, 3));
+ RR(VID_CONV_COEF(0, 4));
+
+ RR(VID_FIR_COEF_V(0, 0));
+ RR(VID_FIR_COEF_V(0, 1));
+ RR(VID_FIR_COEF_V(0, 2));
+ RR(VID_FIR_COEF_V(0, 3));
+ RR(VID_FIR_COEF_V(0, 4));
+ RR(VID_FIR_COEF_V(0, 5));
+ RR(VID_FIR_COEF_V(0, 6));
+ RR(VID_FIR_COEF_V(0, 7));
+
+ RR(VID_PRELOAD(0));
+
+ /* VID2 */
+ RR(VID_BA0(1));
+ RR(VID_BA1(1));
+ RR(VID_POSITION(1));
+ RR(VID_SIZE(1));
+ RR(VID_ATTRIBUTES(1));
+ RR(VID_FIFO_THRESHOLD(1));
+ RR(VID_ROW_INC(1));
+ RR(VID_PIXEL_INC(1));
+ RR(VID_FIR(1));
+ RR(VID_PICTURE_SIZE(1));
+ RR(VID_ACCU0(1));
+ RR(VID_ACCU1(1));
+
+ RR(VID_FIR_COEF_H(1, 0));
+ RR(VID_FIR_COEF_H(1, 1));
+ RR(VID_FIR_COEF_H(1, 2));
+ RR(VID_FIR_COEF_H(1, 3));
+ RR(VID_FIR_COEF_H(1, 4));
+ RR(VID_FIR_COEF_H(1, 5));
+ RR(VID_FIR_COEF_H(1, 6));
+ RR(VID_FIR_COEF_H(1, 7));
+
+ RR(VID_FIR_COEF_HV(1, 0));
+ RR(VID_FIR_COEF_HV(1, 1));
+ RR(VID_FIR_COEF_HV(1, 2));
+ RR(VID_FIR_COEF_HV(1, 3));
+ RR(VID_FIR_COEF_HV(1, 4));
+ RR(VID_FIR_COEF_HV(1, 5));
+ RR(VID_FIR_COEF_HV(1, 6));
+ RR(VID_FIR_COEF_HV(1, 7));
+
+ RR(VID_CONV_COEF(1, 0));
+ RR(VID_CONV_COEF(1, 1));
+ RR(VID_CONV_COEF(1, 2));
+ RR(VID_CONV_COEF(1, 3));
+ RR(VID_CONV_COEF(1, 4));
+
+ RR(VID_FIR_COEF_V(1, 0));
+ RR(VID_FIR_COEF_V(1, 1));
+ RR(VID_FIR_COEF_V(1, 2));
+ RR(VID_FIR_COEF_V(1, 3));
+ RR(VID_FIR_COEF_V(1, 4));
+ RR(VID_FIR_COEF_V(1, 5));
+ RR(VID_FIR_COEF_V(1, 6));
+ RR(VID_FIR_COEF_V(1, 7));
+
+ RR(VID_PRELOAD(1));
+
+ /* enable last, because LCD & DIGIT enable are here */
+ RR(CONTROL);
+}
+
+#undef SR
+#undef RR
+
+static inline void enable_clocks(bool enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ else
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+bool dispc_go_busy(enum omap_channel channel)
+{
+ int bit;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ bit = 5; /* GOLCD */
+ else
+ bit = 6; /* GODIGIT */
+
+ return REG_GET(DISPC_CONTROL, bit, bit) == 1;
+}
+
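+/*
+ * Set the GO bit for the channel so that the shadowed configuration
+ * registers are taken into use by the hardware at the next frame.
+ */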
+void dispc_go(enum omap_channel channel)
+{
+ int bit;
+
+ enable_clocks(1);
+
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ bit = 0; /* LCDENABLE */
+ else
+ bit = 1; /* DIGITALENABLE */
+
+ /* if the channel is not enabled, we don't need GO */
+ if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
+ goto end;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ bit = 5; /* GOLCD */
+ else
+ bit = 6; /* GODIGIT */
+
+ if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
+ DSSERR("GO bit not down for channel %d\n", channel);
+ goto end;
+ }
+
+ DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT");
+
+ REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+end:
+ enable_clocks(0);
+}
+
+static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
+{
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value);
+}
+
+static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
+{
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value);
+}
+
+static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value)
+{
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value);
+}
+
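+/*
+ * Program the scaler FIR coefficients. The coefficient set depends on
+ * whether we scale up or down in each direction and on whether the 3-tap
+ * or 5-tap vertical filter is in use.
+ */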
+static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
+ int vscaleup, int five_taps)
+{
+ /* Coefficients for horizontal up-sampling */
+ static const u32 coef_hup[8] = {
+ 0x00800000,
+ 0x0D7CF800,
+ 0x1E70F5FF,
+ 0x335FF5FE,
+ 0xF74949F7,
+ 0xF55F33FB,
+ 0xF5701EFE,
+ 0xF87C0DFF,
+ };
+
+ /* Coefficients for horizontal down-sampling */
+ static const u32 coef_hdown[8] = {
+ 0x24382400,
+ 0x28371FFE,
+ 0x2C361BFB,
+ 0x303516F9,
+ 0x11343311,
+ 0x1635300C,
+ 0x1B362C08,
+ 0x1F372804,
+ };
+
+ /* Coefficients for horizontal and vertical up-sampling */
+ static const u32 coef_hvup[2][8] = {
+ {
+ 0x00800000,
+ 0x037B02FF,
+ 0x0C6F05FE,
+ 0x205907FB,
+ 0x00404000,
+ 0x075920FE,
+ 0x056F0CFF,
+ 0x027B0300,
+ },
+ {
+ 0x00800000,
+ 0x0D7CF8FF,
+ 0x1E70F5FE,
+ 0x335FF5FB,
+ 0xF7404000,
+ 0xF55F33FE,
+ 0xF5701EFF,
+ 0xF87C0D00,
+ },
+ };
+
+ /* Coefficients for horizontal and vertical down-sampling */
+ static const u32 coef_hvdown[2][8] = {
+ {
+ 0x24382400,
+ 0x28391F04,
+ 0x2D381B08,
+ 0x3237170C,
+ 0x123737F7,
+ 0x173732F9,
+ 0x1B382DFB,
+ 0x1F3928FE,
+ },
+ {
+ 0x24382400,
+ 0x28371F04,
+ 0x2C361B08,
+ 0x3035160C,
+ 0x113433F7,
+ 0x163530F9,
+ 0x1B362CFB,
+ 0x1F3728FE,
+ },
+ };
+
+ /* Coefficients for vertical up-sampling */
+ static const u32 coef_vup[8] = {
+ 0x00000000,
+ 0x0000FF00,
+ 0x0000FEFF,
+ 0x0000FBFE,
+ 0x000000F7,
+ 0x0000FEFB,
+ 0x0000FFFE,
+ 0x000000FF,
+ };
+
+
+ /* Coefficients for vertical down-sampling */
+ static const u32 coef_vdown[8] = {
+ 0x00000000,
+ 0x000004FE,
+ 0x000008FB,
+ 0x00000CF9,
+ 0x0000F711,
+ 0x0000F90C,
+ 0x0000FB08,
+ 0x0000FE04,
+ };
+
+ const u32 *h_coef;
+ const u32 *hv_coef;
+ const u32 *hv_coef_mod;
+ const u32 *v_coef;
+ int i;
+
+ if (hscaleup)
+ h_coef = coef_hup;
+ else
+ h_coef = coef_hdown;
+
+ if (vscaleup) {
+ hv_coef = coef_hvup[five_taps];
+ v_coef = coef_vup;
+
+ if (hscaleup)
+ hv_coef_mod = NULL;
+ else
+ hv_coef_mod = coef_hvdown[five_taps];
+ } else {
+ hv_coef = coef_hvdown[five_taps];
+ v_coef = coef_vdown;
+
+ if (hscaleup)
+ hv_coef_mod = coef_hvup[five_taps];
+ else
+ hv_coef_mod = NULL;
+ }
+
+ for (i = 0; i < 8; i++) {
+ u32 h, hv;
+
+ h = h_coef[i];
+
+ hv = hv_coef[i];
+
+ if (hv_coef_mod) {
+ hv &= 0xffffff00;
+ hv |= (hv_coef_mod[i] & 0xff);
+ }
+
+ _dispc_write_firh_reg(plane, i, h);
+ _dispc_write_firhv_reg(plane, i, hv);
+ }
+
+ if (!five_taps)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ u32 v;
+ v = v_coef[i];
+ _dispc_write_firv_reg(plane, i, v);
+ }
+}
+
+static void _dispc_setup_color_conv_coef(void)
+{
+ const struct color_conv_coef {
+ int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
+ int full_range;
+ } ctbl_bt601_5 = {
+ 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
+ };
+
+ const struct color_conv_coef *ct;
+
+#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
+
+ ct = &ctbl_bt601_5;
+
+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 0), CVAL(ct->rcr, ct->ry));
+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 1), CVAL(ct->gy, ct->rcb));
+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 2), CVAL(ct->gcb, ct->gcr));
+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 3), CVAL(ct->bcr, ct->by));
+ dispc_write_reg(DISPC_VID_CONV_COEF(0, 4), CVAL(0, ct->bcb));
+
+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 0), CVAL(ct->rcr, ct->ry));
+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 1), CVAL(ct->gy, ct->rcb));
+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 2), CVAL(ct->gcb, ct->gcr));
+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by));
+ dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0, ct->bcb));
+
+#undef CVAL
+
+ REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11);
+ REG_FLD_MOD(DISPC_VID_ATTRIBUTES(1), ct->full_range, 11, 11);
+}
+
+
+static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr)
+{
+ const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0,
+ DISPC_VID_BA0(0),
+ DISPC_VID_BA0(1) };
+
+ dispc_write_reg(ba0_reg[plane], paddr);
+}
+
+static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr)
+{
+ const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1,
+ DISPC_VID_BA1(0),
+ DISPC_VID_BA1(1) };
+
+ dispc_write_reg(ba1_reg[plane], paddr);
+}
+
+static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y)
+{
+ const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION,
+ DISPC_VID_POSITION(0),
+ DISPC_VID_POSITION(1) };
+
+ u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
+ dispc_write_reg(pos_reg[plane], val);
+}
+
+static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
+{
+ const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE,
+ DISPC_VID_PICTURE_SIZE(0),
+ DISPC_VID_PICTURE_SIZE(1) };
+ u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+ dispc_write_reg(siz_reg[plane], val);
+}
+
+static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
+{
+ u32 val;
+ const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0),
+ DISPC_VID_SIZE(1) };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+ dispc_write_reg(vsi_reg[plane-1], val);
+}
+
+static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
+{
+ BUG_ON(plane == OMAP_DSS_VIDEO1);
+
+ if (cpu_is_omap24xx())
+ return;
+
+ if (plane == OMAP_DSS_GFX)
+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
+ else if (plane == OMAP_DSS_VIDEO2)
+ REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 23, 16);
+}
+
+static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
+{
+ const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC,
+ DISPC_VID_PIXEL_INC(0),
+ DISPC_VID_PIXEL_INC(1) };
+
+ dispc_write_reg(ri_reg[plane], inc);
+}
+
+static void _dispc_set_row_inc(enum omap_plane plane, s32 inc)
+{
+ const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC,
+ DISPC_VID_ROW_INC(0),
+ DISPC_VID_ROW_INC(1) };
+
+ dispc_write_reg(ri_reg[plane], inc);
+}
+
+static void _dispc_set_color_mode(enum omap_plane plane,
+ enum omap_color_mode color_mode)
+{
+ u32 m = 0;
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_CLUT1:
+ m = 0x0; break;
+ case OMAP_DSS_COLOR_CLUT2:
+ m = 0x1; break;
+ case OMAP_DSS_COLOR_CLUT4:
+ m = 0x2; break;
+ case OMAP_DSS_COLOR_CLUT8:
+ m = 0x3; break;
+ case OMAP_DSS_COLOR_RGB12U:
+ m = 0x4; break;
+ case OMAP_DSS_COLOR_ARGB16:
+ m = 0x5; break;
+ case OMAP_DSS_COLOR_RGB16:
+ m = 0x6; break;
+ case OMAP_DSS_COLOR_RGB24U:
+ m = 0x8; break;
+ case OMAP_DSS_COLOR_RGB24P:
+ m = 0x9; break;
+ case OMAP_DSS_COLOR_YUV2:
+ m = 0xa; break;
+ case OMAP_DSS_COLOR_UYVY:
+ m = 0xb; break;
+ case OMAP_DSS_COLOR_ARGB32:
+ m = 0xc; break;
+ case OMAP_DSS_COLOR_RGBA32:
+ m = 0xd; break;
+ case OMAP_DSS_COLOR_RGBX32:
+ m = 0xe; break;
+ default:
+ BUG(); break;
+ }
+
+ REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1);
+}
+
+static void _dispc_set_channel_out(enum omap_plane plane,
+ enum omap_channel channel)
+{
+ int shift;
+ u32 val;
+
+ switch (plane) {
+ case OMAP_DSS_GFX:
+ shift = 8;
+ break;
+ case OMAP_DSS_VIDEO1:
+ case OMAP_DSS_VIDEO2:
+ shift = 16;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ val = dispc_read_reg(dispc_reg_att[plane]);
+ val = FLD_MOD(val, channel, shift, shift);
+ dispc_write_reg(dispc_reg_att[plane], val);
+}
+
+void dispc_set_burst_size(enum omap_plane plane,
+ enum omap_burst_size burst_size)
+{
+ int shift;
+ u32 val;
+
+ enable_clocks(1);
+
+ switch (plane) {
+ case OMAP_DSS_GFX:
+ shift = 6;
+ break;
+ case OMAP_DSS_VIDEO1:
+ case OMAP_DSS_VIDEO2:
+ shift = 14;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ val = dispc_read_reg(dispc_reg_att[plane]);
+ val = FLD_MOD(val, burst_size, shift+1, shift);
+ dispc_write_reg(dispc_reg_att[plane], val);
+
+ enable_clocks(0);
+}
+
+static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
+{
+ u32 val;
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ val = dispc_read_reg(dispc_reg_att[plane]);
+ val = FLD_MOD(val, enable, 9, 9);
+ dispc_write_reg(dispc_reg_att[plane], val);
+}
+
+void dispc_enable_replication(enum omap_plane plane, bool enable)
+{
+ int bit;
+
+ if (plane == OMAP_DSS_GFX)
+ bit = 5;
+ else
+ bit = 10;
+
+ enable_clocks(1);
+ REG_FLD_MOD(dispc_reg_att[plane], enable, bit, bit);
+ enable_clocks(0);
+}
+
+void dispc_set_lcd_size(u16 width, u16 height)
+{
+ u32 val;
+ BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
+ val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+ enable_clocks(1);
+ dispc_write_reg(DISPC_SIZE_LCD, val);
+ enable_clocks(0);
+}
+
+void dispc_set_digit_size(u16 width, u16 height)
+{
+ u32 val;
+ BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
+ val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
+ enable_clocks(1);
+ dispc_write_reg(DISPC_SIZE_DIG, val);
+ enable_clocks(0);
+}
+
+static void dispc_read_plane_fifo_sizes(void)
+{
+ const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
+ DISPC_VID_FIFO_SIZE_STATUS(0),
+ DISPC_VID_FIFO_SIZE_STATUS(1) };
+ u32 size;
+ int plane;
+
+ enable_clocks(1);
+
+ for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
+ if (cpu_is_omap24xx())
+ size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 8, 0);
+ else if (cpu_is_omap34xx())
+ size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 10, 0);
+ else
+ BUG();
+
+ dispc.fifo_size[plane] = size;
+ }
+
+ enable_clocks(0);
+}
+
+u32 dispc_get_plane_fifo_size(enum omap_plane plane)
+{
+ return dispc.fifo_size[plane];
+}
+
+void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
+{
+ const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
+ DISPC_VID_FIFO_THRESHOLD(0),
+ DISPC_VID_FIFO_THRESHOLD(1) };
+ enable_clocks(1);
+
+ DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
+ plane,
+ REG_GET(ftrs_reg[plane], 11, 0),
+ REG_GET(ftrs_reg[plane], 27, 16),
+ low, high);
+
+ if (cpu_is_omap24xx())
+ dispc_write_reg(ftrs_reg[plane],
+ FLD_VAL(high, 24, 16) | FLD_VAL(low, 8, 0));
+ else
+ dispc_write_reg(ftrs_reg[plane],
+ FLD_VAL(high, 27, 16) | FLD_VAL(low, 11, 0));
+
+ enable_clocks(0);
+}
+
+void dispc_enable_fifomerge(bool enable)
+{
+ enable_clocks(1);
+
+ DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
+ REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
+
+ enable_clocks(0);
+}
+
+static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc)
+{
+ u32 val;
+ const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0),
+ DISPC_VID_FIR(1) };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ if (cpu_is_omap24xx())
+ val = FLD_VAL(vinc, 27, 16) | FLD_VAL(hinc, 11, 0);
+ else
+ val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
+ dispc_write_reg(fir_reg[plane-1], val);
+}
+
+static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
+{
+ u32 val;
+ const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
+ DISPC_VID_ACCU0(1) };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ dispc_write_reg(ac0_reg[plane-1], val);
+}
+
+static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
+{
+ u32 val;
+ const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
+ DISPC_VID_ACCU1(1) };
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
+ dispc_write_reg(ac1_reg[plane-1], val);
+}
+
+
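+/*
+ * Program the FIR increments, coefficients and accumulators for scaling on
+ * a video plane, adjusting the accumulators for interlaced output when
+ * field mode is not used.
+ */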
+static void _dispc_set_scaling(enum omap_plane plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool five_taps,
+ bool fieldmode)
+{
+ int fir_hinc;
+ int fir_vinc;
+ int hscaleup, vscaleup;
+ int accu0 = 0;
+ int accu1 = 0;
+ u32 l;
+
+ BUG_ON(plane == OMAP_DSS_GFX);
+
+ hscaleup = orig_width <= out_width;
+ vscaleup = orig_height <= out_height;
+
+ _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps);
+
+ if (!orig_width || orig_width == out_width)
+ fir_hinc = 0;
+ else
+ fir_hinc = 1024 * orig_width / out_width;
+
+ if (!orig_height || orig_height == out_height)
+ fir_vinc = 0;
+ else
+ fir_vinc = 1024 * orig_height / out_height;
+
+ _dispc_set_fir(plane, fir_hinc, fir_vinc);
+
+ l = dispc_read_reg(dispc_reg_att[plane]);
+ l &= ~((0x0f << 5) | (0x3 << 21));
+
+ l |= fir_hinc ? (1 << 5) : 0;
+ l |= fir_vinc ? (1 << 6) : 0;
+
+ l |= hscaleup ? 0 : (1 << 7);
+ l |= vscaleup ? 0 : (1 << 8);
+
+ l |= five_taps ? (1 << 21) : 0;
+ l |= five_taps ? (1 << 22) : 0;
+
+ dispc_write_reg(dispc_reg_att[plane], l);
+
+ /*
+ * field 0 = even field = bottom field
+ * field 1 = odd field = top field
+ */
+ if (ilace && !fieldmode) {
+ accu1 = 0;
+ accu0 = (fir_vinc / 2) & 0x3ff;
+ if (accu0 >= 1024/2) {
+ accu1 = 1024/2;
+ accu0 -= accu1;
+ }
+ }
+
+ _dispc_set_vid_accu0(plane, 0, accu0);
+ _dispc_set_vid_accu1(plane, 0, accu1);
+}
+
+static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
+ bool mirroring, enum omap_color_mode color_mode)
+{
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY) {
+ int vidrot = 0;
+
+ if (mirroring) {
+ switch (rotation) {
+ case OMAP_DSS_ROT_0:
+ vidrot = 2;
+ break;
+ case OMAP_DSS_ROT_90:
+ vidrot = 1;
+ break;
+ case OMAP_DSS_ROT_180:
+ vidrot = 0;
+ break;
+ case OMAP_DSS_ROT_270:
+ vidrot = 3;
+ break;
+ }
+ } else {
+ switch (rotation) {
+ case OMAP_DSS_ROT_0:
+ vidrot = 0;
+ break;
+ case OMAP_DSS_ROT_90:
+ vidrot = 1;
+ break;
+ case OMAP_DSS_ROT_180:
+ vidrot = 2;
+ break;
+ case OMAP_DSS_ROT_270:
+ vidrot = 3;
+ break;
+ }
+ }
+
+ REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12);
+
+ if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270)
+ REG_FLD_MOD(dispc_reg_att[plane], 0x1, 18, 18);
+ else
+ REG_FLD_MOD(dispc_reg_att[plane], 0x0, 18, 18);
+ } else {
+ REG_FLD_MOD(dispc_reg_att[plane], 0, 13, 12);
+ REG_FLD_MOD(dispc_reg_att[plane], 0, 18, 18);
+ }
+}
+
+static int color_mode_to_bpp(enum omap_color_mode color_mode)
+{
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_CLUT1:
+ return 1;
+ case OMAP_DSS_COLOR_CLUT2:
+ return 2;
+ case OMAP_DSS_COLOR_CLUT4:
+ return 4;
+ case OMAP_DSS_COLOR_CLUT8:
+ return 8;
+ case OMAP_DSS_COLOR_RGB12U:
+ case OMAP_DSS_COLOR_RGB16:
+ case OMAP_DSS_COLOR_ARGB16:
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ return 16;
+ case OMAP_DSS_COLOR_RGB24P:
+ return 24;
+ case OMAP_DSS_COLOR_RGB24U:
+ case OMAP_DSS_COLOR_ARGB32:
+ case OMAP_DSS_COLOR_RGBA32:
+ case OMAP_DSS_COLOR_RGBX32:
+ return 32;
+ default:
+ BUG();
+ }
+}
+
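+/*
+ * Convert a step of 'pixels' pixels, each 'ps' bytes wide, into the
+ * "1 + byte offset" encoding used by the ROW_INC/PIXEL_INC registers.
+ */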
+static s32 pixinc(int pixels, u8 ps)
+{
+ if (pixels == 1)
+ return 1;
+ else if (pixels > 1)
+ return 1 + (pixels - 1) * ps;
+ else if (pixels < 0)
+ return 1 - (-pixels + 1) * ps;
+ else
+ BUG();
+}
+
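+/*
+ * Compute buffer offsets and row/pixel increments when the rotation itself
+ * is done by the VRFB engine; only mirroring and interlace field offsets
+ * need to be handled here.
+ */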
+static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
+ u16 screen_width,
+ u16 width, u16 height,
+ enum omap_color_mode color_mode, bool fieldmode,
+ unsigned int field_offset,
+ unsigned *offset0, unsigned *offset1,
+ s32 *row_inc, s32 *pix_inc)
+{
+ u8 ps;
+
+ /* FIXME CLUT formats */
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_CLUT1:
+ case OMAP_DSS_COLOR_CLUT2:
+ case OMAP_DSS_COLOR_CLUT4:
+ case OMAP_DSS_COLOR_CLUT8:
+ BUG();
+ return;
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ ps = 4;
+ break;
+ default:
+ ps = color_mode_to_bpp(color_mode) / 8;
+ break;
+ }
+
+ DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
+ width, height);
+
+ /*
+ * field 0 = even field = bottom field
+ * field 1 = odd field = top field
+ */
+ switch (rotation + mirror * 4) {
+ case OMAP_DSS_ROT_0:
+ case OMAP_DSS_ROT_180:
+ /*
+ * If the pixel format is YUV or UYVY divide the width
+ * of the image by 2 for 0 and 180 degree rotation.
+ */
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ width = width >> 1;
+ case OMAP_DSS_ROT_90:
+ case OMAP_DSS_ROT_270:
+ *offset1 = 0;
+ if (field_offset)
+ *offset0 = field_offset * screen_width * ps;
+ else
+ *offset0 = 0;
+
+ *row_inc = pixinc(1 + (screen_width - width) +
+ (fieldmode ? screen_width : 0),
+ ps);
+ *pix_inc = pixinc(1, ps);
+ break;
+
+ case OMAP_DSS_ROT_0 + 4:
+ case OMAP_DSS_ROT_180 + 4:
+ /* If the pixel format is YUV or UYVY, divide the width
+ * of the image by 2 for 0 and 180 degree rotation with
+ * mirroring.
+ */
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ width = width >> 1;
+ case OMAP_DSS_ROT_90 + 4:
+ case OMAP_DSS_ROT_270 + 4:
+ *offset1 = 0;
+ if (field_offset)
+ *offset0 = field_offset * screen_width * ps;
+ else
+ *offset0 = 0;
+ *row_inc = pixinc(1 - (screen_width + width) -
+ (fieldmode ? screen_width : 0),
+ ps);
+ *pix_inc = pixinc(1, ps);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
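+/*
+ * Compute buffer offsets and row/pixel increments for rotation and
+ * mirroring done purely with DMA addressing, walking the linear
+ * framebuffer in the required order.
+ */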
+static void calc_dma_rotation_offset(u8 rotation, bool mirror,
+ u16 screen_width,
+ u16 width, u16 height,
+ enum omap_color_mode color_mode, bool fieldmode,
+ unsigned int field_offset,
+ unsigned *offset0, unsigned *offset1,
+ s32 *row_inc, s32 *pix_inc)
+{
+ u8 ps;
+ u16 fbw, fbh;
+
+ /* FIXME CLUT formats */
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_CLUT1:
+ case OMAP_DSS_COLOR_CLUT2:
+ case OMAP_DSS_COLOR_CLUT4:
+ case OMAP_DSS_COLOR_CLUT8:
+ BUG();
+ return;
+ default:
+ ps = color_mode_to_bpp(color_mode) / 8;
+ break;
+ }
+
+ DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
+ width, height);
+
+ /* width & height are overlay sizes, convert to fb sizes */
+
+ if (rotation == OMAP_DSS_ROT_0 || rotation == OMAP_DSS_ROT_180) {
+ fbw = width;
+ fbh = height;
+ } else {
+ fbw = height;
+ fbh = width;
+ }
+
+ /*
+ * field 0 = even field = bottom field
+ * field 1 = odd field = top field
+ */
+ switch (rotation + mirror * 4) {
+ case OMAP_DSS_ROT_0:
+ *offset1 = 0;
+ if (field_offset)
+ *offset0 = *offset1 + field_offset * screen_width * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(1 + (screen_width - fbw) +
+ (fieldmode ? screen_width : 0),
+ ps);
+ *pix_inc = pixinc(1, ps);
+ break;
+ case OMAP_DSS_ROT_90:
+ *offset1 = screen_width * (fbh - 1) * ps;
+ if (field_offset)
+ *offset0 = *offset1 + field_offset * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(screen_width * (fbh - 1) + 1 +
+ (fieldmode ? 1 : 0), ps);
+ *pix_inc = pixinc(-screen_width, ps);
+ break;
+ case OMAP_DSS_ROT_180:
+ *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
+ if (field_offset)
+ *offset0 = *offset1 - field_offset * screen_width * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(-1 -
+ (screen_width - fbw) -
+ (fieldmode ? screen_width : 0),
+ ps);
+ *pix_inc = pixinc(-1, ps);
+ break;
+ case OMAP_DSS_ROT_270:
+ *offset1 = (fbw - 1) * ps;
+ if (field_offset)
+ *offset0 = *offset1 - field_offset * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
+ (fieldmode ? 1 : 0), ps);
+ *pix_inc = pixinc(screen_width, ps);
+ break;
+
+ /* mirroring */
+ case OMAP_DSS_ROT_0 + 4:
+ *offset1 = (fbw - 1) * ps;
+ if (field_offset)
+ *offset0 = *offset1 + field_offset * screen_width * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(screen_width * 2 - 1 +
+ (fieldmode ? screen_width : 0),
+ ps);
+ *pix_inc = pixinc(-1, ps);
+ break;
+
+ case OMAP_DSS_ROT_90 + 4:
+ *offset1 = 0;
+ if (field_offset)
+ *offset0 = *offset1 + field_offset * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
+ (fieldmode ? 1 : 0),
+ ps);
+ *pix_inc = pixinc(screen_width, ps);
+ break;
+
+ case OMAP_DSS_ROT_180 + 4:
+ *offset1 = screen_width * (fbh - 1) * ps;
+ if (field_offset)
+ *offset0 = *offset1 - field_offset * screen_width * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(1 - screen_width * 2 -
+ (fieldmode ? screen_width : 0),
+ ps);
+ *pix_inc = pixinc(1, ps);
+ break;
+
+ case OMAP_DSS_ROT_270 + 4:
+ *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
+ if (field_offset)
+ *offset0 = *offset1 - field_offset * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(screen_width * (fbh - 1) - 1 -
+ (fieldmode ? 1 : 0),
+ ps);
+ *pix_inc = pixinc(-screen_width, ps);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
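+/*
+ * Estimate the DISPC functional clock rate needed for the requested
+ * downscaling with the 5-tap filter, based on the pixel clock and the
+ * input/output sizes.
+ */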
+static unsigned long calc_fclk_five_taps(u16 width, u16 height,
+ u16 out_width, u16 out_height, enum omap_color_mode color_mode)
+{
+ u32 fclk = 0;
+ /* FIXME venc pclk? */
+ u64 tmp, pclk = dispc_pclk_rate();
+
+ if (height > out_height) {
+ /* FIXME get real display PPL */
+ unsigned int ppl = 800;
+
+ tmp = pclk * height * out_width;
+ do_div(tmp, 2 * out_height * ppl);
+ fclk = tmp;
+
+ if (height > 2 * out_height && ppl != out_width) {
+ tmp = pclk * (height - 2 * out_height) * out_width;
+ do_div(tmp, 2 * out_height * (ppl - out_width));
+ fclk = max(fclk, (u32) tmp);
+ }
+ }
+
+ if (width > out_width) {
+ tmp = pclk * width;
+ do_div(tmp, out_width);
+ fclk = max(fclk, (u32) tmp);
+
+ if (color_mode == OMAP_DSS_COLOR_RGB24U)
+ fclk <<= 1;
+ }
+
+ return fclk;
+}
+
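+/*
+ * Estimate the DISPC functional clock rate needed for scaling when the
+ * 5-tap filter is not used.
+ */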
+static unsigned long calc_fclk(u16 width, u16 height,
+ u16 out_width, u16 out_height)
+{
+ unsigned int hf, vf;
+
+ /*
+ * FIXME how to determine the 'A' factor
+ * for the no downscaling case ?
+ */
+
+ if (width > 3 * out_width)
+ hf = 4;
+ else if (width > 2 * out_width)
+ hf = 3;
+ else if (width > out_width)
+ hf = 2;
+ else
+ hf = 1;
+
+ if (height > out_height)
+ vf = 2;
+ else
+ vf = 1;
+
+ /* FIXME venc pclk? */
+ return dispc_pclk_rate() * vf * hf;
+}
+
+void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
+{
+ enable_clocks(1);
+ _dispc_set_channel_out(plane, channel_out);
+ enable_clocks(0);
+}
+
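+/*
+ * Configure one overlay: validate the color mode and scaling limits,
+ * compute rotation offsets and increments, and program the base addresses,
+ * position, sizes, scaling and attributes.
+ */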
+static int _dispc_setup_plane(enum omap_plane plane,
+ u32 paddr, u16 screen_width,
+ u16 pos_x, u16 pos_y,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ enum omap_color_mode color_mode,
+ bool ilace,
+ enum omap_dss_rotation_type rotation_type,
+ u8 rotation, int mirror,
+ u8 global_alpha)
+{
+ const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
+ bool five_taps = 0;
+ bool fieldmode = 0;
+ int cconv = 0;
+ unsigned offset0, offset1;
+ s32 row_inc;
+ s32 pix_inc;
+ u16 frame_height = height;
+ unsigned int field_offset = 0;
+
+ if (paddr == 0)
+ return -EINVAL;
+
+ if (ilace && height == out_height)
+ fieldmode = 1;
+
+ if (ilace) {
+ if (fieldmode)
+ height /= 2;
+ pos_y /= 2;
+ out_height /= 2;
+
+ DSSDBG("adjusting for ilace: height %d, pos_y %d, "
+ "out_height %d\n",
+ height, pos_y, out_height);
+ }
+
+ if (plane == OMAP_DSS_GFX) {
+ if (width != out_width || height != out_height)
+ return -EINVAL;
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_ARGB16:
+ case OMAP_DSS_COLOR_ARGB32:
+ case OMAP_DSS_COLOR_RGBA32:
+ case OMAP_DSS_COLOR_RGBX32:
+ if (cpu_is_omap24xx())
+ return -EINVAL;
+ /* fall through */
+ case OMAP_DSS_COLOR_RGB12U:
+ case OMAP_DSS_COLOR_RGB16:
+ case OMAP_DSS_COLOR_RGB24P:
+ case OMAP_DSS_COLOR_RGB24U:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /* video plane */
+
+ unsigned long fclk = 0;
+
+ if (out_width < width / maxdownscale ||
+ out_width > width * 8)
+ return -EINVAL;
+
+ if (out_height < height / maxdownscale ||
+ out_height > height * 8)
+ return -EINVAL;
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_RGBX32:
+ case OMAP_DSS_COLOR_RGB12U:
+ if (cpu_is_omap24xx())
+ return -EINVAL;
+ /* fall through */
+ case OMAP_DSS_COLOR_RGB16:
+ case OMAP_DSS_COLOR_RGB24P:
+ case OMAP_DSS_COLOR_RGB24U:
+ break;
+
+ case OMAP_DSS_COLOR_ARGB16:
+ case OMAP_DSS_COLOR_ARGB32:
+ case OMAP_DSS_COLOR_RGBA32:
+ if (cpu_is_omap24xx())
+ return -EINVAL;
+ if (plane == OMAP_DSS_VIDEO1)
+ return -EINVAL;
+ break;
+
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ cconv = 1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Must use 5-tap filter? */
+ five_taps = height > out_height * 2;
+
+ if (!five_taps) {
+ fclk = calc_fclk(width, height,
+ out_width, out_height);
+
+ /* Try 5-tap filter if 3-tap fclk is too high */
+ if (cpu_is_omap34xx() && height > out_height &&
+ fclk > dispc_fclk_rate())
+ five_taps = true;
+ }
+
+ if (width > (2048 >> five_taps)) {
+ DSSERR("failed to set up scaling, input width exceeds the maximum\n");
+ return -EINVAL;
+ }
+
+ if (five_taps)
+ fclk = calc_fclk_five_taps(width, height,
+ out_width, out_height, color_mode);
+
+ DSSDBG("required fclk rate = %lu Hz\n", fclk);
+ DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+
+ if (fclk > dispc_fclk_rate()) {
+ DSSERR("failed to set up scaling, "
+ "required fclk rate = %lu Hz, "
+ "current fclk rate = %lu Hz\n",
+ fclk, dispc_fclk_rate());
+ return -EINVAL;
+ }
+ }
+
+ if (ilace && !fieldmode) {
+ /*
+ * When downscaling, the bottom field may have to start several
+ * source lines below the top field. Unfortunately the ACCU
+ * registers will only hold the fractional part of the offset,
+ * so the integer part must be added to the base address of the
+ * bottom field.
+ */
+ if (!height || height == out_height)
+ field_offset = 0;
+ else
+ field_offset = height / out_height / 2;
+ }
+
+ /* Fields are independent but interleaved in memory. */
+ if (fieldmode)
+ field_offset = 1;
+
+ if (rotation_type == OMAP_DSS_ROT_DMA)
+ calc_dma_rotation_offset(rotation, mirror,
+ screen_width, width, frame_height, color_mode,
+ fieldmode, field_offset,
+ &offset0, &offset1, &row_inc, &pix_inc);
+ else
+ calc_vrfb_rotation_offset(rotation, mirror,
+ screen_width, width, frame_height, color_mode,
+ fieldmode, field_offset,
+ &offset0, &offset1, &row_inc, &pix_inc);
+
+ DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
+ offset0, offset1, row_inc, pix_inc);
+
+ _dispc_set_color_mode(plane, color_mode);
+
+ _dispc_set_plane_ba0(plane, paddr + offset0);
+ _dispc_set_plane_ba1(plane, paddr + offset1);
+
+ _dispc_set_row_inc(plane, row_inc);
+ _dispc_set_pix_inc(plane, pix_inc);
+
+ DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, width, height,
+ out_width, out_height);
+
+ _dispc_set_plane_pos(plane, pos_x, pos_y);
+
+ _dispc_set_pic_size(plane, width, height);
+
+ if (plane != OMAP_DSS_GFX) {
+ _dispc_set_scaling(plane, width, height,
+ out_width, out_height,
+ ilace, five_taps, fieldmode);
+ _dispc_set_vid_size(plane, out_width, out_height);
+ _dispc_set_vid_color_conv(plane, cconv);
+ }
+
+ _dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
+
+ if (plane != OMAP_DSS_VIDEO1)
+ _dispc_setup_global_alpha(plane, global_alpha);
+
+ return 0;
+}
+
+static void _dispc_enable_plane(enum omap_plane plane, bool enable)
+{
+ REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0);
+}
+
+static void dispc_disable_isr(void *data, u32 mask)
+{
+ struct completion *compl = data;
+ complete(compl);
+}
+
+static void _enable_lcd_out(bool enable)
+{
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+}
+
+void dispc_enable_lcd_out(bool enable)
+{
+ struct completion frame_done_completion;
+ bool is_on;
+ int r;
+
+ enable_clocks(1);
+
+ /* When we disable LCD output, we need to wait until the frame is
+ * done. Otherwise the DSS is still working, and turning off the
+ * clocks prevents the DSS from going to OFF mode. */
+ is_on = REG_GET(DISPC_CONTROL, 0, 0);
+
+ if (!enable && is_on) {
+ init_completion(&frame_done_completion);
+
+ r = omap_dispc_register_isr(dispc_disable_isr,
+ &frame_done_completion,
+ DISPC_IRQ_FRAMEDONE);
+
+ if (r)
+ DSSERR("failed to register FRAMEDONE isr\n");
+ }
+
+ _enable_lcd_out(enable);
+
+ if (!enable && is_on) {
+ if (!wait_for_completion_timeout(&frame_done_completion,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for FRAME DONE\n");
+
+ r = omap_dispc_unregister_isr(dispc_disable_isr,
+ &frame_done_completion,
+ DISPC_IRQ_FRAMEDONE);
+
+ if (r)
+ DSSERR("failed to unregister FRAMEDONE isr\n");
+ }
+
+ enable_clocks(0);
+}
+
+static void _enable_digit_out(bool enable)
+{
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
+}
+
+void dispc_enable_digit_out(bool enable)
+{
+ struct completion frame_done_completion;
+ int r;
+
+ enable_clocks(1);
+
+ if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
+ enable_clocks(0);
+ return;
+ }
+
+ if (enable) {
+ unsigned long flags;
+ /* When we enable digit output, we'll get an extra DIGIT
+ * sync lost interrupt that we need to ignore. */
+ spin_lock_irqsave(&dispc.irq_lock, flags);
+ dispc.irq_error_mask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+ _omap_dispc_set_irqs();
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+ }
+
+ /* When we disable digit output, we need to wait until the fields are
+ * done. Otherwise the DSS is still working, and turning off the
+ * clocks prevents the DSS from going to OFF mode. When enabling, we
+ * need to wait for the extra sync lost interrupts. */
+ init_completion(&frame_done_completion);
+
+ r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
+ DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+ if (r)
+ DSSERR("failed to register EVSYNC isr\n");
+
+ _enable_digit_out(enable);
+
+ /* XXX I understand from TRM that we should only wait for the
+ * current field to complete. But it seems we have to wait
+ * for both fields */
+ if (!wait_for_completion_timeout(&frame_done_completion,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for EVSYNC\n");
+
+ if (!wait_for_completion_timeout(&frame_done_completion,
+ msecs_to_jiffies(100)))
+ DSSERR("timeout waiting for EVSYNC\n");
+
+ r = omap_dispc_unregister_isr(dispc_disable_isr,
+ &frame_done_completion,
+ DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
+ if (r)
+ DSSERR("failed to unregister EVSYNC isr\n");
+
+ if (enable) {
+ unsigned long flags;
+ spin_lock_irqsave(&dispc.irq_lock, flags);
+ dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+ dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
+ _omap_dispc_set_irqs();
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+ }
+
+ enable_clocks(0);
+}
+
+void dispc_lcd_enable_signal_polarity(bool act_high)
+{
+ enable_clocks(1);
+ REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
+ enable_clocks(0);
+}
+
+void dispc_lcd_enable_signal(bool enable)
+{
+ enable_clocks(1);
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
+ enable_clocks(0);
+}
+
+void dispc_pck_free_enable(bool enable)
+{
+ enable_clocks(1);
+ REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
+ enable_clocks(0);
+}
+
+void dispc_enable_fifohandcheck(bool enable)
+{
+ enable_clocks(1);
+ REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
+ enable_clocks(0);
+}
+
+
+void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
+{
+ int mode;
+
+ switch (type) {
+ case OMAP_DSS_LCD_DISPLAY_STN:
+ mode = 0;
+ break;
+
+ case OMAP_DSS_LCD_DISPLAY_TFT:
+ mode = 1;
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+
+ enable_clocks(1);
+ REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
+ enable_clocks(0);
+}
+
+void dispc_set_loadmode(enum omap_dss_load_mode mode)
+{
+ enable_clocks(1);
+ REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
+ enable_clocks(0);
+}
+
+
+void dispc_set_default_color(enum omap_channel channel, u32 color)
+{
+ const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
+ DISPC_DEFAULT_COLOR1 };
+
+ enable_clocks(1);
+ dispc_write_reg(def_reg[channel], color);
+ enable_clocks(0);
+}
+
+u32 dispc_get_default_color(enum omap_channel channel)
+{
+ const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
+ DISPC_DEFAULT_COLOR1 };
+ u32 l;
+
+ BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
+ channel != OMAP_DSS_CHANNEL_LCD);
+
+ enable_clocks(1);
+ l = dispc_read_reg(def_reg[channel]);
+ enable_clocks(0);
+
+ return l;
+}
+
+void dispc_set_trans_key(enum omap_channel ch,
+ enum omap_dss_trans_key_type type,
+ u32 trans_key)
+{
+ const struct dispc_reg tr_reg[] = {
+ DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
+
+ enable_clocks(1);
+ if (ch == OMAP_DSS_CHANNEL_LCD)
+ REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
+ else /* OMAP_DSS_CHANNEL_DIGIT */
+ REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
+
+ dispc_write_reg(tr_reg[ch], trans_key);
+ enable_clocks(0);
+}
+
+void dispc_get_trans_key(enum omap_channel ch,
+ enum omap_dss_trans_key_type *type,
+ u32 *trans_key)
+{
+ const struct dispc_reg tr_reg[] = {
+ DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
+
+ enable_clocks(1);
+ if (type) {
+ if (ch == OMAP_DSS_CHANNEL_LCD)
+ *type = REG_GET(DISPC_CONFIG, 11, 11);
+ else if (ch == OMAP_DSS_CHANNEL_DIGIT)
+ *type = REG_GET(DISPC_CONFIG, 13, 13);
+ else
+ BUG();
+ }
+
+ if (trans_key)
+ *trans_key = dispc_read_reg(tr_reg[ch]);
+ enable_clocks(0);
+}
+
+void dispc_enable_trans_key(enum omap_channel ch, bool enable)
+{
+ enable_clocks(1);
+ if (ch == OMAP_DSS_CHANNEL_LCD)
+ REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
+ else /* OMAP_DSS_CHANNEL_DIGIT */
+ REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
+ enable_clocks(0);
+}
+
+void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
+{
+ if (cpu_is_omap24xx())
+ return;
+
+ enable_clocks(1);
+ if (ch == OMAP_DSS_CHANNEL_LCD)
+ REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
+ else /* OMAP_DSS_CHANNEL_DIGIT */
+ REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
+ enable_clocks(0);
+}
+
+bool dispc_alpha_blending_enabled(enum omap_channel ch)
+{
+ bool enabled;
+
+ if (cpu_is_omap24xx())
+ return false;
+
+ enable_clocks(1);
+ if (ch == OMAP_DSS_CHANNEL_LCD)
+ enabled = REG_GET(DISPC_CONFIG, 18, 18);
+ else if (ch == OMAP_DSS_CHANNEL_DIGIT)
+ enabled = REG_GET(DISPC_CONFIG, 19, 19);
+ else
+ BUG();
+ enable_clocks(0);
+
+ return enabled;
+}
+
+
+bool dispc_trans_key_enabled(enum omap_channel ch)
+{
+ bool enabled;
+
+ enable_clocks(1);
+ if (ch == OMAP_DSS_CHANNEL_LCD)
+ enabled = REG_GET(DISPC_CONFIG, 10, 10);
+ else if (ch == OMAP_DSS_CHANNEL_DIGIT)
+ enabled = REG_GET(DISPC_CONFIG, 12, 12);
+ else
+ BUG();
+ enable_clocks(0);
+
+ return enabled;
+}
+
+
+void dispc_set_tft_data_lines(u8 data_lines)
+{
+ int code;
+
+ switch (data_lines) {
+ case 12:
+ code = 0;
+ break;
+ case 16:
+ code = 1;
+ break;
+ case 18:
+ code = 2;
+ break;
+ case 24:
+ code = 3;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ enable_clocks(1);
+ REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
+ enable_clocks(0);
+}
+
+void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
+{
+ u32 l;
+ int stallmode;
+ int gpout0 = 1;
+ int gpout1;
+
+ switch (mode) {
+ case OMAP_DSS_PARALLELMODE_BYPASS:
+ stallmode = 0;
+ gpout1 = 1;
+ break;
+
+ case OMAP_DSS_PARALLELMODE_RFBI:
+ stallmode = 1;
+ gpout1 = 0;
+ break;
+
+ case OMAP_DSS_PARALLELMODE_DSI:
+ stallmode = 1;
+ gpout1 = 1;
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+
+ enable_clocks(1);
+
+ l = dispc_read_reg(DISPC_CONTROL);
+
+ l = FLD_MOD(l, stallmode, 11, 11);
+ l = FLD_MOD(l, gpout0, 15, 15);
+ l = FLD_MOD(l, gpout1, 16, 16);
+
+ dispc_write_reg(DISPC_CONTROL, l);
+
+ enable_clocks(0);
+}
+
+static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
+ int vsw, int vfp, int vbp)
+{
+ if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
+ if (hsw < 1 || hsw > 64 ||
+ hfp < 1 || hfp > 256 ||
+ hbp < 1 || hbp > 256 ||
+ vsw < 1 || vsw > 64 ||
+ vfp < 0 || vfp > 255 ||
+ vbp < 0 || vbp > 255)
+ return false;
+ } else {
+ if (hsw < 1 || hsw > 256 ||
+ hfp < 1 || hfp > 4096 ||
+ hbp < 1 || hbp > 4096 ||
+ vsw < 1 || vsw > 256 ||
+ vfp < 0 || vfp > 4095 ||
+ vbp < 0 || vbp > 4095)
+ return false;
+ }
+
+ return true;
+}
+
+bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
+{
+ return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+ timings->hbp, timings->vsw,
+ timings->vfp, timings->vbp);
+}
+
+static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
+ int vsw, int vfp, int vbp)
+{
+ u32 timing_h, timing_v;
+
+ if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
+ timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) |
+ FLD_VAL(hbp-1, 27, 20);
+
+ timing_v = FLD_VAL(vsw-1, 5, 0) | FLD_VAL(vfp, 15, 8) |
+ FLD_VAL(vbp, 27, 20);
+ } else {
+ timing_h = FLD_VAL(hsw-1, 7, 0) | FLD_VAL(hfp-1, 19, 8) |
+ FLD_VAL(hbp-1, 31, 20);
+
+ timing_v = FLD_VAL(vsw-1, 7, 0) | FLD_VAL(vfp, 19, 8) |
+ FLD_VAL(vbp, 31, 20);
+ }
+
+ enable_clocks(1);
+ dispc_write_reg(DISPC_TIMING_H, timing_h);
+ dispc_write_reg(DISPC_TIMING_V, timing_v);
+ enable_clocks(0);
+}
+
+/* change name to mode? */
+void dispc_set_lcd_timings(struct omap_video_timings *timings)
+{
+ unsigned xtot, ytot;
+ unsigned long ht, vt;
+
+ if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+ timings->hbp, timings->vsw,
+ timings->vfp, timings->vbp))
+ BUG();
+
+ _dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp,
+ timings->vsw, timings->vfp, timings->vbp);
+
+ dispc_set_lcd_size(timings->x_res, timings->y_res);
+
+ xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
+ ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
+
+ ht = (timings->pixel_clock * 1000) / xtot;
+ vt = (timings->pixel_clock * 1000) / xtot / ytot;
+
+ DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res);
+ DSSDBG("pck %u\n", timings->pixel_clock);
+ DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ timings->hsw, timings->hfp, timings->hbp,
+ timings->vsw, timings->vfp, timings->vbp);
+
+ DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+}
+
+static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div)
+{
+ BUG_ON(lck_div < 1);
+ BUG_ON(pck_div < 2);
+
+ enable_clocks(1);
+ dispc_write_reg(DISPC_DIVISOR,
+ FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
+ enable_clocks(0);
+}
+
+static void dispc_get_lcd_divisor(int *lck_div, int *pck_div)
+{
+ u32 l;
+ l = dispc_read_reg(DISPC_DIVISOR);
+ *lck_div = FLD_GET(l, 23, 16);
+ *pck_div = FLD_GET(l, 7, 0);
+}
+
+unsigned long dispc_fclk_rate(void)
+{
+ unsigned long r = 0;
+
+ if (dss_get_dispc_clk_source() == 0)
+ r = dss_clk_get_rate(DSS_CLK_FCK1);
+ else
+#ifdef CONFIG_OMAP2_DSS_DSI
+ r = dsi_get_dsi1_pll_rate();
+#else
+ BUG();
+#endif
+ return r;
+}
+
+unsigned long dispc_lclk_rate(void)
+{
+ int lcd;
+ unsigned long r;
+ u32 l;
+
+ l = dispc_read_reg(DISPC_DIVISOR);
+
+ lcd = FLD_GET(l, 23, 16);
+
+ r = dispc_fclk_rate();
+
+ return r / lcd;
+}
+
+unsigned long dispc_pclk_rate(void)
+{
+ int lcd, pcd;
+ unsigned long r;
+ u32 l;
+
+ l = dispc_read_reg(DISPC_DIVISOR);
+
+ lcd = FLD_GET(l, 23, 16);
+ pcd = FLD_GET(l, 7, 0);
+
+ r = dispc_fclk_rate();
+
+ return r / lcd / pcd;
+}
+
+void dispc_dump_clocks(struct seq_file *s)
+{
+ int lcd, pcd;
+
+ enable_clocks(1);
+
+ dispc_get_lcd_divisor(&lcd, &pcd);
+
+ seq_printf(s, "- DISPC -\n");
+
+ seq_printf(s, "dispc fclk source = %s\n",
+ dss_get_dispc_clk_source() == 0 ?
+ "dss1_alwon_fclk" : "dsi1_pll_fclk");
+
+ seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd);
+ seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd);
+
+ enable_clocks(0);
+}
+
+void dispc_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ DUMPREG(DISPC_REVISION);
+ DUMPREG(DISPC_SYSCONFIG);
+ DUMPREG(DISPC_SYSSTATUS);
+ DUMPREG(DISPC_IRQSTATUS);
+ DUMPREG(DISPC_IRQENABLE);
+ DUMPREG(DISPC_CONTROL);
+ DUMPREG(DISPC_CONFIG);
+ DUMPREG(DISPC_CAPABLE);
+ DUMPREG(DISPC_DEFAULT_COLOR0);
+ DUMPREG(DISPC_DEFAULT_COLOR1);
+ DUMPREG(DISPC_TRANS_COLOR0);
+ DUMPREG(DISPC_TRANS_COLOR1);
+ DUMPREG(DISPC_LINE_STATUS);
+ DUMPREG(DISPC_LINE_NUMBER);
+ DUMPREG(DISPC_TIMING_H);
+ DUMPREG(DISPC_TIMING_V);
+ DUMPREG(DISPC_POL_FREQ);
+ DUMPREG(DISPC_DIVISOR);
+ DUMPREG(DISPC_GLOBAL_ALPHA);
+ DUMPREG(DISPC_SIZE_DIG);
+ DUMPREG(DISPC_SIZE_LCD);
+
+ DUMPREG(DISPC_GFX_BA0);
+ DUMPREG(DISPC_GFX_BA1);
+ DUMPREG(DISPC_GFX_POSITION);
+ DUMPREG(DISPC_GFX_SIZE);
+ DUMPREG(DISPC_GFX_ATTRIBUTES);
+ DUMPREG(DISPC_GFX_FIFO_THRESHOLD);
+ DUMPREG(DISPC_GFX_FIFO_SIZE_STATUS);
+ DUMPREG(DISPC_GFX_ROW_INC);
+ DUMPREG(DISPC_GFX_PIXEL_INC);
+ DUMPREG(DISPC_GFX_WINDOW_SKIP);
+ DUMPREG(DISPC_GFX_TABLE_BA);
+
+ DUMPREG(DISPC_DATA_CYCLE1);
+ DUMPREG(DISPC_DATA_CYCLE2);
+ DUMPREG(DISPC_DATA_CYCLE3);
+
+ DUMPREG(DISPC_CPR_COEF_R);
+ DUMPREG(DISPC_CPR_COEF_G);
+ DUMPREG(DISPC_CPR_COEF_B);
+
+ DUMPREG(DISPC_GFX_PRELOAD);
+
+ DUMPREG(DISPC_VID_BA0(0));
+ DUMPREG(DISPC_VID_BA1(0));
+ DUMPREG(DISPC_VID_POSITION(0));
+ DUMPREG(DISPC_VID_SIZE(0));
+ DUMPREG(DISPC_VID_ATTRIBUTES(0));
+ DUMPREG(DISPC_VID_FIFO_THRESHOLD(0));
+ DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(0));
+ DUMPREG(DISPC_VID_ROW_INC(0));
+ DUMPREG(DISPC_VID_PIXEL_INC(0));
+ DUMPREG(DISPC_VID_FIR(0));
+ DUMPREG(DISPC_VID_PICTURE_SIZE(0));
+ DUMPREG(DISPC_VID_ACCU0(0));
+ DUMPREG(DISPC_VID_ACCU1(0));
+
+ DUMPREG(DISPC_VID_BA0(1));
+ DUMPREG(DISPC_VID_BA1(1));
+ DUMPREG(DISPC_VID_POSITION(1));
+ DUMPREG(DISPC_VID_SIZE(1));
+ DUMPREG(DISPC_VID_ATTRIBUTES(1));
+ DUMPREG(DISPC_VID_FIFO_THRESHOLD(1));
+ DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(1));
+ DUMPREG(DISPC_VID_ROW_INC(1));
+ DUMPREG(DISPC_VID_PIXEL_INC(1));
+ DUMPREG(DISPC_VID_FIR(1));
+ DUMPREG(DISPC_VID_PICTURE_SIZE(1));
+ DUMPREG(DISPC_VID_ACCU0(1));
+ DUMPREG(DISPC_VID_ACCU1(1));
+
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 0));
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 1));
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 2));
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 3));
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 5));
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 6));
+ DUMPREG(DISPC_VID_FIR_COEF_H(0, 7));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 0));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 1));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 2));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 3));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 5));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 6));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(0, 7));
+ DUMPREG(DISPC_VID_CONV_COEF(0, 0));
+ DUMPREG(DISPC_VID_CONV_COEF(0, 1));
+ DUMPREG(DISPC_VID_CONV_COEF(0, 2));
+ DUMPREG(DISPC_VID_CONV_COEF(0, 3));
+ DUMPREG(DISPC_VID_CONV_COEF(0, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 0));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 1));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 2));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 3));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 5));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 6));
+ DUMPREG(DISPC_VID_FIR_COEF_V(0, 7));
+
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 0));
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 1));
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 2));
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 3));
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 5));
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 6));
+ DUMPREG(DISPC_VID_FIR_COEF_H(1, 7));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 0));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 1));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 2));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 3));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 5));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 6));
+ DUMPREG(DISPC_VID_FIR_COEF_HV(1, 7));
+ DUMPREG(DISPC_VID_CONV_COEF(1, 0));
+ DUMPREG(DISPC_VID_CONV_COEF(1, 1));
+ DUMPREG(DISPC_VID_CONV_COEF(1, 2));
+ DUMPREG(DISPC_VID_CONV_COEF(1, 3));
+ DUMPREG(DISPC_VID_CONV_COEF(1, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 0));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 1));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 2));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 3));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 4));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 5));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 6));
+ DUMPREG(DISPC_VID_FIR_COEF_V(1, 7));
+
+ DUMPREG(DISPC_VID_PRELOAD(0));
+ DUMPREG(DISPC_VID_PRELOAD(1));
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
+ bool ihs, bool ivs, u8 acbi, u8 acb)
+{
+ u32 l = 0;
+
+ DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n",
+ onoff, rf, ieo, ipc, ihs, ivs, acbi, acb);
+
+ l |= FLD_VAL(onoff, 17, 17);
+ l |= FLD_VAL(rf, 16, 16);
+ l |= FLD_VAL(ieo, 15, 15);
+ l |= FLD_VAL(ipc, 14, 14);
+ l |= FLD_VAL(ihs, 13, 13);
+ l |= FLD_VAL(ivs, 12, 12);
+ l |= FLD_VAL(acbi, 11, 8);
+ l |= FLD_VAL(acb, 7, 0);
+
+ enable_clocks(1);
+ dispc_write_reg(DISPC_POL_FREQ, l);
+ enable_clocks(0);
+}
+
+void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb)
+{
+ _dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0,
+ (config & OMAP_DSS_LCD_RF) != 0,
+ (config & OMAP_DSS_LCD_IEO) != 0,
+ (config & OMAP_DSS_LCD_IPC) != 0,
+ (config & OMAP_DSS_LCD_IHS) != 0,
+ (config & OMAP_DSS_LCD_IVS) != 0,
+ acbi, acb);
+}
+
+/* with fck as input clock rate, find dispc dividers that produce req_pck */
+void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
+ struct dispc_clock_info *cinfo)
+{
+ u16 pcd_min = is_tft ? 2 : 3;
+ unsigned long best_pck;
+ u16 best_ld, cur_ld;
+ u16 best_pd, cur_pd;
+
+ best_pck = 0;
+ best_ld = 0;
+ best_pd = 0;
+
+ for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
+ unsigned long lck = fck / cur_ld;
+
+ for (cur_pd = pcd_min; cur_pd <= 255; ++cur_pd) {
+ unsigned long pck = lck / cur_pd;
+ long old_delta = abs(best_pck - req_pck);
+ long new_delta = abs(pck - req_pck);
+
+ if (best_pck == 0 || new_delta < old_delta) {
+ best_pck = pck;
+ best_ld = cur_ld;
+ best_pd = cur_pd;
+
+ if (pck == req_pck)
+ goto found;
+ }
+
+ if (pck < req_pck)
+ break;
+ }
+
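+ /* even the smallest pck divisor cannot reach req_pck with this
+ * lck divisor; larger lck divisors only lower pck further, so stop */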
+ if (lck / pcd_min < req_pck)
+ break;
+ }
+
+found:
+ cinfo->lck_div = best_ld;
+ cinfo->pck_div = best_pd;
+ cinfo->lck = fck / cinfo->lck_div;
+ cinfo->pck = cinfo->lck / cinfo->pck_div;
+}
+
+/* calculate clock rates using dividers in cinfo */
+int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
+ struct dispc_clock_info *cinfo)
+{
+ if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
+ return -EINVAL;
+ if (cinfo->pck_div < 2 || cinfo->pck_div > 255)
+ return -EINVAL;
+
+ cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
+ cinfo->pck = cinfo->lck / cinfo->pck_div;
+
+ return 0;
+}
+
+int dispc_set_clock_div(struct dispc_clock_info *cinfo)
+{
+ DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
+ DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
+
+ dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div);
+
+ return 0;
+}
+
+int dispc_get_clock_div(struct dispc_clock_info *cinfo)
+{
+ unsigned long fck;
+
+ fck = dispc_fclk_rate();
+
+ cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16);
+ cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0);
+
+ cinfo->lck = fck / cinfo->lck_div;
+ cinfo->pck = cinfo->lck / cinfo->pck_div;
+
+ return 0;
+}
+
+/* dispc.irq_lock has to be locked by the caller */
+static void _omap_dispc_set_irqs(void)
+{
+ u32 mask;
+ u32 old_mask;
+ int i;
+ struct omap_dispc_isr_data *isr_data;
+
+ mask = dispc.irq_error_mask;
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc.registered_isr[i];
+
+ if (isr_data->isr == NULL)
+ continue;
+
+ mask |= isr_data->mask;
+ }
+
+ enable_clocks(1);
+
+ old_mask = dispc_read_reg(DISPC_IRQENABLE);
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
+
+ dispc_write_reg(DISPC_IRQENABLE, mask);
+
+ enable_clocks(0);
+}
+
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct omap_dispc_isr_data *isr_data;
+
+ if (isr == NULL)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dispc.irq_lock, flags);
+
+ /* check for duplicate entry */
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc.registered_isr[i];
+ if (isr_data->isr == isr && isr_data->arg == arg &&
+ isr_data->mask == mask) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ isr_data = NULL;
+ ret = -EBUSY;
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc.registered_isr[i];
+
+ if (isr_data->isr != NULL)
+ continue;
+
+ isr_data->isr = isr;
+ isr_data->arg = arg;
+ isr_data->mask = mask;
+ ret = 0;
+
+ break;
+ }
+
+ _omap_dispc_set_irqs();
+
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+ return ret;
+err:
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(omap_dispc_register_isr);
+
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
+{
+ int i;
+ unsigned long flags;
+ int ret = -EINVAL;
+ struct omap_dispc_isr_data *isr_data;
+
+ spin_lock_irqsave(&dispc.irq_lock, flags);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &dispc.registered_isr[i];
+ if (isr_data->isr != isr || isr_data->arg != arg ||
+ isr_data->mask != mask)
+ continue;
+
+ /* found the correct isr */
+
+ isr_data->isr = NULL;
+ isr_data->arg = NULL;
+ isr_data->mask = 0;
+
+ ret = 0;
+ break;
+ }
+
+ if (ret == 0)
+ _omap_dispc_set_irqs();
+
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(omap_dispc_unregister_isr);
+
+#ifdef DEBUG
+static void print_irq_status(u32 status)
+{
+ if ((status & dispc.irq_error_mask) == 0)
+ return;
+
+ printk(KERN_DEBUG "DISPC IRQ: 0x%x: ", status);
+
+#define PIS(x) \
+ if (status & DISPC_IRQ_##x) \
+ printk(#x " ");
+ PIS(GFX_FIFO_UNDERFLOW);
+ PIS(OCP_ERR);
+ PIS(VID1_FIFO_UNDERFLOW);
+ PIS(VID2_FIFO_UNDERFLOW);
+ PIS(SYNC_LOST);
+ PIS(SYNC_LOST_DIGIT);
+#undef PIS
+
+ printk("\n");
+}
+#endif
+
+/* Called from dss.c. Note that we don't touch the clocks here,
+ * but we presume they are on because we got an IRQ. However,
+ * an irq handler may turn the clocks off, so they may already be
+ * off again later in this function. */
+void dispc_irq_handler(void)
+{
+ int i;
+ u32 irqstatus;
+ u32 handledirqs = 0;
+ u32 unhandled_errors;
+ struct omap_dispc_isr_data *isr_data;
+ struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
+
+ spin_lock(&dispc.irq_lock);
+
+ irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
+
+#ifdef DEBUG
+ if (dss_debug)
+ print_irq_status(irqstatus);
+#endif
+ /* Ack the interrupt. Do it here before clocks are possibly turned
+ * off */
+ dispc_write_reg(DISPC_IRQSTATUS, irqstatus);
+ /* flush posted write */
+ dispc_read_reg(DISPC_IRQSTATUS);
+
+ /* make a copy and unlock, so that isrs can unregister
+ * themselves */
+ memcpy(registered_isr, dispc.registered_isr,
+ sizeof(registered_isr));
+
+ spin_unlock(&dispc.irq_lock);
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ isr_data = &registered_isr[i];
+
+ if (!isr_data->isr)
+ continue;
+
+ if (isr_data->mask & irqstatus) {
+ isr_data->isr(isr_data->arg, irqstatus);
+ handledirqs |= isr_data->mask;
+ }
+ }
+
+ spin_lock(&dispc.irq_lock);
+
+ unhandled_errors = irqstatus & ~handledirqs & dispc.irq_error_mask;
+
+ if (unhandled_errors) {
+ dispc.error_irqs |= unhandled_errors;
+
+ dispc.irq_error_mask &= ~unhandled_errors;
+ _omap_dispc_set_irqs();
+
+ schedule_work(&dispc.error_work);
+ }
+
+ spin_unlock(&dispc.irq_lock);
+}
+
+static void dispc_error_worker(struct work_struct *work)
+{
+ int i;
+ u32 errors;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dispc.irq_lock, flags);
+ errors = dispc.error_irqs;
+ dispc.error_irqs = 0;
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+
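+ /* for each FIFO underflow, disable the overlay that underflowed,
+ * apply the change and give DISPC a moment to settle */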
+ if (errors & DISPC_IRQ_GFX_FIFO_UNDERFLOW) {
+ DSSERR("GFX_FIFO_UNDERFLOW, disabling GFX\n");
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id == 0) {
+ dispc_enable_plane(ovl->id, 0);
+ dispc_go(ovl->manager->id);
+ mdelay(50);
+ break;
+ }
+ }
+ }
+
+ if (errors & DISPC_IRQ_VID1_FIFO_UNDERFLOW) {
+ DSSERR("VID1_FIFO_UNDERFLOW, disabling VID1\n");
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id == 1) {
+ dispc_enable_plane(ovl->id, 0);
+ dispc_go(ovl->manager->id);
+ mdelay(50);
+ break;
+ }
+ }
+ }
+
+ if (errors & DISPC_IRQ_VID2_FIFO_UNDERFLOW) {
+ DSSERR("VID2_FIFO_UNDERFLOW, disabling VID2\n");
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id == 2) {
+ dispc_enable_plane(ovl->id, 0);
+ dispc_go(ovl->manager->id);
+ mdelay(50);
+ break;
+ }
+ }
+ }
+
+ if (errors & DISPC_IRQ_SYNC_LOST) {
+ struct omap_overlay_manager *manager = NULL;
+ bool enable = false;
+
+ DSSERR("SYNC_LOST, disabling LCD\n");
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ mgr = omap_dss_get_overlay_manager(i);
+
+ if (mgr->id == OMAP_DSS_CHANNEL_LCD) {
+ manager = mgr;
+ enable = mgr->device->state ==
+ OMAP_DSS_DISPLAY_ACTIVE;
+ mgr->device->disable(mgr->device);
+ break;
+ }
+ }
+
+ if (manager) {
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id != 0 && ovl->manager == manager)
+ dispc_enable_plane(ovl->id, 0);
+ }
+
+ dispc_go(manager->id);
+ mdelay(50);
+ if (enable)
+ manager->device->enable(manager->device);
+ }
+ }
+
+ if (errors & DISPC_IRQ_SYNC_LOST_DIGIT) {
+ struct omap_overlay_manager *manager = NULL;
+ bool enable = false;
+
+ DSSERR("SYNC_LOST_DIGIT, disabling TV\n");
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ mgr = omap_dss_get_overlay_manager(i);
+
+ if (mgr->id == OMAP_DSS_CHANNEL_DIGIT) {
+ manager = mgr;
+ enable = mgr->device->state ==
+ OMAP_DSS_DISPLAY_ACTIVE;
+ mgr->device->disable(mgr->device);
+ break;
+ }
+ }
+
+ if (manager) {
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ if (ovl->id != 0 && ovl->manager == manager)
+ dispc_enable_plane(ovl->id, 0);
+ }
+
+ dispc_go(manager->id);
+ mdelay(50);
+ if (enable)
+ manager->device->enable(manager->device);
+ }
+ }
+
+ if (errors & DISPC_IRQ_OCP_ERR) {
+ DSSERR("OCP_ERR\n");
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ struct omap_overlay_manager *mgr;
+ mgr = omap_dss_get_overlay_manager(i);
+
+ if (mgr->caps & OMAP_DSS_OVL_CAP_DISPC)
+ mgr->device->disable(mgr->device);
+ }
+ }
+
+ spin_lock_irqsave(&dispc.irq_lock, flags);
+ dispc.irq_error_mask |= errors;
+ _omap_dispc_set_irqs();
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+}
+
+int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
+{
+ void dispc_irq_wait_handler(void *data, u32 mask)
+ {
+ complete((struct completion *)data);
+ }
+
+ int r;
+ DECLARE_COMPLETION_ONSTACK(completion);
+
+ r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
+ irqmask);
+
+ if (r)
+ return r;
+
+ timeout = wait_for_completion_timeout(&completion, timeout);
+
+ omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ if (timeout == -ERESTARTSYS)
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+ unsigned long timeout)
+{
+ void dispc_irq_wait_handler(void *data, u32 mask)
+ {
+ complete((struct completion *)data);
+ }
+
+ int r;
+ DECLARE_COMPLETION_ONSTACK(completion);
+
+ r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
+ irqmask);
+
+ if (r)
+ return r;
+
+ timeout = wait_for_completion_interruptible_timeout(&completion,
+ timeout);
+
+ omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
+
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ if (timeout == -ERESTARTSYS)
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
+void dispc_fake_vsync_irq(void)
+{
+ u32 irqstatus = DISPC_IRQ_VSYNC;
+ int i;
+
+ local_irq_disable();
+
+ for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
+ struct omap_dispc_isr_data *isr_data;
+ isr_data = &dispc.registered_isr[i];
+
+ if (!isr_data->isr)
+ continue;
+
+ if (isr_data->mask & irqstatus)
+ isr_data->isr(isr_data->arg, irqstatus);
+ }
+
+ local_irq_enable();
+}
+#endif
+
+static void _omap_dispc_initialize_irq(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dispc.irq_lock, flags);
+
+ memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
+
+ dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
+
+ /* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
+ * so clear it */
+ dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS));
+
+ _omap_dispc_set_irqs();
+
+ spin_unlock_irqrestore(&dispc.irq_lock, flags);
+}
+
+void dispc_enable_sidle(void)
+{
+ REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3); /* SIDLEMODE: smart idle */
+}
+
+void dispc_disable_sidle(void)
+{
+ REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
+}
+
+static void _omap_dispc_initial_config(void)
+{
+ u32 l;
+
+ l = dispc_read_reg(DISPC_SYSCONFIG);
+ l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
+ l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
+ l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
+ l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
+ dispc_write_reg(DISPC_SYSCONFIG, l);
+
+ /* FUNCGATED */
+ REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+
+ /* L3 firewall setting: enable access to OCM RAM */
+ /* XXX this should be somewhere in plat-omap */
+ if (cpu_is_omap24xx())
+ __raw_writel(0x402000b0, OMAP2_L3_IO_ADDRESS(0x680050a0));
+
+ _dispc_setup_color_conv_coef();
+
+ dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
+
+ dispc_read_plane_fifo_sizes();
+}
+
+int dispc_init(void)
+{
+ u32 rev;
+
+ spin_lock_init(&dispc.irq_lock);
+
+ INIT_WORK(&dispc.error_work, dispc_error_worker);
+
+ dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
+ if (!dispc.base) {
+ DSSERR("can't ioremap DISPC\n");
+ return -ENOMEM;
+ }
+
+ enable_clocks(1);
+
+ _omap_dispc_initial_config();
+
+ _omap_dispc_initialize_irq();
+
+ dispc_save_context();
+
+ rev = dispc_read_reg(DISPC_REVISION);
+ printk(KERN_INFO "OMAP DISPC rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ enable_clocks(0);
+
+ return 0;
+}
+
+void dispc_exit(void)
+{
+ iounmap(dispc.base);
+}
+
+int dispc_enable_plane(enum omap_plane plane, bool enable)
+{
+ DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
+
+ enable_clocks(1);
+ _dispc_enable_plane(plane, enable);
+ enable_clocks(0);
+
+ return 0;
+}
+
+int dispc_setup_plane(enum omap_plane plane,
+ u32 paddr, u16 screen_width,
+ u16 pos_x, u16 pos_y,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ enum omap_color_mode color_mode,
+ bool ilace,
+ enum omap_dss_rotation_type rotation_type,
+ u8 rotation, bool mirror, u8 global_alpha)
+{
+ int r = 0;
+
+ DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
+ "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n",
+ plane, paddr, screen_width, pos_x, pos_y,
+ width, height,
+ out_width, out_height,
+ ilace, color_mode,
+ rotation, mirror);
+
+ enable_clocks(1);
+
+ r = _dispc_setup_plane(plane,
+ paddr, screen_width,
+ pos_x, pos_y,
+ width, height,
+ out_width, out_height,
+ color_mode, ilace,
+ rotation_type,
+ rotation, mirror,
+ global_alpha);
+
+ enable_clocks(0);
+
+ return r;
+}
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
new file mode 100644
index 00000000000..3b92b84b956
--- /dev/null
+++ b/drivers/video/omap2/dss/display.c
@@ -0,0 +1,671 @@
+/*
+ * linux/drivers/video/omap2/dss/display.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DISPLAY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+static LIST_HEAD(display_list);
+
+static ssize_t display_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
+}
+
+static ssize_t display_enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ bool enabled;
+ int r;
+
+ enabled = simple_strtoul(buf, NULL, 10);
+
+ if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
+ if (enabled) {
+ r = dssdev->enable(dssdev);
+ if (r)
+ return r;
+ } else {
+ dssdev->disable(dssdev);
+ }
+ }
+
+ return size;
+}
+
+static ssize_t display_upd_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
+ if (dssdev->get_update_mode)
+ mode = dssdev->get_update_mode(dssdev);
+ return snprintf(buf, PAGE_SIZE, "%d\n", mode);
+}
+
+static ssize_t display_upd_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int val, r;
+ enum omap_dss_update_mode mode;
+
+ val = simple_strtoul(buf, NULL, 10);
+
+ switch (val) {
+ case OMAP_DSS_UPDATE_DISABLED:
+ case OMAP_DSS_UPDATE_AUTO:
+ case OMAP_DSS_UPDATE_MANUAL:
+ mode = (enum omap_dss_update_mode)val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ r = dssdev->set_update_mode(dssdev, mode);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_tear_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ dssdev->get_te ? dssdev->get_te(dssdev) : 0);
+}
+
+static ssize_t display_tear_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ unsigned long te;
+ int r;
+
+ if (!dssdev->enable_te || !dssdev->get_te)
+ return -ENOENT;
+
+ te = simple_strtoul(buf, NULL, 0);
+
+ r = dssdev->enable_te(dssdev, te);
+ if (r)
+ return r;
+
+ return size;
+}
+
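+/* timings are exposed as "pixel_clock,x_res/hfp/hbp/hsw,y_res/vfp/vbp/vsw";
+ * the store also accepts "pal" or "ntsc" when the VENC support is built in */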
+static ssize_t display_timings_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_video_timings t;
+
+ if (!dssdev->get_timings)
+ return -ENOENT;
+
+ dssdev->get_timings(dssdev, &t);
+
+ return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
+ t.pixel_clock,
+ t.x_res, t.hfp, t.hbp, t.hsw,
+ t.y_res, t.vfp, t.vbp, t.vsw);
+}
+
+static ssize_t display_timings_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ struct omap_video_timings t;
+ int r, found;
+
+ if (!dssdev->set_timings || !dssdev->check_timings)
+ return -ENOENT;
+
+ found = 0;
+#ifdef CONFIG_OMAP2_DSS_VENC
+ if (strncmp("pal", buf, 3) == 0) {
+ t = omap_dss_pal_timings;
+ found = 1;
+ } else if (strncmp("ntsc", buf, 4) == 0) {
+ t = omap_dss_ntsc_timings;
+ found = 1;
+ }
+#endif
+ if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
+ &t.pixel_clock,
+ &t.x_res, &t.hfp, &t.hbp, &t.hsw,
+ &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
+ return -EINVAL;
+
+ r = dssdev->check_timings(dssdev, &t);
+ if (r)
+ return r;
+
+ dssdev->set_timings(dssdev, &t);
+
+ return size;
+}
+
+static ssize_t display_rotate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int rotate;
+ if (!dssdev->get_rotate)
+ return -ENOENT;
+ rotate = dssdev->get_rotate(dssdev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
+}
+
+static ssize_t display_rotate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ unsigned long rot;
+ int r;
+
+ if (!dssdev->set_rotate || !dssdev->get_rotate)
+ return -ENOENT;
+
+ rot = simple_strtoul(buf, NULL, 0);
+
+ r = dssdev->set_rotate(dssdev, rot);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_mirror_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ int mirror;
+ if (!dssdev->get_mirror)
+ return -ENOENT;
+ mirror = dssdev->get_mirror(dssdev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
+}
+
+static ssize_t display_mirror_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ unsigned long mirror;
+ int r;
+
+ if (!dssdev->set_mirror || !dssdev->get_mirror)
+ return -ENOENT;
+
+ mirror = simple_strtoul(buf, NULL, 0);
+
+ r = dssdev->set_mirror(dssdev, mirror);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t display_wss_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ unsigned int wss;
+
+ if (!dssdev->get_wss)
+ return -ENOENT;
+
+ wss = dssdev->get_wss(dssdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
+}
+
+static ssize_t display_wss_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ unsigned long wss;
+ int r;
+
+ if (!dssdev->get_wss || !dssdev->set_wss)
+ return -ENOENT;
+
+ if (strict_strtoul(buf, 0, &wss))
+ return -EINVAL;
+
+ if (wss > 0xfffff)
+ return -EINVAL;
+
+ r = dssdev->set_wss(dssdev, wss);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
+ display_enabled_show, display_enabled_store);
+static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR,
+ display_upd_mode_show, display_upd_mode_store);
+static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
+ display_tear_show, display_tear_store);
+static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
+ display_timings_show, display_timings_store);
+static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
+ display_rotate_show, display_rotate_store);
+static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
+ display_mirror_show, display_mirror_store);
+static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
+ display_wss_show, display_wss_store);
+
+static struct device_attribute *display_sysfs_attrs[] = {
+ &dev_attr_enabled,
+ &dev_attr_update_mode,
+ &dev_attr_tear_elim,
+ &dev_attr_timings,
+ &dev_attr_rotate,
+ &dev_attr_mirror,
+ &dev_attr_wss,
+ NULL
+};
+
+static void default_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres)
+{
+ *xres = dssdev->panel.timings.x_res;
+ *yres = dssdev->panel.timings.y_res;
+}
+
+void default_get_overlay_fifo_thresholds(enum omap_plane plane,
+ u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 *fifo_low, u32 *fifo_high)
+{
+ unsigned burst_size_bytes;
+
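+ /* a 16x32-bit burst is 64 bytes; keep the low threshold one burst
+ * below the FIFO top and the high threshold at the last FIFO byte */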
+ *burst_size = OMAP_DSS_BURST_16x32;
+ burst_size_bytes = 16 * 32 / 8;
+
+ *fifo_high = fifo_size - 1;
+ *fifo_low = fifo_size - burst_size_bytes;
+}
+
+static int default_wait_vsync(struct omap_dss_device *dssdev)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ u32 irq;
+
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
+ irq = DISPC_IRQ_EVSYNC_ODD;
+ else
+ irq = DISPC_IRQ_VSYNC;
+
+ return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+}
+
+static int default_get_recommended_bpp(struct omap_dss_device *dssdev)
+{
+ if (dssdev->panel.recommended_bpp)
+ return dssdev->panel.recommended_bpp;
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (dssdev->phy.dpi.data_lines == 24)
+ return 24;
+ else
+ return 16;
+
+ case OMAP_DISPLAY_TYPE_DBI:
+ case OMAP_DISPLAY_TYPE_DSI:
+ if (dssdev->ctrl.pixel_size == 24)
+ return 24;
+ else
+ return 16;
+ case OMAP_DISPLAY_TYPE_VENC:
+ case OMAP_DISPLAY_TYPE_SDI:
+ return 24;
+ default:
+ BUG();
+ }
+}
+
+/* Checks if replication logic should be used. Only use for active matrix,
+ * when overlay is in RGB12U or RGB16 mode, and LCD interface is
+ * 18bpp or 24bpp */
+bool dss_use_replication(struct omap_dss_device *dssdev,
+ enum omap_color_mode mode)
+{
+ int bpp;
+
+ if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16)
+ return false;
+
+ if (dssdev->type == OMAP_DISPLAY_TYPE_DPI &&
+ (dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0)
+ return false;
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ bpp = dssdev->phy.dpi.data_lines;
+ break;
+ case OMAP_DISPLAY_TYPE_VENC:
+ case OMAP_DISPLAY_TYPE_SDI:
+ bpp = 24;
+ break;
+ case OMAP_DISPLAY_TYPE_DBI:
+ case OMAP_DISPLAY_TYPE_DSI:
+ bpp = dssdev->ctrl.pixel_size;
+ break;
+ default:
+ BUG();
+ }
+
+ return bpp > 16;
+}
+
+void dss_init_device(struct platform_device *pdev,
+ struct omap_dss_device *dssdev)
+{
+ struct device_attribute *attr;
+ int i;
+ int r;
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ case OMAP_DISPLAY_TYPE_DBI:
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+ case OMAP_DISPLAY_TYPE_SDI:
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ case OMAP_DISPLAY_TYPE_DSI:
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ case OMAP_DISPLAY_TYPE_VENC:
+#endif
+ break;
+ default:
+ DSSERR("Support for display '%s' not compiled in.\n",
+ dssdev->name);
+ return;
+ }
+
+ dssdev->get_resolution = default_get_resolution;
+ dssdev->get_recommended_bpp = default_get_recommended_bpp;
+ dssdev->wait_vsync = default_wait_vsync;
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ r = dpi_init_display(dssdev);
+ break;
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ case OMAP_DISPLAY_TYPE_DBI:
+ r = rfbi_init_display(dssdev);
+ break;
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ case OMAP_DISPLAY_TYPE_VENC:
+ r = venc_init_display(dssdev);
+ break;
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+ case OMAP_DISPLAY_TYPE_SDI:
+ r = sdi_init_display(dssdev);
+ break;
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ case OMAP_DISPLAY_TYPE_DSI:
+ r = dsi_init_display(dssdev);
+ break;
+#endif
+ default:
+ BUG();
+ }
+
+ if (r) {
+ DSSERR("failed to init display %s\n", dssdev->name);
+ return;
+ }
+
+ /* create device sysfs files */
+ i = 0;
+ while ((attr = display_sysfs_attrs[i++]) != NULL) {
+ r = device_create_file(&dssdev->dev, attr);
+ if (r)
+ DSSERR("failed to create sysfs file\n");
+ }
+
+ /* create a sysfs link to the display device */
+ r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
+ dev_name(&dssdev->dev));
+ if (r)
+ DSSERR("failed to create sysfs display link\n");
+}
+
+void dss_uninit_device(struct platform_device *pdev,
+ struct omap_dss_device *dssdev)
+{
+ struct device_attribute *attr;
+ int i = 0;
+
+ sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
+
+ while ((attr = display_sysfs_attrs[i++]) != NULL)
+ device_remove_file(&dssdev->dev, attr);
+
+ if (dssdev->manager)
+ dssdev->manager->unset_device(dssdev->manager);
+}
+
+static int dss_suspend_device(struct device *dev, void *data)
+{
+ int r;
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ dssdev->activate_after_resume = false;
+ return 0;
+ }
+
+ if (!dssdev->suspend) {
+ DSSERR("display '%s' doesn't implement suspend\n",
+ dssdev->name);
+ return -ENOSYS;
+ }
+
+ r = dssdev->suspend(dssdev);
+ if (r)
+ return r;
+
+ dssdev->activate_after_resume = true;
+
+ return 0;
+}
+
+int dss_suspend_all_devices(void)
+{
+ int r;
+ struct bus_type *bus = dss_get_bus();
+
+ r = bus_for_each_dev(bus, NULL, NULL, dss_suspend_device);
+ if (r) {
+ /* resume all displays that were suspended */
+ dss_resume_all_devices();
+ return r;
+ }
+
+ return 0;
+}
+
+static int dss_resume_device(struct device *dev, void *data)
+{
+ int r;
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+
+ if (dssdev->activate_after_resume && dssdev->resume) {
+ r = dssdev->resume(dssdev);
+ if (r)
+ return r;
+ }
+
+ dssdev->activate_after_resume = false;
+
+ return 0;
+}
+
+int dss_resume_all_devices(void)
+{
+ struct bus_type *bus = dss_get_bus();
+
+ return bus_for_each_dev(bus, NULL, NULL, dss_resume_device);
+}
+
+static int dss_disable_device(struct device *dev, void *data)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ dssdev->disable(dssdev);
+ return 0;
+}
+
+void dss_disable_all_devices(void)
+{
+ struct bus_type *bus = dss_get_bus();
+ bus_for_each_dev(bus, NULL, NULL, dss_disable_device);
+}
+
+
+void omap_dss_get_device(struct omap_dss_device *dssdev)
+{
+ get_device(&dssdev->dev);
+}
+EXPORT_SYMBOL(omap_dss_get_device);
+
+void omap_dss_put_device(struct omap_dss_device *dssdev)
+{
+ put_device(&dssdev->dev);
+}
+EXPORT_SYMBOL(omap_dss_put_device);
+
+/* ref count of the found device is incremented. ref count
+ * of from-device is decremented. */
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct omap_dss_device *dssdev = NULL;
+
+ int match(struct device *dev, void *data)
+ {
+ /* skip panels connected to controllers */
+ if (to_dss_device(dev)->panel.ctrl)
+ return 0;
+
+ return 1;
+ }
+
+ if (from)
+ dev_start = &from->dev;
+ dev = bus_find_device(dss_get_bus(), dev_start, NULL, match);
+ if (dev)
+ dssdev = to_dss_device(dev);
+ if (from)
+ put_device(&from->dev);
+
+ return dssdev;
+}
+EXPORT_SYMBOL(omap_dss_get_next_device);
+
+struct omap_dss_device *omap_dss_find_device(void *data,
+ int (*match)(struct omap_dss_device *dssdev, void *data))
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) {
+ if (match(dssdev, data))
+ return dssdev;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(omap_dss_find_device);
+
+int omap_dss_start_device(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ if (!dssdev->driver) {
+ DSSDBG("no driver\n");
+ r = -ENODEV;
+ goto err0;
+ }
+
+ if (dssdev->ctrl.panel && !dssdev->ctrl.panel->driver) {
+ DSSDBG("no panel driver\n");
+ r = -ENODEV;
+ goto err0;
+ }
+
+ if (!try_module_get(dssdev->dev.driver->owner)) {
+ r = -ENODEV;
+ goto err0;
+ }
+
+ if (dssdev->ctrl.panel) {
+ if (!try_module_get(dssdev->ctrl.panel->dev.driver->owner)) {
+ r = -ENODEV;
+ goto err1;
+ }
+ }
+
+ return 0;
+err1:
+ module_put(dssdev->dev.driver->owner);
+err0:
+ return r;
+}
+EXPORT_SYMBOL(omap_dss_start_device);
+
+void omap_dss_stop_device(struct omap_dss_device *dssdev)
+{
+ if (dssdev->ctrl.panel)
+ module_put(dssdev->ctrl.panel->dev.driver->owner);
+
+ module_put(dssdev->dev.driver->owner);
+}
+EXPORT_SYMBOL(omap_dss_stop_device);
+
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
new file mode 100644
index 00000000000..2d71031baa2
--- /dev/null
+++ b/drivers/video/omap2/dss/dpi.c
@@ -0,0 +1,399 @@
+/*
+ * linux/drivers/video/omap2/dss/dpi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DPI"
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+static struct {
+ int update_enabled;
+} dpi;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
+ unsigned long *fck, int *lck_div, int *pck_div)
+{
+ struct dsi_clock_info dsi_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ int r;
+
+ r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo,
+ &dispc_cinfo);
+ if (r)
+ return r;
+
+ r = dsi_pll_set_clock_div(&dsi_cinfo);
+ if (r)
+ return r;
+
+ dss_select_clk_source(0, 1);
+
+ r = dispc_set_clock_div(&dispc_cinfo);
+ if (r)
+ return r;
+
+ *fck = dsi_cinfo.dsi1_pll_fclk;
+ *lck_div = dispc_cinfo.lck_div;
+ *pck_div = dispc_cinfo.pck_div;
+
+ return 0;
+}
+#else
+static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
+ unsigned long *fck, int *lck_div, int *pck_div)
+{
+ struct dss_clock_info dss_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ int r;
+
+ r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo);
+ if (r)
+ return r;
+
+ r = dss_set_clock_div(&dss_cinfo);
+ if (r)
+ return r;
+
+ r = dispc_set_clock_div(&dispc_cinfo);
+ if (r)
+ return r;
+
+ *fck = dss_cinfo.fck;
+ *lck_div = dispc_cinfo.lck_div;
+ *pck_div = dispc_cinfo.pck_div;
+
+ return 0;
+}
+#endif
+
+static int dpi_set_mode(struct omap_dss_device *dssdev)
+{
+ struct omap_video_timings *t = &dssdev->panel.timings;
+ int lck_div, pck_div;
+ unsigned long fck;
+ unsigned long pck;
+ bool is_tft;
+ int r = 0;
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
+ dssdev->panel.acb);
+
+ is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000,
+ &fck, &lck_div, &pck_div);
+#else
+ r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000,
+ &fck, &lck_div, &pck_div);
+#endif
+ if (r)
+ goto err0;
+
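+ /* resulting pixel clock in kHz */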
+ pck = fck / lck_div / pck_div / 1000;
+
+ if (pck != t->pixel_clock) {
+ DSSWARN("Could not find exact pixel clock. "
+ "Requested %d kHz, got %lu kHz\n",
+ t->pixel_clock, pck);
+
+ t->pixel_clock = pck;
+ }
+
+ dispc_set_lcd_timings(t);
+
+err0:
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ return r;
+}
+
+static int dpi_basic_init(struct omap_dss_device *dssdev)
+{
+ bool is_tft;
+
+ is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+
+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+ dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT :
+ OMAP_DSS_LCD_DISPLAY_STN);
+ dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines);
+
+ return 0;
+}
+
+static int dpi_display_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err0;
+ }
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ DSSERR("display already enabled\n");
+ r = -EINVAL;
+ goto err1;
+ }
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ r = dpi_basic_init(dssdev);
+ if (r)
+ goto err2;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ dss_clk_enable(DSS_CLK_FCK2);
+ r = dsi_pll_init(dssdev, 0, 1);
+ if (r)
+ goto err3;
+#endif
+ r = dpi_set_mode(dssdev);
+ if (r)
+ goto err4;
+
+ mdelay(2);
+
+ dispc_enable_lcd_out(1);
+
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ goto err5;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+
+err5:
+ dispc_enable_lcd_out(0);
+err4:
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ dsi_pll_uninit();
+err3:
+ dss_clk_disable(DSS_CLK_FCK2);
+#endif
+err2:
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+err1:
+ omap_dss_stop_device(dssdev);
+err0:
+ return r;
+}
+
+static int dpi_display_resume(struct omap_dss_device *dssdev);
+
+static void dpi_display_disable(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+ return;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+ dpi_display_resume(dssdev);
+
+ dssdev->driver->disable(dssdev);
+
+ dispc_enable_lcd_out(0);
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ dss_select_clk_source(0, 0);
+ dsi_pll_uninit();
+ dss_clk_disable(DSS_CLK_FCK2);
+#endif
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ omap_dss_stop_device(dssdev);
+}
+
+static int dpi_display_suspend(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return -EINVAL;
+
+ DSSDBG("dpi_display_suspend\n");
+
+ if (dssdev->driver->suspend)
+ dssdev->driver->suspend(dssdev);
+
+ dispc_enable_lcd_out(0);
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ return 0;
+}
+
+static int dpi_display_resume(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
+ return -EINVAL;
+
+ DSSDBG("dpi_display_resume\n");
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dispc_enable_lcd_out(1);
+
+ if (dssdev->driver->resume)
+ dssdev->driver->resume(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void dpi_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("dpi_set_timings\n");
+ dssdev->panel.timings = *timings;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ dpi_set_mode(dssdev);
+ dispc_go(OMAP_DSS_CHANNEL_LCD);
+ }
+}
+
+static int dpi_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ bool is_tft;
+ int r;
+ int lck_div, pck_div;
+ unsigned long fck;
+ unsigned long pck;
+
+ if (!dispc_lcd_timings_ok(timings))
+ return -EINVAL;
+
+ if (timings->pixel_clock == 0)
+ return -EINVAL;
+
+ is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+
+#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
+ {
+ struct dsi_clock_info dsi_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ r = dsi_pll_calc_clock_div_pck(is_tft,
+ timings->pixel_clock * 1000,
+ &dsi_cinfo, &dispc_cinfo);
+
+ if (r)
+ return r;
+
+ fck = dsi_cinfo.dsi1_pll_fclk;
+ lck_div = dispc_cinfo.lck_div;
+ pck_div = dispc_cinfo.pck_div;
+ }
+#else
+ {
+ struct dss_clock_info dss_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000,
+ &dss_cinfo, &dispc_cinfo);
+
+ if (r)
+ return r;
+
+ fck = dss_cinfo.fck;
+ lck_div = dispc_cinfo.lck_div;
+ pck_div = dispc_cinfo.pck_div;
+ }
+#endif
+
+ pck = fck / lck_div / pck_div / 1000;
+
+ timings->pixel_clock = pck;
+
+ return 0;
+}
+
+static void dpi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static int dpi_display_set_update_mode(struct omap_dss_device *dssdev,
+ enum omap_dss_update_mode mode)
+{
+ if (mode == OMAP_DSS_UPDATE_MANUAL)
+ return -EINVAL;
+
+ if (mode == OMAP_DSS_UPDATE_DISABLED) {
+ dispc_enable_lcd_out(0);
+ dpi.update_enabled = 0;
+ } else {
+ dispc_enable_lcd_out(1);
+ dpi.update_enabled = 1;
+ }
+
+ return 0;
+}
+
+static enum omap_dss_update_mode dpi_display_get_update_mode(
+ struct omap_dss_device *dssdev)
+{
+ return dpi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
+ OMAP_DSS_UPDATE_DISABLED;
+}
+
+int dpi_init_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("init_display\n");
+
+ dssdev->enable = dpi_display_enable;
+ dssdev->disable = dpi_display_disable;
+ dssdev->suspend = dpi_display_suspend;
+ dssdev->resume = dpi_display_resume;
+ dssdev->set_timings = dpi_set_timings;
+ dssdev->check_timings = dpi_check_timings;
+ dssdev->get_timings = dpi_get_timings;
+ dssdev->set_update_mode = dpi_display_set_update_mode;
+ dssdev->get_update_mode = dpi_display_get_update_mode;
+
+ return 0;
+}
+
+int dpi_init(void)
+{
+ return 0;
+}
+
+void dpi_exit(void)
+{
+}
+
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
new file mode 100644
index 00000000000..5936487b5de
--- /dev/null
+++ b/drivers/video/omap2/dss/dsi.c
@@ -0,0 +1,3710 @@
+/*
+ * linux/drivers/video/omap2/dss/dsi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DSI"
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+
+#include <plat/display.h>
+#include <plat/clock.h>
+
+#include "dss.h"
+
+/*#define VERBOSE_IRQ*/
+#define DSI_CATCH_MISSING_TE
+
+#define DSI_BASE 0x4804FC00
+
+struct dsi_reg { u16 idx; };
+
+#define DSI_REG(idx) ((const struct dsi_reg) { idx })
+
+#define DSI_SZ_REGS SZ_1K
+/* DSI Protocol Engine */
+
+#define DSI_REVISION DSI_REG(0x0000)
+#define DSI_SYSCONFIG DSI_REG(0x0010)
+#define DSI_SYSSTATUS DSI_REG(0x0014)
+#define DSI_IRQSTATUS DSI_REG(0x0018)
+#define DSI_IRQENABLE DSI_REG(0x001C)
+#define DSI_CTRL DSI_REG(0x0040)
+#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
+#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
+#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
+#define DSI_CLK_CTRL DSI_REG(0x0054)
+#define DSI_TIMING1 DSI_REG(0x0058)
+#define DSI_TIMING2 DSI_REG(0x005C)
+#define DSI_VM_TIMING1 DSI_REG(0x0060)
+#define DSI_VM_TIMING2 DSI_REG(0x0064)
+#define DSI_VM_TIMING3 DSI_REG(0x0068)
+#define DSI_CLK_TIMING DSI_REG(0x006C)
+#define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
+#define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
+#define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
+#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
+#define DSI_VM_TIMING4 DSI_REG(0x0080)
+#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
+#define DSI_VM_TIMING5 DSI_REG(0x0088)
+#define DSI_VM_TIMING6 DSI_REG(0x008C)
+#define DSI_VM_TIMING7 DSI_REG(0x0090)
+#define DSI_STOPCLK_TIMING DSI_REG(0x0094)
+#define DSI_VC_CTRL(n) DSI_REG(0x0100 + (n * 0x20))
+#define DSI_VC_TE(n) DSI_REG(0x0104 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + (n * 0x20))
+#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + (n * 0x20))
+#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + (n * 0x20))
+#define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + (n * 0x20))
+#define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + (n * 0x20))
+
+/* DSIPHY_SCP */
+
+#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
+#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
+#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
+#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
+
+/* DSI_PLL_CTRL_SCP */
+
+#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
+#define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
+#define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
+#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
+#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
+
+#define REG_GET(idx, start, end) \
+ FLD_GET(dsi_read_reg(idx), start, end)
+
+#define REG_FLD_MOD(idx, val, start, end) \
+ dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end))
+
+/* Global interrupts */
+#define DSI_IRQ_VC0 (1 << 0)
+#define DSI_IRQ_VC1 (1 << 1)
+#define DSI_IRQ_VC2 (1 << 2)
+#define DSI_IRQ_VC3 (1 << 3)
+#define DSI_IRQ_WAKEUP (1 << 4)
+#define DSI_IRQ_RESYNC (1 << 5)
+#define DSI_IRQ_PLL_LOCK (1 << 7)
+#define DSI_IRQ_PLL_UNLOCK (1 << 8)
+#define DSI_IRQ_PLL_RECALL (1 << 9)
+#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
+#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
+#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
+#define DSI_IRQ_TE_TRIGGER (1 << 16)
+#define DSI_IRQ_ACK_TRIGGER (1 << 17)
+#define DSI_IRQ_SYNC_LOST (1 << 18)
+#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
+#define DSI_IRQ_TA_TIMEOUT (1 << 20)
+#define DSI_IRQ_ERROR_MASK \
+ (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
+ DSI_IRQ_TA_TIMEOUT)
+#define DSI_IRQ_CHANNEL_MASK 0xf
+
+/* Virtual channel interrupts */
+#define DSI_VC_IRQ_CS (1 << 0)
+#define DSI_VC_IRQ_ECC_CORR (1 << 1)
+#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
+#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
+#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
+#define DSI_VC_IRQ_BTA (1 << 5)
+#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
+#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
+#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
+#define DSI_VC_IRQ_ERROR_MASK \
+ (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
+ DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
+ DSI_VC_IRQ_FIFO_TX_UDF)
+
+/* ComplexIO interrupts */
+#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
+#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
+#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
+#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
+#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
+#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
+#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
+#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
+#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
+#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
+#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
+#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
+#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
+#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
+
+#define DSI_DT_DCS_SHORT_WRITE_0 0x05
+#define DSI_DT_DCS_SHORT_WRITE_1 0x15
+#define DSI_DT_DCS_READ 0x06
+#define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
+#define DSI_DT_NULL_PACKET 0x09
+#define DSI_DT_DCS_LONG_WRITE 0x39
+
+#define DSI_DT_RX_ACK_WITH_ERR 0x02
+#define DSI_DT_RX_DCS_LONG_READ 0x1c
+#define DSI_DT_RX_SHORT_READ_1 0x21
+#define DSI_DT_RX_SHORT_READ_2 0x22
+
+#define FINT_MAX 2100000
+#define FINT_MIN 750000
+#define REGN_MAX (1 << 7)
+#define REGM_MAX ((1 << 11) - 1)
+#define REGM3_MAX (1 << 4)
+#define REGM4_MAX (1 << 4)
+#define LP_DIV_MAX ((1 << 13) - 1)
+
+enum fifo_size {
+ DSI_FIFO_SIZE_0 = 0,
+ DSI_FIFO_SIZE_32 = 1,
+ DSI_FIFO_SIZE_64 = 2,
+ DSI_FIFO_SIZE_96 = 3,
+ DSI_FIFO_SIZE_128 = 4,
+};
+
+enum dsi_vc_mode {
+ DSI_VC_MODE_L4 = 0,
+ DSI_VC_MODE_VP,
+};
+
+struct dsi_update_region {
+ bool dirty;
+ u16 x, y, w, h;
+ struct omap_dss_device *device;
+};
+
+static struct
+{
+ void __iomem *base;
+
+ struct dsi_clock_info current_cinfo;
+
+ struct regulator *vdds_dsi_reg;
+
+ struct {
+ enum dsi_vc_mode mode;
+ struct omap_dss_device *dssdev;
+ enum fifo_size fifo_size;
+ int dest_per; /* destination peripheral 0-3 */
+ } vc[4];
+
+ struct mutex lock;
+ struct mutex bus_lock;
+
+ unsigned pll_locked;
+
+ struct completion bta_completion;
+
+ struct task_struct *thread;
+ wait_queue_head_t waitqueue;
+
+ spinlock_t update_lock;
+ bool framedone_received;
+ struct dsi_update_region update_region;
+ struct dsi_update_region active_update_region;
+ struct completion update_completion;
+
+ enum omap_dss_update_mode user_update_mode;
+ enum omap_dss_update_mode update_mode;
+ bool te_enabled;
+ bool use_ext_te;
+
+#ifdef DSI_CATCH_MISSING_TE
+ struct timer_list te_timer;
+#endif
+
+ unsigned long cache_req_pck;
+ unsigned long cache_clk_freq;
+ struct dsi_clock_info cache_cinfo;
+
+ u32 errors;
+ spinlock_t errors_lock;
+#ifdef DEBUG
+ ktime_t perf_setup_time;
+ ktime_t perf_start_time;
+ ktime_t perf_start_time_auto;
+ int perf_measure_frames;
+#endif
+ int debug_read;
+ int debug_write;
+} dsi;
+
+#ifdef DEBUG
+static unsigned int dsi_perf;
+module_param_named(dsi_perf, dsi_perf, bool, 0644);
+#endif
+
+static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
+{
+ __raw_writel(val, dsi.base + idx.idx);
+}
+
+static inline u32 dsi_read_reg(const struct dsi_reg idx)
+{
+ return __raw_readl(dsi.base + idx.idx);
+}
+
+
+void dsi_save_context(void)
+{
+}
+
+void dsi_restore_context(void)
+{
+}
+
+void dsi_bus_lock(void)
+{
+ mutex_lock(&dsi.bus_lock);
+}
+EXPORT_SYMBOL(dsi_bus_lock);
+
+void dsi_bus_unlock(void)
+{
+ mutex_unlock(&dsi.bus_lock);
+}
+EXPORT_SYMBOL(dsi_bus_unlock);
+
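+/* busy-poll until the given register bit reaches the wanted value;
+ * returns the value on success, or its inverse if the bit never changed */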
+static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
+ int value)
+{
+ int t = 100000;
+
+ while (REG_GET(idx, bitnum, bitnum) != value) {
+ if (--t == 0)
+ return !value;
+ }
+
+ return value;
+}
+
+#ifdef DEBUG
+static void dsi_perf_mark_setup(void)
+{
+ dsi.perf_setup_time = ktime_get();
+}
+
+static void dsi_perf_mark_start(void)
+{
+ dsi.perf_start_time = ktime_get();
+}
+
+static void dsi_perf_mark_start_auto(void)
+{
+ dsi.perf_measure_frames = 0;
+ dsi.perf_start_time_auto = ktime_get();
+}
+
+static void dsi_perf_show(const char *name)
+{
+ ktime_t t, setup_time, trans_time;
+ u32 total_bytes;
+ u32 setup_us, trans_us, total_us;
+
+ if (!dsi_perf)
+ return;
+
+ if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
+ return;
+
+ t = ktime_get();
+
+ setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
+ setup_us = (u32)ktime_to_us(setup_time);
+ if (setup_us == 0)
+ setup_us = 1;
+
+ trans_time = ktime_sub(t, dsi.perf_start_time);
+ trans_us = (u32)ktime_to_us(trans_time);
+ if (trans_us == 0)
+ trans_us = 1;
+
+ total_us = setup_us + trans_us;
+
+ total_bytes = dsi.active_update_region.w *
+ dsi.active_update_region.h *
+ dsi.active_update_region.device->ctrl.pixel_size / 8;
+
+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
+ static u32 s_total_trans_us, s_total_setup_us;
+ static u32 s_min_trans_us = 0xffffffff, s_min_setup_us;
+ static u32 s_max_trans_us, s_max_setup_us;
+ const int numframes = 100;
+ ktime_t total_time_auto;
+ u32 total_time_auto_us;
+
+ dsi.perf_measure_frames++;
+
+ if (setup_us < s_min_setup_us)
+ s_min_setup_us = setup_us;
+
+ if (setup_us > s_max_setup_us)
+ s_max_setup_us = setup_us;
+
+ s_total_setup_us += setup_us;
+
+ if (trans_us < s_min_trans_us)
+ s_min_trans_us = trans_us;
+
+ if (trans_us > s_max_trans_us)
+ s_max_trans_us = trans_us;
+
+ s_total_trans_us += trans_us;
+
+ if (dsi.perf_measure_frames < numframes)
+ return;
+
+ total_time_auto = ktime_sub(t, dsi.perf_start_time_auto);
+ total_time_auto_us = (u32)ktime_to_us(total_time_auto);
+
+ printk(KERN_INFO "DSI(%s): %u fps, setup %u/%u/%u, "
+ "trans %u/%u/%u\n",
+ name,
+ 1000 * 1000 * numframes / total_time_auto_us,
+ s_min_setup_us,
+ s_max_setup_us,
+ s_total_setup_us / numframes,
+ s_min_trans_us,
+ s_max_trans_us,
+ s_total_trans_us / numframes);
+
+ s_total_setup_us = 0;
+ s_min_setup_us = 0xffffffff;
+ s_max_setup_us = 0;
+ s_total_trans_us = 0;
+ s_min_trans_us = 0xffffffff;
+ s_max_trans_us = 0;
+ dsi_perf_mark_start_auto();
+ } else {
+ printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
+ "%u bytes, %u kbytes/sec\n",
+ name,
+ setup_us,
+ trans_us,
+ total_us,
+ 1000*1000 / total_us,
+ total_bytes,
+ total_bytes * 1000 / total_us);
+ }
+}
+#else
+#define dsi_perf_mark_setup()
+#define dsi_perf_mark_start()
+#define dsi_perf_mark_start_auto()
+#define dsi_perf_show(x)
+#endif
+
+static void print_irq_status(u32 status)
+{
+#ifndef VERBOSE_IRQ
+ if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
+ return;
+#endif
+ printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
+
+#define PIS(x) \
+ if (status & DSI_IRQ_##x) \
+ printk(#x " ");
+#ifdef VERBOSE_IRQ
+ PIS(VC0);
+ PIS(VC1);
+ PIS(VC2);
+ PIS(VC3);
+#endif
+ PIS(WAKEUP);
+ PIS(RESYNC);
+ PIS(PLL_LOCK);
+ PIS(PLL_UNLOCK);
+ PIS(PLL_RECALL);
+ PIS(COMPLEXIO_ERR);
+ PIS(HS_TX_TIMEOUT);
+ PIS(LP_RX_TIMEOUT);
+ PIS(TE_TRIGGER);
+ PIS(ACK_TRIGGER);
+ PIS(SYNC_LOST);
+ PIS(LDO_POWER_GOOD);
+ PIS(TA_TIMEOUT);
+#undef PIS
+
+ printk("\n");
+}
+
+static void print_irq_status_vc(int channel, u32 status)
+{
+#ifndef VERBOSE_IRQ
+ if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
+ return;
+#endif
+ printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
+
+#define PIS(x) \
+ if (status & DSI_VC_IRQ_##x) \
+ printk(#x " ");
+ PIS(CS);
+ PIS(ECC_CORR);
+#ifdef VERBOSE_IRQ
+ PIS(PACKET_SENT);
+#endif
+ PIS(FIFO_TX_OVF);
+ PIS(FIFO_RX_OVF);
+ PIS(BTA);
+ PIS(ECC_NO_CORR);
+ PIS(FIFO_TX_UDF);
+ PIS(PP_BUSY_CHANGE);
+#undef PIS
+ printk("\n");
+}
+
+static void print_irq_status_cio(u32 status)
+{
+ printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
+
+#define PIS(x) \
+ if (status & DSI_CIO_IRQ_##x) \
+ printk(#x " ");
+ PIS(ERRSYNCESC1);
+ PIS(ERRSYNCESC2);
+ PIS(ERRSYNCESC3);
+ PIS(ERRESC1);
+ PIS(ERRESC2);
+ PIS(ERRESC3);
+ PIS(ERRCONTROL1);
+ PIS(ERRCONTROL2);
+ PIS(ERRCONTROL3);
+ PIS(STATEULPS1);
+ PIS(STATEULPS2);
+ PIS(STATEULPS3);
+ PIS(ERRCONTENTIONLP0_1);
+ PIS(ERRCONTENTIONLP1_1);
+ PIS(ERRCONTENTIONLP0_2);
+ PIS(ERRCONTENTIONLP1_2);
+ PIS(ERRCONTENTIONLP0_3);
+ PIS(ERRCONTENTIONLP1_3);
+ PIS(ULPSACTIVENOT_ALL0);
+ PIS(ULPSACTIVENOT_ALL1);
+#undef PIS
+
+ printk("\n");
+}
+
+static int debug_irq;
+
+/* called from dss */
+void dsi_irq_handler(void)
+{
+ u32 irqstatus, vcstatus, ciostatus;
+ int i;
+
+ irqstatus = dsi_read_reg(DSI_IRQSTATUS);
+
+ if (irqstatus & DSI_IRQ_ERROR_MASK) {
+ DSSERR("DSI error, irqstatus %x\n", irqstatus);
+ print_irq_status(irqstatus);
+ spin_lock(&dsi.errors_lock);
+ dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK;
+ spin_unlock(&dsi.errors_lock);
+ } else if (debug_irq) {
+ print_irq_status(irqstatus);
+ }
+
+#ifdef DSI_CATCH_MISSING_TE
+ if (irqstatus & DSI_IRQ_TE_TRIGGER)
+ del_timer(&dsi.te_timer);
+#endif
+
+ for (i = 0; i < 4; ++i) {
+ if ((irqstatus & (1<<i)) == 0)
+ continue;
+
+ vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+
+ if (vcstatus & DSI_VC_IRQ_BTA)
+ complete(&dsi.bta_completion);
+
+ if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
+ DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
+ i, vcstatus);
+ print_irq_status_vc(i, vcstatus);
+ } else if (debug_irq) {
+ print_irq_status_vc(i, vcstatus);
+ }
+
+ dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
+ /* flush posted write */
+ dsi_read_reg(DSI_VC_IRQSTATUS(i));
+ }
+
+ if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
+ ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+
+ dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
+ /* flush posted write */
+ dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+
+ DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
+ print_irq_status_cio(ciostatus);
+ }
+
+ dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+ /* flush posted write */
+ dsi_read_reg(DSI_IRQSTATUS);
+}
+
+
+static void _dsi_initialize_irq(void)
+{
+ u32 l;
+ int i;
+
+ /* disable all interrupts */
+ dsi_write_reg(DSI_IRQENABLE, 0);
+ for (i = 0; i < 4; ++i)
+ dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
+ dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
+
+ /* clear interrupt status */
+ l = dsi_read_reg(DSI_IRQSTATUS);
+ dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
+
+ for (i = 0; i < 4; ++i) {
+ l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+ dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
+ }
+
+ l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+ dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
+
+ /* enable error irqs */
+ l = DSI_IRQ_ERROR_MASK;
+#ifdef DSI_CATCH_MISSING_TE
+ l |= DSI_IRQ_TE_TRIGGER;
+#endif
+ dsi_write_reg(DSI_IRQENABLE, l);
+
+ l = DSI_VC_IRQ_ERROR_MASK;
+ for (i = 0; i < 4; ++i)
+ dsi_write_reg(DSI_VC_IRQENABLE(i), l);
+
+ /* XXX zonda responds incorrectly, causing control error:
+ Exit from LP-ESC mode to LP11 uses wrong transition states on the
+ data lines LP0 and LN0. */
+ dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
+ -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
+}
+
+static u32 dsi_get_errors(void)
+{
+ unsigned long flags;
+ u32 e;
+ spin_lock_irqsave(&dsi.errors_lock, flags);
+ e = dsi.errors;
+ dsi.errors = 0;
+ spin_unlock_irqrestore(&dsi.errors_lock, flags);
+ return e;
+}
+
+static void dsi_vc_enable_bta_irq(int channel)
+{
+ u32 l;
+
+ dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
+
+ l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
+ l |= DSI_VC_IRQ_BTA;
+ dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+}
+
+static void dsi_vc_disable_bta_irq(int channel)
+{
+ u32 l;
+
+ l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
+ l &= ~DSI_VC_IRQ_BTA;
+ dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
+}
+
+/* DSI func clock. this could also be DSI2_PLL_FCLK */
+static inline void enable_clocks(bool enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ else
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+/* source clock for DSI PLL. this could also be PCLKFREE */
+static inline void dsi_enable_pll_clock(bool enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_FCK2);
+ else
+ dss_clk_disable(DSS_CLK_FCK2);
+
+ if (enable && dsi.pll_locked) {
+ if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
+ DSSERR("cannot lock PLL when enabling clocks\n");
+ }
+}
+
+#ifdef DEBUG
+static void _dsi_print_reset_status(void)
+{
+ u32 l;
+
+ if (!dss_debug)
+ return;
+
+ /* A dummy read using the SCP interface to any DSIPHY register is
+ * required after DSIPHY reset to complete the reset of the DSI complex
+ * I/O. */
+ l = dsi_read_reg(DSI_DSIPHY_CFG5);
+
+ printk(KERN_DEBUG "DSI resets: ");
+
+ l = dsi_read_reg(DSI_PLL_STATUS);
+ printk("PLL (%d) ", FLD_GET(l, 0, 0));
+
+ l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
+ printk("CIO (%d) ", FLD_GET(l, 29, 29));
+
+ l = dsi_read_reg(DSI_DSIPHY_CFG5);
+ printk("PHY (%x, %d, %d, %d)\n",
+ FLD_GET(l, 28, 26),
+ FLD_GET(l, 29, 29),
+ FLD_GET(l, 30, 30),
+ FLD_GET(l, 31, 31));
+}
+#else
+#define _dsi_print_reset_status()
+#endif
+
+static inline int dsi_if_enable(bool enable)
+{
+ DSSDBG("dsi_if_enable(%d)\n", enable);
+
+ enable = enable ? 1 : 0;
+ REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */
+
+ if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) {
+ DSSERR("Failed to set dsi_if_enable to %d\n", enable);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+unsigned long dsi_get_dsi1_pll_rate(void)
+{
+ return dsi.current_cinfo.dsi1_pll_fclk;
+}
+
+static unsigned long dsi_get_dsi2_pll_rate(void)
+{
+ return dsi.current_cinfo.dsi2_pll_fclk;
+}
+
+static unsigned long dsi_get_txbyteclkhs(void)
+{
+ return dsi.current_cinfo.clkin4ddr / 16;
+}
+
+static unsigned long dsi_fclk_rate(void)
+{
+ unsigned long r;
+
+ if (dss_get_dsi_clk_source() == 0) {
+ /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
+ r = dss_clk_get_rate(DSS_CLK_FCK1);
+ } else {
+ /* DSI FCLK source is DSI2_PLL_FCLK */
+ r = dsi_get_dsi2_pll_rate();
+ }
+
+ return r;
+}
+
+static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
+{
+ unsigned long dsi_fclk;
+ unsigned lp_clk_div;
+ unsigned long lp_clk;
+
+ lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
+
+ if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
+ return -EINVAL;
+
+ dsi_fclk = dsi_fclk_rate();
+
+ lp_clk = dsi_fclk / 2 / lp_clk_div;
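+	/* A worked example (illustrative figures, not from this patch): with
+	 * dsi_fclk = 38.4 MHz and lp_clk_div = 2,
+	 * lp_clk = 38400000 / 2 / 2 = 9.6 MHz. */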
+
+ DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
+ dsi.current_cinfo.lp_clk = lp_clk;
+ dsi.current_cinfo.lp_clk_div = lp_clk_div;
+
+ REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */
+
+ REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0,
+ 21, 21); /* LP_RX_SYNCHRO_ENABLE */
+
+ return 0;
+}
+
+
+enum dsi_pll_power_state {
+ DSI_PLL_POWER_OFF = 0x0,
+ DSI_PLL_POWER_ON_HSCLK = 0x1,
+ DSI_PLL_POWER_ON_ALL = 0x2,
+ DSI_PLL_POWER_ON_DIV = 0x3,
+};
+
+static int dsi_pll_power(enum dsi_pll_power_state state)
+{
+ int t = 0;
+
+ REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */
+
+ /* PLL_PWR_STATUS */
+ while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
+ udelay(1);
+ if (t++ > 1000) {
+ DSSERR("Failed to set DSI PLL power mode to %d\n",
+ state);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/* calculate clock rates using dividers in cinfo */
+static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
+{
+ if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
+ return -EINVAL;
+
+ if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
+ return -EINVAL;
+
+ if (cinfo->regm3 > REGM3_MAX)
+ return -EINVAL;
+
+ if (cinfo->regm4 > REGM4_MAX)
+ return -EINVAL;
+
+ if (cinfo->use_dss2_fck) {
+ cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
+ /* XXX it is unclear if highfreq should be used
+ * with DSS2_FCK source also */
+ cinfo->highfreq = 0;
+ } else {
+ cinfo->clkin = dispc_pclk_rate();
+
+ if (cinfo->clkin < 32000000)
+ cinfo->highfreq = 0;
+ else
+ cinfo->highfreq = 1;
+ }
+
+ cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
+
+ if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
+ return -EINVAL;
+
+ cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
+
+ if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
+ return -EINVAL;
+
+ if (cinfo->regm3 > 0)
+ cinfo->dsi1_pll_fclk = cinfo->clkin4ddr / cinfo->regm3;
+ else
+ cinfo->dsi1_pll_fclk = 0;
+
+ if (cinfo->regm4 > 0)
+ cinfo->dsi2_pll_fclk = cinfo->clkin4ddr / cinfo->regm4;
+ else
+ cinfo->dsi2_pll_fclk = 0;
+
+ return 0;
+}
+
+int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
+ struct dsi_clock_info *dsi_cinfo,
+ struct dispc_clock_info *dispc_cinfo)
+{
+ struct dsi_clock_info cur, best;
+ struct dispc_clock_info best_dispc;
+ int min_fck_per_pck;
+ int match = 0;
+ unsigned long dss_clk_fck2;
+
+ dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
+
+ if (req_pck == dsi.cache_req_pck &&
+ dsi.cache_cinfo.clkin == dss_clk_fck2) {
+ DSSDBG("DSI clock info found from cache\n");
+ *dsi_cinfo = dsi.cache_cinfo;
+ dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
+ dispc_cinfo);
+ return 0;
+ }
+
+ min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
+
+ if (min_fck_per_pck &&
+ req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+ DSSERR("Requested pixel clock not possible with the current "
+ "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
+ "the constraint off.\n");
+ min_fck_per_pck = 0;
+ }
+
+ DSSDBG("dsi_pll_calc\n");
+
+retry:
+ memset(&best, 0, sizeof(best));
+ memset(&best_dispc, 0, sizeof(best_dispc));
+
+ memset(&cur, 0, sizeof(cur));
+ cur.clkin = dss_clk_fck2;
+ cur.use_dss2_fck = 1;
+ cur.highfreq = 0;
+
+ /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
+ /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
+ /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
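+	/* A worked example (illustrative figures, not from this patch): with
+	 * clkin = 38.4 MHz and no highfreq, regn = 19 gives
+	 * Fint = 38400000 / 19 ~= 2.02 MHz, inside the 0.75-2.1 MHz window and
+	 * close to the preferred 2 MHz. */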
+ for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
+ if (cur.highfreq == 0)
+ cur.fint = cur.clkin / cur.regn;
+ else
+ cur.fint = cur.clkin / (2 * cur.regn);
+
+ if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
+ continue;
+
+ /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
+ for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
+ unsigned long a, b;
+
+ a = 2 * cur.regm * (cur.clkin/1000);
+ b = cur.regn * (cur.highfreq + 1);
+ cur.clkin4ddr = a / b * 1000;
+
+ if (cur.clkin4ddr > 1800 * 1000 * 1000)
+ break;
+
+ /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */
+ for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
+ ++cur.regm3) {
+ struct dispc_clock_info cur_dispc;
+ cur.dsi1_pll_fclk = cur.clkin4ddr / cur.regm3;
+
+ /* this will narrow down the search a bit,
+ * but still give pixclocks below what was
+ * requested */
+ if (cur.dsi1_pll_fclk < req_pck)
+ break;
+
+ if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
+ continue;
+
+ if (min_fck_per_pck &&
+ cur.dsi1_pll_fclk <
+ req_pck * min_fck_per_pck)
+ continue;
+
+ match = 1;
+
+ dispc_find_clk_divs(is_tft, req_pck,
+ cur.dsi1_pll_fclk,
+ &cur_dispc);
+
+ if (abs(cur_dispc.pck - req_pck) <
+ abs(best_dispc.pck - req_pck)) {
+ best = cur;
+ best_dispc = cur_dispc;
+
+ if (cur_dispc.pck == req_pck)
+ goto found;
+ }
+ }
+ }
+ }
+found:
+ if (!match) {
+ if (min_fck_per_pck) {
+			DSSERR("Could not find suitable clock settings.\n"
+					"Turning FCK/PCK constraint off and "
+					"trying again.\n");
+ min_fck_per_pck = 0;
+ goto retry;
+ }
+
+ DSSERR("Could not find suitable clock settings.\n");
+
+ return -EINVAL;
+ }
+
+ /* DSI2_PLL_FCLK (regm4) is not used */
+ best.regm4 = 0;
+ best.dsi2_pll_fclk = 0;
+
+ if (dsi_cinfo)
+ *dsi_cinfo = best;
+ if (dispc_cinfo)
+ *dispc_cinfo = best_dispc;
+
+ dsi.cache_req_pck = req_pck;
+ dsi.cache_clk_freq = 0;
+ dsi.cache_cinfo = best;
+
+ return 0;
+}
+
+int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
+{
+ int r = 0;
+ u32 l;
+ int f;
+
+ DSSDBGF();
+
+ dsi.current_cinfo.fint = cinfo->fint;
+ dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
+ dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
+ dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
+
+ dsi.current_cinfo.regn = cinfo->regn;
+ dsi.current_cinfo.regm = cinfo->regm;
+ dsi.current_cinfo.regm3 = cinfo->regm3;
+ dsi.current_cinfo.regm4 = cinfo->regm4;
+
+ DSSDBG("DSI Fint %ld\n", cinfo->fint);
+
+ DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
+ cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
+ cinfo->clkin,
+ cinfo->highfreq);
+
+ /* DSIPHY == CLKIN4DDR */
+ DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
+ cinfo->regm,
+ cinfo->regn,
+ cinfo->clkin,
+ cinfo->highfreq + 1,
+ cinfo->clkin4ddr);
+
+ DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
+ cinfo->clkin4ddr / 1000 / 1000 / 2);
+
+ DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);
+
+ DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
+ cinfo->regm3, cinfo->dsi1_pll_fclk);
+ DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
+ cinfo->regm4, cinfo->dsi2_pll_fclk);
+
+ REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
+
+ l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
+ l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
+ l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
+ l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
+ l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
+ 22, 19); /* DSI_CLOCK_DIV */
+ l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
+ 26, 23); /* DSIPROTO_CLOCK_DIV */
+ dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
+
+ BUG_ON(cinfo->fint < 750000 || cinfo->fint > 2100000);
+ if (cinfo->fint < 1000000)
+ f = 0x3;
+ else if (cinfo->fint < 1250000)
+ f = 0x4;
+ else if (cinfo->fint < 1500000)
+ f = 0x5;
+ else if (cinfo->fint < 1750000)
+ f = 0x6;
+ else
+ f = 0x7;
+
+ l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
+ l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
+ l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
+ 11, 11); /* DSI_PLL_CLKSEL */
+ l = FLD_MOD(l, cinfo->highfreq,
+ 12, 12); /* DSI_PLL_HIGHFREQ */
+ l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
+ l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
+ l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
+ dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
+
+ REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
+
+ if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) {
+ DSSERR("dsi pll go bit not going down.\n");
+ r = -EIO;
+ goto err;
+ }
+
+ if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
+ DSSERR("cannot lock PLL\n");
+ r = -EIO;
+ goto err;
+ }
+
+ dsi.pll_locked = 1;
+
+ l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
+ l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
+ l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
+ l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
+ l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
+ l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
+ l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
+ l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
+ l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
+ l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
+ l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
+ l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
+ l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
+ l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
+ l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
+ dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
+
+ DSSDBG("PLL config done\n");
+err:
+ return r;
+}
+
+int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
+ bool enable_hsdiv)
+{
+ int r = 0;
+ enum dsi_pll_power_state pwstate;
+
+ DSSDBG("PLL init\n");
+
+ enable_clocks(1);
+ dsi_enable_pll_clock(1);
+
+ r = regulator_enable(dsi.vdds_dsi_reg);
+ if (r)
+ goto err0;
+
+ /* XXX PLL does not come out of reset without this... */
+ dispc_pck_free_enable(1);
+
+ if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
+ DSSERR("PLL not coming out of reset.\n");
+ r = -ENODEV;
+ goto err1;
+ }
+
+ /* XXX ... but if left on, we get problems when planes do not
+ * fill the whole display. No idea about this */
+ dispc_pck_free_enable(0);
+
+ if (enable_hsclk && enable_hsdiv)
+ pwstate = DSI_PLL_POWER_ON_ALL;
+ else if (enable_hsclk)
+ pwstate = DSI_PLL_POWER_ON_HSCLK;
+ else if (enable_hsdiv)
+ pwstate = DSI_PLL_POWER_ON_DIV;
+ else
+ pwstate = DSI_PLL_POWER_OFF;
+
+ r = dsi_pll_power(pwstate);
+
+ if (r)
+ goto err1;
+
+ DSSDBG("PLL init done\n");
+
+ return 0;
+err1:
+ regulator_disable(dsi.vdds_dsi_reg);
+err0:
+ enable_clocks(0);
+ dsi_enable_pll_clock(0);
+ return r;
+}
+
+void dsi_pll_uninit(void)
+{
+ enable_clocks(0);
+ dsi_enable_pll_clock(0);
+
+ dsi.pll_locked = 0;
+ dsi_pll_power(DSI_PLL_POWER_OFF);
+ regulator_disable(dsi.vdds_dsi_reg);
+ DSSDBG("PLL uninit done\n");
+}
+
+void dsi_dump_clocks(struct seq_file *s)
+{
+ int clksel;
+ struct dsi_clock_info *cinfo = &dsi.current_cinfo;
+
+ enable_clocks(1);
+
+ clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11);
+
+ seq_printf(s, "- DSI PLL -\n");
+
+ seq_printf(s, "dsi pll source = %s\n",
+ clksel == 0 ?
+ "dss2_alwon_fclk" : "pclkfree");
+
+ seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
+
+ seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
+ cinfo->clkin4ddr, cinfo->regm);
+
+ seq_printf(s, "dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
+ cinfo->dsi1_pll_fclk,
+ cinfo->regm3,
+ dss_get_dispc_clk_source() == 0 ? "off" : "on");
+
+ seq_printf(s, "dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
+ cinfo->dsi2_pll_fclk,
+ cinfo->regm4,
+ dss_get_dsi_clk_source() == 0 ? "off" : "on");
+
+ seq_printf(s, "- DSI -\n");
+
+ seq_printf(s, "dsi fclk source = %s\n",
+ dss_get_dsi_clk_source() == 0 ?
+ "dss1_alwon_fclk" : "dsi2_pll_fclk");
+
+ seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
+
+ seq_printf(s, "DDR_CLK\t\t%lu\n",
+ cinfo->clkin4ddr / 4);
+
+ seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs());
+
+ seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
+
+ seq_printf(s, "VP_CLK\t\t%lu\n"
+ "VP_PCLK\t\t%lu\n",
+ dispc_lclk_rate(),
+ dispc_pclk_rate());
+
+ enable_clocks(0);
+}
+
+void dsi_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ DUMPREG(DSI_REVISION);
+ DUMPREG(DSI_SYSCONFIG);
+ DUMPREG(DSI_SYSSTATUS);
+ DUMPREG(DSI_IRQSTATUS);
+ DUMPREG(DSI_IRQENABLE);
+ DUMPREG(DSI_CTRL);
+ DUMPREG(DSI_COMPLEXIO_CFG1);
+ DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
+ DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
+ DUMPREG(DSI_CLK_CTRL);
+ DUMPREG(DSI_TIMING1);
+ DUMPREG(DSI_TIMING2);
+ DUMPREG(DSI_VM_TIMING1);
+ DUMPREG(DSI_VM_TIMING2);
+ DUMPREG(DSI_VM_TIMING3);
+ DUMPREG(DSI_CLK_TIMING);
+ DUMPREG(DSI_TX_FIFO_VC_SIZE);
+ DUMPREG(DSI_RX_FIFO_VC_SIZE);
+ DUMPREG(DSI_COMPLEXIO_CFG2);
+ DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
+ DUMPREG(DSI_VM_TIMING4);
+ DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
+ DUMPREG(DSI_VM_TIMING5);
+ DUMPREG(DSI_VM_TIMING6);
+ DUMPREG(DSI_VM_TIMING7);
+ DUMPREG(DSI_STOPCLK_TIMING);
+
+ DUMPREG(DSI_VC_CTRL(0));
+ DUMPREG(DSI_VC_TE(0));
+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
+ DUMPREG(DSI_VC_IRQSTATUS(0));
+ DUMPREG(DSI_VC_IRQENABLE(0));
+
+ DUMPREG(DSI_VC_CTRL(1));
+ DUMPREG(DSI_VC_TE(1));
+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
+ DUMPREG(DSI_VC_IRQSTATUS(1));
+ DUMPREG(DSI_VC_IRQENABLE(1));
+
+ DUMPREG(DSI_VC_CTRL(2));
+ DUMPREG(DSI_VC_TE(2));
+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
+ DUMPREG(DSI_VC_IRQSTATUS(2));
+ DUMPREG(DSI_VC_IRQENABLE(2));
+
+ DUMPREG(DSI_VC_CTRL(3));
+ DUMPREG(DSI_VC_TE(3));
+ DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
+ DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
+ DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
+ DUMPREG(DSI_VC_IRQSTATUS(3));
+ DUMPREG(DSI_VC_IRQENABLE(3));
+
+ DUMPREG(DSI_DSIPHY_CFG0);
+ DUMPREG(DSI_DSIPHY_CFG1);
+ DUMPREG(DSI_DSIPHY_CFG2);
+ DUMPREG(DSI_DSIPHY_CFG5);
+
+ DUMPREG(DSI_PLL_CONTROL);
+ DUMPREG(DSI_PLL_STATUS);
+ DUMPREG(DSI_PLL_GO);
+ DUMPREG(DSI_PLL_CONFIGURATION1);
+ DUMPREG(DSI_PLL_CONFIGURATION2);
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+enum dsi_complexio_power_state {
+ DSI_COMPLEXIO_POWER_OFF = 0x0,
+ DSI_COMPLEXIO_POWER_ON = 0x1,
+ DSI_COMPLEXIO_POWER_ULPS = 0x2,
+};
+
+static int dsi_complexio_power(enum dsi_complexio_power_state state)
+{
+ int t = 0;
+
+ /* PWR_CMD */
+ REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27);
+
+ /* PWR_STATUS */
+ while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
+ udelay(1);
+ if (t++ > 1000) {
+ DSSERR("failed to set complexio power state to "
+ "%d\n", state);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static void dsi_complexio_config(struct omap_dss_device *dssdev)
+{
+ u32 r;
+
+ int clk_lane = dssdev->phy.dsi.clk_lane;
+ int data1_lane = dssdev->phy.dsi.data1_lane;
+ int data2_lane = dssdev->phy.dsi.data2_lane;
+ int clk_pol = dssdev->phy.dsi.clk_pol;
+ int data1_pol = dssdev->phy.dsi.data1_pol;
+ int data2_pol = dssdev->phy.dsi.data2_pol;
+
+ r = dsi_read_reg(DSI_COMPLEXIO_CFG1);
+ r = FLD_MOD(r, clk_lane, 2, 0);
+ r = FLD_MOD(r, clk_pol, 3, 3);
+ r = FLD_MOD(r, data1_lane, 6, 4);
+ r = FLD_MOD(r, data1_pol, 7, 7);
+ r = FLD_MOD(r, data2_lane, 10, 8);
+ r = FLD_MOD(r, data2_pol, 11, 11);
+ dsi_write_reg(DSI_COMPLEXIO_CFG1, r);
+
+ /* The configuration of the DSI complex I/O (number of data lanes,
+ position, differential order) should not be changed while
+	   DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
+ the hardware to take into account a new configuration of the complex
+ I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
+ follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
+ then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
+ DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
+ DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
+ DSI complex I/O configuration is unknown. */
+
+ /*
+ REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
+ REG_FLD_MOD(DSI_CTRL, 0, 0, 0);
+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20);
+ REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
+ */
+}
+
+static inline unsigned ns2ddr(unsigned ns)
+{
+ /* convert time in ns to ddr ticks, rounding up */
+ unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
+ return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
+}
+
+static inline unsigned ddr2ns(unsigned ddr)
+{
+ unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
+ return ddr * 1000 * 1000 / (ddr_clk / 1000);
+}
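+
+/* A worked example for the helpers above (illustrative figures, not from
+ * this patch): with clkin4ddr = 480 MHz the DDR clock is 120 MHz, so
+ * ns2ddr(70) = (70 * 120 + 999) / 1000 = 9 ticks and ddr2ns(9) = 75 ns;
+ * the rounding always errs towards a longer time than requested. */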
+
+static void dsi_complexio_timings(void)
+{
+ u32 r;
+ u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
+ u32 tlpx_half, tclk_trail, tclk_zero;
+ u32 tclk_prepare;
+
+ /* calculate timings */
+
+ /* 1 * DDR_CLK = 2 * UI */
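+	/* An illustrative reading of the constants below (an interpretation,
+	 * not from this patch): since 1 DDR tick = 2 UI, the "+2" ticks added
+	 * to a ns2ddr() result correspond to 4*UI and the "+5" ticks to 10*UI,
+	 * while the ns argument carries some margin over the spec minimum. */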
+
+ /* min 40ns + 4*UI max 85ns + 6*UI */
+ ths_prepare = ns2ddr(70) + 2;
+
+ /* min 145ns + 10*UI */
+ ths_prepare_ths_zero = ns2ddr(175) + 2;
+
+ /* min max(8*UI, 60ns+4*UI) */
+ ths_trail = ns2ddr(60) + 5;
+
+ /* min 100ns */
+ ths_exit = ns2ddr(145);
+
+	/* tlpx min 50ns */
+ tlpx_half = ns2ddr(25);
+
+ /* min 60ns */
+ tclk_trail = ns2ddr(60) + 2;
+
+ /* min 38ns, max 95ns */
+ tclk_prepare = ns2ddr(65);
+
+ /* min tclk-prepare + tclk-zero = 300ns */
+ tclk_zero = ns2ddr(260);
+
+ DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
+ ths_prepare, ddr2ns(ths_prepare),
+ ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero));
+ DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
+ ths_trail, ddr2ns(ths_trail),
+ ths_exit, ddr2ns(ths_exit));
+
+ DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
+ "tclk_zero %u (%uns)\n",
+ tlpx_half, ddr2ns(tlpx_half),
+ tclk_trail, ddr2ns(tclk_trail),
+ tclk_zero, ddr2ns(tclk_zero));
+ DSSDBG("tclk_prepare %u (%uns)\n",
+ tclk_prepare, ddr2ns(tclk_prepare));
+
+ /* program timings */
+
+ r = dsi_read_reg(DSI_DSIPHY_CFG0);
+ r = FLD_MOD(r, ths_prepare, 31, 24);
+ r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
+ r = FLD_MOD(r, ths_trail, 15, 8);
+ r = FLD_MOD(r, ths_exit, 7, 0);
+ dsi_write_reg(DSI_DSIPHY_CFG0, r);
+
+ r = dsi_read_reg(DSI_DSIPHY_CFG1);
+ r = FLD_MOD(r, tlpx_half, 22, 16);
+ r = FLD_MOD(r, tclk_trail, 15, 8);
+ r = FLD_MOD(r, tclk_zero, 7, 0);
+ dsi_write_reg(DSI_DSIPHY_CFG1, r);
+
+ r = dsi_read_reg(DSI_DSIPHY_CFG2);
+ r = FLD_MOD(r, tclk_prepare, 7, 0);
+ dsi_write_reg(DSI_DSIPHY_CFG2, r);
+}
+
+
+static int dsi_complexio_init(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("dsi_complexio_init\n");
+
+ /* CIO_CLK_ICG, enable L3 clk to CIO */
+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
+
+ /* A dummy read using the SCP interface to any DSIPHY register is
+ * required after DSIPHY reset to complete the reset of the DSI complex
+ * I/O. */
+ dsi_read_reg(DSI_DSIPHY_CFG5);
+
+ if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
+ DSSERR("ComplexIO PHY not coming out of reset.\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ dsi_complexio_config(dssdev);
+
+ r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON);
+
+ if (r)
+ goto err;
+
+ if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
+ DSSERR("ComplexIO not coming out of reset.\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
+ DSSERR("ComplexIO LDO power down.\n");
+ r = -ENODEV;
+ goto err;
+ }
+
+ dsi_complexio_timings();
+
+ /*
+ The configuration of the DSI complex I/O (number of data lanes,
+ position, differential order) should not be changed while
+	   DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. For the
+ hardware to recognize a new configuration of the complex I/O (done
+ in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
+ this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
+ reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
+ LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
+	   bit to 1. If the sequence is not followed, the DSI complex I/O
+ configuration is undetermined.
+ */
+ dsi_if_enable(1);
+ dsi_if_enable(0);
+ REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
+ dsi_if_enable(1);
+ dsi_if_enable(0);
+
+ DSSDBG("CIO init done\n");
+err:
+ return r;
+}
+
+static void dsi_complexio_uninit(void)
+{
+ dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF);
+}
+
+static int _dsi_wait_reset(void)
+{
+ int i = 0;
+
+ while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
+ if (i++ > 5) {
+ DSSERR("soft reset failed\n");
+ return -ENODEV;
+ }
+ udelay(1);
+ }
+
+ return 0;
+}
+
+static int _dsi_reset(void)
+{
+ /* Soft reset */
+ REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1);
+ return _dsi_wait_reset();
+}
+
+static void dsi_reset_tx_fifo(int channel)
+{
+ u32 mask;
+ u32 l;
+
+	/* set the fifo size of the channel to 0, then restore the old size */
+ l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
+
+ mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
+ dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
+
+ dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
+}
+
+static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
+ enum fifo_size size3, enum fifo_size size4)
+{
+ u32 r = 0;
+ int add = 0;
+ int i;
+
+ dsi.vc[0].fifo_size = size1;
+ dsi.vc[1].fifo_size = size2;
+ dsi.vc[2].fifo_size = size3;
+ dsi.vc[3].fifo_size = size4;
+
+ for (i = 0; i < 4; i++) {
+ u8 v;
+ int size = dsi.vc[i].fifo_size;
+
+ if (add + size > 4) {
+ DSSERR("Illegal FIFO configuration\n");
+ BUG();
+ }
+
+ v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
+ r |= v << (8 * i);
+ /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
+ add += size;
+ }
+
+ dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r);
+}
+
+static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
+ enum fifo_size size3, enum fifo_size size4)
+{
+ u32 r = 0;
+ int add = 0;
+ int i;
+
+ dsi.vc[0].fifo_size = size1;
+ dsi.vc[1].fifo_size = size2;
+ dsi.vc[2].fifo_size = size3;
+ dsi.vc[3].fifo_size = size4;
+
+ for (i = 0; i < 4; i++) {
+ u8 v;
+ int size = dsi.vc[i].fifo_size;
+
+ if (add + size > 4) {
+ DSSERR("Illegal FIFO configuration\n");
+ BUG();
+ }
+
+ v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
+ r |= v << (8 * i);
+ /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
+ add += size;
+ }
+
+ dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r);
+}
+
+static int dsi_force_tx_stop_mode_io(void)
+{
+ u32 r;
+
+ r = dsi_read_reg(DSI_TIMING1);
+ r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
+ dsi_write_reg(DSI_TIMING1, r);
+
+ if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) {
+ DSSERR("TX_STOP bit not going down\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void dsi_vc_print_status(int channel)
+{
+ u32 r;
+
+ r = dsi_read_reg(DSI_VC_CTRL(channel));
+ DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
+ "TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
+ channel,
+ FLD_GET(r, 5, 5),
+ FLD_GET(r, 6, 6),
+ FLD_GET(r, 15, 15),
+ FLD_GET(r, 16, 16),
+ FLD_GET(r, 20, 20));
+
+ r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
+ DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
+}
+
+static int dsi_vc_enable(int channel, bool enable)
+{
+ if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+ DSSDBG("dsi_vc_enable channel %d, enable %d\n",
+ channel, enable);
+
+ enable = enable ? 1 : 0;
+
+ REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0);
+
+ if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) {
+ DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void dsi_vc_initial_config(int channel)
+{
+ u32 r;
+
+ DSSDBGF("%d", channel);
+
+ r = dsi_read_reg(DSI_VC_CTRL(channel));
+
+ if (FLD_GET(r, 15, 15)) /* VC_BUSY */
+ DSSERR("VC(%d) busy when trying to configure it!\n",
+ channel);
+
+ r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
+ r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
+ r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
+ r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
+ r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
+ r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
+ r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
+
+ r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
+ r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
+
+ dsi_write_reg(DSI_VC_CTRL(channel), r);
+
+ dsi.vc[channel].mode = DSI_VC_MODE_L4;
+}
+
+static void dsi_vc_config_l4(int channel)
+{
+ if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
+ return;
+
+ DSSDBGF("%d", channel);
+
+ dsi_vc_enable(channel, 0);
+
+ if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+ DSSERR("vc(%d) busy when trying to config for L4\n", channel);
+
+ REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
+
+ dsi_vc_enable(channel, 1);
+
+ dsi.vc[channel].mode = DSI_VC_MODE_L4;
+}
+
+static void dsi_vc_config_vp(int channel)
+{
+ if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
+ return;
+
+ DSSDBGF("%d", channel);
+
+ dsi_vc_enable(channel, 0);
+
+ if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
+ DSSERR("vc(%d) busy when trying to config for VP\n", channel);
+
+ REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
+
+ dsi_vc_enable(channel, 1);
+
+ dsi.vc[channel].mode = DSI_VC_MODE_VP;
+}
+
+
+static void dsi_vc_enable_hs(int channel, bool enable)
+{
+ DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
+
+ dsi_vc_enable(channel, 0);
+ dsi_if_enable(0);
+
+ REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9);
+
+ dsi_vc_enable(channel, 1);
+ dsi_if_enable(1);
+
+ dsi_force_tx_stop_mode_io();
+}
+
+static void dsi_vc_flush_long_data(int channel)
+{
+ while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+ u32 val;
+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+ DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
+ (val >> 0) & 0xff,
+ (val >> 8) & 0xff,
+ (val >> 16) & 0xff,
+ (val >> 24) & 0xff);
+ }
+}
+
+static void dsi_show_rx_ack_with_err(u16 err)
+{
+ DSSERR("\tACK with ERROR (%#x):\n", err);
+ if (err & (1 << 0))
+ DSSERR("\t\tSoT Error\n");
+ if (err & (1 << 1))
+ DSSERR("\t\tSoT Sync Error\n");
+ if (err & (1 << 2))
+ DSSERR("\t\tEoT Sync Error\n");
+ if (err & (1 << 3))
+ DSSERR("\t\tEscape Mode Entry Command Error\n");
+ if (err & (1 << 4))
+ DSSERR("\t\tLP Transmit Sync Error\n");
+ if (err & (1 << 5))
+ DSSERR("\t\tHS Receive Timeout Error\n");
+ if (err & (1 << 6))
+ DSSERR("\t\tFalse Control Error\n");
+ if (err & (1 << 7))
+ DSSERR("\t\t(reserved7)\n");
+ if (err & (1 << 8))
+ DSSERR("\t\tECC Error, single-bit (corrected)\n");
+ if (err & (1 << 9))
+ DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
+ if (err & (1 << 10))
+ DSSERR("\t\tChecksum Error\n");
+ if (err & (1 << 11))
+ DSSERR("\t\tData type not recognized\n");
+ if (err & (1 << 12))
+ DSSERR("\t\tInvalid VC ID\n");
+ if (err & (1 << 13))
+ DSSERR("\t\tInvalid Transmission Length\n");
+ if (err & (1 << 14))
+ DSSERR("\t\t(reserved14)\n");
+ if (err & (1 << 15))
+ DSSERR("\t\tDSI Protocol Violation\n");
+}
+
+static u16 dsi_vc_flush_receive_data(int channel)
+{
+ /* RX_FIFO_NOT_EMPTY */
+ while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+ u32 val;
+ u8 dt;
+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+ DSSDBG("\trawval %#08x\n", val);
+ dt = FLD_GET(val, 5, 0);
+ if (dt == DSI_DT_RX_ACK_WITH_ERR) {
+ u16 err = FLD_GET(val, 23, 8);
+ dsi_show_rx_ack_with_err(err);
+ } else if (dt == DSI_DT_RX_SHORT_READ_1) {
+ DSSDBG("\tDCS short response, 1 byte: %#x\n",
+ FLD_GET(val, 23, 8));
+ } else if (dt == DSI_DT_RX_SHORT_READ_2) {
+ DSSDBG("\tDCS short response, 2 byte: %#x\n",
+ FLD_GET(val, 23, 8));
+ } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
+ DSSDBG("\tDCS long response, len %d\n",
+ FLD_GET(val, 23, 8));
+ dsi_vc_flush_long_data(channel);
+ } else {
+ DSSERR("\tunknown datatype 0x%02x\n", dt);
+ }
+ }
+ return 0;
+}
+
+static int dsi_vc_send_bta(int channel)
+{
+ if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO &&
+ (dsi.debug_write || dsi.debug_read))
+ DSSDBG("dsi_vc_send_bta %d\n", channel);
+
+ WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+ if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
+ DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
+ dsi_vc_flush_receive_data(channel);
+ }
+
+ REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+
+ return 0;
+}
+
+int dsi_vc_send_bta_sync(int channel)
+{
+ int r = 0;
+ u32 err;
+
+ INIT_COMPLETION(dsi.bta_completion);
+
+ dsi_vc_enable_bta_irq(channel);
+
+ r = dsi_vc_send_bta(channel);
+ if (r)
+ goto err;
+
+ if (wait_for_completion_timeout(&dsi.bta_completion,
+ msecs_to_jiffies(500)) == 0) {
+ DSSERR("Failed to receive BTA\n");
+ r = -EIO;
+ goto err;
+ }
+
+ err = dsi_get_errors();
+ if (err) {
+ DSSERR("Error while sending BTA: %x\n", err);
+ r = -EIO;
+ goto err;
+ }
+err:
+ dsi_vc_disable_bta_irq(channel);
+
+ return r;
+}
+EXPORT_SYMBOL(dsi_vc_send_bta_sync);
+
+static inline void dsi_vc_write_long_header(int channel, u8 data_type,
+ u16 len, u8 ecc)
+{
+ u32 val;
+ u8 data_id;
+
+ WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+ /*data_id = data_type | channel << 6; */
+ data_id = data_type | dsi.vc[channel].dest_per << 6;
+
+ val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
+ FLD_VAL(ecc, 31, 24);
+
+ dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val);
+}
+
+static inline void dsi_vc_write_long_payload(int channel,
+ u8 b1, u8 b2, u8 b3, u8 b4)
+{
+ u32 val;
+
+ val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
+
+/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
+ b1, b2, b3, b4, val); */
+
+ dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+}
+
+static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
+ u8 ecc)
+{
+ /*u32 val; */
+ int i;
+ u8 *p;
+ int r = 0;
+ u8 b1, b2, b3, b4;
+
+ if (dsi.debug_write)
+ DSSDBG("dsi_vc_send_long, %d bytes\n", len);
+
+ /* len + header */
+ if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
+ DSSERR("unable to send long packet: packet too long.\n");
+ return -EINVAL;
+ }
+
+ dsi_vc_config_l4(channel);
+
+ dsi_vc_write_long_header(channel, data_type, len, ecc);
+
+ /*dsi_vc_print_status(0); */
+
+ p = data;
+ for (i = 0; i < len >> 2; i++) {
+ if (dsi.debug_write)
+ DSSDBG("\tsending full packet %d\n", i);
+ /*dsi_vc_print_status(0); */
+
+ b1 = *p++;
+ b2 = *p++;
+ b3 = *p++;
+ b4 = *p++;
+
+ dsi_vc_write_long_payload(channel, b1, b2, b3, b4);
+ }
+
+ i = len % 4;
+ if (i) {
+ b1 = 0; b2 = 0; b3 = 0;
+
+ if (dsi.debug_write)
+ DSSDBG("\tsending remainder bytes %d\n", i);
+
+ switch (i) {
+ case 3:
+ b1 = *p++;
+ b2 = *p++;
+ b3 = *p++;
+ break;
+ case 2:
+ b1 = *p++;
+ b2 = *p++;
+ break;
+ case 1:
+ b1 = *p++;
+ break;
+ }
+
+ dsi_vc_write_long_payload(channel, b1, b2, b3, 0);
+ }
+
+ return r;
+}
+
+static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
+{
+ u32 r;
+ u8 data_id;
+
+ WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+ if (dsi.debug_write)
+ DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
+ channel,
+ data_type, data & 0xff, (data >> 8) & 0xff);
+
+ dsi_vc_config_l4(channel);
+
+ if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) {
+ DSSERR("ERROR FIFO FULL, aborting transfer\n");
+ return -EINVAL;
+ }
+
+ data_id = data_type | channel << 6;
+
+ r = (data_id << 0) | (data << 8) | (ecc << 24);
+
+ dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r);
+
+ return 0;
+}
+
+int dsi_vc_send_null(int channel)
+{
+ u8 nullpkg[] = {0, 0, 0, 0};
+	return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
+}
+EXPORT_SYMBOL(dsi_vc_send_null);
+
+int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
+{
+ int r;
+
+ BUG_ON(len == 0);
+
+ if (len == 1) {
+ r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0,
+ data[0], 0);
+ } else if (len == 2) {
+ r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1,
+ data[0] | (data[1] << 8), 0);
+ } else {
+ /* 0x39 = DCS Long Write */
+ r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE,
+ data, len, 0);
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
+
+int dsi_vc_dcs_write(int channel, u8 *data, int len)
+{
+ int r;
+
+ r = dsi_vc_dcs_write_nosync(channel, data, len);
+ if (r)
+ return r;
+
+ r = dsi_vc_send_bta_sync(channel);
+
+ return r;
+}
+EXPORT_SYMBOL(dsi_vc_dcs_write);
+
+int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
+{
+ u32 val;
+ u8 dt;
+ int r;
+
+ if (dsi.debug_read)
+ DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd);
+
+ r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
+ if (r)
+ return r;
+
+ r = dsi_vc_send_bta_sync(channel);
+ if (r)
+ return r;
+
+ /* RX_FIFO_NOT_EMPTY */
+ if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) {
+ DSSERR("RX fifo empty when trying to read.\n");
+ return -EIO;
+ }
+
+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+ if (dsi.debug_read)
+ DSSDBG("\theader: %08x\n", val);
+ dt = FLD_GET(val, 5, 0);
+ if (dt == DSI_DT_RX_ACK_WITH_ERR) {
+ u16 err = FLD_GET(val, 23, 8);
+ dsi_show_rx_ack_with_err(err);
+ return -EIO;
+
+ } else if (dt == DSI_DT_RX_SHORT_READ_1) {
+ u8 data = FLD_GET(val, 15, 8);
+ if (dsi.debug_read)
+ DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
+
+ if (buflen < 1)
+ return -EIO;
+
+ buf[0] = data;
+
+ return 1;
+ } else if (dt == DSI_DT_RX_SHORT_READ_2) {
+ u16 data = FLD_GET(val, 23, 8);
+ if (dsi.debug_read)
+ DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
+
+ if (buflen < 2)
+ return -EIO;
+
+ buf[0] = data & 0xff;
+ buf[1] = (data >> 8) & 0xff;
+
+ return 2;
+ } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
+ int w;
+ int len = FLD_GET(val, 23, 8);
+ if (dsi.debug_read)
+ DSSDBG("\tDCS long response, len %d\n", len);
+
+ if (len > buflen)
+ return -EIO;
+
+ /* two byte checksum ends the packet, not included in len */
+ for (w = 0; w < len + 2;) {
+ int b;
+ val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
+ if (dsi.debug_read)
+ DSSDBG("\t\t%02x %02x %02x %02x\n",
+ (val >> 0) & 0xff,
+ (val >> 8) & 0xff,
+ (val >> 16) & 0xff,
+ (val >> 24) & 0xff);
+
+ for (b = 0; b < 4; ++b) {
+ if (w < len)
+ buf[w] = (val >> (b * 8)) & 0xff;
+ /* we discard the 2 byte checksum */
+ ++w;
+ }
+ }
+
+ return len;
+
+ } else {
+ DSSERR("\tunknown datatype 0x%02x\n", dt);
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL(dsi_vc_dcs_read);
+
+
+int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
+{
+ int r;
+ r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
+ len, 0);
+
+ if (r)
+ return r;
+
+ r = dsi_vc_send_bta_sync(channel);
+
+ return r;
+}
+EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
+
+static void dsi_set_lp_rx_timeout(unsigned long ns)
+{
+ u32 r;
+ unsigned x4, x16;
+ unsigned long fck;
+ unsigned long ticks;
+
+ /* ticks in DSI_FCK */
+
+ fck = dsi_fclk_rate();
+ ticks = (fck / 1000 / 1000) * ns / 1000;
+ x4 = 0;
+ x16 = 0;
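+	/* A worked example (illustrative figures, not from this patch): with
+	 * dsi_fclk = 38.4 MHz and ns = 48000, ticks = 38 * 48000 / 1000 = 1824,
+	 * which fits the 13-bit counter, so no x4/x16 prescaling is needed. */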
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
+ x4 = 1;
+ x16 = 0;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+ x4 = 0;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
+ x4 = 1;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ DSSWARN("LP_TX_TO over limit, setting it to max\n");
+ ticks = 0x1fff;
+ x4 = 1;
+ x16 = 1;
+ }
+
+ r = dsi_read_reg(DSI_TIMING2);
+ r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
+ r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */
+ r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */
+ r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
+ dsi_write_reg(DSI_TIMING2, r);
+
+ DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+}
+
+static void dsi_set_ta_timeout(unsigned long ns)
+{
+ u32 r;
+ unsigned x8, x16;
+ unsigned long fck;
+ unsigned long ticks;
+
+ /* ticks in DSI_FCK */
+ fck = dsi_fclk_rate();
+ ticks = (fck / 1000 / 1000) * ns / 1000;
+ x8 = 0;
+ x16 = 0;
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 8;
+ x8 = 1;
+ x16 = 0;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+ x8 = 0;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / (8 * 16);
+ x8 = 1;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ DSSWARN("TA_TO over limit, setting it to max\n");
+ ticks = 0x1fff;
+ x8 = 1;
+ x16 = 1;
+ }
+
+ r = dsi_read_reg(DSI_TIMING1);
+ r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
+ r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */
+ r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
+ r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
+ dsi_write_reg(DSI_TIMING1, r);
+
+ DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
+ (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x8 ? " x8" : "", x16 ? " x16" : "");
+}
+
+static void dsi_set_stop_state_counter(unsigned long ns)
+{
+ u32 r;
+ unsigned x4, x16;
+ unsigned long fck;
+ unsigned long ticks;
+
+ /* ticks in DSI_FCK */
+
+ fck = dsi_fclk_rate();
+ ticks = (fck / 1000 / 1000) * ns / 1000;
+ x4 = 0;
+ x16 = 0;
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
+ x4 = 1;
+ x16 = 0;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+ x4 = 0;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
+ x4 = 1;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ DSSWARN("STOP_STATE_COUNTER_IO over limit, "
+ "setting it to max\n");
+ ticks = 0x1fff;
+ x4 = 1;
+ x16 = 1;
+ }
+
+ r = dsi_read_reg(DSI_TIMING1);
+ r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
+ r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */
+ r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
+ r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
+ dsi_write_reg(DSI_TIMING1, r);
+
+ DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+}
+
+static void dsi_set_hs_tx_timeout(unsigned long ns)
+{
+ u32 r;
+ unsigned x4, x16;
+ unsigned long fck;
+ unsigned long ticks;
+
+ /* ticks in TxByteClkHS */
+
+ fck = dsi_get_txbyteclkhs();
+ ticks = (fck / 1000 / 1000) * ns / 1000;
+ x4 = 0;
+ x16 = 0;
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
+ x4 = 1;
+ x16 = 0;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
+ x4 = 0;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
+ x4 = 1;
+ x16 = 1;
+ }
+
+ if (ticks > 0x1fff) {
+ DSSWARN("HS_TX_TO over limit, setting it to max\n");
+ ticks = 0x1fff;
+ x4 = 1;
+ x16 = 1;
+ }
+
+ r = dsi_read_reg(DSI_TIMING2);
+ r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
+ r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */
+ r = FLD_MOD(r, x4, 29, 29); /* HS_TX_TO_X8 (4 really) */
+ r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
+ dsi_write_reg(DSI_TIMING2, r);
+
+ DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
+ (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
+ (fck / 1000 / 1000),
+ ticks, x4 ? " x4" : "", x16 ? " x16" : "");
+}
+
+static int dsi_proto_config(struct omap_dss_device *dssdev)
+{
+ u32 r;
+ int buswidth = 0;
+
+ dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
+ DSI_FIFO_SIZE_0,
+ DSI_FIFO_SIZE_0,
+ DSI_FIFO_SIZE_0);
+
+ dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
+ DSI_FIFO_SIZE_0,
+ DSI_FIFO_SIZE_0,
+ DSI_FIFO_SIZE_0);
+
+ /* XXX what values for the timeouts? */
+ dsi_set_stop_state_counter(1000);
+ dsi_set_ta_timeout(6400000);
+ dsi_set_lp_rx_timeout(48000);
+ dsi_set_hs_tx_timeout(1000000);
+
+ switch (dssdev->ctrl.pixel_size) {
+ case 16:
+ buswidth = 0;
+ break;
+ case 18:
+ buswidth = 1;
+ break;
+ case 24:
+ buswidth = 2;
+ break;
+ default:
+ BUG();
+ }
+
+ r = dsi_read_reg(DSI_CTRL);
+ r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
+ r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
+ r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
+	r = FLD_MOD(r, 1, 4, 4);	/* VP_CLK_RATIO, always 1, see errata */
+ r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
+ r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
+ r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
+ r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
+ r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
+ r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
+ r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */
+
+ dsi_write_reg(DSI_CTRL, r);
+
+ dsi_vc_initial_config(0);
+
+ /* set all vc targets to peripheral 0 */
+ dsi.vc[0].dest_per = 0;
+ dsi.vc[1].dest_per = 0;
+ dsi.vc[2].dest_per = 0;
+ dsi.vc[3].dest_per = 0;
+
+ return 0;
+}
+
+static void dsi_proto_timings(struct omap_dss_device *dssdev)
+{
+ unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned tclk_pre, tclk_post;
+ unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
+ unsigned ths_trail, ths_exit;
+ unsigned ddr_clk_pre, ddr_clk_post;
+ unsigned enter_hs_mode_lat, exit_hs_mode_lat;
+ unsigned ths_eot;
+ u32 r;
+
+ r = dsi_read_reg(DSI_DSIPHY_CFG0);
+ ths_prepare = FLD_GET(r, 31, 24);
+ ths_prepare_ths_zero = FLD_GET(r, 23, 16);
+ ths_zero = ths_prepare_ths_zero - ths_prepare;
+ ths_trail = FLD_GET(r, 15, 8);
+ ths_exit = FLD_GET(r, 7, 0);
+
+ r = dsi_read_reg(DSI_DSIPHY_CFG1);
+ tlpx = FLD_GET(r, 22, 16) * 2;
+ tclk_trail = FLD_GET(r, 15, 8);
+ tclk_zero = FLD_GET(r, 7, 0);
+
+ r = dsi_read_reg(DSI_DSIPHY_CFG2);
+ tclk_prepare = FLD_GET(r, 7, 0);
+
+ /* min 8*UI */
+ tclk_pre = 20;
+ /* min 60ns + 52*UI */
+ tclk_post = ns2ddr(60) + 26;
+
+ /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */
+ if (dssdev->phy.dsi.data1_lane != 0 &&
+ dssdev->phy.dsi.data2_lane != 0)
+ ths_eot = 2;
+ else
+ ths_eot = 4;
+
+ ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
+ 4);
+ ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
+
+ BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
+ BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
+
+ r = dsi_read_reg(DSI_CLK_TIMING);
+ r = FLD_MOD(r, ddr_clk_pre, 15, 8);
+ r = FLD_MOD(r, ddr_clk_post, 7, 0);
+ dsi_write_reg(DSI_CLK_TIMING, r);
+
+ DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
+ ddr_clk_pre,
+ ddr_clk_post);
+
+ enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
+ DIV_ROUND_UP(ths_prepare, 4) +
+ DIV_ROUND_UP(ths_zero + 3, 4);
+
+ exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
+
+ r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
+ FLD_VAL(exit_hs_mode_lat, 15, 0);
+ dsi_write_reg(DSI_VM_TIMING7, r);
+
+ DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
+ enter_hs_mode_lat, exit_hs_mode_lat);
+}
+
+
+#define DSI_DECL_VARS \
+ int __dsi_cb = 0; u32 __dsi_cv = 0;
+
+#define DSI_FLUSH(ch) \
+ if (__dsi_cb > 0) { \
+ /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
+ dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
+ __dsi_cb = __dsi_cv = 0; \
+ }
+
+#define DSI_PUSH(ch, data) \
+ do { \
+ __dsi_cv |= (data) << (__dsi_cb * 8); \
+ /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
+ if (++__dsi_cb > 3) \
+ DSI_FLUSH(ch); \
+ } while (0)
+
+static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
+ int x, int y, int w, int h)
+{
+ /* Note: supports only 24bit colors in 32bit container */
+ int first = 1;
+ int fifo_stalls = 0;
+ int max_dsi_packet_size;
+ int max_data_per_packet;
+ int max_pixels_per_packet;
+ int pixels_left;
+ int bytespp = dssdev->ctrl.pixel_size / 8;
+ int scr_width;
+ u32 __iomem *data;
+ int start_offset;
+ int horiz_inc;
+ int current_x;
+ struct omap_overlay *ovl;
+
+ debug_irq = 0;
+
+ DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
+ x, y, w, h);
+
+ ovl = dssdev->manager->overlays[0];
+
+ if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
+ return -EINVAL;
+
+ if (dssdev->ctrl.pixel_size != 24)
+ return -EINVAL;
+
+ scr_width = ovl->info.screen_width;
+ data = ovl->info.vaddr;
+
+ start_offset = scr_width * y + x;
+ horiz_inc = scr_width - w;
+ current_x = x;
+
+ /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
+ * in fifo */
+
+ /* When using CPU, max long packet size is TX buffer size */
+ max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
+
+	/* we seem to get better perf if we divide the tx fifo in half,
+	   and fill one half while the other half is being sent:
+	   max_dsi_packet_size /= 2; */
+
+ max_data_per_packet = max_dsi_packet_size - 4 - 1;
+
+ max_pixels_per_packet = max_data_per_packet / bytespp;
+
+ DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
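+	/* A worked example (illustrative figures, not from this patch, and
+	 * assuming DSI_FIFO_SIZE_128 denotes one 128-byte unit): the TX buffer
+	 * is then 128 bytes, max_data_per_packet = 128 - 4 - 1 = 123 bytes and
+	 * max_pixels_per_packet = 123 / 3 = 41 pixels per long packet. */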
+
+ pixels_left = w * h;
+
+ DSSDBG("total pixels %d\n", pixels_left);
+
+ data += start_offset;
+
+ while (pixels_left > 0) {
+ /* 0x2c = write_memory_start */
+ /* 0x3c = write_memory_continue */
+ u8 dcs_cmd = first ? 0x2c : 0x3c;
+ int pixels;
+ DSI_DECL_VARS;
+ first = 0;
+
+#if 1
+ /* using fifo not empty */
+ /* TX_FIFO_NOT_EMPTY */
+ while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
+ udelay(1);
+ fifo_stalls++;
+ if (fifo_stalls > 0xfffff) {
+ DSSERR("fifo stalls overflow, pixels left %d\n",
+ pixels_left);
+ dsi_if_enable(0);
+ return -EIO;
+ }
+ }
+#elif 1
+ /* using fifo emptiness */
+ while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
+ max_dsi_packet_size) {
+ fifo_stalls++;
+ if (fifo_stalls > 0xfffff) {
+ DSSERR("fifo stalls overflow, pixels left %d\n",
+ pixels_left);
+ dsi_if_enable(0);
+ return -EIO;
+ }
+ }
+#else
+ while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) {
+ fifo_stalls++;
+ if (fifo_stalls > 0xfffff) {
+ DSSERR("fifo stalls overflow, pixels left %d\n",
+ pixels_left);
+ dsi_if_enable(0);
+ return -EIO;
+ }
+ }
+#endif
+ pixels = min(max_pixels_per_packet, pixels_left);
+
+ pixels_left -= pixels;
+
+ dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
+ 1 + pixels * bytespp, 0);
+
+ DSI_PUSH(0, dcs_cmd);
+
+ while (pixels-- > 0) {
+ u32 pix = __raw_readl(data++);
+
+ DSI_PUSH(0, (pix >> 16) & 0xff);
+ DSI_PUSH(0, (pix >> 8) & 0xff);
+ DSI_PUSH(0, (pix >> 0) & 0xff);
+
+ current_x++;
+ if (current_x == x+w) {
+ current_x = x;
+ data += horiz_inc;
+ }
+ }
+
+ DSI_FLUSH(0);
+ }
+
+ return 0;
+}
+
+static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ unsigned bytespp;
+ unsigned bytespl;
+ unsigned bytespf;
+ unsigned total_len;
+ unsigned packet_payload;
+ unsigned packet_len;
+ u32 l;
+ bool use_te_trigger;
+ const unsigned channel = 0;
+ /* line buffer is 1024 x 24bits */
+ /* XXX: for some reason using full buffer size causes considerable TX
+ * slowdown with update sizes that fill the whole buffer */
+ const unsigned line_buf_size = 1023 * 3;
+
+ use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+
+ if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+ DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
+ x, y, w, h);
+
+ bytespp = dssdev->ctrl.pixel_size / 8;
+ bytespl = w * bytespp;
+ bytespf = bytespl * h;
+
+ /* NOTE: packet_payload has to be equal to N * bytespl, where N is
+ * number of lines in a packet. See errata about VP_CLK_RATIO */
+
+ if (bytespf < line_buf_size)
+ packet_payload = bytespf;
+ else
+ packet_payload = (line_buf_size) / bytespl * bytespl;
+
+ packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
+ total_len = (bytespf / packet_payload) * packet_len;
+
+ if (bytespf % packet_payload)
+ total_len += (bytespf % packet_payload) + 1;
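+
+	/* A worked example (illustrative figures, not from this patch): a
+	 * hypothetical 800x480 update at 24 bpp gives bytespl = 2400 and
+	 * bytespf = 1152000; with line_buf_size = 3069 the payload is one full
+	 * line (2400 bytes), packet_len = 2401 and total_len = 480 * 2401. */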
+
+ if (0)
+ dsi_vc_print_status(1);
+
+ l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
+ dsi_write_reg(DSI_VC_TE(channel), l);
+
+ dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
+
+ if (use_te_trigger)
+ l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
+ else
+ l = FLD_MOD(l, 1, 31, 31); /* TE_START */
+ dsi_write_reg(DSI_VC_TE(channel), l);
+
+ /* We put SIDLEMODE to no-idle for the duration of the transfer,
+ * because DSS interrupts are not capable of waking up the CPU and the
+ * framedone interrupt could be delayed for quite a long time. I think
+ * the same goes for any DSS interrupts, but for some reason I have not
+ * seen the problem anywhere else than here.
+ */
+ dispc_disable_sidle();
+
+ dss_start_update(dssdev);
+
+ if (use_te_trigger) {
+ /* disable LP_RX_TO, so that we can receive TE. Time to wait
+ * for TE is longer than the timer allows */
+ REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
+
+ dsi_vc_send_bta(channel);
+
+#ifdef DSI_CATCH_MISSING_TE
+ mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250));
+#endif
+ }
+}
+
+#ifdef DSI_CATCH_MISSING_TE
+static void dsi_te_timeout(unsigned long arg)
+{
+ DSSERR("TE not received for 250ms!\n");
+}
+#endif
+
+static void dsi_framedone_irq_callback(void *data, u32 mask)
+{
+ /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
+ * turns itself off. However, DSI still has the pixels in its buffers,
+ * and is sending the data.
+ */
+
+ /* SIDLEMODE back to smart-idle */
+ dispc_enable_sidle();
+
+ dsi.framedone_received = true;
+ wake_up(&dsi.waitqueue);
+}
+
+static void dsi_set_update_region(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ spin_lock(&dsi.update_lock);
+ if (dsi.update_region.dirty) {
+ dsi.update_region.x = min(x, dsi.update_region.x);
+ dsi.update_region.y = min(y, dsi.update_region.y);
+ dsi.update_region.w = max(w, dsi.update_region.w);
+ dsi.update_region.h = max(h, dsi.update_region.h);
+ } else {
+ dsi.update_region.x = x;
+ dsi.update_region.y = y;
+ dsi.update_region.w = w;
+ dsi.update_region.h = h;
+ }
+
+ dsi.update_region.device = dssdev;
+ dsi.update_region.dirty = true;
+
+ spin_unlock(&dsi.update_lock);
+
+}
+
+static int dsi_set_update_mode(struct omap_dss_device *dssdev,
+ enum omap_dss_update_mode mode)
+{
+ int r = 0;
+ int i;
+
+ WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+
+ if (dsi.update_mode != mode) {
+ dsi.update_mode = mode;
+
+ /* Mark the overlays dirty, and do apply(), so that we get the
+ * overlays configured properly after update mode change. */
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+ if (ovl->manager == dssdev->manager)
+ ovl->info_dirty = true;
+ }
+
+ r = dssdev->manager->apply(dssdev->manager);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
+ mode == OMAP_DSS_UPDATE_AUTO) {
+ u16 w, h;
+
+ DSSDBG("starting auto update\n");
+
+ dssdev->get_resolution(dssdev, &w, &h);
+
+ dsi_set_update_region(dssdev, 0, 0, w, h);
+
+ dsi_perf_mark_start_auto();
+
+ wake_up(&dsi.waitqueue);
+ }
+ }
+
+ return r;
+}
+
+static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
+{
+ int r;
+ r = dssdev->driver->enable_te(dssdev, enable);
+ /* XXX for some reason, DSI TE breaks if we don't wait here.
+ * Panel bug? Needs more studying */
+ msleep(100);
+ return r;
+}
+
+static void dsi_handle_framedone(void)
+{
+ int r;
+ const int channel = 0;
+ bool use_te_trigger;
+
+ use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+
+ if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
+ DSSDBG("FRAMEDONE\n");
+
+ if (use_te_trigger) {
+ /* enable LP_RX_TO again after the TE */
+ REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
+ }
+
+	/* Send BTA after the frame. We need this for the TE to work, as a TE
+	 * trigger is only sent for BTAs without a preceding packet. Thus we
+	 * need to send a BTA after the pixel packets so that the next BTA
+	 * will cause a TE trigger.
+	 *
+	 * This is not needed when TE is not in use, but we do it anyway to
+	 * make sure that the transfer has completed. It would be more
+	 * optimal, but more complex, to wait only just before starting the
+	 * next transfer. */
+ r = dsi_vc_send_bta_sync(channel);
+ if (r)
+ DSSERR("BTA after framedone failed\n");
+
+ /* RX_FIFO_NOT_EMPTY */
+ if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+ DSSERR("Received error during frame transfer:\n");
+ dsi_vc_flush_receive_data(0);
+ }
+
+#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
+ dispc_fake_vsync_irq();
+#endif
+}
+
+static int dsi_update_thread(void *data)
+{
+ unsigned long timeout;
+ struct omap_dss_device *device;
+ u16 x, y, w, h;
+
+ while (1) {
+ bool sched;
+
+ wait_event_interruptible(dsi.waitqueue,
+ dsi.update_mode == OMAP_DSS_UPDATE_AUTO ||
+ (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
+ dsi.update_region.dirty == true) ||
+ kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ dsi_bus_lock();
+
+ if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED ||
+ kthread_should_stop()) {
+ dsi_bus_unlock();
+ break;
+ }
+
+ dsi_perf_mark_setup();
+
+ if (dsi.update_region.dirty) {
+ spin_lock(&dsi.update_lock);
+ dsi.active_update_region = dsi.update_region;
+ dsi.update_region.dirty = false;
+ spin_unlock(&dsi.update_lock);
+ }
+
+ device = dsi.active_update_region.device;
+ x = dsi.active_update_region.x;
+ y = dsi.active_update_region.y;
+ w = dsi.active_update_region.w;
+ h = dsi.active_update_region.h;
+
+ if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+
+ if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
+ dss_setup_partial_planes(device,
+ &x, &y, &w, &h);
+
+ dispc_set_lcd_size(w, h);
+ }
+
+ if (dsi.active_update_region.dirty) {
+ dsi.active_update_region.dirty = false;
+			/* XXX TODO we don't need to send the coords if they
+			 * are the same as those already programmed to the
+			 * panel. That should speed up manual update a bit */
+ device->driver->setup_update(device, x, y, w, h);
+ }
+
+ dsi_perf_mark_start();
+
+ if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+ dsi_vc_config_vp(0);
+
+ if (dsi.te_enabled && dsi.use_ext_te)
+ device->driver->wait_for_te(device);
+
+ dsi.framedone_received = false;
+
+ dsi_update_screen_dispc(device, x, y, w, h);
+
+ /* wait for framedone */
+ timeout = msecs_to_jiffies(1000);
+ wait_event_timeout(dsi.waitqueue,
+ dsi.framedone_received == true,
+ timeout);
+
+ if (!dsi.framedone_received) {
+ DSSERR("framedone timeout\n");
+ DSSERR("failed update %d,%d %dx%d\n",
+ x, y, w, h);
+
+ dispc_enable_sidle();
+ dispc_enable_lcd_out(0);
+
+ dsi_reset_tx_fifo(0);
+ } else {
+ dsi_handle_framedone();
+ dsi_perf_show("DISPC");
+ }
+ } else {
+ dsi_update_screen_l4(device, x, y, w, h);
+ dsi_perf_show("L4");
+ }
+
+ sched = atomic_read(&dsi.bus_lock.count) < 0;
+
+ complete_all(&dsi.update_completion);
+
+ dsi_bus_unlock();
+
+ /* XXX We need to give others a chance to get the bus lock. Is
+ * there a better way to do this? */
+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
+ schedule_timeout_interruptible(1);
+ }
+
+ DSSDBG("update thread exiting\n");
+
+ return 0;
+}
+
+
+
+/* Display funcs */
+
+static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL,
+ DISPC_IRQ_FRAMEDONE);
+ if (r) {
+ DSSERR("can't get FRAMEDONE irq\n");
+ return r;
+ }
+
+ dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+
+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
+ dispc_enable_fifohandcheck(1);
+
+ dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+
+ {
+ struct omap_video_timings timings = {
+ .hsw = 1,
+ .hfp = 1,
+ .hbp = 1,
+ .vsw = 1,
+ .vfp = 0,
+ .vbp = 0,
+ };
+
+ dispc_set_lcd_timings(&timings);
+ }
+
+ return 0;
+}
+
+static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
+{
+ omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL,
+ DISPC_IRQ_FRAMEDONE);
+}
+
+static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
+{
+ struct dsi_clock_info cinfo;
+ int r;
+
+ /* we always use DSS2_FCK as input clock */
+ cinfo.use_dss2_fck = true;
+ cinfo.regn = dssdev->phy.dsi.div.regn;
+ cinfo.regm = dssdev->phy.dsi.div.regm;
+ cinfo.regm3 = dssdev->phy.dsi.div.regm3;
+ cinfo.regm4 = dssdev->phy.dsi.div.regm4;
+ r = dsi_calc_clock_rates(&cinfo);
+ if (r)
+ return r;
+
+ r = dsi_pll_set_clock_div(&cinfo);
+ if (r) {
+ DSSERR("Failed to set dsi clocks\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
+{
+ struct dispc_clock_info dispc_cinfo;
+ int r;
+ unsigned long long fck;
+
+ fck = dsi_get_dsi1_pll_rate();
+
+ dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
+ dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
+
+ r = dispc_calc_clock_rates(fck, &dispc_cinfo);
+ if (r) {
+ DSSERR("Failed to calc dispc clocks\n");
+ return r;
+ }
+
+ r = dispc_set_clock_div(&dispc_cinfo);
+ if (r) {
+ DSSERR("Failed to set dispc clocks\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ _dsi_print_reset_status();
+
+ r = dsi_pll_init(dssdev, true, true);
+ if (r)
+ goto err0;
+
+ r = dsi_configure_dsi_clocks(dssdev);
+ if (r)
+ goto err1;
+
+ dss_select_clk_source(true, true);
+
+ DSSDBG("PLL OK\n");
+
+ r = dsi_configure_dispc_clocks(dssdev);
+ if (r)
+ goto err2;
+
+ r = dsi_complexio_init(dssdev);
+ if (r)
+ goto err2;
+
+ _dsi_print_reset_status();
+
+ dsi_proto_timings(dssdev);
+ dsi_set_lp_clk_divisor(dssdev);
+
+ if (1)
+ _dsi_print_reset_status();
+
+ r = dsi_proto_config(dssdev);
+ if (r)
+ goto err3;
+
+ /* enable interface */
+ dsi_vc_enable(0, 1);
+ dsi_if_enable(1);
+ dsi_force_tx_stop_mode_io();
+
+ if (dssdev->driver->enable) {
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ goto err4;
+ }
+
+ /* enable high-speed after initial config */
+ dsi_vc_enable_hs(0, 1);
+
+ return 0;
+err4:
+ dsi_if_enable(0);
+err3:
+ dsi_complexio_uninit();
+err2:
+ dss_select_clk_source(false, false);
+err1:
+ dsi_pll_uninit();
+err0:
+ return r;
+}
+
+static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
+{
+ if (dssdev->driver->disable)
+ dssdev->driver->disable(dssdev);
+
+ dss_select_clk_source(false, false);
+ dsi_complexio_uninit();
+ dsi_pll_uninit();
+}
+
+static int dsi_core_init(void)
+{
+ /* Autoidle */
+ REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0);
+
+ /* ENWAKEUP */
+ REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2);
+
+ /* SIDLEMODE smart-idle */
+ REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3);
+
+ _dsi_initialize_irq();
+
+ return 0;
+}
+
+static int dsi_display_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("dsi_display_enable\n");
+
+ mutex_lock(&dsi.lock);
+ dsi_bus_lock();
+
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err0;
+ }
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ DSSERR("dssdev already enabled\n");
+ r = -EINVAL;
+ goto err1;
+ }
+
+ enable_clocks(1);
+ dsi_enable_pll_clock(1);
+
+ r = _dsi_reset();
+ if (r)
+ goto err2;
+
+ dsi_core_init();
+
+ r = dsi_display_init_dispc(dssdev);
+ if (r)
+ goto err2;
+
+ r = dsi_display_init_dsi(dssdev);
+ if (r)
+ goto err3;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ dsi.use_ext_te = dssdev->phy.dsi.ext_te;
+ r = dsi_set_te(dssdev, dsi.te_enabled);
+ if (r)
+ goto err4;
+
+ dsi_set_update_mode(dssdev, dsi.user_update_mode);
+
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+
+ return 0;
+
+err4:
+
+ dsi_display_uninit_dsi(dssdev);
+err3:
+ dsi_display_uninit_dispc(dssdev);
+err2:
+ enable_clocks(0);
+ dsi_enable_pll_clock(0);
+err1:
+ omap_dss_stop_device(dssdev);
+err0:
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+ DSSDBG("dsi_display_enable FAILED\n");
+ return r;
+}
+
+static void dsi_display_disable(struct omap_dss_device *dssdev)
+{
+ DSSDBG("dsi_display_disable\n");
+
+ mutex_lock(&dsi.lock);
+ dsi_bus_lock();
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
+ dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+ goto end;
+
+ dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ dsi_display_uninit_dispc(dssdev);
+
+ dsi_display_uninit_dsi(dssdev);
+
+ enable_clocks(0);
+ dsi_enable_pll_clock(0);
+
+ omap_dss_stop_device(dssdev);
+end:
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+}
+
+static int dsi_display_suspend(struct omap_dss_device *dssdev)
+{
+ DSSDBG("dsi_display_suspend\n");
+
+ mutex_lock(&dsi.lock);
+ dsi_bus_lock();
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
+ dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+ goto end;
+
+ dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ dsi_display_uninit_dispc(dssdev);
+
+ dsi_display_uninit_dsi(dssdev);
+
+ enable_clocks(0);
+ dsi_enable_pll_clock(0);
+end:
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+
+ return 0;
+}
+
+static int dsi_display_resume(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ DSSDBG("dsi_display_resume\n");
+
+ mutex_lock(&dsi.lock);
+ dsi_bus_lock();
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ DSSERR("dssdev not suspended\n");
+ r = -EINVAL;
+ goto err0;
+ }
+
+ enable_clocks(1);
+ dsi_enable_pll_clock(1);
+
+ r = _dsi_reset();
+ if (r)
+ goto err1;
+
+ dsi_core_init();
+
+ r = dsi_display_init_dispc(dssdev);
+ if (r)
+ goto err1;
+
+ r = dsi_display_init_dsi(dssdev);
+ if (r)
+ goto err2;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ r = dsi_set_te(dssdev, dsi.te_enabled);
+ if (r)
+ goto err2;
+
+ dsi_set_update_mode(dssdev, dsi.user_update_mode);
+
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+
+ return 0;
+
+err2:
+ dsi_display_uninit_dispc(dssdev);
+err1:
+ enable_clocks(0);
+ dsi_enable_pll_clock(0);
+err0:
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+ DSSDBG("dsi_display_resume FAILED\n");
+ return r;
+}
+
+static int dsi_display_update(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ int r = 0;
+ u16 dw, dh;
+
+ DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
+
+ mutex_lock(&dsi.lock);
+
+ if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
+ goto end;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ goto end;
+
+ dssdev->get_resolution(dssdev, &dw, &dh);
+
+ if (x > dw || y > dh)
+ goto end;
+
+ if (x + w > dw)
+ w = dw - x;
+
+ if (y + h > dh)
+ h = dh - y;
+
+ if (w == 0 || h == 0)
+ goto end;
+
+ if (w == 1) {
+ r = -EINVAL;
+ goto end;
+ }
+
+ dsi_set_update_region(dssdev, x, y, w, h);
+
+ wake_up(&dsi.waitqueue);
+
+end:
+ mutex_unlock(&dsi.lock);
+
+ return r;
+}
+
+static int dsi_display_sync(struct omap_dss_device *dssdev)
+{
+ bool wait;
+
+ DSSDBG("dsi_display_sync()\n");
+
+ mutex_lock(&dsi.lock);
+ dsi_bus_lock();
+
+ if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
+ dsi.update_region.dirty) {
+ INIT_COMPLETION(dsi.update_completion);
+ wait = true;
+ } else {
+ wait = false;
+ }
+
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+
+ if (wait)
+ wait_for_completion_interruptible(&dsi.update_completion);
+
+ DSSDBG("dsi_display_sync() done\n");
+ return 0;
+}
+
+static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
+ enum omap_dss_update_mode mode)
+{
+ int r = 0;
+
+ DSSDBGF("%d", mode);
+
+ mutex_lock(&dsi.lock);
+ dsi_bus_lock();
+
+ dsi.user_update_mode = mode;
+ r = dsi_set_update_mode(dssdev, mode);
+
+ dsi_bus_unlock();
+ mutex_unlock(&dsi.lock);
+
+ return r;
+}
+
+static enum omap_dss_update_mode dsi_display_get_update_mode(
+ struct omap_dss_device *dssdev)
+{
+ return dsi.update_mode;
+}
+
+
+static int dsi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
+{
+ int r = 0;
+
+ DSSDBGF("%d", enable);
+
+ if (!dssdev->driver->enable_te)
+ return -ENOENT;
+
+ dsi_bus_lock();
+
+ dsi.te_enabled = enable;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ goto end;
+
+ r = dsi_set_te(dssdev, enable);
+end:
+ dsi_bus_unlock();
+
+ return r;
+}
+
+static int dsi_display_get_te(struct omap_dss_device *dssdev)
+{
+ return dsi.te_enabled;
+}
+
+static int dsi_display_set_rotate(struct omap_dss_device *dssdev, u8 rotate)
+{
+
+ DSSDBGF("%d", rotate);
+
+ if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
+ return -EINVAL;
+
+ dsi_bus_lock();
+ dssdev->driver->set_rotate(dssdev, rotate);
+ if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
+ u16 w, h;
+ /* the display dimensions may have changed, so set a new
+ * update region */
+ dssdev->get_resolution(dssdev, &w, &h);
+ dsi_set_update_region(dssdev, 0, 0, w, h);
+ }
+ dsi_bus_unlock();
+
+ return 0;
+}
+
+static u8 dsi_display_get_rotate(struct omap_dss_device *dssdev)
+{
+ if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
+ return 0;
+
+ return dssdev->driver->get_rotate(dssdev);
+}
+
+static int dsi_display_set_mirror(struct omap_dss_device *dssdev, bool mirror)
+{
+ DSSDBGF("%d", mirror);
+
+ if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
+ return -EINVAL;
+
+ dsi_bus_lock();
+ dssdev->driver->set_mirror(dssdev, mirror);
+ dsi_bus_unlock();
+
+ return 0;
+}
+
+static bool dsi_display_get_mirror(struct omap_dss_device *dssdev)
+{
+ if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
+ return 0;
+
+ return dssdev->driver->get_mirror(dssdev);
+}
+
+static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
+{
+ int r = 0;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return -EIO;
+
+ DSSDBGF("%d", test_num);
+
+ dsi_bus_lock();
+
+ /* run test first in low speed mode */
+ dsi_vc_enable_hs(0, 0);
+
+ if (dssdev->driver->run_test) {
+ r = dssdev->driver->run_test(dssdev, test_num);
+ if (r)
+ goto end;
+ }
+
+ /* then in high speed */
+ dsi_vc_enable_hs(0, 1);
+
+ if (dssdev->driver->run_test) {
+ r = dssdev->driver->run_test(dssdev, test_num);
+ if (r)
+ goto end;
+ }
+
+end:
+ dsi_vc_enable_hs(0, 1);
+
+ dsi_bus_unlock();
+
+ return r;
+}
+
+static int dsi_display_memory_read(struct omap_dss_device *dssdev,
+ void *buf, size_t size,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ int r;
+
+ DSSDBGF("");
+
+ if (!dssdev->driver->memory_read)
+ return -EINVAL;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return -EIO;
+
+ dsi_bus_lock();
+
+ r = dssdev->driver->memory_read(dssdev, buf, size,
+ x, y, w, h);
+
+ /* Memory read usually changes the update area. This will
+ * force the next update to re-set the update area */
+ dsi.active_update_region.dirty = true;
+
+ dsi_bus_unlock();
+
+ return r;
+}
+
+void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
+ u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 *fifo_low, u32 *fifo_high)
+{
+ unsigned burst_size_bytes;
+
+ *burst_size = OMAP_DSS_BURST_16x32;
+ burst_size_bytes = 16 * 32 / 8;
+
+ *fifo_high = fifo_size - burst_size_bytes;
+ *fifo_low = fifo_size - burst_size_bytes * 8;
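+ /* For example, with a hypothetical 1024 byte FIFO this would give
+ * fifo_high = 1024 - 64 = 960 and fifo_low = 1024 - 8 * 64 = 512 */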
+}
+
+int dsi_init_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("DSI init\n");
+
+ dssdev->enable = dsi_display_enable;
+ dssdev->disable = dsi_display_disable;
+ dssdev->suspend = dsi_display_suspend;
+ dssdev->resume = dsi_display_resume;
+ dssdev->update = dsi_display_update;
+ dssdev->sync = dsi_display_sync;
+ dssdev->set_update_mode = dsi_display_set_update_mode;
+ dssdev->get_update_mode = dsi_display_get_update_mode;
+ dssdev->enable_te = dsi_display_enable_te;
+ dssdev->get_te = dsi_display_get_te;
+
+ dssdev->get_rotate = dsi_display_get_rotate;
+ dssdev->set_rotate = dsi_display_set_rotate;
+
+ dssdev->get_mirror = dsi_display_get_mirror;
+ dssdev->set_mirror = dsi_display_set_mirror;
+
+ dssdev->run_test = dsi_display_run_test;
+ dssdev->memory_read = dsi_display_memory_read;
+
+ /* XXX these should be figured out dynamically */
+ dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
+ OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
+
+ dsi.vc[0].dssdev = dssdev;
+ dsi.vc[1].dssdev = dssdev;
+
+ return 0;
+}
+
+int dsi_init(struct platform_device *pdev)
+{
+ u32 rev;
+ int r;
+ struct sched_param param = {
+ .sched_priority = MAX_USER_RT_PRIO-1
+ };
+
+ spin_lock_init(&dsi.errors_lock);
+ dsi.errors = 0;
+
+ init_completion(&dsi.bta_completion);
+ init_completion(&dsi.update_completion);
+
+ dsi.thread = kthread_create(dsi_update_thread, NULL, "dsi");
+ if (IS_ERR(dsi.thread)) {
+ DSSERR("cannot create kthread\n");
+ r = PTR_ERR(dsi.thread);
+ goto err0;
+ }
+ sched_setscheduler(dsi.thread, SCHED_FIFO, &param);
+
+ init_waitqueue_head(&dsi.waitqueue);
+ spin_lock_init(&dsi.update_lock);
+
+ mutex_init(&dsi.lock);
+ mutex_init(&dsi.bus_lock);
+
+#ifdef DSI_CATCH_MISSING_TE
+ init_timer(&dsi.te_timer);
+ dsi.te_timer.function = dsi_te_timeout;
+ dsi.te_timer.data = 0;
+#endif
+
+ dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
+ dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
+
+ dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
+ if (!dsi.base) {
+ DSSERR("can't ioremap DSI\n");
+ r = -ENOMEM;
+ goto err1;
+ }
+
+ dsi.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
+ if (IS_ERR(dsi.vdds_dsi_reg)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ r = PTR_ERR(dsi.vdds_dsi_reg);
+ goto err2;
+ }
+
+ enable_clocks(1);
+
+ rev = dsi_read_reg(DSI_REVISION);
+ printk(KERN_INFO "OMAP DSI rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ enable_clocks(0);
+
+ wake_up_process(dsi.thread);
+
+ return 0;
+err2:
+ iounmap(dsi.base);
+err1:
+ kthread_stop(dsi.thread);
+err0:
+ return r;
+}
+
+void dsi_exit(void)
+{
+ kthread_stop(dsi.thread);
+
+ regulator_put(dsi.vdds_dsi_reg);
+
+ iounmap(dsi.base);
+
+ DSSDBG("omap_dsi_exit\n");
+}
+
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
new file mode 100644
index 00000000000..9b05ee65a15
--- /dev/null
+++ b/drivers/video/omap2/dss/dss.c
@@ -0,0 +1,596 @@
+/*
+ * linux/drivers/video/omap2/dss/dss.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "DSS"
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/clk.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+#define DSS_BASE 0x48050000
+
+#define DSS_SZ_REGS SZ_512
+
+struct dss_reg {
+ u16 idx;
+};
+
+#define DSS_REG(idx) ((const struct dss_reg) { idx })
+
+#define DSS_REVISION DSS_REG(0x0000)
+#define DSS_SYSCONFIG DSS_REG(0x0010)
+#define DSS_SYSSTATUS DSS_REG(0x0014)
+#define DSS_IRQSTATUS DSS_REG(0x0018)
+#define DSS_CONTROL DSS_REG(0x0040)
+#define DSS_SDI_CONTROL DSS_REG(0x0044)
+#define DSS_PLL_CONTROL DSS_REG(0x0048)
+#define DSS_SDI_STATUS DSS_REG(0x005C)
+
+#define REG_GET(idx, start, end) \
+ FLD_GET(dss_read_reg(idx), start, end)
+
+#define REG_FLD_MOD(idx, val, start, end) \
+ dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+
+static struct {
+ void __iomem *base;
+
+ struct clk *dpll4_m4_ck;
+
+ unsigned long cache_req_pck;
+ unsigned long cache_prate;
+ struct dss_clock_info cache_dss_cinfo;
+ struct dispc_clock_info cache_dispc_cinfo;
+
+ u32 ctx[DSS_SZ_REGS / sizeof(u32)];
+} dss;
+
+static int _omap_dss_wait_reset(void);
+
+static inline void dss_write_reg(const struct dss_reg idx, u32 val)
+{
+ __raw_writel(val, dss.base + idx.idx);
+}
+
+static inline u32 dss_read_reg(const struct dss_reg idx)
+{
+ return __raw_readl(dss.base + idx.idx);
+}
+
+#define SR(reg) \
+ dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
+#define RR(reg) \
+ dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
+
+void dss_save_context(void)
+{
+ if (cpu_is_omap24xx())
+ return;
+
+ SR(SYSCONFIG);
+ SR(CONTROL);
+
+#ifdef CONFIG_OMAP2_DSS_SDI
+ SR(SDI_CONTROL);
+ SR(PLL_CONTROL);
+#endif
+}
+
+void dss_restore_context(void)
+{
+ if (_omap_dss_wait_reset())
+ DSSERR("DSS not coming out of reset after sleep\n");
+
+ RR(SYSCONFIG);
+ RR(CONTROL);
+
+#ifdef CONFIG_OMAP2_DSS_SDI
+ RR(SDI_CONTROL);
+ RR(PLL_CONTROL);
+#endif
+}
+
+#undef SR
+#undef RR
+
+void dss_sdi_init(u8 datapairs)
+{
+ u32 l;
+
+ BUG_ON(datapairs > 3 || datapairs < 1);
+
+ l = dss_read_reg(DSS_SDI_CONTROL);
+ l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */
+ l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */
+ l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */
+ dss_write_reg(DSS_SDI_CONTROL, l);
+
+ l = dss_read_reg(DSS_PLL_CONTROL);
+ l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */
+ l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */
+ l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */
+ dss_write_reg(DSS_PLL_CONTROL, l);
+}
+
+int dss_sdi_enable(void)
+{
+ unsigned long timeout;
+
+ dispc_pck_free_enable(1);
+
+ /* Reset SDI PLL */
+ REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
+ udelay(1); /* wait 2x PCLK */
+
+ /* Lock SDI PLL */
+ REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
+
+ /* Waiting for PLL lock request to complete */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
+ if (time_after_eq(jiffies, timeout)) {
+ DSSERR("PLL lock request timed out\n");
+ goto err1;
+ }
+ }
+
+ /* Clearing PLL_GO bit */
+ REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);
+
+ /* Waiting for PLL to lock */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
+ if (time_after_eq(jiffies, timeout)) {
+ DSSERR("PLL lock timed out\n");
+ goto err1;
+ }
+ }
+
+ dispc_lcd_enable_signal(1);
+
+ /* Waiting for SDI reset to complete */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
+ if (time_after_eq(jiffies, timeout)) {
+ DSSERR("SDI reset timed out\n");
+ goto err2;
+ }
+ }
+
+ return 0;
+
+ err2:
+ dispc_lcd_enable_signal(0);
+ err1:
+ /* Reset SDI PLL */
+ REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+
+ dispc_pck_free_enable(0);
+
+ return -ETIMEDOUT;
+}
+
+void dss_sdi_disable(void)
+{
+ dispc_lcd_enable_signal(0);
+
+ dispc_pck_free_enable(0);
+
+ /* Reset SDI PLL */
+ REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+}
+
+void dss_dump_clocks(struct seq_file *s)
+{
+ unsigned long dpll4_ck_rate;
+ unsigned long dpll4_m4_ck_rate;
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
+
+ seq_printf(s, "- DSS -\n");
+
+ seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
+
+ seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
+ dpll4_ck_rate,
+ dpll4_ck_rate / dpll4_m4_ck_rate,
+ dss_clk_get_rate(DSS_CLK_FCK1));
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+void dss_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ DUMPREG(DSS_REVISION);
+ DUMPREG(DSS_SYSCONFIG);
+ DUMPREG(DSS_SYSSTATUS);
+ DUMPREG(DSS_IRQSTATUS);
+ DUMPREG(DSS_CONTROL);
+ DUMPREG(DSS_SDI_CONTROL);
+ DUMPREG(DSS_PLL_CONTROL);
+ DUMPREG(DSS_SDI_STATUS);
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+void dss_select_clk_source(bool dsi, bool dispc)
+{
+ u32 r;
+ r = dss_read_reg(DSS_CONTROL);
+ r = FLD_MOD(r, dsi, 1, 1); /* DSI_CLK_SWITCH */
+ r = FLD_MOD(r, dispc, 0, 0); /* DISPC_CLK_SWITCH */
+ dss_write_reg(DSS_CONTROL, r);
+}
+
+int dss_get_dsi_clk_source(void)
+{
+ return FLD_GET(dss_read_reg(DSS_CONTROL), 1, 1);
+}
+
+int dss_get_dispc_clk_source(void)
+{
+ return FLD_GET(dss_read_reg(DSS_CONTROL), 0, 0);
+}
+
+/* calculate clock rates using dividers in cinfo */
+int dss_calc_clock_rates(struct dss_clock_info *cinfo)
+{
+ unsigned long prate;
+
+ if (cinfo->fck_div > 16 || cinfo->fck_div == 0)
+ return -EINVAL;
+
+ prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+
+ cinfo->fck = prate / cinfo->fck_div;
+
+ return 0;
+}
+
+int dss_set_clock_div(struct dss_clock_info *cinfo)
+{
+ unsigned long prate;
+ int r;
+
+ if (cpu_is_omap34xx()) {
+ prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ DSSDBG("dpll4_m4 = %ld\n", prate);
+
+ r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
+ if (r)
+ return r;
+ }
+
+ DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
+
+ return 0;
+}
+
+int dss_get_clock_div(struct dss_clock_info *cinfo)
+{
+ cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);
+
+ if (cpu_is_omap34xx()) {
+ unsigned long prate;
+ prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ cinfo->fck_div = prate / (cinfo->fck / 2);
+ } else {
+ cinfo->fck_div = 0;
+ }
+
+ return 0;
+}
+
+unsigned long dss_get_dpll4_rate(void)
+{
+ if (cpu_is_omap34xx())
+ return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
+ else
+ return 0;
+}
+
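+/* Find DSS fck and DISPC lck/pck dividers for the requested pixel clock.
+ * As a rough example, assuming a 864 MHz DPLL4 rate (a typical OMAP3 value),
+ * fck = prate / fck_div * 2, so only fck_div 10..16 give an fck below
+ * DISPC_MAX_FCK (172.8 MHz down to 108 MHz); of those, the combination whose
+ * pck is closest to req_pck is kept. */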
+int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
+ struct dss_clock_info *dss_cinfo,
+ struct dispc_clock_info *dispc_cinfo)
+{
+ unsigned long prate;
+ struct dss_clock_info best_dss;
+ struct dispc_clock_info best_dispc;
+
+ unsigned long fck;
+
+ u16 fck_div;
+
+ int match = 0;
+ int min_fck_per_pck;
+
+ prate = dss_get_dpll4_rate();
+
+ fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ if (req_pck == dss.cache_req_pck &&
+ ((cpu_is_omap34xx() && prate == dss.cache_prate) ||
+ dss.cache_dss_cinfo.fck == fck)) {
+ DSSDBG("dispc clock info found from cache.\n");
+ *dss_cinfo = dss.cache_dss_cinfo;
+ *dispc_cinfo = dss.cache_dispc_cinfo;
+ return 0;
+ }
+
+ min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
+
+ if (min_fck_per_pck &&
+ req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
+ DSSERR("Requested pixel clock not possible with the current "
+ "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
+ "the constraint off.\n");
+ min_fck_per_pck = 0;
+ }
+
+retry:
+ memset(&best_dss, 0, sizeof(best_dss));
+ memset(&best_dispc, 0, sizeof(best_dispc));
+
+ if (cpu_is_omap24xx()) {
+ struct dispc_clock_info cur_dispc;
+ /* XXX can we change the clock on omap2? */
+ fck = dss_clk_get_rate(DSS_CLK_FCK1);
+ fck_div = 1;
+
+ dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
+ match = 1;
+
+ best_dss.fck = fck;
+ best_dss.fck_div = fck_div;
+
+ best_dispc = cur_dispc;
+
+ goto found;
+ } else if (cpu_is_omap34xx()) {
+ for (fck_div = 16; fck_div > 0; --fck_div) {
+ struct dispc_clock_info cur_dispc;
+
+ fck = prate / fck_div * 2;
+
+ if (fck > DISPC_MAX_FCK)
+ continue;
+
+ if (min_fck_per_pck &&
+ fck < req_pck * min_fck_per_pck)
+ continue;
+
+ match = 1;
+
+ dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
+
+ if (abs(cur_dispc.pck - req_pck) <
+ abs(best_dispc.pck - req_pck)) {
+
+ best_dss.fck = fck;
+ best_dss.fck_div = fck_div;
+
+ best_dispc = cur_dispc;
+
+ if (cur_dispc.pck == req_pck)
+ goto found;
+ }
+ }
+ } else {
+ BUG();
+ }
+
+found:
+ if (!match) {
+ if (min_fck_per_pck) {
+ DSSERR("Could not find suitable clock settings.\n"
+ "Turning FCK/PCK constraint off and"
+ "trying again.\n");
+ min_fck_per_pck = 0;
+ goto retry;
+ }
+
+ DSSERR("Could not find suitable clock settings.\n");
+
+ return -EINVAL;
+ }
+
+ if (dss_cinfo)
+ *dss_cinfo = best_dss;
+ if (dispc_cinfo)
+ *dispc_cinfo = best_dispc;
+
+ dss.cache_req_pck = req_pck;
+ dss.cache_prate = prate;
+ dss.cache_dss_cinfo = best_dss;
+ dss.cache_dispc_cinfo = best_dispc;
+
+ return 0;
+}
+
+
+
+static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
+{
+ dispc_irq_handler();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
+{
+ u32 irqstatus;
+
+ irqstatus = dss_read_reg(DSS_IRQSTATUS);
+
+ if (irqstatus & (1<<0)) /* DISPC_IRQ */
+ dispc_irq_handler();
+#ifdef CONFIG_OMAP2_DSS_DSI
+ if (irqstatus & (1<<1)) /* DSI_IRQ */
+ dsi_irq_handler();
+#endif
+
+ return IRQ_HANDLED;
+}
+
+static int _omap_dss_wait_reset(void)
+{
+ unsigned timeout = 1000;
+
+ while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
+ udelay(1);
+ if (!--timeout) {
+ DSSERR("soft reset failed\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int _omap_dss_reset(void)
+{
+ /* Soft reset */
+ REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
+ return _omap_dss_wait_reset();
+}
+
+void dss_set_venc_output(enum omap_dss_venc_type type)
+{
+ int l = 0;
+
+ if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
+ l = 0;
+ else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
+ l = 1;
+ else
+ BUG();
+
+ /* venc out selection. 0 = comp, 1 = svideo */
+ REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
+}
+
+void dss_set_dac_pwrdn_bgz(bool enable)
+{
+ REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
+}
+
+int dss_init(bool skip_init)
+{
+ int r;
+ u32 rev;
+
+ dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
+ if (!dss.base) {
+ DSSERR("can't ioremap DSS\n");
+ r = -ENOMEM;
+ goto fail0;
+ }
+
+ if (!skip_init) {
+ /* disable LCD and DIGIT output. This seems to fix the synclost
+ * problem that we get if the bootloader starts the DSS and
+ * the kernel resets it */
+ omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
+
+ /* We need to wait here a bit, otherwise we sometimes start to
+ * get synclost errors, and after that only a power cycle will
+ * restore DSS functionality. I have no idea why this happens.
+ * And we have to wait _before_ resetting the DSS, but after
+ * enabling clocks.
+ */
+ msleep(50);
+
+ _omap_dss_reset();
+ }
+
+ /* autoidle */
+ REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
+
+ /* Select DPLL */
+ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
+ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
+ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
+#endif
+
+ r = request_irq(INT_24XX_DSS_IRQ,
+ cpu_is_omap24xx()
+ ? dss_irq_handler_omap2
+ : dss_irq_handler_omap3,
+ 0, "OMAP DSS", NULL);
+
+ if (r < 0) {
+ DSSERR("omap2 dss: request_irq failed\n");
+ goto fail1;
+ }
+
+ if (cpu_is_omap34xx()) {
+ dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
+ if (IS_ERR(dss.dpll4_m4_ck)) {
+ DSSERR("Failed to get dpll4_m4_ck\n");
+ r = PTR_ERR(dss.dpll4_m4_ck);
+ goto fail2;
+ }
+ }
+
+ dss_save_context();
+
+ rev = dss_read_reg(DSS_REVISION);
+ printk(KERN_INFO "OMAP DSS rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ return 0;
+
+fail2:
+ free_irq(INT_24XX_DSS_IRQ, NULL);
+fail1:
+ iounmap(dss.base);
+fail0:
+ return r;
+}
+
+void dss_exit(void)
+{
+ if (cpu_is_omap34xx())
+ clk_put(dss.dpll4_m4_ck);
+
+ free_irq(INT_24XX_DSS_IRQ, NULL);
+
+ iounmap(dss.base);
+}
+
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
new file mode 100644
index 00000000000..8da5ac42151
--- /dev/null
+++ b/drivers/video/omap2/dss/dss.h
@@ -0,0 +1,370 @@
+/*
+ * linux/drivers/video/omap2/dss/dss.h
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP2_DSS_H
+#define __OMAP2_DSS_H
+
+#ifdef CONFIG_OMAP2_DSS_DEBUG_SUPPORT
+#define DEBUG
+#endif
+
+#ifdef DEBUG
+extern unsigned int dss_debug;
+#ifdef DSS_SUBSYS_NAME
+#define DSSDBG(format, ...) \
+ if (dss_debug) \
+ printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME ": " format, \
+ ## __VA_ARGS__)
+#else
+#define DSSDBG(format, ...) \
+ if (dss_debug) \
+ printk(KERN_DEBUG "omapdss: " format, ## __VA_ARGS__)
+#endif
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSDBGF(format, ...) \
+ if (dss_debug) \
+ printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME \
+ ": %s(" format ")\n", \
+ __func__, \
+ ## __VA_ARGS__)
+#else
+#define DSSDBGF(format, ...) \
+ if (dss_debug) \
+ printk(KERN_DEBUG "omapdss: " \
+ ": %s(" format ")\n", \
+ __func__, \
+ ## __VA_ARGS__)
+#endif
+
+#else /* DEBUG */
+#define DSSDBG(format, ...)
+#define DSSDBGF(format, ...)
+#endif
+
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSERR(format, ...) \
+ printk(KERN_ERR "omapdss " DSS_SUBSYS_NAME " error: " format, \
+ ## __VA_ARGS__)
+#else
+#define DSSERR(format, ...) \
+ printk(KERN_ERR "omapdss error: " format, ## __VA_ARGS__)
+#endif
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSINFO(format, ...) \
+ printk(KERN_INFO "omapdss " DSS_SUBSYS_NAME ": " format, \
+ ## __VA_ARGS__)
+#else
+#define DSSINFO(format, ...) \
+ printk(KERN_INFO "omapdss: " format, ## __VA_ARGS__)
+#endif
+
+#ifdef DSS_SUBSYS_NAME
+#define DSSWARN(format, ...) \
+ printk(KERN_WARNING "omapdss " DSS_SUBSYS_NAME ": " format, \
+ ## __VA_ARGS__)
+#else
+#define DSSWARN(format, ...) \
+ printk(KERN_WARNING "omapdss: " format, ## __VA_ARGS__)
+#endif
+
+/* OMAP TRM gives bitfields as start:end, where start is the higher bit
+ number. For example 7:0 */
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
+#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
+#define FLD_MOD(orig, val, start, end) \
+ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+
+#define DISPC_MAX_FCK 173000000
+
+enum omap_burst_size {
+ OMAP_DSS_BURST_4x32 = 0,
+ OMAP_DSS_BURST_8x32 = 1,
+ OMAP_DSS_BURST_16x32 = 2,
+};
+
+enum omap_parallel_interface_mode {
+ OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */
+ OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */
+ OMAP_DSS_PARALLELMODE_DSI,
+};
+
+enum dss_clock {
+ DSS_CLK_ICK = 1 << 0,
+ DSS_CLK_FCK1 = 1 << 1,
+ DSS_CLK_FCK2 = 1 << 2,
+ DSS_CLK_54M = 1 << 3,
+ DSS_CLK_96M = 1 << 4,
+};
+
+struct dss_clock_info {
+ /* rates that we get with dividers below */
+ unsigned long fck;
+
+ /* dividers */
+ u16 fck_div;
+};
+
+struct dispc_clock_info {
+ /* rates that we get with dividers below */
+ unsigned long lck;
+ unsigned long pck;
+
+ /* dividers */
+ u16 lck_div;
+ u16 pck_div;
+};
+
+struct dsi_clock_info {
+ /* rates that we get with dividers below */
+ unsigned long fint;
+ unsigned long clkin4ddr;
+ unsigned long clkin;
+ unsigned long dsi1_pll_fclk;
+ unsigned long dsi2_pll_fclk;
+
+ unsigned long lp_clk;
+
+ /* dividers */
+ u16 regn;
+ u16 regm;
+ u16 regm3;
+ u16 regm4;
+
+ u16 lp_clk_div;
+
+ u8 highfreq;
+ bool use_dss2_fck;
+};
+
+struct seq_file;
+struct platform_device;
+
+/* core */
+void dss_clk_enable(enum dss_clock clks);
+void dss_clk_disable(enum dss_clock clks);
+unsigned long dss_clk_get_rate(enum dss_clock clk);
+int dss_need_ctx_restore(void);
+void dss_dump_clocks(struct seq_file *s);
+struct bus_type *dss_get_bus(void);
+
+/* display */
+int dss_suspend_all_devices(void);
+int dss_resume_all_devices(void);
+void dss_disable_all_devices(void);
+
+void dss_init_device(struct platform_device *pdev,
+ struct omap_dss_device *dssdev);
+void dss_uninit_device(struct platform_device *pdev,
+ struct omap_dss_device *dssdev);
+bool dss_use_replication(struct omap_dss_device *dssdev,
+ enum omap_color_mode mode);
+void default_get_overlay_fifo_thresholds(enum omap_plane plane,
+ u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 *fifo_low, u32 *fifo_high);
+
+/* manager */
+int dss_init_overlay_managers(struct platform_device *pdev);
+void dss_uninit_overlay_managers(struct platform_device *pdev);
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
+void dss_setup_partial_planes(struct omap_dss_device *dssdev,
+ u16 *x, u16 *y, u16 *w, u16 *h);
+void dss_start_update(struct omap_dss_device *dssdev);
+
+/* overlay */
+void dss_init_overlays(struct platform_device *pdev);
+void dss_uninit_overlays(struct platform_device *pdev);
+int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev);
+void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
+#ifdef L4_EXAMPLE
+void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
+#endif
+void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
+
+/* DSS */
+int dss_init(bool skip_init);
+void dss_exit(void);
+
+void dss_save_context(void);
+void dss_restore_context(void);
+
+void dss_dump_regs(struct seq_file *s);
+
+void dss_sdi_init(u8 datapairs);
+int dss_sdi_enable(void);
+void dss_sdi_disable(void);
+
+void dss_select_clk_source(bool dsi, bool dispc);
+int dss_get_dsi_clk_source(void);
+int dss_get_dispc_clk_source(void);
+void dss_set_venc_output(enum omap_dss_venc_type type);
+void dss_set_dac_pwrdn_bgz(bool enable);
+
+unsigned long dss_get_dpll4_rate(void);
+int dss_calc_clock_rates(struct dss_clock_info *cinfo);
+int dss_set_clock_div(struct dss_clock_info *cinfo);
+int dss_get_clock_div(struct dss_clock_info *cinfo);
+int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
+ struct dss_clock_info *dss_cinfo,
+ struct dispc_clock_info *dispc_cinfo);
+
+/* SDI */
+int sdi_init(bool skip_init);
+void sdi_exit(void);
+int sdi_init_display(struct omap_dss_device *display);
+
+/* DSI */
+int dsi_init(struct platform_device *pdev);
+void dsi_exit(void);
+
+void dsi_dump_clocks(struct seq_file *s);
+void dsi_dump_regs(struct seq_file *s);
+
+void dsi_save_context(void);
+void dsi_restore_context(void);
+
+int dsi_init_display(struct omap_dss_device *display);
+void dsi_irq_handler(void);
+unsigned long dsi_get_dsi1_pll_rate(void);
+int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo);
+int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
+ struct dsi_clock_info *cinfo,
+ struct dispc_clock_info *dispc_cinfo);
+int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
+ bool enable_hsdiv);
+void dsi_pll_uninit(void);
+void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
+ u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 *fifo_low, u32 *fifo_high);
+
+/* DPI */
+int dpi_init(void);
+void dpi_exit(void);
+int dpi_init_display(struct omap_dss_device *dssdev);
+
+/* DISPC */
+int dispc_init(void);
+void dispc_exit(void);
+void dispc_dump_clocks(struct seq_file *s);
+void dispc_dump_regs(struct seq_file *s);
+void dispc_irq_handler(void);
+void dispc_fake_vsync_irq(void);
+
+void dispc_save_context(void);
+void dispc_restore_context(void);
+
+void dispc_enable_sidle(void);
+void dispc_disable_sidle(void);
+
+void dispc_lcd_enable_signal_polarity(bool act_high);
+void dispc_lcd_enable_signal(bool enable);
+void dispc_pck_free_enable(bool enable);
+void dispc_enable_fifohandcheck(bool enable);
+
+void dispc_set_lcd_size(u16 width, u16 height);
+void dispc_set_digit_size(u16 width, u16 height);
+u32 dispc_get_plane_fifo_size(enum omap_plane plane);
+void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
+void dispc_enable_fifomerge(bool enable);
+void dispc_set_burst_size(enum omap_plane plane,
+ enum omap_burst_size burst_size);
+
+void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
+void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
+void dispc_set_plane_pos(enum omap_plane plane, u16 x, u16 y);
+void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height);
+void dispc_set_channel_out(enum omap_plane plane,
+ enum omap_channel channel_out);
+
+int dispc_setup_plane(enum omap_plane plane,
+ u32 paddr, u16 screen_width,
+ u16 pos_x, u16 pos_y,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ enum omap_color_mode color_mode,
+ bool ilace,
+ enum omap_dss_rotation_type rotation_type,
+ u8 rotation, bool mirror,
+ u8 global_alpha);
+
+bool dispc_go_busy(enum omap_channel channel);
+void dispc_go(enum omap_channel channel);
+void dispc_enable_lcd_out(bool enable);
+void dispc_enable_digit_out(bool enable);
+int dispc_enable_plane(enum omap_plane plane, bool enable);
+void dispc_enable_replication(enum omap_plane plane, bool enable);
+
+void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode);
+void dispc_set_tft_data_lines(u8 data_lines);
+void dispc_set_lcd_display_type(enum omap_lcd_display_type type);
+void dispc_set_loadmode(enum omap_dss_load_mode mode);
+
+void dispc_set_default_color(enum omap_channel channel, u32 color);
+u32 dispc_get_default_color(enum omap_channel channel);
+void dispc_set_trans_key(enum omap_channel ch,
+ enum omap_dss_trans_key_type type,
+ u32 trans_key);
+void dispc_get_trans_key(enum omap_channel ch,
+ enum omap_dss_trans_key_type *type,
+ u32 *trans_key);
+void dispc_enable_trans_key(enum omap_channel ch, bool enable);
+void dispc_enable_alpha_blending(enum omap_channel ch, bool enable);
+bool dispc_trans_key_enabled(enum omap_channel ch);
+bool dispc_alpha_blending_enabled(enum omap_channel ch);
+
+bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
+void dispc_set_lcd_timings(struct omap_video_timings *timings);
+unsigned long dispc_fclk_rate(void);
+unsigned long dispc_lclk_rate(void);
+unsigned long dispc_pclk_rate(void);
+void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb);
+void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
+ struct dispc_clock_info *cinfo);
+int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
+ struct dispc_clock_info *cinfo);
+int dispc_set_clock_div(struct dispc_clock_info *cinfo);
+int dispc_get_clock_div(struct dispc_clock_info *cinfo);
+
+
+/* VENC */
+int venc_init(struct platform_device *pdev);
+void venc_exit(void);
+void venc_dump_regs(struct seq_file *s);
+int venc_init_display(struct omap_dss_device *display);
+
+/* RFBI */
+int rfbi_init(void);
+void rfbi_exit(void);
+void rfbi_dump_regs(struct seq_file *s);
+
+int rfbi_configure(int rfbi_module, int bpp, int lines);
+void rfbi_enable_rfbi(bool enable);
+void rfbi_transfer_area(u16 width, u16 height,
+ void (callback)(void *data), void *data);
+void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
+unsigned long rfbi_get_max_tx_rate(void);
+int rfbi_init_display(struct omap_dss_device *display);
+
+#endif
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
new file mode 100644
index 00000000000..27d9c465c85
--- /dev/null
+++ b/drivers/video/omap2/dss/manager.c
@@ -0,0 +1,1487 @@
+/*
+ * linux/drivers/video/omap2/dss/manager.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "MANAGER"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+static int num_managers;
+static struct list_head manager_list;
+
+static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
+}
+
+static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ mgr->device ? mgr->device->name : "<none>");
+}
+
+static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ int r = 0;
+ size_t len = size;
+ struct omap_dss_device *dssdev = NULL;
+
+ int match(struct omap_dss_device *dssdev, void *data)
+ {
+ const char *str = data;
+ return sysfs_streq(dssdev->name, str);
+ }
+
+ if (buf[size-1] == '\n')
+ --len;
+
+ if (len > 0)
+ dssdev = omap_dss_find_device((void *)buf, match);
+
+ if (len > 0 && dssdev == NULL)
+ return -EINVAL;
+
+ if (dssdev)
+ DSSDBG("display %s found\n", dssdev->name);
+
+ if (mgr->device) {
+ r = mgr->unset_device(mgr);
+ if (r) {
+ DSSERR("failed to unset display\n");
+ goto put_device;
+ }
+ }
+
+ if (dssdev) {
+ r = mgr->set_device(mgr, dssdev);
+ if (r) {
+ DSSERR("failed to set manager\n");
+ goto put_device;
+ }
+
+ r = mgr->apply(mgr);
+ if (r) {
+ DSSERR("failed to apply dispc config\n");
+ goto put_device;
+ }
+ }
+
+put_device:
+ if (dssdev)
+ omap_dss_put_device(dssdev);
+
+ return r ? r : size;
+}
+
+static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.default_color);
+}
+
+static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ u32 color;
+ int r;
+
+ if (sscanf(buf, "%d", &color) != 1)
+ return -EINVAL;
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.default_color = color;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static const char *trans_key_type_str[] = {
+ "gfx-destination",
+ "video-source",
+};
+
+static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ enum omap_dss_trans_key_type key_type;
+
+ key_type = mgr->info.trans_key_type;
+ BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
+}
+
+static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ enum omap_dss_trans_key_type key_type;
+ struct omap_overlay_manager_info info;
+ int r;
+
+ for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
+ if (sysfs_streq(buf, trans_key_type_str[key_type]))
+ break;
+ }
+
+ if (key_type == ARRAY_SIZE(trans_key_type_str))
+ return -EINVAL;
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.trans_key_type = key_type;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_key);
+}
+
+static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ u32 key_value;
+ int r;
+
+ if (sscanf(buf, "%d", &key_value) != 1)
+ return -EINVAL;
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.trans_key = key_value;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_enabled);
+}
+
+static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ int enable;
+ int r;
+
+ if (sscanf(buf, "%d", &enable) != 1)
+ return -EINVAL;
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.trans_enabled = enable ? true : false;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t manager_alpha_blending_enabled_show(
+ struct omap_overlay_manager *mgr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.alpha_enabled);
+}
+
+static ssize_t manager_alpha_blending_enabled_store(
+ struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ int enable;
+ int r;
+
+ if (sscanf(buf, "%d", &enable) != 1)
+ return -EINVAL;
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.alpha_enabled = enable ? true : false;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+struct manager_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct omap_overlay_manager *, char *);
+ ssize_t (*store)(struct omap_overlay_manager *, const char *, size_t);
+};
+
+#define MANAGER_ATTR(_name, _mode, _show, _store) \
+ struct manager_attribute manager_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
+static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
+ manager_display_show, manager_display_store);
+static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
+ manager_default_color_show, manager_default_color_store);
+static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
+ manager_trans_key_type_show, manager_trans_key_type_store);
+static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
+ manager_trans_key_value_show, manager_trans_key_value_store);
+static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
+ manager_trans_key_enabled_show,
+ manager_trans_key_enabled_store);
+static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
+ manager_alpha_blending_enabled_show,
+ manager_alpha_blending_enabled_store);
+
+
+static struct attribute *manager_sysfs_attrs[] = {
+ &manager_attr_name.attr,
+ &manager_attr_display.attr,
+ &manager_attr_default_color.attr,
+ &manager_attr_trans_key_type.attr,
+ &manager_attr_trans_key_value.attr,
+ &manager_attr_trans_key_enabled.attr,
+ &manager_attr_alpha_blending_enabled.attr,
+ NULL
+};
+
+static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct omap_overlay_manager *manager;
+ struct manager_attribute *manager_attr;
+
+ manager = container_of(kobj, struct omap_overlay_manager, kobj);
+ manager_attr = container_of(attr, struct manager_attribute, attr);
+
+ if (!manager_attr->show)
+ return -ENOENT;
+
+ return manager_attr->show(manager, buf);
+}
+
+static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager *manager;
+ struct manager_attribute *manager_attr;
+
+ manager = container_of(kobj, struct omap_overlay_manager, kobj);
+ manager_attr = container_of(attr, struct manager_attribute, attr);
+
+ if (!manager_attr->store)
+ return -ENOENT;
+
+ return manager_attr->store(manager, buf, size);
+}
+
+static struct sysfs_ops manager_sysfs_ops = {
+ .show = manager_attr_show,
+ .store = manager_attr_store,
+};
+
+static struct kobj_type manager_ktype = {
+ .sysfs_ops = &manager_sysfs_ops,
+ .default_attrs = manager_sysfs_attrs,
+};
+
+/*
+ * We have 4 levels of cache for the dispc settings. First two are in SW and
+ * the latter two in HW.
+ *
+ * +--------------------+
+ * |overlay/manager_info|
+ * +--------------------+
+ * v
+ * apply()
+ * v
+ * +--------------------+
+ * | dss_cache |
+ * +--------------------+
+ * v
+ * configure()
+ * v
+ * +--------------------+
+ * | shadow registers |
+ * +--------------------+
+ * v
+ * VFP or lcd/digit_enable
+ * v
+ * +--------------------+
+ * | registers |
+ * +--------------------+
+ */
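+/* Roughly: apply() copies the overlay/manager info into dss_cache,
+ * configure_dispc() writes the non-busy entries to the shadow registers via
+ * configure_overlay()/configure_manager(), and the GO bit (dispc_go()) or,
+ * for manual update displays, the next frame start makes the hardware take
+ * the shadow values into use. */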
+
+struct overlay_cache_data {
+ /* If true, cache changed, but not written to shadow registers. Set
+ * in apply(), cleared when registers written. */
+ bool dirty;
+ /* If true, shadow registers contain changed values not yet in real
+ * registers. Set when writing to shadow registers, cleared at
+ * VSYNC/EVSYNC */
+ bool shadow_dirty;
+
+ bool enabled;
+
+ u32 paddr;
+ void __iomem *vaddr;
+ u16 screen_width;
+ u16 width;
+ u16 height;
+ enum omap_color_mode color_mode;
+ u8 rotation;
+ enum omap_dss_rotation_type rotation_type;
+ bool mirror;
+
+ u16 pos_x;
+ u16 pos_y;
+ u16 out_width; /* if 0, out_width == width */
+ u16 out_height; /* if 0, out_height == height */
+ u8 global_alpha;
+
+ enum omap_channel channel;
+ bool replication;
+ bool ilace;
+
+ enum omap_burst_size burst_size;
+ u32 fifo_low;
+ u32 fifo_high;
+
+ bool manual_update;
+};
+
+struct manager_cache_data {
+ /* If true, cache changed, but not written to shadow registers. Set
+ * in apply(), cleared when registers written. */
+ bool dirty;
+ /* If true, shadow registers contain changed values not yet in real
+ * registers. Set when writing to shadow registers, cleared at
+ * VSYNC/EVSYNC */
+ bool shadow_dirty;
+
+ u32 default_color;
+
+ enum omap_dss_trans_key_type trans_key_type;
+ u32 trans_key;
+ bool trans_enabled;
+
+ bool alpha_enabled;
+
+ bool manual_upd_display;
+ bool manual_update;
+ bool do_manual_update;
+
+ /* manual update region */
+ u16 x, y, w, h;
+};
+
+static struct {
+ spinlock_t lock;
+ struct overlay_cache_data overlay_cache[3];
+ struct manager_cache_data manager_cache[2];
+
+ bool irq_enabled;
+} dss_cache;
+
+
+
+static int omap_dss_set_device(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev)
+{
+ int i;
+ int r;
+
+ if (dssdev->manager) {
+ DSSERR("display '%s' already has a manager '%s'\n",
+ dssdev->name, dssdev->manager->name);
+ return -EINVAL;
+ }
+
+ if ((mgr->supported_displays & dssdev->type) == 0) {
+ DSSERR("display '%s' does not support manager '%s'\n",
+ dssdev->name, mgr->name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mgr->num_overlays; i++) {
+ struct omap_overlay *ovl = mgr->overlays[i];
+
+ if (ovl->manager != mgr || !ovl->info.enabled)
+ continue;
+
+ r = dss_check_overlay(ovl, dssdev);
+ if (r)
+ return r;
+ }
+
+ dssdev->manager = mgr;
+ mgr->device = dssdev;
+ mgr->device_changed = true;
+
+ return 0;
+}
+
+static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
+{
+ if (!mgr->device) {
+ DSSERR("failed to unset display, display not set.\n");
+ return -EINVAL;
+ }
+
+ mgr->device->manager = NULL;
+ mgr->device = NULL;
+ mgr->device_changed = true;
+
+ return 0;
+}
+
+static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ struct manager_cache_data *mc;
+ enum omap_channel channel;
+ u32 irq;
+ int r;
+ int i;
+
+ if (!mgr->device)
+ return 0;
+
+ if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
+ irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+ channel = OMAP_DSS_CHANNEL_DIGIT;
+ } else {
+ if (mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+ enum omap_dss_update_mode mode;
+ mode = mgr->device->get_update_mode(mgr->device);
+ if (mode != OMAP_DSS_UPDATE_AUTO)
+ return 0;
+
+ irq = DISPC_IRQ_FRAMEDONE;
+ } else {
+ irq = DISPC_IRQ_VSYNC;
+ }
+ channel = OMAP_DSS_CHANNEL_LCD;
+ }
+
+ mc = &dss_cache.manager_cache[mgr->id];
+ i = 0;
+ while (1) {
+ unsigned long flags;
+ bool shadow_dirty, dirty;
+
+ spin_lock_irqsave(&dss_cache.lock, flags);
+ dirty = mc->dirty;
+ shadow_dirty = mc->shadow_dirty;
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+ if (!dirty && !shadow_dirty) {
+ r = 0;
+ break;
+ }
+
+ /* 4 iterations is the worst case:
+ * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+ * 2 - first VSYNC, dirty = true
+ * 3 - dirty = false, shadow_dirty = true
+ * 4 - shadow_dirty = false */
+ if (i++ == 3) {
+ DSSERR("mgr(%d)->wait_for_go() not finishing\n",
+ mgr->id);
+ r = 0;
+ break;
+ }
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (r == -ERESTARTSYS)
+ break;
+
+ if (r) {
+ DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
+ break;
+ }
+ }
+
+ return r;
+}
+
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ enum omap_channel channel;
+ struct overlay_cache_data *oc;
+ struct omap_dss_device *dssdev;
+ u32 irq;
+ int r;
+ int i;
+
+ if (!ovl->manager || !ovl->manager->device)
+ return 0;
+
+ dssdev = ovl->manager->device;
+
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+ channel = OMAP_DSS_CHANNEL_DIGIT;
+ } else {
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+ enum omap_dss_update_mode mode;
+ mode = dssdev->get_update_mode(dssdev);
+ if (mode != OMAP_DSS_UPDATE_AUTO)
+ return 0;
+
+ irq = DISPC_IRQ_FRAMEDONE;
+ } else {
+ irq = DISPC_IRQ_VSYNC;
+ }
+ channel = OMAP_DSS_CHANNEL_LCD;
+ }
+
+ oc = &dss_cache.overlay_cache[ovl->id];
+ i = 0;
+ while (1) {
+ unsigned long flags;
+ bool shadow_dirty, dirty;
+
+ spin_lock_irqsave(&dss_cache.lock, flags);
+ dirty = oc->dirty;
+ shadow_dirty = oc->shadow_dirty;
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+ if (!dirty && !shadow_dirty) {
+ r = 0;
+ break;
+ }
+
+ /* 4 iterations is the worst case:
+ * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+ * 2 - first VSYNC, dirty = true
+ * 3 - dirty = false, shadow_dirty = true
+ * 4 - shadow_dirty = false */
+ if (i++ == 3) {
+ DSSERR("ovl(%d)->wait_for_go() not finishing\n",
+ ovl->id);
+ r = 0;
+ break;
+ }
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (r == -ERESTARTSYS)
+ break;
+
+ if (r) {
+ DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int overlay_enabled(struct omap_overlay *ovl)
+{
+ return ovl->info.enabled && ovl->manager && ovl->manager->device;
+}
+
+/* Is rect1 a subset of rect2? */
+static bool rectangle_subset(int x1, int y1, int w1, int h1,
+ int x2, int y2, int w2, int h2)
+{
+ if (x1 < x2 || y1 < y2)
+ return false;
+
+ if (x1 + w1 > x2 + w2)
+ return false;
+
+ if (y1 + h1 > y2 + h2)
+ return false;
+
+ return true;
+}
+
+/* Do rect1 and rect2 overlap? */
+static bool rectangle_intersects(int x1, int y1, int w1, int h1,
+ int x2, int y2, int w2, int h2)
+{
+ if (x1 >= x2 + w2)
+ return false;
+
+ if (x2 >= x1 + w1)
+ return false;
+
+ if (y1 >= y2 + h2)
+ return false;
+
+ if (y2 >= y1 + h1)
+ return false;
+
+ return true;
+}
+
+static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
+{
+ if (oc->out_width != 0 && oc->width != oc->out_width)
+ return true;
+
+ if (oc->out_height != 0 && oc->height != oc->out_height)
+ return true;
+
+ return false;
+}
+
+static int configure_overlay(enum omap_plane plane)
+{
+ struct overlay_cache_data *c;
+ struct manager_cache_data *mc;
+ u16 outw, outh;
+ u16 x, y, w, h;
+ u32 paddr;
+ int r;
+
+ DSSDBGF("%d", plane);
+
+ c = &dss_cache.overlay_cache[plane];
+
+ if (!c->enabled) {
+ dispc_enable_plane(plane, 0);
+ return 0;
+ }
+
+ mc = &dss_cache.manager_cache[c->channel];
+
+ x = c->pos_x;
+ y = c->pos_y;
+ w = c->width;
+ h = c->height;
+ outw = c->out_width == 0 ? c->width : c->out_width;
+ outh = c->out_height == 0 ? c->height : c->out_height;
+ paddr = c->paddr;
+
+ if (c->manual_update && mc->do_manual_update) {
+ unsigned bpp;
+ /* If the overlay is outside the update region, disable it */
+ if (!rectangle_intersects(mc->x, mc->y, mc->w, mc->h,
+ x, y, outw, outh)) {
+ dispc_enable_plane(plane, 0);
+ return 0;
+ }
+
+ switch (c->color_mode) {
+ case OMAP_DSS_COLOR_RGB16:
+ case OMAP_DSS_COLOR_ARGB16:
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ bpp = 16;
+ break;
+
+ case OMAP_DSS_COLOR_RGB24P:
+ bpp = 24;
+ break;
+
+ case OMAP_DSS_COLOR_RGB24U:
+ case OMAP_DSS_COLOR_ARGB32:
+ case OMAP_DSS_COLOR_RGBA32:
+ case OMAP_DSS_COLOR_RGBX32:
+ bpp = 32;
+ break;
+
+ default:
+ BUG();
+ }
+
+ if (dispc_is_overlay_scaled(c)) {
+ /* If the overlay is scaled, the update area has
+ * already been enlarged to cover the whole overlay. We
+ * only need to adjust x/y here */
+ x = c->pos_x - mc->x;
+ y = c->pos_y - mc->y;
+ } else {
+ if (mc->x > c->pos_x) {
+ x = 0;
+ w -= (mc->x - c->pos_x);
+ paddr += (mc->x - c->pos_x) * bpp / 8;
+ } else {
+ x = c->pos_x - mc->x;
+ }
+
+ if (mc->y > c->pos_y) {
+ y = 0;
+ h -= (mc->y - c->pos_y);
+ paddr += (mc->y - c->pos_y) * c->screen_width *
+ bpp / 8;
+ } else {
+ y = c->pos_y - mc->y;
+ }
+
+ if (mc->w < (x+w))
+ w -= (x+w) - (mc->w);
+
+ if (mc->h < (y+h))
+ h -= (y+h) - (mc->h);
+
+ outw = w;
+ outh = h;
+ }
+ }
+
+ r = dispc_setup_plane(plane,
+ paddr,
+ c->screen_width,
+ x, y,
+ w, h,
+ outw, outh,
+ c->color_mode,
+ c->ilace,
+ c->rotation_type,
+ c->rotation,
+ c->mirror,
+ c->global_alpha);
+
+ if (r) {
+ /* this shouldn't happen */
+ DSSERR("dispc_setup_plane failed for ovl %d\n", plane);
+ dispc_enable_plane(plane, 0);
+ return r;
+ }
+
+ dispc_enable_replication(plane, c->replication);
+
+ dispc_set_burst_size(plane, c->burst_size);
+ dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+
+ dispc_enable_plane(plane, 1);
+
+ return 0;
+}
+
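+/* Write the cached transparency color key and alpha blending settings of one
+ * channel to DISPC */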
+static void configure_manager(enum omap_channel channel)
+{
+ struct manager_cache_data *c;
+
+ DSSDBGF("%d", channel);
+
+ c = &dss_cache.manager_cache[channel];
+
+ dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
+ dispc_enable_trans_key(channel, c->trans_enabled);
+ dispc_enable_alpha_blending(channel, c->alpha_enabled);
+}
+
+/* configure_dispc() tries to write values from the cache to the shadow
+ * registers. It writes only to those managers/overlays that are not busy.
+ * Returns 0 if everything could be written to the shadow registers, and
+ * 1 if something could not. */
+static int configure_dispc(void)
+{
+ struct overlay_cache_data *oc;
+ struct manager_cache_data *mc;
+ const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+ const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+ int i;
+ int r;
+ bool mgr_busy[2];
+ bool mgr_go[2];
+ bool busy;
+
+ r = 0;
+ busy = false;
+
+ mgr_busy[0] = dispc_go_busy(0);
+ mgr_busy[1] = dispc_go_busy(1);
+ mgr_go[0] = false;
+ mgr_go[1] = false;
+
+ /* Commit overlay settings */
+ for (i = 0; i < num_ovls; ++i) {
+ oc = &dss_cache.overlay_cache[i];
+ mc = &dss_cache.manager_cache[oc->channel];
+
+ if (!oc->dirty)
+ continue;
+
+ if (oc->manual_update && !mc->do_manual_update)
+ continue;
+
+ if (mgr_busy[oc->channel]) {
+ busy = true;
+ continue;
+ }
+
+ r = configure_overlay(i);
+ if (r)
+ DSSERR("configure_overlay %d failed\n", i);
+
+ oc->dirty = false;
+ oc->shadow_dirty = true;
+ mgr_go[oc->channel] = true;
+ }
+
+ /* Commit manager settings */
+ for (i = 0; i < num_mgrs; ++i) {
+ mc = &dss_cache.manager_cache[i];
+
+ if (!mc->dirty)
+ continue;
+
+ if (mc->manual_update && !mc->do_manual_update)
+ continue;
+
+ if (mgr_busy[i]) {
+ busy = true;
+ continue;
+ }
+
+ configure_manager(i);
+ mc->dirty = false;
+ mc->shadow_dirty = true;
+ mgr_go[i] = true;
+ }
+
+ /* set GO */
+ for (i = 0; i < num_mgrs; ++i) {
+ mc = &dss_cache.manager_cache[i];
+
+ if (!mgr_go[i])
+ continue;
+
+		/* We don't need GO with a manual update display. The LCD
+		 * interface will always be turned off after the frame, and
+		 * the new settings will be taken into use at the next update */
+ if (!mc->manual_upd_display)
+ dispc_go(i);
+ }
+
+ if (busy)
+ r = 1;
+ else
+ r = 0;
+
+ return r;
+}
+
+/* Configure dispc for a partial update. Returns the possibly modified update
+ * area through xi/yi/wi/hi */
+void dss_setup_partial_planes(struct omap_dss_device *dssdev,
+ u16 *xi, u16 *yi, u16 *wi, u16 *hi)
+{
+ struct overlay_cache_data *oc;
+ struct manager_cache_data *mc;
+ const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+ struct omap_overlay_manager *mgr;
+ int i;
+ u16 x, y, w, h;
+ unsigned long flags;
+
+ x = *xi;
+ y = *yi;
+ w = *wi;
+ h = *hi;
+
+	DSSDBG("dss_setup_partial_planes %d,%d %dx%d\n",
+			*xi, *yi, *wi, *hi);
+
+ mgr = dssdev->manager;
+
+ if (!mgr) {
+ DSSDBG("no manager\n");
+ return;
+ }
+
+ spin_lock_irqsave(&dss_cache.lock, flags);
+
+ /* We need to show the whole overlay if it is scaled. So look for
+ * those, and make the update area larger if found.
+ * Also mark the overlay cache dirty */
+ for (i = 0; i < num_ovls; ++i) {
+ unsigned x1, y1, x2, y2;
+ unsigned outw, outh;
+
+ oc = &dss_cache.overlay_cache[i];
+
+ if (oc->channel != mgr->id)
+ continue;
+
+ oc->dirty = true;
+
+ if (!oc->enabled)
+ continue;
+
+ if (!dispc_is_overlay_scaled(oc))
+ continue;
+
+ outw = oc->out_width == 0 ? oc->width : oc->out_width;
+ outh = oc->out_height == 0 ? oc->height : oc->out_height;
+
+ /* is the overlay outside the update region? */
+ if (!rectangle_intersects(x, y, w, h,
+ oc->pos_x, oc->pos_y,
+ outw, outh))
+ continue;
+
+		/* is the overlay totally inside the update region? */
+ if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
+ x, y, w, h))
+ continue;
+
+ if (x > oc->pos_x)
+ x1 = oc->pos_x;
+ else
+ x1 = x;
+
+ if (y > oc->pos_y)
+ y1 = oc->pos_y;
+ else
+ y1 = y;
+
+ if ((x + w) < (oc->pos_x + outw))
+ x2 = oc->pos_x + outw;
+ else
+ x2 = x + w;
+
+ if ((y + h) < (oc->pos_y + outh))
+ y2 = oc->pos_y + outh;
+ else
+ y2 = y + h;
+
+ x = x1;
+ y = y1;
+ w = x2 - x1;
+ h = y2 - y1;
+
+ DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
+ i, x, y, w, h);
+ }
+
+ mc = &dss_cache.manager_cache[mgr->id];
+ mc->do_manual_update = true;
+ mc->x = x;
+ mc->y = y;
+ mc->w = w;
+ mc->h = h;
+
+ configure_dispc();
+
+ mc->do_manual_update = false;
+
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+ *xi = x;
+ *yi = y;
+ *wi = w;
+ *hi = h;
+}
+
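+/* Called when a manual update display starts pushing a frame: the settings
+ * written by configure_dispc() are taken into use, so clear the shadow_dirty
+ * flags for this manager and its overlays and enable the LCD output. */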
+void dss_start_update(struct omap_dss_device *dssdev)
+{
+ struct manager_cache_data *mc;
+ struct overlay_cache_data *oc;
+ const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+ const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+ struct omap_overlay_manager *mgr;
+ int i;
+
+ mgr = dssdev->manager;
+
+ for (i = 0; i < num_ovls; ++i) {
+ oc = &dss_cache.overlay_cache[i];
+ if (oc->channel != mgr->id)
+ continue;
+
+ oc->shadow_dirty = false;
+ }
+
+ for (i = 0; i < num_mgrs; ++i) {
+ mc = &dss_cache.manager_cache[i];
+ if (mgr->id != i)
+ continue;
+
+ mc->shadow_dirty = false;
+ }
+
+ dispc_enable_lcd_out(1);
+}
+
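+/* VSYNC/EVSYNC interrupt handler: clear the shadow_dirty flags of managers
+ * whose GO bit has completed, retry configure_dispc() for anything still
+ * pending, and unregister this ISR once all managers are idle. */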
+static void dss_apply_irq_handler(void *data, u32 mask)
+{
+ struct manager_cache_data *mc;
+ struct overlay_cache_data *oc;
+ const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
+ const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
+ int i, r;
+ bool mgr_busy[2];
+
+ mgr_busy[0] = dispc_go_busy(0);
+ mgr_busy[1] = dispc_go_busy(1);
+
+ spin_lock(&dss_cache.lock);
+
+ for (i = 0; i < num_ovls; ++i) {
+ oc = &dss_cache.overlay_cache[i];
+ if (!mgr_busy[oc->channel])
+ oc->shadow_dirty = false;
+ }
+
+ for (i = 0; i < num_mgrs; ++i) {
+ mc = &dss_cache.manager_cache[i];
+ if (!mgr_busy[i])
+ mc->shadow_dirty = false;
+ }
+
+ r = configure_dispc();
+ if (r == 1)
+ goto end;
+
+ /* re-read busy flags */
+ mgr_busy[0] = dispc_go_busy(0);
+ mgr_busy[1] = dispc_go_busy(1);
+
+ /* keep running as long as there are busy managers, so that
+ * we can collect overlay-applied information */
+ for (i = 0; i < num_mgrs; ++i) {
+ if (mgr_busy[i])
+ goto end;
+ }
+
+ omap_dispc_unregister_isr(dss_apply_irq_handler, NULL,
+ DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
+ DISPC_IRQ_EVSYNC_EVEN);
+ dss_cache.irq_enabled = false;
+
+end:
+ spin_unlock(&dss_cache.lock);
+}
+
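+/* Copy the user-visible overlay and manager settings into dss_cache, compute
+ * per-overlay FIFO thresholds, and call configure_dispc() to program the
+ * hardware, registering the apply IRQ handler so busy managers are retried
+ * on the next VSYNC/EVSYNC. */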
+static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+{
+ struct overlay_cache_data *oc;
+ struct manager_cache_data *mc;
+ int i;
+ struct omap_overlay *ovl;
+ int num_planes_enabled = 0;
+ bool use_fifomerge;
+ unsigned long flags;
+ int r;
+
+ DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+
+ spin_lock_irqsave(&dss_cache.lock, flags);
+
+ /* Configure overlays */
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_dss_device *dssdev;
+
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ oc = &dss_cache.overlay_cache[ovl->id];
+
+ if (!overlay_enabled(ovl)) {
+ if (oc->enabled) {
+ oc->enabled = false;
+ oc->dirty = true;
+ }
+ continue;
+ }
+
+ if (!ovl->info_dirty) {
+ if (oc->enabled)
+ ++num_planes_enabled;
+ continue;
+ }
+
+ dssdev = ovl->manager->device;
+
+ if (dss_check_overlay(ovl, dssdev)) {
+ if (oc->enabled) {
+ oc->enabled = false;
+ oc->dirty = true;
+ }
+ continue;
+ }
+
+ ovl->info_dirty = false;
+ oc->dirty = true;
+
+ oc->paddr = ovl->info.paddr;
+ oc->vaddr = ovl->info.vaddr;
+ oc->screen_width = ovl->info.screen_width;
+ oc->width = ovl->info.width;
+ oc->height = ovl->info.height;
+ oc->color_mode = ovl->info.color_mode;
+ oc->rotation = ovl->info.rotation;
+ oc->rotation_type = ovl->info.rotation_type;
+ oc->mirror = ovl->info.mirror;
+ oc->pos_x = ovl->info.pos_x;
+ oc->pos_y = ovl->info.pos_y;
+ oc->out_width = ovl->info.out_width;
+ oc->out_height = ovl->info.out_height;
+ oc->global_alpha = ovl->info.global_alpha;
+
+ oc->replication =
+ dss_use_replication(dssdev, ovl->info.color_mode);
+
+ oc->ilace = dssdev->type == OMAP_DISPLAY_TYPE_VENC;
+
+ oc->channel = ovl->manager->id;
+
+ oc->enabled = true;
+
+ oc->manual_update =
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
+ dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
+
+ ++num_planes_enabled;
+ }
+
+ /* Configure managers */
+ list_for_each_entry(mgr, &manager_list, list) {
+ struct omap_dss_device *dssdev;
+
+ if (!(mgr->caps & OMAP_DSS_OVL_MGR_CAP_DISPC))
+ continue;
+
+ mc = &dss_cache.manager_cache[mgr->id];
+
+ if (mgr->device_changed) {
+ mgr->device_changed = false;
+ mgr->info_dirty = true;
+ }
+
+ if (!mgr->info_dirty)
+ continue;
+
+ if (!mgr->device)
+ continue;
+
+ dssdev = mgr->device;
+
+ mgr->info_dirty = false;
+ mc->dirty = true;
+
+ mc->default_color = mgr->info.default_color;
+ mc->trans_key_type = mgr->info.trans_key_type;
+ mc->trans_key = mgr->info.trans_key;
+ mc->trans_enabled = mgr->info.trans_enabled;
+ mc->alpha_enabled = mgr->info.alpha_enabled;
+
+ mc->manual_upd_display =
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+
+ mc->manual_update =
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
+ dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
+ }
+
+ /* XXX TODO: Try to get fifomerge working. The problem is that it
+ * affects both managers, not individually but at the same time. This
+ * means the change has to be well synchronized. I guess the proper way
+ * is to have a two step process for fifo merge:
+ * fifomerge enable:
+ * 1. disable other planes, leaving one plane enabled
+ * 2. wait until the planes are disabled on HW
+ * 3. config merged fifo thresholds, enable fifomerge
+ * fifomerge disable:
+ * 1. config unmerged fifo thresholds, disable fifomerge
+ * 2. wait until fifo changes are in HW
+ * 3. enable planes
+ */
+ use_fifomerge = false;
+
+ /* Configure overlay fifos */
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
+ struct omap_dss_device *dssdev;
+ u32 size;
+
+ ovl = omap_dss_get_overlay(i);
+
+ if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
+ continue;
+
+ oc = &dss_cache.overlay_cache[ovl->id];
+
+ if (!oc->enabled)
+ continue;
+
+ dssdev = ovl->manager->device;
+
+ size = dispc_get_plane_fifo_size(ovl->id);
+ if (use_fifomerge)
+ size *= 3;
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ case OMAP_DISPLAY_TYPE_SDI:
+ case OMAP_DISPLAY_TYPE_VENC:
+ default_get_overlay_fifo_thresholds(ovl->id, size,
+ &oc->burst_size, &oc->fifo_low,
+ &oc->fifo_high);
+ break;
+#ifdef CONFIG_OMAP2_DSS_DSI
+ case OMAP_DISPLAY_TYPE_DSI:
+ dsi_get_overlay_fifo_thresholds(ovl->id, size,
+ &oc->burst_size, &oc->fifo_low,
+ &oc->fifo_high);
+ break;
+#endif
+ default:
+ BUG();
+ }
+ }
+
+ r = 0;
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ if (!dss_cache.irq_enabled) {
+ r = omap_dispc_register_isr(dss_apply_irq_handler, NULL,
+ DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
+ DISPC_IRQ_EVSYNC_EVEN);
+ dss_cache.irq_enabled = true;
+ }
+ configure_dispc();
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ spin_unlock_irqrestore(&dss_cache.lock, flags);
+
+ return r;
+}
+
+static int dss_check_manager(struct omap_overlay_manager *mgr)
+{
+ /* OMAP supports only graphics source transparency color key and alpha
+ * blending simultaneously. See TRM 15.4.2.4.2.2 Alpha Mode */
+
+ if (mgr->info.alpha_enabled && mgr->info.trans_enabled &&
+ mgr->info.trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int omap_dss_mgr_set_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info)
+{
+ int r;
+ struct omap_overlay_manager_info old_info;
+
+ old_info = mgr->info;
+ mgr->info = *info;
+
+ r = dss_check_manager(mgr);
+ if (r) {
+ mgr->info = old_info;
+ return r;
+ }
+
+ mgr->info_dirty = true;
+
+ return 0;
+}
+
+static void omap_dss_mgr_get_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info)
+{
+ *info = mgr->info;
+}
+
+static void omap_dss_add_overlay_manager(struct omap_overlay_manager *manager)
+{
+ ++num_managers;
+ list_add_tail(&manager->list, &manager_list);
+}
+
+int dss_init_overlay_managers(struct platform_device *pdev)
+{
+ int i, r;
+
+ spin_lock_init(&dss_cache.lock);
+
+ INIT_LIST_HEAD(&manager_list);
+
+ num_managers = 0;
+
+ for (i = 0; i < 2; ++i) {
+ struct omap_overlay_manager *mgr;
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ BUG_ON(mgr == NULL);
+
+ switch (i) {
+ case 0:
+ mgr->name = "lcd";
+ mgr->id = OMAP_DSS_CHANNEL_LCD;
+ mgr->supported_displays =
+ OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
+ OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI;
+ break;
+ case 1:
+ mgr->name = "tv";
+ mgr->id = OMAP_DSS_CHANNEL_DIGIT;
+ mgr->supported_displays = OMAP_DISPLAY_TYPE_VENC;
+ break;
+ }
+
+ mgr->set_device = &omap_dss_set_device;
+ mgr->unset_device = &omap_dss_unset_device;
+ mgr->apply = &omap_dss_mgr_apply;
+ mgr->set_manager_info = &omap_dss_mgr_set_info;
+ mgr->get_manager_info = &omap_dss_mgr_get_info;
+ mgr->wait_for_go = &dss_mgr_wait_for_go;
+
+ mgr->caps = OMAP_DSS_OVL_MGR_CAP_DISPC;
+
+ dss_overlay_setup_dispc_manager(mgr);
+
+ omap_dss_add_overlay_manager(mgr);
+
+ r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
+ &pdev->dev.kobj, "manager%d", i);
+
+ if (r) {
+ DSSERR("failed to create sysfs file\n");
+ continue;
+ }
+ }
+
+#ifdef L4_EXAMPLE
+ {
+ int omap_dss_mgr_apply_l4(struct omap_overlay_manager *mgr)
+ {
+ DSSDBG("omap_dss_mgr_apply_l4(%s)\n", mgr->name);
+
+ return 0;
+ }
+
+ struct omap_overlay_manager *mgr;
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ BUG_ON(mgr == NULL);
+
+ mgr->name = "l4";
+ mgr->supported_displays =
+ OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI;
+
+ mgr->set_device = &omap_dss_set_device;
+ mgr->unset_device = &omap_dss_unset_device;
+ mgr->apply = &omap_dss_mgr_apply_l4;
+ mgr->set_manager_info = &omap_dss_mgr_set_info;
+ mgr->get_manager_info = &omap_dss_mgr_get_info;
+
+ dss_overlay_setup_l4_manager(mgr);
+
+ omap_dss_add_overlay_manager(mgr);
+
+ r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
+ &pdev->dev.kobj, "managerl4");
+
+ if (r)
+ DSSERR("failed to create sysfs file\n");
+ }
+#endif
+
+ return 0;
+}
+
+void dss_uninit_overlay_managers(struct platform_device *pdev)
+{
+ struct omap_overlay_manager *mgr;
+
+ while (!list_empty(&manager_list)) {
+ mgr = list_first_entry(&manager_list,
+ struct omap_overlay_manager, list);
+ list_del(&mgr->list);
+ kobject_del(&mgr->kobj);
+ kobject_put(&mgr->kobj);
+ kfree(mgr);
+ }
+
+ num_managers = 0;
+}
+
+int omap_dss_get_num_overlay_managers(void)
+{
+ return num_managers;
+}
+EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
+
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
+{
+ int i = 0;
+ struct omap_overlay_manager *mgr;
+
+ list_for_each_entry(mgr, &manager_list, list) {
+ if (i++ == num)
+ return mgr;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(omap_dss_get_overlay_manager);
+
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
new file mode 100644
index 00000000000..b7f9a733984
--- /dev/null
+++ b/drivers/video/omap2/dss/overlay.c
@@ -0,0 +1,680 @@
+/*
+ * linux/drivers/video/omap2/dss/overlay.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "OVERLAY"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+static int num_overlays;
+static struct list_head overlay_list;
+
+static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name);
+}
+
+static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ ovl->manager ? ovl->manager->name : "<none>");
+}
+
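+/* sysfs store for "manager": look up the manager named in buf, detach the
+ * overlay from its current manager (applying the change) and attach it to
+ * the new one. An empty name detaches the overlay. */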
+static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
+ size_t size)
+{
+ int i, r;
+ struct omap_overlay_manager *mgr = NULL;
+ struct omap_overlay_manager *old_mgr;
+ int len = size;
+
+ if (buf[size-1] == '\n')
+ --len;
+
+ if (len > 0) {
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
+ mgr = omap_dss_get_overlay_manager(i);
+
+ if (strncmp(buf, mgr->name, len) == 0)
+ break;
+
+ mgr = NULL;
+ }
+ }
+
+ if (len > 0 && mgr == NULL)
+ return -EINVAL;
+
+ if (mgr)
+ DSSDBG("manager %s found\n", mgr->name);
+
+ if (mgr == ovl->manager)
+ return size;
+
+ old_mgr = ovl->manager;
+
+ /* detach old manager */
+ if (old_mgr) {
+ r = ovl->unset_manager(ovl);
+ if (r) {
+ DSSERR("detach failed\n");
+ return r;
+ }
+
+ r = old_mgr->apply(old_mgr);
+ if (r)
+ return r;
+ }
+
+ if (mgr) {
+ r = ovl->set_manager(ovl, mgr);
+ if (r) {
+ DSSERR("Failed to attach overlay\n");
+ return r;
+ }
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+ ovl->info.width, ovl->info.height);
+}
+
+static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.screen_width);
+}
+
+static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+ ovl->info.pos_x, ovl->info.pos_y);
+}
+
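+/* sysfs store for "position": parse "x,y" from buf and apply the new
+ * overlay position */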
+static ssize_t overlay_position_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ char *last;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.pos_x = simple_strtoul(buf, &last, 10);
+ ++last;
+ if (last - buf >= size)
+ return -EINVAL;
+
+ info.pos_y = simple_strtoul(last, &last, 10);
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d,%d\n",
+ ovl->info.out_width, ovl->info.out_height);
+}
+
+static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ char *last;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.out_width = simple_strtoul(buf, &last, 10);
+ ++last;
+ if (last - buf >= size)
+ return -EINVAL;
+
+ info.out_height = simple_strtoul(last, &last, 10);
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled);
+}
+
+static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
+ size_t size)
+{
+ int r;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.enabled = simple_strtoul(buf, NULL, 10);
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ ovl->info.global_alpha);
+}
+
+static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
+ const char *buf, size_t size)
+{
+ int r;
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+	/* The Video1 plane does not support global alpha,
+	 * so always force it to 255, i.e. fully opaque
+	 */
+ if (ovl->id == OMAP_DSS_VIDEO1)
+ info.global_alpha = 255;
+ else
+ info.global_alpha = simple_strtoul(buf, NULL, 10);
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ return r;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ return r;
+ }
+
+ return size;
+}
+
+struct overlay_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct omap_overlay *, char *);
+ ssize_t (*store)(struct omap_overlay *, const char *, size_t);
+};
+
+#define OVERLAY_ATTR(_name, _mode, _show, _store) \
+ struct overlay_attribute overlay_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL);
+static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR,
+ overlay_manager_show, overlay_manager_store);
+static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL);
+static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL);
+static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR,
+ overlay_position_show, overlay_position_store);
+static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR,
+ overlay_output_size_show, overlay_output_size_store);
+static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
+ overlay_enabled_show, overlay_enabled_store);
+static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
+ overlay_global_alpha_show, overlay_global_alpha_store);
+
+static struct attribute *overlay_sysfs_attrs[] = {
+ &overlay_attr_name.attr,
+ &overlay_attr_manager.attr,
+ &overlay_attr_input_size.attr,
+ &overlay_attr_screen_width.attr,
+ &overlay_attr_position.attr,
+ &overlay_attr_output_size.attr,
+ &overlay_attr_enabled.attr,
+ &overlay_attr_global_alpha.attr,
+ NULL
+};
+
+static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct omap_overlay *overlay;
+ struct overlay_attribute *overlay_attr;
+
+ overlay = container_of(kobj, struct omap_overlay, kobj);
+ overlay_attr = container_of(attr, struct overlay_attribute, attr);
+
+ if (!overlay_attr->show)
+ return -ENOENT;
+
+ return overlay_attr->show(overlay, buf);
+}
+
+static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay *overlay;
+ struct overlay_attribute *overlay_attr;
+
+ overlay = container_of(kobj, struct omap_overlay, kobj);
+ overlay_attr = container_of(attr, struct overlay_attribute, attr);
+
+ if (!overlay_attr->store)
+ return -ENOENT;
+
+ return overlay_attr->store(overlay, buf, size);
+}
+
+static struct sysfs_ops overlay_sysfs_ops = {
+ .show = overlay_attr_show,
+ .store = overlay_attr_store,
+};
+
+static struct kobj_type overlay_ktype = {
+ .sysfs_ops = &overlay_sysfs_ops,
+ .default_attrs = overlay_sysfs_attrs,
+};
+
+/* Check if overlay parameters are compatible with display */
+int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
+{
+ struct omap_overlay_info *info;
+ u16 outw, outh;
+ u16 dw, dh;
+
+ if (!dssdev)
+ return 0;
+
+ if (!ovl->info.enabled)
+ return 0;
+
+ info = &ovl->info;
+
+ if (info->paddr == 0) {
+ DSSDBG("check_overlay failed: paddr 0\n");
+ return -EINVAL;
+ }
+
+ dssdev->get_resolution(dssdev, &dw, &dh);
+
+ DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n",
+ ovl->id,
+ info->pos_x, info->pos_y,
+ info->width, info->height,
+ info->out_width, info->out_height,
+ dw, dh);
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ outw = info->width;
+ outh = info->height;
+ } else {
+ if (info->out_width == 0)
+ outw = info->width;
+ else
+ outw = info->out_width;
+
+ if (info->out_height == 0)
+ outh = info->height;
+ else
+ outh = info->out_height;
+ }
+
+ if (dw < info->pos_x + outw) {
+ DSSDBG("check_overlay failed 1: %d < %d + %d\n",
+ dw, info->pos_x, outw);
+ return -EINVAL;
+ }
+
+ if (dh < info->pos_y + outh) {
+ DSSDBG("check_overlay failed 2: %d < %d + %d\n",
+ dh, info->pos_y, outh);
+ return -EINVAL;
+ }
+
+ if ((ovl->supported_modes & info->color_mode) == 0) {
+ DSSERR("overlay doesn't support mode %d\n", info->color_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dss_ovl_set_overlay_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info)
+{
+ int r;
+ struct omap_overlay_info old_info;
+
+ old_info = ovl->info;
+ ovl->info = *info;
+
+ if (ovl->manager) {
+ r = dss_check_overlay(ovl, ovl->manager->device);
+ if (r) {
+ ovl->info = old_info;
+ return r;
+ }
+ }
+
+ ovl->info_dirty = true;
+
+ return 0;
+}
+
+static void dss_ovl_get_overlay_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info)
+{
+ *info = ovl->info;
+}
+
+static int dss_ovl_wait_for_go(struct omap_overlay *ovl)
+{
+ return dss_mgr_wait_for_go_ovl(ovl);
+}
+
+static int omap_dss_set_manager(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr)
+{
+ if (!mgr)
+ return -EINVAL;
+
+ if (ovl->manager) {
+ DSSERR("overlay '%s' already has a manager '%s'\n",
+ ovl->name, ovl->manager->name);
+ return -EINVAL;
+ }
+
+ if (ovl->info.enabled) {
+ DSSERR("overlay has to be disabled to change the manager\n");
+ return -EINVAL;
+ }
+
+ ovl->manager = mgr;
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ /* XXX: on manual update display, in auto update mode, a bug happens
+ * here. When an overlay is first enabled on LCD, then it's disabled,
+ * and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT
+ * errors. Waiting before changing the channel_out fixes it. I'm
+ * guessing that the overlay is still somehow being used for the LCD,
+ * but I don't understand how or why. */
+ msleep(40);
+ dispc_set_channel_out(ovl->id, mgr->id);
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ return 0;
+}
+
+static int omap_dss_unset_manager(struct omap_overlay *ovl)
+{
+ int r;
+
+ if (!ovl->manager) {
+ DSSERR("failed to detach overlay: manager not set\n");
+ return -EINVAL;
+ }
+
+ if (ovl->info.enabled) {
+ DSSERR("overlay has to be disabled to unset the manager\n");
+ return -EINVAL;
+ }
+
+ r = ovl->wait_for_go(ovl);
+ if (r)
+ return r;
+
+ ovl->manager = NULL;
+
+ return 0;
+}
+
+int omap_dss_get_num_overlays(void)
+{
+ return num_overlays;
+}
+EXPORT_SYMBOL(omap_dss_get_num_overlays);
+
+struct omap_overlay *omap_dss_get_overlay(int num)
+{
+ int i = 0;
+ struct omap_overlay *ovl;
+
+ list_for_each_entry(ovl, &overlay_list, list) {
+ if (i++ == num)
+ return ovl;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(omap_dss_get_overlay);
+
+static void omap_dss_add_overlay(struct omap_overlay *overlay)
+{
+ ++num_overlays;
+ list_add_tail(&overlay->list, &overlay_list);
+}
+
+static struct omap_overlay *dispc_overlays[3];
+
+void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr)
+{
+ mgr->num_overlays = 3;
+ mgr->overlays = dispc_overlays;
+}
+
+#ifdef L4_EXAMPLE
+static struct omap_overlay *l4_overlays[1];
+void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr)
+{
+ mgr->num_overlays = 1;
+ mgr->overlays = l4_overlays;
+}
+#endif
+
+void dss_init_overlays(struct platform_device *pdev)
+{
+ int i, r;
+
+ INIT_LIST_HEAD(&overlay_list);
+
+ num_overlays = 0;
+
+ for (i = 0; i < 3; ++i) {
+ struct omap_overlay *ovl;
+ ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
+
+ BUG_ON(ovl == NULL);
+
+ switch (i) {
+ case 0:
+ ovl->name = "gfx";
+ ovl->id = OMAP_DSS_GFX;
+ ovl->supported_modes = cpu_is_omap34xx() ?
+ OMAP_DSS_COLOR_GFX_OMAP3 :
+ OMAP_DSS_COLOR_GFX_OMAP2;
+ ovl->caps = OMAP_DSS_OVL_CAP_DISPC;
+ ovl->info.global_alpha = 255;
+ break;
+ case 1:
+ ovl->name = "vid1";
+ ovl->id = OMAP_DSS_VIDEO1;
+ ovl->supported_modes = cpu_is_omap34xx() ?
+ OMAP_DSS_COLOR_VID1_OMAP3 :
+ OMAP_DSS_COLOR_VID_OMAP2;
+ ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
+ OMAP_DSS_OVL_CAP_DISPC;
+ ovl->info.global_alpha = 255;
+ break;
+ case 2:
+ ovl->name = "vid2";
+ ovl->id = OMAP_DSS_VIDEO2;
+ ovl->supported_modes = cpu_is_omap34xx() ?
+ OMAP_DSS_COLOR_VID2_OMAP3 :
+ OMAP_DSS_COLOR_VID_OMAP2;
+ ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
+ OMAP_DSS_OVL_CAP_DISPC;
+ ovl->info.global_alpha = 255;
+ break;
+ }
+
+ ovl->set_manager = &omap_dss_set_manager;
+ ovl->unset_manager = &omap_dss_unset_manager;
+ ovl->set_overlay_info = &dss_ovl_set_overlay_info;
+ ovl->get_overlay_info = &dss_ovl_get_overlay_info;
+ ovl->wait_for_go = &dss_ovl_wait_for_go;
+
+ omap_dss_add_overlay(ovl);
+
+ r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
+ &pdev->dev.kobj, "overlay%d", i);
+
+ if (r) {
+ DSSERR("failed to create sysfs file\n");
+ continue;
+ }
+
+ dispc_overlays[i] = ovl;
+ }
+
+#ifdef L4_EXAMPLE
+ {
+ struct omap_overlay *ovl;
+ ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
+
+ BUG_ON(ovl == NULL);
+
+ ovl->name = "l4";
+ ovl->supported_modes = OMAP_DSS_COLOR_RGB24U;
+
+ ovl->set_manager = &omap_dss_set_manager;
+ ovl->unset_manager = &omap_dss_unset_manager;
+ ovl->set_overlay_info = &dss_ovl_set_overlay_info;
+ ovl->get_overlay_info = &dss_ovl_get_overlay_info;
+
+ omap_dss_add_overlay(ovl);
+
+ r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
+ &pdev->dev.kobj, "overlayl4");
+
+ if (r)
+ DSSERR("failed to create sysfs file\n");
+
+ l4_overlays[0] = ovl;
+ }
+#endif
+}
+
+/* Connect overlays to the new device, if not already connected. If force is
+ * set, always connect. */
+void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
+{
+ int i;
+ struct omap_overlay_manager *lcd_mgr;
+ struct omap_overlay_manager *tv_mgr;
+ struct omap_overlay_manager *mgr = NULL;
+
+ lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
+ tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
+ if (!lcd_mgr->device || force) {
+ if (lcd_mgr->device)
+ lcd_mgr->unset_device(lcd_mgr);
+ lcd_mgr->set_device(lcd_mgr, dssdev);
+ mgr = lcd_mgr;
+ }
+ }
+
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (!tv_mgr->device || force) {
+ if (tv_mgr->device)
+ tv_mgr->unset_device(tv_mgr);
+ tv_mgr->set_device(tv_mgr, dssdev);
+ mgr = tv_mgr;
+ }
+ }
+
+ if (mgr) {
+ for (i = 0; i < 3; i++) {
+ struct omap_overlay *ovl;
+ ovl = omap_dss_get_overlay(i);
+ if (!ovl->manager || force) {
+ if (ovl->manager)
+ omap_dss_unset_manager(ovl);
+ omap_dss_set_manager(ovl, mgr);
+ }
+ }
+ }
+}
+
+void dss_uninit_overlays(struct platform_device *pdev)
+{
+ struct omap_overlay *ovl;
+
+ while (!list_empty(&overlay_list)) {
+ ovl = list_first_entry(&overlay_list,
+ struct omap_overlay, list);
+ list_del(&ovl->list);
+ kobject_del(&ovl->kobj);
+ kobject_put(&ovl->kobj);
+ kfree(ovl);
+ }
+
+ num_overlays = 0;
+}
+
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
new file mode 100644
index 00000000000..d0b3006ad8a
--- /dev/null
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -0,0 +1,1309 @@
+/*
+ * linux/drivers/video/omap2/dss/rfbi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "RFBI"
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/kfifo.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/seq_file.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+/*#define MEASURE_PERF*/
+
+#define RFBI_BASE 0x48050800
+
+struct rfbi_reg { u16 idx; };
+
+#define RFBI_REG(idx) ((const struct rfbi_reg) { idx })
+
+#define RFBI_REVISION RFBI_REG(0x0000)
+#define RFBI_SYSCONFIG RFBI_REG(0x0010)
+#define RFBI_SYSSTATUS RFBI_REG(0x0014)
+#define RFBI_CONTROL RFBI_REG(0x0040)
+#define RFBI_PIXEL_CNT RFBI_REG(0x0044)
+#define RFBI_LINE_NUMBER RFBI_REG(0x0048)
+#define RFBI_CMD RFBI_REG(0x004c)
+#define RFBI_PARAM RFBI_REG(0x0050)
+#define RFBI_DATA RFBI_REG(0x0054)
+#define RFBI_READ RFBI_REG(0x0058)
+#define RFBI_STATUS RFBI_REG(0x005c)
+
+#define RFBI_CONFIG(n) RFBI_REG(0x0060 + (n)*0x18)
+#define RFBI_ONOFF_TIME(n) RFBI_REG(0x0064 + (n)*0x18)
+#define RFBI_CYCLE_TIME(n) RFBI_REG(0x0068 + (n)*0x18)
+#define RFBI_DATA_CYCLE1(n) RFBI_REG(0x006c + (n)*0x18)
+#define RFBI_DATA_CYCLE2(n) RFBI_REG(0x0070 + (n)*0x18)
+#define RFBI_DATA_CYCLE3(n) RFBI_REG(0x0074 + (n)*0x18)
+
+#define RFBI_VSYNC_WIDTH RFBI_REG(0x0090)
+#define RFBI_HSYNC_WIDTH RFBI_REG(0x0094)
+
+#define RFBI_CMD_FIFO_LEN_BYTES (16 * sizeof(struct update_param))
+
+#define REG_FLD_MOD(idx, val, start, end) \
+ rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
+
+/* To work around an RFBI transfer rate limitation */
+#define OMAP_RFBI_RATE_LIMIT 1
+
+enum omap_rfbi_cycleformat {
+ OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0,
+ OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1,
+ OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2,
+ OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3,
+};
+
+enum omap_rfbi_datatype {
+ OMAP_DSS_RFBI_DATATYPE_12 = 0,
+ OMAP_DSS_RFBI_DATATYPE_16 = 1,
+ OMAP_DSS_RFBI_DATATYPE_18 = 2,
+ OMAP_DSS_RFBI_DATATYPE_24 = 3,
+};
+
+enum omap_rfbi_parallelmode {
+ OMAP_DSS_RFBI_PARALLELMODE_8 = 0,
+ OMAP_DSS_RFBI_PARALLELMODE_9 = 1,
+ OMAP_DSS_RFBI_PARALLELMODE_12 = 2,
+ OMAP_DSS_RFBI_PARALLELMODE_16 = 3,
+};
+
+enum update_cmd {
+ RFBI_CMD_UPDATE = 0,
+ RFBI_CMD_SYNC = 1,
+};
+
+static int rfbi_convert_timings(struct rfbi_timings *t);
+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
+static void process_cmd_fifo(void);
+
+static struct {
+ void __iomem *base;
+
+ unsigned long l4_khz;
+
+ enum omap_rfbi_datatype datatype;
+ enum omap_rfbi_parallelmode parallelmode;
+
+ enum omap_rfbi_te_mode te_mode;
+ int te_enabled;
+
+ void (*framedone_callback)(void *data);
+ void *framedone_callback_data;
+
+ struct omap_dss_device *dssdev[2];
+
+ struct kfifo *cmd_fifo;
+ spinlock_t cmd_lock;
+ struct completion cmd_done;
+ atomic_t cmd_fifo_full;
+ atomic_t cmd_pending;
+#ifdef MEASURE_PERF
+ unsigned perf_bytes;
+ ktime_t perf_setup_time;
+ ktime_t perf_start_time;
+#endif
+} rfbi;
+
+struct update_region {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+struct update_param {
+ u8 rfbi_module;
+ u8 cmd;
+
+ union {
+ struct update_region r;
+ struct completion *sync;
+ } par;
+};
+
+static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
+{
+ __raw_writel(val, rfbi.base + idx.idx);
+}
+
+static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
+{
+ return __raw_readl(rfbi.base + idx.idx);
+}
+
+static void rfbi_enable_clocks(bool enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ else
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+}
+
+void omap_rfbi_write_command(const void *buf, u32 len)
+{
+ rfbi_enable_clocks(1);
+ switch (rfbi.parallelmode) {
+ case OMAP_DSS_RFBI_PARALLELMODE_8:
+ {
+ const u8 *b = buf;
+ for (; len; len--)
+ rfbi_write_reg(RFBI_CMD, *b++);
+ break;
+ }
+
+ case OMAP_DSS_RFBI_PARALLELMODE_16:
+ {
+ const u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2)
+ rfbi_write_reg(RFBI_CMD, *w++);
+ break;
+ }
+
+ case OMAP_DSS_RFBI_PARALLELMODE_9:
+ case OMAP_DSS_RFBI_PARALLELMODE_12:
+ default:
+ BUG();
+ }
+ rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_write_command);
+
+void omap_rfbi_read_data(void *buf, u32 len)
+{
+ rfbi_enable_clocks(1);
+ switch (rfbi.parallelmode) {
+ case OMAP_DSS_RFBI_PARALLELMODE_8:
+ {
+ u8 *b = buf;
+ for (; len; len--) {
+ rfbi_write_reg(RFBI_READ, 0);
+ *b++ = rfbi_read_reg(RFBI_READ);
+ }
+ break;
+ }
+
+ case OMAP_DSS_RFBI_PARALLELMODE_16:
+ {
+ u16 *w = buf;
+		BUG_ON(len & 1);
+ for (; len; len -= 2) {
+ rfbi_write_reg(RFBI_READ, 0);
+ *w++ = rfbi_read_reg(RFBI_READ);
+ }
+ break;
+ }
+
+ case OMAP_DSS_RFBI_PARALLELMODE_9:
+ case OMAP_DSS_RFBI_PARALLELMODE_12:
+ default:
+ BUG();
+ }
+ rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_read_data);
+
+void omap_rfbi_write_data(const void *buf, u32 len)
+{
+ rfbi_enable_clocks(1);
+ switch (rfbi.parallelmode) {
+ case OMAP_DSS_RFBI_PARALLELMODE_8:
+ {
+ const u8 *b = buf;
+ for (; len; len--)
+ rfbi_write_reg(RFBI_PARAM, *b++);
+ break;
+ }
+
+ case OMAP_DSS_RFBI_PARALLELMODE_16:
+ {
+ const u16 *w = buf;
+ BUG_ON(len & 1);
+ for (; len; len -= 2)
+ rfbi_write_reg(RFBI_PARAM, *w++);
+ break;
+ }
+
+ case OMAP_DSS_RFBI_PARALLELMODE_9:
+ case OMAP_DSS_RFBI_PARALLELMODE_12:
+ default:
+ BUG();
+
+ }
+ rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_write_data);
+
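+/* Copy a w x h rectangle from an in-memory framebuffer (scr_width pixels per
+ * line) to the panel over the L4 path, writing each pixel to RFBI_PARAM in
+ * the byte order required by the current datatype/parallelmode pair. */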
+void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
+ u16 x, u16 y,
+ u16 w, u16 h)
+{
+ int start_offset = scr_width * y + x;
+ int horiz_offset = scr_width - w;
+ int i;
+
+ rfbi_enable_clocks(1);
+
+ if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
+ rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
+ const u16 __iomem *pd = buf;
+ pd += start_offset;
+
+ for (; h; --h) {
+ for (i = 0; i < w; ++i) {
+ const u8 __iomem *b = (const u8 __iomem *)pd;
+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
+ ++pd;
+ }
+ pd += horiz_offset;
+ }
+ } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 &&
+ rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
+ const u32 __iomem *pd = buf;
+ pd += start_offset;
+
+ for (; h; --h) {
+ for (i = 0; i < w; ++i) {
+ const u8 __iomem *b = (const u8 __iomem *)pd;
+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2));
+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
+ rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
+ ++pd;
+ }
+ pd += horiz_offset;
+ }
+ } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
+ rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) {
+ const u16 __iomem *pd = buf;
+ pd += start_offset;
+
+ for (; h; --h) {
+ for (i = 0; i < w; ++i) {
+ rfbi_write_reg(RFBI_PARAM, __raw_readw(pd));
+ ++pd;
+ }
+ pd += horiz_offset;
+ }
+ } else {
+ BUG();
+ }
+
+ rfbi_enable_clocks(0);
+}
+EXPORT_SYMBOL(omap_rfbi_write_pixels);
+
+#ifdef MEASURE_PERF
+static void perf_mark_setup(void)
+{
+ rfbi.perf_setup_time = ktime_get();
+}
+
+static void perf_mark_start(void)
+{
+ rfbi.perf_start_time = ktime_get();
+}
+
+static void perf_show(const char *name)
+{
+ ktime_t t, setup_time, trans_time;
+ u32 total_bytes;
+ u32 setup_us, trans_us, total_us;
+
+ t = ktime_get();
+
+ setup_time = ktime_sub(rfbi.perf_start_time, rfbi.perf_setup_time);
+ setup_us = (u32)ktime_to_us(setup_time);
+ if (setup_us == 0)
+ setup_us = 1;
+
+ trans_time = ktime_sub(t, rfbi.perf_start_time);
+ trans_us = (u32)ktime_to_us(trans_time);
+ if (trans_us == 0)
+ trans_us = 1;
+
+ total_us = setup_us + trans_us;
+
+ total_bytes = rfbi.perf_bytes;
+
+ DSSINFO("%s update %u us + %u us = %u us (%uHz), %u bytes, "
+ "%u kbytes/sec\n",
+ name,
+ setup_us,
+ trans_us,
+ total_us,
+ 1000*1000 / total_us,
+ total_bytes,
+ total_bytes * 1000 / total_us);
+}
+#else
+#define perf_mark_setup()
+#define perf_mark_start()
+#define perf_show(x)
+#endif
+
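+/* Start a DISPC-driven transfer of a width x height pixel area over RFBI.
+ * The transfer completes asynchronously; the FRAMEDONE handler below clears
+ * the RFBI enable bit and resumes processing of the command FIFO. */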
+void rfbi_transfer_area(u16 width, u16 height,
+ void (callback)(void *data), void *data)
+{
+ u32 l;
+
+ /*BUG_ON(callback == 0);*/
+ BUG_ON(rfbi.framedone_callback != NULL);
+
+ DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
+
+ dispc_set_lcd_size(width, height);
+
+ dispc_enable_lcd_out(1);
+
+ rfbi.framedone_callback = callback;
+ rfbi.framedone_callback_data = data;
+
+ rfbi_enable_clocks(1);
+
+ rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
+
+ l = rfbi_read_reg(RFBI_CONTROL);
+ l = FLD_MOD(l, 1, 0, 0); /* enable */
+ if (!rfbi.te_enabled)
+ l = FLD_MOD(l, 1, 4, 4); /* ITE */
+
+ perf_mark_start();
+
+ rfbi_write_reg(RFBI_CONTROL, l);
+}
+
+static void framedone_callback(void *data, u32 mask)
+{
+ void (*callback)(void *data);
+
+ DSSDBG("FRAMEDONE\n");
+
+ perf_show("DISPC");
+
+ REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
+
+ rfbi_enable_clocks(0);
+
+ callback = rfbi.framedone_callback;
+ rfbi.framedone_callback = NULL;
+
+ /*callback(rfbi.framedone_callback_data);*/
+
+ atomic_set(&rfbi.cmd_pending, 0);
+
+ process_cmd_fifo();
+}
+
+#if 1 /* VERBOSE */
+static void rfbi_print_timings(void)
+{
+ u32 l;
+ u32 time;
+
+ l = rfbi_read_reg(RFBI_CONFIG(0));
+ time = 1000000000 / rfbi.l4_khz;
+ if (l & (1 << 4))
+ time *= 2;
+
+ DSSDBG("Tick time %u ps\n", time);
+ l = rfbi_read_reg(RFBI_ONOFF_TIME(0));
+ DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
+ "REONTIME %d, REOFFTIME %d\n",
+ l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
+ (l >> 20) & 0x0f, (l >> 24) & 0x3f);
+
+ l = rfbi_read_reg(RFBI_CYCLE_TIME(0));
+ DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
+ "ACCESSTIME %d\n",
+ (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
+ (l >> 22) & 0x3f);
+}
+#else
+static void rfbi_print_timings(void) {}
+#endif
+
+
+
+
+static u32 extif_clk_period;
+
+static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
+{
+ int bus_tick = extif_clk_period * div;
+ return (ps + bus_tick - 1) / bus_tick * bus_tick;
+}
+
+static int calc_reg_timing(struct rfbi_timings *t, int div)
+{
+ t->clk_div = div;
+
+ t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div);
+
+ t->we_on_time = round_to_extif_ticks(t->we_on_time, div);
+ t->we_off_time = round_to_extif_ticks(t->we_off_time, div);
+ t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div);
+
+ t->re_on_time = round_to_extif_ticks(t->re_on_time, div);
+ t->re_off_time = round_to_extif_ticks(t->re_off_time, div);
+ t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div);
+
+ t->access_time = round_to_extif_ticks(t->access_time, div);
+ t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div);
+ t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div);
+
+ DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n",
+ t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
+ DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n",
+ t->we_on_time, t->we_off_time, t->re_cycle_time,
+ t->we_cycle_time);
+ DSSDBG("[reg]rdaccess %d cspulse %d\n",
+ t->access_time, t->cs_pulse_width);
+
+ return rfbi_convert_timings(t);
+}
+
+static int calc_extif_timings(struct rfbi_timings *t)
+{
+ u32 max_clk_div;
+ int div;
+
+ rfbi_get_clk_info(&extif_clk_period, &max_clk_div);
+ for (div = 1; div <= max_clk_div; div++) {
+ if (calc_reg_timing(t, div) == 0)
+ break;
+ }
+
+ if (div <= max_clk_div)
+ return 0;
+
+ DSSERR("can't setup timings\n");
+ return -1;
+}
+
+
+void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
+{
+ int r;
+
+ if (!t->converted) {
+ r = calc_extif_timings(t);
+ if (r < 0)
+ DSSERR("Failed to calc timings\n");
+ }
+
+ BUG_ON(!t->converted);
+
+ rfbi_enable_clocks(1);
+ rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]);
+ rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]);
+
+ /* TIMEGRANULARITY */
+ REG_FLD_MOD(RFBI_CONFIG(rfbi_module),
+ (t->tim[2] ? 1 : 0), 4, 4);
+
+ rfbi_print_timings();
+ rfbi_enable_clocks(0);
+}
+
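+/* Convert a time in picoseconds to RFBI clock ticks, rounding up. For
+ * example, with L4 at 100 MHz (tick = 10000 ps) and div = 1, 25000 ps
+ * rounds up to 3 ticks. */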
+static int ps_to_rfbi_ticks(int time, int div)
+{
+ unsigned long tick_ps;
+ int ret;
+
+ /* Calculate in picosecs to yield more exact results */
+ tick_ps = 1000000000 / (rfbi.l4_khz) * div;
+
+ ret = (time + tick_ps - 1) / tick_ps;
+
+ return ret;
+}
+
+#ifdef OMAP_RFBI_RATE_LIMIT
+unsigned long rfbi_get_max_tx_rate(void)
+{
+ unsigned long l4_rate, dss1_rate;
+ int min_l4_ticks = 0;
+ int i;
+
+	/* According to TI this can't be calculated, so make the
+	 * adjustments for a couple of known frequencies and warn for
+	 * the others.
+	 */
+ static const struct {
+ unsigned long l4_clk; /* HZ */
+ unsigned long dss1_clk; /* HZ */
+ unsigned long min_l4_ticks;
+ } ftab[] = {
+ { 55, 132, 7, }, /* 7.86 MPix/s */
+ { 110, 110, 12, }, /* 9.16 MPix/s */
+ { 110, 132, 10, }, /* 11 Mpix/s */
+ { 120, 120, 10, }, /* 12 Mpix/s */
+ { 133, 133, 10, }, /* 13.3 Mpix/s */
+ };
+
+ l4_rate = rfbi.l4_khz / 1000;
+ dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000;
+
+ for (i = 0; i < ARRAY_SIZE(ftab); i++) {
+ /* Use a window instead of an exact match, to account
+ * for different DPLL multiplier / divider pairs.
+ */
+ if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
+ abs(ftab[i].dss1_clk - dss1_rate) < 3) {
+ min_l4_ticks = ftab[i].min_l4_ticks;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ftab)) {
+		/* Can't be sure; return the non-rate-limited maximum
+		 * anyway. This might cause a problem only for the tearing
+		 * synchronisation.
+		 */
+ DSSERR("can't determine maximum RFBI transfer rate\n");
+ return rfbi.l4_khz * 1000;
+ }
+ return rfbi.l4_khz * 1000 / min_l4_ticks;
+}
+#else
+int rfbi_get_max_tx_rate(void)
+{
+ return rfbi.l4_khz * 1000;
+}
+#endif
+
+static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
+{
+ *clk_period = 1000000000 / rfbi.l4_khz;
+ *max_clk_div = 2;
+}
+
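+/* Convert the picosecond timings in *t to register tick values and pack them
+ * into t->tim[0] (on/off times), t->tim[1] (cycle times) and t->tim[2]
+ * (clock divider), enforcing the ordering constraints listed below. */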
+static int rfbi_convert_timings(struct rfbi_timings *t)
+{
+ u32 l;
+ int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
+ int actim, recyc, wecyc;
+ int div = t->clk_div;
+
+ if (div <= 0 || div > 2)
+ return -1;
+
+ /* Make sure that after conversion it still holds that:
+ * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
+ * csoff > cson, csoff >= max(weoff, reoff), actim > reon
+ */
+ weon = ps_to_rfbi_ticks(t->we_on_time, div);
+ weoff = ps_to_rfbi_ticks(t->we_off_time, div);
+ if (weoff <= weon)
+ weoff = weon + 1;
+ if (weon > 0x0f)
+ return -1;
+ if (weoff > 0x3f)
+ return -1;
+
+ reon = ps_to_rfbi_ticks(t->re_on_time, div);
+ reoff = ps_to_rfbi_ticks(t->re_off_time, div);
+ if (reoff <= reon)
+ reoff = reon + 1;
+ if (reon > 0x0f)
+ return -1;
+ if (reoff > 0x3f)
+ return -1;
+
+ cson = ps_to_rfbi_ticks(t->cs_on_time, div);
+ csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
+ if (csoff <= cson)
+ csoff = cson + 1;
+ if (csoff < max(weoff, reoff))
+ csoff = max(weoff, reoff);
+ if (cson > 0x0f)
+ return -1;
+ if (csoff > 0x3f)
+ return -1;
+
+ l = cson;
+ l |= csoff << 4;
+ l |= weon << 10;
+ l |= weoff << 14;
+ l |= reon << 20;
+ l |= reoff << 24;
+
+ t->tim[0] = l;
+
+ actim = ps_to_rfbi_ticks(t->access_time, div);
+ if (actim <= reon)
+ actim = reon + 1;
+ if (actim > 0x3f)
+ return -1;
+
+ wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
+ if (wecyc < weoff)
+ wecyc = weoff;
+ if (wecyc > 0x3f)
+ return -1;
+
+ recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
+ if (recyc < reoff)
+ recyc = reoff;
+ if (recyc > 0x3f)
+ return -1;
+
+ cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
+ if (cs_pulse > 0x3f)
+ return -1;
+
+ l = wecyc;
+ l |= recyc << 6;
+ l |= cs_pulse << 12;
+ l |= actim << 22;
+
+ t->tim[1] = l;
+
+ t->tim[2] = div - 1;
+
+ t->converted = 1;
+
+ return 0;
+}
+
+/* xxx FIX module selection missing */
+int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
+ unsigned hs_pulse_time, unsigned vs_pulse_time,
+ int hs_pol_inv, int vs_pol_inv, int extif_div)
+{
+ int hs, vs;
+ int min;
+ u32 l;
+
+ hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
+ vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
+ if (hs < 2)
+ return -EDOM;
+ if (mode == OMAP_DSS_RFBI_TE_MODE_2)
+ min = 2;
+ else /* OMAP_DSS_RFBI_TE_MODE_1 */
+ min = 4;
+ if (vs < min)
+ return -EDOM;
+ if (vs == hs)
+ return -EINVAL;
+ rfbi.te_mode = mode;
+ DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n",
+ mode, hs, vs, hs_pol_inv, vs_pol_inv);
+
+ rfbi_enable_clocks(1);
+ rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
+ rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
+
+ l = rfbi_read_reg(RFBI_CONFIG(0));
+ if (hs_pol_inv)
+ l &= ~(1 << 21);
+ else
+ l |= 1 << 21;
+ if (vs_pol_inv)
+ l &= ~(1 << 20);
+ else
+		l |= 1 << 20;
+	rfbi_write_reg(RFBI_CONFIG(0), l);
+	rfbi_enable_clocks(0);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_rfbi_setup_te);
+
+/* xxx FIX module selection missing */
+int omap_rfbi_enable_te(bool enable, unsigned line)
+{
+ u32 l;
+
+ DSSDBG("te %d line %d mode %d\n", enable, line, rfbi.te_mode);
+ if (line > (1 << 11) - 1)
+ return -EINVAL;
+
+ rfbi_enable_clocks(1);
+ l = rfbi_read_reg(RFBI_CONFIG(0));
+ l &= ~(0x3 << 2);
+ if (enable) {
+ rfbi.te_enabled = 1;
+ l |= rfbi.te_mode << 2;
+ } else
+ rfbi.te_enabled = 0;
+ rfbi_write_reg(RFBI_CONFIG(0), l);
+ rfbi_write_reg(RFBI_LINE_NUMBER, line);
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_rfbi_enable_te);
+
+#if 0
+static void rfbi_enable_config(int enable1, int enable2)
+{
+ u32 l;
+ int cs = 0;
+
+ if (enable1)
+ cs |= 1<<0;
+ if (enable2)
+ cs |= 1<<1;
+
+ rfbi_enable_clocks(1);
+
+ l = rfbi_read_reg(RFBI_CONTROL);
+
+ l = FLD_MOD(l, cs, 3, 2);
+ l = FLD_MOD(l, 0, 1, 1);
+
+ rfbi_write_reg(RFBI_CONTROL, l);
+
+
+ l = rfbi_read_reg(RFBI_CONFIG(0));
+ l = FLD_MOD(l, 0, 3, 2); /* TRIGGERMODE: ITE */
+ /*l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
+ /*l |= FLD_VAL(0, 8, 7); */ /* L4FORMAT, 1pix/L4 */
+
+ l = FLD_MOD(l, 0, 16, 16); /* A0POLARITY */
+ l = FLD_MOD(l, 1, 20, 20); /* TE_VSYNC_POLARITY */
+ l = FLD_MOD(l, 1, 21, 21); /* HSYNCPOLARITY */
+
+ l = FLD_MOD(l, OMAP_DSS_RFBI_PARALLELMODE_8, 1, 0);
+ rfbi_write_reg(RFBI_CONFIG(0), l);
+
+ rfbi_enable_clocks(0);
+}
+#endif
+
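+/* Program RFBI_CONFIG and the DATA_CYCLE registers for the given bits-per-
+ * pixel and number of data lines, deriving how one pixel is split across
+ * interface cycles (e.g. 16 bpp over 8 lines = 2 cycles per pixel). */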
+int rfbi_configure(int rfbi_module, int bpp, int lines)
+{
+ u32 l;
+ int cycle1 = 0, cycle2 = 0, cycle3 = 0;
+ enum omap_rfbi_cycleformat cycleformat;
+ enum omap_rfbi_datatype datatype;
+ enum omap_rfbi_parallelmode parallelmode;
+
+ switch (bpp) {
+ case 12:
+ datatype = OMAP_DSS_RFBI_DATATYPE_12;
+ break;
+ case 16:
+ datatype = OMAP_DSS_RFBI_DATATYPE_16;
+ break;
+ case 18:
+ datatype = OMAP_DSS_RFBI_DATATYPE_18;
+ break;
+ case 24:
+ datatype = OMAP_DSS_RFBI_DATATYPE_24;
+ break;
+ default:
+ BUG();
+ return 1;
+ }
+ rfbi.datatype = datatype;
+
+ switch (lines) {
+ case 8:
+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8;
+ break;
+ case 9:
+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9;
+ break;
+ case 12:
+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12;
+ break;
+ case 16:
+ parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16;
+ break;
+ default:
+ BUG();
+ return 1;
+ }
+ rfbi.parallelmode = parallelmode;
+
+ if ((bpp % lines) == 0) {
+ switch (bpp / lines) {
+ case 1:
+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1;
+ break;
+ case 2:
+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1;
+ break;
+ case 3:
+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1;
+ break;
+ default:
+ BUG();
+ return 1;
+ }
+ } else if ((2 * bpp % lines) == 0) {
+ if ((2 * bpp / lines) == 3)
+ cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2;
+ else {
+ BUG();
+ return 1;
+ }
+ } else {
+ BUG();
+ return 1;
+ }
+
+ switch (cycleformat) {
+ case OMAP_DSS_RFBI_CYCLEFORMAT_1_1:
+ cycle1 = lines;
+ break;
+
+ case OMAP_DSS_RFBI_CYCLEFORMAT_2_1:
+ cycle1 = lines;
+ cycle2 = lines;
+ break;
+
+ case OMAP_DSS_RFBI_CYCLEFORMAT_3_1:
+ cycle1 = lines;
+ cycle2 = lines;
+ cycle3 = lines;
+ break;
+
+ case OMAP_DSS_RFBI_CYCLEFORMAT_3_2:
+ cycle1 = lines;
+ cycle2 = (lines / 2) | ((lines / 2) << 16);
+ cycle3 = (lines << 16);
+ break;
+ }
+
+ rfbi_enable_clocks(1);
+
+ REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */
+
+ l = 0;
+ l |= FLD_VAL(parallelmode, 1, 0);
+ l |= FLD_VAL(0, 3, 2); /* TRIGGERMODE: ITE */
+ l |= FLD_VAL(0, 4, 4); /* TIMEGRANULARITY */
+ l |= FLD_VAL(datatype, 6, 5);
+ /* l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
+ l |= FLD_VAL(0, 8, 7); /* L4FORMAT, 1pix/L4 */
+ l |= FLD_VAL(cycleformat, 10, 9);
+ l |= FLD_VAL(0, 12, 11); /* UNUSEDBITS */
+ l |= FLD_VAL(0, 16, 16); /* A0POLARITY */
+ l |= FLD_VAL(0, 17, 17); /* REPOLARITY */
+ l |= FLD_VAL(0, 18, 18); /* WEPOLARITY */
+ l |= FLD_VAL(0, 19, 19); /* CSPOLARITY */
+ l |= FLD_VAL(1, 20, 20); /* TE_VSYNC_POLARITY */
+ l |= FLD_VAL(1, 21, 21); /* HSYNCPOLARITY */
+ rfbi_write_reg(RFBI_CONFIG(rfbi_module), l);
+
+ rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1);
+ rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2);
+ rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3);
+
+
+ l = rfbi_read_reg(RFBI_CONTROL);
+ l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */
+ l = FLD_MOD(l, 0, 1, 1); /* clear bypass */
+ rfbi_write_reg(RFBI_CONTROL, l);
+
+
+ DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n",
+ bpp, lines, cycle1, cycle2, cycle3);
+
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+EXPORT_SYMBOL(rfbi_configure);
+
+static int rfbi_find_display(struct omap_dss_device *dssdev)
+{
+ if (dssdev == rfbi.dssdev[0])
+ return 0;
+
+ if (dssdev == rfbi.dssdev[1])
+ return 1;
+
+ BUG();
+ return -1;
+}
+
+
+static void signal_fifo_waiters(void)
+{
+ if (atomic_read(&rfbi.cmd_fifo_full) > 0) {
+ /* DSSDBG("SIGNALING: Fifo not full for waiter!\n"); */
+ complete(&rfbi.cmd_done);
+ atomic_dec(&rfbi.cmd_fifo_full);
+ }
+}
+
+/* returns 1 for async op, and 0 for sync op */
+static int do_update(struct omap_dss_device *dssdev, struct update_region *upd)
+{
+ u16 x = upd->x;
+ u16 y = upd->y;
+ u16 w = upd->w;
+ u16 h = upd->h;
+
+ perf_mark_setup();
+
+ if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+ /*dssdev->driver->enable_te(dssdev, 1); */
+ dss_setup_partial_planes(dssdev, &x, &y, &w, &h);
+ }
+
+#ifdef MEASURE_PERF
+ rfbi.perf_bytes = w * h * 2; /* XXX always 16bit */
+#endif
+
+ dssdev->driver->setup_update(dssdev, x, y, w, h);
+
+ if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+ rfbi_transfer_area(w, h, NULL, NULL);
+ return 1;
+ } else {
+ struct omap_overlay *ovl;
+ void __iomem *addr;
+ int scr_width;
+
+ ovl = dssdev->manager->overlays[0];
+ scr_width = ovl->info.screen_width;
+ addr = ovl->info.vaddr;
+
+ omap_rfbi_write_pixels(addr, scr_width, x, y, w, h);
+
+ perf_show("L4");
+
+ return 0;
+ }
+}
+
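+/* Drain the command FIFO. The cmd_pending counter guarantees that only one
+ * caller processes the FIFO at a time; an asynchronous update leaves it set,
+ * and the FRAMEDONE handler re-enters here to continue with the next
+ * command. */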
+static void process_cmd_fifo(void)
+{
+ int len;
+ struct update_param p;
+ struct omap_dss_device *dssdev;
+ unsigned long flags;
+
+ if (atomic_inc_return(&rfbi.cmd_pending) != 1)
+ return;
+
+ while (true) {
+ spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+
+ len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p,
+ sizeof(struct update_param));
+ if (len == 0) {
+ DSSDBG("nothing more in fifo\n");
+ atomic_set(&rfbi.cmd_pending, 0);
+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ break;
+ }
+
+ /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/
+
+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+
+ BUG_ON(len != sizeof(struct update_param));
+ BUG_ON(p.rfbi_module > 1);
+
+ dssdev = rfbi.dssdev[p.rfbi_module];
+
+ if (p.cmd == RFBI_CMD_UPDATE) {
+ if (do_update(dssdev, &p.par.r))
+ break; /* async op */
+ } else if (p.cmd == RFBI_CMD_SYNC) {
+ DSSDBG("Signaling SYNC done!\n");
+ complete(p.par.sync);
+ } else
+ BUG();
+ }
+
+ signal_fifo_waiters();
+}
+
+static void rfbi_push_cmd(struct update_param *p)
+{
+ int ret;
+
+ while (1) {
+ unsigned long flags;
+ int available;
+
+ spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+ available = RFBI_CMD_FIFO_LEN_BYTES -
+ __kfifo_len(rfbi.cmd_fifo);
+
+/* DSSDBG("%d bytes left in fifo\n", available); */
+ if (available < sizeof(struct update_param)) {
+ DSSDBG("Going to wait because FIFO FULL..\n");
+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ atomic_inc(&rfbi.cmd_fifo_full);
+ wait_for_completion(&rfbi.cmd_done);
+ /*DSSDBG("Woke up because fifo not full anymore\n");*/
+ continue;
+ }
+
+ ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p,
+ sizeof(struct update_param));
+/* DSSDBG("pushed %d bytes\n", ret);*/
+
+ spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+
+ BUG_ON(ret != sizeof(struct update_param));
+
+ break;
+ }
+}
+
+static void rfbi_push_update(int rfbi_module, int x, int y, int w, int h)
+{
+ struct update_param p;
+
+ p.rfbi_module = rfbi_module;
+ p.cmd = RFBI_CMD_UPDATE;
+
+ p.par.r.x = x;
+ p.par.r.y = y;
+ p.par.r.w = w;
+ p.par.r.h = h;
+
+ DSSDBG("RFBI pushed %d,%d %dx%d\n", x, y, w, h);
+
+ rfbi_push_cmd(&p);
+
+ process_cmd_fifo();
+}
+
+static void rfbi_push_sync(int rfbi_module, struct completion *sync_comp)
+{
+ struct update_param p;
+
+ p.rfbi_module = rfbi_module;
+ p.cmd = RFBI_CMD_SYNC;
+ p.par.sync = sync_comp;
+
+ rfbi_push_cmd(&p);
+
+ DSSDBG("RFBI sync pushed to cmd fifo\n");
+
+ process_cmd_fifo();
+}
+
+void rfbi_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ DUMPREG(RFBI_REVISION);
+ DUMPREG(RFBI_SYSCONFIG);
+ DUMPREG(RFBI_SYSSTATUS);
+ DUMPREG(RFBI_CONTROL);
+ DUMPREG(RFBI_PIXEL_CNT);
+ DUMPREG(RFBI_LINE_NUMBER);
+ DUMPREG(RFBI_CMD);
+ DUMPREG(RFBI_PARAM);
+ DUMPREG(RFBI_DATA);
+ DUMPREG(RFBI_READ);
+ DUMPREG(RFBI_STATUS);
+
+ DUMPREG(RFBI_CONFIG(0));
+ DUMPREG(RFBI_ONOFF_TIME(0));
+ DUMPREG(RFBI_CYCLE_TIME(0));
+ DUMPREG(RFBI_DATA_CYCLE1(0));
+ DUMPREG(RFBI_DATA_CYCLE2(0));
+ DUMPREG(RFBI_DATA_CYCLE3(0));
+
+ DUMPREG(RFBI_CONFIG(1));
+ DUMPREG(RFBI_ONOFF_TIME(1));
+ DUMPREG(RFBI_CYCLE_TIME(1));
+ DUMPREG(RFBI_DATA_CYCLE1(1));
+ DUMPREG(RFBI_DATA_CYCLE2(1));
+ DUMPREG(RFBI_DATA_CYCLE3(1));
+
+ DUMPREG(RFBI_VSYNC_WIDTH);
+ DUMPREG(RFBI_HSYNC_WIDTH);
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+#undef DUMPREG
+}
+
+int rfbi_init(void)
+{
+ u32 rev;
+ u32 l;
+
+ spin_lock_init(&rfbi.cmd_lock);
+ rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL,
+ &rfbi.cmd_lock);
+ if (IS_ERR(rfbi.cmd_fifo))
+ return -ENOMEM;
+
+ init_completion(&rfbi.cmd_done);
+ atomic_set(&rfbi.cmd_fifo_full, 0);
+ atomic_set(&rfbi.cmd_pending, 0);
+
+ rfbi.base = ioremap(RFBI_BASE, SZ_256);
+ if (!rfbi.base) {
+ DSSERR("can't ioremap RFBI\n");
+ return -ENOMEM;
+ }
+
+ rfbi_enable_clocks(1);
+
+ msleep(10);
+
+ rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+
+ /* Enable autoidle and smart-idle */
+ l = rfbi_read_reg(RFBI_SYSCONFIG);
+ l |= (1 << 0) | (2 << 3);
+ rfbi_write_reg(RFBI_SYSCONFIG, l);
+
+ rev = rfbi_read_reg(RFBI_REVISION);
+ printk(KERN_INFO "OMAP RFBI rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ rfbi_enable_clocks(0);
+
+ return 0;
+}
+
+void rfbi_exit(void)
+{
+ DSSDBG("rfbi_exit\n");
+
+ kfifo_free(rfbi.cmd_fifo);
+
+ iounmap(rfbi.base);
+}
+
+/* struct omap_dss_device support */
+static int rfbi_display_update(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
+ int rfbi_module;
+
+ if (w == 0 || h == 0)
+ return 0;
+
+ rfbi_module = rfbi_find_display(dssdev);
+
+ rfbi_push_update(rfbi_module, x, y, w, h);
+
+ return 0;
+}
+
+static int rfbi_display_sync(struct omap_dss_device *dssdev)
+{
+ struct completion sync_comp;
+ int rfbi_module;
+
+ rfbi_module = rfbi_find_display(dssdev);
+
+ init_completion(&sync_comp);
+ rfbi_push_sync(rfbi_module, &sync_comp);
+ DSSDBG("Waiting for SYNC to happen...\n");
+ wait_for_completion(&sync_comp);
+ DSSDBG("Released from SYNC\n");
+ return 0;
+}
+
+static int rfbi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
+{
+ dssdev->driver->enable_te(dssdev, enable);
+ return 0;
+}
+
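+/*
+ * Bring up the RFBI display path: claim the device, hook up the
+ * FRAMEDONE interrupt, put DISPC into RFBI mode and configure the
+ * interface before enabling the panel driver.
+ */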
+static int rfbi_display_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err0;
+ }
+
+ r = omap_dispc_register_isr(framedone_callback, NULL,
+ DISPC_IRQ_FRAMEDONE);
+ if (r) {
+ DSSERR("can't get FRAMEDONE irq\n");
+ goto err1;
+ }
+
+ dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+
+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI);
+
+ dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
+
+ rfbi_configure(dssdev->phy.rfbi.channel,
+ dssdev->ctrl.pixel_size,
+ dssdev->phy.rfbi.data_lines);
+
+ rfbi_set_timings(dssdev->phy.rfbi.channel,
+ &dssdev->ctrl.rfbi_timings);
+
+
+ if (dssdev->driver->enable) {
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ goto err2;
+ }
+
+ return 0;
+err2:
+ omap_dispc_unregister_isr(framedone_callback, NULL,
+ DISPC_IRQ_FRAMEDONE);
+err1:
+ omap_dss_stop_device(dssdev);
+err0:
+ return r;
+}
+
+static void rfbi_display_disable(struct omap_dss_device *dssdev)
+{
+ dssdev->driver->disable(dssdev);
+ omap_dispc_unregister_isr(framedone_callback, NULL,
+ DISPC_IRQ_FRAMEDONE);
+ omap_dss_stop_device(dssdev);
+}
+
+int rfbi_init_display(struct omap_dss_device *dssdev)
+{
+ dssdev->enable = rfbi_display_enable;
+ dssdev->disable = rfbi_display_disable;
+ dssdev->update = rfbi_display_update;
+ dssdev->sync = rfbi_display_sync;
+ dssdev->enable_te = rfbi_display_enable_te;
+
+ rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
+
+ dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+
+ return 0;
+}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
new file mode 100644
index 00000000000..c24f307d3da
--- /dev/null
+++ b/drivers/video/omap2/dss/sdi.c
@@ -0,0 +1,277 @@
+/*
+ * linux/drivers/video/omap2/dss/sdi.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "SDI"
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+#include "dss.h"
+
+static struct {
+ bool skip_init;
+ bool update_enabled;
+} sdi;
+
+static void sdi_basic_init(void)
+{
+ dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
+
+ dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
+ dispc_set_tft_data_lines(24);
+ dispc_lcd_enable_signal_polarity(1);
+}
+
+static int sdi_display_enable(struct omap_dss_device *dssdev)
+{
+ struct omap_video_timings *t = &dssdev->panel.timings;
+ struct dss_clock_info dss_cinfo;
+ struct dispc_clock_info dispc_cinfo;
+ u16 lck_div, pck_div;
+ unsigned long fck;
+ unsigned long pck;
+ int r;
+
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+ goto err0;
+ }
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ DSSERR("dssdev already enabled\n");
+ r = -EINVAL;
+ goto err1;
+ }
+
+ /* In the skip_init case, sdi_init() has already enabled the clocks */
+ if (!sdi.skip_init)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ sdi_basic_init();
+
+ /* 15.5.9.1.2 */
+ dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
+
+ dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
+ dssdev->panel.acb);
+
+ if (!sdi.skip_init) {
+ r = dss_calc_clock_div(1, t->pixel_clock * 1000,
+ &dss_cinfo, &dispc_cinfo);
+ } else {
+ r = dss_get_clock_div(&dss_cinfo);
+ r = dispc_get_clock_div(&dispc_cinfo);
+ }
+
+ if (r)
+ goto err2;
+
+ fck = dss_cinfo.fck;
+ lck_div = dispc_cinfo.lck_div;
+ pck_div = dispc_cinfo.pck_div;
+
+ pck = fck / lck_div / pck_div / 1000;
+
+ if (pck != t->pixel_clock) {
+ DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
+ "got %lu kHz\n",
+ t->pixel_clock, pck);
+
+ t->pixel_clock = pck;
+ }
+
+
+ dispc_set_lcd_timings(t);
+
+ r = dss_set_clock_div(&dss_cinfo);
+ if (r)
+ goto err2;
+
+ r = dispc_set_clock_div(&dispc_cinfo);
+ if (r)
+ goto err2;
+
+ if (!sdi.skip_init) {
+ dss_sdi_init(dssdev->phy.sdi.datapairs);
+ r = dss_sdi_enable();
+ if (r)
+ goto err1;
+ mdelay(2);
+ }
+
+ dispc_enable_lcd_out(1);
+
+ if (dssdev->driver->enable) {
+ r = dssdev->driver->enable(dssdev);
+ if (r)
+ goto err3;
+ }
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ sdi.skip_init = 0;
+
+ return 0;
+err3:
+ dispc_enable_lcd_out(0);
+err2:
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+err1:
+ omap_dss_stop_device(dssdev);
+err0:
+ return r;
+}
+
+static int sdi_display_resume(struct omap_dss_device *dssdev);
+
+static void sdi_display_disable(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+ return;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
+ if (sdi_display_resume(dssdev))
+ return;
+
+ if (dssdev->driver->disable)
+ dssdev->driver->disable(dssdev);
+
+ dispc_enable_lcd_out(0);
+
+ dss_sdi_disable();
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+
+ omap_dss_stop_device(dssdev);
+}
+
+static int sdi_display_suspend(struct omap_dss_device *dssdev)
+{
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return -EINVAL;
+
+ if (dssdev->driver->suspend)
+ dssdev->driver->suspend(dssdev);
+
+ dispc_enable_lcd_out(0);
+
+ dss_sdi_disable();
+
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
+ return 0;
+}
+
+static int sdi_display_resume(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
+ return -EINVAL;
+
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+
+ r = dss_sdi_enable();
+ if (r)
+ goto err;
+ mdelay(2);
+
+ dispc_enable_lcd_out(1);
+
+ if (dssdev->driver->resume)
+ dssdev->driver->resume(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+err:
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ return r;
+}
+
+static int sdi_display_set_update_mode(struct omap_dss_device *dssdev,
+ enum omap_dss_update_mode mode)
+{
+ if (mode == OMAP_DSS_UPDATE_MANUAL)
+ return -EINVAL;
+
+ if (mode == OMAP_DSS_UPDATE_DISABLED) {
+ dispc_enable_lcd_out(0);
+ sdi.update_enabled = 0;
+ } else {
+ dispc_enable_lcd_out(1);
+ sdi.update_enabled = 1;
+ }
+
+ return 0;
+}
+
+static enum omap_dss_update_mode sdi_display_get_update_mode(
+ struct omap_dss_device *dssdev)
+{
+ return sdi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
+ OMAP_DSS_UPDATE_DISABLED;
+}
+
+static void sdi_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+int sdi_init_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("SDI init\n");
+
+ dssdev->enable = sdi_display_enable;
+ dssdev->disable = sdi_display_disable;
+ dssdev->suspend = sdi_display_suspend;
+ dssdev->resume = sdi_display_resume;
+ dssdev->set_update_mode = sdi_display_set_update_mode;
+ dssdev->get_update_mode = sdi_display_get_update_mode;
+ dssdev->get_timings = sdi_get_timings;
+
+ return 0;
+}
+
+int sdi_init(bool skip_init)
+{
+ /* we store this for the first display enable, then clear it */
+ sdi.skip_init = skip_init;
+
+ /*
+ * Enable the clocks already here; otherwise they would be toggled
+ * off and on again before sdi_display_enable() is called.
+ */
+ if (skip_init)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ return 0;
+}
+
+void sdi_exit(void)
+{
+}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
new file mode 100644
index 00000000000..749a5a0f5be
--- /dev/null
+++ b/drivers/video/omap2/dss/venc.c
@@ -0,0 +1,797 @@
+/*
+ * linux/drivers/video/omap2/dss/venc.c
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * VENC settings from TI's DSS driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "VENC"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <plat/display.h>
+#include <plat/cpu.h>
+
+#include "dss.h"
+
+#define VENC_BASE 0x48050C00
+
+/* Venc registers */
+#define VENC_REV_ID 0x00
+#define VENC_STATUS 0x04
+#define VENC_F_CONTROL 0x08
+#define VENC_VIDOUT_CTRL 0x10
+#define VENC_SYNC_CTRL 0x14
+#define VENC_LLEN 0x1C
+#define VENC_FLENS 0x20
+#define VENC_HFLTR_CTRL 0x24
+#define VENC_CC_CARR_WSS_CARR 0x28
+#define VENC_C_PHASE 0x2C
+#define VENC_GAIN_U 0x30
+#define VENC_GAIN_V 0x34
+#define VENC_GAIN_Y 0x38
+#define VENC_BLACK_LEVEL 0x3C
+#define VENC_BLANK_LEVEL 0x40
+#define VENC_X_COLOR 0x44
+#define VENC_M_CONTROL 0x48
+#define VENC_BSTAMP_WSS_DATA 0x4C
+#define VENC_S_CARR 0x50
+#define VENC_LINE21 0x54
+#define VENC_LN_SEL 0x58
+#define VENC_L21__WC_CTL 0x5C
+#define VENC_HTRIGGER_VTRIGGER 0x60
+#define VENC_SAVID__EAVID 0x64
+#define VENC_FLEN__FAL 0x68
+#define VENC_LAL__PHASE_RESET 0x6C
+#define VENC_HS_INT_START_STOP_X 0x70
+#define VENC_HS_EXT_START_STOP_X 0x74
+#define VENC_VS_INT_START_X 0x78
+#define VENC_VS_INT_STOP_X__VS_INT_START_Y 0x7C
+#define VENC_VS_INT_STOP_Y__VS_EXT_START_X 0x80
+#define VENC_VS_EXT_STOP_X__VS_EXT_START_Y 0x84
+#define VENC_VS_EXT_STOP_Y 0x88
+#define VENC_AVID_START_STOP_X 0x90
+#define VENC_AVID_START_STOP_Y 0x94
+#define VENC_FID_INT_START_X__FID_INT_START_Y 0xA0
+#define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X 0xA4
+#define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y 0xA8
+#define VENC_TVDETGP_INT_START_STOP_X 0xB0
+#define VENC_TVDETGP_INT_START_STOP_Y 0xB4
+#define VENC_GEN_CTRL 0xB8
+#define VENC_OUTPUT_CONTROL 0xC4
+#define VENC_OUTPUT_TEST 0xC8
+#define VENC_DAC_B__DAC_C 0xC8
+
+struct venc_config {
+ u32 f_control;
+ u32 vidout_ctrl;
+ u32 sync_ctrl;
+ u32 llen;
+ u32 flens;
+ u32 hfltr_ctrl;
+ u32 cc_carr_wss_carr;
+ u32 c_phase;
+ u32 gain_u;
+ u32 gain_v;
+ u32 gain_y;
+ u32 black_level;
+ u32 blank_level;
+ u32 x_color;
+ u32 m_control;
+ u32 bstamp_wss_data;
+ u32 s_carr;
+ u32 line21;
+ u32 ln_sel;
+ u32 l21__wc_ctl;
+ u32 htrigger_vtrigger;
+ u32 savid__eavid;
+ u32 flen__fal;
+ u32 lal__phase_reset;
+ u32 hs_int_start_stop_x;
+ u32 hs_ext_start_stop_x;
+ u32 vs_int_start_x;
+ u32 vs_int_stop_x__vs_int_start_y;
+ u32 vs_int_stop_y__vs_ext_start_x;
+ u32 vs_ext_stop_x__vs_ext_start_y;
+ u32 vs_ext_stop_y;
+ u32 avid_start_stop_x;
+ u32 avid_start_stop_y;
+ u32 fid_int_start_x__fid_int_start_y;
+ u32 fid_int_offset_y__fid_ext_start_x;
+ u32 fid_ext_start_y__fid_ext_offset_y;
+ u32 tvdetgp_int_start_stop_x;
+ u32 tvdetgp_int_start_stop_y;
+ u32 gen_ctrl;
+};
+
+/* from TRM */
+static const struct venc_config venc_config_pal_trm = {
+ .f_control = 0,
+ .vidout_ctrl = 1,
+ .sync_ctrl = 0x40,
+ .llen = 0x35F, /* 863 */
+ .flens = 0x270, /* 624 */
+ .hfltr_ctrl = 0,
+ .cc_carr_wss_carr = 0x2F7225ED,
+ .c_phase = 0,
+ .gain_u = 0x111,
+ .gain_v = 0x181,
+ .gain_y = 0x140,
+ .black_level = 0x3B,
+ .blank_level = 0x3B,
+ .x_color = 0x7,
+ .m_control = 0x2,
+ .bstamp_wss_data = 0x3F,
+ .s_carr = 0x2A098ACB,
+ .line21 = 0,
+ .ln_sel = 0x01290015,
+ .l21__wc_ctl = 0x0000F603,
+ .htrigger_vtrigger = 0,
+
+ .savid__eavid = 0x06A70108,
+ .flen__fal = 0x00180270,
+ .lal__phase_reset = 0x00040135,
+ .hs_int_start_stop_x = 0x00880358,
+ .hs_ext_start_stop_x = 0x000F035F,
+ .vs_int_start_x = 0x01A70000,
+ .vs_int_stop_x__vs_int_start_y = 0x000001A7,
+ .vs_int_stop_y__vs_ext_start_x = 0x01AF0000,
+ .vs_ext_stop_x__vs_ext_start_y = 0x000101AF,
+ .vs_ext_stop_y = 0x00000025,
+ .avid_start_stop_x = 0x03530083,
+ .avid_start_stop_y = 0x026C002E,
+ .fid_int_start_x__fid_int_start_y = 0x0001008A,
+ .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
+ .fid_ext_start_y__fid_ext_offset_y = 0x01380001,
+
+ .tvdetgp_int_start_stop_x = 0x00140001,
+ .tvdetgp_int_start_stop_y = 0x00010001,
+ .gen_ctrl = 0x00FF0000,
+};
+
+/* from TRM */
+static const struct venc_config venc_config_ntsc_trm = {
+ .f_control = 0,
+ .vidout_ctrl = 1,
+ .sync_ctrl = 0x8040,
+ .llen = 0x359,
+ .flens = 0x20C,
+ .hfltr_ctrl = 0,
+ .cc_carr_wss_carr = 0x043F2631,
+ .c_phase = 0,
+ .gain_u = 0x102,
+ .gain_v = 0x16C,
+ .gain_y = 0x12F,
+ .black_level = 0x43,
+ .blank_level = 0x38,
+ .x_color = 0x7,
+ .m_control = 0x1,
+ .bstamp_wss_data = 0x38,
+ .s_carr = 0x21F07C1F,
+ .line21 = 0,
+ .ln_sel = 0x01310011,
+ .l21__wc_ctl = 0x0000F003,
+ .htrigger_vtrigger = 0,
+
+ .savid__eavid = 0x069300F4,
+ .flen__fal = 0x0016020C,
+ .lal__phase_reset = 0x00060107,
+ .hs_int_start_stop_x = 0x008E0350,
+ .hs_ext_start_stop_x = 0x000F0359,
+ .vs_int_start_x = 0x01A00000,
+ .vs_int_stop_x__vs_int_start_y = 0x020701A0,
+ .vs_int_stop_y__vs_ext_start_x = 0x01AC0024,
+ .vs_ext_stop_x__vs_ext_start_y = 0x020D01AC,
+ .vs_ext_stop_y = 0x00000006,
+ .avid_start_stop_x = 0x03480078,
+ .avid_start_stop_y = 0x02060024,
+ .fid_int_start_x__fid_int_start_y = 0x0001008A,
+ .fid_int_offset_y__fid_ext_start_x = 0x01AC0106,
+ .fid_ext_start_y__fid_ext_offset_y = 0x01060006,
+
+ .tvdetgp_int_start_stop_x = 0x00140001,
+ .tvdetgp_int_start_stop_y = 0x00010001,
+ .gen_ctrl = 0x00F90000,
+};
+
+static const struct venc_config venc_config_pal_bdghi = {
+ .f_control = 0,
+ .vidout_ctrl = 0,
+ .sync_ctrl = 0,
+ .hfltr_ctrl = 0,
+ .x_color = 0,
+ .line21 = 0,
+ .ln_sel = 21,
+ .htrigger_vtrigger = 0,
+ .tvdetgp_int_start_stop_x = 0x00140001,
+ .tvdetgp_int_start_stop_y = 0x00010001,
+ .gen_ctrl = 0x00FB0000,
+
+ .llen = 864-1,
+ .flens = 625-1,
+ .cc_carr_wss_carr = 0x2F7625ED,
+ .c_phase = 0xDF,
+ .gain_u = 0x111,
+ .gain_v = 0x181,
+ .gain_y = 0x140,
+ .black_level = 0x3e,
+ .blank_level = 0x3e,
+ .m_control = 0<<2 | 1<<1,
+ .bstamp_wss_data = 0x42,
+ .s_carr = 0x2a098acb,
+ .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0,
+ .savid__eavid = 0x06A70108,
+ .flen__fal = 23<<16 | 624<<0,
+ .lal__phase_reset = 2<<17 | 310<<0,
+ .hs_int_start_stop_x = 0x00920358,
+ .hs_ext_start_stop_x = 0x000F035F,
+ .vs_int_start_x = 0x1a7<<16,
+ .vs_int_stop_x__vs_int_start_y = 0x000601A7,
+ .vs_int_stop_y__vs_ext_start_x = 0x01AF0036,
+ .vs_ext_stop_x__vs_ext_start_y = 0x27101af,
+ .vs_ext_stop_y = 0x05,
+ .avid_start_stop_x = 0x03530082,
+ .avid_start_stop_y = 0x0270002E,
+ .fid_int_start_x__fid_int_start_y = 0x0005008A,
+ .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
+ .fid_ext_start_y__fid_ext_offset_y = 0x01380005,
+};
+
+const struct omap_video_timings omap_dss_pal_timings = {
+ .x_res = 720,
+ .y_res = 574,
+ .pixel_clock = 13500,
+ .hsw = 64,
+ .hfp = 12,
+ .hbp = 68,
+ .vsw = 5,
+ .vfp = 5,
+ .vbp = 41,
+};
+EXPORT_SYMBOL(omap_dss_pal_timings);
+
+const struct omap_video_timings omap_dss_ntsc_timings = {
+ .x_res = 720,
+ .y_res = 482,
+ .pixel_clock = 13500,
+ .hsw = 64,
+ .hfp = 16,
+ .hbp = 58,
+ .vsw = 6,
+ .vfp = 6,
+ .vbp = 31,
+};
+EXPORT_SYMBOL(omap_dss_ntsc_timings);
+
+static struct {
+ void __iomem *base;
+ struct mutex venc_lock;
+ u32 wss_data;
+ struct regulator *vdda_dac_reg;
+} venc;
+
+static inline void venc_write_reg(int idx, u32 val)
+{
+ __raw_writel(val, venc.base + idx);
+}
+
+static inline u32 venc_read_reg(int idx)
+{
+ u32 l = __raw_readl(venc.base + idx);
+ return l;
+}
+
+static void venc_write_config(const struct venc_config *config)
+{
+ DSSDBG("write venc conf\n");
+
+ venc_write_reg(VENC_LLEN, config->llen);
+ venc_write_reg(VENC_FLENS, config->flens);
+ venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
+ venc_write_reg(VENC_C_PHASE, config->c_phase);
+ venc_write_reg(VENC_GAIN_U, config->gain_u);
+ venc_write_reg(VENC_GAIN_V, config->gain_v);
+ venc_write_reg(VENC_GAIN_Y, config->gain_y);
+ venc_write_reg(VENC_BLACK_LEVEL, config->black_level);
+ venc_write_reg(VENC_BLANK_LEVEL, config->blank_level);
+ venc_write_reg(VENC_M_CONTROL, config->m_control);
+ venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+ venc.wss_data);
+ venc_write_reg(VENC_S_CARR, config->s_carr);
+ venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl);
+ venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid);
+ venc_write_reg(VENC_FLEN__FAL, config->flen__fal);
+ venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset);
+ venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x);
+ venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x);
+ venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x);
+ venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y,
+ config->vs_int_stop_x__vs_int_start_y);
+ venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X,
+ config->vs_int_stop_y__vs_ext_start_x);
+ venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
+ config->vs_ext_stop_x__vs_ext_start_y);
+ venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
+ venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x);
+ venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
+ venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y,
+ config->fid_int_start_x__fid_int_start_y);
+ venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
+ config->fid_int_offset_y__fid_ext_start_x);
+ venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
+ config->fid_ext_start_y__fid_ext_offset_y);
+
+ venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C));
+ venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl);
+ venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl);
+ venc_write_reg(VENC_X_COLOR, config->x_color);
+ venc_write_reg(VENC_LINE21, config->line21);
+ venc_write_reg(VENC_LN_SEL, config->ln_sel);
+ venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
+ venc_write_reg(VENC_TVDETGP_INT_START_STOP_X,
+ config->tvdetgp_int_start_stop_x);
+ venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y,
+ config->tvdetgp_int_start_stop_y);
+ venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl);
+ venc_write_reg(VENC_F_CONTROL, config->f_control);
+ venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl);
+}
+
+static void venc_reset(void)
+{
+ int t = 1000;
+
+ venc_write_reg(VENC_F_CONTROL, 1<<8);
+ while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) {
+ if (--t == 0) {
+ DSSERR("Failed to reset venc\n");
+ return;
+ }
+ }
+
+ /* the magical sleep that makes things work */
+ msleep(20);
+}
+
+static void venc_enable_clocks(int enable)
+{
+ if (enable)
+ dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
+ DSS_CLK_96M);
+ else
+ dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
+ DSS_CLK_96M);
+}
+
+static const struct venc_config *venc_timings_to_config(
+ struct omap_video_timings *timings)
+{
+ if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ return &venc_config_pal_trm;
+
+ if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ return &venc_config_ntsc_trm;
+
+ BUG();
+}
+
+
+
+
+
+/* driver */
+static int venc_panel_probe(struct omap_dss_device *dssdev)
+{
+ dssdev->panel.timings = omap_dss_pal_timings;
+
+ return 0;
+}
+
+static void venc_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int venc_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ /* wait a couple of vsyncs before enabling the LCD */
+ msleep(50);
+
+ if (dssdev->platform_enable)
+ r = dssdev->platform_enable(dssdev);
+
+ return r;
+}
+
+static void venc_panel_disable(struct omap_dss_device *dssdev)
+{
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ /* wait at least 5 vsyncs after disabling the LCD */
+
+ msleep(100);
+}
+
+static int venc_panel_suspend(struct omap_dss_device *dssdev)
+{
+ venc_panel_disable(dssdev);
+ return 0;
+}
+
+static int venc_panel_resume(struct omap_dss_device *dssdev)
+{
+ return venc_panel_enable(dssdev);
+}
+
+static struct omap_dss_driver venc_driver = {
+ .probe = venc_panel_probe,
+ .remove = venc_panel_remove,
+
+ .enable = venc_panel_enable,
+ .disable = venc_panel_disable,
+ .suspend = venc_panel_suspend,
+ .resume = venc_panel_resume,
+
+ .driver = {
+ .name = "venc",
+ .owner = THIS_MODULE,
+ },
+};
+/* driver end */
+
+
+
+int venc_init(struct platform_device *pdev)
+{
+ u8 rev_id;
+
+ mutex_init(&venc.venc_lock);
+
+ venc.wss_data = 0;
+
+ venc.base = ioremap(VENC_BASE, SZ_1K);
+ if (!venc.base) {
+ DSSERR("can't ioremap VENC\n");
+ return -ENOMEM;
+ }
+
+ venc.vdda_dac_reg = regulator_get(&pdev->dev, "vdda_dac");
+ if (IS_ERR(venc.vdda_dac_reg)) {
+ iounmap(venc.base);
+ DSSERR("can't get VDDA_DAC regulator\n");
+ return PTR_ERR(venc.vdda_dac_reg);
+ }
+
+ venc_enable_clocks(1);
+
+ rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+ printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
+
+ venc_enable_clocks(0);
+
+ return omap_dss_register_driver(&venc_driver);
+}
+
+void venc_exit(void)
+{
+ omap_dss_unregister_driver(&venc_driver);
+
+ regulator_put(venc.vdda_dac_reg);
+
+ iounmap(venc.base);
+}
+
+static void venc_power_on(struct omap_dss_device *dssdev)
+{
+ u32 l;
+
+ venc_enable_clocks(1);
+
+ venc_reset();
+ venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
+
+ dss_set_venc_output(dssdev->phy.venc.type);
+ dss_set_dac_pwrdn_bgz(1);
+
+ l = 0;
+
+ if (dssdev->phy.venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE)
+ l |= 1 << 1;
+ else /* S-Video */
+ l |= (1 << 0) | (1 << 2);
+
+ if (dssdev->phy.venc.invert_polarity == false)
+ l |= 1 << 3;
+
+ venc_write_reg(VENC_OUTPUT_CONTROL, l);
+
+ dispc_set_digit_size(dssdev->panel.timings.x_res,
+ dssdev->panel.timings.y_res/2);
+
+ regulator_enable(venc.vdda_dac_reg);
+
+ if (dssdev->platform_enable)
+ dssdev->platform_enable(dssdev);
+
+ dispc_enable_digit_out(1);
+}
+
+static void venc_power_off(struct omap_dss_device *dssdev)
+{
+ venc_write_reg(VENC_OUTPUT_CONTROL, 0);
+ dss_set_dac_pwrdn_bgz(0);
+
+ dispc_enable_digit_out(0);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ regulator_disable(venc.vdda_dac_reg);
+
+ venc_enable_clocks(0);
+}
+
+static int venc_enable_display(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("venc_enable_display\n");
+
+ mutex_lock(&venc.venc_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ venc_power_on(dssdev);
+
+ venc.wss_data = 0;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+err:
+ mutex_unlock(&venc.venc_lock);
+
+ return r;
+}
+
+static void venc_disable_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("venc_disable_display\n");
+
+ mutex_lock(&venc.venc_lock);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
+ goto end;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
+ /* suspended is the same as disabled with venc */
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ goto end;
+ }
+
+ venc_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+end:
+ mutex_unlock(&venc.venc_lock);
+}
+
+static int venc_display_suspend(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("venc_display_suspend\n");
+
+ mutex_lock(&venc.venc_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ venc_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+err:
+ mutex_unlock(&venc.venc_lock);
+
+ return r;
+}
+
+static int venc_display_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ DSSDBG("venc_display_resume\n");
+
+ mutex_lock(&venc.venc_lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ venc_power_on(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+err:
+ mutex_unlock(&venc.venc_lock);
+
+ return r;
+}
+
+static void venc_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+
+static void venc_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("venc_set_timings\n");
+
+ /* Reset WSS data when the TV standard changes. */
+ if (memcmp(&dssdev->panel.timings, timings, sizeof(*timings)))
+ venc.wss_data = 0;
+
+ dssdev->panel.timings = *timings;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ /* turn the VENC off and on to take the new timings into use */
+ venc_disable_display(dssdev);
+ venc_enable_display(dssdev);
+ }
+}
+
+static int venc_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ DSSDBG("venc_check_timings\n");
+
+ if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
+ return 0;
+
+ if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+static u32 venc_get_wss(struct omap_dss_device *dssdev)
+{
+ /* Invert due to VENC_L21_WC_CTL:INV=1 */
+ return (venc.wss_data >> 8) ^ 0xfffff;
+}
+
+static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
+{
+ const struct venc_config *config;
+
+ DSSDBG("venc_set_wss\n");
+
+ mutex_lock(&venc.venc_lock);
+
+ config = venc_timings_to_config(&dssdev->panel.timings);
+
+ /* Invert due to VENC_L21_WC_CTL:INV=1 */
+ venc.wss_data = (wss ^ 0xfffff) << 8;
+
+ venc_enable_clocks(1);
+
+ venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+ venc.wss_data);
+
+ venc_enable_clocks(0);
+
+ mutex_unlock(&venc.venc_lock);
+
+ return 0;
+}
+
+static enum omap_dss_update_mode venc_display_get_update_mode(
+ struct omap_dss_device *dssdev)
+{
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return OMAP_DSS_UPDATE_AUTO;
+ else
+ return OMAP_DSS_UPDATE_DISABLED;
+}
+
+int venc_init_display(struct omap_dss_device *dssdev)
+{
+ DSSDBG("init_display\n");
+
+ dssdev->enable = venc_enable_display;
+ dssdev->disable = venc_disable_display;
+ dssdev->suspend = venc_display_suspend;
+ dssdev->resume = venc_display_resume;
+ dssdev->get_timings = venc_get_timings;
+ dssdev->set_timings = venc_set_timings;
+ dssdev->check_timings = venc_check_timings;
+ dssdev->get_wss = venc_get_wss;
+ dssdev->set_wss = venc_set_wss;
+ dssdev->get_update_mode = venc_display_get_update_mode;
+
+ return 0;
+}
+
+void venc_dump_regs(struct seq_file *s)
+{
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
+
+ venc_enable_clocks(1);
+
+ DUMPREG(VENC_F_CONTROL);
+ DUMPREG(VENC_VIDOUT_CTRL);
+ DUMPREG(VENC_SYNC_CTRL);
+ DUMPREG(VENC_LLEN);
+ DUMPREG(VENC_FLENS);
+ DUMPREG(VENC_HFLTR_CTRL);
+ DUMPREG(VENC_CC_CARR_WSS_CARR);
+ DUMPREG(VENC_C_PHASE);
+ DUMPREG(VENC_GAIN_U);
+ DUMPREG(VENC_GAIN_V);
+ DUMPREG(VENC_GAIN_Y);
+ DUMPREG(VENC_BLACK_LEVEL);
+ DUMPREG(VENC_BLANK_LEVEL);
+ DUMPREG(VENC_X_COLOR);
+ DUMPREG(VENC_M_CONTROL);
+ DUMPREG(VENC_BSTAMP_WSS_DATA);
+ DUMPREG(VENC_S_CARR);
+ DUMPREG(VENC_LINE21);
+ DUMPREG(VENC_LN_SEL);
+ DUMPREG(VENC_L21__WC_CTL);
+ DUMPREG(VENC_HTRIGGER_VTRIGGER);
+ DUMPREG(VENC_SAVID__EAVID);
+ DUMPREG(VENC_FLEN__FAL);
+ DUMPREG(VENC_LAL__PHASE_RESET);
+ DUMPREG(VENC_HS_INT_START_STOP_X);
+ DUMPREG(VENC_HS_EXT_START_STOP_X);
+ DUMPREG(VENC_VS_INT_START_X);
+ DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y);
+ DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X);
+ DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
+ DUMPREG(VENC_VS_EXT_STOP_Y);
+ DUMPREG(VENC_AVID_START_STOP_X);
+ DUMPREG(VENC_AVID_START_STOP_Y);
+ DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y);
+ DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
+ DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
+ DUMPREG(VENC_TVDETGP_INT_START_STOP_X);
+ DUMPREG(VENC_TVDETGP_INT_START_STOP_Y);
+ DUMPREG(VENC_GEN_CTRL);
+ DUMPREG(VENC_OUTPUT_CONTROL);
+ DUMPREG(VENC_OUTPUT_TEST);
+
+ venc_enable_clocks(0);
+
+#undef DUMPREG
+}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
new file mode 100644
index 00000000000..bb694cc52a5
--- /dev/null
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -0,0 +1,37 @@
+menuconfig FB_OMAP2
+ tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
+ depends on FB && OMAP2_DSS
+
+ select OMAP2_VRAM
+ select OMAP2_VRFB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Frame buffer driver for OMAP2/3 based boards.
+
+config FB_OMAP2_DEBUG_SUPPORT
+ bool "Debug support for OMAP2/3 FB"
+ default y
+ depends on FB_OMAP2
+ help
+ Support for debug output. You have to enable the actual printing
+ with the 'debug' module parameter.
+
+config FB_OMAP2_FORCE_AUTO_UPDATE
+ bool "Force main display to automatic update mode"
+ depends on FB_OMAP2
+ help
+ Forces the main display into automatic update mode (if possible),
+ and also enables tearsync (if possible). By default, displays
+ that support manual update are started in manual update mode.
+
+config FB_OMAP2_NUM_FBS
+ int "Number of framebuffers"
+ range 1 10
+ default 3
+ depends on FB_OMAP2
+ help
+ Select the number of framebuffers created. OMAP2/3 has 3 overlays,
+ so normally this would be 3.
diff --git a/drivers/video/omap2/omapfb/Makefile b/drivers/video/omap2/omapfb/Makefile
new file mode 100644
index 00000000000..51c2e00d9bf
--- /dev/null
+++ b/drivers/video/omap2/omapfb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_FB_OMAP2) += omapfb.o
+omapfb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
new file mode 100644
index 00000000000..4c4bafdfaa4
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -0,0 +1,755 @@
+/*
+ * linux/drivers/video/omap2/omapfb-ioctl.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fb.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/omapfb.h>
+#include <linux/vmalloc.h>
+
+#include <plat/display.h>
+#include <plat/vrfb.h>
+#include <plat/vram.h>
+
+#include "omapfb.h"
+
+static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_overlay *ovl;
+ struct omap_overlay_info info;
+ int r = 0;
+
+ DBG("omapfb_setup_plane\n");
+
+ if (ofbi->num_overlays != 1) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* XXX uses only the first overlay */
+ ovl = ofbi->overlays[0];
+
+ if (pi->enabled && !ofbi->region.size) {
+ /*
+ * This plane's memory was freed, can't enable it
+ * until it's reallocated.
+ */
+ r = -EINVAL;
+ goto out;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+
+ info.pos_x = pi->pos_x;
+ info.pos_y = pi->pos_y;
+ info.out_width = pi->out_width;
+ info.out_height = pi->out_height;
+ info.enabled = pi->enabled;
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r)
+ goto out;
+
+ if (ovl->manager) {
+ r = ovl->manager->apply(ovl->manager);
+ if (r)
+ goto out;
+ }
+
+out:
+ if (r)
+ dev_err(fbdev->dev, "setup_plane failed\n");
+ return r;
+}
+
+static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ if (ofbi->num_overlays != 1) {
+ memset(pi, 0, sizeof(*pi));
+ } else {
+ struct omap_overlay_info *ovli;
+ struct omap_overlay *ovl;
+
+ ovl = ofbi->overlays[0];
+ ovli = &ovl->info;
+
+ pi->pos_x = ovli->pos_x;
+ pi->pos_y = ovli->pos_y;
+ pi->enabled = ovli->enabled;
+ pi->channel_out = 0; /* xxx */
+ pi->mirror = 0;
+ pi->out_width = ovli->out_width;
+ pi->out_height = ovli->out_height;
+ }
+
+ return 0;
+}
+
+static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb2_mem_region *rg;
+ int r, i;
+ size_t size;
+
+ if (mi->type > OMAPFB_MEMTYPE_MAX)
+ return -EINVAL;
+
+ size = PAGE_ALIGN(mi->size);
+
+ rg = &ofbi->region;
+
+ for (i = 0; i < ofbi->num_overlays; i++) {
+ if (ofbi->overlays[i]->info.enabled)
+ return -EBUSY;
+ }
+
+ if (rg->size != size || rg->type != mi->type) {
+ r = omapfb_realloc_fbmem(fbi, size, mi->type);
+ if (r) {
+ dev_err(fbdev->dev, "realloc fbmem failed\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_mem_region *rg;
+
+ rg = &ofbi->region;
+ memset(mi, 0, sizeof(*mi));
+
+ mi->size = rg->size;
+ mi->type = rg->type;
+
+ return 0;
+}
+
+static int omapfb_update_window_nolock(struct fb_info *fbi,
+ u32 x, u32 y, u32 w, u32 h)
+{
+ struct omap_dss_device *display = fb2display(fbi);
+ u16 dw, dh;
+
+ if (!display)
+ return 0;
+
+ if (w == 0 || h == 0)
+ return 0;
+
+ display->get_resolution(display, &dw, &dh);
+
+ if (x + w > dw || y + h > dh)
+ return -EINVAL;
+
+ return display->update(display, x, y, w, h);
+}
+
+/* This function is exported for SGX driver use */
+int omapfb_update_window(struct fb_info *fbi,
+ u32 x, u32 y, u32 w, u32 h)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ int r;
+
+ omapfb_lock(fbdev);
+ lock_fb_info(fbi);
+
+ r = omapfb_update_window_nolock(fbi, x, y, w, h);
+
+ unlock_fb_info(fbi);
+ omapfb_unlock(fbdev);
+
+ return r;
+}
+EXPORT_SYMBOL(omapfb_update_window);
+
+static int omapfb_set_update_mode(struct fb_info *fbi,
+ enum omapfb_update_mode mode)
+{
+ struct omap_dss_device *display = fb2display(fbi);
+ enum omap_dss_update_mode um;
+ int r;
+
+ if (!display || !display->set_update_mode)
+ return -EINVAL;
+
+ switch (mode) {
+ case OMAPFB_UPDATE_DISABLED:
+ um = OMAP_DSS_UPDATE_DISABLED;
+ break;
+
+ case OMAPFB_AUTO_UPDATE:
+ um = OMAP_DSS_UPDATE_AUTO;
+ break;
+
+ case OMAPFB_MANUAL_UPDATE:
+ um = OMAP_DSS_UPDATE_MANUAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ r = display->set_update_mode(display, um);
+
+ return r;
+}
+
+static int omapfb_get_update_mode(struct fb_info *fbi,
+ enum omapfb_update_mode *mode)
+{
+ struct omap_dss_device *display = fb2display(fbi);
+ enum omap_dss_update_mode m;
+
+ if (!display || !display->get_update_mode)
+ return -EINVAL;
+
+ m = display->get_update_mode(display);
+
+ switch (m) {
+ case OMAP_DSS_UPDATE_DISABLED:
+ *mode = OMAPFB_UPDATE_DISABLED;
+ break;
+ case OMAP_DSS_UPDATE_AUTO:
+ *mode = OMAPFB_AUTO_UPDATE;
+ break;
+ case OMAP_DSS_UPDATE_MANUAL:
+ *mode = OMAPFB_MANUAL_UPDATE;
+ break;
+ default:
+ BUG();
+ }
+
+ return 0;
+}
+
+/* XXX this color key handling is a hack... */
+static struct omapfb_color_key omapfb_color_keys[2];
+
+static int _omapfb_set_color_key(struct omap_overlay_manager *mgr,
+ struct omapfb_color_key *ck)
+{
+ struct omap_overlay_manager_info info;
+ enum omap_dss_trans_key_type kt;
+ int r;
+
+ mgr->get_manager_info(mgr, &info);
+
+ if (ck->key_type == OMAPFB_COLOR_KEY_DISABLED) {
+ info.trans_enabled = false;
+ omapfb_color_keys[mgr->id] = *ck;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+
+ return r;
+ }
+
+ switch (ck->key_type) {
+ case OMAPFB_COLOR_KEY_GFX_DST:
+ kt = OMAP_DSS_COLOR_KEY_GFX_DST;
+ break;
+ case OMAPFB_COLOR_KEY_VID_SRC:
+ kt = OMAP_DSS_COLOR_KEY_VID_SRC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ info.default_color = ck->background;
+ info.trans_key = ck->trans_key;
+ info.trans_key_type = kt;
+ info.trans_enabled = true;
+
+ omapfb_color_keys[mgr->id] = *ck;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+
+ return r;
+}
+
+static int omapfb_set_color_key(struct fb_info *fbi,
+ struct omapfb_color_key *ck)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ int r;
+ int i;
+ struct omap_overlay_manager *mgr = NULL;
+
+ omapfb_lock(fbdev);
+
+ for (i = 0; i < ofbi->num_overlays; i++) {
+ if (ofbi->overlays[i]->manager) {
+ mgr = ofbi->overlays[i]->manager;
+ break;
+ }
+ }
+
+ if (!mgr) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = _omapfb_set_color_key(mgr, ck);
+err:
+ omapfb_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_get_color_key(struct fb_info *fbi,
+ struct omapfb_color_key *ck)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_overlay_manager *mgr = NULL;
+ int r = 0;
+ int i;
+
+ omapfb_lock(fbdev);
+
+ for (i = 0; i < ofbi->num_overlays; i++) {
+ if (ofbi->overlays[i]->manager) {
+ mgr = ofbi->overlays[i]->manager;
+ break;
+ }
+ }
+
+ if (!mgr) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ *ck = omapfb_color_keys[mgr->id];
+err:
+ omapfb_unlock(fbdev);
+
+ return r;
+}
+
+static int omapfb_memory_read(struct fb_info *fbi,
+ struct omapfb_memory_read *mr)
+{
+ struct omap_dss_device *display = fb2display(fbi);
+ void *buf;
+ int r;
+
+ if (!display || !display->memory_read)
+ return -ENOENT;
+
+ if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
+ return -EFAULT;
+
+ if (mr->w * mr->h * 3 > mr->buffer_size)
+ return -EINVAL;
+
+ buf = vmalloc(mr->buffer_size);
+ if (!buf) {
+ DBG("vmalloc failed\n");
+ return -ENOMEM;
+ }
+
+ r = display->memory_read(display, buf, mr->buffer_size,
+ mr->x, mr->y, mr->w, mr->h);
+
+ if (r > 0) {
+ if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+ r = -EFAULT;
+ }
+
+ vfree(buf);
+
+ return r;
+}
+
+static int omapfb_get_ovl_colormode(struct omapfb2_device *fbdev,
+ struct omapfb_ovl_colormode *mode)
+{
+ int ovl_idx = mode->overlay_idx;
+ int mode_idx = mode->mode_idx;
+ struct omap_overlay *ovl;
+ enum omap_color_mode supported_modes;
+ struct fb_var_screeninfo var;
+ int i;
+
+ if (ovl_idx >= fbdev->num_overlays)
+ return -ENODEV;
+ ovl = fbdev->overlays[ovl_idx];
+ supported_modes = ovl->supported_modes;
+
+ mode_idx = mode->mode_idx;
+
+ for (i = 0; i < sizeof(supported_modes) * 8; i++) {
+ if (!(supported_modes & (1 << i)))
+ continue;
+ /*
+ * It's possible that the FB doesn't support a mode that is
+ * supported by the overlay, so call dss_mode_to_fb_mode() here
+ * and skip the modes it rejects.
+ */
+ if (dss_mode_to_fb_mode(1 << i, &var) < 0)
+ continue;
+
+ mode_idx--;
+ if (mode_idx < 0)
+ break;
+ }
+
+ if (i == sizeof(supported_modes) * 8)
+ return -ENOENT;
+
+ mode->bits_per_pixel = var.bits_per_pixel;
+ mode->nonstd = var.nonstd;
+ mode->red = var.red;
+ mode->green = var.green;
+ mode->blue = var.blue;
+ mode->transp = var.transp;
+
+ return 0;
+}
+
+static int omapfb_wait_for_go(struct fb_info *fbi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ int r = 0;
+ int i;
+
+ for (i = 0; i < ofbi->num_overlays; ++i) {
+ struct omap_overlay *ovl = ofbi->overlays[i];
+ r = ovl->wait_for_go(ovl);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
+
+ union {
+ struct omapfb_update_window_old uwnd_o;
+ struct omapfb_update_window uwnd;
+ struct omapfb_plane_info plane_info;
+ struct omapfb_caps caps;
+ struct omapfb_mem_info mem_info;
+ struct omapfb_color_key color_key;
+ struct omapfb_ovl_colormode ovl_colormode;
+ enum omapfb_update_mode update_mode;
+ int test_num;
+ struct omapfb_memory_read memory_read;
+ struct omapfb_vram_info vram_info;
+ struct omapfb_tearsync_info tearsync_info;
+ } p;
+
+ int r = 0;
+
+ switch (cmd) {
+ case OMAPFB_SYNC_GFX:
+ DBG("ioctl SYNC_GFX\n");
+ if (!display || !display->sync) {
+ /* DSS1 never returns an error here, so neither do we */
+ /*r = -EINVAL;*/
+ break;
+ }
+
+ r = display->sync(display);
+ break;
+
+ case OMAPFB_UPDATE_WINDOW_OLD:
+ DBG("ioctl UPDATE_WINDOW_OLD\n");
+ if (!display || !display->update) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (copy_from_user(&p.uwnd_o,
+ (void __user *)arg,
+ sizeof(p.uwnd_o))) {
+ r = -EFAULT;
+ break;
+ }
+
+ r = omapfb_update_window_nolock(fbi, p.uwnd_o.x, p.uwnd_o.y,
+ p.uwnd_o.width, p.uwnd_o.height);
+ break;
+
+ case OMAPFB_UPDATE_WINDOW:
+ DBG("ioctl UPDATE_WINDOW\n");
+ if (!display || !display->update) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (copy_from_user(&p.uwnd, (void __user *)arg,
+ sizeof(p.uwnd))) {
+ r = -EFAULT;
+ break;
+ }
+
+ r = omapfb_update_window_nolock(fbi, p.uwnd.x, p.uwnd.y,
+ p.uwnd.width, p.uwnd.height);
+ break;
+
+ case OMAPFB_SETUP_PLANE:
+ DBG("ioctl SETUP_PLANE\n");
+ if (copy_from_user(&p.plane_info, (void __user *)arg,
+ sizeof(p.plane_info)))
+ r = -EFAULT;
+ else
+ r = omapfb_setup_plane(fbi, &p.plane_info);
+ break;
+
+ case OMAPFB_QUERY_PLANE:
+ DBG("ioctl QUERY_PLANE\n");
+ r = omapfb_query_plane(fbi, &p.plane_info);
+ if (r < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.plane_info,
+ sizeof(p.plane_info)))
+ r = -EFAULT;
+ break;
+
+ case OMAPFB_SETUP_MEM:
+ DBG("ioctl SETUP_MEM\n");
+ if (copy_from_user(&p.mem_info, (void __user *)arg,
+ sizeof(p.mem_info)))
+ r = -EFAULT;
+ else
+ r = omapfb_setup_mem(fbi, &p.mem_info);
+ break;
+
+ case OMAPFB_QUERY_MEM:
+ DBG("ioctl QUERY_MEM\n");
+ r = omapfb_query_mem(fbi, &p.mem_info);
+ if (r < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.mem_info,
+ sizeof(p.mem_info)))
+ r = -EFAULT;
+ break;
+
+ case OMAPFB_GET_CAPS:
+ DBG("ioctl GET_CAPS\n");
+ if (!display) {
+ r = -EINVAL;
+ break;
+ }
+
+ memset(&p.caps, 0, sizeof(p.caps));
+ if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ p.caps.ctrl |= OMAPFB_CAPS_MANUAL_UPDATE;
+ if (display->caps & OMAP_DSS_DISPLAY_CAP_TEAR_ELIM)
+ p.caps.ctrl |= OMAPFB_CAPS_TEARSYNC;
+
+ if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
+ r = -EFAULT;
+ break;
+
+ case OMAPFB_GET_OVERLAY_COLORMODE:
+ DBG("ioctl GET_OVERLAY_COLORMODE\n");
+ if (copy_from_user(&p.ovl_colormode, (void __user *)arg,
+ sizeof(p.ovl_colormode))) {
+ r = -EFAULT;
+ break;
+ }
+ r = omapfb_get_ovl_colormode(fbdev, &p.ovl_colormode);
+ if (r < 0)
+ break;
+ if (copy_to_user((void __user *)arg, &p.ovl_colormode,
+ sizeof(p.ovl_colormode)))
+ r = -EFAULT;
+ break;
+
+ case OMAPFB_SET_UPDATE_MODE:
+ DBG("ioctl SET_UPDATE_MODE\n");
+ if (get_user(p.update_mode, (int __user *)arg))
+ r = -EFAULT;
+ else
+ r = omapfb_set_update_mode(fbi, p.update_mode);
+ break;
+
+ case OMAPFB_GET_UPDATE_MODE:
+ DBG("ioctl GET_UPDATE_MODE\n");
+ r = omapfb_get_update_mode(fbi, &p.update_mode);
+ if (r)
+ break;
+ if (put_user(p.update_mode,
+ (enum omapfb_update_mode __user *)arg))
+ r = -EFAULT;
+ break;
+
+ case OMAPFB_SET_COLOR_KEY:
+ DBG("ioctl SET_COLOR_KEY\n");
+ if (copy_from_user(&p.color_key, (void __user *)arg,
+ sizeof(p.color_key)))
+ r = -EFAULT;
+ else
+ r = omapfb_set_color_key(fbi, &p.color_key);
+ break;
+
+ case OMAPFB_GET_COLOR_KEY:
+ DBG("ioctl GET_COLOR_KEY\n");
+ r = omapfb_get_color_key(fbi, &p.color_key);
+ if (r)
+ break;
+ if (copy_to_user((void __user *)arg, &p.color_key,
+ sizeof(p.color_key)))
+ r = -EFAULT;
+ break;
+
+ case OMAPFB_WAITFORVSYNC:
+ DBG("ioctl WAITFORVSYNC\n");
+ if (!display) {
+ r = -EINVAL;
+ break;
+ }
+
+ r = display->wait_vsync(display);
+ break;
+
+ case OMAPFB_WAITFORGO:
+ DBG("ioctl WAITFORGO\n");
+ if (!display) {
+ r = -EINVAL;
+ break;
+ }
+
+ r = omapfb_wait_for_go(fbi);
+ break;
+
+ /* LCD and CTRL tests do the same thing for backward
+ * compatibility */
+ case OMAPFB_LCD_TEST:
+ DBG("ioctl LCD_TEST\n");
+ if (get_user(p.test_num, (int __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+ if (!display || !display->run_test) {
+ r = -EINVAL;
+ break;
+ }
+
+ r = display->run_test(display, p.test_num);
+
+ break;
+
+ case OMAPFB_CTRL_TEST:
+ DBG("ioctl CTRL_TEST\n");
+ if (get_user(p.test_num, (int __user *)arg)) {
+ r = -EFAULT;
+ break;
+ }
+ if (!display || !display->run_test) {
+ r = -EINVAL;
+ break;
+ }
+
+ r = display->run_test(display, p.test_num);
+
+ break;
+
+ case OMAPFB_MEMORY_READ:
+ DBG("ioctl MEMORY_READ\n");
+
+ if (copy_from_user(&p.memory_read, (void __user *)arg,
+ sizeof(p.memory_read))) {
+ r = -EFAULT;
+ break;
+ }
+
+ r = omapfb_memory_read(fbi, &p.memory_read);
+
+ break;
+
+ case OMAPFB_GET_VRAM_INFO: {
+ unsigned long vram, free, largest;
+
+ DBG("ioctl GET_VRAM_INFO\n");
+
+ omap_vram_get_info(&vram, &free, &largest);
+ p.vram_info.total = vram;
+ p.vram_info.free = free;
+ p.vram_info.largest_free_block = largest;
+
+ if (copy_to_user((void __user *)arg, &p.vram_info,
+ sizeof(p.vram_info)))
+ r = -EFAULT;
+ break;
+ }
+
+ case OMAPFB_SET_TEARSYNC: {
+ DBG("ioctl SET_TEARSYNC\n");
+
+ if (copy_from_user(&p.tearsync_info, (void __user *)arg,
+ sizeof(p.tearsync_info))) {
+ r = -EFAULT;
+ break;
+ }
+
+ if (!display->enable_te) {
+ r = -ENODEV;
+ break;
+ }
+
+ r = display->enable_te(display, !!p.tearsync_info.enabled);
+
+ break;
+ }
+
+ default:
+ dev_err(fbdev->dev, "Unknown ioctl 0x%x\n", cmd);
+ r = -EINVAL;
+ }
+
+ if (r < 0)
+ DBG("ioctl failed: %d\n", r);
+
+ return r;
+}
+
+
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
new file mode 100644
index 00000000000..ef299839858
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -0,0 +1,2261 @@
+/*
+ * linux/drivers/video/omap2/omapfb-main.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/omapfb.h>
+
+#include <plat/display.h>
+#include <plat/vram.h>
+#include <plat/vrfb.h>
+
+#include "omapfb.h"
+
+#define MODULE_NAME "omapfb"
+
+#define OMAPFB_PLANE_XRES_MIN 8
+#define OMAPFB_PLANE_YRES_MIN 8
+
+static char *def_mode;
+static char *def_vram;
+static int def_vrfb;
+static int def_rotate;
+static int def_mirror;
+
+#ifdef DEBUG
+unsigned int omapfb_debug;
+module_param_named(debug, omapfb_debug, bool, 0644);
+static unsigned int omapfb_test_pattern;
+module_param_named(test, omapfb_test_pattern, bool, 0644);
+#endif
+
+static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi);
+
+#ifdef DEBUG
+static void draw_pixel(struct fb_info *fbi, int x, int y, unsigned color)
+{
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ void __iomem *addr = fbi->screen_base;
+ const unsigned bytespp = var->bits_per_pixel >> 3;
+ const unsigned line_len = fix->line_length / bytespp;
+
+ int r = (color >> 16) & 0xff;
+ int g = (color >> 8) & 0xff;
+ int b = (color >> 0) & 0xff;
+
+ if (var->bits_per_pixel == 16) {
+ u16 __iomem *p = (u16 __iomem *)addr;
+ p += y * line_len + x;
+
+ r = r * 32 / 256;
+ g = g * 64 / 256;
+ b = b * 32 / 256;
+
+ __raw_writew((r << 11) | (g << 5) | (b << 0), p);
+ } else if (var->bits_per_pixel == 24) {
+ u8 __iomem *p = (u8 __iomem *)addr;
+ p += (y * line_len + x) * 3;
+
+ __raw_writeb(b, p + 0);
+ __raw_writeb(g, p + 1);
+ __raw_writeb(r, p + 2);
+ } else if (var->bits_per_pixel == 32) {
+ u32 __iomem *p = (u32 __iomem *)addr;
+ p += y * line_len + x;
+ __raw_writel(color, p);
+ }
+}
+
+static void fill_fb(struct fb_info *fbi)
+{
+ struct fb_var_screeninfo *var = &fbi->var;
+ const short w = var->xres_virtual;
+ const short h = var->yres_virtual;
+ void __iomem *addr = fbi->screen_base;
+ int y, x;
+
+ if (!addr)
+ return;
+
+ DBG("fill_fb %dx%d, line_len %d bytes\n", w, h, fbi->fix.line_length);
+
+ for (y = 0; y < h; y++) {
+ for (x = 0; x < w; x++) {
+ if (x < 20 && y < 20)
+ draw_pixel(fbi, x, y, 0xffffff);
+ else if (x < 20 && (y > 20 && y < h - 20))
+ draw_pixel(fbi, x, y, 0xff);
+ else if (y < 20 && (x > 20 && x < w - 20))
+ draw_pixel(fbi, x, y, 0xff00);
+ else if (x > w - 20 && (y > 20 && y < h - 20))
+ draw_pixel(fbi, x, y, 0xff0000);
+ else if (y > h - 20 && (x > 20 && x < w - 20))
+ draw_pixel(fbi, x, y, 0xffff00);
+ else if (x == 20 || x == w - 20 ||
+ y == 20 || y == h - 20)
+ draw_pixel(fbi, x, y, 0xffffff);
+ else if (x == y || w - x == h - y)
+ draw_pixel(fbi, x, y, 0xff00ff);
+ else if (w - x == y || x == h - y)
+ draw_pixel(fbi, x, y, 0x00ffff);
+ else if (x > 20 && y > 20 && x < w - 20 && y < h - 20) {
+ int t = x * 3 / w;
+ unsigned r = 0, g = 0, b = 0;
+ unsigned c;
+ if (var->bits_per_pixel == 16) {
+ if (t == 0)
+ b = (y % 32) * 256 / 32;
+ else if (t == 1)
+ g = (y % 64) * 256 / 64;
+ else if (t == 2)
+ r = (y % 32) * 256 / 32;
+ } else {
+ if (t == 0)
+ b = (y % 256);
+ else if (t == 1)
+ g = (y % 256);
+ else if (t == 2)
+ r = (y % 256);
+ }
+ c = (r << 16) | (g << 8) | (b << 0);
+ draw_pixel(fbi, x, y, c);
+ } else {
+ draw_pixel(fbi, x, y, 0);
+ }
+ }
+ }
+}
+#endif
+
+static unsigned omapfb_get_vrfb_offset(struct omapfb_info *ofbi, int rot)
+{
+ struct vrfb *vrfb = &ofbi->region.vrfb;
+ unsigned offset;
+
+ switch (rot) {
+ case FB_ROTATE_UR:
+ offset = 0;
+ break;
+ case FB_ROTATE_CW:
+ offset = vrfb->yoffset;
+ break;
+ case FB_ROTATE_UD:
+ offset = vrfb->yoffset * OMAP_VRFB_LINE_LEN + vrfb->xoffset;
+ break;
+ case FB_ROTATE_CCW:
+ offset = vrfb->xoffset * OMAP_VRFB_LINE_LEN;
+ break;
+ default:
+ BUG();
+ }
+
+ offset *= vrfb->bytespp;
+
+ return offset;
+}
+
+static u32 omapfb_get_region_rot_paddr(struct omapfb_info *ofbi, int rot)
+{
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ return ofbi->region.vrfb.paddr[rot]
+ + omapfb_get_vrfb_offset(ofbi, rot);
+ } else {
+ return ofbi->region.paddr;
+ }
+}
+
+static u32 omapfb_get_region_paddr(struct omapfb_info *ofbi)
+{
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+ return ofbi->region.vrfb.paddr[0];
+ else
+ return ofbi->region.paddr;
+}
+
+static void __iomem *omapfb_get_region_vaddr(struct omapfb_info *ofbi)
+{
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+ return ofbi->region.vrfb.vaddr[0];
+ else
+ return ofbi->region.vaddr;
+}
+
+static struct omapfb_colormode omapfb_colormodes[] = {
+ {
+ .dssmode = OMAP_DSS_COLOR_UYVY,
+ .bits_per_pixel = 16,
+ .nonstd = OMAPFB_COLOR_YUV422,
+ }, {
+ .dssmode = OMAP_DSS_COLOR_YUV2,
+ .bits_per_pixel = 16,
+ .nonstd = OMAPFB_COLOR_YUY422,
+ }, {
+ .dssmode = OMAP_DSS_COLOR_ARGB16,
+ .bits_per_pixel = 16,
+ .red = { .length = 4, .offset = 8, .msb_right = 0 },
+ .green = { .length = 4, .offset = 4, .msb_right = 0 },
+ .blue = { .length = 4, .offset = 0, .msb_right = 0 },
+ .transp = { .length = 4, .offset = 12, .msb_right = 0 },
+ }, {
+ .dssmode = OMAP_DSS_COLOR_RGB16,
+ .bits_per_pixel = 16,
+ .red = { .length = 5, .offset = 11, .msb_right = 0 },
+ .green = { .length = 6, .offset = 5, .msb_right = 0 },
+ .blue = { .length = 5, .offset = 0, .msb_right = 0 },
+ .transp = { .length = 0, .offset = 0, .msb_right = 0 },
+ }, {
+ .dssmode = OMAP_DSS_COLOR_RGB24P,
+ .bits_per_pixel = 24,
+ .red = { .length = 8, .offset = 16, .msb_right = 0 },
+ .green = { .length = 8, .offset = 8, .msb_right = 0 },
+ .blue = { .length = 8, .offset = 0, .msb_right = 0 },
+ .transp = { .length = 0, .offset = 0, .msb_right = 0 },
+ }, {
+ .dssmode = OMAP_DSS_COLOR_RGB24U,
+ .bits_per_pixel = 32,
+ .red = { .length = 8, .offset = 16, .msb_right = 0 },
+ .green = { .length = 8, .offset = 8, .msb_right = 0 },
+ .blue = { .length = 8, .offset = 0, .msb_right = 0 },
+ .transp = { .length = 0, .offset = 0, .msb_right = 0 },
+ }, {
+ .dssmode = OMAP_DSS_COLOR_ARGB32,
+ .bits_per_pixel = 32,
+ .red = { .length = 8, .offset = 16, .msb_right = 0 },
+ .green = { .length = 8, .offset = 8, .msb_right = 0 },
+ .blue = { .length = 8, .offset = 0, .msb_right = 0 },
+ .transp = { .length = 8, .offset = 24, .msb_right = 0 },
+ }, {
+ .dssmode = OMAP_DSS_COLOR_RGBA32,
+ .bits_per_pixel = 32,
+ .red = { .length = 8, .offset = 24, .msb_right = 0 },
+ .green = { .length = 8, .offset = 16, .msb_right = 0 },
+ .blue = { .length = 8, .offset = 8, .msb_right = 0 },
+ .transp = { .length = 8, .offset = 0, .msb_right = 0 },
+ }, {
+ .dssmode = OMAP_DSS_COLOR_RGBX32,
+ .bits_per_pixel = 32,
+ .red = { .length = 8, .offset = 24, .msb_right = 0 },
+ .green = { .length = 8, .offset = 16, .msb_right = 0 },
+ .blue = { .length = 8, .offset = 8, .msb_right = 0 },
+ .transp = { .length = 0, .offset = 0, .msb_right = 0 },
+ },
+};
+
+static bool cmp_var_to_colormode(struct fb_var_screeninfo *var,
+ struct omapfb_colormode *color)
+{
+ bool cmp_component(struct fb_bitfield *f1, struct fb_bitfield *f2)
+ {
+ return f1->length == f2->length &&
+ f1->offset == f2->offset &&
+ f1->msb_right == f2->msb_right;
+ }
+
+ if (var->bits_per_pixel == 0 ||
+ var->red.length == 0 ||
+ var->blue.length == 0 ||
+ var->green.length == 0)
+ return 0;
+
+ return var->bits_per_pixel == color->bits_per_pixel &&
+ cmp_component(&var->red, &color->red) &&
+ cmp_component(&var->green, &color->green) &&
+ cmp_component(&var->blue, &color->blue) &&
+ cmp_component(&var->transp, &color->transp);
+}
+
+static void assign_colormode_to_var(struct fb_var_screeninfo *var,
+ struct omapfb_colormode *color)
+{
+ var->bits_per_pixel = color->bits_per_pixel;
+ var->nonstd = color->nonstd;
+ var->red = color->red;
+ var->green = color->green;
+ var->blue = color->blue;
+ var->transp = color->transp;
+}
+
+static int fb_mode_to_dss_mode(struct fb_var_screeninfo *var,
+ enum omap_color_mode *mode)
+{
+ enum omap_color_mode dssmode;
+ int i;
+
+ /* first match with nonstd field */
+ if (var->nonstd) {
+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+ struct omapfb_colormode *m = &omapfb_colormodes[i];
+ if (var->nonstd == m->nonstd) {
+ assign_colormode_to_var(var, m);
+ *mode = m->dssmode;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ /* then try exact match of bpp and colors */
+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+ struct omapfb_colormode *m = &omapfb_colormodes[i];
+ if (cmp_var_to_colormode(var, m)) {
+ assign_colormode_to_var(var, m);
+ *mode = m->dssmode;
+ return 0;
+ }
+ }
+
+ /* match with bpp if the user has not filled the color fields
+ * properly */
+ switch (var->bits_per_pixel) {
+ case 1:
+ dssmode = OMAP_DSS_COLOR_CLUT1;
+ break;
+ case 2:
+ dssmode = OMAP_DSS_COLOR_CLUT2;
+ break;
+ case 4:
+ dssmode = OMAP_DSS_COLOR_CLUT4;
+ break;
+ case 8:
+ dssmode = OMAP_DSS_COLOR_CLUT8;
+ break;
+ case 12:
+ dssmode = OMAP_DSS_COLOR_RGB12U;
+ break;
+ case 16:
+ dssmode = OMAP_DSS_COLOR_RGB16;
+ break;
+ case 24:
+ dssmode = OMAP_DSS_COLOR_RGB24P;
+ break;
+ case 32:
+ dssmode = OMAP_DSS_COLOR_RGB24U;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+ struct omapfb_colormode *m = &omapfb_colormodes[i];
+ if (dssmode == m->dssmode) {
+ assign_colormode_to_var(var, m);
+ *mode = m->dssmode;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int check_fb_res_bounds(struct fb_var_screeninfo *var)
+{
+ int xres_min = OMAPFB_PLANE_XRES_MIN;
+ int xres_max = 2048;
+ int yres_min = OMAPFB_PLANE_YRES_MIN;
+ int yres_max = 2048;
+
+ /* XXX: some applications seem to set virtual res to 0. */
+ if (var->xres_virtual == 0)
+ var->xres_virtual = var->xres;
+
+ if (var->yres_virtual == 0)
+ var->yres_virtual = var->yres;
+
+ if (var->xres_virtual < xres_min || var->yres_virtual < yres_min)
+ return -EINVAL;
+
+ if (var->xres < xres_min)
+ var->xres = xres_min;
+ if (var->yres < yres_min)
+ var->yres = yres_min;
+ if (var->xres > xres_max)
+ var->xres = xres_max;
+ if (var->yres > yres_max)
+ var->yres = yres_max;
+
+ if (var->xres > var->xres_virtual)
+ var->xres = var->xres_virtual;
+ if (var->yres > var->yres_virtual)
+ var->yres = var->yres_virtual;
+
+ return 0;
+}
+
+static void shrink_height(unsigned long max_frame_size,
+ struct fb_var_screeninfo *var)
+{
+ DBG("can't fit FB into memory, reducing y\n");
+ var->yres_virtual = max_frame_size /
+ (var->xres_virtual * var->bits_per_pixel >> 3);
+
+ if (var->yres_virtual < OMAPFB_PLANE_YRES_MIN)
+ var->yres_virtual = OMAPFB_PLANE_YRES_MIN;
+
+ if (var->yres > var->yres_virtual)
+ var->yres = var->yres_virtual;
+}
+
+static void shrink_width(unsigned long max_frame_size,
+ struct fb_var_screeninfo *var)
+{
+ DBG("can't fit FB into memory, reducing x\n");
+ var->xres_virtual = max_frame_size / var->yres_virtual /
+ (var->bits_per_pixel >> 3);
+
+ if (var->xres_virtual < OMAPFB_PLANE_XRES_MIN)
+ var->xres_virtual = OMAPFB_PLANE_XRES_MIN;
+
+ if (var->xres > var->xres_virtual)
+ var->xres = var->xres_virtual;
+}
+
+static int check_vrfb_fb_size(unsigned long region_size,
+ const struct fb_var_screeninfo *var)
+{
+ unsigned long min_phys_size = omap_vrfb_min_phys_size(var->xres_virtual,
+ var->yres_virtual, var->bits_per_pixel >> 3);
+
+ return min_phys_size > region_size ? -EINVAL : 0;
+}
+
+static int check_fb_size(const struct omapfb_info *ofbi,
+ struct fb_var_screeninfo *var)
+{
+ unsigned long max_frame_size = ofbi->region.size;
+ int bytespp = var->bits_per_pixel >> 3;
+ unsigned long line_size = var->xres_virtual * bytespp;
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ /* One needs to check for both VRFB and OMAPFB limitations. */
+ if (check_vrfb_fb_size(max_frame_size, var))
+ shrink_height(omap_vrfb_max_height(
+ max_frame_size, var->xres_virtual, bytespp) *
+ line_size, var);
+
+ if (check_vrfb_fb_size(max_frame_size, var)) {
+ DBG("cannot fit FB to memory\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ DBG("max frame size %lu, line size %lu\n", max_frame_size, line_size);
+
+ if (line_size * var->yres_virtual > max_frame_size)
+ shrink_height(max_frame_size, var);
+
+ if (line_size * var->yres_virtual > max_frame_size) {
+ shrink_width(max_frame_size, var);
+ line_size = var->xres_virtual * bytespp;
+ }
+
+ if (line_size * var->yres_virtual > max_frame_size) {
+ DBG("cannot fit FB to memory\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Consider if VRFB assisted rotation is in use and if the virtual space for
+ * the zero degree view needs to be mapped. The need for mapping also acts as
+ * the trigger for setting up the hardware on the context in question. This
+ * ensures that one does not attempt to access the virtual view before the
+ * hardware is serving the address translations.
+ */
+static int setup_vrfb_rotation(struct fb_info *fbi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_mem_region *rg = &ofbi->region;
+ struct vrfb *vrfb = &rg->vrfb;
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ unsigned bytespp;
+ bool yuv_mode;
+ enum omap_color_mode mode;
+ int r;
+ bool reconf;
+
+ if (!rg->size || ofbi->rotation_type != OMAP_DSS_ROT_VRFB)
+ return 0;
+
+ DBG("setup_vrfb_rotation\n");
+
+ r = fb_mode_to_dss_mode(var, &mode);
+ if (r)
+ return r;
+
+ bytespp = var->bits_per_pixel >> 3;
+
+ yuv_mode = mode == OMAP_DSS_COLOR_YUV2 || mode == OMAP_DSS_COLOR_UYVY;
+
+ /* We need to reconfigure VRFB if the resolution changes, if yuv mode
+ * is enabled/disabled, or if bytes per pixel changes */
+
+ /* XXX we shouldn't allow this when framebuffer is mmapped */
+
+ reconf = false;
+
+ if (yuv_mode != vrfb->yuv_mode)
+ reconf = true;
+ else if (bytespp != vrfb->bytespp)
+ reconf = true;
+ else if (vrfb->xres != var->xres_virtual ||
+ vrfb->yres != var->yres_virtual)
+ reconf = true;
+
+ if (vrfb->vaddr[0] && reconf) {
+ fbi->screen_base = NULL;
+ fix->smem_start = 0;
+ fix->smem_len = 0;
+ iounmap(vrfb->vaddr[0]);
+ vrfb->vaddr[0] = NULL;
+ DBG("setup_vrfb_rotation: reset fb\n");
+ }
+
+ if (vrfb->vaddr[0])
+ return 0;
+
+ omap_vrfb_setup(&rg->vrfb, rg->paddr,
+ var->xres_virtual,
+ var->yres_virtual,
+ bytespp, yuv_mode);
+
+ /* Now one can ioremap the 0 angle view */
+ r = omap_vrfb_map_angle(vrfb, var->yres_virtual, 0);
+ if (r)
+ return r;
+
+ /* used by open/write in fbmem.c */
+ fbi->screen_base = ofbi->region.vrfb.vaddr[0];
+
+ fix->smem_start = ofbi->region.vrfb.paddr[0];
+
+ switch (var->nonstd) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUY422:
+ fix->line_length =
+ (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2;
+ break;
+ default:
+ fix->line_length =
+ (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3;
+ break;
+ }
+
+ fix->smem_len = var->yres_virtual * fix->line_length;
+
+ return 0;
+}
+
+int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
+ struct fb_var_screeninfo *var)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
+ struct omapfb_colormode *mode = &omapfb_colormodes[i];
+ if (dssmode == mode->dssmode) {
+ assign_colormode_to_var(var, mode);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+void set_fb_fix(struct fb_info *fbi)
+{
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_mem_region *rg = &ofbi->region;
+
+ DBG("set_fb_fix\n");
+
+ /* used by open/write in fbmem.c */
+ fbi->screen_base = (char __iomem *)omapfb_get_region_vaddr(ofbi);
+
+ /* used by mmap in fbmem.c */
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
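+ /*
+ * VRFB views are OMAP_VRFB_LINE_LEN pixels wide. YUV422 data is
+ * handled by VRFB as 32-bit pixel pairs, so its byte line length
+ * is twice that of an ordinary 16bpp mode.
+ */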
+ switch (var->nonstd) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUY422:
+ fix->line_length =
+ (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2;
+ break;
+ default:
+ fix->line_length =
+ (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3;
+ break;
+ }
+
+ fix->smem_len = var->yres_virtual * fix->line_length;
+ } else {
+ fix->line_length =
+ (var->xres_virtual * var->bits_per_pixel) >> 3;
+ fix->smem_len = rg->size;
+ }
+
+ fix->smem_start = omapfb_get_region_paddr(ofbi);
+
+ fix->type = FB_TYPE_PACKED_PIXELS;
+
+ if (var->nonstd)
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ else {
+ switch (var->bits_per_pixel) {
+ case 32:
+ case 24:
+ case 16:
+ case 12:
+ fix->visual = FB_VISUAL_TRUECOLOR;
+ /* 12bpp is stored in 16 bits */
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ fix->visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ }
+ }
+
+ fix->accel = FB_ACCEL_NONE;
+
+ fix->xpanstep = 1;
+ fix->ypanstep = 1;
+}
+
+/* check new var and possibly modify it to be ok */
+int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omap_dss_device *display = fb2display(fbi);
+ enum omap_color_mode mode = 0;
+ int i;
+ int r;
+
+ DBG("check_fb_var %d\n", ofbi->id);
+
+ if (ofbi->region.size == 0)
+ return 0;
+
+ r = fb_mode_to_dss_mode(var, &mode);
+ if (r) {
+ DBG("cannot convert var to omap dss mode\n");
+ return r;
+ }
+
+ for (i = 0; i < ofbi->num_overlays; ++i) {
+ if ((ofbi->overlays[i]->supported_modes & mode) == 0) {
+ DBG("invalid mode\n");
+ return -EINVAL;
+ }
+ }
+
+ if (var->rotate < 0 || var->rotate > 3)
+ return -EINVAL;
+
+ if (check_fb_res_bounds(var))
+ return -EINVAL;
+
+ if (check_fb_size(ofbi, var))
+ return -EINVAL;
+
+ if (var->xres + var->xoffset > var->xres_virtual)
+ var->xoffset = var->xres_virtual - var->xres;
+ if (var->yres + var->yoffset > var->yres_virtual)
+ var->yoffset = var->yres_virtual - var->yres;
+
+ DBG("xres = %d, yres = %d, vxres = %d, vyres = %d\n",
+ var->xres, var->yres,
+ var->xres_virtual, var->yres_virtual);
+
+ var->height = -1;
+ var->width = -1;
+ var->grayscale = 0;
+
+ if (display && display->get_timings) {
+ struct omap_video_timings timings;
+ display->get_timings(display, &timings);
+
+ /* pixclock in ps, the rest in pixclock */
+ var->pixclock = timings.pixel_clock != 0 ?
+ KHZ2PICOS(timings.pixel_clock) :
+ 0;
+ var->left_margin = timings.hfp;
+ var->right_margin = timings.hbp;
+ var->upper_margin = timings.vfp;
+ var->lower_margin = timings.vbp;
+ var->hsync_len = timings.hsw;
+ var->vsync_len = timings.vsw;
+ } else {
+ var->pixclock = 0;
+ var->left_margin = 0;
+ var->right_margin = 0;
+ var->upper_margin = 0;
+ var->lower_margin = 0;
+ var->hsync_len = 0;
+ var->vsync_len = 0;
+ }
+
+ /* TODO: get these from panel->config */
+ var->vmode = FB_VMODE_NONINTERLACED;
+ var->sync = 0;
+
+ return 0;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * fbdev framework callbacks
+ * ---------------------------------------------------------------------------
+ */
+static int omapfb_open(struct fb_info *fbi, int user)
+{
+ return 0;
+}
+
+static int omapfb_release(struct fb_info *fbi, int user)
+{
+#if 0
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
+
+ DBG("Closing fb with plane index %d\n", ofbi->id);
+
+ omapfb_lock(fbdev);
+
+ if (display && display->get_update_mode && display->update) {
+ /* XXX this update should probably be removed, but it's
+ * useful for debugging */
+ if (display->get_update_mode(display) ==
+ OMAP_DSS_UPDATE_MANUAL) {
+ u16 w, h;
+
+ if (display->sync)
+ display->sync(display);
+
+ display->get_resolution(display, &w, &h);
+ display->update(display, 0, 0, w, h);
+ }
+ }
+
+ if (display && display->sync)
+ display->sync(display);
+
+ omapfb_unlock(fbdev);
+#endif
+ return 0;
+}
+
+static unsigned calc_rotation_offset_dma(struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix, int rotation)
+{
+ unsigned offset;
+
+ offset = var->yoffset * fix->line_length +
+ var->xoffset * (var->bits_per_pixel >> 3);
+
+ return offset;
+}
+
+static unsigned calc_rotation_offset_vrfb(struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix, int rotation)
+{
+ unsigned offset;
+
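+ /*
+ * First compensate for the unused part of the rotated VRFB view
+ * (only yres of the yres_virtual lines are visible), then apply
+ * the panning offsets in the rotated coordinate system.
+ */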
+ if (rotation == FB_ROTATE_UD)
+ offset = (var->yres_virtual - var->yres) *
+ fix->line_length;
+ else if (rotation == FB_ROTATE_CW)
+ offset = (var->yres_virtual - var->yres) *
+ (var->bits_per_pixel >> 3);
+ else
+ offset = 0;
+
+ if (rotation == FB_ROTATE_UR)
+ offset += var->yoffset * fix->line_length +
+ var->xoffset * (var->bits_per_pixel >> 3);
+ else if (rotation == FB_ROTATE_UD)
+ offset -= var->yoffset * fix->line_length +
+ var->xoffset * (var->bits_per_pixel >> 3);
+ else if (rotation == FB_ROTATE_CW)
+ offset -= var->xoffset * fix->line_length +
+ var->yoffset * (var->bits_per_pixel >> 3);
+ else if (rotation == FB_ROTATE_CCW)
+ offset += var->xoffset * fix->line_length +
+ var->yoffset * (var->bits_per_pixel >> 3);
+
+ return offset;
+}
+
+
+/* setup overlay according to the fb */
+static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
+ u16 posx, u16 posy, u16 outw, u16 outh)
+{
+ int r = 0;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ enum omap_color_mode mode = 0;
+ int offset;
+ u32 data_start_p;
+ void __iomem *data_start_v;
+ struct omap_overlay_info info;
+ int xres, yres;
+ int screen_width;
+ int mirror;
+ int rotation = var->rotate;
+ int i;
+
+ for (i = 0; i < ofbi->num_overlays; i++) {
+ if (ovl != ofbi->overlays[i])
+ continue;
+
+ rotation = (rotation + ofbi->rotation[i]) % 4;
+ break;
+ }
+
+ DBG("setup_overlay %d, posx %d, posy %d, outw %d, outh %d\n", ofbi->id,
+ posx, posy, outw, outh);
+
+ if (rotation == FB_ROTATE_CW || rotation == FB_ROTATE_CCW) {
+ xres = var->yres;
+ yres = var->xres;
+ } else {
+ xres = var->xres;
+ yres = var->yres;
+ }
+
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation);
+ data_start_v = NULL;
+ } else {
+ data_start_p = omapfb_get_region_paddr(ofbi);
+ data_start_v = omapfb_get_region_vaddr(ofbi);
+ }
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+ offset = calc_rotation_offset_vrfb(var, fix, rotation);
+ else
+ offset = calc_rotation_offset_dma(var, fix, rotation);
+
+ data_start_p += offset;
+ data_start_v += offset;
+
+ if (offset)
+ DBG("offset %d, %d = %d\n",
+ var->xoffset, var->yoffset, offset);
+
+ DBG("paddr %x, vaddr %p\n", data_start_p, data_start_v);
+
+ r = fb_mode_to_dss_mode(var, &mode);
+ if (r) {
+ DBG("fb_mode_to_dss_mode failed");
+ goto err;
+ }
+
+ switch (var->nonstd) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUY422:
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ screen_width = fix->line_length
+ / (var->bits_per_pixel >> 2);
+ break;
+ }
+ default:
+ screen_width = fix->line_length / (var->bits_per_pixel >> 3);
+ break;
+ }
+
+ ovl->get_overlay_info(ovl, &info);
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
+ mirror = 0;
+ else
+ mirror = ofbi->mirror;
+
+ info.paddr = data_start_p;
+ info.vaddr = data_start_v;
+ info.screen_width = screen_width;
+ info.width = xres;
+ info.height = yres;
+ info.color_mode = mode;
+ info.rotation_type = ofbi->rotation_type;
+ info.rotation = rotation;
+ info.mirror = mirror;
+
+ info.pos_x = posx;
+ info.pos_y = posy;
+ info.out_width = outw;
+ info.out_height = outh;
+
+ r = ovl->set_overlay_info(ovl, &info);
+ if (r) {
+ DBG("ovl->setup_overlay_info failed\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ DBG("setup_overlay failed\n");
+ return r;
+}
+
+/* apply var to the overlay */
+int omapfb_apply_changes(struct fb_info *fbi, int init)
+{
+ int r = 0;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct omap_overlay *ovl;
+ u16 posx, posy;
+ u16 outw, outh;
+ int i;
+
+#ifdef DEBUG
+ if (omapfb_test_pattern)
+ fill_fb(fbi);
+#endif
+
+ for (i = 0; i < ofbi->num_overlays; i++) {
+ ovl = ofbi->overlays[i];
+
+ DBG("apply_changes, fb %d, ovl %d\n", ofbi->id, ovl->id);
+
+ if (ofbi->region.size == 0) {
+ /* the fb is not available. disable the overlay */
+ omapfb_overlay_enable(ovl, 0);
+ if (!init && ovl->manager)
+ ovl->manager->apply(ovl->manager);
+ continue;
+ }
+
+ if (init || (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ int rotation = (var->rotate + ofbi->rotation[i]) % 4;
+ if (rotation == FB_ROTATE_CW ||
+ rotation == FB_ROTATE_CCW) {
+ outw = var->yres;
+ outh = var->xres;
+ } else {
+ outw = var->xres;
+ outh = var->yres;
+ }
+ } else {
+ outw = ovl->info.out_width;
+ outh = ovl->info.out_height;
+ }
+
+ if (init) {
+ posx = 0;
+ posy = 0;
+ } else {
+ posx = ovl->info.pos_x;
+ posy = ovl->info.pos_y;
+ }
+
+ r = omapfb_setup_overlay(fbi, ovl, posx, posy, outw, outh);
+ if (r)
+ goto err;
+
+ if (!init && ovl->manager)
+ ovl->manager->apply(ovl->manager);
+ }
+ return 0;
+err:
+ DBG("apply_changes failed\n");
+ return r;
+}
+
+/* checks var and possibly tweaks it to something supported,
+ * DO NOT MODIFY PAR */
+static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
+{
+ int r;
+
+ DBG("check_var(%d)\n", FB2OFB(fbi)->id);
+
+ r = check_fb_var(fbi, var);
+
+ return r;
+}
+
+/* set the video mode according to info->var */
+static int omapfb_set_par(struct fb_info *fbi)
+{
+ int r;
+
+ DBG("set_par(%d)\n", FB2OFB(fbi)->id);
+
+ set_fb_fix(fbi);
+
+ r = setup_vrfb_rotation(fbi);
+ if (r)
+ return r;
+
+ r = omapfb_apply_changes(fbi, 0);
+
+ return r;
+}
+
+static int omapfb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ struct fb_var_screeninfo new_var;
+ int r;
+
+ DBG("pan_display(%d)\n", FB2OFB(fbi)->id);
+
+ if (var->xoffset == fbi->var.xoffset &&
+ var->yoffset == fbi->var.yoffset)
+ return 0;
+
+ new_var = fbi->var;
+ new_var.xoffset = var->xoffset;
+ new_var.yoffset = var->yoffset;
+
+ fbi->var = new_var;
+
+ r = omapfb_apply_changes(fbi, 0);
+
+ return r;
+}
+
+static void mmap_user_open(struct vm_area_struct *vma)
+{
+ struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
+
+ atomic_inc(&ofbi->map_count);
+}
+
+static void mmap_user_close(struct vm_area_struct *vma)
+{
+ struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
+
+ atomic_dec(&ofbi->map_count);
+}
+
+static struct vm_operations_struct mmap_user_ops = {
+ .open = mmap_user_open,
+ .close = mmap_user_close,
+};
+
+static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct fb_fix_screeninfo *fix = &fbi->fix;
+ unsigned long off;
+ unsigned long start;
+ u32 len;
+
+ if (vma->vm_end - vma->vm_start == 0)
+ return 0;
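+ /* vm_pgoff is in pages; reject offsets that would overflow in bytes */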
+ if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+ return -EINVAL;
+ off = vma->vm_pgoff << PAGE_SHIFT;
+
+ start = omapfb_get_region_paddr(ofbi);
+ len = fix->smem_len;
+ if (off >= len)
+ return -EINVAL;
+ if ((vma->vm_end - vma->vm_start + off) > len)
+ return -EINVAL;
+
+ off += start;
+
+ DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off);
+
+ vma->vm_pgoff = off >> PAGE_SHIFT;
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &mmap_user_ops;
+ vma->vm_private_data = ofbi;
+ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+ /* vm_ops.open won't be called for mmap itself. */
+ atomic_inc(&ofbi->map_count);
+ return 0;
+}
+
+/* Store a single color palette entry into a pseudo palette or the hardware
+ * palette if one is available. For now we support only 16bpp and thus store
+ * the entry only to the pseudo palette.
+ */
+static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
+ u_int blue, u_int transp, int update_hw_pal)
+{
+ /*struct omapfb_info *ofbi = FB2OFB(fbi);*/
+ /*struct omapfb2_device *fbdev = ofbi->fbdev;*/
+ struct fb_var_screeninfo *var = &fbi->var;
+ int r = 0;
+
+ enum omapfb_color_format mode = OMAPFB_COLOR_RGB24U; /* XXX */
+
+ /*switch (plane->color_mode) {*/
+ switch (mode) {
+ case OMAPFB_COLOR_YUV422:
+ case OMAPFB_COLOR_YUV420:
+ case OMAPFB_COLOR_YUY422:
+ r = -EINVAL;
+ break;
+ case OMAPFB_COLOR_CLUT_8BPP:
+ case OMAPFB_COLOR_CLUT_4BPP:
+ case OMAPFB_COLOR_CLUT_2BPP:
+ case OMAPFB_COLOR_CLUT_1BPP:
+ /*
+ if (fbdev->ctrl->setcolreg)
+ r = fbdev->ctrl->setcolreg(regno, red, green, blue,
+ transp, update_hw_pal);
+ */
+ /* Fallthrough */
+ r = -EINVAL;
+ break;
+ case OMAPFB_COLOR_RGB565:
+ case OMAPFB_COLOR_RGB444:
+ case OMAPFB_COLOR_RGB24P:
+ case OMAPFB_COLOR_RGB24U:
+ if (r != 0)
+ break;
+
+ if (regno < 0) {
+ r = -EINVAL;
+ break;
+ }
+
+ if (regno < 16) {
+ u16 pal;
+ pal = ((red >> (16 - var->red.length)) <<
+ var->red.offset) |
+ ((green >> (16 - var->green.length)) <<
+ var->green.offset) |
+ (blue >> (16 - var->blue.length));
+ ((u32 *)(fbi->pseudo_palette))[regno] = pal;
+ }
+ break;
+ default:
+ BUG();
+ }
+ return r;
+}
+
+static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ DBG("setcolreg\n");
+
+ return _setcolreg(info, regno, red, green, blue, transp, 1);
+}
+
+static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+{
+ int count, index, r;
+ u16 *red, *green, *blue, *transp;
+ u16 trans = 0xffff;
+
+ DBG("setcmap\n");
+
+ red = cmap->red;
+ green = cmap->green;
+ blue = cmap->blue;
+ transp = cmap->transp;
+ index = cmap->start;
+
+ for (count = 0; count < cmap->len; count++) {
+ if (transp)
+ trans = *transp++;
+ r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
+ count == cmap->len - 1);
+ if (r != 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int omapfb_blank(int blank, struct fb_info *fbi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
+ int do_update = 0;
+ int r = 0;
+
+ omapfb_lock(fbdev);
+
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
+ goto exit;
+
+ if (display->resume)
+ r = display->resume(display);
+
+ if (r == 0 && display->get_update_mode &&
+ display->get_update_mode(display) ==
+ OMAP_DSS_UPDATE_MANUAL)
+ do_update = 1;
+
+ break;
+
+ case FB_BLANK_NORMAL:
+ /* FB_BLANK_NORMAL could be implemented.
+ * Needs DSS additions. */
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
+ goto exit;
+
+ if (display->suspend)
+ r = display->suspend(display);
+
+ break;
+
+ default:
+ r = -EINVAL;
+ }
+
+exit:
+ omapfb_unlock(fbdev);
+
+ if (r == 0 && do_update && display->update) {
+ u16 w, h;
+ display->get_resolution(display, &w, &h);
+
+ r = display->update(display, 0, 0, w, h);
+ }
+
+ return r;
+}
+
+#if 0
+/* XXX fb_read and fb_write are needed for VRFB */
+ssize_t omapfb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ DBG("omapfb_write %d, %lu\n", count, (unsigned long)*ppos);
+ /* XXX needed for VRFB */
+ return count;
+}
+#endif
+
+static struct fb_ops omapfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = omapfb_open,
+ .fb_release = omapfb_release,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_blank = omapfb_blank,
+ .fb_ioctl = omapfb_ioctl,
+ .fb_check_var = omapfb_check_var,
+ .fb_set_par = omapfb_set_par,
+ .fb_pan_display = omapfb_pan_display,
+ .fb_mmap = omapfb_mmap,
+ .fb_setcolreg = omapfb_setcolreg,
+ .fb_setcmap = omapfb_setcmap,
+ /*.fb_write = omapfb_write,*/
+};
+
+static void omapfb_free_fbmem(struct fb_info *fbi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb2_mem_region *rg;
+
+ rg = &ofbi->region;
+
+ if (rg->paddr)
+ if (omap_vram_free(rg->paddr, rg->size))
+ dev_err(fbdev->dev, "VRAM FREE failed\n");
+
+ if (rg->vaddr)
+ iounmap(rg->vaddr);
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ /* unmap the 0 angle rotation */
+ if (rg->vrfb.vaddr[0]) {
+ iounmap(rg->vrfb.vaddr[0]);
+ omap_vrfb_release_ctx(&rg->vrfb);
+ }
+ }
+
+ rg->vaddr = NULL;
+ rg->paddr = 0;
+ rg->alloc = 0;
+ rg->size = 0;
+}
+
+static void clear_fb_info(struct fb_info *fbi)
+{
+ memset(&fbi->var, 0, sizeof(fbi->var));
+ memset(&fbi->fix, 0, sizeof(fbi->fix));
+ strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
+}
+
+static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
+{
+ int i;
+
+ DBG("free all fbmem\n");
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct fb_info *fbi = fbdev->fbs[i];
+ omapfb_free_fbmem(fbi);
+ clear_fb_info(fbi);
+ }
+
+ return 0;
+}
+
+static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
+ unsigned long paddr)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb2_mem_region *rg;
+ void __iomem *vaddr;
+ int r;
+
+ rg = &ofbi->region;
+ memset(rg, 0, sizeof(*rg));
+
+ size = PAGE_ALIGN(size);
+
+ if (!paddr) {
+ DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
+ r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr);
+ } else {
+ DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr,
+ ofbi->id);
+ r = omap_vram_reserve(paddr, size);
+ }
+
+ if (r) {
+ dev_err(fbdev->dev, "failed to allocate framebuffer\n");
+ return -ENOMEM;
+ }
+
+ if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) {
+ vaddr = ioremap_wc(paddr, size);
+
+ if (!vaddr) {
+ dev_err(fbdev->dev, "failed to ioremap framebuffer\n");
+ omap_vram_free(paddr, size);
+ return -ENOMEM;
+ }
+
+ DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
+ } else {
+ r = omap_vrfb_request_ctx(&rg->vrfb);
+ if (r) {
+ dev_err(fbdev->dev, "vrfb create ctx failed\n");
+ return r;
+ }
+
+ vaddr = NULL;
+ }
+
+ rg->paddr = paddr;
+ rg->vaddr = vaddr;
+ rg->size = size;
+ rg->alloc = 1;
+
+ return 0;
+}
+
+/* allocate fbmem using display resolution as reference */
+static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
+ unsigned long paddr)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omap_dss_device *display;
+ int bytespp;
+
+ display = fb2display(fbi);
+
+ if (!display)
+ return 0;
+
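+ /* 24bpp displays are backed by 32-bit (RGB24U) framebuffer memory */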
+ switch (display->get_recommended_bpp(display)) {
+ case 16:
+ bytespp = 2;
+ break;
+ case 24:
+ bytespp = 4;
+ break;
+ default:
+ bytespp = 4;
+ break;
+ }
+
+ if (!size) {
+ u16 w, h;
+
+ display->get_resolution(display, &w, &h);
+
+ if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
+ size = max(omap_vrfb_min_phys_size(w, h, bytespp),
+ omap_vrfb_min_phys_size(h, w, bytespp));
+
+ DBG("adjusting fb mem size for VRFB, %u -> %lu\n",
+ w * h * bytespp, size);
+ } else {
+ size = w * h * bytespp;
+ }
+ }
+
+ if (!size)
+ return 0;
+
+ return omapfb_alloc_fbmem(fbi, size, paddr);
+}
+
+static enum omap_color_mode fb_format_to_dss_mode(enum omapfb_color_format fmt)
+{
+ enum omap_color_mode mode;
+
+ switch (fmt) {
+ case OMAPFB_COLOR_RGB565:
+ mode = OMAP_DSS_COLOR_RGB16;
+ break;
+ case OMAPFB_COLOR_YUV422:
+ mode = OMAP_DSS_COLOR_YUV2;
+ break;
+ case OMAPFB_COLOR_CLUT_8BPP:
+ mode = OMAP_DSS_COLOR_CLUT8;
+ break;
+ case OMAPFB_COLOR_CLUT_4BPP:
+ mode = OMAP_DSS_COLOR_CLUT4;
+ break;
+ case OMAPFB_COLOR_CLUT_2BPP:
+ mode = OMAP_DSS_COLOR_CLUT2;
+ break;
+ case OMAPFB_COLOR_CLUT_1BPP:
+ mode = OMAP_DSS_COLOR_CLUT1;
+ break;
+ case OMAPFB_COLOR_RGB444:
+ mode = OMAP_DSS_COLOR_RGB12U;
+ break;
+ case OMAPFB_COLOR_YUY422:
+ mode = OMAP_DSS_COLOR_UYVY;
+ break;
+ case OMAPFB_COLOR_ARGB16:
+ mode = OMAP_DSS_COLOR_ARGB16;
+ break;
+ case OMAPFB_COLOR_RGB24U:
+ mode = OMAP_DSS_COLOR_RGB24U;
+ break;
+ case OMAPFB_COLOR_RGB24P:
+ mode = OMAP_DSS_COLOR_RGB24P;
+ break;
+ case OMAPFB_COLOR_ARGB32:
+ mode = OMAP_DSS_COLOR_ARGB32;
+ break;
+ case OMAPFB_COLOR_RGBA32:
+ mode = OMAP_DSS_COLOR_RGBA32;
+ break;
+ case OMAPFB_COLOR_RGBX32:
+ mode = OMAP_DSS_COLOR_RGBX32;
+ break;
+ default:
+ mode = -EINVAL;
+ }
+
+ return mode;
+}
+
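+/*
+ * Parse the 'vram' module parameter. Each entry has the form
+ * <fbnum>:<size>[@<physaddr>], with entries separated by commas; the
+ * size accepts the usual memparse suffixes and the physical address
+ * is given in hex.
+ */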
+static int omapfb_parse_vram_param(const char *param, int max_entries,
+ unsigned long *sizes, unsigned long *paddrs)
+{
+ int fbnum;
+ unsigned long size;
+ unsigned long paddr = 0;
+ char *p, *start;
+
+ start = (char *)param;
+
+ while (1) {
+ p = start;
+
+ fbnum = simple_strtoul(p, &p, 10);
+
+ if (p == param)
+ return -EINVAL;
+
+ if (*p != ':')
+ return -EINVAL;
+
+ if (fbnum >= max_entries)
+ return -EINVAL;
+
+ size = memparse(p + 1, &p);
+
+ if (!size)
+ return -EINVAL;
+
+ paddr = 0;
+
+ if (*p == '@') {
+ paddr = simple_strtoul(p + 1, &p, 16);
+
+ if (!paddr)
+ return -EINVAL;
+
+ }
+
+ paddrs[fbnum] = paddr;
+ sizes[fbnum] = size;
+
+ if (*p == 0)
+ break;
+
+ if (*p != ',')
+ return -EINVAL;
+
+ ++p;
+
+ start = p;
+ }
+
+ return 0;
+}
+
+static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev)
+{
+ int i, r;
+ unsigned long vram_sizes[10];
+ unsigned long vram_paddrs[10];
+
+ memset(&vram_sizes, 0, sizeof(vram_sizes));
+ memset(&vram_paddrs, 0, sizeof(vram_paddrs));
+
+ if (def_vram && omapfb_parse_vram_param(def_vram, 10,
+ vram_sizes, vram_paddrs)) {
+ dev_err(fbdev->dev, "failed to parse vram parameter\n");
+
+ memset(&vram_sizes, 0, sizeof(vram_sizes));
+ memset(&vram_paddrs, 0, sizeof(vram_paddrs));
+ }
+
+ if (fbdev->dev->platform_data) {
+ struct omapfb_platform_data *opd;
+ opd = fbdev->dev->platform_data;
+ for (i = 0; i < opd->mem_desc.region_cnt; ++i) {
+ if (!vram_sizes[i]) {
+ unsigned long size;
+ unsigned long paddr;
+
+ size = opd->mem_desc.region[i].size;
+ paddr = opd->mem_desc.region[i].paddr;
+
+ vram_sizes[i] = size;
+ vram_paddrs[i] = paddr;
+ }
+ }
+ }
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ /* allocate memory automatically only for fb0, or if
+ * explicitly defined with the vram or platform data option */
+ if (i == 0 || vram_sizes[i] != 0) {
+ r = omapfb_alloc_fbmem_display(fbdev->fbs[i],
+ vram_sizes[i], vram_paddrs[i]);
+
+ if (r)
+ return r;
+ }
+ }
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+ struct omapfb2_mem_region *rg;
+ rg = &ofbi->region;
+
+ DBG("region%d phys %08x virt %p size=%lu\n",
+ i,
+ rg->paddr,
+ rg->vaddr,
+ rg->size);
+ }
+
+ return 0;
+}
+
+int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_dss_device *display = fb2display(fbi);
+ struct omapfb2_mem_region *rg = &ofbi->region;
+ unsigned long old_size = rg->size;
+ unsigned long old_paddr = rg->paddr;
+ int old_type = rg->type;
+ int r;
+
+ if (type > OMAPFB_MEMTYPE_MAX)
+ return -EINVAL;
+
+ size = PAGE_ALIGN(size);
+
+ if (old_size == size && old_type == type)
+ return 0;
+
+ if (display && display->sync)
+ display->sync(display);
+
+ omapfb_free_fbmem(fbi);
+
+ if (size == 0) {
+ clear_fb_info(fbi);
+ return 0;
+ }
+
+ r = omapfb_alloc_fbmem(fbi, size, 0);
+
+ if (r) {
+ if (old_size)
+ omapfb_alloc_fbmem(fbi, old_size, old_paddr);
+
+ if (rg->size == 0)
+ clear_fb_info(fbi);
+
+ return r;
+ }
+
+ if (old_size == size)
+ return 0;
+
+ if (old_size == 0) {
+ DBG("initializing fb %d\n", ofbi->id);
+ r = omapfb_fb_init(fbdev, fbi);
+ if (r) {
+ DBG("omapfb_fb_init failed\n");
+ goto err;
+ }
+ r = omapfb_apply_changes(fbi, 1);
+ if (r) {
+ DBG("omapfb_apply_changes failed\n");
+ goto err;
+ }
+ } else {
+ struct fb_var_screeninfo new_var;
+ memcpy(&new_var, &fbi->var, sizeof(new_var));
+ r = check_fb_var(fbi, &new_var);
+ if (r)
+ goto err;
+ memcpy(&fbi->var, &new_var, sizeof(fbi->var));
+ set_fb_fix(fbi);
+ r = setup_vrfb_rotation(fbi);
+ if (r)
+ goto err;
+ }
+
+ return 0;
+err:
+ omapfb_free_fbmem(fbi);
+ clear_fb_info(fbi);
+ return r;
+}
+
+/* initialize fb_info, var, fix to something sane based on the display */
+static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
+{
+ struct fb_var_screeninfo *var = &fbi->var;
+ struct omap_dss_device *display = fb2display(fbi);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ int r = 0;
+
+ fbi->fbops = &omapfb_ops;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->pseudo_palette = fbdev->pseudo_palette;
+
+ if (ofbi->region.size == 0) {
+ clear_fb_info(fbi);
+ return 0;
+ }
+
+ var->nonstd = 0;
+ var->bits_per_pixel = 0;
+
+ var->rotate = def_rotate;
+
+ /*
+ * Check if there is a default color format set in the board file,
+ * and use this format instead of the default deduced from the
+ * display bpp.
+ */
+ if (fbdev->dev->platform_data) {
+ struct omapfb_platform_data *opd;
+ int id = ofbi->id;
+
+ opd = fbdev->dev->platform_data;
+ if (opd->mem_desc.region[id].format_used) {
+ enum omap_color_mode mode;
+ enum omapfb_color_format format;
+
+ format = opd->mem_desc.region[id].format;
+ mode = fb_format_to_dss_mode(format);
+ if (mode < 0) {
+ r = mode;
+ goto err;
+ }
+ r = dss_mode_to_fb_mode(mode, var);
+ if (r < 0)
+ goto err;
+ }
+ }
+
+ if (display) {
+ u16 w, h;
+ int rotation = (var->rotate + ofbi->rotation[0]) % 4;
+
+ display->get_resolution(display, &w, &h);
+
+ if (rotation == FB_ROTATE_CW ||
+ rotation == FB_ROTATE_CCW) {
+ var->xres = h;
+ var->yres = w;
+ } else {
+ var->xres = w;
+ var->yres = h;
+ }
+
+ var->xres_virtual = var->xres;
+ var->yres_virtual = var->yres;
+
+ if (!var->bits_per_pixel) {
+ switch (display->get_recommended_bpp(display)) {
+ case 16:
+ var->bits_per_pixel = 16;
+ break;
+ case 24:
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ dev_err(fbdev->dev, "illegal display "
+ "bpp\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* if there's no display, let's just guess some basic values */
+ var->xres = 320;
+ var->yres = 240;
+ var->xres_virtual = var->xres;
+ var->yres_virtual = var->yres;
+ if (!var->bits_per_pixel)
+ var->bits_per_pixel = 16;
+ }
+
+ r = check_fb_var(fbi, var);
+ if (r)
+ goto err;
+
+ set_fb_fix(fbi);
+ r = setup_vrfb_rotation(fbi);
+ if (r)
+ goto err;
+
+ r = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (r)
+ dev_err(fbdev->dev, "unable to allocate color map memory\n");
+
+err:
+ return r;
+}
+
+static void fbinfo_cleanup(struct omapfb2_device *fbdev, struct fb_info *fbi)
+{
+ fb_dealloc_cmap(&fbi->cmap);
+}
+
+
+static void omapfb_free_resources(struct omapfb2_device *fbdev)
+{
+ int i;
+
+ DBG("free_resources\n");
+
+ if (fbdev == NULL)
+ return;
+
+ for (i = 0; i < fbdev->num_fbs; i++)
+ unregister_framebuffer(fbdev->fbs[i]);
+
+ /* free the reserved fbmem */
+ omapfb_free_all_fbmem(fbdev);
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ fbinfo_cleanup(fbdev, fbdev->fbs[i]);
+ framebuffer_release(fbdev->fbs[i]);
+ }
+
+ for (i = 0; i < fbdev->num_displays; i++) {
+ if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
+ fbdev->displays[i]->disable(fbdev->displays[i]);
+
+ omap_dss_put_device(fbdev->displays[i]);
+ }
+
+ dev_set_drvdata(fbdev->dev, NULL);
+ kfree(fbdev);
+}
+
+static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
+{
+ int r, i;
+
+ fbdev->num_fbs = 0;
+
+ DBG("create %d framebuffers\n", CONFIG_FB_OMAP2_NUM_FBS);
+
+ /* allocate fb_infos */
+ for (i = 0; i < CONFIG_FB_OMAP2_NUM_FBS; i++) {
+ struct fb_info *fbi;
+ struct omapfb_info *ofbi;
+
+ fbi = framebuffer_alloc(sizeof(struct omapfb_info),
+ fbdev->dev);
+
+ if (fbi == NULL) {
+ dev_err(fbdev->dev,
+ "unable to allocate memory for plane info\n");
+ return -ENOMEM;
+ }
+
+ clear_fb_info(fbi);
+
+ fbdev->fbs[i] = fbi;
+
+ ofbi = FB2OFB(fbi);
+ ofbi->fbdev = fbdev;
+ ofbi->id = i;
+
+ /* assign these early, so that fb alloc can use them */
+ ofbi->rotation_type = def_vrfb ? OMAP_DSS_ROT_VRFB :
+ OMAP_DSS_ROT_DMA;
+ ofbi->mirror = def_mirror;
+
+ fbdev->num_fbs++;
+ }
+
+ DBG("fb_infos allocated\n");
+
+ /* assign overlays for the fbs */
+ for (i = 0; i < min(fbdev->num_fbs, fbdev->num_overlays); i++) {
+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+
+ ofbi->overlays[0] = fbdev->overlays[i];
+ ofbi->num_overlays = 1;
+ }
+
+ /* allocate fb memories */
+ r = omapfb_allocate_all_fbs(fbdev);
+ if (r) {
+ dev_err(fbdev->dev, "failed to allocate fbmem\n");
+ return r;
+ }
+
+ DBG("fbmems allocated\n");
+
+ /* setup fb_infos */
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ r = omapfb_fb_init(fbdev, fbdev->fbs[i]);
+ if (r) {
+ dev_err(fbdev->dev, "failed to setup fb_info\n");
+ return r;
+ }
+ }
+
+ DBG("fb_infos initialized\n");
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ r = register_framebuffer(fbdev->fbs[i]);
+ if (r != 0) {
+ dev_err(fbdev->dev,
+ "registering framebuffer %d failed\n", i);
+ return r;
+ }
+ }
+
+ DBG("framebuffers registered\n");
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ r = omapfb_apply_changes(fbdev->fbs[i], 1);
+ if (r) {
+ dev_err(fbdev->dev, "failed to change mode\n");
+ return r;
+ }
+ }
+
+ DBG("create sysfs for fbs\n");
+ r = omapfb_create_sysfs(fbdev);
+ if (r) {
+ dev_err(fbdev->dev, "failed to create sysfs entries\n");
+ return r;
+ }
+
+ /* Enable fb0 */
+ if (fbdev->num_fbs > 0) {
+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[0]);
+
+ if (ofbi->num_overlays > 0) {
+ struct omap_overlay *ovl = ofbi->overlays[0];
+
+ r = omapfb_overlay_enable(ovl, 1);
+
+ if (r) {
+ dev_err(fbdev->dev,
+ "failed to enable overlay\n");
+ return r;
+ }
+ }
+ }
+
+ DBG("create_framebuffers done\n");
+
+ return 0;
+}
+
+static int omapfb_mode_to_timings(const char *mode_str,
+ struct omap_video_timings *timings, u8 *bpp)
+{
+ struct fb_info fbi;
+ struct fb_var_screeninfo var;
+ struct fb_ops fbops;
+ int r;
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ if (strcmp(mode_str, "pal") == 0) {
+ *timings = omap_dss_pal_timings;
+ *bpp = 0;
+ return 0;
+ } else if (strcmp(mode_str, "ntsc") == 0) {
+ *timings = omap_dss_ntsc_timings;
+ *bpp = 0;
+ return 0;
+ }
+#endif
+
+ /* this is quite a hack, but I wanted to use the modedb and for
+ * that we need fb_info and var, so we create dummy ones */
+
+ memset(&fbi, 0, sizeof(fbi));
+ memset(&var, 0, sizeof(var));
+ memset(&fbops, 0, sizeof(fbops));
+ fbi.fbops = &fbops;
+
+ r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24);
+
+ if (r != 0) {
+ timings->pixel_clock = PICOS2KHZ(var.pixclock);
+ timings->hfp = var.left_margin;
+ timings->hbp = var.right_margin;
+ timings->vfp = var.upper_margin;
+ timings->vbp = var.lower_margin;
+ timings->hsw = var.hsync_len;
+ timings->vsw = var.vsync_len;
+ timings->x_res = var.xres;
+ timings->y_res = var.yres;
+
+ switch (var.bits_per_pixel) {
+ case 16:
+ *bpp = 16;
+ break;
+ case 24:
+ case 32:
+ default:
+ *bpp = 24;
+ break;
+ }
+
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int omapfb_set_def_mode(struct omap_dss_device *display, char *mode_str)
+{
+ int r;
+ u8 bpp;
+ struct omap_video_timings timings;
+
+ r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
+ if (r)
+ return r;
+
+ display->panel.recommended_bpp = bpp;
+
+ if (!display->check_timings || !display->set_timings)
+ return -EINVAL;
+
+ r = display->check_timings(display, &timings);
+ if (r)
+ return r;
+
+ display->set_timings(display, &timings);
+
+ return 0;
+}
+
+static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
+{
+ char *str, *options, *this_opt;
+ int r = 0;
+
+ str = kmalloc(strlen(def_mode) + 1, GFP_KERNEL);
+ strcpy(str, def_mode);
+ options = str;
+
+ while (!r && (this_opt = strsep(&options, ",")) != NULL) {
+ char *p, *display_str, *mode_str;
+ struct omap_dss_device *display;
+ int i;
+
+ p = strchr(this_opt, ':');
+ if (!p) {
+ r = -EINVAL;
+ break;
+ }
+
+ *p = 0;
+ display_str = this_opt;
+ mode_str = p + 1;
+
+ display = NULL;
+ for (i = 0; i < fbdev->num_displays; ++i) {
+ if (strcmp(fbdev->displays[i]->name,
+ display_str) == 0) {
+ display = fbdev->displays[i];
+ break;
+ }
+ }
+
+ if (!display) {
+ r = -EINVAL;
+ break;
+ }
+
+ r = omapfb_set_def_mode(display, mode_str);
+ if (r)
+ break;
+ }
+
+ kfree(str);
+
+ return r;
+}
+
+static int omapfb_probe(struct platform_device *pdev)
+{
+ struct omapfb2_device *fbdev = NULL;
+ int r = 0;
+ int i;
+ struct omap_overlay *ovl;
+ struct omap_dss_device *def_display;
+ struct omap_dss_device *dssdev;
+
+ DBG("omapfb_probe\n");
+
+ if (pdev->num_resources != 0) {
+ dev_err(&pdev->dev, "probed for an unknown device\n");
+ r = -ENODEV;
+ goto err0;
+ }
+
+ fbdev = kzalloc(sizeof(struct omapfb2_device), GFP_KERNEL);
+ if (fbdev == NULL) {
+ r = -ENOMEM;
+ goto err0;
+ }
+
+ mutex_init(&fbdev->mtx);
+
+ fbdev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, fbdev);
+
+ fbdev->num_displays = 0;
+ dssdev = NULL;
+ for_each_dss_dev(dssdev) {
+ omap_dss_get_device(dssdev);
+ fbdev->displays[fbdev->num_displays++] = dssdev;
+ }
+
+ if (fbdev->num_displays == 0) {
+ dev_err(&pdev->dev, "no displays\n");
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ fbdev->num_overlays = omap_dss_get_num_overlays();
+ for (i = 0; i < fbdev->num_overlays; i++)
+ fbdev->overlays[i] = omap_dss_get_overlay(i);
+
+ fbdev->num_managers = omap_dss_get_num_overlay_managers();
+ for (i = 0; i < fbdev->num_managers; i++)
+ fbdev->managers[i] = omap_dss_get_overlay_manager(i);
+
+ if (def_mode && strlen(def_mode) > 0) {
+ if (omapfb_parse_def_modes(fbdev))
+ dev_warn(&pdev->dev, "cannot parse default modes\n");
+ }
+
+ r = omapfb_create_framebuffers(fbdev);
+ if (r)
+ goto cleanup;
+
+ for (i = 0; i < fbdev->num_managers; i++) {
+ struct omap_overlay_manager *mgr;
+ mgr = fbdev->managers[i];
+ r = mgr->apply(mgr);
+ if (r)
+ dev_warn(fbdev->dev, "failed to apply dispc config\n");
+ }
+
+ DBG("mgr->apply'ed\n");
+
+ /* the gfx overlay should be the default one; find a display
+ * connected to it, and use it as the default display */
+ ovl = omap_dss_get_overlay(0);
+ if (ovl->manager && ovl->manager->device) {
+ def_display = ovl->manager->device;
+ } else {
+ dev_warn(&pdev->dev, "cannot find default display\n");
+ def_display = NULL;
+ }
+
+ if (def_display) {
+#ifndef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+ u16 w, h;
+#endif
+ r = def_display->enable(def_display);
+ if (r)
+ dev_warn(fbdev->dev, "Failed to enable display '%s'\n",
+ def_display->name);
+
+ /* set the update mode */
+ if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 1);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+#else /* MANUAL_UPDATE */
+ if (def_display->enable_te)
+ def_display->enable_te(def_display, 0);
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_MANUAL);
+
+ def_display->get_resolution(def_display, &w, &h);
+ def_display->update(def_display, 0, 0, w, h);
+#endif
+ } else {
+ if (def_display->set_update_mode)
+ def_display->set_update_mode(def_display,
+ OMAP_DSS_UPDATE_AUTO);
+ }
+ }
+
+ return 0;
+
+cleanup:
+ omapfb_free_resources(fbdev);
+err0:
+ dev_err(&pdev->dev, "failed to setup omapfb\n");
+ return r;
+}
+
+static int omapfb_remove(struct platform_device *pdev)
+{
+ struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
+
+ /* FIXME: wait till completion of pending events */
+
+ omapfb_remove_sysfs(fbdev);
+
+ omapfb_free_resources(fbdev);
+
+ return 0;
+}
+
+static struct platform_driver omapfb_driver = {
+ .probe = omapfb_probe,
+ .remove = omapfb_remove,
+ .driver = {
+ .name = "omapfb",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omapfb_init(void)
+{
+ DBG("omapfb_init\n");
+
+ if (platform_driver_register(&omapfb_driver)) {
+ printk(KERN_ERR "failed to register omapfb driver\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit omapfb_exit(void)
+{
+ DBG("omapfb_exit\n");
+ platform_driver_unregister(&omapfb_driver);
+}
+
+module_param_named(mode, def_mode, charp, 0);
+module_param_named(vram, def_vram, charp, 0);
+module_param_named(rotate, def_rotate, int, 0);
+module_param_named(vrfb, def_vrfb, bool, 0);
+module_param_named(mirror, def_mirror, bool, 0);
+
+/* late_initcall to let panel/ctrl drivers be loaded first.
+ * A better option would be a more dynamic approach,
+ * so that omapfb reacts to new panels when they are loaded */
+late_initcall(omapfb_init);
+/*module_init(omapfb_init);*/
+module_exit(omapfb_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
new file mode 100644
index 00000000000..62bb88f5c19
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -0,0 +1,507 @@
+/*
+ * linux/drivers/video/omap2/omapfb-sysfs.c
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fb.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/omapfb.h>
+
+#include <plat/display.h>
+#include <plat/vrfb.h>
+
+#include "omapfb.h"
+
+static ssize_t show_rotate_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type);
+}
+
+static ssize_t store_rotate_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ enum omap_dss_rotation_type rot_type;
+ int r;
+
+ rot_type = simple_strtoul(buf, NULL, 0);
+
+ if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
+ return -EINVAL;
+
+ lock_fb_info(fbi);
+
+ r = 0;
+ if (rot_type == ofbi->rotation_type)
+ goto out;
+
+ if (ofbi->region.size) {
+ r = -EBUSY;
+ goto out;
+ }
+
+ ofbi->rotation_type = rot_type;
+
+ /*
+ * Since the VRAM for this FB is not allocated at the moment we don't
+ * need to do any further parameter checking at this point.
+ */
+out:
+ unlock_fb_info(fbi);
+
+ return r ? r : count;
+}
+
+
+static ssize_t show_mirror(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror);
+}
+
+static ssize_t store_mirror(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ bool mirror;
+ int r;
+ struct fb_var_screeninfo new_var;
+
+ mirror = simple_strtoul(buf, NULL, 0);
+
+ if (mirror != 0 && mirror != 1)
+ return -EINVAL;
+
+ lock_fb_info(fbi);
+
+ ofbi->mirror = mirror;
+
+ memcpy(&new_var, &fbi->var, sizeof(new_var));
+ r = check_fb_var(fbi, &new_var);
+ if (r)
+ goto out;
+ memcpy(&fbi->var, &new_var, sizeof(fbi->var));
+
+ set_fb_fix(fbi);
+
+ r = omapfb_apply_changes(fbi, 0);
+ if (r)
+ goto out;
+
+ r = count;
+out:
+ unlock_fb_info(fbi);
+
+ return r;
+}
+
+static ssize_t show_overlays(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ ssize_t l = 0;
+ int t;
+
+ omapfb_lock(fbdev);
+ lock_fb_info(fbi);
+
+ for (t = 0; t < ofbi->num_overlays; t++) {
+ struct omap_overlay *ovl = ofbi->overlays[t];
+ int ovlnum;
+
+ for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum)
+ if (ovl == fbdev->overlays[ovlnum])
+ break;
+
+ l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+ t == 0 ? "" : ",", ovlnum);
+ }
+
+ l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+
+ unlock_fb_info(fbi);
+ omapfb_unlock(fbdev);
+
+ return l;
+}
+
+static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev,
+ struct omap_overlay *ovl)
+{
+ int i, t;
+
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
+
+ for (t = 0; t < ofbi->num_overlays; t++) {
+ if (ofbi->overlays[t] == ovl)
+ return ofbi;
+ }
+ }
+
+ return NULL;
+}
+
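+/*
+ * Attach the overlays listed in 'buf' (comma-separated overlay numbers)
+ * to this framebuffer and detach the ones that are no longer listed.
+ */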
+static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB];
+ struct omap_overlay *ovl;
+ int num_ovls, r, i;
+ int len;
+ bool added = false;
+
+ num_ovls = 0;
+
+ len = strlen(buf);
+ if (buf[len - 1] == '\n')
+ len = len - 1;
+
+ omapfb_lock(fbdev);
+ lock_fb_info(fbi);
+
+ if (len > 0) {
+ char *p = (char *)buf;
+ int ovlnum;
+
+ while (p < buf + len) {
+ int found;
+ if (num_ovls == OMAPFB_MAX_OVL_PER_FB) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ ovlnum = simple_strtoul(p, &p, 0);
+ if (ovlnum > fbdev->num_overlays) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ found = 0;
+ for (i = 0; i < num_ovls; ++i) {
+ if (ovls[i] == fbdev->overlays[ovlnum]) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ ovls[num_ovls++] = fbdev->overlays[ovlnum];
+
+ p++;
+ }
+ }
+
+ for (i = 0; i < num_ovls; ++i) {
+ struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]);
+ if (ofbi2 && ofbi2 != ofbi) {
+ dev_err(fbdev->dev, "overlay already in use\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* detach unused overlays */
+ for (i = 0; i < ofbi->num_overlays; ++i) {
+ int t, found;
+
+ ovl = ofbi->overlays[i];
+
+ found = 0;
+
+ for (t = 0; t < num_ovls; ++t) {
+ if (ovl == ovls[t]) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ DBG("detaching %d\n", ofbi->overlays[i]->id);
+
+ omapfb_overlay_enable(ovl, 0);
+
+ if (ovl->manager)
+ ovl->manager->apply(ovl->manager);
+
+ for (t = i + 1; t < ofbi->num_overlays; t++) {
+ ofbi->rotation[t-1] = ofbi->rotation[t];
+ ofbi->overlays[t-1] = ofbi->overlays[t];
+ }
+
+ ofbi->num_overlays--;
+ i--;
+ }
+
+ for (i = 0; i < num_ovls; ++i) {
+ int t, found;
+
+ ovl = ovls[i];
+
+ found = 0;
+
+ for (t = 0; t < ofbi->num_overlays; ++t) {
+ if (ovl == ofbi->overlays[t]) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+ ofbi->rotation[ofbi->num_overlays] = 0;
+ ofbi->overlays[ofbi->num_overlays++] = ovl;
+
+ added = true;
+ }
+
+ if (added) {
+ r = omapfb_apply_changes(fbi, 0);
+ if (r)
+ goto out;
+ }
+
+ r = count;
+out:
+ unlock_fb_info(fbi);
+ omapfb_unlock(fbdev);
+
+ return r;
+}
+
+static ssize_t show_overlays_rotate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ ssize_t l = 0;
+ int t;
+
+ lock_fb_info(fbi);
+
+ for (t = 0; t < ofbi->num_overlays; t++) {
+ l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
+ t == 0 ? "" : ",", ofbi->rotation[t]);
+ }
+
+ l += snprintf(buf + l, PAGE_SIZE - l, "\n");
+
+ unlock_fb_info(fbi);
+
+ return l;
+}
+
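+/*
+ * Set the extra rotation (0-3) for each overlay attached to this
+ * framebuffer; 'buf' must list one value per attached overlay.
+ */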
+static ssize_t store_overlays_rotate(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ int num_ovls = 0, r, i;
+ int len;
+ bool changed = false;
+ u8 rotation[OMAPFB_MAX_OVL_PER_FB];
+
+ len = strlen(buf);
+ if (buf[len - 1] == '\n')
+ len = len - 1;
+
+ lock_fb_info(fbi);
+
+ if (len > 0) {
+ char *p = (char *)buf;
+
+ while (p < buf + len) {
+ int rot;
+
+ if (num_ovls == ofbi->num_overlays) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ rot = simple_strtoul(p, &p, 0);
+ if (rot < 0 || rot > 3) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (ofbi->rotation[num_ovls] != rot)
+ changed = true;
+
+ rotation[num_ovls++] = rot;
+
+ p++;
+ }
+ }
+
+ if (num_ovls != ofbi->num_overlays) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (changed) {
+ for (i = 0; i < num_ovls; ++i)
+ ofbi->rotation[i] = rotation[i];
+
+ r = omapfb_apply_changes(fbi, 0);
+ if (r)
+ goto out;
+
+ /* FIXME error handling? */
+ }
+
+ r = count;
+out:
+ unlock_fb_info(fbi);
+
+ return r;
+}
+
+static ssize_t show_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region.size);
+}
+
+static ssize_t store_size(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ unsigned long size;
+ int r;
+ int i;
+
+ size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0));
+
+ lock_fb_info(fbi);
+
+ for (i = 0; i < ofbi->num_overlays; i++) {
+ if (ofbi->overlays[i]->info.enabled) {
+ r = -EBUSY;
+ goto out;
+ }
+ }
+
+ if (size != ofbi->region.size) {
+ r = omapfb_realloc_fbmem(fbi, size, ofbi->region.type);
+ if (r) {
+ dev_err(dev, "realloc fbmem failed\n");
+ goto out;
+ }
+ }
+
+ r = count;
+out:
+ unlock_fb_info(fbi);
+
+ return r;
+}
+
+static ssize_t show_phys(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ return snprintf(buf, PAGE_SIZE, "%0x\n", ofbi->region.paddr);
+}
+
+static ssize_t show_virt(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+
+ return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region.vaddr);
+}
+
+static struct device_attribute omapfb_attrs[] = {
+ __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
+ store_rotate_type),
+ __ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror),
+ __ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size),
+ __ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays),
+ __ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate,
+ store_overlays_rotate),
+ __ATTR(phys_addr, S_IRUGO, show_phys, NULL),
+ __ATTR(virt_addr, S_IRUGO, show_virt, NULL),
+};
+
+int omapfb_create_sysfs(struct omapfb2_device *fbdev)
+{
+ int i;
+ int r;
+
+ DBG("create sysfs for fbs\n");
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ int t;
+ for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) {
+ r = device_create_file(fbdev->fbs[i]->dev,
+ &omapfb_attrs[t]);
+
+ if (r) {
+ dev_err(fbdev->dev, "failed to create sysfs "
+ "file\n");
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void omapfb_remove_sysfs(struct omapfb2_device *fbdev)
+{
+ int i, t;
+
+ DBG("remove sysfs for fbs\n");
+ for (i = 0; i < fbdev->num_fbs; i++) {
+ for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++)
+ device_remove_file(fbdev->fbs[i]->dev,
+ &omapfb_attrs[t]);
+ }
+}
+
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
new file mode 100644
index 00000000000..f7c9c739e5e
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -0,0 +1,146 @@
+/*
+ * linux/drivers/video/omap2/omapfb.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * Some code and ideas taken from drivers/video/omap/ driver
+ * by Imre Deak.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DRIVERS_VIDEO_OMAP2_OMAPFB_H__
+#define __DRIVERS_VIDEO_OMAP2_OMAPFB_H__
+
+#ifdef CONFIG_FB_OMAP2_DEBUG_SUPPORT
+#define DEBUG
+#endif
+
+#include <plat/display.h>
+
+#ifdef DEBUG
+extern unsigned int omapfb_debug;
+#define DBG(format, ...) \
+ if (omapfb_debug) \
+ printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__)
+#else
+#define DBG(format, ...)
+#endif
+
+#define FB2OFB(fb_info) ((struct omapfb_info *)(fb_info->par))
+
+/* max number of overlays to which framebuffer data can be directed */
+#define OMAPFB_MAX_OVL_PER_FB 3
+
+struct omapfb2_mem_region {
+ u32 paddr;
+ void __iomem *vaddr;
+ struct vrfb vrfb;
+ unsigned long size;
+ u8 type; /* OMAPFB_PLANE_MEM_* */
+ bool alloc; /* allocated by the driver */
+ bool map; /* kernel mapped by the driver */
+};
+
+/* appended to fb_info */
+struct omapfb_info {
+ int id;
+ struct omapfb2_mem_region region;
+ atomic_t map_count;
+ int num_overlays;
+ struct omap_overlay *overlays[OMAPFB_MAX_OVL_PER_FB];
+ struct omapfb2_device *fbdev;
+ enum omap_dss_rotation_type rotation_type;
+ u8 rotation[OMAPFB_MAX_OVL_PER_FB];
+ bool mirror;
+};
+
+struct omapfb2_device {
+ struct device *dev;
+ struct mutex mtx;
+
+ u32 pseudo_palette[17];
+
+ int state;
+
+ unsigned num_fbs;
+ struct fb_info *fbs[10];
+
+ unsigned num_displays;
+ struct omap_dss_device *displays[10];
+ unsigned num_overlays;
+ struct omap_overlay *overlays[10];
+ unsigned num_managers;
+ struct omap_overlay_manager *managers[10];
+};
+
+struct omapfb_colormode {
+ enum omap_color_mode dssmode;
+ u32 bits_per_pixel;
+ u32 nonstd;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+};
+
+void set_fb_fix(struct fb_info *fbi);
+int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var);
+int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type);
+int omapfb_apply_changes(struct fb_info *fbi, int init);
+
+int omapfb_create_sysfs(struct omapfb2_device *fbdev);
+void omapfb_remove_sysfs(struct omapfb2_device *fbdev);
+
+int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg);
+
+int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
+ struct fb_var_screeninfo *var);
+
+/* find the display connected to this fb, if any */
+static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
+{
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ int i;
+
+ /* XXX: returns the display connected to first attached overlay */
+ for (i = 0; i < ofbi->num_overlays; i++) {
+ if (ofbi->overlays[i]->manager)
+ return ofbi->overlays[i]->manager->device;
+ }
+
+ return NULL;
+}
+
+static inline void omapfb_lock(struct omapfb2_device *fbdev)
+{
+ mutex_lock(&fbdev->mtx);
+}
+
+static inline void omapfb_unlock(struct omapfb2_device *fbdev)
+{
+ mutex_unlock(&fbdev->mtx);
+}
+
+static inline int omapfb_overlay_enable(struct omap_overlay *ovl,
+ int enable)
+{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+ info.enabled = enable;
+ return ovl->set_overlay_info(ovl, &info);
+}
+
+#endif
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
new file mode 100644
index 00000000000..55a4de5e5d1
--- /dev/null
+++ b/drivers/video/omap2/vram.c
@@ -0,0 +1,655 @@
+/*
+ * VRAM manager for OMAP
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/bootmem.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+#include <asm/setup.h>
+
+#include <plat/sram.h>
+#include <plat/vram.h>
+#include <plat/dma.h>
+
+#ifdef DEBUG
+#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
+#else
+#define DBG(format, ...)
+#endif
+
+#define OMAP2_SRAM_START 0x40200000
+/* Maximum size; in reality this is smaller if SRAM is partially locked. */
+#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
+
+/* postponed regions are used to temporarily store region information at boot
+ * time when we cannot yet allocate the region list */
+#define MAX_POSTPONED_REGIONS 10
+
+static bool vram_initialized;
+static int postponed_cnt;
+static struct {
+ unsigned long paddr;
+ size_t size;
+} postponed_regions[MAX_POSTPONED_REGIONS];
+
+struct vram_alloc {
+ struct list_head list;
+ unsigned long paddr;
+ unsigned pages;
+};
+
+struct vram_region {
+ struct list_head list;
+ struct list_head alloc_list;
+ unsigned long paddr;
+ unsigned pages;
+};
+
+static DEFINE_MUTEX(region_mutex);
+static LIST_HEAD(region_list);
+
+static inline int region_mem_type(unsigned long paddr)
+{
+ if (paddr >= OMAP2_SRAM_START &&
+ paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
+ return OMAP_VRAM_MEMTYPE_SRAM;
+ else
+ return OMAP_VRAM_MEMTYPE_SDRAM;
+}
+
+static struct vram_region *omap_vram_create_region(unsigned long paddr,
+ unsigned pages)
+{
+ struct vram_region *rm;
+
+ rm = kzalloc(sizeof(*rm), GFP_KERNEL);
+
+ if (rm) {
+ INIT_LIST_HEAD(&rm->alloc_list);
+ rm->paddr = paddr;
+ rm->pages = pages;
+ }
+
+ return rm;
+}
+
+#if 0
+static void omap_vram_free_region(struct vram_region *vr)
+{
+ list_del(&vr->list);
+ kfree(vr);
+}
+#endif
+
+static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
+ unsigned long paddr, unsigned pages)
+{
+ struct vram_alloc *va;
+ struct vram_alloc *new;
+
+ new = kzalloc(sizeof(*va), GFP_KERNEL);
+
+ if (!new)
+ return NULL;
+
+ new->paddr = paddr;
+ new->pages = pages;
+
+ list_for_each_entry(va, &vr->alloc_list, list) {
+ if (va->paddr > new->paddr)
+ break;
+ }
+
+ list_add_tail(&new->list, &va->list);
+
+ return new;
+}
+
+static void omap_vram_free_allocation(struct vram_alloc *va)
+{
+ list_del(&va->list);
+ kfree(va);
+}
+
+int omap_vram_add_region(unsigned long paddr, size_t size)
+{
+ struct vram_region *rm;
+ unsigned pages;
+
+ if (vram_initialized) {
+ DBG("adding region paddr %08lx size %d\n",
+ paddr, size);
+
+ size &= PAGE_MASK;
+ pages = size >> PAGE_SHIFT;
+
+ rm = omap_vram_create_region(paddr, pages);
+ if (rm == NULL)
+ return -ENOMEM;
+
+ list_add(&rm->list, &region_list);
+ } else {
+ if (postponed_cnt == MAX_POSTPONED_REGIONS)
+ return -ENOMEM;
+
+ postponed_regions[postponed_cnt].paddr = paddr;
+ postponed_regions[postponed_cnt].size = size;
+
+ ++postponed_cnt;
+ }
+ return 0;
+}
+
+int omap_vram_free(unsigned long paddr, size_t size)
+{
+ struct vram_region *rm;
+ struct vram_alloc *alloc;
+ unsigned start, end;
+
+ DBG("free mem paddr %08lx size %d\n", paddr, size);
+
+ size = PAGE_ALIGN(size);
+
+ mutex_lock(&region_mutex);
+
+ list_for_each_entry(rm, &region_list, list) {
+ list_for_each_entry(alloc, &rm->alloc_list, list) {
+ start = alloc->paddr;
+ end = alloc->paddr + (alloc->pages << PAGE_SHIFT);
+
+ if (start >= paddr && end <= paddr + size)
+ goto found;
+ }
+ }
+
+ mutex_unlock(&region_mutex);
+ return -EINVAL;
+
+found:
+ omap_vram_free_allocation(alloc);
+
+ mutex_unlock(&region_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(omap_vram_free);
+
+static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
+{
+ struct vram_region *rm;
+ struct vram_alloc *alloc;
+ size_t size;
+
+ size = pages << PAGE_SHIFT;
+
+ list_for_each_entry(rm, &region_list, list) {
+ unsigned long start, end;
+
+ DBG("checking region %lx %d\n", rm->paddr, rm->pages);
+
+ if (region_mem_type(rm->paddr) != region_mem_type(paddr))
+ continue;
+
+ start = rm->paddr;
+ end = start + (rm->pages << PAGE_SHIFT) - 1;
+ if (start > paddr || end < paddr + size - 1)
+ continue;
+
+ DBG("block ok, checking allocs\n");
+
+ list_for_each_entry(alloc, &rm->alloc_list, list) {
+ end = alloc->paddr - 1;
+
+ if (start <= paddr && end >= paddr + size - 1)
+ goto found;
+
+ start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
+ }
+
+ end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
+
+ if (!(start <= paddr && end >= paddr + size - 1))
+ continue;
+found:
+ DBG("found area start %lx, end %lx\n", start, end);
+
+ if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+int omap_vram_reserve(unsigned long paddr, size_t size)
+{
+ unsigned pages;
+ int r;
+
+ DBG("reserve mem paddr %08lx size %d\n", paddr, size);
+
+ size = PAGE_ALIGN(size);
+ pages = size >> PAGE_SHIFT;
+
+ mutex_lock(&region_mutex);
+
+ r = _omap_vram_reserve(paddr, pages);
+
+ mutex_unlock(&region_mutex);
+
+ return r;
+}
+EXPORT_SYMBOL(omap_vram_reserve);
+
+static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct completion *compl = data;
+ complete(compl);
+}
+
+static int _omap_vram_clear(u32 paddr, unsigned pages)
+{
+ struct completion compl;
+ unsigned elem_count;
+ unsigned frame_count;
+ int r;
+ int lch;
+
+ init_completion(&compl);
+
+ r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
+ _omap_vram_dma_cb,
+ &compl, &lch);
+ if (r) {
+ pr_err("VRAM: request_dma failed for memory clear\n");
+ return -EBUSY;
+ }
+
+ elem_count = pages * PAGE_SIZE / 4;
+ frame_count = 1;
+
+ omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
+ elem_count, frame_count,
+ OMAP_DMA_SYNC_ELEMENT,
+ 0, 0);
+
+ omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
+ paddr, 0, 0);
+
+ omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);
+
+ omap_start_dma(lch);
+
+ if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
+ omap_stop_dma(lch);
+ pr_err("VRAM: dma timeout while clearing memory\n");
+ r = -EIO;
+ goto err;
+ }
+
+ r = 0;
+err:
+ omap_free_dma(lch);
+
+ return r;
+}
+
+static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
+{
+ struct vram_region *rm;
+ struct vram_alloc *alloc;
+
+ list_for_each_entry(rm, &region_list, list) {
+ unsigned long start, end;
+
+ DBG("checking region %lx %d\n", rm->paddr, rm->pages);
+
+ if (region_mem_type(rm->paddr) != mtype)
+ continue;
+
+ start = rm->paddr;
+
+ list_for_each_entry(alloc, &rm->alloc_list, list) {
+ end = alloc->paddr;
+
+ if (end - start >= pages << PAGE_SHIFT)
+ goto found;
+
+ start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
+ }
+
+ end = rm->paddr + (rm->pages << PAGE_SHIFT);
+found:
+ if (end - start < pages << PAGE_SHIFT)
+ continue;
+
+ DBG("found %lx, end %lx\n", start, end);
+
+ alloc = omap_vram_create_allocation(rm, start, pages);
+ if (alloc == NULL)
+ return -ENOMEM;
+
+ *paddr = start;
+
+ _omap_vram_clear(start, pages);
+
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
+{
+ unsigned pages;
+ int r;
+
+ BUG_ON(mtype > OMAP_VRAM_MEMTYPE_MAX || !size);
+
+ DBG("alloc mem type %d size %d\n", mtype, size);
+
+ size = PAGE_ALIGN(size);
+ pages = size >> PAGE_SHIFT;
+
+ mutex_lock(&region_mutex);
+
+ r = _omap_vram_alloc(mtype, pages, paddr);
+
+ mutex_unlock(&region_mutex);
+
+ return r;
+}
+EXPORT_SYMBOL(omap_vram_alloc);
+
+void omap_vram_get_info(unsigned long *vram,
+ unsigned long *free_vram,
+ unsigned long *largest_free_block)
+{
+ struct vram_region *vr;
+ struct vram_alloc *va;
+
+ *vram = 0;
+ *free_vram = 0;
+ *largest_free_block = 0;
+
+ mutex_lock(&region_mutex);
+
+ list_for_each_entry(vr, &region_list, list) {
+ unsigned free;
+ unsigned long pa;
+
+ pa = vr->paddr;
+ *vram += vr->pages << PAGE_SHIFT;
+
+ list_for_each_entry(va, &vr->alloc_list, list) {
+ free = va->paddr - pa;
+ *free_vram += free;
+ if (free > *largest_free_block)
+ *largest_free_block = free;
+ pa = va->paddr + (va->pages << PAGE_SHIFT);
+ }
+
+ free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
+ *free_vram += free;
+ if (free > *largest_free_block)
+ *largest_free_block = free;
+ }
+
+ mutex_unlock(&region_mutex);
+}
+EXPORT_SYMBOL(omap_vram_get_info);
+
+#if defined(CONFIG_DEBUG_FS)
+static int vram_debug_show(struct seq_file *s, void *unused)
+{
+ struct vram_region *vr;
+ struct vram_alloc *va;
+ unsigned size;
+
+ mutex_lock(&region_mutex);
+
+ list_for_each_entry(vr, &region_list, list) {
+ size = vr->pages << PAGE_SHIFT;
+ seq_printf(s, "%08lx-%08lx (%d bytes)\n",
+ vr->paddr, vr->paddr + size - 1,
+ size);
+
+ list_for_each_entry(va, &vr->alloc_list, list) {
+ size = va->pages << PAGE_SHIFT;
+ seq_printf(s, " %08lx-%08lx (%d bytes)\n",
+ va->paddr, va->paddr + size - 1,
+ size);
+ }
+ }
+
+ mutex_unlock(&region_mutex);
+
+ return 0;
+}
+
+static int vram_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vram_debug_show, inode->i_private);
+}
+
+static const struct file_operations vram_debug_fops = {
+ .open = vram_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init omap_vram_create_debugfs(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("vram", S_IRUGO, NULL,
+ NULL, &vram_debug_fops);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ return 0;
+}
+#endif
+
+static __init int omap_vram_init(void)
+{
+ int i;
+
+ vram_initialized = 1;
+
+ for (i = 0; i < postponed_cnt; i++)
+ omap_vram_add_region(postponed_regions[i].paddr,
+ postponed_regions[i].size);
+
+#ifdef CONFIG_DEBUG_FS
+ if (omap_vram_create_debugfs())
+ pr_err("VRAM: Failed to create debugfs file\n");
+#endif
+
+ return 0;
+}
+
+arch_initcall(omap_vram_init);
+
+/* boottime vram alloc stuff */
+
+/* set from board file */
+static u32 omap_vram_sram_start __initdata;
+static u32 omap_vram_sram_size __initdata;
+
+/* set from board file */
+static u32 omap_vram_sdram_start __initdata;
+static u32 omap_vram_sdram_size __initdata;
+
+/* set from kernel cmdline */
+static u32 omap_vram_def_sdram_size __initdata;
+static u32 omap_vram_def_sdram_start __initdata;
+
+static void __init omap_vram_early_vram(char **p)
+{
+ omap_vram_def_sdram_size = memparse(*p, p);
+ if (**p == ',')
+ omap_vram_def_sdram_start = simple_strtoul((*p) + 1, p, 16);
+}
+__early_param("vram=", omap_vram_early_vram);
+
+/*
+ * Called from map_io. We need to call to this early enough so that we
+ * can reserve the fixed SDRAM regions before VM could get hold of them.
+ */
+void __init omap_vram_reserve_sdram(void)
+{
+ struct bootmem_data *bdata;
+ unsigned long sdram_start, sdram_size;
+ u32 paddr;
+ u32 size = 0;
+
+ /* cmdline arg overrides the board file definition */
+ if (omap_vram_def_sdram_size) {
+ size = omap_vram_def_sdram_size;
+ paddr = omap_vram_def_sdram_start;
+ }
+
+ if (!size) {
+ size = omap_vram_sdram_size;
+ paddr = omap_vram_sdram_start;
+ }
+
+#ifdef CONFIG_OMAP2_VRAM_SIZE
+ if (!size) {
+ size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
+ paddr = 0;
+ }
+#endif
+
+ if (!size)
+ return;
+
+ size = PAGE_ALIGN(size);
+
+ bdata = NODE_DATA(0)->bdata;
+ sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
+ sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
+
+ if (paddr) {
+ if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
+ paddr + size > sdram_start + sdram_size) {
+ pr_err("Illegal SDRAM region for VRAM\n");
+ return;
+ }
+
+ if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) {
+ pr_err("FB: failed to reserve VRAM\n");
+ return;
+ }
+ } else {
+ if (size > sdram_size) {
+ pr_err("Illegal SDRAM size for VRAM\n");
+ return;
+ }
+
+ paddr = virt_to_phys(alloc_bootmem_pages(size));
+ BUG_ON(paddr & ~PAGE_MASK);
+ }
+
+ omap_vram_add_region(paddr, size);
+
+ pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
+}
+
+/*
+ * Called at sram init time, before anything is pushed to the SRAM stack.
+ * Because of the stack scheme, we will allocate everything from the
+ * start of the lowest address region to the end of SRAM. This will also
+ * include padding for page alignment and possible holes between regions.
+ *
+ * As opposed to the SDRAM case, we'll also do any dynamic allocations at
+ * this point, since a driver built as a module would have problems with
+ * freeing / reallocating the regions.
+ */
+unsigned long __init omap_vram_reserve_sram(unsigned long sram_pstart,
+ unsigned long sram_vstart,
+ unsigned long sram_size,
+ unsigned long pstart_avail,
+ unsigned long size_avail)
+{
+ unsigned long pend_avail;
+ unsigned long reserved;
+ u32 paddr;
+ u32 size;
+
+ paddr = omap_vram_sram_start;
+ size = omap_vram_sram_size;
+
+ if (!size)
+ return 0;
+
+ reserved = 0;
+ pend_avail = pstart_avail + size_avail;
+
+ if (!paddr) {
+ /* Dynamic allocation */
+ if ((size_avail & PAGE_MASK) < size) {
+ pr_err("Not enough SRAM for VRAM\n");
+ return 0;
+ }
+ size_avail = (size_avail - size) & PAGE_MASK;
+ paddr = pstart_avail + size_avail;
+ }
+
+ if (paddr < sram_pstart ||
+ paddr + size > sram_pstart + sram_size) {
+ pr_err("Illegal SRAM region for VRAM\n");
+ return 0;
+ }
+
+ /* Reserve everything above the start of the region. */
+ if (pend_avail - paddr > reserved)
+ reserved = pend_avail - paddr;
+ size_avail = pend_avail - reserved - pstart_avail;
+
+ omap_vram_add_region(paddr, size);
+
+ if (reserved)
+ pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);
+
+ return reserved;
+}
+
+void __init omap_vram_set_sdram_vram(u32 size, u32 start)
+{
+ omap_vram_sdram_start = start;
+ omap_vram_sdram_size = size;
+}
+
+void __init omap_vram_set_sram_vram(u32 size, u32 start)
+{
+ omap_vram_sram_start = start;
+ omap_vram_sram_size = size;
+}
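
For reference, a minimal sketch (not part of the patch) of how a client driver might use the VRAM allocator interface added above, assuming the prototypes and the OMAP_VRAM_MEMTYPE_* constants are exported through <plat/vram.h> as included by vram.c:

#include <linux/kernel.h>
#include <plat/vram.h>

static unsigned long example_paddr;

static int example_alloc_fbmem(size_t size)
{
	int r;

	/* Ask the VRAM manager for SDRAM; the physical address of the
	 * (page-aligned) allocation is returned through example_paddr. */
	r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &example_paddr);
	if (r)
		return r;

	/* ... ioremap_wc(example_paddr, size) and program the overlay ... */
	return 0;
}

static void example_free_fbmem(size_t size)
{
	/* omap_vram_free() page-aligns the size itself. */
	omap_vram_free(example_paddr, size);
}
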
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
new file mode 100644
index 00000000000..fd227160037
--- /dev/null
+++ b/drivers/video/omap2/vrfb.c
@@ -0,0 +1,315 @@
+/*
+ * VRFB Rotation Engine
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+
+#include <mach/io.h>
+#include <plat/vrfb.h>
+#include <plat/sdrc.h>
+
+#ifdef DEBUG
+#define DBG(format, ...) pr_debug("VRFB: " format, ## __VA_ARGS__)
+#else
+#define DBG(format, ...)
+#endif
+
+#define SMS_ROT_VIRT_BASE(context, rot) \
+ (((context >= 4) ? 0xD0000000 : 0x70000000) \
+ + (0x4000000 * (context)) \
+ + (0x1000000 * (rot)))
+
+#define OMAP_VRFB_SIZE (2048 * 2048 * 4)
+
+#define VRFB_PAGE_WIDTH_EXP 5 /* Assuming SDRAM page size = 1024 */
+#define VRFB_PAGE_HEIGHT_EXP 5 /* 1024 = 2^5 * 2^5 */
+#define VRFB_PAGE_WIDTH (1 << VRFB_PAGE_WIDTH_EXP)
+#define VRFB_PAGE_HEIGHT (1 << VRFB_PAGE_HEIGHT_EXP)
+#define SMS_IMAGEHEIGHT_OFFSET 16
+#define SMS_IMAGEWIDTH_OFFSET 0
+#define SMS_PH_OFFSET 8
+#define SMS_PW_OFFSET 4
+#define SMS_PS_OFFSET 0
+
+#define VRFB_NUM_CTXS 12
+/* bitmap of reserved contexts */
+static unsigned long ctx_map;
+
+static DEFINE_MUTEX(ctx_lock);
+
+/*
+ * Access to this happens from client drivers or the PM core after wake-up.
+ * For the first case we require locking at the driver level, for the second
+ * we don't need locking, since no drivers will run until after the wake-up
+ * has finished.
+ */
+static struct {
+ u32 physical_ba;
+ u32 control;
+ u32 size;
+} vrfb_hw_context[VRFB_NUM_CTXS];
+
+static inline void restore_hw_context(int ctx)
+{
+ omap2_sms_write_rot_control(vrfb_hw_context[ctx].control, ctx);
+ omap2_sms_write_rot_size(vrfb_hw_context[ctx].size, ctx);
+ omap2_sms_write_rot_physical_ba(vrfb_hw_context[ctx].physical_ba, ctx);
+}
+
+static u32 get_image_width_roundup(u16 width, u8 bytespp)
+{
+ unsigned long stride = width * bytespp;
+ unsigned long ceil_pages_per_stride = (stride / VRFB_PAGE_WIDTH) +
+ (stride % VRFB_PAGE_WIDTH != 0);
+
+ return ceil_pages_per_stride * VRFB_PAGE_WIDTH / bytespp;
+}
+
+/*
+ * This is the extra space needed in the VRFB physical area for VRFB to safely wrap
+ * any memory accesses to the invisible part of the virtual view to the physical
+ * area.
+ */
+static inline u32 get_extra_physical_size(u16 image_width_roundup, u8 bytespp)
+{
+ return (OMAP_VRFB_LINE_LEN - image_width_roundup) * VRFB_PAGE_HEIGHT *
+ bytespp;
+}
+
+void omap_vrfb_restore_context(void)
+{
+ int i;
+ unsigned long map = ctx_map;
+
+ for (i = ffs(map); i; i = ffs(map)) {
+ /* i=1..32 */
+ i--;
+ map &= ~(1 << i);
+ restore_hw_context(i);
+ }
+}
+
+void omap_vrfb_adjust_size(u16 *width, u16 *height,
+ u8 bytespp)
+{
+ *width = ALIGN(*width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
+ *height = ALIGN(*height, VRFB_PAGE_HEIGHT);
+}
+EXPORT_SYMBOL(omap_vrfb_adjust_size);
+
+u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp)
+{
+ unsigned long image_width_roundup = get_image_width_roundup(width,
+ bytespp);
+
+ if (image_width_roundup > OMAP_VRFB_LINE_LEN)
+ return 0;
+
+ return (width * height * bytespp) + get_extra_physical_size(
+ image_width_roundup, bytespp);
+}
+EXPORT_SYMBOL(omap_vrfb_min_phys_size);
+
+u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp)
+{
+ unsigned long image_width_roundup = get_image_width_roundup(width,
+ bytespp);
+ unsigned long height;
+ unsigned long extra;
+
+ if (image_width_roundup > OMAP_VRFB_LINE_LEN)
+ return 0;
+
+ extra = get_extra_physical_size(image_width_roundup, bytespp);
+
+ if (phys_size < extra)
+ return 0;
+
+ height = (phys_size - extra) / (width * bytespp);
+
+ /* Virtual views provided by VRFB are limited to 2048x2048. */
+ return min_t(unsigned long, height, 2048);
+}
+EXPORT_SYMBOL(omap_vrfb_max_height);
+
+void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
+ u16 width, u16 height,
+ unsigned bytespp, bool yuv_mode)
+{
+ unsigned pixel_size_exp;
+ u16 vrfb_width;
+ u16 vrfb_height;
+ u8 ctx = vrfb->context;
+ u32 size;
+ u32 control;
+
+ DBG("omapfb_set_vrfb(%d, %lx, %dx%d, %d, %d)\n", ctx, paddr,
+ width, height, bytespp, yuv_mode);
+
+ /* For YUV2 and UYVY modes VRFB needs to handle pixels a bit
+ * differently. See TRM. */
+ if (yuv_mode) {
+ bytespp *= 2;
+ width /= 2;
+ }
+
+ if (bytespp == 4)
+ pixel_size_exp = 2;
+ else if (bytespp == 2)
+ pixel_size_exp = 1;
+ else
+ BUG();
+
+ vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
+ vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
+
+ DBG("vrfb w %u, h %u bytespp %d\n", vrfb_width, vrfb_height, bytespp);
+
+ size = vrfb_width << SMS_IMAGEWIDTH_OFFSET;
+ size |= vrfb_height << SMS_IMAGEHEIGHT_OFFSET;
+
+ control = pixel_size_exp << SMS_PS_OFFSET;
+ control |= VRFB_PAGE_WIDTH_EXP << SMS_PW_OFFSET;
+ control |= VRFB_PAGE_HEIGHT_EXP << SMS_PH_OFFSET;
+
+ vrfb_hw_context[ctx].physical_ba = paddr;
+ vrfb_hw_context[ctx].size = size;
+ vrfb_hw_context[ctx].control = control;
+
+ omap2_sms_write_rot_physical_ba(paddr, ctx);
+ omap2_sms_write_rot_size(size, ctx);
+ omap2_sms_write_rot_control(control, ctx);
+
+ DBG("vrfb offset pixels %d, %d\n",
+ vrfb_width - width, vrfb_height - height);
+
+ vrfb->xres = width;
+ vrfb->yres = height;
+ vrfb->xoffset = vrfb_width - width;
+ vrfb->yoffset = vrfb_height - height;
+ vrfb->bytespp = bytespp;
+ vrfb->yuv_mode = yuv_mode;
+}
+EXPORT_SYMBOL(omap_vrfb_setup);
+
+int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot)
+{
+ unsigned long size = height * OMAP_VRFB_LINE_LEN * vrfb->bytespp;
+
+ vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], size);
+
+ if (!vrfb->vaddr[rot]) {
+ printk(KERN_ERR "vrfb: ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ DBG("ioremapped vrfb area %d of size %lu into %p\n", rot, size,
+ vrfb->vaddr[rot]);
+
+ return 0;
+}
+EXPORT_SYMBOL(omap_vrfb_map_angle);
+
+void omap_vrfb_release_ctx(struct vrfb *vrfb)
+{
+ int rot;
+ int ctx = vrfb->context;
+
+ if (ctx == 0xff)
+ return;
+
+ DBG("release ctx %d\n", ctx);
+
+ mutex_lock(&ctx_lock);
+
+ BUG_ON(!(ctx_map & (1 << ctx)));
+
+ clear_bit(ctx, &ctx_map);
+
+ for (rot = 0; rot < 4; ++rot) {
+ if (vrfb->paddr[rot]) {
+ release_mem_region(vrfb->paddr[rot], OMAP_VRFB_SIZE);
+ vrfb->paddr[rot] = 0;
+ }
+ }
+
+ vrfb->context = 0xff;
+
+ mutex_unlock(&ctx_lock);
+}
+EXPORT_SYMBOL(omap_vrfb_release_ctx);
+
+int omap_vrfb_request_ctx(struct vrfb *vrfb)
+{
+ int rot;
+ u32 paddr;
+ u8 ctx;
+ int r;
+
+ DBG("request ctx\n");
+
+ mutex_lock(&ctx_lock);
+
+ for (ctx = 0; ctx < VRFB_NUM_CTXS; ++ctx)
+ if ((ctx_map & (1 << ctx)) == 0)
+ break;
+
+ if (ctx == VRFB_NUM_CTXS) {
+ pr_err("vrfb: no free contexts\n");
+ r = -EBUSY;
+ goto out;
+ }
+
+ DBG("found free ctx %d\n", ctx);
+
+ set_bit(ctx, &ctx_map);
+
+ memset(vrfb, 0, sizeof(*vrfb));
+
+ vrfb->context = ctx;
+
+ for (rot = 0; rot < 4; ++rot) {
+ paddr = SMS_ROT_VIRT_BASE(ctx, rot);
+ if (!request_mem_region(paddr, OMAP_VRFB_SIZE, "vrfb")) {
+ pr_err("vrfb: failed to reserve VRFB "
+ "area for ctx %d, rotation %d\n",
+ ctx, rot * 90);
+ omap_vrfb_release_ctx(vrfb);
+ r = -ENOMEM;
+ goto out;
+ }
+
+ vrfb->paddr[rot] = paddr;
+
+ DBG("VRFB %d/%d: %lx\n", ctx, rot*90, vrfb->paddr[rot]);
+ }
+
+ r = 0;
+out:
+ mutex_unlock(&ctx_lock);
+ return r;
+}
+EXPORT_SYMBOL(omap_vrfb_request_ctx);
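
Similarly, a minimal sketch (not part of the patch) of the intended call sequence for the VRFB context API added above, assuming struct vrfb and the prototypes come from <plat/vrfb.h> as included by vrfb.c:

#include <linux/types.h>
#include <plat/vrfb.h>

static int example_setup_rotation(struct vrfb *vrfb, unsigned long paddr,
		u16 width, u16 height, u8 bytespp)
{
	int r;

	/* Reserve one of the VRFB_NUM_CTXS contexts and its four
	 * rotated views (0, 90, 180 and 270 degrees). */
	r = omap_vrfb_request_ctx(vrfb);
	if (r)
		return r;

	/* Program the SMS rotation engine for this buffer (RGB mode). */
	omap_vrfb_setup(vrfb, paddr, width, height, bytespp, false);

	/* Map the 90-degree view; rot runs 0..3 in 90-degree steps. */
	r = omap_vrfb_map_angle(vrfb, height, 1);
	if (r)
		omap_vrfb_release_ctx(vrfb);

	return r;
}
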
diff --git a/drivers/video/output.c b/drivers/video/output.c
index 5e6439ae739..5137aa016b8 100644
--- a/drivers/video/output.c
+++ b/drivers/video/output.c
@@ -50,7 +50,7 @@ static ssize_t video_output_store_state(struct device *dev,
int request_state = simple_strtoul(buf,&endp,0);
size_t size = endp - buf;
- if (*endp && isspace(*endp))
+ if (isspace(*endp))
size++;
if (size != count)
return -EINVAL;
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index 0573ec685a5..0f361b6100d 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -98,7 +98,8 @@ static int pmagbafb_setcolreg(unsigned int regno, unsigned int red,
{
struct pmagbafb_par *par = info->par;
- BUG_ON(regno >= info->cmap.len);
+ if (regno >= info->cmap.len)
+ return 1;
red >>= 8; /* The cmap fields are 16 bits */
green >>= 8; /* wide, but the hardware colormap */
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index 98748723af9..2de0806421b 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -102,7 +102,8 @@ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red,
{
struct pmagbbfb_par *par = info->par;
- BUG_ON(regno >= info->cmap.len);
+ if (regno >= info->cmap.len)
+ return 1;
red >>= 8; /* The cmap fields are 16 bits */
green >>= 8; /* wide, but the hardware colormap */
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index f58a3aae6ea..415858b421b 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1221,13 +1221,14 @@ static void setup_smart_timing(struct pxafb_info *fbi,
static int pxafb_smart_thread(void *arg)
{
struct pxafb_info *fbi = arg;
- struct pxafb_mach_info *inf = fbi->dev->platform_data;
+ struct pxafb_mach_info *inf;
- if (!fbi || !inf->smart_update) {
+ if (!fbi || !fbi->dev->platform_data->smart_update) {
pr_err("%s: not properly initialized, thread terminated\n",
__func__);
return -EINVAL;
}
+ inf = fbi->dev->platform_data;
pr_debug("%s(): task starting\n", __func__);
@@ -1667,7 +1668,7 @@ static int pxafb_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops pxafb_pm_ops = {
+static const struct dev_pm_ops pxafb_pm_ops = {
.suspend = pxafb_suspend,
.resume = pxafb_resume,
};
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index b4b5de930cf..a69830d26f7 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -281,6 +281,7 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct sh_mobile_lcdc_chan *ch = info->par;
+ struct sh_mobile_lcdc_board_cfg *bcfg = &ch->cfg.board_cfg;
/* enable clocks before accessing hardware */
sh_mobile_lcdc_clk_on(ch->lcdc);
@@ -305,10 +306,17 @@ static void sh_mobile_lcdc_deferred_io(struct fb_info *info,
/* trigger panel update */
dma_map_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
+ if (bcfg->start_transfer)
+ bcfg->start_transfer(bcfg->board_data, ch,
+ &sh_mobile_lcdc_sys_bus_ops);
lcdc_write_chan(ch, LDSM2R, 1);
dma_unmap_sg(info->dev, ch->sglist, nr_pages, DMA_TO_DEVICE);
- } else
+ } else {
+ if (bcfg->start_transfer)
+ bcfg->start_transfer(bcfg->board_data, ch,
+ &sh_mobile_lcdc_sys_bus_ops);
lcdc_write_chan(ch, LDSM2R, 1);
+ }
}
static void sh_mobile_lcdc_deferred_io_touch(struct fb_info *info)
@@ -890,7 +898,7 @@ static int sh_mobile_lcdc_runtime_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
+static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = {
.suspend = sh_mobile_lcdc_suspend,
.resume = sh_mobile_lcdc_resume,
.runtime_suspend = sh_mobile_lcdc_runtime_suspend,
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index a4e05e4d750..9d2b6bc4903 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -2115,7 +2115,7 @@ sisfb_detect_VB_connect(struct sis_video_info *ivideo)
if( (!(ivideo->vbflags2 & VB2_SISBRIDGE)) &&
(!((ivideo->sisvga_engine == SIS_315_VGA) &&
(ivideo->vbflags2 & VB2_CHRONTEL))) ) {
- if(ivideo->sisfb_tvstd & (TV_PALN | TV_PALN | TV_NTSCJ)) {
+ if(ivideo->sisfb_tvstd & (TV_PALM | TV_PALN | TV_NTSCJ)) {
ivideo->sisfb_tvstd = -1;
printk(KERN_ERR "sisfb: PALM/PALN/NTSCJ not supported\n");
}
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 924d7946278..35370d0ecf0 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -29,8 +29,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/console.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
@@ -66,6 +66,7 @@ struct sm501fb_info {
struct fb_info *fb[2]; /* fb info for both heads */
struct resource *fbmem_res; /* framebuffer resource */
struct resource *regs_res; /* registers resource */
+ struct resource *regs2d_res; /* 2d registers resource */
struct sm501_platdata_fb *pdata; /* our platform data */
unsigned long pm_crt_ctrl; /* pm: crt ctrl save */
@@ -73,6 +74,7 @@ struct sm501fb_info {
int irq;
int swap_endian; /* set to swap rgb=>bgr */
void __iomem *regs; /* remapped registers */
+ void __iomem *regs2d; /* 2d remapped registers */
void __iomem *fbmem; /* remapped framebuffer */
size_t fbmem_len; /* length of remapped region */
};
@@ -123,9 +125,9 @@ static inline void sm501fb_sync_regs(struct sm501fb_info *info)
* This is an attempt to lay out memory for the two framebuffers and
* everything else
*
- * |fbmem_res->start fbmem_res->end|
- * | |
- * |fb[0].fix.smem_start | |fb[1].fix.smem_start | 2K |
+ * |fbmem_res->start fbmem_res->end|
+ * | |
+ * |fb[0].fix.smem_start | |fb[1].fix.smem_start | 2K |
* |-> fb[0].fix.smem_len <-| spare |-> fb[1].fix.smem_len <-|-> cursors <-|
*
* The "spare" space is for the 2d engine data
@@ -1246,7 +1248,173 @@ static ssize_t sm501fb_debug_show_pnl(struct device *dev,
static DEVICE_ATTR(fbregs_pnl, 0444, sm501fb_debug_show_pnl, NULL);
-/* framebuffer ops */
+/* acceleration operations */
+static int sm501fb_sync(struct fb_info *info)
+{
+ int count = 1000000;
+ struct sm501fb_par *par = info->par;
+ struct sm501fb_info *fbi = par->info;
+
+ /* wait for the 2d engine to be ready */
+ while ((count > 0) &&
+ (readl(fbi->regs + SM501_SYSTEM_CONTROL) &
+ SM501_SYSCTRL_2D_ENGINE_STATUS) != 0)
+ count--;
+
+ if (count <= 0) {
+ dev_err(info->dev, "Timeout waiting for 2d engine sync\n");
+ return 1;
+ }
+ return 0;
+}
+
+static void sm501fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct sm501fb_par *par = info->par;
+ struct sm501fb_info *fbi = par->info;
+ int width = area->width;
+ int height = area->height;
+ int sx = area->sx;
+ int sy = area->sy;
+ int dx = area->dx;
+ int dy = area->dy;
+ unsigned long rtl = 0;
+
+ /* source clip */
+ if ((sx >= info->var.xres_virtual) ||
+ (sy >= info->var.yres_virtual))
+ /* Source area not within virtual screen, skipping */
+ return;
+ if ((sx + width) >= info->var.xres_virtual)
+ width = info->var.xres_virtual - sx - 1;
+ if ((sy + height) >= info->var.yres_virtual)
+ height = info->var.yres_virtual - sy - 1;
+
+ /* dest clip */
+ if ((dx >= info->var.xres_virtual) ||
+ (dy >= info->var.yres_virtual))
+ /* Destination area not within virtual screen, skipping */
+ return;
+ if ((dx + width) >= info->var.xres_virtual)
+ width = info->var.xres_virtual - dx - 1;
+ if ((dy + height) >= info->var.yres_virtual)
+ height = info->var.yres_virtual - dy - 1;
+
+ if ((sx < dx) || (sy < dy)) {
+ rtl = 1 << 27;
+ sx += width - 1;
+ dx += width - 1;
+ sy += height - 1;
+ dy += height - 1;
+ }
+
+ if (sm501fb_sync(info))
+ return;
+
+ /* set the base addresses */
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
+
+ /* set the window width */
+ writel((info->var.xres << 16) | info->var.xres,
+ fbi->regs2d + SM501_2D_WINDOW_WIDTH);
+
+ /* set window stride */
+ writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
+ fbi->regs2d + SM501_2D_PITCH);
+
+ /* set data format */
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ writel(0, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 16:
+ writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 32:
+ writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ }
+
+ /* 2d compare mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
+
+ /* 2d mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
+
+ /* source and destination x y */
+ writel((sx << 16) | sy, fbi->regs2d + SM501_2D_SOURCE);
+ writel((dx << 16) | dy, fbi->regs2d + SM501_2D_DESTINATION);
+
+ /* w/h */
+ writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
+
+ /* do area move */
+ writel(0x800000cc | rtl, fbi->regs2d + SM501_2D_CONTROL);
+}
+
+static void sm501fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+ struct sm501fb_par *par = info->par;
+ struct sm501fb_info *fbi = par->info;
+ int width = rect->width, height = rect->height;
+
+ if ((rect->dx >= info->var.xres_virtual) ||
+ (rect->dy >= info->var.yres_virtual))
+ /* Rectangle not within virtual screen, skipping */
+ return;
+ if ((rect->dx + width) >= info->var.xres_virtual)
+ width = info->var.xres_virtual - rect->dx - 1;
+ if ((rect->dy + height) >= info->var.yres_virtual)
+ height = info->var.yres_virtual - rect->dy - 1;
+
+ if (sm501fb_sync(info))
+ return;
+
+ /* set the base addresses */
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_SOURCE_BASE);
+ writel(par->screen.sm_addr, fbi->regs2d + SM501_2D_DESTINATION_BASE);
+
+ /* set the window width */
+ writel((info->var.xres << 16) | info->var.xres,
+ fbi->regs2d + SM501_2D_WINDOW_WIDTH);
+
+ /* set window stride */
+ writel((info->var.xres_virtual << 16) | info->var.xres_virtual,
+ fbi->regs2d + SM501_2D_PITCH);
+
+ /* set data format */
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ writel(0, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 16:
+ writel(0x00100000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ case 32:
+ writel(0x00200000, fbi->regs2d + SM501_2D_STRETCH);
+ break;
+ }
+
+ /* 2d compare mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_COLOR_COMPARE_MASK);
+
+ /* 2d mask */
+ writel(0xffffffff, fbi->regs2d + SM501_2D_MASK);
+
+ /* colour */
+ writel(rect->color, fbi->regs2d + SM501_2D_FOREGROUND);
+
+ /* x y */
+ writel((rect->dx << 16) | rect->dy, fbi->regs2d + SM501_2D_DESTINATION);
+
+ /* w/h */
+ writel((width << 16) | height, fbi->regs2d + SM501_2D_DIMENSION);
+
+ /* do rectangle fill */
+ writel(0x800100cc, fbi->regs2d + SM501_2D_CONTROL);
+}
+
static struct fb_ops sm501fb_ops_crt = {
.owner = THIS_MODULE,
@@ -1256,9 +1424,10 @@ static struct fb_ops sm501fb_ops_crt = {
.fb_setcolreg = sm501fb_setcolreg,
.fb_pan_display = sm501fb_pan_crt,
.fb_cursor = sm501fb_cursor,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_fillrect = sm501fb_fillrect,
+ .fb_copyarea = sm501fb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_sync = sm501fb_sync,
};
static struct fb_ops sm501fb_ops_pnl = {
@@ -1269,9 +1438,10 @@ static struct fb_ops sm501fb_ops_pnl = {
.fb_blank = sm501fb_blank_pnl,
.fb_setcolreg = sm501fb_setcolreg,
.fb_cursor = sm501fb_cursor,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
+ .fb_fillrect = sm501fb_fillrect,
+ .fb_copyarea = sm501fb_copyarea,
.fb_imageblit = cfb_imageblit,
+ .fb_sync = sm501fb_sync,
};
/* sm501_init_cursor
@@ -1329,7 +1499,8 @@ static int sm501fb_start(struct sm501fb_info *info,
dev_warn(dev, "no irq for device\n");
}
- /* allocate, reserve and remap resources for registers */
+ /* allocate, reserve and remap resources for display
+ * controller registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no resource definition for registers\n");
@@ -1338,7 +1509,7 @@ static int sm501fb_start(struct sm501fb_info *info,
}
info->regs_res = request_mem_region(res->start,
- res->end - res->start,
+ resource_size(res),
pdev->name);
if (info->regs_res == NULL) {
@@ -1347,37 +1518,63 @@ static int sm501fb_start(struct sm501fb_info *info,
goto err_release;
}
- info->regs = ioremap(res->start, (res->end - res->start)+1);
+ info->regs = ioremap(res->start, resource_size(res));
if (info->regs == NULL) {
dev_err(dev, "cannot remap registers\n");
ret = -ENXIO;
goto err_regs_res;
}
+ /* allocate, reserve and remap resources for 2d
+ * controller registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res == NULL) {
+ dev_err(dev, "no resource definition for 2d registers\n");
+ ret = -ENOENT;
+ goto err_regs_map;
+ }
+
+ info->regs2d_res = request_mem_region(res->start,
+ resource_size(res),
+ pdev->name);
+
+ if (info->regs2d_res == NULL) {
+ dev_err(dev, "cannot claim registers\n");
+ ret = -ENXIO;
+ goto err_regs_map;
+ }
+
+ info->regs2d = ioremap(res->start, resource_size(res));
+ if (info->regs2d == NULL) {
+ dev_err(dev, "cannot remap registers\n");
+ ret = -ENXIO;
+ goto err_regs2d_res;
+ }
+
/* allocate, reserve resources for framebuffer */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
ret = -ENXIO;
- goto err_regs_map;
+ goto err_regs2d_map;
}
info->fbmem_res = request_mem_region(res->start,
- (res->end - res->start)+1,
+ resource_size(res),
pdev->name);
if (info->fbmem_res == NULL) {
dev_err(dev, "cannot claim framebuffer\n");
ret = -ENXIO;
- goto err_regs_map;
+ goto err_regs2d_map;
}
- info->fbmem = ioremap(res->start, (res->end - res->start)+1);
+ info->fbmem = ioremap(res->start, resource_size(res));
if (info->fbmem == NULL) {
dev_err(dev, "cannot remap framebuffer\n");
goto err_mem_res;
}
- info->fbmem_len = (res->end - res->start)+1;
+ info->fbmem_len = resource_size(res);
/* clear framebuffer memory - avoids garbage data on unused fb */
memset(info->fbmem, 0, info->fbmem_len);
@@ -1389,8 +1586,10 @@ static int sm501fb_start(struct sm501fb_info *info,
/* enable display controller */
sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1);
- /* setup cursors */
+ /* enable 2d controller */
+ sm501_unit_power(dev->parent, SM501_GATE_2D_ENGINE, 1);
+ /* setup cursors */
sm501_init_cursor(info->fb[HEAD_CRT], SM501_DC_CRT_HWC_ADDR);
sm501_init_cursor(info->fb[HEAD_PANEL], SM501_DC_PANEL_HWC_ADDR);
@@ -1400,6 +1599,13 @@ static int sm501fb_start(struct sm501fb_info *info,
release_resource(info->fbmem_res);
kfree(info->fbmem_res);
+ err_regs2d_map:
+ iounmap(info->regs2d);
+
+ err_regs2d_res:
+ release_resource(info->regs2d_res);
+ kfree(info->regs2d_res);
+
err_regs_map:
iounmap(info->regs);
@@ -1420,6 +1626,10 @@ static void sm501fb_stop(struct sm501fb_info *info)
release_resource(info->fbmem_res);
kfree(info->fbmem_res);
+ iounmap(info->regs2d);
+ release_resource(info->regs2d_res);
+ kfree(info->regs2d_res);
+
iounmap(info->regs);
release_resource(info->regs_res);
kfree(info->regs_res);
@@ -1486,7 +1696,8 @@ static int sm501fb_init_fb(struct fb_info *fb,
par->ops.fb_cursor = NULL;
fb->fbops = &par->ops;
- fb->flags = FBINFO_FLAG_DEFAULT |
+ fb->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST |
+ FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
/* fixed data */
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index e3e597f937a..09353e2b92f 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -1134,45 +1134,33 @@ static void integrated_lvds_enable(struct lvds_setting_information
*plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
- bool turn_on_first_powersequence = false;
- bool turn_on_second_powersequence = false;
-
DEBUG_MSG(KERN_INFO "integrated_lvds_enable, out_interface:%d\n",
plvds_chip_info->output_interface);
if (plvds_setting_info->lcd_mode == LCD_SPWG)
viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT0 + BIT1);
- else
+ else
viafb_write_reg_mask(CRD2, VIACR, 0x03, BIT0 + BIT1);
- if (INTERFACE_LVDS0LVDS1 == plvds_chip_info->output_interface)
- turn_on_first_powersequence = true;
- if (INTERFACE_LVDS0 == plvds_chip_info->output_interface)
- turn_on_first_powersequence = true;
- if (INTERFACE_LVDS1 == plvds_chip_info->output_interface)
- turn_on_second_powersequence = true;
-
- if (turn_on_second_powersequence) {
- /* Use second power sequence control: */
-
- /* Use hardware control power sequence. */
- viafb_write_reg_mask(CRD3, VIACR, 0, BIT0);
-
- /* Turn on back light. */
- viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7);
- /* Turn on hardware power sequence. */
- viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1);
- }
- if (turn_on_first_powersequence) {
+ switch (plvds_chip_info->output_interface) {
+ case INTERFACE_LVDS0LVDS1:
+ case INTERFACE_LVDS0:
/* Use first power sequence control: */
-
/* Use hardware control power sequence. */
viafb_write_reg_mask(CR91, VIACR, 0, BIT0);
-
/* Turn on back light. */
viafb_write_reg_mask(CR91, VIACR, 0, BIT6 + BIT7);
-
/* Turn on hardware power sequence. */
viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3);
+ break;
+ case INTERFACE_LVDS1:
+ /* Use second power sequence control: */
+ /* Use hardware control power sequence. */
+ viafb_write_reg_mask(CRD3, VIACR, 0, BIT0);
+ /* Turn on back light. */
+ viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7);
+ /* Turn on hardware power sequence. */
+ viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1);
+ break;
}
/* Turn DFP High/Low pad on. */
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 56ec696e8af..d8df17a7d5f 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -680,7 +680,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
if (!viafb_gamma_table)
return -ENOMEM;
if (copy_from_user(viafb_gamma_table, argp,
- sizeof(viafb_gamma_table))) {
+ 256 * sizeof(u32))) {
kfree(viafb_gamma_table);
return -EFAULT;
}
@@ -694,7 +694,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
return -ENOMEM;
viafb_get_gamma_table(viafb_gamma_table);
if (copy_to_user(argp, viafb_gamma_table,
- sizeof(viafb_gamma_table))) {
+ 256 * sizeof(u32))) {
kfree(viafb_gamma_table);
return -EFAULT;
}
@@ -1797,7 +1797,7 @@ static const struct file_operations viafb_vt1636_proc_fops = {
static void viafb_init_proc(struct proc_dir_entry **viafb_entry)
{
*viafb_entry = proc_mkdir("viafb", NULL);
- if (viafb_entry) {
+ if (*viafb_entry) {
proc_create("dvp0", 0, *viafb_entry, &viafb_dvp0_proc_fops);
proc_create("dvp1", 0, *viafb_entry, &viafb_dvp1_proc_fops);
proc_create("dfph", 0, *viafb_entry, &viafb_dfph_proc_fops);
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 91a68e9eb66..603598f4dbb 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -25,7 +25,10 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
+
#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/interface/io/fbif.h>
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 3711b888d48..088f32f29a6 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -368,7 +368,7 @@ config ALIM7101_WDT
config GEODE_WDT
tristate "AMD Geode CS5535/CS5536 Watchdog"
- depends on MGEODE_LX
+ depends on CS5535_MFGPT
help
This driver enables a watchdog capability built into the
CS5535/CS5536 companion chips for the AMD Geode GX and LX
@@ -815,16 +815,6 @@ config PNX833X_WDT
timer has expired and no process has written to /dev/watchdog during
that time.
-config WDT_RM9K_GPI
- tristate "RM9000/GPI hardware watchdog"
- depends on CPU_RM9000
- help
- Watchdog implementation using the GPI hardware found on
- PMC-Sierra RM9xxx CPUs.
-
- To compile this driver as a module, choose M here: the
- module will be called rm9k_wdt.
-
config SIBYTE_WDOG
tristate "Sibyte SoC hardware watchdog"
depends on CPU_SB1
@@ -861,8 +851,10 @@ config GEF_WDT
Watchdog timer found in a number of GE Fanuc single board computers.
config MPC5200_WDT
- tristate "MPC5200 Watchdog Timer"
+ bool "MPC52xx Watchdog Timer"
depends on PPC_MPC52xx
+ help
+ Use General Purpose Timer (GPT) 0 on the MPC5200 as Watchdog.
config 8xxx_WDT
tristate "MPC8xxx Platform Watchdog Timer"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 699199b1baa..475c6110006 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -109,7 +109,6 @@ obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
-obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
@@ -118,7 +117,6 @@ obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
# POWERPC Architecture
obj-$(CONFIG_GEF_WDT) += gef_wdt.o
-obj-$(CONFIG_MPC5200_WDT) += mpc5200_wdt.o
obj-$(CONFIG_8xxx_WDT) += mpc8xxx_wdt.o
obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
obj-$(CONFIG_PIKA_WDT) += pika_wdt.o
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
index 77afb0acc50..9c6594473d3 100644
--- a/drivers/watchdog/adx_wdt.c
+++ b/drivers/watchdog/adx_wdt.c
@@ -314,7 +314,7 @@ static int adx_wdt_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops adx_wdt_pm_ops = {
+static const struct dev_pm_ops adx_wdt_pm_ops = {
.suspend = adx_wdt_suspend,
.resume = adx_wdt_resume,
};
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 9acf0015a1e..38252ff828c 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -1,6 +1,7 @@
-/* Watchdog timer for the Geode GX/LX with the CS5535/CS5536 companion chip
+/* Watchdog timer for machines with the CS5535/CS5536 companion chip
*
* Copyright (C) 2006-2007, Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,7 +20,7 @@
#include <linux/reboot.h>
#include <linux/uaccess.h>
-#include <asm/geode.h>
+#include <linux/cs5535.h>
#define GEODEWDT_HZ 500
#define GEODEWDT_SCALE 6
@@ -46,25 +47,25 @@ MODULE_PARM_DESC(nowayout,
static struct platform_device *geodewdt_platform_device;
static unsigned long wdt_flags;
-static int wdt_timer;
+static struct cs5535_mfgpt_timer *wdt_timer;
static int safe_close;
static void geodewdt_ping(void)
{
/* Stop the counter */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
/* Reset the counter */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
/* Enable the counter */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
}
static void geodewdt_disable(void)
{
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
}
static int geodewdt_set_heartbeat(int val)
@@ -72,10 +73,10 @@ static int geodewdt_set_heartbeat(int val)
if (val < 1 || val > GEODEWDT_MAX_SECONDS)
return -EINVAL;
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
timeout = val;
return 0;
@@ -215,28 +216,25 @@ static struct miscdevice geodewdt_miscdev = {
static int __devinit geodewdt_probe(struct platform_device *dev)
{
- int ret, timer;
-
- timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+ int ret;
- if (timer == -1) {
+ wdt_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
+ if (!wdt_timer) {
printk(KERN_ERR "geodewdt: No timers were available\n");
return -ENODEV;
}
- wdt_timer = timer;
-
/* Set up the timer */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
GEODEWDT_SCALE | (3 << 8));
/* Set up comparator 2 to reset when the event fires */
- geode_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
+ cs5535_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);
/* Set up the initial timeout */
- geode_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
+ cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
timeout * GEODEWDT_HZ);
ret = misc_register(&geodewdt_miscdev);
diff --git a/drivers/watchdog/mpc5200_wdt.c b/drivers/watchdog/mpc5200_wdt.c
deleted file mode 100644
index fa9c47ce0ae..00000000000
--- a/drivers/watchdog/mpc5200_wdt.c
+++ /dev/null
@@ -1,293 +0,0 @@
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <linux/of_platform.h>
-#include <linux/uaccess.h>
-#include <asm/mpc52xx.h>
-
-
-#define GPT_MODE_WDT (1 << 15)
-#define GPT_MODE_CE (1 << 12)
-#define GPT_MODE_MS_TIMER (0x4)
-
-
-struct mpc5200_wdt {
- unsigned count; /* timer ticks before watchdog kicks in */
- long ipb_freq;
- struct miscdevice miscdev;
- struct resource mem;
- struct mpc52xx_gpt __iomem *regs;
- spinlock_t io_lock;
-};
-
-/* is_active stores wether or not the /dev/watchdog device is opened */
-static unsigned long is_active;
-
-/* misc devices don't provide a way, to get back to 'dev' or 'miscdev' from
- * file operations, which sucks. But there can be max 1 watchdog anyway, so...
- */
-static struct mpc5200_wdt *wdt_global;
-
-
-/* helper to calculate timeout in timer counts */
-static void mpc5200_wdt_set_timeout(struct mpc5200_wdt *wdt, int timeout)
-{
- /* use biggest prescaler of 64k */
- wdt->count = (wdt->ipb_freq + 0xffff) / 0x10000 * timeout;
-
- if (wdt->count > 0xffff)
- wdt->count = 0xffff;
-}
-/* return timeout in seconds (calculated from timer count) */
-static int mpc5200_wdt_get_timeout(struct mpc5200_wdt *wdt)
-{
- return wdt->count * 0x10000 / wdt->ipb_freq;
-}
-
-
-/* watchdog operations */
-static int mpc5200_wdt_start(struct mpc5200_wdt *wdt)
-{
- spin_lock(&wdt->io_lock);
- /* disable */
- out_be32(&wdt->regs->mode, 0);
- /* set timeout, with maximum prescaler */
- out_be32(&wdt->regs->count, 0x0 | wdt->count);
- /* enable watchdog */
- out_be32(&wdt->regs->mode, GPT_MODE_CE | GPT_MODE_WDT |
- GPT_MODE_MS_TIMER);
- spin_unlock(&wdt->io_lock);
-
- return 0;
-}
-static int mpc5200_wdt_ping(struct mpc5200_wdt *wdt)
-{
- spin_lock(&wdt->io_lock);
- /* writing A5 to OCPW resets the watchdog */
- out_be32(&wdt->regs->mode, 0xA5000000 |
- (0xffffff & in_be32(&wdt->regs->mode)));
- spin_unlock(&wdt->io_lock);
- return 0;
-}
-static int mpc5200_wdt_stop(struct mpc5200_wdt *wdt)
-{
- spin_lock(&wdt->io_lock);
- /* disable */
- out_be32(&wdt->regs->mode, 0);
- spin_unlock(&wdt->io_lock);
- return 0;
-}
-
-
-/* file operations */
-static ssize_t mpc5200_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- struct mpc5200_wdt *wdt = file->private_data;
- mpc5200_wdt_ping(wdt);
- return 0;
-}
-static struct watchdog_info mpc5200_wdt_info = {
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
- .identity = "mpc5200 watchdog on GPT0",
-};
-static long mpc5200_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct mpc5200_wdt *wdt = file->private_data;
- int __user *data = (int __user *)arg;
- int timeout;
- int ret = 0;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user(data, &mpc5200_wdt_info,
- sizeof(mpc5200_wdt_info));
- if (ret)
- ret = -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, data);
- break;
-
- case WDIOC_KEEPALIVE:
- mpc5200_wdt_ping(wdt);
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(timeout, data);
- if (ret)
- break;
- mpc5200_wdt_set_timeout(wdt, timeout);
- mpc5200_wdt_start(wdt);
- /* fall through and return the timeout */
-
- case WDIOC_GETTIMEOUT:
- timeout = mpc5200_wdt_get_timeout(wdt);
- ret = put_user(timeout, data);
- break;
-
- default:
- ret = -ENOTTY;
- }
- return ret;
-}
-
-static int mpc5200_wdt_open(struct inode *inode, struct file *file)
-{
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &is_active))
- return -EBUSY;
-
- /* Set and activate the watchdog */
- mpc5200_wdt_set_timeout(wdt_global, 30);
- mpc5200_wdt_start(wdt_global);
- file->private_data = wdt_global;
- return nonseekable_open(inode, file);
-}
-static int mpc5200_wdt_release(struct inode *inode, struct file *file)
-{
-#if WATCHDOG_NOWAYOUT == 0
- struct mpc5200_wdt *wdt = file->private_data;
- mpc5200_wdt_stop(wdt);
- wdt->count = 0; /* == disabled */
-#endif
- clear_bit(0, &is_active);
- return 0;
-}
-
-static const struct file_operations mpc5200_wdt_fops = {
- .owner = THIS_MODULE,
- .write = mpc5200_wdt_write,
- .unlocked_ioctl = mpc5200_wdt_ioctl,
- .open = mpc5200_wdt_open,
- .release = mpc5200_wdt_release,
-};
-
-/* module operations */
-static int mpc5200_wdt_probe(struct of_device *op,
- const struct of_device_id *match)
-{
- struct mpc5200_wdt *wdt;
- int err;
- const void *has_wdt;
- int size;
-
- has_wdt = of_get_property(op->node, "has-wdt", NULL);
- if (!has_wdt)
- has_wdt = of_get_property(op->node, "fsl,has-wdt", NULL);
- if (!has_wdt)
- return -ENODEV;
-
- wdt = kzalloc(sizeof(*wdt), GFP_KERNEL);
- if (!wdt)
- return -ENOMEM;
-
- wdt->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
-
- err = of_address_to_resource(op->node, 0, &wdt->mem);
- if (err)
- goto out_free;
- size = wdt->mem.end - wdt->mem.start + 1;
- if (!request_mem_region(wdt->mem.start, size, "mpc5200_wdt")) {
- err = -ENODEV;
- goto out_free;
- }
- wdt->regs = ioremap(wdt->mem.start, size);
- if (!wdt->regs) {
- err = -ENODEV;
- goto out_release;
- }
-
- dev_set_drvdata(&op->dev, wdt);
- spin_lock_init(&wdt->io_lock);
-
- wdt->miscdev = (struct miscdevice) {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mpc5200_wdt_fops,
- .parent = &op->dev,
- };
- wdt_global = wdt;
- err = misc_register(&wdt->miscdev);
- if (!err)
- return 0;
-
- iounmap(wdt->regs);
-out_release:
- release_mem_region(wdt->mem.start, size);
-out_free:
- kfree(wdt);
- return err;
-}
-
-static int mpc5200_wdt_remove(struct of_device *op)
-{
- struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
-
- mpc5200_wdt_stop(wdt);
- misc_deregister(&wdt->miscdev);
- iounmap(wdt->regs);
- release_mem_region(wdt->mem.start, wdt->mem.end - wdt->mem.start + 1);
- kfree(wdt);
-
- return 0;
-}
-static int mpc5200_wdt_suspend(struct of_device *op, pm_message_t state)
-{
- struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
- mpc5200_wdt_stop(wdt);
- return 0;
-}
-static int mpc5200_wdt_resume(struct of_device *op)
-{
- struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
- if (wdt->count)
- mpc5200_wdt_start(wdt);
- return 0;
-}
-static int mpc5200_wdt_shutdown(struct of_device *op)
-{
- struct mpc5200_wdt *wdt = dev_get_drvdata(&op->dev);
- mpc5200_wdt_stop(wdt);
- return 0;
-}
-
-static struct of_device_id mpc5200_wdt_match[] = {
- { .compatible = "mpc5200-gpt", },
- { .compatible = "fsl,mpc5200-gpt", },
- {},
-};
-static struct of_platform_driver mpc5200_wdt_driver = {
- .owner = THIS_MODULE,
- .name = "mpc5200-gpt-wdt",
- .match_table = mpc5200_wdt_match,
- .probe = mpc5200_wdt_probe,
- .remove = mpc5200_wdt_remove,
- .suspend = mpc5200_wdt_suspend,
- .resume = mpc5200_wdt_resume,
- .shutdown = mpc5200_wdt_shutdown,
-};
-
-
-static int __init mpc5200_wdt_init(void)
-{
- return of_register_platform_driver(&mpc5200_wdt_driver);
-}
-
-static void __exit mpc5200_wdt_exit(void)
-{
- of_unregister_platform_driver(&mpc5200_wdt_driver);
-}
-
-module_init(mpc5200_wdt_init);
-module_exit(mpc5200_wdt_exit);
-
-MODULE_AUTHOR("Domen Puncer <domen.puncer@telargo.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/rm9k_wdt.c b/drivers/watchdog/rm9k_wdt.c
deleted file mode 100644
index bb66958b943..00000000000
--- a/drivers/watchdog/rm9k_wdt.c
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * Watchdog implementation for GPI h/w found on PMC-Sierra RM9xxx
- * chips.
- *
- * Copyright (C) 2004 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/reboot.h>
-#include <linux/notifier.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/rm9k-ocd.h>
-
-#include <rm9k_wdt.h>
-
-
-#define CLOCK 125000000
-#define MAX_TIMEOUT_SECONDS 32
-#define CPCCR 0x0080
-#define CPGIG1SR 0x0044
-#define CPGIG1ER 0x0054
-
-
-/* Function prototypes */
-static irqreturn_t wdt_gpi_irqhdl(int, void *);
-static void wdt_gpi_start(void);
-static void wdt_gpi_stop(void);
-static void wdt_gpi_set_timeout(unsigned int);
-static int wdt_gpi_open(struct inode *, struct file *);
-static int wdt_gpi_release(struct inode *, struct file *);
-static ssize_t wdt_gpi_write(struct file *, const char __user *, size_t,
- loff_t *);
-static long wdt_gpi_ioctl(struct file *, unsigned int, unsigned long);
-static int wdt_gpi_notify(struct notifier_block *, unsigned long, void *);
-static const struct resource *wdt_gpi_get_resource(struct platform_device *,
- const char *, unsigned int);
-static int __init wdt_gpi_probe(struct platform_device *);
-static int __exit wdt_gpi_remove(struct platform_device *);
-
-
-static const char wdt_gpi_name[] = "wdt_gpi";
-static atomic_t opencnt;
-static int expect_close;
-static int locked;
-
-
-/* These are set from device resources */
-static void __iomem *wd_regs;
-static unsigned int wd_irq, wd_ctr;
-
-
-/* Module arguments */
-static int timeout = MAX_TIMEOUT_SECONDS;
-module_param(timeout, int, 0444);
-MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds");
-
-static unsigned long resetaddr = 0xbffdc200;
-module_param(resetaddr, ulong, 0444);
-MODULE_PARM_DESC(resetaddr, "Address to write to to force a reset");
-
-static unsigned long flagaddr = 0xbffdc104;
-module_param(flagaddr, ulong, 0444);
-MODULE_PARM_DESC(flagaddr, "Address to write to boot flags to");
-
-static int powercycle;
-module_param(powercycle, bool, 0444);
-MODULE_PARM_DESC(powercycle, "Cycle power if watchdog expires");
-
-static int nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0444);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be disabled once started");
-
-
-/* Kernel interfaces */
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = wdt_gpi_open,
- .release = wdt_gpi_release,
- .write = wdt_gpi_write,
- .unlocked_ioctl = wdt_gpi_ioctl,
-};
-
-static struct miscdevice miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = wdt_gpi_name,
- .fops = &fops,
-};
-
-static struct notifier_block wdt_gpi_shutdown = {
- .notifier_call = wdt_gpi_notify,
-};
-
-
-/* Interrupt handler */
-static irqreturn_t wdt_gpi_irqhdl(int irq, void *ctxt)
-{
- if (!unlikely(__raw_readl(wd_regs + 0x0008) & 0x1))
- return IRQ_NONE;
- __raw_writel(0x1, wd_regs + 0x0008);
-
-
- printk(KERN_CRIT "%s: watchdog expired - resetting system\n",
- wdt_gpi_name);
-
- *(volatile char *) flagaddr |= 0x01;
- *(volatile char *) resetaddr = powercycle ? 0x01 : 0x2;
- iob();
- while (1)
- cpu_relax();
-}
-
-
-/* Watchdog functions */
-static void wdt_gpi_start(void)
-{
- u32 reg;
-
- lock_titan_regs();
- reg = titan_readl(CPGIG1ER);
- titan_writel(reg | (0x100 << wd_ctr), CPGIG1ER);
- iob();
- unlock_titan_regs();
-}
-
-static void wdt_gpi_stop(void)
-{
- u32 reg;
-
- lock_titan_regs();
- reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
- titan_writel(reg, CPCCR);
- reg = titan_readl(CPGIG1ER);
- titan_writel(reg & ~(0x100 << wd_ctr), CPGIG1ER);
- iob();
- unlock_titan_regs();
-}
-
-static void wdt_gpi_set_timeout(unsigned int to)
-{
- u32 reg;
- const u32 wdval = (to * CLOCK) & ~0x0000000f;
-
- lock_titan_regs();
- reg = titan_readl(CPCCR) & ~(0xf << (wd_ctr * 4));
- titan_writel(reg, CPCCR);
- wmb();
- __raw_writel(wdval, wd_regs + 0x0000);
- wmb();
- titan_writel(reg | (0x2 << (wd_ctr * 4)), CPCCR);
- wmb();
- titan_writel(reg | (0x5 << (wd_ctr * 4)), CPCCR);
- iob();
- unlock_titan_regs();
-}
-
-
-/* /dev/watchdog operations */
-static int wdt_gpi_open(struct inode *inode, struct file *file)
-{
- int res;
-
- if (unlikely(atomic_dec_if_positive(&opencnt) < 0))
- return -EBUSY;
-
- expect_close = 0;
- if (locked) {
- module_put(THIS_MODULE);
- free_irq(wd_irq, &miscdev);
- locked = 0;
- }
-
- res = request_irq(wd_irq, wdt_gpi_irqhdl, IRQF_SHARED | IRQF_DISABLED,
- wdt_gpi_name, &miscdev);
- if (unlikely(res))
- return res;
-
- wdt_gpi_set_timeout(timeout);
- wdt_gpi_start();
-
- printk(KERN_INFO "%s: watchdog started, timeout = %u seconds\n",
- wdt_gpi_name, timeout);
- return nonseekable_open(inode, file);
-}
-
-static int wdt_gpi_release(struct inode *inode, struct file *file)
-{
- if (nowayout) {
- printk(KERN_INFO "%s: no way out - watchdog left running\n",
- wdt_gpi_name);
- __module_get(THIS_MODULE);
- locked = 1;
- } else {
- if (expect_close) {
- wdt_gpi_stop();
- free_irq(wd_irq, &miscdev);
- printk(KERN_INFO "%s: watchdog stopped\n",
- wdt_gpi_name);
- } else {
- printk(KERN_CRIT "%s: unexpected close() -"
- " watchdog left running\n",
- wdt_gpi_name);
- wdt_gpi_set_timeout(timeout);
- __module_get(THIS_MODULE);
- locked = 1;
- }
- }
-
- atomic_inc(&opencnt);
- return 0;
-}
-
-static ssize_t wdt_gpi_write(struct file *f, const char __user *d, size_t s,
- loff_t *o)
-{
- char val;
-
- wdt_gpi_set_timeout(timeout);
- expect_close = (s > 0) && !get_user(val, d) && (val == 'V');
- return s ? 1 : 0;
-}
-
-static long wdt_gpi_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- long res = -ENOTTY;
- const long size = _IOC_SIZE(cmd);
- int stat;
- void __user *argp = (void __user *)arg;
- static struct watchdog_info wdinfo = {
- .identity = "RM9xxx/GPI watchdog",
- .firmware_version = 0,
- .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING
- };
-
- if (unlikely(_IOC_TYPE(cmd) != WATCHDOG_IOCTL_BASE))
- return -ENOTTY;
-
- if ((_IOC_DIR(cmd) & _IOC_READ)
- && !access_ok(VERIFY_WRITE, arg, size))
- return -EFAULT;
-
- if ((_IOC_DIR(cmd) & _IOC_WRITE)
- && !access_ok(VERIFY_READ, arg, size))
- return -EFAULT;
-
- expect_close = 0;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- wdinfo.options = nowayout ?
- WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING :
- WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE;
- res = __copy_to_user(argp, &wdinfo, size) ? -EFAULT : size;
- break;
-
- case WDIOC_GETSTATUS:
- break;
-
- case WDIOC_GETBOOTSTATUS:
- stat = (*(volatile char *) flagaddr & 0x01)
- ? WDIOF_CARDRESET : 0;
- res = __copy_to_user(argp, &stat, size) ?
- -EFAULT : size;
- break;
-
- case WDIOC_SETOPTIONS:
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_gpi_set_timeout(timeout);
- res = size;
- break;
-
- case WDIOC_SETTIMEOUT:
- {
- int val;
- if (unlikely(__copy_from_user(&val, argp, size))) {
- res = -EFAULT;
- break;
- }
-
- if (val > MAX_TIMEOUT_SECONDS)
- val = MAX_TIMEOUT_SECONDS;
- timeout = val;
- wdt_gpi_set_timeout(val);
- res = size;
- printk(KERN_INFO "%s: timeout set to %u seconds\n",
- wdt_gpi_name, timeout);
- }
- break;
-
- case WDIOC_GETTIMEOUT:
- res = __copy_to_user(argp, &timeout, size) ?
- -EFAULT : size;
- break;
- }
-
- return res;
-}
-
-
-/* Shutdown notifier */
-static int wdt_gpi_notify(struct notifier_block *this, unsigned long code,
- void *unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- wdt_gpi_stop();
-
- return NOTIFY_DONE;
-}
-
-
-/* Init & exit procedures */
-static const struct resource *wdt_gpi_get_resource(struct platform_device *pdv,
- const char *name, unsigned int type)
-{
- char buf[80];
- if (snprintf(buf, sizeof(buf), "%s_0", name) >= sizeof(buf))
- return NULL;
- return platform_get_resource_byname(pdv, type, buf);
-}
-
-/* No hotplugging on the platform bus - use __devinit */
-static int __devinit wdt_gpi_probe(struct platform_device *pdv)
-{
- int res;
- const struct resource
- * const rr = wdt_gpi_get_resource(pdv, WDT_RESOURCE_REGS,
- IORESOURCE_MEM),
- * const ri = wdt_gpi_get_resource(pdv, WDT_RESOURCE_IRQ,
- IORESOURCE_IRQ),
- * const rc = wdt_gpi_get_resource(pdv, WDT_RESOURCE_COUNTER,
- 0);
-
- if (unlikely(!rr || !ri || !rc))
- return -ENXIO;
-
- wd_regs = ioremap_nocache(rr->start, rr->end + 1 - rr->start);
- if (unlikely(!wd_regs))
- return -ENOMEM;
- wd_irq = ri->start;
- wd_ctr = rc->start;
- res = misc_register(&miscdev);
- if (res)
- iounmap(wd_regs);
- else
- register_reboot_notifier(&wdt_gpi_shutdown);
- return res;
-}
-
-static int __devexit wdt_gpi_remove(struct platform_device *dev)
-{
- int res;
-
- unregister_reboot_notifier(&wdt_gpi_shutdown);
- res = misc_deregister(&miscdev);
- iounmap(wd_regs);
- wd_regs = NULL;
- return res;
-}
-
-
-/* Device driver init & exit */
-static struct platform_driver wgt_gpi_driver = {
- .driver = {
- .name = wdt_gpi_name,
- .owner = THIS_MODULE,
- },
- .probe = wdt_gpi_probe,
- .remove = __devexit_p(wdt_gpi_remove),
-};
-
-static int __init wdt_gpi_init_module(void)
-{
- atomic_set(&opencnt, 1);
- if (timeout > MAX_TIMEOUT_SECONDS)
- timeout = MAX_TIMEOUT_SECONDS;
- return platform_driver_register(&wdt_gpi_driver);
-}
-
-static void __exit wdt_gpi_cleanup_module(void)
-{
- platform_driver_unregister(&wdt_gpi_driver);
-}
-
-module_init(wdt_gpi_init_module);
-module_exit(wdt_gpi_cleanup_module);
-
-MODULE_AUTHOR("Thomas Koeller <thomas.koeller@baslerweb.com>");
-MODULE_DESCRIPTION("Basler eXcite watchdog driver for gpi devices");
-MODULE_VERSION("0.1");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index cb46556f297..8162a40d152 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3
@@ -48,7 +48,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
static int twl4030_wdt_write(unsigned char val)
{
- return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
+ return twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
TWL4030_WATCHDOG_CFG_REG_OFFS);
}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 42043361358..f6738d8b02b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -52,6 +52,8 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/xenbus.h>
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 0f765a92018..14e2d995e95 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -1,5 +1,6 @@
#include <linux/notifier.h>
+#include <xen/xen.h>
#include <xen/xenbus.h>
#include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 79bedba44fe..f70a4f4698c 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -48,6 +48,8 @@
#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
+
+#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7d8f531fb8e..4c6c0bd636a 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
+#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 88a60e03ccf..ae5cb05a1a1 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -14,6 +14,7 @@
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 649fcdf114b..2f7aaa99dc4 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -49,6 +49,8 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 6559e0c752c..8924d93136f 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -13,6 +13,8 @@
#include <linux/fs.h>
#include <linux/magic.h>
+#include <xen/xen.h>
+
#include "xenfs.h"
#include <asm/xen/hypervisor.h>
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c63a3c8beb7..5e15a21dbf9 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -671,7 +671,6 @@ ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
ssize_t result;
size_t count = iov_length(iov, nr_segs);
- int ret;
_enter("{%x.%u},{%zu},%lu,",
vnode->fid.vid, vnode->fid.vnode, count, nr_segs);
@@ -691,13 +690,6 @@ ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
return result;
}
- /* return error values for O_SYNC and IS_SYNC() */
- if (IS_SYNC(&vnode->vfs_inode) || iocb->ki_filp->f_flags & O_SYNC) {
- ret = afs_fsync(iocb->ki_filp, dentry, 1);
- if (ret < 0)
- result = ret;
- }
-
_leave(" = %zd", result);
return result;
}
diff --git a/fs/aio.c b/fs/aio.c
index c30dfc00610..1cf12b3dd83 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -711,10 +711,8 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
*/
ret = retry(iocb);
- if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
- BUG_ON(!list_empty(&iocb->ki_wait.task_list));
+ if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
aio_complete(iocb, ret, 0);
- }
out:
spin_lock_irq(&ctx->ctx_lock);
@@ -866,13 +864,6 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
unsigned long flags;
int run = 0;
- /* We're supposed to be the only path putting the iocb back on the run
- * list. If we find that the iocb is *back* on a wait queue already
- * than retry has happened before we could queue the iocb. This also
- * means that the retry could have completed and freed our iocb, no
- * good. */
- BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
-
spin_lock_irqsave(&ctx->ctx_lock, flags);
/* set this inside the lock so that we can't race with aio_run_iocb()
* testing it and putting the iocb on the run list under the lock */
@@ -886,7 +877,7 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
/*
* kick_iocb:
* Called typically from a wait queue callback context
- * (aio_wake_function) to trigger a retry of the iocb.
+ * to trigger a retry of the iocb.
* The retry is usually executed by aio workqueue
* threads (See aio_kick_handler).
*/
@@ -1520,31 +1511,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
return 0;
}
-/*
- * aio_wake_function:
- * wait queue callback function for aio notification,
- * Simply triggers a retry of the operation via kick_iocb.
- *
- * This callback is specified in the wait queue entry in
- * a kiocb.
- *
- * Note:
- * This routine is executed with the wait queue lock held.
- * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
- * the ioctx lock inside the wait queue lock. This is safe
- * because this callback isn't used for wait queues which
- * are nested inside ioctx lock (i.e. ctx->wait)
- */
-static int aio_wake_function(wait_queue_t *wait, unsigned mode,
- int sync, void *key)
-{
- struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
-
- list_del_init(&wait->task_list);
- kick_iocb(iocb);
- return 1;
-}
-
static void aio_batch_add(struct address_space *mapping,
struct hlist_head *batch_hash)
{
@@ -1642,8 +1608,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
req->ki_opcode = iocb->aio_lio_opcode;
- init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
- INIT_LIST_HEAD(&req->ki_wait.task_list);
ret = aio_setup_iocb(req);
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 2ca7a7cafdb..9f0bf13291e 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -35,14 +35,13 @@ static int anon_inodefs_get_sb(struct file_system_type *fs_type, int flags,
mnt);
}
-static int anon_inodefs_delete_dentry(struct dentry *dentry)
+/*
+ * anon_inodefs_dname() is called from d_path().
+ */
+static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
- /*
- * We faked vfs to believe the dentry was hashed when we created it.
- * Now we restore the flag so that dput() will work correctly.
- */
- dentry->d_flags |= DCACHE_UNHASHED;
- return 1;
+ return dynamic_dname(dentry, buffer, buflen, "anon_inode:%s",
+ dentry->d_name.name);
}
static struct file_system_type anon_inode_fs_type = {
@@ -51,7 +50,7 @@ static struct file_system_type anon_inode_fs_type = {
.kill_sb = kill_anon_super,
};
static const struct dentry_operations anon_inodefs_dentry_operations = {
- .d_delete = anon_inodefs_delete_dentry,
+ .d_dname = anon_inodefs_dname,
};
/*
@@ -88,7 +87,7 @@ struct file *anon_inode_getfile(const char *name,
void *priv, int flags)
{
struct qstr this;
- struct dentry *dentry;
+ struct path path;
struct file *file;
int error;
@@ -106,10 +105,11 @@ struct file *anon_inode_getfile(const char *name,
this.name = name;
this.len = strlen(name);
this.hash = 0;
- dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this);
- if (!dentry)
+ path.dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this);
+ if (!path.dentry)
goto err_module;
+ path.mnt = mntget(anon_inode_mnt);
/*
* We know the anon_inode inode count is always greater than zero,
* so we can avoid doing an igrab() and we can use an open-coded
@@ -117,27 +117,24 @@ struct file *anon_inode_getfile(const char *name,
*/
atomic_inc(&anon_inode_inode->i_count);
- dentry->d_op = &anon_inodefs_dentry_operations;
- /* Do not publish this dentry inside the global dentry hash table */
- dentry->d_flags &= ~DCACHE_UNHASHED;
- d_instantiate(dentry, anon_inode_inode);
+ path.dentry->d_op = &anon_inodefs_dentry_operations;
+ d_instantiate(path.dentry, anon_inode_inode);
error = -ENFILE;
- file = alloc_file(anon_inode_mnt, dentry,
- FMODE_READ | FMODE_WRITE, fops);
+ file = alloc_file(&path, OPEN_FMODE(flags), fops);
if (!file)
goto err_dput;
file->f_mapping = anon_inode_inode->i_mapping;
file->f_pos = 0;
- file->f_flags = O_RDWR | (flags & O_NONBLOCK);
+ file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->f_version = 0;
file->private_data = priv;
return file;
err_dput:
- dput(dentry);
+ path_put(&path);
err_module:
module_put(fops->owner);
return ERR_PTR(error);
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 8f7cdde4173..0118d67221b 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -60,6 +60,11 @@ do { \
current->pid, __func__, ##args); \
} while (0)
+struct rehash_entry {
+ struct task_struct *task;
+ struct list_head list;
+};
+
/* Unified info structure. This is pointed to by both the dentry and
inode structures. Each file in the filesystem has an instance of this
structure. It holds a reference to the dentry, so dentries are never
@@ -75,6 +80,9 @@ struct autofs_info {
struct completion expire_complete;
struct list_head active;
+ int active_count;
+ struct list_head rehash_list;
+
struct list_head expiring;
struct autofs_sb_info *sbi;
@@ -95,6 +103,8 @@ struct autofs_info {
#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
+#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
+#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */
struct autofs_wait_queue {
wait_queue_head_t queue;
@@ -161,7 +171,7 @@ static inline int autofs4_ispending(struct dentry *dentry)
{
struct autofs_info *inf = autofs4_dentry_ino(dentry);
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING)
+ if (inf->flags & AUTOFS_INF_PENDING)
return 1;
if (inf->flags & AUTOFS_INF_EXPIRING)
@@ -264,5 +274,31 @@ out:
return ret;
}
+static inline void autofs4_add_expiring(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ if (list_empty(&ino->expiring))
+ list_add(&ino->expiring, &sbi->expiring_list);
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static inline void autofs4_del_expiring(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ if (!list_empty(&ino->expiring))
+ list_del_init(&ino->expiring);
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
void autofs4_dentry_release(struct dentry *);
extern void autofs4_kill_sb(struct super_block *);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 3da18d45348..74bc9aa6df3 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -27,7 +27,7 @@ static inline int autofs4_can_expire(struct dentry *dentry,
return 0;
/* No point expiring a pending mount */
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING)
+ if (ino->flags & AUTOFS_INF_PENDING)
return 0;
if (!do_now) {
@@ -279,6 +279,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
root->d_mounted--;
}
ino->flags |= AUTOFS_INF_EXPIRING;
+ autofs4_add_expiring(root);
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
return root;
@@ -406,6 +407,7 @@ found:
expired, (int)expired->d_name.len, expired->d_name.name);
ino = autofs4_dentry_ino(expired);
ino->flags |= AUTOFS_INF_EXPIRING;
+ autofs4_add_expiring(expired);
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
spin_lock(&dcache_lock);
@@ -433,7 +435,7 @@ int autofs4_expire_wait(struct dentry *dentry)
DPRINTK("expire done status=%d", status);
- if (d_unhashed(dentry))
+ if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode))
return -EAGAIN;
return status;
@@ -473,6 +475,7 @@ int autofs4_expire_run(struct super_block *sb,
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
ino->flags &= ~AUTOFS_INF_EXPIRING;
+ autofs4_del_expiring(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
@@ -503,6 +506,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
}
ino->flags &= ~AUTOFS_INF_EXPIRING;
+ autofs4_del_expiring(dentry);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 69c8142da83..d0a3de24745 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -49,6 +49,8 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
ino->dentry = NULL;
ino->size = 0;
INIT_LIST_HEAD(&ino->active);
+ INIT_LIST_HEAD(&ino->rehash_list);
+ ino->active_count = 0;
INIT_LIST_HEAD(&ino->expiring);
atomic_set(&ino->count, 0);
}
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index b96a3c57359..30cc9ddf4b7 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -72,6 +72,139 @@ const struct inode_operations autofs4_dir_inode_operations = {
.rmdir = autofs4_dir_rmdir,
};
+static void autofs4_add_active(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ if (!ino->active_count) {
+ if (list_empty(&ino->active))
+ list_add(&ino->active, &sbi->active_list);
+ }
+ ino->active_count++;
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static void autofs4_del_active(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino) {
+ spin_lock(&sbi->lookup_lock);
+ ino->active_count--;
+ if (!ino->active_count) {
+ if (!list_empty(&ino->active))
+ list_del_init(&ino->active);
+ }
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static void autofs4_add_rehash_entry(struct autofs_info *ino,
+ struct rehash_entry *entry)
+{
+ entry->task = current;
+ INIT_LIST_HEAD(&entry->list);
+ list_add(&entry->list, &ino->rehash_list);
+ return;
+}
+
+static void autofs4_remove_rehash_entry(struct autofs_info *ino)
+{
+ struct list_head *head = &ino->rehash_list;
+ struct rehash_entry *entry;
+ list_for_each_entry(entry, head, list) {
+ if (entry->task == current) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ return;
+}
+
+static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
+{
+ struct autofs_sb_info *sbi = ino->sbi;
+ struct rehash_entry *entry, *next;
+ struct list_head *head;
+
+ spin_lock(&sbi->fs_lock);
+ spin_lock(&sbi->lookup_lock);
+ if (!(ino->flags & AUTOFS_INF_REHASH)) {
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&sbi->fs_lock);
+ return;
+ }
+ ino->flags &= ~AUTOFS_INF_REHASH;
+ head = &ino->rehash_list;
+ list_for_each_entry_safe(entry, next, head, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&sbi->fs_lock);
+ dput(ino->dentry);
+
+ return;
+}
+
+static void autofs4_revalidate_drop(struct dentry *dentry,
+ struct rehash_entry *entry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ /*
+ * Add to the active list so we can pick this up in
+ * ->lookup(). Also add an entry to a rehash list so
+ * we know when there are no dentrys in flight so we
+ * know when we can rehash the dentry.
+ */
+ spin_lock(&sbi->lookup_lock);
+ if (list_empty(&ino->active))
+ list_add(&ino->active, &sbi->active_list);
+ autofs4_add_rehash_entry(ino, entry);
+ spin_unlock(&sbi->lookup_lock);
+ if (!(ino->flags & AUTOFS_INF_REHASH)) {
+ ino->flags |= AUTOFS_INF_REHASH;
+ dget(dentry);
+ spin_lock(&dentry->d_lock);
+ __d_drop(dentry);
+ spin_unlock(&dentry->d_lock);
+ }
+ return;
+}
+
+static void autofs4_revalidate_rehash(struct dentry *dentry)
+{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ if (ino->flags & AUTOFS_INF_REHASH) {
+ spin_lock(&sbi->lookup_lock);
+ autofs4_remove_rehash_entry(ino);
+ if (list_empty(&ino->rehash_list)) {
+ spin_unlock(&sbi->lookup_lock);
+ ino->flags &= ~AUTOFS_INF_REHASH;
+ d_rehash(dentry);
+ dput(ino->dentry);
+ } else
+ spin_unlock(&sbi->lookup_lock);
+ }
+ return;
+}
+
+static unsigned int autofs4_need_mount(unsigned int flags)
+{
+ unsigned int res = 0;
+ if (flags & (TRIGGER_FLAGS | TRIGGER_INTENTS))
+ res = 1;
+ return res;
+}
+
static int autofs4_dir_open(struct inode *inode, struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
@@ -93,7 +226,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
* it.
*/
spin_lock(&dcache_lock);
- if (!d_mountpoint(dentry) && __simple_empty(dentry)) {
+ if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
spin_unlock(&dcache_lock);
return -ENOENT;
}
@@ -103,7 +236,7 @@ out:
return dcache_dir_open(inode, file);
}
-static int try_to_fill_dentry(struct dentry *dentry, int flags)
+static int try_to_fill_dentry(struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
@@ -116,55 +249,17 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
* Wait for a pending mount, triggering one if there
* isn't one already
*/
- if (dentry->d_inode == NULL) {
- DPRINTK("waiting for mount name=%.*s",
- dentry->d_name.len, dentry->d_name.name);
-
- status = autofs4_wait(sbi, dentry, NFY_MOUNT);
-
- DPRINTK("mount done status=%d", status);
-
- /* Turn this into a real negative dentry? */
- if (status == -ENOENT) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- return status;
- } else if (status) {
- /* Return a negative dentry, but leave it "pending" */
- return status;
- }
- /* Trigger mount for path component or follow link */
- } else if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
- flags & (TRIGGER_FLAGS | TRIGGER_INTENTS) ||
- current->link_count) {
- DPRINTK("waiting for mount name=%.*s",
- dentry->d_name.len, dentry->d_name.name);
-
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- status = autofs4_wait(sbi, dentry, NFY_MOUNT);
+ DPRINTK("waiting for mount name=%.*s",
+ dentry->d_name.len, dentry->d_name.name);
- DPRINTK("mount done status=%d", status);
+ status = autofs4_wait(sbi, dentry, NFY_MOUNT);
- if (status) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- return status;
- }
- }
-
- /* Initialize expiry counter after successful mount */
- if (ino)
- ino->last_used = jiffies;
+ DPRINTK("mount done status=%d", status);
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
+ /* Update expiry counter */
+ ino->last_used = jiffies;
- return 0;
+ return status;
}
/* For autofs direct mounts the follow link triggers the mount */
@@ -202,27 +297,39 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
autofs4_expire_wait(dentry);
/* We trigger a mount for almost all flags */
- lookup_type = nd->flags & (TRIGGER_FLAGS | TRIGGER_INTENTS);
- if (!(lookup_type || dentry->d_flags & DCACHE_AUTOFS_PENDING))
+ lookup_type = autofs4_need_mount(nd->flags);
+ spin_lock(&sbi->fs_lock);
+ spin_lock(&dcache_lock);
+ if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
goto follow;
+ }
/*
* If the dentry contains directories then it is an autofs
* multi-mount with no root mount offset. So don't try to
* mount it again.
*/
- spin_lock(&dcache_lock);
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
- (!d_mountpoint(dentry) && __simple_empty(dentry))) {
+ if (ino->flags & AUTOFS_INF_PENDING ||
+ (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
+ ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+
+ status = try_to_fill_dentry(dentry);
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
- status = try_to_fill_dentry(dentry, 0);
if (status)
goto out_error;
goto follow;
}
spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
follow:
/*
* If there is no root mount it must be an autofs
@@ -254,18 +361,47 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
{
struct inode *dir = dentry->d_parent->d_inode;
struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
- int oz_mode = autofs4_oz_mode(sbi);
+ struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ struct rehash_entry *entry;
int flags = nd ? nd->flags : 0;
- int status = 1;
+ unsigned int mutex_aquired;
+
+ DPRINTK("name = %.*s oz_mode = %d",
+ dentry->d_name.len, dentry->d_name.name, oz_mode);
+
+ /* Daemon never causes a mount to trigger */
+ if (autofs4_oz_mode(sbi))
+ return 1;
+
+ entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ mutex_aquired = mutex_trylock(&dir->i_mutex);
- /* Pending dentry */
spin_lock(&sbi->fs_lock);
+ spin_lock(&dcache_lock);
+ /* Pending dentry */
if (autofs4_ispending(dentry)) {
- /* The daemon never causes a mount to trigger */
- spin_unlock(&sbi->fs_lock);
+ int status;
- if (oz_mode)
- return 1;
+ /*
+ * We can only unhash and send this to ->lookup() if
+ * the directory mutex is held over d_revalidate() and
+ * ->lookup(). This prevents the VFS from incorrectly
+ * seeing the dentry as non-existent.
+ */
+ ino->flags |= AUTOFS_INF_PENDING;
+ if (!mutex_aquired) {
+ autofs4_revalidate_drop(dentry, entry);
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ return 0;
+ }
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ mutex_unlock(&dir->i_mutex);
+ kfree(entry);
/*
* If the directory has gone away due to an expire
@@ -279,46 +415,82 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
* A zero status is success otherwise we have a
* negative error code.
*/
- status = try_to_fill_dentry(dentry, flags);
+ status = try_to_fill_dentry(dentry);
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+
if (status == 0)
return 1;
return status;
}
- spin_unlock(&sbi->fs_lock);
-
- /* Negative dentry.. invalidate if "old" */
- if (dentry->d_inode == NULL)
- return 0;
/* Check for a non-mountpoint directory with no contents */
- spin_lock(&dcache_lock);
if (S_ISDIR(dentry->d_inode->i_mode) &&
- !d_mountpoint(dentry) &&
- __simple_empty(dentry)) {
+ !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
DPRINTK("dentry=%p %.*s, emptydir",
dentry, dentry->d_name.len, dentry->d_name.name);
- spin_unlock(&dcache_lock);
- /* The daemon never causes a mount to trigger */
- if (oz_mode)
- return 1;
+ if (autofs4_need_mount(flags) || current->link_count) {
+ int status;
- /*
- * A zero status is success otherwise we have a
- * negative error code.
- */
- status = try_to_fill_dentry(dentry, flags);
- if (status == 0)
- return 1;
+ /*
+ * We can only unhash and send this to ->lookup() if
+ * the directory mutex is held over d_revalidate() and
+ * ->lookup(). This prevents the VFS from incorrectly
+ * seeing the dentry as non-existent.
+ */
+ ino->flags |= AUTOFS_INF_PENDING;
+ if (!mutex_aquired) {
+ autofs4_revalidate_drop(dentry, entry);
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ return 0;
+ }
+ spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+ mutex_unlock(&dir->i_mutex);
+ kfree(entry);
- return status;
+ /*
+ * A zero status is success otherwise we have a
+ * negative error code.
+ */
+ status = try_to_fill_dentry(dentry);
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+
+ if (status == 0)
+ return 1;
+
+ return status;
+ }
}
spin_unlock(&dcache_lock);
+ spin_unlock(&sbi->fs_lock);
+
+ if (mutex_aquired)
+ mutex_unlock(&dir->i_mutex);
+
+ kfree(entry);
return 1;
}
+static void autofs4_free_rehash_entrys(struct autofs_info *inf)
+{
+ struct list_head *head = &inf->rehash_list;
+ struct rehash_entry *entry, *next;
+ list_for_each_entry_safe(entry, next, head, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
void autofs4_dentry_release(struct dentry *de)
{
struct autofs_info *inf;
@@ -337,6 +509,8 @@ void autofs4_dentry_release(struct dentry *de)
list_del(&inf->active);
if (!list_empty(&inf->expiring))
list_del(&inf->expiring);
+ if (!list_empty(&inf->rehash_list))
+ autofs4_free_rehash_entrys(inf);
spin_unlock(&sbi->lookup_lock);
}
@@ -359,35 +533,52 @@ static const struct dentry_operations autofs4_dentry_operations = {
.d_release = autofs4_dentry_release,
};
-static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name)
+static struct dentry *autofs4_lookup_active(struct dentry *dentry)
{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct dentry *parent = dentry->d_parent;
+ struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
struct list_head *p, *head;
+restart:
spin_lock(&dcache_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->active_list;
list_for_each(p, head) {
struct autofs_info *ino;
- struct dentry *dentry;
+ struct dentry *active;
struct qstr *qstr;
ino = list_entry(p, struct autofs_info, active);
- dentry = ino->dentry;
+ active = ino->dentry;
- spin_lock(&dentry->d_lock);
+ spin_lock(&active->d_lock);
/* Already gone? */
- if (atomic_read(&dentry->d_count) == 0)
+ if (atomic_read(&active->d_count) == 0)
goto next;
- qstr = &dentry->d_name;
+ if (active->d_inode && IS_DEADDIR(active->d_inode)) {
+ if (!list_empty(&ino->rehash_list)) {
+ dget(active);
+ spin_unlock(&active->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ autofs4_remove_rehash_entrys(ino);
+ dput(active);
+ goto restart;
+ }
+ goto next;
+ }
+
+ qstr = &active->d_name;
- if (dentry->d_name.hash != hash)
+ if (active->d_name.hash != hash)
goto next;
- if (dentry->d_parent != parent)
+ if (active->d_parent != parent)
goto next;
if (qstr->len != len)
@@ -395,15 +586,13 @@ static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct d
if (memcmp(qstr->name, str, len))
goto next;
- if (d_unhashed(dentry)) {
- dget(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
- return dentry;
- }
+ dget(active);
+ spin_unlock(&active->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ return active;
next:
- spin_unlock(&dentry->d_lock);
+ spin_unlock(&active->d_lock);
}
spin_unlock(&sbi->lookup_lock);
spin_unlock(&dcache_lock);
@@ -411,8 +600,11 @@ next:
return NULL;
}
-static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name)
+static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
{
+ struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ struct dentry *parent = dentry->d_parent;
+ struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
@@ -423,23 +615,23 @@ static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct
head = &sbi->expiring_list;
list_for_each(p, head) {
struct autofs_info *ino;
- struct dentry *dentry;
+ struct dentry *expiring;
struct qstr *qstr;
ino = list_entry(p, struct autofs_info, expiring);
- dentry = ino->dentry;
+ expiring = ino->dentry;
- spin_lock(&dentry->d_lock);
+ spin_lock(&expiring->d_lock);
/* Bad luck, we've already been dentry_iput */
- if (!dentry->d_inode)
+ if (!expiring->d_inode)
goto next;
- qstr = &dentry->d_name;
+ qstr = &expiring->d_name;
- if (dentry->d_name.hash != hash)
+ if (expiring->d_name.hash != hash)
goto next;
- if (dentry->d_parent != parent)
+ if (expiring->d_parent != parent)
goto next;
if (qstr->len != len)
@@ -447,15 +639,13 @@ static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct
if (memcmp(qstr->name, str, len))
goto next;
- if (d_unhashed(dentry)) {
- dget(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
- return dentry;
- }
+ dget(expiring);
+ spin_unlock(&expiring->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ spin_unlock(&dcache_lock);
+ return expiring;
next:
- spin_unlock(&dentry->d_lock);
+ spin_unlock(&expiring->d_lock);
}
spin_unlock(&sbi->lookup_lock);
spin_unlock(&dcache_lock);
@@ -463,13 +653,56 @@ next:
return NULL;
}
+static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
+ struct dentry *dentry, int oz_mode)
+{
+ struct autofs_info *ino;
+
+ /*
+ * Mark the dentry incomplete but don't hash it. We do this
+ * to serialize our inode creation operations (symlink and
+ * mkdir) which prevents deadlock during the callback to
+ * the daemon. Subsequent user space lookups for the same
+ * dentry are placed on the wait queue while the daemon
+ * itself is allowed passage unresticted so the create
+ * operation itself can then hash the dentry. Finally,
+ * we check for the hashed dentry and return the newly
+ * hashed dentry.
+ */
+ dentry->d_op = &autofs4_root_dentry_operations;
+
+ /*
+ * And we need to ensure that the same dentry is used for
+ * all following lookup calls until it is hashed so that
+ * the dentry flags are persistent throughout the request.
+ */
+ ino = autofs4_init_ino(NULL, sbi, 0555);
+ if (!ino)
+ return ERR_PTR(-ENOMEM);
+
+ dentry->d_fsdata = ino;
+ ino->dentry = dentry;
+
+ /*
+ * Only set the mount pending flag for new dentrys not created
+ * by the daemon.
+ */
+ if (!oz_mode)
+ ino->flags |= AUTOFS_INF_PENDING;
+
+ d_instantiate(dentry, NULL);
+
+ return ino;
+}
+
/* Lookups in the root directory */
static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
struct autofs_sb_info *sbi;
struct autofs_info *ino;
- struct dentry *expiring, *unhashed;
+ struct dentry *expiring, *active;
int oz_mode;
+ int status = 0;
DPRINTK("name = %.*s",
dentry->d_name.len, dentry->d_name.name);
@@ -484,123 +717,100 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
- unhashed = autofs4_lookup_active(sbi, dentry->d_parent, &dentry->d_name);
- if (unhashed)
- dentry = unhashed;
- else {
- /*
- * Mark the dentry incomplete but don't hash it. We do this
- * to serialize our inode creation operations (symlink and
- * mkdir) which prevents deadlock during the callback to
- * the daemon. Subsequent user space lookups for the same
- * dentry are placed on the wait queue while the daemon
- * itself is allowed passage unresticted so the create
- * operation itself can then hash the dentry. Finally,
- * we check for the hashed dentry and return the newly
- * hashed dentry.
- */
- dentry->d_op = &autofs4_root_dentry_operations;
-
- /*
- * And we need to ensure that the same dentry is used for
- * all following lookup calls until it is hashed so that
- * the dentry flags are persistent throughout the request.
- */
- ino = autofs4_init_ino(NULL, sbi, 0555);
- if (!ino)
- return ERR_PTR(-ENOMEM);
-
- dentry->d_fsdata = ino;
- ino->dentry = dentry;
-
- spin_lock(&sbi->lookup_lock);
- list_add(&ino->active, &sbi->active_list);
- spin_unlock(&sbi->lookup_lock);
-
- d_instantiate(dentry, NULL);
+ spin_lock(&sbi->fs_lock);
+ active = autofs4_lookup_active(dentry);
+ if (active) {
+ dentry = active;
+ ino = autofs4_dentry_ino(dentry);
+ /* If this came from revalidate, rehash it */
+ autofs4_revalidate_rehash(dentry);
+ spin_unlock(&sbi->fs_lock);
+ } else {
+ spin_unlock(&sbi->fs_lock);
+ ino = init_new_dentry(sbi, dentry, oz_mode);
+ if (IS_ERR(ino))
+ return (struct dentry *) ino;
}
+ autofs4_add_active(dentry);
+
if (!oz_mode) {
+ expiring = autofs4_lookup_expiring(dentry);
mutex_unlock(&dir->i_mutex);
- expiring = autofs4_lookup_expiring(sbi,
- dentry->d_parent,
- &dentry->d_name);
if (expiring) {
/*
* If we are racing with expire the request might not
* be quite complete but the directory has been removed
* so it must have been successful, so just wait for it.
*/
- ino = autofs4_dentry_ino(expiring);
autofs4_expire_wait(expiring);
- spin_lock(&sbi->lookup_lock);
- if (!list_empty(&ino->expiring))
- list_del_init(&ino->expiring);
- spin_unlock(&sbi->lookup_lock);
dput(expiring);
}
-
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
- if (dentry->d_op && dentry->d_op->d_revalidate)
- (dentry->d_op->d_revalidate)(dentry, nd);
+ status = try_to_fill_dentry(dentry);
mutex_lock(&dir->i_mutex);
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
}
+ autofs4_del_active(dentry);
+
/*
- * If we are still pending, check if we had to handle
+ * If we had a mount fail, check if we had to handle
* a signal. If so we can force a restart..
*/
- if (dentry->d_flags & DCACHE_AUTOFS_PENDING) {
+ if (status) {
/* See if we were interrupted */
if (signal_pending(current)) {
sigset_t *sigset = &current->pending.signal;
if (sigismember (sigset, SIGKILL) ||
sigismember (sigset, SIGQUIT) ||
sigismember (sigset, SIGINT)) {
- if (unhashed)
- dput(unhashed);
+ if (active)
+ dput(active);
return ERR_PTR(-ERESTARTNOINTR);
}
}
- if (!oz_mode) {
- spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
- spin_unlock(&dentry->d_lock);
+ }
+
+ /*
+ * User space can (and has done in the past) remove and re-create
+ * this directory during the callback. This can leave us with an
+ * unhashed dentry, but a successful mount! So we need to
+ * perform another cached lookup in case the dentry now exists.
+ */
+ if (!oz_mode && !have_submounts(dentry)) {
+ struct dentry *new;
+ new = d_lookup(dentry->d_parent, &dentry->d_name);
+ if (new) {
+ if (active)
+ dput(active);
+ return new;
+ } else {
+ if (!status)
+ status = -ENOENT;
}
}
/*
- * If this dentry is unhashed, then we shouldn't honour this
- * lookup. Returning ENOENT here doesn't do the right thing
- * for all system calls, but it should be OK for the operations
- * we permit from an autofs.
+ * If we had a mount failure, return status to user space.
+ * If the mount succeeded and we used a dentry from the active queue
+ * return it.
*/
- if (!oz_mode && d_unhashed(dentry)) {
+ if (status) {
+ dentry = ERR_PTR(status);
+ if (active)
+ dput(active);
+ return dentry;
+ } else {
/*
- * A user space application can (and has done in the past)
- * remove and re-create this directory during the callback.
- * This can leave us with an unhashed dentry, but a
- * successful mount! So we need to perform another
- * cached lookup in case the dentry now exists.
+ * Valid successful mount, return active dentry or NULL
+ * for a new dentry.
*/
- struct dentry *parent = dentry->d_parent;
- struct dentry *new = d_lookup(parent, &dentry->d_name);
- if (new != NULL)
- dentry = new;
- else
- dentry = ERR_PTR(-ENOENT);
-
- if (unhashed)
- dput(unhashed);
-
- return dentry;
+ if (active)
+ return active;
}
- if (unhashed)
- return unhashed;
-
return NULL;
}
@@ -624,11 +834,6 @@ static int autofs4_dir_symlink(struct inode *dir,
if (!ino)
return -ENOMEM;
- spin_lock(&sbi->lookup_lock);
- if (!list_empty(&ino->active))
- list_del_init(&ino->active);
- spin_unlock(&sbi->lookup_lock);
-
ino->size = strlen(symname);
cp = kmalloc(ino->size + 1, GFP_KERNEL);
if (!cp) {
@@ -705,10 +910,6 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
dir->i_mtime = CURRENT_TIME;
spin_lock(&dcache_lock);
- spin_lock(&sbi->lookup_lock);
- if (list_empty(&ino->expiring))
- list_add(&ino->expiring, &sbi->expiring_list);
- spin_unlock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
@@ -734,10 +935,6 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
spin_unlock(&dcache_lock);
return -ENOTEMPTY;
}
- spin_lock(&sbi->lookup_lock);
- if (list_empty(&ino->expiring))
- list_add(&ino->expiring, &sbi->expiring_list);
- spin_unlock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
@@ -775,11 +972,6 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (!ino)
return -ENOMEM;
- spin_lock(&sbi->lookup_lock);
- if (!list_empty(&ino->active))
- list_del_init(&ino->active);
- spin_unlock(&sbi->lookup_lock);
-
inode = autofs4_get_inode(dir->i_sb, ino);
if (!inode) {
if (!dentry->d_fsdata)
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index b639dcf7c77..346b6940536 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -32,7 +32,7 @@
static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout_library(struct file*);
-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+static int aout_core_dump(struct coredump_params *cprm);
static struct linux_binfmt aout_format = {
.module = THIS_MODULE,
@@ -89,8 +89,9 @@ if (file->f_op->llseek) { \
* dumping of the process results in another error..
*/
-static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
+static int aout_core_dump(struct coredump_params *cprm)
{
+ struct file *file = cprm->file;
mm_segment_t fs;
int has_dumped = 0;
unsigned long dump_start, dump_size;
@@ -108,16 +109,16 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
current->flags |= PF_DUMPCORE;
strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
dump.u_ar0 = offsetof(struct user, regs);
- dump.signal = signr;
- aout_dump_thread(regs, &dump);
+ dump.signal = cprm->signr;
+ aout_dump_thread(cprm->regs, &dump);
/* If the size of the dump file exceeds the rlimit, then see what would happen
if we wrote the stack, but not the data area. */
- if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
+ if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
dump.u_dsize = 0;
/* Make sure we have enough room to write the stack and data areas. */
- if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
+ if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
dump.u_ssize = 0;
/* make sure we actually have a data and stack area to dump */
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d15ea1790bf..edd90c49003 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -44,8 +44,8 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
* If we don't support core dumping, then supply a NULL so we
* don't even try.
*/
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+#ifdef CONFIG_ELF_CORE
+static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump NULL
#endif
@@ -1101,12 +1101,7 @@ out:
return error;
}
-/*
- * Note that some platforms still use traditional core dumps and not
- * the ELF core dump. Each platform can select it as appropriate.
- */
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-
+#ifdef CONFIG_ELF_CORE
/*
* ELF core dumper
*
@@ -1277,8 +1272,9 @@ static int writenote(struct memelfnote *men, struct file *file,
}
#undef DUMP_WRITE
-#define DUMP_WRITE(addr, nr) \
- if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
+#define DUMP_WRITE(addr, nr) \
+ if ((size += (nr)) > cprm->limit || \
+ !dump_write(cprm->file, (addr), (nr))) \
goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs,
@@ -1906,7 +1902,7 @@ static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
* and then they are actually written out. If we run out of core limit
* we just truncate.
*/
-static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
+static int elf_core_dump(struct coredump_params *cprm)
{
int has_dumped = 0;
mm_segment_t fs;
@@ -1952,7 +1948,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
* notes. This also sets up the file header.
*/
if (!fill_note_info(elf, segs + 1, /* including notes section */
- &info, signr, regs))
+ &info, cprm->signr, cprm->regs))
goto cleanup;
has_dumped = 1;
@@ -2014,14 +2010,14 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
#endif
/* write out the notes section */
- if (!write_note_info(&info, file, &foffset))
+ if (!write_note_info(&info, cprm->file, &foffset))
goto end_coredump;
- if (elf_coredump_extra_notes_write(file, &foffset))
+ if (elf_coredump_extra_notes_write(cprm->file, &foffset))
goto end_coredump;
/* Align to page */
- if (!dump_seek(file, dataoff - foffset))
+ if (!dump_seek(cprm->file, dataoff - foffset))
goto end_coredump;
for (vma = first_vma(current, gate_vma); vma != NULL;
@@ -2038,12 +2034,13 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
page = get_dump_page(addr);
if (page) {
void *kaddr = kmap(page);
- stop = ((size += PAGE_SIZE) > limit) ||
- !dump_write(file, kaddr, PAGE_SIZE);
+ stop = ((size += PAGE_SIZE) > cprm->limit) ||
+ !dump_write(cprm->file, kaddr,
+ PAGE_SIZE);
kunmap(page);
page_cache_release(page);
} else
- stop = !dump_seek(file, PAGE_SIZE);
+ stop = !dump_seek(cprm->file, PAGE_SIZE);
if (stop)
goto end_coredump;
}
@@ -2063,7 +2060,7 @@ out:
return has_dumped;
}
-#endif /* USE_ELF_CORE_DUMP */
+#endif /* CONFIG_ELF_CORE */
static int __init init_elf_binfmt(void)
{
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 38502c67987..c25256a5c5b 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -75,14 +75,14 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *,
static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *,
struct file *, struct mm_struct *);
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit);
+#ifdef CONFIG_ELF_CORE
+static int elf_fdpic_core_dump(struct coredump_params *cprm);
#endif
static struct linux_binfmt elf_fdpic_format = {
.module = THIS_MODULE,
.load_binary = load_elf_fdpic_binary,
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
.core_dump = elf_fdpic_core_dump,
#endif
.min_coredump = ELF_EXEC_PAGESIZE,
@@ -380,7 +380,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
down_write(&current->mm->mmap_sem);
current->mm->start_brk = do_mmap(NULL, 0, stack_size,
PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN,
+ MAP_PRIVATE | MAP_ANONYMOUS |
+ MAP_UNINITIALIZED | MAP_GROWSDOWN,
0);
if (IS_ERR_VALUE(current->mm->start_brk)) {
@@ -1200,7 +1201,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
*
* Modelled on fs/binfmt_elf.c core dumper
*/
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
/*
* These are the only things you should do on a core-file: use only these
@@ -1325,8 +1326,9 @@ static int writenote(struct memelfnote *men, struct file *file)
#undef DUMP_WRITE
#undef DUMP_SEEK
-#define DUMP_WRITE(addr, nr) \
- if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
+#define DUMP_WRITE(addr, nr) \
+ if ((size += (nr)) > cprm->limit || \
+ !dump_write(cprm->file, (addr), (nr))) \
goto end_coredump;
static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
@@ -1581,8 +1583,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size,
* and then they are actually written out. If we run out of core limit
* we just truncate.
*/
-static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
- struct file *file, unsigned long limit)
+static int elf_fdpic_core_dump(struct coredump_params *cprm)
{
#define NUM_NOTES 6
int has_dumped = 0;
@@ -1641,7 +1642,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
goto cleanup;
#endif
- if (signr) {
+ if (cprm->signr) {
struct core_thread *ct;
struct elf_thread_status *tmp;
@@ -1660,14 +1661,14 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
int sz;
tmp = list_entry(t, struct elf_thread_status, list);
- sz = elf_dump_thread_status(signr, tmp);
+ sz = elf_dump_thread_status(cprm->signr, tmp);
thread_status_size += sz;
}
}
/* now collect the dump for the current */
- fill_prstatus(prstatus, current, signr);
- elf_core_copy_regs(&prstatus->pr_reg, regs);
+ fill_prstatus(prstatus, current, cprm->signr);
+ elf_core_copy_regs(&prstatus->pr_reg, cprm->regs);
segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
@@ -1702,7 +1703,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
/* Try to dump the FPU. */
if ((prstatus->pr_fpvalid =
- elf_core_copy_task_fpregs(current, regs, fpu)))
+ elf_core_copy_task_fpregs(current, cprm->regs, fpu)))
fill_note(notes + numnote++,
"CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
@@ -1773,7 +1774,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
/* write out the notes section */
for (i = 0; i < numnote; i++)
- if (!writenote(notes + i, file))
+ if (!writenote(notes + i, cprm->file))
goto end_coredump;
/* write out the thread status notes section */
@@ -1782,14 +1783,15 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs,
list_entry(t, struct elf_thread_status, list);
for (i = 0; i < tmp->num_notes; i++)
- if (!writenote(&tmp->notes[i], file))
+ if (!writenote(&tmp->notes[i], cprm->file))
goto end_coredump;
}
- if (!dump_seek(file, dataoff))
+ if (!dump_seek(cprm->file, dataoff))
goto end_coredump;
- if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0)
+ if (elf_fdpic_dump_segments(cprm->file, &size, &cprm->limit,
+ mm_flags) < 0)
goto end_coredump;
#ifdef ELF_CORE_WRITE_EXTRA_DATA
@@ -1825,4 +1827,4 @@ cleanup:
#undef NUM_NOTES
}
-#endif /* USE_ELF_CORE_DUMP */
+#endif /* CONFIG_ELF_CORE */
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index a2796651e75..d4a00ea1054 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -87,7 +87,7 @@ static int load_flat_shared_library(int id, struct lib_info *p);
#endif
static int load_flat_binary(struct linux_binprm *, struct pt_regs * regs);
-static int flat_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+static int flat_core_dump(struct coredump_params *cprm);
static struct linux_binfmt flat_format = {
.module = THIS_MODULE,
@@ -102,10 +102,10 @@ static struct linux_binfmt flat_format = {
* Currently only a stub-function.
*/
-static int flat_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
+static int flat_core_dump(struct coredump_params *cprm)
{
printk("Process %s:%d received signr %d and should have core dumped\n",
- current->comm, current->pid, (int) signr);
+ current->comm, current->pid, (int) cprm->signr);
return(1);
}
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index eff74b9c9e7..2a9b5330cc5 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -43,7 +43,7 @@ static int load_som_library(struct file *);
* don't even try.
*/
#if 0
-static int som_core_dump(long signr, struct pt_regs *regs, unsigned long limit);
+static int som_core_dump(struct coredump_params *cprm);
#else
#define som_core_dump NULL
#endif
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 36160424427..2e9e69987a8 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -73,13 +73,13 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
return acl;
}
-static int btrfs_xattr_get_acl(struct inode *inode, int type,
- void *value, size_t size)
+static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
+ void *value, size_t size, int type)
{
struct posix_acl *acl;
int ret = 0;
- acl = btrfs_get_acl(inode, type);
+ acl = btrfs_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
@@ -94,7 +94,8 @@ static int btrfs_xattr_get_acl(struct inode *inode, int type,
/*
* Needs to be called with fs_mutex held
*/
-static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+static int btrfs_set_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct posix_acl *acl, int type)
{
int ret, size = 0;
const char *name;
@@ -140,8 +141,7 @@ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
goto out;
}
- ret = __btrfs_setxattr(inode, name, value, size, 0);
-
+ ret = __btrfs_setxattr(trans, inode, name, value, size, 0);
out:
kfree(value);
@@ -151,10 +151,10 @@ out:
return ret;
}
-static int btrfs_xattr_set_acl(struct inode *inode, int type,
- const void *value, size_t size)
+static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
- int ret = 0;
+ int ret;
struct posix_acl *acl = NULL;
if (value) {
@@ -167,38 +167,13 @@ static int btrfs_xattr_set_acl(struct inode *inode, int type,
}
}
- ret = btrfs_set_acl(inode, acl, type);
+ ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
posix_acl_release(acl);
return ret;
}
-
-static int btrfs_xattr_acl_access_get(struct inode *inode, const char *name,
- void *value, size_t size)
-{
- return btrfs_xattr_get_acl(inode, ACL_TYPE_ACCESS, value, size);
-}
-
-static int btrfs_xattr_acl_access_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return btrfs_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
-}
-
-static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name,
- void *value, size_t size)
-{
- return btrfs_xattr_get_acl(inode, ACL_TYPE_DEFAULT, value, size);
-}
-
-static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
-}
-
int btrfs_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl;
@@ -221,7 +196,8 @@ int btrfs_check_acl(struct inode *inode, int mask)
* stuff has been fixed to work with that. If the locking stuff changes, we
* need to re-evaluate the acl locking stuff.
*/
-int btrfs_init_acl(struct inode *inode, struct inode *dir)
+int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
struct posix_acl *acl = NULL;
int ret = 0;
@@ -246,7 +222,8 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
mode_t mode;
if (S_ISDIR(inode->i_mode)) {
- ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT);
+ ret = btrfs_set_acl(trans, inode, acl,
+ ACL_TYPE_DEFAULT);
if (ret)
goto failed;
}
@@ -261,7 +238,7 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
inode->i_mode = mode;
if (ret > 0) {
/* we need an acl */
- ret = btrfs_set_acl(inode, clone,
+ ret = btrfs_set_acl(trans, inode, clone,
ACL_TYPE_ACCESS);
}
}
@@ -294,7 +271,7 @@ int btrfs_acl_chmod(struct inode *inode)
ret = posix_acl_chmod_masq(clone, inode->i_mode);
if (!ret)
- ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS);
+ ret = btrfs_set_acl(NULL, inode, clone, ACL_TYPE_ACCESS);
posix_acl_release(clone);
@@ -303,14 +280,16 @@ int btrfs_acl_chmod(struct inode *inode)
struct xattr_handler btrfs_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
- .get = btrfs_xattr_acl_default_get,
- .set = btrfs_xattr_acl_default_set,
+ .flags = ACL_TYPE_DEFAULT,
+ .get = btrfs_xattr_acl_get,
+ .set = btrfs_xattr_acl_set,
};
struct xattr_handler btrfs_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
- .get = btrfs_xattr_acl_access_get,
- .set = btrfs_xattr_acl_access_set,
+ .flags = ACL_TYPE_ACCESS,
+ .get = btrfs_xattr_acl_get,
+ .set = btrfs_xattr_acl_set,
};
#else /* CONFIG_BTRFS_FS_POSIX_ACL */
@@ -320,7 +299,8 @@ int btrfs_acl_chmod(struct inode *inode)
return 0;
}
-int btrfs_init_acl(struct inode *inode, struct inode *dir)
+int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
return 0;
}
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index f6783a42f01..3f1f50d9d91 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,9 +44,6 @@ struct btrfs_inode {
*/
struct extent_io_tree io_failure_tree;
- /* held while inesrting or deleting extents from files */
- struct mutex extent_mutex;
-
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
@@ -166,7 +163,7 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
static inline void btrfs_i_size_write(struct inode *inode, u64 size)
{
- inode->i_size = size;
+ i_size_write(inode, size);
BTRFS_I(inode)->disk_i_size = size;
}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index ec96f3a6d53..c4bc570a396 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -37,6 +37,11 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
+static int setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *cpu_key, u32 *data_size,
+ u32 total_data, u32 total_size, int nr);
+
struct btrfs_path *btrfs_alloc_path(void)
{
@@ -451,9 +456,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
extent_buffer_get(cow);
spin_unlock(&root->node_lock);
- btrfs_free_extent(trans, root, buf->start, buf->len,
- parent_start, root->root_key.objectid,
- level, 0);
+ btrfs_free_tree_block(trans, root, buf->start, buf->len,
+ parent_start, root->root_key.objectid, level);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
} else {
@@ -468,9 +472,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
btrfs_set_node_ptr_generation(parent, parent_slot,
trans->transid);
btrfs_mark_buffer_dirty(parent);
- btrfs_free_extent(trans, root, buf->start, buf->len,
- parent_start, root->root_key.objectid,
- level, 0);
+ btrfs_free_tree_block(trans, root, buf->start, buf->len,
+ parent_start, root->root_key.objectid, level);
}
if (unlock_orig)
btrfs_tree_unlock(buf);
@@ -1030,8 +1033,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(mid);
/* once for the path */
free_extent_buffer(mid);
- ret = btrfs_free_extent(trans, root, mid->start, mid->len,
- 0, root->root_key.objectid, level, 1);
+ ret = btrfs_free_tree_block(trans, root, mid->start, mid->len,
+ 0, root->root_key.objectid, level);
/* once for the root ptr */
free_extent_buffer(mid);
return ret;
@@ -1095,10 +1098,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1);
if (wret)
ret = wret;
- wret = btrfs_free_extent(trans, root, bytenr,
- blocksize, 0,
- root->root_key.objectid,
- level, 0);
+ wret = btrfs_free_tree_block(trans, root,
+ bytenr, blocksize, 0,
+ root->root_key.objectid,
+ level);
if (wret)
ret = wret;
} else {
@@ -1143,9 +1146,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
wret = del_ptr(trans, root, path, level + 1, pslot);
if (wret)
ret = wret;
- wret = btrfs_free_extent(trans, root, bytenr, blocksize,
- 0, root->root_key.objectid,
- level, 0);
+ wret = btrfs_free_tree_block(trans, root, bytenr, blocksize,
+ 0, root->root_key.objectid, level);
if (wret)
ret = wret;
} else {
@@ -2997,75 +2999,85 @@ again:
return ret;
}
-/*
- * This function splits a single item into two items,
- * giving 'new_key' to the new item and splitting the
- * old one at split_offset (from the start of the item).
- *
- * The path may be released by this operation. After
- * the split, the path is pointing to the old item. The
- * new item is going to be in the same node as the old one.
- *
- * Note, the item being split must be smaller enough to live alone on
- * a tree block with room for one extra struct btrfs_item
- *
- * This allows us to split the item in place, keeping a lock on the
- * leaf the entire time.
- */
-int btrfs_split_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_key *new_key,
- unsigned long split_offset)
+static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path, int ins_len)
{
- u32 item_size;
+ struct btrfs_key key;
struct extent_buffer *leaf;
- struct btrfs_key orig_key;
- struct btrfs_item *item;
- struct btrfs_item *new_item;
- int ret = 0;
- int slot;
- u32 nritems;
- u32 orig_offset;
- struct btrfs_disk_key disk_key;
- char *buf;
+ struct btrfs_file_extent_item *fi;
+ u64 extent_len = 0;
+ u32 item_size;
+ int ret;
leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
- if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
- goto split;
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+ BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
+ key.type != BTRFS_EXTENT_CSUM_KEY);
+
+ if (btrfs_leaf_free_space(root, leaf) >= ins_len)
+ return 0;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ if (key.type == BTRFS_EXTENT_DATA_KEY) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_len = btrfs_file_extent_num_bytes(leaf, fi);
+ }
btrfs_release_path(root, path);
- path->search_for_split = 1;
path->keep_locks = 1;
-
- ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
+ path->search_for_split = 1;
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
path->search_for_split = 0;
+ if (ret < 0)
+ goto err;
+ ret = -EAGAIN;
+ leaf = path->nodes[0];
/* if our item isn't there or got smaller, return now */
- if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
- path->slots[0])) {
- path->keep_locks = 0;
- return -EAGAIN;
+ if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
+ goto err;
+
+ if (key.type == BTRFS_EXTENT_DATA_KEY) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
+ goto err;
}
btrfs_set_path_blocking(path);
- ret = split_leaf(trans, root, &orig_key, path,
- sizeof(struct btrfs_item), 1);
- path->keep_locks = 0;
+ ret = split_leaf(trans, root, &key, path, ins_len, 1);
BUG_ON(ret);
+ path->keep_locks = 0;
btrfs_unlock_up_safe(path, 1);
+ return 0;
+err:
+ path->keep_locks = 0;
+ return ret;
+}
+
+static noinline int split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key,
+ unsigned long split_offset)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_item *item;
+ struct btrfs_item *new_item;
+ int slot;
+ char *buf;
+ u32 nritems;
+ u32 item_size;
+ u32 orig_offset;
+ struct btrfs_disk_key disk_key;
+
leaf = path->nodes[0];
BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
-split:
- /*
- * make sure any changes to the path from split_leaf leave it
- * in a blocking state
- */
btrfs_set_path_blocking(path);
item = btrfs_item_nr(leaf, path->slots[0]);
@@ -3073,19 +3085,19 @@ split:
item_size = btrfs_item_size(leaf, item);
buf = kmalloc(item_size, GFP_NOFS);
+ if (!buf)
+ return -ENOMEM;
+
read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
path->slots[0]), item_size);
- slot = path->slots[0] + 1;
- leaf = path->nodes[0];
+ slot = path->slots[0] + 1;
nritems = btrfs_header_nritems(leaf);
-
if (slot != nritems) {
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
- btrfs_item_nr_offset(slot),
- (nritems - slot) * sizeof(struct btrfs_item));
-
+ btrfs_item_nr_offset(slot),
+ (nritems - slot) * sizeof(struct btrfs_item));
}
btrfs_cpu_key_to_disk(&disk_key, new_key);
@@ -3113,16 +3125,81 @@ split:
item_size - split_offset);
btrfs_mark_buffer_dirty(leaf);
- ret = 0;
- if (btrfs_leaf_free_space(root, leaf) < 0) {
- btrfs_print_leaf(root, leaf);
- BUG();
- }
+ BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
kfree(buf);
+ return 0;
+}
+
+/*
+ * This function splits a single item into two items,
+ * giving 'new_key' to the new item and splitting the
+ * old one at split_offset (from the start of the item).
+ *
+ * The path may be released by this operation. After
+ * the split, the path is pointing to the old item. The
+ * new item is going to be in the same node as the old one.
+ *
+ * Note, the item being split must be small enough to live alone on
+ * a tree block with room for one extra struct btrfs_item
+ *
+ * This allows us to split the item in place, keeping a lock on the
+ * leaf the entire time.
+ */
+int btrfs_split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key,
+ unsigned long split_offset)
+{
+ int ret;
+ ret = setup_leaf_for_split(trans, root, path,
+ sizeof(struct btrfs_item));
+ if (ret)
+ return ret;
+
+ ret = split_item(trans, root, path, new_key, split_offset);
return ret;
}
/*
+ * This function duplicates an item, giving 'new_key' to the new item.
+ * It guarantees both items live in the same tree leaf and the new item
+ * is contiguous with the original item.
+ *
+ * This allows us to split a file extent in place, keeping a lock on the
+ * leaf the entire time.
+ */
+int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key)
+{
+ struct extent_buffer *leaf;
+ int ret;
+ u32 item_size;
+
+ leaf = path->nodes[0];
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ ret = setup_leaf_for_split(trans, root, path,
+ item_size + sizeof(struct btrfs_item));
+ if (ret)
+ return ret;
+
+ path->slots[0]++;
+ ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
+ item_size, item_size +
+ sizeof(struct btrfs_item), 1);
+ BUG_ON(ret);
+
+ leaf = path->nodes[0];
+ memcpy_extent_buffer(leaf,
+ btrfs_item_ptr_offset(leaf, path->slots[0]),
+ btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
+ item_size);
+ return 0;
+}
+
+/*
* make the item pointed to by the path smaller. new_size indicates
* how small to make it, and from_end tells us if we just chop bytes
* off the end of the item or if we shift the item to chop bytes off
@@ -3714,8 +3791,8 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
*/
btrfs_unlock_up_safe(path, 0);
- ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
- 0, root->root_key.objectid, 0, 0);
+ ret = btrfs_free_tree_block(trans, root, leaf->start, leaf->len,
+ 0, root->root_key.objectid, 0);
return ret;
}
/*
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 444b3e9b92a..9f806dd04c2 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -310,6 +310,9 @@ struct btrfs_header {
#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) - \
sizeof(struct btrfs_file_extent_item))
+#define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
+ sizeof(struct btrfs_item) -\
+ sizeof(struct btrfs_dir_item))
/*
@@ -859,8 +862,9 @@ struct btrfs_fs_info {
struct mutex ordered_operations_mutex;
struct rw_semaphore extent_commit_sem;
- struct rw_semaphore subvol_sem;
+ struct rw_semaphore cleanup_work_sem;
+ struct rw_semaphore subvol_sem;
struct srcu_struct subvol_srcu;
struct list_head trans_list;
@@ -868,6 +872,9 @@ struct btrfs_fs_info {
struct list_head dead_roots;
struct list_head caching_block_groups;
+ spinlock_t delayed_iput_lock;
+ struct list_head delayed_iputs;
+
atomic_t nr_async_submits;
atomic_t async_submit_draining;
atomic_t nr_async_bios;
@@ -1034,12 +1041,12 @@ struct btrfs_root {
int ref_cows;
int track_dirty;
int in_radix;
+ int clean_orphans;
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
int defrag_running;
- int defrag_level;
char *name;
int in_sysfs;
@@ -1975,6 +1982,10 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
struct btrfs_disk_key *key, int level,
u64 hint, u64 empty_size);
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u32 blocksize,
+ u64 parent, u64 root_objectid, int level);
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u32 blocksize,
@@ -2089,6 +2100,10 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_key *new_key,
unsigned long split_offset);
+int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *new_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, struct btrfs_path *p, int
ins_len, int cow);
@@ -2196,9 +2211,10 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_dir_item *di);
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- u16 name_len, const void *data, u16 data_len,
- u64 dir);
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 objectid,
+ const char *name, u16 name_len,
+ const void *data, u16 data_len);
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 dir,
@@ -2292,7 +2308,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct inode *inode, u64 new_size,
u32 min_type);
-int btrfs_start_delalloc_inodes(struct btrfs_root *root);
+int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end);
int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc);
@@ -2332,6 +2348,8 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
void btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t size);
int btrfs_invalidate_inodes(struct btrfs_root *root);
+void btrfs_add_delayed_iput(struct inode *inode);
+void btrfs_run_delayed_iputs(struct btrfs_root *root);
extern const struct dentry_operations btrfs_dentry_operations;
/* ioctl.c */
@@ -2345,12 +2363,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
int skip_pinned);
int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
extern const struct file_operations btrfs_file_operations;
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- u64 start, u64 end, u64 locked_end,
- u64 inline_limit, u64 *hint_block, int drop_cache);
+int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
+ u64 start, u64 end, u64 *hint_byte, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
@@ -2380,7 +2395,8 @@ int btrfs_check_acl(struct inode *inode, int mask);
#else
#define btrfs_check_acl NULL
#endif
-int btrfs_init_acl(struct inode *inode, struct inode *dir);
+int btrfs_init_acl(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir);
int btrfs_acl_chmod(struct inode *inode);
/* relocation.c */
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index f3a6075519c..e9103b3baa4 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -68,12 +68,12 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
* into the tree
*/
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *name,
- u16 name_len, const void *data, u16 data_len,
- u64 dir)
+ struct btrfs_root *root,
+ struct btrfs_path *path, u64 objectid,
+ const char *name, u16 name_len,
+ const void *data, u16 data_len)
{
int ret = 0;
- struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
unsigned long name_ptr, data_ptr;
struct btrfs_key key, location;
@@ -81,15 +81,11 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
u32 data_size;
- key.objectid = dir;
+ BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root));
+
+ key.objectid = objectid;
btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
key.offset = btrfs_name_hash(name, name_len);
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- if (name_len + data_len + sizeof(struct btrfs_dir_item) >
- BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item))
- return -ENOSPC;
data_size = sizeof(*dir_item) + name_len + data_len;
dir_item = insert_with_overflow(trans, root, path, &key, data_size,
@@ -117,7 +113,6 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, data, data_ptr, data_len);
btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 02b6afbd745..009e3bd18f2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -892,6 +892,8 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->stripesize = stripesize;
root->ref_cows = 0;
root->track_dirty = 0;
+ root->in_radix = 0;
+ root->clean_orphans = 0;
root->fs_info = fs_info;
root->objectid = objectid;
@@ -928,7 +930,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->defrag_trans_start = fs_info->generation;
init_completion(&root->kobj_unregister);
root->defrag_running = 0;
- root->defrag_level = 0;
root->root_key.objectid = objectid;
root->anon_super.s_root = NULL;
root->anon_super.s_dev = 0;
@@ -980,12 +981,12 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
while (1) {
ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY);
+ 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
if (ret)
break;
- clear_extent_dirty(&log_root_tree->dirty_log_pages,
- start, end, GFP_NOFS);
+ clear_extent_bits(&log_root_tree->dirty_log_pages, start, end,
+ EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
eb = fs_info->log_root_tree->node;
@@ -1210,8 +1211,10 @@ again:
ret = radix_tree_insert(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
root);
- if (ret == 0)
+ if (ret == 0) {
root->in_radix = 1;
+ root->clean_orphans = 1;
+ }
spin_unlock(&fs_info->fs_roots_radix_lock);
radix_tree_preload_end();
if (ret) {
@@ -1225,10 +1228,6 @@ again:
ret = btrfs_find_dead_roots(fs_info->tree_root,
root->root_key.objectid);
WARN_ON(ret);
-
- if (!(fs_info->sb->s_flags & MS_RDONLY))
- btrfs_orphan_cleanup(root);
-
return root;
fail:
free_fs_root(root);
@@ -1477,6 +1476,7 @@ static int cleaner_kthread(void *arg)
if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
mutex_trylock(&root->fs_info->cleaner_mutex)) {
+ btrfs_run_delayed_iputs(root);
btrfs_clean_old_snapshots(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
}
@@ -1606,6 +1606,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_LIST_HEAD(&fs_info->trans_list);
INIT_LIST_HEAD(&fs_info->dead_roots);
+ INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->hashers);
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
INIT_LIST_HEAD(&fs_info->ordered_operations);
@@ -1614,6 +1615,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->new_trans_lock);
spin_lock_init(&fs_info->ref_cache_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
+ spin_lock_init(&fs_info->delayed_iput_lock);
init_completion(&fs_info->kobj_unregister);
fs_info->tree_root = tree_root;
@@ -1689,6 +1691,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
mutex_init(&fs_info->cleaner_mutex);
mutex_init(&fs_info->volume_mutex);
init_rwsem(&fs_info->extent_commit_sem);
+ init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
@@ -2386,8 +2389,14 @@ int btrfs_commit_super(struct btrfs_root *root)
int ret;
mutex_lock(&root->fs_info->cleaner_mutex);
+ btrfs_run_delayed_iputs(root);
btrfs_clean_old_snapshots(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
+
+ /* wait until ongoing cleanup work is done */
+ down_write(&root->fs_info->cleanup_work_sem);
+ up_write(&root->fs_info->cleanup_work_sem);
+
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
BUG_ON(ret);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 94627c4cc19..56e50137d0e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -195,6 +195,14 @@ static int exclude_super_stripes(struct btrfs_root *root,
int stripe_len;
int i, nr, ret;
+ if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
+ stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
+ cache->bytes_super += stripe_len;
+ ret = add_excluded_extent(root, cache->key.objectid,
+ stripe_len);
+ BUG_ON(ret);
+ }
+
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
@@ -255,7 +263,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
if (ret)
break;
- if (extent_start == start) {
+ if (extent_start <= start) {
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
@@ -2880,9 +2888,9 @@ static noinline void flush_delalloc_async(struct btrfs_work *work)
root = async->root;
info = async->info;
- btrfs_start_delalloc_inodes(root);
+ btrfs_start_delalloc_inodes(root, 0);
wake_up(&info->flush_wait);
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_wait_ordered_extents(root, 0, 0);
spin_lock(&info->lock);
info->flushing = 0;
@@ -2956,8 +2964,8 @@ static void flush_delalloc(struct btrfs_root *root,
return;
flush:
- btrfs_start_delalloc_inodes(root);
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0, 0);
spin_lock(&info->lock);
info->flushing = 0;
@@ -3454,14 +3462,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
else
old_val -= num_bytes;
btrfs_set_super_bytes_used(&info->super_copy, old_val);
-
- /* block accounting for root item */
- old_val = btrfs_root_used(&root->root_item);
- if (alloc)
- old_val += num_bytes;
- else
- old_val -= num_bytes;
- btrfs_set_root_used(&root->root_item, old_val);
spin_unlock(&info->delalloc_lock);
while (total) {
@@ -4049,6 +4049,21 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
return ret;
}
+int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u32 blocksize,
+ u64 parent, u64 root_objectid, int level)
+{
+ u64 used;
+ spin_lock(&root->node_lock);
+ used = btrfs_root_used(&root->root_item) - blocksize;
+ btrfs_set_root_used(&root->root_item, used);
+ spin_unlock(&root->node_lock);
+
+ return btrfs_free_extent(trans, root, bytenr, blocksize,
+ parent, root_objectid, level, 0);
+}
+
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
u64 mask = ((u64)root->stripesize - 1);
@@ -4578,7 +4593,6 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
{
int ret;
u64 search_start = 0;
- struct btrfs_fs_info *info = root->fs_info;
data = btrfs_get_alloc_profile(root, data);
again:
@@ -4586,17 +4600,9 @@ again:
* the only place that sets empty_size is btrfs_realloc_node, which
* is not called recursively on allocations
*/
- if (empty_size || root->ref_cows) {
- if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
- ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- 2 * 1024 * 1024,
- BTRFS_BLOCK_GROUP_METADATA |
- (info->metadata_alloc_profile &
- info->avail_metadata_alloc_bits), 0);
- }
+ if (empty_size || root->ref_cows)
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
num_bytes + 2 * 1024 * 1024, data, 0);
- }
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -4897,6 +4903,14 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
extent_op);
BUG_ON(ret);
}
+
+ if (root_objectid == root->root_key.objectid) {
+ u64 used;
+ spin_lock(&root->node_lock);
+ used = btrfs_root_used(&root->root_item) + num_bytes;
+ btrfs_set_root_used(&root->root_item, used);
+ spin_unlock(&root->node_lock);
+ }
return ret;
}
@@ -4919,8 +4933,16 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
btrfs_set_buffer_uptodate(buf);
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
- set_extent_dirty(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ /*
+ * we allow two log transactions at a time, use different
+ * EXTENT bits to differentiate dirty pages.
+ */
+ if (root->log_transid % 2 == 0)
+ set_extent_dirty(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1, GFP_NOFS);
+ else
+ set_extent_new(&root->dirty_log_pages, buf->start,
+ buf->start + buf->len - 1, GFP_NOFS);
} else {
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 06550affbd2..feaa13b105d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -179,18 +179,14 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
}
flags = em->flags;
if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
- if (em->start <= start &&
- (!testend || em->start + em->len >= start + len)) {
+ if (testend && em->start + em->len >= start + len) {
free_extent_map(em);
write_unlock(&em_tree->lock);
break;
}
- if (start < em->start) {
- len = em->start - start;
- } else {
+ start = em->start + em->len;
+ if (testend)
len = start + len - (em->start + em->len);
- start = em->start + em->len;
- }
free_extent_map(em);
write_unlock(&em_tree->lock);
continue;
@@ -265,319 +261,247 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
* If an extent intersects the range but is not entirely inside the range
* it is either truncated or split. Anything entirely inside the range
* is deleted from the tree.
- *
- * inline_limit is used to tell this code which offsets in the file to keep
- * if they contain inline extents.
*/
-noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- u64 start, u64 end, u64 locked_end,
- u64 inline_limit, u64 *hint_byte, int drop_cache)
+int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
+ u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
- u64 extent_end = 0;
- u64 search_start = start;
- u64 ram_bytes = 0;
- u64 disk_bytenr = 0;
- u64 orig_locked_end = locked_end;
- u8 compression;
- u8 encryption;
- u16 other_encoding = 0;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
- struct btrfs_file_extent_item *extent;
+ struct btrfs_file_extent_item *fi;
struct btrfs_path *path;
struct btrfs_key key;
- struct btrfs_file_extent_item old;
- int keep;
- int slot;
- int bookend;
- int found_type = 0;
- int found_extent;
- int found_inline;
+ struct btrfs_key new_key;
+ u64 search_start = start;
+ u64 disk_bytenr = 0;
+ u64 num_bytes = 0;
+ u64 extent_offset = 0;
+ u64 extent_end = 0;
+ int del_nr = 0;
+ int del_slot = 0;
+ int extent_type;
int recow;
int ret;
- inline_limit = 0;
if (drop_cache)
btrfs_drop_extent_cache(inode, start, end - 1, 0);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
+
while (1) {
recow = 0;
- btrfs_release_path(root, path);
ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
search_start, -1);
if (ret < 0)
- goto out;
- if (ret > 0) {
- if (path->slots[0] == 0) {
- ret = 0;
- goto out;
- }
- path->slots[0]--;
+ break;
+ if (ret > 0 && path->slots[0] > 0 && search_start == start) {
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+ if (key.objectid == inode->i_ino &&
+ key.type == BTRFS_EXTENT_DATA_KEY)
+ path->slots[0]--;
}
+ ret = 0;
next_slot:
- keep = 0;
- bookend = 0;
- found_extent = 0;
- found_inline = 0;
- compression = 0;
- encryption = 0;
- extent = NULL;
leaf = path->nodes[0];
- slot = path->slots[0];
- ret = 0;
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
- key.offset >= end) {
- goto out;
- }
- if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
- key.objectid != inode->i_ino) {
- goto out;
- }
- if (recow) {
- search_start = max(key.offset, start);
- continue;
- }
- if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
- extent = btrfs_item_ptr(leaf, slot,
- struct btrfs_file_extent_item);
- found_type = btrfs_file_extent_type(leaf, extent);
- compression = btrfs_file_extent_compression(leaf,
- extent);
- encryption = btrfs_file_extent_encryption(leaf,
- extent);
- other_encoding = btrfs_file_extent_other_encoding(leaf,
- extent);
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
- extent_end =
- btrfs_file_extent_disk_bytenr(leaf,
- extent);
- if (extent_end)
- *hint_byte = extent_end;
-
- extent_end = key.offset +
- btrfs_file_extent_num_bytes(leaf, extent);
- ram_bytes = btrfs_file_extent_ram_bytes(leaf,
- extent);
- found_extent = 1;
- } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
- found_inline = 1;
- extent_end = key.offset +
- btrfs_file_extent_inline_len(leaf, extent);
+ if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ BUG_ON(del_nr > 0);
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ break;
+ if (ret > 0) {
+ ret = 0;
+ break;
}
+ leaf = path->nodes[0];
+ recow = 1;
+ }
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.objectid > inode->i_ino ||
+ key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+ break;
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(leaf, fi);
+
+ if (extent_type == BTRFS_FILE_EXTENT_REG ||
+ extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+ num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+ extent_offset = btrfs_file_extent_offset(leaf, fi);
+ extent_end = key.offset +
+ btrfs_file_extent_num_bytes(leaf, fi);
+ } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ extent_end = key.offset +
+ btrfs_file_extent_inline_len(leaf, fi);
} else {
+ WARN_ON(1);
extent_end = search_start;
}
- /* we found nothing we can drop */
- if ((!found_extent && !found_inline) ||
- search_start >= extent_end) {
- int nextret;
- u32 nritems;
- nritems = btrfs_header_nritems(leaf);
- if (slot >= nritems - 1) {
- nextret = btrfs_next_leaf(root, path);
- if (nextret)
- goto out;
- recow = 1;
- } else {
- path->slots[0]++;
- }
+ if (extent_end <= search_start) {
+ path->slots[0]++;
goto next_slot;
}
- if (end <= extent_end && start >= key.offset && found_inline)
- *hint_byte = EXTENT_MAP_INLINE;
-
- if (found_extent) {
- read_extent_buffer(leaf, &old, (unsigned long)extent,
- sizeof(old));
- }
-
- if (end < extent_end && end >= key.offset) {
- bookend = 1;
- if (found_inline && start <= key.offset)
- keep = 1;
+ search_start = max(key.offset, start);
+ if (recow) {
+ btrfs_release_path(root, path);
+ continue;
}
- if (bookend && found_extent) {
- if (locked_end < extent_end) {
- ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1,
- GFP_NOFS);
- if (!ret) {
- btrfs_release_path(root, path);
- lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1,
- GFP_NOFS);
- locked_end = extent_end;
- continue;
- }
- locked_end = extent_end;
+ /*
+ * | - range to drop - |
+ * | -------- extent -------- |
+ */
+ if (start > key.offset && end < extent_end) {
+ BUG_ON(del_nr > 0);
+ BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = start;
+ ret = btrfs_duplicate_item(trans, root, path,
+ &new_key);
+ if (ret == -EAGAIN) {
+ btrfs_release_path(root, path);
+ continue;
}
- disk_bytenr = le64_to_cpu(old.disk_bytenr);
- if (disk_bytenr != 0) {
+ if (ret < 0)
+ break;
+
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ start - key.offset);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ extent_offset += start - key.offset;
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - start);
+ btrfs_mark_buffer_dirty(leaf);
+
+ if (disk_bytenr > 0) {
ret = btrfs_inc_extent_ref(trans, root,
- disk_bytenr,
- le64_to_cpu(old.disk_num_bytes), 0,
- root->root_key.objectid,
- key.objectid, key.offset -
- le64_to_cpu(old.offset));
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ new_key.objectid,
+ start - extent_offset);
BUG_ON(ret);
+ *hint_byte = disk_bytenr;
}
+ key.offset = start;
}
+ /*
+ * | ---- range to drop ----- |
+ * | -------- extent -------- |
+ */
+ if (start <= key.offset && end < extent_end) {
+ BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
- if (found_inline) {
- u64 mask = root->sectorsize - 1;
- search_start = (extent_end + mask) & ~mask;
- } else
- search_start = extent_end;
-
- /* truncate existing extent */
- if (start > key.offset) {
- u64 new_num;
- u64 old_num;
- keep = 1;
- WARN_ON(start & (root->sectorsize - 1));
- if (found_extent) {
- new_num = start - key.offset;
- old_num = btrfs_file_extent_num_bytes(leaf,
- extent);
- *hint_byte =
- btrfs_file_extent_disk_bytenr(leaf,
- extent);
- if (btrfs_file_extent_disk_bytenr(leaf,
- extent)) {
- inode_sub_bytes(inode, old_num -
- new_num);
- }
- btrfs_set_file_extent_num_bytes(leaf,
- extent, new_num);
- btrfs_mark_buffer_dirty(leaf);
- } else if (key.offset < inline_limit &&
- (end > extent_end) &&
- (inline_limit < extent_end)) {
- u32 new_size;
- new_size = btrfs_file_extent_calc_inline_size(
- inline_limit - key.offset);
- inode_sub_bytes(inode, extent_end -
- inline_limit);
- btrfs_set_file_extent_ram_bytes(leaf, extent,
- new_size);
- if (!compression && !encryption) {
- btrfs_truncate_item(trans, root, path,
- new_size, 1);
- }
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = end;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+
+ extent_offset += end - key.offset;
+ btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - end);
+ btrfs_mark_buffer_dirty(leaf);
+ if (disk_bytenr > 0) {
+ inode_sub_bytes(inode, end - key.offset);
+ *hint_byte = disk_bytenr;
}
+ break;
}
- /* delete the entire extent */
- if (!keep) {
- if (found_inline)
- inode_sub_bytes(inode, extent_end -
- key.offset);
- ret = btrfs_del_item(trans, root, path);
- /* TODO update progress marker and return */
- BUG_ON(ret);
- extent = NULL;
- btrfs_release_path(root, path);
- /* the extent will be freed later */
- }
- if (bookend && found_inline && start <= key.offset) {
- u32 new_size;
- new_size = btrfs_file_extent_calc_inline_size(
- extent_end - end);
- inode_sub_bytes(inode, end - key.offset);
- btrfs_set_file_extent_ram_bytes(leaf, extent,
- new_size);
- if (!compression && !encryption)
- ret = btrfs_truncate_item(trans, root, path,
- new_size, 0);
- BUG_ON(ret);
- }
- /* create bookend, splitting the extent in two */
- if (bookend && found_extent) {
- struct btrfs_key ins;
- ins.objectid = inode->i_ino;
- ins.offset = end;
- btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
- btrfs_release_path(root, path);
- path->leave_spinning = 1;
- ret = btrfs_insert_empty_item(trans, root, path, &ins,
- sizeof(*extent));
- BUG_ON(ret);
+ search_start = extent_end;
+ /*
+ * | ---- range to drop ----- |
+ * | -------- extent -------- |
+ */
+ if (start > key.offset && end >= extent_end) {
+ BUG_ON(del_nr > 0);
+ BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
- leaf = path->nodes[0];
- extent = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- write_extent_buffer(leaf, &old,
- (unsigned long)extent, sizeof(old));
-
- btrfs_set_file_extent_compression(leaf, extent,
- compression);
- btrfs_set_file_extent_encryption(leaf, extent,
- encryption);
- btrfs_set_file_extent_other_encoding(leaf, extent,
- other_encoding);
- btrfs_set_file_extent_offset(leaf, extent,
- le64_to_cpu(old.offset) + end - key.offset);
- WARN_ON(le64_to_cpu(old.num_bytes) <
- (extent_end - end));
- btrfs_set_file_extent_num_bytes(leaf, extent,
- extent_end - end);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ start - key.offset);
+ btrfs_mark_buffer_dirty(leaf);
+ if (disk_bytenr > 0) {
+ inode_sub_bytes(inode, extent_end - start);
+ *hint_byte = disk_bytenr;
+ }
+ if (end == extent_end)
+ break;
- /*
- * set the ram bytes to the size of the full extent
- * before splitting. This is a worst case flag,
- * but its the best we can do because we don't know
- * how splitting affects compression
- */
- btrfs_set_file_extent_ram_bytes(leaf, extent,
- ram_bytes);
- btrfs_set_file_extent_type(leaf, extent, found_type);
-
- btrfs_unlock_up_safe(path, 1);
- btrfs_mark_buffer_dirty(path->nodes[0]);
- btrfs_set_lock_blocking(path->nodes[0]);
-
- path->leave_spinning = 0;
- btrfs_release_path(root, path);
- if (disk_bytenr != 0)
- inode_add_bytes(inode, extent_end - end);
+ path->slots[0]++;
+ goto next_slot;
}
- if (found_extent && !keep) {
- u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);
+ /*
+ * | ---- range to drop ----- |
+ * | ------ extent ------ |
+ */
+ if (start <= key.offset && end >= extent_end) {
+ if (del_nr == 0) {
+ del_slot = path->slots[0];
+ del_nr = 1;
+ } else {
+ BUG_ON(del_slot + del_nr != path->slots[0]);
+ del_nr++;
+ }
- if (old_disk_bytenr != 0) {
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
inode_sub_bytes(inode,
- le64_to_cpu(old.num_bytes));
+ extent_end - key.offset);
+ extent_end = ALIGN(extent_end,
+ root->sectorsize);
+ } else if (disk_bytenr > 0) {
ret = btrfs_free_extent(trans, root,
- old_disk_bytenr,
- le64_to_cpu(old.disk_num_bytes),
- 0, root->root_key.objectid,
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid,
key.objectid, key.offset -
- le64_to_cpu(old.offset));
+ extent_offset);
BUG_ON(ret);
- *hint_byte = old_disk_bytenr;
+ inode_sub_bytes(inode,
+ extent_end - key.offset);
+ *hint_byte = disk_bytenr;
}
- }
- if (search_start >= end) {
- ret = 0;
- goto out;
+ if (end == extent_end)
+ break;
+
+ if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
+ path->slots[0]++;
+ goto next_slot;
+ }
+
+ ret = btrfs_del_items(trans, root, path, del_slot,
+ del_nr);
+ BUG_ON(ret);
+
+ del_nr = 0;
+ del_slot = 0;
+
+ btrfs_release_path(root, path);
+ continue;
}
+
+ BUG_ON(1);
}
-out:
- btrfs_free_path(path);
- if (locked_end > orig_locked_end) {
- unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
- locked_end - 1, GFP_NOFS);
+
+ if (del_nr > 0) {
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ BUG_ON(ret);
}
+
+ btrfs_free_path(path);
return ret;
}
@@ -620,23 +544,23 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
* two or three.
*/
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct inode *inode, u64 start, u64 end)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
+ struct btrfs_key new_key;
u64 bytenr;
u64 num_bytes;
u64 extent_end;
u64 orig_offset;
u64 other_start;
u64 other_end;
- u64 split = start;
- u64 locked_end = end;
- int extent_type;
- int split_end = 1;
+ u64 split;
+ int del_nr = 0;
+ int del_slot = 0;
int ret;
btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -644,12 +568,10 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
BUG_ON(!path);
again:
+ split = start;
key.objectid = inode->i_ino;
key.type = BTRFS_EXTENT_DATA_KEY;
- if (split == start)
- key.offset = split;
- else
- key.offset = split - 1;
+ key.offset = split;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0 && path->slots[0] > 0)
@@ -661,8 +583,8 @@ again:
key.type != BTRFS_EXTENT_DATA_KEY);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- extent_type = btrfs_file_extent_type(leaf, fi);
- BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
+ BUG_ON(btrfs_file_extent_type(leaf, fi) !=
+ BTRFS_FILE_EXTENT_PREALLOC);
extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
BUG_ON(key.offset > start || extent_end < end);
@@ -670,150 +592,91 @@ again:
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
- if (key.offset == start)
- split = end;
-
- if (key.offset == start && extent_end == end) {
- int del_nr = 0;
- int del_slot = 0;
- other_start = end;
- other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
- extent_end = other_end;
- del_slot = path->slots[0] + 1;
- del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
- 0, root->root_key.objectid,
- inode->i_ino, orig_offset);
- BUG_ON(ret);
- }
- other_start = 0;
- other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
- key.offset = other_start;
- del_slot = path->slots[0];
- del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
- 0, root->root_key.objectid,
- inode->i_ino, orig_offset);
- BUG_ON(ret);
- }
- split_end = 0;
- if (del_nr == 0) {
- btrfs_set_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG);
- goto done;
+ while (start > key.offset || end < extent_end) {
+ if (key.offset == start)
+ split = end;
+
+ memcpy(&new_key, &key, sizeof(new_key));
+ new_key.offset = split;
+ ret = btrfs_duplicate_item(trans, root, path, &new_key);
+ if (ret == -EAGAIN) {
+ btrfs_release_path(root, path);
+ goto again;
}
+ BUG_ON(ret < 0);
- fi = btrfs_item_ptr(leaf, del_slot - 1,
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
struct btrfs_file_extent_item);
- btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - key.offset);
+ split - key.offset);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - split);
btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
+ root->root_key.objectid,
+ inode->i_ino, orig_offset);
BUG_ON(ret);
- goto release;
- } else if (split == start) {
- if (locked_end < extent_end) {
- ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1, GFP_NOFS);
- if (!ret) {
- btrfs_release_path(root, path);
- lock_extent(&BTRFS_I(inode)->io_tree,
- locked_end, extent_end - 1, GFP_NOFS);
- locked_end = extent_end;
- goto again;
- }
- locked_end = extent_end;
- }
- btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
- } else {
- BUG_ON(key.offset != start);
- key.offset = split;
- btrfs_set_file_extent_offset(leaf, fi, key.offset -
- orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
- btrfs_set_item_key_safe(trans, root, path, &key);
- extent_end = split;
- }
- if (extent_end == end) {
- split_end = 0;
- extent_type = BTRFS_FILE_EXTENT_REG;
- }
- if (extent_end == end && split == start) {
- other_start = end;
- other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
- path->slots[0]++;
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- key.offset = split;
- btrfs_set_item_key_safe(trans, root, path, &key);
- btrfs_set_file_extent_offset(leaf, fi, key.offset -
- orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- other_end - split);
- goto done;
- }
- }
- if (extent_end == end && split == end) {
- other_start = 0;
- other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino,
- bytenr, &other_start, &other_end)) {
+ if (split == start) {
+ key.offset = start;
+ } else {
+ BUG_ON(start != key.offset);
path->slots[0]--;
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
- other_start);
- goto done;
+ extent_end = end;
}
}
- btrfs_mark_buffer_dirty(leaf);
-
- ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
- root->root_key.objectid,
- inode->i_ino, orig_offset);
- BUG_ON(ret);
- btrfs_release_path(root, path);
-
- key.offset = start;
- ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
- BUG_ON(ret);
-
- leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_set_file_extent_type(leaf, fi, extent_type);
- btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
- btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
- btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_compression(leaf, fi, 0);
- btrfs_set_file_extent_encryption(leaf, fi, 0);
- btrfs_set_file_extent_other_encoding(leaf, fi, 0);
-done:
- btrfs_mark_buffer_dirty(leaf);
-release:
- btrfs_release_path(root, path);
- if (split_end && split == start) {
- split = end;
- goto again;
+ other_start = end;
+ other_end = 0;
+ if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
+ bytenr, &other_start, &other_end)) {
+ extent_end = other_end;
+ del_slot = path->slots[0] + 1;
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+ inode->i_ino, orig_offset);
+ BUG_ON(ret);
}
- if (locked_end > end) {
- unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
- GFP_NOFS);
+ other_start = 0;
+ other_end = start;
+ if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
+ bytenr, &other_start, &other_end)) {
+ key.offset = other_start;
+ del_slot = path->slots[0];
+ del_nr++;
+ ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+ 0, root->root_key.objectid,
+ inode->i_ino, orig_offset);
+ BUG_ON(ret);
}
+ if (del_nr == 0) {
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - key.offset);
+ btrfs_mark_buffer_dirty(leaf);
+
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ BUG_ON(ret);
+out:
btrfs_free_path(path);
return 0;
}
@@ -909,7 +772,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
unsigned long last_index;
int will_write;
- will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
+ will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
(file->f_flags & O_DIRECT));
nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
@@ -1076,7 +939,7 @@ out_nolock:
if (err)
num_written = err;
- if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
+ if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
trans = btrfs_start_transaction(root, 1);
ret = btrfs_log_dentry_safe(trans, root,
file->f_dentry);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b3ad168a0bf..5440bab2363 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -88,13 +88,14 @@ static noinline int cow_file_range(struct inode *inode,
u64 start, u64 end, int *page_started,
unsigned long *nr_written, int unlock);
-static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
+static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
int err;
- err = btrfs_init_acl(inode, dir);
+ err = btrfs_init_acl(trans, inode, dir);
if (!err)
- err = btrfs_xattr_security_init(inode, dir);
+ err = btrfs_xattr_security_init(trans, inode, dir);
return err;
}
@@ -188,8 +189,18 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
+ /*
+ * we're an inline extent, so nobody can
+ * extend the file past i_size without locking
+ * a page we already have locked.
+ *
+ * We must do any isize and inode updates
+ * before we unlock the pages. Otherwise we
+ * could end up racing with unlink.
+ */
BTRFS_I(inode)->disk_i_size = inode->i_size;
btrfs_update_inode(trans, root, inode);
+
return 0;
fail:
btrfs_free_path(path);
@@ -230,8 +241,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
return 1;
}
- ret = btrfs_drop_extents(trans, root, inode, start,
- aligned_end, aligned_end, start,
+ ret = btrfs_drop_extents(trans, inode, start, aligned_end,
&hint_byte, 1);
BUG_ON(ret);
@@ -416,7 +426,6 @@ again:
start, end,
total_compressed, pages);
}
- btrfs_end_transaction(trans, root);
if (ret == 0) {
/*
* inline extent creation worked, we don't need
@@ -430,9 +439,11 @@ again:
EXTENT_CLEAR_DELALLOC |
EXTENT_CLEAR_ACCOUNTING |
EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
- ret = 0;
+
+ btrfs_end_transaction(trans, root);
goto free_pages_out;
}
+ btrfs_end_transaction(trans, root);
}
if (will_compress) {
@@ -543,7 +554,6 @@ static noinline int submit_compressed_extents(struct inode *inode,
if (list_empty(&async_cow->extents))
return 0;
- trans = btrfs_join_transaction(root, 1);
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
@@ -590,19 +600,15 @@ retry:
lock_extent(io_tree, async_extent->start,
async_extent->start + async_extent->ram_size - 1,
GFP_NOFS);
- /*
- * here we're doing allocation and writeback of the
- * compressed pages
- */
- btrfs_drop_extent_cache(inode, async_extent->start,
- async_extent->start +
- async_extent->ram_size - 1, 0);
+ trans = btrfs_join_transaction(root, 1);
ret = btrfs_reserve_extent(trans, root,
async_extent->compressed_size,
async_extent->compressed_size,
0, alloc_hint,
(u64)-1, &ins, 1);
+ btrfs_end_transaction(trans, root);
+
if (ret) {
int i;
for (i = 0; i < async_extent->nr_pages; i++) {
@@ -618,6 +624,14 @@ retry:
goto retry;
}
+ /*
+ * here we're doing allocation and writeback of the
+ * compressed pages
+ */
+ btrfs_drop_extent_cache(inode, async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1, 0);
+
em = alloc_extent_map(GFP_NOFS);
em->start = async_extent->start;
em->len = async_extent->ram_size;
@@ -649,8 +663,6 @@ retry:
BTRFS_ORDERED_COMPRESSED);
BUG_ON(ret);
- btrfs_end_transaction(trans, root);
-
/*
* clear dirty, set writeback and unlock the pages.
*/
@@ -672,13 +684,11 @@ retry:
async_extent->nr_pages);
BUG_ON(ret);
- trans = btrfs_join_transaction(root, 1);
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
cond_resched();
}
- btrfs_end_transaction(trans, root);
return 0;
}
@@ -742,6 +752,7 @@ static noinline int cow_file_range(struct inode *inode,
EXTENT_CLEAR_DIRTY |
EXTENT_SET_WRITEBACK |
EXTENT_END_WRITEBACK);
+
*nr_written = *nr_written +
(end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
*page_started = 1;
@@ -1596,7 +1607,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
struct inode *inode, u64 file_pos,
u64 disk_bytenr, u64 disk_num_bytes,
u64 num_bytes, u64 ram_bytes,
- u64 locked_end,
u8 compression, u8 encryption,
u16 other_encoding, int extent_type)
{
@@ -1622,9 +1632,8 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
* the caller is expected to unpin it and allow it to be merged
* with the others.
*/
- ret = btrfs_drop_extents(trans, root, inode, file_pos,
- file_pos + num_bytes, locked_end,
- file_pos, &hint, 0);
+ ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
+ &hint, 0);
BUG_ON(ret);
ins.objectid = inode->i_ino;
@@ -1730,23 +1739,32 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
}
}
- trans = btrfs_join_transaction(root, 1);
-
if (!ordered_extent)
ordered_extent = btrfs_lookup_ordered_extent(inode, start);
BUG_ON(!ordered_extent);
- if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
- goto nocow;
+ if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+ BUG_ON(!list_empty(&ordered_extent->list));
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ if (!ret) {
+ trans = btrfs_join_transaction(root, 1);
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ btrfs_end_transaction(trans, root);
+ }
+ goto out;
+ }
lock_extent(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
GFP_NOFS);
+ trans = btrfs_join_transaction(root, 1);
+
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compressed = 1;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
BUG_ON(compressed);
- ret = btrfs_mark_extent_written(trans, root, inode,
+ ret = btrfs_mark_extent_written(trans, inode,
ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len);
@@ -1758,8 +1776,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
ordered_extent->disk_len,
ordered_extent->len,
ordered_extent->len,
- ordered_extent->file_offset +
- ordered_extent->len,
compressed, 0, 0,
BTRFS_FILE_EXTENT_REG);
unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
@@ -1770,22 +1786,20 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
unlock_extent(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
GFP_NOFS);
-nocow:
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
- mutex_lock(&BTRFS_I(inode)->extent_mutex);
- btrfs_ordered_update_i_size(inode, ordered_extent);
- btrfs_update_inode(trans, root, inode);
- btrfs_remove_ordered_extent(inode, ordered_extent);
- mutex_unlock(&BTRFS_I(inode)->extent_mutex);
-
+ /* this also removes the ordered extent from the tree */
+ btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ btrfs_end_transaction(trans, root);
+out:
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
btrfs_put_ordered_extent(ordered_extent);
- btrfs_end_transaction(trans, root);
return 0;
}
@@ -2008,6 +2022,54 @@ zeroit:
return -EIO;
}
+struct delayed_iput {
+ struct list_head list;
+ struct inode *inode;
+};
+
+void btrfs_add_delayed_iput(struct inode *inode)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct delayed_iput *delayed;
+
+ if (atomic_add_unless(&inode->i_count, -1, 1))
+ return;
+
+ delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
+ delayed->inode = inode;
+
+ spin_lock(&fs_info->delayed_iput_lock);
+ list_add_tail(&delayed->list, &fs_info->delayed_iputs);
+ spin_unlock(&fs_info->delayed_iput_lock);
+}
+
+void btrfs_run_delayed_iputs(struct btrfs_root *root)
+{
+ LIST_HEAD(list);
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct delayed_iput *delayed;
+ int empty;
+
+ spin_lock(&fs_info->delayed_iput_lock);
+ empty = list_empty(&fs_info->delayed_iputs);
+ spin_unlock(&fs_info->delayed_iput_lock);
+ if (empty)
+ return;
+
+ down_read(&root->fs_info->cleanup_work_sem);
+ spin_lock(&fs_info->delayed_iput_lock);
+ list_splice_init(&fs_info->delayed_iputs, &list);
+ spin_unlock(&fs_info->delayed_iput_lock);
+
+ while (!list_empty(&list)) {
+ delayed = list_entry(list.next, struct delayed_iput, list);
+ list_del(&delayed->list);
+ iput(delayed->inode);
+ kfree(delayed);
+ }
+ up_read(&root->fs_info->cleanup_work_sem);
+}
+
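For readers unfamiliar with the deferred-put pattern introduced above, the following is a minimal userspace sketch of the same idea (plain C, not part of the patch; all names are illustrative): when the final reference cannot be dropped safely in the current context, the object is queued on a lock-protected list and a later, safe context performs the actual free.

    /* Illustrative sketch only -- not kernel code, not part of the patch. */
    #include <pthread.h>
    #include <stdlib.h>

    struct object {
        int refcount;            /* simplified; the kernel uses atomics */
        struct object *next;     /* link for the deferred list */
    };

    static struct object *deferred_head;
    static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Drop a reference; if it was the last one, defer the free. */
    static void object_put_deferred(struct object *obj)
    {
        pthread_mutex_lock(&deferred_lock);
        if (--obj->refcount > 0) {
            pthread_mutex_unlock(&deferred_lock);
            return;
        }
        obj->next = deferred_head;
        deferred_head = obj;
        pthread_mutex_unlock(&deferred_lock);
    }

    /* Called from a context where freeing is safe (no locks/transactions held). */
    static void run_deferred_puts(void)
    {
        struct object *list;

        pthread_mutex_lock(&deferred_lock);
        list = deferred_head;
        deferred_head = NULL;
        pthread_mutex_unlock(&deferred_lock);

        while (list) {
            struct object *next = list->next;
            free(list);
            list = next;
        }
    }

    int main(void)
    {
        struct object *o = calloc(1, sizeof(*o));
        o->refcount = 1;
        object_put_deferred(o);   /* queued instead of freed inline */
        run_deferred_puts();      /* final free happens here */
        return 0;
    }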
/*
* This creates an orphan entry for the given inode in case something goes
* wrong in the middle of an unlink/truncate.
@@ -2080,16 +2142,17 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
struct inode *inode;
int ret = 0, nr_unlink = 0, nr_truncate = 0;
- path = btrfs_alloc_path();
- if (!path)
+ if (!xchg(&root->clean_orphans, 0))
return;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
path->reada = -1;
key.objectid = BTRFS_ORPHAN_OBJECTID;
btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
key.offset = (u64)-1;
-
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
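The xchg(&root->clean_orphans, 0) guard added at the top of the orphan cleanup above makes the cleanup a one-shot: only the caller that atomically observes the flag set does the work. A hedged userspace sketch of that pattern with C11 atomics (names illustrative, not the kernel API):

    /* Illustrative sketch only -- not part of the patch. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cleanup_pending = 1;   /* set elsewhere when work is queued */

    static void maybe_run_cleanup(void)
    {
        /* Atomically clear the flag; only the caller that saw it set proceeds. */
        if (!atomic_exchange(&cleanup_pending, 0))
            return;
        puts("running cleanup exactly once");
    }

    int main(void)
    {
        maybe_run_cleanup();   /* performs the cleanup */
        maybe_run_cleanup();   /* flag already cleared, returns immediately */
        return 0;
    }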
@@ -2834,37 +2897,40 @@ out:
* min_type is the minimum key type to truncate down to. If set to 0, this
* will kill all the items on this inode, including the INODE_ITEM_KEY.
*/
-noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode,
- u64 new_size, u32 min_type)
+int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode,
+ u64 new_size, u32 min_type)
{
- int ret;
struct btrfs_path *path;
- struct btrfs_key key;
- struct btrfs_key found_key;
- u32 found_type = (u8)-1;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
u64 extent_start = 0;
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
+ u64 mask = root->sectorsize - 1;
+ u32 found_type = (u8)-1;
int found_extent;
int del_item;
int pending_del_nr = 0;
int pending_del_slot = 0;
int extent_type = -1;
int encoding;
- u64 mask = root->sectorsize - 1;
+ int ret;
+ int err = 0;
+
+ BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
if (root->ref_cows)
btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
+
path = btrfs_alloc_path();
BUG_ON(!path);
path->reada = -1;
- /* FIXME, add redo link to tree so we don't leak on crash */
key.objectid = inode->i_ino;
key.offset = (u64)-1;
key.type = (u8)-1;
@@ -2872,17 +2938,17 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
search_again:
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto error;
+ if (ret < 0) {
+ err = ret;
+ goto out;
+ }
if (ret > 0) {
/* there are no items in the tree for us to truncate, we're
* done
*/
- if (path->slots[0] == 0) {
- ret = 0;
- goto error;
- }
+ if (path->slots[0] == 0)
+ goto out;
path->slots[0]--;
}
@@ -2917,28 +2983,17 @@ search_again:
}
item_end--;
}
- if (item_end < new_size) {
- if (found_type == BTRFS_DIR_ITEM_KEY)
- found_type = BTRFS_INODE_ITEM_KEY;
- else if (found_type == BTRFS_EXTENT_ITEM_KEY)
- found_type = BTRFS_EXTENT_DATA_KEY;
- else if (found_type == BTRFS_EXTENT_DATA_KEY)
- found_type = BTRFS_XATTR_ITEM_KEY;
- else if (found_type == BTRFS_XATTR_ITEM_KEY)
- found_type = BTRFS_INODE_REF_KEY;
- else if (found_type)
- found_type--;
- else
+ if (found_type > min_type) {
+ del_item = 1;
+ } else {
+ if (item_end < new_size)
break;
- btrfs_set_key_type(&key, found_type);
- goto next;
+ if (found_key.offset >= new_size)
+ del_item = 1;
+ else
+ del_item = 0;
}
- if (found_key.offset >= new_size)
- del_item = 1;
- else
- del_item = 0;
found_extent = 0;
-
/* FIXME, shrink the extent if the ref count is only 1 */
if (found_type != BTRFS_EXTENT_DATA_KEY)
goto delete;
@@ -3025,42 +3080,36 @@ delete:
inode->i_ino, extent_offset);
BUG_ON(ret);
}
-next:
- if (path->slots[0] == 0) {
- if (pending_del_nr)
- goto del_pending;
- btrfs_release_path(root, path);
- if (found_type == BTRFS_INODE_ITEM_KEY)
- break;
- goto search_again;
- }
- path->slots[0]--;
- if (pending_del_nr &&
- path->slots[0] + 1 != pending_del_slot) {
- struct btrfs_key debug;
-del_pending:
- btrfs_item_key_to_cpu(path->nodes[0], &debug,
- pending_del_slot);
- ret = btrfs_del_items(trans, root, path,
- pending_del_slot,
- pending_del_nr);
- BUG_ON(ret);
- pending_del_nr = 0;
+ if (found_type == BTRFS_INODE_ITEM_KEY)
+ break;
+
+ if (path->slots[0] == 0 ||
+ path->slots[0] != pending_del_slot) {
+ if (root->ref_cows) {
+ err = -EAGAIN;
+ goto out;
+ }
+ if (pending_del_nr) {
+ ret = btrfs_del_items(trans, root, path,
+ pending_del_slot,
+ pending_del_nr);
+ BUG_ON(ret);
+ pending_del_nr = 0;
+ }
btrfs_release_path(root, path);
- if (found_type == BTRFS_INODE_ITEM_KEY)
- break;
goto search_again;
+ } else {
+ path->slots[0]--;
}
}
- ret = 0;
-error:
+out:
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
}
btrfs_free_path(path);
- return ret;
+ return err;
}
/*
@@ -3180,10 +3229,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
if (size <= hole_start)
return 0;
- err = btrfs_truncate_page(inode->i_mapping, inode->i_size);
- if (err)
- return err;
-
while (1) {
struct btrfs_ordered_extent *ordered;
btrfs_wait_ordered_range(inode, hole_start,
@@ -3196,9 +3241,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
btrfs_put_ordered_extent(ordered);
}
- trans = btrfs_start_transaction(root, 1);
- btrfs_set_trans_block_group(trans, inode);
-
cur_offset = hole_start;
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
@@ -3206,40 +3248,120 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
BUG_ON(IS_ERR(em) || !em);
last_byte = min(extent_map_end(em), block_end);
last_byte = (last_byte + mask) & ~mask;
- if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
+ if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
- err = btrfs_drop_extents(trans, root, inode,
- cur_offset,
- cur_offset + hole_size,
- block_end,
- cur_offset, &hint_byte, 1);
- if (err)
- break;
- err = btrfs_reserve_metadata_space(root, 1);
+ err = btrfs_reserve_metadata_space(root, 2);
if (err)
break;
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+
+ err = btrfs_drop_extents(trans, inode, cur_offset,
+ cur_offset + hole_size,
+ &hint_byte, 1);
+ BUG_ON(err);
+
err = btrfs_insert_file_extent(trans, root,
inode->i_ino, cur_offset, 0,
0, hole_size, 0, hole_size,
0, 0, 0);
+ BUG_ON(err);
+
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
- btrfs_unreserve_metadata_space(root, 1);
+
+ btrfs_end_transaction(trans, root);
+ btrfs_unreserve_metadata_space(root, 2);
}
free_extent_map(em);
cur_offset = last_byte;
- if (err || cur_offset >= block_end)
+ if (cur_offset >= block_end)
break;
}
- btrfs_end_transaction(trans, root);
unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
return err;
}
+static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans;
+ unsigned long nr;
+ int ret;
+
+ if (attr->ia_size == inode->i_size)
+ return 0;
+
+ if (attr->ia_size > inode->i_size) {
+ unsigned long limit;
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (attr->ia_size > inode->i_sb->s_maxbytes)
+ return -EFBIG;
+ if (limit != RLIM_INFINITY && attr->ia_size > limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+ }
+ }
+
+ ret = btrfs_reserve_metadata_space(root, 1);
+ if (ret)
+ return ret;
+
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+
+ ret = btrfs_orphan_add(trans, inode);
+ BUG_ON(ret);
+
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_unreserve_metadata_space(root, 1);
+ btrfs_btree_balance_dirty(root, nr);
+
+ if (attr->ia_size > inode->i_size) {
+ ret = btrfs_cont_expand(inode, attr->ia_size);
+ if (ret) {
+ btrfs_truncate(inode);
+ return ret;
+ }
+
+ i_size_write(inode, attr->ia_size);
+ btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ if (inode->i_nlink > 0) {
+ ret = btrfs_orphan_del(trans, inode);
+ BUG_ON(ret);
+ }
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+ return 0;
+ }
+
+ /*
+ * We're truncating a file that used to have good data down to
+ * zero. Make sure it gets into the ordered flush list so that
+ * any new writes get down to disk quickly.
+ */
+ if (attr->ia_size == 0)
+ BTRFS_I(inode)->ordered_data_close = 1;
+
+ /* we don't support swapfiles, so vmtruncate shouldn't fail */
+ ret = vmtruncate(inode, attr->ia_size);
+ BUG_ON(ret);
+
+ return 0;
+}
+
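btrfs_setattr_size above refuses to grow the file past the filesystem's s_maxbytes or the process RLIMIT_FSIZE, raising SIGXFSZ in the latter case. The same limit can be inspected from userspace with getrlimit(); a minimal sketch of the check, assuming a hypothetical fs_max_bytes value (not part of the patch):

    /* Illustrative sketch only -- not part of the patch. */
    #include <sys/resource.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return 0 if growing a file to new_size is allowed, -1 otherwise. */
    static int check_new_size(uint64_t new_size, uint64_t fs_max_bytes)
    {
        struct rlimit rl;

        if (new_size > fs_max_bytes)
            return -1;                 /* would exceed the filesystem limit */
        if (getrlimit(RLIMIT_FSIZE, &rl) == 0 &&
            rl.rlim_cur != RLIM_INFINITY &&
            new_size > rl.rlim_cur)
            return -1;                 /* the kernel would also deliver SIGXFSZ */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_new_size(1 << 20, UINT64_MAX));
        return 0;
    }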
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
@@ -3250,23 +3372,14 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
- if (attr->ia_size > inode->i_size) {
- err = btrfs_cont_expand(inode, attr->ia_size);
- if (err)
- return err;
- } else if (inode->i_size > 0 &&
- attr->ia_size == 0) {
-
- /* we're truncating a file that used to have good
- * data down to zero. Make sure it gets into
- * the ordered flush list so that any new writes
- * get down to disk quickly.
- */
- BTRFS_I(inode)->ordered_data_close = 1;
- }
+ err = btrfs_setattr_size(inode, attr);
+ if (err)
+ return err;
}
+ attr->ia_valid &= ~ATTR_SIZE;
- err = inode_setattr(inode, attr);
+ if (attr->ia_valid)
+ err = inode_setattr(inode, attr);
if (!err && ((attr->ia_valid & ATTR_MODE)))
err = btrfs_acl_chmod(inode);
@@ -3287,36 +3400,43 @@ void btrfs_delete_inode(struct inode *inode)
}
btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (root->fs_info->log_root_recovering) {
+ BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
+ goto no_delete;
+ }
+
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0);
goto no_delete;
}
btrfs_i_size_write(inode, 0);
- trans = btrfs_join_transaction(root, 1);
- btrfs_set_trans_block_group(trans, inode);
- ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
- if (ret) {
- btrfs_orphan_del(NULL, inode);
- goto no_delete_lock;
- }
+ while (1) {
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+ ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
- btrfs_orphan_del(trans, inode);
+ if (ret != -EAGAIN)
+ break;
- nr = trans->blocks_used;
- clear_inode(inode);
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ trans = NULL;
+ btrfs_btree_balance_dirty(root, nr);
+ }
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root, nr);
- return;
+ if (ret == 0) {
+ ret = btrfs_orphan_del(trans, inode);
+ BUG_ON(ret);
+ }
-no_delete_lock:
nr = trans->blocks_used;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
no_delete:
clear_inode(inode);
+ return;
}
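The btrfs_delete_inode change above relies on btrfs_truncate_inode_items now returning -EAGAIN so the caller can end the current transaction, balance dirty metadata, and retry with a fresh one. A minimal sketch of that retry shape with invented helper names (not the kernel API, not part of the patch):

    /* Illustrative sketch only -- not part of the patch. */
    #include <errno.h>
    #include <stdio.h>

    /* Pretend "transaction": each one allows a bounded amount of work. */
    struct txn { int budget; };

    static struct txn start_txn(void)  { return (struct txn){ .budget = 3 }; }
    static void end_txn(struct txn *t) { (void)t; /* commit/flush would go here */ }

    /* Do up to the transaction's budget of work; -EAGAIN means "call me again". */
    static int do_some_work(struct txn *t, int *remaining)
    {
        while (*remaining > 0 && t->budget > 0) {
            (*remaining)--;
            t->budget--;
        }
        return *remaining > 0 ? -EAGAIN : 0;
    }

    int main(void)
    {
        int remaining = 10;
        int ret;

        while (1) {
            struct txn t = start_txn();
            ret = do_some_work(&t, &remaining);
            end_txn(&t);
            if (ret != -EAGAIN)
                break;        /* done, or a real error */
            /* a real caller would throttle/balance dirty data here */
        }
        printf("finished with ret=%d\n", ret);
        return 0;
    }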
/*
@@ -3569,7 +3689,6 @@ static noinline void init_btrfs_i(struct inode *inode)
INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
- mutex_init(&BTRFS_I(inode)->extent_mutex);
mutex_init(&BTRFS_I(inode)->log_mutex);
}
@@ -3695,6 +3814,13 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
}
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
+ if (root != sub_root) {
+ down_read(&root->fs_info->cleanup_work_sem);
+ if (!(inode->i_sb->s_flags & MS_RDONLY))
+ btrfs_orphan_cleanup(sub_root);
+ up_read(&root->fs_info->cleanup_work_sem);
+ }
+
return inode;
}
@@ -4219,7 +4345,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -4290,7 +4416,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -4336,6 +4462,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
if (inode->i_nlink == 0)
return -ENOENT;
+ /* do not allow sys_link across subvolumes of the same device */
+ if (root->objectid != BTRFS_I(inode)->root->objectid)
+ return -EPERM;
+
/*
* 1 item for inode ref
* 2 items for dir items
@@ -4423,7 +4553,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
drop_on_err = 1;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err)
goto out_fail;
@@ -5074,17 +5204,20 @@ static void btrfs_truncate(struct inode *inode)
unsigned long nr;
u64 mask = root->sectorsize - 1;
- if (!S_ISREG(inode->i_mode))
- return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ if (!S_ISREG(inode->i_mode)) {
+ WARN_ON(1);
return;
+ }
ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
if (ret)
return;
+
btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
+ btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
/*
* setattr is responsible for setting the ordered_data_close flag,
@@ -5106,21 +5239,32 @@ static void btrfs_truncate(struct inode *inode)
if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
btrfs_add_ordered_operation(trans, root, inode);
- btrfs_set_trans_block_group(trans, inode);
- btrfs_i_size_write(inode, inode->i_size);
+ while (1) {
+ ret = btrfs_truncate_inode_items(trans, root, inode,
+ inode->i_size,
+ BTRFS_EXTENT_DATA_KEY);
+ if (ret != -EAGAIN)
+ break;
- ret = btrfs_orphan_add(trans, inode);
- if (ret)
- goto out;
- /* FIXME, add redo link to tree so we don't leak on crash */
- ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
- BTRFS_EXTENT_DATA_KEY);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+
+ nr = trans->blocks_used;
+ btrfs_end_transaction(trans, root);
+ btrfs_btree_balance_dirty(root, nr);
+
+ trans = btrfs_start_transaction(root, 1);
+ btrfs_set_trans_block_group(trans, inode);
+ }
- ret = btrfs_orphan_del(trans, inode);
+ if (ret == 0 && inode->i_nlink > 0) {
+ ret = btrfs_orphan_del(trans, inode);
+ BUG_ON(ret);
+ }
+
+ ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
-out:
nr = trans->blocks_used;
ret = btrfs_end_transaction_throttle(trans, root);
BUG_ON(ret);
@@ -5217,9 +5361,9 @@ void btrfs_destroy_inode(struct inode *inode)
spin_lock(&root->list_lock);
if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
- printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
- " list\n", inode->i_ino);
- dump_stack();
+ printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
+ inode->i_ino);
+ list_del_init(&BTRFS_I(inode)->i_orphan);
}
spin_unlock(&root->list_lock);
@@ -5476,7 +5620,7 @@ out_fail:
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-int btrfs_start_delalloc_inodes(struct btrfs_root *root)
+int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
struct list_head *head = &root->fs_info->delalloc_inodes;
struct btrfs_inode *binode;
@@ -5495,7 +5639,10 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
spin_unlock(&root->fs_info->delalloc_lock);
if (inode) {
filemap_flush(inode->i_mapping);
- iput(inode);
+ if (delay_iput)
+ btrfs_add_delayed_iput(inode);
+ else
+ iput(inode);
}
cond_resched();
spin_lock(&root->fs_info->delalloc_lock);
@@ -5569,7 +5716,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto out_unlock;
- err = btrfs_init_inode_security(inode, dir);
+ err = btrfs_init_inode_security(trans, inode, dir);
if (err) {
drop_inode = 1;
goto out_unlock;
@@ -5641,10 +5788,10 @@ out_fail:
return err;
}
-static int prealloc_file_range(struct btrfs_trans_handle *trans,
- struct inode *inode, u64 start, u64 end,
- u64 locked_end, u64 alloc_hint, int mode)
+static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
+ u64 alloc_hint, int mode)
{
+ struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 alloc_size;
@@ -5655,43 +5802,56 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
while (num_bytes > 0) {
alloc_size = min(num_bytes, root->fs_info->max_extent);
- ret = btrfs_reserve_metadata_space(root, 1);
- if (ret)
- goto out;
+ trans = btrfs_start_transaction(root, 1);
ret = btrfs_reserve_extent(trans, root, alloc_size,
root->sectorsize, 0, alloc_hint,
(u64)-1, &ins, 1);
if (ret) {
WARN_ON(1);
- goto out;
+ goto stop_trans;
+ }
+
+ ret = btrfs_reserve_metadata_space(root, 3);
+ if (ret) {
+ btrfs_free_reserved_extent(root, ins.objectid,
+ ins.offset);
+ goto stop_trans;
}
+
ret = insert_reserved_file_extent(trans, inode,
cur_offset, ins.objectid,
ins.offset, ins.offset,
- ins.offset, locked_end,
- 0, 0, 0,
+ ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
BUG_ON(ret);
btrfs_drop_extent_cache(inode, cur_offset,
cur_offset + ins.offset -1, 0);
+
num_bytes -= ins.offset;
cur_offset += ins.offset;
alloc_hint = ins.objectid + ins.offset;
- btrfs_unreserve_metadata_space(root, 1);
- }
-out:
- if (cur_offset > start) {
+
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- cur_offset > i_size_read(inode))
- btrfs_i_size_write(inode, cur_offset);
+ cur_offset > inode->i_size) {
+ i_size_write(inode, cur_offset);
+ btrfs_ordered_update_i_size(inode, cur_offset, NULL);
+ }
+
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
+
+ btrfs_end_transaction(trans, root);
+ btrfs_unreserve_metadata_space(root, 3);
}
+ return ret;
+stop_trans:
+ btrfs_end_transaction(trans, root);
return ret;
+
}
static long btrfs_fallocate(struct inode *inode, int mode,
@@ -5705,8 +5865,6 @@ static long btrfs_fallocate(struct inode *inode, int mode,
u64 locked_end;
u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
struct extent_map *em;
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root;
int ret;
alloc_start = offset & ~mask;
@@ -5725,9 +5883,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
goto out;
}
- root = BTRFS_I(inode)->root;
-
- ret = btrfs_check_data_free_space(root, inode,
+ ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode,
alloc_end - alloc_start);
if (ret)
goto out;
@@ -5736,12 +5892,6 @@ static long btrfs_fallocate(struct inode *inode, int mode,
while (1) {
struct btrfs_ordered_extent *ordered;
- trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
- if (!trans) {
- ret = -EIO;
- goto out_free;
- }
-
/* the extent lock is ordered inside the running
* transaction
*/
@@ -5755,8 +5905,6 @@ static long btrfs_fallocate(struct inode *inode, int mode,
btrfs_put_ordered_extent(ordered);
unlock_extent(&BTRFS_I(inode)->io_tree,
alloc_start, locked_end, GFP_NOFS);
- btrfs_end_transaction(trans, BTRFS_I(inode)->root);
-
/*
* we can't wait on the range with the transaction
* running or with the extent lock held
@@ -5777,10 +5925,12 @@ static long btrfs_fallocate(struct inode *inode, int mode,
BUG_ON(IS_ERR(em) || !em);
last_byte = min(extent_map_end(em), alloc_end);
last_byte = (last_byte + mask) & ~mask;
- if (em->block_start == EXTENT_MAP_HOLE) {
- ret = prealloc_file_range(trans, inode, cur_offset,
- last_byte, locked_end + 1,
- alloc_hint, mode);
+ if (em->block_start == EXTENT_MAP_HOLE ||
+ (cur_offset >= inode->i_size &&
+ !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+ ret = prealloc_file_range(inode,
+ cur_offset, last_byte,
+ alloc_hint, mode);
if (ret < 0) {
free_extent_map(em);
break;
@@ -5799,9 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
GFP_NOFS);
- btrfs_end_transaction(trans, BTRFS_I(inode)->root);
-out_free:
- btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
+ btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
+ alloc_end - alloc_start);
out:
mutex_unlock(&inode->i_mutex);
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index cdbb054102b..645a17927a8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -237,7 +237,6 @@ static noinline int create_subvol(struct btrfs_root *root,
u64 objectid;
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
- unsigned long nr = 1;
/*
* 1 - inode item
@@ -290,7 +289,7 @@ static noinline int create_subvol(struct btrfs_root *root,
btrfs_set_root_generation(&root_item, trans->transid);
btrfs_set_root_level(&root_item, 0);
btrfs_set_root_refs(&root_item, 1);
- btrfs_set_root_used(&root_item, 0);
+ btrfs_set_root_used(&root_item, leaf->len);
btrfs_set_root_last_snapshot(&root_item, 0);
memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
@@ -342,24 +341,21 @@ static noinline int create_subvol(struct btrfs_root *root,
d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
fail:
- nr = trans->blocks_used;
err = btrfs_commit_transaction(trans, root);
if (err && !ret)
ret = err;
btrfs_unreserve_metadata_space(root, 6);
- btrfs_btree_balance_dirty(root, nr);
return ret;
}
static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
char *name, int namelen)
{
+ struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
struct btrfs_trans_handle *trans;
- int ret = 0;
- int err;
- unsigned long nr = 0;
+ int ret;
if (!root->ref_cows)
return -EINVAL;
@@ -372,20 +368,20 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
*/
ret = btrfs_reserve_metadata_space(root, 6);
if (ret)
- goto fail_unlock;
+ goto fail;
pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
if (!pending_snapshot) {
ret = -ENOMEM;
btrfs_unreserve_metadata_space(root, 6);
- goto fail_unlock;
+ goto fail;
}
pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
if (!pending_snapshot->name) {
ret = -ENOMEM;
kfree(pending_snapshot);
btrfs_unreserve_metadata_space(root, 6);
- goto fail_unlock;
+ goto fail;
}
memcpy(pending_snapshot->name, name, namelen);
pending_snapshot->name[namelen] = '\0';
@@ -395,10 +391,19 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
pending_snapshot->root = root;
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
- err = btrfs_commit_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
+ BUG_ON(ret);
+ btrfs_unreserve_metadata_space(root, 6);
-fail_unlock:
- btrfs_btree_balance_dirty(root, nr);
+ inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto fail;
+ }
+ BUG_ON(!inode);
+ d_instantiate(dentry, inode);
+ ret = 0;
+fail:
return ret;
}
@@ -1027,8 +1032,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
BUG_ON(!trans);
/* punch hole in destination first */
- btrfs_drop_extents(trans, root, inode, off, off + len,
- off + len, 0, &hint_byte, 1);
+ btrfs_drop_extents(trans, inode, off, off + len, &hint_byte, 1);
/* clone data */
key.objectid = src->i_ino;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 5799bc46a30..b10a49d4bc6 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -291,16 +291,16 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
/*
* remove an ordered extent from the tree. No references are dropped
- * but, anyone waiting on this extent is woken up.
+ * and the caller must wake_up() entry->wait. The tree mutex must be
+ * held while calling this function.
*/
-int btrfs_remove_ordered_extent(struct inode *inode,
+static int __btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
tree = &BTRFS_I(inode)->ordered_tree;
- mutex_lock(&tree->mutex);
node = &entry->rb_node;
rb_erase(node, &tree->tree);
tree->last = NULL;
@@ -326,16 +326,34 @@ int btrfs_remove_ordered_extent(struct inode *inode,
}
spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
+ return 0;
+}
+
+/*
+ * remove an ordered extent from the tree. No references are dropped
+ * but any waiters are woken.
+ */
+int btrfs_remove_ordered_extent(struct inode *inode,
+ struct btrfs_ordered_extent *entry)
+{
+ struct btrfs_ordered_inode_tree *tree;
+ int ret;
+
+ tree = &BTRFS_I(inode)->ordered_tree;
+ mutex_lock(&tree->mutex);
+ ret = __btrfs_remove_ordered_extent(inode, entry);
mutex_unlock(&tree->mutex);
wake_up(&entry->wait);
- return 0;
+
+ return ret;
}
/*
* wait for all the ordered extents in a root. This is done when balancing
* space between drives.
*/
-int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
+int btrfs_wait_ordered_extents(struct btrfs_root *root,
+ int nocow_only, int delay_iput)
{
struct list_head splice;
struct list_head *cur;
@@ -372,7 +390,10 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
if (inode) {
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
- iput(inode);
+ if (delay_iput)
+ btrfs_add_delayed_iput(inode);
+ else
+ iput(inode);
} else {
btrfs_put_ordered_extent(ordered);
}
@@ -430,7 +451,7 @@ again:
btrfs_wait_ordered_range(inode, 0, (u64)-1);
else
filemap_flush(inode->i_mapping);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
cond_resched();
@@ -589,7 +610,7 @@ out:
* After an extent is done, call this to conditionally update the on disk
* i_size. i_size is updated to cover any fully written part of the file.
*/
-int btrfs_ordered_update_i_size(struct inode *inode,
+int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered)
{
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
@@ -597,18 +618,30 @@ int btrfs_ordered_update_i_size(struct inode *inode,
u64 disk_i_size;
u64 new_i_size;
u64 i_size_test;
+ u64 i_size = i_size_read(inode);
struct rb_node *node;
+ struct rb_node *prev = NULL;
struct btrfs_ordered_extent *test;
+ int ret = 1;
+
+ if (ordered)
+ offset = entry_end(ordered);
mutex_lock(&tree->mutex);
disk_i_size = BTRFS_I(inode)->disk_i_size;
+ /* truncate file */
+ if (disk_i_size > i_size) {
+ BTRFS_I(inode)->disk_i_size = i_size;
+ ret = 0;
+ goto out;
+ }
+
/*
* if the disk i_size is already at the inode->i_size, or
* this ordered extent is inside the disk i_size, we're done
*/
- if (disk_i_size >= inode->i_size ||
- ordered->file_offset + ordered->len <= disk_i_size) {
+ if (disk_i_size == i_size || offset <= disk_i_size) {
goto out;
}
@@ -616,8 +649,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
* we can't update the disk_i_size if there are delalloc bytes
* between disk_i_size and this ordered extent
*/
- if (test_range_bit(io_tree, disk_i_size,
- ordered->file_offset + ordered->len - 1,
+ if (test_range_bit(io_tree, disk_i_size, offset - 1,
EXTENT_DELALLOC, 0, NULL)) {
goto out;
}
@@ -626,20 +658,32 @@ int btrfs_ordered_update_i_size(struct inode *inode,
* if we find an ordered extent then we can't update disk i_size
* yet
*/
- node = &ordered->rb_node;
- while (1) {
- node = rb_prev(node);
- if (!node)
- break;
+ if (ordered) {
+ node = rb_prev(&ordered->rb_node);
+ } else {
+ prev = tree_search(tree, offset);
+ /*
+ * we insert file extents without involving the ordered struct,
+ * so there should be no ordered struct covering this offset
+ */
+ if (prev) {
+ test = rb_entry(prev, struct btrfs_ordered_extent,
+ rb_node);
+ BUG_ON(offset_in_entry(test, offset));
+ }
+ node = prev;
+ }
+ while (node) {
test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (test->file_offset + test->len <= disk_i_size)
break;
- if (test->file_offset >= inode->i_size)
+ if (test->file_offset >= i_size)
break;
if (test->file_offset >= disk_i_size)
goto out;
+ node = rb_prev(node);
}
- new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));
+ new_i_size = min_t(u64, offset, i_size);
/*
* at this point, we know we can safely update i_size to at least
@@ -647,7 +691,14 @@ int btrfs_ordered_update_i_size(struct inode *inode,
* walk forward and see if ios from higher up in the file have
* finished.
*/
- node = rb_next(&ordered->rb_node);
+ if (ordered) {
+ node = rb_next(&ordered->rb_node);
+ } else {
+ if (prev)
+ node = rb_next(prev);
+ else
+ node = rb_first(&tree->tree);
+ }
i_size_test = 0;
if (node) {
/*
@@ -655,10 +706,10 @@ int btrfs_ordered_update_i_size(struct inode *inode,
* between our ordered extent and the next one.
*/
test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- if (test->file_offset > entry_end(ordered))
+ if (test->file_offset > offset)
i_size_test = test->file_offset;
} else {
- i_size_test = i_size_read(inode);
+ i_size_test = i_size;
}
/*
@@ -667,15 +718,25 @@ int btrfs_ordered_update_i_size(struct inode *inode,
* are no delalloc bytes in this area, it is safe to update
* disk_i_size to the end of the region.
*/
- if (i_size_test > entry_end(ordered) &&
- !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
- EXTENT_DELALLOC, 0, NULL)) {
- new_i_size = min_t(u64, i_size_test, i_size_read(inode));
+ if (i_size_test > offset &&
+ !test_range_bit(io_tree, offset, i_size_test - 1,
+ EXTENT_DELALLOC, 0, NULL)) {
+ new_i_size = min_t(u64, i_size_test, i_size);
}
BTRFS_I(inode)->disk_i_size = new_i_size;
+ ret = 0;
out:
+ /*
+ * we need to remove the ordered extent with the tree lock held
+ * so that other people calling this function don't find our fully
+ * processed ordered entry and skip updating the i_size
+ */
+ if (ordered)
+ __btrfs_remove_ordered_extent(inode, ordered);
mutex_unlock(&tree->mutex);
- return 0;
+ if (ordered)
+ wake_up(&ordered->wait);
+ return ret;
}
/*
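The reworked btrfs_ordered_update_i_size above only advances the on-disk i_size when every byte between the old disk_i_size and the completed offset is fully written (no pending ordered extents or delalloc in between), and never past the in-memory i_size. A simplified model of that rule, with invented names and a flat array standing in for the rbtree (not the kernel code, not part of the patch):

    /* Illustrative sketch only -- not part of the patch. */
    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };   /* [start, end): still pending */

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    /*
     * Return the new on-disk i_size: advance to min(offset, i_size) only if
     * no pending range overlaps [disk_i_size, offset); otherwise keep the old
     * value.
     */
    static uint64_t update_disk_i_size(uint64_t disk_i_size, uint64_t i_size,
                                       uint64_t offset,
                                       const struct range *pending, int nr)
    {
        if (disk_i_size > i_size)      /* file was truncated down */
            return i_size;
        if (offset <= disk_i_size)     /* completion is behind what's on disk */
            return disk_i_size;
        for (int i = 0; i < nr; i++) {
            if (pending[i].start < offset && pending[i].end > disk_i_size)
                return disk_i_size;    /* unwritten data still in the way */
        }
        return min_u64(offset, i_size);
    }

    int main(void)
    {
        struct range pending[] = { { 8192, 12288 } };
        /* completion at 8192 with nothing pending below it: advance to 8192 */
        printf("%llu\n", (unsigned long long)
               update_disk_i_size(0, 16384, 8192, pending, 1));
        /* completion at 16384, but [8192,12288) still pending: stay at 8192 */
        printf("%llu\n", (unsigned long long)
               update_disk_i_size(8192, 16384, 16384, pending, 1));
        return 0;
    }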
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index f82e87488ca..1fe1282ef47 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -150,12 +150,13 @@ void btrfs_start_ordered_extent(struct inode *inode,
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
-int btrfs_ordered_update_i_size(struct inode *inode,
+int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
+int btrfs_wait_ordered_extents(struct btrfs_root *root,
+ int nocow_only, int delay_iput);
#endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index cfcc93c93a7..a9728680eca 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1561,6 +1561,20 @@ static int invalidate_extent_cache(struct btrfs_root *root,
return 0;
}
+static void put_inodes(struct list_head *list)
+{
+ struct inodevec *ivec;
+ while (!list_empty(list)) {
+ ivec = list_entry(list->next, struct inodevec, list);
+ list_del(&ivec->list);
+ while (ivec->nr > 0) {
+ ivec->nr--;
+ iput(ivec->inode[ivec->nr]);
+ }
+ kfree(ivec);
+ }
+}
+
static int find_next_key(struct btrfs_path *path, int level,
struct btrfs_key *key)
@@ -1723,6 +1737,11 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
btrfs_btree_balance_dirty(root, nr);
+ /*
+ * put inodes outside the transaction, otherwise we may deadlock.
+ */
+ put_inodes(&inode_list);
+
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
}
@@ -1752,19 +1771,7 @@ out:
btrfs_btree_balance_dirty(root, nr);
- /*
- * put inodes while we aren't holding the tree locks
- */
- while (!list_empty(&inode_list)) {
- struct inodevec *ivec;
- ivec = list_entry(inode_list.next, struct inodevec, list);
- list_del(&ivec->list);
- while (ivec->nr > 0) {
- ivec->nr--;
- iput(ivec->inode[ivec->nr]);
- }
- kfree(ivec);
- }
+ put_inodes(&inode_list);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@@ -3534,8 +3541,8 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
(unsigned long long)rc->block_group->key.objectid,
(unsigned long long)rc->block_group->flags);
- btrfs_start_delalloc_inodes(fs_info->tree_root);
- btrfs_wait_ordered_extents(fs_info->tree_root, 0);
+ btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
+ btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0);
while (1) {
rc->extents_found = 0;
@@ -3755,6 +3762,7 @@ out:
BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
+ btrfs_orphan_cleanup(fs_root);
}
return err;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 752a5463bf5..3f9b45704fc 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -128,6 +128,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
substring_t args[MAX_OPT_ARGS];
char *p, *num;
int intarg;
+ int ret = 0;
if (!options)
return 0;
@@ -262,12 +263,18 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_discard:
btrfs_set_opt(info->mount_opt, DISCARD);
break;
+ case Opt_err:
+ printk(KERN_INFO "btrfs: unrecognized mount option "
+ "'%s'\n", p);
+ ret = -EINVAL;
+ goto out;
default:
break;
}
}
+out:
kfree(options);
- return 0;
+ return ret;
}
/*
@@ -405,8 +412,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return 0;
}
- btrfs_start_delalloc_inodes(root);
- btrfs_wait_ordered_extents(root, 0);
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0, 0);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
@@ -450,6 +457,8 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",notreelog");
if (btrfs_test_opt(root, FLUSHONCOMMIT))
seq_puts(seq, ",flushoncommit");
+ if (btrfs_test_opt(root, DISCARD))
+ seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
return 0;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c207e8c32c9..b2acc79f1b3 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -333,6 +333,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ if (throttle)
+ btrfs_run_delayed_iputs(root);
+
return 0;
}
@@ -354,7 +357,7 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
* those extents are sent to disk but does not wait on them
*/
int btrfs_write_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages)
+ struct extent_io_tree *dirty_pages, int mark)
{
int ret;
int err = 0;
@@ -367,7 +370,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
while (1) {
ret = find_first_extent_bit(dirty_pages, start, &start, &end,
- EXTENT_DIRTY);
+ mark);
if (ret)
break;
while (start <= end) {
@@ -413,7 +416,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
* on all the pages and clear them from the dirty pages state tree
*/
int btrfs_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages)
+ struct extent_io_tree *dirty_pages, int mark)
{
int ret;
int err = 0;
@@ -425,12 +428,12 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
unsigned long index;
while (1) {
- ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
- EXTENT_DIRTY);
+ ret = find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark);
if (ret)
break;
- clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
+ clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
while (start <= end) {
index = start >> PAGE_CACHE_SHIFT;
start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
@@ -460,13 +463,13 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
* those extents are on disk for transaction or log commit
*/
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages)
+ struct extent_io_tree *dirty_pages, int mark)
{
int ret;
int ret2;
- ret = btrfs_write_marked_extents(root, dirty_pages);
- ret2 = btrfs_wait_marked_extents(root, dirty_pages);
+ ret = btrfs_write_marked_extents(root, dirty_pages, mark);
+ ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
return ret || ret2;
}
@@ -479,7 +482,8 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
return filemap_write_and_wait(btree_inode->i_mapping);
}
return btrfs_write_and_wait_marked_extents(root,
- &trans->transaction->dirty_pages);
+ &trans->transaction->dirty_pages,
+ EXTENT_DIRTY);
}
/*
@@ -497,13 +501,16 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
{
int ret;
u64 old_root_bytenr;
+ u64 old_root_used;
struct btrfs_root *tree_root = root->fs_info->tree_root;
+ old_root_used = btrfs_root_used(&root->root_item);
btrfs_write_dirty_block_groups(trans, root);
while (1) {
old_root_bytenr = btrfs_root_bytenr(&root->root_item);
- if (old_root_bytenr == root->node->start)
+ if (old_root_bytenr == root->node->start &&
+ old_root_used == btrfs_root_used(&root->root_item))
break;
btrfs_set_root_node(&root->root_item, root->node);
@@ -512,6 +519,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
&root->root_item);
BUG_ON(ret);
+ old_root_used = btrfs_root_used(&root->root_item);
ret = btrfs_write_dirty_block_groups(trans, root);
BUG_ON(ret);
}
@@ -795,7 +803,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
memcpy(&pending->root_key, &key, sizeof(key));
fail:
kfree(new_root_item);
- btrfs_unreserve_metadata_space(root, 6);
return ret;
}
@@ -807,7 +814,6 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
u64 index = 0;
struct btrfs_trans_handle *trans;
struct inode *parent_inode;
- struct inode *inode;
struct btrfs_root *parent_root;
parent_inode = pending->dentry->d_parent->d_inode;
@@ -839,8 +845,6 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
BUG_ON(ret);
- inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
- d_instantiate(pending->dentry, inode);
fail:
btrfs_end_transaction(trans, fs_info->fs_root);
return ret;
@@ -994,11 +998,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_unlock(&root->fs_info->trans_mutex);
if (flush_on_commit) {
- btrfs_start_delalloc_inodes(root);
- ret = btrfs_wait_ordered_extents(root, 0);
+ btrfs_start_delalloc_inodes(root, 1);
+ ret = btrfs_wait_ordered_extents(root, 0, 1);
BUG_ON(ret);
} else if (snap_pending) {
- ret = btrfs_wait_ordered_extents(root, 1);
+ ret = btrfs_wait_ordered_extents(root, 0, 1);
BUG_ON(ret);
}
@@ -1116,6 +1120,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
current->journal_info = NULL;
kmem_cache_free(btrfs_trans_handle_cachep, trans);
+
+ if (current != root->fs_info->transaction_kthread)
+ btrfs_run_delayed_iputs(root);
+
return ret;
}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index d4e3e7a6938..93c7ccb3311 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -107,10 +107,10 @@ void btrfs_throttle(struct btrfs_root *root);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages);
+ struct extent_io_tree *dirty_pages, int mark);
int btrfs_write_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages);
+ struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_marked_extents(struct btrfs_root *root,
- struct extent_io_tree *dirty_pages);
+ struct extent_io_tree *dirty_pages, int mark);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 741666a7676..4a9434b622e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -542,8 +542,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
- ret = btrfs_drop_extents(trans, root, inode,
- start, extent_end, extent_end, start, &alloc_hint, 1);
+ ret = btrfs_drop_extents(trans, inode, start, extent_end,
+ &alloc_hint, 1);
BUG_ON(ret);
if (found_type == BTRFS_FILE_EXTENT_REG ||
@@ -930,6 +930,17 @@ out_nowrite:
return 0;
}
+static int insert_orphan_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 offset)
+{
+ int ret;
+ ret = btrfs_find_orphan_item(root, offset);
+ if (ret > 0)
+ ret = btrfs_insert_orphan_item(trans, root, offset);
+ return ret;
+}
+
+
/*
* There are a few corners where the link count of the file can't
* be properly maintained during replay. So, instead of adding
@@ -997,9 +1008,13 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
}
BTRFS_I(inode)->index_cnt = (u64)-1;
- if (inode->i_nlink == 0 && S_ISDIR(inode->i_mode)) {
- ret = replay_dir_deletes(trans, root, NULL, path,
- inode->i_ino, 1);
+ if (inode->i_nlink == 0) {
+ if (S_ISDIR(inode->i_mode)) {
+ ret = replay_dir_deletes(trans, root, NULL, path,
+ inode->i_ino, 1);
+ BUG_ON(ret);
+ }
+ ret = insert_orphan_item(trans, root, inode->i_ino);
BUG_ON(ret);
}
btrfs_free_path(path);
@@ -1587,7 +1602,6 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
/* inode keys are done during the first stage */
if (key.type == BTRFS_INODE_ITEM_KEY &&
wc->stage == LOG_WALK_REPLAY_INODES) {
- struct inode *inode;
struct btrfs_inode_item *inode_item;
u32 mode;
@@ -1603,31 +1617,16 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
eb, i, &key);
BUG_ON(ret);
- /* for regular files, truncate away
- * extents past the new EOF
+ /* for regular files, make sure a corresponding
+ * orphan item exists. Extents past the new EOF
+ * will be truncated later by orphan cleanup.
*/
if (S_ISREG(mode)) {
- inode = read_one_inode(root,
- key.objectid);
- BUG_ON(!inode);
-
- ret = btrfs_truncate_inode_items(wc->trans,
- root, inode, inode->i_size,
- BTRFS_EXTENT_DATA_KEY);
+ ret = insert_orphan_item(wc->trans, root,
+ key.objectid);
BUG_ON(ret);
-
- /* if the nlink count is zero here, the iput
- * will free the inode. We bump it to make
- * sure it doesn't get freed until the link
- * count fixup is done
- */
- if (inode->i_nlink == 0) {
- btrfs_inc_nlink(inode);
- btrfs_update_inode(wc->trans,
- root, inode);
- }
- iput(inode);
}
+
ret = link_to_fixup_dir(wc->trans, root,
path, key.objectid);
BUG_ON(ret);
@@ -1977,10 +1976,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
{
int index1;
int index2;
+ int mark;
int ret;
struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
- u64 log_transid = 0;
+ unsigned long log_transid = 0;
mutex_lock(&root->log_mutex);
index1 = root->log_transid % 2;
@@ -2014,24 +2014,29 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
+ log_transid = root->log_transid;
+ if (log_transid % 2 == 0)
+ mark = EXTENT_DIRTY;
+ else
+ mark = EXTENT_NEW;
+
/* we start IO on all the marked extents here, but we don't actually
* wait for them until later.
*/
- ret = btrfs_write_marked_extents(log, &log->dirty_log_pages);
+ ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
BUG_ON(ret);
btrfs_set_root_node(&log->root_item, log->node);
root->log_batch = 0;
- log_transid = root->log_transid;
root->log_transid++;
log->log_transid = root->log_transid;
root->log_start_pid = 0;
smp_mb();
/*
- * log tree has been flushed to disk, new modifications of
- * the log will be written to new positions. so it's safe to
- * allow log writers to go in.
+ * IO has been started, blocks of the log tree have the WRITTEN flag set
+ * in their headers. New modifications of the log will be written to
+ * new positions, so it's safe to allow log writers to go in.
*/
mutex_unlock(&root->log_mutex);
@@ -2052,7 +2057,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
index2 = log_root_tree->log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
- btrfs_wait_marked_extents(log, &log->dirty_log_pages);
+ btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
wait_log_commit(trans, log_root_tree,
log_root_tree->log_transid);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2072,16 +2077,17 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* check the full commit flag again
*/
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
- btrfs_wait_marked_extents(log, &log->dirty_log_pages);
+ btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
}
ret = btrfs_write_and_wait_marked_extents(log_root_tree,
- &log_root_tree->dirty_log_pages);
+ &log_root_tree->dirty_log_pages,
+ EXTENT_DIRTY | EXTENT_NEW);
BUG_ON(ret);
- btrfs_wait_marked_extents(log, &log->dirty_log_pages);
+ btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_set_super_log_root(&root->fs_info->super_for_commit,
log_root_tree->node->start);
@@ -2147,12 +2153,12 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY);
+ 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
if (ret)
break;
- clear_extent_dirty(&log->dirty_log_pages,
- start, end, GFP_NOFS);
+ clear_extent_bits(&log->dirty_log_pages, start, end,
+ EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
if (log->log_transid > 0) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 7eda483d7b5..198cff28766 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2209,7 +2209,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
max_chunk_size = 10 * calc_size;
min_stripe_size = 64 * 1024 * 1024;
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
- max_chunk_size = 4 * calc_size;
+ max_chunk_size = 256 * 1024 * 1024;
min_stripe_size = 32 * 1024 * 1024;
} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
calc_size = 8 * 1024 * 1024;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index b6dd5967c48..193b58f7d3f 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -85,22 +85,23 @@ out:
return ret;
}
-int __btrfs_setxattr(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int do_setxattr(struct btrfs_trans_handle *trans,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
{
struct btrfs_dir_item *di;
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
struct btrfs_path *path;
- int ret = 0, mod = 0;
+ size_t name_len = strlen(name);
+ int ret = 0;
+
+ if (name_len + size > BTRFS_MAX_XATTR_SIZE(root))
+ return -ENOSPC;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- trans = btrfs_join_transaction(root, 1);
- btrfs_set_trans_block_group(trans, inode);
-
/* first let's see if we already have this xattr */
di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
strlen(name), -1);
@@ -118,15 +119,12 @@ int __btrfs_setxattr(struct inode *inode, const char *name,
}
ret = btrfs_delete_one_dir_name(trans, root, path, di);
- if (ret)
- goto out;
+ BUG_ON(ret);
btrfs_release_path(root, path);
/* if we don't have a value then we are removing the xattr */
- if (!value) {
- mod = 1;
+ if (!value)
goto out;
- }
} else {
btrfs_release_path(root, path);
@@ -138,20 +136,45 @@ int __btrfs_setxattr(struct inode *inode, const char *name,
}
/* ok we have to create a completely new xattr */
- ret = btrfs_insert_xattr_item(trans, root, name, strlen(name),
- value, size, inode->i_ino);
+ ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
+ name, name_len, value, size);
+ BUG_ON(ret);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+int __btrfs_setxattr(struct btrfs_trans_handle *trans,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+
+ if (trans)
+ return do_setxattr(trans, inode, name, value, size, flags);
+
+ ret = btrfs_reserve_metadata_space(root, 2);
if (ret)
- goto out;
- mod = 1;
+ return ret;
-out:
- if (mod) {
- inode->i_ctime = CURRENT_TIME;
- ret = btrfs_update_inode(trans, root, inode);
+ trans = btrfs_start_transaction(root, 1);
+ if (!trans) {
+ ret = -ENOMEM;
+ goto out;
}
+ btrfs_set_trans_block_group(trans, inode);
- btrfs_end_transaction(trans, root);
- btrfs_free_path(path);
+ ret = do_setxattr(trans, inode, name, value, size, flags);
+ if (ret)
+ goto out;
+
+ inode->i_ctime = CURRENT_TIME;
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+out:
+ btrfs_end_transaction_throttle(trans, root);
+ btrfs_unreserve_metadata_space(root, 2);
return ret;
}
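The xattr rework above splits the code into do_setxattr(), which always runs inside a caller-supplied transaction, and __btrfs_setxattr(), which starts and ends its own transaction only when the caller passes NULL. A generic sketch of that wrapper shape, with invented names (not part of the patch):

    /* Illustrative sketch only -- not part of the patch. */
    #include <stdio.h>
    #include <stdlib.h>

    struct txn { int id; };

    static struct txn *txn_start(void)  { return calloc(1, sizeof(struct txn)); }
    static void txn_end(struct txn *t)  { free(t); }

    /* Core operation: always called with a valid transaction. */
    static int do_operation(struct txn *t, const char *name)
    {
        printf("doing '%s' inside txn %p\n", name, (void *)t);
        return 0;
    }

    /*
     * Public entry point: reuse the caller's transaction when given one,
     * otherwise start a private transaction just for this call.
     */
    static int operation(struct txn *t, const char *name)
    {
        int ret;

        if (t)
            return do_operation(t, name);

        t = txn_start();
        if (!t)
            return -1;
        ret = do_operation(t, name);
        txn_end(t);
        return ret;
    }

    int main(void)
    {
        struct txn *t = txn_start();
        operation(t, "caller-managed");   /* joins the caller's transaction */
        txn_end(t);
        operation(NULL, "self-managed");  /* wrapper starts and ends its own */
        return 0;
    }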
@@ -314,7 +337,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (size == 0)
value = ""; /* empty EA, do not remove */
- return __btrfs_setxattr(dentry->d_inode, name, value, size, flags);
+
+ return __btrfs_setxattr(NULL, dentry->d_inode, name, value, size,
+ flags);
}
int btrfs_removexattr(struct dentry *dentry, const char *name)
@@ -329,10 +354,13 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
if (!btrfs_is_valid_xattr(name))
return -EOPNOTSUPP;
- return __btrfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
+
+ return __btrfs_setxattr(NULL, dentry->d_inode, name, NULL, 0,
+ XATTR_REPLACE);
}
-int btrfs_xattr_security_init(struct inode *inode, struct inode *dir)
+int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir)
{
int err;
size_t len;
@@ -354,7 +382,7 @@ int btrfs_xattr_security_init(struct inode *inode, struct inode *dir)
} else {
strcpy(name, XATTR_SECURITY_PREFIX);
strcpy(name + XATTR_SECURITY_PREFIX_LEN, suffix);
- err = __btrfs_setxattr(inode, name, value, len, 0);
+ err = __btrfs_setxattr(trans, inode, name, value, len, 0);
kfree(name);
}
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index c71e9c3cf3f..721efa0346e 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -27,15 +27,16 @@ extern struct xattr_handler *btrfs_xattr_handlers[];
extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
void *buffer, size_t size);
-extern int __btrfs_setxattr(struct inode *inode, const char *name,
- const void *value, size_t size, int flags);
-
+extern int __btrfs_setxattr(struct btrfs_trans_handle *trans,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags);
extern ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size);
extern int btrfs_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int btrfs_removexattr(struct dentry *dentry, const char *name);
-extern int btrfs_xattr_security_init(struct inode *inode, struct inode *dir);
+extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct inode *dir);
#endif /* __XATTR__ */
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index 3797e0077b3..2906077ac79 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -84,7 +84,7 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
{
struct cachefiles_object *fsdef;
- struct nameidata nd;
+ struct path path;
struct kstatfs stats;
struct dentry *graveyard, *cachedir, *root;
const struct cred *saved_cred;
@@ -114,15 +114,12 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
_debug("- fsdef %p", fsdef);
/* look up the directory at the root of the cache */
- memset(&nd, 0, sizeof(nd));
-
- ret = path_lookup(cache->rootdirname, LOOKUP_DIRECTORY, &nd);
+ ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
if (ret < 0)
goto error_open_root;
- cache->mnt = mntget(nd.path.mnt);
- root = dget(nd.path.dentry);
- path_put(&nd.path);
+ cache->mnt = path.mnt;
+ root = path.dentry;
/* check parameters */
ret = -EOPNOTSUPP;
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 4618516dd99..c2413561ea7 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -21,6 +21,7 @@
#include <linux/mount.h>
#include <linux/statfs.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/fs_struct.h>
#include "internal.h"
@@ -257,8 +258,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
if (args == data)
goto error;
*args = '\0';
- for (args++; isspace(*args); args++)
- continue;
+ args = skip_spaces(++args);
}
/* run the appropriate command handler */
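For reference, skip_spaces() simply advances past leading whitespace and returns the updated pointer. A minimal user-space sketch of the same idea (skip_spaces_demo() and the sample input are made up for illustration; this is not the kernel helper itself):

#include <ctype.h>
#include <stdio.h>

/* user-space stand-in for the kernel's skip_spaces(): advance past
 * leading whitespace and return the first non-space character */
static char *skip_spaces_demo(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

int main(void)
{
	char line[] = "bind   /var/cache/fscache";
	char *args = line;

	/* split the keyword from its arguments, as the daemon write path does */
	while (*args && !isspace((unsigned char)*args))
		args++;
	*args = '\0';
	args = skip_spaces_demo(++args);

	printf("keyword='%s' args='%s'\n", line, args);
	return 0;
}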
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index a6c8c6fe8df..1d833256386 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -11,7 +11,6 @@
#include <linux/mount.h>
#include <linux/file.h>
-#include <linux/ima.h>
#include "internal.h"
/*
@@ -923,7 +922,6 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
if (IS_ERR(file)) {
ret = PTR_ERR(file);
} else {
- ima_counts_get(file);
ret = -EIO;
if (file->f_op->write) {
pos = (loff_t) page->index << PAGE_SHIFT;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 1f42f772865..6ccf7262d1b 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -214,7 +214,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
posix_flags |= SMB_O_EXCL;
if (oflags & O_TRUNC)
posix_flags |= SMB_O_TRUNC;
- if (oflags & O_SYNC)
+ /* be safe and imply O_SYNC for O_DSYNC */
+ if (oflags & O_DSYNC)
posix_flags |= SMB_O_SYNC;
if (oflags & O_DIRECTORY)
posix_flags |= SMB_O_DIRECTORY;
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 75949d6a5f1..6177f7cca16 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -24,7 +24,7 @@
*/
/*
- * See Documentation/filesystems/Exporting
+ * See Documentation/filesystems/nfs/Exporting
* and examples in fs/exportfs
*
* Since cifs is a network file system, an "fsid" must be included for
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 429337eb7af..057e1dae12a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -76,8 +76,10 @@ static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
reopening a file. They had their effect on the original open */
if (flags & O_APPEND)
posix_flags |= (fmode_t)O_APPEND;
- if (flags & O_SYNC)
- posix_flags |= (fmode_t)O_SYNC;
+ if (flags & O_DSYNC)
+ posix_flags |= (fmode_t)O_DSYNC;
+ if (flags & __O_SYNC)
+ posix_flags |= (fmode_t)__O_SYNC;
if (flags & O_DIRECTORY)
posix_flags |= (fmode_t)O_DIRECTORY;
if (flags & O_NOFOLLOW)
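The cifs changes above track the O_SYNC/O_DSYNC split: O_SYNC is now defined as (__O_SYNC | O_DSYNC), so a plain "flags & O_SYNC" test would also match opens that only asked for O_DSYNC. A small stand-alone sketch of that flag relationship, using illustrative values modelled on asm-generic/fcntl.h:

#include <stdio.h>

/* illustrative post-split definitions (octal, as in asm-generic/fcntl.h):
 * O_SYNC is a superset of O_DSYNC, so testing "flags & O_SYNC" also
 * matches opens that asked only for O_DSYNC */
#define DEMO_O_DSYNC   00010000
#define DEMO___O_SYNC  04000000
#define DEMO_O_SYNC    (DEMO___O_SYNC | DEMO_O_DSYNC)

int main(void)
{
	unsigned int dsync_only = DEMO_O_DSYNC;

	printf("O_DSYNC-only open matches an O_SYNC test: %s\n",
	       (dsync_only & DEMO_O_SYNC) ? "yes" : "no");
	printf("...but not the __O_SYNC bit alone:        %s\n",
	       (dsync_only & DEMO___O_SYNC) ? "yes" : "no");
	return 0;
}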
diff --git a/fs/compat.c b/fs/compat.c
index 6c19040ffee..00d90c2e66f 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -38,8 +38,6 @@
#include <linux/dirent.h>
#include <linux/fsnotify.h>
#include <linux/highuid.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/syscall.h>
#include <linux/personality.h>
#include <linux/rwsem.h>
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 2346895b3a7..332dd00f089 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -111,43 +111,40 @@
#include <linux/dvb/frontend.h>
#include <linux/dvb/video.h>
+#include <linux/sort.h>
+
#ifdef CONFIG_SPARC
#include <asm/fbio.h>
#endif
-static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
- unsigned long arg, struct file *f)
-{
- return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
-}
-
-static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int w_long(unsigned int fd, unsigned int cmd,
+ compat_ulong_t __user *argp)
{
mm_segment_t old_fs = get_fs();
int err;
unsigned long val;
-
+
set_fs (KERNEL_DS);
err = sys_ioctl(fd, cmd, (unsigned long)&val);
set_fs (old_fs);
- if (!err && put_user(val, (u32 __user *)compat_ptr(arg)))
+ if (!err && put_user(val, argp))
return -EFAULT;
return err;
}
-
-static int rw_long(unsigned int fd, unsigned int cmd, unsigned long arg)
+
+static int rw_long(unsigned int fd, unsigned int cmd,
+ compat_ulong_t __user *argp)
{
mm_segment_t old_fs = get_fs();
- u32 __user *argptr = compat_ptr(arg);
int err;
unsigned long val;
-
- if(get_user(val, argptr))
+
+ if(get_user(val, argp))
return -EFAULT;
set_fs (KERNEL_DS);
err = sys_ioctl(fd, cmd, (unsigned long)&val);
set_fs (old_fs);
- if (!err && put_user(val, argptr))
+ if (!err && put_user(val, argp))
return -EFAULT;
return err;
}
@@ -161,7 +158,8 @@ struct compat_video_event {
} u;
};
-static int do_video_get_event(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int do_video_get_event(unsigned int fd, unsigned int cmd,
+ struct compat_video_event __user *up)
{
struct video_event kevent;
mm_segment_t old_fs = get_fs();
@@ -172,8 +170,6 @@ static int do_video_get_event(unsigned int fd, unsigned int cmd, unsigned long a
set_fs(old_fs);
if (!err) {
- struct compat_video_event __user *up = compat_ptr(arg);
-
err = put_user(kevent.type, &up->type);
err |= put_user(kevent.timestamp, &up->timestamp);
err |= put_user(kevent.u.size.w, &up->u.size.w);
@@ -192,15 +188,14 @@ struct compat_video_still_picture {
int32_t size;
};
-static int do_video_stillpicture(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int do_video_stillpicture(unsigned int fd, unsigned int cmd,
+ struct compat_video_still_picture __user *up)
{
- struct compat_video_still_picture __user *up;
struct video_still_picture __user *up_native;
compat_uptr_t fp;
int32_t size;
int err;
- up = (struct compat_video_still_picture __user *) arg;
err = get_user(fp, &up->iFrame);
err |= get_user(size, &up->size);
if (err)
@@ -224,14 +219,13 @@ struct compat_video_spu_palette {
compat_uptr_t palette;
};
-static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
+ struct compat_video_spu_palette __user *up)
{
- struct compat_video_spu_palette __user *up;
struct video_spu_palette __user *up_native;
compat_uptr_t palp;
int length, err;
- up = (struct compat_video_spu_palette __user *) arg;
err = get_user(palp, &up->palette);
err |= get_user(length, &up->length);
@@ -299,16 +293,15 @@ static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iov
return 0;
}
-static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
+ sg_io_hdr32_t __user *sgio32)
{
sg_io_hdr_t __user *sgio;
- sg_io_hdr32_t __user *sgio32;
u16 iovec_count;
u32 data;
void __user *dxferp;
int err;
- sgio32 = compat_ptr(arg);
if (get_user(iovec_count, &sgio32->iovec_count))
return -EFAULT;
@@ -398,11 +391,11 @@ struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
int unused;
};
-static int sg_grt_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int sg_grt_trans(unsigned int fd, unsigned int cmd, struct
+ compat_sg_req_info __user *o)
{
int err, i;
sg_req_info_t __user *r;
- struct compat_sg_req_info __user *o = (void __user *)arg;
r = compat_alloc_user_space(sizeof(sg_req_info_t)*SG_MAX_QUEUE);
err = sys_ioctl(fd,cmd,(unsigned long)r);
if (err < 0)
@@ -430,9 +423,9 @@ struct sock_fprog32 {
#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)
-static int ppp_sock_fprog_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int ppp_sock_fprog_ioctl_trans(unsigned int fd, unsigned int cmd,
+ struct sock_fprog32 __user *u_fprog32)
{
- struct sock_fprog32 __user *u_fprog32 = compat_ptr(arg);
struct sock_fprog __user *u_fprog64 = compat_alloc_user_space(sizeof(struct sock_fprog));
void __user *fptr64;
u32 fptr32;
@@ -469,15 +462,14 @@ struct ppp_idle32 {
};
#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32)
-static int ppp_gidle(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int ppp_gidle(unsigned int fd, unsigned int cmd,
+ struct ppp_idle32 __user *idle32)
{
struct ppp_idle __user *idle;
- struct ppp_idle32 __user *idle32;
__kernel_time_t xmit, recv;
int err;
idle = compat_alloc_user_space(sizeof(*idle));
- idle32 = compat_ptr(arg);
err = sys_ioctl(fd, PPPIOCGIDLE, (unsigned long) idle);
@@ -491,15 +483,14 @@ static int ppp_gidle(unsigned int fd, unsigned int cmd, unsigned long arg)
return err;
}
-static int ppp_scompress(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int ppp_scompress(unsigned int fd, unsigned int cmd,
+ struct ppp_option_data32 __user *odata32)
{
struct ppp_option_data __user *odata;
- struct ppp_option_data32 __user *odata32;
__u32 data;
void __user *datap;
odata = compat_alloc_user_space(sizeof(*odata));
- odata32 = compat_ptr(arg);
if (get_user(data, &odata32->ptr))
return -EFAULT;
@@ -515,35 +506,6 @@ static int ppp_scompress(unsigned int fd, unsigned int cmd, unsigned long arg)
return sys_ioctl(fd, PPPIOCSCOMPRESS, (unsigned long) odata);
}
-static int ppp_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- int err;
-
- switch (cmd) {
- case PPPIOCGIDLE32:
- err = ppp_gidle(fd, cmd, arg);
- break;
-
- case PPPIOCSCOMPRESS32:
- err = ppp_scompress(fd, cmd, arg);
- break;
-
- default:
- do {
- static int count;
- if (++count <= 20)
- printk("ppp_ioctl: Unknown cmd fd(%d) "
- "cmd(%08x) arg(%08x)\n",
- (int)fd, (unsigned int)cmd, (unsigned int)arg);
- } while(0);
- err = -EINVAL;
- break;
- };
-
- return err;
-}
-
-
#ifdef CONFIG_BLOCK
struct mtget32 {
compat_long_t mt_type;
@@ -561,7 +523,7 @@ struct mtpos32 {
};
#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
-static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
{
mm_segment_t old_fs = get_fs();
struct mtget get;
@@ -581,15 +543,6 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
kcmd = MTIOCGET;
karg = &get;
break;
- default:
- do {
- static int count;
- if (++count <= 20)
- printk("mt_ioctl: Unknown cmd fd(%d) "
- "cmd(%08x) arg(%08x)\n",
- (int)fd, (unsigned int)cmd, (unsigned int)arg);
- } while(0);
- return -EINVAL;
}
set_fs (KERNEL_DS);
err = sys_ioctl (fd, kcmd, (unsigned long)karg);
@@ -598,11 +551,11 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
return err;
switch (cmd) {
case MTIOCPOS32:
- upos32 = compat_ptr(arg);
+ upos32 = argp;
err = __put_user(pos.mt_blkno, &upos32->mt_blkno);
break;
case MTIOCGET32:
- umget32 = compat_ptr(arg);
+ umget32 = argp;
err = __put_user(get.mt_type, &umget32->mt_type);
err |= __put_user(get.mt_resid, &umget32->mt_resid);
err |= __put_user(get.mt_dsreg, &umget32->mt_dsreg);
@@ -617,162 +570,8 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
#endif /* CONFIG_BLOCK */
-#ifdef CONFIG_VT
-
-static int vt_check(struct file *file)
-{
- struct tty_struct *tty;
- struct inode *inode = file->f_path.dentry->d_inode;
- struct vc_data *vc;
-
- if (file->f_op->unlocked_ioctl != tty_ioctl)
- return -EINVAL;
-
- tty = (struct tty_struct *)file->private_data;
- if (tty_paranoia_check(tty, inode, "tty_ioctl"))
- return -EINVAL;
-
- if (tty->ops->ioctl != vt_ioctl)
- return -EINVAL;
-
- vc = (struct vc_data *)tty->driver_data;
- if (!vc_cons_allocated(vc->vc_num)) /* impossible? */
- return -ENOIOCTLCMD;
-
- /*
- * To have permissions to do most of the vt ioctls, we either have
- * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
- */
- if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG))
- return 1;
- return 0;
-}
-
-struct consolefontdesc32 {
- unsigned short charcount; /* characters in font (256 or 512) */
- unsigned short charheight; /* scan lines per character (1-32) */
- compat_caddr_t chardata; /* font data in expanded form */
-};
-
-static int do_fontx_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
-{
- struct consolefontdesc32 __user *user_cfd = compat_ptr(arg);
- struct console_font_op op;
- compat_caddr_t data;
- int i, perm;
-
- perm = vt_check(file);
- if (perm < 0) return perm;
-
- switch (cmd) {
- case PIO_FONTX:
- if (!perm)
- return -EPERM;
- op.op = KD_FONT_OP_SET;
- op.flags = 0;
- op.width = 8;
- if (get_user(op.height, &user_cfd->charheight) ||
- get_user(op.charcount, &user_cfd->charcount) ||
- get_user(data, &user_cfd->chardata))
- return -EFAULT;
- op.data = compat_ptr(data);
- return con_font_op(vc_cons[fg_console].d, &op);
- case GIO_FONTX:
- op.op = KD_FONT_OP_GET;
- op.flags = 0;
- op.width = 8;
- if (get_user(op.height, &user_cfd->charheight) ||
- get_user(op.charcount, &user_cfd->charcount) ||
- get_user(data, &user_cfd->chardata))
- return -EFAULT;
- if (!data)
- return 0;
- op.data = compat_ptr(data);
- i = con_font_op(vc_cons[fg_console].d, &op);
- if (i)
- return i;
- if (put_user(op.height, &user_cfd->charheight) ||
- put_user(op.charcount, &user_cfd->charcount) ||
- put_user((compat_caddr_t)(unsigned long)op.data,
- &user_cfd->chardata))
- return -EFAULT;
- return 0;
- }
- return -EINVAL;
-}
-
-struct console_font_op32 {
- compat_uint_t op; /* operation code KD_FONT_OP_* */
- compat_uint_t flags; /* KD_FONT_FLAG_* */
- compat_uint_t width, height; /* font size */
- compat_uint_t charcount;
- compat_caddr_t data; /* font data with height fixed to 32 */
-};
-
-static int do_kdfontop_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
-{
- struct console_font_op op;
- struct console_font_op32 __user *fontop = compat_ptr(arg);
- int perm = vt_check(file), i;
- struct vc_data *vc;
-
- if (perm < 0) return perm;
-
- if (copy_from_user(&op, fontop, sizeof(struct console_font_op32)))
- return -EFAULT;
- if (!perm && op.op != KD_FONT_OP_GET)
- return -EPERM;
- op.data = compat_ptr(((struct console_font_op32 *)&op)->data);
- op.flags |= KD_FONT_FLAG_OLD;
- vc = ((struct tty_struct *)file->private_data)->driver_data;
- i = con_font_op(vc, &op);
- if (i)
- return i;
- ((struct console_font_op32 *)&op)->data = (unsigned long)op.data;
- if (copy_to_user(fontop, &op, sizeof(struct console_font_op32)))
- return -EFAULT;
- return 0;
-}
-
-struct unimapdesc32 {
- unsigned short entry_ct;
- compat_caddr_t entries;
-};
-
-static int do_unimap_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
-{
- struct unimapdesc32 tmp;
- struct unimapdesc32 __user *user_ud = compat_ptr(arg);
- int perm = vt_check(file);
- struct vc_data *vc;
-
- if (perm < 0)
- return perm;
- if (copy_from_user(&tmp, user_ud, sizeof tmp))
- return -EFAULT;
- if (tmp.entries)
- if (!access_ok(VERIFY_WRITE, compat_ptr(tmp.entries),
- tmp.entry_ct*sizeof(struct unipair)))
- return -EFAULT;
- vc = ((struct tty_struct *)file->private_data)->driver_data;
- switch (cmd) {
- case PIO_UNIMAP:
- if (!perm)
- return -EPERM;
- return con_set_unimap(vc, tmp.entry_ct,
- compat_ptr(tmp.entries));
- case GIO_UNIMAP:
- if (!perm && fg_console != vc->vc_num)
- return -EPERM;
- return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct),
- compat_ptr(tmp.entries));
- }
- return 0;
-}
-
-#endif /* CONFIG_VT */
-
-static int do_smb_getmountuid(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int do_smb_getmountuid(unsigned int fd, unsigned int cmd,
+ compat_uid_t __user *argp)
{
mm_segment_t old_fs = get_fs();
__kernel_uid_t kuid;
@@ -785,20 +584,15 @@ static int do_smb_getmountuid(unsigned int fd, unsigned int cmd, unsigned long a
set_fs(old_fs);
if (err >= 0)
- err = put_user(kuid, (compat_uid_t __user *)compat_ptr(arg));
+ err = put_user(kuid, argp);
return err;
}
-static __used int
-ret_einval(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- return -EINVAL;
-}
-
-static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int ioc_settimeout(unsigned int fd, unsigned int cmd,
+ compat_ulong_t __user *argp)
{
- return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
+ return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, argp);
}
/* Bluetooth ioctls */
@@ -856,7 +650,8 @@ static int set_raw32_request(struct raw_config_request *req, struct raw32_config
return ret ? -EFAULT : 0;
}
-static int raw_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
+static int raw_ioctl(unsigned fd, unsigned cmd,
+ struct raw32_config_request __user *user_req)
{
int ret;
@@ -864,7 +659,6 @@ static int raw_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
case RAW_SETBIND:
case RAW_GETBIND: {
struct raw_config_request req;
- struct raw32_config_request __user *user_req = compat_ptr(arg);
mm_segment_t oldfs = get_fs();
if ((ret = get_raw32_request(&req, user_req)))
@@ -879,9 +673,6 @@ static int raw_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
}
break;
}
- default:
- ret = sys_ioctl(fd, cmd, arg);
- break;
}
return ret;
}
@@ -909,11 +700,11 @@ struct serial_struct32 {
compat_int_t reserved[1];
};
-static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
+static int serial_struct_ioctl(unsigned fd, unsigned cmd,
+ struct serial_struct32 __user *ss32)
{
typedef struct serial_struct SS;
typedef struct serial_struct32 SS32;
- struct serial_struct32 __user *ss32 = compat_ptr(arg);
int err;
struct serial_struct ss;
mm_segment_t oldseg = get_fs();
@@ -951,96 +742,6 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
return err;
}
-struct usbdevfs_ctrltransfer32 {
- u8 bRequestType;
- u8 bRequest;
- u16 wValue;
- u16 wIndex;
- u16 wLength;
- u32 timeout; /* in milliseconds */
- compat_caddr_t data;
-};
-
-#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
-
-static int do_usbdevfs_control(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct usbdevfs_ctrltransfer32 __user *p32 = compat_ptr(arg);
- struct usbdevfs_ctrltransfer __user *p;
- __u32 udata;
- p = compat_alloc_user_space(sizeof(*p));
- if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) ||
- get_user(udata, &p32->data) ||
- put_user(compat_ptr(udata), &p->data))
- return -EFAULT;
- return sys_ioctl(fd, USBDEVFS_CONTROL, (unsigned long)p);
-}
-
-
-struct usbdevfs_bulktransfer32 {
- compat_uint_t ep;
- compat_uint_t len;
- compat_uint_t timeout; /* in milliseconds */
- compat_caddr_t data;
-};
-
-#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32)
-
-static int do_usbdevfs_bulk(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct usbdevfs_bulktransfer32 __user *p32 = compat_ptr(arg);
- struct usbdevfs_bulktransfer __user *p;
- compat_uint_t n;
- compat_caddr_t addr;
-
- p = compat_alloc_user_space(sizeof(*p));
-
- if (get_user(n, &p32->ep) || put_user(n, &p->ep) ||
- get_user(n, &p32->len) || put_user(n, &p->len) ||
- get_user(n, &p32->timeout) || put_user(n, &p->timeout) ||
- get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data))
- return -EFAULT;
-
- return sys_ioctl(fd, USBDEVFS_BULK, (unsigned long)p);
-}
-
-
-/*
- * USBDEVFS_SUBMITURB, USBDEVFS_REAPURB and USBDEVFS_REAPURBNDELAY
- * are handled in usbdevfs core. -Christopher Li
- */
-
-struct usbdevfs_disconnectsignal32 {
- compat_int_t signr;
- compat_caddr_t context;
-};
-
-#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32)
-
-static int do_usbdevfs_discsignal(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct usbdevfs_disconnectsignal kdis;
- struct usbdevfs_disconnectsignal32 __user *udis;
- mm_segment_t old_fs;
- u32 uctx;
- int err;
-
- udis = compat_ptr(arg);
-
- if (get_user(kdis.signr, &udis->signr) ||
- __get_user(uctx, &udis->context))
- return -EFAULT;
-
- kdis.context = compat_ptr(uctx);
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_ioctl(fd, USBDEVFS_DISCSIGNAL, (unsigned long) &kdis);
- set_fs(old_fs);
-
- return err;
-}
-
/*
* I2C layer ioctls
*/
@@ -1069,9 +770,9 @@ struct i2c_rdwr_aligned {
struct i2c_msg msgs[0];
};
-static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
+ struct i2c_rdwr_ioctl_data32 __user *udata)
{
- struct i2c_rdwr_ioctl_data32 __user *udata = compat_ptr(arg);
struct i2c_rdwr_aligned __user *tdata;
struct i2c_msg __user *tmsgs;
struct i2c_msg32 __user *umsgs;
@@ -1105,10 +806,10 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd, unsigned long ar
return sys_ioctl(fd, cmd, (unsigned long)tdata);
}
-static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd,
+ struct i2c_smbus_ioctl_data32 __user *udata)
{
struct i2c_smbus_ioctl_data __user *tdata;
- struct i2c_smbus_ioctl_data32 __user *udata;
compat_caddr_t datap;
tdata = compat_alloc_user_space(sizeof(*tdata));
@@ -1117,7 +818,6 @@ static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd, unsigned long a
if (!access_ok(VERIFY_WRITE, tdata, sizeof(*tdata)))
return -EFAULT;
- udata = compat_ptr(arg);
if (!access_ok(VERIFY_READ, udata, sizeof(*udata)))
return -EFAULT;
@@ -1137,7 +837,7 @@ static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd, unsigned long a
#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
#define RTC_EPOCH_SET32 _IOW('p', 0x0e, compat_ulong_t)
-static int rtc_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
+static int rtc_ioctl(unsigned fd, unsigned cmd, void __user *argp)
{
mm_segment_t oldfs = get_fs();
compat_ulong_t val32;
@@ -1155,29 +855,14 @@ static int rtc_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
if (ret)
return ret;
val32 = kval;
- return put_user(val32, (unsigned int __user *)arg);
+ return put_user(val32, (unsigned int __user *)argp);
case RTC_IRQP_SET32:
- return sys_ioctl(fd, RTC_IRQP_SET, arg);
+ return sys_ioctl(fd, RTC_IRQP_SET, (unsigned long)argp);
case RTC_EPOCH_SET32:
- return sys_ioctl(fd, RTC_EPOCH_SET, arg);
- default:
- /* unreached */
- return -ENOIOCTLCMD;
+ return sys_ioctl(fd, RTC_EPOCH_SET, (unsigned long)argp);
}
-}
-static int
-lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct compat_timeval __user *tc = (struct compat_timeval __user *)arg;
- struct timeval __user *tn = compat_alloc_user_space(sizeof(struct timeval));
- struct timeval ts;
- if (get_user(ts.tv_sec, &tc->tv_sec) ||
- get_user(ts.tv_usec, &tc->tv_usec) ||
- put_user(ts.tv_sec, &tn->tv_sec) ||
- put_user(ts.tv_usec, &tn->tv_usec))
- return -EFAULT;
- return sys_ioctl(fd, cmd, (unsigned long)tn);
+ return -ENOIOCTLCMD;
}
/* on ia32 l_start is on a 32-bit boundary */
@@ -1197,9 +882,9 @@ struct space_resv_32 {
#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
/* just account for different alignment */
-static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
+static int compat_ioctl_preallocate(struct file *file,
+ struct space_resv_32 __user *p32)
{
- struct space_resv_32 __user *p32 = compat_ptr(arg);
struct space_resv __user *p = compat_alloc_user_space(sizeof(*p));
if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
@@ -1215,27 +900,13 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
}
#endif
+/*
+ * simple reversible transform to make our table more evenly
+ * distributed after sorting.
+ */
+#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
-typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int,
- unsigned long, struct file *);
-
-struct ioctl_trans {
- unsigned long cmd;
- ioctl_trans_handler_t handler;
- struct ioctl_trans *next;
-};
-
-#define HANDLE_IOCTL(cmd,handler) \
- { (cmd), (ioctl_trans_handler_t)(handler) },
-
-/* pointer to compatible structure or no argument */
-#define COMPATIBLE_IOCTL(cmd) \
- { (cmd), do_ioctl32_pointer },
-
-/* argument is an unsigned long integer, not a pointer */
-#define ULONG_IOCTL(cmd) \
- { (cmd), (ioctl_trans_handler_t)sys_ioctl },
-
+#define COMPATIBLE_IOCTL(cmd) XFORM(cmd),
/* ioctl should not be warned about even if it's not implemented.
Valid reasons to use this:
- It is implemented with ->compat_ioctl on some device, but programs
@@ -1245,7 +916,7 @@ struct ioctl_trans {
Most other reasons are not valid. */
#define IGNORE_IOCTL(cmd) COMPATIBLE_IOCTL(cmd)
-static struct ioctl_trans ioctl_start[] = {
+static unsigned int ioctl_pointer[] = {
/* compatible ioctls first */
COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */
COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */
@@ -1256,7 +927,6 @@ COMPATIBLE_IOCTL(TCSETA)
COMPATIBLE_IOCTL(TCSETAW)
COMPATIBLE_IOCTL(TCSETAF)
COMPATIBLE_IOCTL(TCSBRK)
-ULONG_IOCTL(TCSBRKP)
COMPATIBLE_IOCTL(TCXONC)
COMPATIBLE_IOCTL(TCFLSH)
COMPATIBLE_IOCTL(TCGETS)
@@ -1266,7 +936,6 @@ COMPATIBLE_IOCTL(TCSETSF)
COMPATIBLE_IOCTL(TIOCLINUX)
COMPATIBLE_IOCTL(TIOCSBRK)
COMPATIBLE_IOCTL(TIOCCBRK)
-ULONG_IOCTL(TIOCMIWAIT)
COMPATIBLE_IOCTL(TIOCGICOUNT)
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD)
@@ -1288,7 +957,6 @@ COMPATIBLE_IOCTL(TIOCSTI)
COMPATIBLE_IOCTL(TIOCOUTQ)
COMPATIBLE_IOCTL(TIOCSPGRP)
COMPATIBLE_IOCTL(TIOCGPGRP)
-ULONG_IOCTL(TIOCSCTTY)
COMPATIBLE_IOCTL(TIOCGPTN)
COMPATIBLE_IOCTL(TIOCSPTLCK)
COMPATIBLE_IOCTL(TIOCSERGETLSR)
@@ -1311,44 +979,11 @@ COMPATIBLE_IOCTL(FIGETBSZ)
/* 'X' - originally XFS but some now in the VFS */
COMPATIBLE_IOCTL(FIFREEZE)
COMPATIBLE_IOCTL(FITHAW)
-/* RAID */
-COMPATIBLE_IOCTL(RAID_VERSION)
-COMPATIBLE_IOCTL(GET_ARRAY_INFO)
-COMPATIBLE_IOCTL(GET_DISK_INFO)
-COMPATIBLE_IOCTL(PRINT_RAID_DEBUG)
-COMPATIBLE_IOCTL(RAID_AUTORUN)
-COMPATIBLE_IOCTL(CLEAR_ARRAY)
-COMPATIBLE_IOCTL(ADD_NEW_DISK)
-ULONG_IOCTL(HOT_REMOVE_DISK)
-COMPATIBLE_IOCTL(SET_ARRAY_INFO)
-COMPATIBLE_IOCTL(SET_DISK_INFO)
-COMPATIBLE_IOCTL(WRITE_RAID_INFO)
-COMPATIBLE_IOCTL(UNPROTECT_ARRAY)
-COMPATIBLE_IOCTL(PROTECT_ARRAY)
-ULONG_IOCTL(HOT_ADD_DISK)
-ULONG_IOCTL(SET_DISK_FAULTY)
-COMPATIBLE_IOCTL(RUN_ARRAY)
-COMPATIBLE_IOCTL(STOP_ARRAY)
-COMPATIBLE_IOCTL(STOP_ARRAY_RO)
-COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
-COMPATIBLE_IOCTL(GET_BITMAP_FILE)
-ULONG_IOCTL(SET_BITMAP_FILE)
-/* Big K */
-COMPATIBLE_IOCTL(PIO_FONT)
-COMPATIBLE_IOCTL(GIO_FONT)
-COMPATIBLE_IOCTL(PIO_CMAP)
-COMPATIBLE_IOCTL(GIO_CMAP)
-ULONG_IOCTL(KDSIGACCEPT)
COMPATIBLE_IOCTL(KDGETKEYCODE)
COMPATIBLE_IOCTL(KDSETKEYCODE)
-ULONG_IOCTL(KIOCSOUND)
-ULONG_IOCTL(KDMKTONE)
COMPATIBLE_IOCTL(KDGKBTYPE)
-ULONG_IOCTL(KDSETMODE)
COMPATIBLE_IOCTL(KDGETMODE)
-ULONG_IOCTL(KDSKBMODE)
COMPATIBLE_IOCTL(KDGKBMODE)
-ULONG_IOCTL(KDSKBMETA)
COMPATIBLE_IOCTL(KDGKBMETA)
COMPATIBLE_IOCTL(KDGKBENT)
COMPATIBLE_IOCTL(KDSKBENT)
@@ -1358,15 +993,7 @@ COMPATIBLE_IOCTL(KDGKBDIACR)
COMPATIBLE_IOCTL(KDSKBDIACR)
COMPATIBLE_IOCTL(KDKBDREP)
COMPATIBLE_IOCTL(KDGKBLED)
-ULONG_IOCTL(KDSKBLED)
COMPATIBLE_IOCTL(KDGETLED)
-ULONG_IOCTL(KDSETLED)
-COMPATIBLE_IOCTL(GIO_SCRNMAP)
-COMPATIBLE_IOCTL(PIO_SCRNMAP)
-COMPATIBLE_IOCTL(GIO_UNISCRNMAP)
-COMPATIBLE_IOCTL(PIO_UNISCRNMAP)
-COMPATIBLE_IOCTL(PIO_FONTRESET)
-COMPATIBLE_IOCTL(PIO_UNIMAPCLR)
#ifdef CONFIG_BLOCK
/* Big S */
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
@@ -1378,20 +1005,6 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
#endif
-/* Big V */
-COMPATIBLE_IOCTL(VT_SETMODE)
-COMPATIBLE_IOCTL(VT_GETMODE)
-COMPATIBLE_IOCTL(VT_GETSTATE)
-COMPATIBLE_IOCTL(VT_OPENQRY)
-ULONG_IOCTL(VT_ACTIVATE)
-ULONG_IOCTL(VT_WAITACTIVE)
-ULONG_IOCTL(VT_RELDISP)
-ULONG_IOCTL(VT_DISALLOCATE)
-COMPATIBLE_IOCTL(VT_RESIZE)
-COMPATIBLE_IOCTL(VT_RESIZEX)
-COMPATIBLE_IOCTL(VT_LOCKSWITCH)
-COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
-COMPATIBLE_IOCTL(VT_GETHIFONTMASK)
/* Little p (/dev/rtc, /dev/envctrl, etc.) */
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
@@ -1420,11 +1033,12 @@ COMPATIBLE_IOCTL(MTIOCTOP)
/* Socket level stuff */
COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
+/* loop */
+IGNORE_IOCTL(LOOP_CLR_FD)
/* SG stuff */
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
COMPATIBLE_IOCTL(SG_EMULATED_HOST)
-ULONG_IOCTL(SG_SET_TRANSFORM)
COMPATIBLE_IOCTL(SG_GET_TRANSFORM)
COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE)
COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE)
@@ -1478,8 +1092,6 @@ COMPATIBLE_IOCTL(PPPIOCGCHAN)
/* PPPOX */
COMPATIBLE_IOCTL(PPPOEIOCSFWD)
COMPATIBLE_IOCTL(PPPOEIOCDFWD)
-/* LP */
-COMPATIBLE_IOCTL(LPGETSTATUS)
/* ppdev */
COMPATIBLE_IOCTL(PPSETMODE)
COMPATIBLE_IOCTL(PPRSTATUS)
@@ -1661,8 +1273,6 @@ COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
COMPATIBLE_IOCTL(OSS_GETVERSION)
/* AUTOFS */
-ULONG_IOCTL(AUTOFS_IOC_READY)
-ULONG_IOCTL(AUTOFS_IOC_FAIL)
COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC)
COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER)
COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE)
@@ -1755,30 +1365,11 @@ COMPATIBLE_IOCTL(PCIIOC_CONTROLLER)
COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO)
COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM)
COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE)
-/* USB */
-COMPATIBLE_IOCTL(USBDEVFS_RESETEP)
-COMPATIBLE_IOCTL(USBDEVFS_SETINTERFACE)
-COMPATIBLE_IOCTL(USBDEVFS_SETCONFIGURATION)
-COMPATIBLE_IOCTL(USBDEVFS_GETDRIVER)
-COMPATIBLE_IOCTL(USBDEVFS_DISCARDURB)
-COMPATIBLE_IOCTL(USBDEVFS_CLAIMINTERFACE)
-COMPATIBLE_IOCTL(USBDEVFS_RELEASEINTERFACE)
-COMPATIBLE_IOCTL(USBDEVFS_CONNECTINFO)
-COMPATIBLE_IOCTL(USBDEVFS_HUB_PORTINFO)
-COMPATIBLE_IOCTL(USBDEVFS_RESET)
-COMPATIBLE_IOCTL(USBDEVFS_SUBMITURB32)
-COMPATIBLE_IOCTL(USBDEVFS_REAPURB32)
-COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32)
-COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT)
/* NBD */
-ULONG_IOCTL(NBD_SET_SOCK)
-ULONG_IOCTL(NBD_SET_BLKSIZE)
-ULONG_IOCTL(NBD_SET_SIZE)
COMPATIBLE_IOCTL(NBD_DO_IT)
COMPATIBLE_IOCTL(NBD_CLEAR_SOCK)
COMPATIBLE_IOCTL(NBD_CLEAR_QUE)
COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
-ULONG_IOCTL(NBD_SET_SIZE_BLOCKS)
COMPATIBLE_IOCTL(NBD_DISCONNECT)
/* i2c */
COMPATIBLE_IOCTL(I2C_SLAVE)
@@ -1878,42 +1469,6 @@ COMPATIBLE_IOCTL(JSIOCGAXES)
COMPATIBLE_IOCTL(JSIOCGBUTTONS)
COMPATIBLE_IOCTL(JSIOCGNAME(0))
-/* now things that need handlers */
-#ifdef CONFIG_BLOCK
-HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
-HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans)
-#endif
-HANDLE_IOCTL(PPPIOCGIDLE32, ppp_ioctl_trans)
-HANDLE_IOCTL(PPPIOCSCOMPRESS32, ppp_ioctl_trans)
-HANDLE_IOCTL(PPPIOCSPASS32, ppp_sock_fprog_ioctl_trans)
-HANDLE_IOCTL(PPPIOCSACTIVE32, ppp_sock_fprog_ioctl_trans)
-#ifdef CONFIG_BLOCK
-HANDLE_IOCTL(MTIOCGET32, mt_ioctl_trans)
-HANDLE_IOCTL(MTIOCPOS32, mt_ioctl_trans)
-#endif
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
-HANDLE_IOCTL(AUTOFS_IOC_SETTIMEOUT32, ioc_settimeout)
-#ifdef CONFIG_VT
-HANDLE_IOCTL(PIO_FONTX, do_fontx_ioctl)
-HANDLE_IOCTL(GIO_FONTX, do_fontx_ioctl)
-HANDLE_IOCTL(PIO_UNIMAP, do_unimap_ioctl)
-HANDLE_IOCTL(GIO_UNIMAP, do_unimap_ioctl)
-HANDLE_IOCTL(KDFONTOP, do_kdfontop_ioctl)
-#endif
-/* One SMB ioctl needs translations. */
-#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
-HANDLE_IOCTL(SMB_IOC_GETMOUNTUID_32, do_smb_getmountuid)
-/* block stuff */
-#ifdef CONFIG_BLOCK
-/* loop */
-IGNORE_IOCTL(LOOP_CLR_FD)
-/* Raw devices */
-HANDLE_IOCTL(RAW_SETBIND, raw_ioctl)
-HANDLE_IOCTL(RAW_GETBIND, raw_ioctl)
-#endif
-/* Serial */
-HANDLE_IOCTL(TIOCGSERIAL, serial_struct_ioctl)
-HANDLE_IOCTL(TIOCSSERIAL, serial_struct_ioctl)
#ifdef TIOCGLTC
COMPATIBLE_IOCTL(TIOCGLTC)
COMPATIBLE_IOCTL(TIOCSLTC)
@@ -1928,39 +1483,6 @@ COMPATIBLE_IOCTL(TIOCSLTC)
COMPATIBLE_IOCTL(TIOCSTART)
COMPATIBLE_IOCTL(TIOCSTOP)
#endif
-/* Usbdevfs */
-HANDLE_IOCTL(USBDEVFS_CONTROL32, do_usbdevfs_control)
-HANDLE_IOCTL(USBDEVFS_BULK32, do_usbdevfs_bulk)
-HANDLE_IOCTL(USBDEVFS_DISCSIGNAL32, do_usbdevfs_discsignal)
-COMPATIBLE_IOCTL(USBDEVFS_IOCTL32)
-/* i2c */
-HANDLE_IOCTL(I2C_FUNCS, w_long)
-HANDLE_IOCTL(I2C_RDWR, do_i2c_rdwr_ioctl)
-HANDLE_IOCTL(I2C_SMBUS, do_i2c_smbus_ioctl)
-/* Not implemented in the native kernel */
-HANDLE_IOCTL(RTC_IRQP_READ32, rtc_ioctl)
-HANDLE_IOCTL(RTC_IRQP_SET32, rtc_ioctl)
-HANDLE_IOCTL(RTC_EPOCH_READ32, rtc_ioctl)
-HANDLE_IOCTL(RTC_EPOCH_SET32, rtc_ioctl)
-
-/* dvb */
-HANDLE_IOCTL(VIDEO_GET_EVENT, do_video_get_event)
-HANDLE_IOCTL(VIDEO_STILLPICTURE, do_video_stillpicture)
-HANDLE_IOCTL(VIDEO_SET_SPU_PALETTE, do_video_set_spu_palette)
-
-/* parport */
-COMPATIBLE_IOCTL(LPTIME)
-COMPATIBLE_IOCTL(LPCHAR)
-COMPATIBLE_IOCTL(LPABORTOPEN)
-COMPATIBLE_IOCTL(LPCAREFUL)
-COMPATIBLE_IOCTL(LPWAIT)
-COMPATIBLE_IOCTL(LPSETIRQ)
-COMPATIBLE_IOCTL(LPGETSTATUS)
-COMPATIBLE_IOCTL(LPGETSTATUS)
-COMPATIBLE_IOCTL(LPRESET)
-/*LPGETSTATS not implemented, but no kernels seem to compile it in anyways*/
-COMPATIBLE_IOCTL(LPGETFLAGS)
-HANDLE_IOCTL(LPSETTIMEOUT, lp_timeout_trans)
/* fat 'r' ioctls. These are handled by fat with ->compat_ioctl,
but we don't want warnings on other file systems. So declare
@@ -1988,12 +1510,108 @@ IGNORE_IOCTL(FBIOGCURSOR32)
#endif
};
-#define IOCTL_HASHSIZE 256
-static struct ioctl_trans *ioctl32_hash_table[IOCTL_HASHSIZE];
-
-static inline unsigned long ioctl32_hash(unsigned long cmd)
+/*
+ * Convert common ioctl arguments based on their command number
+ *
+ * Please do not add any code in here. Instead, implement
+ * a compat_ioctl operation in the place that handles the
+ * ioctl for the native case.
+ */
+static long do_ioctl_trans(int fd, unsigned int cmd,
+ unsigned long arg, struct file *file)
{
- return (((cmd >> 6) ^ (cmd >> 4) ^ cmd)) % IOCTL_HASHSIZE;
+ void __user *argp = compat_ptr(arg);
+
+ switch (cmd) {
+ case PPPIOCGIDLE32:
+ return ppp_gidle(fd, cmd, argp);
+ case PPPIOCSCOMPRESS32:
+ return ppp_scompress(fd, cmd, argp);
+ case PPPIOCSPASS32:
+ case PPPIOCSACTIVE32:
+ return ppp_sock_fprog_ioctl_trans(fd, cmd, argp);
+#ifdef CONFIG_BLOCK
+ case SG_IO:
+ return sg_ioctl_trans(fd, cmd, argp);
+ case SG_GET_REQUEST_TABLE:
+ return sg_grt_trans(fd, cmd, argp);
+ case MTIOCGET32:
+ case MTIOCPOS32:
+ return mt_ioctl_trans(fd, cmd, argp);
+ /* Raw devices */
+ case RAW_SETBIND:
+ case RAW_GETBIND:
+ return raw_ioctl(fd, cmd, argp);
+#endif
+#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
+ case AUTOFS_IOC_SETTIMEOUT32:
+ return ioc_settimeout(fd, cmd, argp);
+ /* One SMB ioctl needs translations. */
+#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
+ case SMB_IOC_GETMOUNTUID_32:
+ return do_smb_getmountuid(fd, cmd, argp);
+ /* Serial */
+ case TIOCGSERIAL:
+ case TIOCSSERIAL:
+ return serial_struct_ioctl(fd, cmd, argp);
+ /* i2c */
+ case I2C_FUNCS:
+ return w_long(fd, cmd, argp);
+ case I2C_RDWR:
+ return do_i2c_rdwr_ioctl(fd, cmd, argp);
+ case I2C_SMBUS:
+ return do_i2c_smbus_ioctl(fd, cmd, argp);
+ /* Not implemented in the native kernel */
+ case RTC_IRQP_READ32:
+ case RTC_IRQP_SET32:
+ case RTC_EPOCH_READ32:
+ case RTC_EPOCH_SET32:
+ return rtc_ioctl(fd, cmd, argp);
+
+ /* dvb */
+ case VIDEO_GET_EVENT:
+ return do_video_get_event(fd, cmd, argp);
+ case VIDEO_STILLPICTURE:
+ return do_video_stillpicture(fd, cmd, argp);
+ case VIDEO_SET_SPU_PALETTE:
+ return do_video_set_spu_palette(fd, cmd, argp);
+ }
+
+ /*
+ * These take an integer instead of a pointer as 'arg',
+ * so we must not do a compat_ptr() translation.
+ */
+ switch (cmd) {
+ /* Big T */
+ case TCSBRKP:
+ case TIOCMIWAIT:
+ case TIOCSCTTY:
+ /* RAID */
+ case HOT_REMOVE_DISK:
+ case HOT_ADD_DISK:
+ case SET_DISK_FAULTY:
+ case SET_BITMAP_FILE:
+ /* Big K */
+ case KDSIGACCEPT:
+ case KIOCSOUND:
+ case KDMKTONE:
+ case KDSETMODE:
+ case KDSKBMODE:
+ case KDSKBMETA:
+ case KDSKBLED:
+ case KDSETLED:
+ /* AUTOFS */
+ case AUTOFS_IOC_READY:
+ case AUTOFS_IOC_FAIL:
+ /* NBD */
+ case NBD_SET_SOCK:
+ case NBD_SET_BLKSIZE:
+ case NBD_SET_SIZE:
+ case NBD_SET_SIZE_BLOCKS:
+ return do_vfs_ioctl(file, fd, cmd, arg);
+ }
+
+ return -ENOIOCTLCMD;
}
static void compat_ioctl_error(struct file *filp, unsigned int fd,
@@ -2025,12 +1643,33 @@ static void compat_ioctl_error(struct file *filp, unsigned int fd,
free_page((unsigned long)path);
}
+static int compat_ioctl_check_table(unsigned int xcmd)
+{
+ int i;
+ const int max = ARRAY_SIZE(ioctl_pointer) - 1;
+
+ BUILD_BUG_ON(max >= (1 << 16));
+
+ /* guess initial offset into table, assuming a
+ normalized distribution */
+ i = ((xcmd >> 16) * max) >> 16;
+
+ /* do linear search up first, until greater or equal */
+ while (ioctl_pointer[i] < xcmd && i < max)
+ i++;
+
+ /* then do linear search down */
+ while (ioctl_pointer[i] > xcmd && i > 0)
+ i--;
+
+ return ioctl_pointer[i] == xcmd;
+}
+
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
struct file *filp;
int error = -EBADF;
- struct ioctl_trans *t;
int fput_needed;
filp = fget_light(fd, &fput_needed);
@@ -2058,7 +1697,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
case FS_IOC_RESVSP_32:
case FS_IOC_RESVSP64_32:
- error = compat_ioctl_preallocate(filp, arg);
+ error = compat_ioctl_preallocate(filp, compat_ptr(arg));
goto out_fput;
#else
case FS_IOC_RESVSP:
@@ -2087,12 +1726,11 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
break;
}
- for (t = ioctl32_hash_table[ioctl32_hash(cmd)]; t; t = t->next) {
- if (t->cmd == cmd)
- goto found_handler;
- }
+ if (compat_ioctl_check_table(XFORM(cmd)))
+ goto found_handler;
- {
+ error = do_ioctl_trans(fd, cmd, arg, filp);
+ if (error == -ENOIOCTLCMD) {
static int count;
if (++count <= 50)
@@ -2103,13 +1741,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
goto out_fput;
found_handler:
- if (t->handler) {
- lock_kernel();
- error = t->handler(fd, cmd, arg, filp);
- unlock_kernel();
- goto out_fput;
- }
-
+ arg = (unsigned long)compat_ptr(arg);
do_ioctl:
error = do_vfs_ioctl(filp, fd, cmd, arg);
out_fput:
@@ -2118,35 +1750,22 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
return error;
}
-static void ioctl32_insert_translation(struct ioctl_trans *trans)
+static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
{
- unsigned long hash;
- struct ioctl_trans *t;
-
- hash = ioctl32_hash (trans->cmd);
- if (!ioctl32_hash_table[hash])
- ioctl32_hash_table[hash] = trans;
- else {
- t = ioctl32_hash_table[hash];
- while (t->next)
- t = t->next;
- trans->next = NULL;
- t->next = trans;
- }
+ unsigned int a, b;
+ a = *(unsigned int *)p;
+ b = *(unsigned int *)q;
+ if (a > b)
+ return 1;
+ if (a < b)
+ return -1;
+ return 0;
}
static int __init init_sys32_ioctl(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ioctl_start); i++) {
- if (ioctl_start[i].next) {
- printk("ioctl translation %d bad\n",i);
- return -1;
- }
-
- ioctl32_insert_translation(&ioctl_start[i]);
- }
+ sort(ioctl_pointer, ARRAY_SIZE(ioctl_pointer), sizeof(*ioctl_pointer),
+ init_sys32_ioctl_cmp, NULL);
return 0;
}
__initcall(init_sys32_ioctl);
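To illustrate the replacement of the hash table with a sorted array above (the XFORM() transform, the ioctl_pointer[] list sorted at init time, and the biased linear search in compat_ioctl_check_table()), here is a self-contained user-space sketch; the command numbers in the table are hypothetical and the code only mirrors the lookup logic, it is not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>

/* same reversible transform as the patch: spread the command numbers
 * more evenly before sorting */
#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)

/* hypothetical command numbers standing in for COMPATIBLE_IOCTL() entries */
static unsigned int table[] = {
	XFORM(0x4B50u), XFORM(0x5401u), XFORM(0x5402u),
	XFORM(0x125Du), XFORM(0x5331u),
};
#define TABLE_SIZE (sizeof(table) / sizeof(table[0]))

static int cmp_uint(const void *p, const void *q)
{
	unsigned int a = *(const unsigned int *)p;
	unsigned int b = *(const unsigned int *)q;

	return (a > b) - (a < b);
}

/* mirrors compat_ioctl_check_table(): guess a start index from the
 * high 16 bits of the transformed command, scan up, then back down */
static int check_table(unsigned int xcmd)
{
	const int max = TABLE_SIZE - 1;
	int i = ((xcmd >> 16) * max) >> 16;

	while (table[i] < xcmd && i < max)
		i++;
	while (table[i] > xcmd && i > 0)
		i--;
	return table[i] == xcmd;
}

int main(void)
{
	/* one-time sort, as init_sys32_ioctl() does with sort() */
	qsort(table, TABLE_SIZE, sizeof(table[0]), cmp_uint);

	printf("0x5401 known:   %d\n", check_table(XFORM(0x5401u)));
	printf("0xdead unknown: %d\n", check_table(XFORM(0xdeadu)));
	return 0;
}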
diff --git a/fs/dcache.c b/fs/dcache.c
index a100fa35a48..953173a293a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -978,6 +978,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
q.hash = full_name_hash(q.name, q.len);
return d_alloc(parent, &q);
}
+EXPORT_SYMBOL(d_alloc_name);
/* the caller must hold dcache_lock */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 0d23b52dd22..b486169f42b 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -32,7 +32,9 @@ static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
-static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev,
+ void *data, const struct file_operations *fops)
+
{
struct inode *inode = new_inode(sb);
@@ -44,14 +46,18 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
- inode->i_fop = &debugfs_file_operations;
+ inode->i_fop = fops ? fops : &debugfs_file_operations;
+ inode->i_private = data;
break;
case S_IFLNK:
inode->i_op = &debugfs_link_operations;
+ inode->i_fop = fops;
+ inode->i_private = data;
break;
case S_IFDIR:
inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
+ inode->i_fop = fops ? fops : &simple_dir_operations;
+ inode->i_private = data;
/* directory inodes start off with i_nlink == 2
* (for "." entry) */
@@ -64,7 +70,8 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
/* SMP-safe */
static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t dev)
+ int mode, dev_t dev, void *data,
+ const struct file_operations *fops)
{
struct inode *inode;
int error = -EPERM;
@@ -72,7 +79,7 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
if (dentry->d_inode)
return -EEXIST;
- inode = debugfs_get_inode(dir->i_sb, mode, dev);
+ inode = debugfs_get_inode(dir->i_sb, mode, dev, data, fops);
if (inode) {
d_instantiate(dentry, inode);
dget(dentry);
@@ -81,12 +88,13 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
return error;
}
-static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode,
+ void *data, const struct file_operations *fops)
{
int res;
mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
- res = debugfs_mknod(dir, dentry, mode, 0);
+ res = debugfs_mknod(dir, dentry, mode, 0, data, fops);
if (!res) {
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
@@ -94,18 +102,20 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return res;
}
-static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode)
+static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode,
+ void *data, const struct file_operations *fops)
{
mode = (mode & S_IALLUGO) | S_IFLNK;
- return debugfs_mknod(dir, dentry, mode, 0);
+ return debugfs_mknod(dir, dentry, mode, 0, data, fops);
}
-static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode)
+static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode,
+ void *data, const struct file_operations *fops)
{
int res;
mode = (mode & S_IALLUGO) | S_IFREG;
- res = debugfs_mknod(dir, dentry, mode, 0);
+ res = debugfs_mknod(dir, dentry, mode, 0, data, fops);
if (!res)
fsnotify_create(dir, dentry);
return res;
@@ -139,7 +149,9 @@ static struct file_system_type debug_fs_type = {
static int debugfs_create_by_name(const char *name, mode_t mode,
struct dentry *parent,
- struct dentry **dentry)
+ struct dentry **dentry,
+ void *data,
+ const struct file_operations *fops)
{
int error = 0;
@@ -164,13 +176,16 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
if (!IS_ERR(*dentry)) {
switch (mode & S_IFMT) {
case S_IFDIR:
- error = debugfs_mkdir(parent->d_inode, *dentry, mode);
+ error = debugfs_mkdir(parent->d_inode, *dentry, mode,
+ data, fops);
break;
case S_IFLNK:
- error = debugfs_link(parent->d_inode, *dentry, mode);
+ error = debugfs_link(parent->d_inode, *dentry, mode,
+ data, fops);
break;
default:
- error = debugfs_create(parent->d_inode, *dentry, mode);
+ error = debugfs_create(parent->d_inode, *dentry, mode,
+ data, fops);
break;
}
dput(*dentry);
@@ -221,19 +236,13 @@ struct dentry *debugfs_create_file(const char *name, mode_t mode,
if (error)
goto exit;
- error = debugfs_create_by_name(name, mode, parent, &dentry);
+ error = debugfs_create_by_name(name, mode, parent, &dentry,
+ data, fops);
if (error) {
dentry = NULL;
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
goto exit;
}
-
- if (dentry->d_inode) {
- if (data)
- dentry->d_inode->i_private = data;
- if (fops)
- dentry->d_inode->i_fop = fops;
- }
exit:
return dentry;
}
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index d5f8c96964b..8882ecc0f1b 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -517,11 +517,23 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number)
{
+ struct dentry *dentry;
+ struct tty_struct *tty;
+
BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
+ /* Ensure dentry has not been deleted by devpts_pty_kill() */
+ dentry = d_find_alias(pts_inode);
+ if (!dentry)
+ return NULL;
+
+ tty = NULL;
if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
- return (struct tty_struct *)pts_inode->i_private;
- return NULL;
+ tty = (struct tty_struct *)pts_inode->i_private;
+
+ dput(dentry);
+
+ return tty;
}
void devpts_pty_kill(struct tty_struct *tty)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b912270942f..e82adc2debb 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -53,13 +53,6 @@
*
* If blkfactor is zero then the user's request was aligned to the filesystem's
* blocksize.
- *
- * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
- * This determines whether we need to do the fancy locking which prevents
- * direct-IO from being able to read uninitialised disk blocks. If its zero
- * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
- * not held for the entire direct write (taken briefly, initially, during a
- * direct read though, but its never held for the duration of a direct-IO).
*/
struct dio {
@@ -68,7 +61,7 @@ struct dio {
struct inode *inode;
int rw;
loff_t i_size; /* i_size when submitted */
- int lock_type; /* doesn't change */
+ int flags; /* doesn't change */
unsigned blkbits; /* doesn't change */
unsigned blkfactor; /* When we're using an alignment which
is finer than the filesystem's soft
@@ -104,6 +97,18 @@ struct dio {
unsigned cur_page_len; /* Nr of bytes at cur_page_offset */
sector_t cur_page_block; /* Where it starts */
+ /* BIO completion state */
+ spinlock_t bio_lock; /* protects BIO fields below */
+ unsigned long refcount; /* direct_io_worker() and bios */
+ struct bio *bio_list; /* singly linked via bi_private */
+ struct task_struct *waiter; /* waiting task (NULL if none) */
+
+ /* AIO related stuff */
+ struct kiocb *iocb; /* kiocb */
+ int is_async; /* is IO async ? */
+ int io_error; /* IO error in completion path */
+ ssize_t result; /* IO result */
+
/*
* Page fetching state. These variables belong to dio_refill_pages().
*/
@@ -115,22 +120,16 @@ struct dio {
* Page queue. These variables belong to dio_refill_pages() and
* dio_get_page().
*/
- struct page *pages[DIO_PAGES]; /* page buffer */
unsigned head; /* next page to process */
unsigned tail; /* last valid page + 1 */
int page_errors; /* errno from get_user_pages() */
- /* BIO completion state */
- spinlock_t bio_lock; /* protects BIO fields below */
- unsigned long refcount; /* direct_io_worker() and bios */
- struct bio *bio_list; /* singly linked via bi_private */
- struct task_struct *waiter; /* waiting task (NULL if none) */
-
- /* AIO related stuff */
- struct kiocb *iocb; /* kiocb */
- int is_async; /* is IO async ? */
- int io_error; /* IO error in completion path */
- ssize_t result; /* IO result */
+ /*
+ * pages[] (and any fields placed after it) are not zeroed out at
+ * allocation time. Don't add new fields after pages[] unless you
+ * wish that they not be zeroed.
+ */
+ struct page *pages[DIO_PAGES]; /* page buffer */
};
/*
@@ -240,7 +239,8 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
if (dio->end_io && dio->result)
dio->end_io(dio->iocb, offset, transferred,
dio->map_bh.b_private);
- if (dio->lock_type == DIO_LOCKING)
+
+ if (dio->flags & DIO_LOCKING)
/* lockdep: non-owner release */
up_read_non_owner(&dio->inode->i_alloc_sem);
@@ -515,21 +515,24 @@ static int get_more_blocks(struct dio *dio)
map_bh->b_state = 0;
map_bh->b_size = fs_count << dio->inode->i_blkbits;
+ /*
+ * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
+ * forbid block creations: only overwrites are permitted.
+ * We will return early to the caller once we see an
+ * unmapped buffer head returned, and the caller will fall
+ * back to buffered I/O.
+ *
+ * Otherwise the decision is left to the get_blocks method,
+ * which may decide to handle it or also return an unmapped
+ * buffer head.
+ */
create = dio->rw & WRITE;
- if (dio->lock_type == DIO_LOCKING) {
+ if (dio->flags & DIO_SKIP_HOLES) {
if (dio->block_in_file < (i_size_read(dio->inode) >>
dio->blkbits))
create = 0;
- } else if (dio->lock_type == DIO_NO_LOCKING) {
- create = 0;
}
- /*
- * For writes inside i_size we forbid block creations: only
- * overwrites are permitted. We fall back to buffered writes
- * at a higher level for inside-i_size block-instantiating
- * writes.
- */
ret = (*dio->get_block)(dio->inode, fs_startblk,
map_bh, create);
}
@@ -1039,7 +1042,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
* we can let i_mutex go now that its achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
- if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
+ if (rw == READ && (dio->flags & DIO_LOCKING))
mutex_unlock(&dio->inode->i_mutex);
/*
@@ -1086,30 +1089,28 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
/*
* This is a library function for use by filesystem drivers.
- * The locking rules are governed by the dio_lock_type parameter.
*
- * DIO_NO_LOCKING (no locking, for raw block device access)
- * For writes, i_mutex is not held on entry; it is never taken.
+ * The locking rules are governed by the flags parameter:
+ * - if the flags value contains DIO_LOCKING we use a fancy locking
+ * scheme for dumb filesystems.
+ * For writes this function is called under i_mutex and returns with
+ * i_mutex held, for reads, i_mutex is not held on entry, but it is
+ * taken and dropped again before returning.
+ * For reads and writes i_alloc_sem is taken in shared mode and released
+ * on I/O completion (which may happen asynchronously after returning to
+ * the caller).
*
- * DIO_LOCKING (simple locking for regular files)
- * For writes we are called under i_mutex and return with i_mutex held, even
- * though it is internally dropped.
- * For reads, i_mutex is not held on entry, but it is taken and dropped before
- * returning.
- *
- * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
- * uninitialised data, allowing parallel direct readers and writers)
- * For writes we are called without i_mutex, return without it, never touch it.
- * For reads we are called under i_mutex and return with i_mutex held, even
- * though it may be internally dropped.
- *
- * Additional i_alloc_sem locking requirements described inline below.
+ * - if the flags value does NOT contain DIO_LOCKING we don't use any
+ * internal locking but rather rely on the filesystem to synchronize
+ * direct I/O reads/writes versus each other and truncate.
+ * For reads and writes both i_mutex and i_alloc_sem are not held on
+ * entry and are never taken.
*/
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
- int dio_lock_type)
+ int flags)
{
int seg;
size_t size;
@@ -1120,8 +1121,6 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
ssize_t retval = -EINVAL;
loff_t end = offset;
struct dio *dio;
- int release_i_mutex = 0;
- int acquire_i_mutex = 0;
if (rw & WRITE)
rw = WRITE_ODIRECT_PLUG;
@@ -1151,48 +1150,41 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
}
- dio = kzalloc(sizeof(*dio), GFP_KERNEL);
+ dio = kmalloc(sizeof(*dio), GFP_KERNEL);
retval = -ENOMEM;
if (!dio)
goto out;
-
/*
- * For block device access DIO_NO_LOCKING is used,
- * neither readers nor writers do any locking at all
- * For regular files using DIO_LOCKING,
- * readers need to grab i_mutex and i_alloc_sem
- * writers need to grab i_alloc_sem only (i_mutex is already held)
- * For regular files using DIO_OWN_LOCKING,
- * neither readers nor writers take any locks here
+ * Believe it or not, zeroing out the page array caused a .5%
+ * performance regression in a database benchmark. So, we take
+ * care to only zero out what's needed.
*/
- dio->lock_type = dio_lock_type;
- if (dio_lock_type != DIO_NO_LOCKING) {
+ memset(dio, 0, offsetof(struct dio, pages));
+
+ dio->flags = flags;
+ if (dio->flags & DIO_LOCKING) {
/* watch out for a 0 len io from a tricksy fs */
if (rw == READ && end > offset) {
- struct address_space *mapping;
+ struct address_space *mapping =
+ iocb->ki_filp->f_mapping;
- mapping = iocb->ki_filp->f_mapping;
- if (dio_lock_type != DIO_OWN_LOCKING) {
- mutex_lock(&inode->i_mutex);
- release_i_mutex = 1;
- }
+ /* will be released by direct_io_worker */
+ mutex_lock(&inode->i_mutex);
retval = filemap_write_and_wait_range(mapping, offset,
end - 1);
if (retval) {
+ mutex_unlock(&inode->i_mutex);
kfree(dio);
goto out;
}
-
- if (dio_lock_type == DIO_OWN_LOCKING) {
- mutex_unlock(&inode->i_mutex);
- acquire_i_mutex = 1;
- }
}
- if (dio_lock_type == DIO_LOCKING)
- /* lockdep: not the owner will release it */
- down_read_non_owner(&inode->i_alloc_sem);
+ /*
+ * Will be released at I/O completion, possibly in a
+ * different thread.
+ */
+ down_read_non_owner(&inode->i_alloc_sem);
}
/*
@@ -1210,24 +1202,19 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again for DIO_LOCKING.
- * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this by
- * it's own meaner.
+ *
+ * NOTE: filesystems with their own locking have to handle this
+ * on their own.
*/
- if (unlikely(retval < 0 && (rw & WRITE))) {
- loff_t isize = i_size_read(inode);
-
- if (end > isize && dio_lock_type == DIO_LOCKING)
- vmtruncate(inode, isize);
+ if (flags & DIO_LOCKING) {
+ if (unlikely((rw & WRITE) && retval < 0)) {
+ loff_t isize = i_size_read(inode);
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
}
- if (rw == READ && dio_lock_type == DIO_LOCKING)
- release_i_mutex = 0;
-
out:
- if (release_i_mutex)
- mutex_unlock(&inode->i_mutex);
- else if (acquire_i_mutex)
- mutex_lock(&inode->i_mutex);
return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);
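The kmalloc()/memset() change above depends on field ordering in struct dio: a single memset(dio, 0, offsetof(struct dio, pages)) zeroes everything placed before pages[], and pages[] itself stays uninitialized. A small user-space sketch of the same offsetof() pattern, with a made-up structure:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* made-up structure mimicking the layout rule in the patch: fields
 * before 'pages' get zeroed, 'pages' (and anything placed after it)
 * is deliberately left uninitialized */
struct demo_dio {
	int flags;
	int rw;
	long result;
	void *pages[64];	/* hot path fills this before use */
};

int main(void)
{
	struct demo_dio *dio = malloc(sizeof(*dio));

	if (!dio)
		return 1;

	/* zero only the header part, as the patch does for struct dio */
	memset(dio, 0, offsetof(struct demo_dio, pages));

	printf("zeroed bytes: %zu of %zu total\n",
	       offsetof(struct demo_dio, pages), sizeof(*dio));
	printf("flags=%d rw=%d result=%ld\n", dio->flags, dio->rw, dio->result);

	free(dio);
	return 0;
}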
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 2dda5ade75b..8f006a0d607 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -62,7 +62,7 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
struct inode *lower_inode =
ecryptfs_inode_to_lower(dentry->d_inode);
- fsstack_copy_attr_all(dentry->d_inode, lower_inode, NULL);
+ fsstack_copy_attr_all(dentry->d_inode, lower_inode);
}
out:
return rc;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 056fed62d0d..429ca0b3ba0 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -626,9 +626,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (rc)
goto out_lock;
- fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode, NULL);
+ fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
if (new_dir != old_dir)
- fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode, NULL);
+ fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
out_lock:
unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
dput(lower_new_dentry->d_parent);
@@ -967,7 +967,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
rc = notify_change(lower_dentry, ia);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
- fsstack_copy_attr_all(inode, lower_inode, NULL);
+ fsstack_copy_attr_all(inode, lower_inode);
return rc;
}
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index c6ac85d6c70..567bc4b9f70 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -35,7 +35,6 @@
#include <linux/key.h>
#include <linux/parser.h>
#include <linux/fs_stack.h>
-#include <linux/ima.h>
#include "ecryptfs_kernel.h"
/**
@@ -119,7 +118,6 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
const struct cred *cred = current_cred();
struct ecryptfs_inode_info *inode_info =
ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
- int opened_lower_file = 0;
int rc = 0;
mutex_lock(&inode_info->lower_file_mutex);
@@ -136,12 +134,9 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
"for lower_dentry [0x%p] and lower_mnt [0x%p]; "
"rc = [%d]\n", lower_dentry, lower_mnt, rc);
inode_info->lower_file = NULL;
- } else
- opened_lower_file = 1;
+ }
}
mutex_unlock(&inode_info->lower_file_mutex);
- if (opened_lower_file)
- ima_counts_get(inode_info->lower_file);
return rc;
}
@@ -194,7 +189,7 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
init_special_inode(inode, lower_inode->i_mode,
lower_inode->i_rdev);
dentry->d_op = &ecryptfs_dops;
- fsstack_copy_attr_all(inode, lower_inode, NULL);
+ fsstack_copy_attr_all(inode, lower_inode);
/* This size will be overwritten for real files w/ headers and
* other metadata */
fsstack_copy_inode_size(inode, lower_inode);
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 8b47e4200e6..d26402ff06e 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -339,7 +339,7 @@ struct file *eventfd_file_create(unsigned int count, int flags)
ctx->flags = flags;
file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
- flags & EFD_SHARED_FCNTL_FLAGS);
+ O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
if (IS_ERR(file))
eventfd_free_ctx(ctx);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 366c503f965..bd056a5b4ef 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1206,7 +1206,7 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
* a file structure and a free file descriptor.
*/
error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
- flags & O_CLOEXEC);
+ O_RDWR | (flags & O_CLOEXEC));
if (error < 0)
ep_free(ep);
diff --git a/fs/exec.c b/fs/exec.c
index c0c636e34f6..632b02e34ec 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -826,7 +826,9 @@ static int de_thread(struct task_struct *tsk)
attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
transfer_pid(leader, tsk, PIDTYPE_PGID);
transfer_pid(leader, tsk, PIDTYPE_SID);
+
list_replace_rcu(&leader->tasks, &tsk->tasks);
+ list_replace_init(&leader->sibling, &tsk->sibling);
tsk->group_leader = tsk;
leader->group_leader = tsk;
@@ -923,6 +925,15 @@ char *get_task_comm(char *buf, struct task_struct *tsk)
void set_task_comm(struct task_struct *tsk, char *buf)
{
task_lock(tsk);
+
+ /*
+ * Threads may access current->comm without holding
+ * the task lock, so write the string carefully.
+ * Readers without a lock may see incomplete new
+ * names but are safe from non-terminating string reads.
+ */
+ memset(tsk->comm, 0, TASK_COMM_LEN);
+ wmb();
strlcpy(tsk->comm, buf, sizeof(tsk->comm));
task_unlock(tsk);
perf_event_comm(tsk);
@@ -1752,17 +1763,20 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
struct mm_struct *mm = current->mm;
struct linux_binfmt * binfmt;
struct inode * inode;
- struct file * file;
const struct cred *old_cred;
struct cred *cred;
int retval = 0;
int flag = 0;
int ispipe = 0;
- unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
char **helper_argv = NULL;
int helper_argc = 0;
int dump_count = 0;
static atomic_t core_dump_count = ATOMIC_INIT(0);
+ struct coredump_params cprm = {
+ .signr = signr,
+ .regs = regs,
+ .limit = current->signal->rlim[RLIMIT_CORE].rlim_cur,
+ };
audit_core_dumps(signr);
@@ -1818,15 +1832,15 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
ispipe = format_corename(corename, signr);
unlock_kernel();
- if ((!ispipe) && (core_limit < binfmt->min_coredump))
+ if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
goto fail_unlock;
if (ispipe) {
- if (core_limit == 0) {
+ if (cprm.limit == 0) {
/*
* Normally core limits are irrelevant to pipes, since
* we're not writing to the file system, but we use
- * core_limit of 0 here as a speacial value. Any
+ * cprm.limit of 0 here as a special value. Any
* non-zero limit gets set to RLIM_INFINITY below, but
* a limit of 0 skips the dump. This is a consistent
* way to catch recursive crashes. We can still crash
@@ -1859,25 +1873,25 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
goto fail_dropcount;
}
- core_limit = RLIM_INFINITY;
+ cprm.limit = RLIM_INFINITY;
/* SIGPIPE can happen, but it's just never processed */
if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL,
- &file)) {
+ &cprm.file)) {
printk(KERN_INFO "Core dump to %s pipe failed\n",
corename);
goto fail_dropcount;
}
} else
- file = filp_open(corename,
+ cprm.file = filp_open(corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
0600);
- if (IS_ERR(file))
+ if (IS_ERR(cprm.file))
goto fail_dropcount;
- inode = file->f_path.dentry->d_inode;
+ inode = cprm.file->f_path.dentry->d_inode;
if (inode->i_nlink > 1)
goto close_fail; /* multiple links - don't dump */
- if (!ispipe && d_unhashed(file->f_path.dentry))
+ if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
goto close_fail;
/* AK: actually i see no reason to not allow this for named pipes etc.,
@@ -1890,21 +1904,22 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
*/
if (inode->i_uid != current_fsuid())
goto close_fail;
- if (!file->f_op)
+ if (!cprm.file->f_op)
goto close_fail;
- if (!file->f_op->write)
+ if (!cprm.file->f_op->write)
goto close_fail;
- if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
+ if (!ispipe &&
+ do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
goto close_fail;
- retval = binfmt->core_dump(signr, regs, file, core_limit);
+ retval = binfmt->core_dump(&cprm);
if (retval)
current->signal->group_exit_code |= 0x80;
close_fail:
if (ispipe && core_pipe_limit)
- wait_for_dump_helpers(file);
- filp_close(file, NULL);
+ wait_for_dump_helpers(cprm.file);
+ filp_close(cprm.file, NULL);
fail_dropcount:
if (dump_count)
atomic_dec(&core_dump_count);
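Editor's note: the do_coredump() changes above gather signr, regs, the RLIMIT_CORE limit and the output file into a new struct coredump_params so that binfmt->core_dump() takes one pointer instead of a growing argument list. A minimal sketch of that parameter-object refactoring follows; the names (dump_params, write_dump) are hypothetical stand-ins, not the kernel's API.

#include <stdio.h>

struct dump_params {
	int signr;              /* signal that triggered the dump */
	long limit;             /* RLIMIT_CORE-style size limit */
	FILE *file;             /* where the dump is written */
};

/* The callback receives one struct instead of several loose arguments. */
static int write_dump(const struct dump_params *p)
{
	if (p->limit == 0)
		return -1;      /* a limit of 0 means "skip the dump" */
	return fprintf(p->file, "dump for signal %d\n", p->signr) < 0 ? -1 : 0;
}

int main(void)
{
	struct dump_params p = { .signr = 11, .limit = -1, .file = stdout };

	return write_dump(&p) ? 1 : 0;
}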
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 197c7db583c..e9e175949a6 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -6,7 +6,7 @@
* and for mapping back from file handles to dentries.
*
* For details on why we do all the strange and hairy things in here
- * take a look at Documentation/filesystems/Exporting.
+ * take a look at Documentation/filesystems/nfs/Exporting.
*/
#include <linux/exportfs.h>
#include <linux/fs.h>
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index a63d44256a7..a99e54318c3 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -339,12 +339,12 @@ ext2_acl_chmod(struct inode *inode)
* Extended attribute handlers
*/
static size_t
-ext2_xattr_list_acl_access(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext2_xattr_list_acl_access(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return 0;
if (list && size <= list_size)
memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
@@ -352,12 +352,12 @@ ext2_xattr_list_acl_access(struct inode *inode, char *list, size_t list_size,
}
static size_t
-ext2_xattr_list_acl_default(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext2_xattr_list_acl_default(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return 0;
if (list && size <= list_size)
memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
@@ -365,15 +365,18 @@ ext2_xattr_list_acl_default(struct inode *inode, char *list, size_t list_size,
}
static int
-ext2_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+ext2_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
{
struct posix_acl *acl;
int error;
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return -EOPNOTSUPP;
- acl = ext2_get_acl(inode, type);
+ acl = ext2_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -385,33 +388,17 @@ ext2_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
}
static int
-ext2_xattr_get_acl_access(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext2_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
-}
-
-static int
-ext2_xattr_get_acl_default(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext2_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
-}
-
-static int
-ext2_xattr_set_acl(struct inode *inode, int type, const void *value,
- size_t size)
+ext2_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags, int type)
{
struct posix_acl *acl;
int error;
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!is_owner_or_cap(dentry->d_inode))
return -EPERM;
if (value) {
@@ -426,41 +413,25 @@ ext2_xattr_set_acl(struct inode *inode, int type, const void *value,
} else
acl = NULL;
- error = ext2_set_acl(inode, type, acl);
+ error = ext2_set_acl(dentry->d_inode, type, acl);
release_and_out:
posix_acl_release(acl);
return error;
}
-static int
-ext2_xattr_set_acl_access(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext2_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
-}
-
-static int
-ext2_xattr_set_acl_default(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext2_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
-}
-
struct xattr_handler ext2_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
.list = ext2_xattr_list_acl_access,
- .get = ext2_xattr_get_acl_access,
- .set = ext2_xattr_set_acl_access,
+ .get = ext2_xattr_get_acl,
+ .set = ext2_xattr_set_acl,
};
struct xattr_handler ext2_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
.list = ext2_xattr_list_acl_default,
- .get = ext2_xattr_get_acl_default,
- .set = ext2_xattr_set_acl_default,
+ .get = ext2_xattr_get_acl,
+ .set = ext2_xattr_set_acl,
};
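Editor's note: the ext2 ACL hunks above (and the matching ext3 hunks later in this diff) delete the access/default wrapper functions and instead record the ACL type in the handler's new .flags field, which ext2_xattr_list() then passes back into the shared get/set callbacks. The sketch below illustrates that dispatch-table-plus-flags pattern in plain C; the enum, struct and function names are invented stand-ins, not the kernel's xattr_handler API.

#include <stdio.h>

enum acl_type { ACL_ACCESS = 1, ACL_DEFAULT = 2 };

struct handler {
	const char *prefix;
	int flags;                               /* carries the ACL type */
	int (*get)(const char *name, int type);  /* one shared callback */
};

/* One generic callback replaces the old per-type wrappers. */
static int generic_get(const char *name, int type)
{
	if (name[0] != '\0')
		return -1;      /* only the bare attribute name is valid */
	return type;            /* stand-in for "fetch the ACL of this type" */
}

static const struct handler access_handler = {
	"system.posix_acl_access", ACL_ACCESS, generic_get
};
static const struct handler default_handler = {
	"system.posix_acl_default", ACL_DEFAULT, generic_get
};

int main(void)
{
	/* Callers pass handler->flags through, as ext2_xattr_list() now does. */
	printf("%d %d\n",
	       access_handler.get("", access_handler.flags),
	       default_handler.get("", default_handler.flags));
	return 0;
}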
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 6cde970b0a1..7516957273e 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -353,8 +353,8 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
* ext2_find_entry()
*
* finds an entry in the specified directory with the wanted name. It
- * returns the page in which the entry was found, and the entry itself
- * (as a parameter - res_dir). Page is returned mapped and unlocked.
+ * returns the page in which the entry was found (as a parameter - res_page),
+ * and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
@@ -721,5 +721,5 @@ const struct file_operations ext2_dir_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
- .fsync = simple_fsync,
+ .fsync = ext2_fsync,
};
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 9a8a8e27a06..061914add3c 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -142,7 +142,7 @@ struct dentry *ext2_get_parent(struct dentry *child);
/* super.c */
extern void ext2_error (struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
-extern void ext2_warning (struct super_block *, const char *, const char *, ...)
+extern void ext2_msg(struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
extern void ext2_update_dynamic_rev (struct super_block *sb);
extern void ext2_write_super (struct super_block *);
@@ -155,6 +155,7 @@ extern void ext2_write_super (struct super_block *);
extern const struct file_operations ext2_dir_operations;
/* file.c */
+extern int ext2_fsync(struct file *file, struct dentry *dentry, int datasync);
extern const struct inode_operations ext2_file_inode_operations;
extern const struct file_operations ext2_file_operations;
extern const struct file_operations ext2_xip_file_operations;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index a2f3afd1a1c..586e3589d4c 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -19,6 +19,7 @@
*/
#include <linux/time.h>
+#include <linux/pagemap.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@@ -38,6 +39,22 @@ static int ext2_release_file (struct inode * inode, struct file * filp)
return 0;
}
+int ext2_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ int ret;
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+
+ ret = simple_fsync(file, dentry, datasync);
+ if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
+ /* We don't really know where the IO error happened... */
+ ext2_error(sb, __func__,
+ "detected IO error when writing metadata buffers");
+ ret = -EIO;
+ }
+ return ret;
+}
+
/*
* We have mostly NULL's here: the current defaults are ok for
* the ext2 filesystem.
@@ -55,7 +72,7 @@ const struct file_operations ext2_file_operations = {
.mmap = generic_file_mmap,
.open = generic_file_open,
.release = ext2_release_file,
- .fsync = simple_fsync,
+ .fsync = ext2_fsync,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
};
@@ -72,7 +89,7 @@ const struct file_operations ext2_xip_file_operations = {
.mmap = xip_file_mmap,
.open = generic_file_open,
.release = ext2_release_file,
- .fsync = simple_fsync,
+ .fsync = ext2_fsync,
};
#endif
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index ade634076d0..71b032c65a0 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -137,7 +137,8 @@ static int ext2_block_to_path(struct inode *inode,
int final = 0;
if (i_block < 0) {
- ext2_warning (inode->i_sb, "ext2_block_to_path", "block < 0");
+ ext2_msg(inode->i_sb, KERN_WARNING,
+ "warning: %s: block < 0", __func__);
} else if (i_block < direct_blocks) {
offsets[n++] = i_block;
final = direct_blocks;
@@ -157,7 +158,8 @@ static int ext2_block_to_path(struct inode *inode,
offsets[n++] = i_block & (ptrs - 1);
final = ptrs;
} else {
- ext2_warning (inode->i_sb, "ext2_block_to_path", "block > big");
+ ext2_msg(inode->i_sb, KERN_WARNING,
+ "warning: %s: block is too big", __func__);
}
if (boundary)
*boundary = final - 1 - (i_block & (ptrs - 1));
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 1a9ffee47d5..f9cb54a585c 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -58,27 +58,27 @@ void ext2_error (struct super_block * sb, const char * function,
}
va_start(args, fmt);
- printk(KERN_CRIT "EXT2-fs error (device %s): %s: ",sb->s_id, function);
+ printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function);
vprintk(fmt, args);
printk("\n");
va_end(args);
if (test_opt(sb, ERRORS_PANIC))
- panic("EXT2-fs panic from previous error\n");
+ panic("EXT2-fs: panic from previous error\n");
if (test_opt(sb, ERRORS_RO)) {
- printk("Remounting filesystem read-only\n");
+ ext2_msg(sb, KERN_CRIT,
+ "error: remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
}
-void ext2_warning (struct super_block * sb, const char * function,
- const char * fmt, ...)
+void ext2_msg(struct super_block *sb, const char *prefix,
+ const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
- printk(KERN_WARNING "EXT2-fs warning (device %s): %s: ",
- sb->s_id, function);
+ printk("%sEXT2-fs (%s): ", prefix, sb->s_id);
vprintk(fmt, args);
printk("\n");
va_end(args);
@@ -91,9 +91,9 @@ void ext2_update_dynamic_rev(struct super_block *sb)
if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
return;
- ext2_warning(sb, __func__,
- "updating to rev %d because of new feature flag, "
- "running e2fsck is recommended",
+ ext2_msg(sb, KERN_WARNING,
+ "warning: updating to rev %d because of "
+ "new feature flag, running e2fsck is recommended",
EXT2_DYNAMIC_REV);
es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
@@ -419,10 +419,10 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
-static int parse_options (char * options,
- struct ext2_sb_info *sbi)
+static int parse_options(char *options, struct super_block *sb)
{
- char * p;
+ char *p;
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
substring_t args[MAX_OPT_ARGS];
int option;
@@ -505,7 +505,8 @@ static int parse_options (char * options,
#else
case Opt_user_xattr:
case Opt_nouser_xattr:
- printk("EXT2 (no)user_xattr options not supported\n");
+ ext2_msg(sb, KERN_INFO, "(no)user_xattr options "
+ "not supported");
break;
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -518,14 +519,15 @@ static int parse_options (char * options,
#else
case Opt_acl:
case Opt_noacl:
- printk("EXT2 (no)acl options not supported\n");
+ ext2_msg(sb, KERN_INFO,
+ "(no)acl options not supported");
break;
#endif
case Opt_xip:
#ifdef CONFIG_EXT2_FS_XIP
set_opt (sbi->s_mount_opt, XIP);
#else
- printk("EXT2 xip option not supported\n");
+ ext2_msg(sb, KERN_INFO, "xip option not supported");
#endif
break;
@@ -542,19 +544,18 @@ static int parse_options (char * options,
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
- printk(KERN_ERR
- "EXT2-fs: quota operations not supported.\n");
-
+ ext2_msg(sb, KERN_INFO,
+ "quota operations not supported");
break;
#endif
case Opt_reservation:
set_opt(sbi->s_mount_opt, RESERVATION);
- printk("reservations ON\n");
+ ext2_msg(sb, KERN_INFO, "reservations ON");
break;
case Opt_noreservation:
clear_opt(sbi->s_mount_opt, RESERVATION);
- printk("reservations OFF\n");
+ ext2_msg(sb, KERN_INFO, "reservations OFF");
break;
case Opt_ignore:
break;
@@ -573,34 +574,40 @@ static int ext2_setup_super (struct super_block * sb,
struct ext2_sb_info *sbi = EXT2_SB(sb);
if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
- printk ("EXT2-fs warning: revision level too high, "
- "forcing read-only mode\n");
+ ext2_msg(sb, KERN_ERR,
+ "error: revision level too high, "
+ "forcing read-only mode");
res = MS_RDONLY;
}
if (read_only)
return res;
if (!(sbi->s_mount_state & EXT2_VALID_FS))
- printk ("EXT2-fs warning: mounting unchecked fs, "
- "running e2fsck is recommended\n");
+ ext2_msg(sb, KERN_WARNING,
+ "warning: mounting unchecked fs, "
+ "running e2fsck is recommended");
else if ((sbi->s_mount_state & EXT2_ERROR_FS))
- printk ("EXT2-fs warning: mounting fs with errors, "
- "running e2fsck is recommended\n");
+ ext2_msg(sb, KERN_WARNING,
+ "warning: mounting fs with errors, "
+ "running e2fsck is recommended");
else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
le16_to_cpu(es->s_mnt_count) >=
(unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
- printk ("EXT2-fs warning: maximal mount count reached, "
- "running e2fsck is recommended\n");
+ ext2_msg(sb, KERN_WARNING,
+ "warning: maximal mount count reached, "
+ "running e2fsck is recommended");
else if (le32_to_cpu(es->s_checkinterval) &&
- (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds()))
- printk ("EXT2-fs warning: checktime reached, "
- "running e2fsck is recommended\n");
+ (le32_to_cpu(es->s_lastcheck) +
+ le32_to_cpu(es->s_checkinterval) <= get_seconds()))
+ ext2_msg(sb, KERN_WARNING,
+ "warning: checktime reached, "
+ "running e2fsck is recommended");
if (!le16_to_cpu(es->s_max_mnt_count))
es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1);
ext2_write_super(sb);
if (test_opt (sb, DEBUG))
- printk ("[EXT II FS %s, %s, bs=%lu, fs=%lu, gc=%lu, "
- "bpg=%lu, ipg=%lu, mo=%04lx]\n",
+ ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
+ "bpg=%lu, ipg=%lu, mo=%04lx]",
EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
sbi->s_frag_size,
sbi->s_groups_count,
@@ -767,7 +774,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
*/
blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
if (!blocksize) {
- printk ("EXT2-fs: unable to set blocksize\n");
+ ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
goto failed_sbi;
}
@@ -783,7 +790,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
}
if (!(bh = sb_bread(sb, logic_sb_block))) {
- printk ("EXT2-fs: unable to read superblock\n");
+ ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
goto failed_sbi;
}
/*
@@ -826,7 +833,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sbi->s_mount_opt, RESERVATION);
- if (!parse_options ((char *) data, sbi))
+ if (!parse_options((char *) data, sb))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -840,8 +847,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
(EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
- printk("EXT2-fs warning: feature flags set on rev 0 fs, "
- "running e2fsck is recommended\n");
+ ext2_msg(sb, KERN_WARNING,
+ "warning: feature flags set on rev 0 fs, "
+ "running e2fsck is recommended");
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
@@ -849,16 +857,16 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
*/
features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
if (features) {
- printk("EXT2-fs: %s: couldn't mount because of "
- "unsupported optional features (%x).\n",
- sb->s_id, le32_to_cpu(features));
+ ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
+ "unsupported optional features (%x)",
+ le32_to_cpu(features));
goto failed_mount;
}
if (!(sb->s_flags & MS_RDONLY) &&
(features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
- printk("EXT2-fs: %s: couldn't mount RDWR because of "
- "unsupported optional features (%x).\n",
- sb->s_id, le32_to_cpu(features));
+ ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
+ "unsupported optional features (%x)",
+ le32_to_cpu(features));
goto failed_mount;
}
@@ -866,7 +874,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
if (!silent)
- printk("XIP: Unsupported blocksize\n");
+ ext2_msg(sb, KERN_ERR,
+ "error: unsupported blocksize for xip");
goto failed_mount;
}
@@ -875,7 +884,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
if (!sb_set_blocksize(sb, blocksize)) {
- printk(KERN_ERR "EXT2-fs: blocksize too small for device.\n");
+ ext2_msg(sb, KERN_ERR, "error: blocksize is too small");
goto failed_sbi;
}
@@ -883,14 +892,14 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
offset = (sb_block*BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if(!bh) {
- printk("EXT2-fs: Couldn't read superblock on "
- "2nd try.\n");
+ ext2_msg(sb, KERN_ERR, "error: couldn't read "
+ "superblock on 2nd try");
goto failed_sbi;
}
es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
- printk ("EXT2-fs: Magic mismatch, very weird !\n");
+ ext2_msg(sb, KERN_ERR, "error: magic mismatch");
goto failed_mount;
}
}
@@ -906,7 +915,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
!is_power_of_2(sbi->s_inode_size) ||
(sbi->s_inode_size > blocksize)) {
- printk ("EXT2-fs: unsupported inode size: %d\n",
+ ext2_msg(sb, KERN_ERR,
+ "error: unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
@@ -943,29 +953,33 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_blocksize != bh->b_size) {
if (!silent)
- printk ("VFS: Unsupported blocksize on dev "
- "%s.\n", sb->s_id);
+ ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
goto failed_mount;
}
if (sb->s_blocksize != sbi->s_frag_size) {
- printk ("EXT2-fs: fragsize %lu != blocksize %lu (not supported yet)\n",
+ ext2_msg(sb, KERN_ERR,
+ "error: fragsize %lu != blocksize %lu"
+ "(not supported yet)",
sbi->s_frag_size, sb->s_blocksize);
goto failed_mount;
}
if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
- printk ("EXT2-fs: #blocks per group too big: %lu\n",
+ ext2_msg(sb, KERN_ERR,
+ "error: #blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
- printk ("EXT2-fs: #fragments per group too big: %lu\n",
+ ext2_msg(sb, KERN_ERR,
+ "error: #fragments per group too big: %lu",
sbi->s_frags_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
- printk ("EXT2-fs: #inodes per group too big: %lu\n",
+ ext2_msg(sb, KERN_ERR,
+ "error: #inodes per group too big: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
@@ -979,13 +993,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
EXT2_DESC_PER_BLOCK(sb);
sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
- printk ("EXT2-fs: not enough memory\n");
+ ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount;
}
bgl_lock_init(sbi->s_blockgroup_lock);
sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
if (!sbi->s_debts) {
- printk ("EXT2-fs: not enough memory\n");
+ ext2_msg(sb, KERN_ERR, "error: not enough memory");
goto failed_mount_group_desc;
}
for (i = 0; i < db_count; i++) {
@@ -994,12 +1008,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi->s_group_desc[i]) {
for (j = 0; j < i; j++)
brelse (sbi->s_group_desc[j]);
- printk ("EXT2-fs: unable to read group descriptors\n");
+ ext2_msg(sb, KERN_ERR,
+ "error: unable to read group descriptors");
goto failed_mount_group_desc;
}
}
if (!ext2_check_descriptors (sb)) {
- printk ("EXT2-fs: group descriptors corrupted!\n");
+ ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
@@ -1032,7 +1047,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
ext2_count_dirs(sb));
}
if (err) {
- printk(KERN_ERR "EXT2-fs: insufficient memory\n");
+ ext2_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
/*
@@ -1048,27 +1063,28 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
- printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
+ ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
goto failed_mount3;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
iput(root);
- printk(KERN_ERR "EXT2-fs: get root inode failed\n");
+ ext2_msg(sb, KERN_ERR, "error: get root inode failed");
ret = -ENOMEM;
goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
- ext2_warning(sb, __func__,
- "mounting ext3 filesystem as ext2");
+ ext2_msg(sb, KERN_WARNING,
+ "warning: mounting ext3 filesystem as ext2");
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
return 0;
cantfind_ext2:
if (!silent)
- printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
- sb->s_id);
+ ext2_msg(sb, KERN_ERR,
+ "error: can't find an ext2 filesystem on dev %s.",
+ sb->s_id);
goto failed_mount;
failed_mount3:
percpu_counter_destroy(&sbi->s_freeblocks_counter);
@@ -1089,9 +1105,30 @@ failed_sbi:
return ret;
}
+static void ext2_clear_super_error(struct super_block *sb)
+{
+ struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
+
+ if (buffer_write_io_error(sbh)) {
+ /*
+ * Oh, dear. A previous attempt to write the
+ * superblock failed. This could happen because the
+ * USB device was yanked out. Or it could happen to
+ * be a transient write error and maybe the block will
+ * be remapped. Nothing we can do but to retry the
+ * write and hope for the best.
+ */
+ printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
+ "superblock detected", sb->s_id);
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
+ }
+}
+
static void ext2_commit_super (struct super_block * sb,
struct ext2_super_block * es)
{
+ ext2_clear_super_error(sb);
es->s_wtime = cpu_to_le32(get_seconds());
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
sb->s_dirt = 0;
@@ -1099,6 +1136,7 @@ static void ext2_commit_super (struct super_block * sb,
static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
{
+ ext2_clear_super_error(sb);
es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
es->s_wtime = cpu_to_le32(get_seconds());
@@ -1121,8 +1159,24 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
static int ext2_sync_fs(struct super_block *sb, int wait)
{
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
+ struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
lock_kernel();
+ if (buffer_write_io_error(sbh)) {
+ /*
+ * Oh, dear. A previous attempt to write the
+ * superblock failed. This could happen because the
+ * USB device was yanked out. Or it could happen to
+ * be a transient write error and maybe the block will
+ * be remapped. Nothing we can do but to retry the
+ * write and hope for the best.
+ */
+ ext2_msg(sb, KERN_ERR,
+ "previous I/O error to superblock detected\n");
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
+ }
+
if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
ext2_debug("setting valid to 0\n");
es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
@@ -1170,7 +1224,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
/*
* Allow the "check" option to be passed as a remount option.
*/
- if (!parse_options (data, sbi)) {
+ if (!parse_options(data, sb)) {
err = -EINVAL;
goto restore_opts;
}
@@ -1182,7 +1236,8 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
EXT2_MOUNT_XIP if not */
if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) {
- printk("XIP: Unsupported blocksize\n");
+ ext2_msg(sb, KERN_WARNING,
+ "warning: unsupported blocksize for xip");
err = -EINVAL;
goto restore_opts;
}
@@ -1191,8 +1246,8 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
(old_mount_opt & EXT2_MOUNT_XIP)) &&
invalidate_inodes(sb)) {
- ext2_warning(sb, __func__, "refusing change of xip flag "
- "with busy inodes while remounting");
+ ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
+ "xip flag with busy inodes while remounting");
sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
}
@@ -1216,9 +1271,10 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
~EXT2_FEATURE_RO_COMPAT_SUPP);
if (ret) {
- printk("EXT2-fs: %s: couldn't remount RDWR because of "
- "unsupported optional features (%x).\n",
- sb->s_id, le32_to_cpu(ret));
+ ext2_msg(sb, KERN_WARNING,
+ "warning: couldn't remount RDWR because of "
+ "unsupported optional features (%x).",
+ le32_to_cpu(ret));
err = -EROFS;
goto restore_opts;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 7913531ec6d..904f00642f8 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -60,6 +60,7 @@
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
+#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@@ -249,8 +250,9 @@ cleanup:
* used / required on success.
*/
static int
-ext2_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
+ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
+ struct inode *inode = dentry->d_inode;
struct buffer_head *bh = NULL;
struct ext2_xattr_entry *entry;
char *end;
@@ -300,9 +302,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
ext2_xattr_handler(entry->e_name_index);
if (handler) {
- size_t size = handler->list(inode, buffer, rest,
+ size_t size = handler->list(dentry, buffer, rest,
entry->e_name,
- entry->e_name_len);
+ entry->e_name_len,
+ handler->flags);
if (buffer) {
if (size > rest) {
error = -ERANGE;
@@ -330,7 +333,7 @@ cleanup:
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- return ext2_xattr_list(dentry->d_inode, buffer, size);
+ return ext2_xattr_list(dentry, buffer, size);
}
/*
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 70c0dbdcdcb..c8155845ac0 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -11,8 +11,8 @@
#include "xattr.h"
static size_t
-ext2_xattr_security_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext2_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const int prefix_len = XATTR_SECURITY_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
@@ -26,22 +26,22 @@ ext2_xattr_security_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext2_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext2_xattr_security_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_get(inode, EXT2_XATTR_INDEX_SECURITY, name,
+ return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name,
buffer, size);
}
static int
-ext2_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext2_xattr_security_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, name,
+ return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name,
value, size, flags);
}
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index e8219f8eae9..2a26d71f477 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -13,8 +13,8 @@
#include "xattr.h"
static size_t
-ext2_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext2_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const int prefix_len = XATTR_TRUSTED_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
@@ -31,22 +31,22 @@ ext2_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext2_xattr_trusted_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext2_xattr_trusted_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_get(inode, EXT2_XATTR_INDEX_TRUSTED, name,
+ return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name,
buffer, size);
}
static int
-ext2_xattr_trusted_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext2_xattr_trusted_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext2_xattr_set(inode, EXT2_XATTR_INDEX_TRUSTED, name,
+ return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 92495d28c62..3f6caf3684b 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -12,13 +12,13 @@
#include "xattr.h"
static size_t
-ext2_xattr_user_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext2_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t prefix_len = XATTR_USER_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return 0;
if (list && total_len <= list_size) {
@@ -30,27 +30,28 @@ ext2_xattr_user_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext2_xattr_user_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext2_xattr_user_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, name, buffer, size);
+ return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_USER,
+ name, buffer, size);
}
static int
-ext2_xattr_user_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext2_xattr_user_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, name,
- value, size, flags);
+ return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_USER,
+ name, value, size, flags);
}
struct xattr_handler ext2_xattr_user_handler = {
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index c18fbf3e406..322a56b2dfb 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -69,8 +69,9 @@ void ext2_xip_verify_sb(struct super_block *sb)
if ((sbi->s_mount_opt & EXT2_MOUNT_XIP) &&
!sb->s_bdev->bd_disk->fops->direct_access) {
sbi->s_mount_opt &= (~EXT2_MOUNT_XIP);
- ext2_warning(sb, __func__,
- "ignoring xip option - not supported by bdev");
+ ext2_msg(sb, KERN_WARNING,
+ "warning: ignoring xip option - "
+ "not supported by bdev");
}
}
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index c9b0df376b5..82ba3415866 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -366,12 +366,12 @@ out:
* Extended attribute handlers
*/
static size_t
-ext3_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len,
- const char *name, size_t name_len)
+ext3_xattr_list_acl_access(struct dentry *dentry, char *list, size_t list_len,
+ const char *name, size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return 0;
if (list && size <= list_len)
memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
@@ -379,12 +379,12 @@ ext3_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len,
}
static size_t
-ext3_xattr_list_acl_default(struct inode *inode, char *list, size_t list_len,
- const char *name, size_t name_len)
+ext3_xattr_list_acl_default(struct dentry *dentry, char *list, size_t list_len,
+ const char *name, size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return 0;
if (list && size <= list_len)
memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
@@ -392,15 +392,18 @@ ext3_xattr_list_acl_default(struct inode *inode, char *list, size_t list_len,
}
static int
-ext3_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+ext3_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
{
struct posix_acl *acl;
int error;
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return -EOPNOTSUPP;
- acl = ext3_get_acl(inode, type);
+ acl = ext3_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -412,31 +415,16 @@ ext3_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
}
static int
-ext3_xattr_get_acl_access(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext3_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
-}
-
-static int
-ext3_xattr_get_acl_default(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext3_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
-}
-
-static int
-ext3_xattr_set_acl(struct inode *inode, int type, const void *value,
- size_t size)
+ext3_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags, int type)
{
+ struct inode *inode = dentry->d_inode;
handle_t *handle;
struct posix_acl *acl;
int error, retries = 0;
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
if (!is_owner_or_cap(inode))
@@ -468,34 +456,18 @@ release_and_out:
return error;
}
-static int
-ext3_xattr_set_acl_access(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext3_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
-}
-
-static int
-ext3_xattr_set_acl_default(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext3_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
-}
-
struct xattr_handler ext3_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
.list = ext3_xattr_list_acl_access,
- .get = ext3_xattr_get_acl_access,
- .set = ext3_xattr_set_acl_access,
+ .get = ext3_xattr_get_acl,
+ .set = ext3_xattr_set_acl,
};
struct xattr_handler ext3_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
.list = ext3_xattr_list_acl_default,
- .get = ext3_xattr_get_acl_default,
- .set = ext3_xattr_set_acl_default,
+ .get = ext3_xattr_get_acl,
+ .set = ext3_xattr_set_acl,
};
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 2db95777890..ad14227f509 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1151,6 +1151,16 @@ static int do_journal_get_write_access(handle_t *handle,
return ext3_journal_get_write_access(handle, bh);
}
+/*
+ * Truncate blocks that were not used by write. We have to truncate the
+ * pagecache as well so that corresponding buffers get properly unmapped.
+ */
+static void ext3_truncate_failed_write(struct inode *inode)
+{
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
+ ext3_truncate(inode);
+}
+
static int ext3_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -1209,7 +1219,7 @@ write_begin_failed:
unlock_page(page);
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate(inode);
+ ext3_truncate_failed_write(inode);
}
if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
goto retry;
@@ -1304,7 +1314,7 @@ static int ext3_ordered_write_end(struct file *file,
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate(inode);
+ ext3_truncate_failed_write(inode);
return ret ? ret : copied;
}
@@ -1330,7 +1340,7 @@ static int ext3_writeback_write_end(struct file *file,
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate(inode);
+ ext3_truncate_failed_write(inode);
return ret ? ret : copied;
}
@@ -1383,7 +1393,7 @@ static int ext3_journalled_write_end(struct file *file,
page_cache_release(page);
if (pos + len > inode->i_size)
- ext3_truncate(inode);
+ ext3_truncate_failed_write(inode);
return ret ? ret : copied;
}
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 8359e7b3dc8..5f83b617917 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -266,7 +266,7 @@ static int setup_new_group_blocks(struct super_block *sb,
goto exit_bh;
if (IS_ERR(gdb = bclean(handle, sb, block))) {
- err = PTR_ERR(bh);
+ err = PTR_ERR(gdb);
goto exit_bh;
}
ext3_journal_dirty_metadata(handle, gdb);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 427496c4767..7ad1e8c30bd 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -135,12 +135,24 @@ void ext3_journal_abort_handle(const char *caller, const char *err_fn,
if (is_handle_aborted(handle))
return;
- printk(KERN_ERR "%s: aborting transaction: %s in %s\n",
- caller, errstr, err_fn);
+ printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n",
+ caller, errstr, err_fn);
journal_abort_handle(handle);
}
+void ext3_msg(struct super_block *sb, const char *prefix,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ printk("%sEXT3-fs (%s): ", prefix, sb->s_id);
+ vprintk(fmt, args);
+ printk("\n");
+ va_end(args);
+}
+
/* Deal with the reporting of failure conditions on a filesystem such as
* inconsistencies detected or read IO failures.
*
@@ -174,12 +186,13 @@ static void ext3_handle_error(struct super_block *sb)
journal_abort(journal, -EIO);
}
if (test_opt (sb, ERRORS_RO)) {
- printk (KERN_CRIT "Remounting filesystem read-only\n");
+ ext3_msg(sb, KERN_CRIT,
+ "error: remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
ext3_commit_super(sb, es, 1);
if (test_opt(sb, ERRORS_PANIC))
- panic("EXT3-fs (device %s): panic forced after error\n",
+ panic("EXT3-fs (%s): panic forced after error\n",
sb->s_id);
}
@@ -247,8 +260,7 @@ void __ext3_std_error (struct super_block * sb, const char * function,
return;
errstr = ext3_decode_error(sb, errno, nbuf);
- printk (KERN_CRIT "EXT3-fs error (device %s) in %s: %s\n",
- sb->s_id, function, errstr);
+ ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr);
ext3_handle_error(sb);
}
@@ -268,21 +280,20 @@ void ext3_abort (struct super_block * sb, const char * function,
{
va_list args;
- printk (KERN_CRIT "ext3_abort called.\n");
-
va_start(args, fmt);
- printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function);
+ printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function);
vprintk(fmt, args);
printk("\n");
va_end(args);
if (test_opt(sb, ERRORS_PANIC))
- panic("EXT3-fs panic from previous error\n");
+ panic("EXT3-fs: panic from previous error\n");
if (sb->s_flags & MS_RDONLY)
return;
- printk(KERN_CRIT "Remounting filesystem read-only\n");
+ ext3_msg(sb, KERN_CRIT,
+ "error: remounting filesystem read-only");
EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
sb->s_flags |= MS_RDONLY;
EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
@@ -296,7 +307,7 @@ void ext3_warning (struct super_block * sb, const char * function,
va_list args;
va_start(args, fmt);
- printk(KERN_WARNING "EXT3-fs warning (device %s): %s: ",
+ printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ",
sb->s_id, function);
vprintk(fmt, args);
printk("\n");
@@ -310,10 +321,10 @@ void ext3_update_dynamic_rev(struct super_block *sb)
if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV)
return;
- ext3_warning(sb, __func__,
- "updating to rev %d because of new feature flag, "
- "running e2fsck is recommended",
- EXT3_DYNAMIC_REV);
+ ext3_msg(sb, KERN_WARNING,
+ "warning: updating to rev %d because of "
+ "new feature flag, running e2fsck is recommended",
+ EXT3_DYNAMIC_REV);
es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO);
es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE);
@@ -331,7 +342,7 @@ void ext3_update_dynamic_rev(struct super_block *sb)
/*
* Open the external journal device
*/
-static struct block_device *ext3_blkdev_get(dev_t dev)
+static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb)
{
struct block_device *bdev;
char b[BDEVNAME_SIZE];
@@ -342,8 +353,9 @@ static struct block_device *ext3_blkdev_get(dev_t dev)
return bdev;
fail:
- printk(KERN_ERR "EXT3: failed to open journal device %s: %ld\n",
- __bdevname(dev, b), PTR_ERR(bdev));
+ ext3_msg(sb, "error: failed to open journal device %s: %ld",
+ __bdevname(dev, b), PTR_ERR(bdev));
+
return NULL;
}
@@ -378,13 +390,13 @@ static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
{
struct list_head *l;
- printk(KERN_ERR "sb orphan head is %d\n",
+ ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d",
le32_to_cpu(sbi->s_es->s_last_orphan));
- printk(KERN_ERR "sb_info orphan list:\n");
+ ext3_msg(sb, KERN_ERR, "sb_info orphan list:");
list_for_each(l, &sbi->s_orphan) {
struct inode *inode = orphan_list_entry(l);
- printk(KERN_ERR " "
+ ext3_msg(sb, KERN_ERR, " "
"inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
inode->i_sb->s_id, inode->i_ino, inode,
inode->i_mode, inode->i_nlink,
@@ -527,9 +539,22 @@ static inline void ext3_show_quota_options(struct seq_file *seq, struct super_bl
#if defined(CONFIG_QUOTA)
struct ext3_sb_info *sbi = EXT3_SB(sb);
- if (sbi->s_jquota_fmt)
- seq_printf(seq, ",jqfmt=%s",
- (sbi->s_jquota_fmt == QFMT_VFS_OLD) ? "vfsold": "vfsv0");
+ if (sbi->s_jquota_fmt) {
+ char *fmtname = "";
+
+ switch (sbi->s_jquota_fmt) {
+ case QFMT_VFS_OLD:
+ fmtname = "vfsold";
+ break;
+ case QFMT_VFS_V0:
+ fmtname = "vfsv0";
+ break;
+ case QFMT_VFS_V1:
+ fmtname = "vfsv1";
+ break;
+ }
+ seq_printf(seq, ",jqfmt=%s", fmtname);
+ }
if (sbi->s_qf_names[USRQUOTA])
seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
@@ -636,6 +661,9 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, DATA_ERR_ABORT))
seq_puts(seq, ",data_err=abort");
+ if (test_opt(sb, NOLOAD))
+ seq_puts(seq, ",norecovery");
+
ext3_show_quota_options(seq, sb);
return 0;
@@ -787,9 +815,9 @@ enum {
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
- Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_grpquota
+ Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
+ Opt_noquota, Opt_ignore, Opt_barrier, Opt_err, Opt_resize,
+ Opt_usrquota, Opt_grpquota
};
static const match_table_t tokens = {
@@ -818,6 +846,7 @@ static const match_table_t tokens = {
{Opt_reservation, "reservation"},
{Opt_noreservation, "noreservation"},
{Opt_noload, "noload"},
+ {Opt_noload, "norecovery"},
{Opt_nobh, "nobh"},
{Opt_bh, "bh"},
{Opt_commit, "commit=%u"},
@@ -836,6 +865,7 @@ static const match_table_t tokens = {
{Opt_grpjquota, "grpjquota=%s"},
{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
+ {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
{Opt_grpquota, "grpquota"},
{Opt_noquota, "noquota"},
{Opt_quota, "quota"},
@@ -845,7 +875,7 @@ static const match_table_t tokens = {
{Opt_err, NULL},
};
-static ext3_fsblk_t get_sb_block(void **data)
+static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
{
ext3_fsblk_t sb_block;
char *options = (char *) *data;
@@ -856,7 +886,7 @@ static ext3_fsblk_t get_sb_block(void **data)
/*todo: use simple_strtoll with >32bit ext3 */
sb_block = simple_strtoul(options, &options, 0);
if (*options && *options != ',') {
- printk("EXT3-fs: Invalid sb specification: %s\n",
+ ext3_msg(sb, "error: invalid sb specification: %s",
(char *) *data);
return 1;
}
@@ -956,7 +986,8 @@ static int parse_options (char *options, struct super_block *sb,
#else
case Opt_user_xattr:
case Opt_nouser_xattr:
- printk("EXT3 (no)user_xattr options not supported\n");
+ ext3_msg(sb, KERN_INFO,
+ "(no)user_xattr options not supported");
break;
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
@@ -969,7 +1000,8 @@ static int parse_options (char *options, struct super_block *sb,
#else
case Opt_acl:
case Opt_noacl:
- printk("EXT3 (no)acl options not supported\n");
+ ext3_msg(sb, KERN_INFO,
+ "(no)acl options not supported");
break;
#endif
case Opt_reservation:
@@ -985,16 +1017,16 @@ static int parse_options (char *options, struct super_block *sb,
user to specify an existing inode to be the
journal file. */
if (is_remount) {
- printk(KERN_ERR "EXT3-fs: cannot specify "
- "journal on remount\n");
+ ext3_msg(sb, KERN_ERR, "error: cannot specify "
+ "journal on remount");
return 0;
}
set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
break;
case Opt_journal_inum:
if (is_remount) {
- printk(KERN_ERR "EXT3-fs: cannot specify "
- "journal on remount\n");
+ ext3_msg(sb, KERN_ERR, "error: cannot specify "
+ "journal on remount");
return 0;
}
if (match_int(&args[0], &option))
@@ -1003,8 +1035,8 @@ static int parse_options (char *options, struct super_block *sb,
break;
case Opt_journal_dev:
if (is_remount) {
- printk(KERN_ERR "EXT3-fs: cannot specify "
- "journal on remount\n");
+ ext3_msg(sb, KERN_ERR, "error: cannot specify "
+ "journal on remount");
return 0;
}
if (match_int(&args[0], &option))
@@ -1036,12 +1068,11 @@ static int parse_options (char *options, struct super_block *sb,
if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS)
== data_opt)
break;
- printk(KERN_ERR
- "EXT3-fs (device %s): Cannot change "
+ ext3_msg(sb, KERN_ERR,
+ "error: cannot change "
"data mode on remount. The filesystem "
"is mounted in data=%s mode and you "
- "try to remount it in data=%s mode.\n",
- sb->s_id,
+ "try to remount it in data=%s mode.",
data_mode_string(sbi->s_mount_opt &
EXT3_MOUNT_DATA_FLAGS),
data_mode_string(data_opt));
@@ -1066,31 +1097,31 @@ static int parse_options (char *options, struct super_block *sb,
set_qf_name:
if (sb_any_quota_loaded(sb) &&
!sbi->s_qf_names[qtype]) {
- printk(KERN_ERR
- "EXT3-fs: Cannot change journaled "
- "quota options when quota turned on.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: cannot change journaled "
+ "quota options when quota turned on.");
return 0;
}
qname = match_strdup(&args[0]);
if (!qname) {
- printk(KERN_ERR
- "EXT3-fs: not enough memory for "
- "storing quotafile name.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: not enough memory for "
+ "storing quotafile name.");
return 0;
}
if (sbi->s_qf_names[qtype] &&
strcmp(sbi->s_qf_names[qtype], qname)) {
- printk(KERN_ERR
- "EXT3-fs: %s quota file already "
- "specified.\n", QTYPE2NAME(qtype));
+ ext3_msg(sb, KERN_ERR,
+ "error: %s quota file already "
+ "specified.", QTYPE2NAME(qtype));
kfree(qname);
return 0;
}
sbi->s_qf_names[qtype] = qname;
if (strchr(sbi->s_qf_names[qtype], '/')) {
- printk(KERN_ERR
- "EXT3-fs: quotafile must be on "
- "filesystem root.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: quotafile must be on "
+ "filesystem root.");
kfree(sbi->s_qf_names[qtype]);
sbi->s_qf_names[qtype] = NULL;
return 0;
@@ -1105,9 +1136,9 @@ set_qf_name:
clear_qf_name:
if (sb_any_quota_loaded(sb) &&
sbi->s_qf_names[qtype]) {
- printk(KERN_ERR "EXT3-fs: Cannot change "
+ ext3_msg(sb, KERN_ERR, "error: cannot change "
"journaled quota options when "
- "quota turned on.\n");
+ "quota turned on.");
return 0;
}
/*
@@ -1121,12 +1152,15 @@ clear_qf_name:
goto set_qf_format;
case Opt_jqfmt_vfsv0:
qfmt = QFMT_VFS_V0;
+ goto set_qf_format;
+ case Opt_jqfmt_vfsv1:
+ qfmt = QFMT_VFS_V1;
set_qf_format:
if (sb_any_quota_loaded(sb) &&
sbi->s_jquota_fmt != qfmt) {
- printk(KERN_ERR "EXT3-fs: Cannot change "
+ ext3_msg(sb, KERN_ERR, "error: cannot change "
"journaled quota options when "
- "quota turned on.\n");
+ "quota turned on.");
return 0;
}
sbi->s_jquota_fmt = qfmt;
@@ -1142,8 +1176,8 @@ set_qf_format:
break;
case Opt_noquota:
if (sb_any_quota_loaded(sb)) {
- printk(KERN_ERR "EXT3-fs: Cannot change quota "
- "options when quota turned on.\n");
+ ext3_msg(sb, KERN_ERR, "error: cannot change "
+ "quota options when quota turned on.");
return 0;
}
clear_opt(sbi->s_mount_opt, QUOTA);
@@ -1154,8 +1188,8 @@ set_qf_format:
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
- printk(KERN_ERR
- "EXT3-fs: quota options not supported.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: quota options not supported.");
break;
case Opt_usrjquota:
case Opt_grpjquota:
@@ -1163,9 +1197,10 @@ set_qf_format:
case Opt_offgrpjquota:
case Opt_jqfmt_vfsold:
case Opt_jqfmt_vfsv0:
- printk(KERN_ERR
- "EXT3-fs: journaled quota options not "
- "supported.\n");
+ case Opt_jqfmt_vfsv1:
+ ext3_msg(sb, KERN_ERR,
+ "error: journaled quota options not "
+ "supported.");
break;
case Opt_noquota:
break;
@@ -1185,8 +1220,9 @@ set_qf_format:
break;
case Opt_resize:
if (!is_remount) {
- printk("EXT3-fs: resize option only available "
- "for remount\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: resize option only available "
+ "for remount");
return 0;
}
if (match_int(&args[0], &option) != 0)
@@ -1200,9 +1236,9 @@ set_qf_format:
clear_opt(sbi->s_mount_opt, NOBH);
break;
default:
- printk (KERN_ERR
- "EXT3-fs: Unrecognized mount option \"%s\" "
- "or missing value\n", p);
+ ext3_msg(sb, KERN_ERR,
+ "error: unrecognized mount option \"%s\" "
+ "or missing value", p);
return 0;
}
}
@@ -1220,21 +1256,21 @@ set_qf_format:
(sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)) ||
(sbi->s_qf_names[GRPQUOTA] &&
(sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA))) {
- printk(KERN_ERR "EXT3-fs: old and new quota "
- "format mixing.\n");
+ ext3_msg(sb, KERN_ERR, "error: old and new quota "
+ "format mixing.");
return 0;
}
if (!sbi->s_jquota_fmt) {
- printk(KERN_ERR "EXT3-fs: journaled quota format "
- "not specified.\n");
+ ext3_msg(sb, KERN_ERR, "error: journaled quota format "
+ "not specified.");
return 0;
}
} else {
if (sbi->s_jquota_fmt) {
- printk(KERN_ERR "EXT3-fs: journaled quota format "
+ ext3_msg(sb, KERN_ERR, "error: journaled quota format "
"specified with no journaling "
- "enabled.\n");
+ "enabled.");
return 0;
}
}
@@ -1249,31 +1285,33 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
int res = 0;
if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
- printk (KERN_ERR "EXT3-fs warning: revision level too high, "
- "forcing read-only mode\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: revision level too high, "
+ "forcing read-only mode");
res = MS_RDONLY;
}
if (read_only)
return res;
if (!(sbi->s_mount_state & EXT3_VALID_FS))
- printk (KERN_WARNING "EXT3-fs warning: mounting unchecked fs, "
- "running e2fsck is recommended\n");
+ ext3_msg(sb, KERN_WARNING,
+ "warning: mounting unchecked fs, "
+ "running e2fsck is recommended");
else if ((sbi->s_mount_state & EXT3_ERROR_FS))
- printk (KERN_WARNING
- "EXT3-fs warning: mounting fs with errors, "
- "running e2fsck is recommended\n");
+ ext3_msg(sb, KERN_WARNING,
+ "warning: mounting fs with errors, "
+ "running e2fsck is recommended");
else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
le16_to_cpu(es->s_mnt_count) >=
(unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
- printk (KERN_WARNING
- "EXT3-fs warning: maximal mount count reached, "
- "running e2fsck is recommended\n");
+ ext3_msg(sb, KERN_WARNING,
+ "warning: maximal mount count reached, "
+ "running e2fsck is recommended");
else if (le32_to_cpu(es->s_checkinterval) &&
(le32_to_cpu(es->s_lastcheck) +
le32_to_cpu(es->s_checkinterval) <= get_seconds()))
- printk (KERN_WARNING
- "EXT3-fs warning: checktime reached, "
- "running e2fsck is recommended\n");
+ ext3_msg(sb, KERN_WARNING,
+ "warning: checktime reached, "
+ "running e2fsck is recommended");
#if 0
/* @@@ We _will_ want to clear the valid bit if we find
inconsistencies, to force a fsck at reboot. But for
@@ -1290,22 +1328,20 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
ext3_commit_super(sb, es, 1);
if (test_opt(sb, DEBUG))
- printk(KERN_INFO "[EXT3 FS bs=%lu, gc=%lu, "
- "bpg=%lu, ipg=%lu, mo=%04lx]\n",
+ ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
+ "bpg=%lu, ipg=%lu, mo=%04lx]",
sb->s_blocksize,
sbi->s_groups_count,
EXT3_BLOCKS_PER_GROUP(sb),
EXT3_INODES_PER_GROUP(sb),
sbi->s_mount_opt);
- printk(KERN_INFO "EXT3 FS on %s, ", sb->s_id);
if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
char b[BDEVNAME_SIZE];
-
- printk("external journal on %s\n",
+ ext3_msg(sb, KERN_INFO, "using external journal on %s",
bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
} else {
- printk("internal journal\n");
+ ext3_msg(sb, KERN_INFO, "using internal journal");
}
return res;
}
@@ -1399,8 +1435,8 @@ static void ext3_orphan_cleanup (struct super_block * sb,
}
if (bdev_read_only(sb->s_bdev)) {
- printk(KERN_ERR "EXT3-fs: write access "
- "unavailable, skipping orphan cleanup.\n");
+ ext3_msg(sb, KERN_ERR, "error: write access "
+ "unavailable, skipping orphan cleanup.");
return;
}
@@ -1414,8 +1450,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
}
if (s_flags & MS_RDONLY) {
- printk(KERN_INFO "EXT3-fs: %s: orphan cleanup on readonly fs\n",
- sb->s_id);
+ ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
sb->s_flags &= ~MS_RDONLY;
}
#ifdef CONFIG_QUOTA
@@ -1426,9 +1461,9 @@ static void ext3_orphan_cleanup (struct super_block * sb,
if (EXT3_SB(sb)->s_qf_names[i]) {
int ret = ext3_quota_on_mount(sb, i);
if (ret < 0)
- printk(KERN_ERR
- "EXT3-fs: Cannot turn on journaled "
- "quota: error %d\n", ret);
+ ext3_msg(sb, KERN_ERR,
+ "error: cannot turn on journaled "
+ "quota: %d", ret);
}
}
#endif
@@ -1466,11 +1501,11 @@ static void ext3_orphan_cleanup (struct super_block * sb,
#define PLURAL(x) (x), ((x)==1) ? "" : "s"
if (nr_orphans)
- printk(KERN_INFO "EXT3-fs: %s: %d orphan inode%s deleted\n",
- sb->s_id, PLURAL(nr_orphans));
+ ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
+ PLURAL(nr_orphans));
if (nr_truncates)
- printk(KERN_INFO "EXT3-fs: %s: %d truncate%s cleaned up\n",
- sb->s_id, PLURAL(nr_truncates));
+ ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+ PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
/* Turn quotas off */
for (i = 0; i < MAXQUOTAS; i++) {
@@ -1554,7 +1589,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
struct ext3_super_block *es = NULL;
struct ext3_sb_info *sbi;
ext3_fsblk_t block;
- ext3_fsblk_t sb_block = get_sb_block(&data);
+ ext3_fsblk_t sb_block = get_sb_block(&data, sb);
ext3_fsblk_t logic_sb_block;
unsigned long offset = 0;
unsigned int journal_inum = 0;
@@ -1590,7 +1625,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
if (!blocksize) {
- printk(KERN_ERR "EXT3-fs: unable to set blocksize\n");
+ ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
goto out_fail;
}
@@ -1606,7 +1641,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
}
if (!(bh = sb_bread(sb, logic_sb_block))) {
- printk (KERN_ERR "EXT3-fs: unable to read superblock\n");
+ ext3_msg(sb, KERN_ERR, "error: unable to read superblock");
goto out_fail;
}
/*
@@ -1665,9 +1700,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
(EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
- printk(KERN_WARNING
- "EXT3-fs warning: feature flags set on rev 0 fs, "
- "running e2fsck is recommended\n");
+ ext3_msg(sb, KERN_WARNING,
+ "warning: feature flags set on rev 0 fs, "
+ "running e2fsck is recommended");
/*
* Check feature flags regardless of the revision level, since we
* previously didn't change the revision level when setting the flags,
@@ -1675,25 +1710,25 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
*/
features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
if (features) {
- printk(KERN_ERR "EXT3-fs: %s: couldn't mount because of "
- "unsupported optional features (%x).\n",
- sb->s_id, le32_to_cpu(features));
+ ext3_msg(sb, KERN_ERR,
+ "error: couldn't mount because of unsupported "
+ "optional features (%x)", le32_to_cpu(features));
goto failed_mount;
}
features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
if (!(sb->s_flags & MS_RDONLY) && features) {
- printk(KERN_ERR "EXT3-fs: %s: couldn't mount RDWR because of "
- "unsupported optional features (%x).\n",
- sb->s_id, le32_to_cpu(features));
+ ext3_msg(sb, KERN_ERR,
+ "error: couldn't mount RDWR because of unsupported "
+ "optional features (%x)", le32_to_cpu(features));
goto failed_mount;
}
blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
if (blocksize < EXT3_MIN_BLOCK_SIZE ||
blocksize > EXT3_MAX_BLOCK_SIZE) {
- printk(KERN_ERR
- "EXT3-fs: Unsupported filesystem blocksize %d on %s.\n",
- blocksize, sb->s_id);
+ ext3_msg(sb, KERN_ERR,
+ "error: couldn't mount because of unsupported "
+ "filesystem blocksize %d", blocksize);
goto failed_mount;
}
@@ -1704,30 +1739,31 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
* than the hardware sectorsize for the machine.
*/
if (blocksize < hblock) {
- printk(KERN_ERR "EXT3-fs: blocksize %d too small for "
- "device blocksize %d.\n", blocksize, hblock);
+ ext3_msg(sb, KERN_ERR,
+ "error: fsblocksize %d too small for "
+ "hardware sectorsize %d", blocksize, hblock);
goto failed_mount;
}
brelse (bh);
if (!sb_set_blocksize(sb, blocksize)) {
- printk(KERN_ERR "EXT3-fs: bad blocksize %d.\n",
- blocksize);
+ ext3_msg(sb, KERN_ERR,
+ "error: bad blocksize %d", blocksize);
goto out_fail;
}
logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
bh = sb_bread(sb, logic_sb_block);
if (!bh) {
- printk(KERN_ERR
- "EXT3-fs: Can't read superblock on 2nd try.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: can't read superblock on 2nd try");
goto failed_mount;
}
es = (struct ext3_super_block *)(((char *)bh->b_data) + offset);
sbi->s_es = es;
if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
- printk (KERN_ERR
- "EXT3-fs: Magic mismatch, very weird !\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: magic mismatch");
goto failed_mount;
}
}
@@ -1743,8 +1779,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
- printk (KERN_ERR
- "EXT3-fs: unsupported inode size: %d\n",
+ ext3_msg(sb, KERN_ERR,
+ "error: unsupported inode size: %d",
sbi->s_inode_size);
goto failed_mount;
}
@@ -1752,8 +1788,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
le32_to_cpu(es->s_log_frag_size);
if (blocksize != sbi->s_frag_size) {
- printk(KERN_ERR
- "EXT3-fs: fragsize %lu != blocksize %u (unsupported)\n",
+ ext3_msg(sb, KERN_ERR,
+ "error: fragsize %lu != blocksize %u (unsupported)",
sbi->s_frag_size, blocksize);
goto failed_mount;
}
@@ -1789,31 +1825,31 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
}
if (sbi->s_blocks_per_group > blocksize * 8) {
- printk (KERN_ERR
- "EXT3-fs: #blocks per group too big: %lu\n",
+ ext3_msg(sb, KERN_ERR,
+ "#blocks per group too big: %lu",
sbi->s_blocks_per_group);
goto failed_mount;
}
if (sbi->s_frags_per_group > blocksize * 8) {
- printk (KERN_ERR
- "EXT3-fs: #fragments per group too big: %lu\n",
+ ext3_msg(sb, KERN_ERR,
+ "error: #fragments per group too big: %lu",
sbi->s_frags_per_group);
goto failed_mount;
}
if (sbi->s_inodes_per_group > blocksize * 8) {
- printk (KERN_ERR
- "EXT3-fs: #inodes per group too big: %lu\n",
+ ext3_msg(sb, KERN_ERR,
+ "error: #inodes per group too big: %lu",
sbi->s_inodes_per_group);
goto failed_mount;
}
if (le32_to_cpu(es->s_blocks_count) >
(sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
- printk(KERN_ERR "EXT3-fs: filesystem on %s:"
- " too large to mount safely\n", sb->s_id);
+ ext3_msg(sb, KERN_ERR,
+ "error: filesystem is too large to mount safely");
if (sizeof(sector_t) < 8)
- printk(KERN_WARNING "EXT3-fs: CONFIG_LBDAF not "
- "enabled\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: CONFIG_LBDAF not enabled");
goto failed_mount;
}
@@ -1827,7 +1863,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
GFP_KERNEL);
if (sbi->s_group_desc == NULL) {
- printk (KERN_ERR "EXT3-fs: not enough memory\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: not enough memory");
goto failed_mount;
}
@@ -1837,14 +1874,15 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
block = descriptor_loc(sb, logic_sb_block, i);
sbi->s_group_desc[i] = sb_bread(sb, block);
if (!sbi->s_group_desc[i]) {
- printk (KERN_ERR "EXT3-fs: "
- "can't read group descriptor %d\n", i);
+ ext3_msg(sb, KERN_ERR,
+ "error: can't read group descriptor %d", i);
db_count = i;
goto failed_mount2;
}
}
if (!ext3_check_descriptors (sb)) {
- printk(KERN_ERR "EXT3-fs: group descriptors corrupted!\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: group descriptors corrupted");
goto failed_mount2;
}
sbi->s_gdb_count = db_count;
@@ -1862,7 +1900,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
ext3_count_dirs(sb));
}
if (err) {
- printk(KERN_ERR "EXT3-fs: insufficient memory\n");
+ ext3_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount3;
}
@@ -1910,9 +1948,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount3;
} else {
if (!silent)
- printk (KERN_ERR
- "ext3: No journal on filesystem on %s\n",
- sb->s_id);
+ ext3_msg(sb, KERN_ERR,
+ "error: no journal found. "
+ "mounting ext3 over ext2?");
goto failed_mount3;
}
@@ -1934,8 +1972,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
case EXT3_MOUNT_WRITEBACK_DATA:
if (!journal_check_available_features
(sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
- printk(KERN_ERR "EXT3-fs: Journal does not support "
- "requested data journaling mode\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: journal does not support "
+ "requested data journaling mode");
goto failed_mount4;
}
default:
@@ -1944,8 +1983,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (test_opt(sb, NOBH)) {
if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
- printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - "
- "its supported only with writeback mode\n");
+ ext3_msg(sb, KERN_WARNING,
+ "warning: ignoring nobh option - "
+ "it is supported only with writeback mode");
clear_opt(sbi->s_mount_opt, NOBH);
}
}
@@ -1956,18 +1996,18 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
root = ext3_iget(sb, EXT3_ROOT_INO);
if (IS_ERR(root)) {
- printk(KERN_ERR "EXT3-fs: get root inode failed\n");
+ ext3_msg(sb, KERN_ERR, "error: get root inode failed");
ret = PTR_ERR(root);
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
- printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n");
+ ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
goto failed_mount4;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
- printk(KERN_ERR "EXT3-fs: get root dentry failed\n");
+ ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
iput(root);
ret = -ENOMEM;
goto failed_mount4;
@@ -1986,9 +2026,9 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
ext3_orphan_cleanup(sb, es);
EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
if (needs_recovery)
- printk (KERN_INFO "EXT3-fs: recovery complete.\n");
+ ext3_msg(sb, KERN_INFO, "recovery complete");
ext3_mark_recovery_complete(sb, es);
- printk (KERN_INFO "EXT3-fs: mounted filesystem with %s data mode.\n",
+ ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode",
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
"writeback");
@@ -1998,7 +2038,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
cantfind_ext3:
if (!silent)
- printk(KERN_ERR "VFS: Can't find ext3 filesystem on dev %s.\n",
+ ext3_msg(sb, KERN_INFO,
+ "error: can't find ext3 filesystem on dev %s.",
sb->s_id);
goto failed_mount;
@@ -2066,27 +2107,27 @@ static journal_t *ext3_get_journal(struct super_block *sb,
journal_inode = ext3_iget(sb, journal_inum);
if (IS_ERR(journal_inode)) {
- printk(KERN_ERR "EXT3-fs: no journal found.\n");
+ ext3_msg(sb, KERN_ERR, "error: no journal found");
return NULL;
}
if (!journal_inode->i_nlink) {
make_bad_inode(journal_inode);
iput(journal_inode);
- printk(KERN_ERR "EXT3-fs: journal inode is deleted.\n");
+ ext3_msg(sb, KERN_ERR, "error: journal inode is deleted");
return NULL;
}
jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
journal_inode, journal_inode->i_size);
if (!S_ISREG(journal_inode->i_mode)) {
- printk(KERN_ERR "EXT3-fs: invalid journal inode.\n");
+ ext3_msg(sb, KERN_ERR, "error: invalid journal inode");
iput(journal_inode);
return NULL;
}
journal = journal_init_inode(journal_inode);
if (!journal) {
- printk(KERN_ERR "EXT3-fs: Could not load journal inode\n");
+ ext3_msg(sb, KERN_ERR, "error: could not load journal inode");
iput(journal_inode);
return NULL;
}
@@ -2108,13 +2149,13 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
struct ext3_super_block * es;
struct block_device *bdev;
- bdev = ext3_blkdev_get(j_dev);
+ bdev = ext3_blkdev_get(j_dev, sb);
if (bdev == NULL)
return NULL;
if (bd_claim(bdev, sb)) {
- printk(KERN_ERR
- "EXT3: failed to claim external journal device.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: failed to claim external journal device");
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return NULL;
}
@@ -2122,8 +2163,8 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
- printk(KERN_ERR
- "EXT3-fs: blocksize too small for journal device.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: blocksize too small for journal device");
goto out_bdev;
}
@@ -2131,8 +2172,8 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
offset = EXT3_MIN_BLOCK_SIZE % blocksize;
set_blocksize(bdev, blocksize);
if (!(bh = __bread(bdev, sb_block, blocksize))) {
- printk(KERN_ERR "EXT3-fs: couldn't read superblock of "
- "external journal\n");
+ ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of "
+ "external journal");
goto out_bdev;
}
@@ -2140,14 +2181,14 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
!(le32_to_cpu(es->s_feature_incompat) &
EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
- printk(KERN_ERR "EXT3-fs: external journal has "
- "bad superblock\n");
+ ext3_msg(sb, KERN_ERR, "error: external journal has "
+ "bad superblock");
brelse(bh);
goto out_bdev;
}
if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
- printk(KERN_ERR "EXT3-fs: journal UUID does not match\n");
+ ext3_msg(sb, KERN_ERR, "error: journal UUID does not match");
brelse(bh);
goto out_bdev;
}
@@ -2159,19 +2200,21 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
journal = journal_init_dev(bdev, sb->s_bdev,
start, len, blocksize);
if (!journal) {
- printk(KERN_ERR "EXT3-fs: failed to create device journal\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: failed to create device journal");
goto out_bdev;
}
journal->j_private = sb;
ll_rw_block(READ, 1, &journal->j_sb_buffer);
wait_on_buffer(journal->j_sb_buffer);
if (!buffer_uptodate(journal->j_sb_buffer)) {
- printk(KERN_ERR "EXT3-fs: I/O error on journal device\n");
+ ext3_msg(sb, KERN_ERR, "I/O error on journal device");
goto out_journal;
}
if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
- printk(KERN_ERR "EXT3-fs: External journal has more than one "
- "user (unsupported) - %d\n",
+ ext3_msg(sb, KERN_ERR,
+ "error: external journal has more than one "
+ "user (unsupported) - %d",
be32_to_cpu(journal->j_superblock->s_nr_users));
goto out_journal;
}
@@ -2197,8 +2240,8 @@ static int ext3_load_journal(struct super_block *sb,
if (journal_devnum &&
journal_devnum != le32_to_cpu(es->s_journal_dev)) {
- printk(KERN_INFO "EXT3-fs: external journal device major/minor "
- "numbers have changed\n");
+ ext3_msg(sb, KERN_INFO, "external journal device major/minor "
+ "numbers have changed");
journal_dev = new_decode_dev(journal_devnum);
} else
journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
@@ -2213,21 +2256,21 @@ static int ext3_load_journal(struct super_block *sb,
if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) {
if (sb->s_flags & MS_RDONLY) {
- printk(KERN_INFO "EXT3-fs: INFO: recovery "
- "required on readonly filesystem.\n");
+ ext3_msg(sb, KERN_INFO,
+ "recovery required on readonly filesystem");
if (really_read_only) {
- printk(KERN_ERR "EXT3-fs: write access "
- "unavailable, cannot proceed.\n");
+ ext3_msg(sb, KERN_ERR, "error: write access "
+ "unavailable, cannot proceed");
return -EROFS;
}
- printk (KERN_INFO "EXT3-fs: write access will "
- "be enabled during recovery.\n");
+ ext3_msg(sb, KERN_INFO,
+ "write access will be enabled during recovery");
}
}
if (journal_inum && journal_dev) {
- printk(KERN_ERR "EXT3-fs: filesystem has both journal "
- "and inode journals!\n");
+ ext3_msg(sb, KERN_ERR, "error: filesystem has both journal "
+ "and inode journals");
return -EINVAL;
}
@@ -2242,7 +2285,7 @@ static int ext3_load_journal(struct super_block *sb,
if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
err = journal_update_format(journal);
if (err) {
- printk(KERN_ERR "EXT3-fs: error updating journal.\n");
+ ext3_msg(sb, KERN_ERR, "error updating journal");
journal_destroy(journal);
return err;
}
@@ -2254,7 +2297,7 @@ static int ext3_load_journal(struct super_block *sb,
err = journal_load(journal);
if (err) {
- printk(KERN_ERR "EXT3-fs: error loading journal.\n");
+ ext3_msg(sb, KERN_ERR, "error loading journal");
journal_destroy(journal);
return err;
}
@@ -2273,16 +2316,17 @@ static int ext3_load_journal(struct super_block *sb,
return 0;
}
-static int ext3_create_journal(struct super_block * sb,
- struct ext3_super_block * es,
+static int ext3_create_journal(struct super_block *sb,
+ struct ext3_super_block *es,
unsigned int journal_inum)
{
journal_t *journal;
int err;
if (sb->s_flags & MS_RDONLY) {
- printk(KERN_ERR "EXT3-fs: readonly filesystem when trying to "
- "create journal.\n");
+ ext3_msg(sb, KERN_ERR,
+ "error: readonly filesystem when trying to "
+ "create journal");
return -EROFS;
}
@@ -2290,12 +2334,12 @@ static int ext3_create_journal(struct super_block * sb,
if (!journal)
return -EINVAL;
- printk(KERN_INFO "EXT3-fs: creating new journal on inode %u\n",
+ ext3_msg(sb, KERN_INFO, "creating new journal on inode %u",
journal_inum);
err = journal_create(journal);
if (err) {
- printk(KERN_ERR "EXT3-fs: error creating journal.\n");
+ ext3_msg(sb, KERN_ERR, "error creating journal");
journal_destroy(journal);
return -EIO;
}
@@ -2376,8 +2420,8 @@ out:
* has recorded an error from a previous lifetime, move that error to the
* main filesystem now.
*/
-static void ext3_clear_journal_err(struct super_block * sb,
- struct ext3_super_block * es)
+static void ext3_clear_journal_err(struct super_block *sb,
+ struct ext3_super_block *es)
{
journal_t *journal;
int j_errno;
@@ -2568,10 +2612,10 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
__le32 ret;
if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb,
~EXT3_FEATURE_RO_COMPAT_SUPP))) {
- printk(KERN_WARNING "EXT3-fs: %s: couldn't "
- "remount RDWR because of unsupported "
- "optional features (%x).\n",
- sb->s_id, le32_to_cpu(ret));
+ ext3_msg(sb, KERN_WARNING,
+ "warning: couldn't remount RDWR "
+ "because of unsupported optional "
+ "features (%x)", le32_to_cpu(ret));
err = -EROFS;
goto restore_opts;
}
@@ -2582,11 +2626,10 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
* require a full umount/remount for now.
*/
if (es->s_last_orphan) {
- printk(KERN_WARNING "EXT3-fs: %s: couldn't "
+ ext3_msg(sb, KERN_WARNING, "warning: couldn't "
"remount RDWR because of unprocessed "
"orphan inode list. Please "
- "umount/remount instead.\n",
- sb->s_id);
+ "umount/remount instead.");
err = -EINVAL;
goto restore_opts;
}
@@ -2686,13 +2729,11 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
- es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
buf->f_bavail = 0;
buf->f_files = le32_to_cpu(es->s_inodes_count);
buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
- es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
buf->f_namelen = EXT3_NAME_LEN;
fsid = le64_to_cpup((void *)es->s_uuid) ^
le64_to_cpup((void *)es->s_uuid + sizeof(u64));
@@ -2837,9 +2878,9 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
if (EXT3_SB(sb)->s_qf_names[type]) {
/* Quotafile not of fs root? */
if (path.dentry->d_parent != sb->s_root)
- printk(KERN_WARNING
- "EXT3-fs: Quota file not on filesystem root. "
- "Journaled quota will not work.\n");
+ ext3_msg(sb, KERN_WARNING,
+ "warning: Quota file not on filesystem root. "
+ "Journaled quota will not work.");
}
/*
@@ -2921,8 +2962,9 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
handle_t *handle = journal_current_handle();
if (!handle) {
- printk(KERN_WARNING "EXT3-fs: Quota write (off=%Lu, len=%Lu)"
- " cancelled because transaction is not started.\n",
+ ext3_msg(sb, KERN_WARNING,
+ "warning: quota write (off=%llu, len=%llu)"
+ " cancelled because transaction is not started.",
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
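For reference, the ext3_msg() helper that all of the printk conversions above rely on is introduced earlier in this patch (outside this excerpt). A minimal sketch of its shape, assuming the usual printk-based implementation, is:

void ext3_msg(struct super_block *sb, const char *prefix,
	      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	/* prefix is a KERN_* level; sb->s_id names the device, so callers
	 * no longer have to spell out "EXT3-fs" or pass sb->s_id themselves. */
	printk("%sEXT3-fs (%s): ", prefix, sb->s_id);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);
}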
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 545e37c4b91..66895ccf76c 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -99,7 +99,7 @@ static struct buffer_head *ext3_xattr_cache_find(struct inode *,
struct mb_cache_entry **);
static void ext3_xattr_rehash(struct ext3_xattr_header *,
struct ext3_xattr_entry *);
-static int ext3_xattr_list(struct inode *inode, char *buffer,
+static int ext3_xattr_list(struct dentry *dentry, char *buffer,
size_t buffer_size);
static struct mb_cache *ext3_xattr_cache;
@@ -147,7 +147,7 @@ ext3_xattr_handler(int name_index)
ssize_t
ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- return ext3_xattr_list(dentry->d_inode, buffer, size);
+ return ext3_xattr_list(dentry, buffer, size);
}
static int
@@ -332,7 +332,7 @@ ext3_xattr_get(struct inode *inode, int name_index, const char *name,
}
static int
-ext3_xattr_list_entries(struct inode *inode, struct ext3_xattr_entry *entry,
+ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
char *buffer, size_t buffer_size)
{
size_t rest = buffer_size;
@@ -342,9 +342,10 @@ ext3_xattr_list_entries(struct inode *inode, struct ext3_xattr_entry *entry,
ext3_xattr_handler(entry->e_name_index);
if (handler) {
- size_t size = handler->list(inode, buffer, rest,
+ size_t size = handler->list(dentry, buffer, rest,
entry->e_name,
- entry->e_name_len);
+ entry->e_name_len,
+ handler->flags);
if (buffer) {
if (size > rest)
return -ERANGE;
@@ -357,8 +358,9 @@ ext3_xattr_list_entries(struct inode *inode, struct ext3_xattr_entry *entry,
}
static int
-ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
+ext3_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
+ struct inode *inode = dentry->d_inode;
struct buffer_head *bh = NULL;
int error;
@@ -383,7 +385,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
goto cleanup;
}
ext3_xattr_cache_insert(bh);
- error = ext3_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
+ error = ext3_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
cleanup:
brelse(bh);
@@ -392,8 +394,9 @@ cleanup:
}
static int
-ext3_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
+ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
+ struct inode *inode = dentry->d_inode;
struct ext3_xattr_ibody_header *header;
struct ext3_inode *raw_inode;
struct ext3_iloc iloc;
@@ -411,7 +414,7 @@ ext3_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
error = ext3_xattr_check_names(IFIRST(header), end);
if (error)
goto cleanup;
- error = ext3_xattr_list_entries(inode, IFIRST(header),
+ error = ext3_xattr_list_entries(dentry, IFIRST(header),
buffer, buffer_size);
cleanup:
@@ -430,12 +433,12 @@ cleanup:
* used / required on success.
*/
static int
-ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
+ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int i_error, b_error;
- down_read(&EXT3_I(inode)->xattr_sem);
- i_error = ext3_xattr_ibody_list(inode, buffer, buffer_size);
+ down_read(&EXT3_I(dentry->d_inode)->xattr_sem);
+ i_error = ext3_xattr_ibody_list(dentry, buffer, buffer_size);
if (i_error < 0) {
b_error = 0;
} else {
@@ -443,11 +446,11 @@ ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
buffer += i_error;
buffer_size -= i_error;
}
- b_error = ext3_xattr_block_list(inode, buffer, buffer_size);
+ b_error = ext3_xattr_block_list(dentry, buffer, buffer_size);
if (b_error < 0)
i_error = 0;
}
- up_read(&EXT3_I(inode)->xattr_sem);
+ up_read(&EXT3_I(dentry->d_inode)->xattr_sem);
return i_error + b_error;
}
@@ -960,6 +963,10 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (error)
goto cleanup;
+ error = ext3_journal_get_write_access(handle, is.iloc.bh);
+ if (error)
+ goto cleanup;
+
if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
@@ -985,9 +992,6 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (flags & XATTR_CREATE)
goto cleanup;
}
- error = ext3_journal_get_write_access(handle, is.iloc.bh);
- if (error)
- goto cleanup;
if (!value) {
if (!is.s.not_found)
error = ext3_xattr_ibody_set(handle, inode, &i, &is);
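The handler-signature changes in this and the following xattr files track the generic struct xattr_handler of this kernel generation, which passes a dentry plus the handler's own flags instead of a bare inode. Roughly, as a sketch (see include/linux/xattr.h for the authoritative definition):

struct xattr_handler {
	const char *prefix;
	int flags;	/* fs-private value, handed back as handler_flags */
	size_t (*list)(struct dentry *dentry, char *list, size_t list_size,
		       const char *name, size_t name_len, int handler_flags);
	int (*get)(struct dentry *dentry, const char *name, void *buffer,
		   size_t size, int handler_flags);
	int (*set)(struct dentry *dentry, const char *name, const void *value,
		   size_t size, int flags, int handler_flags);
};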
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index 37b81097bdf..474348788dd 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -12,8 +12,8 @@
#include "xattr.h"
static size_t
-ext3_xattr_security_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext3_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
@@ -28,23 +28,23 @@ ext3_xattr_security_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext3_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext3_xattr_security_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_get(inode, EXT3_XATTR_INDEX_SECURITY, name,
- buffer, size);
+ return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
+ name, buffer, size);
}
static int
-ext3_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext3_xattr_security_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_set(inode, EXT3_XATTR_INDEX_SECURITY, name,
- value, size, flags);
+ return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
+ name, value, size, flags);
}
int
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index c7c41a410c4..e5562845ed9 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -14,8 +14,8 @@
#include "xattr.h"
static size_t
-ext3_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext3_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
@@ -32,22 +32,22 @@ ext3_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext3_xattr_trusted_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext3_xattr_trusted_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED, name,
- buffer, size);
+ return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_TRUSTED,
+ name, buffer, size);
}
static int
-ext3_xattr_trusted_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext3_xattr_trusted_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext3_xattr_set(inode, EXT3_XATTR_INDEX_TRUSTED, name,
+ return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index 430fe63b31b..3bcfe9ee0a6 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -13,13 +13,13 @@
#include "xattr.h"
static size_t
-ext3_xattr_user_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext3_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t prefix_len = XATTR_USER_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return 0;
if (list && total_len <= list_size) {
@@ -31,26 +31,27 @@ ext3_xattr_user_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext3_xattr_user_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext3_xattr_user_get(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER, name, buffer, size);
+ return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_USER,
+ name, buffer, size);
}
static int
-ext3_xattr_user_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext3_xattr_user_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext3_xattr_set(inode, EXT3_XATTR_INDEX_USER, name,
- value, size, flags);
+ return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_USER,
+ name, value, size, flags);
}
struct xattr_handler ext3_xattr_user_handler = {
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 0df88b2a69b..8a2a29d35a6 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -364,12 +364,12 @@ out:
* Extended attribute handlers
*/
static size_t
-ext4_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len,
- const char *name, size_t name_len)
+ext4_xattr_list_acl_access(struct dentry *dentry, char *list, size_t list_len,
+ const char *name, size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return 0;
if (list && size <= list_len)
memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
@@ -377,12 +377,12 @@ ext4_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len,
}
static size_t
-ext4_xattr_list_acl_default(struct inode *inode, char *list, size_t list_len,
- const char *name, size_t name_len)
+ext4_xattr_list_acl_default(struct dentry *dentry, char *list, size_t list_len,
+ const char *name, size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return 0;
if (list && size <= list_len)
memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
@@ -390,15 +390,18 @@ ext4_xattr_list_acl_default(struct inode *inode, char *list, size_t list_len,
}
static int
-ext4_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+ext4_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
{
struct posix_acl *acl;
int error;
- if (!test_opt(inode->i_sb, POSIX_ACL))
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+ if (!test_opt(dentry->d_sb, POSIX_ACL))
return -EOPNOTSUPP;
- acl = ext4_get_acl(inode, type);
+ acl = ext4_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -410,31 +413,16 @@ ext4_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
}
static int
-ext4_xattr_get_acl_access(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext4_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
-}
-
-static int
-ext4_xattr_get_acl_default(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext4_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
-}
-
-static int
-ext4_xattr_set_acl(struct inode *inode, int type, const void *value,
- size_t size)
+ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags, int type)
{
+ struct inode *inode = dentry->d_inode;
handle_t *handle;
struct posix_acl *acl;
int error, retries = 0;
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
if (!test_opt(inode->i_sb, POSIX_ACL))
return -EOPNOTSUPP;
if (!is_owner_or_cap(inode))
@@ -466,34 +454,18 @@ release_and_out:
return error;
}
-static int
-ext4_xattr_set_acl_access(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext4_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
-}
-
-static int
-ext4_xattr_set_acl_default(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ext4_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
-}
-
struct xattr_handler ext4_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
.list = ext4_xattr_list_acl_access,
- .get = ext4_xattr_get_acl_access,
- .set = ext4_xattr_set_acl_access,
+ .get = ext4_xattr_get_acl,
+ .set = ext4_xattr_set_acl,
};
struct xattr_handler ext4_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
.list = ext4_xattr_list_acl_default,
- .get = ext4_xattr_get_acl_default,
- .set = ext4_xattr_set_acl_default,
+ .get = ext4_xattr_get_acl,
+ .set = ext4_xattr_set_acl,
};
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c1e19d5b598..b1fd3daadc9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3955,7 +3955,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
* per cpu locality group is to reduce the contention between block
* request from multiple CPUs.
*/
- ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
+ ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
/* we're going to use group allocation */
ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8b58a144c31..827bde1f259 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -769,9 +769,22 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
#if defined(CONFIG_QUOTA)
struct ext4_sb_info *sbi = EXT4_SB(sb);
- if (sbi->s_jquota_fmt)
- seq_printf(seq, ",jqfmt=%s",
- (sbi->s_jquota_fmt == QFMT_VFS_OLD) ? "vfsold" : "vfsv0");
+ if (sbi->s_jquota_fmt) {
+ char *fmtname = "";
+
+ switch (sbi->s_jquota_fmt) {
+ case QFMT_VFS_OLD:
+ fmtname = "vfsold";
+ break;
+ case QFMT_VFS_V0:
+ fmtname = "vfsv0";
+ break;
+ case QFMT_VFS_V1:
+ fmtname = "vfsv1";
+ break;
+ }
+ seq_printf(seq, ",jqfmt=%s", fmtname);
+ }
if (sbi->s_qf_names[USRQUOTA])
seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
@@ -1084,9 +1097,9 @@ enum {
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
- Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
- Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize,
- Opt_usrquota, Opt_grpquota, Opt_i_version,
+ Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
+ Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
+ Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
Opt_stripe, Opt_delalloc, Opt_nodelalloc,
Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
@@ -1137,6 +1150,7 @@ static const match_table_t tokens = {
{Opt_grpjquota, "grpjquota=%s"},
{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
+ {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
{Opt_grpquota, "grpquota"},
{Opt_noquota, "noquota"},
{Opt_quota, "quota"},
@@ -1439,6 +1453,9 @@ clear_qf_name:
goto set_qf_format;
case Opt_jqfmt_vfsv0:
qfmt = QFMT_VFS_V0;
+ goto set_qf_format;
+ case Opt_jqfmt_vfsv1:
+ qfmt = QFMT_VFS_V1;
set_qf_format:
if (sb_any_quota_loaded(sb) &&
sbi->s_jquota_fmt != qfmt) {
@@ -1481,6 +1498,7 @@ set_qf_format:
case Opt_offgrpjquota:
case Opt_jqfmt_vfsold:
case Opt_jqfmt_vfsv0:
+ case Opt_jqfmt_vfsv1:
ext4_msg(sb, KERN_ERR,
"journaled quota options not supported");
break;
@@ -2119,11 +2137,8 @@ static int parse_strtoul(const char *buf,
{
char *endp;
- while (*buf && isspace(*buf))
- buf++;
- *value = simple_strtoul(buf, &endp, 0);
- while (*endp && isspace(*endp))
- endp++;
+ *value = simple_strtoul(skip_spaces(buf), &endp, 0);
+ endp = skip_spaces(endp);
if (*endp || *value > max)
return -EINVAL;
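The parse_strtoul() cleanup above uses the then-new skip_spaces() string helper in place of the open-coded isspace() loops; it is roughly equivalent to the following sketch:

char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}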
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 910bf9a59cb..83218bebbc7 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -92,7 +92,7 @@ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
struct mb_cache_entry **);
static void ext4_xattr_rehash(struct ext4_xattr_header *,
struct ext4_xattr_entry *);
-static int ext4_xattr_list(struct inode *inode, char *buffer,
+static int ext4_xattr_list(struct dentry *dentry, char *buffer,
size_t buffer_size);
static struct mb_cache *ext4_xattr_cache;
@@ -140,7 +140,7 @@ ext4_xattr_handler(int name_index)
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- return ext4_xattr_list(dentry->d_inode, buffer, size);
+ return ext4_xattr_list(dentry, buffer, size);
}
static int
@@ -325,7 +325,7 @@ ext4_xattr_get(struct inode *inode, int name_index, const char *name,
}
static int
-ext4_xattr_list_entries(struct inode *inode, struct ext4_xattr_entry *entry,
+ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
char *buffer, size_t buffer_size)
{
size_t rest = buffer_size;
@@ -335,9 +335,10 @@ ext4_xattr_list_entries(struct inode *inode, struct ext4_xattr_entry *entry,
ext4_xattr_handler(entry->e_name_index);
if (handler) {
- size_t size = handler->list(inode, buffer, rest,
+ size_t size = handler->list(dentry, buffer, rest,
entry->e_name,
- entry->e_name_len);
+ entry->e_name_len,
+ handler->flags);
if (buffer) {
if (size > rest)
return -ERANGE;
@@ -350,8 +351,9 @@ ext4_xattr_list_entries(struct inode *inode, struct ext4_xattr_entry *entry,
}
static int
-ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
+ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
+ struct inode *inode = dentry->d_inode;
struct buffer_head *bh = NULL;
int error;
@@ -376,7 +378,7 @@ ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
goto cleanup;
}
ext4_xattr_cache_insert(bh);
- error = ext4_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
+ error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
cleanup:
brelse(bh);
@@ -385,8 +387,9 @@ cleanup:
}
static int
-ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
+ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
+ struct inode *inode = dentry->d_inode;
struct ext4_xattr_ibody_header *header;
struct ext4_inode *raw_inode;
struct ext4_iloc iloc;
@@ -404,7 +407,7 @@ ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
error = ext4_xattr_check_names(IFIRST(header), end);
if (error)
goto cleanup;
- error = ext4_xattr_list_entries(inode, IFIRST(header),
+ error = ext4_xattr_list_entries(dentry, IFIRST(header),
buffer, buffer_size);
cleanup:
@@ -423,12 +426,12 @@ cleanup:
* used / required on success.
*/
static int
-ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
+ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int i_error, b_error;
- down_read(&EXT4_I(inode)->xattr_sem);
- i_error = ext4_xattr_ibody_list(inode, buffer, buffer_size);
+ down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
+ i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
if (i_error < 0) {
b_error = 0;
} else {
@@ -436,11 +439,11 @@ ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
buffer += i_error;
buffer_size -= i_error;
}
- b_error = ext4_xattr_block_list(inode, buffer, buffer_size);
+ b_error = ext4_xattr_block_list(dentry, buffer, buffer_size);
if (b_error < 0)
i_error = 0;
}
- up_read(&EXT4_I(inode)->xattr_sem);
+ up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
return i_error + b_error;
}
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index ca5f89fc6ca..983c253999a 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -12,8 +12,8 @@
#include "xattr.h"
static size_t
-ext4_xattr_security_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext4_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1;
const size_t total_len = prefix_len + name_len + 1;
@@ -28,23 +28,23 @@ ext4_xattr_security_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext4_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext4_xattr_security_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_get(inode, EXT4_XATTR_INDEX_SECURITY, name,
- buffer, size);
+ return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_SECURITY,
+ name, buffer, size);
}
static int
-ext4_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext4_xattr_security_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_set(inode, EXT4_XATTR_INDEX_SECURITY, name,
- value, size, flags);
+ return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_SECURITY,
+ name, value, size, flags);
}
int
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index ac1a52cf2a3..15b50edc658 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -14,8 +14,8 @@
#include "xattr.h"
static size_t
-ext4_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext4_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
@@ -32,23 +32,23 @@ ext4_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext4_xattr_trusted_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext4_xattr_trusted_get(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_get(inode, EXT4_XATTR_INDEX_TRUSTED, name,
- buffer, size);
+ return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED,
+ name, buffer, size);
}
static int
-ext4_xattr_trusted_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext4_xattr_trusted_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ext4_xattr_set(inode, EXT4_XATTR_INDEX_TRUSTED, name,
- value, size, flags);
+ return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_TRUSTED,
+ name, value, size, flags);
}
struct xattr_handler ext4_xattr_trusted_handler = {
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index d91aa61b42a..c4ce05746ce 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -13,13 +13,13 @@
#include "xattr.h"
static size_t
-ext4_xattr_user_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+ext4_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
const size_t prefix_len = XATTR_USER_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return 0;
if (list && total_len <= list_size) {
@@ -31,26 +31,27 @@ ext4_xattr_user_list(struct inode *inode, char *list, size_t list_size,
}
static int
-ext4_xattr_user_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+ext4_xattr_user_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, name, buffer, size);
+ return ext4_xattr_get(dentry->d_inode, EXT4_XATTR_INDEX_USER,
+ name, buffer, size);
}
static int
-ext4_xattr_user_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+ext4_xattr_user_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!test_opt(inode->i_sb, XATTR_USER))
+ if (!test_opt(dentry->d_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, name,
- value, size, flags);
+ return ext4_xattr_set(dentry->d_inode, EXT4_XATTR_INDEX_USER,
+ name, value, size, flags);
}
struct xattr_handler ext4_xattr_user_handler = {
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 7db0979c6b7..e6efdfa0f6d 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -44,7 +44,8 @@ struct fat_mount_options {
nocase:1, /* Does this need case conversion? 0=need case conversion*/
usefree:1, /* Use free_clusters for FAT32 */
tz_utc:1, /* Filesystem timestamps are in UTC */
- rodir:1; /* allow ATTR_RO for directory */
+ rodir:1, /* allow ATTR_RO for directory */
+ discard:1; /* Issue discard requests on deletions */
};
#define FAT_HASH_BITS 8
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index a81037721a6..81184d3b75a 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -566,16 +566,21 @@ int fat_free_clusters(struct inode *inode, int cluster)
goto error;
}
- /*
- * Issue discard for the sectors we no longer care about,
- * batching contiguous clusters into one request
- */
- if (cluster != fatent.entry + 1) {
- int nr_clus = fatent.entry - first_cl + 1;
-
- sb_issue_discard(sb, fat_clus_to_blknr(sbi, first_cl),
- nr_clus * sbi->sec_per_clus);
- first_cl = cluster;
+ if (sbi->options.discard) {
+ /*
+ * Issue discard for the sectors we no longer
+ * care about, batching contiguous clusters
+ * into one request
+ */
+ if (cluster != fatent.entry + 1) {
+ int nr_clus = fatent.entry - first_cl + 1;
+
+ sb_issue_discard(sb,
+ fat_clus_to_blknr(sbi, first_cl),
+ nr_clus * sbi->sec_per_clus);
+
+ first_cl = cluster;
+ }
}
ops->ent_put(&fatent, FAT_ENT_FREE);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 76b7961ab66..14da530b05c 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -858,6 +858,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
seq_puts(m, ",errors=panic");
else
seq_puts(m, ",errors=remount-ro");
+ if (opts->discard)
+ seq_puts(m, ",discard");
return 0;
}
@@ -871,7 +873,7 @@ enum {
Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
- Opt_err_panic, Opt_err_ro, Opt_err,
+ Opt_err_panic, Opt_err_ro, Opt_discard, Opt_err,
};
static const match_table_t fat_tokens = {
@@ -899,6 +901,7 @@ static const match_table_t fat_tokens = {
{Opt_err_cont, "errors=continue"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
+ {Opt_discard, "discard"},
{Opt_obsolate, "conv=binary"},
{Opt_obsolate, "conv=text"},
{Opt_obsolate, "conv=auto"},
@@ -1136,6 +1139,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
case Opt_rodir:
opts->rodir = 1;
break;
+ case Opt_discard:
+ opts->discard = 1;
+ break;
/* obsolete mount options */
case Opt_obsolate:
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 0f55f5cb732..d3da05f2646 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
+#include <linux/time.h>
#include "fat.h"
/*
@@ -157,10 +158,6 @@ extern struct timezone sys_tz;
#define SECS_PER_MIN 60
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-#define UNIX_SECS_1980 315532800L
-#if BITS_PER_LONG == 64
-#define UNIX_SECS_2108 4354819200L
-#endif
/* days between 1.1.70 and 1.1.80 (2 leap days) */
#define DAYS_DELTA (365 * 10 + 2)
/* 120 (2100 - 1980) isn't leap year */
@@ -213,58 +210,35 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
__le16 *time, __le16 *date, u8 *time_cs)
{
- time_t second = ts->tv_sec;
- time_t day, leap_day, month, year;
+ struct tm tm;
+ time_to_tm(ts->tv_sec, sbi->options.tz_utc ? 0 :
+ -sys_tz.tz_minuteswest * 60, &tm);
- if (!sbi->options.tz_utc)
- second -= sys_tz.tz_minuteswest * SECS_PER_MIN;
-
- /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
- if (second < UNIX_SECS_1980) {
+ /* FAT can only support year between 1980 to 2107 */
+ if (tm.tm_year < 1980 - 1900) {
*time = 0;
*date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
if (time_cs)
*time_cs = 0;
return;
}
-#if BITS_PER_LONG == 64
- if (second >= UNIX_SECS_2108) {
+ if (tm.tm_year > 2107 - 1900) {
*time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
*date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
if (time_cs)
*time_cs = 199;
return;
}
-#endif
- day = second / SECS_PER_DAY - DAYS_DELTA;
- year = day / 365;
- leap_day = (year + 3) / 4;
- if (year > YEAR_2100) /* 2100 isn't leap year */
- leap_day--;
- if (year * 365 + leap_day > day)
- year--;
- leap_day = (year + 3) / 4;
- if (year > YEAR_2100) /* 2100 isn't leap year */
- leap_day--;
- day -= year * 365 + leap_day;
-
- if (IS_LEAP_YEAR(year) && day == days_in_year[3]) {
- month = 2;
- } else {
- if (IS_LEAP_YEAR(year) && day > days_in_year[3])
- day--;
- for (month = 1; month < 12; month++) {
- if (days_in_year[month + 1] > day)
- break;
- }
- }
- day -= days_in_year[month];
+ /* from 1900 -> from 1980 */
+ tm.tm_year -= 80;
+ /* 0~11 -> 1~12 */
+ tm.tm_mon++;
+ /* 0~59 -> 0~29(2sec counts) */
+ tm.tm_sec >>= 1;
- *time = cpu_to_le16(((second / SECS_PER_HOUR) % 24) << 11
- | ((second / SECS_PER_MIN) % 60) << 5
- | (second % SECS_PER_MIN) >> 1);
- *date = cpu_to_le16((year << 9) | (month << 5) | (day + 1));
+ *time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec);
+ *date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday);
if (time_cs)
*time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
}
@@ -285,4 +259,3 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
}
return err;
}
-
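The rewritten fat_time_unix2fat() packs the struct tm fields into the on-disk FAT layout: the date as (year - 1980) << 9 | month << 5 | day and the time as hour << 11 | minute << 5 | second / 2. A hypothetical worked example with values chosen only for illustration:

/* 2009-12-23 10:00:00 expressed as FAT on-disk fields */
unsigned short fat_date = (2009 - 1980) << 9 | 12 << 5 | 23;	/* 0x3b97 */
unsigned short fat_time = 10 << 11 | 0 << 5 | (0 >> 1);		/* 0x5000 */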
diff --git a/fs/file_table.c b/fs/file_table.c
index 4bef4c01ec6..69652c5bd5f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -21,9 +21,12 @@
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
+#include <linux/ima.h>
#include <asm/atomic.h>
+#include "internal.h"
+
/* sysctl tunables... */
struct files_stat_struct files_stat = {
.max_files = NR_FILE
@@ -147,8 +150,6 @@ fail:
return NULL;
}
-EXPORT_SYMBOL(get_empty_filp);
-
/**
* alloc_file - allocate and initialize a 'struct file'
* @mnt: the vfsmount on which the file will reside
@@ -164,8 +165,8 @@ EXPORT_SYMBOL(get_empty_filp);
* If all the callers of init_file() are eliminated, its
* code should be moved into this function.
*/
-struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
- fmode_t mode, const struct file_operations *fop)
+struct file *alloc_file(struct path *path, fmode_t mode,
+ const struct file_operations *fop)
{
struct file *file;
@@ -173,35 +174,8 @@ struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
if (!file)
return NULL;
- init_file(file, mnt, dentry, mode, fop);
- return file;
-}
-EXPORT_SYMBOL(alloc_file);
-
-/**
- * init_file - initialize a 'struct file'
- * @file: the already allocated 'struct file' to initialized
- * @mnt: the vfsmount on which the file resides
- * @dentry: the dentry representing this file
- * @mode: the mode the file is opened with
- * @fop: the 'struct file_operations' for this file
- *
- * Use this instead of setting the members directly. Doing so
- * avoids making mistakes like forgetting the mntget() or
- * forgetting to take a write on the mnt.
- *
- * Note: This is a crappy interface. It is here to make
- * merging with the existing users of get_empty_filp()
- * who have complex failure logic easier. All users
- * of this should be moving to alloc_file().
- */
-int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
- fmode_t mode, const struct file_operations *fop)
-{
- int error = 0;
- file->f_path.dentry = dentry;
- file->f_path.mnt = mntget(mnt);
- file->f_mapping = dentry->d_inode->i_mapping;
+ file->f_path = *path;
+ file->f_mapping = path->dentry->d_inode->i_mapping;
file->f_mode = mode;
file->f_op = fop;
@@ -211,14 +185,14 @@ int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
* visible. We do this for consistency, and so
* that we can do debugging checks at __fput()
*/
- if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
+ if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
file_take_write(file);
- error = mnt_clone_write(mnt);
- WARN_ON(error);
+ WARN_ON(mnt_clone_write(path->mnt));
}
- return error;
+ ima_counts_get(file);
+ return file;
}
-EXPORT_SYMBOL(init_file);
+EXPORT_SYMBOL(alloc_file);
void fput(struct file *file)
{
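
[Editor's note] The file_table.c hunks above fold init_file() into alloc_file() and change the signature from (vfsmount, dentry, mode, fop) to (struct path *, mode, fop), with ima_counts_get() now called inside alloc_file() itself. A rough sketch of how an in-kernel caller adapts to the new interface follows (the hugetlbfs hunk later in this diff does exactly this); the helper name here is purely illustrative.

/* Illustrative only: build a struct path and hand it to the new alloc_file().
 * On success the path references live on in file->f_path and are dropped at
 * fput(); on failure the caller still owns them and must path_put(). */
static struct file *example_open_dentry(struct vfsmount *mnt, struct dentry *dentry,
                                        const struct file_operations *fops)
{
    struct path path = { .mnt = mntget(mnt), .dentry = dget(dentry) };
    struct file *file;

    file = alloc_file(&path, FMODE_READ | FMODE_WRITE, fops);
    if (!file)
        path_put(&path);    /* drop the references we took above */
    return file;
}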
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e590242fa41..3221a0c7944 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(fscache_object_destroy);
*/
static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
{
- struct fscache_object *pobj, *obj, *minobj = NULL;
+ struct fscache_object *pobj, *obj = NULL, *minobj = NULL;
struct rb_node *p;
unsigned long pos;
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index e0b53aa7bbe..55458031e50 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -1,62 +1,58 @@
/*
- * fs/generic_acl.c
- *
* (C) 2005 Andreas Gruenbacher <agruen@suse.de>
*
* This file is released under the GPL.
+ *
+ * Generic ACL support for in-memory filesystems.
*/
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/generic_acl.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
-/**
- * generic_acl_list - Generic xattr_handler->list() operation
- * @ops: Filesystem specific getacl and setacl callbacks
- */
-size_t
-generic_acl_list(struct inode *inode, struct generic_acl_operations *ops,
- int type, char *list, size_t list_size)
+
+static size_t
+generic_acl_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int type)
{
struct posix_acl *acl;
- const char *name;
+ const char *xname;
size_t size;
- acl = ops->getacl(inode, type);
+ acl = get_cached_acl(dentry->d_inode, type);
if (!acl)
return 0;
posix_acl_release(acl);
- switch(type) {
- case ACL_TYPE_ACCESS:
- name = POSIX_ACL_XATTR_ACCESS;
- break;
-
- case ACL_TYPE_DEFAULT:
- name = POSIX_ACL_XATTR_DEFAULT;
- break;
-
- default:
- return 0;
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ xname = POSIX_ACL_XATTR_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ xname = POSIX_ACL_XATTR_DEFAULT;
+ break;
+ default:
+ return 0;
}
- size = strlen(name) + 1;
+ size = strlen(xname) + 1;
if (list && size <= list_size)
- memcpy(list, name, size);
+ memcpy(list, xname, size);
return size;
}
-/**
- * generic_acl_get - Generic xattr_handler->get() operation
- * @ops: Filesystem specific getacl and setacl callbacks
- */
-int
-generic_acl_get(struct inode *inode, struct generic_acl_operations *ops,
- int type, void *buffer, size_t size)
+static int
+generic_acl_get(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
{
struct posix_acl *acl;
int error;
- acl = ops->getacl(inode, type);
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
+
+ acl = get_cached_acl(dentry->d_inode, type);
if (!acl)
return -ENODATA;
error = posix_acl_to_xattr(acl, buffer, size);
@@ -65,17 +61,16 @@ generic_acl_get(struct inode *inode, struct generic_acl_operations *ops,
return error;
}
-/**
- * generic_acl_set - Generic xattr_handler->set() operation
- * @ops: Filesystem specific getacl and setacl callbacks
- */
-int
-generic_acl_set(struct inode *inode, struct generic_acl_operations *ops,
- int type, const void *value, size_t size)
+static int
+generic_acl_set(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags, int type)
{
+ struct inode *inode = dentry->d_inode;
struct posix_acl *acl = NULL;
int error;
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
if (!is_owner_or_cap(inode))
@@ -91,28 +86,27 @@ generic_acl_set(struct inode *inode, struct generic_acl_operations *ops,
error = posix_acl_valid(acl);
if (error)
goto failed;
- switch(type) {
- case ACL_TYPE_ACCESS:
- mode = inode->i_mode;
- error = posix_acl_equiv_mode(acl, &mode);
- if (error < 0)
- goto failed;
- inode->i_mode = mode;
- if (error == 0) {
- posix_acl_release(acl);
- acl = NULL;
- }
- break;
-
- case ACL_TYPE_DEFAULT:
- if (!S_ISDIR(inode->i_mode)) {
- error = -EINVAL;
- goto failed;
- }
- break;
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ mode = inode->i_mode;
+ error = posix_acl_equiv_mode(acl, &mode);
+ if (error < 0)
+ goto failed;
+ inode->i_mode = mode;
+ if (error == 0) {
+ posix_acl_release(acl);
+ acl = NULL;
+ }
+ break;
+ case ACL_TYPE_DEFAULT:
+ if (!S_ISDIR(inode->i_mode)) {
+ error = -EINVAL;
+ goto failed;
+ }
+ break;
}
}
- ops->setacl(inode, type, acl);
+ set_cached_acl(inode, type, acl);
error = 0;
failed:
posix_acl_release(acl);
@@ -121,14 +115,12 @@ failed:
/**
* generic_acl_init - Take care of acl inheritance at @inode create time
- * @ops: Filesystem specific getacl and setacl callbacks
*
* Files created inside a directory with a default ACL inherit the
* directory's default ACL.
*/
int
-generic_acl_init(struct inode *inode, struct inode *dir,
- struct generic_acl_operations *ops)
+generic_acl_init(struct inode *inode, struct inode *dir)
{
struct posix_acl *acl = NULL;
mode_t mode = inode->i_mode;
@@ -136,7 +128,7 @@ generic_acl_init(struct inode *inode, struct inode *dir,
inode->i_mode = mode & ~current_umask();
if (!S_ISLNK(inode->i_mode))
- acl = ops->getacl(dir, ACL_TYPE_DEFAULT);
+ acl = get_cached_acl(dir, ACL_TYPE_DEFAULT);
if (acl) {
struct posix_acl *clone;
@@ -145,7 +137,7 @@ generic_acl_init(struct inode *inode, struct inode *dir,
error = -ENOMEM;
if (!clone)
goto cleanup;
- ops->setacl(inode, ACL_TYPE_DEFAULT, clone);
+ set_cached_acl(inode, ACL_TYPE_DEFAULT, clone);
posix_acl_release(clone);
}
clone = posix_acl_clone(acl, GFP_KERNEL);
@@ -156,7 +148,7 @@ generic_acl_init(struct inode *inode, struct inode *dir,
if (error >= 0) {
inode->i_mode = mode;
if (error > 0)
- ops->setacl(inode, ACL_TYPE_ACCESS, clone);
+ set_cached_acl(inode, ACL_TYPE_ACCESS, clone);
}
posix_acl_release(clone);
}
@@ -169,20 +161,19 @@ cleanup:
/**
* generic_acl_chmod - change the access acl of @inode upon chmod()
- * @ops: FIlesystem specific getacl and setacl callbacks
*
* A chmod also changes the permissions of the owner, group/mask, and
* other ACL entries.
*/
int
-generic_acl_chmod(struct inode *inode, struct generic_acl_operations *ops)
+generic_acl_chmod(struct inode *inode)
{
struct posix_acl *acl, *clone;
int error = 0;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
- acl = ops->getacl(inode, ACL_TYPE_ACCESS);
+ acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
if (acl) {
clone = posix_acl_clone(acl, GFP_KERNEL);
posix_acl_release(acl);
@@ -190,8 +181,37 @@ generic_acl_chmod(struct inode *inode, struct generic_acl_operations *ops)
return -ENOMEM;
error = posix_acl_chmod_masq(clone, inode->i_mode);
if (!error)
- ops->setacl(inode, ACL_TYPE_ACCESS, clone);
+ set_cached_acl(inode, ACL_TYPE_ACCESS, clone);
posix_acl_release(clone);
}
return error;
}
+
+int
+generic_check_acl(struct inode *inode, int mask)
+{
+ struct posix_acl *acl = get_cached_acl(inode, ACL_TYPE_ACCESS);
+
+ if (acl) {
+ int error = posix_acl_permission(inode, acl, mask);
+ posix_acl_release(acl);
+ return error;
+ }
+ return -EAGAIN;
+}
+
+struct xattr_handler generic_acl_access_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
+ .list = generic_acl_list,
+ .get = generic_acl_get,
+ .set = generic_acl_set,
+};
+
+struct xattr_handler generic_acl_default_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
+ .list = generic_acl_list,
+ .get = generic_acl_get,
+ .set = generic_acl_set,
+};
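
[Editor's note] With the hunks above, fs/generic_acl.c stops taking per-filesystem getacl/setacl callbacks and instead works on the inode's cached ACLs, exporting ready-made xattr handlers plus generic_check_acl(). A filesystem that keeps its ACLs purely in the inode (tmpfs is the in-tree user) would wire this up roughly as sketched below; the handler and helper names are the ones introduced above, while the surrounding filesystem glue is hypothetical.

/* Hypothetical glue for an in-memory filesystem using the new interface. */
static struct xattr_handler *examplefs_xattr_handlers[] = {
    &generic_acl_access_handler,
    &generic_acl_default_handler,
    NULL
};

/* At mount time:        sb->s_xattr = examplefs_xattr_handlers;
 * In inode_operations:  .check_acl = generic_check_acl
 *                       (the ACL hook generic_permission() consults on
 *                        kernels of this vintage)
 * On inode creation:    generic_acl_init(inode, dir);
 * After chmod/setattr:  generic_acl_chmod(inode);
 */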
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3eb1ea84617..87ee309d4c2 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -126,7 +126,7 @@ static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
error = posix_acl_to_xattr(acl, data, len);
if (error < 0)
goto out;
- error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
+ error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS);
if (!error)
set_cached_acl(inode, type, acl);
out:
@@ -232,9 +232,10 @@ static int gfs2_acl_type(const char *name)
return -EINVAL;
}
-static int gfs2_xattr_system_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int gfs2_xattr_system_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int xtype)
{
+ struct inode *inode = dentry->d_inode;
struct posix_acl *acl;
int type;
int error;
@@ -255,9 +256,11 @@ static int gfs2_xattr_system_get(struct inode *inode, const char *name,
return error;
}
-static int gfs2_xattr_system_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int gfs2_xattr_system_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags,
+ int xtype)
{
+ struct inode *inode = dentry->d_inode;
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct posix_acl *acl = NULL;
int error = 0, type;
@@ -319,7 +322,7 @@ static int gfs2_xattr_system_set(struct inode *inode, const char *name,
}
set_acl:
- error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
+ error = __gfs2_xattr_set(inode, name, value, size, 0, GFS2_EATYPE_SYS);
if (!error) {
if (acl)
set_cached_acl(inode, type, acl);
@@ -334,6 +337,7 @@ out:
struct xattr_handler gfs2_xattr_system_handler = {
.prefix = XATTR_SYSTEM_PREFIX,
+ .flags = GFS2_EATYPE_SYS,
.get = gfs2_xattr_system_get,
.set = gfs2_xattr_system_set,
};
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 26ba2a4c4a2..6e220f4eee7 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -125,7 +125,7 @@ static struct inode *gfs2_iget_skip(struct super_block *sb,
* directory entry when gfs2_inode_lookup() is invoked. Part of the code
* segment inside gfs2_inode_lookup code needs to get moved around.
*
- * Clean up I_LOCK and I_NEW as well.
+ * Clears I_NEW as well.
**/
void gfs2_set_iop(struct inode *inode)
@@ -801,7 +801,8 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
return err;
}
- err = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SECURITY, name, value, len, 0);
+ err = __gfs2_xattr_set(&ip->i_inode, name, value, len, 0,
+ GFS2_EATYPE_SECURITY);
kfree(value);
kfree(name);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c5dad1eb7b9..0dc34621f6a 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -85,11 +85,7 @@ static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
buf[0] = '\0';
if (!gfs2_uuid_valid(uuid))
return 0;
- return snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X-%02X%02X-"
- "%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n",
- uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
- uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
- uuid[12], uuid[13], uuid[14], uuid[15]);
+ return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
}
static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -575,14 +571,8 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
if (!sdp->sd_args.ar_spectator)
add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
- if (gfs2_uuid_valid(uuid)) {
- add_uevent_var(env, "UUID=%02X%02X%02X%02X-%02X%02X-%02X%02X-"
- "%02X%02X-%02X%02X%02X%02X%02X%02X",
- uuid[0], uuid[1], uuid[2], uuid[3], uuid[4],
- uuid[5], uuid[6], uuid[7], uuid[8], uuid[9],
- uuid[10], uuid[11], uuid[12], uuid[13],
- uuid[14], uuid[15]);
- }
+ if (gfs2_uuid_valid(uuid))
+ add_uevent_var(env, "UUID=%pUB", uuid);
return 0;
}
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 912f5cbc474..8a04108e0c2 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -567,18 +567,17 @@ out:
/**
* gfs2_xattr_get - Get a GFS2 extended attribute
* @inode: The inode
- * @type: The type of extended attribute
* @name: The name of the extended attribute
* @buffer: The buffer to write the result into
* @size: The size of the buffer
+ * @type: The type of extended attribute
*
* Returns: actual size of data on success, -errno on error
*/
-
-int gfs2_xattr_get(struct inode *inode, int type, const char *name,
- void *buffer, size_t size)
+static int gfs2_xattr_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
- struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
struct gfs2_ea_location el;
int error;
@@ -1119,7 +1118,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
/**
* gfs2_xattr_remove - Remove a GFS2 extended attribute
- * @inode: The inode
+ * @ip: The inode
* @type: The type of the extended attribute
* @name: The name of the extended attribute
*
@@ -1130,9 +1129,8 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
* Returns: 0, or errno on failure
*/
-static int gfs2_xattr_remove(struct inode *inode, int type, const char *name)
+static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
{
- struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_ea_location el;
int error;
@@ -1156,24 +1154,24 @@ static int gfs2_xattr_remove(struct inode *inode, int type, const char *name)
}
/**
- * gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
- * @inode: The inode
- * @type: The type of the extended attribute
+ * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
+ * @ip: The inode
* @name: The name of the extended attribute
* @value: The value of the extended attribute (NULL for remove)
* @size: The size of the @value argument
* @flags: Create or Replace
+ * @type: The type of the extended attribute
*
* See gfs2_xattr_remove() for details of the removal of xattrs.
*
* Returns: 0 or errno on failure
*/
-int gfs2_xattr_set(struct inode *inode, int type, const char *name,
- const void *value, size_t size, int flags)
+int __gfs2_xattr_set(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags, int type)
{
- struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_ea_location el;
unsigned int namel = strlen(name);
int error;
@@ -1184,7 +1182,7 @@ int gfs2_xattr_set(struct inode *inode, int type, const char *name,
return -ERANGE;
if (value == NULL)
- return gfs2_xattr_remove(inode, type, name);
+ return gfs2_xattr_remove(ip, type, name);
if (ea_check_size(sdp, namel, size))
return -ERANGE;
@@ -1224,6 +1222,13 @@ int gfs2_xattr_set(struct inode *inode, int type, const char *name,
return error;
}
+static int gfs2_xattr_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
+{
+ return __gfs2_xattr_set(dentry->d_inode, name, value,
+ size, flags, type);
+}
+
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
struct gfs2_ea_header *ea, char *data)
{
@@ -1529,40 +1534,18 @@ out_alloc:
return error;
}
-static int gfs2_xattr_user_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- return gfs2_xattr_get(inode, GFS2_EATYPE_USR, name, buffer, size);
-}
-
-static int gfs2_xattr_user_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
-}
-
-static int gfs2_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- return gfs2_xattr_get(inode, GFS2_EATYPE_SECURITY, name, buffer, size);
-}
-
-static int gfs2_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return gfs2_xattr_set(inode, GFS2_EATYPE_SECURITY, name, value, size, flags);
-}
-
static struct xattr_handler gfs2_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
- .get = gfs2_xattr_user_get,
- .set = gfs2_xattr_user_set,
+ .flags = GFS2_EATYPE_USR,
+ .get = gfs2_xattr_get,
+ .set = gfs2_xattr_set,
};
static struct xattr_handler gfs2_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
- .get = gfs2_xattr_security_get,
- .set = gfs2_xattr_security_set,
+ .flags = GFS2_EATYPE_SECURITY,
+ .get = gfs2_xattr_get,
+ .set = gfs2_xattr_set,
};
struct xattr_handler *gfs2_xattr_handlers[] = {
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index 8d6ae5813c4..d392f8358f2 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -53,10 +53,9 @@ struct gfs2_ea_location {
struct gfs2_ea_header *el_prev;
};
-extern int gfs2_xattr_get(struct inode *inode, int type, const char *name,
- void *buffer, size_t size);
-extern int gfs2_xattr_set(struct inode *inode, int type, const char *name,
- const void *value, size_t size, int flags);
+extern int __gfs2_xattr_set(struct inode *inode, const char *name,
+ const void *value, size_t size,
+ int flags, int type);
extern ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 6d98f116ca0..424b0337f52 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -289,6 +289,10 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
err = hfs_brec_find(&src_fd);
if (err)
goto out;
+ if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
+ err = -EIO;
+ goto out;
+ }
hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
src_fd.entrylength);
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 7c69b98a2e4..2b3b8611b41 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -79,6 +79,11 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
filp->f_pos++;
/* fall through */
case 1:
+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+ err = -EIO;
+ goto out;
+ }
+
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
if (entry.type != HFS_CDR_THD) {
printk(KERN_ERR "hfs: bad catalog folder thread\n");
@@ -109,6 +114,12 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
err = -EIO;
goto out;
}
+
+ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
+ err = -EIO;
+ goto out;
+ }
+
hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
type = entry.type;
len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index f7fcbe49da7..5ed7252b7b2 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -409,8 +409,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
/* try to get the root inode */
hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
- if (!res)
+ if (!res) {
+ if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
+ res = -EIO;
+ goto bail;
+ }
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
+ }
if (res) {
hfs_find_exit(&fd);
goto bail_no_root;
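
[Editor's note] The three HFS hunks above all add the same guard: fd.entrylength comes from the on-disk catalog B-tree, so it is now checked against sizeof(entry) before hfs_bnode_read() copies that many bytes into a stack buffer, and a corrupted length fails with -EIO instead of overflowing. A tiny userspace illustration of the pattern (hypothetical names, not HFS code):

#include <errno.h>
#include <string.h>

/* Copy an on-disk record into a fixed-size buffer only after validating the
 * filesystem-controlled length, as the hunks above now do. */
static int read_record(void *dst, size_t dst_size,
                       const void *src, long rec_len)
{
    if (rec_len < 0 || (size_t)rec_len > dst_size)
        return -EIO;    /* treat a bogus length as media corruption */
    memcpy(dst, src, (size_t)rec_len);
    return 0;
}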
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index f2feaa06bf2..cadc4ce4865 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -14,6 +14,7 @@
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
+#include <linux/bitmap.h>
/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
@@ -115,15 +116,13 @@ static void hpfs_put_super(struct super_block *s)
unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
{
struct quad_buffer_head qbh;
- unsigned *bits;
- unsigned i, count;
- if (!(bits = hpfs_map_4sectors(s, secno, &qbh, 4))) return 0;
- count = 0;
- for (i = 0; i < 2048 / sizeof(unsigned); i++) {
- unsigned b;
- if (!bits[i]) continue;
- for (b = bits[i]; b; b>>=1) count += b & 1;
- }
+ unsigned long *bits;
+ unsigned count;
+
+ bits = hpfs_map_4sectors(s, secno, &qbh, 4);
+ if (!bits)
+ return 0;
+ count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
hpfs_brelse4(&qbh);
return count;
}
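
[Editor's note] The hpfs hunk above replaces an open-coded bit-counting loop over 2048 bytes of bitmap with a single bitmap_weight() call, hence the new linux/bitmap.h include. Outside the kernel the same operation is typically a popcount per word; a minimal sketch, purely illustrative:

#include <stddef.h>

/* Count set bits in a word-aligned bitmap; the userspace analogue of
 * bitmap_weight(bits, nbits) used in the hunk above. */
static unsigned count_bits(const unsigned long *bits, size_t nwords)
{
    unsigned count = 0;
    size_t i;

    for (i = 0; i < nwords; i++)
        count += (unsigned)__builtin_popcountl(bits[i]);
    return count;
}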
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 87a1258953b..a0bbd3d1b41 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -30,7 +30,6 @@
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
-#include <linux/ima.h>
#include <linux/magic.h>
#include <asm/uaccess.h>
@@ -922,7 +921,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
int error = -ENOMEM;
struct file *file;
struct inode *inode;
- struct dentry *dentry, *root;
+ struct path path;
+ struct dentry *root;
struct qstr quick_string;
*user = NULL;
@@ -944,10 +944,11 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
quick_string.name = name;
quick_string.len = strlen(quick_string.name);
quick_string.hash = 0;
- dentry = d_alloc(root, &quick_string);
- if (!dentry)
+ path.dentry = d_alloc(root, &quick_string);
+ if (!path.dentry)
goto out_shm_unlock;
+ path.mnt = mntget(hugetlbfs_vfsmount);
error = -ENOSPC;
inode = hugetlbfs_get_inode(root->d_sb, current_fsuid(),
current_fsgid(), S_IFREG | S_IRWXUGO, 0);
@@ -960,24 +961,22 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
acctflag))
goto out_inode;
- d_instantiate(dentry, inode);
+ d_instantiate(path.dentry, inode);
inode->i_size = size;
inode->i_nlink = 0;
error = -ENFILE;
- file = alloc_file(hugetlbfs_vfsmount, dentry,
- FMODE_WRITE | FMODE_READ,
+ file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
&hugetlbfs_file_operations);
if (!file)
goto out_dentry; /* inode is already attached */
- ima_counts_get(file);
return file;
out_inode:
iput(inode);
out_dentry:
- dput(dentry);
+ path_put(&path);
out_shm_unlock:
if (*user) {
user_shm_unlock(size, *user);
diff --git a/fs/inode.c b/fs/inode.c
index 06c1f02de61..03dfeb2e392 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -113,7 +113,7 @@ static void wake_up_inode(struct inode *inode)
* Prevent speculative execution through spin_unlock(&inode_lock);
*/
smp_mb();
- wake_up_bit(&inode->i_state, __I_LOCK);
+ wake_up_bit(&inode->i_state, __I_NEW);
}
/**
@@ -690,17 +690,17 @@ void unlock_new_inode(struct inode *inode)
}
#endif
/*
- * This is special! We do not need the spinlock when clearing I_LOCK,
+ * This is special! We do not need the spinlock when clearing I_NEW,
* because we're guaranteed that nobody else tries to do anything about
* the state of the inode when it is locked, as we just created it (so
- * there can be no old holders that haven't tested I_LOCK).
+ * there can be no old holders that haven't tested I_NEW).
* However we must emit the memory barrier so that other CPUs reliably
- * see the clearing of I_LOCK after the other inode initialisation has
+ * see the clearing of I_NEW after the other inode initialisation has
* completed.
*/
smp_mb();
- WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
- inode->i_state &= ~(I_LOCK|I_NEW);
+ WARN_ON(!(inode->i_state & I_NEW));
+ inode->i_state &= ~I_NEW;
wake_up_inode(inode);
}
EXPORT_SYMBOL(unlock_new_inode);
@@ -731,7 +731,7 @@ static struct inode *get_new_inode(struct super_block *sb,
goto set_failed;
__inode_add_to_lists(sb, head, inode);
- inode->i_state = I_LOCK|I_NEW;
+ inode->i_state = I_NEW;
spin_unlock(&inode_lock);
/* Return the locked inode with I_NEW set, the
@@ -778,7 +778,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
if (!old) {
inode->i_ino = ino;
__inode_add_to_lists(sb, head, inode);
- inode->i_state = I_LOCK|I_NEW;
+ inode->i_state = I_NEW;
spin_unlock(&inode_lock);
/* Return the locked inode with I_NEW set, the
@@ -1083,7 +1083,7 @@ int insert_inode_locked(struct inode *inode)
ino_t ino = inode->i_ino;
struct hlist_head *head = inode_hashtable + hash(sb, ino);
- inode->i_state |= I_LOCK|I_NEW;
+ inode->i_state |= I_NEW;
while (1) {
struct hlist_node *node;
struct inode *old = NULL;
@@ -1120,7 +1120,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
struct super_block *sb = inode->i_sb;
struct hlist_head *head = inode_hashtable + hash(sb, hashval);
- inode->i_state |= I_LOCK|I_NEW;
+ inode->i_state |= I_NEW;
while (1) {
struct hlist_node *node;
@@ -1510,7 +1510,7 @@ EXPORT_SYMBOL(inode_wait);
* until the deletion _might_ have completed. Callers are responsible
* to recheck inode state.
*
- * It doesn't matter if I_LOCK is not set initially, a call to
+ * It doesn't matter if I_NEW is not set initially, a call to
* wake_up_inode() after removing from the hash list will DTRT.
*
* This is called with inode_lock held.
@@ -1518,8 +1518,8 @@ EXPORT_SYMBOL(inode_wait);
static void __wait_on_freeing_inode(struct inode *inode)
{
wait_queue_head_t *wq;
- DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
- wq = bit_waitqueue(&inode->i_state, __I_LOCK);
+ DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
+ wq = bit_waitqueue(&inode->i_state, __I_NEW);
prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
spin_unlock(&inode_lock);
schedule();
diff --git a/fs/internal.h b/fs/internal.h
index 515175b8b72..e96a1667d74 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -79,8 +79,16 @@ extern void chroot_fs_refs(struct path *, struct path *);
* file_table.c
*/
extern void mark_files_ro(struct super_block *);
+extern struct file *get_empty_filp(void);
/*
* super.c
*/
extern int do_remount_sb(struct super_block *, int, void *, int);
+
+/*
+ * open.c
+ */
+struct nameidata;
+extern struct file *nameidata_to_filp(struct nameidata *);
+extern void release_open_intent(struct nameidata *);
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index defb932eee9..0b3fa7974fa 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -36,286 +36,323 @@ static void *zisofs_zlib_workspace;
static DEFINE_MUTEX(zisofs_zlib_lock);
/*
- * When decompressing, we typically obtain more than one page
- * per reference. We inject the additional pages into the page
- * cache as a form of readahead.
+ * Read data of @inode from @block_start to @block_end and uncompress
+ * to one zisofs block. Store the data in the @pages array with @pcount
+ * entries. Start storing at offset @poffset of the first page.
*/
-static int zisofs_readpage(struct file *file, struct page *page)
+static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
+ loff_t block_end, int pcount,
+ struct page **pages, unsigned poffset,
+ int *errp)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct address_space *mapping = inode->i_mapping;
- unsigned int maxpage, xpage, fpage, blockindex;
- unsigned long offset;
- unsigned long blockptr, blockendptr, cstart, cend, csize;
- struct buffer_head *bh, *ptrbh[2];
- unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
- unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
- unsigned long bufmask = bufsize - 1;
- int err = -EIO;
- int i;
- unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
- /* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
- unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
- unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
- unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
- struct page *pages[zisofs_block_pages];
- unsigned long index = page->index;
- int indexblocks;
-
- /* We have already been given one page, this is the one
- we must do. */
- xpage = index & zisofs_block_page_mask;
- pages[xpage] = page;
-
- /* The remaining pages need to be allocated and inserted */
- offset = index & ~zisofs_block_page_mask;
- blockindex = offset >> zisofs_block_page_shift;
- maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
- /*
- * If this page is wholly outside i_size we just return zero;
- * do_generic_file_read() will handle this for us
- */
- if (page->index >= maxpage) {
- SetPageUptodate(page);
- unlock_page(page);
+ unsigned int bufsize = ISOFS_BUFFER_SIZE(inode);
+ unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
+ unsigned int bufmask = bufsize - 1;
+ int i, block_size = block_end - block_start;
+ z_stream stream = { .total_out = 0,
+ .avail_in = 0,
+ .avail_out = 0, };
+ int zerr;
+ int needblocks = (block_size + (block_start & bufmask) + bufmask)
+ >> bufshift;
+ int haveblocks;
+ blkcnt_t blocknum;
+ struct buffer_head *bhs[needblocks + 1];
+ int curbh, curpage;
+
+ if (block_size > deflateBound(1UL << zisofs_block_shift)) {
+ *errp = -EIO;
return 0;
}
-
- maxpage = min(zisofs_block_pages, maxpage-offset);
-
- for ( i = 0 ; i < maxpage ; i++, offset++ ) {
- if ( i != xpage ) {
- pages[i] = grab_cache_page_nowait(mapping, offset);
- }
- page = pages[i];
- if ( page ) {
- ClearPageError(page);
- kmap(page);
+ /* Empty block? */
+ if (block_size == 0) {
+ for ( i = 0 ; i < pcount ; i++ ) {
+ if (!pages[i])
+ continue;
+ memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE);
+ flush_dcache_page(pages[i]);
+ SetPageUptodate(pages[i]);
}
+ return ((loff_t)pcount) << PAGE_CACHE_SHIFT;
}
- /* This is the last page filled, plus one; used in case of abort. */
- fpage = 0;
+ /* Because zlib is not thread-safe, do all the I/O at the top. */
+ blocknum = block_start >> bufshift;
+ memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+ haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
+ ll_rw_block(READ, haveblocks, bhs);
- /* Find the pointer to this specific chunk */
- /* Note: we're not using isonum_731() here because the data is known aligned */
- /* Note: header_size is in 32-bit words (4 bytes) */
- blockptr = (header_size + blockindex) << 2;
- blockendptr = blockptr + 4;
+ curbh = 0;
+ curpage = 0;
+ /*
+ * First block is special since it may be fractional. We also wait for
+ * it before grabbing the zlib mutex; odds are that the subsequent
+ * blocks are going to come in in short order so we don't hold the zlib
+ * mutex longer than necessary.
+ */
- indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
- ptrbh[0] = ptrbh[1] = NULL;
+ if (!bhs[0])
+ goto b_eio;
- if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
- if ( ptrbh[0] ) brelse(ptrbh[0]);
- printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
- inode->i_ino, blockptr >> bufshift);
- goto eio;
- }
- ll_rw_block(READ, indexblocks, ptrbh);
-
- bh = ptrbh[0];
- if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
- printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
- inode->i_ino, blockptr >> bufshift);
- if ( ptrbh[1] )
- brelse(ptrbh[1]);
- goto eio;
- }
- cstart = le32_to_cpu(*(__le32 *)(bh->b_data + (blockptr & bufmask)));
-
- if ( indexblocks == 2 ) {
- /* We just crossed a block boundary. Switch to the next block */
- brelse(bh);
- bh = ptrbh[1];
- if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
- printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
- inode->i_ino, blockendptr >> bufshift);
- goto eio;
- }
+ wait_on_buffer(bhs[0]);
+ if (!buffer_uptodate(bhs[0])) {
+ *errp = -EIO;
+ goto b_eio;
}
- cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
- brelse(bh);
- if (cstart > cend)
- goto eio;
+ stream.workspace = zisofs_zlib_workspace;
+ mutex_lock(&zisofs_zlib_lock);
- csize = cend-cstart;
-
- if (csize > deflateBound(1UL << zisofs_block_shift))
- goto eio;
-
- /* Now page[] contains an array of pages, any of which can be NULL,
- and the locks on which we hold. We should now read the data and
- release the pages. If the pages are NULL the decompressed data
- for that particular page should be discarded. */
-
- if ( csize == 0 ) {
- /* This data block is empty. */
-
- for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
- if ( (page = pages[fpage]) != NULL ) {
- memset(page_address(page), 0, PAGE_CACHE_SIZE);
-
- flush_dcache_page(page);
- SetPageUptodate(page);
- kunmap(page);
- unlock_page(page);
- if ( fpage == xpage )
- err = 0; /* The critical page */
- else
- page_cache_release(page);
+ zerr = zlib_inflateInit(&stream);
+ if (zerr != Z_OK) {
+ if (zerr == Z_MEM_ERROR)
+ *errp = -ENOMEM;
+ else
+ *errp = -EIO;
+ printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
+ zerr);
+ goto z_eio;
+ }
+
+ while (curpage < pcount && curbh < haveblocks &&
+ zerr != Z_STREAM_END) {
+ if (!stream.avail_out) {
+ if (pages[curpage]) {
+ stream.next_out = page_address(pages[curpage])
+ + poffset;
+ stream.avail_out = PAGE_CACHE_SIZE - poffset;
+ poffset = 0;
+ } else {
+ stream.next_out = (void *)&zisofs_sink_page;
+ stream.avail_out = PAGE_CACHE_SIZE;
}
}
- } else {
- /* This data block is compressed. */
- z_stream stream;
- int bail = 0, left_out = -1;
- int zerr;
- int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
- int haveblocks;
- struct buffer_head *bhs[needblocks+1];
- struct buffer_head **bhptr;
-
- /* Because zlib is not thread-safe, do all the I/O at the top. */
-
- blockptr = cstart >> bufshift;
- memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
- haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
- ll_rw_block(READ, haveblocks, bhs);
-
- bhptr = &bhs[0];
- bh = *bhptr++;
-
- /* First block is special since it may be fractional.
- We also wait for it before grabbing the zlib
- mutex; odds are that the subsequent blocks are
- going to come in in short order so we don't hold
- the zlib mutex longer than necessary. */
-
- if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
- printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
- fpage, xpage, csize);
- goto b_eio;
- }
- stream.next_in = bh->b_data + (cstart & bufmask);
- stream.avail_in = min(bufsize-(cstart & bufmask), csize);
- csize -= stream.avail_in;
-
- stream.workspace = zisofs_zlib_workspace;
- mutex_lock(&zisofs_zlib_lock);
-
- zerr = zlib_inflateInit(&stream);
- if ( zerr != Z_OK ) {
- if ( err && zerr == Z_MEM_ERROR )
- err = -ENOMEM;
- printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
- zerr);
- goto z_eio;
+ if (!stream.avail_in) {
+ wait_on_buffer(bhs[curbh]);
+ if (!buffer_uptodate(bhs[curbh])) {
+ *errp = -EIO;
+ break;
+ }
+ stream.next_in = bhs[curbh]->b_data +
+ (block_start & bufmask);
+ stream.avail_in = min_t(unsigned, bufsize -
+ (block_start & bufmask),
+ block_size);
+ block_size -= stream.avail_in;
+ block_start = 0;
}
- while ( !bail && fpage < maxpage ) {
- page = pages[fpage];
- if ( page )
- stream.next_out = page_address(page);
- else
- stream.next_out = (void *)&zisofs_sink_page;
- stream.avail_out = PAGE_CACHE_SIZE;
-
- while ( stream.avail_out ) {
- int ao, ai;
- if ( stream.avail_in == 0 && left_out ) {
- if ( !csize ) {
- printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
- bail = 1;
- break;
- } else {
- bh = *bhptr++;
- if ( !bh ||
- (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
- /* Reached an EIO */
- printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
- fpage, xpage, csize);
-
- bail = 1;
- break;
- }
- stream.next_in = bh->b_data;
- stream.avail_in = min(csize,bufsize);
- csize -= stream.avail_in;
- }
- }
- ao = stream.avail_out; ai = stream.avail_in;
- zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
- left_out = stream.avail_out;
- if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
- continue;
- if ( zerr != Z_OK ) {
- /* EOF, error, or trying to read beyond end of input */
- if ( err && zerr == Z_MEM_ERROR )
- err = -ENOMEM;
- if ( zerr != Z_STREAM_END )
- printk(KERN_DEBUG "zisofs: zisofs_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
- zerr, inode->i_ino, index,
- fpage, xpage,
- stream.avail_in, stream.avail_out,
- ai, ao);
- bail = 1;
- break;
+ while (stream.avail_out && stream.avail_in) {
+ zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
+ if (zerr == Z_BUF_ERROR && stream.avail_in == 0)
+ break;
+ if (zerr == Z_STREAM_END)
+ break;
+ if (zerr != Z_OK) {
+ /* EOF, error, or trying to read beyond end of input */
+ if (zerr == Z_MEM_ERROR)
+ *errp = -ENOMEM;
+ else {
+ printk(KERN_DEBUG
+ "zisofs: zisofs_inflate returned"
+ " %d, inode = %lu,"
+ " page idx = %d, bh idx = %d,"
+ " avail_in = %d,"
+ " avail_out = %d\n",
+ zerr, inode->i_ino, curpage,
+ curbh, stream.avail_in,
+ stream.avail_out);
+ *errp = -EIO;
}
+ goto inflate_out;
}
+ }
- if ( stream.avail_out && zerr == Z_STREAM_END ) {
- /* Fractional page written before EOF. This may
- be the last page in the file. */
- memset(stream.next_out, 0, stream.avail_out);
- stream.avail_out = 0;
+ if (!stream.avail_out) {
+ /* This page completed */
+ if (pages[curpage]) {
+ flush_dcache_page(pages[curpage]);
+ SetPageUptodate(pages[curpage]);
}
+ curpage++;
+ }
+ if (!stream.avail_in)
+ curbh++;
+ }
+inflate_out:
+ zlib_inflateEnd(&stream);
- if ( !stream.avail_out ) {
- /* This page completed */
- if ( page ) {
- flush_dcache_page(page);
- SetPageUptodate(page);
- kunmap(page);
- unlock_page(page);
- if ( fpage == xpage )
- err = 0; /* The critical page */
- else
- page_cache_release(page);
- }
- fpage++;
- }
+z_eio:
+ mutex_unlock(&zisofs_zlib_lock);
+
+b_eio:
+ for (i = 0; i < haveblocks; i++)
+ brelse(bhs[i]);
+ return stream.total_out;
+}
+
+/*
+ * Uncompress data so that pages[full_page] is fully uptodate and possibly
+ * fills in other pages if we have data for them.
+ */
+static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
+ struct page **pages)
+{
+ loff_t start_off, end_off;
+ loff_t block_start, block_end;
+ unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
+ unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
+ unsigned int blockptr;
+ loff_t poffset = 0;
+ blkcnt_t cstart_block, cend_block;
+ struct buffer_head *bh;
+ unsigned int blkbits = ISOFS_BUFFER_BITS(inode);
+ unsigned int blksize = 1 << blkbits;
+ int err;
+ loff_t ret;
+
+ BUG_ON(!pages[full_page]);
+
+ /*
+ * We want to read at least 'full_page' page. Because we have to
+ * uncompress the whole compression block anyway, fill the surrounding
+ * pages with the data we have anyway...
+ */
+ start_off = page_offset(pages[full_page]);
+ end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size);
+
+ cstart_block = start_off >> zisofs_block_shift;
+ cend_block = (end_off + (1 << zisofs_block_shift) - 1)
+ >> zisofs_block_shift;
+
+ WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) !=
+ ((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK));
+
+ /* Find the pointer to this specific chunk */
+ /* Note: we're not using isonum_731() here because the data is known aligned */
+ /* Note: header_size is in 32-bit words (4 bytes) */
+ blockptr = (header_size + cstart_block) << 2;
+ bh = isofs_bread(inode, blockptr >> blkbits);
+ if (!bh)
+ return -EIO;
+ block_start = le32_to_cpu(*(__le32 *)
+ (bh->b_data + (blockptr & (blksize - 1))));
+
+ while (cstart_block < cend_block && pcount > 0) {
+ /* Load end of the compressed block in the file */
+ blockptr += 4;
+ /* Traversed to next block? */
+ if (!(blockptr & (blksize - 1))) {
+ brelse(bh);
+
+ bh = isofs_bread(inode, blockptr >> blkbits);
+ if (!bh)
+ return -EIO;
+ }
+ block_end = le32_to_cpu(*(__le32 *)
+ (bh->b_data + (blockptr & (blksize - 1))));
+ if (block_start > block_end) {
+ brelse(bh);
+ return -EIO;
+ }
+ err = 0;
+ ret = zisofs_uncompress_block(inode, block_start, block_end,
+ pcount, pages, poffset, &err);
+ poffset += ret;
+ pages += poffset >> PAGE_CACHE_SHIFT;
+ pcount -= poffset >> PAGE_CACHE_SHIFT;
+ full_page -= poffset >> PAGE_CACHE_SHIFT;
+ poffset &= ~PAGE_CACHE_MASK;
+
+ if (err) {
+ brelse(bh);
+ /*
+ * Did we finish reading the page we really wanted
+ * to read?
+ */
+ if (full_page < 0)
+ return 0;
+ return err;
}
- zlib_inflateEnd(&stream);
- z_eio:
- mutex_unlock(&zisofs_zlib_lock);
+ block_start = block_end;
+ cstart_block++;
+ }
+
+ if (poffset && *pages) {
+ memset(page_address(*pages) + poffset, 0,
+ PAGE_CACHE_SIZE - poffset);
+ flush_dcache_page(*pages);
+ SetPageUptodate(*pages);
+ }
+ return 0;
+}
- b_eio:
- for ( i = 0 ; i < haveblocks ; i++ ) {
- if ( bhs[i] )
- brelse(bhs[i]);
+/*
+ * When decompressing, we typically obtain more than one page
+ * per reference. We inject the additional pages into the page
+ * cache as a form of readahead.
+ */
+static int zisofs_readpage(struct file *file, struct page *page)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct address_space *mapping = inode->i_mapping;
+ int err;
+ int i, pcount, full_page;
+ unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
+ unsigned int zisofs_pages_per_cblock =
+ PAGE_CACHE_SHIFT <= zisofs_block_shift ?
+ (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0;
+ struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+ pgoff_t index = page->index, end_index;
+
+ end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ /*
+ * If this page is wholly outside i_size we just return zero;
+ * do_generic_file_read() will handle this for us
+ */
+ if (index >= end_index) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+ }
+
+ if (PAGE_CACHE_SHIFT <= zisofs_block_shift) {
+ /* We have already been given one page, this is the one
+ we must do. */
+ full_page = index & (zisofs_pages_per_cblock - 1);
+ pcount = min_t(int, zisofs_pages_per_cblock,
+ end_index - (index & ~(zisofs_pages_per_cblock - 1)));
+ index -= full_page;
+ } else {
+ full_page = 0;
+ pcount = 1;
+ }
+ pages[full_page] = page;
+
+ for (i = 0; i < pcount; i++, index++) {
+ if (i != full_page)
+ pages[i] = grab_cache_page_nowait(mapping, index);
+ if (pages[i]) {
+ ClearPageError(pages[i]);
+ kmap(pages[i]);
}
}
-eio:
+ err = zisofs_fill_pages(inode, full_page, pcount, pages);
/* Release any residual pages, do not SetPageUptodate */
- while ( fpage < maxpage ) {
- page = pages[fpage];
- if ( page ) {
- flush_dcache_page(page);
- if ( fpage == xpage )
- SetPageError(page);
- kunmap(page);
- unlock_page(page);
- if ( fpage != xpage )
- page_cache_release(page);
+ for (i = 0; i < pcount; i++) {
+ if (pages[i]) {
+ flush_dcache_page(pages[i]);
+ if (i == full_page && err)
+ SetPageError(pages[i]);
+ kunmap(pages[i]);
+ unlock_page(pages[i]);
+ if (i != full_page)
+ page_cache_release(pages[i]);
}
- fpage++;
}
/* At this point, err contains 0 or -EIO depending on the "critical" page */
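
[Editor's note] The rewritten zisofs code above splits the old monolithic zisofs_readpage() into zisofs_uncompress_block(), which inflates one compressed block into an array of pages, and zisofs_fill_pages(), which walks the block pointer table. The core of the former is the classic zlib streaming loop that refills avail_in from buffer heads and retargets avail_out at successive pages. For readers unfamiliar with that pattern, here is a minimal userspace sketch of the same loop against plain memory buffers (links against zlib; error handling trimmed, so it is a sketch rather than production code):

#include <string.h>
#include <zlib.h>

/* Inflate src[0..src_len) into dst[0..dst_len); returns bytes produced or -1.
 * The kernel code above does the same dance, except next_in/next_out are
 * refilled from buffer heads and page cache pages as they run dry. */
static long inflate_buffer(unsigned char *dst, size_t dst_len,
                           const unsigned char *src, size_t src_len)
{
    z_stream strm;
    int zerr;

    memset(&strm, 0, sizeof(strm));     /* Z_NULL zalloc/zfree: use defaults */
    if (inflateInit(&strm) != Z_OK)
        return -1;

    strm.next_in = (unsigned char *)src;
    strm.avail_in = (uInt)src_len;
    strm.next_out = dst;
    strm.avail_out = (uInt)dst_len;

    do {
        zerr = inflate(&strm, Z_SYNC_FLUSH);
    } while (zerr == Z_OK && strm.avail_in && strm.avail_out);

    inflateEnd(&strm);
    return (zerr == Z_STREAM_END || zerr == Z_OK) ? (long)strm.total_out : -1;
}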
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index e81a30593ba..ed752cb3847 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -9,7 +9,7 @@
*
* The following files are helpful:
*
- * Documentation/filesystems/Exporting
+ * Documentation/filesystems/nfs/Exporting
* fs/exportfs/expfs.c.
*/
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index c2fb2dd0131..96a685c550f 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -518,8 +518,7 @@ repeat:
if (algo == SIG('p', 'z')) {
int block_shift =
isonum_711(&rr->u.ZF.parms[1]);
- if (block_shift < PAGE_CACHE_SHIFT
- || block_shift > 17) {
+ if (block_shift > 17) {
printk(KERN_WARNING "isofs: "
"Can't handle ZF block "
"size of 2^%d\n",
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 8896c1d4feb..6a10238d2c6 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -286,7 +286,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
if (err) {
/*
* Because AS_EIO is cleared by
- * wait_on_page_writeback_range(), set it again so
+ * filemap_fdatawait_range(), set it again so
* that user process can get -EIO from fsync().
*/
set_bit(AS_EIO,
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 7edb62e9741..7cdc3196476 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -350,8 +350,8 @@ int jffs2_acl_chmod(struct inode *inode)
return rc;
}
-static size_t jffs2_acl_access_listxattr(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+static size_t jffs2_acl_access_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
{
const int retlen = sizeof(POSIX_ACL_XATTR_ACCESS);
@@ -360,8 +360,8 @@ static size_t jffs2_acl_access_listxattr(struct inode *inode, char *list, size_t
return retlen;
}
-static size_t jffs2_acl_default_listxattr(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+static size_t jffs2_acl_default_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
{
const int retlen = sizeof(POSIX_ACL_XATTR_DEFAULT);
@@ -370,12 +370,16 @@ static size_t jffs2_acl_default_listxattr(struct inode *inode, char *list, size_
return retlen;
}
-static int jffs2_acl_getxattr(struct inode *inode, int type, void *buffer, size_t size)
+static int jffs2_acl_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
struct posix_acl *acl;
int rc;
- acl = jffs2_get_acl(inode, type);
+ if (name[0] != '\0')
+ return -EINVAL;
+
+ acl = jffs2_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (!acl)
@@ -386,26 +390,15 @@ static int jffs2_acl_getxattr(struct inode *inode, int type, void *buffer, size_
return rc;
}
-static int jffs2_acl_access_getxattr(struct inode *inode, const char *name, void *buffer, size_t size)
-{
- if (name[0] != '\0')
- return -EINVAL;
- return jffs2_acl_getxattr(inode, ACL_TYPE_ACCESS, buffer, size);
-}
-
-static int jffs2_acl_default_getxattr(struct inode *inode, const char *name, void *buffer, size_t size)
-{
- if (name[0] != '\0')
- return -EINVAL;
- return jffs2_acl_getxattr(inode, ACL_TYPE_DEFAULT, buffer, size);
-}
-
-static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value, size_t size)
+static int jffs2_acl_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
struct posix_acl *acl;
int rc;
- if (!is_owner_or_cap(inode))
+ if (name[0] != '\0')
+ return -EINVAL;
+ if (!is_owner_or_cap(dentry->d_inode))
return -EPERM;
if (value) {
@@ -420,38 +413,24 @@ static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value,
} else {
acl = NULL;
}
- rc = jffs2_set_acl(inode, type, acl);
+ rc = jffs2_set_acl(dentry->d_inode, type, acl);
out:
posix_acl_release(acl);
return rc;
}
-static int jffs2_acl_access_setxattr(struct inode *inode, const char *name,
- const void *buffer, size_t size, int flags)
-{
- if (name[0] != '\0')
- return -EINVAL;
- return jffs2_acl_setxattr(inode, ACL_TYPE_ACCESS, buffer, size);
-}
-
-static int jffs2_acl_default_setxattr(struct inode *inode, const char *name,
- const void *buffer, size_t size, int flags)
-{
- if (name[0] != '\0')
- return -EINVAL;
- return jffs2_acl_setxattr(inode, ACL_TYPE_DEFAULT, buffer, size);
-}
-
struct xattr_handler jffs2_acl_access_xattr_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_DEFAULT,
.list = jffs2_acl_access_listxattr,
- .get = jffs2_acl_access_getxattr,
- .set = jffs2_acl_access_setxattr,
+ .get = jffs2_acl_getxattr,
+ .set = jffs2_acl_setxattr,
};
struct xattr_handler jffs2_acl_default_xattr_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
.list = jffs2_acl_default_listxattr,
- .get = jffs2_acl_default_getxattr,
- .set = jffs2_acl_default_setxattr,
+ .get = jffs2_acl_getxattr,
+ .set = jffs2_acl_setxattr,
};
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 090c556ffed..3b6f2fa12cf 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -700,7 +700,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
struct jffs2_raw_inode ri;
struct jffs2_node_frag *last_frag;
union jffs2_device_node dev;
- char *mdata = NULL, mdatalen = 0;
+ char *mdata = NULL;
+ int mdatalen = 0;
uint32_t alloclen, ilen;
int ret;
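
[Editor's note] The one-line jffs2/gc.c change above is easy to miss: mdatalen was declared char, so any metadata length that does not fit in 8 bits is silently truncated (and may wrap negative where char is signed) when assigned from a wider value; declaring it int fixes that. A short userspace demonstration of the failure mode (illustrative, not JFFS2 code):

#include <stdio.h>

int main(void)
{
    char narrow = 0;
    int  wide   = 0;
    int  len    = 200;          /* e.g. a symlink target length */

    narrow = (char)len;         /* wraps to -56 where char is signed 8-bit */
    wide   = len;               /* preserved */
    printf("char=%d int=%d\n", (int)narrow, wide);
    return 0;
}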
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 378991cfe40..e22de8397b7 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1284,7 +1284,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
f->target = NULL;
mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
- return -ret;
+ return ret;
}
f->target[je32_to_cpu(latest_node->csize)] = '\0';
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 02c39c64ecb..eaccee05858 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -44,26 +44,28 @@ int jffs2_init_security(struct inode *inode, struct inode *dir)
}
/* ---- XATTR Handler for "security.*" ----------------- */
-static int jffs2_security_getxattr(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int jffs2_security_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size);
+ return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY,
+ name, buffer, size);
}
-static int jffs2_security_setxattr(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags)
+static int jffs2_security_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size, flags);
+ return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY,
+ name, buffer, size, flags);
}
-static size_t jffs2_security_listxattr(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+static size_t jffs2_security_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
{
size_t retlen = XATTR_SECURITY_PREFIX_LEN + name_len + 1;
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 6caf1e1ee26..800171dca53 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -23,7 +23,7 @@
int jffs2_sum_init(struct jffs2_sb_info *c)
{
- uint32_t sum_size = max_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
+ uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 4b107881acd..9e75c62c85d 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -990,9 +990,11 @@ ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
if (!xhandle)
continue;
if (buffer) {
- rc = xhandle->list(inode, buffer+len, size-len, xd->xname, xd->name_len);
+ rc = xhandle->list(dentry, buffer+len, size-len,
+ xd->xname, xd->name_len, xd->flags);
} else {
- rc = xhandle->list(inode, NULL, 0, xd->xname, xd->name_len);
+ rc = xhandle->list(dentry, NULL, 0, xd->xname,
+ xd->name_len, xd->flags);
}
if (rc < 0)
goto out;
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index 8ec5765ef34..3e5a5e356e0 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -16,24 +16,26 @@
#include <linux/mtd/mtd.h>
#include "nodelist.h"
-static int jffs2_trusted_getxattr(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int jffs2_trusted_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size);
+ return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED,
+ name, buffer, size);
}
-static int jffs2_trusted_setxattr(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags)
+static int jffs2_trusted_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags);
+ return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED,
+ name, buffer, size, flags);
}
-static size_t jffs2_trusted_listxattr(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+static size_t jffs2_trusted_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
{
size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1;
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index 8bbeab90ada..8544af67dff 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -16,24 +16,26 @@
#include <linux/mtd/mtd.h>
#include "nodelist.h"
-static int jffs2_user_getxattr(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int jffs2_user_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size);
+ return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_USER,
+ name, buffer, size);
}
-static int jffs2_user_setxattr(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags)
+static int jffs2_user_setxattr(struct dentry *dentry, const char *name,
+ const void *buffer, size_t size, int flags, int type)
{
if (!strcmp(name, ""))
return -EINVAL;
- return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size, flags);
+ return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_USER,
+ name, buffer, size, flags);
}
-static size_t jffs2_user_listxattr(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+static size_t jffs2_user_listxattr(struct dentry *dentry, char *list,
+ size_t list_size, const char *name, size_t name_len, int type)
{
size_t retlen = XATTR_USER_PREFIX_LEN + name_len + 1;
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index f26e4d03ada..d945ea76b44 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1292,7 +1292,7 @@ int txCommit(tid_t tid, /* transaction identifier */
*/
/*
* I believe this code is no longer needed. Splitting I_LOCK
- * into two bits, I_LOCK and I_SYNC should prevent this
+ * into two bits, I_NEW and I_SYNC should prevent this
* deadlock as well. But since I don't have a JFS testload
* to verify this, only a trivial s/I_LOCK/I_SYNC/ was done.
* Joern
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 2234c73fc57..d929a822a74 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -524,7 +524,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
* Page cache is indexed by long.
* I would use MAX_LFS_FILESIZE, but it's only half as big
*/
- sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
+ sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes);
#endif
sb->s_time_gran = 1;
return 0;
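
[Editor's note] The jfs/super.c change above only adds a (u64) cast. The kernel's min() macro deliberately refuses to compare operands of different types, so once s_maxbytes is no longer the same type as the u64 expression the build warns, and the cast restores a like-for-like comparison (min_t(u64, ...) would be the other idiomatic spelling). The type check itself is a pointer-comparison trick, roughly as sketched below; the macro name is ours, not the kernel's exact definition.

/* Rough shape of the kernel's type-checked min(); comparing the two addresses
 * makes the compiler warn when x and y have different types. */
#define checked_min(x, y) ({                    \
    typeof(x) _x = (x);                         \
    typeof(y) _y = (y);                         \
    (void)(&_x == &_y);                         \
    _x < _y ? _x : _y; })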
diff --git a/fs/libfs.c b/fs/libfs.c
index 219576c52d8..6e8d17e1dc4 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -848,7 +848,6 @@ EXPORT_SYMBOL(simple_write_end);
EXPORT_SYMBOL(simple_dir_inode_operations);
EXPORT_SYMBOL(simple_dir_operations);
EXPORT_SYMBOL(simple_empty);
-EXPORT_SYMBOL(d_alloc_name);
EXPORT_SYMBOL(simple_fill_super);
EXPORT_SYMBOL(simple_getattr);
EXPORT_SYMBOL(simple_link);
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index bd173a6ca3b..a7966eed3c1 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -11,10 +11,6 @@
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
-#include <linux/in.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/nfsd/nfsd.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index e1d28ddd216..56c9519d900 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -11,10 +11,6 @@
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
-#include <linux/in.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/nfsd/nfsd.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
diff --git a/fs/namei.c b/fs/namei.c
index d11f404667e..68921d9b530 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -35,7 +35,7 @@
#include <linux/fs_struct.h>
#include <asm/uaccess.h>
-#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
+#include "internal.h"
/* [Feb-1997 T. Schoebel-Theuer]
* Fundamental changes in the pathname lookup mechanisms (namei)
@@ -108,8 +108,6 @@
* any extra contention...
*/
-static int __link_path_walk(const char *name, struct nameidata *nd);
-
/* In order to reduce some races, while at the same time doing additional
* checking and hopefully speeding things up, we copy filenames to the
* kernel data space before using them..
@@ -414,36 +412,55 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
}
/*
- * Internal lookup() using the new generic dcache.
- * SMP-safe
+ * force_reval_path - force revalidation of a dentry
+ *
+ * In some situations the path walking code will trust dentries without
+ * revalidating them. This causes problems for filesystems that depend on
+ * d_revalidate to handle file opens (e.g. NFSv4). When FS_REVAL_DOT is set
+ * (which indicates that it's possible for the dentry to go stale), force
+ * a d_revalidate call before proceeding.
+ *
+ * Returns 0 if the revalidation was successful. If the revalidation fails,
+ * either return the error returned by d_revalidate or -ESTALE if the
+ * revalidation just returned 0. If d_revalidate returns 0, we attempt to
+ * invalidate the dentry. It's up to the caller to handle putting references
+ * to the path if necessary.
*/
-static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd)
+static int
+force_reval_path(struct path *path, struct nameidata *nd)
{
- struct dentry * dentry = __d_lookup(parent, name);
+ int status;
+ struct dentry *dentry = path->dentry;
- /* lockess __d_lookup may fail due to concurrent d_move()
- * in some unrelated directory, so try with d_lookup
+ /*
+ * only check on filesystems where it's possible for the dentry to
+ * become stale. It's assumed that if this flag is set then the
+ * d_revalidate op will also be defined.
*/
- if (!dentry)
- dentry = d_lookup(parent, name);
+ if (!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT))
+ return 0;
- if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
- dentry = do_revalidate(dentry, nd);
+ status = dentry->d_op->d_revalidate(dentry, nd);
+ if (status > 0)
+ return 0;
- return dentry;
+ if (!status) {
+ d_invalidate(dentry);
+ status = -ESTALE;
+ }
+ return status;
}
/*
- * Short-cut version of permission(), for calling by
- * path_walk(), when dcache lock is held. Combines parts
- * of permission() and generic_permission(), and tests ONLY for
- * MAY_EXEC permission.
+ * Short-cut version of permission(), for calling on directories
+ * during pathname resolution. Combines parts of permission()
+ * and generic_permission(), and tests ONLY for MAY_EXEC permission.
*
* If appropriate, check DAC only. If not appropriate, or
- * short-cut DAC fails, then call permission() to do more
+ * short-cut DAC fails, then call ->permission() to do more
* complete permission check.
*/
-static int exec_permission_lite(struct inode *inode)
+static int exec_permission(struct inode *inode)
{
int ret;
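
As a hedged illustration of the contract documented above (the wrapper name below is hypothetical, not part of the patch), a caller that trusted a cached dentry would use force_reval_path() roughly like this, dropping its own path references when the revalidation fails:

        /* hypothetical caller sketch */
        static int finish_trusted_lookup(struct path *path, struct nameidata *nd)
        {
                int err = force_reval_path(path, nd);   /* no-op unless FS_REVAL_DOT is set */
                if (err)
                        path_put(&nd->path);            /* caller handles its references */
                return err;                             /* 0, -ESTALE, or d_revalidate's error */
        }
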
@@ -465,99 +482,6 @@ ok:
return security_inode_permission(inode, MAY_EXEC);
}
-/*
- * This is called when everything else fails, and we actually have
- * to go to the low-level filesystem to find out what we should do..
- *
- * We get the directory semaphore, and after getting that we also
- * make sure that nobody added the entry to the dcache in the meantime..
- * SMP-safe
- */
-static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd)
-{
- struct dentry * result;
- struct inode *dir = parent->d_inode;
-
- mutex_lock(&dir->i_mutex);
- /*
- * First re-do the cached lookup just in case it was created
- * while we waited for the directory semaphore..
- *
- * FIXME! This could use version numbering or similar to
- * avoid unnecessary cache lookups.
- *
- * The "dcache_lock" is purely to protect the RCU list walker
- * from concurrent renames at this point (we mustn't get false
- * negatives from the RCU list walk here, unlike the optimistic
- * fast walk).
- *
- * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
- */
- result = d_lookup(parent, name);
- if (!result) {
- struct dentry *dentry;
-
- /* Don't create child dentry for a dead directory. */
- result = ERR_PTR(-ENOENT);
- if (IS_DEADDIR(dir))
- goto out_unlock;
-
- dentry = d_alloc(parent, name);
- result = ERR_PTR(-ENOMEM);
- if (dentry) {
- result = dir->i_op->lookup(dir, dentry, nd);
- if (result)
- dput(dentry);
- else
- result = dentry;
- }
-out_unlock:
- mutex_unlock(&dir->i_mutex);
- return result;
- }
-
- /*
- * Uhhuh! Nasty case: the cache was re-populated while
- * we waited on the semaphore. Need to revalidate.
- */
- mutex_unlock(&dir->i_mutex);
- if (result->d_op && result->d_op->d_revalidate) {
- result = do_revalidate(result, nd);
- if (!result)
- result = ERR_PTR(-ENOENT);
- }
- return result;
-}
-
-/*
- * Wrapper to retry pathname resolution whenever the underlying
- * file system returns an ESTALE.
- *
- * Retry the whole path once, forcing real lookup requests
- * instead of relying on the dcache.
- */
-static __always_inline int link_path_walk(const char *name, struct nameidata *nd)
-{
- struct path save = nd->path;
- int result;
-
- /* make sure the stuff we saved doesn't go away */
- path_get(&save);
-
- result = __link_path_walk(name, nd);
- if (result == -ESTALE) {
- /* nd->path had been dropped */
- nd->path = save;
- path_get(&nd->path);
- nd->flags |= LOOKUP_REVAL;
- result = __link_path_walk(name, nd);
- }
-
- path_put(&save);
-
- return result;
-}
-
static __always_inline void set_root(struct nameidata *nd)
{
if (!nd->root.mnt) {
@@ -569,6 +493,8 @@ static __always_inline void set_root(struct nameidata *nd)
}
}
+static int link_path_walk(const char *, struct nameidata *);
+
static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link)
{
int res = 0;
@@ -641,11 +567,14 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
error = 0;
if (s)
error = __vfs_follow_link(nd, s);
+ else if (nd->last_type == LAST_BIND) {
+ error = force_reval_path(&nd->path, nd);
+ if (error)
+ path_put(&nd->path);
+ }
if (dentry->d_inode->i_op->put_link)
dentry->d_inode->i_op->put_link(dentry, nd, cookie);
}
- path_put(path);
-
return error;
}
@@ -672,6 +601,7 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
current->total_link_count++;
nd->depth++;
err = __do_follow_link(path, nd);
+ path_put(path);
current->link_count--;
nd->depth--;
return err;
@@ -797,8 +727,19 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
struct path *path)
{
struct vfsmount *mnt = nd->path.mnt;
- struct dentry *dentry = __d_lookup(nd->path.dentry, name);
+ struct dentry *dentry, *parent;
+ struct inode *dir;
+ /*
+ * See if the low-level filesystem might want
+ * to use its own hash..
+ */
+ if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
+ int err = nd->path.dentry->d_op->d_hash(nd->path.dentry, name);
+ if (err < 0)
+ return err;
+ }
+ dentry = __d_lookup(nd->path.dentry, name);
if (!dentry)
goto need_lookup;
if (dentry->d_op && dentry->d_op->d_revalidate)
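
The ->d_hash call added above lets a filesystem canonicalize the name hash before the dcache lookup. A hedged sketch of what such a hook typically looks like (a case-folding filesystem is the classic user; the function name here is illustrative, not from this patch):

        static int example_ci_d_hash(struct dentry *dentry, struct qstr *q)
        {
                unsigned long hash = init_name_hash();
                unsigned int i;

                for (i = 0; i < q->len; i++)
                        hash = partial_name_hash(tolower(q->name[i]), hash);
                q->hash = end_name_hash(hash);
                return 0;   /* a negative return aborts the lookup, as do_lookup() now checks */
        }
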
@@ -810,7 +751,59 @@ done:
return 0;
need_lookup:
- dentry = real_lookup(nd->path.dentry, name, nd);
+ parent = nd->path.dentry;
+ dir = parent->d_inode;
+
+ mutex_lock(&dir->i_mutex);
+ /*
+ * First re-do the cached lookup just in case it was created
+ * while we waited for the directory semaphore..
+ *
+ * FIXME! This could use version numbering or similar to
+ * avoid unnecessary cache lookups.
+ *
+ * The "dcache_lock" is purely to protect the RCU list walker
+ * from concurrent renames at this point (we mustn't get false
+ * negatives from the RCU list walk here, unlike the optimistic
+ * fast walk).
+ *
+ * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup
+ */
+ dentry = d_lookup(parent, name);
+ if (!dentry) {
+ struct dentry *new;
+
+ /* Don't create child dentry for a dead directory. */
+ dentry = ERR_PTR(-ENOENT);
+ if (IS_DEADDIR(dir))
+ goto out_unlock;
+
+ new = d_alloc(parent, name);
+ dentry = ERR_PTR(-ENOMEM);
+ if (new) {
+ dentry = dir->i_op->lookup(dir, new, nd);
+ if (dentry)
+ dput(new);
+ else
+ dentry = new;
+ }
+out_unlock:
+ mutex_unlock(&dir->i_mutex);
+ if (IS_ERR(dentry))
+ goto fail;
+ goto done;
+ }
+
+ /*
+ * Uhhuh! Nasty case: the cache was re-populated while
+ * we waited on the semaphore. Need to revalidate.
+ */
+ mutex_unlock(&dir->i_mutex);
+ if (dentry->d_op && dentry->d_op->d_revalidate) {
+ dentry = do_revalidate(dentry, nd);
+ if (!dentry)
+ dentry = ERR_PTR(-ENOENT);
+ }
if (IS_ERR(dentry))
goto fail;
goto done;
@@ -835,7 +828,7 @@ fail:
* Returns 0 and nd will have valid dentry and mnt on success.
* Returns error and drops reference to input namei data on failure.
*/
-static int __link_path_walk(const char *name, struct nameidata *nd)
+static int link_path_walk(const char *name, struct nameidata *nd)
{
struct path next;
struct inode *inode;
@@ -858,7 +851,7 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
unsigned int c;
nd->flags |= LOOKUP_CONTINUE;
- err = exec_permission_lite(inode);
+ err = exec_permission(inode);
if (err)
break;
@@ -898,16 +891,6 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
case 1:
continue;
}
- /*
- * See if the low-level filesystem might want
- * to use its own hash..
- */
- if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
- err = nd->path.dentry->d_op->d_hash(nd->path.dentry,
- &this);
- if (err < 0)
- break;
- }
/* This does the actual lookups.. */
err = do_lookup(nd, &this, &next);
if (err)
@@ -953,12 +936,6 @@ last_component:
case 1:
goto return_reval;
}
- if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) {
- err = nd->path.dentry->d_op->d_hash(nd->path.dentry,
- &this);
- if (err < 0)
- break;
- }
err = do_lookup(nd, &this, &next);
if (err)
break;
@@ -1017,8 +994,27 @@ return_err:
static int path_walk(const char *name, struct nameidata *nd)
{
+ struct path save = nd->path;
+ int result;
+
current->total_link_count = 0;
- return link_path_walk(name, nd);
+
+ /* make sure the stuff we saved doesn't go away */
+ path_get(&save);
+
+ result = link_path_walk(name, nd);
+ if (result == -ESTALE) {
+ /* nd->path had been dropped */
+ current->total_link_count = 0;
+ nd->path = save;
+ path_get(&nd->path);
+ nd->flags |= LOOKUP_REVAL;
+ result = link_path_walk(name, nd);
+ }
+
+ path_put(&save);
+
+ return result;
}
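
The save/path_get()/retry/path_put() sequence above is the same -ESTALE retry idiom this patch also applies around __do_follow_link() further down in do_filp_open(); reduced to its essentials (a condensed restatement, with walk_or_follow() standing in for either call):

        struct path save = nd->path;
        path_get(&save);                        /* pin the starting point */
        result = walk_or_follow(name, nd);
        if (result == -ESTALE) {                /* the object went stale on the server */
                nd->path = save;
                path_get(&nd->path);
                nd->flags |= LOOKUP_REVAL;      /* retry once, forcing revalidation */
                result = walk_or_follow(name, nd);
        }
        path_put(&save);
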
static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd)
@@ -1141,36 +1137,6 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
return retval;
}
-/**
- * path_lookup_open - lookup a file path with open intent
- * @dfd: the directory to use as base, or AT_FDCWD
- * @name: pointer to file name
- * @lookup_flags: lookup intent flags
- * @nd: pointer to nameidata
- * @open_flags: open intent flags
- */
-static int path_lookup_open(int dfd, const char *name,
- unsigned int lookup_flags, struct nameidata *nd, int open_flags)
-{
- struct file *filp = get_empty_filp();
- int err;
-
- if (filp == NULL)
- return -ENFILE;
- nd->intent.open.file = filp;
- nd->intent.open.flags = open_flags;
- nd->intent.open.create_mode = 0;
- err = do_path_lookup(dfd, name, lookup_flags|LOOKUP_OPEN, nd);
- if (IS_ERR(nd->intent.open.file)) {
- if (err == 0) {
- err = PTR_ERR(nd->intent.open.file);
- path_put(&nd->path);
- }
- } else if (err != 0)
- release_open_intent(nd);
- return err;
-}
-
static struct dentry *__lookup_hash(struct qstr *name,
struct dentry *base, struct nameidata *nd)
{
@@ -1191,7 +1157,17 @@ static struct dentry *__lookup_hash(struct qstr *name,
goto out;
}
- dentry = cached_lookup(base, name, nd);
+ dentry = __d_lookup(base, name);
+
+ /* lockless __d_lookup may fail due to concurrent d_move()
+ * in some unrelated directory, so try with d_lookup
+ */
+ if (!dentry)
+ dentry = d_lookup(base, name);
+
+ if (dentry && dentry->d_op && dentry->d_op->d_revalidate)
+ dentry = do_revalidate(dentry, nd);
+
if (!dentry) {
struct dentry *new;
@@ -1223,7 +1199,7 @@ static struct dentry *lookup_hash(struct nameidata *nd)
{
int err;
- err = inode_permission(nd->path.dentry->d_inode, MAY_EXEC);
+ err = exec_permission(nd->path.dentry->d_inode);
if (err)
return ERR_PTR(err);
return __lookup_hash(&nd->last, nd->path.dentry, nd);
@@ -1273,29 +1249,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
if (err)
return ERR_PTR(err);
- err = inode_permission(base->d_inode, MAY_EXEC);
- if (err)
- return ERR_PTR(err);
- return __lookup_hash(&this, base, NULL);
-}
-
-/**
- * lookup_one_noperm - bad hack for sysfs
- * @name: pathname component to lookup
- * @base: base directory to lookup from
- *
- * This is a variant of lookup_one_len that doesn't perform any permission
- * checks. It's a horrible hack to work around the braindead sysfs
- * architecture and should not be used anywhere else.
- *
- * DON'T USE THIS FUNCTION EVER, thanks.
- */
-struct dentry *lookup_one_noperm(const char *name, struct dentry *base)
-{
- int err;
- struct qstr this;
-
- err = __lookup_one_len(name, &this, base, strlen(name));
+ err = exec_permission(base->d_inode);
if (err)
return ERR_PTR(err);
return __lookup_hash(&this, base, NULL);
@@ -1533,69 +1487,45 @@ int may_open(struct path *path, int acc_mode, int flag)
if (error)
return error;
- error = ima_path_check(path, acc_mode ?
- acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) :
- ACC_MODE(flag) & (MAY_READ | MAY_WRITE),
- IMA_COUNT_UPDATE);
-
- if (error)
- return error;
/*
* An append-only file must be opened in append mode for writing.
*/
if (IS_APPEND(inode)) {
- error = -EPERM;
if ((flag & FMODE_WRITE) && !(flag & O_APPEND))
- goto err_out;
+ return -EPERM;
if (flag & O_TRUNC)
- goto err_out;
+ return -EPERM;
}
/* O_NOATIME can only be set by the owner or superuser */
- if (flag & O_NOATIME)
- if (!is_owner_or_cap(inode)) {
- error = -EPERM;
- goto err_out;
- }
+ if (flag & O_NOATIME && !is_owner_or_cap(inode))
+ return -EPERM;
/*
* Ensure there are no outstanding leases on the file.
*/
- error = break_lease(inode, flag);
- if (error)
- goto err_out;
-
- if (flag & O_TRUNC) {
- error = get_write_access(inode);
- if (error)
- goto err_out;
-
- /*
- * Refuse to truncate files with mandatory locks held on them.
- */
- error = locks_verify_locked(inode);
- if (!error)
- error = security_path_truncate(path, 0,
- ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
- if (!error) {
- vfs_dq_init(inode);
-
- error = do_truncate(dentry, 0,
- ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
- NULL);
- }
- put_write_access(inode);
- if (error)
- goto err_out;
- } else
- if (flag & FMODE_WRITE)
- vfs_dq_init(inode);
+ return break_lease(inode, flag);
+}
- return 0;
-err_out:
- ima_counts_put(path, acc_mode ?
- acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) :
- ACC_MODE(flag) & (MAY_READ | MAY_WRITE));
+static int handle_truncate(struct path *path)
+{
+ struct inode *inode = path->dentry->d_inode;
+ int error = get_write_access(inode);
+ if (error)
+ return error;
+ /*
+ * Refuse to truncate files with mandatory locks held on them.
+ */
+ error = locks_verify_locked(inode);
+ if (!error)
+ error = security_path_truncate(path, 0,
+ ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
+ if (!error) {
+ error = do_truncate(path->dentry, 0,
+ ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
+ NULL);
+ }
+ put_write_access(inode);
return error;
}
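
With the truncation side split out into handle_truncate(), the open path now does permission and lease checks first and performs the O_TRUNC work only after the file has actually been opened; condensed from the do_filp_open() hunks later in this patch, for orientation only:

        error = may_open(&nd.path, acc_mode, flag);     /* perms, append, O_NOATIME, leases */
        if (!error) {
                filp = nameidata_to_filp(&nd);
                if (!IS_ERR(filp) && will_truncate)
                        error = handle_truncate(&nd.path);  /* truncate after the open succeeds */
        }
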
@@ -1650,7 +1580,7 @@ static inline int open_to_namei_flags(int flag)
return flag;
}
-static int open_will_write_to_fs(int flag, struct inode *inode)
+static int open_will_truncate(int flag, struct inode *inode)
{
/*
* We'll never write to the fs underlying
@@ -1672,12 +1602,21 @@ struct file *do_filp_open(int dfd, const char *pathname,
struct file *filp;
struct nameidata nd;
int error;
- struct path path;
+ struct path path, save;
struct dentry *dir;
int count = 0;
- int will_write;
+ int will_truncate;
int flag = open_to_namei_flags(open_flag);
+ /*
+ * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
+ * check for O_DSYNC if they need any syncing at all, we enforce that it is
+ * always set instead of having to deal with possibly weird behaviour
+ * for malicious applications setting only __O_SYNC.
+ */
+ if (open_flag & __O_SYNC)
+ open_flag |= O_DSYNC;
+
if (!acc_mode)
acc_mode = MAY_OPEN | ACC_MODE(flag);
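
The normalization above relies on O_SYNC being defined as __O_SYNC | O_DSYNC, which is why code further down (e.g. the fs/nfs/file.c hunks in this patch) can test only O_DSYNC and still catch fully synchronous opens. A hedged sketch of the relationship, with the definitions paraphrased rather than copied from the headers:

        /* O_DSYNC  - sync file data on each write
         * __O_SYNC - the "extra" bit that additionally syncs metadata
         * O_SYNC  == (__O_SYNC | O_DSYNC)
         */
        if (open_flag & __O_SYNC)       /* caller somehow set only the extra bit */
                open_flag |= O_DSYNC;   /* make sure O_DSYNC-only checks still fire */
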
@@ -1694,8 +1633,23 @@ struct file *do_filp_open(int dfd, const char *pathname,
* The simplest case - just a plain lookup.
*/
if (!(flag & O_CREAT)) {
- error = path_lookup_open(dfd, pathname, lookup_flags(flag),
- &nd, flag);
+ filp = get_empty_filp();
+
+ if (filp == NULL)
+ return ERR_PTR(-ENFILE);
+ nd.intent.open.file = filp;
+ filp->f_flags = open_flag;
+ nd.intent.open.flags = flag;
+ nd.intent.open.create_mode = 0;
+ error = do_path_lookup(dfd, pathname,
+ lookup_flags(flag)|LOOKUP_OPEN, &nd);
+ if (IS_ERR(nd.intent.open.file)) {
+ if (error == 0) {
+ error = PTR_ERR(nd.intent.open.file);
+ path_put(&nd.path);
+ }
+ } else if (error)
+ release_open_intent(&nd);
if (error)
return ERR_PTR(error);
goto ok;
@@ -1730,6 +1684,7 @@ struct file *do_filp_open(int dfd, const char *pathname,
if (filp == NULL)
goto exit_parent;
nd.intent.open.file = filp;
+ filp->f_flags = open_flag;
nd.intent.open.flags = flag;
nd.intent.open.create_mode = mode;
dir = nd.path.dentry;
@@ -1770,14 +1725,18 @@ do_last:
mnt_drop_write(nd.path.mnt);
goto exit;
}
- filp = nameidata_to_filp(&nd, open_flag);
- if (IS_ERR(filp))
- ima_counts_put(&nd.path,
- acc_mode & (MAY_READ | MAY_WRITE |
- MAY_EXEC));
+ filp = nameidata_to_filp(&nd);
mnt_drop_write(nd.path.mnt);
if (nd.root.mnt)
path_put(&nd.root);
+ if (!IS_ERR(filp)) {
+ error = ima_path_check(&filp->f_path, filp->f_mode &
+ (MAY_READ | MAY_WRITE | MAY_EXEC));
+ if (error) {
+ fput(filp);
+ filp = ERR_PTR(error);
+ }
+ }
return filp;
}
@@ -1805,7 +1764,7 @@ do_last:
path_to_nameidata(&path, &nd);
error = -EISDIR;
- if (path.dentry->d_inode && S_ISDIR(path.dentry->d_inode->i_mode))
+ if (S_ISDIR(path.dentry->d_inode->i_mode))
goto exit;
ok:
/*
@@ -1818,28 +1777,45 @@ ok:
* be avoided. Taking this mnt write here
* ensures that (2) can not occur.
*/
- will_write = open_will_write_to_fs(flag, nd.path.dentry->d_inode);
- if (will_write) {
+ will_truncate = open_will_truncate(flag, nd.path.dentry->d_inode);
+ if (will_truncate) {
error = mnt_want_write(nd.path.mnt);
if (error)
goto exit;
}
error = may_open(&nd.path, acc_mode, flag);
if (error) {
- if (will_write)
+ if (will_truncate)
mnt_drop_write(nd.path.mnt);
goto exit;
}
- filp = nameidata_to_filp(&nd, open_flag);
- if (IS_ERR(filp))
- ima_counts_put(&nd.path,
- acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC));
+ filp = nameidata_to_filp(&nd);
+ if (!IS_ERR(filp)) {
+ error = ima_path_check(&filp->f_path, filp->f_mode &
+ (MAY_READ | MAY_WRITE | MAY_EXEC));
+ if (error) {
+ fput(filp);
+ filp = ERR_PTR(error);
+ }
+ }
+ if (!IS_ERR(filp)) {
+ if (acc_mode & MAY_WRITE)
+ vfs_dq_init(nd.path.dentry->d_inode);
+
+ if (will_truncate) {
+ error = handle_truncate(&nd.path);
+ if (error) {
+ fput(filp);
+ filp = ERR_PTR(error);
+ }
+ }
+ }
/*
* It is now safe to drop the mnt write
* because the filp has had a write taken
* on its behalf.
*/
- if (will_write)
+ if (will_truncate)
mnt_drop_write(nd.path.mnt);
if (nd.root.mnt)
path_put(&nd.root);
@@ -1876,7 +1852,18 @@ do_link:
error = security_inode_follow_link(path.dentry, &nd);
if (error)
goto exit_dput;
+ save = nd.path;
+ path_get(&save);
error = __do_follow_link(&path, &nd);
+ if (error == -ESTALE) {
+ /* nd.path had been dropped */
+ nd.path = save;
+ path_get(&nd.path);
+ nd.flags |= LOOKUP_REVAL;
+ error = __do_follow_link(&path, &nd);
+ }
+ path_put(&save);
+ path_put(&path);
if (error) {
/* Does someone understand code flow here? Or it is only
* me so stupid? Anathema to whoever designed this non-sense
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 2a77bc25d5a..59e5673b459 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -90,7 +90,7 @@ config ROOT_NFS
If you want your system to mount its root file system via NFS,
choose Y here. This is common practice for managing systems
without local permanent storage. For details, read
- <file:Documentation/filesystems/nfsroot.txt>.
+ <file:Documentation/filesystems/nfs/nfsroot.txt>.
Most people say N here.
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 293fa0528a6..73ab220354d 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -78,11 +78,6 @@ nfs4_callback_svc(void *vrqstp)
set_freezable();
- /*
- * FIXME: do we really need to run this under the BKL? If so, please
- * add a comment about what it's intended to protect.
- */
- lock_kernel();
while (!kthread_should_stop()) {
/*
* Listen for a request on the socket
@@ -104,7 +99,6 @@ nfs4_callback_svc(void *vrqstp)
preverr = err;
svc_process(rqstp);
}
- unlock_kernel();
return 0;
}
@@ -160,11 +154,6 @@ nfs41_callback_svc(void *vrqstp)
set_freezable();
- /*
- * FIXME: do we really need to run this under the BKL? If so, please
- * add a comment about what it's intended to protect.
- */
- lock_kernel();
while (!kthread_should_stop()) {
prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
@@ -183,7 +172,6 @@ nfs41_callback_svc(void *vrqstp)
}
finish_wait(&serv->sv_cb_waitq, &wq);
}
- unlock_kernel();
return 0;
}
@@ -397,6 +385,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
*/
static struct svc_version *nfs4_callback_version[] = {
[1] = &nfs4_callback_version1,
+ [4] = &nfs4_callback_version4,
};
static struct svc_stat nfs4_callback_stats;
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 07baa8254ca..d4036be0b58 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -106,6 +106,19 @@ struct cb_sequenceres {
extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
struct cb_sequenceres *res);
+extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid);
+
+#define RCA4_TYPE_MASK_RDATA_DLG 0
+#define RCA4_TYPE_MASK_WDATA_DLG 1
+
+struct cb_recallanyargs {
+ struct sockaddr *craa_addr;
+ uint32_t craa_objs_to_keep;
+ uint32_t craa_type_mask;
+};
+
+extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
#endif /* CONFIG_NFS_V4_1 */
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
@@ -114,8 +127,9 @@ extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
#ifdef CONFIG_NFS_V4
extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
extern void nfs_callback_down(int minorversion);
+extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid);
#endif /* CONFIG_NFS_V4 */
-
/*
* nfs41: Callbacks are expected to not cause substantial latency,
* so we limit their concurrency to 1 by setting up the maximum number
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index b7da1f54da6..defa9b4c470 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -61,6 +61,16 @@ out:
return res->status;
}
+static int (*nfs_validate_delegation_stateid(struct nfs_client *clp))(struct nfs_delegation *, const nfs4_stateid *)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (clp->cl_minorversion > 0)
+ return nfs41_validate_delegation_stateid;
+#endif
+ return nfs4_validate_delegation_stateid;
+}
+
+
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
{
struct nfs_client *clp;
@@ -81,7 +91,8 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
inode = nfs_delegation_find_inode(clp, &args->fh);
if (inode != NULL) {
/* Set up a helper thread to actually return the delegation */
- switch(nfs_async_inode_return_delegation(inode, &args->stateid)) {
+ switch (nfs_async_inode_return_delegation(inode, &args->stateid,
+ nfs_validate_delegation_stateid(clp))) {
case 0:
res = 0;
break;
@@ -102,8 +113,31 @@ out:
return res;
}
+int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
+{
+ if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
+ sizeof(delegation->stateid.data)) != 0)
+ return 0;
+ return 1;
+}
+
#if defined(CONFIG_NFS_V4_1)
+int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
+{
+ if (delegation == NULL)
+ return 0;
+
+ /* seqid is 4-bytes long */
+ if (((u32 *) &stateid->data)[0] != 0)
+ return 0;
+ if (memcmp(&delegation->stateid.data[4], &stateid->data[4],
+ sizeof(stateid->data)-4))
+ return 0;
+
+ return 1;
+}
+
/*
* Validate the sequenceID sent by the server.
* Return success if the sequenceID is one more than what we last saw on
@@ -227,4 +261,32 @@ out:
return res->csr_status;
}
+unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+{
+ struct nfs_client *clp;
+ int status;
+ fmode_t flags = 0;
+
+ status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ clp = nfs_find_client(args->craa_addr, 4);
+ if (clp == NULL)
+ goto out;
+
+ dprintk("NFS: RECALL_ANY callback request from %s\n",
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
+ &args->craa_type_mask))
+ flags = FMODE_READ;
+ if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
+ &args->craa_type_mask))
+ flags |= FMODE_WRITE;
+
+ if (flags)
+ nfs_expire_all_delegation_types(clp, flags);
+ status = htonl(NFS4_OK);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
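
The v4.1 validator above skips the first four bytes of the stateid because that word carries the sequence id, which may differ (the validator additionally requires the incoming seqid word to be zero). A standalone, hedged restatement of the comparison rule, given that nfs4_stateid is a 16-byte opaque value:

        #include <string.h>

        /* sketch only: compare two 16-byte stateids, ignoring the 4-byte seqid prefix */
        static int stateid_match_ignoring_seqid(const unsigned char a[16],
                                                const unsigned char b[16])
        {
                return memcmp(a + 4, b + 4, 16 - 4) == 0;
        }
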
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 76b0aa0f73b..8e1a2511c8b 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -23,6 +23,7 @@
#if defined(CONFIG_NFS_V4_1)
#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
4 + 1 + 3)
+#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_1 */
#define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -326,6 +327,25 @@ out_free:
goto out;
}
+static unsigned decode_recallany_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_recallanyargs *args)
+{
+ uint32_t *p;
+
+ args->craa_addr = svc_addr(rqstp);
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+ args->craa_objs_to_keep = ntohl(*p++);
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+ args->craa_type_mask = ntohl(*p);
+
+ return 0;
+}
+
#endif /* CONFIG_NFS_V4_1 */
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
@@ -533,6 +553,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_GETATTR:
case OP_CB_RECALL:
case OP_CB_SEQUENCE:
+ case OP_CB_RECALL_ANY:
*op = &callback_ops[op_nr];
break;
@@ -540,7 +561,6 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_NOTIFY_DEVICEID:
case OP_CB_NOTIFY:
case OP_CB_PUSH_DELEG:
- case OP_CB_RECALL_ANY:
case OP_CB_RECALLABLE_OBJ_AVAIL:
case OP_CB_RECALL_SLOT:
case OP_CB_WANTS_CANCELLED:
@@ -688,6 +708,11 @@ static struct callback_op callback_ops[] = {
.encode_res = (callback_encode_res_t)encode_cb_sequence_res,
.res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
},
+ [OP_CB_RECALL_ANY] = {
+ .process_op = (callback_process_op_t)nfs4_callback_recallany,
+ .decode_args = (callback_decode_arg_t)decode_recallany_args,
+ .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
+ },
#endif /* CONFIG_NFS_V4_1 */
};
@@ -718,3 +743,10 @@ struct svc_version nfs4_callback_version1 = {
.vs_dispatch = NULL,
};
+struct svc_version nfs4_callback_version4 = {
+ .vs_vers = 4,
+ .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
+ .vs_proc = nfs4_callback_procedures1,
+ .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
+ .vs_dispatch = NULL,
+};
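
For orientation, an entry added to callback_ops[] (such as OP_CB_RECALL_ANY above) is driven roughly as follows by the existing dispatcher in callback_xdr.c; this is a simplified sketch of that flow, not new code from the patch:

        status = op->decode_args(rqstp, xdr_in, argp);         /* e.g. decode_recallany_args() */
        if (status == 0 && op->process_op)
                status = op->process_op(argp, resp);           /* e.g. nfs4_callback_recallany() */
        if (status == 0 && op->encode_res)
                status = op->encode_res(rqstp, xdr_out, resp); /* RECALL_ANY has no result body,
                                                                  so CB_OP_RECALLANY_RES_MAXSZ is
                                                                  just the header size */
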
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 99ea196f071..ee77713ce68 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1260,10 +1260,20 @@ error:
static void nfs4_session_set_rwsize(struct nfs_server *server)
{
#ifdef CONFIG_NFS_V4_1
+ struct nfs4_session *sess;
+ u32 server_resp_sz;
+ u32 server_rqst_sz;
+
if (!nfs4_has_session(server->nfs_client))
return;
- server->rsize = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
- server->wsize = server->nfs_client->cl_session->fc_attrs.max_rqst_sz;
+ sess = server->nfs_client->cl_session;
+ server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+
+ if (server->rsize > server_resp_sz)
+ server->rsize = server_resp_sz;
+ if (server->wsize > server_rqst_sz)
+ server->wsize = server_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
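
The effect of the hunk above is to clamp, rather than overwrite, the mount's rsize/wsize using the session limits minus the per-operation overhead; equivalent in effect (though not the patch text) when expressed with min_t():

        server->rsize = min_t(u32, server->rsize,
                              sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead);
        server->wsize = min_t(u32, server->wsize,
                              sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead);
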
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 6dd48a4405b..2563bebc4c6 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -92,7 +92,7 @@ out:
return status;
}
-static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
+static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *ctx;
@@ -116,10 +116,11 @@ again:
err = nfs_delegation_claim_locks(ctx, state);
put_nfs_open_context(ctx);
if (err != 0)
- return;
+ return err;
goto again;
}
spin_unlock(&inode->i_lock);
+ return 0;
}
/*
@@ -261,30 +262,34 @@ static void nfs_msync_inode(struct inode *inode)
/*
* Basic procedure for returning a delegation to the server
*/
-static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
+static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
struct nfs_inode *nfsi = NFS_I(inode);
+ int err;
- nfs_msync_inode(inode);
/*
* Guard against new delegated open/lock/unlock calls and against
* state recovery
*/
down_write(&nfsi->rwsem);
- nfs_delegation_claim_opens(inode, &delegation->stateid);
+ err = nfs_delegation_claim_opens(inode, &delegation->stateid);
up_write(&nfsi->rwsem);
- nfs_msync_inode(inode);
+ if (err)
+ goto out;
- return nfs_do_return_delegation(inode, delegation, 1);
+ err = nfs_do_return_delegation(inode, delegation, issync);
+out:
+ return err;
}
/*
* Return all delegations that have been marked for return
*/
-void nfs_client_return_marked_delegations(struct nfs_client *clp)
+int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
struct nfs_delegation *delegation;
struct inode *inode;
+ int err = 0;
restart:
rcu_read_lock();
@@ -298,12 +303,18 @@ restart:
delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
spin_unlock(&clp->cl_lock);
rcu_read_unlock();
- if (delegation != NULL)
- __nfs_inode_return_delegation(inode, delegation);
+ if (delegation != NULL) {
+ filemap_flush(inode->i_mapping);
+ err = __nfs_inode_return_delegation(inode, delegation, 0);
+ }
iput(inode);
- goto restart;
+ if (!err)
+ goto restart;
+ set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ return err;
}
rcu_read_unlock();
+ return 0;
}
/*
@@ -338,8 +349,10 @@ int nfs_inode_return_delegation(struct inode *inode)
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(nfsi, NULL);
spin_unlock(&clp->cl_lock);
- if (delegation != NULL)
- err = __nfs_inode_return_delegation(inode, delegation);
+ if (delegation != NULL) {
+ nfs_msync_inode(inode);
+ err = __nfs_inode_return_delegation(inode, delegation, 1);
+ }
}
return err;
}
@@ -368,33 +381,47 @@ void nfs_super_return_all_delegations(struct super_block *sb)
spin_unlock(&delegation->lock);
}
rcu_read_unlock();
- nfs_client_return_marked_delegations(clp);
+ if (nfs_client_return_marked_delegations(clp) != 0)
+ nfs4_schedule_state_manager(clp);
}
-static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
+static
+void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
struct nfs_delegation *delegation;
rcu_read_lock();
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
- set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
+ continue;
+ if (delegation->type & flags)
+ nfs_mark_return_delegation(clp, delegation);
}
rcu_read_unlock();
}
+static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
+{
+ nfs_client_mark_return_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
+}
+
static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
nfs4_schedule_state_manager(clp);
}
-void nfs_expire_all_delegations(struct nfs_client *clp)
+void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
- nfs_client_mark_return_all_delegations(clp);
+ nfs_client_mark_return_all_delegation_types(clp, flags);
nfs_delegation_run_state_manager(clp);
}
+void nfs_expire_all_delegations(struct nfs_client *clp)
+{
+ nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
+}
+
/*
* Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
*/
@@ -413,8 +440,7 @@ static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *c
list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
continue;
- set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ nfs_mark_return_delegation(clp, delegation);
}
rcu_read_unlock();
}
@@ -428,18 +454,21 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
/*
* Asynchronous delegation recall!
*/
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
+ int (*validate_stateid)(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid))
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
- sizeof(delegation->stateid.data)) != 0) {
+
+ if (!validate_stateid(delegation, stateid)) {
rcu_read_unlock();
return -ENOENT;
}
+
nfs_mark_return_delegation(clp, delegation);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
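
Because the stateid comparison is now injected as a callback, the minor-version choice is made by the caller; a hedged sketch of a call site, mirroring the callback_proc.c hunk earlier in this patch and ignoring the CONFIG_NFS_V4_1 guards for brevity:

        /* hypothetical caller sketch */
        ret = nfs_async_inode_return_delegation(inode, stateid,
                        clp->cl_minorversion > 0 ?
                                nfs41_validate_delegation_stateid :     /* seqid-agnostic */
                                nfs4_validate_delegation_stateid);      /* full comparison */
        if (ret == -ENOENT)
                /* the stateid did not match the delegation we hold; nothing to recall */
                res = htonl(NFS4ERR_BAD_STATEID);
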
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 09f38379517..944b627ec6e 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -34,15 +34,18 @@ enum {
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
int nfs_inode_return_delegation(struct inode *inode);
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
+ int (*validate_stateid)(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid));
void nfs_inode_return_delegation_noreclaim(struct inode *inode);
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
void nfs_super_return_all_delegations(struct super_block *sb);
void nfs_expire_all_delegations(struct nfs_client *clp);
+void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
void nfs_handle_cb_pathdown(struct nfs_client *clp);
-void nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_client_return_marked_delegations(struct nfs_client *clp);
void nfs_delegation_mark_reclaim(struct nfs_client *clp);
void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 7cb298525ee..2c5ace4f00a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1579,55 +1579,46 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *dentry = NULL, *rehash = NULL;
int error = -EBUSY;
- /*
- * To prevent any new references to the target during the rename,
- * we unhash the dentry and free the inode in advance.
- */
- if (!d_unhashed(new_dentry)) {
- d_drop(new_dentry);
- rehash = new_dentry;
- }
-
dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
atomic_read(&new_dentry->d_count));
/*
- * First check whether the target is busy ... we can't
- * safely do _any_ rename if the target is in use.
- *
- * For files, make a copy of the dentry and then do a
- * silly-rename. If the silly-rename succeeds, the
- * copied dentry is hashed and becomes the new target.
+ * For non-directories, check whether the target is busy and if so,
+ * make a copy of the dentry and then do a silly-rename. If the
+ * silly-rename succeeds, the copied dentry is hashed and becomes
+ * the new target.
*/
- if (!new_inode)
- goto go_ahead;
- if (S_ISDIR(new_inode->i_mode)) {
- error = -EISDIR;
- if (!S_ISDIR(old_inode->i_mode))
- goto out;
- } else if (atomic_read(&new_dentry->d_count) > 2) {
- int err;
- /* copy the target dentry's name */
- dentry = d_alloc(new_dentry->d_parent,
- &new_dentry->d_name);
- if (!dentry)
- goto out;
+ if (new_inode && !S_ISDIR(new_inode->i_mode)) {
+ /*
+ * To prevent any new references to the target during the
+ * rename, we unhash the dentry in advance.
+ */
+ if (!d_unhashed(new_dentry)) {
+ d_drop(new_dentry);
+ rehash = new_dentry;
+ }
+
+ if (atomic_read(&new_dentry->d_count) > 2) {
+ int err;
+
+ /* copy the target dentry's name */
+ dentry = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!dentry)
+ goto out;
- /* silly-rename the existing target ... */
- err = nfs_sillyrename(new_dir, new_dentry);
- if (!err) {
- new_dentry = rehash = dentry;
+ /* silly-rename the existing target ... */
+ err = nfs_sillyrename(new_dir, new_dentry);
+ if (err)
+ goto out;
+
+ new_dentry = dentry;
new_inode = NULL;
- /* instantiate the replacement target */
- d_instantiate(new_dentry, NULL);
- } else if (atomic_read(&new_dentry->d_count) > 1)
- /* dentry still busy? */
- goto out;
+ }
}
-go_ahead:
/*
* ... prune child dentries and writebacks if needed.
*/
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index f4d54ba97cc..95e1ca765d4 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -146,7 +146,7 @@ static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
return 0;
}
-struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
+static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
struct nfs_dns_ent *key)
{
struct cache_head *ch;
@@ -159,7 +159,7 @@ struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
return container_of(ch, struct nfs_dns_ent, h);
}
-struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
+static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
struct nfs_dns_ent *new,
struct nfs_dns_ent *key)
{
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index f5fdd39e037..6b891328f33 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -581,7 +581,7 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
{
struct nfs_open_context *ctx;
- if (IS_SYNC(inode) || (filp->f_flags & O_SYNC))
+ if (IS_SYNC(inode) || (filp->f_flags & O_DSYNC))
return 1;
ctx = nfs_file_open_context(filp);
if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
@@ -622,7 +622,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
result = generic_file_aio_write(iocb, iov, nr_segs, pos);
- /* Return error values for O_SYNC and IS_SYNC() */
+ /* Return error values for O_DSYNC and IS_SYNC() */
if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
if (err < 0)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e21b1bb9972..29e464d23b3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -30,6 +30,15 @@ static inline int nfs4_has_session(const struct nfs_client *clp)
return 0;
}
+static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp))
+ return (clp->cl_session->flags & SESSION4_PERSIST);
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
@@ -156,6 +165,7 @@ struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentr
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
+extern struct svc_version nfs4_callback_version4;
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
@@ -177,24 +187,14 @@ extern __be32 * nfs_decode_dirent(__be32 *, struct nfs_entry *, int);
extern struct rpc_procinfo nfs3_procedures[];
extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
-/* nfs4proc.c */
-static inline void nfs4_restart_rpc(struct rpc_task *task,
- const struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp) &&
- test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
- rpc_restart_call_prepare(task);
- return;
- }
-#endif /* CONFIG_NFS_V4_1 */
- rpc_restart_call(task);
-}
-
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
#endif
+#ifdef CONFIG_NFS_V4_1
+extern const u32 nfs41_maxread_overhead;
+extern const u32 nfs41_maxwrite_overhead;
+#endif
/* nfs4proc.c */
#ifdef CONFIG_NFS_V4
@@ -273,20 +273,6 @@ extern int _nfs4_call_sync_session(struct nfs_server *server,
struct nfs4_sequence_res *res,
int cache_reply);
-#ifdef CONFIG_NFS_V4_1
-extern void nfs41_sequence_free_slot(const struct nfs_client *,
- struct nfs4_sequence_res *res);
-#endif /* CONFIG_NFS_V4_1 */
-
-static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
- struct nfs4_sequence_res *res)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp))
- nfs41_sequence_free_slot(clp, res);
-#endif /* CONFIG_NFS_V4_1 */
-}
-
/*
* Determine the device name as a string
*/
@@ -380,3 +366,15 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
return ((unsigned long)len + (unsigned long)base +
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
+
+/*
+ * Helper for restarting RPC calls in the possible presence of NFSv4.1
+ * sessions.
+ */
+static inline void nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
+{
+ if (nfs4_has_session(clp))
+ rpc_restart_call_prepare(task);
+ else
+ rpc_restart_call(task);
+}
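
The nfs_restart_rpc() helper above replaces the old nfs4_restart_rpc() and no longer looks at a session-setup bit: when a session exists it always restarts through rpc_restart_call_prepare(), so the SEQUENCE setup runs again. A hedged sketch of a typical error-path use (the surrounding condition is illustrative only):

        if (task->tk_status == -NFS4ERR_DELAY) {
                rpc_delay(task, NFS4_POLL_RETRY_MIN);       /* back off, then... */
                nfs_restart_rpc(task, server->nfs_client);  /* ...re-drive the RPC */
                return -EAGAIN;
        }
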
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index ceda50aad73..46d779abafd 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -25,13 +25,7 @@ struct nfs_iostats {
static inline void nfs_inc_server_stats(const struct nfs_server *server,
enum nfs_stat_eventcounters stat)
{
- struct nfs_iostats *iostats;
- int cpu;
-
- cpu = get_cpu();
- iostats = per_cpu_ptr(server->io_stats, cpu);
- iostats->events[stat]++;
- put_cpu();
+ this_cpu_inc(server->io_stats->events[stat]);
}
static inline void nfs_inc_stats(const struct inode *inode,
@@ -44,13 +38,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
enum nfs_stat_bytecounters stat,
unsigned long addend)
{
- struct nfs_iostats *iostats;
- int cpu;
-
- cpu = get_cpu();
- iostats = per_cpu_ptr(server->io_stats, cpu);
- iostats->bytes[stat] += addend;
- put_cpu();
+ this_cpu_add(server->io_stats->bytes[stat], addend);
}
static inline void nfs_add_stats(const struct inode *inode,
@@ -65,13 +53,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
enum nfs_stat_fscachecounters stat,
unsigned long addend)
{
- struct nfs_iostats *iostats;
- int cpu;
-
- cpu = get_cpu();
- iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
- iostats->fscache[stat] += addend;
- put_cpu();
+ this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
}
#endif
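
The three iostat.h hunks above are the same transformation: the hand-rolled get_cpu()/per_cpu_ptr()/put_cpu() sequence becomes a single this_cpu_inc()/this_cpu_add(), which handles preemption safety internally (and can compile down to one instruction on architectures with per-CPU addressing). Roughly, the removed code was doing this by hand:

        preempt_disable();                       /* what get_cpu() did */
        per_cpu_ptr(server->io_stats, smp_processor_id())->events[stat]++;
        preempt_enable();                        /* what put_cpu() did */

        /* now simply: */
        this_cpu_inc(server->io_stats->events[stat]);
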
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6ea07a3c75d..865265bdca0 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,7 +44,8 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
- NFS4CLNT_SESSION_SETUP,
+ NFS4CLNT_SESSION_RESET,
+ NFS4CLNT_SESSION_DRAINING,
};
/*
@@ -107,6 +108,10 @@ enum {
NFS_OWNER_RECLAIM_NOGRACE
};
+#define NFS_LOCK_NEW 0
+#define NFS_LOCK_RECLAIM 1
+#define NFS_LOCK_EXPIRED 2
+
/*
* struct nfs4_state maintains the client-side state for a given
* (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
@@ -180,6 +185,7 @@ struct nfs4_state_recovery_ops {
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
+ int (*reclaim_complete)(struct nfs_client *);
};
struct nfs4_state_maintenance_ops {
@@ -200,9 +206,11 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
+extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
@@ -218,9 +226,11 @@ extern int nfs4_setup_sequence(struct nfs_client *clp,
int cache_reply, struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
-extern int nfs4_proc_create_session(struct nfs_client *, int reset);
+extern int nfs4_proc_create_session(struct nfs_client *);
extern int nfs4_proc_destroy_session(struct nfs4_session *);
extern int nfs4_init_session(struct nfs_server *server);
+extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
+ struct nfs_fsinfo *fsinfo);
#else /* CONFIG_NFS_v4_1 */
static inline int nfs4_setup_sequence(struct nfs_client *clp,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
@@ -267,6 +277,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs4_schedule_state_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
+extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
@@ -275,6 +286,7 @@ extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_release_seqid(struct nfs_seqid *seqid);
extern void nfs_free_seqid(struct nfs_seqid *seqid);
extern const nfs4_stateid zero_stateid;
@@ -287,6 +299,7 @@ struct nfs4_mount_data;
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
+extern struct svc_version nfs4_callback_version4;
#else
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 741a562177f..198d51d17c1 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -64,6 +64,7 @@
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
+static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
@@ -270,11 +271,18 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ nfs4_schedule_state_recovery(clp);
exception->retry = 1;
- /* FALLTHROUGH */
+ break;
#endif /* !defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
+ if (exception->timeout > HZ) {
+ /* We have retried a decent amount, time to
+ * fail
+ */
+ ret = -EBUSY;
+ break;
+ }
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
ret = nfs4_delay(server->client, &exception->timeout);
@@ -311,48 +319,67 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
* so we need to scan down from highest_used_slotid to 0 looking for the now
* highest slotid in use.
* If none found, highest_used_slotid is set to -1.
+ *
+ * Must be called while holding tbl->slot_tbl_lock
*/
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
{
int slotid = free_slotid;
- spin_lock(&tbl->slot_tbl_lock);
/* clear used bit in bitmap */
__clear_bit(slotid, tbl->used_slots);
/* update highest_used_slotid when it is freed */
if (slotid == tbl->highest_used_slotid) {
slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
- if (slotid >= 0 && slotid < tbl->max_slots)
+ if (slotid < tbl->max_slots)
tbl->highest_used_slotid = slotid;
else
tbl->highest_used_slotid = -1;
}
- rpc_wake_up_next(&tbl->slot_tbl_waitq);
- spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
free_slotid, tbl->highest_used_slotid);
}
-void nfs41_sequence_free_slot(const struct nfs_client *clp,
- struct nfs4_sequence_res *res)
+/*
+ * Signal state manager thread if session is drained
+ */
+static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
{
- struct nfs4_slot_table *tbl;
+ struct rpc_task *task;
- if (!nfs4_has_session(clp)) {
- dprintk("%s: No session\n", __func__);
+ if (!test_bit(NFS4CLNT_SESSION_DRAINING, &ses->clp->cl_state)) {
+ task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
+ if (task)
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
return;
}
+
+ if (ses->fc_slot_table.highest_used_slotid != -1)
+ return;
+
+ dprintk("%s COMPLETE: Session Drained\n", __func__);
+ complete(&ses->complete);
+}
+
+static void nfs41_sequence_free_slot(const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+ struct nfs4_slot_table *tbl;
+
tbl = &clp->cl_session->fc_slot_table;
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
- dprintk("%s: No slot\n", __func__);
/* just wake up the next guy waiting since
* we may have not consumed a slot after all */
- rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ dprintk("%s: No slot\n", __func__);
return;
}
+
+ spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slotid);
+ nfs41_check_drain_session_complete(clp->cl_session);
+ spin_unlock(&tbl->slot_tbl_lock);
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
}
@@ -377,10 +404,10 @@ static void nfs41_sequence_done(struct nfs_client *clp,
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
goto out;
- tbl = &clp->cl_session->fc_slot_table;
- slot = tbl->slots + res->sr_slotid;
-
+ /* Check the SEQUENCE operation status */
if (res->sr_status == 0) {
+ tbl = &clp->cl_session->fc_slot_table;
+ slot = tbl->slots + res->sr_slotid;
/* Update the slot's sequence and clientid lease timer */
++slot->seq_nr;
timestamp = res->sr_renewal_time;
@@ -388,7 +415,8 @@ static void nfs41_sequence_done(struct nfs_client *clp,
if (time_before(clp->cl_last_renewal, timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
- return;
+ /* Check sequence flags */
+ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
}
out:
/* The session may be reset by one of the error handlers. */
@@ -407,7 +435,7 @@ out:
* Note: must be called with under the slot_tbl_lock.
*/
static u8
-nfs4_find_slot(struct nfs4_slot_table *tbl, struct rpc_task *task)
+nfs4_find_slot(struct nfs4_slot_table *tbl)
{
int slotid;
u8 ret_id = NFS4_MAX_SLOT_TABLE;
@@ -429,24 +457,6 @@ out:
return ret_id;
}
-static int nfs4_recover_session(struct nfs4_session *session)
-{
- struct nfs_client *clp = session->clp;
- unsigned int loop;
- int ret;
-
- for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
- ret = nfs4_wait_clnt_recover(clp);
- if (ret != 0)
- break;
- if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
- break;
- nfs4_schedule_state_manager(clp);
- ret = -EIO;
- }
- return ret;
-}
-
static int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -455,7 +465,6 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
{
struct nfs4_slot *slot;
struct nfs4_slot_table *tbl;
- int status = 0;
u8 slotid;
dprintk("--> %s\n", __func__);
@@ -468,24 +477,27 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
- if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) {
- if (tbl->highest_used_slotid != -1) {
- rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
- spin_unlock(&tbl->slot_tbl_lock);
- dprintk("<-- %s: Session reset: draining\n", __func__);
- return -EAGAIN;
- }
+ if (test_bit(NFS4CLNT_SESSION_DRAINING, &session->clp->cl_state) &&
+ !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
+ /*
+ * The state manager will wait until the slot table is empty.
+ * Schedule the reset thread
+ */
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s Schedule Session Reset\n", __func__);
+ return -EAGAIN;
+ }
- /* The slot table is empty; start the reset thread */
- dprintk("%s Session Reset\n", __func__);
+ if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
+ !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
- status = nfs4_recover_session(session);
- if (status)
- return status;
- spin_lock(&tbl->slot_tbl_lock);
+ dprintk("%s enforce FIFO order\n", __func__);
+ return -EAGAIN;
}
- slotid = nfs4_find_slot(tbl, task);
+ slotid = nfs4_find_slot(tbl);
if (slotid == NFS4_MAX_SLOT_TABLE) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
@@ -494,6 +506,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
}
spin_unlock(&tbl->slot_tbl_lock);
+ rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
slot = tbl->slots + slotid;
args->sa_session = session;
args->sa_slotid = slotid;
@@ -527,7 +540,7 @@ int nfs4_setup_sequence(struct nfs_client *clp,
goto out;
ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
task);
- if (ret != -EAGAIN) {
+ if (ret && ret != -EAGAIN) {
/* terminate rpc task */
task->tk_status = ret;
task->tk_action = NULL;
@@ -556,12 +569,17 @@ static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
rpc_call_start(task);
}
+static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
+{
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ nfs41_call_sync_prepare(task, calldata);
+}
+
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
- nfs41_sequence_free_slot(data->clp, data->seq_res);
}
struct rpc_call_ops nfs41_call_sync_ops = {
@@ -569,12 +587,18 @@ struct rpc_call_ops nfs41_call_sync_ops = {
.rpc_call_done = nfs41_call_sync_done,
};
+struct rpc_call_ops nfs41_call_priv_sync_ops = {
+ .rpc_call_prepare = nfs41_call_priv_sync_prepare,
+ .rpc_call_done = nfs41_call_sync_done,
+};
+
static int nfs4_call_sync_sequence(struct nfs_client *clp,
struct rpc_clnt *clnt,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
- int cache_reply)
+ int cache_reply,
+ int privileged)
{
int ret;
struct rpc_task *task;
@@ -592,6 +616,8 @@ static int nfs4_call_sync_sequence(struct nfs_client *clp,
};
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ if (privileged)
+ task_setup.callback_ops = &nfs41_call_priv_sync_ops;
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
ret = PTR_ERR(task);
@@ -609,7 +635,7 @@ int _nfs4_call_sync_session(struct nfs_server *server,
int cache_reply)
{
return nfs4_call_sync_sequence(server->nfs_client, server->client,
- msg, args, res, cache_reply);
+ msg, args, res, cache_reply, 0);
}
#endif /* CONFIG_NFS_V4_1 */
@@ -637,15 +663,6 @@ static void nfs4_sequence_done(const struct nfs_server *server,
#endif /* CONFIG_NFS_V4_1 */
}
-/* no restart, therefore free slot here */
-static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
- struct nfs4_sequence_res *res,
- int rpc_status)
-{
- nfs4_sequence_done(server, res, rpc_status);
- nfs4_sequence_free_slot(server->nfs_client, res);
-}
-
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -720,9 +737,15 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
p->o_arg.bitmask = server->attr_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (flags & O_EXCL) {
- u32 *s = (u32 *) p->o_arg.u.verifier.data;
- s[0] = jiffies;
- s[1] = current->pid;
+ if (nfs4_has_persistent_session(server->nfs_client)) {
+ /* GUARDED */
+ p->o_arg.u.attrs = &p->attrs;
+ memcpy(&p->attrs, attrs, sizeof(p->attrs));
+ } else { /* EXCLUSIVE4_1 */
+ u32 *s = (u32 *) p->o_arg.u.verifier.data;
+ s[0] = jiffies;
+ s[1] = current->pid;
+ }
} else if (flags & O_CREAT) {
p->o_arg.u.attrs = &p->attrs;
memcpy(&p->attrs, attrs, sizeof(p->attrs));
@@ -776,13 +799,16 @@ static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode
goto out;
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
- ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0;
+ ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
+ && state->n_rdonly != 0;
break;
case FMODE_WRITE:
- ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0;
+ ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
+ && state->n_wronly != 0;
break;
case FMODE_READ|FMODE_WRITE:
- ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
+ ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
+ && state->n_rdwr != 0;
}
out:
return ret;
@@ -1047,7 +1073,7 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod
memset(&opendata->o_res, 0, sizeof(opendata->o_res));
memset(&opendata->c_res, 0, sizeof(opendata->c_res));
nfs4_init_opendata_res(opendata);
- ret = _nfs4_proc_open(opendata);
+ ret = _nfs4_recover_proc_open(opendata);
if (ret != 0)
return ret;
newstate = nfs4_opendata_to_nfs4_state(opendata);
@@ -1183,6 +1209,14 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
case -ENOENT:
case -ESTALE:
goto out;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ nfs4_schedule_state_recovery(
+ server->nfs_client);
+ goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
@@ -1330,14 +1364,20 @@ out_no_action:
}
+static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
+{
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ nfs4_open_prepare(task, calldata);
+}
+
static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
- nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->o_arg.server, &data->o_res.seq_res,
+ task->tk_status);
if (RPC_ASSASSINATED(task))
return;
@@ -1388,10 +1428,13 @@ static const struct rpc_call_ops nfs4_open_ops = {
.rpc_release = nfs4_open_release,
};
-/*
- * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
- */
-static int _nfs4_proc_open(struct nfs4_opendata *data)
+static const struct rpc_call_ops nfs4_recover_open_ops = {
+ .rpc_call_prepare = nfs4_recover_open_prepare,
+ .rpc_call_done = nfs4_open_done,
+ .rpc_release = nfs4_open_release,
+};
+
+static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
struct inode *dir = data->dir->d_inode;
struct nfs_server *server = NFS_SERVER(dir);
@@ -1418,21 +1461,57 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
data->rpc_done = 0;
data->rpc_status = 0;
data->cancelled = 0;
+ if (isrecover)
+ task_setup_data.callback_ops = &nfs4_recover_open_ops;
task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- return PTR_ERR(task);
- status = nfs4_wait_for_completion_rpc_task(task);
- if (status != 0) {
- data->cancelled = 1;
- smp_wmb();
- } else
- status = data->rpc_status;
- rpc_put_task(task);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0) {
+ data->cancelled = 1;
+ smp_wmb();
+ } else
+ status = data->rpc_status;
+ rpc_put_task(task);
+
+ return status;
+}
+
+static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
+{
+ struct inode *dir = data->dir->d_inode;
+ struct nfs_openres *o_res = &data->o_res;
+ int status;
+
+ status = nfs4_run_open_task(data, 1);
if (status != 0 || !data->rpc_done)
return status;
- if (o_res->fh.size == 0)
- _nfs4_proc_lookup(dir, o_arg->name, &o_res->fh, o_res->f_attr);
+ nfs_refresh_inode(dir, o_res->dir_attr);
+
+ if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
+ status = _nfs4_proc_open_confirm(data);
+ if (status != 0)
+ return status;
+ }
+
+ return status;
+}
+
+/*
+ * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
+ */
+static int _nfs4_proc_open(struct nfs4_opendata *data)
+{
+ struct inode *dir = data->dir->d_inode;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_openargs *o_arg = &data->o_arg;
+ struct nfs_openres *o_res = &data->o_res;
+ int status;
+
+ status = nfs4_run_open_task(data, 0);
+ if (status != 0 || !data->rpc_done)
+ return status;
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
@@ -1488,7 +1567,7 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
return ret;
}
-static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
+static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
@@ -1496,10 +1575,16 @@ static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4
do {
err = _nfs4_open_expired(ctx, state);
- if (err != -NFS4ERR_DELAY)
- break;
- nfs4_handle_exception(server, err, &exception);
+ switch (err) {
+ default:
+ goto out;
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_DELAY:
+ nfs4_handle_exception(server, err, &exception);
+ err = 0;
+ }
} while (exception.retry);
+out:
return err;
}
@@ -1712,6 +1797,18 @@ static void nfs4_free_closedata(void *data)
kfree(calldata);
}
+static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
+ fmode_t fmode)
+{
+ spin_lock(&state->owner->so_lock);
+ if (!(fmode & FMODE_READ))
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ if (!(fmode & FMODE_WRITE))
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ spin_unlock(&state->owner->so_lock);
+}
+
static void nfs4_close_done(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
@@ -1728,6 +1825,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
case 0:
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
renew_lease(server, calldata->timestamp);
+ nfs4_close_clear_stateid_flags(state,
+ calldata->arg.fmode);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_OLD_STATEID:
@@ -1736,12 +1835,10 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
if (calldata->arg.fmode == 0)
break;
default:
- if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client);
- return;
- }
+ if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
+ rpc_restart_call_prepare(task);
}
- nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
+ nfs_release_seqid(calldata->arg.seqid);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
@@ -1749,38 +1846,39 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
- int clear_rd, clear_wr, clear_rdwr;
+ int call_close = 0;
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
- clear_rd = clear_wr = clear_rdwr = 0;
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+ calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
spin_lock(&state->owner->so_lock);
/* Calculate the change in open mode */
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
- clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags);
- clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ calldata->arg.fmode &= ~FMODE_READ;
}
if (state->n_wronly == 0) {
- clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags);
- clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ calldata->arg.fmode &= ~FMODE_WRITE;
}
}
spin_unlock(&state->owner->so_lock);
- if (!clear_rd && !clear_wr && !clear_rdwr) {
+
+ if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
return;
}
+
+ if (calldata->arg.fmode == 0)
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+
nfs_fattr_init(calldata->res.fattr);
- if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
- task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
- calldata->arg.fmode = FMODE_READ;
- } else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
- task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
- calldata->arg.fmode = FMODE_WRITE;
- }
calldata->timestamp = jiffies;
if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
&calldata->arg.seq_args, &calldata->res.seq_res,
@@ -1832,8 +1930,6 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
- if (nfs4_has_session(server->nfs_client))
- memset(calldata->arg.stateid->data, 0, 4); /* clear seqid */
/* Serialization for the sequence id */
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
if (calldata->arg.seqid == NULL)
@@ -1981,7 +2077,7 @@ out_drop:
return 0;
}
-void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
+static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
{
if (ctx->state == NULL)
return;
@@ -2532,7 +2628,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
- nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, &res->dir_attr);
return 1;
@@ -2971,11 +3066,10 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
dprintk("--> %s\n", __func__);
- /* nfs4_sequence_free_slot called in the read rpc_call_done */
nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
@@ -2995,12 +3089,11 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
- /* slot is freed in nfs_writeback_done */
nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -3028,11 +3121,9 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
- nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
- &data->res.seq_res);
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
@@ -3350,7 +3441,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ nfs4_schedule_state_recovery(clp);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
@@ -3483,12 +3574,23 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
- nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->res.server, &data->res.seq_res,
+ task->tk_status);
- data->rpc_status = task->tk_status;
- if (data->rpc_status == 0)
+ switch (task->tk_status) {
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case 0:
renew_lease(data->res.server, data->timestamp);
+ break;
+ default:
+ if (nfs4_async_handle_error(task, data->res.server, NULL) ==
+ -EAGAIN) {
+ nfs_restart_rpc(task, data->res.server->nfs_client);
+ return;
+ }
+ }
+ data->rpc_status = task->tk_status;
}
static void nfs4_delegreturn_release(void *calldata)
@@ -3741,11 +3843,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
- nfs4_restart_rpc(task,
- calldata->server->nfs_client);
+ nfs_restart_rpc(task,
+ calldata->server->nfs_client);
}
- nfs4_sequence_free_slot(calldata->server->nfs_client,
- &calldata->res.seq_res);
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
@@ -3921,14 +4021,20 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
+static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
+{
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ nfs4_lock_prepare(task, calldata);
+}
+
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
dprintk("%s: begin!\n", __func__);
- nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->server, &data->res.seq_res,
+ task->tk_status);
data->rpc_status = task->tk_status;
if (RPC_ASSASSINATED(task))
@@ -3976,7 +4082,13 @@ static const struct rpc_call_ops nfs4_lock_ops = {
.rpc_release = nfs4_lock_release,
};
-static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim)
+static const struct rpc_call_ops nfs4_recover_lock_ops = {
+ .rpc_call_prepare = nfs4_recover_lock_prepare,
+ .rpc_call_done = nfs4_lock_done,
+ .rpc_release = nfs4_lock_release,
+};
+
+static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
struct nfs4_lockdata *data;
struct rpc_task *task;
@@ -4000,8 +4112,11 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
return -ENOMEM;
if (IS_SETLKW(cmd))
data->arg.block = 1;
- if (reclaim != 0)
- data->arg.reclaim = 1;
+ if (recovery_type > NFS_LOCK_NEW) {
+ if (recovery_type == NFS_LOCK_RECLAIM)
+ data->arg.reclaim = NFS_LOCK_RECLAIM;
+ task_setup_data.callback_ops = &nfs4_recover_lock_ops;
+ }
msg.rpc_argp = &data->arg,
msg.rpc_resp = &data->res,
task_setup_data.callback_data = data;
@@ -4028,7 +4143,7 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
/* Cache the lock if possible... */
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
- err = _nfs4_do_setlk(state, F_SETLK, request, 1);
+ err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
@@ -4048,11 +4163,17 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
do {
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
- err = _nfs4_do_setlk(state, F_SETLK, request, 0);
- if (err != -NFS4ERR_DELAY)
- break;
- nfs4_handle_exception(server, err, &exception);
+ err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
+ switch (err) {
+ default:
+ goto out;
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_DELAY:
+ nfs4_handle_exception(server, err, &exception);
+ err = 0;
+ }
} while (exception.retry);
+out:
return err;
}
@@ -4078,7 +4199,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
status = do_vfs_lock(request->fl_file, request);
goto out_unlock;
}
- status = _nfs4_do_setlk(state, cmd, request, 0);
+ status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
if (status != 0)
goto out_unlock;
/* Note: we always want to sleep here! */
@@ -4161,7 +4282,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
if (err != 0)
goto out;
do {
- err = _nfs4_do_setlk(state, F_SETLK, fl, 0);
+ err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
switch (err) {
default:
printk(KERN_ERR "%s: unhandled error %d.\n",
@@ -4172,6 +4293,11 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
case -NFS4ERR_EXPIRED:
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
nfs4_schedule_state_recovery(server->nfs_client);
goto out;
case -ERESTARTSYS:
@@ -4296,7 +4422,7 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
* be in some phase of session reset.
*/
-static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
nfs4_verifier verifier;
struct nfs41_exchange_id_args args = {
@@ -4318,6 +4444,9 @@ static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
+ /* Remove server-only flags */
+ args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
+
p = (u32 *)verifier.data;
*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
*p = htonl((u32)clp->cl_boot_time.tv_nsec);
@@ -4361,11 +4490,12 @@ static void nfs4_get_lease_time_prepare(struct rpc_task *task,
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
/* just setup sequence, do not trigger session recovery
since we're invoked within one */
ret = nfs41_setup_sequence(data->clp->cl_session,
- &data->args->la_seq_args,
- &data->res->lr_seq_res, 0, task);
+ &data->args->la_seq_args,
+ &data->res->lr_seq_res, 0, task);
BUG_ON(ret == -EAGAIN);
rpc_call_start(task);
@@ -4389,10 +4519,9 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
- nfs4_restart_rpc(task, data->clp);
+ nfs_restart_rpc(task, data->clp);
return;
}
- nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
dprintk("<-- %s\n", __func__);
}
@@ -4465,7 +4594,6 @@ static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
spin_lock(&tbl->slot_tbl_lock);
for (i = 0; i < max_slots; ++i)
tbl->slots[i].seq_nr = ivalue;
- tbl->highest_used_slotid = -1;
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
tbl, tbl->slots, tbl->max_slots);
@@ -4515,7 +4643,6 @@ static void nfs4_destroy_slot_tables(struct nfs4_session *session)
static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
int max_slots, int ivalue)
{
- int i;
struct nfs4_slot *slot;
int ret = -ENOMEM;
@@ -4526,18 +4653,9 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
if (!slot)
goto out;
- for (i = 0; i < max_slots; ++i)
- slot[i].seq_nr = ivalue;
ret = 0;
spin_lock(&tbl->slot_tbl_lock);
- if (tbl->slots != NULL) {
- spin_unlock(&tbl->slot_tbl_lock);
- dprintk("%s: slot table already initialized. tbl=%p slots=%p\n",
- __func__, tbl, tbl->slots);
- WARN_ON(1);
- goto out_free;
- }
tbl->max_slots = max_slots;
tbl->slots = slot;
tbl->highest_used_slotid = -1; /* no slot is currently used */
@@ -4547,10 +4665,6 @@ static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
out:
dprintk("<-- %s: return %d\n", __func__, ret);
return ret;
-
-out_free:
- kfree(slot);
- goto out;
}
/*
@@ -4558,17 +4672,24 @@ out_free:
*/
static int nfs4_init_slot_tables(struct nfs4_session *session)
{
- int status;
+ struct nfs4_slot_table *tbl;
+ int status = 0;
- status = nfs4_init_slot_table(&session->fc_slot_table,
- session->fc_attrs.max_reqs, 1);
- if (status)
- return status;
+ tbl = &session->fc_slot_table;
+ if (tbl->slots == NULL) {
+ status = nfs4_init_slot_table(tbl,
+ session->fc_attrs.max_reqs, 1);
+ if (status)
+ return status;
+ }
- status = nfs4_init_slot_table(&session->bc_slot_table,
- session->bc_attrs.max_reqs, 0);
- if (status)
- nfs4_destroy_slot_tables(session);
+ tbl = &session->bc_slot_table;
+ if (tbl->slots == NULL) {
+ status = nfs4_init_slot_table(tbl,
+ session->bc_attrs.max_reqs, 0);
+ if (status)
+ nfs4_destroy_slot_tables(session);
+ }
return status;
}
@@ -4582,7 +4703,6 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
if (!session)
return NULL;
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
/*
* The create session reply races with the server back
* channel probe. Mark the client NFS_CS_SESSION_INITING
@@ -4590,12 +4710,15 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
* nfs_client struct
*/
clp->cl_cons_state = NFS_CS_SESSION_INITING;
+ init_completion(&session->complete);
tbl = &session->fc_slot_table;
+ tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
- rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+ rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
tbl = &session->bc_slot_table;
+ tbl->highest_used_slotid = -1;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
@@ -4747,11 +4870,10 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
-int nfs4_proc_create_session(struct nfs_client *clp, int reset)
+int nfs4_proc_create_session(struct nfs_client *clp)
{
int status;
unsigned *ptr;
- struct nfs_fsinfo fsinfo;
struct nfs4_session *session = clp->cl_session;
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
@@ -4760,35 +4882,19 @@ int nfs4_proc_create_session(struct nfs_client *clp, int reset)
if (status)
goto out;
- /* Init or reset the fore channel */
- if (reset)
- status = nfs4_reset_slot_tables(session);
- else
- status = nfs4_init_slot_tables(session);
- dprintk("fore channel slot table initialization returned %d\n", status);
+ /* Init and reset the fore channel */
+ status = nfs4_init_slot_tables(session);
+ dprintk("slot table initialization returned %d\n", status);
+ if (status)
+ goto out;
+ status = nfs4_reset_slot_tables(session);
+ dprintk("slot table reset returned %d\n", status);
if (status)
goto out;
ptr = (unsigned *)&session->sess_id.data[0];
dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
-
- if (reset)
- /* Lease time is aleady set */
- goto out;
-
- /* Get the lease time */
- status = nfs4_proc_get_lease_time(clp, &fsinfo);
- if (status == 0) {
- /* Update lease time and schedule renewal */
- spin_lock(&clp->cl_lock);
- clp->cl_lease_time = fsinfo.lease_time * HZ;
- clp->cl_last_renewal = jiffies;
- clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- spin_unlock(&clp->cl_lock);
-
- nfs4_schedule_state_renewal(clp);
- }
out:
dprintk("<-- %s\n", __func__);
return status;
@@ -4827,13 +4933,24 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
int nfs4_init_session(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
+ struct nfs4_session *session;
+ unsigned int rsize, wsize;
int ret;
if (!nfs4_has_session(clp))
return 0;
- clp->cl_session->fc_attrs.max_rqst_sz = server->wsize;
- clp->cl_session->fc_attrs.max_resp_sz = server->rsize;
+ rsize = server->rsize;
+ if (rsize == 0)
+ rsize = NFS_MAX_FILE_IO_SIZE;
+ wsize = server->wsize;
+ if (wsize == 0)
+ wsize = NFS_MAX_FILE_IO_SIZE;
+
+ session = clp->cl_session;
+ session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
+ session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+
ret = nfs4_recover_expired_lease(server);
if (!ret)
ret = nfs4_check_client_ready(clp);
@@ -4858,7 +4975,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
args.sa_cache_this = 0;
return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
- &res, 0);
+ &res, args.sa_cache_this, 1);
}
void nfs41_sequence_call_done(struct rpc_task *task, void *data)
@@ -4872,11 +4989,10 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
if (_nfs4_async_handle_error(task, NULL, clp, NULL)
== -EAGAIN) {
- nfs4_restart_rpc(task, clp);
+ nfs_restart_rpc(task, clp);
return;
}
}
- nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
kfree(task->tk_msg.rpc_argp);
@@ -4931,6 +5047,110 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
&nfs41_sequence_ops, (void *)clp);
}
+struct nfs4_reclaim_complete_data {
+ struct nfs_client *clp;
+ struct nfs41_reclaim_complete_args arg;
+ struct nfs41_reclaim_complete_res res;
+};
+
+static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ if (nfs4_setup_sequence(calldata->clp, &calldata->arg.seq_args,
+ &calldata->res.seq_res, 0, task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
+ struct nfs4_sequence_res *res = &calldata->res.seq_res;
+
+ dprintk("--> %s\n", __func__);
+ nfs41_sequence_done(clp, res, task->tk_status);
+ switch (task->tk_status) {
+ case 0:
+ case -NFS4ERR_COMPLETE_ALREADY:
+ break;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ /*
+ * Handle the session error, but do not retry the operation, as
+ * we have no way of telling whether the clientid had to be
+ * reset before we got our reply. If reset, a new wave of
+ * reclaim operations will follow, containing their own reclaim
+ * complete. We don't want our retry to get in the way of
+ * recovery by incorrectly indicating to the server that we're
+ * done reclaiming state since the process had to be restarted.
+ */
+ _nfs4_async_handle_error(task, NULL, clp, NULL);
+ break;
+ default:
+ if (_nfs4_async_handle_error(
+ task, NULL, clp, NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_free_reclaim_complete_data(void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+
+ kfree(calldata);
+}
+
+static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
+ .rpc_call_prepare = nfs4_reclaim_complete_prepare,
+ .rpc_call_done = nfs4_reclaim_complete_done,
+ .rpc_release = nfs4_free_reclaim_complete_data,
+};
+
+/*
+ * Issue a global reclaim complete.
+ */
+static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
+{
+ struct nfs4_reclaim_complete_data *calldata;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_reclaim_complete_call_ops,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = -ENOMEM;
+
+ dprintk("--> %s\n", __func__);
+ calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
+ if (calldata == NULL)
+ goto out;
+ calldata->clp = clp;
+ calldata->arg.one_fs = 0;
+ calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+
+ msg.rpc_argp = &calldata->arg;
+ msg.rpc_resp = &calldata->res;
+ task_setup_data.callback_data = calldata;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ status = PTR_ERR(task);
+ rpc_put_task(task);
+out:
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
@@ -4948,8 +5168,9 @@ struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
- .establish_clid = nfs4_proc_exchange_id,
+ .establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
+ .reclaim_complete = nfs41_proc_reclaim_complete,
};
#endif /* CONFIG_NFS_V4_1 */
@@ -4968,7 +5189,7 @@ struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
- .establish_clid = nfs4_proc_exchange_id,
+ .establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 2ef4fecf398..6d263ed79e9 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -116,6 +116,79 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
#if defined(CONFIG_NFS_V4_1)
+static int nfs41_setup_state_renewal(struct nfs_client *clp)
+{
+ int status;
+ struct nfs_fsinfo fsinfo;
+
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+ /* Update lease time and schedule renewal */
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = fsinfo.lease_time * HZ;
+ clp->cl_last_renewal = jiffies;
+ spin_unlock(&clp->cl_lock);
+
+ nfs4_schedule_state_renewal(clp);
+ }
+
+ return status;
+}
+
+static void nfs4_end_drain_session(struct nfs_client *clp)
+{
+ struct nfs4_session *ses = clp->cl_session;
+ int max_slots;
+
+ if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
+ spin_lock(&ses->fc_slot_table.slot_tbl_lock);
+ max_slots = ses->fc_slot_table.max_slots;
+ while (max_slots--) {
+ struct rpc_task *task;
+
+ task = rpc_wake_up_next(&ses->fc_slot_table.
+ slot_tbl_waitq);
+ if (!task)
+ break;
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ }
+ spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
+ }
+}
+
+static int nfs4_begin_drain_session(struct nfs_client *clp)
+{
+ struct nfs4_session *ses = clp->cl_session;
+ struct nfs4_slot_table *tbl = &ses->fc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
+ if (tbl->highest_used_slotid != -1) {
+ INIT_COMPLETION(ses->complete);
+ spin_unlock(&tbl->slot_tbl_lock);
+ return wait_for_completion_interruptible(&ses->complete);
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+ return 0;
+}
+
+int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ int status;
+
+ nfs4_begin_drain_session(clp);
+ status = nfs4_proc_exchange_id(clp, cred);
+ if (status != 0)
+ goto out;
+ status = nfs4_proc_create_session(clp);
+ if (status != 0)
+ goto out;
+ nfs41_setup_state_renewal(clp);
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+out:
+ return status;
+}
+
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
@@ -693,16 +766,21 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
return new;
}
-void nfs_free_seqid(struct nfs_seqid *seqid)
+void nfs_release_seqid(struct nfs_seqid *seqid)
{
if (!list_empty(&seqid->list)) {
struct rpc_sequence *sequence = seqid->sequence->sequence;
spin_lock(&sequence->lock);
- list_del(&seqid->list);
+ list_del_init(&seqid->list);
spin_unlock(&sequence->lock);
rpc_wake_up(&sequence->wait);
}
+}
+
+void nfs_free_seqid(struct nfs_seqid *seqid)
+{
+ nfs_release_seqid(seqid);
kfree(seqid);
}
@@ -877,6 +955,10 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
case -NFS4ERR_EXPIRED:
case -NFS4ERR_NO_GRACE:
case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
goto out;
default:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
@@ -959,6 +1041,10 @@ restart:
case -NFS4ERR_NO_GRACE:
nfs4_state_mark_reclaim_nograce(sp->so_client, state);
case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
goto out_err;
}
nfs4_put_open_state(state);
@@ -1011,6 +1097,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}
+static void nfs4_reclaim_complete(struct nfs_client *clp,
+ const struct nfs4_state_recovery_ops *ops)
+{
+ /* Notify the server we're done reclaiming our state */
+ if (ops->reclaim_complete)
+ (void)ops->reclaim_complete(clp);
+}
+
static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
@@ -1020,6 +1114,9 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
return;
+ nfs4_reclaim_complete(clp,
+ nfs4_reboot_recovery_ops[clp->cl_minorversion]);
+
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
spin_lock(&sp->so_lock);
@@ -1046,25 +1143,25 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}
-static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp)
-{
- clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
-}
-
-static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
+static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
case -NFS4ERR_CB_PATH_DOWN:
nfs_handle_cb_pathdown(clp);
- break;
+ return 0;
+ case -NFS4ERR_NO_GRACE:
+ nfs4_state_end_reclaim_reboot(clp);
+ return 0;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_end_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ break;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
@@ -1072,8 +1169,11 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ /* Zero session reset errors */
+ return 0;
}
+ return error;
}
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1093,8 +1193,7 @@ restart:
if (status < 0) {
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
- nfs4_recovery_handle_error(clp, status);
- return status;
+ return nfs4_recovery_handle_error(clp, status);
}
nfs4_put_state_owner(sp);
goto restart;
@@ -1124,8 +1223,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
status = ops->renew_lease(clp, cred);
put_rpccred(cred);
out:
- nfs4_recovery_handle_error(clp, status);
- return status;
+ return nfs4_recovery_handle_error(clp, status);
}
static int nfs4_reclaim_lease(struct nfs_client *clp)
@@ -1151,55 +1249,59 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
}
#ifdef CONFIG_NFS_V4_1
-static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
+void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
- switch (err) {
- case -NFS4ERR_STALE_CLIENTID:
+ if (!flags)
+ return;
+ else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
- }
+ nfs4_state_start_reclaim_reboot(clp);
+ nfs4_schedule_state_recovery(clp);
+ } else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+ SEQ4_STATUS_ADMIN_STATE_REVOKED |
+ SEQ4_STATUS_RECALLABLE_STATE_REVOKED |
+ SEQ4_STATUS_LEASE_MOVED)) {
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_start_reclaim_nograce(clp);
+ nfs4_schedule_state_recovery(clp);
+ } else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ SEQ4_STATUS_BACKCHANNEL_FAULT |
+ SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+ nfs_expire_all_delegations(clp);
}
static int nfs4_reset_session(struct nfs_client *clp)
{
int status;
+ nfs4_begin_drain_session(clp);
status = nfs4_proc_destroy_session(clp->cl_session);
if (status && status != -NFS4ERR_BADSESSION &&
status != -NFS4ERR_DEADSESSION) {
- nfs4_session_recovery_handle_error(clp, status);
+ status = nfs4_recovery_handle_error(clp, status);
goto out;
}
memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
- status = nfs4_proc_create_session(clp, 1);
+ status = nfs4_proc_create_session(clp);
if (status)
- nfs4_session_recovery_handle_error(clp, status);
- /* fall through*/
-out:
- /* Wake up the next rpc task even on error */
- rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
- return status;
-}
+ status = nfs4_recovery_handle_error(clp, status);
-static int nfs4_initialize_session(struct nfs_client *clp)
-{
- int status;
+out:
+ /*
+ * Let the state manager reestablish state
+ */
+ if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
+ status == 0)
+ nfs41_setup_state_renewal(clp);
- status = nfs4_proc_create_session(clp, 0);
- if (!status) {
- nfs_mark_client_ready(clp, NFS_CS_READY);
- } else if (status == -NFS4ERR_STALE_CLIENTID) {
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
- } else {
- nfs_mark_client_ready(clp, status);
- }
return status;
}
+
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
+static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */
/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
@@ -1234,7 +1336,8 @@ static void nfs4_state_manager(struct nfs_client *clp)
status = nfs4_reclaim_lease(clp);
if (status) {
nfs4_set_lease_expired(clp, status);
- if (status == -EAGAIN)
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED,
+ &clp->cl_state))
continue;
if (clp->cl_cons_state ==
NFS_CS_SESSION_INITING)
@@ -1242,57 +1345,54 @@ static void nfs4_state_manager(struct nfs_client *clp)
goto out_error;
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
}
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
status = nfs4_check_lease(clp);
- if (status != 0)
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
continue;
+ if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
+ goto out_error;
}
+
/* Initialize or reset the session */
- if (test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)
+ if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
&& nfs4_has_session(clp)) {
- if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
- status = nfs4_initialize_session(clp);
- else
- status = nfs4_reset_session(clp);
- if (status) {
- if (status == -NFS4ERR_STALE_CLIENTID)
- continue;
+ status = nfs4_reset_session(clp);
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ continue;
+ if (status < 0)
goto out_error;
- }
}
+
/* First recover reboot state... */
- if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
+ if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
nfs4_reboot_recovery_ops[clp->cl_minorversion]);
- if (status == -NFS4ERR_STALE_CLIENTID)
- continue;
- if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+ test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
continue;
nfs4_state_end_reclaim_reboot(clp);
- continue;
+ if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
+ continue;
+ if (status < 0)
+ goto out_error;
}
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
nfs4_nograce_recovery_ops[clp->cl_minorversion]);
- if (status < 0) {
- set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
- if (status == -NFS4ERR_STALE_CLIENTID)
- continue;
- if (status == -NFS4ERR_EXPIRED)
- continue;
- if (test_bit(NFS4CLNT_SESSION_SETUP,
- &clp->cl_state))
- continue;
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+ test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+ test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+ continue;
+ if (status < 0)
goto out_error;
- } else
- nfs4_state_end_reclaim_nograce(clp);
- continue;
}
+ nfs4_end_drain_session(clp);
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
continue;
@@ -1309,8 +1409,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
out_error:
printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
" with error %d\n", clp->cl_hostname, -status);
- if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
- nfs4_state_end_reclaim_reboot(clp);
+ nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 20b4e30e6c8..e437fd6a819 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -46,11 +46,13 @@
#include <linux/proc_fs.h>
#include <linux/kdev_t.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/msg_prot.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
+#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -134,7 +136,7 @@ static int nfs4_stat_to_errno(int);
#define decode_lookup_maxsz (op_decode_hdr_maxsz)
#define encode_share_access_maxsz \
(2)
-#define encode_createmode_maxsz (1 + encode_attrs_maxsz)
+#define encode_createmode_maxsz (1 + encode_attrs_maxsz + encode_verifier_maxsz)
#define encode_opentype_maxsz (1 + encode_createmode_maxsz)
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
@@ -299,6 +301,8 @@ static int nfs4_stat_to_errno(int);
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
+#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4)
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
@@ -676,6 +680,25 @@ static int nfs4_stat_to_errno(int);
decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
+#define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_reclaim_complete_maxsz)
+#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_reclaim_complete_maxsz)
+
+const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+ compound_encode_hdr_maxsz +
+ encode_sequence_maxsz +
+ encode_putfh_maxsz +
+ encode_getattr_maxsz) *
+ XDR_UNIT);
+
+const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+ compound_decode_hdr_maxsz +
+ decode_sequence_maxsz +
+ decode_putfh_maxsz) *
+ XDR_UNIT);
#endif /* CONFIG_NFS_V4_1 */
static const umode_t nfs_type2fmt[] = {
@@ -1140,6 +1163,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
+ struct nfs_client *clp;
p = reserve_space(xdr, 4);
switch(arg->open_flags & O_EXCL) {
@@ -1148,8 +1172,23 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
encode_attrs(xdr, arg->u.attrs, arg->server);
break;
default:
- *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
- encode_nfs4_verifier(xdr, &arg->u.verifier);
+ clp = arg->server->nfs_client;
+ if (clp->cl_minorversion > 0) {
+ if (nfs4_has_persistent_session(clp)) {
+ *p = cpu_to_be32(NFS4_CREATE_GUARDED);
+ encode_attrs(xdr, arg->u.attrs, arg->server);
+ } else {
+ struct iattr dummy;
+
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
+ encode_nfs4_verifier(xdr, &arg->u.verifier);
+ dummy.ia_valid = 0;
+ encode_attrs(xdr, &dummy, arg->server);
+ }
+ } else {
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
+ encode_nfs4_verifier(xdr, &arg->u.verifier);
+ }
}
}
@@ -1592,6 +1631,19 @@ static void encode_destroy_session(struct xdr_stream *xdr,
hdr->nops++;
hdr->replen += decode_destroy_session_maxsz;
}
+
+static void encode_reclaim_complete(struct xdr_stream *xdr,
+ struct nfs41_reclaim_complete_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(OP_RECLAIM_COMPLETE);
+ *p++ = cpu_to_be32(args->one_fs);
+ hdr->nops++;
+ hdr->replen += decode_reclaim_complete_maxsz;
+}
#endif /* CONFIG_NFS_V4_1 */
static void encode_sequence(struct xdr_stream *xdr,
@@ -2096,7 +2148,7 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
encode_compound_hdr(&xdr, req, &hdr);
encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
- replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
+ replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
@@ -2420,6 +2472,26 @@ static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
encode_nops(&hdr);
return 0;
}
+
+/*
+ * a RECLAIM_COMPLETE request
+ */
+static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_reclaim_complete_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args)
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_reclaim_complete(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
#endif /* CONFIG_NFS_V4_1 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -4528,6 +4600,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
{
return decode_op_hdr(xdr, OP_DESTROY_SESSION);
}
+
+static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
+}
#endif /* CONFIG_NFS_V4_1 */
static int decode_sequence(struct xdr_stream *xdr,
@@ -4583,8 +4660,8 @@ static int decode_sequence(struct xdr_stream *xdr,
dummy = be32_to_cpup(p++);
/* target highest slot id - currently not processed */
dummy = be32_to_cpup(p++);
- /* result flags - currently not processed */
- dummy = be32_to_cpup(p);
+ /* result flags */
+ res->sr_status_flags = be32_to_cpup(p);
status = 0;
out_err:
res->sr_status = status;
@@ -5309,7 +5386,7 @@ out:
}
/*
- * FSINFO request
+ * Decode FSINFO response
*/
static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
struct nfs4_fsinfo_res *res)
@@ -5330,7 +5407,7 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
}
/*
- * PATHCONF request
+ * Decode PATHCONF response
*/
static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
struct nfs4_pathconf_res *res)
@@ -5351,7 +5428,7 @@ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
}
/*
- * STATFS request
+ * Decode STATFS response
*/
static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
struct nfs4_statfs_res *res)
@@ -5372,7 +5449,7 @@ static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
}
/*
- * GETATTR_BITMAP request
+ * Decode GETATTR_BITMAP response
*/
static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
{
@@ -5411,7 +5488,7 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
}
/*
- * a SETCLIENTID request
+ * Decode SETCLIENTID response
*/
static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
struct nfs_client *clp)
@@ -5428,7 +5505,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
}
/*
- * a SETCLIENTID_CONFIRM request
+ * Decode SETCLIENTID_CONFIRM response
*/
static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
{
@@ -5448,7 +5525,7 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
}
/*
- * DELEGRETURN request
+ * Decode DELEGRETURN response
*/
static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
{
@@ -5474,7 +5551,7 @@ out:
}
/*
- * FS_LOCATIONS request
+ * Decode FS_LOCATIONS response
*/
static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
struct nfs4_fs_locations_res *res)
@@ -5504,7 +5581,7 @@ out:
#if defined(CONFIG_NFS_V4_1)
/*
- * EXCHANGE_ID request
+ * Decode EXCHANGE_ID response
*/
static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
void *res)
@@ -5521,7 +5598,7 @@ static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a CREATE_SESSION request
+ * Decode CREATE_SESSION response
*/
static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
struct nfs41_create_session_res *res)
@@ -5538,7 +5615,7 @@ static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a DESTROY_SESSION request
+ * Decode DESTROY_SESSION response
*/
static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
void *dummy)
@@ -5555,7 +5632,7 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a SEQUENCE request
+ * Decode SEQUENCE response
*/
static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
struct nfs4_sequence_res *res)
@@ -5572,7 +5649,7 @@ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
}
/*
- * a GET_LEASE_TIME request
+ * Decode GET_LEASE_TIME response
*/
static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
struct nfs4_get_lease_time_res *res)
@@ -5591,6 +5668,25 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
status = decode_fsinfo(&xdr, res->lr_fsinfo);
return status;
}
+
+/*
+ * Decode RECLAIM_COMPLETE response
+ */
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs41_reclaim_complete_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (!status)
+ status = decode_reclaim_complete(&xdr, (void *)NULL);
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
__be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
@@ -5767,6 +5863,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
PROC(SEQUENCE, enc_sequence, dec_sequence),
PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+ PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 12c9e66d3f1..db9b360ae19 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -356,25 +356,19 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
- goto out;
+ return;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
- goto out;
+ return;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
- return;
-out:
- nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
- &data->res.seq_res);
- return;
-
+ nfs_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
}
/*
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 90be551b80c..ce907efc550 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -175,14 +175,16 @@ static const match_table_t nfs_mount_option_tokens = {
};
enum {
- Opt_xprt_udp, Opt_xprt_tcp, Opt_xprt_rdma,
+ Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
Opt_xprt_err
};
static const match_table_t nfs_xprt_protocol_tokens = {
{ Opt_xprt_udp, "udp" },
+ { Opt_xprt_udp6, "udp6" },
{ Opt_xprt_tcp, "tcp" },
+ { Opt_xprt_tcp6, "tcp6" },
{ Opt_xprt_rdma, "rdma" },
{ Opt_xprt_err, NULL }
@@ -492,6 +494,45 @@ static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour)
return sec_flavours[i].str;
}
+static void nfs_show_mountd_netid(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct sockaddr *sap = (struct sockaddr *) &nfss->mountd_address;
+
+ seq_printf(m, ",mountproto=");
+ switch (sap->sa_family) {
+ case AF_INET:
+ switch (nfss->mountd_protocol) {
+ case IPPROTO_UDP:
+ seq_printf(m, RPCBIND_NETID_UDP);
+ break;
+ case IPPROTO_TCP:
+ seq_printf(m, RPCBIND_NETID_TCP);
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+ break;
+ case AF_INET6:
+ switch (nfss->mountd_protocol) {
+ case IPPROTO_UDP:
+ seq_printf(m, RPCBIND_NETID_UDP6);
+ break;
+ case IPPROTO_TCP:
+ seq_printf(m, RPCBIND_NETID_TCP6);
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+}
+
static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
int showdefaults)
{
@@ -505,7 +546,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
}
case AF_INET6: {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
- seq_printf(m, ",mountaddr=%pI6", &sin6->sin6_addr);
+ seq_printf(m, ",mountaddr=%pI6c", &sin6->sin6_addr);
break;
}
default:
@@ -518,17 +559,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
if (nfss->mountd_port || showdefaults)
seq_printf(m, ",mountport=%u", nfss->mountd_port);
- switch (nfss->mountd_protocol) {
- case IPPROTO_UDP:
- seq_printf(m, ",mountproto=udp");
- break;
- case IPPROTO_TCP:
- seq_printf(m, ",mountproto=tcp");
- break;
- default:
- if (showdefaults)
- seq_printf(m, ",mountproto=auto");
- }
+ nfs_show_mountd_netid(m, nfss, showdefaults);
}
/*
@@ -578,7 +609,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
seq_puts(m, nfs_infop->nostr);
}
seq_printf(m, ",proto=%s",
- rpc_peeraddr2str(nfss->client, RPC_DISPLAY_PROTO));
+ rpc_peeraddr2str(nfss->client, RPC_DISPLAY_NETID));
if (version == 4) {
if (nfss->port != NFS_PORT)
seq_printf(m, ",port=%u", nfss->port);
@@ -714,8 +745,6 @@ static void nfs_umount_begin(struct super_block *sb)
struct nfs_server *server;
struct rpc_clnt *rpc;
- lock_kernel();
-
server = NFS_SB(sb);
/* -EIO all pending I/O */
rpc = server->client_acl;
@@ -724,8 +753,6 @@ static void nfs_umount_begin(struct super_block *sb)
rpc = server->client;
if (!IS_ERR(rpc))
rpc_killall_tasks(rpc);
-
- unlock_kernel();
}
static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
@@ -734,8 +761,6 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data) {
- data->rsize = NFS_MAX_FILE_IO_SIZE;
- data->wsize = NFS_MAX_FILE_IO_SIZE;
data->acregmin = NFS_DEF_ACREGMIN;
data->acregmax = NFS_DEF_ACREGMAX;
data->acdirmin = NFS_DEF_ACDIRMIN;
@@ -887,6 +912,8 @@ static int nfs_parse_mount_options(char *raw,
{
char *p, *string, *secdata;
int rc, sloppy = 0, invalid_option = 0;
+ unsigned short protofamily = AF_UNSPEC;
+ unsigned short mountfamily = AF_UNSPEC;
if (!raw) {
dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
@@ -1232,12 +1259,17 @@ static int nfs_parse_mount_options(char *raw,
token = match_token(string,
nfs_xprt_protocol_tokens, args);
+ protofamily = AF_INET;
switch (token) {
+ case Opt_xprt_udp6:
+ protofamily = AF_INET6;
case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
kfree(string);
break;
+ case Opt_xprt_tcp6:
+ protofamily = AF_INET6;
case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
@@ -1265,10 +1297,15 @@ static int nfs_parse_mount_options(char *raw,
nfs_xprt_protocol_tokens, args);
kfree(string);
+ mountfamily = AF_INET;
switch (token) {
+ case Opt_xprt_udp6:
+ mountfamily = AF_INET6;
case Opt_xprt_udp:
mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
break;
+ case Opt_xprt_tcp6:
+ mountfamily = AF_INET6;
case Opt_xprt_tcp:
mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
break;
@@ -1367,8 +1404,33 @@ static int nfs_parse_mount_options(char *raw,
if (!sloppy && invalid_option)
return 0;
+ /*
+ * verify that any proto=/mountproto= options match the address
+ * families in the addr=/mountaddr= options.
+ */
+ if (protofamily != AF_UNSPEC &&
+ protofamily != mnt->nfs_server.address.ss_family)
+ goto out_proto_mismatch;
+
+ if (mountfamily != AF_UNSPEC) {
+ if (mnt->mount_server.addrlen) {
+ if (mountfamily != mnt->mount_server.address.ss_family)
+ goto out_mountproto_mismatch;
+ } else {
+ if (mountfamily != mnt->nfs_server.address.ss_family)
+ goto out_mountproto_mismatch;
+ }
+ }
+
return 1;
+out_mountproto_mismatch:
+ printk(KERN_INFO "NFS: mount server address does not match mountproto= "
+ "option\n");
+ return 0;
+out_proto_mismatch:
+ printk(KERN_INFO "NFS: server address does not match proto= option\n");
+ return 0;
out_invalid_address:
printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
return 0;
@@ -1881,7 +1943,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
if (data == NULL)
return -ENOMEM;
- lock_kernel();
/* fill out struct with values from existing mount */
data->flags = nfss->flags;
data->rsize = nfss->rsize;
@@ -1907,7 +1968,6 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data)
error = nfs_compare_remount_data(nfss, data);
out:
kfree(data);
- unlock_kernel();
return error;
}
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 1064c91ae81..6da3d3ff6ed 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -83,7 +83,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
+ nfs_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
}
/**
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c84b5cc1a94..d171696017f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -774,7 +774,7 @@ int nfs_updatepage(struct file *file, struct page *page,
*/
if (nfs_write_pageuptodate(page, inode) &&
inode->i_flock == NULL &&
- !(file->f_flags & O_SYNC)) {
+ !(file->f_flags & O_DSYNC)) {
count = max(count + offset, nfs_page_length(page));
offset = 0;
}
@@ -1216,7 +1216,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
@@ -1228,7 +1228,6 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
- nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
return 0;
}
@@ -1612,15 +1611,16 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
if (ret)
goto out_unlock;
page_cache_get(newpage);
+ spin_lock(&mapping->host->i_lock);
req->wb_page = newpage;
SetPagePrivate(newpage);
- set_page_private(newpage, page_private(page));
+ set_page_private(newpage, (unsigned long)req);
ClearPagePrivate(page);
set_page_private(page, 0);
+ spin_unlock(&mapping->host->i_lock);
page_cache_release(page);
out_unlock:
nfs_clear_page_tag_locked(req);
- nfs_release_request(req);
out:
return ret;
}
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index 8f9a20556f7..d3854d94b7c 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -7,8 +7,6 @@
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/syscall.h>
#include <linux/cred.h>
#include <linux/sched.h>
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 36fcabbf518..79717a40dab 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -1,15 +1,7 @@
-/*
- * linux/fs/nfsd/auth.c
- *
- * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
- */
+/* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */
-#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/sunrpc/svcauth.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/export.h>
+#include "nfsd.h"
#include "auth.h"
int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
diff --git a/include/linux/nfsd/cache.h b/fs/nfsd/cache.h
index 3a3f58934f5..d892be61016 100644
--- a/include/linux/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -1,6 +1,4 @@
/*
- * include/linux/nfsd/cache.h
- *
* Request reply cache. This was heavily inspired by the
* implementation in 4.3BSD/4.4BSD.
*
@@ -10,8 +8,7 @@
#ifndef NFSCACHE_H
#define NFSCACHE_H
-#include <linux/in.h>
-#include <linux/uio.h>
+#include <linux/sunrpc/svc.h>
/*
* Representation of a reply cache entry.
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c1c9e035d4a..c487810a236 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1,7 +1,5 @@
#define MSNFS /* HACK HACK */
/*
- * linux/fs/nfsd/export.c
- *
* NFS exporting and validation.
*
* We maintain a list of clients, each of which has a list of
@@ -14,29 +12,16 @@
* Copyright (C) 1995, 1996 Olaf Kirch, <okir@monad.swb.de>
*/
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/in.h>
-#include <linux/seq_file.h>
-#include <linux/syscalls.h>
-#include <linux/rwsem.h>
-#include <linux/dcache.h>
#include <linux/namei.h>
-#include <linux/mount.h>
-#include <linux/hash.h>
#include <linux/module.h>
#include <linux/exportfs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/nfsfh.h>
#include <linux/nfsd/syscall.h>
-#include <linux/lockd/bind.h>
-#include <linux/sunrpc/msg_prot.h>
-#include <linux/sunrpc/gss_api.h>
#include <net/ipv6.h>
+#include "nfsd.h"
+#include "nfsfh.h"
+
#define NFSDDBG_FACILITY NFSDDBG_EXPORT
typedef struct auth_domain svc_client;
@@ -369,16 +354,25 @@ static struct svc_export *svc_export_update(struct svc_export *new,
struct svc_export *old);
static struct svc_export *svc_export_lookup(struct svc_export *);
-static int check_export(struct inode *inode, int flags, unsigned char *uuid)
+static int check_export(struct inode *inode, int *flags, unsigned char *uuid)
{
- /* We currently export only dirs and regular files.
- * This is what umountd does.
+ /*
+ * We currently export only dirs, regular files, and (for v4
+ * pseudoroot) symlinks.
*/
if (!S_ISDIR(inode->i_mode) &&
+ !S_ISLNK(inode->i_mode) &&
!S_ISREG(inode->i_mode))
return -ENOTDIR;
+ /*
+ * Mountd should never pass down a writeable V4ROOT export, but,
+ * just to make sure:
+ */
+ if (*flags & NFSEXP_V4ROOT)
+ *flags |= NFSEXP_READONLY;
+
/* There are two requirements on a filesystem to be exportable.
* 1: We must be able to identify the filesystem from a number.
* either a device number (so FS_REQUIRES_DEV needed)
@@ -387,7 +381,7 @@ static int check_export(struct inode *inode, int flags, unsigned char *uuid)
* This means that s_export_op must be set.
*/
if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) &&
- !(flags & NFSEXP_FSID) &&
+ !(*flags & NFSEXP_FSID) &&
uuid == NULL) {
dprintk("exp_export: export of non-dev fs without fsid\n");
return -EINVAL;
@@ -602,7 +596,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
goto out4;
}
- err = check_export(exp.ex_path.dentry->d_inode, exp.ex_flags,
+ err = check_export(exp.ex_path.dentry->d_inode, &exp.ex_flags,
exp.ex_uuid);
if (err)
goto out4;
@@ -1041,7 +1035,7 @@ exp_export(struct nfsctl_export *nxp)
goto finish;
}
- err = check_export(path.dentry->d_inode, nxp->ex_flags, NULL);
+ err = check_export(path.dentry->d_inode, &nxp->ex_flags, NULL);
if (err) goto finish;
err = -ENOMEM;
@@ -1320,6 +1314,23 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
return exp;
}
+static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
+{
+ struct svc_export *exp;
+ u32 fsidv[2];
+
+ mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
+
+ exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
+ /*
+ * We shouldn't be accepting an nfsv4 request at all if we
+ * don't have a pseudoexport!
+ */
+ if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
+ exp = ERR_PTR(-ESERVERFAULT);
+ return exp;
+}
+
/*
* Called when we need the filehandle for the root of the pseudofs,
* for a given NFSv4 client. The root is defined to be the
@@ -1330,11 +1341,8 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
struct svc_export *exp;
__be32 rv;
- u32 fsidv[2];
- mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
-
- exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
+ exp = find_fsidzero_export(rqstp);
if (IS_ERR(exp))
return nfserrno(PTR_ERR(exp));
rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL);
@@ -1425,6 +1433,7 @@ static struct flags {
{ NFSEXP_CROSSMOUNT, {"crossmnt", ""}},
{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
+ { NFSEXP_V4ROOT, {"v4root", ""}},
#ifdef MSNFS
{ NFSEXP_MSNFS, {"msnfs", ""}},
#endif
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index b2786a5f9af..0c6d8167013 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -1,6 +1,4 @@
/*
- * linux/fs/nfsd/lockd.c
- *
* This file contains all the stubs needed when communicating with lockd.
* This level of indirection is necessary so we can run nfsd+lockd without
* requiring the nfs client to be compiled in/loaded, and vice versa.
@@ -8,14 +6,10 @@
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/types.h>
-#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
#include <linux/lockd/bind.h>
+#include "nfsd.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_LOCKD
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 4e3219e8411..f20589d2ae2 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -1,19 +1,15 @@
/*
- * linux/fs/nfsd/nfs2acl.c
- *
* Process version 2 NFSACL requests.
*
* Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
*/
-#include <linux/sunrpc/svc.h>
-#include <linux/nfs.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/xdr3.h>
-#include <linux/posix_acl.h>
+#include "nfsd.h"
+/* FIXME: nfsacl.h is a broken header */
#include <linux/nfsacl.h>
+#include "cache.h"
+#include "xdr3.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
#define RETURN_STATUS(st) { resp->status = (st); return (st); }
@@ -217,6 +213,16 @@ static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
* XDR encode functions
*/
+/*
+ * There must be an encoding function for void results so svc_process
+ * will work properly.
+ */
+int
+nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+{
+ return xdr_ressize_check(rqstp, p);
+}
+
/* GETACL */
static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
struct nfsd3_getaclres *resp)
@@ -308,7 +314,6 @@ static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p,
}
#define nfsaclsvc_decode_voidargs NULL
-#define nfsaclsvc_encode_voidres NULL
#define nfsaclsvc_release_void NULL
#define nfsd3_fhandleargs nfsd_fhandle
#define nfsd3_attrstatres nfsd_attrstat
@@ -346,5 +351,5 @@ struct svc_version nfsd_acl_version2 = {
.vs_proc = nfsd_acl_procedures2,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS3_SVC_XDRSIZE,
- .vs_hidden = 1,
+ .vs_hidden = 0,
};
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 9981dbb377a..e0c4846bad9 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -1,18 +1,15 @@
/*
- * linux/fs/nfsd/nfs3acl.c
- *
* Process version 3 NFSACL requests.
*
* Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
*/
-#include <linux/sunrpc/svc.h>
-#include <linux/nfs3.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr3.h>
-#include <linux/posix_acl.h>
+#include "nfsd.h"
+/* FIXME: nfsacl.h is a broken header */
#include <linux/nfsacl.h>
+#include "cache.h"
+#include "xdr3.h"
+#include "vfs.h"
#define RETURN_STATUS(st) { resp->status = (st); return (st); }
@@ -264,6 +261,6 @@ struct svc_version nfsd_acl_version3 = {
.vs_proc = nfsd_acl_procedures3,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS3_SVC_XDRSIZE,
- .vs_hidden = 1,
+ .vs_hidden = 0,
};
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index a713c418a92..3d68f45a37b 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -1,30 +1,16 @@
/*
- * linux/fs/nfsd/nfs3proc.c
- *
* Process version 3 NFS requests.
*
* Copyright (C) 1996, 1997, 1998 Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/linkage.h>
-#include <linux/time.h>
-#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ext2_fs.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/net.h>
-#include <linux/in.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/major.h>
#include <linux/magic.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr3.h>
-#include <linux/nfs3.h>
+#include "cache.h"
+#include "xdr3.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index d0a2ce1b432..2a533a0af2a 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -1,6 +1,4 @@
/*
- * linux/fs/nfsd/nfs3xdr.c
- *
* XDR support for nfsd/protocol version 3.
*
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
@@ -8,19 +6,8 @@
* 2003-08-09 Jamie Lokier: Use htonl() for nanoseconds, not htons()!
*/
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/nfs3.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/dcache.h>
#include <linux/namei.h>
-#include <linux/mm.h>
-#include <linux/vfs.h>
-#include <linux/sunrpc/xdr.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/xdr3.h>
+#include "xdr3.h"
#include "auth.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 725d02f210e..88150685df3 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -1,6 +1,4 @@
/*
- * fs/nfs4acl/acl.c
- *
* Common NFSv4 ACL handling code.
*
* Copyright (c) 2002, 2003 The Regents of the University of Michigan.
@@ -36,15 +34,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/module.h>
#include <linux/nfs_fs.h>
-#include <linux/posix_acl.h>
-#include <linux/nfs4.h>
#include <linux/nfs4_acl.h>
@@ -389,7 +379,7 @@ sort_pacl(struct posix_acl *pacl)
sort_pacl_range(pacl, 1, i-1);
BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
- j = i++;
+ j = ++i;
while (pacl->a_entries[j].e_tag == ACL_GROUP)
j++;
sort_pacl_range(pacl, i, j-1);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 24e8d78f8dd..c6eed2a3b09 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1,6 +1,4 @@
/*
- * linux/fs/nfsd/nfs4callback.c
- *
* Copyright (c) 2001 The Regents of the University of Michigan.
* All rights reserved.
*
@@ -33,22 +31,9 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/inet.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/sunrpc/xdr.h>
-#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/svcsock.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/state.h>
-#include <linux/sunrpc/sched.h>
-#include <linux/nfs4.h>
-#include <linux/sunrpc/xprtsock.h>
+#include "nfsd.h"
+#include "state.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index ba2c199592f..6e2983b27f3 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -1,6 +1,4 @@
/*
- * fs/nfsd/nfs4idmap.c
- *
* Mapping of UID/GIDs to name and vice versa.
*
* Copyright (c) 2002, 2003 The Regents of the University of
@@ -35,22 +33,9 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/nfs.h>
-#include <linux/nfs4.h>
-#include <linux/nfs_fs.h>
-#include <linux/nfs_page.h>
-#include <linux/sunrpc/cache.h>
#include <linux/nfsd_idmap.h>
-#include <linux/list.h>
-#include <linux/time.h>
#include <linux/seq_file.h>
-#include <linux/sunrpc/svcauth.h>
+#include <linux/sched.h>
/*
* Cache entry
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index bebc0c2e1b0..37514c46984 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1,6 +1,4 @@
/*
- * fs/nfsd/nfs4proc.c
- *
* Server-side procedures for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
@@ -34,20 +32,11 @@
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
-#include <linux/param.h>
-#include <linux/major.h>
-#include <linux/slab.h>
#include <linux/file.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfs4.h>
-#include <linux/nfsd/state.h>
-#include <linux/nfsd/xdr4.h>
-#include <linux/nfs4_acl.h>
-#include <linux/sunrpc/gss_api.h>
+#include "cache.h"
+#include "xdr4.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -170,7 +159,7 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
accmode |= NFSD_MAY_READ;
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
accmode |= (NFSD_MAY_WRITE | NFSD_MAY_TRUNC);
- if (open->op_share_deny & NFS4_SHARE_DENY_WRITE)
+ if (open->op_share_deny & NFS4_SHARE_DENY_READ)
accmode |= NFSD_MAY_WRITE;
status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index b5348405046..5a754f7b71e 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -1,6 +1,4 @@
/*
-* linux/fs/nfsd/nfs4recover.c
-*
* Copyright (c) 2004 The Regents of the University of Michigan.
* All rights reserved.
*
@@ -33,20 +31,14 @@
*
*/
-#include <linux/err.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfs4.h>
-#include <linux/nfsd/state.h>
-#include <linux/nfsd/xdr4.h>
-#include <linux/param.h>
#include <linux/file.h>
#include <linux/namei.h>
-#include <asm/uaccess.h>
-#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/sched.h>
-#include <linux/mount.h>
+
+#include "nfsd.h"
+#include "state.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2153f9bdbeb..f19ed866c95 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1,6 +1,4 @@
/*
-* linux/fs/nfsd/nfs4state.c
-*
* Copyright (c) 2001 The Regents of the University of Michigan.
* All rights reserved.
*
@@ -34,28 +32,14 @@
*
*/
-#include <linux/param.h>
-#include <linux/major.h>
-#include <linux/slab.h>
-
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
#include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/workqueue.h>
#include <linux/smp_lock.h>
-#include <linux/kthread.h>
-#include <linux/nfs4.h>
-#include <linux/nfsd/state.h>
-#include <linux/nfsd/xdr4.h>
#include <linux/namei.h>
#include <linux/swap.h>
-#include <linux/mutex.h>
-#include <linux/lockd/bind.h>
-#include <linux/module.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/clnt.h>
+#include "xdr4.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -477,13 +461,14 @@ static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan)
/*
* fchan holds the client values on input, and the server values on output
+ * sv_max_mesg is the maximum payload plus one page for overhead.
*/
static int init_forechannel_attrs(struct svc_rqst *rqstp,
struct nfsd4_channel_attrs *session_fchan,
struct nfsd4_channel_attrs *fchan)
{
int status = 0;
- __u32 maxcount = svc_max_payload(rqstp);
+ __u32 maxcount = nfsd_serv->sv_max_mesg;
/* headerpadsz set to zero in encode routine */
@@ -523,6 +508,15 @@ free_session_slots(struct nfsd4_session *ses)
kfree(ses->se_slots[i]);
}
+/*
+ * We don't actually need to cache the rpc and session headers, so we
+ * can allocate a little less for each slot:
+ */
+static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
+{
+ return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+}
+
static int
alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
struct nfsd4_create_session *cses)
@@ -554,7 +548,7 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
memcpy(new, &tmp, sizeof(*new));
/* allocate each struct nfsd4_slot and data cache in one piece */
- cachesize = new->se_fchannel.maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+ cachesize = slot_bytes(&new->se_fchannel);
for (i = 0; i < new->se_fchannel.maxreqs; i++) {
sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL);
if (!sp)
@@ -628,10 +622,12 @@ void
free_session(struct kref *kref)
{
struct nfsd4_session *ses;
+ int mem;
ses = container_of(kref, struct nfsd4_session, se_ref);
spin_lock(&nfsd_drc_lock);
- nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE;
+ mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
+ nfsd_drc_mem_used -= mem;
spin_unlock(&nfsd_drc_lock);
free_session_slots(ses);
kfree(ses);
@@ -2404,11 +2400,8 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
- dprintk("NFSD: delegation stateid=(%08x/%08x/%08x/%08x)\n\n",
- dp->dl_stateid.si_boot,
- dp->dl_stateid.si_stateownerid,
- dp->dl_stateid.si_fileid,
- dp->dl_stateid.si_generation);
+ dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
+ STATEID_VAL(&dp->dl_stateid));
out:
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
&& flag == NFS4_OPEN_DELEGATE_NONE
@@ -2498,9 +2491,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs_ok;
- dprintk("nfs4_process_open2: stateid=(%08x/%08x/%08x/%08x)\n",
- stp->st_stateid.si_boot, stp->st_stateid.si_stateownerid,
- stp->st_stateid.si_fileid, stp->st_stateid.si_generation);
+ dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
+ STATEID_VAL(&stp->st_stateid));
out:
if (fp)
put_nfs4_file(fp);
@@ -2666,9 +2658,8 @@ STALE_STATEID(stateid_t *stateid)
{
if (time_after((unsigned long)boot_time,
(unsigned long)stateid->si_boot)) {
- dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
- stateid->si_boot, stateid->si_stateownerid,
- stateid->si_fileid, stateid->si_generation);
+ dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
+ STATEID_VAL(stateid));
return 1;
}
return 0;
@@ -2680,9 +2671,8 @@ EXPIRED_STATEID(stateid_t *stateid)
if (time_before((unsigned long)boot_time,
((unsigned long)stateid->si_boot)) &&
time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
- dprintk("NFSD: expired stateid (%08x/%08x/%08x/%08x)!\n",
- stateid->si_boot, stateid->si_stateownerid,
- stateid->si_fileid, stateid->si_generation);
+ dprintk("NFSD: expired stateid " STATEID_FMT "!\n",
+ STATEID_VAL(stateid));
return 1;
}
return 0;
@@ -2696,9 +2686,8 @@ stateid_error_map(stateid_t *stateid)
if (EXPIRED_STATEID(stateid))
return nfserr_expired;
- dprintk("NFSD: bad stateid (%08x/%08x/%08x/%08x)!\n",
- stateid->si_boot, stateid->si_stateownerid,
- stateid->si_fileid, stateid->si_generation);
+ dprintk("NFSD: bad stateid " STATEID_FMT "!\n",
+ STATEID_VAL(stateid));
return nfserr_bad_stateid;
}
@@ -2884,10 +2873,8 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
struct svc_fh *current_fh = &cstate->current_fh;
__be32 status;
- dprintk("NFSD: preprocess_seqid_op: seqid=%d "
- "stateid = (%08x/%08x/%08x/%08x)\n", seqid,
- stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
- stateid->si_generation);
+ dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
+ seqid, STATEID_VAL(stateid));
*stpp = NULL;
*sopp = NULL;
@@ -3019,12 +3006,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
sop->so_confirmed = 1;
update_stateid(&stp->st_stateid);
memcpy(&oc->oc_resp_stateid, &stp->st_stateid, sizeof(stateid_t));
- dprintk("NFSD: nfsd4_open_confirm: success, seqid=%d "
- "stateid=(%08x/%08x/%08x/%08x)\n", oc->oc_seqid,
- stp->st_stateid.si_boot,
- stp->st_stateid.si_stateownerid,
- stp->st_stateid.si_fileid,
- stp->st_stateid.si_generation);
+ dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
+ __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stateid));
nfsd4_create_clid_dir(sop->so_client);
out:
@@ -3283,9 +3266,8 @@ find_delegation_stateid(struct inode *ino, stateid_t *stid)
struct nfs4_file *fp;
struct nfs4_delegation *dl;
- dprintk("NFSD:find_delegation_stateid stateid=(%08x/%08x/%08x/%08x)\n",
- stid->si_boot, stid->si_stateownerid,
- stid->si_fileid, stid->si_generation);
+ dprintk("NFSD: %s: stateid=" STATEID_FMT "\n", __func__,
+ STATEID_VAL(stid));
fp = find_file(ino);
if (!fp)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0fbd50cee1f..a8587e90fd5 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -40,24 +40,16 @@
* at the end of nfs4svc_decode_compoundargs.
*/
-#include <linux/param.h>
-#include <linux/smp.h>
-#include <linux/fs.h>
#include <linux/namei.h>
-#include <linux/vfs.h>
+#include <linux/statfs.h>
#include <linux/utsname.h>
-#include <linux/sunrpc/xdr.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/state.h>
-#include <linux/nfsd/xdr4.h>
#include <linux/nfsd_idmap.h>
-#include <linux/nfs4.h>
#include <linux/nfs4_acl.h>
-#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/svcauth_gss.h>
+#include "xdr4.h"
+#include "vfs.h"
+
#define NFSDDBG_FACILITY NFSDDBG_XDR
/*
@@ -2204,11 +2196,14 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
* we will not follow the cross mount and will fill the attributes
* directly from the mountpoint dentry.
*/
- if (d_mountpoint(dentry) && !attributes_need_mount(cd->rd_bmval))
- ignore_crossmnt = 1;
- else if (d_mountpoint(dentry)) {
+ if (nfsd_mountpoint(dentry, exp)) {
int err;
+ if (!(exp->ex_flags & NFSEXP_V4ROOT)
+ && !attributes_need_mount(cd->rd_bmval)) {
+ ignore_crossmnt = 1;
+ goto out_encode;
+ }
/*
* Why the heck aren't we just using nfsd_lookup??
* Different "."/".." handling? Something else?
@@ -2224,6 +2219,7 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
goto out_put;
}
+out_encode:
nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
cd->rd_rqstp, ignore_crossmnt);
out_put:
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 4638635c5d8..da08560c481 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -1,6 +1,4 @@
/*
- * linux/fs/nfsd/nfscache.c
- *
* Request reply cache. This is currently a global cache, but this may
* change in the future and be a per-client cache.
*
@@ -10,16 +8,8 @@
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
+#include "nfsd.h"
+#include "cache.h"
/* Size of reply cache. Common values are:
* 4.3BSD: 128
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 5c01fc148ce..2604c3e70ea 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1,46 +1,20 @@
/*
- * linux/fs/nfsd/nfsctl.c
- *
* Syscall interface to knfsd.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/module.h>
-
-#include <linux/linkage.h>
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
#include <linux/namei.h>
-#include <linux/fcntl.h>
-#include <linux/net.h>
-#include <linux/in.h>
-#include <linux/syscalls.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/inet.h>
-#include <linux/string.h>
#include <linux/ctype.h>
-#include <linux/nfs.h>
#include <linux/nfsd_idmap.h>
-#include <linux/lockd/bind.h>
-#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/clnt.h>
-#include <asm/uaccess.h>
-#include <net/ipv6.h>
+#include "nfsd.h"
+#include "cache.h"
/*
* We have a single directory with 9 nodes in it.
@@ -55,6 +29,7 @@ enum {
NFSD_Getfd,
NFSD_Getfs,
NFSD_List,
+ NFSD_Export_features,
NFSD_Fh,
NFSD_FO_UnlockIP,
NFSD_FO_UnlockFS,
@@ -173,6 +148,24 @@ static const struct file_operations exports_operations = {
.owner = THIS_MODULE,
};
+static int export_features_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "0x%x 0x%x\n", NFSEXP_ALLFLAGS, NFSEXP_SECINFO_FLAGS);
+ return 0;
+}
+
+static int export_features_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, export_features_show, NULL);
+}
+
+static struct file_operations export_features_operations = {
+ .open = export_features_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1330,6 +1323,8 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_List] = {"exports", &exports_operations, S_IRUGO},
+ [NFSD_Export_features] = {"export_features",
+ &export_features_operations, S_IRUGO},
[NFSD_FO_UnlockIP] = {"unlock_ip",
&transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_FO_UnlockFS] = {"unlock_filesystem",
diff --git a/include/linux/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 510ffdd5020..e942a1aaac9 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/nfsd/nfsd.h
- *
* Hodge-podge collection of knfsd-related stuff.
* I will sort this out later.
*
@@ -11,13 +9,9 @@
#define LINUX_NFSD_NFSD_H
#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/fs.h>
-#include <linux/posix_acl.h>
#include <linux/mount.h>
#include <linux/nfsd/debug.h>
-#include <linux/nfsd/nfsfh.h>
#include <linux/nfsd/export.h>
#include <linux/nfsd/stats.h>
/*
@@ -25,30 +19,10 @@
*/
#define NFSD_SUPPORTED_MINOR_VERSION 1
-/*
- * Flags for nfsd_permission
- */
-#define NFSD_MAY_NOP 0
-#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
-#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
-#define NFSD_MAY_READ 4 /* == MAY_READ */
-#define NFSD_MAY_SATTR 8
-#define NFSD_MAY_TRUNC 16
-#define NFSD_MAY_LOCK 32
-#define NFSD_MAY_OWNER_OVERRIDE 64
-#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
-#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
-
-#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
-#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
-
-/*
- * Callback function for readdir
- */
struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
};
-typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
+
extern struct svc_program nfsd_program;
extern struct svc_version nfsd_version2, nfsd_version3,
@@ -73,69 +47,6 @@ int nfsd_nrpools(void);
int nfsd_get_nrthreads(int n, int *);
int nfsd_set_nrthreads(int n, int *);
-/* nfsd/vfs.c */
-int fh_lock_parent(struct svc_fh *, struct dentry *);
-int nfsd_racache_init(int);
-void nfsd_racache_shutdown(void);
-int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
- struct svc_export **expp);
-__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
- const char *, unsigned int, struct svc_fh *);
-__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
- const char *, unsigned int,
- struct svc_export **, struct dentry **);
-__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
- struct iattr *, int, time_t);
-#ifdef CONFIG_NFSD_V4
-__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
- struct nfs4_acl *);
-int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
-#endif /* CONFIG_NFSD_V4 */
-__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
- char *name, int len, struct iattr *attrs,
- int type, dev_t rdev, struct svc_fh *res);
-#ifdef CONFIG_NFSD_V3
-__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
-__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *,
- char *name, int len, struct iattr *attrs,
- struct svc_fh *res, int createmode,
- u32 *verifier, int *truncp, int *created);
-__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
- loff_t, unsigned long);
-#endif /* CONFIG_NFSD_V3 */
-__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int,
- int, struct file **);
-void nfsd_close(struct file *);
-__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
- loff_t, struct kvec *, int, unsigned long *);
-__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
- loff_t, struct kvec *,int, unsigned long *, int *);
-__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
- char *, int *);
-__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
- char *name, int len, char *path, int plen,
- struct svc_fh *res, struct iattr *);
-__be32 nfsd_link(struct svc_rqst *, struct svc_fh *,
- char *, int, struct svc_fh *);
-__be32 nfsd_rename(struct svc_rqst *,
- struct svc_fh *, char *, int,
- struct svc_fh *, char *, int);
-__be32 nfsd_remove(struct svc_rqst *,
- struct svc_fh *, char *, int);
-__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
- char *name, int len);
-int nfsd_truncate(struct svc_rqst *, struct svc_fh *,
- unsigned long size);
-__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
- loff_t *, struct readdir_cd *, filldir_t);
-__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
- struct kstatfs *, int access);
-
-int nfsd_notify_change(struct inode *, struct iattr *);
-__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
- struct dentry *, int);
-int nfsd_sync_dir(struct dentry *dp);
-
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
#ifdef CONFIG_NFSD_V2_ACL
extern struct svc_version nfsd_acl_version2;
@@ -147,8 +58,6 @@ extern struct svc_version nfsd_acl_version3;
#else
#define nfsd_acl_version3 NULL
#endif
-struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
-int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
#endif
enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
@@ -159,6 +68,11 @@ int nfsd_create_serv(void);
extern int nfsd_max_blksize;
+static inline int nfsd_v4client(struct svc_rqst *rq)
+{
+ return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
+}
+
/*
* NFSv4 State
*/
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 01965b2f3a7..55c8e63af0b 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -1,6 +1,4 @@
/*
- * linux/fs/nfsd/nfsfh.c
- *
* NFS server file handle treatment.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
@@ -9,19 +7,11 @@
* ... and again Southern-Winter 2001 to support export_operations
*/
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/unistd.h>
-#include <linux/string.h>
-#include <linux/stat.h>
-#include <linux/dcache.h>
#include <linux/exportfs.h>
-#include <linux/mount.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcauth_gss.h>
-#include <linux/nfsd/nfsd.h>
+#include "nfsd.h"
+#include "vfs.h"
#include "auth.h"
#define NFSDDBG_FACILITY NFSDDBG_FH
@@ -96,8 +86,10 @@ nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, int type)
static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
struct svc_export *exp)
{
+ int flags = nfsexp_flags(rqstp, exp);
+
/* Check if the request originated from a secure port. */
- if (!rqstp->rq_secure && EX_SECURE(exp)) {
+ if (!rqstp->rq_secure && !(flags & NFSEXP_INSECURE_PORT)) {
RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
dprintk(KERN_WARNING
"nfsd: request from insecure port %s!\n",
@@ -109,6 +101,36 @@ static __be32 nfsd_setuser_and_check_port(struct svc_rqst *rqstp,
return nfserrno(nfsd_setuser(rqstp, exp));
}
+static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
+ struct dentry *dentry, struct svc_export *exp)
+{
+ if (!(exp->ex_flags & NFSEXP_V4ROOT))
+ return nfs_ok;
+ /*
+ * v2/v3 clients have no need for the V4ROOT export--they use
+ * the mount protocol instead; also, further V4ROOT checks may be
+ * in v4-specific code, in which case v2/v3 clients could bypass
+ * them.
+ */
+ if (!nfsd_v4client(rqstp))
+ return nfserr_stale;
+ /*
+ * We're exposing only the directories and symlinks that have to be
+ * traversed on the way to real exports:
+ */
+ if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
+ !S_ISLNK(dentry->d_inode->i_mode)))
+ return nfserr_stale;
+ /*
+ * A pseudoroot export gives permission to access only a
+ * single directory; the kernel has to make another upcall
+ * before granting access to anything else under it:
+ */
+ if (unlikely(dentry != exp->ex_path.dentry))
+ return nfserr_stale;
+ return nfs_ok;
+}
+
/*
* Use the given filehandle to look up the corresponding export and
* dentry. On success, the results are used to set fh_export and
@@ -232,14 +254,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
goto out;
}
- if (exp->ex_flags & NFSEXP_NOSUBTREECHECK) {
- error = nfsd_setuser_and_check_port(rqstp, exp);
- if (error) {
- dput(dentry);
- goto out;
- }
- }
-
if (S_ISDIR(dentry->d_inode->i_mode) &&
(dentry->d_flags & DCACHE_DISCONNECTED)) {
printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
@@ -294,28 +308,32 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
error = nfsd_set_fh_dentry(rqstp, fhp);
if (error)
goto out;
- dentry = fhp->fh_dentry;
- exp = fhp->fh_export;
- } else {
- /*
- * just rechecking permissions
- * (e.g. nfsproc_create calls fh_verify, then nfsd_create
- * does as well)
- */
- dprintk("nfsd: fh_verify - just checking\n");
- dentry = fhp->fh_dentry;
- exp = fhp->fh_export;
- /*
- * Set user creds for this exportpoint; necessary even
- * in the "just checking" case because this may be a
- * filehandle that was created by fh_compose, and that
- * is about to be used in another nfsv4 compound
- * operation.
- */
- error = nfsd_setuser_and_check_port(rqstp, exp);
- if (error)
- goto out;
}
+ dentry = fhp->fh_dentry;
+ exp = fhp->fh_export;
+ /*
+ * We still have to do all these permission checks, even when
+ * fh_dentry is already set:
+ * - fh_verify may be called multiple times with different
+ * "access" arguments (e.g. nfsd_proc_create calls
+ * fh_verify(...,NFSD_MAY_EXEC) first, then later (in
+ * nfsd_create) calls fh_verify(...,NFSD_MAY_CREATE).
+ * - in the NFSv4 case, the filehandle may have been filled
+ * in by fh_compose, and given a dentry, but further
+ * compound operations performed with that filehandle
+ * still need permissions checks. In the worst case, a
+ * mountpoint crossing may have changed the export
+ * options, and we may now need to use a different uid
+ * (for example, if different id-squashing options are in
+ * effect on the new filesystem).
+ */
+ error = check_pseudo_root(rqstp, dentry, exp);
+ if (error)
+ goto out;
+
+ error = nfsd_setuser_and_check_port(rqstp, exp);
+ if (error)
+ goto out;
error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
if (error)
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
new file mode 100644
index 00000000000..cdfb8c6a420
--- /dev/null
+++ b/fs/nfsd/nfsfh.h
@@ -0,0 +1,208 @@
+/* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */
+
+#ifndef _LINUX_NFSD_FH_INT_H
+#define _LINUX_NFSD_FH_INT_H
+
+#include <linux/nfsd/nfsfh.h>
+
+enum nfsd_fsid {
+ FSID_DEV = 0,
+ FSID_NUM,
+ FSID_MAJOR_MINOR,
+ FSID_ENCODE_DEV,
+ FSID_UUID4_INUM,
+ FSID_UUID8,
+ FSID_UUID16,
+ FSID_UUID16_INUM,
+};
+
+enum fsid_source {
+ FSIDSOURCE_DEV,
+ FSIDSOURCE_FSID,
+ FSIDSOURCE_UUID,
+};
+extern enum fsid_source fsid_source(struct svc_fh *fhp);
+
+
+/* This might look a little large to "inline" but in all calls except
+ * one, 'vers' is constant so most of the function disappears.
+ */
+static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino,
+ u32 fsid, unsigned char *uuid)
+{
+ u32 *up;
+ switch(vers) {
+ case FSID_DEV:
+ fsidv[0] = htonl((MAJOR(dev)<<16) |
+ MINOR(dev));
+ fsidv[1] = ino_t_to_u32(ino);
+ break;
+ case FSID_NUM:
+ fsidv[0] = fsid;
+ break;
+ case FSID_MAJOR_MINOR:
+ fsidv[0] = htonl(MAJOR(dev));
+ fsidv[1] = htonl(MINOR(dev));
+ fsidv[2] = ino_t_to_u32(ino);
+ break;
+
+ case FSID_ENCODE_DEV:
+ fsidv[0] = new_encode_dev(dev);
+ fsidv[1] = ino_t_to_u32(ino);
+ break;
+
+ case FSID_UUID4_INUM:
+ /* 4 byte fsid and inode number */
+ up = (u32*)uuid;
+ fsidv[0] = ino_t_to_u32(ino);
+ fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3];
+ break;
+
+ case FSID_UUID8:
+ /* 8 byte fsid */
+ up = (u32*)uuid;
+ fsidv[0] = up[0] ^ up[2];
+ fsidv[1] = up[1] ^ up[3];
+ break;
+
+ case FSID_UUID16:
+ /* 16 byte fsid - NFSv3+ only */
+ memcpy(fsidv, uuid, 16);
+ break;
+
+ case FSID_UUID16_INUM:
+ /* 8 byte inode and 16 byte fsid */
+ *(u64*)fsidv = (u64)ino;
+ memcpy(fsidv+2, uuid, 16);
+ break;
+ default: BUG();
+ }
+}
+
+static inline int key_len(int type)
+{
+ switch(type) {
+ case FSID_DEV: return 8;
+ case FSID_NUM: return 4;
+ case FSID_MAJOR_MINOR: return 12;
+ case FSID_ENCODE_DEV: return 8;
+ case FSID_UUID4_INUM: return 8;
+ case FSID_UUID8: return 8;
+ case FSID_UUID16: return 16;
+ case FSID_UUID16_INUM: return 24;
+ default: return 0;
+ }
+}
+
+/*
+ * Shorthand for dprintk()'s
+ */
+extern char * SVCFH_fmt(struct svc_fh *fhp);
+
+/*
+ * Function prototypes
+ */
+__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
+__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
+__be32 fh_update(struct svc_fh *);
+void fh_put(struct svc_fh *);
+
+static __inline__ struct svc_fh *
+fh_copy(struct svc_fh *dst, struct svc_fh *src)
+{
+ WARN_ON(src->fh_dentry || src->fh_locked);
+
+ *dst = *src;
+ return dst;
+}
+
+static inline void
+fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
+{
+ dst->fh_size = src->fh_size;
+ memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
+}
+
+static __inline__ struct svc_fh *
+fh_init(struct svc_fh *fhp, int maxsize)
+{
+ memset(fhp, 0, sizeof(*fhp));
+ fhp->fh_maxsize = maxsize;
+ return fhp;
+}
+
+#ifdef CONFIG_NFSD_V3
+/*
+ * Fill in the pre_op attr for the wcc data
+ */
+static inline void
+fill_pre_wcc(struct svc_fh *fhp)
+{
+ struct inode *inode;
+
+ inode = fhp->fh_dentry->d_inode;
+ if (!fhp->fh_pre_saved) {
+ fhp->fh_pre_mtime = inode->i_mtime;
+ fhp->fh_pre_ctime = inode->i_ctime;
+ fhp->fh_pre_size = inode->i_size;
+ fhp->fh_pre_change = inode->i_version;
+ fhp->fh_pre_saved = 1;
+ }
+}
+
+extern void fill_post_wcc(struct svc_fh *);
+#else
+#define fill_pre_wcc(ignored)
+#define fill_post_wcc(notused)
+#endif /* CONFIG_NFSD_V3 */
+
+
+/*
+ * Lock a file handle/inode
+ * NOTE: both fh_lock and fh_unlock are done "by hand" in
+ * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once,
+ * so any changes here should be reflected there.
+ */
+
+static inline void
+fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
+{
+ struct dentry *dentry = fhp->fh_dentry;
+ struct inode *inode;
+
+ BUG_ON(!dentry);
+
+ if (fhp->fh_locked) {
+ printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+ return;
+ }
+
+ inode = dentry->d_inode;
+ mutex_lock_nested(&inode->i_mutex, subclass);
+ fill_pre_wcc(fhp);
+ fhp->fh_locked = 1;
+}
+
+static inline void
+fh_lock(struct svc_fh *fhp)
+{
+ fh_lock_nested(fhp, I_MUTEX_NORMAL);
+}
+
+/*
+ * Unlock a file handle/inode
+ */
+static inline void
+fh_unlock(struct svc_fh *fhp)
+{
+ BUG_ON(!fhp->fh_dentry);
+
+ if (fhp->fh_locked) {
+ fill_post_wcc(fhp);
+ mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
+ fhp->fh_locked = 0;
+ }
+}
+
+#endif /* _LINUX_NFSD_FH_INT_H */
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 0eb9c820b7a..a047ad6111e 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -1,29 +1,14 @@
/*
- * nfsproc2.c Process version 2 NFS requests.
- * linux/fs/nfsd/nfs2proc.c
- *
* Process version 2 NFS requests.
*
* Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/linkage.h>
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
-#include <linux/net.h>
-#include <linux/in.h>
#include <linux/namei.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
+#include "cache.h"
+#include "xdr.h"
+#include "vfs.h"
typedef struct svc_rqst svc_rqst;
typedef struct svc_buf svc_buf;
@@ -758,6 +743,7 @@ nfserrno (int errno)
{ nfserr_io, -ETXTBSY },
{ nfserr_notsupp, -EOPNOTSUPP },
{ nfserr_toosmall, -ETOOSMALL },
+ { nfserr_serverfault, -ESERVERFAULT },
};
int i;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 67ea83eedd4..171699eb07c 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -1,6 +1,4 @@
/*
- * linux/fs/nfsd/nfssvc.c
- *
* Central processing for nfsd.
*
* Authors: Olaf Kirch (okir@monad.swb.de)
@@ -8,33 +6,19 @@
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/errno.h>
-#include <linux/nfs.h>
-#include <linux/in.h>
-#include <linux/uio.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
#include <linux/freezer.h>
#include <linux/fs_struct.h>
-#include <linux/kthread.h>
#include <linux/swap.h>
-#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
-#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
-#include <linux/sunrpc/cache.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/stats.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
+#include "nfsd.h"
+#include "cache.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_SVC
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index afd08e2c90a..4ce005dbf3e 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -1,20 +1,10 @@
/*
- * linux/fs/nfsd/nfsxdr.c
- *
* XDR support for nfsd
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/nfs.h>
-#include <linux/vfs.h>
-#include <linux/sunrpc/xdr.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/mm.h>
+#include "xdr.h"
#include "auth.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
diff --git a/include/linux/nfsd/state.h b/fs/nfsd/state.h
index b38d1132418..fefeae27f25 100644
--- a/include/linux/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -1,6 +1,4 @@
/*
- * linux/include/nfsd/state.h
- *
* Copyright (c) 2001 The Regents of the University of Michigan.
* All rights reserved.
*
@@ -37,9 +35,8 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/nfsd/nfsfh.h>
+#include "nfsfh.h"
typedef struct {
u32 cl_boot;
@@ -60,6 +57,13 @@ typedef struct {
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
+#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
+#define STATEID_VAL(s) \
+ (s)->si_boot, \
+ (s)->si_stateownerid, \
+ (s)->si_fileid, \
+ (s)->si_generation
+
struct nfsd4_cb_sequence {
/* args/res */
u32 cbs_minorversion;
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 71944cddf68..5232d3e8fb2 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -1,6 +1,4 @@
/*
- * linux/fs/nfsd/stats.c
- *
* procfs-based user access to knfsd statistics
*
* /proc/net/rpc/nfsd
@@ -23,18 +21,13 @@
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
*/
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/stat.h>
#include <linux/module.h>
-
-#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/stats.h>
-#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/stats.h>
+#include "nfsd.h"
+
struct nfsd_stats nfsdstats;
struct svc_stat nfsd_svcstats = {
.program = &nfsd_program,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index a293f027326..7c2e337d05a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1,7 +1,5 @@
#define MSNFS /* HACK HACK */
/*
- * linux/fs/nfsd/vfs.c
- *
* File operations used by nfsd. Some of these have been ripped from
* other parts of the kernel because they weren't exported, others
* are partial duplicates with added or changed functionality.
@@ -16,48 +14,31 @@
* Zerocpy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
*/
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/mount.h>
-#include <linux/major.h>
#include <linux/splice.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
#include <linux/fcntl.h>
-#include <linux/net.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/in.h>
-#include <linux/module.h>
#include <linux/namei.h>
-#include <linux/vfs.h>
#include <linux/delay.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#ifdef CONFIG_NFSD_V3
-#include <linux/nfs3.h>
-#include <linux/nfsd/xdr3.h>
-#endif /* CONFIG_NFSD_V3 */
-#include <linux/nfsd/nfsfh.h>
#include <linux/quotaops.h>
#include <linux/fsnotify.h>
-#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
+#include <linux/jhash.h>
+#include <linux/ima.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_NFSD_V3
+#include "xdr3.h"
+#endif /* CONFIG_NFSD_V3 */
+
#ifdef CONFIG_NFSD_V4
-#include <linux/nfs4.h>
#include <linux/nfs4_acl.h>
#include <linux/nfsd_idmap.h>
-#include <linux/security.h>
#endif /* CONFIG_NFSD_V4 */
-#include <linux/jhash.h>
-#include <linux/ima.h>
-#include <asm/uaccess.h>
+#include "nfsd.h"
+#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_FILEOP
@@ -89,12 +70,6 @@ struct raparm_hbucket {
#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
-static inline int
-nfsd_v4client(struct svc_rqst *rq)
-{
- return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4;
-}
-
/*
* Called from nfsd_lookup and encode_dirent. Check if we have crossed
* a mount point.
@@ -116,8 +91,16 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
exp2 = rqst_exp_get_by_name(rqstp, &path);
if (IS_ERR(exp2)) {
- if (PTR_ERR(exp2) != -ENOENT)
- err = PTR_ERR(exp2);
+ err = PTR_ERR(exp2);
+ /*
+ * We normally allow NFS clients to continue
+ * "underneath" a mountpoint that is not exported.
+ * The exception is V4ROOT, where no traversal is ever
+ * allowed without an explicit export of the new
+ * directory.
+ */
+ if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
+ err = 0;
path_put(&path);
goto out;
}
@@ -141,6 +124,53 @@ out:
return err;
}
+static void follow_to_parent(struct path *path)
+{
+ struct dentry *dp;
+
+ while (path->dentry == path->mnt->mnt_root && follow_up(path))
+ ;
+ dp = dget_parent(path->dentry);
+ dput(path->dentry);
+ path->dentry = dp;
+}
+
+static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
+{
+ struct svc_export *exp2;
+ struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
+ .dentry = dget(dparent)};
+
+ follow_to_parent(&path);
+
+ exp2 = rqst_exp_parent(rqstp, &path);
+ if (PTR_ERR(exp2) == -ENOENT) {
+ *dentryp = dget(dparent);
+ } else if (IS_ERR(exp2)) {
+ path_put(&path);
+ return PTR_ERR(exp2);
+ } else {
+ *dentryp = dget(path.dentry);
+ exp_put(*exp);
+ *exp = exp2;
+ }
+ path_put(&path);
+ return 0;
+}
+
+/*
+ * For nfsd purposes, we treat V4ROOT exports as though there was an
+ * export at *every* directory.
+ */
+int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
+{
+ if (d_mountpoint(dentry))
+ return 1;
+ if (!(exp->ex_flags & NFSEXP_V4ROOT))
+ return 0;
+ return dentry->d_inode != NULL;
+}
+
__be32
nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
const char *name, unsigned int len,
@@ -169,35 +199,13 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
dentry = dget(dparent);
else if (dparent != exp->ex_path.dentry)
dentry = dget_parent(dparent);
- else if (!EX_NOHIDE(exp))
+ else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
dentry = dget(dparent); /* .. == . just like at / */
else {
/* checking mountpoint crossing is very different when stepping up */
- struct svc_export *exp2 = NULL;
- struct dentry *dp;
- struct path path = {.mnt = mntget(exp->ex_path.mnt),
- .dentry = dget(dparent)};
-
- while (path.dentry == path.mnt->mnt_root &&
- follow_up(&path))
- ;
- dp = dget_parent(path.dentry);
- dput(path.dentry);
- path.dentry = dp;
-
- exp2 = rqst_exp_parent(rqstp, &path);
- if (PTR_ERR(exp2) == -ENOENT) {
- dentry = dget(dparent);
- } else if (IS_ERR(exp2)) {
- host_err = PTR_ERR(exp2);
- path_put(&path);
+ host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
+ if (host_err)
goto out_nfserr;
- } else {
- dentry = dget(path.dentry);
- exp_put(exp);
- exp = exp2;
- }
- path_put(&path);
}
} else {
fh_lock(fhp);
@@ -208,7 +216,7 @@ nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
/*
* check if we have crossed a mount point ...
*/
- if (d_mountpoint(dentry)) {
+ if (nfsd_mountpoint(dentry, exp)) {
if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) {
dput(dentry);
goto out_nfserr;
@@ -744,8 +752,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
- else
- ima_counts_get(*filp);
out_nfserr:
err = nfserrno(host_err);
out:
@@ -2124,8 +2130,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
*/
path.mnt = exp->ex_path.mnt;
path.dentry = dentry;
- err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC),
- IMA_COUNT_LEAVE);
+ err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
nfsd_out:
return err? nfserrno(err) : 0;
}
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
new file mode 100644
index 00000000000..4b1de0a9ea7
--- /dev/null
+++ b/fs/nfsd/vfs.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LINUX_NFSD_VFS_H
+#define LINUX_NFSD_VFS_H
+
+#include "nfsfh.h"
+
+/*
+ * Flags for nfsd_permission
+ */
+#define NFSD_MAY_NOP 0
+#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
+#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
+#define NFSD_MAY_READ 4 /* == MAY_READ */
+#define NFSD_MAY_SATTR 8
+#define NFSD_MAY_TRUNC 16
+#define NFSD_MAY_LOCK 32
+#define NFSD_MAY_OWNER_OVERRIDE 64
+#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
+#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
+
+#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
+#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
+
+/*
+ * Callback function for readdir
+ */
+typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
+
+/* nfsd/vfs.c */
+int fh_lock_parent(struct svc_fh *, struct dentry *);
+int nfsd_racache_init(int);
+void nfsd_racache_shutdown(void);
+int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
+ struct svc_export **expp);
+__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
+ const char *, unsigned int, struct svc_fh *);
+__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
+ const char *, unsigned int,
+ struct svc_export **, struct dentry **);
+__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
+ struct iattr *, int, time_t);
+int nfsd_mountpoint(struct dentry *, struct svc_export *);
+#ifdef CONFIG_NFSD_V4
+__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
+ struct nfs4_acl *);
+int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
+#endif /* CONFIG_NFSD_V4 */
+__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
+ char *name, int len, struct iattr *attrs,
+ int type, dev_t rdev, struct svc_fh *res);
+#ifdef CONFIG_NFSD_V3
+__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
+__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *,
+ char *name, int len, struct iattr *attrs,
+ struct svc_fh *res, int createmode,
+ u32 *verifier, int *truncp, int *created);
+__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
+ loff_t, unsigned long);
+#endif /* CONFIG_NFSD_V3 */
+__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int,
+ int, struct file **);
+void nfsd_close(struct file *);
+__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
+ loff_t, struct kvec *, int, unsigned long *);
+__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
+ loff_t, struct kvec *,int, unsigned long *, int *);
+__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
+ char *, int *);
+__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
+ char *name, int len, char *path, int plen,
+ struct svc_fh *res, struct iattr *);
+__be32 nfsd_link(struct svc_rqst *, struct svc_fh *,
+ char *, int, struct svc_fh *);
+__be32 nfsd_rename(struct svc_rqst *,
+ struct svc_fh *, char *, int,
+ struct svc_fh *, char *, int);
+__be32 nfsd_remove(struct svc_rqst *,
+ struct svc_fh *, char *, int);
+__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
+ char *name, int len);
+int nfsd_truncate(struct svc_rqst *, struct svc_fh *,
+ unsigned long size);
+__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
+ loff_t *, struct readdir_cd *, filldir_t);
+__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
+ struct kstatfs *, int access);
+
+int nfsd_notify_change(struct inode *, struct iattr *);
+__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
+ struct dentry *, int);
+int nfsd_sync_dir(struct dentry *dp);
+
+#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
+int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
+#endif
+
+#endif /* LINUX_NFSD_VFS_H */
diff --git a/include/linux/nfsd/xdr.h b/fs/nfsd/xdr.h
index a0132ef58f2..53b1863dd8f 100644
--- a/include/linux/nfsd/xdr.h
+++ b/fs/nfsd/xdr.h
@@ -1,15 +1,11 @@
-/*
- * linux/include/linux/nfsd/xdr.h
- *
- * XDR types for nfsd. This is mainly a typing exercise.
- */
+/* XDR types for nfsd. This is mainly a typing exercise. */
#ifndef LINUX_NFSD_H
#define LINUX_NFSD_H
-#include <linux/fs.h>
#include <linux/vfs.h>
-#include <linux/nfs.h>
+#include "nfsd.h"
+#include "nfsfh.h"
struct nfsd_fhandle {
struct svc_fh fh;
diff --git a/include/linux/nfsd/xdr3.h b/fs/nfsd/xdr3.h
index 421eddd65a2..7df980eb056 100644
--- a/include/linux/nfsd/xdr3.h
+++ b/fs/nfsd/xdr3.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/nfsd/xdr3.h
- *
* XDR types for NFSv3 in nfsd.
*
* Copyright (C) 1996-1998, Olaf Kirch <okir@monad.swb.de>
@@ -9,7 +7,7 @@
#ifndef _LINUX_NFSD_XDR3_H
#define _LINUX_NFSD_XDR3_H
-#include <linux/nfsd/xdr.h>
+#include "xdr.h"
struct nfsd3_sattrargs {
struct svc_fh fh;
diff --git a/include/linux/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 73164c2b3d2..efa33773953 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -1,6 +1,4 @@
/*
- * include/linux/nfsd/xdr4.h
- *
* Server-side types for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
@@ -39,7 +37,8 @@
#ifndef _LINUX_NFSD_XDR4_H
#define _LINUX_NFSD_XDR4_H
-#include <linux/nfs4.h>
+#include "state.h"
+#include "nfsd.h"
#define NFSD4_MAX_TAGLEN 128
#define XDR_LEN(n) (((n) + 3) & ~3)
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index d69e6ae5925..3f959f1879d 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -142,29 +142,75 @@ static void nilfs_palloc_desc_block_init(struct inode *inode,
}
}
+static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
+ int create,
+ void (*init_block)(struct inode *,
+ struct buffer_head *,
+ void *),
+ struct buffer_head **bhp,
+ struct nilfs_bh_assoc *prev,
+ spinlock_t *lock)
+{
+ int ret;
+
+ spin_lock(lock);
+ if (prev->bh && blkoff == prev->blkoff) {
+ get_bh(prev->bh);
+ *bhp = prev->bh;
+ spin_unlock(lock);
+ return 0;
+ }
+ spin_unlock(lock);
+
+ ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
+ if (!ret) {
+ spin_lock(lock);
+ /*
+ * The following code must be safe for change of the
+ * cache contents during the get block call.
+ */
+ brelse(prev->bh);
+ get_bh(*bhp);
+ prev->bh = *bhp;
+ prev->blkoff = blkoff;
+ spin_unlock(lock);
+ }
+ return ret;
+}
+
static int nilfs_palloc_get_desc_block(struct inode *inode,
unsigned long group,
int create, struct buffer_head **bhp)
{
- return nilfs_mdt_get_block(inode,
- nilfs_palloc_desc_blkoff(inode, group),
- create, nilfs_palloc_desc_block_init, bhp);
+ struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+ return nilfs_palloc_get_block(inode,
+ nilfs_palloc_desc_blkoff(inode, group),
+ create, nilfs_palloc_desc_block_init,
+ bhp, &cache->prev_desc, &cache->lock);
}
static int nilfs_palloc_get_bitmap_block(struct inode *inode,
unsigned long group,
int create, struct buffer_head **bhp)
{
- return nilfs_mdt_get_block(inode,
- nilfs_palloc_bitmap_blkoff(inode, group),
- create, NULL, bhp);
+ struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+ return nilfs_palloc_get_block(inode,
+ nilfs_palloc_bitmap_blkoff(inode, group),
+ create, NULL, bhp,
+ &cache->prev_bitmap, &cache->lock);
}
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
int create, struct buffer_head **bhp)
{
- return nilfs_mdt_get_block(inode, nilfs_palloc_entry_blkoff(inode, nr),
- create, NULL, bhp);
+ struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+ return nilfs_palloc_get_block(inode,
+ nilfs_palloc_entry_blkoff(inode, nr),
+ create, NULL, bhp,
+ &cache->prev_entry, &cache->lock);
}
static struct nilfs_palloc_group_desc *
@@ -176,13 +222,6 @@ nilfs_palloc_block_get_group_desc(const struct inode *inode,
group % nilfs_palloc_groups_per_desc_block(inode);
}
-static unsigned char *
-nilfs_palloc_block_get_bitmap(const struct inode *inode,
- const struct buffer_head *bh, void *kaddr)
-{
- return (unsigned char *)(kaddr + bh_offset(bh));
-}
-
void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
const struct buffer_head *bh, void *kaddr)
{
@@ -289,8 +328,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
if (ret < 0)
goto out_desc;
bitmap_kaddr = kmap(bitmap_bh->b_page);
- bitmap = nilfs_palloc_block_get_bitmap(
- inode, bitmap_bh, bitmap_kaddr);
+ bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
pos = nilfs_palloc_find_available_slot(
inode, group, group_offset, bitmap,
entries_per_group);
@@ -351,8 +389,7 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
desc = nilfs_palloc_block_get_group_desc(inode, group,
req->pr_desc_bh, desc_kaddr);
bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
- bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh,
- bitmap_kaddr);
+ bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
group_offset, bitmap))
@@ -385,8 +422,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
desc = nilfs_palloc_block_get_group_desc(inode, group,
req->pr_desc_bh, desc_kaddr);
bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
- bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh,
- bitmap_kaddr);
+ bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
group_offset, bitmap))
printk(KERN_WARNING "%s: entry numer %llu already freed\n",
@@ -472,8 +508,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
desc = nilfs_palloc_block_get_group_desc(
inode, group, desc_bh, desc_kaddr);
bitmap_kaddr = kmap(bitmap_bh->b_page);
- bitmap = nilfs_palloc_block_get_bitmap(
- inode, bitmap_bh, bitmap_kaddr);
+ bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
for (j = i, n = 0;
(j < nitems) && nilfs_palloc_group_is_in(inode, group,
entry_nrs[j]);
@@ -502,3 +537,30 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
}
return 0;
}
+
+void nilfs_palloc_setup_cache(struct inode *inode,
+ struct nilfs_palloc_cache *cache)
+{
+ NILFS_MDT(inode)->mi_palloc_cache = cache;
+ spin_lock_init(&cache->lock);
+}
+
+void nilfs_palloc_clear_cache(struct inode *inode)
+{
+ struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
+
+ spin_lock(&cache->lock);
+ brelse(cache->prev_desc.bh);
+ brelse(cache->prev_bitmap.bh);
+ brelse(cache->prev_entry.bh);
+ cache->prev_desc.bh = NULL;
+ cache->prev_bitmap.bh = NULL;
+ cache->prev_entry.bh = NULL;
+ spin_unlock(&cache->lock);
+}
+
+void nilfs_palloc_destroy_cache(struct inode *inode)
+{
+ nilfs_palloc_clear_cache(inode);
+ NILFS_MDT(inode)->mi_palloc_cache = NULL;
+}
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 4ace5475c2c..f4543ac4f56 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -69,4 +69,25 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t);
#define nilfs_clear_bit_atomic ext2_clear_bit_atomic
#define nilfs_find_next_zero_bit ext2_find_next_zero_bit
+/*
+ * persistent object allocator cache
+ */
+
+struct nilfs_bh_assoc {
+ unsigned long blkoff;
+ struct buffer_head *bh;
+};
+
+struct nilfs_palloc_cache {
+ spinlock_t lock;
+ struct nilfs_bh_assoc prev_desc;
+ struct nilfs_bh_assoc prev_bitmap;
+ struct nilfs_bh_assoc prev_entry;
+};
+
+void nilfs_palloc_setup_cache(struct inode *inode,
+ struct nilfs_palloc_cache *cache);
+void nilfs_palloc_clear_cache(struct inode *inode);
+void nilfs_palloc_destroy_cache(struct inode *inode);
+
#endif /* _NILFS_ALLOC_H */
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 08834df6ec6..f4a14ea2ed9 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -402,19 +402,11 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n)
{
inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
- if (NILFS_MDT(bmap->b_inode))
- nilfs_mdt_mark_dirty(bmap->b_inode);
- else
- mark_inode_dirty(bmap->b_inode);
}
void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
{
inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
- if (NILFS_MDT(bmap->b_inode))
- nilfs_mdt_mark_dirty(bmap->b_inode);
- else
- mark_inode_dirty(bmap->b_inode);
}
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 84c25382f8e..471e269536a 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -68,9 +68,34 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
truncate_inode_pages(btnc, 0);
}
+struct buffer_head *
+nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
+{
+ struct inode *inode = NILFS_BTNC_I(btnc);
+ struct buffer_head *bh;
+
+ bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+ if (unlikely(!bh))
+ return NULL;
+
+ if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
+ buffer_dirty(bh))) {
+ brelse(bh);
+ BUG();
+ }
+ memset(bh->b_data, 0, 1 << inode->i_blkbits);
+ bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
+ bh->b_blocknr = blocknr;
+ set_buffer_mapped(bh);
+ set_buffer_uptodate(bh);
+
+ unlock_page(bh->b_page);
+ page_cache_release(bh->b_page);
+ return bh;
+}
+
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
- sector_t pblocknr, struct buffer_head **pbh,
- int newblk)
+ sector_t pblocknr, struct buffer_head **pbh)
{
struct buffer_head *bh;
struct inode *inode = NILFS_BTNC_I(btnc);
@@ -81,19 +106,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
return -ENOMEM;
err = -EEXIST; /* internal code */
- if (newblk) {
- if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
- buffer_dirty(bh))) {
- brelse(bh);
- BUG();
- }
- memset(bh->b_data, 0, 1 << inode->i_blkbits);
- bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
- bh->b_blocknr = blocknr;
- set_buffer_mapped(bh);
- set_buffer_uptodate(bh);
- goto found;
- }
if (buffer_uptodate(bh) || buffer_dirty(bh))
goto found;
@@ -135,27 +147,6 @@ out_locked:
return err;
}
-int nilfs_btnode_get(struct address_space *btnc, __u64 blocknr,
- sector_t pblocknr, struct buffer_head **pbh, int newblk)
-{
- struct buffer_head *bh;
- int err;
-
- err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh, newblk);
- if (err == -EEXIST) /* internal code (cache hit) */
- return 0;
- if (unlikely(err))
- return err;
-
- bh = *pbh;
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- brelse(bh);
- return -EIO;
- }
- return 0;
-}
-
/**
* nilfs_btnode_delete - delete B-tree node buffer
* @bh: buffer to be deleted
@@ -244,12 +235,13 @@ retry:
unlock_page(obh->b_page);
}
- err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1);
- if (likely(!err)) {
- BUG_ON(nbh == obh);
- ctxt->newbh = nbh;
- }
- return err;
+ nbh = nilfs_btnode_create_block(btnc, newkey);
+ if (!nbh)
+ return -ENOMEM;
+
+ BUG_ON(nbh == obh);
+ ctxt->newbh = nbh;
+ return 0;
failed_unlock:
unlock_page(obh->b_page);
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 3e2275172ed..07da83f0771 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -40,10 +40,10 @@ struct nilfs_btnode_chkey_ctxt {
void nilfs_btnode_cache_init_once(struct address_space *);
void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
void nilfs_btnode_cache_clear(struct address_space *);
+struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
+ __u64 blocknr);
int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t,
- struct buffer_head **, int);
-int nilfs_btnode_get(struct address_space *, __u64, sector_t,
- struct buffer_head **, int);
+ struct buffer_head **);
void nilfs_btnode_delete(struct buffer_head *);
int nilfs_btnode_prepare_change_key(struct address_space *,
struct nilfs_btnode_chkey_ctxt *);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index e25b507a474..7cdd98b8d51 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -114,7 +114,18 @@ static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
{
struct address_space *btnc =
&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
- return nilfs_btnode_get(btnc, ptr, 0, bhp, 0);
+ int err;
+
+ err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp);
+ if (err)
+ return err == -EEXIST ? 0 : err;
+
+ wait_on_buffer(*bhp);
+ if (!buffer_uptodate(*bhp)) {
+ brelse(*bhp);
+ return -EIO;
+ }
+ return 0;
}
static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
@@ -122,12 +133,15 @@ static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
{
struct address_space *btnc =
&NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
- int ret;
+ struct buffer_head *bh;
- ret = nilfs_btnode_get(btnc, ptr, 0, bhp, 1);
- if (!ret)
- set_buffer_nilfs_volatile(*bhp);
- return ret;
+ bh = nilfs_btnode_create_block(btnc, ptr);
+ if (!bh)
+ return -ENOMEM;
+
+ set_buffer_nilfs_volatile(bh);
+ *bhp = bh;
+ return 0;
}
static inline int
@@ -444,6 +458,18 @@ nilfs_btree_get_node(const struct nilfs_btree *btree,
nilfs_btree_get_nonroot_node(path, level);
}
+static inline int
+nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
+{
+ if (unlikely(nilfs_btree_node_get_level(node) != level)) {
+ dump_stack();
+ printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
+ nilfs_btree_node_get_level(node), level);
+ return 1;
+ }
+ return 0;
+}
+
static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
struct nilfs_btree_path *path,
__u64 key, __u64 *ptrp, int minlevel)
@@ -467,7 +493,8 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
if (ret < 0)
return ret;
node = nilfs_btree_get_nonroot_node(path, level);
- BUG_ON(level != nilfs_btree_node_get_level(node));
+ if (nilfs_btree_bad_node(node, level))
+ return -EINVAL;
if (!found)
found = nilfs_btree_node_lookup(node, key, &index);
else
@@ -512,7 +539,8 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
if (ret < 0)
return ret;
node = nilfs_btree_get_nonroot_node(path, level);
- BUG_ON(level != nilfs_btree_node_get_level(node));
+ if (nilfs_btree_bad_node(node, level))
+ return -EINVAL;
index = nilfs_btree_node_get_nchildren(node) - 1;
ptr = nilfs_btree_node_get_ptr(btree, node, index);
path[level].bp_index = index;
@@ -638,13 +666,11 @@ static void nilfs_btree_promote_key(struct nilfs_btree *btree,
{
if (level < nilfs_btree_height(btree) - 1) {
do {
- lock_buffer(path[level].bp_bh);
nilfs_btree_node_set_key(
nilfs_btree_get_nonroot_node(path, level),
path[level].bp_index, key);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
- unlock_buffer(path[level].bp_bh);
} while ((path[level].bp_index == 0) &&
(++level < nilfs_btree_height(btree) - 1));
}
@@ -663,13 +689,11 @@ static void nilfs_btree_do_insert(struct nilfs_btree *btree,
struct nilfs_btree_node *node;
if (level < nilfs_btree_height(btree) - 1) {
- lock_buffer(path[level].bp_bh);
node = nilfs_btree_get_nonroot_node(path, level);
nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
path[level].bp_index);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
- unlock_buffer(path[level].bp_bh);
if (path[level].bp_index == 0)
nilfs_btree_promote_key(btree, path, level + 1,
@@ -689,9 +713,6 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree,
struct nilfs_btree_node *node, *left;
int nchildren, lnchildren, n, move;
- lock_buffer(path[level].bp_bh);
- lock_buffer(path[level].bp_sib_bh);
-
node = nilfs_btree_get_nonroot_node(path, level);
left = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
@@ -712,9 +733,6 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
- unlock_buffer(path[level].bp_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(node, 0));
@@ -740,9 +758,6 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree,
struct nilfs_btree_node *node, *right;
int nchildren, rnchildren, n, move;
- lock_buffer(path[level].bp_bh);
- lock_buffer(path[level].bp_sib_bh);
-
node = nilfs_btree_get_nonroot_node(path, level);
right = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
@@ -763,9 +778,6 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
- unlock_buffer(path[level].bp_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
path[level + 1].bp_index++;
nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(right, 0));
@@ -794,9 +806,6 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
__u64 newptr;
int nchildren, n, move;
- lock_buffer(path[level].bp_bh);
- lock_buffer(path[level].bp_sib_bh);
-
node = nilfs_btree_get_nonroot_node(path, level);
right = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
@@ -815,9 +824,6 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
- unlock_buffer(path[level].bp_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
newkey = nilfs_btree_node_get_key(right, 0);
newptr = path[level].bp_newreq.bpr_ptr;
@@ -852,8 +858,6 @@ static void nilfs_btree_grow(struct nilfs_btree *btree,
struct nilfs_btree_node *root, *child;
int n;
- lock_buffer(path[level].bp_sib_bh);
-
root = nilfs_btree_get_root(btree);
child = nilfs_btree_get_sib_node(path, level);
@@ -865,8 +869,6 @@ static void nilfs_btree_grow(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
path[level].bp_bh = path[level].bp_sib_bh;
path[level].bp_sib_bh = NULL;
@@ -1023,11 +1025,9 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
stats->bs_nblocks++;
- lock_buffer(bh);
nilfs_btree_node_init(btree,
(struct nilfs_btree_node *)bh->b_data,
0, level, 0, NULL, NULL);
- unlock_buffer(bh);
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_split;
}
@@ -1052,10 +1052,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
if (ret < 0)
goto err_out_curr_node;
- lock_buffer(bh);
nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
0, level, 0, NULL, NULL);
- unlock_buffer(bh);
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_grow;
@@ -1154,13 +1152,11 @@ static void nilfs_btree_do_delete(struct nilfs_btree *btree,
struct nilfs_btree_node *node;
if (level < nilfs_btree_height(btree) - 1) {
- lock_buffer(path[level].bp_bh);
node = nilfs_btree_get_nonroot_node(path, level);
nilfs_btree_node_delete(btree, node, keyp, ptrp,
path[level].bp_index);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
- unlock_buffer(path[level].bp_bh);
if (path[level].bp_index == 0)
nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(node, 0));
@@ -1180,9 +1176,6 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
- lock_buffer(path[level].bp_bh);
- lock_buffer(path[level].bp_sib_bh);
-
node = nilfs_btree_get_nonroot_node(path, level);
left = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
@@ -1197,9 +1190,6 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
- unlock_buffer(path[level].bp_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(node, 0));
@@ -1217,9 +1207,6 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
- lock_buffer(path[level].bp_bh);
- lock_buffer(path[level].bp_sib_bh);
-
node = nilfs_btree_get_nonroot_node(path, level);
right = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
@@ -1234,9 +1221,6 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
- unlock_buffer(path[level].bp_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
path[level + 1].bp_index++;
nilfs_btree_promote_key(btree, path, level + 1,
nilfs_btree_node_get_key(right, 0));
@@ -1255,9 +1239,6 @@ static void nilfs_btree_concat_left(struct nilfs_btree *btree,
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
- lock_buffer(path[level].bp_bh);
- lock_buffer(path[level].bp_sib_bh);
-
node = nilfs_btree_get_nonroot_node(path, level);
left = nilfs_btree_get_sib_node(path, level);
@@ -1268,9 +1249,6 @@ static void nilfs_btree_concat_left(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
- unlock_buffer(path[level].bp_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
nilfs_btnode_delete(path[level].bp_bh);
path[level].bp_bh = path[level].bp_sib_bh;
path[level].bp_sib_bh = NULL;
@@ -1286,9 +1264,6 @@ static void nilfs_btree_concat_right(struct nilfs_btree *btree,
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
- lock_buffer(path[level].bp_bh);
- lock_buffer(path[level].bp_sib_bh);
-
node = nilfs_btree_get_nonroot_node(path, level);
right = nilfs_btree_get_sib_node(path, level);
@@ -1299,9 +1274,6 @@ static void nilfs_btree_concat_right(struct nilfs_btree *btree,
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
- unlock_buffer(path[level].bp_bh);
- unlock_buffer(path[level].bp_sib_bh);
-
nilfs_btnode_delete(path[level].bp_sib_bh);
path[level].bp_sib_bh = NULL;
path[level + 1].bp_index++;
@@ -1316,7 +1288,6 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree,
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
- lock_buffer(path[level].bp_bh);
root = nilfs_btree_get_root(btree);
child = nilfs_btree_get_nonroot_node(path, level);
@@ -1324,7 +1295,6 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree,
nilfs_btree_node_set_level(root, level);
n = nilfs_btree_node_get_nchildren(child);
nilfs_btree_node_move_left(btree, root, child, n);
- unlock_buffer(path[level].bp_bh);
nilfs_btnode_delete(path[level].bp_bh);
path[level].bp_bh = NULL;
@@ -1699,7 +1669,6 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);
/* create child node at level 1 */
- lock_buffer(bh);
node = (struct nilfs_btree_node *)bh->b_data;
nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
nilfs_btree_node_insert(btree, node,
@@ -1709,7 +1678,6 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
if (!nilfs_bmap_dirty(bmap))
nilfs_bmap_set_dirty(bmap);
- unlock_buffer(bh);
brelse(bh);
/* create root node at level 2 */
@@ -2050,7 +2018,7 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap,
for (level = NILFS_BTREE_LEVEL_NODE_MIN;
level < NILFS_BTREE_LEVEL_MAX;
level++)
- list_splice(&lists[level], listp->prev);
+ list_splice_tail(&lists[level], listp);
}
static int nilfs_btree_assign_p(struct nilfs_btree *btree,
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 0e72bbbc6b6..4b82d84ade7 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -34,28 +34,6 @@ struct nilfs_btree;
struct nilfs_btree_path;
/**
- * struct nilfs_btree_node - B-tree node
- * @bn_flags: flags
- * @bn_level: level
- * @bn_nchildren: number of children
- * @bn_pad: padding
- */
-struct nilfs_btree_node {
- __u8 bn_flags;
- __u8 bn_level;
- __le16 bn_nchildren;
- __le32 bn_pad;
-};
-
-/* flags */
-#define NILFS_BTREE_NODE_ROOT 0x01
-
-/* level */
-#define NILFS_BTREE_LEVEL_DATA 0
-#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
-#define NILFS_BTREE_LEVEL_MAX 14
-
-/**
* struct nilfs_btree - B-tree structure
* @bt_bmap: bmap base structure
*/
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 3f5d5d06f53..d5ad54e204a 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -926,3 +926,29 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
up_read(&NILFS_MDT(cpfile)->mi_sem);
return ret;
}
+
+/**
+ * nilfs_cpfile_read - read cpfile inode
+ * @cpfile: cpfile inode
+ * @raw_inode: on-disk cpfile inode
+ */
+int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode)
+{
+ return nilfs_read_inode_common(cpfile, raw_inode);
+}
+
+/**
+ * nilfs_cpfile_new - create cpfile
+ * @nilfs: nilfs object
+ * @cpsize: size of a checkpoint entry
+ */
+struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize)
+{
+ struct inode *cpfile;
+
+ cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0);
+ if (cpfile)
+ nilfs_mdt_set_entry_size(cpfile, cpsize,
+ sizeof(struct nilfs_cpfile_header));
+ return cpfile;
+}
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index debea896e70..bc0809e0ab4 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -40,4 +40,7 @@ int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *);
ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned,
size_t);
+int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode);
+struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize);
+
#endif /* _NILFS_CPFILE_H */
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 1ff8e15bd36..187dd07ba86 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -33,6 +33,16 @@
#define NILFS_CNO_MIN ((__u64)1)
#define NILFS_CNO_MAX (~(__u64)0)
+struct nilfs_dat_info {
+ struct nilfs_mdt_info mi;
+ struct nilfs_palloc_cache palloc_cache;
+};
+
+static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
+{
+ return (struct nilfs_dat_info *)NILFS_MDT(dat);
+}
+
static int nilfs_dat_prepare_entry(struct inode *dat,
struct nilfs_palloc_req *req, int create)
{
@@ -425,3 +435,40 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
return nvi;
}
+
+/**
+ * nilfs_dat_read - read dat inode
+ * @dat: dat inode
+ * @raw_inode: on-disk dat inode
+ */
+int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
+{
+ return nilfs_read_inode_common(dat, raw_inode);
+}
+
+/**
+ * nilfs_dat_new - create dat file
+ * @nilfs: nilfs object
+ * @entry_size: size of a dat entry
+ */
+struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
+{
+ static struct lock_class_key dat_lock_key;
+ struct inode *dat;
+ struct nilfs_dat_info *di;
+ int err;
+
+ dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
+ if (dat) {
+ err = nilfs_palloc_init_blockgroup(dat, entry_size);
+ if (unlikely(err)) {
+ nilfs_mdt_destroy(dat);
+ return NULL;
+ }
+
+ di = NILFS_DAT_I(dat);
+ lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
+ nilfs_palloc_setup_cache(dat, &di->palloc_cache);
+ }
+ return dat;
+}
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index 406070d3ff4..d31c3aab0ef 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -53,4 +53,7 @@ int nilfs_dat_freev(struct inode *, __u64 *, size_t);
int nilfs_dat_move(struct inode *, __u64, sector_t);
ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t);
+int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode);
+struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size);
+
#endif /* _NILFS_DAT_H */
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index e097099bfc8..76d803e060a 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -99,9 +99,9 @@ static int nilfs_prepare_chunk(struct page *page,
NULL, nilfs_get_block);
}
-static int nilfs_commit_chunk(struct page *page,
- struct address_space *mapping,
- unsigned from, unsigned to)
+static void nilfs_commit_chunk(struct page *page,
+ struct address_space *mapping,
+ unsigned from, unsigned to)
{
struct inode *dir = mapping->host;
struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb);
@@ -112,15 +112,13 @@ static int nilfs_commit_chunk(struct page *page,
nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
- if (pos + copied > dir->i_size) {
+ if (pos + copied > dir->i_size)
i_size_write(dir, pos + copied);
- mark_inode_dirty(dir);
- }
if (IS_DIRSYNC(dir))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
err = nilfs_set_file_dirty(sbi, dir, nr_dirty);
+ WARN_ON(err); /* should not happen */
unlock_page(page);
- return err;
}
static void nilfs_check_page(struct page *page)
@@ -455,11 +453,10 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
BUG_ON(err);
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
- err = nilfs_commit_chunk(page, mapping, from, to);
+ nilfs_commit_chunk(page, mapping, from, to);
nilfs_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
- mark_inode_dirty(dir);
}
/*
@@ -548,10 +545,10 @@ got_it:
memcpy(de->name, name, namelen);
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
- err = nilfs_commit_chunk(page, page->mapping, from, to);
+ nilfs_commit_chunk(page, page->mapping, from, to);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
- mark_inode_dirty(dir);
+ nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */
out_put:
nilfs_put_page(page);
@@ -595,10 +592,9 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
if (pde)
pde->rec_len = cpu_to_le16(to - from);
dir->inode = 0;
- err = nilfs_commit_chunk(page, mapping, from, to);
+ nilfs_commit_chunk(page, mapping, from, to);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
/* NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
- mark_inode_dirty(inode);
out:
nilfs_put_page(page);
return err;
@@ -640,7 +636,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
memcpy(de->name, "..\0", 4);
nilfs_set_de_type(de, inode);
kunmap_atomic(kaddr, KM_USER0);
- err = nilfs_commit_chunk(page, mapping, 0, chunk_size);
+ nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail:
page_cache_release(page);
return err;
diff --git a/fs/nilfs2/gcdat.c b/fs/nilfs2/gcdat.c
index 93383c5cee9..dd5f7e0a95f 100644
--- a/fs/nilfs2/gcdat.c
+++ b/fs/nilfs2/gcdat.c
@@ -61,6 +61,8 @@ void nilfs_commit_gcdat_inode(struct the_nilfs *nilfs)
nilfs_bmap_commit_gcdat(gii->i_bmap, dii->i_bmap);
+ nilfs_palloc_clear_cache(dat);
+ nilfs_palloc_clear_cache(gcdat);
nilfs_clear_dirty_pages(mapping);
nilfs_copy_back_pages(mapping, gmapping);
/* note: mdt dirty flags should be cleared by segctor. */
@@ -79,6 +81,7 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *nilfs)
gcdat->i_state = I_CLEAR;
gii->i_flags = 0;
+ nilfs_palloc_clear_cache(gcdat);
truncate_inode_pages(gcdat->i_mapping, 0);
truncate_inode_pages(&gii->i_btnode_cache, 0);
}
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index e6de0a27ab5..e16a6664dfa 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -149,7 +149,7 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh)
{
int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
- vbn ? : pbn, pbn, out_bh, 0);
+ vbn ? : pbn, pbn, out_bh);
if (ret == -EEXIST) /* internal code (cache hit) */
ret = 0;
return ret;
@@ -212,9 +212,10 @@ void nilfs_destroy_gccache(struct the_nilfs *nilfs)
static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino,
__u64 cno)
{
- struct inode *inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS);
+ struct inode *inode;
struct nilfs_inode_info *ii;
+ inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS, 0);
if (!inode)
return NULL;
@@ -265,7 +266,6 @@ struct inode *nilfs_gc_iget(struct the_nilfs *nilfs, ino_t ino, __u64 cno)
*/
void nilfs_clear_gcinode(struct inode *inode)
{
- nilfs_mdt_clear(inode);
nilfs_mdt_destroy(inode);
}
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index de86401f209..922d9dd42c8 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -29,6 +29,17 @@
#include "alloc.h"
#include "ifile.h"
+
+struct nilfs_ifile_info {
+ struct nilfs_mdt_info mi;
+ struct nilfs_palloc_cache palloc_cache;
+};
+
+static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile)
+{
+ return (struct nilfs_ifile_info *)NILFS_MDT(ifile);
+}
+
/**
* nilfs_ifile_create_inode - create a new disk inode
* @ifile: ifile inode
@@ -148,3 +159,27 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
}
return err;
}
+
+/**
+ * nilfs_ifile_new - create inode file
+ * @sbi: nilfs_sb_info struct
+ * @inode_size: size of an inode
+ */
+struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size)
+{
+ struct inode *ifile;
+ int err;
+
+ ifile = nilfs_mdt_new(sbi->s_nilfs, sbi->s_super, NILFS_IFILE_INO,
+ sizeof(struct nilfs_ifile_info));
+ if (ifile) {
+ err = nilfs_palloc_init_blockgroup(ifile, inode_size);
+ if (unlikely(err)) {
+ nilfs_mdt_destroy(ifile);
+ return NULL;
+ }
+ nilfs_palloc_setup_cache(ifile,
+ &NILFS_IFILE_I(ifile)->palloc_cache);
+ }
+ return ifile;
+}
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index ecc3ba76db4..cbca32e498f 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -49,4 +49,6 @@ int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **);
int nilfs_ifile_delete_inode(struct inode *, ino_t);
int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **);
+struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size);
+
#endif /* _NILFS_IFILE_H */
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2a0a5a3ac13..7868cc122ac 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -97,6 +97,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
nilfs_transaction_abort(inode->i_sb);
goto out;
}
+ nilfs_mark_inode_dirty(inode);
nilfs_transaction_commit(inode->i_sb); /* never fails */
/* Error handling should be detailed */
set_buffer_new(bh_result);
@@ -322,7 +323,6 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
nilfs_init_acl(), proper cancellation of
above jobs should be considered */
- mark_inode_dirty(inode);
return inode;
failed_acl:
@@ -525,7 +525,6 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);
- /* The buffer is guarded with lock_buffer() by the caller */
if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
@@ -599,6 +598,7 @@ void nilfs_truncate(struct inode *inode)
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
+ nilfs_mark_inode_dirty(inode);
nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
nilfs_transaction_commit(sb);
/* May construct a logical segment and may fail in sync mode.
@@ -623,6 +623,7 @@ void nilfs_delete_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0);
nilfs_truncate_bmap(ii, 0);
+ nilfs_mark_inode_dirty(inode);
nilfs_free_inode(inode);
/* nilfs_free_inode() marks inode buffer dirty */
if (IS_SYNC(inode))
@@ -745,9 +746,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
"failed to reget inode block.\n");
return err;
}
- lock_buffer(ibh);
nilfs_update_inode(inode, ibh);
- unlock_buffer(ibh);
nilfs_mdt_mark_buffer_dirty(ibh);
nilfs_mdt_mark_dirty(sbi->s_ifile);
brelse(ibh);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index f6326112d64..06713ffcc7f 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -186,7 +186,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
}
static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
- struct buffer_head **out_bh)
+ int readahead, struct buffer_head **out_bh)
{
struct buffer_head *first_bh, *bh;
unsigned long blkoff;
@@ -200,16 +200,18 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
if (unlikely(err))
goto failed;
- blkoff = block + 1;
- for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
- err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
- if (likely(!err || err == -EEXIST))
- brelse(bh);
- else if (err != -EBUSY)
- break; /* abort readahead if bmap lookup failed */
-
- if (!buffer_locked(first_bh))
- goto out_no_wait;
+ if (readahead) {
+ blkoff = block + 1;
+ for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
+ err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
+ if (likely(!err || err == -EEXIST))
+ brelse(bh);
+ else if (err != -EBUSY)
+ break;
+ /* abort readahead if bmap lookup failed */
+ if (!buffer_locked(first_bh))
+ goto out_no_wait;
+ }
}
wait_on_buffer(first_bh);
@@ -263,7 +265,7 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
/* Should be rewritten with merging nilfs_mdt_read_block() */
retry:
- ret = nilfs_mdt_read_block(inode, blkoff, out_bh);
+ ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
if (!create || ret != -ENOENT)
return ret;
@@ -371,7 +373,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
struct buffer_head *bh;
int err;
- err = nilfs_mdt_read_block(inode, block, &bh);
+ err = nilfs_mdt_read_block(inode, block, 0, &bh);
if (unlikely(err))
return err;
nilfs_mark_buffer_dirty(bh);
@@ -445,9 +447,17 @@ static const struct file_operations def_mdt_fops;
* longer than those of the super block structs; they may continue for
* several consecutive mounts/umounts. This would need discussions.
*/
+/**
+ * nilfs_mdt_new_common - allocate a pseudo inode for metadata file
+ * @nilfs: nilfs object
+ * @sb: super block instance the metadata file belongs to
+ * @ino: inode number
+ * @gfp_mask: gfp mask for data pages
+ * @objsz: size of the private object attached to inode->i_private
+ */
struct inode *
nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
- ino_t ino, gfp_t gfp_mask)
+ ino_t ino, gfp_t gfp_mask, size_t objsz)
{
struct inode *inode = nilfs_alloc_inode_common(nilfs);
@@ -455,8 +465,9 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
return NULL;
else {
struct address_space * const mapping = &inode->i_data;
- struct nilfs_mdt_info *mi = kzalloc(sizeof(*mi), GFP_NOFS);
+ struct nilfs_mdt_info *mi;
+ mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
if (!mi) {
nilfs_destroy_inode(inode);
return NULL;
@@ -513,11 +524,11 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
}
struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
- ino_t ino)
+ ino_t ino, size_t objsz)
{
- struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino,
- NILFS_MDT_GFP);
+ struct inode *inode;
+ inode = nilfs_mdt_new_common(nilfs, sb, ino, NILFS_MDT_GFP, objsz);
if (!inode)
return NULL;
@@ -544,14 +555,15 @@ void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow)
&NILFS_I(orig)->i_btnode_cache;
}
-void nilfs_mdt_clear(struct inode *inode)
+static void nilfs_mdt_clear(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
invalidate_mapping_pages(inode->i_mapping, 0, -1);
truncate_inode_pages(inode->i_mapping, 0);
- nilfs_bmap_clear(ii->i_bmap);
+ if (test_bit(NILFS_I_BMAP, &ii->i_state))
+ nilfs_bmap_clear(ii->i_bmap);
nilfs_btnode_cache_clear(&ii->i_btnode_cache);
}
@@ -559,6 +571,10 @@ void nilfs_mdt_destroy(struct inode *inode)
{
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+ if (mdi->mi_palloc_cache)
+ nilfs_palloc_destroy_cache(inode);
+ nilfs_mdt_clear(inode);
+
kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
kfree(mdi);
nilfs_destroy_inode(inode);
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 431599733c9..6c4bbb0470f 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -36,6 +36,7 @@
* @mi_entry_size: size of an entry
* @mi_first_entry_offset: offset to the first entry
* @mi_entries_per_block: number of entries in a block
+ * @mi_palloc_cache: persistent object allocator cache
* @mi_blocks_per_group: number of blocks in a group
* @mi_blocks_per_desc_block: number of blocks per descriptor block
*/
@@ -46,6 +47,7 @@ struct nilfs_mdt_info {
unsigned mi_entry_size;
unsigned mi_first_entry_offset;
unsigned long mi_entries_per_block;
+ struct nilfs_palloc_cache *mi_palloc_cache;
unsigned long mi_blocks_per_group;
unsigned long mi_blocks_per_desc_block;
};
@@ -74,11 +76,11 @@ int nilfs_mdt_forget_block(struct inode *, unsigned long);
int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
int nilfs_mdt_fetch_dirty(struct inode *);
-struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t);
+struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t,
+ size_t);
struct inode *nilfs_mdt_new_common(struct the_nilfs *, struct super_block *,
- ino_t, gfp_t);
+ ino_t, gfp_t, size_t);
void nilfs_mdt_destroy(struct inode *);
-void nilfs_mdt_clear(struct inode *);
void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned);
void nilfs_mdt_set_shadow(struct inode *, struct inode *);
@@ -104,21 +106,4 @@ static inline __u64 nilfs_mdt_cno(struct inode *inode)
#define nilfs_mdt_bgl_lock(inode, bg) \
(&NILFS_MDT(inode)->mi_bgl->locks[(bg) & (NR_BG_LOCKS-1)].lock)
-
-static inline int
-nilfs_mdt_read_inode_direct(struct inode *inode, struct buffer_head *bh,
- unsigned n)
-{
- return nilfs_read_inode_common(
- inode, (struct nilfs_inode *)(bh->b_data + n));
-}
-
-static inline void
-nilfs_mdt_write_inode_direct(struct inode *inode, struct buffer_head *bh,
- unsigned n)
-{
- nilfs_write_inode_common(
- inode, (struct nilfs_inode *)(bh->b_data + n), 1);
-}
-
#endif /* _NILFS_MDT_H */
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index ed02e886fa7..07ba838ef08 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -120,7 +120,7 @@ static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode,
inode->i_op = &nilfs_file_inode_operations;
inode->i_fop = &nilfs_file_operations;
inode->i_mapping->a_ops = &nilfs_aops;
- mark_inode_dirty(inode);
+ nilfs_mark_inode_dirty(inode);
err = nilfs_add_nondir(dentry, inode);
}
if (!err)
@@ -148,7 +148,7 @@ nilfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
- mark_inode_dirty(inode);
+ nilfs_mark_inode_dirty(inode);
err = nilfs_add_nondir(dentry, inode);
}
if (!err)
@@ -188,7 +188,7 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_fail;
/* mark_inode_dirty(inode); */
- /* nilfs_new_inode() and page_symlink() do this */
+ /* page_symlink() does this */
err = nilfs_add_nondir(dentry, inode);
out:
@@ -200,7 +200,8 @@ out:
return err;
out_fail:
- inode_dec_link_count(inode);
+ drop_nlink(inode);
+ nilfs_mark_inode_dirty(inode);
iput(inode);
goto out;
}
@@ -245,7 +246,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (err)
return err;
- inode_inc_link_count(dir);
+ inc_nlink(dir);
inode = nilfs_new_inode(dir, S_IFDIR | mode);
err = PTR_ERR(inode);
@@ -256,7 +257,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
inode->i_fop = &nilfs_dir_operations;
inode->i_mapping->a_ops = &nilfs_aops;
- inode_inc_link_count(inode);
+ inc_nlink(inode);
err = nilfs_make_empty(inode, dir);
if (err)
@@ -266,6 +267,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (err)
goto out_fail;
+ nilfs_mark_inode_dirty(inode);
d_instantiate(dentry, inode);
out:
if (!err)
@@ -276,26 +278,23 @@ out:
return err;
out_fail:
- inode_dec_link_count(inode);
- inode_dec_link_count(inode);
+ drop_nlink(inode);
+ drop_nlink(inode);
+ nilfs_mark_inode_dirty(inode);
iput(inode);
out_dir:
- inode_dec_link_count(dir);
+ drop_nlink(dir);
+ nilfs_mark_inode_dirty(dir);
goto out;
}
-static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
+static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode;
struct nilfs_dir_entry *de;
struct page *page;
- struct nilfs_transaction_info ti;
int err;
- err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
- if (err)
- return err;
-
err = -ENOENT;
de = nilfs_find_entry(dir, dentry, &page);
if (!de)
@@ -317,12 +316,28 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
goto out;
inode->i_ctime = dir->i_ctime;
- inode_dec_link_count(inode);
+ drop_nlink(inode);
err = 0;
out:
- if (!err)
+ return err;
+}
+
+static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct nilfs_transaction_info ti;
+ int err;
+
+ err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
+ if (err)
+ return err;
+
+ err = nilfs_do_unlink(dir, dentry);
+
+ if (!err) {
+ nilfs_mark_inode_dirty(dir);
+ nilfs_mark_inode_dirty(dentry->d_inode);
err = nilfs_transaction_commit(dir->i_sb);
- else
+ } else
nilfs_transaction_abort(dir->i_sb);
return err;
@@ -340,11 +355,13 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
err = -ENOTEMPTY;
if (nilfs_empty_dir(inode)) {
- err = nilfs_unlink(dir, dentry);
+ err = nilfs_do_unlink(dir, dentry);
if (!err) {
inode->i_size = 0;
- inode_dec_link_count(inode);
- inode_dec_link_count(dir);
+ drop_nlink(inode);
+ nilfs_mark_inode_dirty(inode);
+ drop_nlink(dir);
+ nilfs_mark_inode_dirty(dir);
}
}
if (!err)
@@ -395,42 +412,48 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_de = nilfs_find_entry(new_dir, new_dentry, &new_page);
if (!new_de)
goto out_dir;
- inode_inc_link_count(old_inode);
+ inc_nlink(old_inode);
nilfs_set_link(new_dir, new_de, new_page, old_inode);
+ nilfs_mark_inode_dirty(new_dir);
new_inode->i_ctime = CURRENT_TIME;
if (dir_de)
drop_nlink(new_inode);
- inode_dec_link_count(new_inode);
+ drop_nlink(new_inode);
+ nilfs_mark_inode_dirty(new_inode);
} else {
if (dir_de) {
err = -EMLINK;
if (new_dir->i_nlink >= NILFS_LINK_MAX)
goto out_dir;
}
- inode_inc_link_count(old_inode);
+ inc_nlink(old_inode);
err = nilfs_add_link(new_dentry, old_inode);
if (err) {
- inode_dec_link_count(old_inode);
+ drop_nlink(old_inode);
+ nilfs_mark_inode_dirty(old_inode);
goto out_dir;
}
- if (dir_de)
- inode_inc_link_count(new_dir);
+ if (dir_de) {
+ inc_nlink(new_dir);
+ nilfs_mark_inode_dirty(new_dir);
+ }
}
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
- * inode_dec_link_count() will mark the inode dirty.
*/
old_inode->i_ctime = CURRENT_TIME;
nilfs_delete_entry(old_de, old_page);
- inode_dec_link_count(old_inode);
+ drop_nlink(old_inode);
if (dir_de) {
nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
- inode_dec_link_count(old_dir);
+ drop_nlink(old_dir);
}
+ nilfs_mark_inode_dirty(old_dir);
+ nilfs_mark_inode_dirty(old_inode);
err = nilfs_transaction_commit(old_dir->i_sb);
return err;
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 6dc83591d11..c9c96c7825d 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -770,14 +770,8 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
nilfs_finish_roll_forward(nilfs, sbi, ri);
}
- nilfs_detach_checkpoint(sbi);
- return 0;
-
failed:
nilfs_detach_checkpoint(sbi);
- nilfs_mdt_clear(nilfs->ns_cpfile);
- nilfs_mdt_clear(nilfs->ns_sufile);
- nilfs_mdt_clear(nilfs->ns_dat);
return err;
}
@@ -804,6 +798,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
struct nilfs_segsum_info ssi;
sector_t pseg_start, pseg_end, sr_pseg_start = 0;
sector_t seg_start, seg_end; /* range of full segment (block number) */
+ sector_t b, end;
u64 seg_seq;
__u64 segnum, nextnum = 0;
__u64 cno;
@@ -819,6 +814,11 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
/* Calculate range of segment */
nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
+ /* Read ahead segment */
+ b = seg_start;
+ while (b <= seg_end)
+ sb_breadahead(sbi->s_super, b++);
+
for (;;) {
/* Load segment summary */
ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
@@ -841,14 +841,20 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
ri->ri_nextnum = nextnum;
empty_seg = 0;
+ if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
+ /* This will never happen because a superblock
+ (last_segment) always points to a pseg
+ having a super root. */
+ ret = NILFS_SEG_FAIL_CONSISTENCY;
+ goto failed;
+ }
+
+ if (pseg_start == seg_start) {
+ nilfs_get_segment_range(nilfs, nextnum, &b, &end);
+ while (b <= end)
+ sb_breadahead(sbi->s_super, b++);
+ }
if (!NILFS_SEG_HAS_SR(&ssi)) {
- if (!scan_newer) {
- /* This will never happen because a superblock
- (last_segment) always points to a pseg
- having a super root. */
- ret = NILFS_SEG_FAIL_CONSISTENCY;
- goto failed;
- }
if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
ri->ri_lsegs_start = pseg_start;
ri->ri_lsegs_start_seq = seg_seq;
@@ -919,7 +925,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
super_root_found:
/* Updating pointers relating to the latest checkpoint */
- list_splice(&segments, ri->ri_used_segments.prev);
+ list_splice_tail(&segments, &ri->ri_used_segments);
nilfs->ns_last_pseg = sr_pseg_start;
nilfs->ns_last_seq = nilfs->ns_seg_seq;
nilfs->ns_last_cno = ri->ri_cno;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index e6d9e37fa24..645c78656aa 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -24,10 +24,22 @@
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
+#include <linux/backing-dev.h>
#include "page.h"
#include "segbuf.h"
+struct nilfs_write_info {
+ struct the_nilfs *nilfs;
+ struct bio *bio;
+ int start, end; /* The region to be submitted */
+ int rest_blocks;
+ int max_pages;
+ int nr_vecs;
+ sector_t blocknr;
+};
+
+
static struct kmem_cache *nilfs_segbuf_cachep;
static void nilfs_segbuf_init_once(void *obj)
@@ -63,6 +75,11 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
INIT_LIST_HEAD(&segbuf->sb_list);
INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
+
+ init_completion(&segbuf->sb_bio_event);
+ atomic_set(&segbuf->sb_err, 0);
+ segbuf->sb_nbio = 0;
+
return segbuf;
}
@@ -83,6 +100,22 @@ void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}
+/**
+ * nilfs_segbuf_map_cont - map a new log behind a given log
+ * @segbuf: new segment buffer
+ * @prev: segment buffer containing a log to be continued
+ */
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+ struct nilfs_segment_buffer *prev)
+{
+ segbuf->sb_segnum = prev->sb_segnum;
+ segbuf->sb_fseg_start = prev->sb_fseg_start;
+ segbuf->sb_fseg_end = prev->sb_fseg_end;
+ segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
+ segbuf->sb_rest_blocks =
+ segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
+}
+
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
__u64 nextnum, struct the_nilfs *nilfs)
{
@@ -132,8 +165,6 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
segbuf->sb_sum.ctime = ctime;
-
- segbuf->sb_io_error = 0;
return 0;
}
@@ -219,7 +250,7 @@ void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
raw_sum->ss_datasum = cpu_to_le32(crc);
}
-void nilfs_release_buffers(struct list_head *list)
+static void nilfs_release_buffers(struct list_head *list)
{
struct buffer_head *bh, *n;
@@ -241,13 +272,56 @@ void nilfs_release_buffers(struct list_head *list)
}
}
+static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+{
+ nilfs_release_buffers(&segbuf->sb_segsum_buffers);
+ nilfs_release_buffers(&segbuf->sb_payload_buffers);
+}
+
+/*
+ * Iterators for segment buffers
+ */
+void nilfs_clear_logs(struct list_head *logs)
+{
+ struct nilfs_segment_buffer *segbuf;
+
+ list_for_each_entry(segbuf, logs, sb_list)
+ nilfs_segbuf_clear(segbuf);
+}
+
+void nilfs_truncate_logs(struct list_head *logs,
+ struct nilfs_segment_buffer *last)
+{
+ struct nilfs_segment_buffer *n, *segbuf;
+
+ segbuf = list_prepare_entry(last, logs, sb_list);
+ list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
+ list_del_init(&segbuf->sb_list);
+ nilfs_segbuf_clear(segbuf);
+ nilfs_segbuf_free(segbuf);
+ }
+}
+
+int nilfs_wait_on_logs(struct list_head *logs)
+{
+ struct nilfs_segment_buffer *segbuf;
+ int err;
+
+ list_for_each_entry(segbuf, logs, sb_list) {
+ err = nilfs_segbuf_wait(segbuf);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
/*
* BIO operations
*/
static void nilfs_end_bio_write(struct bio *bio, int err)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct nilfs_write_info *wi = bio->bi_private;
+ struct nilfs_segment_buffer *segbuf = bio->bi_private;
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
@@ -256,21 +330,22 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
}
if (!uptodate)
- atomic_inc(&wi->err);
+ atomic_inc(&segbuf->sb_err);
bio_put(bio);
- complete(&wi->bio_event);
+ complete(&segbuf->sb_bio_event);
}
-static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
+static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+ struct nilfs_write_info *wi, int mode)
{
struct bio *bio = wi->bio;
int err;
- if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
- wait_for_completion(&wi->bio_event);
- wi->nbio--;
- if (unlikely(atomic_read(&wi->err))) {
+ if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
+ wait_for_completion(&segbuf->sb_bio_event);
+ segbuf->sb_nbio--;
+ if (unlikely(atomic_read(&segbuf->sb_err))) {
bio_put(bio);
err = -EIO;
goto failed;
@@ -278,7 +353,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
}
bio->bi_end_io = nilfs_end_bio_write;
- bio->bi_private = wi;
+ bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
@@ -286,7 +361,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
err = -EOPNOTSUPP;
goto failed;
}
- wi->nbio++;
+ segbuf->sb_nbio++;
bio_put(bio);
wi->bio = NULL;
@@ -301,17 +376,15 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
}
/**
- * nilfs_alloc_seg_bio - allocate a bio for writing segment.
- * @sb: super block
- * @start: beginning disk block number of this BIO.
+ * nilfs_alloc_seg_bio - allocate a new bio for writing log
+ * @nilfs: nilfs object
+ * @start: start block number of the bio
* @nr_vecs: request size of page vector.
*
- * alloc_seg_bio() allocates a new BIO structure and initialize it.
- *
* Return Value: On success, pointer to the struct bio is returned.
* On error, NULL is returned.
*/
-static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
+static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
int nr_vecs)
{
struct bio *bio;
@@ -322,36 +395,33 @@ static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
bio = bio_alloc(GFP_NOIO, nr_vecs);
}
if (likely(bio)) {
- bio->bi_bdev = sb->s_bdev;
- bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9);
+ bio->bi_bdev = nilfs->ns_bdev;
+ bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
}
return bio;
}
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
- struct nilfs_write_info *wi)
+static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
+ struct nilfs_write_info *wi)
{
wi->bio = NULL;
wi->rest_blocks = segbuf->sb_sum.nblocks;
- wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
+ wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
wi->start = wi->end = 0;
- wi->nbio = 0;
wi->blocknr = segbuf->sb_pseg_start;
-
- atomic_set(&wi->err, 0);
- init_completion(&wi->bio_event);
}
-static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
- int mode)
+static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
+ struct nilfs_write_info *wi,
+ struct buffer_head *bh, int mode)
{
int len, err;
BUG_ON(wi->nr_vecs <= 0);
repeat:
if (!wi->bio) {
- wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end,
+ wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
wi->nr_vecs);
if (unlikely(!wi->bio))
return -ENOMEM;
@@ -363,76 +433,83 @@ static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
return 0;
}
/* bio is FULL */
- err = nilfs_submit_seg_bio(wi, mode);
+ err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
/* never submit current bh */
if (likely(!err))
goto repeat;
return err;
}
+/**
+ * nilfs_segbuf_write - submit write requests of a log
+ * @segbuf: buffer storing a log to be written
+ * @nilfs: nilfs object
+ *
+ * Return Value: On Success, 0 is returned. On Error, one of the following
+ * negative error codes is returned.
+ *
+ * %-EIO - I/O error
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
- struct nilfs_write_info *wi)
+ struct the_nilfs *nilfs)
{
+ struct nilfs_write_info wi;
struct buffer_head *bh;
- int res, rw = WRITE;
+ int res = 0, rw = WRITE;
+
+ wi.nilfs = nilfs;
+ nilfs_segbuf_prepare_write(segbuf, &wi);
list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
- res = nilfs_submit_bh(wi, bh, rw);
+ res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
if (unlikely(res))
goto failed_bio;
}
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
- res = nilfs_submit_bh(wi, bh, rw);
+ res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
if (unlikely(res))
goto failed_bio;
}
- if (wi->bio) {
+ if (wi.bio) {
/*
* Last BIO is always sent through the following
* submission.
*/
rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
- res = nilfs_submit_seg_bio(wi, rw);
- if (unlikely(res))
- goto failed_bio;
+ res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
}
- res = 0;
- out:
- return res;
-
failed_bio:
- atomic_inc(&wi->err);
- goto out;
+ return res;
}
/**
* nilfs_segbuf_wait - wait for completion of requested BIOs
- * @wi: nilfs_write_info
+ * @segbuf: segment buffer
*
* Return Value: On Success, 0 is returned. On Error, one of the following
* negative error code is returned.
*
* %-EIO - I/O error
*/
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
- struct nilfs_write_info *wi)
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
int err = 0;
- if (!wi->nbio)
+ if (!segbuf->sb_nbio)
return 0;
do {
- wait_for_completion(&wi->bio_event);
- } while (--wi->nbio > 0);
+ wait_for_completion(&segbuf->sb_bio_event);
+ } while (--segbuf->sb_nbio > 0);
- if (unlikely(atomic_read(&wi->err) > 0)) {
+ if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
printk(KERN_ERR "NILFS: IO error writing segment\n");
err = -EIO;
- segbuf->sb_io_error = 1;
}
return err;
}
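
The log-list helpers added above (nilfs_clear_logs, nilfs_truncate_logs, nilfs_wait_on_logs) all walk a list of segment buffers; nilfs_truncate_logs() frees every log after a given entry and, as segbuf.h below defines it, nilfs_destroy_logs(logs) is simply nilfs_truncate_logs(logs, NULL). A minimal userspace sketch of that truncate-after pattern follows, using a hand-rolled circular list in place of the kernel list macros; struct log and truncate_logs are hypothetical names for illustration only, not part of the patch.

/*
 * Userspace model of nilfs_truncate_logs()/nilfs_destroy_logs():
 * free everything after @last; last == NULL empties the whole list.
 */
#include <stdio.h>
#include <stdlib.h>

struct log {
	int segnum;
	struct log *prev, *next;	/* circular list with a dummy head */
};

static void list_init(struct log *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct log *head, struct log *item)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

static void truncate_logs(struct log *head, struct log *last)
{
	struct log *pos = last ? last->next : head->next;

	while (pos != head) {
		struct log *n = pos->next;

		pos->prev->next = pos->next;	/* unlink */
		pos->next->prev = pos->prev;
		printf("freeing log %d\n", pos->segnum);
		free(pos);
		pos = n;
	}
}

int main(void)
{
	struct log head, *keep = NULL;
	int i;

	list_init(&head);
	for (i = 0; i < 4; i++) {
		struct log *l = malloc(sizeof(*l));

		l->segnum = i;
		list_add_tail(&head, l);
		if (i == 1)
			keep = l;	/* keep logs 0 and 1 */
	}
	truncate_logs(&head, keep);	/* frees logs 2 and 3 */
	truncate_logs(&head, NULL);	/* destroy_logs: frees the rest */
	return 0;
}
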
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 0c3076f4e59..6af1630fb40 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -27,7 +27,6 @@
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <linux/completion.h>
-#include <linux/backing-dev.h>
/**
* struct nilfs_segsum_info - On-memory segment summary
@@ -77,7 +76,9 @@ struct nilfs_segsum_info {
* @sb_rest_blocks: Number of residual blocks in the current segment
* @sb_segsum_buffers: List of buffers for segment summaries
* @sb_payload_buffers: List of buffers for segment payload
- * @sb_io_error: I/O error status
+ * @sb_nbio: Number of flying bio requests
+ * @sb_err: I/O error status
+ * @sb_bio_event: Completion event of log writing
*/
struct nilfs_segment_buffer {
struct super_block *sb_super;
@@ -96,7 +97,9 @@ struct nilfs_segment_buffer {
struct list_head sb_payload_buffers; /* including super root */
/* io status */
- int sb_io_error;
+ int sb_nbio;
+ atomic_t sb_err;
+ struct completion sb_bio_event;
};
#define NILFS_LIST_SEGBUF(head) \
@@ -125,6 +128,8 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *);
void nilfs_segbuf_free(struct nilfs_segment_buffer *);
void nilfs_segbuf_map(struct nilfs_segment_buffer *, __u64, unsigned long,
struct the_nilfs *);
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+ struct nilfs_segment_buffer *prev);
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
struct the_nilfs *);
int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t);
@@ -161,41 +166,18 @@ nilfs_segbuf_add_file_buffer(struct nilfs_segment_buffer *segbuf,
segbuf->sb_sum.nfileblk++;
}
-void nilfs_release_buffers(struct list_head *);
+int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
+ struct the_nilfs *nilfs);
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
-static inline void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+void nilfs_clear_logs(struct list_head *logs);
+void nilfs_truncate_logs(struct list_head *logs,
+ struct nilfs_segment_buffer *last);
+int nilfs_wait_on_logs(struct list_head *logs);
+
+static inline void nilfs_destroy_logs(struct list_head *logs)
{
- nilfs_release_buffers(&segbuf->sb_segsum_buffers);
- nilfs_release_buffers(&segbuf->sb_payload_buffers);
+ nilfs_truncate_logs(logs, NULL);
}
-struct nilfs_write_info {
- struct bio *bio;
- int start, end; /* The region to be submitted */
- int rest_blocks;
- int max_pages;
- int nr_vecs;
- sector_t blocknr;
-
- int nbio;
- atomic_t err;
- struct completion bio_event;
- /* completion event of segment write */
-
- /*
- * The following fields must be set explicitly
- */
- struct super_block *sb;
- struct backing_dev_info *bdi; /* backing dev info */
- struct buffer_head *bh_sr;
-};
-
-
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *,
- struct nilfs_write_info *);
-int nilfs_segbuf_write(struct nilfs_segment_buffer *,
- struct nilfs_write_info *);
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *,
- struct nilfs_write_info *);
-
#endif /* _NILFS_SEGBUF_H */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 6eff66a070d..17584c52448 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -974,12 +974,12 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
raw_sr->sr_flags = 0;
- nilfs_mdt_write_inode_direct(
- nilfs_dat_inode(nilfs), bh_sr, NILFS_SR_DAT_OFFSET(isz));
- nilfs_mdt_write_inode_direct(
- nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(isz));
- nilfs_mdt_write_inode_direct(
- nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(isz));
+ nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr +
+ NILFS_SR_DAT_OFFSET(isz), 1);
+ nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
+ NILFS_SR_CPFILE_OFFSET(isz), 1);
+ nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
+ NILFS_SR_SUFILE_OFFSET(isz), 1);
}
static void nilfs_redirty_inodes(struct list_head *head)
@@ -1273,73 +1273,75 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
return err;
}
-static int nilfs_touch_segusage(struct inode *sufile, __u64 segnum)
-{
- struct buffer_head *bh_su;
- struct nilfs_segment_usage *raw_su;
- int err;
-
- err = nilfs_sufile_get_segment_usage(sufile, segnum, &raw_su, &bh_su);
- if (unlikely(err))
- return err;
- nilfs_mdt_mark_buffer_dirty(bh_su);
- nilfs_mdt_mark_dirty(sufile);
- nilfs_sufile_put_segment_usage(sufile, segnum, bh_su);
- return 0;
-}
-
+/**
+ * nilfs_segctor_begin_construction - setup segment buffer to make a new log
+ * @sci: nilfs_sc_info
+ * @nilfs: nilfs object
+ */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs)
{
- struct nilfs_segment_buffer *segbuf, *n;
+ struct nilfs_segment_buffer *segbuf, *prev;
__u64 nextnum;
- int err;
+ int err, alloc = 0;
- if (list_empty(&sci->sc_segbufs)) {
- segbuf = nilfs_segbuf_new(sci->sc_super);
- if (unlikely(!segbuf))
- return -ENOMEM;
- list_add(&segbuf->sb_list, &sci->sc_segbufs);
- } else
- segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
+ segbuf = nilfs_segbuf_new(sci->sc_super);
+ if (unlikely(!segbuf))
+ return -ENOMEM;
+
+ if (list_empty(&sci->sc_write_logs)) {
+ nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
+ nilfs->ns_pseg_offset, nilfs);
+ if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
+ nilfs_shift_to_next_segment(nilfs);
+ nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
+ }
- nilfs_segbuf_map(segbuf, nilfs->ns_segnum, nilfs->ns_pseg_offset,
- nilfs);
+ segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
+ nextnum = nilfs->ns_nextnum;
- if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
- nilfs_shift_to_next_segment(nilfs);
- nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
+ if (nilfs->ns_segnum == nilfs->ns_nextnum)
+ /* Start from the head of a new full segment */
+ alloc++;
+ } else {
+ /* Continue logs */
+ prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
+ nilfs_segbuf_map_cont(segbuf, prev);
+ segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
+ nextnum = prev->sb_nextnum;
+
+ if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
+ nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
+ segbuf->sb_sum.seg_seq++;
+ alloc++;
+ }
}
- sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
- err = nilfs_touch_segusage(nilfs->ns_sufile, segbuf->sb_segnum);
- if (unlikely(err))
- return err;
+ err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
+ if (err)
+ goto failed;
- if (nilfs->ns_segnum == nilfs->ns_nextnum) {
- /* Start from the head of a new full segment */
+ if (alloc) {
err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
- if (unlikely(err))
- return err;
- } else
- nextnum = nilfs->ns_nextnum;
-
- segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
+ if (err)
+ goto failed;
+ }
nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
- /* truncating segment buffers */
- list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
- sb_list) {
- list_del_init(&segbuf->sb_list);
- nilfs_segbuf_free(segbuf);
- }
+ BUG_ON(!list_empty(&sci->sc_segbufs));
+ list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
+ sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
return 0;
+
+ failed:
+ nilfs_segbuf_free(segbuf);
+ return err;
}
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
struct the_nilfs *nilfs, int nadd)
{
- struct nilfs_segment_buffer *segbuf, *prev, *n;
+ struct nilfs_segment_buffer *segbuf, *prev;
struct inode *sufile = nilfs->ns_sufile;
__u64 nextnextnum;
LIST_HEAD(list);
@@ -1352,7 +1354,7 @@ static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
* not be dirty. The following call ensures that the buffer is dirty
* and will pin the buffer on memory until the sufile is written.
*/
- err = nilfs_touch_segusage(sufile, prev->sb_nextnum);
+ err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
if (unlikely(err))
return err;
@@ -1378,33 +1380,33 @@ static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
list_add_tail(&segbuf->sb_list, &list);
prev = segbuf;
}
- list_splice(&list, sci->sc_segbufs.prev);
+ list_splice_tail(&list, &sci->sc_segbufs);
return 0;
failed_segbuf:
nilfs_segbuf_free(segbuf);
failed:
- list_for_each_entry_safe(segbuf, n, &list, sb_list) {
+ list_for_each_entry(segbuf, &list, sb_list) {
ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
WARN_ON(ret); /* never fails */
- list_del_init(&segbuf->sb_list);
- nilfs_segbuf_free(segbuf);
}
+ nilfs_destroy_logs(&list);
return err;
}
-static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
- struct the_nilfs *nilfs)
+static void nilfs_free_incomplete_logs(struct list_head *logs,
+ struct the_nilfs *nilfs)
{
- struct nilfs_segment_buffer *segbuf;
- int ret, done = 0;
+ struct nilfs_segment_buffer *segbuf, *prev;
+ struct inode *sufile = nilfs->ns_sufile;
+ int ret;
- segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
+ segbuf = NILFS_FIRST_SEGBUF(logs);
if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
- ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
+ ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
WARN_ON(ret); /* never fails */
}
- if (segbuf->sb_io_error) {
+ if (atomic_read(&segbuf->sb_err)) {
/* Case 1: The first segment failed */
if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
/* Case 1a: Partial segment appended into an existing
@@ -1413,106 +1415,54 @@ static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
segbuf->sb_fseg_end);
else /* Case 1b: New full segment */
set_nilfs_discontinued(nilfs);
- done++;
}
- list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
- ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
- WARN_ON(ret); /* never fails */
- if (!done && segbuf->sb_io_error) {
- if (segbuf->sb_segnum != nilfs->ns_nextnum)
- /* Case 2: extended segment (!= next) failed */
- nilfs_sufile_set_error(nilfs->ns_sufile,
- segbuf->sb_segnum);
- done++;
- }
- }
-}
-
-static void nilfs_segctor_clear_segment_buffers(struct nilfs_sc_info *sci)
-{
- struct nilfs_segment_buffer *segbuf;
-
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list)
- nilfs_segbuf_clear(segbuf);
- sci->sc_super_root = NULL;
-}
-
-static void nilfs_segctor_destroy_segment_buffers(struct nilfs_sc_info *sci)
-{
- struct nilfs_segment_buffer *segbuf;
-
- while (!list_empty(&sci->sc_segbufs)) {
- segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
- list_del_init(&segbuf->sb_list);
- nilfs_segbuf_free(segbuf);
- }
- /* sci->sc_curseg = NULL; */
-}
-
-static void nilfs_segctor_end_construction(struct nilfs_sc_info *sci,
- struct the_nilfs *nilfs, int err)
-{
- if (unlikely(err)) {
- nilfs_segctor_free_incomplete_segments(sci, nilfs);
- if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
- int ret;
-
- ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
- sci->sc_freesegs,
- sci->sc_nfreesegs,
- NULL);
- WARN_ON(ret); /* do not happen */
+ prev = segbuf;
+ list_for_each_entry_continue(segbuf, logs, sb_list) {
+ if (prev->sb_nextnum != segbuf->sb_nextnum) {
+ ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
+ WARN_ON(ret); /* never fails */
}
+ if (atomic_read(&segbuf->sb_err) &&
+ segbuf->sb_segnum != nilfs->ns_nextnum)
+ /* Case 2: extended segment (!= next) failed */
+ nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
+ prev = segbuf;
}
- nilfs_segctor_clear_segment_buffers(sci);
}
static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
struct inode *sufile)
{
struct nilfs_segment_buffer *segbuf;
- struct buffer_head *bh_su;
- struct nilfs_segment_usage *raw_su;
unsigned long live_blocks;
int ret;
list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
- &raw_su, &bh_su);
- WARN_ON(ret); /* always succeed because bh_su is dirty */
live_blocks = segbuf->sb_sum.nblocks +
(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
- raw_su->su_lastmod = cpu_to_le64(sci->sc_seg_ctime);
- raw_su->su_nblocks = cpu_to_le32(live_blocks);
- nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
- bh_su);
+ ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+ live_blocks,
+ sci->sc_seg_ctime);
+ WARN_ON(ret); /* always succeed because the segusage is dirty */
}
}
-static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci,
- struct inode *sufile)
+static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
struct nilfs_segment_buffer *segbuf;
- struct buffer_head *bh_su;
- struct nilfs_segment_usage *raw_su;
int ret;
- segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
- ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
- &raw_su, &bh_su);
- WARN_ON(ret); /* always succeed because bh_su is dirty */
- raw_su->su_nblocks = cpu_to_le32(segbuf->sb_pseg_start -
- segbuf->sb_fseg_start);
- nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, bh_su);
+ segbuf = NILFS_FIRST_SEGBUF(logs);
+ ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+ segbuf->sb_pseg_start -
+ segbuf->sb_fseg_start, 0);
+ WARN_ON(ret); /* always succeed because the segusage is dirty */
- list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
- ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
- &raw_su, &bh_su);
+ list_for_each_entry_continue(segbuf, logs, sb_list) {
+ ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+ 0, 0);
WARN_ON(ret); /* always succeed */
- raw_su->su_nblocks = 0;
- nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
- bh_su);
}
}
@@ -1520,17 +1470,15 @@ static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
struct nilfs_segment_buffer *last,
struct inode *sufile)
{
- struct nilfs_segment_buffer *segbuf = last, *n;
+ struct nilfs_segment_buffer *segbuf = last;
int ret;
- list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
- sb_list) {
- list_del_init(&segbuf->sb_list);
+ list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
WARN_ON(ret);
- nilfs_segbuf_free(segbuf);
}
+ nilfs_truncate_logs(&sci->sc_segbufs, last);
}
@@ -1569,7 +1517,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
NULL);
WARN_ON(err); /* do not happen */
}
- nilfs_segctor_clear_segment_buffers(sci);
+ nilfs_clear_logs(&sci->sc_segbufs);
err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
if (unlikely(err))
@@ -1814,26 +1762,18 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
}
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
- struct backing_dev_info *bdi)
+ struct the_nilfs *nilfs)
{
struct nilfs_segment_buffer *segbuf;
- struct nilfs_write_info wi;
- int err, res;
-
- wi.sb = sci->sc_super;
- wi.bh_sr = sci->sc_super_root;
- wi.bdi = bdi;
+ int ret = 0;
list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- nilfs_segbuf_prepare_write(segbuf, &wi);
- err = nilfs_segbuf_write(segbuf, &wi);
-
- res = nilfs_segbuf_wait(segbuf, &wi);
- err = err ? : res;
- if (err)
- return err;
+ ret = nilfs_segbuf_write(segbuf, nilfs);
+ if (ret)
+ break;
}
- return 0;
+ list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
+ return ret;
}
static void __nilfs_end_page_io(struct page *page, int err)
@@ -1911,15 +1851,17 @@ static void nilfs_clear_copied_buffers(struct list_head *list, int err)
}
}
-static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
- struct page *failed_page, int err)
+static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
+ struct buffer_head *bh_sr, int err)
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
+ struct buffer_head *bh;
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
- struct buffer_head *bh;
+ if (list_empty(logs))
+ return;
+ list_for_each_entry(segbuf, logs, sb_list) {
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
if (bh->b_page != bd_page) {
@@ -1931,7 +1873,7 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- if (bh == sci->sc_super_root) {
+ if (bh == bh_sr) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
@@ -1941,7 +1883,7 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
if (bh->b_page != fs_page) {
nilfs_end_page_io(fs_page, err);
if (fs_page && fs_page == failed_page)
- goto done;
+ return;
fs_page = bh->b_page;
}
}
@@ -1950,8 +1892,34 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
end_page_writeback(bd_page);
nilfs_end_page_io(fs_page, err);
- done:
+}
+
+static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
+ struct the_nilfs *nilfs, int err)
+{
+ LIST_HEAD(logs);
+ int ret;
+
+ list_splice_tail_init(&sci->sc_write_logs, &logs);
+ ret = nilfs_wait_on_logs(&logs);
+ if (ret)
+ nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
+
+ list_splice_tail_init(&sci->sc_segbufs, &logs);
+ nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
+ nilfs_free_incomplete_logs(&logs, nilfs);
nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
+
+ if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
+ ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
+ sci->sc_freesegs,
+ sci->sc_nfreesegs,
+ NULL);
+ WARN_ON(ret); /* do not happen */
+ }
+
+ nilfs_destroy_logs(&logs);
+ sci->sc_super_root = NULL;
}
static void nilfs_set_next_segment(struct the_nilfs *nilfs,
@@ -1973,7 +1941,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
struct the_nilfs *nilfs = sbi->s_nilfs;
int update_sr = (sci->sc_super_root != NULL);
- list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
+ list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
struct buffer_head *bh;
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
@@ -2046,7 +2014,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
sci->sc_nblk_inc += sci->sc_nblk_this_inc;
- segbuf = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
+ segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
nilfs_set_next_segment(nilfs, segbuf);
if (update_sr) {
@@ -2057,10 +2025,23 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
+ nilfs_segctor_clear_metadata_dirty(sci);
} else
clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}
+static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
+{
+ int ret;
+
+ ret = nilfs_wait_on_logs(&sci->sc_write_logs);
+ if (!ret) {
+ nilfs_segctor_complete_write(sci);
+ nilfs_destroy_logs(&sci->sc_write_logs);
+ }
+ return ret;
+}
+
static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
struct nilfs_sb_info *sbi)
{
@@ -2173,7 +2154,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
/* Avoid empty segment */
if (sci->sc_stage.scnt == NILFS_ST_DONE &&
NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
- nilfs_segctor_end_construction(sci, nilfs, 1);
+ nilfs_segctor_abort_construction(sci, nilfs, 1);
goto out;
}
@@ -2187,7 +2168,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
if (has_sr) {
err = nilfs_segctor_fill_in_checkpoint(sci);
if (unlikely(err))
- goto failed_to_make_up;
+ goto failed_to_write;
nilfs_segctor_fill_in_super_root(sci, nilfs);
}
@@ -2195,42 +2176,46 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
/* Write partial segments */
err = nilfs_segctor_prepare_write(sci, &failed_page);
- if (unlikely(err))
+ if (err) {
+ nilfs_abort_logs(&sci->sc_segbufs, failed_page,
+ sci->sc_super_root, err);
goto failed_to_write;
-
+ }
nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed);
- err = nilfs_segctor_write(sci, nilfs->ns_bdi);
+ err = nilfs_segctor_write(sci, nilfs);
if (unlikely(err))
goto failed_to_write;
- nilfs_segctor_complete_write(sci);
-
- /* Commit segments */
- if (has_sr)
- nilfs_segctor_clear_metadata_dirty(sci);
-
- nilfs_segctor_end_construction(sci, nilfs, 0);
-
+ if (sci->sc_stage.scnt == NILFS_ST_DONE ||
+ nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+ /*
+ * At this point, we avoid double buffering
+ * for blocksize < pagesize because page dirty
+ * flag is turned off during write and dirty
+ * buffers are not properly collected for
+ * pages crossing over segments.
+ */
+ err = nilfs_segctor_wait(sci);
+ if (err)
+ goto failed_to_write;
+ }
} while (sci->sc_stage.scnt != NILFS_ST_DONE);
+ sci->sc_super_root = NULL;
+
out:
- nilfs_segctor_destroy_segment_buffers(sci);
nilfs_segctor_check_out_files(sci, sbi);
return err;
failed_to_write:
- nilfs_segctor_abort_write(sci, failed_page, err);
- nilfs_segctor_cancel_segusage(sci, nilfs->ns_sufile);
-
- failed_to_make_up:
if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
nilfs_redirty_inodes(&sci->sc_dirty_files);
failed:
if (nilfs_doing_gc())
nilfs_redirty_inodes(&sci->sc_gc_inodes);
- nilfs_segctor_end_construction(sci, nilfs, err);
+ nilfs_segctor_abort_construction(sci, nilfs, err);
goto out;
}
@@ -2559,7 +2544,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
sci->sc_freesegs = kbufs[4];
sci->sc_nfreesegs = argv[4].v_nmembs;
- list_splice_init(&nilfs->ns_gc_inodes, sci->sc_gc_inodes.prev);
+ list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
for (;;) {
nilfs_segctor_accept(sci, &req);
@@ -2788,6 +2773,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
spin_lock_init(&sci->sc_state_lock);
INIT_LIST_HEAD(&sci->sc_dirty_files);
INIT_LIST_HEAD(&sci->sc_segbufs);
+ INIT_LIST_HEAD(&sci->sc_write_logs);
INIT_LIST_HEAD(&sci->sc_gc_inodes);
INIT_LIST_HEAD(&sci->sc_copied_buffers);
@@ -2855,6 +2841,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
}
WARN_ON(!list_empty(&sci->sc_segbufs));
+ WARN_ON(!list_empty(&sci->sc_write_logs));
down_write(&sbi->s_nilfs->ns_segctor_sem);
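
The segment constructor now keeps two queues: logs are built on sc_segbufs, handed over to sc_write_logs by nilfs_segctor_write() via list_splice_tail_init(), and only destroyed in nilfs_segctor_wait() or nilfs_segctor_abort_construction() once their I/O has completed. A toy standalone model of that hand-off follows; the arrays and the names struct queue and splice_tail_init are invented for illustration and do not correspond to kernel APIs.

/* Toy two-queue pipeline: build, splice to the write queue, wait, destroy. */
#include <stdio.h>

#define MAXLOGS 8

struct queue { int logs[MAXLOGS]; int n; };

static void push(struct queue *q, int log)
{
	if (q->n < MAXLOGS)
		q->logs[q->n++] = log;
}

/* crude stand-in for list_splice_tail_init(): move everything from src to dst */
static void splice_tail_init(struct queue *src, struct queue *dst)
{
	int i;

	for (i = 0; i < src->n; i++)
		push(dst, src->logs[i]);
	src->n = 0;
}

int main(void)
{
	struct queue segbufs = { .n = 0 }, write_logs = { .n = 0 };
	int i;

	push(&segbufs, 1);	/* begin_construction: first log */
	push(&segbufs, 2);	/* extend_segments: an extra log */

	/* nilfs_segctor_write(): submit, then hand the logs to the write queue */
	splice_tail_init(&segbufs, &write_logs);

	/* nilfs_segctor_wait(): wait for the write queue, then destroy it */
	for (i = 0; i < write_logs.n; i++)
		printf("log %d written\n", write_logs.logs[i]);
	write_logs.n = 0;
	return 0;
}
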
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 0d2a475a741..3d3ab2f9864 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -97,6 +97,7 @@ struct nilfs_segsum_pointer {
* @sc_dsync_start: start byte offset of data pages
* @sc_dsync_end: end byte offset of data pages (inclusive)
* @sc_segbufs: List of segment buffers
+ * @sc_write_logs: List of segment buffers to hold logs under writing
* @sc_segbuf_nblocks: Number of available blocks in segment buffers.
* @sc_curseg: Current segment buffer
* @sc_super_root: Pointer to the super root buffer
@@ -143,6 +144,7 @@ struct nilfs_sc_info {
/* Segment buffers */
struct list_head sc_segbufs;
+ struct list_head sc_write_logs;
unsigned long sc_segbuf_nblocks;
struct nilfs_segment_buffer *sc_curseg;
struct buffer_head *sc_super_root;
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 37994d4a59c..b6c36d0cc33 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -31,6 +31,16 @@
#include "sufile.h"
+struct nilfs_sufile_info {
+ struct nilfs_mdt_info mi;
+ unsigned long ncleansegs;
+};
+
+static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
+{
+ return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
+}
+
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
@@ -62,14 +72,6 @@ nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
max - curr + 1);
}
-static inline struct nilfs_sufile_header *
-nilfs_sufile_block_get_header(const struct inode *sufile,
- struct buffer_head *bh,
- void *kaddr)
-{
- return kaddr + bh_offset(bh);
-}
-
static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
struct buffer_head *bh, void *kaddr)
@@ -110,6 +112,15 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
}
/**
+ * nilfs_sufile_get_ncleansegs - return the number of clean segments
+ * @sufile: inode of segment usage file
+ */
+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
+{
+ return NILFS_SUI(sufile)->ncleansegs;
+}
+
+/**
* nilfs_sufile_updatev - modify multiple segment usages at a time
* @sufile: inode of segment usage file
* @segnumv: array of segment numbers
@@ -270,7 +281,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
if (ret < 0)
goto out_sem;
kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
- header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+ header = kaddr + bh_offset(header_bh);
ncleansegs = le64_to_cpu(header->sh_ncleansegs);
last_alloc = le64_to_cpu(header->sh_last_alloc);
kunmap_atomic(kaddr, KM_USER0);
@@ -302,13 +313,13 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
kunmap_atomic(kaddr, KM_USER0);
kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
- header = nilfs_sufile_block_get_header(
- sufile, header_bh, kaddr);
+ header = kaddr + bh_offset(header_bh);
le64_add_cpu(&header->sh_ncleansegs, -1);
le64_add_cpu(&header->sh_ndirtysegs, 1);
header->sh_last_alloc = cpu_to_le64(segnum);
kunmap_atomic(kaddr, KM_USER0);
+ NILFS_SUI(sufile)->ncleansegs--;
nilfs_mdt_mark_buffer_dirty(header_bh);
nilfs_mdt_mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile);
@@ -351,6 +362,8 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
kunmap_atomic(kaddr, KM_USER0);
nilfs_sufile_mod_counter(header_bh, -1, 1);
+ NILFS_SUI(sufile)->ncleansegs--;
+
nilfs_mdt_mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile);
}
@@ -380,6 +393,8 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
kunmap_atomic(kaddr, KM_USER0);
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
+ NILFS_SUI(sufile)->ncleansegs -= clean;
+
nilfs_mdt_mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile);
}
@@ -409,79 +424,65 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
nilfs_mdt_mark_buffer_dirty(su_bh);
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
+ NILFS_SUI(sufile)->ncleansegs++;
+
nilfs_mdt_mark_dirty(sufile);
}
/**
- * nilfs_sufile_get_segment_usage - get a segment usage
+ * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
* @sufile: inode of segment usage file
* @segnum: segment number
- * @sup: pointer to segment usage
- * @bhp: pointer to buffer head
- *
- * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
- * specified by @segnum.
- *
- * Return Value: On success, 0 is returned, and the segment usage and the
- * buffer head of the buffer on which the segment usage is located are stored
- * in the place pointed by @sup and @bhp, respectively. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
*/
-int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
- struct nilfs_segment_usage **sup,
- struct buffer_head **bhp)
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
struct buffer_head *bh;
- struct nilfs_segment_usage *su;
- void *kaddr;
int ret;
- /* segnum is 0 origin */
- if (segnum >= nilfs_sufile_get_nsegments(sufile))
- return -EINVAL;
- down_write(&NILFS_MDT(sufile)->mi_sem);
- ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
- if (ret < 0)
- goto out_sem;
- kaddr = kmap(bh->b_page);
- su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
- if (nilfs_segment_usage_error(su)) {
- kunmap(bh->b_page);
+ ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+ if (!ret) {
+ nilfs_mdt_mark_buffer_dirty(bh);
+ nilfs_mdt_mark_dirty(sufile);
brelse(bh);
- ret = -EINVAL;
- goto out_sem;
}
-
- if (sup != NULL)
- *sup = su;
- *bhp = bh;
-
- out_sem:
- up_write(&NILFS_MDT(sufile)->mi_sem);
return ret;
}
/**
- * nilfs_sufile_put_segment_usage - put a segment usage
+ * nilfs_sufile_set_segment_usage - set usage of a segment
* @sufile: inode of segment usage file
* @segnum: segment number
- * @bh: buffer head
- *
- * Description: nilfs_sufile_put_segment_usage() releases the segment usage
- * specified by @segnum. @bh must be the buffer head which have been returned
- * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
+ * @nblocks: number of live blocks in the segment
+ * @modtime: modification time (option)
*/
-void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
- struct buffer_head *bh)
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+ unsigned long nblocks, time_t modtime)
{
- kunmap(bh->b_page);
+ struct buffer_head *bh;
+ struct nilfs_segment_usage *su;
+ void *kaddr;
+ int ret;
+
+ down_write(&NILFS_MDT(sufile)->mi_sem);
+ ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+ if (ret < 0)
+ goto out_sem;
+
+ kaddr = kmap_atomic(bh->b_page, KM_USER0);
+ su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+ WARN_ON(nilfs_segment_usage_error(su));
+ if (modtime)
+ su->su_lastmod = cpu_to_le64(modtime);
+ su->su_nblocks = cpu_to_le32(nblocks);
+ kunmap_atomic(kaddr, KM_USER0);
+
+ nilfs_mdt_mark_buffer_dirty(bh);
+ nilfs_mdt_mark_dirty(sufile);
brelse(bh);
+
+ out_sem:
+ up_write(&NILFS_MDT(sufile)->mi_sem);
+ return ret;
}
/**
@@ -515,7 +516,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
goto out_sem;
kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
- header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+ header = kaddr + bh_offset(header_bh);
sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
@@ -532,33 +533,6 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
return ret;
}
-/**
- * nilfs_sufile_get_ncleansegs - get the number of clean segments
- * @sufile: inode of segment usage file
- * @nsegsp: pointer to the number of clean segments
- *
- * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
- * segments.
- *
- * Return Value: On success, 0 is returned and the number of clean segments is
- * stored in the place pointed by @nsegsp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- */
-int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
-{
- struct nilfs_sustat sustat;
- int ret;
-
- ret = nilfs_sufile_get_stat(sufile, &sustat);
- if (ret == 0)
- *nsegsp = sustat.ss_ncleansegs;
- return ret;
-}
-
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
struct buffer_head *header_bh,
struct buffer_head *su_bh)
@@ -577,8 +551,10 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
nilfs_segment_usage_set_error(su);
kunmap_atomic(kaddr, KM_USER0);
- if (suclean)
+ if (suclean) {
nilfs_sufile_mod_counter(header_bh, -1, 0);
+ NILFS_SUI(sufile)->ncleansegs--;
+ }
nilfs_mdt_mark_buffer_dirty(su_bh);
nilfs_mdt_mark_dirty(sufile);
}
@@ -657,3 +633,48 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
up_read(&NILFS_MDT(sufile)->mi_sem);
return ret;
}
+
+/**
+ * nilfs_sufile_read - read sufile inode
+ * @sufile: sufile inode
+ * @raw_inode: on-disk sufile inode
+ */
+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
+{
+ struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+ struct buffer_head *header_bh;
+ struct nilfs_sufile_header *header;
+ void *kaddr;
+ int ret;
+
+ ret = nilfs_read_inode_common(sufile, raw_inode);
+ if (ret < 0)
+ return ret;
+
+ ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+ if (!ret) {
+ kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ header = kaddr + bh_offset(header_bh);
+ sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
+ kunmap_atomic(kaddr, KM_USER0);
+ brelse(header_bh);
+ }
+ return ret;
+}
+
+/**
+ * nilfs_sufile_new - create sufile
+ * @nilfs: nilfs object
+ * @susize: size of a segment usage entry
+ */
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
+{
+ struct inode *sufile;
+
+ sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO,
+ sizeof(struct nilfs_sufile_info));
+ if (sufile)
+ nilfs_mdt_set_entry_size(sufile, susize,
+ sizeof(struct nilfs_sufile_header));
+ return sufile;
+}
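
nilfs_sufile_read() above seeds an in-memory clean-segment counter from the on-disk sufile header, and the alloc/free/scrap/set_error paths keep it in step, so nilfs_sufile_get_ncleansegs() becomes a plain accessor instead of a stat call. A simplified standalone model of that cached-counter idea follows; the structure and function names are made up for the sketch and are not the kernel API.

/* Cached clean-segment counter: read once at load, adjust on each change. */
#include <stdio.h>

struct sufile_header { unsigned long ncleansegs; };	/* on-disk view */
struct sufile_info   { unsigned long ncleansegs; };	/* in-memory cache */

static void sufile_read(struct sufile_info *sui, const struct sufile_header *hdr)
{
	sui->ncleansegs = hdr->ncleansegs;	/* mirrors nilfs_sufile_read() */
}

static void sufile_alloc(struct sufile_info *sui) { sui->ncleansegs--; }
static void sufile_free(struct sufile_info *sui)  { sui->ncleansegs++; }

int main(void)
{
	struct sufile_header hdr = { .ncleansegs = 100 };
	struct sufile_info sui;

	sufile_read(&sui, &hdr);
	sufile_alloc(&sui);	/* a segment becomes dirty */
	sufile_free(&sui);	/* ...and is freed again */
	printf("clean segments: %lu\n", sui.ncleansegs);	/* prints 100 */
	return 0;
}
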
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 0e99e5c0bd0..15163b8aff7 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -34,14 +34,13 @@ static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
return NILFS_MDT(sufile)->mi_nilfs->ns_nsegments;
}
+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
+
int nilfs_sufile_alloc(struct inode *, __u64 *);
-int nilfs_sufile_get_segment_usage(struct inode *, __u64,
- struct nilfs_segment_usage **,
- struct buffer_head **);
-void nilfs_sufile_put_segment_usage(struct inode *, __u64,
- struct buffer_head *);
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+ unsigned long nblocks, time_t modtime);
int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
-int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *);
ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned,
size_t);
@@ -62,6 +61,9 @@ void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
struct buffer_head *);
+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode);
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize);
+
/**
* nilfs_sufile_scrap - make a segment garbage
* @sufile: inode of segment usage file
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 644e66727dd..8173faee31e 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -363,14 +363,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
list_add(&sbi->s_list, &nilfs->ns_supers);
up_write(&nilfs->ns_super_sem);
- sbi->s_ifile = nilfs_mdt_new(nilfs, sbi->s_super, NILFS_IFILE_INO);
+ sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
if (!sbi->s_ifile)
return -ENOMEM;
- err = nilfs_palloc_init_blockgroup(sbi->s_ifile, nilfs->ns_inode_size);
- if (unlikely(err))
- goto failed;
-
down_read(&nilfs->ns_segctor_sem);
err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
&bh_cp);
@@ -411,7 +407,6 @@ void nilfs_detach_checkpoint(struct nilfs_sb_info *sbi)
{
struct the_nilfs *nilfs = sbi->s_nilfs;
- nilfs_mdt_clear(sbi->s_ifile);
nilfs_mdt_destroy(sbi->s_ifile);
sbi->s_ifile = NULL;
down_write(&nilfs->ns_super_sem);
@@ -419,22 +414,6 @@ void nilfs_detach_checkpoint(struct nilfs_sb_info *sbi)
up_write(&nilfs->ns_super_sem);
}
-static int nilfs_mark_recovery_complete(struct nilfs_sb_info *sbi)
-{
- struct the_nilfs *nilfs = sbi->s_nilfs;
- int err = 0;
-
- down_write(&nilfs->ns_sem);
- if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
- nilfs->ns_mount_state |= NILFS_VALID_FS;
- err = nilfs_commit_super(sbi, 1);
- if (likely(!err))
- printk(KERN_INFO "NILFS: recovery complete.\n");
- }
- up_write(&nilfs->ns_sem);
- return err;
-}
-
static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
@@ -490,7 +469,7 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
struct nilfs_sb_info *sbi = NILFS_SB(sb);
if (!nilfs_test_opt(sbi, BARRIER))
- seq_printf(seq, ",barrier=off");
+ seq_printf(seq, ",nobarrier");
if (nilfs_test_opt(sbi, SNAPSHOT))
seq_printf(seq, ",cp=%llu",
(unsigned long long int)sbi->s_snapshot_cno);
@@ -500,6 +479,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",errors=panic");
if (nilfs_test_opt(sbi, STRICT_ORDER))
seq_printf(seq, ",order=strict");
+ if (nilfs_test_opt(sbi, NORECOVERY))
+ seq_printf(seq, ",norecovery");
return 0;
}
@@ -568,7 +549,7 @@ static const struct export_operations nilfs_export_ops = {
enum {
Opt_err_cont, Opt_err_panic, Opt_err_ro,
- Opt_barrier, Opt_snapshot, Opt_order,
+ Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
Opt_err,
};
@@ -576,25 +557,13 @@ static match_table_t tokens = {
{Opt_err_cont, "errors=continue"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
- {Opt_barrier, "barrier=%s"},
+ {Opt_nobarrier, "nobarrier"},
{Opt_snapshot, "cp=%u"},
{Opt_order, "order=%s"},
+ {Opt_norecovery, "norecovery"},
{Opt_err, NULL}
};
-static int match_bool(substring_t *s, int *result)
-{
- int len = s->to - s->from;
-
- if (strncmp(s->from, "on", len) == 0)
- *result = 1;
- else if (strncmp(s->from, "off", len) == 0)
- *result = 0;
- else
- return 1;
- return 0;
-}
-
static int parse_options(char *options, struct super_block *sb)
{
struct nilfs_sb_info *sbi = NILFS_SB(sb);
@@ -612,13 +581,8 @@ static int parse_options(char *options, struct super_block *sb)
token = match_token(p, tokens, args);
switch (token) {
- case Opt_barrier:
- if (match_bool(&args[0], &option))
- return 0;
- if (option)
- nilfs_set_opt(sbi, BARRIER);
- else
- nilfs_clear_opt(sbi, BARRIER);
+ case Opt_nobarrier:
+ nilfs_clear_opt(sbi, BARRIER);
break;
case Opt_order:
if (strcmp(args[0].from, "relaxed") == 0)
@@ -647,6 +611,9 @@ static int parse_options(char *options, struct super_block *sb)
sbi->s_snapshot_cno = option;
nilfs_set_opt(sbi, SNAPSHOT);
break;
+ case Opt_norecovery:
+ nilfs_set_opt(sbi, NORECOVERY);
+ break;
default:
printk(KERN_ERR
"NILFS: Unrecognized mount option \"%s\"\n", p);
@@ -672,9 +639,7 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
int mnt_count = le16_to_cpu(sbp->s_mnt_count);
/* nilfs->sem must be locked by the caller. */
- if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
- printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
- } else if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
+ if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
printk(KERN_WARNING
"NILFS warning: mounting fs with errors\n");
#if 0
@@ -782,11 +747,10 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
sb->s_root = NULL;
sb->s_time_gran = 1;
- if (!nilfs_loaded(nilfs)) {
- err = load_nilfs(nilfs, sbi);
- if (err)
- goto failed_sbi;
- }
+ err = load_nilfs(nilfs, sbi);
+ if (err)
+ goto failed_sbi;
+
cno = nilfs_last_cno(nilfs);
if (sb->s_flags & MS_RDONLY) {
@@ -854,12 +818,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
up_write(&nilfs->ns_sem);
}
- err = nilfs_mark_recovery_complete(sbi);
- if (unlikely(err)) {
- printk(KERN_ERR "NILFS: recovery failed.\n");
- goto failed_root;
- }
-
down_write(&nilfs->ns_super_sem);
if (!nilfs_test_opt(sbi, SNAPSHOT))
nilfs->ns_current = sbi;
@@ -867,10 +825,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
return 0;
- failed_root:
- dput(sb->s_root);
- sb->s_root = NULL;
-
failed_segctor:
nilfs_detach_segment_constructor(sbi);
@@ -915,6 +869,14 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if (!nilfs_valid_fs(nilfs)) {
+ printk(KERN_WARNING "NILFS (device %s): couldn't "
+ "remount because the filesystem is in an "
+ "incomplete recovery state.\n", sb->s_id);
+ err = -EINVAL;
+ goto restore_opts;
+ }
+
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
goto out;
if (*flags & MS_RDONLY) {
@@ -1156,8 +1118,7 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
/* Abandoning the newly allocated superblock */
mutex_unlock(&nilfs->ns_mount_mutex);
put_nilfs(nilfs);
- up_write(&s->s_umount);
- deactivate_super(s);
+ deactivate_locked_super(s);
/*
* deactivate_super() invokes close_bdev_exclusive().
* We must finish all post-cleaning before this call;
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ad391a8c3e7..6241e1722ef 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -146,13 +146,9 @@ void put_nilfs(struct the_nilfs *nilfs)
might_sleep();
if (nilfs_loaded(nilfs)) {
- nilfs_mdt_clear(nilfs->ns_sufile);
nilfs_mdt_destroy(nilfs->ns_sufile);
- nilfs_mdt_clear(nilfs->ns_cpfile);
nilfs_mdt_destroy(nilfs->ns_cpfile);
- nilfs_mdt_clear(nilfs->ns_dat);
nilfs_mdt_destroy(nilfs->ns_dat);
- /* XXX: how and when to clear nilfs->ns_gc_dat? */
nilfs_mdt_destroy(nilfs->ns_gc_dat);
}
if (nilfs_init(nilfs)) {
@@ -166,7 +162,6 @@ void put_nilfs(struct the_nilfs *nilfs)
static int nilfs_load_super_root(struct the_nilfs *nilfs,
struct nilfs_sb_info *sbi, sector_t sr_block)
{
- static struct lock_class_key dat_lock_key;
struct buffer_head *bh_sr;
struct nilfs_super_root *raw_sr;
struct nilfs_super_block **sbp = nilfs->ns_sbp;
@@ -187,51 +182,36 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs,
inode_size = nilfs->ns_inode_size;
err = -ENOMEM;
- nilfs->ns_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+ nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
if (unlikely(!nilfs->ns_dat))
goto failed;
- nilfs->ns_gc_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+ nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
if (unlikely(!nilfs->ns_gc_dat))
goto failed_dat;
- nilfs->ns_cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO);
+ nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
if (unlikely(!nilfs->ns_cpfile))
goto failed_gc_dat;
- nilfs->ns_sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO);
+ nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
if (unlikely(!nilfs->ns_sufile))
goto failed_cpfile;
- err = nilfs_palloc_init_blockgroup(nilfs->ns_dat, dat_entry_size);
- if (unlikely(err))
- goto failed_sufile;
-
- err = nilfs_palloc_init_blockgroup(nilfs->ns_gc_dat, dat_entry_size);
- if (unlikely(err))
- goto failed_sufile;
-
- lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
- lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);
-
nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
- nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
- sizeof(struct nilfs_cpfile_header));
- nilfs_mdt_set_entry_size(nilfs->ns_sufile, segment_usage_size,
- sizeof(struct nilfs_sufile_header));
- err = nilfs_mdt_read_inode_direct(
- nilfs->ns_dat, bh_sr, NILFS_SR_DAT_OFFSET(inode_size));
+ err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
+ NILFS_SR_DAT_OFFSET(inode_size));
if (unlikely(err))
goto failed_sufile;
- err = nilfs_mdt_read_inode_direct(
- nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(inode_size));
+ err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
+ NILFS_SR_CPFILE_OFFSET(inode_size));
if (unlikely(err))
goto failed_sufile;
- err = nilfs_mdt_read_inode_direct(
- nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(inode_size));
+ err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
+ NILFS_SR_SUFILE_OFFSET(inode_size));
if (unlikely(err))
goto failed_sufile;
@@ -281,29 +261,30 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
struct nilfs_recovery_info ri;
unsigned int s_flags = sbi->s_super->s_flags;
int really_read_only = bdev_read_only(nilfs->ns_bdev);
- unsigned valid_fs;
- int err = 0;
-
- nilfs_init_recovery_info(&ri);
+ int valid_fs = nilfs_valid_fs(nilfs);
+ int err;
- down_write(&nilfs->ns_sem);
- valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
- up_write(&nilfs->ns_sem);
+ if (nilfs_loaded(nilfs)) {
+ if (valid_fs ||
+ ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY)))
+ return 0;
+ printk(KERN_ERR "NILFS: the filesystem is in an incomplete "
+ "recovery state.\n");
+ return -EINVAL;
+ }
- if (!valid_fs && (s_flags & MS_RDONLY)) {
- printk(KERN_INFO "NILFS: INFO: recovery "
- "required for readonly filesystem.\n");
- if (really_read_only) {
- printk(KERN_ERR "NILFS: write access "
- "unavailable, cannot proceed.\n");
- err = -EROFS;
- goto failed;
+ if (!valid_fs) {
+ printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
+ if (s_flags & MS_RDONLY) {
+ printk(KERN_INFO "NILFS: INFO: recovery "
+ "required for readonly filesystem.\n");
+ printk(KERN_INFO "NILFS: write access will "
+ "be enabled during recovery.\n");
}
- printk(KERN_INFO "NILFS: write access will "
- "be enabled during recovery.\n");
- sbi->s_super->s_flags &= ~MS_RDONLY;
}
+ nilfs_init_recovery_info(&ri);
+
err = nilfs_search_super_root(nilfs, sbi, &ri);
if (unlikely(err)) {
printk(KERN_ERR "NILFS: error searching super root.\n");
@@ -316,19 +297,56 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
goto failed;
}
- if (!valid_fs) {
- err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
- if (unlikely(err)) {
- nilfs_mdt_destroy(nilfs->ns_cpfile);
- nilfs_mdt_destroy(nilfs->ns_sufile);
- nilfs_mdt_destroy(nilfs->ns_dat);
- goto failed;
+ if (valid_fs)
+ goto skip_recovery;
+
+ if (s_flags & MS_RDONLY) {
+ if (nilfs_test_opt(sbi, NORECOVERY)) {
+ printk(KERN_INFO "NILFS: norecovery option specified. "
+ "skipping roll-forward recovery\n");
+ goto skip_recovery;
}
- if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED)
- sbi->s_super->s_dirt = 1;
+ if (really_read_only) {
+ printk(KERN_ERR "NILFS: write access "
+ "unavailable, cannot proceed.\n");
+ err = -EROFS;
+ goto failed_unload;
+ }
+ sbi->s_super->s_flags &= ~MS_RDONLY;
+ } else if (nilfs_test_opt(sbi, NORECOVERY)) {
+ printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
+ "option was specified for a read/write mount\n");
+ err = -EINVAL;
+ goto failed_unload;
}
+ err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
+ if (err)
+ goto failed_unload;
+
+ down_write(&nilfs->ns_sem);
+ nilfs->ns_mount_state |= NILFS_VALID_FS;
+ nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
+ err = nilfs_commit_super(sbi, 1);
+ up_write(&nilfs->ns_sem);
+
+ if (err) {
+ printk(KERN_ERR "NILFS: failed to update super block. "
+ "recovery unfinished.\n");
+ goto failed_unload;
+ }
+ printk(KERN_INFO "NILFS: recovery complete.\n");
+
+ skip_recovery:
set_nilfs_loaded(nilfs);
+ nilfs_clear_recovery_info(&ri);
+ sbi->s_super->s_flags = s_flags;
+ return 0;
+
+ failed_unload:
+ nilfs_mdt_destroy(nilfs->ns_cpfile);
+ nilfs_mdt_destroy(nilfs->ns_sufile);
+ nilfs_mdt_destroy(nilfs->ns_dat);
failed:
nilfs_clear_recovery_info(&ri);
@@ -632,30 +650,23 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
struct inode *dat = nilfs_dat_inode(nilfs);
unsigned long ncleansegs;
- int err;
down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */
- err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
+ ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */
- if (likely(!err))
- *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
- return err;
+ *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
+ return 0;
}
int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
- struct inode *sufile = nilfs->ns_sufile;
unsigned long ncleansegs, nincsegs;
- int ret;
- ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
- if (likely(!ret)) {
- nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
- nilfs->ns_blocks_per_segment + 1;
- if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
- ret++;
- }
- return ret;
+ ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
+ nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
+ nilfs->ns_blocks_per_segment + 1;
+
+ return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
}
/**
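
With the cached counter, nilfs_near_disk_full() above reduces to simple arithmetic: estimate how many segments the dirty blocks will consume (rounded up by one) and compare against the clean segments left after the reserved ones. A small standalone version of that check, with arbitrary example numbers, is sketched below; near_disk_full here is an illustrative helper, not the kernel function.

#include <stdio.h>

static int near_disk_full(unsigned long ncleansegs, unsigned long ndirtyblks,
			  unsigned long blocks_per_segment, unsigned long nrsvsegs)
{
	/* segments the dirty blocks will need, rounded up by one */
	unsigned long nincsegs = ndirtyblks / blocks_per_segment + 1;

	return ncleansegs <= nrsvsegs + nincsegs;
}

int main(void)
{
	/* 12 clean segments, 2048 dirty blocks, 2048 blocks/segment, 10 reserved */
	printf("%d\n", near_disk_full(12, 2048, 2048, 10));	/* prints 1: near full */
	return 0;
}
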
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 20abd55881e..589786e3346 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -258,6 +258,16 @@ static inline void nilfs_put_sbinfo(struct nilfs_sb_info *sbi)
kfree(sbi);
}
+static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
+{
+ unsigned valid_fs;
+
+ down_read(&nilfs->ns_sem);
+ valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
+ up_read(&nilfs->ns_sem);
+ return valid_fs;
+}
+
static inline void
nilfs_get_segment_range(struct the_nilfs *nilfs, __u64 segnum,
sector_t *seg_start, sector_t *seg_end)
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 5ef5f365a5c..8271cf05c95 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -646,6 +646,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
struct fsnotify_group *group;
struct user_struct *user;
struct file *filp;
+ struct path path;
int fd, ret;
/* Check the IN_* constants for consistency. */
@@ -659,12 +660,6 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
if (fd < 0)
return fd;
- filp = get_empty_filp();
- if (!filp) {
- ret = -ENFILE;
- goto out_put_fd;
- }
-
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
@@ -679,24 +674,28 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
goto out_free_uid;
}
- filp->f_op = &inotify_fops;
- filp->f_path.mnt = mntget(inotify_mnt);
- filp->f_path.dentry = dget(inotify_mnt->mnt_root);
- filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
- filp->f_mode = FMODE_READ;
+ atomic_inc(&user->inotify_devs);
+
+ path.mnt = inotify_mnt;
+ path.dentry = inotify_mnt->mnt_root;
+ path_get(&path);
+ filp = alloc_file(&path, FMODE_READ, &inotify_fops);
+ if (!filp)
+ goto Enfile;
+
filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
filp->private_data = group;
- atomic_inc(&user->inotify_devs);
-
fd_install(fd, filp);
return fd;
+Enfile:
+ ret = -ENFILE;
+ path_put(&path);
+ atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
- put_filp(filp);
-out_put_fd:
put_unused_fd(fd);
return ret;
}
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 9938034762c..dc2505abb6d 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -530,7 +530,7 @@ err_corrupt_attr:
* the ntfs inode.
*
* Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
* i_flags is set to 0 and we have no business touching it. Only an ioctl()
* is allowed to write to them. We should of course be honouring them but
@@ -1207,7 +1207,7 @@ err_out:
* necessary fields in @vi as well as initializing the ntfs inode.
*
* Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
*
* Return 0 on success and -errno on error. In the error case, the inode will
@@ -1474,7 +1474,7 @@ err_out:
* normal directory inodes.
*
* Q: What locks are held when the function is called?
- * A: i_state has I_LOCK set, hence the inode is locked, also
+ * A: i_state has I_NEW set, hence the inode is locked, also
* i_count is set to 1, so it is not going to go away
*
* Return 0 on success and -errno on error. In the error case, the inode will
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index fbeaec76210..e3e47415d85 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -331,13 +331,14 @@ cleanup:
return ret;
}
-static size_t ocfs2_xattr_list_acl_access(struct inode *inode,
+static size_t ocfs2_xattr_list_acl_access(struct dentry *dentry,
char *list,
size_t list_len,
const char *name,
- size_t name_len)
+ size_t name_len,
+ int type)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
@@ -348,13 +349,14 @@ static size_t ocfs2_xattr_list_acl_access(struct inode *inode,
return size;
}
-static size_t ocfs2_xattr_list_acl_default(struct inode *inode,
+static size_t ocfs2_xattr_list_acl_default(struct dentry *dentry,
char *list,
size_t list_len,
const char *name,
- size_t name_len)
+ size_t name_len,
+ int type)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
@@ -365,19 +367,19 @@ static size_t ocfs2_xattr_list_acl_default(struct inode *inode,
return size;
}
-static int ocfs2_xattr_get_acl(struct inode *inode,
- int type,
- void *buffer,
- size_t size)
+static int ocfs2_xattr_get_acl(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
struct posix_acl *acl;
int ret;
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return -EOPNOTSUPP;
- acl = ocfs2_get_acl(inode, type);
+ acl = ocfs2_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -388,35 +390,16 @@ static int ocfs2_xattr_get_acl(struct inode *inode,
return ret;
}
-static int ocfs2_xattr_get_acl_access(struct inode *inode,
- const char *name,
- void *buffer,
- size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ocfs2_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
-}
-
-static int ocfs2_xattr_get_acl_default(struct inode *inode,
- const char *name,
- void *buffer,
- size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ocfs2_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
-}
-
-static int ocfs2_xattr_set_acl(struct inode *inode,
- int type,
- const void *value,
- size_t size)
+static int ocfs2_xattr_set_acl(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
+ struct inode *inode = dentry->d_inode;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl;
int ret = 0;
+ if (strcmp(name, "") != 0)
+ return -EINVAL;
if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
return -EOPNOTSUPP;
@@ -442,38 +425,18 @@ cleanup:
return ret;
}
-static int ocfs2_xattr_set_acl_access(struct inode *inode,
- const char *name,
- const void *value,
- size_t size,
- int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ocfs2_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
-}
-
-static int ocfs2_xattr_set_acl_default(struct inode *inode,
- const char *name,
- const void *value,
- size_t size,
- int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return ocfs2_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
-}
-
struct xattr_handler ocfs2_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
.list = ocfs2_xattr_list_acl_access,
- .get = ocfs2_xattr_get_acl_access,
- .set = ocfs2_xattr_set_acl_access,
+ .get = ocfs2_xattr_get_acl,
+ .set = ocfs2_xattr_set_acl,
};
struct xattr_handler ocfs2_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
.list = ocfs2_xattr_list_acl_default,
- .get = ocfs2_xattr_get_acl_default,
- .set = ocfs2_xattr_set_acl_default,
+ .get = ocfs2_xattr_get_acl,
+ .set = ocfs2_xattr_set_acl,
};
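
The ocfs2 ACL hunks above fold the four per-type get/set wrappers into one get and one set callback: the ACL type now rides in the handler's new .flags field and is handed back to the callback as the trailing type argument, while the callbacks take a dentry rather than an inode. Below is a standalone C sketch of that dispatch shape; the struct layout, names and ACL constants are illustrative stand-ins, not the kernel's exact definitions. The same dentry-based callback signature repeats for the security, trusted and user handlers later in the patch.

/* Sketch: one callback shared by two handlers, type carried in .flags. */
#include <stdio.h>
#include <string.h>

enum { ACL_ACCESS = 1, ACL_DEFAULT = 2 };	/* illustrative values */

struct handler {
	const char *prefix;
	int flags;				/* ACL type for this handler */
	int (*get)(const char *name, int type);
};

static int acl_get(const char *name, int type)
{
	if (strcmp(name, "") != 0)		/* only the bare prefix is valid */
		return -1;
	printf("fetch ACL of type %d\n", type);	/* real code would read the ACL */
	return 0;
}

static const struct handler access_handler  = { "system.posix_acl_access",  ACL_ACCESS,  acl_get };
static const struct handler default_handler = { "system.posix_acl_default", ACL_DEFAULT, acl_get };

int main(void)
{
	access_handler.get("", access_handler.flags);	/* type comes from .flags */
	default_handler.get("", default_handler.flags);
	return 0;
}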
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 7c7198a5bc9..fb4e672579b 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7190,8 +7190,8 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
* wait on them - the truncate_inode_pages() call later will
* do that for us.
*/
- ret = do_sync_mapping_range(inode->i_mapping, range_start,
- range_end - 1, SYNC_FILE_RANGE_WRITE);
+ ret = filemap_fdatawrite_range(inode->i_mapping, range_start,
+ range_end - 1);
if (ret)
mlog_errno(ret);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index deb2b132ae5..3dae4a13f6e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -547,6 +547,9 @@ bail:
*
* called like this: dio->get_blocks(dio->inode, fs_startblk,
* fs_count, map_bh, dio->rw == WRITE);
+ *
+ * Note that we never bother to allocate blocks here, and thus ignore the
+ * create argument.
*/
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
@@ -563,14 +566,6 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
- /*
- * Any write past EOF is not allowed because we'd be extending.
- */
- if (create && (iblock + max_blocks) > inode_blocks) {
- ret = -EIO;
- goto bail;
- }
-
/* This figures out the size of the next contiguous block, and
* our logical offset */
ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
@@ -582,15 +577,6 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
goto bail;
}
- if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno && create) {
- ocfs2_error(inode->i_sb,
- "Inode %llu has a hole at block %llu\n",
- (unsigned long long)OCFS2_I(inode)->ip_blkno,
- (unsigned long long)iblock);
- ret = -EROFS;
- goto bail;
- }
-
/* We should already CoW the refcounted extent. */
BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
/*
@@ -601,20 +587,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
*/
if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
map_bh(bh_result, inode->i_sb, p_blkno);
- else {
- /*
- * ocfs2_prepare_inode_for_write() should have caught
- * the case where we'd be filling a hole and triggered
- * a buffered write instead.
- */
- if (create) {
- ret = -EIO;
- mlog_errno(ret);
- goto bail;
- }
-
+ else
clear_buffer_mapped(bh_result);
- }
/* make sure we don't map more than max_blocks blocks here as
that's all the kernel will handle at this point. */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index de059f49058..3d30a1c974a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2006,7 +2006,7 @@ out_dio:
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
- if ((file->f_flags & O_SYNC && !direct_io) || IS_SYNC(inode)) {
+ if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode)) {
ret = filemap_fdatawrite_range(file->f_mapping, pos,
pos + count - 1);
if (ret < 0)
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h
index e5df9d170b0..123bc520a2c 100644
--- a/fs/ocfs2/quota.h
+++ b/fs/ocfs2/quota.h
@@ -17,10 +17,6 @@
#include "ocfs2.h"
-/* Common stuff */
-/* id number of quota format */
-#define QFMT_OCFS2 3
-
/*
* In-memory structures
*/
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 1a2c50a759f..21f9e71223c 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -1325,7 +1325,7 @@ out:
return status;
}
-static struct quota_format_ops ocfs2_format_ops = {
+static const struct quota_format_ops ocfs2_format_ops = {
.check_quota_file = ocfs2_local_check_quota_file,
.read_file_info = ocfs2_local_read_info,
.write_file_info = ocfs2_global_write_info,
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index fe3419068df..43c114831c0 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -205,8 +205,6 @@ static int ocfs2_get_xattr_tree_value_root(struct super_block *sb,
int offset,
struct ocfs2_xattr_value_root **xv,
struct buffer_head **bh);
-static int ocfs2_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags);
static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb)
{
@@ -6978,9 +6976,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
ret = ocfs2_init_security_get(inode, dir, &si);
if (!ret) {
- ret = ocfs2_xattr_security_set(inode, si.name,
- si.value, si.value_len,
- XATTR_CREATE);
+ ret = ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
+ si.name, si.value, si.value_len,
+ XATTR_CREATE);
if (ret) {
mlog_errno(ret);
goto leave;
@@ -7008,9 +7006,9 @@ leave:
/*
* 'security' attributes support
*/
-static size_t ocfs2_xattr_security_list(struct inode *inode, char *list,
+static size_t ocfs2_xattr_security_list(struct dentry *dentry, char *list,
size_t list_size, const char *name,
- size_t name_len)
+ size_t name_len, int type)
{
const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
@@ -7023,23 +7021,23 @@ static size_t ocfs2_xattr_security_list(struct inode *inode, char *list,
return total_len;
}
-static int ocfs2_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int ocfs2_xattr_security_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY, name,
- buffer, size);
+ return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
+ name, buffer, size);
}
-static int ocfs2_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int ocfs2_xattr_security_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY, name, value,
- size, flags);
+ return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_SECURITY,
+ name, value, size, flags);
}
int ocfs2_init_security_get(struct inode *inode,
@@ -7076,9 +7074,9 @@ struct xattr_handler ocfs2_xattr_security_handler = {
/*
* 'trusted' attributes support
*/
-static size_t ocfs2_xattr_trusted_list(struct inode *inode, char *list,
+static size_t ocfs2_xattr_trusted_list(struct dentry *dentry, char *list,
size_t list_size, const char *name,
- size_t name_len)
+ size_t name_len, int type)
{
const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
@@ -7091,23 +7089,23 @@ static size_t ocfs2_xattr_trusted_list(struct inode *inode, char *list,
return total_len;
}
-static int ocfs2_xattr_trusted_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int ocfs2_xattr_trusted_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED, name,
- buffer, size);
+ return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
+ name, buffer, size);
}
-static int ocfs2_xattr_trusted_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int ocfs2_xattr_trusted_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED, name, value,
- size, flags);
+ return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_TRUSTED,
+ name, value, size, flags);
}
struct xattr_handler ocfs2_xattr_trusted_handler = {
@@ -7120,13 +7118,13 @@ struct xattr_handler ocfs2_xattr_trusted_handler = {
/*
* 'user' attributes support
*/
-static size_t ocfs2_xattr_user_list(struct inode *inode, char *list,
+static size_t ocfs2_xattr_user_list(struct dentry *dentry, char *list,
size_t list_size, const char *name,
- size_t name_len)
+ size_t name_len, int type)
{
const size_t prefix_len = XATTR_USER_PREFIX_LEN;
const size_t total_len = prefix_len + name_len + 1;
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return 0;
@@ -7139,31 +7137,31 @@ static size_t ocfs2_xattr_user_list(struct inode *inode, char *list,
return total_len;
}
-static int ocfs2_xattr_user_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int ocfs2_xattr_user_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
if (strcmp(name, "") == 0)
return -EINVAL;
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return -EOPNOTSUPP;
- return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_USER, name,
+ return ocfs2_xattr_get(dentry->d_inode, OCFS2_XATTR_INDEX_USER, name,
buffer, size);
}
-static int ocfs2_xattr_user_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int ocfs2_xattr_user_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
if (strcmp(name, "") == 0)
return -EINVAL;
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return -EOPNOTSUPP;
- return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER, name, value,
- size, flags);
+ return ocfs2_xattr_set(dentry->d_inode, OCFS2_XATTR_INDEX_USER,
+ name, value, size, flags);
}
struct xattr_handler ocfs2_xattr_user_handler = {
diff --git a/fs/open.c b/fs/open.c
index b4b31d277f3..040cef72bc0 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -30,6 +30,9 @@
#include <linux/audit.h>
#include <linux/falloc.h>
#include <linux/fs_struct.h>
+#include <linux/ima.h>
+
+#include "internal.h"
int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -818,15 +821,14 @@ static inline int __get_file_write_access(struct inode *inode,
}
static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
- int flags, struct file *f,
+ struct file *f,
int (*open)(struct inode *, struct file *),
const struct cred *cred)
{
struct inode *inode;
int error;
- f->f_flags = flags;
- f->f_mode = (__force fmode_t)((flags+1) & O_ACCMODE) | FMODE_LSEEK |
+ f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
FMODE_PREAD | FMODE_PWRITE;
inode = dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
@@ -855,6 +857,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
if (error)
goto cleanup_all;
}
+ ima_counts_get(f);
f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
@@ -926,7 +929,6 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry
if (IS_ERR(dentry))
goto out_err;
nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
- nd->intent.open.flags - 1,
nd->intent.open.file,
open, cred);
out:
@@ -945,7 +947,7 @@ EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
*
* Note that this function destroys the original nameidata
*/
-struct file *nameidata_to_filp(struct nameidata *nd, int flags)
+struct file *nameidata_to_filp(struct nameidata *nd)
{
const struct cred *cred = current_cred();
struct file *filp;
@@ -954,7 +956,7 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags)
filp = nd->intent.open.file;
/* Has the filesystem initialised the file for us? */
if (filp->f_path.dentry == NULL)
- filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp,
+ filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp,
NULL, cred);
else
path_put(&nd->path);
@@ -993,7 +995,8 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
return ERR_PTR(error);
}
- return __dentry_open(dentry, mnt, flags, f, NULL, cred);
+ f->f_flags = flags;
+ return __dentry_open(dentry, mnt, f, NULL, cred);
}
EXPORT_SYMBOL(dentry_open);
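
With this change __dentry_open() no longer receives the open flags as a separate argument; callers store them in f->f_flags first, and the f_mode bits are derived with OPEN_FMODE(), which encapsulates the removed expression ((flags + 1) & O_ACCMODE). Adding one to the access-mode value maps O_RDONLY/O_WRONLY/O_RDWR (0/1/2) onto FMODE_READ, FMODE_WRITE and both (1/2/3). A standalone check of that identity, with the usual Linux values hard-coded purely for illustration:

/* Sketch: the (flags + 1) & O_ACCMODE trick behind OPEN_FMODE(). */
#include <assert.h>

#define ACC_RDONLY  0	/* O_RDONLY */
#define ACC_WRONLY  1	/* O_WRONLY */
#define ACC_RDWR    2	/* O_RDWR   */
#define ACCMODE     3	/* O_ACCMODE */
#define FM_READ     1	/* FMODE_READ  */
#define FM_WRITE    2	/* FMODE_WRITE */

int main(void)
{
	assert(((ACC_RDONLY + 1) & ACCMODE) == FM_READ);
	assert(((ACC_WRONLY + 1) & ACCMODE) == FM_WRITE);
	assert(((ACC_RDWR   + 1) & ACCMODE) == (FM_READ | FM_WRITE));
	return 0;
}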
diff --git a/fs/pipe.c b/fs/pipe.c
index ae17d026aaa..37ba29ff315 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -906,17 +906,6 @@ void free_pipe_info(struct inode *inode)
}
static struct vfsmount *pipe_mnt __read_mostly;
-static int pipefs_delete_dentry(struct dentry *dentry)
-{
- /*
- * At creation time, we pretended this dentry was hashed
- * (by clearing DCACHE_UNHASHED bit in d_flags)
- * At delete time, we restore the truth : not hashed.
- * (so that dput() can proceed correctly)
- */
- dentry->d_flags |= DCACHE_UNHASHED;
- return 0;
-}
/*
* pipefs_dname() is called from d_path().
@@ -928,7 +917,6 @@ static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
}
static const struct dentry_operations pipefs_dentry_operations = {
- .d_delete = pipefs_delete_dentry,
.d_dname = pipefs_dname,
};
@@ -974,7 +962,7 @@ struct file *create_write_pipe(int flags)
int err;
struct inode *inode;
struct file *f;
- struct dentry *dentry;
+ struct path path;
struct qstr name = { .name = "" };
err = -ENFILE;
@@ -983,21 +971,16 @@ struct file *create_write_pipe(int flags)
goto err;
err = -ENOMEM;
- dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name);
- if (!dentry)
+ path.dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name);
+ if (!path.dentry)
goto err_inode;
+ path.mnt = mntget(pipe_mnt);
- dentry->d_op = &pipefs_dentry_operations;
- /*
- * We dont want to publish this dentry into global dentry hash table.
- * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
- * This permits a working /proc/$pid/fd/XXX on pipes
- */
- dentry->d_flags &= ~DCACHE_UNHASHED;
- d_instantiate(dentry, inode);
+ path.dentry->d_op = &pipefs_dentry_operations;
+ d_instantiate(path.dentry, inode);
err = -ENFILE;
- f = alloc_file(pipe_mnt, dentry, FMODE_WRITE, &write_pipefifo_fops);
+ f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
if (!f)
goto err_dentry;
f->f_mapping = inode->i_mapping;
@@ -1009,7 +992,7 @@ struct file *create_write_pipe(int flags)
err_dentry:
free_pipe_info(inode);
- dput(dentry);
+ path_put(&path);
return ERR_PTR(err);
err_inode:
@@ -1028,20 +1011,14 @@ void free_write_pipe(struct file *f)
struct file *create_read_pipe(struct file *wrf, int flags)
{
- struct file *f = get_empty_filp();
+ /* Grab pipe from the writer */
+ struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
+ &read_pipefifo_fops);
if (!f)
return ERR_PTR(-ENFILE);
- /* Grab pipe from the writer */
- f->f_path = wrf->f_path;
path_get(&wrf->f_path);
- f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping;
-
- f->f_pos = 0;
f->f_flags = O_RDONLY | (flags & O_NONBLOCK);
- f->f_op = &read_pipefifo_fops;
- f->f_mode = FMODE_READ;
- f->f_version = 0;
return f;
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 4badde179b1..f560325c444 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -134,13 +134,16 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
* simple bit tests.
*/
static const char *task_state_array[] = {
- "R (running)", /* 0 */
- "S (sleeping)", /* 1 */
- "D (disk sleep)", /* 2 */
- "T (stopped)", /* 4 */
- "T (tracing stop)", /* 8 */
- "Z (zombie)", /* 16 */
- "X (dead)" /* 32 */
+ "R (running)", /* 0 */
+ "S (sleeping)", /* 1 */
+ "D (disk sleep)", /* 2 */
+ "T (stopped)", /* 4 */
+ "t (tracing stop)", /* 8 */
+ "Z (zombie)", /* 16 */
+ "X (dead)", /* 32 */
+ "x (dead)", /* 64 */
+ "K (wakekill)", /* 128 */
+ "W (waking)", /* 256 */
};
static inline const char *get_task_state(struct task_struct *tsk)
@@ -148,6 +151,8 @@ static inline const char *get_task_state(struct task_struct *tsk)
unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
const char **p = &task_state_array[0];
+ BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
+
while (state) {
p++;
state >>= 1;
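
The widened task_state_array now holds one entry per reportable state bit, up through W (waking) at 256, plus the leading "R (running)" slot for state 0, and the added BUILD_BUG_ON pins the table length to 1 + ilog2(TASK_STATE_MAX). With ten entries that implies TASK_STATE_MAX of 512 at the time of this patch; that value is an inference from the table, not quoted from the header. A standalone sketch of the same invariant:

/* Sketch: table length must equal 1 + floor(log2(TASK_STATE_MAX)). */
#include <assert.h>

#define TASK_STATE_MAX 512	/* assumed: next bit after W (256) */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *task_state_array[] = {
	"R", "S", "D", "T", "t", "Z", "X", "x", "K", "W",
};

static int ilog2_u(unsigned v)		/* floor(log2(v)), v > 0 */
{
	int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	assert(1 + ilog2_u(TASK_STATE_MAX) == (int)ARRAY_SIZE(task_state_array));
	return 0;
}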
diff --git a/fs/proc/base.c b/fs/proc/base.c
index af643b5aefe..18d5cc62d8e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1265,6 +1265,72 @@ static const struct file_operations proc_pid_sched_operations = {
#endif
+static ssize_t comm_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct task_struct *p;
+ char buffer[TASK_COMM_LEN];
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+
+ if (same_thread_group(current, p))
+ set_task_comm(p, buffer);
+ else
+ count = -EINVAL;
+
+ put_task_struct(p);
+
+ return count;
+}
+
+static int comm_show(struct seq_file *m, void *v)
+{
+ struct inode *inode = m->private;
+ struct task_struct *p;
+
+ p = get_proc_task(inode);
+ if (!p)
+ return -ESRCH;
+
+ task_lock(p);
+ seq_printf(m, "%s\n", p->comm);
+ task_unlock(p);
+
+ put_task_struct(p);
+
+ return 0;
+}
+
+static int comm_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+
+ ret = single_open(filp, comm_show, NULL);
+ if (!ret) {
+ struct seq_file *m = filp->private_data;
+
+ m->private = inode;
+ }
+ return ret;
+}
+
+static const struct file_operations proc_pid_set_comm_operations = {
+ .open = comm_open,
+ .read = seq_read,
+ .write = comm_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* We added or removed a vma mapping the executable. The vmas are only mapped
* during exec and are not mapped with the mmap system call.
@@ -2200,7 +2266,7 @@ static const struct inode_operations proc_attr_dir_inode_operations = {
#endif
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -2504,6 +2570,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
INF("syscall", S_IRUSR, proc_pid_syscall),
#endif
@@ -2556,7 +2623,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
-#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
+#ifdef CONFIG_ELF_CORE
REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
@@ -2838,6 +2905,7 @@ static const struct pid_entry tid_base_stuff[] = {
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
+ REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
INF("syscall", S_IRUSR, proc_pid_syscall),
#endif
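
The "comm" entry registered above for both the tgid and tid directories lets a thread read its comm name through procfs and lets a writer in the same thread group rewrite it; comm_write() truncates anything longer than TASK_COMM_LEN - 1 bytes. A minimal userspace sketch of the interface, equivalent in effect to prctl(PR_SET_NAME) for the calling thread (it only works on kernels that carry this patch):

/* Rename the calling thread via the new /proc/<pid>/comm file. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/comm", "w");

	if (!f) {
		perror("open /proc/self/comm");
		return 1;
	}
	fputs("demo-name", f);	/* at most TASK_COMM_LEN - 1 bytes are kept */
	fclose(f);
	return 0;
}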
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index fa678abc9db..480cb1065ee 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -429,7 +429,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
unsigned int ino;
ino = de->low_ino;
- de_get(de);
+ pde_get(de);
spin_unlock(&proc_subdir_lock);
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
@@ -445,7 +445,7 @@ out_unlock:
return NULL;
}
if (de)
- de_put(de);
+ pde_put(de);
return ERR_PTR(error);
}
@@ -509,17 +509,17 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
struct proc_dir_entry *next;
/* filldir passes info to user space */
- de_get(de);
+ pde_get(de);
spin_unlock(&proc_subdir_lock);
if (filldir(dirent, de->name, de->namelen, filp->f_pos,
de->low_ino, de->mode >> 12) < 0) {
- de_put(de);
+ pde_put(de);
goto out;
}
spin_lock(&proc_subdir_lock);
filp->f_pos++;
next = de->next;
- de_put(de);
+ pde_put(de);
de = next;
} while (de);
spin_unlock(&proc_subdir_lock);
@@ -763,7 +763,7 @@ out:
return NULL;
}
-void free_proc_entry(struct proc_dir_entry *de)
+static void free_proc_entry(struct proc_dir_entry *de)
{
unsigned int ino = de->low_ino;
@@ -777,6 +777,12 @@ void free_proc_entry(struct proc_dir_entry *de)
kfree(de);
}
+void pde_put(struct proc_dir_entry *pde)
+{
+ if (atomic_dec_and_test(&pde->count))
+ free_proc_entry(pde);
+}
+
/*
* Remove a /proc entry and free it if it's not currently in use.
*/
@@ -845,6 +851,5 @@ continue_removing:
WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
"'%s/%s', leaking at least '%s'\n", __func__,
de->parent->name, de->name, de->subdir->name);
- if (atomic_dec_and_test(&de->count))
- free_proc_entry(de);
+ pde_put(de);
}
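
proc_lookup_de() and proc_readdir_de() now take and drop their entry references through pde_get()/pde_put(), and remove_proc_entry() reuses pde_put() instead of open-coding the dec-and-test against free_proc_entry(). The essence of the helper, shown as a standalone C11 sketch with atomic_int standing in for the kernel's atomic_t (names are illustrative):

/* Sketch: last reference dropped frees the entry, as pde_put() does. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	atomic_int count;
	const char *name;
};

static void pde_put_like(struct entry *e)
{
	if (atomic_fetch_sub(&e->count, 1) == 1) {	/* we dropped the last ref */
		printf("freeing %s\n", e->name);
		free(e);
	}
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	atomic_init(&e->count, 2);
	e->name = "demo";
	pde_put_like(e);	/* one reference still held */
	pde_put_like(e);	/* last put frees the entry */
	return 0;
}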
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d78ade30554..445a02bcaab 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -24,29 +24,6 @@
#include "internal.h"
-struct proc_dir_entry *de_get(struct proc_dir_entry *de)
-{
- atomic_inc(&de->count);
- return de;
-}
-
-/*
- * Decrements the use count and checks for deferred deletion.
- */
-void de_put(struct proc_dir_entry *de)
-{
- if (!atomic_read(&de->count)) {
- printk("de_put: entry %s already free!\n", de->name);
- return;
- }
-
- if (atomic_dec_and_test(&de->count))
- free_proc_entry(de);
-}
-
-/*
- * Decrement the use count of the proc_dir_entry.
- */
static void proc_delete_inode(struct inode *inode)
{
struct proc_dir_entry *de;
@@ -59,7 +36,7 @@ static void proc_delete_inode(struct inode *inode)
/* Let go of any associated proc directory entry */
de = PROC_I(inode)->pde;
if (de)
- de_put(de);
+ pde_put(de);
if (PROC_I(inode)->sysctl)
sysctl_head_put(PROC_I(inode)->sysctl);
clear_inode(inode);
@@ -480,7 +457,7 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
}
unlock_new_inode(inode);
} else
- de_put(de);
+ pde_put(de);
return inode;
}
@@ -495,7 +472,7 @@ int proc_fill_super(struct super_block *s)
s->s_op = &proc_sops;
s->s_time_gran = 1;
- de_get(&proc_root);
+ pde_get(&proc_root);
root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
if (!root_inode)
goto out_no_root;
@@ -509,6 +486,6 @@ int proc_fill_super(struct super_block *s)
out_no_root:
printk("proc_read_super: get root inode failed\n");
iput(root_inode);
- de_put(&proc_root);
+ pde_put(&proc_root);
return -ENOMEM;
}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 753ca37002c..1f24a3eddd1 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -61,8 +61,6 @@ extern const struct file_operations proc_pagemap_operations;
extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;
-void free_proc_entry(struct proc_dir_entry *de);
-
void proc_init_inodecache(void);
static inline struct pid *proc_pid(struct inode *inode)
@@ -101,8 +99,12 @@ unsigned long task_vsize(struct mm_struct *);
int task_statm(struct mm_struct *, int *, int *, int *, int *);
void task_mem(struct seq_file *, struct mm_struct *);
-struct proc_dir_entry *de_get(struct proc_dir_entry *de);
-void de_put(struct proc_dir_entry *de);
+static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
+{
+ atomic_inc(&pde->count);
+ return pde;
+}
+void pde_put(struct proc_dir_entry *pde);
extern struct vfsmount *proc_mnt;
int proc_fill_super(struct super_block *);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 5033ce0d254..180cf5a0bd6 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -8,6 +8,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
+#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -71,52 +72,12 @@ static const struct file_operations proc_kpagecount_operations = {
* physical page flags.
*/
-/* These macros are used to decouple internal flags from exported ones */
-
-#define KPF_LOCKED 0
-#define KPF_ERROR 1
-#define KPF_REFERENCED 2
-#define KPF_UPTODATE 3
-#define KPF_DIRTY 4
-#define KPF_LRU 5
-#define KPF_ACTIVE 6
-#define KPF_SLAB 7
-#define KPF_WRITEBACK 8
-#define KPF_RECLAIM 9
-#define KPF_BUDDY 10
-
-/* 11-20: new additions in 2.6.31 */
-#define KPF_MMAP 11
-#define KPF_ANON 12
-#define KPF_SWAPCACHE 13
-#define KPF_SWAPBACKED 14
-#define KPF_COMPOUND_HEAD 15
-#define KPF_COMPOUND_TAIL 16
-#define KPF_HUGE 17
-#define KPF_UNEVICTABLE 18
-#define KPF_HWPOISON 19
-#define KPF_NOPAGE 20
-
-#define KPF_KSM 21
-
-/* kernel hacking assistances
- * WARNING: subject to change, never rely on them!
- */
-#define KPF_RESERVED 32
-#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
-#define KPF_PRIVATE 35
-#define KPF_PRIVATE_2 36
-#define KPF_OWNER_PRIVATE 37
-#define KPF_ARCH 38
-#define KPF_UNCACHED 39
-
static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
return ((kflags >> kbit) & 1) << ubit;
}
-static u64 get_uflags(struct page *page)
+u64 stable_page_flags(struct page *page)
{
u64 k;
u64 u;
@@ -219,7 +180,7 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
else
ppage = NULL;
- if (put_user(get_uflags(ppage), out)) {
+ if (put_user(stable_page_flags(ppage), out)) {
ret = -EFAULT;
break;
}
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 7ba79a54948..123257bb356 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <asm/prom.h>
@@ -25,26 +26,27 @@ static struct proc_dir_entry *proc_device_tree;
/*
* Supply data on a read from /proc/device-tree/node/property.
*/
-static int property_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int property_proc_show(struct seq_file *m, void *v)
{
- struct property *pp = data;
- int n;
+ struct property *pp = m->private;
- if (off >= pp->length) {
- *eof = 1;
- return 0;
- }
- n = pp->length - off;
- if (n > count)
- n = count;
- else
- *eof = 1;
- memcpy(page, (char *)pp->value + off, n);
- *start = page;
- return n;
+ seq_write(m, pp->value, pp->length);
+ return 0;
+}
+
+static int property_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, property_proc_show, PDE(inode)->data);
}
+static const struct file_operations property_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = property_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* For a node with a name like "gc@10", we make symlinks called "gc"
* and "@10" to it.
@@ -63,10 +65,9 @@ __proc_device_tree_add_prop(struct proc_dir_entry *de, struct property *pp,
* Unfortunately proc_register puts each new entry
* at the beginning of the list. So we rearrange them.
*/
- ent = create_proc_read_entry(name,
- strncmp(name, "security-", 9)
- ? S_IRUGO : S_IRUSR, de,
- property_read_proc, pp);
+ ent = proc_create_data(name,
+ strncmp(name, "security-", 9) ? S_IRUGO : S_IRUSR,
+ de, &property_proc_fops, pp);
if (ent == NULL)
return NULL;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 2a1bef9203c..47c03f4336b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -650,6 +650,50 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return err;
}
+static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+{
+ u64 pme = 0;
+ if (pte_present(pte))
+ pme = PM_PFRAME(pte_pfn(pte) + offset)
+ | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+ return pme;
+}
+
+static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct vm_area_struct *vma;
+ struct pagemapread *pm = walk->private;
+ struct hstate *hs = NULL;
+ int err = 0;
+
+ vma = find_vma(walk->mm, addr);
+ if (vma)
+ hs = hstate_vma(vma);
+ for (; addr != end; addr += PAGE_SIZE) {
+ u64 pfn = PM_NOT_PRESENT;
+
+ if (vma && (addr >= vma->vm_end)) {
+ vma = find_vma(walk->mm, addr);
+ if (vma)
+ hs = hstate_vma(vma);
+ }
+
+ if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
+ /* calculate pfn of the "raw" page in the hugepage. */
+ int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
+ pfn = huge_pte_to_pagemap_entry(*pte, offset);
+ }
+ err = add_to_pagemap(addr, pfn, pm);
+ if (err)
+ return err;
+ }
+
+ cond_resched();
+
+ return err;
+}
+
/*
* /proc/pid/pagemap - an array mapping virtual pages to pfns
*
@@ -742,6 +786,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
+ pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
pagemap_walk.mm = mm;
pagemap_walk.private = &pm;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 8f5c05d3dbd..5d9fd64ef81 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -110,9 +110,13 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
}
}
- size += (*text = mm->end_code - mm->start_code);
- size += (*data = mm->start_stack - mm->start_data);
+ *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
+ >> PAGE_SHIFT;
+ *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
+ >> PAGE_SHIFT;
up_read(&mm->mmap_sem);
+ size >>= PAGE_SHIFT;
+ size += *text + *data;
*resident = size;
return size;
}
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 32f5d131a64..22e0d60e53e 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -17,13 +17,6 @@
#include <linux/bitops.h>
#include "qnx4.h"
-#if 0
-int qnx4_new_block(struct super_block *sb)
-{
- return 0;
-}
-#endif /* 0 */
-
static void count_bits(register const char *bmPart, register int size,
int *const tf)
{
@@ -35,22 +28,7 @@ static void count_bits(register const char *bmPart, register int size,
}
do {
b = *bmPart++;
- if ((b & 1) == 0)
- tot++;
- if ((b & 2) == 0)
- tot++;
- if ((b & 4) == 0)
- tot++;
- if ((b & 8) == 0)
- tot++;
- if ((b & 16) == 0)
- tot++;
- if ((b & 32) == 0)
- tot++;
- if ((b & 64) == 0)
- tot++;
- if ((b & 128) == 0)
- tot++;
+ tot += 8 - hweight8(b);
size--;
} while (size != 0);
*tf = tot;
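
count_bits() now counts the free (zero) bits of each bitmap byte as 8 - hweight8(b) instead of testing all eight bits individually; hweight8() returns the population count of a byte. A standalone equivalent using the GCC/Clang popcount builtin, for example 0xF0 has four clear bits:

/* Userspace analogue of 8 - hweight8(b): count the free (zero) bits. */
#include <stdio.h>

static int free_bits(unsigned char b)
{
	return 8 - __builtin_popcount(b);
}

int main(void)
{
	printf("%d\n", free_bits(0xF0));	/* prints 4 */
	return 0;
}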
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 449f5a66dd3..ebf3440d28c 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -64,25 +64,7 @@ static struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
result = sb_getblk(inode->i_sb, nr);
return result;
}
- if (!create) {
- return NULL;
- }
-#if 0
- tmp = qnx4_new_block(inode->i_sb);
- if (!tmp) {
- return NULL;
- }
- result = sb_getblk(inode->i_sb, tmp);
- if (tst) {
- qnx4_free_block(inode->i_sb, tmp);
- brelse(result);
- goto repeat;
- }
- tst = tmp;
-#endif
- inode->i_ctime = CURRENT_TIME_SEC;
- mark_inode_dirty(inode);
- return result;
+ return NULL;
}
struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
@@ -113,8 +95,6 @@ static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_h
if ( phys ) {
// logical block is before EOF
map_bh(bh, inode->i_sb, phys);
- } else if ( create ) {
- // to be done.
}
return 0;
}
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 353e78a9ebe..efc02ebb8c7 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -46,12 +46,14 @@ config QFMT_V1
format say Y here.
config QFMT_V2
- tristate "Quota format v2 support"
+ tristate "Quota format vfsv0 and vfsv1 support"
depends on QUOTA
select QUOTA_TREE
help
- This quota format allows using quotas with 32-bit UIDs/GIDs. If you
- need this functionality say Y here.
+ This config option enables kernel support for vfsv0 and vfsv1 quota
+ formats. Both these formats support 32-bit UIDs/GIDs and vfsv1 format
+ also supports 64-bit inode and block quota limits. If you need this
+ functionality say Y here.
config QUOTACTL
bool
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index eb5a755718f..cd6bb9a33c1 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2164,7 +2164,9 @@ int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
struct dentry *dentry;
int error;
+ mutex_lock(&sb->s_root->d_inode->i_mutex);
dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
+ mutex_unlock(&sb->s_root->d_inode->i_mutex);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 0edcf42b177..2ae757e9c00 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -204,7 +204,7 @@ out:
return ret;
}
-static struct quota_format_ops v1_format_ops = {
+static const struct quota_format_ops v1_format_ops = {
.check_quota_file = v1_check_quota_file,
.read_file_info = v1_read_file_info,
.write_file_info = v1_write_file_info,
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index a5475fb1ae4..3dfc23e0213 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -23,14 +23,23 @@ MODULE_LICENSE("GPL");
#define __QUOTA_V2_PARANOIA
-static void v2_mem2diskdqb(void *dp, struct dquot *dquot);
-static void v2_disk2memdqb(struct dquot *dquot, void *dp);
-static int v2_is_id(void *dp, struct dquot *dquot);
-
-static struct qtree_fmt_operations v2_qtree_ops = {
- .mem2disk_dqblk = v2_mem2diskdqb,
- .disk2mem_dqblk = v2_disk2memdqb,
- .is_id = v2_is_id,
+static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot);
+static void v2r0_disk2memdqb(struct dquot *dquot, void *dp);
+static int v2r0_is_id(void *dp, struct dquot *dquot);
+static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot);
+static void v2r1_disk2memdqb(struct dquot *dquot, void *dp);
+static int v2r1_is_id(void *dp, struct dquot *dquot);
+
+static struct qtree_fmt_operations v2r0_qtree_ops = {
+ .mem2disk_dqblk = v2r0_mem2diskdqb,
+ .disk2mem_dqblk = v2r0_disk2memdqb,
+ .is_id = v2r0_is_id,
+};
+
+static struct qtree_fmt_operations v2r1_qtree_ops = {
+ .mem2disk_dqblk = v2r1_mem2diskdqb,
+ .disk2mem_dqblk = v2r1_disk2memdqb,
+ .is_id = v2r1_is_id,
};
#define QUOTABLOCK_BITS 10
@@ -46,23 +55,33 @@ static inline qsize_t v2_qbtos(qsize_t blocks)
return blocks << QUOTABLOCK_BITS;
}
+static int v2_read_header(struct super_block *sb, int type,
+ struct v2_disk_dqheader *dqhead)
+{
+ ssize_t size;
+
+ size = sb->s_op->quota_read(sb, type, (char *)dqhead,
+ sizeof(struct v2_disk_dqheader), 0);
+ if (size != sizeof(struct v2_disk_dqheader)) {
+ printk(KERN_WARNING "quota_v2: Failed header read:"
+ " expected=%zd got=%zd\n",
+ sizeof(struct v2_disk_dqheader), size);
+ return 0;
+ }
+ return 1;
+}
+
/* Check whether given file is really vfsv0 quotafile */
static int v2_check_quota_file(struct super_block *sb, int type)
{
struct v2_disk_dqheader dqhead;
- ssize_t size;
static const uint quota_magics[] = V2_INITQMAGICS;
static const uint quota_versions[] = V2_INITQVERSIONS;
- size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
- sizeof(struct v2_disk_dqheader), 0);
- if (size != sizeof(struct v2_disk_dqheader)) {
- printk("quota_v2: failed read expected=%zd got=%zd\n",
- sizeof(struct v2_disk_dqheader), size);
+ if (!v2_read_header(sb, type, &dqhead))
return 0;
- }
if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] ||
- le32_to_cpu(dqhead.dqh_version) != quota_versions[type])
+ le32_to_cpu(dqhead.dqh_version) > quota_versions[type])
return 0;
return 1;
}
@@ -71,14 +90,20 @@ static int v2_check_quota_file(struct super_block *sb, int type)
static int v2_read_file_info(struct super_block *sb, int type)
{
struct v2_disk_dqinfo dinfo;
+ struct v2_disk_dqheader dqhead;
struct mem_dqinfo *info = sb_dqinfo(sb, type);
struct qtree_mem_dqinfo *qinfo;
ssize_t size;
+ unsigned int version;
+
+ if (!v2_read_header(sb, type, &dqhead))
+ return 0;
+ version = le32_to_cpu(dqhead.dqh_version);
size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
if (size != sizeof(struct v2_disk_dqinfo)) {
- printk(KERN_WARNING "Can't read info structure on device %s.\n",
+ printk(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
sb->s_id);
return -1;
}
@@ -89,9 +114,15 @@ static int v2_read_file_info(struct super_block *sb, int type)
return -1;
}
qinfo = info->dqi_priv;
- /* limits are stored as unsigned 32-bit data */
- info->dqi_maxblimit = 0xffffffff;
- info->dqi_maxilimit = 0xffffffff;
+ if (version == 0) {
+ /* limits are stored as unsigned 32-bit data */
+ info->dqi_maxblimit = 0xffffffff;
+ info->dqi_maxilimit = 0xffffffff;
+ } else {
+ /* used space is stored as unsigned 64-bit value */
+ info->dqi_maxblimit = 0xffffffffffffffff; /* 2^64-1 */
+ info->dqi_maxilimit = 0xffffffffffffffff;
+ }
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
@@ -103,8 +134,13 @@ static int v2_read_file_info(struct super_block *sb, int type)
qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS;
qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS;
qinfo->dqi_qtree_depth = qtree_depth(qinfo);
- qinfo->dqi_entry_size = sizeof(struct v2_disk_dqblk);
- qinfo->dqi_ops = &v2_qtree_ops;
+ if (version == 0) {
+ qinfo->dqi_entry_size = sizeof(struct v2r0_disk_dqblk);
+ qinfo->dqi_ops = &v2r0_qtree_ops;
+ } else {
+ qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk);
+ qinfo->dqi_ops = &v2r1_qtree_ops;
+ }
return 0;
}
@@ -135,9 +171,9 @@ static int v2_write_file_info(struct super_block *sb, int type)
return 0;
}
-static void v2_disk2memdqb(struct dquot *dquot, void *dp)
+static void v2r0_disk2memdqb(struct dquot *dquot, void *dp)
{
- struct v2_disk_dqblk *d = dp, empty;
+ struct v2r0_disk_dqblk *d = dp, empty;
struct mem_dqblk *m = &dquot->dq_dqb;
m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit);
@@ -149,15 +185,15 @@ static void v2_disk2memdqb(struct dquot *dquot, void *dp)
m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
m->dqb_btime = le64_to_cpu(d->dqb_btime);
/* We need to escape back all-zero structure */
- memset(&empty, 0, sizeof(struct v2_disk_dqblk));
+ memset(&empty, 0, sizeof(struct v2r0_disk_dqblk));
empty.dqb_itime = cpu_to_le64(1);
- if (!memcmp(&empty, dp, sizeof(struct v2_disk_dqblk)))
+ if (!memcmp(&empty, dp, sizeof(struct v2r0_disk_dqblk)))
m->dqb_itime = 0;
}
-static void v2_mem2diskdqb(void *dp, struct dquot *dquot)
+static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot)
{
- struct v2_disk_dqblk *d = dp;
+ struct v2r0_disk_dqblk *d = dp;
struct mem_dqblk *m = &dquot->dq_dqb;
struct qtree_mem_dqinfo *info =
sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
@@ -175,9 +211,60 @@ static void v2_mem2diskdqb(void *dp, struct dquot *dquot)
d->dqb_itime = cpu_to_le64(1);
}
-static int v2_is_id(void *dp, struct dquot *dquot)
+static int v2r0_is_id(void *dp, struct dquot *dquot)
+{
+ struct v2r0_disk_dqblk *d = dp;
+ struct qtree_mem_dqinfo *info =
+ sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+
+ if (qtree_entry_unused(info, dp))
+ return 0;
+ return le32_to_cpu(d->dqb_id) == dquot->dq_id;
+}
+
+static void v2r1_disk2memdqb(struct dquot *dquot, void *dp)
+{
+ struct v2r1_disk_dqblk *d = dp, empty;
+ struct mem_dqblk *m = &dquot->dq_dqb;
+
+ m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
+ m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
+ m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
+ m->dqb_itime = le64_to_cpu(d->dqb_itime);
+ m->dqb_bhardlimit = v2_qbtos(le64_to_cpu(d->dqb_bhardlimit));
+ m->dqb_bsoftlimit = v2_qbtos(le64_to_cpu(d->dqb_bsoftlimit));
+ m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
+ m->dqb_btime = le64_to_cpu(d->dqb_btime);
+ /* We need to escape back all-zero structure */
+ memset(&empty, 0, sizeof(struct v2r1_disk_dqblk));
+ empty.dqb_itime = cpu_to_le64(1);
+ if (!memcmp(&empty, dp, sizeof(struct v2r1_disk_dqblk)))
+ m->dqb_itime = 0;
+}
+
+static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot)
+{
+ struct v2r1_disk_dqblk *d = dp;
+ struct mem_dqblk *m = &dquot->dq_dqb;
+ struct qtree_mem_dqinfo *info =
+ sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
+
+ d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
+ d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
+ d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
+ d->dqb_itime = cpu_to_le64(m->dqb_itime);
+ d->dqb_bhardlimit = cpu_to_le64(v2_stoqb(m->dqb_bhardlimit));
+ d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit));
+ d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
+ d->dqb_btime = cpu_to_le64(m->dqb_btime);
+ d->dqb_id = cpu_to_le32(dquot->dq_id);
+ if (qtree_entry_unused(info, dp))
+ d->dqb_itime = cpu_to_le64(1);
+}
+
+static int v2r1_is_id(void *dp, struct dquot *dquot)
{
- struct v2_disk_dqblk *d = dp;
+ struct v2r1_disk_dqblk *d = dp;
struct qtree_mem_dqinfo *info =
sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
@@ -207,7 +294,7 @@ static int v2_free_file_info(struct super_block *sb, int type)
return 0;
}
-static struct quota_format_ops v2_format_ops = {
+static const struct quota_format_ops v2_format_ops = {
.check_quota_file = v2_check_quota_file,
.read_file_info = v2_read_file_info,
.write_file_info = v2_write_file_info,
@@ -217,20 +304,32 @@ static struct quota_format_ops v2_format_ops = {
.release_dqblk = v2_release_dquot,
};
-static struct quota_format_type v2_quota_format = {
+static struct quota_format_type v2r0_quota_format = {
.qf_fmt_id = QFMT_VFS_V0,
.qf_ops = &v2_format_ops,
.qf_owner = THIS_MODULE
};
+static struct quota_format_type v2r1_quota_format = {
+ .qf_fmt_id = QFMT_VFS_V1,
+ .qf_ops = &v2_format_ops,
+ .qf_owner = THIS_MODULE
+};
+
static int __init init_v2_quota_format(void)
{
- return register_quota_format(&v2_quota_format);
+ int ret;
+
+ ret = register_quota_format(&v2r0_quota_format);
+ if (ret)
+ return ret;
+ return register_quota_format(&v2r1_quota_format);
}
static void __exit exit_v2_quota_format(void)
{
- unregister_quota_format(&v2_quota_format);
+ unregister_quota_format(&v2r0_quota_format);
+ unregister_quota_format(&v2r1_quota_format);
}
module_init(init_v2_quota_format);
diff --git a/fs/quota/quotaio_v2.h b/fs/quota/quotaio_v2.h
index 530fe580685..f1966b42c2f 100644
--- a/fs/quota/quotaio_v2.h
+++ b/fs/quota/quotaio_v2.h
@@ -17,8 +17,8 @@
}
#define V2_INITQVERSIONS {\
- 0, /* USRQUOTA */\
- 0 /* GRPQUOTA */\
+ 1, /* USRQUOTA */\
+ 1 /* GRPQUOTA */\
}
/* First generic header */
@@ -32,7 +32,7 @@ struct v2_disk_dqheader {
* (as it appears on disk) - the file is a radix tree whose leaves point
* to blocks of these structures.
*/
-struct v2_disk_dqblk {
+struct v2r0_disk_dqblk {
__le32 dqb_id; /* id this quota applies to */
__le32 dqb_ihardlimit; /* absolute limit on allocated inodes */
__le32 dqb_isoftlimit; /* preferred inode limit */
@@ -44,6 +44,19 @@ struct v2_disk_dqblk {
__le64 dqb_itime; /* time limit for excessive inode use */
};
+struct v2r1_disk_dqblk {
+ __le32 dqb_id; /* id this quota applies to */
+ __le32 dqb_pad;
+ __le64 dqb_ihardlimit; /* absolute limit on allocated inodes */
+ __le64 dqb_isoftlimit; /* preferred inode limit */
+ __le64 dqb_curinodes; /* current # allocated inodes */
+ __le64 dqb_bhardlimit; /* absolute limit on disk space (in QUOTABLOCK_SIZE) */
+ __le64 dqb_bsoftlimit; /* preferred limit on disk space (in QUOTABLOCK_SIZE) */
+ __le64 dqb_curspace; /* current space occupied (in bytes) */
+ __le64 dqb_btime; /* time limit for excessive disk use */
+ __le64 dqb_itime; /* time limit for excessive inode use */
+};
+
/* Header with type and version specific information */
struct v2_disk_dqinfo {
__le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */
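
The new vfsv1 on-disk entry adds a pad word and widens the limit fields to 64 bits, so struct v2r1_disk_dqblk is two 32-bit fields plus eight 64-bit fields, 72 bytes with no padding on common ABIs; that is the dqi_entry_size the version-1 branch in quota_v2.c selects. A standalone size check, mirroring the layout with host-endian integers purely to exercise the arithmetic (the real structure uses __le32/__le64):

/* Sketch: expected size of the vfsv1 quota entry on typical targets. */
#include <assert.h>
#include <stdint.h>

struct v2r1_disk_dqblk_like {
	uint32_t dqb_id;
	uint32_t dqb_pad;
	uint64_t dqb_ihardlimit;
	uint64_t dqb_isoftlimit;
	uint64_t dqb_curinodes;
	uint64_t dqb_bhardlimit;
	uint64_t dqb_bsoftlimit;
	uint64_t dqb_curspace;
	uint64_t dqb_btime;
	uint64_t dqb_itime;
};

int main(void)
{
	assert(sizeof(struct v2r1_disk_dqblk_like) == 72);	/* expected on common ABIs */
	return 0;
}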
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 32fae4040eb..2efc57173fd 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -60,7 +60,7 @@ const struct inode_operations ramfs_file_inode_operations = {
*/
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
- unsigned long npages, xpages, loop, limit;
+ unsigned long npages, xpages, loop;
struct page *pages;
unsigned order;
void *data;
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 6a9e30c041d..792b3cb2cd1 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -7,7 +7,11 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs.o
reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
hashes.o tail_conversion.o journal.o resize.o \
- item_ops.o ioctl.o procfs.o xattr.o lock.o
+ item_ops.o ioctl.o xattr.o lock.o
+
+ifeq ($(CONFIG_REISERFS_PROC_INFO),y)
+reiserfs-objs += procfs.o
+endif
ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
reiserfs-objs += xattr_user.o xattr_trusted.o
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 3a28e7751b3..290ae38fca8 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2538,6 +2538,12 @@ static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
return reiserfs_write_full_page(page, wbc);
}
+static void reiserfs_truncate_failed_write(struct inode *inode)
+{
+ truncate_inode_pages(inode->i_mapping, inode->i_size);
+ reiserfs_truncate_file(inode, 0);
+}
+
static int reiserfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -2604,6 +2610,8 @@ static int reiserfs_write_begin(struct file *file,
if (ret) {
unlock_page(page);
page_cache_release(page);
+ /* Truncate allocated blocks */
+ reiserfs_truncate_failed_write(inode);
}
return ret;
}
@@ -2701,9 +2709,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
** transaction tracking stuff when the size changes. So, we have
** to do the i_size updates here.
*/
- pos += copied;
-
- if (pos > inode->i_size) {
+ if (pos + copied > inode->i_size) {
struct reiserfs_transaction_handle myth;
lock_depth = reiserfs_write_lock_once(inode->i_sb);
locked = true;
@@ -2721,7 +2727,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
goto journal_error;
reiserfs_update_inode_transaction(inode);
- inode->i_size = pos;
+ inode->i_size = pos + copied;
/*
* this will just nest into our transaction. It's important
* to use mark_inode_dirty so the inode gets pushed around on the
@@ -2751,6 +2757,10 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
reiserfs_write_unlock_once(inode->i_sb, lock_depth);
unlock_page(page);
page_cache_release(page);
+
+ if (pos + len > inode->i_size)
+ reiserfs_truncate_failed_write(inode);
+
return ret == 0 ? copied : ret;
journal_error:
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 9229e5514a4..7a9981196c1 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -17,8 +17,6 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
-#ifdef CONFIG_REISERFS_PROC_INFO
-
/*
* LOCKING:
*
@@ -48,14 +46,6 @@ static int show_version(struct seq_file *m, struct super_block *sb)
return 0;
}
-int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- *start = buffer;
- *eof = 1;
- return 0;
-}
-
#define SF( x ) ( r -> x )
#define SFP( x ) SF( s_proc_info_data.x )
#define SFPL( x ) SFP( x[ level ] )
@@ -538,19 +528,6 @@ int reiserfs_proc_info_done(struct super_block *sb)
return 0;
}
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func)
-{
- return (proc_info_root) ? create_proc_read_entry(name, 0,
- proc_info_root,
- func, NULL) : NULL;
-}
-
-void reiserfs_proc_unregister_global(const char *name)
-{
- remove_proc_entry(name, proc_info_root);
-}
-
int reiserfs_proc_info_global_init(void)
{
if (proc_info_root == NULL) {
@@ -572,48 +549,6 @@ int reiserfs_proc_info_global_done(void)
}
return 0;
}
-
-/* REISERFS_PROC_INFO */
-#else
-
-int reiserfs_proc_info_init(struct super_block *sb)
-{
- return 0;
-}
-int reiserfs_proc_info_done(struct super_block *sb)
-{
- return 0;
-}
-
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func)
-{
- return NULL;
-}
-
-void reiserfs_proc_unregister_global(const char *name)
-{;
-}
-
-int reiserfs_proc_info_global_init(void)
-{
- return 0;
-}
-int reiserfs_proc_info_global_done(void)
-{
- return 0;
-}
-
-int reiserfs_global_version_in_proc(char *buffer, char **start,
- off_t offset,
- int count, int *eof, void *data)
-{
- return 0;
-}
-
-/* REISERFS_PROC_INFO */
-#endif
-
/*
* Revision 1.1.8.2 2001/07/15 17:08:42 god
* . use get_super() in procfs.c
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 339b0baf2af..b4a7dd03bdb 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2222,8 +2222,6 @@ static int __init init_reiserfs_fs(void)
}
reiserfs_proc_info_global_init();
- reiserfs_proc_register_global("version",
- reiserfs_global_version_in_proc);
ret = register_filesystem(&reiserfs_fs_type);
@@ -2231,7 +2229,6 @@ static int __init init_reiserfs_fs(void)
return 0;
}
- reiserfs_proc_unregister_global("version");
reiserfs_proc_info_global_done();
destroy_inodecache();
@@ -2240,7 +2237,6 @@ static int __init init_reiserfs_fs(void)
static void __exit exit_reiserfs_fs(void)
{
- reiserfs_proc_unregister_global("version");
reiserfs_proc_info_global_done();
unregister_filesystem(&reiserfs_fs_type);
destroy_inodecache();
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 58aa8e75f7f..8c7033a8b67 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -48,6 +48,7 @@
#include <net/checksum.h>
#include <linux/stat.h>
#include <linux/quotaops.h>
+#include <linux/security.h>
#define PRIVROOT_NAME ".reiserfs_priv"
#define XAROOT_NAME "xattrs"
@@ -726,15 +727,14 @@ ssize_t
reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
size_t size)
{
- struct inode *inode = dentry->d_inode;
struct xattr_handler *handler;
- handler = find_xattr_handler_prefix(inode->i_sb->s_xattr, name);
+ handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
- if (!handler || get_inode_sd_version(inode) == STAT_DATA_V1)
+ if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- return handler->get(inode, name, buffer, size);
+ return handler->get(dentry, name, buffer, size, handler->flags);
}
/*
@@ -746,15 +746,14 @@ int
reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
struct xattr_handler *handler;
- handler = find_xattr_handler_prefix(inode->i_sb->s_xattr, name);
+ handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
- if (!handler || get_inode_sd_version(inode) == STAT_DATA_V1)
+ if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- return handler->set(inode, name, value, size, flags);
+ return handler->set(dentry, name, value, size, flags, handler->flags);
}
/*
@@ -764,21 +763,20 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
*/
int reiserfs_removexattr(struct dentry *dentry, const char *name)
{
- struct inode *inode = dentry->d_inode;
struct xattr_handler *handler;
- handler = find_xattr_handler_prefix(inode->i_sb->s_xattr, name);
+ handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name);
- if (!handler || get_inode_sd_version(inode) == STAT_DATA_V1)
+ if (!handler || get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
+ return handler->set(dentry, name, NULL, 0, XATTR_REPLACE, handler->flags);
}
struct listxattr_buf {
size_t size;
size_t pos;
char *buf;
- struct inode *inode;
+ struct dentry *dentry;
};
static int listxattr_filler(void *buf, const char *name, int namelen,
@@ -789,17 +787,19 @@ static int listxattr_filler(void *buf, const char *name, int namelen,
if (name[0] != '.' ||
(namelen != 1 && (name[1] != '.' || namelen != 2))) {
struct xattr_handler *handler;
- handler = find_xattr_handler_prefix(b->inode->i_sb->s_xattr,
+ handler = find_xattr_handler_prefix(b->dentry->d_sb->s_xattr,
name);
if (!handler) /* Unsupported xattr name */
return 0;
if (b->buf) {
- size = handler->list(b->inode, b->buf + b->pos,
- b->size, name, namelen);
+ size = handler->list(b->dentry, b->buf + b->pos,
+ b->size, name, namelen,
+ handler->flags);
if (size > b->size)
return -ERANGE;
} else {
- size = handler->list(b->inode, NULL, 0, name, namelen);
+ size = handler->list(b->dentry, NULL, 0, name,
+ namelen, handler->flags);
}
b->pos += size;
@@ -820,7 +820,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
int err = 0;
loff_t pos = 0;
struct listxattr_buf buf = {
- .inode = dentry->d_inode,
+ .dentry = dentry,
.buf = buffer,
.size = buffer ? size : 0,
};
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 35d6e672a27..cc32e6ada67 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -15,8 +15,10 @@ static int reiserfs_set_acl(struct reiserfs_transaction_handle *th,
struct posix_acl *acl);
static int
-xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
+posix_acl_set(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags, int type)
{
+ struct inode *inode = dentry->d_inode;
struct posix_acl *acl;
int error, error2;
struct reiserfs_transaction_handle th;
@@ -60,15 +62,16 @@ xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
}
static int
-xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+posix_acl_get(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int type)
{
struct posix_acl *acl;
int error;
- if (!reiserfs_posixacl(inode->i_sb))
+ if (!reiserfs_posixacl(dentry->d_sb))
return -EOPNOTSUPP;
- acl = reiserfs_get_acl(inode, type);
+ acl = reiserfs_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -482,30 +485,12 @@ int reiserfs_acl_chmod(struct inode *inode)
return error;
}
-static int
-posix_acl_access_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strlen(name) != sizeof(POSIX_ACL_XATTR_ACCESS) - 1)
- return -EINVAL;
- return xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
-}
-
-static int
-posix_acl_access_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strlen(name) != sizeof(POSIX_ACL_XATTR_ACCESS) - 1)
- return -EINVAL;
- return xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
-}
-
-static size_t posix_acl_access_list(struct inode *inode, char *list,
+static size_t posix_acl_access_list(struct dentry *dentry, char *list,
size_t list_size, const char *name,
- size_t name_len)
+ size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
- if (!reiserfs_posixacl(inode->i_sb))
+ if (!reiserfs_posixacl(dentry->d_sb))
return 0;
if (list && size <= list_size)
memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
@@ -514,35 +499,18 @@ static size_t posix_acl_access_list(struct inode *inode, char *list,
struct xattr_handler reiserfs_posix_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
- .get = posix_acl_access_get,
- .set = posix_acl_access_set,
+ .flags = ACL_TYPE_ACCESS,
+ .get = posix_acl_get,
+ .set = posix_acl_set,
.list = posix_acl_access_list,
};
-static int
-posix_acl_default_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
-{
- if (strlen(name) != sizeof(POSIX_ACL_XATTR_DEFAULT) - 1)
- return -EINVAL;
- return xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
-}
-
-static int
-posix_acl_default_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- if (strlen(name) != sizeof(POSIX_ACL_XATTR_DEFAULT) - 1)
- return -EINVAL;
- return xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
-}
-
-static size_t posix_acl_default_list(struct inode *inode, char *list,
+static size_t posix_acl_default_list(struct dentry *dentry, char *list,
size_t list_size, const char *name,
- size_t name_len)
+ size_t name_len, int type)
{
const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
- if (!reiserfs_posixacl(inode->i_sb))
+ if (!reiserfs_posixacl(dentry->d_sb))
return 0;
if (list && size <= list_size)
memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
@@ -551,7 +519,8 @@ static size_t posix_acl_default_list(struct inode *inode, char *list,
struct xattr_handler reiserfs_posix_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
- .get = posix_acl_default_get,
- .set = posix_acl_default_set,
+ .flags = ACL_TYPE_DEFAULT,
+ .get = posix_acl_get,
+ .set = posix_acl_set,
.list = posix_acl_default_list,
};
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index a92c8792c0f..d8b5bfcbdd3 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -8,36 +8,37 @@
#include <asm/uaccess.h>
static int
-security_get(struct inode *inode, const char *name, void *buffer, size_t size)
+security_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
+ int handler_flags)
{
if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
return -EINVAL;
- if (IS_PRIVATE(inode))
+ if (IS_PRIVATE(dentry->d_inode))
return -EPERM;
- return reiserfs_xattr_get(inode, name, buffer, size);
+ return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
}
static int
-security_set(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags)
+security_set(struct dentry *dentry, const char *name, const void *buffer,
+ size_t size, int flags, int handler_flags)
{
if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
return -EINVAL;
- if (IS_PRIVATE(inode))
+ if (IS_PRIVATE(dentry->d_inode))
return -EPERM;
- return reiserfs_xattr_set(inode, name, buffer, size, flags);
+ return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
}
-static size_t security_list(struct inode *inode, char *list, size_t list_len,
- const char *name, size_t namelen)
+static size_t security_list(struct dentry *dentry, char *list, size_t list_len,
+ const char *name, size_t namelen, int handler_flags)
{
const size_t len = namelen + 1;
- if (IS_PRIVATE(inode))
+ if (IS_PRIVATE(dentry->d_inode))
return 0;
if (list && len <= list_len) {
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index a865042f75e..5b08aaca3da 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -8,36 +8,37 @@
#include <asm/uaccess.h>
static int
-trusted_get(struct inode *inode, const char *name, void *buffer, size_t size)
+trusted_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
+ int handler_flags)
{
if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
+ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode))
return -EPERM;
- return reiserfs_xattr_get(inode, name, buffer, size);
+ return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
}
static int
-trusted_set(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags)
+trusted_set(struct dentry *dentry, const char *name, const void *buffer,
+ size_t size, int flags, int handler_flags)
{
if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
+ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode))
return -EPERM;
- return reiserfs_xattr_set(inode, name, buffer, size, flags);
+ return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
}
-static size_t trusted_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+static size_t trusted_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int handler_flags)
{
const size_t len = name_len + 1;
- if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
+ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(dentry->d_inode))
return 0;
if (list && len <= list_size) {
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index e3238dc4f3d..75d59c49b91 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -7,34 +7,35 @@
#include <asm/uaccess.h>
static int
-user_get(struct inode *inode, const char *name, void *buffer, size_t size)
+user_get(struct dentry *dentry, const char *name, void *buffer, size_t size,
+ int handler_flags)
{
if (strlen(name) < sizeof(XATTR_USER_PREFIX))
return -EINVAL;
- if (!reiserfs_xattrs_user(inode->i_sb))
+ if (!reiserfs_xattrs_user(dentry->d_sb))
return -EOPNOTSUPP;
- return reiserfs_xattr_get(inode, name, buffer, size);
+ return reiserfs_xattr_get(dentry->d_inode, name, buffer, size);
}
static int
-user_set(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags)
+user_set(struct dentry *dentry, const char *name, const void *buffer,
+ size_t size, int flags, int handler_flags)
{
if (strlen(name) < sizeof(XATTR_USER_PREFIX))
return -EINVAL;
- if (!reiserfs_xattrs_user(inode->i_sb))
+ if (!reiserfs_xattrs_user(dentry->d_sb))
return -EOPNOTSUPP;
- return reiserfs_xattr_set(inode, name, buffer, size, flags);
+ return reiserfs_xattr_set(dentry->d_inode, name, buffer, size, flags);
}
-static size_t user_list(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
+static size_t user_list(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int handler_flags)
{
const size_t len = name_len + 1;
- if (!reiserfs_xattrs_user(inode->i_sb))
+ if (!reiserfs_xattrs_user(dentry->d_sb))
return 0;
if (list && len <= list_size) {
memcpy(list, name, name_len);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index b07565c9438..1dabe4ee02f 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -236,7 +236,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
* anon_inode_getfd() will install the fd.
*/
ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
- flags & (O_CLOEXEC | O_NONBLOCK));
+ O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK)));
if (ufd < 0)
kfree(ctx);
} else {
diff --git a/fs/stack.c b/fs/stack.c
index 67716f6a1a4..4a6f7f44065 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -7,18 +7,63 @@
* This function cannot be inlined since i_size_{read,write} is rather
* heavy-weight on 32-bit systems
*/
-void fsstack_copy_inode_size(struct inode *dst, const struct inode *src)
+void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
{
- i_size_write(dst, i_size_read((struct inode *)src));
- dst->i_blocks = src->i_blocks;
+ loff_t i_size;
+ blkcnt_t i_blocks;
+
+ /*
+ * i_size_read() includes its own seqlocking and protection from
+ * preemption (see include/linux/fs.h): we need nothing extra for
+ * that here, and prefer to avoid nesting locks than attempt to keep
+ * i_size and i_blocks in sync together.
+ */
+ i_size = i_size_read(src);
+
+ /*
+ * But if CONFIG_LBDAF (on 32-bit), we ought to make an effort to
+ * keep the two halves of i_blocks in sync despite SMP or PREEMPT -
+ * though stat's generic_fillattr() doesn't bother, and we won't be
+ * applying quotas (where i_blocks does become important) at the
+ * upper level.
+ *
+ * We don't actually know what locking is used at the lower level;
+ * but if it's a filesystem that supports quotas, it will be using
+ * i_lock as in inode_add_bytes(). tmpfs uses other locking, and
+ * its 32-bit is (just) able to exceed 2TB i_size with the aid of
+ * holes; but its i_blocks cannot carry into the upper long without
+ * almost 2TB swap - let's ignore that case.
+ */
+ if (sizeof(i_blocks) > sizeof(long))
+ spin_lock(&src->i_lock);
+ i_blocks = src->i_blocks;
+ if (sizeof(i_blocks) > sizeof(long))
+ spin_unlock(&src->i_lock);
+
+ /*
+ * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
+ * fsstack_copy_inode_size() to hold some lock around
+ * i_size_write(), otherwise i_size_read() may spin forever (see
+ * include/linux/fs.h). We don't necessarily hold i_mutex when this
+ * is called, so take i_lock for that case.
+ *
+ * And if CONFIG_LBDAF (on 32-bit), continue our effort to keep the
+ * two halves of i_blocks in sync despite SMP or PREEMPT: use i_lock
+ * for that case too, and do both at once by combining the tests.
+ *
+ * There is none of this locking overhead in the 64-bit case.
+ */
+ if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))
+ spin_lock(&dst->i_lock);
+ i_size_write(dst, i_size);
+ dst->i_blocks = i_blocks;
+ if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))
+ spin_unlock(&dst->i_lock);
}
EXPORT_SYMBOL_GPL(fsstack_copy_inode_size);
-/* copy all attributes; get_nlinks is optional way to override the i_nlink
- * copying
- */
-void fsstack_copy_attr_all(struct inode *dest, const struct inode *src,
- int (*get_nlinks)(struct inode *))
+/* copy all attributes */
+void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)
{
dest->i_mode = src->i_mode;
dest->i_uid = src->i_uid;
@@ -29,14 +74,6 @@ void fsstack_copy_attr_all(struct inode *dest, const struct inode *src,
dest->i_ctime = src->i_ctime;
dest->i_blkbits = src->i_blkbits;
dest->i_flags = src->i_flags;
-
- /*
- * Update the nlinks AFTER updating the above fields, because the
- * get_links callback may depend on them.
- */
- if (!get_nlinks)
- dest->i_nlink = src->i_nlink;
- else
- dest->i_nlink = (*get_nlinks)(dest);
+ dest->i_nlink = src->i_nlink;
}
EXPORT_SYMBOL_GPL(fsstack_copy_attr_all);
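For reference, a minimal sketch of a caller after this change (all examplefs_* names are hypothetical, not part of the patch): fsstack_copy_attr_all() no longer takes a get_nlinks callback, and fsstack_copy_inode_size() now takes a non-const source because it may take src->i_lock.
#include <linux/fs.h>
#include <linux/fs_stack.h>
/* Hypothetical stacking filesystem refreshing its upper inode from the
 * lower one via the simplified helpers above. */
static void examplefs_refresh_inode(struct inode *upper, struct inode *lower)
{
	fsstack_copy_attr_all(upper, lower);	/* i_nlink is copied directly now */
	fsstack_copy_inode_size(upper, lower);	/* src is no longer const */
}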
diff --git a/fs/sync.c b/fs/sync.c
index d104591b066..418727a2a23 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -295,10 +295,11 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
*/
int generic_write_sync(struct file *file, loff_t pos, loff_t count)
{
- if (!(file->f_flags & O_SYNC) && !IS_SYNC(file->f_mapping->host))
+ if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
return 0;
return vfs_fsync_range(file, file->f_path.dentry, pos,
- pos + count - 1, 1);
+ pos + count - 1,
+ (file->f_flags & __O_SYNC) ? 0 : 1);
}
EXPORT_SYMBOL(generic_write_sync);
@@ -354,6 +355,7 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
{
int ret;
struct file *file;
+ struct address_space *mapping;
loff_t endbyte; /* inclusive */
int fput_needed;
umode_t i_mode;
@@ -404,7 +406,28 @@ SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
!S_ISLNK(i_mode))
goto out_put;
- ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
+ mapping = file->f_mapping;
+ if (!mapping) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ ret = 0;
+ if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
+ ret = filemap_fdatawait_range(mapping, offset, endbyte);
+ if (ret < 0)
+ goto out_put;
+ }
+
+ if (flags & SYNC_FILE_RANGE_WRITE) {
+ ret = filemap_fdatawrite_range(mapping, offset, endbyte);
+ if (ret < 0)
+ goto out_put;
+ }
+
+ if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
+ ret = filemap_fdatawait_range(mapping, offset, endbyte);
+
out_put:
fput_light(file, fput_needed);
out:
@@ -436,42 +459,3 @@ asmlinkage long SyS_sync_file_range2(long fd, long flags,
}
SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
#endif
-
-/*
- * `endbyte' is inclusive
- */
-int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
- loff_t endbyte, unsigned int flags)
-{
- int ret;
-
- if (!mapping) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = 0;
- if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
- ret = wait_on_page_writeback_range(mapping,
- offset >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
- if (ret < 0)
- goto out;
- }
-
- if (flags & SYNC_FILE_RANGE_WRITE) {
- ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
- WB_SYNC_ALL);
- if (ret < 0)
- goto out;
- }
-
- if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
- ret = wait_on_page_writeback_range(mapping,
- offset >> PAGE_CACHE_SHIFT,
- endbyte >> PAGE_CACHE_SHIFT);
- }
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(do_sync_mapping_range);
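As a usage note, the userspace counterpart of the flag handling open-coded above looks as follows; this is an illustrative sketch only, not part of the patch.
#define _GNU_SOURCE
#include <fcntl.h>
/* Illustrative userspace caller: the same three flags select the
 * wait / write / wait steps now performed directly in the syscall. */
static int flush_range(int fd, off_t offset, off_t nbytes)
{
	return sync_file_range(fd, offset, nbytes,
			       SYNC_FILE_RANGE_WAIT_BEFORE |
			       SYNC_FILE_RANGE_WRITE |
			       SYNC_FILE_RANGE_WAIT_AFTER);
}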
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index e0201837d24..f05f2303a8b 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -25,7 +25,6 @@
#include "sysfs.h"
DEFINE_MUTEX(sysfs_mutex);
-DEFINE_MUTEX(sysfs_rename_mutex);
DEFINE_SPINLOCK(sysfs_assoc_lock);
static DEFINE_SPINLOCK(sysfs_ino_lock);
@@ -85,46 +84,6 @@ static void sysfs_unlink_sibling(struct sysfs_dirent *sd)
}
/**
- * sysfs_get_dentry - get dentry for the given sysfs_dirent
- * @sd: sysfs_dirent of interest
- *
- * Get dentry for @sd. Dentry is looked up if currently not
- * present. This function descends from the root looking up
- * dentry for each step.
- *
- * LOCKING:
- * mutex_lock(sysfs_rename_mutex)
- *
- * RETURNS:
- * Pointer to found dentry on success, ERR_PTR() value on error.
- */
-struct dentry *sysfs_get_dentry(struct sysfs_dirent *sd)
-{
- struct dentry *dentry = dget(sysfs_sb->s_root);
-
- while (dentry->d_fsdata != sd) {
- struct sysfs_dirent *cur;
- struct dentry *parent;
-
- /* find the first ancestor which hasn't been looked up */
- cur = sd;
- while (cur->s_parent != dentry->d_fsdata)
- cur = cur->s_parent;
-
- /* look it up */
- parent = dentry;
- mutex_lock(&parent->d_inode->i_mutex);
- dentry = lookup_one_noperm(cur->s_name, parent);
- mutex_unlock(&parent->d_inode->i_mutex);
- dput(parent);
-
- if (IS_ERR(dentry))
- break;
- }
- return dentry;
-}
-
-/**
* sysfs_get_active - get an active reference to sysfs_dirent
* @sd: sysfs_dirent to get an active reference to
*
@@ -298,7 +257,61 @@ void release_sysfs_dirent(struct sysfs_dirent * sd)
goto repeat;
}
-static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
+static int sysfs_dentry_delete(struct dentry *dentry)
+{
+ struct sysfs_dirent *sd = dentry->d_fsdata;
+ return !!(sd->s_flags & SYSFS_FLAG_REMOVED);
+}
+
+static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ struct sysfs_dirent *sd = dentry->d_fsdata;
+ int is_dir;
+
+ mutex_lock(&sysfs_mutex);
+
+ /* The sysfs dirent has been deleted */
+ if (sd->s_flags & SYSFS_FLAG_REMOVED)
+ goto out_bad;
+
+ /* The sysfs dirent has been moved? */
+ if (dentry->d_parent->d_fsdata != sd->s_parent)
+ goto out_bad;
+
+ /* The sysfs dirent has been renamed */
+ if (strcmp(dentry->d_name.name, sd->s_name) != 0)
+ goto out_bad;
+
+ mutex_unlock(&sysfs_mutex);
+out_valid:
+ return 1;
+out_bad:
+ /* Remove the dentry from the dcache hashes.
+ * If this is a deleted dentry we use d_drop instead of d_delete
+ * so sysfs doesn't need to cope with negative dentries.
+ *
+ * If this is a dentry that has simply been renamed we
+ * use d_drop to remove it from the dcache lookup on its
+ * old parent. If this dentry persists later when a lookup
+ * is performed at its new name the dentry will be readded
+ * to the dcache hashes.
+ */
+ is_dir = (sysfs_type(sd) == SYSFS_DIR);
+ mutex_unlock(&sysfs_mutex);
+ if (is_dir) {
+ /* If we have submounts we must allow the vfs caches
+ * to lie about the state of the filesystem to prevent
+ * leaks and other nasty things.
+ */
+ if (have_submounts(dentry))
+ goto out_valid;
+ shrink_dcache_parent(dentry);
+ }
+ d_drop(dentry);
+ return 0;
+}
+
+static void sysfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
struct sysfs_dirent * sd = dentry->d_fsdata;
@@ -307,7 +320,9 @@ static void sysfs_d_iput(struct dentry * dentry, struct inode * inode)
}
static const struct dentry_operations sysfs_dentry_ops = {
- .d_iput = sysfs_d_iput,
+ .d_revalidate = sysfs_dentry_revalidate,
+ .d_delete = sysfs_dentry_delete,
+ .d_iput = sysfs_dentry_iput,
};
struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
@@ -344,12 +359,6 @@ struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
return NULL;
}
-static int sysfs_ilookup_test(struct inode *inode, void *arg)
-{
- struct sysfs_dirent *sd = arg;
- return inode->i_ino == sd->s_ino;
-}
-
/**
* sysfs_addrm_start - prepare for sysfs_dirent add/remove
* @acxt: pointer to sysfs_addrm_cxt to be used
@@ -357,47 +366,20 @@ static int sysfs_ilookup_test(struct inode *inode, void *arg)
*
* This function is called when the caller is about to add or
* remove sysfs_dirent under @parent_sd. This function acquires
- * sysfs_mutex, grabs inode for @parent_sd if available and lock
- * i_mutex of it. @acxt is used to keep and pass context to
+ * sysfs_mutex. @acxt is used to keep and pass context to
* other addrm functions.
*
* LOCKING:
* Kernel thread context (may sleep). sysfs_mutex is locked on
- * return. i_mutex of parent inode is locked on return if
- * available.
+ * return.
*/
void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
struct sysfs_dirent *parent_sd)
{
- struct inode *inode;
-
memset(acxt, 0, sizeof(*acxt));
acxt->parent_sd = parent_sd;
- /* Lookup parent inode. inode initialization is protected by
- * sysfs_mutex, so inode existence can be determined by
- * looking up inode while holding sysfs_mutex.
- */
mutex_lock(&sysfs_mutex);
-
- inode = ilookup5(sysfs_sb, parent_sd->s_ino, sysfs_ilookup_test,
- parent_sd);
- if (inode) {
- WARN_ON(inode->i_state & I_NEW);
-
- /* parent inode available */
- acxt->parent_inode = inode;
-
- /* sysfs_mutex is below i_mutex in lock hierarchy.
- * First, trylock i_mutex. If fails, unlock
- * sysfs_mutex and lock them in order.
- */
- if (!mutex_trylock(&inode->i_mutex)) {
- mutex_unlock(&sysfs_mutex);
- mutex_lock(&inode->i_mutex);
- mutex_lock(&sysfs_mutex);
- }
- }
}
/**
@@ -422,18 +404,22 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
*/
int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
+ struct sysfs_inode_attrs *ps_iattr;
+
if (sysfs_find_dirent(acxt->parent_sd, sd->s_name))
return -EEXIST;
sd->s_parent = sysfs_get(acxt->parent_sd);
- if (sysfs_type(sd) == SYSFS_DIR && acxt->parent_inode)
- inc_nlink(acxt->parent_inode);
-
- acxt->cnt++;
-
sysfs_link_sibling(sd);
+ /* Update timestamps on the parent */
+ ps_iattr = acxt->parent_sd->s_iattr;
+ if (ps_iattr) {
+ struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
+ ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
+ }
+
return 0;
}
@@ -512,70 +498,22 @@ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
*/
void sysfs_remove_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
+ struct sysfs_inode_attrs *ps_iattr;
+
BUG_ON(sd->s_flags & SYSFS_FLAG_REMOVED);
sysfs_unlink_sibling(sd);
+ /* Update timestamps on the parent */
+ ps_iattr = acxt->parent_sd->s_iattr;
+ if (ps_iattr) {
+ struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
+ ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
+ }
+
sd->s_flags |= SYSFS_FLAG_REMOVED;
sd->s_sibling = acxt->removed;
acxt->removed = sd;
-
- if (sysfs_type(sd) == SYSFS_DIR && acxt->parent_inode)
- drop_nlink(acxt->parent_inode);
-
- acxt->cnt++;
-}
-
-/**
- * sysfs_drop_dentry - drop dentry for the specified sysfs_dirent
- * @sd: target sysfs_dirent
- *
- * Drop dentry for @sd. @sd must have been unlinked from its
- * parent on entry to this function such that it can't be looked
- * up anymore.
- */
-static void sysfs_drop_dentry(struct sysfs_dirent *sd)
-{
- struct inode *inode;
- struct dentry *dentry;
-
- inode = ilookup(sysfs_sb, sd->s_ino);
- if (!inode)
- return;
-
- /* Drop any existing dentries associated with sd.
- *
- * For the dentry to be properly freed we need to grab a
- * reference to the dentry under the dcache lock, unhash it,
- * and then put it. The playing with the dentry count allows
- * dput to immediately free the dentry if it is not in use.
- */
-repeat:
- spin_lock(&dcache_lock);
- list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
- if (d_unhashed(dentry))
- continue;
- dget_locked(dentry);
- spin_lock(&dentry->d_lock);
- __d_drop(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
- dput(dentry);
- goto repeat;
- }
- spin_unlock(&dcache_lock);
-
- /* adjust nlink and update timestamp */
- mutex_lock(&inode->i_mutex);
-
- inode->i_ctime = CURRENT_TIME;
- drop_nlink(inode);
- if (sysfs_type(sd) == SYSFS_DIR)
- drop_nlink(inode);
-
- mutex_unlock(&inode->i_mutex);
-
- iput(inode);
}
/**
@@ -584,25 +522,15 @@ repeat:
*
* Finish up sysfs_dirent add/remove. Resources acquired by
* sysfs_addrm_start() are released and removed sysfs_dirents are
- * cleaned up. Timestamps on the parent inode are updated.
+ * cleaned up.
*
* LOCKING:
- * All mutexes acquired by sysfs_addrm_start() are released.
+ * sysfs_mutex is released.
*/
void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
{
/* release resources acquired by sysfs_addrm_start() */
mutex_unlock(&sysfs_mutex);
- if (acxt->parent_inode) {
- struct inode *inode = acxt->parent_inode;
-
- /* if added/removed, update timestamps on the parent */
- if (acxt->cnt)
- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-
- mutex_unlock(&inode->i_mutex);
- iput(inode);
- }
/* kill removed sysfs_dirents */
while (acxt->removed) {
@@ -611,7 +539,6 @@ void sysfs_addrm_finish(struct sysfs_addrm_cxt *acxt)
acxt->removed = sd->s_sibling;
sd->s_sibling = NULL;
- sysfs_drop_dentry(sd);
sysfs_deactivate(sd);
unmap_bin_file(sd);
sysfs_put(sd);
@@ -751,10 +678,15 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
}
/* instantiate and hash dentry */
- dentry->d_op = &sysfs_dentry_ops;
- dentry->d_fsdata = sysfs_get(sd);
- d_instantiate(dentry, inode);
- d_rehash(dentry);
+ ret = d_find_alias(inode);
+ if (!ret) {
+ dentry->d_op = &sysfs_dentry_ops;
+ dentry->d_fsdata = sysfs_get(sd);
+ d_add(dentry, inode);
+ } else {
+ d_move(ret, dentry);
+ iput(inode);
+ }
out_unlock:
mutex_unlock(&sysfs_mutex);
@@ -763,7 +695,9 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
const struct inode_operations sysfs_dir_inode_operations = {
.lookup = sysfs_lookup,
+ .permission = sysfs_permission,
.setattr = sysfs_setattr,
+ .getattr = sysfs_getattr,
.setxattr = sysfs_setxattr,
};
@@ -826,141 +760,65 @@ void sysfs_remove_dir(struct kobject * kobj)
__sysfs_remove_dir(sd);
}
-int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
+int sysfs_rename(struct sysfs_dirent *sd,
+ struct sysfs_dirent *new_parent_sd, const char *new_name)
{
- struct sysfs_dirent *sd = kobj->sd;
- struct dentry *parent = NULL;
- struct dentry *old_dentry = NULL, *new_dentry = NULL;
const char *dup_name = NULL;
int error;
- mutex_lock(&sysfs_rename_mutex);
+ mutex_lock(&sysfs_mutex);
error = 0;
- if (strcmp(sd->s_name, new_name) == 0)
+ if ((sd->s_parent == new_parent_sd) &&
+ (strcmp(sd->s_name, new_name) == 0))
goto out; /* nothing to rename */
- /* get the original dentry */
- old_dentry = sysfs_get_dentry(sd);
- if (IS_ERR(old_dentry)) {
- error = PTR_ERR(old_dentry);
- old_dentry = NULL;
- goto out;
- }
-
- parent = old_dentry->d_parent;
-
- /* lock parent and get dentry for new name */
- mutex_lock(&parent->d_inode->i_mutex);
- mutex_lock(&sysfs_mutex);
-
error = -EEXIST;
- if (sysfs_find_dirent(sd->s_parent, new_name))
- goto out_unlock;
-
- error = -ENOMEM;
- new_dentry = d_alloc_name(parent, new_name);
- if (!new_dentry)
- goto out_unlock;
+ if (sysfs_find_dirent(new_parent_sd, new_name))
+ goto out;
/* rename sysfs_dirent */
- error = -ENOMEM;
- new_name = dup_name = kstrdup(new_name, GFP_KERNEL);
- if (!new_name)
- goto out_unlock;
-
- dup_name = sd->s_name;
- sd->s_name = new_name;
+ if (strcmp(sd->s_name, new_name) != 0) {
+ error = -ENOMEM;
+ new_name = dup_name = kstrdup(new_name, GFP_KERNEL);
+ if (!new_name)
+ goto out;
+
+ dup_name = sd->s_name;
+ sd->s_name = new_name;
+ }
- /* rename */
- d_add(new_dentry, NULL);
- d_move(old_dentry, new_dentry);
+ /* Remove from old parent's list and insert into new parent's list. */
+ if (sd->s_parent != new_parent_sd) {
+ sysfs_unlink_sibling(sd);
+ sysfs_get(new_parent_sd);
+ sysfs_put(sd->s_parent);
+ sd->s_parent = new_parent_sd;
+ sysfs_link_sibling(sd);
+ }
error = 0;
- out_unlock:
+ out:
mutex_unlock(&sysfs_mutex);
- mutex_unlock(&parent->d_inode->i_mutex);
kfree(dup_name);
- dput(old_dentry);
- dput(new_dentry);
- out:
- mutex_unlock(&sysfs_rename_mutex);
return error;
}
+int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
+{
+ return sysfs_rename(kobj->sd, kobj->sd->s_parent, new_name);
+}
+
int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj)
{
struct sysfs_dirent *sd = kobj->sd;
struct sysfs_dirent *new_parent_sd;
- struct dentry *old_parent, *new_parent = NULL;
- struct dentry *old_dentry = NULL, *new_dentry = NULL;
- int error;
- mutex_lock(&sysfs_rename_mutex);
BUG_ON(!sd->s_parent);
- new_parent_sd = (new_parent_kobj && new_parent_kobj->sd) ?
+ new_parent_sd = new_parent_kobj && new_parent_kobj->sd ?
new_parent_kobj->sd : &sysfs_root;
- error = 0;
- if (sd->s_parent == new_parent_sd)
- goto out; /* nothing to move */
-
- /* get dentries */
- old_dentry = sysfs_get_dentry(sd);
- if (IS_ERR(old_dentry)) {
- error = PTR_ERR(old_dentry);
- old_dentry = NULL;
- goto out;
- }
- old_parent = old_dentry->d_parent;
-
- new_parent = sysfs_get_dentry(new_parent_sd);
- if (IS_ERR(new_parent)) {
- error = PTR_ERR(new_parent);
- new_parent = NULL;
- goto out;
- }
-
-again:
- mutex_lock(&old_parent->d_inode->i_mutex);
- if (!mutex_trylock(&new_parent->d_inode->i_mutex)) {
- mutex_unlock(&old_parent->d_inode->i_mutex);
- goto again;
- }
- mutex_lock(&sysfs_mutex);
-
- error = -EEXIST;
- if (sysfs_find_dirent(new_parent_sd, sd->s_name))
- goto out_unlock;
-
- error = -ENOMEM;
- new_dentry = d_alloc_name(new_parent, sd->s_name);
- if (!new_dentry)
- goto out_unlock;
-
- error = 0;
- d_add(new_dentry, NULL);
- d_move(old_dentry, new_dentry);
-
- /* Remove from old parent's list and insert into new parent's list. */
- sysfs_unlink_sibling(sd);
- sysfs_get(new_parent_sd);
- drop_nlink(old_parent->d_inode);
- sysfs_put(sd->s_parent);
- sd->s_parent = new_parent_sd;
- inc_nlink(new_parent->d_inode);
- sysfs_link_sibling(sd);
-
- out_unlock:
- mutex_unlock(&sysfs_mutex);
- mutex_unlock(&new_parent->d_inode->i_mutex);
- mutex_unlock(&old_parent->d_inode->i_mutex);
- out:
- dput(new_parent);
- dput(old_dentry);
- dput(new_dentry);
- mutex_unlock(&sysfs_rename_mutex);
- return error;
+ return sysfs_rename(sd, new_parent_sd, sd->s_name);
}
/* Relationship between s_mode and the DT_xxx types */
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f5ea4680f15..dc30d9e3168 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -579,46 +579,23 @@ EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
*/
int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
{
- struct sysfs_dirent *victim_sd = NULL;
- struct dentry *victim = NULL;
- struct inode * inode;
+ struct sysfs_dirent *sd;
struct iattr newattrs;
int rc;
- rc = -ENOENT;
- victim_sd = sysfs_get_dirent(kobj->sd, attr->name);
- if (!victim_sd)
- goto out;
+ mutex_lock(&sysfs_mutex);
- mutex_lock(&sysfs_rename_mutex);
- victim = sysfs_get_dentry(victim_sd);
- mutex_unlock(&sysfs_rename_mutex);
- if (IS_ERR(victim)) {
- rc = PTR_ERR(victim);
- victim = NULL;
+ rc = -ENOENT;
+ sd = sysfs_find_dirent(kobj->sd, attr->name);
+ if (!sd)
goto out;
- }
-
- inode = victim->d_inode;
-
- mutex_lock(&inode->i_mutex);
- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- newattrs.ia_ctime = current_fs_time(inode->i_sb);
- rc = sysfs_setattr(victim, &newattrs);
+ newattrs.ia_mode = (mode & S_IALLUGO) | (sd->s_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE;
+ rc = sysfs_sd_setattr(sd, &newattrs);
- if (rc == 0) {
- fsnotify_change(victim, newattrs.ia_valid);
- mutex_lock(&sysfs_mutex);
- victim_sd->s_mode = newattrs.ia_mode;
- mutex_unlock(&sysfs_mutex);
- }
-
- mutex_unlock(&inode->i_mutex);
out:
- dput(victim);
- sysfs_put(victim_sd);
+ mutex_unlock(&sysfs_mutex);
return rc;
}
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
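For context, a minimal sketch of a driver-side caller (all names apart from sysfs_chmod_file() itself are hypothetical): the mode change now lands directly on the sysfs_dirent under sysfs_mutex instead of going through a dentry and inode.
#include <linux/kobject.h>
#include <linux/sysfs.h>
/* Hypothetical attribute whose permissions are relaxed at runtime. */
static struct kobj_attribute example_attr = __ATTR(example, 0444, NULL, NULL);
static int example_make_writable(struct kobject *kobj)
{
	return sysfs_chmod_file(kobj, &example_attr.attr, 0644);
}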
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index e28cecf179f..220b758523a 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -37,7 +37,9 @@ static struct backing_dev_info sysfs_backing_dev_info = {
};
static const struct inode_operations sysfs_inode_operations ={
+ .permission = sysfs_permission,
.setattr = sysfs_setattr,
+ .getattr = sysfs_getattr,
.setxattr = sysfs_setxattr,
};
@@ -46,7 +48,7 @@ int __init sysfs_inode_init(void)
return bdi_init(&sysfs_backing_dev_info);
}
-struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd)
+static struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd)
{
struct sysfs_inode_attrs *attrs;
struct iattr *iattrs;
@@ -64,30 +66,15 @@ struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd)
return attrs;
}
-int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
+
+int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
{
- struct inode * inode = dentry->d_inode;
- struct sysfs_dirent * sd = dentry->d_fsdata;
struct sysfs_inode_attrs *sd_attrs;
struct iattr *iattrs;
unsigned int ia_valid = iattr->ia_valid;
- int error;
-
- if (!sd)
- return -EINVAL;
sd_attrs = sd->s_iattr;
- error = inode_change_ok(inode, iattr);
- if (error)
- return error;
-
- iattr->ia_valid &= ~ATTR_SIZE; /* ignore size changes */
-
- error = inode_setattr(inode, iattr);
- if (error)
- return error;
-
if (!sd_attrs) {
/* setting attributes for the first time, allocate now */
sd_attrs = sysfs_init_inode_attrs(sd);
@@ -103,42 +90,78 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
if (ia_valid & ATTR_GID)
iattrs->ia_gid = iattr->ia_gid;
if (ia_valid & ATTR_ATIME)
- iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
- inode->i_sb->s_time_gran);
+ iattrs->ia_atime = iattr->ia_atime;
if (ia_valid & ATTR_MTIME)
- iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
- inode->i_sb->s_time_gran);
+ iattrs->ia_mtime = iattr->ia_mtime;
if (ia_valid & ATTR_CTIME)
- iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
- inode->i_sb->s_time_gran);
+ iattrs->ia_ctime = iattr->ia_ctime;
if (ia_valid & ATTR_MODE) {
umode_t mode = iattr->ia_mode;
-
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
- mode &= ~S_ISGID;
iattrs->ia_mode = sd->s_mode = mode;
}
}
+ return 0;
+}
+
+int sysfs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct sysfs_dirent *sd = dentry->d_fsdata;
+ int error;
+
+ if (!sd)
+ return -EINVAL;
+
+ error = inode_change_ok(inode, iattr);
+ if (error)
+ return error;
+
+ iattr->ia_valid &= ~ATTR_SIZE; /* ignore size changes */
+
+ error = inode_setattr(inode, iattr);
+ if (error)
+ return error;
+
+ mutex_lock(&sysfs_mutex);
+ error = sysfs_sd_setattr(sd, iattr);
+ mutex_unlock(&sysfs_mutex);
+
return error;
}
+static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *secdata_len)
+{
+ struct sysfs_inode_attrs *iattrs;
+ void *old_secdata;
+ size_t old_secdata_len;
+
+ iattrs = sd->s_iattr;
+ if (!iattrs)
+ iattrs = sysfs_init_inode_attrs(sd);
+ if (!iattrs)
+ return -ENOMEM;
+
+ old_secdata = iattrs->ia_secdata;
+ old_secdata_len = iattrs->ia_secdata_len;
+
+ iattrs->ia_secdata = *secdata;
+ iattrs->ia_secdata_len = *secdata_len;
+
+ *secdata = old_secdata;
+ *secdata_len = old_secdata_len;
+ return 0;
+}
+
int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
struct sysfs_dirent *sd = dentry->d_fsdata;
- struct sysfs_inode_attrs *iattrs;
void *secdata;
int error;
u32 secdata_len = 0;
if (!sd)
return -EINVAL;
- if (!sd->s_iattr)
- sd->s_iattr = sysfs_init_inode_attrs(sd);
- if (!sd->s_iattr)
- return -ENOMEM;
-
- iattrs = sd->s_iattr;
if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
@@ -150,12 +173,13 @@ int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
&secdata, &secdata_len);
if (error)
goto out;
- if (iattrs->ia_secdata)
- security_release_secctx(iattrs->ia_secdata,
- iattrs->ia_secdata_len);
- iattrs->ia_secdata = secdata;
- iattrs->ia_secdata_len = secdata_len;
+ mutex_lock(&sysfs_mutex);
+ error = sysfs_sd_setsecdata(sd, &secdata, &secdata_len);
+ mutex_unlock(&sysfs_mutex);
+
+ if (secdata)
+ security_release_secctx(secdata, secdata_len);
} else
return -EINVAL;
out:
@@ -170,7 +194,6 @@ static inline void set_default_inode_attr(struct inode * inode, mode_t mode)
static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
{
- inode->i_mode = iattr->ia_mode;
inode->i_uid = iattr->ia_uid;
inode->i_gid = iattr->ia_gid;
inode->i_atime = iattr->ia_atime;
@@ -178,17 +201,6 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
inode->i_ctime = iattr->ia_ctime;
}
-
-/*
- * sysfs has a different i_mutex lock order behavior for i_mutex than other
- * filesystems; sysfs i_mutex is called in many places with subsystem locks
- * held. At the same time, many of the VFS locking rules do not apply to
- * sysfs at all (cross directory rename for example). To untangle this mess
- * (which gives false positives in lockdep), we're giving sysfs inodes their
- * own class for i_mutex.
- */
-static struct lock_class_key sysfs_inode_imutex_key;
-
static int sysfs_count_nlink(struct sysfs_dirent *sd)
{
struct sysfs_dirent *child;
@@ -201,38 +213,55 @@ static int sysfs_count_nlink(struct sysfs_dirent *sd)
return nr + 2;
}
+static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode)
+{
+ struct sysfs_inode_attrs *iattrs = sd->s_iattr;
+
+ inode->i_mode = sd->s_mode;
+ if (iattrs) {
+ /* sysfs_dirent has non-default attributes
+ * get them from persistent copy in sysfs_dirent
+ */
+ set_inode_attr(inode, &iattrs->ia_iattr);
+ security_inode_notifysecctx(inode,
+ iattrs->ia_secdata,
+ iattrs->ia_secdata_len);
+ }
+
+ if (sysfs_type(sd) == SYSFS_DIR)
+ inode->i_nlink = sysfs_count_nlink(sd);
+}
+
+int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+ struct sysfs_dirent *sd = dentry->d_fsdata;
+ struct inode *inode = dentry->d_inode;
+
+ mutex_lock(&sysfs_mutex);
+ sysfs_refresh_inode(sd, inode);
+ mutex_unlock(&sysfs_mutex);
+
+ generic_fillattr(inode, stat);
+ return 0;
+}
+
static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
{
struct bin_attribute *bin_attr;
- struct sysfs_inode_attrs *iattrs;
inode->i_private = sysfs_get(sd);
inode->i_mapping->a_ops = &sysfs_aops;
inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
inode->i_op = &sysfs_inode_operations;
- inode->i_ino = sd->s_ino;
- lockdep_set_class(&inode->i_mutex, &sysfs_inode_imutex_key);
- iattrs = sd->s_iattr;
- if (iattrs) {
- /* sysfs_dirent has non-default attributes
- * get them for the new inode from persistent copy
- * in sysfs_dirent
- */
- set_inode_attr(inode, &iattrs->ia_iattr);
- if (iattrs->ia_secdata)
- security_inode_notifysecctx(inode,
- iattrs->ia_secdata,
- iattrs->ia_secdata_len);
- } else
- set_default_inode_attr(inode, sd->s_mode);
+ set_default_inode_attr(inode, sd->s_mode);
+ sysfs_refresh_inode(sd, inode);
/* initialize inode according to type */
switch (sysfs_type(sd)) {
case SYSFS_DIR:
inode->i_op = &sysfs_dir_inode_operations;
inode->i_fop = &sysfs_dir_operations;
- inode->i_nlink = sysfs_count_nlink(sd);
break;
case SYSFS_KOBJ_ATTR:
inode->i_size = PAGE_SIZE;
@@ -315,3 +344,14 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name)
else
return -ENOENT;
}
+
+int sysfs_permission(struct inode *inode, int mask)
+{
+ struct sysfs_dirent *sd = inode->i_private;
+
+ mutex_lock(&sysfs_mutex);
+ sysfs_refresh_inode(sd, inode);
+ mutex_unlock(&sysfs_mutex);
+
+ return generic_permission(inode, mask, NULL);
+}
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index c5081ad7702..c5eff49fa41 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -210,10 +210,13 @@ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *co
}
const struct inode_operations sysfs_symlink_inode_operations = {
- .setxattr = sysfs_setxattr,
- .readlink = generic_readlink,
- .follow_link = sysfs_follow_link,
- .put_link = sysfs_put_link,
+ .setxattr = sysfs_setxattr,
+ .readlink = generic_readlink,
+ .follow_link = sysfs_follow_link,
+ .put_link = sysfs_put_link,
+ .setattr = sysfs_setattr,
+ .getattr = sysfs_getattr,
+ .permission = sysfs_permission,
};
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index af4c4e7482a..ca52e7b9d8f 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -89,9 +89,7 @@ static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
*/
struct sysfs_addrm_cxt {
struct sysfs_dirent *parent_sd;
- struct inode *parent_inode;
struct sysfs_dirent *removed;
- int cnt;
};
/*
@@ -105,7 +103,6 @@ extern struct kmem_cache *sysfs_dir_cachep;
* dir.c
*/
extern struct mutex sysfs_mutex;
-extern struct mutex sysfs_rename_mutex;
extern spinlock_t sysfs_assoc_lock;
extern const struct file_operations sysfs_dir_operations;
@@ -133,6 +130,9 @@ int sysfs_create_subdir(struct kobject *kobj, const char *name,
struct sysfs_dirent **p_sd);
void sysfs_remove_subdir(struct sysfs_dirent *sd);
+int sysfs_rename(struct sysfs_dirent *sd,
+ struct sysfs_dirent *new_parent_sd, const char *new_name);
+
static inline struct sysfs_dirent *__sysfs_get(struct sysfs_dirent *sd)
{
if (sd) {
@@ -155,7 +155,10 @@ static inline void __sysfs_put(struct sysfs_dirent *sd)
*/
struct inode *sysfs_get_inode(struct sysfs_dirent *sd);
void sysfs_delete_inode(struct inode *inode);
+int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr);
+int sysfs_permission(struct inode *inode, int mask);
int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
+int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags);
int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index b042bd7034b..1bfc95ad5f7 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -200,7 +200,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
- flags & TFD_SHARED_FCNTL_FLAGS);
+ O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
if (ufd < 0)
kfree(ctx);
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 8a771c59ac3..90492327b38 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -350,13 +350,8 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
le32_to_cpu(sup->fmt_version));
printk(KERN_DEBUG "\ttime_gran %u\n",
le32_to_cpu(sup->time_gran));
- printk(KERN_DEBUG "\tUUID %02X%02X%02X%02X-%02X%02X"
- "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X\n",
- sup->uuid[0], sup->uuid[1], sup->uuid[2], sup->uuid[3],
- sup->uuid[4], sup->uuid[5], sup->uuid[6], sup->uuid[7],
- sup->uuid[8], sup->uuid[9], sup->uuid[10], sup->uuid[11],
- sup->uuid[12], sup->uuid[13], sup->uuid[14],
- sup->uuid[15]);
+ printk(KERN_DEBUG "\tUUID %pUB\n",
+ sup->uuid);
break;
}
case UBIFS_MST_NODE:
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 39849f887e7..16a6444330e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -45,7 +45,7 @@
*
* Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
* read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
- * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not
+ * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
* set as well. However, UBIFS disables readahead.
*/
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 943ad562453..43f9d19a6f3 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1393,12 +1393,7 @@ static int mount_ubifs(struct ubifs_info *c)
c->leb_size, c->leb_size >> 10);
dbg_msg("data journal heads: %d",
c->jhead_cnt - NONDATA_JHEADS_CNT);
- dbg_msg("UUID: %02X%02X%02X%02X-%02X%02X"
- "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
- c->uuid[0], c->uuid[1], c->uuid[2], c->uuid[3],
- c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7],
- c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11],
- c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]);
+ dbg_msg("UUID: %pUB", c->uuid);
dbg_msg("big_lpt %d", c->big_lpt);
dbg_msg("log LEBs: %d (%d - %d)",
c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
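Both UBIFS hunks above rely on the %pUB printk extension; a minimal illustrative use (example_dump_uuid() is a hypothetical caller):
#include <linux/kernel.h>
/* %pUB prints a 16-byte UUID in upper case, replacing the hand-rolled
 * %02X chains removed above. */
static void example_dump_uuid(const unsigned char uuid[16])
{
	printk(KERN_DEBUG "UUID %pUB\n", uuid);
}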
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1e068535b58..82372e332f0 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -440,7 +440,7 @@ static void udf_table_free_blocks(struct super_block *sb,
(bloc->logicalBlockNum + count) >
partmap->s_partition_len) {
udf_debug("%d < %d || %d + %d > %d\n",
- bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
+ bloc->logicalBlockNum, 0, bloc->logicalBlockNum, count,
partmap->s_partition_len);
goto error_return;
}
diff --git a/fs/udf/file.c b/fs/udf/file.c
index b80cbd78833..f311d509b6a 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -196,6 +196,7 @@ static int udf_release_file(struct inode *inode, struct file *filp)
mutex_lock(&inode->i_mutex);
lock_kernel();
udf_discard_prealloc(inode);
+ udf_truncate_tail_extent(inode);
unlock_kernel();
mutex_unlock(&inode->i_mutex);
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6d24c2c63f9..f90231eb291 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -97,15 +97,17 @@ no_delete:
*/
void udf_clear_inode(struct inode *inode)
{
- struct udf_inode_info *iinfo;
- if (!(inode->i_sb->s_flags & MS_RDONLY)) {
- lock_kernel();
- udf_truncate_tail_extent(inode);
- unlock_kernel();
- write_inode_now(inode, 0);
- invalidate_inode_buffers(inode);
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
+ inode->i_size != iinfo->i_lenExtents) {
+ printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has "
+ "inode size %llu different from extent lenght %llu. "
+ "Filesystem need not be standards compliant.\n",
+ inode->i_sb->s_id, inode->i_ino, inode->i_mode,
+ (unsigned long long)inode->i_size,
+ (unsigned long long)iinfo->i_lenExtents);
}
- iinfo = UDF_I(inode);
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
}
@@ -198,7 +200,6 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
int newblock;
struct buffer_head *dbh = NULL;
struct kernel_lb_addr eloc;
- uint32_t elen;
uint8_t alloctype;
struct extent_position epos;
@@ -273,12 +274,11 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
eloc.logicalBlockNum = *block;
eloc.partitionReferenceNum =
iinfo->i_location.partitionReferenceNum;
- elen = inode->i_sb->s_blocksize;
- iinfo->i_lenExtents = elen;
+ iinfo->i_lenExtents = inode->i_size;
epos.bh = NULL;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
- udf_add_aext(inode, &epos, &eloc, elen, 0);
+ udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
/* UniqueID stuff */
brelse(epos.bh);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 21dad8c608f..cd2115060fd 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -408,15 +408,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
}
add:
- /* Is there any extent whose size we need to round up? */
- if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && elen) {
- elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
- if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- epos.offset -= sizeof(struct short_ad);
- else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- epos.offset -= sizeof(struct long_ad);
- udf_write_aext(dir, &epos, &eloc, elen, 1);
- }
f_pos += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
@@ -439,6 +430,7 @@ add:
udf_current_aext(dir, &epos, &eloc, &elen, 1);
}
+ /* Entry fits into current block? */
if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
fibh->soffset = fibh->eoffset;
fibh->eoffset += nfidlen;
@@ -462,6 +454,16 @@ add:
(fibh->sbh->b_data + fibh->soffset);
}
} else {
+ /* Round up last extent in the file */
+ elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
+ if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ epos.offset -= sizeof(struct short_ad);
+ else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+ epos.offset -= sizeof(struct long_ad);
+ udf_write_aext(dir, &epos, &eloc, elen, 1);
+ dinfo->i_lenExtents = (dinfo->i_lenExtents + sb->s_blocksize
+ - 1) & ~(sb->s_blocksize - 1);
+
fibh->soffset = fibh->eoffset - sb->s_blocksize;
fibh->eoffset += nfidlen - sb->s_blocksize;
if (fibh->sbh != fibh->ebh) {
@@ -508,6 +510,20 @@ add:
dir->i_size += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
dinfo->i_lenAlloc += nfidlen;
+ else {
+ /* Find the last extent and truncate it to proper size */
+ while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
+ (EXT_RECORDED_ALLOCATED >> 30))
+ ;
+ elen -= dinfo->i_lenExtents - dir->i_size;
+ if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ epos.offset -= sizeof(struct short_ad);
+ else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+ epos.offset -= sizeof(struct long_ad);
+ udf_write_aext(dir, &epos, &eloc, elen, 1);
+ dinfo->i_lenExtents = dir->i_size;
+ }
+
mark_inode_dirty(dir);
goto out_ok;
} else {
@@ -922,7 +938,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
block = udf_get_pblock(inode->i_sb, block,
iinfo->i_location.partitionReferenceNum,
0);
- epos.bh = udf_tread(inode->i_sb, block);
+ epos.bh = udf_tgetblk(inode->i_sb, block);
lock_buffer(epos.bh);
memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(epos.bh);
@@ -999,6 +1015,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
inode->i_size = elen;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
iinfo->i_lenAlloc = inode->i_size;
+ else
+ udf_truncate_tail_extent(inode);
mark_inode_dirty(inode);
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9d1b8c2e6c4..1e4543cbcd2 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1078,21 +1078,39 @@ static int udf_fill_partdesc_info(struct super_block *sb,
return 0;
}
-static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
+static void udf_find_vat_block(struct super_block *sb, int p_index,
+ int type1_index, sector_t start_block)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map = &sbi->s_partmaps[p_index];
+ sector_t vat_block;
struct kernel_lb_addr ino;
+
+ /*
+ * VAT file entry is in the last recorded block. Some broken disks have
+ * it a few blocks before so try a bit harder...
+ */
+ ino.partitionReferenceNum = type1_index;
+ for (vat_block = start_block;
+ vat_block >= map->s_partition_root &&
+ vat_block >= start_block - 3 &&
+ !sbi->s_vat_inode; vat_block--) {
+ ino.logicalBlockNum = vat_block - map->s_partition_root;
+ sbi->s_vat_inode = udf_iget(sb, &ino);
+ }
+}
+
+static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
+{
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct udf_part_map *map = &sbi->s_partmaps[p_index];
struct buffer_head *bh = NULL;
struct udf_inode_info *vati;
uint32_t pos;
struct virtualAllocationTable20 *vat20;
sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
- /* VAT file entry is in the last recorded block */
- ino.partitionReferenceNum = type1_index;
- ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, &ino);
+ udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
if (!sbi->s_vat_inode &&
sbi->s_last_block != blocks - 1) {
printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
@@ -1100,9 +1118,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
"block of the device (%lu).\n",
(unsigned long)sbi->s_last_block,
(unsigned long)blocks - 1);
- ino.partitionReferenceNum = type1_index;
- ino.logicalBlockNum = blocks - 1 - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, &ino);
+ udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
}
if (!sbi->s_vat_inode)
return 1;
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 6f671f1ac27..22af68f8b68 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -70,13 +70,13 @@ static inline unsigned long ufs_dir_pages(struct inode *inode)
return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
-ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry)
+ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr)
{
ino_t res = 0;
struct ufs_dir_entry *de;
struct page *page;
- de = ufs_find_entry(dir, dentry, &page);
+ de = ufs_find_entry(dir, qstr, &page);
if (de) {
res = fs32_to_cpu(dir->i_sb, de->d_ino);
ufs_put_page(page);
@@ -249,12 +249,12 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
* (as a parameter - res_dir). Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
-struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
+struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr,
struct page **res_page)
{
struct super_block *sb = dir->i_sb;
- const char *name = dentry->d_name.name;
- int namelen = dentry->d_name.len;
+ const char *name = qstr->name;
+ int namelen = qstr->len;
unsigned reclen = UFS_DIR_REC_LEN(namelen);
unsigned long start, n;
unsigned long npages = ufs_dir_pages(dir);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 23119fe7ad6..4c26d9e8bc9 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -56,7 +56,7 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
return ERR_PTR(-ENAMETOOLONG);
lock_kernel();
- ino = ufs_inode_by_name(dir, dentry);
+ ino = ufs_inode_by_name(dir, &dentry->d_name);
if (ino) {
inode = ufs_iget(dir->i_sb, ino);
if (IS_ERR(inode)) {
@@ -237,7 +237,7 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
struct page *page;
int err = -ENOENT;
- de = ufs_find_entry(dir, dentry, &page);
+ de = ufs_find_entry(dir, &dentry->d_name, &page);
if (!de)
goto out;
@@ -281,7 +281,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ufs_dir_entry *old_de;
int err = -ENOENT;
- old_de = ufs_find_entry(old_dir, old_dentry, &old_page);
+ old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
if (!old_de)
goto out;
@@ -301,7 +301,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_dir;
err = -ENOENT;
- new_de = ufs_find_entry(new_dir, new_dentry, &new_page);
+ new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
if (!new_de)
goto out_dir;
inode_inc_link_count(old_inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 5faed7954d0..143c20bfb04 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -66,6 +66,7 @@
*/
+#include <linux/exportfs.h>
#include <linux/module.h>
#include <linux/bitops.h>
@@ -96,6 +97,56 @@
#include "swab.h"
#include "util.h"
+static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation)
+{
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+ struct inode *inode;
+
+ if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
+ return ERR_PTR(-ESTALE);
+
+ inode = ufs_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+ return inode;
+}
+
+static struct dentry *ufs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
+}
+
+static struct dentry *ufs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ return generic_fh_to_parent(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
+}
+
+static struct dentry *ufs_get_parent(struct dentry *child)
+{
+ struct qstr dot_dot = {
+ .name = "..",
+ .len = 2,
+ };
+ ino_t ino;
+
+ ino = ufs_inode_by_name(child->d_inode, &dot_dot);
+ if (!ino)
+ return ERR_PTR(-ENOENT);
+ return d_obtain_alias(ufs_iget(child->d_inode->i_sb, ino));
+}
+
+static const struct export_operations ufs_export_ops = {
+ .fh_to_dentry = ufs_fh_to_dentry,
+ .fh_to_parent = ufs_fh_to_parent,
+ .get_parent = ufs_get_parent,
+};
+
#ifdef CONFIG_UFS_DEBUG
/*
* Print contents of ufs_super_block, useful for debugging
@@ -990,6 +1041,7 @@ magic_found:
* Read ufs_super_block into internal data structures
*/
sb->s_op = &ufs_super_ops;
+ sb->s_export_op = &ufs_export_ops;
sb->dq_op = NULL; /***/
sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 644e77e1359..0b4c39bc0d9 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
/* dir.c */
extern const struct inode_operations ufs_dir_inode_operations;
extern int ufs_add_link (struct dentry *, struct inode *);
-extern ino_t ufs_inode_by_name(struct inode *, struct dentry *);
+extern ino_t ufs_inode_by_name(struct inode *, struct qstr *);
extern int ufs_make_empty(struct inode *, struct inode *);
-extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct dentry *, struct page **);
+extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **);
extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
extern int ufs_empty_dir (struct inode *);
extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
diff --git a/fs/xattr.c b/fs/xattr.c
index 6d4f6d3449f..46f87e828b4 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -615,12 +615,11 @@ ssize_t
generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size)
{
struct xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
- handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+ handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (!handler)
return -EOPNOTSUPP;
- return handler->get(inode, name, buffer, size);
+ return handler->get(dentry, name, buffer, size, handler->flags);
}
/*
@@ -630,18 +629,20 @@ generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t s
ssize_t
generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- struct inode *inode = dentry->d_inode;
- struct xattr_handler *handler, **handlers = inode->i_sb->s_xattr;
+ struct xattr_handler *handler, **handlers = dentry->d_sb->s_xattr;
unsigned int size = 0;
if (!buffer) {
- for_each_xattr_handler(handlers, handler)
- size += handler->list(inode, NULL, 0, NULL, 0);
+ for_each_xattr_handler(handlers, handler) {
+ size += handler->list(dentry, NULL, 0, NULL, 0,
+ handler->flags);
+ }
} else {
char *buf = buffer;
for_each_xattr_handler(handlers, handler) {
- size = handler->list(inode, buf, buffer_size, NULL, 0);
+ size = handler->list(dentry, buf, buffer_size,
+ NULL, 0, handler->flags);
if (size > buffer_size)
return -ERANGE;
buf += size;
@@ -659,14 +660,13 @@ int
generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
{
struct xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
if (size == 0)
value = ""; /* empty EA, do not remove */
- handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+ handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (!handler)
return -EOPNOTSUPP;
- return handler->set(inode, name, value, size, flags);
+ return handler->set(dentry, name, value, size, 0, handler->flags);
}
/*
@@ -677,12 +677,12 @@ int
generic_removexattr(struct dentry *dentry, const char *name)
{
struct xattr_handler *handler;
- struct inode *inode = dentry->d_inode;
- handler = xattr_resolve_name(inode->i_sb->s_xattr, &name);
+ handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (!handler)
return -EOPNOTSUPP;
- return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
+ return handler->set(dentry, name, NULL, 0,
+ XATTR_REPLACE, handler->flags);
}
EXPORT_SYMBOL(generic_getxattr);
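To summarize the interface these hunks converge on, here is a sketch of an xattr_handler under the new dentry-based signatures; every examplefs_* name is hypothetical and the bodies are placeholders.
#include <linux/fs.h>
#include <linux/xattr.h>
/* Handlers now receive the dentry plus their own ->flags value, so one
 * get/set pair can back several related attributes (as the ACL handlers
 * above do with ACL_TYPE_ACCESS vs ACL_TYPE_DEFAULT). */
static int examplefs_xattr_get(struct dentry *dentry, const char *name,
			       void *buffer, size_t size, int handler_flags)
{
	if (IS_PRIVATE(dentry->d_inode))
		return -EPERM;
	/* a real handler would fetch the value here */
	return -ENODATA;
}
static int examplefs_xattr_set(struct dentry *dentry, const char *name,
			       const void *value, size_t size, int flags,
			       int handler_flags)
{
	if (IS_PRIVATE(dentry->d_inode))
		return -EPERM;
	/* a real handler would store the value here */
	return 0;
}
struct xattr_handler examplefs_xattr_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= 0,
	.get	= examplefs_xattr_get,
	.set	= examplefs_xattr_set,
};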
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 7a59daed178..56641fe52a2 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -26,6 +26,8 @@ endif
obj-$(CONFIG_XFS_FS) += xfs.o
+xfs-y += linux-2.6/xfs_trace.o
+
xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
xfs_dquot.o \
xfs_dquot_item.o \
@@ -90,8 +92,7 @@ xfs-y += xfs_alloc.o \
xfs_rw.o \
xfs_dmops.o
-xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \
- xfs_dir2_trace.o
+xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o
# Objects in linux/
xfs-y += $(addprefix $(XFS_LINUX)/, \
@@ -113,6 +114,3 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \
xfs-y += $(addprefix support/, \
debug.o \
uuid.o)
-
-xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
-
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index b23a5450644..2512125dfa7 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -21,6 +21,7 @@
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
@@ -353,37 +354,14 @@ xfs_acl_chmod(struct inode *inode)
return error;
}
-/*
- * System xattr handlers.
- *
- * Currently Posix ACLs are the only system namespace extended attribute
- * handlers supported by XFS, so we just implement the handlers here.
- * If we ever support other system extended attributes this will need
- * some refactoring.
- */
-
static int
-xfs_decode_acl(const char *name)
-{
- if (strcmp(name, "posix_acl_access") == 0)
- return ACL_TYPE_ACCESS;
- else if (strcmp(name, "posix_acl_default") == 0)
- return ACL_TYPE_DEFAULT;
- return -EINVAL;
-}
-
-static int
-xfs_xattr_system_get(struct inode *inode, const char *name,
- void *value, size_t size)
+xfs_xattr_acl_get(struct dentry *dentry, const char *name,
+ void *value, size_t size, int type)
{
struct posix_acl *acl;
- int type, error;
-
- type = xfs_decode_acl(name);
- if (type < 0)
- return type;
+ int error;
- acl = xfs_get_acl(inode, type);
+ acl = xfs_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
@@ -396,15 +374,13 @@ xfs_xattr_system_get(struct inode *inode, const char *name,
}
static int
-xfs_xattr_system_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+xfs_xattr_acl_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
{
+ struct inode *inode = dentry->d_inode;
struct posix_acl *acl = NULL;
- int error = 0, type;
+ int error = 0;
- type = xfs_decode_acl(name);
- if (type < 0)
- return type;
if (flags & XATTR_CREATE)
return -EINVAL;
if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
@@ -461,8 +437,16 @@ xfs_xattr_system_set(struct inode *inode, const char *name,
return error;
}
-struct xattr_handler xfs_xattr_system_handler = {
- .prefix = XATTR_SYSTEM_PREFIX,
- .get = xfs_xattr_system_get,
- .set = xfs_xattr_system_set,
+struct xattr_handler xfs_xattr_acl_access_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
+ .get = xfs_xattr_acl_get,
+ .set = xfs_xattr_acl_set,
+};
+
+struct xattr_handler xfs_xattr_acl_default_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
+ .get = xfs_xattr_acl_get,
+ .set = xfs_xattr_acl_set,
};
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 70f989895d1..66abe36c121 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,6 +38,7 @@
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
@@ -76,7 +77,7 @@ xfs_ioend_wake(
wake_up(to_ioend_wq(ip));
}
-STATIC void
+void
xfs_count_page_state(
struct page *page,
int *delalloc,
@@ -98,48 +99,6 @@ xfs_count_page_state(
} while ((bh = bh->b_this_page) != head);
}
-#if defined(XFS_RW_TRACE)
-void
-xfs_page_trace(
- int tag,
- struct inode *inode,
- struct page *page,
- unsigned long pgoff)
-{
- xfs_inode_t *ip;
- loff_t isize = i_size_read(inode);
- loff_t offset = page_offset(page);
- int delalloc = -1, unmapped = -1, unwritten = -1;
-
- if (page_has_buffers(page))
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-
- ip = XFS_I(inode);
- if (!ip->i_rwtrace)
- return;
-
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)inode,
- (void *)page,
- (void *)pgoff,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
- (void *)((unsigned long)(isize & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)delalloc),
- (void *)((unsigned long)unmapped),
- (void *)((unsigned long)unwritten),
- (void *)((unsigned long)current_pid()),
- (void *)NULL);
-}
-#else
-#define xfs_page_trace(tag, inode, page, pgoff)
-#endif
-
STATIC struct block_device *
xfs_find_bdev_for_inode(
struct xfs_inode *ip)
@@ -235,71 +194,36 @@ xfs_setfilesize(
}
/*
- * Buffered IO write completion for delayed allocate extents.
- */
-STATIC void
-xfs_end_bio_delalloc(
- struct work_struct *work)
-{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
-
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
+ * IO write completion.
*/
STATIC void
-xfs_end_bio_written(
- struct work_struct *work)
-{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
-
- xfs_setfilesize(ioend);
- xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
+xfs_end_io(
struct work_struct *work)
{
xfs_ioend_t *ioend =
container_of(work, xfs_ioend_t, io_work);
struct xfs_inode *ip = XFS_I(ioend->io_inode);
- xfs_off_t offset = ioend->io_offset;
- size_t size = ioend->io_size;
-
- if (likely(!ioend->io_error)) {
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- int error;
- error = xfs_iomap_write_unwritten(ip, offset, size);
- if (error)
- ioend->io_error = error;
- }
- xfs_setfilesize(ioend);
- }
- xfs_destroy_ioend(ioend);
-}
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
- struct work_struct *work)
-{
- xfs_ioend_t *ioend =
- container_of(work, xfs_ioend_t, io_work);
+ /*
+ * For unwritten extents we need to issue transactions to convert a
+ * range to normal written extents after the data I/O has finished.
+ */
+ if (ioend->io_type == IOMAP_UNWRITTEN &&
+ likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+ int error;
+
+ error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+ ioend->io_size);
+ if (error)
+ ioend->io_error = error;
+ }
+ /*
+ * We might have to update the on-disk file size after extending
+ * writes.
+ */
+ if (ioend->io_type != IOMAP_READ)
+ xfs_setfilesize(ioend);
xfs_destroy_ioend(ioend);
}
@@ -314,10 +238,10 @@ xfs_finish_ioend(
int wait)
{
if (atomic_dec_and_test(&ioend->io_remaining)) {
- struct workqueue_struct *wq = xfsdatad_workqueue;
- if (ioend->io_work.func == xfs_end_bio_unwritten)
- wq = xfsconvertd_workqueue;
+ struct workqueue_struct *wq;
+ wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+ xfsconvertd_workqueue : xfsdatad_workqueue;
queue_work(wq, &ioend->io_work);
if (wait)
flush_workqueue(wq);
@@ -355,15 +279,7 @@ xfs_alloc_ioend(
ioend->io_offset = 0;
ioend->io_size = 0;
- if (type == IOMAP_UNWRITTEN)
- INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
- else if (type == IOMAP_DELAY)
- INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
- else if (type == IOMAP_READ)
- INIT_WORK(&ioend->io_work, xfs_end_bio_read);
- else
- INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+ INIT_WORK(&ioend->io_work, xfs_end_io);
return ioend;
}
@@ -380,7 +296,7 @@ xfs_map_blocks(
return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}
-STATIC_INLINE int
+STATIC int
xfs_iomap_valid(
xfs_iomap_t *iomapp,
loff_t offset)
@@ -412,8 +328,9 @@ xfs_end_bio(
STATIC void
xfs_submit_ioend_bio(
- xfs_ioend_t *ioend,
- struct bio *bio)
+ struct writeback_control *wbc,
+ xfs_ioend_t *ioend,
+ struct bio *bio)
{
atomic_inc(&ioend->io_remaining);
bio->bi_private = ioend;
@@ -426,7 +343,8 @@ xfs_submit_ioend_bio(
if (xfs_ioend_new_eof(ioend))
xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
- submit_bio(WRITE, bio);
+ submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+ WRITE_SYNC_PLUG : WRITE, bio);
ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
bio_put(bio);
}
@@ -505,6 +423,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
*/
STATIC void
xfs_submit_ioend(
+ struct writeback_control *wbc,
xfs_ioend_t *ioend)
{
xfs_ioend_t *head = ioend;
@@ -533,19 +452,19 @@ xfs_submit_ioend(
retry:
bio = xfs_alloc_ioend_bio(bh);
} else if (bh->b_blocknr != lastblock + 1) {
- xfs_submit_ioend_bio(ioend, bio);
+ xfs_submit_ioend_bio(wbc, ioend, bio);
goto retry;
}
if (bio_add_buffer(bio, bh) != bh->b_size) {
- xfs_submit_ioend_bio(ioend, bio);
+ xfs_submit_ioend_bio(wbc, ioend, bio);
goto retry;
}
lastblock = bh->b_blocknr;
}
if (bio)
- xfs_submit_ioend_bio(ioend, bio);
+ xfs_submit_ioend_bio(wbc, ioend, bio);
xfs_finish_ioend(ioend, 0);
} while ((ioend = next) != NULL);
}
@@ -1191,7 +1110,7 @@ xfs_page_state_convert(
}
if (iohead)
- xfs_submit_ioend(iohead);
+ xfs_submit_ioend(wbc, iohead);
return page_dirty;
@@ -1242,7 +1161,7 @@ xfs_vm_writepage(
int delalloc, unmapped, unwritten;
struct inode *inode = page->mapping->host;
- xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+ trace_xfs_writepage(inode, page, 0);
/*
* We need a transaction if:
@@ -1347,7 +1266,7 @@ xfs_vm_releasepage(
.nr_to_write = 1,
};
- xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+ trace_xfs_releasepage(inode, page, 0);
if (!page_has_buffers(page))
return 0;
@@ -1528,7 +1447,7 @@ xfs_end_io_direct(
* didn't map an unwritten extent so switch its completion
* handler.
*/
- INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+ ioend->io_type = IOMAP_NEW;
xfs_finish_ioend(ioend, 0);
}
@@ -1555,19 +1474,13 @@ xfs_vm_direct_IO(
bdev = xfs_find_bdev_for_inode(XFS_I(inode));
- if (rw == WRITE) {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
- ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- } else {
- iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
- bdev, iov, offset, nr_segs,
- xfs_get_blocks_direct,
- xfs_end_io_direct);
- }
+ iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
+ IOMAP_UNWRITTEN : IOMAP_READ);
+
+ ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+ offset, nr_segs,
+ xfs_get_blocks_direct,
+ xfs_end_io_direct);
if (unlikely(ret != -EIOCBQUEUED && iocb->private))
xfs_destroy_ioend(iocb->private);
@@ -1627,8 +1540,7 @@ xfs_vm_invalidatepage(
struct page *page,
unsigned long offset)
{
- xfs_page_trace(XFS_INVALIDPAGE_ENTER,
- page->mapping->host, page, offset);
+ trace_xfs_invalidatepage(page->mapping->host, page, offset);
block_invalidatepage(page, offset);
}
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 221b3e66cee..4cfc6ea87df 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -45,4 +45,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
extern void xfs_ioend_init(void);
extern void xfs_ioend_wait(struct xfs_inode *);
+extern void xfs_count_page_state(struct page *, int *, int *, int *);
+
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 965df1227d6..77b8be81c76 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -39,6 +39,7 @@
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
+#include "xfs_trace.h"
static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
@@ -53,34 +54,6 @@ static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;
-#ifdef XFS_BUF_TRACE
-void
-xfs_buf_trace(
- xfs_buf_t *bp,
- char *id,
- void *data,
- void *ra)
-{
- ktrace_enter(xfs_buf_trace_buf,
- bp, id,
- (void *)(unsigned long)bp->b_flags,
- (void *)(unsigned long)bp->b_hold.counter,
- (void *)(unsigned long)bp->b_sema.count,
- (void *)current,
- data, ra,
- (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
- (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
- (void *)(unsigned long)bp->b_buffer_length,
- NULL, NULL, NULL, NULL, NULL);
-}
-ktrace_t *xfs_buf_trace_buf;
-#define XFS_BUF_TRACE_SIZE 4096
-#define XB_TRACE(bp, id, data) \
- xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
-#else
-#define XB_TRACE(bp, id, data) do { } while (0)
-#endif
-
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
@@ -149,7 +122,7 @@ page_region_mask(
return mask;
}
-STATIC_INLINE void
+STATIC void
set_page_region(
struct page *page,
size_t offset,
@@ -161,7 +134,7 @@ set_page_region(
SetPageUptodate(page);
}
-STATIC_INLINE int
+STATIC int
test_page_region(
struct page *page,
size_t offset,
@@ -279,7 +252,8 @@ _xfs_buf_initialize(
init_waitqueue_head(&bp->b_waiters);
XFS_STATS_INC(xb_create);
- XB_TRACE(bp, "initialize", target);
+
+ trace_xfs_buf_init(bp, _RET_IP_);
}
/*
@@ -318,6 +292,7 @@ _xfs_buf_free_pages(
{
if (bp->b_pages != bp->b_page_array) {
kmem_free(bp->b_pages);
+ bp->b_pages = NULL;
}
}
@@ -332,7 +307,7 @@ void
xfs_buf_free(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "free", 0);
+ trace_xfs_buf_free(bp, _RET_IP_);
ASSERT(list_empty(&bp->b_hash_list));
@@ -349,9 +324,8 @@ xfs_buf_free(
ASSERT(!PagePrivate(page));
page_cache_release(page);
}
- _xfs_buf_free_pages(bp);
}
-
+ _xfs_buf_free_pages(bp);
xfs_buf_deallocate(bp);
}
@@ -445,7 +419,6 @@ _xfs_buf_lookup_pages(
if (page_count == bp->b_page_count)
bp->b_flags |= XBF_DONE;
- XB_TRACE(bp, "lookup_pages", (long)page_count);
return error;
}
@@ -548,7 +521,6 @@ found:
if (down_trylock(&bp->b_sema)) {
if (!(flags & XBF_TRYLOCK)) {
/* wait for buffer ownership */
- XB_TRACE(bp, "get_lock", 0);
xfs_buf_lock(bp);
XFS_STATS_INC(xb_get_locked_waited);
} else {
@@ -571,7 +543,8 @@ found:
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
bp->b_flags &= XBF_MAPPED;
}
- XB_TRACE(bp, "got_lock", 0);
+
+ trace_xfs_buf_find(bp, flags, _RET_IP_);
XFS_STATS_INC(xb_get_locked);
return bp;
}
@@ -582,7 +555,7 @@ found:
* although backing storage may not be.
*/
xfs_buf_t *
-xfs_buf_get_flags(
+xfs_buf_get(
xfs_buftarg_t *target,/* target for buffer */
xfs_off_t ioff, /* starting offset of range */
size_t isize, /* length of range */
@@ -627,7 +600,7 @@ xfs_buf_get_flags(
bp->b_bn = ioff;
bp->b_count_desired = bp->b_buffer_length;
- XB_TRACE(bp, "get", (unsigned long)flags);
+ trace_xfs_buf_get(bp, flags, _RET_IP_);
return bp;
no_buffer:
@@ -644,8 +617,6 @@ _xfs_buf_read(
{
int status;
- XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
-
ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
@@ -661,7 +632,7 @@ _xfs_buf_read(
}
xfs_buf_t *
-xfs_buf_read_flags(
+xfs_buf_read(
xfs_buftarg_t *target,
xfs_off_t ioff,
size_t isize,
@@ -671,21 +642,20 @@ xfs_buf_read_flags(
flags |= XBF_READ;
- bp = xfs_buf_get_flags(target, ioff, isize, flags);
+ bp = xfs_buf_get(target, ioff, isize, flags);
if (bp) {
+ trace_xfs_buf_read(bp, flags, _RET_IP_);
+
if (!XFS_BUF_ISDONE(bp)) {
- XB_TRACE(bp, "read", (unsigned long)flags);
XFS_STATS_INC(xb_get_read);
_xfs_buf_read(bp, flags);
} else if (flags & XBF_ASYNC) {
- XB_TRACE(bp, "read_async", (unsigned long)flags);
/*
* Read ahead call which is already satisfied,
* drop the buffer
*/
goto no_buffer;
} else {
- XB_TRACE(bp, "read_done", (unsigned long)flags);
/* We do not want read in the flags */
bp->b_flags &= ~XBF_READ;
}
@@ -718,7 +688,7 @@ xfs_buf_readahead(
return;
flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
- xfs_buf_read_flags(target, ioff, isize, flags);
+ xfs_buf_read(target, ioff, isize, flags);
}
xfs_buf_t *
@@ -823,7 +793,7 @@ xfs_buf_get_noaddr(
xfs_buf_unlock(bp);
- XB_TRACE(bp, "no_daddr", len);
+ trace_xfs_buf_get_noaddr(bp, _RET_IP_);
return bp;
fail_free_mem:
@@ -845,8 +815,8 @@ void
xfs_buf_hold(
xfs_buf_t *bp)
{
+ trace_xfs_buf_hold(bp, _RET_IP_);
atomic_inc(&bp->b_hold);
- XB_TRACE(bp, "hold", 0);
}
/*
@@ -859,7 +829,7 @@ xfs_buf_rele(
{
xfs_bufhash_t *hash = bp->b_hash;
- XB_TRACE(bp, "rele", bp->b_relse);
+ trace_xfs_buf_rele(bp, _RET_IP_);
if (unlikely(!hash)) {
ASSERT(!bp->b_relse);
@@ -909,21 +879,19 @@ xfs_buf_cond_lock(
int locked;
locked = down_trylock(&bp->b_sema) == 0;
- if (locked) {
+ if (locked)
XB_SET_OWNER(bp);
- }
- XB_TRACE(bp, "cond_lock", (long)locked);
+
+ trace_xfs_buf_cond_lock(bp, _RET_IP_);
return locked ? 0 : -EBUSY;
}
-#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
xfs_buf_t *bp)
{
return bp->b_sema.count;
}
-#endif
/*
* Locks a buffer object.
@@ -935,12 +903,14 @@ void
xfs_buf_lock(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "lock", 0);
+ trace_xfs_buf_lock(bp, _RET_IP_);
+
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
down(&bp->b_sema);
XB_SET_OWNER(bp);
- XB_TRACE(bp, "locked", 0);
+
+ trace_xfs_buf_lock_done(bp, _RET_IP_);
}
/*
@@ -962,7 +932,8 @@ xfs_buf_unlock(
XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
- XB_TRACE(bp, "unlock", 0);
+
+ trace_xfs_buf_unlock(bp, _RET_IP_);
}
@@ -974,17 +945,18 @@ void
xfs_buf_pin(
xfs_buf_t *bp)
{
+ trace_xfs_buf_pin(bp, _RET_IP_);
atomic_inc(&bp->b_pin_count);
- XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}
void
xfs_buf_unpin(
xfs_buf_t *bp)
{
+ trace_xfs_buf_unpin(bp, _RET_IP_);
+
if (atomic_dec_and_test(&bp->b_pin_count))
wake_up_all(&bp->b_waiters);
- XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}
int
@@ -1035,7 +1007,7 @@ xfs_buf_iodone_work(
*/
if ((bp->b_error == EOPNOTSUPP) &&
(bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
- XB_TRACE(bp, "ordered_retry", bp->b_iodone);
+ trace_xfs_buf_ordered_retry(bp, _RET_IP_);
bp->b_flags &= ~XBF_ORDERED;
bp->b_flags |= _XFS_BARRIER_FAILED;
xfs_buf_iorequest(bp);
@@ -1050,12 +1022,12 @@ xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
+ trace_xfs_buf_iodone(bp, _RET_IP_);
+
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
if (bp->b_error == 0)
bp->b_flags |= XBF_DONE;
- XB_TRACE(bp, "iodone", bp->b_iodone);
-
if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
@@ -1075,7 +1047,7 @@ xfs_buf_ioerror(
{
ASSERT(error >= 0 && error <= 0xffff);
bp->b_error = (unsigned short)error;
- XB_TRACE(bp, "ioerror", (unsigned long)error);
+ trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}
int
@@ -1083,7 +1055,7 @@ xfs_bawrite(
void *mp,
struct xfs_buf *bp)
{
- XB_TRACE(bp, "bawrite", 0);
+ trace_xfs_buf_bawrite(bp, _RET_IP_);
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
@@ -1102,7 +1074,7 @@ xfs_bdwrite(
void *mp,
struct xfs_buf *bp)
{
- XB_TRACE(bp, "bdwrite", 0);
+ trace_xfs_buf_bdwrite(bp, _RET_IP_);
bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
@@ -1113,7 +1085,7 @@ xfs_bdwrite(
xfs_buf_delwri_queue(bp, 1);
}
-STATIC_INLINE void
+STATIC void
_xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
@@ -1177,10 +1149,14 @@ _xfs_buf_ioapply(
if (bp->b_flags & XBF_ORDERED) {
ASSERT(!(bp->b_flags & XBF_READ));
rw = WRITE_BARRIER;
- } else if (bp->b_flags & _XBF_RUN_QUEUES) {
+ } else if (bp->b_flags & XBF_LOG_BUFFER) {
ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
bp->b_flags &= ~_XBF_RUN_QUEUES;
rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
+ } else if (bp->b_flags & _XBF_RUN_QUEUES) {
+ ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
+ bp->b_flags &= ~_XBF_RUN_QUEUES;
+ rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
} else {
rw = (bp->b_flags & XBF_WRITE) ? WRITE :
(bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
@@ -1253,7 +1229,7 @@ int
xfs_buf_iorequest(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "iorequest", 0);
+ trace_xfs_buf_iorequest(bp, _RET_IP_);
if (bp->b_flags & XBF_DELWRI) {
xfs_buf_delwri_queue(bp, 1);
@@ -1287,11 +1263,13 @@ int
xfs_buf_iowait(
xfs_buf_t *bp)
{
- XB_TRACE(bp, "iowait", 0);
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
wait_for_completion(&bp->b_iowait);
- XB_TRACE(bp, "iowaited", (long)bp->b_error);
+
+ trace_xfs_buf_iowait_done(bp, _RET_IP_);
return bp->b_error;
}
@@ -1604,7 +1582,8 @@ xfs_buf_delwri_queue(
struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
- XB_TRACE(bp, "delwri_q", (long)unlock);
+ trace_xfs_buf_delwri_queue(bp, _RET_IP_);
+
ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
spin_lock(dwlk);
@@ -1644,7 +1623,7 @@ xfs_buf_delwri_dequeue(
if (dequeued)
xfs_buf_rele(bp);
- XB_TRACE(bp, "delwri_dq", (long)dequeued);
+ trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}
STATIC void
@@ -1692,7 +1671,7 @@ xfs_buf_delwri_split(
INIT_LIST_HEAD(list);
spin_lock(dwlk);
list_for_each_entry_safe(bp, n, dwq, b_list) {
- XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
+ trace_xfs_buf_delwri_split(bp, _RET_IP_);
ASSERT(bp->b_flags & XBF_DELWRI);
if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1816,14 +1795,10 @@ xfs_flush_buftarg(
int __init
xfs_buf_init(void)
{
-#ifdef XFS_BUF_TRACE
- xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
-#endif
-
xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
KM_ZONE_HWALIGN, NULL);
if (!xfs_buf_zone)
- goto out_free_trace_buf;
+ goto out;
xfslogd_workqueue = create_workqueue("xfslogd");
if (!xfslogd_workqueue)
@@ -1846,10 +1821,7 @@ xfs_buf_init(void)
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
kmem_zone_destroy(xfs_buf_zone);
- out_free_trace_buf:
-#ifdef XFS_BUF_TRACE
- ktrace_free(xfs_buf_trace_buf);
-#endif
+ out:
return -ENOMEM;
}
@@ -1861,9 +1833,6 @@ xfs_buf_terminate(void)
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
-#ifdef XFS_BUF_TRACE
- ktrace_free(xfs_buf_trace_buf);
-#endif
}
#ifdef CONFIG_KDB_MODULES
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 9b4d666ad31..a34c7b54822 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -55,6 +55,7 @@ typedef enum {
XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
XBF_ORDERED = (1 << 11), /* use ordered writes */
XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
+ XBF_LOG_BUFFER = (1 << 13), /* this is a buffer used for the log */
/* flags used only as arguments to access routines */
XBF_LOCK = (1 << 14), /* lock requested */
@@ -95,6 +96,28 @@ typedef enum {
_XFS_BARRIER_FAILED = (1 << 23),
} xfs_buf_flags_t;
+#define XFS_BUF_FLAGS \
+ { XBF_READ, "READ" }, \
+ { XBF_WRITE, "WRITE" }, \
+ { XBF_MAPPED, "MAPPED" }, \
+ { XBF_ASYNC, "ASYNC" }, \
+ { XBF_DONE, "DONE" }, \
+ { XBF_DELWRI, "DELWRI" }, \
+ { XBF_STALE, "STALE" }, \
+ { XBF_FS_MANAGED, "FS_MANAGED" }, \
+ { XBF_ORDERED, "ORDERED" }, \
+ { XBF_READ_AHEAD, "READ_AHEAD" }, \
+ { XBF_LOCK, "LOCK" }, /* should never be set */\
+ { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
+ { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
+ { _XBF_PAGE_CACHE, "PAGE_CACHE" }, \
+ { _XBF_PAGES, "PAGES" }, \
+ { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
+ { _XBF_DELWRI_Q, "DELWRI_Q" }, \
+ { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }, \
+ { _XFS_BARRIER_FAILED, "BARRIER_FAILED" }
+
+
typedef enum {
XBT_FORCE_SLEEP = 0,
XBT_FORCE_FLUSH = 1,
@@ -186,15 +209,10 @@ extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
#define xfs_incore(buftarg,blkno,len,lockit) \
_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
-extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
-#define xfs_buf_get(target, blkno, len, flags) \
- xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
-#define xfs_buf_read(target, blkno, len, flags) \
- xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
@@ -248,13 +266,6 @@ extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);
-#ifdef XFS_BUF_TRACE
-extern ktrace_t *xfs_buf_trace_buf;
-extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
-#else
-#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
-#endif
-
#define xfs_buf_target_name(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
@@ -370,10 +381,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
#define xfs_bpin(bp) xfs_buf_pin(bp)
#define xfs_bunpin(bp) xfs_buf_unpin(bp)
-
-#define xfs_buftrace(id, bp) \
- xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
-
#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
#define xfs_biomove(bp, off, len, data, rw) \
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index eff61e2732a..e4caeb28ce2 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -52,7 +52,7 @@ xfs_file_aio_read(
loff_t pos)
{
struct file *file = iocb->ki_filp;
- int ioflags = IO_ISAIO;
+ int ioflags = 0;
BUG_ON(iocb->ki_pos != pos);
if (unlikely(file->f_flags & O_DIRECT))
@@ -71,7 +71,7 @@ xfs_file_aio_write(
loff_t pos)
{
struct file *file = iocb->ki_filp;
- int ioflags = IO_ISAIO;
+ int ioflags = 0;
BUG_ON(iocb->ki_pos != pos);
if (unlikely(file->f_flags & O_DIRECT))
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 08be36d7326..7501b85fd86 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -19,6 +19,7 @@
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
+#include "xfs_trace.h"
int fs_noerr(void) { return 0; }
int fs_nosys(void) { return ENOSYS; }
@@ -51,6 +52,8 @@ xfs_flushinval_pages(
struct address_space *mapping = VFS_I(ip)->i_mapping;
int ret = 0;
+ trace_xfs_pagecache_inval(ip, first, last);
+
if (mapping->nrpages) {
xfs_iflags_clear(ip, XFS_ITRUNCATED);
ret = filemap_write_and_wait(mapping);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 5bb523d7f37..a034cf62443 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -51,6 +51,7 @@
#include "xfs_quota.h"
#include "xfs_inode_item.h"
#include "xfs_export.h"
+#include "xfs_trace.h"
#include <linux/capability.h>
#include <linux/dcache.h>
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index eafcc7c1870..be1527b1670 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -46,6 +46,7 @@
#include "xfs_attr.h"
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
+#include "xfs_trace.h"
#define _NATIVE_IOC(cmd, type) \
_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index cd42ef78f6b..225946012d0 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -47,6 +47,7 @@
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/capability.h>
#include <linux/xattr.h>
@@ -573,8 +574,8 @@ xfs_vn_fallocate(
bf.l_len = len;
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
- 0, XFS_ATTR_NOLOCK);
+ error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
+ 0, XFS_ATTR_NOLOCK);
if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode))
new_size = offset + len;
@@ -585,7 +586,7 @@ xfs_vn_fallocate(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size;
- error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+ error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
@@ -793,7 +794,7 @@ xfs_setup_inode(
struct inode *inode = &ip->i_vnode;
inode->i_ino = ip->i_ino;
- inode->i_state = I_NEW|I_LOCK;
+ inode->i_state = I_NEW;
inode_add_to_lists(ip->i_mount->m_super, inode);
inode->i_mode = ip->i_d.di_mode;
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 6127e24062d..5af0c81ca1a 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -40,7 +40,6 @@
#include <sv.h>
#include <time.h>
-#include <support/ktrace.h>
#include <support/debug.h>
#include <support/uuid.h>
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 072050f8d34..0d32457abef 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -48,73 +48,12 @@
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#include <linux/capability.h>
#include <linux/writeback.h>
-#if defined(XFS_RW_TRACE)
-void
-xfs_rw_enter_trace(
- int tag,
- xfs_inode_t *ip,
- void *data,
- size_t segs,
- loff_t offset,
- int ioflags)
-{
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(unsigned long)tag,
- (void *)ip,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)data,
- (void *)((unsigned long)segs),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)ioflags),
- (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-
-void
-xfs_inval_cached_trace(
- xfs_inode_t *ip,
- xfs_off_t offset,
- xfs_off_t len,
- xfs_off_t first,
- xfs_off_t last)
-{
-
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(__psint_t)XFS_INVAL_CACHED,
- (void *)ip,
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)((len >> 32) & 0xffffffff)),
- (void *)((unsigned long)(len & 0xffffffff)),
- (void *)((unsigned long)((first >> 32) & 0xffffffff)),
- (void *)((unsigned long)(first & 0xffffffff)),
- (void *)((unsigned long)((last >> 32) & 0xffffffff)),
- (void *)((unsigned long)(last & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-#endif
-
/*
* xfs_iozero
*
@@ -250,13 +189,10 @@ xfs_read(
}
}
- xfs_rw_enter_trace(XFS_READ_ENTER, ip,
- (void *)iovp, segs, *offset, ioflags);
+ trace_xfs_file_read(ip, size, *offset, ioflags);
iocb->ki_pos = *offset;
ret = generic_file_aio_read(iocb, iovp, segs, *offset);
- if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
- ret = wait_on_sync_kiocb(iocb);
if (ret > 0)
XFS_STATS_ADD(xs_read_bytes, ret);
@@ -294,8 +230,9 @@ xfs_splice_read(
return -error;
}
}
- xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip,
- pipe, count, *ppos, ioflags);
+
+ trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
+
ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
if (ret > 0)
XFS_STATS_ADD(xs_read_bytes, ret);
@@ -344,8 +281,8 @@ xfs_splice_write(
ip->i_new_size = new_size;
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
- pipe, count, *ppos, ioflags);
+ trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
+
ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
if (ret > 0)
XFS_STATS_ADD(xs_write_bytes, ret);
@@ -712,8 +649,6 @@ start:
if ((ioflags & IO_ISDIRECT)) {
if (mapping->nrpages) {
WARN_ON(need_i_mutex == 0);
- xfs_inval_cached_trace(xip, pos, -1,
- (pos & PAGE_CACHE_MASK), -1);
error = xfs_flushinval_pages(xip,
(pos & PAGE_CACHE_MASK),
-1, FI_REMAPF_LOCKED);
@@ -730,8 +665,7 @@ start:
need_i_mutex = 0;
}
- xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs,
- *offset, ioflags);
+ trace_xfs_file_direct_write(xip, count, *offset, ioflags);
ret = generic_file_direct_write(iocb, iovp,
&segs, pos, offset, count, ocount);
@@ -754,8 +688,7 @@ start:
ssize_t ret2 = 0;
write_retry:
- xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs,
- *offset, ioflags);
+ trace_xfs_file_buffered_write(xip, count, *offset, ioflags);
ret2 = generic_file_buffered_write(iocb, iovp, segs,
pos, offset, count, ret);
/*
@@ -774,9 +707,6 @@ write_retry:
current->backing_dev_info = NULL;
- if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
- ret = wait_on_sync_kiocb(iocb);
-
isize = i_size_read(inode);
if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
*offset = isize;
@@ -811,7 +741,7 @@ write_retry:
XFS_STATS_ADD(xs_write_bytes, ret);
/* Handle various SYNC-type writes */
- if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
+ if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
loff_t end = pos + ret - 1;
int error2;
@@ -863,7 +793,7 @@ int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
- xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
/*
* Metadata write that didn't get logged but
* written delayed anyway. These aren't associated
@@ -896,7 +826,7 @@ xfsbdstrat(
return;
}
- xfs_buftrace("XFSBDSTRAT IOERROR", bp);
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
xfs_bioerror_relse(bp);
}
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index e6be37dbd0e..d1f7789c7ff 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -20,52 +20,7 @@
struct xfs_mount;
struct xfs_inode;
-struct xfs_bmbt_irec;
struct xfs_buf;
-struct xfs_iomap;
-
-#if defined(XFS_RW_TRACE)
-/*
- * Defines for the trace mechanisms in xfs_lrw.c.
- */
-#define XFS_RW_KTRACE_SIZE 128
-
-#define XFS_READ_ENTER 1
-#define XFS_WRITE_ENTER 2
-#define XFS_IOMAP_READ_ENTER 3
-#define XFS_IOMAP_WRITE_ENTER 4
-#define XFS_IOMAP_READ_MAP 5
-#define XFS_IOMAP_WRITE_MAP 6
-#define XFS_IOMAP_WRITE_NOSPACE 7
-#define XFS_ITRUNC_START 8
-#define XFS_ITRUNC_FINISH1 9
-#define XFS_ITRUNC_FINISH2 10
-#define XFS_CTRUNC1 11
-#define XFS_CTRUNC2 12
-#define XFS_CTRUNC3 13
-#define XFS_CTRUNC4 14
-#define XFS_CTRUNC5 15
-#define XFS_CTRUNC6 16
-#define XFS_BUNMAP 17
-#define XFS_INVAL_CACHED 18
-#define XFS_DIORD_ENTER 19
-#define XFS_DIOWR_ENTER 20
-#define XFS_WRITEPAGE_ENTER 22
-#define XFS_RELEASEPAGE_ENTER 23
-#define XFS_INVALIDPAGE_ENTER 24
-#define XFS_IOMAP_ALLOC_ENTER 25
-#define XFS_IOMAP_ALLOC_MAP 26
-#define XFS_IOMAP_UNWRITTEN 27
-#define XFS_SPLICE_READ_ENTER 28
-#define XFS_SPLICE_WRITE_ENTER 29
-extern void xfs_rw_enter_trace(int, struct xfs_inode *,
- void *, size_t, loff_t, int);
-extern void xfs_inval_cached_trace(struct xfs_inode *,
- xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
-#else
-#define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags)
-#define xfs_inval_cached_trace(ip, offset, len, first, last)
-#endif
/* errors from xfsbdstrat() must be extracted from the buffer */
extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 18a4b8e11df..09783cc444a 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -15,6 +15,7 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
@@ -52,11 +53,11 @@
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2_trace.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_sync.h"
+#include "xfs_trace.h"
#include <linux/namei.h>
#include <linux/init.h>
@@ -930,13 +931,39 @@ xfs_fs_alloc_inode(
*/
STATIC void
xfs_fs_destroy_inode(
- struct inode *inode)
+ struct inode *inode)
{
- xfs_inode_t *ip = XFS_I(inode);
+ struct xfs_inode *ip = XFS_I(inode);
+
+ xfs_itrace_entry(ip);
XFS_STATS_INC(vn_reclaim);
- if (xfs_reclaim(ip))
- panic("%s: cannot reclaim 0x%p\n", __func__, inode);
+
+ /* bad inode, get out here ASAP */
+ if (is_bad_inode(inode))
+ goto out_reclaim;
+
+ xfs_ioend_wait(ip);
+
+ ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+
+ /*
+ * We should never get here with one of the reclaim flags already set.
+ */
+ ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+ ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
+
+ /*
+ * If we have nothing to flush with this inode then complete the
+ * teardown now, otherwise delay the flush operation.
+ */
+ if (!xfs_inode_clean(ip)) {
+ xfs_inode_set_reclaim_tag(ip);
+ return;
+ }
+
+out_reclaim:
+ xfs_ireclaim(ip);
}
/*
@@ -973,7 +1000,6 @@ xfs_fs_inode_init_once(
mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
"xfsino", ip->i_ino);
- mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
}
/*
@@ -1075,6 +1101,20 @@ xfs_fs_clear_inode(
XFS_STATS_INC(vn_remove);
XFS_STATS_DEC(vn_active);
+ /*
+ * The iolock is used by the file system to coordinate reads,
+ * writes, and block truncates. Up to this point the lock
+ * protected concurrent accesses by users of the inode. But
+ * from here forward we're doing some final processing of the
+ * inode because we're done with it, and although we reuse the
+ * iolock for protection it is really a distinct lock class
+ * (in the lockdep sense) from before. To keep lockdep happy
+ * (and basically indicate what we are doing), we explicitly
+ * re-init the iolock here.
+ */
+ ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+ mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+
xfs_inactive(ip);
}
@@ -1092,8 +1132,6 @@ xfs_fs_put_super(
struct super_block *sb)
{
struct xfs_mount *mp = XFS_M(sb);
- struct xfs_inode *rip = mp->m_rootip;
- int unmount_event_flags = 0;
xfs_syncd_stop(mp);
@@ -1109,20 +1147,7 @@ xfs_fs_put_super(
xfs_sync_attr(mp, 0);
}
-#ifdef HAVE_DMAPI
- if (mp->m_flags & XFS_MOUNT_DMAPI) {
- unmount_event_flags =
- (mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ?
- 0 : DM_FLAGS_UNWANTED;
- /*
- * Ignore error from dmapi here, first unmount is not allowed
- * to fail anyway, and second we wouldn't want to fail a
- * unmount because of dmapi.
- */
- XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
- NULL, NULL, 0, 0, unmount_event_flags);
- }
-#endif
+ XFS_SEND_PREUNMOUNT(mp);
/*
* Blow away any referenced inode in the filestreams cache.
@@ -1133,10 +1158,7 @@ xfs_fs_put_super(
XFS_bflush(mp->m_ddev_targp);
- if (mp->m_flags & XFS_MOUNT_DMAPI) {
- XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0,
- unmount_event_flags);
- }
+ XFS_SEND_UNMOUNT(mp);
xfs_unmountfs(mp);
xfs_freesb(mp);
@@ -1504,8 +1526,6 @@ xfs_fs_fill_super(
goto fail_vnrele;
kfree(mtpt);
-
- xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
return 0;
out_filestream_unmount:
@@ -1581,94 +1601,6 @@ static struct file_system_type xfs_fs_type = {
};
STATIC int __init
-xfs_alloc_trace_bufs(void)
-{
-#ifdef XFS_ALLOC_TRACE
- xfs_alloc_trace_buf = ktrace_alloc(XFS_ALLOC_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_alloc_trace_buf)
- goto out;
-#endif
-#ifdef XFS_BMAP_TRACE
- xfs_bmap_trace_buf = ktrace_alloc(XFS_BMAP_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_bmap_trace_buf)
- goto out_free_alloc_trace;
-#endif
-#ifdef XFS_BTREE_TRACE
- xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE,
- KM_MAYFAIL);
- if (!xfs_allocbt_trace_buf)
- goto out_free_bmap_trace;
-
- xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_inobt_trace_buf)
- goto out_free_allocbt_trace;
-
- xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_bmbt_trace_buf)
- goto out_free_inobt_trace;
-#endif
-#ifdef XFS_ATTR_TRACE
- xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL);
- if (!xfs_attr_trace_buf)
- goto out_free_bmbt_trace;
-#endif
-#ifdef XFS_DIR2_TRACE
- xfs_dir2_trace_buf = ktrace_alloc(XFS_DIR2_GTRACE_SIZE, KM_MAYFAIL);
- if (!xfs_dir2_trace_buf)
- goto out_free_attr_trace;
-#endif
-
- return 0;
-
-#ifdef XFS_DIR2_TRACE
- out_free_attr_trace:
-#endif
-#ifdef XFS_ATTR_TRACE
- ktrace_free(xfs_attr_trace_buf);
- out_free_bmbt_trace:
-#endif
-#ifdef XFS_BTREE_TRACE
- ktrace_free(xfs_bmbt_trace_buf);
- out_free_inobt_trace:
- ktrace_free(xfs_inobt_trace_buf);
- out_free_allocbt_trace:
- ktrace_free(xfs_allocbt_trace_buf);
- out_free_bmap_trace:
-#endif
-#ifdef XFS_BMAP_TRACE
- ktrace_free(xfs_bmap_trace_buf);
- out_free_alloc_trace:
-#endif
-#ifdef XFS_ALLOC_TRACE
- ktrace_free(xfs_alloc_trace_buf);
- out:
-#endif
- return -ENOMEM;
-}
-
-STATIC void
-xfs_free_trace_bufs(void)
-{
-#ifdef XFS_DIR2_TRACE
- ktrace_free(xfs_dir2_trace_buf);
-#endif
-#ifdef XFS_ATTR_TRACE
- ktrace_free(xfs_attr_trace_buf);
-#endif
-#ifdef XFS_BTREE_TRACE
- ktrace_free(xfs_bmbt_trace_buf);
- ktrace_free(xfs_inobt_trace_buf);
- ktrace_free(xfs_allocbt_trace_buf);
-#endif
-#ifdef XFS_BMAP_TRACE
- ktrace_free(xfs_bmap_trace_buf);
-#endif
-#ifdef XFS_ALLOC_TRACE
- ktrace_free(xfs_alloc_trace_buf);
-#endif
-}
-
-STATIC int __init
xfs_init_zones(void)
{
@@ -1809,7 +1741,6 @@ init_xfs_fs(void)
printk(KERN_INFO XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n");
- ktrace_init(64);
xfs_ioend_init();
xfs_dir_startup();
@@ -1817,13 +1748,9 @@ init_xfs_fs(void)
if (error)
goto out;
- error = xfs_alloc_trace_bufs();
- if (error)
- goto out_destroy_zones;
-
error = xfs_mru_cache_init();
if (error)
- goto out_free_trace_buffers;
+ goto out_destroy_zones;
error = xfs_filestream_init();
if (error)
@@ -1858,8 +1785,6 @@ init_xfs_fs(void)
xfs_filestream_uninit();
out_mru_cache_uninit:
xfs_mru_cache_uninit();
- out_free_trace_buffers:
- xfs_free_trace_bufs();
out_destroy_zones:
xfs_destroy_zones();
out:
@@ -1876,9 +1801,7 @@ exit_xfs_fs(void)
xfs_buf_terminate();
xfs_filestream_uninit();
xfs_mru_cache_uninit();
- xfs_free_trace_bufs();
xfs_destroy_zones();
- ktrace_uninit();
}
module_init(init_xfs_fs);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 18175ebd58e..233d4b9881b 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
# define XFS_BIGFS_STRING
#endif
-#ifdef CONFIG_XFS_TRACE
-# define XFS_TRACE_STRING "tracing, "
-#else
-# define XFS_TRACE_STRING
-#endif
-
#ifdef CONFIG_XFS_DMAPI
# define XFS_DMAPI_STRING "dmapi support, "
#else
@@ -78,7 +72,6 @@ extern void xfs_qm_exit(void);
XFS_SECURITY_STRING \
XFS_REALTIME_STRING \
XFS_BIGFS_STRING \
- XFS_TRACE_STRING \
XFS_DMAPI_STRING \
XFS_DBG_STRING /* DBG must be last */
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 961df0a22c7..6fed97a8cd3 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -44,6 +44,7 @@
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
+#include "xfs_trace.h"
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -663,10 +664,9 @@ xfs_syncd_stop(
kthread_stop(mp->m_sync_task);
}
-int
+STATIC int
xfs_reclaim_inode(
xfs_inode_t *ip,
- int locked,
int sync_mode)
{
xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
@@ -682,10 +682,6 @@ xfs_reclaim_inode(
!__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
spin_unlock(&ip->i_flags_lock);
write_unlock(&pag->pag_ici_lock);
- if (locked) {
- xfs_ifunlock(ip);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
return -EAGAIN;
}
__xfs_iflags_set(ip, XFS_IRECLAIM);
@@ -704,10 +700,8 @@ xfs_reclaim_inode(
* We get the flush lock regardless, though, just to make sure
* we don't free it while it is being flushed.
*/
- if (!locked) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
- }
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_iflock(ip);
/*
* In the case of a forced shutdown we rely on xfs_iflush() to
@@ -778,7 +772,7 @@ xfs_reclaim_inode_now(
}
read_unlock(&pag->pag_ici_lock);
- return xfs_reclaim_inode(ip, 0, flags);
+ return xfs_reclaim_inode(ip, flags);
}
int
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 27920eb7a82..a500b4d9183 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -44,7 +44,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
void xfs_flush_inodes(struct xfs_inode *ip);
-int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
new file mode 100644
index 00000000000..856eb3c8d60
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2009, Christoph Hellwig
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_types.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir2.h"
+#include "xfs_da_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_ialloc.h"
+#include "xfs_itable.h"
+#include "xfs_alloc.h"
+#include "xfs_bmap.h"
+#include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_log_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_quota.h"
+#include "xfs_iomap.h"
+#include "xfs_aops.h"
+#include "quota/xfs_dquot_item.h"
+#include "quota/xfs_dquot.h"
+
+/*
+ * Format fsblock number into a static buffer & return it.
+ */
+STATIC char *xfs_fmtfsblock(xfs_fsblock_t bno)
+{
+ static char rval[50];
+
+ if (bno == NULLFSBLOCK)
+ sprintf(rval, "NULLFSBLOCK");
+ else if (isnullstartblock(bno))
+ sprintf(rval, "NULLSTARTBLOCK(%lld)", startblockval(bno));
+ else
+ sprintf(rval, "%lld", (xfs_dfsbno_t)bno);
+ return rval;
+}
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "xfs_trace.h"
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
new file mode 100644
index 00000000000..c40834bdee5
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -0,0 +1,1369 @@
+/*
+ * Copyright (c) 2009, Christoph Hellwig
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xfs
+
+#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XFS_H
+
+#include <linux/tracepoint.h>
+
+struct xfs_agf;
+struct xfs_alloc_arg;
+struct xfs_attr_list_context;
+struct xfs_buf_log_item;
+struct xfs_da_args;
+struct xfs_da_node_entry;
+struct xfs_dquot;
+struct xlog_ticket;
+struct log;
+
+#define DEFINE_ATTR_LIST_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_attr_list_context *ctx), \
+ TP_ARGS(ctx), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(u32, hashval) \
+ __field(u32, blkno) \
+ __field(u32, offset) \
+ __field(void *, alist) \
+ __field(int, bufsize) \
+ __field(int, count) \
+ __field(int, firstu) \
+ __field(int, dupcnt) \
+ __field(int, flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \
+ __entry->ino = ctx->dp->i_ino; \
+ __entry->hashval = ctx->cursor->hashval; \
+ __entry->blkno = ctx->cursor->blkno; \
+ __entry->offset = ctx->cursor->offset; \
+ __entry->alist = ctx->alist; \
+ __entry->bufsize = ctx->bufsize; \
+ __entry->count = ctx->count; \
+ __entry->firstu = ctx->firstu; \
+ __entry->flags = ctx->flags; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \
+ "alist 0x%p size %u count %u firstu %u flags %d %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->hashval, \
+ __entry->blkno, \
+ __entry->offset, \
+ __entry->dupcnt, \
+ __entry->alist, \
+ __entry->bufsize, \
+ __entry->count, \
+ __entry->firstu, \
+ __entry->flags, \
+ __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \
+ ) \
+)
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
+DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
+
+TRACE_EVENT(xfs_attr_list_node_descend,
+ TP_PROTO(struct xfs_attr_list_context *ctx,
+ struct xfs_da_node_entry *btree),
+ TP_ARGS(ctx, btree),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(u32, hashval)
+ __field(u32, blkno)
+ __field(u32, offset)
+ __field(void *, alist)
+ __field(int, bufsize)
+ __field(int, count)
+ __field(int, firstu)
+ __field(int, dupcnt)
+ __field(int, flags)
+ __field(u32, bt_hashval)
+ __field(u32, bt_before)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
+ __entry->ino = ctx->dp->i_ino;
+ __entry->hashval = ctx->cursor->hashval;
+ __entry->blkno = ctx->cursor->blkno;
+ __entry->offset = ctx->cursor->offset;
+ __entry->alist = ctx->alist;
+ __entry->bufsize = ctx->bufsize;
+ __entry->count = ctx->count;
+ __entry->firstu = ctx->firstu;
+ __entry->flags = ctx->flags;
+ __entry->bt_hashval = be32_to_cpu(btree->hashval);
+ __entry->bt_before = be32_to_cpu(btree->before);
+ ),
+ TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
+ "alist 0x%p size %u count %u firstu %u flags %d %s "
+ "node hashval %u, node before %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->hashval,
+ __entry->blkno,
+ __entry->offset,
+ __entry->dupcnt,
+ __entry->alist,
+ __entry->bufsize,
+ __entry->count,
+ __entry->firstu,
+ __entry->flags,
+ __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS),
+ __entry->bt_hashval,
+ __entry->bt_before)
+);
+
+TRACE_EVENT(xfs_iext_insert,
+ TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx,
+ struct xfs_bmbt_irec *r, int state, unsigned long caller_ip),
+ TP_ARGS(ip, idx, r, state, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_extnum_t, idx)
+ __field(xfs_fileoff_t, startoff)
+ __field(xfs_fsblock_t, startblock)
+ __field(xfs_filblks_t, blockcount)
+ __field(xfs_exntst_t, state)
+ __field(int, bmap_state)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->idx = idx;
+ __entry->startoff = r->br_startoff;
+ __entry->startblock = r->br_startblock;
+ __entry->blockcount = r->br_blockcount;
+ __entry->state = r->br_state;
+ __entry->bmap_state = state;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
+ "offset %lld block %s count %lld flag %d caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
+ (long)__entry->idx,
+ __entry->startoff,
+ xfs_fmtfsblock(__entry->startblock),
+ __entry->blockcount,
+ __entry->state,
+ (char *)__entry->caller_ip)
+);
+
+#define DEFINE_BMAP_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
+ unsigned long caller_ip), \
+ TP_ARGS(ip, idx, state, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(xfs_extnum_t, idx) \
+ __field(xfs_fileoff_t, startoff) \
+ __field(xfs_fsblock_t, startblock) \
+ __field(xfs_filblks_t, blockcount) \
+ __field(xfs_exntst_t, state) \
+ __field(int, bmap_state) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \
+ ip->i_afp : &ip->i_df; \
+ struct xfs_bmbt_irec r; \
+ \
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->idx = idx; \
+ __entry->startoff = r.br_startoff; \
+ __entry->startblock = r.br_startblock; \
+ __entry->blockcount = r.br_blockcount; \
+ __entry->state = r.br_state; \
+ __entry->bmap_state = state; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \
+ "offset %lld block %s count %lld flag %d caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \
+ (long)__entry->idx, \
+ __entry->startoff, \
+ xfs_fmtfsblock(__entry->startblock), \
+ __entry->blockcount, \
+ __entry->state, \
+ (char *)__entry->caller_ip) \
+)
+
+DEFINE_BMAP_EVENT(xfs_iext_remove);
+DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
+DEFINE_BMAP_EVENT(xfs_bmap_post_update);
+DEFINE_BMAP_EVENT(xfs_extlist);
+
+#define DEFINE_BUF_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
+ TP_ARGS(bp, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_daddr_t, bno) \
+ __field(size_t, buffer_length) \
+ __field(int, hold) \
+ __field(int, pincount) \
+ __field(unsigned, lockval) \
+ __field(unsigned, flags) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = bp->b_target->bt_dev; \
+ __entry->bno = bp->b_bn; \
+ __entry->buffer_length = bp->b_buffer_length; \
+ __entry->hold = atomic_read(&bp->b_hold); \
+ __entry->pincount = atomic_read(&bp->b_pin_count); \
+ __entry->lockval = xfs_buf_lock_value(bp); \
+ __entry->flags = bp->b_flags; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
+ "lock %d flags %s caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ (unsigned long long)__entry->bno, \
+ __entry->buffer_length, \
+ __entry->hold, \
+ __entry->pincount, \
+ __entry->lockval, \
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
+ (void *)__entry->caller_ip) \
+)
+DEFINE_BUF_EVENT(xfs_buf_init);
+DEFINE_BUF_EVENT(xfs_buf_free);
+DEFINE_BUF_EVENT(xfs_buf_hold);
+DEFINE_BUF_EVENT(xfs_buf_rele);
+DEFINE_BUF_EVENT(xfs_buf_pin);
+DEFINE_BUF_EVENT(xfs_buf_unpin);
+DEFINE_BUF_EVENT(xfs_buf_iodone);
+DEFINE_BUF_EVENT(xfs_buf_iorequest);
+DEFINE_BUF_EVENT(xfs_buf_bawrite);
+DEFINE_BUF_EVENT(xfs_buf_bdwrite);
+DEFINE_BUF_EVENT(xfs_buf_lock);
+DEFINE_BUF_EVENT(xfs_buf_lock_done);
+DEFINE_BUF_EVENT(xfs_buf_cond_lock);
+DEFINE_BUF_EVENT(xfs_buf_unlock);
+DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
+DEFINE_BUF_EVENT(xfs_buf_iowait);
+DEFINE_BUF_EVENT(xfs_buf_iowait_done);
+DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
+DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
+DEFINE_BUF_EVENT(xfs_buf_delwri_split);
+DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
+DEFINE_BUF_EVENT(xfs_bdstrat_shut);
+DEFINE_BUF_EVENT(xfs_buf_item_relse);
+DEFINE_BUF_EVENT(xfs_buf_item_iodone);
+DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
+DEFINE_BUF_EVENT(xfs_buf_error_relse);
+DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
+DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
+
+/* not really buffer traces, but the buf provides useful information */
+DEFINE_BUF_EVENT(xfs_btree_corrupt);
+DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
+DEFINE_BUF_EVENT(xfs_reset_dqcounts);
+DEFINE_BUF_EVENT(xfs_inode_item_push);
+
+/* pass flags explicitly */
+#define DEFINE_BUF_FLAGS_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
+ TP_ARGS(bp, flags, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_daddr_t, bno) \
+ __field(size_t, buffer_length) \
+ __field(int, hold) \
+ __field(int, pincount) \
+ __field(unsigned, lockval) \
+ __field(unsigned, flags) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = bp->b_target->bt_dev; \
+ __entry->bno = bp->b_bn; \
+ __entry->buffer_length = bp->b_buffer_length; \
+ __entry->flags = flags; \
+ __entry->hold = atomic_read(&bp->b_hold); \
+ __entry->pincount = atomic_read(&bp->b_pin_count); \
+ __entry->lockval = xfs_buf_lock_value(bp); \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
+ "lock %d flags %s caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ (unsigned long long)__entry->bno, \
+ __entry->buffer_length, \
+ __entry->hold, \
+ __entry->pincount, \
+ __entry->lockval, \
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
+ (void *)__entry->caller_ip) \
+)
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
+DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
+
+TRACE_EVENT(xfs_buf_ioerror,
+ TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
+ TP_ARGS(bp, error, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, bno)
+ __field(size_t, buffer_length)
+ __field(unsigned, flags)
+ __field(int, hold)
+ __field(int, pincount)
+ __field(unsigned, lockval)
+ __field(int, error)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = bp->b_target->bt_dev;
+ __entry->bno = bp->b_bn;
+ __entry->buffer_length = bp->b_buffer_length;
+ __entry->hold = atomic_read(&bp->b_hold);
+ __entry->pincount = atomic_read(&bp->b_pin_count);
+ __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->error = error;
+ __entry->flags = bp->b_flags;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d error %d flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->bno,
+ __entry->buffer_length,
+ __entry->hold,
+ __entry->pincount,
+ __entry->lockval,
+ __entry->error,
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
+ (void *)__entry->caller_ip)
+);
+
+#define DEFINE_BUF_ITEM_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_buf_log_item *bip), \
+ TP_ARGS(bip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_daddr_t, buf_bno) \
+ __field(size_t, buf_len) \
+ __field(int, buf_hold) \
+ __field(int, buf_pincount) \
+ __field(int, buf_lockval) \
+ __field(unsigned, buf_flags) \
+ __field(unsigned, bli_recur) \
+ __field(int, bli_refcount) \
+ __field(unsigned, bli_flags) \
+ __field(void *, li_desc) \
+ __field(unsigned, li_flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = bip->bli_buf->b_target->bt_dev; \
+ __entry->bli_flags = bip->bli_flags; \
+ __entry->bli_recur = bip->bli_recur; \
+ __entry->bli_refcount = atomic_read(&bip->bli_refcount); \
+ __entry->buf_bno = bip->bli_buf->b_bn; \
+ __entry->buf_len = bip->bli_buf->b_buffer_length; \
+ __entry->buf_flags = bip->bli_buf->b_flags; \
+ __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \
+ __entry->buf_pincount = \
+ atomic_read(&bip->bli_buf->b_pin_count); \
+ __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \
+ __entry->li_desc = bip->bli_item.li_desc; \
+ __entry->li_flags = bip->bli_item.li_flags; \
+ ), \
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
+ "lock %d flags %s recur %d refcount %d bliflags %s " \
+ "lidesc 0x%p liflags %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ (unsigned long long)__entry->buf_bno, \
+ __entry->buf_len, \
+ __entry->buf_hold, \
+ __entry->buf_pincount, \
+ __entry->buf_lockval, \
+ __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \
+ __entry->bli_recur, \
+ __entry->bli_refcount, \
+ __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \
+ __entry->li_desc, \
+ __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \
+)
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
+DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
+
+#define DEFINE_LOCK_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
+ unsigned long caller_ip), \
+ TP_ARGS(ip, lock_flags, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(int, lock_flags) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->lock_flags = lock_flags; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \
+ (void *)__entry->caller_ip) \
+)
+
+DEFINE_LOCK_EVENT(xfs_ilock);
+DEFINE_LOCK_EVENT(xfs_ilock_nowait);
+DEFINE_LOCK_EVENT(xfs_ilock_demote);
+DEFINE_LOCK_EVENT(xfs_iunlock);
+
+#define DEFINE_IGET_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip), \
+ TP_ARGS(ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino) \
+)
+DEFINE_IGET_EVENT(xfs_iget_skip);
+DEFINE_IGET_EVENT(xfs_iget_reclaim);
+DEFINE_IGET_EVENT(xfs_iget_found);
+DEFINE_IGET_EVENT(xfs_iget_alloc);
+
+#define DEFINE_INODE_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
+ TP_ARGS(ip, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(int, count) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->count = atomic_read(&VFS_I(ip)->i_count); \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->count, \
+		  (void *)__entry->caller_ip) \
+)
+DEFINE_INODE_EVENT(xfs_ihold);
+DEFINE_INODE_EVENT(xfs_irele);
+/* the old xfs_itrace_entry tracer - to be replaced by something in the VFS */
+DEFINE_INODE_EVENT(xfs_inode);
+#define xfs_itrace_entry(ip) \
+ trace_xfs_inode(ip, _THIS_IP_)
+
+#define DEFINE_DQUOT_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_dquot *dqp), \
+ TP_ARGS(dqp), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(__be32, id) \
+ __field(unsigned, flags) \
+ __field(unsigned, nrefs) \
+ __field(unsigned long long, res_bcount) \
+ __field(unsigned long long, bcount) \
+ __field(unsigned long long, icount) \
+ __field(unsigned long long, blk_hardlimit) \
+ __field(unsigned long long, blk_softlimit) \
+ __field(unsigned long long, ino_hardlimit) \
+ __field(unsigned long long, ino_softlimit) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = dqp->q_mount->m_super->s_dev; \
+ __entry->id = dqp->q_core.d_id; \
+ __entry->flags = dqp->dq_flags; \
+ __entry->nrefs = dqp->q_nrefs; \
+ __entry->res_bcount = dqp->q_res_bcount; \
+ __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \
+ __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \
+ __entry->blk_hardlimit = \
+ be64_to_cpu(dqp->q_core.d_blk_hardlimit); \
+ __entry->blk_softlimit = \
+ be64_to_cpu(dqp->q_core.d_blk_softlimit); \
+ __entry->ino_hardlimit = \
+ be64_to_cpu(dqp->q_core.d_ino_hardlimit); \
+ __entry->ino_softlimit = \
+ be64_to_cpu(dqp->q_core.d_ino_softlimit); \
+ ), \
+ TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \
+ "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \
+ "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ be32_to_cpu(__entry->id), \
+ __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \
+ __entry->nrefs, \
+ __entry->res_bcount, \
+ __entry->bcount, \
+ __entry->blk_hardlimit, \
+ __entry->blk_softlimit, \
+ __entry->icount, \
+ __entry->ino_hardlimit, \
+ __entry->ino_softlimit) \
+)
+DEFINE_DQUOT_EVENT(xfs_dqadjust);
+DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
+DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_want);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty);
+DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink);
+DEFINE_DQUOT_EVENT(xfs_dqattach_found);
+DEFINE_DQUOT_EVENT(xfs_dqattach_get);
+DEFINE_DQUOT_EVENT(xfs_dqinit);
+DEFINE_DQUOT_EVENT(xfs_dqreuse);
+DEFINE_DQUOT_EVENT(xfs_dqalloc);
+DEFINE_DQUOT_EVENT(xfs_dqtobp_read);
+DEFINE_DQUOT_EVENT(xfs_dqread);
+DEFINE_DQUOT_EVENT(xfs_dqread_fail);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_found);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_want);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_move);
+DEFINE_DQUOT_EVENT(xfs_dqlookup_done);
+DEFINE_DQUOT_EVENT(xfs_dqget_hit);
+DEFINE_DQUOT_EVENT(xfs_dqget_miss);
+DEFINE_DQUOT_EVENT(xfs_dqput);
+DEFINE_DQUOT_EVENT(xfs_dqput_wait);
+DEFINE_DQUOT_EVENT(xfs_dqput_free);
+DEFINE_DQUOT_EVENT(xfs_dqrele);
+DEFINE_DQUOT_EVENT(xfs_dqflush);
+DEFINE_DQUOT_EVENT(xfs_dqflush_force);
+DEFINE_DQUOT_EVENT(xfs_dqflush_done);
+/* not really iget events, but we re-use the format */
+DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
+DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
+
+
+#define DEFINE_LOGGRANT_EVENT(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct log *log, struct xlog_ticket *tic), \
+ TP_ARGS(log, tic), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(unsigned, trans_type) \
+ __field(char, ocnt) \
+ __field(char, cnt) \
+ __field(int, curr_res) \
+ __field(int, unit_res) \
+ __field(unsigned int, flags) \
+ __field(void *, reserve_headq) \
+ __field(void *, write_headq) \
+ __field(int, grant_reserve_cycle) \
+ __field(int, grant_reserve_bytes) \
+ __field(int, grant_write_cycle) \
+ __field(int, grant_write_bytes) \
+ __field(int, curr_cycle) \
+ __field(int, curr_block) \
+ __field(xfs_lsn_t, tail_lsn) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = log->l_mp->m_super->s_dev; \
+ __entry->trans_type = tic->t_trans_type; \
+ __entry->ocnt = tic->t_ocnt; \
+ __entry->cnt = tic->t_cnt; \
+ __entry->curr_res = tic->t_curr_res; \
+ __entry->unit_res = tic->t_unit_res; \
+ __entry->flags = tic->t_flags; \
+ __entry->reserve_headq = log->l_reserve_headq; \
+ __entry->write_headq = log->l_write_headq; \
+ __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \
+ __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \
+ __entry->grant_write_cycle = log->l_grant_write_cycle; \
+ __entry->grant_write_bytes = log->l_grant_write_bytes; \
+ __entry->curr_cycle = log->l_curr_cycle; \
+ __entry->curr_block = log->l_curr_block; \
+ __entry->tail_lsn = log->l_tail_lsn; \
+ ), \
+ TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \
+ "t_unit_res %u t_flags %s reserve_headq 0x%p " \
+ "write_headq 0x%p grant_reserve_cycle %d " \
+ "grant_reserve_bytes %d grant_write_cycle %d " \
+ "grant_write_bytes %d curr_cycle %d curr_block %d " \
+ "tail_cycle %d tail_block %d", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \
+ __entry->ocnt, \
+ __entry->cnt, \
+ __entry->curr_res, \
+ __entry->unit_res, \
+ __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \
+ __entry->reserve_headq, \
+ __entry->write_headq, \
+ __entry->grant_reserve_cycle, \
+ __entry->grant_reserve_bytes, \
+ __entry->grant_write_cycle, \
+ __entry->grant_write_bytes, \
+ __entry->curr_cycle, \
+ __entry->curr_block, \
+ CYCLE_LSN(__entry->tail_lsn), \
+ BLOCK_LSN(__entry->tail_lsn) \
+ ) \
+)
+DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
+DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
+DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
+DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
+DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
+DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
+DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
+
+#define DEFINE_RW_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
+ TP_ARGS(ip, count, offset, flags), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(xfs_fsize_t, size) \
+ __field(xfs_fsize_t, new_size) \
+ __field(loff_t, offset) \
+ __field(size_t, count) \
+ __field(int, flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = ip->i_new_size; \
+ __entry->offset = offset; \
+ __entry->count = count; \
+ __entry->flags = flags; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
+ "offset 0x%llx count 0x%zx ioflags %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size, \
+ __entry->offset, \
+ __entry->count, \
+ __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \
+)
+DEFINE_RW_EVENT(xfs_file_read);
+DEFINE_RW_EVENT(xfs_file_buffered_write);
+DEFINE_RW_EVENT(xfs_file_direct_write);
+DEFINE_RW_EVENT(xfs_file_splice_read);
+DEFINE_RW_EVENT(xfs_file_splice_write);
+
+
+#define DEFINE_PAGE_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
+ TP_ARGS(inode, page, off), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(pgoff_t, pgoff) \
+ __field(loff_t, size) \
+ __field(unsigned long, offset) \
+ __field(int, delalloc) \
+ __field(int, unmapped) \
+ __field(int, unwritten) \
+ ), \
+ TP_fast_assign( \
+ int delalloc = -1, unmapped = -1, unwritten = -1; \
+ \
+ if (page_has_buffers(page)) \
+ xfs_count_page_state(page, &delalloc, \
+ &unmapped, &unwritten); \
+ __entry->dev = inode->i_sb->s_dev; \
+ __entry->ino = XFS_I(inode)->i_ino; \
+ __entry->pgoff = page_offset(page); \
+ __entry->size = i_size_read(inode); \
+ __entry->offset = off; \
+ __entry->delalloc = delalloc; \
+ __entry->unmapped = unmapped; \
+ __entry->unwritten = unwritten; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \
+ "delalloc %d unmapped %d unwritten %d", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->pgoff, \
+ __entry->size, \
+ __entry->offset, \
+ __entry->delalloc, \
+ __entry->unmapped, \
+ __entry->unwritten) \
+)
+DEFINE_PAGE_EVENT(xfs_writepage);
+DEFINE_PAGE_EVENT(xfs_releasepage);
+DEFINE_PAGE_EVENT(xfs_invalidatepage);
+
+#define DEFINE_IOMAP_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
+ int flags, struct xfs_bmbt_irec *irec), \
+ TP_ARGS(ip, offset, count, flags, irec), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(loff_t, size) \
+ __field(loff_t, new_size) \
+ __field(loff_t, offset) \
+ __field(size_t, count) \
+ __field(int, flags) \
+ __field(xfs_fileoff_t, startoff) \
+ __field(xfs_fsblock_t, startblock) \
+ __field(xfs_filblks_t, blockcount) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = ip->i_new_size; \
+ __entry->offset = offset; \
+ __entry->count = count; \
+ __entry->flags = flags; \
+ __entry->startoff = irec ? irec->br_startoff : 0; \
+ __entry->startblock = irec ? irec->br_startblock : 0; \
+ __entry->blockcount = irec ? irec->br_blockcount : 0; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
+ "offset 0x%llx count %zd flags %s " \
+ "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size, \
+ __entry->offset, \
+ __entry->count, \
+ __print_flags(__entry->flags, "|", BMAPI_FLAGS), \
+ __entry->startoff, \
+ __entry->startblock, \
+ __entry->blockcount) \
+)
+DEFINE_IOMAP_EVENT(xfs_iomap_enter);
+DEFINE_IOMAP_EVENT(xfs_iomap_found);
+DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+
+#define DEFINE_SIMPLE_IO_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
+ TP_ARGS(ip, offset, count), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(loff_t, size) \
+ __field(loff_t, new_size) \
+ __field(loff_t, offset) \
+ __field(size_t, count) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = ip->i_new_size; \
+ __entry->offset = offset; \
+ __entry->count = count; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
+ "offset 0x%llx count %zd", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size, \
+ __entry->offset, \
+ __entry->count) \
+)
+DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
+DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
+
+
+TRACE_EVENT(xfs_itruncate_start,
+ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
+ xfs_off_t toss_start, xfs_off_t toss_finish),
+ TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_fsize_t, new_size)
+ __field(xfs_off_t, toss_start)
+ __field(xfs_off_t, toss_finish)
+ __field(int, flag)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->new_size = new_size;
+ __entry->toss_start = toss_start;
+ __entry->toss_finish = toss_finish;
+ __entry->flag = flag;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
+ "toss start 0x%llx toss finish 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
+ __entry->size,
+ __entry->new_size,
+ __entry->toss_start,
+ __entry->toss_finish)
+);
+
+#define DEFINE_ITRUNC_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
+ TP_ARGS(ip, new_size), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(xfs_fsize_t, size) \
+ __field(xfs_fsize_t, new_size) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(ip)->i_sb->s_dev; \
+ __entry->ino = ip->i_ino; \
+ __entry->size = ip->i_d.di_size; \
+ __entry->new_size = new_size; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->size, \
+ __entry->new_size) \
+)
+DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
+
+TRACE_EVENT(xfs_pagecache_inval,
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
+ TP_ARGS(ip, start, finish),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_off_t, start)
+ __field(xfs_off_t, finish)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->start = start;
+ __entry->finish = finish;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->start,
+ __entry->finish)
+);
+
+TRACE_EVENT(xfs_bunmap,
+ TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len,
+ int flags, unsigned long caller_ip),
+ TP_ARGS(ip, bno, len, flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_fileoff_t, bno)
+ __field(xfs_filblks_t, len)
+ __field(unsigned long, caller_ip)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->bno = bno;
+ __entry->len = len;
+ __entry->caller_ip = caller_ip;
+ __entry->flags = flags;
+ ),
+	TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx "
+		  "flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->bno,
+ __entry->len,
+ __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS),
+ (void *)__entry->caller_ip)
+
+);
+
+TRACE_EVENT(xfs_alloc_busy,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+ xfs_extlen_t len, int slot),
+ TP_ARGS(mp, agno, agbno, len, slot),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(int, slot)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->slot = slot;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u slot %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->slot)
+
+);
+
+#define XFS_BUSY_STATES \
+ { 0, "found" }, \
+ { 1, "missing" }
+
+TRACE_EVENT(xfs_alloc_unbusy,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ int slot, int found),
+ TP_ARGS(mp, agno, slot, found),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, slot)
+ __field(int, found)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->slot = slot;
+ __entry->found = found;
+ ),
+ TP_printk("dev %d:%d agno %u slot %d %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->slot,
+ __print_symbolic(__entry->found, XFS_BUSY_STATES))
+);
+
+TRACE_EVENT(xfs_alloc_busysearch,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+ xfs_extlen_t len, int found),
+ TP_ARGS(mp, agno, agbno, len, found),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(int, found)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->found = found;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __print_symbolic(__entry->found, XFS_BUSY_STATES))
+);
+
+TRACE_EVENT(xfs_agf,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
+ unsigned long caller_ip),
+ TP_ARGS(mp, agf, flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, flags)
+ __field(__u32, length)
+ __field(__u32, bno_root)
+ __field(__u32, cnt_root)
+ __field(__u32, bno_level)
+ __field(__u32, cnt_level)
+ __field(__u32, flfirst)
+ __field(__u32, fllast)
+ __field(__u32, flcount)
+ __field(__u32, freeblks)
+ __field(__u32, longest)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = be32_to_cpu(agf->agf_seqno),
+ __entry->flags = flags;
+ __entry->length = be32_to_cpu(agf->agf_length),
+ __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
+ __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
+ __entry->bno_level =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
+ __entry->cnt_level =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
+ __entry->flfirst = be32_to_cpu(agf->agf_flfirst),
+ __entry->fllast = be32_to_cpu(agf->agf_fllast),
+ __entry->flcount = be32_to_cpu(agf->agf_flcount),
+ __entry->freeblks = be32_to_cpu(agf->agf_freeblks),
+ __entry->longest = be32_to_cpu(agf->agf_longest);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
+ "levels b %u c %u flfirst %u fllast %u flcount %u "
+ "freeblks %u longest %u caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
+ __entry->length,
+ __entry->bno_root,
+ __entry->cnt_root,
+ __entry->bno_level,
+ __entry->cnt_level,
+ __entry->flfirst,
+ __entry->fllast,
+ __entry->flcount,
+ __entry->freeblks,
+ __entry->longest,
+ (void *)__entry->caller_ip)
+);
+
+TRACE_EVENT(xfs_free_extent,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+ xfs_extlen_t len, bool isfl, int haveleft, int haveright),
+ TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(int, isfl)
+ __field(int, haveleft)
+ __field(int, haveright)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->isfl = isfl;
+ __entry->haveleft = haveleft;
+ __entry->haveright = haveright;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->isfl,
+ __entry->haveleft ?
+ (__entry->haveright ? "both" : "left") :
+ (__entry->haveright ? "right" : "none"))
+
+);
+
+#define DEFINE_ALLOC_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_alloc_arg *args), \
+ TP_ARGS(args), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_agnumber_t, agno) \
+ __field(xfs_agblock_t, agbno) \
+ __field(xfs_extlen_t, minlen) \
+ __field(xfs_extlen_t, maxlen) \
+ __field(xfs_extlen_t, mod) \
+ __field(xfs_extlen_t, prod) \
+ __field(xfs_extlen_t, minleft) \
+ __field(xfs_extlen_t, total) \
+ __field(xfs_extlen_t, alignment) \
+ __field(xfs_extlen_t, minalignslop) \
+ __field(xfs_extlen_t, len) \
+ __field(short, type) \
+ __field(short, otype) \
+ __field(char, wasdel) \
+ __field(char, wasfromfl) \
+ __field(char, isfl) \
+ __field(char, userdata) \
+ __field(xfs_fsblock_t, firstblock) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = args->mp->m_super->s_dev; \
+ __entry->agno = args->agno; \
+ __entry->agbno = args->agbno; \
+ __entry->minlen = args->minlen; \
+ __entry->maxlen = args->maxlen; \
+ __entry->mod = args->mod; \
+ __entry->prod = args->prod; \
+ __entry->minleft = args->minleft; \
+ __entry->total = args->total; \
+ __entry->alignment = args->alignment; \
+ __entry->minalignslop = args->minalignslop; \
+ __entry->len = args->len; \
+ __entry->type = args->type; \
+ __entry->otype = args->otype; \
+ __entry->wasdel = args->wasdel; \
+ __entry->wasfromfl = args->wasfromfl; \
+ __entry->isfl = args->isfl; \
+ __entry->userdata = args->userdata; \
+ __entry->firstblock = args->firstblock; \
+ ), \
+ TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
+ "prod %u minleft %u total %u alignment %u minalignslop %u " \
+ "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
+ "userdata %d firstblock 0x%llx", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->agno, \
+ __entry->agbno, \
+ __entry->minlen, \
+ __entry->maxlen, \
+ __entry->mod, \
+ __entry->prod, \
+ __entry->minleft, \
+ __entry->total, \
+ __entry->alignment, \
+ __entry->minalignslop, \
+ __entry->len, \
+ __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \
+ __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \
+ __entry->wasdel, \
+ __entry->wasfromfl, \
+ __entry->isfl, \
+ __entry->userdata, \
+ __entry->firstblock) \
+)
+
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
+DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_neither);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_size_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_small_error);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
+DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
+
+#define DEFINE_DIR2_TRACE(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_da_args *args), \
+ TP_ARGS(args), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __dynamic_array(char, name, args->namelen) \
+ __field(int, namelen) \
+ __field(xfs_dahash_t, hashval) \
+ __field(xfs_ino_t, inumber) \
+ __field(int, op_flags) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
+ __entry->ino = args->dp->i_ino; \
+ if (args->namelen) \
+ memcpy(__get_str(name), args->name, args->namelen); \
+ __entry->namelen = args->namelen; \
+ __entry->hashval = args->hashval; \
+ __entry->inumber = args->inumber; \
+ __entry->op_flags = args->op_flags; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \
+ "inumber 0x%llx op_flags %s", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __entry->namelen, \
+ __entry->namelen ? __get_str(name) : NULL, \
+ __entry->namelen, \
+ __entry->hashval, \
+ __entry->inumber, \
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \
+)
+DEFINE_DIR2_TRACE(xfs_dir2_sf_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_create);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8);
+DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block);
+DEFINE_DIR2_TRACE(xfs_dir2_block_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_block_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_block_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_block_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf);
+DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block);
+DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node);
+DEFINE_DIR2_TRACE(xfs_dir2_node_addname);
+DEFINE_DIR2_TRACE(xfs_dir2_node_lookup);
+DEFINE_DIR2_TRACE(xfs_dir2_node_replace);
+DEFINE_DIR2_TRACE(xfs_dir2_node_removename);
+DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf);
+
+#define DEFINE_DIR2_SPACE_TRACE(tname) \
+TRACE_EVENT(tname, \
+ TP_PROTO(struct xfs_da_args *args, int idx), \
+ TP_ARGS(args, idx), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_ino_t, ino) \
+ __field(int, op_flags) \
+ __field(int, idx) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
+ __entry->ino = args->dp->i_ino; \
+ __entry->op_flags = args->op_flags; \
+ __entry->idx = idx; \
+ ), \
+ TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->ino, \
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \
+ __entry->idx) \
+)
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add);
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove);
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode);
+DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode);
+
+TRACE_EVENT(xfs_dir2_leafn_moveents,
+ TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
+ TP_ARGS(args, src_idx, dst_idx, count),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, op_flags)
+ __field(int, src_idx)
+ __field(int, dst_idx)
+ __field(int, count)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
+ __entry->ino = args->dp->i_ino;
+ __entry->op_flags = args->op_flags;
+ __entry->src_idx = src_idx;
+ __entry->dst_idx = dst_idx;
+ __entry->count = count;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx op_flags %s "
+ "src_idx %d dst_idx %d count %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
+ __entry->src_idx,
+ __entry->dst_idx,
+ __entry->count)
+);
+
+#endif /* _TRACE_XFS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE xfs_trace
+#include <trace/define_trace.h>
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index ad7fbead4c9..7c220b4227b 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -36,10 +36,13 @@ struct attrlist_cursor_kern;
/*
* Flags for read/write calls - same values as IRIX
*/
-#define IO_ISAIO 0x00001 /* don't wait for completion */
#define IO_ISDIRECT 0x00004 /* bypass page cache */
#define IO_INVIS 0x00020 /* don't update inode timestamps */
+#define XFS_IO_FLAGS \
+ { IO_ISDIRECT, "DIRECT" }, \
+ { IO_INVIS, "INVIS"}
+
/*
* Flush/Invalidate options for vop_toss/flush/flushinval_pages.
*/
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/linux-2.6/xfs_xattr.c
index 497c7fb75cc..0b1878857fc 100644
--- a/fs/xfs/linux-2.6/xfs_xattr.c
+++ b/fs/xfs/linux-2.6/xfs_xattr.c
@@ -30,10 +30,10 @@
static int
-__xfs_xattr_get(struct inode *inode, const char *name,
+xfs_xattr_get(struct dentry *dentry, const char *name,
void *value, size_t size, int xflags)
{
- struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_inode *ip = XFS_I(dentry->d_inode);
int error, asize = size;
if (strcmp(name, "") == 0)
@@ -52,10 +52,10 @@ __xfs_xattr_get(struct inode *inode, const char *name,
}
static int
-__xfs_xattr_set(struct inode *inode, const char *name, const void *value,
+xfs_xattr_set(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags, int xflags)
{
- struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_inode *ip = XFS_I(dentry->d_inode);
if (strcmp(name, "") == 0)
return -EINVAL;
@@ -71,75 +71,34 @@ __xfs_xattr_set(struct inode *inode, const char *name, const void *value,
return -xfs_attr_set(ip, name, (void *)value, size, xflags);
}
-static int
-xfs_xattr_user_get(struct inode *inode, const char *name,
- void *value, size_t size)
-{
- return __xfs_xattr_get(inode, name, value, size, 0);
-}
-
-static int
-xfs_xattr_user_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return __xfs_xattr_set(inode, name, value, size, flags, 0);
-}
-
static struct xattr_handler xfs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
- .get = xfs_xattr_user_get,
- .set = xfs_xattr_user_set,
+ .flags = 0, /* no flags implies user namespace */
+ .get = xfs_xattr_get,
+ .set = xfs_xattr_set,
};
-
-static int
-xfs_xattr_trusted_get(struct inode *inode, const char *name,
- void *value, size_t size)
-{
- return __xfs_xattr_get(inode, name, value, size, ATTR_ROOT);
-}
-
-static int
-xfs_xattr_trusted_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return __xfs_xattr_set(inode, name, value, size, flags, ATTR_ROOT);
-}
-
static struct xattr_handler xfs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
- .get = xfs_xattr_trusted_get,
- .set = xfs_xattr_trusted_set,
+ .flags = ATTR_ROOT,
+ .get = xfs_xattr_get,
+ .set = xfs_xattr_set,
};
-
-static int
-xfs_xattr_secure_get(struct inode *inode, const char *name,
- void *value, size_t size)
-{
- return __xfs_xattr_get(inode, name, value, size, ATTR_SECURE);
-}
-
-static int
-xfs_xattr_secure_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
-{
- return __xfs_xattr_set(inode, name, value, size, flags, ATTR_SECURE);
-}
-
static struct xattr_handler xfs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
- .get = xfs_xattr_secure_get,
- .set = xfs_xattr_secure_set,
+ .flags = ATTR_SECURE,
+ .get = xfs_xattr_get,
+ .set = xfs_xattr_set,
};
-
struct xattr_handler *xfs_xattr_handlers[] = {
&xfs_xattr_user_handler,
&xfs_xattr_trusted_handler,
&xfs_xattr_security_handler,
#ifdef CONFIG_XFS_POSIX_ACL
- &xfs_xattr_system_handler,
+ &xfs_xattr_acl_access_handler,
+ &xfs_xattr_acl_default_handler,
#endif
NULL
};
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 2f3f2229eaa..d7c7eea09fc 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -47,6 +47,7 @@
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
+#include "xfs_trace.h"
/*
@@ -112,10 +113,7 @@ xfs_qm_dqinit(
init_completion(&dqp->q_flush);
complete(&dqp->q_flush);
-#ifdef XFS_DQUOT_TRACE
- dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
- xfs_dqtrace_entry(dqp, "DQINIT");
-#endif
+ trace_xfs_dqinit(dqp);
} else {
/*
* Only the q_core portion was zeroed in dqreclaim_one().
@@ -136,10 +134,7 @@ xfs_qm_dqinit(
dqp->q_hash = NULL;
ASSERT(dqp->dq_flnext == dqp->dq_flprev);
-#ifdef XFS_DQUOT_TRACE
- ASSERT(dqp->q_trace);
- xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
-#endif
+ trace_xfs_dqreuse(dqp);
}
/*
@@ -167,13 +162,8 @@ xfs_qm_dqdestroy(
mutex_destroy(&dqp->q_qlock);
sv_destroy(&dqp->q_pinwait);
-
-#ifdef XFS_DQUOT_TRACE
- if (dqp->q_trace)
- ktrace_free(dqp->q_trace);
- dqp->q_trace = NULL;
-#endif
kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
+
atomic_dec(&xfs_Gqm->qm_totaldquots);
}
@@ -195,49 +185,6 @@ xfs_qm_dqinit_core(
d->dd_diskdq.d_flags = type;
}
-
-#ifdef XFS_DQUOT_TRACE
-/*
- * Dquot tracing for debugging.
- */
-/* ARGSUSED */
-void
-__xfs_dqtrace_entry(
- xfs_dquot_t *dqp,
- char *func,
- void *retaddr,
- xfs_inode_t *ip)
-{
- xfs_dquot_t *udqp = NULL;
- xfs_ino_t ino = 0;
-
- ASSERT(dqp->q_trace);
- if (ip) {
- ino = ip->i_ino;
- udqp = ip->i_udquot;
- }
- ktrace_enter(dqp->q_trace,
- (void *)(__psint_t)DQUOT_KTRACE_ENTRY,
- (void *)func,
- (void *)(__psint_t)dqp->q_nrefs,
- (void *)(__psint_t)dqp->dq_flags,
- (void *)(__psint_t)dqp->q_res_bcount,
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_bcount),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_icount),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_hardlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_blk_softlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_hardlimit),
- (void *)(__psint_t)be64_to_cpu(dqp->q_core.d_ino_softlimit),
- (void *)(__psint_t)be32_to_cpu(dqp->q_core.d_id),
- (void *)(__psint_t)current_pid(),
- (void *)(__psint_t)ino,
- (void *)(__psint_t)retaddr,
- (void *)(__psint_t)udqp);
- return;
-}
-#endif
-
-
/*
* If default limits are in force, push them into the dquot now.
* We overwrite the dquot limits only if they are zero and this
@@ -425,7 +372,8 @@ xfs_qm_dqalloc(
xfs_trans_t *tp = *tpp;
ASSERT(tp != NULL);
- xfs_dqtrace_entry(dqp, "DQALLOC");
+
+ trace_xfs_dqalloc(dqp);
/*
* Initialize the bmap freelist prior to calling bmapi code.
@@ -612,7 +560,8 @@ xfs_qm_dqtobp(
* (in which case we already have the buf).
*/
if (! newdquot) {
- xfs_dqtrace_entry(dqp, "DQTOBP READBUF");
+ trace_xfs_dqtobp_read(dqp);
+
if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
dqp->q_blkno,
XFS_QI_DQCHUNKLEN(mp),
@@ -670,11 +619,12 @@ xfs_qm_dqread(
ASSERT(tpp);
+ trace_xfs_dqread(dqp);
+
/*
* get a pointer to the on-disk dquot and the buffer containing it
* dqp already knows its own type (GROUP/USER).
*/
- xfs_dqtrace_entry(dqp, "DQREAD");
if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
return (error);
}
@@ -763,7 +713,7 @@ xfs_qm_idtodq(
* or if the dquot didn't exist on disk and we ask to
* allocate (ENOENT).
*/
- xfs_dqtrace_entry(dqp, "DQREAD FAIL");
+ trace_xfs_dqread_fail(dqp);
cancelflags |= XFS_TRANS_ABORT;
goto error0;
}
@@ -817,7 +767,8 @@ xfs_qm_dqlookup(
* id can't be modified without the hashlock anyway.
*/
if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) {
- xfs_dqtrace_entry(dqp, "DQFOUND BY LOOKUP");
+ trace_xfs_dqlookup_found(dqp);
+
/*
* All in core dquots must be on the dqlist of mp
*/
@@ -827,7 +778,7 @@ xfs_qm_dqlookup(
if (dqp->q_nrefs == 0) {
ASSERT (XFS_DQ_IS_ON_FREELIST(dqp));
if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
- xfs_dqtrace_entry(dqp, "DQLOOKUP: WANT");
+ trace_xfs_dqlookup_want(dqp);
/*
* We may have raced with dqreclaim_one()
@@ -857,8 +808,7 @@ xfs_qm_dqlookup(
/*
* take it off the freelist
*/
- xfs_dqtrace_entry(dqp,
- "DQLOOKUP: TAKEOFF FL");
+ trace_xfs_dqlookup_freelist(dqp);
XQM_FREELIST_REMOVE(dqp);
/* xfs_qm_freelist_print(&(xfs_Gqm->
qm_dqfreelist),
@@ -878,8 +828,7 @@ xfs_qm_dqlookup(
*/
ASSERT(mutex_is_locked(&qh->qh_lock));
if (dqp->HL_PREVP != &qh->qh_next) {
- xfs_dqtrace_entry(dqp,
- "DQLOOKUP: HASH MOVETOFRONT");
+ trace_xfs_dqlookup_move(dqp);
if ((d = dqp->HL_NEXT))
d->HL_PREVP = dqp->HL_PREVP;
*(dqp->HL_PREVP) = d;
@@ -889,7 +838,7 @@ xfs_qm_dqlookup(
dqp->HL_PREVP = &qh->qh_next;
qh->qh_next = dqp;
}
- xfs_dqtrace_entry(dqp, "LOOKUP END");
+ trace_xfs_dqlookup_done(dqp);
*O_dqpp = dqp;
ASSERT(mutex_is_locked(&qh->qh_lock));
return (0);
@@ -971,7 +920,7 @@ xfs_qm_dqget(
ASSERT(*O_dqpp);
ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
mutex_unlock(&h->qh_lock);
- xfs_dqtrace_entry(*O_dqpp, "DQGET DONE (FROM CACHE)");
+ trace_xfs_dqget_hit(*O_dqpp);
return (0); /* success */
}
XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
@@ -1104,7 +1053,7 @@ xfs_qm_dqget(
mutex_unlock(&h->qh_lock);
dqret:
ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
- xfs_dqtrace_entry(dqp, "DQGET DONE");
+ trace_xfs_dqget_miss(dqp);
*O_dqpp = dqp;
return (0);
}
@@ -1124,7 +1073,8 @@ xfs_qm_dqput(
ASSERT(dqp->q_nrefs > 0);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- xfs_dqtrace_entry(dqp, "DQPUT");
+
+ trace_xfs_dqput(dqp);
if (dqp->q_nrefs != 1) {
dqp->q_nrefs--;
@@ -1137,7 +1087,7 @@ xfs_qm_dqput(
* in the right order; but try to get it out-of-order first
*/
if (! xfs_qm_freelist_lock_nowait(xfs_Gqm)) {
- xfs_dqtrace_entry(dqp, "DQPUT: FLLOCK-WAIT");
+ trace_xfs_dqput_wait(dqp);
xfs_dqunlock(dqp);
xfs_qm_freelist_lock(xfs_Gqm);
xfs_dqlock(dqp);
@@ -1148,7 +1098,8 @@ xfs_qm_dqput(
/* We can't depend on nrefs being == 1 here */
if (--dqp->q_nrefs == 0) {
- xfs_dqtrace_entry(dqp, "DQPUT: ON FREELIST");
+ trace_xfs_dqput_free(dqp);
+
/*
* insert at end of the freelist.
*/
@@ -1196,7 +1147,7 @@ xfs_qm_dqrele(
if (!dqp)
return;
- xfs_dqtrace_entry(dqp, "DQRELE");
+ trace_xfs_dqrele(dqp);
xfs_dqlock(dqp);
/*
@@ -1229,7 +1180,7 @@ xfs_qm_dqflush(
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush));
- xfs_dqtrace_entry(dqp, "DQFLUSH");
+ trace_xfs_dqflush(dqp);
/*
* If not dirty, or it's pinned and we are not supposed to
@@ -1259,7 +1210,6 @@ xfs_qm_dqflush(
* the ondisk-dquot has already been allocated for.
*/
if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
- xfs_dqtrace_entry(dqp, "DQTOBP FAIL");
ASSERT(error != ENOENT);
/*
* Quotas could have gotten turned off (ESRCH)
@@ -1297,7 +1247,7 @@ xfs_qm_dqflush(
* get stuck waiting in the write for too long.
*/
if (XFS_BUF_ISPINNED(bp)) {
- xfs_dqtrace_entry(dqp, "DQFLUSH LOG FORCE");
+ trace_xfs_dqflush_force(dqp);
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
}
@@ -1308,7 +1258,9 @@ xfs_qm_dqflush(
} else {
error = xfs_bwrite(mp, bp);
}
- xfs_dqtrace_entry(dqp, "DQFLUSH END");
+
+ trace_xfs_dqflush_done(dqp);
+
/*
* dqp is still locked, but caller is free to unlock it now.
*/
@@ -1483,7 +1435,7 @@ xfs_qm_dqpurge(
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- xfs_dqtrace_entry(dqp, "DQPURGE ->DQFLUSH: DQDIRTY");
+
/* dqflush unlocks dqflock */
/*
* Given that dqpurge is a very rare occurrence, it is OK
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index a2c16bcee90..a0f7da586d1 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -85,9 +85,6 @@ typedef struct xfs_dquot {
struct completion q_flush; /* flush completion queue */
atomic_t q_pincount; /* dquot pin count */
wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
-#ifdef XFS_DQUOT_TRACE
- struct ktrace *q_trace; /* trace header structure */
-#endif
} xfs_dquot_t;
@@ -144,24 +141,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
(XFS_IS_UQUOTA_ON((d)->q_mount)) : \
(XFS_IS_OQUOTA_ON((d)->q_mount))))
-#ifdef XFS_DQUOT_TRACE
-/*
- * Dquot Tracing stuff.
- */
-#define DQUOT_TRACE_SIZE 64
-#define DQUOT_KTRACE_ENTRY 1
-
-extern void __xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func,
- void *, xfs_inode_t *);
-#define xfs_dqtrace_entry_ino(a,b,ip) \
- __xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip))
-#define xfs_dqtrace_entry(a,b) \
- __xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL)
-#else
-#define xfs_dqtrace_entry(a,b)
-#define xfs_dqtrace_entry_ino(a,b,ip)
-#endif
-
#ifdef QUOTADEBUG
extern void xfs_qm_dqprint(xfs_dquot_t *);
#else
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 45b1bfef738..9e627a8b5b0 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -47,6 +47,7 @@
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
+#include "xfs_trace.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -453,7 +454,7 @@ again:
xfs_dqunlock(dqp);
continue;
}
- xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
+
/* XXX a sentinel would be better */
recl = XFS_QI_MPLRECLAIMS(mp);
if (!xfs_dqflock_nowait(dqp)) {
@@ -651,7 +652,7 @@ xfs_qm_dqattach_one(
*/
dqp = *IO_idqpp;
if (dqp) {
- xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
+ trace_xfs_dqattach_found(dqp);
return 0;
}
@@ -704,7 +705,7 @@ xfs_qm_dqattach_one(
if (error)
return error;
- xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
+ trace_xfs_dqattach_get(dqp);
/*
* dqget may have dropped and re-acquired the ilock, but it guarantees
@@ -890,15 +891,15 @@ xfs_qm_dqdetach(
if (!(ip->i_udquot || ip->i_gdquot))
return;
+ trace_xfs_dquot_dqdetach(ip);
+
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
if (ip->i_udquot) {
- xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
if (ip->i_gdquot) {
- xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
@@ -977,7 +978,6 @@ xfs_qm_sync(
* across a disk write
*/
xfs_qm_mplist_unlock(mp);
- xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
error = xfs_qm_dqflush(dqp, flush_flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
@@ -1350,7 +1350,8 @@ xfs_qm_reset_dqcounts(
xfs_disk_dquot_t *ddq;
int j;
- xfs_buftrace("RESET DQUOTS", bp);
+ trace_xfs_reset_dqcounts(bp, _RET_IP_);
+
/*
* Reset all counters and timers. They'll be
* started afresh by xfs_qm_quotacheck.
@@ -1543,7 +1544,9 @@ xfs_qm_quotacheck_dqadjust(
xfs_qcnt_t rtblks)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
+
+ trace_xfs_dqadjust(dqp);
+
/*
* Adjust the inode count and the block count to reflect this inode's
* resource usage.
@@ -1994,7 +1997,9 @@ xfs_qm_shake_freelist(
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
+
+ trace_xfs_dqshake_dirty(dqp);
+
/*
* We flush it delayed write, so don't bother
* releasing the mplock.
@@ -2038,7 +2043,9 @@ xfs_qm_shake_freelist(
return nreclaimed;
goto tryagain;
}
- xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
+
+ trace_xfs_dqshake_unlink(dqp);
+
#ifdef QUOTADEBUG
cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
dqp, be32_to_cpu(dqp->q_core.d_id));
@@ -2125,7 +2132,9 @@ xfs_qm_dqreclaim_one(void)
*/
if (dqp->dq_flags & XFS_DQ_WANT) {
ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
- xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
+
+ trace_xfs_dqreclaim_want(dqp);
+
xfs_dqunlock(dqp);
xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
@@ -2171,7 +2180,9 @@ xfs_qm_dqreclaim_one(void)
*/
if (XFS_DQ_IS_DIRTY(dqp)) {
int error;
- xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
+
+ trace_xfs_dqreclaim_dirty(dqp);
+
/*
* We flush it delayed write, so don't bother
* releasing the freelist lock.
@@ -2194,8 +2205,9 @@ xfs_qm_dqreclaim_one(void)
if (!mutex_trylock(&dqp->q_hash->qh_lock))
goto mplistunlock;
+ trace_xfs_dqreclaim_unlink(dqp);
+
ASSERT(dqp->q_nrefs == 0);
- xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
XQM_FREELIST_REMOVE(dqp);
@@ -2430,7 +2442,7 @@ xfs_qm_vop_dqalloc(
}
}
if (uq)
- xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
+ trace_xfs_dquot_dqalloc(ip);
xfs_iunlock(ip, lockflags);
if (O_udqpp)
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 5d1a3b98a6e..71af76fe8a2 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -49,6 +49,7 @@
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
+#include "xfs_trace.h"
#ifdef DEBUG
# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
@@ -496,7 +497,6 @@ xfs_qm_scall_setqlim(
ASSERT(error != ENOENT);
return (error);
}
- xfs_dqtrace_entry(dqp, "Q_SETQLIM: AFT DQGET");
xfs_trans_dqjoin(tp, dqp);
ddq = &dqp->q_core;
@@ -602,7 +602,6 @@ xfs_qm_scall_setqlim(
dqp->dq_flags |= XFS_DQ_DIRTY;
xfs_trans_log_dquot(tp, dqp);
- xfs_dqtrace_entry(dqp, "Q_SETQLIM: COMMIT");
error = xfs_trans_commit(tp, 0);
xfs_qm_dqprint(dqp);
xfs_qm_dqrele(dqp);
@@ -630,7 +629,6 @@ xfs_qm_scall_getquota(
return (error);
}
- xfs_dqtrace_entry(dqp, "Q_GETQUOTA SUCCESS");
/*
* If everything's NULL, this dquot doesn't quite exist as far as
* our utility programs are concerned.
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index 6f4fd37c67a..d2d20462fd4 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -41,10 +41,6 @@ extern void assfail(char *expr, char *f, int l);
# define STATIC static noinline
#endif
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static inline
-#endif
-
#else /* DEBUG */
#define ASSERT(expr) \
@@ -54,19 +50,5 @@ extern void assfail(char *expr, char *f, int l);
# define STATIC noinline
#endif
-/*
- * We stop inlining of inline functions in debug mode.
- * Unfortunately, this means static inline in header files
- * get multiple definitions, so they need to remain static.
- * This then gives tonnes of warnings about unused but defined
- * functions, so we need to add the unused attribute to prevent
- * these spurious warnings.
- */
-#ifndef STATIC_INLINE
-# define STATIC_INLINE static __attribute__ ((unused)) noinline
-#endif
-
#endif /* DEBUG */
-
-
#endif /* __XFS_SUPPORT_DEBUG_H__ */
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
deleted file mode 100644
index 2d494c26717..00000000000
--- a/fs/xfs/support/ktrace.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-
-static kmem_zone_t *ktrace_hdr_zone;
-static kmem_zone_t *ktrace_ent_zone;
-static int ktrace_zentries;
-
-void __init
-ktrace_init(int zentries)
-{
- ktrace_zentries = roundup_pow_of_two(zentries);
-
- ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
- "ktrace_hdr");
- ASSERT(ktrace_hdr_zone);
-
- ktrace_ent_zone = kmem_zone_init(ktrace_zentries
- * sizeof(ktrace_entry_t),
- "ktrace_ent");
- ASSERT(ktrace_ent_zone);
-}
-
-void __exit
-ktrace_uninit(void)
-{
- kmem_zone_destroy(ktrace_hdr_zone);
- kmem_zone_destroy(ktrace_ent_zone);
-}
-
-/*
- * ktrace_alloc()
- *
- * Allocate a ktrace header and enough buffering for the given
- * number of entries. Round the number of entries up to a
- * power of 2 so we can do fast masking to get the index from
- * the atomic index counter.
- */
-ktrace_t *
-ktrace_alloc(int nentries, unsigned int __nocast sleep)
-{
- ktrace_t *ktp;
- ktrace_entry_t *ktep;
- int entries;
-
- ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
-
- if (ktp == (ktrace_t*)NULL) {
- /*
- * KM_SLEEP callers don't expect failure.
- */
- if (sleep & KM_SLEEP)
- panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
- return NULL;
- }
-
- /*
- * Special treatment for buffers with the ktrace_zentries entries
- */
- entries = roundup_pow_of_two(nentries);
- if (entries == ktrace_zentries) {
- ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
- sleep);
- } else {
- ktep = (ktrace_entry_t*)kmem_zalloc((entries * sizeof(*ktep)),
- sleep | KM_LARGE);
- }
-
- if (ktep == NULL) {
- /*
- * KM_SLEEP callers don't expect failure.
- */
- if (sleep & KM_SLEEP)
- panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
-
- kmem_free(ktp);
-
- return NULL;
- }
-
- ktp->kt_entries = ktep;
- ktp->kt_nentries = entries;
- ASSERT(is_power_of_2(entries));
- ktp->kt_index_mask = entries - 1;
- atomic_set(&ktp->kt_index, 0);
- ktp->kt_rollover = 0;
- return ktp;
-}
-
-
-/*
- * ktrace_free()
- *
- * Free up the ktrace header and buffer. It is up to the caller
- * to ensure that no-one is referencing it.
- */
-void
-ktrace_free(ktrace_t *ktp)
-{
- if (ktp == (ktrace_t *)NULL)
- return;
-
- /*
- * Special treatment for the Vnode trace buffer.
- */
- if (ktp->kt_nentries == ktrace_zentries)
- kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
- else
- kmem_free(ktp->kt_entries);
-
- kmem_zone_free(ktrace_hdr_zone, ktp);
-}
-
-
-/*
- * Enter the given values into the "next" entry in the trace buffer.
- * kt_index is always the index of the next entry to be filled.
- */
-void
-ktrace_enter(
- ktrace_t *ktp,
- void *val0,
- void *val1,
- void *val2,
- void *val3,
- void *val4,
- void *val5,
- void *val6,
- void *val7,
- void *val8,
- void *val9,
- void *val10,
- void *val11,
- void *val12,
- void *val13,
- void *val14,
- void *val15)
-{
- int index;
- ktrace_entry_t *ktep;
-
- ASSERT(ktp != NULL);
-
- /*
- * Grab an entry by pushing the index up to the next one.
- */
- index = atomic_add_return(1, &ktp->kt_index);
- index = (index - 1) & ktp->kt_index_mask;
- if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
- ktp->kt_rollover = 1;
-
- ASSERT((index >= 0) && (index < ktp->kt_nentries));
-
- ktep = &(ktp->kt_entries[index]);
-
- ktep->val[0] = val0;
- ktep->val[1] = val1;
- ktep->val[2] = val2;
- ktep->val[3] = val3;
- ktep->val[4] = val4;
- ktep->val[5] = val5;
- ktep->val[6] = val6;
- ktep->val[7] = val7;
- ktep->val[8] = val8;
- ktep->val[9] = val9;
- ktep->val[10] = val10;
- ktep->val[11] = val11;
- ktep->val[12] = val12;
- ktep->val[13] = val13;
- ktep->val[14] = val14;
- ktep->val[15] = val15;
-}
-
-/*
- * Return the number of entries in the trace buffer.
- */
-int
-ktrace_nentries(
- ktrace_t *ktp)
-{
- int index;
- if (ktp == NULL)
- return 0;
-
- index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
- return (ktp->kt_rollover ? ktp->kt_nentries : index);
-}
-
-/*
- * ktrace_first()
- *
- * This is used to find the start of the trace buffer.
- * In conjunction with ktrace_next() it can be used to
- * iterate through the entire trace buffer. This code does
- * not do any locking because it is assumed that it is called
- * from the debugger.
- *
- * The caller must pass in a pointer to a ktrace_snap
- * structure in which we will keep some state used to
- * iterate through the buffer. This state must not touched
- * by any code outside of this module.
- */
-ktrace_entry_t *
-ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
-{
- ktrace_entry_t *ktep;
- int index;
- int nentries;
-
- if (ktp->kt_rollover)
- index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
- else
- index = 0;
-
- ktsp->ks_start = index;
- ktep = &(ktp->kt_entries[index]);
-
- nentries = ktrace_nentries(ktp);
- index++;
- if (index < nentries) {
- ktsp->ks_index = index;
- } else {
- ktsp->ks_index = 0;
- if (index > nentries)
- ktep = NULL;
- }
- return ktep;
-}
-
-/*
- * ktrace_next()
- *
- * This is used to iterate through the entries of the given
- * trace buffer. The caller must pass in the ktrace_snap_t
- * structure initialized by ktrace_first(). The return value
- * will be either a pointer to the next ktrace_entry or NULL
- * if all of the entries have been traversed.
- */
-ktrace_entry_t *
-ktrace_next(
- ktrace_t *ktp,
- ktrace_snap_t *ktsp)
-{
- int index;
- ktrace_entry_t *ktep;
-
- index = ktsp->ks_index;
- if (index == ktsp->ks_start) {
- ktep = NULL;
- } else {
- ktep = &ktp->kt_entries[index];
- }
-
- index++;
- if (index == ktrace_nentries(ktp)) {
- ktsp->ks_index = 0;
- } else {
- ktsp->ks_index = index;
- }
-
- return ktep;
-}
-
-/*
- * ktrace_skip()
- *
- * Skip the next "count" entries and return the entry after that.
- * Return NULL if this causes us to iterate past the beginning again.
- */
-ktrace_entry_t *
-ktrace_skip(
- ktrace_t *ktp,
- int count,
- ktrace_snap_t *ktsp)
-{
- int index;
- int new_index;
- ktrace_entry_t *ktep;
- int nentries = ktrace_nentries(ktp);
-
- index = ktsp->ks_index;
- new_index = index + count;
- while (new_index >= nentries) {
- new_index -= nentries;
- }
- if (index == ktsp->ks_start) {
- /*
- * We've iterated around to the start, so we're done.
- */
- ktep = NULL;
- } else if ((new_index < index) && (index < ktsp->ks_index)) {
- /*
- * We've skipped past the start again, so we're done.
- */
- ktep = NULL;
- ktsp->ks_index = ktsp->ks_start;
- } else {
- ktep = &(ktp->kt_entries[new_index]);
- new_index++;
- if (new_index == nentries) {
- ktsp->ks_index = 0;
- } else {
- ktsp->ks_index = new_index;
- }
- }
- return ktep;
-}
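Editor's note: the ktrace ring buffer removed above depends on the entry count being a power of two, so that kt_index_mask = entries - 1 can wrap a monotonically growing atomic index without a modulo (see ktrace_enter(): atomic_add_return() followed by "& kt_index_mask"). A minimal userspace sketch of that wrapping scheme follows; all names (NENTRIES, ring_enter, ...) are hypothetical and not part of this patch.

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NENTRIES	8			/* must be a power of two */
	#define INDEX_MASK	(NENTRIES - 1)

	static atomic_uint	next_index;		/* grows without bound */
	static unsigned long	ring[NENTRIES];		/* one slot per entry */

	/* Claim the next slot and store a value, wrapping via the mask. */
	static void ring_enter(unsigned long val)
	{
		unsigned int raw = atomic_fetch_add(&next_index, 1);
		unsigned int slot = raw & INDEX_MASK;	/* same as raw % NENTRIES */

		ring[slot] = val;
	}

	int main(void)
	{
		assert((NENTRIES & (NENTRIES - 1)) == 0);

		for (unsigned long i = 0; i < 20; i++)
			ring_enter(i);

		for (unsigned int i = 0; i < NENTRIES; i++)
			printf("slot %u = %lu\n", i, ring[i]);
		return 0;
	}

Because the index only ever increases and the mask does the wrapping, concurrent writers never race on the slot calculation, which is why ktrace_enter() needs no lock.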
diff --git a/fs/xfs/support/ktrace.h b/fs/xfs/support/ktrace.h
deleted file mode 100644
index 741d6947ca6..00000000000
--- a/fs/xfs/support/ktrace.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_KTRACE_H__
-#define __XFS_SUPPORT_KTRACE_H__
-
-/*
- * Trace buffer entry structure.
- */
-typedef struct ktrace_entry {
- void *val[16];
-} ktrace_entry_t;
-
-/*
- * Trace buffer header structure.
- */
-typedef struct ktrace {
- int kt_nentries; /* number of entries in trace buf */
- atomic_t kt_index; /* current index in entries */
- unsigned int kt_index_mask;
- int kt_rollover;
- ktrace_entry_t *kt_entries; /* buffer of entries */
-} ktrace_t;
-
-/*
- * Trace buffer snapshot structure.
- */
-typedef struct ktrace_snap {
- int ks_start; /* kt_index at time of snap */
- int ks_index; /* current index */
-} ktrace_snap_t;
-
-
-#ifdef CONFIG_XFS_TRACE
-
-extern void ktrace_init(int zentries);
-extern void ktrace_uninit(void);
-
-extern ktrace_t *ktrace_alloc(int, unsigned int __nocast);
-extern void ktrace_free(ktrace_t *);
-
-extern void ktrace_enter(
- ktrace_t *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *,
- void *);
-
-extern ktrace_entry_t *ktrace_first(ktrace_t *, ktrace_snap_t *);
-extern int ktrace_nentries(ktrace_t *);
-extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
-extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
-
-#else
-#define ktrace_init(x) do { } while (0)
-#define ktrace_uninit() do { } while (0)
-#endif /* CONFIG_XFS_TRACE */
-
-#endif /* __XFS_SUPPORT_KTRACE_H__ */
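Editor's note: ktrace_first()/ktrace_next() keep all iteration state in a caller-supplied ktrace_snap_t rather than in the buffer itself, which is what lets the walk run lock-free from a debugger. A hedged sketch of how a consumer of the just-removed interface would have walked the buffer (illustrative only; this API no longer exists after the patch, and dump_trace is a hypothetical name):

	/* Hypothetical consumer of the removed ktrace interface. */
	static void dump_trace(ktrace_t *ktp)
	{
		ktrace_snap_t	snap;
		ktrace_entry_t	*ktep;

		for (ktep = ktrace_first(ktp, &snap);
		     ktep != NULL;
		     ktep = ktrace_next(ktp, &snap)) {
			/* val[0] conventionally carried an event id in these buffers. */
			printk("ktrace entry: id %p\n", ktep->val[0]);
		}
	}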
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 17254b529c5..5ad8ad3a1dc 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -25,21 +25,5 @@
/* #define QUOTADEBUG 1 */
#endif
-#ifdef CONFIG_XFS_TRACE
-#define XFS_ALLOC_TRACE 1
-#define XFS_ATTR_TRACE 1
-#define XFS_BLI_TRACE 1
-#define XFS_BMAP_TRACE 1
-#define XFS_BTREE_TRACE 1
-#define XFS_DIR2_TRACE 1
-#define XFS_DQUOT_TRACE 1
-#define XFS_ILOCK_TRACE 1
-#define XFS_LOG_TRACE 1
-#define XFS_RW_TRACE 1
-#define XFS_BUF_TRACE 1
-#define XFS_INODE_TRACE 1
-#define XFS_FILESTREAMS_TRACE 1
-#endif
-
#include <linux-2.6/xfs_linux.h>
#endif /* __XFS_H__ */
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 947b150df8e..00fd357c3e4 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -49,7 +49,8 @@ extern int xfs_acl_chmod(struct inode *inode);
extern int posix_acl_access_exists(struct inode *inode);
extern int posix_acl_default_exists(struct inode *inode);
-extern struct xattr_handler xfs_xattr_system_handler;
+extern struct xattr_handler xfs_xattr_acl_access_handler;
+extern struct xattr_handler xfs_xattr_acl_default_handler;
#else
# define xfs_check_acl NULL
# define xfs_get_acl(inode, type) NULL
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index a5d54bf4931..6702bd86581 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -86,6 +86,20 @@ typedef struct xfs_agf {
#define XFS_AGF_NUM_BITS 12
#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
+#define XFS_AGF_FLAGS \
+ { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
+ { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
+ { XFS_AGF_SEQNO, "SEQNO" }, \
+ { XFS_AGF_LENGTH, "LENGTH" }, \
+ { XFS_AGF_ROOTS, "ROOTS" }, \
+ { XFS_AGF_LEVELS, "LEVELS" }, \
+ { XFS_AGF_FLFIRST, "FLFIRST" }, \
+ { XFS_AGF_FLLAST, "FLLAST" }, \
+ { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
+ { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
+ { XFS_AGF_LONGEST, "LONGEST" }, \
+ { XFS_AGF_BTREEBLKS, "BTREEBLKS" }
+
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
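Editor's note: the XFS_AGF_FLAGS bit/name table added above is the kind of list the ftrace helper __print_flags() consumes when formatting a tracepoint, presumably from the new fs/xfs/xfs_trace.h. The event below is only a sketch of that usage pattern and is not the trace_xfs_agf() definition added by this patch; it would live in a trace header with the usual TRACE_SYSTEM/CREATE_TRACE_POINTS boilerplate.

	/* Illustrative tracepoint showing __print_flags() with a flag/name table. */
	TRACE_EVENT(example_agf_log,
		TP_PROTO(unsigned int flags),
		TP_ARGS(flags),
		TP_STRUCT__entry(
			__field(unsigned int, flags)
		),
		TP_fast_assign(
			__entry->flags = flags;
		),
		TP_printk("flags %s",
			  __print_flags(__entry->flags, "|", XFS_AGF_FLAGS))
	);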
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 2cf944eb796..a1c65fc6d9c 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -38,6 +38,7 @@
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
@@ -51,30 +52,6 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len);
-#if defined(XFS_ALLOC_TRACE)
-ktrace_t *xfs_alloc_trace_buf;
-
-#define TRACE_ALLOC(s,a) \
- xfs_alloc_trace_alloc(__func__, s, a, __LINE__)
-#define TRACE_FREE(s,a,b,x,f) \
- xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__)
-#define TRACE_MODAGF(s,a,f) \
- xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__)
-#define TRACE_BUSY(__func__,s,ag,agb,l,sl,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
-#define TRACE_UNBUSY(__func__,s,ag,sl,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
-#define TRACE_BUSYSEARCH(__func__,s,ag,agb,l,tp) \
- xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
-#else
-#define TRACE_ALLOC(s,a)
-#define TRACE_FREE(s,a,b,x,f)
-#define TRACE_MODAGF(s,a,f)
-#define TRACE_BUSY(s,a,ag,agb,l,sl,tp)
-#define TRACE_UNBUSY(fname,s,ag,sl,tp)
-#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,tp)
-#endif /* XFS_ALLOC_TRACE */
-
/*
* Prototypes for per-ag allocation routines
*/
@@ -498,124 +475,6 @@ xfs_alloc_read_agfl(
return 0;
}
-#if defined(XFS_ALLOC_TRACE)
-/*
- * Add an allocation trace entry for an alloc call.
- */
-STATIC void
-xfs_alloc_trace_alloc(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_alloc_arg_t *args, /* allocation argument structure */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_ALLOC | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)args->mp,
- (void *)(__psunsigned_t)args->agno,
- (void *)(__psunsigned_t)args->agbno,
- (void *)(__psunsigned_t)args->minlen,
- (void *)(__psunsigned_t)args->maxlen,
- (void *)(__psunsigned_t)args->mod,
- (void *)(__psunsigned_t)args->prod,
- (void *)(__psunsigned_t)args->minleft,
- (void *)(__psunsigned_t)args->total,
- (void *)(__psunsigned_t)args->alignment,
- (void *)(__psunsigned_t)args->len,
- (void *)((((__psint_t)args->type) << 16) |
- (__psint_t)args->otype),
- (void *)(__psint_t)((args->wasdel << 3) |
- (args->wasfromfl << 2) |
- (args->isfl << 1) |
- (args->userdata << 0)));
-}
-
-/*
- * Add an allocation trace entry for a free call.
- */
-STATIC void
-xfs_alloc_trace_free(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* a.g. relative block number */
- xfs_extlen_t len, /* length of extent */
- int isfl, /* set if is freelist allocation/free */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_FREE | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psunsigned_t)agno,
- (void *)(__psunsigned_t)agbno,
- (void *)(__psunsigned_t)len,
- (void *)(__psint_t)isfl,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-}
-
-/*
- * Add an allocation trace entry for modifying an agf.
- */
-STATIC void
-xfs_alloc_trace_modagf(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agf_t *agf, /* new agf value */
- int flags, /* logging flags for agf */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(XFS_ALLOC_KTRACE_MODAGF | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psint_t)flags,
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_seqno),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_length),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flfirst),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_fllast),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_flcount),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_freeblks),
- (void *)(__psunsigned_t)be32_to_cpu(agf->agf_longest));
-}
-
-STATIC void
-xfs_alloc_trace_busy(
- const char *name, /* function tag string */
- char *str, /* additional string */
- xfs_mount_t *mp, /* file system mount point */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t agbno, /* a.g. relative block number */
- xfs_extlen_t len, /* length of extent */
- int slot, /* perag Busy slot */
- xfs_trans_t *tp,
- int trtype, /* type: add, delete, search */
- int line) /* source line number */
-{
- ktrace_enter(xfs_alloc_trace_buf,
- (void *)(__psint_t)(trtype | (line << 16)),
- (void *)name,
- (void *)str,
- (void *)mp,
- (void *)(__psunsigned_t)agno,
- (void *)(__psunsigned_t)agbno,
- (void *)(__psunsigned_t)len,
- (void *)(__psint_t)slot,
- (void *)tp,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL);
-}
-#endif /* XFS_ALLOC_TRACE */
-
/*
* Allocation group level functions.
*/
@@ -665,9 +524,6 @@ xfs_alloc_ag_vextent(
*/
if (args->agbno != NULLAGBLOCK) {
xfs_agf_t *agf; /* allocation group freelist header */
-#ifdef XFS_ALLOC_TRACE
- xfs_mount_t *mp = args->mp;
-#endif
long slen = (long)args->len;
ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
@@ -682,7 +538,6 @@ xfs_alloc_ag_vextent(
args->pag->pagf_freeblks -= args->len;
ASSERT(be32_to_cpu(agf->agf_freeblks) <=
be32_to_cpu(agf->agf_length));
- TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
xfs_alloc_log_agf(args->tp, args->agbp,
XFS_AGF_FREEBLKS);
/* search the busylist for these blocks */
@@ -792,13 +647,14 @@ xfs_alloc_ag_vextent_exact(
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("normal", args);
+
+ trace_xfs_alloc_exact_done(args);
args->wasfromfl = 0;
return 0;
error0:
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_exact_error(args);
return error;
}
@@ -958,7 +814,7 @@ xfs_alloc_ag_vextent_near(
args->len = blen;
if (!xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_near_nominleft(args);
return 0;
}
blen = args->len;
@@ -981,7 +837,8 @@ xfs_alloc_ag_vextent_near(
goto error0;
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
- TRACE_ALLOC("first", args);
+
+ trace_xfs_alloc_near_first(args);
return 0;
}
/*
@@ -1272,7 +1129,7 @@ xfs_alloc_ag_vextent_near(
* If we couldn't get anything, give up.
*/
if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
- TRACE_ALLOC("neither", args);
+ trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
return 0;
}
@@ -1299,7 +1156,7 @@ xfs_alloc_ag_vextent_near(
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args)) {
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_near_nominleft(args);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
return 0;
@@ -1314,13 +1171,18 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
ltnew, rlen, XFSA_FIXUP_BNO_OK)))
goto error0;
- TRACE_ALLOC(j ? "gt" : "lt", args);
+
+ if (j)
+ trace_xfs_alloc_near_greater(args);
+ else
+ trace_xfs_alloc_near_lesser(args);
+
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_near_error(args);
if (cnt_cur != NULL)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur_lt != NULL)
@@ -1371,7 +1233,7 @@ xfs_alloc_ag_vextent_size(
goto error0;
if (i == 0 || flen == 0) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("noentry", args);
+ trace_xfs_alloc_size_noentry(args);
return 0;
}
ASSERT(i == 1);
@@ -1448,7 +1310,7 @@ xfs_alloc_ag_vextent_size(
xfs_alloc_fix_len(args);
if (rlen < args->minlen || !xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- TRACE_ALLOC("nominleft", args);
+ trace_xfs_alloc_size_nominleft(args);
args->agbno = NULLAGBLOCK;
return 0;
}
@@ -1471,11 +1333,11 @@ xfs_alloc_ag_vextent_size(
args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
- TRACE_ALLOC("normal", args);
+ trace_xfs_alloc_size_done(args);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_size_error(args);
if (cnt_cur)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur)
@@ -1534,7 +1396,7 @@ xfs_alloc_ag_vextent_small(
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
args->wasfromfl = 1;
- TRACE_ALLOC("freelist", args);
+ trace_xfs_alloc_small_freelist(args);
*stat = 0;
return 0;
}
@@ -1556,17 +1418,17 @@ xfs_alloc_ag_vextent_small(
*/
if (flen < args->minlen) {
args->agbno = NULLAGBLOCK;
- TRACE_ALLOC("notenough", args);
+ trace_xfs_alloc_small_notenough(args);
flen = 0;
}
*fbnop = fbno;
*flenp = flen;
*stat = 1;
- TRACE_ALLOC("normal", args);
+ trace_xfs_alloc_small_done(args);
return 0;
error0:
- TRACE_ALLOC("error", args);
+ trace_xfs_alloc_small_error(args);
return error;
}
@@ -1809,17 +1671,14 @@ xfs_free_ag_extent(
be32_to_cpu(agf->agf_freeblks) <=
be32_to_cpu(agf->agf_length),
error0);
- TRACE_MODAGF(NULL, agf, XFS_AGF_FREEBLKS);
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
if (!isfl)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
XFS_STATS_INC(xs_freex);
XFS_STATS_ADD(xs_freeb, len);
}
- TRACE_FREE(haveleft ?
- (haveright ? "both" : "left") :
- (haveright ? "right" : "none"),
- agno, bno, len, isfl);
+
+ trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
/*
* Since blocks move to the free list without the coordination
@@ -1836,7 +1695,7 @@ xfs_free_ag_extent(
return 0;
error0:
- TRACE_FREE("error", agno, bno, len, isfl);
+ trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
if (cnt_cur)
@@ -2122,7 +1981,6 @@ xfs_alloc_get_freelist(
logflags |= XFS_AGF_BTREEBLKS;
}
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
*bnop = bno;
@@ -2165,6 +2023,8 @@ xfs_alloc_log_agf(
sizeof(xfs_agf_t)
};
+ trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
+
xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
@@ -2230,13 +2090,11 @@ xfs_alloc_put_freelist(
logflags |= XFS_AGF_BTREEBLKS;
}
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
*blockp = cpu_to_be32(bno);
- TRACE_MODAGF(NULL, agf, logflags);
xfs_alloc_log_agf(tp, agbp, logflags);
xfs_trans_log_buf(tp, agflbp,
(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
@@ -2399,7 +2257,7 @@ xfs_alloc_vextent(
args->minlen > args->maxlen || args->minlen > agsize ||
args->mod >= args->prod) {
args->fsbno = NULLFSBLOCK;
- TRACE_ALLOC("badargs", args);
+ trace_xfs_alloc_vextent_badargs(args);
return 0;
}
minleft = args->minleft;
@@ -2418,12 +2276,12 @@ xfs_alloc_vextent(
error = xfs_alloc_fix_freelist(args, 0);
args->minleft = minleft;
if (error) {
- TRACE_ALLOC("nofix", args);
+ trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
if (!args->agbp) {
up_read(&mp->m_peraglock);
- TRACE_ALLOC("noagbp", args);
+ trace_xfs_alloc_vextent_noagbp(args);
break;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
@@ -2488,7 +2346,7 @@ xfs_alloc_vextent(
error = xfs_alloc_fix_freelist(args, flags);
args->minleft = minleft;
if (error) {
- TRACE_ALLOC("nofix", args);
+ trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
/*
@@ -2499,7 +2357,9 @@ xfs_alloc_vextent(
goto error0;
break;
}
- TRACE_ALLOC("loopfailed", args);
+
+ trace_xfs_alloc_vextent_loopfailed(args);
+
/*
* Didn't work, figure out the next iteration.
*/
@@ -2526,7 +2386,7 @@ xfs_alloc_vextent(
if (args->agno == sagno) {
if (no_min == 1) {
args->agbno = NULLAGBLOCK;
- TRACE_ALLOC("allfailed", args);
+ trace_xfs_alloc_vextent_allfailed(args);
break;
}
if (flags == 0) {
@@ -2642,16 +2502,16 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
}
}
+ trace_xfs_alloc_busy(mp, agno, bno, len, n);
+
if (n < XFS_PAGB_NUM_SLOTS) {
bsy = &mp->m_perag[agno].pagb_list[n];
mp->m_perag[agno].pagb_count++;
- TRACE_BUSY("xfs_alloc_mark_busy", "got", agno, bno, len, n, tp);
bsy->busy_start = bno;
bsy->busy_length = len;
bsy->busy_tp = tp;
xfs_trans_add_busy(tp, agno, n);
} else {
- TRACE_BUSY("xfs_alloc_mark_busy", "FULL", agno, bno, len, -1, tp);
/*
* The busy list is full! Since it is now not possible to
* track the free block, make this a synchronous transaction
@@ -2678,12 +2538,12 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
list = mp->m_perag[agno].pagb_list;
ASSERT(idx < XFS_PAGB_NUM_SLOTS);
+
+ trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp);
+
if (list[idx].busy_tp == tp) {
- TRACE_UNBUSY("xfs_alloc_clear_busy", "found", agno, idx, tp);
list[idx].busy_tp = NULL;
mp->m_perag[agno].pagb_count--;
- } else {
- TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp);
}
spin_unlock(&mp->m_perag[agno].pagb_lock);
@@ -2724,24 +2584,22 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
if ((bno > bend) || (uend < bsy->busy_start)) {
cnt--;
} else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy",
- "found1", agno, bno, len, tp);
break;
}
}
}
+ trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt);
+
/*
* If a block was found, force the log through the LSN of the
* transaction that freed the block
*/
if (cnt) {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, tp);
lsn = bsy->busy_tp->t_commit_lsn;
spin_unlock(&mp->m_perag[agno].pagb_lock);
xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
} else {
- TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, tp);
spin_unlock(&mp->m_perag[agno].pagb_lock);
}
}
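Editor's note: every TRACE_ALLOC()/TRACE_FREE() call site above becomes a dedicated trace_xfs_alloc_*() tracepoint. With many events sharing the same argument (an xfs_alloc_arg), the natural ftrace idiom is one event class plus a DEFINE_EVENT per call site. The sketch below only illustrates that shape; the field selection is an assumption, and the real definitions in fs/xfs/xfs_trace.h record considerably more of the allocation argument structure.

	/* Illustrative event class; not the actual xfs_trace.h definition. */
	DECLARE_EVENT_CLASS(example_alloc_class,
		TP_PROTO(struct xfs_alloc_arg *args),
		TP_ARGS(args),
		TP_STRUCT__entry(
			__field(xfs_agnumber_t, agno)
			__field(xfs_extlen_t, len)
		),
		TP_fast_assign(
			__entry->agno = args->agno;
			__entry->len = args->len;
		),
		TP_printk("agno %u len %u", __entry->agno, __entry->len)
	);

	DEFINE_EVENT(example_alloc_class, example_alloc_exact_done,
		TP_PROTO(struct xfs_alloc_arg *args),
		TP_ARGS(args));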
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index e704caee10d..599bffa3978 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -37,6 +37,15 @@ typedef enum xfs_alloctype
XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */
} xfs_alloctype_t;
+#define XFS_ALLOC_TYPES \
+ { XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
+ { XFS_ALLOCTYPE_FIRST_AG, "FIRST_AG" }, \
+ { XFS_ALLOCTYPE_START_AG, "START_AG" }, \
+ { XFS_ALLOCTYPE_THIS_AG, "THIS_AG" }, \
+ { XFS_ALLOCTYPE_START_BNO, "START_BNO" }, \
+ { XFS_ALLOCTYPE_NEAR_BNO, "NEAR_BNO" }, \
+ { XFS_ALLOCTYPE_THIS_BNO, "THIS_BNO" }
+
/*
* Flags for xfs_alloc_fix_freelist.
*/
@@ -109,24 +118,6 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp,
#ifdef __KERNEL__
-#if defined(XFS_ALLOC_TRACE)
-/*
- * Allocation tracing buffer size.
- */
-#define XFS_ALLOC_TRACE_SIZE 4096
-extern ktrace_t *xfs_alloc_trace_buf;
-
-/*
- * Types for alloc tracing.
- */
-#define XFS_ALLOC_KTRACE_ALLOC 1
-#define XFS_ALLOC_KTRACE_FREE 2
-#define XFS_ALLOC_KTRACE_MODAGF 3
-#define XFS_ALLOC_KTRACE_BUSY 4
-#define XFS_ALLOC_KTRACE_UNBUSY 5
-#define XFS_ALLOC_KTRACE_BUSYSEARCH 6
-#endif
-
void
xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_agnumber_t agno,
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index c10c3a292d3..adbd9141aea 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -39,6 +39,7 @@
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
STATIC struct xfs_btree_cur *
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 4ece1906bd4..e953b6cfb2a 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -47,6 +47,7 @@
#include "xfs_trans_space.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
/*
* xfs_attr.c
@@ -89,10 +90,6 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
-#if defined(XFS_ATTR_TRACE)
-ktrace_t *xfs_attr_trace_buf;
-#endif
-
STATIC int
xfs_attr_name_to_xname(
struct xfs_name *xname,
@@ -123,9 +120,13 @@ xfs_inode_hasattr(
* Overall external interface routines.
*========================================================================*/
-int
-xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name,
- char *value, int *valuelenp, int flags)
+STATIC int
+xfs_attr_get_int(
+ struct xfs_inode *ip,
+ struct xfs_name *name,
+ char *value,
+ int *valuelenp,
+ int flags)
{
xfs_da_args_t args;
int error;
@@ -188,7 +189,7 @@ xfs_attr_get(
return error;
xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags);
+ error = xfs_attr_get_int(ip, &xname, value, valuelenp, flags);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return(error);
}
@@ -636,7 +637,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
return EIO;
xfs_ilock(dp, XFS_ILOCK_SHARED);
- xfs_attr_trace_l_c("syscall start", context);
/*
* Decide on what work routines to call based on the inode size.
@@ -652,7 +652,6 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
}
xfs_iunlock(dp, XFS_ILOCK_SHARED);
- xfs_attr_trace_l_c("syscall end", context);
return error;
}
@@ -698,7 +697,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
context->count * sizeof(alist->al_offset[0]);
context->firstu -= ATTR_ENTSIZE(namelen);
if (context->firstu < arraytop) {
- xfs_attr_trace_l_c("buffer full", context);
+ trace_xfs_attr_list_full(context);
alist->al_more = 1;
context->seen_enough = 1;
return 1;
@@ -710,7 +709,7 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
aep->a_name[namelen] = 0;
alist->al_offset[context->count++] = context->firstu;
alist->al_count = context->count;
- xfs_attr_trace_l_c("add", context);
+ trace_xfs_attr_list_add(context);
return 0;
}
@@ -1849,7 +1848,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
node = bp->data;
switch (be16_to_cpu(node->hdr.info.magic)) {
case XFS_DA_NODE_MAGIC:
- xfs_attr_trace_l_cn("wrong blk", context, node);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
break;
@@ -1857,20 +1856,18 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
leaf = bp->data;
if (cursor->hashval > be32_to_cpu(leaf->entries[
be16_to_cpu(leaf->hdr.count)-1].hashval)) {
- xfs_attr_trace_l_cl("wrong blk",
- context, leaf);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
} else if (cursor->hashval <=
be32_to_cpu(leaf->entries[0].hashval)) {
- xfs_attr_trace_l_cl("maybe wrong blk",
- context, leaf);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
}
break;
default:
- xfs_attr_trace_l_c("wrong blk - ??", context);
+ trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
bp = NULL;
}
@@ -1915,8 +1912,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
if (cursor->hashval
<= be32_to_cpu(btree->hashval)) {
cursor->blkno = be32_to_cpu(btree->before);
- xfs_attr_trace_l_cb("descending",
- context, btree);
+ trace_xfs_attr_list_node_descend(context,
+ btree);
break;
}
}
@@ -2143,8 +2140,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
- bp = xfs_buf_get_flags(mp->m_ddev_targp, dblkno, blkcnt,
- XFS_BUF_LOCK | XBF_DONT_BLOCK);
+ bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
+ XFS_BUF_LOCK | XBF_DONT_BLOCK);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
@@ -2266,85 +2263,3 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
}
return(0);
}
-
-#if defined(XFS_ATTR_TRACE)
-/*
- * Add a trace buffer entry for an attr_list context structure.
- */
-void
-xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context,
- (__psunsigned_t)NULL,
- (__psunsigned_t)NULL,
- (__psunsigned_t)NULL);
-}
-
-/*
- * Add a trace buffer entry for a context structure and a Btree node.
- */
-void
-xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_intnode *node)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context,
- (__psunsigned_t)be16_to_cpu(node->hdr.count),
- (__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
- (__psunsigned_t)be32_to_cpu(node->btree[
- be16_to_cpu(node->hdr.count)-1].hashval));
-}
-
-/*
- * Add a trace buffer entry for a context structure and a Btree element.
- */
-void
-xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_node_entry *btree)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context,
- (__psunsigned_t)be32_to_cpu(btree->hashval),
- (__psunsigned_t)be32_to_cpu(btree->before),
- (__psunsigned_t)NULL);
-}
-
-/*
- * Add a trace buffer entry for a context structure and a leaf block.
- */
-void
-xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
- struct xfs_attr_leafblock *leaf)
-{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context,
- (__psunsigned_t)be16_to_cpu(leaf->hdr.count),
- (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
- (__psunsigned_t)be32_to_cpu(leaf->entries[
- be16_to_cpu(leaf->hdr.count)-1].hashval));
-}
-
-/*
- * Add a trace buffer entry for the arguments given to the routine,
- * generic form.
- */
-void
-xfs_attr_trace_enter(int type, char *where,
- struct xfs_attr_list_context *context,
- __psunsigned_t a13, __psunsigned_t a14,
- __psunsigned_t a15)
-{
- ASSERT(xfs_attr_trace_buf);
- ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
- (void *)((__psunsigned_t)where),
- (void *)((__psunsigned_t)context->dp),
- (void *)((__psunsigned_t)context->cursor->hashval),
- (void *)((__psunsigned_t)context->cursor->blkno),
- (void *)((__psunsigned_t)context->cursor->offset),
- (void *)((__psunsigned_t)context->alist),
- (void *)((__psunsigned_t)context->bufsize),
- (void *)((__psunsigned_t)context->count),
- (void *)((__psunsigned_t)context->firstu),
- NULL,
- (void *)((__psunsigned_t)context->dupcnt),
- (void *)((__psunsigned_t)context->flags),
- (void *)a13, (void *)a14, (void *)a15);
-}
-#endif /* XFS_ATTR_TRACE */
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index fb3b2a68b9b..59b410ce69a 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -48,6 +48,16 @@ struct xfs_attr_list_context;
#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
+#define XFS_ATTR_FLAGS \
+ { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
+ { ATTR_ROOT, "ROOT" }, \
+ { ATTR_TRUST, "TRUST" }, \
+ { ATTR_SECURE, "SECURE" }, \
+ { ATTR_CREATE, "CREATE" }, \
+ { ATTR_REPLACE, "REPLACE" }, \
+ { ATTR_KERNOTIME, "KERNOTIME" }, \
+ { ATTR_KERNOVAL, "KERNOVAL" }
+
/*
* The maximum size (into the kernel or returned from the kernel) of an
* attribute value or the buffer used for an attr_list() call. Larger
@@ -131,7 +141,6 @@ typedef struct xfs_attr_list_context {
*/
int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
int xfs_attr_inactive(struct xfs_inode *dp);
-int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_list_int(struct xfs_attr_list_context *);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index afdc8911637..baf41b5af75 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -42,6 +42,7 @@
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* xfs_attr_leaf.c
@@ -98,7 +99,7 @@ STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
* If namespace bits don't match return 0.
* If all match then return 1.
*/
-STATIC_INLINE int
+STATIC int
xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
{
return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
@@ -594,7 +595,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
cursor = context->cursor;
ASSERT(cursor != NULL);
- xfs_attr_trace_l_c("sf start", context);
+ trace_xfs_attr_list_sf(context);
/*
* If the buffer is large enough and the cursor is at the start,
@@ -627,7 +628,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
return error;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
}
- xfs_attr_trace_l_c("sf big-gulp", context);
+ trace_xfs_attr_list_sf_all(context);
return(0);
}
@@ -653,7 +654,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, sfe);
- xfs_attr_trace_l_c("sf corrupted", context);
kmem_free(sbuf);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -693,7 +693,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
if (i == nsbuf) {
kmem_free(sbuf);
- xfs_attr_trace_l_c("blk end", context);
return(0);
}
@@ -719,7 +718,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
kmem_free(sbuf);
- xfs_attr_trace_l_c("sf E-O-F", context);
return(0);
}
@@ -2323,7 +2321,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
cursor = context->cursor;
cursor->initted = 1;
- xfs_attr_trace_l_cl("blk start", context, leaf);
+ trace_xfs_attr_list_leaf(context);
/*
* Re-find our place in the leaf block if this is a new syscall.
@@ -2344,7 +2342,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
}
}
if (i == be16_to_cpu(leaf->hdr.count)) {
- xfs_attr_trace_l_c("not found", context);
+ trace_xfs_attr_list_notfound(context);
return(0);
}
} else {
@@ -2419,7 +2417,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
break;
cursor->offset++;
}
- xfs_attr_trace_l_cl("blk end", context, leaf);
+ trace_xfs_attr_list_leaf_end(context);
return(retval);
}
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index ea22839caed..76ab7b0cbb3 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -25,8 +25,6 @@
* to fit into the literal area of the inode.
*/
-struct xfs_inode;
-
/*
* Entries are packed toward the top as tight as possible.
*/
@@ -69,42 +67,4 @@ typedef struct xfs_attr_sf_sort {
(be16_to_cpu(((xfs_attr_shortform_t *) \
((dp)->i_afp->if_u1.if_data))->hdr.totsize))
-#if defined(XFS_ATTR_TRACE)
-/*
- * Kernel tracing support for attribute lists
- */
-struct xfs_attr_list_context;
-struct xfs_da_intnode;
-struct xfs_da_node_entry;
-struct xfs_attr_leafblock;
-
-#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_attr_trace_buf;
-
-/*
- * Trace record types.
- */
-#define XFS_ATTR_KTRACE_L_C 1 /* context */
-#define XFS_ATTR_KTRACE_L_CN 2 /* context, node */
-#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */
-#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */
-
-void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context);
-void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_intnode *node);
-void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
- struct xfs_da_node_entry *btree);
-void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
- struct xfs_attr_leafblock *leaf);
-void xfs_attr_trace_enter(int type, char *where,
- struct xfs_attr_list_context *context,
- __psunsigned_t a13, __psunsigned_t a14,
- __psunsigned_t a15);
-#else
-#define xfs_attr_trace_l_c(w,c)
-#define xfs_attr_trace_l_cn(w,c,n)
-#define xfs_attr_trace_l_cb(w,c,b)
-#define xfs_attr_trace_l_cl(w,c,l)
-#endif /* XFS_ATTR_TRACE */
-
#endif /* __XFS_ATTR_SF_H__ */
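Editor's note: the xfs_bmap.c hunks that follow replace the per-function MASK()/STATE_SET()/SWITCH_STATE macro machinery with named BMAP_* bit flags OR-ed into a plain int and dispatched with constant case labels. A minimal standalone illustration of that style is below; the EX_* flag values and describe() are hypothetical, not the BMAP_* definitions from the XFS headers.

	/* Named bit flags combined in one int, dispatched via constant case labels. */
	#define EX_LEFT_CONTIG	(1 << 0)
	#define EX_RIGHT_CONTIG	(1 << 1)

	static const char *describe(int state)
	{
		switch (state & (EX_LEFT_CONTIG | EX_RIGHT_CONTIG)) {
		case EX_LEFT_CONTIG | EX_RIGHT_CONTIG:
			return "both neighbours contiguous";
		case EX_LEFT_CONTIG:
			return "left contiguous";
		case EX_RIGHT_CONTIG:
			return "right contiguous";
		default:
			return "neither";
		}
	}

Using bitwise-OR expressions directly in case labels works because they are integer constant expressions, which is what makes the converted switch statements below both shorter and greppable.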
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 8971fb09d38..98251cdc52a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -54,6 +54,7 @@
#include "xfs_buf_item.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
#ifdef DEBUG
@@ -272,71 +273,6 @@ xfs_bmap_isaeof(
int whichfork, /* data or attribute fork */
char *aeof); /* return value */
-#ifdef XFS_BMAP_TRACE
-/*
- * Add bmap trace entry prior to a call to xfs_iext_remove.
- */
-STATIC void
-xfs_bmap_trace_delete(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) deleted */
- xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_insert, or
- * reading in the extents list from the disk (in the btree).
- */
-STATIC void
-xfs_bmap_trace_insert(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) inserted */
- xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
- xfs_bmbt_irec_t *r1, /* inserted record 1 */
- xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry after updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_post_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry updated */
- int whichfork); /* data or attr fork */
-
-/*
- * Add bmap trace entry prior to updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_pre_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry to be updated */
- int whichfork); /* data or attr fork */
-
-#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w) \
- xfs_bmap_trace_delete(__func__,d,ip,i,c,w)
-#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w) \
- xfs_bmap_trace_insert(__func__,d,ip,i,c,r1,r2,w)
-#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w) \
- xfs_bmap_trace_post_update(__func__,d,ip,i,w)
-#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w) \
- xfs_bmap_trace_pre_update(__func__,d,ip,i,w)
-#else
-#define XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
-#define XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
-#define XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
-#define XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
-#endif /* XFS_BMAP_TRACE */
-
/*
* Compute the worst-case number of indirect blocks that will be used
* for ip's delayed extent of length "len".
@@ -363,18 +299,6 @@ xfs_bmap_validate_ret(
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_bunmap_trace(
- xfs_inode_t *ip,
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- inst_t *ra);
-#else
-#define xfs_bunmap_trace(ip, bno, len, flags, ra)
-#endif /* XFS_RW_TRACE */
-
STATIC int
xfs_bmap_count_tree(
xfs_mount_t *mp,
@@ -590,9 +514,9 @@ xfs_bmap_add_extent(
* already extents in the list.
*/
if (nextents == 0) {
- XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL,
- whichfork);
- xfs_iext_insert(ifp, 0, 1, new);
+ xfs_iext_insert(ip, 0, 1, new,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
+
ASSERT(cur == NULL);
ifp->if_lastex = 0;
if (!isnullstartblock(new->br_startblock)) {
@@ -759,26 +683,10 @@ xfs_bmap_add_extent_delay_real(
xfs_filblks_t temp=0; /* value for dnew calculations */
xfs_filblks_t temp2=0;/* value for dnew calculations */
int tmp_rval; /* partial logging flags */
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_FILLING, RIGHT_FILLING,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
#define LEFT r[0]
#define RIGHT r[1]
#define PREV r[2]
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
-#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE \
- (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
/*
* Set up a bunch of variables to make the tests simpler.
@@ -790,69 +698,80 @@ xfs_bmap_add_extent_delay_real(
new_endoff = new->br_startoff + new->br_blockcount;
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+
/*
* Set flags determining what part of the previous delayed allocation
* extent is being replaced by a real allocation.
*/
- STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
- STATE_SET(RIGHT_FILLING,
- PREV.br_startoff + PREV.br_blockcount == new_endoff);
+ if (PREV.br_startoff == new->br_startoff)
+ state |= BMAP_LEFT_FILLING;
+ if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+ state |= BMAP_RIGHT_FILLING;
+
/*
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock));
+
+ if (isnullstartblock(LEFT.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
- LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
- LEFT.br_state == new->br_state &&
- LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+ LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+ LEFT.br_state == new->br_state &&
+ LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
/*
* Check and set flags if this segment has a right neighbor.
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
+ if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock));
+
+ if (isnullstartblock(RIGHT.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new_endoff == RIGHT.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- RIGHT.br_startblock &&
- new->br_state == RIGHT.br_state &&
- new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
- ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
- MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
- LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
- <= MAXEXTLEN));
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new_endoff == RIGHT.br_startoff &&
+ new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+ new->br_state == RIGHT.br_state &&
+ new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+ ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING)) !=
+ (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING) ||
+ LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+ <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
+
error = 0;
/*
* Switch out based on the FILLING and CONTIG state bits.
*/
- switch (SWITCH_STATE) {
-
- case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The left and right neighbors are both contiguous with new.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount +
RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 2);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 2, state);
ip->i_df.if_lastex = idx - 1;
ip->i_d.di_nextents--;
if (cur == NULL)
@@ -885,20 +804,18 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The left neighbor is contiguous, the right is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
- XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
+ xfs_iext_remove(ip, idx, 1, state);
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
@@ -921,19 +838,19 @@ xfs_bmap_add_extent_delay_real(
PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
* The right neighbor is contiguous, the left is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep, new->br_startblock);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount + RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
- XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx + 1, 1);
+ xfs_iext_remove(ip, idx + 1, 1, state);
if (cur == NULL)
rval = XFS_ILOG_DEXT;
else {
@@ -956,15 +873,16 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount;
break;
- case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Filling in all of a previously delayed allocation extent.
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep, new->br_startblock);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -987,19 +905,20 @@ xfs_bmap_add_extent_delay_real(
temp2 = new->br_blockcount;
break;
- case MASK2(LEFT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
/*
* Filling in the first part of a previous delayed allocation.
* The left neighbor is contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + new->br_blockcount);
xfs_bmbt_set_startoff(ep,
PREV.br_startoff + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
ip->i_df.if_lastex = idx - 1;
if (cur == NULL)
@@ -1021,7 +940,7 @@ xfs_bmap_add_extent_delay_real(
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
startblockval(PREV.br_startblock));
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
temp = LEFT.br_startoff;
@@ -1029,18 +948,16 @@ xfs_bmap_add_extent_delay_real(
PREV.br_blockcount;
break;
- case MASK(LEFT_FILLING):
+ case BMAP_LEFT_FILLING:
/*
* Filling in the first part of a previous delayed allocation.
* The left neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startoff(ep, new_endoff);
temp = PREV.br_blockcount - new->br_blockcount;
xfs_bmbt_set_blockcount(ep, temp);
- XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
+ xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1071,27 +988,27 @@ xfs_bmap_add_extent_delay_real(
(cur ? cur->bc_private.b.allocated : 0));
ep = xfs_iext_get_ext(ifp, idx + 1);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
- case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in the last part of a previous delayed allocation.
* The right neighbor is contiguous with the new allocation.
*/
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+ trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
new->br_startoff, new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount,
RIGHT.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
ip->i_df.if_lastex = idx + 1;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1112,7 +1029,7 @@ xfs_bmap_add_extent_delay_real(
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
startblockval(PREV.br_startblock));
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
temp = PREV.br_startoff;
@@ -1120,17 +1037,15 @@ xfs_bmap_add_extent_delay_real(
RIGHT.br_blockcount;
break;
- case MASK(RIGHT_FILLING):
+ case BMAP_RIGHT_FILLING:
/*
* Filling in the last part of a previous delayed allocation.
* The right neighbor is not contiguous.
*/
temp = PREV.br_blockcount - new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
- XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 1, new);
+ xfs_iext_insert(ip, idx + 1, 1, new, state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1161,7 +1076,7 @@ xfs_bmap_add_extent_delay_real(
(cur ? cur->bc_private.b.allocated : 0));
ep = xfs_iext_get_ext(ifp, idx);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
temp = PREV.br_startoff;
@@ -1175,7 +1090,7 @@ xfs_bmap_add_extent_delay_real(
* This case is avoided almost all the time.
*/
temp = new->br_startoff - PREV.br_startoff;
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
r[0] = *new;
r[1].br_state = PREV.br_state;
@@ -1183,9 +1098,7 @@ xfs_bmap_add_extent_delay_real(
r[1].br_startoff = new_endoff;
temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
r[1].br_blockcount = temp2;
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
+ xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1242,24 +1155,24 @@ xfs_bmap_add_extent_delay_real(
}
ep = xfs_iext_get_ext(ifp, idx);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+ trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
nullstartblock((int)temp2));
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
*dnew = temp + temp2;
/* DELTA: One in-core extent is split in three. */
temp = PREV.br_startoff;
temp2 = PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK2(LEFT_FILLING, RIGHT_CONTIG):
- case MASK2(RIGHT_FILLING, LEFT_CONTIG):
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
- case MASK(LEFT_CONTIG):
- case MASK(RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_CONTIG:
+ case BMAP_RIGHT_CONTIG:
/*
* These cases are all impossible.
*/
@@ -1279,14 +1192,6 @@ done:
#undef LEFT
#undef RIGHT
#undef PREV
-#undef MASK
-#undef MASK2
-#undef MASK3
-#undef MASK4
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -1316,27 +1221,10 @@ xfs_bmap_add_extent_unwritten_real(
int state = 0;/* state bits, accessed thru macros */
xfs_filblks_t temp=0;
xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_FILLING, RIGHT_FILLING,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
#define LEFT r[0]
#define RIGHT r[1]
#define PREV r[2]
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define MASK3(a,b,c) (MASK2(a,b) | MASK(c))
-#define MASK4(a,b,c,d) (MASK3(a,b,c) | MASK(d))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE \
- (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
-
/*
* Set up a bunch of variables to make the tests simpler.
*/
@@ -1352,68 +1240,78 @@ xfs_bmap_add_extent_unwritten_real(
new_endoff = new->br_startoff + new->br_blockcount;
ASSERT(PREV.br_startoff <= new->br_startoff);
ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
+
/*
* Set flags determining what part of the previous oldext allocation
* extent is being replaced by a newext allocation.
*/
- STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
- STATE_SET(RIGHT_FILLING,
- PREV.br_startoff + PREV.br_blockcount == new_endoff);
+ if (PREV.br_startoff == new->br_startoff)
+ state |= BMAP_LEFT_FILLING;
+ if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
+ state |= BMAP_RIGHT_FILLING;
+
/*
* Check and set flags if this segment has a left neighbor.
* Don't set contiguous if the combined extent would be too large.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock));
+
+ if (isnullstartblock(LEFT.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
- LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
- LEFT.br_state == newext &&
- LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
+
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
+ LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
+ LEFT.br_state == newext &&
+ LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
/*
* Check and set flags if this segment has a right neighbor.
* Don't set contiguous if the combined extent would be too large.
* Also check for all-three-contiguous being too large.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
+ if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock));
+ if (isnullstartblock(RIGHT.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new_endoff == RIGHT.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- RIGHT.br_startblock &&
- newext == RIGHT.br_state &&
- new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
- ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
- MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
- LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
- <= MAXEXTLEN));
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new_endoff == RIGHT.br_startoff &&
+ new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
+ newext == RIGHT.br_state &&
+ new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
+ ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING)) !=
+ (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
+ BMAP_RIGHT_FILLING) ||
+ LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
+ <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
+
/*
* Switch out based on the FILLING and CONTIG state bits.
*/
- switch (SWITCH_STATE) {
-
- case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
+ BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The left and right neighbors are both contiguous with new.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount +
RIGHT.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 2);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 2, state);
ip->i_df.if_lastex = idx - 1;
ip->i_d.di_nextents -= 2;
if (cur == NULL)
@@ -1450,20 +1348,18 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The left neighbor is contiguous, the right is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + PREV.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
- XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
+ xfs_iext_remove(ip, idx, 1, state);
ip->i_d.di_nextents--;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1492,21 +1388,18 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting all of a previous oldext extent to newext.
* The right neighbor is contiguous, the left is not.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount + RIGHT.br_blockcount);
xfs_bmbt_set_state(ep, newext);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
ip->i_df.if_lastex = idx;
- XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx + 1, 1);
+ xfs_iext_remove(ip, idx + 1, 1, state);
ip->i_d.di_nextents--;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1535,17 +1428,16 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount;
break;
- case MASK2(LEFT_FILLING, RIGHT_FILLING):
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
/*
* Setting all of a previous oldext extent to newext.
* Neither the left nor right neighbors are contiguous with
* the new one.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_state(ep, newext);
- XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1566,27 +1458,25 @@ xfs_bmap_add_extent_unwritten_real(
temp2 = new->br_blockcount;
break;
- case MASK2(LEFT_FILLING, LEFT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
/*
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
LEFT.br_blockcount + new->br_blockcount);
xfs_bmbt_set_startoff(ep,
PREV.br_startoff + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startblock(ep,
new->br_startblock + new->br_blockcount);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1617,22 +1507,21 @@ xfs_bmap_add_extent_unwritten_real(
PREV.br_blockcount;
break;
- case MASK(LEFT_FILLING):
+ case BMAP_LEFT_FILLING:
/*
* Setting the first part of a previous oldext extent to newext.
* The left neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
xfs_bmbt_set_startoff(ep, new_endoff);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
xfs_bmbt_set_startblock(ep,
new->br_startblock + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
+ xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1660,24 +1549,21 @@ xfs_bmap_add_extent_unwritten_real(
temp2 = PREV.br_blockcount;
break;
- case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
+ case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is contiguous with the new allocation.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+ trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
new->br_startoff, new->br_startblock,
new->br_blockcount + RIGHT.br_blockcount, newext);
- XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx + 1;
if (cur == NULL)
rval = XFS_ILOG_DEXT;
@@ -1707,18 +1593,17 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount;
break;
- case MASK(RIGHT_FILLING):
+ case BMAP_RIGHT_FILLING:
/*
* Setting the last part of a previous oldext extent to newext.
* The right neighbor is not contiguous.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
PREV.br_blockcount - new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
- XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 1, new);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
+ xfs_iext_insert(ip, idx + 1, 1, new, state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents++;
if (cur == NULL)
@@ -1756,19 +1641,18 @@ xfs_bmap_add_extent_unwritten_real(
* newext. Contiguity is impossible here.
* One extent becomes three extents.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep,
new->br_startoff - PREV.br_startoff);
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
r[0] = *new;
r[1].br_startoff = new_endoff;
r[1].br_blockcount =
PREV.br_startoff + PREV.br_blockcount - new_endoff;
r[1].br_startblock = new->br_startblock + new->br_blockcount;
r[1].br_state = oldext;
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
+ xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
ip->i_df.if_lastex = idx + 1;
ip->i_d.di_nextents += 2;
if (cur == NULL)
@@ -1813,13 +1697,13 @@ xfs_bmap_add_extent_unwritten_real(
temp2 = PREV.br_blockcount;
break;
- case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
- case MASK2(LEFT_FILLING, RIGHT_CONTIG):
- case MASK2(RIGHT_FILLING, LEFT_CONTIG):
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
- case MASK(LEFT_CONTIG):
- case MASK(RIGHT_CONTIG):
+ case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
+ case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ case BMAP_LEFT_CONTIG:
+ case BMAP_RIGHT_CONTIG:
/*
* These cases are all impossible.
*/
@@ -1839,14 +1723,6 @@ done:
#undef LEFT
#undef RIGHT
#undef PREV
-#undef MASK
-#undef MASK2
-#undef MASK3
-#undef MASK4
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -1872,62 +1748,57 @@ xfs_bmap_add_extent_hole_delay(
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0; /* temp for indirect calculations */
xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
-
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
ep = xfs_iext_get_ext(ifp, idx);
state = 0;
ASSERT(isnullstartblock(new->br_startblock));
+
/*
* Check and set flags if this segment has a left neighbor
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock));
+
+ if (isnullstartblock(left.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
+
/*
* Check and set flags if the current (right) segment exists.
* If it doesn't exist, we're converting the hole at end-of-file.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+ if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock));
+
+ if (isnullstartblock(right.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
+
/*
* Set contiguity flags on the left and right neighbors.
* Don't let extents get too large, even if the pieces are contiguous.
*/
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) &&
- left.br_startoff + left.br_blockcount == new->br_startoff &&
- left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) &&
- new->br_startoff + new->br_blockcount == right.br_startoff &&
- new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
- (!STATE_TEST(LEFT_CONTIG) ||
- (left.br_blockcount + new->br_blockcount +
- right.br_blockcount <= MAXEXTLEN)));
+ if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
+ left.br_startoff + left.br_blockcount == new->br_startoff &&
+ left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
+ if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
+ new->br_startoff + new->br_blockcount == right.br_startoff &&
+ new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+ (!(state & BMAP_LEFT_CONTIG) ||
+ (left.br_blockcount + new->br_blockcount +
+ right.br_blockcount <= MAXEXTLEN)))
+ state |= BMAP_RIGHT_CONTIG;
+
/*
* Switch out based on the contiguity flags.
*/
- switch (SWITCH_STATE) {
-
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with delayed allocations
* on the left and on the right.
@@ -1935,8 +1806,8 @@ xfs_bmap_add_extent_hole_delay(
*/
temp = left.br_blockcount + new->br_blockcount +
right.br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
+
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock) +
@@ -1944,53 +1815,52 @@ xfs_bmap_add_extent_hole_delay(
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
nullstartblock((int)newlen));
- XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
- XFS_DATA_FORK);
- XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK);
- xfs_iext_remove(ifp, idx, 1);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 1, state);
ip->i_df.if_lastex = idx - 1;
/* DELTA: Two in-core extents were replaced by one. */
temp2 = temp;
temp = left.br_startoff;
break;
- case MASK(LEFT_CONTIG):
+ case BMAP_LEFT_CONTIG:
/*
* New allocation is contiguous with a delayed allocation
* on the left.
* Merge the new allocation with the left neighbor.
*/
temp = left.br_blockcount + new->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
oldlen = startblockval(left.br_startblock) +
startblockval(new->br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
nullstartblock((int)newlen));
- XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1,
- XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx - 1;
/* DELTA: One in-core extent grew into a hole. */
temp2 = temp;
temp = left.br_startoff;
break;
- case MASK(RIGHT_CONTIG):
+ case BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with a delayed allocation
* on the right.
* Merge the new allocation with the right neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
temp = new->br_blockcount + right.br_blockcount;
oldlen = startblockval(new->br_startblock) +
startblockval(right.br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_allf(ep, new->br_startoff,
nullstartblock((int)newlen), temp, right.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ip->i_df.if_lastex = idx;
/* DELTA: One in-core extent grew into a hole. */
temp2 = temp;
@@ -2004,9 +1874,7 @@ xfs_bmap_add_extent_hole_delay(
* Insert a new entry.
*/
oldlen = newlen = 0;
- XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL,
- XFS_DATA_FORK);
- xfs_iext_insert(ifp, idx, 1, new);
+ xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
/* DELTA: A new in-core extent was added in a hole. */
temp2 = new->br_blockcount;
@@ -2030,12 +1898,6 @@ xfs_bmap_add_extent_hole_delay(
}
*logflagsp = 0;
return 0;
-#undef MASK
-#undef MASK2
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -2062,83 +1924,75 @@ xfs_bmap_add_extent_hole_real(
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0;
xfs_filblks_t temp2=0;
- enum { /* bit number definitions for state */
- LEFT_CONTIG, RIGHT_CONTIG,
- LEFT_DELAY, RIGHT_DELAY,
- LEFT_VALID, RIGHT_VALID
- };
-
-#define MASK(b) (1 << (b))
-#define MASK2(a,b) (MASK(a) | MASK(b))
-#define STATE_SET(b,v) ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
-#define STATE_TEST(b) (state & MASK(b))
-#define STATE_SET_TEST(b,v) ((v) ? ((state |= MASK(b)), 1) : \
- ((state &= ~MASK(b)), 0))
-#define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
ep = xfs_iext_get_ext(ifp, idx);
state = 0;
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
+
/*
* Check and set flags if this segment has a left neighbor.
*/
- if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
+ if (idx > 0) {
+ state |= BMAP_LEFT_VALID;
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock));
+ if (isnullstartblock(left.br_startblock))
+ state |= BMAP_LEFT_DELAY;
}
+
/*
* Check and set flags if this segment has a current value.
* Not true if we're inserting into the "hole" at eof.
*/
- if (STATE_SET_TEST(RIGHT_VALID,
- idx <
- ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
+ if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+ state |= BMAP_RIGHT_VALID;
xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock));
+ if (isnullstartblock(right.br_startblock))
+ state |= BMAP_RIGHT_DELAY;
}
+
/*
* We're inserting a real allocation between "left" and "right".
* Set the contiguity flags. Don't let extents get too large.
*/
- STATE_SET(LEFT_CONTIG,
- STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
- left.br_startoff + left.br_blockcount == new->br_startoff &&
- left.br_startblock + left.br_blockcount == new->br_startblock &&
- left.br_state == new->br_state &&
- left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
- STATE_SET(RIGHT_CONTIG,
- STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
- new->br_startoff + new->br_blockcount == right.br_startoff &&
- new->br_startblock + new->br_blockcount ==
- right.br_startblock &&
- new->br_state == right.br_state &&
- new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
- (!STATE_TEST(LEFT_CONTIG) ||
- left.br_blockcount + new->br_blockcount +
- right.br_blockcount <= MAXEXTLEN));
+ if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
+ left.br_startoff + left.br_blockcount == new->br_startoff &&
+ left.br_startblock + left.br_blockcount == new->br_startblock &&
+ left.br_state == new->br_state &&
+ left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
+ state |= BMAP_LEFT_CONTIG;
+
+ if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
+ new->br_startoff + new->br_blockcount == right.br_startoff &&
+ new->br_startblock + new->br_blockcount == right.br_startblock &&
+ new->br_state == right.br_state &&
+ new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
+ (!(state & BMAP_LEFT_CONTIG) ||
+ left.br_blockcount + new->br_blockcount +
+ right.br_blockcount <= MAXEXTLEN))
+ state |= BMAP_RIGHT_CONTIG;
error = 0;
/*
* Select which case we're in here, and implement it.
*/
- switch (SWITCH_STATE) {
-
- case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with real allocations on the
* left and on the right.
* Merge all three into a single extent record.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
- whichfork);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
left.br_blockcount + new->br_blockcount +
right.br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
- whichfork);
- XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork);
- xfs_iext_remove(ifp, idx, 1);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
+ xfs_iext_remove(ip, idx, 1, state);
ifp->if_lastex = idx - 1;
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
@@ -2173,16 +2027,17 @@ xfs_bmap_add_extent_hole_real(
right.br_blockcount;
break;
- case MASK(LEFT_CONTIG):
+ case BMAP_LEFT_CONTIG:
/*
* New allocation is contiguous with a real allocation
* on the left.
* Merge the new allocation with the left neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
left.br_blockcount + new->br_blockcount);
- XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork);
+ trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+
ifp->if_lastex = idx - 1;
if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
@@ -2207,17 +2062,18 @@ xfs_bmap_add_extent_hole_real(
new->br_blockcount;
break;
- case MASK(RIGHT_CONTIG):
+ case BMAP_RIGHT_CONTIG:
/*
* New allocation is contiguous with a real allocation
* on the right.
* Merge the new allocation with the right neighbor.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
new->br_blockcount + right.br_blockcount,
right.br_state);
- XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+
ifp->if_lastex = idx;
if (cur == NULL) {
rval = xfs_ilog_fext(whichfork);
@@ -2248,8 +2104,7 @@ xfs_bmap_add_extent_hole_real(
* real allocation.
* Insert a new entry.
*/
- XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork);
- xfs_iext_insert(ifp, idx, 1, new);
+ xfs_iext_insert(ip, idx, 1, new, state);
ifp->if_lastex = idx;
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
@@ -2283,12 +2138,6 @@ xfs_bmap_add_extent_hole_real(
done:
*logflagsp = rval;
return error;
-#undef MASK
-#undef MASK2
-#undef STATE_SET
-#undef STATE_TEST
-#undef STATE_SET_TEST
-#undef SWITCH_STATE
}
/*
@@ -3115,8 +2964,13 @@ xfs_bmap_del_extent(
uint qfield; /* quota field to update */
xfs_filblks_t temp; /* for indirect length calculations */
xfs_filblks_t temp2; /* for indirect length calculations */
+ int state = 0;
XFS_STATS_INC(xs_del_exlist);
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
+
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT((idx >= 0) && (idx < ifp->if_bytes /
@@ -3196,8 +3050,8 @@ xfs_bmap_del_extent(
/*
* Matches the whole extent. Delete the entry.
*/
- XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork);
- xfs_iext_remove(ifp, idx, 1);
+ xfs_iext_remove(ip, idx, 1,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
ifp->if_lastex = idx;
if (delay)
break;
@@ -3217,7 +3071,7 @@ xfs_bmap_del_extent(
/*
* Deleting the first part of the extent.
*/
- XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_startoff(ep, del_endoff);
temp = got.br_blockcount - del->br_blockcount;
xfs_bmbt_set_blockcount(ep, temp);
@@ -3226,13 +3080,12 @@ xfs_bmap_del_extent(
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx,
- whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
da_new = temp;
break;
}
xfs_bmbt_set_startblock(ep, del_endblock);
- XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
if (!cur) {
flags |= xfs_ilog_fext(whichfork);
break;
@@ -3248,19 +3101,18 @@ xfs_bmap_del_extent(
* Deleting the last part of the extent.
*/
temp = got.br_blockcount - del->br_blockcount;
- XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
ifp->if_lastex = idx;
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx,
- whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
da_new = temp;
break;
}
- XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
if (!cur) {
flags |= xfs_ilog_fext(whichfork);
break;
@@ -3277,7 +3129,7 @@ xfs_bmap_del_extent(
* Deleting the middle of the extent.
*/
temp = del->br_startoff - got.br_startoff;
- XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork);
+ trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
xfs_bmbt_set_blockcount(ep, temp);
new.br_startoff = del_endoff;
temp2 = got_endoff - del_endoff;
@@ -3364,10 +3216,8 @@ xfs_bmap_del_extent(
}
}
}
- XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork);
- XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL,
- whichfork);
- xfs_iext_insert(ifp, idx + 1, 1, &new);
+ trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+ xfs_iext_insert(ip, idx + 1, 1, &new, state);
ifp->if_lastex = idx + 1;
break;
}
@@ -3687,7 +3537,9 @@ xfs_bmap_local_to_extents(
xfs_iext_add(ifp, 0, 1);
ep = xfs_iext_get_ext(ifp, 0);
xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
- XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork);
+ trace_xfs_bmap_post_update(ip, 0,
+ whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
+ _THIS_IP_);
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
ip->i_d.di_nblocks = 1;
xfs_trans_mod_dquot_byino(tp, ip,
@@ -3800,158 +3652,6 @@ xfs_bmap_search_extents(
return ep;
}
-
-#ifdef XFS_BMAP_TRACE
-ktrace_t *xfs_bmap_trace_buf;
-
-/*
- * Add a bmap trace buffer entry. Base routine for the others.
- */
-STATIC void
-xfs_bmap_trace_addentry(
- int opcode, /* operation */
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(ies) */
- xfs_extnum_t cnt, /* count of entries, 1 or 2 */
- xfs_bmbt_rec_host_t *r1, /* first record */
- xfs_bmbt_rec_host_t *r2, /* second record or null */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t tr2;
-
- ASSERT(cnt == 1 || cnt == 2);
- ASSERT(r1 != NULL);
- if (cnt == 1) {
- ASSERT(r2 == NULL);
- r2 = &tr2;
- memset(&tr2, 0, sizeof(tr2));
- } else
- ASSERT(r2 != NULL);
- ktrace_enter(xfs_bmap_trace_buf,
- (void *)(__psint_t)(opcode | (whichfork << 16)),
- (void *)fname, (void *)desc, (void *)ip,
- (void *)(__psint_t)idx,
- (void *)(__psint_t)cnt,
- (void *)(__psunsigned_t)(ip->i_ino >> 32),
- (void *)(__psunsigned_t)(unsigned)ip->i_ino,
- (void *)(__psunsigned_t)(r1->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l0),
- (void *)(__psunsigned_t)(r1->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l1),
- (void *)(__psunsigned_t)(r2->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l0),
- (void *)(__psunsigned_t)(r2->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l1)
- );
- ASSERT(ip->i_xtrace);
- ktrace_enter(ip->i_xtrace,
- (void *)(__psint_t)(opcode | (whichfork << 16)),
- (void *)fname, (void *)desc, (void *)ip,
- (void *)(__psint_t)idx,
- (void *)(__psint_t)cnt,
- (void *)(__psunsigned_t)(ip->i_ino >> 32),
- (void *)(__psunsigned_t)(unsigned)ip->i_ino,
- (void *)(__psunsigned_t)(r1->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l0),
- (void *)(__psunsigned_t)(r1->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r1->l1),
- (void *)(__psunsigned_t)(r2->l0 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l0),
- (void *)(__psunsigned_t)(r2->l1 >> 32),
- (void *)(__psunsigned_t)(unsigned)(r2->l1)
- );
-}
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_remove.
- */
-STATIC void
-xfs_bmap_trace_delete(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) deleted */
- xfs_extnum_t cnt, /* count of entries deleted, 1 or 2 */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
- cnt, xfs_iext_get_ext(ifp, idx),
- cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL,
- whichfork);
-}
-
-/*
- * Add bmap trace entry prior to a call to xfs_iext_insert, or
- * reading in the extents list from the disk (in the btree).
- */
-STATIC void
-xfs_bmap_trace_insert(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry(entries) inserted */
- xfs_extnum_t cnt, /* count of entries inserted, 1 or 2 */
- xfs_bmbt_irec_t *r1, /* inserted record 1 */
- xfs_bmbt_irec_t *r2, /* inserted record 2 or null */
- int whichfork) /* data or attr fork */
-{
- xfs_bmbt_rec_host_t tr1; /* compressed record 1 */
- xfs_bmbt_rec_host_t tr2; /* compressed record 2 if needed */
-
- xfs_bmbt_set_all(&tr1, r1);
- if (cnt == 2) {
- ASSERT(r2 != NULL);
- xfs_bmbt_set_all(&tr2, r2);
- } else {
- ASSERT(cnt == 1);
- ASSERT(r2 == NULL);
- }
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
- cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
-}
-
-/*
- * Add bmap trace entry after updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_post_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry updated */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
- 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork);
-}
-
-/*
- * Add bmap trace entry prior to updating an extent record in place.
- */
-STATIC void
-xfs_bmap_trace_pre_update(
- const char *fname, /* function name */
- char *desc, /* operation description */
- xfs_inode_t *ip, /* incore inode pointer */
- xfs_extnum_t idx, /* index of entry to be updated */
- int whichfork) /* data or attr fork */
-{
- xfs_ifork_t *ifp; /* inode fork pointer */
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
- xfs_iext_get_ext(ifp, idx), NULL, whichfork);
-}
-#endif /* XFS_BMAP_TRACE */
-
/*
* Compute the worst-case number of indirect blocks that will be used
* for ip's delayed extent of length "len".
@@ -3983,37 +3683,6 @@ xfs_bmap_worst_indlen(
return rval;
}
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_bunmap_trace(
- xfs_inode_t *ip,
- xfs_fileoff_t bno,
- xfs_filblks_t len,
- int flags,
- inst_t *ra)
-{
- if (ip->i_rwtrace == NULL)
- return;
- ktrace_enter(ip->i_rwtrace,
- (void *)(__psint_t)XFS_BUNMAP,
- (void *)ip,
- (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
- (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
- (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
- (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
- (void *)(__psint_t)len,
- (void *)(__psint_t)flags,
- (void *)(unsigned long)current_cpu(),
- (void *)ra,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0,
- (void *)0);
-}
-#endif
-
/*
* Convert inode from non-attributed to attributed.
* Must not be in a transaction, ip must not be locked.
@@ -4702,34 +4371,30 @@ error0:
return XFS_ERROR(EFSCORRUPTED);
}
-#ifdef XFS_BMAP_TRACE
+#ifdef DEBUG
/*
* Add bmap trace insert entries for all the contents of the extent records.
*/
void
xfs_bmap_trace_exlist(
- const char *fname, /* function name */
xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t cnt, /* count of entries in the list */
- int whichfork) /* data or attr fork */
+ int whichfork, /* data or attr fork */
+ unsigned long caller_ip)
{
- xfs_bmbt_rec_host_t *ep; /* current extent record */
xfs_extnum_t idx; /* extent record index */
xfs_ifork_t *ifp; /* inode fork pointer */
- xfs_bmbt_irec_t s; /* file extent record */
+ int state = 0;
+
+ if (whichfork == XFS_ATTR_FORK)
+ state |= BMAP_ATTRFORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
- for (idx = 0; idx < cnt; idx++) {
- ep = xfs_iext_get_ext(ifp, idx);
- xfs_bmbt_get_all(ep, &s);
- XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL,
- whichfork);
- }
+ for (idx = 0; idx < cnt; idx++)
+ trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}
-#endif
-#ifdef DEBUG
/*
* Validate that the bmbt_irecs being returned from bmapi are valid
* given the callers original parameters. Specifically check the
@@ -5478,7 +5143,8 @@ xfs_bunmapi(
int rsvd; /* OK to allocate reserved blocks */
xfs_fsblock_t sum;
- xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
+ trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
+
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 56f62d2edc3..419dafb9d87 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -95,6 +95,21 @@ typedef struct xfs_bmap_free
/* need write cache flushing and no */
/* additional allocation alignments */
+#define XFS_BMAPI_FLAGS \
+ { XFS_BMAPI_WRITE, "WRITE" }, \
+ { XFS_BMAPI_DELAY, "DELAY" }, \
+ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
+ { XFS_BMAPI_METADATA, "METADATA" }, \
+ { XFS_BMAPI_EXACT, "EXACT" }, \
+ { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
+ { XFS_BMAPI_ASYNC, "ASYNC" }, \
+ { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
+ { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
+ { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
+ { XFS_BMAPI_CONTIG, "CONTIG" }, \
+ { XFS_BMAPI_CONVERT, "CONVERT" }
+
+
static inline int xfs_bmapi_aflag(int w)
{
return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
@@ -135,36 +150,43 @@ typedef struct xfs_bmalloca {
char conv; /* overwriting unwritten extents */
} xfs_bmalloca_t;
-#if defined(__KERNEL__) && defined(XFS_BMAP_TRACE)
/*
- * Trace operations for bmap extent tracing
+ * Flags for xfs_bmap_add_extent*.
*/
-#define XFS_BMAP_KTRACE_DELETE 1
-#define XFS_BMAP_KTRACE_INSERT 2
-#define XFS_BMAP_KTRACE_PRE_UP 3
-#define XFS_BMAP_KTRACE_POST_UP 4
-
-#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */
-#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */
-extern ktrace_t *xfs_bmap_trace_buf;
+#define BMAP_LEFT_CONTIG (1 << 0)
+#define BMAP_RIGHT_CONTIG (1 << 1)
+#define BMAP_LEFT_FILLING (1 << 2)
+#define BMAP_RIGHT_FILLING (1 << 3)
+#define BMAP_LEFT_DELAY (1 << 4)
+#define BMAP_RIGHT_DELAY (1 << 5)
+#define BMAP_LEFT_VALID (1 << 6)
+#define BMAP_RIGHT_VALID (1 << 7)
+#define BMAP_ATTRFORK (1 << 8)
+
+#define XFS_BMAP_EXT_FLAGS \
+ { BMAP_LEFT_CONTIG, "LC" }, \
+ { BMAP_RIGHT_CONTIG, "RC" }, \
+ { BMAP_LEFT_FILLING, "LF" }, \
+ { BMAP_RIGHT_FILLING, "RF" }, \
+ { BMAP_ATTRFORK, "ATTR" }
/*
* Add bmap trace insert entries for all the contents of the extent list.
+ *
+ * Quite excessive tracing. Only do this for debug builds.
*/
+#if defined(__KERNEL__) && defined(DEBUG)
void
xfs_bmap_trace_exlist(
- const char *fname, /* function name */
struct xfs_inode *ip, /* incore inode pointer */
xfs_extnum_t cnt, /* count of entries in list */
- int whichfork); /* data or attr fork */
+ int whichfork, /* data or attr fork */
+ unsigned long caller_ip);
#define XFS_BMAP_TRACE_EXLIST(ip,c,w) \
- xfs_bmap_trace_exlist(__func__,ip,c,w)
-
-#else /* __KERNEL__ && XFS_BMAP_TRACE */
-
+ xfs_bmap_trace_exlist(ip,c,w, _THIS_IP_)
+#else
#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
-
-#endif /* __KERNEL__ && XFS_BMAP_TRACE */
+#endif
/*
* Convert inode from non-attributed to attributed.
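/*
 * Standalone sketch, not part of the patch above: the xfs_bmap.c changes
 * replace the per-function MASK()/STATE_SET() macros with the BMAP_* bit
 * flags declared in the xfs_bmap.h hunk just shown, then switch directly
 * on the OR-ed flag word.  The flag values below are copied from that
 * hunk; classify() and main() are illustrative only and assume nothing
 * about the real XFS code paths.
 */
#include <stdio.h>

#define BMAP_LEFT_CONTIG	(1 << 0)
#define BMAP_RIGHT_CONTIG	(1 << 1)
#define BMAP_LEFT_VALID		(1 << 6)
#define BMAP_RIGHT_VALID	(1 << 7)

static const char *classify(int state)
{
	/* Same pattern as the new switch (state & (...)) blocks above. */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		return "merge with both neighbors";
	case BMAP_LEFT_CONTIG:
		return "merge with left neighbor";
	case BMAP_RIGHT_CONTIG:
		return "merge with right neighbor";
	default:
		return "insert a new extent record";
	}
}

int main(void)
{
	int state = 0;

	/* Conditions accumulate with |= instead of STATE_SET(bit, cond). */
	state |= BMAP_LEFT_VALID;
	state |= BMAP_LEFT_CONTIG;

	printf("%s\n", classify(state));	/* prints: merge with left neighbor */
	return 0;
}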
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index eb7b702d069..38751d5fac6 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -98,8 +98,7 @@ xfs_bmdr_to_bmbt(
* This code must be in sync with the routines xfs_bmbt_get_startoff,
* xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
*/
-
-STATIC_INLINE void
+STATIC void
__xfs_bmbt_get_all(
__uint64_t l0,
__uint64_t l1,
@@ -769,12 +768,6 @@ xfs_bmbt_trace_enter(
(void *)a0, (void *)a1, (void *)a2, (void *)a3,
(void *)a4, (void *)a5, (void *)a6, (void *)a7,
(void *)a8, (void *)a9, (void *)a10);
- ktrace_enter(ip->i_btrace,
- (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
- (void *)func, (void *)s, (void *)ip, (void *)cur,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10);
}
STATIC void
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index 5549d495947..cf07ca7c22e 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -46,20 +46,12 @@ typedef struct xfs_bmdr_block {
#define BMBT_STARTBLOCK_BITLEN 52
#define BMBT_BLOCKCOUNT_BITLEN 21
-
-#define BMBT_USE_64 1
-
-typedef struct xfs_bmbt_rec_32
-{
- __uint32_t l0, l1, l2, l3;
-} xfs_bmbt_rec_32_t;
-typedef struct xfs_bmbt_rec_64
-{
+typedef struct xfs_bmbt_rec {
__be64 l0, l1;
-} xfs_bmbt_rec_64_t;
+} xfs_bmbt_rec_t;
typedef __uint64_t xfs_bmbt_rec_base_t; /* use this for casts */
-typedef xfs_bmbt_rec_64_t xfs_bmbt_rec_t, xfs_bmdr_rec_t;
+typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
typedef struct xfs_bmbt_rec_host {
__uint64_t l0, l1;
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 52b5f14d0c3..36a0992dd66 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -39,6 +39,7 @@
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Cursor allocation zone.
@@ -81,7 +82,7 @@ xfs_btree_check_lblock(
XFS_ERRTAG_BTREE_CHECK_LBLOCK,
XFS_RANDOM_BTREE_CHECK_LBLOCK))) {
if (bp)
- xfs_buftrace("LBTREE ERROR", bp);
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW,
mp);
return XFS_ERROR(EFSCORRUPTED);
@@ -119,7 +120,7 @@ xfs_btree_check_sblock(
XFS_ERRTAG_BTREE_CHECK_SBLOCK,
XFS_RANDOM_BTREE_CHECK_SBLOCK))) {
if (bp)
- xfs_buftrace("SBTREE ERROR", bp);
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_btree_check_sblock",
XFS_ERRLEVEL_LOW, cur->bc_mp, block);
return XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h
index b3f5eb3c3c6..2d8a309873e 100644
--- a/fs/xfs/xfs_btree_trace.h
+++ b/fs/xfs/xfs_btree_trace.h
@@ -58,8 +58,6 @@ void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *,
struct xfs_buf *, int, int);
void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *,
struct xfs_buf *, int, int, int);
-void xfs_btree_trace_argfffi(const char *, struct xfs_btree_cur *,
- xfs_dfiloff_t, xfs_dfsbno_t, xfs_dfilblks_t, int, int);
void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int);
void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int,
union xfs_btree_ptr, union xfs_btree_key *, int);
@@ -71,24 +69,10 @@ void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *,
union xfs_btree_rec *, int);
void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int);
-
-#define XFS_ALLOCBT_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_allocbt_trace_buf;
-
-#define XFS_INOBT_TRACE_SIZE 4096 /* size of global trace buffer */
-extern ktrace_t *xfs_inobt_trace_buf;
-
-#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */
-#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */
-extern ktrace_t *xfs_bmbt_trace_buf;
-
-
#define XFS_BTREE_TRACE_ARGBI(c, b, i) \
xfs_btree_trace_argbi(__func__, c, b, i, __LINE__)
#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \
xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__)
-#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) \
- xfs_btree_trace_argfffi(__func__, c, o, b, i, j, __LINE__)
#define XFS_BTREE_TRACE_ARGI(c, i) \
xfs_btree_trace_argi(__func__, c, i, __LINE__)
#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \
@@ -104,7 +88,6 @@ extern ktrace_t *xfs_bmbt_trace_buf;
#else
#define XFS_BTREE_TRACE_ARGBI(c, b, i)
#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
-#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j)
#define XFS_BTREE_TRACE_ARGI(c, i)
#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 92af4098c7e..a30f7e9eb2b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -29,6 +29,7 @@
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_buf_item_zone;
@@ -164,7 +165,7 @@ xfs_buf_item_size(
* is the buf log format structure with the
* cancel flag in it.
*/
- xfs_buf_item_trace("SIZE STALE", bip);
+ trace_xfs_buf_item_size_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
return 1;
}
@@ -206,7 +207,7 @@ xfs_buf_item_size(
}
}
- xfs_buf_item_trace("SIZE NORM", bip);
+ trace_xfs_buf_item_size(bip);
return nvecs;
}
@@ -259,7 +260,7 @@ xfs_buf_item_format(
* is the buf log format structure with the
* cancel flag in it.
*/
- xfs_buf_item_trace("FORMAT STALE", bip);
+ trace_xfs_buf_item_format_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
bip->bli_format.blf_size = nvecs;
return;
@@ -335,7 +336,7 @@ xfs_buf_item_format(
/*
* Check to make sure everything is consistent.
*/
- xfs_buf_item_trace("FORMAT NORM", bip);
+ trace_xfs_buf_item_format(bip);
xfs_buf_item_log_check(bip);
}
@@ -355,8 +356,7 @@ xfs_buf_item_pin(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("PIN", bip);
- xfs_buftrace("XFS_PIN", bp);
+ trace_xfs_buf_item_pin(bip);
xfs_bpin(bp);
}
@@ -383,8 +383,7 @@ xfs_buf_item_unpin(
ASSERT(bp != NULL);
ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- xfs_buf_item_trace("UNPIN", bip);
- xfs_buftrace("XFS_UNPIN", bp);
+ trace_xfs_buf_item_unpin(bip);
freed = atomic_dec_and_test(&bip->bli_refcount);
ailp = bip->bli_item.li_ailp;
@@ -395,8 +394,8 @@ xfs_buf_item_unpin(
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
- xfs_buf_item_trace("UNPIN STALE", bip);
- xfs_buftrace("XFS_UNPIN STALE", bp);
+ trace_xfs_buf_item_unpin_stale(bip);
+
/*
* If we get called here because of an IO error, we may
* or may not have the item on the AIL. xfs_trans_ail_delete()
@@ -440,8 +439,8 @@ xfs_buf_item_unpin_remove(
if ((atomic_read(&bip->bli_refcount) == 1) &&
(bip->bli_flags & XFS_BLI_STALE)) {
ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
- xfs_buf_item_trace("UNPIN REMOVE", bip);
- xfs_buftrace("XFS_UNPIN_REMOVE", bp);
+ trace_xfs_buf_item_unpin_stale(bip);
+
/*
* yes -- clear the xaction descriptor in-use flag
* and free the chunk if required. We can safely
@@ -495,7 +494,7 @@ xfs_buf_item_trylock(
XFS_BUF_HOLD(bp);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("TRYLOCK SUCCESS", bip);
+ trace_xfs_buf_item_trylock(bip);
return XFS_ITEM_SUCCESS;
}
@@ -524,7 +523,6 @@ xfs_buf_item_unlock(
uint hold;
bp = bip->bli_buf;
- xfs_buftrace("XFS_UNLOCK", bp);
/*
* Clear the buffer's association with this transaction.
@@ -547,7 +545,7 @@ xfs_buf_item_unlock(
*/
if (bip->bli_flags & XFS_BLI_STALE) {
bip->bli_flags &= ~XFS_BLI_LOGGED;
- xfs_buf_item_trace("UNLOCK STALE", bip);
+ trace_xfs_buf_item_unlock_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
if (!aborted)
return;
@@ -574,7 +572,7 @@ xfs_buf_item_unlock(
* release the buffer at the end of this routine.
*/
hold = bip->bli_flags & XFS_BLI_HOLD;
- xfs_buf_item_trace("UNLOCK", bip);
+ trace_xfs_buf_item_unlock(bip);
/*
* If the buf item isn't tracking any data, free it.
@@ -618,7 +616,8 @@ xfs_buf_item_committed(
xfs_buf_log_item_t *bip,
xfs_lsn_t lsn)
{
- xfs_buf_item_trace("COMMITTED", bip);
+ trace_xfs_buf_item_committed(bip);
+
if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
(bip->bli_item.li_lsn != 0)) {
return bip->bli_item.li_lsn;
@@ -640,7 +639,7 @@ xfs_buf_item_push(
xfs_buf_t *bp;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- xfs_buf_item_trace("PUSH", bip);
+ trace_xfs_buf_item_push(bip);
bp = bip->bli_buf;
@@ -738,9 +737,6 @@ xfs_buf_item_init(
bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
bip->bli_format.blf_map_size = map_size;
-#ifdef XFS_BLI_TRACE
- bip->bli_trace = ktrace_alloc(XFS_BLI_TRACE_SIZE, KM_NOFS);
-#endif
#ifdef XFS_TRANS_DEBUG
/*
@@ -878,9 +874,6 @@ xfs_buf_item_free(
kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */
-#ifdef XFS_BLI_TRACE
- ktrace_free(bip->bli_trace);
-#endif
kmem_zone_free(xfs_buf_item_zone, bip);
}
@@ -897,7 +890,8 @@ xfs_buf_item_relse(
{
xfs_buf_log_item_t *bip;
- xfs_buftrace("XFS_RELSE", bp);
+ trace_xfs_buf_item_relse(bp, _RET_IP_);
+
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
@@ -994,7 +988,7 @@ xfs_buf_iodone_callbacks(
if (XFS_FORCED_SHUTDOWN(mp)) {
ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
XFS_BUF_SUPER_STALE(bp);
- xfs_buftrace("BUF_IODONE_CB", bp);
+ trace_xfs_buf_item_iodone(bp, _RET_IP_);
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1030,7 +1024,7 @@ xfs_buf_iodone_callbacks(
XFS_BUF_SET_START(bp);
}
ASSERT(XFS_BUF_IODONE_FUNC(bp));
- xfs_buftrace("BUF_IODONE ASYNC", bp);
+ trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
xfs_buf_relse(bp);
} else {
/*
@@ -1053,9 +1047,7 @@ xfs_buf_iodone_callbacks(
}
return;
}
-#ifdef XFSERRORDEBUG
- xfs_buftrace("XFS BUFCB NOERR", bp);
-#endif
+
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
@@ -1081,7 +1073,9 @@ xfs_buf_error_relse(
XFS_BUF_DONE(bp);
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_ERROR(bp,0);
- xfs_buftrace("BUF_ERROR_RELSE", bp);
+
+ trace_xfs_buf_error_relse(bp, _RET_IP_);
+
if (! XFS_FORCED_SHUTDOWN(mp))
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
/*
@@ -1128,34 +1122,3 @@ xfs_buf_iodone(
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
xfs_buf_item_free(bip);
}
-
-#if defined(XFS_BLI_TRACE)
-void
-xfs_buf_item_trace(
- char *id,
- xfs_buf_log_item_t *bip)
-{
- xfs_buf_t *bp;
- ASSERT(bip->bli_trace != NULL);
-
- bp = bip->bli_buf;
- ktrace_enter(bip->bli_trace,
- (void *)id,
- (void *)bip->bli_buf,
- (void *)((unsigned long)bip->bli_flags),
- (void *)((unsigned long)bip->bli_recur),
- (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
- (void *)((unsigned long)
- (0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
- (void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
- (void *)((unsigned long)XFS_BUF_COUNT(bp)),
- (void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
- XFS_BUF_FSPRIVATE(bp, void *),
- XFS_BUF_FSPRIVATE2(bp, void *),
- (void *)(unsigned long)XFS_BUF_ISPINNED(bp),
- (void *)XFS_BUF_IODONE_FUNC(bp),
- (void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
- (void *)bip->bli_item.li_desc,
- (void *)((unsigned long)bip->bli_item.li_flags));
-}
-#endif /* XFS_BLI_TRACE */
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 5a41c348bb1..217f34af00c 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -70,22 +70,21 @@ typedef struct xfs_buf_log_format_t {
#define XFS_BLI_INODE_ALLOC_BUF 0x10
#define XFS_BLI_STALE_INODE 0x20
+#define XFS_BLI_FLAGS \
+ { XFS_BLI_HOLD, "HOLD" }, \
+ { XFS_BLI_DIRTY, "DIRTY" }, \
+ { XFS_BLI_STALE, "STALE" }, \
+ { XFS_BLI_LOGGED, "LOGGED" }, \
+ { XFS_BLI_INODE_ALLOC_BUF, "INODE_ALLOC" }, \
+ { XFS_BLI_STALE_INODE, "STALE_INODE" }
+
#ifdef __KERNEL__
struct xfs_buf;
-struct ktrace;
struct xfs_mount;
struct xfs_buf_log_item;
-#if defined(XFS_BLI_TRACE)
-#define XFS_BLI_TRACE_SIZE 32
-
-void xfs_buf_item_trace(char *, struct xfs_buf_log_item *);
-#else
-#define xfs_buf_item_trace(id, bip)
-#endif
-
/*
* This is the in core log item structure used to track information
* needed to log buffers. It tracks how many times the lock has been
@@ -97,9 +96,6 @@ typedef struct xfs_buf_log_item {
unsigned int bli_flags; /* misc flags */
unsigned int bli_recur; /* lock recursion count */
atomic_t bli_refcount; /* cnt of tp refs */
-#ifdef XFS_BLI_TRACE
- struct ktrace *bli_trace; /* event trace buf */
-#endif
#ifdef XFS_TRANS_DEBUG
char *bli_orig; /* original buffer copy */
char *bli_logged; /* bytes logged (bitmap) */
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 2847bbc1c53..c0c8869115b 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -46,6 +46,7 @@
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* xfs_da_btree.c
@@ -2107,7 +2108,7 @@ xfs_da_do_buf(
(be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
mp, XFS_ERRTAG_DA_READ_BUF,
XFS_RANDOM_DA_READ_BUF))) {
- xfs_buftrace("DA READ ERROR", rbp->bps[0]);
+ trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
XFS_ERRLEVEL_LOW, mp, info);
error = XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 8c536167bf7..30cd08f56a3 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -125,6 +125,13 @@ typedef struct xfs_da_args {
#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
+#define XFS_DA_OP_FLAGS \
+ { XFS_DA_OP_JUSTCHECK, "JUSTCHECK" }, \
+ { XFS_DA_OP_RENAME, "RENAME" }, \
+ { XFS_DA_OP_ADDNAME, "ADDNAME" }, \
+ { XFS_DA_OP_OKNOENT, "OKNOENT" }, \
+ { XFS_DA_OP_CILOOKUP, "CILOOKUP" }
+
/*
* Structure to describe buffer(s) for a block.
* This is needed in the directory version 2 format case, when
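/*
 * Standalone sketch, not part of the patch above: each of the new
 * { value, "NAME" } tables (XFS_BMAPI_FLAGS, XFS_BLI_FLAGS, XFS_DA_OP_FLAGS)
 * pairs a flag bit with a short tag so the new tracepoints can print flag
 * words symbolically.  The decode loop below shows the idea in plain
 * userspace C and is illustrative only.  Of the numeric values, only
 * OKNOENT (0x0008) and CILOOKUP (0x0010) appear in the hunk above; the
 * first three are assumed here for the sake of a complete example.
 */
#include <stdio.h>

#define XFS_DA_OP_JUSTCHECK	0x0001	/* assumed value */
#define XFS_DA_OP_RENAME	0x0002	/* assumed value */
#define XFS_DA_OP_ADDNAME	0x0004	/* assumed value */
#define XFS_DA_OP_OKNOENT	0x0008
#define XFS_DA_OP_CILOOKUP	0x0010

static const struct { int mask; const char *name; } xfs_da_op_names[] = {
	{ XFS_DA_OP_JUSTCHECK,	"JUSTCHECK" },
	{ XFS_DA_OP_RENAME,	"RENAME" },
	{ XFS_DA_OP_ADDNAME,	"ADDNAME" },
	{ XFS_DA_OP_OKNOENT,	"OKNOENT" },
	{ XFS_DA_OP_CILOOKUP,	"CILOOKUP" },
};

/* Print a flag word as "RENAME|ADDNAME"-style text. */
static void print_da_op_flags(int flags)
{
	const char *sep = "";
	size_t i;

	for (i = 0; i < sizeof(xfs_da_op_names) / sizeof(xfs_da_op_names[0]); i++) {
		if (flags & xfs_da_op_names[i].mask) {
			printf("%s%s", sep, xfs_da_op_names[i].name);
			sep = "|";
		}
	}
	putchar('\n');
}

int main(void)
{
	print_da_op_flags(XFS_DA_OP_RENAME | XFS_DA_OP_ADDNAME);
	return 0;
}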
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index ab89a7e94a0..d1483a4f71b 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -43,6 +43,7 @@
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
/*
* Syssgi interface for swapext
@@ -168,7 +169,6 @@ xfs_swap_extents(
}
if (VN_CACHED(VFS_I(tip)) != 0) {
- xfs_inval_cached_trace(tip, 0, -1, 0, -1);
error = xfs_flushinval_pages(tip, 0, -1,
FI_REMAPF_LOCKED);
if (error)
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index bb1d58eb398..93634a7e90e 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -40,9 +40,9 @@
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
struct xfs_name xfs_name_dotdot = {"..", 2};
@@ -525,7 +525,8 @@ xfs_dir2_grow_inode(
xfs_trans_t *tp;
xfs_drfsbno_t nblks;
- xfs_dir2_trace_args_s("grow_inode", args, space);
+ trace_xfs_dir2_grow_inode(args, space);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -703,7 +704,8 @@ xfs_dir2_shrink_inode(
xfs_mount_t *mp;
xfs_trans_t *tp;
- xfs_dir2_trace_args_db("shrink_inode", args, db, bp);
+ trace_xfs_dir2_shrink_inode(args, db);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index ab52e9e1c1e..ddc4ecc7807 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -36,8 +36,8 @@
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Local function prototypes.
@@ -94,7 +94,8 @@ xfs_dir2_block_addname(
__be16 *tagp; /* pointer to tag value */
xfs_trans_t *tp; /* transaction structure */
- xfs_dir2_trace_args("block_addname", args);
+ trace_xfs_dir2_block_addname(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -590,7 +591,8 @@ xfs_dir2_block_lookup(
int error; /* error return value */
xfs_mount_t *mp; /* filesystem mount point */
- xfs_dir2_trace_args("block_lookup", args);
+ trace_xfs_dir2_block_lookup(args);
+
/*
* Get the buffer, look up the entry.
* If not found (ENOENT) then return, have no buffer.
@@ -747,7 +749,8 @@ xfs_dir2_block_removename(
int size; /* shortform size */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("block_removename", args);
+ trace_xfs_dir2_block_removename(args);
+
/*
* Look up the entry in the block. Gets the buffer and entry index.
* It will always be there, the vnodeops level does a lookup first.
@@ -823,7 +826,8 @@ xfs_dir2_block_replace(
int error; /* error return value */
xfs_mount_t *mp; /* filesystem mount point */
- xfs_dir2_trace_args("block_replace", args);
+ trace_xfs_dir2_block_replace(args);
+
/*
* Lookup the entry in the directory. Get buffer and entry index.
* This will always succeed since the caller has already done a lookup.
@@ -897,7 +901,8 @@ xfs_dir2_leaf_to_block(
int to; /* block/leaf to index */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_bb("leaf_to_block", args, lbp, dbp);
+ trace_xfs_dir2_leaf_to_block(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -1044,7 +1049,8 @@ xfs_dir2_sf_to_block(
xfs_trans_t *tp; /* transaction pointer */
struct xfs_name name;
- xfs_dir2_trace_args("sf_to_block", args);
+ trace_xfs_dir2_sf_to_block(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 41ad537c49e..29f484c11b3 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -38,8 +38,8 @@
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Local function declarations.
@@ -80,7 +80,8 @@ xfs_dir2_block_to_leaf(
int needscan; /* need to rescan bestfree */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_b("block_to_leaf", args, dbp);
+ trace_xfs_dir2_block_to_leaf(args);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
@@ -188,7 +189,8 @@ xfs_dir2_leaf_addname(
xfs_trans_t *tp; /* transaction pointer */
xfs_dir2_db_t use_block; /* data block number */
- xfs_dir2_trace_args("leaf_addname", args);
+ trace_xfs_dir2_leaf_addname(args);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -1266,7 +1268,8 @@ xfs_dir2_leaf_lookup(
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("leaf_lookup", args);
+ trace_xfs_dir2_leaf_lookup(args);
+
/*
* Look up name in the leaf block, returning both buffers and index.
*/
@@ -1454,7 +1457,8 @@ xfs_dir2_leaf_removename(
xfs_dir2_data_off_t oldbest; /* old value of best free */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("leaf_removename", args);
+ trace_xfs_dir2_leaf_removename(args);
+
/*
* Lookup the leaf entry, get the leaf and data blocks read in.
*/
@@ -1586,7 +1590,8 @@ xfs_dir2_leaf_replace(
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args("leaf_replace", args);
+ trace_xfs_dir2_leaf_replace(args);
+
/*
* Look up the entry.
*/
@@ -1766,7 +1771,9 @@ xfs_dir2_node_to_leaf(
if (state->path.active > 1)
return 0;
args = state->args;
- xfs_dir2_trace_args("node_to_leaf", args);
+
+ trace_xfs_dir2_node_to_leaf(args);
+
mp = state->mp;
dp = args->dp;
tp = args->trans;
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 5a81ccd1045..ce6e355199b 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -37,8 +37,8 @@
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
-#include "xfs_dir2_trace.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
/*
* Function declarations.
@@ -123,7 +123,8 @@ xfs_dir2_leaf_to_node(
__be16 *to; /* pointer to freespace entry */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_b("leaf_to_node", args, lbp);
+ trace_xfs_dir2_leaf_to_node(args);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
@@ -196,7 +197,8 @@ xfs_dir2_leafn_add(
xfs_mount_t *mp; /* filesystem mount point */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_sb("leafn_add", args, index, bp);
+ trace_xfs_dir2_leafn_add(args, index);
+
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
@@ -711,8 +713,8 @@ xfs_dir2_leafn_moveents(
int stale; /* count stale leaves copied */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_bibii("leafn_moveents", args, bp_s, start_s, bp_d,
- start_d, count);
+ trace_xfs_dir2_leafn_moveents(args, start_s, start_d, count);
+
/*
* Silently return if nothing to do.
*/
@@ -933,7 +935,8 @@ xfs_dir2_leafn_remove(
int needscan; /* need to rescan data frees */
xfs_trans_t *tp; /* transaction pointer */
- xfs_dir2_trace_args_sb("leafn_remove", args, index, bp);
+ trace_xfs_dir2_leafn_remove(args, index);
+
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
@@ -1363,7 +1366,8 @@ xfs_dir2_node_addname(
int rval; /* sub-return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_addname", args);
+ trace_xfs_dir2_node_addname(args);
+
/*
* Allocate and initialize the state (btree cursor).
*/
@@ -1822,7 +1826,8 @@ xfs_dir2_node_lookup(
int rval; /* operation return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_lookup", args);
+ trace_xfs_dir2_node_lookup(args);
+
/*
* Allocate and initialize the btree cursor.
*/
@@ -1875,7 +1880,8 @@ xfs_dir2_node_removename(
int rval; /* operation return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_removename", args);
+ trace_xfs_dir2_node_removename(args);
+
/*
* Allocate and initialize the btree cursor.
*/
@@ -1944,7 +1950,8 @@ xfs_dir2_node_replace(
int rval; /* internal return value */
xfs_da_state_t *state; /* btree cursor */
- xfs_dir2_trace_args("node_replace", args);
+ trace_xfs_dir2_node_replace(args);
+
/*
* Allocate and initialize the btree cursor.
*/
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index e89734e8464..9d4f17a6967 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -37,7 +37,7 @@
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
-#include "xfs_dir2_trace.h"
+#include "xfs_trace.h"
/*
* Prototypes for internal functions.
@@ -169,7 +169,8 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_t *sfp; /* shortform structure */
xfs_ino_t temp;
- xfs_dir2_trace_args_sb("block_to_sf", args, size, bp);
+ trace_xfs_dir2_block_to_sf(args);
+
dp = args->dp;
mp = dp->i_mount;
@@ -281,7 +282,8 @@ xfs_dir2_sf_addname(
xfs_dir2_sf_t *sfp; /* shortform structure */
xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */
- xfs_dir2_trace_args("sf_addname", args);
+ trace_xfs_dir2_sf_addname(args);
+
ASSERT(xfs_dir2_sf_lookup(args) == ENOENT);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -654,7 +656,8 @@ xfs_dir2_sf_create(
xfs_dir2_sf_t *sfp; /* shortform structure */
int size; /* directory size */
- xfs_dir2_trace_args_i("sf_create", args, pino);
+ trace_xfs_dir2_sf_create(args);
+
dp = args->dp;
ASSERT(dp != NULL);
@@ -808,7 +811,8 @@ xfs_dir2_sf_lookup(
enum xfs_dacmp cmp; /* comparison result */
xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
- xfs_dir2_trace_args("sf_lookup", args);
+ trace_xfs_dir2_sf_lookup(args);
+
xfs_dir2_sf_check(args);
dp = args->dp;
@@ -891,7 +895,8 @@ xfs_dir2_sf_removename(
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
- xfs_dir2_trace_args("sf_removename", args);
+ trace_xfs_dir2_sf_removename(args);
+
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -982,7 +987,8 @@ xfs_dir2_sf_replace(
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
- xfs_dir2_trace_args("sf_replace", args);
+ trace_xfs_dir2_sf_replace(args);
+
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
@@ -1125,7 +1131,8 @@ xfs_dir2_sf_toino4(
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_t *sfp; /* new sf directory */
- xfs_dir2_trace_args("sf_toino4", args);
+ trace_xfs_dir2_sf_toino4(args);
+
dp = args->dp;
/*
@@ -1202,7 +1209,8 @@ xfs_dir2_sf_toino8(
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
xfs_dir2_sf_t *sfp; /* new sf directory */
- xfs_dir2_trace_args("sf_toino8", args);
+ trace_xfs_dir2_sf_toino8(args);
+
dp = args->dp;
/*
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
deleted file mode 100644
index 6cc7c0c681a..00000000000
--- a/fs/xfs/xfs_dir2_trace.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_inum.h"
-#include "xfs_dir2.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_dir2_trace.h"
-
-#ifdef XFS_DIR2_TRACE
-ktrace_t *xfs_dir2_trace_buf;
-
-/*
- * Enter something in the trace buffers.
- */
-static void
-xfs_dir2_trace_enter(
- xfs_inode_t *dp,
- int type,
- char *where,
- char *name,
- int namelen,
- void *a0,
- void *a1,
- void *a2,
- void *a3,
- void *a4,
- void *a5,
- void *a6,
- void *a7)
-{
- void *n[5];
-
- ASSERT(xfs_dir2_trace_buf);
- ASSERT(dp->i_dir_trace);
- if (name)
- memcpy(n, name, min((int)sizeof(n), namelen));
- else
- memset((char *)n, 0, sizeof(n));
- ktrace_enter(xfs_dir2_trace_buf,
- (void *)(long)type, (void *)where,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)(long)namelen,
- (void *)n[0], (void *)n[1], (void *)n[2],
- (void *)n[3], (void *)n[4]);
- ktrace_enter(dp->i_dir_trace,
- (void *)(long)type, (void *)where,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)(long)namelen,
- (void *)n[0], (void *)n[1], (void *)n[2],
- (void *)n[3], (void *)n[4]);
-}
-
-void
-xfs_dir2_trace_args(
- char *where,
- xfs_da_args_t *args)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- NULL, NULL);
-}
-
-void
-xfs_dir2_trace_args_b(
- char *where,
- xfs_da_args_t *args,
- xfs_dabuf_t *bp)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_B, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(bp ? bp->bps[0] : NULL), NULL);
-}
-
-void
-xfs_dir2_trace_args_bb(
- char *where,
- xfs_da_args_t *args,
- xfs_dabuf_t *lbp,
- xfs_dabuf_t *dbp)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BB, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(lbp ? lbp->bps[0] : NULL),
- (void *)(dbp ? dbp->bps[0] : NULL));
-}
-
-void
-xfs_dir2_trace_args_bibii(
- char *where,
- xfs_da_args_t *args,
- xfs_dabuf_t *bs,
- int ss,
- xfs_dabuf_t *bd,
- int sd,
- int c)
-{
- xfs_buf_t *bpbs = bs ? bs->bps[0] : NULL;
- xfs_buf_t *bpbd = bd ? bd->bps[0] : NULL;
-
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_BIBII, where,
- (char *)args->name, (int)args->namelen,
- (void *)args->dp, (void *)args->trans,
- (void *)bpbs, (void *)(long)ss, (void *)bpbd, (void *)(long)sd,
- (void *)(long)c, NULL);
-}
-
-void
-xfs_dir2_trace_args_db(
- char *where,
- xfs_da_args_t *args,
- xfs_dir2_db_t db,
- xfs_dabuf_t *bp)
-{
- xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
-
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_DB, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(long)db, (void *)dbp);
-}
-
-void
-xfs_dir2_trace_args_i(
- char *where,
- xfs_da_args_t *args,
- xfs_ino_t i)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_I, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)((unsigned long)(i >> 32)),
- (void *)((unsigned long)(i & 0xFFFFFFFF)));
-}
-
-void
-xfs_dir2_trace_args_s(
- char *where,
- xfs_da_args_t *args,
- int s)
-{
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_S, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(long)s, NULL);
-}
-
-void
-xfs_dir2_trace_args_sb(
- char *where,
- xfs_da_args_t *args,
- int s,
- xfs_dabuf_t *bp)
-{
- xfs_buf_t *dbp = bp ? bp->bps[0] : NULL;
-
- xfs_dir2_trace_enter(args->dp, XFS_DIR2_KTRACE_ARGS_SB, where,
- (char *)args->name, (int)args->namelen,
- (void *)(unsigned long)args->hashval,
- (void *)((unsigned long)(args->inumber >> 32)),
- (void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
- (void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
- (void *)(long)s, (void *)dbp);
-}
-#endif /* XFS_DIR2_TRACE */
diff --git a/fs/xfs/xfs_dir2_trace.h b/fs/xfs/xfs_dir2_trace.h
deleted file mode 100644
index ca3c754f482..00000000000
--- a/fs/xfs/xfs_dir2_trace.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2000,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR2_TRACE_H__
-#define __XFS_DIR2_TRACE_H__
-
-/*
- * Tracing for xfs v2 directories.
- */
-
-#if defined(XFS_DIR2_TRACE)
-
-struct ktrace;
-struct xfs_dabuf;
-struct xfs_da_args;
-
-#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */
-#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */
-extern struct ktrace *xfs_dir2_trace_buf;
-
-#define XFS_DIR2_KTRACE_ARGS 1 /* args only */
-#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */
-#define XFS_DIR2_KTRACE_ARGS_BB 3 /* args + 2 buffers */
-#define XFS_DIR2_KTRACE_ARGS_DB 4 /* args, db, buffer */
-#define XFS_DIR2_KTRACE_ARGS_I 5 /* args, inum */
-#define XFS_DIR2_KTRACE_ARGS_S 6 /* args, int */
-#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */
-#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */
-
-void xfs_dir2_trace_args(char *where, struct xfs_da_args *args);
-void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args,
- struct xfs_dabuf *bp);
-void xfs_dir2_trace_args_bb(char *where, struct xfs_da_args *args,
- struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
-void xfs_dir2_trace_args_bibii(char *where, struct xfs_da_args *args,
- struct xfs_dabuf *bs, int ss,
- struct xfs_dabuf *bd, int sd, int c);
-void xfs_dir2_trace_args_db(char *where, struct xfs_da_args *args,
- xfs_dir2_db_t db, struct xfs_dabuf *bp);
-void xfs_dir2_trace_args_i(char *where, struct xfs_da_args *args, xfs_ino_t i);
-void xfs_dir2_trace_args_s(char *where, struct xfs_da_args *args, int s);
-void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s,
- struct xfs_dabuf *bp);
-
-#else /* XFS_DIR2_TRACE */
-
-#define xfs_dir2_trace_args(where, args)
-#define xfs_dir2_trace_args_b(where, args, bp)
-#define xfs_dir2_trace_args_bb(where, args, lbp, dbp)
-#define xfs_dir2_trace_args_bibii(where, args, bs, ss, bd, sd, c)
-#define xfs_dir2_trace_args_db(where, args, db, bp)
-#define xfs_dir2_trace_args_i(where, args, i)
-#define xfs_dir2_trace_args_s(where, args, s)
-#define xfs_dir2_trace_args_sb(where, args, s, bp)
-
-#endif /* XFS_DIR2_TRACE */
-
-#endif /* __XFS_DIR2_TRACE_H__ */
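The two files deleted above were the last pieces of the old ktrace-based directory tracing; the trace_xfs_dir2_*() calls that replace xfs_dir2_trace_args*() in the hunks above are ordinary Linux tracepoints. As a rough sketch only (the patch's real definitions live in the new xfs_trace.h, whose contents are not part of this diff), one such event could be declared along these lines:

    /* Illustrative sketch, not the actual xfs_trace.h definition. */
    #include <linux/tracepoint.h>

    TRACE_EVENT(xfs_dir2_leaf_lookup,
            TP_PROTO(struct xfs_da_args *args),
            TP_ARGS(args),
            TP_STRUCT__entry(
                    __field(dev_t, dev)
                    __field(xfs_ino_t, ino)
                    __field(int, namelen)
                    __field(xfs_dahash_t, hashval)
            ),
            TP_fast_assign(
                    /* capture the fields the removed helpers used to dump */
                    __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
                    __entry->ino = args->dp->i_ino;
                    __entry->namelen = args->namelen;
                    __entry->hashval = args->hashval;
            ),
            TP_printk("dev %d:%d ino 0x%llx namelen %d hashval 0x%x",
                      MAJOR(__entry->dev), MINOR(__entry->dev),
                      (unsigned long long)__entry->ino,
                      __entry->namelen, __entry->hashval)
    );

Compared with the removed xfs_dir2_trace_enter() ring buffer, the fields are typed, the events appear under the common kernel tracing interface, and a disabled tracepoint costs no more than a conditional branch at the call site.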
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index edf8bdf4141..a631e1451ab 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -34,6 +34,7 @@
#include "xfs_utils.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
+#include "xfs_trace.h"
#ifdef XFS_FILESTREAMS_TRACE
@@ -394,9 +395,7 @@ xfs_filestream_init(void)
item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item");
if (!item_zone)
return -ENOMEM;
-#ifdef XFS_FILESTREAMS_TRACE
- xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_NOFS);
-#endif
+
return 0;
}
@@ -407,9 +406,6 @@ xfs_filestream_init(void)
void
xfs_filestream_uninit(void)
{
-#ifdef XFS_FILESTREAMS_TRACE
- ktrace_free(xfs_filestreams_trace_buf);
-#endif
kmem_zone_destroy(item_zone);
}
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h
index f655f7dc334..4aba67c5f64 100644
--- a/fs/xfs/xfs_filestream.h
+++ b/fs/xfs/xfs_filestream.h
@@ -79,7 +79,7 @@ extern ktrace_t *xfs_filestreams_trace_buf;
* the cache that reference per-ag array elements that have since been
* reallocated.
*/
-STATIC_INLINE int
+static inline int
xfs_filestream_peek_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
@@ -87,7 +87,7 @@ xfs_filestream_peek_ag(
return atomic_read(&mp->m_perag[agno].pagf_fstrms);
}
-STATIC_INLINE int
+static inline int
xfs_filestream_get_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
@@ -95,7 +95,7 @@ xfs_filestream_get_ag(
return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms);
}
-STATIC_INLINE int
+static inline int
xfs_filestream_put_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
@@ -122,7 +122,7 @@ int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp);
/* filestreams for the inode? */
-STATIC_INLINE int
+static inline int
xfs_inode_is_filestream(
struct xfs_inode *ip)
{
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 2d0b3e1da9e..a13919a6a36 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -45,6 +45,7 @@
#include "xfs_rtalloc.h"
#include "xfs_rw.h"
#include "xfs_filestream.h"
+#include "xfs_trace.h"
/*
* File system operations
@@ -201,8 +202,8 @@ xfs_growfs_data_private(
* AG freelist header block
*/
bp = xfs_buf_get(mp->m_ddev_targp,
- XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1), 0);
+ XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
agf = XFS_BUF_TO_AGF(bp);
memset(agf, 0, mp->m_sb.sb_sectsize);
agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
@@ -233,8 +234,8 @@ xfs_growfs_data_private(
* AG inode header block
*/
bp = xfs_buf_get(mp->m_ddev_targp,
- XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
- XFS_FSS_TO_BB(mp, 1), 0);
+ XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
+ XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
agi = XFS_BUF_TO_AGI(bp);
memset(agi, 0, mp->m_sb.sb_sectsize);
agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
@@ -257,8 +258,9 @@ xfs_growfs_data_private(
* BNO btree root block
*/
bp = xfs_buf_get(mp->m_ddev_targp,
- XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
- BTOBB(mp->m_sb.sb_blocksize), 0);
+ XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
+ BTOBB(mp->m_sb.sb_blocksize),
+ XBF_LOCK | XBF_MAPPED);
block = XFS_BUF_TO_BLOCK(bp);
memset(block, 0, mp->m_sb.sb_blocksize);
block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
@@ -278,8 +280,9 @@ xfs_growfs_data_private(
* CNT btree root block
*/
bp = xfs_buf_get(mp->m_ddev_targp,
- XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
- BTOBB(mp->m_sb.sb_blocksize), 0);
+ XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
+ BTOBB(mp->m_sb.sb_blocksize),
+ XBF_LOCK | XBF_MAPPED);
block = XFS_BUF_TO_BLOCK(bp);
memset(block, 0, mp->m_sb.sb_blocksize);
block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
@@ -300,8 +303,9 @@ xfs_growfs_data_private(
* INO btree root block
*/
bp = xfs_buf_get(mp->m_ddev_targp,
- XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
- BTOBB(mp->m_sb.sb_blocksize), 0);
+ XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
+ BTOBB(mp->m_sb.sb_blocksize),
+ XBF_LOCK | XBF_MAPPED);
block = XFS_BUF_TO_BLOCK(bp);
memset(block, 0, mp->m_sb.sb_blocksize);
block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
@@ -344,6 +348,7 @@ xfs_growfs_data_private(
be32_add_cpu(&agf->agf_length, new);
ASSERT(be32_to_cpu(agf->agf_length) ==
be32_to_cpu(agi->agi_length));
+
xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
/*
* Free the new space.
@@ -611,7 +616,7 @@ xfs_fs_log_dummy(
xfs_inode_t *ip;
int error;
- tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
if (error) {
xfs_trans_cancel(tp, 0);
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 0785797db82..cb907ba69c4 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -425,7 +425,7 @@ xfs_ialloc_ag_alloc(
return 0;
}
-STATIC_INLINE xfs_agnumber_t
+STATIC xfs_agnumber_t
xfs_ialloc_next_ag(
xfs_mount_t *mp)
{
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 80e526489be..fa402a6bbbc 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,7 +43,7 @@
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
-#include "xfs_dir2_trace.h"
+#include "xfs_trace.h"
/*
@@ -73,6 +73,9 @@ xfs_inode_alloc(
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
+ ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+
+ mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
/* initialise the xfs inode */
ip->i_ino = ino;
@@ -87,30 +90,8 @@ xfs_inode_alloc(
ip->i_size = 0;
ip->i_new_size = 0;
- /*
- * Initialize inode's trace buffers.
- */
-#ifdef XFS_INODE_TRACE
- ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_BMAP_TRACE
- ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_BTREE_TRACE
- ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_RW_TRACE
- ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_ILOCK_TRACE
- ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
-#endif
-#ifdef XFS_DIR2_TRACE
- ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
-#endif
-
/* prevent anyone from using this yet */
- VFS_I(ip)->i_state = I_NEW|I_LOCK;
+ VFS_I(ip)->i_state = I_NEW;
return ip;
}
@@ -130,25 +111,6 @@ xfs_inode_free(
if (ip->i_afp)
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-#ifdef XFS_INODE_TRACE
- ktrace_free(ip->i_trace);
-#endif
-#ifdef XFS_BMAP_TRACE
- ktrace_free(ip->i_xtrace);
-#endif
-#ifdef XFS_BTREE_TRACE
- ktrace_free(ip->i_btrace);
-#endif
-#ifdef XFS_RW_TRACE
- ktrace_free(ip->i_rwtrace);
-#endif
-#ifdef XFS_ILOCK_TRACE
- ktrace_free(ip->i_lock_trace);
-#endif
-#ifdef XFS_DIR2_TRACE
- ktrace_free(ip->i_dir_trace);
-#endif
-
if (ip->i_itemp) {
/*
* Only if we are shutting down the fs will we see an
@@ -207,6 +169,7 @@ xfs_iget_cache_hit(
* instead of polling for it.
*/
if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
+ trace_xfs_iget_skip(ip);
XFS_STATS_INC(xs_ig_frecycle);
error = EAGAIN;
goto out_error;
@@ -225,7 +188,7 @@ xfs_iget_cache_hit(
* Need to carefully get it back into useable state.
*/
if (ip->i_flags & XFS_IRECLAIMABLE) {
- xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
+ trace_xfs_iget_reclaim(ip);
/*
* We need to set XFS_INEW atomically with clearing the
@@ -251,9 +214,10 @@ xfs_iget_cache_hit(
ip->i_flags &= ~XFS_INEW;
ip->i_flags |= XFS_IRECLAIMABLE;
__xfs_inode_set_reclaim_tag(pag, ip);
+ trace_xfs_iget_reclaim(ip);
goto out_error;
}
- inode->i_state = I_LOCK|I_NEW;
+ inode->i_state = I_NEW;
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
@@ -270,8 +234,9 @@ xfs_iget_cache_hit(
xfs_ilock(ip, lock_flags);
xfs_iflags_clear(ip, XFS_ISTALE);
- xfs_itrace_exit_tag(ip, "xfs_iget.found");
XFS_STATS_INC(xs_ig_found);
+
+ trace_xfs_iget_found(ip);
return 0;
out_error:
@@ -290,7 +255,7 @@ xfs_iget_cache_miss(
struct xfs_inode **ipp,
xfs_daddr_t bno,
int flags,
- int lock_flags) __releases(pag->pag_ici_lock)
+ int lock_flags)
{
struct xfs_inode *ip;
int error;
@@ -305,7 +270,7 @@ xfs_iget_cache_miss(
if (error)
goto out_destroy;
- xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
+ xfs_itrace_entry(ip);
if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
error = ENOENT;
@@ -350,6 +315,8 @@ xfs_iget_cache_miss(
write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
+
+ trace_xfs_iget_alloc(ip);
*ipp = ip;
return 0;
@@ -511,17 +478,21 @@ xfs_ireclaim(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
XFS_STATS_INC(xs_ig_reclaims);
/*
- * Remove the inode from the per-AG radix tree. It doesn't matter
- * if it was never added to it because radix_tree_delete can deal
- * with that case just fine.
+ * Remove the inode from the per-AG radix tree.
+ *
+ * Because radix_tree_delete won't complain even if the item was never
+ * added to the tree, assert that it has been there before so that
+ * problems with the inode lifetime are caught early on.
*/
pag = xfs_get_perag(mp, ip->i_ino);
write_lock(&pag->pag_ici_lock);
- radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
+ if (!radix_tree_delete(&pag->pag_ici_root, agino))
+ ASSERT(0);
write_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
@@ -636,7 +607,7 @@ xfs_ilock(
else if (lock_flags & XFS_ILOCK_SHARED)
mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
- xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
+ trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
/*
@@ -681,7 +652,7 @@ xfs_ilock_nowait(
if (!mrtryaccess(&ip->i_lock))
goto out_undo_iolock;
}
- xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
+ trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
return 1;
out_undo_iolock:
@@ -743,7 +714,7 @@ xfs_iunlock(
xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
(xfs_log_item_t*)(ip->i_itemp));
}
- xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
+ trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
@@ -762,6 +733,8 @@ xfs_ilock_demote(
mrdemote(&ip->i_lock);
if (lock_flags & XFS_IOLOCK_EXCL)
mrdemote(&ip->i_iolock);
+
+ trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
#ifdef DEBUG
@@ -792,52 +765,3 @@ xfs_isilocked(
return 1;
}
#endif
-
-#ifdef XFS_INODE_TRACE
-
-#define KTRACE_ENTER(ip, vk, s, line, ra) \
- ktrace_enter((ip)->i_trace, \
-/* 0 */ (void *)(__psint_t)(vk), \
-/* 1 */ (void *)(s), \
-/* 2 */ (void *)(__psint_t) line, \
-/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
-/* 4 */ (void *)(ra), \
-/* 5 */ NULL, \
-/* 6 */ (void *)(__psint_t)current_cpu(), \
-/* 7 */ (void *)(__psint_t)current_pid(), \
-/* 8 */ (void *)__return_address, \
-/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
-
-/*
- * Vnode tracing code.
- */
-void
-_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
-}
-
-void
-_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
-}
-
-void
-xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
-}
-
-void
-_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
-}
-
-void
-xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
-{
- KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
-}
-#endif /* XFS_INODE_TRACE */
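The xfs_iget.c hunks above convert the xfs_ilock_trace()/KTRACE_ENTER machinery into four tracepoints (trace_xfs_ilock, trace_xfs_ilock_nowait, trace_xfs_iunlock, trace_xfs_ilock_demote) that all take the same inode, lock_flags and caller-address triple. A plausible backing for them, sketched here rather than copied from the patch, is one shared event class plus per-name stubs; the XFS_LOCK_FLAGS name table added to xfs_inode.h further down in this diff is the kind of table __print_flags() consumes:

    /* Sketch only; assumes the four lock events share one class. */
    DECLARE_EVENT_CLASS(xfs_lock_class,
            TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
                     unsigned long caller_ip),
            TP_ARGS(ip, lock_flags, caller_ip),
            TP_STRUCT__entry(
                    __field(dev_t, dev)
                    __field(xfs_ino_t, ino)
                    __field(int, lock_flags)
                    __field(unsigned long, caller_ip)
            ),
            TP_fast_assign(
                    __entry->dev = VFS_I(ip)->i_sb->s_dev;
                    __entry->ino = ip->i_ino;
                    __entry->lock_flags = lock_flags;
                    __entry->caller_ip = caller_ip;
            ),
            TP_printk("dev %d:%d ino 0x%llx flags %s caller 0x%lx",
                      MAJOR(__entry->dev), MINOR(__entry->dev),
                      (unsigned long long)__entry->ino,
                      __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
                      __entry->caller_ip)
    );

    #define DEFINE_LOCK_EVENT(name) \
    DEFINE_EVENT(xfs_lock_class, name, \
            TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
                     unsigned long caller_ip), \
            TP_ARGS(ip, lock_flags, caller_ip))
    DEFINE_LOCK_EVENT(xfs_ilock);
    DEFINE_LOCK_EVENT(xfs_ilock_nowait);
    DEFINE_LOCK_EVENT(xfs_iunlock);
    DEFINE_LOCK_EVENT(xfs_ilock_demote);

This is also why the call sites switch from (inst_t *)__return_address to _RET_IP_: the tracepoint carries the caller address as a plain unsigned long.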
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b92a4fa2a0a..ce278b3ae7f 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -47,10 +47,10 @@
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
-#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
@@ -1291,42 +1291,6 @@ xfs_file_last_byte(
return last_byte;
}
-#if defined(XFS_RW_TRACE)
-STATIC void
-xfs_itrunc_trace(
- int tag,
- xfs_inode_t *ip,
- int flag,
- xfs_fsize_t new_size,
- xfs_off_t toss_start,
- xfs_off_t toss_finish)
-{
- if (ip->i_rwtrace == NULL) {
- return;
- }
-
- ktrace_enter(ip->i_rwtrace,
- (void*)((long)tag),
- (void*)ip,
- (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
- (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
- (void*)((long)flag),
- (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
- (void*)(unsigned long)(new_size & 0xffffffff),
- (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
- (void*)(unsigned long)(toss_start & 0xffffffff),
- (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
- (void*)(unsigned long)(toss_finish & 0xffffffff),
- (void*)(unsigned long)current_cpu(),
- (void*)(unsigned long)current_pid(),
- (void*)NULL,
- (void*)NULL,
- (void*)NULL);
-}
-#else
-#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
-#endif
-
/*
* Start the truncation of the file to new_size. The new size
* must be smaller than the current size. This routine will
@@ -1409,8 +1373,7 @@ xfs_itruncate_start(
return 0;
}
last_byte = xfs_file_last_byte(ip);
- xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
- last_byte);
+ trace_xfs_itruncate_start(ip, flags, new_size, toss_start, last_byte);
if (last_byte > toss_start) {
if (flags & XFS_ITRUNC_DEFINITE) {
xfs_tosspages(ip, toss_start,
@@ -1514,7 +1477,8 @@ xfs_itruncate_finish(
new_size = 0LL;
}
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
- xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
+ trace_xfs_itruncate_finish_start(ip, new_size);
+
/*
* The first thing we do is set the size to new_size permanently
* on disk. This way we don't have to worry about anyone ever
@@ -1731,7 +1695,7 @@ xfs_itruncate_finish(
ASSERT((new_size != 0) ||
(fork == XFS_ATTR_FORK) ||
(ip->i_d.di_nextents == 0));
- xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
+ trace_xfs_itruncate_finish_end(ip, new_size);
return 0;
}
@@ -3252,23 +3216,6 @@ corrupt_out:
return XFS_ERROR(EFSCORRUPTED);
}
-
-
-#ifdef XFS_ILOCK_TRACE
-void
-xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
-{
- ktrace_enter(ip->i_lock_trace,
- (void *)ip,
- (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
- (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
- (void *)ra, /* caller of ilock */
- (void *)(unsigned long)current_cpu(),
- (void *)(unsigned long)current_pid(),
- NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
-}
-#endif
-
/*
* Return a pointer to the extent record at file index idx.
*/
@@ -3300,13 +3247,17 @@ xfs_iext_get_ext(
*/
void
xfs_iext_insert(
- xfs_ifork_t *ifp, /* inode fork pointer */
+ xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t idx, /* starting index of new items */
xfs_extnum_t count, /* number of inserted items */
- xfs_bmbt_irec_t *new) /* items to insert */
+ xfs_bmbt_irec_t *new, /* items to insert */
+ int state) /* type of extent conversion */
{
+ xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
xfs_extnum_t i; /* extent record index */
+ trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
+
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
xfs_iext_add(ifp, idx, count);
for (i = idx; i < idx + count; i++, new++)
@@ -3549,13 +3500,17 @@ xfs_iext_add_indirect_multi(
*/
void
xfs_iext_remove(
- xfs_ifork_t *ifp, /* inode fork pointer */
+ xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t idx, /* index to begin removing exts */
- int ext_diff) /* number of extents to remove */
+ int ext_diff, /* number of extents to remove */
+ int state) /* type of extent conversion */
{
+ xfs_ifork_t *ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
xfs_extnum_t nextents; /* number of extents in file */
int new_size; /* size of extents after removal */
+ trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
+
ASSERT(ext_diff > 0);
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 41555de1d1d..ec1f28c4fc4 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -213,7 +213,6 @@ typedef struct xfs_icdinode {
struct bhv_desc;
struct cred;
-struct ktrace;
struct xfs_buf;
struct xfs_bmap_free;
struct xfs_bmbt_irec;
@@ -222,13 +221,6 @@ struct xfs_mount;
struct xfs_trans;
struct xfs_dquot;
-#if defined(XFS_ILOCK_TRACE)
-#define XFS_ILOCK_KTRACE_SIZE 32
-extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *);
-#else
-#define xfs_ilock_trace(i,n,f,ra)
-#endif
-
typedef struct dm_attrs_s {
__uint32_t da_dmevmask; /* DMIG event mask */
__uint16_t da_dmstate; /* DMIG state info */
@@ -271,26 +263,6 @@ typedef struct xfs_inode {
/* VFS inode */
struct inode i_vnode; /* embedded VFS inode */
-
- /* Trace buffers per inode. */
-#ifdef XFS_INODE_TRACE
- struct ktrace *i_trace; /* general inode trace */
-#endif
-#ifdef XFS_BMAP_TRACE
- struct ktrace *i_xtrace; /* inode extent list trace */
-#endif
-#ifdef XFS_BTREE_TRACE
- struct ktrace *i_btrace; /* inode bmap btree trace */
-#endif
-#ifdef XFS_RW_TRACE
- struct ktrace *i_rwtrace; /* inode read/write trace */
-#endif
-#ifdef XFS_ILOCK_TRACE
- struct ktrace *i_lock_trace; /* inode lock/unlock trace */
-#endif
-#ifdef XFS_DIR2_TRACE
- struct ktrace *i_dir_trace; /* inode directory trace */
-#endif
} xfs_inode_t;
#define XFS_ISIZE(ip) (((ip)->i_d.di_mode & S_IFMT) == S_IFREG) ? \
@@ -406,6 +378,14 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
+#define XFS_LOCK_FLAGS \
+ { XFS_IOLOCK_EXCL, "IOLOCK_EXCL" }, \
+ { XFS_IOLOCK_SHARED, "IOLOCK_SHARED" }, \
+ { XFS_ILOCK_EXCL, "ILOCK_EXCL" }, \
+ { XFS_ILOCK_SHARED, "ILOCK_SHARED" }, \
+ { XFS_IUNLOCK_NONOTIFY, "IUNLOCK_NONOTIFY" }
+
+
/*
* Flags for lockdep annotations.
*
@@ -455,6 +435,10 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_ITRUNC_DEFINITE 0x1
#define XFS_ITRUNC_MAYBE 0x2
+#define XFS_ITRUNC_FLAGS \
+ { XFS_ITRUNC_DEFINITE, "DEFINITE" }, \
+ { XFS_ITRUNC_MAYBE, "MAYBE" }
+
/*
* For multiple groups support: if S_ISGID bit is set in the parent
* directory, group of new file is set to that of the parent, and
@@ -507,48 +491,16 @@ void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
void xfs_synchronize_times(xfs_inode_t *);
void xfs_mark_inode_dirty_sync(xfs_inode_t *);
-#if defined(XFS_INODE_TRACE)
-
-#define INODE_TRACE_SIZE 16 /* number of trace entries */
-#define INODE_KTRACE_ENTRY 1
-#define INODE_KTRACE_EXIT 2
-#define INODE_KTRACE_HOLD 3
-#define INODE_KTRACE_REF 4
-#define INODE_KTRACE_RELE 5
-
-extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *);
-extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *);
-extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *);
-extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *);
-extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *);
-#define xfs_itrace_entry(ip) \
- _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address)
-#define xfs_itrace_exit(ip) \
- _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address)
-#define xfs_itrace_exit_tag(ip, tag) \
- _xfs_itrace_exit(ip, tag, (inst_t *)__return_address)
-#define xfs_itrace_ref(ip) \
- _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address)
-
-#else
-#define xfs_itrace_entry(a)
-#define xfs_itrace_exit(a)
-#define xfs_itrace_exit_tag(a, b)
-#define xfs_itrace_hold(a, b, c, d)
-#define xfs_itrace_ref(a)
-#define xfs_itrace_rele(a, b, c, d)
-#endif
-
#define IHOLD(ip) \
do { \
ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
atomic_inc(&(VFS_I(ip)->i_count)); \
- xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+ trace_xfs_ihold(ip, _THIS_IP_); \
} while (0)
#define IRELE(ip) \
do { \
- xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \
+ trace_xfs_irele(ip, _THIS_IP_); \
iput(VFS_I(ip)); \
} while (0)
@@ -577,11 +529,11 @@ int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
int xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int);
xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t);
-void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t,
- xfs_bmbt_irec_t *);
+void xfs_iext_insert(xfs_inode_t *, xfs_extnum_t, xfs_extnum_t,
+ xfs_bmbt_irec_t *, int);
void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int);
-void xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int);
+void xfs_iext_remove(xfs_inode_t *, xfs_extnum_t, int, int);
void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int);
void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9794b876d6f..f38855d21ea 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -41,6 +41,7 @@
#include "xfs_ialloc.h"
#include "xfs_rw.h"
#include "xfs_error.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
@@ -800,7 +801,9 @@ xfs_inode_item_pushbuf(
!completion_done(&ip->i_flush));
iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- xfs_buftrace("INODE ITEM PUSH", bp);
+
+ trace_xfs_inode_item_push(bp, _RET_IP_);
+
if (XFS_BUF_ISPINNED(bp)) {
xfs_log_force(mp, (xfs_lsn_t)0,
XFS_LOG_FORCE);
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 65bae4c9b8b..cc8df1ac778 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -127,7 +127,7 @@ static inline int xfs_ilog_fdata(int w)
#ifdef __KERNEL__
struct xfs_buf;
-struct xfs_bmbt_rec_64;
+struct xfs_bmbt_rec;
struct xfs_inode;
struct xfs_mount;
@@ -140,9 +140,9 @@ typedef struct xfs_inode_log_item {
unsigned short ili_flags; /* misc flags */
unsigned short ili_logged; /* flushed logged data */
unsigned int ili_last_fields; /* fields when flushed */
- struct xfs_bmbt_rec_64 *ili_extents_buf; /* array of logged
+ struct xfs_bmbt_rec *ili_extents_buf; /* array of logged
data exts */
- struct xfs_bmbt_rec_64 *ili_aextents_buf; /* array of logged
+ struct xfs_bmbt_rec *ili_aextents_buf; /* array of logged
attr exts */
unsigned int ili_pushbuf_flag; /* one bit used in push_ail */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 67ae5555a30..0b65039951a 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,72 +47,8 @@
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
+#include "xfs_trace.h"
-#if defined(XFS_RW_TRACE)
-void
-xfs_iomap_enter_trace(
- int tag,
- xfs_inode_t *ip,
- xfs_off_t offset,
- ssize_t count)
-{
- if (!ip->i_rwtrace)
- return;
-
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)count),
- (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_new_size & 0xffffffff)),
- (void *)((unsigned long)current_pid()),
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL,
- (void *)NULL);
-}
-
-void
-xfs_iomap_map_trace(
- int tag,
- xfs_inode_t *ip,
- xfs_off_t offset,
- ssize_t count,
- xfs_iomap_t *iomapp,
- xfs_bmbt_irec_t *imapp,
- int flags)
-{
- if (!ip->i_rwtrace)
- return;
-
- ktrace_enter(ip->i_rwtrace,
- (void *)((unsigned long)tag),
- (void *)ip,
- (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
- (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
- (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(offset & 0xffffffff)),
- (void *)((unsigned long)count),
- (void *)((unsigned long)flags),
- (void *)((unsigned long)((iomapp->iomap_offset >> 32) & 0xffffffff)),
- (void *)((unsigned long)(iomapp->iomap_offset & 0xffffffff)),
- (void *)((unsigned long)(iomapp->iomap_delta)),
- (void *)((unsigned long)(iomapp->iomap_bsize)),
- (void *)((unsigned long)(iomapp->iomap_bn)),
- (void *)(__psint_t)(imapp->br_startoff),
- (void *)((unsigned long)(imapp->br_blockcount)),
- (void *)(__psint_t)(imapp->br_startblock));
-}
-#else
-#define xfs_iomap_enter_trace(tag, io, offset, count)
-#define xfs_iomap_map_trace(tag, io, offset, count, iomapp, imapp, flags)
-#endif
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
@@ -187,21 +123,20 @@ xfs_iomap(
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
+ trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
+
switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
case BMAPI_READ:
- xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, ip, offset, count);
lockmode = xfs_ilock_map_shared(ip);
bmapi_flags = XFS_BMAPI_ENTIRE;
break;
case BMAPI_WRITE:
- xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count);
lockmode = XFS_ILOCK_EXCL;
if (flags & BMAPI_IGNSTATE)
bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
xfs_ilock(ip, lockmode);
break;
case BMAPI_ALLOCATE:
- xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count);
lockmode = XFS_ILOCK_SHARED;
bmapi_flags = XFS_BMAPI_ENTIRE;
@@ -237,8 +172,7 @@ xfs_iomap(
if (nimaps &&
(imap.br_startblock != HOLESTARTBLOCK) &&
(imap.br_startblock != DELAYSTARTBLOCK)) {
- xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip,
- offset, count, iomapp, &imap, flags);
+ trace_xfs_iomap_found(ip, offset, count, flags, &imap);
break;
}
@@ -250,8 +184,7 @@ xfs_iomap(
&imap, &nimaps);
}
if (!error) {
- xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, ip,
- offset, count, iomapp, &imap, flags);
+ trace_xfs_iomap_alloc(ip, offset, count, flags, &imap);
}
iomap_flags = IOMAP_NEW;
break;
@@ -261,8 +194,7 @@ xfs_iomap(
lockmode = 0;
if (nimaps && !isnullstartblock(imap.br_startblock)) {
- xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip,
- offset, count, iomapp, &imap, flags);
+ trace_xfs_iomap_found(ip, offset, count, flags, &imap);
break;
}
@@ -623,8 +555,7 @@ retry:
* delalloc blocks and retry without EOF preallocation.
*/
if (nimaps == 0) {
- xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
- ip, offset, count);
+ trace_xfs_delalloc_enospc(ip, offset, count);
if (flushed)
return XFS_ERROR(ENOSPC);
@@ -837,7 +768,7 @@ xfs_iomap_write_unwritten(
int committed;
int error;
- xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count);
+ trace_xfs_unwritten_convert(ip, offset, count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
@@ -860,8 +791,15 @@ xfs_iomap_write_unwritten(
* set up a transaction to convert the range of extents
* from unwritten to real. Do allocations in a loop until
* we have covered the range passed in.
+ *
+ * Note that we open code the transaction allocation here
+ * to pass KM_NOFS--we can't risk recursing back into
+ * the filesystem here as we might be asked to write out
+ * the same inode that we complete here and might deadlock
+ * on the iolock.
*/
- tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
+ xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
tp->t_flags |= XFS_TRANS_RESERVE;
error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), 0,
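The xfs_iomap_write_unwritten() hunk just above open-codes what xfs_trans_alloc() normally does so that it can pass KM_NOFS and avoid recursing back into the filesystem from memory reclaim while holding the iolock. For comparison, the ordinary helper presumably amounts to the following (a sketch under that assumption, not the actual xfs_trans.c code):

    xfs_trans_t *
    xfs_trans_alloc(
            xfs_mount_t     *mp,
            uint            type)
    {
            /* Block while the fs is frozen, then allocate with KM_SLEEP,
             * which may re-enter the filesystem via memory reclaim. */
            xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
            return _xfs_trans_alloc(mp, type, KM_SLEEP);
    }

The unwritten-extent conversion path keeps the freeze wait but swaps KM_SLEEP for KM_NOFS, since the allocation may be servicing writeback of the very inode it would otherwise try to flush.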
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index fdcf7b82747..174f2999099 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -43,6 +43,14 @@ typedef enum {
BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */
} bmapi_flags_t;
+#define BMAPI_FLAGS \
+ { BMAPI_READ, "READ" }, \
+ { BMAPI_WRITE, "WRITE" }, \
+ { BMAPI_ALLOCATE, "ALLOCATE" }, \
+ { BMAPI_IGNSTATE, "IGNSTATE" }, \
+ { BMAPI_DIRECT, "DIRECT" }, \
+ { BMAPI_MMAP, "MMAP" }, \
+ { BMAPI_TRYLOCK, "TRYLOCK" }
/*
* xfs_iomap_t: File system I/O map
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9dbdff3ea48..600b5b06aae 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -40,6 +40,7 @@
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_rw.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_log_ticket_zone;
@@ -122,85 +123,6 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
STATIC int xlog_iclogs_empty(xlog_t *log);
-#if defined(XFS_LOG_TRACE)
-
-#define XLOG_TRACE_LOGGRANT_SIZE 2048
-#define XLOG_TRACE_ICLOG_SIZE 256
-
-void
-xlog_trace_loggrant_alloc(xlog_t *log)
-{
- log->l_grant_trace = ktrace_alloc(XLOG_TRACE_LOGGRANT_SIZE, KM_NOFS);
-}
-
-void
-xlog_trace_loggrant_dealloc(xlog_t *log)
-{
- ktrace_free(log->l_grant_trace);
-}
-
-void
-xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
-{
- unsigned long cnts;
-
- /* ticket counts are 1 byte each */
- cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8;
-
- ktrace_enter(log->l_grant_trace,
- (void *)tic,
- (void *)log->l_reserve_headq,
- (void *)log->l_write_headq,
- (void *)((unsigned long)log->l_grant_reserve_cycle),
- (void *)((unsigned long)log->l_grant_reserve_bytes),
- (void *)((unsigned long)log->l_grant_write_cycle),
- (void *)((unsigned long)log->l_grant_write_bytes),
- (void *)((unsigned long)log->l_curr_cycle),
- (void *)((unsigned long)log->l_curr_block),
- (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
- (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
- (void *)string,
- (void *)((unsigned long)tic->t_trans_type),
- (void *)cnts,
- (void *)((unsigned long)tic->t_curr_res),
- (void *)((unsigned long)tic->t_unit_res));
-}
-
-void
-xlog_trace_iclog_alloc(xlog_in_core_t *iclog)
-{
- iclog->ic_trace = ktrace_alloc(XLOG_TRACE_ICLOG_SIZE, KM_NOFS);
-}
-
-void
-xlog_trace_iclog_dealloc(xlog_in_core_t *iclog)
-{
- ktrace_free(iclog->ic_trace);
-}
-
-void
-xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
-{
- ktrace_enter(iclog->ic_trace,
- (void *)((unsigned long)state),
- (void *)((unsigned long)current_pid()),
- (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
- (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
- (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
- (void *)NULL, (void *)NULL);
-}
-#else
-
-#define xlog_trace_loggrant_alloc(log)
-#define xlog_trace_loggrant_dealloc(log)
-#define xlog_trace_loggrant(log,tic,string)
-
-#define xlog_trace_iclog_alloc(iclog)
-#define xlog_trace_iclog_dealloc(iclog)
-#define xlog_trace_iclog(iclog,state)
-
-#endif /* XFS_LOG_TRACE */
-
static void
xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
@@ -353,15 +275,17 @@ xfs_log_done(xfs_mount_t *mp,
if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
(flags & XFS_LOG_REL_PERM_RESERV)) {
+ trace_xfs_log_done_nonperm(log, ticket);
+
/*
* Release ticket if not permanent reservation or a specific
* request has been made to release a permanent reservation.
*/
- xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
xlog_ungrant_log_space(log, ticket);
xfs_log_ticket_put(ticket);
} else {
- xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
+ trace_xfs_log_done_perm(log, ticket);
+
xlog_regrant_reserve_log_space(log, ticket);
/* If this ticket was a permanent reservation and we aren't
* trying to release it, reset the inited flags; so next time
@@ -505,10 +429,13 @@ xfs_log_reserve(xfs_mount_t *mp,
XFS_STATS_INC(xs_try_logspace);
+
if (*ticket != NULL) {
ASSERT(flags & XFS_LOG_PERM_RESERV);
internal_ticket = (xlog_ticket_t *)*ticket;
- xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)");
+
+ trace_xfs_log_reserve(log, internal_ticket);
+
xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
retval = xlog_regrant_write_log_space(log, internal_ticket);
} else {
@@ -519,10 +446,9 @@ xfs_log_reserve(xfs_mount_t *mp,
return XFS_ERROR(ENOMEM);
internal_ticket->t_trans_type = t_type;
*ticket = internal_ticket;
- xlog_trace_loggrant(log, internal_ticket,
- (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ?
- "xfs_log_reserve: create new ticket (permanent trans)" :
- "xfs_log_reserve: create new ticket");
+
+ trace_xfs_log_reserve(log, internal_ticket);
+
xlog_grant_push_ail(mp,
(internal_ticket->t_unit_res *
internal_ticket->t_cnt));
@@ -734,7 +660,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
spin_unlock(&log->l_icloglock);
}
if (tic) {
- xlog_trace_loggrant(log, tic, "unmount rec");
+ trace_xfs_log_umount_write(log, tic);
xlog_ungrant_log_space(log, tic);
xfs_log_ticket_put(tic);
}
@@ -1030,7 +956,6 @@ xlog_iodone(xfs_buf_t *bp)
xfs_fs_cmn_err(CE_WARN, l->l_mp,
"xlog_iodone: Barriers are no longer supported"
" by device. Disabling barriers\n");
- xfs_buftrace("XLOG_IODONE BARRIERS OFF", bp);
}
/*
@@ -1085,13 +1010,10 @@ xlog_bdstrat_cb(struct xfs_buf *bp)
return 0;
}
- xfs_buftrace("XLOG__BDSTRAT IOERROR", bp);
XFS_BUF_ERROR(bp, EIO);
XFS_BUF_STALE(bp);
xfs_biodone(bp);
return XFS_ERROR(EIO);
-
-
}
/*
@@ -1246,7 +1168,6 @@ xlog_alloc_log(xfs_mount_t *mp,
spin_lock_init(&log->l_grant_lock);
sv_init(&log->l_flush_wait, 0, "flush_wait");
- xlog_trace_loggrant_alloc(log);
/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1305,8 +1226,6 @@ xlog_alloc_log(xfs_mount_t *mp,
sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
- xlog_trace_iclog_alloc(iclog);
-
iclogp = &iclog->ic_next;
}
*iclogp = log->l_iclog; /* complete ring */
@@ -1321,13 +1240,11 @@ out_free_iclog:
sv_destroy(&iclog->ic_force_wait);
sv_destroy(&iclog->ic_write_wait);
xfs_buf_free(iclog->ic_bp);
- xlog_trace_iclog_dealloc(iclog);
}
kmem_free(iclog);
}
spinlock_destroy(&log->l_icloglock);
spinlock_destroy(&log->l_grant_lock);
- xlog_trace_loggrant_dealloc(log);
xfs_buf_free(log->l_xbuf);
out_free_log:
kmem_free(log);
@@ -1524,6 +1441,7 @@ xlog_sync(xlog_t *log,
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
+ bp->b_flags |= XBF_LOG_BUFFER;
/*
* Do an ordered write for the log block.
* It's unnecessary to flush the first split block in the log wrap case.
@@ -1561,6 +1479,7 @@ xlog_sync(xlog_t *log,
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
+ bp->b_flags |= XBF_LOG_BUFFER;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
XFS_BUF_ORDERED(bp);
dptr = XFS_BUF_PTR(bp);
@@ -1607,7 +1526,6 @@ xlog_dealloc_log(xlog_t *log)
sv_destroy(&iclog->ic_force_wait);
sv_destroy(&iclog->ic_write_wait);
xfs_buf_free(iclog->ic_bp);
- xlog_trace_iclog_dealloc(iclog);
next_iclog = iclog->ic_next;
kmem_free(iclog);
iclog = next_iclog;
@@ -1616,7 +1534,6 @@ xlog_dealloc_log(xlog_t *log)
spinlock_destroy(&log->l_grant_lock);
xfs_buf_free(log->l_xbuf);
- xlog_trace_loggrant_dealloc(log);
log->l_mp->m_log = NULL;
kmem_free(log);
} /* xlog_dealloc_log */
@@ -2414,7 +2331,6 @@ restart:
iclog = log->l_iclog;
if (iclog->ic_state != XLOG_STATE_ACTIVE) {
- xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
XFS_STATS_INC(xs_log_noiclogs);
/* Wait for log writes to have flushed */
@@ -2520,13 +2436,15 @@ xlog_grant_log_space(xlog_t *log,
/* Is there space or do we need to sleep? */
spin_lock(&log->l_grant_lock);
- xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter");
+
+ trace_xfs_log_grant_enter(log, tic);
/* something is already sleeping; insert new transaction at end */
if (log->l_reserve_headq) {
xlog_ins_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: sleep 1");
+
+ trace_xfs_log_grant_sleep1(log, tic);
+
/*
* Gotta check this before going to sleep, while we're
* holding the grant lock.
@@ -2540,8 +2458,7 @@ xlog_grant_log_space(xlog_t *log,
* If we got an error, and the filesystem is shutting down,
* we'll catch it down below. So just continue...
*/
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: wake 1");
+ trace_xfs_log_grant_wake1(log, tic);
spin_lock(&log->l_grant_lock);
}
if (tic->t_flags & XFS_LOG_PERM_RESERV)
@@ -2558,8 +2475,9 @@ redo:
if (free_bytes < need_bytes) {
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
xlog_ins_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: sleep 2");
+
+ trace_xfs_log_grant_sleep2(log, tic);
+
spin_unlock(&log->l_grant_lock);
xlog_grant_push_ail(log->l_mp, need_bytes);
spin_lock(&log->l_grant_lock);
@@ -2571,8 +2489,8 @@ redo:
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- xlog_trace_loggrant(log, tic,
- "xlog_grant_log_space: wake 2");
+ trace_xfs_log_grant_wake2(log, tic);
+
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_reserve_headq, tic);
@@ -2592,7 +2510,7 @@ redo:
ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
}
#endif
- xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit");
+ trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_head(log, 1);
spin_unlock(&log->l_grant_lock);
return 0;
@@ -2600,7 +2518,9 @@ redo:
error_return:
if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
+
+ trace_xfs_log_grant_error(log, tic);
+
/*
* If we are failing, make sure the ticket doesn't have any
* current reservations. We don't want to add this back when
@@ -2640,7 +2560,8 @@ xlog_regrant_write_log_space(xlog_t *log,
#endif
spin_lock(&log->l_grant_lock);
- xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter");
+
+ trace_xfs_log_regrant_write_enter(log, tic);
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
@@ -2669,8 +2590,8 @@ xlog_regrant_write_log_space(xlog_t *log,
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
xlog_ins_ticketq(&log->l_write_headq, tic);
- xlog_trace_loggrant(log, tic,
- "xlog_regrant_write_log_space: sleep 1");
+ trace_xfs_log_regrant_write_sleep1(log, tic);
+
spin_unlock(&log->l_grant_lock);
xlog_grant_push_ail(log->l_mp, need_bytes);
spin_lock(&log->l_grant_lock);
@@ -2685,8 +2606,7 @@ xlog_regrant_write_log_space(xlog_t *log,
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- xlog_trace_loggrant(log, tic,
- "xlog_regrant_write_log_space: wake 1");
+ trace_xfs_log_regrant_write_wake1(log, tic);
}
}
@@ -2704,6 +2624,8 @@ redo:
spin_lock(&log->l_grant_lock);
XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_regrant_write_sleep2(log, tic);
+
sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
/* If we're shutting down, this tic is already off the queue */
@@ -2711,8 +2633,7 @@ redo:
if (XLOG_FORCED_SHUTDOWN(log))
goto error_return;
- xlog_trace_loggrant(log, tic,
- "xlog_regrant_write_log_space: wake 2");
+ trace_xfs_log_regrant_write_wake2(log, tic);
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_write_headq, tic);
@@ -2727,7 +2648,8 @@ redo:
}
#endif
- xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit");
+ trace_xfs_log_regrant_write_exit(log, tic);
+
xlog_verify_grant_head(log, 1);
spin_unlock(&log->l_grant_lock);
return 0;
@@ -2736,7 +2658,9 @@ redo:
error_return:
if (tic->t_flags & XLOG_TIC_IN_Q)
xlog_del_ticketq(&log->l_reserve_headq, tic);
- xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
+
+ trace_xfs_log_regrant_write_error(log, tic);
+
/*
* If we are failing, make sure the ticket doesn't have any
* current reservations. We don't want to add this back when
@@ -2760,8 +2684,8 @@ STATIC void
xlog_regrant_reserve_log_space(xlog_t *log,
xlog_ticket_t *ticket)
{
- xlog_trace_loggrant(log, ticket,
- "xlog_regrant_reserve_log_space: enter");
+ trace_xfs_log_regrant_reserve_enter(log, ticket);
+
if (ticket->t_cnt > 0)
ticket->t_cnt--;
@@ -2769,8 +2693,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
xlog_grant_sub_space(log, ticket->t_curr_res);
ticket->t_curr_res = ticket->t_unit_res;
xlog_tic_reset_res(ticket);
- xlog_trace_loggrant(log, ticket,
- "xlog_regrant_reserve_log_space: sub current res");
+
+ trace_xfs_log_regrant_reserve_sub(log, ticket);
+
xlog_verify_grant_head(log, 1);
/* just return if we still have some of the pre-reserved space */
@@ -2780,8 +2705,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
}
xlog_grant_add_space_reserve(log, ticket->t_unit_res);
- xlog_trace_loggrant(log, ticket,
- "xlog_regrant_reserve_log_space: exit");
+
+ trace_xfs_log_regrant_reserve_exit(log, ticket);
+
xlog_verify_grant_head(log, 0);
spin_unlock(&log->l_grant_lock);
ticket->t_curr_res = ticket->t_unit_res;
@@ -2811,11 +2737,11 @@ xlog_ungrant_log_space(xlog_t *log,
ticket->t_cnt--;
spin_lock(&log->l_grant_lock);
- xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
+ trace_xfs_log_ungrant_enter(log, ticket);
xlog_grant_sub_space(log, ticket->t_curr_res);
- xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
+ trace_xfs_log_ungrant_sub(log, ticket);
/* If this is a permanent reservation ticket, we may be able to free
* up more space based on the remaining count.
@@ -2825,7 +2751,8 @@ xlog_ungrant_log_space(xlog_t *log,
xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
}
- xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
+ trace_xfs_log_ungrant_exit(log, ticket);
+
xlog_verify_grant_head(log, 1);
spin_unlock(&log->l_grant_lock);
xfs_log_move_tail(log->l_mp, 1);
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 679c7c4926a..d55662db707 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -19,7 +19,6 @@
#define __XFS_LOG_PRIV_H__
struct xfs_buf;
-struct ktrace;
struct log;
struct xlog_ticket;
struct xfs_buf_cancel;
@@ -135,6 +134,12 @@ static inline uint xlog_get_client_id(__be32 i)
#define XLOG_TIC_INITED 0x1 /* has been initialized */
#define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */
#define XLOG_TIC_IN_Q 0x4
+
+#define XLOG_TIC_FLAGS \
+ { XLOG_TIC_INITED, "XLOG_TIC_INITED" }, \
+ { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }, \
+ { XLOG_TIC_IN_Q, "XLOG_TIC_IN_Q" }
+
#endif /* __KERNEL__ */
#define XLOG_UNMOUNT_TYPE 0x556e /* Un for Unmount */
@@ -361,9 +366,6 @@ typedef struct xlog_in_core {
int ic_bwritecnt;
unsigned short ic_state;
char *ic_datap; /* pointer to iclog data */
-#ifdef XFS_LOG_TRACE
- struct ktrace *ic_trace;
-#endif
/* Callback structures need their own cacheline */
spinlock_t ic_callback_lock ____cacheline_aligned_in_smp;
@@ -429,10 +431,6 @@ typedef struct log {
int l_grant_write_cycle;
int l_grant_write_bytes;
-#ifdef XFS_LOG_TRACE
- struct ktrace *l_grant_trace;
-#endif
-
/* The following field are used for debugging; need to hold icloglock */
#ifdef DEBUG
char *l_iclog_bak[XLOG_MAX_ICLOGS];
@@ -456,12 +454,6 @@ extern void xlog_put_bp(struct xfs_buf *);
extern kmem_zone_t *xfs_log_ticket_zone;
-/* iclog tracing */
-#define XLOG_TRACE_GRAB_FLUSH 1
-#define XLOG_TRACE_REL_FLUSH 2
-#define XLOG_TRACE_SLEEP_FLUSH 3
-#define XLOG_TRACE_WAKE_FLUSH 4
-
/*
* Unmount record type is used as a pseudo transaction type for the ticket.
* Its value must be outside the range of XFS_TRANS_* values.
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index fb17f8226b0..69ac2e5ef20 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -46,6 +46,7 @@
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"
+#include "xfs_trace.h"
STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
@@ -225,16 +226,10 @@ xlog_header_check_dump(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
- int b;
-
- cmn_err(CE_DEBUG, "%s: SB : uuid = ", __func__);
- for (b = 0; b < 16; b++)
- cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&mp->m_sb.sb_uuid)[b]);
- cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
- cmn_err(CE_DEBUG, " log : uuid = ");
- for (b = 0; b < 16; b++)
- cmn_err(CE_DEBUG, "%02x", ((__uint8_t *)&head->h_fs_uuid)[b]);
- cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
+ cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n",
+ __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
+ cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n",
+ &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
@@ -2206,6 +2201,7 @@ xlog_recover_do_buffer_trans(
xfs_daddr_t blkno;
int len;
ushort flags;
+ uint buf_flags;
buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
@@ -2246,12 +2242,11 @@ xlog_recover_do_buffer_trans(
}
mp = log->l_mp;
- if (flags & XFS_BLI_INODE_BUF) {
- bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
- XFS_BUF_LOCK);
- } else {
- bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
- }
+ buf_flags = XFS_BUF_LOCK;
+ if (!(flags & XFS_BLI_INODE_BUF))
+ buf_flags |= XFS_BUF_MAPPED;
+
+ bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
if (XFS_BUF_ISERROR(bp)) {
xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
bp, blkno);
@@ -2350,8 +2345,8 @@ xlog_recover_do_inode_trans(
goto error;
}
- bp = xfs_buf_read_flags(mp->m_ddev_targp, in_f->ilf_blkno,
- in_f->ilf_len, XFS_BUF_LOCK);
+ bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
+ XFS_BUF_LOCK);
if (XFS_BUF_ISERROR(bp)) {
xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
bp, in_f->ilf_blkno);
@@ -3517,7 +3512,7 @@ xlog_do_recovery_pass(
{
xlog_rec_header_t *rhead;
xfs_daddr_t blk_no;
- xfs_caddr_t bufaddr, offset;
+ xfs_caddr_t offset;
xfs_buf_t *hbp, *dbp;
int error = 0, h_size;
int bblks, split_bblks;
@@ -3610,7 +3605,7 @@ xlog_do_recovery_pass(
/*
* Check for header wrapping around physical end-of-log
*/
- offset = NULL;
+ offset = XFS_BUF_PTR(hbp);
split_hblks = 0;
wrapped_hblks = 0;
if (blk_no + hblks <= log->l_logBBsize) {
@@ -3646,9 +3641,8 @@ xlog_do_recovery_pass(
* - order is important.
*/
wrapped_hblks = hblks - split_hblks;
- bufaddr = XFS_BUF_PTR(hbp);
error = XFS_BUF_SET_PTR(hbp,
- bufaddr + BBTOB(split_hblks),
+ offset + BBTOB(split_hblks),
BBTOB(hblks - split_hblks));
if (error)
goto bread_err2;
@@ -3658,14 +3652,10 @@ xlog_do_recovery_pass(
if (error)
goto bread_err2;
- error = XFS_BUF_SET_PTR(hbp, bufaddr,
+ error = XFS_BUF_SET_PTR(hbp, offset,
BBTOB(hblks));
if (error)
goto bread_err2;
-
- if (!offset)
- offset = xlog_align(log, 0,
- wrapped_hblks, hbp);
}
rhead = (xlog_rec_header_t *)offset;
error = xlog_valid_rec_header(log, rhead,
@@ -3685,7 +3675,7 @@ xlog_do_recovery_pass(
} else {
/* This log record is split across the
* physical end of log */
- offset = NULL;
+ offset = XFS_BUF_PTR(dbp);
split_bblks = 0;
if (blk_no != log->l_logBBsize) {
/* some data is before the physical
@@ -3714,9 +3704,8 @@ xlog_do_recovery_pass(
* _first_, then the log start (LR header end)
* - order is important.
*/
- bufaddr = XFS_BUF_PTR(dbp);
error = XFS_BUF_SET_PTR(dbp,
- bufaddr + BBTOB(split_bblks),
+ offset + BBTOB(split_bblks),
BBTOB(bblks - split_bblks));
if (error)
goto bread_err2;
@@ -3727,13 +3716,9 @@ xlog_do_recovery_pass(
if (error)
goto bread_err2;
- error = XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
+ error = XFS_BUF_SET_PTR(dbp, offset, h_size);
if (error)
goto bread_err2;
-
- if (!offset)
- offset = xlog_align(log, wrapped_hblks,
- bblks - split_bblks, dbp);
}
xlog_unpack_data(rhead, offset, log);
if ((error = xlog_recover_process_data(log, rhash,
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 8b6c9e807ef..eb403b40e12 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -44,6 +44,8 @@
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_utils.h"
+#include "xfs_trace.h"
+
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
@@ -583,8 +585,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
- bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), extra_flags);
+ bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
+ extra_flags);
if (!bp || XFS_BUF_ISERROR(bp)) {
xfs_fs_mount_cmn_err(flags, "SB read failed");
error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
@@ -624,8 +626,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
XFS_BUF_UNMANAGE(bp);
xfs_buf_relse(bp);
sector_size = mp->m_sb.sb_sectsize;
- bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), extra_flags);
+ bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR,
+ BTOBB(sector_size), extra_flags);
if (!bp || XFS_BUF_ISERROR(bp)) {
xfs_fs_mount_cmn_err(flags, "SB re-read failed");
error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
@@ -1471,7 +1473,7 @@ xfs_log_sbcount(
if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
return 0;
- tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT);
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT);
if (error) {
@@ -2123,7 +2125,7 @@ xfs_icsb_destroy_counters(
mutex_destroy(&mp->m_icsb_mutex);
}
-STATIC_INLINE void
+STATIC void
xfs_icsb_lock_cntr(
xfs_icsb_cnts_t *icsbp)
{
@@ -2132,7 +2134,7 @@ xfs_icsb_lock_cntr(
}
}
-STATIC_INLINE void
+STATIC void
xfs_icsb_unlock_cntr(
xfs_icsb_cnts_t *icsbp)
{
@@ -2140,7 +2142,7 @@ xfs_icsb_unlock_cntr(
}
-STATIC_INLINE void
+STATIC void
xfs_icsb_lock_all_counters(
xfs_mount_t *mp)
{
@@ -2153,7 +2155,7 @@ xfs_icsb_lock_all_counters(
}
}
-STATIC_INLINE void
+STATIC void
xfs_icsb_unlock_all_counters(
xfs_mount_t *mp)
{
@@ -2389,12 +2391,12 @@ xfs_icsb_modify_counters(
{
xfs_icsb_cnts_t *icsbp;
long long lcounter; /* long counter for 64 bit fields */
- int cpu, ret = 0;
+ int ret = 0;
might_sleep();
again:
- cpu = get_cpu();
- icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+ preempt_disable();
+ icsbp = this_cpu_ptr(mp->m_sb_cnts);
/*
* if the counter is disabled, go to slow path
@@ -2438,11 +2440,11 @@ again:
break;
}
xfs_icsb_unlock_cntr(icsbp);
- put_cpu();
+ preempt_enable();
return 0;
slow_path:
- put_cpu();
+ preempt_enable();
/*
* serialise with a mutex so we don't burn lots of cpu on
@@ -2490,7 +2492,7 @@ slow_path:
balance_counter:
xfs_icsb_unlock_cntr(icsbp);
- put_cpu();
+ preempt_enable();
/*
* We may have multiple threads here if multiple per-cpu
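The hunk above replaces the get_cpu()/put_cpu() pair in xfs_icsb_modify_counters() with preempt_disable()/preempt_enable() plus this_cpu_ptr(), so the per-cpu counter is reached without first computing a CPU number. A kernel-style sketch of the two equivalent access patterns; the counter type below is illustrative and not part of the patch:

	struct my_counter { long value; };
	struct my_counter *counters;	/* from alloc_percpu() */

	/* Old pattern: look up the CPU id, then index the per-cpu area. */
	int cpu = get_cpu();			/* disables preemption */
	struct my_counter *c = per_cpu_ptr(counters, cpu);
	c->value++;
	put_cpu();				/* re-enables preemption */

	/* New pattern: this_cpu_ptr() resolves the current CPU internally. */
	preempt_disable();
	c = this_cpu_ptr(counters);
	c->value++;
	preempt_enable();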
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a6c023bc0fb..1df7e450296 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -93,6 +93,9 @@ typedef struct xfs_dmops {
xfs_send_unmount_t xfs_send_unmount;
} xfs_dmops_t;
+#define XFS_DMAPI_UNMOUNT_FLAGS(mp) \
+ (((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 0 : DM_FLAGS_UNWANTED)
+
#define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \
(*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock)
#define XFS_SEND_MMAP(mp, vma,fl) \
@@ -101,12 +104,24 @@ typedef struct xfs_dmops {
(*(mp)->m_dm_ops->xfs_send_destroy)(ip,right)
#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
(*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
-#define XFS_SEND_PREUNMOUNT(mp,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
- (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT,mp,b1,r1,b2,r2,n1,n2,mode,rval,fl)
#define XFS_SEND_MOUNT(mp,right,path,name) \
(*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name)
-#define XFS_SEND_UNMOUNT(mp, ip,right,mode,rval,fl) \
- (*(mp)->m_dm_ops->xfs_send_unmount)(mp,ip,right,mode,rval,fl)
+#define XFS_SEND_PREUNMOUNT(mp) \
+do { \
+ if (mp->m_flags & XFS_MOUNT_DMAPI) { \
+ (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \
+ (mp)->m_rootip, DM_RIGHT_NULL, \
+ (mp)->m_rootip, DM_RIGHT_NULL, \
+ NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
+ } \
+} while (0)
+#define XFS_SEND_UNMOUNT(mp) \
+do { \
+ if (mp->m_flags & XFS_MOUNT_DMAPI) { \
+ (*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \
+ DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
+ } \
+} while (0)
#ifdef HAVE_PERCPU_SB
@@ -387,13 +402,13 @@ xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag)
* Per-cpu superblock locking functions
*/
#ifdef HAVE_PERCPU_SB
-STATIC_INLINE void
+static inline void
xfs_icsb_lock(xfs_mount_t *mp)
{
mutex_lock(&mp->m_icsb_mutex);
}
-STATIC_INLINE void
+static inline void
xfs_icsb_unlock(xfs_mount_t *mp)
{
mutex_unlock(&mp->m_icsb_mutex);
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 3ec91ac74c2..91bfd60f4c7 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -92,6 +92,14 @@ typedef struct xfs_dqblk {
#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
+#define XFS_DQ_FLAGS \
+ { XFS_DQ_USER, "USER" }, \
+ { XFS_DQ_PROJ, "PROJ" }, \
+ { XFS_DQ_GROUP, "GROUP" }, \
+ { XFS_DQ_DIRTY, "DIRTY" }, \
+ { XFS_DQ_WANT, "WANT" }, \
+ { XFS_DQ_INACTIVE, "INACTIVE" }
+
/*
* In the worst case, when both user and group quotas are on,
* we can have a max of three dquots changing in a single transaction.
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index b81deea0ce1..fc1cda23b81 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -39,6 +39,7 @@
#include "xfs_utils.h"
#include "xfs_trans_space.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
/*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 385f6dceba5..9e15a118536 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -45,6 +45,7 @@
#include "xfs_inode_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
+#include "xfs_trace.h"
/*
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 3f816ad7ff1..5aa07caea5f 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -44,6 +44,7 @@
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_rw.h"
+#include "xfs_trace.h"
/*
* This is a subroutine for xfs_write() and other writers (xfs_ioctl)
@@ -171,7 +172,6 @@ xfs_bioerror(
* No need to wait until the buffer is unpinned.
* We aren't flushing it.
*/
- xfs_buftrace("XFS IOERROR", bp);
XFS_BUF_ERROR(bp, EIO);
/*
* We're calling biodone, so delete B_DONE flag. Either way
@@ -205,7 +205,6 @@ xfs_bioerror_relse(
ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
- xfs_buftrace("XFS IOERRELSE", bp);
fl = XFS_BUF_BFLAGS(bp);
/*
* No need to wait until the buffer is unpinned.
@@ -277,10 +276,10 @@ xfs_read_buf(
xfs_buf_t *bp;
int error;
- if (flags)
- bp = xfs_buf_read_flags(target, blkno, len, flags);
- else
- bp = xfs_buf_read(target, blkno, len, flags);
+ if (!flags)
+ flags = XBF_LOCK | XBF_MAPPED;
+
+ bp = xfs_buf_read(target, blkno, len, flags);
if (!bp)
return XFS_ERROR(EIO);
error = XFS_BUF_GETERROR(bp);
@@ -336,3 +335,25 @@ xfs_bwrite(
}
return (error);
}
+
+/*
+ * helper function to extract extent size hint from inode
+ */
+xfs_extlen_t
+xfs_get_extsz_hint(
+ struct xfs_inode *ip)
+{
+ xfs_extlen_t extsz;
+
+ if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
+ extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
+ ? ip->i_d.di_extsize
+ : ip->i_mount->m_sb.sb_rextsize;
+ ASSERT(extsz);
+ } else {
+ extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
+ ? ip->i_d.di_extsize : 0;
+ }
+
+ return extsz;
+}
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index f5e4874c37d..571f2174435 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -37,34 +37,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
}
/*
- * Flags for xfs_free_eofblocks
- */
-#define XFS_FREE_EOF_LOCK (1<<0)
-#define XFS_FREE_EOF_NOLOCK (1<<1)
-
-
-/*
- * helper function to extract extent size hint from inode
- */
-STATIC_INLINE xfs_extlen_t
-xfs_get_extsz_hint(
- xfs_inode_t *ip)
-{
- xfs_extlen_t extsz;
-
- if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
- extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
- ? ip->i_d.di_extsize
- : ip->i_mount->m_sb.sb_rextsize;
- ASSERT(extsz);
- } else {
- extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
- ? ip->i_d.di_extsize : 0;
- }
- return extsz;
-}
-
-/*
* Prototypes for functions in xfs_rw.c.
*/
extern int xfs_write_clear_setuid(struct xfs_inode *ip);
@@ -76,5 +48,6 @@ extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
struct xfs_buf **bpp);
extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
xfs_buf_t *bp, xfs_daddr_t blkno);
+extern xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
#endif /* __XFS_RW_H__ */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 66b849358e6..237badcbac3 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -236,19 +236,20 @@ xfs_trans_alloc(
uint type)
{
xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
- return _xfs_trans_alloc(mp, type);
+ return _xfs_trans_alloc(mp, type, KM_SLEEP);
}
xfs_trans_t *
_xfs_trans_alloc(
xfs_mount_t *mp,
- uint type)
+ uint type,
+ uint memflags)
{
xfs_trans_t *tp;
atomic_inc(&mp->m_active_trans);
- tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
+ tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
tp->t_magic = XFS_TRANS_MAGIC;
tp->t_type = type;
tp->t_mountp = mp;
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ed47fc77759..ca64f33c63a 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -100,6 +100,49 @@ typedef struct xfs_trans_header {
#define XFS_TRANS_TYPE_MAX 41
/* new transaction types need to be reflected in xfs_logprint(8) */
+#define XFS_TRANS_TYPES \
+ { XFS_TRANS_SETATTR_NOT_SIZE, "SETATTR_NOT_SIZE" }, \
+ { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
+ { XFS_TRANS_INACTIVE, "INACTIVE" }, \
+ { XFS_TRANS_CREATE, "CREATE" }, \
+ { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
+ { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
+ { XFS_TRANS_REMOVE, "REMOVE" }, \
+ { XFS_TRANS_LINK, "LINK" }, \
+ { XFS_TRANS_RENAME, "RENAME" }, \
+ { XFS_TRANS_MKDIR, "MKDIR" }, \
+ { XFS_TRANS_RMDIR, "RMDIR" }, \
+ { XFS_TRANS_SYMLINK, "SYMLINK" }, \
+ { XFS_TRANS_SET_DMATTRS, "SET_DMATTRS" }, \
+ { XFS_TRANS_GROWFS, "GROWFS" }, \
+ { XFS_TRANS_STRAT_WRITE, "STRAT_WRITE" }, \
+ { XFS_TRANS_DIOSTRAT, "DIOSTRAT" }, \
+ { XFS_TRANS_WRITEID, "WRITEID" }, \
+ { XFS_TRANS_ADDAFORK, "ADDAFORK" }, \
+ { XFS_TRANS_ATTRINVAL, "ATTRINVAL" }, \
+ { XFS_TRANS_ATRUNCATE, "ATRUNCATE" }, \
+ { XFS_TRANS_ATTR_SET, "ATTR_SET" }, \
+ { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
+ { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
+ { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
+ { XFS_TRANS_QM_SBCHANGE, "QM_SBCHANGE" }, \
+ { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
+ { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
+ { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
+ { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
+ { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
+ { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
+ { XFS_TRANS_SB_UNIT, "SB_UNIT" }, \
+ { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
+ { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
+ { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
+ { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
+ { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
+ { XFS_TRANS_SB_COUNT, "SB_COUNT" }, \
+ { XFS_TRANS_DUMMY1, "DUMMY1" }, \
+ { XFS_TRANS_DUMMY2, "DUMMY2" }, \
+ { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
+
/*
* This structure is used to track log items associated with
* a transaction. It points to the log item and keeps some
@@ -782,6 +825,10 @@ typedef struct xfs_log_item {
#define XFS_LI_IN_AIL 0x1
#define XFS_LI_ABORTED 0x2
+#define XFS_LI_FLAGS \
+ { XFS_LI_IN_AIL, "IN_AIL" }, \
+ { XFS_LI_ABORTED, "ABORTED" }
+
typedef struct xfs_item_ops {
uint (*iop_size)(xfs_log_item_t *);
void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
@@ -924,7 +971,7 @@ typedef struct xfs_trans {
* XFS transaction mechanism exported interfaces.
*/
xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint);
+xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint);
xfs_trans_t *xfs_trans_dup(xfs_trans_t *);
int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
uint, uint);
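The XFS_TRANS_TYPES and XFS_LI_FLAGS tables added above (like the XLOG_TIC_FLAGS and XFS_DQ_FLAGS tables earlier in the patch) pair each numeric type or flag bit with a printable name so the new xfs_trace.h tracepoints can render values symbolically. A self-contained sketch of how a table of that shape can be walked to format a flag word; the table contents and names below are illustrative, not taken from the patch:

	#include <stdio.h>

	struct flag_name { unsigned int bit; const char *name; };

	/* Hypothetical table, same shape as XFS_LI_FLAGS: { bit, "NAME" } pairs. */
	static const struct flag_name li_flags[] = {
		{ 0x1, "IN_AIL" },
		{ 0x2, "ABORTED" },
	};

	static void print_flags(unsigned int flags)
	{
		size_t i;
		int printed = 0;

		for (i = 0; i < sizeof(li_flags) / sizeof(li_flags[0]); i++) {
			if (flags & li_flags[i].bit)
				printf("%s%s", printed++ ? "|" : "", li_flags[i].name);
		}
		printf("%s\n", printed ? "" : "none");
	}

	int main(void)
	{
		print_flags(0x3);	/* prints IN_AIL|ABORTED */
		return 0;
	}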
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 218829e6a15..49130628d5e 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -38,6 +38,7 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_rw.h"
+#include "xfs_trace.h"
STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
@@ -79,11 +80,8 @@ xfs_trans_get_buf(xfs_trans_t *tp,
/*
* Default to a normal get_buf() call if the tp is NULL.
*/
- if (tp == NULL) {
- bp = xfs_buf_get_flags(target_dev, blkno, len,
- flags | BUF_BUSY);
- return(bp);
- }
+ if (tp == NULL)
+ return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
/*
* If we find the buffer in the cache with this transaction
@@ -98,26 +96,23 @@ xfs_trans_get_buf(xfs_trans_t *tp,
}
if (bp != NULL) {
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
- if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
- xfs_buftrace("TRANS GET RECUR SHUT", bp);
+ if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
XFS_BUF_SUPER_STALE(bp);
- }
+
/*
* If the buffer is stale then it was binval'ed
* since last read. This doesn't matter since the
* caller isn't allowed to use the data anyway.
*/
- else if (XFS_BUF_ISSTALE(bp)) {
- xfs_buftrace("TRANS GET RECUR STALE", bp);
+ else if (XFS_BUF_ISSTALE(bp))
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
- }
+
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
- xfs_buftrace("TRANS GET RECUR", bp);
- xfs_buf_item_trace("GET RECUR", bip);
+ trace_xfs_trans_get_buf_recur(bip);
return (bp);
}
@@ -129,7 +124,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
* easily deadlock with our current transaction as well as cause
* us to run out of stack space.
*/
- bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
if (bp == NULL) {
return NULL;
}
@@ -169,8 +164,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buftrace("TRANS GET", bp);
- xfs_buf_item_trace("GET", bip);
+ trace_xfs_trans_get_buf(bip);
return (bp);
}
@@ -210,7 +204,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
- xfs_buf_item_trace("GETSB RECUR", bip);
+ trace_xfs_trans_getsb_recur(bip);
return (bp);
}
@@ -252,7 +246,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buf_item_trace("GETSB", bip);
+ trace_xfs_trans_getsb(bip);
return (bp);
}
@@ -302,7 +296,7 @@ xfs_trans_read_buf(
* Default to a normal get_buf() call if the tp is NULL.
*/
if (tp == NULL) {
- bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
if (!bp)
return (flags & XFS_BUF_TRYLOCK) ?
EAGAIN : XFS_ERROR(ENOMEM);
@@ -350,7 +344,7 @@ xfs_trans_read_buf(
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
ASSERT((XFS_BUF_ISERROR(bp)) == 0);
if (!(XFS_BUF_ISDONE(bp))) {
- xfs_buftrace("READ_BUF_INCORE !DONE", bp);
+ trace_xfs_trans_read_buf_io(bp, _RET_IP_);
ASSERT(!XFS_BUF_ISASYNC(bp));
XFS_BUF_READ(bp);
xfsbdstrat(tp->t_mountp, bp);
@@ -375,7 +369,7 @@ xfs_trans_read_buf(
* brelse it either. Just get out.
*/
if (XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
+ trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
*bpp = NULL;
return XFS_ERROR(EIO);
}
@@ -385,7 +379,7 @@ xfs_trans_read_buf(
bip->bli_recur++;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- xfs_buf_item_trace("READ RECUR", bip);
+ trace_xfs_trans_read_buf_recur(bip);
*bpp = bp;
return 0;
}
@@ -398,14 +392,13 @@ xfs_trans_read_buf(
* easily deadlock with our current transaction as well as cause
* us to run out of stack space.
*/
- bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
if (bp == NULL) {
*bpp = NULL;
return 0;
}
if (XFS_BUF_GETERROR(bp) != 0) {
XFS_BUF_SUPER_STALE(bp);
- xfs_buftrace("READ ERROR", bp);
error = XFS_BUF_GETERROR(bp);
xfs_ioerror_alert("xfs_trans_read_buf", mp,
@@ -464,8 +457,7 @@ xfs_trans_read_buf(
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buftrace("TRANS READ", bp);
- xfs_buf_item_trace("READ", bip);
+ trace_xfs_trans_read_buf(bip);
*bpp = bp;
return 0;
@@ -483,7 +475,7 @@ shutdown_abort:
ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
(XFS_B_STALE|XFS_B_DELWRI));
- xfs_buftrace("READ_BUF XFSSHUTDN", bp);
+ trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
xfs_buf_relse(bp);
*bpp = NULL;
return XFS_ERROR(EIO);
@@ -549,13 +541,14 @@ xfs_trans_brelse(xfs_trans_t *tp,
lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
ASSERT(lidp != NULL);
+ trace_xfs_trans_brelse(bip);
+
/*
* If the release is just for a recursive lock,
* then decrement the count and return.
*/
if (bip->bli_recur > 0) {
bip->bli_recur--;
- xfs_buf_item_trace("RELSE RECUR", bip);
return;
}
@@ -563,10 +556,8 @@ xfs_trans_brelse(xfs_trans_t *tp,
* If the buffer is dirty within this transaction, we can't
* release it until we commit.
*/
- if (lidp->lid_flags & XFS_LID_DIRTY) {
- xfs_buf_item_trace("RELSE DIRTY", bip);
+ if (lidp->lid_flags & XFS_LID_DIRTY)
return;
- }
/*
* If the buffer has been invalidated, then we can't release
@@ -574,13 +565,10 @@ xfs_trans_brelse(xfs_trans_t *tp,
* as part of this transaction. This prevents us from pulling
* the item from the AIL before we should.
*/
- if (bip->bli_flags & XFS_BLI_STALE) {
- xfs_buf_item_trace("RELSE STALE", bip);
+ if (bip->bli_flags & XFS_BLI_STALE)
return;
- }
ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
- xfs_buf_item_trace("RELSE", bip);
/*
* Free up the log item descriptor tracking the released item.
@@ -677,7 +665,7 @@ xfs_trans_bjoin(xfs_trans_t *tp,
*/
XFS_BUF_SET_FSPRIVATE2(bp, tp);
- xfs_buf_item_trace("BJOIN", bip);
+ trace_xfs_trans_bjoin(bip);
}
/*
@@ -701,7 +689,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_HOLD;
- xfs_buf_item_trace("BHOLD", bip);
+ trace_xfs_trans_bhold(bip);
}
/*
@@ -724,7 +712,8 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT(bip->bli_flags & XFS_BLI_HOLD);
bip->bli_flags &= ~XFS_BLI_HOLD;
- xfs_buf_item_trace("BHOLD RELEASE", bip);
+
+ trace_xfs_trans_bhold_release(bip);
}
/*
@@ -770,6 +759,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
+ trace_xfs_trans_log_buf(bip);
+
/*
* If we invalidated the buffer within this transaction, then
* cancel the invalidation now that we're dirtying the buffer
@@ -777,7 +768,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
* because we have a reference to the buffer this entire time.
*/
if (bip->bli_flags & XFS_BLI_STALE) {
- xfs_buf_item_trace("BLOG UNSTALE", bip);
bip->bli_flags &= ~XFS_BLI_STALE;
ASSERT(XFS_BUF_ISSTALE(bp));
XFS_BUF_UNSTALE(bp);
@@ -792,7 +782,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
lidp->lid_flags &= ~XFS_LID_BUF_STALE;
bip->bli_flags |= XFS_BLI_LOGGED;
xfs_buf_item_log(bip, first, last);
- xfs_buf_item_trace("BLOG", bip);
}
@@ -831,6 +820,8 @@ xfs_trans_binval(
ASSERT(lidp != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ trace_xfs_trans_binval(bip);
+
if (bip->bli_flags & XFS_BLI_STALE) {
/*
* If the buffer is already invalidated, then
@@ -843,8 +834,6 @@ xfs_trans_binval(
ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
- xfs_buftrace("XFS_BINVAL RECUR", bp);
- xfs_buf_item_trace("BINVAL RECUR", bip);
return;
}
@@ -878,8 +867,6 @@ xfs_trans_binval(
(bip->bli_format.blf_map_size * sizeof(uint)));
lidp->lid_flags |= XFS_LID_DIRTY|XFS_LID_BUF_STALE;
tp->t_flags |= XFS_TRANS_DIRTY;
- xfs_buftrace("XFS_BINVAL", bp);
- xfs_buf_item_trace("BINVAL", bip);
}
/*
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b572f7e840e..6558ffd8d14 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -53,6 +53,7 @@
#include "xfs_log_priv.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
int
xfs_setattr(
@@ -538,9 +539,8 @@ xfs_readlink_bmap(
d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
- bp = xfs_buf_read_flags(mp->m_ddev_targp, d, BTOBB(byte_cnt),
- XBF_LOCK | XBF_MAPPED |
- XBF_DONT_BLOCK);
+ bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
+ XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK);
error = XFS_BUF_GETERROR(bp);
if (error) {
xfs_ioerror_alert("xfs_readlink",
@@ -709,6 +709,11 @@ xfs_fsync(
}
/*
+ * Flags for xfs_free_eofblocks
+ */
+#define XFS_FREE_EOF_TRYLOCK (1<<0)
+
+/*
* This is called by xfs_inactive to free any blocks beyond eof
* when the link count isn't zero and by xfs_dm_punch_hole() when
* punching a hole to EOF.
@@ -726,7 +731,6 @@ xfs_free_eofblocks(
xfs_filblks_t map_len;
int nimaps;
xfs_bmbt_irec_t imap;
- int use_iolock = (flags & XFS_FREE_EOF_LOCK);
/*
* Figure out if there are any blocks beyond the end
@@ -768,14 +772,19 @@ xfs_free_eofblocks(
* cache and we can't
* do that within a transaction.
*/
- if (use_iolock)
+ if (flags & XFS_FREE_EOF_TRYLOCK) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+ xfs_trans_cancel(tp, 0);
+ return 0;
+ }
+ } else {
xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ }
error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
ip->i_size);
if (error) {
xfs_trans_cancel(tp, 0);
- if (use_iolock)
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
@@ -812,8 +821,7 @@ xfs_free_eofblocks(
error = xfs_trans_commit(tp,
XFS_TRANS_RELEASE_LOG_RES);
}
- xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
- : XFS_ILOCK_EXCL));
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
}
return error;
}
@@ -1113,7 +1121,17 @@ xfs_release(
(ip->i_df.if_flags & XFS_IFEXTENTS)) &&
(!(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
- error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+
+ /*
+ * If we can't get the iolock just skip truncating
+ * the blocks past EOF because we could deadlock
+ * with the mmap_sem otherwise. We'll get another
+ * chance to drop them once the last reference to
+ * the inode is dropped, so we'll never leak blocks
+ * permanently.
+ */
+ error = xfs_free_eofblocks(mp, ip,
+ XFS_FREE_EOF_TRYLOCK);
if (error)
return error;
}
@@ -1184,7 +1202,7 @@ xfs_inactive(
(!(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
(ip->i_delayed_blks != 0)))) {
- error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+ error = xfs_free_eofblocks(mp, ip, 0);
if (error)
return VN_INACTIVE_CACHE;
}
@@ -1380,7 +1398,6 @@ xfs_lookup(
if (error)
goto out_free_name;
- xfs_itrace_ref(*ipp);
return 0;
out_free_name:
@@ -1526,7 +1543,6 @@ xfs_create(
* At this point, we've gotten a newly allocated inode.
* It is locked (and joined to the transaction).
*/
- xfs_itrace_ref(ip);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
/*
@@ -1986,9 +2002,6 @@ xfs_remove(
if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
xfs_filestream_deassociate(ip);
- xfs_itrace_exit(ip);
- xfs_itrace_exit(dp);
-
std_return:
if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
@@ -2285,7 +2298,6 @@ xfs_symlink(
goto error_return;
goto error1;
}
- xfs_itrace_ref(ip);
/*
* An error after we've joined dp to the transaction will result in the
@@ -2456,46 +2468,6 @@ xfs_set_dmattrs(
return error;
}
-int
-xfs_reclaim(
- xfs_inode_t *ip)
-{
-
- xfs_itrace_entry(ip);
-
- ASSERT(!VN_MAPPED(VFS_I(ip)));
-
- /* bad inode, get out here ASAP */
- if (is_bad_inode(VFS_I(ip))) {
- xfs_ireclaim(ip);
- return 0;
- }
-
- xfs_ioend_wait(ip);
-
- ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
-
- /*
- * If we have nothing to flush with this inode then complete the
- * teardown now, otherwise break the link between the xfs inode and the
- * linux inode and clean up the xfs inode later. This avoids flushing
- * the inode to disk during the delete operation itself.
- *
- * When breaking the link, we need to set the XFS_IRECLAIMABLE flag
- * first to ensure that xfs_iunpin() will never see an xfs inode
- * that has a linux inode being reclaimed. Synchronisation is provided
- * by the i_flags_lock.
- */
- if (!ip->i_update_core && (ip->i_itemp == NULL)) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
- xfs_iflags_set(ip, XFS_IRECLAIMABLE);
- return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC);
- }
- xfs_inode_set_reclaim_tag(ip);
- return 0;
-}
-
/*
* xfs_alloc_file_space()
* This routine allocates disk space for the given file.
@@ -2868,7 +2840,6 @@ xfs_free_file_space(
ioffset = offset & ~(rounding - 1);
if (VN_CACHED(VFS_I(ip)) != 0) {
- xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
if (error)
goto out_unlock_iolock;
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index a9e102de71a..167a467403a 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -38,7 +38,6 @@ int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
const char *target_path, mode_t mode, struct xfs_inode **ipp,
cred_t *credp);
int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
-int xfs_reclaim(struct xfs_inode *ip);
int xfs_change_file_space(struct xfs_inode *ip, int cmd,
xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 5c823d5ab78..d814da4b536 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -85,7 +85,8 @@
#define ACPI_LV_INIT 0x00000001
#define ACPI_LV_DEBUG_OBJECT 0x00000002
#define ACPI_LV_INFO 0x00000004
-#define ACPI_LV_ALL_EXCEPTIONS 0x00000007
+#define ACPI_LV_REPAIR 0x00000008
+#define ACPI_LV_ALL_EXCEPTIONS 0x0000000F
/* Trace verbosity level 1 [Standard Trace Level] */
@@ -143,6 +144,7 @@
#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT)
#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
#define ACPI_DB_INFO ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
+#define ACPI_DB_REPAIR ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR)
#define ACPI_DB_ALL_EXCEPTIONS ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
/* Trace level -- also used in the global "DebugLevel" */
@@ -174,8 +176,8 @@
/* Defaults for debug_level, debug and normal */
-#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO)
-#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT)
+#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
+#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
diff --git a/include/acpi/acpi_hest.h b/include/acpi/acpi_hest.h
new file mode 100644
index 00000000000..63194d03cb2
--- /dev/null
+++ b/include/acpi/acpi_hest.h
@@ -0,0 +1,12 @@
+#ifndef __ACPI_HEST_H
+#define __ACPI_HEST_H
+
+#include <linux/pci.h>
+
+#ifdef CONFIG_ACPI
+extern int acpi_hest_firmware_first_pci(struct pci_dev *pci);
+#else
+static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; }
+#endif
+
+#endif
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 5e1ad3cd1bb..86e9735a96b 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20091112
+#define ACPI_CA_VERSION 0x20091214
#include "actypes.h"
#include "actbl.h"
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 740ac3ad8fd..29245c6b5c0 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -294,7 +294,8 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
#ifdef CONFIG_CPU_FREQ
void acpi_processor_ppc_init(void);
void acpi_processor_ppc_exit(void);
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr);
+int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
+extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else
static inline void acpi_processor_ppc_init(void)
{
@@ -304,7 +305,8 @@ static inline void acpi_processor_ppc_exit(void)
{
return;
}
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+ int event_flag)
{
static unsigned int printout = 1;
if (printout) {
@@ -316,6 +318,11 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
}
return 0;
}
+static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_CPU_FREQ */
/* in processor_throttling.c */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63..ecc44a8e2b4 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,19 +15,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4b6755984d2..18c435d7c08 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -113,22 +113,22 @@ extern void warn_slowpath_null(const char *file, const int line);
#endif
#define WARN_ON_ONCE(condition) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
#define WARN_ONCE(condition, format...) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 495dc8af404..fcd268ce067 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -3,8 +3,6 @@
#include <linux/types.h>
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_ACCMODE 00000003
#define O_RDONLY 00000000
#define O_WRONLY 00000001
@@ -27,8 +25,8 @@
#ifndef O_NONBLOCK
#define O_NONBLOCK 00004000
#endif
-#ifndef O_SYNC
-#define O_SYNC 00010000
+#ifndef O_DSYNC
+#define O_DSYNC 00010000 /* used to be O_SYNC, see below */
#endif
#ifndef FASYNC
#define FASYNC 00020000 /* fcntl, for BSD compatibility */
@@ -51,6 +49,25 @@
#ifndef O_CLOEXEC
#define O_CLOEXEC 02000000 /* set close_on_exec */
#endif
+
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#ifndef O_SYNC
+#define __O_SYNC 04000000
+#define O_SYNC (__O_SYNC|O_DSYNC)
+#endif
+
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
#endif
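Because O_SYNC is now defined as __O_SYNC|O_DSYNC, the comment above holds: testing the O_DSYNC bit is enough whenever the caller does not care which of the two was requested, while matching the full O_SYNC mask distinguishes true O_SYNC. A short userspace sketch of both checks (the flag combination is illustrative):

	#include <fcntl.h>
	#include <stdio.h>

	int main(void)
	{
		int flags = O_WRONLY | O_SYNC;	/* O_SYNC implies the O_DSYNC bit */

		if (flags & O_DSYNC)
			printf("data integrity sync requested (O_DSYNC or O_SYNC)\n");
		if ((flags & O_SYNC) == O_SYNC)
			printf("full O_SYNC semantics requested\n");
		return 0;
	}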
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 66d6106a206..485eeb6c4ef 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -28,6 +28,7 @@ static inline int gpio_is_valid(int number)
return ((unsigned)number) < ARCH_NR_GPIOS;
}
+struct device;
struct seq_file;
struct module;
@@ -144,6 +145,7 @@ extern int __gpio_to_irq(unsigned gpio);
extern int gpio_export(unsigned gpio, bool direction_may_change);
extern int gpio_export_link(struct device *dev, const char *name,
unsigned gpio);
+extern int gpio_sysfs_set_active_low(unsigned gpio, int value);
extern void gpio_unexport(unsigned gpio);
#endif /* CONFIG_GPIO_SYSFS */
@@ -181,6 +183,8 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
#ifndef CONFIG_GPIO_SYSFS
+struct device;
+
/* sysfs support is only available with gpiolib, where it's optional */
static inline int gpio_export(unsigned gpio, bool direction_may_change)
@@ -194,6 +198,11 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -ENOSYS;
}
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ return -ENOSYS;
+}
+
static inline void gpio_unexport(unsigned gpio)
{
}
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 5ee13b2fd22..3da9e2742fa 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -19,6 +19,11 @@
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
+#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
+# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
+#else
+# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
+#endif
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
@@ -35,6 +40,7 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
#define MADV_HWPOISON 100 /* poison a page for testing */
+#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1..8087b90d467 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 7c38c147e5e..6a0b30f78a6 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -622,9 +622,13 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
#define __NR_perf_event_open 241
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_accept4 242
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_recvmmsg 243
+__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
#undef __NR_syscalls
-#define __NR_syscalls 242
+#define __NR_syscalls 244
/*
* All syscalls below here should go away really,
@@ -802,7 +806,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall)
#define __NR_statfs __NR3264_statfs
#define __NR_fstatfs __NR3264_fstatfs
#define __NR_truncate __NR3264_truncate
-#define __NR_ftruncate __NR3264_truncate
+#define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek
#define __NR_sendfile __NR3264_sendfile
#define __NR_newfstatat __NR3264_fstatat
@@ -818,7 +822,7 @@ __SYSCALL(__NR_fork, sys_ni_syscall)
#define __NR_statfs64 __NR3264_statfs
#define __NR_fstatfs64 __NR3264_fstatfs
#define __NR_truncate64 __NR3264_truncate
-#define __NR_ftruncate64 __NR3264_truncate
+#define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek
#define __NR_sendfile64 __NR3264_sendfile
#define __NR_fstatat64 __NR3264_fstatat
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index b6e818f4b24..67e652068e0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -52,8 +52,12 @@
#define LOAD_OFFSET 0
#endif
-#ifndef VMLINUX_SYMBOL
-#define VMLINUX_SYMBOL(_sym_) _sym_
+#ifndef SYMBOL_PREFIX
+#define VMLINUX_SYMBOL(sym) sym
+#else
+#define PASTE2(x,y) x##y
+#define PASTE(x,y) PASTE2(x,y)
+#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
#endif
/* Align . to a 8 byte boundary equals to maximum function alignment. */
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index b940fdfa3b2..bd3a1c2fbdb 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -7,4 +7,6 @@ unifdef-y += r128_drm.h
unifdef-y += radeon_drm.h
unifdef-y += sis_drm.h
unifdef-y += savage_drm.h
+unifdef-y += vmwgfx_drm.h
unifdef-y += via_drm.h
+unifdef-y += nouveau_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 7cb50bdde46..e3f46e0cb7d 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,17 +36,27 @@
#ifndef _DRM_H_
#define _DRM_H_
+#if defined(__linux__)
+
#include <linux/types.h>
-#include <asm/ioctl.h> /* For _IO* macros */
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_IOC_VOID _IOC_NONE
-#define DRM_IOC_READ _IOC_READ
-#define DRM_IOC_WRITE _IOC_WRITE
-#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+#include <asm/ioctl.h>
+typedef unsigned int drm_handle_t;
-#define DRM_MAJOR 226
-#define DRM_MAX_MINOR 15
+#else /* One of the BSDs */
+
+#include <sys/ioccom.h>
+#include <sys/types.h>
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+typedef unsigned long drm_handle_t;
+
+#endif
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
@@ -59,7 +69,6 @@
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
-typedef unsigned int drm_handle_t;
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
@@ -454,6 +463,7 @@ struct drm_irq_busid {
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
@@ -461,8 +471,8 @@ enum drm_vblank_seq_type {
};
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
- _DRM_VBLANK_NEXTONMISS)
+#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
+ _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
enum drm_vblank_seq_type type;
@@ -686,6 +696,8 @@ struct drm_gem_open {
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
+#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
+#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
/**
* Device specific ioctls should only be in their respective headers
@@ -698,6 +710,35 @@ struct drm_gem_open {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
+/**
+ * Header for events written back to userspace on the drm fd. The
+ * type defines the type of event, the length specifies the total
+ * length of the event (including the header), and user_data is
+ * typically a 64 bit value passed with the ioctl that triggered the
+ * event. A read on the drm fd will always only return complete
+ * events, that is, if for example the read buffer is 100 bytes, and
+ * there are two 64 byte events pending, only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
+ * up are chipset specific.
+ */
+struct drm_event {
+ __u32 type;
+ __u32 length;
+};
+
+#define DRM_EVENT_VBLANK 0x01
+#define DRM_EVENT_FLIP_COMPLETE 0x02
+
+struct drm_event_vblank {
+ struct drm_event base;
+ __u64 user_data;
+ __u32 tv_sec;
+ __u32 tv_usec;
+ __u32 sequence;
+ __u32 reserved;
+};
+
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
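The struct drm_event header added above is what userspace sees when it read()s the drm fd: every event begins with a type and a total length (header included), and a read never returns a partial event. A hedged sketch of the corresponding read loop, assuming the drm.h from this patch is on the include path; the buffer size and error handling are illustrative:

	#include <string.h>
	#include <unistd.h>
	#include <drm/drm.h>

	/* Drain pending events from an already-open drm fd. */
	static void drain_events(int fd)
	{
		char buf[1024];
		ssize_t len = read(fd, buf, sizeof(buf));	/* whole events only */
		ssize_t off = 0;

		while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
			struct drm_event ev;

			memcpy(&ev, buf + off, sizeof(ev));
			if (ev.length < sizeof(ev))
				break;			/* malformed, stop walking */
			if (ev.type == DRM_EVENT_VBLANK) {
				struct drm_event_vblank vbl;

				memcpy(&vbl, buf + off, sizeof(vbl));
				/* vbl.sequence, vbl.tv_sec, vbl.tv_usec describe it */
			}
			off += ev.length;	/* length covers the header too */
		}
	}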
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c8e64bbadbc..71dafb69cfe 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -245,16 +245,6 @@ extern void drm_ut_debug_printk(unsigned int request_level,
#endif
-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
-
-#define DRM_PROC_PRINT(fmt, arg...) \
- len += sprintf(&buf[len], fmt , ##arg); \
- if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
-
-#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
- len += sprintf(&buf[len], fmt , ##arg); \
- if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
-
/*@}*/
/***********************************************************************/
@@ -265,19 +255,8 @@ extern void drm_ut_debug_printk(unsigned int request_level,
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
-#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * Get the private SAREA mapping.
- *
- * \param _dev DRM device.
- * \param _ctx context number.
- * \param _map output mapping.
- */
-#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
- (_map) = (_dev)->context_sareas[_ctx]; \
-} while(0)
/**
* Test that the hardware lock is held by the caller, returning otherwise.
@@ -297,18 +276,6 @@ do { \
} while (0)
/**
- * Copy and IOCTL return string to user space
- */
-#define DRM_COPY( name, value ) \
- len = strlen( value ); \
- if ( len > name##_len ) len = name##_len; \
- name##_len = strlen( value ); \
- if ( len && name ) { \
- if ( copy_to_user( name, value, len ) ) \
- return -EFAULT; \
- }
-
-/**
* Ioctl function type.
*
* \param inode device inode.
@@ -322,10 +289,14 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_MAJOR 226
+
#define DRM_AUTH 0x1
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
#define DRM_CONTROL_ALLOW 0x8
+#define DRM_UNLOCKED 0x10
struct drm_ioctl_desc {
unsigned int cmd;
@@ -426,6 +397,14 @@ struct drm_buf_entry {
struct drm_freelist freelist;
};
+/* Event queued up for userspace to read */
+struct drm_pending_event {
+ struct drm_event *event;
+ struct list_head link;
+ struct drm_file *file_priv;
+ void (*destroy)(struct drm_pending_event *event);
+};
+
/** File private data */
struct drm_file {
int authenticated;
@@ -449,6 +428,10 @@ struct drm_file {
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
struct list_head fbs;
+
+ wait_queue_head_t event_wait;
+ struct list_head event_list;
+ int event_space;
};
/** Wait queue */
@@ -795,6 +778,15 @@ struct drm_driver {
/* Master routines */
int (*master_create)(struct drm_device *dev, struct drm_master *master);
void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+ /**
+ * master_set is called whenever the minor master is set.
+ * master_drop is called whenever the minor master is dropped.
+ */
+
+ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_open);
+ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+ bool from_release);
int (*proc_init)(struct drm_minor *minor);
void (*proc_cleanup)(struct drm_minor *minor);
@@ -900,6 +892,12 @@ struct drm_minor {
struct drm_mode_group mode_group;
};
+struct drm_pending_vblank_event {
+ struct drm_pending_event base;
+ int pipe;
+ struct drm_event_vblank event;
+};
+
/**
 * DRM device structure. This structure represents a complete card that
* may contain multiple heads.
@@ -999,6 +997,12 @@ struct drm_device {
u32 max_vblank_count; /**< size of vblank counter register */
+ /**
+ * List of events
+ */
+ struct list_head vblank_event_list;
+ spinlock_t event_lock;
+
/*@} */
cycles_t ctx_start;
cycles_t lck_start;
@@ -1125,8 +1129,8 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
/* Driver support (drm_drv.h) */
extern int drm_init(struct drm_driver *driver);
extern void drm_exit(struct drm_driver *driver);
-extern int drm_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lastclose(struct drm_device *dev);
@@ -1135,6 +1139,8 @@ extern int drm_lastclose(struct drm_device *dev);
extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_stub_open(struct inode *inode, struct file *filp);
extern int drm_fasync(int fd, struct file *filp, int on);
+extern ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
@@ -1295,6 +1301,7 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
@@ -1519,14 +1526,27 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
+ if (size != 0 && nmemb > ULONG_MAX / size)
+ return NULL;
+
if (size * nmemb <= PAGE_SIZE)
return kcalloc(nmemb, size, GFP_KERNEL);
+ return __vmalloc(size * nmemb,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+}
+
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
if (size != 0 && nmemb > ULONG_MAX / size)
return NULL;
+ if (size * nmemb <= PAGE_SIZE)
+ return kmalloc(nmemb * size, GFP_KERNEL);
+
return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
static __inline void drm_free_large(void *ptr)
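Both drm_calloc_large() and the new drm_malloc_ab() above guard the nmemb * size multiplication with the same check before allocating: if size is non-zero and nmemb > ULONG_MAX / size, the product would wrap. A standalone sketch of that guard; the function name is illustrative:

	#include <limits.h>
	#include <stdlib.h>

	/* Allocate nmemb elements of 'size' bytes, refusing on multiply overflow. */
	static void *alloc_ab(size_t nmemb, size_t size)
	{
		if (size != 0 && nmemb > ULONG_MAX / size)
			return NULL;	/* nmemb * size would wrap around */
		return malloc(nmemb * size);
	}

	int main(void)
	{
		void *p = alloc_ab((size_t)-1, 8);	/* overflow -> NULL */

		free(p);
		return 0;
	}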
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b69347b8904..fdf43abc36d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -123,7 +123,7 @@ struct drm_display_mode {
int type;
/* Proposed mode values */
- int clock;
+ int clock; /* in kHz */
int hdisplay;
int hsync_start;
int hsync_end;
@@ -164,8 +164,8 @@ struct drm_display_mode {
int *private;
int private_flags;
- int vrefresh;
- float hsync;
+ int vrefresh; /* in Hz */
+ int hsync; /* in kHz */
};
enum drm_connector_status {
@@ -242,6 +242,21 @@ struct drm_framebuffer_funcs {
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle);
+ /**
+	 * Optional callback for the dirty fb ioctl.
+	 *
+	 * Userspace can notify the driver via this callback
+	 * that an area of the framebuffer has changed and should
+	 * be flushed to the display hardware.
+	 *
+	 * See the documentation of struct drm_mode_fb_dirty_cmd in
+	 * drm_mode.h for details; its semantics and arguments map
+	 * one to one onto this callback.
+ */
+ int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
+ unsigned color, struct drm_clip_rect *clips,
+ unsigned num_clips);
};
struct drm_framebuffer {
@@ -256,7 +271,7 @@ struct drm_framebuffer {
unsigned int depth;
int bits_per_pixel;
int flags;
- void *fbdev;
+ struct fb_info *fbdev;
u32 pseudo_palette[17];
struct list_head filp_head;
/* if you are using the helper */
@@ -290,6 +305,7 @@ struct drm_property {
struct drm_crtc;
struct drm_connector;
struct drm_encoder;
+struct drm_pending_vblank_event;
/**
* drm_crtc_funcs - control CRTCs for a given device
@@ -333,6 +349,19 @@ struct drm_crtc_funcs {
void (*destroy)(struct drm_crtc *crtc);
int (*set_config)(struct drm_mode_set *set);
+
+ /*
+	 * Flip to the given framebuffer. This implements the page
+	 * flip ioctl described in drm_mode.h: the implementation
+	 * must return immediately and block all rendering to the
+	 * current fb until the flip has completed.
+ * If userspace set the event flag in the ioctl, the event
+ * argument will point to an event to send back when the flip
+ * completes, otherwise it will be NULL.
+ */
+ int (*page_flip)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event);
};
/**
@@ -596,6 +625,7 @@ struct drm_mode_config {
/* Optional properties */
struct drm_property *scaling_mode_property;
struct drm_property *dithering_mode_property;
+ struct drm_property *dirty_info_property;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -667,6 +697,7 @@ extern void drm_mode_validate_size(struct drm_device *dev,
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
+extern int drm_mode_hsync(struct drm_display_mode *mode);
extern int drm_mode_vrefresh(struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
@@ -703,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_dithering_property(struct drm_device *dev);
+extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern char *drm_get_encoder_name(struct drm_encoder *encoder);
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -711,7 +743,8 @@ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
-extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type);
+extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ uint32_t id, uint32_t type);
/* IOCTLs */
extern int drm_mode_getresources(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@@ -730,6 +763,8 @@ extern int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern int drm_mode_addmode_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
@@ -756,6 +791,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
+extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool reduced, bool interlaced, bool margins);
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/include/drm/drm_dp_helper.h
index 2b38054d3b6..a49e791db0b 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/include/drm/drm_dp_helper.h
@@ -20,8 +20,8 @@
* OF THIS SOFTWARE.
*/
-#ifndef _INTEL_DP_H_
-#define _INTEL_DP_H_
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
/* From the VESA DisplayPort spec */
@@ -43,16 +43,41 @@
#define AUX_I2C_REPLY_MASK (0x3 << 6)
/* AUX CH addresses */
-#define DP_LINK_BW_SET 0x100
+/* DPCD */
+#define DP_DPCD_REV 0x000
+
+#define DP_MAX_LINK_RATE 0x001
+
+#define DP_MAX_LANE_COUNT 0x002
+# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_ENHANCED_FRAME_CAP (1 << 7)
+
+#define DP_MAX_DOWNSPREAD 0x003
+# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
+
+#define DP_NORP 0x004
+
+#define DP_DOWNSTREAMPORT_PRESENT 0x005
+# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
+# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
+/* 00b = DisplayPort */
+/* 01b = Analog */
+/* 10b = TMDS or HDMI */
+/* 11b = Other */
+# define DP_FORMAT_CONVERSION (1 << 3)
+
+#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+
+/* link configuration */
+#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
-#define DP_LANE_COUNT_SET 0x101
+#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
-#define DP_TRAINING_PATTERN_SET 0x102
-
+#define DP_TRAINING_PATTERN_SET 0x102
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
@@ -102,11 +127,14 @@
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
-
# define DP_LANE_CR_DONE (1 << 0)
# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
+ DP_LANE_CHANNEL_EQ_DONE | \
+ DP_LANE_SYMBOL_LOCKED)
+
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
#define DP_INTERLANE_ALIGN_DONE (1 << 0)
@@ -120,25 +148,33 @@
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
-
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
-#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
-#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
-#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
-#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+#define DP_SET_POWER 0x600
+# define DP_SET_POWER_D0 0x1
+# define DP_SET_POWER_D3 0x2
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
struct i2c_algo_dp_aux_data {
bool running;
u16 address;
int (*aux_ch) (struct i2c_adapter *adapter,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_bytes);
+ int mode, uint8_t write_byte,
+ uint8_t *read_byte);
};
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
-#endif /* _INTEL_DP_H_ */
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7d6c9a2dfcb..d33c3e03860 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -106,6 +106,10 @@ struct detailed_data_color_point {
u8 wpindex2[3];
} __attribute__((packed));
+struct cvt_timing {
+ u8 code[3];
+} __attribute__((packed));
+
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
@@ -117,9 +121,13 @@ struct detailed_non_pixel {
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[5];
+ struct cvt_timing cvt[4];
} data;
} __attribute__((packed));
+#define EDID_DETAIL_EST_TIMINGS 0xf7
+#define EDID_DETAIL_CVT_3BYTE 0xf8
+#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 62329f9a42c..4c10be39a43 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic);
+extern struct drm_mm_node *drm_mm_get_block_range_generic(
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic);
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
{
return drm_mm_get_block_generic(parent, size, alignment, 1);
}
+static inline struct drm_mm_node *drm_mm_get_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment,
+ start, end, 1);
+}
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm;
}
+extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 1f908416aed..43009bc2e75 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -27,9 +27,6 @@
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
-#include <linux/kernel.h>
-#include <linux/types.h>
-
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
@@ -78,6 +75,11 @@
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
+/* Dirty info options */
+#define DRM_MODE_DIRTY_OFF 0
+#define DRM_MODE_DIRTY_ON 1
+#define DRM_MODE_DIRTY_ANNOTATE 2
+
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
@@ -225,6 +227,45 @@ struct drm_mode_fb_cmd {
__u32 handle;
};
+#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+#define DRM_MODE_FB_DIRTY_FLAGS 0x03
+
+/*
+ * Mark a region of a framebuffer as dirty.
+ *
+ * Some hardware does not automatically update display contents
+ * when hardware or software draws to a framebuffer. This ioctl
+ * allows userspace to tell the kernel and the hardware which
+ * regions of the framebuffer have changed.
+ *
+ * The kernel or hardware is free to update more than just the
+ * region specified by the clip rects. The kernel or hardware
+ * may also delay and/or coalesce several calls to dirty into a
+ * single update.
+ *
+ * Userspace may annotate the updates; the annotations are a
+ * promise by the caller that the change is either a copy of
+ * pixels or a fill of a single color in the specified region.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given, the
+ * number of updated regions is half of the num_clips given,
+ * with the clip rects paired as src and dst. The width and
+ * height of each pair must match.
+ *
+ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
+ * promises that the region specified by the clip rects is filled
+ * completely with the single color given in the color argument.
+ */
+
+struct drm_mode_fb_dirty_cmd {
+ __u32 fb_id;
+ __u32 flags;
+ __u32 color;
+ __u32 num_clips;
+ __u64 clips_ptr;
+};
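A minimal userspace sketch of how this structure might be used, assuming DRM_IOCTL_MODE_DIRTYFB and struct drm_clip_rect are available from drm.h, an already-open DRM file descriptor, and libdrm's drmIoctl() as the ioctl wrapper:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>    /* drmIoctl(); pulls in the drm/drm_mode headers */

/* Mark a 64x64 region of framebuffer fb_id as dirty (no annotation). */
static int mark_dirty(int fd, uint32_t fb_id)
{
        struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
        struct drm_mode_fb_dirty_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.fb_id = fb_id;
        cmd.flags = 0;                          /* plain dirty, no copy/fill hint */
        cmd.num_clips = 1;
        cmd.clips_ptr = (uint64_t)(unsigned long)&clip;

        return drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}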
+
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
@@ -268,4 +309,37 @@ struct drm_mode_crtc_lut {
__u64 blue;
};
+#define DRM_MODE_PAGE_FLIP_EVENT 0x01
+#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+
+/*
+ * Request a page flip on the specified crtc.
+ *
+ * This ioctl will ask KMS to schedule a page flip for the specified
+ * crtc. Once any pending rendering targeting the specified fb (as of
+ * ioctl time) has completed, the crtc will be reprogrammed to display
+ * that fb after the next vertical refresh. The ioctl returns
+ * immediately, but subsequent rendering to the current fb will block
+ * in the execbuffer ioctl until the page flip happens. If a page
+ * flip is already pending as the ioctl is called, EBUSY will be
+ * returned.
+ *
+ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
+ * request that drm sends back a vblank event (see drm.h: struct
+ * drm_event_vblank) when the page flip is done. The user_data field
+ * passed in with this ioctl will be returned as the user_data field
+ * in the vblank event struct.
+ *
+ * The reserved field must be zero until we figure out something
+ * clever to use it for.
+ */
+
+struct drm_mode_crtc_page_flip {
+ __u32 crtc_id;
+ __u32 fb_id;
+ __u32 flags;
+ __u32 reserved;
+ __u64 user_data;
+};
+
#endif
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 26641e95e0a..393369147a2 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -123,5 +123,5 @@ do { \
remove_wait_queue(&(queue), &entry); \
} while (0)
-#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
+#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
new file mode 100644
index 00000000000..8390b437a1f
--- /dev/null
+++ b/include/drm/i2c/ch7006.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DRM_I2C_CH7006_H__
+#define __DRM_I2C_CH7006_H__
+
+/**
+ * struct ch7006_encoder_params
+ *
+ * Describes how the ch7006 is wired up with the GPU. It should be
+ * used as the @params parameter of its @set_config method.
+ *
+ * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
+ * meaning.
+ */
+struct ch7006_encoder_params {
+ enum {
+ CH7006_FORMAT_RGB16 = 0,
+ CH7006_FORMAT_YCrCb24m16,
+ CH7006_FORMAT_RGB24m16,
+ CH7006_FORMAT_RGB15,
+ CH7006_FORMAT_RGB24m12C,
+ CH7006_FORMAT_RGB24m12I,
+ CH7006_FORMAT_RGB24m8,
+ CH7006_FORMAT_RGB16m8,
+ CH7006_FORMAT_RGB15m8,
+ CH7006_FORMAT_YCrCb24m8,
+ } input_format;
+
+ enum {
+ CH7006_CLOCK_SLAVE = 0,
+ CH7006_CLOCK_MASTER,
+ } clock_mode;
+
+ enum {
+ CH7006_CLOCK_EDGE_NEG = 0,
+ CH7006_CLOCK_EDGE_POS,
+ } clock_edge;
+
+ int xcm, pcm;
+
+ enum {
+ CH7006_SYNC_SLAVE = 0,
+ CH7006_SYNC_MASTER,
+ } sync_direction;
+
+ enum {
+ CH7006_SYNC_SEPARATED = 0,
+ CH7006_SYNC_EMBEDDED,
+ } sync_encoding;
+
+ enum {
+ CH7006_POUT_1_8V = 0,
+ CH7006_POUT_3_3V,
+ } pout_level;
+
+ enum {
+ CH7006_ACTIVE_HSYNC = 0,
+ CH7006_ACTIVE_DSTART,
+ } active_detect;
+};
+
+#endif
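As a rough illustration, a board-specific wiring description for the ch7006 might look like the following; the values are made up, and handing the struct to the encoder (typically through a drm_encoder_slave set_config() hook) is assumed rather than shown in this header:

#include <drm/i2c/ch7006.h>

static const struct ch7006_encoder_params demo_ch7006_params = {
        .input_format   = CH7006_FORMAT_RGB24m12I,
        .clock_mode     = CH7006_CLOCK_SLAVE,
        .clock_edge     = CH7006_CLOCK_EDGE_NEG,
        .xcm            = 0,
        .pcm            = 0,
        .sync_direction = CH7006_SYNC_SLAVE,
        .sync_encoding  = CH7006_SYNC_SEPARATED,
        .pout_level     = CH7006_POUT_3_3V,
        .active_detect  = CH7006_ACTIVE_HSYNC,
};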
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 7e0cb1da92e..ec3f5e80a5d 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -27,11 +27,11 @@
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
+#include "drm.h"
+
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
-#include <linux/types.h>
-#include "drm.h"
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
@@ -186,6 +186,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
+#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -221,8 +223,10 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
-#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id)
+#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_IOCTL_I915_OVERLAY_ATTRS, struct drm_intel_overlay_put_image)
+#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -266,6 +270,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
+#define I915_PARAM_HAS_OVERLAY 7
+#define I915_PARAM_HAS_PAGEFLIPPING 8
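A short userspace sketch of probing the new parameter, assuming the existing DRM_IOCTL_I915_GETPARAM ioctl from this header and libdrm's drmIoctl():

#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Returns non-zero if the kernel reports page-flipping support. */
static int demo_has_pageflipping(int fd)
{
        int value = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_PAGEFLIPPING,
                .value = &value,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 0;

        return value;
}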
typedef struct drm_i915_getparam {
int param;
@@ -686,4 +692,70 @@ struct drm_i915_gem_madvise {
__u32 retained;
};
+/* flags */
+#define I915_OVERLAY_TYPE_MASK 0xff
+#define I915_OVERLAY_YUV_PLANAR 0x01
+#define I915_OVERLAY_YUV_PACKED 0x02
+#define I915_OVERLAY_RGB 0x03
+
+#define I915_OVERLAY_DEPTH_MASK 0xff00
+#define I915_OVERLAY_RGB24 0x1000
+#define I915_OVERLAY_RGB16 0x2000
+#define I915_OVERLAY_RGB15 0x3000
+#define I915_OVERLAY_YUV422 0x0100
+#define I915_OVERLAY_YUV411 0x0200
+#define I915_OVERLAY_YUV420 0x0300
+#define I915_OVERLAY_YUV410 0x0400
+
+#define I915_OVERLAY_SWAP_MASK 0xff0000
+#define I915_OVERLAY_NO_SWAP 0x000000
+#define I915_OVERLAY_UV_SWAP 0x010000
+#define I915_OVERLAY_Y_SWAP 0x020000
+#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
+
+#define I915_OVERLAY_FLAGS_MASK 0xff000000
+#define I915_OVERLAY_ENABLE 0x01000000
+
+struct drm_intel_overlay_put_image {
+ /* various flags and src format description */
+ __u32 flags;
+ /* source picture description */
+ __u32 bo_handle;
+ /* stride values and offsets are in bytes, buffer relative */
+ __u16 stride_Y; /* stride for packed formats */
+ __u16 stride_UV;
+ __u32 offset_Y; /* offset for packed formats */
+ __u32 offset_U;
+ __u32 offset_V;
+ /* in pixels */
+ __u16 src_width;
+ __u16 src_height;
+ /* to compensate the scaling factors for partially covered surfaces */
+ __u16 src_scan_width;
+ __u16 src_scan_height;
+ /* output crtc description */
+ __u32 crtc_id;
+ __u16 dst_x;
+ __u16 dst_y;
+ __u16 dst_width;
+ __u16 dst_height;
+};
+
+/* flags */
+#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
+#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+struct drm_intel_overlay_attrs {
+ __u32 flags;
+ __u32 color_key;
+ __s32 brightness;
+ __u32 contrast;
+ __u32 saturation;
+ __u32 gamma0;
+ __u32 gamma1;
+ __u32 gamma2;
+ __u32 gamma3;
+ __u32 gamma4;
+ __u32 gamma5;
+};
+
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 325fd6fb4a4..3ffbc4798af 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -35,7 +35,7 @@
#ifndef __MGA_DRM_H__
#define __MGA_DRM_H__
-#include <linux/types.h>
+#include "drm.h"
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
new file mode 100644
index 00000000000..1e67c441ea8
--- /dev/null
+++ b/include/drm/nouveau_drm.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRM_H__
+#define __NOUVEAU_DRM_H__
+
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15
+
+struct drm_nouveau_channel_alloc {
+ uint32_t fb_ctxdma_handle;
+ uint32_t tt_ctxdma_handle;
+
+ int channel;
+
+ /* Notifier memory */
+ uint32_t notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ uint32_t handle;
+ uint32_t grclass;
+ } subchan[8];
+ uint32_t nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+ int channel;
+ uint32_t handle;
+ int class;
+};
+
+struct drm_nouveau_notifierobj_alloc {
+ uint32_t channel;
+ uint32_t handle;
+ uint32_t size;
+ uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+ int channel;
+ uint32_t handle;
+};
+
+/* FIXME : maybe unify {GET,SET}PARAMs */
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
+#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+struct drm_nouveau_getparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+struct drm_nouveau_setparam {
+ uint64_t param;
+ uint64_t value;
+};
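A userspace sketch of the getparam interface, assuming libdrm's generic drmCommandWriteRead() helper and this header installed as <drm/nouveau_drm.h> (libdrm_nouveau wraps this differently):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/nouveau_drm.h>

/* Read the chipset id; returns 0 on error. */
static uint64_t demo_chipset_id(int fd)
{
        struct drm_nouveau_getparam gp = {
                .param = NOUVEAU_GETPARAM_CHIPSET_ID,
        };

        if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)))
                return 0;

        return gp.value;
}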
+
+#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
+#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
+#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+
+struct drm_nouveau_gem_info {
+ uint32_t handle;
+ uint32_t domain;
+ uint64_t size;
+ uint64_t offset;
+ uint64_t map_handle;
+ uint32_t tile_mode;
+ uint32_t tile_flags;
+};
+
+struct drm_nouveau_gem_new {
+ struct drm_nouveau_gem_info info;
+ uint32_t channel_hint;
+ uint32_t align;
+};
+
+struct drm_nouveau_gem_pushbuf_bo {
+ uint64_t user_priv;
+ uint32_t handle;
+ uint32_t read_domains;
+ uint32_t write_domains;
+ uint32_t valid_domains;
+ uint32_t presumed_ok;
+ uint32_t presumed_domain;
+ uint64_t presumed_offset;
+};
+
+#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
+#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
+#define NOUVEAU_GEM_RELOC_OR (1 << 2)
+struct drm_nouveau_gem_pushbuf_reloc {
+ uint32_t bo_index;
+ uint32_t reloc_index;
+ uint32_t flags;
+ uint32_t data;
+ uint32_t vor;
+ uint32_t tor;
+};
+
+#define NOUVEAU_GEM_MAX_BUFFERS 1024
+#define NOUVEAU_GEM_MAX_RELOCS 1024
+
+struct drm_nouveau_gem_pushbuf {
+ uint32_t channel;
+ uint32_t nr_dwords;
+ uint32_t nr_buffers;
+ uint32_t nr_relocs;
+ uint64_t dwords;
+ uint64_t buffers;
+ uint64_t relocs;
+};
+
+struct drm_nouveau_gem_pushbuf_call {
+ uint32_t channel;
+ uint32_t handle;
+ uint32_t offset;
+ uint32_t nr_buffers;
+ uint32_t nr_relocs;
+ uint32_t nr_dwords;
+ uint64_t buffers;
+ uint64_t relocs;
+ uint32_t suffix0;
+ uint32_t suffix1;
+ /* below only accessed for CALL2 */
+ uint64_t vram_available;
+ uint64_t gart_available;
+};
+
+struct drm_nouveau_gem_pin {
+ uint32_t handle;
+ uint32_t domain;
+ uint64_t offset;
+};
+
+struct drm_nouveau_gem_unpin {
+ uint32_t handle;
+};
+
+#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
+#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
+#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
+struct drm_nouveau_gem_cpu_prep {
+ uint32_t handle;
+ uint32_t flags;
+};
+
+struct drm_nouveau_gem_cpu_fini {
+ uint32_t handle;
+};
+
+struct drm_nouveau_gem_tile {
+ uint32_t handle;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t tile_mode;
+ uint32_t tile_flags;
+};
+
+enum nouveau_bus_type {
+ NV_AGP = 0,
+ NV_PCI = 1,
+ NV_PCIE = 2,
+};
+
+struct drm_nouveau_sarea {
+};
+
+#define DRM_NOUVEAU_CARD_INIT 0x00
+#define DRM_NOUVEAU_GETPARAM 0x01
+#define DRM_NOUVEAU_SETPARAM 0x02
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03
+#define DRM_NOUVEAU_CHANNEL_FREE 0x04
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x05
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
+#define DRM_NOUVEAU_GEM_NEW 0x40
+#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
+#define DRM_NOUVEAU_GEM_PUSHBUF_CALL 0x42
+#define DRM_NOUVEAU_GEM_PIN 0x43 /* !KMS only */
+#define DRM_NOUVEAU_GEM_UNPIN 0x44 /* !KMS only */
+#define DRM_NOUVEAU_GEM_CPU_PREP 0x45
+#define DRM_NOUVEAU_GEM_CPU_FINI 0x46
+#define DRM_NOUVEAU_GEM_INFO 0x47
+#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2 0x48
+
+#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 3b9932ab175..39537f3cf98 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -33,7 +33,7 @@
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
-#include <linux/types.h>
+#include "drm.h"
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 49114617052..81eb9f45883 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,6 +44,29 @@ struct ttm_bo_device;
struct drm_mm_node;
+
+/**
+ * struct ttm_placement
+ *
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
+ * @num_placement: number of preferred placements
+ * @placement: preferred placements
+ * @num_busy_placement: number of preferred placements when the buffer must be evicted
+ * @busy_placement: preferred placements when the buffer must be evicted
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+ unsigned fpfn;
+ unsigned lpfn;
+ unsigned num_placement;
+ const uint32_t *placement;
+ unsigned num_busy_placement;
+ const uint32_t *busy_placement;
+};
+
+
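A driver-side sketch of filling this structure, assuming the TTM_PL_FLAG_* bits from ttm/ttm_placement.h: prefer cached VRAM but accept GTT, and fall back to GTT alone when something has to be evicted:

#include <linux/types.h>
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static const uint32_t demo_placements[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
};

static const struct ttm_placement demo_placement = {
        .fpfn                   = 0,    /* no page-range restriction */
        .lpfn                   = 0,
        .num_placement          = 2,
        .placement              = demo_placements,
        .num_busy_placement     = 1,    /* under pressure, GTT is enough */
        .busy_placement         = &demo_placements[1],
};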
/**
* struct ttm_mem_reg
*
@@ -109,10 +132,6 @@ struct ttm_tt;
* the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change.
* @lock: spinlock protecting mostly synchronization members.
- * @proposed_placement: Proposed placement for the buffer. Changed only by the
- * creator prior to validation as opposed to bo->mem.proposed_flags which is
- * changed by the implementation prior to a buffer move if it wants to outsmart
- * the buffer creator / user. This latter happens, for example, at eviction.
* @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
@@ -177,7 +196,6 @@ struct ttm_buffer_object {
* Members protected by the bo::reserved lock.
*/
- uint32_t proposed_placement;
struct ttm_mem_reg mem;
struct file *persistant_swap_storage;
struct ttm_tt *ttm;
@@ -285,29 +303,30 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
* Note: It might be necessary to block validations before the
* wait by reserving the buffer.
* Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTART if interrupted by a signal.
+ * Returns -ERESTARTSYS if interrupted by a signal.
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
/**
- * ttm_buffer_object_validate
+ * ttm_bo_validate
*
* @bo: The buffer object.
- * @proposed_placement: Proposed_placement for the buffer object.
+ * @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
* @no_wait: Return immediately if the buffer is busy.
*
* Changes placement and caching policy of the buffer object
- * according to bo::proposed_flags.
+ * according to the proposed placement.
* Returns
- * -EINVAL on invalid proposed_flags.
+ * -EINVAL on invalid proposed placement.
* -ENOMEM on out-of-memory condition.
* -EBUSY if no_wait is true and buffer busy.
- * -ERESTART if interrupted by a signal.
+ * -ERESTARTSYS if interrupted by a signal.
*/
-extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- bool interruptible, bool no_wait);
+extern int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ bool interruptible, bool no_wait);
+
/**
* ttm_bo_unref
*
@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
* waiting for buffer idle. This lock is recursive.
* Returns
* -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTART if interrupted by a signal.
+ * -ERESTARTSYS if interrupted by a signal.
*/
extern int
@@ -343,7 +362,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
/**
- * ttm_buffer_object_init
+ * ttm_bo_init
*
* @bdev: Pointer to a ttm_bo_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
@@ -371,20 +390,20 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
- * -ERESTART: Interrupted by signal while sleeping waiting for resources.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
- struct ttm_buffer_object *bo,
- unsigned long size,
- enum ttm_bo_type type,
- uint32_t flags,
- uint32_t page_alignment,
- unsigned long buffer_start,
- bool interrubtible,
- struct file *persistant_swap_storage,
- size_t acc_size,
- void (*destroy) (struct ttm_buffer_object *));
+extern int ttm_bo_init(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ size_t acc_size,
+ void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_bo_synccpu_object_init
*
@@ -405,47 +424,43 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
* GEM user interface.
* @p_bo: On successful completion *p_bo points to the created object.
*
- * This function allocates a ttm_buffer_object, and then calls
- * ttm_buffer_object_init on that object.
- * The destroy function is set to kfree().
+ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
+ * on that object. The destroy function is set to kfree().
* Returns
* -ENOMEM: Out of memory.
* -EINVAL: Invalid placement flags.
- * -ERESTART: Interrupted by signal while waiting for resources.
+ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
*/
-extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
- unsigned long size,
- enum ttm_bo_type type,
- uint32_t flags,
- uint32_t page_alignment,
- unsigned long buffer_start,
- bool interruptible,
- struct file *persistant_swap_storage,
- struct ttm_buffer_object **p_bo);
+extern int ttm_bo_create(struct ttm_bo_device *bdev,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ bool interruptible,
+ struct file *persistant_swap_storage,
+ struct ttm_buffer_object **p_bo);
/**
* ttm_bo_check_placement
*
- * @bo: the buffer object.
- * @set_flags: placement flags to set.
- * @clr_flags: placement flags to clear.
+ * @bo: the buffer object.
+ * @placement: placements
*
* Performs minimal validity checking on an intended change of
* placement flags.
* Returns
* -EINVAL: Intended change is invalid or not allowed.
*/
-
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
- uint32_t set_flags, uint32_t clr_flags);
+ struct ttm_placement *placement);
/**
* ttm_bo_init_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
- * @p_offset: offset for managed area in pages.
* @p_size: size managed area in pages.
*
* Initialize a manager for a given memory type.
@@ -458,7 +473,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
- unsigned long p_offset, unsigned long p_size);
+ unsigned long p_size);
/**
* ttm_bo_clean_mm
*
@@ -503,7 +518,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
*
* Returns:
* -EINVAL: Invalid or uninitialized memory type.
- * -ERESTART: The call was interrupted by a signal while waiting to
+ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer.
*/
@@ -606,7 +621,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* be called from the fops::read and fops::write method.
* Returns:
* See man (2) write, man(2) read. In particular,
- * the function may return -EINTR if
+ * the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e8cd6d20aed..ff7664e0c3c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
/**
* struct ttm_bo_driver
*
- * @mem_type_prio: Priority array of memory types to place a buffer object in
- * if it fits without evicting buffers from any of these memory types.
- * @mem_busy_prio: Priority array of memory types to place a buffer object in
- * if it needs to evict buffers to make room.
- * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
- * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
* @create_ttm_backend_entry: Callback to create a struct ttm_backend.
* @invalidate_caches: Callback to invalidate read caches when a buffer object
* has been evicted.
@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
*/
struct ttm_bo_driver {
- const uint32_t *mem_type_prio;
- const uint32_t *mem_busy_prio;
- uint32_t num_mem_type_prio;
- uint32_t num_mem_busy_prio;
-
/**
* struct ttm_bo_driver member create_ttm_backend_entry
*
@@ -306,7 +295,8 @@ struct ttm_bo_driver {
* finished, they'll end up in bo->mem.flags
*/
- uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
+ void(*evict_flags) (struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
/**
* struct ttm_bo_driver member move:
*
@@ -545,6 +535,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
/**
+ * ttm_tt_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_tt_populate(struct ttm_tt *ttm);
+
+/**
* ttm_ttm_destroy:
*
* @ttm: The struct ttm_tt.
@@ -639,12 +638,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* -EBUSY: No space available (only if no_wait == 1).
* -ENOMEM: Could not allocate memory for the buffer object, either due to
* fragmentation or concurrent allocators.
- * -ERESTART: An interruptible sleep was interrupted by a signal.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- uint32_t proposed_placement,
- struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait);
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait);
/**
* ttm_bo_wait_for_cpu
*
@@ -654,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
* Wait until a buffer object is no longer sync'ed for CPU access.
* Returns:
* -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTART: An interruptible sleep was interrupted by a signal.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
@@ -758,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
* -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1).
- * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
@@ -799,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
*
* Returns:
* -EBUSY: If no_wait == 1 and the buffer is already reserved.
- * -ERESTART: If interruptible == 1 and the process received a signal
+ * -ERESTARTSYS: If interruptible == 1 and the process received a signal
* while sleeping.
*/
extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
new file mode 100644
index 00000000000..cd2c475da9e
--- /dev/null
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -0,0 +1,107 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_EXECBUF_UTIL_H_
+#define _TTM_EXECBUF_UTIL_H_
+
+#include "ttm/ttm_bo_api.h"
+#include <linux/list.h>
+
+/**
+ * struct ttm_validate_buffer
+ *
+ * @head: list head for thread-private list.
+ * @bo: refcounted buffer object pointer.
+ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
+ * adding a new sync object.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ */
+
+struct ttm_validate_buffer {
+ struct list_head head;
+ struct ttm_buffer_object *bo;
+ void *new_sync_obj_arg;
+ bool reserved;
+};
+
+/**
+ * function ttm_eu_backoff_reservation
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ *
+ * Undoes all buffer validation reservations for bos pointed to by
+ * the list entries.
+ */
+
+extern void ttm_eu_backoff_reservation(struct list_head *list);
+
+/**
+ * function ttm_eu_reserve_buffers
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @val_seq: A unique sequence number.
+ *
+ * Tries to reserve bos pointed to by the list entries for validation.
+ * If the function returns 0, all buffers are marked as "unfenced",
+ * taken off the lru lists and are not synced for write CPU usage.
+ *
+ * If the function detects a deadlock due to multiple threads trying to
+ * reserve the same buffers in reverse order, all threads except one will
+ * back off and retry. This function may sleep while waiting for
+ * CPU write reservations to be cleared, and for other threads to
+ * unreserve their buffers.
+ *
+ * This function may return -ERESTART or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
+ *
+ * Buffers reserved by this function should be unreserved by
+ * a call to either ttm_eu_backoff_reservation() or
+ * ttm_eu_fence_buffer_objects() when command submission is complete or
+ * has failed.
+ */
+
+extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+
+/**
+ * function ttm_eu_fence_buffer_objects.
+ *
+ * @list: thread private list of ttm_validate_buffer structs.
+ * @sync_obj: The new sync object for the buffers.
+ *
+ * This function should be called when command submission is complete, and
+ * it will add a new sync object to bos pointed to by entries on @list.
+ * It also unreserves all buffers, putting them on lru lists.
+ *
+ */
+
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
+
+#endif
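A sketch of the reserve/submit/fence pattern these helpers are meant for; the submission step and the fence object are driver-specific placeholders:

#include "ttm/ttm_execbuf_util.h"

extern int driver_submit_commands(struct list_head *list);      /* placeholder */

static int demo_submit(struct list_head *val_list, uint32_t val_seq,
                       void *fence_obj)
{
        int ret;

        ret = ttm_eu_reserve_buffers(val_list, val_seq);
        if (ret)
                return ret;     /* e.g. -ERESTART; nothing is left reserved */

        ret = driver_submit_commands(val_list);
        if (ret) {
                ttm_eu_backoff_reservation(val_list);
                return ret;
        }

        /* Attach the new fence and put the buffers back on the LRU lists. */
        ttm_eu_fence_buffer_objects(val_list, fence_obj);
        return 0;
}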
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
new file mode 100644
index 00000000000..81ba0b0b891
--- /dev/null
+++ b/include/drm/ttm/ttm_lock.h
@@ -0,0 +1,247 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+/** @file ttm_lock.h
+ * This file implements a simple replacement for the buffer manager use
+ * of the DRM heavyweight hardware lock.
+ * The lock is a read-write lock. Taking it in read mode and write mode
+ * is relatively fast, and intended for in-kernel use only.
+ *
+ * The vt mode is used only when there is a need to block all
+ * user-space processes from validating buffers.
+ * It's allowed to leave kernel space with the vt lock held.
+ * If a user-space process dies while holding the vt lock,
+ * it will be released during the file descriptor release. The vt lock
+ * excludes write lock and read lock.
+ *
+ * The suspend mode is used to lock out all TTM users when preparing for
+ * and executing suspend operations.
+ *
+ */
+
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include "ttm/ttm_object.h"
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+/**
+ * struct ttm_lock
+ *
+ * @base: ttm base object used solely to release the lock if the client
+ * holding the lock dies.
+ * @queue: Queue for processes waiting for lock change-of-status.
+ * @lock: Spinlock protecting some lock members.
+ * @rw: Read-write lock counter. Protected by @lock.
+ * @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
+ */
+
+struct ttm_lock {
+ struct ttm_base_object base;
+ wait_queue_head_t queue;
+ spinlock_t lock;
+ int32_t rw;
+ uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
+};
+
+
+/**
+ * ttm_lock_init
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * Initializes the lock.
+ */
+extern void ttm_lock_init(struct ttm_lock *lock);
+
+/**
+ * ttm_read_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a read lock.
+ */
+extern void ttm_read_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_read_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in read mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_read_trylock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Tries to take the lock in read mode. If the lock is already held
+ * in write mode, the function will return -EBUSY. If the lock is held
+ * in vt or suspend mode, the function will sleep until these modes
+ * are unlocked.
+ *
+ * Returns:
+ * -EBUSY The lock was already held in write mode.
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_write_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a write lock.
+ */
+extern void ttm_write_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_write_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ *
+ * Takes the lock in write mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ */
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+
+/**
+ * ttm_lock_downgrade
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Downgrades a write lock to a read lock.
+ */
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Takes the lock in suspend mode. Excludes read and write mode.
+ */
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+
+/**
+ * ttm_suspend_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a suspend lock
+ */
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_vt_lock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @interruptible: Interruptible sleeping while waiting for a lock.
+ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
+ *
+ * Takes the lock in vt mode.
+ * Returns:
+ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
+ * -ENOMEM: Out of memory when locking.
+ */
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+ struct ttm_object_file *tfile);
+
+/**
+ * ttm_vt_unlock
+ *
+ * @lock: Pointer to a struct ttm_lock
+ *
+ * Releases a vt lock.
+ * Returns:
+ * -EINVAL If the lock was not held.
+ */
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources have been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
+
+#endif
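A sketch of the master-exit sequence described above; the resource teardown step is a driver-specific placeholder:

#include <linux/signal.h>
#include "ttm/ttm_lock.h"

extern void driver_take_down_resources(void);   /* placeholder */

static void demo_master_drop(struct ttm_lock *lock)
{
        /* interruptible == false, so this cannot fail with -ERESTARTSYS */
        ttm_write_lock(lock, false);

        /* Late-coming validators get killed instead of blocking forever. */
        ttm_lock_set_kill(lock, true, SIGTERM);

        driver_take_down_resources();

        ttm_write_unlock(lock);
}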
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 6983a7cf4da..b199170b3c2 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -33,6 +33,7 @@
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kobject.h>
+#include <linux/mm.h>
/**
* struct ttm_mem_shrink - callback to shrink TTM memory usage.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
new file mode 100644
index 00000000000..0d9db099978
--- /dev/null
+++ b/include/drm/ttm/ttm_object.h
@@ -0,0 +1,271 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+/** @file ttm_object.h
+ *
+ * Base- and reference object implementation for the various
+ * ttm objects. Implements reference counting, minimal security checks
+ * and release on file close.
+ */
+
+#ifndef _TTM_OBJECT_H_
+#define _TTM_OBJECT_H_
+
+#include <linux/list.h>
+#include "drm_hashtab.h"
+#include <linux/kref.h>
+#include <ttm/ttm_memory.h>
+
+/**
+ * enum ttm_ref_type
+ *
+ * Describes what type of reference a ref object holds.
+ *
+ * TTM_REF_USAGE is a simple refcount on a base object.
+ *
+ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
+ * buffer object.
+ *
+ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
+ * buffer object.
+ *
+ */
+
+enum ttm_ref_type {
+ TTM_REF_USAGE,
+ TTM_REF_SYNCCPU_READ,
+ TTM_REF_SYNCCPU_WRITE,
+ TTM_REF_NUM
+};
+
+/**
+ * enum ttm_object_type
+ *
+ * One entry per ttm object type.
+ * Device-specific types should use the
+ * ttm_driver_typex types.
+ */
+
+enum ttm_object_type {
+ ttm_fence_type,
+ ttm_buffer_type,
+ ttm_lock_type,
+ ttm_driver_type0 = 256,
+ ttm_driver_type1,
+ ttm_driver_type2,
+ ttm_driver_type3,
+ ttm_driver_type4,
+ ttm_driver_type5
+};
+
+struct ttm_object_file;
+struct ttm_object_device;
+
+/**
+ * struct ttm_base_object
+ *
+ * @hash: hash entry for the per-device object hash.
+ * @type: derived type this object is base class for.
+ * @shareable: Other ttm_object_files can access this object.
+ *
+ * @tfile: Pointer to ttm_object_file of the creator.
+ * NULL if the object was not created by a user request.
+ * (kernel object).
+ *
+ * @refcount: Number of references to this object, not
+ * including the hash entry. A reference to a base object can
+ * only be held by a ref object.
+ *
+ * @refcount_release: A function to be called when there are
+ * no more references to this object. This function should
+ * destroy the object (or make sure destruction eventually happens),
+ * and when it is called, the object has
+ * already been taken out of the per-device hash. The parameter
+ * "base" should be set to NULL by the function.
+ *
+ * @ref_obj_release: A function to be called when a reference object
+ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
+ * this function may, for example, release a lock held by a user-space
+ * process.
+ *
+ * This struct is intended to be used as a base struct for objects that
+ * are visible to user-space. It provides a global name, race-safe
+ * access and refcounting, minimal access control and hooks for unref actions.
+ */
+
+struct ttm_base_object {
+ struct drm_hash_item hash;
+ enum ttm_object_type object_type;
+ bool shareable;
+ struct ttm_object_file *tfile;
+ struct kref refcount;
+ void (*refcount_release) (struct ttm_base_object **base);
+ void (*ref_obj_release) (struct ttm_base_object *base,
+ enum ttm_ref_type ref_type);
+};
+
+/**
+ * ttm_base_object_init
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @base: The struct ttm_base_object to initialize.
+ * @shareable: This object is shareable with other applications.
+ * (different @tfile pointers.)
+ * @type: The object type.
+ * @refcount_release: See the struct ttm_base_object description.
+ * @ref_obj_release: See the struct ttm_base_object description.
+ *
+ * Initializes a struct ttm_base_object.
+ */
+
+extern int ttm_base_object_init(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ bool shareable,
+ enum ttm_object_type type,
+ void (*refcount_release) (struct ttm_base_object
+ **),
+ void (*ref_obj_release) (struct ttm_base_object
+ *,
+ enum ttm_ref_type
+ ref_type));
+
+/**
+ * ttm_base_object_lookup
+ *
+ * @tfile: Pointer to a struct ttm_object_file.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * Also verifies that the object is visible to the application, by
+ * comparing the @tfile argument and checking the object shareable flag.
+ */
+
+extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
+ *tfile, uint32_t key);
+
+/**
+ * ttm_base_object_unref
+ *
+ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
+ *
+ * Decrements the base object refcount and clears the pointer pointed to by
+ * p_base.
+ */
+
+extern void ttm_base_object_unref(struct ttm_base_object **p_base);
+
+/**
+ * ttm_ref_object_add.
+ *
+ * @tfile: A struct ttm_object_file representing the application owning the
+ * ref_object.
+ * @base: The base object to reference.
+ * @ref_type: The type of reference.
+ * @existed: Upon completion, indicates that an identical reference object
+ * already existed, and the refcount was upped on that object instead.
+ *
+ * Adding a ref object to a base object is basically like referencing the
+ * base object, but a user-space application holds the reference. When the
+ * file corresponding to @tfile is closed, all its reference objects are
+ * deleted. A reference object can have different types depending on what
+ * it's intended for. It can be used for refcounting to prevent object destruction.
+ * When user-space takes a lock, it can add a ref object to that lock to
+ * make sure the lock is released if the application dies. A ref object
+ * will hold a single reference on a base object.
+ */
+extern int ttm_ref_object_add(struct ttm_object_file *tfile,
+ struct ttm_base_object *base,
+ enum ttm_ref_type ref_type, bool *existed);
+/**
+ * ttm_ref_object_base_unref
+ *
+ * @key: Key representing the base object.
+ * @ref_type: Ref type of the ref object to be dereferenced.
+ *
+ * Unreference a ref object with type @ref_type
+ * on the base object identified by @key. If there are no duplicate
+ * references, the ref object will be destroyed and the base object
+ * will be unreferenced.
+ */
+extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
+ unsigned long key,
+ enum ttm_ref_type ref_type);
+
+/**
+ * ttm_object_file_init - initialize a struct ttm_object_file
+ *
+ * @tdev: A struct ttm_object device this file is initialized on.
+ * @hash_order: Order of the hash table used to hold the reference objects.
+ *
+ * This is typically called by the file_ops::open function.
+ */
+
+extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
+ *tdev,
+ unsigned int hash_order);
+
+/**
+ * ttm_object_file_release - release data held by a ttm_object_file
+ *
+ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
+ * *p_tfile will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_file.
+ * Typically called from file_ops::release. The caller must
+ * ensure that there are no concurrent users of tfile.
+ */
+
+extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
+
+/**
+ * ttm_object_device_init - initialize a struct ttm_object_device
+ *
+ * @hash_order: Order of hash table used to hash the base objects.
+ *
+ * This function is typically called on device initialization to prepare
+ * data structures needed for ttm base and ref objects.
+ */
+
+extern struct ttm_object_device *ttm_object_device_init
+ (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+
+/**
+ * ttm_object_device_release - release data held by a ttm_object_device
+ *
+ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
+ * *p_tdev will be set to NULL by this function.
+ *
+ * Releases all data associated with a ttm_object_device.
+ * Typically called from driver::unload before the destruction of the
+ * device private data structure.
+ */
+
+extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
+
+#endif
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index 170786e5c2f..fd11a5bd892 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -24,7 +24,7 @@
#ifndef _VIA_DRM_H_
#define _VIA_DRM_H_
-#include <linux/types.h>
+#include "drm.h"
/* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver.
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
new file mode 100644
index 00000000000..2be7e1249b6
--- /dev/null
+++ b/include/drm/vmwgfx_drm.h
@@ -0,0 +1,574 @@
+/**************************************************************************
+ *
+ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef __VMWGFX_DRM_H__
+#define __VMWGFX_DRM_H__
+
+#define DRM_VMW_MAX_SURFACE_FACES 6
+#define DRM_VMW_MAX_MIP_LEVELS 24
+
+#define DRM_VMW_EXT_NAME_LEN 128
+
+#define DRM_VMW_GET_PARAM 0
+#define DRM_VMW_ALLOC_DMABUF 1
+#define DRM_VMW_UNREF_DMABUF 2
+#define DRM_VMW_CURSOR_BYPASS 3
+/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
+#define DRM_VMW_CONTROL_STREAM 4
+#define DRM_VMW_CLAIM_STREAM 5
+#define DRM_VMW_UNREF_STREAM 6
+/* guarded by DRM_VMW_PARAM_3D == 1 */
+#define DRM_VMW_CREATE_CONTEXT 7
+#define DRM_VMW_UNREF_CONTEXT 8
+#define DRM_VMW_CREATE_SURFACE 9
+#define DRM_VMW_UNREF_SURFACE 10
+#define DRM_VMW_REF_SURFACE 11
+#define DRM_VMW_EXECBUF 12
+#define DRM_VMW_FIFO_DEBUG 13
+#define DRM_VMW_FENCE_WAIT 14
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GET_PARAM - get device information.
+ *
+ * DRM_VMW_PARAM_FIFO_OFFSET:
+ * Offset to use to map the first page of the FIFO read-only.
+ * The fifo is mapped using the mmap() system call on the drm device.
+ *
+ * DRM_VMW_PARAM_OVERLAY_IOCTL:
+ * Does the driver support the overlay ioctl.
+ */
+
+#define DRM_VMW_PARAM_NUM_STREAMS 0
+#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
+#define DRM_VMW_PARAM_3D 2
+#define DRM_VMW_PARAM_FIFO_OFFSET 3
+
+
+/**
+ * struct drm_vmw_getparam_arg
+ *
+ * @value: Returned value. //Out
+ * @param: Parameter to query. //In.
+ *
+ * Argument to the DRM_VMW_GET_PARAM Ioctl.
+ */
+
+struct drm_vmw_getparam_arg {
+ uint64_t value;
+ uint32_t param;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_EXTENSION - Query device extensions.
+ */
+
+/**
+ * struct drm_vmw_extension_rep
+ *
+ * @exists: The queried extension exists.
+ * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
+ * @driver_sarea_offset: Offset to any space in the DRI SAREA
+ * used by the extension.
+ * @major: Major version number of the extension.
+ * @minor: Minor version number of the extension.
+ * @pl: Patch level version number of the extension.
+ *
+ * Output argument to the DRM_VMW_EXTENSION Ioctl.
+ */
+
+struct drm_vmw_extension_rep {
+ int32_t exists;
+ uint32_t driver_ioctl_offset;
+ uint32_t driver_sarea_offset;
+ uint32_t major;
+ uint32_t minor;
+ uint32_t pl;
+ uint32_t pad64;
+};
+
+/**
+ * union drm_vmw_extension_arg
+ *
+ * @extension - Ascii name of the extension to be queried. //In
+ * @rep - Reply as defined above. //Out
+ *
+ * Argument to the DRM_VMW_EXTENSION Ioctl.
+ */
+
+union drm_vmw_extension_arg {
+ char extension[DRM_VMW_EXT_NAME_LEN];
+ struct drm_vmw_extension_rep rep;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+
+/**
+ * struct drm_vmw_context_arg
+ *
+ * @cid: Device unique context ID.
+ *
+ * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
+ * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
+ */
+
+struct drm_vmw_context_arg {
+ int32_t cid;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_CONTEXT - Free a host context.
+ *
+ * Frees a global context id, and queues a destroy host command for the host.
+ * Does not wait for host completion. The context ID can be used directly
+ * in the command stream and shows up as the same context ID on the host.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_SURFACE - Create a host surface.
+ *
+ * Allocates a device unique surface id, and queues a create surface command
+ * for the host. Does not wait for host completion. The surface ID can be
+ * used directly in the command stream and shows up as the same surface
+ * ID on the host.
+ */
+
+/**
+ * struct drm_vmw_surface_create_req
+ *
+ * @flags: Surface flags as understood by the host.
+ * @format: Surface format as understood by the host.
+ * @mip_levels: Number of mip levels for each face.
+ * An unused face should have 0 encoded.
+ * @size_addr: Address of a user-space array of struct drm_vmw_size
+ * cast to a uint64_t for 32-64 bit compatibility.
+ * The size of the array should equal the total number of mipmap levels.
+ * @shareable: Boolean whether other clients (as identified by file descriptors)
+ * may reference this surface.
+ *
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Output data from the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_create_req {
+ uint32_t flags;
+ uint32_t format;
+ uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+ uint64_t size_addr;
+ int32_t shareable;
+ uint32_t pad64;
+};
+
+/**
+ * struct drm_vmw_surface_arg
+ *
+ * @sid: Surface id of created surface or surface to destroy or reference.
+ *
+ * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
+ * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+struct drm_vmw_surface_arg {
+ int32_t sid;
+ uint32_t pad64;
+};
+
+/**
+ * struct drm_vmw_size
+ *
+ * @width - mip level width
+ * @height - mip level height
+ * @depth - mip level depth
+ *
+ * Description of a mip level.
+ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+struct drm_vmw_size {
+ uint32_t width;
+ uint32_t height;
+ uint32_t depth;
+ uint32_t pad64;
+};
+
+/**
+ * union drm_vmw_surface_create_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_create_arg {
+ struct drm_vmw_surface_arg rep;
+ struct drm_vmw_surface_create_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_REF_SURFACE - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given sid, as previously
+ * returned by the DRM_VMW_CREATE_SURFACE ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface ID in the command
+ * stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * in the DRM_VMW_CREATE_SURFACE ioctl.
+ */
+
+/**
+ * union drm_vmw_surface_reference_arg
+ *
+ * @rep: Output data as described above.
+ * @req: Input data as described above.
+ *
+ * Argument to the DRM_VMW_REF_SURFACE Ioctl.
+ */
+
+union drm_vmw_surface_reference_arg {
+ struct drm_vmw_surface_create_req rep;
+ struct drm_vmw_surface_arg req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
+ *
+ * Clear a reference previously put on a host surface.
+ * When all references are gone, including the one implicitly placed
+ * on creation,
+ * a destroy surface command will be queued for the host.
+ * Does not wait for completion.
+ */
+
+/*************************************************************************/
+/**
+ * DRM_VMW_EXECBUF
+ *
+ * Submit a command buffer for execution on the host, and return a
+ * fence sequence that when signaled, indicates that the command buffer has
+ * executed.
+ */
+
+/**
+ * struct drm_vmw_execbuf_arg
+ *
+ * @commands: User-space address of a command buffer cast to a uint64_t.
+ * @command_size: Size in bytes of the command buffer.
+ * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to a
+ * uint64_t.
+ *
+ * Argument to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+struct drm_vmw_execbuf_arg {
+ uint64_t commands;
+ uint32_t command_size;
+ uint32_t pad64;
+ uint64_t fence_rep;
+};
+
+/**
+ * struct drm_vmw_fence_rep
+ *
+ * @fence_seq: Fence sequence associated with a command submission.
+ * @error: This member should've been set to -EFAULT on submission.
+ * The following actions should be taken on completion:
+ * error == -EFAULT: Fence communication failed. The host is synchronized.
+ * Use the last fence id read from the FIFO fence register.
+ * error != 0 && error != -EFAULT:
+ * Fence submission failed. The host is synchronized. Use the fence_seq member.
+ * error == 0: All is OK. The host may not be synchronized.
+ * Use the fence_seq member.
+ *
+ * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
+ */
+
+struct drm_vmw_fence_rep {
+ uint64_t fence_seq;
+ int32_t error;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_ALLOC_DMABUF
+ *
+ * Allocate a DMA buffer that is visible also to the host.
+ * NOTE: The buffer is
+ * identified by a handle and an offset, which are private to the guest, but
+ * usable in the command stream. The guest kernel may translate these
+ * and patch up the command stream accordingly. In the future, the offset may
+ * be zero at all times, or it may disappear from the interface before it is
+ * fixed.
+ *
+ * The DMA buffer may stay user-space mapped in the guest at all times,
+ * and is thus suitable for sub-allocation.
+ *
+ * DMA buffers are mapped using the mmap() syscall on the drm device.
+ */
+
+/**
+ * struct drm_vmw_alloc_dmabuf_req
+ *
+ * @size: Required minimum size of the buffer.
+ *
+ * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_alloc_dmabuf_req {
+ uint32_t size;
+ uint32_t pad64;
+};
+
+/**
+ * struct drm_vmw_dmabuf_rep
+ *
+ * @map_handle: Offset to use in the mmap() call used to map the buffer.
+ * @handle: Handle unique to this buffer. Used for unreferencing.
+ * @cur_gmr_id: GMR id to use in the command stream when this buffer is
+ * referenced. See note above.
+ * @cur_gmr_offset: Offset to use in the command stream when this buffer is
+ * referenced. See note above.
+ *
+ * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+struct drm_vmw_dmabuf_rep {
+ uint64_t map_handle;
+ uint32_t handle;
+ uint32_t cur_gmr_id;
+ uint32_t cur_gmr_offset;
+ uint32_t pad64;
+};
+
+/**
+ * union drm_vmw_dmabuf_arg
+ *
+ * @req: Input data as described above.
+ * @rep: Output data as described above.
+ *
+ * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ */
+
+union drm_vmw_alloc_dmabuf_arg {
+ struct drm_vmw_alloc_dmabuf_req req;
+ struct drm_vmw_dmabuf_rep rep;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
+ *
+ */
+
+/**
+ * struct drm_vmw_unref_dmabuf_arg
+ *
+ * @handle: Handle indicating what buffer to free. Obtained from the
+ * DRM_VMW_ALLOC_DMABUF Ioctl.
+ *
+ * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
+ */
+
+struct drm_vmw_unref_dmabuf_arg {
+ uint32_t handle;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
+ *
+ * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
+ */
+
+/**
+ * struct drm_vmw_fifo_debug_arg
+ *
+ * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
+ * @debug_buffer_size: Size in bytes of debug buffer //In
+ * @used_size: Number of bytes copied to the buffer // Out
+ * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
+ *
+ * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
+ */
+
+struct drm_vmw_fifo_debug_arg {
+ uint64_t debug_buffer;
+ uint32_t debug_buffer_size;
+ uint32_t used_size;
+ int32_t did_not_fit;
+ uint32_t pad64;
+};
+
+struct drm_vmw_fence_wait_arg {
+ uint64_t sequence;
+ uint64_t kernel_cookie;
+ int32_t cookie_valid;
+ int32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
+ *
+ * This IOCTL controls the overlay units of the svga device.
+ * The SVGA overlay units do not work like regular hardware units in
+ * that they do not automatically read back the contents of the given dma
+ * buffer. Instead they read back only on each call to this ioctl, and
+ * at any point between this call being made and a following call that
+ * either changes the buffer or disables the stream.
+ */
+
+/**
+ * struct drm_vmw_rect
+ *
+ * Defines a rectangle. Used in the overlay ioctl to define
+ * source and destination rectangle.
+ */
+
+struct drm_vmw_rect {
+ int32_t x;
+ int32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+/**
+ * struct drm_vmw_control_stream_arg
+ *
+ * @stream_id: Stream to control
+ * @enabled: If false all following arguments are ignored.
+ * @handle: Handle to buffer for getting data from.
+ * @format: Format of the overlay as understood by the host.
+ * @width: Width of the overlay.
+ * @height: Height of the overlay.
+ * @size: Size of the overlay in bytes.
+ * @pitch: Array of pitches, the last two are only used for YUV12 formats.
+ * @offset: Offset from start of dma buffer to overlay.
+ * @src: Source rect, must be within the defined area above.
+ * @dst: Destination rect, x and y may be negative.
+ *
+ * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
+ */
+
+struct drm_vmw_control_stream_arg {
+ uint32_t stream_id;
+ uint32_t enabled;
+
+ uint32_t flags;
+ uint32_t color_key;
+
+ uint32_t handle;
+ uint32_t offset;
+ int32_t format;
+ uint32_t size;
+ uint32_t width;
+ uint32_t height;
+ uint32_t pitch[3];
+
+ uint32_t pad64;
+ struct drm_vmw_rect src;
+ struct drm_vmw_rect dst;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
+ *
+ */
+
+#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
+#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
+
+/**
+ * struct drm_vmw_cursor_bypass_arg
+ *
+ * @flags: Flags.
+ * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
+ * @xpos: X position of cursor.
+ * @ypos: Y position of cursor.
+ * @xhot: X hotspot.
+ * @yhot: Y hotspot.
+ *
+ * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
+ */
+
+struct drm_vmw_cursor_bypass_arg {
+ uint32_t flags;
+ uint32_t crtc_id;
+ int32_t xpos;
+ int32_t ypos;
+ int32_t xhot;
+ int32_t yhot;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_CLAIM_STREAM - Claim a single stream.
+ */
+
+/**
+ * struct drm_vmw_stream_arg
+ *
+ * @stream_id: Device unique stream ID.
+ *
+ * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
+ * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
+ */
+
+struct drm_vmw_stream_arg {
+ uint32_t stream_id;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UNREF_STREAM - Unclaim a stream.
+ *
+ * Return a single stream that was claimed by this process. Also makes
+ * sure that the stream has been stopped.
+ */
+
+#endif
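
As a rough user-space illustration of the parameter ioctl defined above, a hedged sketch using libdrm; the driver name passed to drmOpen() and the include path are assumptions:

#include <stdio.h>
#include <xf86drm.h>		/* libdrm: drmOpen, drmCommandWriteRead, drmClose */
#include "vmwgfx_drm.h"		/* the header added above */

int main(void)
{
	struct drm_vmw_getparam_arg arg = { 0 };
	int fd, ret;

	fd = drmOpen("vmwgfx", NULL);
	if (fd < 0)
		return 1;

	arg.param = DRM_VMW_PARAM_3D;
	ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg));
	if (ret == 0)
		printf("3D support: %s\n", arg.value ? "yes" : "no");

	drmClose(fd);
	return ret;
}
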
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 5a5385749e1..756f831cbdd 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -118,6 +118,7 @@ header-y += mtio.h
header-y += ncp_no.h
header-y += neighbour.h
header-y += net_dropmon.h
+header-y += net_tstamp.h
header-y += netfilter_arp.h
header-y += netrom.h
header-y += nfs2.h
@@ -214,7 +215,6 @@ unifdef-y += futex.h
unifdef-y += fs.h
unifdef-y += gameport.h
unifdef-y += generic_serial.h
-unifdef-y += hayesesp.h
unifdef-y += hdlcdrv.h
unifdef-y += hdlc.h
unifdef-y += hdreg.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dfcd920c3e5..ce945d4845f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -240,7 +240,7 @@ extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
#define NID_INVAL (-1)
-int acpi_check_resource_conflict(struct resource *res);
+int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
@@ -253,10 +253,16 @@ void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
#endif /* CONFIG_PM_SLEEP */
+struct acpi_osc_context {
+ char *uuid_str; /* uuid string */
+ int rev;
+ struct acpi_buffer cap; /* arg2/arg3 */
+ struct acpi_buffer ret; /* freed by caller on success */
+};
+
#define OSC_QUERY_TYPE 0
#define OSC_SUPPORT_TYPE 1
#define OSC_CONTROL_TYPE 2
-#define OSC_SUPPORT_MASKS 0x1f
/* _OSC DW0 Definition */
#define OSC_QUERY_ENABLE 1
@@ -265,12 +271,23 @@ void __init acpi_s4_no_nvs(void);
#define OSC_INVALID_REVISION_ERROR 8
#define OSC_CAPABILITIES_MASK_ERROR 16
+acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
+
+/* platform-wide _OSC bits */
+#define OSC_SB_PAD_SUPPORT 1
+#define OSC_SB_PPC_OST_SUPPORT 2
+#define OSC_SB_PR3_SUPPORT 4
+#define OSC_SB_CPUHP_OST_SUPPORT 8
+#define OSC_SB_APEI_SUPPORT 16
+
+/* PCI defined _OSC bits */
/* _OSC DW1 Definition (OS Support Fields) */
#define OSC_EXT_PCI_CONFIG_SUPPORT 1
#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
#define OSC_MSI_SUPPORT 16
+#define OSC_PCI_SUPPORT_MASKS 0x1f
/* _OSC DW1 Definition (OS Control Fields) */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
@@ -279,7 +296,7 @@ void __init acpi_s4_no_nvs(void);
#define OSC_PCI_EXPRESS_AER_CONTROL 8
#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
-#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
+#define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
OSC_SHPC_NATIVE_HP_CONTROL | \
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
diff --git a/include/linux/aio.h b/include/linux/aio.h
index aea219d7d8d..811dbb36937 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -102,7 +102,6 @@ struct kiocb {
} ki_obj;
__u64 ki_user_data; /* user's data for completion */
- wait_queue_t ki_wait;
loff_t ki_pos;
void *private;
@@ -140,7 +139,6 @@ struct kiocb {
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
(x)->ki_user_data = 0; \
- init_wait((&(x)->ki_wait)); \
} while (0)
#define AIO_RING_MAGIC 0xa10a10a1
@@ -223,8 +221,6 @@ struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
#endif /* CONFIG_AIO */
-#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-
static inline struct kiocb *list_kiocb(struct list_head *h)
{
return list_entry(h, struct kiocb, ki_list);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c8..3e09b345f4d 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
#define ATMEL_MCI_MAX_NR_SLOTS 2
-#include <linux/dw_dmac.h>
-
/**
* struct mci_slot_pdata - board-specific per-slot configuration
* @bus_width: Number of data lines wired up the slot
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
* @slot: Per-slot configuration data.
*/
struct mci_platform_data {
- struct dw_dma_slave dma_slave;
+ struct mci_dma_data *dma_slave;
struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS];
};
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 0f5f57858a2..8c4f884db6b 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -36,18 +36,18 @@ struct backlight_device;
struct fb_info;
struct backlight_ops {
- unsigned int options;
+ const unsigned int options;
#define BL_CORE_SUSPENDRESUME (1 << 0)
/* Notify the backlight driver some property has changed */
- int (*update_status)(struct backlight_device *);
+ int (* const update_status)(struct backlight_device *);
/* Return the current backlight brightness (accounting for power,
fb_blank etc.) */
- int (*get_brightness)(struct backlight_device *);
+ int (* const get_brightness)(struct backlight_device *);
/* Check if given framebuffer device is the one bound to this backlight;
return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
- int (*check_fb)(struct fb_info *);
+ int (* const check_fb)(struct fb_info *);
};
/* This structure defines all the properties of a backlight */
@@ -86,7 +86,7 @@ struct backlight_device {
registered this device has been unloaded, and if class_get_devdata()
points to something in the body of that driver, it is also invalid. */
struct mutex ops_lock;
- struct backlight_ops *ops;
+ const struct backlight_ops *ops;
/* The framebuffer notifier block */
struct notifier_block fb_notif;
@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
}
extern struct backlight_device *backlight_device_register(const char *name,
- struct device *dev, void *devdata, struct backlight_ops *ops);
+ struct device *dev, void *devdata, const struct backlight_ops *ops);
extern void backlight_device_unregister(struct backlight_device *bd);
extern void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index aece486ac73..cd4349bdc34 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -68,6 +68,14 @@ struct linux_binprm{
#define BINPRM_MAX_RECURSION 4
+/* Function parameter for binfmt->coredump */
+struct coredump_params {
+ long signr;
+ struct pt_regs *regs;
+ struct file *file;
+ unsigned long limit;
+};
+
/*
* This structure defines the functions that are used to load the binary formats that
* linux accepts.
@@ -77,7 +85,7 @@ struct linux_binfmt {
struct module *module;
int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
int (*load_shlib)(struct file *);
- int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+ int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
int hasvdso;
};
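
A minimal sketch of a binary-format handler adapted to the new prototype; my_load_binary is only a stub and the values chosen are illustrative, not the ELF handler's:

#include <linux/binfmts.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int my_load_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	return -ENOEXEC;	/* placeholder: not a real loader */
}

static int my_core_dump(struct coredump_params *cprm)
{
	/* The former signr/regs/file/limit arguments now arrive in one struct. */
	if (cprm->limit < PAGE_SIZE)
		return 0;	/* refuse to dump: nothing written */

	/* ... write the dump to cprm->file using cprm->regs and cprm->signr ... */
	return 1;		/* non-zero: a dump was produced */
}

/* registered with register_binfmt(&my_format) at module init */
static struct linux_binfmt my_format = {
	.module		= THIS_MODULE,
	.load_binary	= my_load_binary,
	.core_dump	= my_core_dump,
	.min_coredump	= PAGE_SIZE,	/* illustrative */
};
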
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 756d78b8c1c..daf8c480c78 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -42,6 +42,9 @@
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -108,6 +111,14 @@ extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+extern void bitmap_set(unsigned long *map, int i, int len);
+extern void bitmap_clear(unsigned long *map, int start, int nr);
+extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask);
+
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
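
A small sketch of how the new area helpers combine when carving contiguous ranges out of an allocation bitmap; the bitmap size and the 4-bit alignment mask are example values:

#include <linux/bitmap.h>
#include <linux/errno.h>

#define MY_NR_SLOTS	128

static DECLARE_BITMAP(my_slots, MY_NR_SLOTS);

static int my_alloc_slots(unsigned int nr)
{
	unsigned long start;

	/* Look for nr consecutive zero bits, aligned to 4 (align_mask = 3). */
	start = bitmap_find_next_zero_area(my_slots, MY_NR_SLOTS, 0, nr, 3);
	if (start >= MY_NR_SLOTS)
		return -ENOMEM;

	bitmap_set(my_slots, start, nr);
	return start;
}

static void my_free_slots(int start, unsigned int nr)
{
	bitmap_clear(my_slots, start, nr);
}
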
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 1ed2a5cc03f..3db7767d2a1 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -51,6 +51,15 @@ struct can_priv {
struct sk_buff **echo_skb;
};
+/*
+ * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
+ * to __u8 and limit the dlc value to a maximum of 8 data bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define get_can_dlc(i) (min_t(__u8, (i), 8))
+
struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
void free_candev(struct net_device *dev);
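
A hedged sketch of the receive-path use the comment above describes; the hw_dlc register value and frame buffer are assumptions standing in for real hardware access:

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/string.h>

static void my_fill_rx_frame(struct can_frame *cf, u32 hw_dlc, const u8 *buf)
{
	/* Clamp whatever the controller reports to the 0..8 range (ISO 11898-1). */
	cf->can_dlc = get_can_dlc(hw_dlc & 0x0f);
	memcpy(cf->data, buf, cf->can_dlc);
}
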
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 47536197ffd..e287863ac05 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
+extern ssize_t arch_cpu_probe(const char *, size_t);
+extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
@@ -115,6 +117,19 @@ extern void put_online_cpus(void);
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+extern void cpu_hotplug_driver_lock(void);
+extern void cpu_hotplug_driver_unlock(void);
+#else
+static inline void cpu_hotplug_driver_lock(void)
+{
+}
+
+static inline void cpu_hotplug_driver_unlock(void)
+{
+}
+#endif
+
#else /* CONFIG_HOTPLUG_CPU */
#define get_online_cpus() do { } while (0)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 79a2340d83c..4de02b10007 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -232,6 +232,7 @@ struct cpufreq_driver {
/* optional */
unsigned int (*getavg) (struct cpufreq_policy *policy,
unsigned int cpu);
+ int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 789cf5f920c..d77b54733c5 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
#define num_online_cpus() 1
#define num_possible_cpus() 1
#define num_present_cpus() 1
+#define num_active_cpus() 1
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 00000000000..d5a1d4810b8
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
+/*
+ * AMD CS5535/CS5536 definitions
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CS5535_H
+#define _CS5535_H
+
+/* MSRs */
+#define MSR_GLIU_P2D_RO0 0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG 0x48002001
+#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
+ * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLCP_DOTPLL 0x4C000015
+
+#define MSR_LBAR_SMB 0x5140000B
+#define MSR_LBAR_GPIO 0x5140000C
+#define MSR_LBAR_MFGPT 0x5140000D
+#define MSR_LBAR_ACPI 0x5140000E
+#define MSR_LBAR_PMS 0x5140000F
+
+#define MSR_DIVIL_SOFT_RESET 0x51400017
+
+#define MSR_PIC_YSEL_LOW 0x51400020
+#define MSR_PIC_YSEL_HIGH 0x51400021
+#define MSR_PIC_ZSEL_LOW 0x51400022
+#define MSR_PIC_ZSEL_HIGH 0x51400023
+#define MSR_PIC_IRQM_LPC 0x51400025
+
+#define MSR_MFGPT_IRQ 0x51400028
+#define MSR_MFGPT_NR 0x51400029
+#define MSR_MFGPT_SETUP 0x5140002B
+
+#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
+#define MSR_GX_MSR_PADSEL 0xC0002011
+
+/* resource sizes */
+#define LBAR_GPIO_SIZE 0xFF
+#define LBAR_MFGPT_SIZE 0x40
+#define LBAR_ACPI_SIZE 0x40
+#define LBAR_PMS_SIZE 0x80
+
+/* VSA2 magic values */
+#define VSA_VRC_INDEX 0xAC1C
+#define VSA_VRC_DATA 0xAC1E
+#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
+#define VSA_VR_SIGNATURE 0x0003
+#define VSA_VR_MEM_SIZE 0x0200
+#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG 0x534d /* General Software signature */
+
+#include <linux/io.h>
+
+static inline int cs5535_has_vsa2(void)
+{
+ static int has_vsa2 = -1;
+
+ if (has_vsa2 == -1) {
+ uint16_t val;
+
+ /*
+ * The VSA has virtual registers that we can query for a
+ * signature.
+ */
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+ val = inw(VSA_VRC_DATA);
+ has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
+ }
+
+ return has_vsa2;
+}
+
+/* GPIOs */
+#define GPIO_OUTPUT_VAL 0x00
+#define GPIO_OUTPUT_ENABLE 0x04
+#define GPIO_OUTPUT_OPEN_DRAIN 0x08
+#define GPIO_OUTPUT_INVERT 0x0C
+#define GPIO_OUTPUT_AUX1 0x10
+#define GPIO_OUTPUT_AUX2 0x14
+#define GPIO_PULL_UP 0x18
+#define GPIO_PULL_DOWN 0x1C
+#define GPIO_INPUT_ENABLE 0x20
+#define GPIO_INPUT_INVERT 0x24
+#define GPIO_INPUT_FILTER 0x28
+#define GPIO_INPUT_EVENT_COUNT 0x2C
+#define GPIO_READ_BACK 0x30
+#define GPIO_INPUT_AUX1 0x34
+#define GPIO_EVENTS_ENABLE 0x38
+#define GPIO_LOCK_ENABLE 0x3C
+#define GPIO_POSITIVE_EDGE_EN 0x40
+#define GPIO_NEGATIVE_EDGE_EN 0x44
+#define GPIO_POSITIVE_EDGE_STS 0x48
+#define GPIO_NEGATIVE_EDGE_STS 0x4C
+
+#define GPIO_MAP_X 0xE0
+#define GPIO_MAP_Y 0xE4
+#define GPIO_MAP_Z 0xE8
+#define GPIO_MAP_W 0xEC
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg);
+void cs5535_gpio_clear(unsigned offset, unsigned int reg);
+int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+
+/* MFGPTs */
+
+#define MFGPT_MAX_TIMERS 8
+#define MFGPT_TIMER_ANY (-1)
+
+#define MFGPT_DOMAIN_WORKING 1
+#define MFGPT_DOMAIN_STANDBY 2
+#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
+
+#define MFGPT_CMP1 0
+#define MFGPT_CMP2 1
+
+#define MFGPT_EVENT_IRQ 0
+#define MFGPT_EVENT_NMI 1
+#define MFGPT_EVENT_RESET 3
+
+#define MFGPT_REG_CMP1 0
+#define MFGPT_REG_CMP2 2
+#define MFGPT_REG_COUNTER 4
+#define MFGPT_REG_SETUP 6
+
+#define MFGPT_SETUP_CNTEN (1 << 15)
+#define MFGPT_SETUP_CMP2 (1 << 14)
+#define MFGPT_SETUP_CMP1 (1 << 13)
+#define MFGPT_SETUP_SETUP (1 << 12)
+#define MFGPT_SETUP_STOPEN (1 << 11)
+#define MFGPT_SETUP_EXTEN (1 << 10)
+#define MFGPT_SETUP_REVEN (1 << 5)
+#define MFGPT_SETUP_CLKSEL (1 << 4)
+
+struct cs5535_mfgpt_timer;
+
+extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
+ uint16_t reg);
+extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value);
+
+extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable);
+extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
+ int *irq, int enable);
+extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
+ int domain);
+extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
+
+static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
+}
+
+static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
+}
+
+#endif
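
To show how the MFGPT pieces fit together, a hedged sketch of claiming a timer and arming comparator 2 for an interrupt; the compare value and domain choice are illustrative only:

#include <linux/cs5535.h>
#include <linux/errno.h>

static struct cs5535_mfgpt_timer *my_timer;

static int my_mfgpt_start(int *irq)
{
	int ret;

	my_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (!my_timer)
		return -ENODEV;

	ret = cs5535_mfgpt_setup_irq(my_timer, MFGPT_CMP2, irq);
	if (ret) {
		cs5535_mfgpt_free_timer(my_timer);
		return ret;
	}

	cs5535_mfgpt_write(my_timer, MFGPT_REG_CMP2, 0xafff);	/* example period */
	cs5535_mfgpt_write(my_timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(my_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return 0;
}
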
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297..a3d6ee0044f 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 12ff8c3f1d0..5032b9a31ae 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,7 +25,7 @@ static void *malloc(int size)
void *p;
if (size < 0)
- error("Malloc error");
+ return NULL;
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
@@ -35,7 +35,7 @@ static void *malloc(int size)
malloc_ptr += size;
if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
- error("Out of memory");
+ return NULL;
malloc_count++;
return p;
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce..d4c9c0b88ad 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
-int dm_suspended(struct mapped_device *md);
+int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
-struct dm_table *dm_get_table(struct mapped_device *md);
+struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
/*
* The device must be suspended before calling this method.
+ * Returns the previous table, which the caller must destroy.
*/
-int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+struct dm_table *dm_swap_table(struct mapped_device *md,
+ struct dm_table *t);
/*
* A wrapper around vmalloc.
diff --git a/include/linux/device.h b/include/linux/device.h
index 2ea3e492181..2a73d9bcbc9 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -558,7 +558,7 @@ extern void wait_for_device_probe(void);
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
extern int devtmpfs_delete_node(struct device *dev);
-extern int devtmpfs_mount(const char *mountpoint);
+extern int devtmpfs_mount(const char *mntdir);
#else
static inline int devtmpfs_create_node(struct device *dev) { return 0; }
static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6..7084503c340 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
struct dm_dirty_log {
struct dm_dirty_log_type *type;
+ int (*flush_callback_fn)(struct dm_target *ti);
void *context;
};
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
* type->constructor/destructor() directly.
*/
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
- struct dm_target *ti,
- unsigned argc, char **argv);
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31..aa95508d2f9 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
@@ -266,9 +266,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 15
+#define DM_VERSION_MINOR 16
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2009-04-01)"
+#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -309,4 +309,11 @@ enum {
*/
#define DM_NOFLUSH_FLAG (1 << 11) /* In */
+/*
+ * If set, any table information returned will relate to the inactive
+ * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
+ * is set before using the data returned.
+ */
+#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a4137..9e2a7a401df 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
/* Delay bios on regions. */
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
- struct bio *bio, unsigned done, int error);
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
/*
* Region recovery control.
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2b9f2ac7ed6..78784982b33 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,7 +74,7 @@ enum dma_transaction_type {
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
- * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
* acknowledges receipt, i.e. has has a chance to establish any dependency
* chains
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a0d9422a156..f8c2e176750 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \
- ##__VA_ARGS__); \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- dev_printk(KERN_DEBUG, dev, \
- KBUILD_MODNAME ": " fmt, \
- ##__VA_ARGS__); \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod)
return 0;
}
-#define dynamic_pr_debug(fmt, ...) do { } while (0)
-#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, format, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, format, ##__VA_ARGS__); } while (0)
#endif
#endif
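
A brief caller-side sketch of what the prefix change above assumes: the module name now comes from the caller's pr_fmt() definition rather than being prepended a second time by dynamic_pr_debug(). The function name is a placeholder:

/* define pr_fmt() before any includes so pr_debug() picks it up */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void my_probe_step(int unit)
{
	/* With CONFIG_DYNAMIC_DEBUG this expands to dynamic_pr_debug(). */
	pr_debug("probing unit %d\n", unit);
}
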
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ce4581fbc08..fb737bc19a8 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
static inline char *
efi_guid_unparse(efi_guid_t *guid, char *out)
{
- sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- guid->b[3], guid->b[2], guid->b[1], guid->b[0],
- guid->b[5], guid->b[4], guid->b[7], guid->b[6],
- guid->b[8], guid->b[9], guid->b[10], guid->b[11],
- guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
+ sprintf(out, "%pUl", guid->b);
return out;
}
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 90a4ed0ea0e..0cc4d55151b 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -361,7 +361,7 @@ typedef struct elf64_shdr {
#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
-#define NT_PRXSTATUS 0x300 /* s390 upper register halves */
+#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
/* Note header in a PT_NOTE section */
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 90d1c218411..9a33c5f7e12 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -42,6 +42,8 @@ enum enclosure_status {
ENCLOSURE_STATUS_NOT_INSTALLED,
ENCLOSURE_STATUS_UNKNOWN,
ENCLOSURE_STATUS_UNAVAILABLE,
+ /* last element for counting purposes */
+ ENCLOSURE_STATUS_MAX
};
/* SFF-8485 activity light settings */
diff --git a/include/linux/err.h b/include/linux/err.h
index ec87f3142bf..1b12642636c 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+static inline long IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
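
The new helper collapses the common "NULL or ERR_PTR()" test into one call; a self-contained sketch with a placeholder lookup function:

#include <linux/err.h>
#include <linux/errno.h>

struct my_obj { int id; };

static struct my_obj *my_lookup(void)
{
	return ERR_PTR(-ENODEV);	/* placeholder: real code would look something up */
}

static int my_use_object(void)
{
	struct my_obj *obj = my_lookup();	/* may return NULL or an ERR_PTR() */

	if (IS_ERR_OR_NULL(obj))
		return obj ? PTR_ERR(obj) : -ENOENT;

	/* ... use obj ... */
	return 0;
}
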
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 27e772cefb6..dc12f416a49 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -97,7 +97,7 @@ struct fid {
* @get_name: find the name for a given inode in a given directory
* @get_parent: find the parent of a given directory
*
- * See Documentation/filesystems/Exporting for details on how to use
+ * See Documentation/filesystems/nfs/Exporting for details on how to use
* this interface correctly.
*
* encode_fh:
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 121720d74e1..2dfa7076e8b 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -565,14 +565,14 @@ struct ext2_dir_entry_2 {
* other bits are reserved for now.
*/
enum {
- EXT2_FT_UNKNOWN,
- EXT2_FT_REG_FILE,
- EXT2_FT_DIR,
- EXT2_FT_CHRDEV,
- EXT2_FT_BLKDEV,
- EXT2_FT_FIFO,
- EXT2_FT_SOCK,
- EXT2_FT_SYMLINK,
+ EXT2_FT_UNKNOWN = 0,
+ EXT2_FT_REG_FILE = 1,
+ EXT2_FT_DIR = 2,
+ EXT2_FT_CHRDEV = 3,
+ EXT2_FT_BLKDEV = 4,
+ EXT2_FT_FIFO = 5,
+ EXT2_FT_SOCK = 6,
+ EXT2_FT_SYMLINK = 7,
EXT2_FT_MAX
};
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 7499b366779..6b049030fbe 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -918,6 +918,8 @@ extern void ext3_abort (struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
extern void ext3_warning (struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
+extern void ext3_msg(struct super_block *, const char *, const char *, ...)
+ __attribute__ ((format (printf, 3, 4)));
extern void ext3_update_dynamic_rev (struct super_block *sb);
#define ext3_std_error(sb, errno) \
diff --git a/include/linux/file.h b/include/linux/file.h
index 335a0a5c316..5555508fd51 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -18,11 +18,9 @@ extern void drop_file_write_access(struct file *file);
struct file_operations;
struct vfsmount;
struct dentry;
-extern int init_file(struct file *, struct vfsmount *mnt,
- struct dentry *dentry, fmode_t mode,
- const struct file_operations *fop);
-extern struct file *alloc_file(struct vfsmount *, struct dentry *dentry,
- fmode_t mode, const struct file_operations *fop);
+struct path;
+extern struct file *alloc_file(struct path *, fmode_t mode,
+ const struct file_operations *fop);
static inline void fput_light(struct file *file, int fput_needed)
{
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index d3154462843..043811f0d27 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/gfp.h>
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
@@ -38,7 +39,7 @@ int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_nowait(
struct module *module, int uevent,
- const char *name, struct device *device, void *context,
+ const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context));
void release_firmware(const struct firmware *fw);
@@ -51,7 +52,7 @@ static inline int request_firmware(const struct firmware **fw,
}
static inline int request_firmware_nowait(
struct module *module, int uevent,
- const char *name, struct device *device, void *context,
+ const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context))
{
return -EINVAL;
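
With the new gfp_t argument the caller states its allocation context explicitly; a hedged sketch of an asynchronous request where the firmware file name and callback body are made-up examples:

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/gfp.h>

static void my_fw_cont(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware load failed\n");
		return;
	}
	/* ... program fw->data / fw->size into the hardware ... */
	release_firmware(fw);
}

static int my_request_fw(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "my-device.fw", dev, GFP_KERNEL,
				       dev, my_fw_cont);
}
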
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 891f7d642e5..7e3012e0ac0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -152,6 +152,7 @@ struct inodes_stat_t {
#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
#define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO))
+#define WRITE_META (WRITE | (1 << BIO_RW_META))
#define SWRITE_SYNC_PLUG \
(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
@@ -1094,10 +1095,6 @@ struct file_lock {
extern void send_sigio(struct fown_struct *fown, int fd, int band);
-/* fs/sync.c */
-extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
- loff_t endbyte, unsigned int flags);
-
#ifdef CONFIG_FILE_LOCKING
extern int fcntl_getlk(struct file *, struct flock __user *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
@@ -1590,7 +1587,7 @@ struct super_operations {
* until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
* various stages of removing an inode.
*
- * Two bits are used for locking and completion notification, I_LOCK and I_SYNC.
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
* fdatasync(). i_atime is the usual cause.
@@ -1599,8 +1596,14 @@ struct super_operations {
* don't have to write inode on fdatasync() when only
* mtime has changed in it.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
- * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both
- * are cleared by unlock_new_inode(), called from iget().
+ * I_NEW Serves as both a mutex and completion notification.
+ * New inodes set I_NEW. If two processes both create
+ * the same inode, one of them will release its inode and
+ * wait for I_NEW to be released before returning.
+ * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ * also cause waiting on I_NEW, without I_NEW actually
+ * being set. find_inode() uses this to prevent returning
+ * nearly-dead inodes.
* I_WILL_FREE Must be set when calling write_inode_now() if i_count
* is zero. I_FREEING must be set when I_WILL_FREE is
* cleared.
@@ -1614,35 +1617,23 @@ struct super_operations {
* prohibited for many purposes. iget() must wait for
* the inode to be completely released, then create it
* anew. Other functions will just ignore such inodes,
- * if appropriate. I_LOCK is used for waiting.
+ * if appropriate. I_NEW is used for waiting.
*
- * I_LOCK Serves as both a mutex and completion notification.
- * New inodes set I_LOCK. If two processes both create
- * the same inode, one of them will release its inode and
- * wait for I_LOCK to be released before returning.
- * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
- * also cause waiting on I_LOCK, without I_LOCK actually
- * being set. find_inode() uses this to prevent returning
- * nearly-dead inodes.
- * I_SYNC Similar to I_LOCK, but limited in scope to writeback
- * of inode dirty data. Having a separate lock for this
- * purpose reduces latency and prevents some filesystem-
- * specific deadlocks.
+ * I_SYNC Synchronized write of dirty inode data. The bit is
+ * set during data writeback, and cleared with a wakeup
+ * on the bit address once it is done.
*
* Q: What is the difference between I_WILL_FREE and I_FREEING?
- * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on
- * I_CLEAR? If not, why?
*/
#define I_DIRTY_SYNC 1
#define I_DIRTY_DATASYNC 2
#define I_DIRTY_PAGES 4
-#define I_NEW 8
+#define __I_NEW 3
+#define I_NEW (1 << __I_NEW)
#define I_WILL_FREE 16
#define I_FREEING 32
#define I_CLEAR 64
-#define __I_LOCK 7
-#define I_LOCK (1 << __I_LOCK)
-#define __I_SYNC 8
+#define __I_SYNC 7
#define I_SYNC (1 << __I_SYNC)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
@@ -2091,8 +2082,6 @@ extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
-extern int wait_on_page_writeback_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end, int sync_mode);
extern int filemap_fdatawrite_range(struct address_space *mapping,
@@ -2191,7 +2180,6 @@ static inline void insert_inode_hash(struct inode *inode) {
__insert_inode_hash(inode, inode->i_ino);
}
-extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list);
extern void file_kill(struct file *f);
#ifdef CONFIG_BLOCK
@@ -2266,9 +2254,11 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
int lock_type);
enum {
- DIO_LOCKING = 1, /* need locking between buffered and direct access */
- DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */
- DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
+ /* need locking between buffered and direct access */
+ DIO_LOCKING = 0x01,
+
+ /* filesystem does not support filling holes */
+ DIO_SKIP_HOLES = 0x02,
};
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
@@ -2277,7 +2267,8 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_LOCKING);
+ nr_segs, get_block, end_io,
+ DIO_LOCKING | DIO_SKIP_HOLES);
}
static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
@@ -2286,16 +2277,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_NO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
- struct inode *inode, struct block_device *bdev, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs, get_block_t get_block,
- dio_iodone_t end_io)
-{
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_OWN_LOCKING);
+ nr_segs, get_block, end_io, 0);
}
#endif
@@ -2480,5 +2462,8 @@ int proc_nr_files(struct ctl_table *table, int write,
int __init get_filesystem_list(char *buf);
+#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
+#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
+
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
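
A sketch of the usual caller of the blockdev_direct_IO() helper touched above, i.e. a filesystem's ->direct_IO() method; my_get_block is only a stub standing in for the filesystem's real block-mapping routine:

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>

static int my_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return -EIO;	/* placeholder block mapping */
}

static ssize_t my_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
			    loff_t offset, unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* The locking variant now also passes DIO_SKIP_HOLES internally. */
	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				  iov, offset, nr_segs, my_get_block, NULL);
}
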
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index bb516ceeefc..da317c7163a 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -8,10 +8,8 @@
#include <linux/fs.h>
/* externs for fs/stack.c */
-extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src,
- int (*get_nlinks)(struct inode *));
-
-extern void fsstack_copy_inode_size(struct inode *dst, const struct inode *src);
+extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src);
+extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
/* inlines */
static inline void fsstack_copy_attr_atime(struct inode *dest,
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 43fc95d822d..28e33fea510 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -74,7 +74,12 @@ struct spi_device;
struct fsl_spi_platform_data {
u32 initial_spmode; /* initial SPMODE value */
s16 bus_num;
- bool qe_mode;
+ unsigned int flags;
+#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */
+#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */
+#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */
+#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */
+#define SPI_QE (1 << 4) /* SPI unit is in QE block */
/* board specific information */
u16 max_chipselect;
void (*cs_control)(struct spi_device *spi, bool on);
@@ -90,6 +95,10 @@ struct mpc8xx_pcmcia_ops {
* lead to a deep sleep (i.e. power removed from the core,
* instead of just the clock).
*/
+#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND)
int fsl_deep_sleep(void);
+#else
+static inline int fsl_deep_sleep(void) { return 0; }
+#endif
#endif /* _FSL_DEVICE_H_ */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 47bbdf9c38d..2233c98d80d 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -57,6 +57,7 @@ struct trace_iterator {
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
+ int leftover;
int cpu;
u64 ts;
@@ -130,7 +131,7 @@ struct ftrace_event_call {
void *mod;
void *data;
- atomic_t profile_count;
+ int profile_count;
int (*profile_enable)(struct ftrace_event_call *);
void (*profile_disable)(struct ftrace_event_call *);
};
@@ -157,7 +158,7 @@ enum {
FILTER_PTR_STRING,
};
-extern int trace_define_common_fields(struct ftrace_event_call *call);
+extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h
index 886f5faa08c..ca666d18ed6 100644
--- a/include/linux/generic_acl.h
+++ b/include/linux/generic_acl.h
@@ -1,36 +1,15 @@
-/*
- * include/linux/generic_acl.h
- *
- * (C) 2005 Andreas Gruenbacher <agruen@suse.de>
- *
- * This file is released under the GPL.
- */
+#ifndef LINUX_GENERIC_ACL_H
+#define LINUX_GENERIC_ACL_H
-#ifndef GENERIC_ACL_H
-#define GENERIC_ACL_H
+#include <linux/xattr.h>
-#include <linux/posix_acl.h>
-#include <linux/posix_acl_xattr.h>
+struct inode;
-/**
- * struct generic_acl_operations - filesystem operations
- *
- * Filesystems must make these operations available to the generic
- * operations.
- */
-struct generic_acl_operations {
- struct posix_acl *(*getacl)(struct inode *, int);
- void (*setacl)(struct inode *, int, struct posix_acl *);
-};
+extern struct xattr_handler generic_acl_access_handler;
+extern struct xattr_handler generic_acl_default_handler;
-size_t generic_acl_list(struct inode *, struct generic_acl_operations *, int,
- char *, size_t);
-int generic_acl_get(struct inode *, struct generic_acl_operations *, int,
- void *, size_t);
-int generic_acl_set(struct inode *, struct generic_acl_operations *, int,
- const void *, size_t);
-int generic_acl_init(struct inode *, struct inode *,
- struct generic_acl_operations *);
-int generic_acl_chmod(struct inode *, struct generic_acl_operations *);
+int generic_acl_init(struct inode *, struct inode *);
+int generic_acl_chmod(struct inode *);
+int generic_check_acl(struct inode *inode, int mask);
-#endif
+#endif /* LINUX_GENERIC_ACL_H */
diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h
index 5dc4a316ca3..258ba82937e 100644
--- a/include/linux/gigaset_dev.h
+++ b/include/linux/gigaset_dev.h
@@ -16,15 +16,23 @@
#include <linux/ioctl.h>
+/* The magic IOCTL value for this interface. */
#define GIGASET_IOCTL 0x47
-#define GIGVER_DRIVER 0
-#define GIGVER_COMPAT 1
-#define GIGVER_FWBASE 2
+/* enable/disable device control via character device (lock out ISDN subsys) */
+#define GIGASET_REDIR _IOWR(GIGASET_IOCTL, 0, int)
-#define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int)
-#define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int)
-#define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay?
-#define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4])
+/* enable adapter configuration mode (M10x only) */
+#define GIGASET_CONFIG _IOWR(GIGASET_IOCTL, 1, int)
+
+/* set break characters (M105 only) */
+#define GIGASET_BRKCHARS _IOW(GIGASET_IOCTL, 2, unsigned char[6])
+
+/* get version information selected by arg[0] */
+#define GIGASET_VERSION _IOWR(GIGASET_IOCTL, 3, unsigned[4])
+/* values for GIGASET_VERSION arg[0] */
+#define GIGVER_DRIVER 0 /* get driver version */
+#define GIGVER_COMPAT 1 /* get interface compatibility version */
+#define GIGVER_FWBASE 2 /* get base station firmware version */
#endif
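
For completeness, a hedged user-space sketch of the version query documented above; the device node path is an assumption and depends on how the gigaset character device is set up:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gigaset_dev.h>

int main(void)
{
	unsigned version[4] = { GIGVER_DRIVER, 0, 0, 0 };
	int fd = open("/dev/ttyGU0", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, GIGASET_VERSION, version) == 0)
		printf("driver version: %u.%u.%u.%u\n",
		       version[0], version[1], version[2], version[3]);
	close(fd);
	return 0;
}
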
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 059bd189d35..4e949a5b5b8 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -99,6 +99,12 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -EINVAL;
}
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
static inline void gpio_unexport(unsigned gpio)
{
diff --git a/include/linux/hayesesp.h b/include/linux/hayesesp.h
deleted file mode 100644
index 92b08cfe4a7..00000000000
--- a/include/linux/hayesesp.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef HAYESESP_H
-#define HAYESESP_H
-
-struct hayes_esp_config {
- short flow_on;
- short flow_off;
- short rx_trigger;
- short tx_trigger;
- short pio_threshold;
- unsigned char rx_timeout;
- char dma_channel;
-};
-
-#ifdef __KERNEL__
-
-#define ESP_DMA_CHANNEL 0
-#define ESP_RX_TRIGGER 768
-#define ESP_TX_TRIGGER 768
-#define ESP_FLOW_OFF 1016
-#define ESP_FLOW_ON 944
-#define ESP_RX_TMOUT 128
-#define ESP_PIO_THRESHOLD 32
-
-#define ESP_IN_MAJOR 57 /* major dev # for dial in */
-#define ESP_OUT_MAJOR 58 /* major dev # for dial out */
-#define ESPC_SCALE 3
-#define UART_ESI_BASE 0x00
-#define UART_ESI_SID 0x01
-#define UART_ESI_RX 0x02
-#define UART_ESI_TX 0x02
-#define UART_ESI_CMD1 0x04
-#define UART_ESI_CMD2 0x05
-#define UART_ESI_STAT1 0x04
-#define UART_ESI_STAT2 0x05
-#define UART_ESI_RWS 0x07
-
-#define UART_IER_DMA_TMOUT 0x80
-#define UART_IER_DMA_TC 0x08
-
-#define ESI_SET_IRQ 0x04
-#define ESI_SET_DMA_TMOUT 0x05
-#define ESI_SET_SRV_MASK 0x06
-#define ESI_SET_ERR_MASK 0x07
-#define ESI_SET_FLOW_CNTL 0x08
-#define ESI_SET_FLOW_CHARS 0x09
-#define ESI_SET_FLOW_LVL 0x0a
-#define ESI_SET_TRIGGER 0x0b
-#define ESI_SET_RX_TIMEOUT 0x0c
-#define ESI_SET_FLOW_TMOUT 0x0d
-#define ESI_WRITE_UART 0x0e
-#define ESI_READ_UART 0x0f
-#define ESI_SET_MODE 0x10
-#define ESI_GET_ERR_STAT 0x12
-#define ESI_GET_UART_STAT 0x13
-#define ESI_GET_RX_AVAIL 0x14
-#define ESI_GET_TX_AVAIL 0x15
-#define ESI_START_DMA_RX 0x16
-#define ESI_START_DMA_TX 0x17
-#define ESI_ISSUE_BREAK 0x1a
-#define ESI_FLUSH_RX 0x1b
-#define ESI_FLUSH_TX 0x1c
-#define ESI_SET_BAUD 0x1d
-#define ESI_SET_ENH_IRQ 0x1f
-#define ESI_SET_REINTR 0x20
-#define ESI_SET_PRESCALAR 0x23
-#define ESI_NO_COMMAND 0xff
-
-#define ESP_STAT_RX_TIMEOUT 0x01
-#define ESP_STAT_DMA_RX 0x02
-#define ESP_STAT_DMA_TX 0x04
-#define ESP_STAT_NEVER_DMA 0x08
-#define ESP_STAT_USE_PIO 0x10
-
-#define ESP_MAGIC 0x53ee
-#define ESP_XMIT_SIZE 4096
-
-struct esp_struct {
- int magic;
- struct tty_port port;
- spinlock_t lock;
- int io_port;
- int irq;
- int read_status_mask;
- int ignore_status_mask;
- int timeout;
- int stat_flags;
- int custom_divisor;
- int close_delay;
- unsigned short closing_wait;
- unsigned short closing_wait2;
- int IER; /* Interrupt Enable Register */
- int MCR; /* Modem control register */
- unsigned long last_active;
- int line;
- unsigned char *xmit_buf;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- wait_queue_head_t break_wait;
- struct async_icount icount; /* kernel counters for the 4 input interrupts */
- struct hayes_esp_config config; /* port configuration */
- struct esp_struct *next_port; /* For the linked list */
-};
-
-struct esp_pio_buffer {
- unsigned char data[1024];
- struct esp_pio_buffer *next;
-};
-
-#endif /* __KERNEL__ */
-
-
-#endif /* ESP_H */
-
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 9bace4b9f4f..5d86fb2309d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -162,18 +162,23 @@ struct hrtimer_clock_base {
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @hres_active: State of high resolution mode
- * @check_clocks: Indictator, when set evaluate time source and clock
- * event devices whether high resolution mode can be
- * activated.
- * @nr_events: Total number of timer interrupt events
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
*/
struct hrtimer_cpu_base {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
+ int hang_detected;
unsigned long nr_events;
+ unsigned long nr_retries;
+ unsigned long nr_hangs;
+ ktime_t max_hang_time;
#endif
};
@@ -435,47 +440,4 @@ extern u64 ktime_divns(const ktime_t kt, s64 div);
/* Show pending timers: */
extern void sysrq_timer_list_show(void);
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
- if (likely(!timer_stats_active))
- return;
- timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
- timer->function, timer->start_comm, 0);
-}
-
-extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
- void *addr);
-
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
- __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
- timer->start_site = NULL;
-}
-#else
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-}
-
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 41a59afc70f..78b4bc64c00 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index a03daed08c5..41235c93e4e 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -20,19 +20,18 @@ enum {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-/* As it's for in-kernel or ptrace use, we want it to be pinned */
-#define DEFINE_BREAKPOINT_ATTR(name) \
-struct perf_event_attr name = { \
- .type = PERF_TYPE_BREAKPOINT, \
- .size = sizeof(name), \
- .pinned = 1, \
-};
-
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
{
+ memset(attr, 0, sizeof(*attr));
+
attr->type = PERF_TYPE_BREAKPOINT;
attr->size = sizeof(*attr);
+ /*
+ * As it's for in-kernel or ptrace use, we want it to be pinned
+ * and to call its callback on every hit.
+ */
attr->pinned = 1;
+ attr->sample_period = 1;
}
static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
@@ -52,27 +51,24 @@ static inline int hw_breakpoint_len(struct perf_event *bp)
extern struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk);
/* FIXME: only change from the attr, and don't unregister */
-extern struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk);
+extern int
+modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
/*
* Kernel breakpoints are not associated with any particular thread.
*/
extern struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
int cpu);
extern struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered);
+ perf_overflow_handler_t triggered);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
extern int __register_perf_hw_breakpoint(struct perf_event *bp);
@@ -93,20 +89,18 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
static inline struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk) { return NULL; }
-static inline struct perf_event *
+static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk) { return NULL; }
+ struct perf_event_attr *attr) { return -ENOSYS; }
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
int cpu) { return NULL; }
static inline struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered) { return NULL; }
+ perf_overflow_handler_t triggered) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline int
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419ab546b26..02fc617782e 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @driver: Device driver model driver
* @id_table: List of I2C devices supported by this driver
* @detect: Callback for device detection
- * @address_data: The I2C addresses to probe (for detect)
+ * @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
@@ -161,8 +161,8 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
- int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
- const struct i2c_client_address_data *address_data;
+ int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ const unsigned short *address_list;
struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
-/* i2c_client_address_data is the struct for holding default client
- * addresses for a driver and for the parameters supplied on the
- * command line
- */
-struct i2c_client_address_data {
- const unsigned short *normal_i2c;
-};
-
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
@@ -576,82 +568,4 @@ union i2c_smbus_data {
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
#define I2C_SMBUS_I2C_BLOCK_DATA 8
-
-#ifdef __KERNEL__
-
-/* These defines are used for probing i2c client addresses */
-/* The length of the option lists */
-#define I2C_CLIENT_MAX_OPTS 48
-
-/* Default fill of many variables */
-#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
-
-/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
- module header */
-
-#define I2C_CLIENT_MODULE_PARM(var,desc) \
- static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
- static unsigned int var##_num; \
- module_param_array(var, short, &var##_num, 0); \
- MODULE_PARM_DESC(var, desc)
-
-#define I2C_CLIENT_INSMOD_COMMON \
-static const struct i2c_client_address_data addr_data = { \
- .normal_i2c = normal_i2c, \
-}
-
-/* These are the ones you want to use in your own drivers. Pick the one
- which matches the number of devices the driver differenciates between. */
-#define I2C_CLIENT_INSMOD \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_1(chip1) \
-enum chips { any_chip, chip1 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
-enum chips { any_chip, chip1, chip2 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
-enum chips { any_chip, chip1, chip2, chip3 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
-enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7, chip8 }; \
-I2C_CLIENT_INSMOD_COMMON
-#endif /* __KERNEL__ */
#endif /* _LINUX_I2C_H */
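
[Illustrative sketch, not part of this commit: a driver skeleton converted from the removed I2C_CLIENT_INSMOD/address_data machinery to the new address_list field and two-argument detect() callback. All names are placeholders.]

#include <linux/i2c.h>

static const unsigned short example_address_list[] = {
	0x48, 0x49, I2C_CLIENT_END	/* list must end with I2C_CLIENT_END */
};

static int example_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	/* probe the hardware here; on success report the device name */
	strlcpy(info->type, "example_chip", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example" },
	.class		= I2C_CLASS_HWMON,
	.detect		= example_detect,
	.address_list	= example_address_list,
};
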
diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h
index 918c5354d9b..08aa92278d7 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/i2c/tps65010.h
@@ -72,6 +72,21 @@
#define TPS_VDCDC1 0x0c
# define TPS_ENABLE_LP (1 << 3)
#define TPS_VDCDC2 0x0d
+# define TPS_LP_COREOFF (1 << 7)
+# define TPS_VCORE_1_8V (7<<4)
+# define TPS_VCORE_1_5V (6 << 4)
+# define TPS_VCORE_1_4V (5 << 4)
+# define TPS_VCORE_1_3V (4 << 4)
+# define TPS_VCORE_1_2V (3 << 4)
+# define TPS_VCORE_1_1V (2 << 4)
+# define TPS_VCORE_1_0V (1 << 4)
+# define TPS_VCORE_0_85V (0 << 4)
+# define TPS_VCORE_LP_1_2V (3 << 2)
+# define TPS_VCORE_LP_1_1V (2 << 2)
+# define TPS_VCORE_LP_1_0V (1 << 2)
+# define TPS_VCORE_LP_0_85V (0 << 2)
+# define TPS_VIB (1 << 1)
+# define TPS_VCORE_DISCH (1 << 0)
#define TPS_VREGS1 0x0e
# define TPS_LDO2_ENABLE (1 << 7)
# define TPS_LDO2_OFF (1 << 6)
@@ -152,6 +167,10 @@ extern int tps65010_config_vregs1(unsigned value);
*/
extern int tps65013_set_low_pwr(unsigned mode);
+/* tps65010_set_vdcdc2
+ * value to be written to VDCDC2
+ */
+extern int tps65010_config_vdcdc2(unsigned value);
struct i2c_client;
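
[Illustrative sketch, not part of this commit: board setup code using the new helper with one of the VDCDC2 core-voltage fields defined above.]

#include <linux/i2c/tps65010.h>

static int __init example_board_set_vcore(void)
{
	/* program VDCDC2 for a 1.3 V core; assumes the TPS65010 driver is present */
	return tps65010_config_vdcdc2(TPS_VCORE_1_3V);
}
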
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl.h
index 5306a759cbd..bf1c5be1f5b 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __TWL4030_H_
-#define __TWL4030_H_
+#ifndef __TWL_H_
+#define __TWL_H_
#include <linux/types.h>
#include <linux/input/matrix_keypad.h>
@@ -61,28 +61,112 @@
#define TWL4030_MODULE_PWMA 0x0E
#define TWL4030_MODULE_PWMB 0x0F
+#define TWL5031_MODULE_ACCESSORY 0x10
+#define TWL5031_MODULE_INTERRUPTS 0x11
+
/* Slave 3 (i2c address 0x4b) */
-#define TWL4030_MODULE_BACKUP 0x10
-#define TWL4030_MODULE_INT 0x11
-#define TWL4030_MODULE_PM_MASTER 0x12
-#define TWL4030_MODULE_PM_RECEIVER 0x13
-#define TWL4030_MODULE_RTC 0x14
-#define TWL4030_MODULE_SECURED_REG 0x15
+#define TWL4030_MODULE_BACKUP 0x12
+#define TWL4030_MODULE_INT 0x13
+#define TWL4030_MODULE_PM_MASTER 0x14
+#define TWL4030_MODULE_PM_RECEIVER 0x15
+#define TWL4030_MODULE_RTC 0x16
+#define TWL4030_MODULE_SECURED_REG 0x17
+
+#define TWL_MODULE_USB TWL4030_MODULE_USB
+#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
+#define TWL_MODULE_PIH TWL4030_MODULE_PIH
+#define TWL_MODULE_MADC TWL4030_MODULE_MADC
+#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
+#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
+#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
+#define TWL_MODULE_RTC TWL4030_MODULE_RTC
+
+#define GPIO_INTR_OFFSET 0
+#define KEYPAD_INTR_OFFSET 1
+#define BCI_INTR_OFFSET 2
+#define MADC_INTR_OFFSET 3
+#define USB_INTR_OFFSET 4
+#define BCI_PRES_INTR_OFFSET 9
+#define USB_PRES_INTR_OFFSET 10
+#define RTC_INTR_OFFSET 11
+
+/*
+ * Offset from TWL6030_IRQ_BASE / pdata->irq_base
+ */
+#define PWR_INTR_OFFSET 0
+#define HOTDIE_INTR_OFFSET 12
+#define SMPSLDO_INTR_OFFSET 13
+#define BATDETECT_INTR_OFFSET 14
+#define SIMDETECT_INTR_OFFSET 15
+#define MMCDETECT_INTR_OFFSET 16
+#define GASGAUGE_INTR_OFFSET 17
+#define USBOTG_INTR_OFFSET 4
+#define CHARGER_INTR_OFFSET 2
+#define RSV_INTR_OFFSET 0
+
+/* INT register offsets */
+#define REG_INT_STS_A 0x00
+#define REG_INT_STS_B 0x01
+#define REG_INT_STS_C 0x02
+
+#define REG_INT_MSK_LINE_A 0x03
+#define REG_INT_MSK_LINE_B 0x04
+#define REG_INT_MSK_LINE_C 0x05
+
+#define REG_INT_MSK_STS_A 0x06
+#define REG_INT_MSK_STS_B 0x07
+#define REG_INT_MSK_STS_C 0x08
+
+/* MASK INT REG GROUP A */
+#define TWL6030_PWR_INT_MASK 0x07
+#define TWL6030_RTC_INT_MASK 0x18
+#define TWL6030_HOTDIE_INT_MASK 0x20
+#define TWL6030_SMPSLDOA_INT_MASK 0xC0
+
+/* MASK INT REG GROUP B */
+#define TWL6030_SMPSLDOB_INT_MASK 0x01
+#define TWL6030_BATDETECT_INT_MASK 0x02
+#define TWL6030_SIMDETECT_INT_MASK 0x04
+#define TWL6030_MMCDETECT_INT_MASK 0x08
+#define TWL6030_GPADC_INT_MASK 0x60
+#define TWL6030_GASGAUGE_INT_MASK 0x80
+
+/* MASK INT REG GROUP C */
+#define TWL6030_USBOTG_INT_MASK 0x0F
+#define TWL6030_CHARGER_CTRL_INT_MASK 0x10
+#define TWL6030_CHARGER_FAULT_INT_MASK 0x60
+
+
+#define TWL4030_CLASS_ID 0x4030
+#define TWL6030_CLASS_ID 0x6030
+unsigned int twl_rev(void);
+#define GET_TWL_REV (twl_rev())
+#define TWL_CLASS_IS(class, id) \
+static inline int twl_class_is_ ##class(void) \
+{ \
+ return ((id) == (GET_TWL_REV)) ? 1 : 0; \
+}
+
+TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
+TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
/*
* Read and write single 8-bit registers
*/
-int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
-int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
+int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
/*
* Read and write several 8-bit registers at once.
*
- * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1
+ * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1
* for the value, and populate your data starting at offset 1.
*/
-int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
-int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
/*----------------------------------------------------------------------*/
@@ -221,6 +305,38 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
/*----------------------------------------------------------------------*/
+/*
+ * Accessory Interrupts
+ */
+#define TWL5031_ACIIMR_LSB 0x05
+#define TWL5031_ACIIMR_MSB 0x06
+#define TWL5031_ACIIDR_LSB 0x07
+#define TWL5031_ACIIDR_MSB 0x08
+#define TWL5031_ACCISR1 0x0F
+#define TWL5031_ACCIMR1 0x10
+#define TWL5031_ACCISR2 0x11
+#define TWL5031_ACCIMR2 0x12
+#define TWL5031_ACCSIR 0x13
+#define TWL5031_ACCEDR1 0x14
+#define TWL5031_ACCSIHCTRL 0x15
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery Charger Controller
+ */
+
+#define TWL5031_INTERRUPTS_BCIISR1 0x0
+#define TWL5031_INTERRUPTS_BCIIMR1 0x1
+#define TWL5031_INTERRUPTS_BCIISR2 0x2
+#define TWL5031_INTERRUPTS_BCIIMR2 0x3
+#define TWL5031_INTERRUPTS_BCISIR 0x4
+#define TWL5031_INTERRUPTS_BCIEDR1 0x5
+#define TWL5031_INTERRUPTS_BCIEDR2 0x6
+#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
/* Power bus message definitions */
/* The TWL4030/5030 splits its power-management resources (the various
@@ -250,6 +366,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define RES_TYPE_ALL 0x7
+/* Resource states */
#define RES_STATE_WRST 0xF
#define RES_STATE_ACTIVE 0xE
#define RES_STATE_SLEEP 0x8
@@ -310,8 +427,18 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define MSG_SINGULAR(devgrp, id, state) \
((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
+#define MSG_BROADCAST_ALL(devgrp, state) \
+ ((devgrp) << 5 | (state))
+
+#define MSG_BROADCAST_REF MSG_BROADCAST_ALL
+#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL
+#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL
/*----------------------------------------------------------------------*/
+struct twl4030_clock_init_data {
+ bool ck32k_lowpwr_enable;
+};
+
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;
@@ -391,12 +518,15 @@ struct twl4030_resconfig {
u8 devgroup; /* Processor group that Power resource belongs to */
u8 type; /* Power resource addressed, 6 / broadcast message */
u8 type2; /* Power resource addressed, 3 / broadcast message */
+ u8 remap_off; /* off state remapping */
+ u8 remap_sleep; /* sleep state remapping */
};
struct twl4030_power_data {
struct twl4030_script **scripts;
unsigned num;
struct twl4030_resconfig *resource_config;
+#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
};
extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
@@ -421,6 +551,7 @@ struct twl4030_codec_data {
struct twl4030_platform_data {
unsigned irq_base, irq_end;
+ struct twl4030_clock_init_data *clock;
struct twl4030_bci_platform_data *bci;
struct twl4030_gpio_platform_data *gpio;
struct twl4030_madc_platform_data *madc;
@@ -429,19 +560,31 @@ struct twl4030_platform_data {
struct twl4030_power_data *power;
struct twl4030_codec_data *codec;
- /* LDO regulators */
+ /* Common LDO regulators for TWL4030/TWL6030 */
struct regulator_init_data *vdac;
+ struct regulator_init_data *vaux1;
+ struct regulator_init_data *vaux2;
+ struct regulator_init_data *vaux3;
+ /* TWL4030 LDO regulators */
struct regulator_init_data *vpll1;
struct regulator_init_data *vpll2;
struct regulator_init_data *vmmc1;
struct regulator_init_data *vmmc2;
struct regulator_init_data *vsim;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
struct regulator_init_data *vaux4;
-
- /* REVISIT more to come ... _nothing_ should be hard-wired */
+ struct regulator_init_data *vio;
+ struct regulator_init_data *vdd1;
+ struct regulator_init_data *vdd2;
+ struct regulator_init_data *vintana1;
+ struct regulator_init_data *vintana2;
+ struct regulator_init_data *vintdig;
+ /* TWL6030 LDO regulators */
+ struct regulator_init_data *vmmc;
+ struct regulator_init_data *vpp;
+ struct regulator_init_data *vusim;
+ struct regulator_init_data *vana;
+ struct regulator_init_data *vcxio;
+ struct regulator_init_data *vusb;
};
/*----------------------------------------------------------------------*/
@@ -473,6 +616,7 @@ int twl4030_sih_setup(int module);
* VIO is generally fixed.
*/
+/* TWL4030 SMPS/LDO's */
/* EXTERNAL dc-to-dc buck converters */
#define TWL4030_REG_VDD1 0
#define TWL4030_REG_VDD2 1
@@ -499,4 +643,31 @@ int twl4030_sih_setup(int module);
#define TWL4030_REG_VUSB1V8 18
#define TWL4030_REG_VUSB3V1 19
+/* TWL6030 SMPS/LDO's */
+/* EXTERNAL dc-to-dc buck converter controllable via SR */
+#define TWL6030_REG_VDD1 30
+#define TWL6030_REG_VDD2 31
+#define TWL6030_REG_VDD3 32
+
+/* Non SR compliant dc-to-dc buck converters */
+#define TWL6030_REG_VMEM 33
+#define TWL6030_REG_V2V1 34
+#define TWL6030_REG_V1V29 35
+#define TWL6030_REG_V1V8 36
+
+/* EXTERNAL LDOs */
+#define TWL6030_REG_VAUX1_6030 37
+#define TWL6030_REG_VAUX2_6030 38
+#define TWL6030_REG_VAUX3_6030 39
+#define TWL6030_REG_VMMC 40
+#define TWL6030_REG_VPP 41
+#define TWL6030_REG_VUSIM 42
+#define TWL6030_REG_VANA 43
+#define TWL6030_REG_VCXIO 44
+#define TWL6030_REG_VDAC 45
+#define TWL6030_REG_VUSB 46
+
+/* INTERNAL LDOs */
+#define TWL6030_REG_VRTC 47
+
#endif /* End of __TWL4030_H */
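
[Illustrative sketch, not part of this commit: reading a PM receiver register with the renamed twl_i2c_* helpers and the generated twl_class_is_*() checks. The register offset is a placeholder, not a documented value.]

#include <linux/kernel.h>
#include <linux/i2c/twl.h>

static int example_read_pm_register(u8 *val)
{
	if (twl_class_is_6030())
		pr_debug("example: TWL6030-class PMIC detected\n");

	/* the same call works for both chip families; offset 0x00 is illustrative */
	return twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, val, 0x00);
}
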
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 60c3360ef6a..9bf6870ee5f 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -39,6 +39,10 @@ void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
bool i8042_check_port_owner(const struct serio *);
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
#else
@@ -52,7 +56,7 @@ void i8042_unlock_chip(void)
int i8042_command(unsigned char *param, int command)
{
- return -ENOSYS;
+ return -ENODEV;
}
bool i8042_check_port_owner(const struct serio *serio)
@@ -60,6 +64,18 @@ bool i8042_check_port_owner(const struct serio *serio)
return false;
}
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
#endif
#endif
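
[Illustrative sketch, not part of this commit: a platform driver installing the new i8042 filter hook. The scancode it watches for is a placeholder.]

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i8042.h>
#include <linux/serio.h>

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio)
{
	if (data == 0xe0)		/* placeholder scancode of interest */
		pr_debug("example: saw extended-key prefix\n");
	return false;			/* false: let i8042 keep the byte */
}

static int __init example_init(void)
{
	return i8042_install_filter(example_i8042_filter);
}

static void __exit example_exit(void)
{
	i8042_remove_filter(example_i8042_filter);
}
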
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 005e1525ab8..299b4121f91 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -137,8 +137,6 @@ extern struct ctl_table ether_table[];
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_BUF_SIZE 18
-#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE]
#endif
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e3f2a4c25f..99dc6d5cf7e 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -13,18 +13,14 @@
#include <linux/fs.h>
struct linux_binprm;
-#define IMA_COUNT_UPDATE 1
-#define IMA_COUNT_LEAVE 0
-
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_inode_alloc(struct inode *inode);
extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask, int update_counts);
+extern int ima_path_check(struct path *path, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_counts_get(struct file *file);
-extern void ima_counts_put(struct path *path, int mask);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -42,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
return;
}
-static inline int ima_path_check(struct path *path, int mask, int update_counts)
+static inline int ima_path_check(struct path *path, int mask)
{
return 0;
}
@@ -62,9 +58,5 @@ static inline void ima_counts_get(struct file *file)
return;
}
-static inline void ima_counts_put(struct path *path, int mask)
-{
- return;
-}
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index ff8bde520d0..ab1d31f9352 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -149,6 +149,8 @@ void prepare_namespace(void);
extern void (*late_time_init)(void);
+extern int initcall_debug;
+
#endif
#ifndef MODULE
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8d10aa7fd4c..abec69b63d7 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -165,7 +165,7 @@ extern struct cred init_cred;
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4f0a72a9740..9310c699a37 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -332,6 +332,7 @@ struct intel_iommu {
#ifdef CONFIG_INTR_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
#endif
+ int node;
};
static inline void __iommu_flush_cache(
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 3b068e5b567..64d1b638745 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -14,14 +14,11 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
unsigned long boundary_size);
-extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
unsigned long boundary_size,
unsigned long align_mask);
-extern void iommu_area_free(unsigned long *map, unsigned long start,
- unsigned int nr);
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 83aa81297ea..7129504e053 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -126,11 +126,11 @@ extern int allocate_resource(struct resource *root, struct resource *new,
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
-static inline resource_size_t resource_size(struct resource *res)
+static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
-static inline unsigned long resource_type(struct resource *res)
+static inline unsigned long resource_type(const struct resource *res)
{
return res->flags & IORESOURCE_TYPE_BITS;
}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e408722a84c..07baa38bce3 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -87,7 +87,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
/* default values */
#define DFLT_QUEUESMAX 256 /* max number of message queues */
#define DFLT_MSGMAX 10 /* max number of messages in each queue */
-#define HARD_MSGMAX (131072/sizeof(void *))
+#define HARD_MSGMAX (32768*sizeof(void *)/4)
#define DFLT_MSGSIZEMAX 8192 /* max message size */
#else
static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
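
[Rough arithmetic check, not taken from the patch itself: with the new definition HARD_MSGMAX evaluates to 32768*8/4 = 65536 entries on a 64-bit kernel and 32768*4/4 = 32768 on 32-bit, whereas the old 131072/sizeof(void *) gave 16384 and 32768 respectively.]
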
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a..451481c082b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
index bbd42197298..b92e0565063 100644
--- a/include/linux/isicom.h
+++ b/include/linux/isicom.h
@@ -67,6 +67,7 @@
#define FIRMWARE_LOADED 0x0001
#define BOARD_ACTIVE 0x0002
+#define BOARD_INIT 0x0004
/* isi_port status bitmap */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 792274269f2..d8e9b3d1c23 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
__builtin_extract_return_addr((void *)addr));
}
-/*
- * Pretty-print a function pointer. This function is deprecated.
- * Please use the "%pF" vsprintf format instead.
- */
-static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
- addr = *(void **)addr;
-#endif
- print_symbol(fmt, (unsigned long)addr);
-}
-
static inline void print_ip_sym(unsigned long ip)
{
printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
new file mode 100644
index 00000000000..bd92a89f4b0
--- /dev/null
+++ b/include/linux/kernel-page-flags.h
@@ -0,0 +1,46 @@
+#ifndef LINUX_KERNEL_PAGE_FLAGS_H
+#define LINUX_KERNEL_PAGE_FLAGS_H
+
+/*
+ * Stable page flag bits exported to user space
+ */
+
+#define KPF_LOCKED 0
+#define KPF_ERROR 1
+#define KPF_REFERENCED 2
+#define KPF_UPTODATE 3
+#define KPF_DIRTY 4
+#define KPF_LRU 5
+#define KPF_ACTIVE 6
+#define KPF_SLAB 7
+#define KPF_WRITEBACK 8
+#define KPF_RECLAIM 9
+#define KPF_BUDDY 10
+
+/* 11-20: new additions in 2.6.31 */
+#define KPF_MMAP 11
+#define KPF_ANON 12
+#define KPF_SWAPCACHE 13
+#define KPF_SWAPBACKED 14
+#define KPF_COMPOUND_HEAD 15
+#define KPF_COMPOUND_TAIL 16
+#define KPF_HUGE 17
+#define KPF_UNEVICTABLE 18
+#define KPF_HWPOISON 19
+#define KPF_NOPAGE 20
+
+#define KPF_KSM 21
+
+/* kernel hacking assistances
+ * WARNING: subject to change, never rely on them!
+ */
+#define KPF_RESERVED 32
+#define KPF_MLOCKED 33
+#define KPF_MAPPEDTODISK 34
+#define KPF_PRIVATE 35
+#define KPF_PRIVATE_2 36
+#define KPF_OWNER_PRIVATE 37
+#define KPF_ARCH 38
+#define KPF_UNCACHED 39
+
+#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
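
[Illustrative user-space sketch, not part of this commit: testing one of the stable bits above via /proc/kpageflags. The page frame number is a placeholder.]

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define KPF_LRU	5			/* mirrors the kernel header */

int main(void)
{
	uint64_t flags, pfn = 0x1000;	/* placeholder pfn */
	int fd = open("/proc/kpageflags", O_RDONLY);

	if (fd < 0)
		return 1;
	/* one 64-bit flags word per page frame */
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) == sizeof(flags))
		printf("pfn %llu on LRU: %d\n", (unsigned long long)pfn,
		       (int)((flags >> KPF_LRU) & 1));
	close(fd);
	return 0;
}
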
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fa4c590cf1..3fc9f5aab5f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -251,10 +251,10 @@ extern int printk_delay_msec;
* Print a one-time message (analogous to WARN_ONCE() et al):
*/
#define printk_once(x...) ({ \
- static bool __print_once = true; \
+ static bool __print_once; \
\
- if (__print_once) { \
- __print_once = false; \
+ if (!__print_once) { \
+ __print_once = true; \
printk(x); \
} \
})
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#elif defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
-#define pr_debug(fmt, ...) do { \
- dynamic_pr_debug(fmt, ##__VA_ARGS__); \
- } while (0)
+#define pr_debug(fmt, ...) \
+ dynamic_pr_debug(fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
/*
+ * ratelimited messages with local ratelimit_state,
+ * no local ratelimit_state used in the !PRINTK case
+ */
+#ifdef CONFIG_PRINTK
+#define printk_ratelimited(fmt, ...) ({ \
+ static struct ratelimit_state _rs = { \
+ .interval = DEFAULT_RATELIMIT_INTERVAL, \
+ .burst = DEFAULT_RATELIMIT_BURST, \
+ }; \
+ \
+ if (!__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+#else
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_ratelimited printk
+#endif
+
+#define pr_emerg_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_ratelimited, don't do that... */
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_ratelimited(fmt, ...) \
+ ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
+ ##__VA_ARGS__); 0; })
+#endif
+
+/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
@@ -492,6 +535,8 @@ extern int
__trace_printk(unsigned long ip, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
+extern void trace_dump_stack(void);
+
/*
* The double __builtin_constant_p is because gcc will give us an error
* if we try to allocate the static variable to fmt if it is not a
@@ -525,6 +570,7 @@ trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
+static inline void trace_dump_stack(void) { }
static inline int
trace_printk(const char *fmt, ...)
{
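
[Illustrative sketch, not part of this commit: using the new ratelimited printk helpers from a driver error path. The message text is a placeholder.]

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static void example_handle_error(int err)
{
	/* emits at most DEFAULT_RATELIMIT_BURST messages per interval */
	pr_err_ratelimited("example: transfer failed with error %d\n", err);
}
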
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index adc34f2c6ef..c356b6914ff 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -206,6 +206,8 @@ extern size_t vmcoreinfo_max_size;
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
+int crash_shrink_memory(unsigned long new_size);
+size_t crash_get_memory_size(void);
#else /* !CONFIG_KEXEC */
struct pt_regs;
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index ad6bdf5a597..486e8ad3bb5 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,6 +1,7 @@
/*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
*
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
* Copyright (C) 2004 Stelian Pop <stelian@popies.net>
*
* This program is free software; you can redistribute it and/or modify
@@ -18,6 +19,25 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
+
+/*
+ * Howto porting drivers to the new generic fifo API:
+ *
+ * - Modify the declaration of the "struct kfifo *" object into an
+ * in-place "struct kfifo" object
+ * - Init the in-place object with kfifo_alloc() or kfifo_init()
+ * Note: The address of the in-place "struct kfifo" object must be
+ * passed as the first argument to these functions
+ * - Replace the use of __kfifo_put with kfifo_in and __kfifo_get
+ * with kfifo_out
+ * - Replace the use of kfifo_put with kfifo_in_locked and kfifo_get
+ * with kfifo_out_locked
+ * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc
+ * must now be passed to kfifo_in_locked and kfifo_out_locked
+ * as the last parameter.
+ * - All formerly named __kfifo_* functions have been renamed to kfifo_*
+ */
+
#ifndef _LINUX_KFIFO_H
#define _LINUX_KFIFO_H
@@ -29,124 +49,563 @@ struct kfifo {
unsigned int size; /* the size of the allocated buffer */
unsigned int in; /* data is added at offset (in % size) */
unsigned int out; /* data is extracted from off. (out % size) */
- spinlock_t *lock; /* protects concurrent modifications */
};
-extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- gfp_t gfp_mask, spinlock_t *lock);
-extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
- spinlock_t *lock);
+/*
+ * Macros for declaration and initialization of the kfifo datatype
+ */
+
+/* helper macro */
+#define __kfifo_initializer(s, b) \
+ (struct kfifo) { \
+ .size = s, \
+ .in = 0, \
+ .out = 0, \
+ .buffer = b \
+ }
+
+/**
+ * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
+ * @name: name of the declared kfifo datatype
+ * @size: size of the fifo buffer
+ *
+ * Note: the macro can be used inside struct or union declaration
+ * Note: the macro creates two objects:
+ * A kfifo object with the given name and a buffer for the kfifo
+ * object named name##kfifo_buffer
+ */
+#define DECLARE_KFIFO(name, size) \
+union { \
+ struct kfifo name; \
+ unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \
+}
+
+/**
+ * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
+ * @name: name of the declared kfifo datatype
+ * @size: size of the fifo buffer
+ */
+#define INIT_KFIFO(name) \
+ name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
+ sizeof(struct kfifo), name##kfifo_buffer)
+
+/**
+ * DEFINE_KFIFO - macro to define and initialize a kfifo
+ * @name: name of the declared kfifo datatype
+ * @size: size of the fifo buffer
+ *
+ * Note: the macro can be used for global and local kfifo data type variables
+ * Note: the macro creates two objects:
+ * A kfifo object with the given name and a buffer for the kfifo
+ * object named name##kfifo_buffer
+ */
+#define DEFINE_KFIFO(name, size) \
+ unsigned char name##kfifo_buffer[size]; \
+ struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer)
+
+#undef __kfifo_initializer
+
+extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+ unsigned int size);
+extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
+ gfp_t gfp_mask);
extern void kfifo_free(struct kfifo *fifo);
-extern unsigned int __kfifo_put(struct kfifo *fifo,
- const unsigned char *buffer, unsigned int len);
-extern unsigned int __kfifo_get(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len);
+extern unsigned int kfifo_in(struct kfifo *fifo,
+ const unsigned char *from, unsigned int len);
+extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
+ unsigned char *to, unsigned int len);
/**
- * __kfifo_reset - removes the entire FIFO contents, no locking version
+ * kfifo_reset - removes the entire FIFO contents
* @fifo: the fifo to be emptied.
*/
-static inline void __kfifo_reset(struct kfifo *fifo)
+static inline void kfifo_reset(struct kfifo *fifo)
{
fifo->in = fifo->out = 0;
}
/**
- * kfifo_reset - removes the entire FIFO contents
+ * kfifo_reset_out - skip FIFO contents
* @fifo: the fifo to be emptied.
*/
-static inline void kfifo_reset(struct kfifo *fifo)
+static inline void kfifo_reset_out(struct kfifo *fifo)
{
- unsigned long flags;
+ smp_mb();
+ fifo->out = fifo->in;
+}
- spin_lock_irqsave(fifo->lock, flags);
+/**
+ * kfifo_size - returns the size of the fifo in bytes
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check unsigned int kfifo_size(struct kfifo *fifo)
+{
+ return fifo->size;
+}
- __kfifo_reset(fifo);
+/**
+ * kfifo_len - returns the number of used bytes in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline unsigned int kfifo_len(struct kfifo *fifo)
+{
+ register unsigned int out;
- spin_unlock_irqrestore(fifo->lock, flags);
+ out = fifo->out;
+ smp_rmb();
+ return fifo->in - out;
}
/**
- * kfifo_put - puts some data into the FIFO
+ * kfifo_is_empty - returns true if the fifo is empty
* @fifo: the fifo to be used.
- * @buffer: the data to be added.
- * @len: the length of the data to be added.
+ */
+static inline __must_check int kfifo_is_empty(struct kfifo *fifo)
+{
+ return fifo->in == fifo->out;
+}
+
+/**
+ * kfifo_is_full - returns true if the fifo is full
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check int kfifo_is_full(struct kfifo *fifo)
+{
+ return kfifo_len(fifo) == kfifo_size(fifo);
+}
+
+/**
+ * kfifo_avail - returns the number of bytes available in the FIFO
+ * @fifo: the fifo to be used.
+ */
+static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
+{
+ return kfifo_size(fifo) - kfifo_len(fifo);
+}
+
+/**
+ * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking
+ * @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @n: the length of the data to be added.
+ * @lock: pointer to the spinlock to use for locking.
*
- * This function copies at most @len bytes from the @buffer into
+ * This function copies at most @len bytes from the @from buffer into
* the FIFO depending on the free space, and returns the number of
* bytes copied.
*/
-static inline unsigned int kfifo_put(struct kfifo *fifo,
- const unsigned char *buffer, unsigned int len)
+static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
+ const unsigned char *from, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(fifo->lock, flags);
+ spin_lock_irqsave(lock, flags);
- ret = __kfifo_put(fifo, buffer, len);
+ ret = kfifo_in(fifo, from, n);
- spin_unlock_irqrestore(fifo->lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return ret;
}
/**
- * kfifo_get - gets some data from the FIFO
+ * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking
* @fifo: the fifo to be used.
- * @buffer: where the data must be copied.
- * @len: the size of the destination buffer.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @lock: pointer to the spinlock to use for locking.
*
* This function copies at most @len bytes from the FIFO into the
- * @buffer and returns the number of copied bytes.
+ * @to buffer and returns the number of copied bytes.
*/
-static inline unsigned int kfifo_get(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len)
+static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
+ unsigned char *to, unsigned int n, spinlock_t *lock)
{
unsigned long flags;
unsigned int ret;
- spin_lock_irqsave(fifo->lock, flags);
+ spin_lock_irqsave(lock, flags);
- ret = __kfifo_get(fifo, buffer, len);
+ ret = kfifo_out(fifo, to, n);
/*
* optimization: if the FIFO is empty, set the indices to 0
* so we don't wrap the next time
*/
- if (fifo->in == fifo->out)
- fifo->in = fifo->out = 0;
+ if (kfifo_is_empty(fifo))
+ kfifo_reset(fifo);
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return ret;
+}
+
+extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
+
+extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int n);
+
+extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int n);
+
+/**
+ * __kfifo_add_out internal helper function for updating the out offset
+ */
+static inline void __kfifo_add_out(struct kfifo *fifo,
+ unsigned int off)
+{
+ smp_mb();
+ fifo->out += off;
+}
+
+/**
+ * __kfifo_add_in internal helper function for updating the in offset
+ */
+static inline void __kfifo_add_in(struct kfifo *fifo,
+ unsigned int off)
+{
+ smp_wmb();
+ fifo->in += off;
+}
+
+/**
+ * __kfifo_off internal helper function for calculating the index of a
+ * given offset
+ */
+static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off)
+{
+ return off & (fifo->size - 1);
+}
+
+/**
+ * __kfifo_peek_n internal helper function for determining the length of
+ * the next record in the fifo
+ */
+static inline unsigned int __kfifo_peek_n(struct kfifo *fifo,
+ unsigned int recsize)
+{
+#define __KFIFO_GET(fifo, off, shift) \
+ ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift))
+
+ unsigned int l;
+
+ l = __KFIFO_GET(fifo, 0, 0);
+
+ if (--recsize)
+ l |= __KFIFO_GET(fifo, 1, 8);
+
+ return l;
+#undef __KFIFO_GET
+}
+
+/**
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the next record into the fifo
+ */
+static inline void __kfifo_poke_n(struct kfifo *fifo,
+ unsigned int recsize, unsigned int n)
+{
+#define __KFIFO_PUT(fifo, off, val, shift) \
+ ( \
+ (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \
+ (unsigned char)((val) >> (shift)) \
+ )
- spin_unlock_irqrestore(fifo->lock, flags);
+ __KFIFO_PUT(fifo, 0, n, 0);
+ if (--recsize)
+ __KFIFO_PUT(fifo, 1, n, 8);
+#undef __KFIFO_PUT
+}
+
+/**
+ * __kfifo_in_... internal functions for putting data into the fifo
+ * do not call it directly, use kfifo_in_rec() instead
+ */
+extern unsigned int __kfifo_in_n(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize);
+
+extern unsigned int __kfifo_in_generic(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize);
+
+static inline unsigned int __kfifo_in_rec(struct kfifo *fifo,
+ const void *from, unsigned int n, unsigned int recsize)
+{
+ unsigned int ret;
+
+ ret = __kfifo_in_n(fifo, from, n, recsize);
+
+ if (likely(ret == 0)) {
+ if (recsize)
+ __kfifo_poke_n(fifo, recsize, n);
+ __kfifo_add_in(fifo, n + recsize);
+ }
return ret;
}
/**
- * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
+ * kfifo_in_rec - puts some record data into the FIFO
* @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @n: the length of the data to be added.
+ * @recsize: size of record field
+ *
+ * This function copies @n bytes from the @from into the FIFO and returns
+ * the number of bytes which cannot be copied.
+ * A returned value greater than the @n value means that the record doesn't
+ * fit into the buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
*/
-static inline unsigned int __kfifo_len(struct kfifo *fifo)
+static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo,
+ void *from, unsigned int n, unsigned int recsize)
{
- return fifo->in - fifo->out;
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_in_generic(fifo, from, n, recsize);
+ return __kfifo_in_rec(fifo, from, n, recsize);
}
/**
- * kfifo_len - returns the number of bytes available in the FIFO
+ * __kfifo_out_... internal functions for getting data from the fifo
+ * do not call it directly, use kfifo_out_rec() instead
+ */
+extern unsigned int __kfifo_out_n(struct kfifo *fifo,
+ void *to, unsigned int reclen, unsigned int recsize);
+
+extern unsigned int __kfifo_out_generic(struct kfifo *fifo,
+ void *to, unsigned int n,
+ unsigned int recsize, unsigned int *total);
+
+static inline unsigned int __kfifo_out_rec(struct kfifo *fifo,
+ void *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+{
+ unsigned int l;
+
+ if (!recsize) {
+ l = n;
+ if (total)
+ *total = l;
+ } else {
+ l = __kfifo_peek_n(fifo, recsize);
+ if (total)
+ *total = l;
+ if (n < l)
+ return l;
+ }
+
+ return __kfifo_out_n(fifo, to, l, recsize);
+}
+
+/**
+ * kfifo_out_rec - gets some record data from the FIFO
* @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @recsize: size of record field
+ * @total: pointer where the total number of copied bytes should be stored
+ *
+ * This function copies at most @n bytes from the FIFO to @to and returns the
+ * number of bytes which cannot be copied.
+ * A returned value greater than the @n value means that the record doesn't
+ * fit into the @to buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
*/
-static inline unsigned int kfifo_len(struct kfifo *fifo)
+static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo,
+ void *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+
{
- unsigned long flags;
- unsigned int ret;
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_out_generic(fifo, to, n, recsize, total);
+ return __kfifo_out_rec(fifo, to, n, recsize, total);
+}
+
+/**
+ * __kfifo_from_user_... internal functions for transfer from user space into
+ * the fifo. do not call it directly, use kfifo_from_user_rec() instead
+ */
+extern unsigned int __kfifo_from_user_n(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize);
- spin_lock_irqsave(fifo->lock, flags);
+extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize);
- ret = __kfifo_len(fifo);
+static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize)
+{
+ unsigned int ret;
- spin_unlock_irqrestore(fifo->lock, flags);
+ ret = __kfifo_from_user_n(fifo, from, n, recsize);
+ if (likely(ret == 0)) {
+ if (recsize)
+ __kfifo_poke_n(fifo, recsize, n);
+ __kfifo_add_in(fifo, n + recsize);
+ }
return ret;
}
+/**
+ * kfifo_from_user_rec - puts some data from user space into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: pointer to the data to be added.
+ * @n: the length of the data to be added.
+ * @recsize: size of record field
+ *
+ * This function copies @n bytes from the @from into the
+ * FIFO and returns the number of bytes which cannot be copied.
+ *
+ * If the returned value is less than or equal to @n, copy_from_user()
+ * has failed. Otherwise the record doesn't fit into the buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo,
+ const void __user *from, unsigned int n, unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_from_user_generic(fifo, from, n, recsize);
+ return __kfifo_from_user_rec(fifo, from, n, recsize);
+}
+
+/**
+ * __kfifo_to_user_... internal functions for transfer fifo data into user space
+ * do not call it directly, use kfifo_to_user_rec() instead
+ */
+extern unsigned int __kfifo_to_user_n(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int reclen,
+ unsigned int recsize);
+
+extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int recsize,
+ unsigned int *total);
+
+static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo,
+ void __user *to, unsigned int n,
+ unsigned int recsize, unsigned int *total)
+{
+ unsigned int l;
+
+ if (!recsize) {
+ l = n;
+ if (total)
+ *total = l;
+ } else {
+ l = __kfifo_peek_n(fifo, recsize);
+ if (total)
+ *total = l;
+ if (n < l)
+ return l;
+ }
+
+ return __kfifo_to_user_n(fifo, to, n, l, recsize);
+}
+
+/**
+ * kfifo_to_user_rec - gets data from the FIFO and write it to user space
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @n: the size of the destination buffer.
+ * @recsize: size of record field
+ * @total: pointer where the total number of copied bytes should be stored
+ *
+ * This function copies at most @n bytes from the FIFO to the @to.
+ * In case of an error, the function returns the number of bytes which cannot
+ * be copied.
+ * If the returned value is less than or equal to @n, copy_to_user()
+ * has failed. Otherwise the record doesn't fit into the @to buffer.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo,
+ void __user *to, unsigned int n, unsigned int recsize,
+ unsigned int *total)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_to_user_generic(fifo, to, n, recsize, total);
+ return __kfifo_to_user_rec(fifo, to, n, recsize, total);
+}
+
+/**
+ * __kfifo_peek_... internal functions for peek into the next fifo record
+ * do not call it directly, use kfifo_peek_rec() instead
+ */
+extern unsigned int __kfifo_peek_generic(struct kfifo *fifo,
+ unsigned int recsize);
+
+/**
+ * kfifo_peek_rec - gets the size of the next FIFO record data
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ *
+ * This function returns the size of the next FIFO record in number of bytes
+ */
+static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ return __kfifo_peek_generic(fifo, recsize);
+ if (!recsize)
+ return kfifo_len(fifo);
+ return __kfifo_peek_n(fifo, recsize);
+}
+
+/**
+ * __kfifo_skip_... internal functions for skip the next fifo record
+ * do not call it directly, use kfifo_skip_rec() instead
+ */
+extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize);
+
+static inline void __kfifo_skip_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ unsigned int l;
+
+ if (recsize) {
+ l = __kfifo_peek_n(fifo, recsize);
+
+ if (l + recsize <= kfifo_len(fifo)) {
+ __kfifo_add_out(fifo, l + recsize);
+ return;
+ }
+ }
+ kfifo_reset_out(fifo);
+}
+
+/**
+ * kfifo_skip_rec - skip the next fifo out record
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ *
+ * This function skips the next FIFO record
+ */
+static inline void kfifo_skip_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ if (!__builtin_constant_p(recsize))
+ __kfifo_skip_generic(fifo, recsize);
+ else
+ __kfifo_skip_rec(fifo, recsize);
+}
+
+/**
+ * kfifo_avail_rec - returns the number of bytes available in a record FIFO
+ * @fifo: the fifo to be used.
+ * @recsize: size of record field
+ */
+static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo,
+ unsigned int recsize)
+{
+ unsigned int l = kfifo_size(fifo) - kfifo_len(fifo);
+
+ return (l > recsize) ? l - recsize : 0;
+}
+
#endif
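
[Illustrative sketch, not part of this commit: a driver-local byte FIFO ported to the new in-place API, with the spinlock now passed explicitly to the *_locked helpers. Size and names are placeholders; the buffer size must be a power of two.]

#include <linux/kfifo.h>
#include <linux/spinlock.h>

DEFINE_KFIFO(example_fifo, 128);		/* in-place struct kfifo plus buffer */
static DEFINE_SPINLOCK(example_lock);

static void example_produce(const unsigned char *buf, unsigned int len)
{
	/* formerly kfifo_put() with the lock stored inside the fifo */
	kfifo_in_locked(&example_fifo, buf, len, &example_lock);
}

static unsigned int example_consume(unsigned char *buf, unsigned int len)
{
	/* formerly kfifo_get() */
	return kfifo_out_locked(&example_fifo, buf, len, &example_lock);
}
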
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 3c7497d46ee..99d9a6766f7 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
size_t size) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
static inline void kmemleak_ignore(const void *ptr)
{
}
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
}
static inline void kmemleak_erase(void **ptr)
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
new file mode 100644
index 00000000000..e32aa268efa
--- /dev/null
+++ b/include/linux/kmsg_dump.h
@@ -0,0 +1,60 @@
+/*
+ * include/linux/kmsg_dump.h
+ *
+ * Copyright (C) 2009 Net Insight AB
+ *
+ * Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+#ifndef _LINUX_KMSG_DUMP_H
+#define _LINUX_KMSG_DUMP_H
+
+#include <linux/list.h>
+
+enum kmsg_dump_reason {
+ KMSG_DUMP_OOPS,
+ KMSG_DUMP_PANIC,
+};
+
+/**
+ * struct kmsg_dumper - kernel crash message dumper structure
+ * @dump: The callback which gets called on crashes. The buffer is passed
+ * as two sections, where s1 (length l1) contains the older
+ * messages and s2 (length l2) contains the newer.
+ * @list: Entry in the dumper list (private)
+ * @registered: Flag that specifies if this is already registered
+ */
+struct kmsg_dumper {
+ void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
+ const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2);
+ struct list_head list;
+ int registered;
+};
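+
+/*
+ * Example (a minimal sketch; "my_write_to_storage" is a hypothetical
+ * helper): a dumper saves the two buffer sections, oldest first, and is
+ * registered once at probe time:
+ *
+ *	static void my_dump(struct kmsg_dumper *dumper,
+ *			    enum kmsg_dump_reason reason,
+ *			    const char *s1, unsigned long l1,
+ *			    const char *s2, unsigned long l2)
+ *	{
+ *		my_write_to_storage(s1, l1);
+ *		my_write_to_storage(s2, l2);
+ *	}
+ *
+ *	static struct kmsg_dumper my_dumper = { .dump = my_dump };
+ *
+ *	kmsg_dump_register(&my_dumper);
+ */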
+
+#ifdef CONFIG_PRINTK
+void kmsg_dump(enum kmsg_dump_reason reason);
+
+int kmsg_dump_register(struct kmsg_dumper *dumper);
+
+int kmsg_dump_unregister(struct kmsg_dumper *dumper);
+#else
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+}
+
+static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+
+static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _LINUX_KMSG_DUMP_H */
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5..43bdab769fc 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
#include <linux/bitops.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/vmstat.h>
+
+struct stable_node;
+struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,46 +38,110 @@ static inline void ksm_exit(struct mm_struct *mm)
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
* which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
*/
static inline int PageKsm(struct page *page)
{
- return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+ return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+ struct stable_node *stable_node)
+{
+ page->mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
/*
- * But we have to avoid the checking which page_add_anon_rmap() performs.
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma). do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
*/
-static inline void page_add_ksm_rmap(struct page *page)
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- page->mapping = (void *) PAGE_MAPPING_ANON;
- __inc_zone_page_state(page, NR_ANON_PAGES);
- }
+ struct anon_vma *anon_vma = page_anon_vma(page);
+
+ if (!anon_vma ||
+ (anon_vma == vma->anon_vma &&
+ page->index == linear_page_index(vma, address)))
+ return page;
+
+ return ksm_does_need_to_copy(page, vma, address);
}
+
+int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
#else /* !CONFIG_KSM */
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+
+static inline int PageKsm(struct page *page)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
{
return 0;
}
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags)
{
return 0;
}
-static inline void ksm_exit(struct mm_struct *mm)
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
+ return 0;
}
-static inline int PageKsm(struct page *page)
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
{
return 0;
}
-/* No stub required for page_add_ksm_rmap(page) */
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
+#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
-#endif
+#endif /* __LINUX_KSM_H */
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2d241da0723..a24de0b1858 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -496,6 +496,7 @@ struct kvm_ioeventfd {
#define KVM_CAP_VCPU_EVENTS 41
#endif
#define KVM_CAP_S390_PSW 42
+#define KVM_CAP_PPC_SEGSTATE 43
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
index afc9f9fd70f..2618aa9063b 100644
--- a/include/linux/leds-lp3944.h
+++ b/include/linux/leds-lp3944.h
@@ -12,9 +12,6 @@
#ifndef __LINUX_LEDS_LP3944_H
#define __LINUX_LEDS_LP3944_H
-#include <linux/leds.h>
-#include <linux/workqueue.h>
-
#define LP3944_LED0 0
#define LP3944_LED1 1
#define LP3944_LED2 2
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index 96eea90f01a..f158eb1149a 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -32,7 +32,7 @@ struct pca9532_led {
struct i2c_client *client;
char *name;
struct led_classdev ldev;
- struct work_struct work;
+ struct work_struct work;
enum pca9532_type type;
enum pca9532_state state;
};
diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h
new file mode 100644
index 00000000000..5a8eb389aab
--- /dev/null
+++ b/include/linux/leds-regulator.h
@@ -0,0 +1,46 @@
+/*
+ * leds-regulator.h - platform data structure for regulator driven LEDs.
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LEDS_REGULATOR_H
+#define __LINUX_LEDS_REGULATOR_H
+
+/*
+ * Use "vled" as supply id when declaring the regulator consumer:
+ *
+ * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = {
+ * { .dev_name = "leds-regulator.0", .supply = "vled" },
+ * };
+ *
+ * If you have several regulator driven LEDs, you can append a numerical id to
+ * .dev_name as done above, and use the same id when declaring the platform
+ * device:
+ *
+ * static struct led_regulator_platform_data a780_vibrator_data = {
+ * .name = "a780::vibrator",
+ * };
+ *
+ * static struct platform_device a780_vibrator = {
+ * .name = "leds-regulator",
+ * .id = 0,
+ * .dev = {
+ * .platform_data = &a780_vibrator_data,
+ * },
+ * };
+ */
+
+#include <linux/leds.h>
+
+struct led_regulator_platform_data {
+ char *name; /* LED name as expected by LED class */
+ enum led_brightness brightness; /* initial brightness value */
+};
+
+#endif /* __LINUX_LEDS_REGULATOR_H */
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index 3cc2f2c53e4..f1ca0dcc162 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data {
#define LIS3_WAKEUP_Z_HI (1 << 5)
unsigned char wakeup_flags;
unsigned char wakeup_thresh;
+#define LIS3_NO_MAP 0
+#define LIS3_DEV_X 1
+#define LIS3_DEV_Y 2
+#define LIS3_DEV_Z 3
+#define LIS3_INV_DEV_X -1
+#define LIS3_INV_DEV_Y -2
+#define LIS3_INV_DEV_Z -3
+ s8 axis_x;
+ s8 axis_y;
+ s8 axis_z;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+ /* Limits for selftest are specified in chip data sheet */
+ s16 st_min_limits[3]; /* min pass limit x, y, z */
+ s16 st_max_limits[3]; /* max pass limit x, y, z */
};
#endif /* __LIS3LV02D_H_ */
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 2442e3f3d03..ef82b8fcbdd 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -54,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void);
extern u64 lmb_end_of_DRAM(void);
extern void __init lmb_enforce_memory_limit(u64 memory_limit);
extern int __init lmb_is_reserved(u64 addr);
+extern int lmb_is_region_reserved(u64 base, u64 size);
extern int lmb_find(struct lmb_property *res);
extern void lmb_dump_all(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bf9213b2db8..1f9b119f4ac 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -54,6 +54,11 @@ extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
enum lru_list from, enum lru_list to);
+
+/* For coalescing uncharge to reduce memcg overhead */
+extern void mem_cgroup_uncharge_start(void);
+extern void mem_cgroup_uncharge_end(void);
+
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
@@ -68,6 +73,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
+extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
static inline
@@ -80,6 +86,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
return cgroup == mem;
}
+extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
+
extern int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
@@ -117,7 +125,7 @@ static inline bool mem_cgroup_disabled(void)
}
extern bool mem_cgroup_oom_called(struct task_struct *task);
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+void mem_cgroup_update_file_mapped(struct page *page, int val);
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask, int nid,
int zid);
@@ -151,6 +159,14 @@ static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}
+static inline void mem_cgroup_uncharge_start(void)
+{
+}
+
+static inline void mem_cgroup_uncharge_end(void)
+{
+}
+
static inline void mem_cgroup_uncharge_page(struct page *page)
{
}
@@ -189,6 +205,11 @@ mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}
+static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
+
static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
return 1;
@@ -200,6 +221,11 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
return 1;
}
+static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+{
+ return NULL;
+}
+
static inline int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
@@ -274,7 +300,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+static inline void mem_cgroup_update_file_mapped(struct page *page,
int val)
{
}
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 37fa19b34ef..1adfe779eb9 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -50,6 +50,19 @@ struct memory_notify {
int status_change_nid;
};
+/*
+ * During pageblock isolation, count the number of pages within the
+ * range [start_pfn, start_pfn + nr_pages) which are owned by code
+ * in the notifier chain.
+ */
+#define MEM_ISOLATE_COUNT (1<<0)
+
+struct memory_isolate_notify {
+ unsigned long start_pfn; /* Start of range to check */
+ unsigned int nr_pages; /* # pages in range to check */
+ unsigned int pages_found; /* # owned pages found by callbacks */
+};
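+
+/*
+ * Example (a sketch only; "count_my_pages" is a hypothetical helper): a
+ * driver that owns pages in the range can report them from its notifier
+ * callback so that pageblock isolation can account for them:
+ *
+ *	static int my_isolate_cb(struct notifier_block *nb,
+ *				 unsigned long action, void *arg)
+ *	{
+ *		struct memory_isolate_notify *mn = arg;
+ *
+ *		if (action == MEM_ISOLATE_COUNT)
+ *			mn->pages_found += count_my_pages(mn->start_pfn,
+ *							  mn->nr_pages);
+ *		return NOTIFY_OK;
+ *	}
+ */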
+
struct notifier_block;
struct mem_section;
@@ -76,14 +89,28 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
+static inline int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+}
+static inline int memory_isolate_notify(unsigned long val, void *v)
+{
+ return 0;
+}
#else
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
+extern int register_memory_isolate_notifier(struct notifier_block *nb);
+extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
extern int unregister_memory_section(struct mem_section *);
extern int memory_dev_init(void);
extern int remove_memory_block(unsigned long, struct mem_section *, int);
extern int memory_notify(unsigned long val, void *v);
+extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block(struct mem_section *);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
enum mem_add_context { BOOT, HOTPLUG };
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fed969281a4..35b07b773e6 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -69,7 +69,6 @@ extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
-extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 085c903fe0f..1cc966cd3e5 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
+extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
return node_zonelist(0, gfp_flags);
}
+static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }
+
static inline int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes,
const nodemask_t *to_nodes, int flags)
diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h
new file mode 100644
index 00000000000..f41b428d2ce
--- /dev/null
+++ b/include/linux/mfd/88pm8607.h
@@ -0,0 +1,217 @@
+/*
+ * Marvell 88PM8607 Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM8607_H
+#define __LINUX_MFD_88PM8607_H
+
+enum {
+ PM8607_ID_BUCK1 = 0,
+ PM8607_ID_BUCK2,
+ PM8607_ID_BUCK3,
+
+ PM8607_ID_LDO1,
+ PM8607_ID_LDO2,
+ PM8607_ID_LDO3,
+ PM8607_ID_LDO4,
+ PM8607_ID_LDO5,
+ PM8607_ID_LDO6,
+ PM8607_ID_LDO7,
+ PM8607_ID_LDO8,
+ PM8607_ID_LDO9,
+ PM8607_ID_LDO10,
+ PM8607_ID_LDO12,
+ PM8607_ID_LDO14,
+
+ PM8607_ID_RG_MAX,
+};
+
+#define CHIP_ID (0x40)
+#define CHIP_ID_MASK (0xF8)
+
+/* Interrupt Registers */
+#define PM8607_STATUS_1 (0x01)
+#define PM8607_STATUS_2 (0x02)
+#define PM8607_INT_STATUS1 (0x03)
+#define PM8607_INT_STATUS2 (0x04)
+#define PM8607_INT_STATUS3 (0x05)
+#define PM8607_INT_MASK_1 (0x06)
+#define PM8607_INT_MASK_2 (0x07)
+#define PM8607_INT_MASK_3 (0x08)
+
+/* Regulator Control Registers */
+#define PM8607_LDO1 (0x10)
+#define PM8607_LDO2 (0x11)
+#define PM8607_LDO3 (0x12)
+#define PM8607_LDO4 (0x13)
+#define PM8607_LDO5 (0x14)
+#define PM8607_LDO6 (0x15)
+#define PM8607_LDO7 (0x16)
+#define PM8607_LDO8 (0x17)
+#define PM8607_LDO9 (0x18)
+#define PM8607_LDO10 (0x19)
+#define PM8607_LDO12 (0x1A)
+#define PM8607_LDO14 (0x1B)
+#define PM8607_SLEEP_MODE1 (0x1C)
+#define PM8607_SLEEP_MODE2 (0x1D)
+#define PM8607_SLEEP_MODE3 (0x1E)
+#define PM8607_SLEEP_MODE4 (0x1F)
+#define PM8607_GO (0x20)
+#define PM8607_SLEEP_BUCK1 (0x21)
+#define PM8607_SLEEP_BUCK2 (0x22)
+#define PM8607_SLEEP_BUCK3 (0x23)
+#define PM8607_BUCK1 (0x24)
+#define PM8607_BUCK2 (0x25)
+#define PM8607_BUCK3 (0x26)
+#define PM8607_BUCK_CONTROLS (0x27)
+#define PM8607_SUPPLIES_EN11 (0x2B)
+#define PM8607_SUPPLIES_EN12 (0x2C)
+#define PM8607_GROUP1 (0x2D)
+#define PM8607_GROUP2 (0x2E)
+#define PM8607_GROUP3 (0x2F)
+#define PM8607_GROUP4 (0x30)
+#define PM8607_GROUP5 (0x31)
+#define PM8607_GROUP6 (0x32)
+#define PM8607_SUPPLIES_EN21 (0x33)
+#define PM8607_SUPPLIES_EN22 (0x34)
+
+/* RTC Control Registers */
+#define PM8607_RTC1 (0xA0)
+#define PM8607_RTC_COUNTER1 (0xA1)
+#define PM8607_RTC_COUNTER2 (0xA2)
+#define PM8607_RTC_COUNTER3 (0xA3)
+#define PM8607_RTC_COUNTER4 (0xA4)
+#define PM8607_RTC_EXPIRE1 (0xA5)
+#define PM8607_RTC_EXPIRE2 (0xA6)
+#define PM8607_RTC_EXPIRE3 (0xA7)
+#define PM8607_RTC_EXPIRE4 (0xA8)
+#define PM8607_RTC_TRIM1 (0xA9)
+#define PM8607_RTC_TRIM2 (0xAA)
+#define PM8607_RTC_TRIM3 (0xAB)
+#define PM8607_RTC_TRIM4 (0xAC)
+#define PM8607_RTC_MISC1 (0xAD)
+#define PM8607_RTC_MISC2 (0xAE)
+#define PM8607_RTC_MISC3 (0xAF)
+
+/* Misc Registers */
+#define PM8607_CHIP_ID (0x00)
+#define PM8607_LDO1 (0x10)
+#define PM8607_DVC3 (0x26)
+#define PM8607_MISC1 (0x40)
+
+/* bit definitions for PM8607 events */
+#define PM8607_EVENT_ONKEY (1 << 0)
+#define PM8607_EVENT_EXTON (1 << 1)
+#define PM8607_EVENT_CHG (1 << 2)
+#define PM8607_EVENT_BAT (1 << 3)
+#define PM8607_EVENT_RTC (1 << 4)
+#define PM8607_EVENT_CC (1 << 5)
+#define PM8607_EVENT_VBAT (1 << 8)
+#define PM8607_EVENT_VCHG (1 << 9)
+#define PM8607_EVENT_VSYS (1 << 10)
+#define PM8607_EVENT_TINT (1 << 11)
+#define PM8607_EVENT_GPADC0 (1 << 12)
+#define PM8607_EVENT_GPADC1 (1 << 13)
+#define PM8607_EVENT_GPADC2 (1 << 14)
+#define PM8607_EVENT_GPADC3 (1 << 15)
+#define PM8607_EVENT_AUDIO_SHORT (1 << 16)
+#define PM8607_EVENT_PEN (1 << 17)
+#define PM8607_EVENT_HEADSET (1 << 18)
+#define PM8607_EVENT_HOOK (1 << 19)
+#define PM8607_EVENT_MICIN (1 << 20)
+#define PM8607_EVENT_CHG_TIMEOUT (1 << 21)
+#define PM8607_EVENT_CHG_DONE (1 << 22)
+#define PM8607_EVENT_CHG_FAULT (1 << 23)
+
+/* bit definitions of Status Query Interface */
+#define PM8607_STATUS_CC (1 << 3)
+#define PM8607_STATUS_PEN (1 << 4)
+#define PM8607_STATUS_HEADSET (1 << 5)
+#define PM8607_STATUS_HOOK (1 << 6)
+#define PM8607_STATUS_MICIN (1 << 7)
+#define PM8607_STATUS_ONKEY (1 << 8)
+#define PM8607_STATUS_EXTON (1 << 9)
+#define PM8607_STATUS_CHG (1 << 10)
+#define PM8607_STATUS_BAT (1 << 11)
+#define PM8607_STATUS_VBUS (1 << 12)
+#define PM8607_STATUS_OV (1 << 13)
+
+/* bit definitions of BUCK3 */
+#define PM8607_BUCK3_DOUBLE (1 << 6)
+
+/* bit definitions of Misc1 */
+#define PM8607_MISC1_PI2C (1 << 0)
+
+/* Interrupt Number in 88PM8607 */
+enum {
+ PM8607_IRQ_ONKEY = 0,
+ PM8607_IRQ_EXTON,
+ PM8607_IRQ_CHG,
+ PM8607_IRQ_BAT,
+ PM8607_IRQ_RTC,
+ PM8607_IRQ_VBAT = 8,
+ PM8607_IRQ_VCHG,
+ PM8607_IRQ_VSYS,
+ PM8607_IRQ_TINT,
+ PM8607_IRQ_GPADC0,
+ PM8607_IRQ_GPADC1,
+ PM8607_IRQ_GPADC2,
+ PM8607_IRQ_GPADC3,
+ PM8607_IRQ_AUDIO_SHORT = 16,
+ PM8607_IRQ_PEN,
+ PM8607_IRQ_HEADSET,
+ PM8607_IRQ_HOOK,
+ PM8607_IRQ_MICIN,
+ PM8607_IRQ_CHG_FAIL,
+ PM8607_IRQ_CHG_DONE,
+ PM8607_IRQ_CHG_FAULT,
+};
+
+enum {
+ PM8607_CHIP_A0 = 0x40,
+ PM8607_CHIP_A1 = 0x41,
+ PM8607_CHIP_B0 = 0x48,
+};
+
+
+struct pm8607_chip {
+ struct device *dev;
+ struct mutex io_lock;
+ struct i2c_client *client;
+
+ int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest);
+ int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src);
+
+ int buck3_double; /* DVC ramp slope double */
+ unsigned char chip_id;
+
+};
+
+#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
+
+enum {
+ GI2C_PORT = 0,
+ PI2C_PORT,
+};
+
+struct pm8607_platform_data {
+ int i2c_port; /* Controlled by GI2C or PI2C */
+ struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
+};
+
+extern int pm8607_reg_read(struct pm8607_chip *, int);
+extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char);
+extern int pm8607_bulk_read(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_bulk_write(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char,
+ unsigned char);
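+
+/*
+ * Example (a minimal sketch; the error message is illustrative): a
+ * subdevice driver holding a struct pm8607_chip pointer could flip a
+ * single bit with pm8607_set_bits(), e.g. to clear the MISC1 PI2C bit:
+ *
+ *	ret = pm8607_set_bits(chip, PM8607_MISC1, PM8607_MISC1_PI2C, 0);
+ *	if (ret < 0)
+ *		dev_err(chip->dev, "failed to update MISC1\n");
+ */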
+#endif /* __LINUX_MFD_88PM8607_H */
diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h
new file mode 100644
index 00000000000..a42a7033ae5
--- /dev/null
+++ b/include/linux/mfd/ab4500.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * AB4500 device core functions, for client access
+ */
+#ifndef MFD_AB4500_H
+#define MFD_AB4500_H
+
+#include <linux/device.h>
+
+/*
+ * AB4500 bank addresses
+ */
+#define AB4500_SYS_CTRL1_BLOCK 0x1
+#define AB4500_SYS_CTRL2_BLOCK 0x2
+#define AB4500_REGU_CTRL1 0x3
+#define AB4500_REGU_CTRL2 0x4
+#define AB4500_USB 0x5
+#define AB4500_TVOUT 0x6
+#define AB4500_DBI 0x7
+#define AB4500_ECI_AV_ACC 0x8
+#define AB4500_RESERVED 0x9
+#define AB4500_GPADC 0xA
+#define AB4500_CHARGER 0xB
+#define AB4500_GAS_GAUGE 0xC
+#define AB4500_AUDIO 0xD
+#define AB4500_INTERRUPT 0xE
+#define AB4500_RTC 0xF
+#define AB4500_MISC 0x10
+#define AB4500_DEBUG 0x12
+#define AB4500_PROD_TEST 0x13
+#define AB4500_OTP_EMUL 0x15
+
+/*
+ * System control 1 register offsets.
+ * Bank = 0x01
+ */
+#define AB4500_TURNON_STAT_REG 0x0100
+#define AB4500_RESET_STAT_REG 0x0101
+#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102
+
+#define AB4500_FSM_STAT1_REG 0x0140
+#define AB4500_FSM_STAT2_REG 0x0141
+#define AB4500_SYSCLK_REQ_STAT_REG 0x0142
+#define AB4500_USB_STAT1_REG 0x0143
+#define AB4500_USB_STAT2_REG 0x0144
+#define AB4500_STATUS_SPARE1_REG 0x0145
+#define AB4500_STATUS_SPARE2_REG 0x0146
+
+#define AB4500_CTRL1_REG 0x0180
+#define AB4500_CTRL2_REG 0x0181
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB4500_CTRL3_REG 0x0200
+#define AB4500_MAIN_WDOG_CTRL_REG 0x0201
+#define AB4500_MAIN_WDOG_TIMER_REG 0x0202
+#define AB4500_LOW_BAT_REG 0x0203
+#define AB4500_BATT_OK_REG 0x0204
+#define AB4500_SYSCLK_TIMER_REG 0x0205
+#define AB4500_SMPSCLK_CTRL_REG 0x0206
+#define AB4500_SMPSCLK_SEL1_REG 0x0207
+#define AB4500_SMPSCLK_SEL2_REG 0x0208
+#define AB4500_SMPSCLK_SEL3_REG 0x0209
+#define AB4500_SYSULPCLK_CONF_REG 0x020A
+#define AB4500_SYSULPCLK_CTRL1_REG 0x020B
+#define AB4500_SYSCLK_CTRL_REG 0x020C
+#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D
+#define AB4500_SYSCLK_REQ_VALID_REG 0x020E
+#define AB4500_SYSCTRL_SPARE_REG 0x020F
+#define AB4500_PAD_CONF_REG 0x0210
+
+/*
+ * Regu control1 register offsets
+ * Bank = 0x03
+ */
+#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300
+#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301
+#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302
+#define AB4500_REGU_REQ_CTRL1_REG 0x0303
+#define AB4500_REGU_REQ_CTRL2_REG 0x0304
+#define AB4500_REGU_REQ_CTRL3_REG 0x0305
+#define AB4500_REGU_REQ_CTRL4_REG 0x0306
+#define AB4500_REGU_MISC1_REG 0x0380
+#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381
+#define AB4500_REGU_VUSB_CTRL_REG 0x0382
+#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383
+#define AB4500_REGU_CTRL1_SPARE_REG 0x0384
+
+/*
+ * Regu control2 Vmod register offsets
+ */
+#define AB4500_REGU_VMOD_REGU_REG 0x0440
+#define AB4500_REGU_VMOD_SEL1_REG 0x0441
+#define AB4500_REGU_VMOD_SEL2_REG 0x0442
+#define AB4500_REGU_CTRL_DISCH_REG 0x0443
+#define AB4500_REGU_CTRL_DISCH2_REG 0x0444
+
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB4500_USB_LINE_STAT_REG 0x0580
+#define AB4500_USB_LINE_CTRL1_REG 0x0581
+#define AB4500_USB_LINE_CTRL2_REG 0x0582
+#define AB4500_USB_LINE_CTRL3_REG 0x0583
+#define AB4500_USB_LINE_CTRL4_REG 0x0584
+#define AB4500_USB_LINE_CTRL5_REG 0x0585
+#define AB4500_USB_OTG_CTRL_REG 0x0587
+#define AB4500_USB_OTG_STAT_REG 0x0588
+#define AB4500_USB_CTRL_SPARE_REG 0x0589
+#define AB4500_USB_PHY_CTRL_REG 0x058A
+
+/*
+ * TVOUT / CTRL register offsets
+ * Bank : 0x06
+ */
+#define AB4500_TVOUT_CTRL_REG 0x0680
+
+/*
+ * DBI register offsets
+ * Bank : 0x07
+ */
+#define AB4500_DBI_REG1_REG 0x0700
+#define AB4500_DBI_REG2_REG 0x0701
+
+/*
+ * ECI register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ECI_CTRL_REG 0x0800
+#define AB4500_ECI_HOOKLEVEL_REG 0x0801
+#define AB4500_ECI_DATAOUT_REG 0x0802
+#define AB4500_ECI_DATAIN_REG 0x0803
+
+/*
+ * AV Connector register offsets
+ * Bank : 0x08
+ */
+#define AB4500_AV_CONN_REG 0x0840
+
+/*
+ * Accessory detection register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ACC_DET_DB1_REG 0x0880
+#define AB4500_ACC_DET_DB2_REG 0x0881
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB4500_GPADC_CTRL1_REG 0x0A00
+#define AB4500_GPADC_CTRL2_REG 0x0A01
+#define AB4500_GPADC_CTRL3_REG 0x0A02
+#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03
+#define AB4500_GPADC_STAT_REG 0x0A04
+#define AB4500_GPADC_MANDATAL_REG 0x0A05
+#define AB4500_GPADC_MANDATAH_REG 0x0A06
+#define AB4500_GPADC_AUTODATAL_REG 0x0A07
+#define AB4500_GPADC_AUTODATAH_REG 0x0A08
+#define AB4500_GPADC_MUX_CTRL_REG 0x0A09
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_STATUS1_REG 0x0B00
+#define AB4500_CH_STATUS2_REG 0x0B01
+#define AB4500_CH_USBCH_STAT1_REG 0x0B02
+#define AB4500_CH_USBCH_STAT2_REG 0x0B03
+#define AB4500_CH_FSM_STAT_REG 0x0B04
+#define AB4500_CH_STAT_REG 0x0B05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_VOLT_LVL_REG 0x0B40
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_MCH_CTRL1 0x0B80
+#define AB4500_MCH_CTRL2 0x0B81
+#define AB4500_MCH_IPT_CURLVL_REG 0x0B82
+#define AB4500_CH_WD_REG 0x0B83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_USBCH_CTRL1_REG 0x0BC0
+#define AB4500_USBCH_CTRL2_REG 0x0BC1
+#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2
+
+/*
+ * RTC bank register offsets
+ * Bank : 0xF
+ */
+#define AB4500_RTC_SOFF_STAT_REG 0x0F00
+#define AB4500_RTC_CC_CONF_REG 0x0F01
+#define AB4500_RTC_READ_REQ_REG 0x0F02
+#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03
+#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04
+#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05
+#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06
+#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07
+#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08
+#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09
+#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A
+#define AB4500_RTC_STAT_REG 0x0F0B
+#define AB4500_RTC_BKUP_CHG_REG 0x0F0C
+#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D
+#define AB4500_RTC_CALIB_REG 0x0F0E
+#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F
+
+/*
+ * PWM Out generators
+ * Bank: 0x10
+ */
+#define AB4500_PWM_OUT_CTRL1_REG 0x1060
+#define AB4500_PWM_OUT_CTRL2_REG 0x1061
+#define AB4500_PWM_OUT_CTRL3_REG 0x1062
+#define AB4500_PWM_OUT_CTRL4_REG 0x1063
+#define AB4500_PWM_OUT_CTRL5_REG 0x1064
+#define AB4500_PWM_OUT_CTRL6_REG 0x1065
+#define AB4500_PWM_OUT_CTRL7_REG 0x1066
+
+#define AB4500_I2C_PAD_CTRL_REG 0x1067
+#define AB4500_REV_REG 0x1080
+
+/**
+ * struct ab4500
+ * @spi: spi device structure
+ * @tx_buf: transmit buffer
+ * @rx_buf: receive buffer
+ * @lock: sync primitive
+ */
+struct ab4500 {
+ struct spi_device *spi;
+ unsigned long tx_buf[4];
+ unsigned long rx_buf[4];
+ struct mutex lock;
+};
+
+int ab4500_write(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr, unsigned char data);
+int ab4500_read(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr);
+
+#endif /* MFD_AB4500_H */
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
new file mode 100644
index 00000000000..ac37558a467
--- /dev/null
+++ b/include/linux/mfd/adp5520.h
@@ -0,0 +1,299 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef __LINUX_MFD_ADP5520_H
+#define __LINUX_MFD_ADP5520_H
+
+#define ID_ADP5520 5520
+#define ID_ADP5501 5501
+
+/*
+ * ADP5520/ADP5501 Register Map
+ */
+
+#define ADP5520_MODE_STATUS 0x00
+#define ADP5520_INTERRUPT_ENABLE 0x01
+#define ADP5520_BL_CONTROL 0x02
+#define ADP5520_BL_TIME 0x03
+#define ADP5520_BL_FADE 0x04
+#define ADP5520_DAYLIGHT_MAX 0x05
+#define ADP5520_DAYLIGHT_DIM 0x06
+#define ADP5520_OFFICE_MAX 0x07
+#define ADP5520_OFFICE_DIM 0x08
+#define ADP5520_DARK_MAX 0x09
+#define ADP5520_DARK_DIM 0x0A
+#define ADP5520_BL_VALUE 0x0B
+#define ADP5520_ALS_CMPR_CFG 0x0C
+#define ADP5520_L2_TRIP 0x0D
+#define ADP5520_L2_HYS 0x0E
+#define ADP5520_L3_TRIP 0x0F
+#define ADP5520_L3_HYS 0x10
+#define ADP5520_LED_CONTROL 0x11
+#define ADP5520_LED_TIME 0x12
+#define ADP5520_LED_FADE 0x13
+#define ADP5520_LED1_CURRENT 0x14
+#define ADP5520_LED2_CURRENT 0x15
+#define ADP5520_LED3_CURRENT 0x16
+
+/*
+ * ADP5520 Register Map
+ */
+
+#define ADP5520_GPIO_CFG_1 0x17
+#define ADP5520_GPIO_CFG_2 0x18
+#define ADP5520_GPIO_IN 0x19
+#define ADP5520_GPIO_OUT 0x1A
+#define ADP5520_GPIO_INT_EN 0x1B
+#define ADP5520_GPIO_INT_STAT 0x1C
+#define ADP5520_GPIO_INT_LVL 0x1D
+#define ADP5520_GPIO_DEBOUNCE 0x1E
+#define ADP5520_GPIO_PULLUP 0x1F
+#define ADP5520_KP_INT_STAT_1 0x20
+#define ADP5520_KP_INT_STAT_2 0x21
+#define ADP5520_KR_INT_STAT_1 0x22
+#define ADP5520_KR_INT_STAT_2 0x23
+#define ADP5520_KEY_STAT_1 0x24
+#define ADP5520_KEY_STAT_2 0x25
+
+/*
+ * MODE_STATUS bits
+ */
+
+#define ADP5520_nSTNBY (1 << 7)
+#define ADP5520_BL_EN (1 << 6)
+#define ADP5520_DIM_EN (1 << 5)
+#define ADP5520_OVP_INT (1 << 4)
+#define ADP5520_CMPR_INT (1 << 3)
+#define ADP5520_GPI_INT (1 << 2)
+#define ADP5520_KR_INT (1 << 1)
+#define ADP5520_KP_INT (1 << 0)
+
+/*
+ * INTERRUPT_ENABLE bits
+ */
+
+#define ADP5520_AUTO_LD_EN (1 << 4)
+#define ADP5520_CMPR_IEN (1 << 3)
+#define ADP5520_OVP_IEN (1 << 2)
+#define ADP5520_KR_IEN (1 << 1)
+#define ADP5520_KP_IEN (1 << 0)
+
+/*
+ * BL_CONTROL bits
+ */
+
+#define ADP5520_BL_LVL(x) ((x) << 5)
+#define ADP5520_BL_LAW(x) ((x) << 4)
+#define ADP5520_BL_AUTO_ADJ (1 << 3)
+#define ADP5520_OVP_EN (1 << 2)
+#define ADP5520_FOVR (1 << 1)
+#define ADP5520_KP_BL_EN (1 << 0)
+
+/*
+ * ALS_CMPR_CFG bits
+ */
+
+#define ADP5520_L3_OUT (1 << 3)
+#define ADP5520_L2_OUT (1 << 2)
+#define ADP5520_L3_EN (1 << 1)
+
+#define ADP5020_MAX_BRIGHTNESS 0x7F
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4))
+#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en)
+
+/*
+ * LEDs subdevice bits and masks
+ */
+
+#define ADP5520_01_MAXLEDS 3
+
+#define ADP5520_FLAG_LED_MASK 0x3
+#define ADP5520_FLAG_OFFT_SHIFT 8
+#define ADP5520_FLAG_OFFT_MASK 0x3
+
+#define ADP5520_R3_MODE (1 << 5)
+#define ADP5520_C3_MODE (1 << 4)
+#define ADP5520_LED_LAW (1 << 3)
+#define ADP5520_LED3_EN (1 << 2)
+#define ADP5520_LED2_EN (1 << 1)
+#define ADP5520_LED1_EN (1 << 0)
+
+/*
+ * GPIO subdevice bits and masks
+ */
+
+#define ADP5520_MAXGPIOS 8
+
+#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_GPIO_C2 (1 << 6)
+#define ADP5520_GPIO_C1 (1 << 5)
+#define ADP5520_GPIO_C0 (1 << 4)
+#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_GPIO_R2 (1 << 2)
+#define ADP5520_GPIO_R1 (1 << 1)
+#define ADP5520_GPIO_R0 (1 << 0)
+
+struct adp5520_gpio_platform_data {
+ unsigned gpio_start;
+ u8 gpio_en_mask;
+ u8 gpio_pullup_mask;
+};
+
+/*
+ * Keypad subdevice bits and masks
+ */
+
+#define ADP5520_MAXKEYS 16
+
+#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_COL_C2 (1 << 6)
+#define ADP5520_COL_C1 (1 << 5)
+#define ADP5520_COL_C0 (1 << 4)
+#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_ROW_R2 (1 << 2)
+#define ADP5520_ROW_R1 (1 << 1)
+#define ADP5520_ROW_R0 (1 << 0)
+
+#define ADP5520_KEY(row, col) (col + row * 4)
+#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS
+
+struct adp5520_keys_platform_data {
+ int rows_en_mask; /* Bitmask of enabled rows */
+ int cols_en_mask; /* Bitmask of enabled columns */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+};
+
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */
+#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */
+#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */
+
+#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT)
+
+#define ADP5520_LED_ONT_200ms 0
+#define ADP5520_LED_ONT_600ms 1
+#define ADP5520_LED_ONT_800ms 2
+#define ADP5520_LED_ONT_1200ms 3
+
+struct adp5520_leds_platform_data {
+ int num_leds;
+ struct led_info *leds;
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 led_on_time;
+};
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP5520_FADE_T_600ms 2
+#define ADP5520_FADE_T_900ms 3
+#define ADP5520_FADE_T_1200ms 4
+#define ADP5520_FADE_T_1500ms 5
+#define ADP5520_FADE_T_1800ms 6
+#define ADP5520_FADE_T_2100ms 7
+#define ADP5520_FADE_T_2400ms 8
+#define ADP5520_FADE_T_2700ms 9
+#define ADP5520_FADE_T_3000ms 10
+#define ADP5520_FADE_T_3500ms 11
+#define ADP5520_FADE_T_4000ms 12
+#define ADP5520_FADE_T_4500ms 13
+#define ADP5520_FADE_T_5000ms 14
+#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP5520_BL_LAW_LINEAR 0
+#define ADP5520_BL_LAW_SQUARE 1
+#define ADP5520_BL_LAW_CUBIC1 2
+#define ADP5520_BL_LAW_CUBIC2 3
+
+#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP5520_BL_AMBL_FILT_160ms 1
+#define ADP5520_BL_AMBL_FILT_320ms 2
+#define ADP5520_BL_AMBL_FILT_640ms 3
+#define ADP5520_BL_AMBL_FILT_1280ms 4
+#define ADP5520_BL_AMBL_FILT_2560ms 5
+#define ADP5520_BL_AMBL_FILT_5120ms 6
+#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+ /*
+ * Backlight current 0..30mA
+ */
+#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30)
+
+ /*
+ * L2 comparator current 0..1000uA
+ */
+#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) / 1000)
+
+ /*
+ * L3 comparator current 0..127uA
+ */
+#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127)
+
+struct adp5520_backlight_platform_data {
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 fade_led_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+};
+
+/*
+ * MFD chip platform data
+ */
+
+struct adp5520_platform_data {
+ struct adp5520_keys_platform_data *keys;
+ struct adp5520_gpio_platform_data *gpio;
+ struct adp5520_leds_platform_data *leds;
+ struct adp5520_backlight_platform_data *backlight;
+};
+
+/*
+ * MFD chip functions
+ */
+
+extern int adp5520_read(struct device *dev, int reg, uint8_t *val);
+extern int adp5520_write(struct device *dev, int reg, u8 val);
+extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+
+extern int adp5520_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+extern int adp5520_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
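+/*
+ * Example (a minimal sketch; "master" is assumed to point at the ADP5520
+ * core device, typically the sub-driver's parent): the backlight
+ * sub-driver could turn the backlight on via the MODE_STATUS register:
+ *
+ *	ret = adp5520_set_bits(master, ADP5520_MODE_STATUS, ADP5520_BL_EN);
+ *	if (ret)
+ *		dev_err(master, "failed to enable backlight\n");
+ */
+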
+#endif /* __LINUX_MFD_ADP5520_H */
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 3402042ddc3..40c372165f3 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -231,9 +231,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_LED_4MA 1
#define PCAP_LED_5MA 2
#define PCAP_LED_9MA 3
-#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff
-#define PCAP_LED_GPIO_EN 0x01000000
-#define PCAP_LED_GPIO_INVERT 0x02000000
#define PCAP_LED_T_MASK 0xf
#define PCAP_LED_C_MASK 0x3
#define PCAP_BL_MASK 0x1f
diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h
index 47e698cb0f1..95cf9360553 100644
--- a/include/linux/mfd/mc13783-private.h
+++ b/include/linux/mfd/mc13783-private.h
@@ -24,52 +24,23 @@
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-
-struct mc13783_irq {
- void (*handler)(int, void *);
- void *data;
-};
-
-#define MC13783_NUM_IRQ 2
-#define MC13783_IRQ_TS 0
-#define MC13783_IRQ_REGULATOR 1
-
-#define MC13783_ADC_MODE_TS 1
-#define MC13783_ADC_MODE_SINGLE_CHAN 2
-#define MC13783_ADC_MODE_MULT_CHAN 3
+#include <linux/interrupt.h>
struct mc13783 {
- int revision;
- struct device *dev;
- struct spi_device *spi_device;
-
- int (*read_dev)(void *data, char reg, int count, u32 *dst);
- int (*write_dev)(void *data, char reg, int count, const u32 *src);
-
- struct mutex io_lock;
- void *io_data;
+ struct spi_device *spidev;
+ struct mutex lock;
int irq;
- unsigned int flags;
+ int flags;
- struct mc13783_irq irq_handler[MC13783_NUM_IRQ];
- struct work_struct work;
- struct completion adc_done;
- unsigned int ts_active;
- struct mutex adc_conv_lock;
+ irq_handler_t irqhandler[MC13783_NUM_IRQ];
+ void *irqdata[MC13783_NUM_IRQ];
+ /* XXX these should go as platformdata to the regulator subdevice */
struct mc13783_regulator_init_data *regulators;
int num_regulators;
};
-int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *);
-int mc13783_reg_write(struct mc13783 *, int, u32);
-int mc13783_set_bits(struct mc13783 *, int, u32, u32);
-int mc13783_free_irq(struct mc13783 *mc13783, int irq);
-int mc13783_register_irq(struct mc13783 *mc13783, int irq,
- void (*handler) (int, void *), void *data);
-
#define MC13783_REG_INTERRUPT_STATUS_0 0
#define MC13783_REG_INTERRUPT_MASK_0 1
#define MC13783_REG_INTERRUPT_SENSE_0 2
@@ -136,55 +107,6 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_REG_TEST_3 63
#define MC13783_REG_NB 64
-
-/*
- * Interrupt Status
- */
-#define MC13783_INT_STAT_ADCDONEI (1 << 0)
-#define MC13783_INT_STAT_ADCBISDONEI (1 << 1)
-#define MC13783_INT_STAT_TSI (1 << 2)
-#define MC13783_INT_STAT_WHIGHI (1 << 3)
-#define MC13783_INT_STAT_WLOWI (1 << 4)
-#define MC13783_INT_STAT_CHGDETI (1 << 6)
-#define MC13783_INT_STAT_CHGOVI (1 << 7)
-#define MC13783_INT_STAT_CHGREVI (1 << 8)
-#define MC13783_INT_STAT_CHGSHORTI (1 << 9)
-#define MC13783_INT_STAT_CCCVI (1 << 10)
-#define MC13783_INT_STAT_CHGCURRI (1 << 11)
-#define MC13783_INT_STAT_BPONI (1 << 12)
-#define MC13783_INT_STAT_LOBATLI (1 << 13)
-#define MC13783_INT_STAT_LOBATHI (1 << 14)
-#define MC13783_INT_STAT_UDPI (1 << 15)
-#define MC13783_INT_STAT_USBI (1 << 16)
-#define MC13783_INT_STAT_IDI (1 << 19)
-#define MC13783_INT_STAT_Unused (1 << 20)
-#define MC13783_INT_STAT_SE1I (1 << 21)
-#define MC13783_INT_STAT_CKDETI (1 << 22)
-#define MC13783_INT_STAT_UDMI (1 << 23)
-
-/*
- * Interrupt Mask
- */
-#define MC13783_INT_MASK_ADCDONEM (1 << 0)
-#define MC13783_INT_MASK_ADCBISDONEM (1 << 1)
-#define MC13783_INT_MASK_TSM (1 << 2)
-#define MC13783_INT_MASK_WHIGHM (1 << 3)
-#define MC13783_INT_MASK_WLOWM (1 << 4)
-#define MC13783_INT_MASK_CHGDETM (1 << 6)
-#define MC13783_INT_MASK_CHGOVM (1 << 7)
-#define MC13783_INT_MASK_CHGREVM (1 << 8)
-#define MC13783_INT_MASK_CHGSHORTM (1 << 9)
-#define MC13783_INT_MASK_CCCVM (1 << 10)
-#define MC13783_INT_MASK_CHGCURRM (1 << 11)
-#define MC13783_INT_MASK_BPONM (1 << 12)
-#define MC13783_INT_MASK_LOBATLM (1 << 13)
-#define MC13783_INT_MASK_LOBATHM (1 << 14)
-#define MC13783_INT_MASK_UDPM (1 << 15)
-#define MC13783_INT_MASK_USBM (1 << 16)
-#define MC13783_INT_MASK_IDM (1 << 19)
-#define MC13783_INT_MASK_SE1M (1 << 21)
-#define MC13783_INT_MASK_CKDETM (1 << 22)
-
/*
* Reg Regulator Mode 0
*/
@@ -284,113 +206,15 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_SWCTRL_SW3_STBY (1 << 21)
#define MC13783_SWCTRL_SW3_MODE (1 << 22)
-/*
- * ADC/Touch
- */
-#define MC13783_ADC0_LICELLCON (1 << 0)
-#define MC13783_ADC0_CHRGICON (1 << 1)
-#define MC13783_ADC0_BATICON (1 << 2)
-#define MC13783_ADC0_RTHEN (1 << 3)
-#define MC13783_ADC0_DTHEN (1 << 4)
-#define MC13783_ADC0_UIDEN (1 << 5)
-#define MC13783_ADC0_ADOUTEN (1 << 6)
-#define MC13783_ADC0_ADOUTPER (1 << 7)
-#define MC13783_ADC0_ADREFEN (1 << 10)
-#define MC13783_ADC0_ADREFMODE (1 << 11)
-#define MC13783_ADC0_TSMOD0 (1 << 12)
-#define MC13783_ADC0_TSMOD1 (1 << 13)
-#define MC13783_ADC0_TSMOD2 (1 << 14)
-#define MC13783_ADC0_CHRGRAWDIV (1 << 15)
-#define MC13783_ADC0_ADINC1 (1 << 16)
-#define MC13783_ADC0_ADINC2 (1 << 17)
-#define MC13783_ADC0_WCOMP (1 << 18)
-#define MC13783_ADC0_ADCBIS0 (1 << 23)
-
-#define MC13783_ADC1_ADEN (1 << 0)
-#define MC13783_ADC1_RAND (1 << 1)
-#define MC13783_ADC1_ADSEL (1 << 3)
-#define MC13783_ADC1_TRIGMASK (1 << 4)
-#define MC13783_ADC1_ADA10 (1 << 5)
-#define MC13783_ADC1_ADA11 (1 << 6)
-#define MC13783_ADC1_ADA12 (1 << 7)
-#define MC13783_ADC1_ADA20 (1 << 8)
-#define MC13783_ADC1_ADA21 (1 << 9)
-#define MC13783_ADC1_ADA22 (1 << 10)
-#define MC13783_ADC1_ATO0 (1 << 11)
-#define MC13783_ADC1_ATO1 (1 << 12)
-#define MC13783_ADC1_ATO2 (1 << 13)
-#define MC13783_ADC1_ATO3 (1 << 14)
-#define MC13783_ADC1_ATO4 (1 << 15)
-#define MC13783_ADC1_ATO5 (1 << 16)
-#define MC13783_ADC1_ATO6 (1 << 17)
-#define MC13783_ADC1_ATO7 (1 << 18)
-#define MC13783_ADC1_ATOX (1 << 19)
-#define MC13783_ADC1_ASC (1 << 20)
-#define MC13783_ADC1_ADTRIGIGN (1 << 21)
-#define MC13783_ADC1_ADONESHOT (1 << 22)
-#define MC13783_ADC1_ADCBIS1 (1 << 23)
-
-#define MC13783_ADC1_CHAN0_SHIFT 5
-#define MC13783_ADC1_CHAN1_SHIFT 8
-
-#define MC13783_ADC2_ADD10 (1 << 2)
-#define MC13783_ADC2_ADD11 (1 << 3)
-#define MC13783_ADC2_ADD12 (1 << 4)
-#define MC13783_ADC2_ADD13 (1 << 5)
-#define MC13783_ADC2_ADD14 (1 << 6)
-#define MC13783_ADC2_ADD15 (1 << 7)
-#define MC13783_ADC2_ADD16 (1 << 8)
-#define MC13783_ADC2_ADD17 (1 << 9)
-#define MC13783_ADC2_ADD18 (1 << 10)
-#define MC13783_ADC2_ADD19 (1 << 11)
-#define MC13783_ADC2_ADD20 (1 << 14)
-#define MC13783_ADC2_ADD21 (1 << 15)
-#define MC13783_ADC2_ADD22 (1 << 16)
-#define MC13783_ADC2_ADD23 (1 << 17)
-#define MC13783_ADC2_ADD24 (1 << 18)
-#define MC13783_ADC2_ADD25 (1 << 19)
-#define MC13783_ADC2_ADD26 (1 << 20)
-#define MC13783_ADC2_ADD27 (1 << 21)
-#define MC13783_ADC2_ADD28 (1 << 22)
-#define MC13783_ADC2_ADD29 (1 << 23)
+static inline int mc13783_set_bits(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val)
+{
+ int ret;
+ mc13783_lock(mc13783);
+ ret = mc13783_reg_rmw(mc13783, offset, mask, val);
+ mc13783_unlock(mc13783);
-#define MC13783_ADC3_WHIGH0 (1 << 0)
-#define MC13783_ADC3_WHIGH1 (1 << 1)
-#define MC13783_ADC3_WHIGH2 (1 << 2)
-#define MC13783_ADC3_WHIGH3 (1 << 3)
-#define MC13783_ADC3_WHIGH4 (1 << 4)
-#define MC13783_ADC3_WHIGH5 (1 << 5)
-#define MC13783_ADC3_ICID0 (1 << 6)
-#define MC13783_ADC3_ICID1 (1 << 7)
-#define MC13783_ADC3_ICID2 (1 << 8)
-#define MC13783_ADC3_WLOW0 (1 << 9)
-#define MC13783_ADC3_WLOW1 (1 << 10)
-#define MC13783_ADC3_WLOW2 (1 << 11)
-#define MC13783_ADC3_WLOW3 (1 << 12)
-#define MC13783_ADC3_WLOW4 (1 << 13)
-#define MC13783_ADC3_WLOW5 (1 << 14)
-#define MC13783_ADC3_ADCBIS2 (1 << 23)
-
-#define MC13783_ADC4_ADDBIS10 (1 << 2)
-#define MC13783_ADC4_ADDBIS11 (1 << 3)
-#define MC13783_ADC4_ADDBIS12 (1 << 4)
-#define MC13783_ADC4_ADDBIS13 (1 << 5)
-#define MC13783_ADC4_ADDBIS14 (1 << 6)
-#define MC13783_ADC4_ADDBIS15 (1 << 7)
-#define MC13783_ADC4_ADDBIS16 (1 << 8)
-#define MC13783_ADC4_ADDBIS17 (1 << 9)
-#define MC13783_ADC4_ADDBIS18 (1 << 10)
-#define MC13783_ADC4_ADDBIS19 (1 << 11)
-#define MC13783_ADC4_ADDBIS20 (1 << 14)
-#define MC13783_ADC4_ADDBIS21 (1 << 15)
-#define MC13783_ADC4_ADDBIS22 (1 << 16)
-#define MC13783_ADC4_ADDBIS23 (1 << 17)
-#define MC13783_ADC4_ADDBIS24 (1 << 18)
-#define MC13783_ADC4_ADDBIS25 (1 << 19)
-#define MC13783_ADC4_ADDBIS26 (1 << 20)
-#define MC13783_ADC4_ADDBIS27 (1 << 21)
-#define MC13783_ADC4_ADDBIS28 (1 << 22)
-#define MC13783_ADC4_ADDBIS29 (1 << 23)
+ return ret;
+}
#endif /* __LINUX_MFD_MC13783_PRIV_H */
-
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b3a2a724357..35680409b8c 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,28 +1,50 @@
/*
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright 2009 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
- * Initial development of this code was funded by
- * Phytec Messtechnik GmbH, http://www.phytec.de
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
*/
+#ifndef __LINUX_MFD_MC13783_H
+#define __LINUX_MFD_MC13783_H
-#ifndef __INCLUDE_LINUX_MFD_MC13783_H
-#define __INCLUDE_LINUX_MFD_MC13783_H
+#include <linux/interrupt.h>
struct mc13783;
+
+void mc13783_lock(struct mc13783 *mc13783);
+void mc13783_unlock(struct mc13783 *mc13783);
+
+int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val);
+int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val);
+int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val);
+
+int mc13783_irq_request(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev);
+int mc13783_ackirq(struct mc13783 *mc13783, int irq);
+
+int mc13783_mask(struct mc13783 *mc13783, int irq);
+int mc13783_unmask(struct mc13783 *mc13783, int irq);
+
+#define MC13783_ADC0 43
+#define MC13783_ADC0_ADREFEN (1 << 10)
+#define MC13783_ADC0_ADREFMODE (1 << 11)
+#define MC13783_ADC0_TSMOD0 (1 << 12)
+#define MC13783_ADC0_TSMOD1 (1 << 13)
+#define MC13783_ADC0_TSMOD2 (1 << 14)
+#define MC13783_ADC0_ADINC1 (1 << 16)
+#define MC13783_ADC0_ADINC2 (1 << 17)
+
+#define MC13783_ADC0_TSMOD_MASK (MC13783_ADC0_TSMOD0 | \
+ MC13783_ADC0_TSMOD1 | \
+ MC13783_ADC0_TSMOD2)
+
+/* to be cleaned up */
struct regulator_init_data;
struct mc13783_regulator_init_data {
@@ -30,23 +52,30 @@ struct mc13783_regulator_init_data {
struct regulator_init_data *init_data;
};
-struct mc13783_platform_data {
- struct mc13783_regulator_init_data *regulators;
+struct mc13783_regulator_platform_data {
int num_regulators;
- unsigned int flags;
+ struct mc13783_regulator_init_data *regulators;
};
-/* mc13783_platform_data flags */
+struct mc13783_platform_data {
+ int num_regulators;
+ struct mc13783_regulator_init_data *regulators;
+
#define MC13783_USE_TOUCHSCREEN (1 << 0)
#define MC13783_USE_CODEC (1 << 1)
#define MC13783_USE_ADC (1 << 2)
#define MC13783_USE_RTC (1 << 3)
#define MC13783_USE_REGULATOR (1 << 4)
+ unsigned int flags;
+};
+
+#define MC13783_ADC_MODE_TS 1
+#define MC13783_ADC_MODE_SINGLE_CHAN 2
+#define MC13783_ADC_MODE_MULT_CHAN 3
int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample);
-void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_SW_SW1A 0
#define MC13783_SW_SW1B 1
@@ -80,5 +109,46 @@ void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_REGU_V3 29
#define MC13783_REGU_V4 30
-#endif /* __INCLUDE_LINUX_MFD_MC13783_H */
+#define MC13783_IRQ_ADCDONE 0
+#define MC13783_IRQ_ADCBISDONE 1
+#define MC13783_IRQ_TS 2
+#define MC13783_IRQ_WHIGH 3
+#define MC13783_IRQ_WLOW 4
+#define MC13783_IRQ_CHGDET 6
+#define MC13783_IRQ_CHGOV 7
+#define MC13783_IRQ_CHGREV 8
+#define MC13783_IRQ_CHGSHORT 9
+#define MC13783_IRQ_CCCV 10
+#define MC13783_IRQ_CHGCURR 11
+#define MC13783_IRQ_BPON 12
+#define MC13783_IRQ_LOBATL 13
+#define MC13783_IRQ_LOBATH 14
+#define MC13783_IRQ_UDP 15
+#define MC13783_IRQ_USB 16
+#define MC13783_IRQ_ID 19
+#define MC13783_IRQ_SE1 21
+#define MC13783_IRQ_CKDET 22
+#define MC13783_IRQ_UDM 23
+#define MC13783_IRQ_1HZ 24
+#define MC13783_IRQ_TODA 25
+#define MC13783_IRQ_ONOFD1 27
+#define MC13783_IRQ_ONOFD2 28
+#define MC13783_IRQ_ONOFD3 29
+#define MC13783_IRQ_SYSRST 30
+#define MC13783_IRQ_RTCRST 31
+#define MC13783_IRQ_PC 32
+#define MC13783_IRQ_WARM 33
+#define MC13783_IRQ_MEMHLD 34
+#define MC13783_IRQ_PWRRDY 35
+#define MC13783_IRQ_THWARNL 36
+#define MC13783_IRQ_THWARNH 37
+#define MC13783_IRQ_CLK 38
+#define MC13783_IRQ_SEMAF 39
+#define MC13783_IRQ_MC2B 41
+#define MC13783_IRQ_HSDET 42
+#define MC13783_IRQ_HSL 43
+#define MC13783_IRQ_ALSPTH 44
+#define MC13783_IRQ_AHSSHORT 45
+#define MC13783_NUM_IRQ 46
+#endif /* __LINUX_MFD_MC13783_H */
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 9aba7b779fb..3398bd9aab1 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,7 +29,12 @@ struct pcf50633_platform_data {
char **batteries;
int num_batteries;
- int charging_restart_interval;
+ /*
+ * Should be set according to the reference resistor used, see
+ * I_{ch(ref)} charger reference current in the pcf50633 User
+ * Manual.
+ */
+ int charger_reference_current_ma;
/* Callbacks */
void (*probe_done)(struct pcf50633 *);
@@ -40,10 +45,6 @@ struct pcf50633_platform_data {
u8 resumers[5];
};
-struct pcf50633_subdev_pdata {
- struct pcf50633 *pcf;
-};
-
struct pcf50633_irq {
void (*handler) (int, void *);
void *data;
@@ -217,5 +218,9 @@ enum pcf50633_reg_int5 {
#define PCF50633_REG_LEDCTL 0x2a
#define PCF50633_REG_LEDDIM 0x2b
-#endif
+static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+#endif
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 4119579acf2..df4f5fa88de 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 {
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
int pcf50633_mbc_get_status(struct pcf50633 *);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
#endif
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 91eb493bf14..5184b79c700 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -16,7 +16,6 @@
#define __MFD_WM831X_CORE_H__
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
/*
* Register values.
@@ -117,6 +116,7 @@
#define WM831X_DC3_SLEEP_CONTROL 0x4063
#define WM831X_DC4_CONTROL 0x4064
#define WM831X_DC4_SLEEP_CONTROL 0x4065
+#define WM832X_DC4_SLEEP_CONTROL 0x4067
#define WM831X_EPE1_CONTROL 0x4066
#define WM831X_EPE2_CONTROL 0x4067
#define WM831X_LDO1_CONTROL 0x4068
@@ -235,6 +235,8 @@
struct regulator_dev;
+#define WM831X_NUM_IRQ_REGS 5
+
struct wm831x {
struct mutex io_lock;
@@ -248,10 +250,11 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- struct workqueue_struct *irq_wq;
- struct work_struct irq_work;
unsigned int irq_base;
- int irq_masks[5];
+ int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
+ int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+
+ int num_gpio;
struct mutex auxadc_lock;
@@ -278,12 +281,30 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
-int __must_check wm831x_request_irq(struct wm831x *wm831x,
- unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name,
- void *dev);
-void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *);
-void wm831x_disable_irq(struct wm831x *wm831x, int irq);
-void wm831x_enable_irq(struct wm831x *wm831x, int irq);
+static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
+ unsigned int irq,
+ irq_handler_t handler,
+ unsigned long flags,
+ const char *name,
+ void *dev)
+{
+ return request_threaded_irq(irq, NULL, handler, flags, name, dev);
+}
+
+static inline void wm831x_free_irq(struct wm831x *wm831x,
+ unsigned int irq, void *dev)
+{
+ free_irq(irq, dev);
+}
+
+static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
+{
+ disable_irq(irq);
+}
+
+static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
+{
+ enable_irq(irq);
+}
#endif
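
The wm831x IRQ helpers are now thin static-inline wrappers over genirq, so a sub-driver's request is just a threaded request against the virtual IRQ number. An illustrative sketch, with made-up names and a made-up trigger flag, assuming the caller already knows the virq it wants:

	#include <linux/interrupt.h>
	#include <linux/mfd/wm831x/core.h>

	static irqreturn_t example_wm831x_thread(int irq, void *data)
	{
		/* Runs in thread context: the wrapper passed a NULL hard handler. */
		return IRQ_HANDLED;
	}

	static int example_wm831x_hook_irq(struct wm831x *wm831x, int virq, void *data)
	{
		/* Equivalent to request_threaded_irq(virq, NULL, handler, ...). */
		return wm831x_request_irq(wm831x, virq, example_wm831x_thread,
					  IRQF_TRIGGER_RISING, "example", data);
	}
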
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 90d820260aa..fd322aca33b 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -41,6 +41,23 @@ struct wm831x_battery_pdata {
int timeout; /** Charge cycle timeout, in minutes */
};
+/**
+ * Configuration for the WM831x DC-DC BuckWise convertors. This
+ * should be passed as driver_data in the regulator_init_data.
+ *
+ * Currently all the configuration is for the fast DVS switching
+ * support of the devices. This allows MFPs on the device to be
+ * configured as an input to switch between two output voltages,
+ * allowing voltage transitions without the expense of an access over
+ * I2C or SPI buses.
+ */
+struct wm831x_buckv_pdata {
+ int dvs_gpio; /** CPU GPIO to use for DVS switching */
+ int dvs_control_src; /** Hardware DVS source to use (1 or 2) */
+ int dvs_init_state; /** DVS state to expect on startup */
+ int dvs_state_gpio; /** CPU GPIO to use for monitoring status */
+};
+
/* Sources for status LED configuration. Values are register values
* plus 1 to allow for a zero default for preserve.
*/
@@ -91,6 +108,7 @@ struct wm831x_pdata {
/** Called after subdevices are set up */
int (*post_init)(struct wm831x *wm831x);
+ int irq_base;
int gpio_base;
struct wm831x_backlight_pdata *backlight;
struct wm831x_backup_pdata *backup;
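
The new wm831x_buckv_pdata is documented above as travelling through regulator_init_data.driver_data. A hypothetical board-file fragment showing that wiring; the GPIO numbers and voltage constraints are invented for illustration:

	#include <linux/regulator/machine.h>
	#include <linux/mfd/wm831x/pdata.h>

	static struct wm831x_buckv_pdata example_dc1_dvs = {
		.dvs_gpio	 = 101,	/* hypothetical CPU GPIO driving the DVS MFP */
		.dvs_control_src = 1,	/* hardware DVS source 1 */
		.dvs_init_state	 = 0,
		.dvs_state_gpio	 = 102,	/* hypothetical GPIO monitoring DVS status */
	};

	static struct regulator_init_data example_dc1_init_data = {
		.constraints = {
			.min_uV = 850000,
			.max_uV = 1200000,
			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		},
		/* BuckWise DVS configuration is passed via driver_data */
		.driver_data = &example_dc1_dvs,
	};
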
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 1d595de6a05..43868899bf4 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -601,7 +601,7 @@ extern const u16 wm8352_mode3_defaults[];
struct wm8350;
struct wm8350_irq {
- void (*handler) (struct wm8350 *, int, void *);
+ irq_handler_t handler;
void *data;
};
@@ -646,10 +646,12 @@ struct wm8350 {
* @init: Function called during driver initialisation. Should be
* used by the platform to configure GPIO functions and similar.
* @irq_high: Set if WM8350 IRQ is active high.
+ * @irq_base: Base IRQ for genirq (not currently used).
*/
struct wm8350_platform_data {
int (*init)(struct wm8350 *wm8350);
int irq_high;
+ int irq_base;
};
@@ -676,11 +678,13 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
* WM8350 internal interrupts
*/
int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- void (*handler) (struct wm8350 *, int, void *),
- void *data);
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data);
int wm8350_free_irq(struct wm8350 *wm8350, int irq);
int wm8350_mask_irq(struct wm8350 *wm8350, int irq);
int wm8350_unmask_irq(struct wm8350 *wm8350, int irq);
-
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata);
+int wm8350_irq_exit(struct wm8350 *wm8350);
#endif
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index ed91e8f5d29..71af3d6ebe9 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -173,6 +173,24 @@
#define WM8350_GPIO_DEBOUNCE_ON 1
/*
+ * R30 (0x1E) - GPIO Interrupt Status
+ */
+#define WM8350_GP12_EINT 0x1000
+#define WM8350_GP11_EINT 0x0800
+#define WM8350_GP10_EINT 0x0400
+#define WM8350_GP9_EINT 0x0200
+#define WM8350_GP8_EINT 0x0100
+#define WM8350_GP7_EINT 0x0080
+#define WM8350_GP6_EINT 0x0040
+#define WM8350_GP5_EINT 0x0020
+#define WM8350_GP4_EINT 0x0010
+#define WM8350_GP3_EINT 0x0008
+#define WM8350_GP2_EINT 0x0004
+#define WM8350_GP1_EINT 0x0002
+#define WM8350_GP0_EINT 0x0001
+
+
+/*
* R128 (0x80) - GPIO Debounce
*/
#define WM8350_GP12_DB 0x1000
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 527602cdea1..7f085c97c79 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
-extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+extern int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, int offlining);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
- unsigned long private) { return -ENOSYS; }
-
-static inline int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest) { return 0; }
+ unsigned long private, int offlining) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
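
migrate_pages() gains an extra "offlining" flag, so callers now supply a new_page_t allocator plus that flag. A rough sketch of the calling convention, with an illustrative allocator policy that is not part of this patch:

	#include <linux/gfp.h>
	#include <linux/migrate.h>

	/* Hypothetical allocation callback matching the new_page_t typedef. */
	static struct page *example_new_page(struct page *page, unsigned long private,
					     int **result)
	{
		return alloc_page(GFP_HIGHUSER_MOVABLE);
	}

	static int example_migrate_isolated(struct list_head *pagelist)
	{
		/* The last argument is the new 'offlining' flag introduced here. */
		return migrate_pages(pagelist, example_new_page, 0, 1);
	}
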
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ce7cc6c7bcb..e92d1bfdb33 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -61,6 +61,7 @@ enum {
MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
+ MLX4_DEV_CAP_FLAG_BLH = 1 << 15,
MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
MLX4_DEV_CAP_FLAG_APM = 1 << 17,
MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4..2265f28eb47 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else
-#endif
- if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+ return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
*
* (see walk_page_range for more details)
*/
@@ -767,6 +780,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+ struct mm_walk *);
struct mm_struct *mm;
void *private;
};
@@ -1022,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_all_active_ranges(void);
+void sort_node_map(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
@@ -1316,11 +1334,17 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
size_t size);
extern void refund_locked_memory(struct mm_struct *mm, size_t size);
+enum mf_flags {
+ MF_COUNT_INCREASED = 1 << 0,
+};
extern void memory_failure(unsigned long pfn, int trapno);
-extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int __memory_failure(unsigned long pfn, int trapno, int flags);
+extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
+extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
+extern int soft_offline_page(struct page *page, int flags);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
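
With PAGE_MAPPING_KSM added, the low bits of page->mapping encode both flags, and page_rmapping() masks them off. A sketch of how a KSM page can be told apart from a plain anonymous page using only the macros added here; it mirrors, but is not copied from, the PageKsm() test that lives in ksm.h:

	#include <linux/mm.h>

	static inline int example_page_is_ksm(struct page *page)
	{
		unsigned long flags = (unsigned long)page->mapping & PAGE_MAPPING_FLAGS;

		/* KSM pages carry both low bits; ordinary anon pages only ANON. */
		return flags == (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	}
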
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 8a550987719..ee24ef8ab61 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,8 +1,6 @@
#ifndef LINUX_MM_DEBUG_H
#define LINUX_MM_DEBUG_H 1
-#include <linux/autoconf.h>
-
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6f7561730d8..30fe668c254 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -15,7 +15,7 @@
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
-#include <linux/bounds.h>
+#include <generated/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>
diff --git a/include/linux/module.h b/include/linux/module.h
index 482efc865ac..6cb1a3cab5d 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -25,8 +25,10 @@
/* Not Yet Implemented */
#define MODULE_SUPPORTED_DEVICE(name)
-/* some toolchains uses a `_' prefix for all user symbols */
-#ifndef MODULE_SYMBOL_PREFIX
+/* Some toolchains use a `_' prefix for all user symbols. */
+#ifdef CONFIG_SYMBOL_PREFIX
+#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
+#else
#define MODULE_SYMBOL_PREFIX ""
#endif
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index fff8c53e543..9c3757c5759 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,22 +19,21 @@
/**
* struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with
- * option BBT_ABSPAGE when bbt is searched,
- * then we store the found bbts pages here.
- * Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob area of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This
- * number of blocks is reserved at the end of the device
- * where the tables are written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved
- * (rather than bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked
- * good / bad blocks, can be NULL, if len = 0
+ * @options: options for this descriptor
+ * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ * when bbt is searched, then we store the found bbts pages here.
+ * It's an array and supports up to 8 chips now
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs: offset of the bbt version counter in the oob area of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This number of
+ * blocks is reserved at the end of the device where the tables are
+ * written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ * bad) block in the stored bbt
+ * @pattern: pattern to identify bad block table or factory marked good /
+ * bad blocks, can be NULL, if len = 0
*
* Descriptor for the bad block table marker and the descriptor for the
* pattern which identifies good and bad blocks. The assumption is made
@@ -90,7 +89,9 @@ struct nand_bbt_descr {
/*
* Constants for oob configuration
*/
-#define ONENAND_BADBLOCK_POS 0
+#define NAND_SMALL_BADBLOCK_POS 5
+#define NAND_LARGE_BADBLOCK_POS 0
+#define ONENAND_BADBLOCK_POS 0
/*
* Bad block scanning errors
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 88d3d8fbf9f..df89f427523 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -518,10 +518,11 @@ struct cfi_fixup {
#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY 0xffff
-#define CFI_MFR_AMD 0x0001
-#define CFI_MFR_ATMEL 0x001F
-#define CFI_MFR_SAMSUNG 0x00EC
-#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_AMD 0x0001
+#define CFI_MFR_INTEL 0x0089
+#define CFI_MFR_ATMEL 0x001F
+#define CFI_MFR_SAMSUNG 0x00EC
+#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index d4f38c5fd44..d0bf422ae37 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -38,6 +38,15 @@ typedef enum {
FL_XIP_WHILE_ERASING,
FL_XIP_WHILE_WRITING,
FL_SHUTDOWN,
+ /* These 2 come from nand_state_t, which has been unified here */
+ FL_READING,
+ FL_CACHEDPRG,
+ /* These 4 come from onenand_state_t, which has been unified here */
+ FL_RESETING,
+ FL_OTPING,
+ FL_PREPARING_ERASE,
+ FL_VERIFYING_ERASE,
+
FL_UNKNOWN
} flstate_t;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7a232a9bdd6..ccab9dfc521 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,6 +21,8 @@
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
struct mtd_info;
/* Scan and identify a NAND device */
@@ -168,7 +170,6 @@ typedef enum {
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
-
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS \
(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@@ -194,6 +195,9 @@ typedef enum {
/* This option is defined if the board driver allocates its own buffers
(e.g. because it needs them DMA-coherent */
#define NAND_OWN_BUFFERS 0x00040000
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV 0x00080000
+
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -202,20 +206,6 @@ typedef enum {
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
-/*
- * nand_state_t - chip states
- * Enumeration for NAND flash chip state
- */
-typedef enum {
- FL_READY,
- FL_READING,
- FL_WRITING,
- FL_ERASING,
- FL_SYNCING,
- FL_CACHEDPRG,
- FL_PM_SUSPENDED,
-} nand_state_t;
-
/* Keep gcc happy */
struct nand_chip;
@@ -402,7 +392,7 @@ struct nand_chip {
uint8_t cellinfo;
int badblockpos;
- nand_state_t state;
+ flstate_t state;
uint8_t *oob_poi;
struct nand_hw_control *controller;
@@ -470,75 +460,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-/**
- * struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
- * when bbt is searched, then we store the found bbts pages here.
- * Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob are of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This number of
- * blocks is reserved at the end of the device where the tables are
- * written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
- * bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked good /
- * bad blocks, can be NULL, if len = 0
- *
- * Descriptor for the bad block table marker and the descriptor for the
- * pattern which identifies good and bad blocks. The assumption is made
- * that the pattern and the version count are always located in the oob area
- * of the first block.
- */
-struct nand_bbt_descr {
- int options;
- int pages[NAND_MAX_CHIPS];
- int offs;
- int veroffs;
- uint8_t version[NAND_MAX_CHIPS];
- int len;
- int maxblocks;
- int reserved_block_code;
- uint8_t *pattern;
-};
-
-/* Options for the bad block table descriptors */
-
-/* The number of bits used per block in the bbt on the device */
-#define NAND_BBT_NRBITS_MSK 0x0000000F
-#define NAND_BBT_1BIT 0x00000001
-#define NAND_BBT_2BIT 0x00000002
-#define NAND_BBT_4BIT 0x00000004
-#define NAND_BBT_8BIT 0x00000008
-/* The bad block table is in the last good block of the device */
-#define NAND_BBT_LASTBLOCK 0x00000010
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_ABSPAGE 0x00000020
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_SEARCH 0x00000040
-/* bbt is stored per chip on multichip devices */
-#define NAND_BBT_PERCHIP 0x00000080
-/* bbt has a version counter at offset veroffs */
-#define NAND_BBT_VERSION 0x00000100
-/* Create a bbt if none axists */
-#define NAND_BBT_CREATE 0x00000200
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000400
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY 0x00000800
-/* Write bbt if neccecary */
-#define NAND_BBT_WRITE 0x00001000
-/* Read and write back block contents when writing bbt */
-#define NAND_BBT_SAVECONTENT 0x00002000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00004000
-
-/* The maximum number of blocks to scan for a bbt */
-#define NAND_BBT_SCAN_MAXBLOCKS 4
-
extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_default_bbt(struct mtd_info *mtd);
@@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, uint8_t * buf);
-/*
-* Constants for oob configuration
-*/
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
-
/**
* struct platform_nand_chip - chip level device structure
* @nr_chips: max. number of chips to scan for
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 052ea8ca243..41bc013571d 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -16,7 +16,13 @@
struct mtd_info;
/*
- * Calculate 3 byte ECC code for 256 byte block
+ * Calculate 3 byte ECC code for eccsize byte block
+ */
+void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
+ u_char *ecc_code);
+
+/*
+ * Calculate 3 byte ECC code for 256/512 byte block
*/
int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
@@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
unsigned int eccsize);
/*
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
*/
int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
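
The raw helpers now take an eccsize, so computing and checking software ECC over a 256- or 512-byte chunk without an mtd_info looks roughly like this; the buffer handling is illustrative only:

	#include <linux/types.h>
	#include <linux/mtd/nand_ecc.h>

	static int example_check_block(u_char *data, u_char *stored_ecc,
				       unsigned int eccsize)
	{
		u_char calc_ecc[3];

		/* 3-byte ECC over an eccsize (256 or 512) byte block */
		__nand_calculate_ecc(data, eccsize, calc_ecc);

		/* returns the number of corrected bitflips, or a negative
		 * value if the data could not be corrected */
		return __nand_correct_data(data, stored_ecc, calc_ecc, eccsize);
	}
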
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4e49f335067..5509eb06b32 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,7 +1,7 @@
/*
* linux/include/linux/mtd/onenand.h
*
- * Copyright (C) 2005-2007 Samsung Electronics
+ * Copyright © 2005-2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
+#include <linux/mtd/flashchip.h>
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
@@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
/* Free resources held by the OneNAND device */
extern void onenand_release(struct mtd_info *mtd);
-/*
- * onenand_state_t - chip states
- * Enumeration for OneNAND flash chip state
- */
-typedef enum {
- FL_READY,
- FL_READING,
- FL_WRITING,
- FL_ERASING,
- FL_SYNCING,
- FL_LOCKING,
- FL_RESETING,
- FL_OTPING,
- FL_PM_SUSPENDED,
-} onenand_state_t;
-
/**
* struct onenand_bufferram - OneNAND BufferRAM Data
* @blockpage: block & page address in BufferRAM
@@ -137,7 +122,7 @@ struct onenand_chip {
spinlock_t chip_lock;
wait_queue_head_t wq;
- onenand_state_t state;
+ flstate_t state;
unsigned char *page_buf;
unsigned char *oob_buf;
@@ -152,6 +137,8 @@ struct onenand_chip {
/*
* Helper macros
*/
+#define ONENAND_PAGES_PER_BLOCK (1<<6)
+
#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index acadbf53a69..cd6f3b43119 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -131,6 +131,8 @@
#define ONENAND_CMD_LOCK_TIGHT (0x2C)
#define ONENAND_CMD_UNLOCK_ALL (0x27)
#define ONENAND_CMD_ERASE (0x94)
+#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
+#define ONENAND_CMD_ERASE_VERIFY (0x71)
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)
diff --git a/include/linux/namei.h b/include/linux/namei.h
index ec0f607b364..05b441d9364 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -72,11 +72,8 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
-extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
-extern void release_open_intent(struct nameidata *);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_noperm(const char *, struct dentry *);
extern int follow_down(struct path *);
extern int follow_up(struct path *);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c4c06020810..9b8299af374 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -128,6 +128,8 @@
#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
#define SEQ4_STATUS_LEASE_MOVED 0x00000080
#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
+#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200
+#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400
#define NFS4_MAX_UINT64 (~(u64)0)
@@ -528,6 +530,7 @@ enum {
NFSPROC4_CLNT_DESTROY_SESSION,
NFSPROC4_CLNT_SEQUENCE,
NFSPROC4_CLNT_GET_LEASE_TIME,
+ NFSPROC4_CLNT_RECLAIM_COMPLETE,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569eabe3..34fc6be5bfc 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
+ struct completion complete;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62f63fb0c4c..89b28812ec2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -2,6 +2,7 @@
#define _LINUX_NFS_XDR_H
#include <linux/nfsacl.h>
+#include <linux/nfs3.h>
/*
* To change the maximum rsize and wsize supported by the NFS client, adjust
@@ -170,8 +171,9 @@ struct nfs4_sequence_args {
struct nfs4_sequence_res {
struct nfs4_session *sr_session;
u8 sr_slotid; /* slot used to send request */
- unsigned long sr_renewal_time;
int sr_status; /* sequence operation status */
+ unsigned long sr_renewal_time;
+ u32 sr_status_flags;
};
struct nfs4_get_lease_time_args {
@@ -938,6 +940,16 @@ struct nfs41_create_session_args {
struct nfs41_create_session_res {
struct nfs_client *client;
};
+
+struct nfs41_reclaim_complete_args {
+ /* In the future extend to include curr_fh for use with migration */
+ unsigned char one_fs:1;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs41_reclaim_complete_res {
+ struct nfs4_sequence_res seq_res;
+};
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 43011b69297..f321b578ede 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -29,6 +29,7 @@
#ifdef __KERNEL__
#include <linux/posix_acl.h>
+#include <linux/sunrpc/xdr.h>
/* Maximum number of ACL entries over NFS */
#define NFS_ACL_MAX_ENTRIES 1024
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index a6d9ef2bb34..8ae78a61eea 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -12,7 +12,7 @@
# include <linux/types.h>
#ifdef __KERNEL__
-# include <linux/in.h>
+# include <linux/nfsd/nfsfh.h>
#endif
/*
@@ -39,11 +39,23 @@
#define NFSEXP_FSID 0x2000
#define NFSEXP_CROSSMOUNT 0x4000
#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */
-#define NFSEXP_ALLFLAGS 0xFE3F
+/*
+ * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4
+ * clients, and only to the single directory that is the root of the
+ * export; further lookup and readdir operations are treated as if every
+ * subdirectory was a mountpoint, and ignored if they are not themselves
+ * exported. This is used by nfsd and mountd to construct the NFSv4
+ * pseudofilesystem, which provides access only to paths leading to each
+ * exported filesystem.
+ */
+#define NFSEXP_V4ROOT 0x10000
+/* All flags that we claim to support. (Note we don't support NOACL.) */
+#define NFSEXP_ALLFLAGS 0x17E3F
/* The flags that may vary depending on security flavor: */
#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
- | NFSEXP_ALLSQUASH)
+ | NFSEXP_ALLSQUASH \
+ | NFSEXP_INSECURE_PORT)
#ifdef __KERNEL__
@@ -108,7 +120,6 @@ struct svc_expkey {
struct path ek_path;
};
-#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index 8f641c90845..65e333afaee 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -16,11 +16,9 @@
# include <linux/types.h>
#ifdef __KERNEL__
-# include <linux/string.h>
-# include <linux/fs.h>
+# include <linux/sunrpc/svc.h>
#endif
#include <linux/nfsd/const.h>
-#include <linux/nfsd/debug.h>
/*
* This is the old "dentry style" Linux NFSv2 file handle.
@@ -164,208 +162,6 @@ typedef struct svc_fh {
} svc_fh;
-enum nfsd_fsid {
- FSID_DEV = 0,
- FSID_NUM,
- FSID_MAJOR_MINOR,
- FSID_ENCODE_DEV,
- FSID_UUID4_INUM,
- FSID_UUID8,
- FSID_UUID16,
- FSID_UUID16_INUM,
-};
-
-enum fsid_source {
- FSIDSOURCE_DEV,
- FSIDSOURCE_FSID,
- FSIDSOURCE_UUID,
-};
-extern enum fsid_source fsid_source(struct svc_fh *fhp);
-
-
-/* This might look a little large to "inline" but in all calls except
- * one, 'vers' is constant so moste of the function disappears.
- */
-static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino,
- u32 fsid, unsigned char *uuid)
-{
- u32 *up;
- switch(vers) {
- case FSID_DEV:
- fsidv[0] = htonl((MAJOR(dev)<<16) |
- MINOR(dev));
- fsidv[1] = ino_t_to_u32(ino);
- break;
- case FSID_NUM:
- fsidv[0] = fsid;
- break;
- case FSID_MAJOR_MINOR:
- fsidv[0] = htonl(MAJOR(dev));
- fsidv[1] = htonl(MINOR(dev));
- fsidv[2] = ino_t_to_u32(ino);
- break;
-
- case FSID_ENCODE_DEV:
- fsidv[0] = new_encode_dev(dev);
- fsidv[1] = ino_t_to_u32(ino);
- break;
-
- case FSID_UUID4_INUM:
- /* 4 byte fsid and inode number */
- up = (u32*)uuid;
- fsidv[0] = ino_t_to_u32(ino);
- fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3];
- break;
-
- case FSID_UUID8:
- /* 8 byte fsid */
- up = (u32*)uuid;
- fsidv[0] = up[0] ^ up[2];
- fsidv[1] = up[1] ^ up[3];
- break;
-
- case FSID_UUID16:
- /* 16 byte fsid - NFSv3+ only */
- memcpy(fsidv, uuid, 16);
- break;
-
- case FSID_UUID16_INUM:
- /* 8 byte inode and 16 byte fsid */
- *(u64*)fsidv = (u64)ino;
- memcpy(fsidv+2, uuid, 16);
- break;
- default: BUG();
- }
-}
-
-static inline int key_len(int type)
-{
- switch(type) {
- case FSID_DEV: return 8;
- case FSID_NUM: return 4;
- case FSID_MAJOR_MINOR: return 12;
- case FSID_ENCODE_DEV: return 8;
- case FSID_UUID4_INUM: return 8;
- case FSID_UUID8: return 8;
- case FSID_UUID16: return 16;
- case FSID_UUID16_INUM: return 24;
- default: return 0;
- }
-}
-
-/*
- * Shorthand for dprintk()'s
- */
-extern char * SVCFH_fmt(struct svc_fh *fhp);
-
-/*
- * Function prototypes
- */
-__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
-__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
-__be32 fh_update(struct svc_fh *);
-void fh_put(struct svc_fh *);
-
-static __inline__ struct svc_fh *
-fh_copy(struct svc_fh *dst, struct svc_fh *src)
-{
- WARN_ON(src->fh_dentry || src->fh_locked);
-
- *dst = *src;
- return dst;
-}
-
-static inline void
-fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
-{
- dst->fh_size = src->fh_size;
- memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
-}
-
-static __inline__ struct svc_fh *
-fh_init(struct svc_fh *fhp, int maxsize)
-{
- memset(fhp, 0, sizeof(*fhp));
- fhp->fh_maxsize = maxsize;
- return fhp;
-}
-
-#ifdef CONFIG_NFSD_V3
-/*
- * Fill in the pre_op attr for the wcc data
- */
-static inline void
-fill_pre_wcc(struct svc_fh *fhp)
-{
- struct inode *inode;
-
- inode = fhp->fh_dentry->d_inode;
- if (!fhp->fh_pre_saved) {
- fhp->fh_pre_mtime = inode->i_mtime;
- fhp->fh_pre_ctime = inode->i_ctime;
- fhp->fh_pre_size = inode->i_size;
- fhp->fh_pre_change = inode->i_version;
- fhp->fh_pre_saved = 1;
- }
-}
-
-extern void fill_post_wcc(struct svc_fh *);
-#else
-#define fill_pre_wcc(ignored)
-#define fill_post_wcc(notused)
-#endif /* CONFIG_NFSD_V3 */
-
-
-/*
- * Lock a file handle/inode
- * NOTE: both fh_lock and fh_unlock are done "by hand" in
- * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
- * so, any changes here should be reflected there.
- */
-
-static inline void
-fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
-{
- struct dentry *dentry = fhp->fh_dentry;
- struct inode *inode;
-
- dfprintk(FILEOP, "nfsd: fh_lock(%s) locked = %d\n",
- SVCFH_fmt(fhp), fhp->fh_locked);
-
- BUG_ON(!dentry);
-
- if (fhp->fh_locked) {
- printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
- dentry->d_parent->d_name.name, dentry->d_name.name);
- return;
- }
-
- inode = dentry->d_inode;
- mutex_lock_nested(&inode->i_mutex, subclass);
- fill_pre_wcc(fhp);
- fhp->fh_locked = 1;
-}
-
-static inline void
-fh_lock(struct svc_fh *fhp)
-{
- fh_lock_nested(fhp, I_MUTEX_NORMAL);
-}
-
-/*
- * Unlock a file handle/inode
- */
-static inline void
-fh_unlock(struct svc_fh *fhp)
-{
- BUG_ON(!fhp->fh_dentry);
-
- if (fhp->fh_locked) {
- fill_post_wcc(fhp);
- mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
- fhp->fh_locked = 0;
- }
-}
#endif /* __KERNEL__ */
diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h
index 7a3b565b898..812bc1e160d 100644
--- a/include/linux/nfsd/syscall.h
+++ b/include/linux/nfsd/syscall.h
@@ -9,14 +9,8 @@
#ifndef NFSD_SYSCALL_H
#define NFSD_SYSCALL_H
-# include <linux/types.h>
-#ifdef __KERNEL__
-# include <linux/in.h>
-#endif
-#include <linux/posix_types.h>
-#include <linux/nfsd/const.h>
+#include <linux/types.h>
#include <linux/nfsd/export.h>
-#include <linux/nfsd/nfsfh.h>
/*
* Version of the syscall interface
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ce520402e84..3fe02cf8b65 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -151,6 +151,8 @@ struct nilfs_super_root {
#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */
#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order
semantics also for data */
+#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during
+ mount-time recovery */
/**
@@ -403,6 +405,28 @@ struct nilfs_segment_summary {
#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
/**
+ * struct nilfs_btree_node - B-tree node
+ * @bn_flags: flags
+ * @bn_level: level
+ * @bn_nchildren: number of children
+ * @bn_pad: padding
+ */
+struct nilfs_btree_node {
+ __u8 bn_flags;
+ __u8 bn_level;
+ __le16 bn_nchildren;
+ __le32 bn_pad;
+};
+
+/* flags */
+#define NILFS_BTREE_NODE_ROOT 0x01
+
+/* level */
+#define NILFS_BTREE_LEVEL_DATA 0
+#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
+#define NILFS_BTREE_LEVEL_MAX 14
+
+/**
* struct nilfs_palloc_group_desc - block group descriptor
* @pg_nfrees: number of free entries in block group
*/
diff --git a/include/linux/node.h b/include/linux/node.h
index 681a697b9a8..06292dac3ea 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -21,13 +21,19 @@
#include <linux/sysdev.h>
#include <linux/cpumask.h>
+#include <linux/workqueue.h>
struct node {
struct sys_device sysdev;
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ struct work_struct node_work;
+#endif
};
struct memory_block;
extern struct node node_devices[];
+typedef void (*node_registration_func_t)(struct node *);
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
+
+#ifdef CONFIG_HUGETLBFS
+extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister);
+#endif
#else
static inline int register_one_node(int nid)
{
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
{
return 0;
}
+
+static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+ node_registration_func_t unreg)
+{
+}
#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b359c4a9ec9..454997cccbd 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp)
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+{
+ nodes_clear(*mask);
+ node_set(node, *mask);
+}
+
#define nodemask_of_node(node) \
({ \
typeof(_unused_nodemask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
- m.bits[0] = 1UL<<(node); \
+ m.bits[0] = 1UL << (node); \
} else { \
- nodes_clear(m); \
- node_set((node), m); \
+ init_nodemask_of_node(&m, (node)); \
} \
m; \
})
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h)
+ * For nodemask scratch area.
+ * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
+ * name.
*/
-
-#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
-#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define NODEMASK_FREE(m) kfree(m)
+#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
+#define NODEMASK_ALLOC(type, name, gfp_flags) \
+ type *name = kmalloc(sizeof(*name), gfp_flags)
+#define NODEMASK_FREE(m) kfree(m)
#else
-#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
-#define NODEMASK_FREE(m)
+#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name
+#define NODEMASK_FREE(m) do {} while (0)
#endif
/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
@@ -497,8 +504,10 @@ struct nodemask_scratch {
nodemask_t mask2;
};
-#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
-#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+#define NODEMASK_SCRATCH(x) \
+ NODEMASK_ALLOC(struct nodemask_scratch, x, \
+ GFP_KERNEL | __GFP_NORETRY)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
#endif /* __LINUX_NODEMASK_H */
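
The reworked NODEMASK_ALLOC() now takes the full type and gfp flags, and degenerates to a stack variable on configurations with NODES_SHIFT <= 8. A sketch of the intended calling pattern, with illustrative error handling:

	#include <linux/errno.h>
	#include <linux/nodemask.h>
	#include <linux/slab.h>

	static int example_use_nodemask(int nid)
	{
		NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

		if (!nodes_allowed)
			return -ENOMEM;	/* only possible on large-NODES_SHIFT configs */

		init_nodemask_of_node(nodes_allowed, nid);
		/* ... use *nodes_allowed ... */

		NODEMASK_FREE(nodes_allowed);
		return 0;
	}
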
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a31a7301b15..3aaa31603a8 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -10,4 +10,6 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
+#define NUMA_NO_NODE (-1)
+
#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h
new file mode 100644
index 00000000000..f46c40ac6d4
--- /dev/null
+++ b/include/linux/omapfb.h
@@ -0,0 +1,251 @@
+/*
+ * File: include/linux/omapfb.h
+ *
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_OMAPFB_H__
+#define __LINUX_OMAPFB_H__
+
+#include <linux/fb.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* IOCTL commands. */
+
+#define OMAP_IOW(num, dtype) _IOW('O', num, dtype)
+#define OMAP_IOR(num, dtype) _IOR('O', num, dtype)
+#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype)
+#define OMAP_IO(num) _IO('O', num)
+
+#define OMAPFB_MIRROR OMAP_IOW(31, int)
+#define OMAPFB_SYNC_GFX OMAP_IO(37)
+#define OMAPFB_VSYNC OMAP_IO(38)
+#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int)
+#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps)
+#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int)
+#define OMAPFB_LCD_TEST OMAP_IOW(45, int)
+#define OMAPFB_CTRL_TEST OMAP_IOW(46, int)
+#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old)
+#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key)
+#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key)
+#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info)
+#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info)
+#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window)
+#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info)
+#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info)
+#define OMAPFB_WAITFORVSYNC OMAP_IO(57)
+#define OMAPFB_MEMORY_READ OMAP_IOR(58, struct omapfb_memory_read)
+#define OMAPFB_GET_OVERLAY_COLORMODE OMAP_IOR(59, struct omapfb_ovl_colormode)
+#define OMAPFB_WAITFORGO OMAP_IO(60)
+#define OMAPFB_GET_VRAM_INFO OMAP_IOR(61, struct omapfb_vram_info)
+#define OMAPFB_SET_TEARSYNC OMAP_IOW(62, struct omapfb_tearsync_info)
+
+#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff
+#define OMAPFB_CAPS_LCDC_MASK 0x00fff000
+#define OMAPFB_CAPS_PANEL_MASK 0xff000000
+
+#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000
+#define OMAPFB_CAPS_TEARSYNC 0x00002000
+#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000
+#define OMAPFB_CAPS_PLANE_SCALE 0x00008000
+#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000
+#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000
+#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000
+#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000
+#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000
+
+/* Values from DSP must map to lower 16-bits */
+#define OMAPFB_FORMAT_MASK 0x00ff
+#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100
+#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200
+#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400
+#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800
+#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000
+
+#define OMAPFB_MEMTYPE_SDRAM 0
+#define OMAPFB_MEMTYPE_SRAM 1
+#define OMAPFB_MEMTYPE_MAX 1
+
+enum omapfb_color_format {
+ OMAPFB_COLOR_RGB565 = 0,
+ OMAPFB_COLOR_YUV422,
+ OMAPFB_COLOR_YUV420,
+ OMAPFB_COLOR_CLUT_8BPP,
+ OMAPFB_COLOR_CLUT_4BPP,
+ OMAPFB_COLOR_CLUT_2BPP,
+ OMAPFB_COLOR_CLUT_1BPP,
+ OMAPFB_COLOR_RGB444,
+ OMAPFB_COLOR_YUY422,
+
+ OMAPFB_COLOR_ARGB16,
+ OMAPFB_COLOR_RGB24U, /* RGB24, 32-bit container */
+ OMAPFB_COLOR_RGB24P, /* RGB24, 24-bit container */
+ OMAPFB_COLOR_ARGB32,
+ OMAPFB_COLOR_RGBA32,
+ OMAPFB_COLOR_RGBX32,
+};
+
+struct omapfb_update_window {
+ __u32 x, y;
+ __u32 width, height;
+ __u32 format;
+ __u32 out_x, out_y;
+ __u32 out_width, out_height;
+ __u32 reserved[8];
+};
+
+struct omapfb_update_window_old {
+ __u32 x, y;
+ __u32 width, height;
+ __u32 format;
+};
+
+enum omapfb_plane {
+ OMAPFB_PLANE_GFX = 0,
+ OMAPFB_PLANE_VID1,
+ OMAPFB_PLANE_VID2,
+};
+
+enum omapfb_channel_out {
+ OMAPFB_CHANNEL_OUT_LCD = 0,
+ OMAPFB_CHANNEL_OUT_DIGIT,
+};
+
+struct omapfb_plane_info {
+ __u32 pos_x;
+ __u32 pos_y;
+ __u8 enabled;
+ __u8 channel_out;
+ __u8 mirror;
+ __u8 reserved1;
+ __u32 out_width;
+ __u32 out_height;
+ __u32 reserved2[12];
+};
+
+struct omapfb_mem_info {
+ __u32 size;
+ __u8 type;
+ __u8 reserved[3];
+};
+
+struct omapfb_caps {
+ __u32 ctrl;
+ __u32 plane_color;
+ __u32 wnd_color;
+};
+
+enum omapfb_color_key_type {
+ OMAPFB_COLOR_KEY_DISABLED = 0,
+ OMAPFB_COLOR_KEY_GFX_DST,
+ OMAPFB_COLOR_KEY_VID_SRC,
+};
+
+struct omapfb_color_key {
+ __u8 channel_out;
+ __u32 background;
+ __u32 trans_key;
+ __u8 key_type;
+};
+
+enum omapfb_update_mode {
+ OMAPFB_UPDATE_DISABLED = 0,
+ OMAPFB_AUTO_UPDATE,
+ OMAPFB_MANUAL_UPDATE
+};
+
+struct omapfb_memory_read {
+ __u16 x;
+ __u16 y;
+ __u16 w;
+ __u16 h;
+ size_t buffer_size;
+ void __user *buffer;
+};
+
+struct omapfb_ovl_colormode {
+ __u8 overlay_idx;
+ __u8 mode_idx;
+ __u32 bits_per_pixel;
+ __u32 nonstd;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+};
+
+struct omapfb_vram_info {
+ __u32 total;
+ __u32 free;
+ __u32 largest_free_block;
+ __u32 reserved[5];
+};
+
+struct omapfb_tearsync_info {
+ __u8 enabled;
+ __u8 reserved1[3];
+ __u16 line;
+ __u16 reserved2;
+};
+
+#ifdef __KERNEL__
+
+#include <plat/board.h>
+
+#ifdef CONFIG_ARCH_OMAP1
+#define OMAPFB_PLANE_NUM 1
+#else
+#define OMAPFB_PLANE_NUM 3
+#endif
+
+struct omapfb_mem_region {
+ u32 paddr;
+ void __iomem *vaddr;
+ unsigned long size;
+ u8 type; /* OMAPFB_PLANE_MEM_* */
+ enum omapfb_color_format format;/* OMAPFB_COLOR_* */
+ unsigned format_used:1; /* Must be set when format is set.
+ * Needed b/c of the badly chosen 0
+ * base for OMAPFB_COLOR_* values
+ */
+ unsigned alloc:1; /* allocated by the driver */
+ unsigned map:1; /* kernel mapped by the driver */
+};
+
+struct omapfb_mem_desc {
+ int region_cnt;
+ struct omapfb_mem_region region[OMAPFB_PLANE_NUM];
+};
+
+struct omapfb_platform_data {
+ struct omap_lcd_config lcd;
+ struct omapfb_mem_desc mem_desc;
+ void *ctrl_platform_data;
+};
+
+/* in arch/arm/plat-omap/fb.c */
+extern void omapfb_set_platform_data(struct omapfb_platform_data *data);
+extern void omapfb_set_ctrl_platform_data(void *pdata);
+extern void omapfb_reserve_sdram(void);
+
+#endif
+
+#endif /* __OMAPFB_H */
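
The ioctl numbers and structures in this new header form the user-space ABI of the driver. A hypothetical user-space fragment querying and repositioning an overlay plane; the device node name is an assumption, not something defined by this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/omapfb.h>

	int main(void)
	{
		struct omapfb_plane_info pi;
		int fd = open("/dev/fb1", O_RDWR);	/* assumed overlay framebuffer node */

		if (fd < 0 || ioctl(fd, OMAPFB_QUERY_PLANE, &pi) < 0) {
			perror("omapfb");
			return 1;
		}

		pi.pos_x = 0;
		pi.pos_y = 0;
		pi.enabled = 1;
		if (ioctl(fd, OMAPFB_SETUP_PLANE, &pi) < 0)
			perror("OMAPFB_SETUP_PLANE");
		return 0;
	}
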
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6aac5fe4f6f..53766231562 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -10,6 +10,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/nodemask.h>
struct zonelist;
struct notifier_block;
@@ -26,7 +27,8 @@ enum oom_constraint {
extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
+extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *mask);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b17395..5b59f35dcb8 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
-#include <linux/bounds.h>
+#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */
/*
@@ -99,7 +99,7 @@ enum pageflags {
PG_buddy, /* Page is free, on buddy lists */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
TESTCLEARFLAG(Unevictable, unevictable)
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
-#define MLOCK_PAGES 0
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
@@ -277,13 +275,15 @@ PAGEFLAG_FALSE(Uncached)
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison)
-TESTSETFLAG(HWPoison, hwpoison)
+TESTSCFLAG(HWPoison, hwpoison)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif
+u64 stable_page_flags(struct page *page);
+
static inline int PageUptodate(struct page *page)
{
int ret = test_bit(PG_uptodate, &(page)->flags);
@@ -393,7 +393,7 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
#define __PG_MLOCKED 0
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 4b938d4f3ac..b0e4eb12623 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -57,6 +57,8 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
+TESTPCGFLAG(Locked, LOCK)
+
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
@@ -86,11 +88,6 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
bit_spin_lock(PCG_LOCK, &pc->flags);
}
-static inline int trylock_page_cgroup(struct page_cgroup *pc)
-{
- return bit_spin_trylock(PCG_LOCK, &pc->flags);
-}
-
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_unlock(PCG_LOCK, &pc->flags);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f5c7cd343e5..bf1e6708084 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -218,6 +218,7 @@ struct pci_dev {
unsigned int class; /* 3 bytes: (base,sub,prog-if) */
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
+ u8 pcie_cap; /* PCI-E capability offset */
u8 pcie_type; /* PCI-E device/port type */
u8 rom_base_reg; /* which config register controls the ROM */
u8 pin; /* which interrupt pin this device uses */
@@ -280,6 +281,7 @@ struct pci_dev {
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
+ unsigned int aer_firmware_first:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -635,7 +637,13 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
-struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);
+struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+ unsigned int devfn);
+static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
+ unsigned int devfn)
+{
+ return pci_get_domain_bus_and_slot(0, bus, devfn);
+}
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
int pci_dev_present(const struct pci_device_id *ids);
@@ -701,6 +709,7 @@ void pci_disable_device(struct pci_dev *dev);
void pci_set_master(struct pci_dev *dev);
void pci_clear_master(struct pci_dev *dev);
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
+int pci_set_cacheline_size(struct pci_dev *dev);
#define HAVE_PCI_SET_MWI
int __must_check pci_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
@@ -1246,6 +1255,8 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
+extern u8 __devinitdata pci_dfl_cache_line_size;
+extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mem_size;
@@ -1290,5 +1301,34 @@ extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
#endif
+/**
+ * pci_pcie_cap - get the saved PCIe capability offset
+ * @dev: PCI device
+ *
+ * PCIe capability offset is calculated at PCI device initialization
+ * time and saved in the data structure. This function returns saved
+ * PCIe capability offset. Using this instead of pci_find_capability()
+ * reduces unnecessary search in the PCI configuration space. If you
+ * need to calculate PCIe capability offset from raw device for some
+ * reasons, please use pci_find_capability() instead.
+ */
+static inline int pci_pcie_cap(struct pci_dev *dev)
+{
+ return dev->pcie_cap;
+}
+
+/**
+ * pci_is_pcie - check if the PCI device is PCI Express capable
+ * @dev: PCI device
+ *
+ * Return true if the PCI device is PCI Express capable, false otherwise.
+ */
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+ return !!pci_pcie_cap(dev);
+}
+
+void pci_request_acs(void);
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
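
With the capability offset cached in pci_dev, drivers can skip pci_find_capability() when they only need the PCIe capability base. A short sketch using the two new helpers; the register read is illustrative:

	#include <linux/pci.h>

	static u16 example_read_link_status(struct pci_dev *dev)
	{
		u16 lnksta = 0;

		if (pci_is_pcie(dev))
			pci_read_config_word(dev, pci_pcie_cap(dev) + PCI_EXP_LNKSTA,
					     &lnksta);
		return lnksta;
	}
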
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eae1f864c93..cca8a044e2b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2295,6 +2295,20 @@
#define PCI_DEVICE_ID_MPC8536 0x0051
#define PCI_DEVICE_ID_P2020E 0x0070
#define PCI_DEVICE_ID_P2020 0x0071
+#define PCI_DEVICE_ID_P2010E 0x0078
+#define PCI_DEVICE_ID_P2010 0x0079
+#define PCI_DEVICE_ID_P1020E 0x0100
+#define PCI_DEVICE_ID_P1020 0x0101
+#define PCI_DEVICE_ID_P1011E 0x0108
+#define PCI_DEVICE_ID_P1011 0x0109
+#define PCI_DEVICE_ID_P1022E 0x0110
+#define PCI_DEVICE_ID_P1022 0x0111
+#define PCI_DEVICE_ID_P1013E 0x0118
+#define PCI_DEVICE_ID_P1013 0x0119
+#define PCI_DEVICE_ID_P4080E 0x0400
+#define PCI_DEVICE_ID_P4080 0x0401
+#define PCI_DEVICE_ID_P4040E 0x0408
+#define PCI_DEVICE_ID_P4040 0x0409
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index dd0bed4f1cf..9f2ad0aa3c3 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -365,6 +365,11 @@
#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
+/* PCI Bridge Subsystem ID registers */
+
+#define PCI_SSVID_VENDOR_ID 4 /* PCI-Bridge subsystem vendor id register */
+#define PCI_SSVID_DEVICE_ID 6 /* PCI-Bridge subsystem device id register */
+
/* PCI Express capability registers */
#define PCI_EXP_FLAGS 2 /* Capabilities register */
@@ -502,6 +507,7 @@
#define PCI_EXT_CAP_ID_VC 2
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
+#define PCI_EXT_CAP_ID_ACS 13
#define PCI_EXT_CAP_ID_ARI 14
#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
@@ -662,4 +668,16 @@
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
+/* Access Control Service */
+#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
+#define PCI_ACS_SV 0x01 /* Source Validation */
+#define PCI_ACS_TB 0x02 /* Translation Blocking */
+#define PCI_ACS_RR 0x04 /* P2P Request Redirect */
+#define PCI_ACS_CR 0x08 /* P2P Completion Redirect */
+#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
+#define PCI_ACS_EC 0x20 /* P2P Egress Control */
+#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
+#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
+#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
+
#endif /* LINUX_PCI_REGS_H */
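
The new ACS register and bit definitions would typically be used together with pci_find_ext_capability(). An illustrative sketch that enables a common set of isolation bits where the capability advertises them; the chosen bit set is an example, not taken from this patch:

	#include <linux/pci.h>

	static void example_enable_acs(struct pci_dev *dev)
	{
		int pos;
		u16 cap, ctrl;

		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
		if (!pos)
			return;

		pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
		pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

		/* Request source validation, request/completion redirect and
		 * upstream forwarding, but only the bits the device supports. */
		ctrl |= cap & (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
		pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
	}
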
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index b4c79545330..6775532b92a 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -10,10 +10,7 @@
#define _PCIEPORT_IF_H_
/* Port Type */
-#define PCIE_RC_PORT 4 /* Root port of RC */
-#define PCIE_SW_UPSTREAM_PORT 5 /* Upstream port of Switch */
-#define PCIE_SW_DOWNSTREAM_PORT 6 /* Downstream port of Switch */
-#define PCIE_ANY_PORT 7
+#define PCIE_ANY_PORT (~0)
/* Service Type */
#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
@@ -25,17 +22,6 @@
#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
-/* Root/Upstream/Downstream Port's Interrupt Mode */
-#define PCIE_PORT_NO_IRQ (-1)
-#define PCIE_PORT_INTx_MODE 0
-#define PCIE_PORT_MSI_MODE 1
-#define PCIE_PORT_MSIX_MODE 2
-
-struct pcie_port_data {
- int port_type; /* Type of the port */
- int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
-};
-
struct pcie_device {
int irq; /* Service IRQ/MSI/MSI-X Vector */
struct pci_dev *port; /* Root/Upstream/Downstream Port */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd..5a5d6ce4bd5 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999..cf5efbcf716 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
#ifdef CONFIG_SMP
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
- void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
kfree(p);
}
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ return __pa(addr);
+}
+
static inline void __init setup_per_cpu_areas(void) { }
static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
- __alignof__(type))
+#define alloc_percpu(type) \
+ (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
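The change from "(type *)" to "(typeof(type) *)" plausibly exists so that alloc_percpu() also accepts array types, where pasting "type *" would not form a valid pointer declarator; that reading is an assumption on my part, not stated in the hunk. Below is a minimal standalone userspace sketch of the typeof trick, with plain malloc() standing in for the per cpu allocator and alloc_typed() as a made-up name:

/*
 * Sketch only: demonstrates why "(typeof(type) *)" is more robust than
 * "(type *)" in a cast-producing macro. For an array type such as
 * "int[4]", "int[4] *" is a syntax error, while "typeof(int[4]) *" is a
 * valid pointer-to-array type. Requires GCC's typeof extension.
 */
#include <stdio.h>
#include <stdlib.h>

#define alloc_typed(type) ((typeof(type) *)malloc(sizeof(type)))

int main(void)
{
        int *single = alloc_typed(int);         /* works with either form */
        int (*arr)[4] = alloc_typed(int[4]);    /* only works via typeof */

        *single = 42;
        (*arr)[0] = 1;
        printf("%d %d\n", *single, (*arr)[0]);
        free(single);
        free(arr);
        return 0;
}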
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
#endif
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable) \
+({ typeof(variable) pscr_ret__; \
+ switch(sizeof(variable)) { \
+ case 1: pscr_ret__ = stem##1(variable);break; \
+ case 2: pscr_ret__ = stem##2(variable);break; \
+ case 4: pscr_ret__ = stem##4(variable);break; \
+ case 8: pscr_ret__ = stem##8(variable);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+ pscr_ret__; \
+})
+
+#define __pcpu_size_call(stem, variable, ...) \
+do { \
+ switch(sizeof(variable)) { \
+ case 1: stem##1(variable, __VA_ARGS__);break; \
+ case 2: stem##2(variable, __VA_ARGS__);break; \
+ case 4: stem##4(variable, __VA_ARGS__);break; \
+ case 8: stem##8(variable, __VA_ARGS__);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+} while (0)
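These two helpers dispatch on sizeof(variable), a compile-time constant, so the compiler drops every branch except the matching stem##N call. A minimal standalone userspace sketch of the same size-dispatch pattern follows; read_1() through read_8() and sized_read() are stand-ins invented for illustration, not the kernel's per-size accessors:

/*
 * Sketch only: sizeof()-based dispatch to per-size helpers, mirroring
 * __pcpu_size_call_return(). Uses GCC statement expressions and typeof,
 * as the kernel macros above do.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t  read_1(uint8_t  *p) { return *p; }
static uint16_t read_2(uint16_t *p) { return *p; }
static uint32_t read_4(uint32_t *p) { return *p; }
static uint64_t read_8(uint64_t *p) { return *p; }

#define sized_read(var)                                         \
({      typeof(var) ret__;                                      \
        switch (sizeof(var)) {                                  \
        case 1: ret__ = read_1((void *)&(var)); break;          \
        case 2: ret__ = read_2((void *)&(var)); break;          \
        case 4: ret__ = read_4((void *)&(var)); break;          \
        case 8: ret__ = read_8((void *)&(var)); break;          \
        default: ret__ = 0; break;                              \
        }                                                       \
        ret__;                                                  \
})

int main(void)
{
        uint16_t a = 7;
        uint64_t b = 9;

        printf("%u %llu\n", (unsigned)sized_read(a),
               (unsigned long long)sized_read(b));
        return 0;
}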
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (which can be obtained
+ * using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access against other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely. F.e. define this_cpu_add().
+ * The arch must then ensure that the various scalar formats passed
+ * are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes. F.e. provide
+ * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ * sized RMW actions. If arch code does not provide operations for
+ * a scalar size then the fallback in the generic code will be
+ * used.
+ */
+
+#define _this_cpu_generic_read(pcp) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = *this_cpu_ptr(&(pcp)); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ preempt_disable(); \
+ *__this_cpu_ptr(&pcp) op val; \
+ preempt_enable(); \
+} while (0)
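_this_cpu_generic_to_op() takes the C operator itself (=, +=, &=, ...) as a macro argument, so one preempt-protected helper backs write, add and the bitwise operations. A standalone userspace sketch of that operator-as-argument pattern follows; apply_op() is a made-up name and the kernel's preempt_disable()/preempt_enable() calls are reduced to comments:

/* Sketch only: passing an operator token into a macro. */
#include <stdio.h>

#define apply_op(lval, val, op)                                 \
do {                                                            \
        /* preempt_disable() would go here in the kernel */     \
        (lval) op (val);                                        \
        /* preempt_enable() would go here in the kernel */      \
} while (0)

int main(void)
{
        int counter = 0;

        apply_op(counter, 5, +=);       /* like this_cpu_add: 5 */
        apply_op(counter, 0xf0, |=);    /* like this_cpu_or:  0xf5 */
        apply_op(counter, 0x0f, &=);    /* like this_cpu_and: 5 */
        printf("%d\n", counter);        /* prints 5 */
        return 0;
}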
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ *__this_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from another
+ * processor (which one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op) \
+do { \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ *__this_cpu_ptr(&(pcp)) op val; \
+ local_irq_restore(flags); \
+} while (0)
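irqsafe_cpu_generic_to_op() brackets the read-modify-write with local_irq_save()/local_irq_restore(), so an interrupt on the same CPU cannot interleave with the update; it still gives no protection against other CPUs. As a loose userspace analogy (an analogy only, not kernel code), blocking signals around an update plays a similar role, with the signal handler standing in for the interrupt:

/* Sketch only: signal blocking as a stand-in for local_irq_save(). */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t counter;

static void on_alarm(int sig)
{
        (void)sig;
        counter += 100;         /* the "interrupt" also touches the variable */
}

#define irqsafe_add(var, val)                           \
do {                                                    \
        sigset_t set__, old__;                          \
        sigfillset(&set__);                             \
        sigprocmask(SIG_BLOCK, &set__, &old__);         \
        (var) += (val);                                 \
        sigprocmask(SIG_SETMASK, &old__, NULL);         \
} while (0)

int main(void)
{
        signal(SIGALRM, on_alarm);
        irqsafe_add(counter, 1);        /* handler cannot run inside the RMW */
        raise(SIGALRM);                 /* delivered here; handler adds 100 */
        printf("%d\n", (int)counter);   /* prints 101 */
        return 0;
}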
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
deleted file mode 100644
index e3fb2560670..00000000000
--- a/include/linux/perf_counter.h
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * NOTE: this file will be removed in a future kernel release, it is
- * provided as a courtesy copy of user-space code that relies on the
- * old (pre-rename) symbols and constants.
- *
- * Performance events:
- *
- * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
- *
- * Data type definitions, declarations, prototypes.
- *
- * Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
- */
-#ifndef _LINUX_PERF_COUNTER_H
-#define _LINUX_PERF_COUNTER_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <asm/byteorder.h>
-
-/*
- * User-space ABI bits:
- */
-
-/*
- * attr.type
- */
-enum perf_type_id {
- PERF_TYPE_HARDWARE = 0,
- PERF_TYPE_SOFTWARE = 1,
- PERF_TYPE_TRACEPOINT = 2,
- PERF_TYPE_HW_CACHE = 3,
- PERF_TYPE_RAW = 4,
-
- PERF_TYPE_MAX, /* non-ABI */
-};
-
-/*
- * Generalized performance counter event types, used by the
- * attr.event_id parameter of the sys_perf_counter_open()
- * syscall:
- */
-enum perf_hw_id {
- /*
- * Common hardware events, generalized by the kernel:
- */
- PERF_COUNT_HW_CPU_CYCLES = 0,
- PERF_COUNT_HW_INSTRUCTIONS = 1,
- PERF_COUNT_HW_CACHE_REFERENCES = 2,
- PERF_COUNT_HW_CACHE_MISSES = 3,
- PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
- PERF_COUNT_HW_BRANCH_MISSES = 5,
- PERF_COUNT_HW_BUS_CYCLES = 6,
-
- PERF_COUNT_HW_MAX, /* non-ABI */
-};
-
-/*
- * Generalized hardware cache counters:
- *
- * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
- * { read, write, prefetch } x
- * { accesses, misses }
- */
-enum perf_hw_cache_id {
- PERF_COUNT_HW_CACHE_L1D = 0,
- PERF_COUNT_HW_CACHE_L1I = 1,
- PERF_COUNT_HW_CACHE_LL = 2,
- PERF_COUNT_HW_CACHE_DTLB = 3,
- PERF_COUNT_HW_CACHE_ITLB = 4,
- PERF_COUNT_HW_CACHE_BPU = 5,
-
- PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_id {
- PERF_COUNT_HW_CACHE_OP_READ = 0,
- PERF_COUNT_HW_CACHE_OP_WRITE = 1,
- PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
-
- PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_result_id {
- PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
- PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
-
- PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
-};
-
-/*
- * Special "software" counters provided by the kernel, even if the hardware
- * does not support performance counters. These counters measure various
- * physical and sw events of the kernel (and allow the profiling of them as
- * well):
- */
-enum perf_sw_ids {
- PERF_COUNT_SW_CPU_CLOCK = 0,
- PERF_COUNT_SW_TASK_CLOCK = 1,
- PERF_COUNT_SW_PAGE_FAULTS = 2,
- PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
- PERF_COUNT_SW_CPU_MIGRATIONS = 4,
- PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
- PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
- PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
- PERF_COUNT_SW_EMULATION_FAULTS = 8,
-
- PERF_COUNT_SW_MAX, /* non-ABI */
-};
-
-/*
- * Bits that can be set in attr.sample_type to request information
- * in the overflow packets.
- */
-enum perf_counter_sample_format {
- PERF_SAMPLE_IP = 1U << 0,
- PERF_SAMPLE_TID = 1U << 1,
- PERF_SAMPLE_TIME = 1U << 2,
- PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_READ = 1U << 4,
- PERF_SAMPLE_CALLCHAIN = 1U << 5,
- PERF_SAMPLE_ID = 1U << 6,
- PERF_SAMPLE_CPU = 1U << 7,
- PERF_SAMPLE_PERIOD = 1U << 8,
- PERF_SAMPLE_STREAM_ID = 1U << 9,
- PERF_SAMPLE_RAW = 1U << 10,
-
- PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
-};
-
-/*
- * The format of the data returned by read() on a perf counter fd,
- * as specified by attr.read_format:
- *
- * struct read_format {
- * { u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 id; } && PERF_FORMAT_ID
- * } && !PERF_FORMAT_GROUP
- *
- * { u64 nr;
- * { u64 time_enabled; } && PERF_FORMAT_ENABLED
- * { u64 time_running; } && PERF_FORMAT_RUNNING
- * { u64 value;
- * { u64 id; } && PERF_FORMAT_ID
- * } cntr[nr];
- * } && PERF_FORMAT_GROUP
- * };
- */
-enum perf_counter_read_format {
- PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
- PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
- PERF_FORMAT_ID = 1U << 2,
- PERF_FORMAT_GROUP = 1U << 3,
-
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
-};
-
-#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
-
-/*
- * Hardware event to monitor via a performance monitoring counter:
- */
-struct perf_counter_attr {
-
- /*
- * Major type: hardware/software/tracepoint/etc.
- */
- __u32 type;
-
- /*
- * Size of the attr structure, for fwd/bwd compat.
- */
- __u32 size;
-
- /*
- * Type specific configuration information.
- */
- __u64 config;
-
- union {
- __u64 sample_period;
- __u64 sample_freq;
- };
-
- __u64 sample_type;
- __u64 read_format;
-
- __u64 disabled : 1, /* off by default */
- inherit : 1, /* children inherit it */
- pinned : 1, /* must always be on PMU */
- exclusive : 1, /* only group on PMU */
- exclude_user : 1, /* don't count user */
- exclude_kernel : 1, /* ditto kernel */
- exclude_hv : 1, /* ditto hypervisor */
- exclude_idle : 1, /* don't count when idle */
- mmap : 1, /* include mmap data */
- comm : 1, /* include comm data */
- freq : 1, /* use freq, not period */
- inherit_stat : 1, /* per task counts */
- enable_on_exec : 1, /* next exec enables */
- task : 1, /* trace fork/exit */
- watermark : 1, /* wakeup_watermark */
-
- __reserved_1 : 49;
-
- union {
- __u32 wakeup_events; /* wakeup every n events */
- __u32 wakeup_watermark; /* bytes before wakeup */
- };
- __u32 __reserved_2;
-
- __u64 __reserved_3;
-};
-
-/*
- * Ioctls that can be done on a perf counter fd:
- */
-#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
-#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
-#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
-#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
-#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *)
-
-enum perf_counter_ioc_flags {
- PERF_IOC_FLAG_GROUP = 1U << 0,
-};
-
-/*
- * Structure of the page that can be mapped via mmap
- */
-struct perf_counter_mmap_page {
- __u32 version; /* version number of this structure */
- __u32 compat_version; /* lowest version this is compat with */
-
- /*
- * Bits needed to read the hw counters in user-space.
- *
- * u32 seq;
- * s64 count;
- *
- * do {
- * seq = pc->lock;
- *
- * barrier()
- * if (pc->index) {
- * count = pmc_read(pc->index - 1);
- * count += pc->offset;
- * } else
- * goto regular_read;
- *
- * barrier();
- * } while (pc->lock != seq);
- *
- * NOTE: for obvious reason this only works on self-monitoring
- * processes.
- */
- __u32 lock; /* seqlock for synchronization */
- __u32 index; /* hardware counter identifier */
- __s64 offset; /* add to hardware counter value */
- __u64 time_enabled; /* time counter active */
- __u64 time_running; /* time counter on cpu */
-
- /*
- * Hole for extension of the self monitor capabilities
- */
-
- __u64 __reserved[123]; /* align to 1k */
-
- /*
- * Control data for the mmap() data buffer.
- *
- * User-space reading the @data_head value should issue an rmb(), on
- * SMP capable platforms, after reading this value -- see
- * perf_counter_wakeup().
- *
- * When the mapping is PROT_WRITE the @data_tail value should be
- * written by userspace to reflect the last read data. In this case
- * the kernel will not over-write unread data.
- */
- __u64 data_head; /* head in the data section */
- __u64 data_tail; /* user-space written tail */
-};
-
-#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
-#define PERF_EVENT_MISC_KERNEL (1 << 0)
-#define PERF_EVENT_MISC_USER (2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-
-struct perf_event_header {
- __u32 type;
- __u16 misc;
- __u16 size;
-};
-
-enum perf_event_type {
-
- /*
- * The MMAP events record the PROT_EXEC mappings so that we can
- * correlate userspace IPs to code. They have the following structure:
- *
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * u64 addr;
- * u64 len;
- * u64 pgoff;
- * char filename[];
- * };
- */
- PERF_EVENT_MMAP = 1,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 id;
- * u64 lost;
- * };
- */
- PERF_EVENT_LOST = 2,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * char comm[];
- * };
- */
- PERF_EVENT_COMM = 3,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * };
- */
- PERF_EVENT_EXIT = 4,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 time;
- * u64 id;
- * u64 stream_id;
- * };
- */
- PERF_EVENT_THROTTLE = 5,
- PERF_EVENT_UNTHROTTLE = 6,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * };
- */
- PERF_EVENT_FORK = 7,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, tid;
- *
- * struct read_format values;
- * };
- */
- PERF_EVENT_READ = 8,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * { u64 ip; } && PERF_SAMPLE_IP
- * { u32 pid, tid; } && PERF_SAMPLE_TID
- * { u64 time; } && PERF_SAMPLE_TIME
- * { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 id; } && PERF_SAMPLE_ID
- * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
- * { u32 cpu, res; } && PERF_SAMPLE_CPU
- * { u64 period; } && PERF_SAMPLE_PERIOD
- *
- * { struct read_format values; } && PERF_SAMPLE_READ
- *
- * { u64 nr,
- * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
- *
- * #
- * # The RAW record below is opaque data wrt the ABI
- * #
- * # That is, the ABI doesn't make any promises wrt to
- * # the stability of its content, it may vary depending
- * # on event, hardware, kernel version and phase of
- * # the moon.
- * #
- * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
- * #
- *
- * { u32 size;
- * char data[size];}&& PERF_SAMPLE_RAW
- * };
- */
- PERF_EVENT_SAMPLE = 9,
-
- PERF_EVENT_MAX, /* non-ABI */
-};
-
-enum perf_callchain_context {
- PERF_CONTEXT_HV = (__u64)-32,
- PERF_CONTEXT_KERNEL = (__u64)-128,
- PERF_CONTEXT_USER = (__u64)-512,
-
- PERF_CONTEXT_GUEST = (__u64)-2048,
- PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
- PERF_CONTEXT_GUEST_USER = (__u64)-2560,
-
- PERF_CONTEXT_MAX = (__u64)-4095,
-};
-
-#define PERF_FLAG_FD_NO_GROUP (1U << 0)
-#define PERF_FLAG_FD_OUTPUT (1U << 1)
-
-/*
- * In case some app still references the old symbols:
- */
-
-#define __NR_perf_counter_open __NR_perf_event_open
-
-#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE
-#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE
-
-#endif /* _LINUX_PERF_COUNTER_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 43adbd7f001..c66b34f75ee 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -18,10 +18,6 @@
#include <linux/ioctl.h>
#include <asm/byteorder.h>
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <asm/hw_breakpoint.h>
-#endif
-
/*
* User-space ABI bits:
*/
@@ -215,17 +211,11 @@ struct perf_event_attr {
__u32 wakeup_watermark; /* bytes before wakeup */
};
- union {
- struct { /* Hardware breakpoint info */
- __u64 bp_addr;
- __u32 bp_type;
- __u32 bp_len;
- };
- };
-
__u32 __reserved_2;
- __u64 __reserved_3;
+ __u64 bp_addr;
+ __u32 bp_type;
+ __u32 bp_len;
};
/*
@@ -451,6 +441,10 @@ enum perf_callchain_context {
# include <asm/perf_event.h>
#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
@@ -565,10 +559,12 @@ struct perf_pending_entry {
void (*func)(struct perf_pending_entry *);
};
-typedef void (*perf_callback_t)(struct perf_event *, void *);
-
struct perf_sample_data;
+typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+ struct perf_sample_data *,
+ struct pt_regs *regs);
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -660,18 +656,12 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
- void (*overflow_handler)(struct perf_event *event,
- int nmi, struct perf_sample_data *data,
- struct pt_regs *regs);
+ perf_overflow_handler_t overflow_handler;
#ifdef CONFIG_EVENT_PROFILE
struct event_filter *filter;
#endif
- perf_callback_t callback;
-
- perf_callback_t event_callback;
-
#endif /* CONFIG_PERF_EVENTS */
};
@@ -685,7 +675,7 @@ struct perf_event_context {
* Protect the states of the events in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
@@ -781,7 +771,7 @@ extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
pid_t pid,
- perf_callback_t callback);
+ perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -876,6 +866,8 @@ extern void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu) { }
@@ -906,7 +898,8 @@ static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
-
+static inline void perf_event_enable(struct perf_event *event) { }
+static inline void perf_event_disable(struct perf_event *event) { }
#endif
#define perf_output_put(handle, x) \
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 3c6675c2444..71ff887ca44 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -83,6 +83,8 @@ struct early_platform_driver {
struct platform_driver *pdrv;
struct list_head list;
int requested_id;
+ char *buffer;
+ int bufsize;
};
#define EARLY_PLATFORM_ID_UNSET -2
@@ -102,21 +104,29 @@ extern int early_platform_driver_probe(char *class_str,
int nr_probe, int user_only);
extern void early_platform_cleanup(void);
+#define early_platform_init(class_string, platdrv) \
+ early_platform_init_buffer(class_string, platdrv, NULL, 0)
#ifndef MODULE
-#define early_platform_init(class_string, platform_driver) \
+#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
static __initdata struct early_platform_driver early_driver = { \
.class_str = class_string, \
- .pdrv = platform_driver, \
+ .buffer = buf, \
+ .bufsize = bufsiz, \
+ .pdrv = platdrv, \
.requested_id = EARLY_PLATFORM_ID_UNSET, \
}; \
-static int __init early_platform_driver_setup_func(char *buf) \
+static int __init early_platform_driver_setup_func(char *buffer) \
{ \
- return early_platform_driver_register(&early_driver, buf); \
+ return early_platform_driver_register(&early_driver, buffer); \
} \
early_param(class_string, early_platform_driver_setup_func)
#else /* MODULE */
-#define early_platform_init(class_string, platform_driver)
+#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
+static inline char *early_platform_driver_setup_func(void) \
+{ \
+ return bufsiz ? buf : NULL; \
+}
#endif /* MODULE */
#endif /* _PLATFORM_DEVICE_H_ */
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6a..8227f717c70 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ raw_spinlock_t *rawlock;
+ spinlock_t *spinlock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
- * @lock: list spinlock, remembered for debugging
+ * @lock: spinlock protecting the list (debugging)
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->spinlock = lock;
+ head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->rawlock = lock;
+ head->spinlock = NULL;
#endif
}
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0d65934246a..198b8f9fe05 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -219,7 +219,7 @@ struct dev_pm_ops {
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-struct dev_pm_ops name = { \
+const struct dev_pm_ops name = { \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index fddfafaed02..7c4193eb007 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -334,6 +334,19 @@ extern struct pnp_protocol pnpbios_protocol;
#define pnp_device_is_pnpbios(dev) 0
#endif
+#ifdef CONFIG_PNPACPI
+extern struct pnp_protocol pnpacpi_protocol;
+
+static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev)
+{
+ if (dev->protocol == &pnpacpi_protocol)
+ return dev->data;
+ return NULL;
+}
+#else
+#define pnp_acpi_device(dev) 0
+#endif
+
/* status */
#define PNP_READY 0x0000
#define PNP_ATTACHED 0x0001
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 7456d7d87a1..56f2d63a5cb 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -105,12 +105,7 @@ static inline int ptrace_reparented(struct task_struct *child)
{
return child->real_parent != child->parent;
}
-static inline void ptrace_link(struct task_struct *child,
- struct task_struct *new_parent)
-{
- if (unlikely(child->ptrace))
- __ptrace_link(child, new_parent);
-}
+
static inline void ptrace_unlink(struct task_struct *child)
{
if (unlikely(child->ptrace))
@@ -169,9 +164,9 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
INIT_LIST_HEAD(&child->ptraced);
child->parent = child->real_parent;
child->ptrace = 0;
- if (unlikely(ptrace)) {
+ if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
child->ptrace = current->ptrace;
- ptrace_link(child, current->parent);
+ __ptrace_link(child, current->parent);
}
}
@@ -278,6 +273,18 @@ static inline void user_enable_block_step(struct task_struct *task)
}
#endif /* arch_has_block_step */
+#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
+extern void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info);
+#else
+static inline void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+}
+#endif
+
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 7a9754c9677..01b3d759f1f 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -10,7 +10,7 @@ struct platform_pwm_backlight_data {
unsigned int dft_brightness;
unsigned int pwm_period_ns;
int (*init)(struct device *dev);
- int (*notify)(int brightness);
+ int (*notify)(struct device *dev, int brightness);
void (*exit)(struct device *dev);
};
diff --git a/include/linux/quota.h b/include/linux/quota.h
index ce9a9b2e5cd..e70e6219424 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -73,6 +73,8 @@
/* Quota format type IDs */
#define QFMT_VFS_OLD 1
#define QFMT_VFS_V0 2
+#define QFMT_OCFS2 3
+#define QFMT_VFS_V1 4
/* Size of block in which space limits are passed through the quota
* interface */
@@ -334,7 +336,7 @@ struct quotactl_ops {
struct quota_format_type {
int qf_fmt_id; /* Quota format id */
- struct quota_format_ops *qf_ops; /* Operations of format */
+ const struct quota_format_ops *qf_ops; /* Operations of format */
struct module *qf_owner; /* Module implementing quota format */
struct quota_format_type *qf_next;
};
@@ -394,7 +396,7 @@ struct quota_info {
struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
- struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
+ const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
int register_quota_format(struct quota_format_type *fmt);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index d92480f8285..1cbbd2c11aa 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -78,6 +78,25 @@ struct raid6_calls {
/* Selected algorithm */
extern struct raid6_calls raid6_call;
+/* Various routine sets */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_calls raid6_intx16;
+extern const struct raid6_calls raid6_intx32;
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
int raid6_select_algo(void);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index c4ba9a78721..96cc307ed9f 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -101,4 +101,9 @@ static inline void exit_rcu(void)
{
}
+static inline int rcu_preempt_depth(void)
+{
+ return 0;
+}
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c93eee5911b..8044b1b9433 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void);
extern void synchronize_rcu(void);
extern void exit_rcu(void);
+/*
+ * Defined as macro as it is a very low level header
+ * included from areas that don't even know about current
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock(void)
@@ -63,6 +69,11 @@ static inline void exit_rcu(void)
{
}
+static inline int rcu_preempt_depth(void)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock_bh(void)
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 490c5b37b6d..030d92255c7 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
#ifndef __LINUX_REGULATOR_CONSUMER_H_
#define __LINUX_REGULATOR_CONSUMER_H_
+#include <linux/device.h>
+
/*
* Regulator operating modes.
*
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 87f5f176d4e..234a8476cba 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -43,16 +43,20 @@ struct regulator;
/**
* struct regulator_state - regulator state during low power system states
*
- * This describes a regulators state during a system wide low power state.
+ * This describes a regulator's state during a system wide low power
+ * state. One of enabled or disabled must be set for the
+ * configuration to be applied.
*
* @uV: Operating voltage during suspend.
* @mode: Operating mode during suspend.
* @enabled: Enabled during suspend.
+ * @disabled: Disabled during suspend.
*/
struct regulator_state {
int uV; /* suspend voltage */
unsigned int mode; /* suspend regulator operating mode */
int enabled; /* is regulator enabled in this suspend state */
+ int disabled; /* is the regulator disabled in this suspend state */
};
/**
diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h
new file mode 100644
index 00000000000..9936763621c
--- /dev/null
+++ b/include/linux/regulator/max8660.h
@@ -0,0 +1,57 @@
+/*
+ * max8660.h -- Voltage regulation for the Maxim 8660/8661
+ *
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8660_H
+#define __LINUX_REGULATOR_MAX8660_H
+
+#include <linux/regulator/machine.h>
+
+enum {
+ MAX8660_V3,
+ MAX8660_V4,
+ MAX8660_V5,
+ MAX8660_V6,
+ MAX8660_V7,
+ MAX8660_V_END,
+};
+
+/**
+ * max8660_subdev_data - regulator subdev data
+ * @id: regulator id
+ * @name: regulator name
+ * @platform_data: regulator init data
+ */
+struct max8660_subdev_data {
+ int id;
+ char *name;
+ struct regulator_init_data *platform_data;
+};
+
+/**
+ * max8660_platform_data - platform data for max8660
+ * @num_subdevs: number of regulators used
+ * @subdevs: pointer to regulators used
+ * @en34_is_high: if EN34 is driven high, regulators cannot be en-/disabled.
+ */
+struct max8660_platform_data {
+ int num_subdevs;
+ struct max8660_subdev_data *subdevs;
+ unsigned en34_is_high:1;
+};
+#endif
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index a05b4a20768..c96c1858fe2 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2051,25 +2051,12 @@ void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
struct treepath *path, struct reiserfs_dir_entry *de);
struct dentry *reiserfs_get_parent(struct dentry *);
-/* procfs.c */
-
-#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
-#define REISERFS_PROC_INFO
-#else
-#undef REISERFS_PROC_INFO
-#endif
+#ifdef CONFIG_REISERFS_PROC_INFO
int reiserfs_proc_info_init(struct super_block *sb);
int reiserfs_proc_info_done(struct super_block *sb);
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func);
-void reiserfs_proc_unregister_global(const char *name);
int reiserfs_proc_info_global_init(void);
int reiserfs_proc_info_global_done(void);
-int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
- int count, int *eof, void *data);
-
-#if defined( REISERFS_PROC_INFO )
#define PROC_EXP( e ) e
@@ -2084,6 +2071,26 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \
PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
#else
+static inline int reiserfs_proc_info_init(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_done(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_init(void)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_done(void)
+{
+ return 0;
+}
+
#define PROC_EXP( e )
#define VOID_V ( ( void ) 0 )
#define PROC_INFO_MAX( sb, field, value ) VOID_V
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba703260..b019ae64e2a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
+#ifdef CONFIG_KSM
+ atomic_t ksm_refcount;
+#endif
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
@@ -38,6 +41,34 @@ struct anon_vma {
};
#ifdef CONFIG_MMU
+#ifdef CONFIG_KSM
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+ atomic_set(&anon_vma->ksm_refcount, 0);
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return atomic_read(&anon_vma->ksm_refcount);
+}
+#else
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return 0;
+}
+#endif /* CONFIG_KSM */
+
+static inline struct anon_vma *page_anon_vma(struct page *page)
+{
+ if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
+ PAGE_MAPPING_ANON)
+ return NULL;
+ return page_rmapping(page);
+}
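page_anon_vma() relies on the kernel's tagged-pointer convention: page->mapping is word-aligned, so its low bits can carry PAGE_MAPPING_ANON and friends, and masking with PAGE_MAPPING_FLAGS recovers the real pointer (page_rmapping() does that masking). A standalone userspace sketch of the same low-bit tagging trick follows; tag_anon(), untag_anon() and struct fake_vma are invented for illustration:

/* Sketch only: storing a flag in the low bits of an aligned pointer. */
#include <stdio.h>
#include <stdint.h>

#define MAPPING_ANON    0x1UL
#define MAPPING_FLAGS   0x3UL

struct fake_vma { int dummy; };         /* 4-byte aligned, low 2 bits free */

static void *tag_anon(struct fake_vma *vma)
{
        return (void *)((uintptr_t)vma | MAPPING_ANON);
}

static struct fake_vma *untag_anon(void *mapping)
{
        if (((uintptr_t)mapping & MAPPING_FLAGS) != MAPPING_ANON)
                return NULL;            /* not an "anon" mapping */
        return (struct fake_vma *)((uintptr_t)mapping & ~MAPPING_FLAGS);
}

int main(void)
{
        struct fake_vma vma;
        void *m = tag_anon(&vma);

        printf("%d\n", untag_anon(m) == &vma);  /* prints 1 */
        return 0;
}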
static inline void anon_vma_lock(struct vm_area_struct *vma)
{
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
+void anon_vma_free(struct anon_vma *);
/*
* rmap interfaces called when adding or removing pte of page
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page)
*/
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *cnt, unsigned long *vm_flags);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+ unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
enum ttu_flags {
TTU_UNMAP = 0, /* unmap mode */
TTU_MIGRATION = 1, /* migration mode */
@@ -94,6 +129,8 @@ enum ttu_flags {
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+ unsigned long address, enum ttu_flags flags);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d53..281d8fd775e 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
* @owner: the mutex owner
*/
struct rt_mutex {
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 14fc906ed60..05330fc5b43 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -368,11 +368,9 @@ enum {
#define RTAX_MAX (__RTAX_MAX - 1)
#define RTAX_FEATURE_ECN 0x00000001
-#define RTAX_FEATURE_NO_SACK 0x00000002
-#define RTAX_FEATURE_NO_TSTAMP 0x00000004
+#define RTAX_FEATURE_SACK 0x00000002
+#define RTAX_FEATURE_TIMESTAMP 0x00000004
#define RTAX_FEATURE_ALLFRAG 0x00000008
-#define RTAX_FEATURE_NO_WSCALE 0x00000010
-#define RTAX_FEATURE_NO_DSACK 0x00000020
struct rta_session {
__u8 proto;
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 00000000000..71e0b00b6f2
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+ arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock) _raw_write_lock(lock)
+#define read_lock(lock) _raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _raw_read_lock_irq(lock)
+#define read_lock_bh(lock) _raw_read_lock_bh(lock)
+#define write_lock_irq(lock) _raw_write_lock_irq(lock)
+#define write_lock_bh(lock) _raw_write_lock_bh(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
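For context, a usage fragment for the rwlock API declared above; this is not a standalone program, and the demo_stats structure and helper names are invented for illustration. Readers may nest freely while a writer holds the lock exclusively, and the irqsave form keeps the write side usable from interrupt-capable paths:

/* Sketch only: assumed kernel context; include spinlock.h, never rwlock.h directly. */
#include <linux/spinlock.h>

struct demo_stats {
        rwlock_t lock;
        unsigned long packets;
        unsigned long bytes;
};

static void demo_stats_init(struct demo_stats *s)
{
        rwlock_init(&s->lock);
        s->packets = 0;
        s->bytes = 0;
}

/* Many readers may hold the lock at once. */
static unsigned long demo_stats_bytes(struct demo_stats *s)
{
        unsigned long ret;

        read_lock(&s->lock);
        ret = s->bytes;
        read_unlock(&s->lock);
        return ret;
}

/* Writers exclude readers and each other. */
static void demo_stats_update(struct demo_stats *s, unsigned long len)
{
        unsigned long flags;

        write_lock_irqsave(&s->lock, flags);
        s->packets++;
        s->bytes += len;
        write_unlock_irqrestore(&s->lock, flags);
}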
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 00000000000..9c9f0495d37
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _raw_read_lock(lock) __raw_read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _raw_write_lock(lock) __raw_write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __raw_read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+ do_raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+ do_raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 00000000000..bd31808c7d8
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
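As a usage sketch of the initializers recommended above (hypothetical driver code, not part
of this patch; the lock and data names are made up), DEFINE_RWLOCK() pairs with the
read_lock()/write_lock() API pulled in through <linux/spinlock.h>:

	#include <linux/spinlock.h>	/* pulls in linux/rwlock.h and rwlock_types.h */

	static DEFINE_RWLOCK(example_lock);	/* preferred over the deprecated RW_LOCK_UNLOCKED */
	static int example_value;

	static int example_read(void)
	{
		int v;

		read_lock(&example_lock);	/* multiple readers may hold this concurrently */
		v = example_value;
		read_unlock(&example_lock);
		return v;
	}

	static void example_write(int v)
	{
		write_lock(&example_lock);	/* exclusive against readers and other writers */
		example_value = v;
		write_unlock(&example_lock);
	}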
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6c261..bdfcc252797 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->activity != 0);
-}
+extern int rwsem_is_locked(struct rw_semaphore *sem);
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89115ec7d43..f2f842db03c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
+#define TASK_STATE_MAX 512
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+
+extern char ___assert_task_state[1 - 2*!!(
+ sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
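The ___assert_task_state declaration a few lines above is a build-time check: the array size
evaluates to 1 when TASK_STATE_TO_CHAR_STR has exactly one character per task-state value
(its string length must equal ilog2(TASK_STATE_MAX) + 1), and to -1 otherwise, which the
compiler rejects. A standalone sketch of the same idiom, with made-up names and values:

	#include <linux/log2.h>		/* ilog2(), constant-foldable for constant arguments */

	#define MY_FLAG_MAX	8		/* next free flag value; flags use bits 0..2 */
	#define MY_FLAG_CHARS	"ABCD"		/* one char for value 0 plus one per flag bit */

	/* sizeof("ABCD") - 1 == 4 and ilog2(8) + 1 == 4, so the array size is 1 and this
	 * compiles; let the two drift apart and the size becomes -1, failing the build. */
	extern char __assert_my_flags[1 - 2*!!(
			sizeof(MY_FLAG_CHARS)-1 != ilog2(MY_FLAG_MAX)+1)];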
@@ -1091,7 +1097,8 @@ struct sched_class {
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
- void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+ void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+ void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
@@ -1102,7 +1109,7 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
- void (*task_new) (struct rq *rq, struct task_struct *p);
+ void (*task_fork) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
@@ -1111,10 +1118,11 @@ struct sched_class {
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio, int running);
- unsigned int (*get_rr_interval) (struct task_struct *task);
+ unsigned int (*get_rr_interval) (struct rq *rq,
+ struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p);
+ void (*moved_group) (struct task_struct *p, int on_rq);
#endif
};
@@ -1151,8 +1159,6 @@ struct sched_entity {
u64 start_runtime;
u64 avg_wakeup;
- u64 avg_running;
-
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
@@ -1175,7 +1181,6 @@ struct sched_entity {
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
- u64 nr_forced2_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
@@ -1411,7 +1416,7 @@ struct task_struct {
#endif
/* Protection of the PI data structures: */
- spinlock_t pi_lock;
+ raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
@@ -1544,10 +1549,18 @@ struct task_struct {
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
unsigned long stack_start;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch jobs */
+ struct memcg_batch_info {
+ int do_batch; /* incremented when batch uncharge started */
+ struct mem_cgroup *memcg; /* target memcg of uncharge */
+ unsigned long bytes; /* uncharged usage */
+ unsigned long memsw_bytes; /* uncharged mem+swap usage */
+ } memcg_batch;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
@@ -1840,7 +1853,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
extern int sched_clock_stable;
#endif
-extern unsigned long long sched_clock(void);
+/* ftrace calls sched_clock() directly */
+extern unsigned long long notrace sched_clock(void);
extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);
@@ -1903,14 +1917,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+ SCHED_TUNABLESCALING_NONE,
+ SCHED_TUNABLESCALING_LOG,
+ SCHED_TUNABLESCALING_LINEAR,
+ SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
@@ -2066,7 +2088,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
@@ -2085,11 +2106,6 @@ static inline int kill_cad_pid(int sig, int priv)
#define SEND_SIG_PRIV ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)
-static inline int is_si_special(const struct siginfo *info)
-{
- return info <= SEND_SIG_FORCED;
-}
-
/*
* True if we are on the alternate signal stack.
*/
@@ -2585,8 +2601,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
}
#endif /* CONFIG_MM_OWNER */
-#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
-
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 466cbadbd1e..2c627d361c0 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -95,8 +95,13 @@ struct seq_file;
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
+#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
+#else
+#define dac_mmap_min_addr 0UL
+#endif
+
/*
* Values used in the task_security_ops calls
*/
@@ -121,6 +126,7 @@ struct request_sock;
#define LSM_UNSAFE_PTRACE 2
#define LSM_UNSAFE_PTRACE_CAP 4
+#ifdef CONFIG_MMU
/*
* If a hint addr is less than mmap_min_addr change hint to be as
* low as possible but still greater than mmap_min_addr
@@ -135,6 +141,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
}
extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
#ifdef CONFIG_SECURITY
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 1b191c176bc..8a4adbef8a0 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -86,6 +86,7 @@ struct task_struct;
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ struct list_head sem_pending; /* pending single-sop operations */
};
/* One sem_array data structure for each set of semaphores in the system. */
@@ -96,11 +97,13 @@ struct sem_array {
struct sem *sem_base; /* ptr to first semaphore in array */
struct list_head sem_pending; /* pending operations to be processed */
struct list_head list_id; /* undo requests on this array */
- unsigned long sem_nsems; /* no. of semaphores in array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
};
/* One queue for each sleeping process in the system. */
struct sem_queue {
+ struct list_head simple_list; /* queue of pending operations */
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index deee7afd8d6..e164291fb3e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -41,20 +41,4 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
extern int init_tmpfs(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
-#ifdef CONFIG_TMPFS_POSIX_ACL
-int shmem_check_acl(struct inode *, int);
-int shmem_acl_init(struct inode *, struct inode *);
-
-extern struct xattr_handler shmem_xattr_acl_access_handler;
-extern struct xattr_handler shmem_xattr_acl_default_handler;
-
-extern struct generic_acl_operations shmem_acl_ops;
-
-#else
-static inline int shmem_acl_init(struct inode *inode, struct inode *dir)
-{
- return 0;
-}
-#endif /* CONFIG_TMPFS_POSIX_ACL */
-
#endif
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 850d057500d..ca6b2b31799 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
@@ -166,7 +166,7 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5ad70a60fd7..1e14beb23f9 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
@@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node);
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
index d53642d2d89..67ed2c54283 100644
--- a/include/linux/sm501-regs.h
+++ b/include/linux/sm501-regs.h
@@ -31,6 +31,8 @@
#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
+#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19)
+
/* miscellaneous control */
#define SM501_MISC_CONTROL (0x000004)
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
new file mode 100644
index 00000000000..51b3e771a9a
--- /dev/null
+++ b/include/linux/spi/dw_spi.h
@@ -0,0 +1,212 @@
+#ifndef DW_SPI_HEADER_H
+#define DW_SPI_HEADER_H
+#include <linux/io.h>
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET 0
+
+#define SPI_FRF_OFFSET 4
+#define SPI_FRF_SPI 0x0
+#define SPI_FRF_SSP 0x1
+#define SPI_FRF_MICROWIRE 0x2
+#define SPI_FRF_RESV 0x3
+
+#define SPI_MODE_OFFSET 6
+#define SPI_SCPH_OFFSET 6
+#define SPI_SCOL_OFFSET 7
+#define SPI_TMOD_OFFSET 8
+#define SPI_TMOD_TR 0x0 /* xmit & recv */
+#define SPI_TMOD_TO 0x1 /* xmit only */
+#define SPI_TMOD_RO 0x2 /* recv only */
+#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET 10
+#define SPI_SRL_OFFSET 11
+#define SPI_CFS_OFFSET 12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK 0x7f /* cover 7 bits */
+#define SR_BUSY (1 << 0)
+#define SR_TF_NOT_FULL (1 << 1)
+#define SR_TF_EMPT (1 << 2)
+#define SR_RF_NOT_EMPT (1 << 3)
+#define SR_RF_FULL (1 << 4)
+#define SR_TX_ERR (1 << 5)
+#define SR_DCOL (1 << 6)
+
+/* Bit fields in ISR, IMR, RISR, 7 bits */
+#define SPI_INT_TXEI (1 << 0)
+#define SPI_INT_TXOI (1 << 1)
+#define SPI_INT_RXUI (1 << 2)
+#define SPI_INT_RXOI (1 << 3)
+#define SPI_INT_RXFI (1 << 4)
+#define SPI_INT_MSTI (1 << 5)
+
+/* TX RX interrupt level threshold, max can be 256 */
+#define SPI_INT_THRESHOLD 32
+
+enum dw_ssi_type {
+ SSI_MOTO_SPI = 0,
+ SSI_TI_SSP,
+ SSI_NS_MICROWIRE,
+};
+
+struct dw_spi_reg {
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 ssienr;
+ u32 mwcr;
+ u32 ser;
+ u32 baudr;
+ u32 txfltr;
+ u32 rxfltr;
+ u32 txflr;
+ u32 rxflr;
+ u32 sr;
+ u32 imr;
+ u32 isr;
+ u32 risr;
+ u32 txoicr;
+ u32 rxoicr;
+ u32 rxuicr;
+ u32 msticr;
+ u32 icr;
+ u32 dmacr;
+ u32 dmatdlr;
+ u32 dmardlr;
+ u32 idr;
+ u32 version;
+	u32	dr;		/* Currently operated as 32 bits,
+				   though only the low 16 bits matter */
+} __packed;
+
+struct dw_spi {
+ struct spi_master *master;
+ struct spi_device *cur_dev;
+ struct device *parent_dev;
+ enum dw_ssi_type type;
+
+ void __iomem *regs;
+ unsigned long paddr;
+ u32 iolen;
+ int irq;
+ u32 max_freq; /* max bus freq supported */
+
+ u16 bus_num;
+	u16			num_cs;		/* number of supported slaves */
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ int run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ struct chip_data *prev_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ size_t rx_map_len;
+ size_t tx_map_len;
+	u8			n_bytes;	/* current op is 1 or 2 bytes */
+	u8			max_bits_per_word;	/* maximum is 16 bits */
+ u32 dma_width;
+ int cs_change;
+ int (*write)(struct dw_spi *dws);
+ int (*read)(struct dw_spi *dws);
+ irqreturn_t (*transfer_handler)(struct dw_spi *dws);
+ void (*cs_control)(u32 command);
+
+ /* Dma info */
+ int dma_inited;
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+ int txdma_done;
+ int rxdma_done;
+ u64 tx_param;
+ u64 rx_param;
+ struct device *dma_dev;
+ dma_addr_t dma_addr;
+
+ /* Bus interface info */
+ void *priv;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+#define dw_readl(dw, name) \
+ __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_writel(dw, name, val) \
+ __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_readw(dw, name) \
+ __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name))
+#define dw_writew(dw, name, val) \
+ __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
+
+static inline void spi_enable_chip(struct dw_spi *dws, int enable)
+{
+ dw_writel(dws, ssienr, (enable ? 1 : 0));
+}
+
+static inline void spi_set_clk(struct dw_spi *dws, u16 div)
+{
+ dw_writel(dws, baudr, div);
+}
+
+static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
+{
+ if (cs > dws->num_cs)
+ return;
+ dw_writel(dws, ser, 1 << cs);
+}
+
+/* Disable IRQ bits */
+static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, imr) & ~mask;
+ dw_writel(dws, imr, new_mask);
+}
+
+/* Enable IRQ bits */
+static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, imr) | mask;
+ dw_writel(dws, imr, new_mask);
+}
+
+/*
+ * Each SPI slave device working with the dw_api controller should
+ * have such a structure claiming its working mode (PIO/DMA etc.),
+ * which can be saved in the "controller_data" member of the
+ * struct spi_device.
+ */
+struct dw_spi_chip {
+	u8 poll_mode;	/* 0 for controller polling mode */
+	u8 type;	/* SPI/SSP/Microwire */
+ u8 enable_dma;
+ void (*cs_control)(u32 command);
+};
+
+extern int dw_spi_add_host(struct dw_spi *dws);
+extern void dw_spi_remove_host(struct dw_spi *dws);
+extern int dw_spi_suspend_host(struct dw_spi *dws);
+extern int dw_spi_resume_host(struct dw_spi *dws);
+#endif /* DW_SPI_HEADER_H */
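A board-code sketch of the controller_data convention described in the comment above (the
bus number, chip select, modalias and speed below are made up; only struct dw_spi_chip and
the dw_ssi_type enum come from this header):

	#include <linux/spi/spi.h>
	#include <linux/spi/dw_spi.h>

	static struct dw_spi_chip example_chip = {
		.poll_mode	= 0,		/* see the poll_mode note in the header above */
		.type		= SSI_MOTO_SPI,	/* Motorola SPI frame format */
		.enable_dma	= 0,
	};

	static struct spi_board_info example_board_info[] = {
		{
			.modalias		= "spidev",
			.max_speed_hz		= 1000000,
			.bus_num		= 0,
			.chip_select		= 0,
			.controller_data	= &example_chip, /* read back by the controller driver */
		},
	};

	/* typically registered from board init code:
	 *	spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info));
	 */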
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
new file mode 100644
index 00000000000..2e8db3d2d2e
--- /dev/null
+++ b/include/linux/spi/sh_msiof.h
@@ -0,0 +1,10 @@
+#ifndef __SPI_SH_MSIOF_H__
+#define __SPI_SH_MSIOF_H__
+
+struct sh_msiof_spi_info {
+ int tx_fifo_override;
+ int rx_fifo_override;
+ u16 num_chipselect;
+};
+
+#endif /* __SPI_SH_MSIOF_H__ */
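A minimal sketch of how this platform data might be supplied (the device name, id and FIFO
values are assumptions for illustration, not taken from this patch):

	#include <linux/platform_device.h>
	#include <linux/spi/sh_msiof.h>

	static struct sh_msiof_spi_info example_msiof_info = {
		.tx_fifo_override	= 64,	/* FIFO override values here are made up */
		.rx_fifo_override	= 64,
		.num_chipselect		= 1,
	};

	static struct platform_device example_msiof_device = {
		.name	= "spi_sh_msiof",	/* assumed driver name */
		.id	= 0,
		.dev	= {
			.platform_data = &example_msiof_info,
		},
	};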
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
new file mode 100644
index 00000000000..6f17278810b
--- /dev/null
+++ b/include/linux/spi/xilinx_spi.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_SPI_XILINX_SPI_H
+#define __LINUX_SPI_XILINX_SPI_H
+
+/**
+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
+ * @num_chipselect:	Number of chip selects supported by the IP.
+ * @little_endian:	Whether registers should be accessed little-endian.
+ * @bits_per_word: Number of bits per word.
+ * @devices: Devices to add when the driver is probed.
+ * @num_devices: Number of devices in the devices array.
+ */
+struct xspi_platform_data {
+ u16 num_chipselect;
+ bool little_endian;
+ u8 bits_per_word;
+ struct spi_board_info *devices;
+ u8 num_devices;
+};
+
+#endif /* __LINUX_SPI_XILINX_SPI_H */
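Following the kernel-doc above, a hypothetical board file could fill the structure like this
(the modalias, speed and device count are made up):

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/spi/spi.h>
	#include <linux/spi/xilinx_spi.h>

	static struct spi_board_info example_xspi_devices[] = {
		{
			.modalias	= "spidev",
			.max_speed_hz	= 10000000,
			.chip_select	= 0,
		},
	};

	static struct xspi_platform_data example_xspi_pdata = {
		.num_chipselect	= 1,
		.little_endian	= false,
		.bits_per_word	= 8,
		.devices	= example_xspi_devices,
		.num_devices	= ARRAY_SIZE(example_xspi_devices),
	};
	/* handed to the xilinx_spi platform device through dev.platform_data */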
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d8..86088213334 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
- * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
* defines the generic type and initializers
*
* linux/spinlock_up.h:
- * contains the __raw_spin_*()/etc. version of UP
+ * contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
@@ -75,12 +75,12 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -89,45 +89,31 @@
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define spin_lock_init(lock) \
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __spin_lock_init((lock), #lock, &__key); \
+ __raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
-# define spin_lock_init(lock) \
- do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock) \
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define rwlock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key); \
-} while (0)
-#else
-# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
-#define spin_is_contended(lock) (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock) (((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
- __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+ arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+ arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+ arch_spin_unlock(&lock->raw_lock);
+}
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
-
/*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
*/
-#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
-#define spin_lock(lock) _spin_lock(lock)
+#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
- _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave_nested(lock, subclass); \
+ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_lock_irqsave(lock, flags); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
+ _raw_spin_lock_irqsave(lock, flags); \
} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
- spin_lock_irqsave(lock, flags)
-#endif
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ raw_spin_lock_irqsave(lock, flags)
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
-#define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
-#define spin_unlock(lock) _spin_unlock(lock)
-#define read_unlock(lock) _read_unlock(lock)
-#define write_unlock(lock) _write_unlock(lock)
-#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-#define read_unlock_irq(lock) _read_unlock_irq(lock)
-#define write_unlock_irq(lock) _write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_unlock_irqrestore(lock, flags); \
- } while (0)
-#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+#endif
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
- } while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
+#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+ __cond_lock(lock, _raw_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+ return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+ return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ assert_raw_spin_locked(&lock->rlock);
+}
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock) (!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#endif /* __LINUX_SPINLOCK_H */
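A short sketch of the split API that the wrappers above establish (lock and counter names
are hypothetical): spinlock_t callers keep the familiar spin_lock_*() names, which now
forward to the raw_spin_*() operations on the embedded rlock, while raw_spinlock_t users
call the raw variants directly:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);		/* spinlock_t wrapping a raw_spinlock in .rlock */
	static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* raw_spinlock_t used directly */
	static unsigned long example_counter;

	static void example_bump(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_lock, flags);	/* forwards to raw_spin_lock_irqsave() */
		example_counter++;
		spin_unlock_irqrestore(&example_lock, flags);

		raw_spin_lock(&example_raw_lock);
		example_counter++;
		raw_spin_unlock(&example_raw_lock);
	}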
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459b..e253ccd7a60 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
int in_lock_functions(unsigned long addr);
-#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
-
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
- __acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
+
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
return 0;
}
-static inline int __read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
/*
* On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
+ * do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
- _raw_spin_lock_flags(lock, &flags);
+ do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
#endif /* CONFIG_PREEMPT */
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable();
-}
-
-static inline void __write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable();
}
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
- unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
return 0;
}
+#include <linux/rwlock_api_smp.h>
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d316457..af1f47229e7 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable_no_resched(); local_bh_enable(); \
+ __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _read_lock(lock) __LOCK(lock)
-#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
-#define _read_lock_bh(lock) __LOCK_BH(lock)
-#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _read_lock_irq(lock) __LOCK_IRQ(lock)
-#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
-#define _read_unlock(lock) __UNLOCK(lock)
-#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_write_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a..851b7783720 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
#include <linux/lockdep.h>
-typedef struct {
- raw_spinlock_t raw_lock;
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
@@ -29,26 +29,10 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
-typedef struct {
- raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# define RW_DEP_MAP_INIT(lockname)
+# define SPIN_DEBUG_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- .magic = SPINLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- .magic = RWLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
-#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
/*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
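
The hunk above splits the old all-in-one lock type in two: raw_spinlock_t now carries the real lock state (an arch_spinlock_t plus the optional debug and lockdep fields), while the new spinlock_t is a union whose first member is a struct raw_spinlock named rlock, with lockdep's dep_map overlaid at the same offset through the LOCK_PADSIZE padding trick. A minimal usage sketch, assuming kernel context and the raw_spin_lock*() wrappers added elsewhere in this series (the lock names are illustrative only):

/* Sketch only: declaring and taking both lock flavours. */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* raw_spinlock_t */
static DEFINE_SPINLOCK(example_lock);		/* spinlock_t, wraps .rlock */

static void example_locking(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_raw_lock, flags);
	/* low-level critical section that must never sleep */
	raw_spin_unlock_irqrestore(&example_raw_lock, flags);

	spin_lock(&example_lock);	/* ends up operating on example_lock.rlock */
	/* ... */
	spin_unlock(&example_lock);
}
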
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..c09b6407ae1 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
#endif
typedef struct {
/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215..b14f6a91e19 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x) ((x)->slock == 0)
+#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return oldval > 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
/*
* Read-write spinlocks. No debug version.
*/
-#define __raw_read_lock(lock) do { (void)(lock); } while (0)
-#define __raw_write_lock(lock) do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock) ((void)(lock), 0)
+#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
-#define __raw_spin_is_contended(lock) (((void)(lock), 0))
+#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define __raw_read_can_lock(lock) (((void)(lock), 1))
-#define __raw_write_can_lock(lock) (((void)(lock), 1))
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
-#define __raw_spin_unlock_wait(lock) \
- do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+ do { cpu_relax(); } while (arch_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */
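
The renames in the two UP headers above are the other half of the same namespace cleanup: the per-architecture primitives move from __raw_spin_*()/raw_spinlock_t to arch_spin_*()/arch_spinlock_t, freeing the raw_ prefix for the new raw_spinlock_t layer. A rough sketch of the resulting call chain, assuming the wrappers introduced elsewhere in this series:

/*
 * spin_lock(&l)             - takes a spinlock_t, forwards to &l->rlock
 *   raw_spin_lock(&r)       - takes a raw_spinlock_t; lockdep/debug hooks here
 *     _raw_spin_lock(&r)    - per-config implementation (this file on UP)
 *       arch_spin_lock(&a)  - per-architecture arch_spinlock_t primitive
 *
 * On UP without CONFIG_DEBUG_SPINLOCK the arch_*() ops above compile to
 * nothing, so taking the lock reduces to disabling preemption.
 */
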
diff --git a/include/linux/string.h b/include/linux/string.h
index b8508868d5a..651839a2a75 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,7 +62,15 @@ extern char * strnchr(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
-extern char * __must_check strstrip(char *);
+extern char * __must_check skip_spaces(const char *);
+
+extern char *strim(char *);
+
+static inline __must_check char *strstrip(char *str)
+{
+ return strim(str);
+}
+
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
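
The string.h hunk replaces the exported strstrip() with strim() plus a new skip_spaces() helper, keeping strstrip() as an inline wrapper so existing callers continue to build. A small hedged example of the intended use (kernel context; the buffer contents are made up):

/* Sketch only: trimming whitespace with the new helpers. */
#include <linux/kernel.h>
#include <linux/string.h>

static void example_trim(void)
{
	char buf[] = "  hello world \n";
	char *trimmed = strim(buf);		/* NUL-terminates after "world",       */
						/* returns pointer past leading spaces */
	const char *num = skip_spaces("   42");	/* points at "42", nothing modified    */

	pr_info("'%s' '%s'\n", trimmed, num);	/* -> 'hello world' '42' */
}
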
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 10709cbe96f..c2786f20016 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -28,9 +28,6 @@
#ifdef __KERNEL__
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-
/*
* Enable RPC debugging/profiling.
*/
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 87b895d5c78..b78f16b1dea 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -40,6 +40,8 @@
#ifndef _LINUX_SUNRPC_RPC_RDMA_H
#define _LINUX_SUNRPC_RPC_RDMA_H
+#include <linux/types.h>
+
struct rpcrdma_segment {
__be32 rs_handle; /* Registered memory handle */
__be32 rs_length; /* Length of the chunk in bytes */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 401097781fc..7bc7fd5291c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,12 +130,14 @@ struct rpc_task_setup {
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
+#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
@@ -171,7 +173,8 @@ struct rpc_task_setup {
#define RPC_PRIORITY_LOW (-1)
#define RPC_PRIORITY_NORMAL (0)
#define RPC_PRIORITY_HIGH (1)
-#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)
+#define RPC_PRIORITY_PRIVILEGED (2)
+#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)
struct rpc_timer {
struct timer_list timer;
@@ -227,6 +230,7 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *,
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
+int rpc_queue_empty(struct rpc_wait_queue *);
void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_malloc(struct rpc_task *, size_t);
void rpc_free(void *);
@@ -252,6 +256,16 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task)
return __rpc_wait_for_completion_task(task, NULL);
}
+static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio)
+{
+ task->tk_priority = prio - RPC_PRIORITY_LOW;
+}
+
+static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio)
+{
+ return (task->tk_priority + RPC_PRIORITY_LOW == prio);
+}
+
#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
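
The sched.h changes add RPC_TASK_SOFTCONN (fail the task instead of retrying when the transport cannot connect), a fourth queue level RPC_PRIORITY_PRIVILEGED above HIGH, and small helpers that store a task's priority relative to RPC_PRIORITY_LOW. A hedged sketch of how a caller might use the helpers (the task is assumed to come from the usual rpc_run_task() path):

/* Sketch only: move an RPC task to the new privileged queue level. */
static void example_make_privileged(struct rpc_task *task)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);

	if (rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED))
		;	/* tk_priority now holds PRIVILEGED - LOW == 3 */
}
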
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 52e8cb0a756..5a3085b9b39 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -29,7 +29,6 @@ struct svc_pool_stats {
unsigned long packets;
unsigned long sockets_queued;
unsigned long threads_woken;
- unsigned long overloads_avoided;
unsigned long threads_timedout;
};
@@ -50,7 +49,6 @@ struct svc_pool {
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
- int sp_nwaking; /* number of threads woken but not yet active */
struct svc_pool_stats sp_stats; /* statistics on pool operation */
} ____cacheline_aligned_in_smp;
@@ -275,16 +273,11 @@ struct svc_rqst {
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
struct svc_cacherep * rq_cacherep; /* cache info */
- struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to
- * determine what device number
- * to report (real or virtual)
- */
int rq_splice_ok; /* turned off in gss privacy
* to prevent encrypting page
* cache pages */
wait_queue_head_t rq_wait; /* synchronization */
struct task_struct *rq_task; /* service thread */
- int rq_waking; /* 1 if thread is being woken */
};
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4ec90019c1a..a2602a8207a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -145,38 +145,43 @@ enum {
SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
+ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
/* add others here before... */
SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32
-#define SWAP_MAP_MAX 0x7ffe
-#define SWAP_MAP_BAD 0x7fff
-#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */
-#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
+#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
+#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
+#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
+#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+
/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
- unsigned long flags;
- int prio; /* swap priority */
- int next; /* next entry on swap list */
- struct file *swap_file;
- struct block_device *bdev;
- struct list_head extent_list;
- struct swap_extent *curr_swap_extent;
- unsigned short *swap_map;
- unsigned int lowest_bit;
- unsigned int highest_bit;
+ unsigned long flags; /* SWP_USED etc: see above */
+ signed short prio; /* swap priority of this type */
+ signed char type; /* strange name for an index */
+ signed char next; /* next type on the swap list */
+ unsigned int max; /* extent of the swap_map */
+ unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned int lowest_bit; /* index of first free in swap_map */
+ unsigned int highest_bit; /* index of last free in swap_map */
+ unsigned int pages; /* total of usable pages of swap */
+ unsigned int inuse_pages; /* number of those currently in use */
+ unsigned int cluster_next; /* likely index for next allocation */
+ unsigned int cluster_nr; /* countdown to next cluster search */
unsigned int lowest_alloc; /* while preparing discard cluster */
unsigned int highest_alloc; /* while preparing discard cluster */
- unsigned int cluster_next;
- unsigned int cluster_nr;
- unsigned int pages;
- unsigned int max;
- unsigned int inuse_pages;
- unsigned int old_block_size;
+ struct swap_extent *curr_swap_extent;
+ struct swap_extent first_swap_extent;
+ struct block_device *bdev; /* swap device or bdev of swap file */
+ struct file *swap_file; /* seldom referenced */
+ unsigned int old_block_size; /* seldom referenced */
};
struct swap_list_t {
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
extern int kswapd_run(int nid);
+extern void kswapd_stop(int nid);
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
@@ -309,17 +315,18 @@ extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
-extern void swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
+extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
+extern int swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void)
#define free_swap_and_cache(swp) is_migration_entry(swp)
#define swapcache_prepare(swp) is_migration_entry(swp)
-static inline void swap_duplicate(swp_entry_t swp)
+static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
+ return 0;
+}
+
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
}
static inline void swap_free(swp_entry_t swp)
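
The swap.h hunks shrink swap_map entries from unsigned short to unsigned char: the low bits of a first-level byte hold the duplication count (saturating at SWAP_MAP_MAX, with SWAP_MAP_BAD marking bad slots), SWAP_HAS_CACHE flags a swap-cache page, and COUNT_CONTINUED says the rest of the count lives in continuation pages managed by the new add_swap_count_continuation(). A decoding sketch under those assumptions (these helpers are illustrative, not part of the patch):

/* Illustrative only: pick apart one first-level swap_map byte. */
static inline bool example_has_cache(unsigned char ent)
{
	return ent & SWAP_HAS_CACHE;			/* 0x40 */
}

static inline bool example_count_continued(unsigned char ent)
{
	return ent & COUNT_CONTINUED;			/* 0x80: more count in a continuation page */
}

static inline unsigned char example_count(unsigned char ent)
{
	/* low six bits: 0..SWAP_MAP_MAX, or SWAP_MAP_BAD for an unusable slot */
	return ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);
}
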
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bc70c5810fe..65793e90d6f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -102,12 +102,10 @@ struct perf_event_attr;
#ifdef CONFIG_EVENT_PROFILE
#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
- .profile_count = ATOMIC_INIT(-1), \
.profile_enable = prof_sysenter_enable, \
.profile_disable = prof_sysenter_disable,
#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
- .profile_count = ATOMIC_INIT(-1), \
.profile_enable = prof_sysexit_enable, \
.profile_disable = prof_sysexit_disable,
#else
@@ -145,7 +143,7 @@ struct perf_event_attr;
.name = "sys_enter"#sname, \
.system = "syscalls", \
.event = &enter_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
+ .raw_init = trace_event_raw_init, \
.show_format = syscall_enter_format, \
.define_fields = syscall_enter_define_fields, \
.regfunc = reg_event_syscall_enter, \
@@ -167,7 +165,7 @@ struct perf_event_attr;
.name = "sys_exit"#sname, \
.system = "syscalls", \
.event = &exit_syscall_print_##sname, \
- .raw_init = init_syscall_trace, \
+ .raw_init = trace_event_raw_init, \
.show_format = syscall_exit_format, \
.define_fields = syscall_exit_define_fields, \
.regfunc = reg_event_syscall_exit, \
@@ -834,4 +832,8 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
asmlinkage long sys_perf_event_open(
struct perf_event_attr __user *attr_uptr,
pid_t pid, int cpu, int group_fd, unsigned long flags);
+
+asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
#endif
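
The new sys_mmap_pgoff() prototype takes its file offset in pages, so the architectures' various mmap/mmap2 entry points can funnel into one common implementation. A hedged sketch of how an old byte-offset wrapper might forward to it (the alignment check and shift mirror typical arch wrappers, not code from this patch):

/* Illustrative only: byte-offset mmap forwarding to the pgoff variant. */
asmlinkage long example_sys_mmap(unsigned long addr, unsigned long len,
				 unsigned long prot, unsigned long flags,
				 unsigned long fd, unsigned long offset)
{
	if (offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
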
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h
new file mode 100644
index 00000000000..ce456eaae86
--- /dev/null
+++ b/include/linux/timb_gpio.h
@@ -0,0 +1,37 @@
+/*
+ * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_TIMB_GPIO_H
+#define _LINUX_TIMB_GPIO_H
+
+/**
+ * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
+ * @gpio_base: The number of the first GPIO pin; set to -1 for
+ * dynamic number allocation.
+ * @nr_pins: Number of pins supported by the hardware (1-32).
+ * @irq_base: If IRQs are supported by the hardware, this is the base
+ * IRQ number; one IRQ per pin will be used. Set to
+ * -1 if IRQs are not supported.
+ */
+struct timbgpio_platform_data {
+ int gpio_base;
+ int nr_pins;
+ int irq_base;
+};
+
+#endif
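
timb_gpio.h only carries platform data for the Timberdale GPIO block, so the natural consumer is board-support code. A hedged example of filling in the structure documented above (the instance name and pin count are assumptions; only the three fields come from the header):

/* Illustrative board-support snippet. */
#include <linux/timb_gpio.h>

static struct timbgpio_platform_data example_timbgpio_pdata = {
	.gpio_base = -1,	/* let gpiolib pick the first GPIO number */
	.nr_pins   = 32,	/* hardware supports 1-32 pins            */
	.irq_base  = -1,	/* per-pin interrupts not wired up        */
};
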
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 09077f6ed12..5cf397ceb72 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -14,6 +14,7 @@ struct trace_seq {
unsigned char buffer[PAGE_SIZE];
unsigned int len;
unsigned int readpos;
+ int full;
};
static inline void
@@ -21,6 +22,7 @@ trace_seq_init(struct trace_seq *s)
{
s->len = 0;
s->readpos = 0;
+ s->full = 0;
}
/*
@@ -33,7 +35,7 @@ extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
__attribute__ ((format (printf, 2, 0)));
extern int
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
-extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt);
extern int trace_seq_puts(struct trace_seq *s, const char *str);
@@ -55,8 +57,9 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
return 0;
}
-static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
+ return 0;
}
static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt)
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 1eb44a924e5..10db0102a89 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -134,6 +134,13 @@ static inline __must_check int tracehook_report_syscall_entry(
*/
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
+ if (step) {
+ siginfo_t info;
+ user_single_step_siginfo(current, regs, &info);
+ force_sig_info(SIGTRAP, &info, current);
+ return;
+ }
+
ptrace_report_syscall(regs);
}
diff --git a/include/linux/tty.h b/include/linux/tty.h
index f0f43d08d8b..ef3a2947b10 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -190,9 +190,17 @@ struct tty_port_operations {
/* Control the DTR line */
void (*dtr_rts)(struct tty_port *port, int raise);
/* Called when the last close completes or a hangup finishes
- IFF the port was initialized. Do not use to free resources */
+ IFF the port was initialized. Do not use to free resources. Called
+ under the port mutex to serialize against activate/shutdowns */
void (*shutdown)(struct tty_port *port);
void (*drop)(struct tty_port *port);
+ /* Called under the port mutex from tty_port_open, serialized using
+ the port mutex */
+ /* FIXME: long term getting the tty argument *out* of this would be
+ good for consoles */
+ int (*activate)(struct tty_port *port, struct tty_struct *tty);
+ /* Called on the final put of a port */
+ void (*destruct)(struct tty_port *port);
};
struct tty_port {
@@ -206,12 +214,14 @@ struct tty_port {
wait_queue_head_t delta_msr_wait; /* Modem status change */
unsigned long flags; /* TTY flags ASY_*/
struct mutex mutex; /* Locking */
+ struct mutex buf_mutex; /* Buffer alloc lock */
unsigned char *xmit_buf; /* Optional buffer */
unsigned int close_delay; /* Close port delay */
unsigned int closing_wait; /* Delay for output */
int drain_delay; /* Set to zero if no pure time
based drain is needed else
set to size of fifo */
+ struct kref kref; /* Ref counter */
};
/*
@@ -340,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *);
extern struct ktermios tty_std_termios;
-extern int kmsg_redirect;
-
extern void console_init(void);
extern int vcs_init(void);
@@ -439,7 +447,7 @@ extern void initialize_tty_struct(struct tty_struct *tty,
struct tty_driver *driver, int idx);
extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
int first_ok);
-extern void tty_release_dev(struct file *filp);
+extern int tty_release(struct inode *inode, struct file *filp);
extern int tty_init_termios(struct tty_struct *tty);
extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
@@ -454,6 +462,15 @@ extern int tty_write_lock(struct tty_struct *tty, int ndelay);
extern void tty_port_init(struct tty_port *port);
extern int tty_port_alloc_xmit_buf(struct tty_port *port);
extern void tty_port_free_xmit_buf(struct tty_port *port);
+extern void tty_port_put(struct tty_port *port);
+
+extern inline struct tty_port *tty_port_get(struct tty_port *port)
+{
+ if (port)
+ kref_get(&port->kref);
+ return port;
+}
+
extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
extern int tty_port_carrier_raised(struct tty_port *port);
@@ -467,6 +484,8 @@ extern int tty_port_close_start(struct tty_port *port,
extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty);
extern void tty_port_close(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
+extern int tty_port_open(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp);
extern inline int tty_port_users(struct tty_port *port)
{
return port->count + port->blocked_open;
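
The tty.h changes give struct tty_port a kref with tty_port_get()/tty_port_put() and a destruct() callback, and add activate()/shutdown() hooks that tty_port_open() and the close/hangup paths call under the port mutex, so drivers can keep one-time bring-up and tear-down out of their own open()/close() code. A hedged sketch of a driver-side ops table under those assumptions (the function bodies are placeholders):

/* Illustrative only: wiring up the new tty_port hooks. */
static int example_port_activate(struct tty_port *port, struct tty_struct *tty)
{
	/* first open: power up the hardware; runs under port->mutex */
	return 0;
}

static void example_port_shutdown(struct tty_port *port)
{
	/* last close or hangup of an initialized port; also under port->mutex */
}

static void example_port_destruct(struct tty_port *port)
{
	/* final tty_port_put(): release whatever embeds the port */
}

static const struct tty_port_operations example_port_ops = {
	.activate = example_port_activate,
	.shutdown = example_port_shutdown,
	.destruct = example_port_destruct,
};
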
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a34fa89f147..e101a2d04d7 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -331,6 +331,7 @@ struct usb_bus {
u8 otg_port; /* 0, or number of OTG/HNP port */
unsigned is_b_host:1; /* true during some HNP roleswitches */
unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
+ unsigned sg_tablesize; /* 0 or largest number of sg list entries */
int devnum_next; /* Next open device number in
* round-robin allocation */
@@ -428,11 +429,9 @@ struct usb_tt;
* @last_busy: time of last use
* @autosuspend_delay: in jiffies
* @connect_time: time device was first connected
- * @auto_pm: autosuspend/resume in progress
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @autosuspend_disabled: autosuspend disabled by the user
- * @autoresume_disabled: autoresume disabled by the user
* @skip_sys_resume: skip the next system resume
* @wusb_dev: if this is a Wireless USB device, link to the WUSB
* specific data for the device.
@@ -513,11 +512,9 @@ struct usb_device {
int autosuspend_delay;
unsigned long connect_time;
- unsigned auto_pm:1;
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned autosuspend_disabled:1;
- unsigned autoresume_disabled:1;
unsigned skip_sys_resume:1;
#endif
struct wusb_dev *wusb_dev;
@@ -543,22 +540,20 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
/* USB autosuspend and autoresume */
#ifdef CONFIG_USB_SUSPEND
-extern int usb_autopm_set_interface(struct usb_interface *intf);
extern int usb_autopm_get_interface(struct usb_interface *intf);
extern void usb_autopm_put_interface(struct usb_interface *intf);
extern int usb_autopm_get_interface_async(struct usb_interface *intf);
extern void usb_autopm_put_interface_async(struct usb_interface *intf);
-static inline void usb_autopm_enable(struct usb_interface *intf)
+static inline void usb_autopm_get_interface_no_resume(
+ struct usb_interface *intf)
{
- atomic_set(&intf->pm_usage_cnt, 0);
- usb_autopm_set_interface(intf);
+ atomic_inc(&intf->pm_usage_cnt);
}
-
-static inline void usb_autopm_disable(struct usb_interface *intf)
+static inline void usb_autopm_put_interface_no_suspend(
+ struct usb_interface *intf)
{
- atomic_set(&intf->pm_usage_cnt, 1);
- usb_autopm_set_interface(intf);
+ atomic_dec(&intf->pm_usage_cnt);
}
static inline void usb_mark_last_busy(struct usb_device *udev)
@@ -568,12 +563,8 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
#else
-static inline int usb_autopm_set_interface(struct usb_interface *intf)
-{ return 0; }
-
static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
-
static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
{ return 0; }
@@ -581,9 +572,11 @@ static inline void usb_autopm_put_interface(struct usb_interface *intf)
{ }
static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
{ }
-static inline void usb_autopm_enable(struct usb_interface *intf)
+static inline void usb_autopm_get_interface_no_resume(
+ struct usb_interface *intf)
{ }
-static inline void usb_autopm_disable(struct usb_interface *intf)
+static inline void usb_autopm_put_interface_no_suspend(
+ struct usb_interface *intf)
{ }
static inline void usb_mark_last_busy(struct usb_device *udev)
{ }
@@ -626,6 +619,10 @@ extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
unsigned ifnum);
extern struct usb_host_interface *usb_altnum_to_altsetting(
const struct usb_interface *intf, unsigned int altnum);
+extern struct usb_host_interface *usb_find_alt_setting(
+ struct usb_host_config *config,
+ unsigned int iface_num,
+ unsigned int alt_num);
/**
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4f6bb3d2160..738ea1a691c 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -127,6 +127,7 @@ struct usb_function {
/* private: */
/* internals */
struct list_head list;
+ DECLARE_BITMAP(endpoints, 32);
};
int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 2443c0e7a80..52bb917641f 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -33,6 +33,23 @@ enum usb_otg_state {
OTG_STATE_A_VBUS_ERR,
};
+#define USB_OTG_PULLUP_ID (1 << 0)
+#define USB_OTG_PULLDOWN_DP (1 << 1)
+#define USB_OTG_PULLDOWN_DM (1 << 2)
+#define USB_OTG_EXT_VBUS_INDICATOR (1 << 3)
+#define USB_OTG_DRV_VBUS (1 << 4)
+#define USB_OTG_DRV_VBUS_EXT (1 << 5)
+
+struct otg_transceiver;
+
+/* for transceivers connected through a ULPI interface, the user must
+ * provide access ops
+ */
+struct otg_io_access_ops {
+ int (*read)(struct otg_transceiver *otg, u32 reg);
+ int (*write)(struct otg_transceiver *otg, u32 val, u32 reg);
+};
+
/*
* the otg driver needs to interact with both device side and host side
* usb controllers. it decides which controller is active at a given
@@ -42,6 +59,7 @@ enum usb_otg_state {
struct otg_transceiver {
struct device *dev;
const char *label;
+ unsigned int flags;
u8 default_a;
enum usb_otg_state state;
@@ -49,10 +67,17 @@ struct otg_transceiver {
struct usb_bus *host;
struct usb_gadget *gadget;
+ struct otg_io_access_ops *io_ops;
+ void __iomem *io_priv;
+
/* to pass extra port status to the root hub */
u16 port_status;
u16 port_change;
+ /* initialize/shutdown the OTG controller */
+ int (*init)(struct otg_transceiver *otg);
+ void (*shutdown)(struct otg_transceiver *otg);
+
/* bind/unbind the host controller */
int (*set_host)(struct otg_transceiver *otg,
struct usb_bus *host);
@@ -65,6 +90,10 @@ struct otg_transceiver {
int (*set_power)(struct otg_transceiver *otg,
unsigned mA);
+ /* effective for A-peripheral, ignored for B devices */
+ int (*set_vbus)(struct otg_transceiver *otg,
+ bool enabled);
+
/* for non-OTG B devices: set transceiver into suspend mode */
int (*set_suspend)(struct otg_transceiver *otg,
int suspend);
@@ -85,6 +114,38 @@ extern int otg_set_transceiver(struct otg_transceiver *);
extern void usb_nop_xceiv_register(void);
extern void usb_nop_xceiv_unregister(void);
+/* helpers for direct access thru low-level io interface */
+static inline int otg_io_read(struct otg_transceiver *otg, u32 reg)
+{
+ if (otg->io_ops && otg->io_ops->read)
+ return otg->io_ops->read(otg, reg);
+
+ return -EINVAL;
+}
+
+static inline int otg_io_write(struct otg_transceiver *otg, u32 reg, u32 val)
+{
+ if (otg->io_ops && otg->io_ops->write)
+ return otg->io_ops->write(otg, reg, val);
+
+ return -EINVAL;
+}
+
+static inline int
+otg_init(struct otg_transceiver *otg)
+{
+ if (otg->init)
+ return otg->init(otg);
+
+ return 0;
+}
+
+static inline void
+otg_shutdown(struct otg_transceiver *otg)
+{
+ if (otg->shutdown)
+ otg->shutdown(otg);
+}
/* for usb host and peripheral controller drivers */
extern struct otg_transceiver *otg_get_transceiver(void);
@@ -97,6 +158,12 @@ otg_start_hnp(struct otg_transceiver *otg)
return otg->start_hnp(otg);
}
+/* Context: can sleep */
+static inline int
+otg_set_vbus(struct otg_transceiver *otg, bool enabled)
+{
+ return otg->set_vbus(otg, enabled);
+}
/* for HCDs */
static inline int
@@ -105,7 +172,6 @@ otg_set_host(struct otg_transceiver *otg, struct usb_bus *host)
return otg->set_host(otg, host);
}
-
/* for usb peripheral controller drivers */
/* Context: can sleep */
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index ce911ebf91e..1819396ed50 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -16,6 +16,7 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/sysrq.h>
+#include <linux/kfifo.h>
#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
@@ -39,8 +40,6 @@ enum port_dev_state {
* @serial: pointer back to the struct usb_serial owner of this port.
* @port: pointer to the corresponding tty_port for this port.
* @lock: spinlock to grab when updating portions of this structure.
- * @mutex: mutex used to synchronize serial_open() and serial_close()
- * access for this port.
* @number: the number of the port (the minor number).
* @interrupt_in_buffer: pointer to the interrupt in buffer for this port.
* @interrupt_in_urb: pointer to the interrupt in struct urb for this port.
@@ -77,7 +76,6 @@ struct usb_serial_port {
struct usb_serial *serial;
struct tty_port port;
spinlock_t lock;
- struct mutex mutex;
unsigned char number;
unsigned char *interrupt_in_buffer;
@@ -97,7 +95,7 @@ struct usb_serial_port {
unsigned char *bulk_out_buffer;
int bulk_out_size;
struct urb *write_urb;
- struct kfifo *write_fifo;
+ struct kfifo write_fifo;
int write_urb_busy;
__u8 bulk_out_endpointAddress;
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
new file mode 100644
index 00000000000..20675c6ebc4
--- /dev/null
+++ b/include/linux/usb/ulpi.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_USB_ULPI_H
+#define __LINUX_USB_ULPI_H
+
+struct otg_transceiver *otg_ulpi_create(struct otg_io_access_ops *ops,
+ unsigned int flags);
+
+#endif /* __LINUX_USB_ULPI_H */
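
Taken together, the otg.h and ulpi.h additions let a platform hand register-access routines to a generic ULPI transceiver driver: the platform fills a struct otg_io_access_ops, passes it to otg_ulpi_create() along with USB_OTG_* flags, and the core then reaches the PHY through otg_io_read()/otg_io_write(). A hedged sketch of such glue code (the register-access bodies and names are placeholders):

/* Illustrative platform glue for a ULPI-connected transceiver. */
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>

static int example_ulpi_read(struct otg_transceiver *otg, u32 reg)
{
	/* read a ULPI register through the controller's viewport */
	return 0;
}

static int example_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
{
	/* write a ULPI register through the controller's viewport */
	return 0;
}

static struct otg_io_access_ops example_ulpi_ops = {
	.read	= example_ulpi_read,
	.write	= example_ulpi_write,
};

static struct otg_transceiver *example_xceiv;

static void example_register_transceiver(void)
{
	example_xceiv = otg_ulpi_create(&example_ulpi_ops,
					USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
}
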
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 3d15fb9bc11..a4b947e470a 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -56,7 +56,9 @@
US_FLAG(SANE_SENSE, 0x00008000) \
/* Sane Sense (> 18 bytes) */ \
US_FLAG(CAPACITY_OK, 0x00010000) \
- /* READ CAPACITY response is correct */
+ /* READ CAPACITY response is correct */ \
+ US_FLAG(BAD_SENSE, 0x00020000) \
+ /* Bad Sense (never more than 18 bytes) */
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index b2a7d8ba6ee..15591d2ea40 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -128,6 +128,29 @@ struct usbdevfs_hub_portinfo {
#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
+
+struct usbdevfs_ctrltransfer32 {
+ u8 bRequestType;
+ u8 bRequest;
+ u16 wValue;
+ u16 wIndex;
+ u16 wLength;
+ u32 timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_bulktransfer32 {
+ compat_uint_t ep;
+ compat_uint_t len;
+ compat_uint_t timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_disconnectsignal32 {
+ compat_int_t signr;
+ compat_caddr_t context;
+};
+
struct usbdevfs_urb32 {
unsigned char type;
unsigned char endpoint;
@@ -153,7 +176,9 @@ struct usbdevfs_ioctl32 {
#endif /* __KERNEL__ */
#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer)
+#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer)
+#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32)
#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int)
#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface)
#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int)
@@ -166,6 +191,7 @@ struct usbdevfs_ioctl32 {
#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *)
#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32)
#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal)
+#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32)
#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int)
#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int)
#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo)
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 79b9837d9ca..cf97b5b9d1f 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -1,4 +1,4 @@
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/module.h>
/* Simply sanity version stamp for modules. */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 32b92298fd7..d4962a782b8 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -294,6 +294,7 @@ struct v4l2_pix_format {
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
/* Palette formats */
@@ -329,7 +330,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */
+#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
+#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
/* 10bit raw bayer DPCM compressed to 8 bits */
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
/*
@@ -732,6 +737,99 @@ struct v4l2_standard {
};
/*
+ * V I D E O T I M I N G S D V P R E S E T
+ */
+struct v4l2_dv_preset {
+ __u32 preset;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T S E N U M E R A T I O N
+ */
+struct v4l2_dv_enum_preset {
+ __u32 index;
+ __u32 preset;
+ __u8 name[32]; /* Name of the preset timing */
+ __u32 width;
+ __u32 height;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T V A L U E S
+ */
+#define V4L2_DV_INVALID 0
+#define V4L2_DV_480P59_94 1 /* BT.1362 */
+#define V4L2_DV_576P50 2 /* BT.1362 */
+#define V4L2_DV_720P24 3 /* SMPTE 296M */
+#define V4L2_DV_720P25 4 /* SMPTE 296M */
+#define V4L2_DV_720P30 5 /* SMPTE 296M */
+#define V4L2_DV_720P50 6 /* SMPTE 296M */
+#define V4L2_DV_720P59_94 7 /* SMPTE 274M */
+#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
+#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I25 11 /* BT.1120 */
+#define V4L2_DV_1080I50 12 /* SMPTE 296M */
+#define V4L2_DV_1080I60 13 /* SMPTE 296M */
+#define V4L2_DV_1080P24 14 /* SMPTE 296M */
+#define V4L2_DV_1080P25 15 /* SMPTE 296M */
+#define V4L2_DV_1080P30 16 /* SMPTE 296M */
+#define V4L2_DV_1080P50 17 /* BT.1120 */
+#define V4L2_DV_1080P60 18 /* BT.1120 */
+
+/*
+ * D V B T T I M I N G S
+ */
+
+/* BT.656/BT.1120 timing data */
+struct v4l2_bt_timings {
+ __u32 width; /* width in pixels */
+ __u32 height; /* height in lines */
+ __u32 interlaced; /* Interlaced or progressive */
+ __u32 polarities; /* Positive or negative polarity */
+ __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz->74250000 */
+ __u32 hfrontporch; /* Horizontal front porch in pixels */
+ __u32 hsync; /* Horizontal Sync length in pixels */
+ __u32 hbackporch; /* Horizontal back porch in pixels */
+ __u32 vfrontporch; /* Vertical front porch in lines */
+ __u32 vsync; /* Vertical Sync length in lines */
+ __u32 vbackporch; /* Vertical back porch in lines */
+ __u32 il_vfrontporch; /* Vertical front porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vsync; /* Vertical sync length for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vbackporch; /* Vertical back porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 reserved[16];
+} __attribute__ ((packed));
+
+/* Interlaced or progressive format */
+#define V4L2_DV_PROGRESSIVE 0
+#define V4L2_DV_INTERLACED 1
+
+/* Polarities. If bit is not set, it is assumed to be negative polarity */
+#define V4L2_DV_VSYNC_POS_POL 0x00000001
+#define V4L2_DV_HSYNC_POS_POL 0x00000002
+
+
+/* DV timings */
+struct v4l2_dv_timings {
+ __u32 type;
+ union {
+ struct v4l2_bt_timings bt;
+ __u32 reserved[32];
+ };
+} __attribute__ ((packed));
+
+/* Values for the type field */
+#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
+
+/*
* V I D E O I N P U T S
*/
struct v4l2_input {
@@ -742,7 +840,8 @@ struct v4l2_input {
__u32 tuner; /* Associated tuner */
v4l2_std_id std;
__u32 status;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
@@ -773,6 +872,11 @@ struct v4l2_input {
#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
+/* capabilities flags */
+#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* V I D E O O U T P U T S
*/
@@ -783,13 +887,19 @@ struct v4l2_output {
__u32 audioset; /* Associated audios (bitfield) */
__u32 modulator; /* Associated modulator */
v4l2_std_id std;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
#define V4L2_OUTPUT_TYPE_MODULATOR 1
#define V4L2_OUTPUT_TYPE_ANALOG 2
#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
+/* capabilities flags */
+#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* C O N T R O L S
*/
@@ -1624,6 +1734,13 @@ struct v4l2_dbg_chip_ident {
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
+#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset)
+#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset)
+#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset)
+#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset)
+#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
+#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
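
The videodev2.h additions give applications two ways to configure digital video interfaces: a table of fixed presets (struct v4l2_dv_preset with the V4L2_DV_* codes, plus the enum/query ioctls) and fully custom BT.656/1120 timings (struct v4l2_dv_timings). A hedged user-space sketch of selecting a preset (the device node and error handling are assumptions):

/* Illustrative user-space use of the new preset ioctl. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int example_set_720p60(const char *devnode)	/* e.g. "/dev/video0" */
{
	struct v4l2_dv_preset preset;
	int fd = open(devnode, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&preset, 0, sizeof(preset));
	preset.preset = V4L2_DV_720P60;

	if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
		return -1;	/* no DV preset support, or preset rejected */

	return fd;
}
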
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a..ee03bba9c5d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+ KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+ KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -76,24 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __get_cpu_var(vm_event_states).event[item]++;
+ __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- get_cpu_var(vm_event_states).event[item]++;
- put_cpu();
+ this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __get_cpu_var(vm_event_states).event[item] += delta;
+ __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- get_cpu_var(vm_event_states).event[item] += delta;
- put_cpu();
+ this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
extern void all_vm_events(unsigned long *);
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7ffa11f0623..d5dd0bc408f 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -84,4 +84,23 @@ struct vt_setactivate {
#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
+#ifdef __KERNEL__
+
+#ifdef CONFIG_VT_CONSOLE
+
+extern int vt_kmsg_redirect(int new);
+
+#else
+
+static inline int vt_kmsg_redirect(int new)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __KERNEL__ */
+
+#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
+
#endif /* _LINUX_VT_H */
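
vt_kmsg_redirect() replaces the old global kmsg_redirect variable (dropped from tty.h above): a non-negative argument selects the console that kernel messages are redirected to, while -1, as used by the vt_get_kmsg_redirect() macro, only reads the current setting. A small hedged sketch of both uses (kernel context, CONFIG_VT_CONSOLE assumed):

/* Illustrative only. */
#include <linux/vt.h>

static void example_kmsg_redirect(void)
{
	int old = vt_kmsg_redirect(3);		/* send kernel messages to tty3;    */
						/* assumed to return the old target */
	int cur = vt_get_kmsg_redirect();	/* same as vt_kmsg_redirect(-1)     */

	vt_kmsg_redirect(old);			/* restore the previous target */
	(void)cur;
}
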
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 705f01fe413..c18c008f4bb 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,8 +79,7 @@ void wakeup_flusher_threads(long nr_pages);
static inline void wait_on_inode(struct inode *inode)
{
might_sleep();
- wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
- TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}
static inline void inode_sync_wait(struct inode *inode)
{
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 5c84af8c5f6..fb9b7e6e1e2 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -38,12 +38,13 @@ struct dentry;
struct xattr_handler {
char *prefix;
- size_t (*list)(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len);
- int (*get)(struct inode *inode, const char *name, void *buffer,
- size_t size);
- int (*set)(struct inode *inode, const char *name, const void *buffer,
- size_t size, int flags);
+ int flags; /* fs private flags passed back to the handlers */
+ size_t (*list)(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int handler_flags);
+ int (*get)(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int handler_flags);
+ int (*set)(struct dentry *dentry, const char *name, const void *buffer,
+ size_t size, int flags, int handler_flags);
};
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index e41a99ee353..2c6af24b905 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -26,26 +26,7 @@
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-
-extern int media_ir_debug; /* media_ir_debug level (0,1,2) */
-#define IR_dprintk(level, fmt, arg...) if (media_ir_debug >= level) \
- printk(KERN_DEBUG "%s: " fmt , __func__, ## arg)
-
-#define IR_TYPE_RC5 1
-#define IR_TYPE_PD 2 /* Pulse distance encoded IR */
-#define IR_TYPE_OTHER 99
-
-struct ir_scancode {
- u16 scancode;
- u32 keycode;
-};
-
-struct ir_scancode_table {
- struct ir_scancode *scan;
- int size;
- spinlock_t lock;
-};
+#include <media/ir-core.h>
#define RC5_START(x) (((x)>>12)&3)
#define RC5_TOGGLE(x) (((x)>>11)&1)
@@ -56,8 +37,6 @@ struct ir_input_state {
/* configuration */
int ir_type;
- struct ir_scancode_table keytable;
-
/* key info */
u32 ir_key; /* ir scancode */
u32 keycode; /* linux key code */
@@ -105,7 +84,7 @@ struct card_ir {
/* Routines from ir-functions.c */
int ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
- int ir_type, struct ir_scancode_table *ir_codes);
+ int ir_type);
void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir);
void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir,
u32 ir_key);
@@ -118,19 +97,6 @@ u32 ir_rc5_decode(unsigned int code);
void ir_rc5_timer_end(unsigned long data);
void ir_rc5_timer_keyup(unsigned long data);
-/* Routines from ir-keytable.c */
-
-u32 ir_g_keycode_from_table(struct input_dev *input_dev,
- u32 scancode);
-
-int ir_set_keycode_table(struct input_dev *input_dev,
- struct ir_scancode_table *rc_tab);
-
-int ir_roundup_tablesize(int n_elems);
-int ir_copy_table(struct ir_scancode_table *destin,
- const struct ir_scancode_table *origin);
-void ir_input_free(struct input_dev *input_dev);
-
/* scancode->keycode map tables from ir-keymaps.c */
extern struct ir_scancode_table ir_codes_empty_table;
@@ -195,4 +161,5 @@ extern struct ir_scancode_table ir_codes_evga_indtube_table;
extern struct ir_scancode_table ir_codes_terratec_cinergy_xs_table;
extern struct ir_scancode_table ir_codes_videomate_s350_table;
extern struct ir_scancode_table ir_codes_gadmei_rm008z_table;
+extern struct ir_scancode_table ir_codes_nec_terratec_cinergy_xs_table;
#endif
diff --git a/include/media/ir-core.h b/include/media/ir-core.h
new file mode 100644
index 00000000000..299d201e133
--- /dev/null
+++ b/include/media/ir-core.h
@@ -0,0 +1,62 @@
+/*
+ * Remote Controller core header
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IR_CORE
+#define _IR_CORE
+
+#include <linux/input.h>
+#include <linux/spinlock.h>
+
+extern int ir_core_debug;
+#define IR_dprintk(level, fmt, arg...) if (ir_core_debug >= level) \
+ printk(KERN_DEBUG "%s: " fmt , __func__, ## arg)
+
+enum ir_type {
+ IR_TYPE_UNKNOWN = 0,
+ IR_TYPE_RC5 = 1,
+ IR_TYPE_PD = 2, /* Pulse distance encoded IR */
+ IR_TYPE_NEC = 3,
+ IR_TYPE_OTHER = 99,
+};
+
+struct ir_scancode {
+ u16 scancode;
+ u32 keycode;
+};
+
+struct ir_scancode_table {
+ struct ir_scancode *scan;
+ int size;
+ enum ir_type ir_type;
+ spinlock_t lock;
+};
+
+struct ir_input_dev {
+ struct input_dev *dev;
+ struct ir_scancode_table rc_tab;
+};
+
+/* Routines from ir-keytable.c */
+
+u32 ir_g_keycode_from_table(struct input_dev *input_dev,
+ u32 scancode);
+
+int ir_set_keycode_table(struct input_dev *input_dev,
+ struct ir_scancode_table *rc_tab);
+
+int ir_roundup_tablesize(int n_elems);
+int ir_input_register(struct input_dev *dev,
+ struct ir_scancode_table *ir_codes);
+void ir_input_unregister(struct input_dev *input_dev);
+
+#endif
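
The new ir-core.h takes over the scancode/keycode table types removed from ir-common.h above, adds an ir_type per table, and exposes ir_input_register()/ir_input_unregister() for drivers. A hedged sketch of a driver registering a small table (scancodes and keycodes are made up):

/* Illustrative only: registering a two-entry scancode map. */
#include <linux/kernel.h>
#include <linux/input.h>
#include <media/ir-core.h>

static struct ir_scancode example_scan[] = {
	{ 0x01, KEY_POWER },
	{ 0x02, KEY_MUTE  },
};

static struct ir_scancode_table example_table = {
	.scan		= example_scan,
	.size		= ARRAY_SIZE(example_scan),
	.ir_type	= IR_TYPE_NEC,
};

static int example_register(struct input_dev *dev)
{
	return ir_input_register(dev, &example_table);
}
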
diff --git a/include/media/mt9t112.h b/include/media/mt9t112.h
new file mode 100644
index 00000000000..a43c74ab05e
--- /dev/null
+++ b/include/media/mt9t112.h
@@ -0,0 +1,30 @@
+/* mt9t112 Camera
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MT9T112_H__
+#define __MT9T112_H__
+
+#define MT9T112_FLAG_PCLK_RISING_EDGE (1 << 0)
+#define MT9T112_FLAG_DATAWIDTH_8 (1 << 1) /* default width is 10 */
+
+struct mt9t112_pll_divider {
+ u8 m, n;
+ u8 p1, p2, p3, p4, p5, p6, p7;
+};
+
+/*
+ * mt9t112 camera info
+ */
+struct mt9t112_camera_info {
+ u32 flags;
+ struct mt9t112_pll_divider divider;
+};
+
+#endif /* __MT9T112_H__ */
diff --git a/include/media/ov772x.h b/include/media/ov772x.h
index 30d9629198e..14c77efd6a8 100644
--- a/include/media/ov772x.h
+++ b/include/media/ov772x.h
@@ -1,4 +1,5 @@
-/* ov772x Camera
+/*
+ * ov772x Camera
*
* Copyright (C) 2008 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
@@ -54,7 +55,6 @@ struct ov772x_edge_ctrl {
struct ov772x_camera_info {
unsigned long buswidth;
unsigned long flags;
- struct soc_camera_link link;
struct ov772x_edge_ctrl edgectrl;
};
diff --git a/include/media/rj54n1cb0c.h b/include/media/rj54n1cb0c.h
new file mode 100644
index 00000000000..8ae3288ae92
--- /dev/null
+++ b/include/media/rj54n1cb0c.h
@@ -0,0 +1,19 @@
+/*
+ * RJ54N1CB0C Private data
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RJ54N1CB0C_H__
+#define __RJ54N1CB0C_H__
+
+struct rj54n1_pdata {
+ unsigned int mclk_freq;
+ bool ioctl_high;
+};
+
+#endif
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index eed5fccc83f..4aeff96ff7d 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -108,8 +108,6 @@ struct saa7146_fh {
struct saa7146_vv
{
- int vbi_minor;
-
/* vbi capture */
struct saa7146_dmaqueue vbi_q;
/* vbi workaround interrupt queue */
@@ -117,8 +115,6 @@ struct saa7146_vv
int vbi_fieldcount;
struct saa7146_fh *vbi_streaming;
- int video_minor;
-
int video_status;
struct saa7146_fh *video_fh;
diff --git a/include/media/sh_mobile_ceu.h b/include/media/sh_mobile_ceu.h
index 0f3524cff43..b6774783687 100644
--- a/include/media/sh_mobile_ceu.h
+++ b/include/media/sh_mobile_ceu.h
@@ -3,6 +3,8 @@
#define SH_CEU_FLAG_USE_8BIT_BUS (1 << 0) /* use 8bit bus width */
#define SH_CEU_FLAG_USE_16BIT_BUS (1 << 1) /* use 16bit bus width */
+#define SH_CEU_FLAG_HSYNC_LOW (1 << 2) /* default High if possible */
+#define SH_CEU_FLAG_VSYNC_LOW (1 << 3) /* default High if possible */
struct sh_mobile_ceu_info {
unsigned long flags;
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 3d74e60032d..dcc5b86bcb6 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -24,18 +24,13 @@ struct soc_camera_device {
struct device *pdev; /* Platform device */
s32 user_width;
s32 user_height;
- unsigned short width_min;
- unsigned short height_min;
- unsigned short y_skip_top; /* Lines to skip at the top */
+ enum v4l2_colorspace colorspace;
unsigned char iface; /* Host number */
unsigned char devnum; /* Device number per host */
- unsigned char buswidth; /* See comment in .c */
struct soc_camera_sense *sense; /* See comment in struct definition */
struct soc_camera_ops *ops;
struct video_device *vdev;
- const struct soc_camera_data_format *current_fmt;
- const struct soc_camera_data_format *formats;
- int num_formats;
+ const struct soc_camera_format_xlate *current_fmt;
struct soc_camera_format_xlate *user_formats;
int num_user_formats;
enum v4l2_field field; /* Preserve field over close() */
@@ -107,6 +102,8 @@ struct soc_camera_link {
int i2c_adapter_id;
struct i2c_board_info *board_info;
const char *module_name;
+ void *priv;
+
/*
* For non-I2C devices platform platform has to provide methods to
* add a device to the system and to remove
@@ -162,23 +159,13 @@ static inline struct v4l2_subdev *soc_camera_to_subdev(
int soc_camera_host_register(struct soc_camera_host *ici);
void soc_camera_host_unregister(struct soc_camera_host *ici);
-const struct soc_camera_data_format *soc_camera_format_by_fourcc(
- struct soc_camera_device *icd, unsigned int fourcc);
const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc(
struct soc_camera_device *icd, unsigned int fourcc);
-struct soc_camera_data_format {
- const char *name;
- unsigned int depth;
- __u32 fourcc;
- enum v4l2_colorspace colorspace;
-};
-
/**
* struct soc_camera_format_xlate - match between host and sensor formats
- * @cam_fmt: sensor format provided by the sensor
- * @host_fmt: host format after host translation from cam_fmt
- * @buswidth: bus width for this format
+ * @code: code of a sensor provided format
+ * @host_fmt: host format after host translation from code
*
* Host and sensor translation structure. Used in table of host and sensor
* formats matchings in soc_camera_device. A host can override the generic list
@@ -186,9 +173,8 @@ struct soc_camera_data_format {
* format setup.
*/
struct soc_camera_format_xlate {
- const struct soc_camera_data_format *cam_fmt;
- const struct soc_camera_data_format *host_fmt;
- unsigned char buswidth;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *host_fmt;
};
struct soc_camera_ops {
diff --git a/include/media/soc_camera_platform.h b/include/media/soc_camera_platform.h
index bb70401b814..0ecefe227b7 100644
--- a/include/media/soc_camera_platform.h
+++ b/include/media/soc_camera_platform.h
@@ -19,11 +19,10 @@ struct device;
struct soc_camera_platform_info {
const char *format_name;
unsigned long format_depth;
- struct v4l2_pix_format format;
+ struct v4l2_mbus_framefmt format;
unsigned long bus_param;
struct device *dev;
int (*set_capture)(struct soc_camera_platform_info *info, int enable);
- struct soc_camera_link link;
};
#endif /* __SOC_CAMERA_H__ */
diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h
new file mode 100644
index 00000000000..037cd7be001
--- /dev/null
+++ b/include/media/soc_mediabus.h
@@ -0,0 +1,65 @@
+/*
+ * SoC-camera Media Bus API extensions
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SOC_MEDIABUS_H
+#define SOC_MEDIABUS_H
+
+#include <linux/videodev2.h>
+
+#include <media/v4l2-mediabus.h>
+
+/**
+ * enum soc_mbus_packing - data packing types on the media-bus
+ * @SOC_MBUS_PACKING_NONE: no packing, bit-for-bit transfer to RAM
+ * @SOC_MBUS_PACKING_2X8_PADHI: 16 bits transferred in 2 8-bit samples, in the
+ * possibly incomplete byte high bits are padding
+ * @SOC_MBUS_PACKING_2X8_PADLO: as above, but low bits are padding
+ * @SOC_MBUS_PACKING_EXTEND16: sample width (e.g., 10 bits) has to be extended
+ * to 16 bits
+ */
+enum soc_mbus_packing {
+ SOC_MBUS_PACKING_NONE,
+ SOC_MBUS_PACKING_2X8_PADHI,
+ SOC_MBUS_PACKING_2X8_PADLO,
+ SOC_MBUS_PACKING_EXTEND16,
+};
+
+/**
+ * enum soc_mbus_order - sample order on the media bus
+ * @SOC_MBUS_ORDER_LE: least significant sample first
+ * @SOC_MBUS_ORDER_BE: most significant sample first
+ */
+enum soc_mbus_order {
+ SOC_MBUS_ORDER_LE,
+ SOC_MBUS_ORDER_BE,
+};
+
+/**
+ * struct soc_mbus_pixelfmt - Data format on the media bus
+ * @name: Name of the format
+ * @fourcc: Fourcc code, that will be obtained if the data is
+ * stored in memory in the following way:
+ * @packing: Type of sample-packing, that has to be used
+ * @order: Sample order when storing in memory
+ * @bits_per_sample: How many bits the bridge has to sample
+ */
+struct soc_mbus_pixelfmt {
+ const char *name;
+ u32 fourcc;
+ enum soc_mbus_packing packing;
+ enum soc_mbus_order order;
+ u8 bits_per_sample;
+};
+
+const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc(
+ enum v4l2_mbus_pixelcode code);
+s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf);
+
+#endif
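The packing/order/bits_per_sample triple is enough to derive the line pitch. As an illustration of what soc_mbus_bytes_per_line() presumably computes (an assumption about its behaviour, not something stated in this patch):

	/* Illustrative only: derive bytes per line from a soc_mbus_pixelfmt. */
	static s32 example_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
	{
		switch (mf->packing) {
		case SOC_MBUS_PACKING_NONE:
			return width * mf->bits_per_sample / 8;
		case SOC_MBUS_PACKING_2X8_PADHI:
		case SOC_MBUS_PACKING_2X8_PADLO:
		case SOC_MBUS_PACKING_EXTEND16:
			return width * 2;	/* every pixel ends up as 16 bits in RAM */
		}
		return -EINVAL;
	}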
diff --git a/include/media/tw9910.h b/include/media/tw9910.h
index 73231e7880d..5e2895a05e6 100644
--- a/include/media/tw9910.h
+++ b/include/media/tw9910.h
@@ -32,7 +32,6 @@ enum tw9910_mpout_pin {
struct tw9910_video_info {
unsigned long buswidth;
enum tw9910_mpout_pin mpout;
- struct soc_camera_link link;
};
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 91942dbe64e..6cc107d198a 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -267,6 +267,8 @@ enum {
V4L2_IDENT_MT9V022IX7ATC = 45010, /* No way to detect "normal" I77ATx */
V4L2_IDENT_MT9V022IX7ATM = 45015, /* and "lead free" IA7ATx chips */
V4L2_IDENT_MT9T031 = 45020,
+ V4L2_IDENT_MT9T111 = 45021,
+ V4L2_IDENT_MT9T112 = 45022,
V4L2_IDENT_MT9V111 = 45031,
V4L2_IDENT_MT9V112 = 45032,
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 1c25b10da34..1c7b259f341 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -212,5 +212,5 @@ void v4l_bound_align_image(unsigned int *w, unsigned int wmin,
unsigned int *h, unsigned int hmin,
unsigned int hmax, unsigned int halign,
unsigned int salign);
-
+int v4l_fill_dv_preset_info(u32 preset, struct v4l2_dv_enum_preset *info);
#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 73c9867d744..2dee93892ea 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -28,10 +28,10 @@ struct v4l2_ioctl_callbacks;
struct video_device;
struct v4l2_device;
-/* Flag to mark the video_device struct as unregistered.
- Drivers can set this flag if they want to block all future
- device access. It is set by video_unregister_device. */
-#define V4L2_FL_UNREGISTERED (0)
+/* Flag to mark the video_device struct as registered.
+ Drivers can clear this flag if they want to block all future
+ device access. It is cleared by video_unregister_device. */
+#define V4L2_FL_REGISTERED (0)
struct v4l2_file_operations {
struct module *owner;
@@ -96,9 +96,7 @@ struct video_device
/* Register video devices. Note that if video_register_device fails,
the release() callback of the video_device structure is *not* called, so
the caller is responsible for freeing any data. Usually that means that
- you call video_device_release() on failure.
-
- Also note that vdev->minor is set to -1 if the registration failed. */
+ you call video_device_release() on failure. */
int __must_check video_register_device(struct video_device *vdev, int type, int nr);
/* Same as video_register_device, but no warning is issued if the desired
@@ -106,7 +104,7 @@ int __must_check video_register_device(struct video_device *vdev, int type, int
int __must_check video_register_device_no_warn(struct video_device *vdev, int type, int nr);
/* Unregister video devices. Will do nothing if vdev == NULL or
- vdev->minor < 0. */
+ video_is_registered() returns false. */
void video_unregister_device(struct video_device *vdev);
/* helper functions to alloc/release struct video_device, the
@@ -141,9 +139,14 @@ static inline void *video_drvdata(struct file *file)
return video_get_drvdata(video_devdata(file));
}
-static inline int video_is_unregistered(struct video_device *vdev)
+static inline const char *video_device_node_name(struct video_device *vdev)
+{
+ return dev_name(&vdev->dev);
+}
+
+static inline int video_is_registered(struct video_device *vdev)
{
- return test_bit(V4L2_FL_UNREGISTERED, &vdev->flags);
+ return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
}
#endif /* _V4L2_DEV_H */
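Inverting the flag from "unregistered" to "registered" means a freshly allocated video_device starts out blocked until video_register_device() succeeds, and drivers are expected to report the device node name rather than a minor number. A hedged usage sketch; the mydrv_* names and the log message are illustrative:

	#include <linux/kernel.h>
	#include <media/v4l2-dev.h>

	static int mydrv_register(struct video_device *vdev)
	{
		int ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);

		if (ret < 0)
			return ret;	/* release() is not called on failure */

		/* node name replaces the old vdev->minor reporting */
		pr_info("mydrv: registered as %s\n", video_device_node_name(vdev));
		return 0;
	}

	static void mydrv_stop(struct video_device *vdev)
	{
		if (video_is_registered(vdev))
			video_unregister_device(vdev);
	}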
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 7a4529defa8..e8ba0f2efba 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -239,6 +239,21 @@ struct v4l2_ioctl_ops {
int (*vidioc_enum_frameintervals) (struct file *file, void *fh,
struct v4l2_frmivalenum *fival);
+ /* DV Timings IOCTLs */
+ int (*vidioc_enum_dv_presets) (struct file *file, void *fh,
+ struct v4l2_dv_enum_preset *preset);
+
+ int (*vidioc_s_dv_preset) (struct file *file, void *fh,
+ struct v4l2_dv_preset *preset);
+ int (*vidioc_g_dv_preset) (struct file *file, void *fh,
+ struct v4l2_dv_preset *preset);
+ int (*vidioc_query_dv_preset) (struct file *file, void *fh,
+ struct v4l2_dv_preset *qpreset);
+ int (*vidioc_s_dv_timings) (struct file *file, void *fh,
+ struct v4l2_dv_timings *timings);
+ int (*vidioc_g_dv_timings) (struct file *file, void *fh,
+ struct v4l2_dv_timings *timings);
+
/* For other private ioctls */
long (*vidioc_default) (struct file *file, void *fh,
int cmd, void *arg);
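Drivers opt into the DV preset/timings ioctls by filling these callbacks in their v4l2_ioctl_ops. A minimal sketch for a bridge driver that simply forwards the preset to its subdevice; struct mydrv_state and its sd field are assumptions about how such a driver is typically wired:

	#include <media/v4l2-dev.h>
	#include <media/v4l2-ioctl.h>
	#include <media/v4l2-subdev.h>

	/* Hypothetical bridge driver forwarding S_DV_PRESET to its subdevice. */
	static int mydrv_s_dv_preset(struct file *file, void *fh,
				     struct v4l2_dv_preset *preset)
	{
		struct mydrv_state *state = video_drvdata(file);

		return v4l2_subdev_call(state->sd, video, s_dv_preset, preset);
	}

	static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
		/* ... existing callbacks ... */
		.vidioc_s_dv_preset = mydrv_s_dv_preset,
	};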
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
new file mode 100644
index 00000000000..0dbe02ada25
--- /dev/null
+++ b/include/media/v4l2-mediabus.h
@@ -0,0 +1,61 @@
+/*
+ * Media Bus API header
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef V4L2_MEDIABUS_H
+#define V4L2_MEDIABUS_H
+
+/*
+ * These pixel codes uniquely identify data formats on the media bus. Mostly
+ * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is
+ * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the
+ * data format is fixed. Additionally, "2X8" means that one pixel is transferred
+ * in two 8-bit samples, "BE" or "LE" specify in which order those samples are
+ * transferred over the bus: "LE" means that the least significant bits are
+ * transferred first, "BE" means that the most significant bits are transferred
+ * first, and "PADHI" and "PADLO" define which bits - low or high, in the
+ * incomplete high byte, are filled with padding bits.
+ */
+enum v4l2_mbus_pixelcode {
+ V4L2_MBUS_FMT_FIXED = 1,
+ V4L2_MBUS_FMT_YUYV8_2X8_LE,
+ V4L2_MBUS_FMT_YVYU8_2X8_LE,
+ V4L2_MBUS_FMT_YUYV8_2X8_BE,
+ V4L2_MBUS_FMT_YVYU8_2X8_BE,
+ V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE,
+ V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE,
+ V4L2_MBUS_FMT_RGB565_2X8_LE,
+ V4L2_MBUS_FMT_RGB565_2X8_BE,
+ V4L2_MBUS_FMT_SBGGR8_1X8,
+ V4L2_MBUS_FMT_SBGGR10_1X10,
+ V4L2_MBUS_FMT_GREY8_1X8,
+ V4L2_MBUS_FMT_Y10_1X10,
+ V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE,
+ V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE,
+ V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE,
+ V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE,
+};
+
+/**
+ * struct v4l2_mbus_framefmt - frame format on the media bus
+ * @width: frame width
+ * @height: frame height
+ * @code: data format code
+ * @field: used interlacing type
+ * @colorspace: colorspace of the data
+ */
+struct v4l2_mbus_framefmt {
+ __u32 width;
+ __u32 height;
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_field field;
+ enum v4l2_colorspace colorspace;
+};
+
+#endif
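A subdevice's *_mbus_fmt handlers exchange this structure instead of a v4l2_pix_format, so nothing about the memory layout (bytesperline, sizeimage) is implied, only what travels on the bus. A hedged sketch of a sensor filling it; the VGA resolution and Bayer code are illustrative:

	/* Illustrative sensor g_mbus_fmt: a VGA Bayer sensor on an 8-bit bus. */
	static int sensor_g_mbus_fmt(struct v4l2_subdev *sd,
				     struct v4l2_mbus_framefmt *mf)
	{
		mf->width	= 640;
		mf->height	= 480;
		mf->code	= V4L2_MBUS_FMT_SBGGR8_1X8;
		mf->field	= V4L2_FIELD_NONE;
		mf->colorspace	= V4L2_COLORSPACE_SRGB;
		return 0;
	}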
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 00bf1760845..9ba99cd39ee 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -22,6 +22,7 @@
#define _V4L2_SUBDEV_H
#include <media/v4l2-common.h>
+#include <media/v4l2-mediabus.h>
/* generic v4l2_device notify callback notification values */
#define V4L2_SUBDEV_IR_RX_NOTIFY _IOW('v', 0, u32)
@@ -207,7 +208,7 @@ struct v4l2_subdev_audio_ops {
s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
video input devices.
- s_crystal_freq: sets the frequency of the crystal used to generate the
+ s_crystal_freq: sets the frequency of the crystal used to generate the
clocks in Hz. An extra flags field allows device specific configuration
regarding clock frequency dividers, etc. If not used, then set flags
to 0. If the frequency is not supported, then -EINVAL is returned.
@@ -217,6 +218,26 @@ struct v4l2_subdev_audio_ops {
s_routing: see s_routing in audio_ops, except this version is for video
devices.
+
+ s_dv_preset: set dv (Digital Video) preset in the sub device. Similar to
+ s_std()
+
+ query_dv_preset: query dv preset in the sub device. This is similar to
+ querystd()
+
+ s_dv_timings(): Set custom dv timings in the sub device. This is used
+ when sub device is capable of setting detailed timing information
+ in the hardware to generate/detect the video signal.
+
+ g_dv_timings(): Get custom dv timings in the sub device.
+
+ enum_mbus_fmt: enumerate pixel formats, provided by a video data source
+
+ g_mbus_fmt: get the current pixel format, provided by a video data source
+
+ try_mbus_fmt: try to set a pixel format on a video data source
+
+ s_mbus_fmt: set a pixel format on a video data source
*/
struct v4l2_subdev_video_ops {
int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
@@ -240,6 +261,33 @@ struct v4l2_subdev_video_ops {
int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival);
+ int (*s_dv_preset)(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset);
+ int (*query_dv_preset)(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset);
+ int (*s_dv_timings)(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings);
+ int (*g_dv_timings)(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings);
+ int (*enum_mbus_fmt)(struct v4l2_subdev *sd, int index,
+ enum v4l2_mbus_pixelcode *code);
+ int (*g_mbus_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt);
+ int (*try_mbus_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt);
+ int (*s_mbus_fmt)(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt);
+};
+
+/**
+ * struct v4l2_subdev_sensor_ops - v4l2-subdev sensor operations
+ * @g_skip_top_lines: number of lines at the top of the image to be skipped.
+ * This is needed for some sensors, which always corrupt
+ * several top lines of the output image, or which send their
+ * metadata in them.
+ */
+struct v4l2_subdev_sensor_ops {
+ int (*g_skip_top_lines)(struct v4l2_subdev *sd, u32 *lines);
};
/*
@@ -326,11 +374,12 @@ struct v4l2_subdev_ir_ops {
};
struct v4l2_subdev_ops {
- const struct v4l2_subdev_core_ops *core;
- const struct v4l2_subdev_tuner_ops *tuner;
- const struct v4l2_subdev_audio_ops *audio;
- const struct v4l2_subdev_video_ops *video;
- const struct v4l2_subdev_ir_ops *ir;
+ const struct v4l2_subdev_core_ops *core;
+ const struct v4l2_subdev_tuner_ops *tuner;
+ const struct v4l2_subdev_audio_ops *audio;
+ const struct v4l2_subdev_video_ops *video;
+ const struct v4l2_subdev_ir_ops *ir;
+ const struct v4l2_subdev_sensor_ops *sensor;
};
#define V4L2_SUBDEV_NAME_SIZE 32
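The new sensor ops group is wired up like the existing ones. A sketch of a sensor driver exporting g_skip_top_lines alongside its other ops; the two-line value and the sensor_core_ops/sensor_video_ops tables are assumptions for illustration:

	/* Example: sensor that always corrupts its first two output lines. */
	static int sensor_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines)
	{
		*lines = 2;
		return 0;
	}

	static const struct v4l2_subdev_sensor_ops sensor_sensor_ops = {
		.g_skip_top_lines = sensor_g_skip_top_lines,
	};

	static const struct v4l2_subdev_ops sensor_subdev_ops = {
		.core	= &sensor_core_ops,	/* assumed defined elsewhere */
		.video	= &sensor_video_ops,	/* assumed defined elsewhere */
		.sensor	= &sensor_sensor_ops,
	};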
diff --git a/include/net/compat.h b/include/net/compat.h
index 3c7d4e38fa1..28d5428ec6a 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -46,7 +46,7 @@ extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsi
extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned);
extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
unsigned, unsigned,
- struct timespec __user *);
+ struct compat_timespec __user *);
extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
diff --git a/include/net/dst.h b/include/net/dst.h
index 387cb3cfde7..39c4a5963e1 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -113,7 +113,7 @@ dst_metric(const struct dst_entry *dst, int metric)
static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
- return (dst ? dst_metric(dst, RTAX_FEATURES) & feature : 0);
+ return dst_metric(dst, RTAX_FEATURES) & feature;
}
static inline u32 dst_mtu(const struct dst_entry *dst)
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 92838d3a1ab..e46674d5dae 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -53,7 +53,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk)
return inet6_ehashfn(net, laddr, lport, faddr, fport);
}
-extern void __inet6_hash(struct sock *sk);
+extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
/*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 41cbddd25b7..74358d1b3f4 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -251,7 +251,7 @@ extern void inet_put_port(struct sock *sk);
void inet_hashinfo_init(struct inet_hashinfo *h);
-extern void __inet_hash_nolisten(struct sock *sk);
+extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
extern void inet_hash(struct sock *sk);
extern void inet_unhash(struct sock *sk);
@@ -391,10 +391,12 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
}
extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk, u32 port_offset,
+ struct sock *sk,
+ u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **),
- void (*hash)(struct sock *sk));
+ int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
#endif /* _INET_HASHTABLES_H */
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index b801ade2295..79f67eae8a7 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -201,6 +201,9 @@ extern void inet_twsk_put(struct inet_timewait_sock *tw);
extern int inet_twsk_unhash(struct inet_timewait_sock *tw);
+extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo);
+
extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
const int state);
diff --git a/include/net/ip.h b/include/net/ip.h
index e6b9d12d5f6..85108cfbb1a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -337,6 +337,7 @@ enum ip_defrag_users {
IP_DEFRAG_CALL_RA_CHAIN,
IP_DEFRAG_CONNTRACK_IN,
IP_DEFRAG_CONNTRACK_OUT,
+ IP_DEFRAG_CONNTRACK_BRIDGE_IN,
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 92db8617d18..ccab5946c83 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -350,8 +350,16 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1,
struct inet_frag_queue;
+enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER,
+ IP6_DEFRAG_CONNTRACK_IN,
+ IP6_DEFRAG_CONNTRACK_OUT,
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+};
+
struct ip6_create_arg {
__be32 id;
+ u32 user;
struct in6_addr *src;
struct in6_addr *dst;
};
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0302f31a2fb..b0173202cad 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -88,12 +88,7 @@ struct neigh_statistics {
unsigned long unres_discards; /* number of unresolved drops */
};
-#define NEIGH_CACHE_STAT_INC(tbl, field) \
- do { \
- preempt_disable(); \
- (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
- preempt_enable(); \
- } while (0)
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
struct neighbour {
struct neighbour *next;
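The open-coded preempt_disable()/per_cpu_ptr()/preempt_enable() sequence collapses into this_cpu_inc(), which is preemption-safe by itself (on x86 it compiles to a single segment-prefixed increment). A conceptual before/after, using the lookups counter as the example field:

	/* old expansion */
	preempt_disable();
	per_cpu_ptr(tbl->stats, smp_processor_id())->lookups++;
	preempt_enable();

	/* new expansion - same effect, no explicit preemption handling */
	this_cpu_inc(tbl->stats->lookups);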
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index abc55ad75c2..1ee717eb5b0 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
extern int nf_ct_frag6_init(void);
extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
struct net_device *in,
struct net_device *out,
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5cf7270e3ff..a0904adfb8f 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
#define NF_CT_STAT_INC(net, count) \
- (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+ __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) \
do { \
local_bh_disable(); \
- per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \
+ __this_cpu_inc((net)->ct.stat->count); \
local_bh_enable(); \
} while (0)
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8c842e06bec..f0d756f2ac9 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -136,45 +136,31 @@ struct linux_xfrm_mib {
#define SNMP_STAT_BHPTR(name) (name[0])
#define SNMP_STAT_USRPTR(name) (name[1])
-#define SNMP_INC_STATS_BH(mib, field) \
- (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
-#define SNMP_INC_STATS_USER(mib, field) \
- do { \
- per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \
- put_cpu(); \
- } while (0)
-#define SNMP_INC_STATS(mib, field) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \
- put_cpu(); \
- } while (0)
-#define SNMP_DEC_STATS(mib, field) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
- put_cpu(); \
- } while (0)
-#define SNMP_ADD_STATS(mib, field, addend) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
- put_cpu(); \
- } while (0)
-#define SNMP_ADD_STATS_BH(mib, field, addend) \
- (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend) \
- do { \
- per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
- put_cpu(); \
- } while (0)
+#define SNMP_INC_STATS_BH(mib, field) \
+ __this_cpu_inc(mib[0]->mibs[field])
+#define SNMP_INC_STATS_USER(mib, field) \
+ this_cpu_inc(mib[1]->mibs[field])
+#define SNMP_INC_STATS(mib, field) \
+ this_cpu_inc(mib[!in_softirq()]->mibs[field])
+#define SNMP_DEC_STATS(mib, field) \
+ this_cpu_dec(mib[!in_softirq()]->mibs[field])
+#define SNMP_ADD_STATS_BH(mib, field, addend) \
+ __this_cpu_add(mib[0]->mibs[field], addend)
+#define SNMP_ADD_STATS_USER(mib, field, addend) \
+ this_cpu_add(mib[1]->mibs[field], addend)
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\
+ __typeof__(mib[0]) ptr; \
+ preempt_disable(); \
+ ptr = this_cpu_ptr((mib)[!in_softirq()]); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
- put_cpu(); \
+ preempt_enable(); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
- __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\
+ __typeof__(mib[0]) ptr = \
+ __this_cpu_ptr((mib)[!in_softirq()]); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
} while (0)
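Note the split the conversion preserves: the _BH variants use __this_cpu_*, which assumes the caller is already non-preemptible (softirq context), while the _USER variants use this_cpu_*, which handles preemption itself; SNMP_UPD_PO_STATS still brackets its two updates with preempt_disable() so the packet and octet counters land on the same CPU. A conceptual sketch with placeholder names:

	/* softirq path: preemption is already off, the raw variant is enough */
	#define EXAMPLE_STATS_BH(mib, field)	__this_cpu_inc(mib[0]->mibs[field])

	/* process context: this_cpu_inc() disables preemption as needed */
	#define EXAMPLE_STATS_USER(mib, field)	this_cpu_inc(mib[1]->mibs[field])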
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5740b85bc5a..34f5cc24d90 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -408,8 +408,7 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
extern void tcp_parse_options(struct sk_buff *skb,
struct tcp_options_received *opt_rx,
u8 **hvpp,
- int estab,
- struct dst_entry *dst);
+ int estab);
extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
@@ -1261,29 +1260,6 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
-/* This function calculates a "timeout" which is equivalent to the timeout of a
- * TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
- */
-static inline bool retransmits_timed_out(const struct sock *sk,
- unsigned int boundary)
-{
- unsigned int timeout, linear_backoff_thresh;
-
- if (!inet_csk(sk)->icsk_retransmits)
- return false;
-
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
-
- if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
- else
- timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
-
- return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout;
-}
-
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
return sk->sk_send_head;
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index afc2bfb9e91..75fa3530345 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -126,8 +126,8 @@ typedef struct irq_req_t {
#define IRQ_TYPE_TIME 0x01
#define IRQ_TYPE_DYNAMIC_SHARING 0x02
#define IRQ_FORCED_PULSE 0x04
-#define IRQ_FIRST_SHARED 0x08
-//#define IRQ_HANDLE_PRESENT 0x10
+#define IRQ_FIRST_SHARED 0x08 /* unused */
+#define IRQ_HANDLE_PRESENT 0x10 /* unused */
#define IRQ_PULSE_ALLOCATED 0x100
/* Bits in IRQInfo1 field */
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index d403c12f797..ee148573c11 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -82,7 +82,7 @@ struct pcmcia_device {
/* the hardware "function" device; certain subdevices can
* share one hardware "function" device. */
u8 func;
- struct config_t* function_config;
+ struct config_t *function_config;
struct list_head socket_device_list;
@@ -121,14 +121,14 @@ struct pcmcia_device {
u16 manf_id;
u16 card_id;
- char * prod_id[4];
+ char *prod_id[4];
u64 dma_mask;
struct device dev;
#ifdef CONFIG_PCMCIA_IOCTL
/* device driver wanted by cardmgr */
- struct pcmcia_driver * cardmgr;
+ struct pcmcia_driver *cardmgr;
#endif
/* data private to drivers */
diff --git a/include/pcmcia/mem_op.h b/include/pcmcia/mem_op.h
index 8d19b9401a5..0fa06e5d537 100644
--- a/include/pcmcia/mem_op.h
+++ b/include/pcmcia/mem_op.h
@@ -15,8 +15,8 @@
#ifndef _LINUX_MEM_OP_H
#define _LINUX_MEM_OP_H
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
/*
If UNSAFE_MEMCPY is defined, we use the (optimized) system routines
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 7c23be706f1..cbfba885eb8 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -154,7 +154,7 @@ struct pcmcia_socket {
struct list_head socket_list;
struct completion socket_released;
- /* deprecated */
+ /* deprecated */
unsigned int sock; /* socket number */
@@ -164,7 +164,7 @@ struct pcmcia_socket {
u_int map_size;
u_int io_offset;
u_int pci_irq;
- struct pci_dev * cb_dev;
+ struct pci_dev *cb_dev;
/* socket setup is done so resources should be able to be allocated.
@@ -179,9 +179,9 @@ struct pcmcia_socket {
u8 reserved:5;
/* socket operations */
- struct pccard_operations * ops;
- struct pccard_resource_ops * resource_ops;
- void * resource_data;
+ struct pccard_operations *ops;
+ struct pccard_resource_ops *resource_ops;
+ void *resource_data;
/* Zoom video behaviour is so chip specific it's not worth adding
this to _ops */
@@ -245,7 +245,7 @@ struct pcmcia_socket {
/* cardbus (32-bit) */
#ifdef CONFIG_CARDBUS
- struct resource * cb_cis_res;
+ struct resource *cb_cis_res;
void __iomem *cb_cis_virt;
#endif /* CONFIG_CARDBUS */
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 483057b2f4b..fa0d52b8e62 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -36,6 +36,7 @@
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <rdma/ib_verbs.h>
@@ -60,8 +61,8 @@ struct rdma_dev_addr {
unsigned char src_dev_addr[MAX_ADDR_LEN];
unsigned char dst_dev_addr[MAX_ADDR_LEN];
unsigned char broadcast[MAX_ADDR_LEN];
- enum rdma_node_type dev_type;
- struct net_device *src_dev;
+ unsigned short dev_type;
+ int bound_dev_if;
};
/**
@@ -121,40 +122,29 @@ static inline void ib_addr_get_mgid(struct rdma_dev_addr *dev_addr,
memcpy(gid, dev_addr->broadcast + 4, sizeof *gid);
}
-static inline void ib_addr_get_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
+static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
{
- memcpy(gid, dev_addr->src_dev_addr + 4, sizeof *gid);
+ return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0;
}
-static inline void ib_addr_set_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
+static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- memcpy(dev_addr->src_dev_addr + 4, gid, sizeof *gid);
+ memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid);
}
-static inline void ib_addr_get_dgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
+static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- memcpy(gid, dev_addr->dst_dev_addr + 4, sizeof *gid);
+ memcpy(dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid);
}
-static inline void ib_addr_set_dgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
+static inline void rdma_addr_get_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- memcpy(dev_addr->dst_dev_addr + 4, gid, sizeof *gid);
+ memcpy(gid, dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid);
}
-static inline void iw_addr_get_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
-{
- memcpy(gid, dev_addr->src_dev_addr, sizeof *gid);
-}
-
-static inline void iw_addr_get_dgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
+static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- memcpy(gid, dev_addr->dst_dev_addr, sizeof *gid);
+ memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid);
}
#endif /* IB_ADDR_H */
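The renamed helpers work for both link layers because rdma_addr_gid_offset() encodes where the GID sits in the device address: for ARPHRD_INFINIBAND (20-byte IPoIB-style addresses with QPN/flags first) the GID starts at byte 4, otherwise at byte 0. A small consumer sketch replacing the removed ib_addr_get_*/iw_addr_get_* pairs; the function name is illustrative:

	#include <rdma/ib_addr.h>

	/* Sketch: read both GIDs after address resolution, regardless of
	 * whether the port is InfiniBand or an iWARP/Ethernet device. */
	static void get_path_gids(struct rdma_dev_addr *dev_addr,
				  union ib_gid *sgid, union ib_gid *dgid)
	{
		rdma_addr_get_sgid(dev_addr, sgid);
		rdma_addr_get_dgid(dev_addr, dgid);
	}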
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 3841c1aff69..1082afaed15 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -379,4 +379,10 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
struct ib_ah_attr *ah_attr);
+/**
+ * ib_sa_unpack_path - Convert a path record from MAD format to struct
+ * ib_sa_path_rec.
+ */
+void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec);
+
#endif /* IB_SA_H */
diff --git a/include/rdma/ib_user_sa.h b/include/rdma/ib_user_sa.h
index 659120157e1..cfc7c9ba781 100644
--- a/include/rdma/ib_user_sa.h
+++ b/include/rdma/ib_user_sa.h
@@ -35,6 +35,22 @@
#include <linux/types.h>
+enum {
+ IB_PATH_GMP = 1,
+ IB_PATH_PRIMARY = (1<<1),
+ IB_PATH_ALTERNATE = (1<<2),
+ IB_PATH_OUTBOUND = (1<<3),
+ IB_PATH_INBOUND = (1<<4),
+ IB_PATH_INBOUND_REVERSE = (1<<5),
+ IB_PATH_BIDIRECTIONAL = IB_PATH_OUTBOUND | IB_PATH_INBOUND_REVERSE
+};
+
+struct ib_path_rec_data {
+ __u32 flags;
+ __u32 reserved;
+ __u32 path_rec[16];
+};
+
struct ib_user_path_rec {
__u8 dgid[16];
__u8 sgid[16];
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c179318edd9..09509edb1c5 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1425,6 +1425,11 @@ int ib_destroy_qp(struct ib_qp *qp);
* @send_wr: A list of work requests to post on the send queue.
* @bad_send_wr: On an immediate failure, this parameter will reference
* the work request that failed to be posted on the QP.
+ *
+ * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
+ * error is returned, the QP state shall not be affected,
+ * ib_post_send() will return an immediate error after queueing any
+ * earlier work requests in the list.
*/
static inline int ib_post_send(struct ib_qp *qp,
struct ib_send_wr *send_wr,
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
index c55705460b8..1d165022c02 100644
--- a/include/rdma/rdma_user_cm.h
+++ b/include/rdma/rdma_user_cm.h
@@ -215,12 +215,14 @@ struct rdma_ucm_event_resp {
/* Option levels */
enum {
- RDMA_OPTION_ID = 0
+ RDMA_OPTION_ID = 0,
+ RDMA_OPTION_IB = 1
};
/* Option details */
enum {
- RDMA_OPTION_ID_TOS = 0
+ RDMA_OPTION_ID_TOS = 0,
+ RDMA_OPTION_IB_PATH = 1
};
struct rdma_ucm_set_option {
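The new level/name pair is what lets userspace hand a pre-resolved path record to the kernel CM. A hedged userspace sketch built on librdmacm's rdma_set_option(); path_recs is assumed to point at one or more struct ib_path_rec_data entries (see the ib_user_sa.h hunk above), and obtaining them is out of scope here:

	#include <rdma/rdma_cma.h>

	static int set_ib_path(struct rdma_cm_id *id, void *path_recs, size_t len)
	{
		return rdma_set_option(id, RDMA_OPTION_IB, RDMA_OPTION_IB_PATH,
				       path_recs, len);
	}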
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 7394e3bc8f4..ff92b46f515 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -28,6 +28,7 @@
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/kfifo.h>
#include <scsi/iscsi_proto.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -231,7 +232,7 @@ struct iscsi_conn {
};
struct iscsi_pool {
- struct kfifo *queue; /* FIFO Queue */
+ struct kfifo queue; /* FIFO Queue */
void **pool; /* Pool of elements */
int max; /* Max number of elements */
};
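The queue member changes from a kfifo pointer to an embedded kfifo, following the kfifo rework in this release: queues are set up with kfifo_alloc()/kfifo_init() on the embedded object and no longer carry an internal spinlock. A hedged sketch of setting up such a pool queue under the new API; the sizing policy is an assumption:

	#include <linux/kfifo.h>
	#include <scsi/libiscsi.h>

	/* Sketch: initialise the embedded kfifo as a queue of pointers. */
	static int pool_queue_setup(struct iscsi_pool *q, int max)
	{
		/* room for 'max' void * entries; kfifo rounds to a power of two */
		return kfifo_alloc(&q->queue, max * sizeof(void *), GFP_KERNEL);
	}

Enqueue and dequeue then go through kfifo_in(&q->queue, ...) and kfifo_out(&q->queue, ...), always passing the embedded object by address.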
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h
index 9e3182e659d..741ae7ed439 100644
--- a/include/scsi/libiscsi_tcp.h
+++ b/include/scsi/libiscsi_tcp.h
@@ -80,7 +80,7 @@ struct iscsi_tcp_task {
int data_offset;
struct iscsi_r2t_info *r2t; /* in progress solict R2T */
struct iscsi_pool r2tpool;
- struct kfifo *r2tqueue;
+ struct kfifo r2tqueue;
void *dd_data;
};
diff --git a/include/scsi/libsrp.h b/include/scsi/libsrp.h
index ba615e4c1d7..07e3adde21d 100644
--- a/include/scsi/libsrp.h
+++ b/include/scsi/libsrp.h
@@ -21,7 +21,7 @@ struct srp_buf {
struct srp_queue {
void *pool;
void *items;
- struct kfifo *queue;
+ struct kfifo queue;
spinlock_t lock;
};
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index 39d6d109715..a8f37012663 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -142,6 +142,7 @@ struct osd_request {
struct _osd_io_info {
struct bio *bio;
u64 total_bytes;
+ u64 residual;
struct request *req;
struct _osd_req_data_segment *last_seg;
u8 *pad_buff;
@@ -150,12 +151,14 @@ struct osd_request {
gfp_t alloc_flags;
unsigned timeout;
unsigned retries;
+ unsigned sense_len;
u8 sense[OSD_MAX_SENSE_LEN];
enum osd_attributes_mode attributes_mode;
osd_req_done_fn *async_done;
void *async_private;
int async_error;
+ int req_errors;
};
static inline bool osd_req_is_ver1(struct osd_request *or)
@@ -297,8 +300,6 @@ enum osd_err_priority {
};
struct osd_sense_info {
- u64 out_resid; /* Zero on success otherwise out residual */
- u64 in_resid; /* Zero on success otherwise in residual */
enum osd_err_priority osd_err_pri;
int key; /* one of enum scsi_sense_keys */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index e5ce87a0498..9496b965d62 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -301,8 +301,8 @@ TRACE_EVENT(itimer_state,
__entry->interval_usec = value->it_interval.tv_usec;
),
- TP_printk("which=%d expires=%lu it_value=%lu.%lu it_interval=%lu.%lu",
- __entry->which, __entry->expires,
+ TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
+ __entry->which, (unsigned long long)__entry->expires,
__entry->value_sec, __entry->value_usec,
__entry->interval_sec, __entry->interval_usec)
);
@@ -331,8 +331,8 @@ TRACE_EVENT(itimer_expire,
__entry->pid = pid_nr(pid);
),
- TP_printk("which=%d pid=%d now=%lu", __entry->which,
- (int) __entry->pid, __entry->now)
+ TP_printk("which=%d pid=%d now=%llu", __entry->which,
+ (int) __entry->pid, (unsigned long long)__entry->now)
);
#endif /* _TRACE_TIMER_H */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index d1b3de9c1a7..73523151a73 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -436,10 +436,6 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
struct ftrace_raw_##call field; \
int ret; \
\
- ret = trace_define_common_fields(event_call); \
- if (ret) \
- return ret; \
- \
tstruct; \
\
return ret; \
@@ -559,13 +555,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
*
* static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
* {
- * int ret;
- *
- * ret = register_trace_<call>(ftrace_event_<call>);
- * if (!ret)
- * pr_info("event trace: Could not activate trace point "
- * "probe to <call>");
- * return ret;
+ * return register_trace_<call>(ftrace_event_<call>);
* }
*
* static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -623,23 +613,12 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
- * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
- * {
- * int id;
- *
- * id = register_ftrace_event(&ftrace_event_type_<call>);
- * if (!id)
- * return -ENODEV;
- * event_<call>.id = id;
- * return 0;
- * }
- *
* static struct ftrace_event_call __used
* __attribute__((__aligned__(4)))
* __attribute__((section("_ftrace_events"))) event_<call> = {
* .name = "<call>",
* .system = "<system>",
- * .raw_init = ftrace_raw_init_event_<call>,
+ * .raw_init = trace_event_raw_init,
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
* .show_format = ftrace_format_<call>,
@@ -647,13 +626,9 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
*
*/
-#undef TP_FMT
-#define TP_FMT(fmt, args...) fmt "\n", ##args
-
#ifdef CONFIG_EVENT_PROFILE
#define _TRACE_PROFILE_INIT(call) \
- .profile_count = ATOMIC_INIT(-1), \
.profile_enable = ftrace_profile_enable_##call, \
.profile_disable = ftrace_profile_disable_##call,
@@ -728,13 +703,7 @@ static void ftrace_raw_event_##call(proto) \
\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{ \
- int ret; \
- \
- ret = register_trace_##call(ftrace_raw_event_##call); \
- if (ret) \
- pr_info("event trace: Could not activate trace point " \
- "probe to " #call "\n"); \
- return ret; \
+ return register_trace_##call(ftrace_raw_event_##call); \
} \
\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
@@ -744,19 +713,7 @@ static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
\
static struct trace_event ftrace_event_type_##call = { \
.trace = ftrace_raw_output_##call, \
-}; \
- \
-static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
-{ \
- int id; \
- \
- id = register_ftrace_event(&ftrace_event_type_##call); \
- if (!id) \
- return -ENODEV; \
- event_##call.id = id; \
- INIT_LIST_HEAD(&event_##call.fields); \
- return 0; \
-}
+};
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
@@ -776,7 +733,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.system = __stringify(TRACE_SYSTEM), \
.event = &ftrace_event_type_##call, \
- .raw_init = ftrace_raw_init_event_##call, \
+ .raw_init = trace_event_raw_init, \
.regfunc = ftrace_raw_reg_event_##call, \
.unregfunc = ftrace_raw_unreg_event_##call, \
.show_format = ftrace_format_##template, \
@@ -793,7 +750,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.system = __stringify(TRACE_SYSTEM), \
.event = &ftrace_event_type_##call, \
- .raw_init = ftrace_raw_init_event_##call, \
+ .raw_init = trace_event_raw_init, \
.regfunc = ftrace_raw_reg_event_##call, \
.unregfunc = ftrace_raw_unreg_event_##call, \
.show_format = ftrace_format_##call, \
@@ -953,7 +910,6 @@ end: \
perf_swevent_put_recursion_context(rctx); \
end_recursion: \
local_irq_restore(irq_flags); \
- \
}
#undef DEFINE_EVENT
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index c051a50ed52..89d43b3d4cb 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -38,6 +38,7 @@ struct da8xx_lcdc_platform_data {
const char manu_name[10];
void *controller_data;
const char type[25];
+ void (*panel_power_ctrl)(int);
};
struct lcd_ctrl_config {
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 25144ab22b9..28820545771 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -50,6 +50,8 @@ struct sh_mobile_lcdc_board_cfg {
void *board_data;
int (*setup_sys)(void *board_data, void *sys_ops_handle,
struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
+ void (*start_transfer)(void *board_data, void *sys_ops_handle,
+ struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
void (*display_on)(void *board_data);
void (*display_off)(void *board_data);
};
diff --git a/include/xen/xen.h b/include/xen/xen.h
new file mode 100644
index 00000000000..a16402418d3
--- /dev/null
+++ b/include/xen/xen.h
@@ -0,0 +1,32 @@
+#ifndef _XEN_XEN_H
+#define _XEN_XEN_H
+
+enum xen_domain_type {
+ XEN_NATIVE, /* running on bare hardware */
+ XEN_PV_DOMAIN, /* running in a PV domain */
+ XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
+};
+
+#ifdef CONFIG_XEN
+extern enum xen_domain_type xen_domain_type;
+#else
+#define xen_domain_type XEN_NATIVE
+#endif
+
+#define xen_domain() (xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain() (xen_domain() && \
+ xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain() (xen_domain() && \
+ xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/interface/xen.h>
+#include <asm/xen/hypervisor.h>
+
+#define xen_initial_domain() (xen_pv_domain() && \
+ xen_start_info->flags & SIF_INITDOMAIN)
+#else /* !CONFIG_XEN_DOM0 */
+#define xen_initial_domain() (0)
+#endif /* CONFIG_XEN_DOM0 */
+
+#endif /* _XEN_XEN_H */
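Splitting these predicates into a standalone header lets generic code test for Xen without pulling in the full hypervisor interfaces. A minimal usage sketch; the driver name and messages are illustrative:

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <xen/xen.h>

	static int __init mydrv_init(void)
	{
		if (!xen_domain())
			return -ENODEV;		/* bare metal: nothing to do */

		if (xen_initial_domain())
			pr_info("mydrv: running in dom0\n");
		else if (xen_pv_domain())
			pr_info("mydrv: running as a PV guest\n");

		return 0;
	}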
diff --git a/init/Kconfig b/init/Kconfig
index 54c655ce9c0..a23da9f0180 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1079,6 +1079,28 @@ config SLOB
endchoice
+config MMAP_ALLOW_UNINITIALIZED
+ bool "Allow mmapped anonymous memory to be uninitialized"
+ depends on EMBEDDED && !MMU
+ default n
+ help
+ Normally, and according to the Linux spec, anonymous memory obtained
+	  from mmap() has its contents cleared before it is passed to
+ userspace. Enabling this config option allows you to request that
+	  mmap() skip that if it is given a MAP_UNINITIALIZED flag, thus
+ providing a huge performance boost. If this option is not enabled,
+ then the flag will be ignored.
+
+ This is taken advantage of by uClibc's malloc(), and also by
+ ELF-FDPIC binfmt's brk and stack allocator.
+
+ Because of the obvious security issues, this option should only be
+ enabled on embedded devices where you control what is run in
+ userspace. Since that isn't generally a problem on no-MMU systems,
+ it is normally safe to say Y here.
+
+ See Documentation/nommu-mmap.txt for more information.
+
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
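On a no-MMU kernel built with this option, userspace opts in per mapping. A hedged userspace sketch; MAP_UNINITIALIZED comes from the kernel's asm-generic/mman-common.h (the fallback definition below uses that value) and the flag silently degrades to zeroed memory when the option is disabled:

	#include <sys/mman.h>
	#include <stddef.h>

	#ifndef MAP_UNINITIALIZED
	#define MAP_UNINITIALIZED 0x4000000	/* value from asm-generic/mman-common.h */
	#endif

	/* Request anonymous memory whose contents may be left uninitialized. */
	void *fast_anon_alloc(size_t len)
	{
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED,
			       -1, 0);
		return p == MAP_FAILED ? NULL : p;	/* contents are undefined */
	}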
diff --git a/init/Makefile b/init/Makefile
index 4a243df426f..0bf677aa087 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -15,12 +15,8 @@ mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o
mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
-# files to be removed upon make clean
-clean-files := ../include/linux/compile.h
-
# dependencies on generated files need to be listed explicitly
-
-$(obj)/version.o: include/linux/compile.h
+$(obj)/version.o: include/generated/compile.h
# compile.h changes depending on hostname, generation number, etc,
# so we regenerate it always.
@@ -30,7 +26,7 @@ $(obj)/version.o: include/linux/compile.h
chk_compile.h = :
quiet_chk_compile.h = echo ' CHK $@'
silent_chk_compile.h = :
-include/linux/compile.h: FORCE
+include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
diff --git a/init/initramfs.c b/init/initramfs.c
index 4c00edc5968..b37d34beb90 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -413,7 +413,7 @@ static unsigned my_inptr; /* index of next byte to be processed in inbuf */
static char * __init unpack_to_rootfs(char *buf, unsigned len)
{
- int written;
+ int written, res;
decompress_fn decompress;
const char *compress_name;
static __initdata char msg_buf[64];
@@ -445,10 +445,12 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
}
this_header = 0;
decompress = decompress_method(buf, len, &compress_name);
- if (decompress)
- decompress(buf, len, NULL, flush_buffer, NULL,
+ if (decompress) {
+ res = decompress(buf, len, NULL, flush_buffer, NULL,
&my_inptr, error);
- else if (compress_name) {
+ if (res)
+ error("decompressor failed");
+ } else if (compress_name) {
if (!message) {
snprintf(msg_buf, sizeof msg_buf,
"compression method %s not configured",
diff --git a/init/main.c b/init/main.c
index 4051d75dd2d..dac44a9356a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -369,12 +369,6 @@ static void __init smp_init(void)
{
unsigned int cpu;
- /*
- * Set up the current CPU as possible to migrate to.
- * The other ones will be done by cpu_up/cpu_down()
- */
- set_cpu_active(smp_processor_id(), true);
-
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
@@ -486,6 +480,7 @@ static void __init boot_cpu_init(void)
int cpu = smp_processor_id();
/* Mark the boot cpu "present", "online" etc for SMP and UP case */
set_cpu_online(cpu, true);
+ set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
}
@@ -691,10 +686,10 @@ asmlinkage void __init start_kernel(void)
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
- ctor_fn_t *call = (ctor_fn_t *) __ctors_start;
+ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
- for (; call < (ctor_fn_t *) __ctors_end; call++)
- (*call)();
+ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+ (*fn)();
#endif
}
@@ -755,10 +750,10 @@ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
static void __init do_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __early_initcall_end; call < __initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __early_initcall_end; fn < __initcall_end; fn++)
+ do_one_initcall(*fn);
/* Make sure there is no pending stuff from the initcall sequence */
flush_scheduled_work();
@@ -785,10 +780,10 @@ static void __init do_basic_setup(void)
static void __init do_pre_smp_initcalls(void)
{
- initcall_t *call;
+ initcall_t *fn;
- for (call = __initcall_start; call < __early_initcall_end; call++)
- do_one_initcall(*call);
+ for (fn = __initcall_start; fn < __early_initcall_end; fn++)
+ do_one_initcall(*fn);
}
static void run_init_process(char *init_filename)
diff --git a/init/version.c b/init/version.c
index 52a8b98642b..adff586401a 100644
--- a/init/version.c
+++ b/init/version.c
@@ -6,11 +6,11 @@
* May be freely distributed as part of Linux.
*/
-#include <linux/compile.h>
+#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/version.h>
#ifndef CONFIG_KALLSYMS
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ee9d69707c0..c79bd57353e 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -32,7 +32,6 @@
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
-#include <linux/ima.h>
#include <net/sock.h>
#include "util.h"
@@ -734,7 +733,6 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
error = PTR_ERR(filp);
goto out_putfd;
}
- ima_counts_get(filp);
fd_install(fd, filp);
goto out_upsem;
diff --git a/ipc/msg.c b/ipc/msg.c
index 085bd58f2f0..af42ef8900a 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -125,6 +125,7 @@ void msg_init_ns(struct ipc_namespace *ns)
void msg_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &msg_ids(ns), freeque);
+ idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
diff --git a/ipc/sem.c b/ipc/sem.c
index 87c2b641fd7..dbef95b1594 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -129,6 +129,7 @@ void sem_init_ns(struct ipc_namespace *ns)
void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
+ idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif
@@ -240,6 +241,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
+ int i;
if (!nsems)
return -EINVAL;
@@ -272,6 +274,11 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
ns->used_sems += nsems;
sma->sem_base = (struct sem *) &sma[1];
+
+ for (i = 0; i < nsems; i++)
+ INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+
+ sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
@@ -397,63 +404,109 @@ undo:
return result;
}
-/* Go through the pending queue for the indicated semaphore
- * looking for tasks that can be completed.
+/*
+ * Wake up a process waiting on the sem queue with a given error.
+ * The queue is invalid (may not be accessed) after the function returns.
*/
-static void update_queue (struct sem_array * sma)
+static void wake_up_sem_queue(struct sem_queue *q, int error)
{
- int error;
- struct sem_queue * q;
+ /*
+ * Hold preempt off so that we don't get preempted and have the
+ * wakee busy-wait until we're scheduled back on. We're holding
+ * locks here so it may not strictly be needed, however if the
+ * locks become preemptible then this prevents such a problem.
+ */
+ preempt_disable();
+ q->status = IN_WAKEUP;
+ wake_up_process(q->sleeper);
+ /* hands-off: q can disappear immediately after writing q->status. */
+ smp_wmb();
+ q->status = error;
+ preempt_enable();
+}
+
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+ list_del(&q->list);
+ if (q->nsops == 1)
+ list_del(&q->simple_list);
+ else
+ sma->complex_count--;
+}
+
+
+/**
+ * update_queue(sma, semnum): Look for tasks that can be completed.
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ *
+ * update_queue must be called after a semaphore in a semaphore array
+ * was modified. If multiple semaphore were modified, then @semnum
+ * must be set to -1.
+ */
+static void update_queue(struct sem_array *sma, int semnum)
+{
+ struct sem_queue *q;
+ struct list_head *walk;
+ struct list_head *pending_list;
+ int offset;
+
+ /* if there are complex operations around, then knowing the semaphore
+ * that was modified doesn't help us. Assume that multiple semaphores
+ * were modified.
+ */
+ if (sma->complex_count)
+ semnum = -1;
+
+ if (semnum == -1) {
+ pending_list = &sma->sem_pending;
+ offset = offsetof(struct sem_queue, list);
+ } else {
+ pending_list = &sma->sem_base[semnum].sem_pending;
+ offset = offsetof(struct sem_queue, simple_list);
+ }
+
+again:
+ walk = pending_list->next;
+ while (walk != pending_list) {
+ int error, alter;
+
+ q = (struct sem_queue *)((char *)walk - offset);
+ walk = walk->next;
+
+ /* If we are scanning the single sop, per-semaphore list of
+ * one semaphore and that semaphore is 0, then it is not
+ * necessary to scan the "alter" entries: simple increments
+ * that affect only one entry succeed immediately and cannot
+ * be in the per semaphore pending queue, and decrements
+ * cannot be successful if the value is already 0.
+ */
+ if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
+ q->alter)
+ break;
- q = list_entry(sma->sem_pending.next, struct sem_queue, list);
- while (&q->list != &sma->sem_pending) {
error = try_atomic_semop(sma, q->sops, q->nsops,
q->undo, q->pid);
/* Does q->sleeper still need to sleep? */
- if (error <= 0) {
- struct sem_queue *n;
-
- /*
- * Continue scanning. The next operation
- * that must be checked depends on the type of the
- * completed operation:
- * - if the operation modified the array, then
- * restart from the head of the queue and
- * check for threads that might be waiting
- * for semaphore values to become 0.
- * - if the operation didn't modify the array,
- * then just continue.
- * The order of list_del() and reading ->next
- * is crucial: In the former case, the list_del()
- * must be done first [because we might be the
- * first entry in ->sem_pending], in the latter
- * case the list_del() must be done last
- * [because the list is invalid after the list_del()]
- */
- if (q->alter) {
- list_del(&q->list);
- n = list_entry(sma->sem_pending.next,
- struct sem_queue, list);
- } else {
- n = list_entry(q->list.next, struct sem_queue,
- list);
- list_del(&q->list);
- }
-
- /* wake up the waiting thread */
- q->status = IN_WAKEUP;
+ if (error > 0)
+ continue;
- wake_up_process(q->sleeper);
- /* hands-off: q will disappear immediately after
- * writing q->status.
- */
- smp_wmb();
- q->status = error;
- q = n;
- } else {
- q = list_entry(q->list.next, struct sem_queue, list);
- }
+ unlink_queue(sma, q);
+
+ /*
+ * The next operation that must be checked depends on the type
+ * of the completed operation:
+ * - if the operation modified the array, then restart from the
+ * head of the queue and check for threads that might be
+ * waiting for the new semaphore values.
+ * - if the operation didn't modify the array, then just
+ * continue.
+ */
+ alter = q->alter;
+ wake_up_sem_queue(q, error);
+ if (alter && !error)
+ goto again;
}
}
@@ -533,12 +586,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Wake up all pending processes and let them fail with EIDRM. */
list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
- list_del(&q->list);
-
- q->status = IN_WAKEUP;
- wake_up_process(q->sleeper); /* doesn't sleep */
- smp_wmb();
- q->status = -EIDRM; /* hands-off q */
+ unlink_queue(sma, q);
+ wake_up_sem_queue(q, -EIDRM);
}
/* Remove the semaphore set from the IDR */
@@ -575,7 +624,7 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in,
static int semctl_nolock(struct ipc_namespace *ns, int semid,
int cmd, int version, union semun arg)
{
- int err = -EINVAL;
+ int err;
struct sem_array *sma;
switch(cmd) {
@@ -652,7 +701,6 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
default:
return -EINVAL;
}
- return err;
out_unlock:
sem_unlock(sma);
return err;
@@ -759,7 +807,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
- update_queue(sma);
+ update_queue(sma, -1);
err = 0;
goto out_unlock;
}
@@ -801,7 +849,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
curr->sempid = task_tgid_vnr(current);
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
- update_queue(sma);
+ update_queue(sma, semnum);
err = 0;
goto out_unlock;
}
@@ -961,17 +1009,31 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
return 0;
}
-static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
- struct sem_undo *walk;
+ struct sem_undo *un;
- list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
- if (walk->semid == semid)
- return walk;
+ list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
+ if (un->semid == semid)
+ return un;
}
return NULL;
}
+static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
+{
+ struct sem_undo *un;
+
+ assert_spin_locked(&ulp->lock);
+
+ un = __lookup_undo(ulp, semid);
+ if (un) {
+ list_del_rcu(&un->list_proc);
+ list_add_rcu(&un->list_proc, &ulp->list_proc);
+ }
+ return un;
+}
+
/**
* find_alloc_undo - Lookup (and if not present create) undo array
* @ns: namespace
@@ -1163,7 +1225,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
if (alter && error == 0)
- update_queue (sma);
+ update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);
+
goto out_unlock_free;
}
@@ -1181,6 +1244,19 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
else
list_add(&queue.list, &sma->sem_pending);
+ if (nsops == 1) {
+ struct sem *curr;
+ curr = &sma->sem_base[sops->sem_num];
+
+ if (alter)
+ list_add_tail(&queue.simple_list, &curr->sem_pending);
+ else
+ list_add(&queue.simple_list, &curr->sem_pending);
+ } else {
+ INIT_LIST_HEAD(&queue.simple_list);
+ sma->complex_count++;
+ }
+
queue.status = -EINTR;
queue.sleeper = current;
current->state = TASK_INTERRUPTIBLE;
@@ -1222,7 +1298,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
*/
if (timeout && jiffies_left == 0)
error = -EAGAIN;
- list_del(&queue.list);
+ unlink_queue(sma, &queue);
out_unlock_free:
sem_unlock(sma);
@@ -1307,7 +1383,7 @@ void exit_sem(struct task_struct *tsk)
if (IS_ERR(sma))
continue;
- un = lookup_undo(ulp, semid);
+ un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
@@ -1351,7 +1427,7 @@ void exit_sem(struct task_struct *tsk)
}
sma->sem_otime = get_seconds();
/* maybe some queued-up processes were waiting for this */
- update_queue(sma);
+ update_queue(sma, -1);
sem_unlock(sma);
call_rcu(&un->rcu, free_un);
@@ -1365,7 +1441,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
struct sem_array *sma = it;
return seq_printf(s,
- "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+ "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
sma->sem_perm.key,
sma->sem_perm.id,
sma->sem_perm.mode,
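The rework keeps two views of the waiters: the array-wide sem_pending list (used whenever complex, multi-sop operations exist) and a per-semaphore simple_list for single-sop sleepers, which is why update_queue() recovers the sem_queue from either list head with a runtime offsetof(). container_of() cannot be used there because its member argument must be a compile-time token, so the code falls back to plain pointer arithmetic; a conceptual sketch of that pattern:

	#include <linux/list.h>
	#include <linux/stddef.h>

	/* Recover the enclosing sem_queue from either of its two list_heads,
	 * given the offset picked at run time (list vs. simple_list). */
	static struct sem_queue *queue_from_node(struct list_head *node, int offset)
	{
		return (struct sem_queue *)((char *)node - offset);
	}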
diff --git a/ipc/shm.c b/ipc/shm.c
index 464694e0aa4..92fe9236258 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -39,7 +39,6 @@
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
-#include <linux/ima.h>
#include <asm/uaccess.h>
@@ -101,6 +100,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
void shm_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
+ idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif
@@ -290,28 +290,28 @@ static unsigned long shm_get_unmapped_area(struct file *file,
unsigned long flags)
{
struct shm_file_data *sfd = shm_file_data(file);
- return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
-}
-
-int is_file_shm_hugepages(struct file *file)
-{
- int ret = 0;
-
- if (file->f_op == &shm_file_operations) {
- struct shm_file_data *sfd;
- sfd = shm_file_data(file);
- ret = is_file_hugepages(sfd->file);
- }
- return ret;
+ return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
+ pgoff, flags);
}
static const struct file_operations shm_file_operations = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
+};
+
+static const struct file_operations shm_file_operations_huge = {
+ .mmap = shm_mmap,
+ .fsync = shm_fsync,
+ .release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
};
+int is_file_shm_hugepages(struct file *file)
+{
+ return file->f_op == &shm_file_operations_huge;
+}
+
static const struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
@@ -878,8 +878,8 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
if (err)
goto out_unlock;
- path.dentry = dget(shp->shm_file->f_path.dentry);
- path.mnt = shp->shm_file->f_path.mnt;
+ path = shp->shm_file->f_path;
+ path_get(&path);
shp->shm_nattch++;
size = i_size_read(path.dentry->d_inode);
shm_unlock(shp);
@@ -889,10 +889,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
if (!sfd)
goto out_put_dentry;
- file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
+ file = alloc_file(&path, f_mode,
+ is_file_hugepages(shp->shm_file) ?
+ &shm_file_operations_huge :
+ &shm_file_operations);
if (!file)
goto out_free;
- ima_counts_get(file);
file->private_data = sfd;
file->f_mapping = shp->shm_file->f_mapping;
@@ -947,7 +949,7 @@ out_unlock:
out_free:
kfree(sfd);
out_put_dentry:
- dput(path.dentry);
+ path_put(&path);
goto out_nattch;
}
diff --git a/kernel/acct.c b/kernel/acct.c
index 9a4715a2f6b..a6605ca921b 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -536,7 +536,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
do_div(elapsed, AHZ);
ac.ac_btime = get_seconds() - elapsed;
/* we really need to bite the bullet and change layout */
- current_uid_gid(&ac.ac_uid, &ac.ac_gid);
+ ac.ac_uid = orig_cred->uid;
+ ac.ac_gid = orig_cred->gid;
#if ACCT_VERSION==2
ac.ac_ahz = AHZ;
#endif
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 2451dc6f328..4b05bd9479d 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -277,7 +277,7 @@ static void untag_chunk(struct node *p)
owner->root = NULL;
}
- for (i = j = 0; i < size; i++, j++) {
+ for (i = j = 0; j <= size; i++, j++) {
struct audit_tree *s;
if (&chunk->owners[j] == p) {
list_del_init(&p->list);
@@ -290,7 +290,7 @@ static void untag_chunk(struct node *p)
if (!s) /* result of earlier fallback */
continue;
get_tree(s);
- list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
+ list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
}
list_replace_rcu(&chunk->hash, &new->hash);
@@ -373,15 +373,17 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
- put_inotify_watch(watch);
+ put_inotify_watch(&old->watch);
return 0;
}
}
spin_unlock(&hash_lock);
chunk = alloc_chunk(old->count + 1);
- if (!chunk)
+ if (!chunk) {
+ put_inotify_watch(&old->watch);
return -ENOMEM;
+ }
mutex_lock(&inode->inotify_mutex);
if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
@@ -425,7 +427,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&hash_lock);
inotify_evict_watch(&old->watch);
mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
+ put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
+ put_inotify_watch(&old->watch); /* and kill it */
return 0;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 267e484f019..fc0f928167e 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -250,7 +250,6 @@ struct audit_context {
#endif
};
-#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
static inline int open_arg(int flags, int mask)
{
int n = ACC_MODE(flags);
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 3c530138183..98a51f26c13 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -12,7 +12,7 @@
void foo(void)
{
- /* The enum constants to put into include/linux/bounds.h */
+ /* The enum constants to put into include/generated/bounds.h */
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
/* End of constants */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 7c4e2713df0..1c8ddd6ee94 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -209,9 +209,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
return -ENOMEM;
cpu_hotplug_begin();
+ set_cpu_active(cpu, false);
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
+ set_cpu_active(cpu, true);
+
nr_calls--;
__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu, nr_calls, NULL);
@@ -223,11 +226,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
/* Ensure that we are not runnable on dying cpu */
cpumask_copy(old_allowed, &current->cpus_allowed);
- set_cpus_allowed_ptr(current,
- cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
+ set_cpus_allowed_ptr(current, cpu_active_mask);
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
if (err) {
+ set_cpu_active(cpu, true);
/* CPU didn't die: tell everyone. Can't complain. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu) == NOTIFY_BAD)
@@ -278,23 +281,8 @@ int __ref cpu_down(unsigned int cpu)
goto out;
}
- set_cpu_active(cpu, false);
-
- /*
- * Make sure the all cpus did the reschedule and are not
- * using stale version of the cpu_active_mask.
- * This is not strictly necessary becuase stop_machine()
- * that we run down the line already provides the required
- * synchronization. But it's really a side effect and we do not
- * want to depend on the innards of the stop_machine here.
- */
- synchronize_sched();
-
err = _cpu_down(cpu, 0);
- if (cpu_online(cpu))
- set_cpu_active(cpu, true);
-
out:
cpu_maps_update_done();
stop_machine_destroy();
@@ -383,10 +371,12 @@ int disable_nonboot_cpus(void)
return error;
cpu_maps_update_begin();
first_cpu = cpumask_first(cpu_online_mask);
- /* We take down all of the non-boot CPUs in one shot to avoid races
+ /*
+ * We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
+
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3cf2183b472..ba401fab459 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -737,7 +737,7 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
{
}
-static int generate_sched_domains(struct cpumask **domains,
+static int generate_sched_domains(cpumask_var_t **domains,
struct sched_domain_attr **attributes)
{
*domains = NULL;
@@ -872,7 +872,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
return retval;
- if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask))
+ if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
return -EINVAL;
}
retval = validate_change(cs, trialcs);
@@ -2010,7 +2010,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
}
/* Continue past cpusets with all cpus, mems online */
- if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) &&
+ if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
continue;
@@ -2019,7 +2019,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
/* Remove offline cpus and mems from this cpuset. */
mutex_lock(&callback_mutex);
cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
- cpu_online_mask);
+ cpu_active_mask);
nodes_and(cp->mems_allowed, cp->mems_allowed,
node_states[N_HIGH_MEMORY]);
mutex_unlock(&callback_mutex);
@@ -2057,8 +2057,10 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
switch (phase) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
break;
default:
@@ -2067,7 +2069,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
cgroup_lock();
mutex_lock(&callback_mutex);
- cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
mutex_unlock(&callback_mutex);
scan_for_empty_cpusets(&top_cpuset);
ndoms = generate_sched_domains(&doms, &attr);
@@ -2114,7 +2116,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
void __init cpuset_init_smp(void)
{
- cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
hotcpu_notifier(cpuset_track_online_cpus, 0);
diff --git a/kernel/exit.c b/kernel/exit.c
index 1143012951e..546774a31a6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -68,10 +68,10 @@ static void __unhash_process(struct task_struct *p)
detach_pid(p, PIDTYPE_SID);
list_del_rcu(&p->tasks);
+ list_del_init(&p->sibling);
__get_cpu_var(process_counts)--;
}
list_del_rcu(&p->thread_group);
- list_del_init(&p->sibling);
}
/*
@@ -736,12 +736,9 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
/*
* Any that need to be release_task'd are put on the @dead list.
*/
-static void reparent_thread(struct task_struct *father, struct task_struct *p,
+static void reparent_leader(struct task_struct *father, struct task_struct *p,
struct list_head *dead)
{
- if (p->pdeath_signal)
- group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
list_move_tail(&p->sibling, &p->real_parent->children);
if (task_detached(p))
@@ -780,12 +777,18 @@ static void forget_original_parent(struct task_struct *father)
reaper = find_new_reaper(father);
list_for_each_entry_safe(p, n, &father->children, sibling) {
- p->real_parent = reaper;
- if (p->parent == father) {
- BUG_ON(task_ptrace(p));
- p->parent = p->real_parent;
- }
- reparent_thread(father, p, &dead_children);
+ struct task_struct *t = p;
+ do {
+ t->real_parent = reaper;
+ if (t->parent == father) {
+ BUG_ON(task_ptrace(t));
+ t->parent = t->real_parent;
+ }
+ if (t->pdeath_signal)
+ group_send_sig_info(t->pdeath_signal,
+ SEND_SIG_NOINFO, t);
+ } while_each_thread(p, t);
+ reparent_leader(father, p, &dead_children);
}
write_unlock_irq(&tasklist_lock);
@@ -933,7 +936,7 @@ NORET_TYPE void do_exit(long code)
* an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
- spin_unlock_wait(&tsk->pi_lock);
+ raw_spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -971,7 +974,7 @@ NORET_TYPE void do_exit(long code)
exit_thread();
cgroup_exit(tsk, 1);
- if (group_dead && tsk->signal->leader)
+ if (group_dead)
disassociate_ctty(1);
module_put(task_thread_info(tsk)->exec_domain->module);
@@ -1551,14 +1554,9 @@ static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
struct task_struct *p;
list_for_each_entry(p, &tsk->children, sibling) {
- /*
- * Do not consider detached threads.
- */
- if (!task_detached(p)) {
- int ret = wait_consider_task(wo, 0, p);
- if (ret)
- return ret;
- }
+ int ret = wait_consider_task(wo, 0, p);
+ if (ret)
+ return ret;
}
return 0;
diff --git a/kernel/fork.c b/kernel/fork.c
index 1415dc4598a..5b2959b3ffc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
static void rt_mutex_init_task(struct task_struct *p)
{
- spin_lock_init(&p->pi_lock);
+ raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&p->pi_waiters, &p->pi_lock);
+ plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
p->pi_blocked_on = NULL;
#endif
}
@@ -1127,6 +1127,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ p->memcg_batch.do_batch = 0;
+ p->memcg_batch.memcg = NULL;
+#endif
p->bts = NULL;
@@ -1206,9 +1210,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->sas_ss_sp = p->sas_ss_size = 0;
/*
- * Syscall tracing should be turned off in the child regardless
- * of CLONE_PTRACE.
+ * Syscall tracing and stepping should be turned off in the
+ * child regardless of CLONE_PTRACE.
*/
+ user_disable_single_step(p);
clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
@@ -1286,7 +1291,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
}
if (likely(p->pid)) {
- list_add_tail(&p->sibling, &p->real_parent->children);
tracehook_finish_clone(p, clone_flags, trace);
if (thread_group_leader(p)) {
@@ -1298,6 +1302,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->signal->tty = tty_kref_get(current->signal->tty);
attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
attach_pid(p, PIDTYPE_SID, task_session(current));
+ list_add_tail(&p->sibling, &p->real_parent->children);
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
diff --git a/kernel/futex.c b/kernel/futex.c
index fb65e822fc4..8e3c3ffe1b9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -304,8 +304,14 @@ void put_futex_key(int fshared, union futex_key *key)
*/
static int fault_in_user_writeable(u32 __user *uaddr)
{
- int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
- 1, 1, 0, NULL, NULL);
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(current, mm, (unsigned long)uaddr,
+ 1, 1, 0, NULL, NULL);
+ up_read(&mm->mmap_sem);
+
return ret < 0 ? ret : 0;
}
@@ -397,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
* and has cleaned up the pi_state already
*/
if (pi_state->owner) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}
@@ -464,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr)
* pi_state_list anymore, but we have to be careful
* versus waiters unqueueing themselves:
*/
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
/*
* We dropped the pi-lock, so re-check whether this
* task still owns the PI-state:
@@ -489,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr)
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
spin_unlock(&hb->lock);
- spin_lock_irq(&curr->pi_lock);
+ raw_spin_lock_irq(&curr->pi_lock);
}
- spin_unlock_irq(&curr->pi_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
}
static int
@@ -552,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
* change of the task flags, we do this protected by
* p->pi_lock:
*/
- spin_lock_irq(&p->pi_lock);
+ raw_spin_lock_irq(&p->pi_lock);
if (unlikely(p->flags & PF_EXITING)) {
/*
* The task is on the way out. When PF_EXITPIDONE is
@@ -561,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
*/
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
- spin_unlock_irq(&p->pi_lock);
+ raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
return ret;
}
@@ -580,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &p->pi_state_list);
pi_state->owner = p;
- spin_unlock_irq(&p->pi_lock);
+ raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
@@ -754,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (!pi_state)
return -EINVAL;
- spin_lock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
@@ -783,23 +789,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
else if (curval != uval)
ret = -EINVAL;
if (ret) {
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
}
}
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
- spin_lock_irq(&new_owner->pi_lock);
+ raw_spin_lock_irq(&new_owner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &new_owner->pi_state_list);
pi_state->owner = new_owner;
- spin_unlock_irq(&new_owner->pi_lock);
+ raw_spin_unlock_irq(&new_owner->pi_lock);
- spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
rt_mutex_unlock(&pi_state->pi_mutex);
return 0;
@@ -1004,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb2->lock;
+ q->list.plist.spinlock = &hb2->lock;
#endif
}
get_futex_key_refs(key2);
@@ -1040,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
wake_up_state(q->task, TASK_NORMAL);
@@ -1388,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
- q->list.plist.lock = &hb->lock;
+ q->list.plist.spinlock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain);
q->task = current;
@@ -1523,18 +1529,18 @@ retry:
* itself.
*/
if (pi_state->owner != NULL) {
- spin_lock_irq(&pi_state->owner->pi_lock);
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
- spin_unlock_irq(&pi_state->owner->pi_lock);
+ raw_spin_unlock_irq(&pi_state->owner->pi_lock);
}
pi_state->owner = newowner;
- spin_lock_irq(&newowner->pi_lock);
+ raw_spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
list_add(&pi_state->list, &newowner->pi_state_list);
- spin_unlock_irq(&newowner->pi_lock);
+ raw_spin_unlock_irq(&newowner->pi_lock);
return 0;
/*
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ede52770812..0086628b6e9 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
for (;;) {
base = timer->base;
if (likely(base != NULL)) {
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
if (likely(base == timer->base))
return base;
/* The timer has migrated to another CPU: */
- spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
}
cpu_relax();
}
@@ -208,13 +208,13 @@ again:
/* See the comment in lock_timer_base() */
timer->base = NULL;
- spin_unlock(&base->cpu_base->lock);
- spin_lock(&new_base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
+ raw_spin_lock(&new_base->cpu_base->lock);
if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
cpu = this_cpu;
- spin_unlock(&new_base->cpu_base->lock);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_unlock(&new_base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
timer->base = base;
goto again;
}
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
struct hrtimer_clock_base *base = timer->base;
- spin_lock_irqsave(&base->cpu_base->lock, *flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
return base;
}
@@ -557,7 +557,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
static int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
- ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
int res;
@@ -582,7 +582,16 @@ static int hrtimer_reprogram(struct hrtimer *timer,
if (expires.tv64 < 0)
return -ETIME;
- if (expires.tv64 >= expires_next->tv64)
+ if (expires.tv64 >= cpu_base->expires_next.tv64)
+ return 0;
+
+ /*
+ * If a hang was detected in the last timer interrupt then we
+ * do not schedule a timer which is earlier than the expiry
+ * which we enforced in the hang detection. We want the system
+ * to make progress.
+ */
+ if (cpu_base->hang_detected)
return 0;
/*
@@ -590,7 +599,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
*/
res = tick_program_event(expires, 0);
if (!IS_ERR_VALUE(res))
- *expires_next = expires;
+ cpu_base->expires_next = expires;
return res;
}
@@ -619,12 +628,12 @@ static void retrigger_next_event(void *arg)
base = &__get_cpu_var(hrtimer_bases);
/* Adjust CLOCK_REALTIME offset */
- spin_lock(&base->lock);
+ raw_spin_lock(&base->lock);
base->clock_base[CLOCK_REALTIME].offset =
timespec_to_ktime(realtime_offset);
hrtimer_force_reprogram(base, 0);
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
}
/*
@@ -685,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
{
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
if (wakeup) {
- spin_unlock(&base->cpu_base->lock);
+ raw_spin_unlock(&base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- spin_lock(&base->cpu_base->lock);
+ raw_spin_lock(&base->cpu_base->lock);
} else
__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
@@ -747,17 +756,33 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
+#ifdef CONFIG_TIMER_STATS
if (timer->start_site)
return;
-
- timer->start_site = addr;
+ timer->start_site = __builtin_return_address(0);
memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
timer->start_pid = current->pid;
+#endif
}
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+ timer->start_site = NULL;
+#endif
+}
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+ if (likely(!timer_stats_active))
+ return;
+ timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+ timer->function, timer->start_comm, 0);
#endif
+}
/*
* Counterpart to lock_hrtimer_base above:
@@ -765,7 +790,7 @@ void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
- spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+ raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
@@ -1098,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
unsigned long flags;
int i;
- spin_lock_irqsave(&cpu_base->lock, flags);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!hrtimer_hres_active()) {
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1115,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
}
}
- spin_unlock_irqrestore(&cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
if (mindelta.tv64 < 0)
mindelta.tv64 = 0;
@@ -1197,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
* they get migrated to another cpu, therefore its safe to unlock
* the timer base.
*/
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
/*
* Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1217,30 +1242,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
#ifdef CONFIG_HIGH_RES_TIMERS
-static int force_clock_reprogram;
-
-/*
- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
- * is hanging, which could happen with something that slows the interrupt
- * such as the tracing. Then we force the clock reprogramming for each future
- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
- * threshold that we will overwrite.
- * The next tick event will be scheduled to 3 times we currently spend on
- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
- * 1/4 of their time to process the hrtimer interrupts. This is enough to
- * let it running without serious starvation.
- */
-
-static inline void
-hrtimer_interrupt_hanging(struct clock_event_device *dev,
- ktime_t try_time)
-{
- force_clock_reprogram = 1;
- dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
- printk(KERN_WARNING "hrtimer: interrupt too slow, "
- "forcing clock min delta to %llu ns\n",
- (unsigned long long) dev->min_delta_ns);
-}
/*
* High resolution timer interrupt
* Called with interrupts disabled
@@ -1249,24 +1250,18 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
- ktime_t expires_next, now;
- int nr_retries = 0;
- int i;
+ ktime_t expires_next, now, entry_time, delta;
+ int i, retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event.tv64 = KTIME_MAX;
- retry:
- /* 5 retries is enough to notice a hang */
- if (!(++nr_retries % 5))
- hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
-
- now = ktime_get();
-
+ entry_time = now = ktime_get();
+retry:
expires_next.tv64 = KTIME_MAX;
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
@@ -1322,13 +1317,51 @@ void hrtimer_interrupt(struct clock_event_device *dev)
* against it.
*/
cpu_base->expires_next = expires_next;
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
/* Reprogramming necessary ? */
- if (expires_next.tv64 != KTIME_MAX) {
- if (tick_program_event(expires_next, force_clock_reprogram))
- goto retry;
+ if (expires_next.tv64 == KTIME_MAX ||
+ !tick_program_event(expires_next, 0)) {
+ cpu_base->hang_detected = 0;
+ return;
}
+
+ /*
+ * The next timer was already expired due to:
+ * - tracing
+ * - long lasting callbacks
+ * - being scheduled away when running in a VM
+ *
+ * We need to prevent that we loop forever in the hrtimer
+ * interrupt routine. We give it 3 attempts to avoid
+ * overreacting on some spurious event.
+ */
+ now = ktime_get();
+ cpu_base->nr_retries++;
+ if (++retries < 3)
+ goto retry;
+ /*
+ * Give the system a chance to do something else than looping
+ * here. We stored the entry time, so we know exactly how long
+ * we spent here. We schedule the next event this amount of
+ * time away.
+ */
+ cpu_base->nr_hangs++;
+ cpu_base->hang_detected = 1;
+ delta = ktime_sub(now, entry_time);
+ if (delta.tv64 > cpu_base->max_hang_time.tv64)
+ cpu_base->max_hang_time = delta;
+ /*
+ * Limit it to a sensible value as we enforce a longer
+ * delay. Give the CPU at least 100ms to catch up.
+ */
+ if (delta.tv64 > 100 * NSEC_PER_MSEC)
+ expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
+ else
+ expires_next = ktime_add(now, delta);
+ tick_program_event(expires_next, 1);
+ printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+ ktime_to_ns(delta));
}
/*
@@ -1424,7 +1457,7 @@ void hrtimer_run_queues(void)
gettime = 0;
}
- spin_lock(&cpu_base->lock);
+ raw_spin_lock(&cpu_base->lock);
while ((node = base->first)) {
struct hrtimer *timer;
@@ -1436,7 +1469,7 @@ void hrtimer_run_queues(void)
__run_hrtimer(timer, &base->softirq_time);
}
- spin_unlock(&cpu_base->lock);
+ raw_spin_unlock(&cpu_base->lock);
}
}
@@ -1592,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- spin_lock_init(&cpu_base->lock);
+ raw_spin_lock_init(&cpu_base->lock);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1650,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
- spin_lock(&new_base->lock);
- spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&new_base->lock);
+ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
migrate_hrtimer_list(&old_base->clock_base[i],
&new_base->clock_base[i]);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index cf5ee162841..dbcbf6a33a0 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
/* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex);
static unsigned int max_task_bp_pinned(int cpu)
{
int i;
- unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
+ unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
for (i = HBP_NUM -1; i >= 0; i--) {
if (tsk_pinned[i] > 0)
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu)
return 0;
}
+static int task_bp_pinned(struct task_struct *tsk)
+{
+ struct perf_event_context *ctx = tsk->perf_event_ctxp;
+ struct list_head *list;
+ struct perf_event *bp;
+ unsigned long flags;
+ int count = 0;
+
+ if (WARN_ONCE(!ctx, "No perf context for this task"))
+ return 0;
+
+ list = &ctx->event_list;
+
+ raw_spin_lock_irqsave(&ctx->lock, flags);
+
+ /*
+ * The current breakpoint counter is not included in the list
+ * at the open() callback time
+ */
+ list_for_each_entry(bp, list, event_entry) {
+ if (bp->attr.type == PERF_TYPE_BREAKPOINT)
+ count++;
+ }
+
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return count;
+}
+
/*
* Report the number of pinned/un-pinned breakpoints we have in
* a given cpu (cpu > -1) or in all of them (cpu = -1).
*/
-static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
+static void
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
{
+ int cpu = bp->cpu;
+ struct task_struct *tsk = bp->ctx->task;
+
if (cpu >= 0) {
slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
- slots->pinned += max_task_bp_pinned(cpu);
+ if (!tsk)
+ slots->pinned += max_task_bp_pinned(cpu);
+ else
+ slots->pinned += task_bp_pinned(tsk);
slots->flexible = per_cpu(nr_bp_flexible, cpu);
return;
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
unsigned int nr;
nr = per_cpu(nr_cpu_bp_pinned, cpu);
- nr += max_task_bp_pinned(cpu);
+ if (!tsk)
+ nr += max_task_bp_pinned(cpu);
+ else
+ nr += task_bp_pinned(tsk);
if (nr > slots->pinned)
slots->pinned = nr;
@@ -118,35 +157,12 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
*/
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
{
- int count = 0;
- struct perf_event *bp;
- struct perf_event_context *ctx = tsk->perf_event_ctxp;
unsigned int *tsk_pinned;
- struct list_head *list;
- unsigned long flags;
-
- if (WARN_ONCE(!ctx, "No perf context for this task"))
- return;
-
- list = &ctx->event_list;
-
- spin_lock_irqsave(&ctx->lock, flags);
-
- /*
- * The current breakpoint counter is not included in the list
- * at the open() callback time
- */
- list_for_each_entry(bp, list, event_entry) {
- if (bp->attr.type == PERF_TYPE_BREAKPOINT)
- count++;
- }
-
- spin_unlock_irqrestore(&ctx->lock, flags);
+ int count = 0;
- if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
- return;
+ count = task_bp_pinned(tsk);
- tsk_pinned = per_cpu(task_bp_pinned, cpu);
+ tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
if (enable) {
tsk_pinned[count]++;
if (count > 0)
@@ -193,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to a single cpu, check:
*
* (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- * + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
*
* -> If there are already non-pinned counters in this cpu, it means
* there is already a free slot for them.
@@ -204,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to every cpus, check:
*
* (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- * + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
*
* -> This is roughly the same, except we check the number of per cpu
* bp for every cpu and we keep the max one. Same for the per tasks
@@ -216,7 +232,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to a single cpu, check:
*
* ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- * + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
*
* -> Same checks as before. But now the nr_bp_flexible, if any, must keep
* one register at least (or they will never be fed).
@@ -224,7 +240,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
* - If attached to every cpus, check:
*
* ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- * + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
+ * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
*/
int reserve_bp_slot(struct perf_event *bp)
{
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp)
mutex_lock(&nr_bp_mutex);
- fetch_bp_busy_slots(&slots, bp->cpu);
+ fetch_bp_busy_slots(&slots, bp);
/* Flexible counters need to keep at least one slot */
if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
@@ -259,7 +275,7 @@ void release_bp_slot(struct perf_event *bp)
}
-int __register_perf_hw_breakpoint(struct perf_event *bp)
+int register_perf_hw_breakpoint(struct perf_event *bp)
{
int ret;
@@ -276,19 +292,12 @@ int __register_perf_hw_breakpoint(struct perf_event *bp)
* This is a quick hack that will be removed soon, once we remove
* the tmp breakpoints from ptrace
*/
- if (!bp->attr.disabled || bp->callback == perf_bp_event)
+ if (!bp->attr.disabled || !bp->overflow_handler)
ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
return ret;
}
-int register_perf_hw_breakpoint(struct perf_event *bp)
-{
- bp->callback = perf_bp_event;
-
- return __register_perf_hw_breakpoint(bp);
-}
-
/**
* register_user_hw_breakpoint - register a hardware breakpoint for user space
* @attr: breakpoint attributes
@@ -297,7 +306,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
*/
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk)
{
return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
@@ -311,19 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
* @triggered: callback to trigger when we hit the breakpoint
* @tsk: pointer to 'task_struct' of the process to which the address belongs
*/
-struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk)
+int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
- /*
- * FIXME: do it without unregistering
- * - We don't want to lose our slot
- * - If the new bp is incorrect, don't lose the older one
- */
- unregister_hw_breakpoint(bp);
+ u64 old_addr = bp->attr.bp_addr;
+ int old_type = bp->attr.bp_type;
+ int old_len = bp->attr.bp_len;
+ int err = 0;
- return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+ perf_event_disable(bp);
+
+ bp->attr.bp_addr = attr->bp_addr;
+ bp->attr.bp_type = attr->bp_type;
+ bp->attr.bp_len = attr->bp_len;
+
+ if (attr->disabled)
+ goto end;
+
+ err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+ if (!err)
+ perf_event_enable(bp);
+
+ if (err) {
+ bp->attr.bp_addr = old_addr;
+ bp->attr.bp_type = old_type;
+ bp->attr.bp_len = old_len;
+ if (!bp->attr.disabled)
+ perf_event_enable(bp);
+
+ return err;
+ }
+
+end:
+ bp->attr.disabled = attr->disabled;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
@@ -348,7 +378,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
*/
struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered)
+ perf_overflow_handler_t triggered)
{
struct perf_event **cpu_events, **pevent, *bp;
long err;
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 1de9700f416..2295a31ef11 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
* flush such a longstanding irq before considering it as spurious.
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
* An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
desc->chip->set_type(i, IRQ_TYPE_PROBE);
desc->chip->startup(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
* happened in the previous stage, it may have masked itself)
*/
for_each_irq_desc_reverse(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->chip->startup(i))
desc->status |= IRQ_PENDING;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
* Now filter out any obviously spurious interrupts
*/
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
if (i < 32)
mask |= 1 << i;
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
int i;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
unsigned int status;
for_each_irq_desc(i, desc) {
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
desc->status = status & ~IRQ_AUTODETECT;
desc->chip->shutdown(i);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
mutex_unlock(&probing_active);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ba566c261ad..ecc3fa28f66 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
}
/* Ensure we don't have left over values from a previous use of this irq */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
desc->chip = &no_irq_chip;
desc->handle_irq = handle_bad_irq;
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
cpumask_clear(desc->pending_mask);
#endif
#endif
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/**
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
irq);
return;
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq)
desc->chip = &no_irq_chip;
desc->name = NULL;
clear_kstat_irqs(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
if (!chip)
chip = &no_irq_chip;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
irq_chip_set_defaults(chip);
desc->chip = chip;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
if (type == IRQ_TYPE_NONE)
return 0;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = __irq_set_trigger(desc, irq, type);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_type);
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->handler_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
EXPORT_SYMBOL(set_irq_data);
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->msi_desc = entry;
if (entry)
entry->irq = irq;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
return -EINVAL;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->chip_data = data;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
if (!desc)
return;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (nest)
desc->status |= IRQ_NESTED_THREAD;
else
desc->status &= ~IRQ_NESTED_THREAD;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(set_irq_nested_thread);
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
might_sleep();
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
kstat_incr_irqs_this_cpu(irq, desc);
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
desc->status |= IRQ_INPROGRESS;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
if (unlikely(desc->status & IRQ_ONESHOT))
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
desc->chip->unmask(irq);
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
struct irqaction *action;
irqreturn_t action_ret;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out;
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
desc->status |= IRQ_INPROGRESS;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -530,7 +530,7 @@ out:
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
}
desc->status &= ~IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
out_unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
/**
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
}
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
if (handle == handle_bad_irq) {
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->depth = 0;
desc->chip->startup(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void __init set_irq_probe(unsigned int irq)
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq)
return;
}
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
desc->status &= ~IRQ_NOPROBE;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 17c71bb565c..814940e7f48 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->irq = irq;
#ifdef CONFIG_SMP
desc->node = node;
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
/*
* Protect the sparse_irqs:
*/
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
struct irq_desc **irq_desc_ptrs __read_mostly;
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
}
};
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
if (desc)
return desc;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
irq_desc_ptrs[irq] = desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
}
};
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
return 1;
}
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->ack)
desc->chip->ack(irq);
/*
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
for (;;) {
irqreturn_t action_ret;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
@@ -536,7 +536,7 @@ out:
* disabled while the handler was running.
*/
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return 1;
}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 1b5d742c6a7..b2821f070a3 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern raw_spinlock_t sparse_irq_lock;
#ifdef CONFIG_SPARSE_IRQ
/* irq_desc_ptrs allocated at boot time */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7305b297d1e..eb6078ca60c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
cpu_relax();
/* Ok, that indicated we're done: double-check carefully. */
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
status = desc->status;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/* Oops, that failed? */
} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
if (!desc->chip->set_affinity)
return -EINVAL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
}
#endif
desc->status |= IRQ_AFFINITY_SET;
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
unsigned long flags;
int ret;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
ret = setup_affinity(irq, desc);
if (!ret)
irq_set_thread_affinity(desc);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq)
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
return;
chip_bus_lock(irq, desc);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, false);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
/* wakeup-capable irqs can be shared between drivers that
* don't need to have the same sleep mode behaviors.
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
if (on) {
if (desc->wake_depth++ == 0) {
ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_irq_wake);
@@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct irqaction *action)
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
chip_bus_lock(irq, desc);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
desc->chip->unmask(irq);
}
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(irq, desc);
}
@@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
return;
}
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
cpumask_copy(mask, desc->affinity);
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
@@ -545,7 +545,7 @@ static int irq_thread(void *data)
atomic_inc(&desc->threads_active);
- spin_lock_irq(&desc->lock);
+ raw_spin_lock_irq(&desc->lock);
if (unlikely(desc->status & IRQ_DISABLED)) {
/*
* CHECKME: We might need a dedicated
@@ -555,9 +555,9 @@ static int irq_thread(void *data)
* retriggers the interrupt itself --- tglx
*/
desc->status |= IRQ_PENDING;
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
} else {
- spin_unlock_irq(&desc->lock);
+ raw_spin_unlock_irq(&desc->lock);
action->thread_fn(action->irq, action->dev_id);
@@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
old_ptr = &desc->action;
old = *old_ptr;
if (old) {
@@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
__enable_irq(desc, irq, false);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
/*
* Strictly no need to wake it up, but hung_task complains
@@ -802,7 +802,7 @@ mismatch:
ret = -EBUSY;
out_thread:
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
if (new->thread) {
struct task_struct *t = new->thread;
@@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!desc)
return NULL;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/*
* There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!action) {
WARN(1, "Trying to free already-free IRQ %d\n", irq);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return NULL;
}
@@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
desc->chip->disable(irq);
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
unregister_handler_proc(irq, action);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index fcb6c96f262..24196228083 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
if (!desc->chip->set_affinity)
return;
- assert_spin_locked(&desc->lock);
+ assert_raw_spin_locked(&desc->lock);
/*
* If there was a valid mask to work with, please
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 3fd30197da2..26bac9d8f86 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
"for migration.\n", irq);
return false;
}
- spin_lock_init(&desc->lock);
+ raw_spin_lock_init(&desc->lock);
desc->node = node;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
irq = old_desc->irq;
- spin_lock_irqsave(&sparse_irq_lock, flags);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
/* We have to check it to avoid races with another CPU */
desc = irq_desc_ptrs[irq];
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
}
irq_desc_ptrs[irq] = desc;
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
/* free the old one */
free_one_irq_desc(old_desc, desc);
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
return desc;
out_unlock:
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index a0bb09e7986..0d4005d85b0 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
for_each_irq_desc(irq, desc) {
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__disable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
for_each_irq_desc(irq, desc)
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
if (!(desc->status & IRQ_SUSPENDED))
continue;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
EXPORT_SYMBOL_GPL(resume_device_irqs);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 0832145fea9..6f50eccc79c 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -179,7 +179,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
unsigned long flags;
int ret = 1;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
for (action = desc->action ; action; action = action->next) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
@@ -187,7 +187,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
break;
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index e49ea1c5232..89fb90ae534 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
struct irqaction *action;
int ok = 0, work = 0;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
/* Already running on another processor */
if (desc->status & IRQ_INPROGRESS) {
/*
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (desc->action && (desc->action->flags & IRQF_SHARED))
desc->status |= IRQ_PENDING;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
/* Honour the normal IRQ locking */
desc->status |= IRQ_INPROGRESS;
action = desc->action;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
while (action) {
/* Only shared IRQ handlers are safe to call */
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
}
local_irq_disable();
/* Now clean up the flags */
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
action = desc->action;
/*
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc)
* Perform real IRQ processing for the IRQ we deferred
*/
work = 1;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
handle_IRQ_event(irq, action);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (work && desc->chip && desc->chip->end)
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return ok;
}
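
The hunks above convert every irq_desc lock user from the spin_lock*() family to the raw_spin_lock*() variants. A minimal sketch of the resulting pattern, using a hypothetical lock and helper (the demo_* names are illustrative, not part of this patch):

#include <linux/spinlock.h>

/* a raw spinlock stays a true spinning lock even where spinlock_t may become sleepable */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_set_bits(unsigned int *status, unsigned int bits)
{
        unsigned long flags;

        /* same save/restore discipline as the converted desc->lock users */
        raw_spin_lock_irqsave(&demo_lock, flags);
        *status |= bits;
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}
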
diff --git a/kernel/kexec.c b/kernel/kexec.c
index f336e2107f9..a9a93d9ee7a 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -21,7 +21,7 @@
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
@@ -31,6 +31,7 @@
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
+#include <linux/swap.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -1082,6 +1083,64 @@ void crash_kexec(struct pt_regs *regs)
}
}
+size_t crash_get_memory_size(void)
+{
+ size_t size;
+ mutex_lock(&kexec_mutex);
+ size = crashk_res.end - crashk_res.start + 1;
+ mutex_unlock(&kexec_mutex);
+ return size;
+}
+
+static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
+ ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+ init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+ free_page((unsigned long)__va(addr));
+ totalram_pages++;
+ }
+}
+
+int crash_shrink_memory(unsigned long new_size)
+{
+ int ret = 0;
+ unsigned long start, end;
+
+ mutex_lock(&kexec_mutex);
+
+ if (kexec_crash_image) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ start = crashk_res.start;
+ end = crashk_res.end;
+
+ if (new_size >= end - start + 1) {
+ ret = -EINVAL;
+ if (new_size == end - start + 1)
+ ret = 0;
+ goto unlock;
+ }
+
+ start = roundup(start, PAGE_SIZE);
+ end = roundup(start + new_size, PAGE_SIZE);
+
+ free_reserved_phys_range(end, crashk_res.end);
+
+ if (start == end) {
+ crashk_res.end = end;
+ release_resource(&crashk_res);
+ } else
+ crashk_res.end = end - 1;
+
+unlock:
+ mutex_unlock(&kexec_mutex);
+ return ret;
+}
+
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
size_t data_len)
{
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 3765ff3c1bb..e92d519f93b 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -1,6 +1,7 @@
/*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
*
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
* Copyright (C) 2004 Stelian Pop <stelian@popies.net>
*
* This program is free software; you can redistribute it and/or modify
@@ -25,50 +26,48 @@
#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/log2.h>
+#include <linux/uaccess.h>
+
+static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+ unsigned int size)
+{
+ fifo->buffer = buffer;
+ fifo->size = size;
+
+ kfifo_reset(fifo);
+}
/**
- * kfifo_init - allocates a new FIFO using a preallocated buffer
+ * kfifo_init - initialize a FIFO using a preallocated buffer
+ * @fifo: the fifo to assign the buffer
* @buffer: the preallocated buffer to be used.
* @size: the size of the internal buffer, this have to be a power of 2.
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- * @lock: the lock to be used to protect the fifo buffer
*
- * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
- * &struct kfifo with kfree().
*/
-struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- gfp_t gfp_mask, spinlock_t *lock)
+void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size)
{
- struct kfifo *fifo;
-
/* size must be a power of 2 */
BUG_ON(!is_power_of_2(size));
- fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
- if (!fifo)
- return ERR_PTR(-ENOMEM);
-
- fifo->buffer = buffer;
- fifo->size = size;
- fifo->in = fifo->out = 0;
- fifo->lock = lock;
-
- return fifo;
+ _kfifo_init(fifo, buffer, size);
}
EXPORT_SYMBOL(kfifo_init);
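
A minimal usage sketch for the reworked kfifo_init(), assuming a caller-owned struct kfifo and a statically sized buffer (demo_* names are illustrative):

#include <linux/kfifo.h>

static unsigned char demo_buf[128];     /* must be a power of 2 */
static struct kfifo demo_fifo;

static void demo_setup(void)
{
        /* the caller now owns both the struct kfifo and its buffer */
        kfifo_init(&demo_fifo, demo_buf, sizeof(demo_buf));
}
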
/**
- * kfifo_alloc - allocates a new FIFO and its internal buffer
- * @size: the size of the internal buffer to be allocated.
+ * kfifo_alloc - allocates a new FIFO internal buffer
+ * @fifo: the fifo to assign the new buffer
+ * @size: the size of the buffer to be allocated, this has to be a power of 2.

* @gfp_mask: get_free_pages mask, passed to kmalloc()
- * @lock: the lock to be used to protect the fifo buffer
+ *
+ * This function dynamically allocates a new fifo internal buffer.
*
* The size will be rounded-up to a power of 2.
+ * The buffer will be released with kfifo_free().
+ * Return 0 if no error, otherwise an error code.
*/
-struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
+int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
{
unsigned char *buffer;
- struct kfifo *ret;
/*
* round up to the next power of 2, since our 'let the indices
@@ -80,48 +79,91 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
}
buffer = kmalloc(size, gfp_mask);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
-
- ret = kfifo_init(buffer, size, gfp_mask, lock);
+ if (!buffer) {
+ _kfifo_init(fifo, 0, 0);
+ return -ENOMEM;
+ }
- if (IS_ERR(ret))
- kfree(buffer);
+ _kfifo_init(fifo, buffer, size);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(kfifo_alloc);
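
And the dynamically allocated counterpart, a hedged sketch of the new int-returning kfifo_alloc()/kfifo_free() pairing (names illustrative):

#include <linux/kfifo.h>

static struct kfifo demo_fifo;

static int demo_init(void)
{
        int ret;

        ret = kfifo_alloc(&demo_fifo, 200, GFP_KERNEL); /* rounded up to 256 */
        if (ret)
                return ret;
        /* ... use the fifo ... */
        kfifo_free(&demo_fifo);         /* frees only the internal buffer now */
        return 0;
}
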
/**
- * kfifo_free - frees the FIFO
+ * kfifo_free - frees the FIFO internal buffer
* @fifo: the fifo to be freed.
*/
void kfifo_free(struct kfifo *fifo)
{
kfree(fifo->buffer);
- kfree(fifo);
}
EXPORT_SYMBOL(kfifo_free);
/**
- * __kfifo_put - puts some data into the FIFO, no locking version
+ * kfifo_skip - skip output data
* @fifo: the fifo to be used.
- * @buffer: the data to be added.
- * @len: the length of the data to be added.
- *
- * This function copies at most @len bytes from the @buffer into
- * the FIFO depending on the free space, and returns the number of
- * bytes copied.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
+ * @len: number of bytes to skip
*/
-unsigned int __kfifo_put(struct kfifo *fifo,
- const unsigned char *buffer, unsigned int len)
+void kfifo_skip(struct kfifo *fifo, unsigned int len)
+{
+ if (len < kfifo_len(fifo)) {
+ __kfifo_add_out(fifo, len);
+ return;
+ }
+ kfifo_reset_out(fifo);
+}
+EXPORT_SYMBOL(kfifo_skip);
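
A short sketch of kfifo_skip() discarding a payload without copying it out (the helper name is hypothetical):

static void demo_drop_payload(struct kfifo *fifo, unsigned int payload_len)
{
        /* skips payload_len bytes if queued; otherwise the fifo output is reset */
        kfifo_skip(fifo, payload_len);
}
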
+
+static inline void __kfifo_in_data(struct kfifo *fifo,
+ const void *from, unsigned int len, unsigned int off)
{
unsigned int l;
- len = min(len, fifo->size - fifo->in + fifo->out);
+ /*
+ * Ensure that we sample the fifo->out index -before- we
+ * start putting bytes into the kfifo.
+ */
+
+ smp_mb();
+
+ off = __kfifo_off(fifo, fifo->in + off);
+
+ /* first put the data starting from fifo->in to buffer end */
+ l = min(len, fifo->size - off);
+ memcpy(fifo->buffer + off, from, l);
+
+ /* then put the rest (if any) at the beginning of the buffer */
+ memcpy(fifo->buffer, from + l, len - l);
+}
+
+static inline void __kfifo_out_data(struct kfifo *fifo,
+ void *to, unsigned int len, unsigned int off)
+{
+ unsigned int l;
+
+ /*
+ * Ensure that we sample the fifo->in index -before- we
+ * start removing bytes from the kfifo.
+ */
+
+ smp_rmb();
+
+ off = __kfifo_off(fifo, fifo->out + off);
+
+ /* first get the data from fifo->out until the end of the buffer */
+ l = min(len, fifo->size - off);
+ memcpy(to, fifo->buffer + off, l);
+
+ /* then get the rest (if any) from the beginning of the buffer */
+ memcpy(to + l, fifo->buffer, len - l);
+}
+
+static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int off)
+{
+ unsigned int l;
+ int ret;
/*
* Ensure that we sample the fifo->out index -before- we
@@ -130,68 +172,229 @@ unsigned int __kfifo_put(struct kfifo *fifo,
smp_mb();
+ off = __kfifo_off(fifo, fifo->in + off);
+
/* first put the data starting from fifo->in to buffer end */
- l = min(len, fifo->size - (fifo->in & (fifo->size - 1)));
- memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l);
+ l = min(len, fifo->size - off);
+ ret = copy_from_user(fifo->buffer + off, from, l);
+
+ if (unlikely(ret))
+ return ret + len - l;
/* then put the rest (if any) at the beginning of the buffer */
- memcpy(fifo->buffer, buffer + l, len - l);
+ return copy_from_user(fifo->buffer, from + l, len - l);
+}
+
+static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
+ void __user *to, unsigned int len, unsigned int off)
+{
+ unsigned int l;
+ int ret;
/*
- * Ensure that we add the bytes to the kfifo -before-
- * we update the fifo->in index.
+ * Ensure that we sample the fifo->in index -before- we
+ * start removing bytes from the kfifo.
*/
- smp_wmb();
+ smp_rmb();
+
+ off = __kfifo_off(fifo, fifo->out + off);
+
+ /* first get the data from fifo->out until the end of the buffer */
+ l = min(len, fifo->size - off);
+ ret = copy_to_user(to, fifo->buffer + off, l);
+
+ if (unlikely(ret))
+ return ret + len - l;
+
+ /* then get the rest (if any) from the beginning of the buffer */
+ return copy_to_user(to + l, fifo->buffer, len - l);
+}
+
+unsigned int __kfifo_in_n(struct kfifo *fifo,
+ const void *from, unsigned int len, unsigned int recsize)
+{
+ if (kfifo_avail(fifo) < len + recsize)
+ return len + 1;
+
+ __kfifo_in_data(fifo, from, len, recsize);
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_in_n);
- fifo->in += len;
+/**
+ * kfifo_in - puts some data into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most @len bytes from the @from buffer into
+ * the FIFO depending on the free space, and returns the number of
+ * bytes copied.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from,
+ unsigned int len)
+{
+ len = min(kfifo_avail(fifo), len);
+ __kfifo_in_data(fifo, from, len, 0);
+ __kfifo_add_in(fifo, len);
return len;
}
-EXPORT_SYMBOL(__kfifo_put);
+EXPORT_SYMBOL(kfifo_in);
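
A one-function sketch of kfifo_in(); the return value is the number of bytes that actually fit (demo_push is hypothetical):

static unsigned int demo_push(struct kfifo *fifo, const unsigned char *msg,
                              unsigned int len)
{
        /* may copy fewer than len bytes when the fifo is nearly full */
        return kfifo_in(fifo, msg, len);
}
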
+
+unsigned int __kfifo_in_generic(struct kfifo *fifo,
+ const void *from, unsigned int len, unsigned int recsize)
+{
+ return __kfifo_in_rec(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_in_generic);
+
+unsigned int __kfifo_out_n(struct kfifo *fifo,
+ void *to, unsigned int len, unsigned int recsize)
+{
+ if (kfifo_len(fifo) < len + recsize)
+ return len;
+
+ __kfifo_out_data(fifo, to, len, recsize);
+ __kfifo_add_out(fifo, len + recsize);
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_out_n);
/**
- * __kfifo_get - gets some data from the FIFO, no locking version
+ * kfifo_out - gets some data from the FIFO
* @fifo: the fifo to be used.
- * @buffer: where the data must be copied.
+ * @to: where the data must be copied.
* @len: the size of the destination buffer.
*
* This function copies at most @len bytes from the FIFO into the
- * @buffer and returns the number of copied bytes.
+ * @to buffer and returns the number of copied bytes.
*
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these functions.
*/
-unsigned int __kfifo_get(struct kfifo *fifo,
- unsigned char *buffer, unsigned int len)
+unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
{
- unsigned int l;
+ len = min(kfifo_len(fifo), len);
- len = min(len, fifo->in - fifo->out);
+ __kfifo_out_data(fifo, to, len, 0);
+ __kfifo_add_out(fifo, len);
- /*
- * Ensure that we sample the fifo->in index -before- we
- * start removing bytes from the kfifo.
- */
+ return len;
+}
+EXPORT_SYMBOL(kfifo_out);
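
A matching drain loop using kfifo_out(), again with illustrative names:

static void demo_drain(struct kfifo *fifo)
{
        unsigned char tmp[64];
        unsigned int n;

        while ((n = kfifo_out(fifo, tmp, sizeof(tmp))) > 0) {
                /* process n bytes from tmp here */
        }
}
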
- smp_rmb();
+unsigned int __kfifo_out_generic(struct kfifo *fifo,
+ void *to, unsigned int len, unsigned int recsize,
+ unsigned int *total)
+{
+ return __kfifo_out_rec(fifo, to, len, recsize, total);
+}
+EXPORT_SYMBOL(__kfifo_out_generic);
- /* first get the data from fifo->out until the end of the buffer */
- l = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
- memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l);
+unsigned int __kfifo_from_user_n(struct kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int recsize)
+{
+ if (kfifo_avail(fifo) < len + recsize)
+ return len + 1;
- /* then get the rest (if any) from the beginning of the buffer */
- memcpy(buffer + l, fifo->buffer, len - l);
+ return __kfifo_from_user_data(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_from_user_n);
- /*
- * Ensure that we remove the bytes from the kfifo -before-
- * we update the fifo->out index.
- */
+/**
+ * kfifo_from_user - puts some data from user space into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: pointer to the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most @len bytes from the @from buffer into the
+ * FIFO, depending on the free space, and returns the number of copied bytes.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_from_user(struct kfifo *fifo,
+ const void __user *from, unsigned int len)
+{
+ len = min(kfifo_avail(fifo), len);
+ len -= __kfifo_from_user_data(fifo, from, len, 0);
+ __kfifo_add_in(fifo, len);
+ return len;
+}
+EXPORT_SYMBOL(kfifo_from_user);
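
A hedged sketch of kfifo_from_user() in a character-device write handler; demo_fifo is assumed to have been set up as in the earlier sketches:

static ssize_t demo_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        /* copies at most count bytes straight from user space into the fifo */
        return kfifo_from_user(&demo_fifo, buf, count);
}
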
- smp_mb();
+unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int recsize)
+{
+ return __kfifo_from_user_rec(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_from_user_generic);
- fifo->out += len;
+unsigned int __kfifo_to_user_n(struct kfifo *fifo,
+ void __user *to, unsigned int len, unsigned int reclen,
+ unsigned int recsize)
+{
+ unsigned int ret;
+
+ if (kfifo_len(fifo) < reclen + recsize)
+ return len;
+
+ ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
+ if (likely(ret == 0))
+ __kfifo_add_out(fifo, reclen + recsize);
+
+ return ret;
+}
+EXPORT_SYMBOL(__kfifo_to_user_n);
+
+/**
+ * kfifo_to_user - gets data from the FIFO and writes it to user space
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @len: the size of the destination buffer.
+ *
+ * This function copies at most @len bytes from the FIFO into the
+ * @to buffer and returns the number of copied bytes.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_to_user(struct kfifo *fifo,
+ void __user *to, unsigned int len)
+{
+ len = min(kfifo_len(fifo), len);
+ len -= __kfifo_to_user_data(fifo, to, len, 0);
+ __kfifo_add_out(fifo, len);
return len;
}
-EXPORT_SYMBOL(__kfifo_get);
+EXPORT_SYMBOL(kfifo_to_user);
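
And the symmetric read side with kfifo_to_user(), under the same assumptions:

static ssize_t demo_read(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        /* copies at most count queued bytes straight out to user space */
        return kfifo_to_user(&demo_fifo, buf, count);
}
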
+
+unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
+ void __user *to, unsigned int len, unsigned int recsize,
+ unsigned int *total)
+{
+ return __kfifo_to_user_rec(fifo, to, len, recsize, total);
+}
+EXPORT_SYMBOL(__kfifo_to_user_generic);
+
+unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize)
+{
+ if (recsize == 0)
+ return kfifo_avail(fifo);
+
+ return __kfifo_peek_n(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_peek_generic);
+
+void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize)
+{
+ __kfifo_skip_rec(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_skip_generic);
+
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 7d701463402..2eb517e2351 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -129,6 +129,7 @@ struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;
int kgdb_single_step;
+pid_t kgdb_sstep_pid;
/* Our I/O buffers. */
static char remcom_in_buffer[BUFMAX];
@@ -541,12 +542,17 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
*/
if (tid == 0 || tid == -1)
tid = -atomic_read(&kgdb_active) - 2;
- if (tid < 0) {
+ if (tid < -1 && tid > -NR_CPUS - 2) {
if (kgdb_info[-tid - 2].task)
return kgdb_info[-tid - 2].task;
else
return idle_task(-tid - 2);
}
+ if (tid <= 0) {
+ printk(KERN_ERR "KGDB: Internal thread select error\n");
+ dump_stack();
+ return NULL;
+ }
/*
* find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -619,7 +625,8 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
static int kgdb_activate_sw_breakpoints(void)
{
unsigned long addr;
- int error = 0;
+ int error;
+ int ret = 0;
int i;
for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -629,13 +636,16 @@ static int kgdb_activate_sw_breakpoints(void)
addr = kgdb_break[i].bpt_addr;
error = kgdb_arch_set_breakpoint(addr,
kgdb_break[i].saved_instr);
- if (error)
- return error;
+ if (error) {
+ ret = error;
+ printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+ continue;
+ }
kgdb_flush_swbreak_addr(addr);
kgdb_break[i].state = BP_ACTIVE;
}
- return 0;
+ return ret;
}
static int kgdb_set_sw_break(unsigned long addr)
@@ -682,7 +692,8 @@ static int kgdb_set_sw_break(unsigned long addr)
static int kgdb_deactivate_sw_breakpoints(void)
{
unsigned long addr;
- int error = 0;
+ int error;
+ int ret = 0;
int i;
for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -691,13 +702,15 @@ static int kgdb_deactivate_sw_breakpoints(void)
addr = kgdb_break[i].bpt_addr;
error = kgdb_arch_remove_breakpoint(addr,
kgdb_break[i].saved_instr);
- if (error)
- return error;
+ if (error) {
+ printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+ ret = error;
+ }
kgdb_flush_swbreak_addr(addr);
kgdb_break[i].state = BP_SET;
}
- return 0;
+ return ret;
}
static int kgdb_remove_sw_break(unsigned long addr)
@@ -1204,8 +1217,10 @@ static int gdb_cmd_exception_pass(struct kgdb_state *ks)
return 1;
} else {
- error_packet(remcom_out_buffer, -EINVAL);
- return 0;
+ kgdb_msg_write("KGDB only knows signal 9 (pass)"
+ " and 15 (pass and disconnect)\n"
+ "Executing a continue without signal passing\n", 0);
+ remcom_in_buffer[0] = 'c';
}
/* Indicate fall through */
@@ -1395,6 +1410,7 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
struct kgdb_state kgdb_var;
struct kgdb_state *ks = &kgdb_var;
unsigned long flags;
+ int sstep_tries = 100;
int error = 0;
int i, cpu;
@@ -1425,13 +1441,14 @@ acquirelock:
cpu_relax();
/*
- * Do not start the debugger connection on this CPU if the last
- * instance of the exception handler wanted to come into the
- * debugger on a different CPU via a single step
+ * For single stepping, try to only enter on the processor
+ * that was single stepping. To guard against a deadlock, the
+ * kernel will only try for the value of sstep_tries before
+ * giving up and continuing on.
*/
if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
-
+ (kgdb_info[cpu].task &&
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
atomic_set(&kgdb_active, -1);
touch_softlockup_watchdog();
clocksource_touch_watchdog();
@@ -1524,6 +1541,13 @@ acquirelock:
}
kgdb_restore:
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+ int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+ if (kgdb_info[sstep_cpu].task)
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+ else
+ kgdb_sstep_pid = 0;
+ }
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);
touch_softlockup_watchdog();
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 528dd78e7e7..3feaf5a7451 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -100,6 +100,26 @@ static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
}
KERNEL_ATTR_RO(kexec_crash_loaded);
+static ssize_t kexec_crash_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%zu\n", crash_get_memory_size());
+}
+static ssize_t kexec_crash_size_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long cnt;
+ int ret;
+
+ if (strict_strtoul(buf, 0, &cnt))
+ return -EINVAL;
+
+ ret = crash_shrink_memory(cnt);
+ return ret < 0 ? ret : count;
+}
+KERNEL_ATTR_RW(kexec_crash_size);
+
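
A hedged userspace sketch of the resulting /sys/kernel/kexec_crash_size attribute; halving the reservation here is an arbitrary illustration:

#include <stdio.h>

int main(void)
{
        unsigned long size = 0;
        FILE *f = fopen("/sys/kernel/kexec_crash_size", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%lu", &size) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("crash kernel reservation: %lu bytes\n", size);

        /* writing a smaller value releases the excess memory back to the system */
        f = fopen("/sys/kernel/kexec_crash_size", "w");
        if (!f)
                return 1;
        fprintf(f, "%lu\n", size / 2);
        fclose(f);
        return 0;
}
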
static ssize_t vmcoreinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -147,6 +167,7 @@ static struct attribute * kernel_attrs[] = {
#ifdef CONFIG_KEXEC
&kexec_loaded_attr.attr,
&kexec_crash_loaded_attr.attr,
+ &kexec_crash_size_attr.attr,
&vmcoreinfo_attr.attr,
#endif
NULL
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ab7ae57773e..fbb6222fe7e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,6 +150,29 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
EXPORT_SYMBOL(kthread_create);
/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @p to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+ /* Must have done schedule() in kthread() before we set_task_cpu */
+ if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+ WARN_ON(1);
+ return;
+ }
+
+ p->cpus_allowed = cpumask_of_cpu(cpu);
+ p->rt.nr_cpus_allowed = 1;
+ p->flags |= PF_THREAD_BOUND;
+}
+EXPORT_SYMBOL(kthread_bind);
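
A short sketch of the intended call sequence for the newly exported kthread_bind(): create, bind while the thread is still stopped, then wake it (demo_* names are illustrative):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int demo_thread_fn(void *data)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int demo_start_on(unsigned int cpu)
{
        struct task_struct *t = kthread_create(demo_thread_fn, NULL, "demo/%u", cpu);

        if (IS_ERR(t))
                return PTR_ERR(t);
        kthread_bind(t, cpu);   /* thread has not run yet, so binding is safe */
        wake_up_process(t);
        return 0;
}
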
+
+/**
* kthread_stop - stop a thread created by kthread_create().
* @k: thread created by kthread_create().
*
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f5dcd36d315..5feaddcdbe4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
* to use a raw spinlock - we really dont want the spinlock
* code to recurse back into the lockdep code...
*/
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int graph_lock(void)
{
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
/*
* Make sure that if another CPU detected a bug while
* walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
* dropped already)
*/
if (!debug_locks) {
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return 0;
}
/* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
static inline int graph_unlock(void)
{
- if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+ if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
current->lockdep_recursion--;
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return 0;
}
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
{
int ret = debug_locks_off();
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
return ret;
}
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
}
#ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+ cpu_lock_stats);
static inline u64 lockstat_clock(void)
{
@@ -168,7 +169,7 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
if (time > lt->max)
lt->max = time;
- if (time < lt->min || !lt->min)
+ if (time < lt->min || !lt->nr)
lt->min = time;
lt->total += time;
@@ -177,8 +178,15 @@ static void lock_time_inc(struct lock_time *lt, u64 time)
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
- dst->min += src->min;
- dst->max += src->max;
+ if (!src->nr)
+ return;
+
+ if (src->max > dst->max)
+ dst->max = src->max;
+
+ if (src->min < dst->min || !dst->nr)
+ dst->min = src->min;
+
dst->total += src->total;
dst->nr += src->nr;
}
@@ -191,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
memset(&stats, 0, sizeof(struct lock_class_stats));
for_each_possible_cpu(cpu) {
struct lock_class_stats *pcs =
- &per_cpu(lock_stats, cpu)[class - lock_classes];
+ &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
stats.contention_point[i] += pcs->contention_point[i];
@@ -218,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
for_each_possible_cpu(cpu) {
struct lock_class_stats *cpu_stats =
- &per_cpu(lock_stats, cpu)[class - lock_classes];
+ &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
memset(cpu_stats, 0, sizeof(struct lock_class_stats));
}
@@ -228,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
- return &get_cpu_var(lock_stats)[class - lock_classes];
+ return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}
static void put_lock_stats(struct lock_class_stats *stats)
{
- put_cpu_var(lock_stats);
+ put_cpu_var(cpu_lock_stats);
}
static void lock_release_holdtime(struct held_lock *hlock)
@@ -379,7 +387,8 @@ static int save_trace(struct stack_trace *trace)
* complete trace that maxes out the entries provided will be reported
* as incomplete, friggin useless </rant>
*/
- if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+ if (trace->nr_entries != 0 &&
+ trace->entries[trace->nr_entries-1] == ULONG_MAX)
trace->nr_entries--;
trace->max_entries = trace->nr_entries;
@@ -1161,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.class = class;
local_irq_save(flags);
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
@@ -1188,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.class = class;
local_irq_save(flags);
- __raw_spin_lock(&lockdep_lock);
+ arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
- __raw_spin_unlock(&lockdep_lock);
+ arch_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
diff --git a/kernel/module.c b/kernel/module.c
index 5842a71cf05..e96b8ed1cb6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
static void *percpu_modalloc(unsigned long size, unsigned long align,
const char *name)
{
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
free_percpu(freeme);
}
-#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-/* Number of blocks used and allocated. */
-static unsigned int pcpu_num_used, pcpu_num_allocated;
-/* Size of each block. -ve means used. */
-static int *pcpu_size;
-
-static int split_block(unsigned int i, unsigned short size)
-{
- /* Reallocation required? */
- if (pcpu_num_used + 1 > pcpu_num_allocated) {
- int *new;
-
- new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
- GFP_KERNEL);
- if (!new)
- return 0;
-
- pcpu_num_allocated *= 2;
- pcpu_size = new;
- }
-
- /* Insert a new subblock */
- memmove(&pcpu_size[i+1], &pcpu_size[i],
- sizeof(pcpu_size[0]) * (pcpu_num_used - i));
- pcpu_num_used++;
-
- pcpu_size[i+1] -= size;
- pcpu_size[i] = size;
- return 1;
-}
-
-static inline unsigned int block_size(int val)
-{
- if (val < 0)
- return -val;
- return val;
-}
-
-static void *percpu_modalloc(unsigned long size, unsigned long align,
- const char *name)
-{
- unsigned long extra;
- unsigned int i;
- void *ptr;
- int cpu;
-
- if (align > PAGE_SIZE) {
- printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
- name, align, PAGE_SIZE);
- align = PAGE_SIZE;
- }
-
- ptr = __per_cpu_start;
- for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
- /* Extra for alignment requirement. */
- extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
- BUG_ON(i == 0 && extra != 0);
-
- if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
- continue;
-
- /* Transfer extra to previous block. */
- if (pcpu_size[i-1] < 0)
- pcpu_size[i-1] -= extra;
- else
- pcpu_size[i-1] += extra;
- pcpu_size[i] -= extra;
- ptr += extra;
-
- /* Split block if warranted */
- if (pcpu_size[i] - size > sizeof(unsigned long))
- if (!split_block(i, size))
- return NULL;
-
- /* add the per-cpu scanning areas */
- for_each_possible_cpu(cpu)
- kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
- GFP_KERNEL);
-
- /* Mark allocated */
- pcpu_size[i] = -pcpu_size[i];
- return ptr;
- }
-
- printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
- size);
- return NULL;
-}
-
-static void percpu_modfree(void *freeme)
-{
- unsigned int i;
- void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
- int cpu;
-
- /* First entry is core kernel percpu data. */
- for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
- if (ptr == freeme) {
- pcpu_size[i] = -pcpu_size[i];
- goto free;
- }
- }
- BUG();
-
- free:
- /* remove the per-cpu scanning areas */
- for_each_possible_cpu(cpu)
- kmemleak_free(freeme + per_cpu_offset(cpu));
-
- /* Merge with previous? */
- if (pcpu_size[i-1] >= 0) {
- pcpu_size[i-1] += pcpu_size[i];
- pcpu_num_used--;
- memmove(&pcpu_size[i], &pcpu_size[i+1],
- (pcpu_num_used - i) * sizeof(pcpu_size[0]));
- i--;
- }
- /* Merge with next? */
- if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
- pcpu_size[i] += pcpu_size[i+1];
- pcpu_num_used--;
- memmove(&pcpu_size[i+1], &pcpu_size[i+2],
- (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
- }
-}
-
-static int percpu_modinit(void)
-{
- pcpu_num_used = 2;
- pcpu_num_allocated = 2;
- pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
- GFP_KERNEL);
- /* Static in-kernel percpu data (used). */
- pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
- /* Free room. */
- pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
- if (pcpu_size[1] < 0) {
- printk(KERN_ERR "No per-cpu room for modules.\n");
- pcpu_num_used = 1;
- }
-
- return 0;
-}
-__initcall(percpu_modinit);
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
static unsigned int find_pcpusec(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
const char *secstrings)
@@ -1030,11 +880,23 @@ static int try_to_force_load(struct module *mod, const char *reason)
}
#ifdef CONFIG_MODVERSIONS
+/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
+static unsigned long maybe_relocated(unsigned long crc,
+ const struct module *crc_owner)
+{
+#ifdef ARCH_RELOCATES_KCRCTAB
+ if (crc_owner == NULL)
+ return crc - (unsigned long)reloc_start;
+#endif
+ return crc;
+}
+
static int check_version(Elf_Shdr *sechdrs,
unsigned int versindex,
const char *symname,
struct module *mod,
- const unsigned long *crc)
+ const unsigned long *crc,
+ const struct module *crc_owner)
{
unsigned int i, num_versions;
struct modversion_info *versions;
@@ -1055,10 +917,10 @@ static int check_version(Elf_Shdr *sechdrs,
if (strcmp(versions[i].name, symname) != 0)
continue;
- if (versions[i].crc == *crc)
+ if (versions[i].crc == maybe_relocated(*crc, crc_owner))
return 1;
DEBUGP("Found checksum %lX vs module %lX\n",
- *crc, versions[i].crc);
+ maybe_relocated(*crc, crc_owner), versions[i].crc);
goto bad_version;
}
@@ -1081,7 +943,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
&crc, true, false))
BUG();
- return check_version(sechdrs, versindex, "module_layout", mod, crc);
+ return check_version(sechdrs, versindex, "module_layout", mod, crc,
+ NULL);
}
/* First part is kernel version, which we ignore if module has crcs. */
@@ -1099,7 +962,8 @@ static inline int check_version(Elf_Shdr *sechdrs,
unsigned int versindex,
const char *symname,
struct module *mod,
- const unsigned long *crc)
+ const unsigned long *crc,
+ const struct module *crc_owner)
{
return 1;
}
@@ -1134,8 +998,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
/* use_module can fail due to OOM,
or module initialization or unloading */
if (sym) {
- if (!check_version(sechdrs, versindex, name, mod, crc) ||
- !use_module(mod, owner))
+ if (!check_version(sechdrs, versindex, name, mod, crc, owner)
+ || !use_module(mod, owner))
sym = NULL;
}
return sym;
@@ -2046,9 +1910,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
unsigned int i;
/* only scan the sections containing data */
- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
- (unsigned long)mod->module_core,
- sizeof(struct module), GFP_KERNEL);
+ kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
for (i = 1; i < hdr->e_shnum; i++) {
if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -2057,8 +1919,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
&& strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
continue;
- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
- (unsigned long)mod->module_core,
+ kmemleak_scan_area((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size, GFP_KERNEL);
}
}
@@ -2386,6 +2247,12 @@ static noinline struct module *load_module(void __user *umod,
"_ftrace_events",
sizeof(*mod->trace_events),
&mod->num_trace_events);
+ /*
+ * This section contains pointers to allocated objects in the trace
+ * code and not scanning it leads to false positives.
+ */
+ kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+ mod->num_trace_events, GFP_KERNEL);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index 6b2d735846a..57d527a16f9 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
\
DEBUG_LOCKS_WARN_ON(in_interrupt()); \
local_irq_save(flags); \
- __raw_spin_lock(&(lock)->raw_lock); \
+ arch_spin_lock(&(lock)->rlock.raw_lock);\
DEBUG_LOCKS_WARN_ON(l->magic != l); \
} while (0)
-#define spin_unlock_mutex(lock, flags) \
- do { \
- __raw_spin_unlock(&(lock)->raw_lock); \
- local_irq_restore(flags); \
- preempt_check_resched(); \
+#define spin_unlock_mutex(lock, flags) \
+ do { \
+ arch_spin_unlock(&(lock)->rlock.raw_lock); \
+ local_irq_restore(flags); \
+ preempt_check_resched(); \
} while (0)
diff --git a/kernel/panic.c b/kernel/panic.c
index 96b45d0b4ba..5827f7b9725 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -10,6 +10,7 @@
*/
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
+#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
@@ -74,6 +75,7 @@ NORET_TYPE void panic(const char * fmt, ...)
dump_stack();
#endif
+ kmsg_dump(KMSG_DUMP_PANIC);
/*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
@@ -339,6 +341,7 @@ void oops_exit(void)
{
do_oops_enter_exit();
print_oops_end_marker();
+ kmsg_dump(KMSG_DUMP_OOPS);
}
#ifdef WANT_WARN_ON_SLOWPATH
diff --git a/kernel/params.c b/kernel/params.c
index d656c276508..cf1b6918312 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#if 0
#define DEBUGP printk
@@ -122,9 +123,7 @@ static char *next_arg(char *args, char **param, char **val)
next = args + i;
/* Chew up trailing spaces. */
- while (isspace(*next))
- next++;
- return next;
+ return skip_spaces(next);
}
/* Args looks like "foo=bar,bar2 baz=fuz wiz". */
@@ -139,8 +138,7 @@ int parse_args(const char *name,
DEBUGP("Parsing ARGS: %s\n", args);
/* Chew leading spaces */
- while (isspace(*args))
- args++;
+ args = skip_spaces(args);
while (*args) {
int ret;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 40a996ec39f..1f38270f08c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -36,7 +36,7 @@
/*
* Each CPU has a list of per CPU events:
*/
-DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
+static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
- spin_lock_irqsave(&ctx->lock, *flags);
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
- spin_unlock_irqrestore(&ctx->lock, *flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
put_ctx(ctx);
}
@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Protect the list operation against NMI by disabling the
* events on a global level.
@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
@@ -488,12 +488,12 @@ retry:
task_oncpu_function_call(task, __perf_event_remove_from_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active we need to retry the smp call.
*/
if (ctx->nr_active && !list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -504,7 +504,7 @@ retry:
*/
if (!list_empty(&event->group_entry))
list_del_event(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* If the event is on, turn it off.
@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -567,7 +567,7 @@ static void __perf_event_disable(void *info)
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
-static void perf_event_disable(struct perf_event *event)
+void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -584,12 +584,12 @@ static void perf_event_disable(struct perf_event *event)
retry:
task_oncpu_function_call(task, __perf_event_disable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -602,7 +602,7 @@ static void perf_event_disable(struct perf_event *event)
event->state = PERF_EVENT_STATE_OFF;
}
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int
@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -782,6 +782,9 @@ static void __perf_install_in_context(void *info)
add_event_to_ctx(event, ctx);
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ goto unlock;
+
/*
* Don't put the event on if it is disabled or if
* it is in a group and the group isn't on.
@@ -820,7 +823,7 @@ static void __perf_install_in_context(void *info)
unlock:
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -856,12 +859,12 @@ retry:
task_oncpu_function_call(task, __perf_install_in_context,
event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* we need to retry the smp call.
*/
if (ctx->is_active && list_empty(&event->group_entry)) {
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -872,7 +875,7 @@ retry:
*/
if (list_empty(&event->group_entry))
add_event_to_ctx(event, ctx);
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -917,7 +920,7 @@ static void __perf_event_enable(void *info)
cpuctx->task_ctx = ctx;
}
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);
@@ -925,6 +928,9 @@ static void __perf_event_enable(void *info)
goto unlock;
__perf_event_mark_enabled(event, ctx);
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ goto unlock;
+
/*
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
@@ -959,7 +965,7 @@ static void __perf_event_enable(void *info)
}
unlock:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -971,7 +977,7 @@ static void __perf_event_enable(void *info)
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
-static void perf_event_enable(struct perf_event *event)
+void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -985,7 +991,7 @@ static void perf_event_enable(struct perf_event *event)
return;
}
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
@@ -1000,10 +1006,10 @@ static void perf_event_enable(struct perf_event *event)
event->state = PERF_EVENT_STATE_OFF;
retry:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_event_enable, event);
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,
@@ -1020,7 +1026,7 @@ static void perf_event_enable(struct perf_event *event)
__perf_event_mark_enabled(event, ctx);
out:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
@@ -1042,7 +1048,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
{
struct perf_event *event;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 0;
if (likely(!ctx->nr_events))
goto out;
@@ -1055,7 +1061,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1193,8 +1199,8 @@ void perf_event_task_sched_out(struct task_struct *task,
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
- spin_lock(&ctx->lock);
- spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&ctx->lock);
+ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts
@@ -1208,8 +1214,8 @@ void perf_event_task_sched_out(struct task_struct *task,
perf_event_sync_stat(ctx, next_ctx);
}
- spin_unlock(&next_ctx->lock);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&next_ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
@@ -1251,7 +1257,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
struct perf_event *event;
int can_add_hw = 1;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
if (likely(!ctx->nr_events))
goto out;
@@ -1306,7 +1312,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
}
perf_enable();
out:
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1370,11 +1376,14 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
struct hw_perf_event *hwc;
u64 interrupts, freq;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ continue;
+
hwc = &event->hw;
interrupts = hwc->interrupts;
@@ -1425,7 +1434,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
perf_enable();
}
}
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
/*
@@ -1438,7 +1447,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
if (!ctx->nr_events)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
/*
* Rotate the first entry last (works just fine for group events too):
*/
@@ -1449,7 +1458,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
}
perf_enable();
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr, int cpu)
@@ -1498,7 +1507,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
__perf_event_task_sched_out(ctx);
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
list_for_each_entry(event, &ctx->group_list, group_entry) {
if (!event->attr.enable_on_exec)
@@ -1516,7 +1525,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
if (enabled)
unclone_ctx(ctx);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
perf_event_task_sched_in(task, smp_processor_id());
out:
@@ -1542,10 +1551,10 @@ static void __perf_event_read(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
- spin_lock(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
update_context_time(ctx);
update_event_times(event);
- spin_unlock(&ctx->lock);
+ raw_spin_unlock(&ctx->lock);
event->pmu->read(event);
}
@@ -1563,10 +1572,10 @@ static u64 perf_event_read(struct perf_event *event)
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->lock, flags);
+ raw_spin_lock_irqsave(&ctx->lock, flags);
update_context_time(ctx);
update_event_times(event);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return atomic64_read(&event->count);
@@ -1579,8 +1588,7 @@ static void
__perf_event_init_context(struct perf_event_context *ctx,
struct task_struct *task)
{
- memset(ctx, 0, sizeof(*ctx));
- spin_lock_init(&ctx->lock);
+ raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->group_list);
INIT_LIST_HEAD(&ctx->event_list);
@@ -1596,15 +1604,12 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
unsigned long flags;
int err;
- /*
- * If cpu is not a wildcard then this is a percpu event:
- */
- if (cpu != -1) {
+ if (pid == -1 && cpu != -1) {
/* Must be root to operate on a CPU event: */
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
- if (cpu < 0 || cpu > num_possible_cpus())
+ if (cpu < 0 || cpu >= nr_cpumask_bits)
return ERR_PTR(-EINVAL);
/*
@@ -1612,7 +1617,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
* offline CPU and activate it when the CPU comes up, but
* that's for later.
*/
- if (!cpu_isset(cpu, cpu_online_map))
+ if (!cpu_online(cpu))
return ERR_PTR(-ENODEV);
cpuctx = &per_cpu(perf_cpu_context, cpu);
@@ -1650,11 +1655,11 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
unclone_ctx(ctx);
- spin_unlock_irqrestore(&ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
if (!ctx) {
- ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+ ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
err = -ENOMEM;
if (!ctx)
goto errout;
@@ -1988,7 +1993,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
if (!value)
return -EINVAL;
- spin_lock_irq(&ctx->lock);
+ raw_spin_lock_irq(&ctx->lock);
if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
@@ -2001,7 +2006,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
event->hw.sample_period = value;
}
unlock:
- spin_unlock_irq(&ctx->lock);
+ raw_spin_unlock_irq(&ctx->lock);
return ret;
}
@@ -3263,6 +3268,9 @@ static void perf_event_task_output(struct perf_event *event,
static int perf_event_task_match(struct perf_event *event)
{
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (event->attr.comm || event->attr.mmap || event->attr.task)
return 1;
@@ -3288,12 +3296,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_task_ctx(&cpuctx->ctx, task_event);
- put_cpu_var(perf_cpu_context);
-
if (!ctx)
ctx = rcu_dereference(task_event->task->perf_event_ctxp);
if (ctx)
perf_event_task_ctx(ctx, task_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_unlock();
}
@@ -3370,6 +3377,9 @@ static void perf_event_comm_output(struct perf_event *event,
static int perf_event_comm_match(struct perf_event *event)
{
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (event->attr.comm)
return 1;
@@ -3406,15 +3416,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_comm_ctx(&cpuctx->ctx, comm_event);
- put_cpu_var(perf_cpu_context);
-
- /*
- * doesn't really matter which of the child contexts the
- * events ends up in.
- */
ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_comm_ctx(ctx, comm_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_unlock();
}
@@ -3489,6 +3494,9 @@ static void perf_event_mmap_output(struct perf_event *event,
static int perf_event_mmap_match(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (event->attr.mmap)
return 1;
@@ -3562,15 +3570,10 @@ got_name:
rcu_read_lock();
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
- put_cpu_var(perf_cpu_context);
-
- /*
- * doesn't really matter which of the child contexts the
- * events ends up in.
- */
ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_mmap_ctx(ctx, mmap_event);
+ put_cpu_var(perf_cpu_context);
rcu_read_unlock();
kfree(buf);
@@ -3861,6 +3864,9 @@ static int perf_swevent_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
+ if (event->cpu != -1 && event->cpu != smp_processor_id())
+ return 0;
+
if (!perf_swevent_is_counting(event))
return 0;
@@ -4011,6 +4017,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
event->pmu->read(event);
data.addr = 0;
+ data.raw = NULL;
data.period = event->hw.last_period;
regs = get_irq_regs();
/*
@@ -4080,8 +4087,7 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
u64 now;
now = cpu_clock(cpu);
- prev = atomic64_read(&event->hw.prev_count);
- atomic64_set(&event->hw.prev_count, now);
+ prev = atomic64_xchg(&event->hw.prev_count, now);
atomic64_add(now - prev, &event->count);
}
@@ -4286,15 +4292,8 @@ static void bp_perf_event_destroy(struct perf_event *event)
static const struct pmu *bp_perf_event_init(struct perf_event *bp)
{
int err;
- /*
- * The breakpoint is already filled if we haven't created the counter
- * through perf syscall
- * FIXME: manage to get trigerred to NULL if it comes from syscalls
- */
- if (!bp->callback)
- err = register_perf_hw_breakpoint(bp);
- else
- err = __register_perf_hw_breakpoint(bp);
+
+ err = register_perf_hw_breakpoint(bp);
if (err)
return ERR_PTR(err);
@@ -4308,6 +4307,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
struct perf_sample_data sample;
struct pt_regs *regs = data;
+ sample.raw = NULL;
sample.addr = bp->attr.bp_addr;
if (!perf_exclude_event(bp, regs))
@@ -4390,7 +4390,7 @@ perf_event_alloc(struct perf_event_attr *attr,
struct perf_event_context *ctx,
struct perf_event *group_leader,
struct perf_event *parent_event,
- perf_callback_t callback,
+ perf_overflow_handler_t overflow_handler,
gfp_t gfpflags)
{
const struct pmu *pmu;
@@ -4433,10 +4433,10 @@ perf_event_alloc(struct perf_event_attr *attr,
event->state = PERF_EVENT_STATE_INACTIVE;
- if (!callback && parent_event)
- callback = parent_event->callback;
+ if (!overflow_handler && parent_event)
+ overflow_handler = parent_event->overflow_handler;
- event->callback = callback;
+ event->overflow_handler = overflow_handler;
if (attr->disabled)
event->state = PERF_EVENT_STATE_OFF;
@@ -4571,7 +4571,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (attr->type >= PERF_TYPE_MAX)
return -EINVAL;
- if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
+ if (attr->__reserved_1 || attr->__reserved_2)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -4724,7 +4724,7 @@ SYSCALL_DEFINE5(perf_event_open,
if (IS_ERR(event))
goto err_put_context;
- err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
+ err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
if (err < 0)
goto err_free_put_context;
@@ -4776,7 +4776,8 @@ err_put_context:
*/
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
- pid_t pid, perf_callback_t callback)
+ pid_t pid,
+ perf_overflow_handler_t overflow_handler)
{
struct perf_event *event;
struct perf_event_context *ctx;
@@ -4793,7 +4794,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
}
event = perf_event_alloc(attr, cpu, ctx, NULL,
- NULL, callback, GFP_KERNEL);
+ NULL, overflow_handler, GFP_KERNEL);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err_put_context;
@@ -4998,7 +4999,7 @@ void perf_event_exit_task(struct task_struct *child)
* reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
- spin_lock(&child_ctx->lock);
+ raw_spin_lock(&child_ctx->lock);
child->perf_event_ctxp = NULL;
/*
* If this context is a clone; unclone it so it can't get
@@ -5007,7 +5008,7 @@ void perf_event_exit_task(struct task_struct *child)
*/
unclone_ctx(child_ctx);
update_context_time(child_ctx);
- spin_unlock_irqrestore(&child_ctx->lock, flags);
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Report the task dead after unscheduling the events so that we
@@ -5090,7 +5091,7 @@ again:
*/
int perf_event_init_task(struct task_struct *child)
{
- struct perf_event_context *child_ctx, *parent_ctx;
+ struct perf_event_context *child_ctx = NULL, *parent_ctx;
struct perf_event_context *cloned_ctx;
struct perf_event *event;
struct task_struct *parent = current;
@@ -5106,20 +5107,6 @@ int perf_event_init_task(struct task_struct *child)
return 0;
/*
- * This is executed from the parent task context, so inherit
- * events that have been marked for cloning.
- * First allocate and initialize a context for the child.
- */
-
- child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL);
- if (!child_ctx)
- return -ENOMEM;
-
- __perf_event_init_context(child_ctx, child);
- child->perf_event_ctxp = child_ctx;
- get_task_struct(child);
-
- /*
* If the parent's context is a clone, pin it so it won't get
* swapped under us.
*/
@@ -5149,6 +5136,26 @@ int perf_event_init_task(struct task_struct *child)
continue;
}
+ if (!child->perf_event_ctxp) {
+ /*
+ * This is executed from the parent task context, so
+ * inherit events that have been marked for cloning.
+ * First allocate and initialize a context for the
+ * child.
+ */
+
+ child_ctx = kzalloc(sizeof(struct perf_event_context),
+ GFP_KERNEL);
+ if (!child_ctx) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ __perf_event_init_context(child_ctx, child);
+ child->perf_event_ctxp = child_ctx;
+ get_task_struct(child);
+ }
+
ret = inherit_group(event, parent, parent_ctx,
child, child_ctx);
if (ret) {
@@ -5177,6 +5184,7 @@ int perf_event_init_task(struct task_struct *child)
get_ctx(child_ctx->parent_ctx);
}
+exit:
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
@@ -5291,11 +5299,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
- spin_lock_irq(&cpuctx->ctx.lock);
+ raw_spin_lock_irq(&cpuctx->ctx.lock);
mpt = min(perf_max_events - cpuctx->ctx.nr_events,
perf_max_events - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
- spin_unlock_irq(&cpuctx->ctx.lock);
+ raw_spin_unlock_irq(&cpuctx->ctx.lock);
}
spin_unlock(&perf_resource_lock);
diff --git a/kernel/pid.c b/kernel/pid.c
index d3f722d20f9..2e17c9c92cb 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -141,11 +141,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
* installing it:
*/
spin_lock_irq(&pidmap_lock);
- if (map->page)
- kfree(page);
- else
+ if (!map->page) {
map->page = page;
+ page = NULL;
+ }
spin_unlock_irq(&pidmap_lock);
+ kfree(page);
if (unlikely(!map->page))
break;
}
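In the alloc_pidmap() change above, the freshly allocated page is published under pidmap_lock and the local pointer is cleared only when the installation wins the race; the unconditional kfree() after the unlock then frees the page only when another CPU got there first, and kfree(NULL) is simply a no-op. A small userspace analogue of the same publish-under-the-lock, free-outside-the-lock pattern (pthread mutex and malloc/free stand in for the spinlock and kfree; all names are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static void *map_page;			/* the published page, if any */

/* Install a speculatively allocated page, dropping it if we lost the race. */
static void install_page(void)
{
	void *page = malloc(4096);	/* allocated outside the lock */

	pthread_mutex_lock(&map_lock);
	if (!map_page) {
		map_page = page;	/* we won: publish it */
		page = NULL;
	}
	pthread_mutex_unlock(&map_lock);

	free(page);			/* free(NULL) is a no-op, no branch needed */
}

int main(void)
{
	install_page();
	install_page();			/* second call frees its page outside the lock */
	return 0;
}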
@@ -268,12 +269,11 @@ struct pid *alloc_pid(struct pid_namespace *ns)
for (type = 0; type < PIDTYPE_MAX; ++type)
INIT_HLIST_HEAD(&pid->tasks[type]);
+ upid = pid->numbers + ns->level;
spin_lock_irq(&pidmap_lock);
- for (i = ns->level; i >= 0; i--) {
- upid = &pid->numbers[i];
+ for ( ; upid >= pid->numbers; --upid)
hlist_add_head_rcu(&upid->pid_chain,
&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
- }
spin_unlock_irq(&pidmap_lock);
out:
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 5187136fe1d..218e5af9015 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -6,7 +6,7 @@
#include <linux/vt_kern.h>
#include <linux/kbd_kern.h>
-#include <linux/console.h>
+#include <linux/vt.h>
#include <linux/module.h>
#include "power.h"
@@ -21,8 +21,7 @@ int pm_prepare_console(void)
if (orig_fgconsole < 0)
return 1;
- orig_kmsg = kmsg_redirect;
- kmsg_redirect = SUSPEND_CONSOLE;
+ orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
return 0;
}
@@ -30,7 +29,7 @@ void pm_restore_console(void)
{
if (orig_fgconsole >= 0) {
vt_move_to_console(orig_fgconsole, 0);
- kmsg_redirect = orig_kmsg;
+ vt_kmsg_redirect(orig_kmsg);
}
}
#endif
diff --git a/kernel/printk.c b/kernel/printk.c
index b5ac4d99c66..17463ca2e22 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -34,6 +34,7 @@
#include <linux/syscalls.h>
#include <linux/kexec.h>
#include <linux/ratelimit.h>
+#include <linux/kmsg_dump.h>
#include <asm/uaccess.h>
@@ -1405,4 +1406,122 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies,
return false;
}
EXPORT_SYMBOL(printk_timed_ratelimit);
+
+static DEFINE_SPINLOCK(dump_list_lock);
+static LIST_HEAD(dump_list);
+
+/**
+ * kmsg_dump_register - register a kernel log dumper.
+ * @dumper: pointer to the kmsg_dumper structure
+ *
+ * Adds a kernel log dumper to the system. The dump callback in the
+ * structure will be called when the kernel oopses or panics and must be
+ * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
+ */
+int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+ unsigned long flags;
+ int err = -EBUSY;
+
+ /* The dump callback needs to be set */
+ if (!dumper->dump)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dump_list_lock, flags);
+ /* Don't allow registering multiple times */
+ if (!dumper->registered) {
+ dumper->registered = 1;
+ list_add_tail(&dumper->list, &dump_list);
+ err = 0;
+ }
+ spin_unlock_irqrestore(&dump_list_lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_register);
+
+/**
+ * kmsg_dump_unregister - unregister a kmsg dumper.
+ * @dumper: pointer to the kmsg_dumper structure
+ *
+ * Removes a dump device from the system. Returns zero on success and
+ * %-EINVAL otherwise.
+ */
+int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+ unsigned long flags;
+ int err = -EINVAL;
+
+ spin_lock_irqsave(&dump_list_lock, flags);
+ if (dumper->registered) {
+ dumper->registered = 0;
+ list_del(&dumper->list);
+ err = 0;
+ }
+ spin_unlock_irqrestore(&dump_list_lock, flags);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
+
+static const char * const kmsg_reasons[] = {
+ [KMSG_DUMP_OOPS] = "oops",
+ [KMSG_DUMP_PANIC] = "panic",
+};
+
+static const char *kmsg_to_str(enum kmsg_dump_reason reason)
+{
+ if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
+ return "unknown";
+
+ return kmsg_reasons[reason];
+}
+
+/**
+ * kmsg_dump - dump kernel log to kernel message dumpers.
+ * @reason: the reason (oops, panic etc) for dumping
+ *
+ * Iterate through each of the dump devices and call the oops/panic
+ * callbacks with the log buffer.
+ */
+void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ unsigned long end;
+ unsigned chars;
+ struct kmsg_dumper *dumper;
+ const char *s1, *s2;
+ unsigned long l1, l2;
+ unsigned long flags;
+
+ /* Theoretically, the log could move on after we do this, but
+ there's not a lot we can do about that. The new messages
+ will overwrite the start of what we dump. */
+ spin_lock_irqsave(&logbuf_lock, flags);
+ end = log_end & LOG_BUF_MASK;
+ chars = logged_chars;
+ spin_unlock_irqrestore(&logbuf_lock, flags);
+
+ if (chars > end) {
+ s1 = log_buf + log_buf_len - chars + end;
+ l1 = chars - end;
+
+ s2 = log_buf;
+ l2 = end;
+ } else {
+ s1 = "";
+ l1 = 0;
+
+ s2 = log_buf + end - chars;
+ l2 = chars;
+ }
+
+ if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
+ printk(KERN_ERR "kmsg_dump: dump list lock is held during %s, skipping dump\n",
+ kmsg_to_str(reason));
+ return;
+ }
+ list_for_each_entry(dumper, &dump_list, list)
+ dumper->dump(dumper, reason, s1, l1, s2, l2);
+ spin_unlock_irqrestore(&dump_list_lock, flags);
+}
#endif
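The kmsg_dump_register()/kmsg_dump_unregister() interface added above takes a struct kmsg_dumper whose dump callback must be set; kmsg_dump() hands that callback the reason plus the log buffer as two segments, s1/l1 and s2/l2, where s2 ends at the current write position and therefore holds the newest text. A minimal sketch of a client module against the interface as introduced in this hunk (the callback signature is inferred from the kmsg_dump() call above; the module name and what it does with the text are illustrative):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kmsg_dump.h>

static char saved_tail[256];

/* Keep the newest part of the log around for post-mortem inspection. */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	size_t n = min_t(size_t, l2, sizeof(saved_tail) - 1);

	memcpy(saved_tail, s2 + l2 - n, n);
	saved_tail[n] = '\0';
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");	/* the register/unregister symbols are EXPORT_SYMBOL_GPL */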
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a621a67ef4e..9bb52177af0 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_count)[pipe_count];
+ __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_batch)[completed];
+ __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}
@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_count)[pipe_count];
+ __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- ++__get_cpu_var(rcu_torture_batch)[completed];
+ __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();
diff --git a/kernel/relay.c b/kernel/relay.c
index 760c26209a3..c705a41b4ba 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1198,7 +1198,7 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
relay_consume_bytes(rbuf, buf->private);
}
-static struct pipe_buf_operations relay_pipe_buf_ops = {
+static const struct pipe_buf_operations relay_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
diff --git a/kernel/resource.c b/kernel/resource.c
index fb11a58b959..af96c1e4b54 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -308,35 +308,37 @@ static int find_resource(struct resource *root, struct resource *new,
void *alignf_data)
{
struct resource *this = root->child;
+ struct resource tmp = *new;
- new->start = root->start;
+ tmp.start = root->start;
/*
* Skip past an allocated resource that starts at 0, since the assignment
- * of this->start - 1 to new->end below would cause an underflow.
+ * of this->start - 1 to tmp.end below would cause an underflow.
*/
if (this && this->start == 0) {
- new->start = this->end + 1;
+ tmp.start = this->end + 1;
this = this->sibling;
}
for(;;) {
if (this)
- new->end = this->start - 1;
+ tmp.end = this->start - 1;
else
- new->end = root->end;
- if (new->start < min)
- new->start = min;
- if (new->end > max)
- new->end = max;
- new->start = ALIGN(new->start, align);
+ tmp.end = root->end;
+ if (tmp.start < min)
+ tmp.start = min;
+ if (tmp.end > max)
+ tmp.end = max;
+ tmp.start = ALIGN(tmp.start, align);
if (alignf)
- alignf(alignf_data, new, size, align);
- if (new->start < new->end && new->end - new->start >= size - 1) {
- new->end = new->start + size - 1;
+ alignf(alignf_data, &tmp, size, align);
+ if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
+ new->start = tmp.start;
+ new->end = tmp.start + size - 1;
return 0;
}
if (!this)
break;
- new->start = this->end + 1;
+ tmp.start = this->end + 1;
this = this->sibling;
}
return -EBUSY;
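The find_resource() rework above runs all of the trial arithmetic on a local struct resource tmp and copies the result into *new only once a fitting range has been found, so a failing search no longer leaves the caller's resource with clobbered start/end values. A small userspace sketch of the same compute-on-a-scratch-copy, commit-on-success pattern (the range type and the fits() helper are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct range {
	unsigned long start;
	unsigned long end;
};

static bool fits(const struct range *r, unsigned long size)
{
	return r->start < r->end && r->end - r->start >= size - 1;
}

/* Try to place "size" units inside "root"; touch *out only on success. */
static int find_range(const struct range *root, struct range *out,
		      unsigned long size)
{
	struct range tmp = *out;	/* scratch copy, as in find_resource() */

	tmp.start = root->start;
	tmp.end = root->end;
	if (fits(&tmp, size)) {
		out->start = tmp.start;
		out->end = tmp.start + size - 1;
		return 0;
	}
	return -1;			/* *out is untouched on failure */
}

int main(void)
{
	struct range root = { .start = 0x1000, .end = 0x1fff };
	struct range new = { 0, 0 };

	if (!find_range(&root, &new, 0x100))
		printf("placed at %#lx-%#lx\n", new.start, new.end);
	return 0;
}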
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 5fcb4fe645e..ddabb54bb5c 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -37,8 +37,8 @@ do { \
if (rt_trace_on) { \
rt_trace_on = 0; \
console_verbose(); \
- if (spin_is_locked(&current->pi_lock)) \
- spin_unlock(&current->pi_lock); \
+ if (raw_spin_is_locked(&current->pi_lock)) \
+ raw_spin_unlock(&current->pi_lock); \
} \
} while (0)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 29bd4baf9e7..a9604815786 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
{
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/*
* Task can not go away as we did a get_task() before !
*/
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
/*
@@ -231,8 +231,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto out_unlock_pi;
lock = waiter->lock;
- if (!spin_trylock(&lock->wait_lock)) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ if (!raw_spin_trylock(&lock->wait_lock)) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
goto retry;
}
@@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Deadlock detection */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
ret = deadlock_detect ? -EDEADLK : 0;
goto out_unlock_pi;
}
@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
plist_add(&waiter->list_entry, &lock->wait_list);
/* Release the task */
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
put_task_struct(task);
/* Grab the next task */
task = rt_mutex_owner(lock);
get_task_struct(task);
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
/* Boost the owner */
@@ -277,10 +277,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
__rt_mutex_adjust_prio(task);
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;
@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto again;
out_unlock_pi:
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
put_task_struct(task);
@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
if (pendowner == task)
return 1;
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
if (task->prio >= pendowner->prio) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 0;
}
@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* priority.
*/
if (likely(!rt_mutex_has_waiters(lock))) {
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 1;
}
@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
next = rt_mutex_top_waiter(lock);
plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
__rt_mutex_adjust_prio(pendowner);
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
/*
* We are going to steal the lock and a waiter was
@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* might be task:
*/
if (likely(next->task != task)) {
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
plist_add(&next->pi_list_entry, &task->pi_waiters);
__rt_mutex_adjust_prio(task);
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
return 1;
}
@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0, res;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
task->pi_blocked_on = waiter;
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
chain_walk = 1;
@@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
*/
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
task);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
return res;
}
@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
struct task_struct *pendowner;
unsigned long flags;
- spin_lock_irqsave(&current->pi_lock, flags);
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
waiter = rt_mutex_top_waiter(lock);
plist_del(&waiter->list_entry, &lock->wait_list);
@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
/*
* Clear the pi_blocked_on variable and enqueue a possible
@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* waiter with higher priority than pending-owner->normal_prio
* is blocked on the unboosted (pending) owner.
*/
- spin_lock_irqsave(&pendowner->pi_lock, flags);
+ raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
WARN_ON(!pendowner->pi_blocked_on);
WARN_ON(pendowner->pi_blocked_on != waiter);
@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
next = rt_mutex_top_waiter(lock);
plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
}
- spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
wake_up_process(pendowner);
}
@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0;
- spin_lock_irqsave(&current->pi_lock, flags);
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
waiter->task = NULL;
current->pi_blocked_on = NULL;
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
if (first && owner != current) {
- spin_lock_irqsave(&owner->pi_lock, flags);
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock,
if (owner->pi_blocked_on)
chain_walk = 1;
- spin_unlock_irqrestore(&owner->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
@@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock,
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(owner);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
}
/*
@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task)
struct rt_mutex_waiter *waiter;
unsigned long flags;
- spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
if (!waiter || waiter->list_entry.prio == task->prio) {
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
- spin_unlock_irqrestore(&task->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);
@@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
break;
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
if (waiter->task)
schedule_rt_mutex(lock);
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
set_current_state(state);
}
@@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock)) {
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return 0;
}
@@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/* Remove pending timer: */
if (unlikely(timeout))
@@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
{
int ret = 0;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
if (likely(rt_mutex_owner(lock) != current)) {
@@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
fixup_rt_mutex_waiters(lock);
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return ret;
}
@@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
debug_rt_mutex_unlock(lock);
@@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
if (!rt_mutex_has_waiters(lock)) {
lock->owner = NULL;
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
return;
}
wakeup_next_waiter(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/* Undo pi boosting if necessary: */
rt_mutex_adjust_prio(current);
@@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
- spin_lock_init(&lock->wait_lock);
- plist_head_init(&lock->wait_list, &lock->wait_lock);
+ raw_spin_lock_init(&lock->wait_lock);
+ plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
debug_rt_mutex_init(lock, name);
}
@@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
{
int ret;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
mark_rt_mutex_waiters(lock);
@@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
/* We got the lock for task. */
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task, 0);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
rt_mutex_deadlock_account_lock(lock, task);
return 1;
}
@@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
*/
ret = 0;
}
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
@@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
{
int ret;
- spin_lock(&lock->wait_lock);
+ raw_spin_lock(&lock->wait_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
*/
fixup_rt_mutex_waiters(lock);
- spin_unlock(&lock->wait_lock);
+ raw_spin_unlock(&lock->wait_lock);
/*
* Readjust priority, when we did not get the lock. We might have been
diff --git a/kernel/sched.c b/kernel/sched.c
index e7f2cfa6a25..87f1f47beff 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
struct rt_bandwidth {
/* nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
- spin_lock_init(&rt_b->rt_runtime_lock);
+ raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
if (hrtimer_active(&rt_b->rt_period_timer))
return;
- spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
@@ -470,7 +470,7 @@ struct rt_rq {
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
- spinlock_t rt_runtime_lock;
+ raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
@@ -525,7 +525,7 @@ static struct root_domain def_root_domain;
*/
struct rq {
/* runqueue lock: */
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +685,7 @@ inline void update_rq_clock(struct rq *rq)
*/
int runqueue_is_locked(int cpu)
{
- return spin_is_locked(&cpu_rq(cpu)->lock);
+ return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}
/*
@@ -814,6 +814,7 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
* default: 0.25ms
*/
unsigned int sysctl_sched_shares_ratelimit = 250000;
+unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
/*
* Inject some fuzzyness into changing the per-cpu group shares
@@ -892,7 +893,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -916,9 +917,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
#else
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
#endif
}
@@ -948,10 +949,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
{
for (;;) {
struct rq *rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
}
@@ -968,10 +969,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
}
@@ -980,19 +981,19 @@ void task_rq_unlock_wait(struct task_struct *p)
struct rq *rq = task_rq(p);
smp_mb(); /* spin-unlock-wait is not a full memory barrier */
- spin_unlock_wait(&rq->lock);
+ raw_spin_unlock_wait(&rq->lock);
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
- spin_unlock_irqrestore(&rq->lock, *flags);
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
/*
@@ -1005,7 +1006,7 @@ static struct rq *this_rq_lock(void)
local_irq_disable();
rq = this_rq();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
return rq;
}
@@ -1052,10 +1053,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}
@@ -1068,10 +1069,10 @@ static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
/*
@@ -1178,7 +1179,7 @@ static void resched_task(struct task_struct *p)
{
int cpu;
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;
@@ -1200,10 +1201,10 @@ static void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!spin_trylock_irqsave(&rq->lock, flags))
+ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ
@@ -1272,7 +1273,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
- assert_spin_locked(&task_rq(p)->lock);
+ assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}
@@ -1599,11 +1600,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
}
@@ -1614,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
*/
static int tg_shares_up(struct task_group *tg, void *data)
{
- unsigned long weight, rq_weight = 0, shares = 0;
+ unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
unsigned long *usd_rq_weight;
struct sched_domain *sd = data;
unsigned long flags;
@@ -1630,6 +1631,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
weight = tg->cfs_rq[i]->load.weight;
usd_rq_weight[i] = weight;
+ rq_weight += weight;
/*
* If there are currently no tasks on the cpu pretend there
* is one of average load so that when a new task gets to
@@ -1638,10 +1640,13 @@ static int tg_shares_up(struct task_group *tg, void *data)
if (!weight)
weight = NICE_0_LOAD;
- rq_weight += weight;
+ sum_weight += weight;
shares += tg->cfs_rq[i]->shares;
}
+ if (!rq_weight)
+ rq_weight = sum_weight;
+
if ((!shares && rq_weight) || shares > tg->shares)
shares = tg->shares;
@@ -1701,9 +1706,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
if (root_task_group_empty())
return;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
update_shares(sd);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
}
static void update_h_load(long cpu)
@@ -1743,7 +1748,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;
@@ -1764,14 +1769,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
int ret = 0;
- if (unlikely(!spin_trylock(&busiest->lock))) {
+ if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
- spin_unlock(&this_rq->lock);
- spin_lock(&busiest->lock);
- spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_unlock(&this_rq->lock);
+ raw_spin_lock(&busiest->lock);
+ raw_spin_lock_nested(&this_rq->lock,
+ SINGLE_DEPTH_NESTING);
ret = 1;
} else
- spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock_nested(&busiest->lock,
+ SINGLE_DEPTH_NESTING);
}
return ret;
}
@@ -1785,7 +1792,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work well under rq->lock */
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
}
@@ -1795,7 +1802,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- spin_unlock(&busiest->lock);
+ raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
#endif
@@ -1810,6 +1817,22 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
#endif
static void calc_load_account_active(struct rq *this_rq);
+static void update_sysctl(void);
+static int get_update_sysctl_factor(void);
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+ set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+ /*
+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+ * successfully executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
+ task_thread_info(p)->cpu = cpu;
+#endif
+}
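__set_task_cpu() above is moved up unchanged, and its smp_wmb() orders the per-task updates before the store that publishes the new cpu, so a CPU that observes the new value also sees the data written beforehand. A userspace analogue of that publish ordering using a C11 release/acquire pair (a sketch under the assumption that release/acquire is an acceptable stand-in for smp_wmb() plus the dependent read; the names are illustrative):

#include <stdatomic.h>

struct task {
	int data;		/* per-task state, updated first */
	_Atomic int cpu;	/* published last */
};

static void set_task_cpu_like(struct task *t, int cpu, int new_data)
{
	t->data = new_data;
	/* Release: the data store above is visible before the new cpu value. */
	atomic_store_explicit(&t->cpu, cpu, memory_order_release);
}

static int read_after_publish(struct task *t, int expected_cpu)
{
	/* Acquire pairs with the release store in set_task_cpu_like(). */
	while (atomic_load_explicit(&t->cpu, memory_order_acquire) != expected_cpu)
		;		/* spin until the new cpu value is visible */

	return t->data;		/* guaranteed to see the data written before it */
}

int main(void)
{
	struct task t = { .data = 0 };

	set_task_cpu_like(&t, 3, 42);
	return read_after_publish(&t, 3) == 42 ? 0 : 1;
}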
#include "sched_stats.h"
#include "sched_idletask.c"
@@ -1967,20 +1990,6 @@ inline int task_curr(const struct task_struct *p)
return cpu_curr(task_cpu(p)) == p;
}
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
- set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
- /*
- * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
- * successfuly executed on another CPU. We must ensure that updates of
- * per-task data have been completed by this moment.
- */
- smp_wmb();
- task_thread_info(p)->cpu = cpu;
-#endif
-}
-
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio, int running)
@@ -1993,39 +2002,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio, running);
}
-/**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @p: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- *
- * Function lives here instead of kthread.c because it messes with
- * scheduler internals which require locking.
- */
-void kthread_bind(struct task_struct *p, unsigned int cpu)
-{
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
-
- /* Must have done schedule() in kthread() before we set_task_cpu */
- if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
- WARN_ON(1);
- return;
- }
-
- spin_lock_irqsave(&rq->lock, flags);
- update_rq_clock(rq);
- set_task_cpu(p, cpu);
- p->cpus_allowed = cpumask_of_cpu(cpu);
- p->rt.nr_cpus_allowed = 1;
- p->flags |= PF_THREAD_BOUND;
- spin_unlock_irqrestore(&rq->lock, flags);
-}
-EXPORT_SYMBOL(kthread_bind);
-
#ifdef CONFIG_SMP
/*
* Is this task likely cache-hot:
@@ -2035,6 +2011,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
s64 delta;
+ if (p->sched_class != &fair_sched_class)
+ return 0;
+
/*
* Buddy candidates are cache hot:
*/
@@ -2043,9 +2022,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
&p->se == cfs_rq_of(&p->se)->last))
return 1;
- if (p->sched_class != &fair_sched_class)
- return 0;
-
if (sysctl_sched_migration_cost == -1)
return 1;
if (sysctl_sched_migration_cost == 0)
@@ -2056,38 +2032,24 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
return delta < (s64)sysctl_sched_migration_cost;
}
-
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
- int old_cpu = task_cpu(p);
- struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
- struct cfs_rq *old_cfsrq = task_cfs_rq(p),
- *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
- u64 clock_offset;
-
- clock_offset = old_rq->clock - new_rq->clock;
+#ifdef CONFIG_SCHED_DEBUG
+ /*
+ * We should never call set_task_cpu() on a blocked task,
+ * ttwu() will sort out the placement.
+ */
+ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+ !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+#endif
trace_sched_migrate_task(p, new_cpu);
-#ifdef CONFIG_SCHEDSTATS
- if (p->se.wait_start)
- p->se.wait_start -= clock_offset;
- if (p->se.sleep_start)
- p->se.sleep_start -= clock_offset;
- if (p->se.block_start)
- p->se.block_start -= clock_offset;
-#endif
- if (old_cpu != new_cpu) {
- p->se.nr_migrations++;
-#ifdef CONFIG_SCHEDSTATS
- if (task_hot(p, old_rq->clock, NULL))
- schedstat_inc(p, se.nr_forced2_migrations);
-#endif
- perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
- 1, 1, NULL, 0);
- }
- p->se.vruntime -= old_cfsrq->min_vruntime -
- new_cfsrq->min_vruntime;
+ if (task_cpu(p) == new_cpu)
+ return;
+
+ p->se.nr_migrations++;
+ perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
__set_task_cpu(p, new_cpu);
}
@@ -2112,13 +2074,10 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
/*
* If the task is not on a runqueue (and not running), then
- * it is sufficient to simply update the task's cpu field.
+ * the next wake-up will properly place the task.
*/
- if (!p->se.on_rq && !task_running(rq, p)) {
- update_rq_clock(rq);
- set_task_cpu(p, dest_cpu);
+ if (!p->se.on_rq && !task_running(rq, p))
return 0;
- }
init_completion(&req->done);
req->task = p;
@@ -2323,6 +2282,77 @@ void task_oncpu_function_call(struct task_struct *p,
preempt_enable();
}
+#ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+ int dest_cpu;
+ const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+ /* Look for allowed, online CPU in same node. */
+ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ return dest_cpu;
+
+ /* Any allowed, online CPU? */
+ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+ if (dest_cpu < nr_cpu_ids)
+ return dest_cpu;
+
+ /* No more Mr. Nice Guy. */
+ if (dest_cpu >= nr_cpu_ids) {
+ rcu_read_lock();
+ cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+ rcu_read_unlock();
+ dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+ /*
+ * Don't tell them about moving exiting tasks or
+ * kernel threads (both mm NULL), since they never
+ * leave kernel.
+ */
+ if (p->mm && printk_ratelimit()) {
+ printk(KERN_INFO "process %d (%s) no "
+ "longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
+ }
+
+ return dest_cpu;
+}
+
+/*
+ * Called from:
+ *
+ * - fork, @p is stable because it isn't on the tasklist yet
+ *
+ * - exec, @p is unstable, retry loop
+ *
+ * - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
+ * we should be good.
+ */
+static inline
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+
+ /*
+ * In order not to call set_task_cpu() on a blocking task we need
+ * to rely on ttwu() to place the task on a valid ->cpus_allowed
+ * cpu.
+ *
+ * Since this is common to all placement strategies, this lives here.
+ *
+ * [ this allows ->select_task() to simply return task_cpu(p) and
+ * not worry about this generic constraint ]
+ */
+ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+ !cpu_online(cpu)))
+ cpu = select_fallback_rq(task_cpu(p), p);
+
+ return cpu;
+}
+#endif
+
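select_fallback_rq() above cascades from an allowed CPU in the task's node, to any allowed active CPU, and finally resets the allowed mask via the cpuset code and picks from the active mask. A userspace sketch of the same cascade with plain 64-bit CPU masks (purely illustrative; the real code uses cpumask_* helpers and cpuset_cpus_allowed_locked()):

#include <stdint.h>
#include <stdio.h>

static int first_cpu(uint64_t mask)
{
	return mask ? __builtin_ctzll(mask) : -1;	/* lowest set bit, or -1 */
}

/*
 * Prefer an allowed CPU in the same node, then any allowed online CPU,
 * and as a last resort widen the allowed set to everything online.
 */
static int fallback_cpu(uint64_t *allowed, uint64_t online, uint64_t node)
{
	int cpu;

	cpu = first_cpu(*allowed & online & node);
	if (cpu >= 0)
		return cpu;

	cpu = first_cpu(*allowed & online);
	if (cpu >= 0)
		return cpu;

	*allowed = online;		/* "No more Mr. Nice Guy." */
	return first_cpu(*allowed);
}

int main(void)
{
	uint64_t allowed = 1ull << 5;	/* only CPU 5 allowed... */
	uint64_t online = 0x0full;	/* ...but only CPUs 0-3 are online */
	uint64_t node = 0x03ull;	/* this node holds CPUs 0 and 1 */

	printf("fallback cpu: %d\n", fallback_cpu(&allowed, online, node));
	return 0;			/* prints 0 */
}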
/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
@@ -2374,17 +2404,18 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
p->state = TASK_WAKING;
- task_rq_unlock(rq, &flags);
- cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
- if (cpu != orig_cpu) {
- local_irq_save(flags);
- rq = cpu_rq(cpu);
- update_rq_clock(rq);
+ if (p->sched_class->task_waking)
+ p->sched_class->task_waking(rq, p);
+
+ __task_rq_unlock(rq);
+
+ cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+ if (cpu != orig_cpu)
set_task_cpu(p, cpu);
- local_irq_restore(flags);
- }
- rq = task_rq_lock(p, &flags);
+
+ rq = __task_rq_lock(p);
+ update_rq_clock(rq);
WARN_ON(p->state != TASK_WAKING);
cpu = task_cpu(p);
@@ -2440,8 +2471,8 @@ out_running:
p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
- if (p->sched_class->task_wake_up)
- p->sched_class->task_wake_up(rq, p);
+ if (p->sched_class->task_woken)
+ p->sched_class->task_woken(rq, p);
if (unlikely(rq->idle_stamp)) {
u64 delta = rq->clock - rq->idle_stamp;
@@ -2499,7 +2530,6 @@ static void __sched_fork(struct task_struct *p)
p->se.avg_overlap = 0;
p->se.start_runtime = 0;
p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
- p->se.avg_running = 0;
#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
@@ -2521,7 +2551,6 @@ static void __sched_fork(struct task_struct *p)
p->se.nr_failed_migrations_running = 0;
p->se.nr_failed_migrations_hot = 0;
p->se.nr_forced_migrations = 0;
- p->se.nr_forced2_migrations = 0;
p->se.nr_wakeups = 0;
p->se.nr_wakeups_sync = 0;
@@ -2542,14 +2571,6 @@ static void __sched_fork(struct task_struct *p)
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif
-
- /*
- * We mark the process as running here, but have not actually
- * inserted it onto the runqueue yet. This guarantees that
- * nobody will actually run it, and a signal or other external
- * event cannot wake it up and insert it on the runqueue either.
- */
- p->state = TASK_RUNNING;
}
/*
@@ -2558,9 +2579,14 @@ static void __sched_fork(struct task_struct *p)
void sched_fork(struct task_struct *p, int clone_flags)
{
int cpu = get_cpu();
- unsigned long flags;
__sched_fork(p);
+ /*
+ * We mark the process as waking here. This guarantees that
+ * nobody will actually run it, and a signal or other external
+ * event cannot wake it up and insert it on the runqueue either.
+ */
+ p->state = TASK_WAKING;
/*
* Revert to default priority/policy on fork if requested.
@@ -2592,13 +2618,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
+ if (p->sched_class->task_fork)
+ p->sched_class->task_fork(p);
+
#ifdef CONFIG_SMP
- cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+ cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
#endif
- local_irq_save(flags);
- update_rq_clock(cpu_rq(cpu));
set_task_cpu(p, cpu);
- local_irq_restore(flags);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
if (likely(sched_info_on()))
@@ -2629,24 +2655,15 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
struct rq *rq;
rq = task_rq_lock(p, &flags);
- BUG_ON(p->state != TASK_RUNNING);
+ BUG_ON(p->state != TASK_WAKING);
+ p->state = TASK_RUNNING;
update_rq_clock(rq);
-
- if (!p->sched_class->task_new || !current->se.on_rq) {
- activate_task(rq, p, 0);
- } else {
- /*
- * Let the scheduling class do new task startup
- * management (if any):
- */
- p->sched_class->task_new(rq, p);
- inc_nr_running(rq);
- }
+ activate_task(rq, p, 0);
trace_sched_wakeup_new(rq, p, 1);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
- if (p->sched_class->task_wake_up)
- p->sched_class->task_wake_up(rq, p);
+ if (p->sched_class->task_woken)
+ p->sched_class->task_woken(rq, p);
#endif
task_rq_unlock(rq, &flags);
}
@@ -2798,10 +2815,10 @@ static inline void post_schedule(struct rq *rq)
if (rq->post_schedule) {
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->post_schedule = 0;
}
@@ -3083,15 +3100,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
- spin_lock(&rq1->lock);
+ raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
- spin_lock(&rq1->lock);
- spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq1->lock);
+ raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
- spin_lock(&rq2->lock);
- spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+ raw_spin_lock(&rq2->lock);
+ raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);
@@ -3108,29 +3125,44 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
- spin_unlock(&rq1->lock);
+ raw_spin_unlock(&rq1->lock);
if (rq1 != rq2)
- spin_unlock(&rq2->lock);
+ raw_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}
/*
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
*/
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
{
+ struct task_struct *p = current;
struct migration_req req;
+ int dest_cpu, this_cpu;
unsigned long flags;
struct rq *rq;
+again:
+ this_cpu = get_cpu();
+ dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+ if (dest_cpu == this_cpu) {
+ put_cpu();
+ return;
+ }
+
rq = task_rq_lock(p, &flags);
+ put_cpu();
+
+ /*
+ * select_task_rq() can race against ->cpus_allowed
+ */
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
- || unlikely(!cpu_active(dest_cpu)))
- goto out;
+ || unlikely(!cpu_active(dest_cpu))) {
+ task_rq_unlock(rq, &flags);
+ goto again;
+ }
/* force the process onto the specified CPU */
if (migrate_task(p, dest_cpu, &req)) {
@@ -3145,24 +3177,10 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
return;
}
-out:
task_rq_unlock(rq, &flags);
}
/*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
- int new_cpu, this_cpu = get_cpu();
- new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
- put_cpu();
- if (new_cpu != this_cpu)
- sched_migrate_task(current, new_cpu);
-}
-
-/*
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
*/
@@ -3172,10 +3190,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
- /*
- * Note that idle threads have a prio of MAX_PRIO, for this test
- * to be always true for them.
- */
check_preempt_curr(this_rq, p, 0);
}
@@ -4134,7 +4148,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
unsigned long flags;
struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
- cpumask_copy(cpus, cpu_online_mask);
+ cpumask_copy(cpus, cpu_active_mask);
/*
* When power savings policy is enabled for the parent domain, idle
@@ -4207,14 +4221,15 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
- spin_lock_irqsave(&busiest->lock, flags);
+ raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock,
+ flags);
all_pinned = 1;
goto out_one_pinned;
}
@@ -4224,7 +4239,7 @@ redo:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
- spin_unlock_irqrestore(&busiest->lock, flags);
+ raw_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance)
wake_up_process(busiest->migration_thread);
@@ -4297,7 +4312,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
int all_pinned = 0;
struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
- cpumask_copy(cpus, cpu_online_mask);
+ cpumask_copy(cpus, cpu_active_mask);
/*
* When power savings policy is enabled for the parent domain, idle
@@ -4406,10 +4421,10 @@ redo:
/*
* Should not call ttwu while holding a rq->lock
*/
- spin_unlock(&this_rq->lock);
+ raw_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
- spin_lock(&this_rq->lock);
+ raw_spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;
@@ -4694,7 +4709,7 @@ int select_nohz_load_balancer(int stop_tick)
cpumask_set_cpu(cpu, nohz.cpu_mask);
/* time for ilb owner also to sleep */
- if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
+ if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) {
if (atomic_read(&nohz.load_balancer) == cpu)
atomic_set(&nohz.load_balancer, -1);
return 0;
@@ -5278,11 +5293,11 @@ void scheduler_tick(void)
sched_clock_tick();
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
perf_event_task_tick(curr, cpu);
@@ -5396,13 +5411,14 @@ static inline void schedule_debug(struct task_struct *prev)
#endif
}
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
- u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
+ if (prev->state == TASK_RUNNING) {
+ u64 runtime = prev->se.sum_exec_runtime;
- update_avg(&p->se.avg_running, runtime);
+ runtime -= prev->se.prev_sum_exec_runtime;
+ runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
- if (p->state == TASK_RUNNING) {
/*
* In order to avoid avg_overlap growing stale when we are
* indeed overlapping and hence not getting put to sleep, grow
@@ -5412,12 +5428,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
* correlates to the amount of cache footprint a task can
* build up.
*/
- runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
- update_avg(&p->se.avg_overlap, runtime);
- } else {
- update_avg(&p->se.avg_running, 0);
+ update_avg(&prev->se.avg_overlap, runtime);
}
- p->sched_class->put_prev_task(rq, p);
+ prev->sched_class->put_prev_task(rq, prev);
}
/*
@@ -5478,7 +5491,7 @@ need_resched_nonpreemptible:
if (sched_feat(HRTICK))
hrtick_clear(rq);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
clear_tsk_need_resched(prev);
@@ -5514,7 +5527,7 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);
@@ -5931,14 +5944,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
*/
bool try_wait_for_completion(struct completion *x)
{
+ unsigned long flags;
int ret = 1;
- spin_lock_irq(&x->wait.lock);
+ spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
- spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
@@ -5953,12 +5967,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
*/
bool completion_done(struct completion *x)
{
+ unsigned long flags;
int ret = 1;
- spin_lock_irq(&x->wait.lock);
+ spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
- spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(completion_done);
@@ -6343,7 +6358,7 @@ recheck:
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*/
- spin_lock_irqsave(&p->pi_lock, flags);
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
/*
* To be able to change p->policy safely, the appropriate
* runqueue lock must be held.
@@ -6353,7 +6368,7 @@ recheck:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
update_rq_clock(rq);
@@ -6377,7 +6392,7 @@ recheck:
check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
- spin_unlock_irqrestore(&p->pi_lock, flags);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
rt_mutex_adjust_pi(p);
@@ -6477,7 +6492,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
return -EINVAL;
retval = -ESRCH;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (p) {
retval = security_task_getscheduler(p);
@@ -6485,7 +6500,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
retval = p->policy
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return retval;
}
@@ -6503,7 +6518,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
if (!param || pid < 0)
return -EINVAL;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
@@ -6514,7 +6529,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
goto out_unlock;
lp.sched_priority = p->rt_priority;
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
/*
* This one might sleep, we cannot do it with a spinlock held ...
@@ -6524,7 +6539,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
return retval;
out_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return retval;
}
@@ -6535,22 +6550,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
int retval;
get_online_cpus();
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}
- /*
- * It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
- * usage count and then drop tasklist_lock.
- */
+ /* Prevent p going away */
get_task_struct(p);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
@@ -6631,10 +6642,12 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
struct task_struct *p;
+ unsigned long flags;
+ struct rq *rq;
int retval;
get_online_cpus();
- read_lock(&tasklist_lock);
+ rcu_read_lock();
retval = -ESRCH;
p = find_process_by_pid(pid);
@@ -6645,10 +6658,12 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
if (retval)
goto out_unlock;
+ rq = task_rq_lock(p, &flags);
cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+ task_rq_unlock(rq, &flags);
out_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
put_online_cpus();
return retval;
@@ -6703,7 +6718,7 @@ SYSCALL_DEFINE0(sched_yield)
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
- _raw_spin_unlock(&rq->lock);
+ do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched();
schedule();
@@ -6883,6 +6898,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
{
struct task_struct *p;
unsigned int time_slice;
+ unsigned long flags;
+ struct rq *rq;
int retval;
struct timespec t;
@@ -6890,7 +6907,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
return -EINVAL;
retval = -ESRCH;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (!p)
goto out_unlock;
@@ -6899,15 +6916,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
if (retval)
goto out_unlock;
- time_slice = p->sched_class->get_rr_interval(p);
+ rq = task_rq_lock(p, &flags);
+ time_slice = p->sched_class->get_rr_interval(rq, p);
+ task_rq_unlock(rq, &flags);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
return retval;
out_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return retval;
}
@@ -6995,12 +7014,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
+ idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
- idle->prio = idle->normal_prio = MAX_PRIO;
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
__set_task_cpu(idle, cpu);
@@ -7008,7 +7027,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)
@@ -7041,22 +7060,43 @@ cpumask_var_t nohz_cpu_mask;
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
-static inline void sched_init_granularity(void)
+static int get_update_sysctl_factor(void)
{
- unsigned int factor = 1 + ilog2(num_online_cpus());
- const unsigned long limit = 200000000;
+ unsigned int cpus = min_t(int, num_online_cpus(), 8);
+ unsigned int factor;
+
+ switch (sysctl_sched_tunable_scaling) {
+ case SCHED_TUNABLESCALING_NONE:
+ factor = 1;
+ break;
+ case SCHED_TUNABLESCALING_LINEAR:
+ factor = cpus;
+ break;
+ case SCHED_TUNABLESCALING_LOG:
+ default:
+ factor = 1 + ilog2(cpus);
+ break;
+ }
- sysctl_sched_min_granularity *= factor;
- if (sysctl_sched_min_granularity > limit)
- sysctl_sched_min_granularity = limit;
+ return factor;
+}
- sysctl_sched_latency *= factor;
- if (sysctl_sched_latency > limit)
- sysctl_sched_latency = limit;
+static void update_sysctl(void)
+{
+ unsigned int factor = get_update_sysctl_factor();
- sysctl_sched_wakeup_granularity *= factor;
+#define SET_SYSCTL(name) \
+ (sysctl_##name = (factor) * normalized_sysctl_##name)
+ SET_SYSCTL(sched_min_granularity);
+ SET_SYSCTL(sched_latency);
+ SET_SYSCTL(sched_wakeup_granularity);
+ SET_SYSCTL(sched_shares_ratelimit);
+#undef SET_SYSCTL
+}
- sysctl_sched_shares_ratelimit *= factor;
+static inline void sched_init_granularity(void)
+{
+ update_sysctl();
}
#ifdef CONFIG_SMP
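A worked example of the new scaling (illustrative only, not part of the patch): with the default SCHED_TUNABLESCALING_LOG policy on a machine with eight or more online CPUs, get_update_sysctl_factor() clamps the CPU count to 8 and returns 1 + ilog2(8) = 4, so update_sysctl() yields:
	/*
	 * factor = 1 + ilog2(min(num_online_cpus(), 8)) = 4
	 *
	 *   sysctl_sched_latency            = 4 * 5000000 ns = 20 ms
	 *   sysctl_sched_min_granularity    = 4 * 1000000 ns =  4 ms
	 *   sysctl_sched_wakeup_granularity = 4 * 1000000 ns =  4 ms
	 */
The normalized_sysctl_* baselines used here are the defaults added to kernel/sched_fair.c further down in this patch.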
@@ -7092,8 +7132,24 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
struct rq *rq;
int ret = 0;
+ /*
+ * Since we rely on wake-ups to migrate sleeping tasks, don't change
+ * the ->cpus_allowed mask from under waking tasks, which would be
+ * possible when we change rq->lock in ttwu(), so synchronize against
+ * TASK_WAKING to avoid that.
+ */
+again:
+ while (p->state == TASK_WAKING)
+ cpu_relax();
+
rq = task_rq_lock(p, &flags);
- if (!cpumask_intersects(new_mask, cpu_online_mask)) {
+
+ if (p->state == TASK_WAKING) {
+ task_rq_unlock(rq, &flags);
+ goto again;
+ }
+
+ if (!cpumask_intersects(new_mask, cpu_active_mask)) {
ret = -EINVAL;
goto out;
}
@@ -7115,7 +7171,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
- if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
+ if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
/* Need help from migration thread: drop lock and wait. */
struct task_struct *mt = rq->migration_thread;
@@ -7148,7 +7204,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
struct rq *rq_dest, *rq_src;
- int ret = 0, on_rq;
+ int ret = 0;
if (unlikely(!cpu_active(dest_cpu)))
return ret;
@@ -7164,12 +7220,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
goto fail;
- on_rq = p->se.on_rq;
- if (on_rq)
+ /*
+ * If we're not on a rq, the next wake-up will ensure we're
+ * placed properly.
+ */
+ if (p->se.on_rq) {
deactivate_task(rq_src, p, 0);
-
- set_task_cpu(p, dest_cpu);
- if (on_rq) {
+ set_task_cpu(p, dest_cpu);
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p, 0);
}
@@ -7204,10 +7261,10 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
}
@@ -7219,7 +7276,7 @@ static int migration_thread(void *data)
head = &rq->migration_queue;
if (list_empty(head)) {
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;
@@ -7228,14 +7285,14 @@ static int migration_thread(void *data)
list_del_init(head->next);
if (req->task != NULL) {
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
__migrate_task(req->task, cpu, req->dest_cpu);
} else if (likely(cpu == (badcpu = smp_processor_id()))) {
req->dest_cpu = RCU_MIGRATION_GOT_QS;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
} else {
req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
}
local_irq_enable();
@@ -7265,37 +7322,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
{
int dest_cpu;
- const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
again:
- /* Look for allowed, online CPU in same node. */
- for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
- goto move;
-
- /* Any allowed, online CPU? */
- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
- if (dest_cpu < nr_cpu_ids)
- goto move;
+ dest_cpu = select_fallback_rq(dead_cpu, p);
- /* No more Mr. Nice Guy. */
- if (dest_cpu >= nr_cpu_ids) {
- cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
- dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
-
- /*
- * Don't tell them about moving exiting tasks or
- * kernel threads (both mm NULL), since they never
- * leave kernel.
- */
- if (p->mm && printk_ratelimit()) {
- printk(KERN_INFO "process %d (%s) no "
- "longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, dead_cpu);
- }
- }
-
-move:
/* It can have affinity changed while we were choosing. */
if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
goto again;
@@ -7310,7 +7340,7 @@ move:
*/
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
- struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
+ struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
unsigned long flags;
local_irq_save(flags);
@@ -7358,14 +7388,14 @@ void sched_idle_next(void)
* Strictly not necessary since rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -7401,9 +7431,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -7563,7 +7593,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
static struct ctl_table_header *sd_sysctl_header;
static void register_sched_domain_sysctl(void)
{
- int i, cpu_num = num_online_cpus();
+ int i, cpu_num = num_possible_cpus();
struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
char buf[32];
@@ -7573,7 +7603,7 @@ static void register_sched_domain_sysctl(void)
if (entry == NULL)
return;
- for_each_online_cpu(i) {
+ for_each_possible_cpu(i) {
snprintf(buf, 32, "cpu%d", i);
entry->procname = kstrdup(buf, GFP_KERNEL);
entry->mode = 0555;
@@ -7669,13 +7699,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU
@@ -7700,14 +7730,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
- rq->idle->static_prio = MAX_PRIO;
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
@@ -7717,30 +7746,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* they didn't take sched_hotcpu_mutex. Just wake up
* the requestors.
*/
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
complete(&req->done);
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
}
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}
@@ -7970,7 +7999,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
struct root_domain *old_rd = NULL;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;
@@ -7996,7 +8025,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);
@@ -8282,14 +8311,14 @@ enum s_alloc {
*/
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
static int
cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *unused)
{
if (sg)
- *sg = &per_cpu(sched_group_cpus, cpu).sg;
+ *sg = &per_cpu(sched_groups, cpu).sg;
return cpu;
}
#endif /* CONFIG_SCHED_SMT */
@@ -9099,7 +9128,7 @@ match1:
if (doms_new == NULL) {
ndoms_cur = 0;
doms_new = &fallback_doms;
- cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
+ cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
WARN_ON_ONCE(dattr_new);
}
@@ -9230,8 +9259,10 @@ static int update_sched_domains(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
partition_sched_domains(1, NULL, NULL);
return NOTIFY_OK;
@@ -9278,7 +9309,7 @@ void __init sched_init_smp(void)
#endif
get_online_cpus();
mutex_lock(&sched_domains_mutex);
- arch_init_sched_domains(cpu_online_mask);
+ arch_init_sched_domains(cpu_active_mask);
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
if (cpumask_empty(non_isolated_cpus))
cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -9351,13 +9382,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+ plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
- spin_lock_init(&rt_rq->rt_runtime_lock);
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;
@@ -9517,7 +9548,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9577,7 +9608,7 @@ void __init sched_init(void)
#elif defined CONFIG_USER_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
init_tg_rt_entry(&init_task_group,
- &per_cpu(init_rt_rq, i),
+ &per_cpu(init_rt_rq_var, i),
&per_cpu(init_sched_rt_entity, i), i, 1,
root_task_group.rt_se[i]);
#endif
@@ -9615,7 +9646,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
- plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+ plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
#endif
/*
@@ -9659,7 +9690,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
- int nested = preempt_count() & ~PREEMPT_ACTIVE;
+ int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
}
@@ -9740,13 +9771,13 @@ void normalize_rt_tasks(void)
continue;
}
- spin_lock(&p->pi_lock);
+ raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
- spin_unlock(&p->pi_lock);
+ raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);
@@ -9842,13 +9873,15 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
se = kzalloc_node(sizeof(struct sched_entity),
GFP_KERNEL, cpu_to_node(i));
if (!se)
- goto err;
+ goto err_free_rq;
init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
}
return 1;
+ err_free_rq:
+ kfree(cfs_rq);
err:
return 0;
}
@@ -9930,13 +9963,15 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
- goto err;
+ goto err_free_rq;
init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
}
return 1;
+ err_free_rq:
+ kfree(rt_rq);
err:
return 0;
}
@@ -10070,7 +10105,7 @@ void sched_move_task(struct task_struct *tsk)
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->moved_group)
- tsk->sched_class->moved_group(tsk);
+ tsk->sched_class->moved_group(tsk, on_rq);
#endif
if (unlikely(running))
@@ -10105,9 +10140,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
struct rq *rq = cfs_rq->rq;
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);
@@ -10292,18 +10327,18 @@ static int tg_set_bandwidth(struct task_group *tg,
if (err)
goto unlock;
- spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+ raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);
@@ -10408,15 +10443,15 @@ static int sched_rt_global_constraints(void)
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
- spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
- spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+ raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
@@ -10707,9 +10742,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif
@@ -10725,9 +10760,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
- spin_lock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
- spin_unlock_irq(&cpu_rq(cpu)->lock);
+ raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif
@@ -10961,9 +10996,9 @@ void synchronize_sched_expedited(void)
init_completion(&req->done);
req->task = NULL;
req->dest_cpu = RCU_MIGRATION_NEED_QS;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
list_add(&req->list, &rq->migration_queue);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
wake_up_process(rq->migration_thread);
}
for_each_online_cpu(cpu) {
@@ -10971,11 +11006,11 @@ void synchronize_sched_expedited(void)
req = &per_cpu(rcu_migration_req, cpu);
rq = cpu_rq(cpu);
wait_for_completion(&req->done);
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
need_full_sync = 1;
req->dest_cpu = RCU_MIGRATION_IDLE;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
synchronize_sched_expedited_count++;
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 479ce5682d7..5b496132c28 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -236,6 +236,18 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
+unsigned long long cpu_clock(int cpu)
+{
+ unsigned long long clock;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ clock = sched_clock_cpu(cpu);
+ local_irq_restore(flags);
+
+ return clock;
+}
+
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
void sched_clock_init(void)
@@ -251,17 +263,12 @@ u64 sched_clock_cpu(int cpu)
return sched_clock();
}
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
unsigned long long cpu_clock(int cpu)
{
- unsigned long long clock;
- unsigned long flags;
+ return sched_clock_cpu(cpu);
+}
- local_irq_save(flags);
- clock = sched_clock_cpu(cpu);
- local_irq_restore(flags);
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
- return clock;
-}
EXPORT_SYMBOL_GPL(cpu_clock);
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 0f052fc674d..597b33099df 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -135,26 +135,26 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
if (likely(newpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
- spin_lock_irqsave(&vec->lock, flags);
+ raw_spin_lock_irqsave(&vec->lock, flags);
cpumask_set_cpu(cpu, vec->mask);
vec->count++;
if (vec->count == 1)
set_bit(newpri, cp->pri_active);
- spin_unlock_irqrestore(&vec->lock, flags);
+ raw_spin_unlock_irqrestore(&vec->lock, flags);
}
if (likely(oldpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
- spin_lock_irqsave(&vec->lock, flags);
+ raw_spin_lock_irqsave(&vec->lock, flags);
vec->count--;
if (!vec->count)
clear_bit(oldpri, cp->pri_active);
cpumask_clear_cpu(cpu, vec->mask);
- spin_unlock_irqrestore(&vec->lock, flags);
+ raw_spin_unlock_irqrestore(&vec->lock, flags);
}
*currpri = newpri;
@@ -180,7 +180,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem)
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[i];
- spin_lock_init(&vec->lock);
+ raw_spin_lock_init(&vec->lock);
vec->count = 0;
if (!zalloc_cpumask_var(&vec->mask, gfp))
goto cleanup;
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 9a7e859b8fb..7cb5bb6b95b 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -12,7 +12,7 @@
/* values 2-101 are RT priorities 0-99 */
struct cpupri_vec {
- spinlock_t lock;
+ raw_spinlock_t lock;
int count;
cpumask_var_t mask;
};
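The spinlock_t -> raw_spinlock_t conversion seen here is repeated for rq->lock, the rt_runtime locks, pi_lock and the IPI queues throughout this patch; the raw_ variant marks locks that must keep plain busy-wait semantics even on kernels where ordinary spinlocks become preemptible (e.g. preempt-rt). A minimal usage sketch, assuming the DEFINE_RAW_SPINLOCK initializer that accompanies this rename; the lock and function names are illustrative only:
	static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock */

	static void example_critical_section(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		/* work that must not sleep and must not be preempted */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}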
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 6988cf08f70..67f95aada4b 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
@@ -309,6 +309,12 @@ static void print_cpu(struct seq_file *m, int cpu)
print_rq(m, rq, cpu);
}
+static const char *sched_tunable_scaling_names[] = {
+ "none",
+ "logarithmic",
+ "linear"
+};
+
static int sched_debug_show(struct seq_file *m, void *v)
{
u64 now = ktime_to_ns(ktime_get());
@@ -334,6 +340,10 @@ static int sched_debug_show(struct seq_file *m, void *v)
#undef PN
#undef P
+ SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
+ sysctl_sched_tunable_scaling,
+ sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
+
for_each_online_cpu(cpu)
print_cpu(m, cpu);
@@ -399,7 +409,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.sum_exec_runtime);
PN(se.avg_overlap);
PN(se.avg_wakeup);
- PN(se.avg_running);
nr_switches = p->nvcsw + p->nivcsw;
@@ -423,7 +432,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.nr_failed_migrations_running);
P(se.nr_failed_migrations_hot);
P(se.nr_forced_migrations);
- P(se.nr_forced2_migrations);
P(se.nr_wakeups);
P(se.nr_wakeups_sync);
P(se.nr_wakeups_migrate);
@@ -499,7 +507,6 @@ void proc_sched_set_task(struct task_struct *p)
p->se.nr_failed_migrations_running = 0;
p->se.nr_failed_migrations_hot = 0;
p->se.nr_forced_migrations = 0;
- p->se.nr_forced2_migrations = 0;
p->se.nr_wakeups = 0;
p->se.nr_wakeups_sync = 0;
p->se.nr_wakeups_migrate = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f61837ad336..42ac3c9f66f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -21,6 +21,7 @@
*/
#include <linux/latencytop.h>
+#include <linux/sched.h>
/*
* Targeted preemption latency for CPU-bound tasks:
@@ -35,12 +36,26 @@
* run vmstat and monitor the context-switches (cs) field)
*/
unsigned int sysctl_sched_latency = 5000000ULL;
+unsigned int normalized_sysctl_sched_latency = 5000000ULL;
+
+/*
+ * The initial- and re-scaling of tunables is configurable
+ * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
+ *
+ * Options are:
+ * SCHED_TUNABLESCALING_NONE - unscaled, always *1
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
+ */
+enum sched_tunable_scaling sysctl_sched_tunable_scaling
+ = SCHED_TUNABLESCALING_LOG;
/*
* Minimal preemption granularity for CPU-bound tasks:
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
@@ -70,6 +85,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
* have immediate wakeup/sleep latencies.
*/
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
@@ -383,11 +399,12 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
*/
#ifdef CONFIG_SCHED_DEBUG
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ int factor = get_update_sysctl_factor();
if (ret || !write)
return ret;
@@ -395,6 +412,14 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
sysctl_sched_min_granularity);
+#define WRT_SYSCTL(name) \
+ (normalized_sysctl_##name = sysctl_##name / (factor))
+ WRT_SYSCTL(sched_min_granularity);
+ WRT_SYSCTL(sched_latency);
+ WRT_SYSCTL(sched_wakeup_granularity);
+ WRT_SYSCTL(sched_shares_ratelimit);
+#undef WRT_SYSCTL
+
return 0;
}
#endif
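The WRT_SYSCTL() step keeps the normalized_* copies equal to what the administrator actually asked for, divided back down by the current factor. A worked example (illustrative only): with a factor of 4, writing 40 ms into sched_latency_ns stores a normalized value of 10 ms; if the machine later runs with only two CPUs online, update_sysctl() (reached via rq_offline_fair()) re-applies a factor of 2 and the effective latency becomes 20 ms, rather than compounding to 80 ms.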
@@ -485,6 +510,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
+
curr->vruntime += delta_exec_weighted;
update_min_vruntime(cfs_rq);
}
@@ -740,16 +766,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
se->vruntime = vruntime;
}
+#define ENQUEUE_WAKEUP 1
+#define ENQUEUE_MIGRATE 2
+
static void
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
/*
+ * Update the normalized vruntime before updating min_vruntime
+ * through calling update_curr().
+ */
+ if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+ se->vruntime += cfs_rq->min_vruntime;
+
+ /*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
account_entity_enqueue(cfs_rq, se);
- if (wakeup) {
+ if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
}
@@ -803,6 +839,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
update_min_vruntime(cfs_rq);
+
+ /*
+ * Normalize the entity after updating the min_vruntime because the
+ * update can refer to the ->curr item and we need to reflect this
+ * movement in our normalized position.
+ */
+ if (!sleep)
+ se->vruntime -= cfs_rq->min_vruntime;
}
/*
@@ -1013,13 +1057,19 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
+ int flags = 0;
+
+ if (wakeup)
+ flags |= ENQUEUE_WAKEUP;
+ if (p->state == TASK_WAKING)
+ flags |= ENQUEUE_MIGRATE;
for_each_sched_entity(se) {
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
- enqueue_entity(cfs_rq, se, wakeup);
- wakeup = 1;
+ enqueue_entity(cfs_rq, se, flags);
+ flags = ENQUEUE_WAKEUP;
}
hrtick_update(rq);
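Together with the dequeue_entity() change above and the task_waking_fair() hook added just below, these flags implement one convention: entities are queued with an absolute vruntime and are normalized against their old cfs_rq's min_vruntime before being placed on a new one, so a task that sleeps on one CPU and wakes on another carries only its relative lag. A minimal sketch of the re-basing step (the helper name is illustrative, not in the patch):
	/* What task_waking_fair() plus an enqueue with ENQUEUE_MIGRATE amount
	 * to when a sleeping task is moved between runqueues (sketch only): */
	static void sketch_rebase_vruntime(struct sched_entity *se,
					   struct cfs_rq *from, struct cfs_rq *to)
	{
		se->vruntime -= from->min_vruntime;	/* make it relative on the old rq */
		se->vruntime += to->min_vruntime;	/* make it absolute on the new rq */
	}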
@@ -1095,6 +1145,14 @@ static void yield_task_fair(struct rq *rq)
#ifdef CONFIG_SMP
+static void task_waking_fair(struct rq *rq, struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ se->vruntime -= cfs_rq->min_vruntime;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* effective_load() calculates the load change as seen from the root_task_group
@@ -1403,8 +1461,10 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
new_cpu = prev_cpu;
}
- rcu_read_lock();
for_each_domain(cpu, tmp) {
+ if (!(tmp->flags & SD_LOAD_BALANCE))
+ continue;
+
/*
* If power savings logic is enabled for a domain, see if we
* are not overloaded, if so, don't balance wider.
@@ -1484,10 +1544,8 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
update_shares(tmp);
}
- if (affine_sd && wake_affine(affine_sd, p, sync)) {
- new_cpu = cpu;
- goto out;
- }
+ if (affine_sd && wake_affine(affine_sd, p, sync))
+ return cpu;
while (sd) {
int load_idx = sd->forkexec_idx;
@@ -1528,8 +1586,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
/* while loop will break here if sd == NULL */
}
-out:
- rcu_read_unlock();
return new_cpu;
}
#endif /* CONFIG_SMP */
@@ -1651,12 +1707,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
int sync = wake_flags & WF_SYNC;
int scale = cfs_rq->nr_running >= sched_nr_latency;
- update_curr(cfs_rq);
-
- if (unlikely(rt_prio(p->prio))) {
- resched_task(curr);
- return;
- }
+ if (unlikely(rt_prio(p->prio)))
+ goto preempt;
if (unlikely(p->sched_class != &fair_sched_class))
return;
@@ -1682,50 +1734,44 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
/* Idle tasks are by definition preempted by everybody. */
- if (unlikely(curr->policy == SCHED_IDLE)) {
- resched_task(curr);
- return;
- }
+ if (unlikely(curr->policy == SCHED_IDLE))
+ goto preempt;
- if ((sched_feat(WAKEUP_SYNC) && sync) ||
- (sched_feat(WAKEUP_OVERLAP) &&
- (se->avg_overlap < sysctl_sched_migration_cost &&
- pse->avg_overlap < sysctl_sched_migration_cost))) {
- resched_task(curr);
- return;
- }
+ if (sched_feat(WAKEUP_SYNC) && sync)
+ goto preempt;
- if (sched_feat(WAKEUP_RUNNING)) {
- if (pse->avg_running < se->avg_running) {
- set_next_buddy(pse);
- resched_task(curr);
- return;
- }
- }
+ if (sched_feat(WAKEUP_OVERLAP) &&
+ se->avg_overlap < sysctl_sched_migration_cost &&
+ pse->avg_overlap < sysctl_sched_migration_cost)
+ goto preempt;
if (!sched_feat(WAKEUP_PREEMPT))
return;
+ update_curr(cfs_rq);
find_matching_se(&se, &pse);
-
BUG_ON(!pse);
+ if (wakeup_preempt_entity(se, pse) == 1)
+ goto preempt;
- if (wakeup_preempt_entity(se, pse) == 1) {
- resched_task(curr);
- /*
- * Only set the backward buddy when the current task is still
- * on the rq. This can happen when a wakeup gets interleaved
- * with schedule on the ->pre_schedule() or idle_balance()
- * point, either of which can * drop the rq lock.
- *
- * Also, during early boot the idle thread is in the fair class,
- * for obvious reasons its a bad idea to schedule back to it.
- */
- if (unlikely(!se->on_rq || curr == rq->idle))
- return;
- if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
- set_last_buddy(se);
- }
+ return;
+
+preempt:
+ resched_task(curr);
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+ * with schedule on the ->pre_schedule() or idle_balance()
+ * point, either of which can * drop the rq lock.
+ *
+ * Also, during early boot the idle thread is in the fair class,
+ * for obvious reasons its a bad idea to schedule back to it.
+ */
+ if (unlikely(!se->on_rq || curr == rq->idle))
+ return;
+
+ if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+ set_last_buddy(se);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1905,6 +1951,17 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
+
+static void rq_online_fair(struct rq *rq)
+{
+ update_sysctl();
+}
+
+static void rq_offline_fair(struct rq *rq)
+{
+ update_sysctl();
+}
+
#endif /* CONFIG_SMP */
/*
@@ -1922,28 +1979,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
}
/*
- * Share the fairness runtime between parent and child, thus the
- * total amount of pressure for CPU stays equal - new tasks
- * get a chance to run but frequent forkers are not allowed to
- * monopolize the CPU. Note: the parent runqueue is locked,
- * the child is not running yet.
+ * called on fork with the child task as argument from the parent's context
+ * - child not yet on the tasklist
+ * - preemption disabled
*/
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_fork_fair(struct task_struct *p)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ struct cfs_rq *cfs_rq = task_cfs_rq(current);
struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
int this_cpu = smp_processor_id();
+ struct rq *rq = this_rq();
+ unsigned long flags;
- sched_info_queued(p);
+ raw_spin_lock_irqsave(&rq->lock, flags);
+
+ if (unlikely(task_cpu(p) != this_cpu))
+ __set_task_cpu(p, this_cpu);
update_curr(cfs_rq);
+
if (curr)
se->vruntime = curr->vruntime;
place_entity(cfs_rq, se, 1);
- /* 'curr' will be NULL if the child belongs to a different group */
- if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
- curr && entity_before(curr, se)) {
+ if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
/*
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
@@ -1952,7 +2011,9 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
resched_task(rq->curr);
}
- enqueue_task_fair(rq, p, 0);
+ se->vruntime -= cfs_rq->min_vruntime;
+
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -2005,30 +2066,27 @@ static void set_curr_task_fair(struct rq *rq)
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p)
+static void moved_group_fair(struct task_struct *p, int on_rq)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
update_curr(cfs_rq);
- place_entity(cfs_rq, &p->se, 1);
+ if (!on_rq)
+ place_entity(cfs_rq, &p->se, 1);
}
#endif
-unsigned int get_rr_interval_fair(struct task_struct *task)
+unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
struct sched_entity *se = &task->se;
- unsigned long flags;
- struct rq *rq;
unsigned int rr_interval = 0;
/*
* Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
* idle runqueue:
*/
- rq = task_rq_lock(task, &flags);
if (rq->cfs.load.weight)
rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
- task_rq_unlock(rq, &flags);
return rr_interval;
}
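Passing the runqueue in explicitly means get_rr_interval() no longer takes rq->lock itself; the caller is expected to hold it, as in the sys_sched_rr_get_interval() hunk earlier in this patch. A sketch of that caller-side pattern (function name illustrative):
	static unsigned int sketch_query_rr_interval(struct task_struct *p)
	{
		unsigned long flags;
		unsigned int slice;
		struct rq *rq;

		rq = task_rq_lock(p, &flags);	/* rq->lock held across the call */
		slice = p->sched_class->get_rr_interval(rq, p);
		task_rq_unlock(rq, &flags);

		return slice;
	}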
@@ -2052,11 +2110,15 @@ static const struct sched_class fair_sched_class = {
.load_balance = load_balance_fair,
.move_one_task = move_one_task_fair,
+ .rq_online = rq_online_fair,
+ .rq_offline = rq_offline_fair,
+
+ .task_waking = task_waking_fair,
#endif
.set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
- .task_new = task_new_fair,
+ .task_fork = task_fork_fair,
.prio_changed = prio_changed_fair,
.switched_to = switched_to_fair,
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 0d94083582c..d5059fd761d 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -54,11 +54,6 @@ SCHED_FEAT(WAKEUP_SYNC, 0)
SCHED_FEAT(WAKEUP_OVERLAP, 0)
/*
- * Wakeup preemption towards tasks that run short
- */
-SCHED_FEAT(WAKEUP_RUNNING, 0)
-
-/*
* Use the SYNC wakeup hint, pipes and the likes use this to indicate
* the remote end is likely to consume the data we just wrote, and
* therefore has cache benefit from being placed on the same cpu, see
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index b133a28fcde..5f93b570d38 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
- spin_unlock_irq(&rq->lock);
+ raw_spin_unlock_irq(&rq->lock);
printk(KERN_ERR "bad: scheduling from the idle thread!\n");
dump_stack();
- spin_lock_irq(&rq->lock);
+ raw_spin_lock_irq(&rq->lock);
}
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
@@ -97,7 +97,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
check_preempt_curr(rq, p, 0);
}
-unsigned int get_rr_interval_idle(struct task_struct *task)
+unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
return 0;
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 5c5fef37841..f48328ac216 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
weight = cpumask_weight(rd->span);
- spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
if (iter == rt_rq)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ raw_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
rt_rq->rt_runtime += diff;
more = 1;
if (rt_rq->rt_runtime == rt_period) {
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
}
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
return more;
}
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
s64 want;
int i;
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
- spin_lock(&iter->rt_runtime_lock);
+ raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
iter->rt_runtime -= want;
want -= want;
}
- spin_unlock(&iter->rt_runtime_lock);
+ raw_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__disable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static void __enable_runtime(struct rq *rq)
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
for_each_leaf_rt_rq(rt_rq, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- spin_lock(&rt_b->rt_runtime_lock);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_b->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
- spin_unlock(&rt_b->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
{
unsigned long flags;
- spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock_irqsave(&rq->lock, flags);
__enable_runtime(rq);
- spin_unlock_irqrestore(&rq->lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static int balance_runtime(struct rt_rq *rt_rq)
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
int more = 0;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
}
return more;
@@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
- spin_lock(&rq->lock);
+ raw_spin_lock(&rq->lock);
if (rt_rq->rt_time) {
u64 runtime;
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
@@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running)
idle = 0;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
- spin_unlock(&rq->lock);
+ raw_spin_unlock(&rq->lock);
}
return idle;
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
rt_rq = rt_rq_of_se(rt_se);
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
- spin_lock(&rt_rq->rt_runtime_lock);
+ raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
if (sched_rt_runtime_exceeded(rt_rq))
resched_task(curr);
- spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
}
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
task_running(rq, task) ||
!task->se.on_rq)) {
- spin_unlock(&lowest_rq->lock);
+ raw_spin_unlock(&lowest_rq->lock);
lowest_rq = NULL;
break;
}
@@ -1472,7 +1472,7 @@ static void post_schedule_rt(struct rq *rq)
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
*/
-static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
@@ -1721,7 +1721,7 @@ static void set_curr_task_rt(struct rq *rq)
dequeue_pushable_task(rq, p);
}
-unsigned int get_rr_interval_rt(struct task_struct *task)
+unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
* Time slice is 0 for SCHED_FIFO tasks
@@ -1753,7 +1753,7 @@ static const struct sched_class rt_sched_class = {
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
.post_schedule = post_schedule_rt,
- .task_wake_up = task_wake_up_rt,
+ .task_woken = task_woken_rt,
.switched_from = switched_from_rt,
#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 6b982f2cf52..d09692b4037 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -218,13 +218,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
struct user_struct *user;
/*
- * We won't get problems with the target's UID changing under us
- * because changing it requires RCU be used, and if t != current, the
- * caller must be holding the RCU readlock (by way of a spinlock) and
- * we use RCU protection here
+ * Protect access to @t credentials. This can go away when all
+ * callers hold rcu read lock.
*/
+ rcu_read_lock();
user = get_uid(__task_cred(t)->user);
atomic_inc(&user->sigpending);
+ rcu_read_unlock();
if (override_rlimit ||
atomic_read(&user->sigpending) <=
@@ -423,7 +423,7 @@ still_pending:
*/
info->si_signo = sig;
info->si_errno = 0;
- info->si_code = 0;
+ info->si_code = SI_USER;
info->si_pid = 0;
info->si_uid = 0;
}
@@ -607,6 +607,17 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
return 1;
}
+static inline int is_si_special(const struct siginfo *info)
+{
+ return info <= SEND_SIG_FORCED;
+}
+
+static inline bool si_fromuser(const struct siginfo *info)
+{
+ return info == SEND_SIG_NOINFO ||
+ (!is_si_special(info) && SI_FROMUSER(info));
+}
+
/*
* Bad permissions for sending the signal
* - the caller must hold at least the RCU read lock
@@ -621,7 +632,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
if (!valid_signal(sig))
return -EINVAL;
- if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+ if (!si_fromuser(info))
return 0;
error = audit_signal_info(sig, t); /* Let audit system see the signal */
@@ -949,9 +960,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
int from_ancestor_ns = 0;
#ifdef CONFIG_PID_NS
- if (!is_si_special(info) && SI_FROMUSER(info) &&
- task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
- from_ancestor_ns = 1;
+ from_ancestor_ns = si_fromuser(info) &&
+ !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif
return __send_signal(sig, info, t, group, from_ancestor_ns);
@@ -1052,12 +1062,6 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
-void
-force_sig_specific(int sig, struct task_struct *t)
-{
- force_sig_info(sig, SEND_SIG_FORCED, t);
-}
-
/*
* Nuke all other threads in the group.
*/
@@ -1175,19 +1179,19 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
int ret = -EINVAL;
struct task_struct *p;
const struct cred *pcred;
+ unsigned long flags;
if (!valid_signal(sig))
return ret;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = pid_task(pid, PIDTYPE_PID);
if (!p) {
ret = -ESRCH;
goto out_unlock;
}
pcred = __task_cred(p);
- if ((info == SEND_SIG_NOINFO ||
- (!is_si_special(info) && SI_FROMUSER(info))) &&
+ if (si_fromuser(info) &&
euid != pcred->suid && euid != pcred->uid &&
uid != pcred->suid && uid != pcred->uid) {
ret = -EPERM;
@@ -1196,14 +1200,16 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
ret = security_task_kill(p, info, sig, secid);
if (ret)
goto out_unlock;
- if (sig && p->sighand) {
- unsigned long flags;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- ret = __send_signal(sig, info, p, 1, 0);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+ if (sig) {
+ if (lock_task_sighand(p, &flags)) {
+ ret = __send_signal(sig, info, p, 1, 0);
+ unlock_task_sighand(p, &flags);
+ } else
+ ret = -ESRCH;
}
out_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
@@ -1837,11 +1843,6 @@ relock:
for (;;) {
struct k_sigaction *ka;
-
- if (unlikely(signal->group_stop_count > 0) &&
- do_signal_stop(0))
- goto relock;
-
/*
* Tracing can induce an artifical signal and choose sigaction.
* The return value in @signr determines the default action,
@@ -1853,6 +1854,10 @@ relock:
if (unlikely(signr != 0))
ka = return_ka;
else {
+ if (unlikely(signal->group_stop_count > 0) &&
+ do_signal_stop(0))
+ goto relock;
+
signr = dequeue_signal(current, &current->blocked,
info);
diff --git a/kernel/smp.c b/kernel/smp.c
index a8c76069cf5..de735a6637d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static struct {
struct list_head queue;
- spinlock_t lock;
+ raw_spinlock_t lock;
} call_function __cacheline_aligned_in_smp =
{
.queue = LIST_HEAD_INIT(call_function.queue),
- .lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
};
enum {
@@ -35,7 +35,7 @@ struct call_function_data {
struct call_single_queue {
struct list_head list;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
for_each_possible_cpu(i) {
struct call_single_queue *q = &per_cpu(call_single_queue, i);
- spin_lock_init(&q->lock);
+ raw_spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->list);
}
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
unsigned long flags;
int ipi;
- spin_lock_irqsave(&dst->lock, flags);
+ raw_spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
list_add_tail(&data->list, &dst->list);
- spin_unlock_irqrestore(&dst->lock, flags);
+ raw_spin_unlock_irqrestore(&dst->lock, flags);
/*
* The list addition should be visible before sending the IPI
@@ -171,7 +171,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
void generic_smp_call_function_interrupt(void)
{
struct call_function_data *data;
- int cpu = get_cpu();
+ int cpu = smp_processor_id();
/*
* Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
refs = atomic_dec_return(&data->refs);
WARN_ON(refs < 0);
if (!refs) {
- spin_lock(&call_function.lock);
+ raw_spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
- spin_unlock(&call_function.lock);
+ raw_spin_unlock(&call_function.lock);
}
if (refs)
@@ -212,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
csd_unlock(&data->csd);
}
- put_cpu();
}
/*
@@ -230,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
*/
WARN_ON_ONCE(!cpu_online(smp_processor_id()));
- spin_lock(&q->lock);
+ raw_spin_lock(&q->lock);
list_replace_init(&q->list, &list);
- spin_unlock(&q->lock);
+ raw_spin_unlock(&q->lock);
while (!list_empty(&list)) {
struct call_single_data *data;
@@ -449,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
cpumask_clear_cpu(this_cpu, data->cpumask);
atomic_set(&data->refs, cpumask_weight(data->cpumask));
- spin_lock_irqsave(&call_function.lock, flags);
+ raw_spin_lock_irqsave(&call_function.lock, flags);
/*
* Place entry at the _HEAD_ of the list, so that any cpu still
* observing the entry in generic_smp_call_function_interrupt()
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
- spin_unlock_irqrestore(&call_function.lock, flags);
+ raw_spin_unlock_irqrestore(&call_function.lock, flags);
/*
* Make the list addition visible before sending the ipi.
@@ -501,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
void ipi_call_lock(void)
{
- spin_lock(&call_function.lock);
+ raw_spin_lock(&call_function.lock);
}
void ipi_call_unlock(void)
{
- spin_unlock(&call_function.lock);
+ raw_spin_unlock(&call_function.lock);
}
void ipi_call_lock_irq(void)
{
- spin_lock_irq(&call_function.lock);
+ raw_spin_lock_irq(&call_function.lock);
}
void ipi_call_unlock_irq(void)
{
- spin_unlock_irq(&call_function.lock);
+ raw_spin_unlock_irq(&call_function.lock);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 21939d9e830..a09502e2ef7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -697,7 +697,7 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
-static int ksoftirqd(void * __bind_cpu)
+static int run_ksoftirqd(void * __bind_cpu)
{
set_current_state(TASK_INTERRUPTIBLE);
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+ p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
if (IS_ERR(p)) {
printk("ksoftirqd for %i failed\n", hotcpu);
return NOTIFY_BAD;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 81324d12eb3..d22579087e2 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -22,9 +22,9 @@
static DEFINE_SPINLOCK(print_lock);
-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
-static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
+static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
+static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
{
int this_cpu = raw_smp_processor_id();
- __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
+ __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
}
void touch_softlockup_watchdog(void)
{
- __raw_get_cpu_var(touch_timestamp) = 0;
+ __raw_get_cpu_var(softlockup_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
/* Cause each CPU to re-update its timestamp rather than complain */
for_each_online_cpu(cpu)
- per_cpu(touch_timestamp, cpu) = 0;
+ per_cpu(softlockup_touch_ts, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
void softlockup_tick(void)
{
int this_cpu = smp_processor_id();
- unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
- unsigned long print_timestamp;
+ unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
+ unsigned long print_ts;
struct pt_regs *regs = get_irq_regs();
unsigned long now;
/* Is detection switched off? */
- if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+ if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
/* Be sure we don't false trigger if switched back on */
- if (touch_timestamp)
- per_cpu(touch_timestamp, this_cpu) = 0;
+ if (touch_ts)
+ per_cpu(softlockup_touch_ts, this_cpu) = 0;
return;
}
- if (touch_timestamp == 0) {
+ if (touch_ts == 0) {
__touch_softlockup_watchdog();
return;
}
- print_timestamp = per_cpu(print_timestamp, this_cpu);
+ print_ts = per_cpu(softlockup_print_ts, this_cpu);
/* report at most once a second */
- if (print_timestamp == touch_timestamp || did_panic)
+ if (print_ts == touch_ts || did_panic)
return;
/* do not print during early bootup: */
@@ -140,18 +140,18 @@ void softlockup_tick(void)
* Wake up the high-prio watchdog task twice per
* threshold timespan.
*/
- if (now > touch_timestamp + softlockup_thresh/2)
- wake_up_process(per_cpu(watchdog_task, this_cpu));
+ if (now > touch_ts + softlockup_thresh/2)
+ wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
/* Warn about unreasonable delays: */
- if (now <= (touch_timestamp + softlockup_thresh))
+ if (now <= (touch_ts + softlockup_thresh))
return;
- per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+ per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
spin_lock(&print_lock);
printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
- this_cpu, now - touch_timestamp,
+ this_cpu, now - touch_ts,
current->comm, task_pid_nr(current));
print_modules();
print_irqtrace_events(current);
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- BUG_ON(per_cpu(watchdog_task, hotcpu));
+ BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
if (IS_ERR(p)) {
printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
return NOTIFY_BAD;
}
- per_cpu(touch_timestamp, hotcpu) = 0;
- per_cpu(watchdog_task, hotcpu) = p;
+ per_cpu(softlockup_touch_ts, hotcpu) = 0;
+ per_cpu(softlockup_watchdog, hotcpu) = p;
kthread_bind(p, hotcpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- wake_up_process(per_cpu(watchdog_task, hotcpu));
+ wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
- if (!per_cpu(watchdog_task, hotcpu))
+ if (!per_cpu(softlockup_watchdog, hotcpu))
break;
/* Unbind so it can run. Fall thru. */
- kthread_bind(per_cpu(watchdog_task, hotcpu),
+ kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
cpumask_any(cpu_online_mask));
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- p = per_cpu(watchdog_task, hotcpu);
- per_cpu(watchdog_task, hotcpu) = NULL;
+ p = per_cpu(softlockup_watchdog, hotcpu);
+ per_cpu(softlockup_watchdog, hotcpu) = NULL;
kthread_stop(p);
break;
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 41e042219ff..be6517fb9c1 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -32,6 +32,8 @@
* include/linux/spinlock_api_smp.h
*/
#else
+#define raw_read_can_lock(l) read_can_lock(l)
+#define raw_write_can_lock(l) write_can_lock(l)
/*
* We build the __lock_function inlines here. They are too large for
* inlining all over the place, but here is only one user per function
@@ -42,49 +44,49 @@
* towards that other CPU that it should break the lock ASAP.
*/
#define BUILD_LOCK_OPS(op, locktype) \
-void __lockfunc __##op##_lock(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
{ \
for (;;) { \
preempt_disable(); \
- if (likely(_raw_##op##_trylock(lock))) \
+ if (likely(do_raw_##op##_trylock(lock))) \
break; \
preempt_enable(); \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
+ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
} \
\
-unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
+unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
for (;;) { \
preempt_disable(); \
local_irq_save(flags); \
- if (likely(_raw_##op##_trylock(lock))) \
+ if (likely(do_raw_##op##_trylock(lock))) \
break; \
local_irq_restore(flags); \
preempt_enable(); \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
- _raw_##op##_relax(&lock->raw_lock); \
+ while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
+ arch_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
return flags; \
} \
\
-void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
{ \
- _##op##_lock_irqsave(lock); \
+ _raw_##op##_lock_irqsave(lock); \
} \
\
-void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
+void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -93,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
/* irq-disabling. We use the generic preemption-aware */ \
/* function: */ \
/**/ \
- flags = _##op##_lock_irqsave(lock); \
+ flags = _raw_##op##_lock_irqsave(lock); \
local_bh_disable(); \
local_irq_restore(flags); \
} \
@@ -107,269 +109,269 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
* __[spin|read|write]_lock_irqsave()
* __[spin|read|write]_lock_bh()
*/
-BUILD_LOCK_OPS(spin, spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
- preempt_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ return __raw_spin_trylock(lock);
}
-EXPORT_SYMBOL(_spin_lock_nested);
+EXPORT_SYMBOL(_raw_spin_trylock);
+#endif
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
- int subclass)
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
- _raw_spin_lock_flags, &flags);
- return flags;
+ return __raw_spin_trylock_bh(lock);
}
-EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+EXPORT_SYMBOL(_raw_spin_trylock_bh);
+#endif
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
- struct lockdep_map *nest_lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
- preempt_disable();
- spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ __raw_spin_lock(lock);
}
-EXPORT_SYMBOL(_spin_lock_nest_lock);
-
+EXPORT_SYMBOL(_raw_spin_lock);
#endif
-#ifndef CONFIG_INLINE_SPIN_TRYLOCK
-int __lockfunc _spin_trylock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
- return __spin_trylock(lock);
+ return __raw_spin_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_spin_trylock);
+EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
-#ifndef CONFIG_INLINE_READ_TRYLOCK
-int __lockfunc _read_trylock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
- return __read_trylock(lock);
+ __raw_spin_lock_irq(lock);
}
-EXPORT_SYMBOL(_read_trylock);
+EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_TRYLOCK
-int __lockfunc _write_trylock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
- return __write_trylock(lock);
+ __raw_spin_lock_bh(lock);
}
-EXPORT_SYMBOL(_write_trylock);
+EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK
-void __lockfunc _read_lock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
- __read_lock(lock);
+ __raw_spin_unlock(lock);
}
-EXPORT_SYMBOL(_read_lock);
+EXPORT_SYMBOL(_raw_spin_unlock);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
+void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
- return __spin_lock_irqsave(lock);
+ __raw_spin_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_spin_lock_irqsave);
+EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
-void __lockfunc _spin_lock_irq(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
- __spin_lock_irq(lock);
+ __raw_spin_unlock_irq(lock);
}
-EXPORT_SYMBOL(_spin_lock_irq);
+EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK_BH
-void __lockfunc _spin_lock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
- __spin_lock_bh(lock);
+ __raw_spin_unlock_bh(lock);
}
-EXPORT_SYMBOL(_spin_lock_bh);
+EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_TRYLOCK
+int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
- return __read_lock_irqsave(lock);
+ return __raw_read_trylock(lock);
}
-EXPORT_SYMBOL(_read_lock_irqsave);
+EXPORT_SYMBOL(_raw_read_trylock);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_IRQ
-void __lockfunc _read_lock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK
+void __lockfunc _raw_read_lock(rwlock_t *lock)
{
- __read_lock_irq(lock);
+ __raw_read_lock(lock);
}
-EXPORT_SYMBOL(_read_lock_irq);
+EXPORT_SYMBOL(_raw_read_lock);
#endif
-#ifndef CONFIG_INLINE_READ_LOCK_BH
-void __lockfunc _read_lock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
- __read_lock_bh(lock);
+ return __raw_read_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_read_lock_bh);
+EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
- return __write_lock_irqsave(lock);
+ __raw_read_lock_irq(lock);
}
-EXPORT_SYMBOL(_write_lock_irqsave);
+EXPORT_SYMBOL(_raw_read_lock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
-void __lockfunc _write_lock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_LOCK_BH
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
- __write_lock_irq(lock);
+ __raw_read_lock_bh(lock);
}
-EXPORT_SYMBOL(_write_lock_irq);
+EXPORT_SYMBOL(_raw_read_lock_bh);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK_BH
-void __lockfunc _write_lock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK
+void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
- __write_lock_bh(lock);
+ __raw_read_unlock(lock);
}
-EXPORT_SYMBOL(_write_lock_bh);
+EXPORT_SYMBOL(_raw_read_unlock);
#endif
-#ifndef CONFIG_INLINE_SPIN_LOCK
-void __lockfunc _spin_lock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- __spin_lock(lock);
+ __raw_read_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_spin_lock);
+EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif
-#ifndef CONFIG_INLINE_WRITE_LOCK
-void __lockfunc _write_lock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
- __write_lock(lock);
+ __raw_read_unlock_irq(lock);
}
-EXPORT_SYMBOL(_write_lock);
+EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK
-void __lockfunc _spin_unlock(spinlock_t *lock)
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
- __spin_unlock(lock);
+ __raw_read_unlock_bh(lock);
}
-EXPORT_SYMBOL(_spin_unlock);
+EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_WRITE_UNLOCK
-void __lockfunc _write_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
+int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
- __write_unlock(lock);
+ return __raw_write_trylock(lock);
}
-EXPORT_SYMBOL(_write_unlock);
+EXPORT_SYMBOL(_raw_write_trylock);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK
-void __lockfunc _read_unlock(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_LOCK
+void __lockfunc _raw_write_lock(rwlock_t *lock)
{
- __read_unlock(lock);
+ __raw_write_lock(lock);
}
-EXPORT_SYMBOL(_read_unlock);
+EXPORT_SYMBOL(_raw_write_lock);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
- __spin_unlock_irqrestore(lock, flags);
+ return __raw_write_lock_irqsave(lock);
}
-EXPORT_SYMBOL(_spin_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
- __spin_unlock_irq(lock);
+ __raw_write_lock_irq(lock);
}
-EXPORT_SYMBOL(_spin_unlock_irq);
+EXPORT_SYMBOL(_raw_write_lock_irq);
#endif
-#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
- __spin_unlock_bh(lock);
+ __raw_write_lock_bh(lock);
}
-EXPORT_SYMBOL(_spin_unlock_bh);
+EXPORT_SYMBOL(_raw_write_lock_bh);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
+void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
- __read_unlock_irqrestore(lock, flags);
+ __raw_write_unlock(lock);
}
-EXPORT_SYMBOL(_read_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_write_unlock);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
-void __lockfunc _read_unlock_irq(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- __read_unlock_irq(lock);
+ __raw_write_unlock_irqrestore(lock, flags);
}
-EXPORT_SYMBOL(_read_unlock_irq);
+EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif
-#ifndef CONFIG_INLINE_READ_UNLOCK_BH
-void __lockfunc _read_unlock_bh(rwlock_t *lock)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
- __read_unlock_bh(lock);
+ __raw_write_unlock_irq(lock);
}
-EXPORT_SYMBOL(_read_unlock_bh);
+EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif
-#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
- __write_unlock_irqrestore(lock, flags);
+ __raw_write_unlock_bh(lock);
}
-EXPORT_SYMBOL(_write_unlock_irqrestore);
+EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
-#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-void __lockfunc _write_unlock_irq(rwlock_t *lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
- __write_unlock_irq(lock);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-EXPORT_SYMBOL(_write_unlock_irq);
-#endif
+EXPORT_SYMBOL(_raw_spin_lock_nested);
-#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
-void __lockfunc _write_unlock_bh(rwlock_t *lock)
+unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
+ int subclass)
{
- __write_unlock_bh(lock);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
+ do_raw_spin_lock_flags, &flags);
+ return flags;
}
-EXPORT_SYMBOL(_write_unlock_bh);
-#endif
+EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
-#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
-int __lockfunc _spin_trylock_bh(spinlock_t *lock)
+void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
+ struct lockdep_map *nest_lock)
{
- return __spin_trylock_bh(lock);
+ preempt_disable();
+ spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-EXPORT_SYMBOL(_spin_trylock_bh);
+EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
+
#endif
notrace int in_lock_functions(unsigned long addr)
diff --git a/kernel/sys.c b/kernel/sys.c
index 585d6cd1004..26a6b73a6b8 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -162,6 +162,7 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
if (niceval > 19)
niceval = 19;
+ rcu_read_lock();
read_lock(&tasklist_lock);
switch (which) {
case PRIO_PROCESS:
@@ -189,16 +190,17 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
!(user = find_user(who)))
goto out_unlock; /* No processes for this user */
- do_each_thread(g, p)
+ do_each_thread(g, p) {
if (__task_cred(p)->uid == who)
error = set_one_prio(p, niceval, error);
- while_each_thread(g, p);
+ } while_each_thread(g, p);
if (who != cred->uid)
free_uid(user); /* For find_user() */
break;
}
out_unlock:
read_unlock(&tasklist_lock);
+ rcu_read_unlock();
out:
return error;
}
@@ -252,13 +254,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
!(user = find_user(who)))
goto out_unlock; /* No processes for this user */
- do_each_thread(g, p)
+ do_each_thread(g, p) {
if (__task_cred(p)->uid == who) {
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
}
- while_each_thread(g, p);
+ } while_each_thread(g, p);
if (who != cred->uid)
free_uid(user); /* for find_user() */
break;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9327a26765c..8a68b244846 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -244,6 +244,10 @@ static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
+static int min_sched_shares_ratelimit = 100000; /* 100 usec */
+static int max_sched_shares_ratelimit = NSEC_PER_SEC; /* 1 second */
#endif
static struct ctl_table kern_table[] = {
@@ -260,7 +264,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_min_granularity,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = sched_nr_latency_handler,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_sched_granularity_ns,
.extra2 = &max_sched_granularity_ns,
},
@@ -269,7 +273,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_latency,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = sched_nr_latency_handler,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_sched_granularity_ns,
.extra2 = &max_sched_granularity_ns,
},
@@ -278,7 +282,7 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_wakeup_granularity,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = sched_proc_update_handler,
.extra1 = &min_wakeup_granularity_ns,
.extra2 = &max_wakeup_granularity_ns,
},
@@ -287,7 +291,18 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_shares_ratelimit,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = sched_proc_update_handler,
+ .extra1 = &min_sched_shares_ratelimit,
+ .extra2 = &max_sched_shares_ratelimit,
+ },
+ {
+ .procname = "sched_tunable_scaling",
+ .data = &sysctl_sched_tunable_scaling,
+ .maxlen = sizeof(enum sched_tunable_scaling),
+ .mode = 0644,
+ .proc_handler = sched_proc_update_handler,
+ .extra1 = &min_sched_tunable_scaling,
+ .extra2 = &max_sched_tunable_scaling,
},
{
.procname = "sched_shares_thresh",
@@ -298,13 +313,6 @@ static struct ctl_table kern_table[] = {
.extra1 = &zero,
},
{
- .procname = "sched_features",
- .data = &sysctl_sched_features,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "sched_migration_cost",
.data = &sysctl_sched_migration_cost,
.maxlen = sizeof(unsigned int),
@@ -1043,7 +1051,7 @@ static struct ctl_table vm_table[] = {
.extra2 = &one_hundred,
},
#ifdef CONFIG_HUGETLB_PAGE
- {
+ {
.procname = "nr_hugepages",
.data = NULL,
.maxlen = sizeof(unsigned long),
@@ -1051,7 +1059,18 @@ static struct ctl_table vm_table[] = {
.proc_handler = hugetlb_sysctl_handler,
.extra1 = (void *)&hugetlb_zero,
.extra2 = (void *)&hugetlb_infinity,
- },
+ },
+#ifdef CONFIG_NUMA
+ {
+ .procname = "nr_hugepages_mempolicy",
+ .data = NULL,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = &hugetlb_mempolicy_sysctl_handler,
+ .extra1 = (void *)&hugetlb_zero,
+ .extra2 = (void *)&hugetlb_infinity,
+ },
+#endif
{
.procname = "hugetlb_shm_group",
.data = &sysctl_hugetlb_shm_group,
@@ -1112,7 +1131,8 @@ static struct ctl_table vm_table[] = {
.data = &sysctl_max_map_count,
.maxlen = sizeof(sysctl_max_map_count),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
},
#else
{
@@ -1194,6 +1214,7 @@ static struct ctl_table vm_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
+#ifdef CONFIG_MMU
{
.procname = "mmap_min_addr",
.data = &dac_mmap_min_addr,
@@ -1201,6 +1222,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = mmap_min_addr_handler,
},
+#endif
#ifdef CONFIG_NUMA
{
.procname = "numa_zonelist_order",
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index b75dbf40f57..112533d5fc0 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1399,6 +1399,13 @@ static void deprecated_sysctl_warning(const int *name, int nlen)
{
int i;
+ /*
+ * CTL_KERN/KERN_VERSION is used by older glibc and cannot
+ * ever go away.
+ */
+ if (name[0] == CTL_KERN && name[1] == KERN_VERSION)
+ return;
+
if (printk_ratelimit()) {
printk(KERN_INFO
"warning: process `%s' used the deprecated sysctl "
diff --git a/kernel/time.c b/kernel/time.c
index c6324d96009..804798005d1 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,6 +136,7 @@ static inline void warp_clock(void)
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
+ update_xtime_cache(0);
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 20a8920029e..6f740d9f094 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -30,7 +30,7 @@ static LIST_HEAD(clockevents_released);
static RAW_NOTIFIER_HEAD(clockevents_chain);
/* Protection for the above */
-static DEFINE_SPINLOCK(clockevents_lock);
+static DEFINE_RAW_SPINLOCK(clockevents_lock);
/**
* clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
@@ -141,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb)
unsigned long flags;
int ret;
- spin_lock_irqsave(&clockevents_lock, flags);
+ raw_spin_lock_irqsave(&clockevents_lock, flags);
ret = raw_notifier_chain_register(&clockevents_chain, nb);
- spin_unlock_irqrestore(&clockevents_lock, flags);
+ raw_spin_unlock_irqrestore(&clockevents_lock, flags);
return ret;
}
@@ -185,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev)
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
BUG_ON(!dev->cpumask);
- spin_lock_irqsave(&clockevents_lock, flags);
+ raw_spin_lock_irqsave(&clockevents_lock, flags);
list_add(&dev->list, &clockevent_devices);
clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
clockevents_notify_released();
- spin_unlock_irqrestore(&clockevents_lock, flags);
+ raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);
@@ -238,10 +238,11 @@ void clockevents_exchange_device(struct clock_event_device *old,
*/
void clockevents_notify(unsigned long reason, void *arg)
{
- struct list_head *node, *tmp;
+ struct clock_event_device *dev, *tmp;
unsigned long flags;
+ int cpu;
- spin_lock_irqsave(&clockevents_lock, flags);
+ raw_spin_lock_irqsave(&clockevents_lock, flags);
clockevents_do_notify(reason, arg);
switch (reason) {
@@ -250,13 +251,24 @@ void clockevents_notify(unsigned long reason, void *arg)
* Unregister the clock event devices which were
* released from the users in the notify chain.
*/
- list_for_each_safe(node, tmp, &clockevents_released)
- list_del(node);
+ list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
+ list_del(&dev->list);
+ /*
+ * Now check whether the CPU has left unused per cpu devices
+ */
+ cpu = *((int *)arg);
+ list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
+ if (cpumask_test_cpu(cpu, dev->cpumask) &&
+ cpumask_weight(dev->cpumask) == 1) {
+ BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
+ list_del(&dev->list);
+ }
+ }
break;
default:
break;
}
- spin_unlock_irqrestore(&clockevents_lock, flags);
+ raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index c2ec25087a3..b3bafd5fc66 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
-static DEFINE_SPINLOCK(tick_broadcast_lock);
+static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
@@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Devices might be registered with both periodic and oneshot
@@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
tick_broadcast_clear_oneshot(cpu);
}
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return ret;
}
@@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask)
*/
static void tick_do_periodic_broadcast(void)
{
- spin_lock(&tick_broadcast_lock);
+ raw_spin_lock(&tick_broadcast_lock);
cpumask_and(to_cpumask(tmpmask),
cpu_online_mask, tick_get_broadcast_mask());
tick_do_broadcast(to_cpumask(tmpmask));
- spin_unlock(&tick_broadcast_lock);
+ raw_spin_unlock(&tick_broadcast_lock);
}
/*
@@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
unsigned long flags;
int cpu, bc_stopped;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
cpu = smp_processor_id();
td = &per_cpu(tick_cpu_device, cpu);
@@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
tick_broadcast_setup_oneshot(bc);
}
out:
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
@@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
unsigned long flags;
unsigned int cpu = *cpup;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
@@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
clockevents_shutdown(bc);
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
void tick_suspend_broadcast(void)
@@ -317,13 +317,13 @@ void tick_suspend_broadcast(void)
struct clock_event_device *bc;
unsigned long flags;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
if (bc)
clockevents_shutdown(bc);
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
int tick_resume_broadcast(void)
@@ -332,7 +332,7 @@ int tick_resume_broadcast(void)
unsigned long flags;
int broadcast = 0;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
bc = tick_broadcast_device.evtdev;
@@ -351,7 +351,7 @@ int tick_resume_broadcast(void)
break;
}
}
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
return broadcast;
}
@@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
ktime_t now, next_event;
int cpu;
- spin_lock(&tick_broadcast_lock);
+ raw_spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
next_event.tv64 = KTIME_MAX;
@@ -443,7 +443,7 @@ again:
if (tick_broadcast_set_event(next_event, 0))
goto again;
}
- spin_unlock(&tick_broadcast_lock);
+ raw_spin_unlock(&tick_broadcast_lock);
}
/*
@@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
unsigned long flags;
int cpu;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Periodic mode does not care about the enter/exit of power
@@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
}
out:
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
@@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void)
struct clock_event_device *bc;
unsigned long flags;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
bc = tick_broadcast_device.evtdev;
if (bc)
tick_broadcast_setup_oneshot(bc);
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
@@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
unsigned long flags;
unsigned int cpu = *cpup;
- spin_lock_irqsave(&tick_broadcast_lock, flags);
+ raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
/*
* Clear the broadcast mask flag for the dead cpu, but do not
@@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
*/
cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
- spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 83c4417b6a3..b6b898d2eee 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
-DEFINE_SPINLOCK(tick_device_lock);
+static DEFINE_RAW_SPINLOCK(tick_device_lock);
/*
* Debugging: see timer_list.c
@@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
int cpu, ret = NOTIFY_OK;
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
cpu = smp_processor_id();
if (!cpumask_test_cpu(cpu, newdev->cpumask))
@@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
tick_oneshot_notify();
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return NOTIFY_STOP;
out_bc:
@@ -278,7 +278,7 @@ out_bc:
if (tick_check_broadcast_device(newdev))
ret = NOTIFY_STOP;
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
return ret;
}
@@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup)
struct clock_event_device *dev = td->evtdev;
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
td->mode = TICKDEV_MODE_PERIODIC;
if (dev) {
/*
@@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup)
clockevents_exchange_device(dev, NULL);
td->evtdev = NULL;
}
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
static void tick_suspend(void)
@@ -330,9 +330,9 @@ static void tick_suspend(void)
struct tick_device *td = &__get_cpu_var(tick_cpu_device);
unsigned long flags;
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
clockevents_shutdown(td->evtdev);
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
static void tick_resume(void)
@@ -341,7 +341,7 @@ static void tick_resume(void)
unsigned long flags;
int broadcast = tick_resume_broadcast();
- spin_lock_irqsave(&tick_device_lock, flags);
+ raw_spin_lock_irqsave(&tick_device_lock, flags);
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
if (!broadcast) {
@@ -350,7 +350,7 @@ static void tick_resume(void)
else
tick_resume_oneshot();
}
- spin_unlock_irqrestore(&tick_device_lock, flags);
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
/*
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index b1c05bf75ee..290eefbc1f6 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -6,7 +6,6 @@
#define TICK_DO_TIMER_BOOT -2
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
-extern spinlock_t tick_device_lock;
extern ktime_t tick_next_period;
extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 96ff643a5a5..12f5c55090b 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -89,7 +89,7 @@ int timecompare_offset(struct timecompare *sync,
* source time
*/
sample.offset =
- ktime_to_ns(ktime_add(end, start)) / 2 -
+ (ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
ts;
/* simple insertion sort based on duration */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index af4135f0582..7faaa32fbf4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,6 +165,13 @@ struct timespec raw_time;
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
+static struct timespec xtime_cache __attribute__ ((aligned (16)));
+void update_xtime_cache(u64 nsec)
+{
+ xtime_cache = xtime;
+ timespec_add_ns(&xtime_cache, nsec);
+}
+
/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
@@ -325,6 +332,8 @@ int do_settimeofday(struct timespec *tv)
xtime = *tv;
+ update_xtime_cache(0);
+
timekeeper.ntp_error = 0;
ntp_clear();
@@ -550,6 +559,7 @@ void __init timekeeping_init(void)
}
set_normalized_timespec(&wall_to_monotonic,
-boot.tv_sec, -boot.tv_nsec);
+ update_xtime_cache(0);
total_sleep_time.tv_sec = 0;
total_sleep_time.tv_nsec = 0;
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -583,6 +593,7 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
total_sleep_time = timespec_add_safe(total_sleep_time, ts);
}
+ update_xtime_cache(0);
/* re-base the last cycle value */
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
@@ -722,6 +733,7 @@ static void timekeeping_adjust(s64 offset)
timekeeper.ntp_error_shift;
}
+
/**
* logarithmic_accumulation - shifted accumulation of cycles
*
@@ -765,6 +777,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
return offset;
}
+
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
@@ -774,6 +787,7 @@ void update_wall_time(void)
{
struct clocksource *clock;
cycle_t offset;
+ u64 nsecs;
int shift = 0, maxshift;
/* Make sure we're fully resumed: */
@@ -839,6 +853,9 @@ void update_wall_time(void)
timekeeper.ntp_error += timekeeper.xtime_nsec <<
timekeeper.ntp_error_shift;
+ nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
+ update_xtime_cache(nsecs);
+
/* check to see if there is a new clocksource to use */
update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
}
@@ -875,13 +892,13 @@ void monotonic_to_bootbased(struct timespec *ts)
unsigned long get_seconds(void)
{
- return xtime.tv_sec;
+ return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return xtime;
+ return xtime_cache;
}
struct timespec current_kernel_time(void)
@@ -891,7 +908,8 @@ struct timespec current_kernel_time(void)
do {
seq = read_seqbegin(&xtime_lock);
- now = xtime;
+
+ now = xtime_cache;
} while (read_seqretry(&xtime_lock, seq));
return now;
@@ -905,7 +923,8 @@ struct timespec get_monotonic_coarse(void)
do {
seq = read_seqbegin(&xtime_lock);
- now = xtime;
+
+ now = xtime_cache;
mono = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 665c76edbf1..bdfb8dd1050 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
next_one:
i = 0;
- spin_lock_irqsave(&base->cpu_base->lock, flags);
+ raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
curr = base->first;
/*
@@ -100,13 +100,13 @@ next_one:
timer = rb_entry(curr, struct hrtimer, node);
tmp = *timer;
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
print_timer(m, timer, &tmp, i, now);
next++;
goto next_one;
}
- spin_unlock_irqrestore(&base->cpu_base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
}
static void
@@ -150,6 +150,9 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
P_ns(expires_next);
P(hres_active);
P(nr_events);
+ P(nr_retries);
+ P(nr_hangs);
+ P_ns(max_hang_time);
#endif
#undef P
#undef P_ns
@@ -234,10 +237,10 @@ static void timer_list_show_tickdevices(struct seq_file *m)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
print_tickdevice(m, tick_get_broadcast_device(), -1);
SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
- tick_get_broadcast_mask()->bits[0]);
+ cpumask_bits(tick_get_broadcast_mask())[0]);
#ifdef CONFIG_TICK_ONESHOT
SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
- tick_get_broadcast_oneshot_mask()->bits[0]);
+ cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
#endif
SEQ_printf(m, "\n");
#endif
@@ -254,7 +257,7 @@ static int timer_list_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Timer List Version: v0.4\n");
+ SEQ_printf(m, "Timer List Version: v0.5\n");
SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index ee5681f8d7e..2f3b585b8d7 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
/*
* Per-CPU lookup locks for fast hash lookup:
*/
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
/*
* Mutex to serialize state changes with show-stats activities:
@@ -238,14 +238,14 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
/*
 * It doesn't matter which lock we take:
*/
- spinlock_t *lock;
+ raw_spinlock_t *lock;
struct entry *entry, input;
unsigned long flags;
if (likely(!timer_stats_active))
return;
- lock = &per_cpu(lookup_lock, raw_smp_processor_id());
+ lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
input.timer = timer;
input.start_func = startf;
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
input.pid = pid;
input.timer_flag = timer_flag;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
if (!timer_stats_active)
goto out_unlock;
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
atomic_inc(&overflow_count);
out_unlock:
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
static void print_name_offset(struct seq_file *m, unsigned long addr)
@@ -348,9 +348,11 @@ static void sync_access(void)
int cpu;
for_each_online_cpu(cpu) {
- spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+ raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
+
+ raw_spin_lock_irqsave(lock, flags);
/* nothing */
- spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
}
@@ -408,7 +410,7 @@ void __init init_timer_stats(void)
int cpu;
for_each_possible_cpu(cpu)
- spin_lock_init(&per_cpu(lookup_lock, cpu));
+ raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
}
static int __init init_tstats_procfs(void)
diff --git a/kernel/timer.c b/kernel/timer.c
index 5db5a8d2681..15533b79239 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -656,8 +656,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
debug_activate(timer, expires);
- new_base = __get_cpu_var(tvec_bases);
-
cpu = smp_processor_id();
#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e51a1bcb7be..7968762c816 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1724,7 +1724,7 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
return ftrace_match(str, regex, len, type);
}
-static void ftrace_match_records(char *buff, int len, int enable)
+static int ftrace_match_records(char *buff, int len, int enable)
{
unsigned int search_len;
struct ftrace_page *pg;
@@ -1733,6 +1733,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
char *search;
int type;
int not;
+ int found = 0;
flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
type = filter_parse_regex(buff, len, &search, &not);
@@ -1750,6 +1751,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
rec->flags &= ~flag;
else
rec->flags |= flag;
+ found = 1;
}
/*
* Only enable filtering if we have a function that
@@ -1759,6 +1761,8 @@ static void ftrace_match_records(char *buff, int len, int enable)
ftrace_filtered = 1;
} while_for_each_ftrace_rec();
mutex_unlock(&ftrace_lock);
+
+ return found;
}
static int
@@ -1780,7 +1784,7 @@ ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
return 1;
}
-static void ftrace_match_module_records(char *buff, char *mod, int enable)
+static int ftrace_match_module_records(char *buff, char *mod, int enable)
{
unsigned search_len = 0;
struct ftrace_page *pg;
@@ -1789,6 +1793,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
char *search = buff;
unsigned long flag;
int not = 0;
+ int found = 0;
flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
@@ -1819,12 +1824,15 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
rec->flags &= ~flag;
else
rec->flags |= flag;
+ found = 1;
}
if (enable && (rec->flags & FTRACE_FL_FILTER))
ftrace_filtered = 1;
} while_for_each_ftrace_rec();
mutex_unlock(&ftrace_lock);
+
+ return found;
}
/*
@@ -1853,8 +1861,9 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
if (!strlen(mod))
return -EINVAL;
- ftrace_match_module_records(func, mod, enable);
- return 0;
+ if (ftrace_match_module_records(func, mod, enable))
+ return 0;
+ return -EINVAL;
}
static struct ftrace_func_command ftrace_mod_cmd = {
@@ -2151,8 +2160,9 @@ static int ftrace_process_regex(char *buff, int len, int enable)
func = strsep(&next, ":");
if (!next) {
- ftrace_match_records(func, len, enable);
- return 0;
+ if (ftrace_match_records(func, len, enable))
+ return 0;
+ return ret;
}
/* command found */
@@ -2198,10 +2208,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
!trace_parser_cont(parser)) {
ret = ftrace_process_regex(parser->buffer,
parser->idx, enable);
+ trace_parser_clear(parser);
if (ret)
goto out_unlock;
-
- trace_parser_clear(parser);
}
ret = read;
@@ -2543,10 +2552,9 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
exists = true;
break;
}
- if (!exists) {
+ if (!exists)
array[(*idx)++] = rec->ip;
- found = 1;
- }
+ found = 1;
}
} while_for_each_ftrace_rec();
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index e06c6e3d56a..9f4f565b01e 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -14,7 +14,5 @@
#define CREATE_TRACE_POINTS
#include <trace/events/power.h>
-EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
-EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5..2326b04c95c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
int cpu;
struct ring_buffer *buffer;
spinlock_t reader_lock; /* serialize readers */
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
struct lock_class_key lock_key;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock);
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
- cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
struct list_head *p;
unsigned i;
- atomic_inc(&cpu_buffer->record_disabled);
- synchronize_sched();
-
spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
@@ -1211,12 +1208,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
return;
rb_reset_cpu(cpu_buffer);
- spin_unlock_irq(&cpu_buffer->reader_lock);
-
rb_check_pages(cpu_buffer);
- atomic_dec(&cpu_buffer->record_disabled);
-
+ spin_unlock_irq(&cpu_buffer->reader_lock);
}
static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
struct list_head *p;
unsigned i;
- atomic_inc(&cpu_buffer->record_disabled);
- synchronize_sched();
-
spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer);
@@ -1242,11 +1233,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
list_add_tail(&bpage->list, cpu_buffer->pages);
}
rb_reset_cpu(cpu_buffer);
- spin_unlock_irq(&cpu_buffer->reader_lock);
-
rb_check_pages(cpu_buffer);
- atomic_dec(&cpu_buffer->record_disabled);
+ spin_unlock_irq(&cpu_buffer->reader_lock);
}
/**
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
* @buffer: the buffer to resize.
* @size: the new size.
*
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
* Minimum size is 2 * BUF_PAGE_SIZE.
*
* Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
if (size == buffer_size)
return size;
+ atomic_inc(&buffer->record_disabled);
+
+ /* Make sure all writers are done with this buffer. */
+ synchronize_sched();
+
mutex_lock(&buffer->mutex);
get_online_cpus();
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
put_online_cpus();
mutex_unlock(&buffer->mutex);
+ atomic_dec(&buffer->record_disabled);
+
return size;
free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
}
put_online_cpus();
mutex_unlock(&buffer->mutex);
+ atomic_dec(&buffer->record_disabled);
return -ENOMEM;
/*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
out_fail:
put_online_cpus();
mutex_unlock(&buffer->mutex);
+ atomic_dec(&buffer->record_disabled);
return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
@@ -2834,7 +2827,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
int ret;
local_irq_save(flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
again:
/*
@@ -2923,7 +2916,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
goto again;
out:
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
return reader;
@@ -3286,9 +3279,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
synchronize_sched();
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_iter_reset(iter);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return iter;
@@ -3408,11 +3401,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
goto out;
- __raw_spin_lock(&cpu_buffer->lock);
+ arch_spin_lock(&cpu_buffer->lock);
rb_reset_cpu(cpu_buffer);
- __raw_spin_unlock(&cpu_buffer->lock);
+ arch_spin_unlock(&cpu_buffer->lock);
out:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 874f2893cff..8b9f20ab8ee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -12,7 +12,7 @@
* Copyright (C) 2004 William Lee Irwin III
*/
#include <linux/ring_buffer.h>
-#include <linux/utsrelease.h>
+#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
*/
static int tracing_disabled = 1;
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
- local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
}
static inline void ftrace_enable_cpu(void)
{
- local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
preempt_enable();
}
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
*/
static struct trace_array max_tr;
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
@@ -313,7 +313,6 @@ static const char *trace_options[] = {
"bin",
"block",
"stacktrace",
- "sched-tree",
"trace_printk",
"ftrace_preempt",
"branch",
@@ -493,15 +492,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
* protected by per_cpu spinlocks. But the action of the swap
* needs its own lock.
*
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
* with performance when lockdep debugging is enabled.
*
* It is also used in other places outside the update_max_tr
* so it needs to be defined outside of the
* CONFIG_TRACER_MAX_TRACE.
*/
-static raw_spinlock_t ftrace_max_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;
@@ -555,13 +554,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
__update_max_tr(tr, tsk, cpu);
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
}
/**
@@ -581,7 +580,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu();
@@ -603,7 +602,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu);
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
@@ -802,7 +801,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +914,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
- if (!__raw_spin_trylock(&trace_cmdline_lock))
+ if (!arch_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +939,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
- __raw_spin_unlock(&trace_cmdline_lock);
+ arch_spin_unlock(&trace_cmdline_lock);
}
void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +957,14 @@ void trace_find_cmdline(int pid, char comm[])
}
preempt_disable();
- __raw_spin_lock(&trace_cmdline_lock);
+ arch_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
strcpy(comm, saved_cmdlines[map]);
else
strcpy(comm, "<...>");
- __raw_spin_unlock(&trace_cmdline_lock);
+ arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
@@ -1085,7 +1084,7 @@ trace_function(struct trace_array *tr,
struct ftrace_entry *entry;
/* If we are reading the ring buffer, don't trace */
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
__ftrace_trace_stack(tr->buffer, flags, skip, pc);
}
+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+ unsigned long flags;
+
+ if (tracing_disabled || tracing_selftest_running)
+ return;
+
+ local_save_flags(flags);
+
+ /* skipping 3 traces, seems to get us at the caller of this function */
+ __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
@@ -1251,8 +1266,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
- static raw_spinlock_t trace_buf_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t trace_buf_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static u32 trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1298,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
/* Lockdep uses trace_printk for lock tracing */
local_irq_save(flags);
- __raw_spin_lock(&trace_buf_lock);
+ arch_spin_lock(&trace_buf_lock);
len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1319,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
ring_buffer_unlock_commit(buffer, event);
out_unlock:
- __raw_spin_unlock(&trace_buf_lock);
+ arch_spin_unlock(&trace_buf_lock);
local_irq_restore(flags);
out:
@@ -1334,7 +1349,7 @@ int trace_array_printk(struct trace_array *tr,
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
- static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static char trace_buf[TRACE_BUF_SIZE];
struct ftrace_event_call *call = &event_print;
@@ -1360,12 +1375,8 @@ int trace_array_vprintk(struct trace_array *tr,
pause_graph_tracing();
raw_local_irq_save(irq_flags);
- __raw_spin_lock(&trace_buf_lock);
- if (args == NULL) {
- strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
- len = strlen(trace_buf);
- } else
- len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+ arch_spin_lock(&trace_buf_lock);
+ len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
size = sizeof(*entry) + len + 1;
buffer = tr->buffer;
@@ -1382,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr,
ring_buffer_unlock_commit(buffer, event);
out_unlock:
- __raw_spin_unlock(&trace_buf_lock);
+ arch_spin_unlock(&trace_buf_lock);
raw_local_irq_restore(irq_flags);
unpause_graph_tracing();
out:
@@ -1516,6 +1527,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
int i = (int)*pos;
void *ent;
+ WARN_ON_ONCE(iter->leftover);
+
(*pos)++;
/* can't go backwards */
@@ -1614,8 +1627,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
;
} else {
- l = *pos - 1;
- p = s_next(m, p, &l);
+ /*
+ * If we overflowed the seq_file before, then we want
+ * to just reuse the trace_seq buffer again.
+ */
+ if (iter->leftover)
+ p = iter;
+ else {
+ l = *pos - 1;
+ p = s_next(m, p, &l);
+ }
}
trace_event_read_lock();
@@ -1923,6 +1944,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
static int s_show(struct seq_file *m, void *v)
{
struct trace_iterator *iter = v;
+ int ret;
if (iter->ent == NULL) {
if (iter->tr) {
@@ -1942,9 +1964,27 @@ static int s_show(struct seq_file *m, void *v)
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_func_help_header(m);
}
+ } else if (iter->leftover) {
+ /*
+ * If we filled the seq_file buffer earlier, we
+ * want to just show it now.
+ */
+ ret = trace_print_seq(m, &iter->seq);
+
+ /* ret should this time be zero, but you never know */
+ iter->leftover = ret;
+
} else {
print_trace_line(iter);
- trace_print_seq(m, &iter->seq);
+ ret = trace_print_seq(m, &iter->seq);
+ /*
+ * If we overflow the seq_file buffer, then it will
+ * ask us for this data again at start up.
+ * Use that instead.
+ * ret is 0 if seq_file write succeeded.
+ * -1 otherwise.
+ */
+ iter->leftover = ret;
}
return 0;
@@ -2254,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
mutex_lock(&tracing_cpumask_update_lock);
local_irq_disable();
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
* Increase/decrease the disabled counter if we are
@@ -2269,7 +2309,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
atomic_dec(&global_trace.data[cpu]->disabled);
}
}
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
local_irq_enable();
cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -2291,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = {
.write = tracing_cpumask_write,
};
-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
{
struct tracer_opt *trace_opts;
u32 tracer_flags;
- int len = 0;
- char *buf;
- int r = 0;
int i;
-
- /* calculate max size */
- for (i = 0; trace_options[i]; i++) {
- len += strlen(trace_options[i]);
- len += 3; /* "no" and newline */
- }
-
mutex_lock(&trace_types_lock);
tracer_flags = current_trace->flags->val;
trace_opts = current_trace->flags->opts;
- /*
- * Increase the size with names of options specific
- * of the current tracer.
- */
- for (i = 0; trace_opts[i].name; i++) {
- len += strlen(trace_opts[i].name);
- len += 3; /* "no" and newline */
- }
-
- /* +1 for \0 */
- buf = kmalloc(len + 1, GFP_KERNEL);
- if (!buf) {
- mutex_unlock(&trace_types_lock);
- return -ENOMEM;
- }
-
for (i = 0; trace_options[i]; i++) {
if (trace_flags & (1 << i))
- r += sprintf(buf + r, "%s\n", trace_options[i]);
+ seq_printf(m, "%s\n", trace_options[i]);
else
- r += sprintf(buf + r, "no%s\n", trace_options[i]);
+ seq_printf(m, "no%s\n", trace_options[i]);
}
for (i = 0; trace_opts[i].name; i++) {
if (tracer_flags & trace_opts[i].bit)
- r += sprintf(buf + r, "%s\n",
- trace_opts[i].name);
+ seq_printf(m, "%s\n", trace_opts[i].name);
else
- r += sprintf(buf + r, "no%s\n",
- trace_opts[i].name);
+ seq_printf(m, "no%s\n", trace_opts[i].name);
}
mutex_unlock(&trace_types_lock);
- WARN_ON(r >= len + 1);
+ return 0;
+}
- r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+ struct tracer_flags *tracer_flags,
+ struct tracer_opt *opts, int neg)
+{
+ int ret;
- kfree(buf);
- return r;
+ ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+ if (ret)
+ return ret;
+
+ if (neg)
+ tracer_flags->val &= ~opts->bit;
+ else
+ tracer_flags->val |= opts->bit;
+ return 0;
}
/* Try to assign a tracer specific option */
@@ -2359,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
struct tracer_flags *tracer_flags = trace->flags;
struct tracer_opt *opts = NULL;
- int ret = 0, i = 0;
- int len;
+ int i;
for (i = 0; tracer_flags->opts[i].name; i++) {
opts = &tracer_flags->opts[i];
- len = strlen(opts->name);
- if (strncmp(cmp, opts->name, len) == 0) {
- ret = trace->set_flag(tracer_flags->val,
- opts->bit, !neg);
- break;
- }
+ if (strcmp(cmp, opts->name) == 0)
+ return __set_tracer_option(trace, trace->flags,
+ opts, neg);
}
- /* Not found */
- if (!tracer_flags->opts[i].name)
- return -EINVAL;
-
- /* Refused to handle */
- if (ret)
- return ret;
-
- if (neg)
- tracer_flags->val &= ~opts->bit;
- else
- tracer_flags->val |= opts->bit;
- return 0;
+ return -EINVAL;
}
static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2405,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
- char *cmp = buf;
+ char *cmp;
int neg = 0;
int ret;
int i;
@@ -2417,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
return -EFAULT;
buf[cnt] = 0;
+ cmp = strstrip(buf);
- if (strncmp(buf, "no", 2) == 0) {
+ if (strncmp(cmp, "no", 2) == 0) {
neg = 1;
cmp += 2;
}
for (i = 0; trace_options[i]; i++) {
- int len = strlen(trace_options[i]);
-
- if (strncmp(cmp, trace_options[i], len) == 0) {
+ if (strcmp(cmp, trace_options[i]) == 0) {
set_tracer_flags(1 << i, !neg);
break;
}
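
With the hunk above, the written option name is first trimmed with strstrip() and then compared with strcmp(), so a trailing newline (such as the one echo appends) no longer defeats the match and only exact option names are accepted, rather than any string that merely starts with one. A small userspace sketch of the trimming step, using a hand-rolled stand-in since strstrip() is kernel-internal:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal stand-in for the kernel's strstrip(): trim trailing whitespace,
     * return a pointer past any leading whitespace. */
    static char *strstrip_like(char *s)
    {
        size_t len = strlen(s);

        while (len && isspace((unsigned char)s[len - 1]))
            s[--len] = '\0';
        while (*s && isspace((unsigned char)*s))
            s++;
        return s;
    }

    int main(void)
    {
        char buf[] = "  stacktrace\n";  /* what `echo stacktrace >` delivers */
        char *cmp = strstrip_like(buf);

        printf("[%s] exact match: %d\n", cmp, strcmp(cmp, "stacktrace") == 0);
        return 0;
    }
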
@@ -2446,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
return cnt;
}
+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+ if (tracing_disabled)
+ return -ENODEV;
+ return single_open(file, tracing_trace_options_show, NULL);
+}
+
static const struct file_operations tracing_iter_fops = {
- .open = tracing_open_generic,
- .read = tracing_trace_options_read,
+ .open = tracing_trace_options_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
.write = tracing_trace_options_write,
};
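
The conversion above is the standard single_open() idiom: a *_show() callback emits everything through seq_printf() into a seq_file, the *_open() callback wires it up with single_open(), and the file_operations delegate .read/.llseek/.release to seq_read/seq_lseek/single_release, which removes the hand-rolled size calculation, kmalloc() and simple_read_from_buffer() path. A minimal sketch of the same trio for a hypothetical read-only attribute (kernel build context assumed, the "example" names are invented, and registering the file in debugfs/procfs is omitted):

    #include <linux/fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "value: %d\n", 42);   /* emit into the seq_file buffer */
        return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
        return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_fops = {
        .open    = example_open,
        .read    = seq_read,        /* seq_file handles buffering and seeking */
        .llseek  = seq_lseek,
        .release = single_release,
    };
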
@@ -2898,6 +2912,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
else
cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
+
+ if (iter->trace->pipe_close)
+ iter->trace->pipe_close(iter);
+
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
@@ -3104,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
__free_page(spd->pages[idx]);
}
-static struct pipe_buf_operations tracing_pipe_buf_ops = {
+static const struct pipe_buf_operations tracing_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
@@ -3320,6 +3338,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
return cnt;
}
+static int mark_printk(const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+ va_start(args, fmt);
+ ret = trace_vprintk(0, fmt, args);
+ va_end(args);
+ return ret;
+}
+
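
mark_printk() is a plain varargs-forwarding wrapper: it builds a va_list and hands it to trace_vprintk(), which lets tracing_mark_write() below pass the user-supplied buffer as a "%s" argument instead of as the format string (the old call handed the raw buffer over with a NULL va_list, relying on the special case removed at the top of this patch). A hedged userspace sketch of the same forwarding pattern, with invented names:

    #include <stdarg.h>
    #include <stdio.h>

    /* Forward variadic arguments to a v-style function, mirroring mark_printk(). */
    static int mark_printf(const char *fmt, ...)
    {
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = vprintf(fmt, args);   /* the v-variant consumes the va_list */
        va_end(args);
        return ret;
    }

    int main(void)
    {
        char user_buf[] = "50% done";
        /* Passing the data as an argument keeps any '%' in it inert. */
        return mark_printf("%s\n", user_buf) < 0;
    }
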
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
@@ -3346,28 +3374,25 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
} else
buf[cnt] = '\0';
- cnt = trace_vprintk(0, buf, NULL);
+ cnt = mark_printk("%s", buf);
kfree(buf);
*fpos += cnt;
return cnt;
}
-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
{
- char buf[64];
- int bufiter = 0;
int i;
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
- bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+ seq_printf(m,
"%s%s%s%s", i ? " " : "",
i == trace_clock_id ? "[" : "", trace_clocks[i].name,
i == trace_clock_id ? "]" : "");
- bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+ seq_putc(m, '\n');
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+ return 0;
}
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3409,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
return cnt;
}
+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+ if (tracing_disabled)
+ return -ENODEV;
+ return single_open(file, tracing_clock_show, NULL);
+}
+
static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
@@ -3447,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = {
};
static const struct file_operations trace_clock_fops = {
- .open = tracing_open_generic,
- .read = tracing_clock_read,
+ .open = tracing_clock_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
.write = tracing_clock_write,
};
@@ -3578,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
}
/* Pipe buffer operations for a buffer. */
-static struct pipe_buf_operations buffer_pipe_buf_ops = {
+static const struct pipe_buf_operations buffer_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
@@ -3909,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret < 0)
return ret;
- ret = 0;
- switch (val) {
- case 0:
- /* do nothing if already cleared */
- if (!(topt->flags->val & topt->opt->bit))
- break;
-
- mutex_lock(&trace_types_lock);
- if (current_trace->set_flag)
- ret = current_trace->set_flag(topt->flags->val,
- topt->opt->bit, 0);
- mutex_unlock(&trace_types_lock);
- if (ret)
- return ret;
- topt->flags->val &= ~topt->opt->bit;
- break;
- case 1:
- /* do nothing if already set */
- if (topt->flags->val & topt->opt->bit)
- break;
+ if (val != 0 && val != 1)
+ return -EINVAL;
+ if (!!(topt->flags->val & topt->opt->bit) != val) {
mutex_lock(&trace_types_lock);
- if (current_trace->set_flag)
- ret = current_trace->set_flag(topt->flags->val,
- topt->opt->bit, 1);
+ ret = __set_tracer_option(current_trace, topt->flags,
+ topt->opt, val);
mutex_unlock(&trace_types_lock);
if (ret)
return ret;
- topt->flags->val |= topt->opt->bit;
- break;
-
- default:
- return -EINVAL;
}
*ppos += cnt;
@@ -4268,8 +4279,8 @@ trace_printk_seq(struct trace_seq *s)
static void __ftrace_dump(bool disable_tracing)
{
- static raw_spinlock_t ftrace_dump_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ static arch_spinlock_t ftrace_dump_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
unsigned int old_userobj;
@@ -4279,7 +4290,7 @@ static void __ftrace_dump(bool disable_tracing)
/* only one dump */
local_irq_save(flags);
- __raw_spin_lock(&ftrace_dump_lock);
+ arch_spin_lock(&ftrace_dump_lock);
if (dump_ran)
goto out;
@@ -4354,7 +4365,7 @@ static void __ftrace_dump(bool disable_tracing)
}
out:
- __raw_spin_unlock(&ftrace_dump_lock);
+ arch_spin_unlock(&ftrace_dump_lock);
local_irq_restore(flags);
}
@@ -4415,7 +4426,7 @@ __init static int tracer_alloc_buffers(void)
/* Allocate the first page for all buffers */
for_each_tracing_cpu(i) {
global_trace.data[i] = &per_cpu(global_trace_cpu, i);
- max_tr.data[i] = &per_cpu(max_data, i);
+ max_tr.data[i] = &per_cpu(max_tr_data, i);
}
trace_init_cmdlines();
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1d7f4830a80..4df6a77eb19 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -272,6 +272,7 @@ struct tracer_flags {
* @pipe_open: called when the trace_pipe file is opened
* @wait_pipe: override how the user waits for traces on trace_pipe
* @close: called when the trace file is released
+ * @pipe_close: called when the trace_pipe file is released
* @read: override the default read callback on trace_pipe
* @splice_read: override the default splice_read callback on trace_pipe
* @selftest: selftest to run on boot (see trace_selftest.c)
@@ -290,6 +291,7 @@ struct tracer {
void (*pipe_open)(struct trace_iterator *iter);
void (*wait_pipe)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
+ void (*pipe_close)(struct trace_iterator *iter);
ssize_t (*read)(struct trace_iterator *iter,
struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos);
@@ -441,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
@@ -595,18 +597,17 @@ enum trace_iterator_flags {
TRACE_ITER_BIN = 0x40,
TRACE_ITER_BLOCK = 0x80,
TRACE_ITER_STACKTRACE = 0x100,
- TRACE_ITER_SCHED_TREE = 0x200,
- TRACE_ITER_PRINTK = 0x400,
- TRACE_ITER_PREEMPTONLY = 0x800,
- TRACE_ITER_BRANCH = 0x1000,
- TRACE_ITER_ANNOTATE = 0x2000,
- TRACE_ITER_USERSTACKTRACE = 0x4000,
- TRACE_ITER_SYM_USEROBJ = 0x8000,
- TRACE_ITER_PRINTK_MSGONLY = 0x10000,
- TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
- TRACE_ITER_LATENCY_FMT = 0x40000,
- TRACE_ITER_SLEEP_TIME = 0x80000,
- TRACE_ITER_GRAPH_TIME = 0x100000,
+ TRACE_ITER_PRINTK = 0x200,
+ TRACE_ITER_PREEMPTONLY = 0x400,
+ TRACE_ITER_BRANCH = 0x800,
+ TRACE_ITER_ANNOTATE = 0x1000,
+ TRACE_ITER_USERSTACKTRACE = 0x2000,
+ TRACE_ITER_SYM_USEROBJ = 0x4000,
+ TRACE_ITER_PRINTK_MSGONLY = 0x8000,
+ TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */
+ TRACE_ITER_LATENCY_FMT = 0x20000,
+ TRACE_ITER_SLEEP_TIME = 0x40000,
+ TRACE_ITER_GRAPH_TIME = 0x80000,
};
/*
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 878c03f386b..84a3a7ba072 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
/* keep prev_time and lock in the same cacheline. */
static struct {
u64 prev_time;
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
{
- .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+ .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
};
u64 notrace trace_clock_global(void)
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
if (unlikely(in_nmi()))
goto out;
- __raw_spin_lock(&trace_clock_struct.lock);
+ arch_spin_lock(&trace_clock_struct.lock);
/*
* TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)
trace_clock_struct.prev_time = now;
- __raw_spin_unlock(&trace_clock_struct.lock);
+ arch_spin_unlock(&trace_clock_struct.lock);
out:
raw_local_irq_restore(flags);
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index d9c60f80aa0..9e25573242c 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -25,7 +25,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
char *buf;
int ret = -ENOMEM;
- if (atomic_inc_return(&event->profile_count))
+ if (event->profile_count++ > 0)
return 0;
if (!total_profile_count) {
@@ -56,7 +56,7 @@ fail_buf_nmi:
perf_trace_buf = NULL;
}
fail_buf:
- atomic_dec(&event->profile_count);
+ event->profile_count--;
return ret;
}
@@ -83,7 +83,7 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
char *buf, *nmi_buf;
- if (!atomic_add_negative(-1, &event->profile_count))
+ if (--event->profile_count > 0)
return;
event->profile_disable(event);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 1d18315dc83..189b09baf4f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -78,7 +78,7 @@ EXPORT_SYMBOL_GPL(trace_define_field);
if (ret) \
return ret;
-int trace_define_common_fields(struct ftrace_event_call *call)
+static int trace_define_common_fields(struct ftrace_event_call *call)
{
int ret;
struct trace_entry ent;
@@ -91,7 +91,6 @@ int trace_define_common_fields(struct ftrace_event_call *call)
return ret;
}
-EXPORT_SYMBOL_GPL(trace_define_common_fields);
void trace_destroy_fields(struct ftrace_event_call *call)
{
@@ -105,9 +104,25 @@ void trace_destroy_fields(struct ftrace_event_call *call)
}
}
-static void ftrace_event_enable_disable(struct ftrace_event_call *call,
+int trace_event_raw_init(struct ftrace_event_call *call)
+{
+ int id;
+
+ id = register_ftrace_event(call->event);
+ if (!id)
+ return -ENODEV;
+ call->id = id;
+ INIT_LIST_HEAD(&call->fields);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(trace_event_raw_init);
+
+static int ftrace_event_enable_disable(struct ftrace_event_call *call,
int enable)
{
+ int ret = 0;
+
switch (enable) {
case 0:
if (call->enabled) {
@@ -118,12 +133,20 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
break;
case 1:
if (!call->enabled) {
- call->enabled = 1;
tracing_start_cmdline_record();
- call->regfunc(call);
+ ret = call->regfunc(call);
+ if (ret) {
+ tracing_stop_cmdline_record();
+ pr_info("event trace: Could not enable event "
+ "%s\n", call->name);
+ break;
+ }
+ call->enabled = 1;
}
break;
}
+
+ return ret;
}
static void ftrace_clear_events(void)
@@ -402,7 +425,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
case 0:
case 1:
mutex_lock(&event_mutex);
- ftrace_event_enable_disable(call, val);
+ ret = ftrace_event_enable_disable(call, val);
mutex_unlock(&event_mutex);
break;
@@ -412,7 +435,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
*ppos += cnt;
- return cnt;
+ return ret ? ret : cnt;
}
static ssize_t
@@ -913,7 +936,9 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
id);
if (call->define_fields) {
- ret = call->define_fields(call);
+ ret = trace_define_common_fields(call);
+ if (!ret)
+ ret = call->define_fields(call);
if (ret < 0) {
pr_warning("Could not initialize trace point"
" events/%s\n", call->name);
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index dff8c84ddf1..458e5bfe26d 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -184,10 +184,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
struct struct_name field; \
int ret; \
\
- ret = trace_define_common_fields(event_call); \
- if (ret) \
- return ret; \
- \
tstruct; \
\
return ret; \
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 45e6c01b2e4..b1342c5d37c 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,9 +14,20 @@
#include "trace.h"
#include "trace_output.h"
-struct fgraph_data {
+struct fgraph_cpu_data {
pid_t last_pid;
int depth;
+ int ignore;
+};
+
+struct fgraph_data {
+ struct fgraph_cpu_data *cpu_data;
+
+ /* Place to preserve last processed entry. */
+ struct ftrace_graph_ent_entry ent;
+ struct ftrace_graph_ret_entry ret;
+ int failed;
+ int cpu;
};
#define TRACE_GRAPH_INDENT 2
@@ -176,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ent_entry *entry;
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
return 0;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -240,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ret_entry *entry;
- if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -384,7 +395,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
if (!data)
return TRACE_TYPE_HANDLED;
- last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
+ last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
if (*last_pid == pid)
return TRACE_TYPE_HANDLED;
@@ -435,26 +446,49 @@ static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
struct ftrace_graph_ent_entry *curr)
{
- struct ring_buffer_iter *ring_iter;
+ struct fgraph_data *data = iter->private;
+ struct ring_buffer_iter *ring_iter = NULL;
struct ring_buffer_event *event;
struct ftrace_graph_ret_entry *next;
- ring_iter = iter->buffer_iter[iter->cpu];
+ /*
+ * If the previous output failed to write to the seq buffer,
+ * then we just reuse the data from before.
+ */
+ if (data && data->failed) {
+ curr = &data->ent;
+ next = &data->ret;
+ } else {
- /* First peek to compare current entry and the next one */
- if (ring_iter)
- event = ring_buffer_iter_peek(ring_iter, NULL);
- else {
- /* We need to consume the current entry to see the next one */
- ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
- event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
- NULL);
- }
+ ring_iter = iter->buffer_iter[iter->cpu];
+
+ /* First peek to compare current entry and the next one */
+ if (ring_iter)
+ event = ring_buffer_iter_peek(ring_iter, NULL);
+ else {
+ /*
+ * We need to consume the current entry to see
+ * the next one.
+ */
+ ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+ event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+ NULL);
+ }
- if (!event)
- return NULL;
+ if (!event)
+ return NULL;
+
+ next = ring_buffer_event_data(event);
- next = ring_buffer_event_data(event);
+ if (data) {
+ /*
+ * Save current and next entries for later reference
+ * if the output fails.
+ */
+ data->ent = *curr;
+ data->ret = *next;
+ }
+ }
if (next->ent.type != TRACE_GRAPH_RET)
return NULL;
@@ -640,7 +674,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
if (data) {
int cpu = iter->cpu;
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
/*
* Comments display at + 1 to depth. Since
@@ -688,7 +722,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
if (data) {
int cpu = iter->cpu;
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
*depth = call->depth;
}
@@ -782,19 +816,34 @@ static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
struct trace_iterator *iter)
{
- int cpu = iter->cpu;
+ struct fgraph_data *data = iter->private;
struct ftrace_graph_ent *call = &field->graph_ent;
struct ftrace_graph_ret_entry *leaf_ret;
+ static enum print_line_t ret;
+ int cpu = iter->cpu;
if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
return TRACE_TYPE_PARTIAL_LINE;
leaf_ret = get_return_for_leaf(iter, field);
if (leaf_ret)
- return print_graph_entry_leaf(iter, field, leaf_ret, s);
+ ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
else
- return print_graph_entry_nested(iter, field, s, cpu);
+ ret = print_graph_entry_nested(iter, field, s, cpu);
+ if (data) {
+ /*
+ * If we failed to write our output, then we need to make
+ * note of it. Because we already consumed our entry.
+ */
+ if (s->full) {
+ data->failed = 1;
+ data->cpu = cpu;
+ } else
+ data->failed = 0;
+ }
+
+ return ret;
}
static enum print_line_t
@@ -810,7 +859,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
if (data) {
int cpu = iter->cpu;
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
/*
* Comments display at + 1 to depth. This is the
@@ -873,7 +922,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
int i;
if (data)
- depth = per_cpu_ptr(data, iter->cpu)->depth;
+ depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
if (print_graph_prologue(iter, s, 0, 0))
return TRACE_TYPE_PARTIAL_LINE;
@@ -941,8 +990,33 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
+ struct ftrace_graph_ent_entry *field;
+ struct fgraph_data *data = iter->private;
struct trace_entry *entry = iter->ent;
struct trace_seq *s = &iter->seq;
+ int cpu = iter->cpu;
+ int ret;
+
+ if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
+ per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
+ return TRACE_TYPE_HANDLED;
+ }
+
+ /*
+ * If the last output failed, there's a possibility we need
+ * to print out the missing entry which would never go out.
+ */
+ if (data && data->failed) {
+ field = &data->ent;
+ iter->cpu = data->cpu;
+ ret = print_graph_entry(field, s, iter);
+ if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
+ per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
+ ret = TRACE_TYPE_NO_CONSUME;
+ }
+ iter->cpu = cpu;
+ return ret;
+ }
switch (entry->type) {
case TRACE_GRAPH_ENT: {
@@ -952,7 +1026,7 @@ print_graph_function(struct trace_iterator *iter)
* sizeof(struct ftrace_graph_ent_entry) is very small,
* it can be safely saved at the stack.
*/
- struct ftrace_graph_ent_entry *field, saved;
+ struct ftrace_graph_ent_entry saved;
trace_assign_type(field, entry);
saved = *field;
return print_graph_entry(&saved, s, iter);
@@ -1030,31 +1104,54 @@ static void print_graph_headers(struct seq_file *s)
static void graph_trace_open(struct trace_iterator *iter)
{
/* pid and depth on the last trace processed */
- struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+ struct fgraph_data *data;
int cpu;
+ iter->private = NULL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- pr_warning("function graph tracer: not enough memory\n");
- else
- for_each_possible_cpu(cpu) {
- pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
- int *depth = &(per_cpu_ptr(data, cpu)->depth);
- *pid = -1;
- *depth = 0;
- }
+ goto out_err;
+
+ data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+ if (!data->cpu_data)
+ goto out_err_free;
+
+ for_each_possible_cpu(cpu) {
+ pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
+ int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+ int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+ *pid = -1;
+ *depth = 0;
+ *ignore = 0;
+ }
iter->private = data;
+
+ return;
+
+ out_err_free:
+ kfree(data);
+ out_err:
+ pr_warning("function graph tracer: not enough memory\n");
}
static void graph_trace_close(struct trace_iterator *iter)
{
- free_percpu(iter->private);
+ struct fgraph_data *data = iter->private;
+
+ if (data) {
+ free_percpu(data->cpu_data);
+ kfree(data);
+ }
}
static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.open = graph_trace_open,
+ .pipe_open = graph_trace_open,
.close = graph_trace_close,
+ .pipe_close = graph_trace_close,
.wait_pipe = poll_wait_pipe,
.init = graph_trace_init,
.reset = graph_trace_reset,
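
graph_trace_open() now performs a two-level allocation (the fgraph_data struct plus a per-cpu fgraph_cpu_data array) and unwinds with goto labels in reverse order of allocation on failure, with graph_trace_close() mirroring the teardown. A small userspace sketch of that unwind idiom, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct fgraph_like {
        int *cpu_data;  /* stands in for the per-cpu array */
    };

    /* Allocate two levels and unwind in reverse order on failure. */
    static struct fgraph_like *open_data(int ncpus)
    {
        struct fgraph_like *data;

        data = calloc(1, sizeof(*data));
        if (!data)
            goto out_err;

        data->cpu_data = calloc(ncpus, sizeof(*data->cpu_data));
        if (!data->cpu_data)
            goto out_err_free;

        return data;

    out_err_free:
        free(data);
    out_err:
        fprintf(stderr, "not enough memory\n");
        return NULL;
    }

    static void close_data(struct fgraph_like *data)
    {
        if (data) {                 /* mirrors graph_trace_close() */
            free(data->cpu_data);
            free(data);
        }
    }

    int main(void)
    {
        struct fgraph_like *d = open_data(4);
        close_data(d);
        return 0;
    }
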
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 69543a905cd..7b97000745f 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -20,10 +20,10 @@
#define BTS_BUFFER_SIZE (1 << 13)
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
static int trace_hw_branches_enabled __read_mostly;
static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
static void bts_trace_init_cpu(int cpu)
{
- per_cpu(tracer, cpu) =
- ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
- NULL, (size_t)-1, BTS_KERNEL);
+ per_cpu(hwb_tracer, cpu) =
+ ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+ BTS_BUFFER_SIZE, NULL, (size_t)-1,
+ BTS_KERNEL);
- if (IS_ERR(per_cpu(tracer, cpu)))
- per_cpu(tracer, cpu) = NULL;
+ if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+ per_cpu(hwb_tracer, cpu) = NULL;
}
static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
for_each_online_cpu(cpu) {
bts_trace_init_cpu(cpu);
- if (likely(per_cpu(tracer, cpu)))
+ if (likely(per_cpu(hwb_tracer, cpu)))
trace_hw_branches_enabled = 1;
}
trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
get_online_cpus();
for_each_online_cpu(cpu) {
- if (likely(per_cpu(tracer, cpu))) {
- ds_release_bts(per_cpu(tracer, cpu));
- per_cpu(tracer, cpu) = NULL;
+ if (likely(per_cpu(hwb_tracer, cpu))) {
+ ds_release_bts(per_cpu(hwb_tracer, cpu));
+ per_cpu(hwb_tracer, cpu) = NULL;
}
}
trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
get_online_cpus();
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_resume_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_resume_bts(per_cpu(hwb_tracer, cpu));
trace_hw_branches_suspended = 0;
put_online_cpus();
}
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
get_online_cpus();
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_suspend_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_suspend_bts(per_cpu(hwb_tracer, cpu));
trace_hw_branches_suspended = 1;
put_online_cpus();
}
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
bts_trace_init_cpu(cpu);
if (trace_hw_branches_suspended &&
- likely(per_cpu(tracer, cpu)))
- ds_suspend_bts(per_cpu(tracer, cpu));
+ likely(per_cpu(hwb_tracer, cpu)))
+ ds_suspend_bts(per_cpu(hwb_tracer, cpu));
}
break;
case CPU_DOWN_PREPARE:
/* The notification is sent with interrupts enabled. */
- if (likely(per_cpu(tracer, cpu))) {
- ds_release_bts(per_cpu(tracer, cpu));
- per_cpu(tracer, cpu) = NULL;
+ if (likely(per_cpu(hwb_tracer, cpu))) {
+ ds_release_bts(per_cpu(hwb_tracer, cpu));
+ per_cpu(hwb_tracer, cpu) = NULL;
}
}
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
get_online_cpus();
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_suspend_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_suspend_bts(per_cpu(hwb_tracer, cpu));
/*
* We need to collect the trace on the respective cpu since ftrace
* implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
on_each_cpu(trace_bts_cpu, iter->tr, 1);
for_each_online_cpu(cpu)
- if (likely(per_cpu(tracer, cpu)))
- ds_resume_bts(per_cpu(tracer, cpu));
+ if (likely(per_cpu(hwb_tracer, cpu)))
+ ds_resume_bts(per_cpu(hwb_tracer, cpu));
put_online_cpus();
}
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 3aa7eaa2114..2974bc7538c 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -151,6 +151,8 @@ check_critical_timing(struct trace_array *tr,
goto out_unlock;
trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
+ /* Skip 5 functions to get to the irq/preempt enable function */
+ __trace_stack(tr, flags, 5, pc);
if (data->critical_sequence != max_sequence)
goto out_unlock;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index aff5f80b59b..375f81a568d 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -282,6 +282,18 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
struct pt_regs *regs);
+/* Check the name is good for event/group */
+static int check_event_name(const char *name)
+{
+ if (!isalpha(*name) && *name != '_')
+ return 0;
+ while (*++name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return 0;
+ }
+ return 1;
+}
+
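
check_event_name() only accepts C-identifier-style names: a leading letter or underscore followed by letters, digits or underscores. That is also why the auto-generated default names later in this patch switch from the "%c@%s%+ld" form to "%c_%s_%ld", so they pass this check. A userspace copy of the predicate with a quick usage check (the sample names are invented):

    #include <ctype.h>
    #include <stdio.h>

    /* Same predicate as check_event_name() in the hunk above. */
    static int check_event_name(const char *name)
    {
        if (!isalpha((unsigned char)*name) && *name != '_')
            return 0;
        while (*++name != '\0') {
            if (!isalpha((unsigned char)*name) &&
                !isdigit((unsigned char)*name) && *name != '_')
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("%d\n", check_event_name("p_do_fork_0"));  /* 1: accepted */
        printf("%d\n", check_event_name("p@do_fork+0"));  /* 0: rejected */
        return 0;
    }
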
/*
* Allocate new trace_probe and initialize it (including kprobes).
*/
@@ -293,10 +305,11 @@ static struct trace_probe *alloc_trace_probe(const char *group,
int nargs, int is_return)
{
struct trace_probe *tp;
+ int ret = -ENOMEM;
tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
if (!tp)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
if (symbol) {
tp->symbol = kstrdup(symbol, GFP_KERNEL);
@@ -312,14 +325,20 @@ static struct trace_probe *alloc_trace_probe(const char *group,
else
tp->rp.kp.pre_handler = kprobe_dispatcher;
- if (!event)
+ if (!event || !check_event_name(event)) {
+ ret = -EINVAL;
goto error;
+ }
+
tp->call.name = kstrdup(event, GFP_KERNEL);
if (!tp->call.name)
goto error;
- if (!group)
+ if (!group || !check_event_name(group)) {
+ ret = -EINVAL;
goto error;
+ }
+
tp->call.system = kstrdup(group, GFP_KERNEL);
if (!tp->call.system)
goto error;
@@ -330,7 +349,7 @@ error:
kfree(tp->call.name);
kfree(tp->symbol);
kfree(tp);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
static void free_probe_arg(struct probe_arg *arg)
@@ -606,23 +625,22 @@ static int create_trace_probe(int argc, char **argv)
*/
struct trace_probe *tp;
int i, ret = 0;
- int is_return = 0;
+ int is_return = 0, is_delete = 0;
char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
unsigned long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
- if (argc < 2) {
- pr_info("Probe point is not specified.\n");
- return -EINVAL;
- }
-
+ /* argc must be >= 1 */
if (argv[0][0] == 'p')
is_return = 0;
else if (argv[0][0] == 'r')
is_return = 1;
+ else if (argv[0][0] == '-')
+ is_delete = 1;
else {
- pr_info("Probe definition must be started with 'p' or 'r'.\n");
+ pr_info("Probe definition must be started with 'p', 'r' or"
+ " '-'.\n");
return -EINVAL;
}
@@ -642,7 +660,29 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
}
+ if (!group)
+ group = KPROBE_EVENT_SYSTEM;
+
+ if (is_delete) {
+ if (!event) {
+ pr_info("Delete command needs an event name.\n");
+ return -EINVAL;
+ }
+ tp = find_probe_event(event, group);
+ if (!tp) {
+ pr_info("Event %s/%s doesn't exist.\n", group, event);
+ return -ENOENT;
+ }
+ /* delete an event */
+ unregister_trace_probe(tp);
+ free_trace_probe(tp);
+ return 0;
+ }
+ if (argc < 2) {
+ pr_info("Probe point is not specified.\n");
+ return -EINVAL;
+ }
if (isdigit(argv[1][0])) {
if (is_return) {
pr_info("Return probe point must be a symbol.\n");
@@ -671,15 +711,13 @@ static int create_trace_probe(int argc, char **argv)
argc -= 2; argv += 2;
/* setup a probe */
- if (!group)
- group = KPROBE_EVENT_SYSTEM;
if (!event) {
/* Make a new event name */
if (symbol)
- snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
+ snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
is_return ? 'r' : 'p', symbol, offset);
else
- snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
+ snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
is_return ? 'r' : 'p', addr);
event = buf;
}
@@ -1113,10 +1151,6 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
struct kprobe_trace_entry field;
struct trace_probe *tp = (struct trace_probe *)event_call->data;
- ret = trace_define_common_fields(event_call);
- if (!ret)
- return ret;
-
DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
/* Set argument names as fields */
@@ -1131,10 +1165,6 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
struct kretprobe_trace_entry field;
struct trace_probe *tp = (struct trace_probe *)event_call->data;
- ret = trace_define_common_fields(event_call);
- if (!ret)
- return ret;
-
DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
@@ -1434,7 +1464,6 @@ static int register_probe_event(struct trace_probe *tp)
call->unregfunc = probe_event_disable;
#ifdef CONFIG_EVENT_PROFILE
- atomic_set(&call->profile_count, -1);
call->profile_enable = probe_profile_enable;
call->profile_disable = probe_profile_disable;
#endif
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index ddfa0fd43bc..faf37fa4408 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -79,11 +79,12 @@ void ksym_collect_stats(unsigned long hbp_hit_addr)
}
#endif /* CONFIG_PROFILE_KSYM_TRACER */
-void ksym_hbp_handler(struct perf_event *hbp, void *data)
+void ksym_hbp_handler(struct perf_event *hbp, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
struct ring_buffer_event *event;
struct ksym_trace_entry *entry;
- struct pt_regs *regs = data;
struct ring_buffer *buffer;
int pc;
@@ -235,7 +236,8 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
mutex_lock(&ksym_tracer_mutex);
hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
- ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr);
+ ret = trace_seq_printf(s, "%pS:",
+ (void *)(unsigned long)entry->attr.bp_addr);
if (entry->attr.bp_type == HW_BREAKPOINT_R)
ret = trace_seq_puts(s, "r--\n");
else if (entry->attr.bp_type == HW_BREAKPOINT_W)
@@ -277,21 +279,20 @@ static ssize_t ksym_trace_filter_write(struct file *file,
{
struct trace_ksym *entry;
struct hlist_node *node;
- char *input_string, *ksymname = NULL;
+ char *buf, *input_string, *ksymname = NULL;
unsigned long ksym_addr = 0;
int ret, op, changed = 0;
- input_string = kzalloc(count + 1, GFP_KERNEL);
- if (!input_string)
+ buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
- if (copy_from_user(input_string, buffer, count)) {
- kfree(input_string);
- return -EFAULT;
- }
- input_string[count] = '\0';
+ ret = -EFAULT;
+ if (copy_from_user(buf, buffer, count))
+ goto out;
- strstrip(input_string);
+ buf[count] = '\0';
+ input_string = strstrip(buf);
/*
* Clear all breakpoints if:
@@ -299,18 +300,16 @@ static ssize_t ksym_trace_filter_write(struct file *file,
* 2: echo 0 > ksym_trace_filter
* 3: echo "*:---" > ksym_trace_filter
*/
- if (!input_string[0] || !strcmp(input_string, "0") ||
- !strcmp(input_string, "*:---")) {
+ if (!buf[0] || !strcmp(buf, "0") ||
+ !strcmp(buf, "*:---")) {
__ksym_trace_reset();
- kfree(input_string);
- return count;
+ ret = 0;
+ goto out;
}
ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
- if (ret < 0) {
- kfree(input_string);
- return ret;
- }
+ if (ret < 0)
+ goto out;
mutex_lock(&ksym_tracer_mutex);
@@ -321,7 +320,7 @@ static ssize_t ksym_trace_filter_write(struct file *file,
if (entry->attr.bp_type != op)
changed = 1;
else
- goto out;
+ goto out_unlock;
break;
}
}
@@ -336,28 +335,24 @@ static ssize_t ksym_trace_filter_write(struct file *file,
if (IS_ERR(entry->ksym_hbp))
ret = PTR_ERR(entry->ksym_hbp);
else
- goto out;
+ goto out_unlock;
}
/* Error or "symbol:---" case: drop it */
ksym_filter_entry_count--;
hlist_del_rcu(&(entry->ksym_hlist));
synchronize_rcu();
kfree(entry);
- goto out;
+ goto out_unlock;
} else {
/* Check for malformed request: (4) */
- if (op == 0)
- goto out;
- ret = process_new_ksym_entry(ksymname, op, ksym_addr);
+ if (op)
+ ret = process_new_ksym_entry(ksymname, op, ksym_addr);
}
-out:
+out_unlock:
mutex_unlock(&ksym_tracer_mutex);
-
- kfree(input_string);
-
- if (!ret)
- ret = count;
- return ret;
+out:
+ kfree(buf);
+ return !ret ? count : ret;
}
static const struct file_operations ksym_tracing_fops = {
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index b6c12c6a1bc..8e46b3323cd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -23,13 +23,21 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
static int next_event_type = __TRACE_LAST_TYPE + 1;
-void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+ int ret;
+
+ ret = seq_write(m, s->buffer, len);
- seq_write(m, s->buffer, len);
+ /*
+ * Only reset this buffer if we successfully wrote to the
+ * seq_file buffer.
+ */
+ if (!ret)
+ trace_seq_init(s);
- trace_seq_init(s);
+ return ret;
}
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
@@ -85,7 +93,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
va_list ap;
int ret;
- if (!len)
+ if (s->full || !len)
return 0;
va_start(ap, fmt);
@@ -93,8 +101,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
va_end(ap);
/* If we can't write it all, don't bother writing anything */
- if (ret >= len)
+ if (ret >= len) {
+ s->full = 1;
return 0;
+ }
s->len += ret;
@@ -119,14 +129,16 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
int len = (PAGE_SIZE - 1) - s->len;
int ret;
- if (!len)
+ if (s->full || !len)
return 0;
ret = vsnprintf(s->buffer + s->len, len, fmt, args);
/* If we can't write it all, don't bother writing anything */
- if (ret >= len)
+ if (ret >= len) {
+ s->full = 1;
return 0;
+ }
s->len += ret;
@@ -139,14 +151,16 @@ int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
int len = (PAGE_SIZE - 1) - s->len;
int ret;
- if (!len)
+ if (s->full || !len)
return 0;
ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
/* If we can't write it all, don't bother writing anything */
- if (ret >= len)
+ if (ret >= len) {
+ s->full = 1;
return 0;
+ }
s->len += ret;
@@ -167,8 +181,13 @@ int trace_seq_puts(struct trace_seq *s, const char *str)
{
int len = strlen(str);
- if (len > ((PAGE_SIZE - 1) - s->len))
+ if (s->full)
+ return 0;
+
+ if (len > ((PAGE_SIZE - 1) - s->len)) {
+ s->full = 1;
return 0;
+ }
memcpy(s->buffer + s->len, str, len);
s->len += len;
@@ -178,9 +197,14 @@ int trace_seq_puts(struct trace_seq *s, const char *str)
int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
- if (s->len >= (PAGE_SIZE - 1))
+ if (s->full)
return 0;
+ if (s->len >= (PAGE_SIZE - 1)) {
+ s->full = 1;
+ return 0;
+ }
+
s->buffer[s->len++] = c;
return 1;
@@ -188,9 +212,14 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
- if (len > ((PAGE_SIZE - 1) - s->len))
+ if (s->full)
return 0;
+ if (len > ((PAGE_SIZE - 1) - s->len)) {
+ s->full = 1;
+ return 0;
+ }
+
memcpy(s->buffer + s->len, mem, len);
s->len += len;
@@ -203,6 +232,9 @@ int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
const unsigned char *data = mem;
int i, j;
+ if (s->full)
+ return 0;
+
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < len; i++) {
#else
@@ -220,8 +252,13 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
void *ret;
- if (len > ((PAGE_SIZE - 1) - s->len))
+ if (s->full)
+ return 0;
+
+ if (len > ((PAGE_SIZE - 1) - s->len)) {
+ s->full = 1;
return NULL;
+ }
ret = s->buffer + s->len;
s->len += len;
@@ -233,8 +270,14 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
{
unsigned char *p;
- if (s->len >= (PAGE_SIZE - 1))
+ if (s->full)
+ return 0;
+
+ if (s->len >= (PAGE_SIZE - 1)) {
+ s->full = 1;
return 0;
+ }
+
p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
if (!IS_ERR(p)) {
p = mangle_path(s->buffer + s->len, p, "\n");
@@ -247,6 +290,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
return 1;
}
+ s->full = 1;
return 0;
}
@@ -373,6 +417,9 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
unsigned long vmstart = 0;
int ret = 1;
+ if (s->full)
+ return 0;
+
if (mm) {
const struct vm_area_struct *vma;
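
The trace_output.c changes above give struct trace_seq a sticky s->full flag: once a write would overflow the PAGE_SIZE - 1 buffer, the flag is set and every later trace_seq_*() call becomes a no-op, so trace_print_seq() (which now returns the seq_write() result) can hand the intact line back to the seq_file machinery for a retry. A compact userspace sketch of the sticky-overflow idea, modelled on trace_seq_puts() with invented names and a tiny buffer:

    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 32     /* stand-in for PAGE_SIZE - 1 */

    struct small_seq {
        char buf[BUF_SIZE];
        size_t len;
        int full;           /* sticky: once set, all writes are dropped */
    };

    /* Append a string; on overflow mark the buffer full and write nothing. */
    static int seq_puts_like(struct small_seq *s, const char *str)
    {
        size_t len = strlen(str);

        if (s->full)
            return 0;
        if (len > BUF_SIZE - s->len) {
            s->full = 1;
            return 0;
        }
        memcpy(s->buf + s->len, str, len);
        s->len += len;
        return 1;
    }

    int main(void)
    {
        struct small_seq s = { .len = 0, .full = 0 };

        seq_puts_like(&s, "short line\n");
        seq_puts_like(&s, "this one is far too long for the buffer\n");
        printf("kept %zu bytes, full=%d\n", s.len, s.full);
        return 0;
    }
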
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 26185d72767..0271742abb8 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
-static raw_spinlock_t wakeup_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static void __wakeup_reset(struct trace_array *tr);
@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
goto out;
local_irq_save(flags);
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
out_unlock:
__wakeup_reset(wakeup_trace);
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
tracing_reset_online_cpus(tr);
local_irq_save(flags);
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
__wakeup_reset(tr);
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags);
}
@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
goto out;
/* interrupts should be off from try_to_wake_up */
- __raw_spin_lock(&wakeup_lock);
+ arch_spin_lock(&wakeup_lock);
/* check for races. */
if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
out_locked:
- __raw_spin_unlock(&wakeup_lock);
+ arch_spin_unlock(&wakeup_lock);
out:
atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index dc98309e839..280fea470d6 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
/* Don't allow flipping of max traces now */
local_irq_save(flags);
- __raw_spin_lock(&ftrace_max_lock);
+ arch_spin_lock(&ftrace_max_lock);
cnt = ring_buffer_entries(tr->buffer);
@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
break;
}
tracing_on();
- __raw_spin_unlock(&ftrace_max_lock);
+ arch_spin_unlock(&ftrace_max_lock);
local_irq_restore(flags);
if (count)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 8504ac71e4e..678a5120ee3 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
};
static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
- (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+ (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
return;
local_irq_save(flags);
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
/* a race could have already updated it */
if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
}
out:
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_restore(flags);
}
@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
return ret;
local_irq_save(flags);
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
*ptr = val;
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_restore(flags);
return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
local_irq_disable();
- __raw_spin_lock(&max_stack_lock);
+ arch_spin_lock(&max_stack_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
static void t_stop(struct seq_file *m, void *p)
{
- __raw_spin_unlock(&max_stack_lock);
+ arch_spin_unlock(&max_stack_lock);
local_irq_enable();
}
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 57501d90096..75289f372dd 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -217,10 +217,6 @@ int syscall_enter_define_fields(struct ftrace_event_call *call)
int i;
int offset = offsetof(typeof(trace), args);
- ret = trace_define_common_fields(call);
- if (ret)
- return ret;
-
ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
if (ret)
return ret;
@@ -241,10 +237,6 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
struct syscall_trace_exit trace;
int ret;
- ret = trace_define_common_fields(call);
- if (ret)
- return ret;
-
ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
if (ret)
return ret;
@@ -333,10 +325,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
mutex_lock(&syscall_trace_lock);
if (!sys_refcount_enter)
ret = register_trace_sys_enter(ftrace_syscall_enter);
- if (ret) {
- pr_info("event trace: Could not activate"
- "syscall entry trace point");
- } else {
+ if (!ret) {
set_bit(num, enabled_enter_syscalls);
sys_refcount_enter++;
}
@@ -370,10 +359,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
mutex_lock(&syscall_trace_lock);
if (!sys_refcount_exit)
ret = register_trace_sys_exit(ftrace_syscall_exit);
- if (ret) {
- pr_info("event trace: Could not activate"
- "syscall exit trace point");
- } else {
+ if (!ret) {
set_bit(num, enabled_exit_syscalls);
sys_refcount_exit++;
}
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index f6693969287..a7974a552ca 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -93,6 +93,7 @@ static const struct stacktrace_ops backtrace_ops = {
.warning_symbol = backtrace_warning_symbol,
.stack = backtrace_stack,
.address = backtrace_address,
+ .walk_stack = print_context_stack,
};
static int
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2f22cf4576d..25c3ed594c5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS
+ select CRC32
help
Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way
@@ -575,7 +576,7 @@ config DEBUG_BUGVERBOSE
depends on BUG
depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
- default !EMBEDDED
+ default y
help
Say Y here to make BUG() panics output the file name and line number
of the BUG call as well as the EIP and oops trace. This aids
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5205a8dae5b..4b1b083f219 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -4,17 +4,10 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
-static const char *skip_sep(const char *cp)
-{
- while (*cp && isspace(*cp))
- cp++;
-
- return cp;
-}
-
static const char *skip_arg(const char *cp)
{
while (*cp && !isspace(*cp))
@@ -28,7 +21,7 @@ static int count_argc(const char *str)
int count = 0;
while (*str) {
- str = skip_sep(str);
+ str = skip_spaces(str);
if (*str) {
count++;
str = skip_arg(str);
@@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
argvp = argv;
while (*str) {
- str = skip_sep(str);
+ str = skip_spaces(str);
if (*str) {
const char *p = str;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 702565821c9..11bf4975058 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
+
+void bitmap_set(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+EXPORT_SYMBOL(bitmap_set);
+
+void bitmap_clear(unsigned long *map, int start, int nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const int size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+EXPORT_SYMBOL(bitmap_clear);
+
+/*
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of all zero areas this function finds is multiples of that
+ * power of 2. A @align_mask of 0 means no alignment is required.
+ */
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ unsigned long index, end, i;
+again:
+ index = find_next_zero_bit(map, size, start);
+
+ /* Align allocation */
+ index = __ALIGN_MASK(index, align_mask);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+EXPORT_SYMBOL(bitmap_find_next_zero_area);
+
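
bitmap_find_next_zero_area() looks for nr consecutive zero bits whose starting offset satisfies align_mask, which (per the comment above) must be one less than a power of two; the candidate index is rounded up with the kernel's __ALIGN_MASK() helper, which expands to roughly (index + mask) & ~mask. A tiny example of that rounding arithmetic (userspace; the macro is restated here as an assumption about __ALIGN_MASK):

    #include <stdio.h>

    /* Round "x" up to the next multiple of (mask + 1); mask must be 2^k - 1. */
    #define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(unsigned long)(mask))

    int main(void)
    {
        unsigned long mask = 3;     /* request 4-bit alignment */

        /* bit 5 may be free, but the aligned candidate becomes bit 8 */
        printf("%lu\n", ALIGN_MASK(5UL, mask));  /* prints 8 */
        printf("%lu\n", ALIGN_MASK(8UL, mask));  /* already aligned: 8 */
        return 0;
    }
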
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.
diff --git a/lib/checksum.c b/lib/checksum.c
index b2e2fd46846..097508732f3 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -37,7 +37,8 @@
#include <asm/byteorder.h>
-static inline unsigned short from32to16(unsigned long x)
+#ifndef do_csum
+static inline unsigned short from32to16(unsigned int x)
{
/* add up 16-bit and 16-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
@@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x)
static unsigned int do_csum(const unsigned char *buff, int len)
{
int odd, count;
- unsigned long result = 0;
+ unsigned int result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
#ifdef __LITTLE_ENDIAN
- result = *buff;
-#else
result += (*buff << 8);
+#else
+ result = *buff;
#endif
len--;
buff++;
@@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len)
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
- unsigned long carry = 0;
+ unsigned int carry = 0;
do {
- unsigned long w = *(unsigned int *) buff;
+ unsigned int w = *(unsigned int *) buff;
count--;
buff += 4;
result += carry;
@@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
out:
return result;
}
+#endif
/*
* This is a version of ip_compute_csum() optimized for IP headers,
diff --git a/lib/crc32.c b/lib/crc32.c
index 49d1c9e3ce3..02e3b31b3a7 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -42,6 +42,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
MODULE_DESCRIPTION("Ethernet CRC32 calculations");
MODULE_LICENSE("GPL");
+#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
+
+static inline u32
+crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
+{
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
+# else
+# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+# endif
+ const u32 *b = (const u32 *)buf;
+ size_t rem_len;
+
+ /* Align it */
+ if (unlikely((long)b & 3 && len)) {
+ u8 *p = (u8 *)b;
+ do {
+ DO_CRC(*p++);
+ } while ((--len) && ((long)p)&3);
+ b = (u32 *)p;
+ }
+ rem_len = len & 3;
+ /* load data 32 bits wide, xor data 32 bits wide. */
+ len = len >> 2;
+ for (--b; len; --len) {
+ crc ^= *++b; /* use pre increment for speed */
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ }
+ len = rem_len;
+ /* And the last few bytes */
+ if (len) {
+ u8 *p = (u8 *)(b + 1) - 1;
+ do {
+ DO_CRC(*++p); /* use pre increment for speed */
+ } while (--len);
+ }
+ return crc;
+}
+#endif
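
crc32_body() factors the previously duplicated inner loop out of crc32_le() and crc32_be(): align to a 32-bit boundary byte by byte, process whole words with four table lookups each, then finish the trailing bytes. As a behavioural reference only, the same Ethernet/AUTODIN II CRC-32 can be computed bit by bit with the reflected polynomial 0xEDB88320; note the kernel routines take the seed directly (~0 for Ethernet, per the kernel-doc just below) and do not invert internally, so treat the exact correspondence to this self-inverting reference as an assumption:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bit-at-a-time CRC-32 (reflected polynomial 0xEDB88320), a slow
     * reference for what the table-driven code computes. */
    static uint32_t crc32_ref(uint32_t crc, const unsigned char *p, size_t len)
    {
        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return ~crc;
    }

    int main(void)
    {
        const char *s = "123456789";

        /* The standard CRC-32 check value for "123456789" is 0xcbf43926. */
        printf("%08x\n", (unsigned)crc32_ref(0, (const unsigned char *)s, strlen(s)));
        return 0;
    }
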
/**
* crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
* @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -72,48 +114,10 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_LE_BITS == 8
- const u32 *b =(u32 *)p;
const u32 *tab = crc32table_le;
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
-# else
-# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
-# endif
-
crc = __cpu_to_le32(crc);
- /* Align it */
- if(unlikely(((long)b)&3 && len)){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while ((--len) && ((long)b)&3 );
- }
- if(likely(len >= 4)){
- /* load data 32 bits wide, xor data 32 bits wide. */
- size_t save_len = len & 3;
- len = len >> 2;
- --b; /* use pre increment below(*++b) for speed */
- do {
- crc ^= *++b;
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- } while (--len);
- b++; /* point to next byte(s) */
- len = save_len;
- }
- /* And the last few bytes */
- if(len){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while (--len);
- }
-
+ crc = crc32_body(crc, p, len, tab);
return __le32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
@@ -170,47 +174,10 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
{
# if CRC_BE_BITS == 8
- const u32 *b =(u32 *)p;
const u32 *tab = crc32table_be;
-# ifdef __LITTLE_ENDIAN
-# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
-# else
-# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
-# endif
-
crc = __cpu_to_be32(crc);
- /* Align it */
- if(unlikely(((long)b)&3 && len)){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (u32 *)p;
- } while ((--len) && ((long)b)&3 );
- }
- if(likely(len >= 4)){
- /* load data 32 bits wide, xor data 32 bits wide. */
- size_t save_len = len & 3;
- len = len >> 2;
- --b; /* use pre increment below(*++b) for speed */
- do {
- crc ^= *++b;
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- DO_CRC(0);
- } while (--len);
- b++; /* point to next byte(s) */
- len = save_len;
- }
- /* And the last few bytes */
- if(len){
- do {
- u8 *p = (u8 *)b;
- DO_CRC(*p++);
- b = (void *)p;
- } while (--len);
- }
+ crc = crc32_body(crc, p, len, tab);
return __be32_to_cpu(crc);
#undef ENDIAN_SHIFT
#undef DO_CRC
diff --git a/lib/ctype.c b/lib/ctype.c
index d02ace14a32..26baa620e95 100644
--- a/lib/ctype.c
+++ b/lib/ctype.c
@@ -7,30 +7,30 @@
#include <linux/ctype.h>
#include <linux/module.h>
-unsigned char _ctype[] = {
-_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
-_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
-_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
-_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
-_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
-_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
-_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
-_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
-_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
-_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
-_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
-_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
-_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
-_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
-_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
-_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
-_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
-_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
EXPORT_SYMBOL(_ctype);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index eae56fddfa3..a9a8996d286 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -26,14 +26,14 @@
struct debug_bucket {
struct hlist_head list;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
-static DEFINE_SPINLOCK(pool_lock);
+static DEFINE_RAW_SPINLOCK(pool_lock);
static HLIST_HEAD(obj_pool);
@@ -96,10 +96,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -133,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
- spin_lock(&pool_lock);
+ raw_spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -152,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
if (obj_pool_free < obj_pool_min_free)
obj_pool_min_free = obj_pool_free;
}
- spin_unlock(&pool_lock);
+ raw_spin_unlock(&pool_lock);
return obj;
}
@@ -165,7 +165,7 @@ static void free_obj_work(struct work_struct *work)
struct debug_obj *obj;
unsigned long flags;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
while (obj_pool_free > ODEBUG_POOL_SIZE) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
hlist_del(&obj->node);
@@ -174,11 +174,11 @@ static void free_obj_work(struct work_struct *work)
* We release pool_lock across kmem_cache_free() to
* avoid contention on pool_lock.
*/
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
}
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
}
/*
@@ -190,7 +190,7 @@ static void free_object(struct debug_obj *obj)
unsigned long flags;
int sched = 0;
- spin_lock_irqsave(&pool_lock, flags);
+ raw_spin_lock_irqsave(&pool_lock, flags);
/*
* schedule work when the pool is filled and the cache is
* initialized:
@@ -200,7 +200,7 @@ static void free_object(struct debug_obj *obj)
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock_irqrestore(&pool_lock, flags);
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
if (sched)
schedule_work(&debug_obj_work);
}
@@ -221,9 +221,9 @@ static void debug_objects_oom(void)
printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_move_list(&db->list, &freelist);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -303,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj) {
obj = alloc_object(addr, db, descr);
if (!obj) {
debug_objects_enabled = 0;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_objects_oom();
return;
}
@@ -327,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "init");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_init, addr, state);
return;
@@ -338,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -385,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -398,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "activate");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_activate, addr, state);
return;
@@ -408,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
default:
break;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
return;
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/*
* This happens when a static object is activated. We
* let the type specific code decide whether this is
@@ -438,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (obj) {
@@ -463,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
debug_print_object(&o, "deactivate");
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -483,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -498,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "destroy");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_destroy, addr, state);
return;
@@ -509,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
break;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
/**
@@ -529,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj)
@@ -539,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "free");
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free, addr, state);
return;
default:
hlist_del(&obj->node);
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
out_unlock:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -575,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
repeat:
cnt = 0;
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
cnt++;
oaddr = (unsigned long) obj->object;
@@ -587,7 +587,7 @@ repeat:
debug_print_object(obj, "free");
descr = obj->descr;
state = obj->state;
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
debug_object_fixup(descr->fixup_free,
(void *) oaddr, state);
goto repeat;
@@ -597,7 +597,7 @@ repeat:
break;
}
}
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
/* Now free them */
hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -783,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
db = get_bucket((unsigned long) addr);
- spin_lock_irqsave(&db->lock, flags);
+ raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
@@ -807,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
}
res = 0;
out:
- spin_unlock_irqrestore(&db->lock, flags);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
if (res)
debug_objects_enabled = 0;
return res;
@@ -907,7 +907,7 @@ void __init debug_objects_early_init(void)
int i;
for (i = 0; i < ODEBUG_HASH_SIZE; i++)
- spin_lock_init(&obj_hash[i].lock);
+ raw_spin_lock_init(&obj_hash[i].lock);
for (i = 0; i < ODEBUG_POOL_SIZE; i++)
hlist_add_head(&obj_static_pool[i].node, &obj_pool);
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 76074209f9a..a4e971dee10 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -637,6 +637,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
/* Allocate bunzip_data. Most fields initialize to zero. */
bd = *bdp = malloc(i);
+ if (!bd)
+ return RETVAL_OUT_OF_MEMORY;
memset(bd, 0, sizeof(struct bunzip_data));
/* Setup input buffer */
bd->inbuf = inbuf;
@@ -664,6 +666,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
bd->dbufSize = 100000*(i-BZh0);
bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+ if (!bd->dbuf)
+ return RETVAL_OUT_OF_MEMORY;
return RETVAL_OK;
}
@@ -686,7 +690,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
if (!outbuf) {
error("Could not allocate output bufer");
- return -1;
+ return RETVAL_OUT_OF_MEMORY;
}
if (buf)
inbuf = buf;
@@ -694,6 +698,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
inbuf = malloc(BZIP2_IOBUF_SIZE);
if (!inbuf) {
error("Could not allocate input bufer");
+ i = RETVAL_OUT_OF_MEMORY;
goto exit_0;
}
i = start_bunzip(&bd, inbuf, len, fill);
@@ -720,11 +725,14 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
} else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
error("Compressed file ends unexpectedly");
}
+ if (!bd)
+ goto exit_1;
if (bd->dbuf)
large_free(bd->dbuf);
if (pos)
*pos = bd->inbufPos;
free(bd);
+exit_1:
if (!buf)
free(inbuf);
exit_0:
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e22c148e4b7..f9350291598 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
@@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
char *end;
/* Skip leading whitespace */
- while (*buf && isspace(*buf))
- buf++;
+ buf = skip_spaces(buf);
if (!*buf)
break; /* oh, it was trailing whitespace */
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eed2bdb865e..e67f97495dd 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/bitmap.h>
#include <linux/genalloc.h>
@@ -114,7 +115,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
struct gen_pool_chunk *chunk;
unsigned long addr, flags;
int order = pool->min_alloc_order;
- int nbits, bit, start_bit, end_bit;
+ int nbits, start_bit, end_bit;
if (size == 0)
return 0;
@@ -129,29 +130,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
end_bit -= nbits + 1;
spin_lock_irqsave(&chunk->lock, flags);
- bit = -1;
- while (bit + 1 < end_bit) {
- bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
- if (bit >= end_bit)
- break;
-
- start_bit = bit;
- if (nbits > 1) {
- bit = find_next_bit(chunk->bits, bit + nbits,
- bit + 1);
- if (bit - start_bit < nbits)
- continue;
- }
-
- addr = chunk->start_addr +
- ((unsigned long)start_bit << order);
- while (nbits--)
- __set_bit(start_bit++, chunk->bits);
+ start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
+ nbits, 0);
+ if (start_bit >= end_bit) {
spin_unlock_irqrestore(&chunk->lock, flags);
- read_unlock(&pool->lock);
- return addr;
+ continue;
}
+
+ addr = chunk->start_addr + ((unsigned long)start_bit << order);
+
+ bitmap_set(chunk->bits, start_bit, nbits);
spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
read_unlock(&pool->lock);
return 0;
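gen_pool_alloc()'s open-coded scan for a run of free bits is replaced by the generic bitmap helpers from <linux/bitmap.h>. A minimal usage sketch of that pair; the function and its names are illustrative, not part of the patch:

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Reserve a run of 'nbits' clear bits in 'map' (which holds 'map_bits' bits). */
static long reserve_run(unsigned long *map, unsigned long map_bits,
			unsigned int nbits)
{
	unsigned long start;

	/* Find nbits consecutive zero bits, starting at bit 0, no alignment. */
	start = bitmap_find_next_zero_area(map, map_bits, 0, nbits, 0);
	if (start >= map_bits)
		return -ENOSPC;		/* out-of-range index means no free run */

	bitmap_set(map, start, nbits);	/* mark the run as allocated */
	return start;
}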
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 75dbda03f4f..c0251f4ad08 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,41 +3,7 @@
*/
#include <linux/module.h>
-#include <linux/bitops.h>
-
-static unsigned long find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
-{
- unsigned long index, end, i;
-again:
- index = find_next_zero_bit(map, size, start);
-
- /* Align allocation */
- index = (index + align_mask) & ~align_mask;
-
- end = index + nr;
- if (end >= size)
- return -1;
- for (i = index; i < end; i++) {
- if (test_bit(i, map)) {
- start = i+1;
- goto again;
- }
- }
- return index;
-}
-
-void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, map);
- i++;
- }
-}
+#include <linux/bitmap.h>
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
@@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long align_mask)
{
unsigned long index;
+
+ /* We don't want the last of the limit */
+ size -= 1;
again:
- index = find_next_zero_area(map, size, start, nr, align_mask);
- if (index != -1) {
+ index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+ if (index < size) {
if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
/* we could do more effectively */
start = index + 1;
goto again;
}
- iommu_area_reserve(map, index, nr);
+ bitmap_set(map, index, nr);
+ return index;
}
- return index;
+ return -1;
}
EXPORT_SYMBOL(iommu_area_alloc);
-void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
-{
- unsigned long end = start + nr;
-
- while (start < end) {
- __clear_bit(start, map);
- start++;
- }
-}
-EXPORT_SYMBOL(iommu_area_free);
-
unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size)
{
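iommu_area_alloc() makes the same switch; its extra align_mask argument (alignment minus one, a power of two) is passed straight through to bitmap_find_next_zero_area(). Illustrative call only, with made-up names and sizes:

	/* 8 IOMMU pages, aligned to a 16-page boundary (align_mask = 16 - 1) */
	idx = bitmap_find_next_zero_area(iommu_map, iommu_bits, 0, 8, 15);
	if (idx < iommu_bits)
		bitmap_set(iommu_map, idx, 8);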
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 4ebfa5a164d..b135d04aa48 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -23,7 +23,7 @@
*
* Don't use in new code.
*/
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
/*
@@ -36,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
* If it successfully gets the lock, it should increment
* the preemption count like any spinlock does.
*
- * (This works on UP too - _raw_spin_trylock will never
+ * (This works on UP too - do_raw_spin_trylock will never
* return false in that case)
*/
int __lockfunc __reacquire_kernel_lock(void)
{
- while (!_raw_spin_trylock(&kernel_flag)) {
+ while (!do_raw_spin_trylock(&kernel_flag)) {
if (need_resched())
return -EAGAIN;
cpu_relax();
@@ -52,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
void __lockfunc __release_kernel_lock(void)
{
- _raw_spin_unlock(&kernel_flag);
+ do_raw_spin_unlock(&kernel_flag);
preempt_enable_no_resched();
}
/*
* These are the BKL spinlocks - we try to be polite about preemption.
* If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
+ * do_raw_spin_trylock() will always succeed.
*/
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
preempt_disable();
- if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+ if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
/*
* If preemption was disabled even before this
* was called, there's nothing we can be polite
* about - just spin.
*/
if (preempt_count() > 1) {
- _raw_spin_lock(&kernel_flag);
+ do_raw_spin_lock(&kernel_flag);
return;
}
@@ -82,10 +82,10 @@ static inline void __lock_kernel(void)
*/
do {
preempt_enable();
- while (spin_is_locked(&kernel_flag))
+ while (raw_spin_is_locked(&kernel_flag))
cpu_relax();
preempt_disable();
- } while (!_raw_spin_trylock(&kernel_flag));
+ } while (!do_raw_spin_trylock(&kernel_flag));
}
}
@@ -96,7 +96,7 @@ static inline void __lock_kernel(void)
*/
static inline void __lock_kernel(void)
{
- _raw_spin_lock(&kernel_flag);
+ do_raw_spin_lock(&kernel_flag);
}
#endif
@@ -106,7 +106,7 @@ static inline void __unlock_kernel(void)
* the BKL is not covered by lockdep, so we open-code the
* unlocking sequence (and thus avoid the dep-chain ops):
*/
- _raw_spin_unlock(&kernel_flag);
+ do_raw_spin_unlock(&kernel_flag);
preempt_enable();
}
@@ -122,8 +122,10 @@ void __lockfunc _lock_kernel(const char *func, const char *file, int line)
trace_lock_kernel(func, file, line);
- if (likely(!depth))
+ if (likely(!depth)) {
+ might_sleep();
__lock_kernel();
+ }
current->lock_depth = depth;
}
diff --git a/lib/lmb.c b/lib/lmb.c
index 0343c05609f..9cee17142b2 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -263,7 +263,7 @@ long __init lmb_reserve(u64 base, u64 size)
return lmb_add_region(_rgn, base, size);
}
-long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
+long lmb_overlaps_region(struct lmb_region *rgn, u64 base, u64 size)
{
unsigned long i;
@@ -493,6 +493,11 @@ int __init lmb_is_reserved(u64 addr)
return 0;
}
+int lmb_is_region_reserved(u64 base, u64 size)
+{
+ return lmb_overlaps_region(&lmb.reserved, base, size);
+}
+
/*
* Given a <base, len>, find which memory regions belong to this range.
* Adjust the request and return a contiguous chunk.
diff --git a/lib/parser.c b/lib/parser.c
index b00d02059a5..fb34977246b 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[])
args[argc].from = s;
switch (*p++) {
- case 's':
- if (strlen(s) == 0)
+ case 's': {
+ size_t str_len = strlen(s);
+
+ if (str_len == 0)
return 0;
- else if (len == -1 || len > strlen(s))
- len = strlen(s);
+ if (len == -1 || len > str_len)
+ len = str_len;
args[argc].to = s + len;
break;
+ }
case 'd':
simple_strtol(s, &args[argc].to, 0);
goto num;
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1..1471988d919 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head)
{
- WARN_ON(!head->lock);
- if (head->lock)
- WARN_ON_SMP(!spin_is_locked(head->lock));
+ WARN_ON(!head->rawlock && !head->spinlock);
+ if (head->rawlock)
+ WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
+ if (head->spinlock)
+ WARN_ON_SMP(!spin_is_locked(head->spinlock));
plist_check_list(&head->prio_list);
plist_check_list(&head->node_list);
}
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 9df3ca56db1..ccf95bff798 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,6 +17,19 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
+int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ int ret = 1;
+ unsigned long flags;
+
+ if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
+ ret = (sem->activity != 0);
+ spin_unlock_irqrestore(&sem->wait_lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rwsem_is_locked);
+
/*
* initialise the semaphore
*/
@@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
+EXPORT_SYMBOL(__init_rwsem);
/*
* handle the lock release when processes blocked on it that can now run
@@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
}
-EXPORT_SYMBOL(__init_rwsem);
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write_nested);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490..4755b98b6df 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include <linux/module.h>
-void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
}
-EXPORT_SYMBOL(__spin_lock_init);
+EXPORT_SYMBOL(__raw_spin_lock_init);
void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
- lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+ lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
lock->owner = SPINLOCK_OWNER_INIT;
lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
EXPORT_SYMBOL(__rwlock_init);
-static void spin_bug(spinlock_t *lock, const char *msg)
+static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
struct task_struct *owner = NULL;
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
static inline void
-debug_spin_lock_before(spinlock_t *lock)
+debug_spin_lock_before(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
lock, "cpu recursion");
}
-static inline void debug_spin_lock_after(spinlock_t *lock)
+static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
lock->owner_cpu = raw_smp_processor_id();
lock->owner = current;
}
-static inline void debug_spin_unlock(spinlock_t *lock)
+static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
- SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
+ SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
lock->owner_cpu = -1;
}
-static void __spin_lock_debug(spinlock_t *lock)
+static void __spin_lock_debug(raw_spinlock_t *lock)
{
u64 i;
u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_spin_trylock(&lock->raw_lock))
+ if (arch_spin_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
}
}
-void _raw_spin_lock(spinlock_t *lock)
+void do_raw_spin_lock(raw_spinlock_t *lock)
{
debug_spin_lock_before(lock);
- if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
+ if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
__spin_lock_debug(lock);
debug_spin_lock_after(lock);
}
-int _raw_spin_trylock(spinlock_t *lock)
+int do_raw_spin_trylock(raw_spinlock_t *lock)
{
- int ret = __raw_spin_trylock(&lock->raw_lock);
+ int ret = arch_spin_trylock(&lock->raw_lock);
if (ret)
debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
return ret;
}
-void _raw_spin_unlock(spinlock_t *lock)
+void do_raw_spin_unlock(raw_spinlock_t *lock)
{
debug_spin_unlock(lock);
- __raw_spin_unlock(&lock->raw_lock);
+ arch_spin_unlock(&lock->raw_lock);
}
static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_read_trylock(&lock->raw_lock))
+ if (arch_read_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
}
#endif
-void _raw_read_lock(rwlock_t *lock)
+void do_raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_lock(&lock->raw_lock);
+ arch_read_lock(&lock->raw_lock);
}
-int _raw_read_trylock(rwlock_t *lock)
+int do_raw_read_trylock(rwlock_t *lock)
{
- int ret = __raw_read_trylock(&lock->raw_lock);
+ int ret = arch_read_trylock(&lock->raw_lock);
#ifndef CONFIG_SMP
/*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
return ret;
}
-void _raw_read_unlock(rwlock_t *lock)
+void do_raw_read_unlock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- __raw_read_unlock(&lock->raw_lock);
+ arch_read_unlock(&lock->raw_lock);
}
static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
for (;;) {
for (i = 0; i < loops; i++) {
- if (__raw_write_trylock(&lock->raw_lock))
+ if (arch_write_trylock(&lock->raw_lock))
return;
__delay(1);
}
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
}
#endif
-void _raw_write_lock(rwlock_t *lock)
+void do_raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
- __raw_write_lock(&lock->raw_lock);
+ arch_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
-int _raw_write_trylock(rwlock_t *lock)
+int do_raw_write_trylock(rwlock_t *lock)
{
- int ret = __raw_write_trylock(&lock->raw_lock);
+ int ret = arch_write_trylock(&lock->raw_lock);
if (ret)
debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
return ret;
}
-void _raw_write_unlock(rwlock_t *lock)
+void do_raw_write_unlock(rwlock_t *lock)
{
debug_write_unlock(lock);
- __raw_write_unlock(&lock->raw_lock);
+ arch_write_unlock(&lock->raw_lock);
}
diff --git a/lib/string.c b/lib/string.c
index e96421ab9a9..9f75b4ec50b 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -338,20 +338,34 @@ EXPORT_SYMBOL(strnchr);
#endif
/**
- * strstrip - Removes leading and trailing whitespace from @s.
+ * skip_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ */
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+EXPORT_SYMBOL(skip_spaces);
+
+/**
+ * strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
-char *strstrip(char *s)
+char *strim(char *s)
{
size_t size;
char *end;
+ s = skip_spaces(s);
size = strlen(s);
-
if (!size)
return s;
@@ -360,12 +374,9 @@ char *strstrip(char *s)
end--;
*(end + 1) = '\0';
- while (*s && isspace(*s))
- s++;
-
return s;
}
-EXPORT_SYMBOL(strstrip);
+EXPORT_SYMBOL(strim);
#ifndef __HAVE_ARCH_STRLEN
/**
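lib/string.c gains skip_spaces(), and strstrip() becomes strim(), which now handles the leading end via skip_spaces(). A hedged usage sketch; the helper functions and variable names below are illustrative:

#include <linux/types.h>
#include <linux/string.h>

/* Illustrative only: advance past indentation without modifying the buffer. */
static char *first_word(const char *line)
{
	return skip_spaces(line);
}

/* Illustrative only: trim a writable line in place; strim() skips leading
 * whitespace via skip_spaces() and NUL-terminates before trailing blanks. */
static bool line_has_payload(char *line)
{
	return *strim(line) != '\0';
}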
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5bc01803f8f..437eedb5a53 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -549,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
- if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
+ if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
@@ -571,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
/* Confirm address can be DMA'd by device */
- if (dev_addr + size > dma_mask) {
+ if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
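The two swiotlb hunks are an off-by-one fix: the check must test that the last byte of the buffer, dev_addr + size - 1, fits under the device's DMA mask, not the first byte past the buffer. A worked example with illustrative numbers:

	/* 24-bit device: coherent_dma_mask = 0x00ffffff
	 * buffer: dev_addr = 0x00fff000, size = 0x1000 (last byte 0x00ffffff)
	 *
	 *   old check: dev_addr + size     = 0x01000000 > mask  -> rejected
	 *   new check: dev_addr + size - 1 = 0x00ffffff > mask? no -> accepted
	 *
	 * Every byte of the buffer is reachable, so the new check is correct.
	 */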
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 33bed5e67a2..d4996cf46eb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -9,7 +9,7 @@
* Wirzenius wrote this portably, Torvalds fucked it up :-)
*/
-/*
+/*
* Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
* - changed to provide snprintf and vsnprintf functions
* So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
@@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp)
}
/**
- * simple_strtoul - convert a string to an unsigned long
+ * simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- unsigned long result = 0;
+ unsigned long long result = 0;
if (!base)
base = simple_guess_base(cp);
@@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
result = result * base + value;
cp++;
}
-
if (endp)
*endp = (char *)cp;
+
return result;
}
-EXPORT_SYMBOL(simple_strtoul);
+EXPORT_SYMBOL(simple_strtoull);
/**
- * simple_strtol - convert a string to a signed long
+ * simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-long simple_strtol(const char *cp, char **endp, unsigned int base)
+unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
- if(*cp == '-')
- return -simple_strtoul(cp + 1, endp, base);
- return simple_strtoul(cp, endp, base);
+ return simple_strtoull(cp, endp, base);
}
-EXPORT_SYMBOL(simple_strtol);
+EXPORT_SYMBOL(simple_strtoul);
/**
- * simple_strtoull - convert a string to an unsigned long long
+ * simple_strtol - convert a string to a signed long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*/
-unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+long simple_strtol(const char *cp, char **endp, unsigned int base)
{
- unsigned long long result = 0;
-
- if (!base)
- base = simple_guess_base(cp);
-
- if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
- cp += 2;
-
- while (isxdigit(*cp)) {
- unsigned int value;
-
- value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
- if (value >= base)
- break;
- result = result * base + value;
- cp++;
- }
+ if (*cp == '-')
+ return -simple_strtoul(cp + 1, endp, base);
- if (endp)
- *endp = (char *)cp;
- return result;
+ return simple_strtoul(cp, endp, base);
}
-EXPORT_SYMBOL(simple_strtoull);
+EXPORT_SYMBOL(simple_strtol);
/**
* simple_strtoll - convert a string to a signed long long
@@ -132,8 +113,9 @@ EXPORT_SYMBOL(simple_strtoull);
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- if(*cp=='-')
+ if (*cp == '-')
return -simple_strtoull(cp + 1, endp, base);
+
return simple_strtoull(cp, endp, base);
}
@@ -173,6 +155,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
val = simple_strtoul(cp, &tail, base);
if (tail == cp)
return -EINVAL;
+
if ((*tail == '\0') ||
((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
*res = val;
@@ -285,10 +268,11 @@ EXPORT_SYMBOL(strict_strtoll);
static int skip_atoi(const char **s)
{
- int i=0;
+ int i = 0;
while (isdigit(**s))
i = i*10 + *((*s)++) - '0';
+
return i;
}
@@ -302,7 +286,7 @@ static int skip_atoi(const char **s)
/* Formats correctly any integer in [0,99999].
* Outputs from one to five digits depending on input.
* On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
-static char* put_dec_trunc(char *buf, unsigned q)
+static char *put_dec_trunc(char *buf, unsigned q)
{
unsigned d3, d2, d1, d0;
d1 = (q>>4) & 0xf;
@@ -331,14 +315,15 @@ static char* put_dec_trunc(char *buf, unsigned q)
d3 = d3 - 10*q;
*buf++ = d3 + '0'; /* next digit */
if (q != 0)
- *buf++ = q + '0'; /* most sign. digit */
+ *buf++ = q + '0'; /* most sign. digit */
}
}
}
+
return buf;
}
/* Same with if's removed. Always emits five digits */
-static char* put_dec_full(char *buf, unsigned q)
+static char *put_dec_full(char *buf, unsigned q)
{
/* BTW, if q is in [0,9999], 8-bit ints will be enough, */
/* but anyway, gcc produces better code with full-sized ints */
@@ -347,14 +332,15 @@ static char* put_dec_full(char *buf, unsigned q)
d2 = (q>>8) & 0xf;
d3 = (q>>12);
- /* Possible ways to approx. divide by 10 */
- /* gcc -O2 replaces multiply with shifts and adds */
- // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
- // (x * 0x67) >> 10: 1100111
- // (x * 0x34) >> 9: 110100 - same
- // (x * 0x1a) >> 8: 11010 - same
- // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
-
+ /*
+ * Possible ways to approx. divide by 10
+ * gcc -O2 replaces multiply with shifts and adds
+ * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
+ * (x * 0x67) >> 10: 1100111
+ * (x * 0x34) >> 9: 110100 - same
+ * (x * 0x1a) >> 8: 11010 - same
+ * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
+ */
d0 = 6*(d3 + d2 + d1) + (q & 0xf);
q = (d0 * 0xcd) >> 11;
d0 = d0 - 10*q;
@@ -375,10 +361,11 @@ static char* put_dec_full(char *buf, unsigned q)
d3 = d3 - 10*q;
*buf++ = d3 + '0';
*buf++ = q + '0';
+
return buf;
}
/* No inlining helps gcc to use registers better */
-static noinline char* put_dec(char *buf, unsigned long long num)
+static noinline char *put_dec(char *buf, unsigned long long num)
{
while (1) {
unsigned rem;
@@ -448,9 +435,9 @@ static char *number(char *buf, char *end, unsigned long long num,
spec.flags &= ~ZEROPAD;
sign = 0;
if (spec.flags & SIGN) {
- if ((signed long long) num < 0) {
+ if ((signed long long)num < 0) {
sign = '-';
- num = - (signed long long) num;
+ num = -(signed long long)num;
spec.field_width--;
} else if (spec.flags & PLUS) {
sign = '+';
@@ -478,7 +465,9 @@ static char *number(char *buf, char *end, unsigned long long num,
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
- if (spec.base == 16) shift = 4;
+
+ if (spec.base == 16)
+ shift = 4;
do {
tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
num >>= shift;
@@ -493,7 +482,7 @@ static char *number(char *buf, char *end, unsigned long long num,
/* leading space padding */
spec.field_width -= spec.precision;
if (!(spec.flags & (ZEROPAD+LEFT))) {
- while(--spec.field_width >= 0) {
+ while (--spec.field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
@@ -543,15 +532,16 @@ static char *number(char *buf, char *end, unsigned long long num,
*buf = ' ';
++buf;
}
+
return buf;
}
-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
{
int len, i;
if ((unsigned long)s < PAGE_SIZE)
- s = "<NULL>";
+ s = "(null)";
len = strnlen(s, spec.precision);
@@ -572,6 +562,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
*buf = ' ';
++buf;
}
+
return buf;
}
@@ -585,47 +576,101 @@ static char *symbol_string(char *buf, char *end, void *ptr,
sprint_symbol(sym, value);
else
kallsyms_lookup(value, NULL, NULL, NULL, sym);
+
return string(buf, end, sym, spec);
#else
- spec.field_width = 2*sizeof(void *);
+ spec.field_width = 2 * sizeof(void *);
spec.flags |= SPECIAL | SMALL | ZEROPAD;
spec.base = 16;
+
return number(buf, end, value, spec);
#endif
}
static char *resource_string(char *buf, char *end, struct resource *res,
- struct printf_spec spec)
+ struct printf_spec spec, const char *fmt)
{
#ifndef IO_RSRC_PRINTK_SIZE
-#define IO_RSRC_PRINTK_SIZE 4
+#define IO_RSRC_PRINTK_SIZE 6
#endif
#ifndef MEM_RSRC_PRINTK_SIZE
-#define MEM_RSRC_PRINTK_SIZE 8
+#define MEM_RSRC_PRINTK_SIZE 10
#endif
- struct printf_spec num_spec = {
+ struct printf_spec hex_spec = {
.base = 16,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
- /* room for the actual numbers, the two "0x", -, [, ] and the final zero */
- char sym[4*sizeof(resource_size_t) + 8];
+ struct printf_spec dec_spec = {
+ .base = 10,
+ .precision = -1,
+ .flags = 0,
+ };
+ struct printf_spec str_spec = {
+ .field_width = -1,
+ .precision = 10,
+ .flags = LEFT,
+ };
+ struct printf_spec flag_spec = {
+ .base = 16,
+ .precision = -1,
+ .flags = SPECIAL | SMALL,
+ };
+
+ /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
+ * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
+#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
+#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
+#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]")
+#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
+ char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
+ 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
+
char *p = sym, *pend = sym + sizeof(sym);
- int size = -1;
+ int size = -1, addr = 0;
+ int decode = (fmt[0] == 'R') ? 1 : 0;
- if (res->flags & IORESOURCE_IO)
+ if (res->flags & IORESOURCE_IO) {
size = IO_RSRC_PRINTK_SIZE;
- else if (res->flags & IORESOURCE_MEM)
+ addr = 1;
+ } else if (res->flags & IORESOURCE_MEM) {
size = MEM_RSRC_PRINTK_SIZE;
+ addr = 1;
+ }
*p++ = '[';
- num_spec.field_width = size;
- p = number(p, pend, res->start, num_spec);
- *p++ = '-';
- p = number(p, pend, res->end, num_spec);
+ if (res->flags & IORESOURCE_IO)
+ p = string(p, pend, "io ", str_spec);
+ else if (res->flags & IORESOURCE_MEM)
+ p = string(p, pend, "mem ", str_spec);
+ else if (res->flags & IORESOURCE_IRQ)
+ p = string(p, pend, "irq ", str_spec);
+ else if (res->flags & IORESOURCE_DMA)
+ p = string(p, pend, "dma ", str_spec);
+ else {
+ p = string(p, pend, "??? ", str_spec);
+ decode = 0;
+ }
+ hex_spec.field_width = size;
+ p = number(p, pend, res->start, addr ? hex_spec : dec_spec);
+ if (res->start != res->end) {
+ *p++ = '-';
+ p = number(p, pend, res->end, addr ? hex_spec : dec_spec);
+ }
+ if (decode) {
+ if (res->flags & IORESOURCE_MEM_64)
+ p = string(p, pend, " 64bit", str_spec);
+ if (res->flags & IORESOURCE_PREFETCH)
+ p = string(p, pend, " pref", str_spec);
+ if (res->flags & IORESOURCE_DISABLED)
+ p = string(p, pend, " disabled", str_spec);
+ } else {
+ p = string(p, pend, " flags ", str_spec);
+ p = number(p, pend, res->flags, flag_spec);
+ }
*p++ = ']';
- *p = 0;
+ *p = '\0';
return string(buf, end, sym, spec);
}
@@ -666,22 +711,19 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
if (i < 3)
*p++ = '.';
}
-
*p = '\0';
+
return p;
}
static char *ip6_compressed_string(char *p, const char *addr)
{
- int i;
- int j;
- int range;
+ int i, j, range;
unsigned char zerolength[8];
int longest = 1;
int colonpos = -1;
u16 word;
- u8 hi;
- u8 lo;
+ u8 hi, lo;
bool needcolon = false;
bool useIPv4;
struct in6_addr in6;
@@ -735,8 +777,9 @@ static char *ip6_compressed_string(char *p, const char *addr)
p = pack_hex_byte(p, hi);
else
*p++ = hex_asc_lo(hi);
+ p = pack_hex_byte(p, lo);
}
- if (hi || lo > 0x0f)
+ else if (lo > 0x0f)
p = pack_hex_byte(p, lo);
else
*p++ = hex_asc_lo(lo);
@@ -748,22 +791,23 @@ static char *ip6_compressed_string(char *p, const char *addr)
*p++ = ':';
p = ip4_string(p, &in6.s6_addr[12], false);
}
-
*p = '\0';
+
return p;
}
static char *ip6_string(char *p, const char *addr, const char *fmt)
{
int i;
+
for (i = 0; i < 8; i++) {
p = pack_hex_byte(p, *addr++);
p = pack_hex_byte(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
-
*p = '\0';
+
return p;
}
@@ -790,6 +834,52 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
return string(buf, end, ip4_addr, spec);
}
+static char *uuid_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
+ char *p = uuid;
+ int i;
+ static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+ static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+ const u8 *index = be;
+ bool uc = false;
+
+ switch (*(++fmt)) {
+ case 'L':
+ uc = true; /* fall-through */
+ case 'l':
+ index = le;
+ break;
+ case 'B':
+ uc = true;
+ break;
+ }
+
+ for (i = 0; i < 16; i++) {
+ p = pack_hex_byte(p, addr[index[i]]);
+ switch (i) {
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ *p++ = '-';
+ break;
+ }
+ }
+
+ *p = 0;
+
+ if (uc) {
+ p = uuid;
+ do {
+ *p = toupper(*p);
+ } while (*(++p));
+ }
+
+ return string(buf, end, uuid, spec);
+}
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -801,8 +891,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
* - 'f' For simple symbolic function names without offset
* - 'S' For symbolic direct pointers with offset
* - 's' For symbolic direct pointers without offset
- * - 'R' For a struct resource pointer, it prints the range of
- * addresses (not the name nor the flags)
+ * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+ * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
@@ -814,6 +904,18 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
* - 'I6c' for IPv6 addresses printed as specified by
* http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
+ * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
+ * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ * Options for %pU are:
+ * b big endian lower case hex (default)
+ * B big endian UPPER case hex
+ * l little endian lower case hex
+ * L little endian UPPER case hex
+ * big endian output byte order is:
+ * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
+ * little endian output byte order is:
+ * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
+ *
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
* pointer to the real address.
@@ -828,12 +930,13 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'F':
case 'f':
ptr = dereference_function_descriptor(ptr);
- case 's':
/* Fallthrough */
case 'S':
+ case 's':
return symbol_string(buf, end, ptr, spec, *fmt);
case 'R':
- return resource_string(buf, end, ptr, spec);
+ case 'r':
+ return resource_string(buf, end, ptr, spec, fmt);
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
return mac_address_string(buf, end, ptr, spec, fmt);
@@ -853,6 +956,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return ip4_addr_string(buf, end, ptr, spec, fmt);
}
break;
+ case 'U':
+ return uuid_string(buf, end, ptr, spec, fmt);
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
@@ -970,8 +1075,8 @@ precision:
qualifier:
/* get the conversion qualifier */
spec->qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
+ if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
+ TOLOWER(*fmt) == 'z' || *fmt == 't') {
spec->qualifier = *fmt++;
if (unlikely(spec->qualifier == *fmt)) {
if (spec->qualifier == 'l') {
@@ -1038,7 +1143,7 @@ qualifier:
spec->type = FORMAT_TYPE_LONG;
else
spec->type = FORMAT_TYPE_ULONG;
- } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') {
+ } else if (TOLOWER(spec->qualifier) == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (spec->qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
@@ -1074,7 +1179,18 @@ qualifier:
* %ps output the name of a text symbol without offset
* %pF output the name of a function pointer with its offset
* %pf output the name of a function pointer without its offset
- * %pR output the address range in a struct resource
+ * %pR output the address range in a struct resource with decoded flags
+ * %pr output the address range in a struct resource with raw flags
+ * %pM output a 6-byte MAC address with colons
+ * %pm output a 6-byte MAC address without colons
+ * %pI4 print an IPv4 address without leading zeros
+ * %pi4 print an IPv4 address with leading zeros
+ * %pI6 print an IPv6 address with colons
+ * %pi6 print an IPv6 address without colons
+ * %pI6c print an IPv6 address as specified by
+ * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
+ * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
+ * case.
* %n is ignored
*
* The return value is the number of characters which would
@@ -1091,8 +1207,7 @@ qualifier:
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
unsigned long long num;
- char *str, *end, c;
- int read;
+ char *str, *end;
struct printf_spec spec = {0};
/* Reject out-of-range values early. Large positive sizes are
@@ -1111,8 +1226,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
while (*fmt) {
const char *old_fmt = fmt;
-
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
@@ -1136,7 +1250,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
spec.precision = va_arg(args, int);
break;
- case FORMAT_TYPE_CHAR:
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
@@ -1155,6 +1271,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
++str;
}
break;
+ }
case FORMAT_TYPE_STR:
str = string(str, end, va_arg(args, char *), spec);
@@ -1185,8 +1302,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
if (qualifier == 'l') {
long *ip = va_arg(args, long *);
*ip = (str - buf);
- } else if (qualifier == 'Z' ||
- qualifier == 'z') {
+ } else if (TOLOWER(qualifier) == 'z') {
size_t *ip = va_arg(args, size_t *);
*ip = (str - buf);
} else {
@@ -1269,7 +1385,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
- i=vsnprintf(buf,size,fmt,args);
+ i = vsnprintf(buf, size, fmt, args);
+
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(vscnprintf);
@@ -1288,14 +1405,15 @@ EXPORT_SYMBOL(vscnprintf);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
-int snprintf(char * buf, size_t size, const char *fmt, ...)
+int snprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
- i=vsnprintf(buf,size,fmt,args);
+ i = vsnprintf(buf, size, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(snprintf);
@@ -1311,7 +1429,7 @@ EXPORT_SYMBOL(snprintf);
* the trailing '\0'. If @size is <= 0 the function returns 0.
*/
-int scnprintf(char * buf, size_t size, const char *fmt, ...)
+int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
@@ -1319,6 +1437,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...)
va_start(args, fmt);
i = vsnprintf(buf, size, fmt, args);
va_end(args);
+
return (i >= size) ? (size - 1) : i;
}
EXPORT_SYMBOL(scnprintf);
@@ -1356,14 +1475,15 @@ EXPORT_SYMBOL(vsprintf);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
-int sprintf(char * buf, const char *fmt, ...)
+int sprintf(char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
- i=vsnprintf(buf, INT_MAX, fmt, args);
+ i = vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(sprintf);
@@ -1396,7 +1516,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
{
struct printf_spec spec = {0};
char *str, *end;
- int read;
str = (char *)bin_buf;
end = (char *)(bin_buf + size);
@@ -1421,14 +1540,15 @@ do { \
str += sizeof(type); \
} while (0)
-
while (*fmt) {
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE:
+ case FORMAT_TYPE_INVALID:
+ case FORMAT_TYPE_PERCENT_CHAR:
break;
case FORMAT_TYPE_WIDTH:
@@ -1443,13 +1563,14 @@ do { \
case FORMAT_TYPE_STR: {
const char *save_str = va_arg(args, char *);
size_t len;
+
if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
|| (unsigned long)save_str < PAGE_SIZE)
- save_str = "<NULL>";
- len = strlen(save_str);
- if (str + len + 1 < end)
- memcpy(str, save_str, len + 1);
- str += len + 1;
+ save_str = "(null)";
+ len = strlen(save_str) + 1;
+ if (str + len < end)
+ memcpy(str, save_str, len);
+ str += len;
break;
}
@@ -1460,19 +1581,13 @@ do { \
fmt++;
break;
- case FORMAT_TYPE_PERCENT_CHAR:
- break;
-
- case FORMAT_TYPE_INVALID:
- break;
-
case FORMAT_TYPE_NRCHARS: {
/* skip %n 's argument */
int qualifier = spec.qualifier;
void *skip_arg;
if (qualifier == 'l')
skip_arg = va_arg(args, long *);
- else if (qualifier == 'Z' || qualifier == 'z')
+ else if (TOLOWER(qualifier) == 'z')
skip_arg = va_arg(args, size_t *);
else
skip_arg = va_arg(args, int *);
@@ -1508,8 +1623,8 @@ do { \
}
}
}
- return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
+ return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
#undef save_arg
}
EXPORT_SYMBOL_GPL(vbin_printf);
@@ -1538,11 +1653,9 @@ EXPORT_SYMBOL_GPL(vbin_printf);
*/
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
{
- unsigned long long num;
- char *str, *end, c;
- const char *args = (const char *)bin_buf;
-
struct printf_spec spec = {0};
+ char *str, *end;
+ const char *args = (const char *)bin_buf;
if (WARN_ON_ONCE((int) size < 0))
return 0;
@@ -1572,10 +1685,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
while (*fmt) {
- int read;
const char *old_fmt = fmt;
-
- read = format_decode(fmt, &spec);
+ int read = format_decode(fmt, &spec);
fmt += read;
@@ -1599,7 +1710,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
spec.precision = get_arg(int);
break;
- case FORMAT_TYPE_CHAR:
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
@@ -1617,11 +1730,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
++str;
}
break;
+ }
case FORMAT_TYPE_STR: {
const char *str_arg = args;
- size_t len = strlen(str_arg);
- args += len + 1;
+ args += strlen(str_arg) + 1;
str = string(str, end, (char *)str_arg, spec);
break;
}
@@ -1633,11 +1746,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
break;
case FORMAT_TYPE_PERCENT_CHAR:
- if (str < end)
- *str = '%';
- ++str;
- break;
-
case FORMAT_TYPE_INVALID:
if (str < end)
*str = '%';
@@ -1648,15 +1756,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
/* skip */
break;
- default:
+ default: {
+ unsigned long long num;
+
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
num = get_arg(long long);
break;
case FORMAT_TYPE_ULONG:
- num = get_arg(unsigned long);
- break;
case FORMAT_TYPE_LONG:
num = get_arg(unsigned long);
break;
@@ -1686,8 +1794,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
str = number(str, end, num, spec);
- }
- }
+ } /* default: */
+ } /* switch(spec.type) */
+ } /* while(*fmt) */
if (size > 0) {
if (str < end)
@@ -1721,6 +1830,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
va_start(args, fmt);
ret = vbin_printf(bin_buf, size, fmt, args);
va_end(args);
+
return ret;
}
EXPORT_SYMBOL_GPL(bprintf);
@@ -1733,27 +1843,23 @@ EXPORT_SYMBOL_GPL(bprintf);
* @fmt: format of buffer
* @args: arguments
*/
-int vsscanf(const char * buf, const char * fmt, va_list args)
+int vsscanf(const char *buf, const char *fmt, va_list args)
{
const char *str = buf;
char *next;
char digit;
int num = 0;
- int qualifier;
- int base;
- int field_width;
- int is_sign = 0;
+ int qualifier, base, field_width;
+ bool is_sign;
- while(*fmt && *str) {
+ while (*fmt && *str) {
/* skip any white space in format */
/* white space in format matchs any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
- while (isspace(*fmt))
- ++fmt;
- while (isspace(*str))
- ++str;
+ fmt = skip_spaces(++fmt);
+ str = skip_spaces(str);
}
/* anything that is not a conversion must match exactly */
@@ -1766,7 +1872,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
if (!*fmt)
break;
++fmt;
-
+
/* skip this conversion.
* advance both strings to next white space
*/
@@ -1785,8 +1891,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
/* get conversion qualifier */
qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
- *fmt == 'Z' || *fmt == 'z') {
+ if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
+ TOLOWER(*fmt) == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
@@ -1798,16 +1904,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
}
}
}
- base = 10;
- is_sign = 0;
if (!*fmt || !*str)
break;
- switch(*fmt++) {
+ base = 10;
+ is_sign = 0;
+
+ switch (*fmt++) {
case 'c':
{
- char *s = (char *) va_arg(args,char*);
+ char *s = (char *)va_arg(args, char*);
if (field_width == -1)
field_width = 1;
do {
@@ -1818,17 +1925,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
continue;
case 's':
{
- char *s = (char *) va_arg(args, char *);
- if(field_width == -1)
+ char *s = (char *)va_arg(args, char *);
+ if (field_width == -1)
field_width = INT_MAX;
/* first, skip leading white space in buffer */
- while (isspace(*str))
- str++;
+ str = skip_spaces(str);
/* now copy until next white space */
- while (*str && !isspace(*str) && field_width--) {
+ while (*str && !isspace(*str) && field_width--)
*s++ = *str++;
- }
*s = '\0';
num++;
}
@@ -1836,7 +1941,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
case 'n':
/* return number of characters read so far */
{
- int *i = (int *)va_arg(args,int*);
+ int *i = (int *)va_arg(args, int*);
*i = str - buf;
}
continue;
@@ -1848,14 +1953,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
base = 16;
break;
case 'i':
- base = 0;
+ base = 0;
case 'd':
is_sign = 1;
case 'u':
break;
case '%':
/* looking for '%' in str */
- if (*str++ != '%')
+ if (*str++ != '%')
return num;
continue;
default:
@@ -1866,71 +1971,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
/* have some sort of integer conversion.
* first, skip white space in buffer.
*/
- while (isspace(*str))
- str++;
+ str = skip_spaces(str);
digit = *str;
if (is_sign && digit == '-')
digit = *(str + 1);
if (!digit
- || (base == 16 && !isxdigit(digit))
- || (base == 10 && !isdigit(digit))
- || (base == 8 && (!isdigit(digit) || digit > '7'))
- || (base == 0 && !isdigit(digit)))
- break;
+ || (base == 16 && !isxdigit(digit))
+ || (base == 10 && !isdigit(digit))
+ || (base == 8 && (!isdigit(digit) || digit > '7'))
+ || (base == 0 && !isdigit(digit)))
+ break;
- switch(qualifier) {
+ switch (qualifier) {
case 'H': /* that's 'hh' in format */
if (is_sign) {
- signed char *s = (signed char *) va_arg(args,signed char *);
- *s = (signed char) simple_strtol(str,&next,base);
+ signed char *s = (signed char *)va_arg(args, signed char *);
+ *s = (signed char)simple_strtol(str, &next, base);
} else {
- unsigned char *s = (unsigned char *) va_arg(args, unsigned char *);
- *s = (unsigned char) simple_strtoul(str, &next, base);
+ unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
+ *s = (unsigned char)simple_strtoul(str, &next, base);
}
break;
case 'h':
if (is_sign) {
- short *s = (short *) va_arg(args,short *);
- *s = (short) simple_strtol(str,&next,base);
+ short *s = (short *)va_arg(args, short *);
+ *s = (short)simple_strtol(str, &next, base);
} else {
- unsigned short *s = (unsigned short *) va_arg(args, unsigned short *);
- *s = (unsigned short) simple_strtoul(str, &next, base);
+ unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
+ *s = (unsigned short)simple_strtoul(str, &next, base);
}
break;
case 'l':
if (is_sign) {
- long *l = (long *) va_arg(args,long *);
- *l = simple_strtol(str,&next,base);
+ long *l = (long *)va_arg(args, long *);
+ *l = simple_strtol(str, &next, base);
} else {
- unsigned long *l = (unsigned long*) va_arg(args,unsigned long*);
- *l = simple_strtoul(str,&next,base);
+ unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
+ *l = simple_strtoul(str, &next, base);
}
break;
case 'L':
if (is_sign) {
- long long *l = (long long*) va_arg(args,long long *);
- *l = simple_strtoll(str,&next,base);
+ long long *l = (long long *)va_arg(args, long long *);
+ *l = simple_strtoll(str, &next, base);
} else {
- unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*);
- *l = simple_strtoull(str,&next,base);
+ unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
+ *l = simple_strtoull(str, &next, base);
}
break;
case 'Z':
case 'z':
{
- size_t *s = (size_t*) va_arg(args,size_t*);
- *s = (size_t) simple_strtoul(str,&next,base);
+ size_t *s = (size_t *)va_arg(args, size_t *);
+ *s = (size_t)simple_strtoul(str, &next, base);
}
break;
default:
if (is_sign) {
- int *i = (int *) va_arg(args, int*);
- *i = (int) simple_strtol(str,&next,base);
+ int *i = (int *)va_arg(args, int *);
+ *i = (int)simple_strtol(str, &next, base);
} else {
- unsigned int *i = (unsigned int*) va_arg(args, unsigned int*);
- *i = (unsigned int) simple_strtoul(str,&next,base);
+ unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
+ *i = (unsigned int)simple_strtoul(str, &next, base);
}
break;
}
@@ -1961,14 +2065,15 @@ EXPORT_SYMBOL(vsscanf);
* @fmt: formatting of buffer
* @...: resulting arguments
*/
-int sscanf(const char * buf, const char * fmt, ...)
+int sscanf(const char *buf, const char *fmt, ...)
{
va_list args;
int i;
- va_start(args,fmt);
- i = vsscanf(buf,fmt,args);
+ va_start(args, fmt);
+ i = vsscanf(buf, fmt, args);
va_end(args);
+
return i;
}
EXPORT_SYMBOL(sscanf);
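The hunks above swap the open-coded isspace() loops for the skip_spaces() helper and fold the 'l'/'L' and 'z'/'Z' qualifier tests through TOLOWER(). As a minimal sketch only, here is a user-space analogue of skip_spaces(); the real helper lives elsewhere in lib/ and is declared in linux/string.h, so this re-implementation is purely illustrative:

#include <ctype.h>

/* Advance past any leading white space and return the first
 * non-space character, mirroring the kernel helper's behaviour. */
static char *skip_spaces_demo(const char *str)
{
	while (isspace((unsigned char)*str))
		++str;
	return (char *)str;
}

This is why the paired while loops in the white-space hunk collapse into two single assignments.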
diff --git a/mm/Kconfig b/mm/Kconfig
index 44cf6f0a3a6..ee9f3e0f2b6 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -158,11 +158,13 @@ config PAGEFLAGS_EXTENDED
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
+# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
int
- default "4096" if ARM && !CPU_CACHE_VIPT
- default "4096" if PARISC && !PA20
+ default "999999" if ARM && !CPU_CACHE_VIPT
+ default "999999" if PARISC && !PA20
+ default "999999" if DEBUG_SPINLOCK || DEBUG_LOCK_ALLOC
default "4"
#
@@ -200,14 +202,6 @@ config VIRT_TO_BUS
def_bool y
depends on !ARCH_NO_VIRT_TO_BUS
-config HAVE_MLOCK
- bool
- default y if MMU=y
-
-config HAVE_MLOCKED_PAGE_BIT
- bool
- default y if HAVE_MLOCK=y
-
config MMU_NOTIFIER
bool
@@ -218,7 +212,7 @@ config KSM
Enable Kernel Samepage Merging: KSM periodically scans those areas
of an application's address space that an app has advised may be
mergeable. When it finds pages of identical content, it replaces
- the many instances by a single resident page with that content, so
+ the many instances by a single page with that content, so
saving memory until one or another app needs to modify the content.
Recommended for use with KVM, or with other duplicative applications.
See Documentation/vm/ksm.txt for more information: KSM is inactive
@@ -227,6 +221,7 @@ config KSM
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
+ depends on MMU
default 4096
help
This is the portion of low virtual memory which should be protected
@@ -257,8 +252,9 @@ config MEMORY_FAILURE
special hardware support and typically ECC memory.
config HWPOISON_INJECT
- tristate "Poison pages injector"
+ tristate "HWPoison pages injector"
depends on MEMORY_FAILURE && DEBUG_KERNEL
+ select PROC_PAGE_MONITOR
config NOMMU_INITIAL_TRIM_EXCESS
int "Turn on mmap() excess space trimming before booting"
diff --git a/mm/Makefile b/mm/Makefile
index ebf849042ed..7a68d2ab556 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
-obj-$(CONFIG_TMPFS_POSIX_ACL) += shmem_acl.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_KSM) += ksm.o
@@ -34,11 +33,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
-ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
obj-$(CONFIG_SMP) += percpu.o
-else
-obj-$(CONFIG_SMP) += allocpercpu.o
-endif
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
deleted file mode 100644
index df34ceae0c6..00000000000
--- a/mm/allocpercpu.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * linux/mm/allocpercpu.c
- *
- * Separated from slab.c August 11, 2006 Christoph Lameter
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <asm/sections.h>
-
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-static void percpu_depopulate(void *__pdata, int cpu)
-{
- struct percpu_data *pdata = __percpu_disguise(__pdata);
-
- kfree(pdata->ptrs[cpu]);
- pdata->ptrs[cpu] = NULL;
-}
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
-{
- int cpu;
- for_each_cpu_mask_nr(cpu, *mask)
- percpu_depopulate(__pdata, cpu);
-}
-
-#define percpu_depopulate_mask(__pdata, mask) \
- __percpu_depopulate_mask((__pdata), &(mask))
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
- struct percpu_data *pdata = __percpu_disguise(__pdata);
- int node = cpu_to_node(cpu);
-
- /*
- * We should make sure each CPU gets private memory.
- */
- size = roundup(size, cache_line_size());
-
- BUG_ON(pdata->ptrs[cpu]);
- if (node_online(node))
- pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
- else
- pdata->ptrs[cpu] = kzalloc(size, gfp);
- return pdata->ptrs[cpu];
-}
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
- cpumask_t *mask)
-{
- cpumask_t populated;
- int cpu;
-
- cpus_clear(populated);
- for_each_cpu_mask_nr(cpu, *mask)
- if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
- __percpu_depopulate_mask(__pdata, &populated);
- return -ENOMEM;
- } else
- cpu_set(cpu, populated);
- return 0;
-}
-
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
- __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
-/**
- * alloc_percpu - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @align: alignment
- *
- * Allocate dynamic percpu area. Percpu objects are populated with
- * zeroed buffers.
- */
-void *__alloc_percpu(size_t size, size_t align)
-{
- /*
- * We allocate whole cache lines to avoid false sharing
- */
- size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
- void *pdata = kzalloc(sz, GFP_KERNEL);
- void *__pdata = __percpu_disguise(pdata);
-
- /*
- * Can't easily make larger alignment work with kmalloc. WARN
- * on it. Larger alignment should only be used for module
- * percpu sections on SMP for which this path isn't used.
- */
- WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-
- if (unlikely(!pdata))
- return NULL;
- if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
- &cpu_possible_map)))
- return __pdata;
- kfree(pdata);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * free_percpu - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void free_percpu(void *__pdata)
-{
- if (unlikely(!__pdata))
- return;
- __percpu_depopulate_mask(__pdata, cpu_possible_mask);
- kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(free_percpu);
-
-/*
- * Generic percpu area setup.
- */
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-void __init setup_per_cpu_areas(void)
-{
- unsigned long size, i;
- char *ptr;
- unsigned long nr_possible_cpus = num_possible_cpus();
-
- /* Copy section for each CPU (we discard the original) */
- size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
- ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
- for_each_possible_cpu(i) {
- __per_cpu_offset[i] = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
- ptr += size;
- }
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
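With the legacy allocator above removed, callers go through the dynamic percpu allocator. A minimal sketch of typical usage follows; the demo_stats structure and helper names are hypothetical, while alloc_percpu(), per_cpu_ptr() and free_percpu() are the standard interfaces the remaining mm/percpu.c path provides:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct demo_stats {
	unsigned long hits;		/* hypothetical per-CPU counter */
};

static struct demo_stats *demo_stats;

static int demo_stats_init(void)
{
	int cpu;

	demo_stats = alloc_percpu(struct demo_stats);	/* one copy per possible CPU */
	if (!demo_stats)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(demo_stats, cpu)->hits = 0;
	return 0;
}

static void demo_stats_exit(void)
{
	free_percpu(demo_stats);
}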
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d1dc23cc7f1..7d1486875e1 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -432,8 +432,8 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
return mark_bootmem(start, end, 1, flags);
}
-static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
- unsigned long step)
+static unsigned long __init align_idx(struct bootmem_data *bdata,
+ unsigned long idx, unsigned long step)
{
unsigned long base = bdata->node_min_pfn;
@@ -445,8 +445,8 @@ static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
return ALIGN(base + idx, step) - base;
}
-static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
- unsigned long align)
+static unsigned long __init align_off(struct bootmem_data *bdata,
+ unsigned long off, unsigned long align)
{
unsigned long base = PFN_PHYS(bdata->node_min_pfn);
diff --git a/mm/filemap.c b/mm/filemap.c
index c3d3506ecab..96ac6b0eb6c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -260,27 +260,27 @@ int filemap_flush(struct address_space *mapping)
EXPORT_SYMBOL(filemap_flush);
/**
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping: target address_space
- * @start: beginning page index
- * @end: ending page index
+ * filemap_fdatawait_range - wait for writeback to complete
+ * @mapping: address space structure to wait for
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
*
- * Wait for writeback to complete against pages indexed by start->end
- * inclusive
+ * Walk the list of under-writeback pages of the given address space
+ * in the given range and wait for all of them.
*/
-int wait_on_page_writeback_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end)
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+ loff_t end_byte)
{
+ pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
+ pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
struct pagevec pvec;
int nr_pages;
int ret = 0;
- pgoff_t index;
- if (end < start)
+ if (end_byte < start_byte)
return 0;
pagevec_init(&pvec, 0);
- index = start;
while ((index <= end) &&
(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
PAGECACHE_TAG_WRITEBACK,
@@ -310,25 +310,6 @@ int wait_on_page_writeback_range(struct address_space *mapping,
return ret;
}
-
-/**
- * filemap_fdatawait_range - wait for all under-writeback pages to complete in a given range
- * @mapping: address space structure to wait for
- * @start: offset in bytes where the range starts
- * @end: offset in bytes where the range ends (inclusive)
- *
- * Walk the list of under-writeback pages of the given address space
- * in the given range and wait for all of them.
- *
- * This is just a simple wrapper so that callers don't have to convert offsets
- * to page indexes themselves
- */
-int filemap_fdatawait_range(struct address_space *mapping, loff_t start,
- loff_t end)
-{
- return wait_on_page_writeback_range(mapping, start >> PAGE_CACHE_SHIFT,
- end >> PAGE_CACHE_SHIFT);
-}
EXPORT_SYMBOL(filemap_fdatawait_range);
/**
@@ -345,8 +326,7 @@ int filemap_fdatawait(struct address_space *mapping)
if (i_size == 0)
return 0;
- return wait_on_page_writeback_range(mapping, 0,
- (i_size - 1) >> PAGE_CACHE_SHIFT);
+ return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);
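The consolidation above changes the waiting interface from page indexes to byte offsets with an inclusive end. A hedged sketch of a caller after this change; demo_wait_dirty_range() is an illustrative wrapper, not kernel code:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Wait for writeback on [pos, pos + len) of a mapping.  Byte offsets,
 * end inclusive, so no PAGE_CACHE_SHIFT conversion is needed here. */
static int demo_wait_dirty_range(struct address_space *mapping,
				 loff_t pos, size_t len)
{
	if (!len)
		return 0;
	return filemap_fdatawait_range(mapping, pos, pos + len - 1);
}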
@@ -393,9 +373,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
WB_SYNC_ALL);
/* See comment of filemap_write_and_wait() */
if (err != -EIO) {
- int err2 = wait_on_page_writeback_range(mapping,
- lstart >> PAGE_CACHE_SHIFT,
- lend >> PAGE_CACHE_SHIFT);
+ int err2 = filemap_fdatawait_range(mapping,
+ lstart, lend);
if (!err)
err = err2;
}
@@ -2261,7 +2240,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
size_t count, ssize_t written)
{
struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
ssize_t status;
struct iov_iter i;
@@ -2273,15 +2251,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
*ppos = pos + status;
}
- /*
- * If we get here for O_DIRECT writes then we must have fallen through
- * to buffered writes (block instantiation inside i_size). So we sync
- * the file data here, to try to honour O_DIRECT expectations.
- */
- if (unlikely(file->f_flags & O_DIRECT) && written)
- status = filemap_write_and_wait_range(mapping,
- pos, pos + written - 1);
-
return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
@@ -2380,10 +2349,7 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
* semantics.
*/
endbyte = pos + written_buffered - written - 1;
- err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
- SYNC_FILE_RANGE_WAIT_BEFORE|
- SYNC_FILE_RANGE_WRITE|
- SYNC_FILE_RANGE_WAIT_AFTER);
+ err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
if (err == 0) {
written = written_buffered;
invalidate_mapping_pages(mapping,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5d7601b0287..65f38c21820 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <linux/hugetlb.h>
+#include <linux/node.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
@@ -622,42 +623,66 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
}
/*
- * Use a helper variable to find the next node and then
- * copy it back to next_nid_to_alloc afterwards:
- * otherwise there's a window in which a racer might
- * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
- * But we don't need to use a spin_lock here: it really
- * doesn't matter if occasionally a racer chooses the
- * same nid as we do. Move nid forward in the mask even
- * if we just successfully allocated a hugepage so that
- * the next caller gets hugepages on the next node.
+ * common helper functions for hstate_next_node_to_{alloc|free}.
+ * We may have allocated or freed a huge page based on a different
+ * nodes_allowed previously, so h->next_nid_to_{alloc|free} might
+ * be outside of *nodes_allowed. Ensure that we use an allowed
+ * node for alloc or free.
*/
-static int hstate_next_node_to_alloc(struct hstate *h)
+static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
- int next_nid;
- next_nid = next_node(h->next_nid_to_alloc, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
- h->next_nid_to_alloc = next_nid;
- return next_nid;
+ nid = next_node(nid, *nodes_allowed);
+ if (nid == MAX_NUMNODES)
+ nid = first_node(*nodes_allowed);
+ VM_BUG_ON(nid >= MAX_NUMNODES);
+
+ return nid;
+}
+
+static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
+{
+ if (!node_isset(nid, *nodes_allowed))
+ nid = next_node_allowed(nid, nodes_allowed);
+ return nid;
+}
+
+/*
+ * returns the previously saved node ["this node"] from which to
+ * allocate a persistent huge page for the pool and advance the
+ * next node from which to allocate, handling wrap at end of node
+ * mask.
+ */
+static int hstate_next_node_to_alloc(struct hstate *h,
+ nodemask_t *nodes_allowed)
+{
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
+ h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
+
+ return nid;
}
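The wrap-around that next_node_allowed() performs can be pictured with a plain bitmask. The sketch below is illustrative only (next_allowed() is not a kernel helper) and assumes the mask has at least one bit set and no more than BITS_PER_LONG node ids:

/* Round-robin successor of nid within 'allowed', wrapping at max_nodes.
 * Mirrors next_node_allowed(): advance first, then skip disallowed ids. */
static int next_allowed(int nid, unsigned long allowed, int max_nodes)
{
	do {
		nid = (nid + 1) % max_nodes;
	} while (!(allowed & (1UL << nid)));
	return nid;
}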
-static int alloc_fresh_huge_page(struct hstate *h)
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
struct page *page;
int start_nid;
int next_nid;
int ret = 0;
- start_nid = h->next_nid_to_alloc;
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
next_nid = start_nid;
do {
page = alloc_fresh_huge_page_node(h, next_nid);
- if (page)
+ if (page) {
ret = 1;
- next_nid = hstate_next_node_to_alloc(h);
- } while (!page && next_nid != start_nid);
+ break;
+ }
+ next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+ } while (next_nid != start_nid);
if (ret)
count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -668,17 +693,21 @@ static int alloc_fresh_huge_page(struct hstate *h)
}
/*
- * helper for free_pool_huge_page() - find next node
- * from which to free a huge page
+ * helper for free_pool_huge_page() - return the previously saved
+ * node ["this node"] from which to free a huge page. Advance the
+ * next node id whether or not we find a free huge page to free so
+ * that the next attempt to free addresses the next node.
*/
-static int hstate_next_node_to_free(struct hstate *h)
+static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
- int next_nid;
- next_nid = next_node(h->next_nid_to_free, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
- h->next_nid_to_free = next_nid;
- return next_nid;
+ int nid;
+
+ VM_BUG_ON(!nodes_allowed);
+
+ nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
+ h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
+
+ return nid;
}
/*
@@ -687,13 +716,14 @@ static int hstate_next_node_to_free(struct hstate *h)
* balanced over allowed nodes.
* Called with hugetlb_lock locked.
*/
-static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
+static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
+ bool acct_surplus)
{
int start_nid;
int next_nid;
int ret = 0;
- start_nid = h->next_nid_to_free;
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
@@ -715,9 +745,10 @@ static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
}
update_and_free_page(h, page);
ret = 1;
+ break;
}
- next_nid = hstate_next_node_to_free(h);
- } while (!ret && next_nid != start_nid);
+ next_nid = hstate_next_node_to_free(h, nodes_allowed);
+ } while (next_nid != start_nid);
return ret;
}
@@ -911,14 +942,14 @@ static void return_unused_surplus_pages(struct hstate *h,
/*
* We want to release as many surplus pages as possible, spread
- * evenly across all nodes. Iterate across all nodes until we
- * can no longer free unreserved surplus pages. This occurs when
- * the nodes with surplus pages have no free pages.
- * free_pool_huge_page() will balance the the frees across the
- * on-line nodes for us and will handle the hstate accounting.
+ * evenly across all nodes with memory. Iterate across these nodes
+ * until we can no longer free unreserved surplus pages. This occurs
+ * when the nodes with surplus pages have no free pages.
+	 * free_pool_huge_page() will balance the freed pages across the
+ * on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
- if (!free_pool_huge_page(h, 1))
+ if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
break;
}
}
@@ -1022,16 +1053,16 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
- int nr_nodes = nodes_weight(node_online_map);
+ int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
while (nr_nodes) {
void *addr;
addr = __alloc_bootmem_node_nopanic(
- NODE_DATA(h->next_nid_to_alloc),
+ NODE_DATA(hstate_next_node_to_alloc(h,
+ &node_states[N_HIGH_MEMORY])),
huge_page_size(h), huge_page_size(h), 0);
- hstate_next_node_to_alloc(h);
if (addr) {
/*
* Use the beginning of the huge page to store the
@@ -1084,7 +1115,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
if (h->order >= MAX_ORDER) {
if (!alloc_bootmem_huge_page(h))
break;
- } else if (!alloc_fresh_huge_page(h))
+ } else if (!alloc_fresh_huge_page(h,
+ &node_states[N_HIGH_MEMORY]))
break;
}
h->max_huge_pages = i;
@@ -1126,14 +1158,15 @@ static void __init report_hugepages(void)
}
#ifdef CONFIG_HIGHMEM
-static void try_to_free_low(struct hstate *h, unsigned long count)
+static void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
int i;
if (h->order >= MAX_ORDER)
return;
- for (i = 0; i < MAX_NUMNODES; ++i) {
+ for_each_node_mask(i, *nodes_allowed) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
list_for_each_entry_safe(page, next, freel, lru) {
@@ -1149,7 +1182,8 @@ static void try_to_free_low(struct hstate *h, unsigned long count)
}
}
#else
-static inline void try_to_free_low(struct hstate *h, unsigned long count)
+static inline void try_to_free_low(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
}
#endif
@@ -1159,7 +1193,8 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
-static int adjust_pool_surplus(struct hstate *h, int delta)
+static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
+ int delta)
{
int start_nid, next_nid;
int ret = 0;
@@ -1167,29 +1202,33 @@ static int adjust_pool_surplus(struct hstate *h, int delta)
VM_BUG_ON(delta != -1 && delta != 1);
if (delta < 0)
- start_nid = h->next_nid_to_alloc;
+ start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
else
- start_nid = h->next_nid_to_free;
+ start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
int nid = next_nid;
if (delta < 0) {
- next_nid = hstate_next_node_to_alloc(h);
/*
* To shrink on this node, there must be a surplus page
*/
- if (!h->surplus_huge_pages_node[nid])
+ if (!h->surplus_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_alloc(h,
+ nodes_allowed);
continue;
+ }
}
if (delta > 0) {
- next_nid = hstate_next_node_to_free(h);
/*
* Surplus cannot exceed the total number of pages
*/
if (h->surplus_huge_pages_node[nid] >=
- h->nr_huge_pages_node[nid])
+ h->nr_huge_pages_node[nid]) {
+ next_nid = hstate_next_node_to_free(h,
+ nodes_allowed);
continue;
+ }
}
h->surplus_huge_pages += delta;
@@ -1202,7 +1241,8 @@ static int adjust_pool_surplus(struct hstate *h, int delta)
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
+static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
@@ -1222,7 +1262,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, -1))
+ if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
@@ -1233,11 +1273,14 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
- ret = alloc_fresh_huge_page(h);
+ ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
+ /* Bail for signals. Probably ctrl-c from user */
+ if (signal_pending(current))
+ goto out;
}
/*
@@ -1257,13 +1300,13 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
- try_to_free_low(h, min_count);
+ try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
- if (!free_pool_huge_page(h, 0))
+ if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
}
while (count < persistent_huge_pages(h)) {
- if (!adjust_pool_surplus(h, 1))
+ if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
@@ -1282,43 +1325,117 @@ out:
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
-static struct hstate *kobj_to_hstate(struct kobject *kobj)
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
+
+static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
int i;
+
for (i = 0; i < HUGE_MAX_HSTATE; i++)
- if (hstate_kobjs[i] == kobj)
+ if (hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = NUMA_NO_NODE;
return &hstates[i];
- BUG();
- return NULL;
+ }
+
+ return kobj_to_node_hstate(kobj, nidp);
}
-static ssize_t nr_hugepages_show(struct kobject *kobj,
+static ssize_t nr_hugepages_show_common(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->nr_huge_pages);
+ struct hstate *h;
+ unsigned long nr_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ nr_huge_pages = h->nr_huge_pages;
+ else
+ nr_huge_pages = h->nr_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", nr_huge_pages);
}
-static ssize_t nr_hugepages_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t len)
{
int err;
- unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
+ int nid;
+ unsigned long count;
+ struct hstate *h;
+ NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
- err = strict_strtoul(buf, 10, &input);
+ err = strict_strtoul(buf, 10, &count);
if (err)
return 0;
- h->max_huge_pages = set_max_huge_pages(h, input);
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE) {
+ /*
+ * global hstate attribute
+ */
+ if (!(obey_mempolicy &&
+ init_nodemask_of_mempolicy(nodes_allowed))) {
+ NODEMASK_FREE(nodes_allowed);
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+ }
+ } else if (nodes_allowed) {
+ /*
+ * per node hstate attribute: adjust count to global,
+ * but restrict alloc/free to the specified node.
+ */
+ count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+ init_nodemask_of_node(nodes_allowed, nid);
+ } else
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+
+ h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
- return count;
+ if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ NODEMASK_FREE(nodes_allowed);
+
+ return len;
+}
+
+static ssize_t nr_hugepages_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t len)
+{
+ return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);
+#ifdef CONFIG_NUMA
+
+/*
+ * hstate attribute for optionally mempolicy-based constraint on persistent
+ * huge page alloc/free.
+ */
+static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t len)
+{
+ return nr_hugepages_store_common(true, kobj, attr, buf, len);
+}
+HSTATE_ATTR(nr_hugepages_mempolicy);
+#endif
+
+
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
@@ -1326,7 +1443,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
{
int err;
unsigned long input;
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
err = strict_strtoul(buf, 10, &input);
if (err)
@@ -1343,15 +1460,24 @@ HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->free_huge_pages);
+ struct hstate *h;
+ unsigned long free_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ free_huge_pages = h->free_huge_pages;
+ else
+ free_huge_pages = h->free_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
@@ -1359,8 +1485,17 @@ HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct hstate *h = kobj_to_hstate(kobj);
- return sprintf(buf, "%lu\n", h->surplus_huge_pages);
+ struct hstate *h;
+ unsigned long surplus_huge_pages;
+ int nid;
+
+ h = kobj_to_hstate(kobj, &nid);
+ if (nid == NUMA_NO_NODE)
+ surplus_huge_pages = h->surplus_huge_pages;
+ else
+ surplus_huge_pages = h->surplus_huge_pages_node[nid];
+
+ return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
@@ -1370,6 +1505,9 @@ static struct attribute *hstate_attrs[] = {
&free_hugepages_attr.attr,
&resv_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
+#ifdef CONFIG_NUMA
+ &nr_hugepages_mempolicy_attr.attr,
+#endif
NULL,
};
@@ -1377,19 +1515,21 @@ static struct attribute_group hstate_attr_group = {
.attrs = hstate_attrs,
};
-static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
+static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
+ struct kobject *parent,
+ struct kobject **hstate_kobjs,
+ struct attribute_group *hstate_attr_group)
{
int retval;
+ int hi = h - hstates;
- hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
- hugepages_kobj);
- if (!hstate_kobjs[h - hstates])
+ hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
+ if (!hstate_kobjs[hi])
return -ENOMEM;
- retval = sysfs_create_group(hstate_kobjs[h - hstates],
- &hstate_attr_group);
+ retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
- kobject_put(hstate_kobjs[h - hstates]);
+ kobject_put(hstate_kobjs[hi]);
return retval;
}
@@ -1404,17 +1544,184 @@ static void __init hugetlb_sysfs_init(void)
return;
for_each_hstate(h) {
- err = hugetlb_sysfs_add_hstate(h);
+ err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
+ hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
+#ifdef CONFIG_NUMA
+
+/*
+ * node_hstate/s - associate per node hstate attributes, via their kobjects,
+ * with node sysdevs in node_devices[] using a parallel array. The array
+ * index of a node sysdev or _hstate == node id.
+ * This is here to avoid any static dependency of the node sysdev driver, in
+ * the base kernel, on the hugetlb module.
+ */
+struct node_hstate {
+ struct kobject *hugepages_kobj;
+ struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
+};
+struct node_hstate node_hstates[MAX_NUMNODES];
+
+/*
+ * A subset of global hstate attributes for node sysdevs
+ */
+static struct attribute *per_node_hstate_attrs[] = {
+ &nr_hugepages_attr.attr,
+ &free_hugepages_attr.attr,
+ &surplus_hugepages_attr.attr,
+ NULL,
+};
+
+static struct attribute_group per_node_hstate_attr_group = {
+ .attrs = per_node_hstate_attrs,
+};
+
+/*
+ * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * Returns node id via non-NULL nidp.
+ */
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ int nid;
+
+ for (nid = 0; nid < nr_node_ids; nid++) {
+ struct node_hstate *nhs = &node_hstates[nid];
+ int i;
+ for (i = 0; i < HUGE_MAX_HSTATE; i++)
+ if (nhs->hstate_kobjs[i] == kobj) {
+ if (nidp)
+ *nidp = nid;
+ return &hstates[i];
+ }
+ }
+
+ BUG();
+ return NULL;
+}
+
+/*
+ * Unregister hstate attributes from a single node sysdev.
+ * No-op if no hstate attributes attached.
+ */
+void hugetlb_unregister_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+
+ if (!nhs->hugepages_kobj)
+ return; /* no hstate attributes */
+
+ for_each_hstate(h)
+ if (nhs->hstate_kobjs[h - hstates]) {
+ kobject_put(nhs->hstate_kobjs[h - hstates]);
+ nhs->hstate_kobjs[h - hstates] = NULL;
+ }
+
+ kobject_put(nhs->hugepages_kobj);
+ nhs->hugepages_kobj = NULL;
+}
+
+/*
+ * hugetlb module exit: unregister hstate attributes from node sysdevs
+ * that have them.
+ */
+static void hugetlb_unregister_all_nodes(void)
+{
+ int nid;
+
+ /*
+ * disable node sysdev registrations.
+ */
+ register_hugetlbfs_with_node(NULL, NULL);
+
+ /*
+ * remove hstate attributes from any nodes that have them.
+ */
+ for (nid = 0; nid < nr_node_ids; nid++)
+ hugetlb_unregister_node(&node_devices[nid]);
+}
+
+/*
+ * Register hstate attributes for a single node sysdev.
+ * No-op if attributes already registered.
+ */
+void hugetlb_register_node(struct node *node)
+{
+ struct hstate *h;
+ struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+ int err;
+
+ if (nhs->hugepages_kobj)
+ return; /* already allocated */
+
+ nhs->hugepages_kobj = kobject_create_and_add("hugepages",
+ &node->sysdev.kobj);
+ if (!nhs->hugepages_kobj)
+ return;
+
+ for_each_hstate(h) {
+ err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
+ nhs->hstate_kobjs,
+ &per_node_hstate_attr_group);
+ if (err) {
+ printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
+ " for node %d\n",
+ h->name, node->sysdev.id);
+ hugetlb_unregister_node(node);
+ break;
+ }
+ }
+}
+
+/*
+ * hugetlb init time: register hstate attributes for all registered node
+ * sysdevs of nodes that have memory. All on-line nodes should have
+ * registered their associated sysdev by this time.
+ */
+static void hugetlb_register_all_nodes(void)
+{
+ int nid;
+
+ for_each_node_state(nid, N_HIGH_MEMORY) {
+ struct node *node = &node_devices[nid];
+ if (node->sysdev.id == nid)
+ hugetlb_register_node(node);
+ }
+
+ /*
+ * Let the node sysdev driver know we're here so it can
+ * [un]register hstate attributes on node hotplug.
+ */
+ register_hugetlbfs_with_node(hugetlb_register_node,
+ hugetlb_unregister_node);
+}
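Once the per-node registration above has run, each memory node exposes its own hugepage attributes under sysfs. A hedged user-space sketch of driving the per-node nr_hugepages file; the hugepages-2048kB hstate name and the helper are assumptions about a typical x86_64 system, not something this patch itself guarantees:

#include <stdio.h>

/* Write 'count' to the per-node nr_hugepages attribute for node 'nid'. */
static int set_node_hugepages(int nid, unsigned long count)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/node/node%d/hugepages/hugepages-2048kB/nr_hugepages",
		 nid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%lu\n", count);
	return fclose(f);
}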
+#else /* !CONFIG_NUMA */
+
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+ BUG();
+ if (nidp)
+ *nidp = -1;
+ return NULL;
+}
+
+static void hugetlb_unregister_all_nodes(void) { }
+
+static void hugetlb_register_all_nodes(void) { }
+
+#endif
+
static void __exit hugetlb_exit(void)
{
struct hstate *h;
+ hugetlb_unregister_all_nodes();
+
for_each_hstate(h) {
kobject_put(hstate_kobjs[h - hstates]);
}
@@ -1449,6 +1756,8 @@ static int __init hugetlb_init(void)
hugetlb_sysfs_init();
+ hugetlb_register_all_nodes();
+
return 0;
}
module_init(hugetlb_init);
@@ -1472,8 +1781,8 @@ void __init hugetlb_add_hstate(unsigned order)
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
- h->next_nid_to_alloc = first_node(node_online_map);
- h->next_nid_to_free = first_node(node_online_map);
+ h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
+ h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
@@ -1536,9 +1845,9 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
}
#ifdef CONFIG_SYSCTL
-int hugetlb_sysctl_handler(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *length, loff_t *ppos)
+static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+ struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
@@ -1550,12 +1859,40 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
table->maxlen = sizeof(unsigned long);
proc_doulongvec_minmax(table, write, buffer, length, ppos);
- if (write)
- h->max_huge_pages = set_max_huge_pages(h, tmp);
+ if (write) {
+ NODEMASK_ALLOC(nodemask_t, nodes_allowed,
+ GFP_KERNEL | __GFP_NORETRY);
+ if (!(obey_mempolicy &&
+ init_nodemask_of_mempolicy(nodes_allowed))) {
+ NODEMASK_FREE(nodes_allowed);
+ nodes_allowed = &node_states[N_HIGH_MEMORY];
+ }
+ h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
+
+ if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+ NODEMASK_FREE(nodes_allowed);
+ }
return 0;
}
+int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+
+ return hugetlb_sysctl_handler_common(false, table, write,
+ buffer, length, ppos);
+}
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+{
+ return hugetlb_sysctl_handler_common(true, table, write,
+ buffer, length, ppos);
+}
+#endif /* CONFIG_NUMA */
+
int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
@@ -1903,6 +2240,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ (vma->vm_pgoff >> PAGE_SHIFT);
mapping = (struct address_space *)page_private(page);
+ /*
+	 * Take the mapping lock for the duration of the table walk. Since
+	 * this mapping is shared between all the VMAs,
+	 * __unmap_hugepage_range() is called below because the lock is already held
+ */
+ spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
/* Do not unmap the current VMA */
if (iter_vma == vma)
@@ -1916,10 +2259,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* from the time of fork. This would look like data corruption
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
- unmap_hugepage_range(iter_vma,
+ __unmap_hugepage_range(iter_vma,
address, address + huge_page_size(h),
page);
}
+ spin_unlock(&mapping->i_mmap_lock);
return 1;
}
@@ -1959,6 +2303,9 @@ retry_avoidcopy:
outside_reserve = 1;
page_cache_get(old_page);
+
+ /* Drop page_table_lock as buddy allocator may be called */
+ spin_unlock(&mm->page_table_lock);
new_page = alloc_huge_page(vma, address, outside_reserve);
if (IS_ERR(new_page)) {
@@ -1976,19 +2323,25 @@ retry_avoidcopy:
if (unmap_ref_private(mm, vma, old_page, address)) {
BUG_ON(page_count(old_page) != 1);
BUG_ON(huge_pte_none(pte));
+ spin_lock(&mm->page_table_lock);
goto retry_avoidcopy;
}
WARN_ON_ONCE(1);
}
+ /* Caller expects lock to be held */
+ spin_lock(&mm->page_table_lock);
return -PTR_ERR(new_page);
}
- spin_unlock(&mm->page_table_lock);
copy_huge_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
- spin_lock(&mm->page_table_lock);
+ /*
+ * Retake the page_table_lock to check for racing updates
+ * before the page tables are altered
+ */
+ spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW */
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index e1d85137f08..10ea71905c1 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -3,18 +3,68 @@
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include "internal.h"
-static struct dentry *hwpoison_dir, *corrupt_pfn;
+static struct dentry *hwpoison_dir;
static int hwpoison_inject(void *data, u64 val)
{
+ unsigned long pfn = val;
+ struct page *p;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!hwpoison_filter_enable)
+ goto inject;
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+
+ p = pfn_to_page(pfn);
+ /*
+	 * This implies that free buddy pages cannot be supported.
+ */
+ if (!get_page_unless_zero(p))
+ return 0;
+
+ if (!PageLRU(p))
+ shake_page(p, 0);
+ /*
+	 * This implies that non-LRU pages cannot be supported.
+ */
+ if (!PageLRU(p))
+ return 0;
+
+ /*
+ * do a racy check with elevated page count, to make sure PG_hwpoison
+ * will only be set for the targeted owner (or on a free page).
+ * We temporarily take page lock for try_get_mem_cgroup_from_page().
+ * __memory_failure() will redo the check reliably inside page lock.
+ */
+ lock_page(p);
+ err = hwpoison_filter(p);
+ unlock_page(p);
+ if (err)
+ return 0;
+
+inject:
+ printk(KERN_INFO "Injecting memory failure at pfn %lx\n", pfn);
+ return __memory_failure(pfn, 18, MF_COUNT_INCREASED);
+}
+
+static int hwpoison_unpoison(void *data, u64 val)
+{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- printk(KERN_INFO "Injecting memory failure at pfn %Lx\n", val);
- return __memory_failure(val, 18, 0);
+
+ return unpoison_memory(val);
}
DEFINE_SIMPLE_ATTRIBUTE(hwpoison_fops, NULL, hwpoison_inject, "%lli\n");
+DEFINE_SIMPLE_ATTRIBUTE(unpoison_fops, NULL, hwpoison_unpoison, "%lli\n");
static void pfn_inject_exit(void)
{
@@ -24,16 +74,63 @@ static void pfn_inject_exit(void)
static int pfn_inject_init(void)
{
+ struct dentry *dentry;
+
hwpoison_dir = debugfs_create_dir("hwpoison", NULL);
if (hwpoison_dir == NULL)
return -ENOMEM;
- corrupt_pfn = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir,
+
+ /*
+ * Note that the below poison/unpoison interfaces do not involve
+ * hardware status change, hence do not require hardware support.
+	 * They are mainly for testing hwpoison at the software level.
+ */
+ dentry = debugfs_create_file("corrupt-pfn", 0600, hwpoison_dir,
NULL, &hwpoison_fops);
- if (corrupt_pfn == NULL) {
- pfn_inject_exit();
- return -ENOMEM;
- }
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_file("unpoison-pfn", 0600, hwpoison_dir,
+ NULL, &unpoison_fops);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u32("corrupt-filter-enable", 0600,
+ hwpoison_dir, &hwpoison_filter_enable);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u32("corrupt-filter-dev-major", 0600,
+ hwpoison_dir, &hwpoison_filter_dev_major);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u32("corrupt-filter-dev-minor", 0600,
+ hwpoison_dir, &hwpoison_filter_dev_minor);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u64("corrupt-filter-flags-mask", 0600,
+ hwpoison_dir, &hwpoison_filter_flags_mask);
+ if (!dentry)
+ goto fail;
+
+ dentry = debugfs_create_u64("corrupt-filter-flags-value", 0600,
+ hwpoison_dir, &hwpoison_filter_flags_value);
+ if (!dentry)
+ goto fail;
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+ dentry = debugfs_create_u64("corrupt-filter-memcg", 0600,
+ hwpoison_dir, &hwpoison_filter_memcg);
+ if (!dentry)
+ goto fail;
+#endif
+
return 0;
+fail:
+ pfn_inject_exit();
+ return -ENOMEM;
}
module_init(pfn_inject_init);
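A hedged sketch of exercising the injector from user space once this module is loaded; the debugfs mount point and the helper are assumptions, while the file name matches the one created in pfn_inject_init() above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Poison page frame 'pfn' through the debugfs interface. */
static int inject_hwpoison(unsigned long pfn)
{
	char buf[32];
	int fd, len;

	fd = open("/sys/kernel/debug/hwpoison/corrupt-pfn", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%lu\n", pfn);
	if (write(fd, buf, len) != len) {	/* triggers __memory_failure() on pfn */
		close(fd);
		return -1;
	}
	return close(fd);
}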
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb..6a697bb97fc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -50,6 +50,9 @@ extern void putback_lru_page(struct page *page);
*/
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
+#ifdef CONFIG_MEMORY_FAILURE
+extern bool is_free_buddy_page(struct page *page);
+#endif
/*
@@ -63,7 +66,7 @@ static inline unsigned long page_order(struct page *page)
return page_private(page);
}
-#ifdef CONFIG_HAVE_MLOCK
+#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
@@ -72,22 +75,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
-#endif
/*
- * unevictable_migrate_page() called only from migrate_page_copy() to
- * migrate unevictable flag to new page.
- * Note that the old page has been isolated from the LRU lists at this
- * point so we don't need to worry about LRU statistics.
- */
-static inline void unevictable_migrate_page(struct page *new, struct page *old)
-{
- if (TestClearPageUnevictable(old))
- SetPageUnevictable(new);
-}
-
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-/*
* Called only in fault path via page_evictable() for a new page
* to determine if it's being mapped into a LOCKED vma.
* If so, mark page as mlocked.
@@ -107,9 +96,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
}
/*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
*/
extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
/*
* Clear the page's PageMlocked(). This can be useful in a situation where
@@ -144,7 +134,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
}
}
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
return 0;
@@ -153,7 +143,7 @@ static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
/*
* Return the mem_map entry representing the 'offset' subpage within
@@ -260,3 +250,12 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
#define ZONE_RECLAIM_SOME 0
#define ZONE_RECLAIM_SUCCESS 1
#endif
+
+extern int hwpoison_filter(struct page *p);
+
+extern u32 hwpoison_filter_dev_major;
+extern u32 hwpoison_filter_dev_minor;
+extern u64 hwpoison_filter_flags_mask;
+extern u64 hwpoison_filter_flags_value;
+extern u64 hwpoison_filter_memcg;
+extern u32 hwpoison_filter_enable;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 13f33b3081e..5b069e4f5e4 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -93,6 +93,7 @@
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
+#include <linux/crc32.h>
#include <asm/sections.h>
#include <asm/processor.h>
@@ -108,7 +109,6 @@
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
#define BYTES_PER_POINTER sizeof(void *)
@@ -119,8 +119,8 @@
/* scanning area inside a memory block */
struct kmemleak_scan_area {
struct hlist_node node;
- unsigned long offset;
- size_t length;
+ unsigned long start;
+ size_t size;
};
#define KMEMLEAK_GREY 0
@@ -149,6 +149,8 @@ struct kmemleak_object {
int min_count;
/* the total number of pointers found pointing to this object */
int count;
+ /* checksum for detecting modified objects */
+ u32 checksum;
/* memory ranges to be scanned inside an object (empty for all) */
struct hlist_head area_list;
unsigned long trace[MAX_TRACE];
@@ -164,8 +166,6 @@ struct kmemleak_object {
#define OBJECT_REPORTED (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW (1 << 3)
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE 16
@@ -241,8 +241,6 @@ struct early_log {
const void *ptr; /* allocated/freed memory block */
size_t size; /* memory block size */
int min_count; /* minimum reference count */
- unsigned long offset; /* scan area offset */
- size_t length; /* scan area length */
unsigned long trace[MAX_TRACE]; /* stack trace */
unsigned int trace_len; /* stack trace length */
};
@@ -323,11 +321,6 @@ static bool color_gray(const struct kmemleak_object *object)
object->count >= object->min_count;
}
-static bool color_black(const struct kmemleak_object *object)
-{
- return object->min_count == KMEMLEAK_BLACK;
-}
-
/*
* Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
@@ -335,7 +328,7 @@ static bool color_black(const struct kmemleak_object *object)
*/
static bool unreferenced_object(struct kmemleak_object *object)
{
- return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+ return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
time_before_eq(object->jiffies + jiffies_min_age,
jiffies_last_scan);
}
@@ -348,11 +341,13 @@ static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
+ unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
- seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
- object->comm, object->pid, object->jiffies);
+ seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+ object->comm, object->pid, object->jiffies,
+ msecs_age / 1000, msecs_age % 1000);
hex_dump_object(seq, object);
seq_printf(seq, " backtrace:\n");
@@ -381,6 +376,7 @@ static void dump_object_info(struct kmemleak_object *object)
pr_notice(" min_count = %d\n", object->min_count);
pr_notice(" count = %d\n", object->count);
pr_notice(" flags = 0x%lx\n", object->flags);
+ pr_notice(" checksum = %d\n", object->checksum);
pr_notice(" backtrace:\n");
print_stack_trace(&trace, 4);
}
@@ -522,12 +518,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
INIT_HLIST_HEAD(&object->area_list);
spin_lock_init(&object->lock);
atomic_set(&object->use_count, 1);
- object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+ object->flags = OBJECT_ALLOCATED;
object->pointer = ptr;
object->size = size;
object->min_count = min_count;
- object->count = -1; /* no color initially */
+ object->count = 0; /* white color initially */
object->jiffies = jiffies;
+ object->checksum = 0;
/* task information */
if (in_irq()) {
@@ -720,14 +717,13 @@ static void make_black_object(unsigned long ptr)
* Add a scanning area to the object. If at least one such area is added,
* kmemleak will only scan these ranges rather than the whole memory block.
*/
-static void add_scan_area(unsigned long ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
unsigned long flags;
struct kmemleak_object *object;
struct kmemleak_scan_area *area;
- object = find_and_get_object(ptr, 0);
+ object = find_and_get_object(ptr, 1);
if (!object) {
kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
ptr);
@@ -741,7 +737,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
}
spin_lock_irqsave(&object->lock, flags);
- if (offset + length > object->size) {
+ if (ptr + size > object->pointer + object->size) {
kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
@@ -749,8 +745,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
}
INIT_HLIST_NODE(&area->node);
- area->offset = offset;
- area->length = length;
+ area->start = ptr;
+ area->size = size;
hlist_add_head(&area->node, &object->area_list);
out_unlock:
@@ -786,7 +782,7 @@ static void object_no_scan(unsigned long ptr)
* processed later once kmemleak is fully initialized.
*/
static void __init log_early(int op_type, const void *ptr, size_t size,
- int min_count, unsigned long offset, size_t length)
+ int min_count)
{
unsigned long flags;
struct early_log *log;
@@ -808,8 +804,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
log->ptr = ptr;
log->size = size;
log->min_count = min_count;
- log->offset = offset;
- log->length = length;
if (op_type == KMEMLEAK_ALLOC)
log->trace_len = __save_stack_trace(log->trace);
crt_early_log++;
@@ -858,7 +852,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
create_object((unsigned long)ptr, size, min_count, gfp);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+ log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -873,7 +867,7 @@ void __ref kmemleak_free(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
delete_object_full((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -888,7 +882,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
delete_object_part((unsigned long)ptr, size);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+ log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -903,7 +897,7 @@ void __ref kmemleak_not_leak(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
make_gray_object((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
@@ -919,22 +913,21 @@ void __ref kmemleak_ignore(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
make_black_object((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);
/*
* Limit the range to be scanned in an allocated memory block.
*/
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
- add_scan_area((unsigned long)ptr, offset, length, gfp);
+ add_scan_area((unsigned long)ptr, size, gfp);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+ log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
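After this API change a caller passes the start pointer and size of the region to scan directly, instead of an offset/length pair relative to the tracked object. A minimal sketch with a hypothetical structure:

#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/gfp.h>

struct demo_obj {
	char blob[256];		/* payload holding no kernel pointers */
	struct list_head link;	/* the only part worth scanning */
};

static void demo_register(struct demo_obj *obj, gfp_t gfp)
{
	/* old form (removed): pointer to the object plus offset/length */
	kmemleak_scan_area(&obj->link, sizeof(obj->link), gfp);
}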
@@ -948,11 +941,25 @@ void __ref kmemleak_no_scan(const void *ptr)
if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
object_no_scan((unsigned long)ptr);
else if (atomic_read(&kmemleak_early_log))
- log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+ log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
/*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+ u32 old_csum = object->checksum;
+
+ if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+ return false;
+
+ object->checksum = crc32(0, (void *)object->pointer, object->size);
+ return object->checksum != old_csum;
+}
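The checksum added above lets the scanner notice objects whose contents changed between scans, so a still-changing object gets another pass before being reported. The idea in isolation, as a hedged sketch (demo_changed() is illustrative, not a kernel function):

#include <linux/crc32.h>
#include <linux/types.h>

/* Return true if 'buf' changed since the last call, updating *last. */
static bool demo_changed(const void *buf, size_t len, u32 *last)
{
	u32 csum = crc32(0, buf, len);	/* same seed as update_checksum() */
	bool changed = (csum != *last);

	*last = csum;
	return changed;
}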
+
+/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
*/
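The update_checksum() helper added above gives kmemleak a way to notice that an unreferenced object's memory changed between scans, by comparing crc32 checksums of its contents. Below is a minimal userspace sketch of the same idea, using zlib's crc32() rather than the kernel's; struct tracked_object and the field names merely mirror the kernel helper and are illustrative, not the actual implementation (build with: cc demo.c -lz).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

struct tracked_object {
	const void *pointer;	/* start of the tracked memory block */
	size_t size;		/* length of the block */
	uint32_t checksum;	/* crc32 seen at the previous scan */
};

/* Return true if the object's memory changed since the last call. */
static bool update_checksum(struct tracked_object *obj)
{
	uint32_t old_csum = obj->checksum;

	obj->checksum = crc32(0, obj->pointer, obj->size);
	return obj->checksum != old_csum;
}

int main(void)
{
	char buf[16] = "hello";
	struct tracked_object obj = { buf, sizeof(buf), 0 };

	update_checksum(&obj);			/* prime the checksum */
	printf("%d\n", update_checksum(&obj));	/* 0: unchanged */
	strcpy(buf, "world");
	printf("%d\n", update_checksum(&obj));	/* 1: modified since last scan */
	return 0;
}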
@@ -1031,11 +1038,14 @@ static void scan_block(void *_start, void *_end,
* added to the gray_list.
*/
object->count++;
- if (color_gray(object))
+ if (color_gray(object)) {
list_add_tail(&object->gray_list, &gray_list);
- else
- put_object(object);
+ spin_unlock_irqrestore(&object->lock, flags);
+ continue;
+ }
+
spin_unlock_irqrestore(&object->lock, flags);
+ put_object(object);
}
}
@@ -1075,14 +1085,47 @@ static void scan_object(struct kmemleak_object *object)
}
} else
hlist_for_each_entry(area, elem, &object->area_list, node)
- scan_block((void *)(object->pointer + area->offset),
- (void *)(object->pointer + area->offset
- + area->length), object, 0);
+ scan_block((void *)area->start,
+ (void *)(area->start + area->size),
+ object, 0);
out:
spin_unlock_irqrestore(&object->lock, flags);
}
/*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+ struct kmemleak_object *object, *tmp;
+
+ /*
+ * The list traversal is safe for both tail additions and removals
+ * from inside the loop. The kmemleak objects cannot be freed from
+ * outside the loop because their use_count was incremented.
+ */
+ object = list_entry(gray_list.next, typeof(*object), gray_list);
+ while (&object->gray_list != &gray_list) {
+ cond_resched();
+
+ /* may add new objects to the list */
+ if (!scan_should_stop())
+ scan_object(object);
+
+ tmp = list_entry(object->gray_list.next, typeof(*object),
+ gray_list);
+
+ /* remove the object from the list and release it */
+ list_del(&object->gray_list);
+ put_object(object);
+
+ object = tmp;
+ }
+ WARN_ON(!list_empty(&gray_list));
+}
+
+/*
* Scan data sections and all the referenced memory blocks allocated via the
* kernel's standard allocators. This function must be called with the
* scan_mutex held.
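The scan_gray_list() helper added above drains a list whose tail may keep growing while it is processed, which is why the traversal has to tolerate tail additions and in-loop removals. A small userspace sketch of that drain-while-appending pattern follows; the node/enqueue/scan names are invented for the illustration and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

static struct node *head;
static struct node **tail = &head;

static void enqueue(int value)
{
	struct node *n = malloc(sizeof(*n));

	n->value = value;
	n->next = NULL;
	*tail = n;
	tail = &n->next;
}

/* "Scanning" an entry may discover and enqueue more work. */
static void scan(struct node *n)
{
	printf("scanning %d\n", n->value);
	if (n->value < 3)
		enqueue(n->value + 1);
}

int main(void)
{
	enqueue(0);
	while (head) {
		struct node *n = head;

		scan(n);		/* may append new nodes to the tail */
		head = n->next;
		if (!head)
			tail = &head;	/* list drained, reset the tail */
		free(n);
	}
	return 0;
}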
@@ -1090,10 +1133,9 @@ out:
static void kmemleak_scan(void)
{
unsigned long flags;
- struct kmemleak_object *object, *tmp;
+ struct kmemleak_object *object;
int i;
int new_leaks = 0;
- int gray_list_pass = 0;
jiffies_last_scan = jiffies;
@@ -1114,7 +1156,6 @@ static void kmemleak_scan(void)
#endif
/* reset the reference count (whiten the object) */
object->count = 0;
- object->flags &= ~OBJECT_NEW;
if (color_gray(object) && get_object(object))
list_add_tail(&object->gray_list, &gray_list);
@@ -1172,62 +1213,36 @@ static void kmemleak_scan(void)
/*
* Scan the objects already referenced from the sections scanned
- * above. More objects will be referenced and, if there are no memory
- * leaks, all the objects will be scanned. The list traversal is safe
- * for both tail additions and removals from inside the loop. The
- * kmemleak objects cannot be freed from outside the loop because their
- * use_count was increased.
+ * above.
*/
-repeat:
- object = list_entry(gray_list.next, typeof(*object), gray_list);
- while (&object->gray_list != &gray_list) {
- cond_resched();
-
- /* may add new objects to the list */
- if (!scan_should_stop())
- scan_object(object);
-
- tmp = list_entry(object->gray_list.next, typeof(*object),
- gray_list);
-
- /* remove the object from the list and release it */
- list_del(&object->gray_list);
- put_object(object);
-
- object = tmp;
- }
-
- if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
- goto scan_end;
+ scan_gray_list();
/*
- * Check for new objects allocated during this scanning and add them
- * to the gray list.
+ * Check for new or unreferenced objects modified since the previous
+ * scan and color them gray until the next scan.
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
spin_lock_irqsave(&object->lock, flags);
- if ((object->flags & OBJECT_NEW) && !color_black(object) &&
- get_object(object)) {
- object->flags &= ~OBJECT_NEW;
+ if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+ && update_checksum(object) && get_object(object)) {
+ /* color it gray temporarily */
+ object->count = object->min_count;
list_add_tail(&object->gray_list, &gray_list);
}
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();
- if (!list_empty(&gray_list))
- goto repeat;
-
-scan_end:
- WARN_ON(!list_empty(&gray_list));
+ /*
+ * Re-scan the gray list for modified unreferenced objects.
+ */
+ scan_gray_list();
/*
- * If scanning was stopped or new objects were being allocated at a
- * higher rate than gray list scanning, do not report any new
- * unreferenced objects.
+ * If scanning was stopped do not report any new unreferenced objects.
*/
- if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+ if (scan_should_stop())
return;
/*
@@ -1642,8 +1657,7 @@ void __init kmemleak_init(void)
kmemleak_ignore(log->ptr);
break;
case KMEMLEAK_SCAN_AREA:
- kmemleak_scan_area(log->ptr, log->offset, log->length,
- GFP_KERNEL);
+ kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
break;
case KMEMLEAK_NO_SCAN:
kmemleak_no_scan(log->ptr);
diff --git a/mm/ksm.c b/mm/ksm.c
index 5575f8628fe..56a0da1f997 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -29,11 +29,13 @@
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
+#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <asm/tlbflush.h>
+#include "internal.h"
/*
* A few notes about the KSM scanning process,
@@ -79,13 +81,13 @@
* struct mm_slot - ksm information per mm that is being scanned
* @link: link to the mm_slots hash list
* @mm_list: link into the mm_slots list, rooted in ksm_mm_head
- * @rmap_list: head for this mm_slot's list of rmap_items
+ * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
* @mm: the mm that this information is valid for
*/
struct mm_slot {
struct hlist_node link;
struct list_head mm_list;
- struct list_head rmap_list;
+ struct rmap_item *rmap_list;
struct mm_struct *mm;
};
@@ -93,7 +95,7 @@ struct mm_slot {
* struct ksm_scan - cursor for scanning
* @mm_slot: the current mm_slot we are scanning
* @address: the next address inside that to be scanned
- * @rmap_item: the current rmap that we are scanning inside the rmap_list
+ * @rmap_list: link to the next rmap to be scanned in the rmap_list
* @seqnr: count of completed full scans (needed when removing unstable node)
*
* There is only the one ksm_scan instance of this cursor structure.
@@ -101,37 +103,51 @@ struct mm_slot {
struct ksm_scan {
struct mm_slot *mm_slot;
unsigned long address;
- struct rmap_item *rmap_item;
+ struct rmap_item **rmap_list;
unsigned long seqnr;
};
/**
+ * struct stable_node - node of the stable rbtree
+ * @node: rb node of this ksm page in the stable tree
+ * @hlist: hlist head of rmap_items using this ksm page
+ * @kpfn: page frame number of this ksm page
+ */
+struct stable_node {
+ struct rb_node node;
+ struct hlist_head hlist;
+ unsigned long kpfn;
+};
+
+/**
* struct rmap_item - reverse mapping item for virtual addresses
- * @link: link into mm_slot's rmap_list (rmap_list is per mm)
+ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
+ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
* @mm: the memory structure this rmap_item is pointing into
* @address: the virtual address this rmap_item tracks (+ flags in low bits)
* @oldchecksum: previous checksum of the page at that virtual address
- * @node: rb_node of this rmap_item in either unstable or stable tree
- * @next: next rmap_item hanging off the same node of the stable tree
- * @prev: previous rmap_item hanging off the same node of the stable tree
+ * @node: rb node of this rmap_item in the unstable tree
+ * @head: pointer to stable_node heading this list in the stable tree
+ * @hlist: link into hlist of rmap_items hanging off that stable_node
*/
struct rmap_item {
- struct list_head link;
+ struct rmap_item *rmap_list;
+ struct anon_vma *anon_vma; /* when stable */
struct mm_struct *mm;
unsigned long address; /* + low bits used for flags below */
+ unsigned int oldchecksum; /* when unstable */
union {
- unsigned int oldchecksum; /* when unstable */
- struct rmap_item *next; /* when stable */
- };
- union {
- struct rb_node node; /* when tree node */
- struct rmap_item *prev; /* in stable list */
+ struct rb_node node; /* when node of unstable tree */
+ struct { /* when listed from stable tree */
+ struct stable_node *head;
+ struct hlist_node hlist;
+ };
};
};
#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
-#define NODE_FLAG 0x100 /* is a node of unstable or stable tree */
-#define STABLE_FLAG 0x200 /* is a node or list item of stable tree */
+#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
+#define STABLE_FLAG 0x200 /* is listed from the stable tree */
/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
@@ -148,6 +164,7 @@ static struct ksm_scan ksm_scan = {
};
static struct kmem_cache *rmap_item_cache;
+static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;
/* The number of nodes in the stable tree */
@@ -162,9 +179,6 @@ static unsigned long ksm_pages_unshared;
/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;
-/* Limit on the number of unswappable pages used */
-static unsigned long ksm_max_kernel_pages;
-
/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;
@@ -190,13 +204,19 @@ static int __init ksm_slab_init(void)
if (!rmap_item_cache)
goto out;
+ stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
+ if (!stable_node_cache)
+ goto out_free1;
+
mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
if (!mm_slot_cache)
- goto out_free;
+ goto out_free2;
return 0;
-out_free:
+out_free2:
+ kmem_cache_destroy(stable_node_cache);
+out_free1:
kmem_cache_destroy(rmap_item_cache);
out:
return -ENOMEM;
@@ -205,6 +225,7 @@ out:
static void __init ksm_slab_free(void)
{
kmem_cache_destroy(mm_slot_cache);
+ kmem_cache_destroy(stable_node_cache);
kmem_cache_destroy(rmap_item_cache);
mm_slot_cache = NULL;
}
@@ -226,6 +247,16 @@ static inline void free_rmap_item(struct rmap_item *rmap_item)
kmem_cache_free(rmap_item_cache, rmap_item);
}
+static inline struct stable_node *alloc_stable_node(void)
+{
+ return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
+}
+
+static inline void free_stable_node(struct stable_node *stable_node)
+{
+ kmem_cache_free(stable_node_cache, stable_node);
+}
+
static inline struct mm_slot *alloc_mm_slot(void)
{
if (!mm_slot_cache) /* initialization failed */
@@ -275,7 +306,6 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
% MM_SLOTS_HASH_HEADS];
mm_slot->mm = mm;
- INIT_LIST_HEAD(&mm_slot->rmap_list);
hlist_add_head(&mm_slot->link, bucket);
}
@@ -284,6 +314,25 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
return rmap_item->address & STABLE_FLAG;
}
+static void hold_anon_vma(struct rmap_item *rmap_item,
+ struct anon_vma *anon_vma)
+{
+ rmap_item->anon_vma = anon_vma;
+ atomic_inc(&anon_vma->ksm_refcount);
+}
+
+static void drop_anon_vma(struct rmap_item *rmap_item)
+{
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+
+ if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
+ int empty = list_empty(&anon_vma->head);
+ spin_unlock(&anon_vma->lock);
+ if (empty)
+ anon_vma_free(anon_vma);
+ }
+}
+
/*
* ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
* page tables after it has passed through ksm_exit() - which, if necessary,
@@ -356,10 +405,18 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
-static void break_cow(struct mm_struct *mm, unsigned long addr)
+static void break_cow(struct rmap_item *rmap_item)
{
+ struct mm_struct *mm = rmap_item->mm;
+ unsigned long addr = rmap_item->address;
struct vm_area_struct *vma;
+ /*
+ * It is not an accident that whenever we want to break COW
+ * to undo, we also need to drop a reference to the anon_vma.
+ */
+ drop_anon_vma(rmap_item);
+
down_read(&mm->mmap_sem);
if (ksm_test_exit(mm))
goto out;
@@ -403,21 +460,77 @@ out: page = NULL;
return page;
}
+static void remove_node_from_stable_tree(struct stable_node *stable_node)
+{
+ struct rmap_item *rmap_item;
+ struct hlist_node *hlist;
+
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ if (rmap_item->hlist.next)
+ ksm_pages_sharing--;
+ else
+ ksm_pages_shared--;
+ drop_anon_vma(rmap_item);
+ rmap_item->address &= PAGE_MASK;
+ cond_resched();
+ }
+
+ rb_erase(&stable_node->node, &root_stable_tree);
+ free_stable_node(stable_node);
+}
+
/*
- * get_ksm_page: checks if the page at the virtual address in rmap_item
- * is still PageKsm, in which case we can trust the content of the page,
- * and it returns the gotten page; but NULL if the page has been zapped.
+ * get_ksm_page: checks if the page indicated by the stable node
+ * is still its ksm page, despite having held no reference to it.
+ * In which case we can trust the content of the page, and it
+ * returns the gotten page; but if the page has now been zapped,
+ * remove the stale node from the stable tree and return NULL.
+ *
+ * You would expect the stable_node to hold a reference to the ksm page.
+ * But if it increments the page's count, swapping out has to wait for
+ * ksmd to come around again before it can free the page, which may take
+ * seconds or even minutes: much too unresponsive. So instead we use a
+ * "keyhole reference": access to the ksm page from the stable node peeps
+ * out through its keyhole to see if that page still holds the right key,
+ * pointing back to this stable node. This relies on freeing a PageAnon
+ * page to reset its page->mapping to NULL, and relies on no other use of
+ * a page to put something that might look like our key in page->mapping.
+ *
+ * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
+ * but this is different - made simpler by ksm_thread_mutex being held, but
+ * interesting for assuming that no other use of the struct page could ever
+ * put our expected_mapping into page->mapping (or a field of the union which
+ * coincides with page->mapping). The RCU calls are not for KSM at all, but
+ * to keep the page_count protocol described with page_cache_get_speculative.
+ *
+ * Note: it is possible that get_ksm_page() will return NULL one moment,
+ * then page the next, if the page is in between page_freeze_refs() and
+ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
+ * is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct rmap_item *rmap_item)
+static struct page *get_ksm_page(struct stable_node *stable_node)
{
struct page *page;
-
- page = get_mergeable_page(rmap_item);
- if (page && !PageKsm(page)) {
+ void *expected_mapping;
+
+ page = pfn_to_page(stable_node->kpfn);
+ expected_mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ rcu_read_lock();
+ if (page->mapping != expected_mapping)
+ goto stale;
+ if (!get_page_unless_zero(page))
+ goto stale;
+ if (page->mapping != expected_mapping) {
put_page(page);
- page = NULL;
+ goto stale;
}
+ rcu_read_unlock();
return page;
+stale:
+ rcu_read_unlock();
+ remove_node_from_stable_tree(stable_node);
+ return NULL;
}
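The "keyhole reference" described in the comment above is a speculative-get pattern: peek at the identifying key, take a reference only while the count is still non-zero, then re-check the key in case the object was freed and reused in the meantime. The sketch below approximates it with C11 atomics in userspace; struct object, get_object_speculative() and the field names are invented stand-ins (the kernel code relies on page->mapping and get_page_unless_zero()).

#include <stdatomic.h>
#include <stdbool.h>

struct object {
	_Atomic(void *) key;	/* plays the role of page->mapping */
	atomic_int count;	/* plays the role of the page refcount */
};

/* Take a reference only if the object still belongs to 'expected_key'. */
static bool get_object_speculative(struct object *obj, void *expected_key)
{
	int old;

	if (atomic_load(&obj->key) != expected_key)
		return false;		/* already stale */

	/* get_page_unless_zero() equivalent: never resurrect a zero count */
	old = atomic_load(&obj->count);
	do {
		if (old == 0)
			return false;	/* object is being freed */
	} while (!atomic_compare_exchange_weak(&obj->count, &old, old + 1));

	if (atomic_load(&obj->key) != expected_key) {
		atomic_fetch_sub(&obj->count, 1);	/* drop it again */
		return false;		/* freed and reused under us */
	}
	return true;			/* reference taken, key verified */
}

int main(void)
{
	static int dummy_key;
	struct object obj = { &dummy_key, 1 };

	return get_object_speculative(&obj, &dummy_key) ? 0 : 1;
}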
/*
@@ -426,35 +539,29 @@ static struct page *get_ksm_page(struct rmap_item *rmap_item)
*/
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
- if (in_stable_tree(rmap_item)) {
- struct rmap_item *next_item = rmap_item->next;
-
- if (rmap_item->address & NODE_FLAG) {
- if (next_item) {
- rb_replace_node(&rmap_item->node,
- &next_item->node,
- &root_stable_tree);
- next_item->address |= NODE_FLAG;
- ksm_pages_sharing--;
- } else {
- rb_erase(&rmap_item->node, &root_stable_tree);
- ksm_pages_shared--;
- }
- } else {
- struct rmap_item *prev_item = rmap_item->prev;
+ if (rmap_item->address & STABLE_FLAG) {
+ struct stable_node *stable_node;
+ struct page *page;
- BUG_ON(prev_item->next != rmap_item);
- prev_item->next = next_item;
- if (next_item) {
- BUG_ON(next_item->prev != rmap_item);
- next_item->prev = rmap_item->prev;
- }
+ stable_node = rmap_item->head;
+ page = get_ksm_page(stable_node);
+ if (!page)
+ goto out;
+
+ lock_page(page);
+ hlist_del(&rmap_item->hlist);
+ unlock_page(page);
+ put_page(page);
+
+ if (stable_node->hlist.first)
ksm_pages_sharing--;
- }
+ else
+ ksm_pages_shared--;
- rmap_item->next = NULL;
+ drop_anon_vma(rmap_item);
+ rmap_item->address &= PAGE_MASK;
- } else if (rmap_item->address & NODE_FLAG) {
+ } else if (rmap_item->address & UNSTABLE_FLAG) {
unsigned char age;
/*
* Usually ksmd can and must skip the rb_erase, because
@@ -467,24 +574,21 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
BUG_ON(age > 1);
if (!age)
rb_erase(&rmap_item->node, &root_unstable_tree);
+
ksm_pages_unshared--;
+ rmap_item->address &= PAGE_MASK;
}
-
- rmap_item->address &= PAGE_MASK;
-
+out:
cond_resched(); /* we're called from many long loops */
}
static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
- struct list_head *cur)
+ struct rmap_item **rmap_list)
{
- struct rmap_item *rmap_item;
-
- while (cur != &mm_slot->rmap_list) {
- rmap_item = list_entry(cur, struct rmap_item, link);
- cur = cur->next;
+ while (*rmap_list) {
+ struct rmap_item *rmap_item = *rmap_list;
+ *rmap_list = rmap_item->rmap_list;
remove_rmap_item_from_tree(rmap_item);
- list_del(&rmap_item->link);
free_rmap_item(rmap_item);
}
}
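remove_trailing_rmap_items() above walks the mm_slot's now singly-linked rmap_list through a struct rmap_item ** cursor, so each entry can be unlinked without tracking a separate previous pointer. A small userspace sketch of that pointer-to-pointer idiom follows; struct item and remove_trailing() are illustrative names only.

#include <stdio.h>
#include <stdlib.h>

struct item {
	int key;
	struct item *next;
};

/* Unlink and free every entry reachable from *list. */
static void remove_trailing(struct item **list)
{
	while (*list) {
		struct item *it = *list;

		*list = it->next;		/* unlink through the cursor */
		printf("freeing %d\n", it->key);
		free(it);
	}
}

int main(void)
{
	struct item *head = NULL;
	struct item **tail = &head;
	int i;

	for (i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		it->key = i;
		it->next = NULL;
		*tail = it;
		tail = &it->next;
	}
	remove_trailing(&head);	/* pass &some_item->next to drop only a suffix */
	return 0;
}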
@@ -550,7 +654,7 @@ static int unmerge_and_remove_all_rmap_items(void)
goto error;
}
- remove_trailing_rmap_items(mm_slot, mm_slot->rmap_list.next);
+ remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -646,7 +750,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
* Check that no O_DIRECT or similar I/O is in progress on the
* page
*/
- if ((page_mapcount(page) + 2 + swapped) != page_count(page)) {
+ if (page_mapcount(page) + 1 + swapped != page_count(page)) {
set_pte_at_notify(mm, addr, ptep, entry);
goto out_unlock;
}
@@ -664,15 +768,15 @@ out:
/**
* replace_page - replace page in vma by new ksm page
- * @vma: vma that holds the pte pointing to oldpage
- * @oldpage: the page we are replacing by newpage
- * @newpage: the ksm page we replace oldpage by
+ * @vma: vma that holds the pte pointing to page
+ * @page: the page we are replacing by kpage
+ * @kpage: the ksm page we replace page by
* @orig_pte: the original value of the pte
*
* Returns 0 on success, -EFAULT on failure.
*/
-static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
- struct page *newpage, pte_t orig_pte)
+static int replace_page(struct vm_area_struct *vma, struct page *page,
+ struct page *kpage, pte_t orig_pte)
{
struct mm_struct *mm = vma->vm_mm;
pgd_t *pgd;
@@ -681,12 +785,9 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
pte_t *ptep;
spinlock_t *ptl;
unsigned long addr;
- pgprot_t prot;
int err = -EFAULT;
- prot = vm_get_page_prot(vma->vm_flags & ~VM_WRITE);
-
- addr = page_address_in_vma(oldpage, vma);
+ addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
goto out;
@@ -708,15 +809,15 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
goto out;
}
- get_page(newpage);
- page_add_ksm_rmap(newpage);
+ get_page(kpage);
+ page_add_anon_rmap(kpage, vma, addr);
flush_cache_page(vma, addr, pte_pfn(*ptep));
ptep_clear_flush(vma, addr, ptep);
- set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot));
+ set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
- page_remove_rmap(oldpage);
- put_page(oldpage);
+ page_remove_rmap(page);
+ put_page(page);
pte_unmap_unlock(ptep, ptl);
err = 0;
@@ -726,32 +827,27 @@ out:
/*
* try_to_merge_one_page - take two pages and merge them into one
- * @vma: the vma that hold the pte pointing into oldpage
- * @oldpage: the page that we want to replace with newpage
- * @newpage: the page that we want to map instead of oldpage
- *
- * Note:
- * oldpage should be a PageAnon page, while newpage should be a PageKsm page,
- * or a newly allocated kernel page which page_add_ksm_rmap will make PageKsm.
+ * @vma: the vma that holds the pte pointing to page
+ * @page: the PageAnon page that we want to replace with kpage
+ * @kpage: the PageKsm page that we want to map instead of page,
+ * or NULL the first time when we want to use page as kpage.
*
* This function returns 0 if the pages were merged, -EFAULT otherwise.
*/
static int try_to_merge_one_page(struct vm_area_struct *vma,
- struct page *oldpage,
- struct page *newpage)
+ struct page *page, struct page *kpage)
{
pte_t orig_pte = __pte(0);
int err = -EFAULT;
+ if (page == kpage) /* ksm page forked */
+ return 0;
+
if (!(vma->vm_flags & VM_MERGEABLE))
goto out;
-
- if (!PageAnon(oldpage))
+ if (!PageAnon(page))
goto out;
- get_page(newpage);
- get_page(oldpage);
-
/*
* We need the page lock to read a stable PageSwapCache in
* write_protect_page(). We use trylock_page() instead of
@@ -759,26 +855,39 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
* prefer to continue scanning and merging different pages,
* then come back to this page when it is unlocked.
*/
- if (!trylock_page(oldpage))
- goto out_putpage;
+ if (!trylock_page(page))
+ goto out;
/*
* If this anonymous page is mapped only here, its pte may need
* to be write-protected. If it's mapped elsewhere, all of its
* ptes are necessarily already write-protected. But in either
* case, we need to lock and check page_count is not raised.
*/
- if (write_protect_page(vma, oldpage, &orig_pte)) {
- unlock_page(oldpage);
- goto out_putpage;
+ if (write_protect_page(vma, page, &orig_pte) == 0) {
+ if (!kpage) {
+ /*
+ * While we hold page lock, upgrade page from
+ * PageAnon+anon_vma to PageKsm+NULL stable_node:
+ * stable_tree_insert() will update stable_node.
+ */
+ set_page_stable_node(page, NULL);
+ mark_page_accessed(page);
+ err = 0;
+ } else if (pages_identical(page, kpage))
+ err = replace_page(vma, page, kpage, orig_pte);
}
- unlock_page(oldpage);
- if (pages_identical(oldpage, newpage))
- err = replace_page(vma, oldpage, newpage, orig_pte);
+ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
+ munlock_vma_page(page);
+ if (!PageMlocked(kpage)) {
+ unlock_page(page);
+ lock_page(kpage);
+ mlock_vma_page(kpage);
+ page = kpage; /* for final unlock */
+ }
+ }
-out_putpage:
- put_page(oldpage);
- put_page(newpage);
+ unlock_page(page);
out:
return err;
}
@@ -786,26 +895,31 @@ out:
/*
* try_to_merge_with_ksm_page - like try_to_merge_two_pages,
* but no new kernel page is allocated: kpage must already be a ksm page.
+ *
+ * This function returns 0 if the pages were merged, -EFAULT otherwise.
*/
-static int try_to_merge_with_ksm_page(struct mm_struct *mm1,
- unsigned long addr1,
- struct page *page1,
- struct page *kpage)
+static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
+ struct page *page, struct page *kpage)
{
+ struct mm_struct *mm = rmap_item->mm;
struct vm_area_struct *vma;
int err = -EFAULT;
- down_read(&mm1->mmap_sem);
- if (ksm_test_exit(mm1))
+ down_read(&mm->mmap_sem);
+ if (ksm_test_exit(mm))
+ goto out;
+ vma = find_vma(mm, rmap_item->address);
+ if (!vma || vma->vm_start > rmap_item->address)
goto out;
- vma = find_vma(mm1, addr1);
- if (!vma || vma->vm_start > addr1)
+ err = try_to_merge_one_page(vma, page, kpage);
+ if (err)
goto out;
- err = try_to_merge_one_page(vma, page1, kpage);
+ /* Must get reference to anon_vma while still holding mmap_sem */
+ hold_anon_vma(rmap_item, vma->anon_vma);
out:
- up_read(&mm1->mmap_sem);
+ up_read(&mm->mmap_sem);
return err;
}
@@ -813,109 +927,73 @@ out:
* try_to_merge_two_pages - take two identical pages and prepare them
* to be merged into one page.
*
- * This function returns 0 if we successfully mapped two identical pages
- * into one page, -EFAULT otherwise.
+ * This function returns the kpage if we successfully merged two identical
+ * pages into one ksm page, NULL otherwise.
*
- * Note that this function allocates a new kernel page: if one of the pages
+ * Note that this function upgrades page to ksm page: if one of the pages
* is already a ksm page, try_to_merge_with_ksm_page should be used.
*/
-static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
- struct page *page1, struct mm_struct *mm2,
- unsigned long addr2, struct page *page2)
+static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
+ struct page *page,
+ struct rmap_item *tree_rmap_item,
+ struct page *tree_page)
{
- struct vm_area_struct *vma;
- struct page *kpage;
- int err = -EFAULT;
-
- /*
- * The number of nodes in the stable tree
- * is the number of kernel pages that we hold.
- */
- if (ksm_max_kernel_pages &&
- ksm_max_kernel_pages <= ksm_pages_shared)
- return err;
-
- kpage = alloc_page(GFP_HIGHUSER);
- if (!kpage)
- return err;
-
- down_read(&mm1->mmap_sem);
- if (ksm_test_exit(mm1)) {
- up_read(&mm1->mmap_sem);
- goto out;
- }
- vma = find_vma(mm1, addr1);
- if (!vma || vma->vm_start > addr1) {
- up_read(&mm1->mmap_sem);
- goto out;
- }
-
- copy_user_highpage(kpage, page1, addr1, vma);
- err = try_to_merge_one_page(vma, page1, kpage);
- up_read(&mm1->mmap_sem);
+ int err;
+ err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
if (!err) {
- err = try_to_merge_with_ksm_page(mm2, addr2, page2, kpage);
+ err = try_to_merge_with_ksm_page(tree_rmap_item,
+ tree_page, page);
/*
* If that fails, we have a ksm page with only one pte
* pointing to it: so break it.
*/
if (err)
- break_cow(mm1, addr1);
+ break_cow(rmap_item);
}
-out:
- put_page(kpage);
- return err;
+ return err ? NULL : page;
}
/*
- * stable_tree_search - search page inside the stable tree
- * @page: the page that we are searching identical pages to.
- * @page2: pointer into identical page that we are holding inside the stable
- * tree that we have found.
- * @rmap_item: the reverse mapping item
+ * stable_tree_search - search for page inside the stable tree
*
* This function checks if there is a page inside the stable tree
* with identical content to the page that we are scanning right now.
*
- * This function return rmap_item pointer to the identical item if found,
+ * This function returns the stable tree node of identical content if found,
* NULL otherwise.
*/
-static struct rmap_item *stable_tree_search(struct page *page,
- struct page **page2,
- struct rmap_item *rmap_item)
+static struct page *stable_tree_search(struct page *page)
{
struct rb_node *node = root_stable_tree.rb_node;
+ struct stable_node *stable_node;
+
+ stable_node = page_stable_node(page);
+ if (stable_node) { /* ksm page forked */
+ get_page(page);
+ return page;
+ }
while (node) {
- struct rmap_item *tree_rmap_item, *next_rmap_item;
+ struct page *tree_page;
int ret;
- tree_rmap_item = rb_entry(node, struct rmap_item, node);
- while (tree_rmap_item) {
- BUG_ON(!in_stable_tree(tree_rmap_item));
- cond_resched();
- page2[0] = get_ksm_page(tree_rmap_item);
- if (page2[0])
- break;
- next_rmap_item = tree_rmap_item->next;
- remove_rmap_item_from_tree(tree_rmap_item);
- tree_rmap_item = next_rmap_item;
- }
- if (!tree_rmap_item)
+ cond_resched();
+ stable_node = rb_entry(node, struct stable_node, node);
+ tree_page = get_ksm_page(stable_node);
+ if (!tree_page)
return NULL;
- ret = memcmp_pages(page, page2[0]);
+ ret = memcmp_pages(page, tree_page);
if (ret < 0) {
- put_page(page2[0]);
+ put_page(tree_page);
node = node->rb_left;
} else if (ret > 0) {
- put_page(page2[0]);
+ put_page(tree_page);
node = node->rb_right;
- } else {
- return tree_rmap_item;
- }
+ } else
+ return tree_page;
}
return NULL;
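stable_tree_search() above orders the stable tree purely by memcmp() of page contents, so pages with identical data always meet at the same node. The userspace sketch below shows content-keyed lookup with glibc's tsearch()/tfind() standing in for the kernel rbtree; the toy PAGE_SIZE and the page buffers are assumptions made for the demo.

#include <search.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 16	/* toy "page" size, just for the demo */

static int cmp_pages(const void *a, const void *b)
{
	return memcmp(a, b, PAGE_SIZE);
}

int main(void)
{
	static char page1[PAGE_SIZE] = "identical data";
	static char page2[PAGE_SIZE] = "identical data";
	static char page3[PAGE_SIZE] = "different data";
	void *root = NULL;

	tsearch(page1, &root, cmp_pages);	/* "insert into the stable tree" */
	printf("page2 can merge: %d\n", tfind(page2, &root, cmp_pages) != NULL);
	printf("page3 can merge: %d\n", tfind(page3, &root, cmp_pages) != NULL);
	return 0;
}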
@@ -925,38 +1003,26 @@ static struct rmap_item *stable_tree_search(struct page *page,
* stable_tree_insert - insert rmap_item pointing to new ksm page
* into the stable tree.
*
- * @page: the page that we are searching identical page to inside the stable
- * tree.
- * @rmap_item: pointer to the reverse mapping item.
- *
- * This function returns rmap_item if success, NULL otherwise.
+ * This function returns the stable tree node just allocated on success,
+ * NULL otherwise.
*/
-static struct rmap_item *stable_tree_insert(struct page *page,
- struct rmap_item *rmap_item)
+static struct stable_node *stable_tree_insert(struct page *kpage)
{
struct rb_node **new = &root_stable_tree.rb_node;
struct rb_node *parent = NULL;
+ struct stable_node *stable_node;
while (*new) {
- struct rmap_item *tree_rmap_item, *next_rmap_item;
struct page *tree_page;
int ret;
- tree_rmap_item = rb_entry(*new, struct rmap_item, node);
- while (tree_rmap_item) {
- BUG_ON(!in_stable_tree(tree_rmap_item));
- cond_resched();
- tree_page = get_ksm_page(tree_rmap_item);
- if (tree_page)
- break;
- next_rmap_item = tree_rmap_item->next;
- remove_rmap_item_from_tree(tree_rmap_item);
- tree_rmap_item = next_rmap_item;
- }
- if (!tree_rmap_item)
+ cond_resched();
+ stable_node = rb_entry(*new, struct stable_node, node);
+ tree_page = get_ksm_page(stable_node);
+ if (!tree_page)
return NULL;
- ret = memcmp_pages(page, tree_page);
+ ret = memcmp_pages(kpage, tree_page);
put_page(tree_page);
parent = *new;
@@ -974,22 +1040,24 @@ static struct rmap_item *stable_tree_insert(struct page *page,
}
}
- rmap_item->address |= NODE_FLAG | STABLE_FLAG;
- rmap_item->next = NULL;
- rb_link_node(&rmap_item->node, parent, new);
- rb_insert_color(&rmap_item->node, &root_stable_tree);
+ stable_node = alloc_stable_node();
+ if (!stable_node)
+ return NULL;
- ksm_pages_shared++;
- return rmap_item;
+ rb_link_node(&stable_node->node, parent, new);
+ rb_insert_color(&stable_node->node, &root_stable_tree);
+
+ INIT_HLIST_HEAD(&stable_node->hlist);
+
+ stable_node->kpfn = page_to_pfn(kpage);
+ set_page_stable_node(kpage, stable_node);
+
+ return stable_node;
}
/*
- * unstable_tree_search_insert - search and insert items into the unstable tree.
- *
- * @page: the page that we are going to search for identical page or to insert
- * into the unstable tree
- * @page2: pointer into identical page that was found inside the unstable tree
- * @rmap_item: the reverse mapping item of page
+ * unstable_tree_search_insert - search for identical page,
+ * else insert rmap_item into the unstable tree.
*
* This function searches for a page in the unstable tree identical to the
* page currently being scanned; and if no identical page is found in the
@@ -1001,47 +1069,50 @@ static struct rmap_item *stable_tree_insert(struct page *page,
* This function does both searching and inserting, because they share
* the same walking algorithm in an rbtree.
*/
-static struct rmap_item *unstable_tree_search_insert(struct page *page,
- struct page **page2,
- struct rmap_item *rmap_item)
+static
+struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
+ struct page *page,
+ struct page **tree_pagep)
+
{
struct rb_node **new = &root_unstable_tree.rb_node;
struct rb_node *parent = NULL;
while (*new) {
struct rmap_item *tree_rmap_item;
+ struct page *tree_page;
int ret;
cond_resched();
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
- page2[0] = get_mergeable_page(tree_rmap_item);
- if (!page2[0])
+ tree_page = get_mergeable_page(tree_rmap_item);
+ if (!tree_page)
return NULL;
/*
- * Don't substitute an unswappable ksm page
- * just for one good swappable forked page.
+ * Don't substitute a ksm page for a forked page.
*/
- if (page == page2[0]) {
- put_page(page2[0]);
+ if (page == tree_page) {
+ put_page(tree_page);
return NULL;
}
- ret = memcmp_pages(page, page2[0]);
+ ret = memcmp_pages(page, tree_page);
parent = *new;
if (ret < 0) {
- put_page(page2[0]);
+ put_page(tree_page);
new = &parent->rb_left;
} else if (ret > 0) {
- put_page(page2[0]);
+ put_page(tree_page);
new = &parent->rb_right;
} else {
+ *tree_pagep = tree_page;
return tree_rmap_item;
}
}
- rmap_item->address |= NODE_FLAG;
+ rmap_item->address |= UNSTABLE_FLAG;
rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
rb_link_node(&rmap_item->node, parent, new);
rb_insert_color(&rmap_item->node, &root_unstable_tree);
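unstable_tree_search_insert() above shares one tree walk between searching and inserting: the descent either finds a node with equal content or ends at the link where the new node must be attached (the rb_link_node()/rb_insert_color() pair). Below is a compact userspace sketch of that combined search-or-insert walk over a plain binary tree; struct node and search_insert() are illustrative names.

#include <stdio.h>
#include <string.h>

struct node {
	char key[16];
	struct node *left, *right;
};

static struct node *root;

/* Return an existing node with the same key, or NULL after inserting n. */
static struct node *search_insert(struct node *n)
{
	struct node **link = &root;

	while (*link) {
		int ret = memcmp(n->key, (*link)->key, sizeof(n->key));

		if (ret < 0)
			link = &(*link)->left;
		else if (ret > 0)
			link = &(*link)->right;
		else
			return *link;	/* identical content found */
	}
	*link = n;			/* attach at the link the walk ended on */
	return NULL;
}

int main(void)
{
	struct node a = { "hello" }, b = { "world" }, c = { "hello" };

	printf("%p\n", (void *)search_insert(&a));	/* (nil): inserted */
	printf("%p\n", (void *)search_insert(&b));	/* (nil): inserted */
	printf("%p\n", (void *)search_insert(&c));	/* &a: duplicate found */
	return 0;
}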
@@ -1056,18 +1127,16 @@ static struct rmap_item *unstable_tree_search_insert(struct page *page,
* the same ksm page.
*/
static void stable_tree_append(struct rmap_item *rmap_item,
- struct rmap_item *tree_rmap_item)
+ struct stable_node *stable_node)
{
- rmap_item->next = tree_rmap_item->next;
- rmap_item->prev = tree_rmap_item;
-
- if (tree_rmap_item->next)
- tree_rmap_item->next->prev = rmap_item;
-
- tree_rmap_item->next = rmap_item;
+ rmap_item->head = stable_node;
rmap_item->address |= STABLE_FLAG;
+ hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
- ksm_pages_sharing++;
+ if (rmap_item->hlist.next)
+ ksm_pages_sharing++;
+ else
+ ksm_pages_shared++;
}
/*
@@ -1081,49 +1150,37 @@ static void stable_tree_append(struct rmap_item *rmap_item,
*/
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
- struct page *page2[1];
struct rmap_item *tree_rmap_item;
+ struct page *tree_page = NULL;
+ struct stable_node *stable_node;
+ struct page *kpage;
unsigned int checksum;
int err;
- if (in_stable_tree(rmap_item))
- remove_rmap_item_from_tree(rmap_item);
+ remove_rmap_item_from_tree(rmap_item);
/* We first start with searching the page inside the stable tree */
- tree_rmap_item = stable_tree_search(page, page2, rmap_item);
- if (tree_rmap_item) {
- if (page == page2[0]) /* forked */
- err = 0;
- else
- err = try_to_merge_with_ksm_page(rmap_item->mm,
- rmap_item->address,
- page, page2[0]);
- put_page(page2[0]);
-
+ kpage = stable_tree_search(page);
+ if (kpage) {
+ err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
if (!err) {
/*
* The page was successfully merged:
* add its rmap_item to the stable tree.
*/
- stable_tree_append(rmap_item, tree_rmap_item);
+ lock_page(kpage);
+ stable_tree_append(rmap_item, page_stable_node(kpage));
+ unlock_page(kpage);
}
+ put_page(kpage);
return;
}
/*
- * A ksm page might have got here by fork, but its other
- * references have already been removed from the stable tree.
- * Or it might be left over from a break_ksm which failed
- * when the mem_cgroup had reached its limit: try again now.
- */
- if (PageKsm(page))
- break_cow(rmap_item->mm, rmap_item->address);
-
- /*
- * In case the hash value of the page was changed from the last time we
- * have calculated it, this page to be changed frequely, therefore we
- * don't want to insert it to the unstable tree, and we don't want to
- * waste our time to search if there is something identical to it there.
+ * If the hash value of the page has changed from the last time
+ * we calculated it, this page is changing frequently: therefore we
+ * don't want to insert it in the unstable tree, and we don't want
+ * to waste our time searching for something identical to it there.
*/
checksum = calc_checksum(page);
if (rmap_item->oldchecksum != checksum) {
@@ -1131,21 +1188,27 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
return;
}
- tree_rmap_item = unstable_tree_search_insert(page, page2, rmap_item);
+ tree_rmap_item =
+ unstable_tree_search_insert(rmap_item, page, &tree_page);
if (tree_rmap_item) {
- err = try_to_merge_two_pages(rmap_item->mm,
- rmap_item->address, page,
- tree_rmap_item->mm,
- tree_rmap_item->address, page2[0]);
+ kpage = try_to_merge_two_pages(rmap_item, page,
+ tree_rmap_item, tree_page);
+ put_page(tree_page);
/*
* As soon as we merge this page, we want to remove the
* rmap_item of the page we have merged with from the unstable
* tree, and insert it instead as new node in the stable tree.
*/
- if (!err) {
- rb_erase(&tree_rmap_item->node, &root_unstable_tree);
- tree_rmap_item->address &= ~NODE_FLAG;
- ksm_pages_unshared--;
+ if (kpage) {
+ remove_rmap_item_from_tree(tree_rmap_item);
+
+ lock_page(kpage);
+ stable_node = stable_tree_insert(kpage);
+ if (stable_node) {
+ stable_tree_append(tree_rmap_item, stable_node);
+ stable_tree_append(rmap_item, stable_node);
+ }
+ unlock_page(kpage);
/*
* If we fail to insert the page into the stable tree,
@@ -1153,37 +1216,28 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
* to a ksm page left outside the stable tree,
* in which case we need to break_cow on both.
*/
- if (stable_tree_insert(page2[0], tree_rmap_item))
- stable_tree_append(rmap_item, tree_rmap_item);
- else {
- break_cow(tree_rmap_item->mm,
- tree_rmap_item->address);
- break_cow(rmap_item->mm, rmap_item->address);
+ if (!stable_node) {
+ break_cow(tree_rmap_item);
+ break_cow(rmap_item);
}
}
-
- put_page(page2[0]);
}
}
static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
- struct list_head *cur,
+ struct rmap_item **rmap_list,
unsigned long addr)
{
struct rmap_item *rmap_item;
- while (cur != &mm_slot->rmap_list) {
- rmap_item = list_entry(cur, struct rmap_item, link);
- if ((rmap_item->address & PAGE_MASK) == addr) {
- if (!in_stable_tree(rmap_item))
- remove_rmap_item_from_tree(rmap_item);
+ while (*rmap_list) {
+ rmap_item = *rmap_list;
+ if ((rmap_item->address & PAGE_MASK) == addr)
return rmap_item;
- }
if (rmap_item->address > addr)
break;
- cur = cur->next;
+ *rmap_list = rmap_item->rmap_list;
remove_rmap_item_from_tree(rmap_item);
- list_del(&rmap_item->link);
free_rmap_item(rmap_item);
}
@@ -1192,7 +1246,8 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
/* It has already been zeroed */
rmap_item->mm = mm_slot->mm;
rmap_item->address = addr;
- list_add_tail(&rmap_item->link, cur);
+ rmap_item->rmap_list = *rmap_list;
+ *rmap_list = rmap_item;
}
return rmap_item;
}
@@ -1217,8 +1272,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
spin_unlock(&ksm_mmlist_lock);
next_mm:
ksm_scan.address = 0;
- ksm_scan.rmap_item = list_entry(&slot->rmap_list,
- struct rmap_item, link);
+ ksm_scan.rmap_list = &slot->rmap_list;
}
mm = slot->mm;
@@ -1244,10 +1298,10 @@ next_mm:
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
rmap_item = get_next_rmap_item(slot,
- ksm_scan.rmap_item->link.next,
- ksm_scan.address);
+ ksm_scan.rmap_list, ksm_scan.address);
if (rmap_item) {
- ksm_scan.rmap_item = rmap_item;
+ ksm_scan.rmap_list =
+ &rmap_item->rmap_list;
ksm_scan.address += PAGE_SIZE;
} else
put_page(*page);
@@ -1263,14 +1317,13 @@ next_mm:
if (ksm_test_exit(mm)) {
ksm_scan.address = 0;
- ksm_scan.rmap_item = list_entry(&slot->rmap_list,
- struct rmap_item, link);
+ ksm_scan.rmap_list = &slot->rmap_list;
}
/*
* Nuke all the rmap_items that are above this current rmap:
* because there were no VM_MERGEABLE vmas with such addresses.
*/
- remove_trailing_rmap_items(slot, ksm_scan.rmap_item->link.next);
+ remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(slot->mm_list.next,
@@ -1323,14 +1376,6 @@ static void ksm_do_scan(unsigned int scan_npages)
return;
if (!PageKsm(page) || !in_stable_tree(rmap_item))
cmp_and_merge_page(page, rmap_item);
- else if (page_mapcount(page) == 1) {
- /*
- * Replace now-unshared ksm page by ordinary page.
- */
- break_cow(rmap_item->mm, rmap_item->address);
- remove_rmap_item_from_tree(rmap_item);
- rmap_item->oldchecksum = calc_checksum(page);
- }
put_page(page);
}
}
@@ -1375,7 +1420,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
VM_PFNMAP | VM_IO | VM_DONTEXPAND |
VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
- VM_MIXEDMAP | VM_SAO))
+ VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
return 0; /* just ignore the advice */
if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
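ksm_madvise() above is the kernel side of madvise(MADV_MERGEABLE); the hunk only adds VM_NONLINEAR to the vma types for which the advice is silently ignored. A rough userspace example of registering two identical anonymous mappings with KSM follows; it assumes a kernel built with CONFIG_KSM, ksmd switched on via /sys/kernel/mm/ksm/run, and does only minimal error handling.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* as defined in the kernel's mman headers */
#endif

#define LEN (256 * 4096)

int main(void)
{
	char *a = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *b = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (a == MAP_FAILED || b == MAP_FAILED)
		return 1;
	memset(a, 0x5a, LEN);	/* identical contents ... */
	memset(b, 0x5a, LEN);	/* ... so ksmd can merge the pages */

	if (madvise(a, LEN, MADV_MERGEABLE) || madvise(b, LEN, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");
	else
		printf("areas registered; watch /sys/kernel/mm/ksm/pages_sharing\n");

	pause();		/* give ksmd time to scan and merge */
	return 0;
}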
@@ -1452,7 +1497,7 @@ void __ksm_exit(struct mm_struct *mm)
spin_lock(&ksm_mmlist_lock);
mm_slot = get_mm_slot(mm);
if (mm_slot && ksm_scan.mm_slot != mm_slot) {
- if (list_empty(&mm_slot->rmap_list)) {
+ if (!mm_slot->rmap_list) {
hlist_del(&mm_slot->link);
list_del(&mm_slot->mm_list);
easy_to_free = 1;
@@ -1473,6 +1518,249 @@ void __ksm_exit(struct mm_struct *mm)
}
}
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct page *new_page;
+
+ unlock_page(page); /* any racers will COW it, not modify it */
+
+ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (new_page) {
+ copy_user_highpage(new_page, page, address, vma);
+
+ SetPageDirty(new_page);
+ __SetPageUptodate(new_page);
+ SetPageSwapBacked(new_page);
+ __set_page_locked(new_page);
+
+ if (page_evictable(new_page, vma))
+ lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
+ else
+ add_page_to_unevictable_list(new_page);
+ }
+
+ page_cache_release(page);
+ return new_page;
+}
+
+int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
+ unsigned long *vm_flags)
+{
+ struct stable_node *stable_node;
+ struct rmap_item *rmap_item;
+ struct hlist_node *hlist;
+ unsigned int mapcount = page_mapcount(page);
+ int referenced = 0;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return 0;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
+ continue;
+
+ referenced += page_referenced_one(page, vma,
+ rmap_item->address, &mapcount, vm_flags);
+ if (!search_new_forks || !mapcount)
+ break;
+ }
+ spin_unlock(&anon_vma->lock);
+ if (!mapcount)
+ goto out;
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return referenced;
+}
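page_referenced_ksm() above (and try_to_unmap_ksm()/rmap_walk_ksm() further down) visit the mappings of a KSM page in two passes: first only the vma belonging to each rmap_item's own mm, then, if work is still left, the vmas of forked mms. A small userspace sketch of that two-pass filter follows; struct mapping and the sample data are invented for the illustration.

#include <stdio.h>

struct mapping {
	int owner;	/* mm that created the rmap_item */
	int mapper;	/* mm whose vma maps the page */
};

static struct mapping maps[] = {
	{ 1, 1 }, { 1, 2 },	/* owned by mm 1, also mapped by forked mm 2 */
	{ 3, 3 },
};

int main(void)
{
	int remaining = 3;	/* stands in for page_mapcount() */
	int search_new_forks = 0;
	unsigned int i;

again:
	for (i = 0; i < sizeof(maps) / sizeof(maps[0]) && remaining; i++) {
		/* first pass: the owner's vma only; second pass: the forks */
		if ((maps[i].owner == maps[i].mapper) == search_new_forks)
			continue;
		printf("pass %d: visiting vma of mm %d\n",
		       search_new_forks, maps[i].mapper);
		remaining--;
	}
	if (!search_new_forks++ && remaining)
		goto again;
	return 0;
}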
+
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+{
+ struct stable_node *stable_node;
+ struct hlist_node *hlist;
+ struct rmap_item *rmap_item;
+ int ret = SWAP_AGAIN;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return SWAP_FAIL;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ ret = try_to_unmap_one(page, vma,
+ rmap_item->address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page)) {
+ spin_unlock(&anon_vma->lock);
+ goto out;
+ }
+ }
+ spin_unlock(&anon_vma->lock);
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return ret;
+}
+
+#ifdef CONFIG_MIGRATION
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct stable_node *stable_node;
+ struct hlist_node *hlist;
+ struct rmap_item *rmap_item;
+ int ret = SWAP_AGAIN;
+ int search_new_forks = 0;
+
+ VM_BUG_ON(!PageKsm(page));
+ VM_BUG_ON(!PageLocked(page));
+
+ stable_node = page_stable_node(page);
+ if (!stable_node)
+ return ret;
+again:
+ hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+ struct anon_vma *anon_vma = rmap_item->anon_vma;
+ struct vm_area_struct *vma;
+
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ if (rmap_item->address < vma->vm_start ||
+ rmap_item->address >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+ * rmap_item; but later, if there is still work to do,
+ * we examine covering vmas in other mms: in case they
+ * were forked from the original since ksmd passed.
+ */
+ if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
+ continue;
+
+ ret = rmap_one(page, vma, rmap_item->address, arg);
+ if (ret != SWAP_AGAIN) {
+ spin_unlock(&anon_vma->lock);
+ goto out;
+ }
+ }
+ spin_unlock(&anon_vma->lock);
+ }
+ if (!search_new_forks++)
+ goto again;
+out:
+ return ret;
+}
+
+void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+ struct stable_node *stable_node;
+
+ VM_BUG_ON(!PageLocked(oldpage));
+ VM_BUG_ON(!PageLocked(newpage));
+ VM_BUG_ON(newpage->mapping != oldpage->mapping);
+
+ stable_node = page_stable_node(newpage);
+ if (stable_node) {
+ VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+ stable_node->kpfn = page_to_pfn(newpage);
+ }
+}
+#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct rb_node *node;
+
+ for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
+ struct stable_node *stable_node;
+
+ stable_node = rb_entry(node, struct stable_node, node);
+ if (stable_node->kpfn >= start_pfn &&
+ stable_node->kpfn < end_pfn)
+ return stable_node;
+ }
+ return NULL;
+}
+
+static int ksm_memory_callback(struct notifier_block *self,
+ unsigned long action, void *arg)
+{
+ struct memory_notify *mn = arg;
+ struct stable_node *stable_node;
+
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ /*
+ * Keep it very simple for now: just lock out ksmd and
+ * MADV_UNMERGEABLE while any memory is going offline.
+ */
+ mutex_lock(&ksm_thread_mutex);
+ break;
+
+ case MEM_OFFLINE:
+ /*
+ * Most of the work is done by page migration; but there might
+ * be a few stable_nodes left over, still pointing to struct
+ * pages which have been offlined: prune those from the tree.
+ */
+ while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
+ mn->start_pfn + mn->nr_pages)) != NULL)
+ remove_node_from_stable_tree(stable_node);
+ /* fallthrough */
+
+ case MEM_CANCEL_OFFLINE:
+ mutex_unlock(&ksm_thread_mutex);
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
#ifdef CONFIG_SYSFS
/*
* This all compiles without CONFIG_SYSFS, but is a waste of space.
@@ -1551,8 +1839,8 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
/*
* KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
* KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
- * breaking COW to free the unswappable pages_shared (but leaves
- * mm_slots on the list for when ksmd may be set running again).
+ * breaking COW to free the pages_shared (but leaves mm_slots
+ * on the list for when ksmd may be set running again).
*/
mutex_lock(&ksm_thread_mutex);
@@ -1577,29 +1865,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
}
KSM_ATTR(run);
-static ssize_t max_kernel_pages_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- int err;
- unsigned long nr_pages;
-
- err = strict_strtoul(buf, 10, &nr_pages);
- if (err)
- return -EINVAL;
-
- ksm_max_kernel_pages = nr_pages;
-
- return count;
-}
-
-static ssize_t max_kernel_pages_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
-}
-KSM_ATTR(max_kernel_pages);
-
static ssize_t pages_shared_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -1649,7 +1914,6 @@ static struct attribute *ksm_attrs[] = {
&sleep_millisecs_attr.attr,
&pages_to_scan_attr.attr,
&run_attr.attr,
- &max_kernel_pages_attr.attr,
&pages_shared_attr.attr,
&pages_sharing_attr.attr,
&pages_unshared_attr.attr,
@@ -1669,8 +1933,6 @@ static int __init ksm_init(void)
struct task_struct *ksm_thread;
int err;
- ksm_max_kernel_pages = totalram_pages / 4;
-
err = ksm_slab_init();
if (err)
goto out;
@@ -1698,6 +1960,13 @@ static int __init ksm_init(void)
#endif /* CONFIG_SYSFS */
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ /*
+ * Choose a high priority since the callback takes ksm_thread_mutex:
+ * later callbacks could only be taking locks which nest within that.
+ */
+ hotplug_memory_notifier(ksm_memory_callback, 100);
+#endif
return 0;
out_free2:
diff --git a/mm/madvise.c b/mm/madvise.c
index 35b1479b7c9..319528b8db7 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -9,6 +9,7 @@
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
+#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>
@@ -222,7 +223,7 @@ static long madvise_remove(struct vm_area_struct *vma,
/*
* Error injection support for memory error handling.
*/
-static int madvise_hwpoison(unsigned long start, unsigned long end)
+static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
int ret = 0;
@@ -230,15 +231,21 @@ static int madvise_hwpoison(unsigned long start, unsigned long end)
return -EPERM;
for (; start < end; start += PAGE_SIZE) {
struct page *p;
- int ret = get_user_pages(current, current->mm, start, 1,
- 0, 0, &p, NULL);
+ int ret = get_user_pages_fast(start, 1, 0, &p);
if (ret != 1)
return ret;
+ if (bhv == MADV_SOFT_OFFLINE) {
+ printk(KERN_INFO "Soft offlining page %lx at %lx\n",
+ page_to_pfn(p), start);
+ ret = soft_offline_page(p, MF_COUNT_INCREASED);
+ if (ret)
+ break;
+ continue;
+ }
printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
page_to_pfn(p), start);
/* Ignore return value for now */
- __memory_failure(page_to_pfn(p), 0, 1);
- put_page(p);
+ __memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
}
return ret;
}
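The hunk above teaches the error-injection path that MADV_SOFT_OFFLINE should migrate the data away and take the backing page offline, rather than injecting a hard failure as MADV_HWPOISON does. A rough userspace example of exercising that interface follows; it needs CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE, and the fallback #define of 101 is taken from the kernel headers this series extends (treat it as an assumption if your headers differ).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* value used by the kernel's mman headers */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf;

	if (posix_memalign((void **)&buf, page, page))
		return 1;
	memset(buf, 0x5a, page);	/* fault the page in */

	if (madvise(buf, page, MADV_SOFT_OFFLINE))
		perror("madvise(MADV_SOFT_OFFLINE)");
	else
		printf("page soft-offlined, data still intact: %x\n", buf[0]);

	free(buf);
	return 0;
}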
@@ -335,8 +342,8 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
size_t len;
#ifdef CONFIG_MEMORY_FAILURE
- if (behavior == MADV_HWPOISON)
- return madvise_hwpoison(start, start+len_in);
+ if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
+ return madvise_hwpoison(behavior, start, start+len_in);
#endif
if (!madvise_behavior_valid(behavior))
return error;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c31a310aa14..488b644e0e8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -38,6 +38,7 @@
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
+#include <linux/cpu.h>
#include "internal.h"
#include <asm/uaccess.h>
@@ -54,7 +55,6 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
#define do_swap_account (0)
#endif
-static DEFINE_MUTEX(memcg_tasklist); /* can be hold under cgroup_mutex */
#define SOFTLIMIT_EVENTS_THRESH (1000)
/*
@@ -66,7 +66,7 @@ enum mem_cgroup_stat_index {
*/
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_MAPPED_FILE, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */
@@ -275,6 +275,7 @@ enum charge_type {
static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
+static void drain_all_stock_async(void);
static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -282,6 +283,11 @@ mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}
+struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+{
+ return &mem->css;
+}
+
static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
@@ -758,7 +764,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
task_unlock(task);
if (!curr)
return 0;
- if (curr->use_hierarchy)
+ /*
+ * We should check use_hierarchy of "mem", not "curr". Checking
+ * use_hierarchy of "curr" here would make this function return true if
+ * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
+ * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
+ */
+ if (mem->use_hierarchy)
ret = css_is_ancestor(&curr->css, &mem->css);
else
ret = (curr == mem);
@@ -1007,7 +1019,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
static char memcg_name[PATH_MAX];
int ret;
- if (!memcg)
+ if (!memcg || !p)
return;
@@ -1137,6 +1149,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
victim = mem_cgroup_select_victim(root_mem);
if (victim == root_mem) {
loop++;
+ if (loop >= 1)
+ drain_all_stock_async();
if (loop >= 2) {
/*
* If we have not been able to reclaim
@@ -1223,7 +1237,7 @@ static void record_last_oom(struct mem_cgroup *mem)
* Currently used to update mapped file statistics, but the routine can be
* generalized to update other statistics as well.
*/
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+void mem_cgroup_update_file_mapped(struct page *page, int val)
{
struct mem_cgroup *mem;
struct mem_cgroup_stat *stat;
@@ -1231,9 +1245,6 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
int cpu;
struct page_cgroup *pc;
- if (!page_is_file_cache(page))
- return;
-
pc = lookup_page_cgroup(page);
if (unlikely(!pc))
return;
@@ -1253,12 +1264,139 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
stat = &mem->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
done:
unlock_page_cgroup(pc);
}
/*
+ * Size of the first charge trial. "32" comes from vmscan.c's magic value.
+ * TODO: it may be necessary to use larger batches on big-iron machines.
+ */
+#define CHARGE_SIZE (32 * PAGE_SIZE)
+struct memcg_stock_pcp {
+ struct mem_cgroup *cached; /* this is never the root cgroup */
+ int charge;
+ struct work_struct work;
+};
+static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static atomic_t memcg_drain_count;
+
+/*
+ * Try to consume stocked charge on this cpu. If successful, PAGE_SIZE is
+ * consumed from the local stock and true is returned. If the stock is 0, or
+ * holds charges from a cgroup other than the current target, false is
+ * returned and the stock will be refilled later.
+ */
+static bool consume_stock(struct mem_cgroup *mem)
+{
+ struct memcg_stock_pcp *stock;
+ bool ret = true;
+
+ stock = &get_cpu_var(memcg_stock);
+ if (mem == stock->cached && stock->charge)
+ stock->charge -= PAGE_SIZE;
+ else /* need to call res_counter_charge */
+ ret = false;
+ put_cpu_var(memcg_stock);
+ return ret;
+}
+
+/*
+ * Return the stock cached in the percpu area to the res_counter and reset
+ * the cached information.
+ */
+static void drain_stock(struct memcg_stock_pcp *stock)
+{
+ struct mem_cgroup *old = stock->cached;
+
+ if (stock->charge) {
+ res_counter_uncharge(&old->res, stock->charge);
+ if (do_swap_account)
+ res_counter_uncharge(&old->memsw, stock->charge);
+ }
+ stock->cached = NULL;
+ stock->charge = 0;
+}
+
+/*
+ * This must be called with preemption disabled, or by a thread which is
+ * pinned to the local cpu.
+ */
+static void drain_local_stock(struct work_struct *dummy)
+{
+ struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+ drain_stock(stock);
+}
+
+/*
+ * Cache charges (val) taken from the res_counter in the local per-cpu area.
+ * They will be consumed later by consume_stock().
+ */
+static void refill_stock(struct mem_cgroup *mem, int val)
+{
+ struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+
+ if (stock->cached != mem) { /* reset if necessary */
+ drain_stock(stock);
+ stock->cached = mem;
+ }
+ stock->charge += val;
+ put_cpu_var(memcg_stock);
+}
+
+/*
+ * Try to drain stocked charges on other cpus. This function is asynchronous
+ * and just schedules a work item per cpu to drain locally on each cpu. The
+ * caller can expect some charges to be returned to the res_counter later,
+ * but cannot wait for that to happen.
+ */
+static void drain_all_stock_async(void)
+{
+ int cpu;
+ /* This function schedules "drain" asynchronously. The result of "drain"
+ * is not handled directly by callers, so if someone is already draining,
+ * we do not need to schedule another drain. The WORK_STRUCT_PENDING check
+ * in queue_work_on() catches any race; we only do a loose check here.
+ */
+ if (atomic_read(&memcg_drain_count))
+ return;
+ /* Notify other cpus that system-wide "drain" is running */
+ atomic_inc(&memcg_drain_count);
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+ schedule_work_on(cpu, &stock->work);
+ }
+ put_online_cpus();
+ atomic_dec(&memcg_drain_count);
+ /* We don't wait for flush_work */
+}
+
+/* This is a synchronous drain interface. */
+static void drain_all_stock_sync(void)
+{
+ /* called when force_empty is called */
+ atomic_inc(&memcg_drain_count);
+ schedule_on_each_cpu(drain_local_stock);
+ atomic_dec(&memcg_drain_count);
+}
+
+static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+ unsigned long action,
+ void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct memcg_stock_pcp *stock;
+
+ if (action != CPU_DEAD)
+ return NOTIFY_OK;
+ stock = &per_cpu(memcg_stock, cpu);
+ drain_stock(stock);
+ return NOTIFY_OK;
+}
+
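The memcg_stock code above amortizes res_counter locking: each cpu keeps a small pre-charged reserve that the charge fast path consumes without touching the shared counter. The following is only a minimal user-space sketch of the same pattern, with hypothetical names and pthreads standing in for the kernel's per-cpu machinery:

#include <pthread.h>
#include <stdbool.h>

#define STOCK_REFILL 32			/* batch taken from the shared counter */

struct shared_counter {
	pthread_mutex_t lock;
	long usage, limit;
};

struct local_stock {
	struct shared_counter *cached;	/* owner of the locally cached units */
	int charge;			/* units still available locally */
};

static __thread struct local_stock stock;	/* per-thread stand-in for per-cpu data */

/* Return any leftover units to their owner before caching a new one. */
static void drain_stock(void)
{
	if (stock.cached && stock.charge) {
		pthread_mutex_lock(&stock.cached->lock);
		stock.cached->usage -= stock.charge;
		pthread_mutex_unlock(&stock.cached->lock);
	}
	stock.cached = NULL;
	stock.charge = 0;
}

/* Fast path: consume one unit from the local stock without the shared lock. */
static bool consume_stock(struct shared_counter *sc)
{
	if (stock.cached == sc && stock.charge > 0) {
		stock.charge--;
		return true;
	}
	return false;
}

/* Slow path: charge a whole batch against the shared counter, keep the rest. */
static bool try_charge(struct shared_counter *sc)
{
	if (consume_stock(sc))
		return true;

	pthread_mutex_lock(&sc->lock);
	if (sc->usage + STOCK_REFILL > sc->limit) {
		pthread_mutex_unlock(&sc->lock);
		return false;		/* caller would fall back to reclaim/OOM */
	}
	sc->usage += STOCK_REFILL;
	pthread_mutex_unlock(&sc->lock);

	drain_stock();			/* don't leak a previous owner's units */
	stock.cached = sc;
	stock.charge = STOCK_REFILL - 1;	/* one unit is consumed right now */
	return true;
}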
+/*
* Unlike exported interface, "oom" parameter is added. if oom==true,
* oom-killer can be invoked.
*/
@@ -1269,6 +1407,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
struct mem_cgroup *mem, *mem_over_limit;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct res_counter *fail_res;
+ int csize = CHARGE_SIZE;
if (unlikely(test_thread_flag(TIF_MEMDIE))) {
/* Don't account this! */
@@ -1293,23 +1432,25 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
return 0;
VM_BUG_ON(css_is_removed(&mem->css));
+ if (mem_cgroup_is_root(mem))
+ goto done;
while (1) {
int ret = 0;
unsigned long flags = 0;
- if (mem_cgroup_is_root(mem))
- goto done;
- ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+ if (consume_stock(mem))
+ goto charged;
+
+ ret = res_counter_charge(&mem->res, csize, &fail_res);
if (likely(!ret)) {
if (!do_swap_account)
break;
- ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
- &fail_res);
+ ret = res_counter_charge(&mem->memsw, csize, &fail_res);
if (likely(!ret))
break;
/* mem+swap counter fails */
- res_counter_uncharge(&mem->res, PAGE_SIZE);
+ res_counter_uncharge(&mem->res, csize);
flags |= MEM_CGROUP_RECLAIM_NOSWAP;
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
memsw);
@@ -1318,6 +1459,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
res);
+ /* reduce request size and retry */
+ if (csize > PAGE_SIZE) {
+ csize = PAGE_SIZE;
+ continue;
+ }
if (!(gfp_mask & __GFP_WAIT))
goto nomem;
@@ -1339,14 +1485,15 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
if (!nr_retries--) {
if (oom) {
- mutex_lock(&memcg_tasklist);
mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
- mutex_unlock(&memcg_tasklist);
record_last_oom(mem_over_limit);
}
goto nomem;
}
}
+ if (csize > PAGE_SIZE)
+ refill_stock(mem, csize - PAGE_SIZE);
+charged:
/*
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
* if they exceeds softlimit.
@@ -1361,6 +1508,21 @@ nomem:
}
/*
+ * Sometimes we have to undo a charge we got from try_charge().
+ * This function does the uncharge and drops the css refcount taken
+ * by try_charge().
+ */
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+{
+ if (!mem_cgroup_is_root(mem)) {
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (do_swap_account)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ }
+ css_put(&mem->css);
+}
+
+/*
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller must check css_is_removed() or similar if that
 * is a concern. (dropping a refcnt from swap can be called against a removed
@@ -1379,25 +1541,22 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
return container_of(css, struct mem_cgroup, css);
}
-static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem = NULL;
struct page_cgroup *pc;
unsigned short id;
swp_entry_t ent;
VM_BUG_ON(!PageLocked(page));
- if (!PageSwapCache(page))
- return NULL;
-
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
if (mem && !css_tryget(&mem->css))
mem = NULL;
- } else {
+ } else if (PageSwapCache(page)) {
ent.val = page_private(page);
id = lookup_swap_cgroup(ent);
rcu_read_lock();
@@ -1426,12 +1585,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
- if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- }
- css_put(&mem->css);
+ mem_cgroup_cancel_charge(mem);
return;
}
@@ -1464,27 +1618,22 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
}
/**
- * mem_cgroup_move_account - move account of the page
+ * __mem_cgroup_move_account - move account of the page
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
*
 * The caller must confirm the following.
* - page is not on LRU (isolate_page() is useful.)
- *
- * returns 0 at success,
- * returns -EBUSY when lock is busy or "pc" is unstable.
+ * - the pc is locked, used, and ->mem_cgroup points to @from.
*
* This function does "uncharge" from old cgroup but doesn't do "charge" to
* new cgroup. It should be done by a caller.
*/
-static int mem_cgroup_move_account(struct page_cgroup *pc,
+static void __mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *to)
{
- struct mem_cgroup_per_zone *from_mz, *to_mz;
- int nid, zid;
- int ret = -EBUSY;
struct page *page;
int cpu;
struct mem_cgroup_stat *stat;
@@ -1492,38 +1641,27 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
-
- nid = page_cgroup_nid(pc);
- zid = page_cgroup_zid(pc);
- from_mz = mem_cgroup_zoneinfo(from, nid, zid);
- to_mz = mem_cgroup_zoneinfo(to, nid, zid);
-
- if (!trylock_page_cgroup(pc))
- return ret;
-
- if (!PageCgroupUsed(pc))
- goto out;
-
- if (pc->mem_cgroup != from)
- goto out;
+ VM_BUG_ON(!PageCgroupLocked(pc));
+ VM_BUG_ON(!PageCgroupUsed(pc));
+ VM_BUG_ON(pc->mem_cgroup != from);
if (!mem_cgroup_is_root(from))
res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
page = pc->page;
- if (page_is_file_cache(page) && page_mapped(page)) {
+ if (page_mapped(page) && !PageAnon(page)) {
cpu = smp_processor_id();
/* Update mapped_file data for mem_cgroup "from" */
stat = &from->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
-1);
/* Update mapped_file data for mem_cgroup "to" */
stat = &to->stat;
cpustat = &stat->cpustat[cpu];
- __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+ __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
1);
}
@@ -1534,15 +1672,28 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
css_get(&to->css);
pc->mem_cgroup = to;
mem_cgroup_charge_statistics(to, pc, true);
- ret = 0;
-out:
- unlock_page_cgroup(pc);
/*
 * We charge against "to", which may not have any tasks. Then "to"
 * can be under rmdir(). But in the current implementation the only
 * caller of this function is force_empty(), and it is guaranteed that
 * "to" is never removed. So we don't check the rmdir status here.
*/
+}
+
+/*
+ * check whether the @pc is valid for moving account and call
+ * __mem_cgroup_move_account()
+ */
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+ struct mem_cgroup *from, struct mem_cgroup *to)
+{
+ int ret = -EINVAL;
+ lock_page_cgroup(pc);
+ if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
+ __mem_cgroup_move_account(pc, from, to);
+ ret = 0;
+ }
+ unlock_page_cgroup(pc);
return ret;
}
@@ -1564,45 +1715,27 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
if (!pcg)
return -EINVAL;
+ ret = -EBUSY;
+ if (!get_page_unless_zero(page))
+ goto out;
+ if (isolate_lru_page(page))
+ goto put;
parent = mem_cgroup_from_cont(pcg);
-
-
ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
if (ret || !parent)
- return ret;
-
- if (!get_page_unless_zero(page)) {
- ret = -EBUSY;
- goto uncharge;
- }
-
- ret = isolate_lru_page(page);
-
- if (ret)
- goto cancel;
+ goto put_back;
ret = mem_cgroup_move_account(pc, child, parent);
-
+ if (!ret)
+ css_put(&parent->css); /* drop extra refcnt by try_charge() */
+ else
+ mem_cgroup_cancel_charge(parent); /* does css_put */
+put_back:
putback_lru_page(page);
- if (!ret) {
- put_page(page);
- /* drop extra refcnt by try_charge() */
- css_put(&parent->css);
- return 0;
- }
-
-cancel:
+put:
put_page(page);
-uncharge:
- /* drop extra refcnt by try_charge() */
- css_put(&parent->css);
- /* uncharge if move fails */
- if (!mem_cgroup_is_root(parent)) {
- res_counter_uncharge(&parent->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&parent->memsw, PAGE_SIZE);
- }
+out:
return ret;
}
@@ -1737,12 +1870,13 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
goto charge_cur_mm;
/*
* A racing thread's fault, or swapoff, may have already updated
- * the pte, and even removed page from swap cache: return success
- * to go on to do_swap_page()'s pte_same() test, which should fail.
+ * the pte, and even removed page from swap cache: in those cases
+ * do_swap_page()'s pte_same() test will fail; but there's also a
+ * KSM case which does need to charge the page.
*/
if (!PageSwapCache(page))
- return 0;
- mem = try_get_mem_cgroup_from_swapcache(page);
+ goto charge_cur_mm;
+ mem = try_get_mem_cgroup_from_page(page);
if (!mem)
goto charge_cur_mm;
*ptr = mem;
@@ -1818,14 +1952,53 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
return;
if (!mem)
return;
- if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- }
- css_put(&mem->css);
+ mem_cgroup_cancel_charge(mem);
}
+static void
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+{
+ struct memcg_batch_info *batch = NULL;
+ bool uncharge_memsw = true;
+ /* If swapout, usage of swap doesn't decrease */
+ if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+ uncharge_memsw = false;
+ /*
+ * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
+ * In those cases all pages freed continuously can be expected to be in
+ * the same cgroup, and we have a chance to coalesce the uncharges.
+ * But we uncharge one by one if the task is being killed by OOM (TIF_MEMDIE),
+ * because we want to do the uncharge as soon as possible.
+ */
+ if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
+ goto direct_uncharge;
+
+ batch = &current->memcg_batch;
+ /*
+ * Usually we do css_get() when we remember a memcg pointer.
+ * But in this case we keep res->usage until the end of a series of
+ * uncharges, so it is ok to ignore the memcg's refcount.
+ */
+ if (!batch->memcg)
+ batch->memcg = mem;
+ /*
+ * In the typical case batch->memcg == mem, which means we can
+ * merge a series of uncharges into a single res_counter uncharge.
+ * If not, we uncharge the res_counter one by one.
+ */
+ if (batch->memcg != mem)
+ goto direct_uncharge;
+ /* remember freed charge and uncharge it later */
+ batch->bytes += PAGE_SIZE;
+ if (uncharge_memsw)
+ batch->memsw_bytes += PAGE_SIZE;
+ return;
+direct_uncharge:
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (uncharge_memsw)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ return;
+}
/*
* uncharge if !page_mapped(page)
@@ -1874,12 +2047,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
break;
}
- if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- if (do_swap_account &&
- (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
- res_counter_uncharge(&mem->memsw, PAGE_SIZE);
- }
+ if (!mem_cgroup_is_root(mem))
+ __do_uncharge(mem, ctype);
if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
mem_cgroup_swap_statistics(mem, true);
mem_cgroup_charge_statistics(mem, pc, false);
@@ -1925,6 +2094,50 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}
+/*
+ * uncharge_start/uncharge_end is called from unmap_page_range/invalidate/truncate.
+ * In those cases pages are freed continuously, and we can expect them to
+ * be in the same memcg. Each of those callers itself limits the number of
+ * pages freed at once, so uncharge_start/end() is invoked appropriately.
+ * These calls may be nested (made more than once in a context).
+ */
+
+void mem_cgroup_uncharge_start(void)
+{
+ current->memcg_batch.do_batch++;
+ /* Nested calls are allowed. */
+ if (current->memcg_batch.do_batch == 1) {
+ current->memcg_batch.memcg = NULL;
+ current->memcg_batch.bytes = 0;
+ current->memcg_batch.memsw_bytes = 0;
+ }
+}
+
+void mem_cgroup_uncharge_end(void)
+{
+ struct memcg_batch_info *batch = &current->memcg_batch;
+
+ if (!batch->do_batch)
+ return;
+
+ batch->do_batch--;
+ if (batch->do_batch) /* If stacked, do nothing. */
+ return;
+
+ if (!batch->memcg)
+ return;
+ /*
+ * This "batch->memcg" is valid without any css_get/put etc...
+ * because we hide the charges behind us.
+ */
+ if (batch->bytes)
+ res_counter_uncharge(&batch->memcg->res, batch->bytes);
+ if (batch->memsw_bytes)
+ res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+ /* forget this pointer (for sanity check) */
+ batch->memcg = NULL;
+}
+
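mem_cgroup_uncharge_start()/end() are meant to bracket loops that free many pages belonging to the same memcg, so that the res_counter is updated once per run instead of once per page. A hedged sketch of the expected calling pattern (the freeing loop and release_one_page() are hypothetical, not taken from this patch):

/*
 * Sketch only: how a zap/truncate-style path is expected to use the batch API.
 * release_one_page() is a hypothetical stand-in for the real per-page work.
 */
static void release_range(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();		/* open a batch; nesting is allowed */
	for (i = 0; i < nr; i++)
		release_one_page(pages[i]);	/* each uncharge is coalesced */
	mem_cgroup_uncharge_end();		/* one res_counter update for the run */
}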
#ifdef CONFIG_SWAP
/*
* called after __delete_from_swap_cache() and drop "page" account.
@@ -2100,7 +2313,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
unsigned long long val)
{
int retry_count;
- int progress;
u64 memswlimit;
int ret = 0;
int children = mem_cgroup_count_children(memcg);
@@ -2144,8 +2356,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
if (!ret)
break;
- progress = mem_cgroup_hierarchical_reclaim(memcg, NULL,
- GFP_KERNEL,
+ mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
MEM_CGROUP_RECLAIM_SHRINK);
curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
/* Usage is reduced ? */
@@ -2384,6 +2595,7 @@ move_account:
goto out;
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
+ drain_all_stock_sync();
ret = 0;
for_each_node_state(node, N_HIGH_MEMORY) {
for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
@@ -2541,6 +2753,7 @@ static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
val += idx_val;
mem_cgroup_get_recursive_idx_stat(mem,
MEM_CGROUP_STAT_SWAPOUT, &idx_val);
+ val += idx_val;
val <<= PAGE_SHIFT;
} else
val = res_counter_read_u64(&mem->memsw, name);
@@ -2660,7 +2873,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
enum {
MCS_CACHE,
MCS_RSS,
- MCS_MAPPED_FILE,
+ MCS_FILE_MAPPED,
MCS_PGPGIN,
MCS_PGPGOUT,
MCS_SWAP,
@@ -2704,8 +2917,8 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
s->stat[MCS_CACHE] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
s->stat[MCS_RSS] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
- s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+ val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+ s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
s->stat[MCS_PGPGIN] += val;
val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
@@ -3097,11 +3310,18 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
/* root ? */
if (cont->parent == NULL) {
+ int cpu;
enable_swap_cgroup();
parent = NULL;
root_mem_cgroup = mem;
if (mem_cgroup_soft_limit_tree_init())
goto free_out;
+ for_each_possible_cpu(cpu) {
+ struct memcg_stock_pcp *stock =
+ &per_cpu(memcg_stock, cpu);
+ INIT_WORK(&stock->work, drain_local_stock);
+ }
+ hotcpu_notifier(memcg_stock_cpu_callback, 0);
} else {
parent = mem_cgroup_from_cont(cont->parent);
@@ -3170,12 +3390,10 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct task_struct *p,
bool threadgroup)
{
- mutex_lock(&memcg_tasklist);
/*
* FIXME: It's better to move charges of this process from old
* memcg to new memcg. But it's just on TODO-List now.
*/
- mutex_unlock(&memcg_tasklist);
}
struct cgroup_subsys mem_cgroup_subsys = {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1ac49fef95a..6a0466ed5bf 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -34,12 +34,16 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
+#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
+#include <linux/migrate.h>
+#include <linux/page-isolation.h>
+#include <linux/suspend.h>
#include "internal.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -48,6 +52,120 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
+u32 hwpoison_filter_enable = 0;
+u32 hwpoison_filter_dev_major = ~0U;
+u32 hwpoison_filter_dev_minor = ~0U;
+u64 hwpoison_filter_flags_mask;
+u64 hwpoison_filter_flags_value;
+EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
+EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
+EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
+EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
+EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
+
+static int hwpoison_filter_dev(struct page *p)
+{
+ struct address_space *mapping;
+ dev_t dev;
+
+ if (hwpoison_filter_dev_major == ~0U &&
+ hwpoison_filter_dev_minor == ~0U)
+ return 0;
+
+ /*
+ * page_mapping() does not accept slab page
+ */
+ if (PageSlab(p))
+ return -EINVAL;
+
+ mapping = page_mapping(p);
+ if (mapping == NULL || mapping->host == NULL)
+ return -EINVAL;
+
+ dev = mapping->host->i_sb->s_dev;
+ if (hwpoison_filter_dev_major != ~0U &&
+ hwpoison_filter_dev_major != MAJOR(dev))
+ return -EINVAL;
+ if (hwpoison_filter_dev_minor != ~0U &&
+ hwpoison_filter_dev_minor != MINOR(dev))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hwpoison_filter_flags(struct page *p)
+{
+ if (!hwpoison_filter_flags_mask)
+ return 0;
+
+ if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
+ hwpoison_filter_flags_value)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+/*
+ * This allows stress tests to limit test scope to a collection of tasks
+ * by putting them under some memcg. This prevents killing unrelated/important
+ * processes such as /sbin/init. Note that the target task may share clean
+ * pages with init (eg. libc text), which is harmless. If the target task
+ * shares _dirty_ pages with another task B, the test scheme must make sure B
+ * is also included in the memcg. Finally, due to race conditions this filter
+ * can only guarantee that the page either belongs to the memcg tasks, or is
+ * a freed page.
+ */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+u64 hwpoison_filter_memcg;
+EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
+static int hwpoison_filter_task(struct page *p)
+{
+ struct mem_cgroup *mem;
+ struct cgroup_subsys_state *css;
+ unsigned long ino;
+
+ if (!hwpoison_filter_memcg)
+ return 0;
+
+ mem = try_get_mem_cgroup_from_page(p);
+ if (!mem)
+ return -EINVAL;
+
+ css = mem_cgroup_css(mem);
+ /* root_mem_cgroup has NULL dentries */
+ if (!css->cgroup->dentry)
+ return -EINVAL;
+
+ ino = css->cgroup->dentry->d_inode->i_ino;
+ css_put(css);
+
+ if (ino != hwpoison_filter_memcg)
+ return -EINVAL;
+
+ return 0;
+}
+#else
+static int hwpoison_filter_task(struct page *p) { return 0; }
+#endif
+
+int hwpoison_filter(struct page *p)
+{
+ if (!hwpoison_filter_enable)
+ return 0;
+
+ if (hwpoison_filter_dev(p))
+ return -EINVAL;
+
+ if (hwpoison_filter_flags(p))
+ return -EINVAL;
+
+ if (hwpoison_filter_task(p))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hwpoison_filter);
+
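hwpoison_filter() lets error injection be scoped by device, page flags, and memcg, so stress tests avoid poisoning unrelated pages. A test harness might scope injection to its own memcg roughly as below; the debugfs paths are assumptions about the companion injector module, not something this hunk establishes:

/*
 * Sketch: scope hwpoison injection to the test's own memcg.
 * The debugfs paths are assumed, not defined by this diff.
 */
#include <stdio.h>
#include <sys/stat.h>

static int write_u64(const char *path, unsigned long long val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%llu\n", val);
	return fclose(f);
}

int scope_injection_to_memcg(const char *memcg_dir)
{
	struct stat st;

	/* hwpoison_filter_task() matches on the memcg directory's inode number. */
	if (stat(memcg_dir, &st))
		return -1;
	if (write_u64("/sys/kernel/debug/hwpoison/corrupt-filter-memcg", st.st_ino))
		return -1;
	return write_u64("/sys/kernel/debug/hwpoison/corrupt-filter-enable", 1);
}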
/*
* Send all the processes who have the page mapped an ``action optional''
* signal.
@@ -83,6 +201,36 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
}
/*
+ * When an unknown page type is encountered, drain as many buffers as possible
+ * in the hope of turning the page into an LRU or free page, which we can handle.
+ */
+void shake_page(struct page *p, int access)
+{
+ if (!PageSlab(p)) {
+ lru_add_drain_all();
+ if (PageLRU(p))
+ return;
+ drain_all_pages();
+ if (PageLRU(p) || is_free_buddy_page(p))
+ return;
+ }
+
+ /*
+ * Only call shrink_slab here (which would also
+ * shrink other caches) if access is not potentially fatal.
+ */
+ if (access) {
+ int nr;
+ do {
+ nr = shrink_slab(1000, GFP_KERNEL, 1000);
+ if (page_count(p) == 0)
+ break;
+ } while (nr > 10);
+ }
+}
+EXPORT_SYMBOL_GPL(shake_page);
+
+/*
* Kill all processes that have a poisoned page mapped and then isolate
* the page.
*
@@ -177,7 +325,6 @@ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
* In case something went wrong with munmapping
* make sure the process doesn't catch the
* signal and then access the memory. Just kill it.
- * the signal handlers
*/
if (fail || tk->addr_valid == 0) {
printk(KERN_ERR
@@ -314,33 +461,49 @@ static void collect_procs(struct page *page, struct list_head *tokill)
*/
enum outcome {
- FAILED, /* Error handling failed */
+ IGNORED, /* Error: cannot be handled */
+ FAILED, /* Error: handling failed */
DELAYED, /* Will be handled later */
- IGNORED, /* Error safely ignored */
RECOVERED, /* Successfully recovered */
};
static const char *action_name[] = {
+ [IGNORED] = "Ignored",
[FAILED] = "Failed",
[DELAYED] = "Delayed",
- [IGNORED] = "Ignored",
[RECOVERED] = "Recovered",
};
/*
- * Error hit kernel page.
- * Do nothing, try to be lucky and not touch this instead. For a few cases we
- * could be more sophisticated.
+ * XXX: It is possible that a page is isolated from LRU cache,
+ * and then kept in the swap cache or fails to be removed from the page cache.
+ * The page count will stop it from being freed by unpoison.
+ * Stress tests should be aware of this memory leak problem.
*/
-static int me_kernel(struct page *p, unsigned long pfn)
+static int delete_from_lru_cache(struct page *p)
{
- return DELAYED;
+ if (!isolate_lru_page(p)) {
+ /*
+ * Clear sensitive page flags, so that the buddy system won't
+ * complain when the page is unpoisoned and freed.
+ */
+ ClearPageActive(p);
+ ClearPageUnevictable(p);
+ /*
+ * drop the page count elevated by isolate_lru_page()
+ */
+ page_cache_release(p);
+ return 0;
+ }
+ return -EIO;
}
/*
- * Already poisoned page.
+ * Error hit kernel page.
+ * Do nothing, try to be lucky and not touch this instead. For a few cases we
+ * could be more sophisticated.
*/
-static int me_ignore(struct page *p, unsigned long pfn)
+static int me_kernel(struct page *p, unsigned long pfn)
{
return IGNORED;
}
@@ -355,14 +518,6 @@ static int me_unknown(struct page *p, unsigned long pfn)
}
/*
- * Free memory
- */
-static int me_free(struct page *p, unsigned long pfn)
-{
- return DELAYED;
-}
-
-/*
* Clean (or cleaned) page cache page.
*/
static int me_pagecache_clean(struct page *p, unsigned long pfn)
@@ -371,6 +526,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
int ret = FAILED;
struct address_space *mapping;
+ delete_from_lru_cache(p);
+
/*
* For anonymous pages we're done the only reference left
* should be the one m_f() holds.
@@ -500,14 +657,20 @@ static int me_swapcache_dirty(struct page *p, unsigned long pfn)
/* Trigger EIO in shmem: */
ClearPageUptodate(p);
- return DELAYED;
+ if (!delete_from_lru_cache(p))
+ return DELAYED;
+ else
+ return FAILED;
}
static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
delete_from_swap_cache(p);
- return RECOVERED;
+ if (!delete_from_lru_cache(p))
+ return RECOVERED;
+ else
+ return FAILED;
}
/*
@@ -550,7 +713,6 @@ static int me_huge_page(struct page *p, unsigned long pfn)
#define tail (1UL << PG_tail)
#define compound (1UL << PG_compound)
#define slab (1UL << PG_slab)
-#define buddy (1UL << PG_buddy)
#define reserved (1UL << PG_reserved)
static struct page_state {
@@ -559,8 +721,11 @@ static struct page_state {
char *msg;
int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
- { reserved, reserved, "reserved kernel", me_ignore },
- { buddy, buddy, "free kernel", me_free },
+ { reserved, reserved, "reserved kernel", me_kernel },
+ /*
+ * free pages are specially detected outside this table:
+ * PG_buddy pages make up only a small fraction of all free pages.
+ */
/*
* Could in theory check if slab page is free or if we can drop
@@ -582,14 +747,11 @@ static struct page_state {
{ unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
{ unevict, unevict, "unevictable LRU", me_pagecache_clean},
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
{ mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty },
{ mlock, mlock, "mlocked LRU", me_pagecache_clean },
-#endif
{ lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
{ lru|dirty, lru, "clean LRU", me_pagecache_clean },
- { swapbacked, swapbacked, "anonymous", me_pagecache_clean },
/*
* Catchall entry: must be at end.
@@ -597,20 +759,31 @@ static struct page_state {
{ 0, 0, "unknown page state", me_unknown },
};
+#undef dirty
+#undef sc
+#undef unevict
+#undef mlock
+#undef writeback
+#undef lru
+#undef swapbacked
+#undef head
+#undef tail
+#undef compound
+#undef slab
+#undef reserved
+
static void action_result(unsigned long pfn, char *msg, int result)
{
- struct page *page = NULL;
- if (pfn_valid(pfn))
- page = pfn_to_page(pfn);
+ struct page *page = pfn_to_page(pfn);
printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
pfn,
- page && PageDirty(page) ? "dirty " : "",
+ PageDirty(page) ? "dirty " : "",
msg, action_name[result]);
}
static int page_action(struct page_state *ps, struct page *p,
- unsigned long pfn, int ref)
+ unsigned long pfn)
{
int result;
int count;
@@ -618,18 +791,22 @@ static int page_action(struct page_state *ps, struct page *p,
result = ps->action(p, pfn);
action_result(pfn, ps->msg, result);
- count = page_count(p) - 1 - ref;
- if (count != 0)
+ count = page_count(p) - 1;
+ if (ps->action == me_swapcache_dirty && result == DELAYED)
+ count--;
+ if (count != 0) {
printk(KERN_ERR
"MCE %#lx: %s page still referenced by %d users\n",
pfn, ps->msg, count);
+ result = FAILED;
+ }
/* Could do more checks here if page looks ok */
/*
* Could adjust zone counters here to correct for the missing page.
*/
- return result == RECOVERED ? 0 : -EBUSY;
+ return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}
#define N_UNMAP_TRIES 5
@@ -638,7 +815,7 @@ static int page_action(struct page_state *ps, struct page *p,
* Do all that is necessary to remove user space mappings. Unmap
* the pages and send SIGBUS to the processes if the data was dirty.
*/
-static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
+static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
int trapno)
{
enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
@@ -648,15 +825,18 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
int i;
int kill = 1;
- if (PageReserved(p) || PageCompound(p) || PageSlab(p) || PageKsm(p))
- return;
+ if (PageReserved(p) || PageSlab(p))
+ return SWAP_SUCCESS;
/*
* This check implies we don't kill processes if their pages
* are in the swap cache early. Those are always late kills.
*/
if (!page_mapped(p))
- return;
+ return SWAP_SUCCESS;
+
+ if (PageCompound(p) || PageKsm(p))
+ return SWAP_FAIL;
if (PageSwapCache(p)) {
printk(KERN_ERR
@@ -667,6 +847,8 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
/*
* Propagate the dirty bit from PTEs to struct page first, because we
* need this to decide if we should kill or just drop the page.
+ * XXX: the dirty test could be racy: set_page_dirty() may not always
+ * be called inside page lock (it's recommended but not enforced).
*/
mapping = page_mapping(p);
if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
@@ -718,11 +900,12 @@ static void hwpoison_user_mappings(struct page *p, unsigned long pfn,
*/
kill_procs_ao(&tokill, !!PageDirty(p), trapno,
ret != SWAP_SUCCESS, pfn);
+
+ return ret;
}
-int __memory_failure(unsigned long pfn, int trapno, int ref)
+int __memory_failure(unsigned long pfn, int trapno, int flags)
{
- unsigned long lru_flag;
struct page_state *ps;
struct page *p;
int res;
@@ -731,13 +914,15 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
panic("Memory failure from trap %d on page %lx", trapno, pfn);
if (!pfn_valid(pfn)) {
- action_result(pfn, "memory outside kernel control", IGNORED);
- return -EIO;
+ printk(KERN_ERR
+ "MCE %#lx: memory outside kernel control\n",
+ pfn);
+ return -ENXIO;
}
p = pfn_to_page(pfn);
if (TestSetPageHWPoison(p)) {
- action_result(pfn, "already hardware poisoned", IGNORED);
+ printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
return 0;
}
@@ -754,9 +939,15 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
* In fact it's dangerous to directly bump up page count from 0,
* that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
*/
- if (!get_page_unless_zero(compound_head(p))) {
- action_result(pfn, "free or high order kernel", IGNORED);
- return PageBuddy(compound_head(p)) ? 0 : -EBUSY;
+ if (!(flags & MF_COUNT_INCREASED) &&
+ !get_page_unless_zero(compound_head(p))) {
+ if (is_free_buddy_page(p)) {
+ action_result(pfn, "free buddy", DELAYED);
+ return 0;
+ } else {
+ action_result(pfn, "high order kernel", IGNORED);
+ return -EBUSY;
+ }
}
/*
@@ -768,14 +959,19 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
* walked by the page reclaim code, however that's not a big loss.
*/
if (!PageLRU(p))
- lru_add_drain_all();
- lru_flag = p->flags & lru;
- if (isolate_lru_page(p)) {
+ shake_page(p, 0);
+ if (!PageLRU(p)) {
+ /*
+ * shake_page could have turned it free.
+ */
+ if (is_free_buddy_page(p)) {
+ action_result(pfn, "free buddy, 2nd try", DELAYED);
+ return 0;
+ }
action_result(pfn, "non LRU", IGNORED);
put_page(p);
return -EBUSY;
}
- page_cache_release(p);
/*
* Lock the page and wait for writeback to finish.
@@ -783,26 +979,48 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
* and in many cases impossible, so we just avoid it here.
*/
lock_page_nosync(p);
+
+ /*
+ * unpoison always clear PG_hwpoison inside page lock
+ */
+ if (!PageHWPoison(p)) {
+ printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+ res = 0;
+ goto out;
+ }
+ if (hwpoison_filter(p)) {
+ if (TestClearPageHWPoison(p))
+ atomic_long_dec(&mce_bad_pages);
+ unlock_page(p);
+ put_page(p);
+ return 0;
+ }
+
wait_on_page_writeback(p);
/*
* Now take care of user space mappings.
+ * Abort on fail: __remove_from_page_cache() assumes unmapped page.
*/
- hwpoison_user_mappings(p, pfn, trapno);
+ if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
+ printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+ res = -EBUSY;
+ goto out;
+ }
/*
* Torn down by someone else?
*/
- if ((lru_flag & lru) && !PageSwapCache(p) && p->mapping == NULL) {
+ if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
action_result(pfn, "already truncated LRU", IGNORED);
- res = 0;
+ res = -EBUSY;
goto out;
}
res = -EBUSY;
for (ps = error_states;; ps++) {
- if (((p->flags | lru_flag)& ps->mask) == ps->res) {
- res = page_action(ps, p, pfn, ref);
+ if ((p->flags & ps->mask) == ps->res) {
+ res = page_action(ps, p, pfn);
break;
}
}
@@ -833,3 +1051,235 @@ void memory_failure(unsigned long pfn, int trapno)
{
__memory_failure(pfn, trapno, 0);
}
+
+/**
+ * unpoison_memory - Unpoison a previously poisoned page
+ * @pfn: Page number of the to be unpoisoned page
+ *
+ * Software-unpoison a page that has been poisoned by
+ * memory_failure() earlier.
+ *
+ * This is done only at the software level, so it only works
+ * for Linux-injected failures, not real hardware failures.
+ *
+ * Returns 0 for success, otherwise -errno.
+ */
+int unpoison_memory(unsigned long pfn)
+{
+ struct page *page;
+ struct page *p;
+ int freeit = 0;
+
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+
+ p = pfn_to_page(pfn);
+ page = compound_head(p);
+
+ if (!PageHWPoison(p)) {
+ pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);
+ return 0;
+ }
+
+ if (!get_page_unless_zero(page)) {
+ if (TestClearPageHWPoison(p))
+ atomic_long_dec(&mce_bad_pages);
+ pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
+ return 0;
+ }
+
+ lock_page_nosync(page);
+ /*
+ * This test is racy because PG_hwpoison is set outside of the page lock.
+ * That's acceptable because it won't trigger a kernel panic. Instead,
+ * the PG_hwpoison page will be caught and isolated on the entrance to
+ * the free buddy page pool.
+ */
+ if (TestClearPageHWPoison(p)) {
+ pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
+ atomic_long_dec(&mce_bad_pages);
+ freeit = 1;
+ }
+ unlock_page(page);
+
+ put_page(page);
+ if (freeit)
+ put_page(page);
+
+ return 0;
+}
+EXPORT_SYMBOL(unpoison_memory);
+
+static struct page *new_page(struct page *p, unsigned long private, int **x)
+{
+ int nid = page_to_nid(p);
+ return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+}
+
+/*
+ * Safely get reference count of an arbitrary page.
+ * Returns 0 for a free page, -EIO for a zero refcount page
+ * that is not free, and 1 for any other page type.
+ * For 1 the page is returned with increased page count, otherwise not.
+ */
+static int get_any_page(struct page *p, unsigned long pfn, int flags)
+{
+ int ret;
+
+ if (flags & MF_COUNT_INCREASED)
+ return 1;
+
+ /*
+ * lock_system_sleep() prevents a race with memory hotplug,
+ * because the isolation assumes there is only a single user.
+ * This is a big hammer; something finer grained would be nicer.
+ */
+ lock_system_sleep();
+
+ /*
+ * Isolate the page, so that it doesn't get reallocated if it
+ * was free.
+ */
+ set_migratetype_isolate(p);
+ if (!get_page_unless_zero(compound_head(p))) {
+ if (is_free_buddy_page(p)) {
+ pr_debug("get_any_page: %#lx free buddy page\n", pfn);
+ /* Set hwpoison bit while page is still isolated */
+ SetPageHWPoison(p);
+ ret = 0;
+ } else {
+ pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
+ pfn, p->flags);
+ ret = -EIO;
+ }
+ } else {
+ /* Not a free page */
+ ret = 1;
+ }
+ unset_migratetype_isolate(p);
+ unlock_system_sleep();
+ return ret;
+}
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss,
+ * however it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ goto done;
+
+ /*
+ * Page cache page we can handle?
+ */
+ if (!PageLRU(page)) {
+ /*
+ * Try to free it.
+ */
+ put_page(page);
+ shake_page(page, 1);
+
+ /*
+ * Did it turn free?
+ */
+ ret = get_any_page(page, pfn, 0);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ goto done;
+ }
+ if (!PageLRU(page)) {
+ pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
+ pfn, page->flags);
+ return -EIO;
+ }
+
+ lock_page(page);
+ wait_on_page_writeback(page);
+
+ /*
+ * Synchronized using the page lock with memory_failure()
+ */
+ if (PageHWPoison(page)) {
+ unlock_page(page);
+ put_page(page);
+ pr_debug("soft offline: %#lx page already poisoned\n", pfn);
+ return -EBUSY;
+ }
+
+ /*
+ * Try to invalidate first. This should work for
+ * non dirty unmapped page cache pages.
+ */
+ ret = invalidate_inode_page(page);
+ unlock_page(page);
+
+ /*
+ * Drop count because page migration doesn't like raised
+ * counts. The page could get re-allocated, but if it becomes
+ * LRU the isolation will just fail.
+ * RED-PEN: it would be better to keep it isolated here, but we
+ * would need to fix isolation locking first.
+ */
+ put_page(page);
+ if (ret == 1) {
+ ret = 0;
+ pr_debug("soft_offline: %#lx: invalidated\n", pfn);
+ goto done;
+ }
+
+ /*
+ * Simple invalidation didn't work.
+ * Try to migrate to a new page instead. migrate.c
+ * handles a large number of cases for us.
+ */
+ ret = isolate_lru_page(page);
+ if (!ret) {
+ LIST_HEAD(pagelist);
+
+ list_add(&page->lru, &pagelist);
+ ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+ if (ret) {
+ pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+ pfn, ret, page->flags);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ } else {
+ pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
+ pfn, ret, page_count(page), page->flags);
+ }
+ if (ret)
+ return ret;
+
+done:
+ atomic_long_add(1, &mce_bad_pages);
+ SetPageHWPoison(page);
+ /* keep elevated page count for bad page */
+ return ret;
+}
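Soft offlining is meant to be driven from user space, for example by a corrected-error policy daemon. A minimal sketch of such a trigger, assuming the sysfs attribute documented elsewhere in this series accepts a physical address:

/*
 * Sketch: ask the kernel to soft-offline the page backing a physical address.
 * Path and input format are assumptions based on the ABI document added in
 * this series, not verified here.
 */
#include <stdio.h>

int soft_offline_phys_addr(unsigned long long phys_addr)
{
	FILE *f = fopen("/sys/devices/system/memory/soft_offline_page", "w");

	if (!f)
		return -1;
	fprintf(f, "%#llx\n", phys_addr);
	return fclose(f);	/* non-zero if the kernel rejected the request */
}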
diff --git a/mm/memory.c b/mm/memory.c
index 6ab19dd4a19..09e4b1be7b6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -572,7 +572,7 @@ out:
* covered by this vma.
*/
-static inline void
+static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
unsigned long addr, int *rss)
@@ -586,7 +586,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!pte_file(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
- swap_duplicate(entry);
+ if (swap_duplicate(entry) < 0)
+ return entry.val;
+
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
@@ -635,6 +637,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
out_set_pte:
set_pte_at(dst_mm, addr, dst_pte, pte);
+ return 0;
}
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -646,6 +649,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
spinlock_t *src_ptl, *dst_ptl;
int progress = 0;
int rss[2];
+ swp_entry_t entry = (swp_entry_t){0};
again:
rss[1] = rss[0] = 0;
@@ -674,7 +678,10 @@ again:
progress++;
continue;
}
- copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+ entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+ vma, addr, rss);
+ if (entry.val)
+ break;
progress += 8;
} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
@@ -684,6 +691,12 @@ again:
add_mm_rss(dst_mm, rss[0], rss[1]);
pte_unmap_unlock(orig_dst_pte, dst_ptl);
cond_resched();
+
+ if (entry.val) {
+ if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+ return -ENOMEM;
+ progress = 0;
+ }
if (addr != end)
goto again;
return 0;
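The copy_pte_range() change above follows a common kernel shape: copy_one_pte() cannot perform a sleeping allocation while the page-table locks are held, so it hands the swap entry back, the caller drops the locks, calls add_swap_count_continuation() with GFP_KERNEL, and restarts. A self-contained user-space sketch of that "fail under the lock, allocate outside, retry" pattern (hypothetical work items, not kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct item { int needs_buffer; void *buffer; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int process_items(struct item *items, int n)
{
	struct item *blocked;

again:
	blocked = NULL;
	pthread_mutex_lock(&lock);
	for (int i = 0; i < n; i++) {
		if (items[i].needs_buffer && !items[i].buffer) {
			blocked = &items[i];	/* cannot allocate under the lock */
			break;
		}
		/* ... normal processing of items[i] under the lock ... */
	}
	pthread_mutex_unlock(&lock);

	if (blocked) {
		blocked->buffer = malloc(4096);	/* "may sleep" stand-in */
		if (!blocked->buffer)
			return -ENOMEM;
		goto again;			/* retry now that memory exists */
	}
	return 0;
}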
@@ -943,6 +956,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
details = NULL;
BUG_ON(addr >= end);
+ mem_cgroup_uncharge_start();
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
@@ -955,6 +969,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
zap_work, details);
} while (pgd++, addr = next, (addr != end && *zap_work > 0));
tlb_end_vma(tlb, vma);
+ mem_cgroup_uncharge_end();
return addr;
}
@@ -2514,7 +2529,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_HWPOISON;
} else {
print_bad_pte(vma, address, orig_pte, NULL);
- ret = VM_FAULT_OOM;
+ ret = VM_FAULT_SIGBUS;
}
goto out;
}
@@ -2540,6 +2555,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
} else if (PageHWPoison(page)) {
+ /*
+ * hwpoisoned dirty swapcache pages are kept for killing
+ * owner processes (which may be unknown at hwpoison time)
+ */
ret = VM_FAULT_HWPOISON;
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
goto out_release;
@@ -2548,6 +2567,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+ page = ksm_might_need_to_copy(page, vma, address);
+ if (!page) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
ret = VM_FAULT_OOM;
goto out_page;
@@ -2910,7 +2935,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* Page table corrupted: show pte and kill process.
*/
print_bad_pte(vma, address, orig_pte, NULL);
- return VM_FAULT_OOM;
+ return VM_FAULT_SIGBUS;
}
pgoff = pte_to_pgoff(orig_pte);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2047465cd27..030ce8a5bb0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -27,6 +27,7 @@
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
+#include <linux/mm_inline.h>
#include <asm/tlbflush.h>
@@ -71,7 +72,9 @@ static void get_page_bootmem(unsigned long info, struct page *page, int type)
atomic_inc(&page->_count);
}
-void put_page_bootmem(struct page *page)
+/* reference to __meminit __free_pages_bootmem is valid
+ * so use __ref to tell modpost not to generate a warning */
+void __ref put_page_bootmem(struct page *page)
{
int type;
@@ -672,6 +675,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (!ret) { /* Success */
list_add_tail(&page->lru, &source);
move_pages--;
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+
} else {
/* Because we don't have a big zone->lock, we should
check this again here. */
@@ -694,7 +700,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (list_empty(&source))
goto out;
/* this function returns # of failed pages */
- ret = migrate_pages(&source, hotremove_migrate_alloc, 0);
+ ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
out:
return ret;
@@ -747,7 +753,7 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
return offlined;
}
-int offline_pages(unsigned long start_pfn,
+static int offline_pages(unsigned long start_pfn,
unsigned long end_pfn, unsigned long timeout)
{
unsigned long pfn, nr_pages, expire;
@@ -849,6 +855,10 @@ repeat:
setup_per_zone_wmarks();
calculate_zone_inactive_ratio(zone);
+ if (!node_present_pages(node)) {
+ node_clear_state(node, N_HIGH_MEMORY);
+ kswapd_stop(node);
+ }
vm_total_pages = nr_free_pagecache_pages();
writeback_set_ratelimit();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4545d594424..290fb5bf044 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -85,10 +85,12 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
+#include <linux/mm_inline.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -412,17 +414,11 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (!page)
continue;
/*
- * The check for PageReserved here is important to avoid
- * handling zero pages and other pages that may have been
- * marked special by the system.
- *
- * If the PageReserved would not be checked here then f.e.
- * the location of the zero page could have an influence
- * on MPOL_MF_STRICT, zero pages would be counted for
- * the per node stats, and there would be useless attempts
- * to put zero pages on the migration list.
+ * vm_normal_page() filters out zero pages, but there might
+ * still be PageReserved pages to skip, perhaps in a VDSO.
+ * And we cannot move PageKsm pages sensibly or safely yet.
*/
- if (PageReserved(page))
+ if (PageReserved(page) || PageKsm(page))
continue;
nid = page_to_nid(page);
if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
@@ -809,6 +805,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
if (!isolate_lru_page(page)) {
list_add_tail(&page->lru, pagelist);
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
}
}
}
@@ -836,7 +834,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist))
- err = migrate_pages(&pagelist, new_node_page, dest);
+ err = migrate_pages(&pagelist, new_node_page, dest, 0);
return err;
}
@@ -1053,7 +1051,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_empty(&pagelist))
nr_failed = migrate_pages(&pagelist, new_vma_page,
- (unsigned long)vma);
+ (unsigned long)vma, 0);
if (!err && nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
@@ -1565,6 +1563,53 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
}
return zl;
}
+
+/*
+ * init_nodemask_of_mempolicy
+ *
+ * If the current task's mempolicy is "default" [NULL], return 'false'
+ * to indicate default policy. Otherwise, extract the policy nodemask
+ * for 'bind' or 'interleave' policy into the argument nodemask, or
+ * initialize the argument nodemask to contain the single node for
+ * 'preferred' or 'local' policy and return 'true' to indicate presence
+ * of non-default mempolicy.
+ *
+ * We don't bother with reference counting the mempolicy [mpol_get/put]
+ * because the current task is examining its own mempolicy and a task's
+ * mempolicy is only ever changed by the task itself.
+ *
+ * N.B., it is the caller's responsibility to free a returned nodemask.
+ */
+bool init_nodemask_of_mempolicy(nodemask_t *mask)
+{
+ struct mempolicy *mempolicy;
+ int nid;
+
+ if (!(mask && current->mempolicy))
+ return false;
+
+ mempolicy = current->mempolicy;
+ switch (mempolicy->mode) {
+ case MPOL_PREFERRED:
+ if (mempolicy->flags & MPOL_F_LOCAL)
+ nid = numa_node_id();
+ else
+ nid = mempolicy->v.preferred_node;
+ init_nodemask_of_node(mask, nid);
+ break;
+
+ case MPOL_BIND:
+ /* Fall through */
+ case MPOL_INTERLEAVE:
+ *mask = mempolicy->v.nodes;
+ break;
+
+ default:
+ BUG();
+ }
+
+ return true;
+}
#endif
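A caller can use init_nodemask_of_mempolicy() to restrict per-node work to the nodes named by the current task's policy. A hedged kernel-style sketch of a possible caller follows (the intended hugetlb user is not part of this hunk, and the helper below is hypothetical; it assumes <linux/nodemask.h>):

/*
 * Sketch: honor the task mempolicy when walking nodes.
 * for_each_allowed_node() is a hypothetical helper, not part of this patch.
 */
static void for_each_allowed_node(void (*fn)(int nid))
{
	nodemask_t allowed;
	int nid;

	if (!init_nodemask_of_mempolicy(&allowed))
		allowed = node_states[N_HIGH_MEMORY];	/* default policy: all nodes */

	for_each_node_mask(nid, allowed)
		fn(nid);
}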
/* Allocate a page in interleaved policy.
diff --git a/mm/migrate.c b/mm/migrate.c
index 7dbcb22316d..efddbf0926b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -21,6 +21,7 @@
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
@@ -78,8 +79,8 @@ int putback_lru_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
-static void remove_migration_pte(struct vm_area_struct *vma,
- struct page *old, struct page *new)
+static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ unsigned long addr, void *old)
{
struct mm_struct *mm = vma->vm_mm;
swp_entry_t entry;
@@ -88,40 +89,37 @@ static void remove_migration_pte(struct vm_area_struct *vma,
pmd_t *pmd;
pte_t *ptep, pte;
spinlock_t *ptl;
- unsigned long addr = page_address_in_vma(new, vma);
-
- if (addr == -EFAULT)
- return;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
- return;
+ goto out;
pud = pud_offset(pgd, addr);
if (!pud_present(*pud))
- return;
+ goto out;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
- return;
+ goto out;
ptep = pte_offset_map(pmd, addr);
if (!is_swap_pte(*ptep)) {
pte_unmap(ptep);
- return;
+ goto out;
}
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
- goto out;
+ goto unlock;
entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
- goto out;
+ if (!is_migration_entry(entry) ||
+ migration_entry_to_page(entry) != old)
+ goto unlock;
get_page(new);
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
@@ -137,58 +135,10 @@ static void remove_migration_pte(struct vm_area_struct *vma,
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, addr, pte);
-
-out:
+unlock:
pte_unmap_unlock(ptep, ptl);
-}
-
-/*
- * Note that remove_file_migration_ptes will only work on regular mappings,
- * Nonlinear mappings do not use migration entries.
- */
-static void remove_file_migration_ptes(struct page *old, struct page *new)
-{
- struct vm_area_struct *vma;
- struct address_space *mapping = new->mapping;
- struct prio_tree_iter iter;
- pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
- if (!mapping)
- return;
-
- spin_lock(&mapping->i_mmap_lock);
-
- vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
- remove_migration_pte(vma, old, new);
-
- spin_unlock(&mapping->i_mmap_lock);
-}
-
-/*
- * Must hold mmap_sem lock on at least one of the vmas containing
- * the page so that the anon_vma cannot vanish.
- */
-static void remove_anon_migration_ptes(struct page *old, struct page *new)
-{
- struct anon_vma *anon_vma;
- struct vm_area_struct *vma;
- unsigned long mapping;
-
- mapping = (unsigned long)new->mapping;
-
- if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
- return;
-
- /*
- * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
- */
- anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
- spin_lock(&anon_vma->lock);
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
- remove_migration_pte(vma, old, new);
-
- spin_unlock(&anon_vma->lock);
+out:
+ return SWAP_AGAIN;
}
/*
@@ -197,10 +147,7 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new)
*/
static void remove_migration_ptes(struct page *old, struct page *new)
{
- if (PageAnon(new))
- remove_anon_migration_ptes(old, new);
- else
- remove_file_migration_ptes(old, new);
+ rmap_walk(new, remove_migration_pte, old);
}
/*
@@ -341,8 +288,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
if (TestClearPageActive(page)) {
VM_BUG_ON(PageUnevictable(page));
SetPageActive(newpage);
- } else
- unevictable_migrate_page(newpage, page);
+ } else if (TestClearPageUnevictable(page))
+ SetPageUnevictable(newpage);
if (PageChecked(page))
SetPageChecked(newpage);
if (PageMappedToDisk(page))
@@ -361,6 +308,7 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
}
mlock_migrate_page(newpage, page);
+ ksm_migrate_page(newpage, page);
ClearPageSwapCache(page);
ClearPagePrivate(page);
@@ -580,9 +528,9 @@ static int move_to_new_page(struct page *newpage, struct page *page)
else
rc = fallback_migrate_page(mapping, newpage, page);
- if (!rc) {
+ if (!rc)
remove_migration_ptes(page, newpage);
- } else
+ else
newpage->mapping = NULL;
unlock_page(newpage);
@@ -595,7 +543,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
* to the newly allocated page in newpage.
*/
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
- struct page *page, int force)
+ struct page *page, int force, int offlining)
{
int rc = 0;
int *result = NULL;
@@ -621,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
lock_page(page);
}
+ /*
+ * Only memory hotplug's offline_pages() caller has locked out KSM,
+ * and can safely migrate a KSM page. The other cases have skipped
+ * PageKsm along with PageReserved - but it is only now when we have
+ * the page lock that we can be certain it will not go KSM beneath us
+ * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
+ * its pagecount raised, but only here do we take the page lock which
+ * serializes that).
+ */
+ if (PageKsm(page) && !offlining) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
/* charge against new page */
charge = mem_cgroup_prepare_migration(page, &mem);
if (charge == -ENOMEM) {
@@ -737,7 +699,7 @@ move_newpage:
* Return: Number of pages not migrated or error code.
*/
int migrate_pages(struct list_head *from,
- new_page_t get_new_page, unsigned long private)
+ new_page_t get_new_page, unsigned long private, int offlining)
{
int retry = 1;
int nr_failed = 0;
@@ -746,13 +708,6 @@ int migrate_pages(struct list_head *from,
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
int rc;
- unsigned long flags;
-
- local_irq_save(flags);
- list_for_each_entry(page, from, lru)
- __inc_zone_page_state(page, NR_ISOLATED_ANON +
- page_is_file_cache(page));
- local_irq_restore(flags);
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
@@ -764,7 +719,7 @@ int migrate_pages(struct list_head *from,
cond_resched();
rc = unmap_and_move(get_new_page, private,
- page, pass > 2);
+ page, pass > 2, offlining);
switch(rc) {
case -ENOMEM:
@@ -860,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
if (!page)
goto set_status;
- if (PageReserved(page)) /* Check for zero page */
+ /* Use PageReserved to check for zero page */
+ if (PageReserved(page) || PageKsm(page))
goto put_and_set;
pp->page = page;
@@ -878,8 +834,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto put_and_set;
err = isolate_lru_page(page);
- if (!err)
+ if (!err) {
list_add_tail(&page->lru, &pagelist);
+ inc_zone_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_cache(page));
+ }
put_and_set:
/*
* Either remove the duplicate refcount from
@@ -894,7 +853,7 @@ set_status:
err = 0;
if (!list_empty(&pagelist))
err = migrate_pages(&pagelist, new_page_node,
- (unsigned long)pm);
+ (unsigned long)pm, 0);
up_read(&mm->mmap_sem);
return err;
@@ -1015,7 +974,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
err = -ENOENT;
/* Use PageReserved to check for zero page */
- if (!page || PageReserved(page))
+ if (!page || PageReserved(page) || PageKsm(page))
goto set_status;
err = page_to_nid(page);
@@ -1044,7 +1003,7 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
int err;
for (i = 0; i < nr_pages; i += chunk_nr) {
- if (chunk_nr + i > nr_pages)
+ if (chunk_nr > nr_pages - i)
chunk_nr = nr_pages - i;
err = copy_from_user(chunk_pages, &pages[i],
diff --git a/mm/mincore.c b/mm/mincore.c
index 8cb508f84ea..7a3436ef39e 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -14,6 +14,7 @@
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -72,6 +73,42 @@ static long do_mincore(unsigned long addr, unsigned char *vec, unsigned long pag
if (!vma || addr < vma->vm_start)
return -ENOMEM;
+#ifdef CONFIG_HUGETLB_PAGE
+ if (is_vm_hugetlb_page(vma)) {
+ struct hstate *h;
+ unsigned long nr_huge;
+ unsigned char present;
+
+ i = 0;
+ nr = min(pages, (vma->vm_end - addr) >> PAGE_SHIFT);
+ h = hstate_vma(vma);
+ nr_huge = ((addr + pages * PAGE_SIZE - 1) >> huge_page_shift(h))
+ - (addr >> huge_page_shift(h)) + 1;
+ nr_huge = min(nr_huge,
+ (vma->vm_end - addr) >> huge_page_shift(h));
+ while (1) {
+ /* hugepages are always in RAM for now,
+ * but in general this needs to be checked */
+ ptep = huge_pte_offset(current->mm,
+ addr & huge_page_mask(h));
+ present = !!(ptep &&
+ !huge_pte_none(huge_ptep_get(ptep)));
+ while (1) {
+ vec[i++] = present;
+ addr += PAGE_SIZE;
+ /* reach buffer limit */
+ if (i == nr)
+ return nr;
+ /* check hugepage border */
+ if (!((addr & ~huge_page_mask(h))
+ >> PAGE_SHIFT))
+ break;
+ }
+ }
+ return nr;
+ }
+#endif
+
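With the hunk above, mincore() also reports hugetlb mappings, one result byte per small page, with every byte of a resident hugepage set. A standard user-space mincore(2) query, nothing specific to this patch, looks like:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Print how many small pages of [addr, addr + len) are resident.
 * addr must be page-aligned, as mincore(2) requires. */
int count_resident(void *addr, size_t len)
{
	long psz = sysconf(_SC_PAGESIZE);
	size_t pages = (len + psz - 1) / psz;
	unsigned char *vec = malloc(pages);
	size_t i, resident = 0;

	if (!vec || mincore(addr, len, vec) < 0) {
		free(vec);
		return -1;
	}
	for (i = 0; i < pages; i++)
		resident += vec[i] & 1;
	printf("%zu of %zu pages resident\n", resident, pages);
	free(vec);
	return 0;
}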
/*
* Calculate how many pages there are left in the last level of the
* PTE array for our address.
diff --git a/mm/mlock.c b/mm/mlock.c
index bd6f0e466f6..2b8335a8940 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -88,25 +88,22 @@ void mlock_vma_page(struct page *page)
}
}
-/*
- * called from munlock()/munmap() path with page supposedly on the LRU.
+/**
+ * munlock_vma_page - munlock a vma page
+ * @page: page to be munlocked
*
- * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
- * [in try_to_munlock()] and then attempt to isolate the page. We must
- * isolate the page to keep others from messing with its unevictable
- * and mlocked state while trying to munlock. However, we pre-clear the
- * mlocked state anyway as we might lose the isolation race and we might
- * not get another chance to clear PageMlocked. If we successfully
- * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
- * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
- * perhaps redundantly.
- * If we lose the isolation race, and the page is mapped by other VM_LOCKED
- * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
- * either of which will restore the PageMlocked state by calling
- * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+ * munlock()ed or munmap()ed, we want to check whether other vmas hold the
+ * page locked so that we can leave it on the unevictable lru list and not
+ * bother vmscan with it. However, to walk the page's rmap list in
+ * try_to_munlock() we must isolate the page from the LRU. If some other
+ * task has removed the page from the LRU, we won't be able to do that.
+ * So we clear the PageMlocked as we might not get another chance. If we
+ * can't isolate the page, we leave it for putback_lru_page() and vmscan
+ * [page_referenced()/try_to_unmap()] to deal with.
*/
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
{
BUG_ON(!PageLocked(page));
@@ -117,18 +114,18 @@ static void munlock_vma_page(struct page *page)
/*
* did try_to_munlock() succeed or punt?
*/
- if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+ if (ret != SWAP_MLOCK)
count_vm_event(UNEVICTABLE_PGMUNLOCKED);
putback_lru_page(page);
} else {
/*
- * We lost the race. let try_to_unmap() deal
- * with it. At least we get the page state and
- * mlock stats right. However, page is still on
- * the noreclaim list. We'll fix that up when
- * the page is eventually freed or we scan the
- * noreclaim list.
+ * Some other task has removed the page from the LRU.
+ * putback_lru_page() will take care of removing the
+ * page from the unevictable list, if necessary.
+ * vmscan [page_referenced()] will move the page back
+ * to the unevictable list if some other vma has it
+ * mlocked.
*/
if (PageUnevictable(page))
count_vm_event(UNEVICTABLE_PGSTRANDED);
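munlock_vma_page(), made non-static above, sits on the kernel side of the munlock()/munmap() path. A hedged userspace sketch that drives that path (may require CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK; the 1 MiB size is illustrative):

/* Sketch: mlock() makes the pages unevictable; the later munlock()
 * walks the munlock_vma_page() path documented above. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, len);

	if (mlock(p, len) < 0) {	/* may fail if RLIMIT_MEMLOCK is small */
		perror("mlock");
		return 1;
	}
	if (munlock(p, len) < 0) {
		perror("munlock");
		return 1;
	}
	munmap(p, len);
	return 0;
}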
diff --git a/mm/mmap.c b/mm/mmap.c
index 292ddc3cef9..d9c77b2dbe9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -931,13 +931,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
if (!(flags & MAP_FIXED))
addr = round_hint_to_min(addr);
- error = arch_mmap_check(addr, len, flags);
- if (error)
- return error;
-
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
- if (!len || len > TASK_SIZE)
+ if (!len)
return -ENOMEM;
/* offset overflow? */
@@ -948,24 +944,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
- if (flags & MAP_HUGETLB) {
- struct user_struct *user = NULL;
- if (file)
- return -EINVAL;
-
- /*
- * VM_NORESERVE is used because the reservations will be
- * taken when vm_ops->mmap() is called
- * A dummy user value is used because we are not locking
- * memory so no accounting is necessary
- */
- len = ALIGN(len, huge_page_size(&default_hstate));
- file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
- &user, HUGETLB_ANONHUGE_INODE);
- if (IS_ERR(file))
- return PTR_ERR(file);
- }
-
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
@@ -1220,8 +1198,20 @@ munmap_back:
goto free_vma;
}
- if (vma_wants_writenotify(vma))
+ if (vma_wants_writenotify(vma)) {
+ pgprot_t pprot = vma->vm_page_prot;
+
+ /* Can vma->vm_page_prot have changed??
+ *
+ * Answer: Yes, drivers may have changed it in their
+ * f_op->mmap method.
+ *
+ * Ensures that vmas marked as uncached stay that way.
+ */
vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
+ if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ }
vma_link(mm, vma, prev, rb_link, rb_parent);
file = vma->vm_file;
@@ -1455,6 +1445,14 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long (*get_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
+ unsigned long error = arch_mmap_check(addr, len, flags);
+ if (error)
+ return error;
+
+ /* Careful about overflows.. */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
get_area = current->mm->get_unmapped_area;
if (file && file->f_op && file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
@@ -1825,10 +1823,10 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
}
/*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
+ * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
+ * munmap path where it doesn't make sense to fail.
*/
-int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
unsigned long addr, int new_below)
{
struct mempolicy *pol;
@@ -1838,9 +1836,6 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
~(huge_page_mask(hstate_vma(vma)))))
return -EINVAL;
- if (mm->map_count >= sysctl_max_map_count)
- return -ENOMEM;
-
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new)
return -ENOMEM;
@@ -1880,6 +1875,19 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
return 0;
}
+/*
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
+{
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ return __split_vma(mm, vma, addr, new_below);
+}
+
/* Munmap is split into 2 main parts -- this part which finds
* what needs doing, and the areas themselves, which do the
* work. This now handles partial unmappings.
@@ -1915,7 +1923,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
* places tmp vma above, and higher split_vma places tmp vma below.
*/
if (start > vma->vm_start) {
- int error = split_vma(mm, vma, start, 0);
+ int error;
+
+ /*
+ * Make sure that map_count on return from munmap() will
+ * not exceed its limit; but let map_count go just above
+ * its limit temporarily, to help free resources as expected.
+ */
+ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
+
+ error = __split_vma(mm, vma, start, 0);
if (error)
return error;
prev = vma;
@@ -1924,7 +1942,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
/* Does it split the last one? */
last = find_vma(mm, end);
if (last && end > last->vm_start) {
- int error = split_vma(mm, last, end, 1);
+ int error = __split_vma(mm, last, end, 1);
if (error)
return error;
}
@@ -1999,20 +2017,14 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
if (!len)
return addr;
- if ((addr + len) > TASK_SIZE || (addr + len) < addr)
- return -EINVAL;
-
- if (is_hugepage_only_range(mm, addr, len))
- return -EINVAL;
-
error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
if (error)
return error;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- error = arch_mmap_check(addr, len, flags);
- if (error)
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (error & ~PAGE_MASK)
return error;
/*
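The MAP_HUGETLB block removed from do_mmap_pgoff() above is handled in the common mmap syscall wrapper elsewhere in this merge, so anonymous hugepage mappings keep working from userspace. An illustrative sketch (assumes hugepages have been reserved via /proc/sys/vm/nr_hugepages; the fallback MAP_HUGETLB value and the 2 MiB page size are x86 assumptions):

/* Sketch: anonymous mapping backed by huge pages via MAP_HUGETLB. */
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* x86 value; assumption for older headers */
#endif

int main(void)
{
	size_t len = 2UL << 20;		/* one 2 MiB huge page on x86 */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* ENOMEM if no hugepages reserved */
		return 1;
	}
	p[0] = 42;
	printf("mapped %zu bytes backed by huge pages at %p\n", len, (void *)p);
	munmap(p, len);
	return 0;
}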
diff --git a/mm/mremap.c b/mm/mremap.c
index 97bff254771..845190898d5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -261,6 +261,137 @@ static unsigned long move_vma(struct vm_area_struct *vma,
return new_addr;
}
+static struct vm_area_struct *vma_to_resize(unsigned long addr,
+ unsigned long old_len, unsigned long new_len, unsigned long *p)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = find_vma(mm, addr);
+
+ if (!vma || vma->vm_start > addr)
+ goto Efault;
+
+ if (is_vm_hugetlb_page(vma))
+ goto Einval;
+
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto Efault;
+
+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
+ if (new_len > old_len)
+ goto Efault;
+ }
+
+ if (vma->vm_flags & VM_LOCKED) {
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ locked += new_len - old_len;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ goto Eagain;
+ }
+
+ if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
+ goto Enomem;
+
+ if (vma->vm_flags & VM_ACCOUNT) {
+ unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
+ if (security_vm_enough_memory(charged))
+ goto Efault;
+ *p = charged;
+ }
+
+ return vma;
+
+Efault: /* very odd choice for most of the cases, but... */
+ return ERR_PTR(-EFAULT);
+Einval:
+ return ERR_PTR(-EINVAL);
+Enomem:
+ return ERR_PTR(-ENOMEM);
+Eagain:
+ return ERR_PTR(-EAGAIN);
+}
+
+static unsigned long mremap_to(unsigned long addr,
+ unsigned long old_len, unsigned long new_addr,
+ unsigned long new_len)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+ unsigned long charged = 0;
+ unsigned long map_flags;
+
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+
+ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+ goto out;
+
+ /* Check if the location we're moving into overlaps the
+ * old location at all, and fail if it does.
+ */
+ if ((new_addr <= addr) && (new_addr+new_len) > addr)
+ goto out;
+
+ if ((addr <= new_addr) && (addr+old_len) > new_addr)
+ goto out;
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
+
+ ret = do_munmap(mm, new_addr, new_len);
+ if (ret)
+ goto out;
+
+ if (old_len >= new_len) {
+ ret = do_munmap(mm, addr+new_len, old_len - new_len);
+ if (ret && old_len != new_len)
+ goto out;
+ old_len = new_len;
+ }
+
+ vma = vma_to_resize(addr, old_len, new_len, &charged);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ map_flags = MAP_FIXED;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+ ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
+ ((addr - vma->vm_start) >> PAGE_SHIFT),
+ map_flags);
+ if (ret & ~PAGE_MASK)
+ goto out1;
+
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
+ if (!(ret & ~PAGE_MASK))
+ goto out;
+out1:
+ vm_unacct_memory(charged);
+
+out:
+ return ret;
+}
+
+static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
+{
+ unsigned long end = vma->vm_end + delta;
+ if (end < vma->vm_end) /* overflow */
+ return 0;
+ if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
+ return 0;
+ if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
+ 0, MAP_FIXED) & ~PAGE_MASK)
+ return 0;
+ return 1;
+}
+
/*
* Expand (or shrink) an existing mapping, potentially moving it at the
* same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
@@ -294,32 +425,10 @@ unsigned long do_mremap(unsigned long addr,
if (!new_len)
goto out;
- /* new_addr is only valid if MREMAP_FIXED is specified */
if (flags & MREMAP_FIXED) {
- if (new_addr & ~PAGE_MASK)
- goto out;
- if (!(flags & MREMAP_MAYMOVE))
- goto out;
-
- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
- goto out;
-
- /* Check if the location we're moving into overlaps the
- * old location at all, and fail if it does.
- */
- if ((new_addr <= addr) && (new_addr+new_len) > addr)
- goto out;
-
- if ((addr <= new_addr) && (addr+old_len) > new_addr)
- goto out;
-
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
- if (ret)
- goto out;
-
- ret = do_munmap(mm, new_addr, new_len);
- if (ret)
- goto out;
+ if (flags & MREMAP_MAYMOVE)
+ ret = mremap_to(addr, old_len, new_addr, new_len);
+ goto out;
}
/*
@@ -332,60 +441,23 @@ unsigned long do_mremap(unsigned long addr,
if (ret && old_len != new_len)
goto out;
ret = addr;
- if (!(flags & MREMAP_FIXED) || (new_addr == addr))
- goto out;
- old_len = new_len;
+ goto out;
}
/*
- * Ok, we need to grow.. or relocate.
+ * Ok, we need to grow..
*/
- ret = -EFAULT;
- vma = find_vma(mm, addr);
- if (!vma || vma->vm_start > addr)
- goto out;
- if (is_vm_hugetlb_page(vma)) {
- ret = -EINVAL;
- goto out;
- }
- /* We can't remap across vm area boundaries */
- if (old_len > vma->vm_end - addr)
- goto out;
- if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
- if (new_len > old_len)
- goto out;
- }
- if (vma->vm_flags & VM_LOCKED) {
- unsigned long locked, lock_limit;
- locked = mm->locked_vm << PAGE_SHIFT;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
- locked += new_len - old_len;
- ret = -EAGAIN;
- if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- goto out;
- }
- if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
- ret = -ENOMEM;
+ vma = vma_to_resize(addr, old_len, new_len, &charged);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
goto out;
}
- if (vma->vm_flags & VM_ACCOUNT) {
- charged = (new_len - old_len) >> PAGE_SHIFT;
- if (security_vm_enough_memory(charged))
- goto out_nc;
- }
-
/* old_len exactly to the end of the area..
- * And we're not relocating the area.
*/
- if (old_len == vma->vm_end - addr &&
- !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
- (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
- unsigned long max_addr = TASK_SIZE;
- if (vma->vm_next)
- max_addr = vma->vm_next->vm_start;
+ if (old_len == vma->vm_end - addr) {
/* can we just expand the current mapping? */
- if (max_addr - addr >= new_len) {
+ if (vma_expandable(vma, new_len - old_len)) {
int pages = (new_len - old_len) >> PAGE_SHIFT;
vma_adjust(vma, vma->vm_start,
@@ -409,28 +481,27 @@ unsigned long do_mremap(unsigned long addr,
*/
ret = -ENOMEM;
if (flags & MREMAP_MAYMOVE) {
- if (!(flags & MREMAP_FIXED)) {
- unsigned long map_flags = 0;
- if (vma->vm_flags & VM_MAYSHARE)
- map_flags |= MAP_SHARED;
-
- new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
- vma->vm_pgoff, map_flags);
- if (new_addr & ~PAGE_MASK) {
- ret = new_addr;
- goto out;
- }
-
- ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
- if (ret)
- goto out;
+ unsigned long map_flags = 0;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+ new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+ vma->vm_pgoff +
+ ((addr - vma->vm_start) >> PAGE_SHIFT),
+ map_flags);
+ if (new_addr & ~PAGE_MASK) {
+ ret = new_addr;
+ goto out;
}
+
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
+ if (ret)
+ goto out;
ret = move_vma(vma, addr, old_len, new_len, new_addr);
}
out:
if (ret & ~PAGE_MASK)
vm_unacct_memory(charged);
-out_nc:
return ret;
}
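The new mremap_to() above implements the MREMAP_FIXED | MREMAP_MAYMOVE case of mremap(2). A small userspace sketch exercising that path (sizes are illustrative; the destination is reserved first and then handed to MREMAP_FIXED, which unmaps it as mremap_to() shows):

/* Sketch: move and grow a mapping to a fixed destination address. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 1 << 20, new_len = 2 << 20;

	char *old = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *dst = mmap(NULL, new_len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED || dst == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(old, 'x', old_len);

	char *p = mremap(old, old_len, new_len,
			 MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (p == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("mapping now at %p, first byte '%c'\n", (void *)p, p[0]);
	munmap(p, new_len);
	return 0;
}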
diff --git a/mm/nommu.c b/mm/nommu.c
index 9876fa0c3ad..8687973462b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1143,9 +1143,6 @@ static int do_mmap_private(struct vm_area_struct *vma,
if (ret < rlen)
memset(base + ret, 0, rlen - ret);
- } else {
- /* if it's an anonymous mapping, then just clear it */
- memset(base, 0, rlen);
}
return 0;
@@ -1343,6 +1340,11 @@ unsigned long do_mmap_pgoff(struct file *file,
goto error_just_free;
add_nommu_region(region);
+ /* clear anonymous mappings that don't ask for uninitialized data */
+ if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
+ memset((void *)region->vm_start, 0,
+ region->vm_end - region->vm_start);
+
/* okay... we have a mapping; now we have to register it */
result = vma->vm_start;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ea2147dabba..f52481b1c1e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -196,27 +196,46 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
/*
* Determine the type of allocation constraint.
*/
-static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
- gfp_t gfp_mask)
-{
#ifdef CONFIG_NUMA
+static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
+ gfp_t gfp_mask, nodemask_t *nodemask)
+{
struct zone *zone;
struct zoneref *z;
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
- nodemask_t nodes = node_states[N_HIGH_MEMORY];
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
- if (cpuset_zone_allowed_softwall(zone, gfp_mask))
- node_clear(zone_to_nid(zone), nodes);
- else
- return CONSTRAINT_CPUSET;
+ /*
+ * We reach here only when __GFP_NOFAIL is used, so we should avoid
+ * killing current. We have to kill a random task in this case.
+ * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
+ * handle that yet.
+ */
+ if (gfp_mask & __GFP_THISNODE)
+ return CONSTRAINT_NONE;
- if (!nodes_empty(nodes))
+ /*
+ * The nodemask here is a nodemask passed to alloc_pages(). Now,
+ * cpuset doesn't use this nodemask for its hardwall/softwall/hierarchy
+ * feature. mempolicy is the only user of the nodemask here.
+ * Check whether mempolicy's nodemask contains all N_HIGH_MEMORY nodes.
+ */
+ if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
return CONSTRAINT_MEMORY_POLICY;
-#endif
+ /* Check this allocation failure is caused by cpuset's wall function */
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ high_zoneidx, nodemask)
+ if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+ return CONSTRAINT_CPUSET;
+
+ return CONSTRAINT_NONE;
+}
+#else
+static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
+ gfp_t gfp_mask, nodemask_t *nodemask)
+{
return CONSTRAINT_NONE;
}
+#endif
/*
* Simple selection loop. We chose the process with the highest
@@ -337,6 +356,24 @@ static void dump_tasks(const struct mem_cgroup *mem)
} while_each_thread(g, p);
}
+static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
+ struct mem_cgroup *mem)
+{
+ pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
+ "oom_adj=%d\n",
+ current->comm, gfp_mask, order, current->signal->oom_adj);
+ task_lock(current);
+ cpuset_print_task_mems_allowed(current);
+ task_unlock(current);
+ dump_stack();
+ mem_cgroup_print_oom_info(mem, p);
+ show_mem();
+ if (sysctl_oom_dump_tasks)
+ dump_tasks(mem);
+}
+
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
/*
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
* flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
@@ -350,15 +387,23 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
return;
}
+ task_lock(p);
if (!p->mm) {
WARN_ON(1);
- printk(KERN_WARNING "tried to kill an mm-less task!\n");
+ printk(KERN_WARNING "tried to kill an mm-less task %d (%s)!\n",
+ task_pid_nr(p), p->comm);
+ task_unlock(p);
return;
}
if (verbose)
- printk(KERN_ERR "Killed process %d (%s)\n",
- task_pid_nr(p), p->comm);
+ printk(KERN_ERR "Killed process %d (%s) "
+ "vsz:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+ task_pid_nr(p), p->comm,
+ K(p->mm->total_vm),
+ K(get_mm_counter(p->mm, anon_rss)),
+ K(get_mm_counter(p->mm, file_rss)));
+ task_unlock(p);
/*
* We give our sacrificial lamb high priority and access to
@@ -395,20 +440,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
{
struct task_struct *c;
- if (printk_ratelimit()) {
- printk(KERN_WARNING "%s invoked oom-killer: "
- "gfp_mask=0x%x, order=%d, oom_adj=%d\n",
- current->comm, gfp_mask, order,
- current->signal->oom_adj);
- task_lock(current);
- cpuset_print_task_mems_allowed(current);
- task_unlock(current);
- dump_stack();
- mem_cgroup_print_oom_info(mem, current);
- show_mem();
- if (sysctl_oom_dump_tasks)
- dump_tasks(mem);
- }
+ if (printk_ratelimit())
+ dump_header(p, gfp_mask, order, mem);
/*
* If the task is already exiting, don't alarm the sysadmin or kill
@@ -544,6 +577,7 @@ retry:
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
read_unlock(&tasklist_lock);
+ dump_header(NULL, gfp_mask, order, NULL);
panic("Out of memory and no killable processes...\n");
}
@@ -599,7 +633,8 @@ rest_and_return:
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
+void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *nodemask)
{
unsigned long freed = 0;
enum oom_constraint constraint;
@@ -609,14 +644,16 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
/* Got some memory back in the last second. */
return;
- if (sysctl_panic_on_oom == 2)
+ if (sysctl_panic_on_oom == 2) {
+ dump_header(NULL, gfp_mask, order, NULL);
panic("out of memory. Compulsory panic_on_oom is selected.\n");
+ }
/*
* Check if there were limitations on the allocation (only relevant for
* NUMA) that may require different handling.
*/
- constraint = constrained_alloc(zonelist, gfp_mask);
+ constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
read_lock(&tasklist_lock);
switch (constraint) {
@@ -626,8 +663,10 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
break;
case CONSTRAINT_NONE:
- if (sysctl_panic_on_oom)
+ if (sysctl_panic_on_oom) {
+ dump_header(NULL, gfp_mask, order, NULL);
panic("out of memory. panic_on_oom is selected\n");
+ }
/* Fall-through */
case CONSTRAINT_CPUSET:
__out_of_memory(gfp_mask, order);
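The oom_adj value printed by dump_header() above is the per-task knob exposed as /proc/<pid>/oom_adj in this era (range -17 to 15). A tiny sketch marking the current process as a preferred OOM victim:

/* Sketch: raise this process's oom_adj so the OOM killer picks it first. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/oom_adj", "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 15);	/* 15 = most likely victim; -17 disables */
	fclose(f);
	return 0;
}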
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bc2ac63f41..d79b9258056 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -48,6 +48,7 @@
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
+#include <linux/memory.h>
#include <trace/events/kmem.h>
#include <asm/tlbflush.h>
@@ -486,7 +487,6 @@ static inline void __free_one_page(struct page *page,
zone->free_area[order].nr_free++;
}
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
* free_page_mlock() -- clean up attempts to free and mlocked() page.
* Page should not be on lru, so no need to fix that up.
@@ -497,9 +497,6 @@ static inline void free_page_mlock(struct page *page)
__dec_zone_page_state(page, NR_MLOCK);
__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
static inline int free_pages_check(struct page *page)
{
@@ -1658,12 +1655,22 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
if (page)
goto out;
- /* The OOM killer will not help higher order allocs */
- if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
- goto out;
-
+ if (!(gfp_mask & __GFP_NOFAIL)) {
+ /* The OOM killer will not help higher order allocs */
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ goto out;
+ /*
+ * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+ * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+ * The caller should handle page allocation failure by itself if
+ * it specifies __GFP_THISNODE.
+ * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
+ */
+ if (gfp_mask & __GFP_THISNODE)
+ goto out;
+ }
/* Exhausted what can be done so it's blamo time */
- out_of_memory(zonelist, gfp_mask, order);
+ out_of_memory(zonelist, gfp_mask, order, nodemask);
out:
clear_zonelist_oom(zonelist, gfp_mask);
@@ -3127,7 +3134,7 @@ static int __cpuinit process_zones(int cpu)
if (percpu_pagelist_fraction)
setup_pagelist_highmark(zone_pcp(zone, cpu),
- (zone->present_pages / percpu_pagelist_fraction));
+ (zone->present_pages / percpu_pagelist_fraction));
}
return 0;
@@ -3573,7 +3580,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-static unsigned long __meminit __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -4102,7 +4109,7 @@ static int __init cmp_node_active_region(const void *a, const void *b)
}
/* sort the node_map by start_pfn */
-static void __init sort_node_map(void)
+void __init sort_node_map(void)
{
sort(early_node_map, (size_t)nr_nodemap_entries,
sizeof(struct node_active_region),
@@ -5002,23 +5009,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
int set_migratetype_isolate(struct page *page)
{
struct zone *zone;
- unsigned long flags;
+ struct page *curr_page;
+ unsigned long flags, pfn, iter;
+ unsigned long immobile = 0;
+ struct memory_isolate_notify arg;
+ int notifier_ret;
int ret = -EBUSY;
int zone_idx;
zone = page_zone(page);
zone_idx = zone_idx(zone);
+
spin_lock_irqsave(&zone->lock, flags);
+ if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
+ zone_idx == ZONE_MOVABLE) {
+ ret = 0;
+ goto out;
+ }
+
+ pfn = page_to_pfn(page);
+ arg.start_pfn = pfn;
+ arg.nr_pages = pageblock_nr_pages;
+ arg.pages_found = 0;
+
/*
- * In future, more migrate types will be able to be isolation target.
+ * It may be possible to isolate a pageblock even if the
+ * migratetype is not MIGRATE_MOVABLE. The memory isolation
+ * notifier chain is used by balloon drivers to return the
+ * number of pages in a range that are held by the balloon
+ * driver to shrink memory. If all the pages are accounted for
+ * by balloons, are free, or on the LRU, isolation can continue.
+ * Later, for example, when memory hotplug notifier runs, these
+ * pages reported as "can be isolated" should be isolated (freed)
+ * by the balloon driver through the memory notifier chain.
*/
- if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
- zone_idx != ZONE_MOVABLE)
+ notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+ notifier_ret = notifier_to_errno(notifier_ret);
+ if (notifier_ret || !arg.pages_found)
goto out;
- set_pageblock_migratetype(page, MIGRATE_ISOLATE);
- move_freepages_block(zone, page, MIGRATE_ISOLATE);
- ret = 0;
+
+ for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
+ if (!pfn_valid_within(iter))
+ continue;
+
+ curr_page = pfn_to_page(iter);
+ if (!page_count(curr_page) || PageLRU(curr_page))
+ continue;
+
+ immobile++;
+ }
+
+ if (arg.pages_found == immobile)
+ ret = 0;
+
out:
+ if (!ret) {
+ set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+ move_freepages_block(zone, page, MIGRATE_ISOLATE);
+ }
+
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
drain_all_pages();
@@ -5085,3 +5134,24 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+bool is_free_buddy_page(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long flags;
+ int order;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++) {
+ struct page *page_head = page - (pfn & ((1 << order) - 1));
+
+ if (PageBuddy(page_head) && page_order(page_head) >= order)
+ break;
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ return order < MAX_ORDER;
+}
+#endif
diff --git a/mm/page_io.c b/mm/page_io.c
index c6f3e5071de..a19af956ee1 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,20 +19,15 @@
#include <linux/writeback.h>
#include <asm/pgtable.h>
-static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags,
struct page *page, bio_end_io_t end_io)
{
struct bio *bio;
bio = bio_alloc(gfp_flags, 1);
if (bio) {
- struct swap_info_struct *sis;
- swp_entry_t entry = { .val = index, };
-
- sis = get_swap_info_struct(swp_type(entry));
- bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
- (PAGE_SIZE >> 9);
- bio->bi_bdev = sis->bdev;
+ bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
+ bio->bi_sector <<= PAGE_SHIFT - 9;
bio->bi_io_vec[0].bv_page = page;
bio->bi_io_vec[0].bv_len = PAGE_SIZE;
bio->bi_io_vec[0].bv_offset = 0;
@@ -102,8 +97,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
unlock_page(page);
goto out;
}
- bio = get_swap_bio(GFP_NOIO, page_private(page), page,
- end_swap_bio_write);
+ bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
if (bio == NULL) {
set_page_dirty(page);
unlock_page(page);
@@ -127,8 +121,7 @@ int swap_readpage(struct page *page)
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageUptodate(page));
- bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
- end_swap_bio_read);
+ bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
if (bio == NULL) {
unlock_page(page);
ret = -ENOMEM;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index d5878bed784..7b47a57b664 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1,6 +1,7 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
+#include <linux/hugetlb.h>
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -107,6 +108,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
pgd_t *pgd;
unsigned long next;
int err = 0;
+ struct vm_area_struct *vma;
if (addr >= end)
return err;
@@ -117,11 +119,38 @@ int walk_page_range(unsigned long addr, unsigned long end,
pgd = pgd_offset(walk->mm, addr);
do {
next = pgd_addr_end(addr, end);
+
+ /*
+ * handle hugetlb vma individually because pagetable walk for
+ * the hugetlb page is dependent on the architecture and
+ * we can't handle it in the same manner as non-huge pages.
+ */
+ vma = find_vma(walk->mm, addr);
+#ifdef CONFIG_HUGETLB_PAGE
+ if (vma && is_vm_hugetlb_page(vma)) {
+ pte_t *pte;
+ struct hstate *hs;
+
+ if (vma->vm_end < next)
+ next = vma->vm_end;
+ hs = hstate_vma(vma);
+ pte = huge_pte_offset(walk->mm,
+ addr & huge_page_mask(hs));
+ if (pte && !huge_pte_none(huge_ptep_get(pte))
+ && walk->hugetlb_entry)
+ err = walk->hugetlb_entry(pte, addr,
+ next, walk);
+ if (err)
+ break;
+ continue;
+ }
+#endif
if (pgd_none_or_clear_bad(pgd)) {
if (walk->pte_hole)
err = walk->pte_hole(addr, next, walk);
if (err)
break;
+ pgd++;
continue;
}
if (walk->pgd_entry)
@@ -131,7 +160,8 @@ int walk_page_range(unsigned long addr, unsigned long end,
err = walk_pud_range(pgd, addr, next, walk);
if (err)
break;
- } while (pgd++, addr = next, addr != end);
+ pgd++;
+ } while (addr = next, addr != end);
return err;
}
diff --git a/mm/percpu.c b/mm/percpu.c
index 5adfc268b40..442010cc91c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,8 +46,6 @@
*
* To use this allocator, arch code should do the followings.
*
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
* regular address to percpu pointer and back if they need to be
* different from the default
@@ -74,6 +72,7 @@
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
+#include <asm/io.h>
#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
@@ -1302,6 +1301,27 @@ void free_percpu(void *ptr)
}
EXPORT_SYMBOL_GPL(free_percpu);
+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is a dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address. The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ if ((unsigned long)addr < VMALLOC_START ||
+ (unsigned long)addr >= VMALLOC_END)
+ return __pa(addr);
+ else
+ return page_to_phys(vmalloc_to_page(addr));
+}
+
static inline size_t pcpu_calc_fc_sizes(size_t static_size,
size_t reserved_size,
ssize_t *dyn_sizep)
diff --git a/mm/readahead.c b/mm/readahead.c
index aa1aa234523..033bc135a41 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -547,5 +547,17 @@ page_cache_async_readahead(struct address_space *mapping,
/* do read-ahead */
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
+
+#ifdef CONFIG_BLOCK
+ /*
+ * Normally the current page is !uptodate and lock_page() will be
+ * immediately called to implicitly unplug the device. However this
+ * is not always true for RAID configurations, where data arrives
+ * not strictly in their submission order. In this case we need to
+ * explicitly kick off the IO.
+ */
+ if (PageUptodate(page))
+ blk_run_backing_dev(mapping->backing_dev_info, NULL);
+#endif
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
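page_cache_async_readahead() above is driven implicitly by sequential read(); userspace can also prime the page cache explicitly through the separate posix_fadvise(POSIX_FADV_WILLNEED) path. A short sketch (file name and the 8 MiB length are illustrative):

/* Sketch: ask the kernel to start reading a file range in the background.
 * This uses the POSIX_FADV_WILLNEED path, not the async readahead handled
 * above, but it is the usual explicit readahead request from userspace. */
#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	int err = posix_fadvise(fd, 0, 8 << 20, POSIX_FADV_WILLNEED);
	if (err)	/* posix_fadvise returns the error number directly */
		fprintf(stderr, "posix_fadvise: error %d\n", err);
	close(fd);
	return 0;
}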
diff --git a/mm/rmap.c b/mm/rmap.c
index dd43373a483..278cd277bde 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
@@ -67,7 +68,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}
-static inline void anon_vma_free(struct anon_vma *anon_vma)
+void anon_vma_free(struct anon_vma *anon_vma)
{
kmem_cache_free(anon_vma_cachep, anon_vma);
}
@@ -171,7 +172,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
list_del(&vma->anon_vma_node);
/* We must garbage collect the anon_vma if it's empty */
- empty = list_empty(&anon_vma->head);
+ empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
spin_unlock(&anon_vma->lock);
if (empty)
@@ -183,6 +184,7 @@ static void anon_vma_ctor(void *data)
struct anon_vma *anon_vma = data;
spin_lock_init(&anon_vma->lock);
+ ksm_refcount_init(anon_vma);
INIT_LIST_HEAD(&anon_vma->head);
}
@@ -202,8 +204,8 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
unsigned long anon_mapping;
rcu_read_lock();
- anon_mapping = (unsigned long) page->mapping;
- if (!(anon_mapping & PAGE_MAPPING_ANON))
+ anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
+ if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
@@ -248,8 +250,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
if (PageAnon(page)) {
- if ((void *)vma->anon_vma !=
- (void *)page->mapping - PAGE_MAPPING_ANON)
+ if (vma->anon_vma != page_anon_vma(page))
return -EFAULT;
} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
if (!vma->vm_file ||
@@ -337,21 +338,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
* Subfunctions of page_referenced: page_referenced_one called
* repeatedly from either page_referenced_anon or page_referenced_file.
*/
-static int page_referenced_one(struct page *page,
- struct vm_area_struct *vma,
- unsigned int *mapcount,
- unsigned long *vm_flags)
+int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, unsigned int *mapcount,
+ unsigned long *vm_flags)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
spinlock_t *ptl;
int referenced = 0;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
@@ -388,9 +383,10 @@ static int page_referenced_one(struct page *page,
out_unmap:
(*mapcount)--;
pte_unmap_unlock(pte, ptl);
-out:
+
if (referenced)
*vm_flags |= vma->vm_flags;
+out:
return referenced;
}
@@ -409,6 +405,9 @@ static int page_referenced_anon(struct page *page,
mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
@@ -416,7 +415,7 @@ static int page_referenced_anon(struct page *page,
*/
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
- referenced += page_referenced_one(page, vma,
+ referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
@@ -474,6 +473,9 @@ static int page_referenced_file(struct page *page,
mapcount = page_mapcount(page);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
@@ -481,7 +483,7 @@ static int page_referenced_file(struct page *page,
*/
if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
continue;
- referenced += page_referenced_one(page, vma,
+ referenced += page_referenced_one(page, vma, address,
&mapcount, vm_flags);
if (!mapcount)
break;
@@ -507,46 +509,47 @@ int page_referenced(struct page *page,
unsigned long *vm_flags)
{
int referenced = 0;
+ int we_locked = 0;
if (TestClearPageReferenced(page))
referenced++;
*vm_flags = 0;
- if (page_mapped(page) && page->mapping) {
- if (PageAnon(page))
+ if (page_mapped(page) && page_rmapping(page)) {
+ if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+ we_locked = trylock_page(page);
+ if (!we_locked) {
+ referenced++;
+ goto out;
+ }
+ }
+ if (unlikely(PageKsm(page)))
+ referenced += page_referenced_ksm(page, mem_cont,
+ vm_flags);
+ else if (PageAnon(page))
referenced += page_referenced_anon(page, mem_cont,
vm_flags);
- else if (is_locked)
+ else if (page->mapping)
referenced += page_referenced_file(page, mem_cont,
vm_flags);
- else if (!trylock_page(page))
- referenced++;
- else {
- if (page->mapping)
- referenced += page_referenced_file(page,
- mem_cont, vm_flags);
+ if (we_locked)
unlock_page(page);
- }
}
-
+out:
if (page_test_and_clear_young(page))
referenced++;
return referenced;
}
-static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
+static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
spinlock_t *ptl;
int ret = 0;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte)
goto out;
@@ -578,8 +581,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- if (vma->vm_flags & VM_SHARED)
- ret += page_mkclean_one(page, vma);
+ if (vma->vm_flags & VM_SHARED) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret += page_mkclean_one(page, vma, address);
+ }
}
spin_unlock(&mapping->i_mmap_lock);
return ret;
@@ -620,14 +627,7 @@ static void __page_set_anon_rmap(struct page *page,
BUG_ON(!anon_vma);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
-
page->index = linear_page_index(vma, address);
-
- /*
- * nr_mapped state can be updated without turning off
- * interrupts because it is not modified via interrupt.
- */
- __inc_zone_page_state(page, NR_ANON_PAGES);
}
/**
@@ -665,14 +665,23 @@ static void __page_check_anon_rmap(struct page *page,
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * The caller needs to hold the pte lock and the page must be locked.
+ * The caller needs to hold the pte lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that PageAnon is not being upgraded racily to PageKsm
+ * (but PageKsm is never downgraded to PageAnon).
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
+ int first = atomic_inc_and_test(&page->_mapcount);
+ if (first)
+ __inc_zone_page_state(page, NR_ANON_PAGES);
+ if (unlikely(PageKsm(page)))
+ return;
+
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
- if (atomic_inc_and_test(&page->_mapcount))
+ if (first)
__page_set_anon_rmap(page, vma, address);
else
__page_check_anon_rmap(page, vma, address);
@@ -694,6 +703,7 @@ void page_add_new_anon_rmap(struct page *page,
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
+ __inc_zone_page_state(page, NR_ANON_PAGES);
__page_set_anon_rmap(page, vma, address);
if (page_evictable(page, vma))
lru_cache_add_lru(page, LRU_ACTIVE_ANON);
@@ -711,7 +721,7 @@ void page_add_file_rmap(struct page *page)
{
if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
- mem_cgroup_update_mapped_file_stat(page, 1);
+ mem_cgroup_update_file_mapped(page, 1);
}
}
@@ -743,8 +753,8 @@ void page_remove_rmap(struct page *page)
__dec_zone_page_state(page, NR_ANON_PAGES);
} else {
__dec_zone_page_state(page, NR_FILE_MAPPED);
+ mem_cgroup_update_file_mapped(page, -1);
}
- mem_cgroup_update_mapped_file_stat(page, -1);
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
@@ -760,20 +770,15 @@ void page_remove_rmap(struct page *page)
* Subfunctions of try_to_unmap: try_to_unmap_one called
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- enum ttu_flags flags)
+int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ unsigned long address, enum ttu_flags flags)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long address;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
- address = vma_address(page, vma);
- if (address == -EFAULT)
- goto out;
-
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
@@ -784,10 +789,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* skipped over this mm) then we should reactivate it.
*/
if (!(flags & TTU_IGNORE_MLOCK)) {
- if (vma->vm_flags & VM_LOCKED) {
- ret = SWAP_MLOCK;
+ if (vma->vm_flags & VM_LOCKED)
+ goto out_mlock;
+
+ if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out_unmap;
- }
}
if (!(flags & TTU_IGNORE_ACCESS)) {
if (ptep_clear_flush_young_notify(vma, address, pte)) {
@@ -822,7 +828,11 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Store the swap location in the pte.
* See handle_pte_fault() ...
*/
- swap_duplicate(entry);
+ if (swap_duplicate(entry) < 0) {
+ set_pte_at(mm, address, pte, pteval);
+ ret = SWAP_FAIL;
+ goto out_unmap;
+ }
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
@@ -849,7 +859,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
} else
dec_mm_counter(mm, file_rss);
-
page_remove_rmap(page);
page_cache_release(page);
@@ -857,6 +866,27 @@ out_unmap:
pte_unmap_unlock(pte, ptl);
out:
return ret;
+
+out_mlock:
+ pte_unmap_unlock(pte, ptl);
+
+ /*
+ * We need mmap_sem locking; otherwise the VM_LOCKED check is racy
+ * and gives an unstable result. We also can't wait here because
+ * we now hold anon_vma->lock or mapping->i_mmap_lock.
+ * If the trylock fails, the page remains on the evictable LRU and
+ * vmscan may later retry moving it to the unevictable LRU if the
+ * page is actually mlocked.
+ */
+ if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+ if (vma->vm_flags & VM_LOCKED) {
+ mlock_vma_page(page);
+ ret = SWAP_MLOCK;
+ }
+ up_read(&vma->vm_mm->mmap_sem);
+ }
+ return ret;
}
/*
@@ -922,11 +952,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
/*
- * MLOCK_PAGES => feature is configured.
- * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+ * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
- if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+ if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
@@ -976,29 +1005,11 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
}
-/*
- * common handling for pages mapped in VM_LOCKED vmas
- */
-static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
-{
- int mlocked = 0;
-
- if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
- if (vma->vm_flags & VM_LOCKED) {
- mlock_vma_page(page);
- mlocked++; /* really mlocked the page */
- }
- up_read(&vma->vm_mm->mmap_sem);
- }
- return mlocked;
-}
-
/**
* try_to_unmap_anon - unmap or unlock anonymous page using the object-based
* rmap method
* @page: the page to unmap/unlock
- * @unlock: request for unlock rather than unmap [unlikely]
- * @migration: unmapping for migration - ignored if @unlock
+ * @flags: action and flags
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
@@ -1014,42 +1025,22 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
- unsigned int mlocked = 0;
int ret = SWAP_AGAIN;
- int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
- if (MLOCK_PAGES && unlikely(unlock))
- ret = SWAP_SUCCESS; /* default for try_to_munlock() */
anon_vma = page_lock_anon_vma(page);
if (!anon_vma)
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!((vma->vm_flags & VM_LOCKED) &&
- page_mapped_in_vma(page, vma)))
- continue; /* must visit all unlocked vmas */
- ret = SWAP_MLOCK; /* saw at least one mlocked vma */
- } else {
- ret = try_to_unmap_one(page, vma, flags);
- if (ret == SWAP_FAIL || !page_mapped(page))
- break;
- }
- if (ret == SWAP_MLOCK) {
- mlocked = try_to_mlock_page(page, vma);
- if (mlocked)
- break; /* stop if actually mlocked page */
- }
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = try_to_unmap_one(page, vma, address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page))
+ break;
}
page_unlock_anon_vma(anon_vma);
-
- if (mlocked)
- ret = SWAP_MLOCK; /* actually mlocked the page */
- else if (ret == SWAP_MLOCK)
- ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
-
return ret;
}
@@ -1079,48 +1070,30 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
unsigned long max_nl_cursor = 0;
unsigned long max_nl_size = 0;
unsigned int mapcount;
- unsigned int mlocked = 0;
- int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;
-
- if (MLOCK_PAGES && unlikely(unlock))
- ret = SWAP_SUCCESS; /* default for try_to_munlock() */
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!((vma->vm_flags & VM_LOCKED) &&
- page_mapped_in_vma(page, vma)))
- continue; /* must visit all vmas */
- ret = SWAP_MLOCK;
- } else {
- ret = try_to_unmap_one(page, vma, flags);
- if (ret == SWAP_FAIL || !page_mapped(page))
- goto out;
- }
- if (ret == SWAP_MLOCK) {
- mlocked = try_to_mlock_page(page, vma);
- if (mlocked)
- break; /* stop if actually mlocked page */
- }
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = try_to_unmap_one(page, vma, address, flags);
+ if (ret != SWAP_AGAIN || !page_mapped(page))
+ goto out;
}
- if (mlocked)
+ if (list_empty(&mapping->i_mmap_nonlinear))
goto out;
- if (list_empty(&mapping->i_mmap_nonlinear))
+ /*
+ * We don't bother to try to find the munlocked page in nonlinears.
+ * It's costly. Instead, later, page reclaim logic may call
+ * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
+ */
+ if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out;
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (MLOCK_PAGES && unlikely(unlock)) {
- if (!(vma->vm_flags & VM_LOCKED))
- continue; /* must visit all vmas */
- ret = SWAP_MLOCK; /* leave mlocked == 0 */
- goto out; /* no need to look further */
- }
- if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
- (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = cursor;
@@ -1153,16 +1126,12 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
- (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
while ( cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
- ret = try_to_unmap_cluster(cursor, &mapcount,
- vma, page);
- if (ret == SWAP_MLOCK)
- mlocked = 2; /* to return below */
+ if (try_to_unmap_cluster(cursor, &mapcount,
+ vma, page) == SWAP_MLOCK)
+ ret = SWAP_MLOCK;
cursor += CLUSTER_SIZE;
vma->vm_private_data = (void *) cursor;
if ((int)mapcount <= 0)
@@ -1183,10 +1152,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
vma->vm_private_data = NULL;
out:
spin_unlock(&mapping->i_mmap_lock);
- if (mlocked)
- ret = SWAP_MLOCK; /* actually mlocked the page */
- else if (ret == SWAP_MLOCK)
- ret = SWAP_AGAIN; /* saw VM_LOCKED vma */
return ret;
}
@@ -1210,7 +1175,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
BUG_ON(!PageLocked(page));
- if (PageAnon(page))
+ if (unlikely(PageKsm(page)))
+ ret = try_to_unmap_ksm(page, flags);
+ else if (PageAnon(page))
ret = try_to_unmap_anon(page, flags);
else
ret = try_to_unmap_file(page, flags);
@@ -1229,17 +1196,98 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
*
* Return values are:
*
- * SWAP_SUCCESS - no vma's holding page mlocked.
+ * SWAP_AGAIN - no vma is holding page mlocked, or,
* SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
+ * SWAP_FAIL - page cannot be located at present
* SWAP_MLOCK - page is now mlocked.
*/
int try_to_munlock(struct page *page)
{
VM_BUG_ON(!PageLocked(page) || PageLRU(page));
- if (PageAnon(page))
+ if (unlikely(PageKsm(page)))
+ return try_to_unmap_ksm(page, TTU_MUNLOCK);
+ else if (PageAnon(page))
return try_to_unmap_anon(page, TTU_MUNLOCK);
else
return try_to_unmap_file(page, TTU_MUNLOCK);
}
+#ifdef CONFIG_MIGRATION
+/*
+ * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+ int ret = SWAP_AGAIN;
+
+ /*
+ * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+ * because that depends on page_mapped(); but not all its usages
+ * are holding mmap_sem, which also gave the necessary guarantee
+ * (that this anon_vma's slab has not already been destroyed).
+ * This needs to be reviewed later: avoiding page_lock_anon_vma()
+ * is risky, and currently limits the usefulness of rmap_walk().
+ */
+ anon_vma = page_anon_vma(page);
+ if (!anon_vma)
+ return ret;
+ spin_lock(&anon_vma->lock);
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = rmap_one(page, vma, address, arg);
+ if (ret != SWAP_AGAIN)
+ break;
+ }
+ spin_unlock(&anon_vma->lock);
+ return ret;
+}
+
+static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ struct address_space *mapping = page->mapping;
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ int ret = SWAP_AGAIN;
+
+ if (!mapping)
+ return ret;
+ spin_lock(&mapping->i_mmap_lock);
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ unsigned long address = vma_address(page, vma);
+ if (address == -EFAULT)
+ continue;
+ ret = rmap_one(page, vma, address, arg);
+ if (ret != SWAP_AGAIN)
+ break;
+ }
+ /*
+ * No nonlinear handling: being always shared, nonlinear vmas
+ * never contain migration ptes. Decide what to do about this
+ * limitation to linear when we need rmap_walk() on nonlinear.
+ */
+ spin_unlock(&mapping->i_mmap_lock);
+ return ret;
+}
+
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ VM_BUG_ON(!PageLocked(page));
+
+ if (unlikely(PageKsm(page)))
+ return rmap_walk_ksm(page, rmap_one, arg);
+ else if (PageAnon(page))
+ return rmap_walk_anon(page, rmap_one, arg);
+ else
+ return rmap_walk_file(page, rmap_one, arg);
+}
+#endif /* CONFIG_MIGRATION */
diff --git a/mm/shmem.c b/mm/shmem.c
index 356dd99566e..eef4ebea515 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -29,7 +29,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
-#include <linux/ima.h>
static struct vfsmount *shm_mnt;
@@ -42,6 +41,7 @@ static struct vfsmount *shm_mnt;
#include <linux/xattr.h>
#include <linux/exportfs.h>
+#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
@@ -810,7 +810,7 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
if (!error && (attr->ia_valid & ATTR_MODE))
- error = generic_acl_chmod(inode, &shmem_acl_ops);
+ error = generic_acl_chmod(inode);
#endif
if (page)
page_cache_release(page);
@@ -1017,7 +1017,14 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
goto out;
}
mutex_unlock(&shmem_swaplist_mutex);
-out: return found; /* 0 or 1 or -ENOMEM */
+ /*
+ * Can some race bring us here? We've been holding page lock,
+ * so I think not; but would rather try again later than BUG()
+ */
+ unlock_page(page);
+ page_cache_release(page);
+out:
+ return (found < 0) ? found : 0;
}
/*
@@ -1080,7 +1087,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
else
inode = NULL;
spin_unlock(&info->lock);
- swap_duplicate(swap);
+ swap_shmem_alloc(swap);
BUG_ON(page_mapped(page));
page_cache_release(page); /* pagecache ref */
swap_writepage(page, wbc);
@@ -1817,11 +1824,15 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
return error;
}
}
- error = shmem_acl_init(inode, dir);
+#ifdef CONFIG_TMPFS_POSIX_ACL
+ error = generic_acl_init(inode, dir);
if (error) {
iput(inode);
return error;
}
+#else
+ error = 0;
+#endif
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
if (S_ISDIR(mode))
@@ -2036,27 +2047,28 @@ static const struct inode_operations shmem_symlink_inode_operations = {
* filesystem level, though.
*/
-static size_t shmem_xattr_security_list(struct inode *inode, char *list,
+static size_t shmem_xattr_security_list(struct dentry *dentry, char *list,
size_t list_len, const char *name,
- size_t name_len)
+ size_t name_len, int handler_flags)
{
- return security_inode_listsecurity(inode, list, list_len);
+ return security_inode_listsecurity(dentry->d_inode, list, list_len);
}
-static int shmem_xattr_security_get(struct inode *inode, const char *name,
- void *buffer, size_t size)
+static int shmem_xattr_security_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int handler_flags)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return xattr_getsecurity(inode, name, buffer, size);
+ return xattr_getsecurity(dentry->d_inode, name, buffer, size);
}
-static int shmem_xattr_security_set(struct inode *inode, const char *name,
- const void *value, size_t size, int flags)
+static int shmem_xattr_security_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int handler_flags)
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return security_inode_setsecurity(inode, name, value, size, flags);
+ return security_inode_setsecurity(dentry->d_inode, name, value,
+ size, flags);
}
static struct xattr_handler shmem_xattr_security_handler = {
@@ -2067,8 +2079,8 @@ static struct xattr_handler shmem_xattr_security_handler = {
};
static struct xattr_handler *shmem_xattr_handlers[] = {
- &shmem_xattr_acl_access_handler,
- &shmem_xattr_acl_default_handler,
+ &generic_acl_access_handler,
+ &generic_acl_default_handler,
&shmem_xattr_security_handler,
NULL
};
@@ -2447,7 +2459,7 @@ static const struct inode_operations shmem_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .check_acl = shmem_check_acl,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2470,7 +2482,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .check_acl = shmem_check_acl,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2481,7 +2493,7 @@ static const struct inode_operations shmem_special_inode_operations = {
.getxattr = generic_getxattr,
.listxattr = generic_listxattr,
.removexattr = generic_removexattr,
- .check_acl = shmem_check_acl,
+ .check_acl = generic_check_acl,
#endif
};
@@ -2619,7 +2631,8 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
int error;
struct file *file;
struct inode *inode;
- struct dentry *dentry, *root;
+ struct path path;
+ struct dentry *root;
struct qstr this;
if (IS_ERR(shm_mnt))
@@ -2636,38 +2649,35 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
this.len = strlen(name);
this.hash = 0; /* will go */
root = shm_mnt->mnt_root;
- dentry = d_alloc(root, &this);
- if (!dentry)
+ path.dentry = d_alloc(root, &this);
+ if (!path.dentry)
goto put_memory;
-
- error = -ENFILE;
- file = get_empty_filp();
- if (!file)
- goto put_dentry;
+ path.mnt = mntget(shm_mnt);
error = -ENOSPC;
inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
- goto close_file;
+ goto put_dentry;
- d_instantiate(dentry, inode);
+ d_instantiate(path.dentry, inode);
inode->i_size = size;
inode->i_nlink = 0; /* It is unlinked */
- init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
- &shmem_file_operations);
-
#ifndef CONFIG_MMU
error = ramfs_nommu_expand_for_mapping(inode, size);
if (error)
- goto close_file;
+ goto put_dentry;
#endif
- ima_counts_get(file);
+
+ error = -ENFILE;
+ file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
+ &shmem_file_operations);
+ if (!file)
+ goto put_dentry;
+
return file;
-close_file:
- put_filp(file);
put_dentry:
- dput(dentry);
+ path_put(&path);
put_memory:
shmem_unacct_size(flags, size);
return ERR_PTR(error);
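
The shmem_file_setup() rework above reorders the allocations so the struct file is built last and every earlier failure unwinds through a single put_dentry/put_memory chain instead of the old close_file path. As a rough userspace illustration of that staged goto-cleanup idiom (handle_setup and its resource names are invented for this sketch and are not kernel APIs):

/* Staged cleanup: acquire fallible resources first, build the final
 * object last, unwind with one goto chain.  Compile with: cc -std=c99 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct handle { char *buf; FILE *f; };

static struct handle *handle_setup(const char *path, size_t size)
{
	struct handle *h;
	int error = -ENOMEM;

	h = calloc(1, sizeof(*h));
	if (!h)
		goto out;

	h->buf = malloc(size);		/* plays the part of inode/accounting */
	if (!h->buf)
		goto put_handle;

	error = -ENFILE;
	h->f = fopen(path, "w");	/* the final object, created last */
	if (!h->f)
		goto put_buf;

	return h;

put_buf:
	free(h->buf);
put_handle:
	free(h);
out:
	errno = -error;
	return NULL;
}

int main(void)
{
	struct handle *h = handle_setup("/tmp/demo", 4096);

	if (!h) {
		perror("handle_setup");
		return 1;
	}
	fclose(h->f);
	free(h->buf);
	free(h);
	return 0;
}
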
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
deleted file mode 100644
index df2c87fdae5..00000000000
--- a/mm/shmem_acl.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * mm/shmem_acl.c
- *
- * (C) 2005 Andreas Gruenbacher <agruen@suse.de>
- *
- * This file is released under the GPL.
- */
-
-#include <linux/fs.h>
-#include <linux/shmem_fs.h>
-#include <linux/xattr.h>
-#include <linux/generic_acl.h>
-
-/**
- * shmem_get_acl - generic_acl_operations->getacl() operation
- */
-static struct posix_acl *
-shmem_get_acl(struct inode *inode, int type)
-{
- struct posix_acl *acl = NULL;
-
- spin_lock(&inode->i_lock);
- switch(type) {
- case ACL_TYPE_ACCESS:
- acl = posix_acl_dup(inode->i_acl);
- break;
-
- case ACL_TYPE_DEFAULT:
- acl = posix_acl_dup(inode->i_default_acl);
- break;
- }
- spin_unlock(&inode->i_lock);
-
- return acl;
-}
-
-/**
- * shmem_set_acl - generic_acl_operations->setacl() operation
- */
-static void
-shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
-{
- struct posix_acl *free = NULL;
-
- spin_lock(&inode->i_lock);
- switch(type) {
- case ACL_TYPE_ACCESS:
- free = inode->i_acl;
- inode->i_acl = posix_acl_dup(acl);
- break;
-
- case ACL_TYPE_DEFAULT:
- free = inode->i_default_acl;
- inode->i_default_acl = posix_acl_dup(acl);
- break;
- }
- spin_unlock(&inode->i_lock);
- posix_acl_release(free);
-}
-
-struct generic_acl_operations shmem_acl_ops = {
- .getacl = shmem_get_acl,
- .setacl = shmem_set_acl,
-};
-
-/**
- * shmem_list_acl_access, shmem_get_acl_access, shmem_set_acl_access,
- * shmem_xattr_acl_access_handler - plumbing code to implement the
- * system.posix_acl_access xattr using the generic acl functions.
- */
-
-static size_t
-shmem_list_acl_access(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
-{
- return generic_acl_list(inode, &shmem_acl_ops, ACL_TYPE_ACCESS,
- list, list_size);
-}
-
-static int
-shmem_get_acl_access(struct inode *inode, const char *name, void *buffer,
- size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_get(inode, &shmem_acl_ops, ACL_TYPE_ACCESS, buffer,
- size);
-}
-
-static int
-shmem_set_acl_access(struct inode *inode, const char *name, const void *value,
- size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_set(inode, &shmem_acl_ops, ACL_TYPE_ACCESS, value,
- size);
-}
-
-struct xattr_handler shmem_xattr_acl_access_handler = {
- .prefix = POSIX_ACL_XATTR_ACCESS,
- .list = shmem_list_acl_access,
- .get = shmem_get_acl_access,
- .set = shmem_set_acl_access,
-};
-
-/**
- * shmem_list_acl_default, shmem_get_acl_default, shmem_set_acl_default,
- * shmem_xattr_acl_default_handler - plumbing code to implement the
- * system.posix_acl_default xattr using the generic acl functions.
- */
-
-static size_t
-shmem_list_acl_default(struct inode *inode, char *list, size_t list_size,
- const char *name, size_t name_len)
-{
- return generic_acl_list(inode, &shmem_acl_ops, ACL_TYPE_DEFAULT,
- list, list_size);
-}
-
-static int
-shmem_get_acl_default(struct inode *inode, const char *name, void *buffer,
- size_t size)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_get(inode, &shmem_acl_ops, ACL_TYPE_DEFAULT, buffer,
- size);
-}
-
-static int
-shmem_set_acl_default(struct inode *inode, const char *name, const void *value,
- size_t size, int flags)
-{
- if (strcmp(name, "") != 0)
- return -EINVAL;
- return generic_acl_set(inode, &shmem_acl_ops, ACL_TYPE_DEFAULT, value,
- size);
-}
-
-struct xattr_handler shmem_xattr_acl_default_handler = {
- .prefix = POSIX_ACL_XATTR_DEFAULT,
- .list = shmem_list_acl_default,
- .get = shmem_get_acl_default,
- .set = shmem_set_acl_default,
-};
-
-/**
- * shmem_acl_init - Inizialize the acl(s) of a new inode
- */
-int
-shmem_acl_init(struct inode *inode, struct inode *dir)
-{
- return generic_acl_init(inode, dir, &shmem_acl_ops);
-}
-
-/**
- * shmem_check_acl - check_acl() callback for generic_permission()
- */
-int
-shmem_check_acl(struct inode *inode, int mask)
-{
- struct posix_acl *acl = shmem_get_acl(inode, ACL_TYPE_ACCESS);
-
- if (acl) {
- int error = posix_acl_permission(inode, acl, mask);
- posix_acl_release(acl);
- return error;
- }
- return -EAGAIN;
-}
diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96b..7d41f15b48d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -490,7 +490,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
size_t slab_buffer_size(struct kmem_cache *cachep)
{
return cachep->buffer_size;
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
#define BAD_ALIEN_MAGIC 0x01020304ul
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+ NONE,
+ PARTIAL_AC,
+ PARTIAL_L3,
+ EARLY,
+ FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+ return g_cpucache_up >= EARLY;
+}
+
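
Moving g_cpucache_up and slab_is_available() above the lockdep block lets init_node_lock_keys() consult the boot phase before annotating lock classes. A minimal standalone model of that phase-gate pattern, reusing the patch's names but with none of the kernel machinery:

#include <stdio.h>

/* Boot proceeds through phases; helpers refuse to run until late enough. */
static enum { NONE, PARTIAL_AC, PARTIAL_L3, EARLY, FULL } g_cpucache_up;

static int slab_is_available(void) { return g_cpucache_up >= EARLY; }

static void init_node_lock_keys(int node)
{
	if (g_cpucache_up != FULL)	/* too early, nothing to annotate yet */
		return;
	printf("annotating locks for node %d\n", node);
}

int main(void)
{
	init_node_lock_keys(0);		/* ignored: still NONE */
	g_cpucache_up = FULL;
	init_node_lock_keys(0);		/* now runs */
	printf("slab available: %d\n", slab_is_available());
	return 0;
}
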
#ifdef CONFIG_LOCKDEP
/*
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
{
- int q;
struct cache_sizes *s = malloc_sizes;
- while (s->cs_size != ULONG_MAX) {
- for_each_node(q) {
- struct array_cache **alc;
- int r;
- struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
- if (!l3 || OFF_SLAB(s->cs_cachep))
- continue;
- lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
- alc = l3->alien;
- /*
- * FIXME: This check for BAD_ALIEN_MAGIC
- * should go away when common slab code is taught to
- * work even without alien caches.
- * Currently, non NUMA code returns BAD_ALIEN_MAGIC
- * for alloc_alien_cache,
- */
- if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
- continue;
- for_each_node(r) {
- if (alc[r])
- lockdep_set_class(&alc[r]->lock,
- &on_slab_alc_key);
- }
+ if (g_cpucache_up != FULL)
+ return;
+
+ for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+ struct array_cache **alc;
+ struct kmem_list3 *l3;
+ int r;
+
+ l3 = s->cs_cachep->nodelists[q];
+ if (!l3 || OFF_SLAB(s->cs_cachep))
+ return;
+ lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+ alc = l3->alien;
+ /*
+ * FIXME: This check for BAD_ALIEN_MAGIC
+ * should go away when common slab code is taught to
+ * work even without alien caches.
+ * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+ * for alloc_alien_cache,
+ */
+ if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+ return;
+ for_each_node(r) {
+ if (alc[r])
+ lockdep_set_class(&alc[r]->lock,
+ &on_slab_alc_key);
}
- s++;
}
}
+
+static inline void init_lock_keys(void)
+{
+ int node;
+
+ for_each_node(node)
+ init_node_lock_keys(node);
+}
#else
+static void init_node_lock_keys(int q)
+{
+}
+
static inline void init_lock_keys(void)
{
}
@@ -665,27 +697,7 @@ static inline void init_lock_keys(void)
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
- NONE,
- PARTIAL_AC,
- PARTIAL_L3,
- EARLY,
- FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
- return g_cpucache_up >= EARLY;
-}
-
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
@@ -826,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
* objects freed on different nodes from which they were allocated) and the
* flushing of remote pcps by calling drain_node_pages.
*/
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
static void init_reap_node(int cpu)
{
@@ -836,17 +848,17 @@ static void init_reap_node(int cpu)
if (node == MAX_NUMNODES)
node = first_node(node_online_map);
- per_cpu(reap_node, cpu) = node;
+ per_cpu(slab_reap_node, cpu) = node;
}
static void next_reap_node(void)
{
- int node = __get_cpu_var(reap_node);
+ int node = __get_cpu_var(slab_reap_node);
node = next_node(node, node_online_map);
if (unlikely(node >= MAX_NUMNODES))
node = first_node(node_online_map);
- __get_cpu_var(reap_node) = node;
+ __get_cpu_var(slab_reap_node) = node;
}
#else
@@ -863,7 +875,7 @@ static void next_reap_node(void)
*/
static void __cpuinit start_cpu_timer(int cpu)
{
- struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+ struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
/*
* When this gets called from do_initcalls via cpucache_init(),
@@ -1027,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
*/
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
- int node = __get_cpu_var(reap_node);
+ int node = __get_cpu_var(slab_reap_node);
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
@@ -1120,7 +1132,7 @@ static void __cpuinit cpuup_canceled(long cpu)
if (nc)
free_block(cachep, nc->entry, nc->avail, node);
- if (!cpus_empty(*mask)) {
+ if (!cpumask_empty(mask)) {
spin_unlock_irq(&l3->list_lock);
goto free_array_cache;
}
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
kfree(shared);
free_alien_cache(alien);
}
+ init_node_lock_keys(node);
+
return 0;
bad:
cpuup_canceled(cpu);
@@ -1286,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
* anything expensive but will only modify reap_work
* and reschedule the timer.
*/
- cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+ cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
/* Now the cache_reaper is guaranteed to be not running. */
- per_cpu(reap_work, cpu).work.func = NULL;
+ per_cpu(slab_reap_work, cpu).work.func = NULL;
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
@@ -2261,9 +2275,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/*
* Determine if the slab management is 'on' or 'off' slab.
* (bootstrapping cannot cope with offslab caches so don't do
- * it too early on.)
+ * it too early on. Always use on-slab management when
+ * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
*/
- if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+ if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+ !(flags & SLAB_NOLEAKTRACE))
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
@@ -2582,8 +2598,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
* kmemleak does not treat the ->s_mem pointer as a reference
* to the object. Otherwise we will not report the leak.
*/
- kmemleak_scan_area(slabp, offsetof(struct slab, list),
- sizeof(struct list_head), local_flags);
+ kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+ local_flags);
if (!slabp)
return NULL;
} else {
@@ -3103,13 +3119,19 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
} else {
STATS_INC_ALLOCMISS(cachep);
objp = cache_alloc_refill(cachep, flags);
+ /*
+ * the 'ac' may be updated by cache_alloc_refill(),
+ * and kmemleak_erase() requires its correct value.
+ */
+ ac = cpu_cache_get(cachep);
}
/*
* To avoid a false negative, if an object that is in one of the
* per-CPU caches is leaked, we need to make sure kmemleak doesn't
* treat the array pointers as a reference to the object.
*/
- kmemleak_erase(&ac->entry[ac->avail]);
+ if (objp)
+ kmemleak_erase(&ac->entry[ac->avail]);
return objp;
}
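
The hunk above re-reads the per-CPU array_cache pointer after cache_alloc_refill(), because refill can install a new array_cache and kmemleak_erase() would otherwise touch a stale one. The standalone sketch below only models that "reload the pointer after a mutating call" rule with an ordinary heap buffer; nothing in it is kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *cache;		/* stands in for the per-CPU array_cache */

/* May replace the backing buffer, as cache_alloc_refill() may. */
static void refill(size_t newsz)
{
	char *bigger = realloc(cache, newsz);

	if (bigger)
		cache = bigger;
}

int main(void)
{
	cache = calloc(16, 1);
	if (!cache)
		return 1;

	char *p = cache;	/* cached copy of the pointer */

	refill(1 << 20);	/* p may now be dangling... */

	p = cache;		/* ...so reload it after the call, as the patch does */
	strcpy(p, "ok");
	puts(p);
	free(cache);
	return 0;
}
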
@@ -3306,7 +3328,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
- if (unlikely(nodeid == -1))
+ if (nodeid == -1)
nodeid = numa_node_id();
if (unlikely(!cachep->nodelists[nodeid])) {
@@ -3558,7 +3580,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
}
EXPORT_SYMBOL(kmem_cache_alloc);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
return __cache_alloc(cachep, flags, __builtin_return_address(0));
@@ -3621,7 +3643,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid)
@@ -3649,7 +3671,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
return ret;
}
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node,
@@ -3669,7 +3691,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
return __do_kmalloc_node(size, flags, node, NULL);
}
EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
#endif /* CONFIG_NUMA */
/**
@@ -3701,7 +3723,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
}
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, __builtin_return_address(0));
diff --git a/mm/slub.c b/mm/slub.c
index 4996fc71955..8d71aaf888d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1735,7 +1735,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
}
local_irq_restore(flags);
- if (unlikely((gfpflags & __GFP_ZERO) && object))
+ if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, objsize);
kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
@@ -1754,7 +1754,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
}
EXPORT_SYMBOL(kmem_cache_alloc);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
return slab_alloc(s, gfpflags, -1, _RET_IP_);
@@ -1775,7 +1775,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node)
@@ -4371,12 +4371,28 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
return len + sprintf(buf + len, "\n");
}
+static void clear_stat(struct kmem_cache *s, enum stat_item si)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ get_cpu_slab(s, cpu)->stat[si] = 0;
+}
+
#define STAT_ATTR(si, text) \
static ssize_t text##_show(struct kmem_cache *s, char *buf) \
{ \
return show_stat(s, buf, si); \
} \
-SLAB_ATTR_RO(text); \
+static ssize_t text##_store(struct kmem_cache *s, \
+ const char *buf, size_t length) \
+{ \
+ if (buf[0] != '0') \
+ return -EINVAL; \
+ clear_stat(s, si); \
+ return length; \
+} \
+SLAB_ATTR(text); \
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
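
The slub change turns each statistics attribute from read-only into read/write by having STAT_ATTR() also paste together a _store function that accepts only "0" and clears the per-CPU counters. A freestanding illustration of that token-pasting pattern follows; the counter storage and return conventions are simplified stand-ins, not the kernel's sysfs plumbing:

#include <stdio.h>

enum stat_item { ALLOC_FASTPATH, ALLOC_SLOWPATH, NR_ITEMS };
static unsigned long stats[NR_ITEMS];

static long show_stat(enum stat_item si) { return (long)stats[si]; }
static void clear_stat(enum stat_item si) { stats[si] = 0; }

/* One macro expands to a paired _show/_store for each attribute. */
#define STAT_ATTR(si, text)						\
static long text##_show(void) { return show_stat(si); }		\
static int text##_store(const char *buf)				\
{									\
	if (buf[0] != '0')						\
		return -1;	/* only "0" (reset) is accepted */	\
	clear_stat(si);							\
	return 0;							\
}

STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath)
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath)

int main(void)
{
	stats[ALLOC_FASTPATH] = 42;
	printf("before reset: %ld\n", alloc_fastpath_show());
	alloc_fastpath_store("0");	/* analogue of: echo 0 > .../alloc_fastpath */
	printf("after reset:  %ld\n", alloc_fastpath_show());
	printf("bad input rejected: %d\n", alloc_slowpath_store("7"));
	printf("slowpath: %ld\n", alloc_slowpath_show());
	return 0;
}
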
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9c590eef791..6c0585b1641 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
@@ -35,11 +36,15 @@
#include <linux/swapops.h>
#include <linux/page_cgroup.h>
+static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
+ unsigned char);
+static void free_swap_count_continuations(struct swap_info_struct *);
+static sector_t map_swap_entry(swp_entry_t, struct block_device**);
+
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
long total_swap_pages;
-static int swap_overflow;
static int least_priority;
static const char Bad_file[] = "Bad swap file entry ";
@@ -49,42 +54,20 @@ static const char Unused_offset[] = "Unused swap offset entry ";
static struct swap_list_t swap_list = {-1, -1};
-static struct swap_info_struct swap_info[MAX_SWAPFILES];
+static struct swap_info_struct *swap_info[MAX_SWAPFILES];
static DEFINE_MUTEX(swapon_mutex);
-/* For reference count accounting in swap_map */
-/* enum for swap_map[] handling. internal use only */
-enum {
- SWAP_MAP = 0, /* ops for reference from swap users */
- SWAP_CACHE, /* ops for reference from swap cache */
-};
-
-static inline int swap_count(unsigned short ent)
-{
- return ent & SWAP_COUNT_MASK;
-}
-
-static inline bool swap_has_cache(unsigned short ent)
+static inline unsigned char swap_count(unsigned char ent)
{
- return !!(ent & SWAP_HAS_CACHE);
+ return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
-static inline unsigned short encode_swapmap(int count, bool has_cache)
-{
- unsigned short ret = count;
-
- if (has_cache)
- return SWAP_HAS_CACHE | ret;
- return ret;
-}
-
-/* returnes 1 if swap entry is freed */
+/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
- int type = si - swap_info;
- swp_entry_t entry = swp_entry(type, offset);
+ swp_entry_t entry = swp_entry(si->type, offset);
struct page *page;
int ret = 0;
@@ -120,7 +103,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
down_read(&swap_unplug_sem);
entry.val = page_private(page);
if (PageSwapCache(page)) {
- struct block_device *bdev = swap_info[swp_type(entry)].bdev;
+ struct block_device *bdev = swap_info[swp_type(entry)]->bdev;
struct backing_dev_info *bdi;
/*
@@ -146,23 +129,28 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
static int discard_swap(struct swap_info_struct *si)
{
struct swap_extent *se;
+ sector_t start_block;
+ sector_t nr_blocks;
int err = 0;
- list_for_each_entry(se, &si->extent_list, list) {
- sector_t start_block = se->start_block << (PAGE_SHIFT - 9);
- sector_t nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
+ /* Do not discard the swap header page! */
+ se = &si->first_swap_extent;
+ start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
+ nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
+ if (nr_blocks) {
+ err = blkdev_issue_discard(si->bdev, start_block,
+ nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
+ if (err)
+ return err;
+ cond_resched();
+ }
- if (se->start_page == 0) {
- /* Do not discard the swap header page! */
- start_block += 1 << (PAGE_SHIFT - 9);
- nr_blocks -= 1 << (PAGE_SHIFT - 9);
- if (!nr_blocks)
- continue;
- }
+ list_for_each_entry(se, &si->first_swap_extent.list, list) {
+ start_block = se->start_block << (PAGE_SHIFT - 9);
+ nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
err = blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_KERNEL,
- DISCARD_FL_BARRIER);
+ nr_blocks, GFP_KERNEL, DISCARD_FL_BARRIER);
if (err)
break;
@@ -201,14 +189,11 @@ static void discard_swap_cluster(struct swap_info_struct *si,
start_block <<= PAGE_SHIFT - 9;
nr_blocks <<= PAGE_SHIFT - 9;
if (blkdev_issue_discard(si->bdev, start_block,
- nr_blocks, GFP_NOIO,
- DISCARD_FL_BARRIER))
+ nr_blocks, GFP_NOIO, DISCARD_FL_BARRIER))
break;
}
lh = se->list.next;
- if (lh == &si->extent_list)
- lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
}
}
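
With the first extent now embedded in swap_info_struct, discard_swap() treats it specially: page numbers are shifted into 512-byte sectors and the header page is stepped over before the discard is issued. The shift arithmetic is easy to get wrong, so here is a small self-contained check of the conversion; a PAGE_SHIFT of 12 is assumed for the demo and no block layer is involved:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages for the demo */

struct extent { uint64_t start_page, nr_pages, start_block; };

/* Convert a page extent to a (start_sector, nr_sectors) discard range,
 * optionally skipping the first page (the swap header must survive). */
static void extent_to_sectors(const struct extent *se, int skip_header,
			      uint64_t *start, uint64_t *nr)
{
	uint64_t first = se->start_block + (skip_header ? 1 : 0);
	uint64_t pages = se->nr_pages - (skip_header ? 1 : 0);

	*start = first << (PAGE_SHIFT - 9);
	*nr = pages << (PAGE_SHIFT - 9);
}

int main(void)
{
	struct extent first = { .start_page = 0, .nr_pages = 16, .start_block = 100 };
	uint64_t start, nr;

	extent_to_sectors(&first, 1, &start, &nr);
	/* 4 KiB page = 8 sectors: expect start 808, nr 120 */
	printf("discard sectors [%llu, +%llu)\n",
	       (unsigned long long)start, (unsigned long long)nr);
	return 0;
}
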
@@ -223,7 +208,7 @@ static int wait_for_discard(void *word)
#define LATENCY_LIMIT 256
static inline unsigned long scan_swap_map(struct swap_info_struct *si,
- int cache)
+ unsigned char usage)
{
unsigned long offset;
unsigned long scan_base;
@@ -354,10 +339,7 @@ checks:
si->lowest_bit = si->max;
si->highest_bit = 0;
}
- if (cache == SWAP_CACHE) /* at usual swap-out via vmscan.c */
- si->swap_map[offset] = encode_swapmap(0, true);
- else /* at suspend */
- si->swap_map[offset] = encode_swapmap(1, false);
+ si->swap_map[offset] = usage;
si->cluster_next = offset + 1;
si->flags -= SWP_SCANNING;
@@ -467,10 +449,10 @@ swp_entry_t get_swap_page(void)
nr_swap_pages--;
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
- si = swap_info + type;
+ si = swap_info[type];
next = si->next;
if (next < 0 ||
- (!wrapped && si->prio != swap_info[next].prio)) {
+ (!wrapped && si->prio != swap_info[next]->prio)) {
next = swap_list.head;
wrapped++;
}
@@ -482,7 +464,7 @@ swp_entry_t get_swap_page(void)
swap_list.next = next;
/* This is called for allocating swap entry for cache */
- offset = scan_swap_map(si, SWAP_CACHE);
+ offset = scan_swap_map(si, SWAP_HAS_CACHE);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
@@ -503,11 +485,11 @@ swp_entry_t get_swap_page_of_type(int type)
pgoff_t offset;
spin_lock(&swap_lock);
- si = swap_info + type;
- if (si->flags & SWP_WRITEOK) {
+ si = swap_info[type];
+ if (si && (si->flags & SWP_WRITEOK)) {
nr_swap_pages--;
/* This is called for allocating swap entry, not cache */
- offset = scan_swap_map(si, SWAP_MAP);
+ offset = scan_swap_map(si, 1);
if (offset) {
spin_unlock(&swap_lock);
return swp_entry(type, offset);
@@ -518,9 +500,9 @@ swp_entry_t get_swap_page_of_type(int type)
return (swp_entry_t) {0};
}
-static struct swap_info_struct * swap_info_get(swp_entry_t entry)
+static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
unsigned long offset, type;
if (!entry.val)
@@ -528,7 +510,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry)
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_nofile;
- p = & swap_info[type];
+ p = swap_info[type];
if (!(p->flags & SWP_USED))
goto bad_device;
offset = swp_offset(entry);
@@ -554,41 +536,56 @@ out:
return NULL;
}
-static int swap_entry_free(struct swap_info_struct *p,
- swp_entry_t ent, int cache)
+static unsigned char swap_entry_free(struct swap_info_struct *p,
+ swp_entry_t entry, unsigned char usage)
{
- unsigned long offset = swp_offset(ent);
- int count = swap_count(p->swap_map[offset]);
- bool has_cache;
+ unsigned long offset = swp_offset(entry);
+ unsigned char count;
+ unsigned char has_cache;
- has_cache = swap_has_cache(p->swap_map[offset]);
+ count = p->swap_map[offset];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
- if (cache == SWAP_MAP) { /* dropping usage count of swap */
- if (count < SWAP_MAP_MAX) {
- count--;
- p->swap_map[offset] = encode_swapmap(count, has_cache);
- }
- } else { /* dropping swap cache flag */
+ if (usage == SWAP_HAS_CACHE) {
VM_BUG_ON(!has_cache);
- p->swap_map[offset] = encode_swapmap(count, false);
-
+ has_cache = 0;
+ } else if (count == SWAP_MAP_SHMEM) {
+ /*
+ * Or we could insist on shmem.c using a special
+ * swap_shmem_free() and free_shmem_swap_and_cache()...
+ */
+ count = 0;
+ } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
+ if (count == COUNT_CONTINUED) {
+ if (swap_count_continued(p, offset, count))
+ count = SWAP_MAP_MAX | COUNT_CONTINUED;
+ else
+ count = SWAP_MAP_MAX;
+ } else
+ count--;
}
- /* return code. */
- count = p->swap_map[offset];
+
+ if (!count)
+ mem_cgroup_uncharge_swap(entry);
+
+ usage = count | has_cache;
+ p->swap_map[offset] = usage;
+
/* free if no reference */
- if (!count) {
+ if (!usage) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
- if (p->prio > swap_info[swap_list.next].prio)
- swap_list.next = p - swap_info;
+ if (swap_list.next >= 0 &&
+ p->prio > swap_info[swap_list.next]->prio)
+ swap_list.next = p->type;
nr_swap_pages++;
p->inuse_pages--;
}
- if (!swap_count(count))
- mem_cgroup_uncharge_swap(ent);
- return count;
+
+ return usage;
}
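
swap_map[] entries shrink from unsigned short to unsigned char here, with the swapcache reference kept as a flag bit and the map count in the remaining bits, so swap_entry_free() can split, adjust and recombine the byte without encode_swapmap(). The toy below mirrors just that split/recombine step; the flag and limit values are illustrative placeholders, the real definitions live in include/linux/swap.h and also cover SWAP_MAP_SHMEM and continuation counts:

#include <stdio.h>

/* Illustrative values only (the kernel's are defined in swap.h). */
#define SWAP_HAS_CACHE	0x40		/* swapcache holds a reference */
#define SWAP_MAP_MAX	0x3e		/* highest in-byte map count   */

static unsigned char entry_free(unsigned char byte, unsigned char usage)
{
	unsigned char has_cache = byte & SWAP_HAS_CACHE;
	unsigned char count = byte & ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE)	/* dropping the cache reference */
		has_cache = 0;
	else if (count)			/* dropping one map reference */
		count--;

	return count | has_cache;	/* 0 means the slot is truly free */
}

int main(void)
{
	unsigned char e = 2 | SWAP_HAS_CACHE;	/* two users plus swapcache */

	e = entry_free(e, 1);			/* one user gone */
	e = entry_free(e, SWAP_HAS_CACHE);	/* cache reference gone */
	e = entry_free(e, 1);			/* last user gone */
	printf("final swap_map byte: %#x (0 == free)\n", e);
	return 0;
}

Keeping the flag and the count in one byte is what lets the real code test "!usage" to decide whether the slot can rejoin the free range.
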
/*
@@ -597,11 +594,11 @@ static int swap_entry_free(struct swap_info_struct *p,
*/
void swap_free(swp_entry_t entry)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
p = swap_info_get(entry);
if (p) {
- swap_entry_free(p, entry, SWAP_MAP);
+ swap_entry_free(p, entry, 1);
spin_unlock(&swap_lock);
}
}
@@ -612,26 +609,21 @@ void swap_free(swp_entry_t entry)
void swapcache_free(swp_entry_t entry, struct page *page)
{
struct swap_info_struct *p;
- int ret;
+ unsigned char count;
p = swap_info_get(entry);
if (p) {
- ret = swap_entry_free(p, entry, SWAP_CACHE);
- if (page) {
- bool swapout;
- if (ret)
- swapout = true; /* the end of swap out */
- else
- swapout = false; /* no more swap users! */
- mem_cgroup_uncharge_swapcache(page, entry, swapout);
- }
+ count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
+ if (page)
+ mem_cgroup_uncharge_swapcache(page, entry, count != 0);
spin_unlock(&swap_lock);
}
- return;
}
/*
* How many references to page are currently swapped out?
+ * This does not give an exact answer when swap count is continued,
+ * but does include the high COUNT_CONTINUED flag to allow for that.
*/
static inline int page_swapcount(struct page *page)
{
@@ -659,6 +651,8 @@ int reuse_swap_page(struct page *page)
int count;
VM_BUG_ON(!PageLocked(page));
+ if (unlikely(PageKsm(page)))
+ return 0;
count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
@@ -667,7 +661,7 @@ int reuse_swap_page(struct page *page)
SetPageDirty(page);
}
}
- return count == 1;
+ return count <= 1;
}
/*
@@ -704,7 +698,7 @@ int free_swap_and_cache(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
- if (swap_entry_free(p, entry, SWAP_MAP) == SWAP_HAS_CACHE) {
+ if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
page = find_get_page(&swapper_space, entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
@@ -741,14 +735,14 @@ int free_swap_and_cache(swp_entry_t entry)
int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
{
struct block_device *bdev = NULL;
- int i;
+ int type;
if (device)
bdev = bdget(device);
spin_lock(&swap_lock);
- for (i = 0; i < nr_swapfiles; i++) {
- struct swap_info_struct *sis = swap_info + i;
+ for (type = 0; type < nr_swapfiles; type++) {
+ struct swap_info_struct *sis = swap_info[type];
if (!(sis->flags & SWP_WRITEOK))
continue;
@@ -758,20 +752,18 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
- return i;
+ return type;
}
if (bdev == sis->bdev) {
- struct swap_extent *se;
+ struct swap_extent *se = &sis->first_swap_extent;
- se = list_entry(sis->extent_list.next,
- struct swap_extent, list);
if (se->start_block == offset) {
if (bdev_p)
*bdev_p = bdgrab(sis->bdev);
spin_unlock(&swap_lock);
bdput(bdev);
- return i;
+ return type;
}
}
}
@@ -783,6 +775,21 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
}
/*
+ * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
+ * corresponding to given index in swap_info (swap type).
+ */
+sector_t swapdev_block(int type, pgoff_t offset)
+{
+ struct block_device *bdev;
+
+ if ((unsigned int)type >= nr_swapfiles)
+ return 0;
+ if (!(swap_info[type]->flags & SWP_WRITEOK))
+ return 0;
+ return map_swap_entry(swp_entry(type, offset), &bdev);
+}
+
+/*
* Return either the total number of swap pages of given type, or the number
* of free pages of that type (depending on @free)
*
@@ -792,18 +799,20 @@ unsigned int count_swap_pages(int type, int free)
{
unsigned int n = 0;
- if (type < nr_swapfiles) {
- spin_lock(&swap_lock);
- if (swap_info[type].flags & SWP_WRITEOK) {
- n = swap_info[type].pages;
+ spin_lock(&swap_lock);
+ if ((unsigned int)type < nr_swapfiles) {
+ struct swap_info_struct *sis = swap_info[type];
+
+ if (sis->flags & SWP_WRITEOK) {
+ n = sis->pages;
if (free)
- n -= swap_info[type].inuse_pages;
+ n -= sis->inuse_pages;
}
- spin_unlock(&swap_lock);
}
+ spin_unlock(&swap_lock);
return n;
}
-#endif
+#endif /* CONFIG_HIBERNATION */
/*
* No need to decide whether this PTE shares the swap entry with others,
@@ -932,7 +941,7 @@ static int unuse_vma(struct vm_area_struct *vma,
unsigned long addr, end, next;
int ret;
- if (page->mapping) {
+ if (page_anon_vma(page)) {
addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
return 0;
@@ -988,7 +997,7 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
{
unsigned int max = si->max;
unsigned int i = prev;
- int count;
+ unsigned char count;
/*
* No need for swap_lock here: we're just looking
@@ -1024,16 +1033,14 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
*/
static int try_to_unuse(unsigned int type)
{
- struct swap_info_struct * si = &swap_info[type];
+ struct swap_info_struct *si = swap_info[type];
struct mm_struct *start_mm;
- unsigned short *swap_map;
- unsigned short swcount;
+ unsigned char *swap_map;
+ unsigned char swcount;
struct page *page;
swp_entry_t entry;
unsigned int i = 0;
int retval = 0;
- int reset_overflow = 0;
- int shmem;
/*
* When searching mms for an entry, a good strategy is to
@@ -1047,8 +1054,7 @@ static int try_to_unuse(unsigned int type)
* together, child after parent. If we race with dup_mmap(), we
* prefer to resolve parent before child, lest we miss entries
* duplicated after we scanned child: using last mm would invert
- * that. Though it's only a serious concern when an overflowed
- * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
+ * that.
*/
start_mm = &init_mm;
atomic_inc(&init_mm.mm_users);
@@ -1110,17 +1116,18 @@ static int try_to_unuse(unsigned int type)
/*
* Remove all references to entry.
- * Whenever we reach init_mm, there's no address space
- * to search, but use it as a reminder to search shmem.
*/
- shmem = 0;
swcount = *swap_map;
- if (swap_count(swcount)) {
- if (start_mm == &init_mm)
- shmem = shmem_unuse(entry, page);
- else
- retval = unuse_mm(start_mm, entry, page);
+ if (swap_count(swcount) == SWAP_MAP_SHMEM) {
+ retval = shmem_unuse(entry, page);
+ /* page has already been unlocked and released */
+ if (retval < 0)
+ break;
+ continue;
}
+ if (swap_count(swcount) && start_mm != &init_mm)
+ retval = unuse_mm(start_mm, entry, page);
+
if (swap_count(*swap_map)) {
int set_start_mm = (*swap_map >= swcount);
struct list_head *p = &start_mm->mmlist;
@@ -1131,7 +1138,7 @@ static int try_to_unuse(unsigned int type)
atomic_inc(&new_start_mm->mm_users);
atomic_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
- while (swap_count(*swap_map) && !retval && !shmem &&
+ while (swap_count(*swap_map) && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!atomic_inc_not_zero(&mm->mm_users))
@@ -1145,10 +1152,9 @@ static int try_to_unuse(unsigned int type)
swcount = *swap_map;
if (!swap_count(swcount)) /* any usage ? */
;
- else if (mm == &init_mm) {
+ else if (mm == &init_mm)
set_start_mm = 1;
- shmem = shmem_unuse(entry, page);
- } else
+ else
retval = unuse_mm(mm, entry, page);
if (set_start_mm && *swap_map < swcount) {
@@ -1164,13 +1170,6 @@ static int try_to_unuse(unsigned int type)
mmput(start_mm);
start_mm = new_start_mm;
}
- if (shmem) {
- /* page has already been unlocked and released */
- if (shmem > 0)
- continue;
- retval = shmem;
- break;
- }
if (retval) {
unlock_page(page);
page_cache_release(page);
@@ -1178,30 +1177,6 @@ static int try_to_unuse(unsigned int type)
}
/*
- * How could swap count reach 0x7ffe ?
- * There's no way to repeat a swap page within an mm
- * (except in shmem, where it's the shared object which takes
- * the reference count)?
- * We believe SWAP_MAP_MAX cannot occur.(if occur, unsigned
- * short is too small....)
- * If that's wrong, then we should worry more about
- * exit_mmap() and do_munmap() cases described above:
- * we might be resetting SWAP_MAP_MAX too early here.
- * We know "Undead"s can happen, they're okay, so don't
- * report them; but do report if we reset SWAP_MAP_MAX.
- */
- /* We might release the lock_page() in unuse_mm(). */
- if (!PageSwapCache(page) || page_private(page) != entry.val)
- goto retry;
-
- if (swap_count(*swap_map) == SWAP_MAP_MAX) {
- spin_lock(&swap_lock);
- *swap_map = encode_swapmap(0, true);
- spin_unlock(&swap_lock);
- reset_overflow = 1;
- }
-
- /*
* If a reference remains (rare), we would like to leave
* the page in the swap cache; but try_to_unmap could
* then re-duplicate the entry once we drop page lock,
@@ -1213,6 +1188,12 @@ static int try_to_unuse(unsigned int type)
* read from disk into another page. Splitting into two
* pages would be incorrect if swap supported "shared
* private" pages, but they are handled by tmpfs files.
+ *
+ * Given how unuse_vma() targets one particular offset
+ * in an anon_vma, once the anon_vma has been determined,
+ * this splitting happens to be just what is needed to
+ * handle where KSM pages have been swapped out: re-reading
+ * is unnecessarily slow, but we can fix that later on.
*/
if (swap_count(*swap_map) &&
PageDirty(page) && PageSwapCache(page)) {
@@ -1242,7 +1223,6 @@ static int try_to_unuse(unsigned int type)
* mark page dirty so shrink_page_list will preserve it.
*/
SetPageDirty(page);
-retry:
unlock_page(page);
page_cache_release(page);
@@ -1254,10 +1234,6 @@ retry:
}
mmput(start_mm);
- if (reset_overflow) {
- printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
- swap_overflow = 0;
- }
return retval;
}
@@ -1270,10 +1246,10 @@ retry:
static void drain_mmlist(void)
{
struct list_head *p, *next;
- unsigned int i;
+ unsigned int type;
- for (i = 0; i < nr_swapfiles; i++)
- if (swap_info[i].inuse_pages)
+ for (type = 0; type < nr_swapfiles; type++)
+ if (swap_info[type]->inuse_pages)
return;
spin_lock(&mmlist_lock);
list_for_each_safe(p, next, &init_mm.mmlist)
@@ -1283,12 +1259,23 @@ static void drain_mmlist(void)
/*
* Use this swapdev's extent info to locate the (PAGE_SIZE) block which
- * corresponds to page offset `offset'.
+ * corresponds to page offset for the specified swap entry.
+ * Note that the type of this function is sector_t, but it returns page offset
+ * into the bdev, not sector offset.
*/
-sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
+static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
- struct swap_extent *se = sis->curr_swap_extent;
- struct swap_extent *start_se = se;
+ struct swap_info_struct *sis;
+ struct swap_extent *start_se;
+ struct swap_extent *se;
+ pgoff_t offset;
+
+ sis = swap_info[swp_type(entry)];
+ *bdev = sis->bdev;
+
+ offset = swp_offset(entry);
+ start_se = sis->curr_swap_extent;
+ se = start_se;
for ( ; ; ) {
struct list_head *lh;
@@ -1298,40 +1285,31 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
return se->start_block + (offset - se->start_page);
}
lh = se->list.next;
- if (lh == &sis->extent_list)
- lh = lh->next;
se = list_entry(lh, struct swap_extent, list);
sis->curr_swap_extent = se;
BUG_ON(se == start_se); /* It *must* be present */
}
}
-#ifdef CONFIG_HIBERNATION
/*
- * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
- * corresponding to given index in swap_info (swap type).
+ * Returns the page offset into bdev for the specified page's swap entry.
*/
-sector_t swapdev_block(int swap_type, pgoff_t offset)
+sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
- struct swap_info_struct *sis;
-
- if (swap_type >= nr_swapfiles)
- return 0;
-
- sis = swap_info + swap_type;
- return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+ swp_entry_t entry;
+ entry.val = page_private(page);
+ return map_swap_entry(entry, bdev);
}
-#endif /* CONFIG_HIBERNATION */
/*
* Free all of a swapdev's extent information
*/
static void destroy_swap_extents(struct swap_info_struct *sis)
{
- while (!list_empty(&sis->extent_list)) {
+ while (!list_empty(&sis->first_swap_extent.list)) {
struct swap_extent *se;
- se = list_entry(sis->extent_list.next,
+ se = list_entry(sis->first_swap_extent.list.next,
struct swap_extent, list);
list_del(&se->list);
kfree(se);
@@ -1352,8 +1330,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
struct swap_extent *new_se;
struct list_head *lh;
- lh = sis->extent_list.prev; /* The highest page extent */
- if (lh != &sis->extent_list) {
+ if (start_page == 0) {
+ se = &sis->first_swap_extent;
+ sis->curr_swap_extent = se;
+ se->start_page = 0;
+ se->nr_pages = nr_pages;
+ se->start_block = start_block;
+ return 1;
+ } else {
+ lh = sis->first_swap_extent.list.prev; /* Highest extent */
se = list_entry(lh, struct swap_extent, list);
BUG_ON(se->start_page + se->nr_pages != start_page);
if (se->start_block + se->nr_pages == start_block) {
@@ -1373,7 +1358,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
new_se->nr_pages = nr_pages;
new_se->start_block = start_block;
- list_add_tail(&new_se->list, &sis->extent_list);
+ list_add_tail(&new_se->list, &sis->first_swap_extent.list);
return 1;
}
@@ -1425,7 +1410,7 @@ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
- goto done;
+ goto out;
}
blkbits = inode->i_blkbits;
@@ -1496,25 +1481,22 @@ reprobe:
sis->max = page_no;
sis->pages = page_no - 1;
sis->highest_bit = page_no - 1;
-done:
- sis->curr_swap_extent = list_entry(sis->extent_list.prev,
- struct swap_extent, list);
- goto out;
+out:
+ return ret;
bad_bmap:
printk(KERN_ERR "swapon: swapfile has holes\n");
ret = -EINVAL;
-out:
- return ret;
+ goto out;
}
SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
- struct swap_info_struct * p = NULL;
- unsigned short *swap_map;
+ struct swap_info_struct *p = NULL;
+ unsigned char *swap_map;
struct file *swap_file, *victim;
struct address_space *mapping;
struct inode *inode;
- char * pathname;
+ char *pathname;
int i, type, prev;
int err;
@@ -1535,8 +1517,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mapping = victim->f_mapping;
prev = -1;
spin_lock(&swap_lock);
- for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
- p = swap_info + type;
+ for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
+ p = swap_info[type];
if (p->flags & SWP_WRITEOK) {
if (p->swap_file->f_mapping == mapping)
break;
@@ -1555,18 +1537,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
goto out_dput;
}
- if (prev < 0) {
+ if (prev < 0)
swap_list.head = p->next;
- } else {
- swap_info[prev].next = p->next;
- }
+ else
+ swap_info[prev]->next = p->next;
if (type == swap_list.next) {
/* just pick something that's safe... */
swap_list.next = swap_list.head;
}
if (p->prio < 0) {
- for (i = p->next; i >= 0; i = swap_info[i].next)
- swap_info[i].prio = p->prio--;
+ for (i = p->next; i >= 0; i = swap_info[i]->next)
+ swap_info[i]->prio = p->prio--;
least_priority++;
}
nr_swap_pages -= p->pages;
@@ -1584,16 +1565,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
if (p->prio < 0)
p->prio = --least_priority;
prev = -1;
- for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
- if (p->prio >= swap_info[i].prio)
+ for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
+ if (p->prio >= swap_info[i]->prio)
break;
prev = i;
}
p->next = i;
if (prev < 0)
- swap_list.head = swap_list.next = p - swap_info;
+ swap_list.head = swap_list.next = type;
else
- swap_info[prev].next = p - swap_info;
+ swap_info[prev]->next = type;
nr_swap_pages += p->pages;
total_swap_pages += p->pages;
p->flags |= SWP_WRITEOK;
@@ -1606,6 +1587,9 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
up_write(&swap_unplug_sem);
destroy_swap_extents(p);
+ if (p->flags & SWP_CONTINUED)
+ free_swap_count_continuations(p);
+
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
drain_mmlist();
@@ -1653,8 +1637,8 @@ out:
/* iterator */
static void *swap_start(struct seq_file *swap, loff_t *pos)
{
- struct swap_info_struct *ptr = swap_info;
- int i;
+ struct swap_info_struct *si;
+ int type;
loff_t l = *pos;
mutex_lock(&swapon_mutex);
@@ -1662,11 +1646,13 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
if (!l)
return SEQ_START_TOKEN;
- for (i = 0; i < nr_swapfiles; i++, ptr++) {
- if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
+ for (type = 0; type < nr_swapfiles; type++) {
+ smp_rmb(); /* read nr_swapfiles before swap_info[type] */
+ si = swap_info[type];
+ if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
if (!--l)
- return ptr;
+ return si;
}
return NULL;
@@ -1674,21 +1660,21 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
{
- struct swap_info_struct *ptr;
- struct swap_info_struct *endptr = swap_info + nr_swapfiles;
+ struct swap_info_struct *si = v;
+ int type;
if (v == SEQ_START_TOKEN)
- ptr = swap_info;
- else {
- ptr = v;
- ptr++;
- }
+ type = 0;
+ else
+ type = si->type + 1;
- for (; ptr < endptr; ptr++) {
- if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
+ for (; type < nr_swapfiles; type++) {
+ smp_rmb(); /* read nr_swapfiles before swap_info[type] */
+ si = swap_info[type];
+ if (!(si->flags & SWP_USED) || !si->swap_map)
continue;
++*pos;
- return ptr;
+ return si;
}
return NULL;
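
swap_info[] becomes a grow-only array of pointers: swapon publishes the new entry before bumping nr_swapfiles (smp_wmb), and the /proc/swaps iterators read nr_swapfiles before dereferencing each slot (smp_rmb). A userspace analogue of that publish/observe ordering using C11 atomics is sketched below; it assumes a single writer (the role swap_lock plays in the kernel) and models only the ordering, not the kernel's barriers:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOTS 8

struct info { int id; };

static struct info *slot[MAX_SLOTS];
static atomic_int nr_slots;

/* Writer: fill the slot, then publish it by releasing the new count. */
static void publish(int id)
{
	int n = atomic_load_explicit(&nr_slots, memory_order_relaxed);
	struct info *p = malloc(sizeof(*p));

	if (!p || n >= MAX_SLOTS)
		return;
	p->id = id;
	slot[n] = p;					/* write the entry first... */
	atomic_store_explicit(&nr_slots, n + 1,		/* ...then make it visible */
			      memory_order_release);
}

/* Reader: acquire the count; every slot it covers is then safe to read. */
static void dump(void)
{
	int n = atomic_load_explicit(&nr_slots, memory_order_acquire);

	for (int i = 0; i < n; i++)
		printf("slot %d -> id %d\n", i, slot[i]->id);
}

int main(void)
{
	publish(100);
	publish(200);
	dump();
	return 0;
}
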
@@ -1701,24 +1687,24 @@ static void swap_stop(struct seq_file *swap, void *v)
static int swap_show(struct seq_file *swap, void *v)
{
- struct swap_info_struct *ptr = v;
+ struct swap_info_struct *si = v;
struct file *file;
int len;
- if (ptr == SEQ_START_TOKEN) {
+ if (si == SEQ_START_TOKEN) {
seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
return 0;
}
- file = ptr->swap_file;
+ file = si->swap_file;
len = seq_path(swap, &file->f_path, " \t\n\\");
seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
len < 40 ? 40 - len : 1, " ",
S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
"partition" : "file\t",
- ptr->pages << (PAGE_SHIFT - 10),
- ptr->inuse_pages << (PAGE_SHIFT - 10),
- ptr->prio);
+ si->pages << (PAGE_SHIFT - 10),
+ si->inuse_pages << (PAGE_SHIFT - 10),
+ si->prio);
return 0;
}
@@ -1765,7 +1751,7 @@ late_initcall(max_swapfiles_check);
*/
SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
char *name = NULL;
struct block_device *bdev = NULL;
struct file *swap_file = NULL;
@@ -1779,30 +1765,52 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
sector_t span;
unsigned long maxpages = 1;
unsigned long swapfilepages;
- unsigned short *swap_map = NULL;
+ unsigned char *swap_map = NULL;
struct page *page = NULL;
struct inode *inode = NULL;
int did_down = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
spin_lock(&swap_lock);
- p = swap_info;
- for (type = 0 ; type < nr_swapfiles ; type++,p++)
- if (!(p->flags & SWP_USED))
+ for (type = 0; type < nr_swapfiles; type++) {
+ if (!(swap_info[type]->flags & SWP_USED))
break;
+ }
error = -EPERM;
if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
+ kfree(p);
goto out;
}
- if (type >= nr_swapfiles)
- nr_swapfiles = type+1;
- memset(p, 0, sizeof(*p));
- INIT_LIST_HEAD(&p->extent_list);
+ if (type >= nr_swapfiles) {
+ p->type = type;
+ swap_info[type] = p;
+ /*
+ * Write swap_info[type] before nr_swapfiles, in case a
+ * racing procfs swap_start() or swap_next() is reading them.
+ * (We never shrink nr_swapfiles, we never free this entry.)
+ */
+ smp_wmb();
+ nr_swapfiles++;
+ } else {
+ kfree(p);
+ p = swap_info[type];
+ /*
+ * Do not memset this entry: a racing procfs swap_next()
+ * would be relying on p->type to remain valid.
+ */
+ }
+ INIT_LIST_HEAD(&p->first_swap_extent.list);
p->flags = SWP_USED;
p->next = -1;
spin_unlock(&swap_lock);
+
name = getname(specialfile);
error = PTR_ERR(name);
if (IS_ERR(name)) {
@@ -1822,7 +1830,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -EBUSY;
for (i = 0; i < nr_swapfiles; i++) {
- struct swap_info_struct *q = &swap_info[i];
+ struct swap_info_struct *q = swap_info[i];
if (i == type || !q->swap_file)
continue;
@@ -1897,6 +1905,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
p->lowest_bit = 1;
p->cluster_next = 1;
+ p->cluster_nr = 0;
/*
* Find out how many pages are allowed for a single swap
@@ -1932,13 +1941,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap;
/* OK, set up the swap map and apply the bad block list */
- swap_map = vmalloc(maxpages * sizeof(short));
+ swap_map = vmalloc(maxpages);
if (!swap_map) {
error = -ENOMEM;
goto bad_swap;
}
- memset(swap_map, 0, maxpages * sizeof(short));
+ memset(swap_map, 0, maxpages);
for (i = 0; i < swap_header->info.nr_badpages; i++) {
int page_nr = swap_header->info.badpages[i];
if (page_nr <= 0 || page_nr >= swap_header->info.last_page) {
@@ -2003,18 +2012,16 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
/* insert swap space into swap_list: */
prev = -1;
- for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
- if (p->prio >= swap_info[i].prio) {
+ for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
+ if (p->prio >= swap_info[i]->prio)
break;
- }
prev = i;
}
p->next = i;
- if (prev < 0) {
- swap_list.head = swap_list.next = p - swap_info;
- } else {
- swap_info[prev].next = p - swap_info;
- }
+ if (prev < 0)
+ swap_list.head = swap_list.next = type;
+ else
+ swap_info[prev]->next = type;
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
error = 0;
@@ -2051,15 +2058,15 @@ out:
void si_swapinfo(struct sysinfo *val)
{
- unsigned int i;
+ unsigned int type;
unsigned long nr_to_be_unused = 0;
spin_lock(&swap_lock);
- for (i = 0; i < nr_swapfiles; i++) {
- if (!(swap_info[i].flags & SWP_USED) ||
- (swap_info[i].flags & SWP_WRITEOK))
- continue;
- nr_to_be_unused += swap_info[i].inuse_pages;
+ for (type = 0; type < nr_swapfiles; type++) {
+ struct swap_info_struct *si = swap_info[type];
+
+ if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
+ nr_to_be_unused += si->inuse_pages;
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
@@ -2069,101 +2076,107 @@ void si_swapinfo(struct sysinfo *val)
/*
* Verify that a swap entry is valid and increment its swap map count.
*
- * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
- * "permanent", but will be reclaimed by the next swapoff.
* Returns error code in following case.
* - success -> 0
* - swp_entry is invalid -> EINVAL
* - swp_entry is migration entry -> EINVAL
* - swap-cache reference is requested but there is already one. -> EEXIST
* - swap-cache reference is requested but the entry is not used. -> ENOENT
+ * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
-static int __swap_duplicate(swp_entry_t entry, bool cache)
+static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
{
- struct swap_info_struct * p;
+ struct swap_info_struct *p;
unsigned long offset, type;
- int result = -EINVAL;
- int count;
- bool has_cache;
+ unsigned char count;
+ unsigned char has_cache;
+ int err = -EINVAL;
if (non_swap_entry(entry))
- return -EINVAL;
+ goto out;
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
- p = type + swap_info;
+ p = swap_info[type];
offset = swp_offset(entry);
spin_lock(&swap_lock);
-
if (unlikely(offset >= p->max))
goto unlock_out;
- count = swap_count(p->swap_map[offset]);
- has_cache = swap_has_cache(p->swap_map[offset]);
+ count = p->swap_map[offset];
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
+ err = 0;
- if (cache == SWAP_CACHE) { /* called for swapcache/swapin-readahead */
+ if (usage == SWAP_HAS_CACHE) {
/* set SWAP_HAS_CACHE if there is no cache and entry is used */
- if (!has_cache && count) {
- p->swap_map[offset] = encode_swapmap(count, true);
- result = 0;
- } else if (has_cache) /* someone added cache */
- result = -EEXIST;
- else if (!count) /* no users */
- result = -ENOENT;
+ if (!has_cache && count)
+ has_cache = SWAP_HAS_CACHE;
+ else if (has_cache) /* someone else added cache */
+ err = -EEXIST;
+ else /* no users remaining */
+ err = -ENOENT;
} else if (count || has_cache) {
- if (count < SWAP_MAP_MAX - 1) {
- p->swap_map[offset] = encode_swapmap(count + 1,
- has_cache);
- result = 0;
- } else if (count <= SWAP_MAP_MAX) {
- if (swap_overflow++ < 5)
- printk(KERN_WARNING
- "swap_dup: swap entry overflow\n");
- p->swap_map[offset] = encode_swapmap(SWAP_MAP_MAX,
- has_cache);
- result = 0;
- }
+
+ if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
+ count += usage;
+ else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
+ err = -EINVAL;
+ else if (swap_count_continued(p, offset, count))
+ count = COUNT_CONTINUED;
+ else
+ err = -ENOMEM;
} else
- result = -ENOENT; /* unused swap entry */
+ err = -ENOENT; /* unused swap entry */
+
+ p->swap_map[offset] = count | has_cache;
+
unlock_out:
spin_unlock(&swap_lock);
out:
- return result;
+ return err;
bad_file:
printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
goto out;
}
+
+/*
+ * Help swapoff by noting that swap entry belongs to shmem/tmpfs
+ * (in which case its reference count is never incremented).
+ */
+void swap_shmem_alloc(swp_entry_t entry)
+{
+ __swap_duplicate(entry, SWAP_MAP_SHMEM);
+}
+
/*
* increase reference count of swap entry by 1.
*/
-void swap_duplicate(swp_entry_t entry)
+int swap_duplicate(swp_entry_t entry)
{
- __swap_duplicate(entry, SWAP_MAP);
+ int err = 0;
+
+ while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
+ err = add_swap_count_continuation(entry, GFP_ATOMIC);
+ return err;
}
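
swap_duplicate() now returns an error and loops: when the in-byte count would overflow, __swap_duplicate() reports -ENOMEM and the caller attaches a continuation page before trying again. The shape of that retry loop is worth seeing in isolation; try_dup() and add_continuation() below are stand-ins written for this sketch, not kernel functions:

#include <errno.h>
#include <stdio.h>

#define COUNT_MAX 3			/* tiny on purpose, to force retries */

static int count, capacity = COUNT_MAX;

/* Stand-in for __swap_duplicate(): fails when the current digit is full. */
static int try_dup(void)
{
	if (count >= capacity)
		return -ENOMEM;
	count++;
	return 0;
}

/* Stand-in for add_swap_count_continuation(): grow the representable range. */
static int add_continuation(void)
{
	capacity += COUNT_MAX;
	return 0;
}

static int dup(void)
{
	int err = 0;

	while (!err && try_dup() == -ENOMEM)
		err = add_continuation();
	return err;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		if (dup())
			return 1;
	printf("count=%d capacity=%d\n", count, capacity);
	return 0;
}
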
/*
* @entry: swap entry for which we allocate swap cache.
*
- * Called when allocating swap cache for exising swap entry,
+ * Called when allocating swap cache for existing swap entry,
* This can return error codes. Returns 0 at success.
* -EBUSY means there is a swap cache.
* Note: return code is different from swap_duplicate().
*/
int swapcache_prepare(swp_entry_t entry)
{
- return __swap_duplicate(entry, SWAP_CACHE);
-}
-
-
-struct swap_info_struct *
-get_swap_info_struct(unsigned type)
-{
- return &swap_info[type];
+ return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
/*
@@ -2181,7 +2194,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
if (!our_page_cluster) /* no readahead */
return 0;
- si = &swap_info[swp_type(entry)];
+ si = swap_info[swp_type(entry)];
target = swp_offset(entry);
base = (target >> our_page_cluster) << our_page_cluster;
end = base + (1 << our_page_cluster);
@@ -2217,3 +2230,219 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
*offset = ++toff;
return nr_pages? ++nr_pages: 0;
}
+
+/*
+ * add_swap_count_continuation - called when a swap count is duplicated
+ * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
+ * page of the original vmalloc'ed swap_map, to hold the continuation count
+ * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
+ * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
+ *
+ * These continuation pages are seldom referenced: the common paths all work
+ * on the original swap_map, only referring to a continuation page when the
+ * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
+ *
+ * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
+ * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
+ * can be called after dropping locks.
+ */
+int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+{
+ struct swap_info_struct *si;
+ struct page *head;
+ struct page *page;
+ struct page *list_page;
+ pgoff_t offset;
+ unsigned char count;
+
+ /*
+ * When debugging, it's easier to use __GFP_ZERO here; but it's better
+ * for latency not to zero a page while GFP_ATOMIC and holding locks.
+ */
+ page = alloc_page(gfp_mask | __GFP_HIGHMEM);
+
+ si = swap_info_get(entry);
+ if (!si) {
+ /*
+ * An acceptable race has occurred since the failing
+ * __swap_duplicate(): the swap entry has been freed,
+ * perhaps even the whole swap_map cleared for swapoff.
+ */
+ goto outer;
+ }
+
+ offset = swp_offset(entry);
+ count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
+
+ if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
+ /*
+ * The higher the swap count, the more likely it is that tasks
+ * will race to add swap count continuation: we need to avoid
+ * over-provisioning.
+ */
+ goto out;
+ }
+
+ if (!page) {
+ spin_unlock(&swap_lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * We are fortunate that although vmalloc_to_page uses pte_offset_map,
+ * no architecture is using highmem pages for kernel pagetables: so it
+ * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
+ */
+ head = vmalloc_to_page(si->swap_map + offset);
+ offset &= ~PAGE_MASK;
+
+ /*
+ * Page allocation does not initialize the page's lru field,
+ * but it does always reset its private field.
+ */
+ if (!page_private(head)) {
+ BUG_ON(count & COUNT_CONTINUED);
+ INIT_LIST_HEAD(&head->lru);
+ set_page_private(head, SWP_CONTINUED);
+ si->flags |= SWP_CONTINUED;
+ }
+
+ list_for_each_entry(list_page, &head->lru, lru) {
+ unsigned char *map;
+
+ /*
+ * If the previous map said no continuation, but we've found
+ * a continuation page, free our allocation and use this one.
+ */
+ if (!(count & COUNT_CONTINUED))
+ goto out;
+
+ map = kmap_atomic(list_page, KM_USER0) + offset;
+ count = *map;
+ kunmap_atomic(map, KM_USER0);
+
+ /*
+ * If this continuation count now has some space in it,
+ * free our allocation and use this one.
+ */
+ if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
+ goto out;
+ }
+
+ list_add_tail(&page->lru, &head->lru);
+ page = NULL; /* now it's attached, don't free it */
+out:
+ spin_unlock(&swap_lock);
+outer:
+ if (page)
+ __free_page(page);
+ return 0;
+}
+
+/*
+ * swap_count_continued - when the original swap_map count is incremented
+ * from SWAP_MAP_MAX, check if there is already a continuation page to carry
+ * into, carry if so, or else fail until a new continuation page is allocated;
+ * when the original swap_map count is decremented from 0 with continuation,
+ * borrow from the continuation and report whether it still holds more.
+ * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
+ */
+static bool swap_count_continued(struct swap_info_struct *si,
+ pgoff_t offset, unsigned char count)
+{
+ struct page *head;
+ struct page *page;
+ unsigned char *map;
+
+ head = vmalloc_to_page(si->swap_map + offset);
+ if (page_private(head) != SWP_CONTINUED) {
+ BUG_ON(count & COUNT_CONTINUED);
+ return false; /* need to add count continuation */
+ }
+
+ offset &= ~PAGE_MASK;
+ page = list_entry(head->lru.next, struct page, lru);
+ map = kmap_atomic(page, KM_USER0) + offset;
+
+ if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
+ goto init_map; /* jump over SWAP_CONT_MAX checks */
+
+ if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
+ /*
+ * Think of how you add 1 to 999
+ */
+ while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ BUG_ON(page == head);
+ map = kmap_atomic(page, KM_USER0) + offset;
+ }
+ if (*map == SWAP_CONT_MAX) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ if (page == head)
+ return false; /* add count continuation */
+ map = kmap_atomic(page, KM_USER0) + offset;
+init_map: *map = 0; /* we didn't zero the page */
+ }
+ *map += 1;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ while (page != head) {
+ map = kmap_atomic(page, KM_USER0) + offset;
+ *map = COUNT_CONTINUED;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+ return true; /* incremented */
+
+ } else { /* decrementing */
+ /*
+ * Think of how you subtract 1 from 1000
+ */
+ BUG_ON(count != COUNT_CONTINUED);
+ while (*map == COUNT_CONTINUED) {
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.next, struct page, lru);
+ BUG_ON(page == head);
+ map = kmap_atomic(page, KM_USER0) + offset;
+ }
+ BUG_ON(*map == 0);
+ *map -= 1;
+ if (*map == 0)
+ count = 0;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ while (page != head) {
+ map = kmap_atomic(page, KM_USER0) + offset;
+ *map = SWAP_CONT_MAX | count;
+ count = COUNT_CONTINUED;
+ kunmap_atomic(map, KM_USER0);
+ page = list_entry(page->lru.prev, struct page, lru);
+ }
+ return count == COUNT_CONTINUED;
+ }
+}
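
The "add 1 to 999" / "subtract 1 from 1000" comments above boil down to ordinary positional carry and borrow, with the swap_map byte and each continuation byte acting as one digit. A stand-alone user-space sketch of that digit arithmetic (BASE, NDIGITS and the helper names are invented for the illustration; this is not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

#define BASE	0x80	/* stand-in for SWAP_CONT_MAX + 1 */
#define NDIGITS	4	/* stand-in for the swap_map byte plus continuation bytes */

/* Increment: carry through full digits, like 999 + 1 -> 1000. */
static bool digits_inc(unsigned int d[NDIGITS])
{
	int i;

	for (i = 0; i < NDIGITS; i++)
		if (d[i] < BASE - 1)
			break;
	if (i == NDIGITS)
		return false;		/* every digit full: another continuation would be needed */
	d[i]++;
	while (--i >= 0)
		d[i] = 0;		/* digits carried over restart at 0 */
	return true;
}

/* Decrement: borrow through zero digits, like 1000 - 1 -> 999. */
static bool digits_dec(unsigned int d[NDIGITS])
{
	int i;

	for (i = 0; i < NDIGITS; i++)
		if (d[i] > 0)
			break;
	if (i == NDIGITS)
		return false;		/* count already zero */
	d[i]--;
	while (--i >= 0)
		d[i] = BASE - 1;	/* digits borrowed through refill to BASE - 1 */
	return true;
}

int main(void)
{
	unsigned int d[NDIGITS] = { BASE - 1, BASE - 1, 0, 0 };

	digits_inc(d);	/* carries into the third digit */
	digits_dec(d);	/* borrows straight back */
	printf("%u %u %u %u\n", d[0], d[1], d[2], d[3]);
	return 0;
}
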
+
+/*
+ * free_swap_count_continuations - called by swapoff to free all the continuation pages
+ * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
+ */
+static void free_swap_count_continuations(struct swap_info_struct *si)
+{
+ pgoff_t offset;
+
+ for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
+ struct page *head;
+ head = vmalloc_to_page(si->swap_map + offset);
+ if (page_private(head)) {
+ struct list_head *this, *next;
+ list_for_each_safe(this, next, &head->lru) {
+ struct page *page;
+ page = list_entry(this, struct page, lru);
+ list_del(this);
+ __free_page(page);
+ }
+ }
+ }
+}
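
free_swap_count_continuations() uses list_for_each_safe() because each continuation page is unlinked and freed inside the loop, so the iterator must grab the next element before the current one disappears. The same idea in a minimal user-space form, with a plain singly linked list standing in for the kernel's list_head (names invented for the example):

#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

/*
 * Free every node on the list.  The next pointer must be read before
 * free(this) -- the same reason the kernel loop above uses the _safe
 * variant of the list iterator.
 */
static void free_list(struct node *head)
{
	struct node *this, *next;

	for (this = head; this != NULL; this = next) {
		next = this->next;
		free(this);
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {	/* build a short list to tear down */
		struct node *n = malloc(sizeof(*n));
		n->payload = i;
		n->next = head;
		head = n;
	}
	free_list(head);
	return 0;
}
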
diff --git a/mm/truncate.c b/mm/truncate.c
index 2c147a7e5f2..342deee2268 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -272,6 +272,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
pagevec_release(&pvec);
break;
}
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -286,6 +287,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -327,6 +329,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
pagevec_init(&pvec, 0);
while (next <= end &&
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t index;
@@ -354,6 +357,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
break;
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
return ret;
@@ -428,6 +432,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
while (next <= end && !wrapped &&
pagevec_lookup(&pvec, mapping, next,
min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+ mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index;
@@ -477,6 +482,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
unlock_page(page);
}
pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
cond_resched();
}
return ret;
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f92..b377ce43080 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,10 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/sched.h>
+#include <linux/hugetlb.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
#include <asm/uaccess.h>
#define CREATE_TRACE_POINTS
@@ -268,6 +272,46 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, pgoff)
+{
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (unlikely(flags & MAP_HUGETLB))
+ return -EINVAL;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ } else if (flags & MAP_HUGETLB) {
+ struct user_struct *user = NULL;
+ /*
+ * VM_NORESERVE is used because the reservations will be
+ * taken when vm_ops->mmap() is called.
+ * A dummy user value is used because we are not locking
+ * memory, so no accounting is necessary.
+ */
+ len = ALIGN(len, huge_page_size(&default_hstate));
+ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+ &user, HUGETLB_ANONHUGE_INODE);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ }
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return retval;
+}
+
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
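
From user space, the new MAP_ANONYMOUS | MAP_HUGETLB branch of mmap_pgoff() is reached with an ordinary mmap() call. A minimal sketch, assuming a 2MB default huge page size and that huge pages have already been reserved (otherwise the call fails with ENOMEM); the fallback MAP_HUGETLB value used here is the x86 one:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* x86 value; other architectures may differ */
#endif

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assumes a 2MB default huge page size */
	void *p;

	/*
	 * Anonymous MAP_HUGETLB requests take the hugetlb_file_setup() branch
	 * of mmap_pgoff(); this fails with ENOMEM unless huge pages have been
	 * reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages).
	 */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, len);	/* actually fault in the huge page */
	munmap(p, len);
	return 0;
}
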
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f551a4a44c..37e69295f25 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -761,7 +761,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
spin_lock(&vbq->lock);
list_add(&vb->free_list, &vbq->free);
spin_unlock(&vbq->lock);
- put_cpu_var(vmap_cpu_blocks);
+ put_cpu_var(vmap_block_queue);
return vb;
}
@@ -826,7 +826,7 @@ again:
}
spin_unlock(&vb->lock);
}
- put_cpu_var(vmap_cpu_blocks);
+ put_cpu_var(vmap_block_queue);
rcu_read_unlock();
if (!addr) {
@@ -1411,6 +1411,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
{
struct page **pages;
unsigned int nr_pages, array_size, i;
+ gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
@@ -1418,13 +1419,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
+ pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
PAGE_KERNEL, node, caller);
area->flags |= VM_VPAGES;
} else {
- pages = kmalloc_node(array_size,
- (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
- node);
+ pages = kmalloc_node(array_size, nested_gfp, node);
}
area->pages = pages;
area->caller = caller;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 777af57fd8c..885207a6b6b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -55,6 +55,11 @@ struct scan_control {
/* Number of pages freed so far during a call to shrink_zones() */
unsigned long nr_reclaimed;
+ /* How many pages shrink_list() should reclaim */
+ unsigned long nr_to_reclaim;
+
+ unsigned long hibernation_mode;
+
/* This context's GFP mask */
gfp_t gfp_mask;
@@ -66,12 +71,6 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
int may_swap;
- /* This context's SWAP_CLUSTER_MAX. If freeing memory for
- * suspend, we effectively ignore SWAP_CLUSTER_MAX.
- * In this context, it doesn't matter that we scan the
- * whole list at once. */
- int swap_cluster_max;
-
int swappiness;
int all_unreclaimable;
@@ -358,7 +357,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* stalls if we need to run get_block(). We could test
* PagePrivate for that.
*
- * If this process is currently in generic_file_write() against
+ * If this process is currently in __generic_file_aio_write() against
* this page's queue, we can perform writeback even if that
* will block.
*
@@ -1132,7 +1131,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
unsigned long nr_anon;
unsigned long nr_file;
- nr_taken = sc->isolate_pages(sc->swap_cluster_max,
+ nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
&page_list, &nr_scan, sc->order, mode,
zone, sc->mem_cgroup, 0, file);
@@ -1166,10 +1165,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
- reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
- reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
- reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
- reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+ reclaim_stat->recent_scanned[0] += nr_anon;
+ reclaim_stat->recent_scanned[1] += nr_file;
spin_unlock_irq(&zone->lru_lock);
@@ -1464,20 +1461,26 @@ static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
return low;
}
+static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
+ int file)
+{
+ if (file)
+ return inactive_file_is_low(zone, sc);
+ else
+ return inactive_anon_is_low(zone, sc);
+}
+
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct zone *zone, struct scan_control *sc, int priority)
{
int file = is_file_lru(lru);
- if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
- shrink_active_list(nr_to_scan, zone, sc, priority, file);
+ if (is_active_lru(lru)) {
+ if (inactive_list_is_low(zone, sc, file))
+ shrink_active_list(nr_to_scan, zone, sc, priority, file);
return 0;
}
- if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
- shrink_active_list(nr_to_scan, zone, sc, priority, file);
- return 0;
- }
return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}
@@ -1567,15 +1570,14 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
* until we collected @swap_cluster_max pages to scan.
*/
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
- unsigned long *nr_saved_scan,
- unsigned long swap_cluster_max)
+ unsigned long *nr_saved_scan)
{
unsigned long nr;
*nr_saved_scan += nr_to_scan;
nr = *nr_saved_scan;
- if (nr >= swap_cluster_max)
+ if (nr >= SWAP_CLUSTER_MAX)
*nr_saved_scan = 0;
else
nr = 0;
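
The nr_scan_try_batch() helper above is small enough to exercise on its own; the following user-space rendition (with SWAP_CLUSTER_MAX hard-coded to the kernel's usual 32) shows how sub-batch requests accumulate in *nr_saved_scan until a full batch is released:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL	/* the kernel's usual batch size */

/* Accumulate small scan requests until they add up to a full batch. */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= SWAP_CLUSTER_MAX)
		*nr_saved_scan = 0;	/* hand out everything saved so far */
	else
		nr = 0;			/* still too small: keep saving */

	return nr;
}

int main(void)
{
	unsigned long saved = 0;
	unsigned long i;

	/* Ten requests of 5 pages: only the 7th returns a batch (35 pages). */
	for (i = 1; i <= 10; i++)
		printf("request %lu -> scan %lu now\n",
		       i, nr_scan_try_batch(5, &saved));
	return 0;
}
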
@@ -1594,7 +1596,7 @@ static void shrink_zone(int priority, struct zone *zone,
unsigned long percent[2]; /* anon @ 0; file @ 1 */
enum lru_list l;
unsigned long nr_reclaimed = sc->nr_reclaimed;
- unsigned long swap_cluster_max = sc->swap_cluster_max;
+ unsigned long nr_to_reclaim = sc->nr_to_reclaim;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
int noswap = 0;
@@ -1616,15 +1618,15 @@ static void shrink_zone(int priority, struct zone *zone,
scan = (scan * percent[file]) / 100;
}
nr[l] = nr_scan_try_batch(scan,
- &reclaim_stat->nr_saved_scan[l],
- swap_cluster_max);
+ &reclaim_stat->nr_saved_scan[l]);
}
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
for_each_evictable_lru(l) {
if (nr[l]) {
- nr_to_scan = min(nr[l], swap_cluster_max);
+ nr_to_scan = min_t(unsigned long,
+ nr[l], SWAP_CLUSTER_MAX);
nr[l] -= nr_to_scan;
nr_reclaimed += shrink_list(l, nr_to_scan,
@@ -1639,8 +1641,7 @@ static void shrink_zone(int priority, struct zone *zone,
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed > swap_cluster_max &&
- priority < DEF_PRIORITY && !current_is_kswapd())
+ if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
break;
}
@@ -1738,6 +1739,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
+ unsigned long writeback_threshold;
delayacct_freepages_start();
@@ -1773,7 +1775,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
}
}
total_scanned += sc->nr_scanned;
- if (sc->nr_reclaimed >= sc->swap_cluster_max) {
+ if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
ret = sc->nr_reclaimed;
goto out;
}
@@ -1785,14 +1787,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
* that's undesirable in laptop mode, where we *want* lumpy
* writeout. So in laptop mode, write out the whole world.
*/
- if (total_scanned > sc->swap_cluster_max +
- sc->swap_cluster_max / 2) {
+ writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
+ if (total_scanned > writeback_threshold) {
wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
sc->may_writepage = 1;
}
/* Take a nap, wait for some writeback to complete */
- if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
+ if (!sc->hibernation_mode && sc->nr_scanned &&
+ priority < DEF_PRIORITY - 2)
congestion_wait(BLK_RW_ASYNC, HZ/10);
}
/* top priority shrink_zones still had more to do? don't OOM, then */
@@ -1831,7 +1834,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
struct scan_control sc = {
.gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
.may_unmap = 1,
.may_swap = 1,
.swappiness = vm_swappiness,
@@ -1855,7 +1858,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = !noswap,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem,
@@ -1889,7 +1891,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = !noswap,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
.swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
@@ -1904,6 +1906,30 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
}
#endif
+/* is kswapd sleeping prematurely? */
+static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+{
+ int i;
+
+ /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
+ if (remaining)
+ return 1;
+
+ /* If after HZ/10, a zone is below the high mark, it's premature */
+ for (i = 0; i < pgdat->nr_zones; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
+ 0, 0))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at high_wmark_pages(zone).
@@ -1936,7 +1962,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
.may_swap = 1,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
+ /*
+ * kswapd doesn't want to be bailed out while reclaiming, because
+ * we want to put equal scanning pressure on each zone.
+ */
+ .nr_to_reclaim = ULONG_MAX,
.swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
@@ -1961,6 +1991,7 @@ loop_again:
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long lru_pages = 0;
+ int has_under_min_watermark_zone = 0;
/* The swap token gets in the way of swapout... */
if (!priority)
@@ -2067,6 +2098,15 @@ loop_again:
if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
sc.may_writepage = 1;
+
+ /*
+ * We are still under the min watermark, which means we have a
+ * GFP_ATOMIC allocation failure risk. Hurry up!
+ */
+ if (!zone_watermark_ok(zone, order, min_wmark_pages(zone),
+ end_zone, 0))
+ has_under_min_watermark_zone = 1;
+
}
if (all_zones_ok)
break; /* kswapd: all done */
@@ -2074,8 +2114,12 @@ loop_again:
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && priority < DEF_PRIORITY - 2)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+ if (has_under_min_watermark_zone)
+ count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
+ else
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+ }
/*
* We do this so kswapd doesn't build up large priorities for
@@ -2173,6 +2217,7 @@ static int kswapd(void *p)
order = 0;
for ( ; ; ) {
unsigned long new_order;
+ int ret;
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
new_order = pgdat->kswapd_max_order;
@@ -2184,19 +2229,45 @@ static int kswapd(void *p)
*/
order = new_order;
} else {
- if (!freezing(current))
- schedule();
+ if (!freezing(current) && !kthread_should_stop()) {
+ long remaining = 0;
+
+ /* Try to sleep for a short interval */
+ if (!sleeping_prematurely(pgdat, order, remaining)) {
+ remaining = schedule_timeout(HZ/10);
+ finish_wait(&pgdat->kswapd_wait, &wait);
+ prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+ }
+
+ /*
+ * After a short sleep, check if it was a
+ * premature sleep. If not, then go fully
+ * to sleep until explicitly woken up
+ */
+ if (!sleeping_prematurely(pgdat, order, remaining))
+ schedule();
+ else {
+ if (remaining)
+ count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+ else
+ count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+ }
+ }
order = pgdat->kswapd_max_order;
}
finish_wait(&pgdat->kswapd_wait, &wait);
- if (!try_to_freeze()) {
- /* We can speed up thawing tasks if we don't call
- * balance_pgdat after returning from the refrigerator
- */
+ ret = try_to_freeze();
+ if (kthread_should_stop())
+ break;
+
+ /*
+ * We can speed up thawing tasks if we don't call balance_pgdat
+ * after returning from the refrigerator
+ */
+ if (!ret)
balance_pgdat(pgdat, order);
- }
}
return 0;
}
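
The reworked kswapd idle path naps for HZ/10, re-runs sleeping_prematurely(), and only then commits to a full sleep. A rough user-space model of that two-stage pattern, with nanosleep() standing in for schedule_timeout(), pause() for the indefinite schedule(), and the watermark test reduced to a stub (all names invented for the sketch):

#include <signal.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/* Stub for sleeping_prematurely(): true when work is already waiting. */
static bool work_waiting(void)
{
	return false;
}

static void on_wakeup(int sig)
{
	(void)sig;		/* just interrupt pause() */
}

static void idle_once(void)
{
	const struct timespec nap = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	if (!work_waiting())
		nanosleep(&nap, NULL);	/* phase 1: short trial sleep (~HZ/10) */

	if (!work_waiting())
		pause();		/* phase 2: sleep until explicitly woken */
	/* else: skip the long sleep and go straight back to work */
}

int main(void)
{
	signal(SIGALRM, on_wakeup);
	alarm(1);			/* pretend something wakes the daemon after 1s */
	idle_once();
	return 0;
}
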
@@ -2260,148 +2331,43 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
#ifdef CONFIG_HIBERNATION
/*
- * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority.
- *
- * For pass > 3 we also try to shrink the LRU lists that contain a few pages
- */
-static void shrink_all_zones(unsigned long nr_pages, int prio,
- int pass, struct scan_control *sc)
-{
- struct zone *zone;
- unsigned long nr_reclaimed = 0;
- struct zone_reclaim_stat *reclaim_stat;
-
- for_each_populated_zone(zone) {
- enum lru_list l;
-
- if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
- continue;
-
- for_each_evictable_lru(l) {
- enum zone_stat_item ls = NR_LRU_BASE + l;
- unsigned long lru_pages = zone_page_state(zone, ls);
-
- /* For pass = 0, we don't shrink the active list */
- if (pass == 0 && (l == LRU_ACTIVE_ANON ||
- l == LRU_ACTIVE_FILE))
- continue;
-
- reclaim_stat = get_reclaim_stat(zone, sc);
- reclaim_stat->nr_saved_scan[l] +=
- (lru_pages >> prio) + 1;
- if (reclaim_stat->nr_saved_scan[l]
- >= nr_pages || pass > 3) {
- unsigned long nr_to_scan;
-
- reclaim_stat->nr_saved_scan[l] = 0;
- nr_to_scan = min(nr_pages, lru_pages);
- nr_reclaimed += shrink_list(l, nr_to_scan, zone,
- sc, prio);
- if (nr_reclaimed >= nr_pages) {
- sc->nr_reclaimed += nr_reclaimed;
- return;
- }
- }
- }
- }
- sc->nr_reclaimed += nr_reclaimed;
-}
-
-/*
- * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
* freed pages.
*
* Rather than trying to age LRUs the aim is to preserve the overall
* LRU order by reclaiming preferentially
* inactive > active > active referenced > active mapped
*/
-unsigned long shrink_all_memory(unsigned long nr_pages)
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
{
- unsigned long lru_pages, nr_slab;
- int pass;
struct reclaim_state reclaim_state;
struct scan_control sc = {
- .gfp_mask = GFP_KERNEL,
- .may_unmap = 0,
+ .gfp_mask = GFP_HIGHUSER_MOVABLE,
+ .may_swap = 1,
+ .may_unmap = 1,
.may_writepage = 1,
+ .nr_to_reclaim = nr_to_reclaim,
+ .hibernation_mode = 1,
+ .swappiness = vm_swappiness,
+ .order = 0,
.isolate_pages = isolate_pages_global,
- .nr_reclaimed = 0,
};
+ struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+ struct task_struct *p = current;
+ unsigned long nr_reclaimed;
- current->reclaim_state = &reclaim_state;
-
- lru_pages = global_reclaimable_pages();
- nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
- /* If slab caches are huge, it's better to hit them first */
- while (nr_slab >= lru_pages) {
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
- if (!reclaim_state.reclaimed_slab)
- break;
-
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- nr_slab -= reclaim_state.reclaimed_slab;
- }
-
- /*
- * We try to shrink LRUs in 5 passes:
- * 0 = Reclaim from inactive_list only
- * 1 = Reclaim from active list but don't reclaim mapped
- * 2 = 2nd pass of type 1
- * 3 = Reclaim mapped (normal reclaim)
- * 4 = 2nd pass of type 3
- */
- for (pass = 0; pass < 5; pass++) {
- int prio;
-
- /* Force reclaiming mapped pages in the passes #3 and #4 */
- if (pass > 2)
- sc.may_unmap = 1;
-
- for (prio = DEF_PRIORITY; prio >= 0; prio--) {
- unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
-
- sc.nr_scanned = 0;
- sc.swap_cluster_max = nr_to_scan;
- shrink_all_zones(nr_to_scan, prio, pass, &sc);
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(sc.nr_scanned, sc.gfp_mask,
- global_reclaimable_pages());
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- if (sc.nr_reclaimed >= nr_pages)
- goto out;
-
- if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
- congestion_wait(BLK_RW_ASYNC, HZ / 10);
- }
- }
-
- /*
- * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
- * something in slab caches
- */
- if (!sc.nr_reclaimed) {
- do {
- reclaim_state.reclaimed_slab = 0;
- shrink_slab(nr_pages, sc.gfp_mask,
- global_reclaimable_pages());
- sc.nr_reclaimed += reclaim_state.reclaimed_slab;
- } while (sc.nr_reclaimed < nr_pages &&
- reclaim_state.reclaimed_slab > 0);
- }
+ p->flags |= PF_MEMALLOC;
+ lockdep_set_current_reclaim_state(sc.gfp_mask);
+ reclaim_state.reclaimed_slab = 0;
+ p->reclaim_state = &reclaim_state;
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
-out:
- current->reclaim_state = NULL;
+ p->reclaim_state = NULL;
+ lockdep_clear_current_reclaim_state();
+ p->flags &= ~PF_MEMALLOC;
- return sc.nr_reclaimed;
+ return nr_reclaimed;
}
#endif /* CONFIG_HIBERNATION */
@@ -2451,6 +2417,17 @@ int kswapd_run(int nid)
return ret;
}
+/*
+ * Called by memory hotplug when all memory in a node is offlined.
+ */
+void kswapd_stop(int nid)
+{
+ struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+ if (kswapd)
+ kthread_stop(kswapd);
+}
+
static int __init kswapd_init(void)
{
int nid;
@@ -2553,8 +2530,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
.may_swap = 1,
- .swap_cluster_max = max_t(unsigned long, nr_pages,
- SWAP_CLUSTER_MAX),
+ .nr_to_reclaim = max_t(unsigned long, nr_pages,
+ SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
.order = order,
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9fee..6051fbab67b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -683,6 +683,9 @@ static const char * const vmstat_text[] = {
"slabs_scanned",
"kswapd_steal",
"kswapd_inodesteal",
+ "kswapd_low_wmark_hit_quickly",
+ "kswapd_high_wmark_hit_quickly",
+ "kswapd_skip_congestion_wait",
"pageoutrun",
"allocstall",
@@ -883,11 +886,10 @@ static void vmstat_update(struct work_struct *w)
static void __cpuinit start_cpu_timer(int cpu)
{
- struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+ struct delayed_work *work = &per_cpu(vmstat_work, cpu);
- INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
- schedule_delayed_work_on(cpu, vmstat_work,
- __round_jiffies_relative(HZ, cpu));
+ INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+ schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
/*
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4dd873e3a1b..be1cb909d8c 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -42,6 +42,8 @@
#include <net/9p/client.h>
#include <net/9p/transport.h>
+#include <linux/syscalls.h> /* killme */
+
#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR 2
@@ -788,24 +790,41 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
- int fd, ret;
+ struct p9_trans_fd *p;
+ int ret, fd;
+
+ p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
csocket->sk->sk_allocation = GFP_NOIO;
fd = sock_map_fd(csocket, 0);
if (fd < 0) {
P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
+ sock_release(csocket);
+ kfree(p);
return fd;
}
- ret = p9_fd_open(client, fd, fd);
- if (ret < 0) {
- P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
+ get_file(csocket->file);
+ get_file(csocket->file);
+ p->wr = p->rd = csocket->file;
+ client->trans = p;
+ client->status = Connected;
+
+ sys_close(fd); /* still racy */
+
+ p->rd->f_flags |= O_NONBLOCK;
+
+ p->conn = p9_conn_create(client);
+ if (IS_ERR(p->conn)) {
+ ret = PTR_ERR(p->conn);
+ p->conn = NULL;
+ kfree(p);
+ sockfd_put(csocket);
sockfd_put(csocket);
return ret;
}
-
- ((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
-
return 0;
}
@@ -883,7 +902,6 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
struct socket *csocket;
struct sockaddr_in sin_server;
struct p9_fd_opts opts;
- struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
err = parse_opts(args, &opts);
if (err < 0)
@@ -897,12 +915,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
sin_server.sin_family = AF_INET;
sin_server.sin_addr.s_addr = in_aton(addr);
sin_server.sin_port = htons(opts.port);
- sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
+ err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
- if (!csocket) {
+ if (err) {
P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
- err = -EIO;
- goto error;
+ return err;
}
err = csocket->ops->connect(csocket,
@@ -912,30 +929,11 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
P9_EPRINTK(KERN_ERR,
"p9_trans_tcp: problem connecting socket to %s\n",
addr);
- goto error;
- }
-
- err = p9_socket_open(client, csocket);
- if (err < 0)
- goto error;
-
- p = (struct p9_trans_fd *) client->trans;
- p->conn = p9_conn_create(client);
- if (IS_ERR(p->conn)) {
- err = PTR_ERR(p->conn);
- p->conn = NULL;
- goto error;
- }
-
- return 0;
-
-error:
- if (csocket)
sock_release(csocket);
+ return err;
+ }
- kfree(p);
-
- return err;
+ return p9_socket_open(client, csocket);
}
static int
@@ -944,49 +942,33 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
int err;
struct socket *csocket;
struct sockaddr_un sun_server;
- struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
csocket = NULL;
if (strlen(addr) > UNIX_PATH_MAX) {
P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
addr);
- err = -ENAMETOOLONG;
- goto error;
+ return -ENAMETOOLONG;
}
sun_server.sun_family = PF_UNIX;
strcpy(sun_server.sun_path, addr);
- sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+ err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+ if (err < 0) {
+ P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
+ return err;
+ }
err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
sizeof(struct sockaddr_un) - 1, 0);
if (err < 0) {
P9_EPRINTK(KERN_ERR,
"p9_trans_unix: problem connecting socket: %s: %d\n",
addr, err);
- goto error;
- }
-
- err = p9_socket_open(client, csocket);
- if (err < 0)
- goto error;
-
- p = (struct p9_trans_fd *) client->trans;
- p->conn = p9_conn_create(client);
- if (IS_ERR(p->conn)) {
- err = PTR_ERR(p->conn);
- p->conn = NULL;
- goto error;
- }
-
- return 0;
-
-error:
- if (csocket)
sock_release(csocket);
+ return err;
+ }
- kfree(p);
- return err;
+ return p9_socket_open(client, csocket);
}
static int
@@ -994,7 +976,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
int err;
struct p9_fd_opts opts;
- struct p9_trans_fd *p = NULL; /* this get allocated in p9_fd_open */
+ struct p9_trans_fd *p;
parse_opts(args, &opts);
@@ -1005,21 +987,19 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
err = p9_fd_open(client, opts.rfd, opts.wfd);
if (err < 0)
- goto error;
+ return err;
p = (struct p9_trans_fd *) client->trans;
p->conn = p9_conn_create(client);
if (IS_ERR(p->conn)) {
err = PTR_ERR(p->conn);
p->conn = NULL;
- goto error;
+ fput(p->rd);
+ fput(p->wr);
+ return err;
}
return 0;
-
-error:
- kfree(p);
- return err;
}
static struct p9_trans_module p9_tcp_trans = {
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 26a646d4eb3..c9230c39869 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -554,6 +554,12 @@ static const struct net_device_ops br2684_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static const struct net_device_ops br2684_netdev_ops_routed = {
+ .ndo_start_xmit = br2684_start_xmit,
+ .ndo_set_mac_address = br2684_mac_addr,
+ .ndo_change_mtu = eth_change_mtu
+};
+
static void br2684_setup(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
@@ -569,11 +575,10 @@ static void br2684_setup(struct net_device *netdev)
static void br2684_setup_routed(struct net_device *netdev)
{
struct br2684_dev *brdev = BRPRIV(netdev);
- brdev->net_dev = netdev;
+ brdev->net_dev = netdev;
netdev->hard_header_len = 0;
-
- netdev->netdev_ops = &br2684_netdev_ops;
+ netdev->netdev_ops = &br2684_netdev_ops_routed;
netdev->addr_len = 0;
netdev->mtu = 1500;
netdev->type = ARPHRD_PPP;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index b2d64456032..42749b7b917 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -62,7 +62,6 @@ static int lec_open(struct net_device *dev);
static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int lec_close(struct net_device *dev);
-static void lec_init(struct net_device *dev);
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
const unsigned char *mac_addr);
static int lec_arp_remove(struct lec_priv *priv,
@@ -670,13 +669,6 @@ static const struct net_device_ops lec_netdev_ops = {
.ndo_set_multicast_list = lec_set_multicast_list,
};
-
-static void lec_init(struct net_device *dev)
-{
- dev->netdev_ops = &lec_netdev_ops;
- printk("%s: Initialized!\n", dev->name);
-}
-
static const unsigned char lec_ctrl_magic[] = {
0xff,
0x00,
@@ -893,6 +885,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
dev_lec[i] = alloc_etherdev(size);
if (!dev_lec[i])
return -ENOMEM;
+ dev_lec[i]->netdev_ops = &lec_netdev_ops;
snprintf(dev_lec[i]->name, IFNAMSIZ, "lec%d", i);
if (register_netdev(dev_lec[i])) {
free_netdev(dev_lec[i]);
@@ -901,7 +894,6 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
priv = netdev_priv(dev_lec[i]);
priv->is_trdev = is_trdev;
- lec_init(dev_lec[i]);
} else {
priv = netdev_priv(dev_lec[i]);
if (priv->lecd)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 569750010fd..18e7f5a43dc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -770,7 +770,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid = hid_allocate_device();
if (IS_ERR(hid))
- return PTR_ERR(session->hid);
+ return PTR_ERR(hid);
session->hid = hid;
session->req = req;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 5129b88c8e5..1120cf14a54 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1212,6 +1212,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
bh_lock_sock(sk);
if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
+ bh_unlock_sock(sk);
return;
}
@@ -3435,8 +3436,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
(pi->unacked_frames > 0))
__mod_retrans_timer();
- l2cap_ertm_send(sk);
pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ l2cap_ertm_send(sk);
}
break;
@@ -3471,9 +3472,9 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
if (rx_control & L2CAP_CTRL_POLL) {
- l2cap_retransmit_frame(sk, tx_seq);
pi->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(sk);
+ l2cap_retransmit_frame(sk, tx_seq);
l2cap_ertm_send(sk);
if (pi->conn_state & L2CAP_CONN_WAIT_F) {
pi->srej_save_reqseq = tx_seq;
diff --git a/net/compat.c b/net/compat.c
index e1a56ade803..a1fb1b079a8 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -754,26 +754,21 @@ asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
- struct timespec __user *timeout)
+ struct compat_timespec __user *timeout)
{
int datagrams;
struct timespec ktspec;
- struct compat_timespec __user *utspec;
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
- utspec = (struct compat_timespec __user *)timeout;
- if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
- get_user(ktspec.tv_nsec, &utspec->tv_nsec))
+ if (get_compat_timespec(&ktspec, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, &ktspec);
- if (datagrams > 0 &&
- (put_user(ktspec.tv_sec, &utspec->tv_sec) ||
- put_user(ktspec.tv_nsec, &utspec->tv_nsec)))
+ if (datagrams > 0 && put_compat_timespec(&ktspec, timeout))
datagrams = -EFAULT;
return datagrams;
diff --git a/net/core/dev.c b/net/core/dev.c
index c36a17aafcf..be9924f60ec 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4771,21 +4771,23 @@ static void net_set_todo(struct net_device *dev)
static void rollback_registered_many(struct list_head *head)
{
- struct net_device *dev;
+ struct net_device *dev, *tmp;
BUG_ON(dev_boot_phase);
ASSERT_RTNL();
- list_for_each_entry(dev, head, unreg_list) {
+ list_for_each_entry_safe(dev, tmp, head, unreg_list) {
/* Some devices call without registering
- * for initialization unwind.
+ * for initialization unwind. Remove those
+ * devices and proceed with the remaining.
*/
if (dev->reg_state == NETREG_UNINITIALIZED) {
pr_debug("unregister_netdevice: device %s/%p never "
"was registered\n", dev->name, dev);
WARN_ON(1);
- return;
+ list_del(&dev->unreg_list);
+ continue;
}
BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -5033,6 +5035,11 @@ int register_netdevice(struct net_device *dev)
rollback_registered(dev);
dev->reg_state = NETREG_UNREGISTERED;
}
+ /*
+ * Prevent userspace races by waiting until the network
+ * device is fully setup before sending notifications.
+ */
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
out:
return ret;
@@ -5595,6 +5602,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Notify protocols, that a new device appeared. */
call_netdevice_notifiers(NETDEV_REGISTER, dev);
+ /*
+ * Prevent userspace races by waiting until the network
+ * device is fully setup before sending notifications.
+ */
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+
synchronize_net();
err = 0;
out:
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33148a56819..794bcb897ff 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1364,15 +1364,15 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_UNREGISTER:
rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
break;
- case NETDEV_REGISTER:
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
- break;
case NETDEV_UP:
case NETDEV_DOWN:
rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
break;
+ case NETDEV_POST_INIT:
+ case NETDEV_REGISTER:
case NETDEV_CHANGE:
case NETDEV_GOING_DOWN:
+ case NETDEV_UNREGISTER_BATCH:
break;
default:
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bfa3e7865a8..93c4e060c91 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -93,7 +93,7 @@ static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
/* Pipe buffer operations for a socket. */
-static struct pipe_buf_operations sock_pipe_buf_ops = {
+static const struct pipe_buf_operations sock_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index efbcfdc1279..dad7bc4878e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -408,7 +408,7 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
dccp_sync_mss(newsk, dst_mtu(dst));
- __inet_hash_nolisten(newsk);
+ __inet_hash_nolisten(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6574215a1f5..baf05cf43c2 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -46,7 +46,7 @@ static void dccp_v6_hash(struct sock *sk)
return;
}
local_bh_disable();
- __inet6_hash(sk);
+ __inet6_hash(sk, NULL);
local_bh_enable();
}
}
@@ -644,7 +644,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
- __inet6_hash(newsk);
+ __inet6_hash(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index dc328425fa2..a1362dc8abb 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -43,7 +43,7 @@ static int bufsize = 64 * 1024;
static const char procname[] = "dccpprobe";
static struct {
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t lock;
wait_queue_head_t wait;
struct timespec tstart;
@@ -67,7 +67,7 @@ static void printl(const char *fmt, ...)
len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
va_end(args);
- kfifo_put(dccpw.fifo, tbuf, len);
+ kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
wake_up(&dccpw.wait);
}
@@ -109,7 +109,7 @@ static struct jprobe dccp_send_probe = {
static int dccpprobe_open(struct inode *inode, struct file *file)
{
- kfifo_reset(dccpw.fifo);
+ kfifo_reset(&dccpw.fifo);
getnstimeofday(&dccpw.tstart);
return 0;
}
@@ -131,11 +131,11 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf,
return -ENOMEM;
error = wait_event_interruptible(dccpw.wait,
- __kfifo_len(dccpw.fifo) != 0);
+ kfifo_len(&dccpw.fifo) != 0);
if (error)
goto out_free;
- cnt = kfifo_get(dccpw.fifo, tbuf, len);
+ cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
out_free:
@@ -156,10 +156,8 @@ static __init int dccpprobe_init(void)
init_waitqueue_head(&dccpw.wait);
spin_lock_init(&dccpw.lock);
- dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock);
- if (IS_ERR(dccpw.fifo))
- return PTR_ERR(dccpw.fifo);
-
+ if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
+ return ret;
if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
goto err0;
@@ -172,14 +170,14 @@ static __init int dccpprobe_init(void)
err1:
proc_net_remove(&init_net, procname);
err0:
- kfifo_free(dccpw.fifo);
+ kfifo_free(&dccpw.fifo);
return ret;
}
module_init(dccpprobe_init);
static __exit void dccpprobe_exit(void)
{
- kfifo_free(dccpw.fifo);
+ kfifo_free(&dccpw.fifo);
proc_net_remove(&init_net, procname);
unregister_jprobe(&dccp_send_probe);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 70491d9035e..0c94a1ac294 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -166,7 +166,7 @@ config IP_PNP_DHCP
If unsure, say Y. Note that if you want to use DHCP, a DHCP server
must be operating on your network. Read
- <file:Documentation/filesystems/nfsroot.txt> for details.
+ <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
config IP_PNP_BOOTP
bool "IP: BOOTP support"
@@ -181,7 +181,7 @@ config IP_PNP_BOOTP
does BOOTP itself, providing all necessary information on the kernel
command line, you can say N here. If unsure, say Y. Note that if you
want to use BOOTP, a BOOTP server must be operating on your network.
- Read <file:Documentation/filesystems/nfsroot.txt> for details.
+ Read <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
config IP_PNP_RARP
bool "IP: RARP support"
@@ -194,7 +194,7 @@ config IP_PNP_RARP
older protocol which is being obsoleted by BOOTP and DHCP), say Y
here. Note that if you want to use RARP, a RARP server must be
operating on your network. Read
- <file:Documentation/filesystems/nfsroot.txt> for details.
+ <file:Documentation/filesystems/nfs/nfsroot.txt> for details.
# not yet ready..
# bool ' IP: ARP support' CONFIG_IP_PNP_ARP
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 21e5e32d8c6..2b79377b468 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -351,12 +351,13 @@ static inline u32 inet_sk_port_offset(const struct sock *sk)
inet->inet_dport);
}
-void __inet_hash_nolisten(struct sock *sk)
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct hlist_nulls_head *list;
spinlock_t *lock;
struct inet_ehash_bucket *head;
+ int twrefcnt = 0;
WARN_ON(!sk_unhashed(sk));
@@ -367,8 +368,13 @@ void __inet_hash_nolisten(struct sock *sk)
spin_lock(lock);
__sk_nulls_add_node_rcu(sk, list);
+ if (tw) {
+ WARN_ON(sk->sk_hash != tw->tw_hash);
+ twrefcnt = inet_twsk_unhash(tw);
+ }
spin_unlock(lock);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ return twrefcnt;
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
@@ -378,7 +384,7 @@ static void __inet_hash(struct sock *sk)
struct inet_listen_hashbucket *ilb;
if (sk->sk_state != TCP_LISTEN) {
- __inet_hash_nolisten(sk);
+ __inet_hash_nolisten(sk, NULL);
return;
}
@@ -427,7 +433,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **),
- void (*hash)(struct sock *sk))
+ int (*hash)(struct sock *sk, struct inet_timewait_sock *twp))
{
struct inet_hashinfo *hinfo = death_row->hashinfo;
const unsigned short snum = inet_sk(sk)->inet_num;
@@ -435,6 +441,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct inet_bind_bucket *tb;
int ret;
struct net *net = sock_net(sk);
+ int twrefcnt = 1;
if (!snum) {
int i, remaining, low, high, port;
@@ -493,13 +500,18 @@ ok:
inet_bind_hash(sk, tb, port);
if (sk_unhashed(sk)) {
inet_sk(sk)->inet_sport = htons(port);
- hash(sk);
+ twrefcnt += hash(sk, tw);
}
+ if (tw)
+ twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
spin_unlock(&head->lock);
if (tw) {
inet_twsk_deschedule(tw, death_row);
- inet_twsk_put(tw);
+ while (twrefcnt) {
+ twrefcnt--;
+ inet_twsk_put(tw);
+ }
}
ret = 0;
@@ -510,7 +522,7 @@ ok:
tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
- hash(sk);
+ hash(sk, NULL);
spin_unlock_bh(&head->lock);
return 0;
} else {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 0fdf45e4c90..cc94cc2d8b2 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -15,9 +15,13 @@
#include <net/ip.h>
-/*
- * unhash a timewait socket from established hash
- * lock must be hold by caller
+/**
+ * inet_twsk_unhash - unhash a timewait socket from established hash
+ * @tw: timewait socket
+ *
+ * unhash a timewait socket from established hash, if hashed.
+ * ehash lock must be held by caller.
+ * Returns 1 if caller should call inet_twsk_put() after lock release.
*/
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
@@ -26,6 +30,37 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
hlist_nulls_del_rcu(&tw->tw_node);
sk_nulls_node_init(&tw->tw_node);
+ /*
+ * We cannot call inet_twsk_put() ourselves under the lock;
+ * the caller must call it for us.
+ */
+ return 1;
+}
+
+/**
+ * inet_twsk_bind_unhash - unhash a timewait socket from bind hash
+ * @tw: timewait socket
+ * @hashinfo: hashinfo pointer
+ *
+ * unhash a timewait socket from bind hash, if hashed.
+ * bind hash lock must be held by caller.
+ * Returns 1 if caller should call inet_twsk_put() after lock release.
+ */
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+ struct inet_hashinfo *hashinfo)
+{
+ struct inet_bind_bucket *tb = tw->tw_tb;
+
+ if (!tb)
+ return 0;
+
+ __hlist_del(&tw->tw_bind_node);
+ tw->tw_tb = NULL;
+ inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+ /*
+ * We cannot call inet_twsk_put() ourselves under the lock;
+ * the caller must call it for us.
+ */
return 1;
}
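
Both helpers follow the same convention: the unhashing happens under the hash lock, but the reference drop it implies is only reported back, and the caller performs inet_twsk_put() after unlocking. A generic user-space sketch of that "count the puts under the lock, do them after release" pattern, using a pthread mutex and C11 atomics (struct and function names invented for the example):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	int hashed;			/* protected by table_lock */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void obj_put(struct obj *o)
{
	/* The last reference frees the object, so never do this under table_lock. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}

/* Unhash under the lock, but only report the reference the caller must drop. */
static int obj_unhash(struct obj *o)
{
	if (!o->hashed)
		return 0;
	o->hashed = 0;
	return 1;			/* caller owes one obj_put() after unlocking */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	int puts;

	atomic_store(&o->refcnt, 2);	/* one ref for us, one for the "hash table" */
	o->hashed = 1;

	pthread_mutex_lock(&table_lock);
	puts = obj_unhash(o);
	pthread_mutex_unlock(&table_lock);

	while (puts--)
		obj_put(o);		/* the table's reference, dropped outside the lock */
	obj_put(o);			/* our own reference */
	return 0;
}
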
@@ -34,7 +69,6 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo)
{
struct inet_bind_hashbucket *bhead;
- struct inet_bind_bucket *tb;
int refcnt;
/* Unlink from established hashes. */
spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
@@ -46,15 +80,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
/* Disassociate with bind bucket. */
bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
hashinfo->bhash_size)];
+
spin_lock(&bhead->lock);
- tb = tw->tw_tb;
- if (tb) {
- __hlist_del(&tw->tw_bind_node);
- tw->tw_tb = NULL;
- inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
- refcnt++;
- }
+ refcnt += inet_twsk_bind_unhash(tw, hashinfo);
spin_unlock(&bhead->lock);
+
#ifdef SOCK_REFCNT_DEBUG
if (atomic_read(&tw->tw_refcnt) != 1) {
printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
@@ -126,7 +156,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
/*
* Notes :
- * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
+ * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
* - We add one reference for the bhash link
* - We add one reference for the ehash link
* - We want this refcnt update done before allowing other
@@ -136,7 +166,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_unlock(lock);
}
-
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
@@ -177,7 +206,6 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
return tw;
}
-
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
/* Returns non-zero if quota exceeded. */
@@ -256,7 +284,6 @@ void inet_twdr_hangman(unsigned long data)
out:
spin_unlock(&twdr->death_lock);
}
-
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
void inet_twdr_twkill_work(struct work_struct *work)
@@ -287,7 +314,6 @@ void inet_twdr_twkill_work(struct work_struct *work)
spin_unlock_bh(&twdr->death_lock);
}
}
-
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
/* These are always called from BH context. See callers in
@@ -307,7 +333,6 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw,
spin_unlock(&twdr->death_lock);
__inet_twsk_kill(tw, twdr->hashinfo);
}
-
EXPORT_SYMBOL(inet_twsk_deschedule);
void inet_twsk_schedule(struct inet_timewait_sock *tw,
@@ -388,7 +413,6 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
mod_timer(&twdr->tw_timer, jiffies + twdr->period);
spin_unlock(&twdr->death_lock);
}
-
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
void inet_twdr_twcal_tick(unsigned long data)
@@ -449,7 +473,6 @@ out:
#endif
spin_unlock(&twdr->death_lock);
}
-
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4e08b7f2331..10a6a604bf3 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1446,7 +1446,7 @@ late_initcall(ip_auto_config);
/*
* Decode any IP configuration options in the "ip=" or "nfsaddrs=" kernel
- * command line parameter. See Documentation/filesystems/nfsroot.txt.
+ * command line parameter. See Documentation/filesystems/nfs/nfsroot.txt.
*/
static int __init ic_proto_name(char *name)
{
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index fa2d6b6fc3e..331ead3ebd1 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -14,6 +14,7 @@
#include <net/route.h>
#include <net/ip.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
@@ -34,6 +35,20 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
return err;
}
+static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING)
+ return IP_DEFRAG_CONNTRACK_IN;
+ else
+ return IP_DEFRAG_CONNTRACK_OUT;
+}
+
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
@@ -50,10 +65,8 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
#endif
/* Gather fragments. */
if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
- if (nf_ct_ipv4_gather_frags(skb,
- hooknum == NF_INET_PRE_ROUTING ?
- IP_DEFRAG_CONNTRACK_IN :
- IP_DEFRAG_CONNTRACK_OUT))
+ enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+ if (nf_ct_ipv4_gather_frags(skb, user))
return NF_STOLEN;
}
return NF_ACCEPT;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26399ad2a28..66fd80ef247 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -277,6 +277,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+ if (tcp_opt.saw_tstamp)
+ cookie_check_timestamp(&tcp_opt);
+
ret = NULL;
req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
if (!req)
@@ -292,6 +299,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
ireq->ecn_ok = 0;
+ ireq->snd_wscale = tcp_opt.snd_wscale;
+ ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ ireq->sack_ok = tcp_opt.sack_ok;
+ ireq->wscale_ok = tcp_opt.wscale_ok;
+ ireq->tstamp_ok = tcp_opt.saw_tstamp;
+ req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
/* We throwed the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
@@ -340,20 +353,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
}
}
- /* check for timestamp cookie support */
- memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
-
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
-
- ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
- ireq->sack_ok = tcp_opt.sack_ok;
- ireq->wscale_ok = tcp_opt.wscale_ok;
- ireq->tstamp_ok = tcp_opt.saw_tstamp;
- req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
/* Try to redo what tcp_v4_send_synack did. */
req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c8666b70cde..b0a26bb25e2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2540,11 +2540,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
ctd.tcpct_cookie_desired = cvp->cookie_desired;
ctd.tcpct_s_data_desired = cvp->s_data_desired;
- /* Cookie(s) saved, return as nonce */
- if (sizeof(ctd.tcpct_value) < cvp->cookie_pair_size) {
- /* impossible? */
- return -EINVAL;
- }
memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
cvp->cookie_pair_size);
ctd.tcpct_used = cvp->cookie_pair_size;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 57ae96a0422..28e02963249 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2717,6 +2717,35 @@ static void tcp_try_undo_dsack(struct sock *sk)
}
}
+/* We can clear retrans_stamp when there are no retransmissions in the
+ * window. It would seem that it is trivially available for us in
+ * tp->retrans_out, however, that kind of assumption doesn't consider
+ * what will happen if errors occur when sending retransmission for the
+ * second time. ...It could be that such a segment has only
+ * TCPCB_EVER_RETRANS set at the present time. It seems that checking
+ * the head skb is enough except for some reneging corner cases that
+ * are not worth the effort.
+ *
+ * Main reason for all this complexity is the fact that connection dying
+ * time now depends on the validity of the retrans_stamp, in particular,
+ * that successive retransmissions of a segment must not advance
+ * retrans_stamp under any conditions.
+ */
+static int tcp_any_retrans_done(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+
+ if (tp->retrans_out)
+ return 1;
+
+ skb = tcp_write_queue_head(sk);
+ if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
+ return 1;
+
+ return 0;
+}
+
/* Undo during fast recovery after partial ACK. */
static int tcp_try_undo_partial(struct sock *sk, int acked)
@@ -2729,7 +2758,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
/* Plain luck! Hole if filled with delayed
* packet, rather than with a retransmit.
*/
- if (tp->retrans_out == 0)
+ if (!tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
@@ -2788,7 +2817,7 @@ static void tcp_try_keep_open(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
int state = TCP_CA_Open;
- if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker)
+ if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
state = TCP_CA_Disorder;
if (inet_csk(sk)->icsk_ca_state != state) {
@@ -2803,7 +2832,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
tcp_verify_left_out(tp);
- if (!tp->frto_counter && tp->retrans_out == 0)
+ if (!tp->frto_counter && !tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
if (flag & FLAG_ECE)
@@ -3698,7 +3727,7 @@ old_ack:
* the fast version below fails.
*/
void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
- u8 **hvpp, int estab, struct dst_entry *dst)
+ u8 **hvpp, int estab)
{
unsigned char *ptr;
struct tcphdr *th = tcp_hdr(skb);
@@ -3737,8 +3766,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
break;
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW && th->syn &&
- !estab && sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)) {
+ !estab && sysctl_tcp_window_scaling) {
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > 14) {
@@ -3754,8 +3782,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
case TCPOPT_TIMESTAMP:
if ((opsize == TCPOLEN_TIMESTAMP) &&
((estab && opt_rx->tstamp_ok) ||
- (!estab && sysctl_tcp_timestamps &&
- !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP)))) {
+ (!estab && sysctl_tcp_timestamps))) {
opt_rx->saw_tstamp = 1;
opt_rx->rcv_tsval = get_unaligned_be32(ptr);
opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -3763,8 +3790,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM && th->syn &&
- !estab && sysctl_tcp_sack &&
- !dst_feature(dst, RTAX_FEATURE_NO_SACK)) {
+ !estab && sysctl_tcp_sack) {
opt_rx->sack_ok = 1;
tcp_sack_reset(opt_rx);
}
@@ -3849,7 +3875,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
if (tcp_parse_aligned_timestamp(tp, th))
return 1;
}
- tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
+ tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
return 1;
}
@@ -4104,10 +4130,8 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
- if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
- !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+ if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
int mib_idx;
if (before(seq, tp->rcv_nxt))
@@ -4136,15 +4160,13 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(sk);
- if (tcp_is_sack(tp) && sysctl_tcp_dsack &&
- !dst_feature(dst, RTAX_FEATURE_NO_DSACK)) {
+ if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -5399,11 +5421,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
int saved_clamp = tp->rx_opt.mss_clamp;
- tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
if (th->ack) {
/* rfc793:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 29002ab26e0..65b8ebfd078 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1262,20 +1262,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif
- ireq = inet_rsk(req);
- ireq->loc_addr = daddr;
- ireq->rmt_addr = saddr;
- ireq->no_srccheck = inet_sk(sk)->transparent;
- ireq->opt = tcp_v4_save_options(sk, skb);
-
- dst = inet_csk_route_req(sk, req);
- if(!dst)
- goto drop_and_free;
-
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1319,8 +1309,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
tcp_openreq_init(req, &tmp_opt, skb);
+ ireq = inet_rsk(req);
+ ireq->loc_addr = daddr;
+ ireq->rmt_addr = saddr;
+ ireq->no_srccheck = inet_sk(sk)->transparent;
+ ireq->opt = tcp_v4_save_options(sk, skb);
+
if (security_inet_conn_request(sk, skb, req))
- goto drop_and_release;
+ goto drop_and_free;
if (!want_cookie)
TCP_ECN_create_request(req, tcp_hdr(skb));
@@ -1345,6 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
+ (dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1464,7 +1461,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
#endif
- __inet_hash_nolisten(newsk);
+ __inet_hash_nolisten(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 87accec8d09..f206ee5dda8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -95,9 +95,9 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
int paws_reject = 0;
+ tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
- tmp_opt.tstamp_ok = 1;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -526,9 +526,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0;
- if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
- tmp_opt.tstamp_ok = 1;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
+ tmp_opt.saw_tstamp = 0;
+ if (th->doff > (sizeof(struct tcphdr)>>2)) {
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 93316a96d82..383ce237640 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -553,7 +553,6 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_md5sig_key **md5) {
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
- struct dst_entry *dst = __sk_dst_get(sk);
unsigned remaining = MAX_TCP_OPTION_SPACE;
u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
tcp_cookie_size_check(cvp->cookie_desired) :
@@ -581,22 +580,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
opts->mss = tcp_advertise_mss(sk);
remaining -= TCPOLEN_MSS_ALIGNED;
- if (likely(sysctl_tcp_timestamps &&
- !dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
- *md5 == NULL)) {
+ if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
opts->options |= OPTION_TS;
opts->tsval = TCP_SKB_CB(skb)->when;
opts->tsecr = tp->rx_opt.ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
- if (likely(sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
+ if (likely(sysctl_tcp_window_scaling)) {
opts->ws = tp->rx_opt.rcv_wscale;
opts->options |= OPTION_WSCALE;
remaining -= TCPOLEN_WSCALE_ALIGNED;
}
- if (likely(sysctl_tcp_sack &&
- !dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
+ if (likely(sysctl_tcp_sack)) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!(OPTION_TS & opts->options)))
remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -2527,9 +2522,7 @@ static void tcp_connect_init(struct sock *sk)
* See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
*/
tp->tcp_header_len = sizeof(struct tcphdr) +
- (sysctl_tcp_timestamps &&
- (!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) ?
- TCPOLEN_TSTAMP_ALIGNED : 0));
+ (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
#ifdef CONFIG_TCP_MD5SIG
if (tp->af_specific->md5_lookup(sk, sk) != NULL)
@@ -2555,8 +2548,7 @@ static void tcp_connect_init(struct sock *sk)
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
&tp->window_clamp,
- (sysctl_tcp_window_scaling &&
- !dst_feature(dst, RTAX_FEATURE_NO_WSCALE)),
+ sysctl_tcp_window_scaling,
&rcv_wscale);
tp->rx_opt.rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8353a538cd4..8816a20c259 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -132,6 +132,35 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
}
}
+/* This function calculates a "timeout" which is equivalent to the timeout of a
+ * TCP connection after "boundary" unsuccessful, exponentially backed-off
+ * retransmissions with an initial RTO of TCP_RTO_MIN.
+ */
+static bool retransmits_timed_out(struct sock *sk,
+ unsigned int boundary)
+{
+ unsigned int timeout, linear_backoff_thresh;
+ unsigned int start_ts;
+
+ if (!inet_csk(sk)->icsk_retransmits)
+ return false;
+
+ if (unlikely(!tcp_sk(sk)->retrans_stamp))
+ start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
+ else
+ start_ts = tcp_sk(sk)->retrans_stamp;
+
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+
+ if (boundary <= linear_backoff_thresh)
+ timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
+ else
+ timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
+ (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+
+ return (tcp_time_stamp - start_ts) >= timeout;
+}
+
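For reference, the timeout described in the comment above can be reproduced outside the kernel. The sketch below uses illustrative millisecond constants (RTO_MIN_MS and RTO_MAX_MS standing in for TCP_RTO_MIN and TCP_RTO_MAX) rather than jiffies, and is not kernel code:

#include <stdio.h>

#define RTO_MIN_MS 200        /* illustrative stand-in for TCP_RTO_MIN (HZ/5) */
#define RTO_MAX_MS 120000     /* illustrative stand-in for TCP_RTO_MAX (120*HZ) */

/* Same shape as retransmits_timed_out() above: exponential backoff while the
 * individual RTO is still below RTO_MAX, then linear growth in RTO_MAX steps. */
static unsigned int backoff_timeout_ms(unsigned int boundary)
{
    unsigned int linear_backoff_thresh = 0;

    /* equivalent of ilog2(RTO_MAX_MS / RTO_MIN_MS) */
    while ((RTO_MIN_MS << (linear_backoff_thresh + 1)) <= RTO_MAX_MS)
        linear_backoff_thresh++;

    if (boundary <= linear_backoff_thresh)
        return ((2U << boundary) - 1) * RTO_MIN_MS;

    return ((2U << linear_backoff_thresh) - 1) * RTO_MIN_MS +
           (boundary - linear_backoff_thresh) * RTO_MAX_MS;
}

int main(void)
{
    unsigned int b;

    /* boundary 15 (the common tcp_retries2 default) works out to ~924600 ms */
    for (b = 1; b <= 15; b++)
        printf("boundary %2u -> timeout %u ms\n", b, backoff_timeout_ms(b));
    return 0;
}

With these values, each boundary step up to the linear threshold roughly doubles the total timeout; beyond it, every additional retransmission adds a full RTO_MAX, so boundary 15 comes out to about 924.6 seconds.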
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f9534846ca..f0126fdd7e0 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -216,9 +216,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * (udptable->mask + 1);
- for (last = first + udptable->mask + 1;
- first != last;
- first++) {
+ last = first + udptable->mask + 1;
+ do {
hslot = udp_hashslot(udptable, net, first);
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
@@ -238,7 +237,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
- }
+ } while (++first != last);
goto fail;
} else {
hslot = udp_hashslot(udptable, net, snum);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index c813e294ec0..633a6c26613 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -22,9 +22,10 @@
#include <net/inet6_hashtables.h>
#include <net/ip.h>
-void __inet6_hash(struct sock *sk)
+int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+ int twrefcnt = 0;
WARN_ON(!sk_unhashed(sk));
@@ -45,10 +46,15 @@ void __inet6_hash(struct sock *sk)
lock = inet_ehash_lockp(hashinfo, hash);
spin_lock(lock);
__sk_nulls_add_node_rcu(sk, list);
+ if (tw) {
+ WARN_ON(sk->sk_hash != tw->tw_hash);
+ twrefcnt = inet_twsk_unhash(tw);
+ }
spin_unlock(lock);
}
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ return twrefcnt;
}
EXPORT_SYMBOL(__inet6_hash);
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 5f2ec208a8c..0956ebabbff 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -20,6 +20,7 @@
#include <net/ipv6.h>
#include <net/inet_frag.h>
+#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -187,6 +188,21 @@ out:
return nf_conntrack_confirm(skb);
}
+static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge &&
+ skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
+#endif
+ if (hooknum == NF_INET_PRE_ROUTING)
+ return IP6_DEFRAG_CONNTRACK_IN;
+ else
+ return IP6_DEFRAG_CONNTRACK_OUT;
+
+}
+
static unsigned int ipv6_defrag(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
@@ -199,8 +215,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
if (skb->nfct)
return NF_ACCEPT;
- reasm = nf_ct_frag6_gather(skb);
-
+ reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
/* queued */
if (reasm == NULL)
return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e0b9424fa1b..312c20adc83 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -168,13 +168,14 @@ out:
/* Creation primitives. */
static __inline__ struct nf_ct_frag6_queue *
-fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
+fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
unsigned int hash;
arg.id = id;
+ arg.user = user;
arg.src = src;
arg.dst = dst;
@@ -559,7 +560,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
return 0;
}
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
{
struct sk_buff *clone;
struct net_device *dev = skb->dev;
@@ -605,7 +606,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
nf_ct_frag6_evictor();
- fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
+ fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr);
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
goto ret_orig;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 4d98549a686..2cddea3bd6b 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -72,6 +72,7 @@ struct frag_queue
struct inet_frag_queue q;
__be32 id; /* fragment id */
+ u32 user;
struct in6_addr saddr;
struct in6_addr daddr;
@@ -141,7 +142,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
struct ip6_create_arg *arg = a;
fq = container_of(q, struct frag_queue, q);
- return (fq->id == arg->id &&
+ return (fq->id == arg->id && fq->user == arg->user &&
ipv6_addr_equal(&fq->saddr, arg->src) &&
ipv6_addr_equal(&fq->daddr, arg->dst));
}
@@ -163,6 +164,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
struct ip6_create_arg *arg = a;
fq->id = arg->id;
+ fq->user = arg->user;
ipv6_addr_copy(&fq->saddr, arg->src);
ipv6_addr_copy(&fq->daddr, arg->dst);
}
@@ -243,6 +245,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
unsigned int hash;
arg.id = id;
+ arg.user = IP6_DEFRAG_LOCAL_DELIVER;
arg.src = src;
arg.dst = dst;
@@ -705,7 +708,8 @@ static void ip6_frags_ns_sysctl_unregister(struct net *net)
table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
- kfree(table);
+ if (!net_eq(net, &init_net))
+ kfree(table);
}
static struct ctl_table_header *ip6_ctl_header;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index db3b2730389..c2bd74c5f8d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2630,6 +2630,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
+ table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
}
return table;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5b9af508b8f..7208a06576c 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -185,6 +185,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+
+ if (tcp_opt.saw_tstamp)
+ cookie_check_timestamp(&tcp_opt);
+
ret = NULL;
req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
if (!req)
@@ -218,6 +225,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->expires = 0UL;
req->retrans = 0;
ireq->ecn_ok = 0;
+ ireq->snd_wscale = tcp_opt.snd_wscale;
+ ireq->rcv_wscale = tcp_opt.rcv_wscale;
+ ireq->sack_ok = tcp_opt.sack_ok;
+ ireq->wscale_ok = tcp_opt.wscale_ok;
+ ireq->tstamp_ok = tcp_opt.saw_tstamp;
+ req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
@@ -253,21 +266,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out_free;
}
- /* check for timestamp cookie support */
- memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
-
- if (tcp_opt.saw_tstamp)
- cookie_check_timestamp(&tcp_opt);
-
- req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-
- ireq->snd_wscale = tcp_opt.snd_wscale;
- ireq->rcv_wscale = tcp_opt.rcv_wscale;
- ireq->sack_ok = tcp_opt.sack_ok;
- ireq->wscale_ok = tcp_opt.wscale_ok;
- ireq->tstamp_ok = tcp_opt.saw_tstamp;
-
req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index aadd7cef73b..febfd595a40 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -96,7 +96,7 @@ static void tcp_v6_hash(struct sock *sk)
return;
}
local_bh_disable();
- __inet6_hash(sk);
+ __inet6_hash(sk, NULL);
local_bh_enable();
}
}
@@ -1169,7 +1169,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct inet6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
@@ -1208,7 +1207,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1496,7 +1495,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
#endif
- __inet6_hash(newsk);
+ __inet6_hash(newsk, NULL);
__inet_inherit_port(sk, newsk);
return newsk;
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index b001c361ad3..4300df35d37 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -249,6 +249,7 @@
#include <linux/poll.h>
#include <linux/capability.h>
#include <linux/ctype.h> /* isspace() */
+#include <linux/string.h> /* skip_spaces() */
#include <asm/uaccess.h>
#include <linux/init.h>
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 7dea882dbb7..156020d138b 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -76,9 +76,8 @@ irnet_ctrl_write(irnet_socket * ap,
/* Look at the next command */
start = next;
- /* Scrap whitespaces before the command */
- while(isspace(*start))
- start++;
+ /* Scrap whitespaces before the command */
+ start = skip_spaces(start);
/* ',' is our command separator */
next = strchr(start, ',');
@@ -133,8 +132,7 @@ irnet_ctrl_write(irnet_socket * ap,
char * endp;
/* Scrap whitespaces before the command */
- while(isspace(*begp))
- begp++;
+ begp = skip_spaces(begp);
/* Convert argument to a number (last arg is the base) */
addr = simple_strtoul(begp, &endp, 16);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 1e428863574..c18286a2167 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -221,7 +221,7 @@ static int afiucv_pm_restore_thaw(struct device *dev)
return 0;
}
-static struct dev_pm_ops afiucv_pm_ops = {
+static const struct dev_pm_ops afiucv_pm_ops = {
.prepare = afiucv_pm_prepare,
.complete = afiucv_pm_complete,
.freeze = afiucv_pm_freeze,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3b1f5f5f8de..fd8b28361a6 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -93,7 +93,7 @@ static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);
-static struct dev_pm_ops iucv_pm_ops = {
+static const struct dev_pm_ops iucv_pm_ops = {
.prepare = iucv_pm_prepare,
.complete = iucv_pm_complete,
.freeze = iucv_pm_freeze,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 84209fbbeb1..76fa6fef647 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1193,6 +1193,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->aalg->alg_key_len = key->sadb_key_bits;
memcpy(x->aalg->alg_key, key+1, keysize);
}
+ x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
x->props.aalgo = sa->sadb_sa_auth;
/* x->algo.flags = sa->sadb_sa_flags; */
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 93ee1fd5c08..6dc3579c0ac 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -354,7 +354,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->rx_packets = sta->rx_packets;
sinfo->tx_packets = sta->tx_packets;
- if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
+ if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
+ (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = (s8)sta->last_signal;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 419f186cfcf..91dc8636d64 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -746,6 +746,7 @@ struct ieee80211_local {
unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
bool pspolling;
+ bool scan_ps_enabled;
/*
* PS can only be enabled when we have exactly one managed
* interface (and monitors) in PS, this then points there.
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c0fe46493f7..6a433142959 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -427,7 +427,7 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
char *addr5, char *addr6)
{
int aelen = 0;
- memset(meshhdr, 0, sizeof(meshhdr));
+ memset(meshhdr, 0, sizeof(*meshhdr));
meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
sdata->u.mesh.mesh_seqnum++;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 31e10254186..85562c59d7d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -188,8 +188,9 @@ struct mesh_rmc {
*/
#define MESH_PREQ_MIN_INT 10
#define MESH_DIAM_TRAVERSAL_TIME 50
-/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their
- * expiration
+/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before
+ * timing out. This way it will remain ACTIVE and no data frames will be
+ * unnecessarily held in the pending queue.
*/
#define MESH_PATH_REFRESH_TIME 1000
#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 833b2f3670c..d28acb6b1f8 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -937,7 +937,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
if (mpath->flags & MESH_PATH_ACTIVE) {
if (time_after(jiffies,
- mpath->exp_time +
+ mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
!memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
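A minimal sketch of the corrected refresh condition above, in plain C with millisecond timestamps standing in for jiffies (the helper name and the 1000 ms window mirroring MESH_PATH_REFRESH_TIME are illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: a used path should be refreshed once "now" has passed
 * (exp_time - refresh_time), i.e. shortly before it would expire, so it
 * stays ACTIVE and data frames are not queued while it is rebuilt. */
static bool path_needs_refresh(unsigned long now_ms, unsigned long exp_time_ms,
                               unsigned long refresh_time_ms)
{
    return now_ms > exp_time_ms - refresh_time_ms;
}

int main(void)
{
    /* Path expires at t=5000 ms with a 1000 ms refresh window: refresh
     * triggers from t=4001 ms onwards, not only after expiry. */
    printf("%d %d\n", path_needs_refresh(3500, 5000, 1000),
                      path_needs_refresh(4500, 5000, 1000));
    return 0;
}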
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6dc7b5ad9a4..d8d50fb5e82 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1083,8 +1083,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_set_wmm_default(sdata);
- ieee80211_recalc_idle(local);
-
/* channel(_type) changes are handled by ieee80211_hw_config */
local->oper_channel_type = NL80211_CHAN_NO_HT;
@@ -1370,6 +1368,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
if (!wk) {
ieee80211_set_disassoc(sdata, true);
+ ieee80211_recalc_idle(sdata->local);
} else {
list_del(&wk->list);
kfree(wk);
@@ -1403,6 +1402,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->dev->name, mgmt->sa, reason_code);
ieee80211_set_disassoc(sdata, false);
+ ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DISASSOC;
}
@@ -2117,6 +2117,7 @@ static void ieee80211_sta_work(struct work_struct *work)
" after %dms, disconnecting.\n",
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
ieee80211_set_disassoc(sdata, true);
+ ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
/*
* must be outside lock due to cfg80211,
@@ -2560,6 +2561,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
IEEE80211_STYPE_DEAUTH, req->reason_code,
cookie);
+ ieee80211_recalc_idle(sdata->local);
+
return 0;
}
@@ -2592,5 +2595,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
IEEE80211_STYPE_DISASSOC, req->reason_code,
cookie);
+
+ ieee80211_recalc_idle(sdata->local);
+
return 0;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f237df40837..9f2807aeaf5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1712,7 +1712,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
mpp_path_add(proxied_addr, mpp_addr, sdata);
} else {
spin_lock_bh(&mppath->state_lock);
- mppath->exp_time = jiffies;
if (compare_ether_addr(mppath->mpp, mpp_addr) != 0)
memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
spin_unlock_bh(&mppath->state_lock);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4cf387c944b..f1a4c716030 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -227,7 +227,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
- bool ps = false;
+
+ local->scan_ps_enabled = false;
/* FIXME: what to do when local->pspolling is true? */
@@ -235,12 +236,13 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
cancel_work_sync(&local->dynamic_ps_enable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- ps = true;
+ local->scan_ps_enabled = true;
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ if (!(local->scan_ps_enabled) ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
@@ -261,7 +263,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
if (!local->ps_sdata)
ieee80211_send_nullfunc(local, sdata, 0);
- else {
+ else if (local->scan_ps_enabled) {
/*
* In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
* will send a nullfunc frame with the powersave bit set
@@ -277,6 +279,16 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
*/
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ } else if (local->hw.conf.dynamic_ps_timeout > 0) {
+ /*
+ * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
+ * had been running before leaving the operating channel,
+ * restart the timer now and send a nullfunc frame to inform
+ * the AP that we are awake.
+ */
+ ieee80211_send_nullfunc(local, sdata, 0);
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d09f78bb244..78a6e924c7e 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -579,7 +579,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
if (elen > left)
break;
- if (calc_crc && id < 64 && (filter & BIT(id)))
+ if (calc_crc && id < 64 && (filter & (1ULL << id)))
crc = crc32_be(crc, pos - 2, elen + 2);
switch (id) {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b95699f0054..847ffca4018 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1366,6 +1366,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
== sysctl_ip_vs_sync_threshold[0])) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
+ (cp->state == IP_VS_TCP_S_CLOSE) ||
(cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
(cp->state == IP_VS_TCP_S_TIME_WAIT)))))
ip_vs_sync_conn(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e55a6861d26..6bde12da2fe 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2714,6 +2714,8 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
return -EINVAL;
+ memset(usvc, 0, sizeof(*usvc));
+
usvc->af = nla_get_u16(nla_af);
#ifdef CONFIG_IP_VS_IPV6
if (usvc->af != AF_INET && usvc->af != AF_INET6)
@@ -2901,6 +2903,8 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
if (!(nla_addr && nla_port))
return -EINVAL;
+ memset(udest, 0, sizeof(*udest));
+
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_u16(nla_port);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index eb0ceb84652..fc70a49c0af 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -482,8 +482,7 @@ static ssize_t recent_old_proc_write(struct file *file,
if (copy_from_user(buf, input, size))
return -EFAULT;
- while (isspace(*c))
- c++;
+ c = skip_spaces(c);
if (size - (c - buf) < 5)
return c - buf;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 020562164b5..e0516a22be2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -415,7 +415,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct net_device *dev;
__be16 proto = 0;
int err;
@@ -437,6 +437,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
*/
saddr->spkt_device[13] = 0;
+retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
@@ -456,58 +457,48 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
if (len > dev->mtu + dev->hard_header_len)
goto out_unlock;
- err = -ENOBUFS;
- skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
-
- /*
- * If the write buffer is full, then tough. At this level the user
- * gets to deal with the problem - do your own algorithmic backoffs.
- * That's far more flexible.
- */
-
- if (skb == NULL)
- goto out_unlock;
-
- /*
- * Fill it in
- */
-
- /* FIXME: Save some space for broken drivers that write a
- * hard header at transmission time by themselves. PPP is the
- * notable one here. This should really be fixed at the driver level.
- */
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
-
- /* Try to align data part correctly */
- if (dev->header_ops) {
- skb->data -= dev->hard_header_len;
- skb->tail -= dev->hard_header_len;
- if (len < dev->hard_header_len)
- skb_reset_network_header(skb);
+ if (!skb) {
+ size_t reserved = LL_RESERVED_SPACE(dev);
+ unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
+
+ rcu_read_unlock();
+ skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
+ if (skb == NULL)
+ return -ENOBUFS;
+ /* FIXME: Save some space for broken drivers that write a hard
+ * header at transmission time by themselves. PPP is the notable
+ * one here. This should really be fixed at the driver level.
+ */
+ skb_reserve(skb, reserved);
+ skb_reset_network_header(skb);
+
+ /* Try to align data part correctly */
+ if (hhlen) {
+ skb->data -= hhlen;
+ skb->tail -= hhlen;
+ if (len < hhlen)
+ skb_reset_network_header(skb);
+ }
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ if (err)
+ goto out_free;
+ goto retry;
}
- /* Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- if (err)
- goto out_free;
-
- /*
- * Now send it
- */
dev_queue_xmit(skb);
rcu_read_unlock();
return len;
-out_free:
- kfree_skb(skb);
out_unlock:
rcu_read_unlock();
+out_free:
+ kfree_skb(skb);
return err;
}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 536ebe5d3f6..3b899236104 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -182,8 +182,8 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
- ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
- ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
+ rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
+ rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index db224f7c293..b28fa8525b2 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -184,8 +184,8 @@ static int rds_iw_conn_info_visitor(struct rds_connection *conn,
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
- ib_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
- ib_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
+ rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
+ rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 448e5a0fcc2..c218e07e5ca 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -579,6 +579,8 @@ static ssize_t rfkill_name_show(struct device *dev,
static const char *rfkill_get_type_str(enum rfkill_type type)
{
+ BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
+
switch (type) {
case RFKILL_TYPE_WLAN:
return "wlan";
@@ -597,8 +599,6 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
default:
BUG();
}
-
- BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
}
static ssize_t rfkill_type_show(struct device *dev,
diff --git a/net/socket.c b/net/socket.c
index b94c3dd7101..769c386bd42 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -312,18 +312,6 @@ static struct file_system_type sock_fs_type = {
.kill_sb = kill_anon_super,
};
-static int sockfs_delete_dentry(struct dentry *dentry)
-{
- /*
- * At creation time, we pretended this dentry was hashed
- * (by clearing DCACHE_UNHASHED bit in d_flags)
- * At delete time, we restore the truth : not hashed.
- * (so that dput() can proceed correctly)
- */
- dentry->d_flags |= DCACHE_UNHASHED;
- return 0;
-}
-
/*
* sockfs_dname() is called from d_path().
*/
@@ -334,7 +322,6 @@ static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
}
static const struct dentry_operations sockfs_dentry_operations = {
- .d_delete = sockfs_delete_dentry,
.d_dname = sockfs_dname,
};
@@ -355,68 +342,55 @@ static const struct dentry_operations sockfs_dentry_operations = {
* but we take care of internal coherence yet.
*/
-static int sock_alloc_fd(struct file **filep, int flags)
+static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
{
+ struct qstr name = { .name = "" };
+ struct path path;
+ struct file *file;
int fd;
fd = get_unused_fd_flags(flags);
- if (likely(fd >= 0)) {
- struct file *file = get_empty_filp();
-
- *filep = file;
- if (unlikely(!file)) {
- put_unused_fd(fd);
- return -ENFILE;
- }
- } else
- *filep = NULL;
- return fd;
-}
-
-static int sock_attach_fd(struct socket *sock, struct file *file, int flags)
-{
- struct dentry *dentry;
- struct qstr name = { .name = "" };
+ if (unlikely(fd < 0))
+ return fd;
- dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
- if (unlikely(!dentry))
+ path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
+ if (unlikely(!path.dentry)) {
+ put_unused_fd(fd);
return -ENOMEM;
+ }
+ path.mnt = mntget(sock_mnt);
- dentry->d_op = &sockfs_dentry_operations;
- /*
- * We dont want to push this dentry into global dentry hash table.
- * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
- * This permits a working /proc/$pid/fd/XXX on sockets
- */
- dentry->d_flags &= ~DCACHE_UNHASHED;
- d_instantiate(dentry, SOCK_INODE(sock));
+ path.dentry->d_op = &sockfs_dentry_operations;
+ d_instantiate(path.dentry, SOCK_INODE(sock));
+ SOCK_INODE(sock)->i_fop = &socket_file_ops;
- sock->file = file;
- init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
+ file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
&socket_file_ops);
- SOCK_INODE(sock)->i_fop = &socket_file_ops;
+ if (unlikely(!file)) {
+ /* drop dentry, keep inode */
+ atomic_inc(&path.dentry->d_inode->i_count);
+ path_put(&path);
+ put_unused_fd(fd);
+ return -ENFILE;
+ }
+
+ sock->file = file;
file->f_flags = O_RDWR | (flags & O_NONBLOCK);
file->f_pos = 0;
file->private_data = sock;
- return 0;
+ *f = file;
+ return fd;
}
int sock_map_fd(struct socket *sock, int flags)
{
struct file *newfile;
- int fd = sock_alloc_fd(&newfile, flags);
+ int fd = sock_alloc_file(sock, &newfile, flags);
- if (likely(fd >= 0)) {
- int err = sock_attach_fd(sock, newfile, flags);
-
- if (unlikely(err < 0)) {
- put_filp(newfile);
- put_unused_fd(fd);
- return err;
- }
+ if (likely(fd >= 0))
fd_install(fd, newfile);
- }
+
return fd;
}
@@ -1390,29 +1364,19 @@ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
if (err < 0)
goto out_release_both;
- fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC);
+ fd1 = sock_alloc_file(sock1, &newfile1, flags);
if (unlikely(fd1 < 0)) {
err = fd1;
goto out_release_both;
}
- fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC);
+ fd2 = sock_alloc_file(sock2, &newfile2, flags);
if (unlikely(fd2 < 0)) {
err = fd2;
- put_filp(newfile1);
- put_unused_fd(fd1);
- goto out_release_both;
- }
-
- err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK);
- if (unlikely(err < 0)) {
- goto out_fd2;
- }
-
- err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK);
- if (unlikely(err < 0)) {
fput(newfile1);
- goto out_fd1;
+ put_unused_fd(fd1);
+ sock_release(sock2);
+ goto out;
}
audit_fd_pair(fd1, fd2);
@@ -1438,16 +1402,6 @@ out_release_1:
sock_release(sock1);
out:
return err;
-
-out_fd2:
- put_filp(newfile1);
- sock_release(sock1);
-out_fd1:
- put_filp(newfile2);
- sock_release(sock2);
- put_unused_fd(fd1);
- put_unused_fd(fd2);
- goto out;
}
/*
@@ -1551,17 +1505,13 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
*/
__module_get(newsock->ops->owner);
- newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC);
+ newfd = sock_alloc_file(newsock, &newfile, flags);
if (unlikely(newfd < 0)) {
err = newfd;
sock_release(newsock);
goto out_put;
}
- err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK);
- if (err < 0)
- goto out_fd_simple;
-
err = security_socket_accept(sock, newsock);
if (err)
goto out_fd;
@@ -1591,11 +1541,6 @@ out_put:
fput_light(sock->file, fput_needed);
out:
return err;
-out_fd_simple:
- sock_release(newsock);
- put_filp(newfile);
- put_unused_fd(newfd);
- goto out_put;
out_fd:
fput(newfile);
put_unused_fd(newfd);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index c7450c8f0a7..6dcdd251781 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -55,16 +55,8 @@ static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
/*
* RFC 4291, Section 2.2.1
- *
- * To keep the result as short as possible, especially
- * since we don't shorthand, we don't want leading zeros
- * in each halfword, so avoid %pI6.
*/
- return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x",
- ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]),
- ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]),
- ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]),
- ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7]));
+ return snprintf(buf, buflen, "%pI6c", addr);
}
static size_t rpc_ntop6(const struct sockaddr *sap,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 7535a7bed2f..f394fc190a4 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -123,16 +123,19 @@ rpcauth_unhash_cred_locked(struct rpc_cred *cred)
clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
}
-static void
+static int
rpcauth_unhash_cred(struct rpc_cred *cred)
{
spinlock_t *cache_lock;
+ int ret;
cache_lock = &cred->cr_auth->au_credcache->lock;
spin_lock(cache_lock);
- if (atomic_read(&cred->cr_count) == 0)
+ ret = atomic_read(&cred->cr_count) == 0;
+ if (ret)
rpcauth_unhash_cred_locked(cred);
spin_unlock(cache_lock);
+ return ret;
}
/*
@@ -446,31 +449,35 @@ void
put_rpccred(struct rpc_cred *cred)
{
/* Fast path for unhashed credentials */
- if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0)
- goto need_lock;
-
- if (!atomic_dec_and_test(&cred->cr_count))
+ if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
+ if (atomic_dec_and_test(&cred->cr_count))
+ cred->cr_ops->crdestroy(cred);
return;
- goto out_destroy;
-need_lock:
+ }
+
if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
return;
if (!list_empty(&cred->cr_lru)) {
number_cred_unused--;
list_del_init(&cred->cr_lru);
}
- if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
- rpcauth_unhash_cred(cred);
if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
- cred->cr_expire = jiffies;
- list_add_tail(&cred->cr_lru, &cred_unused);
- number_cred_unused++;
- spin_unlock(&rpc_credcache_lock);
- return;
+ if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
+ cred->cr_expire = jiffies;
+ list_add_tail(&cred->cr_lru, &cred_unused);
+ number_cred_unused++;
+ goto out_nodestroy;
+ }
+ if (!rpcauth_unhash_cred(cred)) {
+ /* We were hashed and someone looked us up... */
+ goto out_nodestroy;
+ }
}
spin_unlock(&rpc_credcache_lock);
-out_destroy:
cred->cr_ops->crdestroy(cred);
+ return;
+out_nodestroy:
+ spin_unlock(&rpc_credcache_lock);
}
EXPORT_SYMBOL_GPL(put_rpccred);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index fc6a43ccd95..3c3c50f38a1 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -304,7 +304,7 @@ __gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
* to that upcall instead of adding the new upcall.
*/
static inline struct gss_upcall_msg *
-gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
+gss_add_msg(struct gss_upcall_msg *gss_msg)
{
struct rpc_inode *rpci = gss_msg->inode;
struct inode *inode = &rpci->vfs_inode;
@@ -445,7 +445,7 @@ gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cr
gss_new = gss_alloc_msg(gss_auth, uid, clnt, gss_cred->gc_machine_cred);
if (IS_ERR(gss_new))
return gss_new;
- gss_msg = gss_add_msg(gss_auth, gss_new);
+ gss_msg = gss_add_msg(gss_new);
if (gss_msg == gss_new) {
struct inode *inode = &gss_new->inode->vfs_inode;
int res = rpc_queue_upcall(inode, &gss_new->msg);
@@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task)
dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
cred->cr_uid);
gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
- if (IS_ERR(gss_msg) == -EAGAIN) {
+ if (PTR_ERR(gss_msg) == -EAGAIN) {
/* XXX: warning on the first, under the assumption we
* shouldn't normally hit this case on a refresh. */
warn_gssd();
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 38829e20500..154034b675b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -79,7 +79,7 @@ static void call_connect_status(struct rpc_task *task);
static __be32 *rpc_encode_header(struct rpc_task *task);
static __be32 *rpc_verify_header(struct rpc_task *task);
-static int rpc_ping(struct rpc_clnt *clnt, int flags);
+static int rpc_ping(struct rpc_clnt *clnt);
static void rpc_register_client(struct rpc_clnt *clnt)
{
@@ -340,7 +340,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
return clnt;
if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
- int err = rpc_ping(clnt, RPC_TASK_SOFT);
+ int err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
return ERR_PTR(err);
@@ -528,7 +528,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
clnt->cl_prog = program->number;
clnt->cl_vers = version->number;
clnt->cl_stats = program->stats;
- err = rpc_ping(clnt, RPC_TASK_SOFT);
+ err = rpc_ping(clnt);
if (err != 0) {
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
@@ -1060,7 +1060,7 @@ call_bind_status(struct rpc_task *task)
goto retry_timeout;
case -EPFNOSUPPORT:
/* server doesn't support any rpcbind version we know of */
- dprintk("RPC: %5u remote rpcbind service unavailable\n",
+ dprintk("RPC: %5u unrecognized remote rpcbind service\n",
task->tk_pid);
break;
case -EPROTONOSUPPORT:
@@ -1069,6 +1069,21 @@ call_bind_status(struct rpc_task *task)
task->tk_status = 0;
task->tk_action = call_bind;
return;
+ case -ECONNREFUSED: /* connection problems */
+ case -ECONNRESET:
+ case -ENOTCONN:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EPIPE:
+ dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
+ task->tk_pid, task->tk_status);
+ if (!RPC_IS_SOFTCONN(task)) {
+ rpc_delay(task, 5*HZ);
+ goto retry_timeout;
+ }
+ status = task->tk_status;
+ break;
default:
dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
task->tk_pid, -task->tk_status);
@@ -1180,11 +1195,25 @@ static void
call_transmit_status(struct rpc_task *task)
{
task->tk_action = call_status;
+
+ /*
+ * Common case: success. Force the compiler to put this
+ * test first.
+ */
+ if (task->tk_status == 0) {
+ xprt_end_transmit(task);
+ rpc_task_force_reencode(task);
+ return;
+ }
+
switch (task->tk_status) {
case -EAGAIN:
break;
default:
+ dprint_status(task);
xprt_end_transmit(task);
+ rpc_task_force_reencode(task);
+ break;
/*
* Special cases: if we've been waiting on the
* socket's write_space() callback, or if the
@@ -1192,11 +1221,16 @@ call_transmit_status(struct rpc_task *task)
* then hold onto the transport lock.
*/
case -ECONNREFUSED:
- case -ECONNRESET:
- case -ENOTCONN:
case -EHOSTDOWN:
case -EHOSTUNREACH:
case -ENETUNREACH:
+ if (RPC_IS_SOFTCONN(task)) {
+ xprt_end_transmit(task);
+ rpc_exit(task, task->tk_status);
+ break;
+ }
+ case -ECONNRESET:
+ case -ENOTCONN:
case -EPIPE:
rpc_task_force_reencode(task);
}
@@ -1346,6 +1380,10 @@ call_timeout(struct rpc_task *task)
dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
task->tk_timeouts++;
+ if (RPC_IS_SOFTCONN(task)) {
+ rpc_exit(task, -ETIMEDOUT);
+ return;
+ }
if (RPC_IS_SOFT(task)) {
if (clnt->cl_chatty)
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
@@ -1675,14 +1713,14 @@ static struct rpc_procinfo rpcproc_null = {
.p_decode = rpcproc_decode_null,
};
-static int rpc_ping(struct rpc_clnt *clnt, int flags)
+static int rpc_ping(struct rpc_clnt *clnt)
{
struct rpc_message msg = {
.rpc_proc = &rpcproc_null,
};
int err;
msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
- err = rpc_call_sync(clnt, &msg, flags);
+ err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
put_rpccred(msg.rpc_cred);
return err;
}
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 830faf4d999..3e3772d8eb9 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -20,6 +20,7 @@
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/mutex.h>
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
@@ -110,6 +111,9 @@ static void rpcb_getport_done(struct rpc_task *, void *);
static void rpcb_map_release(void *data);
static struct rpc_program rpcb_program;
+static struct rpc_clnt * rpcb_local_clnt;
+static struct rpc_clnt * rpcb_local_clnt4;
+
struct rpcbind_args {
struct rpc_xprt * r_xprt;
@@ -163,21 +167,60 @@ static const struct sockaddr_in rpcb_inaddr_loopback = {
.sin_port = htons(RPCBIND_PORT),
};
-static struct rpc_clnt *rpcb_create_local(struct sockaddr *addr,
- size_t addrlen, u32 version)
+static DEFINE_MUTEX(rpcb_create_local_mutex);
+
+/*
+ * Returns zero on success, otherwise a negative errno value
+ * is returned.
+ */
+static int rpcb_create_local(void)
{
struct rpc_create_args args = {
- .protocol = XPRT_TRANSPORT_UDP,
- .address = addr,
- .addrsize = addrlen,
+ .protocol = XPRT_TRANSPORT_TCP,
+ .address = (struct sockaddr *)&rpcb_inaddr_loopback,
+ .addrsize = sizeof(rpcb_inaddr_loopback),
.servername = "localhost",
.program = &rpcb_program,
- .version = version,
+ .version = RPCBVERS_2,
.authflavor = RPC_AUTH_UNIX,
.flags = RPC_CLNT_CREATE_NOPING,
};
+ struct rpc_clnt *clnt, *clnt4;
+ int result = 0;
+
+ if (rpcb_local_clnt)
+ return result;
+
+ mutex_lock(&rpcb_create_local_mutex);
+ if (rpcb_local_clnt)
+ goto out;
+
+ clnt = rpc_create(&args);
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create local rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+ result = -PTR_ERR(clnt);
+ goto out;
+ }
- return rpc_create(&args);
+ /*
+ * This results in an RPC ping. On systems running portmapper,
+ * the v4 ping will fail. Proceed anyway, but disallow rpcb
+ * v4 upcalls.
+ */
+ clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
+ if (IS_ERR(clnt4)) {
+ dprintk("RPC: failed to create local rpcbind v4 "
+ "cleint (errno %ld).\n", PTR_ERR(clnt4));
+ clnt4 = NULL;
+ }
+
+ rpcb_local_clnt = clnt;
+ rpcb_local_clnt4 = clnt4;
+
+out:
+ mutex_unlock(&rpcb_create_local_mutex);
+ return result;
}
static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
@@ -209,22 +252,13 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
return rpc_create(&args);
}
-static int rpcb_register_call(const u32 version, struct rpc_message *msg)
+static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
{
- struct sockaddr *addr = (struct sockaddr *)&rpcb_inaddr_loopback;
- size_t addrlen = sizeof(rpcb_inaddr_loopback);
- struct rpc_clnt *rpcb_clnt;
int result, error = 0;
msg->rpc_resp = &result;
- rpcb_clnt = rpcb_create_local(addr, addrlen, version);
- if (!IS_ERR(rpcb_clnt)) {
- error = rpc_call_sync(rpcb_clnt, msg, 0);
- rpc_shutdown_client(rpcb_clnt);
- } else
- error = PTR_ERR(rpcb_clnt);
-
+ error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
if (error < 0) {
dprintk("RPC: failed to contact local rpcbind "
"server (errno %d).\n", -error);
@@ -279,6 +313,11 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
struct rpc_message msg = {
.rpc_argp = &map,
};
+ int error;
+
+ error = rpcb_create_local();
+ if (error)
+ return error;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
@@ -288,7 +327,7 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
if (port)
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
- return rpcb_register_call(RPCBVERS_2, &msg);
+ return rpcb_register_call(rpcb_local_clnt, &msg);
}
/*
@@ -313,7 +352,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- result = rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
@@ -340,7 +379,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
- result = rpcb_register_call(RPCBVERS_4, msg);
+ result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
@@ -356,7 +395,7 @@ static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
map->r_addr = "";
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- return rpcb_register_call(RPCBVERS_4, msg);
+ return rpcb_register_call(rpcb_local_clnt4, msg);
}
/**
@@ -414,6 +453,13 @@ int rpcb_v4_register(const u32 program, const u32 version,
struct rpc_message msg = {
.rpc_argp = &map,
};
+ int error;
+
+ error = rpcb_create_local();
+ if (error)
+ return error;
+ if (rpcb_local_clnt4 == NULL)
+ return -EPROTONOSUPPORT;
if (address == NULL)
return rpcb_unregister_all_protofamilies(&msg);
@@ -491,7 +537,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
.rpc_message = &msg,
.callback_ops = &rpcb_getport_ops,
.callback_data = map,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
};
return rpc_run_task(&task_setup_data);
@@ -1027,3 +1073,15 @@ static struct rpc_program rpcb_program = {
.version = rpcb_version,
.stats = &rpcb_stats,
};
+
+/**
+ * cleanup_rpcb_clnt - shut down the local rpcbind clients on module unload
+ *
+ */
+void cleanup_rpcb_clnt(void)
+{
+ if (rpcb_local_clnt4)
+ rpc_shutdown_client(rpcb_local_clnt4);
+ if (rpcb_local_clnt)
+ rpc_shutdown_client(rpcb_local_clnt);
+}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cef74ba0666..aae6907fd54 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -210,6 +210,7 @@ void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qnam
{
__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
+EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
@@ -385,6 +386,20 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r
}
/*
+ * Tests whether rpc queue is empty
+ */
+int rpc_queue_empty(struct rpc_wait_queue *queue)
+{
+ int res;
+
+ spin_lock_bh(&queue->lock);
+ res = queue->qlen;
+ spin_unlock_bh(&queue->lock);
+ return (res == 0);
+}
+EXPORT_SYMBOL_GPL(rpc_queue_empty);
+
+/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 8cce9218901..f438347d817 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -24,6 +24,8 @@
extern struct cache_detail ip_map_cache, unix_gid_cache;
+extern void cleanup_rpcb_clnt(void);
+
static int __init
init_sunrpc(void)
{
@@ -53,6 +55,7 @@ out:
static void __exit
cleanup_sunrpc(void)
{
+ cleanup_rpcb_clnt();
rpcauth_remove_module();
cleanup_socket_xprt();
svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b845e2293df..1c924ee0a1e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -16,8 +16,6 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
-#define SVC_MAX_WAKING 5
-
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
struct svc_pool *pool;
struct svc_rqst *rqstp;
int cpu;
- int thread_avail;
if (!(xprt->xpt_flags &
((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
spin_lock_bh(&pool->sp_lock);
+ if (!list_empty(&pool->sp_threads) &&
+ !list_empty(&pool->sp_sockets))
+ printk(KERN_ERR
+ "svc_xprt_enqueue: "
+ "threads and transports both waiting??\n");
+
if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
/* Don't enqueue dead transports */
dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
}
process:
- /* Work out whether threads are available */
- thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
- if (pool->sp_nwaking >= SVC_MAX_WAKING) {
- /* too many threads are runnable and trying to wake up */
- thread_avail = 0;
- pool->sp_stats.overloads_avoided++;
- }
-
- if (thread_avail) {
+ if (!list_empty(&pool->sp_threads)) {
rqstp = list_entry(pool->sp_threads.next,
struct svc_rqst,
rq_list);
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
svc_xprt_get(xprt);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
- rqstp->rq_waking = 1;
- pool->sp_nwaking++;
pool->sp_stats.threads_woken++;
BUG_ON(xprt->xpt_pool != pool);
wake_up(&rqstp->rq_wait);
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
return -EINTR;
spin_lock_bh(&pool->sp_lock);
- if (rqstp->rq_waking) {
- rqstp->rq_waking = 0;
- pool->sp_nwaking--;
- BUG_ON(pool->sp_nwaking < 0);
- }
xprt = svc_xprt_dequeue(pool);
if (xprt) {
rqstp->rq_xprt = xprt;
@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
struct svc_pool *pool = p;
if (p == SEQ_START_TOKEN) {
- seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+ seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
return 0;
}
- seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+ seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id,
pool->sp_stats.packets,
pool->sp_stats.sockets_queued,
pool->sp_stats.threads_woken,
- pool->sp_stats.overloads_avoided,
pool->sp_stats.threads_timedout);
return 0;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 4a8f6558718..d8c04111449 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -655,23 +655,25 @@ static struct unix_gid *unix_gid_lookup(uid_t uid)
return NULL;
}
-static int unix_gid_find(uid_t uid, struct group_info **gip,
- struct svc_rqst *rqstp)
+static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
{
- struct unix_gid *ug = unix_gid_lookup(uid);
+ struct unix_gid *ug;
+ struct group_info *gi;
+ int ret;
+
+ ug = unix_gid_lookup(uid);
if (!ug)
- return -EAGAIN;
- switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
+ return ERR_PTR(-EAGAIN);
+ ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
+ switch (ret) {
case -ENOENT:
- *gip = NULL;
- return 0;
+ return ERR_PTR(-ENOENT);
case 0:
- *gip = ug->gi;
- get_group_info(*gip);
+ gi = get_group_info(ug->gi);
cache_put(&ug->h, &unix_gid_cache);
- return 0;
+ return gi;
default:
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
}
}
@@ -681,6 +683,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
struct sockaddr_in *sin;
struct sockaddr_in6 *sin6, sin6_storage;
struct ip_map *ipm;
+ struct group_info *gi;
+ struct svc_cred *cred = &rqstp->rq_cred;
switch (rqstp->rq_addr.ss_family) {
case AF_INET:
@@ -721,6 +725,17 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
ip_map_cached_put(rqstp, ipm);
break;
}
+
+ gi = unix_gid_find(cred->cr_uid, rqstp);
+ switch (PTR_ERR(gi)) {
+ case -EAGAIN:
+ return SVC_DROP;
+ case -ENOENT:
+ break;
+ default:
+ put_group_info(cred->cr_group_info);
+ cred->cr_group_info = gi;
+ }
return SVC_OK;
}
@@ -817,19 +832,11 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
slen = svc_getnl(argv); /* gids length */
if (slen > 16 || (len -= (slen + 2)*4) < 0)
goto badcred;
- if (unix_gid_find(cred->cr_uid, &cred->cr_group_info, rqstp)
- == -EAGAIN)
+ cred->cr_group_info = groups_alloc(slen);
+ if (cred->cr_group_info == NULL)
return SVC_DROP;
- if (cred->cr_group_info == NULL) {
- cred->cr_group_info = groups_alloc(slen);
- if (cred->cr_group_info == NULL)
- return SVC_DROP;
- for (i = 0; i < slen; i++)
- GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
- } else {
- for (i = 0; i < slen ; i++)
- svc_getnl(argv);
- }
+ for (i = 0; i < slen; i++)
+ GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
*authp = rpc_autherr_badverf;
return SVC_DENIED;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index fd46d42afa8..469de292c23 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -700,6 +700,10 @@ void xprt_connect(struct rpc_task *task)
}
if (!xprt_lock_write(xprt, task))
return;
+
+ if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ xprt->ops->close(xprt);
+
if (xprt_connected(xprt))
xprt_release_write(xprt, task);
else {
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 04732d09013..3d739e5d15d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2019,7 +2019,7 @@ static void xs_connect(struct rpc_task *task)
if (xprt_test_and_set_connecting(xprt))
return;
- if (transport->sock != NULL) {
+ if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
"seconds\n",
xprt, xprt->reestablish_timeout / HZ);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index c01470e7de1..baa898add28 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -141,62 +141,35 @@ static const struct ieee80211_regdomain us_regdom = {
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
- /* IEEE 802.11a, channel 36 */
- REG_RULE(5180-10, 5180+10, 40, 6, 23, 0),
- /* IEEE 802.11a, channel 40 */
- REG_RULE(5200-10, 5200+10, 40, 6, 23, 0),
- /* IEEE 802.11a, channel 44 */
- REG_RULE(5220-10, 5220+10, 40, 6, 23, 0),
+ /* IEEE 802.11a, channel 36..48 */
+ REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
/* IEEE 802.11a, channels 48..64 */
- REG_RULE(5240-10, 5320+10, 40, 6, 23, 0),
+ REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
+ /* IEEE 802.11a, channels 100..124 */
+ REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
+ /* IEEE 802.11a, channels 132..144 */
+ REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
/* IEEE 802.11a, channels 149..165, outdoor */
REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
}
};
static const struct ieee80211_regdomain jp_regdom = {
- .n_reg_rules = 3,
+ .n_reg_rules = 6,
.alpha2 = "JP",
.reg_rules = {
- /* IEEE 802.11b/g, channels 1..14 */
- REG_RULE(2412-10, 2484+10, 40, 6, 20, 0),
- /* IEEE 802.11a, channels 34..48 */
- REG_RULE(5170-10, 5240+10, 40, 6, 20,
- NL80211_RRF_PASSIVE_SCAN),
+ /* IEEE 802.11b/g, channels 1..11 */
+ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
+ /* IEEE 802.11b/g, channels 12..13 */
+ REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
+ /* IEEE 802.11b/g, channel 14 */
+ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
+ /* IEEE 802.11a, channels 36..48 */
+ REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
/* IEEE 802.11a, channels 52..64 */
- REG_RULE(5260-10, 5320+10, 40, 6, 20,
- NL80211_RRF_NO_IBSS |
- NL80211_RRF_DFS),
- }
-};
-
-static const struct ieee80211_regdomain eu_regdom = {
- .n_reg_rules = 6,
- /*
- * This alpha2 is bogus, we leave it here just for stupid
- * backward compatibility
- */
- .alpha2 = "EU",
- .reg_rules = {
- /* IEEE 802.11b/g, channels 1..13 */
- REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
- /* IEEE 802.11a, channel 36 */
- REG_RULE(5180-10, 5180+10, 40, 6, 23,
- NL80211_RRF_PASSIVE_SCAN),
- /* IEEE 802.11a, channel 40 */
- REG_RULE(5200-10, 5200+10, 40, 6, 23,
- NL80211_RRF_PASSIVE_SCAN),
- /* IEEE 802.11a, channel 44 */
- REG_RULE(5220-10, 5220+10, 40, 6, 23,
- NL80211_RRF_PASSIVE_SCAN),
- /* IEEE 802.11a, channels 48..64 */
- REG_RULE(5240-10, 5320+10, 40, 6, 20,
- NL80211_RRF_NO_IBSS |
- NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 100..140 */
- REG_RULE(5500-10, 5700+10, 40, 6, 30,
- NL80211_RRF_NO_IBSS |
- NL80211_RRF_DFS),
+ REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
+ /* IEEE 802.11a, channels 100..144 */
+ REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
}
};
@@ -206,15 +179,17 @@ static const struct ieee80211_regdomain *static_regdom(char *alpha2)
return &us_regdom;
if (alpha2[0] == 'J' && alpha2[1] == 'P')
return &jp_regdom;
+ /* Use world roaming rules for "EU", since it was a pseudo
+ domain anyway... */
if (alpha2[0] == 'E' && alpha2[1] == 'U')
- return &eu_regdom;
- /* Default, as per the old rules */
- return &us_regdom;
+ return &world_regdom;
+ /* Default, world roaming rules */
+ return &world_regdom;
}
static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
{
- if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom)
+ if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
return true;
return false;
}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 584eb4826e0..54face3d442 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -479,6 +479,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
}
err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
}
+ wdev->wext.connect.privacy = false;
/*
* Applications using wireless extensions expect to be
* able to delete keys that don't exist, so allow that.
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
index 29525500df0..c69cbe9b242 100644
--- a/samples/hw_breakpoint/data_breakpoint.c
+++ b/samples/hw_breakpoint/data_breakpoint.c
@@ -41,7 +41,9 @@ module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
" write operations on the kernel symbol");
-static void sample_hbp_handler(struct perf_event *temp, void *data)
+static void sample_hbp_handler(struct perf_event *bp, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
printk(KERN_INFO "%s value is changed\n", ksym_name);
dump_stack();
@@ -51,8 +53,9 @@ static void sample_hbp_handler(struct perf_event *temp, void *data)
static int __init hw_break_module_init(void)
{
int ret;
- DEFINE_BREAKPOINT_ATTR(attr);
+ struct perf_event_attr attr;
+ hw_breakpoint_init(&attr);
attr.bp_addr = kallsyms_lookup_name(ksym_name);
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index c67e73ecd5b..ed2773edfe7 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -149,6 +149,12 @@ ld-option = $(call try-run,\
# $(Q)$(MAKE) $(build)=dir
build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
+###
+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.modbuiltin obj=
+# Usage:
+# $(Q)$(MAKE) $(modbuiltin)=dir
+modbuiltin := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.modbuiltin obj
+
# Prefix -I with $(srctree) if it is not an absolute path.
# skip if -I has no parameter
addtree = $(if $(patsubst -I%,%,$(1)), \
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 341b58902ff..0b94d2fa3a8 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -207,6 +207,7 @@ endif
ifdef CONFIG_FTRACE_MCOUNT_RECORD
cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
+ "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
"$(if $(CONFIG_64BIT),64,32)" \
"$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
"$(if $(part-of-module),1,0)" "$(@)";
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index ffdafb26f53..cd815ac2a50 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -127,6 +127,11 @@ _c_flags += $(if $(patsubst n%,, \
$(CFLAGS_GCOV))
endif
+ifdef CONFIG_SYMBOL_PREFIX
+_cpp_flags += -DSYMBOL_PREFIX=$(patsubst "%",%,$(CONFIG_SYMBOL_PREFIX))
+endif
+
+
# If building the kernel in a separate objtree expand all occurrences
# of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').
@@ -208,7 +213,7 @@ cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -f -9 > $@) || \
# Bzip2 and LZMA do not include size in file... so we have to fake that;
# append the size as a 32-bit littleendian number as gzip does.
-size_append = /bin/echo -ne $(shell \
+size_append = printf $(shell \
dec_size=0; \
for F in $1; do \
fsize=$$(stat -c "%s" $$F); \
diff --git a/scripts/Makefile.modbuiltin b/scripts/Makefile.modbuiltin
new file mode 100644
index 00000000000..102a276f6ee
--- /dev/null
+++ b/scripts/Makefile.modbuiltin
@@ -0,0 +1,55 @@
+# ==========================================================================
+# Generating modules.builtin
+# ==========================================================================
+
+src := $(obj)
+
+PHONY := __modbuiltin
+__modbuiltin:
+
+-include include/config/auto.conf
+# tristate.conf sets tristate variables to uppercase 'Y' or 'M'
+# That way, we get the list of built-in modules in obj-Y
+-include include/config/tristate.conf
+
+include scripts/Kbuild.include
+
+# The filename Kbuild has precedence over Makefile
+kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
+kbuild-file := $(if $(wildcard $(kbuild-dir)/Kbuild),$(kbuild-dir)/Kbuild,$(kbuild-dir)/Makefile)
+include $(kbuild-file)
+
+include scripts/Makefile.lib
+__subdir-Y := $(patsubst %/,%,$(filter %/, $(obj-Y)))
+subdir-Y += $(__subdir-Y)
+subdir-ym := $(sort $(subdir-y) $(subdir-Y) $(subdir-m))
+subdir-ym := $(addprefix $(obj)/,$(subdir-ym))
+obj-Y := $(addprefix $(obj)/,$(obj-Y))
+
+modbuiltin-subdirs := $(patsubst %,%/modules.builtin, $(subdir-ym))
+modbuiltin-mods := $(filter %.ko, $(obj-Y:.o=.ko))
+modbuiltin-target := $(obj)/modules.builtin
+
+__modbuiltin: $(modbuiltin-target) $(subdir-ym)
+ @:
+
+$(modbuiltin-target): $(subdir-ym) FORCE
+ $(Q)(for m in $(modbuiltin-mods); do echo kernel/$$m; done; \
+ cat /dev/null $(modbuiltin-subdirs)) > $@
+
+PHONY += FORCE
+
+FORCE:
+
+# Descending
+# ---------------------------------------------------------------------------
+
+PHONY += $(subdir-ym)
+$(subdir-ym):
+ $(Q)$(MAKE) $(modbuiltin)=$@
+
+
+# Declare the contents of the .PHONY variable as phony. We keep that
+# information in a variable so we can use it in if_changed and friends.
+
+.PHONY: $(PHONY)
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 6bf21f83837..ea26b23de08 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -16,15 +16,15 @@
* tells make when to remake a file.
*
* To use this list as-is however has the drawback that virtually
- * every file in the kernel includes <linux/autoconf.h>.
+ * every file in the kernel includes autoconf.h.
*
- * If the user re-runs make *config, linux/autoconf.h will be
+ * If the user re-runs make *config, autoconf.h will be
* regenerated. make notices that and will rebuild every file which
* includes autoconf.h, i.e. basically all files. This is extremely
* annoying if the user just changed CONFIG_HIS_DRIVER from n to m.
*
* So we play the same trick that "mkdep" played before. We replace
- * the dependency on linux/autoconf.h by a dependency on every config
+ * the dependency on autoconf.h by a dependency on every config
* option which is mentioned in any of the listed prerequisites.
*
* kconfig populates a tree in include/config/ with an empty file
@@ -73,7 +73,7 @@
* cmd_<target> = <cmdline>
*
* and then basically copies the .<target>.d file to stdout, in the
- * process filtering out the dependency on linux/autoconf.h and adding
+ * process filtering out the dependency on autoconf.h and adding
* dependencies on include/config/my/option.h for every
* CONFIG_MY_OPTION encountered in any of the prerequisites.
*
@@ -324,7 +324,7 @@ static void parse_dep_file(void *map, size_t len)
p++;
}
memcpy(s, m, p-m); s[p-m] = 0;
- if (strrcmp(s, "include/linux/autoconf.h") &&
+ if (strrcmp(s, "include/generated/autoconf.h") &&
strrcmp(s, "arch/um/include/uml-config.h") &&
strrcmp(s, ".ver")) {
printf(" %s \\\n", s);
diff --git a/scripts/genksyms/keywords.c_shipped b/scripts/genksyms/keywords.c_shipped
index 287467a2e8c..8060e06798b 100644
--- a/scripts/genksyms/keywords.c_shipped
+++ b/scripts/genksyms/keywords.c_shipped
@@ -1,4 +1,4 @@
-/* ANSI-C code produced by gperf version 3.0.3 */
+/* ANSI-C code produced by gperf version 3.0.4 */
/* Command-line: gperf -L ANSI-C -a -C -E -g -H is_reserved_hash -k '1,3,$' -N is_reserved_word -p -t scripts/genksyms/keywords.gperf */
#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
@@ -34,7 +34,7 @@ struct resword;
static const struct resword *is_reserved_word(register const char *str, register unsigned int len);
#line 5 "scripts/genksyms/keywords.gperf"
struct resword { const char *name; int token; };
-/* maximum key range = 62, duplicates = 0 */
+/* maximum key range = 64, duplicates = 0 */
#ifdef __GNUC__
__inline
@@ -48,39 +48,39 @@ is_reserved_hash (register const char *str, register unsigned int len)
{
static const unsigned char asso_values[] =
{
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 5,
- 65, 65, 65, 65, 65, 65, 35, 65, 65, 65,
- 0, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 0, 65, 0, 65, 5,
- 20, 15, 10, 30, 65, 15, 65, 65, 20, 0,
- 10, 35, 20, 65, 10, 5, 0, 10, 5, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
- 65, 65, 65, 65, 65, 65
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 0,
+ 67, 67, 67, 67, 67, 67, 15, 67, 67, 67,
+ 0, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 0, 67, 0, 67, 5,
+ 25, 20, 15, 30, 67, 15, 67, 67, 10, 0,
+ 10, 40, 20, 67, 10, 5, 0, 10, 15, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67, 67, 67, 67, 67,
+ 67, 67, 67, 67, 67, 67
};
return len + asso_values[(unsigned char)str[2]] + asso_values[(unsigned char)str[0]] + asso_values[(unsigned char)str[len - 1]];
}
#ifdef __GNUC__
__inline
-#ifdef __GNUC_STDC_INLINE__
+#if defined __GNUC_STDC_INLINE__ || defined __GNUC_GNU_INLINE__
__attribute__ ((__gnu_inline__))
#endif
#endif
@@ -89,116 +89,119 @@ is_reserved_word (register const char *str, register unsigned int len)
{
enum
{
- TOTAL_KEYWORDS = 43,
+ TOTAL_KEYWORDS = 45,
MIN_WORD_LENGTH = 3,
MAX_WORD_LENGTH = 24,
MIN_HASH_VALUE = 3,
- MAX_HASH_VALUE = 64
+ MAX_HASH_VALUE = 66
};
static const struct resword wordlist[] =
{
{""}, {""}, {""},
-#line 28 "scripts/genksyms/keywords.gperf"
+#line 30 "scripts/genksyms/keywords.gperf"
{"asm", ASM_KEYW},
{""},
-#line 10 "scripts/genksyms/keywords.gperf"
+#line 12 "scripts/genksyms/keywords.gperf"
{"__asm", ASM_KEYW},
{""},
-#line 11 "scripts/genksyms/keywords.gperf"
+#line 13 "scripts/genksyms/keywords.gperf"
{"__asm__", ASM_KEYW},
{""}, {""},
-#line 54 "scripts/genksyms/keywords.gperf"
+#line 56 "scripts/genksyms/keywords.gperf"
{"__typeof__", TYPEOF_KEYW},
{""},
-#line 14 "scripts/genksyms/keywords.gperf"
+#line 16 "scripts/genksyms/keywords.gperf"
{"__const", CONST_KEYW},
-#line 13 "scripts/genksyms/keywords.gperf"
- {"__attribute__", ATTRIBUTE_KEYW},
#line 15 "scripts/genksyms/keywords.gperf"
+ {"__attribute__", ATTRIBUTE_KEYW},
+#line 17 "scripts/genksyms/keywords.gperf"
{"__const__", CONST_KEYW},
-#line 20 "scripts/genksyms/keywords.gperf"
+#line 22 "scripts/genksyms/keywords.gperf"
{"__signed__", SIGNED_KEYW},
-#line 46 "scripts/genksyms/keywords.gperf"
+#line 48 "scripts/genksyms/keywords.gperf"
{"static", STATIC_KEYW},
-#line 22 "scripts/genksyms/keywords.gperf"
- {"__volatile__", VOLATILE_KEYW},
-#line 41 "scripts/genksyms/keywords.gperf"
+ {""},
+#line 43 "scripts/genksyms/keywords.gperf"
{"int", INT_KEYW},
-#line 34 "scripts/genksyms/keywords.gperf"
+#line 36 "scripts/genksyms/keywords.gperf"
{"char", CHAR_KEYW},
-#line 35 "scripts/genksyms/keywords.gperf"
+#line 37 "scripts/genksyms/keywords.gperf"
{"const", CONST_KEYW},
-#line 47 "scripts/genksyms/keywords.gperf"
+#line 49 "scripts/genksyms/keywords.gperf"
{"struct", STRUCT_KEYW},
-#line 26 "scripts/genksyms/keywords.gperf"
+#line 28 "scripts/genksyms/keywords.gperf"
{"__restrict__", RESTRICT_KEYW},
-#line 27 "scripts/genksyms/keywords.gperf"
+#line 29 "scripts/genksyms/keywords.gperf"
{"restrict", RESTRICT_KEYW},
-#line 25 "scripts/genksyms/keywords.gperf"
- {"_restrict", RESTRICT_KEYW},
-#line 18 "scripts/genksyms/keywords.gperf"
+#line 9 "scripts/genksyms/keywords.gperf"
+ {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW},
+#line 20 "scripts/genksyms/keywords.gperf"
{"__inline__", INLINE_KEYW},
-#line 12 "scripts/genksyms/keywords.gperf"
- {"__attribute", ATTRIBUTE_KEYW},
{""},
-#line 16 "scripts/genksyms/keywords.gperf"
+#line 24 "scripts/genksyms/keywords.gperf"
+ {"__volatile__", VOLATILE_KEYW},
+#line 7 "scripts/genksyms/keywords.gperf"
+ {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW},
+#line 27 "scripts/genksyms/keywords.gperf"
+ {"_restrict", RESTRICT_KEYW},
+ {""},
+#line 14 "scripts/genksyms/keywords.gperf"
+ {"__attribute", ATTRIBUTE_KEYW},
+#line 8 "scripts/genksyms/keywords.gperf"
+ {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW},
+#line 18 "scripts/genksyms/keywords.gperf"
{"__extension__", EXTENSION_KEYW},
-#line 37 "scripts/genksyms/keywords.gperf"
+#line 39 "scripts/genksyms/keywords.gperf"
{"enum", ENUM_KEYW},
-#line 21 "scripts/genksyms/keywords.gperf"
- {"__volatile", VOLATILE_KEYW},
-#line 38 "scripts/genksyms/keywords.gperf"
+#line 10 "scripts/genksyms/keywords.gperf"
+ {"EXPORT_UNUSED_SYMBOL", EXPORT_SYMBOL_KEYW},
+#line 40 "scripts/genksyms/keywords.gperf"
{"extern", EXTERN_KEYW},
{""},
-#line 19 "scripts/genksyms/keywords.gperf"
+#line 21 "scripts/genksyms/keywords.gperf"
{"__signed", SIGNED_KEYW},
-#line 9 "scripts/genksyms/keywords.gperf"
- {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW},
- {""},
-#line 53 "scripts/genksyms/keywords.gperf"
+#line 11 "scripts/genksyms/keywords.gperf"
+ {"EXPORT_UNUSED_SYMBOL_GPL", EXPORT_SYMBOL_KEYW},
+#line 51 "scripts/genksyms/keywords.gperf"
+ {"union", UNION_KEYW},
+#line 55 "scripts/genksyms/keywords.gperf"
{"typeof", TYPEOF_KEYW},
-#line 48 "scripts/genksyms/keywords.gperf"
+#line 50 "scripts/genksyms/keywords.gperf"
{"typedef", TYPEDEF_KEYW},
-#line 17 "scripts/genksyms/keywords.gperf"
+#line 19 "scripts/genksyms/keywords.gperf"
{"__inline", INLINE_KEYW},
-#line 33 "scripts/genksyms/keywords.gperf"
+#line 35 "scripts/genksyms/keywords.gperf"
{"auto", AUTO_KEYW},
-#line 49 "scripts/genksyms/keywords.gperf"
- {"union", UNION_KEYW},
- {""}, {""},
-#line 50 "scripts/genksyms/keywords.gperf"
- {"unsigned", UNSIGNED_KEYW},
-#line 51 "scripts/genksyms/keywords.gperf"
- {"void", VOID_KEYW},
-#line 44 "scripts/genksyms/keywords.gperf"
- {"short", SHORT_KEYW},
+#line 23 "scripts/genksyms/keywords.gperf"
+ {"__volatile", VOLATILE_KEYW},
{""}, {""},
#line 52 "scripts/genksyms/keywords.gperf"
- {"volatile", VOLATILE_KEYW},
- {""},
-#line 39 "scripts/genksyms/keywords.gperf"
- {"float", FLOAT_KEYW},
-#line 36 "scripts/genksyms/keywords.gperf"
- {"double", DOUBLE_KEYW},
+ {"unsigned", UNSIGNED_KEYW},
{""},
-#line 7 "scripts/genksyms/keywords.gperf"
- {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW},
- {""}, {""},
-#line 40 "scripts/genksyms/keywords.gperf"
+#line 46 "scripts/genksyms/keywords.gperf"
+ {"short", SHORT_KEYW},
+#line 42 "scripts/genksyms/keywords.gperf"
{"inline", INLINE_KEYW},
-#line 8 "scripts/genksyms/keywords.gperf"
- {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW},
-#line 43 "scripts/genksyms/keywords.gperf"
- {"register", REGISTER_KEYW},
{""},
-#line 24 "scripts/genksyms/keywords.gperf"
+#line 54 "scripts/genksyms/keywords.gperf"
+ {"volatile", VOLATILE_KEYW},
+#line 44 "scripts/genksyms/keywords.gperf"
+ {"long", LONG_KEYW},
+#line 26 "scripts/genksyms/keywords.gperf"
{"_Bool", BOOL_KEYW},
-#line 45 "scripts/genksyms/keywords.gperf"
- {"signed", SIGNED_KEYW},
{""}, {""},
-#line 42 "scripts/genksyms/keywords.gperf"
- {"long", LONG_KEYW}
+#line 45 "scripts/genksyms/keywords.gperf"
+ {"register", REGISTER_KEYW},
+#line 53 "scripts/genksyms/keywords.gperf"
+ {"void", VOID_KEYW},
+#line 41 "scripts/genksyms/keywords.gperf"
+ {"float", FLOAT_KEYW},
+#line 38 "scripts/genksyms/keywords.gperf"
+ {"double", DOUBLE_KEYW},
+ {""}, {""}, {""}, {""},
+#line 47 "scripts/genksyms/keywords.gperf"
+ {"signed", SIGNED_KEYW}
};
if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
diff --git a/scripts/genksyms/keywords.gperf b/scripts/genksyms/keywords.gperf
index 8fe977a4d57..e6349acb6f2 100644
--- a/scripts/genksyms/keywords.gperf
+++ b/scripts/genksyms/keywords.gperf
@@ -7,6 +7,8 @@ struct resword { const char *name; int token; }
EXPORT_SYMBOL, EXPORT_SYMBOL_KEYW
EXPORT_SYMBOL_GPL, EXPORT_SYMBOL_KEYW
EXPORT_SYMBOL_GPL_FUTURE, EXPORT_SYMBOL_KEYW
+EXPORT_UNUSED_SYMBOL, EXPORT_SYMBOL_KEYW
+EXPORT_UNUSED_SYMBOL_GPL, EXPORT_SYMBOL_KEYW
__asm, ASM_KEYW
__asm__, ASM_KEYW
__attribute, ATTRIBUTE_KEYW
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 81a67a458e7..445e8845f0a 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
use strict;
my $P = $0;
-my $V = '0.21';
+my $V = '0.23';
use Getopt::Long qw(:config no_auto_abbrev);
@@ -23,16 +23,19 @@ my $email_usename = 1;
my $email_maintainer = 1;
my $email_list = 1;
my $email_subscriber_list = 0;
-my $email_git = 1;
my $email_git_penguin_chiefs = 0;
+my $email_git = 1;
+my $email_git_blame = 0;
my $email_git_min_signatures = 1;
my $email_git_max_maintainers = 5;
my $email_git_min_percent = 5;
my $email_git_since = "1-year-ago";
-my $email_git_blame = 0;
+my $email_hg_since = "-365";
my $email_remove_duplicates = 1;
my $output_multiline = 1;
my $output_separator = ", ";
+my $output_roles = 0;
+my $output_rolestats = 0;
my $scm = 0;
my $web = 0;
my $subsystem = 0;
@@ -64,21 +67,52 @@ my $penguin_chiefs = "\(" . join("|",@penguin_chief_names) . "\)";
my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
my $rfc822_char = '[\\000-\\377]';
+# VCS command support: class-like functions and strings
+
+my %VCS_cmds;
+
+my %VCS_cmds_git = (
+ "execute_cmd" => \&git_execute_cmd,
+ "available" => '(which("git") ne "") && (-d ".git")',
+ "find_signers_cmd" => "git log --since=\$email_git_since -- \$file",
+ "find_commit_signers_cmd" => "git log -1 \$commit",
+ "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
+ "blame_file_cmd" => "git blame -l \$file",
+ "commit_pattern" => "^commit [0-9a-f]{40,40}",
+ "blame_commit_pattern" => "^([0-9a-f]+) "
+);
+
+my %VCS_cmds_hg = (
+ "execute_cmd" => \&hg_execute_cmd,
+ "available" => '(which("hg") ne "") && (-d ".hg")',
+ "find_signers_cmd" =>
+ "hg log --date=\$email_hg_since" .
+ " --template='commit {node}\\n{desc}\\n' -- \$file",
+ "find_commit_signers_cmd" => "hg log --template='{desc}\\n' -r \$commit",
+ "blame_range_cmd" => "", # not supported
+ "blame_file_cmd" => "hg blame -c \$file",
+ "commit_pattern" => "^commit [0-9a-f]{40,40}",
+ "blame_commit_pattern" => "^([0-9a-f]+):"
+);
+
if (!GetOptions(
'email!' => \$email,
'git!' => \$email_git,
+ 'git-blame!' => \$email_git_blame,
'git-chief-penguins!' => \$email_git_penguin_chiefs,
'git-min-signatures=i' => \$email_git_min_signatures,
'git-max-maintainers=i' => \$email_git_max_maintainers,
'git-min-percent=i' => \$email_git_min_percent,
'git-since=s' => \$email_git_since,
- 'git-blame!' => \$email_git_blame,
+ 'hg-since=s' => \$email_hg_since,
'remove-duplicates!' => \$email_remove_duplicates,
'm!' => \$email_maintainer,
'n!' => \$email_usename,
'l!' => \$email_list,
's!' => \$email_subscriber_list,
'multiline!' => \$output_multiline,
+ 'roles!' => \$output_roles,
+ 'rolestats!' => \$output_rolestats,
'separator=s' => \$output_separator,
'subsystem!' => \$subsystem,
'status!' => \$status,
@@ -90,8 +124,7 @@ if (!GetOptions(
'v|version' => \$version,
'h|help' => \$help,
)) {
- usage();
- die "$P: invalid argument\n";
+ die "$P: invalid argument - use --help if necessary\n";
}
if ($help != 0) {
@@ -113,6 +146,10 @@ if ($output_separator ne ", ") {
$output_multiline = 0;
}
+if ($output_rolestats) {
+ $output_roles = 1;
+}
+
my $selections = $email + $scm + $status + $subsystem + $web;
if ($selections == 0) {
usage();
@@ -175,7 +212,7 @@ if ($email_remove_duplicates) {
next if ($line =~ m/^\s*$/);
my ($name, $address) = parse_email($line);
- $line = format_email($name, $address);
+ $line = format_email($name, $address, $email_usename);
next if ($line =~ m/^\s*$/);
@@ -207,12 +244,10 @@ foreach my $file (@ARGV) {
push(@files, $file);
if (-f $file && $keywords) {
open(FILE, "<$file") or die "$P: Can't open ${file}\n";
- while (<FILE>) {
- my $patch_line = $_;
- foreach my $line (keys %keyword_hash) {
- if ($patch_line =~ m/^.*$keyword_hash{$line}/x) {
- push(@keyword_tvi, $line);
- }
+ my $text = do { local($/) ; <FILE> };
+ foreach my $line (keys %keyword_hash) {
+ if ($text =~ m/$keyword_hash{$line}/x) {
+ push(@keyword_tvi, $line);
}
}
close(FILE);
@@ -304,11 +339,11 @@ foreach my $file (@files) {
}
if ($email && $email_git) {
- recent_git_signoffs($file);
+ vcs_file_signoffs($file);
}
if ($email && $email_git_blame) {
- git_assign_blame($file);
+ vcs_file_blame($file);
}
}
@@ -324,11 +359,11 @@ if ($email) {
if ($chief =~ m/^(.*):(.*)/) {
my $email_address;
- $email_address = format_email($1, $2);
+ $email_address = format_email($1, $2, $email_usename);
if ($email_git_penguin_chiefs) {
- push(@email_to, $email_address);
+ push(@email_to, [$email_address, 'chief penguin']);
} else {
- @email_to = grep(!/${email_address}/, @email_to);
+ @email_to = grep($_->[0] !~ /${email_address}/, @email_to);
}
}
}
@@ -342,7 +377,7 @@ if ($email || $email_list) {
if ($email_list) {
@to = (@to, @list_to);
}
- output(uniq(@to));
+ output(merge_email(@to));
}
if ($scm) {
@@ -398,13 +433,16 @@ MAINTAINER field selection options:
--git-min-signatures => number of signatures required (default: 1)
--git-max-maintainers => maximum maintainers to add (default: 5)
--git-min-percent => minimum percentage of commits required (default: 5)
- --git-since => git history to use (default: 1-year-ago)
--git-blame => use git blame to find modified commits for patch or file
+ --git-since => git history to use (default: 1-year-ago)
+ --hg-since => hg history to use (default: -365)
--m => include maintainer(s) if any
--n => include name 'Full Name <addr\@domain.tld>'
--l => include list(s) if any
--s => include subscriber only list(s) if any
--remove-duplicates => minimize duplicate email names/addresses
+ --roles => show roles (status:subsystem, git-signer, list, etc...)
+ --rolestats => show roles and statistics (commits/total_commits, %)
--scm => print SCM tree(s) if any
--status => print status if any
--subsystem => print subsystem name if any
@@ -430,11 +468,24 @@ Notes:
directory are examined as git recurses directories.
Any specified X: (exclude) pattern matches are _not_ ignored.
Used with "--nogit", directory is used as a pattern match,
- no individual file within the directory or subdirectory
- is matched.
+ no individual file within the directory or subdirectory
+ is matched.
Used with "--git-blame", does not iterate all files in directory
Using "--git-blame" is slow and may add old committers and authors
that are no longer active maintainers to the output.
+ Using "--roles" or "--rolestats" with git send-email --cc-cmd or any
+ other automated tools that expect only ["name"] <email address>
+ may not work because of additional output after <email address>.
+ Using "--rolestats" and "--git-blame" shows the #/total=% commits,
+ not the percentage of the entire file authored. # of commits is
+ not a good measure of amount of code authored. 1 major commit may
+ contain a thousand lines, 5 trivial commits may modify a single line.
+ If git is not installed, but mercurial (hg) is installed and an .hg
+ repository exists, the following options apply to mercurial:
+ --git,
+ --git-min-signatures, --git-max-maintainers, --git-min-percent, and
+ --git-blame
+ Use --hg-since not --git-since to control date selection
EOT
}
@@ -493,7 +544,7 @@ sub parse_email {
}
sub format_email {
- my ($name, $address) = @_;
+ my ($name, $address, $usename) = @_;
my $formatted_email;
@@ -506,11 +557,11 @@ sub format_email {
$name = "\"$name\"";
}
- if ($email_usename) {
+ if ($usename) {
if ("$name" eq "") {
$formatted_email = "$address";
} else {
- $formatted_email = "$name <${address}>";
+ $formatted_email = "$name <$address>";
}
} else {
$formatted_email = $address;
@@ -547,6 +598,71 @@ sub find_ending_index {
return $index;
}
+sub get_maintainer_role {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ my $role;
+ my $subsystem = $typevalue[$start];
+ if (length($subsystem) > 20) {
+ $subsystem = substr($subsystem, 0, 17);
+ $subsystem =~ s/\s*$//;
+ $subsystem = $subsystem . "...";
+ }
+
+ for ($i = $start + 1; $i < $end; $i++) {
+ my $tv = $typevalue[$i];
+ if ($tv =~ m/^(\C):\s*(.*)/) {
+ my $ptype = $1;
+ my $pvalue = $2;
+ if ($ptype eq "S") {
+ $role = $pvalue;
+ }
+ }
+ }
+
+ $role = lc($role);
+ if ($role eq "supported") {
+ $role = "supporter";
+ } elsif ($role eq "maintained") {
+ $role = "maintainer";
+ } elsif ($role eq "odd fixes") {
+ $role = "odd fixer";
+ } elsif ($role eq "orphan") {
+ $role = "orphan minder";
+ } elsif ($role eq "obsolete") {
+ $role = "obsolete minder";
+ } elsif ($role eq "buried alive in reporters") {
+ $role = "chief penguin";
+ }
+
+ return $role . ":" . $subsystem;
+}
+
+sub get_list_role {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ my $subsystem = $typevalue[$start];
+ if (length($subsystem) > 20) {
+ $subsystem = substr($subsystem, 0, 17);
+ $subsystem =~ s/\s*$//;
+ $subsystem = $subsystem . "...";
+ }
+
+ if ($subsystem eq "THE REST") {
+ $subsystem = "";
+ }
+
+ return $subsystem;
+}
+
sub add_categories {
my ($index) = @_;
@@ -564,17 +680,22 @@ sub add_categories {
if ($ptype eq "L") {
my $list_address = $pvalue;
my $list_additional = "";
+ my $list_role = get_list_role($i);
+
+ if ($list_role ne "") {
+ $list_role = ":" . $list_role;
+ }
if ($list_address =~ m/([^\s]+)\s+(.*)$/) {
$list_address = $1;
$list_additional = $2;
}
if ($list_additional =~ m/subscribers-only/) {
if ($email_subscriber_list) {
- push(@list_to, $list_address);
+ push(@list_to, [$list_address, "subscriber list${list_role}"]);
}
} else {
if ($email_list) {
- push(@list_to, $list_address);
+ push(@list_to, [$list_address, "open list${list_role}"]);
}
}
} elsif ($ptype eq "M") {
@@ -585,13 +706,14 @@ sub add_categories {
if ($tv =~ m/^(\C):\s*(.*)/) {
if ($1 eq "P") {
$name = $2;
- $pvalue = format_email($name, $address);
+ $pvalue = format_email($name, $address, $email_usename);
}
}
}
}
if ($email_maintainer) {
- push_email_addresses($pvalue);
+ my $role = get_maintainer_role($i);
+ push_email_addresses($pvalue, $role);
}
} elsif ($ptype eq "T") {
push(@scm, $pvalue);
@@ -618,7 +740,7 @@ sub email_inuse {
}
sub push_email_address {
- my ($line) = @_;
+ my ($line, $role) = @_;
my ($name, $address) = parse_email($line);
@@ -627,9 +749,9 @@ sub push_email_address {
}
if (!$email_remove_duplicates) {
- push(@email_to, format_email($name, $address));
+ push(@email_to, [format_email($name, $address, $email_usename), $role]);
} elsif (!email_inuse($name, $address)) {
- push(@email_to, format_email($name, $address));
+ push(@email_to, [format_email($name, $address, $email_usename), $role]);
$email_hash_name{$name}++;
$email_hash_address{$address}++;
}
@@ -638,24 +760,52 @@ sub push_email_address {
}
sub push_email_addresses {
- my ($address) = @_;
+ my ($address, $role) = @_;
my @address_list = ();
if (rfc822_valid($address)) {
- push_email_address($address);
+ push_email_address($address, $role);
} elsif (@address_list = rfc822_validlist($address)) {
my $array_count = shift(@address_list);
while (my $entry = shift(@address_list)) {
- push_email_address($entry);
+ push_email_address($entry, $role);
}
} else {
- if (!push_email_address($address)) {
+ if (!push_email_address($address, $role)) {
warn("Invalid MAINTAINERS address: '" . $address . "'\n");
}
}
}
+sub add_role {
+ my ($line, $role) = @_;
+
+ my ($name, $address) = parse_email($line);
+ my $email = format_email($name, $address, $email_usename);
+
+ foreach my $entry (@email_to) {
+ if ($email_remove_duplicates) {
+ my ($entry_name, $entry_address) = parse_email($entry->[0]);
+ if ($name eq $entry_name || $address eq $entry_address) {
+ if ($entry->[1] eq "") {
+ $entry->[1] = "$role";
+ } else {
+ $entry->[1] = "$entry->[1],$role";
+ }
+ }
+ } else {
+ if ($email eq $entry->[0]) {
+ if ($entry->[1] eq "") {
+ $entry->[1] = "$role";
+ } else {
+ $entry->[1] = "$entry->[1],$role";
+ }
+ }
+ }
+ }
+}
+
sub which {
my ($bin) = @_;
@@ -669,7 +819,7 @@ sub which {
}
sub mailmap {
- my @lines = @_;
+ my (@lines) = @_;
my %hash;
foreach my $line (@lines) {
@@ -678,14 +828,14 @@ sub mailmap {
$hash{$name} = $address;
} elsif ($address ne $hash{$name}) {
$address = $hash{$name};
- $line = format_email($name, $address);
+ $line = format_email($name, $address, $email_usename);
}
if (exists($mailmap{$name})) {
my $obj = $mailmap{$name};
foreach my $map_address (@$obj) {
if (($map_address eq $address) &&
($map_address ne $hash{$name})) {
- $line = format_email($name, $hash{$name});
+ $line = format_email($name, $hash{$name}, $email_usename);
}
}
}
@@ -694,34 +844,38 @@ sub mailmap {
return @lines;
}
-sub recent_git_signoffs {
- my ($file) = @_;
-
- my $sign_offs = "";
- my $cmd = "";
- my $output = "";
- my $count = 0;
+sub git_execute_cmd {
+ my ($cmd) = @_;
my @lines = ();
- my %hash;
- my $total_sign_offs;
- if (which("git") eq "") {
- warn("$P: git not found. Add --nogit to options?\n");
- return;
- }
- if (!(-d ".git")) {
- warn("$P: .git directory not found. Use a git repository for better results.\n");
- warn("$P: perhaps 'git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git'\n");
- return;
- }
+ my $output = `$cmd`;
+ $output =~ s/^\s*//gm;
+ @lines = split("\n", $output);
- $cmd = "git log --since=${email_git_since} -- ${file}";
+ return @lines;
+}
- $output = `${cmd}`;
- $output =~ s/^\s*//gm;
+sub hg_execute_cmd {
+ my ($cmd) = @_;
+ my @lines = ();
+ my $output = `$cmd`;
@lines = split("\n", $output);
+ return @lines;
+}
+
+sub vcs_find_signers {
+ my ($cmd) = @_;
+ my @lines = ();
+ my $commits;
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ my $pattern = $VCS_cmds{"commit_pattern"};
+
+ $commits = grep(/$pattern/, @lines); # of commits
+
@lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
if (!$email_git_penguin_chiefs) {
@lines = grep(!/${penguin_chiefs}/i, @lines);
@@ -729,111 +883,183 @@ sub recent_git_signoffs {
# cut -f2- -d":"
s/.*:\s*(.+)\s*/$1/ for (@lines);
- $total_sign_offs = @lines;
+## Reformat email addresses (with names) to avoid badly written signatures
- if ($email_remove_duplicates) {
- @lines = mailmap(@lines);
+ foreach my $line (@lines) {
+ my ($name, $address) = parse_email($line);
+ $line = format_email($name, $address, 1);
}
- @lines = sort(@lines);
-
- # uniq -c
- $hash{$_}++ for @lines;
-
- # sort -rn
- foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
- my $sign_offs = $hash{$line};
- $count++;
- last if ($sign_offs < $email_git_min_signatures ||
- $count > $email_git_max_maintainers ||
- $sign_offs * 100 / $total_sign_offs < $email_git_min_percent);
- push_email_address($line);
- }
+ return ($commits, @lines);
}
-sub save_commits {
- my ($cmd, @commits) = @_;
- my $output;
+sub vcs_save_commits {
+ my ($cmd) = @_;
my @lines = ();
+ my @commits = ();
- $output = `${cmd}`;
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
- @lines = split("\n", $output);
foreach my $line (@lines) {
- if ($line =~ m/^(\w+) /) {
- push (@commits, $1);
+ if ($line =~ m/$VCS_cmds{"blame_commit_pattern"}/) {
+ push(@commits, $1);
}
}
+
return @commits;
}
-sub git_assign_blame {
+sub vcs_blame {
my ($file) = @_;
-
- my @lines = ();
- my @commits = ();
my $cmd;
- my $output;
- my %hash;
- my $total_sign_offs;
- my $count;
+ my @commits = ();
+
+ return @commits if (!(-f $file));
+
+ if (@range && $VCS_cmds{"blame_range_cmd"} eq "") {
+ my @all_commits = ();
+
+ $cmd = $VCS_cmds{"blame_file_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ @all_commits = vcs_save_commits($cmd);
- if (@range) {
foreach my $file_range_diff (@range) {
next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
my $diff_file = $1;
my $diff_start = $2;
my $diff_length = $3;
- next if (!("$file" eq "$diff_file"));
- $cmd = "git blame -l -L $diff_start,+$diff_length $file";
- @commits = save_commits($cmd, @commits);
+ next if ("$file" ne "$diff_file");
+ for (my $i = $diff_start; $i < $diff_start + $diff_length; $i++) {
+ push(@commits, $all_commits[$i]);
+ }
}
- } else {
- if (-f $file) {
- $cmd = "git blame -l $file";
- @commits = save_commits($cmd, @commits);
+ } elsif (@range) {
+ foreach my $file_range_diff (@range) {
+ next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
+ my $diff_file = $1;
+ my $diff_start = $2;
+ my $diff_length = $3;
+ next if ("$file" ne "$diff_file");
+ $cmd = $VCS_cmds{"blame_range_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ push(@commits, vcs_save_commits($cmd));
}
+ } else {
+ $cmd = $VCS_cmds{"blame_file_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ @commits = vcs_save_commits($cmd);
}
- $total_sign_offs = 0;
- @commits = uniq(@commits);
- foreach my $commit (@commits) {
- $cmd = "git log -1 ${commit}";
+ return @commits;
+}
- $output = `${cmd}`;
- $output =~ s/^\s*//gm;
- @lines = split("\n", $output);
+my $printed_novcs = 0;
+sub vcs_exists {
+ %VCS_cmds = %VCS_cmds_git;
+ return 1 if eval $VCS_cmds{"available"};
+ %VCS_cmds = %VCS_cmds_hg;
+ return 1 if eval $VCS_cmds{"available"};
+ %VCS_cmds = ();
+ if (!$printed_novcs) {
+ warn("$P: No supported VCS found. Add --nogit to options?\n");
+ warn("Using a git repository produces better results.\n");
+ warn("Try Linus Torvalds' latest git repository using:\n");
+ warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n");
+ $printed_novcs = 1;
+ }
+ return 0;
+}
- @lines = grep(/^[-_ a-z]+by:.*\@.*$/i, @lines);
- if (!$email_git_penguin_chiefs) {
- @lines = grep(!/${penguin_chiefs}/i, @lines);
- }
+sub vcs_assign {
+ my ($role, $divisor, @lines) = @_;
- # cut -f2- -d":"
- s/.*:\s*(.+)\s*/$1/ for (@lines);
+ my %hash;
+ my $count = 0;
- $total_sign_offs += @lines;
+ return if (@lines <= 0);
- if ($email_remove_duplicates) {
- @lines = mailmap(@lines);
- }
+ if ($divisor <= 0) {
+ warn("Bad divisor in " . (caller(0))[3] . ": $divisor\n");
+ $divisor = 1;
+ }
- $hash{$_}++ for @lines;
+ if ($email_remove_duplicates) {
+ @lines = mailmap(@lines);
}
- $count = 0;
+ @lines = sort(@lines);
+
+ # uniq -c
+ $hash{$_}++ for @lines;
+
+ # sort -rn
foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
my $sign_offs = $hash{$line};
+ my $percent = $sign_offs * 100 / $divisor;
+
+ $percent = 100 if ($percent > 100);
$count++;
last if ($sign_offs < $email_git_min_signatures ||
$count > $email_git_max_maintainers ||
- $sign_offs * 100 / $total_sign_offs < $email_git_min_percent);
- push_email_address($line);
+ $percent < $email_git_min_percent);
+ push_email_address($line, '');
+ if ($output_rolestats) {
+ my $fmt_percent = sprintf("%.0f", $percent);
+ add_role($line, "$role:$sign_offs/$divisor=$fmt_percent%");
+ } else {
+ add_role($line, $role);
+ }
+ }
+}
+
+sub vcs_file_signoffs {
+ my ($file) = @_;
+
+ my @signers = ();
+ my $commits;
+
+ return if (!vcs_exists());
+
+ my $cmd = $VCS_cmds{"find_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
+
+ ($commits, @signers) = vcs_find_signers($cmd);
+ vcs_assign("commit_signer", $commits, @signers);
+}
+
+sub vcs_file_blame {
+ my ($file) = @_;
+
+ my @signers = ();
+ my @commits = ();
+ my $total_commits;
+
+ return if (!vcs_exists());
+
+ @commits = vcs_blame($file);
+ @commits = uniq(@commits);
+ $total_commits = @commits;
+
+ foreach my $commit (@commits) {
+ my $commit_count;
+ my @commit_signers = ();
+
+ my $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+
+ ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+ push(@signers, @commit_signers);
+ }
+
+ if ($from_filename) {
+ vcs_assign("commits", $total_commits, @signers);
+ } else {
+ vcs_assign("modified commits", $total_commits, @signers);
}
}
sub uniq {
- my @parms = @_;
+ my (@parms) = @_;
my %saw;
@parms = grep(!$saw{$_}++, @parms);
@@ -841,7 +1067,7 @@ sub uniq {
}
sub sort_and_uniq {
- my @parms = @_;
+ my (@parms) = @_;
my %saw;
@parms = sort @parms;
@@ -849,8 +1075,27 @@ sub sort_and_uniq {
return @parms;
}
+sub merge_email {
+ my @lines;
+ my %saw;
+
+ for (@_) {
+ my ($address, $role) = @$_;
+ if (!$saw{$address}) {
+ if ($output_roles) {
+ push(@lines, "$address ($role)");
+ } else {
+ push(@lines, $address);
+ }
+ $saw{$address} = 1;
+ }
+ }
+
+ return @lines;
+}
+
sub output {
- my @parms = @_;
+ my (@parms) = @_;
if ($output_multiline) {
foreach my $line (@parms) {
@@ -947,11 +1192,9 @@ sub rfc822_validlist ($) {
if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so &&
$s =~ m/^$rfc822_char*$/) {
while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) {
- push @r, $1;
+ push(@r, $1);
}
return wantarray ? (scalar(@r), @r) : 1;
}
- else {
- return wantarray ? () : 0;
- }
+ return wantarray ? () : 0;
}
diff --git a/scripts/headers.sh b/scripts/headers.sh
index 0308ecc10d5..1ddcdd38d97 100755
--- a/scripts/headers.sh
+++ b/scripts/headers.sh
@@ -8,8 +8,6 @@ do_command()
{
if [ -f ${srctree}/arch/$2/include/asm/Kbuild ]; then
make ARCH=$2 KBUILD_HEADERS=$1 headers_$1
- elif [ -f ${srctree}/include/asm-$2/Kbuild ]; then
- make ARCH=$2 KBUILD_HEADERS=$1 headers_$1
else
printf "Ignoring arch: %s\n" ${arch}
fi
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 80599e3a799..999e8a7d5bf 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -27,6 +27,7 @@ oldconfig: $(obj)/conf
$< -o $(Kconfig)
silentoldconfig: $(obj)/conf
+ $(Q)mkdir -p include/generated
$< -s $(Kconfig)
localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index b55e72ff2fc..c4dec80cfd8 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -677,7 +677,7 @@ int conf_write_autoconf(void)
struct symbol *sym;
const char *str;
const char *name;
- FILE *out, *out_h;
+ FILE *out, *tristate, *out_h;
time_t now;
int i, l;
@@ -692,9 +692,16 @@ int conf_write_autoconf(void)
if (!out)
return 1;
+ tristate = fopen(".tmpconfig_tristate", "w");
+ if (!tristate) {
+ fclose(out);
+ return 1;
+ }
+
out_h = fopen(".tmpconfig.h", "w");
if (!out_h) {
fclose(out);
+ fclose(tristate);
return 1;
}
@@ -707,6 +714,9 @@ int conf_write_autoconf(void)
"# %s"
"#\n",
sym_get_string_value(sym), ctime(&now));
+ fprintf(tristate, "#\n"
+ "# Automatically generated - do not edit\n"
+ "\n");
fprintf(out_h, "/*\n"
" * Automatically generated C config: don't edit\n"
" * Linux kernel version: %s\n"
@@ -727,10 +737,14 @@ int conf_write_autoconf(void)
break;
case mod:
fprintf(out, "CONFIG_%s=m\n", sym->name);
+ fprintf(tristate, "CONFIG_%s=M\n", sym->name);
fprintf(out_h, "#define CONFIG_%s_MODULE 1\n", sym->name);
break;
case yes:
fprintf(out, "CONFIG_%s=y\n", sym->name);
+ if (sym->type == S_TRISTATE)
+ fprintf(tristate, "CONFIG_%s=Y\n",
+ sym->name);
fprintf(out_h, "#define CONFIG_%s 1\n", sym->name);
break;
}
@@ -772,13 +786,19 @@ int conf_write_autoconf(void)
}
}
fclose(out);
+ fclose(tristate);
fclose(out_h);
name = getenv("KCONFIG_AUTOHEADER");
if (!name)
- name = "include/linux/autoconf.h";
+ name = "include/generated/autoconf.h";
if (rename(".tmpconfig.h", name))
return 1;
+ name = getenv("KCONFIG_TRISTATE");
+ if (!name)
+ name = "include/config/tristate.conf";
+ if (rename(".tmpconfig_tristate", name))
+ return 1;
name = conf_get_autoconfig_name();
/*
* This must be the last step, kbuild has a dependency on auto.conf
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index bce3d0fe6fb..23dbad80cce 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -14,7 +14,7 @@ vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
# So "sudo make install" won't change the "compiled by <user>"
# do "compiled by root"
-if [ -r $TARGET -a ! -O include/linux/autoconf.h ]; then
+if [ -r $TARGET -a ! -O include/generated/autoconf.h ]; then
vecho " SKIPPED $TARGET"
exit 0
fi
diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
index 11d69c35e5b..ff954f8168c 100644
--- a/scripts/mod/Makefile
+++ b/scripts/mod/Makefile
@@ -8,7 +8,7 @@ modpost-objs := modpost.o file2alias.o sumversion.o
$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h
quiet_cmd_elfconfig = MKELF $@
- cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@
+ cmd_elfconfig = $(obj)/mk_elfconfig < $< > $@
$(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE
$(call if_changed,elfconfig)
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 62a9025cdcc..6f426afbc52 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -104,7 +104,7 @@ static void device_id_check(const char *modname, const char *device_id,
static void do_usb_entry(struct usb_device_id *id,
unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
unsigned char range_lo, unsigned char range_hi,
- struct module *mod)
+ unsigned char max, struct module *mod)
{
char alias[500];
strcpy(alias, "usb:");
@@ -118,9 +118,22 @@ static void do_usb_entry(struct usb_device_id *id,
sprintf(alias + strlen(alias), "%0*X",
bcdDevice_initial_digits, bcdDevice_initial);
if (range_lo == range_hi)
- sprintf(alias + strlen(alias), "%u", range_lo);
- else if (range_lo > 0 || range_hi < 9)
- sprintf(alias + strlen(alias), "[%u-%u]", range_lo, range_hi);
+ sprintf(alias + strlen(alias), "%X", range_lo);
+ else if (range_lo > 0 || range_hi < max) {
+ if (range_lo > 0x9 || range_hi < 0xA)
+ sprintf(alias + strlen(alias),
+ "[%X-%X]",
+ range_lo,
+ range_hi);
+ else {
+ sprintf(alias + strlen(alias),
+ range_lo < 0x9 ? "[%X-9" : "[%X",
+ range_lo);
+ sprintf(alias + strlen(alias),
+ range_hi > 0xA ? "a-%X]" : "%X]",
+				range_hi);
+ }
+ }
if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
strcat(alias, "*");
@@ -147,10 +160,49 @@ static void do_usb_entry(struct usb_device_id *id,
"MODULE_ALIAS(\"%s\");\n", alias);
}
+/* Handles increment/decrement of BCD formatted integers */
+/* Returns the previous value, so it works like i++ or i-- */
+static unsigned int incbcd(unsigned int *bcd,
+ int inc,
+ unsigned char max,
+ size_t chars)
+{
+ unsigned int init = *bcd, i, j;
+ unsigned long long c, dec = 0;
+
+ /* If bcd is not in BCD format, just increment */
+ if (max > 0x9) {
+ *bcd += inc;
+ return init;
+ }
+
+ /* Convert BCD to Decimal */
+ for (i=0 ; i < chars ; i++) {
+ c = (*bcd >> (i << 2)) & 0xf;
+ c = c > 9 ? 9 : c; /* force to bcd just in case */
+ for (j=0 ; j < i ; j++)
+ c = c * 10;
+ dec += c;
+ }
+
+ /* Do our increment/decrement */
+ dec += inc;
+ *bcd = 0;
+
+ /* Convert back to BCD */
+ for (i=0 ; i < chars ; i++) {
+ for (c=1,j=0 ; j < i ; j++)
+ c = c * 10;
+ c = (dec / c) % 10;
+ *bcd += c << (i << 2);
+ }
+ return init;
+}
+
static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
{
unsigned int devlo, devhi;
- unsigned char chi, clo;
+ unsigned char chi, clo, max;
int ndigits;
id->match_flags = TO_NATIVE(id->match_flags);
@@ -162,6 +214,17 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
TO_NATIVE(id->bcdDevice_hi) : ~0x0U;
+ /* Figure out if this entry is in bcd or hex format */
+ max = 0x9; /* Default to decimal format */
+ for (ndigits = 0 ; ndigits < sizeof(id->bcdDevice_lo) * 2 ; ndigits++) {
+ clo = (devlo >> (ndigits << 2)) & 0xf;
+ chi = ((devhi > 0x9999 ? 0x9999 : devhi) >> (ndigits << 2)) & 0xf;
+ if (clo > max || chi > max) {
+ max = 0xf;
+ break;
+ }
+ }
+
/*
* Some modules (visor) have empty slots as placeholder for
* run-time specification that results in catch-all alias
@@ -173,21 +236,27 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
clo = devlo & 0xf;
chi = devhi & 0xf;
- if (chi > 9) /* it's bcd not hex */
- chi = 9;
+ if (chi > max) /* If we are in bcd mode, truncate if necessary */
+ chi = max;
devlo >>= 4;
devhi >>= 4;
if (devlo == devhi || !ndigits) {
- do_usb_entry(id, devlo, ndigits, clo, chi, mod);
+ do_usb_entry(id, devlo, ndigits, clo, chi, max, mod);
break;
}
- if (clo > 0)
- do_usb_entry(id, devlo++, ndigits, clo, 9, mod);
-
- if (chi < 9)
- do_usb_entry(id, devhi--, ndigits, 0, chi, mod);
+ if (clo > 0x0)
+ do_usb_entry(id,
+ incbcd(&devlo, 1, max,
+ sizeof(id->bcdDevice_lo) * 2),
+ ndigits, clo, max, max, mod);
+
+ if (chi < max)
+ do_usb_entry(id,
+ incbcd(&devhi, -1, max,
+ sizeof(id->bcdDevice_lo) * 2),
+ ndigits, 0x0, chi, max, mod);
}
}
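The incbcd() helper introduced above increments or decrements a BCD-encoded field, carrying across decimal digit boundaries instead of doing a plain hex add; when max indicates hex format it falls back to ordinary arithmetic. A short illustrative use with made-up values (not taken from any real device table):

	unsigned int bcd = 0x0199;		/* BCD encoding of 199 */
	unsigned int prev;

	prev = incbcd(&bcd, 1, 0x9, 4);		/* BCD mode: max digit is 9 */
	/* prev == 0x0199, bcd == 0x0200 (199 + 1, re-encoded as BCD) */

	bcd = 0x01ff;				/* digits above 9, so hex mode */
	prev = incbcd(&bcd, 1, 0xf, 4);		/* max > 0x9: plain increment */
	/* prev == 0x01ff, bcd == 0x0200 */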
diff --git a/scripts/mod/mk_elfconfig.c b/scripts/mod/mk_elfconfig.c
index 6a96d47bd1e..639bca7ba55 100644
--- a/scripts/mod/mk_elfconfig.c
+++ b/scripts/mod/mk_elfconfig.c
@@ -9,9 +9,6 @@ main(int argc, char **argv)
unsigned char ei[EI_NIDENT];
union { short s; char c[2]; } endian_test;
- if (argc != 2) {
- fprintf(stderr, "Error: no arch\n");
- }
if (fread(ei, 1, EI_NIDENT, stdin) != EI_NIDENT) {
fprintf(stderr, "Error: input truncated\n");
return 1;
@@ -55,12 +52,6 @@ main(int argc, char **argv)
else
exit(1);
- if ((strcmp(argv[1], "h8300") == 0)
- || (strcmp(argv[1], "blackfin") == 0))
- printf("#define MODULE_SYMBOL_PREFIX \"_\"\n");
- else
- printf("#define MODULE_SYMBOL_PREFIX \"\"\n");
-
return 0;
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 801a16a1754..20923613467 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -15,8 +15,17 @@
#include <stdio.h>
#include <ctype.h>
#include "modpost.h"
+#include "../../include/generated/autoconf.h"
#include "../../include/linux/license.h"
+/* Some toolchains use a `_' prefix for all user symbols. */
+#ifdef CONFIG_SYMBOL_PREFIX
+#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
+#else
+#define MODULE_SYMBOL_PREFIX ""
+#endif
+
+
/* Are we using CONFIG_MODVERSIONS? */
int modversions = 0;
/* Warn about undefined symbols? (do so if we have vmlinux) */
@@ -451,8 +460,6 @@ static int parse_elf(struct elf_info *info, const char *filename)
info->export_unused_gpl_sec = i;
else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
info->export_gpl_future_sec = i;
- else if (strcmp(secname, "__markers_strings") == 0)
- info->markers_strings_sec = i;
if (sechdrs[i].sh_type != SHT_SYMTAB)
continue;
@@ -515,7 +522,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
break;
case SHN_ABS:
/* CRC'd symbol */
- if (memcmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
+ if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
crc = (unsigned int) sym->st_value;
sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
export);
@@ -559,7 +566,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
break;
default:
/* All exported symbols */
- if (memcmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) {
+ if (strncmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) {
sym_add_exported(symname + strlen(KSYMTAB_PFX), mod,
export);
}
@@ -1509,62 +1516,6 @@ static void check_sec_ref(struct module *mod, const char *modname,
}
}
-static void get_markers(struct elf_info *info, struct module *mod)
-{
- const Elf_Shdr *sh = &info->sechdrs[info->markers_strings_sec];
- const char *strings = (const char *) info->hdr + sh->sh_offset;
- const Elf_Sym *sym, *first_sym, *last_sym;
- size_t n;
-
- if (!info->markers_strings_sec)
- return;
-
- /*
- * First count the strings. We look for all the symbols defined
- * in the __markers_strings section named __mstrtab_*. For
- * these local names, the compiler puts a random .NNN suffix on,
- * so the names don't correspond exactly.
- */
- first_sym = last_sym = NULL;
- n = 0;
- for (sym = info->symtab_start; sym < info->symtab_stop; sym++)
- if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT &&
- sym->st_shndx == info->markers_strings_sec &&
- !strncmp(info->strtab + sym->st_name,
- "__mstrtab_", sizeof "__mstrtab_" - 1)) {
- if (first_sym == NULL)
- first_sym = sym;
- last_sym = sym;
- ++n;
- }
-
- if (n == 0)
- return;
-
- /*
- * Now collect each name and format into a line for the output.
- * Lines look like:
- * marker_name vmlinux marker %s format %d
- * The format string after the second \t can use whitespace.
- */
- mod->markers = NOFAIL(malloc(sizeof mod->markers[0] * n));
- mod->nmarkers = n;
-
- n = 0;
- for (sym = first_sym; sym <= last_sym; sym++)
- if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT &&
- sym->st_shndx == info->markers_strings_sec &&
- !strncmp(info->strtab + sym->st_name,
- "__mstrtab_", sizeof "__mstrtab_" - 1)) {
- const char *name = strings + sym->st_value;
- const char *fmt = strchr(name, '\0') + 1;
- char *line = NULL;
- asprintf(&line, "%s\t%s\t%s\n", name, mod->name, fmt);
- NOFAIL(line);
- mod->markers[n++] = line;
- }
-}
-
static void read_symbols(char *modname)
{
const char *symname;
@@ -1620,8 +1571,6 @@ static void read_symbols(char *modname)
get_src_version(modname, mod->srcversion,
sizeof(mod->srcversion)-1);
- get_markers(&info, mod);
-
parse_elf_finish(&info);
/* Our trick to get versioning for module struct etc. - it's
@@ -1976,96 +1925,6 @@ static void write_dump(const char *fname)
write_if_changed(&buf, fname);
}
-static void add_marker(struct module *mod, const char *name, const char *fmt)
-{
- char *line = NULL;
- asprintf(&line, "%s\t%s\t%s\n", name, mod->name, fmt);
- NOFAIL(line);
-
- mod->markers = NOFAIL(realloc(mod->markers, ((mod->nmarkers + 1) *
- sizeof mod->markers[0])));
- mod->markers[mod->nmarkers++] = line;
-}
-
-static void read_markers(const char *fname)
-{
- unsigned long size, pos = 0;
- void *file = grab_file(fname, &size);
- char *line;
-
- if (!file) /* No old markers, silently ignore */
- return;
-
- while ((line = get_next_line(&pos, file, size))) {
- char *marker, *modname, *fmt;
- struct module *mod;
-
- marker = line;
- modname = strchr(marker, '\t');
- if (!modname)
- goto fail;
- *modname++ = '\0';
- fmt = strchr(modname, '\t');
- if (!fmt)
- goto fail;
- *fmt++ = '\0';
- if (*marker == '\0' || *modname == '\0')
- goto fail;
-
- mod = find_module(modname);
- if (!mod) {
- mod = new_module(modname);
- mod->skip = 1;
- }
- if (is_vmlinux(modname)) {
- have_vmlinux = 1;
- mod->skip = 0;
- }
-
- if (!mod->skip)
- add_marker(mod, marker, fmt);
- }
- release_file(file, size);
- return;
-fail:
- fatal("parse error in markers list file\n");
-}
-
-static int compare_strings(const void *a, const void *b)
-{
- return strcmp(*(const char **) a, *(const char **) b);
-}
-
-static void write_markers(const char *fname)
-{
- struct buffer buf = { };
- struct module *mod;
- size_t i;
-
- for (mod = modules; mod; mod = mod->next)
- if ((!external_module || !mod->skip) && mod->markers != NULL) {
- /*
- * Sort the strings so we can skip duplicates when
- * we write them out.
- */
- qsort(mod->markers, mod->nmarkers,
- sizeof mod->markers[0], &compare_strings);
- for (i = 0; i < mod->nmarkers; ++i) {
- char *line = mod->markers[i];
- buf_write(&buf, line, strlen(line));
- while (i + 1 < mod->nmarkers &&
- !strcmp(mod->markers[i],
- mod->markers[i + 1]))
- free(mod->markers[i++]);
- free(mod->markers[i]);
- }
- free(mod->markers);
- mod->markers = NULL;
- }
-
- write_if_changed(&buf, fname);
-}
-
struct ext_sym_list {
struct ext_sym_list *next;
const char *file;
@@ -2077,8 +1936,6 @@ int main(int argc, char **argv)
struct buffer buf = { };
char *kernel_read = NULL, *module_read = NULL;
char *dump_write = NULL;
- char *markers_read = NULL;
- char *markers_write = NULL;
int opt;
int err;
struct ext_sym_list *extsym_iter;
@@ -2122,12 +1979,6 @@ int main(int argc, char **argv)
case 'w':
warn_unresolved = 1;
break;
- case 'M':
- markers_write = optarg;
- break;
- case 'K':
- markers_read = optarg;
- break;
default:
exit(1);
}
@@ -2182,11 +2033,5 @@ int main(int argc, char **argv)
"'make CONFIG_DEBUG_SECTION_MISMATCH=y'\n",
sec_mismatch_count);
- if (markers_read)
- read_markers(markers_read);
-
- if (markers_write)
- write_markers(markers_write);
-
return err;
}
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 09f58e33d22..be987a44f25 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -112,8 +112,6 @@ struct module {
int has_init;
int has_cleanup;
struct buffer dev_table_buf;
- char **markers;
- size_t nmarkers;
char srcversion[25];
};
@@ -128,7 +126,6 @@ struct elf_info {
Elf_Section export_gpl_sec;
Elf_Section export_unused_gpl_sec;
Elf_Section export_gpl_future_sec;
- Elf_Section markers_strings_sec;
const char *strtab;
char *modinfo;
unsigned int modinfo_len;
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index f67cc885c80..62fcc3a7f4d 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -77,9 +77,27 @@ clean-files += $(objtree)/binkernel.spec
# Deb target
# ---------------------------------------------------------------------------
+quiet_cmd_builddeb = BUILDDEB
+ cmd_builddeb = set -e; \
+ test `id -u` = 0 || \
+ test -n "$(KBUILD_PKG_ROOTCMD)" || { \
+ which fakeroot >/dev/null 2>&1 && \
+ KBUILD_PKG_ROOTCMD="fakeroot -u"; \
+ } || { \
+ echo; \
+ echo "builddeb must be run as root (or using fakeroot)."; \
+ echo "KBUILD_PKG_ROOTCMD is unset and fakeroot not found."; \
+ echo "Try setting KBUILD_PKG_ROOTCMD to a command to acquire"; \
+ echo "root privileges (e.g., 'fakeroot -u' or 'sudo')."; \
+ false; \
+ } && \
+ \
+ $$KBUILD_PKG_ROOTCMD $(CONFIG_SHELL) \
+ $(srctree)/scripts/package/builddeb
+
deb-pkg: FORCE
$(MAKE) KBUILD_SRC=
- $(CONFIG_SHELL) $(srctree)/scripts/package/builddeb
+ $(call cmd,builddeb)
clean-dirs += $(objtree)/debian/
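(Usage sketch, not part of the patch: with the quiet_cmd_builddeb rule above, the deb-pkg target is driven as before, and KBUILD_PKG_ROOTCMD controls how root privileges are acquired; all names below appear in the hunk above.)

	make deb-pkg                                    # picks up fakeroot automatically when installed
	make deb-pkg KBUILD_PKG_ROOTCMD="fakeroot -u"   # explicit fakeroot invocation
	make deb-pkg KBUILD_PKG_ROOTCMD=sudo            # or any command that grants real root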
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index b1fd48db164..51b2aa0acb8 100644
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -101,7 +101,11 @@ esac
#
(
cd "${tmpdir}"
- tar cf - . | ${compress} > "${tarball}${file_ext}"
+ opts=
+ if tar --owner=root --group=root --help >/dev/null 2>&1; then
+ opts="--owner=root --group=root"
+ fi
+ tar cf - . $opts | ${compress} > "${tarball}${file_ext}"
)
echo "Tarball successfully created in ${tarball}${file_ext}"
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index f0d14452632..92f09fe9639 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -113,13 +113,13 @@ $P =~ s@.*/@@g;
my $V = '0.1';
-if ($#ARGV != 10) {
- print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
+if ($#ARGV != 11) {
+ print "usage: $P arch endian bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
print "version: $V\n";
exit(1);
}
-my ($arch, $bits, $objdump, $objcopy, $cc,
+my ($arch, $endian, $bits, $objdump, $objcopy, $cc,
$ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
 # This file refers to mcount and shouldn't be ftraced, so let's ignore it
@@ -295,6 +295,61 @@ if ($arch eq "x86_64") {
$ld .= " -m elf64_sparc";
$cc .= " -m64";
$objcopy .= " -O elf64-sparc";
+} elsif ($arch eq "mips") {
+	# To enable module support, we need to enable gcc's -mlong-calls
+	# option for modules. With this option we can no longer get the real
+	# offset of the call to _mcount, only the offset of the lui
+	# instruction or the addiu one. Here we record the address of the
+	# first one; later we can replace that instruction with a branch
+	# instruction that jumps over the profiling call (to filter out the
+	# indicated functions), or switch back to the lui instruction to
+	# trace them again - in other words, dynamic tracing.
+ #
+ # c: 3c030000 lui v1,0x0
+ # c: R_MIPS_HI16 _mcount
+ # c: R_MIPS_NONE *ABS*
+ # c: R_MIPS_NONE *ABS*
+ # 10: 64630000 daddiu v1,v1,0
+ # 10: R_MIPS_LO16 _mcount
+ # 10: R_MIPS_NONE *ABS*
+ # 10: R_MIPS_NONE *ABS*
+ # 14: 03e0082d move at,ra
+ # 18: 0060f809 jalr v1
+ #
+ # for the kernel:
+ #
+ # 10: 03e0082d move at,ra
+ # 14: 0c000000 jal 0 <loongson_halt>
+ # 14: R_MIPS_26 _mcount
+ # 14: R_MIPS_NONE *ABS*
+ # 14: R_MIPS_NONE *ABS*
+ # 18: 00020021 nop
+ if ($is_module eq "0") {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+ } else {
+ $mcount_regex = "^\\s*([0-9a-fA-F]+): R_MIPS_HI16\\s+_mcount\$";
+ }
+ $objdump .= " -Melf-trad".$endian."mips ";
+
+ if ($endian eq "big") {
+ $endian = " -EB ";
+ $ld .= " -melf".$bits."btsmip";
+ } else {
+ $endian = " -EL ";
+ $ld .= " -melf".$bits."ltsmip";
+ }
+
+ $cc .= " -mno-abicalls -fno-pic -mabi=" . $bits . $endian;
+ $ld .= $endian;
+
+ if ($bits == 64) {
+ $function_regex =
+ "^([0-9a-fA-F]+)\\s+<(.|[^\$]L.*?|\$[^L].*?|[^\$][^L].*?)>:";
+ $type = ".dword";
+ }
+} elsif ($arch eq "microblaze") {
+ # Microblaze calls '_mcount' instead of plain 'mcount'.
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
} else {
die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
}
diff --git a/scripts/tags.sh b/scripts/tags.sh
index d52f7a01557..1a0c44d7c4a 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -89,7 +89,13 @@ all_defconfigs()
docscope()
{
- (echo \-k; echo \-q; all_sources) > cscope.files
+ # always use absolute paths for cscope, as recommended by cscope
+ # upstream
+ case "$tree" in
+ /*) ;;
+ *) tree=$PWD/$tree ;;
+ esac
+ (cd /; echo \-k; echo \-q; all_sources) > cscope.files
cscope -b -f cscope.out
}
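(Usage sketch, not part of the patch: the make cscope target that invokes this script, and cscope's -d/-f options, are assumptions not shown in this hunk. With absolute paths in cscope.files, the resulting database can be consulted from any directory.)

	make cscope                               # regenerates cscope.files and cscope.out via scripts/tags.sh
	cscope -d -f /path/to/kernel/cscope.out   # browse the existing database from anywhere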
diff --git a/scripts/unifdef.c b/scripts/unifdef.c
index 30d459fb070..44d39785e50 100644
--- a/scripts/unifdef.c
+++ b/scripts/unifdef.c
@@ -1,13 +1,5 @@
/*
- * Copyright (c) 2002 - 2005 Tony Finch <dot@dotat.at>. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by Dave Yost.
- * It was rewritten to support ANSI C by Tony Finch. The original version of
- * unifdef carried the following copyright notice. None of its code remains
- * in this version (though some of the names remain).
- *
- * Copyright (c) 1985, 1993
- * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 2002 - 2009 Tony Finch <dot@dotat.at>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,23 +23,20 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
+/*
+ * This code was derived from software contributed to Berkeley by Dave Yost.
+ * It was rewritten to support ANSI C by Tony Finch. The original version
+ * of unifdef carried the 4-clause BSD copyright licence. None of its code
+ * remains in this version (though some of the names remain) so it now
+ * carries a more liberal licence.
+ *
+ * The latest version is available from http://dotat.at/prog/unifdef
+ */
-#ifndef lint
-#if 0
-static const char copyright[] =
-"@(#) Copyright (c) 1985, 1993\n\
- The Regents of the University of California. All rights reserved.\n";
-#endif
-#ifdef __IDSTRING
-__IDSTRING(Berkeley, "@(#)unifdef.c 8.1 (Berkeley) 6/6/93");
-__IDSTRING(NetBSD, "$NetBSD: unifdef.c,v 1.8 2000/07/03 02:51:36 matt Exp $");
-__IDSTRING(dotat, "$dotat: things/unifdef.c,v 1.171 2005/03/08 12:38:48 fanf2 Exp $");
-#endif
-#endif /* not lint */
-#ifdef __FBSDID
-__FBSDID("$FreeBSD: /repoman/r/ncvs/src/usr.bin/unifdef/unifdef.c,v 1.20 2005/05/21 09:55:09 ru Exp $");
-#endif
+static const char * const copyright[] = {
+ "@(#) Copyright (c) 2002 - 2009 Tony Finch <dot@dotat.at>\n",
+ "$dotat: unifdef/unifdef.c,v 1.190 2009/11/27 17:21:26 fanf2 Exp $",
+};
/*
* unifdef - remove ifdef'ed lines
@@ -72,8 +61,6 @@ __FBSDID("$FreeBSD: /repoman/r/ncvs/src/usr.bin/unifdef/unifdef.c,v 1.20 2005/05
#include <string.h>
#include <unistd.h>
-size_t strlcpy(char *dst, const char *src, size_t siz);
-
/* types of input lines: */
typedef enum {
LT_TRUEI, /* a true #if with ignore flag */
@@ -90,6 +77,7 @@ typedef enum {
LT_DODGY_LAST = LT_DODGY + LT_ENDIF,
LT_PLAIN, /* ordinary line */
LT_EOF, /* end of file */
+ LT_ERROR, /* unevaluable #if */
LT_COUNT
} Linetype;
@@ -100,7 +88,7 @@ static char const * const linetype_name[] = {
"DODGY IF", "DODGY TRUE", "DODGY FALSE",
"DODGY ELIF", "DODGY ELTRUE", "DODGY ELFALSE",
"DODGY ELSE", "DODGY ENDIF",
- "PLAIN", "EOF"
+ "PLAIN", "EOF", "ERROR"
};
/* state of #if processing */
@@ -168,11 +156,13 @@ static char const * const linestate_name[] = {
* Globals.
*/
+static bool compblank; /* -B: compress blank lines */
+static bool lnblank; /* -b: blank deleted lines */
static bool complement; /* -c: do the complement */
static bool debugging; /* -d: debugging reports */
static bool iocccok; /* -e: fewer IOCCC errors */
+static bool strictlogic; /* -K: keep ambiguous #ifs */
static bool killconsts; /* -k: eval constant #ifs */
-static bool lnblank; /* -l: blank deleted lines */
static bool lnnum; /* -n: add #line directives */
static bool symlist; /* -s: output symbol list */
static bool text; /* -t: this is a text file */
@@ -196,7 +186,9 @@ static bool ignoring[MAXDEPTH]; /* ignore comments state */
static int stifline[MAXDEPTH]; /* start of current #if */
static int depth; /* current #if nesting */
static int delcount; /* count of deleted lines */
-static bool keepthis; /* don't delete constant #if */
+static unsigned blankcount; /* count of blank lines */
+static unsigned blankmax; /* maximum recent blankcount */
+static bool constexpr; /* constant #if expression */
static int exitstat; /* program exit status */
@@ -206,13 +198,14 @@ static void done(void);
static void error(const char *);
static int findsym(const char *);
static void flushline(bool);
-static Linetype get_line(void);
+static Linetype parseline(void);
static Linetype ifeval(const char **);
static void ignoreoff(void);
static void ignoreon(void);
static void keywordedit(const char *);
static void nest(void);
static void process(void);
+static const char *skipargs(const char *);
static const char *skipcomment(const char *);
static const char *skipsym(const char *);
static void state(Ifstate);
@@ -220,7 +213,7 @@ static int strlcmp(const char *, const char *, size_t);
static void unnest(void);
static void usage(void);
-#define endsym(c) (!isalpha((unsigned char)c) && !isdigit((unsigned char)c) && c != '_')
+#define endsym(c) (!isalnum((unsigned char)c) && c != '_')
/*
* The main program.
@@ -230,7 +223,7 @@ main(int argc, char *argv[])
{
int opt;
- while ((opt = getopt(argc, argv, "i:D:U:I:cdeklnst")) != -1)
+ while ((opt = getopt(argc, argv, "i:D:U:I:BbcdeKklnst")) != -1)
switch (opt) {
case 'i': /* treat stuff controlled by these symbols as text */
/*
@@ -255,6 +248,13 @@ main(int argc, char *argv[])
case 'I':
/* no-op for compatibility with cpp */
break;
+ case 'B': /* compress blank lines around removed section */
+ compblank = true;
+ break;
+ case 'b': /* blank deleted lines instead of omitting them */
+ case 'l': /* backwards compatibility */
+ lnblank = true;
+ break;
case 'c': /* treat -D as -U and vice versa */
complement = true;
break;
@@ -264,12 +264,12 @@ main(int argc, char *argv[])
case 'e': /* fewer errors from dodgy lines */
iocccok = true;
break;
+ case 'K': /* keep ambiguous #ifs */
+ strictlogic = true;
+ break;
case 'k': /* process constant #ifs */
killconsts = true;
break;
- case 'l': /* blank deleted lines instead of omitting them */
- lnblank = true;
- break;
case 'n': /* add #line directive after deleted lines */
lnnum = true;
break;
@@ -284,6 +284,8 @@ main(int argc, char *argv[])
}
argc -= optind;
argv += optind;
+ if (compblank && lnblank)
+ errx(2, "-B and -b are mutually exclusive");
if (argc > 1) {
errx(2, "can only do one file");
} else if (argc == 1 && strcmp(*argv, "-") != 0) {
@@ -302,7 +304,7 @@ main(int argc, char *argv[])
static void
usage(void)
{
- fprintf(stderr, "usage: unifdef [-cdeklnst] [-Ipath]"
+ fprintf(stderr, "usage: unifdef [-BbcdeKknst] [-Ipath]"
" [-Dsym[=val]] [-Usym] [-iDsym[=val]] [-iUsym] ... [file]\n");
exit(2);
}
@@ -383,46 +385,46 @@ static state_fn * const trans_table[IS_COUNT][LT_COUNT] = {
/* IS_OUTSIDE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Eendif,
Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Eendif,
- print, done },
+ print, done, abort },
/* IS_FALSE_PREFIX */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Strue, Sfalse,Selse, Dendif,
Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Eioccc,Eioccc,Eioccc,Eioccc,
- drop, Eeof },
+ drop, Eeof, abort },
/* IS_TRUE_PREFIX */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Dfalse,Dfalse,Dfalse,Delse, Dendif,
Oiffy, Oiffy, Fpass, Oif, Oif, Eioccc,Eioccc,Eioccc,Eioccc,Eioccc,
- print, Eeof },
+ print, Eeof, abort },
/* IS_PASS_MIDDLE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Pelif, Mtrue, Delif, Pelse, Pendif,
Oiffy, Oiffy, Fpass, Oif, Oif, Pelif, Oelif, Oelif, Pelse, Pendif,
- print, Eeof },
+ print, Eeof, abort },
/* IS_FALSE_MIDDLE */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Pelif, Mtrue, Delif, Pelse, Pendif,
Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eioccc,Eioccc,Eioccc,Eioccc,Eioccc,
- drop, Eeof },
+ drop, Eeof, abort },
/* IS_TRUE_MIDDLE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Melif, Melif, Melif, Melse, Pendif,
Oiffy, Oiffy, Fpass, Oif, Oif, Eioccc,Eioccc,Eioccc,Eioccc,Pendif,
- print, Eeof },
+ print, Eeof, abort },
/* IS_PASS_ELSE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Pendif,
Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Pendif,
- print, Eeof },
+ print, Eeof, abort },
/* IS_FALSE_ELSE */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, Dendif,
Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, Eioccc,
- drop, Eeof },
+ drop, Eeof, abort },
/* IS_TRUE_ELSE */
{ Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Dendif,
Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Eioccc,
- print, Eeof },
+ print, Eeof, abort },
/* IS_FALSE_TRAILER */
{ Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Dendif,
Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Eioccc,
- drop, Eeof }
+ drop, Eeof, abort }
/*TRUEI FALSEI IF TRUE FALSE ELIF ELTRUE ELFALSE ELSE ENDIF
TRUEI FALSEI IF TRUE FALSE ELIF ELTRUE ELFALSE ELSE ENDIF (DODGY)
- PLAIN EOF */
+ PLAIN EOF ERROR */
};
/*
@@ -463,9 +465,11 @@ keywordedit(const char *replacement)
static void
nest(void)
{
- depth += 1;
- if (depth >= MAXDEPTH)
+ if (depth > MAXDEPTH-1)
+ abort(); /* bug */
+ if (depth == MAXDEPTH-1)
error("Too many levels of nesting");
+ depth += 1;
stifline[depth] = linenum;
}
static void
@@ -490,15 +494,23 @@ flushline(bool keep)
if (symlist)
return;
if (keep ^ complement) {
- if (lnnum && delcount > 0)
- printf("#line %d\n", linenum);
- fputs(tline, stdout);
- delcount = 0;
+ bool blankline = tline[strspn(tline, " \t\n")] == '\0';
+ if (blankline && compblank && blankcount != blankmax) {
+ delcount += 1;
+ blankcount += 1;
+ } else {
+ if (lnnum && delcount > 0)
+ printf("#line %d\n", linenum);
+ fputs(tline, stdout);
+ delcount = 0;
+ blankmax = blankcount = blankline ? blankcount + 1 : 0;
+ }
} else {
if (lnblank)
putc('\n', stdout);
exitstat = 1;
delcount += 1;
+ blankcount = 0;
}
}
@@ -510,9 +522,12 @@ process(void)
{
Linetype lineval;
+ /* When compressing blank lines, act as if the file
+ is preceded by a large number of blank lines. */
+ blankmax = blankcount = 1000;
for (;;) {
linenum++;
- lineval = get_line();
+ lineval = parseline();
trans_table[ifstate[depth]][lineval]();
debug("process %s -> %s depth %d",
linetype_name[lineval],
@@ -526,7 +541,7 @@ process(void)
* help from skipcomment().
*/
static Linetype
-get_line(void)
+parseline(void)
{
const char *cp;
int cursym;
@@ -595,9 +610,21 @@ get_line(void)
if (incomment)
linestate = LS_DIRTY;
}
- /* skipcomment should have changed the state */
- if (linestate == LS_HASH)
- abort(); /* bug */
+ /* skipcomment normally changes the state, except
+ if the last line of the file lacks a newline, or
+ if there is too much whitespace in a directive */
+ if (linestate == LS_HASH) {
+ size_t len = cp - tline;
+ if (fgets(tline + len, MAXLINE - len, input) == NULL) {
+ /* append the missing newline */
+ tline[len+0] = '\n';
+ tline[len+1] = '\0';
+ cp++;
+ linestate = LS_START;
+ } else {
+ linestate = LS_DIRTY;
+ }
+ }
}
if (linestate == LS_DIRTY) {
while (*cp != '\0')
@@ -610,17 +637,40 @@ get_line(void)
/*
* These are the binary operators that are supported by the expression
- * evaluator. Note that if support for division is added then we also
- * need short-circuiting booleans because of divide-by-zero.
+ * evaluator.
*/
-static int op_lt(int a, int b) { return (a < b); }
-static int op_gt(int a, int b) { return (a > b); }
-static int op_le(int a, int b) { return (a <= b); }
-static int op_ge(int a, int b) { return (a >= b); }
-static int op_eq(int a, int b) { return (a == b); }
-static int op_ne(int a, int b) { return (a != b); }
-static int op_or(int a, int b) { return (a || b); }
-static int op_and(int a, int b) { return (a && b); }
+static Linetype op_strict(int *p, int v, Linetype at, Linetype bt) {
+ if(at == LT_IF || bt == LT_IF) return (LT_IF);
+ return (*p = v, v ? LT_TRUE : LT_FALSE);
+}
+static Linetype op_lt(int *p, Linetype at, int a, Linetype bt, int b) {
+ return op_strict(p, a < b, at, bt);
+}
+static Linetype op_gt(int *p, Linetype at, int a, Linetype bt, int b) {
+ return op_strict(p, a > b, at, bt);
+}
+static Linetype op_le(int *p, Linetype at, int a, Linetype bt, int b) {
+ return op_strict(p, a <= b, at, bt);
+}
+static Linetype op_ge(int *p, Linetype at, int a, Linetype bt, int b) {
+ return op_strict(p, a >= b, at, bt);
+}
+static Linetype op_eq(int *p, Linetype at, int a, Linetype bt, int b) {
+ return op_strict(p, a == b, at, bt);
+}
+static Linetype op_ne(int *p, Linetype at, int a, Linetype bt, int b) {
+ return op_strict(p, a != b, at, bt);
+}
+static Linetype op_or(int *p, Linetype at, int a, Linetype bt, int b) {
+ if (!strictlogic && (at == LT_TRUE || bt == LT_TRUE))
+ return (*p = 1, LT_TRUE);
+ return op_strict(p, a || b, at, bt);
+}
+static Linetype op_and(int *p, Linetype at, int a, Linetype bt, int b) {
+ if (!strictlogic && (at == LT_FALSE || bt == LT_FALSE))
+ return (*p = 0, LT_FALSE);
+ return op_strict(p, a && b, at, bt);
+}
/*
* An evaluation function takes three arguments, as follows: (1) a pointer to
@@ -629,8 +679,8 @@ static int op_and(int a, int b) { return (a && b); }
* value of the expression; and (3) a pointer to a char* that points to the
* expression to be evaluated and that is updated to the end of the expression
* when evaluation is complete. The function returns LT_FALSE if the value of
- * the expression is zero, LT_TRUE if it is non-zero, or LT_IF if the
- * expression could not be evaluated.
+ * the expression is zero, LT_TRUE if it is non-zero, LT_IF if the expression
+ * depends on an unknown symbol, or LT_ERROR if there is a parse failure.
*/
struct ops;
@@ -649,7 +699,7 @@ static const struct ops {
eval_fn *inner;
struct op {
const char *str;
- int (*fn)(int, int);
+ Linetype (*fn)(int *, Linetype, int, Linetype, int);
} op[5];
} eval_ops[] = {
{ eval_table, { { "||", op_or } } },
@@ -664,8 +714,8 @@ static const struct ops {
/*
* Function for evaluating the innermost parts of expressions,
- * viz. !expr (expr) defined(symbol) symbol number
- * We reset the keepthis flag when we find a non-constant subexpression.
+ * viz. !expr (expr) number defined(symbol) symbol
+ * We reset the constexpr flag in the last two cases.
*/
static Linetype
eval_unary(const struct ops *ops, int *valp, const char **cpp)
@@ -673,68 +723,83 @@ eval_unary(const struct ops *ops, int *valp, const char **cpp)
const char *cp;
char *ep;
int sym;
+ bool defparen;
+ Linetype lt;
cp = skipcomment(*cpp);
if (*cp == '!') {
debug("eval%d !", ops - eval_ops);
cp++;
- if (eval_unary(ops, valp, &cp) == LT_IF) {
- *cpp = cp;
- return (LT_IF);
+ lt = eval_unary(ops, valp, &cp);
+ if (lt == LT_ERROR)
+ return (LT_ERROR);
+ if (lt != LT_IF) {
+ *valp = !*valp;
+ lt = *valp ? LT_TRUE : LT_FALSE;
}
- *valp = !*valp;
} else if (*cp == '(') {
cp++;
debug("eval%d (", ops - eval_ops);
- if (eval_table(eval_ops, valp, &cp) == LT_IF)
- return (LT_IF);
+ lt = eval_table(eval_ops, valp, &cp);
+ if (lt == LT_ERROR)
+ return (LT_ERROR);
cp = skipcomment(cp);
if (*cp++ != ')')
- return (LT_IF);
+ return (LT_ERROR);
} else if (isdigit((unsigned char)*cp)) {
debug("eval%d number", ops - eval_ops);
*valp = strtol(cp, &ep, 0);
+ if (ep == cp)
+ return (LT_ERROR);
+ lt = *valp ? LT_TRUE : LT_FALSE;
cp = skipsym(cp);
} else if (strncmp(cp, "defined", 7) == 0 && endsym(cp[7])) {
cp = skipcomment(cp+7);
debug("eval%d defined", ops - eval_ops);
- if (*cp++ != '(')
- return (LT_IF);
- cp = skipcomment(cp);
+ if (*cp == '(') {
+ cp = skipcomment(cp+1);
+ defparen = true;
+ } else {
+ defparen = false;
+ }
sym = findsym(cp);
- cp = skipsym(cp);
- cp = skipcomment(cp);
- if (*cp++ != ')')
- return (LT_IF);
- if (sym >= 0)
+ if (sym < 0) {
+ lt = LT_IF;
+ } else {
*valp = (value[sym] != NULL);
- else {
- *cpp = cp;
- return (LT_IF);
+ lt = *valp ? LT_TRUE : LT_FALSE;
}
- keepthis = false;
+ cp = skipsym(cp);
+ cp = skipcomment(cp);
+ if (defparen && *cp++ != ')')
+ return (LT_ERROR);
+ constexpr = false;
} else if (!endsym(*cp)) {
debug("eval%d symbol", ops - eval_ops);
sym = findsym(cp);
- if (sym < 0)
- return (LT_IF);
- if (value[sym] == NULL)
+ cp = skipsym(cp);
+ if (sym < 0) {
+ lt = LT_IF;
+ cp = skipargs(cp);
+ } else if (value[sym] == NULL) {
*valp = 0;
- else {
+ lt = LT_FALSE;
+ } else {
*valp = strtol(value[sym], &ep, 0);
if (*ep != '\0' || ep == value[sym])
- return (LT_IF);
+ return (LT_ERROR);
+ lt = *valp ? LT_TRUE : LT_FALSE;
+ cp = skipargs(cp);
}
- cp = skipsym(cp);
- keepthis = false;
+ constexpr = false;
} else {
debug("eval%d bad expr", ops - eval_ops);
- return (LT_IF);
+ return (LT_ERROR);
}
*cpp = cp;
debug("eval%d = %d", ops - eval_ops, *valp);
- return (*valp ? LT_TRUE : LT_FALSE);
+ return (lt);
}
/*
@@ -746,11 +811,13 @@ eval_table(const struct ops *ops, int *valp, const char **cpp)
const struct op *op;
const char *cp;
int val;
- Linetype lhs, rhs;
+ Linetype lt, rt;
debug("eval%d", ops - eval_ops);
cp = *cpp;
- lhs = ops->inner(ops+1, valp, &cp);
+ lt = ops->inner(ops+1, valp, &cp);
+ if (lt == LT_ERROR)
+ return (LT_ERROR);
for (;;) {
cp = skipcomment(cp);
for (op = ops->op; op->str != NULL; op++)
@@ -760,32 +827,16 @@ eval_table(const struct ops *ops, int *valp, const char **cpp)
break;
cp += strlen(op->str);
debug("eval%d %s", ops - eval_ops, op->str);
- rhs = ops->inner(ops+1, &val, &cp);
- if (op->fn == op_and && (lhs == LT_FALSE || rhs == LT_FALSE)) {
- debug("eval%d: and always false", ops - eval_ops);
- if (lhs == LT_IF)
- *valp = val;
- lhs = LT_FALSE;
- continue;
- }
- if (op->fn == op_or && (lhs == LT_TRUE || rhs == LT_TRUE)) {
- debug("eval%d: or always true", ops - eval_ops);
- if (lhs == LT_IF)
- *valp = val;
- lhs = LT_TRUE;
- continue;
- }
- if (rhs == LT_IF)
- lhs = LT_IF;
- if (lhs != LT_IF)
- *valp = op->fn(*valp, val);
+ rt = ops->inner(ops+1, &val, &cp);
+ if (rt == LT_ERROR)
+ return (LT_ERROR);
+ lt = op->fn(valp, lt, *valp, rt, val);
}
*cpp = cp;
debug("eval%d = %d", ops - eval_ops, *valp);
- if (lhs != LT_IF)
- lhs = (*valp ? LT_TRUE : LT_FALSE);
- return lhs;
+ debug("eval%d lt = %s", ops - eval_ops, linetype_name[lt]);
+ return (lt);
}
/*
@@ -796,17 +847,14 @@ eval_table(const struct ops *ops, int *valp, const char **cpp)
static Linetype
ifeval(const char **cpp)
{
- const char *cp = *cpp;
int ret;
- int val;
+ int val = 0;
debug("eval %s", *cpp);
- keepthis = killconsts ? false : true;
- ret = eval_table(eval_ops, &val, &cp);
- if (ret != LT_IF)
- *cpp = cp;
+ constexpr = killconsts ? false : true;
+ ret = eval_table(eval_ops, &val, cpp);
debug("eval = %d", val);
- return (keepthis ? LT_IF : ret);
+ return (constexpr ? LT_IF : ret == LT_ERROR ? LT_IF : ret);
}
/*
@@ -918,6 +966,31 @@ skipcomment(const char *cp)
}
/*
+ * Skip macro arguments.
+ */
+static const char *
+skipargs(const char *cp)
+{
+ const char *ocp = cp;
+ int level = 0;
+ cp = skipcomment(cp);
+ if (*cp != '(')
+ return (cp);
+ do {
+ if (*cp == '(')
+ level++;
+ if (*cp == ')')
+ level--;
+ cp = skipcomment(cp+1);
+ } while (level != 0 && *cp != '\0');
+ if (level == 0)
+ return (cp);
+ else
+ /* Rewind and re-detect the syntax error later. */
+ return (ocp);
+}
+
+/*
* Skip over an identifier.
*/
static const char *
@@ -929,7 +1002,7 @@ skipsym(const char *cp)
}
/*
- * Look for the symbol in the symbol table. If is is found, we return
+ * Look for the symbol in the symbol table. If it is found, we return
* the symbol table index, else we return -1.
*/
static int
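(Usage sketch, not part of the patch, summarising the unifdef options introduced above: -B compresses blank lines around deleted sections, -b is the documented spelling of the old -l, and -K keeps #ifs whose truth cannot be fully resolved; -B and -b are mutually exclusive. The file name below is a placeholder.)

	unifdef -B -DCONFIG_FOO -UCONFIG_BAR file.h   # drop dead branches, squeeze leftover blank lines
	unifdef -b -DCONFIG_FOO file.h                # preserve line numbering by blanking deleted lines
	unifdef -K -k -DVALUE=2 file.h                # evaluate constant #ifs, keep ambiguous ones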
diff --git a/security/Makefile b/security/Makefile
index bb44e350c61..da20a193c8d 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -8,7 +8,8 @@ subdir-$(CONFIG_SECURITY_SMACK) += smack
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
# always enable default capabilities
-obj-y += commoncap.o min_addr.o
+obj-y += commoncap.o
+obj-$(CONFIG_MMU) += min_addr.o
# Object file lists
obj-$(CONFIG_SECURITY) += security.o capability.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 165eb5397ea..c41afe6639a 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -97,7 +97,6 @@ static inline unsigned long ima_hash_key(u8 *digest)
/* iint cache flags */
#define IMA_MEASURED 1
-#define IMA_IINT_DUMP_STACK 512
/* integrity data associated with an inode */
struct ima_iint_cache {
@@ -128,8 +127,6 @@ void ima_template_show(struct seq_file *m, void *e,
*/
struct ima_iint_cache *ima_iint_insert(struct inode *inode);
struct ima_iint_cache *ima_iint_find_get(struct inode *inode);
-struct ima_iint_cache *ima_iint_find_insert_get(struct inode *inode);
-void ima_iint_delete(struct inode *inode);
void iint_free(struct kref *kref);
void iint_rcu_free(struct rcu_head *rcu);
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index a4e2b1dac94..fa592ff1ac1 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -19,8 +19,6 @@
#include <linux/radix-tree.h>
#include "ima.h"
-#define ima_iint_delete ima_inode_free
-
RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);
@@ -45,22 +43,21 @@ out:
return iint;
}
-/* Allocate memory for the iint associated with the inode
- * from the iint_cache slab, initialize the iint, and
- * insert it into the radix tree.
- *
- * On success return a pointer to the iint; on failure return NULL.
+/**
+ * ima_inode_alloc - allocate an iint associated with an inode
+ * @inode: pointer to the inode
*/
-struct ima_iint_cache *ima_iint_insert(struct inode *inode)
+int ima_inode_alloc(struct inode *inode)
{
struct ima_iint_cache *iint = NULL;
int rc = 0;
if (!ima_initialized)
- return iint;
+ return 0;
+
iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
if (!iint)
- return iint;
+ return -ENOMEM;
rc = radix_tree_preload(GFP_NOFS);
if (rc < 0)
@@ -70,65 +67,13 @@ struct ima_iint_cache *ima_iint_insert(struct inode *inode)
rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
spin_unlock(&ima_iint_lock);
out:
- if (rc < 0) {
+ if (rc < 0)
kmem_cache_free(iint_cache, iint);
- if (rc == -EEXIST) {
- spin_lock(&ima_iint_lock);
- iint = radix_tree_lookup(&ima_iint_store,
- (unsigned long)inode);
- spin_unlock(&ima_iint_lock);
- } else
- iint = NULL;
- }
- radix_tree_preload_end();
- return iint;
-}
-
-/**
- * ima_inode_alloc - allocate an iint associated with an inode
- * @inode: pointer to the inode
- *
- * Return 0 on success, 1 on failure.
- */
-int ima_inode_alloc(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!ima_initialized)
- return 0;
-
- iint = ima_iint_insert(inode);
- if (!iint)
- return 1;
- return 0;
-}
-
-/* ima_iint_find_insert_get - get the iint associated with an inode
- *
- * Most insertions are done at inode_alloc, except those allocated
- * before late_initcall. When the iint does not exist, allocate it,
- * initialize and insert it, and increment the iint refcount.
- *
- * (Can't initialize at security_initcall before any inodes are
- * allocated, got to wait at least until proc_init.)
- *
- * Return the iint.
- */
-struct ima_iint_cache *ima_iint_find_insert_get(struct inode *inode)
-{
- struct ima_iint_cache *iint = NULL;
- iint = ima_iint_find_get(inode);
- if (iint)
- return iint;
-
- iint = ima_iint_insert(inode);
- if (iint)
- kref_get(&iint->refcount);
+ radix_tree_preload_end();
- return iint;
+ return rc;
}
-EXPORT_SYMBOL_GPL(ima_iint_find_insert_get);
/* iint_free - called when the iint refcount goes to zero */
void iint_free(struct kref *kref)
@@ -164,12 +109,12 @@ void iint_rcu_free(struct rcu_head *rcu_head)
}
/**
- * ima_iint_delete - called on integrity_inode_free
+ * ima_inode_free - called on security_inode_free
* @inode: pointer to the inode
*
* Free the integrity information(iint) associated with an inode.
*/
-void ima_iint_delete(struct inode *inode)
+void ima_inode_free(struct inode *inode)
{
struct ima_iint_cache *iint;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index b85e61bcf24..a89f44d5e03 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -13,8 +13,8 @@
* License.
*
* File: ima_main.c
- * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
- * and ima_path_check.
+ * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
+ * and ima_path_check.
*/
#include <linux/module.h>
#include <linux/file.h>
@@ -35,6 +35,100 @@ static int __init hash_setup(char *str)
}
__setup("ima_hash=", hash_setup);
+struct ima_imbalance {
+ struct hlist_node node;
+ unsigned long fsmagic;
+};
+
+/*
+ * ima_limit_imbalance - emit one imbalance message per filesystem type
+ *
+ * Maintain list of filesystem types that do not measure files properly.
+ * Return false if unknown, true if known.
+ */
+static bool ima_limit_imbalance(struct file *file)
+{
+ static DEFINE_SPINLOCK(ima_imbalance_lock);
+ static HLIST_HEAD(ima_imbalance_list);
+
+ struct super_block *sb = file->f_dentry->d_sb;
+ struct ima_imbalance *entry;
+ struct hlist_node *node;
+ bool found = false;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, node, &ima_imbalance_list, node) {
+ if (entry->fsmagic == sb->s_magic) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (found)
+ goto out;
+
+ entry = kmalloc(sizeof(*entry), GFP_NOFS);
+ if (!entry)
+ goto out;
+ entry->fsmagic = sb->s_magic;
+ spin_lock(&ima_imbalance_lock);
+ /*
+ * we could have raced and something else might have added this fs
+ * to the list, but we don't really care
+ */
+ hlist_add_head_rcu(&entry->node, &ima_imbalance_list);
+ spin_unlock(&ima_imbalance_lock);
+ printk(KERN_INFO "IMA: unmeasured files on fsmagic: %lX\n",
+ entry->fsmagic);
+out:
+ return found;
+}
+
+/*
+ * Update the counts given an fmode_t
+ */
+static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
+{
+ BUG_ON(!mutex_is_locked(&iint->mutex));
+
+ iint->opencount++;
+ if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ iint->readcount++;
+ if (mode & FMODE_WRITE)
+ iint->writecount++;
+}
+
+/*
+ * Decrement ima counts
+ */
+static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
+ struct file *file)
+{
+ mode_t mode = file->f_mode;
+ BUG_ON(!mutex_is_locked(&iint->mutex));
+
+ iint->opencount--;
+ if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
+ iint->readcount--;
+ if (mode & FMODE_WRITE) {
+ iint->writecount--;
+ if (iint->writecount == 0) {
+ if (iint->version != inode->i_version)
+ iint->flags &= ~IMA_MEASURED;
+ }
+ }
+
+ if (((iint->opencount < 0) ||
+ (iint->readcount < 0) ||
+ (iint->writecount < 0)) &&
+ !ima_limit_imbalance(file)) {
+ printk(KERN_INFO "%s: open/free imbalance (r:%ld w:%ld o:%ld)\n",
+ __FUNCTION__, iint->readcount, iint->writecount,
+ iint->opencount);
+ dump_stack();
+ }
+}
+
/**
* ima_file_free - called on __fput()
* @file: pointer to file structure being freed
@@ -54,29 +148,7 @@ void ima_file_free(struct file *file)
return;
mutex_lock(&iint->mutex);
- if (iint->opencount <= 0) {
- printk(KERN_INFO
- "%s: %s open/free imbalance (r:%ld w:%ld o:%ld f:%ld)\n",
- __FUNCTION__, file->f_dentry->d_name.name,
- iint->readcount, iint->writecount,
- iint->opencount, atomic_long_read(&file->f_count));
- if (!(iint->flags & IMA_IINT_DUMP_STACK)) {
- dump_stack();
- iint->flags |= IMA_IINT_DUMP_STACK;
- }
- }
- iint->opencount--;
-
- if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- iint->readcount--;
-
- if (file->f_mode & FMODE_WRITE) {
- iint->writecount--;
- if (iint->writecount == 0) {
- if (iint->version != inode->i_version)
- iint->flags &= ~IMA_MEASURED;
- }
- }
+ ima_dec_counts(iint, inode, file);
mutex_unlock(&iint->mutex);
kref_put(&iint->refcount, iint_free);
}
@@ -116,8 +188,7 @@ static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
{
int rc = 0;
- iint->opencount++;
- iint->readcount++;
+ ima_inc_counts(iint, file->f_mode);
rc = ima_collect_measurement(iint, file);
if (!rc)
@@ -125,15 +196,6 @@ static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
return rc;
}
-static void ima_update_counts(struct ima_iint_cache *iint, int mask)
-{
- iint->opencount++;
- if ((mask & MAY_WRITE) || (mask == 0))
- iint->writecount++;
- else if (mask & (MAY_READ | MAY_EXEC))
- iint->readcount++;
-}
-
/**
* ima_path_check - based on policy, collect/store measurement.
* @path: contains a pointer to the path to be measured
@@ -152,7 +214,7 @@ static void ima_update_counts(struct ima_iint_cache *iint, int mask)
* Always return 0 and audit dentry_open failures.
* (Return code will be based upon measurement appraisal.)
*/
-int ima_path_check(struct path *path, int mask, int update_counts)
+int ima_path_check(struct path *path, int mask)
{
struct inode *inode = path->dentry->d_inode;
struct ima_iint_cache *iint;
@@ -161,13 +223,11 @@ int ima_path_check(struct path *path, int mask, int update_counts)
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
- iint = ima_iint_find_insert_get(inode);
+ iint = ima_iint_find_get(inode);
if (!iint)
return 0;
mutex_lock(&iint->mutex);
- if (update_counts)
- ima_update_counts(iint, mask);
rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
if (rc < 0)
@@ -219,7 +279,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
- iint = ima_iint_find_insert_get(inode);
+ iint = ima_iint_find_get(inode);
if (!iint)
return -ENOMEM;
@@ -238,39 +298,6 @@ out:
}
/*
- * ima_counts_put - decrement file counts
- *
- * File counts are incremented in ima_path_check. On file open
- * error, such as ETXTBSY, decrement the counts to prevent
- * unnecessary imbalance messages.
- */
-void ima_counts_put(struct path *path, int mask)
-{
- struct inode *inode = path->dentry->d_inode;
- struct ima_iint_cache *iint;
-
- /* The inode may already have been freed, freeing the iint
- * with it. Verify the inode is not NULL before dereferencing
- * it.
- */
- if (!ima_initialized || !inode || !S_ISREG(inode->i_mode))
- return;
- iint = ima_iint_find_insert_get(inode);
- if (!iint)
- return;
-
- mutex_lock(&iint->mutex);
- iint->opencount--;
- if ((mask & MAY_WRITE) || (mask == 0))
- iint->writecount--;
- else if (mask & (MAY_READ | MAY_EXEC))
- iint->readcount--;
- mutex_unlock(&iint->mutex);
-
- kref_put(&iint->refcount, iint_free);
-}
-
-/*
* ima_counts_get - increment file counts
*
* - for IPC shm and shmat file.
@@ -286,16 +313,11 @@ void ima_counts_get(struct file *file)
if (!ima_initialized || !S_ISREG(inode->i_mode))
return;
- iint = ima_iint_find_insert_get(inode);
+ iint = ima_iint_find_get(inode);
if (!iint)
return;
mutex_lock(&iint->mutex);
- iint->opencount++;
- if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- iint->readcount++;
-
- if (file->f_mode & FMODE_WRITE)
- iint->writecount++;
+ ima_inc_counts(iint, file->f_mode);
mutex_unlock(&iint->mutex);
kref_put(&iint->refcount, iint_free);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 06ec722897b..e9c2e7c584d 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1194,7 +1194,7 @@ long keyctl_get_security(key_serial_t keyid,
* have the authorisation token handy */
instkey = key_get_instantiation_authkey(keyid);
if (IS_ERR(instkey))
- return PTR_ERR(key_ref);
+ return PTR_ERR(instkey);
key_put(instkey);
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
@@ -1236,6 +1236,7 @@ long keyctl_get_security(key_serial_t keyid,
*/
long keyctl_session_to_parent(void)
{
+#ifdef TIF_NOTIFY_RESUME
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
struct cred *cred, *oldcred;
@@ -1326,6 +1327,15 @@ not_permitted:
error_keyring:
key_ref_put(keyring_r);
return ret;
+
+#else /* !TIF_NOTIFY_RESUME */
+ /*
+ * To be removed when TIF_NOTIFY_RESUME has been implemented on
+ * m68k/xtensa
+ */
+#warning TIF_NOTIFY_RESUME not implemented
+ return -EOPNOTSUPP;
+#endif /* !TIF_NOTIFY_RESUME */
}
/*****************************************************************************/
diff --git a/security/min_addr.c b/security/min_addr.c
index fc43c9d3708..e86f297522b 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -43,7 +43,7 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
return ret;
}
-int __init init_mmap_min_addr(void)
+static int __init init_mmap_min_addr(void)
{
update_mmap_min_addr();
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 8346938809b..9a6c58881c0 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -12,7 +12,6 @@
#include "common.h"
#include "tomoyo.h"
#include "realpath.h"
-#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
/*
* tomoyo_globally_readable_file_entry is a structure which is used for holding
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 1497dce1b04..c5699863643 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -172,14 +172,15 @@ static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
return v;
}
-static inline void aaci_chan_wait_ready(struct aaci_runtime *aacirun)
+static inline void
+aaci_chan_wait_ready(struct aaci_runtime *aacirun, unsigned long mask)
{
u32 val;
int timeout = 5000;
do {
val = readl(aacirun->base + AACI_SR);
- } while (val & (SR_TXB|SR_RXB) && timeout--);
+ } while (val & mask && timeout--);
}
@@ -208,8 +209,10 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
writel(0, aacirun->base + AACI_IE);
return;
}
- ptr = aacirun->ptr;
+ spin_lock(&aacirun->lock);
+
+ ptr = aacirun->ptr;
do {
unsigned int len = aacirun->fifosz;
u32 val;
@@ -217,9 +220,9 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
if (aacirun->bytes <= 0) {
aacirun->bytes += aacirun->period;
aacirun->ptr = ptr;
- spin_unlock(&aaci->lock);
+ spin_unlock(&aacirun->lock);
snd_pcm_period_elapsed(aacirun->substream);
- spin_lock(&aaci->lock);
+ spin_lock(&aacirun->lock);
}
if (!(aacirun->cr & CR_EN))
break;
@@ -245,7 +248,10 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
ptr = aacirun->start;
}
} while(1);
+
aacirun->ptr = ptr;
+
+ spin_unlock(&aacirun->lock);
}
if (mask & ISR_URINTR) {
@@ -263,6 +269,8 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
return;
}
+ spin_lock(&aacirun->lock);
+
ptr = aacirun->ptr;
do {
unsigned int len = aacirun->fifosz;
@@ -271,9 +279,9 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
if (aacirun->bytes <= 0) {
aacirun->bytes += aacirun->period;
aacirun->ptr = ptr;
- spin_unlock(&aaci->lock);
+ spin_unlock(&aacirun->lock);
snd_pcm_period_elapsed(aacirun->substream);
- spin_lock(&aaci->lock);
+ spin_lock(&aacirun->lock);
}
if (!(aacirun->cr & CR_EN))
break;
@@ -301,6 +309,8 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
} while (1);
aacirun->ptr = ptr;
+
+ spin_unlock(&aacirun->lock);
}
}
@@ -310,7 +320,6 @@ static irqreturn_t aaci_irq(int irq, void *devid)
u32 mask;
int i;
- spin_lock(&aaci->lock);
mask = readl(aaci->base + AACI_ALLINTS);
if (mask) {
u32 m = mask;
@@ -320,7 +329,6 @@ static irqreturn_t aaci_irq(int irq, void *devid)
}
}
}
- spin_unlock(&aaci->lock);
return mask ? IRQ_HANDLED : IRQ_NONE;
}
@@ -330,63 +338,6 @@ static irqreturn_t aaci_irq(int irq, void *devid)
/*
* ALSA support.
*/
-
-struct aaci_stream {
- unsigned char codec_idx;
- unsigned char rate_idx;
-};
-
-static struct aaci_stream aaci_streams[] = {
- [ACSTREAM_FRONT] = {
- .codec_idx = 0,
- .rate_idx = AC97_RATES_FRONT_DAC,
- },
- [ACSTREAM_SURROUND] = {
- .codec_idx = 0,
- .rate_idx = AC97_RATES_SURR_DAC,
- },
- [ACSTREAM_LFE] = {
- .codec_idx = 0,
- .rate_idx = AC97_RATES_LFE_DAC,
- },
-};
-
-static inline unsigned int aaci_rate_mask(struct aaci *aaci, int streamid)
-{
- struct aaci_stream *s = aaci_streams + streamid;
- return aaci->ac97_bus->codec[s->codec_idx]->rates[s->rate_idx];
-}
-
-static unsigned int rate_list[] = {
- 5512, 8000, 11025, 16000, 22050, 32000, 44100,
- 48000, 64000, 88200, 96000, 176400, 192000
-};
-
-/*
- * Double-rate rule: we can support double rate iff channels == 2
- * (unimplemented)
- */
-static int
-aaci_rule_rate_by_channels(struct snd_pcm_hw_params *p, struct snd_pcm_hw_rule *rule)
-{
- struct aaci *aaci = rule->private;
- unsigned int rate_mask = SNDRV_PCM_RATE_8000_48000|SNDRV_PCM_RATE_5512;
- struct snd_interval *c = hw_param_interval(p, SNDRV_PCM_HW_PARAM_CHANNELS);
-
- switch (c->max) {
- case 6:
- rate_mask &= aaci_rate_mask(aaci, ACSTREAM_LFE);
- case 4:
- rate_mask &= aaci_rate_mask(aaci, ACSTREAM_SURROUND);
- case 2:
- rate_mask &= aaci_rate_mask(aaci, ACSTREAM_FRONT);
- }
-
- return snd_interval_list(hw_param_interval(p, rule->var),
- ARRAY_SIZE(rate_list), rate_list,
- rate_mask);
-}
-
static struct snd_pcm_hardware aaci_hw_info = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
@@ -400,10 +351,7 @@ static struct snd_pcm_hardware aaci_hw_info = {
*/
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- /* should this be continuous or knot? */
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- .rate_max = 48000,
- .rate_min = 4000,
+ /* rates are setup from the AC'97 codec */
.channels_min = 2,
.channels_max = 6,
.buffer_bytes_max = 64 * 1024,
@@ -423,6 +371,12 @@ static int __aaci_pcm_open(struct aaci *aaci,
aacirun->substream = substream;
runtime->private_data = aacirun;
runtime->hw = aaci_hw_info;
+ runtime->hw.rates = aacirun->pcm->rates;
+ snd_pcm_limit_hw_rates(runtime);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ aacirun->pcm->r[1].slots)
+ snd_ac97_pcm_double_rate_rules(runtime);
/*
* FIXME: ALSA specifies fifo_size in bytes. If we're in normal
@@ -433,17 +387,6 @@ static int __aaci_pcm_open(struct aaci *aaci,
*/
runtime->hw.fifo_size = aaci->fifosize * 2;
- /*
- * Add rule describing hardware rate dependency
- * on the number of channels.
- */
- ret = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- aaci_rule_rate_by_channels, aaci,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- SNDRV_PCM_HW_PARAM_RATE, -1);
- if (ret)
- goto out;
-
ret = request_irq(aaci->dev->irq[0], aaci_irq, IRQF_SHARED|IRQF_DISABLED,
DRIVER_NAME, aaci);
if (ret)
@@ -507,18 +450,22 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
err = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(params));
- if (err < 0)
- goto out;
+ if (err >= 0) {
+ unsigned int rate = params_rate(params);
+ int dbl = rate > 48000;
- err = snd_ac97_pcm_open(aacirun->pcm, params_rate(params),
- params_channels(params),
- aacirun->pcm->r[0].slots);
- if (err)
- goto out;
+ err = snd_ac97_pcm_open(aacirun->pcm, rate,
+ params_channels(params),
+ aacirun->pcm->r[dbl].slots);
- aacirun->pcm_open = 1;
+ aacirun->pcm_open = err == 0;
+ aacirun->cr = CR_FEN | CR_COMPACT | CR_SZ16;
+ aacirun->fifosz = aaci->fifosize * 4;
+
+ if (aacirun->cr & CR_COMPACT)
+ aacirun->fifosz >>= 1;
+ }
- out:
return err;
}
@@ -527,7 +474,7 @@ static int aaci_pcm_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct aaci_runtime *aacirun = runtime->private_data;
- aacirun->start = (void *)runtime->dma_area;
+ aacirun->start = runtime->dma_area;
aacirun->end = aacirun->start + snd_pcm_lib_buffer_bytes(substream);
aacirun->ptr = aacirun->start;
aacirun->period =
@@ -627,14 +574,9 @@ static int aaci_pcm_playback_hw_params(struct snd_pcm_substream *substream,
* Enable FIFO, compact mode, 16 bits per sample.
* FIXME: double rate slots?
*/
- if (ret >= 0) {
- aacirun->cr = CR_FEN | CR_COMPACT | CR_SZ16;
+ if (ret >= 0)
aacirun->cr |= channels_to_txmask[channels];
- aacirun->fifosz = aaci->fifosize * 4;
- if (aacirun->cr & CR_COMPACT)
- aacirun->fifosz >>= 1;
- }
return ret;
}
@@ -646,7 +588,7 @@ static void aaci_pcm_playback_stop(struct aaci_runtime *aacirun)
ie &= ~(IE_URIE|IE_TXIE);
writel(ie, aacirun->base + AACI_IE);
aacirun->cr &= ~CR_EN;
- aaci_chan_wait_ready(aacirun);
+ aaci_chan_wait_ready(aacirun, SR_TXB);
writel(aacirun->cr, aacirun->base + AACI_TXCR);
}
@@ -654,7 +596,7 @@ static void aaci_pcm_playback_start(struct aaci_runtime *aacirun)
{
u32 ie;
- aaci_chan_wait_ready(aacirun);
+ aaci_chan_wait_ready(aacirun, SR_TXB);
aacirun->cr |= CR_EN;
ie = readl(aacirun->base + AACI_IE);
@@ -665,12 +607,12 @@ static void aaci_pcm_playback_start(struct aaci_runtime *aacirun)
static int aaci_pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct aaci *aaci = substream->private_data;
struct aaci_runtime *aacirun = substream->runtime->private_data;
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&aaci->lock, flags);
+ spin_lock_irqsave(&aacirun->lock, flags);
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
aaci_pcm_playback_start(aacirun);
@@ -697,7 +639,8 @@ static int aaci_pcm_playback_trigger(struct snd_pcm_substream *substream, int cm
default:
ret = -EINVAL;
}
- spin_unlock_irqrestore(&aaci->lock, flags);
+
+ spin_unlock_irqrestore(&aacirun->lock, flags);
return ret;
}
@@ -721,18 +664,10 @@ static int aaci_pcm_capture_hw_params(struct snd_pcm_substream *substream,
int ret;
ret = aaci_pcm_hw_params(substream, aacirun, params);
-
- if (ret >= 0) {
- aacirun->cr = CR_FEN | CR_COMPACT | CR_SZ16;
-
+ if (ret >= 0)
/* Line in record: slot 3 and 4 */
aacirun->cr |= CR_SL3 | CR_SL4;
- aacirun->fifosz = aaci->fifosize * 4;
-
- if (aacirun->cr & CR_COMPACT)
- aacirun->fifosz >>= 1;
- }
return ret;
}
@@ -740,7 +675,7 @@ static void aaci_pcm_capture_stop(struct aaci_runtime *aacirun)
{
u32 ie;
- aaci_chan_wait_ready(aacirun);
+ aaci_chan_wait_ready(aacirun, SR_RXB);
ie = readl(aacirun->base + AACI_IE);
ie &= ~(IE_ORIE | IE_RXIE);
@@ -755,7 +690,7 @@ static void aaci_pcm_capture_start(struct aaci_runtime *aacirun)
{
u32 ie;
- aaci_chan_wait_ready(aacirun);
+ aaci_chan_wait_ready(aacirun, SR_RXB);
#ifdef DEBUG
/* RX Timeout value: bits 28:17 in RXCR */
@@ -772,12 +707,11 @@ static void aaci_pcm_capture_start(struct aaci_runtime *aacirun)
static int aaci_pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct aaci *aaci = substream->private_data;
struct aaci_runtime *aacirun = substream->runtime->private_data;
unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&aaci->lock, flags);
+ spin_lock_irqsave(&aacirun->lock, flags);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -806,7 +740,7 @@ static int aaci_pcm_capture_trigger(struct snd_pcm_substream *substream, int cmd
ret = -EINVAL;
}
- spin_unlock_irqrestore(&aaci->lock, flags);
+ spin_unlock_irqrestore(&aacirun->lock, flags);
return ret;
}
@@ -889,6 +823,12 @@ static struct ac97_pcm ac97_defs[] __devinitdata = {
(1 << AC97_SLOT_PCM_SRIGHT) |
(1 << AC97_SLOT_LFE),
},
+ [1] = {
+ .slots = (1 << AC97_SLOT_PCM_LEFT) |
+ (1 << AC97_SLOT_PCM_RIGHT) |
+ (1 << AC97_SLOT_PCM_LEFT_0) |
+ (1 << AC97_SLOT_PCM_RIGHT_0),
+ },
},
},
[1] = { /* PCM in */
@@ -1001,7 +941,6 @@ static struct aaci * __devinit aaci_init_card(struct amba_device *dev)
aaci = card->private_data;
mutex_init(&aaci->ac97_sem);
- spin_lock_init(&aaci->lock);
aaci->card = card;
aaci->dev = dev;
@@ -1028,7 +967,7 @@ static int __devinit aaci_init_pcm(struct aaci *aaci)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &aaci_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &aaci_capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- NULL, 0, 64 * 104);
+ NULL, 0, 64 * 1024);
}
return ret;
@@ -1088,12 +1027,14 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
/*
* Playback uses AACI channel 0
*/
+ spin_lock_init(&aaci->playback.lock);
aaci->playback.base = aaci->base + AACI_CSCH1;
aaci->playback.fifo = aaci->base + AACI_DR1;
/*
* Capture uses AACI channel 0
*/
+ spin_lock_init(&aaci->capture.lock);
aaci->capture.base = aaci->base + AACI_CSCH1;
aaci->capture.fifo = aaci->base + AACI_DR1;
diff --git a/sound/arm/aaci.h b/sound/arm/aaci.h
index 924f69c1c44..6a4a2eebdda 100644
--- a/sound/arm/aaci.h
+++ b/sound/arm/aaci.h
@@ -202,6 +202,7 @@
struct aaci_runtime {
void __iomem *base;
void __iomem *fifo;
+ spinlock_t lock;
struct ac97_pcm *pcm;
int pcm_open;
@@ -232,7 +233,6 @@ struct aaci {
struct snd_ac97 *ac97;
u32 maincr;
- spinlock_t lock;
struct aaci_runtime playback;
struct aaci_runtime capture;
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index b4b48afb6de..5d9411839cd 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -159,7 +159,7 @@ static int pxa2xx_ac97_resume(struct device *dev)
return ret;
}
-static struct dev_pm_ops pxa2xx_ac97_pm_ops = {
+static const struct dev_pm_ops pxa2xx_ac97_pm_ops = {
.suspend = pxa2xx_ac97_suspend,
.resume = pxa2xx_ac97_resume,
};
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 34c7d48f506..7f4d744ae40 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -37,14 +37,22 @@ static unsigned int resolution;
struct snd_hrtimer {
struct snd_timer *timer;
struct hrtimer hrt;
+ atomic_t running;
};
static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
struct snd_timer *t = stime->timer;
+
+ if (!atomic_read(&stime->running))
+ return HRTIMER_NORESTART;
+
hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
snd_timer_interrupt(stime->timer, t->sticks);
+
+ if (!atomic_read(&stime->running))
+ return HRTIMER_NORESTART;
return HRTIMER_RESTART;
}
@@ -58,6 +66,7 @@ static int snd_hrtimer_open(struct snd_timer *t)
hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
stime->timer = t;
stime->hrt.function = snd_hrtimer_callback;
+ atomic_set(&stime->running, 0);
t->private_data = stime;
return 0;
}
@@ -78,16 +87,18 @@ static int snd_hrtimer_start(struct snd_timer *t)
{
struct snd_hrtimer *stime = t->private_data;
+ atomic_set(&stime->running, 0);
+ hrtimer_cancel(&stime->hrt);
hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
HRTIMER_MODE_REL);
+ atomic_set(&stime->running, 1);
return 0;
}
static int snd_hrtimer_stop(struct snd_timer *t)
{
struct snd_hrtimer *stime = t->private_data;
-
- hrtimer_cancel(&stime->hrt);
+ atomic_set(&stime->running, 0);
return 0;
}
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 30f410832a2..a27545b23ee 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -758,7 +758,7 @@ int snd_interval_ratnum(struct snd_interval *i,
int diff;
if (q == 0)
q = 1;
- den = div_down(num, q);
+ den = div_up(num, q);
if (den < rats[k].den_min)
continue;
if (den > rats[k].den_max)
@@ -794,7 +794,7 @@ int snd_interval_ratnum(struct snd_interval *i,
i->empty = 1;
return -EINVAL;
}
- den = div_up(num, q);
+ den = div_down(num, q);
if (den > rats[k].den_max)
continue;
if (den < rats[k].den_min)
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 2f766123b15..0f5a194695d 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1257,7 +1257,7 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
break;
count -= count1;
}
- if (file->f_flags & O_SYNC) {
+ if (file->f_flags & O_DSYNC) {
spin_lock_irq(&runtime->lock);
while (runtime->avail != runtime->buffer_size) {
wait_queue_t wait;
diff --git a/sound/isa/gus/gus_mem.c b/sound/isa/gus/gus_mem.c
index 661205c4dce..af888a022fc 100644
--- a/sound/isa/gus/gus_mem.c
+++ b/sound/isa/gus/gus_mem.c
@@ -127,7 +127,8 @@ static struct snd_gf1_mem_block *snd_gf1_mem_share(struct snd_gf1_mem * alloc,
!share_id[2] && !share_id[3])
return NULL;
for (block = alloc->first; block; block = block->next)
- if (!memcmp(share_id, block->share_id, sizeof(share_id)))
+ if (!memcmp(share_id, block->share_id,
+ sizeof(block->share_id)))
return block;
return NULL;
}
diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
index cb9aa4c4edd..4be562b2cf2 100644
--- a/sound/isa/msnd/msnd_midi.c
+++ b/sound/isa/msnd/msnd_midi.c
@@ -162,7 +162,7 @@ int snd_msndmidi_new(struct snd_card *card, int device)
err = snd_rawmidi_new(card, "MSND-MIDI", device, 1, 1, &rmidi);
if (err < 0)
return err;
- mpu = kcalloc(1, sizeof(*mpu), GFP_KERNEL);
+ mpu = kzalloc(sizeof(*mpu), GFP_KERNEL);
if (mpu == NULL) {
snd_device_free(card, rmidi);
return -ENOMEM;
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index 106be6e471f..c8a8da0d403 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -548,10 +548,13 @@ __skip_mpu:
static irqreturn_t snd_opti93x_interrupt(int irq, void *dev_id)
{
- struct snd_wss *codec = dev_id;
- struct snd_opti9xx *chip = codec->card->private_data;
+ struct snd_opti9xx *chip = dev_id;
+ struct snd_wss *codec = chip->codec;
unsigned char status;
+ if (!codec)
+ return IRQ_HANDLED;
+
status = snd_opti9xx_read(chip, OPTi9XX_MC_REG(11));
if ((status & OPTi93X_IRQ_PLAYBACK) && codec->playback_substream)
snd_pcm_period_elapsed(codec->playback_substream);
@@ -691,10 +694,9 @@ static void snd_card_opti9xx_free(struct snd_card *card)
if (chip) {
#ifdef OPTi93X
- struct snd_wss *codec = chip->codec;
- if (codec && codec->irq > 0) {
- disable_irq(codec->irq);
- free_irq(codec->irq, codec);
+ if (chip->irq > 0) {
+ disable_irq(chip->irq);
+ free_irq(chip->irq, chip);
}
release_and_free_resource(chip->res_mc_indir);
#endif
@@ -759,9 +761,9 @@ static int __devinit snd_opti9xx_probe(struct snd_card *card)
#endif
#ifdef OPTi93X
error = request_irq(irq, snd_opti93x_interrupt,
- IRQF_DISABLED, DEV_NAME" - WSS", codec);
+ IRQF_DISABLED, DEV_NAME" - WSS", chip);
if (error < 0) {
- snd_printk(KERN_ERR "opti9xx: can't grab IRQ %d\n", chip->irq);
+ snd_printk(KERN_ERR "opti9xx: can't grab IRQ %d\n", irq);
return error;
}
#endif
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index 96678d5d383..751762f1c59 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -393,8 +393,6 @@ size_dram(struct snd_emu8000 *emu)
while (size < EMU8000_MAX_DRAM) {
- size += 512 * 1024; /* increment 512kbytes */
-
/* Write a unique data on the test address.
* if the address is out of range, the data is written on
* 0x200000(=EMU8000_DRAM_OFFSET). Then the id word is
@@ -414,7 +412,9 @@ size_dram(struct snd_emu8000 *emu)
/*snd_emu8000_read_wait(emu);*/
EMU8000_SMLD_READ(emu); /* discard stale data */
if (EMU8000_SMLD_READ(emu) != UNIQUE_ID2)
- break; /* we must have wrapped around */
+ break; /* no memory at this address */
+
+ size += 512 * 1024; /* increment 512kbytes */
snd_emu8000_read_wait(emu);
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 8691f4cf619..f1d9d16b548 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -609,7 +609,7 @@ static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
/* alloc virtual 'dma' area */
if (runtime->dma_area)
vfree(runtime->dma_area);
- runtime->dma_area = vmalloc(size);
+ runtime->dma_area = vmalloc_user(size);
if (runtime->dma_area == NULL)
return -ENOMEM;
runtime->dma_bytes = size;
diff --git a/sound/oss/pss.c b/sound/oss/pss.c
index 83f5ee236b1..e19dd5dcc2d 100644
--- a/sound/oss/pss.c
+++ b/sound/oss/pss.c
@@ -269,7 +269,7 @@ static int pss_reset_dsp(pss_confdata * devc)
unsigned long i, limit = jiffies + HZ/10;
outw(0x2000, REG(PSS_CONTROL));
- for (i = 0; i < 32768 && (limit-jiffies >= 0); i++)
+ for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
inw(REG(PSS_CONTROL));
outw(0x0000, REG(PSS_CONTROL));
return 1;
@@ -369,11 +369,11 @@ static int pss_download_boot(pss_confdata * devc, unsigned char *block, int size
outw(0, REG(PSS_DATA));
limit = jiffies + HZ/10;
- for (i = 0; i < 32768 && (limit - jiffies >= 0); i++)
+ for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
val = inw(REG(PSS_STATUS));
limit = jiffies + HZ/10;
- for (i = 0; i < 32768 && (limit-jiffies >= 0); i++)
+ for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
{
val = inw(REG(PSS_STATUS));
if (val & 0x4000)
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 20cb60afb20..c1192062300 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -2122,7 +2122,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
}
/* nothing should be in powerdown mode */
snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0);
- end_time = jiffies + msecs_to_jiffies(120);
+ end_time = jiffies + msecs_to_jiffies(5000);
do {
if ((snd_ac97_read(ac97, AC97_POWERDOWN) & 0x0f) == 0x0f)
goto __ready_ok;
diff --git a/sound/pci/cs5535audio/Makefile b/sound/pci/cs5535audio/Makefile
index fda7a94c992..ccc642269b9 100644
--- a/sound/pci/cs5535audio/Makefile
+++ b/sound/pci/cs5535audio/Makefile
@@ -4,9 +4,7 @@
snd-cs5535audio-y := cs5535audio.o cs5535audio_pcm.o
snd-cs5535audio-$(CONFIG_PM) += cs5535audio_pm.o
-ifdef CONFIG_MGEODE_LX
snd-cs5535audio-$(CONFIG_OLPC) += cs5535audio_olpc.o
-endif
# Toplevel Module Dependency
obj-$(CONFIG_SND_CS5535AUDIO) += snd-cs5535audio.o
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 05f56e04849..91e7faf69bb 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -389,6 +389,7 @@ probefail_out:
static void __devexit snd_cs5535audio_remove(struct pci_dev *pci)
{
+ olpc_quirks_cleanup();
snd_card_free(pci_get_drvdata(pci));
pci_set_drvdata(pci, NULL);
}
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index 7a298ac662e..51966d782a3 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -99,10 +99,11 @@ int snd_cs5535audio_suspend(struct pci_dev *pci, pm_message_t state);
int snd_cs5535audio_resume(struct pci_dev *pci);
#endif
-#if defined(CONFIG_OLPC) && defined(CONFIG_MGEODE_LX)
+#ifdef CONFIG_OLPC
void __devinit olpc_prequirks(struct snd_card *card,
struct snd_ac97_template *ac97);
int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97);
+void __devexit olpc_quirks_cleanup(void);
void olpc_analog_input(struct snd_ac97 *ac97, int on);
void olpc_mic_bias(struct snd_ac97 *ac97, int on);
@@ -128,6 +129,7 @@ static inline int olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
{
return 0;
}
+static inline void olpc_quirks_cleanup(void) { }
static inline void olpc_analog_input(struct snd_ac97 *ac97, int on) { }
static inline void olpc_mic_bias(struct snd_ac97 *ac97, int on) { }
static inline void olpc_capture_open(struct snd_ac97 *ac97) { }
diff --git a/sound/pci/cs5535audio/cs5535audio_olpc.c b/sound/pci/cs5535audio/cs5535audio_olpc.c
index 5c6814335cd..50da49be9ae 100644
--- a/sound/pci/cs5535audio/cs5535audio_olpc.c
+++ b/sound/pci/cs5535audio/cs5535audio_olpc.c
@@ -13,10 +13,13 @@
#include <sound/info.h>
#include <sound/control.h>
#include <sound/ac97_codec.h>
+#include <linux/gpio.h>
#include <asm/olpc.h>
#include "cs5535audio.h"
+#define DRV_NAME "cs5535audio-olpc"
+
/*
* OLPC has an additional feature on top of the regular AD1888 codec features.
* It has an Analog Input mode that is switched into (after disabling the
@@ -38,10 +41,7 @@ void olpc_analog_input(struct snd_ac97 *ac97, int on)
}
/* set Analog Input through GPIO */
- if (on)
- geode_gpio_set(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL);
- else
- geode_gpio_clear(OLPC_GPIO_MIC_AC, GPIO_OUTPUT_VAL);
+ gpio_set_value(OLPC_GPIO_MIC_AC, on);
}
/*
@@ -73,8 +73,7 @@ static int olpc_dc_info(struct snd_kcontrol *kctl,
static int olpc_dc_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *v)
{
- v->value.integer.value[0] = geode_gpio_isset(OLPC_GPIO_MIC_AC,
- GPIO_OUTPUT_VAL);
+ v->value.integer.value[0] = gpio_get_value(OLPC_GPIO_MIC_AC);
return 0;
}
@@ -153,6 +152,12 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
if (!machine_is_olpc())
return 0;
+ if (gpio_request(OLPC_GPIO_MIC_AC, DRV_NAME)) {
+ printk(KERN_ERR DRV_NAME ": unable to allocate MIC GPIO\n");
+ return -EIO;
+ }
+ gpio_direction_output(OLPC_GPIO_MIC_AC, 0);
+
/* drop the original AD1888 HPF control */
memset(&elem, 0, sizeof(elem));
elem.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -169,11 +174,18 @@ int __devinit olpc_quirks(struct snd_card *card, struct snd_ac97 *ac97)
for (i = 0; i < ARRAY_SIZE(olpc_cs5535audio_ctls); i++) {
err = snd_ctl_add(card, snd_ctl_new1(&olpc_cs5535audio_ctls[i],
ac97->private_data));
- if (err < 0)
+ if (err < 0) {
+ gpio_free(OLPC_GPIO_MIC_AC);
return err;
+ }
}
/* turn off the mic by default */
olpc_mic_bias(ac97, 0);
return 0;
}
+
+void __devexit olpc_quirks_cleanup(void)
+{
+ gpio_free(OLPC_GPIO_MIC_AC);
+}
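
Note: the OLPC quirk now uses the generic gpiolib calls instead of the Geode-specific helpers; the usual lifecycle is request, set direction, read/write the value, and free on teardown or on any error path (as the added gpio_free() in the snd_ctl_add() failure branch shows). A compressed sketch of that lifecycle, with a made-up GPIO number and label:

#include <linux/gpio.h>

#define MY_GPIO  1              /* assumption: whatever line the board wires up */

static int claim_mic_gpio(void)
{
	int err = gpio_request(MY_GPIO, "example-mic-switch");
	if (err)
		return err;                     /* line already owned elsewhere */
	gpio_direction_output(MY_GPIO, 0);      /* drive it low initially */
	return 0;
}

static void toggle_mic_gpio(int on)
{
	gpio_set_value(MY_GPIO, on);            /* like olpc_analog_input() */
}

static void release_mic_gpio(void)
{
	gpio_free(MY_GPIO);                     /* like olpc_quirks_cleanup() */
}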
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 2d627613aea..1d541b7f554 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -255,9 +255,13 @@ enum {
* in HD-audio specification
*/
#define AC_PINCAP_HDMI (1<<7) /* HDMI pin */
+#define AC_PINCAP_DP (1<<24) /* DisplayPort pin, can
+ * coexist with AC_PINCAP_HDMI
+ */
#define AC_PINCAP_VREF (0x37<<8)
#define AC_PINCAP_VREF_SHIFT 8
#define AC_PINCAP_EAPD (1<<16) /* EAPD capable */
+#define AC_PINCAP_HBR (1<<27) /* High Bit Rate */
/* Vref status (used in pin cap) */
#define AC_PINCAP_VREF_HIZ (1<<0) /* Hi-Z */
#define AC_PINCAP_VREF_50 (1<<1) /* 50% */
@@ -635,6 +639,7 @@ struct hda_bus {
unsigned int rirb_error:1; /* error in codec communication */
unsigned int response_reset:1; /* controller was reset */
unsigned int in_reset:1; /* during reset operation */
+ unsigned int power_keep_link_on:1; /* don't power off HDA link */
};
/*
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index d24328661c6..40ccb419b6e 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -24,6 +24,7 @@
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include "hda_codec.h"
@@ -428,8 +429,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf)
char *key, *val;
struct hda_hint *hint;
- while (isspace(*buf))
- buf++;
+ buf = skip_spaces(buf);
if (!*buf || *buf == '#' || *buf == '\n')
return 0;
if (*buf == '=')
@@ -444,8 +444,7 @@ static int parse_hints(struct hda_codec *codec, const char *buf)
return -EINVAL;
}
*val++ = 0;
- while (isspace(*val))
- val++;
+ val = skip_spaces(val);
remove_trail_spaces(key);
remove_trail_spaces(val);
hint = get_hint(codec, key);
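
Note: skip_spaces() from <linux/string.h> simply returns a pointer past any leading whitespace, replacing the two open-coded isspace() loops. A tiny sketch of splitting a "key = value" hint line with it (parse_pair() is a hypothetical helper, not the driver's parse_hints(), and trailing-space trimming is omitted):

#include <linux/string.h>
#include <linux/errno.h>

/* Split "  key = value" in place; returns 0 on success. Sketch only. */
static int parse_pair(char *buf, char **key, char **val)
{
	char *eq;

	buf = skip_spaces(buf);         /* drop leading blanks */
	eq = strchr(buf, '=');
	if (!eq || eq == buf)
		return -EINVAL;
	*eq++ = '\0';
	*key = buf;
	*val = skip_spaces(eq);
	return 0;
}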
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index efcc4f7c57f..9b56f937913 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2082,7 +2082,8 @@ static void azx_power_notify(struct hda_bus *bus)
}
if (power_on)
azx_init_chip(chip);
- else if (chip->running && power_save_controller)
+ else if (chip->running && power_save_controller &&
+ !bus->power_keep_link_on)
azx_stop_chip(chip);
}
#endif /* CONFIG_SND_HDA_POWER_SAVE */
@@ -2712,6 +2713,9 @@ static struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x10de, 0x0ac1), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0ac2), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0ac3), .driver_data = AZX_DRIVER_NVIDIA },
+ { PCI_DEVICE(0x10de, 0x0be2), .driver_data = AZX_DRIVER_NVIDIA },
+ { PCI_DEVICE(0x10de, 0x0be3), .driver_data = AZX_DRIVER_NVIDIA },
+ { PCI_DEVICE(0x10de, 0x0be4), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d94), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d95), .driver_data = AZX_DRIVER_NVIDIA },
{ PCI_DEVICE(0x10de, 0x0d96), .driver_data = AZX_DRIVER_NVIDIA },
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 09476fc1ab6..c9afc04adac 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -240,9 +240,14 @@ static void print_pin_caps(struct snd_info_buffer *buffer,
/* Realtek uses this bit as a different meaning */
if ((codec->vendor_id >> 16) == 0x10ec)
snd_iprintf(buffer, " R/L");
- else
+ else {
+ if (caps & AC_PINCAP_HBR)
+ snd_iprintf(buffer, " HBR");
snd_iprintf(buffer, " HDMI");
+ }
}
+ if (caps & AC_PINCAP_DP)
+ snd_iprintf(buffer, " DP");
if (caps & AC_PINCAP_TRIG_REQ)
snd_iprintf(buffer, " Trigger");
if (caps & AC_PINCAP_IMP_SENSE)
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 447eda1f677..1a36137e13e 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1789,6 +1789,14 @@ static int patch_ad1981(struct hda_codec *codec)
codec->patch_ops.init = ad1981_hp_init;
codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
+ /* set the upper-limit for mixer amp to 0dB for avoiding the
+ * possible damage by overloading
+ */
+ snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
+ (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
+ (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
+ (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+ (1 << AC_AMPCAP_MUTE_SHIFT));
break;
case AD1981_THINKPAD:
spec->mixers[0] = ad1981_thinkpad_mixers;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index a09c03c3f62..c578c28f368 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -29,6 +29,7 @@
#include "hda_codec.h"
#include "hda_local.h"
+#include "hda_beep.h"
#define CXT_PIN_DIR_IN 0x00
#define CXT_PIN_DIR_OUT 0x01
@@ -111,6 +112,7 @@ struct conexant_spec {
unsigned int dell_automute;
unsigned int port_d_mode;
unsigned char ext_mic_bias;
+ unsigned int dell_vostro;
};
static int conexant_playback_pcm_open(struct hda_pcm_stream *hinfo,
@@ -476,6 +478,7 @@ static void conexant_free(struct hda_codec *codec)
snd_array_free(&spec->jacks);
}
#endif
+ snd_hda_detach_beep_device(codec);
kfree(codec->spec);
}
@@ -2109,9 +2112,12 @@ static int cxt5066_mic_boost_mux_enum_get(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
int val;
+ hda_nid_t nid = kcontrol->private_value & 0xff;
+ int inout = (kcontrol->private_value & 0x100) ?
+ AC_AMP_GET_INPUT : AC_AMP_GET_OUTPUT;
- val = snd_hda_codec_read(codec, 0x17, 0,
- AC_VERB_GET_AMP_GAIN_MUTE, AC_AMP_GET_OUTPUT);
+ val = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_AMP_GAIN_MUTE, inout);
ucontrol->value.enumerated.item[0] = val & AC_AMP_GAIN;
return 0;
@@ -2123,6 +2129,9 @@ static int cxt5066_mic_boost_mux_enum_put(struct snd_kcontrol *kcontrol,
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
const struct hda_input_mux *imux = &cxt5066_analog_mic_boost;
unsigned int idx;
+ hda_nid_t nid = kcontrol->private_value & 0xff;
+ int inout = (kcontrol->private_value & 0x100) ?
+ AC_AMP_SET_INPUT : AC_AMP_SET_OUTPUT;
if (!imux->num_items)
return 0;
@@ -2130,9 +2139,9 @@ static int cxt5066_mic_boost_mux_enum_put(struct snd_kcontrol *kcontrol,
if (idx >= imux->num_items)
idx = imux->num_items - 1;
- snd_hda_codec_write_cache(codec, 0x17, 0,
+ snd_hda_codec_write_cache(codec, nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE,
- AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | AC_AMP_SET_OUTPUT |
+ AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | inout |
imux->items[idx].index);
return 1;
@@ -2201,10 +2210,11 @@ static struct snd_kcontrol_new cxt5066_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Analog Mic Boost Capture Enum",
+ .name = "Ext Mic Boost Capture Enum",
.info = cxt5066_mic_boost_mux_enum_info,
.get = cxt5066_mic_boost_mux_enum_get,
.put = cxt5066_mic_boost_mux_enum_put,
+ .private_value = 0x17,
},
HDA_BIND_VOL("Capture Volume", &cxt5066_bind_capture_vol_others),
@@ -2212,6 +2222,19 @@ static struct snd_kcontrol_new cxt5066_mixers[] = {
{}
};
+static struct snd_kcontrol_new cxt5066_vostro_mixers[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Int Mic Boost Capture Enum",
+ .info = cxt5066_mic_boost_mux_enum_info,
+ .get = cxt5066_mic_boost_mux_enum_get,
+ .put = cxt5066_mic_boost_mux_enum_put,
+ .private_value = 0x23 | 0x100,
+ },
+ HDA_CODEC_VOLUME_MONO("Beep Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
+ {}
+};
+
static struct hda_verb cxt5066_init_verbs[] = {
{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port B */
{0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port C */
@@ -2397,11 +2420,16 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
/* initialize jack-sensing, too */
static int cxt5066_init(struct hda_codec *codec)
{
+ struct conexant_spec *spec = codec->spec;
+
snd_printdd("CXT5066: init\n");
conexant_init(codec);
if (codec->patch_ops.unsol_event) {
cxt5066_hp_automute(codec);
- cxt5066_automic(codec);
+ if (spec->dell_vostro)
+ cxt5066_vostro_automic(codec);
+ else
+ cxt5066_automic(codec);
}
return 0;
}
@@ -2500,7 +2528,10 @@ static int patch_cxt5066(struct hda_codec *codec)
spec->init_verbs[0] = cxt5066_init_verbs_vostro;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+ spec->mixers[spec->num_mixers++] = cxt5066_vostro_mixers;
spec->port_d_mode = 0;
+ spec->dell_vostro = 1;
+ snd_hda_attach_beep_device(codec, 0x13);
/* no S/PDIF out */
spec->multiout.dig_out_nid = 0;
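
Note: the boost-mux controls above encode two things in kcontrol->private_value: the widget NID in the low byte and a flag in bit 8 that selects the input rather than the output amplifier (0x23 | 0x100 for the Vostro internal mic, plain 0x17 for the external one). A userspace-style sketch of that packing, with hypothetical names:

#include <stdio.h>

#define PV_NID_MASK   0x0ff
#define PV_INPUT_FLAG 0x100

/* pack/unpack helpers for the private_value layout described above */
static unsigned long pack_pv(unsigned nid, int is_input)
{
	return nid | (is_input ? PV_INPUT_FLAG : 0);
}

int main(void)
{
	unsigned long pv = pack_pv(0x23, 1);    /* like 0x23 | 0x100 above */

	printf("nid=0x%02lx dir=%s\n",
	       pv & PV_NID_MASK,
	       (pv & PV_INPUT_FLAG) ? "input amp" : "output amp");
	return 0;
}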
diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
index 928df59be5d..918f40378d5 100644
--- a/sound/pci/hda/patch_intelhdmi.c
+++ b/sound/pci/hda/patch_intelhdmi.c
@@ -146,38 +146,78 @@ struct cea_channel_speaker_allocation {
};
/*
+ * ALSA sequence is:
+ *
+ * surround40 surround41 surround50 surround51 surround71
+ * ch0 front left = = = =
+ * ch1 front right = = = =
+ * ch2 rear left = = = =
+ * ch3 rear right = = = =
+ * ch4 LFE center center center
+ * ch5 LFE LFE
+ * ch6 side left
+ * ch7 side right
+ *
+ * surround71 = {FL, FR, RLC, RRC, FC, LFE, RL, RR}
+ */
+static int hdmi_channel_mapping[0x32][8] = {
+ /* stereo */
+ [0x00] = { 0x00, 0x11, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7 },
+ /* 2.1 */
+ [0x01] = { 0x00, 0x11, 0x22, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7 },
+ /* Dolby Surround */
+ [0x02] = { 0x00, 0x11, 0x23, 0xf2, 0xf4, 0xf5, 0xf6, 0xf7 },
+ /* surround40 */
+ [0x08] = { 0x00, 0x11, 0x24, 0x35, 0xf3, 0xf2, 0xf6, 0xf7 },
+ /* 4ch */
+ [0x03] = { 0x00, 0x11, 0x23, 0x32, 0x44, 0xf5, 0xf6, 0xf7 },
+ /* surround41 */
+ [0x09] = { 0x00, 0x11, 0x24, 0x34, 0x43, 0xf2, 0xf6, 0xf7 },
+ /* surround50 */
+ [0x0a] = { 0x00, 0x11, 0x24, 0x35, 0x43, 0xf2, 0xf6, 0xf7 },
+ /* surround51 */
+ [0x0b] = { 0x00, 0x11, 0x24, 0x35, 0x43, 0x52, 0xf6, 0xf7 },
+ /* 7.1 */
+ [0x13] = { 0x00, 0x11, 0x26, 0x37, 0x43, 0x52, 0x64, 0x75 },
+};
+
+/*
* This is an ordered list!
*
* The preceding ones have better chances to be selected by
* hdmi_setup_channel_allocation().
*/
static struct cea_channel_speaker_allocation channel_allocations[] = {
-/* channel: 8 7 6 5 4 3 2 1 */
+/* channel: 7 6 5 4 3 2 1 0 */
{ .ca_index = 0x00, .speakers = { 0, 0, 0, 0, 0, 0, FR, FL } },
/* 2.1 */
{ .ca_index = 0x01, .speakers = { 0, 0, 0, 0, 0, LFE, FR, FL } },
/* Dolby Surround */
{ .ca_index = 0x02, .speakers = { 0, 0, 0, 0, FC, 0, FR, FL } },
+ /* surround40 */
+{ .ca_index = 0x08, .speakers = { 0, 0, RR, RL, 0, 0, FR, FL } },
+ /* surround41 */
+{ .ca_index = 0x09, .speakers = { 0, 0, RR, RL, 0, LFE, FR, FL } },
+ /* surround50 */
+{ .ca_index = 0x0a, .speakers = { 0, 0, RR, RL, FC, 0, FR, FL } },
+ /* surround51 */
+{ .ca_index = 0x0b, .speakers = { 0, 0, RR, RL, FC, LFE, FR, FL } },
+ /* 6.1 */
+{ .ca_index = 0x0f, .speakers = { 0, RC, RR, RL, FC, LFE, FR, FL } },
+ /* surround71 */
+{ .ca_index = 0x13, .speakers = { RRC, RLC, RR, RL, FC, LFE, FR, FL } },
+
{ .ca_index = 0x03, .speakers = { 0, 0, 0, 0, FC, LFE, FR, FL } },
{ .ca_index = 0x04, .speakers = { 0, 0, 0, RC, 0, 0, FR, FL } },
{ .ca_index = 0x05, .speakers = { 0, 0, 0, RC, 0, LFE, FR, FL } },
{ .ca_index = 0x06, .speakers = { 0, 0, 0, RC, FC, 0, FR, FL } },
{ .ca_index = 0x07, .speakers = { 0, 0, 0, RC, FC, LFE, FR, FL } },
-{ .ca_index = 0x08, .speakers = { 0, 0, RR, RL, 0, 0, FR, FL } },
-{ .ca_index = 0x09, .speakers = { 0, 0, RR, RL, 0, LFE, FR, FL } },
-{ .ca_index = 0x0a, .speakers = { 0, 0, RR, RL, FC, 0, FR, FL } },
- /* 5.1 */
-{ .ca_index = 0x0b, .speakers = { 0, 0, RR, RL, FC, LFE, FR, FL } },
{ .ca_index = 0x0c, .speakers = { 0, RC, RR, RL, 0, 0, FR, FL } },
{ .ca_index = 0x0d, .speakers = { 0, RC, RR, RL, 0, LFE, FR, FL } },
{ .ca_index = 0x0e, .speakers = { 0, RC, RR, RL, FC, 0, FR, FL } },
- /* 6.1 */
-{ .ca_index = 0x0f, .speakers = { 0, RC, RR, RL, FC, LFE, FR, FL } },
{ .ca_index = 0x10, .speakers = { RRC, RLC, RR, RL, 0, 0, FR, FL } },
{ .ca_index = 0x11, .speakers = { RRC, RLC, RR, RL, 0, LFE, FR, FL } },
{ .ca_index = 0x12, .speakers = { RRC, RLC, RR, RL, FC, 0, FR, FL } },
- /* 7.1 */
-{ .ca_index = 0x13, .speakers = { RRC, RLC, RR, RL, FC, LFE, FR, FL } },
{ .ca_index = 0x14, .speakers = { FRC, FLC, 0, 0, 0, 0, FR, FL } },
{ .ca_index = 0x15, .speakers = { FRC, FLC, 0, 0, 0, LFE, FR, FL } },
{ .ca_index = 0x16, .speakers = { FRC, FLC, 0, 0, FC, 0, FR, FL } },
@@ -210,7 +250,6 @@ static struct cea_channel_speaker_allocation channel_allocations[] = {
{ .ca_index = 0x31, .speakers = { FRW, FLW, RR, RL, FC, LFE, FR, FL } },
};
-
/*
* HDA/HDMI auto parsing
*/
@@ -344,7 +383,7 @@ static int intel_hdmi_parse_codec(struct hda_codec *codec)
break;
case AC_WID_PIN:
caps = snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
- if (!(caps & AC_PINCAP_HDMI))
+ if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP)))
continue;
if (intel_hdmi_add_pin(codec, nid) < 0)
return -EINVAL;
@@ -352,6 +391,17 @@ static int intel_hdmi_parse_codec(struct hda_codec *codec)
}
}
+ /*
+ * G45/IbexPeak don't support EPSS: the unsolicited pin hot plug event
+ * can be lost and presence sense verb will become inaccurate if the
+ * HDA link is powered off at hot plug or hw initialization time.
+ */
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+ if (!(snd_hda_param_read(codec, codec->afg, AC_PAR_POWER_STATE) &
+ AC_PWRST_EPSS))
+ codec->bus->power_keep_link_on = 1;
+#endif
+
return 0;
}
@@ -436,14 +486,15 @@ static void hdmi_set_channel_count(struct hda_codec *codec,
AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
}
-static void hdmi_debug_channel_mapping(struct hda_codec *codec, hda_nid_t nid)
+static void hdmi_debug_channel_mapping(struct hda_codec *codec,
+ hda_nid_t pin_nid)
{
#ifdef CONFIG_SND_DEBUG_VERBOSE
int i;
int slot;
for (i = 0; i < 8; i++) {
- slot = snd_hda_codec_read(codec, nid, 0,
+ slot = snd_hda_codec_read(codec, pin_nid, 0,
AC_VERB_GET_HDMI_CHAN_SLOT, i);
printk(KERN_DEBUG "HDMI: ASP channel %d => slot %d\n",
slot >> 4, slot & 0xf);
@@ -619,25 +670,32 @@ static int hdmi_setup_channel_allocation(struct hda_codec *codec, hda_nid_t nid,
return ai->CA;
}
-static void hdmi_setup_channel_mapping(struct hda_codec *codec, hda_nid_t nid,
+static void hdmi_setup_channel_mapping(struct hda_codec *codec,
+ hda_nid_t pin_nid,
struct hdmi_audio_infoframe *ai)
{
int i;
+ int ca = ai->CA;
+ int err;
- if (!ai->CA)
- return;
-
- /*
- * TODO: adjust channel mapping if necessary
- * ALSA sequence is front/surr/clfe/side?
- */
+ if (hdmi_channel_mapping[ca][1] == 0) {
+ for (i = 0; i < channel_allocations[ca].channels; i++)
+ hdmi_channel_mapping[ca][i] = i | (i << 4);
+ for (; i < 8; i++)
+ hdmi_channel_mapping[ca][i] = 0xf | (i << 4);
+ }
- for (i = 0; i < 8; i++)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_HDMI_CHAN_SLOT,
- (i << 4) | i);
+ for (i = 0; i < 8; i++) {
+ err = snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_HDMI_CHAN_SLOT,
+ hdmi_channel_mapping[ca][i]);
+ if (err) {
+ snd_printdd(KERN_INFO "HDMI: channel mapping failed\n");
+ break;
+ }
+ }
- hdmi_debug_channel_mapping(codec, nid);
+ hdmi_debug_channel_mapping(codec, pin_nid);
}
static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
@@ -676,7 +734,6 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
};
hdmi_setup_channel_allocation(codec, nid, &ai);
- hdmi_setup_channel_mapping(codec, nid, &ai);
for (i = 0; i < spec->num_pins; i++) {
if (spec->pin_cvt[i] != nid)
@@ -686,6 +743,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, hda_nid_t nid,
pin_nid = spec->pin[i];
if (!hdmi_infoframe_uptodate(codec, pin_nid, &ai)) {
+ hdmi_setup_channel_mapping(codec, pin_nid, &ai);
hdmi_stop_infoframe_trans(codec, pin_nid);
hdmi_fill_audio_infoframe(codec, pin_nid, &ai);
hdmi_start_infoframe_trans(codec, pin_nid);
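
Note: going by the debug helper above, each CHAN_SLOT verb payload packs an ASP channel number in the high nibble and a slot in the low nibble (it prints `slot >> 4` as the channel and `slot & 0xf` as the slot), which is why the static table uses byte values such as 0x11 or 0xf2. A tiny sketch of the pack/unpack arithmetic, assuming that layout:

#include <stdio.h>

/* high nibble = ASP channel, low nibble = slot; 0xf marks "unassigned" */
#define CHAN_SLOT(chan, slot)  ((unsigned)(((chan) << 4) | ((slot) & 0xf)))
#define SLOT_OF(v)             ((v) & 0xf)
#define CHAN_OF(v)             ((v) >> 4)

int main(void)
{
	unsigned v = CHAN_SLOT(1, 1);           /* 0x11, as in the stereo row */

	printf("channel %u => slot %u\n", CHAN_OF(v), SLOT_OF(v));
	printf("unused slot 2 entry: 0x%02x\n", CHAN_SLOT(0xf, 2));  /* 0xf2 */
	return 0;
}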
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 888b6313eec..c7465053d6b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -131,8 +131,8 @@ enum {
enum {
ALC269_BASIC,
ALC269_QUANTA_FL1,
- ALC269_ASUS_EEEPC_P703,
- ALC269_ASUS_EEEPC_P901,
+ ALC269_ASUS_AMIC,
+ ALC269_ASUS_DMIC,
ALC269_FUJITSU,
ALC269_LIFEBOOK,
ALC269_AUTO,
@@ -188,6 +188,8 @@ enum {
ALC663_ASUS_MODE4,
ALC663_ASUS_MODE5,
ALC663_ASUS_MODE6,
+ ALC663_ASUS_MODE7,
+ ALC663_ASUS_MODE8,
ALC272_DELL,
ALC272_DELL_ZM1,
ALC272_SAMSUNG_NC10,
@@ -335,6 +337,9 @@ struct alc_spec {
/* hooks */
void (*init_hook)(struct hda_codec *codec);
void (*unsol_event)(struct hda_codec *codec, unsigned int res);
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+ void (*power_hook)(struct hda_codec *codec, int power);
+#endif
/* for pin sensing */
unsigned int sense_updated: 1;
@@ -386,6 +391,7 @@ struct alc_config_preset {
void (*init_hook)(struct hda_codec *);
#ifdef CONFIG_SND_HDA_POWER_SAVE
struct hda_amp_list *loopbacks;
+ void (*power_hook)(struct hda_codec *codec, int power);
#endif
};
@@ -898,6 +904,7 @@ static void setup_preset(struct hda_codec *codec,
spec->unsol_event = preset->unsol_event;
spec->init_hook = preset->init_hook;
#ifdef CONFIG_SND_HDA_POWER_SAVE
+ spec->power_hook = preset->power_hook;
spec->loopback.amplist = preset->loopbacks;
#endif
@@ -1663,9 +1670,6 @@ static struct hda_verb alc889_acer_aspire_8930g_verbs[] = {
/* some bit here disables the other DACs. Init=0x4900 */
{0x20, AC_VERB_SET_COEF_INDEX, 0x08},
{0x20, AC_VERB_SET_PROC_COEF, 0x0000},
-/* Enable amplifiers */
- {0x14, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
- {0x15, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
/* DMIC fix
* This laptop has a stereo digital microphone. The mics are only 1cm apart
* which makes the stereo useless. However, either the mic or the ALC889
@@ -1778,6 +1782,25 @@ static struct snd_kcontrol_new alc888_base_mixer[] = {
{ } /* end */
};
+static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = {
+ HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0,
+ HDA_OUTPUT),
+ HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT),
+ HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+ HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ { } /* end */
+};
+
+
static void alc888_acer_aspire_4930g_setup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -1808,6 +1831,16 @@ static void alc889_acer_aspire_8930g_setup(struct hda_codec *codec)
spec->autocfg.speaker_pins[2] = 0x1b;
}
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+static void alc889_power_eapd(struct hda_codec *codec, int power)
+{
+ snd_hda_codec_write(codec, 0x14, 0,
+ AC_VERB_SET_EAPD_BTLENABLE, power ? 2 : 0);
+ snd_hda_codec_write(codec, 0x15, 0,
+ AC_VERB_SET_EAPD_BTLENABLE, power ? 2 : 0);
+}
+#endif
+
/*
* ALC880 3-stack model
*
@@ -3601,12 +3634,29 @@ static void alc_free(struct hda_codec *codec)
snd_hda_detach_beep_device(codec);
}
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+static int alc_suspend(struct hda_codec *codec, pm_message_t state)
+{
+ struct alc_spec *spec = codec->spec;
+ if (spec && spec->power_hook)
+ spec->power_hook(codec, 0);
+ return 0;
+}
+#endif
+
#ifdef SND_HDA_NEEDS_RESUME
static int alc_resume(struct hda_codec *codec)
{
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+ struct alc_spec *spec = codec->spec;
+#endif
codec->patch_ops.init(codec);
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+ if (spec && spec->power_hook)
+ spec->power_hook(codec, 1);
+#endif
return 0;
}
#endif
@@ -3623,6 +3673,7 @@ static struct hda_codec_ops alc_patch_ops = {
.resume = alc_resume,
#endif
#ifdef CONFIG_SND_HDA_POWER_SAVE
+ .suspend = alc_suspend,
.check_power_status = alc_check_power_status,
#endif
};
@@ -6248,6 +6299,7 @@ static const char *alc260_models[ALC260_MODEL_LAST] = {
static struct snd_pci_quirk alc260_cfg_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x007b, "Acer C20x", ALC260_ACER),
+ SND_PCI_QUIRK(0x1025, 0x007f, "Acer", ALC260_WILL),
SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
@@ -8918,7 +8970,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x0579, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
- SND_PCI_QUIRK(0x1462, 0x2fb3, "MSI", ALC883_TARGA_2ch_DIG),
+ SND_PCI_QUIRK(0x1462, 0x2fb3, "MSI", ALC882_AUTO),
SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x3729, "MSI S420", ALC883_TARGA_DIG),
SND_PCI_QUIRK(0x1462, 0x3783, "NEC S970", ALC883_TARGA_DIG),
@@ -9281,6 +9333,7 @@ static struct alc_config_preset alc882_presets[] = {
.dac_nids = alc883_dac_nids,
.adc_nids = alc883_adc_nids_alt,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
+ .capsrc_nids = alc883_capsrc_nids,
.dig_out_nid = ALC883_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
.channel_mode = alc883_3ST_2ch_modes,
@@ -9377,10 +9430,11 @@ static struct alc_config_preset alc882_presets[] = {
.init_hook = alc_automute_amp,
},
[ALC888_ACER_ASPIRE_8930G] = {
- .mixers = { alc888_base_mixer,
+ .mixers = { alc889_acer_aspire_8930g_mixer,
alc883_chmode_mixer },
.init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
- alc889_acer_aspire_8930g_verbs },
+ alc889_acer_aspire_8930g_verbs,
+ alc889_eapd_verbs},
.num_dacs = ARRAY_SIZE(alc883_dac_nids),
.dac_nids = alc883_dac_nids,
.num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
@@ -9397,6 +9451,9 @@ static struct alc_config_preset alc882_presets[] = {
.unsol_event = alc_automute_amp_unsol_event,
.setup = alc889_acer_aspire_8930g_setup,
.init_hook = alc_automute_amp,
+#ifdef CONFIG_SND_HDA_POWER_SAVE
+ .power_hook = alc889_power_eapd,
+#endif
},
[ALC888_ACER_ASPIRE_7730G] = {
.mixers = { alc883_3ST_6ch_mixer,
@@ -9427,6 +9484,7 @@ static struct alc_config_preset alc882_presets[] = {
.dac_nids = alc883_dac_nids,
.adc_nids = alc883_adc_nids_alt,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
+ .capsrc_nids = alc883_capsrc_nids,
.num_channel_mode = ARRAY_SIZE(alc883_sixstack_modes),
.channel_mode = alc883_sixstack_modes,
.input_mux = &alc883_capture_source,
@@ -9488,6 +9546,7 @@ static struct alc_config_preset alc882_presets[] = {
.dac_nids = alc883_dac_nids,
.adc_nids = alc883_adc_nids_alt,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_alt),
+ .capsrc_nids = alc883_capsrc_nids,
.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_lenovo_101e_capture_source,
@@ -9667,6 +9726,7 @@ static struct alc_config_preset alc882_presets[] = {
alc880_gpio1_init_verbs },
.adc_nids = alc883_adc_nids,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids),
+ .capsrc_nids = alc883_capsrc_nids,
.dac_nids = alc883_dac_nids,
.num_dacs = ARRAY_SIZE(alc883_dac_nids),
.channel_mode = alc889A_mb31_6ch_modes,
@@ -10677,6 +10737,13 @@ static struct hda_verb alc262_lenovo_3000_unsol_verbs[] = {
{}
};
+static struct hda_verb alc262_lenovo_3000_init_verbs[] = {
+ /* Front Mic pin: input vref at 50% */
+ {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50},
+ {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+ {}
+};
+
static struct hda_input_mux alc262_fujitsu_capture_source = {
.num_items = 3,
.items = {
@@ -11719,7 +11786,8 @@ static struct alc_config_preset alc262_presets[] = {
[ALC262_LENOVO_3000] = {
.mixers = { alc262_lenovo_3000_mixer },
.init_verbs = { alc262_init_verbs, alc262_EAPD_verbs,
- alc262_lenovo_3000_unsol_verbs },
+ alc262_lenovo_3000_unsol_verbs,
+ alc262_lenovo_3000_init_verbs },
.num_dacs = ARRAY_SIZE(alc262_dac_nids),
.dac_nids = alc262_dac_nids,
.hp_nid = 0x03,
@@ -12856,7 +12924,7 @@ static int patch_alc268(struct hda_codec *codec)
int board_config;
int i, has_beep, err;
- spec = kcalloc(1, sizeof(*spec), GFP_KERNEL);
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (spec == NULL)
return -ENOMEM;
@@ -13231,10 +13299,12 @@ static struct hda_verb alc269_eeepc_amic_init_verbs[] = {
/* toggle speaker-output according to the hp-jack state */
static void alc269_speaker_automute(struct hda_codec *codec)
{
+ struct alc_spec *spec = codec->spec;
+ unsigned int nid = spec->autocfg.hp_pins[0];
unsigned int present;
unsigned char bits;
- present = snd_hda_jack_detect(codec, 0x15);
+ present = snd_hda_jack_detect(codec, nid);
bits = present ? AMP_IN_MUTE(0) : 0;
snd_hda_codec_amp_stereo(codec, 0x0c, HDA_INPUT, 0,
AMP_IN_MUTE(0), bits);
@@ -13459,8 +13529,8 @@ static void alc269_auto_init(struct hda_codec *codec)
static const char *alc269_models[ALC269_MODEL_LAST] = {
[ALC269_BASIC] = "basic",
[ALC269_QUANTA_FL1] = "quanta",
- [ALC269_ASUS_EEEPC_P703] = "eeepc-p703",
- [ALC269_ASUS_EEEPC_P901] = "eeepc-p901",
+ [ALC269_ASUS_AMIC] = "asus-amic",
+ [ALC269_ASUS_DMIC] = "asus-dmic",
[ALC269_FUJITSU] = "fujitsu",
[ALC269_LIFEBOOK] = "lifebook",
[ALC269_AUTO] = "auto",
@@ -13469,18 +13539,41 @@ static const char *alc269_models[ALC269_MODEL_LAST] = {
static struct snd_pci_quirk alc269_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_QUANTA_FL1),
SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
- ALC269_ASUS_EEEPC_P703),
- SND_PCI_QUIRK(0x1043, 0x1883, "ASUS F81Se", ALC269_ASUS_EEEPC_P703),
- SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS F5Q", ALC269_ASUS_EEEPC_P703),
- SND_PCI_QUIRK(0x1043, 0x1723, "ASUS P80", ALC269_ASUS_EEEPC_P703),
- SND_PCI_QUIRK(0x1043, 0x1773, "ASUS U20A", ALC269_ASUS_EEEPC_P703),
- SND_PCI_QUIRK(0x1043, 0x1743, "ASUS U80", ALC269_ASUS_EEEPC_P703),
- SND_PCI_QUIRK(0x1043, 0x1653, "ASUS U50", ALC269_ASUS_EEEPC_P703),
+ ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1133, "ASUS UJ20ft", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80JT", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1383, "ASUS UJ30Jc", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x13d3, "ASUS N61JA", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1413, "ASUS UL50", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1443, "ASUS UL30", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1453, "ASUS M60Jv", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS UL80", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x14f3, "ASUS F83Vf", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS UL20", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1513, "ASUS UX30", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x15a3, "ASUS N60Jv", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x15b3, "ASUS N60Dp", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x15c3, "ASUS N70De", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x15e3, "ASUS F83T", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1643, "ASUS M60J", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1653, "ASUS U50", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1693, "ASUS F50N", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS F5Q", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_ASUS_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1723, "ASUS P80", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1743, "ASUS U80", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1773, "ASUS U20A", ALC269_ASUS_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1883, "ASUS F81Se", ALC269_ASUS_AMIC),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS Eeepc P901",
- ALC269_ASUS_EEEPC_P901),
+ ALC269_ASUS_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS Eeepc S101",
- ALC269_ASUS_EEEPC_P901),
- SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_ASUS_EEEPC_P901),
+ ALC269_ASUS_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_ASUS_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_ASUS_DMIC),
SND_PCI_QUIRK(0x1734, 0x115d, "FSC Amilo", ALC269_FUJITSU),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK),
{}
@@ -13510,7 +13603,7 @@ static struct alc_config_preset alc269_presets[] = {
.setup = alc269_quanta_fl1_setup,
.init_hook = alc269_quanta_fl1_init_hook,
},
- [ALC269_ASUS_EEEPC_P703] = {
+ [ALC269_ASUS_AMIC] = {
.mixers = { alc269_eeepc_mixer },
.cap_mixer = alc269_epc_capture_mixer,
.init_verbs = { alc269_init_verbs,
@@ -13524,7 +13617,7 @@ static struct alc_config_preset alc269_presets[] = {
.setup = alc269_eeepc_amic_setup,
.init_hook = alc269_eeepc_inithook,
},
- [ALC269_ASUS_EEEPC_P901] = {
+ [ALC269_ASUS_DMIC] = {
.mixers = { alc269_eeepc_mixer },
.cap_mixer = alc269_epc_capture_mixer,
.init_verbs = { alc269_init_verbs,
@@ -16159,6 +16252,52 @@ static struct snd_kcontrol_new alc663_g50v_mixer[] = {
{ } /* end */
};
+static struct hda_bind_ctls alc663_asus_mode7_8_all_bind_switch = {
+ .ops = &snd_hda_bind_sw,
+ .values = {
+ HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x15, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
+ 0
+ },
+};
+
+static struct hda_bind_ctls alc663_asus_mode7_8_sp_bind_switch = {
+ .ops = &snd_hda_bind_sw,
+ .values = {
+ HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
+ HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT),
+ 0
+ },
+};
+
+static struct snd_kcontrol_new alc663_mode7_mixer[] = {
+ HDA_BIND_SW("Master Playback Switch", &alc663_asus_mode7_8_all_bind_switch),
+ HDA_BIND_VOL("Speaker Playback Volume", &alc663_asus_bind_master_vol),
+ HDA_BIND_SW("Speaker Playback Switch", &alc663_asus_mode7_8_sp_bind_switch),
+ HDA_CODEC_MUTE("Headphone1 Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Headphone2 Playback Switch", 0x21, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("IntMic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_MUTE("IntMic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+ HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+ { } /* end */
+};
+
+static struct snd_kcontrol_new alc663_mode8_mixer[] = {
+ HDA_BIND_SW("Master Playback Switch", &alc663_asus_mode7_8_all_bind_switch),
+ HDA_BIND_VOL("Speaker Playback Volume", &alc663_asus_bind_master_vol),
+ HDA_BIND_SW("Speaker Playback Switch", &alc663_asus_mode7_8_sp_bind_switch),
+ HDA_CODEC_MUTE("Headphone1 Playback Switch", 0x15, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Headphone2 Playback Switch", 0x21, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ { } /* end */
+};
+
+
static struct snd_kcontrol_new alc662_chmode_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -16446,6 +16585,45 @@ static struct hda_verb alc272_dell_init_verbs[] = {
{}
};
+static struct hda_verb alc663_mode7_init_verbs[] = {
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x1b, AC_VERB_SET_CONNECT_SEL, 0x01},
+ {0x21, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x21, AC_VERB_SET_CONNECT_SEL, 0x01}, /* Headphone */
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(9)},
+ {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_MIC_EVENT},
+ {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
+ {0x21, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
+ {}
+};
+
+static struct hda_verb alc663_mode8_init_verbs[] = {
+ {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x15, AC_VERB_SET_CONNECT_SEL, 0x01},
+ {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {0x21, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x21, AC_VERB_SET_CONNECT_SEL, 0x01}, /* Headphone */
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(9)},
+ {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
+ {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_MIC_EVENT},
+ {0x21, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
+ {}
+};
+
static struct snd_kcontrol_new alc662_auto_capture_mixer[] = {
HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT),
@@ -16625,6 +16803,54 @@ static void alc663_two_hp_m2_speaker_automute(struct hda_codec *codec)
}
}
+static void alc663_two_hp_m7_speaker_automute(struct hda_codec *codec)
+{
+ unsigned int present1, present2;
+
+ present1 = snd_hda_codec_read(codec, 0x1b, 0,
+ AC_VERB_GET_PIN_SENSE, 0)
+ & AC_PINSENSE_PRESENCE;
+ present2 = snd_hda_codec_read(codec, 0x21, 0,
+ AC_VERB_GET_PIN_SENSE, 0)
+ & AC_PINSENSE_PRESENCE;
+
+ if (present1 || present2) {
+ snd_hda_codec_write_cache(codec, 0x14, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ snd_hda_codec_write_cache(codec, 0x17, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ } else {
+ snd_hda_codec_write_cache(codec, 0x14, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ snd_hda_codec_write_cache(codec, 0x17, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ }
+}
+
+static void alc663_two_hp_m8_speaker_automute(struct hda_codec *codec)
+{
+ unsigned int present1, present2;
+
+ present1 = snd_hda_codec_read(codec, 0x21, 0,
+ AC_VERB_GET_PIN_SENSE, 0)
+ & AC_PINSENSE_PRESENCE;
+ present2 = snd_hda_codec_read(codec, 0x15, 0,
+ AC_VERB_GET_PIN_SENSE, 0)
+ & AC_PINSENSE_PRESENCE;
+
+ if (present1 || present2) {
+ snd_hda_codec_write_cache(codec, 0x14, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ snd_hda_codec_write_cache(codec, 0x17, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ } else {
+ snd_hda_codec_write_cache(codec, 0x14, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ snd_hda_codec_write_cache(codec, 0x17, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ }
+}
+
static void alc663_m51va_unsol_event(struct hda_codec *codec,
unsigned int res)
{
@@ -16644,7 +16870,7 @@ static void alc663_m51va_setup(struct hda_codec *codec)
spec->ext_mic.pin = 0x18;
spec->ext_mic.mux_idx = 0;
spec->int_mic.pin = 0x12;
- spec->int_mic.mux_idx = 1;
+ spec->int_mic.mux_idx = 9;
spec->auto_mic = 1;
}
@@ -16656,7 +16882,17 @@ static void alc663_m51va_inithook(struct hda_codec *codec)
/* ***************** Mode1 ******************************/
#define alc663_mode1_unsol_event alc663_m51va_unsol_event
-#define alc663_mode1_setup alc663_m51va_setup
+
+static void alc663_mode1_setup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ spec->ext_mic.pin = 0x18;
+ spec->ext_mic.mux_idx = 0;
+ spec->int_mic.pin = 0x19;
+ spec->int_mic.mux_idx = 1;
+ spec->auto_mic = 1;
+}
+
#define alc663_mode1_inithook alc663_m51va_inithook
/* ***************** Mode2 ******************************/
@@ -16673,7 +16909,7 @@ static void alc662_mode2_unsol_event(struct hda_codec *codec,
}
}
-#define alc662_mode2_setup alc663_m51va_setup
+#define alc662_mode2_setup alc663_mode1_setup
static void alc662_mode2_inithook(struct hda_codec *codec)
{
@@ -16694,7 +16930,7 @@ static void alc663_mode3_unsol_event(struct hda_codec *codec,
}
}
-#define alc663_mode3_setup alc663_m51va_setup
+#define alc663_mode3_setup alc663_mode1_setup
static void alc663_mode3_inithook(struct hda_codec *codec)
{
@@ -16715,7 +16951,7 @@ static void alc663_mode4_unsol_event(struct hda_codec *codec,
}
}
-#define alc663_mode4_setup alc663_m51va_setup
+#define alc663_mode4_setup alc663_mode1_setup
static void alc663_mode4_inithook(struct hda_codec *codec)
{
@@ -16736,7 +16972,7 @@ static void alc663_mode5_unsol_event(struct hda_codec *codec,
}
}
-#define alc663_mode5_setup alc663_m51va_setup
+#define alc663_mode5_setup alc663_mode1_setup
static void alc663_mode5_inithook(struct hda_codec *codec)
{
@@ -16757,7 +16993,7 @@ static void alc663_mode6_unsol_event(struct hda_codec *codec,
}
}
-#define alc663_mode6_setup alc663_m51va_setup
+#define alc663_mode6_setup alc663_mode1_setup
static void alc663_mode6_inithook(struct hda_codec *codec)
{
@@ -16765,6 +17001,50 @@ static void alc663_mode6_inithook(struct hda_codec *codec)
alc_mic_automute(codec);
}
+/* ***************** Mode7 ******************************/
+static void alc663_mode7_unsol_event(struct hda_codec *codec,
+ unsigned int res)
+{
+ switch (res >> 26) {
+ case ALC880_HP_EVENT:
+ alc663_two_hp_m7_speaker_automute(codec);
+ break;
+ case ALC880_MIC_EVENT:
+ alc_mic_automute(codec);
+ break;
+ }
+}
+
+#define alc663_mode7_setup alc663_mode1_setup
+
+static void alc663_mode7_inithook(struct hda_codec *codec)
+{
+ alc663_two_hp_m7_speaker_automute(codec);
+ alc_mic_automute(codec);
+}
+
+/* ***************** Mode8 ******************************/
+static void alc663_mode8_unsol_event(struct hda_codec *codec,
+ unsigned int res)
+{
+ switch (res >> 26) {
+ case ALC880_HP_EVENT:
+ alc663_two_hp_m8_speaker_automute(codec);
+ break;
+ case ALC880_MIC_EVENT:
+ alc_mic_automute(codec);
+ break;
+ }
+}
+
+#define alc663_mode8_setup alc663_m51va_setup
+
+static void alc663_mode8_inithook(struct hda_codec *codec)
+{
+ alc663_two_hp_m8_speaker_automute(codec);
+ alc_mic_automute(codec);
+}
+
static void alc663_g71v_hp_automute(struct hda_codec *codec)
{
unsigned int present;
@@ -16899,6 +17179,8 @@ static const char *alc662_models[ALC662_MODEL_LAST] = {
[ALC663_ASUS_MODE4] = "asus-mode4",
[ALC663_ASUS_MODE5] = "asus-mode5",
[ALC663_ASUS_MODE6] = "asus-mode6",
+ [ALC663_ASUS_MODE7] = "asus-mode7",
+ [ALC663_ASUS_MODE8] = "asus-mode8",
[ALC272_DELL] = "dell",
[ALC272_DELL_ZM1] = "dell-zm1",
[ALC272_SAMSUNG_NC10] = "samsung-nc10",
@@ -16915,12 +17197,22 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x11d3, "ASUS NB", ALC663_ASUS_MODE1),
SND_PCI_QUIRK(0x1043, 0x11f3, "ASUS NB", ALC662_ASUS_MODE2),
SND_PCI_QUIRK(0x1043, 0x1203, "ASUS NB", ALC663_ASUS_MODE1),
+ SND_PCI_QUIRK(0x1043, 0x1303, "ASUS G60J", ALC663_ASUS_MODE1),
+ SND_PCI_QUIRK(0x1043, 0x1333, "ASUS G60Jx", ALC663_ASUS_MODE1),
SND_PCI_QUIRK(0x1043, 0x1339, "ASUS NB", ALC662_ASUS_MODE2),
+ SND_PCI_QUIRK(0x1043, 0x13e3, "ASUS N71JA", ALC663_ASUS_MODE7),
+ SND_PCI_QUIRK(0x1043, 0x1463, "ASUS N71", ALC663_ASUS_MODE7),
+ SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G72", ALC663_ASUS_MODE8),
+ SND_PCI_QUIRK(0x1043, 0x1563, "ASUS N90", ALC663_ASUS_MODE3),
+ SND_PCI_QUIRK(0x1043, 0x15d3, "ASUS N50SF F50SF", ALC663_ASUS_MODE1),
SND_PCI_QUIRK(0x1043, 0x16c3, "ASUS NB", ALC662_ASUS_MODE2),
+ SND_PCI_QUIRK(0x1043, 0x16f3, "ASUS K40C K50C", ALC662_ASUS_MODE2),
+ SND_PCI_QUIRK(0x1043, 0x1733, "ASUS N81De", ALC663_ASUS_MODE1),
SND_PCI_QUIRK(0x1043, 0x1753, "ASUS NB", ALC662_ASUS_MODE2),
SND_PCI_QUIRK(0x1043, 0x1763, "ASUS NB", ALC663_ASUS_MODE6),
SND_PCI_QUIRK(0x1043, 0x1765, "ASUS NB", ALC663_ASUS_MODE6),
SND_PCI_QUIRK(0x1043, 0x1783, "ASUS NB", ALC662_ASUS_MODE2),
+ SND_PCI_QUIRK(0x1043, 0x1793, "ASUS F50GX", ALC663_ASUS_MODE1),
SND_PCI_QUIRK(0x1043, 0x17b3, "ASUS F70SL", ALC663_ASUS_MODE3),
SND_PCI_QUIRK(0x1043, 0x17c3, "ASUS UX20", ALC663_ASUS_M51VA),
SND_PCI_QUIRK(0x1043, 0x17f3, "ASUS X58LE", ALC662_ASUS_MODE2),
@@ -17204,6 +17496,36 @@ static struct alc_config_preset alc662_presets[] = {
.setup = alc663_mode6_setup,
.init_hook = alc663_mode6_inithook,
},
+ [ALC663_ASUS_MODE7] = {
+ .mixers = { alc663_mode7_mixer },
+ .cap_mixer = alc662_auto_capture_mixer,
+ .init_verbs = { alc662_init_verbs,
+ alc663_mode7_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc662_dac_nids),
+ .hp_nid = 0x03,
+ .dac_nids = alc662_dac_nids,
+ .dig_out_nid = ALC662_DIGOUT_NID,
+ .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
+ .channel_mode = alc662_3ST_2ch_modes,
+ .unsol_event = alc663_mode7_unsol_event,
+ .setup = alc663_mode7_setup,
+ .init_hook = alc663_mode7_inithook,
+ },
+ [ALC663_ASUS_MODE8] = {
+ .mixers = { alc663_mode8_mixer },
+ .cap_mixer = alc662_auto_capture_mixer,
+ .init_verbs = { alc662_init_verbs,
+ alc663_mode8_init_verbs },
+ .num_dacs = ARRAY_SIZE(alc662_dac_nids),
+ .hp_nid = 0x03,
+ .dac_nids = alc662_dac_nids,
+ .dig_out_nid = ALC662_DIGOUT_NID,
+ .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
+ .channel_mode = alc662_3ST_2ch_modes,
+ .unsol_event = alc663_mode8_unsol_event,
+ .setup = alc663_mode8_setup,
+ .init_hook = alc663_mode8_inithook,
+ },
[ALC272_DELL] = {
.mixers = { alc663_m51va_mixer },
.cap_mixer = alc272_auto_capture_mixer,
@@ -17687,7 +18009,9 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
{ .id = 0x10ec0268, .name = "ALC268", .patch = patch_alc268 },
{ .id = 0x10ec0269, .name = "ALC269", .patch = patch_alc269 },
+ { .id = 0x10ec0270, .name = "ALC270", .patch = patch_alc269 },
{ .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
+ { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
.patch = patch_alc861 },
{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6b0bc040c3b..3d59f832584 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -209,6 +209,7 @@ struct sigmatel_spec {
unsigned int gpio_data;
unsigned int gpio_mute;
unsigned int gpio_led;
+ unsigned int gpio_led_polarity;
/* stream */
unsigned int stream_delay;
@@ -1538,6 +1539,13 @@ static unsigned int alienware_m17x_pin_configs[13] = {
0x904601b0,
};
+static unsigned int intel_dg45id_pin_configs[14] = {
+ 0x02214230, 0x02A19240, 0x01013214, 0x01014210,
+ 0x01A19250, 0x01011212, 0x01016211, 0x40f000f0,
+ 0x40f000f0, 0x40f000f0, 0x40f000f0, 0x014510A0,
+ 0x074510B0, 0x40f000f0
+};
+
static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
[STAC_92HD73XX_REF] = ref92hd73xx_pin_configs,
[STAC_DELL_M6_AMIC] = dell_m6_pin_configs,
@@ -1545,6 +1553,7 @@ static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
[STAC_DELL_M6_BOTH] = dell_m6_pin_configs,
[STAC_DELL_EQ] = dell_m6_pin_configs,
[STAC_ALIENWARE_M17X] = alienware_m17x_pin_configs,
+ [STAC_92HD73XX_INTEL] = intel_dg45id_pin_configs,
};
static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
@@ -4724,13 +4733,61 @@ static void stac92xx_unsol_event(struct hda_codec *codec, unsigned int res)
}
}
-static int hp_bseries_system(u32 subsystem_id)
+/*
+ * This method searches for the mute LED GPIO configuration
+ * provided as OEM string in SMBIOS. The format of that string
+ * is HP_Mute_LED_P_G or HP_Mute_LED_P
+ * where P can be 0 or 1 and defines mute LED GPIO control state (low/high)
+ * that corresponds to the NOT muted state of the master volume
+ * and G is the index of the GPIO to use as the mute LED control (0..9)
+ * If _G portion is missing it is assigned based on the codec ID
+ *
+ * So, HP B-series like systems may have HP_Mute_LED_0 (current models)
+ * or HP_Mute_LED_0_3 (future models) OEM SMBIOS strings
+ */
+static int find_mute_led_gpio(struct hda_codec *codec)
+{
+ struct sigmatel_spec *spec = codec->spec;
+ const struct dmi_device *dev = NULL;
+
+ if ((codec->subsystem_id >> 16) == PCI_VENDOR_ID_HP) {
+ while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
+ NULL, dev))) {
+ if (sscanf(dev->name, "HP_Mute_LED_%d_%d",
+ &spec->gpio_led_polarity,
+ &spec->gpio_led) == 2) {
+ spec->gpio_led = 1 << spec->gpio_led;
+ return 1;
+ }
+ if (sscanf(dev->name, "HP_Mute_LED_%d",
+ &spec->gpio_led_polarity) == 1) {
+ switch (codec->vendor_id) {
+ case 0x111d7608:
+ /* GPIO 0 */
+ spec->gpio_led = 0x01;
+ return 1;
+ case 0x111d7600:
+ case 0x111d7601:
+ case 0x111d7602:
+ case 0x111d7603:
+ /* GPIO 3 */
+ spec->gpio_led = 0x08;
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int hp_blike_system(u32 subsystem_id)
{
switch (subsystem_id) {
- case 0x103c307e:
- case 0x103c307f:
- case 0x103c3080:
- case 0x103c3081:
+ case 0x103c1520:
+ case 0x103c1521:
+ case 0x103c1523:
+ case 0x103c1524:
+ case 0x103c1525:
case 0x103c1722:
case 0x103c1723:
case 0x103c1724:
@@ -4739,6 +4796,14 @@ static int hp_bseries_system(u32 subsystem_id)
case 0x103c1727:
case 0x103c1728:
case 0x103c1729:
+ case 0x103c172a:
+ case 0x103c172b:
+ case 0x103c307e:
+ case 0x103c307f:
+ case 0x103c3080:
+ case 0x103c3081:
+ case 0x103c7007:
+ case 0x103c7008:
return 1;
}
return 0;
@@ -4833,7 +4898,7 @@ static int stac92xx_hp_check_power_status(struct hda_codec *codec,
else
spec->gpio_data |= spec->gpio_led; /* white */
- if (hp_bseries_system(codec->subsystem_id)) {
+ if (!spec->gpio_led_polarity) {
/* LED state is inverted on these systems */
spec->gpio_data ^= spec->gpio_led;
}
@@ -5526,7 +5591,7 @@ again:
break;
}
- if (hp_bseries_system(codec->subsystem_id)) {
+ if (hp_blike_system(codec->subsystem_id)) {
pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER ||
@@ -5544,26 +5609,10 @@ again:
}
}
- if ((codec->subsystem_id >> 16) == PCI_VENDOR_ID_HP) {
- const struct dmi_device *dev = NULL;
- while ((dev = dmi_find_device(DMI_DEV_TYPE_OEM_STRING,
- NULL, dev))) {
- if (strcmp(dev->name, "HP_Mute_LED_1")) {
- switch (codec->vendor_id) {
- case 0x111d7608:
- spec->gpio_led = 0x01;
- break;
- case 0x111d7600:
- case 0x111d7601:
- case 0x111d7602:
- case 0x111d7603:
- spec->gpio_led = 0x08;
- break;
- }
- break;
- }
- }
- }
+ if (find_mute_led_gpio(codec))
+ snd_printd("mute LED gpio %d polarity %d\n",
+ spec->gpio_led,
+ spec->gpio_led_polarity);
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
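
Note: find_mute_led_gpio() above recovers the LED polarity and GPIO index by sscanf()-ing the SMBIOS OEM string described in the comment (HP_Mute_LED_P_G, falling back to HP_Mute_LED_P plus a per-codec default). The parsing step on its own, as an ordinary userspace sketch:

#include <stdio.h>

/* Parse "HP_Mute_LED_P_G" / "HP_Mute_LED_P" as described above.
 * Returns 1 and fills *polarity / *gpio_mask on success, 0 otherwise.
 * default_gpio stands in for the per-codec fallback table. */
static int parse_mute_led(const char *s, int default_gpio,
			  int *polarity, unsigned *gpio_mask)
{
	int gpio;

	if (sscanf(s, "HP_Mute_LED_%d_%d", polarity, &gpio) == 2) {
		*gpio_mask = 1u << gpio;        /* convert index to bitmask */
		return 1;
	}
	if (sscanf(s, "HP_Mute_LED_%d", polarity) == 1) {
		*gpio_mask = 1u << default_gpio;
		return 1;
	}
	return 0;
}

int main(void)
{
	int pol;
	unsigned mask;

	if (parse_mute_led("HP_Mute_LED_0_3", 0, &pol, &mask))
		printf("polarity %d, gpio mask 0x%02x\n", pol, mask);
	return 0;
}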
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 7717e01fc07..edaa729126b 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -143,7 +143,8 @@ static int snd_pdacf_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 16;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE;
- // link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ /* FIXME: This driver should be updated to allow for dynamic IRQ sharing */
+ /* link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; */
link->irq.Handler = pdacf_interrupt;
link->conf.Attributes = CONF_ENABLE_IRQ;
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
index d057e648964..5cfa608823f 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
@@ -51,7 +51,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
return 0; /* already enough large */
vfree(runtime->dma_area);
}
- runtime->dma_area = vmalloc_32(size);
+ runtime->dma_area = vmalloc_32_user(size);
if (! runtime->dma_area)
return -ENOMEM;
runtime->dma_bytes = size;
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index b69861d5216..3ef16bbc8c8 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -470,7 +470,7 @@ EXPORT_SYMBOL_GPL(soc_codec_dev_ak4642);
static int __init ak4642_modinit(void)
{
- int ret;
+ int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&ak4642_i2c_driver);
#endif
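
Note: initialising ret to 0 matters here because the only assignment to it sits inside the CONFIG_I2C conditional; with I2C disabled the function would otherwise return an uninitialised value. A reduced sketch of the pattern:

static int __init example_modinit(void)
{
	int ret = 0;    /* stays 0 (success) when the #if branch is compiled out */
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&ak4642_i2c_driver);
#endif
	return ret;
}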
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index bbc72c2ddfc..81b8c9dfe7f 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -191,6 +191,7 @@ static int ac97_analog_prepare(struct snd_pcm_substream *substream,
vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
vra |= 0x1; /* enable variable rate audio */
+ vra &= ~0x4; /* disable SPDIF output */
stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
@@ -221,22 +222,6 @@ static int ac97_digital_prepare(struct snd_pcm_substream *substream,
return stac9766_ac97_write(codec, reg, runtime->rate);
}
-static int ac97_digital_trigger(struct snd_pcm_substream *substream,
- int cmd, struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
- unsigned short vra;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_STOP:
- vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
- vra &= !0x04;
- stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
- break;
- }
- return 0;
-}
-
static int stac9766_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -315,7 +300,6 @@ static struct snd_soc_dai_ops stac9766_dai_ops_analog = {
static struct snd_soc_dai_ops stac9766_dai_ops_digital = {
.prepare = ac97_digital_prepare,
- .trigger = ac97_digital_trigger,
};
struct snd_soc_dai stac9766_dai[] = {
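
Note: the removed trigger callback cleared the S/PDIF bit with `vra &= !0x04`, where `!` is a logical NOT yielding 0, so the AND wiped every bit; the replacement in ac97_analog_prepare() uses the bitwise complement `~0x4`. The difference in one compilable snippet:

#include <stdio.h>

int main(void)
{
	unsigned short vra = 0x0105;

	printf("vra & !0x04 = 0x%04x\n", (unsigned)(vra & !0x04)); /* 0x0000: ! gives 0 */
	printf("vra & ~0x04 = 0x%04x\n", (unsigned)(vra & ~0x04)); /* 0x0101: clears bit 2 only */
	return 0;
}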
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 5f1681f6ca7..2a27f7b5672 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -26,7 +26,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -175,7 +175,7 @@ static int twl4030_write(struct snd_soc_codec *codec,
{
twl4030_write_reg_cache(codec, reg, value);
if (likely(reg < TWL4030_REG_SW_SHADOW))
- return twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value,
+ return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value,
reg);
else
return 0;
@@ -261,7 +261,7 @@ static void twl4030_power_up(struct snd_soc_codec *codec)
do {
/* this takes a little while, so don't slam i2c */
udelay(2000);
- twl4030_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte,
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte,
TWL4030_REG_ANAMICL);
} while ((i++ < 100) &&
((byte & TWL4030_CNCL_OFFSET_START) ==
@@ -542,7 +542,7 @@ static int pin_name##pga_event(struct snd_soc_dapm_widget *w, \
break; \
case SND_SOC_DAPM_POST_PMD: \
reg_val = twl4030_read_reg_cache(w->codec, reg); \
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
reg_val & (~mask), \
reg); \
break; \
@@ -679,7 +679,7 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
twl4030->sysclk) + 1);
/* Bypass the reg_cache to mute the headset */
- twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
hs_gain & (~0x0f),
TWL4030_REG_HS_GAIN_SET);
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index f82125d9e85..ebbf11b653a 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1340,9 +1340,10 @@ static int wm8350_resume(struct platform_device *pdev)
return 0;
}
-static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data)
+static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
{
struct wm8350_data *priv = data;
+ struct wm8350 *wm8350 = priv->codec.control_data;
u16 reg;
int report;
int mask;
@@ -1365,7 +1366,7 @@ static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data)
if (!jack->jack) {
dev_warn(wm8350->dev, "Jack interrupt called with no jack\n");
- return;
+ return IRQ_NONE;
}
/* Debounce */
@@ -1378,6 +1379,8 @@ static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data)
report = 0;
snd_soc_jack_report(jack->jack, report, jack->report);
+
+ return IRQ_HANDLED;
}
/**
@@ -1421,9 +1424,7 @@ int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which,
wm8350_set_bits(wm8350, WM8350_JACK_DETECT, ena);
/* Sync status */
- wm8350_hp_jack_handler(wm8350, irq, priv);
-
- wm8350_unmask_irq(wm8350, irq);
+ wm8350_hp_jack_handler(irq, priv);
return 0;
}
@@ -1482,12 +1483,16 @@ static int wm8350_probe(struct platform_device *pdev)
wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME,
WM8350_OUT2_VU | WM8350_OUT2R_MUTE);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R);
+ /* Make sure jack detect is disabled to start off with */
+ wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
+ WM8350_JDL_ENA | WM8350_JDR_ENA);
+
wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L,
- wm8350_hp_jack_handler, priv);
+ wm8350_hp_jack_handler, 0, "Left jack detect",
+ priv);
wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R,
- wm8350_hp_jack_handler, priv);
+ wm8350_hp_jack_handler, 0, "Right jack detect",
+ priv);
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
if (ret < 0) {
@@ -1516,8 +1521,6 @@ static int wm8350_remove(struct platform_device *pdev)
WM8350_JDL_ENA | WM8350_JDR_ENA);
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L);
- wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R);
wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L);
wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R);
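
Note: the jack handler is converted from the old wm8350-specific callback (void, taking the wm8350 pointer) to a standard irqreturn_t handler that receives only (irq, data) and reports IRQ_HANDLED or IRQ_NONE, so the device context now has to be recovered from the data cookie. A skeletal sketch of that handler shape (my_device and the pending flag are placeholders):

#include <linux/interrupt.h>

struct my_device {
	int something_pending;
};

static irqreturn_t my_jack_handler(int irq, void *data)
{
	struct my_device *dev = data;   /* context comes back via the cookie */

	if (!dev->something_pending)
		return IRQ_NONE;        /* not ours / nothing to do */

	/* ... acknowledge and report the event ... */
	dev->something_pending = 0;
	return IRQ_HANDLED;
}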
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index c9438dd62df..dbc368c0826 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -199,7 +199,7 @@ static void wm8900_reset(struct snd_soc_codec *codec)
snd_soc_write(codec, WM8900_REG_RESET, 0);
memcpy(codec->reg_cache, wm8900_reg_defaults,
- sizeof(codec->reg_cache));
+ sizeof(wm8900_reg_defaults));
}
static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
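
Note: reg_cache is a pointer in the codec structure, so `sizeof(codec->reg_cache)` evaluates to the pointer size (4 or 8 bytes) rather than the size of the defaults being copied; sizing the memcpy by the source array fixes that. The pitfall in isolation:

#include <stdio.h>
#include <string.h>

static const unsigned short defaults[16] = { 0x0003, 0x8000 /* ... */ };

int main(void)
{
	unsigned short cache_storage[16];
	void *reg_cache = cache_storage;        /* like codec->reg_cache */

	printf("sizeof(reg_cache) = %zu\n", sizeof(reg_cache));   /* 4 or 8 */
	printf("sizeof(defaults)  = %zu\n", sizeof(defaults));    /* 32 */

	memcpy(reg_cache, defaults, sizeof(defaults));  /* the fixed form */
	return 0;
}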
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 81c57b5c591..a808675388f 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -47,7 +47,7 @@ static const u16 wm8974_reg[WM8974_CACHEREGNUM] = {
};
#define WM8974_POWER1_BIASEN 0x08
-#define WM8974_POWER1_BUFIOEN 0x10
+#define WM8974_POWER1_BUFIOEN 0x04
struct wm8974_priv {
struct snd_soc_codec codec;
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index d49458a29bb..3db8a6c523f 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -23,9 +23,9 @@ obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
obj-$(CONFIG_SND_OMAP_SOC_AMS_DELTA) += snd-soc-ams-delta.o
obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o
-obj-$(CONFIG_MACH_OMAP2EVM) += snd-soc-omap2evm.o
-obj-$(CONFIG_MACH_OMAP3EVM) += snd-soc-omap3evm.o
-obj-$(CONFIG_MACH_OMAP3517EVM) += snd-soc-am3517evm.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP2EVM) += snd-soc-omap2evm.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP3EVM) += snd-soc-omap3evm.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP3517EVM) += snd-soc-am3517evm.o
obj-$(CONFIG_SND_OMAP_SOC_SDP3430) += snd-soc-sdp3430.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index c071f9603a3..3c85c0f9282 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -24,7 +24,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl4030.h>
+#include <linux/i2c/twl.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
@@ -321,11 +321,11 @@ static int __init sdp3430_soc_init(void)
*(unsigned int *)sdp3430_dai[1].cpu_dai->private_data = 2; /* McBSP3 */
/* Set TWL4030 GPIO6 as EXTMUTE signal */
- twl4030_i2c_read_u8(TWL4030_MODULE_INTBR, &pin_mux,
+ twl_i2c_read_u8(TWL4030_MODULE_INTBR, &pin_mux,
TWL4030_INTBR_PMBR1);
pin_mux &= ~TWL4030_GPIO6_PWM0_MUTE(0x03);
pin_mux |= TWL4030_GPIO6_PWM0_MUTE(0x02);
- twl4030_i2c_write_u8(TWL4030_MODULE_INTBR, pin_mux,
+ twl_i2c_write_u8(TWL4030_MODULE_INTBR, pin_mux,
TWL4030_INTBR_PMBR1);
ret = platform_device_add(sdp3430_snd_device);
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c
index d441c3b6463..4984754f329 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.c
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.c
@@ -312,7 +312,7 @@ int simtec_audio_resume(struct device *dev)
return 0;
}
-struct dev_pm_ops simtec_audio_pmops = {
+const struct dev_pm_ops simtec_audio_pmops = {
.resume = simtec_audio_resume,
};
EXPORT_SYMBOL_GPL(simtec_audio_pmops);
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h
index 2714203af16..e18faee30cc 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.h
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.h
@@ -15,7 +15,7 @@ extern int simtec_audio_core_probe(struct platform_device *pdev,
extern int simtec_audio_remove(struct platform_device *pdev);
#ifdef CONFIG_PM
-extern struct dev_pm_ops simtec_audio_pmops;
+extern const struct dev_pm_ops simtec_audio_pmops;
#define simtec_audio_pm &simtec_audio_pmops
#else
#define simtec_audio_pm NULL
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index ef8f28284cb..0a6440c6f54 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1236,7 +1236,7 @@ static int soc_poweroff(struct device *dev)
return 0;
}
-static struct dev_pm_ops soc_pm_ops = {
+static const struct dev_pm_ops soc_pm_ops = {
.suspend = soc_suspend,
.resume = soc_resume,
.poweroff = soc_poweroff,
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index b074a594c59..4963defee18 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -752,7 +752,7 @@ static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t s
return 0; /* already large enough */
vfree(runtime->dma_area);
}
- runtime->dma_area = vmalloc(size);
+ runtime->dma_area = vmalloc_user(size);
if (!runtime->dma_area)
return -ENOMEM;
runtime->dma_bytes = size;
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
new file mode 100644
index 00000000000..8974e208cba
--- /dev/null
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -0,0 +1,55 @@
+perf-diff(1)
+==============
+
+NAME
+----
+perf-diff - Read two perf.data files and display the differential profile
+
+SYNOPSIS
+--------
+[verse]
+'perf diff' [oldfile] [newfile]
+
+DESCRIPTION
+-----------
+This command displays the performance difference between two perf.data files
+captured via perf record.
+
+If no parameters are passed, it will default to perf.data.old and perf.data.
+
+OPTIONS
+-------
+-d::
+--dsos=::
+ Only consider symbols in these dsos. CSV that understands
+ file://filename entries.
+
+-C::
+--comms=::
+ Only consider symbols in these comms. CSV that understands
+ file://filename entries.
+
+-S::
+--symbols=::
+ Only consider these symbols. CSV that understands
+ file://filename entries.
+
+-s::
+--sort=::
+ Sort by key(s): pid, comm, dso, symbol.
+
+-t::
+--field-separator=::
+
+ Use a special separator character and don't pad with spaces, replacing
+	all occurrences of this separator in symbol names (and other output)
+	with a '.' character, so that it is the only non-valid separator.
+
+-v::
+--verbose::
+ Be verbose, for instance, show the raw counts in addition to the
+ diff.
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
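A typical invocation, as a sketch (the workload command is illustrative):

    perf record -o perf.data.old ./my-workload    # baseline profile
    perf record ./my-workload                     # profile after a change
    perf diff                                     # compares perf.data.old against perf.data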
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index 44b0ce35c28..eac4d852e7c 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -8,16 +8,16 @@ perf-kmem - Tool to trace/measure kernel memory(slab) properties
SYNOPSIS
--------
[verse]
-'perf kmem' {record} [<options>]
+'perf kmem' {record|stat} [<options>]
DESCRIPTION
-----------
-There's two variants of perf kmem:
+There are two variants of perf kmem:
'perf kmem record <command>' to record the kmem events
of an arbitrary workload.
- 'perf kmem' to report kernel memory statistics.
+ 'perf kmem stat' to report kernel memory statistics.
OPTIONS
-------
@@ -25,8 +25,11 @@ OPTIONS
--input=<file>::
Select the input file (default: perf.data)
---stat=<caller|alloc>::
- Select per callsite or per allocation statistics
+--caller::
+ Show per-callsite statistics
+
+--alloc::
+ Show per-allocation statistics
-s <key[,key2...]>::
--sort=<key[,key2...]>::
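Usage sketch matching the new record/stat split (the workload command is illustrative):

    perf kmem record ./my-workload        # record kmem events for the workload
    perf kmem stat --caller --sort=frag   # report per-callsite statistics sorted by fragmentation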
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index 9270594e6df..250e391b4bc 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -8,10 +8,13 @@ perf-probe - Define new dynamic tracepoints
SYNOPSIS
--------
[verse]
-'perf probe' [options] --add 'PROBE' [--add 'PROBE' ...]
+'perf probe' [options] --add='PROBE' [...]
or
-'perf probe' [options] 'PROBE' ['PROBE' ...]
-
+'perf probe' [options] PROBE
+or
+'perf probe' [options] --del='[GROUP:]EVENT' [...]
+or
+'perf probe' --list
DESCRIPTION
-----------
@@ -31,15 +34,24 @@ OPTIONS
Be more verbose (show parsed arguments, etc).
-a::
---add::
- Define a probe point (see PROBE SYNTAX for detail)
+--add=::
+	Define a probe event (see PROBE SYNTAX for details).
+
+-d::
+--del=::
+ Delete a probe event.
+
+-l::
+--list::
+	List current probe events.
PROBE SYNTAX
------------
Probe points are defined by the following syntax.
- "FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]"
+ "[EVENT=]FUNC[+OFFS|:RLN|%return][@SRC]|SRC:ALN [ARG ...]"
+'EVENT' specifies the name of the new event; if omitted, it will be set to the name of the probed function. Currently, the event group name is set to 'probe'.
'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, 'RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. In addition, 'SRC' specifies a source file which has that function.
It is also possible to specify a probe point by the source line number by using 'SRC:ALN' syntax, where 'SRC' is the source file path and 'ALN' is the line number.
'ARG' specifies the arguments of this probe point. You can use the name of local variable, or kprobe-tracer argument format (e.g. $retval, %ax, etc).
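For illustration, following the syntax above (function and argument names are only examples):

    perf probe --add='myopen=do_sys_open flags'   # probe do_sys_open entry, record its 'flags' argument
    perf probe --list                             # show currently defined probe events
    perf probe --del='probe:myopen'               # delete the event again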
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 9dccb180b7a..abfabe9147a 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -39,6 +39,10 @@ OPTIONS
Only consider these symbols. CSV that understands
file://filename entries.
+-s::
+--sort=::
+ Sort by key(s): pid, comm, dso, symbol, parent.
+
-w::
--field-width=::
Force each column width to the provided list, for large terminal
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 07065efa60e..60e5900da48 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -8,18 +8,43 @@ perf-trace - Read perf.data (created by perf record) and display trace output
SYNOPSIS
--------
[verse]
-'perf trace' [-i <file> | --input=file] symbol_name
+'perf trace' {record <script> | report <script> [args] }
DESCRIPTION
-----------
This command reads the input file and displays the trace recorded.
+There are several variants of perf trace:
+
+ 'perf trace' to see a detailed trace of the workload that was
+ recorded.
+
+ 'perf trace record <script>' to record the events required for 'perf
+ trace report'. <script> is the name displayed in the output of
+ 'perf trace --list' i.e. the actual script name minus any language
+ extension.
+
+ 'perf trace report <script>' to run and display the results of
+ <script>. <script> is the name displayed in the output of 'perf
+ trace --list' i.e. the actual script name minus any language
+ extension. The perf.data output from a previous run of 'perf trace
+ record <script>' is used and should be present for this command to
+ succeed.
+
OPTIONS
-------
-D::
--dump-raw-trace=::
Display verbose dump of the trace data.
+-L::
+--Latency=::
+ Show latency attributes (irqs/preemption disabled, etc).
+
+-l::
+--list=::
+ Display a list of available trace scripts.
+
-s::
--script=::
Process trace data with the given script ([lang]:script[.ext]).
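A usage sketch (the script name is illustrative and must appear in 'perf trace --list'):

    perf trace --list                 # show available trace scripts
    perf trace record rw-by-pid       # record the events the script needs
    perf trace report rw-by-pid       # run the script over the recorded perf.data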
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 23ec66098bd..4390d225686 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -237,8 +237,8 @@ lib = lib
export prefix bindir sharedir sysconfdir
-CC = gcc
-AR = ar
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
RM = rm -f
TAR = tar
FIND = find
@@ -356,7 +356,9 @@ LIB_H += util/parse-options.h
LIB_H += util/parse-events.h
LIB_H += util/quote.h
LIB_H += util/util.h
+LIB_H += util/header.h
LIB_H += util/help.h
+LIB_H += util/session.h
LIB_H += util/strbuf.h
LIB_H += util/string.h
LIB_H += util/strlist.h
@@ -368,7 +370,6 @@ LIB_H += util/values.h
LIB_H += util/sort.h
LIB_H += util/hist.h
LIB_H += util/thread.h
-LIB_H += util/data_map.h
LIB_H += util/probe-finder.h
LIB_H += util/probe-event.h
@@ -405,6 +406,7 @@ LIB_OBJS += util/callchain.o
LIB_OBJS += util/values.o
LIB_OBJS += util/debug.o
LIB_OBJS += util/map.o
+LIB_OBJS += util/session.o
LIB_OBJS += util/thread.o
LIB_OBJS += util/trace-event-parse.o
LIB_OBJS += util/trace-event-read.o
@@ -425,6 +427,7 @@ BUILTIN_OBJS += bench/sched-messaging.o
BUILTIN_OBJS += bench/sched-pipe.o
BUILTIN_OBJS += bench/mem-memcpy.o
+BUILTIN_OBJS += builtin-diff.o
BUILTIN_OBJS += builtin-help.o
BUILTIN_OBJS += builtin-sched.o
BUILTIN_OBJS += builtin-buildid-list.o
@@ -484,16 +487,19 @@ else
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
-ifneq ($(shell sh -c "(echo '\#include <libdwarf/dwarf.h>'; echo '\#include <libdwarf/libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#ifndef _MIPS_SZLONG'; echo '\#define _MIPS_SZLONG 0'; echo '\#endif'; echo '\#include <dwarf.h>'; echo '\#include <libdwarf.h>'; echo 'int main(void) { Dwarf_Debug dbg; Dwarf_Error err; Dwarf_Ranges *rng; dwarf_init(0, DW_DLC_READ, 0, 0, &dbg, &err); dwarf_get_ranges(dbg, 0, &rng, 0, 0, &err); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/libdwarf -ldwarf -lelf -o /dev/null $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
msg := $(warning No libdwarf.h found or old libdwarf.h found, disables dwarf support. Please install libdwarf-dev/libdwarf-devel >= 20081231);
BASIC_CFLAGS += -DNO_LIBDWARF
else
+ BASIC_CFLAGS += -I/usr/include/libdwarf
EXTLIBS += -lelf -ldwarf
LIB_OBJS += util/probe-finder.o
endif
+ifndef NO_LIBPERL
PERL_EMBED_LDOPTS = `perl -MExtUtils::Embed -e ldopts 2>/dev/null`
PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+endif
ifneq ($(shell sh -c "(echo '\#include <EXTERN.h>'; echo '\#include <perl.h>'; echo 'int main(void) { perl_alloc(); return 0; }') | $(CC) -x c - $(PERL_EMBED_CCOPTS) -o /dev/null $(PERL_EMBED_LDOPTS) > /dev/null 2>&1 && echo y"), y)
BASIC_CFLAGS += -DNO_LIBPERL
@@ -991,8 +997,6 @@ install: all
$(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
- $(INSTALL) scripts/perl/Perf-Trace-Util/Makefile.PL -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util'
- $(INSTALL) scripts/perl/Perf-Trace-Util/README -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util'
ifdef BUILT_INS
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(INSTALL) $(BUILT_INS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
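With CC and AR now honouring CROSS_COMPILE, a cross build can be invoked as, for example (toolchain prefix is illustrative):

    make -C tools/perf CROSS_COMPILE=arm-linux-gnueabi-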
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 605a2a959aa..81cee78181f 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -1,6 +1,6 @@
/*
*
- * builtin-bench-messaging.c
+ * sched-messaging.c
*
* messaging: Benchmark for scheduler and IPC mechanisms
*
@@ -320,10 +320,12 @@ int bench_sched_messaging(int argc, const char **argv,
num_groups, num_groups * 2 * num_fds,
thread_mode ? "threads" : "processes");
printf(" %14s: %lu.%03lu [sec]\n", "Total time",
- diff.tv_sec, diff.tv_usec/1000);
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
break;
case BENCH_FORMAT_SIMPLE:
- printf("%lu.%03lu\n", diff.tv_sec, diff.tv_usec/1000);
+ printf("%lu.%03lu\n", diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
break;
default:
/* reaching here is something disaster */
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 238185f9797..4f77c7c2764 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -1,6 +1,6 @@
/*
*
- * builtin-bench-pipe.c
+ * sched-pipe.c
*
* pipe: Benchmark for pipe()
*
@@ -87,7 +87,8 @@ int bench_sched_pipe(int argc, const char **argv,
if (pid) {
retpid = waitpid(pid, &wait_stat, 0);
assert((retpid == pid) && WIFEXITED(wait_stat));
- return 0;
+ } else {
+ exit(0);
}
switch (bench_format) {
@@ -99,7 +100,8 @@ int bench_sched_pipe(int argc, const char **argv,
result_usec += diff.tv_usec;
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
- diff.tv_sec, diff.tv_usec/1000);
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
printf(" %14lf usecs/op\n",
(double)result_usec / (double)loops);
@@ -110,7 +112,8 @@ int bench_sched_pipe(int argc, const char **argv,
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n",
- diff.tv_sec, diff.tv_usec / 1000);
+ diff.tv_sec,
+ (unsigned long) (diff.tv_usec / 1000));
break;
default:
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 0bf2e8f9af5..593ff25006d 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -25,7 +25,7 @@
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
-#include "util/data_map.h"
+#include "util/session.h"
static char const *input_name = "perf.data";
@@ -51,11 +51,6 @@ struct sym_priv {
struct sym_ext *ext;
};
-static struct symbol_conf symbol_conf = {
- .priv_size = sizeof(struct sym_priv),
- .try_vmlinux_path = true,
-};
-
static const char *sym_hist_filter;
static int symbol_filter(struct map *map __used, struct symbol *sym)
@@ -121,30 +116,32 @@ static void hist_hit(struct hist_entry *he, u64 ip)
h->ip[offset]);
}
-static int hist_entry__add(struct addr_location *al, u64 count)
+static int perf_session__add_hist_entry(struct perf_session *self,
+ struct addr_location *al, u64 count)
{
bool hit;
- struct hist_entry *he = __hist_entry__add(al, NULL, count, &hit);
+ struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL,
+ count, &hit);
if (he == NULL)
return -ENOMEM;
hist_hit(he, al->addr);
return 0;
}
-static int process_sample_event(event_t *event)
+static int process_sample_event(event_t *event, struct perf_session *session)
{
struct addr_location al;
dump_printf("(IP, %d): %d: %p\n", event->header.misc,
event->ip.pid, (void *)(long)event->ip.ip);
- if (event__preprocess_sample(event, &al, symbol_filter) < 0) {
+ if (event__preprocess_sample(event, session, &al, symbol_filter) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- if (hist_entry__add(&al, 1)) {
+ if (!al.filtered && perf_session__add_hist_entry(session, &al, 1)) {
fprintf(stderr, "problem incrementing symbol count, "
"skipping event\n");
return -1;
@@ -428,11 +425,11 @@ static void annotate_sym(struct hist_entry *he)
free_source_line(he, len);
}
-static void find_annotations(void)
+static void perf_session__find_annotations(struct perf_session *self)
{
struct rb_node *nd;
- for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) {
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct sym_priv *priv;
@@ -453,7 +450,7 @@ static void find_annotations(void)
}
}
-static struct perf_file_handler file_handler = {
+static struct perf_event_ops event_ops = {
.process_sample_event = process_sample_event,
.process_mmap_event = event__process_mmap,
.process_comm_event = event__process_comm,
@@ -462,33 +459,33 @@ static struct perf_file_handler file_handler = {
static int __cmd_annotate(void)
{
- struct perf_header *header;
- struct thread *idle;
int ret;
+ struct perf_session *session;
- idle = register_idle_thread();
- register_perf_file_handler(&file_handler);
+ session = perf_session__new(input_name, O_RDONLY, force);
+ if (session == NULL)
+ return -ENOMEM;
- ret = mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ ret = perf_session__process_events(session, &event_ops);
if (ret)
- return ret;
+ goto out_delete;
if (dump_trace) {
event__print_totals();
- return 0;
+ goto out_delete;
}
if (verbose > 3)
- threads__fprintf(stdout);
+ perf_session__fprintf(session, stdout);
if (verbose > 2)
dsos__fprintf(stdout);
- collapse__resort();
- output__resort(event__total[0]);
-
- find_annotations();
+ perf_session__collapse_resort(session);
+ perf_session__output_resort(session, session->event_total[0]);
+ perf_session__find_annotations(session);
+out_delete:
+ perf_session__delete(session);
return ret;
}
@@ -519,29 +516,17 @@ static const struct option options[] = {
OPT_END()
};
-static void setup_sorting(void)
+int cmd_annotate(int argc, const char **argv, const char *prefix __used)
{
- char *tmp, *tok, *str = strdup(sort_order);
-
- for (tok = strtok_r(str, ", ", &tmp);
- tok; tok = strtok_r(NULL, ", ", &tmp)) {
- if (sort_dimension__add(tok) < 0) {
- error("Unknown --sort key: `%s'", tok);
- usage_with_options(annotate_usage, options);
- }
- }
+ argc = parse_options(argc, argv, options, annotate_usage, 0);
- free(str);
-}
+ symbol_conf.priv_size = sizeof(struct sym_priv);
+ symbol_conf.try_vmlinux_path = true;
-int cmd_annotate(int argc, const char **argv, const char *prefix __used)
-{
- if (symbol__init(&symbol_conf) < 0)
+ if (symbol__init() < 0)
return -1;
- argc = parse_options(argc, argv, options, annotate_usage, 0);
-
- setup_sorting();
+ setup_sorting(annotate_usage, options);
if (argc) {
/*
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index e043eb83092..46996774e55 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -31,6 +31,9 @@ struct bench_suite {
const char *summary;
int (*fn)(int, const char **, const char *);
};
+ \
+/* sentinel: easy for help */
+#define suite_all { "all", "test all suite (pseudo suite)", NULL }
static struct bench_suite sched_suites[] = {
{ "messaging",
@@ -39,6 +42,7 @@ static struct bench_suite sched_suites[] = {
{ "pipe",
"Flood of communication over pipe() between two processes",
bench_sched_pipe },
+ suite_all,
{ NULL,
NULL,
NULL }
@@ -48,6 +52,7 @@ static struct bench_suite mem_suites[] = {
{ "memcpy",
"Simple memory copy in various ways",
bench_mem_memcpy },
+ suite_all,
{ NULL,
NULL,
NULL }
@@ -66,6 +71,9 @@ static struct bench_subsys subsystems[] = {
{ "mem",
"memory access performance",
mem_suites },
+ { "all", /* sentinel: easy for help */
+ "test all subsystem (pseudo subsystem)",
+ NULL },
{ NULL,
NULL,
NULL }
@@ -75,11 +83,11 @@ static void dump_suites(int subsys_index)
{
int i;
- printf("List of available suites for %s...\n\n",
+ printf("# List of available suites for %s...\n\n",
subsystems[subsys_index].name);
for (i = 0; subsystems[subsys_index].suites[i].name; i++)
- printf("\t%s: %s\n",
+ printf("%14s: %s\n",
subsystems[subsys_index].suites[i].name,
subsystems[subsys_index].suites[i].summary);
@@ -110,10 +118,10 @@ static void print_usage(void)
printf("\t%s\n", bench_usage[i]);
printf("\n");
- printf("List of available subsystems...\n\n");
+ printf("# List of available subsystems...\n\n");
for (i = 0; subsystems[i].name; i++)
- printf("\t%s: %s\n",
+ printf("%14s: %s\n",
subsystems[i].name, subsystems[i].summary);
printf("\n");
}
@@ -131,6 +139,37 @@ static int bench_str2int(char *str)
return BENCH_FORMAT_UNKNOWN;
}
+static void all_suite(struct bench_subsys *subsys) /* FROM HERE */
+{
+ int i;
+ const char *argv[2];
+ struct bench_suite *suites = subsys->suites;
+
+ argv[1] = NULL;
+ /*
+ * TODO:
+ * preparing preset parameters for
+ * embedded, ordinary PC, HPC, etc...
+ * will be helpful
+ */
+ for (i = 0; suites[i].fn; i++) {
+ printf("# Running %s/%s benchmark...\n",
+ subsys->name,
+ suites[i].name);
+
+ argv[1] = suites[i].name;
+ suites[i].fn(1, argv, NULL);
+ printf("\n");
+ }
+}
+
+static void all_subsystem(void)
+{
+ int i;
+ for (i = 0; subsystems[i].suites; i++)
+ all_suite(&subsystems[i]);
+}
+
int cmd_bench(int argc, const char **argv, const char *prefix __used)
{
int i, j, status = 0;
@@ -155,6 +194,11 @@ int cmd_bench(int argc, const char **argv, const char *prefix __used)
goto end;
}
+ if (!strcmp(argv[0], "all")) {
+ all_subsystem();
+ goto end;
+ }
+
for (i = 0; subsystems[i].name; i++) {
if (strcmp(subsystems[i].name, argv[0]))
continue;
@@ -165,6 +209,11 @@ int cmd_bench(int argc, const char **argv, const char *prefix __used)
goto end;
}
+ if (!strcmp(argv[1], "all")) {
+ all_suite(&subsystems[i]);
+ goto end;
+ }
+
for (j = 0; subsystems[i].suites[j].name; j++) {
if (strcmp(subsystems[i].suites[j].name, argv[1]))
continue;
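With the new 'all' pseudo entries, the whole benchmark set can be driven as, for example:

    perf bench sched all    # run every suite in the sched subsystem
    perf bench all          # run every suite in every subsystem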
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 7dee9d19ab7..e693e6777af 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -9,17 +9,16 @@
#include "builtin.h"
#include "perf.h"
#include "util/cache.h"
-#include "util/data_map.h"
#include "util/debug.h"
-#include "util/header.h"
#include "util/parse-options.h"
+#include "util/session.h"
#include "util/symbol.h"
static char const *input_name = "perf.data";
static int force;
static const char *const buildid_list_usage[] = {
- "perf report [<options>]",
+ "perf buildid-list [<options>]",
NULL
};
@@ -55,56 +54,18 @@ static int perf_file_section__process_buildids(struct perf_file_section *self,
static int __cmd_buildid_list(void)
{
int err = -1;
- struct perf_header *header;
- struct perf_file_header f_header;
- struct stat input_stat;
- int input = open(input_name, O_RDONLY);
+ struct perf_session *session;
- if (input < 0) {
- pr_err("failed to open file: %s", input_name);
- if (!strcmp(input_name, "perf.data"))
- pr_err(" (try 'perf record' first)");
- pr_err("\n");
- goto out;
- }
-
- err = fstat(input, &input_stat);
- if (err < 0) {
- perror("failed to stat file");
- goto out_close;
- }
-
- if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- pr_err("file %s not owned by current user or root\n",
- input_name);
- goto out_close;
- }
-
- if (!input_stat.st_size) {
- pr_info("zero-sized file, nothing to do!\n");
- goto out_close;
- }
-
- err = -1;
- header = perf_header__new();
- if (header == NULL)
- goto out_close;
-
- if (perf_file_header__read(&f_header, header, input) < 0) {
- pr_warning("incompatible file format");
- goto out_close;
- }
+ session = perf_session__new(input_name, O_RDONLY, force);
+ if (session == NULL)
+ return -1;
- err = perf_header__process_sections(header, input,
+ err = perf_header__process_sections(&session->header, session->fd,
perf_file_section__process_buildids);
+ if (err >= 0)
+ dsos__fprintf_buildid(stdout);
- if (err < 0)
- goto out_close;
-
- dsos__fprintf_buildid(stdout);
-out_close:
- close(input);
-out:
+ perf_session__delete(session);
return err;
}
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
new file mode 100644
index 00000000000..4d33b55d558
--- /dev/null
+++ b/tools/perf/builtin-diff.c
@@ -0,0 +1,248 @@
+/*
+ * builtin-diff.c
+ *
+ * Builtin diff command: Analyze two perf.data input files, look up and read
+ * DSOs and symbol information, sort them and produce a diff.
+ */
+#include "builtin.h"
+
+#include "util/debug.h"
+#include "util/event.h"
+#include "util/hist.h"
+#include "util/session.h"
+#include "util/sort.h"
+#include "util/symbol.h"
+#include "util/util.h"
+
+#include <stdlib.h>
+
+static char const *input_old = "perf.data.old",
+ *input_new = "perf.data";
+static char diff__default_sort_order[] = "dso,symbol";
+static int force;
+static bool show_displacement;
+
+static int perf_session__add_hist_entry(struct perf_session *self,
+ struct addr_location *al, u64 count)
+{
+ bool hit;
+ struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL,
+ count, &hit);
+ if (he == NULL)
+ return -ENOMEM;
+
+ if (hit)
+ he->count += count;
+
+ return 0;
+}
+
+static int diff__process_sample_event(event_t *event, struct perf_session *session)
+{
+ struct addr_location al;
+ struct sample_data data = { .period = 1, };
+
+ dump_printf("(IP, %d): %d: %p\n", event->header.misc,
+ event->ip.pid, (void *)(long)event->ip.ip);
+
+ if (event__preprocess_sample(event, session, &al, NULL) < 0) {
+ pr_warning("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ if (al.filtered)
+ return 0;
+
+ event__parse_sample(event, session->sample_type, &data);
+
+ if (al.sym && perf_session__add_hist_entry(session, &al, data.period)) {
+ pr_warning("problem incrementing symbol count, skipping event\n");
+ return -1;
+ }
+
+ session->events_stats.total += data.period;
+ return 0;
+}
+
+static struct perf_event_ops event_ops = {
+ .process_sample_event = diff__process_sample_event,
+ .process_mmap_event = event__process_mmap,
+ .process_comm_event = event__process_comm,
+ .process_exit_event = event__process_task,
+ .process_fork_event = event__process_task,
+ .process_lost_event = event__process_lost,
+};
+
+static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+
+ while (*p != NULL) {
+ int cmp;
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ cmp = strcmp(he->map->dso->name, iter->map->dso->name);
+ if (cmp > 0)
+ p = &(*p)->rb_left;
+ else if (cmp < 0)
+ p = &(*p)->rb_right;
+ else {
+ cmp = strcmp(he->sym->name, iter->sym->name);
+ if (cmp > 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, root);
+}
+
+static void perf_session__resort_by_name(struct perf_session *self)
+{
+ unsigned long position = 1;
+ struct rb_root tmp = RB_ROOT;
+ struct rb_node *next = rb_first(&self->hists);
+
+ while (next != NULL) {
+ struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
+
+ next = rb_next(&n->rb_node);
+ rb_erase(&n->rb_node, &self->hists);
+ n->position = position++;
+ perf_session__insert_hist_entry_by_name(&tmp, n);
+ }
+
+ self->hists = tmp;
+}
+
+static struct hist_entry *
+perf_session__find_hist_entry_by_name(struct perf_session *self,
+ struct hist_entry *he)
+{
+ struct rb_node *n = self->hists.rb_node;
+
+ while (n) {
+ struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
+ int cmp = strcmp(he->map->dso->name, iter->map->dso->name);
+
+ if (cmp > 0)
+ n = n->rb_left;
+ else if (cmp < 0)
+ n = n->rb_right;
+ else {
+ cmp = strcmp(he->sym->name, iter->sym->name);
+ if (cmp > 0)
+ n = n->rb_left;
+ else if (cmp < 0)
+ n = n->rb_right;
+ else
+ return iter;
+ }
+ }
+
+ return NULL;
+}
+
+static void perf_session__match_hists(struct perf_session *old_session,
+ struct perf_session *new_session)
+{
+ struct rb_node *nd;
+
+ perf_session__resort_by_name(old_session);
+
+ for (nd = rb_first(&new_session->hists); nd; nd = rb_next(nd)) {
+ struct hist_entry *pos = rb_entry(nd, struct hist_entry, rb_node);
+ pos->pair = perf_session__find_hist_entry_by_name(old_session, pos);
+ }
+}
+
+static int __cmd_diff(void)
+{
+ int ret, i;
+ struct perf_session *session[2];
+
+ session[0] = perf_session__new(input_old, O_RDONLY, force);
+ session[1] = perf_session__new(input_new, O_RDONLY, force);
+ if (session[0] == NULL || session[1] == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < 2; ++i) {
+ ret = perf_session__process_events(session[i], &event_ops);
+ if (ret)
+ goto out_delete;
+ perf_session__output_resort(session[i], session[i]->events_stats.total);
+ }
+
+ perf_session__match_hists(session[0], session[1]);
+ perf_session__fprintf_hists(session[1], session[0],
+ show_displacement, stdout);
+out_delete:
+ for (i = 0; i < 2; ++i)
+ perf_session__delete(session[i]);
+ return ret;
+}
+
+static const char *const diff_usage[] = {
+ "perf diff [<options>] [old_file] [new_file]",
+};
+
+static const struct option options[] = {
+ OPT_BOOLEAN('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('m', "displacement", &show_displacement,
+ "Show position displacement relative to baseline"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
+ "load module symbols - WARNING: use only with -k and LIVE kernel"),
+ OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
+ "Don't shorten the pathnames taking into account the cwd"),
+ OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+ "only consider symbols in these dsos"),
+ OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
+ "only consider symbols in these comms"),
+ OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
+ "only consider these symbols"),
+ OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
+ "sort by key(s): pid, comm, dso, symbol, parent"),
+ OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
+ "separator for columns, no spaces will be added between "
+ "columns '.' is reserved."),
+ OPT_END()
+};
+
+int cmd_diff(int argc, const char **argv, const char *prefix __used)
+{
+ sort_order = diff__default_sort_order;
+ argc = parse_options(argc, argv, options, diff_usage, 0);
+ if (argc) {
+ if (argc > 2)
+ usage_with_options(diff_usage, options);
+ if (argc == 2) {
+ input_old = argv[0];
+ input_new = argv[1];
+ } else
+ input_new = argv[0];
+ }
+
+ symbol_conf.exclude_other = false;
+ if (symbol__init() < 0)
+ return -1;
+
+ setup_sorting(diff_usage, options);
+ setup_pager();
+
+ sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL);
+ sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", NULL);
+ sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", NULL);
+
+ return __cmd_diff();
+}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 047fef74bd5..fc21ad79dd8 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -6,12 +6,12 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
+#include "util/session.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
-#include "util/data_map.h"
#include <linux/rbtree.h>
@@ -20,9 +20,6 @@ typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
static char const *input_name = "perf.data";
-static struct perf_header *header;
-static u64 sample_type;
-
static int alloc_flag;
static int caller_flag;
@@ -57,11 +54,6 @@ static struct rb_root root_caller_sorted;
static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
-struct raw_event_sample {
- u32 size;
- char data[0];
-};
-
#define PATH_SYS_NODE "/sys/devices/system/node"
static void init_cpunode_map(void)
@@ -201,7 +193,7 @@ static void insert_caller_stat(unsigned long call_site,
}
}
-static void process_alloc_event(struct raw_event_sample *raw,
+static void process_alloc_event(void *data,
struct event *event,
int cpu,
u64 timestamp __used,
@@ -214,10 +206,10 @@ static void process_alloc_event(struct raw_event_sample *raw,
int bytes_alloc;
int node1, node2;
- ptr = raw_field_value(event, "ptr", raw->data);
- call_site = raw_field_value(event, "call_site", raw->data);
- bytes_req = raw_field_value(event, "bytes_req", raw->data);
- bytes_alloc = raw_field_value(event, "bytes_alloc", raw->data);
+ ptr = raw_field_value(event, "ptr", data);
+ call_site = raw_field_value(event, "call_site", data);
+ bytes_req = raw_field_value(event, "bytes_req", data);
+ bytes_alloc = raw_field_value(event, "bytes_alloc", data);
insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
insert_caller_stat(call_site, bytes_req, bytes_alloc);
@@ -227,7 +219,7 @@ static void process_alloc_event(struct raw_event_sample *raw,
if (node) {
node1 = cpunode_map[cpu];
- node2 = raw_field_value(event, "node", raw->data);
+ node2 = raw_field_value(event, "node", data);
if (node1 != node2)
nr_cross_allocs++;
}
@@ -262,7 +254,7 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
return NULL;
}
-static void process_free_event(struct raw_event_sample *raw,
+static void process_free_event(void *data,
struct event *event,
int cpu,
u64 timestamp __used,
@@ -271,7 +263,7 @@ static void process_free_event(struct raw_event_sample *raw,
unsigned long ptr;
struct alloc_stat *s_alloc, *s_caller;
- ptr = raw_field_value(event, "ptr", raw->data);
+ ptr = raw_field_value(event, "ptr", data);
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
if (!s_alloc)
@@ -289,66 +281,53 @@ static void process_free_event(struct raw_event_sample *raw,
}
static void
-process_raw_event(event_t *raw_event __used, void *more_data,
+process_raw_event(event_t *raw_event __used, void *data,
int cpu, u64 timestamp, struct thread *thread)
{
- struct raw_event_sample *raw = more_data;
struct event *event;
int type;
- type = trace_parse_common_type(raw->data);
+ type = trace_parse_common_type(data);
event = trace_find_event(type);
if (!strcmp(event->name, "kmalloc") ||
!strcmp(event->name, "kmem_cache_alloc")) {
- process_alloc_event(raw, event, cpu, timestamp, thread, 0);
+ process_alloc_event(data, event, cpu, timestamp, thread, 0);
return;
}
if (!strcmp(event->name, "kmalloc_node") ||
!strcmp(event->name, "kmem_cache_alloc_node")) {
- process_alloc_event(raw, event, cpu, timestamp, thread, 1);
+ process_alloc_event(data, event, cpu, timestamp, thread, 1);
return;
}
if (!strcmp(event->name, "kfree") ||
!strcmp(event->name, "kmem_cache_free")) {
- process_free_event(raw, event, cpu, timestamp, thread);
+ process_free_event(data, event, cpu, timestamp, thread);
return;
}
}
-static int process_sample_event(event_t *event)
+static int process_sample_event(event_t *event, struct perf_session *session)
{
- u64 ip = event->ip.ip;
- u64 timestamp = -1;
- u32 cpu = -1;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- struct thread *thread = threads__findnew(event->ip.pid);
-
- if (sample_type & PERF_SAMPLE_TIME) {
- timestamp = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ struct sample_data data;
+ struct thread *thread;
- if (sample_type & PERF_SAMPLE_CPU) {
- cpu = *(u32 *)more_data;
- more_data += sizeof(u32);
- more_data += sizeof(u32); /* reserved */
- }
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = 1;
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ event__parse_sample(event, session->sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
+ thread = perf_session__findnew(session, event->ip.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -357,16 +336,15 @@ static int process_sample_event(event_t *event)
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- process_raw_event(event, more_data, cpu, timestamp, thread);
+ process_raw_event(event, data.raw_data, data.cpu,
+ data.time, thread);
return 0;
}
-static int sample_type_check(u64 type)
+static int sample_type_check(struct perf_session *session)
{
- sample_type = type;
-
- if (!(sample_type & PERF_SAMPLE_RAW)) {
+ if (!(session->sample_type & PERF_SAMPLE_RAW)) {
fprintf(stderr,
"No trace sample to read. Did you call perf record "
"without -R?");
@@ -376,21 +354,12 @@ static int sample_type_check(u64 type)
return 0;
}
-static struct perf_file_handler file_handler = {
+static struct perf_event_ops event_ops = {
.process_sample_event = process_sample_event,
.process_comm_event = event__process_comm,
.sample_type_check = sample_type_check,
};
-static int read_events(void)
-{
- register_idle_thread();
- register_perf_file_handler(&file_handler);
-
- return mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
-}
-
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
if (n_alloc == 0)
@@ -399,7 +368,8 @@ static double fragmentation(unsigned long n_req, unsigned long n_alloc)
return 100.0 - (100.0 * n_req / n_alloc);
}
-static void __print_result(struct rb_root *root, int n_lines, int is_caller)
+static void __print_result(struct rb_root *root, struct perf_session *session,
+ int n_lines, int is_caller)
{
struct rb_node *next;
@@ -420,7 +390,7 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller)
if (is_caller) {
addr = data->call_site;
if (!raw_ip)
- sym = thread__find_function(kthread, addr, NULL);
+ sym = map_groups__find_function(&session->kmaps, session, addr, NULL);
} else
addr = data->ptr;
@@ -461,12 +431,12 @@ static void print_summary(void)
printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
-static void print_result(void)
+static void print_result(struct perf_session *session)
{
if (caller_flag)
- __print_result(&root_caller_sorted, caller_lines, 1);
+ __print_result(&root_caller_sorted, session, caller_lines, 1);
if (alloc_flag)
- __print_result(&root_alloc_sorted, alloc_lines, 0);
+ __print_result(&root_alloc_sorted, session, alloc_lines, 0);
print_summary();
}
@@ -534,16 +504,24 @@ static void sort_result(void)
static int __cmd_kmem(void)
{
+ int err;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
+ if (session == NULL)
+ return -ENOMEM;
+
setup_pager();
- read_events();
+ err = perf_session__process_events(session, &event_ops);
+ if (err != 0)
+ goto out_delete;
sort_result();
- print_result();
-
- return 0;
+ print_result(session);
+out_delete:
+ perf_session__delete(session);
+ return err;
}
static const char * const kmem_usage[] = {
- "perf kmem [<options>] {record}",
+ "perf kmem [<options>] {record|stat}",
NULL
};
@@ -703,18 +681,17 @@ static int parse_sort_opt(const struct option *opt __used,
return 0;
}
-static int parse_stat_opt(const struct option *opt __used,
- const char *arg, int unset __used)
+static int parse_caller_opt(const struct option *opt __used,
+ const char *arg __used, int unset __used)
{
- if (!arg)
- return -1;
+ caller_flag = (alloc_flag + 1);
+ return 0;
+}
- if (strcmp(arg, "alloc") == 0)
- alloc_flag = (caller_flag + 1);
- else if (strcmp(arg, "caller") == 0)
- caller_flag = (alloc_flag + 1);
- else
- return -1;
+static int parse_alloc_opt(const struct option *opt __used,
+ const char *arg __used, int unset __used)
+{
+ alloc_flag = (caller_flag + 1);
return 0;
}
@@ -739,14 +716,17 @@ static int parse_line_opt(const struct option *opt __used,
static const struct option kmem_options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
- OPT_CALLBACK(0, "stat", NULL, "<alloc>|<caller>",
- "stat selector, Pass 'alloc' or 'caller'.",
- parse_stat_opt),
+ OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
+ "show per-callsite statistics",
+ parse_caller_opt),
+ OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
+ "show per-allocation statistics",
+ parse_alloc_opt),
OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
"sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
parse_sort_opt),
OPT_CALLBACK('l', "line", NULL, "num",
- "show n lins",
+ "show n lines",
parse_line_opt),
OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
OPT_END()
@@ -786,22 +766,26 @@ static int __cmd_record(int argc, const char **argv)
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
- symbol__init(0);
-
argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);
- if (argc && !strncmp(argv[0], "rec", 3))
- return __cmd_record(argc, argv);
- else if (argc)
+ if (!argc)
usage_with_options(kmem_usage, kmem_options);
- if (list_empty(&caller_sort))
- setup_sorting(&caller_sort, default_sort_order);
- if (list_empty(&alloc_sort))
- setup_sorting(&alloc_sort, default_sort_order);
+ symbol__init();
+
+ if (!strncmp(argv[0], "rec", 3)) {
+ return __cmd_record(argc, argv);
+ } else if (!strcmp(argv[0], "stat")) {
+ setup_cpunode_map();
- setup_cpunode_map();
+ if (list_empty(&caller_sort))
+ setup_sorting(&caller_sort, default_sort_order);
+ if (list_empty(&alloc_sort))
+ setup_sorting(&alloc_sort, default_sort_order);
- return __cmd_kmem();
+ return __cmd_kmem();
+ }
+
+ return 0;
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index a58e11b7ea8..c1e6774fd3e 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -35,34 +35,33 @@
#include "perf.h"
#include "builtin.h"
#include "util/util.h"
+#include "util/strlist.h"
#include "util/event.h"
#include "util/debug.h"
+#include "util/debugfs.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/session.h"
#include "util/parse-options.h"
#include "util/parse-events.h" /* For debugfs_path */
#include "util/probe-finder.h"
#include "util/probe-event.h"
-/* Default vmlinux search paths */
-#define NR_SEARCH_PATH 3
-const char *default_search_path[NR_SEARCH_PATH] = {
-"/lib/modules/%s/build/vmlinux", /* Custom build kernel */
-"/usr/lib/debug/lib/modules/%s/vmlinux", /* Red Hat debuginfo */
-"/boot/vmlinux-debug-%s", /* Ubuntu */
-};
-
#define MAX_PATH_LEN 256
#define MAX_PROBES 128
/* Session management structure */
static struct {
- char *vmlinux;
- char *release;
- int need_dwarf;
+ bool need_dwarf;
+ bool list_events;
+ bool force_add;
int nr_probe;
struct probe_point probes[MAX_PROBES];
+ struct strlist *dellist;
+ struct perf_session *psession;
+ struct map *kmap;
} session;
-static bool listing;
/* Parse an event definition. Note that any error must die. */
static void parse_probe_event(const char *str)
@@ -74,11 +73,30 @@ static void parse_probe_event(const char *str)
die("Too many probes (> %d) are specified.", MAX_PROBES);
/* Parse perf-probe event into probe_point */
- session.need_dwarf = parse_perf_probe_event(str, pp);
+ parse_perf_probe_event(str, pp, &session.need_dwarf);
pr_debug("%d arguments\n", pp->nr_args);
}
+static void parse_probe_event_argv(int argc, const char **argv)
+{
+ int i, len;
+ char *buf;
+
+ /* Bind up rest arguments */
+ len = 0;
+ for (i = 0; i < argc; i++)
+ len += strlen(argv[i]) + 1;
+ buf = zalloc(len + 1);
+ if (!buf)
+ die("Failed to allocate memory for binding arguments.");
+ len = 0;
+ for (i = 0; i < argc; i++)
+ len += sprintf(&buf[len], "%s ", argv[i]);
+ parse_probe_event(buf);
+ free(buf);
+}
+
static int opt_add_probe_event(const struct option *opt __used,
const char *str, int unset __used)
{
@@ -87,40 +105,44 @@ static int opt_add_probe_event(const struct option *opt __used,
return 0;
}
-#ifndef NO_LIBDWARF
-static int open_default_vmlinux(void)
+static int opt_del_probe_event(const struct option *opt __used,
+ const char *str, int unset __used)
{
- struct utsname uts;
- char fname[MAX_PATH_LEN];
- int fd, ret, i;
-
- ret = uname(&uts);
- if (ret) {
- pr_debug("uname() failed.\n");
- return -errno;
+ if (str) {
+ if (!session.dellist)
+ session.dellist = strlist__new(true, NULL);
+ strlist__add(session.dellist, str);
}
- session.release = uts.release;
- for (i = 0; i < NR_SEARCH_PATH; i++) {
- ret = snprintf(fname, MAX_PATH_LEN,
- default_search_path[i], session.release);
- if (ret >= MAX_PATH_LEN || ret < 0) {
- pr_debug("Filename(%d,%s) is too long.\n", i,
- uts.release);
- errno = E2BIG;
- return -E2BIG;
- }
- pr_debug("try to open %s\n", fname);
- fd = open(fname, O_RDONLY);
- if (fd >= 0)
- break;
+ return 0;
+}
+
+/* Currently just checking function name from symbol map */
+static void evaluate_probe_point(struct probe_point *pp)
+{
+ struct symbol *sym;
+ sym = map__find_symbol_by_name(session.kmap, pp->function,
+ session.psession, NULL);
+ if (!sym)
+ die("Kernel symbol \'%s\' not found - probe not added.",
+ pp->function);
+}
+
+#ifndef NO_LIBDWARF
+static int open_vmlinux(void)
+{
+ if (map__load(session.kmap, session.psession, NULL) < 0) {
+ pr_debug("Failed to load kernel map.\n");
+ return -EINVAL;
}
- return fd;
+ pr_debug("Try to open %s\n", session.kmap->dso->long_name);
+ return open(session.kmap->dso->long_name, O_RDONLY);
}
#endif
static const char * const probe_usage[] = {
"perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
"perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
+ "perf probe [<options>] --del '[GROUP:]EVENT' ...",
"perf probe --list",
NULL
};
@@ -129,19 +151,22 @@ static const struct option options[] = {
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show parsed arguments, etc)"),
#ifndef NO_LIBDWARF
- OPT_STRING('k', "vmlinux", &session.vmlinux, "file",
- "vmlinux/module pathname"),
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
#endif
- OPT_BOOLEAN('l', "list", &listing, "list up current probes"),
+ OPT_BOOLEAN('l', "list", &session.list_events,
+ "list up current probe events"),
+ OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
+ opt_del_probe_event),
OPT_CALLBACK('a', "add", NULL,
#ifdef NO_LIBDWARF
- "FUNC[+OFFS|%return] [ARG ...]",
+ "[EVENT=]FUNC[+OFFS|%return] [ARG ...]",
#else
- "FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]",
+ "[EVENT=]FUNC[+OFFS|%return|:RLN][@SRC]|SRC:ALN [ARG ...]",
#endif
"probe point definition, where\n"
- "\t\tGRP:\tGroup name (optional)\n"
- "\t\tNAME:\tEvent name\n"
+ "\t\tGROUP:\tGroup name (optional)\n"
+ "\t\tEVENT:\tEvent name\n"
"\t\tFUNC:\tFunction name\n"
"\t\tOFFS:\tOffset from function entry (in byte)\n"
"\t\t%return:\tPut the probe at function return\n"
@@ -155,12 +180,14 @@ static const struct option options[] = {
#endif
"\t\t\tkprobe-tracer argument format.)\n",
opt_add_probe_event),
+ OPT_BOOLEAN('f', "force", &session.force_add, "forcibly add events"
+ " with existing name"),
OPT_END()
};
int cmd_probe(int argc, const char **argv, const char *prefix __used)
{
- int i, j, ret;
+ int i, ret;
#ifndef NO_LIBDWARF
int fd;
#endif
@@ -168,54 +195,92 @@ int cmd_probe(int argc, const char **argv, const char *prefix __used)
argc = parse_options(argc, argv, options, probe_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- for (i = 0; i < argc; i++)
- parse_probe_event(argv[i]);
+ if (argc > 0) {
+ if (strcmp(argv[0], "-") == 0) {
+ pr_warning(" Error: '-' is not supported.\n");
+ usage_with_options(probe_usage, options);
+ }
+ parse_probe_event_argv(argc, argv);
+ }
- if ((session.nr_probe == 0 && !listing) ||
- (session.nr_probe != 0 && listing))
+ if ((!session.nr_probe && !session.dellist && !session.list_events))
usage_with_options(probe_usage, options);
- if (listing) {
+ if (debugfs_valid_mountpoint(debugfs_path) < 0)
+ die("Failed to find debugfs path.");
+
+ if (session.list_events) {
+ if (session.nr_probe != 0 || session.dellist) {
+ pr_warning(" Error: Don't use --list with"
+ " --add/--del.\n");
+ usage_with_options(probe_usage, options);
+ }
show_perf_probe_events();
return 0;
}
+ if (session.dellist) {
+ del_trace_kprobe_events(session.dellist);
+ strlist__delete(session.dellist);
+ if (session.nr_probe == 0)
+ return 0;
+ }
+
+ /* Initialize symbol maps for vmlinux */
+ symbol_conf.sort_by_name = true;
+ if (symbol_conf.vmlinux_name == NULL)
+ symbol_conf.try_vmlinux_path = true;
+ if (symbol__init() < 0)
+ die("Failed to init symbol map.");
+ session.psession = perf_session__new(NULL, O_WRONLY, false);
+ if (session.psession == NULL)
+ die("Failed to init perf_session.");
+ session.kmap = map_groups__find_by_name(&session.psession->kmaps,
+ MAP__FUNCTION,
+ "[kernel.kallsyms]");
+ if (!session.kmap)
+ die("Could not find kernel map.\n");
+
if (session.need_dwarf)
#ifdef NO_LIBDWARF
die("Debuginfo-analysis is not supported");
#else /* !NO_LIBDWARF */
pr_debug("Some probes require debuginfo.\n");
- if (session.vmlinux)
- fd = open(session.vmlinux, O_RDONLY);
- else
- fd = open_default_vmlinux();
+ fd = open_vmlinux();
if (fd < 0) {
if (session.need_dwarf)
- die("Could not open vmlinux/module file.");
+ die("Could not open debuginfo file.");
- pr_warning("Could not open vmlinux/module file."
- " Try to use symbols.\n");
+ pr_debug("Could not open vmlinux/module file."
+ " Try to use symbols.\n");
goto end_dwarf;
}
/* Searching probe points */
- for (j = 0; j < session.nr_probe; j++) {
- pp = &session.probes[j];
+ for (i = 0; i < session.nr_probe; i++) {
+ pp = &session.probes[i];
if (pp->found)
continue;
lseek(fd, SEEK_SET, 0);
ret = find_probepoint(fd, pp);
- if (ret < 0) {
- if (session.need_dwarf)
- die("Could not analyze debuginfo.");
-
- pr_warning("An error occurred in debuginfo analysis. Try to use symbols.\n");
- break;
+ if (ret > 0)
+ continue;
+ if (ret == 0) { /* No error but failed to find probe point. */
+ synthesize_perf_probe_point(pp);
+ die("Probe point '%s' not found. - probe not added.",
+ pp->probes[0]);
+ }
+ /* Error path */
+ if (session.need_dwarf) {
+ if (ret == -ENOENT)
+ pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO=y.\n");
+ die("Could not analyze debuginfo.");
}
- if (ret == 0) /* No error but failed to find probe point. */
- die("No probe point found.");
+ pr_debug("An error occurred in debuginfo analysis."
+ " Try to use symbols.\n");
+ break;
}
close(fd);
@@ -223,11 +288,12 @@ end_dwarf:
#endif /* !NO_LIBDWARF */
/* Synthesize probes without dwarf */
- for (j = 0; j < session.nr_probe; j++) {
- pp = &session.probes[j];
+ for (i = 0; i < session.nr_probe; i++) {
+ pp = &session.probes[i];
if (pp->found) /* This probe is already found. */
continue;
+ evaluate_probe_point(pp);
ret = synthesize_trace_kprobe_event(pp);
if (ret == -E2BIG)
die("probe point definition becomes too long.");
@@ -236,7 +302,8 @@ end_dwarf:
}
/* Setting up probe points */
- add_trace_kprobe_events(session.probes, session.nr_probe);
+ add_trace_kprobe_events(session.probes, session.nr_probe,
+ session.force_add);
return 0;
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0e519c667e3..63136d0534d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -17,6 +17,7 @@
#include "util/header.h"
#include "util/event.h"
#include "util/debug.h"
+#include "util/session.h"
#include "util/symbol.h"
#include <unistd.h>
@@ -62,7 +63,7 @@ static int nr_cpu = 0;
static int file_new = 1;
-struct perf_header *header = NULL;
+static struct perf_session *session;
struct mmap_data {
int counter;
@@ -122,7 +123,8 @@ static void write_event(event_t *buf, size_t size)
write_output(buf, size);
}
-static int process_synthesized_event(event_t *event)
+static int process_synthesized_event(event_t *event,
+ struct perf_session *self __used)
{
write_event(event, event->header.size);
return 0;
@@ -216,12 +218,12 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
{
struct perf_header_attr *h_attr;
- if (nr < header->attrs) {
- h_attr = header->attr[nr];
+ if (nr < session->header.attrs) {
+ h_attr = session->header.attr[nr];
} else {
h_attr = perf_header_attr__new(a);
if (h_attr != NULL)
- if (perf_header__add_attr(header, h_attr) < 0) {
+ if (perf_header__add_attr(&session->header, h_attr) < 0) {
perf_header_attr__delete(h_attr);
h_attr = NULL;
}
@@ -276,7 +278,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
attr->mmap = track;
attr->comm = track;
- attr->inherit = (cpu < 0) && inherit;
+ attr->inherit = inherit;
attr->disabled = 1;
try_again:
@@ -395,12 +397,12 @@ static void open_counters(int cpu, pid_t pid)
static void atexit_header(void)
{
- header->data_size += bytes_written;
+ session->header.data_size += bytes_written;
- perf_header__write(header, output, true);
+ perf_header__write(&session->header, output, true);
}
-static int __cmd_record(int argc, const char **argv)
+static int __cmd_record(int argc __used, const char **argv)
{
int i, counter;
struct stat st;
@@ -408,6 +410,8 @@ static int __cmd_record(int argc, const char **argv)
int flags;
int err;
unsigned long waking = 0;
+ int child_ready_pipe[2], go_pipe[2];
+ char buf;
page_size = sysconf(_SC_PAGE_SIZE);
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
@@ -418,11 +422,25 @@ static int __cmd_record(int argc, const char **argv)
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
+ if (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0) {
+ perror("failed to create pipes");
+ exit(-1);
+ }
+
if (!stat(output_name, &st) && st.st_size) {
- if (!force && !append_file) {
- fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n",
- output_name);
- exit(-1);
+ if (!force) {
+ if (!append_file) {
+ pr_err("Error, output file %s exists, use -A "
+ "to append or -f to overwrite.\n",
+ output_name);
+ exit(-1);
+ }
+ } else {
+ char oldname[PATH_MAX];
+ snprintf(oldname, sizeof(oldname), "%s.old",
+ output_name);
+ unlink(oldname);
+ rename(output_name, oldname);
}
} else {
append_file = 0;
@@ -440,24 +458,24 @@ static int __cmd_record(int argc, const char **argv)
exit(-1);
}
- header = perf_header__new();
- if (header == NULL) {
+ session = perf_session__new(output_name, O_WRONLY, force);
+ if (session == NULL) {
pr_err("Not enough memory for reading perf file header\n");
return -1;
}
if (!file_new) {
- err = perf_header__read(header, output);
+ err = perf_header__read(&session->header, output);
if (err < 0)
return err;
}
if (raw_samples) {
- perf_header__set_feat(header, HEADER_TRACE_INFO);
+ perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
} else {
for (i = 0; i < nr_counters; i++) {
if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
- perf_header__set_feat(header, HEADER_TRACE_INFO);
+ perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
break;
}
}
@@ -465,56 +483,79 @@ static int __cmd_record(int argc, const char **argv)
atexit(atexit_header);
- if (!system_wide) {
- pid = target_pid;
- if (pid == -1)
- pid = getpid();
-
- open_counters(profile_cpu, pid);
- } else {
- if (profile_cpu != -1) {
- open_counters(profile_cpu, target_pid);
- } else {
- for (i = 0; i < nr_cpus; i++)
- open_counters(i, target_pid);
+ if (target_pid == -1) {
+ pid = fork();
+ if (pid < 0) {
+ perror("failed to fork");
+ exit(-1);
}
- }
- if (file_new) {
- err = perf_header__write(header, output, false);
- if (err < 0)
- return err;
- }
+ if (!pid) {
+ close(child_ready_pipe[0]);
+ close(go_pipe[1]);
+ fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
- if (!system_wide)
- event__synthesize_thread(pid, process_synthesized_event);
- else
- event__synthesize_threads(process_synthesized_event);
+ /*
+ * Do a dummy execvp to get the PLT entry resolved,
+ * so we avoid the resolver overhead on the real
+ * execvp call.
+ */
+ execvp("", (char **)argv);
- if (target_pid == -1 && argc) {
- pid = fork();
- if (pid < 0)
- die("failed to fork");
+ /*
+ * Tell the parent we're ready to go
+ */
+ close(child_ready_pipe[1]);
- if (!pid) {
- if (execvp(argv[0], (char **)argv)) {
- perror(argv[0]);
- exit(-1);
- }
- } else {
/*
- * Wait a bit for the execv'ed child to appear
- * and be updated in /proc
- * FIXME: Do you know a less heuristical solution?
+ * Wait until the parent tells us to go.
*/
- usleep(1000);
- event__synthesize_thread(pid,
- process_synthesized_event);
+ if (read(go_pipe[0], &buf, 1) == -1)
+ perror("unable to read pipe");
+
+ execvp(argv[0], (char **)argv);
+
+ perror(argv[0]);
+ exit(-1);
}
child_pid = pid;
+
+ if (!system_wide)
+ target_pid = pid;
+
+ close(child_ready_pipe[1]);
+ close(go_pipe[0]);
+ /*
+ * wait for child to settle
+ */
+ if (read(child_ready_pipe[0], &buf, 1) == -1) {
+ perror("unable to read pipe");
+ exit(-1);
+ }
+ close(child_ready_pipe[0]);
+ }
+
+
+ if ((!system_wide && !inherit) || profile_cpu != -1) {
+ open_counters(profile_cpu, target_pid);
+ } else {
+ for (i = 0; i < nr_cpus; i++)
+ open_counters(i, target_pid);
}
+ if (file_new) {
+ err = perf_header__write(&session->header, output, false);
+ if (err < 0)
+ return err;
+ }
+
+ if (!system_wide)
+ event__synthesize_thread(pid, process_synthesized_event,
+ session);
+ else
+ event__synthesize_threads(process_synthesized_event, session);
+
if (realtime_prio) {
struct sched_param param;
@@ -525,6 +566,11 @@ static int __cmd_record(int argc, const char **argv)
}
}
+ /*
+ * Let the child rip
+ */
+ close(go_pipe[1]);
+
for (;;) {
int hits = samples;
@@ -619,13 +665,13 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
{
int counter;
- symbol__init(0);
-
argc = parse_options(argc, argv, options, record_usage,
- PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc && target_pid == -1 && !system_wide)
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc && target_pid == -1 && (!system_wide || profile_cpu == -1))
usage_with_options(record_usage, options);
+ symbol__init();
+
if (!nr_counters) {
nr_counters = 1;
attrs[0].type = PERF_TYPE_HARDWARE;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 383c4ab4f9a..5c2ab5357ec 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -22,471 +22,46 @@
#include "perf.h"
#include "util/debug.h"
#include "util/header.h"
+#include "util/session.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
-#include "util/data_map.h"
#include "util/thread.h"
#include "util/sort.h"
#include "util/hist.h"
static char const *input_name = "perf.data";
-static char *dso_list_str, *comm_list_str, *sym_list_str,
- *col_width_list_str;
-static struct strlist *dso_list, *comm_list, *sym_list;
-
static int force;
-static int full_paths;
-static int show_nr_samples;
-
static int show_threads;
static struct perf_read_values show_threads_values;
static char default_pretty_printing_style[] = "normal";
static char *pretty_printing_style = default_pretty_printing_style;
-static int exclude_other = 1;
-
static char callchain_default_opt[] = "fractal,0.5";
-static struct perf_header *header;
-
-static u64 sample_type;
-
-struct symbol_conf symbol_conf;
-
-
-static size_t
-callchain__fprintf_left_margin(FILE *fp, int left_margin)
-{
- int i;
- int ret;
-
- ret = fprintf(fp, " ");
-
- for (i = 0; i < left_margin; i++)
- ret += fprintf(fp, " ");
-
- return ret;
-}
-
-static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
- int left_margin)
-{
- int i;
- size_t ret = 0;
-
- ret += callchain__fprintf_left_margin(fp, left_margin);
-
- for (i = 0; i < depth; i++)
- if (depth_mask & (1 << i))
- ret += fprintf(fp, "| ");
- else
- ret += fprintf(fp, " ");
-
- ret += fprintf(fp, "\n");
-
- return ret;
-}
-static size_t
-ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
- int depth_mask, int count, u64 total_samples,
- int hits, int left_margin)
-{
- int i;
- size_t ret = 0;
-
- ret += callchain__fprintf_left_margin(fp, left_margin);
- for (i = 0; i < depth; i++) {
- if (depth_mask & (1 << i))
- ret += fprintf(fp, "|");
- else
- ret += fprintf(fp, " ");
- if (!count && i == depth - 1) {
- double percent;
-
- percent = hits * 100.0 / total_samples;
- ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
- } else
- ret += fprintf(fp, "%s", " ");
- }
- if (chain->sym)
- ret += fprintf(fp, "%s\n", chain->sym->name);
- else
- ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
-
- return ret;
-}
-
-static struct symbol *rem_sq_bracket;
-static struct callchain_list rem_hits;
-
-static void init_rem_hits(void)
-{
- rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
- if (!rem_sq_bracket) {
- fprintf(stderr, "Not enough memory to display remaining hits\n");
- return;
- }
-
- strcpy(rem_sq_bracket->name, "[...]");
- rem_hits.sym = rem_sq_bracket;
-}
-
-static size_t
-__callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
- u64 total_samples, int depth, int depth_mask,
- int left_margin)
-{
- struct rb_node *node, *next;
- struct callchain_node *child;
- struct callchain_list *chain;
- int new_depth_mask = depth_mask;
- u64 new_total;
- u64 remaining;
- size_t ret = 0;
- int i;
-
- if (callchain_param.mode == CHAIN_GRAPH_REL)
- new_total = self->children_hit;
- else
- new_total = total_samples;
-
- remaining = new_total;
-
- node = rb_first(&self->rb_root);
- while (node) {
- u64 cumul;
-
- child = rb_entry(node, struct callchain_node, rb_node);
- cumul = cumul_hits(child);
- remaining -= cumul;
-
- /*
- * The depth mask manages the output of pipes that show
- * the depth. We don't want to keep the pipes of the current
- * level for the last child of this depth.
- * Except if we have remaining filtered hits. They will
- * supersede the last child
- */
- next = rb_next(node);
- if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
- new_depth_mask &= ~(1 << (depth - 1));
-
- /*
- * But we keep the older depth mask for the line seperator
- * to keep the level link until we reach the last child
- */
- ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
- left_margin);
- i = 0;
- list_for_each_entry(chain, &child->val, list) {
- if (chain->ip >= PERF_CONTEXT_MAX)
- continue;
- ret += ipchain__fprintf_graph(fp, chain, depth,
- new_depth_mask, i++,
- new_total,
- cumul,
- left_margin);
- }
- ret += __callchain__fprintf_graph(fp, child, new_total,
- depth + 1,
- new_depth_mask | (1 << depth),
- left_margin);
- node = next;
- }
-
- if (callchain_param.mode == CHAIN_GRAPH_REL &&
- remaining && remaining != new_total) {
-
- if (!rem_sq_bracket)
- return ret;
-
- new_depth_mask &= ~(1 << (depth - 1));
-
- ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
- new_depth_mask, 0, new_total,
- remaining, left_margin);
- }
-
- return ret;
-}
-
-
-static size_t
-callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
- u64 total_samples, int left_margin)
-{
- struct callchain_list *chain;
- bool printed = false;
- int i = 0;
- int ret = 0;
-
- list_for_each_entry(chain, &self->val, list) {
- if (chain->ip >= PERF_CONTEXT_MAX)
- continue;
-
- if (!i++ && sort__first_dimension == SORT_SYM)
- continue;
-
- if (!printed) {
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "|\n");
- ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "---");
-
- left_margin += 3;
- printed = true;
- } else
- ret += callchain__fprintf_left_margin(fp, left_margin);
-
- if (chain->sym)
- ret += fprintf(fp, " %s\n", chain->sym->name);
- else
- ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
- }
-
- ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
-
- return ret;
-}
-
-static size_t
-callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
- u64 total_samples)
-{
- struct callchain_list *chain;
- size_t ret = 0;
-
- if (!self)
- return 0;
-
- ret += callchain__fprintf_flat(fp, self->parent, total_samples);
-
-
- list_for_each_entry(chain, &self->val, list) {
- if (chain->ip >= PERF_CONTEXT_MAX)
- continue;
- if (chain->sym)
- ret += fprintf(fp, " %s\n", chain->sym->name);
- else
- ret += fprintf(fp, " %p\n",
- (void *)(long)chain->ip);
- }
-
- return ret;
-}
-
-static size_t
-hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
- u64 total_samples, int left_margin)
-{
- struct rb_node *rb_node;
- struct callchain_node *chain;
- size_t ret = 0;
-
- rb_node = rb_first(&self->sorted_chain);
- while (rb_node) {
- double percent;
-
- chain = rb_entry(rb_node, struct callchain_node, rb_node);
- percent = chain->hit * 100.0 / total_samples;
- switch (callchain_param.mode) {
- case CHAIN_FLAT:
- ret += percent_color_fprintf(fp, " %6.2f%%\n",
- percent);
- ret += callchain__fprintf_flat(fp, chain, total_samples);
- break;
- case CHAIN_GRAPH_ABS: /* Falldown */
- case CHAIN_GRAPH_REL:
- ret += callchain__fprintf_graph(fp, chain, total_samples,
- left_margin);
- case CHAIN_NONE:
- default:
- break;
- }
- ret += fprintf(fp, "\n");
- rb_node = rb_next(rb_node);
- }
-
- return ret;
-}
-
-static size_t
-hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
-{
- struct sort_entry *se;
- size_t ret;
-
- if (exclude_other && !self->parent)
- return 0;
-
- if (total_samples)
- ret = percent_color_fprintf(fp,
- field_sep ? "%.2f" : " %6.2f%%",
- (self->count * 100.0) / total_samples);
- else
- ret = fprintf(fp, field_sep ? "%lld" : "%12lld ", self->count);
-
- if (show_nr_samples) {
- if (field_sep)
- fprintf(fp, "%c%lld", *field_sep, self->count);
- else
- fprintf(fp, "%11lld", self->count);
- }
-
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- if (se->elide)
- continue;
-
- fprintf(fp, "%s", field_sep ?: " ");
- ret += se->print(fp, self, se->width ? *se->width : 0);
- }
-
- ret += fprintf(fp, "\n");
-
- if (callchain) {
- int left_margin = 0;
-
- if (sort__first_dimension == SORT_COMM) {
- se = list_first_entry(&hist_entry__sort_list, typeof(*se),
- list);
- left_margin = se->width ? *se->width : 0;
- left_margin -= thread__comm_len(self->thread);
- }
-
- hist_entry_callchain__fprintf(fp, self, total_samples,
- left_margin);
- }
-
- return ret;
-}
-
-/*
- *
- */
-
-static void dso__calc_col_width(struct dso *self)
-{
- if (!col_width_list_str && !field_sep &&
- (!dso_list || strlist__has_entry(dso_list, self->name))) {
- unsigned int slen = strlen(self->name);
- if (slen > dsos__col_width)
- dsos__col_width = slen;
- }
-
- self->slen_calculated = 1;
-}
-
-static void thread__comm_adjust(struct thread *self)
-{
- char *comm = self->comm;
-
- if (!col_width_list_str && !field_sep &&
- (!comm_list || strlist__has_entry(comm_list, comm))) {
- unsigned int slen = strlen(comm);
-
- if (slen > comms__col_width) {
- comms__col_width = slen;
- threads__col_width = slen + 6;
- }
- }
-}
-
-static int thread__set_comm_adjust(struct thread *self, const char *comm)
-{
- int ret = thread__set_comm(self, comm);
-
- if (ret)
- return ret;
-
- thread__comm_adjust(self);
-
- return 0;
-}
-
-static int call__match(struct symbol *sym)
-{
- if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
- return 1;
-
- return 0;
-}
-
-static struct symbol **resolve_callchain(struct thread *thread,
- struct ip_callchain *chain,
- struct symbol **parent)
-{
- u8 cpumode = PERF_RECORD_MISC_USER;
- struct symbol **syms = NULL;
- unsigned int i;
-
- if (callchain) {
- syms = calloc(chain->nr, sizeof(*syms));
- if (!syms) {
- fprintf(stderr, "Can't allocate memory for symbols\n");
- exit(-1);
- }
- }
-
- for (i = 0; i < chain->nr; i++) {
- u64 ip = chain->ips[i];
- struct addr_location al;
-
- if (ip >= PERF_CONTEXT_MAX) {
- switch (ip) {
- case PERF_CONTEXT_HV:
- cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
- case PERF_CONTEXT_KERNEL:
- cpumode = PERF_RECORD_MISC_KERNEL; break;
- case PERF_CONTEXT_USER:
- cpumode = PERF_RECORD_MISC_USER; break;
- default:
- break;
- }
- continue;
- }
-
- thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
- ip, &al, NULL);
- if (al.sym != NULL) {
- if (sort__has_parent && !*parent &&
- call__match(al.sym))
- *parent = al.sym;
- if (!callchain)
- break;
- syms[i] = al.sym;
- }
- }
-
- return syms;
-}
-
-/*
- * collect histogram counts
- */
-
-static int hist_entry__add(struct addr_location *al,
- struct ip_callchain *chain, u64 count)
+static int perf_session__add_hist_entry(struct perf_session *self,
+ struct addr_location *al,
+ struct ip_callchain *chain, u64 count)
{
struct symbol **syms = NULL, *parent = NULL;
bool hit;
struct hist_entry *he;
- if ((sort__has_parent || callchain) && chain)
- syms = resolve_callchain(al->thread, chain, &parent);
-
- he = __hist_entry__add(al, parent, count, &hit);
+ if ((sort__has_parent || symbol_conf.use_callchain) && chain)
+ syms = perf_session__resolve_callchain(self, al->thread,
+ chain, &parent);
+ he = __perf_session__add_hist_entry(self, al, parent, count, &hit);
if (he == NULL)
return -ENOMEM;
if (hit)
he->count += count;
- if (callchain) {
+ if (symbol_conf.use_callchain) {
if (!hit)
callchain_init(&he->callchain);
append_chain(&he->callchain, chain, syms);
@@ -496,100 +71,6 @@ static int hist_entry__add(struct addr_location *al,
return 0;
}
-static size_t output__fprintf(FILE *fp, u64 total_samples)
-{
- struct hist_entry *pos;
- struct sort_entry *se;
- struct rb_node *nd;
- size_t ret = 0;
- unsigned int width;
- char *col_width = col_width_list_str;
- int raw_printing_style;
-
- raw_printing_style = !strcmp(pretty_printing_style, "raw");
-
- init_rem_hits();
-
- fprintf(fp, "# Samples: %Ld\n", (u64)total_samples);
- fprintf(fp, "#\n");
-
- fprintf(fp, "# Overhead");
- if (show_nr_samples) {
- if (field_sep)
- fprintf(fp, "%cSamples", *field_sep);
- else
- fputs(" Samples ", fp);
- }
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- if (se->elide)
- continue;
- if (field_sep) {
- fprintf(fp, "%c%s", *field_sep, se->header);
- continue;
- }
- width = strlen(se->header);
- if (se->width) {
- if (col_width_list_str) {
- if (col_width) {
- *se->width = atoi(col_width);
- col_width = strchr(col_width, ',');
- if (col_width)
- ++col_width;
- }
- }
- width = *se->width = max(*se->width, width);
- }
- fprintf(fp, " %*s", width, se->header);
- }
- fprintf(fp, "\n");
-
- if (field_sep)
- goto print_entries;
-
- fprintf(fp, "# ........");
- if (show_nr_samples)
- fprintf(fp, " ..........");
- list_for_each_entry(se, &hist_entry__sort_list, list) {
- unsigned int i;
-
- if (se->elide)
- continue;
-
- fprintf(fp, " ");
- if (se->width)
- width = *se->width;
- else
- width = strlen(se->header);
- for (i = 0; i < width; i++)
- fprintf(fp, ".");
- }
- fprintf(fp, "\n");
-
- fprintf(fp, "#\n");
-
-print_entries:
- for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
- pos = rb_entry(nd, struct hist_entry, rb_node);
- ret += hist_entry__fprintf(fp, pos, total_samples);
- }
-
- if (sort_order == default_sort_order &&
- parent_pattern == default_parent_pattern) {
- fprintf(fp, "#\n");
- fprintf(fp, "# (For a higher level overview, try: perf report --sort comm,dso)\n");
- fprintf(fp, "#\n");
- }
- fprintf(fp, "\n");
-
- free(rem_sq_bracket);
-
- if (show_threads)
- perf_read_values_display(fp, &show_threads_values,
- raw_printing_style);
-
- return ret;
-}
-
static int validate_chain(struct ip_callchain *chain, event_t *event)
{
unsigned int chain_size;
@@ -603,108 +84,60 @@ static int validate_chain(struct ip_callchain *chain, event_t *event)
return 0;
}
-static int process_sample_event(event_t *event)
+static int process_sample_event(event_t *event, struct perf_session *session)
{
- u64 ip = event->ip.ip;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- struct ip_callchain *chain = NULL;
- int cpumode;
+ struct sample_data data = { .period = 1, };
struct addr_location al;
- struct thread *thread = threads__findnew(event->ip.pid);
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ event__parse_sample(event, session->sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
- if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
unsigned int i;
- chain = (void *)more_data;
+ dump_printf("... chain: nr:%Lu\n", data.callchain->nr);
- dump_printf("... chain: nr:%Lu\n", chain->nr);
-
- if (validate_chain(chain, event) < 0) {
+ if (validate_chain(data.callchain, event) < 0) {
pr_debug("call-chain problem with event, "
"skipping it.\n");
return 0;
}
if (dump_trace) {
- for (i = 0; i < chain->nr; i++)
- dump_printf("..... %2d: %016Lx\n", i, chain->ips[i]);
+ for (i = 0; i < data.callchain->nr; i++)
+ dump_printf("..... %2d: %016Lx\n",
+ i, data.callchain->ips[i]);
}
}
- if (thread == NULL) {
- pr_debug("problem processing %d event, skipping it.\n",
+ if (event__preprocess_sample(event, session, &al, NULL) < 0) {
+ fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
- if (comm_list && !strlist__has_entry(comm_list, thread->comm))
- return 0;
-
- cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
-
- thread__find_addr_location(thread, cpumode,
- MAP__FUNCTION, ip, &al, NULL);
- /*
- * We have to do this here as we may have a dso with no symbol hit that
- * has a name longer than the ones with symbols sampled.
- */
- if (al.map && !sort_dso.elide && !al.map->dso->slen_calculated)
- dso__calc_col_width(al.map->dso);
-
- if (dso_list &&
- (!al.map || !al.map->dso ||
- !(strlist__has_entry(dso_list, al.map->dso->short_name) ||
- (al.map->dso->short_name != al.map->dso->long_name &&
- strlist__has_entry(dso_list, al.map->dso->long_name)))))
+ if (al.filtered)
return 0;
- if (sym_list && al.sym && !strlist__has_entry(sym_list, al.sym->name))
- return 0;
-
- if (hist_entry__add(&al, chain, period)) {
+ if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) {
pr_debug("problem incrementing symbol count, skipping event\n");
return -1;
}
- event__stats.total += period;
-
+ session->events_stats.total += data.period;
return 0;
}
-static int process_comm_event(event_t *event)
-{
- struct thread *thread = threads__findnew(event->comm.pid);
-
- dump_printf(": %s:%d\n", event->comm.comm, event->comm.pid);
-
- if (thread == NULL ||
- thread__set_comm_adjust(thread, event->comm.comm)) {
- dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
- return -1;
- }
-
- return 0;
-}
-
-static int process_read_event(event_t *event)
+static int process_read_event(event_t *event, struct perf_session *session __used)
{
struct perf_event_attr *attr;
- attr = perf_header__find_attr(event->read.id, header);
+ attr = perf_header__find_attr(event->read.id, &session->header);
if (show_threads) {
const char *name = attr ? __event_name(attr->type, attr->config)
@@ -723,25 +156,23 @@ static int process_read_event(event_t *event)
return 0;
}
-static int sample_type_check(u64 type)
+static int sample_type_check(struct perf_session *session)
{
- sample_type = type;
-
- if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
fprintf(stderr, "selected --sort parent, but no"
" callchain data. Did you call"
" perf record without -g?\n");
return -1;
}
- if (callchain) {
+ if (symbol_conf.use_callchain) {
fprintf(stderr, "selected -g but no callchain data."
" Did you call perf record without"
" -g?\n");
return -1;
}
- } else if (callchain_param.mode != CHAIN_NONE && !callchain) {
- callchain = 1;
+ } else if (callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) {
+ symbol_conf.use_callchain = true;
if (register_callchain_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain"
" params\n");
@@ -752,10 +183,10 @@ static int sample_type_check(u64 type)
return 0;
}
-static struct perf_file_handler file_handler = {
+static struct perf_event_ops event_ops = {
.process_sample_event = process_sample_event,
.process_mmap_event = event__process_mmap,
- .process_comm_event = process_comm_event,
+ .process_comm_event = event__process_comm,
.process_exit_event = event__process_task,
.process_fork_event = event__process_task,
.process_lost_event = event__process_lost,
@@ -766,40 +197,47 @@ static struct perf_file_handler file_handler = {
static int __cmd_report(void)
{
- struct thread *idle;
int ret;
+ struct perf_session *session;
- idle = register_idle_thread();
- thread__comm_adjust(idle);
+ session = perf_session__new(input_name, O_RDONLY, force);
+ if (session == NULL)
+ return -ENOMEM;
if (show_threads)
perf_read_values_init(&show_threads_values);
- register_perf_file_handler(&file_handler);
-
- ret = mmap_dispatch_perf_file(&header, input_name, force,
- full_paths, &event__cwdlen, &event__cwd);
+ ret = perf_session__process_events(session, &event_ops);
if (ret)
- return ret;
+ goto out_delete;
if (dump_trace) {
event__print_totals();
- return 0;
+ goto out_delete;
}
if (verbose > 3)
- threads__fprintf(stdout);
+ perf_session__fprintf(session, stdout);
if (verbose > 2)
dsos__fprintf(stdout);
- collapse__resort();
- output__resort(event__stats.total);
- output__fprintf(stdout, event__stats.total);
+ perf_session__collapse_resort(session);
+ perf_session__output_resort(session, session->events_stats.total);
+ fprintf(stdout, "# Samples: %Ld\n#\n", session->events_stats.total);
+ perf_session__fprintf_hists(session, NULL, false, stdout);
+ if (sort_order == default_sort_order &&
+ parent_pattern == default_parent_pattern)
+ fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n");
- if (show_threads)
+ if (show_threads) {
+ bool raw_printing_style = !strcmp(pretty_printing_style, "raw");
+ perf_read_values_display(stdout, &show_threads_values,
+ raw_printing_style);
perf_read_values_destroy(&show_threads_values);
-
+ }
+out_delete:
+ perf_session__delete(session);
return ret;
}
@@ -810,7 +248,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
char *tok;
char *endptr;
- callchain = 1;
+ symbol_conf.use_callchain = true;
if (!arg)
return 0;
@@ -831,7 +269,7 @@ parse_callchain_opt(const struct option *opt __used, const char *arg,
else if (!strncmp(tok, "none", strlen(arg))) {
callchain_param.mode = CHAIN_NONE;
- callchain = 0;
+ symbol_conf.use_callchain = false;
return 0;
}
@@ -874,7 +312,7 @@ static const struct option options[] = {
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
- OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
+ OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
"Show a column with the number of samples"),
OPT_BOOLEAN('T', "threads", &show_threads,
"Show per-thread event counters"),
@@ -882,78 +320,46 @@ static const struct option options[] = {
"pretty printing style key: normal raw"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
- OPT_BOOLEAN('P', "full-paths", &full_paths,
+ OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
"Don't shorten the pathnames taking into account the cwd"),
OPT_STRING('p', "parent", &parent_pattern, "regex",
"regex filter to identify parent, see: '--sort parent'"),
- OPT_BOOLEAN('x', "exclude-other", &exclude_other,
+ OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
OPT_CALLBACK_DEFAULT('g', "call-graph", NULL, "output_type,min_percent",
"Display callchains using output_type and min percent threshold. "
"Default: fractal,0.5", &parse_callchain_opt, callchain_default_opt),
- OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
+ OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
- OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
+ OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only consider symbols in these comms"),
- OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
+ OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
- OPT_STRING('w', "column-widths", &col_width_list_str,
+ OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
"width[,width...]",
"don't try to adjust column width, use these fixed values"),
- OPT_STRING('t', "field-separator", &field_sep, "separator",
+ OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
OPT_END()
};
-static void setup_sorting(void)
+int cmd_report(int argc, const char **argv, const char *prefix __used)
{
- char *tmp, *tok, *str = strdup(sort_order);
-
- for (tok = strtok_r(str, ", ", &tmp);
- tok; tok = strtok_r(NULL, ", ", &tmp)) {
- if (sort_dimension__add(tok) < 0) {
- error("Unknown --sort key: `%s'", tok);
- usage_with_options(report_usage, options);
- }
- }
-
- free(str);
-}
+ argc = parse_options(argc, argv, options, report_usage, 0);
-static void setup_list(struct strlist **list, const char *list_str,
- struct sort_entry *se, const char *list_name,
- FILE *fp)
-{
- if (list_str) {
- *list = strlist__new(true, list_str);
- if (!*list) {
- fprintf(stderr, "problems parsing %s list\n",
- list_name);
- exit(129);
- }
- if (strlist__nr_entries(*list) == 1) {
- fprintf(fp, "# %s: %s\n", list_name,
- strlist__entry(*list, 0)->s);
- se->elide = true;
- }
- }
-}
+ setup_pager();
-int cmd_report(int argc, const char **argv, const char *prefix __used)
-{
- if (symbol__init(&symbol_conf) < 0)
+ if (symbol__init() < 0)
return -1;
- argc = parse_options(argc, argv, options, report_usage, 0);
-
- setup_sorting();
+ setup_sorting(report_usage, options);
if (parent_pattern != default_parent_pattern) {
sort_dimension__add("parent");
sort_parent.elide = 1;
} else
- exclude_other = 0;
+ symbol_conf.exclude_other = false;
/*
* Any (unrecognized) arguments left?
@@ -961,17 +367,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
if (argc)
usage_with_options(report_usage, options);
- setup_pager();
-
- setup_list(&dso_list, dso_list_str, &sort_dso, "dso", stdout);
- setup_list(&comm_list, comm_list_str, &sort_comm, "comm", stdout);
- setup_list(&sym_list, sym_list_str, &sort_sym, "symbol", stdout);
-
- if (field_sep && *field_sep == '.') {
- fputs("'.' is the only non valid --field-separator argument\n",
- stderr);
- exit(129);
- }
+ sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout);
+ sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
+ sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout);
return __cmd_report();
}
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 26b782f26ee..80209df6cfe 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -6,14 +6,13 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
+#include "util/session.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
-#include "util/data_map.h"
-#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
@@ -22,9 +21,6 @@
static char const *input_name = "perf.data";
-static struct perf_header *header;
-static u64 sample_type;
-
static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;
@@ -141,6 +137,7 @@ struct work_atoms {
struct thread *thread;
struct rb_node node;
u64 max_lat;
+ u64 max_lat_at;
u64 total_lat;
u64 nb_atoms;
u64 total_runtime;
@@ -414,34 +411,33 @@ static u64 get_cpu_usage_nsec_parent(void)
return sum;
}
-static u64 get_cpu_usage_nsec_self(void)
+static int self_open_counters(void)
{
- char filename [] = "/proc/1234567890/sched";
- unsigned long msecs, nsecs;
- char *line = NULL;
- u64 total = 0;
- size_t len = 0;
- ssize_t chars;
- FILE *file;
- int ret;
+ struct perf_event_attr attr;
+ int fd;
- sprintf(filename, "/proc/%d/sched", getpid());
- file = fopen(filename, "r");
- BUG_ON(!file);
+ memset(&attr, 0, sizeof(attr));
- while ((chars = getline(&line, &len, file)) != -1) {
- ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
- &msecs, &nsecs);
- if (ret == 2) {
- total = msecs*1e6 + nsecs;
- break;
- }
- }
- if (line)
- free(line);
- fclose(file);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_TASK_CLOCK;
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+
+ if (fd < 0)
+ die("Error: sys_perf_event_open() syscall returned"
+ "with %d (%s)\n", fd, strerror(errno));
+ return fd;
+}
+
+static u64 get_cpu_usage_nsec_self(int fd)
+{
+ u64 runtime;
+ int ret;
- return total;
+ ret = read(fd, &runtime, sizeof(runtime));
+ BUG_ON(ret != sizeof(runtime));
+
+ return runtime;
}
static void *thread_func(void *ctx)
@@ -450,9 +446,11 @@ static void *thread_func(void *ctx)
u64 cpu_usage_0, cpu_usage_1;
unsigned long i, ret;
char comm2[22];
+ int fd;
sprintf(comm2, ":%s", this_task->comm);
prctl(PR_SET_NAME, comm2);
+ fd = self_open_counters();
again:
ret = sem_post(&this_task->ready_for_work);
@@ -462,16 +460,15 @@ again:
ret = pthread_mutex_unlock(&start_work_mutex);
BUG_ON(ret);
- cpu_usage_0 = get_cpu_usage_nsec_self();
+ cpu_usage_0 = get_cpu_usage_nsec_self(fd);
for (i = 0; i < this_task->nr_events; i++) {
this_task->curr_event = i;
process_sched_event(this_task, this_task->atoms[i]);
}
- cpu_usage_1 = get_cpu_usage_nsec_self();
+ cpu_usage_1 = get_cpu_usage_nsec_self(fd);
this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
-
ret = sem_post(&this_task->work_done_sem);
BUG_ON(ret);
@@ -628,11 +625,6 @@ static void test_calibrations(void)
printf("the sleep test took %Ld nsecs\n", T1-T0);
}
-struct raw_event_sample {
- u32 size;
- char data[0];
-};
-
#define FILL_FIELD(ptr, field, event, data) \
ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
@@ -736,18 +728,21 @@ struct trace_migrate_task_event {
struct trace_sched_handler {
void (*switch_event)(struct trace_switch_event *,
+ struct perf_session *,
struct event *,
int cpu,
u64 timestamp,
struct thread *thread);
void (*runtime_event)(struct trace_runtime_event *,
+ struct perf_session *,
struct event *,
int cpu,
u64 timestamp,
struct thread *thread);
void (*wakeup_event)(struct trace_wakeup_event *,
+ struct perf_session *,
struct event *,
int cpu,
u64 timestamp,
@@ -760,6 +755,7 @@ struct trace_sched_handler {
struct thread *thread);
void (*migrate_task_event)(struct trace_migrate_task_event *,
+ struct perf_session *session,
struct event *,
int cpu,
u64 timestamp,
@@ -769,6 +765,7 @@ struct trace_sched_handler {
static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
+ struct perf_session *session __used,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -795,6 +792,7 @@ static u64 cpu_last_switched[MAX_CPUS];
static void
replay_switch_event(struct trace_switch_event *switch_event,
+ struct perf_session *session __used,
struct event *event,
int cpu,
u64 timestamp,
@@ -1019,13 +1017,16 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
delta = atom->sched_in_time - atom->wake_up_time;
atoms->total_lat += delta;
- if (delta > atoms->max_lat)
+ if (delta > atoms->max_lat) {
atoms->max_lat = delta;
+ atoms->max_lat_at = timestamp;
+ }
atoms->nb_atoms++;
}
static void
latency_switch_event(struct trace_switch_event *switch_event,
+ struct perf_session *session,
struct event *event __used,
int cpu,
u64 timestamp,
@@ -1049,8 +1050,8 @@ latency_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %Ld < 0 ?\n", delta);
- sched_out = threads__findnew(switch_event->prev_pid);
- sched_in = threads__findnew(switch_event->next_pid);
+ sched_out = perf_session__findnew(session, switch_event->prev_pid);
+ sched_in = perf_session__findnew(session, switch_event->next_pid);
out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_events) {
@@ -1078,12 +1079,13 @@ latency_switch_event(struct trace_switch_event *switch_event,
static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
+ struct perf_session *session,
struct event *event __used,
int cpu,
u64 timestamp,
struct thread *this_thread __used)
{
- struct thread *thread = threads__findnew(runtime_event->pid);
+ struct thread *thread = perf_session__findnew(session, runtime_event->pid);
struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -1100,6 +1102,7 @@ latency_runtime_event(struct trace_runtime_event *runtime_event,
static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
+ struct perf_session *session,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -1113,7 +1116,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
if (!wakeup_event->success)
return;
- wakee = threads__findnew(wakeup_event->pid);
+ wakee = perf_session__findnew(session, wakeup_event->pid);
atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
if (!atoms) {
thread_atoms_insert(wakee);
@@ -1147,6 +1150,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
+ struct perf_session *session,
struct event *__event __used,
int cpu __used,
u64 timestamp,
@@ -1162,7 +1166,7 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
if (profile_cpu == -1)
return;
- migrant = threads__findnew(migrate_task_event->pid);
+ migrant = perf_session__findnew(session, migrate_task_event->pid);
atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
if (!atoms) {
thread_atoms_insert(migrant);
@@ -1216,10 +1220,11 @@ static void output_lat_thread(struct work_atoms *work_list)
avg = work_list->total_lat / work_list->nb_atoms;
- printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
+ printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
(double)work_list->total_runtime / 1e6,
work_list->nb_atoms, (double)avg / 1e6,
- (double)work_list->max_lat / 1e6);
+ (double)work_list->max_lat / 1e6,
+ (double)work_list->max_lat_at / 1e9);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
@@ -1356,7 +1361,7 @@ static void sort_lat(void)
static struct trace_sched_handler *trace_handler;
static void
-process_sched_wakeup_event(struct raw_event_sample *raw,
+process_sched_wakeup_event(void *data, struct perf_session *session,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1364,16 +1369,17 @@ process_sched_wakeup_event(struct raw_event_sample *raw,
{
struct trace_wakeup_event wakeup_event;
- FILL_COMMON_FIELDS(wakeup_event, event, raw->data);
+ FILL_COMMON_FIELDS(wakeup_event, event, data);
- FILL_ARRAY(wakeup_event, comm, event, raw->data);
- FILL_FIELD(wakeup_event, pid, event, raw->data);
- FILL_FIELD(wakeup_event, prio, event, raw->data);
- FILL_FIELD(wakeup_event, success, event, raw->data);
- FILL_FIELD(wakeup_event, cpu, event, raw->data);
+ FILL_ARRAY(wakeup_event, comm, event, data);
+ FILL_FIELD(wakeup_event, pid, event, data);
+ FILL_FIELD(wakeup_event, prio, event, data);
+ FILL_FIELD(wakeup_event, success, event, data);
+ FILL_FIELD(wakeup_event, cpu, event, data);
if (trace_handler->wakeup_event)
- trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
+ trace_handler->wakeup_event(&wakeup_event, session, event,
+ cpu, timestamp, thread);
}
/*
@@ -1391,6 +1397,7 @@ static char next_shortname2 = '0';
static void
map_switch_event(struct trace_switch_event *switch_event,
+ struct perf_session *session,
struct event *event __used,
int this_cpu,
u64 timestamp,
@@ -1418,8 +1425,8 @@ map_switch_event(struct trace_switch_event *switch_event,
die("hm, delta: %Ld < 0 ?\n", delta);
- sched_out = threads__findnew(switch_event->prev_pid);
- sched_in = threads__findnew(switch_event->next_pid);
+ sched_out = perf_session__findnew(session, switch_event->prev_pid);
+ sched_in = perf_session__findnew(session, switch_event->next_pid);
curr_thread[this_cpu] = sched_in;
@@ -1469,7 +1476,7 @@ map_switch_event(struct trace_switch_event *switch_event,
static void
-process_sched_switch_event(struct raw_event_sample *raw,
+process_sched_switch_event(void *data, struct perf_session *session,
struct event *event,
int this_cpu,
u64 timestamp __used,
@@ -1477,15 +1484,15 @@ process_sched_switch_event(struct raw_event_sample *raw,
{
struct trace_switch_event switch_event;
- FILL_COMMON_FIELDS(switch_event, event, raw->data);
+ FILL_COMMON_FIELDS(switch_event, event, data);
- FILL_ARRAY(switch_event, prev_comm, event, raw->data);
- FILL_FIELD(switch_event, prev_pid, event, raw->data);
- FILL_FIELD(switch_event, prev_prio, event, raw->data);
- FILL_FIELD(switch_event, prev_state, event, raw->data);
- FILL_ARRAY(switch_event, next_comm, event, raw->data);
- FILL_FIELD(switch_event, next_pid, event, raw->data);
- FILL_FIELD(switch_event, next_prio, event, raw->data);
+ FILL_ARRAY(switch_event, prev_comm, event, data);
+ FILL_FIELD(switch_event, prev_pid, event, data);
+ FILL_FIELD(switch_event, prev_prio, event, data);
+ FILL_FIELD(switch_event, prev_state, event, data);
+ FILL_ARRAY(switch_event, next_comm, event, data);
+ FILL_FIELD(switch_event, next_pid, event, data);
+ FILL_FIELD(switch_event, next_prio, event, data);
if (curr_pid[this_cpu] != (u32)-1) {
/*
@@ -1496,13 +1503,14 @@ process_sched_switch_event(struct raw_event_sample *raw,
nr_context_switch_bugs++;
}
if (trace_handler->switch_event)
- trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
+ trace_handler->switch_event(&switch_event, session, event,
+ this_cpu, timestamp, thread);
curr_pid[this_cpu] = switch_event.next_pid;
}
static void
-process_sched_runtime_event(struct raw_event_sample *raw,
+process_sched_runtime_event(void *data, struct perf_session *session,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1510,17 +1518,17 @@ process_sched_runtime_event(struct raw_event_sample *raw,
{
struct trace_runtime_event runtime_event;
- FILL_ARRAY(runtime_event, comm, event, raw->data);
- FILL_FIELD(runtime_event, pid, event, raw->data);
- FILL_FIELD(runtime_event, runtime, event, raw->data);
- FILL_FIELD(runtime_event, vruntime, event, raw->data);
+ FILL_ARRAY(runtime_event, comm, event, data);
+ FILL_FIELD(runtime_event, pid, event, data);
+ FILL_FIELD(runtime_event, runtime, event, data);
+ FILL_FIELD(runtime_event, vruntime, event, data);
if (trace_handler->runtime_event)
- trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
+ trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
}
static void
-process_sched_fork_event(struct raw_event_sample *raw,
+process_sched_fork_event(void *data,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1528,15 +1536,16 @@ process_sched_fork_event(struct raw_event_sample *raw,
{
struct trace_fork_event fork_event;
- FILL_COMMON_FIELDS(fork_event, event, raw->data);
+ FILL_COMMON_FIELDS(fork_event, event, data);
- FILL_ARRAY(fork_event, parent_comm, event, raw->data);
- FILL_FIELD(fork_event, parent_pid, event, raw->data);
- FILL_ARRAY(fork_event, child_comm, event, raw->data);
- FILL_FIELD(fork_event, child_pid, event, raw->data);
+ FILL_ARRAY(fork_event, parent_comm, event, data);
+ FILL_FIELD(fork_event, parent_pid, event, data);
+ FILL_ARRAY(fork_event, child_comm, event, data);
+ FILL_FIELD(fork_event, child_pid, event, data);
if (trace_handler->fork_event)
- trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
+ trace_handler->fork_event(&fork_event, event,
+ cpu, timestamp, thread);
}
static void
@@ -1550,7 +1559,7 @@ process_sched_exit_event(struct event *event,
}
static void
-process_sched_migrate_task_event(struct raw_event_sample *raw,
+process_sched_migrate_task_event(void *data, struct perf_session *session,
struct event *event,
int cpu __used,
u64 timestamp __used,
@@ -1558,80 +1567,67 @@ process_sched_migrate_task_event(struct raw_event_sample *raw,
{
struct trace_migrate_task_event migrate_task_event;
- FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);
+ FILL_COMMON_FIELDS(migrate_task_event, event, data);
- FILL_ARRAY(migrate_task_event, comm, event, raw->data);
- FILL_FIELD(migrate_task_event, pid, event, raw->data);
- FILL_FIELD(migrate_task_event, prio, event, raw->data);
- FILL_FIELD(migrate_task_event, cpu, event, raw->data);
+ FILL_ARRAY(migrate_task_event, comm, event, data);
+ FILL_FIELD(migrate_task_event, pid, event, data);
+ FILL_FIELD(migrate_task_event, prio, event, data);
+ FILL_FIELD(migrate_task_event, cpu, event, data);
if (trace_handler->migrate_task_event)
- trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);
+ trace_handler->migrate_task_event(&migrate_task_event, session,
+ event, cpu, timestamp, thread);
}
static void
-process_raw_event(event_t *raw_event __used, void *more_data,
- int cpu, u64 timestamp, struct thread *thread)
+process_raw_event(event_t *raw_event __used, struct perf_session *session,
+ void *data, int cpu, u64 timestamp, struct thread *thread)
{
- struct raw_event_sample *raw = more_data;
struct event *event;
int type;
- type = trace_parse_common_type(raw->data);
+
+ type = trace_parse_common_type(data);
event = trace_find_event(type);
if (!strcmp(event->name, "sched_switch"))
- process_sched_switch_event(raw, event, cpu, timestamp, thread);
+ process_sched_switch_event(data, session, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_stat_runtime"))
- process_sched_runtime_event(raw, event, cpu, timestamp, thread);
+ process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_wakeup"))
- process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+ process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_wakeup_new"))
- process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
+ process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_process_fork"))
- process_sched_fork_event(raw, event, cpu, timestamp, thread);
+ process_sched_fork_event(data, event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_process_exit"))
process_sched_exit_event(event, cpu, timestamp, thread);
if (!strcmp(event->name, "sched_migrate_task"))
- process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
+ process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
}
-static int process_sample_event(event_t *event)
+static int process_sample_event(event_t *event, struct perf_session *session)
{
+ struct sample_data data;
struct thread *thread;
- u64 ip = event->ip.ip;
- u64 timestamp = -1;
- u32 cpu = -1;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- if (!(sample_type & PERF_SAMPLE_RAW))
+ if (!(session->sample_type & PERF_SAMPLE_RAW))
return 0;
- thread = threads__findnew(event->ip.pid);
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = -1;
- if (sample_type & PERF_SAMPLE_TIME) {
- timestamp = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
-
- if (sample_type & PERF_SAMPLE_CPU) {
- cpu = *(u32 *)more_data;
- more_data += sizeof(u32);
- more_data += sizeof(u32); /* reserved */
- }
-
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ event__parse_sample(event, session->sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
+ thread = perf_session__findnew(session, data.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -1640,15 +1636,16 @@ static int process_sample_event(event_t *event)
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- if (profile_cpu != -1 && profile_cpu != (int) cpu)
+ if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
return 0;
- process_raw_event(event, more_data, cpu, timestamp, thread);
+ process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);
return 0;
}
-static int process_lost_event(event_t *event __used)
+static int process_lost_event(event_t *event __used,
+ struct perf_session *session __used)
{
nr_lost_chunks++;
nr_lost_events += event->lost.lost;
@@ -1656,11 +1653,9 @@ static int process_lost_event(event_t *event __used)
return 0;
}
-static int sample_type_check(u64 type)
+static int sample_type_check(struct perf_session *session __used)
{
- sample_type = type;
-
- if (!(sample_type & PERF_SAMPLE_RAW)) {
+ if (!(session->sample_type & PERF_SAMPLE_RAW)) {
fprintf(stderr,
"No trace sample to read. Did you call perf record "
"without -R?");
@@ -1670,7 +1665,7 @@ static int sample_type_check(u64 type)
return 0;
}
-static struct perf_file_handler file_handler = {
+static struct perf_event_ops event_ops = {
.process_sample_event = process_sample_event,
.process_comm_event = event__process_comm,
.process_lost_event = process_lost_event,
@@ -1679,11 +1674,14 @@ static struct perf_file_handler file_handler = {
static int read_events(void)
{
- register_idle_thread();
- register_perf_file_handler(&file_handler);
+ int err;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
+ if (session == NULL)
+ return -ENOMEM;
- return mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ err = perf_session__process_events(session, &event_ops);
+ perf_session__delete(session);
+ return err;
}
static void print_bad_events(void)
@@ -1724,9 +1722,9 @@ static void __cmd_lat(void)
read_events();
sort_lat();
- printf("\n -----------------------------------------------------------------------------------------\n");
- printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms |\n");
- printf(" -----------------------------------------------------------------------------------------\n");
+ printf("\n ---------------------------------------------------------------------------------------------------------------\n");
+ printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
+ printf(" ---------------------------------------------------------------------------------------------------------------\n");
next = rb_first(&sorted_atom_root);
@@ -1902,13 +1900,18 @@ static int __cmd_record(int argc, const char **argv)
int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
- symbol__init(0);
-
argc = parse_options(argc, argv, sched_options, sched_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(sched_usage, sched_options);
+ /*
+ * Aliased to 'perf trace' for now:
+ */
+ if (!strcmp(argv[0], "trace"))
+ return cmd_trace(argc, argv, prefix);
+
+ symbol__init();
if (!strncmp(argv[0], "rec", 3)) {
return __cmd_record(argc, argv);
} else if (!strncmp(argv[0], "lat", 3)) {
@@ -1932,11 +1935,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
usage_with_options(replay_usage, replay_options);
}
__cmd_replay();
- } else if (!strcmp(argv[0], "trace")) {
- /*
- * Aliased to 'perf trace' for now:
- */
- return cmd_trace(argc, argv, prefix);
} else {
usage_with_options(sched_usage, sched_options);
}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index cb58b6605fc..a589a43112d 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -30,15 +30,12 @@
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
-#include "util/data_map.h"
+#include "util/session.h"
#include "util/svghelper.h"
static char const *input_name = "perf.data";
static char const *output_name = "output.svg";
-
-static u64 sample_type;
-
static unsigned int numcpus;
static u64 min_freq; /* Lowest CPU frequency seen */
static u64 max_freq; /* Highest CPU frequency seen */
@@ -281,33 +278,30 @@ static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];
-static int
-process_comm_event(event_t *event)
+static int process_comm_event(event_t *event, struct perf_session *session __used)
{
pid_set_comm(event->comm.pid, event->comm.comm);
return 0;
}
-static int
-process_fork_event(event_t *event)
+
+static int process_fork_event(event_t *event, struct perf_session *session __used)
{
pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
return 0;
}
-static int
-process_exit_event(event_t *event)
+static int process_exit_event(event_t *event, struct perf_session *session __used)
{
pid_exit(event->fork.pid, event->fork.time);
return 0;
}
struct trace_entry {
- u32 size;
unsigned short type;
unsigned char flags;
unsigned char preempt_count;
int pid;
- int tgid;
+ int lock_depth;
};
struct power_entry {
@@ -481,46 +475,24 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
}
-static int
-process_sample_event(event_t *event)
+static int process_sample_event(event_t *event, struct perf_session *session)
{
- int cursor = 0;
- u64 addr = 0;
- u64 stamp = 0;
- u32 cpu = 0;
- u32 pid = 0;
+ struct sample_data data;
struct trace_entry *te;
- if (sample_type & PERF_SAMPLE_IP)
- cursor++;
-
- if (sample_type & PERF_SAMPLE_TID) {
- pid = event->sample.array[cursor]>>32;
- cursor++;
- }
- if (sample_type & PERF_SAMPLE_TIME) {
- stamp = event->sample.array[cursor++];
+ memset(&data, 0, sizeof(data));
- if (!first_time || first_time > stamp)
- first_time = stamp;
- if (last_time < stamp)
- last_time = stamp;
+ event__parse_sample(event, session->sample_type, &data);
+ if (session->sample_type & PERF_SAMPLE_TIME) {
+ if (!first_time || first_time > data.time)
+ first_time = data.time;
+ if (last_time < data.time)
+ last_time = data.time;
}
- if (sample_type & PERF_SAMPLE_ADDR)
- addr = event->sample.array[cursor++];
- if (sample_type & PERF_SAMPLE_ID)
- cursor++;
- if (sample_type & PERF_SAMPLE_STREAM_ID)
- cursor++;
- if (sample_type & PERF_SAMPLE_CPU)
- cpu = event->sample.array[cursor++] & 0xFFFFFFFF;
- if (sample_type & PERF_SAMPLE_PERIOD)
- cursor++;
-
- te = (void *)&event->sample.array[cursor];
- if (sample_type & PERF_SAMPLE_RAW && te->size > 0) {
+ te = (void *)data.raw_data;
+ if (session->sample_type & PERF_SAMPLE_RAW && data.raw_size > 0) {
char *event_str;
struct power_entry *pe;
@@ -532,19 +504,19 @@ process_sample_event(event_t *event)
return 0;
if (strcmp(event_str, "power:power_start") == 0)
- c_state_start(cpu, stamp, pe->value);
+ c_state_start(data.cpu, data.time, pe->value);
if (strcmp(event_str, "power:power_end") == 0)
- c_state_end(cpu, stamp);
+ c_state_end(data.cpu, data.time);
if (strcmp(event_str, "power:power_frequency") == 0)
- p_state_change(cpu, stamp, pe->value);
+ p_state_change(data.cpu, data.time, pe->value);
if (strcmp(event_str, "sched:sched_wakeup") == 0)
- sched_wakeup(cpu, stamp, pid, te);
+ sched_wakeup(data.cpu, data.time, data.pid, te);
if (strcmp(event_str, "sched:sched_switch") == 0)
- sched_switch(cpu, stamp, te);
+ sched_switch(data.cpu, data.time, te);
}
return 0;
}
@@ -597,16 +569,16 @@ static void end_sample_processing(void)
}
}
-static u64 sample_time(event_t *event)
+static u64 sample_time(event_t *event, const struct perf_session *session)
{
int cursor;
cursor = 0;
- if (sample_type & PERF_SAMPLE_IP)
+ if (session->sample_type & PERF_SAMPLE_IP)
cursor++;
- if (sample_type & PERF_SAMPLE_TID)
+ if (session->sample_type & PERF_SAMPLE_TID)
cursor++;
- if (sample_type & PERF_SAMPLE_TIME)
+ if (session->sample_type & PERF_SAMPLE_TIME)
return event->sample.array[cursor];
return 0;
}
@@ -616,8 +588,7 @@ static u64 sample_time(event_t *event)
* We first queue all events, sorted backwards by insertion.
* The order will get flipped later.
*/
-static int
-queue_sample_event(event_t *event)
+static int queue_sample_event(event_t *event, struct perf_session *session)
{
struct sample_wrapper *copy, *prev;
int size;
@@ -631,7 +602,7 @@ queue_sample_event(event_t *event)
memset(copy, 0, size);
copy->next = NULL;
- copy->timestamp = sample_time(event);
+ copy->timestamp = sample_time(event, session);
memcpy(&copy->data, event, event->sample.header.size);
@@ -1043,7 +1014,7 @@ static void write_svg_file(const char *filename)
svg_close();
}
-static void process_samples(void)
+static void process_samples(struct perf_session *session)
{
struct sample_wrapper *cursor;
event_t *event;
@@ -1054,15 +1025,13 @@ static void process_samples(void)
while (cursor) {
event = (void *)&cursor->data;
cursor = cursor->next;
- process_sample_event(event);
+ process_sample_event(event, session);
}
}
-static int sample_type_check(u64 type)
+static int sample_type_check(struct perf_session *session)
{
- sample_type = type;
-
- if (!(sample_type & PERF_SAMPLE_RAW)) {
+ if (!(session->sample_type & PERF_SAMPLE_RAW)) {
fprintf(stderr, "No trace samples found in the file.\n"
"Have you used 'perf timechart record' to record it?\n");
return -1;
@@ -1071,7 +1040,7 @@ static int sample_type_check(u64 type)
return 0;
}
-static struct perf_file_handler file_handler = {
+static struct perf_event_ops event_ops = {
.process_comm_event = process_comm_event,
.process_fork_event = process_fork_event,
.process_exit_event = process_exit_event,
@@ -1081,17 +1050,17 @@ static struct perf_file_handler file_handler = {
static int __cmd_timechart(void)
{
- struct perf_header *header;
+ struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
int ret;
- register_perf_file_handler(&file_handler);
+ if (session == NULL)
+ return -ENOMEM;
- ret = mmap_dispatch_perf_file(&header, input_name, 0, 0,
- &event__cwdlen, &event__cwd);
+ ret = perf_session__process_events(session, &event_ops);
if (ret)
- return EXIT_FAILURE;
+ goto out_delete;
- process_samples();
+ process_samples(session);
end_sample_processing();
@@ -1101,8 +1070,9 @@ static int __cmd_timechart(void)
pr_info("Written %2.1f seconds of trace to %s.\n",
(last_time - first_time) / 1000000000.0, output_name);
-
- return EXIT_SUCCESS;
+out_delete:
+ perf_session__delete(session);
+ return ret;
}
static const char * const timechart_usage[] = {
@@ -1167,11 +1137,11 @@ static const struct option options[] = {
int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
- symbol__init(0);
-
argc = parse_options(argc, argv, options, timechart_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ symbol__init();
+
if (argc && !strncmp(argv[0], "rec", 3))
return __cmd_record(argc, argv);
else if (argc)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e0a374d0e43..ddc584b6487 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -20,8 +20,9 @@
#include "perf.h"
-#include "util/symbol.h"
#include "util/color.h"
+#include "util/session.h"
+#include "util/symbol.h"
#include "util/thread.h"
#include "util/util.h"
#include <linux/rbtree.h>
@@ -79,7 +80,6 @@ static int dump_symtab = 0;
static bool hide_kernel_symbols = false;
static bool hide_user_symbols = false;
static struct winsize winsize;
-struct symbol_conf symbol_conf;
/*
* Source
@@ -926,7 +926,8 @@ static int symbol_filter(struct map *map, struct symbol *sym)
return 0;
}
-static void event__process_sample(const event_t *self, int counter)
+static void event__process_sample(const event_t *self,
+ struct perf_session *session, int counter)
{
u64 ip = self->ip.ip;
struct sym_entry *syme;
@@ -946,8 +947,8 @@ static void event__process_sample(const event_t *self, int counter)
return;
}
- if (event__preprocess_sample(self, &al, symbol_filter) < 0 ||
- al.sym == NULL)
+ if (event__preprocess_sample(self, session, &al, symbol_filter) < 0 ||
+ al.sym == NULL || al.filtered)
return;
syme = symbol__priv(al.sym);
@@ -965,14 +966,14 @@ static void event__process_sample(const event_t *self, int counter)
}
}
-static int event__process(event_t *event)
+static int event__process(event_t *event, struct perf_session *session)
{
switch (event->header.type) {
case PERF_RECORD_COMM:
- event__process_comm(event);
+ event__process_comm(event, session);
break;
case PERF_RECORD_MMAP:
- event__process_mmap(event);
+ event__process_mmap(event, session);
break;
default:
break;
@@ -999,7 +1000,8 @@ static unsigned int mmap_read_head(struct mmap_data *md)
return head;
}
-static void mmap_read_counter(struct mmap_data *md)
+static void perf_session__mmap_read_counter(struct perf_session *self,
+ struct mmap_data *md)
{
unsigned int head = mmap_read_head(md);
unsigned int old = md->prev;
@@ -1052,9 +1054,9 @@ static void mmap_read_counter(struct mmap_data *md)
}
if (event->header.type == PERF_RECORD_SAMPLE)
- event__process_sample(event, md->counter);
+ event__process_sample(event, self, md->counter);
else
- event__process(event);
+ event__process(event, self);
old += size;
}
@@ -1064,13 +1066,13 @@ static void mmap_read_counter(struct mmap_data *md)
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
-static void mmap_read(void)
+static void perf_session__mmap_read(struct perf_session *self)
{
int i, counter;
for (i = 0; i < nr_cpus; i++) {
for (counter = 0; counter < nr_counters; counter++)
- mmap_read_counter(&mmap_array[i][counter]);
+ perf_session__mmap_read_counter(self, &mmap_array[i][counter]);
}
}
@@ -1155,11 +1157,18 @@ static int __cmd_top(void)
pthread_t thread;
int i, counter;
int ret;
+ /*
+ * FIXME: perf_session__new should allow passing an O_MMAP, so that all this
+ * mmap reading, etc. is encapsulated in it. Use O_WRONLY for now.
+ */
+ struct perf_session *session = perf_session__new(NULL, O_WRONLY, false);
+ if (session == NULL)
+ return -ENOMEM;
if (target_pid != -1)
- event__synthesize_thread(target_pid, event__process);
+ event__synthesize_thread(target_pid, event__process, session);
else
- event__synthesize_threads(event__process);
+ event__synthesize_threads(event__process, session);
for (i = 0; i < nr_cpus; i++) {
group_fd = -1;
@@ -1170,7 +1179,7 @@ static int __cmd_top(void)
/* Wait for a minimal set of events before starting the snapshot */
poll(event_array, nr_poll, 100);
- mmap_read();
+ perf_session__mmap_read(session);
if (pthread_create(&thread, NULL, display_thread, NULL)) {
printf("Could not create display thread.\n");
@@ -1190,7 +1199,7 @@ static int __cmd_top(void)
while (1) {
int hits = samples;
- mmap_read();
+ perf_session__mmap_read(session);
if (hits == samples)
ret = poll(event_array, nr_poll, 100);
@@ -1273,7 +1282,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
(nr_counters + 1) * sizeof(unsigned long));
if (symbol_conf.vmlinux_name == NULL)
symbol_conf.try_vmlinux_path = true;
- if (symbol__init(&symbol_conf) < 0)
+ if (symbol__init() < 0)
return -1;
if (delay_secs < 1)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index abb914aa7be..e2285e28720 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -7,11 +7,14 @@
#include "util/header.h"
#include "util/exec_cmd.h"
#include "util/trace-event.h"
+#include "util/session.h"
static char const *script_name;
static char const *generate_script_lang;
-static int default_start_script(const char *script __attribute((unused)))
+static int default_start_script(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
{
return 0;
}
@@ -21,7 +24,7 @@ static int default_stop_script(void)
return 0;
}
-static int default_generate_script(const char *outfile __attribute ((unused)))
+static int default_generate_script(const char *outfile __unused)
{
return 0;
}
@@ -56,77 +59,53 @@ static int cleanup_scripting(void)
#include "util/debug.h"
#include "util/trace-event.h"
-#include "util/data_map.h"
#include "util/exec_cmd.h"
static char const *input_name = "perf.data";
-static struct perf_header *header;
-static u64 sample_type;
-
-static int process_sample_event(event_t *event)
+static int process_sample_event(event_t *event, struct perf_session *session)
{
- u64 ip = event->ip.ip;
- u64 timestamp = -1;
- u32 cpu = -1;
- u64 period = 1;
- void *more_data = event->ip.__more_data;
- struct thread *thread = threads__findnew(event->ip.pid);
-
- if (sample_type & PERF_SAMPLE_TIME) {
- timestamp = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ struct sample_data data;
+ struct thread *thread;
- if (sample_type & PERF_SAMPLE_CPU) {
- cpu = *(u32 *)more_data;
- more_data += sizeof(u32);
- more_data += sizeof(u32); /* reserved */
- }
+ memset(&data, 0, sizeof(data));
+ data.time = -1;
+ data.cpu = -1;
+ data.period = 1;
- if (sample_type & PERF_SAMPLE_PERIOD) {
- period = *(u64 *)more_data;
- more_data += sizeof(u64);
- }
+ event__parse_sample(event, session->sample_type, &data);
dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
event->header.misc,
- event->ip.pid, event->ip.tid,
- (void *)(long)ip,
- (long long)period);
+ data.pid, data.tid,
+ (void *)(long)data.ip,
+ (long long)data.period);
+ thread = perf_session__findnew(session, event->ip.pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
- if (sample_type & PERF_SAMPLE_RAW) {
- struct {
- u32 size;
- char data[0];
- } *raw = more_data;
-
+ if (session->sample_type & PERF_SAMPLE_RAW) {
/*
* FIXME: better resolve the pid from the struct trace_entry
* field, although it should be the same as this perf
* event pid
*/
- scripting_ops->process_event(cpu, raw->data, raw->size,
- timestamp, thread->comm);
+ scripting_ops->process_event(data.cpu, data.raw_data,
+ data.raw_size,
+ data.time, thread->comm);
}
- event__stats.total += period;
+ session->events_stats.total += data.period;
return 0;
}
-static int sample_type_check(u64 type)
+static int sample_type_check(struct perf_session *session)
{
- sample_type = type;
-
- if (!(sample_type & PERF_SAMPLE_RAW)) {
+ if (!(session->sample_type & PERF_SAMPLE_RAW)) {
fprintf(stderr,
"No trace sample to read. Did you call perf record "
"without -R?");
@@ -136,19 +115,15 @@ static int sample_type_check(u64 type)
return 0;
}
-static struct perf_file_handler file_handler = {
+static struct perf_event_ops event_ops = {
.process_sample_event = process_sample_event,
.process_comm_event = event__process_comm,
.sample_type_check = sample_type_check,
};
-static int __cmd_trace(void)
+static int __cmd_trace(struct perf_session *session)
{
- register_idle_thread();
- register_perf_file_handler(&file_handler);
-
- return mmap_dispatch_perf_file(&header, input_name,
- 0, 0, &event__cwdlen, &event__cwd);
+ return perf_session__process_events(session, &event_ops);
}
struct script_spec {
@@ -299,6 +274,244 @@ static int parse_scriptname(const struct option *opt __used,
return 0;
}
+#define for_each_lang(scripts_dir, lang_dirent, lang_next) \
+ while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \
+ lang_next) \
+ if (lang_dirent.d_type == DT_DIR && \
+ (strcmp(lang_dirent.d_name, ".")) && \
+ (strcmp(lang_dirent.d_name, "..")))
+
+#define for_each_script(lang_dir, script_dirent, script_next) \
+ while (!readdir_r(lang_dir, &script_dirent, &script_next) && \
+ script_next) \
+ if (script_dirent.d_type != DT_DIR)
+
+
+#define RECORD_SUFFIX "-record"
+#define REPORT_SUFFIX "-report"
+
+struct script_desc {
+ struct list_head node;
+ char *name;
+ char *half_liner;
+ char *args;
+};
+
+LIST_HEAD(script_descs);
+
+static struct script_desc *script_desc__new(const char *name)
+{
+ struct script_desc *s = zalloc(sizeof(*s));
+
+ if (s != NULL)
+ s->name = strdup(name);
+
+ return s;
+}
+
+static void script_desc__delete(struct script_desc *s)
+{
+ free(s->name);
+ free(s);
+}
+
+static void script_desc__add(struct script_desc *s)
+{
+ list_add_tail(&s->node, &script_descs);
+}
+
+static struct script_desc *script_desc__find(const char *name)
+{
+ struct script_desc *s;
+
+ list_for_each_entry(s, &script_descs, node)
+ if (strcasecmp(s->name, name) == 0)
+ return s;
+ return NULL;
+}
+
+static struct script_desc *script_desc__findnew(const char *name)
+{
+ struct script_desc *s = script_desc__find(name);
+
+ if (s)
+ return s;
+
+ s = script_desc__new(name);
+ if (!s)
+ goto out_delete_desc;
+
+ script_desc__add(s);
+
+ return s;
+
+out_delete_desc:
+ script_desc__delete(s);
+
+ return NULL;
+}
+
+static char *ends_with(char *str, const char *suffix)
+{
+ size_t suffix_len = strlen(suffix);
+ char *p = str;
+
+ if (strlen(str) > suffix_len) {
+ p = str + strlen(str) - suffix_len;
+ if (!strncmp(p, suffix, suffix_len))
+ return p;
+ }
+
+ return NULL;
+}
+
+static char *ltrim(char *str)
+{
+ int len = strlen(str);
+
+ while (len && isspace(*str)) {
+ len--;
+ str++;
+ }
+
+ return str;
+}
+
+static int read_script_info(struct script_desc *desc, const char *filename)
+{
+ char line[BUFSIZ], *p;
+ FILE *fp;
+
+ fp = fopen(filename, "r");
+ if (!fp)
+ return -1;
+
+ while (fgets(line, sizeof(line), fp)) {
+ p = ltrim(line);
+ if (strlen(p) == 0)
+ continue;
+ if (*p != '#')
+ continue;
+ p++;
+ if (strlen(p) && *p == '!')
+ continue;
+
+ p = ltrim(p);
+ if (strlen(p) && p[strlen(p) - 1] == '\n')
+ p[strlen(p) - 1] = '\0';
+
+ if (!strncmp(p, "description:", strlen("description:"))) {
+ p += strlen("description:");
+ desc->half_liner = strdup(ltrim(p));
+ continue;
+ }
+
+ if (!strncmp(p, "args:", strlen("args:"))) {
+ p += strlen("args:");
+ desc->args = strdup(ltrim(p));
+ continue;
+ }
+ }
+
+ fclose(fp);
+
+ return 0;
+}
+
+static int list_available_scripts(const struct option *opt __used,
+ const char *s __used, int unset __used)
+{
+ struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ char scripts_path[MAXPATHLEN];
+ DIR *scripts_dir, *lang_dir;
+ char script_path[MAXPATHLEN];
+ char lang_path[MAXPATHLEN];
+ struct script_desc *desc;
+ char first_half[BUFSIZ];
+ char *script_root;
+ char *str;
+
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+
+ scripts_dir = opendir(scripts_path);
+ if (!scripts_dir)
+ return -1;
+
+ for_each_lang(scripts_dir, lang_dirent, lang_next) {
+ snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+ lang_dirent.d_name);
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+
+ for_each_script(lang_dir, script_dirent, script_next) {
+ script_root = strdup(script_dirent.d_name);
+ str = ends_with(script_root, REPORT_SUFFIX);
+ if (str) {
+ *str = '\0';
+ desc = script_desc__findnew(script_root);
+ snprintf(script_path, MAXPATHLEN, "%s/%s",
+ lang_path, script_dirent.d_name);
+ read_script_info(desc, script_path);
+ }
+ free(script_root);
+ }
+ }
+
+ fprintf(stdout, "List of available trace scripts:\n");
+ list_for_each_entry(desc, &script_descs, node) {
+ sprintf(first_half, "%s %s", desc->name,
+ desc->args ? desc->args : "");
+ fprintf(stdout, " %-36s %s\n", first_half,
+ desc->half_liner ? desc->half_liner : "");
+ }
+
+ exit(0);
+}
+
+static char *get_script_path(const char *script_root, const char *suffix)
+{
+ struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ char scripts_path[MAXPATHLEN];
+ char script_path[MAXPATHLEN];
+ DIR *scripts_dir, *lang_dir;
+ char lang_path[MAXPATHLEN];
+ char *str, *__script_root;
+ char *path = NULL;
+
+ snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path());
+
+ scripts_dir = opendir(scripts_path);
+ if (!scripts_dir)
+ return NULL;
+
+ for_each_lang(scripts_dir, lang_dirent, lang_next) {
+ snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+ lang_dirent.d_name);
+ lang_dir = opendir(lang_path);
+ if (!lang_dir)
+ continue;
+
+ for_each_script(lang_dir, script_dirent, script_next) {
+ __script_root = strdup(script_dirent.d_name);
+ str = ends_with(__script_root, suffix);
+ if (str) {
+ *str = '\0';
+ if (strcmp(__script_root, script_root))
+ continue;
+ snprintf(script_path, MAXPATHLEN, "%s/%s",
+ lang_path, script_dirent.d_name);
+ path = strdup(script_path);
+ free(__script_root);
+ break;
+ }
+ free(__script_root);
+ }
+ }
+
+ return path;
+}
+
static const char * const annotate_usage[] = {
"perf trace [<options>] <command>",
NULL
@@ -309,8 +522,10 @@ static const struct option options[] = {
"dump raw trace in ASCII"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('l', "latency", &latency_format,
+ OPT_BOOLEAN('L', "Latency", &latency_format,
"show latency attributes (irqs/preemption disabled, etc)"),
+ OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
+ list_available_scripts),
OPT_CALLBACK('s', "script", NULL, "name",
"script file name (lang:script name, script name, or *)",
parse_scriptname),
@@ -322,24 +537,61 @@ static const struct option options[] = {
int cmd_trace(int argc, const char **argv, const char *prefix __used)
{
- int err;
+ struct perf_session *session;
+ const char *suffix = NULL;
+ const char **__argv;
+ char *script_path;
+ int i, err;
+
+ if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) {
+ if (argc < 3) {
+ fprintf(stderr,
+ "Please specify a record script\n");
+ return -1;
+ }
+ suffix = RECORD_SUFFIX;
+ }
+
+ if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) {
+ if (argc < 3) {
+ fprintf(stderr,
+ "Please specify a report script\n");
+ return -1;
+ }
+ suffix = REPORT_SUFFIX;
+ }
- symbol__init(0);
+ if (suffix) {
+ script_path = get_script_path(argv[2], suffix);
+ if (!script_path) {
+ fprintf(stderr, "script not found\n");
+ return -1;
+ }
- setup_scripting();
+ __argv = malloc((argc + 1) * sizeof(const char *));
+ __argv[0] = "/bin/sh";
+ __argv[1] = script_path;
+ for (i = 3; i < argc; i++)
+ __argv[i - 1] = argv[i];
+ __argv[argc - 1] = NULL;
- argc = parse_options(argc, argv, options, annotate_usage, 0);
- if (argc) {
- /*
- * Special case: if there's an argument left then assume tha
- * it's a symbol filter:
- */
- if (argc > 1)
- usage_with_options(annotate_usage, options);
+ execvp("/bin/sh", (char **)__argv);
+ exit(-1);
}
+ setup_scripting();
+
+ argc = parse_options(argc, argv, options, annotate_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (symbol__init() < 0)
+ return -1;
setup_pager();
+ session = perf_session__new(input_name, O_RDONLY, 0);
+ if (session == NULL)
+ return -ENOMEM;
+
if (generate_script_lang) {
struct stat perf_stat;
@@ -366,23 +618,20 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
return -1;
}
- header = perf_header__new();
- if (header == NULL)
- return -1;
-
- perf_header__read(header, input);
+ perf_header__read(&session->header, input);
err = scripting_ops->generate_script("perf-trace");
goto out;
}
if (script_name) {
- err = scripting_ops->start_script(script_name);
+ err = scripting_ops->start_script(script_name, argc, argv);
if (err)
goto out;
}
- err = __cmd_trace();
+ err = __cmd_trace(session);
+ perf_session__delete(session);
cleanup_scripting();
out:
return err;
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index a3d8bf65f26..18035b1f16c 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -17,6 +17,7 @@ extern int check_pager_config(const char *cmd);
extern int cmd_annotate(int argc, const char **argv, const char *prefix);
extern int cmd_bench(int argc, const char **argv, const char *prefix);
extern int cmd_buildid_list(int argc, const char **argv, const char *prefix);
+extern int cmd_diff(int argc, const char **argv, const char *prefix);
extern int cmd_help(int argc, const char **argv, const char *prefix);
extern int cmd_sched(int argc, const char **argv, const char *prefix);
extern int cmd_list(int argc, const char **argv, const char *prefix);
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index 02b09ea17a3..71dc7c3fe7b 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -5,6 +5,7 @@
perf-annotate mainporcelain common
perf-bench mainporcelain common
perf-buildid-list mainporcelain common
+perf-diff mainporcelain common
perf-list mainporcelain common
perf-sched mainporcelain common
perf-record mainporcelain common
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index cf64049bc9b..873e55fab37 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -286,6 +286,7 @@ static void handle_internal_command(int argc, const char **argv)
const char *cmd = argv[0];
static struct cmd_struct commands[] = {
{ "buildid-list", cmd_buildid_list, 0 },
+ { "diff", cmd_diff, 0 },
{ "help", cmd_help, 0 },
{ "list", cmd_list, 0 },
{ "record", cmd_record, 0 },
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 454d5d55f32..75f941bfba9 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -59,6 +59,18 @@
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#endif
+#ifdef __arm__
+#include "../../arch/arm/include/asm/unistd.h"
+/*
+ * Use the __kuser_memory_barrier helper in the CPU helper page. See
+ * arch/arm/kernel/entry-armv.S in the kernel source for details.
+ */
+#define rmb() asm volatile("mov r0, #0xffff0fff; mov lr, pc;" \
+ "sub pc, r0, #95" ::: "r0", "lr", "cc", \
+ "memory")
+#define cpu_relax() asm volatile("":::"memory")
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
diff --git a/tools/perf/scripts/perl/bin/check-perf-trace-report b/tools/perf/scripts/perl/bin/check-perf-trace-report
index 89948b01502..7fc4a033dd4 100644
--- a/tools/perf/scripts/perl/bin/check-perf-trace-report
+++ b/tools/perf/scripts/perl/bin/check-perf-trace-report
@@ -1,4 +1,5 @@
#!/bin/bash
+# description: useless but exhaustive test script
perf trace -s ~/libexec/perf-core/scripts/perl/check-perf-trace.pl
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-report b/tools/perf/scripts/perl/bin/rw-by-file-report
index f5dcf9cb5bd..eddb9ccce6a 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-report
+++ b/tools/perf/scripts/perl/bin/rw-by-file-report
@@ -1,5 +1,7 @@
#!/bin/bash
-perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl
+# description: r/w activity for a program, by file
+# args: <comm>
+perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-file.pl $1
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-report b/tools/perf/scripts/perl/bin/rw-by-pid-report
index cea16f78a3a..7f44c25cc85 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-report
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-report
@@ -1,4 +1,5 @@
#!/bin/bash
+# description: system-wide r/w activity
perf trace -s ~/libexec/perf-core/scripts/perl/rw-by-pid.pl
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-report b/tools/perf/scripts/perl/bin/wakeup-latency-report
index 85769dc456e..fce3adcb324 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-report
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-report
@@ -1,4 +1,5 @@
#!/bin/bash
+# description: system-wide min/max/avg wakeup latency
perf trace -s ~/libexec/perf-core/scripts/perl/wakeup-latency.pl
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report
index aa68435be92..71cfbd182fb 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-report
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-report
@@ -1,4 +1,5 @@
#!/bin/bash
+# description: workqueue stats (ins/exe/create/destroy)
perf trace -s ~/libexec/perf-core/scripts/perl/workqueue-stats.pl
diff --git a/tools/perf/scripts/perl/rw-by-file.pl b/tools/perf/scripts/perl/rw-by-file.pl
index 61f91561d84..2a39097687b 100644
--- a/tools/perf/scripts/perl/rw-by-file.pl
+++ b/tools/perf/scripts/perl/rw-by-file.pl
@@ -18,8 +18,9 @@ use lib "./Perf-Trace-Util/lib";
use Perf::Trace::Core;
use Perf::Trace::Util;
-# change this to the comm of the program you're interested in
-my $for_comm = "perf";
+my $usage = "perf trace -s rw-by-file.pl <comm>\n";
+
+my $for_comm = shift or die $usage;
my %reads;
my %writes;
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
index ca0bedf637c..b557b836de3 100644
--- a/tools/perf/util/data_map.c
+++ b/tools/perf/util/data_map.c
@@ -1,20 +1,17 @@
-#include "data_map.h"
#include "symbol.h"
#include "util.h"
#include "debug.h"
+#include "thread.h"
+#include "session.h"
-
-static struct perf_file_handler *curr_handler;
-static unsigned long mmap_window = 32;
-static char __cwd[PATH_MAX];
-
-static int process_event_stub(event_t *event __used)
+static int process_event_stub(event_t *event __used,
+ struct perf_session *session __used)
{
dump_printf(": unhandled!\n");
return 0;
}
-void register_perf_file_handler(struct perf_file_handler *handler)
+static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
if (!handler->process_sample_event)
handler->process_sample_event = process_event_stub;
@@ -34,8 +31,6 @@ void register_perf_file_handler(struct perf_file_handler *handler)
handler->process_throttle_event = process_event_stub;
if (!handler->process_unthrottle_event)
handler->process_unthrottle_event = process_event_stub;
-
- curr_handler = handler;
}
static const char *event__name[] = {
@@ -61,8 +56,9 @@ void event__print_totals(void)
event__name[i], event__total[i]);
}
-static int
-process_event(event_t *event, unsigned long offset, unsigned long head)
+static int process_event(event_t *event, struct perf_session *session,
+ struct perf_event_ops *ops,
+ unsigned long offset, unsigned long head)
{
trace_event(event);
@@ -77,34 +73,34 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
- return curr_handler->process_sample_event(event);
+ return ops->process_sample_event(event, session);
case PERF_RECORD_MMAP:
- return curr_handler->process_mmap_event(event);
+ return ops->process_mmap_event(event, session);
case PERF_RECORD_COMM:
- return curr_handler->process_comm_event(event);
+ return ops->process_comm_event(event, session);
case PERF_RECORD_FORK:
- return curr_handler->process_fork_event(event);
+ return ops->process_fork_event(event, session);
case PERF_RECORD_EXIT:
- return curr_handler->process_exit_event(event);
+ return ops->process_exit_event(event, session);
case PERF_RECORD_LOST:
- return curr_handler->process_lost_event(event);
+ return ops->process_lost_event(event, session);
case PERF_RECORD_READ:
- return curr_handler->process_read_event(event);
+ return ops->process_read_event(event, session);
case PERF_RECORD_THROTTLE:
- return curr_handler->process_throttle_event(event);
+ return ops->process_throttle_event(event, session);
case PERF_RECORD_UNTHROTTLE:
- return curr_handler->process_unthrottle_event(event);
+ return ops->process_unthrottle_event(event, session);
default:
- curr_handler->total_unknown++;
+ ops->total_unknown++;
return -1;
}
}
-int perf_header__read_build_ids(int input, off_t offset, off_t size)
+int perf_header__read_build_ids(int input, u64 offset, u64 size)
{
struct build_id_event bev;
char filename[PATH_MAX];
- off_t limit = offset + size;
+ u64 limit = offset + size;
int err = -1;
while (offset < limit) {
@@ -129,88 +125,58 @@ out:
return err;
}
-int mmap_dispatch_perf_file(struct perf_header **pheader,
- const char *input_name,
- int force,
- int full_paths,
- int *cwdlen,
- char **cwd)
+static struct thread *perf_session__register_idle_thread(struct perf_session *self)
+{
+ struct thread *thread = perf_session__findnew(self, 0);
+
+ if (!thread || thread__set_comm(thread, "swapper")) {
+ pr_err("problem inserting idle task.\n");
+ thread = NULL;
+ }
+
+ return thread;
+}
+
+int perf_session__process_events(struct perf_session *self,
+ struct perf_event_ops *ops)
{
int err;
- struct perf_header *header;
unsigned long head, shift;
unsigned long offset = 0;
- struct stat input_stat;
size_t page_size;
- u64 sample_type;
event_t *event;
uint32_t size;
- int input;
char *buf;
- if (curr_handler == NULL) {
- pr_debug("Forgot to register perf file handler\n");
- return -EINVAL;
- }
-
- page_size = getpagesize();
-
- input = open(input_name, O_RDONLY);
- if (input < 0) {
- pr_err("Failed to open file: %s", input_name);
- if (!strcmp(input_name, "perf.data"))
- pr_err(" (try 'perf record' first)");
- pr_err("\n");
- return -errno;
- }
-
- if (fstat(input, &input_stat) < 0) {
- pr_err("failed to stat file");
- err = -errno;
- goto out_close;
- }
-
- err = -EACCES;
- if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
- pr_err("file: %s not owned by current user or root\n",
- input_name);
- goto out_close;
- }
-
- if (input_stat.st_size == 0) {
- pr_info("zero-sized file, nothing to do!\n");
- goto done;
- }
+ if (perf_session__register_idle_thread(self) == NULL)
+ return -ENOMEM;
- err = -ENOMEM;
- header = perf_header__new();
- if (header == NULL)
- goto out_close;
+ perf_event_ops__fill_defaults(ops);
- err = perf_header__read(header, input);
- if (err < 0)
- goto out_delete;
- *pheader = header;
- head = header->data_offset;
+ page_size = getpagesize();
- sample_type = perf_header__sample_type(header);
+ head = self->header.data_offset;
+ self->sample_type = perf_header__sample_type(&self->header);
err = -EINVAL;
- if (curr_handler->sample_type_check &&
- curr_handler->sample_type_check(sample_type) < 0)
- goto out_delete;
+ if (ops->sample_type_check && ops->sample_type_check(self) < 0)
+ goto out_err;
- if (!full_paths) {
- if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
- pr_err("failed to get the current directory\n");
+ if (!ops->full_paths) {
+ char bf[PATH_MAX];
+
+ if (getcwd(bf, sizeof(bf)) == NULL) {
err = -errno;
- goto out_delete;
+out_getcwd_err:
+ pr_err("failed to get the current directory\n");
+ goto out_err;
+ }
+ self->cwd = strdup(bf);
+ if (self->cwd == NULL) {
+ err = -ENOMEM;
+ goto out_getcwd_err;
}
- *cwd = __cwd;
- *cwdlen = strlen(*cwd);
- } else {
- *cwd = NULL;
- *cwdlen = 0;
+ self->cwdlen = strlen(self->cwd);
}
shift = page_size * (head / page_size);
@@ -218,12 +184,12 @@ int mmap_dispatch_perf_file(struct perf_header **pheader,
head -= shift;
remap:
- buf = mmap(NULL, page_size * mmap_window, PROT_READ,
- MAP_SHARED, input, offset);
+ buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
+ MAP_SHARED, self->fd, offset);
if (buf == MAP_FAILED) {
pr_err("failed to mmap file\n");
err = -errno;
- goto out_delete;
+ goto out_err;
}
more:
@@ -233,12 +199,12 @@ more:
if (!size)
size = 8;
- if (head + event->header.size >= page_size * mmap_window) {
+ if (head + event->header.size >= page_size * self->mmap_window) {
int munmap_ret;
shift = page_size * (head / page_size);
- munmap_ret = munmap(buf, page_size * mmap_window);
+ munmap_ret = munmap(buf, page_size * self->mmap_window);
assert(munmap_ret == 0);
offset += shift;
@@ -253,7 +219,7 @@ more:
(void *)(long)event->header.size,
event->header.type);
- if (!size || process_event(event, offset, head) < 0) {
+ if (!size || process_event(event, self, ops, offset, head) < 0) {
dump_printf("%p [%p]: skipping unknown header type: %d\n",
(void *)(offset + head),
@@ -273,19 +239,14 @@ more:
head += size;
- if (offset + head >= header->data_offset + header->data_size)
+ if (offset + head >= self->header.data_offset + self->header.data_size)
goto done;
- if (offset + head < (unsigned long)input_stat.st_size)
+ if (offset + head < self->size)
goto more;
done:
err = 0;
-out_close:
- close(input);
-
+out_err:
return err;
-out_delete:
- perf_header__delete(header);
- goto out_close;
}
diff --git a/tools/perf/util/data_map.h b/tools/perf/util/data_map.h
deleted file mode 100644
index 3180ff7e363..00000000000
--- a/tools/perf/util/data_map.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __PERF_DATAMAP_H
-#define __PERF_DATAMAP_H
-
-#include "event.h"
-#include "header.h"
-
-typedef int (*event_type_handler_t)(event_t *);
-
-struct perf_file_handler {
- event_type_handler_t process_sample_event;
- event_type_handler_t process_mmap_event;
- event_type_handler_t process_comm_event;
- event_type_handler_t process_fork_event;
- event_type_handler_t process_exit_event;
- event_type_handler_t process_lost_event;
- event_type_handler_t process_read_event;
- event_type_handler_t process_throttle_event;
- event_type_handler_t process_unthrottle_event;
- int (*sample_type_check)(u64 sample_type);
- unsigned long total_unknown;
-};
-
-void register_perf_file_handler(struct perf_file_handler *handler);
-int mmap_dispatch_perf_file(struct perf_header **pheader,
- const char *input_name,
- int force,
- int full_paths,
- int *cwdlen,
- char **cwd);
-int perf_header__read_build_ids(int input, off_t offset, off_t file_size);
-
-#endif
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 414b89d1bde..bb0fd6da2d5 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1,11 +1,16 @@
#include <linux/types.h>
#include "event.h"
#include "debug.h"
+#include "session.h"
+#include "sort.h"
#include "string.h"
+#include "strlist.h"
#include "thread.h"
static pid_t event__synthesize_comm(pid_t pid, int full,
- int (*process)(event_t *event))
+ int (*process)(event_t *event,
+ struct perf_session *session),
+ struct perf_session *session)
{
event_t ev;
char filename[PATH_MAX];
@@ -54,7 +59,7 @@ out_race:
if (!full) {
ev.comm.tid = pid;
- process(&ev);
+ process(&ev, session);
goto out_fclose;
}
@@ -72,7 +77,7 @@ out_race:
ev.comm.tid = pid;
- process(&ev);
+ process(&ev, session);
}
closedir(tasks);
@@ -86,7 +91,9 @@ out_failure:
}
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
- int (*process)(event_t *event))
+ int (*process)(event_t *event,
+ struct perf_session *session),
+ struct perf_session *session)
{
char filename[PATH_MAX];
FILE *fp;
@@ -141,7 +148,7 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
ev.mmap.pid = tgid;
ev.mmap.tid = pid;
- process(&ev);
+ process(&ev, session);
}
}
@@ -149,15 +156,20 @@ static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
return 0;
}
-int event__synthesize_thread(pid_t pid, int (*process)(event_t *event))
+int event__synthesize_thread(pid_t pid,
+ int (*process)(event_t *event,
+ struct perf_session *session),
+ struct perf_session *session)
{
- pid_t tgid = event__synthesize_comm(pid, 1, process);
+ pid_t tgid = event__synthesize_comm(pid, 1, process, session);
if (tgid == -1)
return -1;
- return event__synthesize_mmap_events(pid, tgid, process);
+ return event__synthesize_mmap_events(pid, tgid, process, session);
}
-void event__synthesize_threads(int (*process)(event_t *event))
+void event__synthesize_threads(int (*process)(event_t *event,
+ struct perf_session *session),
+ struct perf_session *session)
{
DIR *proc;
struct dirent dirent, *next;
@@ -171,24 +183,47 @@ void event__synthesize_threads(int (*process)(event_t *event))
if (*end) /* only interested in proper numerical dirents */
continue;
- event__synthesize_thread(pid, process);
+ event__synthesize_thread(pid, process, session);
}
closedir(proc);
}
-char *event__cwd;
-int event__cwdlen;
+static void thread__comm_adjust(struct thread *self)
+{
+ char *comm = self->comm;
+
+ if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ (!symbol_conf.comm_list ||
+ strlist__has_entry(symbol_conf.comm_list, comm))) {
+ unsigned int slen = strlen(comm);
+
+ if (slen > comms__col_width) {
+ comms__col_width = slen;
+ threads__col_width = slen + 6;
+ }
+ }
+}
+
+static int thread__set_comm_adjust(struct thread *self, const char *comm)
+{
+ int ret = thread__set_comm(self, comm);
+
+ if (ret)
+ return ret;
-struct events_stats event__stats;
+ thread__comm_adjust(self);
-int event__process_comm(event_t *self)
+ return 0;
+}
+
+int event__process_comm(event_t *self, struct perf_session *session)
{
- struct thread *thread = threads__findnew(self->comm.pid);
+ struct thread *thread = perf_session__findnew(session, self->comm.pid);
dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);
- if (thread == NULL || thread__set_comm(thread, self->comm.comm)) {
+ if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
}
@@ -196,18 +231,18 @@ int event__process_comm(event_t *self)
return 0;
}
-int event__process_lost(event_t *self)
+int event__process_lost(event_t *self, struct perf_session *session)
{
dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
- event__stats.lost += self->lost.lost;
+ session->events_stats.lost += self->lost.lost;
return 0;
}
-int event__process_mmap(event_t *self)
+int event__process_mmap(event_t *self, struct perf_session *session)
{
- struct thread *thread = threads__findnew(self->mmap.pid);
+ struct thread *thread = perf_session__findnew(session, self->mmap.pid);
struct map *map = map__new(&self->mmap, MAP__FUNCTION,
- event__cwd, event__cwdlen);
+ session->cwd, session->cwdlen);
dump_printf(" %d/%d: [%p(%p) @ %p]: %s\n",
self->mmap.pid, self->mmap.tid,
@@ -224,10 +259,10 @@ int event__process_mmap(event_t *self)
return 0;
}
-int event__process_task(event_t *self)
+int event__process_task(event_t *self, struct perf_session *session)
{
- struct thread *thread = threads__findnew(self->fork.pid);
- struct thread *parent = threads__findnew(self->fork.ppid);
+ struct thread *thread = perf_session__findnew(session, self->fork.pid);
+ struct thread *parent = perf_session__findnew(session, self->fork.ppid);
dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
self->fork.ppid, self->fork.ptid);
@@ -249,18 +284,20 @@ int event__process_task(event_t *self)
return 0;
}
-void thread__find_addr_location(struct thread *self, u8 cpumode,
+void thread__find_addr_location(struct thread *self,
+ struct perf_session *session, u8 cpumode,
enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter)
{
- struct thread *thread = al->thread = self;
+ struct map_groups *mg = &self->mg;
+ al->thread = self;
al->addr = addr;
if (cpumode & PERF_RECORD_MISC_KERNEL) {
al->level = 'k';
- thread = kthread;
+ mg = &session->kmaps;
} else if (cpumode & PERF_RECORD_MISC_USER)
al->level = '.';
else {
@@ -270,7 +307,7 @@ void thread__find_addr_location(struct thread *self, u8 cpumode,
return;
}
try_again:
- al->map = thread__find_map(thread, type, al->addr);
+ al->map = map_groups__find(mg, type, al->addr);
if (al->map == NULL) {
/*
* If this is outside of all known maps, and is a negative
@@ -281,32 +318,139 @@ try_again:
* "[vdso]" dso, but for now lets use the old trick of looking
* in the whole kernel symbol list.
*/
- if ((long long)al->addr < 0 && thread != kthread) {
- thread = kthread;
+ if ((long long)al->addr < 0 && mg != &session->kmaps) {
+ mg = &session->kmaps;
goto try_again;
}
al->sym = NULL;
} else {
al->addr = al->map->map_ip(al->map, al->addr);
- al->sym = map__find_symbol(al->map, al->addr, filter);
+ al->sym = map__find_symbol(al->map, session, al->addr, filter);
}
}
-int event__preprocess_sample(const event_t *self, struct addr_location *al,
- symbol_filter_t filter)
+static void dso__calc_col_width(struct dso *self)
+{
+ if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
+ (!symbol_conf.dso_list ||
+ strlist__has_entry(symbol_conf.dso_list, self->name))) {
+ unsigned int slen = strlen(self->name);
+ if (slen > dsos__col_width)
+ dsos__col_width = slen;
+ }
+
+ self->slen_calculated = 1;
+}
+
+int event__preprocess_sample(const event_t *self, struct perf_session *session,
+ struct addr_location *al, symbol_filter_t filter)
{
u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = threads__findnew(self->ip.pid);
+ struct thread *thread = perf_session__findnew(session, self->ip.pid);
if (thread == NULL)
return -1;
+ if (symbol_conf.comm_list &&
+ !strlist__has_entry(symbol_conf.comm_list, thread->comm))
+ goto out_filtered;
+
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
+ thread__find_addr_location(thread, session, cpumode, MAP__FUNCTION,
self->ip.ip, al, filter);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
+ /*
+ * We have to do this here as we may have a dso with no symbol hit that
+ * has a name longer than the ones with symbols sampled.
+ */
+ if (al->map && !sort_dso.elide && !al->map->dso->slen_calculated)
+ dso__calc_col_width(al->map->dso);
+
+ if (symbol_conf.dso_list &&
+ (!al->map || !al->map->dso ||
+ !(strlist__has_entry(symbol_conf.dso_list, al->map->dso->short_name) ||
+ (al->map->dso->short_name != al->map->dso->long_name &&
+ strlist__has_entry(symbol_conf.dso_list, al->map->dso->long_name)))))
+ goto out_filtered;
+
+ if (symbol_conf.sym_list && al->sym &&
+ !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
+ goto out_filtered;
+
+ al->filtered = false;
+ return 0;
+
+out_filtered:
+ al->filtered = true;
+ return 0;
+}
+
+int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
+{
+ u64 *array = event->sample.array;
+
+ if (type & PERF_SAMPLE_IP) {
+ data->ip = event->ip.ip;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TID) {
+ u32 *p = (u32 *)array;
+ data->pid = p[0];
+ data->tid = p[1];
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_TIME) {
+ data->time = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ADDR) {
+ data->addr = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_ID) {
+ data->id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_STREAM_ID) {
+ data->stream_id = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_CPU) {
+ u32 *p = (u32 *)array;
+ data->cpu = *p;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_PERIOD) {
+ data->period = *array;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_READ) {
+ pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
+ return -1;
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ data->callchain = (struct ip_callchain *)array;
+ array += 1 + data->callchain->nr;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u32 *p = (u32 *)array;
+ data->raw_size = *p;
+ p++;
+ data->raw_data = p;
+ }
+
return 0;
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a4cc8105cf6..690a96d0467 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -56,11 +56,25 @@ struct read_event {
u64 id;
};
-struct sample_event{
+struct sample_event {
struct perf_event_header header;
u64 array[];
};
+struct sample_data {
+ u64 ip;
+ u32 pid, tid;
+ u64 time;
+ u64 addr;
+ u64 id;
+ u64 stream_id;
+ u32 cpu;
+ u64 period;
+ struct ip_callchain *callchain;
+ u32 raw_size;
+ void *raw_data;
+};
+
#define BUILD_ID_SIZE 20
struct build_id_event {
@@ -81,18 +95,19 @@ typedef union event_union {
} event_t;
struct events_stats {
- unsigned long total;
- unsigned long lost;
+ u64 total;
+ u64 lost;
};
void event__print_totals(void);
enum map_type {
MAP__FUNCTION = 0,
-
- MAP__NR_TYPES,
+ MAP__VARIABLE,
};
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
struct map {
union {
struct rb_node rb_node;
@@ -134,26 +149,35 @@ void map__delete(struct map *self);
struct map *map__clone(struct map *self);
int map__overlap(struct map *l, struct map *r);
size_t map__fprintf(struct map *self, FILE *fp);
-struct symbol *map__find_symbol(struct map *self, u64 addr,
- symbol_filter_t filter);
+
+struct perf_session;
+
+int map__load(struct map *self, struct perf_session *session,
+ symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
+ u64 addr, symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ struct perf_session *session,
+ symbol_filter_t filter);
void map__fixup_start(struct map *self);
void map__fixup_end(struct map *self);
-int event__synthesize_thread(pid_t pid, int (*process)(event_t *event));
-void event__synthesize_threads(int (*process)(event_t *event));
-
-extern char *event__cwd;
-extern int event__cwdlen;
-extern struct events_stats event__stats;
-extern unsigned long event__total[PERF_RECORD_MAX];
+int event__synthesize_thread(pid_t pid,
+ int (*process)(event_t *event,
+ struct perf_session *session),
+ struct perf_session *session);
+void event__synthesize_threads(int (*process)(event_t *event,
+ struct perf_session *session),
+ struct perf_session *session);
-int event__process_comm(event_t *self);
-int event__process_lost(event_t *self);
-int event__process_mmap(event_t *self);
-int event__process_task(event_t *self);
+int event__process_comm(event_t *self, struct perf_session *session);
+int event__process_lost(event_t *self, struct perf_session *session);
+int event__process_mmap(event_t *self, struct perf_session *session);
+int event__process_task(event_t *self, struct perf_session *session);
struct addr_location;
-int event__preprocess_sample(const event_t *self, struct addr_location *al,
- symbol_filter_t filter);
+int event__preprocess_sample(const event_t *self, struct perf_session *session,
+ struct addr_location *al, symbol_filter_t filter);
+int event__parse_sample(event_t *event, u64 type, struct sample_data *data);
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4805e6dfd23..8a0bca55106 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -8,8 +8,8 @@
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
+#include "session.h"
#include "symbol.h"
-#include "data_map.h"
#include "debug.h"
/*
@@ -58,35 +58,19 @@ int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
return 0;
}
-/*
- * Create new perf.data header:
- */
-struct perf_header *perf_header__new(void)
+int perf_header__init(struct perf_header *self)
{
- struct perf_header *self = zalloc(sizeof(*self));
-
- if (self != NULL) {
- self->size = 1;
- self->attr = malloc(sizeof(void *));
-
- if (self->attr == NULL) {
- free(self);
- self = NULL;
- }
- }
-
- return self;
+ self->size = 1;
+ self->attr = malloc(sizeof(void *));
+ return self->attr == NULL ? -ENOMEM : 0;
}
-void perf_header__delete(struct perf_header *self)
+void perf_header__exit(struct perf_header *self)
{
int i;
-
for (i = 0; i < self->attrs; ++i)
- perf_header_attr__delete(self->attr[i]);
-
+ perf_header_attr__delete(self->attr[i]);
free(self->attr);
- free(self);
}
int perf_header__add_attr(struct perf_header *self,
@@ -187,7 +171,9 @@ static int do_write(int fd, const void *buf, size_t size)
static int __dsos__write_buildid_table(struct list_head *head, int fd)
{
+#define NAME_ALIGN 64
struct dso *pos;
+ static const char zero_buf[NAME_ALIGN];
list_for_each_entry(pos, head, node) {
int err;
@@ -197,14 +183,17 @@ static int __dsos__write_buildid_table(struct list_head *head, int fd)
if (!pos->has_build_id)
continue;
len = pos->long_name_len + 1;
- len = ALIGN(len, 64);
+ len = ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
b.header.size = sizeof(b) + len;
err = do_write(fd, &b, sizeof(b));
if (err < 0)
return err;
- err = do_write(fd, pos->long_name, len);
+ err = do_write(fd, pos->long_name, pos->long_name_len + 1);
+ if (err < 0)
+ return err;
+ err = do_write(fd, zero_buf, len - pos->long_name_len - 1);
if (err < 0)
return err;
}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d1dbe2b79c4..d118d05d3ab 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -55,8 +55,8 @@ struct perf_header {
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
-struct perf_header *perf_header__new(void);
-void perf_header__delete(struct perf_header *self);
+int perf_header__init(struct perf_header *self);
+void perf_header__exit(struct perf_header *self);
int perf_header__read(struct perf_header *self, int fd);
int perf_header__write(struct perf_header *self, int fd, bool at_exit);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 0ebf6ee16ca..e8daf5ca6fd 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,9 +1,7 @@
#include "hist.h"
-
-struct rb_root hist;
-struct rb_root collapse_hists;
-struct rb_root output_hists;
-int callchain;
+#include "session.h"
+#include "sort.h"
+#include <math.h>
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
@@ -14,11 +12,12 @@ struct callchain_param callchain_param = {
* histogram, sorted on item, collects counts
*/
-struct hist_entry *__hist_entry__add(struct addr_location *al,
- struct symbol *sym_parent,
- u64 count, bool *hit)
+struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self,
+ struct addr_location *al,
+ struct symbol *sym_parent,
+ u64 count, bool *hit)
{
- struct rb_node **p = &hist.rb_node;
+ struct rb_node **p = &self->hists.rb_node;
struct rb_node *parent = NULL;
struct hist_entry *he;
struct hist_entry entry = {
@@ -54,7 +53,7 @@ struct hist_entry *__hist_entry__add(struct addr_location *al,
return NULL;
*he = entry;
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &hist);
+ rb_insert_color(&he->rb_node, &self->hists);
*hit = false;
return he;
}
@@ -102,9 +101,9 @@ void hist_entry__free(struct hist_entry *he)
* collapse the histogram
*/
-void collapse__insert_entry(struct hist_entry *he)
+static void collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
{
- struct rb_node **p = &collapse_hists.rb_node;
+ struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
@@ -128,38 +127,45 @@ void collapse__insert_entry(struct hist_entry *he)
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &collapse_hists);
+ rb_insert_color(&he->rb_node, root);
}
-void collapse__resort(void)
+void perf_session__collapse_resort(struct perf_session *self)
{
+ struct rb_root tmp;
struct rb_node *next;
struct hist_entry *n;
if (!sort__need_collapse)
return;
- next = rb_first(&hist);
+ tmp = RB_ROOT;
+ next = rb_first(&self->hists);
+
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
- rb_erase(&n->rb_node, &hist);
- collapse__insert_entry(n);
+ rb_erase(&n->rb_node, &self->hists);
+ collapse__insert_entry(&tmp, n);
}
+
+ self->hists = tmp;
}
/*
* reverse the map, sort on count.
*/
-void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
+static void perf_session__insert_output_hist_entry(struct rb_root *root,
+ struct hist_entry *he,
+ u64 min_callchain_hits)
{
- struct rb_node **p = &output_hists.rb_node;
+ struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
- if (callchain)
+ if (symbol_conf.use_callchain)
callchain_param.sort(&he->sorted_chain, &he->callchain,
min_callchain_hits, &callchain_param);
@@ -174,29 +180,483 @@ void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
}
rb_link_node(&he->rb_node, parent, p);
- rb_insert_color(&he->rb_node, &output_hists);
+ rb_insert_color(&he->rb_node, root);
}
-void output__resort(u64 total_samples)
+void perf_session__output_resort(struct perf_session *self, u64 total_samples)
{
+ struct rb_root tmp;
struct rb_node *next;
struct hist_entry *n;
- struct rb_root *tree = &hist;
u64 min_callchain_hits;
min_callchain_hits =
total_samples * (callchain_param.min_percent / 100);
- if (sort__need_collapse)
- tree = &collapse_hists;
-
- next = rb_first(tree);
+ tmp = RB_ROOT;
+ next = rb_first(&self->hists);
while (next) {
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
- rb_erase(&n->rb_node, tree);
- output__insert_entry(n, min_callchain_hits);
+ rb_erase(&n->rb_node, &self->hists);
+ perf_session__insert_output_hist_entry(&tmp, n,
+ min_callchain_hits);
+ }
+
+ self->hists = tmp;
+}
+
+static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
+{
+ int i;
+ int ret = fprintf(fp, " ");
+
+ for (i = 0; i < left_margin; i++)
+ ret += fprintf(fp, " ");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
+ int left_margin)
+{
+ int i;
+ size_t ret = callchain__fprintf_left_margin(fp, left_margin);
+
+ for (i = 0; i < depth; i++)
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "| ");
+ else
+ ret += fprintf(fp, " ");
+
+ ret += fprintf(fp, "\n");
+
+ return ret;
+}
+
+static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
+ int depth, int depth_mask, int count,
+ u64 total_samples, int hits,
+ int left_margin)
+{
+ int i;
+ size_t ret = 0;
+
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ for (i = 0; i < depth; i++) {
+ if (depth_mask & (1 << i))
+ ret += fprintf(fp, "|");
+ else
+ ret += fprintf(fp, " ");
+ if (!count && i == depth - 1) {
+ double percent;
+
+ percent = hits * 100.0 / total_samples;
+ ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
+ } else
+ ret += fprintf(fp, "%s", " ");
+ }
+ if (chain->sym)
+ ret += fprintf(fp, "%s\n", chain->sym->name);
+ else
+ ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);
+
+ return ret;
+}
+
+static struct symbol *rem_sq_bracket;
+static struct callchain_list rem_hits;
+
+static void init_rem_hits(void)
+{
+ rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
+ if (!rem_sq_bracket) {
+ fprintf(stderr, "Not enough memory to display remaining hits\n");
+ return;
+ }
+
+ strcpy(rem_sq_bracket->name, "[...]");
+ rem_hits.sym = rem_sq_bracket;
+}
+
+static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+ u64 total_samples, int depth,
+ int depth_mask, int left_margin)
+{
+ struct rb_node *node, *next;
+ struct callchain_node *child;
+ struct callchain_list *chain;
+ int new_depth_mask = depth_mask;
+ u64 new_total;
+ u64 remaining;
+ size_t ret = 0;
+ int i;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = self->children_hit;
+ else
+ new_total = total_samples;
+
+ remaining = new_total;
+
+ node = rb_first(&self->rb_root);
+ while (node) {
+ u64 cumul;
+
+ child = rb_entry(node, struct callchain_node, rb_node);
+ cumul = cumul_hits(child);
+ remaining -= cumul;
+
+ /*
+ * The depth mask manages the output of pipes that show
+ * the depth. We don't want to keep the pipes of the current
+ * level for the last child of this depth.
+ * Except if we have remaining filtered hits. They will
+ * supersede the last child
+ */
+ next = rb_next(node);
+ if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ /*
+ * But we keep the older depth mask for the line separator
+ * to keep the level link until we reach the last child
+ */
+ ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
+ left_margin);
+ i = 0;
+ list_for_each_entry(chain, &child->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ ret += ipchain__fprintf_graph(fp, chain, depth,
+ new_depth_mask, i++,
+ new_total,
+ cumul,
+ left_margin);
+ }
+ ret += __callchain__fprintf_graph(fp, child, new_total,
+ depth + 1,
+ new_depth_mask | (1 << depth),
+ left_margin);
+ node = next;
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL &&
+ remaining && remaining != new_total) {
+
+ if (!rem_sq_bracket)
+ return ret;
+
+ new_depth_mask &= ~(1 << (depth - 1));
+
+ ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
+ new_depth_mask, 0, new_total,
+ remaining, left_margin);
+ }
+
+ return ret;
+}
+
+static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
+ u64 total_samples, int left_margin)
+{
+ struct callchain_list *chain;
+ bool printed = false;
+ int i = 0;
+ int ret = 0;
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+
+ if (!i++ && sort__first_dimension == SORT_SYM)
+ continue;
+
+ if (!printed) {
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "|\n");
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+ ret += fprintf(fp, "---");
+
+ left_margin += 3;
+ printed = true;
+ } else
+ ret += callchain__fprintf_left_margin(fp, left_margin);
+
+ if (chain->sym)
+ ret += fprintf(fp, " %s\n", chain->sym->name);
+ else
+ ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
+ }
+
+ ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);
+
+ return ret;
+}
+
+static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
+ u64 total_samples)
+{
+ struct callchain_list *chain;
+ size_t ret = 0;
+
+ if (!self)
+ return 0;
+
+ ret += callchain__fprintf_flat(fp, self->parent, total_samples);
+
+
+ list_for_each_entry(chain, &self->val, list) {
+ if (chain->ip >= PERF_CONTEXT_MAX)
+ continue;
+ if (chain->sym)
+ ret += fprintf(fp, " %s\n", chain->sym->name);
+ else
+ ret += fprintf(fp, " %p\n",
+ (void *)(long)chain->ip);
+ }
+
+ return ret;
+}
+
+static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
+ u64 total_samples, int left_margin)
+{
+ struct rb_node *rb_node;
+ struct callchain_node *chain;
+ size_t ret = 0;
+
+ rb_node = rb_first(&self->sorted_chain);
+ while (rb_node) {
+ double percent;
+
+ chain = rb_entry(rb_node, struct callchain_node, rb_node);
+ percent = chain->hit * 100.0 / total_samples;
+ switch (callchain_param.mode) {
+ case CHAIN_FLAT:
+ ret += percent_color_fprintf(fp, " %6.2f%%\n",
+ percent);
+ ret += callchain__fprintf_flat(fp, chain, total_samples);
+ break;
+ case CHAIN_GRAPH_ABS: /* Falldown */
+ case CHAIN_GRAPH_REL:
+ ret += callchain__fprintf_graph(fp, chain, total_samples,
+ left_margin);
+ case CHAIN_NONE:
+ default:
+ break;
+ }
+ ret += fprintf(fp, "\n");
+ rb_node = rb_next(rb_node);
+ }
+
+ return ret;
+}
+
+static size_t hist_entry__fprintf(struct hist_entry *self,
+ struct perf_session *session,
+ struct perf_session *pair_session,
+ bool show_displacement,
+ long displacement, FILE *fp)
+{
+ struct sort_entry *se;
+ u64 count, total;
+ const char *sep = symbol_conf.field_sep;
+ size_t ret;
+
+ if (symbol_conf.exclude_other && !self->parent)
+ return 0;
+
+ if (pair_session) {
+ count = self->pair ? self->pair->count : 0;
+ total = pair_session->events_stats.total;
+ } else {
+ count = self->count;
+ total = session->events_stats.total;
+ }
+
+ if (total)
+ ret = percent_color_fprintf(fp, sep ? "%.2f" : " %6.2f%%",
+ (count * 100.0) / total);
+ else
+ ret = fprintf(fp, sep ? "%lld" : "%12lld ", count);
+
+ if (symbol_conf.show_nr_samples) {
+ if (sep)
+ fprintf(fp, "%c%lld", *sep, count);
+ else
+ fprintf(fp, "%11lld", count);
+ }
+
+ if (pair_session) {
+ char bf[32];
+ double old_percent = 0, new_percent = 0, diff;
+
+ if (total > 0)
+ old_percent = (count * 100.0) / total;
+ if (session->events_stats.total > 0)
+ new_percent = (self->count * 100.0) / session->events_stats.total;
+
+ diff = new_percent - old_percent;
+
+ if (fabs(diff) >= 0.01)
+ snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
+ else
+ snprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+ ret += fprintf(fp, "%c%s", *sep, bf);
+ else
+ ret += fprintf(fp, "%11.11s", bf);
+
+ if (show_displacement) {
+ if (displacement)
+ snprintf(bf, sizeof(bf), "%+4ld", displacement);
+ else
+ snprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+ fprintf(fp, "%c%s", *sep, bf);
+ else
+ fprintf(fp, "%6.6s", bf);
+ }
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+
+ fprintf(fp, "%s", sep ?: " ");
+ ret += se->print(fp, self, se->width ? *se->width : 0);
+ }
+
+ ret += fprintf(fp, "\n");
+
+ if (symbol_conf.use_callchain) {
+ int left_margin = 0;
+
+ if (sort__first_dimension == SORT_COMM) {
+ se = list_first_entry(&hist_entry__sort_list, typeof(*se),
+ list);
+ left_margin = se->width ? *se->width : 0;
+ left_margin -= thread__comm_len(self->thread);
+ }
+
+ hist_entry_callchain__fprintf(fp, self, session->events_stats.total,
+ left_margin);
+ }
+
+ return ret;
+}
+
+size_t perf_session__fprintf_hists(struct perf_session *self,
+ struct perf_session *pair,
+ bool show_displacement, FILE *fp)
+{
+ struct sort_entry *se;
+ struct rb_node *nd;
+ size_t ret = 0;
+ unsigned long position = 1;
+ long displacement = 0;
+ unsigned int width;
+ const char *sep = symbol_conf.field_sep;
+ char *col_width = symbol_conf.col_width_list_str;
+
+ init_rem_hits();
+
+ fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");
+
+ if (symbol_conf.show_nr_samples) {
+ if (sep)
+ fprintf(fp, "%cSamples", *sep);
+ else
+ fputs(" Samples ", fp);
+ }
+
+ if (pair) {
+ if (sep)
+ ret += fprintf(fp, "%cDelta", *sep);
+ else
+ ret += fprintf(fp, " Delta ");
+
+ if (show_displacement) {
+ if (sep)
+ ret += fprintf(fp, "%cDisplacement", *sep);
+ else
+ ret += fprintf(fp, " Displ");
+ }
+ }
+
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (se->elide)
+ continue;
+ if (sep) {
+ fprintf(fp, "%c%s", *sep, se->header);
+ continue;
+ }
+ width = strlen(se->header);
+ if (se->width) {
+ if (symbol_conf.col_width_list_str) {
+ if (col_width) {
+ *se->width = atoi(col_width);
+ col_width = strchr(col_width, ',');
+ if (col_width)
+ ++col_width;
+ }
+ }
+ width = *se->width = max(*se->width, width);
+ }
+ fprintf(fp, " %*s", width, se->header);
+ }
+ fprintf(fp, "\n");
+
+ if (sep)
+ goto print_entries;
+
+ fprintf(fp, "# ........");
+ if (symbol_conf.show_nr_samples)
+ fprintf(fp, " ..........");
+ if (pair) {
+ fprintf(fp, " ..........");
+ if (show_displacement)
+ fprintf(fp, " .....");
+ }
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ unsigned int i;
+
+ if (se->elide)
+ continue;
+
+ fprintf(fp, " ");
+ if (se->width)
+ width = *se->width;
+ else
+ width = strlen(se->header);
+ for (i = 0; i < width; i++)
+ fprintf(fp, ".");
+ }
+
+ fprintf(fp, "\n#\n");
+
+print_entries:
+ for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (show_displacement) {
+ if (h->pair != NULL)
+ displacement = ((long)h->pair->position -
+ (long)position);
+ else
+ displacement = 0;
+ ++position;
+ }
+ ret += hist_entry__fprintf(h, self, pair, show_displacement,
+ displacement, fp);
}
+
+ free(rem_sq_bracket);
+
+ return ret;
}
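The Delta column emitted by hist_entry__fprintf() above is simply the difference between an entry's share of the baseline session and its share of the current session, blanked whenever it is below 0.01%. A minimal standalone sketch of that arithmetic (the function and variable names here are illustrative, not taken from the patch):

	#include <math.h>
	#include <stdio.h>

	/* Print the Delta column value the way hist_entry__fprintf() does. */
	static void print_delta(double old_count, double old_total,
				double new_count, double new_total)
	{
		double old_percent = old_total > 0 ? old_count * 100.0 / old_total : 0;
		double new_percent = new_total > 0 ? new_count * 100.0 / new_total : 0;
		double diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			printf("%+4.2f%%\n", diff);	/* e.g. "+3.00%" */
		else
			printf("(blank)\n");		/* too small to be worth printing */
	}

	int main(void)
	{
		print_delta(50, 1000, 80, 1000);	/* 5.00% -> 8.00%: prints +3.00% */
		print_delta(10, 1000, 10, 1000);	/* unchanged: prints (blank)     */
		return 0;
	}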
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 3020db0c929..e5f99b24048 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -1,50 +1,27 @@
#ifndef __PERF_HIST_H
#define __PERF_HIST_H
-#include "../builtin.h"
-#include "util.h"
-
-#include "color.h"
-#include <linux/list.h>
-#include "cache.h"
-#include <linux/rbtree.h>
-#include "symbol.h"
-#include "string.h"
+#include <linux/types.h>
#include "callchain.h"
-#include "strlist.h"
-#include "values.h"
-
-#include "../perf.h"
-#include "debug.h"
-#include "header.h"
-
-#include "parse-options.h"
-#include "parse-events.h"
-#include "thread.h"
-#include "sort.h"
-
-extern struct rb_root hist;
-extern struct rb_root collapse_hists;
-extern struct rb_root output_hists;
-extern int callchain;
extern struct callchain_param callchain_param;
-extern unsigned long total;
-extern unsigned long total_mmap;
-extern unsigned long total_comm;
-extern unsigned long total_fork;
-extern unsigned long total_unknown;
-extern unsigned long total_lost;
-struct hist_entry *__hist_entry__add(struct addr_location *al,
- struct symbol *parent,
- u64 count, bool *hit);
+struct perf_session;
+struct hist_entry;
+struct addr_location;
+struct symbol;
+
+struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self,
+ struct addr_location *al,
+ struct symbol *parent,
+ u64 count, bool *hit);
extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *);
extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *);
-extern void hist_entry__free(struct hist_entry *);
-extern void collapse__insert_entry(struct hist_entry *);
-extern void collapse__resort(void);
-extern void output__insert_entry(struct hist_entry *, u64);
-extern void output__resort(u64);
+void hist_entry__free(struct hist_entry *);
+void perf_session__output_resort(struct perf_session *self, u64 total_samples);
+void perf_session__collapse_resort(struct perf_session *self);
+size_t perf_session__fprintf_hists(struct perf_session *self,
+ struct perf_session *pair,
+ bool show_displacement, FILE *fp);
#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 69f94fe9db2..c4d55a0da2e 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -104,43 +104,70 @@ void map__fixup_end(struct map *self)
#define DSO__DELETED "(deleted)"
-struct symbol *map__find_symbol(struct map *self, u64 addr,
- symbol_filter_t filter)
+int map__load(struct map *self, struct perf_session *session,
+ symbol_filter_t filter)
{
- if (!dso__loaded(self->dso, self->type)) {
- int nr = dso__load(self->dso, self, filter);
-
- if (nr < 0) {
- if (self->dso->has_build_id) {
- char sbuild_id[BUILD_ID_SIZE * 2 + 1];
-
- build_id__sprintf(self->dso->build_id,
- sizeof(self->dso->build_id),
- sbuild_id);
- pr_warning("%s with build id %s not found",
- self->dso->long_name, sbuild_id);
- } else
- pr_warning("Failed to open %s",
- self->dso->long_name);
- pr_warning(", continuing without symbols\n");
- return NULL;
- } else if (nr == 0) {
- const char *name = self->dso->long_name;
- const size_t len = strlen(name);
- const size_t real_len = len - sizeof(DSO__DELETED);
-
- if (len > sizeof(DSO__DELETED) &&
- strcmp(name + real_len + 1, DSO__DELETED) == 0) {
- pr_warning("%.*s was updated, restart the long running apps that use it!\n",
- (int)real_len, name);
- } else {
- pr_warning("no symbols found in %s, maybe install a debug package?\n", name);
- }
- return NULL;
+ const char *name = self->dso->long_name;
+ int nr;
+
+ if (dso__loaded(self->dso, self->type))
+ return 0;
+
+ nr = dso__load(self->dso, self, session, filter);
+ if (nr < 0) {
+ if (self->dso->has_build_id) {
+ char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+ build_id__sprintf(self->dso->build_id,
+ sizeof(self->dso->build_id),
+ sbuild_id);
+ pr_warning("%s with build id %s not found",
+ name, sbuild_id);
+ } else
+ pr_warning("Failed to open %s", name);
+
+ pr_warning(", continuing without symbols\n");
+ return -1;
+ } else if (nr == 0) {
+ const size_t len = strlen(name);
+ const size_t real_len = len - sizeof(DSO__DELETED);
+
+ if (len > sizeof(DSO__DELETED) &&
+ strcmp(name + real_len + 1, DSO__DELETED) == 0) {
+ pr_warning("%.*s was updated, restart the long "
+ "running apps that use it!\n",
+ (int)real_len, name);
+ } else {
+ pr_warning("no symbols found in %s, maybe install "
+ "a debug package?\n", name);
}
+
+ return -1;
}
- return self->dso->find_symbol(self->dso, self->type, addr);
+ return 0;
+}
+
+struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
+ u64 addr, symbol_filter_t filter)
+{
+ if (map__load(self, session, filter) < 0)
+ return NULL;
+
+ return dso__find_symbol(self->dso, self->type, addr);
+}
+
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ struct perf_session *session,
+ symbol_filter_t filter)
+{
+ if (map__load(self, session, filter) < 0)
+ return NULL;
+
+ if (!dso__sorted_by_name(self->dso, self->type))
+ dso__sort_by_name(self->dso, self->type);
+
+ return dso__find_symbol_by_name(self->dso, self->type, name);
}
struct map *map__clone(struct map *self)
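map__load() above makes symbol loading an explicit, cached step: dso__loaded() tests a per-map-type bit, and loading is skipped once the bit is set, so both lookup helpers can call it unconditionally. A tiny standalone sketch of that bitmask caching pattern (the names below are stand-ins; only the pattern mirrors the patch):

	#include <stdbool.h>
	#include <stdio.h>

	enum map_type { MAP__FUNCTION, MAP__VARIABLE };	/* illustrative copy of perf's map types */

	struct fake_dso { unsigned char loaded; };	/* one bit per map_type */

	static bool dso_loaded(const struct fake_dso *d, enum map_type t)
	{
		return d->loaded & (1 << t);
	}

	static int dso_load(struct fake_dso *d, enum map_type t)
	{
		if (dso_loaded(d, t))
			return 0;		/* already loaded: nothing to do */
		printf("loading symbols for map type %d\n", t);
		d->loaded |= 1 << t;		/* remember for the next lookup */
		return 0;
	}

	int main(void)
	{
		struct fake_dso d = { .loaded = 0 };
		dso_load(&d, MAP__FUNCTION);	/* loads */
		dso_load(&d, MAP__FUNCTION);	/* cached, stays silent */
		dso_load(&d, MAP__VARIABLE);	/* separate bit, loads again */
		return 0;
	}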
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 9e5dbd66d34..e5bc0fb016b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -197,7 +197,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
if (id == config) {
closedir(evt_dir);
closedir(sys_dir);
- path = zalloc(sizeof(path));
+ path = zalloc(sizeof(*path));
path->system = malloc(MAX_EVENT_LENGTH);
if (!path->system) {
free(path);
@@ -467,7 +467,6 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
while ((evt_ent = readdir(evt_dir))) {
char event_opt[MAX_EVOPT_LEN + 1];
int len;
- unsigned int rem = MAX_EVOPT_LEN;
if (!strcmp(evt_ent->d_name, ".")
|| !strcmp(evt_ent->d_name, "..")
@@ -475,20 +474,12 @@ parse_subsystem_tracepoint_event(char *sys_name, char *flags)
|| !strcmp(evt_ent->d_name, "filter"))
continue;
- len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
- evt_ent->d_name);
+ len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name,
+ evt_ent->d_name, flags ? ":" : "",
+ flags ?: "");
if (len < 0)
return EVT_FAILED;
- rem -= len;
- if (flags) {
- if (rem < strlen(flags) + 1)
- return EVT_FAILED;
-
- strcat(event_opt, ":");
- strcat(event_opt, flags);
- }
-
if (parse_events(NULL, event_opt, 0))
return EVT_FAILED;
}
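The parse-events hunk above drops the strcat()-based buffer bookkeeping in favour of one snprintf() that appends ":flags" only when flags is set, using GCC's a ?: b extension (a if it is non-NULL, otherwise b). A small standalone sketch, assuming a GNU C compiler:

	#include <stdio.h>

	static void build_opt(const char *sys, const char *evt, const char *flags)
	{
		char buf[128];

		/* ':' and the flags string are only emitted when flags != NULL */
		snprintf(buf, sizeof(buf), "%s:%s%s%s", sys, evt,
			 flags ? ":" : "", flags ?: "");
		printf("%s\n", buf);
	}

	int main(void)
	{
		build_opt("sched", "sched_switch", NULL);	/* sched:sched_switch        */
		build_opt("sched", "sched_switch", "record");	/* sched:sched_switch:record */
		return 0;
	}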
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 6d8af48c925..efebd5b476b 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -430,6 +430,9 @@ int usage_with_options_internal(const char * const *usagestr,
pos = fprintf(stderr, " ");
if (opts->short_name)
pos += fprintf(stderr, "-%c", opts->short_name);
+ else
+ pos += fprintf(stderr, " ");
+
if (opts->long_name && opts->short_name)
pos += fprintf(stderr, ", ");
if (opts->long_name)
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index cd7fbda5e2a..29465d44004 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -48,6 +48,9 @@
/* If there is no space to write, returns -E2BIG. */
static int e_snprintf(char *str, size_t size, const char *format, ...)
+ __attribute__((format(printf, 3, 4)));
+
+static int e_snprintf(char *str, size_t size, const char *format, ...)
{
int ret;
va_list ap;
@@ -59,6 +62,18 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
return ret;
}
+/* Check the name is good for event/group */
+static bool check_event_name(const char *name)
+{
+ if (!isalpha(*name) && *name != '_')
+ return false;
+ while (*++name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return false;
+ }
+ return true;
+}
+
/* Parse probepoint definition. */
static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
{
@@ -66,10 +81,26 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
char c, nc = 0;
/*
* <Syntax>
- * perf probe SRC:LN
- * perf probe FUNC[+OFFS|%return][@SRC]
+ * perf probe [EVENT=]SRC:LN
+ * perf probe [EVENT=]FUNC[+OFFS|%return][@SRC]
+ *
+ * TODO:Group name support
*/
+ ptr = strchr(arg, '=');
+ if (ptr) { /* Event name */
+ *ptr = '\0';
+ tmp = ptr + 1;
+ ptr = strchr(arg, ':');
+ if (ptr) /* Group name is not supported yet. */
+ semantic_error("Group name is not supported yet.");
+ if (!check_event_name(arg))
+ semantic_error("%s is bad for event name -it must "
+ "follow C symbol-naming rule.", arg);
+ pp->event = strdup(arg);
+ arg = tmp;
+ }
+
ptr = strpbrk(arg, ":+@%");
if (ptr) {
nc = *ptr;
@@ -147,10 +178,13 @@ static void parse_perf_probe_probepoint(char *arg, struct probe_point *pp)
}
/* Parse perf-probe event definition */
-int parse_perf_probe_event(const char *str, struct probe_point *pp)
+void parse_perf_probe_event(const char *str, struct probe_point *pp,
+ bool *need_dwarf)
{
char **argv;
- int argc, i, need_dwarf = 0;
+ int argc, i;
+
+ *need_dwarf = false;
argv = argv_split(str, &argc);
if (!argv)
@@ -161,7 +195,7 @@ int parse_perf_probe_event(const char *str, struct probe_point *pp)
/* Parse probe point */
parse_perf_probe_probepoint(argv[0], pp);
if (pp->file || pp->line)
- need_dwarf = 1;
+ *need_dwarf = true;
/* Copy arguments and ensure return probe has no C argument */
pp->nr_args = argc - 1;
@@ -174,17 +208,15 @@ int parse_perf_probe_event(const char *str, struct probe_point *pp)
if (pp->retprobe)
semantic_error("You can't specify local"
" variable for kretprobe");
- need_dwarf = 1;
+ *need_dwarf = true;
}
}
argv_free(argv);
- return need_dwarf;
}
/* Parse kprobe_events event into struct probe_point */
-void parse_trace_kprobe_event(const char *str, char **group, char **event,
- struct probe_point *pp)
+void parse_trace_kprobe_event(const char *str, struct probe_point *pp)
{
char pr;
char *p;
@@ -200,18 +232,17 @@ void parse_trace_kprobe_event(const char *str, char **group, char **event,
/* Scan event and group name. */
ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]",
- &pr, (float *)(void *)group, (float *)(void *)event);
+ &pr, (float *)(void *)&pp->group,
+ (float *)(void *)&pp->event);
if (ret != 3)
semantic_error("Failed to parse event name: %s", argv[0]);
- pr_debug("Group:%s Event:%s probe:%c\n", *group, *event, pr);
-
- if (!pp)
- goto end;
+ pr_debug("Group:%s Event:%s probe:%c\n", pp->group, pp->event, pr);
pp->retprobe = (pr == 'r');
/* Scan function name and offset */
- ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function, &pp->offset);
+ ret = sscanf(argv[1], "%a[^+]+%d", (float *)(void *)&pp->function,
+ &pp->offset);
if (ret == 1)
pp->offset = 0;
@@ -230,15 +261,15 @@ void parse_trace_kprobe_event(const char *str, char **group, char **event,
die("Failed to copy argument.");
}
-end:
argv_free(argv);
}
-int synthesize_perf_probe_event(struct probe_point *pp)
+/* Synthesize only probe point (not argument) */
+int synthesize_perf_probe_point(struct probe_point *pp)
{
char *buf;
char offs[64] = "", line[64] = "";
- int i, len, ret;
+ int ret;
pp->probes[0] = buf = zalloc(MAX_CMDLEN);
if (!buf)
@@ -258,11 +289,25 @@ int synthesize_perf_probe_event(struct probe_point *pp)
ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->function,
offs, pp->retprobe ? "%return" : "", line);
else
- ret = e_snprintf(buf, MAX_CMDLEN, "%s%s%s%s", pp->file, line);
- if (ret <= 0)
- goto error;
- len = ret;
+ ret = e_snprintf(buf, MAX_CMDLEN, "%s%s", pp->file, line);
+ if (ret <= 0) {
+error:
+ free(pp->probes[0]);
+ pp->probes[0] = NULL;
+ }
+ return ret;
+}
+
+int synthesize_perf_probe_event(struct probe_point *pp)
+{
+ char *buf;
+ int i, len, ret;
+
+ len = synthesize_perf_probe_point(pp);
+ if (len < 0)
+ return 0;
+ buf = pp->probes[0];
for (i = 0; i < pp->nr_args; i++) {
ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
pp->args[i]);
@@ -275,6 +320,7 @@ int synthesize_perf_probe_event(struct probe_point *pp)
return pp->found;
error:
free(pp->probes[0]);
+ pp->probes[0] = NULL;
return ret;
}
@@ -304,6 +350,7 @@ int synthesize_trace_kprobe_event(struct probe_point *pp)
return pp->found;
error:
free(pp->probes[0]);
+ pp->probes[0] = NULL;
return ret;
}
@@ -363,6 +410,10 @@ static void clear_probe_point(struct probe_point *pp)
{
int i;
+ if (pp->event)
+ free(pp->event);
+ if (pp->group)
+ free(pp->group);
if (pp->function)
free(pp->function);
if (pp->file)
@@ -373,15 +424,33 @@ static void clear_probe_point(struct probe_point *pp)
free(pp->args);
for (i = 0; i < pp->found; i++)
free(pp->probes[i]);
- memset(pp, 0, sizeof(pp));
+ memset(pp, 0, sizeof(*pp));
+}
+
+/* Show an event */
+static void show_perf_probe_event(const char *event, const char *place,
+ struct probe_point *pp)
+{
+ int i, ret;
+ char buf[128];
+
+ ret = e_snprintf(buf, 128, "%s:%s", pp->group, event);
+ if (ret < 0)
+ die("Failed to copy event: %s", strerror(-ret));
+ printf(" %-40s (on %s", buf, place);
+
+ if (pp->nr_args > 0) {
+ printf(" with");
+ for (i = 0; i < pp->nr_args; i++)
+ printf(" %s", pp->args[i]);
+ }
+ printf(")\n");
}
/* List up current perf-probe events */
void show_perf_probe_events(void)
{
- unsigned int i;
int fd;
- char *group, *event;
struct probe_point pp;
struct strlist *rawlist;
struct str_node *ent;
@@ -390,13 +459,12 @@ void show_perf_probe_events(void)
rawlist = get_trace_kprobe_event_rawlist(fd);
close(fd);
- for (i = 0; i < strlist__nr_entries(rawlist); i++) {
- ent = strlist__entry(rawlist, i);
- parse_trace_kprobe_event(ent->s, &group, &event, &pp);
- synthesize_perf_probe_event(&pp);
- printf("[%s:%s]\t%s\n", group, event, pp.probes[0]);
- free(group);
- free(event);
+ strlist__for_each(ent, rawlist) {
+ parse_trace_kprobe_event(ent->s, &pp);
+ /* Synthesize only event probe point */
+ synthesize_perf_probe_point(&pp);
+ /* Show an event */
+ show_perf_probe_event(pp.event, pp.probes[0], &pp);
clear_probe_point(&pp);
}
@@ -404,21 +472,27 @@ void show_perf_probe_events(void)
}
/* Get current perf-probe event names */
-static struct strlist *get_perf_event_names(int fd)
+static struct strlist *get_perf_event_names(int fd, bool include_group)
{
- unsigned int i;
- char *group, *event;
+ char buf[128];
struct strlist *sl, *rawlist;
struct str_node *ent;
+ struct probe_point pp;
+ memset(&pp, 0, sizeof(pp));
rawlist = get_trace_kprobe_event_rawlist(fd);
- sl = strlist__new(false, NULL);
- for (i = 0; i < strlist__nr_entries(rawlist); i++) {
- ent = strlist__entry(rawlist, i);
- parse_trace_kprobe_event(ent->s, &group, &event, NULL);
- strlist__add(sl, event);
- free(group);
+ sl = strlist__new(true, NULL);
+ strlist__for_each(ent, rawlist) {
+ parse_trace_kprobe_event(ent->s, &pp);
+ if (include_group) {
+ if (e_snprintf(buf, 128, "%s:%s", pp.group,
+ pp.event) < 0)
+ die("Failed to copy group:event name.");
+ strlist__add(sl, buf);
+ } else
+ strlist__add(sl, pp.event);
+ clear_probe_point(&pp);
}
strlist__delete(rawlist);
@@ -426,24 +500,36 @@ static struct strlist *get_perf_event_names(int fd)
return sl;
}
-static int write_trace_kprobe_event(int fd, const char *buf)
+static void write_trace_kprobe_event(int fd, const char *buf)
{
int ret;
+ pr_debug("Writing event: %s\n", buf);
ret = write(fd, buf, strlen(buf));
if (ret <= 0)
- die("Failed to create event.");
- else
- printf("Added new event: %s\n", buf);
-
- return ret;
+ die("Failed to write event: %s", strerror(errno));
}
static void get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist)
+ struct strlist *namelist, bool allow_suffix)
{
int i, ret;
- for (i = 0; i < MAX_EVENT_INDEX; i++) {
+
+ /* Try no suffix */
+ ret = e_snprintf(buf, len, "%s", base);
+ if (ret < 0)
+ die("snprintf() failed: %s", strerror(-ret));
+ if (!strlist__has_entry(namelist, buf))
+ return;
+
+ if (!allow_suffix) {
+ pr_warning("Error: event \"%s\" already exists. "
+ "(Use -f to force duplicates.)\n", base);
+ die("Can't add new event.");
+ }
+
+ /* Try to add suffix */
+ for (i = 1; i < MAX_EVENT_INDEX; i++) {
ret = e_snprintf(buf, len, "%s_%d", base, i);
if (ret < 0)
die("snprintf() failed: %s", strerror(-ret));
@@ -454,31 +540,138 @@ static void get_new_event_name(char *buf, size_t len, const char *base,
die("Too many events are on the same function.");
}
-void add_trace_kprobe_events(struct probe_point *probes, int nr_probes)
+void add_trace_kprobe_events(struct probe_point *probes, int nr_probes,
+ bool force_add)
{
int i, j, fd;
struct probe_point *pp;
char buf[MAX_CMDLEN];
char event[64];
struct strlist *namelist;
+ bool allow_suffix;
fd = open_kprobe_events(O_RDWR, O_APPEND);
/* Get current event names */
- namelist = get_perf_event_names(fd);
+ namelist = get_perf_event_names(fd, false);
for (j = 0; j < nr_probes; j++) {
pp = probes + j;
+ if (!pp->event)
+ pp->event = strdup(pp->function);
+ if (!pp->group)
+ pp->group = strdup(PERFPROBE_GROUP);
+ DIE_IF(!pp->event || !pp->group);
+ /* If force_add is true, suffix search is allowed */
+ allow_suffix = force_add;
for (i = 0; i < pp->found; i++) {
/* Get an unused new event name */
- get_new_event_name(event, 64, pp->function, namelist);
+ get_new_event_name(event, 64, pp->event, namelist,
+ allow_suffix);
snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s\n",
pp->retprobe ? 'r' : 'p',
- PERFPROBE_GROUP, event,
+ pp->group, event,
pp->probes[i]);
write_trace_kprobe_event(fd, buf);
+ printf("Added new event:\n");
+ /* Get the first parameter (probe-point) */
+ sscanf(pp->probes[i], "%s", buf);
+ show_perf_probe_event(event, buf, pp);
/* Add added event name to namelist */
strlist__add(namelist, event);
+ /*
+ * Probes after the first probe which comes from same
+ * user input are always allowed to add suffix, because
+ * there might be several addresses corresponding to
+ * one code line.
+ */
+ allow_suffix = true;
+ }
+ }
+ /* Show how to use the event. */
+ printf("\nYou can now use it on all perf tools, such as:\n\n");
+ printf("\tperf record -e %s:%s -a sleep 1\n\n", PERFPROBE_GROUP, event);
+
+ strlist__delete(namelist);
+ close(fd);
+}
+
+static void __del_trace_kprobe_event(int fd, struct str_node *ent)
+{
+ char *p;
+ char buf[128];
+
+ /* Convert from perf-probe event to trace-kprobe event */
+ if (e_snprintf(buf, 128, "-:%s", ent->s) < 0)
+ die("Failed to copy event.");
+ p = strchr(buf + 2, ':');
+ if (!p)
+ die("Internal error: %s should have ':' but not.", ent->s);
+ *p = '/';
+
+ write_trace_kprobe_event(fd, buf);
+ printf("Remove event: %s\n", ent->s);
+}
+
+static void del_trace_kprobe_event(int fd, const char *group,
+ const char *event, struct strlist *namelist)
+{
+ char buf[128];
+ struct str_node *ent, *n;
+ int found = 0;
+
+ if (e_snprintf(buf, 128, "%s:%s", group, event) < 0)
+ die("Failed to copy event.");
+
+ if (strpbrk(buf, "*?")) { /* Glob-exp */
+ strlist__for_each_safe(ent, n, namelist)
+ if (strglobmatch(ent->s, buf)) {
+ found++;
+ __del_trace_kprobe_event(fd, ent);
+ strlist__remove(namelist, ent);
+ }
+ } else {
+ ent = strlist__find(namelist, buf);
+ if (ent) {
+ found++;
+ __del_trace_kprobe_event(fd, ent);
+ strlist__remove(namelist, ent);
}
}
+ if (found == 0)
+ pr_info("Info: event \"%s\" does not exist, could not remove it.\n", buf);
+}
+
+void del_trace_kprobe_events(struct strlist *dellist)
+{
+ int fd;
+ const char *group, *event;
+ char *p, *str;
+ struct str_node *ent;
+ struct strlist *namelist;
+
+ fd = open_kprobe_events(O_RDWR, O_APPEND);
+ /* Get current event names */
+ namelist = get_perf_event_names(fd, true);
+
+ strlist__for_each(ent, dellist) {
+ str = strdup(ent->s);
+ if (!str)
+ die("Failed to copy event.");
+ pr_debug("Parsing: %s\n", str);
+ p = strchr(str, ':');
+ if (p) {
+ group = str;
+ *p = '\0';
+ event = p + 1;
+ } else {
+ group = "*";
+ event = str;
+ }
+ pr_debug("Group: %s, Event: %s\n", group, event);
+ del_trace_kprobe_event(fd, group, event, namelist);
+ free(str);
+ }
+ strlist__delete(namelist);
close(fd);
}
+
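get_new_event_name() above first tries the requested name as-is, refuses duplicates unless -f (force_add) was given, and otherwise appends _1, _2, and so on until it finds a free name. Below is a standalone sketch of that naming policy against a plain string array instead of a strlist; everything here is illustrative, the real code dies via die() rather than returning, and 1024 merely stands in for MAX_EVENT_INDEX, whose value is not visible in this hunk:

	#include <stdio.h>
	#include <string.h>

	static const char *existing[] = { "schedule", "schedule_1", NULL };

	static int name_taken(const char *name)
	{
		for (int i = 0; existing[i]; i++)
			if (!strcmp(existing[i], name))
				return 1;
		return 0;
	}

	/* Fills buf with a free name and returns 0, or -1 when duplicates are refused. */
	static int pick_event_name(char *buf, size_t len, const char *base, int allow_suffix)
	{
		snprintf(buf, len, "%s", base);
		if (!name_taken(buf))
			return 0;			/* base name is free          */
		if (!allow_suffix)
			return -1;			/* would need -f to force      */
		for (int i = 1; i < 1024; i++) {	/* stand-in for MAX_EVENT_INDEX */
			snprintf(buf, len, "%s_%d", base, i);
			if (!name_taken(buf))
				return 0;		/* first unused suffix wins    */
		}
		return -1;				/* too many events on one function */
	}

	int main(void)
	{
		char name[64];

		if (pick_event_name(name, sizeof(name), "schedule", 1) == 0)
			printf("picked %s\n", name);	/* picked schedule_2 */
		return 0;
	}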
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 0c6fe56fe38..7f1d499118c 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -1,15 +1,19 @@
#ifndef _PROBE_EVENT_H
#define _PROBE_EVENT_H
+#include <stdbool.h>
#include "probe-finder.h"
#include "strlist.h"
-extern int parse_perf_probe_event(const char *str, struct probe_point *pp);
+extern void parse_perf_probe_event(const char *str, struct probe_point *pp,
+ bool *need_dwarf);
+extern int synthesize_perf_probe_point(struct probe_point *pp);
extern int synthesize_perf_probe_event(struct probe_point *pp);
-extern void parse_trace_kprobe_event(const char *str, char **group,
- char **event, struct probe_point *pp);
+extern void parse_trace_kprobe_event(const char *str, struct probe_point *pp);
extern int synthesize_trace_kprobe_event(struct probe_point *pp);
-extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes);
+extern void add_trace_kprobe_events(struct probe_point *probes, int nr_probes,
+ bool force_add);
+extern void del_trace_kprobe_events(struct strlist *dellist);
extern void show_perf_probe_events(void);
/* Maximum index number of event-name postfix */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 293cdfc1b8c..4b852c0d16a 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -106,7 +106,7 @@ static int strtailcmp(const char *s1, const char *s2)
{
int i1 = strlen(s1);
int i2 = strlen(s2);
- while (--i1 > 0 && --i2 > 0) {
+ while (--i1 >= 0 && --i2 >= 0) {
if (s1[i1] != s2[i2])
return s1[i1] - s2[i2];
}
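The strtailcmp() hunk above is a genuine off-by-one fix: with "--i1 > 0" the characters at index 0 were never compared, so two paths differing only in their first character ("a.c" vs "b.c") wrongly compared equal. A standalone sketch reproducing the fixed behaviour; the loop body is copied from the context shown above, and the trailing "return 0" is an assumption since it lies outside the quoted hunk:

	#include <stdio.h>
	#include <string.h>

	/* Tail-compare two strings, mirroring the fixed hunk above. */
	static int strtailcmp(const char *s1, const char *s2)
	{
		int i1 = strlen(s1);
		int i2 = strlen(s2);

		while (--i1 >= 0 && --i2 >= 0) {
			if (s1[i1] != s2[i2])
				return s1[i1] - s2[i2];
		}
		return 0;	/* assumed: one string is a suffix of the other */
	}

	int main(void)
	{
		/* Before the fix the first call also returned 0. */
		printf("%d\n", strtailcmp("a.c", "b.c"));		/* non-zero        */
		printf("%d\n", strtailcmp("kernel/sched.c", "sched.c"));	/* 0: tail matches */
		return 0;
	}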
@@ -687,10 +687,8 @@ int find_probepoint(int fd, struct probe_point *pp)
struct probe_finder pf = {.pp = pp};
ret = dwarf_init(fd, DW_DLC_READ, 0, 0, &__dw_debug, &__dw_error);
- if (ret != DW_DLV_OK) {
- pr_warning("No dwarf info found in the vmlinux - please rebuild with CONFIG_DEBUG_INFO.\n");
+ if (ret != DW_DLV_OK)
return -ENOENT;
- }
pp->found = 0;
while (++cu_number) {
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index bdebca6697d..a4086aaddb7 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -1,9 +1,9 @@
#ifndef _PROBE_FINDER_H
#define _PROBE_FINDER_H
-#define MAX_PATH_LEN 256
-#define MAX_PROBE_BUFFER 1024
-#define MAX_PROBES 128
+#define MAX_PATH_LEN 256
+#define MAX_PROBE_BUFFER 1024
+#define MAX_PROBES 128
static inline int is_c_varname(const char *name)
{
@@ -12,45 +12,53 @@ static inline int is_c_varname(const char *name)
}
struct probe_point {
+ char *event; /* Event name */
+ char *group; /* Event group */
+
/* Inputs */
- char *file; /* File name */
- int line; /* Line number */
+ char *file; /* File name */
+ int line; /* Line number */
- char *function; /* Function name */
- int offset; /* Offset bytes */
+ char *function; /* Function name */
+ int offset; /* Offset bytes */
- int nr_args; /* Number of arguments */
- char **args; /* Arguments */
+ int nr_args; /* Number of arguments */
+ char **args; /* Arguments */
- int retprobe; /* Return probe */
+ int retprobe; /* Return probe */
/* Output */
- int found; /* Number of found probe points */
- char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/
+ int found; /* Number of found probe points */
+ char *probes[MAX_PROBES]; /* Output buffers (will be allocated)*/
};
#ifndef NO_LIBDWARF
extern int find_probepoint(int fd, struct probe_point *pp);
-#include <libdwarf/dwarf.h>
-#include <libdwarf/libdwarf.h>
+/* Workaround for undefined _MIPS_SZLONG bug in libdwarf.h: */
+#ifndef _MIPS_SZLONG
+# define _MIPS_SZLONG 0
+#endif
+
+#include <dwarf.h>
+#include <libdwarf.h>
struct probe_finder {
- struct probe_point *pp; /* Target probe point */
+ struct probe_point *pp; /* Target probe point */
/* For function searching */
- Dwarf_Addr addr; /* Address */
- Dwarf_Unsigned fno; /* File number */
- Dwarf_Unsigned lno; /* Line number */
- Dwarf_Off inl_offs; /* Inline offset */
- Dwarf_Die cu_die; /* Current CU */
+ Dwarf_Addr addr; /* Address */
+ Dwarf_Unsigned fno; /* File number */
+ Dwarf_Unsigned lno; /* Line number */
+ Dwarf_Off inl_offs; /* Inline offset */
+ Dwarf_Die cu_die; /* Current CU */
/* For variable searching */
- Dwarf_Addr cu_base; /* Current CU base address */
- Dwarf_Locdesc fbloc; /* Location of Current Frame Base */
- const char *var; /* Current variable name */
- char *buf; /* Current output buffer */
- int len; /* Length of output buffer */
+ Dwarf_Addr cu_base; /* Current CU base address */
+ Dwarf_Locdesc fbloc; /* Location of Current Frame Base */
+ const char *var; /* Current variable name */
+ char *buf; /* Current output buffer */
+ int len; /* Length of output buffer */
};
#endif /* NO_LIBDWARF */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
new file mode 100644
index 00000000000..ce3a6c8abe7
--- /dev/null
+++ b/tools/perf/util/session.c
@@ -0,0 +1,150 @@
+#include <linux/kernel.h>
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "session.h"
+#include "sort.h"
+#include "util.h"
+
+static int perf_session__open(struct perf_session *self, bool force)
+{
+ struct stat input_stat;
+
+ self->fd = open(self->filename, O_RDONLY);
+ if (self->fd < 0) {
+ pr_err("failed to open file: %s", self->filename);
+ if (!strcmp(self->filename, "perf.data"))
+ pr_err(" (try 'perf record' first)");
+ pr_err("\n");
+ return -errno;
+ }
+
+ if (fstat(self->fd, &input_stat) < 0)
+ goto out_close;
+
+ if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
+ pr_err("file %s not owned by current user or root\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (!input_stat.st_size) {
+ pr_info("zero-sized file (%s), nothing to do!\n",
+ self->filename);
+ goto out_close;
+ }
+
+ if (perf_header__read(&self->header, self->fd) < 0) {
+ pr_err("incompatible file format");
+ goto out_close;
+ }
+
+ self->size = input_stat.st_size;
+ return 0;
+
+out_close:
+ close(self->fd);
+ self->fd = -1;
+ return -1;
+}
+
+struct perf_session *perf_session__new(const char *filename, int mode, bool force)
+{
+ size_t len = filename ? strlen(filename) + 1 : 0;
+ struct perf_session *self = zalloc(sizeof(*self) + len);
+
+ if (self == NULL)
+ goto out;
+
+ if (perf_header__init(&self->header) < 0)
+ goto out_free;
+
+ memcpy(self->filename, filename, len);
+ self->threads = RB_ROOT;
+ self->last_match = NULL;
+ self->mmap_window = 32;
+ self->cwd = NULL;
+ self->cwdlen = 0;
+ map_groups__init(&self->kmaps);
+
+ if (perf_session__create_kernel_maps(self) < 0)
+ goto out_delete;
+
+ if (mode == O_RDONLY && perf_session__open(self, force) < 0)
+ goto out_delete;
+out:
+ return self;
+out_free:
+ free(self);
+ return NULL;
+out_delete:
+ perf_session__delete(self);
+ return NULL;
+}
+
+void perf_session__delete(struct perf_session *self)
+{
+ perf_header__exit(&self->header);
+ close(self->fd);
+ free(self->cwd);
+ free(self);
+}
+
+static bool symbol__match_parent_regex(struct symbol *sym)
+{
+ if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
+ return 1;
+
+ return 0;
+}
+
+struct symbol **perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent)
+{
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ struct symbol **syms = NULL;
+ unsigned int i;
+
+ if (symbol_conf.use_callchain) {
+ syms = calloc(chain->nr, sizeof(*syms));
+ if (!syms) {
+ fprintf(stderr, "Can't allocate memory for symbols\n");
+ exit(-1);
+ }
+ }
+
+ for (i = 0; i < chain->nr; i++) {
+ u64 ip = chain->ips[i];
+ struct addr_location al;
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR; break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL; break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER; break;
+ default:
+ break;
+ }
+ continue;
+ }
+
+ thread__find_addr_location(thread, self, cpumode,
+ MAP__FUNCTION, ip, &al, NULL);
+ if (al.sym != NULL) {
+ if (sort__has_parent && !*parent &&
+ symbol__match_parent_regex(al.sym))
+ *parent = al.sym;
+ if (!symbol_conf.use_callchain)
+ break;
+ syms[i] = al.sym;
+ }
+ }
+
+ return syms;
+}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
new file mode 100644
index 00000000000..32eaa1bada0
--- /dev/null
+++ b/tools/perf/util/session.h
@@ -0,0 +1,61 @@
+#ifndef __PERF_SESSION_H
+#define __PERF_SESSION_H
+
+#include "event.h"
+#include "header.h"
+#include "thread.h"
+#include <linux/rbtree.h>
+#include "../../../include/linux/perf_event.h"
+
+struct ip_callchain;
+struct thread;
+struct symbol;
+
+struct perf_session {
+ struct perf_header header;
+ unsigned long size;
+ unsigned long mmap_window;
+ struct map_groups kmaps;
+ struct rb_root threads;
+ struct thread *last_match;
+ struct events_stats events_stats;
+ unsigned long event_total[PERF_RECORD_MAX];
+ struct rb_root hists;
+ u64 sample_type;
+ int fd;
+ int cwdlen;
+ char *cwd;
+ char filename[0];
+};
+
+typedef int (*event_op)(event_t *self, struct perf_session *session);
+
+struct perf_event_ops {
+ event_op process_sample_event;
+ event_op process_mmap_event;
+ event_op process_comm_event;
+ event_op process_fork_event;
+ event_op process_exit_event;
+ event_op process_lost_event;
+ event_op process_read_event;
+ event_op process_throttle_event;
+ event_op process_unthrottle_event;
+ int (*sample_type_check)(struct perf_session *session);
+ unsigned long total_unknown;
+ bool full_paths;
+};
+
+struct perf_session *perf_session__new(const char *filename, int mode, bool force);
+void perf_session__delete(struct perf_session *self);
+
+int perf_session__process_events(struct perf_session *self,
+ struct perf_event_ops *event_ops);
+
+struct symbol **perf_session__resolve_callchain(struct perf_session *self,
+ struct thread *thread,
+ struct ip_callchain *chain,
+ struct symbol **parent);
+
+int perf_header__read_build_ids(int input, u64 offset, u64 file_size);
+
+#endif /* __PERF_SESSION_H */
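session.h above gives each perf builtin one entry point: fill a struct perf_event_ops with the callbacks it cares about and hand it to perf_session__process_events(). A hedged fragment of how a caller might wire this up; it only compiles inside tools/perf against the headers added here, the callback body and the "perf.data" filename are just placeholders, and whether unset callbacks need explicit stubs is not visible in this hunk:

	#include <fcntl.h>
	#include <stdbool.h>
	#include "util/session.h"	/* include path as seen from tools/perf/builtin-*.c */

	static int process_sample_event(event_t *event, struct perf_session *session)
	{
		(void)event; (void)session;	/* a real tool would update session->hists here */
		return 0;
	}

	static struct perf_event_ops event_ops = {
		.process_sample_event	= process_sample_event,
		/* other callbacks left NULL for brevity */
	};

	static int run(bool force)
	{
		struct perf_session *session = perf_session__new("perf.data", O_RDONLY, force);
		int err;

		if (session == NULL)
			return -1;

		err = perf_session__process_events(session, &event_ops);
		perf_session__delete(session);
		return err;
	}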
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index b490354d1b2..cb0f327de9e 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -288,3 +288,29 @@ int sort_dimension__add(const char *tok)
return -ESRCH;
}
+
+void setup_sorting(const char * const usagestr[], const struct option *opts)
+{
+ char *tmp, *tok, *str = strdup(sort_order);
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (sort_dimension__add(tok) < 0) {
+ error("Unknown --sort key: `%s'", tok);
+ usage_with_options(usagestr, opts);
+ }
+ }
+
+ free(str);
+}
+
+void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
+ const char *list_name, FILE *fp)
+{
+ if (list && strlist__nr_entries(list) == 1) {
+ if (fp != NULL)
+ fprintf(fp, "# %s: %s\n", list_name,
+ strlist__entry(list, 0)->s);
+ self->elide = true;
+ }
+}
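setup_sorting() above simply splits the --sort string on commas and spaces and feeds each key to sort_dimension__add(). A standalone sketch of that tokenising loop, with the key list chosen as an example and error handling reduced to a printf:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *tmp, *tok, *str = strdup("comm, dso,symbol");

		for (tok = strtok_r(str, ", ", &tmp);
		     tok; tok = strtok_r(NULL, ", ", &tmp))
			printf("sort key: %s\n", tok);	/* comm, dso, symbol */

		free(str);
		return 0;
	}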
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 333e664ff45..753f9ea99fb 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -49,9 +49,13 @@ struct hist_entry {
struct symbol *sym;
u64 ip;
char level;
- struct symbol *parent;
+ struct symbol *parent;
struct callchain_node callchain;
- struct rb_root sorted_chain;
+ union {
+ unsigned long position;
+ struct hist_entry *pair;
+ struct rb_root sorted_chain;
+ };
};
enum sort_type {
@@ -81,6 +85,8 @@ struct sort_entry {
extern struct sort_entry sort_thread;
extern struct list_head hist_entry__sort_list;
+void setup_sorting(const char * const usagestr[], const struct option *opts);
+
extern int repsep_fprintf(FILE *fp, const char *fmt, ...);
extern size_t sort__thread_print(FILE *, struct hist_entry *, unsigned int);
extern size_t sort__comm_print(FILE *, struct hist_entry *, unsigned int);
@@ -95,5 +101,7 @@ extern int64_t sort__sym_cmp(struct hist_entry *, struct hist_entry *);
extern int64_t sort__parent_cmp(struct hist_entry *, struct hist_entry *);
extern size_t sort__parent_print(FILE *, struct hist_entry *, unsigned int);
extern int sort_dimension__add(const char *);
+void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
+ const char *list_name, FILE *fp);
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index f24a8cc933d..5352d7dccc6 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -226,3 +226,28 @@ fail:
argv_free(argv);
return NULL;
}
+
+/* Glob expression pattern matching */
+bool strglobmatch(const char *str, const char *pat)
+{
+ while (*str && *pat && *pat != '*') {
+ if (*pat == '?') {
+ str++;
+ pat++;
+ } else
+ if (*str++ != *pat++)
+ return false;
+ }
+ /* Check wild card */
+ if (*pat == '*') {
+ while (*pat == '*')
+ pat++;
+ if (!*pat) /* Tail wild card matches all */
+ return true;
+ while (*str)
+ if (strglobmatch(str++, pat))
+ return true;
+ }
+ return !*str && !*pat;
+}
+
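strglobmatch() above handles '*' and '?' by recursing at every candidate offset once a '*' is seen. A few concrete cases, written as a standalone check that assumes it is linked together with tools/perf/util/string.c:

	#include <assert.h>
	#include "string.h"	/* tools/perf/util/string.h, for strglobmatch() */

	int main(void)
	{
		assert(strglobmatch("sys_open", "sys_*"));	/* trailing '*' matches the rest    */
		assert(strglobmatch("sys_open", "*open"));	/* leading '*' tried at each offset */
		assert(strglobmatch("sys_open", "sys_?pen"));	/* '?' consumes exactly one char    */
		assert(!strglobmatch("sys_open", "sys_read"));	/* plain mismatch                   */
		return 0;
	}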
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index bfecec265a1..02ede58c54b 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -1,6 +1,7 @@
#ifndef __PERF_STRING_H_
#define __PERF_STRING_H_
+#include <stdbool.h>
#include "types.h"
int hex2u64(const char *ptr, u64 *val);
@@ -8,6 +9,7 @@ char *strxfrchar(char *s, char from, char to);
s64 perf_atoll(const char *str);
char **argv_split(const char *str, int *argcp);
void argv_free(char **argv);
+bool strglobmatch(const char *str, const char *pat);
#define _STR(x) #x
#define STR(x) _STR(x)
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 7ad38171dc2..6783a204355 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -102,7 +102,7 @@ void strlist__remove(struct strlist *self, struct str_node *sn)
str_node__delete(sn, self->dupstr);
}
-bool strlist__has_entry(struct strlist *self, const char *entry)
+struct str_node *strlist__find(struct strlist *self, const char *entry)
{
struct rb_node **p = &self->entries.rb_node;
struct rb_node *parent = NULL;
@@ -120,10 +120,10 @@ bool strlist__has_entry(struct strlist *self, const char *entry)
else if (rc < 0)
p = &(*p)->rb_right;
else
- return true;
+ return sn;
}
- return false;
+ return NULL;
}
static int strlist__parse_list_entry(struct strlist *self, const char *s)
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index cb4659306d7..3ba839007d2 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -23,7 +23,12 @@ int strlist__load(struct strlist *self, const char *filename);
int strlist__add(struct strlist *self, const char *str);
struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
-bool strlist__has_entry(struct strlist *self, const char *entry);
+struct str_node *strlist__find(struct strlist *self, const char *entry);
+
+static inline bool strlist__has_entry(struct strlist *self, const char *entry)
+{
+ return strlist__find(self, entry) != NULL;
+}
static inline bool strlist__empty(const struct strlist *self)
{
@@ -35,5 +40,39 @@ static inline unsigned int strlist__nr_entries(const struct strlist *self)
return self->nr_entries;
}
+/* For strlist iteration */
+static inline struct str_node *strlist__first(struct strlist *self)
+{
+ struct rb_node *rn = rb_first(&self->entries);
+ return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+static inline struct str_node *strlist__next(struct str_node *sn)
+{
+ struct rb_node *rn;
+ if (!sn)
+ return NULL;
+ rn = rb_next(&sn->rb_node);
+ return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
+}
+
+/**
+ * strlist_for_each - iterate over a strlist
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @self: the &struct strlist for loop.
+ */
+#define strlist__for_each(pos, self) \
+ for (pos = strlist__first(self); pos; pos = strlist__next(pos))
+
+/**
+ * strlist_for_each_safe - iterate over a strlist safe against removal of
+ * str_node
+ * @pos: the &struct str_node to use as a loop cursor.
+ * @n: another &struct str_node to use as temporary storage.
+ * @self: the &struct strlist for loop.
+ */
+#define strlist__for_each_safe(pos, n, self) \
+ for (pos = strlist__first(self), n = strlist__next(pos); pos;\
+ pos = n, n = strlist__next(n))
+
int strlist__parse_list(struct strlist *self, const char *s);
#endif /* __PERF_STRLIST_H */
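The new strlist__for_each()/strlist__for_each_safe() helpers above walk the underlying rbtree in order. A hedged usage fragment, again assuming it is compiled inside tools/perf together with util/strlist.c:

	#include <stdio.h>
	#include "strlist.h"	/* tools/perf/util/strlist.h */

	static void dump(struct strlist *sl)
	{
		struct str_node *pos;

		strlist__for_each(pos, sl)		/* in-order rbtree walk */
			printf("%s\n", pos->s);
	}

	int main(void)
	{
		struct strlist *sl = strlist__new(true, NULL);	/* true: strlist keeps its own copies */

		strlist__add(sl, "probe:schedule");
		strlist__add(sl, "probe:vfs_read");
		dump(sl);
		strlist__delete(sl);
		return 0;
	}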
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index fffcb937cdc..ab92763edb0 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,5 +1,7 @@
#include "util.h"
#include "../perf.h"
+#include "session.h"
+#include "sort.h"
#include "string.h"
#include "symbol.h"
#include "thread.h"
@@ -29,33 +31,50 @@ enum dso_origin {
};
static void dsos__add(struct list_head *head, struct dso *dso);
-static struct map *thread__find_map_by_name(struct thread *self, char *name);
static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
-struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter);
-unsigned int symbol__priv_size;
+ struct perf_session *session, symbol_filter_t filter);
static int vmlinux_path__nr_entries;
static char **vmlinux_path;
-static struct symbol_conf symbol_conf__defaults = {
+struct symbol_conf symbol_conf = {
+ .exclude_other = true,
.use_modules = true,
.try_vmlinux_path = true,
};
-static struct thread kthread_mem;
-struct thread *kthread = &kthread_mem;
-
bool dso__loaded(const struct dso *self, enum map_type type)
{
return self->loaded & (1 << type);
}
+bool dso__sorted_by_name(const struct dso *self, enum map_type type)
+{
+ return self->sorted_by_name & (1 << type);
+}
+
static void dso__set_loaded(struct dso *self, enum map_type type)
{
self->loaded |= (1 << type);
}
+static void dso__set_sorted_by_name(struct dso *self, enum map_type type)
+{
+ self->sorted_by_name |= (1 << type);
+}
+
+static bool symbol_type__is_a(char symbol_type, enum map_type map_type)
+{
+ switch (map_type) {
+ case MAP__FUNCTION:
+ return symbol_type == 'T' || symbol_type == 'W';
+ case MAP__VARIABLE:
+ return symbol_type == 'D' || symbol_type == 'd';
+ default:
+ return false;
+ }
+}
+
static void symbols__fixup_end(struct rb_root *self)
{
struct rb_node *nd, *prevnd = rb_first(self);
@@ -79,7 +98,7 @@ static void symbols__fixup_end(struct rb_root *self)
curr->end = roundup(curr->start, 4096);
}
-static void __thread__fixup_maps_end(struct thread *self, enum map_type type)
+static void __map_groups__fixup_end(struct map_groups *self, enum map_type type)
{
struct map *prev, *curr;
struct rb_node *nd, *prevnd = rb_first(&self->maps[type]);
@@ -102,23 +121,23 @@ static void __thread__fixup_maps_end(struct thread *self, enum map_type type)
curr->end = ~0UL;
}
-static void thread__fixup_maps_end(struct thread *self)
+static void map_groups__fixup_end(struct map_groups *self)
{
int i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- __thread__fixup_maps_end(self, i);
+ __map_groups__fixup_end(self, i);
}
static struct symbol *symbol__new(u64 start, u64 len, const char *name)
{
size_t namelen = strlen(name) + 1;
- struct symbol *self = zalloc(symbol__priv_size +
+ struct symbol *self = zalloc(symbol_conf.priv_size +
sizeof(*self) + namelen);
if (self == NULL)
return NULL;
- if (symbol__priv_size)
- self = ((void *)self) + symbol__priv_size;
+ if (symbol_conf.priv_size)
+ self = ((void *)self) + symbol_conf.priv_size;
self->start = start;
self->end = len ? start + len - 1 : start;
@@ -132,7 +151,7 @@ static struct symbol *symbol__new(u64 start, u64 len, const char *name)
static void symbol__delete(struct symbol *self)
{
- free(((void *)self) - symbol__priv_size);
+ free(((void *)self) - symbol_conf.priv_size);
}
static size_t symbol__fprintf(struct symbol *self, FILE *fp)
@@ -164,11 +183,11 @@ struct dso *dso__new(const char *name)
dso__set_long_name(self, self->name);
self->short_name = self->name;
for (i = 0; i < MAP__NR_TYPES; ++i)
- self->symbols[i] = RB_ROOT;
- self->find_symbol = dso__find_symbol;
+ self->symbols[i] = self->symbol_names[i] = RB_ROOT;
self->slen_calculated = 0;
self->origin = DSO__ORIG_NOT_FOUND;
self->loaded = 0;
+ self->sorted_by_name = 0;
self->has_build_id = 0;
}
@@ -246,11 +265,85 @@ static struct symbol *symbols__find(struct rb_root *self, u64 ip)
return NULL;
}
-struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr)
+struct symbol_name_rb_node {
+ struct rb_node rb_node;
+ struct symbol sym;
+};
+
+static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
+{
+ struct rb_node **p = &self->rb_node;
+ struct rb_node *parent = NULL;
+ struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
+
+ while (*p != NULL) {
+ parent = *p;
+ s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
+ if (strcmp(sym->name, s->sym.name) < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ rb_link_node(&symn->rb_node, parent, p);
+ rb_insert_color(&symn->rb_node, self);
+}
+
+static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source)
+{
+ struct rb_node *nd;
+
+ for (nd = rb_first(source); nd; nd = rb_next(nd)) {
+ struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
+ symbols__insert_by_name(self, pos);
+ }
+}
+
+static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name)
+{
+ struct rb_node *n;
+
+ if (self == NULL)
+ return NULL;
+
+ n = self->rb_node;
+
+ while (n) {
+ struct symbol_name_rb_node *s;
+ int cmp;
+
+ s = rb_entry(n, struct symbol_name_rb_node, rb_node);
+ cmp = strcmp(name, s->sym.name);
+
+ if (cmp < 0)
+ n = n->rb_left;
+ else if (cmp > 0)
+ n = n->rb_right;
+ else
+ return &s->sym;
+ }
+
+ return NULL;
+}
+
+struct symbol *dso__find_symbol(struct dso *self,
+ enum map_type type, u64 addr)
{
return symbols__find(&self->symbols[type], addr);
}
+struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
+ const char *name)
+{
+ return symbols__find_by_name(&self->symbol_names[type], name);
+}
+
+void dso__sort_by_name(struct dso *self, enum map_type type)
+{
+ dso__set_sorted_by_name(self, type);
+ return symbols__sort_by_name(&self->symbol_names[type],
+ &self->symbols[type]);
+}
+
int build_id__sprintf(u8 *self, int len, char *bf)
{
char *bid = bf;
@@ -327,10 +420,7 @@ static int dso__load_all_kallsyms(struct dso *self, struct map *map)
continue;
symbol_type = toupper(line[len]);
- /*
- * We're interested only in code ('T'ext)
- */
- if (symbol_type != 'T' && symbol_type != 'W')
+ if (!symbol_type__is_a(symbol_type, map->type))
continue;
symbol_name = line + len + 2;
@@ -364,8 +454,8 @@ out_failure:
* kernel range is broken in several maps, named [kernel].N, as we don't have
* the original ELF section names vmlinux have.
*/
-static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread *thread,
- symbol_filter_t filter)
+static int dso__split_kallsyms(struct dso *self, struct map *map,
+ struct perf_session *session, symbol_filter_t filter)
{
struct map *curr_map = map;
struct symbol *pos;
@@ -382,13 +472,13 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread
module = strchr(pos->name, '\t');
if (module) {
- if (!thread->use_modules)
+ if (!symbol_conf.use_modules)
goto discard_symbol;
*module++ = '\0';
if (strcmp(self->name, module)) {
- curr_map = thread__find_map_by_name(thread, module);
+ curr_map = map_groups__find_by_name(&session->kmaps, map->type, module);
if (curr_map == NULL) {
pr_debug("/proc/{kallsyms,modules} "
"inconsistency!\n");
@@ -419,7 +509,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map, struct thread
}
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
- __thread__insert_map(thread, curr_map);
+ map_groups__insert(&session->kmaps, curr_map);
++kernel_range;
}
@@ -440,7 +530,7 @@ discard_symbol: rb_erase(&pos->rb_node, root);
static int dso__load_kallsyms(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter)
+ struct perf_session *session, symbol_filter_t filter)
{
if (dso__load_all_kallsyms(self, map) < 0)
return -1;
@@ -448,14 +538,7 @@ static int dso__load_kallsyms(struct dso *self, struct map *map,
symbols__fixup_end(&self->symbols[map->type]);
self->origin = DSO__ORIG_KERNEL;
- return dso__split_kallsyms(self, map, thread, filter);
-}
-
-size_t kernel_maps__fprintf(FILE *fp)
-{
- size_t printed = fprintf(fp, "Kernel maps:\n");
- printed += thread__fprintf_maps(kthread, fp);
- return printed + fprintf(fp, "END kernel maps\n");
+ return dso__split_kallsyms(self, map, session, filter);
}
static int dso__load_perf_map(struct dso *self, struct map *map,
@@ -544,6 +627,13 @@ static inline int elf_sym__is_function(const GElf_Sym *sym)
sym->st_shndx != SHN_UNDEF;
}
+static inline bool elf_sym__is_object(const GElf_Sym *sym)
+{
+ return elf_sym__type(sym) == STT_OBJECT &&
+ sym->st_name != 0 &&
+ sym->st_shndx != SHN_UNDEF;
+}
+
static inline int elf_sym__is_label(const GElf_Sym *sym)
{
return elf_sym__type(sym) == STT_NOTYPE &&
@@ -564,6 +654,12 @@ static inline int elf_sec__is_text(const GElf_Shdr *shdr,
return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}
+static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
+ const Elf_Data *secstrs)
+{
+ return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
+}
+
static inline const char *elf_sym__name(const GElf_Sym *sym,
const Elf_Data *symstrs)
{
@@ -744,8 +840,32 @@ out:
return 0;
}
+static bool elf_sym__is_a(GElf_Sym *self, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sym__is_function(self);
+ case MAP__VARIABLE:
+ return elf_sym__is_object(self);
+ default:
+ return false;
+ }
+}
+
+static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type)
+{
+ switch (type) {
+ case MAP__FUNCTION:
+ return elf_sec__is_text(self, secstrs);
+ case MAP__VARIABLE:
+ return elf_sec__is_data(self, secstrs);
+ default:
+ return false;
+ }
+}
+
static int dso__load_sym(struct dso *self, struct map *map,
- struct thread *thread, const char *name, int fd,
+ struct perf_session *session, const char *name, int fd,
symbol_filter_t filter, int kernel, int kmodule)
{
struct map *curr_map = map;
@@ -818,7 +938,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
int is_label = elf_sym__is_label(&sym);
const char *section_name;
- if (!is_label && !elf_sym__is_function(&sym))
+ if (!is_label && !elf_sym__is_a(&sym, map->type))
continue;
sec = elf_getscn(elf, sym.st_shndx);
@@ -827,7 +947,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
gelf_getshdr(sec, &shdr);
- if (is_label && !elf_sec__is_text(&shdr, secstrs))
+ if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
continue;
elf_name = elf_sym__name(&sym, symstrs);
@@ -849,7 +969,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
snprintf(dso_name, sizeof(dso_name),
"%s%s", self->short_name, section_name);
- curr_map = thread__find_map_by_name(thread, dso_name);
+ curr_map = map_groups__find_by_name(&session->kmaps, map->type, dso_name);
if (curr_map == NULL) {
u64 start = sym.st_value;
@@ -868,7 +988,7 @@ static int dso__load_sym(struct dso *self, struct map *map,
curr_map->map_ip = identity__map_ip;
curr_map->unmap_ip = identity__map_ip;
curr_dso->origin = DSO__ORIG_KERNEL;
- __thread__insert_map(kthread, curr_map);
+ map_groups__insert(&session->kmaps, curr_map);
dsos__add(&dsos__kernel, curr_dso);
} else
curr_dso = curr_map->dso;
@@ -938,8 +1058,9 @@ static bool __dsos__read_build_ids(struct list_head *head)
bool dsos__read_build_ids(void)
{
- return __dsos__read_build_ids(&dsos__kernel) ||
- __dsos__read_build_ids(&dsos__user);
+ bool kbuildids = __dsos__read_build_ids(&dsos__kernel),
+ ubuildids = __dsos__read_build_ids(&dsos__user);
+ return kbuildids || ubuildids;
}
/*
@@ -1082,7 +1203,8 @@ char dso__symtab_origin(const struct dso *self)
return origin[self->origin];
}
-int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
+int dso__load(struct dso *self, struct map *map, struct perf_session *session,
+ symbol_filter_t filter)
{
int size = PATH_MAX;
char *name;
@@ -1093,7 +1215,7 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
dso__set_loaded(self, map->type);
if (self->kernel)
- return dso__load_kernel_sym(self, map, kthread, filter);
+ return dso__load_kernel_sym(self, map, session, filter);
name = malloc(size);
if (!name)
@@ -1179,11 +1301,12 @@ out:
return ret;
}
-static struct map *thread__find_map_by_name(struct thread *self, char *name)
+struct map *map_groups__find_by_name(struct map_groups *self,
+ enum map_type type, const char *name)
{
struct rb_node *nd;
- for (nd = rb_first(&self->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
struct map *map = rb_entry(nd, struct map, rb_node);
if (map->dso && strcmp(map->dso->name, name) == 0)
@@ -1193,7 +1316,7 @@ static struct map *thread__find_map_by_name(struct thread *self, char *name)
return NULL;
}
-static int dsos__set_modules_path_dir(char *dirname)
+static int perf_session__set_modules_path_dir(struct perf_session *self, char *dirname)
{
struct dirent *dent;
DIR *dir = opendir(dirname);
@@ -1213,7 +1336,7 @@ static int dsos__set_modules_path_dir(char *dirname)
snprintf(path, sizeof(path), "%s/%s",
dirname, dent->d_name);
- if (dsos__set_modules_path_dir(path) < 0)
+ if (perf_session__set_modules_path_dir(self, path) < 0)
goto failure;
} else {
char *dot = strrchr(dent->d_name, '.'),
@@ -1227,7 +1350,7 @@ static int dsos__set_modules_path_dir(char *dirname)
(int)(dot - dent->d_name), dent->d_name);
strxfrchar(dso_name, '-', '_');
- map = thread__find_map_by_name(kthread, dso_name);
+ map = map_groups__find_by_name(&self->kmaps, MAP__FUNCTION, dso_name);
if (map == NULL)
continue;
@@ -1247,7 +1370,7 @@ failure:
return -1;
}
-static int dsos__set_modules_path(void)
+static int perf_session__set_modules_path(struct perf_session *self)
{
struct utsname uts;
char modules_path[PATH_MAX];
@@ -1258,7 +1381,7 @@ static int dsos__set_modules_path(void)
snprintf(modules_path, sizeof(modules_path), "/lib/modules/%s/kernel",
uts.release);
- return dsos__set_modules_path_dir(modules_path);
+ return perf_session__set_modules_path_dir(self, modules_path);
}
/*
@@ -1280,7 +1403,7 @@ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
return self;
}
-static int thread__create_module_maps(struct thread *self)
+static int perf_session__create_module_maps(struct perf_session *self)
{
char *line = NULL;
size_t n;
@@ -1337,14 +1460,14 @@ static int thread__create_module_maps(struct thread *self)
dso->has_build_id = true;
dso->origin = DSO__ORIG_KMODULE;
- __thread__insert_map(self, map);
+ map_groups__insert(&self->kmaps, map);
dsos__add(&dsos__kernel, dso);
}
free(line);
fclose(file);
- return dsos__set_modules_path();
+ return perf_session__set_modules_path(self);
out_delete_line:
free(line);
@@ -1352,7 +1475,8 @@ out_failure:
return -1;
}
-static int dso__load_vmlinux(struct dso *self, struct map *map, struct thread *thread,
+static int dso__load_vmlinux(struct dso *self, struct map *map,
+ struct perf_session *session,
const char *vmlinux, symbol_filter_t filter)
{
int err = -1, fd;
@@ -1386,14 +1510,14 @@ static int dso__load_vmlinux(struct dso *self, struct map *map, struct thread *t
return -1;
dso__set_loaded(self, map->type);
- err = dso__load_sym(self, map, thread, self->long_name, fd, filter, 1, 0);
+ err = dso__load_sym(self, map, session, self->long_name, fd, filter, 1, 0);
close(fd);
return err;
}
static int dso__load_kernel_sym(struct dso *self, struct map *map,
- struct thread *thread, symbol_filter_t filter)
+ struct perf_session *session, symbol_filter_t filter)
{
int err;
bool is_kallsyms;
@@ -1403,7 +1527,7 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
pr_debug("Looking at the vmlinux_path (%d entries long)\n",
vmlinux_path__nr_entries);
for (i = 0; i < vmlinux_path__nr_entries; ++i) {
- err = dso__load_vmlinux(self, map, thread,
+ err = dso__load_vmlinux(self, map, session,
vmlinux_path[i], filter);
if (err > 0) {
pr_debug("Using %s for symbols\n",
@@ -1419,12 +1543,12 @@ static int dso__load_kernel_sym(struct dso *self, struct map *map,
if (is_kallsyms)
goto do_kallsyms;
- err = dso__load_vmlinux(self, map, thread, self->long_name, filter);
+ err = dso__load_vmlinux(self, map, session, self->long_name, filter);
if (err <= 0) {
pr_info("The file %s cannot be used, "
"trying to use /proc/kallsyms...", self->long_name);
do_kallsyms:
- err = dso__load_kallsyms(self, map, thread, filter);
+ err = dso__load_kallsyms(self, map, session, filter);
if (err > 0 && !is_kallsyms)
dso__set_long_name(self, strdup("[kernel.kallsyms]"));
}
@@ -1507,42 +1631,59 @@ size_t dsos__fprintf_buildid(FILE *fp)
__dsos__fprintf_buildid(&dsos__user, fp));
}
-static int thread__create_kernel_map(struct thread *self, const char *vmlinux)
+static struct dso *dsos__create_kernel(const char *vmlinux)
{
- struct map *kmap;
struct dso *kernel = dso__new(vmlinux ?: "[kernel.kallsyms]");
if (kernel == NULL)
- return -1;
-
- kmap = map__new2(0, kernel, MAP__FUNCTION);
- if (kmap == NULL)
- goto out_delete_kernel_dso;
+ return NULL;
- kmap->map_ip = kmap->unmap_ip = identity__map_ip;
kernel->short_name = "[kernel]";
kernel->kernel = 1;
vdso = dso__new("[vdso]");
if (vdso == NULL)
- goto out_delete_kernel_map;
+ goto out_delete_kernel_dso;
dso__set_loaded(vdso, MAP__FUNCTION);
if (sysfs__read_build_id("/sys/kernel/notes", kernel->build_id,
sizeof(kernel->build_id)) == 0)
kernel->has_build_id = true;
- __thread__insert_map(self, kmap);
dsos__add(&dsos__kernel, kernel);
dsos__add(&dsos__user, vdso);
- return 0;
+ return kernel;
-out_delete_kernel_map:
- map__delete(kmap);
out_delete_kernel_dso:
dso__delete(kernel);
- return -1;
+ return NULL;
+}
+
+static int map_groups__create_kernel_maps(struct map_groups *self, const char *vmlinux)
+{
+ struct map *functions, *variables;
+ struct dso *kernel = dsos__create_kernel(vmlinux);
+
+ if (kernel == NULL)
+ return -1;
+
+ functions = map__new2(0, kernel, MAP__FUNCTION);
+ if (functions == NULL)
+ return -1;
+
+ variables = map__new2(0, kernel, MAP__VARIABLE);
+ if (variables == NULL) {
+ map__delete(functions);
+ return -1;
+ }
+
+ functions->map_ip = functions->unmap_ip =
+ variables->map_ip = variables->unmap_ip = identity__map_ip;
+ map_groups__insert(self, functions);
+ map_groups__insert(self, variables);
+
+ return 0;
}
static void vmlinux_path__exit(void)
@@ -1600,29 +1741,69 @@ out_fail:
return -1;
}
-int symbol__init(struct symbol_conf *conf)
+static int setup_list(struct strlist **list, const char *list_str,
+ const char *list_name)
{
- const struct symbol_conf *pconf = conf ?: &symbol_conf__defaults;
+ if (list_str == NULL)
+ return 0;
+ *list = strlist__new(true, list_str);
+ if (!*list) {
+ pr_err("problems parsing %s list\n", list_name);
+ return -1;
+ }
+ return 0;
+}
+
+int symbol__init(void)
+{
elf_version(EV_CURRENT);
- symbol__priv_size = pconf->priv_size;
- thread__init(kthread, 0);
+ if (symbol_conf.sort_by_name)
+ symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
+ sizeof(struct symbol));
- if (pconf->try_vmlinux_path && vmlinux_path__init() < 0)
+ if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0)
return -1;
- if (thread__create_kernel_map(kthread, pconf->vmlinux_name) < 0) {
- vmlinux_path__exit();
+ if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
+ pr_err("'.' is the only non valid --field-separator argument\n");
return -1;
}
- kthread->use_modules = pconf->use_modules;
- if (pconf->use_modules && thread__create_module_maps(kthread) < 0)
- pr_debug("Failed to load list of modules in use, "
- "continuing...\n");
+ if (setup_list(&symbol_conf.dso_list,
+ symbol_conf.dso_list_str, "dso") < 0)
+ return -1;
+
+ if (setup_list(&symbol_conf.comm_list,
+ symbol_conf.comm_list_str, "comm") < 0)
+ goto out_free_dso_list;
+
+ if (setup_list(&symbol_conf.sym_list,
+ symbol_conf.sym_list_str, "symbol") < 0)
+ goto out_free_comm_list;
+
+ return 0;
+
+out_free_dso_list:
+ strlist__delete(symbol_conf.dso_list);
+out_free_comm_list:
+ strlist__delete(symbol_conf.comm_list);
+ return -1;
+}
+
+int perf_session__create_kernel_maps(struct perf_session *self)
+{
+ if (map_groups__create_kernel_maps(&self->kmaps,
+ symbol_conf.vmlinux_name) < 0)
+ return -1;
+
+ if (symbol_conf.use_modules &&
+ perf_session__create_module_maps(self) < 0)
+ pr_debug("Failed to load list of modules for session %s, "
+ "continuing...\n", self->filename);
/*
* Now that we have all the maps created, just set the ->end of them:
*/
- thread__fixup_maps_end(kthread);
+ map_groups__fixup_end(&self->kmaps);
return 0;
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 17003efa0b3..8aded2356f7 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -49,18 +49,32 @@ struct symbol {
char name[0];
};
+struct strlist;
+
struct symbol_conf {
unsigned short priv_size;
bool try_vmlinux_path,
- use_modules;
- const char *vmlinux_name;
+ use_modules,
+ sort_by_name,
+ show_nr_samples,
+ use_callchain,
+ exclude_other;
+ const char *vmlinux_name,
+ *field_sep;
+ char *dso_list_str,
+ *comm_list_str,
+ *sym_list_str,
+ *col_width_list_str;
+ struct strlist *dso_list,
+ *comm_list,
+ *sym_list;
};
-extern unsigned int symbol__priv_size;
+extern struct symbol_conf symbol_conf;
static inline void *symbol__priv(struct symbol *self)
{
- return ((void *)self) - symbol__priv_size;
+ return ((void *)self) - symbol_conf.priv_size;
}
struct addr_location {
@@ -69,18 +83,19 @@ struct addr_location {
struct symbol *sym;
u64 addr;
char level;
+ bool filtered;
};
struct dso {
struct list_head node;
struct rb_root symbols[MAP__NR_TYPES];
- struct symbol *(*find_symbol)(struct dso *self,
- enum map_type type, u64 addr);
+ struct rb_root symbol_names[MAP__NR_TYPES];
u8 adjust_symbols:1;
u8 slen_calculated:1;
u8 has_build_id:1;
u8 kernel:1;
unsigned char origin;
+ u8 sorted_by_name;
u8 loaded;
u8 build_id[BUILD_ID_SIZE];
u16 long_name_len;
@@ -93,9 +108,15 @@ struct dso *dso__new(const char *name);
void dso__delete(struct dso *self);
bool dso__loaded(const struct dso *self, enum map_type type);
+bool dso__sorted_by_name(const struct dso *self, enum map_type type);
+
+void dso__sort_by_name(struct dso *self, enum map_type type);
+
+struct perf_session;
struct dso *dsos__findnew(const char *name);
-int dso__load(struct dso *self, struct map *map, symbol_filter_t filter);
+int dso__load(struct dso *self, struct map *map, struct perf_session *session,
+ symbol_filter_t filter);
void dsos__fprintf(FILE *fp);
size_t dsos__fprintf_buildid(FILE *fp);
@@ -103,18 +124,18 @@ size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
char dso__symtab_origin(const struct dso *self);
void dso__set_build_id(struct dso *self, void *build_id);
+struct symbol *dso__find_symbol(struct dso *self, enum map_type type, u64 addr);
+struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type,
+ const char *name);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
bool dsos__read_build_ids(void);
int build_id__sprintf(u8 *self, int len, char *bf);
-size_t kernel_maps__fprintf(FILE *fp);
-
-int symbol__init(struct symbol_conf *conf);
+int symbol__init(void);
+int perf_session__create_kernel_maps(struct perf_session *self);
-struct thread;
-struct thread *kthread;
extern struct list_head dsos__user, dsos__kernel;
extern struct dso *vdso;
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 603f5610861..4a08dcf50b6 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -2,18 +2,14 @@
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
+#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"
-static struct rb_root threads;
-static struct thread *last_match;
-
-void thread__init(struct thread *self, pid_t pid)
+void map_groups__init(struct map_groups *self)
{
int i;
- self->pid = pid;
- self->comm = NULL;
for (i = 0; i < MAP__NR_TYPES; ++i) {
self->maps[i] = RB_ROOT;
INIT_LIST_HEAD(&self->removed_maps[i]);
@@ -25,7 +21,8 @@ static struct thread *thread__new(pid_t pid)
struct thread *self = zalloc(sizeof(*self));
if (self != NULL) {
- thread__init(self, pid);
+ map_groups__init(&self->mg);
+ self->pid = pid;
self->comm = malloc(32);
if (self->comm)
snprintf(self->comm, 32, ":%d", self->pid);
@@ -55,10 +52,11 @@ int thread__comm_len(struct thread *self)
static const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
+ [MAP__VARIABLE] = "Variables",
};
-static size_t __thread__fprintf_maps(struct thread *self,
- enum map_type type, FILE *fp)
+static size_t __map_groups__fprintf_maps(struct map_groups *self,
+ enum map_type type, FILE *fp)
{
size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
struct rb_node *nd;
@@ -76,16 +74,16 @@ static size_t __thread__fprintf_maps(struct thread *self,
return printed;
}
-size_t thread__fprintf_maps(struct thread *self, FILE *fp)
+size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp)
{
size_t printed = 0, i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __thread__fprintf_maps(self, i, fp);
+ printed += __map_groups__fprintf_maps(self, i, fp);
return printed;
}
-static size_t __thread__fprintf_removed_maps(struct thread *self,
- enum map_type type, FILE *fp)
+static size_t __map_groups__fprintf_removed_maps(struct map_groups *self,
+ enum map_type type, FILE *fp)
{
struct map *pos;
size_t printed = 0;
@@ -101,25 +99,30 @@ static size_t __thread__fprintf_removed_maps(struct thread *self,
return printed;
}
-static size_t thread__fprintf_removed_maps(struct thread *self, FILE *fp)
+static size_t map_groups__fprintf_removed_maps(struct map_groups *self, FILE *fp)
{
size_t printed = 0, i;
for (i = 0; i < MAP__NR_TYPES; ++i)
- printed += __thread__fprintf_removed_maps(self, i, fp);
+ printed += __map_groups__fprintf_removed_maps(self, i, fp);
return printed;
}
-static size_t thread__fprintf(struct thread *self, FILE *fp)
+static size_t map_groups__fprintf(struct map_groups *self, FILE *fp)
{
- size_t printed = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
- printed += thread__fprintf_removed_maps(self, fp);
+ size_t printed = map_groups__fprintf_maps(self, fp);
printed += fprintf(fp, "Removed maps:\n");
- return printed + thread__fprintf_removed_maps(self, fp);
+ return printed + map_groups__fprintf_removed_maps(self, fp);
+}
+
+static size_t thread__fprintf(struct thread *self, FILE *fp)
+{
+ return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
+ map_groups__fprintf(&self->mg, fp);
}
-struct thread *threads__findnew(pid_t pid)
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid)
{
- struct rb_node **p = &threads.rb_node;
+ struct rb_node **p = &self->threads.rb_node;
struct rb_node *parent = NULL;
struct thread *th;
@@ -128,15 +131,15 @@ struct thread *threads__findnew(pid_t pid)
* so most of the time we dont have to look up
* the full rbtree:
*/
- if (last_match && last_match->pid == pid)
- return last_match;
+ if (self->last_match && self->last_match->pid == pid)
+ return self->last_match;
while (*p != NULL) {
parent = *p;
th = rb_entry(parent, struct thread, rb_node);
if (th->pid == pid) {
- last_match = th;
+ self->last_match = th;
return th;
}
@@ -149,26 +152,15 @@ struct thread *threads__findnew(pid_t pid)
th = thread__new(pid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
- rb_insert_color(&th->rb_node, &threads);
- last_match = th;
+ rb_insert_color(&th->rb_node, &self->threads);
+ self->last_match = th;
}
return th;
}
-struct thread *register_idle_thread(void)
-{
- struct thread *thread = threads__findnew(0);
-
- if (!thread || thread__set_comm(thread, "swapper")) {
- fprintf(stderr, "problem inserting idle task.\n");
- exit(-1);
- }
-
- return thread;
-}
-
-static void thread__remove_overlappings(struct thread *self, struct map *map)
+static void map_groups__remove_overlappings(struct map_groups *self,
+ struct map *map)
{
struct rb_root *root = &self->maps[map->type];
struct rb_node *next = rb_first(root);
@@ -238,12 +230,15 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
void thread__insert_map(struct thread *self, struct map *map)
{
- thread__remove_overlappings(self, map);
- maps__insert(&self->maps[map->type], map);
+ map_groups__remove_overlappings(&self->mg, map);
+ map_groups__insert(&self->mg, map);
}
-static int thread__clone_maps(struct thread *self, struct thread *parent,
- enum map_type type)
+/*
+ * XXX This should not really _copy_ the maps, but refcount them.
+ */
+static int map_groups__clone(struct map_groups *self,
+ struct map_groups *parent, enum map_type type)
{
struct rb_node *nd;
for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
@@ -251,7 +246,7 @@ static int thread__clone_maps(struct thread *self, struct thread *parent,
struct map *new = map__clone(map);
if (new == NULL)
return -ENOMEM;
- thread__insert_map(self, new);
+ map_groups__insert(self, new);
}
return 0;
}
@@ -267,17 +262,17 @@ int thread__fork(struct thread *self, struct thread *parent)
return -ENOMEM;
for (i = 0; i < MAP__NR_TYPES; ++i)
- if (thread__clone_maps(self, parent, i) < 0)
+ if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
return -ENOMEM;
return 0;
}
-size_t threads__fprintf(FILE *fp)
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
size_t ret = 0;
struct rb_node *nd;
- for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
+ for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
@@ -286,14 +281,15 @@ size_t threads__fprintf(FILE *fp)
return ret;
}
-struct symbol *thread__find_symbol(struct thread *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter)
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ struct perf_session *session,
+ enum map_type type, u64 addr,
+ symbol_filter_t filter)
{
- struct map *map = thread__find_map(self, type, addr);
+ struct map *map = map_groups__find(self, type, addr);
if (map != NULL)
- return map__find_symbol(map, map->map_ip(map, addr), filter);
+ return map__find_symbol(map, session, map->map_ip(map, addr), filter);
return NULL;
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 686d6e914d9..c206f72c888 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -5,52 +5,66 @@
#include <unistd.h>
#include "symbol.h"
-struct thread {
- struct rb_node rb_node;
+struct map_groups {
struct rb_root maps[MAP__NR_TYPES];
struct list_head removed_maps[MAP__NR_TYPES];
+};
+
+struct thread {
+ struct rb_node rb_node;
+ struct map_groups mg;
pid_t pid;
- bool use_modules;
char shortname[3];
char *comm;
int comm_len;
};
-void thread__init(struct thread *self, pid_t pid);
+void map_groups__init(struct map_groups *self);
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
-struct thread *threads__findnew(pid_t pid);
-struct thread *register_idle_thread(void);
+struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
void thread__insert_map(struct thread *self, struct map *map);
int thread__fork(struct thread *self, struct thread *parent);
-size_t thread__fprintf_maps(struct thread *self, FILE *fp);
-size_t threads__fprintf(FILE *fp);
+size_t map_groups__fprintf_maps(struct map_groups *self, FILE *fp);
+size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
void maps__insert(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
-static inline struct map *thread__find_map(struct thread *self,
+static inline void map_groups__insert(struct map_groups *self, struct map *map)
+{
+ maps__insert(&self->maps[map->type], map);
+}
+
+static inline struct map *map_groups__find(struct map_groups *self,
enum map_type type, u64 addr)
{
- return self ? maps__find(&self->maps[type], addr) : NULL;
+ return maps__find(&self->maps[type], addr);
}
-static inline void __thread__insert_map(struct thread *self, struct map *map)
+static inline struct map *thread__find_map(struct thread *self,
+ enum map_type type, u64 addr)
{
- maps__insert(&self->maps[map->type], map);
+ return self ? map_groups__find(&self->mg, type, addr) : NULL;
}
-void thread__find_addr_location(struct thread *self, u8 cpumode,
+void thread__find_addr_location(struct thread *self,
+ struct perf_session *session, u8 cpumode,
enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter);
-struct symbol *thread__find_symbol(struct thread *self,
- enum map_type type, u64 addr,
- symbol_filter_t filter);
+struct symbol *map_groups__find_symbol(struct map_groups *self,
+ struct perf_session *session,
+ enum map_type type, u64 addr,
+ symbol_filter_t filter);
static inline struct symbol *
-thread__find_function(struct thread *self, u64 addr, symbol_filter_t filter)
+map_groups__find_function(struct map_groups *self, struct perf_session *session,
+ u64 addr, symbol_filter_t filter)
{
- return thread__find_symbol(self, MAP__FUNCTION, addr, filter);
+ return map_groups__find_symbol(self, session, MAP__FUNCTION, addr, filter);
}
+
+struct map *map_groups__find_by_name(struct map_groups *self,
+ enum map_type type, const char *name);
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 0302405aa2c..c5c32be040b 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -177,7 +177,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
func_count++;
}
- func_list = malloc_or_die(sizeof(*func_list) * func_count + 1);
+ func_list = malloc_or_die(sizeof(*func_list) * (func_count + 1));
i = 0;
while (list) {
@@ -1477,7 +1477,7 @@ process_fields(struct event *event, struct print_flag_sym **list, char **tok)
goto out_free;
field = malloc_or_die(sizeof(*field));
- memset(field, 0, sizeof(field));
+ memset(field, 0, sizeof(*field));
value = arg_eval(arg);
field->value = strdup(value);
diff --git a/tools/perf/util/trace-event-perl.c b/tools/perf/util/trace-event-perl.c
index 51e833fd58c..6d6d76b8a21 100644
--- a/tools/perf/util/trace-event-perl.c
+++ b/tools/perf/util/trace-event-perl.c
@@ -32,9 +32,6 @@
void xs_init(pTHX);
-void boot_Perf__Trace__Context(pTHX_ CV *cv);
-void boot_DynaLoader(pTHX_ CV *cv);
-
void xs_init(pTHX)
{
const char *file = __FILE__;
@@ -270,7 +267,7 @@ int common_lock_depth(struct scripting_context *context)
}
static void perl_process_event(int cpu, void *data,
- int size __attribute((unused)),
+ int size __unused,
unsigned long long nsecs, char *comm)
{
struct format_field *field;
@@ -362,28 +359,46 @@ static void run_start_sub(void)
/*
* Start trace script
*/
-static int perl_start_script(const char *script)
+static int perl_start_script(const char *script, int argc, const char **argv)
{
- const char *command_line[2] = { "", NULL };
+ const char **command_line;
+ int i, err = 0;
+ command_line = malloc((argc + 2) * sizeof(const char *));
+ command_line[0] = "";
command_line[1] = script;
+ for (i = 2; i < argc + 2; i++)
+ command_line[i] = argv[i - 2];
my_perl = perl_alloc();
perl_construct(my_perl);
- if (perl_parse(my_perl, xs_init, 2, (char **)command_line,
- (char **)NULL))
- return -1;
+ if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line,
+ (char **)NULL)) {
+ err = -1;
+ goto error;
+ }
- perl_run(my_perl);
- if (SvTRUE(ERRSV))
- return -1;
+ if (perl_run(my_perl)) {
+ err = -1;
+ goto error;
+ }
+
+ if (SvTRUE(ERRSV)) {
+ err = -1;
+ goto error;
+ }
run_start_sub();
+ free(command_line);
fprintf(stderr, "perf trace started with Perl script %s\n\n", script);
-
return 0;
+error:
+ perl_free(my_perl);
+ free(command_line);
+
+ return err;
}
/*
@@ -573,26 +588,74 @@ struct scripting_ops perl_scripting_ops = {
.generate_script = perl_generate_script,
};
-#ifdef NO_LIBPERL
-void setup_perl_scripting(void)
+static void print_unsupported_msg(void)
{
fprintf(stderr, "Perl scripting not supported."
- " Install libperl and rebuild perf to enable it. e.g. "
- "apt-get install libperl-dev (ubuntu), yum install "
- "perl-ExtUtils-Embed (Fedora), etc.\n");
+ " Install libperl and rebuild perf to enable it.\n"
+ "For example:\n # apt-get install libperl-dev (ubuntu)"
+ "\n # yum install perl-ExtUtils-Embed (Fedora)"
+ "\n etc.\n");
}
-#else
-void setup_perl_scripting(void)
+
+static int perl_start_script_unsupported(const char *script __unused,
+ int argc __unused,
+ const char **argv __unused)
+{
+ print_unsupported_msg();
+
+ return -1;
+}
+
+static int perl_stop_script_unsupported(void)
+{
+ return 0;
+}
+
+static void perl_process_event_unsupported(int cpu __unused,
+ void *data __unused,
+ int size __unused,
+ unsigned long long nsecs __unused,
+ char *comm __unused)
+{
+}
+
+static int perl_generate_script_unsupported(const char *outfile __unused)
+{
+ print_unsupported_msg();
+
+ return -1;
+}
+
+struct scripting_ops perl_scripting_unsupported_ops = {
+ .name = "Perl",
+ .start_script = perl_start_script_unsupported,
+ .stop_script = perl_stop_script_unsupported,
+ .process_event = perl_process_event_unsupported,
+ .generate_script = perl_generate_script_unsupported,
+};
+
+static void register_perl_scripting(struct scripting_ops *scripting_ops)
{
int err;
- err = script_spec_register("Perl", &perl_scripting_ops);
+ err = script_spec_register("Perl", scripting_ops);
if (err)
die("error registering Perl script extension");
- err = script_spec_register("pl", &perl_scripting_ops);
+ err = script_spec_register("pl", scripting_ops);
if (err)
die("error registering pl script extension");
scripting_context = malloc(sizeof(struct scripting_context));
}
+
+#ifdef NO_LIBPERL
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_unsupported_ops);
+}
+#else
+void setup_perl_scripting(void)
+{
+ register_perl_scripting(&perl_scripting_ops);
+}
#endif
diff --git a/tools/perf/util/trace-event-perl.h b/tools/perf/util/trace-event-perl.h
index 8fe0d866fe1..e88fb26137b 100644
--- a/tools/perf/util/trace-event-perl.h
+++ b/tools/perf/util/trace-event-perl.h
@@ -34,9 +34,13 @@ typedef int INTERP;
#define dXSUB_SYS
#define pTHX_
static inline void newXS(const char *a, void *b, const char *c) {}
+static void boot_Perf__Trace__Context(pTHX_ CV *cv) {}
+static void boot_DynaLoader(pTHX_ CV *cv) {}
#else
#include <EXTERN.h>
#include <perl.h>
+void boot_Perf__Trace__Context(pTHX_ CV *cv);
+void boot_DynaLoader(pTHX_ CV *cv);
typedef PerlInterpreter * INTERP;
#endif
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 342dfdd43f8..1744422cafc 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -145,8 +145,9 @@ static void read_proc_kallsyms(void)
if (!size)
return;
- buf = malloc_or_die(size);
+ buf = malloc_or_die(size + 1);
read_or_die(buf, size);
+ buf[size] = '\0';
parse_proc_kallsyms(buf, size);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 81698d5e650..6ad405620c9 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -270,7 +270,7 @@ enum trace_flag_type {
struct scripting_ops {
const char *name;
- int (*start_script) (const char *);
+ int (*start_script) (const char *script, int argc, const char **argv);
int (*stop_script) (void);
void (*process_event) (int cpu, void *data, int size,
unsigned long long nsecs, char *comm);
diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
index 83b3dde1a83..b2b3c2d1cf8 100644
--- a/usr/gen_init_cpio.c
+++ b/usr/gen_init_cpio.c
@@ -354,7 +354,10 @@ static int cpio_mkfile(const char *name, const char *location,
push_pad();
if (size) {
- fwrite(filebuf, size, 1, stdout);
+ if (fwrite(filebuf, size, 1, stdout) != 1) {
+ fprintf(stderr, "writing filebuf failed\n");
+ goto error;
+ }
offset += size;
push_pad();
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f92ba138007..b5af8816761 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#include <asm-generic/bitops/le.h>
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
@@ -1071,8 +1072,8 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
unsigned long rel_gfn = gfn - memslot->base_gfn;
/* avoid RMW */
- if (!test_bit(rel_gfn, memslot->dirty_bitmap))
- set_bit(rel_gfn, memslot->dirty_bitmap);
+ if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
+ generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
}
}
@@ -1176,7 +1177,7 @@ static struct file_operations kvm_vcpu_fops = {
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
- return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
+ return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}
/*
@@ -1637,7 +1638,7 @@ static int kvm_dev_ioctl_create_vm(void)
kvm = kvm_create_vm();
if (IS_ERR(kvm))
return PTR_ERR(kvm);
- fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
+ fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
if (fd < 0)
kvm_put_kvm(kvm);